Dataset schema (column | type | lengths / values; ⌀ = may be null):

| column | type | lengths / values |
|---|---|---|
| hexsha | string | 40 to 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | 4 to 209 |
| max_stars_repo_name | string | 5 to 121 |
| max_stars_repo_head_hexsha | string | 40 to 40 |
| max_stars_repo_licenses | list | 1 to 10 |
| max_stars_count | int64 | 1 to 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | 24 to 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | 24 to 24 ⌀ |
| max_issues_repo_path | string | 4 to 209 |
| max_issues_repo_name | string | 5 to 121 |
| max_issues_repo_head_hexsha | string | 40 to 40 |
| max_issues_repo_licenses | list | 1 to 10 |
| max_issues_count | int64 | 1 to 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | 24 to 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | 24 to 24 ⌀ |
| max_forks_repo_path | string | 4 to 209 |
| max_forks_repo_name | string | 5 to 121 |
| max_forks_repo_head_hexsha | string | 40 to 40 |
| max_forks_repo_licenses | list | 1 to 10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | 24 to 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | 24 to 24 ⌀ |
| content | string | 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |

Rows (one per source file):
hexsha: ecbc2a5a9d0a3618b16e1f22cd8275ab7c2c8270 | size: 625 | ext: py | lang: Python
repo path: test_attestation_record.py | repo: blockchainhelppro/Ether-Based-App | head hexsha: 8c52abd30adcad5b120a9d1f238ea34cdefd2ca1 | licenses: ["MIT"] (identical across the max_stars, max_issues and max_forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
content:
import pytest
from beacon_chain.state.attestation_record import (
AttestationRecord,
)
@pytest.mark.parametrize(
'param,default_value',
[
('slot', 0),
('shard_id', 0),
('oblique_parent_hashes', []),
('shard_block_hash', b'\x00'*32),
('attester_bitfield', b''),
('aggregate_sig', [0, 0]),
]
)
def test_defaults(param, default_value, sample_attestation_record_params):
del sample_attestation_record_params[param]
attestation_record = AttestationRecord(**sample_attestation_record_params)
assert getattr(attestation_record, param) == default_value
avg_line_length: 26.041667 | max_line_length: 78 | alphanum_fraction: 0.68
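The test above relies on a `sample_attestation_record_params` fixture defined elsewhere in the repository (typically a `conftest.py`). A minimal sketch of what such a fixture could look like, with field names taken from the parametrization above and concrete values that are purely illustrative:

```python
# Hypothetical conftest.py sketch -- the real fixture lives elsewhere in the repo.
import pytest


@pytest.fixture
def sample_attestation_record_params():
    # One entry per AttestationRecord field; test_defaults deletes one key at a
    # time so the constructor falls back to that field's default value.
    return {
        'slot': 10,
        'shard_id': 12,
        'oblique_parent_hashes': [],
        'shard_block_hash': b'\x55' * 32,
        'attester_bitfield': b'\x12' * 16,
        'aggregate_sig': [1, 2],
    }
```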
hexsha: 0d4c59b6a0dbfb277d2e9d7b0217187673384735 | size: 7,095 | ext: py | lang: Python
repo path: aiida/tools/data/array/kpoints/seekpath.py | repo: louisponet/aiida-core | head hexsha: 3214236df66a3792ee57fe38a06c0c3bb65861ab | licenses: ["MIT", "BSD-3-Clause"] (identical across the max_stars, max_issues and max_forks columns)
max_stars_count: 1 (events 2016-09-12T10:51:00.000Z to 2016-09-12T10:51:00.000Z) | max_issues_count: 17 (events 2020-03-11T17:04:05.000Z to 2020-05-01T09:34:45.000Z) | max_forks_count: null (event datetimes null)
content:
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Tool to automatically determine k-points for a given structure using SeeK-path."""
import seekpath
from aiida.orm import KpointsData, Dict
__all__ = ('get_explicit_kpoints_path', 'get_kpoints_path')
def get_explicit_kpoints_path(structure, parameters):
"""
Return the kpoint path for band structure (in scaled and absolute
coordinates), given a crystal structure,
using the paths proposed in the various publications (see description
of the 'recipe' input parameter). The parameters are the same
    as get_explicit_k_path in __init__, but here all structures are
    input and returned as AiiDA structures rather than tuples, and similarly
    k-points-related information as an AiiDA KpointsData class.
:param structure: The AiiDA StructureData for which we want to obtain
the suggested path.
:param parameters: A dictionary whose key-value pairs are passed as
additional kwargs to the ``seekpath.get_explicit_k_path`` function.
:return: A dictionary with four nodes:
- ``explicit_kpoints``: a KpointsData with the (explicit) kpoints
(with labels set).
- ``parameters``: a Dict, whose content is
the same dictionary as returned by the ``seekpath.get_explicit_k_path`` function
(see `seekpath documentation <https://seekpath.readthedocs.io/>`_),
except that:
- ``conv_lattice``, ``conv_positions``, ``conv_types``
are removed and replaced by the ``conv_structure`` output node
- ``primitive_lattice``, ``primitive_positions``, ``primitive_types``
          are removed and replaced by the ``primitive_structure`` output node
- ``reciprocal_primitive_lattice``, ``explicit_kpoints_abs``,
``explicit_kpoints_rel`` and ``explicit_kpoints_labels`` are removed
and replaced by the ``explicit_kpoints`` output node
- ``primitive_structure``: A StructureData with the primitive structure
        - ``conv_structure``: A StructureData with the conventional structure
"""
# pylint: disable=too-many-locals
from aiida.tools.data.structure import spglib_tuple_to_structure, structure_to_spglib_tuple
structure_tuple, kind_info, kinds = structure_to_spglib_tuple(structure)
result = {}
rawdict = seekpath.get_explicit_k_path(structure=structure_tuple, **parameters)
# Replace primitive structure with AiiDA StructureData
primitive_lattice = rawdict.pop('primitive_lattice')
primitive_positions = rawdict.pop('primitive_positions')
primitive_types = rawdict.pop('primitive_types')
primitive_tuple = (primitive_lattice, primitive_positions, primitive_types)
primitive_structure = spglib_tuple_to_structure(primitive_tuple, kind_info, kinds)
# Replace conv structure with AiiDA StructureData
conv_lattice = rawdict.pop('conv_lattice')
conv_positions = rawdict.pop('conv_positions')
conv_types = rawdict.pop('conv_types')
conv_tuple = (conv_lattice, conv_positions, conv_types)
conv_structure = spglib_tuple_to_structure(conv_tuple, kind_info, kinds)
# Remove reciprocal_primitive_lattice, recalculated by kpoints class
rawdict.pop('reciprocal_primitive_lattice')
kpoints_abs = rawdict.pop('explicit_kpoints_abs')
kpoints_labels = rawdict.pop('explicit_kpoints_labels')
# set_kpoints expects labels like [[0,'X'],[34,'L'],...], so generate it here skipping empty labels
labels = [[idx, label] for idx, label in enumerate(kpoints_labels) if label]
kpoints = KpointsData()
kpoints.set_cell_from_structure(primitive_structure)
kpoints.set_kpoints(kpoints_abs, cartesian=True, labels=labels)
result['parameters'] = Dict(dict=rawdict)
result['explicit_kpoints'] = kpoints
result['primitive_structure'] = primitive_structure
result['conv_structure'] = conv_structure
return result
def get_kpoints_path(structure, parameters):
"""
Return the kpoint path information for band structure given a
crystal structure, using the paths from the chosen recipe/reference.
The parameters are the same
    as get_path in __init__, but here all structures are
input and returned as AiiDA structures rather than tuples.
If you use this module, please cite the paper of the corresponding
recipe (see documentation of seekpath).
:param structure: The crystal structure for which we want to obtain
the suggested path. It should be an AiiDA StructureData object.
:param parameters: A dictionary whose key-value pairs are passed as
additional kwargs to the ``seekpath.get_path`` function.
:return: A dictionary with three nodes:
- ``parameters``: a Dict, whose content is
the same dictionary as returned by the ``seekpath.get_path`` function
(see `seekpath documentation <https://seekpath.readthedocs.io/>`_),
except that:
- ``conv_lattice``, ``conv_positions``, ``conv_types``
are removed and replaced by the ``conv_structure`` output node
- ``primitive_lattice``, ``primitive_positions``, ``primitive_types``
are removed and replaced by the ``primitive_structure`` output node
- ``primitive_structure``: A StructureData with the primitive structure
        - ``conv_structure``: A StructureData with the conventional structure
"""
from aiida.tools.data.structure import spglib_tuple_to_structure, structure_to_spglib_tuple
structure_tuple, kind_info, kinds = structure_to_spglib_tuple(structure)
result = {}
rawdict = seekpath.get_path(structure=structure_tuple, **parameters)
result['parameters'] = Dict(dict=rawdict)
# Replace conv structure with AiiDA StructureData
conv_lattice = rawdict.pop('conv_lattice')
conv_positions = rawdict.pop('conv_positions')
conv_types = rawdict.pop('conv_types')
result['conv_structure'] = spglib_tuple_to_structure((conv_lattice, conv_positions, conv_types), kind_info, kinds)
# Replace primitive structure with AiiDA StructureData
primitive_lattice = rawdict.pop('primitive_lattice')
primitive_positions = rawdict.pop('primitive_positions')
primitive_types = rawdict.pop('primitive_types')
result['primitive_structure'] = spglib_tuple_to_structure((primitive_lattice, primitive_positions, primitive_types),
kind_info, kinds)
return result
avg_line_length: 45.191083 | max_line_length: 120 | alphanum_fraction: 0.691755
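A hedged usage sketch for the functions above, assuming a configured AiiDA profile and the `seekpath` dependency are available; the silicon cell below is purely illustrative and not taken from the repository:

```python
# Illustrative only: requires a working AiiDA profile and seekpath installed.
from aiida import load_profile
from aiida.orm import StructureData
from aiida.tools.data.array.kpoints.seekpath import get_kpoints_path

load_profile()

# Minimal FCC silicon cell (lattice constant ~5.43 Angstrom, values illustrative).
alat = 5.43
cell = [[0.0, alat / 2, alat / 2],
        [alat / 2, 0.0, alat / 2],
        [alat / 2, alat / 2, 0.0]]
structure = StructureData(cell=cell)
structure.append_atom(position=(0.0, 0.0, 0.0), symbols='Si')
structure.append_atom(position=(alat / 4, alat / 4, alat / 4), symbols='Si')

result = get_kpoints_path(structure, {'with_time_reversal': True})
print(result['parameters'].get_dict()['path'])      # suggested band-structure path segments
print(result['primitive_structure'].get_formula())  # primitive cell as a StructureData node
```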
hexsha: 4b2b8e5792c282ff7472a3c8808448ccf123d471 | size: 605 | ext: py | lang: Python
repo path: examples/106-git-pipeline.py | repo: sarabala1979/argo-dataflow | head hexsha: 1f296034d0746b01a70e281faf2c0e8640d37ea1 | licenses: ["Apache-2.0"] (identical across the max_stars, max_issues and max_forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
content:
from argo_dataflow import pipeline, kafka
if __name__ == '__main__':
(pipeline("106-git")
.owner('argoproj-labs')
.describe("""This example of a pipeline using Git.
The Git handler allows you to check your application source code into Git. Dataflow will checkout and build
your code when the step starts.
[Learn about Git steps](../docs/GIT.md)""")
.step(
(kafka('input-topic')
.git('main', 'https://github.com/argoproj-labs/argo-dataflow', 'main', 'examples/git', 'quay.io/argoproj/dataflow-go1-16:latest')
.kafka('output-topic')
))
.save())
avg_line_length: 33.611111 | max_line_length: 138 | alphanum_fraction: 0.654545
hexsha: 5809ed112b734ba5e6c2934eaca245cded265249 | size: 1,918 | ext: py | lang: Python
repo path: python/lib/ucscGb/qa/qaUtils.py | repo: psteinb/kent | head hexsha: 3ff439f4e5194805359405bb4452c8d96a343932 | licenses: ["IJG"] (identical across the max_stars, max_issues and max_forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
content:
import subprocess
import pipes
def callHgsql(database, command):
""" Run hgsql command using subprocess, return stdout data if no error."""
cmd = ["hgsql", database, "-Ne", command]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
cmdout, cmderr = p.communicate()
if p.returncode != 0:
# keep command arguments nicely quoted
cmdstr = " ".join([pipes.quote(arg) for arg in cmd])
raise Exception("Error from: " + cmdstr + ": " + cmderr)
return cmdout
def runCommand(command):
"""Runs command in subprocess and raises exception if return code is not 0.
Returns tuple (stdoutdata, stderrdata). """
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
cmdout, cmderr = p.communicate()
if p.returncode != 0:
# keep command arguments nicely quoted
cmdstr = " ".join([pipes.quote(arg) for arg in command])
raise Exception("Error from: " + cmdstr)
return cmdout, cmderr
def runCommandMergedOutErr(command):
"""Runs command in subprocess and raises exception if return code is not 0.
Combines stdout and stderr into a single output. Returns stdoutdata string."""
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# cmderr is empty, but we still need to call both so cmdout is properly set
cmdout, cmderr = p.communicate()
if p.returncode != 0:
# keep command arguments nicely quoted
cmdstr = " ".join([pipes.quote(arg) for arg in command])
raise Exception("Error from: " + cmdstr)
return cmdout
def runCommandNoAbort(command):
"""Runs command in subprocess. Returns tuple (stdoutdata, stderrdata)
and returncode from process."""
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
cmdout, cmderr = p.communicate()
return cmdout, cmderr, p.returncode
avg_line_length: 43.590909 | max_line_length: 83 | alphanum_fraction: 0.688738
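A short, hedged usage sketch for these helpers; it assumes the `ucscGb` package is importable and that the UCSC `hgsql` client plus a database such as `hg38` are reachable, which may not hold on a given machine:

```python
# Illustrative only: hgsql and the target database name are assumptions here.
from ucscGb.qa import qaUtils

# Query a row count through hgsql (raises if the command returns non-zero).
row_count = qaUtils.callHgsql("hg38", "SELECT COUNT(*) FROM chromInfo")
print("chromInfo rows:", row_count.strip())

# Run an arbitrary command and capture stdout and stderr separately.
stdout, stderr = qaUtils.runCommand(["echo", "hello"])
print(stdout.strip())
```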
hexsha: 0a6fe1a63652d60ffc637bba6eaeeb302d70d13a | size: 4,803 | ext: py | lang: Python
repo path: typeidea/blog/views.py | repo: starpsp/typeidea-1 | head hexsha: 84e5c34b0e51fd62ccf2dc30a06c060b501d4f4d | licenses: ["MIT"] (identical across the max_stars, max_issues and max_forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
content:
import logging
from datetime import date
from django.core.cache import cache
from django.db.models import Q, F
from django.views.generic import ListView, DetailView, TemplateView
from django.shortcuts import get_object_or_404
from config.models import SideBar
from .models import Post, Category, Tag
logger = logging.getLogger(__name__)
class CommonViewMixin:
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'sidebars': self.get_sidebars(),
})
context.update(self.get_navs())
return context
def get_sidebars(self):
return SideBar.objects.filter(status=SideBar.STATUS_SHOW)
def get_navs(self):
categories = Category.objects.filter(status=Category.STATUS_NORMAL)
nav_categories = []
normal_categories = []
for cate in categories:
if cate.is_nav:
nav_categories.append(cate)
else:
normal_categories.append(cate)
return {
'navs': nav_categories,
'categories': normal_categories,
}
class IndexView(CommonViewMixin, ListView):
queryset = Post.objects.filter(status=Post.STATUS_NORMAL)\
.select_related('owner')\
.select_related('category')
paginate_by = 5
context_object_name = 'post_list'
template_name = 'blog/list.html'
class CategoryView(IndexView):
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
category_id = self.kwargs.get('category_id')
category = get_object_or_404(Category, pk=category_id)
context.update({
'category': category,
})
return context
def get_queryset(self):
""" 重写querset,根据分类过滤 """
queryset = super().get_queryset()
category_id = self.kwargs.get('category_id')
return queryset.filter(category_id=category_id)
class TagView(IndexView):
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
tag_id = self.kwargs.get('tag_id')
tag = get_object_or_404(Tag, pk=tag_id)
context.update({
'tag': tag,
})
return context
def get_queryset(self):
""" 重写querset,根据标签过滤 """
queryset = super().get_queryset()
tag_id = self.kwargs.get('tag_id')
return queryset.filter(tag__id=tag_id)
class PostDetailView(CommonViewMixin, DetailView):
queryset = Post.objects.filter(status=Post.STATUS_NORMAL)
template_name = 'blog/detail.html'
context_object_name = 'post'
pk_url_kwarg = 'post_id'
def get(self, request, *args, **kwargs):
response = super().get(request, *args, **kwargs)
self.handle_visited()
return response
def handle_visited(self):
increase_pv = False
increase_uv = False
uid = self.request.uid
pv_key = 'pv:%s:%s' % (uid, self.request.path)
if not cache.get(pv_key):
increase_pv = True
            cache.set(pv_key, 1, 1*60)  # valid for 1 minute
uv_key = 'uv:%s:%s:%s' % (uid, str(date.today()), self.request.path)
if not cache.get(uv_key):
increase_uv = True
            cache.set(uv_key, 1, 24*60*60)  # valid for 24 hours
if increase_pv and increase_uv:
Post.objects.filter(pk=self.object.id).update(pv=F('pv') + 1, uv=F('uv') + 1)
elif increase_pv:
Post.objects.filter(pk=self.object.id).update(pv=F('pv') + 1)
elif increase_uv:
Post.objects.filter(pk=self.object.id).update(uv=F('uv') + 1)
class SearchView(IndexView):
def get_context_data(self):
context = super().get_context_data()
context.update({
'keyword': self.request.GET.get('keyword', '')
})
return context
def get_queryset(self):
queryset = super().get_queryset()
keyword = self.request.GET.get('keyword')
if not keyword:
return queryset
return queryset.filter(Q(title__icontains=keyword) | Q(desc__icontains=keyword))
class AuthorView(IndexView):
def get_queryset(self):
queryset = super().get_queryset()
author_id = self.kwargs.get('owner_id')
return queryset.filter(owner_id=author_id)
class Handler404(CommonViewMixin, TemplateView):
template_name = '404.html'
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context, status=404)
class Handler50x(CommonViewMixin, TemplateView):
template_name = '50x.html'
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context, status=500)
avg_line_length: 30.592357 | max_line_length: 89 | alphanum_fraction: 0.634187
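The views above are meant to be routed through a Django URLconf. A hypothetical urls.py sketch, whose route names and URL patterns are assumptions rather than taken from the repository, wiring each view to the keyword arguments it reads (`category_id`, `tag_id`, `post_id`, `owner_id`):

```python
# Hypothetical urls.py sketch; the patterns and names below are assumptions.
from django.urls import path

from blog import views

urlpatterns = [
    path('', views.IndexView.as_view(), name='index'),
    path('category/<int:category_id>/', views.CategoryView.as_view(), name='category-list'),
    path('tag/<int:tag_id>/', views.TagView.as_view(), name='tag-list'),
    path('post/<int:post_id>/', views.PostDetailView.as_view(), name='post-detail'),
    path('search/', views.SearchView.as_view(), name='search'),
    path('author/<int:owner_id>/', views.AuthorView.as_view(), name='author'),
]
```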
hexsha: d778730c03e1ec9d49aa2108fab2951734e8df67 | size: 125 | ext: py | lang: Python
repo path: check_duplication.py | repo: Damon-wenc/find_out_duplicated_number | head hexsha: 42c9e692a65380c5166f9e9e9409a327f7aa354e | licenses: ["MIT"] (identical across the max_stars, max_issues and max_forks columns)
max_stars_count: 1 (events 2019-07-08T11:37:02.000Z to 2019-07-08T11:37:02.000Z) | max_issues_count: null | max_forks_count: null (issues/forks event datetimes null)
content:
#!/usr/bin/python
# -*- coding: utf-8 -*-
import open_excel
def check(num):
return num in open_excel.G_saved_numbers
avg_line_length: 12.5 | max_line_length: 44 | alphanum_fraction: 0.68
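A hedged usage sketch; it assumes the sibling `open_excel` module (not shown here) exposes `G_saved_numbers` as a collection of previously seen numbers:

```python
# Illustrative only: depends on open_excel populating G_saved_numbers.
import check_duplication

candidate = 42
if check_duplication.check(candidate):
    print("%d is a duplicate" % candidate)
else:
    print("%d has not been seen before" % candidate)
```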
hexsha: b5b2ee6449975e9c4ee136cf2b1da2d58cd6729c | size: 15,874 | ext: py | lang: Python
repo path: neutron_lbaas/services/loadbalancer/drivers/haproxy/namespace_driver.py | repo: bdrich/neutron-lbaas | head hexsha: b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd | licenses: ["Apache-2.0"] (identical across the max_stars, max_issues and max_forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
content:
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import socket
import netaddr
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import exceptions
from neutron.common import utils as n_utils
from neutron.plugins.common import constants
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from neutron_lbaas._i18n import _LE, _LW
from neutron_lbaas.services.loadbalancer.agent import agent_device_driver
from neutron_lbaas.services.loadbalancer import constants as lb_const
from neutron_lbaas.services.loadbalancer.drivers.haproxy import cfg as hacfg
LOG = logging.getLogger(__name__)
NS_PREFIX = 'qlbaas-'
DRIVER_NAME = 'haproxy_ns'
STATE_PATH_DEFAULT = '$state_path/lbaas'
USER_GROUP_DEFAULT = 'nogroup'
OPTS = [
cfg.StrOpt(
'loadbalancer_state_path',
default=STATE_PATH_DEFAULT,
help=_('Location to store config and state files'),
deprecated_opts=[cfg.DeprecatedOpt('loadbalancer_state_path',
group='DEFAULT')],
),
cfg.StrOpt(
'user_group',
default=USER_GROUP_DEFAULT,
help=_('The user group'),
deprecated_opts=[cfg.DeprecatedOpt('user_group', group='DEFAULT')],
),
cfg.IntOpt(
'send_gratuitous_arp',
default=3,
help=_('When delete and re-add the same vip, send this many '
'gratuitous ARPs to flush the ARP cache in the Router. '
'Set it below or equal to 0 to disable this feature.'),
)
]
cfg.CONF.register_opts(OPTS, 'haproxy')
class HaproxyNSDriver(agent_device_driver.AgentDeviceDriver):
def __init__(self, conf, plugin_rpc):
self.conf = conf
self.state_path = conf.haproxy.loadbalancer_state_path
try:
vif_driver_class = n_utils.load_class_by_alias_or_classname(
'neutron.interface_drivers',
conf.interface_driver)
except ImportError:
with excutils.save_and_reraise_exception():
msg = (_('Error importing interface driver: %s')
% conf.interface_driver)
LOG.error(msg)
self.vif_driver = vif_driver_class(conf)
self.plugin_rpc = plugin_rpc
self.pool_to_port_id = {}
@classmethod
def get_name(cls):
return DRIVER_NAME
def create(self, logical_config):
pool_id = logical_config['pool']['id']
namespace = get_ns_name(pool_id)
self._plug(namespace, logical_config['vip']['port'],
logical_config['vip']['address'])
self._spawn(logical_config)
def update(self, logical_config):
pool_id = logical_config['pool']['id']
pid_path = self._get_state_file_path(pool_id, 'pid')
extra_args = ['-sf']
extra_args.extend(p.strip() for p in open(pid_path, 'r'))
self._spawn(logical_config, extra_args)
def _spawn(self, logical_config, extra_cmd_args=()):
pool_id = logical_config['pool']['id']
namespace = get_ns_name(pool_id)
conf_path = self._get_state_file_path(pool_id, 'conf')
pid_path = self._get_state_file_path(pool_id, 'pid')
sock_path = self._get_state_file_path(pool_id, 'sock')
user_group = self.conf.haproxy.user_group
hacfg.save_config(conf_path, logical_config, sock_path, user_group)
cmd = ['haproxy', '-f', conf_path, '-p', pid_path]
cmd.extend(extra_cmd_args)
ns = ip_lib.IPWrapper(namespace=namespace)
ns.netns.execute(cmd)
# remember the pool<>port mapping
self.pool_to_port_id[pool_id] = logical_config['vip']['port']['id']
@n_utils.synchronized('haproxy-driver')
def undeploy_instance(self, pool_id, **kwargs):
cleanup_namespace = kwargs.get('cleanup_namespace', False)
delete_namespace = kwargs.get('delete_namespace', False)
namespace = get_ns_name(pool_id)
pid_path = self._get_state_file_path(pool_id, 'pid')
# kill the process
kill_pids_in_file(pid_path)
# unplug the ports
if pool_id in self.pool_to_port_id:
self._unplug(namespace, self.pool_to_port_id[pool_id])
# delete all devices from namespace;
# used when deleting orphans and port_id is not known for pool_id
if cleanup_namespace:
ns = ip_lib.IPWrapper(namespace=namespace)
for device in ns.get_devices(exclude_loopback=True):
self.vif_driver.unplug(device.name, namespace=namespace)
# remove the configuration directory
conf_dir = os.path.dirname(self._get_state_file_path(pool_id, ''))
if os.path.isdir(conf_dir):
shutil.rmtree(conf_dir)
if delete_namespace:
ns = ip_lib.IPWrapper(namespace=namespace)
ns.garbage_collect_namespace()
def exists(self, pool_id):
namespace = get_ns_name(pool_id)
root_ns = ip_lib.IPWrapper()
socket_path = self._get_state_file_path(pool_id, 'sock', False)
if root_ns.netns.exists(namespace) and os.path.exists(socket_path):
try:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(socket_path)
return True
except socket.error:
pass
return False
def get_stats(self, pool_id):
socket_path = self._get_state_file_path(pool_id, 'sock', False)
TYPE_BACKEND_REQUEST = 2
TYPE_SERVER_REQUEST = 4
if os.path.exists(socket_path):
parsed_stats = self._get_stats_from_socket(
socket_path,
entity_type=TYPE_BACKEND_REQUEST | TYPE_SERVER_REQUEST)
pool_stats = self._get_backend_stats(parsed_stats)
pool_stats['members'] = self._get_servers_stats(parsed_stats)
return pool_stats
else:
LOG.warning(_LW('Stats socket not found for pool %s'), pool_id)
return {}
def _get_backend_stats(self, parsed_stats):
TYPE_BACKEND_RESPONSE = '1'
for stats in parsed_stats:
if stats.get('type') == TYPE_BACKEND_RESPONSE:
unified_stats = dict((k, stats.get(v, ''))
for k, v in hacfg.STATS_MAP.items())
return unified_stats
return {}
def _get_servers_stats(self, parsed_stats):
TYPE_SERVER_RESPONSE = '2'
res = {}
for stats in parsed_stats:
if stats.get('type') == TYPE_SERVER_RESPONSE:
res[stats['svname']] = {
lb_const.STATS_STATUS: (constants.INACTIVE
if stats['status'] == 'DOWN'
else constants.ACTIVE),
lb_const.STATS_HEALTH: stats['check_status'],
lb_const.STATS_FAILED_CHECKS: stats['chkfail']
}
return res
def _get_stats_from_socket(self, socket_path, entity_type):
try:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(socket_path)
s.send('show stat -1 %s -1\n' % entity_type)
raw_stats = ''
chunk_size = 1024
while True:
chunk = s.recv(chunk_size)
raw_stats += chunk
if len(chunk) < chunk_size:
break
return self._parse_stats(raw_stats)
except socket.error as e:
LOG.warning(_LW('Error while connecting to stats socket: %s'), e)
return {}
def _parse_stats(self, raw_stats):
stat_lines = raw_stats.splitlines()
if len(stat_lines) < 2:
return []
stat_names = [name.strip('# ') for name in stat_lines[0].split(',')]
res_stats = []
for raw_values in stat_lines[1:]:
if not raw_values:
continue
stat_values = [value.strip() for value in raw_values.split(',')]
res_stats.append(dict(zip(stat_names, stat_values)))
return res_stats
def _get_state_file_path(self, pool_id, kind, ensure_state_dir=True):
"""Returns the file name for a given kind of config file."""
confs_dir = os.path.abspath(os.path.normpath(self.state_path))
conf_dir = os.path.join(confs_dir, pool_id)
if ensure_state_dir:
if not os.path.isdir(conf_dir):
os.makedirs(conf_dir, 0o755)
return os.path.join(conf_dir, kind)
def _plug(self, namespace, port, vip_address, reuse_existing=True):
self.plugin_rpc.plug_vip_port(port['id'])
interface_name = self.vif_driver.get_device_name(Wrap(port))
if ip_lib.device_exists(interface_name, namespace=namespace):
if not reuse_existing:
raise exceptions.PreexistingDeviceFailure(
dev_name=interface_name
)
else:
self.vif_driver.plug(
port['network_id'],
port['id'],
interface_name,
port['mac_address'],
namespace=namespace
)
cidrs = [
'%s/%s' % (ip['ip_address'],
netaddr.IPNetwork(ip['subnet']['cidr']).prefixlen)
for ip in port['fixed_ips']
]
self.vif_driver.init_l3(interface_name, cidrs, namespace=namespace)
# Haproxy socket binding to IPv6 VIP address will fail if this address
# is not yet ready(i.e tentative address).
if netaddr.IPAddress(vip_address).version == 6:
device = ip_lib.IPDevice(interface_name, namespace=namespace)
device.addr.wait_until_address_ready(vip_address)
gw_ip = port['fixed_ips'][0]['subnet'].get('gateway_ip')
if not gw_ip:
host_routes = port['fixed_ips'][0]['subnet'].get('host_routes', [])
for host_route in host_routes:
if host_route['destination'] == "0.0.0.0/0":
gw_ip = host_route['nexthop']
break
if gw_ip:
cmd = ['route', 'add', 'default', 'gw', gw_ip]
ip_wrapper = ip_lib.IPWrapper(namespace=namespace)
ip_wrapper.netns.execute(cmd, check_exit_code=False)
        # When deleting and re-adding the same vip, we need to
# send gratuitous ARP to flush the ARP cache in the Router.
gratuitous_arp = self.conf.haproxy.send_gratuitous_arp
if gratuitous_arp > 0:
for ip in port['fixed_ips']:
cmd_arping = ['arping', '-U',
'-I', interface_name,
'-c', gratuitous_arp,
ip['ip_address']]
ip_wrapper.netns.execute(cmd_arping, check_exit_code=False)
def _unplug(self, namespace, port_id):
port_stub = {'id': port_id}
self.plugin_rpc.unplug_vip_port(port_id)
interface_name = self.vif_driver.get_device_name(Wrap(port_stub))
self.vif_driver.unplug(interface_name, namespace=namespace)
def _is_active(self, logical_config):
        # haproxy will be unable to start without any active vip
if ('vip' not in logical_config or
(logical_config['vip']['status'] not in
constants.ACTIVE_PENDING_STATUSES) or
not logical_config['vip']['admin_state_up']):
return False
# not checking pool's admin_state_up to utilize haproxy ability to
# turn backend off instead of doing undeploy.
# in this case "ERROR 503: Service Unavailable" will be returned
if (logical_config['pool']['status'] not in
constants.ACTIVE_PENDING_STATUSES):
return False
return True
@n_utils.synchronized('haproxy-driver')
def deploy_instance(self, logical_config):
"""Deploys loadbalancer if necessary
:returns: True if loadbalancer was deployed, False otherwise
"""
# do actual deploy only if vip and pool are configured and active
if not logical_config or not self._is_active(logical_config):
return False
if self.exists(logical_config['pool']['id']):
self.update(logical_config)
else:
self.create(logical_config)
return True
def _refresh_device(self, pool_id):
logical_config = self.plugin_rpc.get_logical_device(pool_id)
# cleanup if the loadbalancer wasn't deployed (in case nothing to
# deploy or any errors)
if not self.deploy_instance(logical_config) and self.exists(pool_id):
self.undeploy_instance(pool_id)
def create_vip(self, vip):
self._refresh_device(vip['pool_id'])
def update_vip(self, old_vip, vip):
self._refresh_device(vip['pool_id'])
def delete_vip(self, vip):
self.undeploy_instance(vip['pool_id'])
def create_pool(self, pool):
# nothing to do here because a pool needs a vip to be useful
pass
def update_pool(self, old_pool, pool):
self._refresh_device(pool['id'])
def delete_pool(self, pool):
if self.exists(pool['id']):
self.undeploy_instance(pool['id'], delete_namespace=True)
def create_member(self, member):
self._refresh_device(member['pool_id'])
def update_member(self, old_member, member):
self._refresh_device(member['pool_id'])
def delete_member(self, member):
self._refresh_device(member['pool_id'])
def create_pool_health_monitor(self, health_monitor, pool_id):
self._refresh_device(pool_id)
def update_pool_health_monitor(self, old_health_monitor, health_monitor,
pool_id):
self._refresh_device(pool_id)
def delete_pool_health_monitor(self, health_monitor, pool_id):
self._refresh_device(pool_id)
def remove_orphans(self, known_pool_ids):
if not os.path.exists(self.state_path):
return
orphans = (pool_id for pool_id in os.listdir(self.state_path)
if pool_id not in known_pool_ids)
for pool_id in orphans:
if self.exists(pool_id):
self.undeploy_instance(pool_id, cleanup_namespace=True)
# NOTE (markmcclain) For compliance with interface.py which expects objects
class Wrap(object):
"""A light attribute wrapper for compatibility with the interface lib."""
def __init__(self, d):
self.__dict__.update(d)
def __getitem__(self, key):
return self.__dict__[key]
def get_ns_name(namespace_id):
return NS_PREFIX + namespace_id
def kill_pids_in_file(pid_path):
if os.path.exists(pid_path):
with open(pid_path, 'r') as pids:
for pid in pids:
pid = pid.strip()
try:
utils.execute(['kill', '-9', pid], run_as_root=True)
except RuntimeError:
LOG.exception(
_LE('Unable to kill haproxy process: %s'),
pid
)
avg_line_length: 37.527187 | max_line_length: 79 | alphanum_fraction: 0.617425
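For context on the stats handling above, a small standalone sketch of the CSV format that `_parse_stats` consumes; the sample "show stat" output below is fabricated for illustration and much shorter than real haproxy output:

```python
# Fabricated haproxy "show stat" CSV sample; real output has many more columns.
raw_stats = (
    "# pxname,svname,status,chkfail,check_status,type\n"
    "pool_a,BACKEND,UP,0,,1\n"
    "pool_a,member_1,DOWN,3,L4TOUT,2\n"
)

stat_lines = raw_stats.splitlines()
stat_names = [name.strip('# ') for name in stat_lines[0].split(',')]
parsed = [dict(zip(stat_names, line.split(','))) for line in stat_lines[1:] if line]

# 'type' == '1' marks the backend row, '2' a server row, mirroring
# TYPE_BACKEND_RESPONSE / TYPE_SERVER_RESPONSE in the driver above.
for row in parsed:
    print(row['svname'], row['status'], row['type'])
```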
hexsha: 78ec1f9e16cd0772ed24856d7ad51bc15776ed3b | size: 3,504 | ext: py | lang: Python
repo path: Plugins/Analyses/Live_Editor_Builders/Ships.py | repo: bvbohnen/X4_Customizer | head hexsha: 6f865008690916a66a44c97331d9a2692baedb35 | licenses: ["MIT"] (identical across the max_stars, max_issues and max_forks columns)
max_stars_count: 25 (events 2018-12-10T12:52:11.000Z to 2022-01-29T14:42:57.000Z) | max_issues_count: 4 (events 2019-08-01T19:09:11.000Z to 2022-01-02T01:47:42.000Z) | max_forks_count: 6 (events 2019-02-16T08:39:04.000Z to 2021-12-21T06:11:58.000Z)
content:
from Framework import File_System
from Framework.Live_Editor_Components import *
# Convenience macro renaming.
E = Edit_Item_Macro
D = Display_Item_Macro
G = Item_Group_Macro
from .Support import Create_Objects_From_Asset_Files
from .Support import physics_item_macros
from .Support import connection_item_macros
from ..Shared import Get_Ship_Macro_Files
from ...Transforms.Support import Float_to_String
# TODO:
# Speed calculations depend on the engine. However, can make an assumption
# about the engine to estimate speed.
@Live_Editor_Object_Builder('ships')
def _Build_Storage_Objects():
# Ensure some available refs are loaded.
Live_Editor.Get_Category_Objects('storage')
Live_Editor.Get_Category_Objects('dockingbays')
Live_Editor.Get_Category_Objects('cockpits')
# TODO: dynamic connections.
# These would require looking up and processing the component file,
# to try to identify eg. how many engines the ship has, and of which
# tags, then to go look up engine data and determine what the thrusts are.
# Overall, this route is plausible, but would be best served by a wider
# scale parsing of data into custom classes for cross referencing,
# whereas the code here just runs on xpaths in the original xml.
#Live_Editor.Get_Category_Objects('engines')
# Switch to shared function that finds more mod ships.
#game_files = File_System.Get_All_Indexed_Files('macros','ship_*')
game_files = Get_Ship_Macro_Files()
return Create_Objects_From_Asset_Files(game_files, ship_item_macros)
#def Display_Update_Speed(component, ship_type, purpose_primary, physics_drag_forward):
# return ''
ship_item_macros = [
E('ship_type' , './properties/ship' , 'type' , 'Ship Type' , ''),
E('purpose_primary' , './properties/purpose' , 'primary' , 'Primary Purpose' , ''),
E('hull' , './properties/hull' , 'max' , 'Hull' , ''),
E('explosion_damage' , './properties/explosiondamage' , 'value' , 'Expl. Damage' , ''),
E('people_capacity' , './properties/people' , 'capacity' , 'People' , ''),
E('storage_missile' , './properties/storage' , 'missile' , 'Missile Storage' , ''),
E('thruster_tags' , './properties/thruster' , 'tags' , 'Thruster Tags' , ''),
E('secrecy_level' , './properties/secrecy' , 'level' , 'Secrecy Level' , ''),
#D('speed' , Display_Update_Speed , 'Speed', 'Assumes combat mk3 engines'),
*physics_item_macros,
E('sounds_ship' , './properties/sounds/shipdetail' , 'ref' , 'Sound Effect' , ''),
E('sound_occlusion' , './properties/sound_occlusion' , 'inside' , 'Sound Occlusion' , ''),
# Loop over software.
G('software' , './properties/software' , 'software' , 'Software' ),
E('ware' , '.' , 'ware' , 'Ware' , ''),
E('default' , '.' , 'default' , 'Default' , ''),
E('compatible' , '.' , 'compatible' , 'Compatible' , ''),
G('/software'),
*connection_item_macros
]
avg_line_length: 47.351351 | max_line_length: 113 | alphanum_fraction: 0.578767
hexsha: bde254b85876657dffc62cdc8715fab456674390 | size: 425 | ext: py | lang: Python
repo path: losses/__init__.py | repo: RerRayne/learn3d | head hexsha: 83e4ac657c6538fb4cbed6e00b2e3ed6cbf43555 | licenses: ["MIT"] (identical across the max_stars, max_issues and max_forks columns)
max_stars_count: 335 (events 2020-05-17T19:37:47.000Z to 2022-03-29T09:32:14.000Z) | max_issues_count: 13 (events 2020-06-08T05:28:03.000Z to 2022-03-29T07:46:18.000Z) | max_forks_count: 59 (events 2020-06-27T09:01:29.000Z to 2022-03-21T07:22:09.000Z)
content:
from .rmse_features import RMSEFeaturesLoss
from .frobenius_norm import FrobeniusNormLoss
from .classification import ClassificationLoss
from .correspondence_loss import CorrespondenceLoss
try:
from .emd import EMDLoss
except:
print("Sorry EMD loss is not compatible with your system!")
try:
from .chamfer_distance import ChamferDistanceLoss
except:
print("Sorry ChamferDistance loss is not compatible with your system!")
avg_line_length: 35.416667 | max_line_length: 72 | alphanum_fraction: 0.837647
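A hedged import sketch, assuming the package is importable as `losses`; it shows the downstream effect of the guarded imports above:

```python
# Illustrative only: losses whose compiled extensions are missing simply never
# get defined in the package, so importing them raises ImportError downstream.
from losses import FrobeniusNormLoss, ClassificationLoss  # always available

try:
    from losses import EMDLoss
except ImportError:
    EMDLoss = None  # fall back when the EMD extension is unavailable
```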
hexsha: 0255243a31c998dfb2dedf3bd9aab0908983ecc5 | size: 4,027 | ext: py | lang: Python
repo path: paradigm_pattern_separation.py | repo: danielmk/pyDentateeLife2020 | head hexsha: b4a9f2beaa0c74dbc9583e2cf228856612596f8a | licenses: ["MIT"] (identical across the max_stars, max_issues and max_forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: 4 (events 2020-02-18T09:25:20.000Z to 2021-11-20T23:52:29.000Z)
content:
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 05 13:41:23 2018
@author: DanielM
"""
from neuron import h, gui # gui necessary for some parameters to h namespace
import numpy as np
import net_tunedrev
from burst_generator_inhomogeneous_poisson import inhom_poiss
import os
import argparse
# Handle command line inputs with argparse
parser = argparse.ArgumentParser(description='Pattern separation paradigm')
parser.add_argument('-runs',
nargs=3,
type=int,
help='start stop range for the range of runs',
default=[0, 1, 1],
dest='runs')
parser.add_argument('-savedir',
type=str,
help='complete directory where data is saved',
default=os.getcwd(),
dest='savedir')
args = parser.parse_args()
runs = range(args.runs[0], args.runs[1], args.runs[2])
savedir = args.savedir
# Where to search for nrnmech.dll file. Must be adjusted for your machine.
dll_files = [("C:\\Users\\Daniel\\repos\\pyDentate\\mechs_7-5\\nrnmech.dll"),
("C:\\Users\\DanielM\\Repos\\models_dentate\\"
"dentate_gyrus_Santhakumar2005_and_Yim_patterns\\"
"dentategyrusnet2005\\nrnmech.dll"),
"C:\\Users\\daniel\\Repos\\nrnmech.dll",
("C:\\Users\\Holger\\danielm\\models_dentate\\"
"dentate_gyrus_Santhakumar2005_and_Yim_patterns\\"
"dentategyrusnet2005\\nrnmech.dll"),
("C:\\Users\\Daniel\\repos\\"
"dentate_gyrus_Santhakumar2005_and_Yim_patterns\\"
"dentategyrusnet2005\\nrnmech.dll")]
for x in dll_files:
if os.path.isfile(x):
dll_dir = x
print("DLL loaded from: " + str(dll_dir))
h.nrn_load_dll(dll_dir)
# Generate temporal patterns for the 100 PP inputs
np.random.seed(10000)
temporal_patterns = inhom_poiss()
# Generate the PP -> GC mapping so that each GC receives inputs from 20/400
# randomly chosen PP inputs
innervation_pattern_gc = np.array([np.random.choice(400, 20, replace=False)
for x in range(2000)])
innervation_pattern_gc = innervation_pattern_gc.swapaxes(0, 1)
PP_to_GCs = []
for x in range(0, 400):
PP_to_GCs.append(np.argwhere(innervation_pattern_gc == x)[:, 1])
PP_to_GCs = np.array(PP_to_GCs)
# Generate the PP -> BC mapping as above
innervation_pattern_bc = np.array([np.random.choice(400, 20, replace=False)
for x in range(24)])
innervation_pattern_bc = innervation_pattern_bc.swapaxes(0, 1)
PP_to_BCs = []
for x in range(0, 400):
PP_to_BCs.append(np.argwhere(innervation_pattern_bc == x)[:, 1])
PP_to_BCs = np.array(PP_to_BCs)
all_targets = np.array([y for x in PP_to_GCs for y in x])
# Start the runs of the model
for run in runs:
nw = net_tunedrev.TunedNetwork(10000, temporal_patterns[0+run:24+run],
PP_to_GCs[0+run:24+run],
PP_to_BCs[0+run:24+run])
# Attach voltage recordings to all cells
nw.populations[0].voltage_recording(range(2000))
nw.populations[1].voltage_recording(range(60))
nw.populations[2].voltage_recording(range(24))
nw.populations[3].voltage_recording(range(24))
# Run the model
"""Initialization for -2000 to -100"""
h.cvode.active(0)
dt = 0.1
h.steps_per_ms = 1.0/dt
h.finitialize(-60)
h.t = -2000
h.secondorder = 0
h.dt = 10
while h.t < -100:
h.fadvance()
h.secondorder = 2
h.t = 0
h.dt = 0.1
"""Setup run control for -100 to 1500"""
h.frecord_init() # Necessary after changing t to restart the vectors
while h.t < 600:
h.fadvance()
print("Done Running")
tuned_save_file_name = str(nw) + '_run_' + str(run)
nw.shelve_network(savedir, tuned_save_file_name)
fig = nw.plot_aps(time=600)
tuned_fig_file_name = str(nw) + '_spike_plot_run_' + str(run)
nw.save_ap_fig(fig, savedir, tuned_fig_file_name)
avg_line_length: 33.840336 | max_line_length: 78 | alphanum_fraction: 0.636951
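A small, runnable sketch of how the argparse interface above turns its arguments into a run range; the argument values and output directory are illustrative, not taken from the repository:

```python
# Mirrors the argparse setup above with illustrative arguments.
import argparse
import os

parser = argparse.ArgumentParser(description='Pattern separation paradigm')
parser.add_argument('-runs', nargs=3, type=int, default=[0, 1, 1], dest='runs')
parser.add_argument('-savedir', type=str, default=os.getcwd(), dest='savedir')

args = parser.parse_args(['-runs', '0', '25', '1', '-savedir', '/tmp/out'])
print(list(range(*args.runs))[:5], '...')  # runs 0..24
print(args.savedir)                        # /tmp/out
```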
hexsha: 452972b77124fb789b3916638522ea25acba2b0f | size: 7,168 | ext: py | lang: Python
repo path: src/octopus/manufacturer/ika.py | repo: gar-syn/congo-lab | head hexsha: dc50af4e35903556bc8bc34dc23a7a708c1f5422 | licenses: ["MIT"] (identical across the max_stars, max_issues and max_forks columns)
max_stars_count: 1 (events 2021-02-02T11:27:25.000Z to 2021-02-02T11:27:25.000Z) | max_issues_count: 18 (events 2021-02-01T11:35:15.000Z to 2021-08-03T14:23:38.000Z) | max_forks_count: null (event datetimes null)
content:
# Twisted Imports
from twisted.internet import defer
from twisted.internet.protocol import Factory
# Package Imports
from octopus.util import now
from octopus.machine import Machine, Stream, Property
from octopus.protocol.basic import QueuedLineReceiver
#
# Serial Settings for IKA Eurostar
# --------------------------------
#
# Baud rate 9600 bps
# Data bits 7 Parity Even
# Stop bits 1 Flow control None
#
# Protocol type Raw TCP
#
class IKALineReceiver (QueuedLineReceiver):
delimiter = b" \r \n"
class IKAEurostar (Machine):
protocolFactory = Factory.forProtocol(IKALineReceiver)
name = "IKA Eurostar"
def setup (self):
# setup variables
self.power = Property(title = "Power", type = str, options = ("on", "off"), setter = _set_power(self))
self.setpoint = Property(title = "Stirrer setpoint", type = float, unit = "rpm", setter = _set_setpoint(self))
self.rpm = Stream(title = "Stirrer Speed", type = float, unit = "rpm")
self.torque = Stream(title = "Torque", type = float, unit = "Ncm")
def start (self):
# def interpret_power (result: str) -> str:
# if result == "ON":
# return "on"
# elif result == "OFF":
# return "off"
def interpret_rpm (result: str) -> float:
value, id = result.split(' ')
if (id == "4"):
return float(value)
def interpret_torque (result: str) -> float:
value, id = result.split(' ')
if (id == "5"):
return float(value)
def interpret_setpoint (result: str) -> float:
value, id = result.split(' ')
if (id == "4"):
return float(value)
to_monitor = []
def addMonitor (command, fn, variable: Stream):
def interpret (result):
variable._push(fn(result), now())
to_monitor.append(( command, interpret ))
# addMonitor("KM?", interpret_power, self.power)
addMonitor("IN_PV_4", interpret_rpm, self.rpm)
addMonitor("IN_PV_5", interpret_torque, self.torque)
addMonitor("IN_SP_4", interpret_setpoint, self.setpoint)
def monitor ():
for cmd, fn in to_monitor:
self.protocol.write(cmd).addCallback(fn)
self._monitor = self._tick(monitor, 1)
def stop (self):
if self._monitor:
self._monitor.stop()
def reset (self):
return defer.succeed('OK')
def _set_power (machine: IKAEurostar):
@defer.inlineCallbacks
def set_power (power: str):
if power == "on":
yield machine.protocol.write("START_4", expectReply = False)
else:
yield machine.protocol.write("STOP_4", expectReply = False)
machine.power._push(power)
return set_power
def _set_setpoint (machine: IKAEurostar):
def set_setpoint (setpoint: float):
return machine.protocol.write(f"OUT_SP_4 {setpoint:.1f}", expectReply = False)
return set_setpoint
#
# Serial Settings for RCT 5
# -----------------------------------
#
# Baud rate 9600 bps
# Data bits 7 Parity None
# Stop bits 1 Flow control None
class RCT5 (Machine):
protocolFactory = Factory.forProtocol(QueuedLineReceiver)
name = "IKA RCT 5"
def setup (self):
# setup variables
self.heater_power = Property(title = "Heater On", type = str, options = ("on", "off"), setter = _set_heater_power(self))
self.stirrer_power = Property(title = "Stirrer On", type = str, options = ("on", "off"), setter = _set_stirrer_power(self))
self.stirrer_setpoint = Property(title = "Stirrer setpoint", type = float, unit = "rpm", setter = _set_stirrer_setpoint(self))
self.heater_setpoint = Property(title = "Heater setpoint", type = float, unit = "rpm", setter = _set_heater_setpoint(self))
self.external_temperature = Stream(title = "External Temperature", type = float, unit = "C")
self.hotplate_temperature = Stream(title = "Hotplate Temperature", type = float, unit = "C")
self.stirrer_speed = Stream(title = "Stirrer Speed", type = float, unit = "rpm")
self.viscosity = Stream(title = "Viscosity", type = float, unit = "%")
def start (self):
def interpret_external_temperature (result: str) -> float:
value, id = result.split(' ')
if (id == "1"):
return float(value)
def interpret_hotplate_temperature (result: str) -> float:
value, id = result.split(' ')
if (id == "2"):
return float(value)
def interpret_stirrer_speed (result: str) -> float:
value, id = result.split(' ')
if (id == "4"):
return float(value)
def interpret_viscosity (result: str) -> float:
value, id = result.split(' ')
if (id == "5"):
return float(value)
to_monitor = []
def addMonitor (command, fn, variable: Stream):
def interpret (result):
variable._push(fn(result), now())
to_monitor.append(( command, interpret ))
addMonitor("IN_PV_1", interpret_external_temperature, self.external_temperature)
addMonitor("IN_PV_2", interpret_hotplate_temperature, self.hotplate_temperature)
addMonitor("IN_PV_4", interpret_stirrer_speed, self.stirrer_speed)
addMonitor("IN_PV_5", interpret_viscosity, self.viscosity)
def monitor ():
for cmd, fn in to_monitor:
self.protocol.write(cmd).addCallback(fn)
self._monitor = self._tick(monitor, 1)
def stop (self):
if self._monitor:
self._monitor.stop()
def reset (self):
return defer.succeed('OK')
def _set_stirrer_power (machine: RCT5):
@defer.inlineCallbacks
def set_stirrer_power (power: str):
if power == "on":
yield machine.protocol.write("START_4", expectReply = False)
else:
yield machine.protocol.write("STOP_4", expectReply = False)
machine.stirrer_power._push(power)
return set_stirrer_power
def _set_heater_power (machine: RCT5):
@defer.inlineCallbacks
def set_heater_power (power: str):
if power == "on":
yield machine.protocol.write("START_1", expectReply = False)
else:
yield machine.protocol.write("STOP_1", expectReply = False)
machine.heater_power._push(power)
return set_heater_power
def _set_stirrer_setpoint (machine: RCT5):
def set_stirrer_setpoint (setpoint: float):
return machine.protocol.write(f"OUT_SP_4 {setpoint:.1f}", expectReply = False)
return set_stirrer_setpoint
def _set_heater_setpoint (machine: RCT5):
def set_heater_setpoint (setpoint: float):
return machine.protocol.write(f"OUT_SP_1 {setpoint:.1f}", expectReply = False)
return set_heater_setpoint
__all__ = ["IKAEurostar", "RCT5"]
avg_line_length: 32.581818 | max_line_length: 134 | alphanum_fraction: 0.596819
hexsha: 2b472a9e48b561e6a2032d76a6cde05b3ae6ce9a | size: 8,163 | ext: py | lang: Python
repo path: fuji_server/evaluators/fair_evaluator_file_format.py | repo: sneumann/fuji | head hexsha: 55b63e9e901ff7fee4d92fc42dec6b5753d67016 | licenses: ["MIT"] (identical across the max_stars, max_issues and max_forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (events 2021-02-18T16:58:10.000Z to 2021-02-18T16:58:10.000Z)
content:
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2020 PANGAEA (https://www.pangaea.de/)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from fuji_server.evaluators.fair_evaluator import FAIREvaluator
from fuji_server.models.data_file_format import DataFileFormat
from fuji_server.models.data_file_format_output import DataFileFormatOutput
from fuji_server.models.data_file_format_output_inner import DataFileFormatOutputInner
import mimetypes
import re
class FAIREvaluatorFileFormat(FAIREvaluator):
def evaluate(self):
text_format_regex = r'(^text)[\/]|[\/\+](xml|text|json)'
self.result = DataFileFormat(id=self.metric_number, metric_identifier=self.metric_identifier,
metric_name=self.metric_name)
self.output = DataFileFormatOutput()
data_file_list = []
if not self.fuji.content_identifier: # self.content_identifier only includes uris that are accessible
contents = self.fuji.metadata_merged.get('object_content_identifier')
unique_types = []
if contents:
for c in contents:
if c.get('type'):
unique_types.append(c.get('type'))
self.logger.info('FsF-R1.3-02D : File format(s) specified -: {}'.format(list(set(unique_types))))
mime_url_pair = {}
if len(self.fuji.content_identifier) > 0:
content_urls = [item.get('url') for item in self.fuji.content_identifier]
self.logger.info('FsF-R1.3-02D : Data content identifier provided - {}'.format(content_urls))
#self.maturity = 1
for file_index, data_file in enumerate(self.fuji.content_identifier):
mime_type = data_file.get('type')
if data_file.get('url') is not None:
if mime_type is None or mime_type in ['application/octet-stream']:
self.logger.info(
'FsF-R1.3-02D : Guessing the type of a file based on its filename or URL -: {}'.format(
data_file.get('url')))
# if mime type not given try to guess it based on the file name
guessed_mime_type = mimetypes.guess_type(data_file.get('url'))
self.logger.info('FsF-R1.3-02D : Guess return value - {}'.format(guessed_mime_type))
mime_type = guessed_mime_type[0] # the return value is a tuple (type, encoding) where type is None if the type can’t be guessed
if mime_type:
if mime_type in self.fuji.ARCHIVE_MIMETYPES: # check archive&compress media type
self.logger.info(
'FsF-R1.3-02D : Archiving/compression format specified - {}'.format(mime_type))
# exclude archive format
if file_index == len(self.fuji.content_identifier)-1:
self.fuji.tika_content_types_list = [n for n in self.fuji.tika_content_types_list if
n not in self.fuji.ARCHIVE_MIMETYPES]
self.logger.info(
'FsF-R1.3-02D : Extracted file formats for selected data object (see FsF-R1-01MD) -: {}'.format(self.fuji.tika_content_types_list))
for t in self.fuji.tika_content_types_list:
mime_url_pair[t] = data_file.get('url')
else:
mime_url_pair[mime_type] = data_file.get('url')
# FILE FORMAT CHECKS....
# check if format is a scientific one:
for mimetype, url in mime_url_pair.items():
data_file_output = DataFileFormatOutputInner()
preferance_reason = []
subject_area = []
if mimetype in self.fuji.SCIENCE_FILE_FORMATS:
self.setEvaluationCriteriumScore('FsF-R1.3-02D-1c', 0, 'pass')
self.maturity = 3
if self.fuji.SCIENCE_FILE_FORMATS.get(mimetype) == 'Generic':
subject_area.append('General')
preferance_reason.append('generic science format')
else:
subject_area.append(self.fuji.SCIENCE_FILE_FORMATS.get(mimetype))
preferance_reason.append('science format')
data_file_output.is_preferred_format = True
# check if long term format
if mimetype in self.fuji.LONG_TERM_FILE_FORMATS:
self.setEvaluationCriteriumScore('FsF-R1.3-02D-1b', 0, 'pass')
self.maturity = 2
preferance_reason.append('long term format')
subject_area.append('General')
data_file_output.is_preferred_format = True
# check if open format
if mimetype in self.fuji.OPEN_FILE_FORMATS:
self.setEvaluationCriteriumScore('FsF-R1.3-02D-1a', 0, 'pass')
self.maturity = 1
preferance_reason.append('open format')
subject_area.append('General')
data_file_output.is_preferred_format = True
# generic text/xml/json file check
if re.search(text_format_regex, mimetype):
self.setEvaluationCriteriumScore('FsF-R1.3-02D-1a', 0, 'pass')
self.setEvaluationCriteriumScore('FsF-R1.3-02D-1b', 0, 'pass')
self.setEvaluationCriteriumScore('FsF-R1.3-02D-1c', 0, 'pass')
preferance_reason.extend(['long term format', 'open format', 'generic science format'])
subject_area.append('General')
data_file_output.is_preferred_format = True
data_file_output.mime_type = mimetype
data_file_output.file_uri = url
data_file_output.preference_reason = list(set(preferance_reason))
data_file_output.subject_areas = list(set(subject_area))
data_file_list.append(data_file_output)
if len(data_file_list) > 0:
self.score.earned = 1
self.setEvaluationCriteriumScore('FsF-R1.3-02D-1', 1, 'pass')
#self.maturity = 3
self.logger.log(self.fuji.LOG_SUCCESS, 'FsF-R1.3-02D : Could identify a file format commonly used by the scientific community')
self.result.test_status = 'pass'
else:
            self.logger.warning('FsF-R1.3-02D : Could not perform file format checks as data content identifier(s) unavailable/inaccessible')
self.result.test_status = 'fail'
self.output = data_file_list
self.result.output = self.output
self.result.metric_tests = self.metric_tests
self.result.maturity = self.maturity_levels.get(self.maturity)
self.result.score = self.score
avg_line_length: 57.083916 | max_line_length: 167 | alphanum_fraction: 0.60223
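A standalone sketch of the two lightweight checks used above (mimetype guessing from a URL and the text/xml/json regex); the example URL is made up for illustration:

```python
# Illustrative only: the URL is fabricated; both checks use the stdlib.
import mimetypes
import re

text_format_regex = r'(^text)[\/]|[\/\+](xml|text|json)'
url = "https://example.org/data/measurements.csv"

guessed, _ = mimetypes.guess_type(url)
print(guessed)                                      # text/csv
print(bool(re.search(text_format_regex, guessed)))  # True -> treated as a generic text format
```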
hexsha: 28dfa82b4fa0ad7bfa315a69ac37f6d408a50ebf | size: 5,144 | ext: py | lang: Python
repo path: Tests/test_properties.py | repo: KwiatQIM/Quantum-Tomography | head hexsha: e10d03b9dc5c8c1bb61a081db1aef70a4b2bb6ca | licenses: ["MIT"] (identical across the max_stars, max_issues and max_forks columns)
max_stars_count: 11 (events 2020-06-23T13:44:30.000Z to 2021-10-22T02:55:14.000Z) | max_issues_count: 3 (events 2020-06-23T00:09:46.000Z to 2021-09-12T23:25:05.000Z) | max_forks_count: 4 (events 2020-08-11T09:11:17.000Z to 2021-11-06T05:47:47.000Z)
content:
import unittest
import QuantumTomography as qLib
import numpy as np
import numpy.testing as tests
"""
Copyright 2020 University of Illinois Board of Trustees.
Licensed under the terms of an MIT license
"""
"""CHECK OUT THE REFERENCE PAGE ON OUR WEBSITE :
https://quantumtomo.web.illinois.edu/Doc/"""
"Attention! These tests run on the version that your environment uses. See readme for details"
class Test_Properties(unittest.TestCase):
# Test linear entropy and purity
def test_purity(self):
# Setup
numStates = 1000
for qDim in [1,2,3,4]:
pure_states = [qLib.toDensity(qLib.random_pure_state(qDim)) for x in range(numStates)]
puritys = np.array([qLib.purity(x) for x in pure_states])
linear_entropys = np.array([qLib.linear_entropy(x) for x in pure_states])
# Tests
self.assertEqual(any(puritys<.95), False) # Purities of random pure states should be 1
tests.assert_array_almost_equal(linear_entropys, 1-puritys) # Linear entropy = 1-purity
# Test concurrence, tangle, and negativity
def test_concurrence(self):
# Setup
bell_states = [qLib.toDensity(qLib.random_bell_state(2)) for x in range(100)]
cons = np.array([qLib.concurrence(x) for x in bell_states])
tangles = np.array([qLib.tangle(x) for x in bell_states])
negs = np.array([qLib.negativity(x) for x in bell_states])
# Tests
self.assertEqual(any(cons < .95), False) # Concurrence of random bell states should be 1
self.assertEqual(any(negs < .95), False) # Negativity of random bell states should be 1
tests.assert_array_almost_equal(tangles, cons**2) # tangle = concurrence**2
# Test entropy
def test_entropy(self):
for qDim in [1,2,3,4]:
# Setup
bell_states = [qLib.toDensity(qLib.random_pure_state(qDim)) for x in range(1000)]
entrops = np.array([qLib.entropy(x) for x in bell_states])
# Tests
self.assertEqual(any(entrops > .05), False) # Entropy of pure states should be 0
# Test fidelity
def test_fidelity(self):
for qDim in [1,2,3,4]:
# Setup
numStates = 1000
pure_states = [qLib.random_pure_state(qDim) for x in range(numStates)]
density_states = [qLib.random_density_state(qDim) for x in range(numStates)]
bell_states = [qLib.toDensity(qLib.random_pure_state(qDim)) for x in range(numStates)]
pure_states_almost = [qLib.performOperation(x,.9999999*np.eye(2**qDim)) for x in pure_states]
density_states_almost = [qLib.performOperation(x,.9999999*np.eye(2**qDim)) for x in density_states]
bell_states_almost = [qLib.performOperation(x,.9999999*np.eye(2**qDim)) for x in bell_states]
fidels_pure_pp = []
fidels_pure_dp = []
fidels_pure_pd = []
fidels_pure_dd = []
fidels_density = []
fidels_bell = []
for i in range(numStates):
fidels_pure_pp.append(qLib.fidelity(pure_states_almost[i],
pure_states[i]))
fidels_pure_pd.append(qLib.fidelity(pure_states_almost[i],
qLib.toDensity(pure_states[i])))
fidels_pure_dp.append(qLib.fidelity(qLib.toDensity(pure_states_almost[i]),
pure_states[i]))
fidels_pure_dd.append(qLib.fidelity(qLib.toDensity(pure_states_almost[i]),
qLib.toDensity(pure_states[i])))
fidels_density.append(qLib.fidelity(density_states_almost[i],
density_states[i]))
fidels_bell.append(qLib.fidelity(bell_states_almost[i],
bell_states[i]))
self.assertEqual(any(np.array(fidels_pure_pp) < .95), False)
self.assertEqual(any(np.array(fidels_pure_pd) < .95), False)
self.assertEqual(any(np.array(fidels_pure_dp) < .95), False)
self.assertEqual(any(np.array(fidels_pure_dd) < .95), False)
self.assertEqual(any(np.array(fidels_density) < .95), False)
self.assertEqual(any(np.array(fidels_bell) < .95), False)
randomFidels = np.array([qLib.fidelity(qLib.random_density_state(qDim),
qLib.random_density_state(qDim)) for x in range(numStates)])
for i in range(int(2**qDim)):
state_i = np.zeros(2**qDim)
state_i[i] = 1
for j in range(int(2 ** qDim)):
state_j = np.zeros(2**qDim)
state_j[j] = 1
if i == j:
self.assertAlmostEqual(qLib.fidelity(state_i,state_j),1)
else:
self.assertAlmostEqual(qLib.fidelity(state_i,state_j),0)
if __name__ == '__main__':
unittest.main()
| 47.192661
| 111
| 0.587481
|
fb6f739e3672089aa6f282ef746f22b2dd24c72b
| 8,778
|
py
|
Python
|
local_packages/ExampleBasedRigging.py
|
vasiliskatr/example_based_facial_rigging_ARkit_blendshapes
|
39db32431f3bab2ebb91a74560a61da1ab40af05
|
[
"MIT"
] | 6
|
2022-01-16T16:00:29.000Z
|
2022-03-03T04:47:19.000Z
|
local_packages/ExampleBasedRigging.py
|
vasiliskatr/example_based_facial_rigging_ARkit_blendshapes
|
39db32431f3bab2ebb91a74560a61da1ab40af05
|
[
"MIT"
] | null | null | null |
local_packages/ExampleBasedRigging.py
|
vasiliskatr/example_based_facial_rigging_ARkit_blendshapes
|
39db32431f3bab2ebb91a74560a61da1ab40af05
|
[
"MIT"
] | 1
|
2022-03-03T04:47:20.000Z
|
2022-03-03T04:47:20.000Z
|
import numpy as np
import os
import time
import local_packages.tools3d_ as t3d
from scipy.optimize import lsq_linear
from IPython.display import clear_output
from scipy.sparse import csr_matrix
import scipy.sparse as sp
from scipy.sparse.linalg import spsolve
from qpsolvers import solve_qp
import numba
from numba import jit
from numba.core.extending import overload
from numba.np.linalg import norm_impl
from numba.core.errors import NumbaDeprecationWarning, NumbaPendingDeprecationWarning, NumbaPerformanceWarning
import warnings
warnings.simplefilter('ignore', category=NumbaDeprecationWarning)
warnings.simplefilter('ignore', category=NumbaPendingDeprecationWarning)
warnings.simplefilter('ignore', category=NumbaPerformanceWarning)
from tqdm import tqdm
def reading_generic_bs (objpath_generic_bs):
A_bs_model = [] # variable to store all data with regards to the template
B_bs_model = [] # variable to store all data with regards to the actor
bs_name_list = [] # variable to store the names of imported BS
A_0, faces, _, _ = t3d.Read(objpath_generic_bs + 'Neutral.obj', QuadMode = True)
n_vertices = A_0.shape[1]
#A_0 = tools_3d.center(A_0)
generic_bs_data = os.scandir(objpath_generic_bs)
for generic_bs in tqdm(generic_bs_data, unit=' files', desc="Loading generic blend shapes"):
name, ext = os.path.splitext(generic_bs)
name_s = name.split("/")
if ext == '.obj' and 'neutral' not in name: # read only the .obj files from the source directory
temp_vertices, _, _, _ = t3d.Read(name+ext, QuadMode = True)
A_bs_vertices = temp_vertices - A_0
A_bs_model.append(A_bs_vertices)
B_bs_model.append(np.zeros((3, n_vertices)))
bs_name_list.append(name_s[-1])
n = len(A_bs_model)
print ('Generic model of n = ' + str(len(A_bs_model)) + ' blend shapes imported (+1 neutral pose)')
return A_bs_model, B_bs_model, A_0, faces, n, bs_name_list
def reading_training_data(objpath_training_poses):
S_training_poses = [] # Variable to store training poses
training_pose_data = os.scandir(objpath_training_poses)
for training_pose in tqdm(training_pose_data, unit=' files', desc="Loading poses"):
name, ext = os.path.splitext(training_pose)
if ext == '.obj': # read only the .obj files from the source directory
temp_vertices, _, _, _ = t3d.Read(name+ext, QuadMode = True)
S_training_poses.append(temp_vertices)
m = len(S_training_poses)
print ('m = ' + str(m)+' training poses in total (+ 1 neutral)')
return S_training_poses, m
def blend_shape_weights(A_0, B_0, A_BS_model, S_training_poses):
# initial blend shape weights guessing for each training pose
start_time = time.time()
print ('Computing intial blend-shape weight guess for the training poses')
n = len(A_BS_model)
m = len(S_training_poses)
n_vertices = A_0.shape[1]
    Alpha_star = np.zeros((m, n))  # Initial guess of blendshape weights
A_All = A_BS_model[0].T.flatten().reshape(n_vertices*3, 1)
for i in range(1,n):
A_All = np.concatenate((A_All, A_BS_model[i].T.flatten().reshape(n_vertices*3, 1)), axis=1)
for i in tqdm(range(m), unit=' pose', desc='Guessing weights'):
B_temp = (S_training_poses[i] - B_0).T.flatten().reshape(n_vertices*3)
weights_temp = lsq_linear(A_All, B_temp, bounds = (0, 1), lsmr_tol='auto', verbose=0)
Alpha_star[i, :] = weights_temp.x.reshape(1, n)
    print ("done in ", (time.time() - start_time), "sec")
    return Alpha_star
def columnise(model):
for i in range(0, len(model)):
model[i] = model[i].T
return model
@jit(nopython=True, parallel=True)
def local_tri_frame_fast(vertices, triangles, tri_index):
tri_vertices = vertices[triangles[tri_index, :], :]
LF = np.zeros((3,3))
v1 = tri_vertices[0, :]
v2 = tri_vertices[1, :]
v3 = tri_vertices[2, :]
LF[:,0] = (v3-v1) # v3-v1
LF[:,1] = (v2-v1) # v2-v1
LF[:,2] = (np.cross((v3-v1),(v2-v1))) # n
return LF
@jit(nopython=True, parallel=True)
def compute_lf_fast (vertices, triangles):
lf = np.zeros((len(triangles)*3, 3))
for i in numba.prange(len(triangles)):
lf[i*3:i*3+3]= local_tri_frame_fast(vertices, triangles, i)
return lf
@jit(nopython=True, parallel=True)
def compute_lf_inverse_fast(vertices, triangles):
lf_inv = np.zeros((len(triangles)*3, 3))
for i in numba.prange(len(triangles)):
lf_inv[i*3:i*3+3] = np.linalg.inv(local_tri_frame_fast(vertices, triangles, i))
return lf_inv
@jit(nopython=True, parallel=True)
def make_M_S_minus_M_B_0_fast(S_training_poses, B_0, triangles):
m = len(S_training_poses)
M_B_0 = compute_lf_fast(B_0, triangles)
M_S_minus_M_B_0 = np.empty((m, len(triangles)*3, 3))
M_S = np.empty((m, len(triangles)*3, 3))
for s in numba.prange(m):
M_S_temp = compute_lf_fast(S_training_poses[s], triangles)
M_S_minus_M_B_0[s] = M_S_temp - M_B_0
M_S[s] = M_S_temp
return M_S_minus_M_B_0 , M_B_0, M_S
@jit(nopython=True, parallel=True)
def make_W_seed_fast(triangles, A_BS_model, kappa, theta):
n = len(A_BS_model)
W_seed = np.empty((n, len(triangles)))
for i in numba.prange(n):
M_A_i = compute_lf_fast(A_BS_model[i], triangles)
for j in numba.prange(len(triangles)):
lf_tri_norm = np.linalg.norm(M_A_i[j*3:j*3+3,:])
W_seed[i,j] = (1 + lf_tri_norm)/np.power((kappa + lf_tri_norm), theta)
return W_seed
@jit(nopython=True, parallel=True)
def make_M_A_star_fast(triangles, A_0, B_0, A_BS_model):
n = len(A_BS_model)
M_A_star = np.empty((n, len(triangles)*3, 3))
M_A_0_inv = compute_lf_inverse_fast(A_0, triangles)
M_A_0 = compute_lf_fast(A_0, triangles)
M_B_0 = compute_lf_fast(B_0, triangles)
for i in numba.prange(n):
M_A_i = compute_lf_fast(A_BS_model[i], triangles)
M_A_sum = M_A_0 + M_A_i
for j in numba.prange(len(triangles)):
M_A_star[i][j*3:j*3+3] = ((M_A_sum[j*3:j*3+3] @ M_A_0_inv[j*3:j*3+3]) @ M_B_0[j*3:j*3+3]) - M_B_0[j*3:j*3+3]
return M_A_star
# Parallel version lf optimisation
@jit(nopython=True, parallel=True)
def lf_optimisation (num_triangles, A, M_S_minus_M_B_0, M_B, M_A_star, beta, gamma, W_seed, opt_iteration, n, m):
for tri_index in numba.prange(num_triangles):
# Constructing Bfit
B_fit = np.zeros((n*3,3))
B_fit = A.T @ M_S_minus_M_B_0[:,tri_index*3:tri_index*3+3,:].copy().reshape(m*3,3)
# Constructing W
dia = [[i,i,i] for i in W_seed[:,tri_index]]
dia = np.asarray(dia)
dia = dia.flatten()
W = np.diag(dia, 0)
M_A_starr = M_A_star[:,tri_index*3:tri_index*3+3,:].copy().reshape(n*3,3)
A_sum = A.T @ A + beta[opt_iteration] * (W.T @ W)
B_sum = B_fit + beta[opt_iteration] * (W.T @ (W @ M_A_starr))
M_B_tri = np.linalg.solve(A_sum, B_sum[:,0:2]) #.copy()
M_B[:, tri_index*2:tri_index*2+2] = M_B_tri #.copy()
return M_B
def make_A_sparse_reconstruction (triangles, n_vertices):
row = []
col = []
data = []
for j in range(len(triangles)):
tri_indices = triangles[j]
row.append(j*2)
col.append(tri_indices[2])
data.append(1)
row.append(j*2)
col.append(tri_indices[0])
data.append(-1)
row.append(j*2+1)
col.append(tri_indices[1])
data.append(1)
row.append(j*2+1)
col.append(tri_indices[0])
data.append(-1)
row = np.asarray(row)
col = np.asarray(col)
data = np.asarray(data)
########### I removed the consideration of zero-deformation vertices.
########### There is no drifting in the reconstruction even without it.
A_sparse = csr_matrix((data, (row, col)), shape=(triangles.shape[0]*2, n_vertices))
return A_sparse
def recon(M_B, A_sparse_recon, n_vertices, num_triangles, i):
# reconstruction of vertices
B_temp_X = M_B[i*3,:].reshape(num_triangles*2, 1)
X_vals = sp.linalg.lsqr(A_sparse_recon, B_temp_X)[0]
B_temp_Y = M_B[i*3+1,:].reshape(num_triangles*2, 1)
Y_vals = sp.linalg.lsqr(A_sparse_recon, B_temp_Y)[0]
B_temp_Z = M_B[i*3+2,:].reshape(num_triangles*2, 1)
Z_vals = sp.linalg.lsqr(A_sparse_recon, B_temp_Z)[0]
return X_vals.reshape(1, n_vertices), Y_vals.reshape(1, n_vertices), Z_vals.reshape(1, n_vertices), i
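# A minimal end-to-end sketch (illustrative only): the directory names below are
# hypothetical placeholders, and the actor's neutral pose B_0 is assumed to live
# in a separate Neutral.obj, loaded the same way as the generic neutral above.
if __name__ == "__main__":
    A_bs_model, B_bs_model, A_0, faces, n, bs_names = reading_generic_bs("data/generic_bs/")
    S_training_poses, m = reading_training_data("data/training_poses/")
    B_0, _, _, _ = t3d.Read("data/actor/Neutral.obj", QuadMode=True)
    Alpha_star = blend_shape_weights(A_0, B_0, A_bs_model, S_training_poses)
    print(Alpha_star.shape)  # (m, n): one row of initial weight guesses per training pose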
| 34.155642
| 120
| 0.644224
|
ee3d7955dbfa7c804d4f0d3733bd5cbcb057579c
| 1,742
|
py
|
Python
|
qiskit/pulse/instructions/directives.py
|
romainfd/qiskit-terra
|
b5285ccc5cb1d17b7c73402833f2750b93652426
|
[
"Apache-2.0"
] | null | null | null |
qiskit/pulse/instructions/directives.py
|
romainfd/qiskit-terra
|
b5285ccc5cb1d17b7c73402833f2750b93652426
|
[
"Apache-2.0"
] | null | null | null |
qiskit/pulse/instructions/directives.py
|
romainfd/qiskit-terra
|
b5285ccc5cb1d17b7c73402833f2750b93652426
|
[
"Apache-2.0"
] | 1
|
2020-07-13T17:56:46.000Z
|
2020-07-13T17:56:46.000Z
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Directives are hints to the pulse compiler for how to process its input programs."""
from abc import ABC
from typing import Optional
from qiskit.pulse import channels as chans
from qiskit.pulse.instructions import instruction
class Directive(instruction.Instruction, ABC):
"""A compiler directive.
This is a hint to the pulse compiler and is not loaded into hardware.
"""
class RelativeBarrier(Directive):
"""Pulse ``RelativeBarrier`` directive."""
def __init__(self,
*channels: chans.Channel,
name: Optional[str] = None):
"""Create a relative barrier directive.
The barrier directive blocks instructions within the same schedule
as the barrier on channels contained within this barrier from moving
through the barrier in time.
Args:
channels: The channel that the barrier applies to.
name: Name of the directive for display purposes.
"""
super().__init__(tuple(channels), 0, tuple(channels), name=name)
def __eq__(self, other):
"""Verify two barriers are equivalent."""
return (isinstance(other, type(self)) and
set(self.channels) == set(other.channels))
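# A short usage sketch (illustrative, not part of the original module): a
# relative barrier that keeps two drive channels aligned. The channel indices
# are arbitrary examples.
if __name__ == '__main__':
    d0, d1 = chans.DriveChannel(0), chans.DriveChannel(1)
    barrier = RelativeBarrier(d0, d1, name='sync_d0_d1')
    print(barrier.duration)                    # 0 -- directives take no time on hardware
    print(set(barrier.channels) == {d0, d1})   # True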
| 33.5
| 87
| 0.687141
|
2ab6d757a28916517166e8e5f2f3587a18ca4635
| 1,365
|
py
|
Python
|
questions/q301_reorder_list/code.py
|
aadhityasw/Competitive-Programs
|
901a48d35f024a3a87c32a45b7f4531e8004a203
|
[
"MIT"
] | null | null | null |
questions/q301_reorder_list/code.py
|
aadhityasw/Competitive-Programs
|
901a48d35f024a3a87c32a45b7f4531e8004a203
|
[
"MIT"
] | 1
|
2021-05-15T07:56:51.000Z
|
2021-05-15T07:56:51.000Z
|
questions/q301_reorder_list/code.py
|
aadhityasw/Competitive-Programs
|
901a48d35f024a3a87c32a45b7f4531e8004a203
|
[
"MIT"
] | null | null | null |
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def reversePortion(self, root) :
head = None
tail = None
ptr = root
while ptr :
ptr2 = ptr.next
ptr.next = None
if head is None :
head = ptr
tail = ptr
else :
ptr.next = head
head = ptr
ptr = ptr2
return head, tail
def findLength(self, head) :
ptr = head
l = 0
while ptr :
ptr = ptr.next
l += 1
return l
# @param A : head node of linked list
# @return the head node in the linked list
def reorderList(self, A):
n = self.findLength(A)
req_index = n//2 + n%2
i = 1
ptr = A
while i < req_index :
i += 1
ptr = ptr.next
head2 = ptr.next
ptr.next = None
head2, tail2 = self.reversePortion(head2)
head1 = A
ptr1 = head1
ptr2 = head2
while ptr2 :
node = ptr2
ptr2 = ptr2.next
node.next = None
node.next = ptr1.next
ptr1.next = node
ptr1 = node.next
return head1
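# A small illustrative driver (not part of the original solution). It recreates
# the ListNode class sketched in the comment at the top and checks that the
# list 1->2->3->4 is reordered to 1->4->2->3.
if __name__ == '__main__':
    class ListNode:
        def __init__(self, x):
            self.val = x
            self.next = None
    nodes = [ListNode(v) for v in (1, 2, 3, 4)]
    for a, b in zip(nodes, nodes[1:]):
        a.next = b
    head = Solution().reorderList(nodes[0])
    result = []
    while head:
        result.append(head.val)
        head = head.next
    print(result)  # expected: [1, 4, 2, 3]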
| 19.782609
| 49
| 0.443956
|
fc5cf709c37f9b8000ed337dd619e9b995160644
| 4,180
|
py
|
Python
|
unet3d/data.py
|
microdyb/3DUnetCNN
|
45505ce8bc747301bcf53cdeced19c7bf84c09fe
|
[
"MIT"
] | null | null | null |
unet3d/data.py
|
microdyb/3DUnetCNN
|
45505ce8bc747301bcf53cdeced19c7bf84c09fe
|
[
"MIT"
] | null | null | null |
unet3d/data.py
|
microdyb/3DUnetCNN
|
45505ce8bc747301bcf53cdeced19c7bf84c09fe
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import tables
from .normalize import normalize_data_storage, reslice_image_set
# cardiac: filters are used for data compression
# cardiac: earray is a kind of enlargeable array defined by tables
# cardiac: affine_storage
def create_data_file(out_file, n_channels, n_samples, image_shape):
hdf5_file = tables.open_file(out_file, mode='w')
filters = tables.Filters(complevel=5, complib='blosc')
data_shape = tuple([0, n_channels] + list(image_shape))
truth_shape = tuple([0, 1] + list(image_shape))
data_storage = hdf5_file.create_earray(hdf5_file.root, 'data', tables.Float32Atom(), shape=data_shape,
filters=filters, expectedrows=n_samples)
truth_storage = hdf5_file.create_earray(hdf5_file.root, 'truth', tables.UInt8Atom(), shape=truth_shape,
filters=filters, expectedrows=n_samples)
affine_storage = hdf5_file.create_earray(hdf5_file.root, 'affine', tables.Float32Atom(), shape=(0, 4, 4),
filters=filters, expectedrows=n_samples)
return hdf5_file, data_storage, truth_storage, affine_storage
def write_image_data_to_file(image_files, data_storage, truth_storage, image_shape, n_channels, affine_storage,
truth_dtype=np.uint8, crop=True):
for set_of_files in image_files:
images = reslice_image_set(set_of_files, image_shape, label_indices=len(set_of_files) - 1, crop=crop)
subject_data = [image.get_data() for image in images]
add_data_to_storage(data_storage, truth_storage, affine_storage, subject_data, images[0].affine, n_channels,
truth_dtype)
return data_storage, truth_storage
def add_data_to_storage(data_storage, truth_storage, affine_storage, subject_data, affine, n_channels, truth_dtype):
data_storage.append(np.asarray(subject_data[:n_channels])[np.newaxis])
truth_storage.append(np.asarray(subject_data[n_channels], dtype=truth_dtype)[np.newaxis][np.newaxis])
affine_storage.append(np.asarray(affine)[np.newaxis])
def write_data_to_file(training_data_files, out_file, image_shape, truth_dtype=np.uint8, subject_ids=None,
normalize=True, crop=True):
"""
Takes in a set of training images and writes those images to an hdf5 file.
:param training_data_files: List of tuples containing the training data files. The modalities should be listed in
the same order in each tuple. The last item in each tuple must be the labeled image.
Example: [('sub1-T1.nii.gz', 'sub1-T2.nii.gz', 'sub1-truth.nii.gz'),
('sub2-T1.nii.gz', 'sub2-T2.nii.gz', 'sub2-truth.nii.gz')]
:param out_file: Where the hdf5 file will be written to.
:param image_shape: Shape of the images that will be saved to the hdf5 file.
:param truth_dtype: Default is 8-bit unsigned integer.
:return: Location of the hdf5 file with the image data written to it.
"""
n_samples = len(training_data_files)
n_channels = len(training_data_files[0]) - 1
try:
hdf5_file, data_storage, truth_storage, affine_storage = create_data_file(out_file,
n_channels=n_channels,
n_samples=n_samples,
image_shape=image_shape)
except Exception as e:
# If something goes wrong, delete the incomplete data file
os.remove(out_file)
raise e
write_image_data_to_file(training_data_files, data_storage, truth_storage, image_shape,
truth_dtype=truth_dtype, n_channels=n_channels, affine_storage=affine_storage, crop=crop)
if subject_ids:
hdf5_file.create_array(hdf5_file.root, 'subject_ids', obj=subject_ids)
if normalize:
normalize_data_storage(data_storage)
hdf5_file.close()
return out_file
def open_data_file(filename, readwrite="r"):
return tables.open_file(filename, readwrite)
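# A minimal usage sketch (illustrative only): the file names mirror the example
# in the write_data_to_file docstring, and "brats_data.h5" is a hypothetical
# output path rather than a file shipped with the project.
if __name__ == "__main__":
    training_files = [("sub1-T1.nii.gz", "sub1-T2.nii.gz", "sub1-truth.nii.gz"),
                      ("sub2-T1.nii.gz", "sub2-T2.nii.gz", "sub2-truth.nii.gz")]
    data_file = write_data_to_file(training_files, "brats_data.h5", image_shape=(144, 144, 144))
    hdf5_file = open_data_file(data_file)
    print(hdf5_file.root.data.shape)  # (n_samples, n_channels) + image_shape
    hdf5_file.close()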
| 52.911392
| 118
| 0.66866
|
8f1a3110014196ee7c072ac532c459fa4a1006b4
| 8,630
|
py
|
Python
|
recommender.py
|
stewiemcbacon/celp
|
77c8135dd9c91005e5249e5fd9416e007faa4012
|
[
"MIT"
] | null | null | null |
recommender.py
|
stewiemcbacon/celp
|
77c8135dd9c91005e5249e5fd9416e007faa4012
|
[
"MIT"
] | null | null | null |
recommender.py
|
stewiemcbacon/celp
|
77c8135dd9c91005e5249e5fd9416e007faa4012
|
[
"MIT"
] | null | null | null |
from data import CITIES, BUSINESSES, USERS, REVIEWS, TIPS, CHECKINS
import pandas as pd
from pandas import Series, DataFrame
import numpy as np
import random
"""Deze functie wordt gebruikt om de ratings voor de utility matrix te berekenen"""
def get_rating(ratings,user_id,business_id):
if ratings.loc[(ratings['user_id'] == user_id) & (ratings['business_id'] == business_id)]['stars'].any() == False:
res = np.nan
else:
res = float(ratings.loc[(ratings['user_id'] == user_id) & (ratings['business_id'] == business_id),'stars'].values[0])
return res
"""Deze functie wordt gebruikt om een utility matrix te maken"""
def pivot_ratings(df):
""" takes a rating table as input and computes the utility matrix """
business_ids = df['business_id'].unique()
user_ids = df['user_id'].unique()
# create empty data frame
pivot_data = pd.DataFrame(np.nan, columns=user_ids, index=business_ids, dtype=float)
# use the function get_rating to fill the matrix
for x in pivot_data:
for y in pivot_data.index:
pivot_data[x][y] = get_rating(df,x,y)
return pivot_data
"""We hebben het verschil tussen cosine en euclid similarity getest"""
# def cosine_angle(matrix, id1, id2):
# """Compute euclid distance between two rows."""
# if id1 == id2:
# return 1
# # only take the features that have values for both id1 and id2
# selected_features = matrix.loc[id1].notna() & matrix.loc[id2].notna()
#
# # if no matching features, return NaN
# if not selected_features.any():
# return 0.0
#
# # get the features from the matrix
# features1 = matrix.loc[id1][selected_features]
# features2 = matrix.loc[id2][selected_features]
# top=0
# squared1=0
# squared2=0
#
# # compute the distances for the features
# distances = features1 * features2
# for x in distances:
# top = top + x
# for x in features1:
# squared1 = squared1 + (x*x)
# for x in features2:
# squared2 = squared2 + (x*x)
#
# bottom = np.sqrt(squared1) * np.sqrt(squared2)
# if bottom == 0:
# return 0.0
#
# res = top/bottom
# return res
# def create_similarity_matrix_cosine(matrix):
# """ creates the similarity matrix based on cosine similarity """
# similarity_matrix = pd.DataFrame(0, index=matrix.index, columns=matrix.index, dtype=float)
# for x in similarity_matrix:
# for y in similarity_matrix.index:
# similarity_matrix[x][y] = cosine_angle(matrix,x,y)
#
# return similarity_matrix
def mean(frame, group_index, avg_index):
return frame.groupby(group_index)[avg_index].mean()
def select_neighborhood(similarity_matrix, utility_matrix, target_user, target_business):
"""selects all items with similarity > 0"""
seen = []
a = {}
for i in utility_matrix.index:
if pd.isnull(utility_matrix[target_user][i]):
pass
else:
seen.append(i)
for x in similarity_matrix:
if similarity_matrix[target_business][x] > 0 and similarity_matrix[target_business][x] < 1 and x in seen:
a.update({x:similarity_matrix[target_business][x]})
res = pd.Series(a)
return res
def weighted_mean(neighborhood, utility_matrix, business_id):
top = 0
bottom = 0
res=0
test = []
if neighborhood.empty:
return 0.0
for x,y in neighborhood.iteritems():
top = top + (utility_matrix[business_id][x] * y)
bottom = bottom + y
if bottom == 0:
return 0.0
res = top/bottom
return res
def euclid_distance(matrix, id1, id2):
"""Compute euclid distance between two rows."""
# only take the features that have values for both id1 and id2
selected_features = matrix.loc[id1].notna() & matrix.loc[id2].notna()
# if no matching features, return NaN
if not selected_features.any():
return np.nan
# get the features from the matrix
features1 = matrix.loc[id1][selected_features]
features2 = matrix.loc[id2][selected_features]
# compute the distances for the features
distances = features1 - features2
squared = 0
# return the absolute sum
for x in distances:
squared = squared + x*x
res = np.sqrt(squared)
return res
def euclid_similarity(matrix, id1, id2):
"""Compute euclid similarity between two rows."""
# compute distance
distance = euclid_distance(matrix, id1, id2)
# if no distance could be computed (no shared features) return a similarity of 0
if distance is np.nan:
return 0
# else return similarity
return 1 / (1 + distance)
# TODO
def create_similarity_matrix_euclid(matrix):
similarity_matrix = pd.DataFrame(0, index=matrix.index, columns=matrix.index, dtype=float)
for x in similarity_matrix:
for y in similarity_matrix.index:
similarity_matrix[x][y] = euclid_similarity(matrix,x,y)
return similarity_matrix
def mean_center_rows(matrix):
matrix1 = pd.DataFrame(matrix)
new_matrix = pd.DataFrame(0, index=matrix.index, columns=matrix.columns,dtype=float)
avg = matrix1.mean(axis=1)
for x in new_matrix.index:
for y in new_matrix:
new_matrix[y][x] = matrix1[y][x] - avg[x]
return new_matrix
def recommend(user_id=None, business_id=None, city=None, n=10):
"""
Returns n recommendations as a list of dicts.
Optionally takes in a user_id, business_id and/or city.
A recommendation is a dictionary in the form of:
{
business_id:str
stars:str
name:str
city:str
adress:str
}
"""
user_ids = []
user_ids2 = []
names = []
business_ids = []
business_ids2 = []
stars = []
review_ids = []
addresses = []
    # Puts all userids, businessids, stars, names and addresses in separate lists.
for cities, reviews in REVIEWS.items():
if cities == city:
for review in reviews:
user_ids.append(review['user_id'])
business_ids.append(review['business_id'])
review_ids.append(review['review_id'])
stars.append(review['stars'])
for cities, users in USERS.items():
if cities == city:
for user in users:
names.append(user['name'])
user_ids2.append(user['user_id'])
for cities, businesses in BUSINESSES.items():
if cities == city:
for business in businesses:
business_ids2.append(business['business_id'])
addresses.append(business['address'])
data = {'user_id':user_ids,'business_id':business_ids,'stars':stars,'review_id':review_ids}
names_dict = dict(zip(user_ids2,names))
business_dict = dict(zip(business_ids2,addresses))
df = pd.DataFrame(data)
utility_matrix = pivot_ratings(df)
utility_matrix = utility_matrix.T
centered_utility_matrix = mean_center_rows(utility_matrix)
similarity = create_similarity_matrix_euclid(centered_utility_matrix)
if not city:
city = random.choice(CITIES)
return random.sample(BUSINESSES[city], n)
"""Creates a series consisting of the top n predicted ratings for all businesses"""
if business_id is None:
res = []
rec = {}
i = 0
for x in utility_matrix:
n1 = select_neighborhood(similarity, utility_matrix, x, user_id)
p1 = weighted_mean(n1, utility_matrix, x)
if p1 > 0:
res.append({'business_id':x,'stars':p1,'name':names_dict[user_id],'city':city,'adress':business_dict[x]})
rec.update({x:p1})
i += 1
a = rec.copy()
for x in a:
if pd.isnull(utility_matrix[x][user_id]):
pass
else:
rec.pop(x)
rec = pd.Series(rec)
rec = rec.sort_values(ascending=False)
rec = rec.head(n)
return rec
else:
n = select_neighborhood(similarity, utility_matrix, business_id, user_id)
p = weighted_mean(n,utility_matrix,business_id)
res = [{'business_id':business_id,'stars':p,'name':names_dict[user_id],'city':city,'adress':business_dict[business_id]}]
        rec = {business_id: p}
return rec
print(recommend("y4a_7xpbvRvCGwMNY4iRhQ",None,'ambridge'))
| 32.689394
| 129
| 0.620162
|
7352b8e8b3ef35aac8c590c6b522ab7875212913
| 8,751
|
py
|
Python
|
pypureclient/flasharray/FA_2_5/models/volume.py
|
ashahid-ps/py-pure-client
|
2e3565d37b2a41db69308769f6f485d08a7c46c3
|
[
"BSD-2-Clause"
] | null | null | null |
pypureclient/flasharray/FA_2_5/models/volume.py
|
ashahid-ps/py-pure-client
|
2e3565d37b2a41db69308769f6f485d08a7c46c3
|
[
"BSD-2-Clause"
] | null | null | null |
pypureclient/flasharray/FA_2_5/models/volume.py
|
ashahid-ps/py-pure-client
|
2e3565d37b2a41db69308769f6f485d08a7c46c3
|
[
"BSD-2-Clause"
] | null | null | null |
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_5 import models
class Volume(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'connection_count': 'int',
'created': 'int',
'destroyed': 'bool',
'host_encryption_key_status': 'str',
'provisioned': 'int',
'qos': 'Qos',
'serial': 'str',
'space': 'Space',
'time_remaining': 'int',
'pod': 'Reference',
'source': 'FixedReference',
'subtype': 'str',
'volume_group': 'Reference',
'requested_promotion_state': 'str',
'promotion_status': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name',
'connection_count': 'connection_count',
'created': 'created',
'destroyed': 'destroyed',
'host_encryption_key_status': 'host_encryption_key_status',
'provisioned': 'provisioned',
'qos': 'qos',
'serial': 'serial',
'space': 'space',
'time_remaining': 'time_remaining',
'pod': 'pod',
'source': 'source',
'subtype': 'subtype',
'volume_group': 'volume_group',
'requested_promotion_state': 'requested_promotion_state',
'promotion_status': 'promotion_status'
}
required_args = {
}
def __init__(
self,
id=None, # type: str
name=None, # type: str
connection_count=None, # type: int
created=None, # type: int
destroyed=None, # type: bool
host_encryption_key_status=None, # type: str
provisioned=None, # type: int
qos=None, # type: models.Qos
serial=None, # type: str
space=None, # type: models.Space
time_remaining=None, # type: int
pod=None, # type: models.Reference
source=None, # type: models.FixedReference
subtype=None, # type: str
volume_group=None, # type: models.Reference
requested_promotion_state=None, # type: str
promotion_status=None, # type: str
):
"""
Keyword args:
id (str): A globally unique, system-generated ID. The ID cannot be modified and cannot refer to another resource.
name (str): A user-specified name. The name must be locally unique and can be changed.
connection_count (int): The total number of hosts and host groups connected to the volume.
created (int): The volume creation time. Measured in milliseconds since the UNIX epoch.
destroyed (bool): Returns a value of `true` if the volume has been destroyed and is pending eradication. The `time_remaining` value displays the amount of time left until the destroyed volume is permanently eradicated. Before the `time_remaining` period has elapsed, the destroyed volume can be recovered by setting `destroyed=false`. Once the `time_remaining` period has elapsed, the volume is permanently eradicated and can no longer be recovered.
host_encryption_key_status (str): The host encryption key status for this volume. Possible values include `none`, `detected`, and `fetched`.
provisioned (int): The virtual size of the volume. Measured in bytes and must be a multiple of 512. The maximum size is 4503599627370496 (4PB)
qos (Qos): Displays QoS limit information.
serial (str): A globally unique serial number generated by the system when the volume is created.
space (Space): Displays size and space consumption information.
time_remaining (int): The amount of time left until the destroyed volume is permanently eradicated. Measured in milliseconds. Before the `time_remaining` period has elapsed, the destroyed volume can be recovered by setting `destroyed=false`.
pod (Reference): A reference to the pod.
source (FixedReference): A reference to the originating volume as a result of a volume copy.
subtype (str): The type of volume. Valid values are `protocol_endpoint` and `regular`.
volume_group (Reference): A reference to the volume group.
requested_promotion_state (str): Valid values are `promoted` and `demoted`. Patch `requested_promotion_state` to `demoted` to demote the volume so that the volume stops accepting write requests. Patch `requested_promotion_state` to `promoted` to promote the volume so that the volume starts accepting write requests.
promotion_status (str): Current promotion status of a volume. Valid values are `promoted` and `demoted`. A status of `promoted` indicates that the volume has been promoted and can accept write requests from hosts. This is the default status for a volume when it is created. A status of `demoted` indicates that the volume has been demoted and no longer accepts write requests.
"""
if id is not None:
self.id = id
if name is not None:
self.name = name
if connection_count is not None:
self.connection_count = connection_count
if created is not None:
self.created = created
if destroyed is not None:
self.destroyed = destroyed
if host_encryption_key_status is not None:
self.host_encryption_key_status = host_encryption_key_status
if provisioned is not None:
self.provisioned = provisioned
if qos is not None:
self.qos = qos
if serial is not None:
self.serial = serial
if space is not None:
self.space = space
if time_remaining is not None:
self.time_remaining = time_remaining
if pod is not None:
self.pod = pod
if source is not None:
self.source = source
if subtype is not None:
self.subtype = subtype
if volume_group is not None:
self.volume_group = volume_group
if requested_promotion_state is not None:
self.requested_promotion_state = requested_promotion_state
if promotion_status is not None:
self.promotion_status = promotion_status
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `Volume`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Volume, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Volume):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
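# A brief illustrative sketch (not part of the generated client): building a
# Volume locally and serializing it. All values are made-up examples.
if __name__ == "__main__":
    vol = Volume(name="vol-example", provisioned=1073741824, destroyed=False)
    print(vol.to_dict())  # only the attributes that were set are included
    try:
        vol.unknown_field = 1
    except KeyError as err:
        print(err)        # keys outside attribute_map are rejected by __setattr__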
| 42.072115
| 461
| 0.61113
|
a209fb95ee194bc9e2a27cdc020da06fcc17f7e5
| 1,589
|
py
|
Python
|
src/ramstk/models/commondb/site_info/table.py
|
TahaEntezari/ramstk
|
f82e5b31ef5c4e33cc02252263247b99a9abe129
|
[
"BSD-3-Clause"
] | 26
|
2019-05-15T02:03:47.000Z
|
2022-02-21T07:28:11.000Z
|
src/ramstk/models/commondb/site_info/table.py
|
TahaEntezari/ramstk
|
f82e5b31ef5c4e33cc02252263247b99a9abe129
|
[
"BSD-3-Clause"
] | 815
|
2019-05-10T12:31:52.000Z
|
2022-03-31T12:56:26.000Z
|
src/ramstk/models/commondb/site_info/table.py
|
TahaEntezari/ramstk
|
f82e5b31ef5c4e33cc02252263247b99a9abe129
|
[
"BSD-3-Clause"
] | 9
|
2019-04-20T23:06:29.000Z
|
2022-01-24T21:21:04.000Z
|
# -*- coding: utf-8 -*-
#
# ramstk.models.commondb.site_info.table.py is part of The RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Site Information Table Model."""
# Standard Library Imports
from typing import Type
# RAMSTK Package Imports
from ramstk.models import RAMSTKBaseTable, RAMSTKSiteInfoRecord
class RAMSTKSiteInfoTable(RAMSTKBaseTable):
"""Contain the attributes and methods of the Option data manager."""
# Define private dict class attributes.
# Define private list class attributes.
# Define private scalar class attributes.
_db_id_colname = "fld_site_id"
_db_tablename = "ramstk_site_info"
_select_msg = "request_get_option_attributes2"
_tag = "option"
# Define public dict class attributes.
# Define public list class attributes.
# Define public scalar class attributes.
def __init__(self, **kwargs) -> None:
"""Initialize a Options data manager instance."""
RAMSTKBaseTable.__init__(self, **kwargs)
# Initialize private dictionary attributes.
# Initialize private list attributes.
self._lst_id_columns = [
"site_id",
]
# Initialize private scalar attributes.
self._record: Type[RAMSTKSiteInfoRecord] = RAMSTKSiteInfoRecord
# Initialize public dictionary attributes.
# Initialize public list attributes.
# Initialize public scalar attributes.
self.pkey = "site_id"
# Subscribe to PyPubSub messages.
| 27.877193
| 88
| 0.696035
|
435a0f22697864a60bb171910323c106f33c2cd8
| 17,624
|
py
|
Python
|
sapp/db_support.py
|
MLH-Fellowship/sapp
|
35ac9417520f2a10e9934ff2eeda513453b78ab2
|
[
"MIT"
] | 1
|
2021-06-17T16:32:58.000Z
|
2021-06-17T16:32:58.000Z
|
sapp/db_support.py
|
EdOverflow/sapp
|
ecb2b54c27294aa3b2d7fc52c186053b6349cb11
|
[
"MIT"
] | 11
|
2021-07-20T11:28:54.000Z
|
2021-12-11T16:28:03.000Z
|
sapp/db_support.py
|
EdOverflow/sapp
|
ecb2b54c27294aa3b2d7fc52c186053b6349cb11
|
[
"MIT"
] | 1
|
2021-06-17T16:33:01.000Z
|
2021-06-17T16:33:01.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import logging
from collections import namedtuple
from itertools import tee
from typing import Dict, List, Optional, Set, Tuple, Type
from munch import Munch
from sqlalchemy import Column, String, and_, exc, inspect, or_, types
from sqlalchemy.dialects import mysql
from sqlalchemy.dialects.mysql import BIGINT
from sqlalchemy.orm import Session
from .iterutil import split_every
log: logging.Logger = logging.getLogger("sapp")
"""Number of variables that can safely be set on a single DB call"""
BATCH_SIZE = 450
# The following three DBID classes require some explanation. Normally models
# will reference each other by their id. But we do bulk insertion at the end
# of our processing, which means the id isn't set until later. Having a DBID
# object allows these models to reference each other before that point. When
# we are ready to insert into the database, PrimaryKeyGenerator will give it
# an ID. Any other models referencing that DBID object will now be able to use
# the real id.
class DBID(object):
__slots__ = ["_id", "is_new", "local_id"]
# Temporary IDs that are local per run (local_id) are assigned for each
# DBID object on creation. This acts as a key for the object in map-like
# structures of DB objects without having to define a hashing function for
# each of them. next_id tracks the next available int to act as an id.
next_id: int = 0
# pyre-fixme[2]: Parameter must be annotated.
def __init__(self, id=None) -> None:
self.resolve(id)
self.local_id: int = DBID.next_id
DBID.next_id += 1
# pyre-fixme[3]: Return type must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
def resolve(self, id, is_new=True):
self._check_type(id)
self._id = id
self.is_new = is_new
return self
# pyre-fixme[3]: Return type must be annotated.
def resolved(self):
id = self._id
# We allow one level of a DBID pointing to another DBID
if isinstance(id, DBID):
id = id.resolved()
return id
# pyre-fixme[2]: Parameter must be annotated.
def _check_type(self, id) -> None:
if not isinstance(id, (int, type(None), DBID)):
raise TypeError(
"id expected to be type '{}' but was type '{}'".format(int, type(id))
)
# Allow DBIDs to be added and compared as ints
# pyre-fixme[3]: Return type must be annotated.
def __int__(self):
return self.resolved()
# pyre-fixme[3]: Return type must be annotated.
def __str__(self):
return str(self.resolved())
# pyre-fixme[2]: Parameter must be annotated.
def __add__(self, other) -> int:
return int(self) + int(other)
# pyre-fixme[2]: Parameter must be annotated.
def __lt__(self, other) -> bool:
return int(self) < int(other)
# pyre-fixme[2]: Parameter must be annotated.
def __gt__(self, other) -> bool:
return int(self) > int(other)
# pyre-fixme[2]: Parameter must be annotated.
def __ge__(self, other) -> bool:
return int(self) >= int(other)
# pyre-fixme[2]: Parameter must be annotated.
def __le__(self, other) -> bool:
return int(self) <= int(other)
def __repr__(self) -> str:
return "<{}(id={}) object at 0x{:x}>".format(
self.__class__.__name__, self._id, id(self)
)
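# A compact usage sketch (added for illustration; not part of the original
# module): it shows how a DBID can be shared between records before the real
# primary key exists and resolved later by the key generator.
def _dbid_usage_sketch() -> None:
    shared_id = DBID()            # no database id assigned yet
    alias = DBID(shared_id)       # one level of DBID -> DBID indirection is allowed
    shared_id.resolve(42)         # normally done by a PrimaryKeyGenerator
    assert int(shared_id) == 42
    assert alias.resolved() == 42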
class DBIDType(types.TypeDecorator):
impl = types.Integer
cache_ok = False
# pyre-fixme[3]: Return type must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
def process_bind_param(self, value, dialect):
# If it is a DBID wrapper, then write the contained value. Otherwise it
# may be resolved already, or None.
if isinstance(value, DBID):
return value.resolved()
else:
return value
# pyre-fixme[2]: parameter must be annotated.
# pyre-fixme[2]: parameter must be annotated.
def process_result_value(self, value, dialect) -> DBID:
return DBID(value)
# pyre-fixme[3]: Return type must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
def load_dialect_impl(self, dialect):
if dialect.name == "mysql":
return dialect.type_descriptor(mysql.INTEGER(unsigned=True))
return self.impl
class BIGDBIDType(DBIDType):
impl = types.BigInteger
# pyre-fixme[3]: Return type must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
def load_dialect_impl(self, dialect):
if dialect.name == "mysql":
return dialect.type_descriptor(mysql.BIGINT(unsigned=True))
return self.impl
class PrepareMixin(object):
@classmethod
# pyre-fixme[3]: Return type must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
def prepare(cls, session, pkgen, items):
"""This is called immediately before the items are written to the
database. pkgen is passed in to allow last-minute resolving of ids.
"""
for item in cls.merge(session, items):
if hasattr(item, "id"):
item.id.resolve(id=pkgen.get(cls), is_new=True)
# pyre-fixme[16]: `PrepareMixin` has no attribute `to_dict`.
yield cls.to_dict(item)
@classmethod
# pyre-fixme[3]: Return type must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
def merge(cls, session, items):
"""Models should override this to perform a merge"""
return items
@classmethod
# pyre-fixme[3]: Return type must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
def _merge_by_key(cls, session, items, attr):
return cls._merge_by_keys(
session, items, lambda item: getattr(item, attr.key), attr
)
@classmethod
# pyre-fixme[3]: Return type must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
def _merge_by_keys(cls, session, items, hash_item, *attrs):
"""An object can have multiple attributes as its key. This merges the
items to be added with existing items in the database based on their
key(s).
session: Session object for querying the DB.
items: Iterator of items to be added to the DB.
        hash_item: Function that takes as input the item to be added and
returns a hash of it.
attrs: List of attributes of the object/class that represent the
object's key.
Returns the next item (in items) that is not already in the DB.
"""
# Note: items is an iterator, not an iterable, 'tee' is a must.
items_iter1, items_iter2 = tee(items)
keys = {} # map of hash -> keys of the item
for i in items_iter1:
# An item's key is a map of 'attr -> item[attr]' where attr is
# usually a column name.
# For 'SharedText', its key would look like: {
# "kind": "feature",
# "contents": "via tito",
# }
item_hash = hash_item(i)
keys[item_hash] = {attr.key: getattr(i, attr.key) for attr in attrs}
# Find existing items.
existing_ids = {} # map of item_hash -> existing ID
cls_attrs = [getattr(cls, attr.key) for attr in attrs]
for fetch_keys in split_every(BATCH_SIZE, keys.values()):
filters = []
for fetch_key in fetch_keys:
# Sub-filters for checking if item with fetch_key is in the DB
# Example: [
# SharedText.kind.__eq__("feature"),
# SharedText.contents.__eq__("via tito"),
# ]
subfilter = [
getattr(cls, attr).__eq__(val) for attr, val in fetch_key.items()
]
filters.append(and_(*subfilter))
existing_items = (
# pyre-fixme[16]: `PrepareMixin` has no attribute `id`.
session.query(cls.id, *cls_attrs)
.filter(or_(*(filters)))
.all()
)
for existing_item in existing_items:
item_hash = hash_item(existing_item)
existing_ids[item_hash] = existing_item.id
# Now see if we can merge
new_items = {}
for i in items_iter2:
item_hash = hash_item(i)
if item_hash in existing_ids:
# The key is already in the DB
i.id.resolve(existing_ids[item_hash], is_new=False)
elif item_hash in new_items:
# The key is already in the list of new items
i.id.resolve(new_items[item_hash].id, is_new=False)
else:
# The key is new
new_items[item_hash] = i
yield i
@classmethod
# pyre-fixme[3]: Return type must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
def _merge_assocs(cls, session, items, id1, id2):
new_items = {}
for i in items:
r1 = getattr(i, id1.key)
r2 = getattr(i, id2.key)
key = (r1.resolved(), r2.resolved())
if key not in new_items:
new_items[key] = i
yield i
# The record mixin class is more efficient than the MutableRecordMixin, so it
# should be preferred. But the performance isn't from the mutability, it's
# because we use namedtuples, which creates a new class on demand, which uses
# __slots__, which is more efficient. Both of these mixins can be replaced when
# we have dynamically created classes with the slots set. But until then,
# prefer RecordMixin unless you need to change fields after creation.
class RecordMixin(object):
# pyre-fixme[4]: Attribute must be annotated.
_record = None
@classmethod
# pyre-fixme[3]: Return type must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
def Record(cls, extra_fields=None, **kwargs):
if not cls._record:
if not extra_fields:
extra_fields = []
mapper = inspect(cls)
keys = [c.key for c in mapper.column_attrs] + ["model"] + extra_fields
cls._record = namedtuple(cls.__name__ + "Record", keys)
return cls._record(model=cls, **kwargs)
@classmethod
# pyre-fixme[3]: Return type must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
def to_dict(cls, obj):
return obj._asdict()
class MutableRecordMixin(object):
@classmethod
# pyre-fixme[2]: Parameter must be annotated.
def Record(cls, **kwargs) -> Munch:
return Munch(model=cls, **kwargs)
@classmethod
# pyre-fixme[3]: Return type must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
def to_dict(cls, obj):
return obj.toDict()
class PrimaryKeyBase(PrepareMixin, RecordMixin): # noqa
"""Subclass this and include your declarative_base mixin"""
__tablename__ = "primary_keys"
# pyre-fixme[8]: Attribute has type `str`; used as `Column[str]`.
table_name: str = Column(
String(length=100),
doc="Name of the table that this row stores the next available primary key for",
nullable=False,
primary_key=True,
)
# pyre-fixme[8]: Attribute has type `int`; used as
# `Column[Variable[sqlalchemy.sql.type_api._U]]`.
current_id: int = Column(
BIGINT(unsigned=True).with_variant(BIGINT, "sqlite"),
doc="The current/latest id used in the table.",
nullable=False,
primary_key=False,
)
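# An illustrative sketch (not part of SAPP itself) of how the docstring above
# intends PrimaryKeyBase to be used: subclass it together with a SQLAlchemy
# declarative base. The base object below is a stand-in created only for the
# example, assuming the SQLAlchemy 1.x declarative API.
def _primary_key_model_sketch():
    from sqlalchemy.ext.declarative import declarative_base
    Base = declarative_base()
    class PrimaryKey(Base, PrimaryKeyBase):
        pass
    return PrimaryKey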
class PrimaryKeyGeneratorBase: # pyre-ignore[13]
"""Keep track of DB objects' primary keys by ourselves rather than relying
on SQLAlchemy, so we can supply them as arguments when creating association
objects. Subclass to define PRIMARY_KEY class and QUERY_CLASSES."""
# pyre-fixme[24]: Generic type `type` expects 1 type parameter, use
# `typing.Type` to avoid runtime subscripting errors.
PRIMARY_KEY: Type
# pyre-fixme[24]: Generic type `type` expects 1 type parameter, use
# `typing.Type` to avoid runtime subscripting errors.
QUERY_CLASSES: Set[Type]
# Map from class name to an ID range (next_id, max_reserved_id)
pks: Dict[str, Tuple[int, int]] = {}
def reserve(
self,
session: Session,
# pyre-fixme[24]: Generic type `type` expects 1 type parameter, use
# `typing.Type` to avoid runtime subscripting errors.
saving_classes: List[Type],
item_counts: Optional[Dict[str, int]] = None,
) -> "PrimaryKeyGeneratorBase":
"""
session - Session for DB operations.
saving_classes - class objects that need to be saved e.g. Issue, Run
item_counts - map from class name to the number of items, for preallocating
id ranges
"""
query_classes = {cls for cls in saving_classes if cls in self.QUERY_CLASSES}
for cls in query_classes:
if item_counts and cls.__name__ in item_counts:
count = item_counts[cls.__name__]
else:
count = 1
self._reserve_id_range(session, cls, count)
return self
# pyre-fixme[24]: Generic type `type` expects 1 type parameter, use
# `typing.Type` to avoid runtime subscripting errors.
# pyre-fixme[24]: Generic type `type` expects 1 type parameter, use
# `typing.Type` to avoid runtime subscripting errors.
def _lock_pk_with_retries(self, session: Session, cls: Type) -> Optional[Type]:
# pyre-fixme[24]: Generic type `type` expects 1 type parameter, use
# `typing.Type` to avoid runtime subscripting errors.
cls_pk: Optional[Type] = None
retries: int = 6
while retries > 0:
try:
cls_pk = (
session.query(self.PRIMARY_KEY)
.filter(self.PRIMARY_KEY.table_name == cls.__name__)
.with_for_update()
.first()
)
# if we're here, the record has been locked, or there is no record
retries = 0
except exc.OperationalError as ex:
# Failed to get exclusive lock on the record, so we retry
retries -= 1
# Re-raise the exception if our retries are exhausted
if retries == 0:
raise ex
return cls_pk
def _reserve_id_range(
self,
session: Session,
# pyre-fixme[24]: Generic type `type` expects 1 type parameter, use
# `typing.Type` to avoid runtime subscripting errors.
cls: Type,
count: int,
) -> None:
cls_pk = self._lock_pk_with_retries(session, cls)
if not cls_pk:
# If cls_pk is None, then we query the data table for the max ID
# and use that as the current_id in the primary_keys table. This
# should only occur once (the except with a rollback means any
# additional attempt will fail to add a row, and use the "current"
# id value)
row = session.query(cls.id).order_by(cls.id.desc()).first()
try:
session.execute(
"INSERT INTO primary_keys(table_name, current_id) \
VALUES (:table_name, :current_id)",
{
"table_name": cls.__name__,
"current_id": (row.id) if row else 0,
},
)
session.commit()
except exc.SQLAlchemyError as err:
log.error("Writing into the primary keys table failed", exc_info=err)
session.rollback()
cls_pk = self._lock_pk_with_retries(session, cls)
if cls_pk:
next_id = cls_pk.current_id + 1
cls_pk.current_id = cls_pk.current_id + count
pk_entry: Tuple[int, int] = (next_id, cls_pk.current_id)
session.commit()
self.pks[cls.__name__] = pk_entry
# pyre-fixme[3]: Return type must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
def get(self, cls):
assert cls in self.QUERY_CLASSES, (
"%s primary key should be generated by SQLAlchemy" % cls.__name__
)
assert cls.__name__ in self.pks, (
"%s primary key needs to be initialized before use" % cls.__name__
)
(pk, max_pk) = self.pks[cls.__name__]
assert pk <= max_pk, "%s reserved primary key range exhausted" % cls.__name__
self.pks[cls.__name__] = (pk + 1, max_pk)
return pk
| 38.229935
| 88
| 0.616262
|
b8c270376fa13dda8a812527041dc436d1df19b5
| 6,732
|
py
|
Python
|
msl/equipment/resources/bentham/benhw64.py
|
SwiftyMorgan/msl-equipment
|
56bc467e97a2a0a60aa6f031dd30bf1d98ebda5c
|
[
"MIT"
] | null | null | null |
msl/equipment/resources/bentham/benhw64.py
|
SwiftyMorgan/msl-equipment
|
56bc467e97a2a0a60aa6f031dd30bf1d98ebda5c
|
[
"MIT"
] | null | null | null |
msl/equipment/resources/bentham/benhw64.py
|
SwiftyMorgan/msl-equipment
|
56bc467e97a2a0a60aa6f031dd30bf1d98ebda5c
|
[
"MIT"
] | null | null | null |
"""
A wrapper around the :class:`~.benhw32.Bentham32` class.
"""
import os
import inspect
from msl.loadlib import Client64
from msl.equipment.connection import Connection
from msl.equipment.exceptions import BenthamError
from msl.equipment.resources import register
from .errors import BI_OK, ERROR_CODES
from .tokens import MonochromatorCurrentWL, BenMono
@register(manufacturer=r'Bentham', model=r'[D]*TMc300')
class Bentham(Connection):
def __init__(self, record):
"""A wrapper around the :class:`~.benhw32.Bentham32` class.
This class can be used with either a 32- or 64-bit Python interpreter
to call the 32-bit functions in ``benhw32_cdecl.dll``.
The :attr:`~msl.equipment.record_types.ConnectionRecord.properties`
for a Bentham connection supports the following key-value pairs in the
:ref:`connections_database`::
'cfg': str, the path to the System.cfg file [default: None]
'atr': str, the path to the System.atr file [default: None]
If the ``cfg`` and ``atr`` values are not defined in the :ref:`connections_database`
then you will have to call :meth:`build_system_model`, :meth:`load_setup`
and :meth:`initialise` (in that order) to configure the SDK.
Do not instantiate this class directly. Use the :meth:`~.EquipmentRecord.connect`
method to connect to the equipment.
Parameters
----------
record : :class:`~.EquipmentRecord`
A record from an :ref:`equipment_database`.
"""
self._is_connected = False
super(Bentham, self).__init__(record)
self.set_exception_class(BenthamError)
path = record.connection.address[5:]
head, tail = os.path.split(path)
self._tail = tail
self.log_debug('Starting 32-bit server for {}'.format(tail))
# the IEEE_32M.dll library must be available on PATH
env_path = [head, os.path.join(head, 'IEEE', 'Dummy')]
self._client = Client64(
'benhw32',
append_sys_path=os.path.dirname(__file__),
append_environ_path=env_path,
lib_path=path
)
self._hw_id = None
cfg_path = record.connection.properties.get('cfg')
atr_path = record.connection.properties.get('atr')
if cfg_path and atr_path:
self.build_system_model(cfg_path)
self.load_setup(atr_path)
self.initialise()
self._is_connected = True
def auto_measure(self):
ret, reading = self._client.request32('auto_measure')
self.errcheck(ret)
return reading
def build_system_model(self, path):
"""Set the model configuration file.
Parameters
----------
path : :class:`str`
The path to the ``System.cfg`` file.
"""
if not os.path.isfile(path):
raise IOError('Cannot find {}'.format(path))
ret, error_report = self._client.request32('build_system_model', path)
self.errcheck(ret, path, append_msg=error_report)
return ret
def disconnect(self):
"""Disconnect from the SDK and from the 32-bit server."""
if self._is_connected:
self.errcheck(self._client.request32('close'))
self.log_debug('Stopping 32-bit server for {}'.format(self._tail))
self.shutdown_server32()
self._is_connected = False
def errcheck(self, result, *args, **kwargs):
"""Checks whether a function call to the SDK was successful."""
frame = inspect.getouterframes(inspect.currentframe())[1]
self.log_debug('{}.{}{} -> {}'.format(self.__class__.__name__, frame.function, args, result))
if result != BI_OK:
e, m = ERROR_CODES[result]
try:
append_msg = kwargs['append_msg']
except KeyError:
append_msg = ''
self.raise_exception('{0}: {1} {2}'.format(e, m, append_msg))
return result
def get(self, hw_id, token, index):
ret, value = self._client.request32('get', hw_id, token, index)
self.errcheck(ret, hw_id, token, index)
return value
def get_component_list(self):
ret, components = self._client.request32('get_component_list')
self.errcheck(ret)
return components
def get_hardware_type(self, hw_id):
ret, hardware_type = self._client.request32('get_hardware_type', hw_id)
self.errcheck(ret, hw_id)
return hardware_type
def get_mono_items(self, hw_id):
ret, items = self._client.request32('get_mono_items', hw_id)
self.errcheck(ret, hw_id)
return items
@property
def wavelength(self):
if self._hw_id is None:
for item in self.get_component_list():
if self.get_hardware_type(item) == BenMono:
self._hw_id = item
break
if self._hw_id is None:
raise ValueError('Cannot get wavelength. BenMono is not a hardware type.')
return self.get(self._hw_id, MonochromatorCurrentWL, 0)
@wavelength.setter
def wavelength(self, wavelength):
self.select_wavelength(wavelength)
def initialise(self):
"""Initialize the connection."""
return self.errcheck(self._client.request32('initialise'))
def load_setup(self, path):
"""Load the setup file.
Parameters
----------
path : :class:`str`
The path to the ``System.atr`` file.
"""
if not os.path.isfile(path):
raise IOError('Cannot find {}'.format(path))
return self.errcheck(self._client.request32('load_setup', path), path)
def park(self):
return self.errcheck(self._client.request32('park'))
def select_wavelength(self, wavelength):
ret, recommended_delay_ms = self._client.request32('select_wavelength', wavelength)
self.errcheck(ret, wavelength)
return recommended_delay_ms
def set(self, hw_id, token, index, value):
ret = self._client.request32('set', hw_id, token, index, value)
return self.errcheck(ret, hw_id, token, index, value)
def version(self):
""":class:`str`: The version number of the SDK."""
version = self._client.request32('get_version')
self.log_debug('{}.version() -> {}'.format(self.__class__.__name__, version))
return version
def zero_calibration(self, start_wavelength, stop_wavelength):
ret = self._client.request32('zero_calibration', start_wavelength, stop_wavelength)
return self.errcheck(ret, start_wavelength, stop_wavelength)
| 36
| 101
| 0.628788
|
e9fa24ae052ede60b6afb86d074139f576ff240f
| 6,750
|
py
|
Python
|
google/cloud/errorreporting_v1beta1/services/error_stats_service/transports/base.py
|
LaudateCorpus1/python-error-reporting
|
b207f2cec4f5f3196e775ed35cd429f34f9c0bd1
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/errorreporting_v1beta1/services/error_stats_service/transports/base.py
|
LaudateCorpus1/python-error-reporting
|
b207f2cec4f5f3196e775ed35cd429f34f9c0bd1
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/errorreporting_v1beta1/services/error_stats_service/transports/base.py
|
LaudateCorpus1/python-error-reporting
|
b207f2cec4f5f3196e775ed35cd429f34f9c0bd1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.errorreporting_v1beta1.types import error_stats_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-errorreporting",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class ErrorStatsServiceTransport(abc.ABC):
"""Abstract transport class for ErrorStatsService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "clouderrorreporting.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
            always_use_jwt_access (Optional[bool]): Whether self-signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
        # If the credentials are service account credentials, then always try to use self-signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.list_group_stats: gapic_v1.method.wrap_method(
self.list_group_stats, default_timeout=None, client_info=client_info,
),
self.list_events: gapic_v1.method.wrap_method(
self.list_events, default_timeout=None, client_info=client_info,
),
self.delete_events: gapic_v1.method.wrap_method(
self.delete_events, default_timeout=None, client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def list_group_stats(
self,
) -> Callable[
[error_stats_service.ListGroupStatsRequest],
Union[
error_stats_service.ListGroupStatsResponse,
Awaitable[error_stats_service.ListGroupStatsResponse],
],
]:
raise NotImplementedError()
@property
def list_events(
self,
) -> Callable[
[error_stats_service.ListEventsRequest],
Union[
error_stats_service.ListEventsResponse,
Awaitable[error_stats_service.ListEventsResponse],
],
]:
raise NotImplementedError()
@property
def delete_events(
self,
) -> Callable[
[error_stats_service.DeleteEventsRequest],
Union[
error_stats_service.DeleteEventsResponse,
Awaitable[error_stats_service.DeleteEventsResponse],
],
]:
raise NotImplementedError()
__all__ = ("ErrorStatsServiceTransport",)
| 37.292818
| 101
| 0.658074
|
2966b7a3f95fc5671abb96e81c000e69f226e12d
| 5,683
|
py
|
Python
|
src/curt/curt/modules/vision/oakd_facemesh.py
|
sanyaade-teachings/cep
|
59e22b148c3a95eff521ce75cf4eacbcfb074115
|
[
"MIT"
] | 108
|
2021-08-09T17:10:39.000Z
|
2022-03-21T21:59:03.000Z
|
src/curt/curt/modules/vision/oakd_facemesh.py
|
sanyaade-teachings/cep
|
59e22b148c3a95eff521ce75cf4eacbcfb074115
|
[
"MIT"
] | 15
|
2021-09-19T01:25:25.000Z
|
2022-03-28T18:47:49.000Z
|
src/curt/curt/modules/vision/oakd_facemesh.py
|
sanyaade-teachings/cep
|
59e22b148c3a95eff521ce75cf4eacbcfb074115
|
[
"MIT"
] | 14
|
2021-08-10T04:42:17.000Z
|
2022-03-28T16:30:34.000Z
|
"""
Copyright (C) Cortic Technology Corp. - All Rights Reserved
Written by Michael Ng <michaelng@cortic.ca>, 2021
"""
from curt.modules.vision.oakd_processing import OAKDProcessingWorker
import depthai as dai
from curt.modules.vision.utils import *
from curt.modules.vision.utils import decode_image_byte
import numpy as np
import logging
import os
import time
class OAKDFaceMesh(OAKDProcessingWorker):
def __init__(self):
super().__init__()
def preprocess_input(self, params):
img, detected_faces = params
if img is None:
return None
if isinstance(img, str):
img = decode_image_byte(img)
if "facemesh" not in self.oakd_pipeline.xlink_nodes:
logging.warning("No such node: facemesh in the pipeline")
return []
self.fm_nn_node_names = self.oakd_pipeline.xlink_nodes["facemesh"]
self.width = img.shape[1]
self.height = img.shape[0]
face_frames = []
lefts = []
tops = []
xmin_crops = []
ymin_crops = []
scale_xs = []
scale_ys = []
for detection in np.array(detected_faces):
detection[0] = int(detection[0] * img.shape[1])
detection[1] = int(detection[1] * img.shape[0])
detection[2] = int(detection[2] * img.shape[1])
detection[3] = int(detection[3] * img.shape[0])
box_width = detection[2] - detection[0]
box_height = detection[3] - detection[1]
x_center = detection[0] + box_width / 2
y_center = detection[1] + box_height / 2
new_width = box_width / 2 * 1.5
new_height = box_height / 2 * 1.5
xmin_crop = int(x_center - new_width)
ymin_crop = int(y_center - new_height)
xmax_crop = int(x_center + new_width)
ymax_crop = int(y_center + new_height)
top = 0
bottom = 0
left = 0
right = 0
if ymin_crop < 0:
top = ymin_crop * -1
ymin_crop = 0
if xmin_crop < 0:
left = xmin_crop * -1
xmin_crop = 0
if ymax_crop >= img.shape[0]:
bottom = ymax_crop - (img.shape[0] - 1)
ymax_crop = img.shape[0] - 1
if xmax_crop >= img.shape[1]:
right = xmax_crop - (img.shape[1] - 1)
xmax_crop = img.shape[1] - 1
crop_img = img[ymin_crop:ymax_crop, xmin_crop:xmax_crop, :]
crop_img = cv2.copyMakeBorder(
crop_img,
top,
bottom,
left,
right,
cv2.BORDER_CONSTANT,
None,
[0, 0, 0],
)
scale_x = crop_img.shape[1] / float(
self.oakd_pipeline.nn_node_input_sizes["facemesh"][0]
)
scale_y = crop_img.shape[0] / float(
self.oakd_pipeline.nn_node_input_sizes["facemesh"][1]
)
face_frames.append(crop_img)
lefts.append(left)
tops.append(top)
xmin_crops.append(xmin_crop)
ymin_crops.append(ymin_crop)
scale_xs.append(scale_x)
scale_ys.append(scale_y)
return face_frames, lefts, tops, xmin_crops, ymin_crops, scale_xs, scale_ys
def execute_nn_operation(self, preprocessed_data):
(
face_frames,
lefts,
tops,
xmin_crops,
ymin_crops,
scale_xs,
scale_ys,
) = preprocessed_data
raw_facemeshes = []
for face_frame in face_frames:
facemesh = self.get_facemesh_single(self.fm_nn_node_names, face_frame)
raw_facemeshes.append(facemesh)
return raw_facemeshes, lefts, tops, xmin_crops, ymin_crops, scale_xs, scale_ys
def postprocess_result(self, inference_results):
(
raw_facemeshes,
lefts,
tops,
xmin_crops,
ymin_crops,
scale_xs,
scale_ys,
) = inference_results
facemeshes = []
for i in range(len(raw_facemeshes)):
facemesh = raw_facemeshes[i]
left = lefts[i]
top = tops[i]
xmin_crop = xmin_crops[i]
ymin_crop = ymin_crops[i]
scale_x = scale_xs[i]
scale_y = scale_ys[i]
coordinates = np.squeeze(facemesh).reshape((-1, 3))
coordinates[:, 0] = (
coordinates[:, 0] * scale_x + xmin_crop - left
) / self.width
coordinates[:, 1] = (
coordinates[:, 1] * scale_y + ymin_crop - top
) / self.height
coordinates[:, 2] = coordinates[:, 2] * scale_x
facemeshes.append(coordinates.tolist())
return facemeshes
def get_facemesh_single(self, nn_node_names, aligned_face):
frame_fm = dai.ImgFrame()
frame_fm.setWidth(self.oakd_pipeline.nn_node_input_sizes["facemesh"][0])
frame_fm.setHeight(self.oakd_pipeline.nn_node_input_sizes["facemesh"][1])
frame_fm.setData(
to_planar(
aligned_face,
(
self.oakd_pipeline.nn_node_input_sizes["facemesh"][0],
self.oakd_pipeline.nn_node_input_sizes["facemesh"][1],
),
)
)
self.oakd_pipeline.set_input(nn_node_names[0], frame_fm)
facemesh = self.oakd_pipeline.get_output(nn_node_names[1]).getFirstLayerFp16()
return facemesh
| 34.02994
| 86
| 0.545135
|
784b359dea0db948ff7096b9e73bb278869cb2ed
| 1,286
|
py
|
Python
|
the-saga-stepfunction/python/setup.py
|
LincolnHedgehog/serverless
|
80f79cd2ac6fb13ac9e16af6a765a06422be437b
|
[
"MIT"
] | 1
|
2020-05-31T18:58:19.000Z
|
2020-05-31T18:58:19.000Z
|
the-saga-stepfunction/python/setup.py
|
LincolnHedgehog/serverless
|
80f79cd2ac6fb13ac9e16af6a765a06422be437b
|
[
"MIT"
] | null | null | null |
the-saga-stepfunction/python/setup.py
|
LincolnHedgehog/serverless
|
80f79cd2ac6fb13ac9e16af6a765a06422be437b
|
[
"MIT"
] | 1
|
2020-06-01T15:03:24.000Z
|
2020-06-01T15:03:24.000Z
|
import setuptools
with open("README.md") as fp:
long_description = fp.read()
setuptools.setup(
name="the_saga_stepfunction",
version="0.0.1",
description="An empty CDK Python app",
long_description=long_description,
long_description_content_type="text/markdown",
author="author",
package_dir={"": "the_saga_stepfunction"},
packages=setuptools.find_packages(where="the_saga_stepfunction"),
install_requires=[
"aws-cdk.core==1.36.0",
"aws-cdk.aws_apigateway==1.36.0",
"aws-cdk.aws-lambda==1.36.0",
"aws-cdk.aws-dynamodb==1.36.0",
"aws-cdk.aws-stepfunctions==1.36.0",
"aws-cdk.aws-stepfunctions-tasks==1.36.0",
],
python_requires=">=3.6",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: JavaScript",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development :: Code Generators",
"Topic :: Utilities",
"Typing :: Typed",
],
)
| 25.215686
| 69
| 0.607309
|
82903d10da50ba31464b575e8a0f69d024126753
| 650
|
py
|
Python
|
backend/app/scrumboard/serializers.py
|
tamasf97/Platform
|
b5d69d051b6e8dc7d56f723146392c49db5e99c3
|
[
"MIT"
] | 1
|
2019-09-22T10:21:17.000Z
|
2019-09-22T10:21:17.000Z
|
backend/app/scrumboard/serializers.py
|
tamasf97/Platform
|
b5d69d051b6e8dc7d56f723146392c49db5e99c3
|
[
"MIT"
] | 20
|
2019-09-26T13:54:12.000Z
|
2022-02-26T18:07:34.000Z
|
backend/app/scrumboard/serializers.py
|
tamasf97/Platform
|
b5d69d051b6e8dc7d56f723146392c49db5e99c3
|
[
"MIT"
] | 1
|
2019-09-20T09:50:01.000Z
|
2019-09-20T09:50:01.000Z
|
from rest_framework import serializers
from .models import List, Card, Project
from auth_api.serializers import UserSerializer
class CardSerializer(serializers.ModelSerializer):
class Meta:
model = Card
fields = '__all__'
class ListSerializer(serializers.ModelSerializer):
cards = CardSerializer(read_only=True, many=True)
class Meta:
model = List
fields = '__all__'
class ProjectSerializer(serializers.ModelSerializer):
lists = ListSerializer(read_only=True, many=True)
members = UserSerializer(read_only=True, many=True)
class Meta:
model = Project
fields = '__all__'
| 24.074074
| 55
| 0.715385
|
68cd8992875895f3d5d5d5e63a1c8001a65b9c9f
| 3,679
|
py
|
Python
|
var/spack/repos/builtin/packages/mptensor/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11
|
2015-10-04T02:17:46.000Z
|
2018-02-07T18:23:00.000Z
|
var/spack/repos/builtin/packages/mptensor/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22
|
2017-08-01T22:45:10.000Z
|
2022-03-10T07:46:31.000Z
|
var/spack/repos/builtin/packages/mptensor/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4
|
2016-06-10T17:57:39.000Z
|
2018-09-11T04:59:38.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Mptensor(CMakePackage):
"""mptensor is parallel C++ libarary for tensor calculations.
It provides similar interfaces as Numpy and Scipy in Python."""
homepage = "https://github.com/smorita/mptensor"
url = "https://github.com/smorita/mptensor/archive/v0.3.0.tar.gz"
version('0.3.0', sha256='819395a91551bddb77958615042fcb935a4b67ee37f912b9a2ca5b49c71befae')
variant('mpi', default=False, description='Build with MPI library')
variant("doc", default=False, description="build documentation with Doxygen")
depends_on('cmake@3.6:', type='build')
depends_on('mpi', when="+mpi")
depends_on('blas')
depends_on('lapack')
depends_on('scalapack', when="+mpi")
depends_on('doxygen@:1.8.11', type="build", when="+doc")
test_requires_compiler = True
def cmake_args(self):
spec = self.spec
options = []
if "+mpi" in spec:
options.extend([
'-DCMAKE_C_COMPILER=%s' % spec['mpi'].mpicc,
'-DCMAKE_CXX_COMPILER=%s' % spec['mpi'].mpicxx,
'-DCMAKE_Fortran_COMPILER=%s' % spec['mpi'].mpifc,
'-DSCALAPACK_LIBRARIES=%s' % spec['scalapack'].libs,
])
else:
options.extend([
'-DCMAKE_C_COMPILER=%s' % spack_cc,
'-DCMAKE_CXX_COMPILER=%s' % spack_cxx,
'-DCMAKE_Fortran_COMPILER=%s' % spack_fc,
])
blas = spec['blas'].libs
lapack = spec['lapack'].libs
options.extend([
'-DLAPACK_LIBRARIES=%s' % ';'.join(lapack),
'-DBLAS_LIBRARIES=%s' % ';'.join(blas),
self.define_from_variant('ENABLE_MPI', 'mpi'),
self.define_from_variant('BUILD_DOC', 'doc')
])
return options
@run_after("install")
def setup_build_tests(self):
"""Copy the build test files after the package is installed to an
install test subdirectory for use during `spack test run`."""
self.cache_extra_test_sources('.')
def test(self):
if "+mpi" not in self.spec:
print("Test of mptensor only runs with +mpi option.")
else:
with working_dir(join_path(self.install_test_root, "tests"), create=False):
make("clean")
makefile = FileFilter("Makefile")
makefile.filter("g++", "{0}".format(spack_cxx), string=True)
with working_dir(join_path(self.install_test_root), create=False):
makefile = FileFilter("Makefile.option")
makefile.filter("CXX =.*", "CXX ={0}".format(self.spec["mpi"].mpicxx))
makefile.filter(
"CXXFLAGS =.*", "CXXFLAGS ={0}".format(self.compiler.cxx11_flag)
)
math_libs = (
self.spec["scalapack"].libs
+ self.spec["lapack"].libs
+ self.spec["blas"].libs
)
with working_dir(join_path(self.install_test_root, "tests"), create=False):
make("LDFLAGS={0}".format(math_libs.ld_flags))
mpirun = self.spec["mpi"].prefix.bin.mpirun
mpiexec = Executable(mpirun)
mpiexec("-n", "1", "tensor_test.out")
# Test of mptensor has checker
# and checker is abort when check detect any errors.
print("Test of mptensor PASSED !")
| 37.927835
| 95
| 0.575428
|
ce2f96323d651f4b3808ba7e0ff18715a89db29a
| 12,780
|
py
|
Python
|
Lib/test/test_class_jy.py
|
weimingtom/j2mepython-midp
|
472333ebc6a7f06d92c5ede85c8ed55e4ad66c6d
|
[
"CNRI-Jython",
"PSF-2.0",
"Apache-2.0"
] | 1
|
2015-11-07T12:22:17.000Z
|
2015-11-07T12:22:17.000Z
|
Lib/test/test_class_jy.py
|
weimingtom/j2mepython-midp
|
472333ebc6a7f06d92c5ede85c8ed55e4ad66c6d
|
[
"CNRI-Jython",
"PSF-2.0",
"Apache-2.0"
] | null | null | null |
Lib/test/test_class_jy.py
|
weimingtom/j2mepython-midp
|
472333ebc6a7f06d92c5ede85c8ed55e4ad66c6d
|
[
"CNRI-Jython",
"PSF-2.0",
"Apache-2.0"
] | null | null | null |
"""Misc. class tests. These are more general class tests than CPython's
test_class which focuses on operators.
Made for Jython
"""
import __builtin__
import types
import unittest
from java.lang import Object
from test import test_support
class ClassGeneralTestCase(unittest.TestCase):
TE_MSG = "can't set attributes of built-in/extension type 'str'"
def test_dunder_module(self):
self.assertEqual(str.__module__, '__builtin__')
class Foo:
pass
Fu = types.ClassType('Fu', (), {})
for cls in Foo, Fu:
self.assert_('__module__' in cls.__dict__)
self.assertEqual(cls.__module__, __name__)
self.assertEqual(str(cls), '%s.%s' % (__name__, cls.__name__))
self.assert_(repr(cls).startswith('<class %s.%s at' %
(__name__, cls.__name__)))
obj = cls()
self.assert_(str(obj).startswith('<%s.%s instance at' %
(__name__, cls.__name__)))
class Bar(object):
pass
class Baz(Object):
pass
Bang = type('Bang', (), {})
for cls in Bar, Baz, Bang:
self.assert_('__module__' in cls.__dict__)
self.assertEqual(cls.__module__, __name__)
self.assertEqual(str(cls), "<class '%s.%s'>" % (__name__, cls.__name__))
self.assertEqual(repr(cls), "<class '%s.%s'>" % (__name__, cls.__name__))
self.assert_(str(Bar()).startswith('<%s.Bar object at' % __name__))
self.assert_(str(Baz()).startswith("org.python.proxies.%s$Baz" % __name__))
def test_builtin_attributes(self):
for attr, val in dict(__name__='foo', __module__='bar', __dict__={},
__flags__=1, __base__=object,
__bases__=(unicode, object),
__mro__=(unicode, object)).iteritems():
try:
setattr(str, attr, val)
except TypeError, te:
self.assertEqual(str(te), self.TE_MSG)
else:
self.assert_(False,
'setattr str.%s expected a TypeError' % attr)
try:
delattr(str, attr)
except TypeError, te:
self.assertEqual(str(te), self.TE_MSG)
else:
self.assert_(False,
'delattr str.%s expected a TypeError' % attr)
def test_attributes(self):
class Foo(object):
pass
Foo.__name__ = 'Bar'
self.assertEqual(Foo.__name__, 'Bar')
try:
del Foo.__name__
except TypeError, te:
self.assertEqual(str(te), "can't delete Bar.__name__")
else:
self.assert_(False, 'Expected a TypeError')
Foo.__module__ = 'baz'
self.assertEqual(Foo.__module__, 'baz')
try:
del Foo.__module__
except TypeError, te:
self.assertEqual(str(te), "can't delete Bar.__module__")
else:
self.assert_(False, 'Expected a TypeError')
try:
Foo.__dict__ = {}
except AttributeError, ae:
self.assertEqual(str(ae),
"attribute '__dict__' of 'type' objects is not "
"writable")
else:
self.assert_(False, 'Expected an AttributeError')
try:
del Foo.__dict__
except AttributeError, ae:
self.assertEqual(str(ae),
"attribute '__dict__' of 'type' objects is not "
"writable")
else:
self.assert_(False, 'Expected an AttributeError')
for attr, val in dict(__flags__=1, __base__=object,
__bases__=(unicode, object),
__mro__=(unicode, object)).iteritems():
try:
setattr(str, attr, val)
except TypeError, te:
self.assertEqual(str(te), self.TE_MSG)
else:
self.assert_(False,
'setattr Foo.%s expected a TypeError' % attr)
try:
delattr(str, attr)
except TypeError, te:
self.assertEqual(str(te), self.TE_MSG)
else:
self.assert_(False,
'delattr Foo.%s expected a TypeError' % attr)
def test_newstyle_new_classobj(self):
        # Ensure types.ClassType can create new-style classes
class Foo(object):
pass
def hello(self):
return 'hello'
Bar = types.ClassType('Bar', (Foo,), dict(hello=hello))
self.assert_(type(Bar), type)
self.assert_(issubclass(Bar, Foo))
self.assert_(hasattr(Bar, 'hello'))
self.assertEquals(Bar().hello(), 'hello')
def test_attribute_error_message(self):
# Ensure that AttributeError matches the CPython message
class Bar:
pass
try:
Bar.bar
            self.assert_(False)  # The previous line should have raised
# AttributeError
except AttributeError, e:
self.assertEqual("class Bar has no attribute 'bar'", str(e))
class Foo(object):
pass
try:
Foo.bar
            self.assert_(False)  # The previous line should have raised
# AttributeError
except AttributeError, e:
self.assertEqual("type object 'Foo' has no attribute 'bar'",
str(e))
def test_inner_class_dict(self):
class z:
class t:
def moo(self):
pass
# Printing this caused an NPE in Jython 2.1
keys = list(z.t.__dict__)
keys.sort()
self.assertEqual(str(keys), "['__doc__', '__module__', 'moo']")
def test_metaclass_and_slotted_base(self):
class Meta(type):
pass
class SlottedBase(object):
__slots__ = 'foo'
# A regression up until 2.5a3: Defining Bar would cause a
# TypeError "mro() returned base with unsuitable layout ('Bar')"
class Bar(SlottedBase):
__metaclass__ = Meta
def test_slotted_diamond_problem_bug(self):
class A(object):
__slots__ = 'foo'
class B(A):
pass
class C(A):
pass
# used to raise TypeError: multiple bases have instance lay-out
# conflict
class D(B, C):
pass
def test_getitem_exceptions(self):
class A:
def __getitem__(self, key):
raise IndexError, "Fraid not"
self.assertRaises(IndexError, A().__getitem__, 'b')
class ClassNamelessModuleTestCase(unittest.TestCase):
def setUp(self):
global __name__
self.name = __name__
del __name__
def tearDown(self):
global __name__
__name__ = self.name
def test_nameless_module(self):
class Foo:
pass
self.assertEqual(Foo.__module__, '__builtin__')
self.assertEqual(str(Foo), '__builtin__.Foo')
self.assert_(repr(Foo).startswith('<class __builtin__.Foo at'))
foo = Foo()
self.assert_(str(foo).startswith('<__builtin__.Foo instance at'))
class Bar(object):
pass
self.assertEqual(Bar.__module__, '__builtin__')
self.assertEqual(str(Bar), "<class 'Bar'>")
self.assertEqual(repr(Bar), "<class 'Bar'>")
bar = Bar()
self.assert_(str(bar).startswith('<Bar '))
self.assert_(repr(bar).startswith('<Bar object at'))
class BrokenNameTestCase(unittest.TestCase):
def setUp(self):
global __name__
self.name = __name__
self.builtin_name = __builtin__.__name__
del __name__
del __builtin__.__name__
def tearDown(self):
global __name__
__builtin__.__name__ = self.builtin_name
__name__ = self.name
def test_broken_name(self):
try:
class Foobar:
pass
except NameError:
pass
else:
self.assert_(False, "Expected a NameError")
class ClassLocalsTestCase(unittest.TestCase):
def test_class_locals(self):
class Foo(object):
pass
class Bar(object):
foo = Foo()
self.assert_(not hasattr(Bar, 'Foo'))
class Bar2(object):
foo = Foo()
locals()
        # Observer effect: Bar2 differs because we looked at
        # locals(). This might be considered 'buggy' behavior, but it
        # matches CPython and PyPy. See below for an example.
self.assert_(hasattr(Bar2, 'Foo'))
def test_class_locals_realworld(self):
# A more real world test of the above situation, for reference
class FieldGathererMeta(type):
def __new__(meta, name, bases, class_dict):
cls = type.__new__(meta, name, bases, class_dict)
cls.fields = [field.upper() for field in class_dict.iterkeys() \
if not field.startswith('_')]
cls.fields.sort()
return cls
class SomeClass(object):
pass
class MyFields(object):
__metaclass__ = FieldGathererMeta
jython = 'foo'
java = ('bar', SomeClass())
# Technically SomeClass and FieldGathererMeta are actually
# locals in the MyFields' class definition scope, but we expect
# them to be omitted from its class_dict
self.assertEqual(MyFields.fields, ['JAVA', 'JYTHON'])
class MyFields2(object):
__metaclass__ = FieldGathererMeta
jython = 'foo'
java = ('bar', SomeClass())
locals()
# Oops, locals() updates f_locals. Hilarity ensues
self.assertEqual(MyFields2.fields, ['FIELDGATHERERMETA', 'JAVA',
'JYTHON', 'SOMECLASS'])
class IsDescendentTestCase(unittest.TestCase):
def test_newstyle_descendent_of_oldstyle(self):
class NewStyle(object):
pass
class OldStyle:
pass
class Retro(NewStyle, OldStyle):
pass
self.assert_(issubclass(Retro, NewStyle))
self.assert_(issubclass(Retro, OldStyle))
retro = Retro()
self.assert_(isinstance(retro, NewStyle))
self.assert_(isinstance(retro, OldStyle))
class JavaClassNamingTestCase(unittest.TestCase):
"""Tests for PyJavaClass naming."""
def test_java_class_name(self):
# The __name__ and __module__ attributes of Java classes should
# be set according to the same convention that Python uses.
from java.lang import String
self.assertEqual(String.__name__, "String")
self.assertEqual(String.__module__, "java.lang")
module_name = __name__
class ClassDefinesDunderModule(unittest.TestCase):
"""Verifies http://bugs.jython.org/issue1022 is fixed"""
def test_dundermodule_in_classdef(self):
class Foo:
self.assertEqual(__module__, module_name)
class Bar(object):
self.assertEqual(__module__, module_name)
def test_dundermodule_in_class_dict_copy(self):
class_dict = {'a': 'this is a', 'b': 'this is b'}
Foo = type.__new__(type, 'Foo', (object,), class_dict)
Foo.keys = class_dict.keys
assert sorted(Foo().keys()) == sorted(['a', 'b']), sorted(Foo().keys())
class ClassMetaclassRepr(unittest.TestCase):
def test_repr_with_metaclass(self):
# http://bugs.jython.org/issue1131
class FooMetaclass(type):
def __new__(cls, name, bases, attrs):
return super(FooMetaclass, cls).__new__(cls, name, bases, attrs)
class Foo(object):
__metaclass__ = FooMetaclass
self.assertEqual("<class '%s.Foo'>" % __name__, repr(Foo))
def test_metaclass_str(self):
class Foo(type):
def __repr__(cls):
return 'foo'
class Bar(object):
__metaclass__ = Foo
self.assertEqual(repr(Bar), 'foo')
# type.__str__ previously broke this
self.assertEqual(str(Bar), 'foo')
def test_main():
test_support.run_unittest(
ClassGeneralTestCase,
ClassNamelessModuleTestCase,
BrokenNameTestCase,
ClassLocalsTestCase,
IsDescendentTestCase,
JavaClassNamingTestCase,
ClassDefinesDunderModule,
ClassMetaclassRepr)
if __name__ == "__main__":
test_main()
| 33.455497
| 85
| 0.563146
|
ee3e594032a4a31288798f7ae133b00b3b0872a5
| 556
|
py
|
Python
|
test/constants.py
|
js-ts/fix-same-dataset-tests
|
d76091d1b7bac4d267caaf9b6e04dd255aef8053
|
[
"Apache-2.0"
] | 8
|
2021-12-17T18:26:24.000Z
|
2022-03-16T18:21:04.000Z
|
test/constants.py
|
js-ts/fix-same-dataset-tests
|
d76091d1b7bac4d267caaf9b6e04dd255aef8053
|
[
"Apache-2.0"
] | 45
|
2021-12-18T08:28:56.000Z
|
2022-03-31T21:24:45.000Z
|
test/constants.py
|
js-ts/fix-same-dataset-tests
|
d76091d1b7bac4d267caaf9b6e04dd255aef8053
|
[
"Apache-2.0"
] | 5
|
2021-12-17T20:08:38.000Z
|
2022-03-21T13:51:06.000Z
|
# Environment variable that can be optionally set to specify a host where Durable Functions backend is running.
# If none is specified, the one deployed on Azure Functions is used.
DURABLE_FUNCTIONS_BACKEND_TEST_HOST_ENV_VAR = "DURABLE_FUNCTIONS_BACKEND_TEST_HOST"
# Name of the app deployed on Azure Functions running the test backend.
DURABLE_FUNCTIONS_APP_NAME_AZURE = "same-df-backend"
# Azure Functions host URL where the test backend is running.
DURABLE_FUNCTIONS_BACKEND_URL_AZURE = f"https://{DURABLE_FUNCTIONS_APP_NAME_AZURE}.azurewebsites.net"
| 55.6
| 111
| 0.832734
|
0a27c4e57aa4e89d6ea02e1bef612a6571343918
| 1,710
|
py
|
Python
|
db_trial.py
|
zenranda/proj6-mongod
|
26286c28b9b65e4c84a959f2322e8e070d987485
|
[
"Artistic-2.0"
] | null | null | null |
db_trial.py
|
zenranda/proj6-mongod
|
26286c28b9b65e4c84a959f2322e8e070d987485
|
[
"Artistic-2.0"
] | null | null | null |
db_trial.py
|
zenranda/proj6-mongod
|
26286c28b9b65e4c84a959f2322e8e070d987485
|
[
"Artistic-2.0"
] | null | null | null |
"""
Just to test database functions,
outside of Flask.
We want to open our MongoDB database,
insert some memos, and read them back
"""
import pymongo
from pymongo import MongoClient
import arrow
import sys
import secrets.admin_secrets
import secrets.client_secrets
MONGO_CLIENT_URL = "mongodb://{}:{}@{}:{}/{}".format(
secrets.client_secrets.db_user,
secrets.client_secrets.db_user_pw,
secrets.admin_secrets.host,
secrets.admin_secrets.port,
secrets.client_secrets.db)
try:
dbclient = MongoClient(MONGO_CLIENT_URL)
db = getattr(dbclient, secrets.client_secrets.db)
print("Got database")
collection = db.dated
print("Using sample collection")
except Exception as err:
print("Failed")
print(err)
sys.exit(1)
#
# Insertions: I commented these out after the first
# run successfully inserted them
#
record = { "type": "dated_memo",
"date": arrow.utcnow().naive,
"text": "This is a sample memo"
}
collection.insert(record)
record = { "type": "dated_memo",
"date": arrow.utcnow().replace(days=+1).naive,
"text": "Sample one day later"
}
collection.insert(record)
#
# Read database --- May be useful to see what is in there,
# even after you have a working 'insert' operation in the flask app,
# but they aren't very readable. If you have more than a couple records,
# you'll want a loop for printing them in a nicer format.
#
records = [ ]
for record in collection.find( { "type": "dated_memo" } ):
records.append(
{ "type": record['type'],
"date": arrow.get(record['date']).to('local').isoformat(),
"text": record['text']
})
print(records)
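# ---------------------------------------------------------------------------
# Hedged modernization note (not part of the original script): PyMongo 3+
# deprecates Collection.insert() in favour of insert_one()/insert_many(), and
# recent Arrow releases use shift() instead of replace(days=+1).  A minimal
# sketch of the same two inserts with those APIs, reusing the `collection`
# and `arrow` objects from above; nothing here runs unless it is called.
def insert_sample_memos(memo_collection):
    memo_collection.insert_one({
        "type": "dated_memo",
        "date": arrow.utcnow().naive,
        "text": "This is a sample memo"})
    memo_collection.insert_one({
        "type": "dated_memo",
        "date": arrow.utcnow().shift(days=+1).naive,
        "text": "Sample one day later"})
# ---------------------------------------------------------------------------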
| 24.782609
| 73
| 0.661404
|
298b1ac043b949d8a322ea61de9d54b13f6deb8b
| 2,208
|
py
|
Python
|
fcos/core/layers/roi_align.py
|
best-of-acrv/fcos
|
47e5624973b256b8c74ce2c00fca50e62c19c66a
|
[
"BSD-3-Clause"
] | 1
|
2021-12-12T19:17:34.000Z
|
2021-12-12T19:17:34.000Z
|
fcos/core/layers/roi_align.py
|
best-of-acrv/fcos
|
47e5624973b256b8c74ce2c00fca50e62c19c66a
|
[
"BSD-3-Clause"
] | 2
|
2021-09-17T10:28:01.000Z
|
2022-03-01T00:02:27.000Z
|
fcos/core/layers/roi_align.py
|
best-of-acrv/fcos
|
47e5624973b256b8c74ce2c00fca50e62c19c66a
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from .. import _C
class _ROIAlign(Function):
@staticmethod
def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio):
ctx.save_for_backward(roi)
ctx.output_size = _pair(output_size)
ctx.spatial_scale = spatial_scale
ctx.sampling_ratio = sampling_ratio
ctx.input_shape = input.size()
output = _C.roi_align_forward(input, roi, spatial_scale,
output_size[0], output_size[1],
sampling_ratio)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
rois, = ctx.saved_tensors
output_size = ctx.output_size
spatial_scale = ctx.spatial_scale
sampling_ratio = ctx.sampling_ratio
bs, ch, h, w = ctx.input_shape
grad_input = _C.roi_align_backward(
grad_output,
rois,
spatial_scale,
output_size[0],
output_size[1],
bs,
ch,
h,
w,
sampling_ratio,
)
return grad_input, None, None, None, None
roi_align = _ROIAlign.apply
class ROIAlign(nn.Module):
def __init__(self, output_size, spatial_scale, sampling_ratio):
super(ROIAlign, self).__init__()
self.output_size = output_size
self.spatial_scale = spatial_scale
self.sampling_ratio = sampling_ratio
def forward(self, input, rois):
return roi_align(input, rois, self.output_size, self.spatial_scale,
self.sampling_ratio)
def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "output_size=" + str(self.output_size)
tmpstr += ", spatial_scale=" + str(self.spatial_scale)
tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
tmpstr += ")"
return tmpstr
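# Illustrative aside (not part of the original file): torchvision's built-in
# torchvision.ops.roi_align can serve as a sanity check for the custom _C
# extension above.  Boxes use the (batch_index, x1, y1, x2, y2) layout and the
# tensors below are made up; this only runs when the module is executed
# directly and torchvision is installed.
if __name__ == "__main__":
    import torch
    import torchvision.ops as ops
    features = torch.randn(1, 8, 32, 32)
    rois = torch.tensor([[0.0, 4.0, 4.0, 20.0, 20.0]])
    pooled = ops.roi_align(features, rois, output_size=(7, 7),
                           spatial_scale=1.0, sampling_ratio=2)
    print(pooled.shape)  # torch.Size([1, 8, 7, 7])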
| 32
| 78
| 0.597826
|
01ff2ae59d247ab32c79e877faf0aff9e02c9948
| 3,743
|
py
|
Python
|
scripts/sources/s_rn_derivative_mre.py
|
dpopadic/arpmRes
|
ddcc4de713b46e3e9dcb77cc08c502ce4df54f76
|
[
"MIT"
] | 6
|
2021-04-10T13:24:30.000Z
|
2022-03-26T08:20:42.000Z
|
scripts/sources/s_rn_derivative_mre.py
|
dpopadic/arpmRes
|
ddcc4de713b46e3e9dcb77cc08c502ce4df54f76
|
[
"MIT"
] | null | null | null |
scripts/sources/s_rn_derivative_mre.py
|
dpopadic/arpmRes
|
ddcc4de713b46e3e9dcb77cc08c502ce4df54f76
|
[
"MIT"
] | 6
|
2019-08-13T22:02:17.000Z
|
2022-02-09T17:49:12.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_rn_derivative_mre [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_rn_derivative_mre&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-sdf-mre).
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy as sp
from arpym.statistics import simulate_normal, cdf_sp, pdf_sp
from arpym.pricing import numeraire_mre
from arpym.tools import add_logo
# -
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_rn_derivative_mre-implementation-step00): Upload data
# +
path = '../../../databases/temporary-databases/'
db_vpayoff = pd.read_csv(path+'db_valuation_vpayoff.csv', index_col=0)
v_payoff = db_vpayoff.values
db_vtnow = pd.read_csv(path+'db_valuation_vtnow.csv', index_col=0)
v_tnow = db_vtnow.values.T[0]
db_prob = pd.read_csv(path+'db_valuation_prob.csv', index_col=0)
p = db_prob.values.T[0]
# -
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_rn_derivative_mre-implementation-step01): Compute the minimum relative entropy numeraire probabilities
# +
p_mre, sdf_mre = numeraire_mre(v_payoff, v_tnow, p=p, k=1)
# -
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_rn_derivative_mre-implementation-step02): Compute Radon-Nikodym derivative and inflator
# +
# compute Radon-Nikodym derivative
rnd_mre = p_mre / p
# -
# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_rn_derivative_mre-implementation-step03): Compute pdfs
# +
h = 0.02
# grid for computing pdfs
x = np.linspace(-1, 4, 100)
# compute pdfs
sdf_mre = pdf_sp(h, np.array([x]).T, np.array([sdf_mre]).T, p)
rnd_mre = pdf_sp(h, np.array([x]).T, np.array([rnd_mre]).T, p)
infl = pdf_sp(h, np.array([x]).T, np.array([v_payoff[:, 1]/v_tnow[1]]).T,
p)
# -
# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_rn_derivative_mre-implementation-step04): Compute cdfs under probability measures p and p_mre
# +
y = np.linspace(0, 12, 100)
ind = np.argsort(v_payoff[:, 1])
cdf = cdf_sp(y, v_payoff[:, 1], p)
cdf_mre = cdf_sp(y, v_payoff[:, 1], p_mre)
# -
# ## Plots
# +
plt.style.use('arpm')
sdf_name = r'$\mathit{SDF}_{t_{\mathit{now}}\rightarrow t_{\mathit{hor}}}^{\mathit{MRE}}$'
rnd_name = r'$\mathit{RND}_{t_{\mathit{now}}\rightarrow t_{\mathit{hor}}}^{\mathit{MRE}}$'
infl_name = r'$\mathit{V}_{2,t_{\mathit{now}}\rightarrow t_{\mathit{hor}}}^{\mathit{payoff}}/v_{2,t_{\mathit{now}}}$'
fig, axes = plt.subplots(1, 2)
axes[0].plot(x, sdf_mre, 'b', label=sdf_name)
axes[0].plot(x, rnd_mre, 'g', label=rnd_name)
axes[0].plot(x, infl, 'r', label=infl_name)
yl = axes[0].get_ylim()
axes[0].plot([v_tnow[0], v_tnow[0]], [0, yl[1]], 'b--',
label=r'$E\{$' + sdf_name + '$\}$')
axes[0].plot([1, 1], [0, yl[1]], 'g--',
label=r'$E\{$' + rnd_name + '$\}$')
axes[0].plot([p @ v_payoff[:, 1] / v_tnow[1],
p @ v_payoff[:, 1] / v_tnow[1]], [0, yl[1]], 'r--',
label=r'$E\{$' + infl_name + '$\}$')
axes[0].set_xlim([x[0], x[-1]])
axes[0].set_ylim(yl)
axes[0].legend()
axes[1].plot(y, cdf, 'b', label='$F$')
axes[1].plot(y, cdf_mre, 'g', label='$F^{numer}$')
axes[1].set_ylim([0, 1])
axes[1].set_xlabel(r'$\mathit{V}_{2,t_{\mathit{now}}\rightarrow t_{\mathit{hor}}}^{\mathit{payoff}}$')
axes[1].legend()
add_logo(fig, location=4, size_frac_x=1/8)
plt.tight_layout()
| 33.123894
| 209
| 0.659097
|
33c379c98ba9c9d27d9f59e1bbbaceb76c648d22
| 1,582
|
py
|
Python
|
MQTTupload/DORJI_Serial_to_MarkerAPI.py
|
JittoThomas/IOT
|
994fa25087d14e33c2d82b9c9d526f65823b6fa8
|
[
"MIT"
] | null | null | null |
MQTTupload/DORJI_Serial_to_MarkerAPI.py
|
JittoThomas/IOT
|
994fa25087d14e33c2d82b9c9d526f65823b6fa8
|
[
"MIT"
] | null | null | null |
MQTTupload/DORJI_Serial_to_MarkerAPI.py
|
JittoThomas/IOT
|
994fa25087d14e33c2d82b9c9d526f65823b6fa8
|
[
"MIT"
] | null | null | null |
#DORJI_Serial_to_MarkerAPI
#!/usr/bin/env python
#WARNING: This is a TTL serial port and must not have more than 3.3 volts applied to the pins
#this imports the libraries needed
import serial, time
#import needed modules
import urllib
import urllib2
#This sets up the serial port ttyAMA0 GPIO. baud rate is the bits per second.
port = serial.Serial("/dev/ttyAMA0", baudrate=2400)
while True:
#read buffer until cr/lf
rcv = port.readline()
rcv = rcv.rstrip("\r\n")
attributes = rcv.split(",")
#for attribute in attributes:
#print(attribute)
param, key = attributes[0].split("=",1)
param, node = attributes[1].split("=",1)
param, channel = attributes[2].split("=",1)
param, data = attributes[3].split("=",1)
print(key, node, channel, data)
# Custom Functions
def send():
#API URL
url = 'http://203.118.129.73:8082/api/marker'
#place marker attributes in a dictionary
dataToSend = {
'key' : key,
'node' : node,
'channel' : channel,
'latitude' : '',
'longitude' : '',
'elevation' : '',
'data' : data
}
data_encoded = urllib.urlencode(dataToSend)
req = urllib2.Request(url, data_encoded)
response = urllib2.urlopen(req)
print response.read()
    send()  # execute send function
| 31.64
| 94
| 0.530341
|
bd5ad529e92d808ff2ef0c2ee02c6c70a30af5be
| 1,237
|
py
|
Python
|
saleor/graphql/page/sorters.py
|
fairhopeweb/saleor
|
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
|
[
"CC-BY-4.0"
] | 15,337
|
2015-01-12T02:11:52.000Z
|
2021-10-05T19:19:29.000Z
|
saleor/graphql/page/sorters.py
|
fairhopeweb/saleor
|
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
|
[
"CC-BY-4.0"
] | 7,486
|
2015-02-11T10:52:13.000Z
|
2021-10-06T09:37:15.000Z
|
saleor/graphql/page/sorters.py
|
aminziadna/saleor
|
2e78fb5bcf8b83a6278af02551a104cfa555a1fb
|
[
"CC-BY-4.0"
] | 5,864
|
2015-01-16T14:52:54.000Z
|
2021-10-05T23:01:15.000Z
|
import graphene
from ..core.types import SortInputObjectType
class PageSortField(graphene.Enum):
TITLE = ["title", "slug"]
SLUG = ["slug"]
VISIBILITY = ["is_published", "title", "slug"]
CREATION_DATE = ["created", "title", "slug"]
PUBLICATION_DATE = ["publication_date", "title", "slug"]
@property
def description(self):
if self.name in PageSortField.__enum__._member_names_:
sort_name = self.name.lower().replace("_", " ")
return f"Sort pages by {sort_name}."
raise ValueError("Unsupported enum value: %s" % self.value)
class PageSortingInput(SortInputObjectType):
class Meta:
sort_enum = PageSortField
type_name = "pages"
class PageTypeSortField(graphene.Enum):
NAME = ["name", "slug"]
SLUG = ["slug"]
@property
def description(self):
if self.name in PageTypeSortField.__enum__._member_names_:
            sort_name = self.name.lower().replace("_", " ")
            return f"Sort page types by {sort_name}."
raise ValueError(f"Unsupported enum value: {self.value}")
class PageTypeSortingInput(SortInputObjectType):
class Meta:
sort_enum = PageTypeSortField
type_name = "page types"
| 28.767442
| 67
| 0.647534
|
f352dbdc764ad97b8609c8a0870e659a64a3ce2b
| 555
|
py
|
Python
|
tests/helper_test.py
|
KI-Research-Services/CREDA_tools
|
5d2709fa3e1f96fe8315b0e838253dd025f7ff85
|
[
"MIT"
] | 1
|
2021-05-17T16:44:27.000Z
|
2021-05-17T16:44:27.000Z
|
tests/helper_test.py
|
KI-Research-Services/CREDA_tools
|
5d2709fa3e1f96fe8315b0e838253dd025f7ff85
|
[
"MIT"
] | null | null | null |
tests/helper_test.py
|
KI-Research-Services/CREDA_tools
|
5d2709fa3e1f96fe8315b0e838253dd025f7ff85
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 9 18:52:51 2020
@author: fisherd
"""
from CREDA_tools import helper
import pytest
def test_init_project_file():
with pytest.raises(FileNotFoundError):
helper.CREDA_Project("addresses", "no_good_file.txt")
def test_init_project_type():
with pytest.raises(ValueError):
helper.CREDA_Project("addreses", "test_data/san_jose_d1.csv")
def test_init_address_missing_column():
with pytest.raises(KeyError):
helper.CREDA_Project("addresses", "test_data/bad_file.csv")
| 26.428571
| 69
| 0.715315
|
e4c73d82553d52e576ed769b4e5d00aeedb76f3b
| 2,498
|
py
|
Python
|
src/clusterfuzz/_internal/tests/appengine/handlers/testcase_detail/update_from_trunk_test.py
|
mspectorgoogle/clusterfuzz
|
44df69cbcb94efc212f27758d45d6ff0f36061e5
|
[
"Apache-2.0"
] | 5,023
|
2019-02-07T16:57:56.000Z
|
2022-03-31T01:08:05.000Z
|
src/clusterfuzz/_internal/tests/appengine/handlers/testcase_detail/update_from_trunk_test.py
|
mspectorgoogle/clusterfuzz
|
44df69cbcb94efc212f27758d45d6ff0f36061e5
|
[
"Apache-2.0"
] | 2,303
|
2019-02-07T17:36:36.000Z
|
2022-03-31T15:44:38.000Z
|
src/clusterfuzz/_internal/tests/appengine/handlers/testcase_detail/update_from_trunk_test.py
|
mspectorgoogle/clusterfuzz
|
44df69cbcb94efc212f27758d45d6ff0f36061e5
|
[
"Apache-2.0"
] | 564
|
2019-02-07T17:34:24.000Z
|
2022-03-26T09:25:44.000Z
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""update_from_trunk tests."""
import unittest
import flask
import webtest
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.tests.test_libs import helpers as test_helpers
from clusterfuzz._internal.tests.test_libs import test_utils
from handlers.testcase_detail import update_from_trunk
from libs import form
@test_utils.with_cloud_emulators('datastore')
class HandlerTest(unittest.TestCase):
"""Test Handler."""
def setUp(self):
test_helpers.patch(self, [
'clusterfuzz._internal.base.tasks.add_task',
'clusterfuzz._internal.base.tasks.queue_for_job',
'libs.auth.get_current_user',
'handlers.testcase_detail.show.get_testcase_detail',
'libs.access.check_access_and_get_testcase',
])
flaskapp = flask.Flask('testflask')
flaskapp.add_url_rule('/', view_func=update_from_trunk.Handler.as_view('/'))
self.app = webtest.TestApp(flaskapp)
self.testcase = data_types.Testcase(queue='old-queue')
self.testcase.put()
self.mock.check_access_and_get_testcase.return_value = self.testcase
self.mock.get_testcase_detail.return_value = {'testcase': 'yes'}
self.mock.get_current_user().email = 'test@user.com'
def test_succeed(self):
"""Update from trunk"""
self.mock.queue_for_job.return_value = 'jobs-suffix'
self.testcase.crash_stacktrace = 'Random'
self.testcase.job_type = 'job'
self.testcase.put()
resp = self.app.post_json(
'/', {
'testcaseId': self.testcase.key.id(),
'csrf_token': form.generate_csrf_token(),
})
self.assertEqual(200, resp.status_int)
self.assertEqual('yes', resp.json['testcase'])
self.mock.add_task.assert_called_once_with(
'variant', self.testcase.key.id(), 'job', queue='jobs-suffix')
testcase = self.testcase.key.get()
self.assertEqual('Pending', testcase.last_tested_crash_stacktrace)
| 35.685714
| 80
| 0.729384
|
0b07a135b8d883019cab30dc9cb7d21374be4576
| 470
|
py
|
Python
|
utility/trim_dataset.py
|
jain-nikunj/radioML
|
24bcfce5f189e22679881c3eea3819e7a19e7301
|
[
"MIT"
] | 5
|
2018-03-07T03:46:32.000Z
|
2021-02-20T11:59:40.000Z
|
utility/trim_dataset.py
|
jain-nikunj/radioML
|
24bcfce5f189e22679881c3eea3819e7a19e7301
|
[
"MIT"
] | 1
|
2018-03-11T03:19:03.000Z
|
2018-03-11T03:19:03.000Z
|
utility/trim_dataset.py
|
jain-nikunj/radioML
|
24bcfce5f189e22679881c3eea3819e7a19e7301
|
[
"MIT"
] | 3
|
2018-03-14T18:16:25.000Z
|
2018-11-14T07:19:41.000Z
|
import numpy as np
import cPickle
import matplotlib.pyplot as plt
def open_ds(location):
f = open(location)
ds = cPickle.load(f)
return ds
def save_ds(dataset, location):
    cPickle.dump(dataset, file(location, 'wb'))
def main():
ds = open_ds(location='datasets/radioml.dat')
ds_trimmed = {}
ds_trimmed[('BPSK', 18)] = ds[('BPSK', 18)]
save_ds(dataset=ds_trimmed, location='datasets/bpsk.dat')
if __name__ == "__main__":
main()
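# ---------------------------------------------------------------------------
# Illustrative Python 3 analog (not part of the original script) of the same
# load -> keep one key -> re-save flow, using stdlib pickle, a temp file and a
# made-up dataset, so it can be exercised without the RadioML data.
import os
import pickle
import tempfile
def demo_trim():
    demo_path = os.path.join(tempfile.gettempdir(), 'demo_radioml.dat')
    full_ds = {('BPSK', 18): [1, 2, 3], ('QPSK', 10): [4, 5, 6]}
    with open(demo_path, 'wb') as f:
        pickle.dump(full_ds, f)
    with open(demo_path, 'rb') as f:
        loaded = pickle.load(f)
    trimmed = {('BPSK', 18): loaded[('BPSK', 18)]}
    with open(demo_path, 'wb') as f:
        pickle.dump(trimmed, f)
    return trimmed
# ---------------------------------------------------------------------------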
| 20.434783
| 61
| 0.659574
|
368d2df9538e87cd93834a6f6c3adcb7937a8d8b
| 1,651
|
py
|
Python
|
utils/tileconnwire.py
|
rw1nkler/prjxray
|
aff076b47dcf6d653eb3ce791b41fd6cf4343edd
|
[
"ISC"
] | 583
|
2017-12-21T11:06:13.000Z
|
2022-02-20T21:27:33.000Z
|
utils/tileconnwire.py
|
rw1nkler/prjxray
|
aff076b47dcf6d653eb3ce791b41fd6cf4343edd
|
[
"ISC"
] | 1,212
|
2017-12-22T15:05:06.000Z
|
2022-02-19T13:04:59.000Z
|
utils/tileconnwire.py
|
mfkiwl/prjxray-xilinx-7-bitstream-fortmat
|
5349556bc2c230801d6df0cf11bccb9cfd171639
|
[
"ISC"
] | 134
|
2017-12-21T10:16:50.000Z
|
2022-02-16T06:42:04.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import os, sys, json
def main(argv):
if len(argv) != 3:
print("Usage example: python3 %s HCLK_R HCLK_SW6E3" % sys.argv[0])
sys.exit(1)
with open("%s/%s/tileconn.json" % (os.getenv("XRAY_DATABASE_DIR"),
os.getenv("XRAY_DATABASE")), "r") as f:
tileconn = json.load(f)
outdata = list()
max_tiletype_len = 1
for entry in tileconn:
if entry["tile_types"][0] == sys.argv[1]:
this_idx, other_idx = 0, 1
delta_x, delta_y = entry["grid_deltas"]
elif entry["tile_types"][1] == sys.argv[1]:
this_idx, other_idx = 1, 0
delta_x, delta_y = -entry["grid_deltas"][0], -entry["grid_deltas"][
1]
else:
continue
for wire_pair in entry["wire_pairs"]:
if wire_pair[this_idx] != sys.argv[2]:
continue
outdata.append(
(
delta_x, delta_y, entry["tile_types"][other_idx],
wire_pair[other_idx]))
max_tiletype_len = max(
max_tiletype_len, len(entry["tile_types"][other_idx]))
for entry in outdata:
print(
"%3d %3d %-*s %s" %
(entry[0], entry[1], max_tiletype_len, entry[2], entry[3]))
if __name__ == "__main__":
main(sys.argv)
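# ---------------------------------------------------------------------------
# Stand-alone sketch (hypothetical tile names, not Project X-Ray data) of the
# orientation logic above: when the queried tile type is the second element of
# a tileconn pair, the stored grid deltas are negated before being reported.
def oriented_delta(tile_types, grid_deltas, query_type):
    if tile_types[0] == query_type:
        return (grid_deltas[0], grid_deltas[1])
    if tile_types[1] == query_type:
        return (-grid_deltas[0], -grid_deltas[1])
    return None
print(oriented_delta(["HCLK_R", "CLBLM_L"], [1, 0], "CLBLM_L"))  # -> (-1, 0)
# ---------------------------------------------------------------------------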
| 28.964912
| 79
| 0.54573
|
41b0007b2fc02a46ebb0a9a9940671701ebc5054
| 177
|
py
|
Python
|
TicTacToePlayers.py
|
dz1domin/tictactoe
|
671eeaecb6e6c8434b2db5595d15fbcd28e90f49
|
[
"MIT"
] | null | null | null |
TicTacToePlayers.py
|
dz1domin/tictactoe
|
671eeaecb6e6c8434b2db5595d15fbcd28e90f49
|
[
"MIT"
] | null | null | null |
TicTacToePlayers.py
|
dz1domin/tictactoe
|
671eeaecb6e6c8434b2db5595d15fbcd28e90f49
|
[
"MIT"
] | null | null | null |
class TicTacToePlayers:
def __init__(self, addr, port, name, conn):
self._addr = addr
self._port = port
self._name = name
self._conn = conn
| 22.125
| 47
| 0.587571
|
6dd144f2acbffdd3de82f1fbc18a66189f26b0a3
| 25,100
|
py
|
Python
|
resolve_ref_exp_elements_ml/elements_embeddings.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 23,901
|
2018-10-04T19:48:53.000Z
|
2022-03-31T21:27:42.000Z
|
resolve_ref_exp_elements_ml/elements_embeddings.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 891
|
2018-11-10T06:16:13.000Z
|
2022-03-31T10:42:34.000Z
|
resolve_ref_exp_elements_ml/elements_embeddings.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 6,047
|
2018-10-12T06:31:02.000Z
|
2022-03-31T13:59:28.000Z
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Machine learning models to calculate embeddings for screen elements.
These models are meant to be included as part of another model.
This model is not pretrained (except for the text_model)
and will be trained with the rest of the model.
"""
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
REF_EXP_ID = 'ref_exp'
ELEMENTS_TEXT_ID = 'elements_text'
ELEMENTS_BOX_ID = 'elements_box'
ELEMENTS_EXIST_ID = 'elements_exists'
ELEMENTS_NEIGHBORS_ID = 'elements_neighbors'
ELEMENTS_REF_MATCH_ID = 'elements_ref_match'
def text_model(text_feature, pretrained_text_enc_name):
"""Builds the part of the model that processes the text.
Args:
text_feature: A batch tf.string to run through the model. Size: [batch_size]
pretrained_text_enc_name: The text encoder to use.
Returns:
Encoding of the text. Size: [batch_size, text_embed_size] The
text_embed_size depends on the specific tf hub model.
"""
with tf.variable_scope('text_model'):
text_encode_result = hub.Module(pretrained_text_enc_name)(text_feature)
return text_encode_result
def undo_mask(x, mask, pad_val=0.0):
"""Converts the output of boolean_mask to the original input dimensions.
The boolean_mask is usually used to condense items from multiple batches into
one large 'batch' for faster processing. This function is used to convert
back.
Args:
x: The input to reshape.
mask: The mask used in boolean_mask.
pad_val: value to pad with.
Returns:
x reshaped and padded.
"""
with tf.variable_scope('undo_mask'):
flat_x = tf.reshape(x, [-1])
x_shape = tf.shape(x)[1:]
expanded_mask = tf.tile(
tf.reshape(
mask, tf.concat([[-1, tf.shape(mask)[1]],
tf.ones_like(x_shape)], 0)),
tf.concat([[1, 1], x_shape], 0))
flat_mask = tf.reshape(expanded_mask, [-1])
start_indices = tf.range(tf.shape(flat_mask)[0])
condition_indices = tf.dynamic_partition(start_indices,
tf.cast(flat_mask, tf.int32), 2)
stitched = tf.dynamic_stitch(condition_indices, [
tf.ones_like(condition_indices[0], tf.float32) * pad_val,
tf.reshape(flat_x, [-1])
])
final_shape = tf.shape(mask)
out_shape = tf.concat([[final_shape[0], final_shape[1]], x_shape], 0)
return tf.reshape(stitched, out_shape)
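# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): a NumPy analog of
# undo_mask(), using the module-level `import numpy as np` above.  It scatters
# rows that were condensed by a boolean mask back into a padded
# [batch, items, ...] layout; True positions are filled in row-major order,
# matching tf.boolean_mask.  The toy shapes in the comment are made up.
def _undo_mask_np_demo(x, mask, pad_val=0.0):
  out = np.full(mask.shape + x.shape[1:], pad_val, dtype=x.dtype)
  out[mask] = x
  return out
# Example: 3 kept rows of 2 features scattered back to shape (2, 3, 2):
# _undo_mask_np_demo(np.arange(6.).reshape(3, 2),
#                    np.array([[True, False, True], [False, True, False]]))
# ---------------------------------------------------------------------------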
def tile_ref_enc_to_elements(ref_enc, elements_mask):
"""Utility to tile the ref_enc to the same shape as the elements."""
with tf.variable_scope('tile_ref_enc_to_elements'):
orig_shape = tf.shape(ref_enc)
orig_shape_static = ref_enc.get_shape().as_list()
ref_enc = tf.tile(
tf.reshape(ref_enc, [orig_shape[0], 1, orig_shape[1]]),
[1, tf.shape(elements_mask)[1], 1])
ref_enc = tf.boolean_mask(ref_enc, elements_mask)
ref_enc = tf.reshape(ref_enc, [-1, orig_shape_static[1]])
return ref_enc
def tile_to_image(x, image_size):
x = tf.reshape(x, [-1, 1, 1, x.get_shape()[1]])
x = tf.tile(x, [1, image_size[0], image_size[1], 1])
return x
def get_filled_rect(box, fill_vals, output_size, mode):
"""Returns a feature map with the values in the box filled in.
This can either work with a single box or a batch of boxes.
Args:
box: The box to fill, in x,y,width,height format normalized
between 0 and 1. shape: [num_boxes, 4]
fill_vals: The vector to tile over each bounding box. This could be the
embedding for the element.
output_size: The length and width of the output. Assumes length and width
are the same.
mode: Method to compute values in the box. 'step': Pixels change immediately
at box border. 'cont': Pixels change gradually at box border and increase
towards the center.
Returns:
The tensor with the boxes filled.
"""
with tf.variable_scope('get_filled_rect'):
axis = tf.to_float(tf.range(output_size))
disp_box = box
disp_box *= tf.to_float(output_size)
disp_box += [-1.5, -1.5, 3.0, 3.0]
disp_box *= [1, 1, .5, .5]
if mode == 'step':
x_vals = tf.nn.relu(
tf.nn.relu(axis - disp_box[:, 0:1]) / 2 -
tf.nn.relu(axis - disp_box[:, 0:1] - disp_box[:, 2:3]))
y_vals = tf.nn.relu(
tf.nn.relu(axis - disp_box[:, 1:2]) / 2 -
tf.nn.relu(axis - disp_box[:, 1:2] - disp_box[:, 3:4]))
else:
x_vals = tf.nn.relu(-tf.abs(axis - disp_box[:, 0:1] - disp_box[:, 2:3]) /
(disp_box[:, 2:3]) + 1)
y_vals = tf.nn.relu(-tf.abs(axis - disp_box[:, 1:2] - disp_box[:, 3:4]) /
(disp_box[:, 3:4]) + 1)
x_vals = tf.expand_dims(x_vals, 1)
y_vals = tf.expand_dims(y_vals, 2)
filled_rect = x_vals * y_vals
if mode == 'step':
filled_rect = tf.minimum(filled_rect, 1.0)
fill_vals = tf.reshape(
fill_vals, [tf.shape(fill_vals)[0], 1, 1,
tf.shape(fill_vals)[1]])
filled_rect = tf.expand_dims(filled_rect, 3)
filled_rect *= fill_vals
return filled_rect
def atten_softmax(atten_mask, elements_mask):
"""Calculates the softmax between the values in each batch."""
atten_mask = undo_mask(atten_mask, elements_mask, np.finfo(np.float32).min)
atten_mask = tf.nn.softmax(atten_mask)
atten_mask = tf.boolean_mask(atten_mask, elements_mask)
return atten_mask
def atten_metric(elements_enc_attend, ref_enc_attend, elements_mask,
do_softmax):
"""Computes similarity metric to be used with attention."""
with tf.variable_scope('atten_metric'):
ref_enc_attend = tile_ref_enc_to_elements(ref_enc_attend, elements_mask)
atten_mask = tf.multiply(elements_enc_attend, ref_enc_attend)
atten_mask = tf.reduce_sum(atten_mask, axis=1)
if do_softmax:
atten_mask = atten_softmax(atten_mask, elements_mask)
return atten_mask
def attention(query, attend_in, single_dot_in, elements_mask, do_softmax,
attention_method, flags):
"""Returns the attention mask using the method described by attention_method.
Args:
query: Query vector. Shape: [batch_size, query_size]
attend_in: Values for each item to use for attention. [batch_size *
elements_per_query, attend_size]
single_dot_in: Values for each item to use for attention in single dot mode.
[batch_size * elements_per_query, single_dot_attend_size]
single_dot_attend_size must be greater than query_size
elements_mask: Mask for what elements items exist in the input.
do_softmax: Whether to put the output through softmax.
attention_method: The attention method to use.
flags: The input Flags. (Currently unused)
Returns:
The attention mask.
"""
del flags
elements_item_size = attend_in.shape[1]
  # Use different weights for the DNN on top of Ref Exp and Elements
if 'sepDotAtten' == attention_method:
elements_enc_attend = tf.layers.dense(attend_in, elements_item_size)
query_attend = tf.layers.dense(query, elements_item_size)
attention_mask = atten_metric(elements_enc_attend, query_attend,
elements_mask, do_softmax)
  # Use the same weights for the DNN on top of Ref Exp and Elements
if 'singDotAtten' == attention_method:
elements_enc_attend = single_dot_in
query_attend = tf.concat([
query,
tf.zeros([
tf.shape(query)[0],
tf.shape(single_dot_in)[1] - tf.shape(query)[1]
])
], 1)
# Concat along batch dim, so same weights used for each.
all_attend = tf.concat([elements_enc_attend, query_attend], 0)
all_attend = tf.layers.dense(all_attend, elements_item_size, tf.nn.relu)
all_attend = tf.layers.dense(all_attend, elements_item_size)
elements_enc_attend, query_attend = tf.split(
all_attend,
[tf.shape(elements_enc_attend)[0],
tf.shape(query_attend)[0]])
attention_mask = atten_metric(elements_enc_attend, query_attend,
elements_mask, do_softmax)
  # Combine Ref Exp and Elements before input to the DNN
if 'combAtten' == attention_method:
query_tile = tile_ref_enc_to_elements(query, elements_mask)
attention_mask = tf.concat([attend_in, query_tile], 1)
attention_mask = tf.layers.dense(attention_mask, elements_item_size,
tf.nn.relu)
attention_mask = tf.layers.dense(attention_mask, 1)
attention_mask = tf.squeeze(attention_mask, 1)
if do_softmax:
attention_mask = atten_softmax(attention_mask, elements_mask)
tf.summary.histogram('attention_mask', attention_mask)
return attention_mask
def filter_none(lst):
"""Removes None elements from the list."""
lst = [el for el in lst if el is not None]
return lst
def calc_neighbor_embed(elements_neighbors, elements_enc, elements_mask):
"""Calculates the sum of the embeddings of neighboring elements."""
with tf.variable_scope('calc_neighbor_embed'):
elements_enc_orig_shape = elements_enc.get_shape().as_list()
elements_enc = undo_mask(elements_enc, elements_mask)
elements_enc_shape = tf.shape(elements_enc)
elements_enc_expand = tf.tile(elements_enc, [1, elements_enc_shape[1], 1])
elements_enc_expand = tf.reshape(elements_enc_expand, [
-1, elements_enc_shape[1], elements_enc_shape[1], elements_enc_shape[2]
])
elements_neighbors = tf.cast(
tf.expand_dims(elements_neighbors, 3), tf.float32)
neighbor_embed = elements_enc_expand * elements_neighbors
neighbor_embed = tf.reduce_mean(neighbor_embed, axis=2)
neighbor_embed = tf.boolean_mask(neighbor_embed, elements_mask)
neighbor_embed.set_shape(elements_enc_orig_shape)
return neighbor_embed
def elements_model(elements_texts_enc, feature_map, output_size, elements_mask,
ref_enc, flags):
"""The part of the model that processes the elements text and boxes.
This assumes that the text has already been preprocessed with the text_model.
Even if you are only using the elements and not the referring expression, you
should probably use the ref_elements_model since that also handles
preprocessing with the text_model.
Args:
elements_texts_enc: The elements text encoded by the text_model. Size:
[batch_size * elements_per_query, text_embed_size]
feature_map: Features used by the model.
output_size: Desired output size of the encoding. Format: [length, width,
depth]
elements_mask: Mask for what elements items exist in the input.
ref_enc: The referring expression encoded by the text_model. [batch_size,
text_embed_size]
flags: The input Flags.
Returns:
The encoding of the elements data.
"""
with tf.variable_scope('elements_model'):
elements_item_size = output_size[2]
if flags.use_elements_boxes:
elements_boxes = tf.identity(feature_map[ELEMENTS_BOX_ID],
ELEMENTS_BOX_ID)
flat_elements_boxes = tf.boolean_mask(elements_boxes, elements_mask)
else:
elements_boxes = None
flat_elements_boxes = None
if ref_enc is not None:
ref_enc_tile = tile_ref_enc_to_elements(ref_enc, elements_mask)
elements_ref_match_enc = None
if flags.use_elements_ref_match:
elements_ref_match = tf.identity(feature_map[ELEMENTS_REF_MATCH_ID],
ELEMENTS_REF_MATCH_ID)
tf.summary.text('elements_ref_match', elements_ref_match)
flat_elements_ref_match = tf.boolean_mask(elements_ref_match,
elements_mask)
elements_ref_match_enc = text_model(
flat_elements_ref_match, flags.pretrained_elements_ref_match_model)
    # For combining the element with the referring expression.
if flags.merge_ref_elements_method == 'combine' and (ref_enc is not None):
elements_enc = tf.concat(
filter_none([
elements_texts_enc, flat_elements_boxes, ref_enc_tile,
elements_ref_match_enc
]), 1)
elements_enc = tf.layers.dense(elements_enc, elements_item_size * 2,
tf.nn.relu)
else:
# Paper results
elements_enc = tf.concat(
filter_none(
[elements_texts_enc, flat_elements_boxes,
elements_ref_match_enc]), 1)
elements_enc = tf.layers.dense(elements_enc, elements_item_size,
tf.nn.relu)
neighbor_embed = None
if flags.use_elements_neighbors:
neighbor_embed = calc_neighbor_embed(feature_map[ELEMENTS_NEIGHBORS_ID],
elements_enc, elements_mask)
elements_enc = tf.concat(filter_none([elements_enc, neighbor_embed]), 1)
elements_enc = tf.layers.dense(elements_enc, elements_item_size, tf.nn.relu)
attend_in = elements_enc
# "DNN"
elements_enc = tf.nn.dropout(elements_enc, flags.elements_keep_prob)
elements_enc = tf.layers.dense(elements_enc, elements_item_size, tf.nn.relu)
elements_enc = tf.nn.dropout(elements_enc, flags.elements_keep_prob)
elements_enc = tf.layers.dense(elements_enc, elements_item_size)
elements_enc_pre_atten = elements_enc
if 'Atten' in flags.merge_ref_elements_method and (ref_enc is not None):
with tf.variable_scope('attention'):
if elements_texts_enc is None:
# Prepad with 0s so the box embedding won't overlap with the ref_enc.
single_dot_concat = tf.zeros([
tf.shape(flat_elements_boxes)[0],
ref_enc.get_shape().as_list()[1]
])
else:
single_dot_concat = elements_texts_enc
single_dot_in = tf.concat(
filter_none([
single_dot_concat,
flat_elements_boxes,
neighbor_embed,
elements_ref_match_enc,
]), 1)
single_dot_in = tf.concat(
[single_dot_in,
tf.ones([tf.shape(single_dot_in)[0], 1])], 1)
attention_mask = attention(ref_enc, attend_in, single_dot_in,
elements_mask, True,
flags.merge_ref_elements_method, flags)
attention_mask = tf.expand_dims(attention_mask, 1)
elements_enc *= attention_mask
# Projects the element embeddings into a 2d feature map.
if flags.elements_proj_mode != 'tile':
with tf.variable_scope('elements_proj'):
# Projects the elements text onto the image feature map
# on the corresponding bounding boxes.
assert_op = tf.Assert(
tf.equal(output_size[0], output_size[1]), [
'Assumes height and width are the same.',
feature_map[ELEMENTS_BOX_ID]
])
with tf.control_dependencies([assert_op]):
if flags.proj_elements_memop:
            # Iterate through all bounding boxes and embeddings to create
            # embedded bounding boxes and iteratively sum them into the result tensor.
elements_enc = undo_mask(elements_enc, elements_mask)
fold_elms = tf.transpose(
tf.concat([elements_enc, elements_boxes], 2), [1, 0, 2])
initializer = tf.zeros([tf.shape(elements_mask)[0]] + output_size)
def fold_fn(total, fold_elm):
elements_enc_boxes = tf.split(
fold_elm,
[tf.shape(elements_enc)[2],
tf.shape(elements_boxes)[2]], 1)
return total + get_filled_rect(
elements_enc_boxes[1], elements_enc_boxes[0], output_size[0],
flags.elements_proj_mode)
elements_enc = tf.foldl(
fold_fn,
fold_elms,
initializer=initializer,
swap_memory=True,
parallel_iterations=2)
else:
            # Create embeddings for all bounding boxes, then reduce-sum.
elements_enc = get_filled_rect(flat_elements_boxes, elements_enc,
output_size[0],
flags.elements_proj_mode)
elements_enc = undo_mask(elements_enc, elements_mask)
elements_enc = tf.reduce_sum(elements_enc, axis=1)
# Turn sum into average.
mask_sum = tf.cast(
tf.reduce_sum(tf.cast(elements_mask, tf.uint8), 1), tf.float32)
mask_sum = tf.reshape(mask_sum, [-1, 1, 1, 1])
mask_sum = tf.where(
tf.equal(mask_sum, 0), tf.ones_like(mask_sum), mask_sum)
elements_enc /= mask_sum
tf.summary.histogram('elements_enc', elements_enc)
elements_enc_for_disp = tf.reduce_mean(elements_enc, 3, keepdims=True)
tf.summary.image('elements_enc_for_disp', elements_enc_for_disp, 4)
else:
# Undo the mask for feature mapping
sequence_elements_enc = undo_mask(elements_enc, elements_mask)
elements_enc = tf.reduce_mean(sequence_elements_enc, axis=1)
tf.summary.histogram('elements_enc', elements_enc)
if flags.elements_3d_output:
elements_enc = tile_to_image(elements_enc, output_size)
if flags.elements_3d_output:
elements_enc.set_shape(
[None, output_size[0], output_size[1], elements_item_size])
# Last CNN layer of elements model
if flags.elements_3d_output and flags.elements_cnn:
elements_enc = tf.layers.conv2d(
elements_enc,
elements_enc.shape[3],
3,
padding='SAME',
activation=tf.nn.relu,
strides=1)
elements_enc = tf.nn.dropout(elements_enc, flags.elements_keep_prob)
elements_enc = tf.layers.conv2d(
elements_enc,
elements_enc.shape[3],
3,
padding='SAME',
activation=None,
strides=1)
return elements_enc, elements_enc_pre_atten
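# --- Illustrative sketch (not part of the original file) ---------------------
# elements_model flattens per-element tensors with tf.boolean_mask and later
# restores the [batch, max_items, ...] layout with undo_mask (defined earlier
# in this file). A minimal scatter-based version of that restore step could
# look like the sketch below; _example_undo_mask is an assumed name, not the
# original helper.
def _example_undo_mask(flat_values, mask):
  """Scatters [num_true, depth] values back to [batch, max_items, depth]."""
  indices = tf.where(mask)  # int64 positions of the True entries in the mask.
  out_shape = tf.concat(
      [tf.shape(mask, out_type=tf.int64),
       tf.shape(flat_values, out_type=tf.int64)[1:]], 0)
  return tf.scatter_nd(indices, flat_values, out_shape)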
def ref_elements_model(feature_map, output_size, flags):
"""Calculates an embedding for the referring expression and screen elements.
The input is defined in the feature_map, with a referring expression and also
text and boxes for each screen element.
  Certain inputs can be ignored by the model by setting their corresponding
use_ flags to false.
The model produces a 3d output (length, width, depth) when:
flags.elements_proj_mode != 'tile' or flags.elements_3d_output
  and a 1d output (embedding_size) otherwise.
Args:
feature_map: Dict of features used by the model. See
research/socrates/vis_va_model/seg_model/model_input.py for how to
construct from tf.Examples,
REF_EXP_ID The referring expression as tf.string. Shape: [batch_size]
ELEMENTS_TEXT_ID The text for each element as tf.string. Shape:
[batch_size, num_elements] (If some batches have more elements, pad with
empty string)
    ELEMENTS_BOX_ID The box for each element. Shape:
    [batch_size, num_elements, 4]. The last dimension is [x,y,width,height].
    (If some batches have more elements, pad with 0s)
    ELEMENTS_EXIST_ID 1 if an element is present and 0 otherwise. Used
    since the batches can have different numbers of elements.
    output_size: Desired output size of the encoding. Format: [length, width,
      depth]; if the flags specify a 1d output, only the depth is considered.
    flags: The input Flags. See
      research/socrates/screen_elements_embedding_model/notebook.ipynb for the
      optimal values for these.
elements_3d_output Forces a 3d output even when elements_proj_mode==tile.
use_ref_exp Whether or not to use the referring expression in the model.
use_elements_texts Whether or not to use the elements information in the
      model. Crashes if this is true when use_elements_boxes is false.
    use_elements_boxes: Whether or not to use the elements boxes in the model.
    merge_ref_elements_method options: '': Don't merge in elements model.
      'combine': Concatenate the representations and feed through a DNN.
      'singDotAtten': Use the same DNN to calculate the
        representations of the items and expression to multiply.
      'sepDotAtten': Use separate networks to calculate the
        representations.
      'combAtten': Use a network to directly output multiply values.
    elements_proj_mode: How to project the elements information onto the image
      feature. 'tile': blindly tile the feature over the image. 'step': Tile
      only in the elements bounding box locations. 'cont': Tile the values in
      bounding box locations and increase magnitude near the center of the
      box.
pretrained_text_enc_name - The text model to use.
elements_keep_prob - Controls dropout.
elements_cnn - True to use a CNN after the elements are processed.
elements_enc_size - The size of the output encoding. -1
to use the output_size.
proj_elements_memop Reduces elements projection mem by using a tf while
loop. May be slower.
Returns:
A tuple of elements_enc, ref_enc, elements_enc_for_select
elements_enc - The embedding representing the elements
ref_enc - The embedding representing the referring expression.
elements_enc_for_select - only useful when building a model to select from
the screen elements.
"""
with tf.variable_scope('ref_elements_model'):
# Checks if the referring elements model is needed. If not, return 0s
if not (flags.use_ref_exp or flags.use_elements_texts or
flags.use_elements_boxes or flags.use_elements_ref_match or
flags.use_elements_neighbors):
return (tf.zeros([
tf.shape(feature_map[ELEMENTS_EXIST_ID])[0], output_size[0],
output_size[1], output_size[2]
]),
tf.zeros([
tf.shape(feature_map[ELEMENTS_EXIST_ID])[0], output_size[0],
output_size[1], output_size[2]
]), None)
if flags.use_ref_exp:
ref_exp = tf.identity(feature_map[REF_EXP_ID], REF_EXP_ID)
tf.summary.text('ref_exp', ref_exp)
else:
ref_exp = []
# Puts everything in the same "batch" so it can be processed
# by the text_model. Ignores features with their use_ flags set to false.
if (flags.use_elements_texts or flags.use_elements_boxes or
flags.use_elements_ref_match or flags.use_elements_neighbors):
elements_mask = tf.identity(feature_map[ELEMENTS_EXIST_ID],
ELEMENTS_EXIST_ID)
if flags.use_elements_texts:
elements_texts = tf.identity(feature_map[ELEMENTS_TEXT_ID],
ELEMENTS_TEXT_ID)
tf.summary.text('elements_texts', elements_texts)
# Use boolean mask to ignore empty padding elements
flat_elements_texts = tf.boolean_mask(elements_texts, elements_mask)
else:
elements_texts = []
flat_elements_texts = []
if flags.use_ref_exp or flags.use_elements_texts:
ref_elements = tf.concat([ref_exp, flat_elements_texts], 0)
# Text model refers to embedding model for sentences
ref_elements_concat_enc = text_model(ref_elements,
flags.pretrained_text_enc_name)
# Unpack the "batch".
ref_enc, elements_texts_enc = tf.split(
ref_elements_concat_enc,
[tf.shape(ref_exp)[0],
tf.shape(flat_elements_texts)[0]], 0)
if not flags.use_ref_exp:
ref_enc = None
if not flags.use_elements_texts:
elements_texts_enc = None
if (flags.use_elements_texts or flags.use_elements_boxes or
flags.use_elements_ref_match or flags.use_elements_neighbors):
      # Elements model can process OCR information
elements_enc, elements_enc_for_select = elements_model(
elements_texts_enc, feature_map, output_size, elements_mask, ref_enc,
flags)
else:
elements_enc = tf.zeros([
tf.shape(ref_enc)[0], output_size[0], output_size[1], output_size[2]
])
elements_enc_for_select = None
if flags.use_ref_exp:
ref_enc = tf.layers.dense(ref_enc, output_size[2], tf.nn.relu)
tf.summary.histogram('ref_enc', ref_enc)
else:
ref_enc = tf.zeros(
[tf.shape(feature_map[ELEMENTS_EXIST_ID])[0], output_size[2]])
if flags.elements_proj_mode != 'tile' or flags.elements_3d_output:
ref_enc = tile_to_image(ref_enc, output_size)
return elements_enc, ref_enc, elements_enc_for_select
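# --- Illustrative usage sketch (not part of the original file) ---------------
# A minimal feature_map with the keys described in the ref_elements_model
# docstring. The literal strings, shapes and the flags object are assumptions
# for illustration only, not values taken from the original codebase.
#
#   feature_map = {
#       REF_EXP_ID: tf.constant(['click the search button']),       # [batch]
#       ELEMENTS_TEXT_ID: tf.constant([['search', 'cancel', '']]),  # [batch, num_elements]
#       ELEMENTS_BOX_ID: tf.constant([[[0.1, 0.2, 0.3, 0.1],
#                                      [0.5, 0.2, 0.3, 0.1],
#                                      [0.0, 0.0, 0.0, 0.0]]]),     # [batch, num_elements, 4]
#       ELEMENTS_EXIST_ID: tf.constant([[True, True, False]]),      # [batch, num_elements]
#   }
#   elements_enc, ref_enc, _ = ref_elements_model(feature_map, [32, 32, 64], flags)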
| 39.096573
| 80
| 0.676494
|
9c4716379da2236111ec5e1929013e6c3aca08c6
| 4,672
|
py
|
Python
|
tests/vcf_tools/test_format_sv_variant.py
|
bjhall/loqusdb
|
55ee806662848eeffd266bf65d4b4eb24e534a89
|
[
"MIT"
] | 4
|
2018-06-04T12:42:45.000Z
|
2021-03-29T20:36:12.000Z
|
tests/vcf_tools/test_format_sv_variant.py
|
bjhall/loqusdb
|
55ee806662848eeffd266bf65d4b4eb24e534a89
|
[
"MIT"
] | 50
|
2016-02-26T07:54:39.000Z
|
2021-10-12T07:52:01.000Z
|
tests/vcf_tools/test_format_sv_variant.py
|
bjhall/loqusdb
|
55ee806662848eeffd266bf65d4b4eb24e534a89
|
[
"MIT"
] | 8
|
2016-02-29T13:50:46.000Z
|
2020-04-22T10:15:23.000Z
|
from pprint import pprint as pp
from loqusdb.build_models import build_variant
def test_format_indel(del_variant, case_obj):
    ## GIVEN an SV deletion
variant = del_variant
case_id = case_obj['case_id']
## WHEN parsing the variant
formated_variant = build_variant(
variant=variant,
case_obj=case_obj,
case_id=case_id
)
expected_id = '_'.join([
variant.CHROM,
str(variant.POS),
variant.REF,
variant.ALT[0]
])
    ## THEN assert the sv is parsed correctly
assert formated_variant
assert formated_variant['variant_id'] == expected_id
assert formated_variant['chrom'] == variant.CHROM
assert formated_variant['end_chrom'] == variant.CHROM
assert formated_variant['pos'] == variant.POS
assert formated_variant['end'] == variant.INFO['END']
assert formated_variant['sv_len'] == abs(variant.INFO['SVLEN'])
assert formated_variant['ref'] == variant.REF
assert formated_variant['alt'] == variant.ALT[0]
assert formated_variant['sv_type'] == 'DEL'
assert formated_variant['case_id'] == case_id
assert formated_variant['homozygote'] == 0
assert formated_variant['hemizygote'] == 0
def test_format_small_ins(small_insert_variant, case_obj):
    ## GIVEN a small insertion (the inserted sequence is included in the ALT field)
variant = small_insert_variant
case_id = case_obj['case_id']
## WHEN parsing the variant
formated_variant = build_variant(
variant=variant,
case_obj=case_obj,
case_id=case_id
)
    ## THEN assert the sv is parsed correctly
assert formated_variant['chrom'] == variant.CHROM
assert formated_variant['end_chrom'] == variant.CHROM
assert formated_variant['pos'] == variant.POS
assert formated_variant['end'] == variant.POS + abs(variant.INFO['SVLEN'])
assert formated_variant['sv_len'] == abs(variant.INFO['SVLEN'])
assert formated_variant['ref'] == variant.REF
assert formated_variant['alt'] == variant.ALT[0]
assert formated_variant['sv_type'] == 'INS'
def test_format_insertion(insertion_variant, case_obj):
    ## GIVEN an insertion variant (the inserted sequence is not given in the ALT field)
variant = insertion_variant
case_id = case_obj['case_id']
## WHEN parsing the variant
formated_variant = build_variant(
variant=variant,
case_obj=case_obj,
case_id=case_id
)
    ## THEN assert the sv is parsed correctly
assert formated_variant['chrom'] == variant.CHROM
assert formated_variant['end_chrom'] == variant.CHROM
assert formated_variant['pos'] == variant.POS
assert formated_variant['end'] == variant.INFO['END']
assert formated_variant['sv_len'] == 0
assert formated_variant['ref'] == variant.REF
assert formated_variant['alt'] == variant.ALT[0]
assert formated_variant['sv_type'] == 'INS'
def test_format_dup_tandem(duptandem_variant, case_obj):
    ## GIVEN a tandem duplication variant
variant = duptandem_variant
case_id = case_obj['case_id']
## WHEN parsing the variant
formated_variant = build_variant(
variant=variant,
case_obj=case_obj,
case_id=case_id
)
    ## THEN assert the sv is parsed correctly
assert formated_variant['chrom'] == variant.CHROM
assert formated_variant['end_chrom'] == variant.CHROM
assert formated_variant['pos'] == variant.POS
assert formated_variant['end'] == variant.INFO['END']
assert formated_variant['sv_len'] == abs(variant.INFO['SVLEN'])
assert formated_variant['ref'] == variant.REF
assert formated_variant['alt'] == variant.ALT[0]
assert formated_variant['sv_type'] == 'DUP'
def test_format_translocation(translocation_variant, case_obj):
    ## GIVEN a translocation variant
variant = translocation_variant
case_id = case_obj['case_id']
## WHEN parsing the variant
formated_variant = build_variant(
variant=variant,
case_obj=case_obj,
case_id=case_id
)
    ## THEN assert the sv is parsed correctly
assert formated_variant['chrom'] == variant.CHROM
assert formated_variant['end_chrom'] == '11'
assert formated_variant['pos'] == variant.POS
assert formated_variant['end'] == 119123896
assert formated_variant['sv_len'] == float('inf')
assert formated_variant['ref'] == variant.REF
assert formated_variant['alt'] == variant.ALT[0]
assert formated_variant['sv_type'] == 'BND'
| 36.5
| 87
| 0.678938
|
2ba0bf493f8a035816f6ab6a66c9bb901740dab2
| 1,769
|
py
|
Python
|
lite/tests/unittest_py/pass/backends/arm/test_conv_bn_fuse_pass.py
|
xiebaiyuan/PaddleLite
|
6f7280a91741d1c63fcb0296ac5c08c4e81c2a90
|
[
"Apache-2.0"
] | null | null | null |
lite/tests/unittest_py/pass/backends/arm/test_conv_bn_fuse_pass.py
|
xiebaiyuan/PaddleLite
|
6f7280a91741d1c63fcb0296ac5c08c4e81c2a90
|
[
"Apache-2.0"
] | null | null | null |
lite/tests/unittest_py/pass/backends/arm/test_conv_bn_fuse_pass.py
|
xiebaiyuan/PaddleLite
|
6f7280a91741d1c63fcb0296ac5c08c4e81c2a90
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../../common')
sys.path.append('../../../')
import test_conv_bn_fuse_pass_base
from auto_scan_test_rpc import FusePassAutoScanTest
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st
class TestConvBnFusePass(FusePassAutoScanTest):
    def is_program_valid(self, program_config: ProgramConfig, predictor_config: CxxConfig) -> bool:
return True
def sample_program_configs(self, draw):
return test_conv_bn_fuse_pass_base.sample_program_configs(draw)
def sample_predictor_configs(self):
config = CxxConfig()
config.set_valid_places({Place(TargetType.ARM, PrecisionType.FP32, DataLayoutType.NCHW)})
yield config, ["conv2d"], (1e-5, 1e-5)
def add_ignore_pass_case(self):
pass
def test(self, *args, **kwargs):
self.run_and_statis(quant=False, max_examples=25, passes=["lite_conv_bn_fuse_pass"])
if __name__ == "__main__":
unittest.main(argv=[''])
| 37.638298
| 125
| 0.751837
|
090dfd484588902985af92602f0c136798f7aae6
| 6,645
|
py
|
Python
|
andriller/decrypts.py
|
pshem/andriller
|
c1e2d9e05f3630a7499907190552089be18563c5
|
[
"MIT"
] | 2
|
2021-11-22T03:38:06.000Z
|
2022-01-12T23:07:15.000Z
|
andriller/decrypts.py
|
pshem/andriller
|
c1e2d9e05f3630a7499907190552089be18563c5
|
[
"MIT"
] | null | null | null |
andriller/decrypts.py
|
pshem/andriller
|
c1e2d9e05f3630a7499907190552089be18563c5
|
[
"MIT"
] | 1
|
2021-09-14T04:53:18.000Z
|
2021-09-14T04:53:18.000Z
|
import gzip
import zlib
import pathlib
import hashlib
from contextlib import suppress
from dataclasses import dataclass
from Cryptodome.Cipher import AES
from .utils import threaded
@dataclass
class WhatsAppCrypt:
input_file: pathlib.Path
key_file: pathlib.Path = None
email: str = None
GZIP_MAGIC = b'\x1f\x8b\x08'
SQLITE_MAGIC = b'SQLite format 3\x00'
KEY_SIZE = 158
SUFFIX = '.decoded.db'
def __post_init__(self):
self.fname = self.input_file.name
self.input_data = None
self.KEY = None
self.IV = None
@property
def dst(self):
return self.input_file.with_suffix(self.SUFFIX)
def get_key(self):
with self.key_file.open('rb') as R:
self.KEY = R.read()[126:158]
def get_iv(self, iv_from_file=False):
if iv_from_file:
with self.input_file.open('rb') as R:
self.IV = R.read()[51:67]
else:
            with self.key_file.open('rb') as R:
self.IV = R.read()[110:126]
    def aes_0(self):
        # 346a23652a46392b4d73257c67317e352e3372482177652c
        # NOTE: ECB mode is assumed here; the original call passed no mode,
        # which Cryptodome's AES.new() does not accept.
        return AES.new(b'4j#e*F9+Ms%|g1~5.3rH!we,', AES.MODE_ECB)
def aes_5(self):
ACC = hashlib.md5(self.email.encode()).digest()
KEY = bytearray(b'\x8dK\x15\\\xc9\xff\x81\xe5\xcb\xf6\xfax\x196j>\xc6!\xa6VAl\xd7\x93')
        for i in range(24):
            # Cycle the 16-byte MD5 digest across the 24-byte key; indexing
            # ACC[i] directly would raise IndexError once i reaches 16.
            KEY[i] ^= ACC[i % len(ACC)]
IV = b'\x1e9\xf3i\xe9\r\xb3:\xa7;D+\xbb\xb6\xb0\xb9'
return AES.new(bytes(KEY), AES.MODE_CBC, IV)
def aes_7(self, mode=AES.MODE_CBC, iv_from_file=False):
self.get_key()
self.get_iv(iv_from_file=iv_from_file)
return AES.new(self.KEY, mode, self.IV)
def aes_8(self, mode=AES.MODE_CBC, iv_from_file=True):
self.get_key()
self.get_iv(iv_from_file=iv_from_file)
return AES.new(self.KEY, mode, self.IV)
def aes_9(self, mode=AES.MODE_GCM, iv_from_file=True):
self.get_key()
self.get_iv(iv_from_file=iv_from_file)
return AES.new(self.KEY, mode, self.IV)
def aes_10(self, mode=AES.MODE_GCM, iv_from_file=True):
self.get_key()
self.get_iv(iv_from_file=iv_from_file)
return AES.new(self.KEY, mode, self.IV)
def aes_12(self, mode=AES.MODE_GCM, iv_from_file=True):
self.get_key()
self.get_iv(iv_from_file=iv_from_file)
return AES.new(self.KEY, mode, self.IV)
@staticmethod
def unpad_pkcs5(data, bs=16):
if len(data) % bs == 0:
return data
elif 0 < data[-1] > bs:
return data[0:-(len(data) % bs)]
else:
return data[0:-data[-1]]
@staticmethod
def unpad(data):
return data[0:-data[-1]]
def check_input_file_size(self, head_size=67):
if not (self.input_file.stat().st_size - head_size) % 16 == 0:
raise WhatsAppCryptError('Unexpected input file size, may not be decrypted.')
def check_input_data_size(self, data, head_size=67):
if not (len(data) - head_size) % 16 == 0:
raise WhatsAppCryptError('Unexpected input file size, may not be decrypted.')
def check_key_file_size(self):
if not self.key_file.stat().st_size == self.KEY_SIZE:
raise WhatsAppCryptError('Odd key file size.')
@classmethod
def check_is_sqlite(cls, data):
if not data.startswith(cls.SQLITE_MAGIC):
raise WhatsAppCryptError('Decryption failed (not sqlite).')
@classmethod
def check_is_gzip(cls, data):
if not data.startswith(cls.GZIP_MAGIC):
raise WhatsAppCryptError('Decryption failed (not gzip).')
def gzip_decompress(self, data):
# Python gzip lib bug workaround
length = len(data)
for i in range(0, -65, -1):
i = length if not i else i
with suppress(OSError):
return gzip.decompress(data[:i])
raise WhatsAppCryptError('Decompression failed')
@threaded
def decrypt(self, **kwargs):
# self.check_input_file_size(**kwargs)
self.check_key_file_size()
self.input_data = self.input_file.read_bytes()
def save_output(self, data):
if self.dst.is_file():
raise WhatsAppCryptError(f'File {self.dst} already exists!')
self.dst.write_bytes(data)
# -----------------------------------------------------------------------------
class WhatsAppCrypt7(WhatsAppCrypt):
CRYPT = 'crypt7'
def __init__(self, input_file, key_file):
super().__init__(input_file=input_file, key_file=key_file)
def decrypt(self, **kwargs):
super().decrypt(**kwargs)
        cipher = self.aes_7()
        data = cipher.decrypt(self.input_data[67:])
self.check_is_sqlite(data)
self.save_output(data)
return self.dst
class WhatsAppCrypt8(WhatsAppCrypt):
CRYPT = 'crypt8'
def __init__(self, input_file, key_file):
super().__init__(input_file=input_file, key_file=key_file)
def decrypt(self, **kwargs):
super().decrypt(**kwargs)
cipher = self.aes_8()
data = cipher.decrypt(self.input_data[67:])
self.check_is_gzip(data)
data = gzip.decompress(self.unpad(data))
self.check_is_sqlite(data)
self.save_output(data)
return self.dst
class WhatsAppCrypt9(WhatsAppCrypt):
CRYPT = 'crypt9'
def __init__(self, input_file, key_file):
super().__init__(input_file=input_file, key_file=key_file)
def decrypt(self, **kwargs):
super().decrypt(**kwargs)
data = self.unpad_pkcs5(self.input_data[67:])
self.check_input_data_size(data, head_size=0)
cipher = self.aes_9()
data = cipher.decrypt(data)
data = self.gzip_decompress(data)
self.check_is_sqlite(data)
self.save_output(data)
return self.dst
class WhatsAppCrypt10(WhatsAppCrypt9, WhatsAppCrypt):
CRYPT = 'crypt10'
class WhatsAppCrypt11(WhatsAppCrypt9, WhatsAppCrypt):
CRYPT = 'crypt11'
class WhatsAppCrypt12(WhatsAppCrypt):
CRYPT = 'crypt12'
def __init__(self, input_file, key_file):
super().__init__(input_file, key_file=key_file)
def decrypt(self, **kwargs):
super().decrypt(**kwargs)
data = self.unpad_pkcs5(self.input_data[67:])
self.check_input_data_size(data, head_size=0)
cipher = self.aes_12()
data = cipher.decrypt(data)
data = zlib.decompress(data)
self.check_is_sqlite(data)
self.save_output(data)
return self.dst
# -----------------------------------------------------------------------------
class WhatsAppCryptError(Exception):
pass
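# -----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The file paths
# below are made up. Note that the base WhatsAppCrypt.decrypt is decorated
# with @threaded, so the call chain may not be fully synchronous.
#
#   import pathlib
#   wa = WhatsAppCrypt12(
#       input_file=pathlib.Path('msgstore.db.crypt12'),
#       key_file=pathlib.Path('key'))
#   wa.decrypt()  # on success writes msgstore.db.decoded.db next to the input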
| 30.62212
| 95
| 0.618661
|
ee315b5a4c1e3e10c62e456a2ca2e51e4c7a12e5
| 73,451
|
py
|
Python
|
adsrefpipe/tests/unittests/stubdata/parsed_references.py
|
golnazads/ADSReferencePipeline
|
802f26a9e085e6ff5de43f3b5642b2d9fad52cbb
|
[
"MIT"
] | null | null | null |
adsrefpipe/tests/unittests/stubdata/parsed_references.py
|
golnazads/ADSReferencePipeline
|
802f26a9e085e6ff5de43f3b5642b2d9fad52cbb
|
[
"MIT"
] | null | null | null |
adsrefpipe/tests/unittests/stubdata/parsed_references.py
|
golnazads/ADSReferencePipeline
|
802f26a9e085e6ff5de43f3b5642b2d9fad52cbb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
parsed_crossref = [{'bibcode': '2020TEST..........R', 'references': [{'authors': 'SB Prusiner', 'journal': 'Proc Natl Acad Sci U S A', 'title': 'Prions', 'volume': '95', 'page': '13363', 'year': '1998', 'doi': '10.1073/pnas.95.23.13363', 'refstr': 'SB Prusiner, 1998. Proc Natl Acad Sci U S A, Prions, 95, 13363. doi:10.1073/pnas.95.23.13363', 'refraw': '<citation key="ref1">\n <journal_title>Proc Natl Acad Sci U S A</journal_title>\n <author>SB Prusiner</author>\n <volume>95</volume>\n <first_page>13363</first_page>\n <cYear>1998</cYear>\n <doi provider="crossref">10.1073/pnas.95.23.13363</doi>\n <article_title>Prions</article_title>\n</citation>'}, {'year': '1099', 'doi': '10.1099/mic.0.071191-0', 'refstr': '1099, 10.1099/mic.0.071191-0', 'refraw': '<citation key="B1">\n <doi>10.1099/mic.0.071191-0</doi>\n</citation>'}, {'year': '1073', 'doi': '10.1073/pnas.1305049110', 'refstr': '1073, 10.1073/pnas.1305049110', 'refraw': '<citation key="B2">\n <doi>10.1073/pnas.1305049110</doi>\n</citation>'}, {'authors': 'Almeida MC', 'journal': 'Microbiol. Spectr.', 'volume': '5', 'issue': '2', 'year': '2017', 'refstr': 'Almeida MC, 2017. Microbiol. Spectr., 5.', 'refraw': '<citation key="B3">\n <journal_title>Microbiol. Spectr.</journal_title>\n <author>Almeida MC</author>\n <first_page>FUNK</first_page>\n <volume>5</volume>\n <issue>2</issue>\n <cYear>2017</cYear>\n</citation>'}, {'authors': 'Gerasimov G N', 'journal': 'Journal of Cosmology and Astroparticle Physics', 'volume': '58', 'issue': '02', 'page': 'A1029', 'year': '1985', 'refstr': 'Gerasimov G N, 1985. Journal of Cosmology and Astroparticle Physics, 58, A1029.', 'issn': '0030-4034', 'refraw': '<citation key="B12">\n <cYear>1985</cYear>\n <volume>58</volume>\n <issue>2</issue>\n <first_page>A1029</first_page>\n <issn>0030-4034</issn>\n <journal_title>Journal of Cosmology and Astroparticle Physics</journal_title>\n <author>Gerasimov G N</author>\n</citation>'}, {'authors': 'Gerasimov G N', 'journal': 'Journal of Cosmology and Astroparticle Physics', 'volume': '58', 'issue': '00', 'page': 'A1029', 'year': '1985', 'refstr': 'Gerasimov G N, 1985. Journal of Cosmology and Astroparticle Physics, 58, A1029.', 'issn': '0030-4034', 'refraw': '<citation key="B12a">\n <cYear>1985</cYear>\n <volume>58</volume>\n <first_page>A1029</first_page>\n <issn>0030-4034</issn>\n <journal_title>Journal of Cosmology and Astroparticle Physics</journal_title>\n <author>Gerasimov G N</author>\n</citation>'}, {'authors': 'Katz', 'journal': 'Progress in Aerospace Sciences', 'title': 'Wing/vortex interactions and wing rock', 'volume': '35', 'issue': '7', 'page': '727', 'year': '1999', 'doi': '10.1016/S0376-0421(99)00004-4', 'refstr': 'Katz, 1999. Progress in Aerospace Sciences, Wing/vortex interactions and wing rock, 35, 727. doi:10.1016/S0376-0421(99)00004-4', 'issn': '03760421', 'refraw': '<citation key="y">\n <journal_title>Progress in Aerospace Sciences</journal_title>\n <issn>03760421</issn>\n <author>Katz</author>\n <volume>35</volume>\n <issue>7</issue>\n <first_page>727</first_page>\n <cYear>1999</cYear>\n <doi>10.1016/S0376-0421(99)00004-4</doi>\n <article_title>Wing/vortex interactions and wing rock</article_title>\n</citation>'}, {'year': '1987', 'refplaintext': 'Pyoeraelae, K. and Laakso, M. and Uusitupa, M. (1987) Diabetes and atherosclerosis: an epidemiologic view Diabetes amp Metabolism Reviews, 3, pp. 463 - 524.', 'refraw': '<citation key="E00002">\n <unstructured_citation>Pyoeraelae, K. and Laakso, M. and Uusitupa, M. 
(1987) Diabetes and atherosclerosis: an\n epidemiologic view <i>Diabetes & Metabolism Reviews</i>, 3, pp. 463 - 524.\n </unstructured_citation>\n</citation>'}, {'year': '1992', 'refplaintext': 'Parthasarathy, S. and Steinberg, D. and Witztum, J.L. (1992) The role of oxidized low-density lipoproteins in the pathogenesis of atherosclerosis Annual Reviews of Medicine, 43, pp. 219 - 225.', 'refraw': '<citation key="E00003">\n <unstructured_citation>Parthasarathy, S. and Steinberg, D. and Witztum, J.L. (1992) The role of oxidized low-density\n lipoproteins in the pathogenesis of atherosclerosis <i>Annual Reviews of Medicine</i>, 43, pp. 219 - 225.\n </unstructured_citation>\n</citation>'}, {'refraw': '<citation key="B1">\n <empty>what we dont have anything</empty>\n</citation>'}, {'year': '2012', 'arxiv': '1207.5271', 'refstr': '2012, 1207.5271', 'refraw': '<citation key="631_CR2">\n <unstructured_citation>An, X.: Formation of trapped surfaces from past null infinity. arXiv:1207.5271 (2012)\n </unstructured_citation>\n</citation>'}, {'authors': 'Bald', 'journal': 'Nanoscale Insights Into Ion-Beam Cancer Therapy', 'title': 'Dissociative electron attachment to biomolecules', 'page': '159', 'year': '2017', 'doi': '10.1016/bs.aamop.2017.02.002_bb0190', 'refstr': 'Bald, 2017. Nanoscale Insights Into Ion-Beam Cancer Therapy, Dissociative electron attachment to biomolecules, 159. doi:10.1016/bs.aamop.2017.02.002_bb0190', 'refraw': '<citation key="10.1016/bs.aamop.2017.02.002_bb0190">\n <author>Bald</author>\n <first_page>159</first_page>\n <cYear>2017</cYear>\n <series_title>Nanoscale Insights Into Ion-Beam Cancer Therapy</series_title>\n <article_title>Dissociative electron attachment to biomolecules</article_title>\n</citation>'}]}]
parsed_elsevier = [{'bibcode': '2020TEST..........R', 'references': [{'authors': 'Ainsworth, E.A.', 'journal': 'Plant J.', 'title': 'Understanding and improving global crop response to ozone pollution', 'volume': '90', 'page': '886', 'year': '2016', 'doi': '10.1111/tpj.13298', 'refstr': 'Ainsworth, E.A., 2016. Plant J., Understanding and improving global crop response to ozone pollution, 90, 886. doi:10.1111/tpj.13298', 'refraw': '<?xml version="1.0" encoding="ISO-8859-1" standalone="yes" ?><reference id="sref1">\n <contribution langtype="en">\n <authors>\n <author>\n <given-name>E.A.</given-name>\n <surname>Ainsworth</surname>\n </author>\n </authors>\n <title>\n <maintitle>Understanding and improving global crop response to ozone pollution</maintitle>\n </title>\n </contribution>\n <host>\n <issue>\n <series>\n <title>\n <maintitle>Plant J.</maintitle>\n </title>\n <volume-nr>90</volume-nr>\n </series>\n <date>2016</date>\n </issue>\n <pages>\n <first-page>886</first-page>\n <last-page>897</last-page>\n </pages>\n <doi>10.1111/tpj.13298</doi>\n </host>\n</reference>'}, {'journal': 'Plant J.', 'volume': '90', 'page': '886', 'year': '2016', 'doi': '10.1111/tpj.13298', 'refstr': '2016, Plant J., 90, 886, 10.1111/tpj.13298', 'refraw': '<?xml version="1.0" encoding="ISO-8859-1" standalone="yes" ?><reference id="sref2">\n <host>\n <issue>\n <series>\n <title>\n <maintitle>Plant J.</maintitle>\n </title>\n <volume-nr>90</volume-nr>\n </series>\n <date>2016</date>\n </issue>\n <pages>\n <first-page>886</first-page>\n <last-page>897</last-page>\n </pages>\n <doi>10.1111/tpj.13298</doi>\n </host>\n</reference>'}, {'authors': 'Boucher, O., Randall, D., Artaxo, P., Bretherton, G., Feingold, P., Forster, P., Kerminen, V.-M., Kondo, Y., Liao, H., Lohmann, U., Rasch, P., Satheesh, S.K., Sherwood, S., Stevens, B., Zhang, X.Y.', 'journal': 'Climate Change 2013: the Physical Science Basis', 'title': 'Clouds and aerosols', 'volume': '571', 'year': '2013', 'refstr': 'Boucher, O., Randall, D., Artaxo, P., Bretherton, G., Feingold, P., Forster, P., Kerminen, V.-M., Kondo, Y., Liao, H., Lohmann, U., Rasch, P., Satheesh, S.K., Sherwood, S., Stevens, B., Zhang, X.Y., 2013. 
Climate Change 2013: the Physical Science Basis, Clouds and aerosols, 571.', 'refraw': '<?xml version="1.0" encoding="ISO-8859-1" standalone="yes" ?><reference id="sref31">\n <contribution langtype="en">\n <authors>\n <author>\n <given-name>O.</given-name>\n <surname>Boucher</surname>\n </author>\n <author>\n <given-name>D.</given-name>\n <surname>Randall</surname>\n </author>\n <author>\n <given-name>P.</given-name>\n <surname>Artaxo</surname>\n </author>\n <author>\n <given-name>G.</given-name>\n <surname>Bretherton</surname>\n </author>\n <author>\n <given-name>P.</given-name>\n <surname>Feingold</surname>\n </author>\n <author>\n <given-name>P.</given-name>\n <surname>Forster</surname>\n </author>\n <author>\n <given-name>V.-M.</given-name>\n <surname>Kerminen</surname>\n </author>\n <author>\n <given-name>Y.</given-name>\n <surname>Kondo</surname>\n </author>\n <author>\n <given-name>H.</given-name>\n <surname>Liao</surname>\n </author>\n <author>\n <given-name>U.</given-name>\n <surname>Lohmann</surname>\n </author>\n <author>\n <given-name>P.</given-name>\n <surname>Rasch</surname>\n </author>\n <author>\n <given-name>S.K.</given-name>\n <surname>Satheesh</surname>\n </author>\n <author>\n <given-name>S.</given-name>\n <surname>Sherwood</surname>\n </author>\n <author>\n <given-name>B.</given-name>\n <surname>Stevens</surname>\n </author>\n <author>\n <given-name>X.Y.</given-name>\n <surname>Zhang</surname>\n </author>\n </authors>\n <title>\n <maintitle>Clouds and aerosols</maintitle>\n </title>\n </contribution>\n <host>\n <edited-book>\n <book-series>\n <editors>\n <editor>\n <given-name>T.F.</given-name>\n <surname>Stocker</surname>\n </editor>\n <editor>\n <given-name>D.</given-name>\n <surname>Qin</surname>\n </editor>\n <editor>\n <given-name>G.-K.</given-name>\n <surname>Plattner</surname>\n </editor>\n <editor>\n <given-name>M.M.B.</given-name>\n <surname>Tignor</surname>\n </editor>\n <editor>\n <given-name>S.K.</given-name>\n <surname>Allen</surname>\n </editor>\n <editor>\n <given-name>J.</given-name>\n <surname>Boschung</surname>\n </editor>\n <editor>\n <given-name>A.</given-name>\n <surname>Nauels</surname>\n </editor>\n <editor>\n <given-name>Y.</given-name>\n <surname>Xia</surname>\n </editor>\n <editor>\n <given-name>V.</given-name>\n <surname>Bex</surname>\n </editor>\n <editor>\n <given-name>P.M.</given-name>\n <surname>Midgley</surname>\n </editor>\n </editors>\n <series>\n <title>\n <maintitle>Climate Change 2013: the Physical Science Basis</maintitle>\n </title>\n <volume-nr>vols. 571–657</volume-nr>\n </series>\n </book-series>\n <date>2013</date>\n <publisher>\n <name>U.S.A</name>\n <location>Cambridge, U.K., and New York</location>\n </publisher>\n </edited-book>\n </host>\n</reference>'}, {'authors': 'Kahnet', 'journal': 'J. Geophys. Res.', 'title': 'Multiangle Imaging Spectroradiometer (MISR) global aerosol optical depth validation based on 2 years of coincident Aerosol Robotic Network (AERONET) observations', 'volume': '110', 'year': '2005', 'refstr': 'Kahnet, 2005. J. Geophys. 
Res., Multiangle Imaging Spectroradiometer (MISR) global aerosol optical depth validation based on 2 years of coincident Aerosol Robotic Network (AERONET) observations, 110.', 'refraw': '<?xml version="1.0" encoding="ISO-8859-1" standalone="yes" ?><reference id="sref34">\n <contribution langtype="en">\n <authors>\n <author>\n <surname>Kahnet</surname>\n </author>\n <et-al/>\n </authors>\n <title>\n <maintitle>Multiangle Imaging Spectroradiometer (MISR) global aerosol optical depth validation based on 2\n years of coincident Aerosol Robotic Network (AERONET) observations\n </maintitle>\n </title>\n </contribution>\n <host>\n <issue>\n <series>\n <title>\n <maintitle>J. Geophys. Res.</maintitle>\n </title>\n <volume-nr>110</volume-nr>\n </series>\n <issue-nr>D10</issue-nr>\n <date>2005</date>\n </issue>\n </host>\n</reference>'}, {'authors': 'Karimipour, Ghandehari', 'journal': 'Transactions on Computational Science', 'title': 'Voronoi-based medial axis approximation from samples: issues and solutions', 'volume': '20', 'page': '138', 'year': '2013', 'refstr': 'Karimipour, Ghandehari, 2013. Transactions on Computational Science, Voronoi-based medial axis approximation from samples: issues and solutions, 20, 138.', 'refraw': '<?xml version="1.0" encoding="ISO-8859-1" standalone="yes" ?><reference id="sref35">\n <contribution langtype="en">\n <authors>\n <author>\n <surname>Karimipour</surname>\n </author>\n <author>\n <surname>Ghandehari</surname>\n </author>\n </authors>\n <title>\n <maintitle>Voronoi-based medial axis approximation from samples: issues and solutions</maintitle>\n </title>\n </contribution>\n <host>\n <issue>\n <series>\n <title>\n <maintitle>Transactions on Computational Science</maintitle>\n </title>\n <volume-nr>XX</volume-nr>\n </series>\n <date>2013</date>\n </issue>\n <pages>\n <first-page>138</first-page>\n <last-page>157</last-page>\n </pages>\n </host>\n <comment>Springer</comment>\n</reference>'}, {'authors': 'Cerri, C.E.P., You, X., Cherubin, M.R., Moreira, C.S., Raucci, G.S., Castigioni, B. de A., Alves, P.A., Cerri, D.G.P., Mello, F.F. de C., Cerri, C.C.', 'journal': 'PloS One', 'title': 'Assessing the greenhouse gas emissions of Brazilian soybean biodiesel production', 'volume': '12', 'page': '0176948', 'year': '2017', 'doi': '10.1371/journal.pone.0176948', 'refstr': 'Cerri, C.E.P., You, X., Cherubin, M.R., Moreira, C.S., Raucci, G.S., Castigioni, B. de A., Alves, P.A., Cerri, D.G.P., Mello, F.F. de C., Cerri, C.C., 2017. PloS One, Assessing the greenhouse gas emissions of Brazilian soybean biodiesel production, 12, 0176948. doi:10.1371/journal.pone.0176948', 'refraw': '<?xml version="1.0" encoding="ISO-8859-1" standalone="yes" ?><reference id="sref3">\n <contribution langtype="en">\n <authors>\n <author>\n <given-name>C.E.P.</given-name>\n <surname>Cerri</surname>\n </author>\n <author>\n <given-name>X.</given-name>\n <surname>You</surname>\n </author>\n <author>\n <given-name>M.R.</given-name>\n <surname>Cherubin</surname>\n </author>\n <author>\n <given-name>C.S.</given-name>\n <surname>Moreira</surname>\n </author>\n <author>\n <given-name>G.S.</given-name>\n <surname>Raucci</surname>\n </author>\n <author>\n <given-name>B. de A.</given-name>\n <surname>Castigioni</surname>\n </author>\n <author>\n <given-name>P.A.</given-name>\n <surname>Alves</surname>\n </author>\n <author>\n <given-name>D.G.P.</given-name>\n <surname>Cerri</surname>\n </author>\n <author>\n <given-name>F.F. 
de C.</given-name>\n <surname>Mello</surname>\n </author>\n <author>\n <given-name>C.C.</given-name>\n <surname>Cerri</surname>\n </author>\n </authors>\n <title>\n <maintitle>Assessing the greenhouse gas emissions of Brazilian soybean biodiesel production\n </maintitle>\n </title>\n </contribution>\n <host>\n <issue>\n <series>\n <title>\n <maintitle>PloS One</maintitle>\n </title>\n <volume-nr>12</volume-nr>\n </series>\n <date>2017</date>\n </issue>\n <article-number>e0176948</article-number>\n <doi>10.1371/journal.pone.0176948</doi>\n </host>\n</reference>'}, {'authors': 'CARB', 'title': 'California map for local air district websites', 'year': '2020', 'refstr': 'CARB, 2020. California map for local air district websites.', 'refraw': '<?xml version="1.0" encoding="ISO-8859-1" standalone="yes" ?><reference id="sref7">\n <contribution langtype="en">\n <authors>\n <author>\n <surname>CARB</surname>\n </author>\n </authors>\n <title>\n <maintitle>California map for local air district websites</maintitle>\n </title>\n </contribution>\n <comment>Accessed</comment>\n <host>\n <e-host>\n <inter-ref>\n https://ww3.arb.ca.gov/capcoa/dismap.htm\n </inter-ref>\n <date>2020</date>\n <date-accessed day="25" month="1" year="2020"/>\n </e-host>\n </host>\n</reference>'}, {'authors': 'Albert, A., Kaur, J., Gonzalez, M.C.', 'title': 'Using Convolutional Networks and Satellite Imagery to Identify Patterns in Urban Environments at a Large Scale', 'year': '2017', 'arxiv': '1704.02965', 'refstr': 'Albert, A., Kaur, J., Gonzalez, M.C., 2017. Using Convolutional Networks and Satellite Imagery to Identify Patterns in Urban Environments at a Large Scale. arXiv:1704.02965', 'refraw': '<?xml version="1.0" encoding="ISO-8859-1" standalone="yes" ?><reference id="sref1">\n <contribution langtype="en">\n <authors>\n <author>\n <given-name>A.</given-name>\n <surname>Albert</surname>\n </author>\n <author>\n <given-name>J.</given-name>\n <surname>Kaur</surname>\n </author>\n <author>\n <given-name>M.C.</given-name>\n <surname>González</surname>\n </author>\n </authors>\n <title>\n <maintitle>Using Convolutional Networks and Satellite Imagery to Identify Patterns in Urban Environments\n at a Large Scale\n </maintitle>\n </title>\n </contribution>\n <host>\n <book>\n <date>2017</date>\n </book>\n </host>\n <comment>arXiv:1704.02965</comment>\n</reference>'}, {'authors': 'Baniecki, J.D., Ishii, M., Kurihara, K., Yamanaka, K., Yano, T., Shinozaki, K., Imada, T., Nozaki, K., Kin, N.', 'journal': 'Phys. Rev. B: Condens. Matter Mater. Phys.', 'volume': '78', 'page': '195415', 'year': '2008', 'refstr': 'Baniecki, J.D., Ishii, M., Kurihara, K., Yamanaka, K., Yano, T., Shinozaki, K., Imada, T., Nozaki, K., Kin, N., 2008. Phys. Rev. B: Condens. Matter Mater. 
Phys., 78, 195415.', 'refraw': '<?xml version="1.0" encoding="ISO-8859-1" standalone="yes" ?><reference id="h0155">\n <contribution langtype="en">\n <authors>\n <author>\n <given-name>J.D.</given-name>\n <surname>Baniecki</surname>\n </author>\n <author>\n <given-name>M.</given-name>\n <surname>Ishii</surname>\n </author>\n <author>\n <given-name>K.</given-name>\n <surname>Kurihara</surname>\n </author>\n <author>\n <given-name>K.</given-name>\n <surname>Yamanaka</surname>\n </author>\n <author>\n <given-name>T.</given-name>\n <surname>Yano</surname>\n </author>\n <author>\n <given-name>K.</given-name>\n <surname>Shinozaki</surname>\n </author>\n <author>\n <given-name>T.</given-name>\n <surname>Imada</surname>\n </author>\n <author>\n <given-name>K.</given-name>\n <surname>Nozaki</surname>\n </author>\n <author>\n <given-name>N.</given-name>\n <surname>Kin</surname>\n </author>\n </authors>\n </contribution>\n <host>\n <issue>\n <series>\n <title>\n <maintitle>Phys. Rev. B: Condens. Matter Mater. Phys.</maintitle>\n </title>\n <volume-nr>78</volume-nr>\n </series>\n <date>2008</date>\n </issue>\n </host>\n <comment>195415</comment>\n</reference>'}, {'authors': 'Strasburger, K.', 'journal': 'Phys. Rev. A', 'volume': '99', 'page': '069901', 'year': '2019', 'refstr': 'Strasburger, K., 2019. Phys. Rev. A, 99, 069901.', 'refraw': '<?xml version="1.0" encoding="ISO-8859-1" standalone="yes" ?><reference id="h0095">\n <contribution langtype="en">\n <authors>\n <author>\n <given-name>K.</given-name>\n <surname>Strasburger</surname>\n </author>\n </authors>\n </contribution>\n <host>\n <issue>\n <series>\n <title>\n <maintitle>Phys. Rev. A</maintitle>\n </title>\n <volume-nr>99</volume-nr>\n </series>\n <date>2019</date>\n </issue>\n </host>\n <comment>069901(E)</comment>\n</reference>'}, {'authors': 'Morgunov, R.B., Farle, M., Kazakova, O.L.', 'journal': 'Zh. Eksp. Teor. Fiz.', 'volume': '134', 'page': '141', 'year': '2008', 'refstr': 'Morgunov, R.B., Farle, M., Kazakova, O.L., 2008. Zh. Eksp. Teor. Fiz., 134, 141.', 'refraw': '<?xml version="1.0" encoding="ISO-8859-1" standalone="yes" ?><reference id="h0130">\n <contribution langtype="en">\n <authors>\n <author>\n <given-name>R.B.</given-name>\n <surname>Morgunov</surname>\n </author>\n <author>\n <given-name>M.</given-name>\n <surname>Farle</surname>\n </author>\n <author>\n <given-name>O.L.</given-name>\n <surname>Kazakova</surname>\n </author>\n </authors>\n </contribution>\n <host>\n <issue>\n <series>\n <title>\n <maintitle>Zh. Eksp. Teor. Fiz.</maintitle>\n </title>\n <volume-nr>134</volume-nr>\n </series>\n <date>2008</date>\n </issue>\n <pages>\n <first-page>141</first-page>\n </pages>\n </host>\n <comment>[R.B. Morgunov, M. Farle, O.L. Kazakova, JETP 107 (2008) 113]</comment>\n</reference>'}, {'year': '2005', 'refplaintext': 'I. Kravchenko, et al., Updated limits on the ultra-high energy neutrino flux from the RICE experiment, in: Proceedings of the 29th International Cosmic Ray Conference, Pune, 2005', 'refraw': '<?xml version="1.0" encoding="ISO-8859-1" standalone="yes" ?><reference>\n <textref>I. Kravchenko, et al., Updated limits on the ultra-high energy neutrino flux from the RICE experiment,\n in: Proceedings of the 29th International Cosmic Ray Conference, Pune, 2005\n </textref>\n</reference>'}, {'year': '2005', 'refplaintext': 'I. 
Kravchenko, et al., Using RICE data and GZK neutrino flux models to bound low scale gravity, in: Proceedings of the 29th International Cosmic Ray Conference, Pune, 2005', 'refraw': '<?xml version="1.0" encoding="ISO-8859-1" standalone="yes" ?><reference>\n <textref>I. Kravchenko, et al., Using RICE data and GZK neutrino flux models to bound low scale gravity, in:\n Proceedings of the 29th International Cosmic Ray Conference, Pune, 2005\n </textref>\n</reference>'}, {'refplaintext': 'I. Kravchenko, et al., RICE limits on the diffuse ultra-high energy neutrino flux, in preparation', 'refraw': '<?xml version="1.0" encoding="ISO-8859-1" standalone="yes" ?><reference>\n <textref>I. Kravchenko, et al., RICE limits on the diffuse ultra-high energy neutrino flux, in preparation\n </textref>\n</reference>'}]}]
parsed_jats = [{'bibcode': '2020TEST..........R', 'references': [{'authors': 'Sun, J., Hu, F.', 'journal': 'Int. J. RF Microwave Comput. Aided Eng.', 'volume': '30', 'page': '21983', 'year': '2020', 'doi': '10.1002/mmce.21983', 'refstr': 'Sun, J., Hu, F., 2020. Int. J. RF Microwave Comput. Aided Eng., 30, 21983. doi:10.1002/mmce.21983', 'refraw': '<mixed-citation publication-type="journal">\n <string-name>\n <given-names>J.</given-names>\n <surname>Sun</surname>\n </string-name>\n and\n <string-name>\n <given-names>F.</given-names>\n <surname>Hu</surname>\n </string-name>\n ,\n <source>Int. J. RF Microwave Comput. Aided Eng.</source>\n <volume>\n <bold>30</bold>\n </volume>\n (1), <fpage>e21983</fpage> (<year>2020</year>).\n <pub-id pub-id-type="doi" specific-use="metadata">10.1002/mmce.21983</pub-id>\n</mixed-citation>'}, {'journal': 'Thin Films PVD for Microelectronics: Sputter Deposition Applied to Semiconductor Manufacturing', 'title': 'Sputter Deposition Applied to Semiconductor Manufacturing', 'year': '1999', 'refplaintext': 'Thin Films PVD for Microelectronics: Sputter Deposition Applied to Semiconductor Manufacturing , edited by R. A. Powell and S. M. Rossnagel (Academic Press, San Diego, 1999), Vol. 26.', 'refraw': '<mixed-citation publication-type="other"><italic>Thin Films PVD for Microelectronics: Sputter Deposition Applied to\n Semiconductor Manufacturing</italic>, edited by R. A. Powell and S. M. Rossnagel (Academic Press, San Diego, 1999),\n Vol. 26.\n</mixed-citation>'}, {'authors': 'Aminu, Mohammed D., Nabavi, Seyed Ali, Rochelle, Christopher A., Manovic, Vasilije', 'journal': 'Applied Energy', 'title': 'A review of developments in carbon dioxide storage', 'volume': '208', 'page': '1389', 'year': '2017', 'refstr': 'Aminu, Mohammed D., Nabavi, Seyed Ali, Rochelle, Christopher A., Manovic, Vasilije, 2017. Applied Energy, A review of developments in carbon dioxide storage, 208, 1389.', 'refraw': '<mixed-citation publication-type="journal">\n <person-group person-group-type="author">\n <name>\n <surname>Aminu</surname>\n <given-names>Mohammed D.</given-names>\n </name>\n <name>\n <surname>Nabavi</surname>\n <given-names>Seyed Ali</given-names>\n </name>\n <name>\n <surname>Rochelle</surname>\n <given-names>Christopher A.</given-names>\n </name>\n <name>\n <surname>Manovic</surname>\n <given-names>Vasilije</given-names>\n </name>\n </person-group>\n <article-title xml:lang="en">A review of developments in carbon dioxide storage</article-title>\n <source>Applied Energy</source>\n <year>2017</year>\n <volume>208</volume>\n <fpage>1389</fpage>\n <lpage>1419</lpage>\n</mixed-citation>'}, {'authors': 'Moon, H., Lennon, D. T., Kirkpatrick, J., van Esbroeck, N. M., Camenzind, L. C., Yu, L., Vigneau, F., Zumbuhl, D. M., Briggs, G. A. D., Osborne, M. A., Sejdinovic, D., Laird, E. A., Ares, N.', 'year': '2020', 'arxiv': '2001.02589', 'refstr': 'Moon, H., Lennon, D. T., Kirkpatrick, J., van Esbroeck, N. M., Camenzind, L. C., Yu, L., Vigneau, F., Zumbuhl, D. M., Briggs, G. A. D., Osborne, M. A., Sejdinovic, D., Laird, E. A., Ares, N., 2020, 2001.02589', 'refraw': '<mixed-citation publication-type="eprint">\n <string-name>\n <given-names>H.</given-names>\n <surname>Moon</surname>\n </string-name>\n ,\n <string-name>\n <given-names>D. T.</given-names>\n <surname>Lennon</surname>\n </string-name>\n ,\n <string-name>\n <given-names>J.</given-names>\n <surname>Kirkpatrick</surname>\n </string-name>\n ,\n <string-name>\n <given-names>N. 
M.</given-names>\n <surname>van Esbroeck</surname>\n </string-name>\n ,\n <string-name>\n <given-names>L. C.</given-names>\n <surname>Camenzind</surname>\n </string-name>\n ,\n <string-name>\n <given-names>L.</given-names>\n <surname>Yu</surname>\n </string-name>\n ,\n <string-name>\n <given-names>F.</given-names>\n <surname>Vigneau</surname>\n </string-name>\n ,\n <string-name>\n <given-names>D. M.</given-names>\n <surname>Zumbühl</surname>\n </string-name>\n ,\n <string-name>\n <given-names>G. A. D.</given-names>\n <surname>Briggs</surname>\n </string-name>\n ,\n <string-name>\n <given-names>M. A.</given-names>\n <surname>Osborne</surname>\n </string-name>\n ,\n <string-name>\n <given-names>D.</given-names>\n <surname>Sejdinovic</surname>\n </string-name>\n ,\n <string-name>\n <given-names>E. A.</given-names>\n <surname>Laird</surname>\n </string-name>\n , and\n <string-name>\n <given-names>N.</given-names>\n <surname>Ares</surname>\n </string-name>\n , preprint <ext-link href="http://arxiv.org/abs/arXiv:2001.02589">arXiv:2001.02589\n</ext-link> (2020).\n</mixed-citation>'}, {'authors': 'Chen, L., Liao, D. G., Guo, X. G., Zhao, J. Y., Zhu, Y. M., Zhuang, S. L.', 'journal': 'Front. Inf. Technol. Electron. Eng.', 'volume': '20', 'page': '591', 'year': '2019', 'doi': '10.1631/FITEE.1800633', 'refstr': 'Chen, L., Liao, D. G., Guo, X. G., Zhao, J. Y., Zhu, Y. M., Zhuang, S. L., 2019. Front. Inf. Technol. Electron. Eng., 20, 591. doi:10.1631/FITEE.1800633', 'refraw': '<mixed-citation id="c4a" publication-type="journal">\n <string-name>\n <given-names>L.</given-names>\n <surname>Chen</surname>\n </string-name>\n ,\n <string-name>\n <given-names>D. G.</given-names>\n <surname>Liao</surname>\n </string-name>\n ,\n <string-name>\n <given-names>X. G.</given-names>\n <surname>Guo</surname>\n </string-name>\n ,\n <string-name>\n <given-names>J. Y.</given-names>\n <surname>Zhao</surname>\n </string-name>\n ,\n <string-name>\n <given-names>Y. M.</given-names>\n <surname>Zhu</surname>\n </string-name>\n , and\n <string-name>\n <given-names>S. L.</given-names>\n <surname>Zhuang</surname>\n </string-name>\n ,\n <source>Front. Inf. Technol. Electron. Eng.</source>\n <volume>\n <bold>20</bold>\n </volume>\n , <fpage>591</fpage> (<year>2019</year>);\n <pub-id pub-id-type="doi" specific-use="metadata">10.1631/FITEE.1800633</pub-id>\n</mixed-citation>'}, {'authors': 'Jepsen, P. U., Cooke, D. G., Koch, M.', 'journal': 'Laser Photonics Rev.', 'volume': '5', 'year': '2011', 'doi': '10.1002/lpor.201000011', 'refstr': 'Jepsen, P. U., Cooke, D. G., Koch, M., 2011. Laser Photonics Rev., 5. doi:10.1002/lpor.201000011', 'refraw': '<mixed-citation id="c4b" publication-type="journal">\n <string-name>\n <given-names>P. U.</given-names>\n <surname>Jepsen</surname>\n </string-name>\n ,\n <string-name>\n <given-names>D. G.</given-names>\n <surname>Cooke</surname>\n </string-name>\n , and\n <string-name>\n <given-names>M.</given-names>\n <surname>Koch</surname>\n </string-name>\n ,\n <source>Laser Photonics Rev.</source>\n <volume>\n <bold>5</bold>\n </volume>\n , 124 (2011).\n <pub-id pub-id-type="doi" specific-use="metadata">10.1002/lpor.201000011</pub-id>\n</mixed-citation>'}, {'authors': 'Agosta, L., Metere, A.', 'title': 'Movie---Fddd mesophase trajectory', 'year': '1063', 'doi': '10.1063/5', 'refstr': 'Agosta, L., Metere, A., 1063. Movie---Fddd mesophase trajectory. 
doi:10.1063/5', 'refraw': '<mixed-citation publication-type="supplementary-material">\n <string-name>\n <given-names>L.</given-names>\n <surname>Agosta</surname>\n </string-name>\n and\n <string-name>\n <given-names>A.</given-names>\n <surname>Metere</surname>\n </string-name>\n , “<article-title>Movie—Fddd mesophase trajectory</article-title>,” available in\n the <inline-supplementary-material href="https://doi.org/10.1063/5.0006096#suppl">supplementary\n material</inline-supplementary-material>.\n</mixed-citation>'}, {'authors': 'Mitchell, A. C., Zemansky, M. W.', 'title': 'Resonance Radiation and Excited Atoms', 'year': '1961', 'refstr': 'Mitchell, A. C., Zemansky, M. W., 1961. Resonance Radiation and Excited Atoms.', 'refraw': '<mixed-citation publication-type="book">\n <string-name>\n <given-names>A. C.</given-names>\n <surname>Mitchell</surname>\n </string-name>\n and\n <string-name>\n <given-names>M. W.</given-names>\n <surname>Zemansky</surname>\n </string-name>\n ,\n <source>\n <italic>Resonance Radiation and Excited Atoms</italic>\n </source>\n , <edition>2nd ed.</edition> (<publisher-name>Cambridge University Press</publisher-name>, <publisher-loc>\n Cambridge</publisher-loc>, <year>1961</year>).\n</mixed-citation>'}, {'authors': 'Knappe, R., Haloui, H., Seifert, A., Weis, A., Nebel, A.', 'title': 'Proc. SPIE', 'volume': '7585', 'page': '75850', 'year': '2010', 'doi': '10.1117/12.842318', 'refstr': 'Knappe, R., Haloui, H., Seifert, A., Weis, A., Nebel, A., 2010. Proc. SPIE, 7585, 75850. doi:10.1117/12.842318', 'refraw': '<mixed-citation publication-type="book">\n <string-name>\n <given-names>R.</given-names>\n <surname>Knappe</surname>\n </string-name>\n ,\n <string-name>\n <given-names>H.</given-names>\n <surname>Haloui</surname>\n </string-name>\n ,\n <string-name>\n <given-names>A.</given-names>\n <surname>Seifert</surname>\n </string-name>\n ,\n <string-name>\n <given-names>A.</given-names>\n <surname>Weis</surname>\n </string-name>\n , and\n <string-name>\n <given-names>A.</given-names>\n <surname>Nebel</surname>\n </string-name>\n ,\n <source>\n <italic>Proc. SPIE</italic>\n </source>\n <volume>7585</volume>, <fpage>75850H</fpage> (<year>2010</year>).\n <pub-id pub-id-type="doi" specific-use="metadata">10.1117/12.842318</pub-id>\n</mixed-citation>'}, {'authors': 'Jingli', 'title': 'Technology driven or application driven', 'year': '2018', 'refstr': 'Jingli, 2018. Technology driven or application driven.', 'refraw': '<mixed-citation publication-type="other">\n <string-name>\n <surname>Jingli</surname>\n </string-name>\n , “Nonvolatile memory outlook: Technology driven or application driven,” in 2018 China Semiconductor\n Technology International Conference (CSTIC) (IEEE, 2018), pp. 1–2.\n</mixed-citation>'}, {'journal': 'k', 'year': '1063', 'doi': '10.1063/5', 'refstr': '1063, k, 10.1063/5', 'refraw': '<mixed-citation publication-type="supplementary-material">The dashed curves in <xref ref-type="fig" rid="f4">Fig. 4\n</xref> are guides based on the assumptions\n \n and\n \n . The proportionality coefficient <italic>k</italic> is arbitrary, but the difference in\n \n between the electrons and holes is taken into account:\n \n . For\n \n of the n-type sample, see the <inline-supplementary-material href="https://doi.org/10.1063/5.0009687#suppl">\n supplementary material</inline-supplementary-material>.\n</mixed-citation>'}, {'authors': 'M. Van Damme', 'year': '1907', 'arxiv': 'arXiv:1907.02474', 'refstr': 'M. 
Van Damme, 1907, arXiv:1907.02474', 'refraw': '<mixed-citation publication-type="eprint">\n \n <person-group person-group-type="author"><string-name>M. Van Damme</string-name>, <string-name>L.\n Vanderstraeten</string-name>, <string-name>J. De Nardis</string-name>, <string-name>J. Haegeman</string-name>,\n and\n <string-name>F. Verstraete</string-name>\n </person-group>\n , <pub-id pub-id-type="arxiv">arXiv:1907.02474</pub-id> [cond-mat].\n</mixed-citation>'}, {'authors': 'M. Van Damme', 'year': '1907', 'arxiv': 'arXiv:1907.02474', 'refstr': 'M. Van Damme, 1907, arXiv:1907.02474', 'refraw': '<mixed-citation publication-type="eprint">\n \n <person-group person-group-type="author"><string-name>M. Van Damme</string-name>, <string-name>L.\n Vanderstraeten</string-name>, <string-name>J. De Nardis</string-name>, <string-name>J. Haegeman</string-name>,\n and\n <string-name>F. Verstraete</string-name>\n </person-group>\n , <pub-id pub-id-type="arxiv">arXiv:1907.02474</pub-id> [cond-mat].\n</mixed-citation>'}, {'authors': 'F. Verstraete', 'arxiv': 'arXiv:cond-mat/0407066', 'refstr': 'F. Verstraete, arXiv:cond-mat/0407066', 'refraw': '<mixed-citation publication-type="eprint">\n \n <person-group person-group-type="author">\n <string-name>F. Verstraete</string-name>\n and\n <string-name>J. I. Cirac</string-name>\n </person-group>\n , <pub-id pub-id-type="arxiv">arXiv:cond-mat/0407066</pub-id>.\n</mixed-citation>'}]}]
parsed_iop = [{'bibcode': '2020TEST..........R', 'references': [{'year': '2015', 'refplaintext': 'Flynn J 2015 Computing Resources Scrutiny Group Report Tech. Rep. CERN-RRB-2015-014 CERN Geneva URL', 'refraw': '<?xml version="1.0" encoding="ISO-8859-1" standalone="yes" ?><reference type="journal">\n <ref_label>[1]</ref_label>\n <ref_citation>Flynn J 2015 Computing Resources Scrutiny Group Report Tech. Rep. CERN-RRB-2015-014 CERN Geneva URL\n https://cds.cern.ch/record/2002240\n </ref_citation>\n</reference>'}, {'authors': 'J. Lattin D C and P G', 'journal': 'Analyzing multivariate data.', 'year': '2003', 'refplaintext': 'J. Lattin D C and P G 2003 Analyzing multivariate data. (New York: Duxbury: Pacific Grove, CA: Thomson Brooks/Cole)', 'refraw': '<?xml version="1.0" encoding="ISO-8859-1" standalone="yes" ?><reference type="book">\n <ref_label>[24]</ref_label>\n <ref_citation>J. Lattin D C and P G 2003 Analyzing multivariate data. (New York: Duxbury: Pacific Grove, CA: Thomson\n Brooks/Cole)\n </ref_citation>\n <ref_authors>J. Lattin D C and P G</ref_authors>\n <ref_journal>Analyzing multivariate data.</ref_journal>\n <ref_year>2003</ref_year>\n</reference>'}, {'authors': 'Agostinelli S et al and GEANT4', 'journal': 'Nucl. Instrum. Meth.', 'volume': '506', 'page': '250', 'year': '2003', 'doi': '10.1016/S0168-9002(03)01368-8', 'refstr': 'Agostinelli S et al and GEANT4, 2003. Nucl. Instrum. Meth., 506, 250. doi:10.1016/S0168-9002(03)01368-8', 'issn': '0168-9002', 'refraw': '<?xml version="1.0" encoding="ISO-8859-1" standalone="yes" ?><reference type="journal">\n <ref_label>[13]</ref_label>\n <ref_citation>Agostinelli S et al and GEANT4 2003 Nucl. Instrum. Meth. A506 250-303</ref_citation>\n <ref_doi>10.1016/S0168-9002(03)01368-8</ref_doi>\n <ref_authors>Agostinelli S et al and GEANT4</ref_authors>\n <ref_journal>Nucl. Instrum. Meth.</ref_journal>\n <ref_issn>0168-9002</ref_issn>\n <ref_volume>506</ref_volume>\n \n <ref_year>2003</ref_year>\n <ref_start_page>250</ref_start_page>\n <ref_end_page>303</ref_end_page>\n</reference>'}, {'year': '2017', 'arxiv': 'arXiv:1701.00160', 'refstr': '2017, arXiv:1701.00160', 'refraw': '<?xml version="1.0" encoding="ISO-8859-1" standalone="yes" ?><reference type="journal">\n <ref_label>[14]</ref_label>\n <ref_citation>Goodfellow I J 2017 CoRR abs/1701.00160 URL http://arxiv.org/abs/1701.00160</ref_citation>\n</reference>'}, {'year': '2017', 'arxiv': 'arXiv:1701.00160', 'refstr': '2017, arXiv:1701.00160', 'refraw': '<?xml version="1.0" encoding="ISO-8859-1" standalone="yes" ?><reference type="journal">\n <ref_label>[14]</ref_label>\n <ref_citation>Goodfellow I J 2017 CoRR abs/1701.00160 URL http://arxiv.org/abs/1701.00160</ref_citation>\n</reference>'}, {'authors': 'Bekbaev U', 'title': 'On classification of finite dimensional algebras', 'year': '2015', 'arxiv': 'arXiv:1504.01194', 'refstr': ' arXiv:1504.01194', 'refraw': '<?xml version="1.0" encoding="ISO-8859-1" standalone="yes" ?><reference type="journal">\n <ref_label>[1]</ref_label>\n <ref_citation>Bekbaev U 2015 On classification of finite dimensional algebras arXiv: 1504.01194</ref_citation>\n <ref_item_title>On classification of finite dimensional algebras</ref_item_title>\n <ref_authors>Bekbaev U</ref_authors>\n <ref_year>2015</ref_year>\n</reference>'}, {'authors': 'Orosz J. A., Welsh W. F., Carter J. A. et al', 'journal': 'Sci', 'volume': '337', 'year': '2012', 'doi': '10.1126/science.1228380', 'refstr': 'Orosz J. A., Welsh W. F., Carter J. A. et al, 2012. Sci, 337. 
doi:10.1126/science.1228380', 'refraw': '<?xml version="1.0" encoding="ISO-8859-1" standalone="yes" ?><reference type="journal">\n <ref_citation>Orosz J. A., Welsh W. F., Carter J. A. et al 2012a Sci 337 1511</ref_citation>\n <ref_doi>10.1126/science.1228380</ref_doi>\n <ref_authors>Orosz J. A., Welsh W. F., Carter J. A. et al</ref_authors>\n <ref_journal>Sci</ref_journal>\n <ref_volume>337</ref_volume>\n <ref_year>2012</ref_year>\n <ref_start_page>1511</ref_start_page>\n</reference>'}, {'authors': 'Rao A. M., Zhou P., Wang K. A. et al', 'journal': 'Sci', 'volume': '259', 'year': '1993', 'refstr': 'Rao A. M., Zhou P., Wang K. A. et al, 1993. Sci, 259.', 'refraw': '<?xml version="1.0" encoding="ISO-8859-1" standalone="yes" ?><reference type="journal">\n <ref_citation>Rao A. M., Zhou P., Wang K. A. et al 1993 Sci 259 955</ref_citation>\n <ref_doi>10.1126/science.259.5097.955</ref_doi>\n <ref_authors>Rao A. M., Zhou P., Wang K. A. et al</ref_authors>\n <ref_journal>Sci</ref_journal>\n <ref_volume>259</ref_volume>\n <ref_year>1993</ref_year>\n <ref_start_page>955</ref_start_page>\n</reference>'}]}]
parsed_springer = [{'bibcode': '2020TEST..........R', 'references': [{'authors': 'Jaeckel, J, Ringwald, A', 'journal': 'Ann. Rev. Nucl. Part. Sci.', 'title': 'The Low-Energy Frontier of Particle Physics', 'volume': '60', 'page': '405', 'year': '2010', 'doi': '10.1146/annurev.nucl.012809.104433', 'arxiv': '1002.0329', 'refstr': 'Jaeckel, J, Ringwald, A, 2010. Ann. Rev. Nucl. Part. Sci., The Low-Energy Frontier of Particle Physics, 60, 405. doi:10.1146/annurev.nucl.012809.104433 arXiv:1002.0329', 'refraw': '<Citation ID="CR1">\n<CitationNumber>[1]</CitationNumber>\n<BibArticle>\n <BibAuthorName>\n <Initials>J</Initials>\n <FamilyName>Jaeckel</FamilyName>\n </BibAuthorName>\n <BibAuthorName>\n <Initials>A</Initials>\n <FamilyName>Ringwald</FamilyName>\n </BibAuthorName>\n <Year>2010</Year>\n <ArticleTitle Language="En">The Low-Energy Frontier of Particle Physics</ArticleTitle>\n <JournalTitle>Ann. Rev. Nucl. Part. Sci.</JournalTitle>\n <VolumeID>60</VolumeID>\n <FirstPage>405</FirstPage>\n <Occurrence Type="Bibcode">\n <Handle>2010ARNPS..60..405J</Handle>\n </Occurrence>\n <Occurrence Type="DOI">\n <Handle>10.1146/annurev.nucl.012809.104433</Handle>\n </Occurrence>\n <BibComments>[arXiv:1002.0329] [INSPIRE]</BibComments>\n</BibArticle>\n<BibUnstructured>J. Jaeckel and A. Ringwald, The Low-Energy Frontier of Particle Physics, Ann. Rev. Nucl. Part. Sci. 60\n (2010) 405 [\n <ExternalRef>\n <RefSource>arXiv:1002.0329</RefSource>\n <RefTarget Address="https://arxiv.org/abs/1002.0329" TargetType="URL"/>\n </ExternalRef>\n ] [\n <ExternalRef>\n <RefSource>INSPIRE</RefSource>\n <RefTarget Address="https://inspirehep.net/search?p=find+J+%22Ann.Rev.Nucl.Part.Sci.,60,405%22"\n TargetType="URL"/>\n </ExternalRef>\n ].\n</BibUnstructured>\n</Citation>'}, {'year': '2015', 'doi': '10.1017/CBO9781107706620', 'refstr': '2015, 10.1017/CBO9781107706620', 'refraw': '<Citation ID="CR86">\n<CitationNumber>[86]</CitationNumber>\n<BibUnstructured>H. Elvang and Y. Huang, Scattering Amplitudes in Gauge Theory and Gravity, Cambridge University Press,\n (2015), [\n <ExternalRef>\n <RefSource>https://doi.org/10.1017/CBO9781107706620</RefSource>\n <RefTarget Address="10.1017/CBO9781107706620" TargetType="DOI"/>\n </ExternalRef>\n ].\n</BibUnstructured>\n</Citation>'}, {'authors': 'Henneaux, Marc, Teitelboim, Claudio', 'journal': 'Quantum Mechanics of Fundamental Systems 2', 'title': 'Consistent Quantum Mechanics of Chiral p-Forms', 'page': '79', 'year': '1989', 'doi': '10.1007/978-1-4613-0797-6_8', 'refstr': 'Henneaux, Marc, Teitelboim, Claudio, 1989. Quantum Mechanics of Fundamental Systems 2, Consistent Quantum Mechanics of Chiral p-Forms, 79. doi:10.1007/978-1-4613-0797-6_8', 'refraw': '<Citation ID="CR6">\n<CitationNumber>[5]</CitationNumber>\n<BibChapter>\n <BibAuthorName>\n <Initials>Marc</Initials>\n <FamilyName>Henneaux</FamilyName>\n </BibAuthorName>\n <BibAuthorName>\n <Initials>Claudio</Initials>\n <FamilyName>Teitelboim</FamilyName>\n </BibAuthorName>\n <Year>1989</Year>\n <ChapterTitle Language="--">Consistent Quantum Mechanics of Chiral p-Forms</ChapterTitle>\n <BookTitle>Quantum Mechanics of Fundamental Systems 2</BookTitle>\n <PublisherName>Springer US</PublisherName>\n <PublisherLocation>Boston, MA</PublisherLocation>\n <FirstPage>79</FirstPage>\n <LastPage>112</LastPage>\n <Occurrence Type="DOI">\n <Handle>10.1007/978-1-4613-0797-6_8</Handle>\n </Occurrence>\n</BibChapter>\n<BibUnstructured>M. Henneaux and C. 
Teitelboim, Consistent quantum mechanics of chiral p forms, in 2nd Meeting on\n Quantum Mechanics of Fundamental Systems (CECS) Santiago, Chile, December 17–20, 1987, pp. 79–112.\n</BibUnstructured>\n</Citation>'}, {'authors': 'Elvang, H, Huang, YT', 'title': 'Scattering amplitudes in gauge theory and gravity', 'year': '2015', 'doi': '10.1017/CBO9781107706620', 'arxiv': '1332.81010', 'refstr': 'Elvang, H, Huang, YT, 2015. Scattering amplitudes in gauge theory and gravity. doi:10.1017/CBO9781107706620 arXiv:1332.81010', 'refraw': '<Citation ID="CR6i">\n<CitationNumber>[6]</CitationNumber>\n<BibBook>\n <BibAuthorName>\n <Initials>H</Initials>\n <FamilyName>Elvang</FamilyName>\n </BibAuthorName>\n <BibAuthorName>\n <Initials>YT</Initials>\n <FamilyName>Huang</FamilyName>\n </BibAuthorName>\n <Year>2015</Year>\n <BookTitle>Scattering amplitudes in gauge theory and gravity</BookTitle>\n <PublisherName>Cambridge University Press</PublisherName>\n <PublisherLocation>Cambridge, U.K.</PublisherLocation>\n <Occurrence Type="ZLBID">\n <Handle>1332.81010</Handle>\n </Occurrence>\n <Occurrence Type="DOI">\n <Handle>10.1017/CBO9781107706620</Handle>\n </Occurrence>\n</BibBook>\n<BibUnstructured>H. Elvang and Y.T. Huang, Scattering amplitudes in gauge theory and gravity, Cambridge University\n Press, Cambridge, U.K. (2015).\n</BibUnstructured>\n</Citation>'}, {'authors': 'Nyhus, PJ, Sumianto', 'journal': 'Oryx', 'title': 'Crop-raiding elephants and conservation implications at Way Kambas National Park, Sumatra, Indonesia', 'volume': '34', 'page': '262', 'year': '2000', 'refstr': 'Nyhus, PJ, Sumianto, 2000. Oryx, Crop-raiding elephants and conservation implications at Way Kambas National Park, Sumatra, Indonesia, 34, 262.', 'refraw': '<Citation ID="CR39">\n<BibArticle>\n <BibAuthorName>\n <Initials>PJ</Initials>\n <FamilyName>Nyhus</FamilyName>\n </BibAuthorName>\n <BibAuthorName>\n <Initials>R</Initials>\n <NoFamilyName/>\n </BibAuthorName>\n <BibAuthorName>\n <NoInitials/>\n <FamilyName>Sumianto</FamilyName>\n </BibAuthorName>\n <Year>2000</Year>\n <ArticleTitle Language="En">Crop-raiding elephants and conservation implications at Way Kambas National Park,\n Sumatra, Indonesia\n </ArticleTitle>\n <JournalTitle>Oryx</JournalTitle>\n <VolumeID>34</VolumeID>\n <FirstPage>262</FirstPage>\n <LastPage>274</LastPage>\n</BibArticle>\n<BibUnstructured>Nyhus PJ, Tilson R, Sumianto (2000) Crop-raiding elephants and conservation implications at Way Kambas\n National Park, Sumatra, Indonesia. Oryx 34:262–274\n</BibUnstructured>\n</Citation>'}, {'authors': 'Onur, M, Hegeman, P S, Gok, I M, Kuchuk, F J', 'journal': 'Proc. of the Int. Petroleum Technol. Conf., Doha (Qatar), December 7--9, 2009', 'title': 'A Novel Analysis Procedure for Estimating Thickness- Independent Horizontal and Vertical Permeabilities from Pressure Data at an Observation Probe Acquired by Packer-Probe Wireline Formation Testers', 'year': '2011', 'doi': '10.2118/148403-PA', 'refstr': 'Onur, M, Hegeman, P S, Gok, I M, Kuchuk, F J, 2011. Proc. of the Int. Petroleum Technol. Conf., Doha (Qatar), December 7--9, 2009. 
doi:10.2118/148403-PA', 'refraw': '<Citation ID="CR5">\n<CitationNumber>5.</CitationNumber>\n<BibChapter>\n <BibAuthorName>\n <Initials>M</Initials>\n <FamilyName>Onur</FamilyName>\n </BibAuthorName>\n <BibAuthorName>\n <Initials>P S</Initials>\n <FamilyName>Hegeman</FamilyName>\n </BibAuthorName>\n <BibAuthorName>\n <Initials>I M</Initials>\n <FamilyName>Gok</FamilyName>\n </BibAuthorName>\n <BibAuthorName>\n <Initials>F J</Initials>\n <FamilyName>Kuchuk</FamilyName>\n </BibAuthorName>\n <Year>2011</Year>\n <ChapterTitle Language="En">A Novel Analysis Procedure for Estimating Thickness- Independent Horizontal and Vertical\n Permeabilities from Pressure Data at an Observation Probe Acquired by Packer-Probe Wireline Formation Testers\n </ChapterTitle>\n <BookTitle>Proc. of the Int. Petroleum Technol. Conf., Doha (Qatar), December 7–9, 2009</BookTitle>\n</BibChapter>\n<BibUnstructured>M. Onur, P. S. Hegeman, I. M. Gok, and F. J. Kuchuk, “A Novel Analysis Procedure for Estimating\n Thickness- Independent Horizontal and Vertical Permeabilities from Pressure Data at an Observation Probe Acquired by\n Packer-Probe Wireline Formation Testers,” in Proc. of the Int. Petroleum Technol. Conf., Doha (Qatar),\n December 7–9, 2009 (Petroleum Eng., 2011);\n <ExternalRef>\n <RefSource>https://doi.org/10.2118/148403-PA</RefSource>\n <RefTarget Address="https://doi.org/10.2118/148403-PA" TargetType="URL"/>\n </ExternalRef>\n .\n</BibUnstructured>\n</Citation>'}, {'year': '2006', 'refplaintext': 'Kervella, P., amp Domiciano de Souza, A. (2006). The polar wind of the fast rotating Be star Achernar. VINCI/VLTI interferometric observations of an elongated polar envelope. Astronomy amp Astrophysics, 453, 1059.', 'refraw': '<Citation ID="CR12">\n<BibUnstructured>Kervella, P., & Domiciano de Souza, A. (2006). The polar wind of the fast rotating Be star\n Achernar. VINCI/VLTI interferometric observations of an elongated polar envelope. Astronomy & Astrophysics, 453,\n 1059.\n</BibUnstructured>\n</Citation>'}]}]
parsed_aps = [{'bibcode': '2020TEST..........R', 'references': [{'authors': 'Jensen H. J., Brass A., Berlinsky A. J.', 'journal': 'Phys. Rev. Lett.', 'volume': '60', 'page': '1676', 'year': '1988', 'doi': '10.1103/PhysRevLett.60.1676', 'refstr': 'Jensen H. J., Brass A., Berlinsky A. J., 1988. Phys. Rev. Lett., 60, 1676. doi:10.1103/PhysRevLett.60.1676', 'refraw': '<ref citid="c1">\n <jcite><refauth>H. J. Jensen</refauth>, <refauth>A. Brass</refauth>, and <refauth>A. J. Berlinsky</refauth>, <jtitle>\n Phys. Rev. Lett.\n </jtitle>\n <volume>60</volume>, <pages>1676</pages> (<date>1988</date>).\n <doi>10.1103/PhysRevLett.60.1676</doi>\n </jcite>\n </ref>'}, {'authors': 'Brandt E. H.', 'journal': 'Phys. Rev. B', 'volume': '50', 'page': '4034', 'year': '1994', 'doi': '10.1103/PhysRevB.50.4034', 'refstr': 'Brandt E. H., 1994. Phys. Rev. B, 50, 4034. doi:10.1103/PhysRevB.50.4034', 'refraw': '<ref citid="c31">\n <jcite><refauth>E. H. Brandt</refauth>, <jtitle>Phys. Rev. B</jtitle>\n <volume>50</volume>, <pages>4034</pages> (<date>1994</date>).\n <doi>10.1103/PhysRevB.50.4034</doi>\n </jcite>\n </ref>'}, {'authors': 'Golterman M., Peris S.', 'journal': 'J. High Energy Phys.', 'volume': '2001', 'issue': '01', 'page': '028', 'doi': '10.1088/1126-6708/2001/01/028', 'refstr': 'Golterman M., Peris S., J. High Energy Phys., 2001, 028, 10.1088/1126-6708/2001/01/028', 'issn': '1029-8479', 'refraw': '<ref citid="c36">\n <jcite>\n <refauth>M. Golterman</refauth>\n and <refauth>S. Peris</refauth>,\n <jtitle>J. High Energy Phys.</jtitle>\n <coden>JHEPFG</coden>\n <issn>1029-8479</issn>\n <issue>01</issue> (<volume>2001</volume>) <pages>028</pages>.\n <doi>10.1088/1126-6708/2001/01/028</doi>\n </jcite>\n </ref>'}, {'authors': 'Truong T. N.', 'arxiv': 'arXiv:hep-ph/0102300', 'refstr': 'Truong T. N., arXiv:hep-ph/0102300', 'refraw': '<ref citid="c88">\n <eprint><refauth>T. N. Truong</refauth>, <eprintid>arXiv:hep-ph/0102300</eprintid>.\n </eprint>\n </ref>'}, {'journal': 'Z. Phys. C', 'year': '1991', 'doi': '10.1007/BF01549692', 'refstr': '1991, Z. Phys. C, 10.1007/BF01549692', 'issn': '0170-9739', 'refraw': '<ref citid="c89">\n <jcite>\n <jtitle>Z. Phys. C</jtitle> (<date>1991</date>).\n <coden>ZPCFD2</coden>\n <issn>0170-9739</issn>\n <doi>10.1007/BF01549692</doi>\n </jcite>\n </ref>'}]}]
parsed_nature = [{'bibcode': '2020TEST..........R', 'references': [{'authors': 'Campeau, E. et al. ', 'journal': 'PLoS One ', 'title': 'A versatile viral system for expression and depletion of proteins in mammalian cells', 'year': '2009', 'refstr': 'Campeau, E. et al. , 2009. A versatile viral system for expression and depletion of proteins in mammalian cells.', 'refraw': '<reftxt>\n<refau><snm>Campeau</snm>,\n <fnm>E.</fnm>\n</refau>\net al. <atl>A versatile viral system for expression and depletion of proteins in mammalian cells</atl>. <jtl>PLoS One\n</jtl> 4, e6529 (2009)\n</reftxt>'}, {'authors': 'Inaba, M., Buszczack, M., Yamashita, Y. M.', 'journal': 'Nature', 'volume': '523', 'page': '329', 'year': '2015', 'refstr': 'Inaba, M., Buszczack, M., Yamashita, Y. M., 2015. Nature, 523, 329.', 'refraw': '<reftxt>\n<refau><snm>Inaba</snm>,\n <fnm>M.</fnm>\n</refau>\n,\n<refau><snm>Buszczack</snm>,\n <fnm>M.</fnm>\n</refau>\n__amp__amp;amp\n<refau><snm>Yamashita</snm>,\n <fnm>Y. M.</fnm>\n</refau>\n<jtl>Nature</jtl>\n523, 329__amp__#8211;332 (2015).\n</reftxt>'}]}]
parsed_aip = [{'bibcode': '2020TEST..........R', 'references': [{'authors': 'Smolenskii, G., Ioffe, V.', 'title': 'Communications de Colloque International de Magnetism de Grenoble', 'year': '1958', 'refstr': 'Smolenskii, G., Ioffe, V., 1958. Communications de Colloque International de Magnetism de Grenoble.', 'refraw': '<ref>\n <biother>\n <biaugrp>\n <biauth>\n <bifname>G. A.</bifname>\n <punct></punct>\n <bilname>Smolenskii</bilname>\n </biauth>\n <punct>and</punct>\n <biauth>\n <bifname>V. A.</bifname>\n <punct></punct>\n <bilname>Ioffe</bilname>\n </biauth>\n </biaugrp>\n <punct>,</punct>\n <emph_1>Communications de Colloque International de Magnetism de Grenoble</emph_1>\n <punct>(</punct>\n <othinfo>France), 2-6 Jullet, Communication No.1</othinfo>\n <punct>(</punct>\n <year>1958</year>\n <punct>).</punct>\n </biother>\n</ref>'}, {'authors': 'Jansen, M., Bak, P.', 'journal': 'Phys. Rev. B', 'volume': '27', 'page': '6853', 'doi': '10.1103/PhysRevB.27.6853', 'refstr': 'Jansen, M., Bak, P., Phys. Rev. B, 27, 6853, 10.1103/PhysRevB.27.6853', 'refraw': '<ref>\n <art>\n <biaugrp>\n <biauth>\n <bifname>M. H.</bifname>\n <punct></punct>\n <bilname>Jansen</bilname>\n </biauth>\n <punct>and</punct>\n <biauth>\n <bifname>P.</bifname>\n <punct></punct>\n <bilname>Bak</bilname>\n </biauth>\n </biaugrp>\n <punct>,</punct>\n <journal>Phys. Rev. B</journal>\n <punct></punct>\n <vol>27</vol>\n <punct>,</punct>\n <pp>6853</pp>\n <punct>.</punct>\n <plink>\n <linkkey_dbkey>prb</linkkey_dbkey>\n <linkkey_coden>PRBMDO</linkkey_coden>\n <linkkey_issn>0163-1829</linkkey_issn>\n <linkkey_doi>10.1103/PhysRevB.27.6853</linkkey_doi>\n </plink>\n </art>\n</ref>'}, {'authors': 'Binder, К., Landau, D.', 'journal': 'Phys. Rev. B', 'volume': '21', 'page': '1941', 'year': '1941', 'refstr': 'Binder, К., Landau, D., 1941. Phys. Rev. B, 21, 1941.', 'refraw': '<ref>\n <art>\n <biaugrp>\n <biauth>\n <bifname>К.</bifname>\n <punct></punct>\n <bilname>Binder</bilname>\n </biauth>\n <punct>and</punct>\n <biauth>\n <bifname>D. P.</bifname>\n <punct></punct>\n <bilname>Landau</bilname>\n </biauth>\n </biaugrp>\n <punct>,</punct>\n <journal>Phys. Rev. B</journal>\n <punct></punct>\n <vol>21</vol>\n <punct>,</punct>\n <pp>1941</pp>\n <punct>.</punct>\n <plink>\n <linkkey_dbkey>prb</linkkey_dbkey>\n <linkkey_coden>PRBMDO</linkkey_coden>\n <linkkey_issn>0163-1829</linkkey_issn>\n </plink>\n </art>\n</ref>'}]}]
parsed_wiley = [{'bibcode': '2020TEST..........R', 'references': [{'authors': 'Benkovitz, C. M.', 'journal': 'Atmos. Environ.', 'title': 'Compilation of an inventory of anthropogenic emissions in the United States and Canada', 'volume': '16', 'page': '1551', 'year': '1982', 'refstr': 'Benkovitz, C. M., 1982. Atmos. Environ., Compilation of an inventory of anthropogenic emissions in the United States and Canada, 16, 1551.', 'refraw': '<citation type="journal" xmlid="jgrd3831-cit-0001">\n <author><familyName>Benkovitz</familyName>,\n <givenNames>C. M.</givenNames>\n </author>\n , <articleTitle>Compilation of an inventory of anthropogenic emissions in the United States and\n Canada</articleTitle>, <journalTitle>Atmos. Environ.</journalTitle>, <vol>16</vol>, <pageFirst>\n 1551</pageFirst>-<pageLast>1563</pageLast>, <pubYear year="1982">1982</pubYear>.\n</citation>'}, {'authors': 'Crisp, D., Meadows, V. S., Allen, D. A., Bezard, B., Bergh, C., Maillard, J.-P.', 'journal': 'International Colloquium on Venus', 'title': '; Near-infrared oxygen airglow from the Venus night side', 'year': '1992', 'refstr': 'Crisp, D., Meadows, V. S., Allen, D. A., Bezard, B., Bergh, C., Maillard, J.-P., 1992. ; Near-infrared oxygen airglow from the Venus night side.', 'refraw': '<citation type="book" xmlid="jgre558-cit-0015">\n <author><familyName>Crisp</familyName>,\n <givenNames>D.</givenNames>\n </author>\n ,\n <author>\n <givenNames>V. S.</givenNames>\n <familyName>Meadows</familyName>\n </author>\n ,\n <author>\n <givenNames>D. A.</givenNames>\n <familyName>Allen</familyName>\n </author>\n ,\n <author>\n <givenNames>B.</givenNames>\n <familyName>Bezard</familyName>\n </author>\n ,\n <author>\n <givenNames>C.</givenNames>\n <familyNamePrefix>de</familyNamePrefix>\n <familyName>Bergh</familyName>\n </author>\n ,\n <author>\n <givenNames>J.-P.</givenNames>\n <familyName>Maillard</familyName>\n </author>\n , <chapterTitle>Near-infrared oxygen airglow from the Venus night side</chapterTitle>, <bookTitle>International\n Colloquium on Venus</bookTitle>, <otherTitle>Lunar and Planet. Inst. Cont.</otherTitle>, <vol>789</vol>, <pageFirst>\n 23</pageFirst>-<pageLast>24</pageLast>, <publisherName>Lunar and Planet. Inst.</publisherName>, <publisherLoc>\n Houston</publisherLoc>, <pubYear year="1992">1992</pubYear>.\n</citation>'}, {'authors': 'Radke, L. F.', 'journal': 'Airborne observations of cloud microphysics modified by anthropogenic forcing', 'page': '310', 'year': '1989', 'refstr': 'Radke, L. F., 1989. Airborne observations of cloud microphysics modified by anthropogenic forcing, 310.', 'refraw': '<citation type="other" xmlid="jgrd3831-cit-0018">\n <author><familyName>Radke</familyName>,\n <givenNames>L. F.</givenNames>\n </author>\n , <otherTitle>Airborne observations of cloud microphysics modified by anthropogenic forcing</otherTitle>,\n <otherTitle>Preprints — Symposium on the Role of Clouds in Atmospheric Chemistry and Global\n Climate, Anaheim, CA</otherTitle>, <pageFirst>310</pageFirst>-<pageLast>315</pageLast>Am. Meteorol.\n Soc., <publisherLoc>Boston</publisherLoc>, <pubYear year="1989">1989</pubYear>.\n</citation>'}, {'authors': "Lindaas, J., Pollack, I. B., Calahorrano, J. J., O'Dell, K., Garofalo, L. A., Pothier, M. A.", 'journal': '(in review at journal of Geophysical research: Atmospheres)', 'title': 'Empirical insights into the fate of ammonia in western U.S. 
wildfire smoke plumes', 'refraw': '<citation type="other" xmlid="jgrd56720-cit-0046">\n <author><familyName>Lindaas</familyName>,\n <givenNames>J.</givenNames>\n </author>,\n <author><familyName>Pollack</familyName>,\n <givenNames>I. B.</givenNames>\n </author>,\n <author><familyName>Calahorrano</familyName>,\n <givenNames>J. J.</givenNames>\n </author>,\n <author><familyName>O\'Dell</familyName>,\n <givenNames>K.</givenNames>\n </author>,\n <author><familyName>Garofalo</familyName>,\n <givenNames>L. A.</givenNames>\n </author>,\n <author><familyName>Pothier</familyName>,\n <givenNames>M. A.</givenNames>\n </author>,\n et al.\n <otherTitle>Empirical insights into the fate of ammonia in western U.S. wildfire smoke plumes</otherTitle>.\n <bookTitle>(in review at journal of Geophysical research: Atmospheres)</bookTitle>.\n</citation>'}]}]
parsed_nlm3 = [{'bibcode': '2020TEST..........R', 'references': [{'authors': 'Facebook', 'journal': 'http://www.facebook.com/press/info.php?statistics', 'year': '2012', 'refplaintext': 'Facebook 2012 February', 'refraw': '<ref id="B1">\n<nlm-citation citation-type="web">\n <collab>Facebook</collab>\n <source>\n \n http://www.facebook.com/press/info.php?statistics\n \n </source>\n <year>2012</year>\n <comment>February</comment>\n</nlm-citation>\n</ref>'}, {'authors': 'Brin, S', 'journal': 'Comput Netw', 'title': 'The anatomy of a large-scale hypertextual web search engine', 'volume': '30', 'page': '107', 'year': '1998', 'refstr': 'Brin, S, 1998. Comput Netw, The anatomy of a large-scale hypertextual web search engine, 30, 107.', 'refraw': '<ref id="B4">\n<nlm-citation citation-type="journal">\n <person-group person-group-type="author">\n <name>\n <surname>Brin</surname>\n <given-names>S</given-names>\n </name>\n <name>\n <surname>Page</surname>\n <given-names>L</given-names>\n </name>\n </person-group>\n <article-title>The anatomy of a large-scale hypertextual web search engine</article-title>\n <source>Comput Netw</source>\n <year>1998</year>\n <volume>30</volume>\n <fpage>107</fpage>\n <lpage>117</lpage>\n</nlm-citation>\n</ref>'}, {'authors': 'Energy Information Administration', 'journal': 'Annual Energy Outlook 2011', 'year': '2011', 'refplaintext': 'U.S. Energy Information Administration Annual Energy Outlook 2011 2011 ( www.eia.gov/forecasts/aeo/)', 'refraw': '<ref id="B7">\n<nlm-citation citation-type="other">\n <collab>U.S. Energy Information Administration</collab>\n <source>Annual Energy Outlook 2011</source>\n <year>2011</year>\n <comment>(\n www.eia.gov/forecasts/aeo/)\n </comment>\n</nlm-citation>\n</ref>'}, {'authors': 'Cervero, J. M., Lohe, M. A.', 'journal': 'Exact Monopole Solution and Euclidean Yang–Mills Field', 'volume': '70', 'page': '325', 'year': '1977', 'doi': 'doi:10.1016/0370-2693(77)90669-4', 'refstr': 'Cervero, J. M., Lohe, M. A., 1977. Exact Monopole Solution and Euclidean Yang–Mills Field, 70, 325. doi:doi:10.1016/0370-2693(77)90669-4', 'refraw': '<ref id="r11">\n<label>11</label>\n<mixed-citation publication-type="book">\n <person-group person-group-type="author">\n <string-name name-style="western">\n <given-names>J. M.</given-names>\n <surname>Cervero</surname>\n </string-name>\n </person-group>\n , <source>Exact Monopole Solution and Euclidean Yang–Mills Field</source> (<publisher-name>Harvard\n University</publisher-name>, <year>1977</year>) <comment>preprint HUTP-77/A011</comment>;\n</mixed-citation>\n<mixed-citation publication-type="journal">\n <person-group person-group-type="author">\n <string-name name-style="western">\n <given-names>M. A.</given-names>\n <surname>Lohe</surname>\n </string-name>\n </person-group>\n ,\n <source>Phys. Lett. 
B</source>\n <volume>70</volume>, <fpage>325</fpage> (<year>1977</year>).\n <pub-id pub-id-type="doi">10.1016/0370-2693(77)90669-4</pub-id>\n</mixed-citation>\n</ref>'}, {'authors': 'Chaikin, PM', 'journal': 'Principles of Condensed Matter Physics', 'year': '2000', 'refplaintext': 'Chaikin PM Lubensky TC Principles of Condensed Matter Physics 2000 Cambridge Univ Press', 'refraw': '<ref id="B1i">\n<nlm-citation citation-type="book">\n <person-group person-group-type="author">\n <name>\n <surname>Chaikin</surname>\n <given-names>PM</given-names>\n </name>\n <name>\n <surname>Lubensky</surname>\n <given-names>TC</given-names>\n </name>\n </person-group>\n <source>Principles of Condensed Matter Physics</source>\n <year>2000</year>\n <publisher-name>Cambridge Univ Press</publisher-name>\n</nlm-citation>\n</ref>'}, {'authors': 'Knollmüller, J., Enßlin, T. A.', 'year': '2019', 'arxiv': '1901.11033', 'refstr': 'Knollmüller, J., Enßlin, T. A., 2019, 1901.11033', 'refraw': '<ref id="R5">\n<mixed-citation publication-type="other">\n <string-name><surname>Knollmüller</surname>,\n <given-names>J.</given-names>\n </string-name>\n , &\n <string-name><surname>Enßlin</surname>,\n <given-names>T. A.</given-names>\n </string-name>\n <year>2019</year>, ArXiv preprint [\n arXiv:1901.11033]\n</mixed-citation>\n</ref>'}, {'authors': 'Meija, J., Coplen, T.B., Berglund, M., Brand, W.A., Bièvre, P.D., Gröning, M.', 'journal': 'Pure Appl. Chem.', 'title': 'Atomic weights of the elements 2013', 'volume': '88', 'page': '265', 'year': '2016', 'refstr': 'Meija, J., Coplen, T.B., Berglund, M., Brand, W.A., Bièvre, P.D., Gröning, M., 2016. Pure Appl. Chem., Atomic weights of the elements 2013, 88, 265.', 'refraw': '<ref id="R10">\n<label>10</label>\n<mixed-citation publication-type="journal">\n <string-name>\n <given-names>J.</given-names>\n <surname>Meija</surname>\n </string-name>\n ,\n <string-name>\n <given-names>T.B.</given-names>\n <surname>Coplen</surname>\n </string-name>\n ,\n <string-name>\n <given-names>M.</given-names>\n <surname>Berglund</surname>\n </string-name>\n ,\n <string-name>\n <given-names>W.A.</given-names>\n <surname>Brand</surname>\n </string-name>\n ,\n <string-name>\n <given-names>P.D.</given-names>\n <surname>Bièvre</surname>\n </string-name>\n ,\n <string-name>\n <given-names>M.</given-names>\n <surname>Gröning</surname>\n </string-name>\n et al., <article-title>Atomic weights of the elements 2013</article-title>, <source>Pure Appl. Chem.</source>\n <volume>88</volume>, <fpage>265</fpage> (<year>2016</year>)&#13;\n</mixed-citation>\n</ref>'}]}]
parsed_agu = [{'bibcode': '2020TEST..........R', 'references': [{'authors': 'Bouwman', 'journal': 'Global Biogeochem. Cycles', 'volume': '7', 'page': '557', 'year': '1993', 'refstr': 'Bouwman, 1993. Global Biogeochem. Cycles, 7, 557.', 'refraw': '<citation id="bouw93">\n <journal_title>Global Biogeochem. Cycles</journal_title>\n <first_author>Bouwman</first_author>\n <reftitle>Global analysis of the potential for N<sub>2</sub>O production in natural soils</reftitle>\n <volume>7</volume>\n <firstPage>557</firstPage>\n <CitationNumber>null</CitationNumber>\n <year>1993</year>\n <partOfCode>journal</partOfCode>\n <paperType>article</paperType>\n</citation>'}, {'authors': 'Scheffer', 'journal': 'Lehrbuch der Bodenkunde', 'year': '1992', 'refplaintext': 'Scheffer, Lehrbuch der Bodenkunde, 1992, standalone, book', 'refraw': '<citation id="sche92">\n <journal_title>null</journal_title>\n <first_author>Scheffer</first_author>\n <reftitle>Lehrbuch der Bodenkunde</reftitle>\n <firstPage>null</firstPage>\n <CitationNumber>null</CitationNumber>\n <year>1992</year>\n <partOfCode>standalone</partOfCode>\n <paperType>book</paperType>\n</citation>'}, {'authors': 'Yu', 'journal': 'J. Geophys. Res. D', 'volume': '113', 'page': 'DD14S12', 'year': '2008', 'doi': '10.1029/2007JD009349', 'refstr': 'Yu, 2008. J. Geophys. Res. D, 113, DD14S12. doi:10.1029/2007JD009349', 'refraw': '<citation id="yu08">\n <journal_title>J. Geophys. Res.</journal_title>\n <first_author>Yu</first_author>\n <reftitle>A satellite-based assessment of transpacific transport of pollution aerosol</reftitle>\n <volume>113</volume>\n <firstPage>null</firstPage>\n <CitationNumber>D14S12</CitationNumber>\n <year>2008</year>\n <DOI>10.1029/2007JD009349</DOI>\n <partOfCode>journal</partOfCode>\n <paperType>article</paperType>\n</citation>'}, {'authors': 'Tjernstrom', 'journal': 'J. Geophys Res. D', 'volume': '101', 'issue': 'D14', 'year': '1996', 'refstr': 'Tjernstrom, 1996. J. Geophys Res. D, 101.', 'refraw': '<citation id="tjer">\n <journal_title>J. Geophys Res.</journal_title>\n <first_author>Tjernström</first_author>\n <reftitle>Thermal mesoscale circulations on the Baltic coast, Part 1, A numerical case study</reftitle>\n <volume>101</volume>\n <issue>D14</issue>\n <firstPage>null</firstPage>\n <CitationNumber>null</CitationNumber>\n <year>1996</year>\n <partOfCode>journal</partOfCode>\n <paperType>article</paperType>\n</citation>'}, {'doi': '10.5194/acp-8-6117-2008', 'refstr': '10.5194/acp-8-6117-2008', 'refraw': '<citation id="zhan08">\n <DOI>10.5194/acp-8-6117-2008</DOI>\n <partOfCode>journal</partOfCode>\n <paperType>article</paperType>\n</citation>'}]}]
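# Illustration (not part of the original fixture file): each parsed_* variable
# above holds one record per bibcode, and each record carries a "references"
# list of parsed citation fields. A quick structural sanity check:
#
#     assert parsed_agu[0]['bibcode'] == '2020TEST..........R'
#     assert parsed_agu[0]['references'][2]['doi'] == '10.1029/2007JD009349'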
| 3,060.458333
| 20,810
| 0.607698
|
ca0b4a4161064461096685786242a5610cac0b2a
| 111
|
py
|
Python
|
neo_db/config.py
|
liupuchun/KGQA-of-HongLouMeng
|
2d2a1192f7d2850fb306dbb948177370140a652d
|
[
"MIT"
] | 1
|
2020-06-03T08:07:37.000Z
|
2020-06-03T08:07:37.000Z
|
neo_db/config.py
|
liupuchun/KGQA-of-HongLouMeng
|
2d2a1192f7d2850fb306dbb948177370140a652d
|
[
"MIT"
] | null | null | null |
neo_db/config.py
|
liupuchun/KGQA-of-HongLouMeng
|
2d2a1192f7d2850fb306dbb948177370140a652d
|
[
"MIT"
] | null | null | null |
from py2neo import Graph
graph = Graph(
"http://localhost:7474",
username="neo4j",
password="hlm"
)
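# A minimal usage sketch (added for illustration, not part of the original
# config): run a Cypher query through the `graph` handle defined above and
# print the rows. The query and the `name` property are assumptions about
# how the HongLouMeng graph is modelled.
if __name__ == "__main__":
    for record in graph.run("MATCH (n) RETURN n.name AS name LIMIT 5").data():
        print(record)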
| 18.5
| 28
| 0.648649
|
da79a26ccdb0db3e03f38bdaf8cc89dc05bb00c4
| 11,264
|
py
|
Python
|
resources/WPy32/python-3.10.2/Lib/site-packages/Cryptodome/Cipher/_mode_cbc.py
|
eladkarako/yt-dlp_kit
|
6365651111ef4d2f94335cf38bf4d9b0136d42d2
|
[
"Unlicense"
] | 1
|
2022-03-26T15:43:50.000Z
|
2022-03-26T15:43:50.000Z
|
resources/WPy32/python-3.10.2/Lib/site-packages/Cryptodome/Cipher/_mode_cbc.py
|
eladkarako/yt-dlp_kit
|
6365651111ef4d2f94335cf38bf4d9b0136d42d2
|
[
"Unlicense"
] | null | null | null |
resources/WPy32/python-3.10.2/Lib/site-packages/Cryptodome/Cipher/_mode_cbc.py
|
eladkarako/yt-dlp_kit
|
6365651111ef4d2f94335cf38bf4d9b0136d42d2
|
[
"Unlicense"
] | 1
|
2022-03-28T19:28:45.000Z
|
2022-03-28T19:28:45.000Z
|
# ===================================================================
#
# Copyright (c) 2014, Legrandin <helderijs@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
"""
Ciphertext Block Chaining (CBC) mode.
"""
__all__ = ['CbcMode']
from Cryptodome.Util.py3compat import _copy_bytes
from Cryptodome.Util._raw_api import (load_pycryptodome_raw_lib, VoidPointer,
create_string_buffer, get_raw_buffer,
SmartPointer, c_size_t, c_uint8_ptr,
is_writeable_buffer)
from Cryptodome.Random import get_random_bytes
raw_cbc_lib = load_pycryptodome_raw_lib("Cryptodome.Cipher._raw_cbc", """
int CBC_start_operation(void *cipher,
const uint8_t iv[],
size_t iv_len,
void **pResult);
int CBC_encrypt(void *cbcState,
const uint8_t *in,
uint8_t *out,
size_t data_len);
int CBC_decrypt(void *cbcState,
const uint8_t *in,
uint8_t *out,
size_t data_len);
int CBC_stop_operation(void *state);
"""
)
class CbcMode(object):
"""*Cipher-Block Chaining (CBC)*.
Each of the ciphertext blocks depends on the current
and all previous plaintext blocks.
An Initialization Vector (*IV*) is required.
See `NIST SP800-38A`_ , Section 6.2 .
.. _`NIST SP800-38A` : http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
:undocumented: __init__
"""
def __init__(self, block_cipher, iv):
"""Create a new block cipher, configured in CBC mode.
:Parameters:
block_cipher : C pointer
A smart pointer to the low-level block cipher instance.
iv : bytes/bytearray/memoryview
The initialization vector to use for encryption or decryption.
It is as long as the cipher block.
**The IV must be unpredictable**. Ideally it is picked randomly.
Reusing the *IV* for encryptions performed with the same key
compromises confidentiality.
"""
self._state = VoidPointer()
result = raw_cbc_lib.CBC_start_operation(block_cipher.get(),
c_uint8_ptr(iv),
c_size_t(len(iv)),
self._state.address_of())
if result:
raise ValueError("Error %d while instantiating the CBC mode"
% result)
# Ensure that object disposal of this Python object will (eventually)
# free the memory allocated by the raw library for the cipher mode
self._state = SmartPointer(self._state.get(),
raw_cbc_lib.CBC_stop_operation)
# Memory allocated for the underlying block cipher is now owned
# by the cipher mode
block_cipher.release()
self.block_size = len(iv)
"""The block size of the underlying cipher, in bytes."""
self.iv = _copy_bytes(None, None, iv)
"""The Initialization Vector originally used to create the object.
The value does not change."""
self.IV = self.iv
"""Alias for `iv`"""
self._next = [ self.encrypt, self.decrypt ]
def encrypt(self, plaintext, output=None):
"""Encrypt data with the key and the parameters set at initialization.
A cipher object is stateful: once you have encrypted a message
you cannot encrypt (or decrypt) another message using the same
object.
The data to encrypt can be broken up in two or
more pieces and `encrypt` can be called multiple times.
That is, the statement:
>>> c.encrypt(a) + c.encrypt(b)
is equivalent to:
>>> c.encrypt(a+b)
That also means that you cannot reuse an object for encrypting
or decrypting other data with the same key.
This function does not add any padding to the plaintext.
:Parameters:
plaintext : bytes/bytearray/memoryview
The piece of data to encrypt.
Its length must be a multiple of the cipher block size.
:Keywords:
output : bytearray/memoryview
The location where the ciphertext must be written to.
If ``None``, the ciphertext is returned.
:Return:
If ``output`` is ``None``, the ciphertext is returned as ``bytes``.
Otherwise, ``None``.
"""
if self.encrypt not in self._next:
raise TypeError("encrypt() cannot be called after decrypt()")
self._next = [ self.encrypt ]
if output is None:
ciphertext = create_string_buffer(len(plaintext))
else:
ciphertext = output
if not is_writeable_buffer(output):
raise TypeError("output must be a bytearray or a writeable memoryview")
if len(plaintext) != len(output):
raise ValueError("output must have the same length as the input"
" (%d bytes)" % len(plaintext))
result = raw_cbc_lib.CBC_encrypt(self._state.get(),
c_uint8_ptr(plaintext),
c_uint8_ptr(ciphertext),
c_size_t(len(plaintext)))
if result:
if result == 3:
raise ValueError("Data must be padded to %d byte boundary in CBC mode" % self.block_size)
raise ValueError("Error %d while encrypting in CBC mode" % result)
if output is None:
return get_raw_buffer(ciphertext)
else:
return None
def decrypt(self, ciphertext, output=None):
"""Decrypt data with the key and the parameters set at initialization.
A cipher object is stateful: once you have decrypted a message
you cannot decrypt (or encrypt) another message with the same
object.
The data to decrypt can be broken up in two or
more pieces and `decrypt` can be called multiple times.
That is, the statement:
>>> c.decrypt(a) + c.decrypt(b)
is equivalent to:
>>> c.decrypt(a+b)
This function does not remove any padding from the plaintext.
:Parameters:
ciphertext : bytes/bytearray/memoryview
The piece of data to decrypt.
Its length must be a multiple of the cipher block size.
:Keywords:
output : bytearray/memoryview
The location where the plaintext must be written to.
If ``None``, the plaintext is returned.
:Return:
If ``output`` is ``None``, the plaintext is returned as ``bytes``.
Otherwise, ``None``.
"""
if self.decrypt not in self._next:
raise TypeError("decrypt() cannot be called after encrypt()")
self._next = [ self.decrypt ]
if output is None:
plaintext = create_string_buffer(len(ciphertext))
else:
plaintext = output
if not is_writeable_buffer(output):
raise TypeError("output must be a bytearray or a writeable memoryview")
if len(ciphertext) != len(output):
raise ValueError("output must have the same length as the input"
" (%d bytes)" % len(plaintext))
result = raw_cbc_lib.CBC_decrypt(self._state.get(),
c_uint8_ptr(ciphertext),
c_uint8_ptr(plaintext),
c_size_t(len(ciphertext)))
if result:
if result == 3:
raise ValueError("Data must be padded to %d byte boundary in CBC mode" % self.block_size)
raise ValueError("Error %d while decrypting in CBC mode" % result)
if output is None:
return get_raw_buffer(plaintext)
else:
return None
def _create_cbc_cipher(factory, **kwargs):
"""Instantiate a cipher object that performs CBC encryption/decryption.
:Parameters:
factory : module
The underlying block cipher, a module from ``Cryptodome.Cipher``.
:Keywords:
iv : bytes/bytearray/memoryview
The IV to use for CBC.
IV : bytes/bytearray/memoryview
Alias for ``iv``.
Any other keyword will be passed to the underlying block cipher.
See the relevant documentation for details (at least ``key`` will need
to be present).
"""
cipher_state = factory._create_base_cipher(kwargs)
iv = kwargs.pop("IV", None)
IV = kwargs.pop("iv", None)
if (None, None) == (iv, IV):
iv = get_random_bytes(factory.block_size)
if iv is not None:
if IV is not None:
raise TypeError("You must either use 'iv' or 'IV', not both")
else:
iv = IV
if len(iv) != factory.block_size:
raise ValueError("Incorrect IV length (it must be %d bytes long)" %
factory.block_size)
if kwargs:
raise TypeError("Unknown parameters for CBC: %s" % str(kwargs))
return CbcMode(cipher_state, iv)
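# Usage sketch (added for illustration; CbcMode instances are normally created
# through the public cipher factories rather than directly, so the example
# goes through Cryptodome's AES interface):
#
#     from Cryptodome.Cipher import AES
#     from Cryptodome.Util.Padding import pad, unpad
#     from Cryptodome.Random import get_random_bytes
#
#     key = get_random_bytes(16)
#     encryptor = AES.new(key, AES.MODE_CBC)           # a random IV is generated
#     ct = encryptor.encrypt(pad(b"secret data", AES.block_size))
#     decryptor = AES.new(key, AES.MODE_CBC, iv=encryptor.iv)
#     assert unpad(decryptor.decrypt(ct), AES.block_size) == b"secret data"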
| 38.312925
| 106
| 0.566229
|
8be59dca3d699154a7140412e5d27ae3ac1dfa72
| 3,644
|
py
|
Python
|
Lesson12/QuizSix.py
|
SamRicha/CS101
|
9b6c1334293221a5c7a68f450aa069a19b50d08d
|
[
"MIT"
] | null | null | null |
Lesson12/QuizSix.py
|
SamRicha/CS101
|
9b6c1334293221a5c7a68f450aa069a19b50d08d
|
[
"MIT"
] | null | null | null |
Lesson12/QuizSix.py
|
SamRicha/CS101
|
9b6c1334293221a5c7a68f450aa069a19b50d08d
|
[
"MIT"
] | null | null | null |
# The web crawler we built at the end of Unit 3 has some serious
# flaws if we were going to use it in a real crawler. One
# problem is if we start with a good seed page, it might
# run for an extremely long time (even forever, since the
# number of URLs on the web is not actually finite). This
# question and the following one explore two different ways
# to limit the pages that it can crawl.
# Modify the crawl_web procedure to take a second parameter,
# max_pages, that limits the number of pages to crawl.
# Your procedure should terminate the crawl after
# max_pages different pages have been crawled, or when
# there are no more pages to crawl.
# The following definition of get_page provides an interface
# to the website found at http://www.udacity.com/cs101x/index.html
# The function output order does not affect grading.
def get_page(url):
try:
if url == "http://www.udacity.com/cs101x/index.html":
return ('<html> <body> This is a test page for learning to crawl! '
'<p> It is a good idea to '
'<a href="http://www.udacity.com/cs101x/crawling.html">learn to '
'crawl</a> before you try to '
'<a href="http://www.udacity.com/cs101x/walking.html">walk</a> '
'or <a href="http://www.udacity.com/cs101x/flying.html">fly</a>. '
'</p> </body> </html> ')
elif url == "http://www.udacity.com/cs101x/crawling.html":
return ('<html> <body> I have not learned to crawl yet, but I '
'am quite good at '
'<a href="http://www.udacity.com/cs101x/kicking.html">kicking</a>.'
'</body> </html>')
elif url == "http://www.udacity.com/cs101x/walking.html":
return ('<html> <body> I cant get enough '
'<a href="http://www.udacity.com/cs101x/index.html">crawling</a>! '
'</body> </html>')
elif url == "http://www.udacity.com/cs101x/flying.html":
return ('<html> <body> The magic words are Squeamish Ossifrage! '
'</body> </html>')
except:
return ""
return ""
def get_next_target(page):
start_link = page.find('<a href=')
if start_link == -1:
return None, 0
start_quote = page.find('"', start_link)
end_quote = page.find('"', start_quote + 1)
url = page[start_quote + 1:end_quote]
return url, end_quote
def union(p,q):
for e in q:
if e not in p:
p.append(e)
def get_all_links(page):
links = []
while True:
url,endpos = get_next_target(page)
if url:
links.append(url)
page = page[endpos:]
else:
break
return links
def crawl_web(seed, max_pages):
tocrawl = [seed]
crawled = []
i = 0
while tocrawl and i < max_pages:
page = tocrawl.pop()
if page not in crawled:
union(tocrawl, get_all_links(get_page(page)))
crawled.append(page)
i += 1
return crawled
print crawl_web("http://www.udacity.com/cs101x/index.html",1)
#>>> ['http://www.udacity.com/cs101x/index.html']
print crawl_web("http://www.udacity.com/cs101x/index.html",3)
#>>> ['http://www.udacity.com/cs101x/index.html',
#>>> 'http://www.udacity.com/cs101x/flying.html',
#>>> 'http://www.udacity.com/cs101x/walking.html']
print crawl_web("http://www.udacity.com/cs101x/index.html",500)
#>>> ['http://www.udacity.com/cs101x/index.html',
#>>> 'http://www.udacity.com/cs101x/flying.html',
#>>> 'http://www.udacity.com/cs101x/walking.html',
#>>> 'http://www.udacity.com/cs101x/crawling.html',
#>>> 'http://www.udacity.com/cs101x/kicking.html']
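# Added illustration (not part of the original quiz): the link-extraction
# helpers can be exercised on their own, without crawling. The page string
# below is made up.
#
# page = '<a href="http://example.com/a">a</a> and <a href="http://example.com/b">b</a>'
# get_next_target(page)  # -> ('http://example.com/a', 29)  (29 is the index of the closing quote)
# get_all_links(page)    # -> ['http://example.com/a', 'http://example.com/b']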
| 37.56701
| 79
| 0.617453
|
8c68b2aa762a633f466ac1b7fc44abaffc028139
| 4,877
|
py
|
Python
|
MessagePayload.py
|
aws-samples/aws-connected-mobility-solution-telemetry-device-demo
|
eea82db46bd073198b68e776c7795aeef0034fe9
|
[
"MIT-0"
] | 2
|
2021-02-05T19:16:01.000Z
|
2021-12-02T19:54:29.000Z
|
MessagePayload.py
|
aws-samples/aws-connected-mobility-solution-telemetry-device-demo
|
eea82db46bd073198b68e776c7795aeef0034fe9
|
[
"MIT-0"
] | null | null | null |
MessagePayload.py
|
aws-samples/aws-connected-mobility-solution-telemetry-device-demo
|
eea82db46bd073198b68e776c7795aeef0034fe9
|
[
"MIT-0"
] | 2
|
2021-02-24T17:10:53.000Z
|
2021-06-10T19:00:35.000Z
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# MessagePayload
#
# Takes dict of keys/vals and makes a formatted payload message.
# Implemented as factory pattern that allows for variations in message formatting.
#
from abc import ABC, abstractmethod
import ast
from dict_recursive_update import recursive_update
import json
class MessagePayload(ABC):
# pass array of keys to remove from message BEFORE or AFTER formatting
# allows for subclasses to use data and then remove it
#
# Typically, the caller will only supply preDropKeys if any and
# subclasses would set the postDropKeys as needed.
#
def __init__(self, d, config={'preDropKeys':[], 'postDropKeys':[]}) -> None:
self.payload = {}
self.preDropKeys = config.get('preDropKeys')
if self.preDropKeys is None:
self.preDropKeys = []
self.preDropKeys.append('')
self.postDropKeys = config.get('postDropKeys')
if self.postDropKeys is None:
self.postDropKeys = []
self._prepare_message(d)
def _prepare_message(self, d):
[ d.pop(k) for k in (set(self.preDropKeys) & set(d.keys())) ]
self.payload = d.copy()
self.make_message(d)
[ self.payload.pop(k) for k in (set(self.postDropKeys) & set(self.payload.keys())) ]
def message(self, formatter=None):
return self.payload if formatter is None else formatter(self.payload)
@abstractmethod
def make_message(self, d):
raise NotImplementedError("MessagePayload must be subclassed with an implementation of #prepare_message")
# SimpleLabelled Strategy just returns the dict
# the dict is assumed to be structured with 'key': value
# so no changes.
class SimpleLabelledPayload(MessagePayload):
def make_message(self, d):
# self.payload = d.copy()
pass
# DotLabelledPayload Strategy will expand any property labels with dots .. e.g. "a.b"
# into a: { b }
class DotLabelledPayload(MessagePayload):
def dot_expand(self, k, v):
try:
v = ast.literal_eval(v)
except Exception as e:
pass
if len(k) == 0:
return v
keys = k.split('.')
key = keys.pop(0)
if len(keys) == 0:
return { key: v }
return { key: self.dot_expand(".".join(keys), v) }
def make_message(self, d):
self.payload = {}
[ recursive_update(self.payload, self.dot_expand(key, d[key])) for key in d ]
# DynamicLabelledPayload takes apart the dict and builds the payload
# the dict is of the format 'name': metric, 'value': reading
# and will be reformatted to 'metric': reading
#
class DynamicLabelledPayload(MessagePayload):
def __init__(self, d, config={'metricKey':'status', 'readingKey':'value', 'value_transform_function': float}) -> None:
self.metricKey = config.get('metricKey', 'status')
self.readingKey = config.get('readingKey', 'value')
self.transform = config.get('value_transform_function', float)
pdk = config.get('postDropKeys', [])
pdk.extend([self.metricKey, self.readingKey])
config['postDropKeys'] = pdk
super().__init__(d, config)
def make_message(self, d):
try:
self.payload[d[self.metricKey]] = self.transform(d[self.readingKey])
except Exception as e:
print("key or value didn't exist")
# UntimedDynamicLabelledPayload removes the timestamp from the payload
#
class UntimedDynamicLabelledPayload(DynamicLabelledPayload):
def __init__(self, d, config={'metricKey':'status', 'readingKey':'value', 'time_col_name': 'timestamp'}) -> None:
self.time_col_name = config.get('time_col_name', 'timestamp')
pdk = config.get('postDropKeys', [])
pdk.extend([self.time_col_name])
config['postDropKeys'] = pdk
super().__init__(d, config)
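# Usage sketch (added for illustration; the sample readings below are made up):
#
#     DotLabelledPayload({'engine.rpm': '3000', 'speed': '42.5'}).message()
#     # -> {'engine': {'rpm': 3000}, 'speed': 42.5}
#
#     DynamicLabelledPayload({'status': 'rpm', 'value': '3000'}).message()
#     # -> {'rpm': 3000.0}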
| 39.97541
| 122
| 0.672955
|
8c5f312924e77641554de61a638a7de3bf314c42
| 547
|
py
|
Python
|
Iniciante/Python/1183 - Acima da Diagonal Principal.py
|
gtozeti/URI
|
e776172fd5605e9d6e641bbd59859680fecd2400
|
[
"MIT"
] | 2
|
2021-02-19T01:18:10.000Z
|
2021-02-19T01:18:27.000Z
|
Iniciante/Python/1183 - Acima da Diagonal Principal.py
|
gtozeti/URI
|
e776172fd5605e9d6e641bbd59859680fecd2400
|
[
"MIT"
] | null | null | null |
Iniciante/Python/1183 - Acima da Diagonal Principal.py
|
gtozeti/URI
|
e776172fd5605e9d6e641bbd59859680fecd2400
|
[
"MIT"
] | null | null | null |
operacao = input()
matriz = []
soma = 0
media = 0
total = 0
for i in range(12):
linha = []
for j in range(12):
linha.append(float(input()))
matriz.append(linha)
if operacao == "S":
for x in range(11):
for y in range(1,12):
if y > x:
soma += float(matriz[x][y])
print("%.1f"%soma)
else:
for x in range(11):
for y in range(1, 12):
if y > x:
media += float(matriz[x][y])
total += 1
media = media/total
print("%.1f"%media)
| 20.259259
| 44
| 0.477148
|
429257ab086ecc6ec5a41665446f00be5c2ba1f7
| 1,278
|
py
|
Python
|
peeringdb_server/migrations/0058_deskpro_cc.py
|
CyberFlameGO/peeringdb
|
83461deb58805fe1d83c5ff4276fa3bcb59a7323
|
[
"BSD-2-Clause"
] | 224
|
2016-10-13T10:32:33.000Z
|
2022-03-23T13:08:48.000Z
|
peeringdb_server/migrations/0058_deskpro_cc.py
|
CyberFlameGO/peeringdb
|
83461deb58805fe1d83c5ff4276fa3bcb59a7323
|
[
"BSD-2-Clause"
] | 1,063
|
2016-06-07T02:57:11.000Z
|
2022-03-31T00:08:07.000Z
|
peeringdb_server/migrations/0058_deskpro_cc.py
|
CyberFlameGO/peeringdb
|
83461deb58805fe1d83c5ff4276fa3bcb59a7323
|
[
"BSD-2-Clause"
] | 92
|
2016-10-22T14:59:40.000Z
|
2022-03-26T11:30:12.000Z
|
# Generated by Django 2.2.17 on 2020-11-20 15:40
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("peeringdb_server", "0057_add_suite_and_floor"),
]
operations = [
migrations.CreateModel(
name="DeskProTicketCC",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("email", models.EmailField(max_length=254)),
(
"ticket",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="cc_set",
to="peeringdb_server.DeskProTicket",
),
),
],
options={
"verbose_name": "DeskPRO Ticket CC Contact",
"verbose_name_plural": "Deskpro Ticket CC Contacts",
"unique_together": {("ticket", "email")},
},
),
]
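# Added note: like any Django migration, this one is applied with the standard
# management command, e.g.
#
#     python manage.py migrate peeringdb_server 0058_deskpro_cc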
| 29.72093
| 68
| 0.442879
|
a93f3d3cb711af50f6e8881fb948583a06d73126
| 16,448
|
py
|
Python
|
pytomorrowio/pytomorrowio.py
|
raman325/pytomorrowio
|
2a698c3fd5686067bd8b97fd83cba6f8b125e00d
|
[
"MIT"
] | null | null | null |
pytomorrowio/pytomorrowio.py
|
raman325/pytomorrowio
|
2a698c3fd5686067bd8b97fd83cba6f8b125e00d
|
[
"MIT"
] | null | null | null |
pytomorrowio/pytomorrowio.py
|
raman325/pytomorrowio
|
2a698c3fd5686067bd8b97fd83cba6f8b125e00d
|
[
"MIT"
] | null | null | null |
"""Main module."""
import asyncio
from datetime import datetime, timedelta, timezone
import json
import logging
from typing import Any, Dict, List, Optional, Union
from aiohttp import ClientConnectionError, ClientSession
from .const import (
BASE_URL_V4,
CURRENT,
DAILY,
FIELDS_V4,
FORECASTS,
HEADERS,
HOURLY,
NOWCAST,
TIMESTEP_DAILY,
TIMESTEP_HOURLY,
)
from .exceptions import (
CantConnectException,
InvalidAPIKeyException,
InvalidTimestep,
MalformedRequestException,
RateLimitedException,
UnknownException,
)
from .helpers import async_to_sync
_LOGGER = logging.getLogger(__name__)
def process_v4_fields(fields: List[str], timestep: timedelta) -> List[str]:
"""
Filter v4 field list to only include valid fields for a given endpoint.
Logs a warning when fields get filtered out.
"""
valid_fields = [field for field in fields if field in FIELDS_V4]
if len(valid_fields) < len(fields):
_LOGGER.warning(
"Removed invalid fields: %s", list(set(fields) - set(valid_fields))
)
if timestep == timedelta(days=1):
processed_fields = [
field for field in valid_fields if FIELDS_V4[field]["timestep"][1] == 360
]
elif timestep == timedelta(hours=1):
processed_fields = [
field for field in valid_fields if FIELDS_V4[field]["timestep"][1] >= 108
]
elif timestep in (
timedelta(minutes=30),
timedelta(minutes=15),
timedelta(minutes=5),
timedelta(minutes=1),
):
processed_fields = [
field for field in valid_fields if FIELDS_V4[field]["timestep"][1] >= 6
]
elif timestep == timedelta(0):
processed_fields = [
field
for field in valid_fields
if FIELDS_V4[field]["timestep"][0] <= 0
and FIELDS_V4[field]["timestep"][1] >= 0
]
elif timestep < timedelta(0):
processed_fields = [
field for field in valid_fields if FIELDS_V4[field]["timestep"][0] < 0
]
else:
raise InvalidTimestep
if len(processed_fields) < len(valid_fields):
_LOGGER.warning(
"Remove fields not available for `%s` timestep: %s",
timestep,
list(set(valid_fields) - set(processed_fields)),
)
return processed_fields
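# Example behaviour (added for illustration; "temperature" stands in for any
# field whose FIELDS_V4 entry allows daily data, and "bogus" for a name that
# is not in FIELDS_V4 at all):
#
#     process_v4_fields(["temperature", "bogus"], timedelta(days=1))
#     # -> ["temperature"]   (unknown and out-of-range fields are dropped,
#     #                       each with a logged warning)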
def dt_to_utc(input_dt: datetime) -> datetime:
"""If input datetime has a timezone defined, convert to UTC."""
if input_dt and input_dt.tzinfo:
return input_dt.astimezone(timezone.utc)
return input_dt
class TomorrowioV4:
"""Async class to query the Tomorrow.io v4 API."""
def __init__(
self,
apikey: str,
latitude: Union[int, float, str],
longitude: Union[int, float, str],
unit_system: str = "imperial",
session: ClientSession = None,
) -> None:
"""Initialize Tomorrow.io API object."""
if unit_system.lower() not in ("metric", "imperial", "si", "us"):
raise ValueError("`unit_system` must be `metric` or `imperial`")
elif unit_system.lower() == "si":
unit_system = "metric"
elif unit_system.lower() == "us":
unit_system = "imperial"
self._apikey = apikey
self.location = [float(latitude), float(longitude)]
self.unit_system = unit_system.lower()
self._session = session
self._params = {
"location": self.location,
"units": self.unit_system,
}
self._headers = {**HEADERS, "apikey": self._apikey}
@staticmethod
def convert_fields_to_measurements(fields: List[str]) -> List[str]:
"""Converts general field list into fields with measurements."""
field_list = []
for field in fields:
measurements = FIELDS_V4[field]["measurements"]
if len(measurements) < 2:
field_list.append(field)
else:
field_list.extend(
[f"{field}{measurement}" for measurement in measurements]
)
return field_list
@staticmethod
def available_fields(
timestep: timedelta, types: Optional[List] = None
) -> List[str]:
"Return available fields for a given timestep."
if timestep == timedelta(days=1):
fields = [
field for field in FIELDS_V4 if FIELDS_V4[field]["timestep"][1] == 360
]
elif timestep == timedelta(hours=1):
fields = [
field for field in FIELDS_V4 if FIELDS_V4[field]["timestep"][1] >= 108
]
elif timestep in (
timedelta(minutes=30),
timedelta(minutes=15),
timedelta(minutes=5),
timedelta(minutes=1),
):
fields = [
field for field in FIELDS_V4 if FIELDS_V4[field]["timestep"][1] >= 6
]
elif timestep in (timedelta(0), CURRENT):
fields = [
field
for field in FIELDS_V4
if FIELDS_V4[field]["timestep"][0] <= 0 and FIELDS_V4[field]["timestep"][1] >= 0
]
elif timestep < timedelta(0):
fields = [
field for field in FIELDS_V4 if FIELDS_V4[field]["timestep"][0] < 0
]
else:
raise InvalidTimestep
if types:
return [field for field in fields if FIELDS_V4[field]["type"] in types]
return fields
async def _call_api(self, params: Dict[str, Any]) -> Dict[str, Any]:
"""Call Tomorrow.io API."""
try:
if self._session:
resp = await self._session.post(
BASE_URL_V4,
headers=self._headers,
data=json.dumps({**self._params, **params}),
)
resp_json = await resp.json()
if resp.status == 200:
return resp_json
if resp.status == 400:
raise MalformedRequestException(resp_json, resp.headers)
elif resp.status in (401, 403):
raise InvalidAPIKeyException(resp_json, resp.headers)
elif resp.status == 429:
raise RateLimitedException(resp_json, resp.headers)
else:
raise UnknownException(resp_json, resp.headers)
async with ClientSession() as session:
resp = await session.post(
BASE_URL_V4,
headers=self._headers,
data=json.dumps({**self._params, **params}),
)
resp_json = await resp.json()
if resp.status == 200:
return resp_json
if resp.status == 400:
raise MalformedRequestException(resp_json, resp.headers)
elif resp.status in (401, 403):
raise InvalidAPIKeyException(resp_json, resp.headers)
elif resp.status == 429:
raise RateLimitedException(resp_json, resp.headers)
else:
raise UnknownException(resp_json, resp.headers)
except ClientConnectionError:
raise CantConnectException()
async def realtime(self, fields: List[str]) -> Dict[str, Any]:
"""Return realtime weather conditions from Tomorrow.io API."""
return await self._call_api(
{
"fields": process_v4_fields(fields, timedelta(0)),
"timesteps": ["current"],
}
)
async def _forecast(
self,
timestep: timedelta,
fields: List[str],
start_time: Optional[datetime] = None,
duration: Optional[timedelta] = None,
**kwargs,
) -> List[Dict[str, Any]]:
"""Return forecast data from Tomorrow.io's API for a given time period."""
params = {
"fields": self.convert_fields_to_measurements(
process_v4_fields(fields, timestep)
),
**kwargs,
}
if timestep == timedelta(days=1):
params["timestep"] = [TIMESTEP_DAILY]
elif timestep == timedelta(hours=1):
params["timestep"] = [TIMESTEP_HOURLY]
elif timestep in (
timedelta(minutes=30),
timedelta(minutes=15),
timedelta(minutes=5),
timedelta(minutes=1),
):
params["timestep"] = [f"{int(timestep.total_seconds()/60)}m"]
else:
raise InvalidTimestep
if start_time:
if not start_time.tzinfo:
                start_time = start_time.replace(tzinfo=timezone.utc)
params["startTime"] = f"{start_time.replace(microsecond=0).isoformat()}"
else:
start_time = datetime.utcnow().replace(tzinfo=timezone.utc)
if duration:
end_time = (start_time + duration).replace(microsecond=0)
params["endTime"] = f"{end_time.isoformat()}"
return await self._call_api(params)
async def forecast_nowcast(
self,
fields: List[str],
start_time: Optional[datetime] = None,
duration: Optional[timedelta] = None,
timestep: int = 5,
) -> Dict[str, Any]:
"""Return forecast data from Tomorrow.io's NowCast API for a given time period."""
if timestep not in (1, 5, 15, 30):
raise InvalidTimestep
return await self._forecast(
timedelta(minutes=timestep),
fields,
start_time=start_time,
duration=duration,
)
async def forecast_daily(
self,
fields: List[str],
start_time: Optional[datetime] = None,
duration: Optional[timedelta] = None,
) -> Dict[str, Any]:
"""Return daily forecast data from Tomorrow.io's API for a given time period."""
return await self._forecast(
timedelta(days=1), fields, start_time=start_time, duration=duration
)
async def forecast_hourly(
self,
fields: List[str],
start_time: Optional[datetime] = None,
duration: Optional[timedelta] = None,
) -> Dict[str, Any]:
"""Return hourly forecast data from Tomorrow.io's API for a given time period."""
return await self._forecast(
timedelta(hours=1), fields, start_time=start_time, duration=duration
)
async def realtime_and_all_forecasts(
self,
realtime_fields: List[str],
forecast_or_nowcast_fields: List[str],
hourly_fields: List[str] = None,
daily_fields: List[str] = None,
nowcast_timestep: int = 5,
) -> Dict[str, Any]:
"""
Return realtime weather and all forecasts.
If `hourly_fields` and `daily_fields` are not provided,
`forecast_or_nowcast_fields` will be used to get nowcast, hourly, and daily
data.
"""
ret_data = {}
data = await self._call_api(
{
"timesteps": ["current"],
"fields": realtime_fields,
}
)
if (
"data" in data
and "timelines" in data["data"]
and "intervals" in data["data"]["timelines"][0]
and "values" in data["data"]["timelines"][0]["intervals"][0]
):
ret_data[CURRENT] = data["data"]["timelines"][0]["intervals"][0]["values"]
forecasts = ret_data.setdefault(FORECASTS, {})
if not hourly_fields and not daily_fields:
data = await self._call_api(
{
"timesteps": [
f"{nowcast_timestep}m",
TIMESTEP_HOURLY,
TIMESTEP_DAILY,
],
"fields": forecast_or_nowcast_fields,
"startTime": datetime.utcnow()
.replace(tzinfo=timezone.utc)
.isoformat(),
}
)
if "data" in data and "timelines" in data["data"]:
for timeline in data["data"]["timelines"]:
if timeline["timestep"] == TIMESTEP_DAILY:
key = DAILY
elif timeline["timestep"] == TIMESTEP_HOURLY:
key = HOURLY
else:
key = NOWCAST
forecasts[key] = timeline["intervals"]
else:
data = await self._call_api(
{
"timesteps": [f"{nowcast_timestep}m"],
"fields": forecast_or_nowcast_fields,
"startTime": datetime.utcnow()
.replace(tzinfo=timezone.utc)
.isoformat(),
}
)
if "data" in data and "timelines" in data["data"]:
forecasts[NOWCAST] = data["data"]["timelines"][0]["intervals"]
for field_list, timestep, key in (
(hourly_fields, TIMESTEP_HOURLY, HOURLY),
(daily_fields, TIMESTEP_DAILY, DAILY),
):
if field_list:
await asyncio.sleep(1)
data = await self._call_api(
{
"timesteps": [timestep],
"fields": field_list,
"startTime": datetime.utcnow()
.replace(tzinfo=timezone.utc)
.isoformat(),
}
)
if "data" in data and "timelines" in data["data"]:
forecasts[key] = data["data"]["timelines"][0]["intervals"]
return ret_data
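    # Shape of the mapping returned above (sketch; the key constants CURRENT,
    # FORECASTS, NOWCAST, HOURLY and DAILY are imported elsewhere in this
    # package):
    #
    #   {
    #       CURRENT: {...realtime values...},
    #       FORECASTS: {NOWCAST: [...], HOURLY: [...], DAILY: [...]},
    #   }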
class TomorrowioV4Sync(TomorrowioV4):
"""Synchronous class to query the Tomorrow.io API."""
def __init__(
self,
apikey: str,
latitude: Union[int, float, str],
longitude: Union[int, float, str],
unit_system: str = "imperial",
) -> None:
"""Initialize Synchronous Tomorrow.io v4 API object."""
super().__init__(apikey, latitude, longitude, unit_system)
@async_to_sync
async def realtime(self, fields: List[str]) -> Dict[str, Any]:
"""Return realtime weather conditions from Tomorrow.io API."""
return await super().realtime(fields)
@async_to_sync
async def forecast_nowcast(
self,
fields: List[str],
start_time: Optional[datetime] = None,
duration: Optional[timedelta] = None,
timestep: int = 5,
) -> List[Dict[str, Any]]:
"""Return forecast data from Tomorrow.io's NowCast API for a given time period."""
return await super().forecast_nowcast(fields, start_time, duration, timestep)
@async_to_sync
async def forecast_daily(
self,
fields: List[str],
start_time: Optional[datetime] = None,
duration: Optional[timedelta] = None,
) -> List[Dict[str, Any]]:
"""Return daily forecast data from Tomorrow.io's API for a given time period."""
return await super().forecast_daily(fields, start_time, duration)
@async_to_sync
async def forecast_hourly(
self,
fields: List[str],
start_time: Optional[datetime] = None,
duration: Optional[timedelta] = None,
) -> List[Dict[str, Any]]:
"""Return hourly forecast data from Tomorrow.io's API for a given time period."""
return await super().forecast_hourly(fields, start_time, duration)
@async_to_sync
async def realtime_and_all_forecasts(
self,
realtime_fields: List[str],
forecast_fields: List[str],
hourly_fields: List[str] = None,
daily_fields: List[str] = None,
nowcast_timestep: int = 5,
) -> Dict[str, Any]:
"""
Return realtime weather and all forecasts.
If `hourly_fields` and `daily_fields` are not provided,
        `forecast_fields` will be used to get nowcast, hourly, and daily
data.
"""
return await super().realtime_and_all_forecasts(
realtime_fields,
forecast_fields,
hourly_fields=hourly_fields,
daily_fields=daily_fields,
nowcast_timestep=nowcast_timestep,
)
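# Usage sketch (illustrative only): the coordinates and the "temperature"
# field below are placeholders chosen for demonstration; substitute a real
# API key before running.
if __name__ == "__main__":
    _demo_client = TomorrowioV4Sync("YOUR_API_KEY", 40.7128, -74.0060)
    print(_demo_client.realtime(["temperature"]))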
| 35.145299
| 90
| 0.550401
|
8c73bf8a7212b7aa458fcc1b8e82f45c709be528
| 2,316
|
py
|
Python
|
tests/cfngin/fixtures/mock_docker/fake_api_client.py
|
paul-duffy/runway
|
a0c22eb7ca7b55df5317bdda92c08c4bb39569d2
|
[
"Apache-2.0"
] | 1
|
2020-02-25T21:08:00.000Z
|
2020-02-25T21:08:00.000Z
|
tests/cfngin/fixtures/mock_docker/fake_api_client.py
|
paul-duffy/runway
|
a0c22eb7ca7b55df5317bdda92c08c4bb39569d2
|
[
"Apache-2.0"
] | 2
|
2020-01-07T15:00:55.000Z
|
2020-01-07T15:03:25.000Z
|
tests/cfngin/fixtures/mock_docker/fake_api_client.py
|
voodooGQ/runway
|
8a744f33b39f1342022f1b57db996bb843e4556c
|
[
"Apache-2.0"
] | null | null | null |
"""Fake Docker API client."""
# pylint: disable=attribute-defined-outside-init,protected-access
import copy
import docker
from . import fake_api
try:
from unittest import mock
except ImportError:
import mock
class CopyReturnMagicMock(mock.MagicMock):
"""A MagicMock which deep copies every return value."""
def _mock_call(self, *args, **kwargs):
ret = super(CopyReturnMagicMock, self)._mock_call(*args, **kwargs)
if isinstance(ret, (dict, list)):
ret = copy.deepcopy(ret)
return ret
def make_fake_api_client(overrides=None):
"""Return non-complete fake APIClient.
This returns most of the default cases correctly, but most arguments that
change behavior will not work.
"""
if overrides is None:
overrides = {}
api_client = docker.APIClient()
mock_attrs = {
'build.return_value': fake_api.FAKE_CONTAINER_ID,
'commit.return_value': fake_api.post_fake_commit()[1],
'containers.return_value': fake_api.get_fake_containers()[1],
'create_container.return_value':
fake_api.post_fake_create_container()[1],
'create_host_config.side_effect': api_client.create_host_config,
'create_network.return_value': fake_api.post_fake_network()[1],
'exec_create.return_value': fake_api.post_fake_exec_create()[1],
'exec_start.return_value': fake_api.post_fake_exec_start()[1],
'images.return_value': fake_api.get_fake_images()[1],
'inspect_container.return_value':
fake_api.get_fake_inspect_container()[1],
'inspect_image.return_value': fake_api.get_fake_inspect_image()[1],
'inspect_network.return_value': fake_api.get_fake_network()[1],
'logs.return_value': [b'hello world\n'],
'networks.return_value': fake_api.get_fake_network_list()[1],
'start.return_value': None,
'wait.return_value': {'StatusCode': 0},
}
mock_attrs.update(overrides)
mock_client = CopyReturnMagicMock(**mock_attrs)
mock_client._version = docker.constants.DEFAULT_DOCKER_API_VERSION
return mock_client
def make_fake_client(overrides=None):
"""Return a Client with a fake APIClient."""
client = docker.DockerClient()
client.api = make_fake_api_client(overrides)
return client
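def _example_fake_client_usage():
    """Usage sketch (illustrative; not used by the fixtures themselves).
    The override key and container id below are hypothetical; they only show
    how a test can replace a single canned response while keeping the rest of
    the fake API behaviour.
    """
    client = make_fake_client({'wait.return_value': {'StatusCode': 1}})
    assert client.api.wait('fake-container-id') == {'StatusCode': 1}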
| 34.567164
| 77
| 0.697323
|
2efdf8a793b42ad316740554643a811cdff52c06
| 17,893
|
py
|
Python
|
fairseq/tasks/translation.py
|
protonish/fairseq-cipherdaug
|
f268173f75698905814f62a37695d831d070b22c
|
[
"MIT"
] | null | null | null |
fairseq/tasks/translation.py
|
protonish/fairseq-cipherdaug
|
f268173f75698905814f62a37695d831d070b22c
|
[
"MIT"
] | null | null | null |
fairseq/tasks/translation.py
|
protonish/fairseq-cipherdaug
|
f268173f75698905814f62a37695d831d070b22c
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import itertools
import json
import logging
import os
from typing import Optional
from argparse import Namespace
from omegaconf import II
import numpy as np
from fairseq import metrics, utils
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
data_utils,
encoders,
indexed_dataset,
)
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.tasks import FairseqTask, register_task
EVAL_BLEU_ORDER = 4
logger = logging.getLogger(__name__)
def load_langpair_dataset(
data_path,
split,
src,
src_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
left_pad_source,
left_pad_target,
max_source_positions,
max_target_positions,
prepend_bos=False,
load_alignments=False,
truncate_source=False,
append_source_id=False,
num_buckets=0,
shuffle=True,
pad_to_multiple=1,
prepend_bos_src=None,
):
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else "")
# infer langcode
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
elif split_exists(split_k, tgt, src, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
else:
if k > 0:
break
else:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
src_dataset = data_utils.load_indexed_dataset(
prefix + src, src_dict, dataset_impl
)
if truncate_source:
src_dataset = AppendTokenDataset(
TruncateDataset(
StripTokenDataset(src_dataset, src_dict.eos()),
max_source_positions - 1,
),
src_dict.eos(),
)
src_datasets.append(src_dataset)
tgt_dataset = data_utils.load_indexed_dataset(
prefix + tgt, tgt_dict, dataset_impl
)
if tgt_dataset is not None:
tgt_datasets.append(tgt_dataset)
logger.info(
"{} {} {}-{} {} examples".format(
data_path, split_k, src, tgt, len(src_datasets[-1])
)
)
if not combine:
break
assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
if len(src_datasets) == 1:
src_dataset = src_datasets[0]
tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
else:
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
if len(tgt_datasets) > 0:
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
else:
tgt_dataset = None
if prepend_bos:
assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
if tgt_dataset is not None:
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
elif prepend_bos_src is not None:
logger.info(f"prepending src bos: {prepend_bos_src}")
src_dataset = PrependTokenDataset(src_dataset, prepend_bos_src)
eos = None
if append_source_id:
src_dataset = AppendTokenDataset(
src_dataset, src_dict.index("[{}]".format(src))
)
if tgt_dataset is not None:
tgt_dataset = AppendTokenDataset(
tgt_dataset, tgt_dict.index("[{}]".format(tgt))
)
eos = tgt_dict.index("[{}]".format(tgt))
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(
align_path, None, dataset_impl
)
tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
return LanguagePairDataset(
src_dataset,
src_dataset.sizes,
src_dict,
tgt_dataset,
tgt_dataset_sizes,
tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
align_dataset=align_dataset,
eos=eos,
num_buckets=num_buckets,
shuffle=shuffle,
pad_to_multiple=pad_to_multiple,
)
@dataclass
class TranslationConfig(FairseqDataclass):
data: Optional[str] = field(
default=None,
metadata={
"help": "colon separated path to data directories list, will be iterated upon during epochs "
"in round-robin manner; however, valid and test data are always in the first directory "
"to avoid the need for repeating them in all directories"
},
)
source_lang: Optional[str] = field(
default=None,
metadata={
"help": "source language",
"argparse_alias": "-s",
},
)
target_lang: Optional[str] = field(
default=None,
metadata={
"help": "target language",
"argparse_alias": "-t",
},
)
load_alignments: bool = field(
default=False, metadata={"help": "load the binarized alignments"}
)
left_pad_source: bool = field(
default=True, metadata={"help": "pad the source on the left"}
)
left_pad_target: bool = field(
default=False, metadata={"help": "pad the target on the left"}
)
max_source_positions: int = field(
default=1024, metadata={"help": "max number of tokens in the source sequence"}
)
max_target_positions: int = field(
default=1024, metadata={"help": "max number of tokens in the target sequence"}
)
upsample_primary: int = field(
default=-1, metadata={"help": "the amount of upsample primary dataset"}
)
truncate_source: bool = field(
default=False, metadata={"help": "truncate source to max-source-positions"}
)
num_batch_buckets: int = field(
default=0,
metadata={
"help": "if >0, then bucket source and target lengths into "
"N buckets and pad accordingly; this is useful on TPUs to minimize the number of compilations"
},
)
train_subset: str = II("dataset.train_subset")
dataset_impl: Optional[ChoiceEnum(get_available_dataset_impl())] = II(
"dataset.dataset_impl"
)
required_seq_len_multiple: int = II("dataset.required_seq_len_multiple")
# options for reporting BLEU during validation
eval_bleu: bool = field(
default=False, metadata={"help": "evaluation with BLEU scores"}
)
eval_bleu_args: Optional[str] = field(
default="{}",
metadata={
"help": 'generation args for BLUE scoring, e.g., \'{"beam": 4, "lenpen": 0.6}\', as JSON string'
},
)
eval_bleu_detok: str = field(
default="space",
metadata={
"help": "detokenize before computing BLEU (e.g., 'moses'); required if using --eval-bleu; "
"use 'space' to disable detokenization; see fairseq.data.encoders for other options"
},
)
eval_bleu_detok_args: Optional[str] = field(
default="{}",
metadata={"help": "args for building the tokenizer, if needed, as JSON string"},
)
eval_tokenized_bleu: bool = field(
default=False, metadata={"help": "compute tokenized BLEU instead of sacrebleu"}
)
eval_bleu_remove_bpe: Optional[str] = field(
default=None,
metadata={
"help": "remove BPE before computing BLEU",
"argparse_const": "@@ ",
},
)
eval_bleu_print_samples: bool = field(
default=False, metadata={"help": "print sample generations during validation"}
)
@register_task("translation", dataclass=TranslationConfig)
class TranslationTask(FairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
"""
cfg: TranslationConfig
def __init__(self, cfg: TranslationConfig, src_dict, tgt_dict):
super().__init__(cfg)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
@classmethod
def setup_task(cls, cfg: TranslationConfig, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
paths = utils.split_paths(cfg.data)
assert len(paths) > 0
# find language pair automatically
if cfg.source_lang is None or cfg.target_lang is None:
cfg.source_lang, cfg.target_lang = data_utils.infer_language_pair(paths[0])
if cfg.source_lang is None or cfg.target_lang is None:
raise Exception(
"Could not infer language pair, please provide it explicitly"
)
# load dictionaries
src_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(cfg.source_lang))
)
tgt_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(cfg.target_lang))
)
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
logger.info("[{}] dictionary: {} types".format(cfg.source_lang, len(src_dict)))
logger.info("[{}] dictionary: {} types".format(cfg.target_lang, len(tgt_dict)))
return cls(cfg, src_dict, tgt_dict)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.cfg.data)
assert len(paths) > 0
if split != self.cfg.train_subset:
# if not training data set, use the first shard for valid and test
paths = paths[:1]
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.cfg.source_lang, self.cfg.target_lang
self.datasets[split] = load_langpair_dataset(
data_path,
split,
src,
self.src_dict,
tgt,
self.tgt_dict,
combine=combine,
dataset_impl=self.cfg.dataset_impl,
upsample_primary=self.cfg.upsample_primary,
left_pad_source=self.cfg.left_pad_source,
left_pad_target=self.cfg.left_pad_target,
max_source_positions=self.cfg.max_source_positions,
max_target_positions=self.cfg.max_target_positions,
load_alignments=self.cfg.load_alignments,
truncate_source=self.cfg.truncate_source,
num_buckets=self.cfg.num_batch_buckets,
shuffle=(split != "test"),
pad_to_multiple=self.cfg.required_seq_len_multiple,
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
return LanguagePairDataset(
src_tokens,
src_lengths,
self.source_dictionary,
tgt_dict=self.target_dictionary,
constraints=constraints,
)
def build_model(self, cfg):
model = super().build_model(cfg)
if self.cfg.eval_bleu:
detok_args = json.loads(self.cfg.eval_bleu_detok_args)
self.tokenizer = encoders.build_tokenizer(
Namespace(tokenizer=self.cfg.eval_bleu_detok, **detok_args)
)
gen_args = json.loads(self.cfg.eval_bleu_args)
self.sequence_generator = self.build_generator(
[model], Namespace(**gen_args)
)
return model
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
if self.cfg.eval_bleu:
bleu = self._inference_with_bleu(self.sequence_generator, sample, model)
logging_output["_bleu_sys_len"] = bleu.sys_len
logging_output["_bleu_ref_len"] = bleu.ref_len
# we split counts into separate entries so that they can be
# summed efficiently across workers using fast-stat-sync
assert len(bleu.counts) == EVAL_BLEU_ORDER
for i in range(EVAL_BLEU_ORDER):
logging_output["_bleu_counts_" + str(i)] = bleu.counts[i]
logging_output["_bleu_totals_" + str(i)] = bleu.totals[i]
return loss, sample_size, logging_output
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
if self.cfg.eval_bleu:
def sum_logs(key):
import torch
result = sum(log.get(key, 0) for log in logging_outputs)
if torch.is_tensor(result):
result = result.cpu()
return result
counts, totals = [], []
for i in range(EVAL_BLEU_ORDER):
counts.append(sum_logs("_bleu_counts_" + str(i)))
totals.append(sum_logs("_bleu_totals_" + str(i)))
if max(totals) > 0:
# log counts as numpy arrays -- log_scalar will sum them correctly
metrics.log_scalar("_bleu_counts", np.array(counts))
metrics.log_scalar("_bleu_totals", np.array(totals))
metrics.log_scalar("_bleu_sys_len", sum_logs("_bleu_sys_len"))
metrics.log_scalar("_bleu_ref_len", sum_logs("_bleu_ref_len"))
def compute_bleu(meters):
import inspect
try:
from sacrebleu.metrics import BLEU
comp_bleu = BLEU.compute_bleu
except ImportError:
# compatibility API for sacrebleu 1.x
import sacrebleu
comp_bleu = sacrebleu.compute_bleu
fn_sig = inspect.getfullargspec(comp_bleu)[0]
if "smooth_method" in fn_sig:
smooth = {"smooth_method": "exp"}
else:
smooth = {"smooth": "exp"}
bleu = comp_bleu(
correct=meters["_bleu_counts"].sum,
total=meters["_bleu_totals"].sum,
sys_len=int(meters["_bleu_sys_len"].sum),
ref_len=int(meters["_bleu_ref_len"].sum),
**smooth
)
return round(bleu.score, 2)
metrics.log_derived("bleu", compute_bleu)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.cfg.max_source_positions, self.cfg.max_target_positions)
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.src_dict
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary`."""
return self.tgt_dict
def _inference_with_bleu(self, generator, sample, model):
import sacrebleu
def decode(toks, escape_unk=False):
s = self.tgt_dict.string(
toks.int().cpu(),
self.cfg.eval_bleu_remove_bpe,
# The default unknown string in fairseq is `<unk>`, but
# this is tokenized by sacrebleu as `< unk >`, inflating
# BLEU scores. Instead, we use a somewhat more verbose
# alternative that is unlikely to appear in the real
# reference, but doesn't get split into multiple tokens.
unk_string=("UNKNOWNTOKENINREF" if escape_unk else "UNKNOWNTOKENINHYP"),
)
if self.tokenizer:
s = self.tokenizer.decode(s)
return s
gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None)
hyps, refs = [], []
for i in range(len(gen_out)):
hyps.append(decode(gen_out[i][0]["tokens"]))
refs.append(
decode(
utils.strip_pad(sample["target"][i], self.tgt_dict.pad()),
escape_unk=True, # don't count <unk> as matches to the hypo
)
)
if self.cfg.eval_bleu_print_samples:
logger.info("example hypothesis: " + hyps[0])
logger.info("example reference: " + refs[0])
if self.cfg.eval_tokenized_bleu:
return sacrebleu.corpus_bleu(hyps, [refs], tokenize="none")
else:
return sacrebleu.corpus_bleu(hyps, [refs])
| 36.220648
| 108
| 0.600961
|
44bfddd8311b2ce5f5b9d04ea8832fb97c03d8da
| 4,309
|
py
|
Python
|
anime_downloader/scrapers/gogoanime/gogoanime_scraper.py
|
Amdrossa/Anime
|
9757f7c8d1a094da61e0c0ac38a2a29bf1c21e28
|
[
"MIT"
] | 554
|
2020-04-15T20:22:50.000Z
|
2022-03-31T11:07:53.000Z
|
anime_downloader/scrapers/gogoanime/gogoanime_scraper.py
|
Amdrossa/Anime
|
9757f7c8d1a094da61e0c0ac38a2a29bf1c21e28
|
[
"MIT"
] | 44
|
2020-04-15T19:26:43.000Z
|
2022-03-11T09:59:24.000Z
|
anime_downloader/scrapers/gogoanime/gogoanime_scraper.py
|
Amdrossa/Anime
|
9757f7c8d1a094da61e0c0ac38a2a29bf1c21e28
|
[
"MIT"
] | 61
|
2020-04-16T19:17:04.000Z
|
2022-03-27T14:51:54.000Z
|
import re
from util.Episode import Episode
from bs4 import BeautifulSoup
from extractors.jwplayer_extractor import JWPlayerExtractor
from scrapers.base_scraper import BaseScraper
from util.Color import printer
class GoGoAnimeScraper(BaseScraper):
def __init__(self, url, start_episode, end_episode, session, gui=None, resolution="480"):
super().__init__(url, start_episode, end_episode, session, gui)
self.resolution = resolution
self.extractor = JWPlayerExtractor(None, self.session)
self.anime_id = None
self.api_link_bases = ['https://ajax.gogocdn.net/ajax/load-list-episode',
'https://ajax.apimovie.xyz/ajax/load-list-episode']
self.__set_anime_id()
def __set_anime_id(self):
response = self.session.get(self.url)
if response.status_code == 200:
soup_html = BeautifulSoup(response.content, "html.parser")
movie_id_tag = soup_html.find("input", attrs={"id": "movie_id"})
if movie_id_tag is not None:
self.anime_id = movie_id_tag["value"]
def __get_episode_data(self):
for base_link in self.api_link_bases:
api_link = base_link + "?ep_start=" + str(self.start_episode) + "&ep_end=" + str(
self.end_episode) + "&id=" + self.anime_id
response = self.session.get(api_link)
if response.status_code == 200:
return response.content
return None
def __get_page_url(self, href):
base_url = re.search("(.*)/category/", self.url).group(1)
# print(base_url)
src = base_url + href
# print(src)
return src
def __set_stream_url(self, episode):
response = self.session.get(episode.page_url)
if response.status_code == 200:
soup_html = BeautifulSoup(response.content, "html.parser")
item_tag = soup_html.find("li", attrs={"class": "anime"}).find("a")
streamer_url = item_tag["data-video"]
if "https" not in streamer_url:
streamer_url = "https:" + streamer_url
streamer_resp = self.session.get(streamer_url)
if streamer_resp.status_code == 200:
sources = self.extractor.extract_sources(streamer_resp.text)
src = ""
for source in sources:
if "m3u8" in source:
src = source
break
if src != "":
res_link_id = self.extractor.get_resolution_link(src, self.resolution)
stream_base = re.search("(.*)/[\S]+\.m3u8", src).group(1)
episode.download_url = stream_base + "/" + res_link_id
print("stream url:", episode.download_url)
return True
return False
def __collect_episodes(self):
printer("INFO", "Extracting page URLs...", self.gui)
episodes = []
if self.anime_id is not None:
data = self.__get_episode_data()
if data is not None:
soup_html = BeautifulSoup(data, "html.parser")
anchor_tags = soup_html.findAll("a", href=True)
for anchor in anchor_tags:
href = anchor["href"].strip()
epi_no = int(href.split("-")[-1])
if epi_no < self.start_episode or epi_no > self.end_episode:
continue
episode = Episode("Episode - " + str(epi_no), "Episode - " + str(epi_no))
episode.is_direct = False
episode.page_url = self.__get_page_url(href)
val = self.__set_stream_url(episode)
if val:
episodes.append(episode)
else:
printer("ERROR", "Failed to collect download link for " + episode.title, self.gui)
return episodes
def get_direct_links(self):
try:
episodes = self.__collect_episodes()
if len(episodes) > 0:
return episodes
else:
return None
except Exception as ex:
printer("ERROR", str(ex), self.gui)
return None
| 38.81982
| 106
| 0.561383
|
d535f9cd48f7ea39f5263170165a41da1351c10c
| 3,674
|
py
|
Python
|
tests/test_rwc_jazz.py
|
marypilataki/mirdata
|
78981e1f1e7b8661e2d04de0dd5640981bbb1881
|
[
"BSD-3-Clause"
] | 2
|
2020-04-08T07:13:47.000Z
|
2021-11-08T06:43:58.000Z
|
tests/test_rwc_jazz.py
|
marypilataki/mirdata
|
78981e1f1e7b8661e2d04de0dd5640981bbb1881
|
[
"BSD-3-Clause"
] | 2
|
2020-04-06T03:46:42.000Z
|
2020-04-08T14:16:02.000Z
|
tests/test_rwc_jazz.py
|
marypilataki/mirdata
|
78981e1f1e7b8661e2d04de0dd5640981bbb1881
|
[
"BSD-3-Clause"
] | 3
|
2020-04-05T17:35:54.000Z
|
2020-04-08T11:15:09.000Z
|
# -*- coding: utf-8 -*-
from mirdata import rwc_jazz, utils
from tests.test_utils import run_track_tests
def test_track():
default_trackid = 'RM-J004'
data_home = 'tests/resources/mir_datasets/RWC-Jazz'
track = rwc_jazz.Track(default_trackid, data_home=data_home)
expected_attributes = {
'track_id': 'RM-J004',
'audio_path': 'tests/resources/mir_datasets/RWC-Jazz/'
+ 'audio/rwc-j-m01/4.wav',
'sections_path': 'tests/resources/mir_datasets/RWC-Jazz/'
+ 'annotations/AIST.RWC-MDB-J-2001.CHORUS/RM-J004.CHORUS.TXT',
'beats_path': 'tests/resources/mir_datasets/RWC-Jazz/'
+ 'annotations/AIST.RWC-MDB-J-2001.BEAT/RM-J004.BEAT.TXT',
'piece_number': 'No. 4',
'suffix': 'M01',
'track_number': 'Tr. 04',
'title': 'Crescent Serenade (Piano Solo)',
'artist': 'Makoto Nakamura',
'duration': 167,
'variation': 'Instrumentation 1',
'instruments': 'Pf',
}
expected_property_types = {'beats': utils.BeatData, 'sections': utils.SectionData}
run_track_tests(track, expected_attributes, expected_property_types)
# test audio loading functions
y, sr = track.audio
assert sr == 44100
assert y.shape == (44100 * 2,)
def test_to_jams():
data_home = 'tests/resources/mir_datasets/RWC-Jazz'
track = rwc_jazz.Track('RM-J004', data_home=data_home)
jam = track.to_jams()
beats = jam.search(namespace='beat')[0]['data']
assert [beat.time for beat in beats] == [
0.05,
0.86,
1.67,
2.48,
3.29,
4.1,
4.91,
5.72,
6.53,
7.34,
]
assert [beat.duration for beat in beats] == [
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
assert [beat.value for beat in beats] == [1, 2, 1, 2, 1, 2, 1, 2, 1, 2]
assert [beat.confidence for beat in beats] == [
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
]
segments = jam.search(namespace='segment')[0]['data']
assert [segment.time for segment in segments] == [0.05, 6.53, 152.06]
assert [segment.duration for segment in segments] == [
6.48,
13.099999999999998,
13.319999999999993,
]
assert [segment.value for segment in segments] == [
'nothing',
'chorus A',
'chorus B',
]
assert [segment.confidence for segment in segments] == [None, None, None]
assert jam['file_metadata']['title'] == 'Crescent Serenade (Piano Solo)'
assert jam['file_metadata']['artist'] == 'Makoto Nakamura'
def test_load_metadata():
data_home = 'tests/resources/mir_datasets/RWC-Jazz'
metadata = rwc_jazz._load_metadata(data_home)
assert metadata['data_home'] == data_home
assert metadata['RM-J004'] == {
'piece_number': 'No. 4',
'suffix': 'M01',
'track_number': 'Tr. 04',
'title': 'Crescent Serenade (Piano Solo)',
'artist': 'Makoto Nakamura',
'duration': 167,
'variation': 'Instrumentation 1',
'instruments': 'Pf',
}
assert metadata['RM-J044'] == {
'piece_number': 'No. 44',
'suffix': 'M04',
'track_number': 'Tr. 09',
'title': 'Joyful, Joyful, We Adore Thee',
'artist': 'K’s Band',
'duration': 270,
'variation': 'Style (Free jazz)',
'instruments': 'Pf & Bs & Dr & Gt & Ts & Fl & Bar',
}
metadata_none = rwc_jazz._load_metadata('asdf/asdf')
assert metadata_none is None
| 28.045802
| 86
| 0.564507
|
b555378707f5628852e63d90d23e58dd14d56c82
| 335
|
py
|
Python
|
coinbase_problems/problem_1.py
|
loftwah/Daily-Coding-Problem
|
0327f0b4f69ef419436846c831110795c7a3c1fe
|
[
"MIT"
] | 129
|
2018-10-14T17:52:29.000Z
|
2022-01-29T15:45:57.000Z
|
coinbase_problems/problem_1.py
|
loftwah/Daily-Coding-Problem
|
0327f0b4f69ef419436846c831110795c7a3c1fe
|
[
"MIT"
] | 2
|
2019-11-30T23:28:23.000Z
|
2020-01-03T16:30:32.000Z
|
coinbase_problems/problem_1.py
|
loftwah/Daily-Coding-Problem
|
0327f0b4f69ef419436846c831110795c7a3c1fe
|
[
"MIT"
] | 60
|
2019-02-21T09:18:31.000Z
|
2022-03-25T21:01:04.000Z
|
"""This problem was asked by Coinbase.
Write a function that takes in a number, string, list, or dictionary and
returns its JSON encoding. It should also handle nulls.
For example, given the following input:
[None, 123, ["a", "b"], {"c":"d"}]
You should return the following, as a string:
'[null, 123, ["a", "b"], {"c": "d"}]'
"""
| 27.916667
| 73
| 0.659701
|
ccd00f848211399cdf640bfa60c586162b458bdb
| 965
|
py
|
Python
|
convertor/migrations/0001_initial.py
|
fbrakhmonov/mdweb
|
dbe503462a2a594fa0b9a7a97f98e45b12954c9f
|
[
"MIT"
] | null | null | null |
convertor/migrations/0001_initial.py
|
fbrakhmonov/mdweb
|
dbe503462a2a594fa0b9a7a97f98e45b12954c9f
|
[
"MIT"
] | null | null | null |
convertor/migrations/0001_initial.py
|
fbrakhmonov/mdweb
|
dbe503462a2a594fa0b9a7a97f98e45b12954c9f
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1 on 2019-03-01 13:54
import convertor.models
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('published_date', models.DateTimeField(auto_now_add=True)),
('summary', models.TextField(blank=True, max_length=200)),
('slug', models.SlugField()),
('page_md', models.FileField(upload_to=convertor.models.post_filename)),
('page_web', models.FileField(blank=True, null=True, upload_to='')),
],
options={
'ordering': ['-published_date'],
},
),
]
| 31.129032
| 114
| 0.566839
|
120bafa532f5f9769f58119f26862cd3bed41157
| 3,124
|
py
|
Python
|
app.py
|
stevexiaofei/LabOnWeb
|
ace26455c0b101bf5b03c3bf43799355b7513005
|
[
"MIT"
] | null | null | null |
app.py
|
stevexiaofei/LabOnWeb
|
ace26455c0b101bf5b03c3bf43799355b7513005
|
[
"MIT"
] | 1
|
2021-10-12T22:16:02.000Z
|
2021-10-12T22:16:02.000Z
|
app.py
|
stevexiaofei/LabOnWeb
|
ace26455c0b101bf5b03c3bf43799355b7513005
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from importlib import import_module
import os
from flask import Flask, render_template, Response,url_for, request, session, json, \
send_from_directory, current_app, g,redirect
from flask_moment import Moment
from datetime import datetime
from flask_sqlalchemy import SQLAlchemy
from sqlite import *
# import camera driver
if os.environ.get('CAMERA'):
Camera = import_module('camera_' + os.environ['CAMERA']).Camera
else:
from client import Camera
# Raspberry Pi camera module (requires picamera package)
# from camera_pi import Camera
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
##############################
moment = Moment(app)
app.config['SECRET_KEY'] = 'stevexiaofei@app123456'
@app.before_request
def preprocess():
g.username = session.get('username')
except_list=[url_for('logout'),'/video_feed']
session['last_base_url']='/' if (request.path in except_list) else url_for('logout')
@app.after_request
def postprocess(response):
return response
@app.route('/mission')
def mission():
return render_template('mission.html')
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/login',methods = [ 'GET', 'POST' ])
def login():
if request.method == 'POST':
username = request.values.get('username')
password = request.values.get('password')
user_profile={'name':username,'password':password}
query_return=query(user_profile)
if query_return==0:
session['username'] = username
return json.dumps({
'success': 'true',
'msg': 'Login success!'
})
elif query_return==1:
return json.dumps({
'success': 'false',
                'msg': 'Password incorrect, please try again!'
})
else:
return json.dumps({
'success': 'false',
'msg': "The user does't exit please register first!"
})
else:
return render_template('login.html')
@app.route('/logout', methods = [ 'GET', 'POST' ])
def logout():
last_base_url=session['last_base_url']
session.clear()
return redirect(last_base_url)
@app.route('/controll')
def lab_on_web():
#print('lab_on_web',session.get('username'))
if session.get('username',None)==None:
return render_template('login.html')
else:
return render_template('controll.html')
@app.route('/')
def index():
"""Video streaming home page."""
return render_template('index.html' )
def gen(camera):
"""Video streaming generator function."""
while True:
frame = camera.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
@app.route('/video_feed', methods = [ 'GET', 'POST' ])
def video_feed():
"""Video streaming route. Put this in the src attribute of an img tag."""
if request.method == 'GET':
return Response(gen(Camera()),
mimetype='multipart/x-mixed-replace; boundary=frame')
else:
item_idx = int(request.values.get('item_idx'))
return json.dumps({
'success': 'true',
'msg': "respose from remote server\n you select the %d item!"%(item_idx,)
})
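# Template usage sketch (illustrative): the streaming route above is meant to
# be referenced from an <img> tag, e.g. in index.html:
#
#   <img src="{{ url_for('video_feed') }}">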
if __name__ == '__main__':
app.run(host='202.121.181.3',threaded=True)
| 28.4
| 85
| 0.680538
|
7424cfd553471bd0b8cdefd0244627d0795bc828
| 167
|
py
|
Python
|
pipeline/schemas/tag.py
|
mystic-ai/pipeline
|
487c5e755a862a12c90572b0eff170853ecb3790
|
[
"Apache-2.0"
] | 7
|
2022-01-28T20:27:50.000Z
|
2022-02-22T15:30:00.000Z
|
pipeline/schemas/tag.py
|
mystic-ai/pipeline
|
487c5e755a862a12c90572b0eff170853ecb3790
|
[
"Apache-2.0"
] | 17
|
2022-01-11T12:05:38.000Z
|
2022-03-25T15:29:43.000Z
|
pipeline/schemas/tag.py
|
neuro-ai-dev/pipeline
|
c7edcc83576158062fe48f266dfaea62d754e761
|
[
"Apache-2.0"
] | null | null | null |
from .base import BaseModel
class TagBase(BaseModel):
name: str
class TagCreate(TagBase):
pass
class TagGet(TagBase):
id: str
frequency: int = 0
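# Construction sketch (illustrative; assumes the shared BaseModel behaves like
# a pydantic model, as elsewhere in this package):
#
#   tag = TagGet(id="tag_123", name="vision", frequency=3)
#   tag.dict()  # -> {"name": "vision", "id": "tag_123", "frequency": 3}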
| 11.133333
| 27
| 0.670659
|
bcf09c6cdd0e8f30bc65eaacb07ad9f49c5c3151
| 4,197
|
py
|
Python
|
comet/utility/test/test_event_db.py
|
shinybrar/Comet
|
4229092fca74c130a7d4ecd4dbd22ae85f7e6308
|
[
"BSD-2-Clause"
] | null | null | null |
comet/utility/test/test_event_db.py
|
shinybrar/Comet
|
4229092fca74c130a7d4ecd4dbd22ae85f7e6308
|
[
"BSD-2-Clause"
] | null | null | null |
comet/utility/test/test_event_db.py
|
shinybrar/Comet
|
4229092fca74c130a7d4ecd4dbd22ae85f7e6308
|
[
"BSD-2-Clause"
] | null | null | null |
# Comet VOEvent Broker.
# Event database tests.
import os
import shutil
import stat
import tempfile
import time
from functools import reduce
from itertools import repeat, permutations
from multiprocessing.pool import ThreadPool
from operator import __or__
from sys import platform
from unittest import skipIf
from twisted.trial import unittest
from comet.testutils import DummyEvent
from comet.utility.event_db import Event_DB
class Event_DB_TestCase(unittest.TestCase):
def setUp(self):
self.event_db_dir = tempfile.mkdtemp()
self.event_db = Event_DB(self.event_db_dir)
self.event = DummyEvent()
def test_non_existing_dir(self):
# If the root for the Event_DB doesn't exist, we should create it.
# Note the relative path means that this DB will be created in the
# _trial_temp directory.
event_db = Event_DB("event_db_test_non_exist_%.5f" % (time.time(),))
self.assertTrue(event_db.check_event(self.event))
def test_dir_is_file(self):
# If the path specified for the Event_DB *does* exist but isn't a
# directory, then we should fail fast.
filename = "event_db_test_is_file_%.5f" % (time.time(),)
open(filename, 'w').close()
self.assertRaises(RuntimeError, Event_DB, filename)
@skipIf(platform == "win32", "Not available on Windows.")
def test_dir_is_unusable(self):
# If the path specified for the Event_DB exists and is a directory,
# but we don't have permissions to use it, fail fast.
# Note that os.chmod() isn't properly supported on Windows.
filename = "event_db_test__is_unusable_%.5f" % (time.time(),)
os.makedirs(filename)
os.chmod(filename, 0)
self.assertRaises(RuntimeError, Event_DB, filename)
for n_perms in [1, 2]:
for perms in permutations([stat.S_IRUSR, stat.S_IWUSR, stat.S_IXUSR],
n_perms):
os.chmod(filename, reduce(__or__, perms))
self.assertRaises(RuntimeError, Event_DB, filename)
os.chmod(filename, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
self.assertTrue(Event_DB(filename).check_event(self.event))
def test_unseen(self):
# Unseen event -> return True
self.assertTrue(self.event_db.check_event(self.event))
def test_seen(self):
# Seen event -> return False
self.event_db.check_event(self.event)
self.assertFalse(self.event_db.check_event(self.event))
def test_threadsafe(self):
# Ensure that the eventdb is thread-safe by hammering on it with
# multiple threads simultaneously. We should only get one positive.
pool = ThreadPool(10)
results = pool.map(self.event_db.check_event, repeat(self.event, 1000))
self.assertEqual(results.count(True), 1)
self.assertEqual(results.count(False), 999)
def test_prune(self):
def done_prune(result):
self.assertTrue(self.event_db.check_event(self.event))
self.event_db.check_event(self.event)
d = self.event_db.prune(0)
d.addCallback(done_prune)
return d
def test_bad_ivoid(self):
bad_event = DummyEvent(b"ivo://#")
self.assertFalse(self.event_db.check_event(bad_event))
def test_prune_bad_event(self):
bad_event = DummyEvent(ivoid=b"ivo://")
self.assertNotIn("", self.event_db.databases)
# This event doesn't validate and is rejected.
self.assertFalse(self.event_db.check_event(bad_event))
        # The hostname shouldn't even be stored in our list of databases.
self.assertNotIn("", self.event_db.databases)
d = self.event_db.prune(0)
def done_prune(result):
# After pruning, everything in the database should be unlocked.
for lock in self.event_db.databases.values():
self.assertFalse(lock.locked())
self.assertFalse(self.event_db.check_event(bad_event))
self.assertTrue(self.event_db.check_event(self.event))
d.addCallback(done_prune)
return d
def tearDown(self):
shutil.rmtree(self.event_db_dir)
| 39.224299
| 81
| 0.671194
|
f009003228961c726a9731d0c4f829275445603e
| 8,703
|
py
|
Python
|
imapy/email_message.py
|
s0x90/imapy
|
60f0b26aba6d4dfa088debc0a2a0c3c236cd308a
|
[
"MIT"
] | 1
|
2021-07-19T07:31:07.000Z
|
2021-07-19T07:31:07.000Z
|
imapy/email_message.py
|
s0x90/imapy
|
60f0b26aba6d4dfa088debc0a2a0c3c236cd308a
|
[
"MIT"
] | null | null | null |
imapy/email_message.py
|
s0x90/imapy
|
60f0b26aba6d4dfa088debc0a2a0c3c236cd308a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
imapy.email_message
~~~~~~~~~~~~~~~~~~~
This module contains EmailMessage class used for parsing email messages
and passing calls which modify email state to imapy.IMAP() class.
:copyright: (c) 2015 by Vladimir Goncharov.
:license: MIT, see LICENSE for more details.
"""
import re
from email.header import decode_header
from . import utils
from .structures import CaseInsensitiveDict
from .packages import six
from .exceptions import (
EmailParsingError,
)
class EmailMessage(CaseInsensitiveDict):
"""Class for parsing email message"""
def __init__(self, **kwargs):
super(EmailMessage, self).__init__()
# inject connections
self.uid = kwargs.pop('uid', None)
self.folder = kwargs.pop('folder', None)
self.email_obj = kwargs.pop('email_obj', None)
self.imap_obj = kwargs.pop('imap_obj', None)
# init
self.update(kwargs)
self['to'] = []
self['subject'] = ''
self['cc'] = []
self['text'] = []
self['html'] = []
self['headers'] = CaseInsensitiveDict()
self['flags'] = kwargs.pop('flags', None)
self['attachments'] = []
self['uid'] = self.uid
self['email_sequence_id'] = kwargs.pop('email_sequence_id', None)
self.parse()
def clean_value(self, value, encoding):
"""Converts value to utf-8 encoding"""
if six.PY2:
if encoding not in ['utf-8', None]:
return value.decode(encoding).encode('utf-8')
elif six.PY3:
# in PY3 'decode_headers' may return both byte and unicode
if isinstance(value, bytes):
if encoding in ['utf-8', None]:
return utils.b_to_str(value)
else:
return value.decode(encoding)
return value
def _normalize_string(self, text):
        """Remove excessive spaces, tabs, newlines, etc."""
        conversion = {
            # newlines
            '\r\n\t': ' ',
            # replace excessive empty spaces
            r'\s+': ' '
        }
for find, replace in six.iteritems(conversion):
            text = re.sub(find, replace, text, flags=re.UNICODE)
return text
def _get_links(self, text):
        """Return a list of links found in text."""
        links = []
        matches = re.findall(
            r'(?<=[\s^\<])(?P<link>https?\:\/\/.*?)(?=[\s\>$])', text, re.I)
        if matches:
for match in matches:
links.append(match)
return list(set(links))
def mark(self, flags):
"""Alias function for imapy.mark()"""
if not isinstance(flags, list):
flags = [flags]
# update self['flags']
for t in flags:
if t[:2] == 'un':
if t[2:] in self['flags']:
self['flags'].remove(t[2:])
else:
if t not in self['flags']:
self['flags'].append(t)
return self.imap_obj.mark(flags, self.uid)
def delete(self):
"""Alias function for imapy.delete_message"""
return self.imap_obj.delete_message(self.uid, self.folder)
def copy(self, new_mailbox):
"""Alias function for imapy.copy_message"""
return self.imap_obj.copy_message(self.uid, new_mailbox, self)
def move(self, new_mailbox):
"""Alias function for imapy.copy_message"""
return self.imap_obj.move_message(self.uid, new_mailbox, self)
def parse(self):
"""Parses email object and stores data so that email parts can be
access with a dictionary syntax like msg['from'], msg['to']
"""
# check main body
if not self.email_obj.is_multipart():
text = utils.b_to_str(self.email_obj.get_payload(decode=True))
self['text'].append(
{
'text': text,
'text_normalized': self._normalize_string(text),
'links': self._get_links(text)
}
)
# check attachments
else:
for part in self.email_obj.walk():
# multipart/* are just containers
if part.get_content_maintype() == 'multipart':
continue
content_type = part.get_content_type()
if content_type == 'text/plain':
# convert text
text = utils.b_to_str(part.get_payload(decode=True))
self['text'].append(
{
'text': text,
'text_normalized':
self._normalize_string(text),
'links': self._get_links(text)
}
)
elif content_type == 'text/html':
# convert html
html = utils.b_to_str(part.get_payload(decode=True))
self['html'].append(html)
else:
try:
data = part.get_payload(decode=True)
# rare cases when we get decoding error
except AssertionError:
data = None
attachment_fname = decode_header(part.get_filename())
filename = self.clean_value(
attachment_fname[0][0], attachment_fname[0][1]
)
attachment = {
'filename': filename,
'data': data,
'content_type': content_type
}
self['attachments'].append(attachment)
# subject
if 'subject' in self.email_obj:
msg_subject = decode_header(self.email_obj['subject'])
self['subject'] = self.clean_value(
msg_subject[0][0], msg_subject[0][1])
# from
# cleanup header
from_header_cleaned = re.sub('[\n\r\t]+', ' ', self.email_obj['from'])
msg_from = decode_header(from_header_cleaned)
msg_txt = ''
for part in msg_from:
msg_txt += self.clean_value(part[0], part[1])
if '<' in msg_txt and '>' in msg_txt:
            result = re.match(r'(?P<from>.*)?(?P<email>\<.*\>)', msg_txt, re.U)
self['from_whom'] = result.group('from').strip()
self['from_email'] = result.group('email').strip('<>')
self['from'] = msg_txt
else:
self['from_whom'] = ''
self['from_email'] = self['from'] = msg_txt.strip()
# to
if 'to' in self.email_obj:
msg_to = decode_header(self.email_obj['to'])
self['to'] = self.clean_value(
msg_to[0][0], msg_to[0][1]).strip('<>')
# cc
msg_cc = decode_header(str(self.email_obj['cc']))
cc_clean = self.clean_value(msg_cc[0][0], msg_cc[0][1])
if cc_clean and cc_clean.lower() != 'none':
# split recepients
recepients = cc_clean.split(',')
for recepient in recepients:
if '<' in recepient and '>' in recepient:
# (name)? + email
                    matches = re.findall(r'((?P<to>.*)?(?P<to_email>\<.*\>))',
recepient, re.U)
if matches:
for match in matches:
self['cc'].append(
{
'cc': match[0],
'cc_to': match[1].strip(" \n\r\t"),
'cc_email': match[2].strip("<>"),
}
)
else:
raise EmailParsingError(
"Error parsing CC message header. "
"Header value: {header}".format(header=cc_clean)
)
else:
# email only
self['cc'].append(
{
'cc': recepient,
'cc_to': '',
'cc_email': recepient,
}
)
# Date
self['date'] = self.email_obj['Date']
# message headers
for header, val in self.email_obj.items():
if header in self['headers']:
self['headers'][header].append(val)
else:
self['headers'][header] = [val]
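# Access sketch (illustrative): once `parse()` has run, message parts are read
# with dictionary syntax, for example
#
#   message['subject'], message['from_email'], message['cc'], message['text']
#
# while state-changing helpers such as `message.mark(['flagged'])` are proxied
# to the underlying imapy.IMAP() connection.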
| 36.877119
| 78
| 0.475928
|
218660bc03a12ea774aec41148d54b5493f1c024
| 8,318
|
py
|
Python
|
official/vision/detection/main.py
|
denis-choi/models
|
b478430d89f0b40fe3caf281f4e24cefda825412
|
[
"Apache-2.0"
] | 2
|
2019-11-29T09:04:04.000Z
|
2020-04-28T06:11:54.000Z
|
official/vision/detection/main.py
|
denis-choi/models
|
b478430d89f0b40fe3caf281f4e24cefda825412
|
[
"Apache-2.0"
] | 1
|
2021-03-31T21:39:22.000Z
|
2021-03-31T21:39:22.000Z
|
official/vision/detection/main.py
|
denis-choi/models
|
b478430d89f0b40fe3caf281f4e24cefda825412
|
[
"Apache-2.0"
] | 1
|
2022-01-25T01:09:00.000Z
|
2022-01-25T01:09:00.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Main function to train various object detection models."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
import functools
import os
import pprint
import tensorflow.compat.v2 as tf
from official.modeling.hyperparams import params_dict
from official.modeling.training import distributed_executor as executor
from official.utils import hyperparams_flags
from official.vision.detection.configs import factory as config_factory
from official.vision.detection.dataloader import input_reader
from official.vision.detection.dataloader import mode_keys as ModeKeys
from official.vision.detection.executor.detection_executor import DetectionDistributedExecutor
from official.vision.detection.modeling import factory as model_factory
hyperparams_flags.initialize_common_flags()
flags.DEFINE_string(
'mode',
default='train',
help='Mode to run: `train`, `eval` or `train_and_eval`.')
flags.DEFINE_string(
'model', default='retinanet',
help='Model to run: `retinanet` or `shapemask`.')
flags.DEFINE_string('training_file_pattern', None,
'Location of the train data.')
flags.DEFINE_string('eval_file_pattern', None, 'Location of the eval data.')
flags.DEFINE_string(
'checkpoint_path', None,
'The checkpoint path to eval. Only used in eval_once mode.')
FLAGS = flags.FLAGS
def run_executor(params,
train_input_fn=None,
eval_input_fn=None,
callbacks=None):
"""Runs Retinanet model on distribution strategy defined by the user."""
if params.architecture.use_bfloat16:
policy = tf.compat.v2.keras.mixed_precision.experimental.Policy(
'mixed_bfloat16')
tf.compat.v2.keras.mixed_precision.experimental.set_policy(policy)
model_builder = model_factory.model_generator(params)
if FLAGS.mode == 'train':
def _model_fn(params):
return model_builder.build_model(params, mode=ModeKeys.TRAIN)
builder = executor.ExecutorBuilder(
strategy_type=params.strategy_type,
strategy_config=params.strategy_config)
num_workers = int(builder.strategy.num_replicas_in_sync + 7) // 8
is_multi_host = (int(num_workers) >= 2)
logging.info(
'Train num_replicas_in_sync %d num_workers %d is_multi_host %s',
builder.strategy.num_replicas_in_sync, num_workers, is_multi_host)
if is_multi_host:
train_input_fn = functools.partial(
train_input_fn,
batch_size=params.train.batch_size //
builder.strategy.num_replicas_in_sync)
dist_executor = builder.build_executor(
class_ctor=DetectionDistributedExecutor,
params=params,
is_multi_host=is_multi_host,
model_fn=_model_fn,
loss_fn=model_builder.build_loss_fn,
predict_post_process_fn=model_builder.post_processing,
trainable_variables_filter=model_builder
.make_filter_trainable_variables_fn())
return dist_executor.train(
train_input_fn=train_input_fn,
model_dir=params.model_dir,
iterations_per_loop=params.train.iterations_per_loop,
total_steps=params.train.total_steps,
init_checkpoint=model_builder.make_restore_checkpoint_fn(),
custom_callbacks=callbacks,
save_config=True)
elif FLAGS.mode == 'eval' or FLAGS.mode == 'eval_once':
def _model_fn(params):
return model_builder.build_model(params, mode=ModeKeys.PREDICT_WITH_GT)
builder = executor.ExecutorBuilder(
strategy_type=params.strategy_type,
strategy_config=params.strategy_config)
num_workers = int(builder.strategy.num_replicas_in_sync + 7) // 8
is_multi_host = (int(num_workers) >= 2)
if is_multi_host:
eval_input_fn = functools.partial(
eval_input_fn,
batch_size=params.eval.batch_size //
builder.strategy.num_replicas_in_sync)
logging.info('Eval num_replicas_in_sync %d num_workers %d is_multi_host %s',
builder.strategy.num_replicas_in_sync, num_workers,
is_multi_host)
dist_executor = builder.build_executor(
class_ctor=DetectionDistributedExecutor,
params=params,
is_multi_host=is_multi_host,
model_fn=_model_fn,
loss_fn=model_builder.build_loss_fn,
predict_post_process_fn=model_builder.post_processing,
trainable_variables_filter=model_builder
.make_filter_trainable_variables_fn())
if FLAGS.mode == 'eval':
results = dist_executor.evaluate_from_model_dir(
model_dir=params.model_dir,
eval_input_fn=eval_input_fn,
eval_metric_fn=model_builder.eval_metrics,
eval_timeout=params.eval.eval_timeout,
min_eval_interval=params.eval.min_eval_interval,
total_steps=params.train.total_steps)
else:
# Run evaluation once for a single checkpoint.
if not FLAGS.checkpoint_path:
raise ValueError('FLAGS.checkpoint_path cannot be empty.')
checkpoint_path = FLAGS.checkpoint_path
if tf.io.gfile.isdir(checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
summary_writer = executor.SummaryWriter(params.model_dir, 'eval')
results, _ = dist_executor.evaluate_checkpoint(
checkpoint_path=checkpoint_path,
eval_input_fn=eval_input_fn,
eval_metric_fn=model_builder.eval_metrics,
summary_writer=summary_writer)
for k, v in results.items():
logging.info('Final eval metric %s: %f', k, v)
return results
else:
raise ValueError('Mode not found: %s.' % FLAGS.mode)
def run(callbacks=None):
params = config_factory.config_generator(FLAGS.model)
params = params_dict.override_params_dict(
params, FLAGS.config_file, is_strict=True)
params = params_dict.override_params_dict(
params, FLAGS.params_override, is_strict=True)
params.override(
{
'strategy_type': FLAGS.strategy_type,
'model_dir': FLAGS.model_dir,
'strategy_config': executor.strategy_flags_dict(),
},
is_strict=False)
params.validate()
params.lock()
pp = pprint.PrettyPrinter()
params_str = pp.pformat(params.as_dict())
logging.info('Model Parameters: {}'.format(params_str))
train_input_fn = None
eval_input_fn = None
training_file_pattern = FLAGS.training_file_pattern or params.train.train_file_pattern
eval_file_pattern = FLAGS.eval_file_pattern or params.eval.eval_file_pattern
if not training_file_pattern and not eval_file_pattern:
raise ValueError('Must provide at least one of training_file_pattern and '
'eval_file_pattern.')
if training_file_pattern:
# Use global batch size for single host.
train_input_fn = input_reader.InputFn(
file_pattern=training_file_pattern,
params=params,
mode=input_reader.ModeKeys.TRAIN,
batch_size=params.train.batch_size)
if eval_file_pattern:
eval_input_fn = input_reader.InputFn(
file_pattern=eval_file_pattern,
params=params,
mode=input_reader.ModeKeys.PREDICT_WITH_GT,
batch_size=params.eval.batch_size,
num_examples=params.eval.eval_samples)
return run_executor(
params,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
callbacks=callbacks)
def main(argv):
del argv # Unused.
run()
if __name__ == '__main__':
assert tf.version.VERSION.startswith('2.')
app.run(main)
| 36.482456
| 94
| 0.72265
|
f6a0ab277a5b4487dd315aaac9a0887ad29835f9
| 3,904
|
py
|
Python
|
colour/difference/huang2015.py
|
JGoldstone/colour
|
6829b363d5f0682bff0f4826995e7ceac189ff28
|
[
"BSD-3-Clause"
] | null | null | null |
colour/difference/huang2015.py
|
JGoldstone/colour
|
6829b363d5f0682bff0f4826995e7ceac189ff28
|
[
"BSD-3-Clause"
] | null | null | null |
colour/difference/huang2015.py
|
JGoldstone/colour
|
6829b363d5f0682bff0f4826995e7ceac189ff28
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Huang et al. (2015) Power-Functions
===================================
Defines the *Huang, Cui, Melgosa, Sanchez-Maranon, Li, Luo and Liu (2015)*
power-functions improving the performance of colour-difference formulas:
- :func:`colour.difference.power_function_Huang2015`
References
----------
- :cite:`Huang2015` : Huang, M., Cui, G., Melgosa, M., Sanchez-Maranon, M.,
Li, C., Luo, M. R., & Liu, H. (2015). Power functions improving the
performance of color-difference formulas. Optical Society of America,
23(1), 597-610. doi:10.1364/OE.23.000597
- :cite:`Li2017` : Li, C., Li, Z., Wang, Z., Xu, Y., Luo, M. R., Cui, G.,
Melgosa, M., Brill, M. H., & Pointer, M. (2017). Comprehensive color
solutions: CAM16, CAT16, and CAM16-UCS. Color Research & Application,
42(6), 703-718. doi:10.1002/col.22131
"""
import numpy as np
from colour.utilities import CaseInsensitiveMapping, tsplit, validate_method
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'power_function_Huang2015',
]
COEFFICIENTS_HUANG2015 = CaseInsensitiveMapping({
'CIE 1976': np.array([1.26, 0.55]),
'CIE 1994': np.array([1.41, 0.70]),
'CIE 2000': np.array([1.43, 0.70]),
'CMC': np.array([1.34, 0.66]),
'CAM02-LCD': np.array([1.00, 0.85]),
'CAM02-SCD': np.array([1.45, 0.75]),
'CAM02-UCS': np.array([1.30, 0.75]),
'CAM16-UCS': np.array([1.41, 0.63]),
'DIN99d': np.array([1.28, 0.74]),
'OSA': np.array([3.32, 0.62]),
'OSA-GP-Euclidean': np.array([1.52, 0.76]),
'ULAB': np.array([1.17, 0.69]),
})
COEFFICIENTS_HUANG2015.__doc__ = """
*Huang et al. (2015)* power-functions coefficients.
References
----------
:cite:`Huang2015`, :cite:`Li2017`
COEFFICIENTS_HUANG2015 : CaseInsensitiveMapping
**{'CIE 1976', 'CIE 1994', 'CIE 2000', 'CMC', 'CAM02-LCD', 'CAM02-SCD',
'CAM16-UCS', 'DIN99d', 'OSA', 'OSA-GP-Euclidean', 'ULAB'}**
Notes
-----
- :cite:`Li2017` does not give the coefficients for the *CAM16-LCD* and
*CAM16-SCD* colourspaces. *Ronnie Luo* has been contacted to know if they
have been computed.
Aliases:
- 'cie1976': 'CIE 1976'
- 'cie1994': 'CIE 1994'
- 'cie2000': 'CIE 2000'
"""
COEFFICIENTS_HUANG2015['cie1976'] = COEFFICIENTS_HUANG2015['CIE 1976']
COEFFICIENTS_HUANG2015['cie1994'] = COEFFICIENTS_HUANG2015['CIE 1994']
COEFFICIENTS_HUANG2015['cie2000'] = COEFFICIENTS_HUANG2015['CIE 2000']
def power_function_Huang2015(d_E, coefficients='CIE 2000'):
"""
Improves the performance of the :math:`\\Delta E` value for given
coefficients using
*Huang, Cui, Melgosa, Sanchez-Maranon, Li, Luo and Liu (2015)*
    power-function: :math:`d_E^{\\prime}=a*d_E^b`.
Parameters
----------
d_E : array_like
Computed colour difference array :math:`\\Delta E`.
coefficients : str, optional
**{'CIE 1976', 'CIE 1994', 'CIE 2000', 'CMC', 'CAM02-LCD', 'CAM02-SCD',
'CAM16-UCS', 'DIN99d', 'OSA', 'OSA-GP-Euclidean', 'ULAB'}**,
Coefficients for the power-function.
Returns
-------
numeric or ndarray
        Improved :math:`\\Delta E` value.
References
----------
:cite:`Huang2015`, :cite:`Li2017`
Examples
--------
>>> d_E = np.array([2.0425, 2.8615, 3.4412])
>>> power_function_Huang2015(d_E) # doctest: +ELLIPSIS
array([ 2.3574879..., 2.9850503..., 3.3965106...])
"""
coefficients = validate_method(
coefficients, COEFFICIENTS_HUANG2015,
'"{0}" coefficients are invalid, '
'they must be one of {1}!')
a, b = tsplit(COEFFICIENTS_HUANG2015[coefficients])
d_E_p = a * d_E ** b
return d_E_p
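# Illustrative sketch (not part of the upstream module): the same input values as the
# doctest above, run against other coefficient sets from COEFFICIENTS_HUANG2015. The
# alias 'cie2000' resolves to the 'CIE 2000' entry added above.
if __name__ == '__main__':
    d_E = np.array([2.0425, 2.8615, 3.4412])
    print(power_function_Huang2015(d_E, coefficients='cie2000'))
    print(power_function_Huang2015(d_E, coefficients='CAM02-UCS'))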
| 32
| 79
| 0.630891
|
6e31af3d57648146ce7dc5c4520ff9651e7e69bb
| 318
|
py
|
Python
|
Divide and conquer/BinarySearch.py
|
Meemaw/Algorithms
|
13da51dfcdc3f7470920c8d4975aa2efce261417
|
[
"MIT"
] | null | null | null |
Divide and conquer/BinarySearch.py
|
Meemaw/Algorithms
|
13da51dfcdc3f7470920c8d4975aa2efce261417
|
[
"MIT"
] | null | null | null |
Divide and conquer/BinarySearch.py
|
Meemaw/Algorithms
|
13da51dfcdc3f7470920c8d4975aa2efce261417
|
[
"MIT"
] | null | null | null |
__author__ = 'Meemaw'
def binarySearch(aList, element):
if len(aList) == 1:
return aList[0] == element
mid = int(len(aList)/2)
if aList[mid] > element:
return binarySearch(aList[:mid],element)
if aList[mid] < element:
return binarySearch(aList[mid:], element)
return True
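# Illustrative usage (not added by the original author): binarySearch expects its
# input list to already be sorted in ascending order and returns a boolean
# membership result; the values below are examples only.
if __name__ == '__main__':
    sorted_values = [1, 3, 5, 7, 9, 11]
    print(binarySearch(sorted_values, 7))    # True
    print(binarySearch(sorted_values, 4))    # False
    print(binarySearch([42], 42))            # True (single-element base case)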
| 24.461538
| 49
| 0.622642
|
2568592a3ef9ea9843e317fe64c8f1ca2957599d
| 471
|
py
|
Python
|
data/scripts/templates/object/tangible/ship/attachment/weapon/shared_xwing_weapon1_pos_s06_0.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/tangible/ship/attachment/weapon/shared_xwing_weapon1_pos_s06_0.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/tangible/ship/attachment/weapon/shared_xwing_weapon1_pos_s06_0.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/attachment/weapon/shared_xwing_weapon1_pos_s06_0.iff"
result.attribute_template_id = 8
result.stfName("item_n","ship_attachment")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 27.705882
| 94
| 0.738854
|
1290e9688e9de9003c62d3a6a86e010c97095903
| 525
|
py
|
Python
|
rates_app_05242021/rates_shared/setup.py
|
t4d-classes/advanced-python_05242021
|
a4874d20372001cf199360183e5223ac60efa19f
|
[
"MIT"
] | null | null | null |
rates_app_05242021/rates_shared/setup.py
|
t4d-classes/advanced-python_05242021
|
a4874d20372001cf199360183e5223ac60efa19f
|
[
"MIT"
] | null | null | null |
rates_app_05242021/rates_shared/setup.py
|
t4d-classes/advanced-python_05242021
|
a4874d20372001cf199360183e5223ac60efa19f
|
[
"MIT"
] | null | null | null |
"""Setup Package Module"""
from pathlib import Path
from setuptools import find_packages, setup
HERE = Path(__file__).parent
README = (HERE / "README.md").read_text()
setup(
name="rates_shared",
version="0.1.0",
description="A shared library for the Rates App.",
long_description=README,
long_description_content_type="text/markdown",
url="https://www.t4d.io",
license="MIT",
author="Eric Greene",
author_email="eric@t4d.io",
packages=find_packages(where=".", exclude=('tests',))
)
| 25
| 57
| 0.685714
|
4e5d2930cff0ac752d291141be657c8caba57343
| 129
|
py
|
Python
|
pokus1/views.py
|
zvolsky/example_translation
|
37a8e71df6bdc12dda96b9e84b5c1bb7a16087dc
|
[
"MIT"
] | null | null | null |
pokus1/views.py
|
zvolsky/example_translation
|
37a8e71df6bdc12dda96b9e84b5c1bb7a16087dc
|
[
"MIT"
] | null | null | null |
pokus1/views.py
|
zvolsky/example_translation
|
37a8e71df6bdc12dda96b9e84b5c1bb7a16087dc
|
[
"MIT"
] | null | null | null |
from django.views.generic import TemplateView
class HelloWorldView(TemplateView):
template_name = "pokus1/helloworld.html"
| 21.5
| 45
| 0.806202
|
65beeadedeb7337da616145bafe1667553a261d1
| 369
|
py
|
Python
|
receiver.py
|
dack/labo-caller
|
bb7764bc92f1aaa9d758f3811c50df17f99a79f8
|
[
"MIT"
] | null | null | null |
receiver.py
|
dack/labo-caller
|
bb7764bc92f1aaa9d758f3811c50df17f99a79f8
|
[
"MIT"
] | null | null | null |
receiver.py
|
dack/labo-caller
|
bb7764bc92f1aaa9d758f3811c50df17f99a79f8
|
[
"MIT"
] | null | null | null |
from flask import Flask
from twilio.twiml.voice_response import VoiceResponse
app = Flask(__name__)
@app.route("/answer", methods=['GET', 'POST'])
def answer_call():
"""Respond to incoming phone calls with a brief message."""
resp = VoiceResponse()
resp.say('Hello', voice='man')
return str(resp)
if __name__ == "__main__":
app.run(debug=True)
| 21.705882
| 63
| 0.685637
|
28aee349081a5be342e32ecb3d0d42158b641956
| 409
|
py
|
Python
|
backend/drive_time_29216/wsgi.py
|
crowdbotics-apps/drive-time-29216
|
963cbe578f21c79a974a72a9cd410a1996a1957b
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/drive_time_29216/wsgi.py
|
crowdbotics-apps/drive-time-29216
|
963cbe578f21c79a974a72a9cd410a1996a1957b
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/drive_time_29216/wsgi.py
|
crowdbotics-apps/drive-time-29216
|
963cbe578f21c79a974a72a9cd410a1996a1957b
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
"""
WSGI config for drive_time_29216 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'drive_time_29216.settings')
application = get_wsgi_application()
| 24.058824
| 78
| 0.794621
|
b7864ba36467fba2de68248cf4079d7f44fc7aaf
| 3,909
|
py
|
Python
|
tests/test_integration_user_information.py
|
PhotoScout/API
|
24c2040b0a2fcb1ea906c7aa095c9e74d3ca4fa9
|
[
"MIT"
] | null | null | null |
tests/test_integration_user_information.py
|
PhotoScout/API
|
24c2040b0a2fcb1ea906c7aa095c9e74d3ca4fa9
|
[
"MIT"
] | null | null | null |
tests/test_integration_user_information.py
|
PhotoScout/API
|
24c2040b0a2fcb1ea906c7aa095c9e74d3ca4fa9
|
[
"MIT"
] | null | null | null |
import unittest2 as unittest
import json
from app import app, db
from app.models import User, BetaCode
import base64
import time
class Test_Integration_User_Information(unittest.TestCase):
""" All the test cases around the user manipulation """
ENTRYPOINT_USER = app.config['BASE_URL']+'/user'
ENTRYPOINT_AUTH = app.config['BASE_URL']+'/user/auth'
ENTRYPOINT_INFO = app.config['BASE_URL']+'/user/info'
def setUp(self):
self.app = app.test_client()
db.create_all()
# Add some users
user1 = User(
username='johndoe',
email='johndoe@foo.bar'
)
user1.hash_password("123456")
user2 = User(
username='johndoe1',
email='johndoe@foo.bar'
)
user2.hash_password("123456")
db.session.add(user1)
db.session.add(user2)
db.session.commit()
def tearDown(self):
db.session.remove()
db.drop_all()
def create_authorization_header(self, username, password):
""" Create the authorization header """
auth_str = username + ':' + password
return {'Authorization': 'Basic ' + base64.b64encode(auth_str.encode())}
def login_get_token_header(self, username, password):
""" Login the user and return the token """
response = self.app.post(
self.ENTRYPOINT_AUTH,
headers=self.create_authorization_header(username, password) ,
follow_redirects=True
)
token_str = json.loads(response.data)['token']
return self.create_authorization_header(token_str, '*')
def get_user(self, username):
return User.query.filter_by(username=username).first()
def test_user_information_self_token(self):
""" Test getting user info without token """
response = self.app.get(
self.ENTRYPOINT_INFO,
follow_redirects=True
)
self.assertEqual(response.status_code, 401)
self.assertEqual(response.data, 'Unauthorized Access')
def test_user_information_self(self):
""" Test get information about himself """
user = self.get_user('johndoe')
data = {
'id': user.id,
'username': user.username,
'fullname': user.fullname,
'email': user.email
}
response = self.app.get(
self.ENTRYPOINT_INFO,
headers=self.login_get_token_header('johndoe', '123456'),
follow_redirects=True
)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.data), data)
def test_user_information_other(self):
""" Test get information about another user """
user = self.get_user('johndoe1')
data_email = {
'id': user.id,
'username': user.username,
'fullname': user.fullname,
'email': user.email
}
data = {
'id': user.id,
'username': user.username,
'fullname': user.fullname
}
response = self.app.get(
self.ENTRYPOINT_INFO+"/johndoe1",
headers=self.login_get_token_header('johndoe', '123456'),
follow_redirects=True
)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.data), data)
self.assertNotEqual(json.loads(response.data), data_email)
def test_user_information_other_invalid(self):
""" Test get information about another user that doesn't exist """
response = self.app.get(
self.ENTRYPOINT_INFO+"/johndoe2",
headers=self.login_get_token_header('johndoe', '123456'),
follow_redirects=True
)
self.assertEqual(response.status_code, 404)
self.assertEqual(json.loads(response.data), {"error": "No user found"})
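# Illustrative sketch (not part of the original tests): the helpers above build a
# standard HTTP Basic Authorization header -- base64 of "username:password", or of
# "<token>:*" once /user/auth has issued a token. A quick standalone check:
if __name__ == '__main__':
    credentials = base64.b64encode('johndoe:123456'.encode()).decode()
    print({'Authorization': 'Basic ' + credentials})
    # -> {'Authorization': 'Basic am9obmRvZToxMjM0NTY='}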
| 32.575
| 80
| 0.606293
|
299b4032f39badc444fbc70ad0f72aa3ad2ea879
| 548
|
py
|
Python
|
utils/logsupport.py
|
JackYangzg/pytorch-ddpg
|
96838a40dd6992a0a18065a5edafbefc6bb0ac69
|
[
"Apache-2.0"
] | 1
|
2018-07-31T03:00:42.000Z
|
2018-07-31T03:00:42.000Z
|
utils/logsupport.py
|
JackYangzg/pytorch-ddpg
|
96838a40dd6992a0a18065a5edafbefc6bb0ac69
|
[
"Apache-2.0"
] | null | null | null |
utils/logsupport.py
|
JackYangzg/pytorch-ddpg
|
96838a40dd6992a0a18065a5edafbefc6bb0ac69
|
[
"Apache-2.0"
] | 1
|
2018-07-31T03:01:11.000Z
|
2018-07-31T03:01:11.000Z
|
import logging
from utils.configsupport import config
from constfile.constkey import *
__fileout = config.get(LOG_ISFILEOUT)
__filepath = config.get(LOG_FILEPATH)
__level = config.get(LOG_LEVEL)
__formated = "%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s"
__datefmt= '%a, %d %b %Y %H:%M:%S'
if not __fileout:
logging.basicConfig(level=__level, format=__formated, datefmt=__datefmt)
else:
logging.basicConfig(level=__level, format=__formated, datefmt=__datefmt, filename=__filepath)
log = logging.getLogger("ddpg")
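# Illustrative usage (not part of the original module): other modules are expected to
# simply do `from utils.logsupport import log`; the call below is a smoke test of the
# configuration chosen above, and the message text is an example only.
if __name__ == '__main__':
    log.info('logging configured (file output: %s, level: %s)', __fileout, __level)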
| 30.444444
| 97
| 0.759124
|
2e93114b29aa107bb77b3ffaad2013a16b3ad9d1
| 776
|
py
|
Python
|
scripts/rebar_visible.py
|
appolimp/Dynamo_scripts
|
c4ea77428111d186fab55501243ad4319376482b
|
[
"MIT"
] | 1
|
2021-07-23T14:38:17.000Z
|
2021-07-23T14:38:17.000Z
|
scripts/rebar_visible.py
|
appolimp/Dynamo_scripts
|
c4ea77428111d186fab55501243ad4319376482b
|
[
"MIT"
] | null | null | null |
scripts/rebar_visible.py
|
appolimp/Dynamo_scripts
|
c4ea77428111d186fab55501243ad4319376482b
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from base.wrapper import transaction, doc
from base.exeption import ScriptError
from rebar import unobscured_all_selected_rebars_on_view, unobscured_all_rebars_on_view
import logging
@transaction
def main():
ONLY_SELECTED = IN[0]
if ONLY_SELECTED:
unobscured_all_selected_rebars_on_view(doc.ActiveView, True, True)
else:
unobscured_all_rebars_on_view(doc.ActiveView, True, True)
if __name__ == '__main__':
logging.basicConfig(
filename=None, level=logging.INFO,
format='[%(asctime)s] %(levelname).1s: %(message)s',
datefmt='%Y.%m.%d %H:%M:%S')
try:
OUT = main()
except ScriptError as e:
logging.error(e)
except Exception as err:
logging.exception('Critical error')
| 27.714286
| 87
| 0.69201
|
bb83ddadd832bf156966a91cee9f00ae6247aacd
| 5,154
|
py
|
Python
|
quantum_chemistry/train.py
|
yuxuan-du/Quantum_architecture_search
|
429be34bd7ccce60453344a3e72f05fba07dde1e
|
[
"Apache-2.0"
] | 5
|
2021-08-19T06:45:23.000Z
|
2022-03-07T03:00:20.000Z
|
quantum_chemistry/train.py
|
yuxuan-du/Quantum_architecture_search
|
429be34bd7ccce60453344a3e72f05fba07dde1e
|
[
"Apache-2.0"
] | null | null | null |
quantum_chemistry/train.py
|
yuxuan-du/Quantum_architecture_search
|
429be34bd7ccce60453344a3e72f05fba07dde1e
|
[
"Apache-2.0"
] | 1
|
2021-11-25T08:07:19.000Z
|
2021-11-25T08:07:19.000Z
|
import argparse
import pennylane as qml
from pennylane import numpy as np
import time
import os
import json
from model import CircuitModel
parser = argparse.ArgumentParser("QAS")
parser.add_argument('--data', type=str, default='./data', help='location of the data corpus')
parser.add_argument('--learning_rate', type=float, default=0.05, help='learning rate')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--epochs', type=int, default=50, help='num of training epochs')
parser.add_argument('--layers', type=int, default=20, help='total number of layers')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='', help='which architecture to use')
parser.add_argument('--noise', action='store_true', default=False, help='use noise')
parser.add_argument('--device', type=str, default='default', help='which device to use', choices=['default', 'ibmq-sim', 'ibmq'])
# circuit
parser.add_argument('--n_qubits', type=int, default=3, help='number of qubits')
parser.add_argument('--n_layers', type=int, default=3, help='number of layers')
args = parser.parse_args()
args.save = 'eval-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
os.makedirs(args.save)
with open(os.path.join(args.save, 'args.txt'), 'w') as f:
json.dump(args.__dict__, f, indent=2)
if args.noise or args.device in ['ibmq-sim', 'ibmq']:
import qiskit
import qiskit.providers.aer.noise as noise
def main():
np.random.seed(args.seed)
records = {
'energy': [],
'conv': [],
'test_acc': 0
}
name = "h2"
geometry = "h2.xyz"
charge = 0
multiplicity = 1
basis_set = "sto-3g"
hamiltonian, n_qubits = qml.qchem.generate_hamiltonian(
name,
geometry,
charge,
multiplicity,
basis_set,
n_active_electrons=2,
n_active_orbitals=2,
mapping='jordan_wigner'
)
args.n_qubits = n_qubits
print("Number of qubits = ", n_qubits)
'''init device'''
if args.device in ['ibmq-sim', 'ibmq']:
from qiskit import IBMQ
account_key = ''
assert account_key != '', 'You must fill in your IBMQ account key.'
IBMQ.save_account(account_key, overwrite=True)
provider = IBMQ.enable_account(account_key)
if args.device == 'ibmq':
dev = qml.device('qiskit.ibmq', wires=4, backend='ibmq_ourense', provider=provider)
else:
backend = provider.get_backend('ibmq_ourense')
noise_model = noise.NoiseModel().from_backend(backend)
dev = qml.device('qiskit.aer', wires=args.n_qubits, noise_model=noise_model)
else:
if args.noise:
# Error probabilities
prob_1 = 0.05 # 1-qubit gate
prob_2 = 0.2 # 2-qubit gate
# Depolarizing quantum errors
error_1 = noise.depolarizing_error(prob_1, 1)
error_2 = noise.depolarizing_error(prob_2, 2)
# Add errors to noise model
noise_model = noise.NoiseModel()
noise_model.add_all_qubit_quantum_error(error_1, ['u1', 'u2', 'u3'])
noise_model.add_all_qubit_quantum_error(error_2, ['cx'])
print(noise_model)
dev = qml.device('qiskit.aer', wires=args.n_qubits, noise_model=noise_model)
else:
dev = qml.device("default.qubit", wires=args.n_qubits)
'''init model'''
model = CircuitModel(dev, args.n_qubits, args.n_layers, args.arch)
cost = qml.VQECost(lambda params, wires: model(params, wires), hamiltonian, dev)
step_size = 0.2
exact_value = -1.136189454088
opt = qml.QNGOptimizer(step_size, lam=0.001, diag_approx=False)
'''train'''
prev_energy = cost(model.params)
print(prev_energy)
for epoch in range(args.epochs):
'''
# shuffle data
indices = np.random.permutation(data_train.shape[0])
data_train = data_train[indices]
label_train = label_train[indices]
'''
model.params = opt.step(cost, model.params) # train
energy = cost(model.params)
conv = np.abs(energy - prev_energy)
prev_energy = energy
records['energy'].append(energy)
records['conv'].append(conv)
if epoch % 1 == 0:
print(
"Iteration = {:}, Ground-state energy = {:.8f} Ha, Convergence parameter = {"
":.8f} Ha".format(epoch, energy, conv)
)
print("Final convergence parameter = {:.8f} Ha".format(conv))
print("Number of iterations = ", epoch)
print("Final value of the ground-state energy = {:.8f} Ha".format(energy))
print(
"Accuracy with respect to the FCI energy: {:.8f} Ha ({:.8f} kcal/mol)".format(
np.abs(energy - exact_value), np.abs(energy - exact_value) * 627.503
)
)
'''save records'''
json.dump(records, open(os.path.join(args.save, 'records.txt'), 'w'), indent=2)
if __name__ == '__main__':
main()
| 37.347826
| 129
| 0.629608
|
7d50657b9d7dd01d6e2892ea8097b87d33494182
| 1,313
|
py
|
Python
|
setup.py
|
Khan/flask-babel
|
864eaabbe54e3bc9b26b25ecee859a7a1bab716b
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
Khan/flask-babel
|
864eaabbe54e3bc9b26b25ecee859a7a1bab716b
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
Khan/flask-babel
|
864eaabbe54e3bc9b26b25ecee859a7a1bab716b
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Flask-Babel
-----------
Adds i18n/l10n support to Flask applications with the help of the
`Babel`_ library.
Links
`````
* `documentation <http://packages.python.org/Flask-Babel>`_
* `development version
<http://github.com/mitsuhiko/flask-babel/zipball/master#egg=Flask-Babel-dev>`_
.. _Babel: http://babel.edgewall.org/
"""
from setuptools import setup
setup(
name='Flask-Babel',
version='0.8',
url='http://github.com/mitsuhiko/flask-babel',
license='BSD',
author='Armin Ronacher',
author_email='armin.ronacher@active-4.com',
description='Adds i18n/l10n support to Flask applications',
long_description=__doc__,
packages=['flaskext'],
namespace_packages=['flaskext'],
zip_safe=False,
platforms='any',
install_requires=[
'Flask',
'Babel',
'pytz',
'speaklater>=1.2',
'Jinja2>=2.5'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| 25.25
| 80
| 0.623762
|
e133e662d811f4ceedc29fc450376a35c5513e2e
| 378
|
py
|
Python
|
setup.py
|
Mozilla-GitHub-Standards/54c69db06ef83bda60e995a6c34ecfd168ca028994e40ce817295415bb409f0c
|
f80e7c0cff97a1e9b301aa04015db983c7645778
|
[
"BSD-3-Clause"
] | 4
|
2015-05-08T16:58:53.000Z
|
2019-09-06T05:30:59.000Z
|
setup.py
|
Mozilla-GitHub-Standards/54c69db06ef83bda60e995a6c34ecfd168ca028994e40ce817295415bb409f0c
|
f80e7c0cff97a1e9b301aa04015db983c7645778
|
[
"BSD-3-Clause"
] | 2
|
2019-02-17T17:44:53.000Z
|
2019-03-28T03:54:39.000Z
|
setup.py
|
Mozilla-GitHub-Standards/54c69db06ef83bda60e995a6c34ecfd168ca028994e40ce817295415bb409f0c
|
f80e7c0cff97a1e9b301aa04015db983c7645778
|
[
"BSD-3-Clause"
] | 7
|
2015-05-21T15:38:29.000Z
|
2019-10-28T23:39:06.000Z
|
import os
from setuptools import setup, find_packages
setup(name='make_mozilla',
version='1.0',
description='Django application.',
long_description='',
author='',
author_email='',
license='',
url='',
include_package_data=True,
classifiers = [],
packages=find_packages(exclude=['tests']),
install_requires=[])
| 21
| 48
| 0.613757
|
a0d868b5cc7e82243953bc64f12c0964045ba7e7
| 1,290
|
py
|
Python
|
pg_jts/__init__.py
|
iburadempa/pg_jts
|
bbb444525c6c3760bfd5407e884782824594d0e0
|
[
"MIT"
] | 9
|
2015-10-21T18:08:01.000Z
|
2021-05-26T15:33:14.000Z
|
pg_jts/__init__.py
|
iburadempa/pg_jts
|
bbb444525c6c3760bfd5407e884782824594d0e0
|
[
"MIT"
] | 6
|
2015-11-16T18:36:06.000Z
|
2020-08-09T10:36:27.000Z
|
pg_jts/__init__.py
|
iburadempa/pg_jts
|
bbb444525c6c3760bfd5407e884782824594d0e0
|
[
"MIT"
] | 3
|
2015-11-16T09:55:13.000Z
|
2018-03-29T02:04:20.000Z
|
# Copyright (C) 2015 ibu radempa <ibu@radempa.de>
#
# Permission is hereby granted, free of charge, to
# any person obtaining a copy of this software and
# associated documentation files (the "Software"),
# to deal in the Software without restriction,
# including without limitation the rights to use,
# copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is
# furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission
# notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY
# OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
pg_jts extracts JSON table schemas from a live PostgreSQL database.
"""
from .pg_jts import get_database, get_schema_table_column_triples
__version__ = (0, 0, 1)
| 36.857143
| 67
| 0.768992
|
ddaf0109411a4690337b561f1aea436303ac76a4
| 607
|
py
|
Python
|
easy/Binary Tree Inorder Traversal/solution.py
|
ashutosh1919/leetcode-problems
|
65f99a3694549af88c7702b598de1a8ccb7db5fb
|
[
"MIT"
] | 8
|
2021-08-21T19:10:04.000Z
|
2022-03-11T14:30:02.000Z
|
easy/Binary Tree Inorder Traversal/solution.py
|
ashutosh1919/leetcode-problems
|
65f99a3694549af88c7702b598de1a8ccb7db5fb
|
[
"MIT"
] | null | null | null |
easy/Binary Tree Inorder Traversal/solution.py
|
ashutosh1919/leetcode-problems
|
65f99a3694549af88c7702b598de1a8ccb7db5fb
|
[
"MIT"
] | 1
|
2021-08-24T06:29:02.000Z
|
2021-08-24T06:29:02.000Z
|
# Time complexity: O(n)
# Approach: Standard inorder traversal algorithm
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from __future__ import annotations  # lets the LeetCode-style hints below resolve without a TreeNode import
from typing import List, Optional
class Solution:
def inorder(self, root, ans):
if not root:
return
self.inorder(root.left, ans)
ans.append(root.val)
self.inorder(root.right, ans)
def inorderTraversal(self, root: Optional[TreeNode]) -> List[int]:
ans = []
self.inorder(root, ans)
return ans
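# Illustrative self-check (not part of the original solution): LeetCode normally
# injects TreeNode, so a minimal stand-in matching the commented definition above
# is declared here to make the snippet runnable. The tree built below is
# [1, null, 2, 3]; its inorder traversal is [1, 3, 2].
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
if __name__ == '__main__':
    root = TreeNode(1, None, TreeNode(2, TreeNode(3)))
    print(Solution().inorderTraversal(root))  # [1, 3, 2]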
| 28.904762
| 70
| 0.593081
|
5ff489d26516baeb27a7a86feaefd7dbb642bdaa
| 844
|
py
|
Python
|
tests/variables_test.py
|
sethvargo/vaex
|
c610324316b2c0a14b8ceac2a30e202adc9da28b
|
[
"MIT"
] | 337
|
2016-02-11T07:36:35.000Z
|
2018-12-10T07:17:35.000Z
|
tests/variables_test.py
|
sethvargo/vaex
|
c610324316b2c0a14b8ceac2a30e202adc9da28b
|
[
"MIT"
] | 127
|
2016-07-06T15:43:14.000Z
|
2018-12-11T18:46:27.000Z
|
tests/variables_test.py
|
sethvargo/vaex
|
c610324316b2c0a14b8ceac2a30e202adc9da28b
|
[
"MIT"
] | 29
|
2016-10-05T14:15:28.000Z
|
2018-11-29T10:17:00.000Z
|
import numpy as np
def test_variables(df):
df.add_variable('a', 2)
df['w'] = df['x'] + df['a']
assert (df.x + 2).tolist() == df.w.tolist()
assert (df.x + 2).tolist() == df[['w']].w.tolist()
def test_variable_rename(df):
df.add_variable('a', 2)
df['w'] = df['x'] + df['a']
assert (df.x + 2).tolist() == df.w.tolist()
df.rename('a', 'a2')
# arrow df has an extra variable, since it does the virtual as_numpy
assert set(df.w.variables()) in [{'x', 'a2'} , {'__x', 'x', 'a2'}]
assert (df.x + 2).tolist() == df.w.tolist()
def test_numpy_array_argument(df):
xx = -np.arange(10)
assert len(df.variables) == 1
# this should insert a variable (the numpy array)
df['w'] = df.func.where(df.x > 5, xx, df.x)
assert len(df.variables) == 2
assert list(df.variables.values())[1] is xx
| 30.142857
| 72
| 0.57346
|
38a5c6fab6e94b336162868db481d7a172b2c4a5
| 447
|
py
|
Python
|
scripts/patchNeutron.py
|
swordboy/neutron-portforward-service
|
271cccbc89f8508b3dd9370de52cd1ca58afad6c
|
[
"Artistic-2.0"
] | 1
|
2016-10-13T02:15:36.000Z
|
2016-10-13T02:15:36.000Z
|
scripts/patchNeutron.py
|
swordboy/neutron-portforward-service
|
271cccbc89f8508b3dd9370de52cd1ca58afad6c
|
[
"Artistic-2.0"
] | null | null | null |
scripts/patchNeutron.py
|
swordboy/neutron-portforward-service
|
271cccbc89f8508b3dd9370de52cd1ca58afad6c
|
[
"Artistic-2.0"
] | null | null | null |
#!/usr/bin/env python
#coding=utf-8
def patch():
config="/etc/neutron/neutron.conf"
    with open(config) as conf:
contents=conf.readlines()
result=[]
for line in contents:
if line.startswith("service_plugins"):
line=line.strip()+",portforward\n"
result.append(line)
    with open(config, "w") as conf:
conf.writelines(result)
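# Illustrative sketch (not part of the original script): the rewrite above turns a
# line such as "service_plugins = router\n" into "service_plugins = router,portforward\n".
# A standalone check of just that string transformation:
def _transform_example():
    line = "service_plugins = router\n"
    if line.startswith("service_plugins"):
        line = line.strip() + ",portforward\n"
    return line  # -> "service_plugins = router,portforward\n"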
if __name__=="__main__":
patch()
| 24.833333
| 54
| 0.64877
|
837eac150f745f1bc9e1e6cacda597e9b77a173a
| 15,277
|
py
|
Python
|
aiortc/rtcdtlstransport.py
|
harrydrippin/aiortc
|
1e72b9a752b0c807316cd0bea2c26c7ae00ecdf4
|
[
"BSD-3-Clause"
] | null | null | null |
aiortc/rtcdtlstransport.py
|
harrydrippin/aiortc
|
1e72b9a752b0c807316cd0bea2c26c7ae00ecdf4
|
[
"BSD-3-Clause"
] | null | null | null |
aiortc/rtcdtlstransport.py
|
harrydrippin/aiortc
|
1e72b9a752b0c807316cd0bea2c26c7ae00ecdf4
|
[
"BSD-3-Clause"
] | null | null | null |
import asyncio
import base64
import binascii
import datetime
import enum
import logging
import os
import struct
import attr
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.bindings.openssl.binding import Binding
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.serialization import (Encoding,
NoEncryption,
PrivateFormat)
from OpenSSL import crypto
from pyee import EventEmitter
from pylibsrtp import Policy, Session
from .rtp import RtcpPacket, RtpPacket, is_rtcp
from .utils import first_completed
binding = Binding()
binding.init_static_locks()
ffi = binding.ffi
lib = binding.lib
SRTP_KEY_LEN = 16
SRTP_SALT_LEN = 14
logger = logging.getLogger('dtls')
class DtlsError(Exception):
pass
def _openssl_assert(ok):
if not ok:
raise DtlsError('OpenSSL call failed')
def certificate_digest(x509):
digest = lib.EVP_get_digestbyname(b'SHA256')
_openssl_assert(digest != ffi.NULL)
result_buffer = ffi.new('unsigned char[]', lib.EVP_MAX_MD_SIZE)
result_length = ffi.new('unsigned int[]', 1)
result_length[0] = len(result_buffer)
digest_result = lib.X509_digest(x509, digest, result_buffer, result_length)
assert digest_result == 1
return b":".join([
base64.b16encode(ch).upper() for ch
in ffi.buffer(result_buffer, result_length[0])]).decode('ascii')
def generate_key():
key = ec.generate_private_key(ec.SECP256R1(), default_backend())
key_pem = key.private_bytes(
encoding=Encoding.PEM,
format=PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=NoEncryption())
return crypto.load_privatekey(crypto.FILETYPE_PEM, key_pem)
def generate_certificate(key):
cert = crypto.X509()
cert.get_subject().CN = binascii.hexlify(os.urandom(16)).decode('ascii')
cert.gmtime_adj_notBefore(-86400)
cert.gmtime_adj_notAfter(30 * 86400)
cert.set_version(2)
cert.set_serial_number(struct.unpack('!L', os.urandom(4))[0])
cert.set_issuer(cert.get_subject())
cert.set_pubkey(key)
cert.sign(key, 'sha256')
return cert
def get_srtp_key_salt(src, idx):
key_start = idx * SRTP_KEY_LEN
salt_start = 2 * SRTP_KEY_LEN + idx * SRTP_SALT_LEN
return (
src[key_start:key_start + SRTP_KEY_LEN] +
src[salt_start:salt_start + SRTP_SALT_LEN]
)
@ffi.callback('int(int, X509_STORE_CTX *)')
def verify_callback(x, y):
return 1
def create_ssl_context(certificate):
if hasattr(lib, 'DTLS_method'):
# openssl >= 1.0.2
method = lib.DTLS_method
else: # pragma: no cover
# openssl < 1.0.2
method = lib.DTLSv1_method
ctx = lib.SSL_CTX_new(method())
ctx = ffi.gc(ctx, lib.SSL_CTX_free)
lib.SSL_CTX_set_verify(ctx, lib.SSL_VERIFY_PEER | lib.SSL_VERIFY_FAIL_IF_NO_PEER_CERT,
verify_callback)
_openssl_assert(lib.SSL_CTX_use_certificate(ctx, certificate._cert._x509) == 1)
_openssl_assert(lib.SSL_CTX_use_PrivateKey(ctx, certificate._key._pkey) == 1)
_openssl_assert(lib.SSL_CTX_set_cipher_list(ctx, b'HIGH:!CAMELLIA:!aNULL') == 1)
_openssl_assert(lib.SSL_CTX_set_tlsext_use_srtp(ctx, b'SRTP_AES128_CM_SHA1_80') == 0)
_openssl_assert(lib.SSL_CTX_set_read_ahead(ctx, 1) == 0)
return ctx
class Channel:
def __init__(self, closed, queue, send):
self.closed = closed
self.queue = queue
self.send = send
async def recv(self):
data = await first_completed(self.queue.get(), self.closed.wait())
if data is True:
raise ConnectionError
return data
class State(enum.Enum):
NEW = 0
CONNECTING = 1
CONNECTED = 2
CLOSED = 3
FAILED = 4
class RTCCertificate:
"""
The :class:`RTCCertificate` interface enables the certificates used by an
:class:`RTCDtlsTransport`.
To generate a certificate and the corresponding private key use :func:`generateCertificate`.
"""
def __init__(self, key, cert):
self._key = key
self._cert = cert
@property
def expires(self):
"""
The date and time after which the certificate will be considered invalid.
"""
not_after = self._cert.get_notAfter().decode('ascii')
return datetime.datetime.strptime(not_after, '%Y%m%d%H%M%SZ').replace(
tzinfo=datetime.timezone.utc)
def getFingerprints(self):
"""
Returns the list of certificate fingerprints, one of which is computed
with the digest algorithm used in the certificate signature.
"""
return [
RTCDtlsFingerprint(algorithm='sha-256', value=certificate_digest(self._cert._x509))
]
@classmethod
def generateCertificate(cls):
"""
Create and return an X.509 certificate and corresponding private key.
:rtype: RTCCertificate
"""
key = generate_key()
cert = generate_certificate(key)
return cls(key=key, cert=cert)
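# Illustrative sketch (not part of the upstream module): how the certificate helpers
# above fit together. It only uses names defined in this file and is not executed
# anywhere; the prints are for illustration only.
def _certificate_usage_example():  # pragma: no cover - illustrative only
    certificate = RTCCertificate.generateCertificate()
    # Fingerprints are exchanged during signaling and checked in `start` below.
    for fingerprint in certificate.getFingerprints():
        print(fingerprint.algorithm, fingerprint.value)
    # `expires` reports the end of the certificate's validity window.
    print(certificate.expires)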
@attr.s
class RTCDtlsFingerprint:
"""
The :class:`RTCDtlsFingerprint` dictionary includes the hash function
algorithm and certificate fingerprint.
"""
algorithm = attr.ib()
"The hash function name, for instance `'sha-256'`."
value = attr.ib()
"The fingerprint value."
@attr.s
class RTCDtlsParameters:
"""
The :class:`RTCDtlsParameters` dictionary includes information relating to
DTLS configuration.
"""
fingerprints = attr.ib(default=attr.Factory(list))
"List of :class:`RTCDtlsFingerprint`, one fingerprint for each certificate."
role = attr.ib(default='auto')
"The DTLS role, with a default of auto."
class RtpRouter:
def __init__(self):
self.ssrc_table = {}
def register(self, receiver, parameters):
if parameters.rtcp.ssrc:
self.ssrc_table[parameters.rtcp.ssrc] = receiver
def route(self, ssrc):
return self.ssrc_table.get(ssrc)
class RTCDtlsTransport(EventEmitter):
"""
The :class:`RTCDtlsTransport` object includes information relating to
Datagram Transport Layer Security (DTLS) transport.
    :param transport: An :class:`RTCIceTransport`.
    :param certificates: A list of :class:`RTCCertificate` (only one is allowed currently).
"""
def __init__(self, transport, certificates):
assert len(certificates) == 1
certificate = certificates[0]
super().__init__()
self.closed = asyncio.Event()
self.encrypted = False
self._role = 'auto'
self._rtp_router = RtpRouter()
self._start = None
self._state = State.NEW
self._transport = transport
self.data_queue = asyncio.Queue()
self.data = Channel(
closed=self.closed,
queue=self.data_queue,
send=self._send_data)
# SSL init
self.__ctx = create_ssl_context(certificate)
ssl = lib.SSL_new(self.__ctx)
self.ssl = ffi.gc(ssl, lib.SSL_free)
self.read_bio = lib.BIO_new(lib.BIO_s_mem())
self.read_cdata = ffi.new('char[]', 1500)
self.write_bio = lib.BIO_new(lib.BIO_s_mem())
self.write_cdata = ffi.new('char[]', 1500)
lib.SSL_set_bio(self.ssl, self.read_bio, self.write_bio)
self.__local_parameters = RTCDtlsParameters(fingerprints=certificate.getFingerprints())
@property
def state(self):
"""
The current state of the DTLS transport.
"""
return str(self._state)[6:].lower()
@property
def transport(self):
"""
The associated :class:`RTCIceTransport` instance.
"""
return self._transport
def getLocalParameters(self):
"""
Get the local parameters of the DTLS transport.
:rtype: :class:`RTCDtlsParameters`
"""
return self.__local_parameters
async def start(self, remoteParameters):
"""
Start DTLS transport negotiation with the parameters of the remote
DTLS transport.
        :param remoteParameters: An :class:`RTCDtlsParameters`.
"""
assert self._state not in [State.CLOSED, State.FAILED]
assert len(remoteParameters.fingerprints)
# handle the case where start is already in progress
if self._start is not None:
return await self._start.wait()
self._start = asyncio.Event()
if self.transport.role == 'controlling':
self._role = 'server'
lib.SSL_set_accept_state(self.ssl)
else:
self._role = 'client'
lib.SSL_set_connect_state(self.ssl)
self._set_state(State.CONNECTING)
while not self.encrypted:
result = lib.SSL_do_handshake(self.ssl)
await self._write_ssl()
if result > 0:
self.encrypted = True
break
error = lib.SSL_get_error(self.ssl, result)
if error == lib.SSL_ERROR_WANT_READ:
await self._recv_next()
else:
self._set_state(State.FAILED)
raise DtlsError('DTLS handshake failed (error %d)' % error)
# check remote fingerprint
x509 = lib.SSL_get_peer_certificate(self.ssl)
remote_fingerprint = certificate_digest(x509)
fingerprint_is_valid = False
for f in remoteParameters.fingerprints:
if f.algorithm == 'sha-256' and f.value.lower() == remote_fingerprint.lower():
fingerprint_is_valid = True
break
if not fingerprint_is_valid:
self._set_state(State.FAILED)
raise DtlsError('DTLS fingerprint does not match')
# generate keying material
buf = ffi.new('unsigned char[]', 2 * (SRTP_KEY_LEN + SRTP_SALT_LEN))
extractor = b'EXTRACTOR-dtls_srtp'
_openssl_assert(lib.SSL_export_keying_material(
self.ssl, buf, len(buf), extractor, len(extractor), ffi.NULL, 0, 0) == 1)
view = ffi.buffer(buf)
if self._role == 'server':
srtp_tx_key = get_srtp_key_salt(view, 1)
srtp_rx_key = get_srtp_key_salt(view, 0)
else:
srtp_tx_key = get_srtp_key_salt(view, 0)
srtp_rx_key = get_srtp_key_salt(view, 1)
rx_policy = Policy(key=srtp_rx_key, ssrc_type=Policy.SSRC_ANY_INBOUND)
self._rx_srtp = Session(rx_policy)
tx_policy = Policy(key=srtp_tx_key, ssrc_type=Policy.SSRC_ANY_OUTBOUND)
self._tx_srtp = Session(tx_policy)
# start data pump
self.__log_debug('- DTLS handshake complete')
self._set_state(State.CONNECTED)
asyncio.ensure_future(self.__run())
self._start.set()
async def stop(self):
"""
Stop and close the DTLS transport.
"""
if self._state in [State.CONNECTING, State.CONNECTED]:
lib.SSL_shutdown(self.ssl)
await self._write_ssl()
self.__log_debug('- DTLS shutdown complete')
self.closed.set()
async def __run(self):
try:
while True:
await self._recv_next()
except ConnectionError:
pass
finally:
self._set_state(State.CLOSED)
self.closed.set()
async def _recv_next(self):
# get timeout
ptv_sec = ffi.new('time_t *')
ptv_usec = ffi.new('long *')
if lib.Cryptography_DTLSv1_get_timeout(self.ssl, ptv_sec, ptv_usec):
timeout = ptv_sec[0] + (ptv_usec[0] / 1000000)
else:
timeout = None
try:
data = await first_completed(self.transport._connection.recv(), self.closed.wait(),
timeout=timeout)
except TimeoutError:
self.__log_debug('x DTLS handling timeout')
lib.DTLSv1_handle_timeout(self.ssl)
await self._write_ssl()
return
if data is True:
# session was closed
raise ConnectionError
first_byte = data[0]
if first_byte > 19 and first_byte < 64:
# DTLS
lib.BIO_write(self.read_bio, data, len(data))
result = lib.SSL_read(self.ssl, self.read_cdata, len(self.read_cdata))
await self._write_ssl()
if result == 0:
self.__log_debug('- DTLS shutdown by remote party')
raise ConnectionError
elif result > 0:
await self.data_queue.put(ffi.buffer(self.read_cdata)[0:result])
elif first_byte > 127 and first_byte < 192:
# SRTP / SRTCP
if is_rtcp(data):
data = self._rx_srtp.unprotect_rtcp(data)
packets = RtcpPacket.parse(data)
for packet in packets:
receiver = None
if hasattr(packet, 'ssrc'):
# SR and RR
receiver = self._rtp_router.route(packet.ssrc)
elif getattr(packet, 'chunks', None):
# SDES
receiver = self._rtp_router.route(packet.chunks[0].ssrc)
elif getattr(packet, 'sources', None):
# BYE
receiver = self._rtp_router.route(packet.sources[0])
if receiver is not None:
await receiver._handle_rtcp_packet(packet)
else:
data = self._rx_srtp.unprotect(data)
packet = RtpPacket.parse(data)
receiver = self._rtp_router.route(packet.ssrc)
if receiver is not None:
await receiver._handle_rtp_packet(packet)
def _register_rtp_receiver(self, receiver, parameters):
self._rtp_router.register(receiver, parameters)
async def _send_data(self, data):
if self._state != State.CONNECTED:
raise ConnectionError('Cannot send encrypted data, not connected')
lib.SSL_write(self.ssl, data, len(data))
await self._write_ssl()
async def _send_rtp(self, data):
if self._state != State.CONNECTED:
raise ConnectionError('Cannot send encrypted RTP, not connected')
if is_rtcp(data):
data = self._tx_srtp.protect_rtcp(data)
else:
data = self._tx_srtp.protect(data)
await self.transport._connection.send(data)
def _set_state(self, state):
if state != self._state:
self.__log_debug('- %s -> %s', self._state, state)
self._state = state
self.emit('statechange')
async def _write_ssl(self):
"""
Flush outgoing data which OpenSSL put in our BIO to the transport.
"""
pending = lib.BIO_ctrl_pending(self.write_bio)
if pending > 0:
result = lib.BIO_read(self.write_bio, self.write_cdata, len(self.write_cdata))
await self.transport._connection.send(ffi.buffer(self.write_cdata)[0:result])
def __log_debug(self, msg, *args):
logger.debug(self._role + ' ' + msg, *args)
| 32.504255
| 96
| 0.619035
|
66d04b556c690f348751d1a010b936f36251ac99
| 4,158
|
py
|
Python
|
aif360/algorithms/transformer.py
|
andrerubeis/AIF360
|
c0ce6f2e3eff9cab0ccce0bc0a05b681a5df7e44
|
[
"Apache-2.0"
] | null | null | null |
aif360/algorithms/transformer.py
|
andrerubeis/AIF360
|
c0ce6f2e3eff9cab0ccce0bc0a05b681a5df7e44
|
[
"Apache-2.0"
] | null | null | null |
aif360/algorithms/transformer.py
|
andrerubeis/AIF360
|
c0ce6f2e3eff9cab0ccce0bc0a05b681a5df7e44
|
[
"Apache-2.0"
] | null | null | null |
from abc import abstractmethod
from functools import wraps
from aif360.datasets import Dataset
from aif360.decorating_metaclass import ApplyDecorator
# TODO: Use sklearn.exceptions.NotFittedError instead?
class NotFittedError(ValueError, AttributeError):
"""Error to be raised if `predict` or `transform` is called before `fit`."""
def addmetadata(func):
"""Decorator for instance methods which perform a transformation and return
a new dataset.
Automatically populates the `metadata` field of the new dataset to reflect
details of the transformation that occurred, e.g.::
{
'transformer': 'TransformerClass.function_name',
'params': kwargs_from_init,
'previous': [all_datasets_used_by_func]
}
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
new_dataset = func(self, *args, **kwargs)
if isinstance(new_dataset, Dataset):
new_dataset.metadata = new_dataset.metadata.copy()
new_dataset.metadata.update({
'transformer': '{}.{}'.format(type(self).__name__, func.__name__),
'params': self._params,
'previous': [a for a in args if isinstance(a, Dataset)]
})
return new_dataset
return wrapper
BaseClass = ApplyDecorator(addmetadata)
class Transformer(BaseClass):
"""Abstract base class for transformers.
Transformers are an abstraction for any process which acts on a
:obj:`Dataset` and returns a new, modified Dataset. This definition
encompasses pre-processing, in-processing, and post-processing algorithms.
"""
@abstractmethod
def __init__(self, **kwargs):
"""Initialize a Transformer object.
Algorithm-specific configuration parameters should be passed here.
"""
#kwargs: {'unprivileged_groups': [{'sex': 0}], 'privileged_groups': [{'sex': 1}]}
#Store protected attributes (lfr)
self._params = kwargs
def fit(self, dataset):
"""Train a model on the input.
Args:
dataset (Dataset): Input dataset.
Returns:
Transformer: Returns self.
"""
return self
def predict(self, dataset):
"""Return a new dataset with labels predicted by running this
Transformer on the input.
Args:
dataset (Dataset): Input dataset.
Returns:
Dataset: Output dataset. `metadata` should reflect the details of
this transformation.
"""
raise NotImplementedError("'predict' is not supported for this class. "
"Perhaps you meant 'transform' or 'fit_predict' instead?")
def transform(self, dataset):
"""Return a new dataset generated by running this Transformer on the
input.
This function could return different `dataset.features`,
`dataset.labels`, or both.
Args:
dataset (Dataset): Input dataset.
Returns:
Dataset: Output dataset. `metadata` should reflect the details of
this transformation.
"""
raise NotImplementedError("'transform' is not supported for this class."
" Perhaps you meant 'predict' or 'fit_transform' instead?")
def fit_predict(self, dataset):
"""Train a model on the input and predict the labels.
Equivalent to calling `fit(dataset)` followed by `predict(dataset)`.
Args:
dataset (Dataset): Input dataset.
Returns:
Dataset: Output dataset. `metadata` should reflect the details of
this transformation.
"""
return self.fit(dataset).predict(dataset)
def fit_transform(self, dataset):
"""Train a model on the input and transform the dataset accordingly.
Equivalent to calling `fit(dataset)` followed by `transform(dataset)`.
Args:
dataset (Dataset): Input dataset.
Returns:
Dataset: Output dataset. `metadata` should reflect the details of
this transformation.
"""
return self.fit(dataset).transform(dataset)
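# Illustrative sketch (not part of AIF360): a minimal concrete subclass showing how an
# algorithm plugs into the base class above. The class name is hypothetical, and the
# body assumes the input dataset exposes the `copy(deepcopy=...)` method and `features`
# array that AIF360's structured datasets provide.
class _ScalePreprocessor(Transformer):
    """Toy pre-processing transformer that rescales the feature matrix."""
    def __init__(self, scale=1.0):
        # Keyword arguments stored here are what `addmetadata` records under
        # 'params' in the metadata of any dataset this transformer returns.
        super(_ScalePreprocessor, self).__init__(scale=scale)
        self.scale = scale
    def transform(self, dataset):
        new_dataset = dataset.copy(deepcopy=True)
        new_dataset.features = new_dataset.features * self.scale
        return new_dataset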
| 32.484375
| 89
| 0.633237
|
ec47bf01ebf9beb16408299c8c0c4301f6ba9ef1
| 4,642
|
py
|
Python
|
PasswordManager/PwWindow.py
|
whymatter/PasswordManager
|
86070a1f998362cfa026e6e6e9b820a2d7ad5f06
|
[
"MIT"
] | null | null | null |
PasswordManager/PwWindow.py
|
whymatter/PasswordManager
|
86070a1f998362cfa026e6e6e9b820a2d7ad5f06
|
[
"MIT"
] | null | null | null |
PasswordManager/PwWindow.py
|
whymatter/PasswordManager
|
86070a1f998362cfa026e6e6e9b820a2d7ad5f06
|
[
"MIT"
] | null | null | null |
import sys
from PyQt5 import QtWidgets, uic
from PyQt5.QtWidgets import QApplication, QTableWidgetItem
from Crypto.Cipher import AES
import json
from constants import FILE_NAME, KEY_ENCODING
from SaveDialog import SaveDialog
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 17 13:53:56 2017
@author: seitz
"""
class PwWindow(QtWidgets.QMainWindow):
def __init__(self, parent=None, key=None):
super(PwWindow, self).__init__(parent)
self.key = key
self.save_dialog = SaveDialog(parent=self)
# load and show the user interface created with the designer.
uic.loadUi('../pw_window.ui', self)
self.add_button.clicked.connect(self.add_row)
self.remove_button.clicked.connect(self.remove_row)
self.toggle_button.clicked.connect(self.toggle_password)
self.load_data(FILE_NAME, key)
self.show()
def add_row(self):
self.tableWidget.insertRow(self.tableWidget.rowCount())
def remove_row(self):
self.tableWidget.removeRow(self.tableWidget.currentRow())
def toggle_password(self):
item = self.tableWidget.currentItem()
        if item is None or item.column() != 1:
return
print(item.text())
print(item.text() == '****')
if item.text() == '****':
print(str(item.data(1)))
item.setText(str(item.data(1)))
else:
item.setData(1, item.text())
item.setText('****')
def closeEvent(self, event):
if self.save_dialog.exec_():
self.save_data(FILE_NAME, self.key)
event.accept()
def get_data(self):
data = []
for row in range(self.tableWidget.rowCount()):
            if self.tableWidget.item(row, 0) is None or\
               self.tableWidget.item(row, 1) is None:
continue
data_pw = self.tableWidget.item(row, 1).data(1)
text_pw = self.tableWidget.item(row, 1).text()
print(data_pw)
data.append({
"accountid": self.tableWidget.item(row, 0).text(),
"password": text_pw if text_pw != '****' else data_pw
})
return data
def load_data(self, filename, key):
try:
with open(filename, 'rb') as file_in:
nonce, tag, ciphertext = [ file_in.read(x) for x in (16, 16, -1) ]
# let's assume that the key is somehow available again
cipher = AES.new(key, AES.MODE_EAX, nonce)
jsontext = cipher.decrypt_and_verify(ciphertext, tag)
data = json.loads(jsontext)
print(data)
self.refresh_table(data)
except Exception as e:
print("Your file contains errors")
print(e)
def save_data(self, filename, key):
data = self.get_data()
print(data)
cipher = AES.new(key, AES.MODE_EAX)
ciphertext, tag = cipher.encrypt_and_digest(json.dumps(data).encode(KEY_ENCODING))
try:
with open(FILE_NAME, "wb") as file_out:
[ file_out.write(x) for x in (cipher.nonce, tag, ciphertext) ]
except Exception as e:
print("Your file contains errors")
print(e)
def refresh_table(self, data):
def load_value(row, column, value):
self.tableWidget.setItem(row, column, QTableWidgetItem(str(value), 0))
[self.tableWidget.removeRow(0) for x in range(self.tableWidget.rowCount())]
for index, entry in enumerate(data):
self.tableWidget.insertRow(index)
self.tableWidget.setItem(index, 0, QTableWidgetItem(str(entry['accountid']), 0))
pw_item = QTableWidgetItem()
pw_item.setText('****')
pw_item.setData(1, str(entry['password']))
self.tableWidget.setItem(index, 1, pw_item)
@staticmethod
def persist(data, filename):
try:
print(data)
with open(filename, 'w') as outFile:
json.dump(data, outFile)
except Exception as e:
print("Failed to save!")
print(e)
def return_data(self):
self.data = self.get_data()
self.accept()
def _main():
app = QApplication(sys.argv)
m = PwWindow()
sys.exit(app.exec_())
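# Illustrative sketch (not part of the original file): the nonce|tag|ciphertext layout
# that save_data/load_data above write and read. The filename and payload are examples
# only; `key` must be a valid AES key (16/24/32 bytes) and the calls use the
# PyCryptodome API already imported at the top of this file.
def _roundtrip_example(key, payload, filename='example.bin'):
    cipher = AES.new(key, AES.MODE_EAX)
    ciphertext, tag = cipher.encrypt_and_digest(payload)
    with open(filename, 'wb') as file_out:
        for chunk in (cipher.nonce, tag, ciphertext):
            file_out.write(chunk)
    with open(filename, 'rb') as file_in:
        nonce, tag, ciphertext = [file_in.read(x) for x in (16, 16, -1)]
    cipher = AES.new(key, AES.MODE_EAX, nonce)
    # decrypt_and_verify raises ValueError if the file was modified after writing.
    return cipher.decrypt_and_verify(ciphertext, tag)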
if __name__ == '__main__':
_main()
| 32.461538
| 93
| 0.550409
|
9aaa1c2b83405b7e75a1a8f71bd668d7240eddb2
| 8,520
|
py
|
Python
|
tpu/models/official/resnet/benchmark/resnet_benchmark.py
|
DLPerf/class-balanced-loss
|
8ac4342949991384c13b5419148be7a617f2abf1
|
[
"MIT"
] | 541
|
2019-01-18T03:10:19.000Z
|
2022-03-30T05:45:36.000Z
|
models/official/resnet/benchmark/resnet_benchmark.py
|
lc0/tpu
|
78eacbc1b216d498cd0456b5d84d4abcd5f16a9a
|
[
"Apache-2.0"
] | 20
|
2019-02-11T06:45:43.000Z
|
2021-11-09T18:44:36.000Z
|
models/official/resnet/benchmark/resnet_benchmark.py
|
lc0/tpu
|
78eacbc1b216d498cd0456b5d84d4abcd5f16a9a
|
[
"Apache-2.0"
] | 75
|
2019-01-20T13:50:39.000Z
|
2022-03-30T08:10:50.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train a ResNet-50 model on ImageNet on TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import re
import sys
import time
from absl import flags
import tensorflow as tf
# For Cloud environment, add parent directory for imports
sys.path.append(os.path.dirname(os.path.abspath(sys.path[0])))
from official.resnet import imagenet_input # pylint: disable=g-import-not-at-top
from official.resnet import resnet_main
from tensorflow.python.estimator import estimator
FLAGS = tf.flags.FLAGS
CKPT_PATTERN = r'model\.ckpt-(?P<gs>[0-9]+)\.data'
flags.DEFINE_string(
'data_dir_small', default=None,
help=('The directory where the resized (160x160) ImageNet input data is '
'stored. This is only to be used in conjunction with the '
'resnet_benchmark.py script.'))
flags.DEFINE_bool(
'use_fast_lr', default=False,
help=('Enabling this uses a faster learning rate schedule along with '
'different image sizes in the input pipeline. This is only to be '
'used in conjunction with the resnet_benchmark.py script.'))
# Number of training and evaluation images in the standard ImageNet dataset
NUM_TRAIN_IMAGES = 1281167
NUM_EVAL_IMAGES = 50000
def main(unused_argv):
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu,
zone=FLAGS.tpu_zone,
project=FLAGS.gcp_project)
config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=FLAGS.model_dir,
save_checkpoints_steps=FLAGS.iterations_per_loop,
keep_checkpoint_max=None,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_cores,
per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2)) # pylint: disable=line-too-long
# Input pipelines are slightly different (with regards to shuffling and
# preprocessing) between training and evaluation.
imagenet_train = imagenet_input.ImageNetInput(
is_training=True,
data_dir=FLAGS.data_dir,
use_bfloat16=True,
transpose_input=FLAGS.transpose_input)
imagenet_eval = imagenet_input.ImageNetInput(
is_training=False,
data_dir=FLAGS.data_dir,
use_bfloat16=True,
transpose_input=FLAGS.transpose_input)
if FLAGS.use_fast_lr:
resnet_main.LR_SCHEDULE = [ # (multiplier, epoch to start) tuples
(1.0, 4), (0.1, 21), (0.01, 35), (0.001, 43)
]
imagenet_train_small = imagenet_input.ImageNetInput(
is_training=True,
image_size=128,
data_dir=FLAGS.data_dir_small,
num_parallel_calls=FLAGS.num_parallel_calls,
use_bfloat16=True,
transpose_input=FLAGS.transpose_input,
cache=True)
imagenet_eval_small = imagenet_input.ImageNetInput(
is_training=False,
image_size=128,
data_dir=FLAGS.data_dir_small,
num_parallel_calls=FLAGS.num_parallel_calls,
use_bfloat16=True,
transpose_input=FLAGS.transpose_input,
cache=True)
imagenet_train_large = imagenet_input.ImageNetInput(
is_training=True,
image_size=288,
data_dir=FLAGS.data_dir,
num_parallel_calls=FLAGS.num_parallel_calls,
use_bfloat16=True,
transpose_input=FLAGS.transpose_input)
imagenet_eval_large = imagenet_input.ImageNetInput(
is_training=False,
image_size=288,
data_dir=FLAGS.data_dir,
num_parallel_calls=FLAGS.num_parallel_calls,
use_bfloat16=True,
transpose_input=FLAGS.transpose_input)
resnet_classifier = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=resnet_main.resnet_model_fn,
config=config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size)
if FLAGS.mode == 'train':
current_step = estimator._load_global_step_from_checkpoint_dir(FLAGS.model_dir) # pylint: disable=protected-access,line-too-long
batches_per_epoch = NUM_TRAIN_IMAGES / FLAGS.train_batch_size
tf.logging.info('Training for %d steps (%.2f epochs in total). Current'
' step %d.' % (FLAGS.train_steps,
FLAGS.train_steps / batches_per_epoch,
current_step))
start_timestamp = time.time() # This time will include compilation time
# Write a dummy file at the start of training so that we can measure the
# runtime at each checkpoint from the file write time.
tf.gfile.MkDir(FLAGS.model_dir)
if not tf.gfile.Exists(os.path.join(FLAGS.model_dir, 'START')):
with tf.gfile.GFile(os.path.join(FLAGS.model_dir, 'START'), 'w') as f:
f.write(str(start_timestamp))
if FLAGS.use_fast_lr:
small_steps = int(18 * NUM_TRAIN_IMAGES / FLAGS.train_batch_size)
normal_steps = int(41 * NUM_TRAIN_IMAGES / FLAGS.train_batch_size)
large_steps = int(min(50 * NUM_TRAIN_IMAGES / FLAGS.train_batch_size,
FLAGS.train_steps))
resnet_classifier.train(
input_fn=imagenet_train_small.input_fn, max_steps=small_steps)
resnet_classifier.train(
input_fn=imagenet_train.input_fn, max_steps=normal_steps)
resnet_classifier.train(
input_fn=imagenet_train_large.input_fn,
max_steps=large_steps)
else:
resnet_classifier.train(
input_fn=imagenet_train.input_fn, max_steps=FLAGS.train_steps)
else:
assert FLAGS.mode == 'eval'
start_timestamp = tf.gfile.Stat(
os.path.join(FLAGS.model_dir, 'START')).mtime_nsec
results = []
eval_steps = NUM_EVAL_IMAGES // FLAGS.eval_batch_size
ckpt_steps = set()
all_files = tf.gfile.ListDirectory(FLAGS.model_dir)
for f in all_files:
mat = re.match(CKPT_PATTERN, f)
if mat is not None:
ckpt_steps.add(int(mat.group('gs')))
ckpt_steps = sorted(list(ckpt_steps))
tf.logging.info('Steps to be evaluated: %s' % str(ckpt_steps))
for step in ckpt_steps:
ckpt = os.path.join(FLAGS.model_dir, 'model.ckpt-%d' % step)
batches_per_epoch = NUM_TRAIN_IMAGES // FLAGS.train_batch_size
current_epoch = step // batches_per_epoch
if FLAGS.use_fast_lr:
if current_epoch < 18:
eval_input_fn = imagenet_eval_small.input_fn
if current_epoch >= 18 and current_epoch < 41:
eval_input_fn = imagenet_eval.input_fn
if current_epoch >= 41: # 41:
eval_input_fn = imagenet_eval_large.input_fn
else:
eval_input_fn = imagenet_eval.input_fn
end_timestamp = tf.gfile.Stat(ckpt + '.index').mtime_nsec
elapsed_hours = (end_timestamp - start_timestamp) / (1e9 * 3600.0)
tf.logging.info('Starting to evaluate.')
eval_start = time.time() # This time will include compilation time
eval_results = resnet_classifier.evaluate(
input_fn=eval_input_fn,
steps=eval_steps,
checkpoint_path=ckpt)
eval_time = int(time.time() - eval_start)
tf.logging.info('Eval results: %s. Elapsed seconds: %d' %
(eval_results, eval_time))
results.append([
current_epoch,
elapsed_hours,
'%.2f' % (eval_results['top_1_accuracy'] * 100),
'%.2f' % (eval_results['top_5_accuracy'] * 100),
])
time.sleep(60)
with tf.gfile.GFile(os.path.join(FLAGS.model_dir, 'results.tsv'), 'wb') as tsv_file: # pylint: disable=line-too-long
writer = csv.writer(tsv_file, delimiter='\t')
writer.writerow(['epoch', 'hours', 'top1Accuracy', 'top5Accuracy'])
writer.writerows(results)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
| 37.20524
| 133
| 0.688967
|
34cf4c21bff5e3b2b838b405c718743366ef4c94
| 178,722
|
py
|
Python
|
autotest/gdrivers/nitf.py
|
cholmes/gdal
|
e091ab0c9b45832f6281dae2437a5759ecd777c2
|
[
"MIT"
] | null | null | null |
autotest/gdrivers/nitf.py
|
cholmes/gdal
|
e091ab0c9b45832f6281dae2437a5759ecd777c2
|
[
"MIT"
] | null | null | null |
autotest/gdrivers/nitf.py
|
cholmes/gdal
|
e091ab0c9b45832f6281dae2437a5759ecd777c2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env pytest
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test read/write functionality for NITF driver.
# Author: Frank Warmerdam <warmerdam@pobox.com>
#
###############################################################################
# Copyright (c) 2003, Frank Warmerdam <warmerdam@pobox.com>
# Copyright (c) 2008-2013, Even Rouault <even dot rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import copy
import os
import sys
import array
import struct
import shutil
from osgeo import gdal
from osgeo import osr
import gdaltest
import pytest
@pytest.fixture(scope='module')
def not_jpeg_9b():
import jpeg
jpeg.test_jpeg_1()
if gdaltest.jpeg_version == '9b':
pytest.skip()
def hex_string(s):
return "".join(hex(ord(c))[2:] for c in s)
###############################################################################
# Write/Read test of simple byte reference data.
def test_nitf_1():
tst = gdaltest.GDALTest('NITF', 'byte.tif', 1, 4672)
return tst.testCreateCopy()
###############################################################################
# Write/Read test of simple 16bit reference data.
def test_nitf_2():
tst = gdaltest.GDALTest('NITF', 'int16.tif', 1, 4672)
return tst.testCreateCopy()
###############################################################################
# Write/Read RGB image with lat/long georeferencing, and verify.
def test_nitf_3():
tst = gdaltest.GDALTest('NITF', 'rgbsmall.tif', 3, 21349)
return tst.testCreateCopy()
###############################################################################
# Test direct creation of an NITF file.
def nitf_create(creation_options, set_inverted_color_interp=True, createcopy=False):
drv = gdal.GetDriverByName('NITF')
try:
os.remove('tmp/test_create.ntf')
except OSError:
pass
if createcopy:
ds = gdal.GetDriverByName('MEM').Create('', 200, 100, 3, gdal.GDT_Byte)
else:
ds = drv.Create('tmp/test_create.ntf', 200, 100, 3, gdal.GDT_Byte,
creation_options)
ds.SetGeoTransform((100, 0.1, 0.0, 30.0, 0.0, -0.1))
if set_inverted_color_interp:
ds.GetRasterBand(1).SetRasterColorInterpretation(gdal.GCI_BlueBand)
ds.GetRasterBand(2).SetRasterColorInterpretation(gdal.GCI_GreenBand)
ds.GetRasterBand(3).SetRasterColorInterpretation(gdal.GCI_RedBand)
else:
ds.GetRasterBand(1).SetRasterColorInterpretation(gdal.GCI_RedBand)
ds.GetRasterBand(2).SetRasterColorInterpretation(gdal.GCI_GreenBand)
ds.GetRasterBand(3).SetRasterColorInterpretation(gdal.GCI_BlueBand)
my_list = list(range(200)) + list(range(20, 220)) + list(range(30, 230))
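    # my_list holds one 200-value ramp per band (offsets 0, 20 and 30), so each
    # WriteRaster() call below fills one scanline of all three bands.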
try:
raw_data = array.array('h', my_list).tobytes()
    except AttributeError:
# Python 2
raw_data = array.array('h', my_list).tostring()
for line in range(100):
ds.WriteRaster(0, line, 200, 1, raw_data,
buf_type=gdal.GDT_Int16,
band_list=[1, 2, 3])
if createcopy:
ds = drv.CreateCopy('tmp/test_create.ntf', ds,
options=creation_options)
ds = None
###############################################################################
# Test direct creation of a non-compressed NITF file.
def test_nitf_4():
return nitf_create(['ICORDS=G'])
###############################################################################
# Verify created file
def nitf_check_created_file(checksum1, checksum2, checksum3, set_inverted_color_interp=True):
ds = gdal.Open('tmp/test_create.ntf')
chksum = ds.GetRasterBand(1).Checksum()
chksum_expect = checksum1
assert chksum == chksum_expect, 'Did not get expected chksum for band 1'
chksum = ds.GetRasterBand(2).Checksum()
chksum_expect = checksum2
assert chksum == chksum_expect, 'Did not get expected chksum for band 2'
chksum = ds.GetRasterBand(3).Checksum()
chksum_expect = checksum3
assert chksum == chksum_expect, 'Did not get expected chksum for band 3'
geotransform = ds.GetGeoTransform()
assert geotransform[0] == pytest.approx(100, abs=0.1) and geotransform[1] == pytest.approx(0.1, abs=0.001) and geotransform[2] == pytest.approx(0, abs=0.001) and geotransform[3] == pytest.approx(30.0, abs=0.1) and geotransform[4] == pytest.approx(0, abs=0.001) and geotransform[5] == pytest.approx(-0.1, abs=0.001), \
'geotransform differs from expected'
if set_inverted_color_interp:
assert ds.GetRasterBand(1).GetRasterColorInterpretation() == gdal.GCI_BlueBand, \
'Got wrong color interpretation.'
assert ds.GetRasterBand(2).GetRasterColorInterpretation() == gdal.GCI_GreenBand, \
'Got wrong color interpretation.'
assert ds.GetRasterBand(3).GetRasterColorInterpretation() == gdal.GCI_RedBand, \
'Got wrong color interpretation.'
ds = None
###############################################################################
# Verify file created by nitf_4()
def test_nitf_5():
return nitf_check_created_file(32498, 42602, 38982)
###############################################################################
# Read existing NITF file. Verifies the new adjusted IGEOLO interp.
def test_nitf_6():
tst = gdaltest.GDALTest('NITF', 'nitf/rgb.ntf', 3, 21349)
return tst.testOpen(check_prj='WGS84',
check_gt=(-44.842029478458, 0.003503401360, 0,
-22.930748299319, 0, -0.003503401360))
###############################################################################
# NITF in-memory.
def test_nitf_7():
tst = gdaltest.GDALTest('NITF', 'rgbsmall.tif', 3, 21349)
return tst.testCreateCopy(vsimem=1)
###############################################################################
# Verify we can open an NSIF file, and get metadata including BLOCKA.
def test_nitf_8():
ds = gdal.Open('data/nitf/fake_nsif.ntf')
chksum = ds.GetRasterBand(1).Checksum()
chksum_expect = 12033
assert chksum == chksum_expect, 'Did not get expected chksum for band 1'
md = ds.GetMetadata()
assert md['NITF_FHDR'] == 'NSIF01.00', 'Got wrong FHDR value'
assert md['NITF_BLOCKA_BLOCK_INSTANCE_01'] == '01' and md['NITF_BLOCKA_BLOCK_COUNT'] == '01' and md['NITF_BLOCKA_N_GRAY_01'] == '00000' and md['NITF_BLOCKA_L_LINES_01'] == '01000' and md['NITF_BLOCKA_LAYOVER_ANGLE_01'] == '000' and md['NITF_BLOCKA_SHADOW_ANGLE_01'] == '000' and md['NITF_BLOCKA_FRLC_LOC_01'] == '+41.319331+020.078400' and md['NITF_BLOCKA_LRLC_LOC_01'] == '+41.317083+020.126072' and md['NITF_BLOCKA_LRFC_LOC_01'] == '+41.281634+020.122570' and md['NITF_BLOCKA_FRFC_LOC_01'] == '+41.283881+020.074924', \
'BLOCKA metadata has unexpected value.'
###############################################################################
# Create and read a JPEG encoded NITF file.
def test_nitf_9():
src_ds = gdal.Open('data/rgbsmall.tif')
ds = gdal.GetDriverByName('NITF').CreateCopy('tmp/nitf9.ntf', src_ds,
options=['IC=C3'])
src_ds = None
ds = None
ds = gdal.Open('tmp/nitf9.ntf')
(exp_mean, exp_stddev) = (65.9532, 46.9026375565)
(mean, stddev) = ds.GetRasterBand(1).ComputeBandStats()
assert exp_mean == pytest.approx(mean, abs=0.1) and exp_stddev == pytest.approx(stddev, abs=0.1), \
'did not get expected mean or standard dev.'
md = ds.GetMetadata('IMAGE_STRUCTURE')
assert md['COMPRESSION'] == 'JPEG', 'Did not get expected compression value.'
###############################################################################
# For esoteric reasons, createcopy from jpeg compressed nitf files can be
# tricky. Verify this is working.
def test_nitf_10():
src_ds = gdal.Open('tmp/nitf9.ntf')
expected_cs = src_ds.GetRasterBand(2).Checksum()
src_ds = None
assert expected_cs == 22296 or expected_cs == 22259
tst = gdaltest.GDALTest('NITF', '../tmp/nitf9.ntf', 2, expected_cs)
return tst.testCreateCopy()
###############################################################################
# Test 1bit file ... conveniently very small and easy to include! (#1854)
def test_nitf_11():
# From http://www.gwg.nga.mil/ntb/baseline/software/testfile/Nitfv2_1/i_3034c.ntf
tst = gdaltest.GDALTest('NITF', 'nitf/i_3034c.ntf', 1, 170)
return tst.testOpen()
###############################################################################
# Verify that TRE and CGM access via the metadata domain works.
def test_nitf_12():
ds = gdal.Open('data/nitf/fake_nsif.ntf')
mdTRE = ds.GetMetadata('TRE')
try: # NG bindings
blockA = ds.GetMetadataItem('BLOCKA', 'TRE')
except:
blockA = mdTRE['BLOCKA']
mdCGM = ds.GetMetadata('CGM')
try: # NG bindings
segmentCount = ds.GetMetadataItem('SEGMENT_COUNT', 'CGM')
except:
segmentCount = mdCGM['SEGMENT_COUNT']
ds = None
expectedBlockA = '010000001000000000 +41.319331+020.078400+41.317083+020.126072+41.281634+020.122570+41.283881+020.074924 '
assert mdTRE['BLOCKA'] == expectedBlockA, \
'did not find expected BLOCKA from metadata.'
assert blockA == expectedBlockA, 'did not find expected BLOCKA from metadata item.'
assert mdCGM['SEGMENT_COUNT'] == '0', \
'did not find expected SEGMENT_COUNT from metadata.'
assert segmentCount == '0', \
'did not find expected SEGMENT_COUNT from metadata item.'
###############################################################################
# Test creation of an NITF file in UTM Zone 11, Southern Hemisphere.
def test_nitf_13():
drv = gdal.GetDriverByName('NITF')
ds = drv.Create('tmp/test_13.ntf', 200, 100, 1, gdal.GDT_Byte,
['ICORDS=S'])
ds.SetGeoTransform((400000, 10, 0.0, 6000000, 0.0, -10))
ds.SetProjection('PROJCS["UTM Zone 11, Southern Hemisphere",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9108"]],AUTHORITY["EPSG","4326"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",-117],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",10000000],UNIT["Meter",1]]')
my_list = list(range(200))
try:
raw_data = array.array('f', my_list).tobytes()
    except AttributeError:
# Python 2
raw_data = array.array('f', my_list).tostring()
for line in range(100):
ds.WriteRaster(0, line, 200, 1, raw_data,
buf_type=gdal.GDT_Int16,
band_list=[1])
ds = None
###############################################################################
# Verify previous file
def test_nitf_14():
ds = gdal.Open('tmp/test_13.ntf')
chksum = ds.GetRasterBand(1).Checksum()
chksum_expect = 55964
assert chksum == chksum_expect, 'Did not get expected chksum for band 1'
geotransform = ds.GetGeoTransform()
assert geotransform[0] == pytest.approx(400000, abs=.1) and geotransform[1] == pytest.approx(10, abs=0.001) and geotransform[2] == pytest.approx(0, abs=0.001) and geotransform[3] == pytest.approx(6000000, abs=.1) and geotransform[4] == pytest.approx(0, abs=0.001) and geotransform[5] == pytest.approx(-10, abs=0.001), \
'geotransform differs from expected'
prj = ds.GetProjectionRef()
assert prj.find('PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",-117],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",10000000]') != -1, \
'Coordinate system not UTM Zone 11, Southern Hemisphere'
ds = None
###############################################################################
# Test creating an in memory copy.
def test_nitf_15():
tst = gdaltest.GDALTest('NITF', 'byte.tif', 1, 4672)
return tst.testCreateCopy(vsimem=1)
###############################################################################
# Checks a 1-bit mono with mask table having (0x00) black as transparent with white arrow.
def test_nitf_16():
# From http://www.gwg.nga.mil/ntb/baseline/software/testfile/Nitfv2_1/ns3034d.nsf
tst = gdaltest.GDALTest('NITF', 'nitf/ns3034d.nsf', 1, 170)
return tst.testOpen()
###############################################################################
# Checks a 1-bit RGB/LUT (green arrow) with a mask table (pad pixels having value of 0x00)
# and a transparent pixel value of 1 being mapped to green by the LUT
def test_nitf_17():
# From http://www.gwg.nga.mil/ntb/baseline/software/testfile/Nitfv2_1/i_3034f.ntf
tst = gdaltest.GDALTest('NITF', 'nitf/i_3034f.ntf', 1, 170)
return tst.testOpen()
###############################################################################
# Test NITF file without image segment
def test_nitf_18():
# Shut up the warning about missing image segment
gdal.PushErrorHandler('CPLQuietErrorHandler')
# From http://www.gwg.nga.mil/ntb/baseline/software/testfile/Nitfv1_1/U_0006A.NTF
ds = gdal.Open("data/nitf/U_0006A.NTF")
gdal.PopErrorHandler()
assert ds.RasterCount == 0
###############################################################################
# Test BILEVEL (C1) decompression
def test_nitf_19():
# From http://www.gwg.nga.mil/ntb/baseline/software/testfile/Nitfv2_0/U_1050A.NTF
tst = gdaltest.GDALTest('NITF', 'nitf/U_1050A.NTF', 1, 65024)
return tst.testOpen()
###############################################################################
# Test NITF file consisting only of a header
def test_nitf_20():
# Shut up the warning about file either corrupt or empty
gdal.PushErrorHandler('CPLQuietErrorHandler')
# From http://www.gwg.nga.mil/ntb/baseline/software/testfile/Nitfv1_1/U_0002A.NTF
ds = gdal.Open("data/nitf/U_0002A.NTF")
gdal.PopErrorHandler()
assert ds is None
###############################################################################
# Verify that TEXT access via the metadata domain works.
#
# See also nitf_35 for writing TEXT segments.
def test_nitf_21():
# Shut up the warning about missing image segment
gdal.PushErrorHandler('CPLQuietErrorHandler')
ds = gdal.Open('data/nitf/ns3114a.nsf')
gdal.PopErrorHandler()
mdTEXT = ds.GetMetadata('TEXT')
try: # NG bindings
data0 = ds.GetMetadataItem('DATA_0', 'TEXT')
except:
data0 = mdTEXT['DATA_0']
ds = None
assert mdTEXT['DATA_0'] == 'A', 'did not find expected DATA_0 from metadata.'
assert data0 == 'A', 'did not find expected DATA_0 from metadata item.'
###############################################################################
# Write/Read test of simple int32 reference data.
def test_nitf_22():
tst = gdaltest.GDALTest('NITF', '../../gcore/data/int32.tif', 1, 4672)
return tst.testCreateCopy()
###############################################################################
# Write/Read test of simple float32 reference data.
def test_nitf_23():
tst = gdaltest.GDALTest('NITF', '../../gcore/data/float32.tif', 1, 4672)
return tst.testCreateCopy()
###############################################################################
# Write/Read test of simple float64 reference data.
def test_nitf_24():
tst = gdaltest.GDALTest('NITF', '../../gcore/data/float64.tif', 1, 4672)
return tst.testCreateCopy()
###############################################################################
# Write/Read test of simple uint16 reference data.
def test_nitf_25():
tst = gdaltest.GDALTest('NITF', '../../gcore/data/uint16.tif', 1, 4672)
return tst.testCreateCopy()
###############################################################################
# Write/Read test of simple uint32 reference data.
def test_nitf_26():
tst = gdaltest.GDALTest('NITF', '../../gcore/data/uint32.tif', 1, 4672)
return tst.testCreateCopy()
###############################################################################
# Test Create() with IC=NC compression, and multi-blocks
def test_nitf_27():
nitf_create(['ICORDS=G', 'IC=NC', 'BLOCKXSIZE=10', 'BLOCKYSIZE=10'])
return nitf_check_created_file(32498, 42602, 38982)
###############################################################################
# Test Create() with IC=C8 compression with the JP2ECW driver
def test_nitf_28_jp2ecw():
gdaltest.nitf_28_jp2ecw_is_ok = False
if gdal.GetDriverByName('JP2ECW') is None:
pytest.skip()
import ecw
if not ecw.has_write_support():
pytest.skip()
# Deregister other potential conflicting JPEG2000 drivers
gdaltest.deregister_all_jpeg2000_drivers_but('JP2ECW')
if nitf_create(['ICORDS=G', 'IC=C8', 'TARGET=75'], set_inverted_color_interp=False) == 'success':
ret = nitf_check_created_file(32398, 42502, 38882, set_inverted_color_interp=False)
if ret == 'success':
gdaltest.nitf_28_jp2ecw_is_ok = True
else:
ret = 'fail'
tmpfilename = '/vsimem/nitf_28_jp2ecw.ntf'
src_ds = gdal.GetDriverByName('MEM').Create('', 1025, 1025)
gdal.GetDriverByName('NITF').CreateCopy(tmpfilename, src_ds, options=['IC=C8'])
ds = gdal.Open(tmpfilename)
blockxsize, blockysize = ds.GetRasterBand(1).GetBlockSize()
ds = None
gdal.Unlink(tmpfilename)
if (blockxsize, blockysize) != (256, 256): # 256 since this is hardcoded as such in the ECW driver
gdaltest.post_reason('wrong block size')
print(blockxsize, blockysize)
ret = 'fail'
gdaltest.reregister_all_jpeg2000_drivers()
return ret
###############################################################################
# Test reading the previously created file with the JP2MrSID driver
def test_nitf_28_jp2mrsid():
if not gdaltest.nitf_28_jp2ecw_is_ok:
pytest.skip()
jp2mrsid_drv = gdal.GetDriverByName('JP2MrSID')
if jp2mrsid_drv is None:
pytest.skip()
# Deregister other potential conflicting JPEG2000 drivers
gdaltest.deregister_all_jpeg2000_drivers_but('JP2MrSID')
ret = nitf_check_created_file(32398, 42502, 38882, set_inverted_color_interp=False)
gdaltest.reregister_all_jpeg2000_drivers()
return ret
###############################################################################
# Test reading the previously created file with the JP2KAK driver
def test_nitf_28_jp2kak():
if not gdaltest.nitf_28_jp2ecw_is_ok:
pytest.skip()
jp2kak_drv = gdal.GetDriverByName('JP2KAK')
if jp2kak_drv is None:
pytest.skip()
# Deregister other potential conflicting JPEG2000 drivers
gdaltest.deregister_all_jpeg2000_drivers_but('JP2KAK')
ret = nitf_check_created_file(32398, 42502, 38882, set_inverted_color_interp=False)
gdaltest.reregister_all_jpeg2000_drivers()
return ret
###############################################################################
# Test reading the previously created file with the JP2OpenJPEG driver
def test_nitf_28_jp2openjpeg():
if not gdaltest.nitf_28_jp2ecw_is_ok:
pytest.skip()
drv = gdal.GetDriverByName('JP2OpenJPEG')
if drv is None:
pytest.skip()
# Deregister other potential conflicting JPEG2000 drivers
gdaltest.deregister_all_jpeg2000_drivers_but('JP2OpenJPEG')
ret = nitf_check_created_file(32398, 42502, 38882, set_inverted_color_interp=False)
gdaltest.reregister_all_jpeg2000_drivers()
return ret
###############################################################################
# Test Create() with IC=C8 compression with the JP2OpenJPEG driver
def test_nitf_28_jp2openjpeg_bis():
drv = gdal.GetDriverByName('JP2OpenJPEG')
if drv is None:
pytest.skip()
# Deregister other potential conflicting JPEG2000 drivers
gdaltest.deregister_all_jpeg2000_drivers_but('JP2OpenJPEG')
if nitf_create(['ICORDS=G', 'IC=C8', 'QUALITY=25'], set_inverted_color_interp=False, createcopy=True) == 'success':
ret = nitf_check_created_file(31604, 42782, 38791, set_inverted_color_interp=False)
else:
ret = 'fail'
tmpfilename = '/vsimem/nitf_28_jp2openjpeg_bis.ntf'
src_ds = gdal.GetDriverByName('MEM').Create('', 1025, 1025)
gdal.GetDriverByName('NITF').CreateCopy(tmpfilename, src_ds, options=['IC=C8'])
ds = gdal.Open(tmpfilename)
blockxsize, blockysize = ds.GetRasterBand(1).GetBlockSize()
ds = None
gdal.Unlink(tmpfilename)
if (blockxsize, blockysize) != (1024, 1024):
gdaltest.post_reason('wrong block size')
print(blockxsize, blockysize)
ret = 'fail'
gdaltest.reregister_all_jpeg2000_drivers()
return ret
###############################################################################
# Test Create() with a LUT
def test_nitf_29():
drv = gdal.GetDriverByName('NITF')
ds = drv.Create('tmp/test_29.ntf', 1, 1, 1, gdal.GDT_Byte,
['IREP=RGB/LUT', 'LUT_SIZE=128'])
ct = gdal.ColorTable()
ct.SetColorEntry(0, (255, 255, 255, 255))
ct.SetColorEntry(1, (255, 255, 0, 255))
ct.SetColorEntry(2, (255, 0, 255, 255))
ct.SetColorEntry(3, (0, 255, 255, 255))
ds.GetRasterBand(1).SetRasterColorTable(ct)
ds = None
ds = gdal.Open('tmp/test_29.ntf')
ct = ds.GetRasterBand(1).GetRasterColorTable()
assert (ct.GetCount() == 129 and \
ct.GetColorEntry(0) == (255, 255, 255, 255) and \
ct.GetColorEntry(1) == (255, 255, 0, 255) and \
ct.GetColorEntry(2) == (255, 0, 255, 255) and \
ct.GetColorEntry(3) == (0, 255, 255, 255)), 'Wrong color table entry.'
new_ds = drv.CreateCopy('tmp/test_29_copy.ntf', ds)
del new_ds
ds = None
ds = gdal.Open('tmp/test_29_copy.ntf')
ct = ds.GetRasterBand(1).GetRasterColorTable()
assert (ct.GetCount() == 130 and \
ct.GetColorEntry(0) == (255, 255, 255, 255) and \
ct.GetColorEntry(1) == (255, 255, 0, 255) and \
ct.GetColorEntry(2) == (255, 0, 255, 255) and \
ct.GetColorEntry(3) == (0, 255, 255, 255)), 'Wrong color table entry.'
ds = None
###############################################################################
# Verify we can write a file with BLOCKA TRE and read it back properly.
def test_nitf_30():
src_ds = gdal.Open('data/nitf/fake_nsif.ntf')
ds = gdal.GetDriverByName('NITF').CreateCopy('tmp/nitf30.ntf', src_ds)
chksum = ds.GetRasterBand(1).Checksum()
chksum_expect = 12033
assert chksum == chksum_expect, 'Did not get expected chksum for band 1'
md = ds.GetMetadata()
assert md['NITF_FHDR'] == 'NSIF01.00', 'Got wrong FHDR value'
assert md['NITF_BLOCKA_BLOCK_INSTANCE_01'] == '01' and md['NITF_BLOCKA_BLOCK_COUNT'] == '01' and md['NITF_BLOCKA_N_GRAY_01'] == '00000' and md['NITF_BLOCKA_L_LINES_01'] == '01000' and md['NITF_BLOCKA_LAYOVER_ANGLE_01'] == '000' and md['NITF_BLOCKA_SHADOW_ANGLE_01'] == '000' and md['NITF_BLOCKA_FRLC_LOC_01'] == '+41.319331+020.078400' and md['NITF_BLOCKA_LRLC_LOC_01'] == '+41.317083+020.126072' and md['NITF_BLOCKA_LRFC_LOC_01'] == '+41.281634+020.122570' and md['NITF_BLOCKA_FRFC_LOC_01'] == '+41.283881+020.074924', \
'BLOCKA metadata has unexpected value.'
ds = None
gdal.GetDriverByName('NITF').Delete('tmp/nitf30.ntf')
# Test overriding src BLOCKA metadata with NITF_BLOCKA creation options
gdal.GetDriverByName('NITF').CreateCopy('/vsimem/nitf30_override.ntf', src_ds,
options=['BLOCKA_BLOCK_INSTANCE_01=01',
'BLOCKA_BLOCK_COUNT=01',
'BLOCKA_N_GRAY_01=00000',
'BLOCKA_L_LINES_01=01000',
'BLOCKA_LAYOVER_ANGLE_01=000',
'BLOCKA_SHADOW_ANGLE_01=000',
'BLOCKA_FRLC_LOC_01=+42.319331+020.078400',
'BLOCKA_LRLC_LOC_01=+42.317083+020.126072',
'BLOCKA_LRFC_LOC_01=+42.281634+020.122570',
'BLOCKA_FRFC_LOC_01=+42.283881+020.074924'
])
ds = gdal.Open('/vsimem/nitf30_override.ntf')
md = ds.GetMetadata()
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf30_override.ntf')
assert md['NITF_BLOCKA_BLOCK_INSTANCE_01'] == '01' and md['NITF_BLOCKA_BLOCK_COUNT'] == '01' and md['NITF_BLOCKA_N_GRAY_01'] == '00000' and md['NITF_BLOCKA_L_LINES_01'] == '01000' and md['NITF_BLOCKA_LAYOVER_ANGLE_01'] == '000' and md['NITF_BLOCKA_SHADOW_ANGLE_01'] == '000' and md['NITF_BLOCKA_FRLC_LOC_01'] == '+42.319331+020.078400' and md['NITF_BLOCKA_LRLC_LOC_01'] == '+42.317083+020.126072' and md['NITF_BLOCKA_LRFC_LOC_01'] == '+42.281634+020.122570' and md['NITF_BLOCKA_FRFC_LOC_01'] == '+42.283881+020.074924', \
'BLOCKA metadata has unexpected value.'
# Test overriding src BLOCKA metadata with TRE=BLOCKA= creation option
gdal.GetDriverByName('NITF').CreateCopy('/vsimem/nitf30_override.ntf', src_ds,
options=['TRE=BLOCKA=010000001000000000 +42.319331+020.078400+42.317083+020.126072+42.281634+020.122570+42.283881+020.074924xxxxx'
])
ds = gdal.Open('/vsimem/nitf30_override.ntf')
md = ds.GetMetadata()
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf30_override.ntf')
assert md['NITF_BLOCKA_BLOCK_INSTANCE_01'] == '01' and md['NITF_BLOCKA_BLOCK_COUNT'] == '01' and md['NITF_BLOCKA_N_GRAY_01'] == '00000' and md['NITF_BLOCKA_L_LINES_01'] == '01000' and md['NITF_BLOCKA_LAYOVER_ANGLE_01'] == '000' and md['NITF_BLOCKA_SHADOW_ANGLE_01'] == '000' and md['NITF_BLOCKA_FRLC_LOC_01'] == '+42.319331+020.078400' and md['NITF_BLOCKA_LRLC_LOC_01'] == '+42.317083+020.126072' and md['NITF_BLOCKA_LRFC_LOC_01'] == '+42.281634+020.122570' and md['NITF_BLOCKA_FRFC_LOC_01'] == '+42.283881+020.074924', \
'BLOCKA metadata has unexpected value.'
# Test that gdal_translate -ullr doesn't propagate BLOCKA
gdal.Translate('/vsimem/nitf30_no_src_md.ntf', src_ds, format='NITF', outputBounds=[2, 49, 3, 50])
ds = gdal.Open('/vsimem/nitf30_no_src_md.ntf')
md = ds.GetMetadata()
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf30_no_src_md.ntf')
assert 'NITF_BLOCKA_BLOCK_INSTANCE_01' not in md, \
        'unexpectedly found BLOCKA metadata.'
# Test USE_SRC_NITF_METADATA=NO
gdal.GetDriverByName('NITF').CreateCopy('/vsimem/nitf30_no_src_md.ntf', src_ds,
options=['USE_SRC_NITF_METADATA=NO'])
ds = gdal.Open('/vsimem/nitf30_no_src_md.ntf')
md = ds.GetMetadata()
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf30_no_src_md.ntf')
assert 'NITF_BLOCKA_BLOCK_INSTANCE_01' not in md, \
        'unexpectedly found BLOCKA metadata.'
###############################################################################
# Verify we can write a file with a custom TRE and read it back properly.
def test_nitf_31():
nitf_create(['TRE=CUSTOM= Test TRE1\\0MORE',
'TRE=TOTEST=SecondTRE',
'ICORDS=G'])
ds = gdal.Open('tmp/test_create.ntf')
md = ds.GetMetadata('TRE')
assert len(md) == 2, 'Did not get expected TRE count'
# Check that the leading space in the CUSTOM metadata item is preserved (#3088, #3204)
try:
assert ds.GetMetadataItem('CUSTOM', 'TRE') == ' Test TRE1\\0MORE', \
'Did not get expected TRE contents'
except:
pass
assert md['CUSTOM'] == ' Test TRE1\\0MORE' and md['TOTEST'] == 'SecondTRE', \
'Did not get expected TRE contents'
ds = None
return nitf_check_created_file(32498, 42602, 38982)
###############################################################################
# Test Create() with ICORDS=D
def test_nitf_32():
nitf_create(['ICORDS=D'])
return nitf_check_created_file(32498, 42602, 38982)
###############################################################################
# Test Create() with ICORDS=D and a consistent BLOCKA
def test_nitf_33():
nitf_create(['ICORDS=D',
'BLOCKA_BLOCK_COUNT=01',
'BLOCKA_BLOCK_INSTANCE_01=01',
'BLOCKA_L_LINES_01=100',
'BLOCKA_FRLC_LOC_01=+29.950000+119.950000',
'BLOCKA_LRLC_LOC_01=+20.050000+119.950000',
'BLOCKA_LRFC_LOC_01=+20.050000+100.050000',
'BLOCKA_FRFC_LOC_01=+29.950000+100.050000'])
return nitf_check_created_file(32498, 42602, 38982)
###############################################################################
# Test CreateCopy() of a 16bit image with tiling
def test_nitf_34():
tst = gdaltest.GDALTest('NITF', 'n43.dt0', 1, 49187, options=['BLOCKSIZE=64'])
return tst.testCreateCopy()
###############################################################################
# Test CreateCopy() writing file with a text segment.
def test_nitf_35():
src_ds = gdal.Open('data/nitf/text_md.vrt')
ds = gdal.GetDriverByName('NITF').CreateCopy('tmp/nitf_35.ntf', src_ds)
src_ds = None
ds = None
ds = gdal.Open('tmp/nitf_35.ntf')
exp_text = """This is text data
with a newline."""
md = ds.GetMetadata('TEXT')
assert md['DATA_0'] == exp_text, 'Did not get expected TEXT metadata.'
exp_text = """Also, a second text segment is created."""
md = ds.GetMetadata('TEXT')
assert md['DATA_1'] == exp_text, 'Did not get expected TEXT metadata.'
ds = None
gdal.GetDriverByName('NITF').Delete('tmp/nitf_35.ntf')
###############################################################################
# Create and read a JPEG encoded NITF file (C3) with several blocks
# Check that statistics are persisted (#3985)
def test_nitf_36():
src_ds = gdal.Open('data/rgbsmall.tif')
ds = gdal.GetDriverByName('NITF').CreateCopy('tmp/nitf36.ntf', src_ds,
options=['IC=C3', 'BLOCKSIZE=32', 'QUALITY=100'])
src_ds = None
ds = None
ds = gdal.Open('tmp/nitf36.ntf')
assert ds.GetRasterBand(1).GetMinimum() is None, \
'Did not expect to have minimum value at that point.'
(_, _, mean, stddev) = ds.GetRasterBand(1).GetStatistics(False, False)
assert stddev < 0, 'Did not expect to have statistics at that point.'
(exp_mean, exp_stddev) = (65.4208, 47.254550335)
(_, _, mean, stddev) = ds.GetRasterBand(1).GetStatistics(False, True)
assert exp_mean == pytest.approx(mean, abs=0.1) and exp_stddev == pytest.approx(stddev, abs=0.1), \
'did not get expected mean or standard dev.'
md = ds.GetMetadata('IMAGE_STRUCTURE')
assert md['COMPRESSION'] == 'JPEG', 'Did not get expected compression value.'
ds = None
# Check that statistics are persisted (#3985)
ds = gdal.Open('tmp/nitf36.ntf')
assert ds.GetRasterBand(1).GetMinimum() is not None, \
'Should have minimum value at that point.'
(_, _, mean, stddev) = ds.GetRasterBand(1).GetStatistics(False, False)
assert exp_mean == pytest.approx(mean, abs=0.1) and exp_stddev == pytest.approx(stddev, abs=0.1), \
'Should have statistics at that point.'
ds = None
###############################################################################
# Create and read a NITF file with 69999 bands
def test_nitf_37():
ds = gdal.GetDriverByName('NITF').Create('tmp/nitf37.ntf', 1, 1, 69999)
ds = None
ds = gdal.Open('tmp/nitf37.ntf')
assert ds.RasterCount == 69999
ds = None
###############################################################################
# Create and read a NITF file with 999 images
def test_nitf_38():
ds = gdal.Open('data/byte.tif')
nXSize = ds.RasterXSize
nYSize = ds.RasterYSize
data = ds.GetRasterBand(1).ReadRaster(0, 0, nXSize, nYSize)
expected_cs = ds.GetRasterBand(1).Checksum()
ds = gdal.GetDriverByName('NITF').Create('tmp/nitf38.ntf', nXSize, nYSize, 1, options=['NUMI=999'])
ds = None
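    # 'NITF_IM:<n>:<filename>' is the NITF subdataset syntax: it opens the
    # n-th (0-based) image segment of a multi-image file, here the 999th image.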
ds = gdal.Open('NITF_IM:998:tmp/nitf38.ntf', gdal.GA_Update)
ds.GetRasterBand(1).WriteRaster(0, 0, nXSize, nYSize, data)
# Create overviews
ds.BuildOverviews(overviewlist=[2])
ds = None
ds = gdal.Open('NITF_IM:0:tmp/nitf38.ntf')
assert ds.GetRasterBand(1).Checksum() == 0
ds = None
ds = gdal.Open('NITF_IM:998:tmp/nitf38.ntf')
cs = ds.GetRasterBand(1).Checksum()
assert cs == expected_cs, 'bad checksum for image of 998th subdataset'
# Check the overview
cs = ds.GetRasterBand(1).GetOverview(0).Checksum()
assert cs == 1087, 'bad checksum for overview of image of 998th subdataset'
out_ds = gdal.GetDriverByName('VRT').CreateCopy('tmp/nitf38.vrt', ds)
out_ds = None
ds = None
ds = gdal.Open('tmp/nitf38.vrt')
cs = ds.GetRasterBand(1).Checksum()
ds = None
gdal.Unlink('tmp/nitf38.vrt')
assert cs == expected_cs
ds = gdal.Open('NITF_IM:998:%s/tmp/nitf38.ntf' % os.getcwd())
out_ds = gdal.GetDriverByName('VRT').CreateCopy('%s/tmp/nitf38.vrt' % os.getcwd(), ds)
out_ds = None
ds = None
ds = gdal.Open('tmp/nitf38.vrt')
cs = ds.GetRasterBand(1).Checksum()
ds = None
gdal.Unlink('tmp/nitf38.vrt')
assert cs == expected_cs
ds = gdal.Open('NITF_IM:998:%s/tmp/nitf38.ntf' % os.getcwd())
out_ds = gdal.GetDriverByName('VRT').CreateCopy('tmp/nitf38.vrt', ds)
del out_ds
ds = None
ds = gdal.Open('tmp/nitf38.vrt')
cs = ds.GetRasterBand(1).Checksum()
ds = None
gdal.Unlink('tmp/nitf38.vrt')
assert cs == expected_cs
###############################################################################
# Create and read a JPEG encoded NITF file (M3) with several blocks
def test_nitf_39():
src_ds = gdal.Open('data/rgbsmall.tif')
ds = gdal.GetDriverByName('NITF').CreateCopy('tmp/nitf39.ntf', src_ds,
options=['IC=M3', 'BLOCKSIZE=32', 'QUALITY=100'])
src_ds = None
ds = None
ds = gdal.Open('tmp/nitf39.ntf')
(exp_mean, exp_stddev) = (65.4208, 47.254550335)
(mean, stddev) = ds.GetRasterBand(1).ComputeBandStats()
assert exp_mean == pytest.approx(mean, abs=0.1) and exp_stddev == pytest.approx(stddev, abs=0.1), \
'did not get expected mean or standard dev.'
md = ds.GetMetadata('IMAGE_STRUCTURE')
assert md['COMPRESSION'] == 'JPEG', 'Did not get expected compression value.'
ds = None
###############################################################################
# Create a 10 GB NITF file
def test_nitf_40():
    # Determine if the filesystem supports sparse files (we don't want to
    # create a real 10 GB file!)
if not gdaltest.filesystem_supports_sparse_files('tmp'):
pytest.skip()
width = 99000
height = 99000
x = width - 1
y = height - 1
ds = gdal.GetDriverByName('NITF').Create('tmp/nitf40.ntf', width, height, options=['BLOCKSIZE=256'])
data = struct.pack('B' * 1, 123)
# Write a non NULL byte at the bottom right corner of the image (around 10 GB offset)
ds.GetRasterBand(1).WriteRaster(x, y, 1, 1, data)
ds = None
# Check that we can fetch it at the right value
ds = gdal.Open('tmp/nitf40.ntf')
assert ds.GetRasterBand(1).ReadRaster(x, y, 1, 1) == data
ds = None
# Check that it is indeed at a very far offset, and that the NITF driver
# has not put it somewhere else due to involuntary cast to 32bit integer.
blockWidth = 256
blockHeight = 256
nBlockx = int((width + blockWidth - 1) / blockWidth)
iBlockx = int(x / blockWidth)
iBlocky = int(y / blockHeight)
ix = x % blockWidth
iy = y % blockHeight
offset = 843 + (iBlocky * nBlockx + iBlockx) * blockWidth * blockHeight + (iy * blockWidth + ix)
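    # For x = y = 98999 with 256x256 blocks this gives nBlockx = 387,
    # iBlockx = iBlocky = 386 and ix = iy = 183, i.e. an offset of about
    # 9.8 GB -- far beyond what a 32-bit offset could address.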
try:
os.SEEK_SET
except AttributeError:
os.SEEK_SET, os.SEEK_CUR, os.SEEK_END = list(range(3))
fd = open('tmp/nitf40.ntf', 'rb')
fd.seek(offset, os.SEEK_SET)
bytes_read = fd.read(1)
fd.close()
val = struct.unpack('B' * 1, bytes_read)[0]
assert val == 123, ('Bad value at offset %d : %d' % (offset, val))
###############################################################################
# Check reading a 12-bit JPEG compressed NITF
def test_nitf_41(not_jpeg_9b):
# Check if JPEG driver supports 12bit JPEG reading/writing
jpg_drv = gdal.GetDriverByName('JPEG')
md = jpg_drv.GetMetadata()
if md[gdal.DMD_CREATIONDATATYPES].find('UInt16') == -1:
sys.stdout.write('(12bit jpeg not available) ... ')
pytest.skip()
gdal.Unlink('data/nitf/U_4017A.NTF.aux.xml')
ds = gdal.Open('data/nitf/U_4017A.NTF')
assert ds.GetRasterBand(1).DataType == gdal.GDT_UInt16
stats = ds.GetRasterBand(1).GetStatistics(0, 1)
assert stats[2] >= 2385 and stats[2] <= 2386
ds = None
gdal.Unlink('data/nitf/U_4017A.NTF.aux.xml')
###############################################################################
# Check creating a 12-bit JPEG compressed NITF
def test_nitf_42(not_jpeg_9b):
# Check if JPEG driver supports 12bit JPEG reading/writing
jpg_drv = gdal.GetDriverByName('JPEG')
md = jpg_drv.GetMetadata()
if md[gdal.DMD_CREATIONDATATYPES].find('UInt16') == -1:
sys.stdout.write('(12bit jpeg not available) ... ')
pytest.skip()
ds = gdal.Open('data/nitf/U_4017A.NTF')
out_ds = gdal.GetDriverByName('NITF').CreateCopy('tmp/nitf42.ntf', ds, options=['IC=C3', 'FHDR=NITF02.10'])
del out_ds
ds = gdal.Open('tmp/nitf42.ntf')
assert ds.GetRasterBand(1).DataType == gdal.GDT_UInt16
stats = ds.GetRasterBand(1).GetStatistics(0, 1)
assert stats[2] >= 2385 and stats[2] <= 2386
ds = None
###############################################################################
# Test CreateCopy() in IC=C8 with various JPEG2000 drivers
def nitf_43(driver_to_test, options):
jp2_drv = gdal.GetDriverByName(driver_to_test)
if driver_to_test == 'JP2ECW' and jp2_drv is not None:
if 'DMD_CREATIONOPTIONLIST' not in jp2_drv.GetMetadata():
jp2_drv = None
if jp2_drv is None:
pytest.skip()
# Deregister other potential conflicting JPEG2000 drivers
gdaltest.deregister_all_jpeg2000_drivers_but(driver_to_test)
ds = gdal.Open('data/byte.tif')
gdal.PushErrorHandler('CPLQuietErrorHandler')
out_ds = gdal.GetDriverByName('NITF').CreateCopy('tmp/nitf_43.ntf', ds, options=options, strict=0)
gdal.PopErrorHandler()
out_ds = None
out_ds = gdal.Open('tmp/nitf_43.ntf')
if out_ds.GetRasterBand(1).Checksum() == 4672:
ret = 'success'
else:
ret = 'fail'
out_ds = None
if open('tmp/nitf_43.ntf', 'rb').read().decode('LATIN1').find('<gml') >= 0:
print('GMLJP2 detected !')
ret = 'fail'
gdal.GetDriverByName('NITF').Delete('tmp/nitf_43.ntf')
gdaltest.reregister_all_jpeg2000_drivers()
return ret
def test_nitf_43_jasper():
return nitf_43('JPEG2000', ['IC=C8'])
def test_nitf_43_jp2ecw():
import ecw
if not ecw.has_write_support():
pytest.skip()
return nitf_43('JP2ECW', ['IC=C8', 'TARGET=0'])
def test_nitf_43_jp2kak():
return nitf_43('JP2KAK', ['IC=C8', 'QUALITY=100'])
###############################################################################
# Check creating a monoblock 10000x1 image (ticket #3263)
def test_nitf_44():
out_ds = gdal.GetDriverByName('NITF').Create('tmp/nitf44.ntf', 10000, 1)
out_ds.GetRasterBand(1).Fill(255)
out_ds = None
ds = gdal.Open('tmp/nitf44.ntf')
if 'GetBlockSize' in dir(gdal.Band):
(blockx, _) = ds.GetRasterBand(1).GetBlockSize()
assert blockx == 10000
assert ds.GetRasterBand(1).Checksum() == 57182
ds = None
###############################################################################
# Check overviews on a JPEG compressed subdataset
def test_nitf_45():
try:
os.remove('tmp/nitf45.ntf.aux.xml')
except OSError:
pass
shutil.copyfile('data/nitf/two_images_jpeg.ntf', 'tmp/nitf45.ntf')
ds = gdal.Open('NITF_IM:1:tmp/nitf45.ntf', gdal.GA_Update)
ds.BuildOverviews(overviewlist=[2])
# FIXME ? ds.GetRasterBand(1).GetOverview(0) is None until we reopen
ds = None
ds = gdal.Open('NITF_IM:1:tmp/nitf45.ntf')
cs = ds.GetRasterBand(1).GetOverview(0).Checksum()
assert cs == 1086, 'did not get expected checksum for overview of subdataset'
ds = None
###############################################################################
# Check overviews on a JPEG2000 compressed subdataset
def nitf_46(driver_to_test):
jp2_drv = gdal.GetDriverByName(driver_to_test)
if jp2_drv is None:
pytest.skip()
# Deregister other potential conflicting JPEG2000 drivers
gdaltest.deregister_all_jpeg2000_drivers_but(driver_to_test)
try:
os.remove('tmp/nitf46.ntf.aux.xml')
except OSError:
pass
try:
os.remove('tmp/nitf46.ntf_0.ovr')
except OSError:
pass
shutil.copyfile('data/nitf/two_images_jp2.ntf', 'tmp/nitf46.ntf')
ds = gdal.Open('NITF_IM:1:tmp/nitf46.ntf', gdal.GA_Update)
ds.BuildOverviews(overviewlist=[2])
# FIXME ? ds.GetRasterBand(1).GetOverview(0) is None until we reopen
ds = None
ds = gdal.Open('NITF_IM:1:tmp/nitf46.ntf')
if ds.GetRasterBand(1).GetOverview(0) is None:
gdaltest.post_reason('no overview of subdataset')
ret = 'fail'
else:
cs = ds.GetRasterBand(1).GetOverview(0).Checksum()
if cs != 1086:
print(cs)
gdaltest.post_reason('did not get expected checksum for overview of subdataset')
ret = 'fail'
else:
ret = 'success'
ds = None
gdaltest.reregister_all_jpeg2000_drivers()
return ret
def nitf_46_jp2ecw():
return nitf_46('JP2ECW')
def nitf_46_jp2mrsid():
return nitf_46('JP2MrSID')
def nitf_46_jp2kak():
return nitf_46('JP2KAK')
def test_nitf_46_jasper():
return nitf_46('JPEG2000')
def nitf_46_openjpeg():
return nitf_46('JP2OpenJPEG')
###############################################################################
# Check reading of rsets.
def test_nitf_47():
ds = gdal.Open('data/nitf/rset.ntf.r0')
band = ds.GetRasterBand(2)
assert band.GetOverviewCount() == 2, \
'did not get the expected number of rset overviews.'
cs = band.GetOverview(1).Checksum()
assert cs == 1297, 'did not get expected checksum for overview of subdataset'
ds = None
###############################################################################
# Check building of standard overviews in place of rset overviews.
def test_nitf_48():
try:
os.remove('tmp/rset.ntf.r0')
os.remove('tmp/rset.ntf.r1')
os.remove('tmp/rset.ntf.r2')
os.remove('tmp/rset.ntf.r0.ovr')
except OSError:
pass
shutil.copyfile('data/nitf/rset.ntf.r0', 'tmp/rset.ntf.r0')
shutil.copyfile('data/nitf/rset.ntf.r1', 'tmp/rset.ntf.r1')
shutil.copyfile('data/nitf/rset.ntf.r2', 'tmp/rset.ntf.r2')
ds = gdal.Open('tmp/rset.ntf.r0', gdal.GA_Update)
ds.BuildOverviews(overviewlist=[3])
ds = None
ds = gdal.Open('tmp/rset.ntf.r0')
assert ds.GetRasterBand(1).GetOverviewCount() == 1, \
'did not get the expected number of rset overviews.'
cs = ds.GetRasterBand(1).GetOverview(0).Checksum()
assert cs == 2328, 'did not get expected checksum for overview of subdataset'
ds = None
try:
os.remove('tmp/rset.ntf.r0')
os.remove('tmp/rset.ntf.r1')
os.remove('tmp/rset.ntf.r2')
os.remove('tmp/rset.ntf.r0.ovr')
except OSError:
pass
###############################################################################
# Test TEXT and CGM creation options with CreateCopy() (#3376)
def test_nitf_49():
options = ["TEXT=DATA_0=COUCOU",
"TEXT=HEADER_0=ABC", # This content is invalid but who cares here
"CGM=SEGMENT_COUNT=1",
"CGM=SEGMENT_0_SLOC_ROW=25",
"CGM=SEGMENT_0_SLOC_COL=25",
"CGM=SEGMENT_0_SDLVL=2",
"CGM=SEGMENT_0_SALVL=1",
"CGM=SEGMENT_0_DATA=XYZ"]
src_ds = gdal.Open('data/nitf/text_md.vrt')
# This will check that the creation option overrides the TEXT metadata domain from the source
ds = gdal.GetDriverByName('NITF').CreateCopy('tmp/nitf49.ntf', src_ds,
options=options)
# Test copy from source TEXT and CGM metadata domains
ds2 = gdal.GetDriverByName('NITF').CreateCopy('tmp/nitf49_2.ntf', ds)
md = ds2.GetMetadata('TEXT')
if 'DATA_0' not in md or md['DATA_0'] != 'COUCOU' or \
'HEADER_0' not in md or md['HEADER_0'].find('ABC ') == -1:
gdaltest.post_reason('did not get expected TEXT metadata')
print(md)
return
md = ds2.GetMetadata('CGM')
if 'SEGMENT_COUNT' not in md or md['SEGMENT_COUNT'] != '1' or \
'SEGMENT_0_DATA' not in md or md['SEGMENT_0_DATA'] != 'XYZ':
gdaltest.post_reason('did not get expected CGM metadata')
print(md)
return
src_ds = None
ds = None
ds2 = None
###############################################################################
# Test TEXT and CGM creation options with Create() (#3376)
def test_nitf_50():
options = [ # "IC=C8",
"TEXT=DATA_0=COUCOU",
"TEXT=HEADER_0=ABC", # This content is invalid but who cares here
"CGM=SEGMENT_COUNT=1",
"CGM=SEGMENT_0_SLOC_ROW=25",
"CGM=SEGMENT_0_SLOC_COL=25",
"CGM=SEGMENT_0_SDLVL=2",
"CGM=SEGMENT_0_SALVL=1",
"CGM=SEGMENT_0_DATA=XYZ"]
try:
os.remove('tmp/nitf50.ntf')
except OSError:
pass
# This will check that the creation option overrides the TEXT metadata domain from the source
ds = gdal.GetDriverByName('NITF').Create('tmp/nitf50.ntf', 100, 100, 3, options=options)
ds.WriteRaster(0, 0, 100, 100, ' ', 1, 1,
buf_type=gdal.GDT_Byte,
band_list=[1, 2, 3])
ds.GetRasterBand(1).SetRasterColorInterpretation(gdal.GCI_BlueBand)
ds.GetRasterBand(2).SetRasterColorInterpretation(gdal.GCI_GreenBand)
ds.GetRasterBand(3).SetRasterColorInterpretation(gdal.GCI_RedBand)
# We need to reopen the dataset, because the TEXT and CGM segments are only written
# when closing the dataset (for JP2 compressed datastreams, we need to wait for the
# imagery to be written)
ds = None
ds = gdal.Open('tmp/nitf50.ntf')
md = ds.GetMetadata('TEXT')
if 'DATA_0' not in md or md['DATA_0'] != 'COUCOU' or \
'HEADER_0' not in md or md['HEADER_0'].find('ABC ') == -1:
gdaltest.post_reason('did not get expected TEXT metadata')
print(md)
return
md = ds.GetMetadata('CGM')
if 'SEGMENT_COUNT' not in md or md['SEGMENT_COUNT'] != '1' or \
'SEGMENT_0_DATA' not in md or md['SEGMENT_0_DATA'] != 'XYZ':
gdaltest.post_reason('did not get expected CGM metadata')
print(md)
return
ds = None
###############################################################################
# Test reading very small images with NBPP < 8 or NBPP == 12
def test_nitf_51():
for xsize in range(1, 9):
for nbpp in [1, 2, 3, 4, 5, 6, 7, 12]:
ds = gdal.GetDriverByName('NITF').Create('tmp/nitf51.ntf', xsize, 1)
ds = None
f = open('tmp/nitf51.ntf', 'rb+')
# Patch NBPP value at offset 811
f.seek(811)
f.write(struct.pack('B' * 2, 48 + int(nbpp / 10), 48 + nbpp % 10))
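            # (48 is the ASCII code of '0', so this writes the two decimal
            # digits of NBPP as text, e.g. '12' for nbpp == 12)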
# Write image data
f.seek(843)
n = int((xsize * nbpp + 7) / 8)
for i in range(n):
f.write(struct.pack('B' * 1, 255))
f.close()
ds = gdal.Open('tmp/nitf51.ntf')
if nbpp == 12:
data = ds.GetRasterBand(1).ReadRaster(0, 0, xsize, 1, buf_type=gdal.GDT_UInt16)
arr = struct.unpack('H' * xsize, data)
else:
data = ds.GetRasterBand(1).ReadRaster(0, 0, xsize, 1)
arr = struct.unpack('B' * xsize, data)
ds = None
for i in range(xsize):
if arr[i] != (1 << nbpp) - 1:
print('xsize = %d, nbpp = %d' % (xsize, nbpp))
pytest.fail('did not get expected data')
###############################################################################
# Test reading GeoSDE TREs
def test_nitf_52():
# Create a fake NITF file with GeoSDE TREs (probably not conformant, but enough to test GDAL code)
ds = gdal.GetDriverByName('NITF').Create('tmp/nitf52.ntf', 1, 1, options=['FILE_TRE=GEOPSB=01234567890123456789012345678901234567890123456789012345678901234567890123456789012345EURM ',
'FILE_TRE=PRJPSB=01234567890123456789012345678901234567890123456789012345678901234567890123456789AC0000000000000000000000000000000',
'TRE=MAPLOB=M 0001000010000000000100000000000005000000'])
ds = None
ds = gdal.Open('tmp/nitf52.ntf')
wkt = ds.GetProjectionRef()
gt = ds.GetGeoTransform()
ds = None
expected_wkt ="""PROJCS["unnamed",GEOGCS["EUROPEAN 1950, Mean (3 Param)",DATUM["EUROPEAN_1950_Mean_3_Param",SPHEROID["International 1924",6378388,297],TOWGS84[-87,-98,-121,0,0,0,0]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]]],PROJECTION["Albers_Conic_Equal_Area"],PARAMETER["latitude_of_center",0],PARAMETER["longitude_of_center",0],PARAMETER["standard_parallel_1",0],PARAMETER["standard_parallel_2",0],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH]]"""
assert wkt in (expected_wkt, expected_wkt.replace('EUROPEAN_1950_Mean_3_Param', 'EUROPEAN 1950, Mean (3 Param)'))
assert gt == (100000.0, 10.0, 0.0, 5000000.0, 0.0, -10.0), \
'did not get expected geotransform'
###############################################################################
# Test reading UTM MGRS
def test_nitf_53():
ds = gdal.GetDriverByName('NITF').Create('tmp/nitf53.ntf', 2, 2, options=['ICORDS=N'])
ds = None
f = open('tmp/nitf53.ntf', 'rb+')
# Patch ICORDS and IGEOLO
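    # Each IGEOLO corner below is an MGRS string: e.g. '31UBQ1000040000' is
    # UTM zone 31, band U, 100 km square BQ, easting 10000 m / northing
    # 40000 m within that square -- consistent with the geotransform
    # asserted further down.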
f.seek(775)
f.write(b'U')
f.write(b'31UBQ1000040000')
f.write(b'31UBQ2000040000')
f.write(b'31UBQ2000030000')
f.write(b'31UBQ1000030000')
f.close()
ds = gdal.Open('tmp/nitf53.ntf')
wkt = ds.GetProjectionRef()
gt = ds.GetGeoTransform()
ds = None
assert 'PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",3],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0]' in wkt, \
'did not get expected SRS'
assert gt == (205000.0, 10000.0, 0.0, 5445000.0, 0.0, -10000.0), \
'did not get expected geotransform'
###############################################################################
# Test reading RPC00B
def test_nitf_54():
# Create a fake NITF file with RPC00B TRE (probably not conformant, but enough to test GDAL code)
RPC00B = '100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
ds = gdal.GetDriverByName('NITF').Create('tmp/nitf54.ntf', 1, 1, options=['TRE=RPC00B=' + RPC00B])
ds = None
ds = gdal.Open('tmp/nitf54.ntf')
md = ds.GetMetadata('RPC')
ds = None
assert md is not None and 'HEIGHT_OFF' in md
###############################################################################
# Test reading ICHIPB
def test_nitf_55():
# Create a fake NITF file with ICHIPB TRE (probably not conformant, but enough to test GDAL code)
ICHIPB = '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
ds = gdal.GetDriverByName('NITF').Create('tmp/nitf55.ntf', 1, 1, options=['TRE=ICHIPB=' + ICHIPB])
ds = None
ds = gdal.Open('tmp/nitf55.ntf')
md = ds.GetMetadata()
ds = None
assert md is not None and 'ICHIP_SCALE_FACTOR' in md
###############################################################################
# Test reading USE00A
def test_nitf_56():
# Create a fake NITF file with USE00A TRE (probably not conformant, but enough to test GDAL code)
USE00A = '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
ds = gdal.GetDriverByName('NITF').Create('tmp/nitf56.ntf', 1, 1, options=['TRE=USE00A=' + USE00A])
ds = None
ds = gdal.Open('tmp/nitf56.ntf')
md = ds.GetMetadata()
ds = None
assert md is not None and 'NITF_USE00A_ANGLE_TO_NORTH' in md
###############################################################################
# Test reading GEOLOB
def test_nitf_57():
# Create a fake NITF file with GEOLOB TRE
GEOLOB = '000000360000000360-180.000000000090.000000000000'
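    # Assuming the standard GEOLOB field layout (ARV, BRV, LSO, PSO), this
    # encodes a density of 360 pixels per 360 degrees in both directions and
    # an origin of (-180, 90), i.e. the 1 degree/pixel geotransform checked
    # below.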
ds = gdal.GetDriverByName('NITF').Create('tmp/nitf57.ntf', 1, 1, options=['TRE=GEOLOB=' + GEOLOB])
ds = None
ds = gdal.Open('tmp/nitf57.ntf')
gt = ds.GetGeoTransform()
ds = None
if gt != (-180.0, 1.0, 0.0, 90.0, 0.0, -1.0):
gdaltest.post_reason('did not get expected geotransform')
print(gt)
return
###############################################################################
# Test reading STDIDC
def test_nitf_58():
# Create a fake NITF file with STDIDC TRE (probably not conformant, but enough to test GDAL code)
STDIDC = '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
ds = gdal.GetDriverByName('NITF').Create('tmp/nitf58.ntf', 1, 1, options=['TRE=STDIDC=' + STDIDC])
ds = None
ds = gdal.Open('tmp/nitf58.ntf')
md = ds.GetMetadata()
ds = None
assert md is not None and 'NITF_STDIDC_ACQUISITION_DATE' in md
###############################################################################
# Test reading IMRFCA and IMASDA
def test_nitf_read_IMRFCA_IMASDA():
# Create a fake NITF file with fake IMRFCA and IMASDA TRE
IMRFCA = '0' * 1760
IMASDA = '0' * 242
tmpfile = '/vsimem/nitf_read_IMRFCA_IMASDA.ntf'
gdal.GetDriverByName('NITF').Create(tmpfile, 1, 1, options=['TRE=IMRFCA=' + IMRFCA, 'TRE=IMASDA=' + IMASDA])
ds = gdal.Open(tmpfile)
md = ds.GetMetadata('RPC')
ds = None
gdal.Unlink(tmpfile)
assert not (md is None or md == {})
# Only IMRFCA
gdal.GetDriverByName('NITF').Create(tmpfile, 1, 1, options=['TRE=IMRFCA=' + IMRFCA])
ds = gdal.Open(tmpfile)
md = ds.GetMetadata('RPC')
ds = None
gdal.Unlink(tmpfile)
assert md == {}
# Only IMASDA
gdal.GetDriverByName('NITF').Create(tmpfile, 1, 1, options=['TRE=IMASDA=' + IMASDA])
ds = gdal.Open(tmpfile)
md = ds.GetMetadata('RPC')
ds = None
gdal.Unlink(tmpfile)
assert md == {}
# Too short IMRFCA
with gdaltest.error_handler():
gdal.GetDriverByName('NITF').Create(tmpfile, 1, 1, options=['TRE=IMRFCA=' + IMRFCA[0:-1], 'TRE=IMASDA=' + IMASDA])
ds = gdal.Open(tmpfile)
md = ds.GetMetadata('RPC')
ds = None
gdal.Unlink(tmpfile)
assert md == {}
# Too short IMASDA
with gdaltest.error_handler():
gdal.GetDriverByName('NITF').Create(tmpfile, 1, 1, options=['TRE=IMRFCA=' + IMRFCA, 'TRE=IMASDA=' + IMASDA[0:-1]])
ds = gdal.Open(tmpfile)
md = ds.GetMetadata('RPC')
ds = None
gdal.Unlink(tmpfile)
assert md == {}
###############################################################################
# Test georeferencing through .nfw and .hdr files
def test_nitf_59():
shutil.copyfile('data/nitf/nitf59.nfw', 'tmp/nitf59.nfw')
shutil.copyfile('data/nitf/nitf59.hdr', 'tmp/nitf59.hdr')
ds = gdal.GetDriverByName('NITF').Create('tmp/nitf59.ntf', 1, 1, options=['ICORDS=N'])
ds = None
ds = gdal.Open('tmp/nitf59.ntf')
wkt = ds.GetProjectionRef()
gt = ds.GetGeoTransform()
ds = None
assert """PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",3],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0]""" in wkt, \
'did not get expected SRS'
assert gt == (149999.5, 1.0, 0.0, 4500000.5, 0.0, -1.0), \
'did not get expected geotransform'
###############################################################################
# Test reading CADRG polar tile georeferencing (#2940)
def test_nitf_60():
# Shut down errors because the file is truncated
gdal.PushErrorHandler('CPLQuietErrorHandler')
ds = gdal.Open('data/nitf/testtest.on9')
gdal.PopErrorHandler()
wkt = ds.GetProjectionRef()
gt = ds.GetGeoTransform()
ds = None
assert wkt == """PROJCS["unknown",GEOGCS["unknown",DATUM["unknown",SPHEROID["unknown",6378137,0]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]]],PROJECTION["Azimuthal_Equidistant"],PARAMETER["latitude_of_center",90],PARAMETER["longitude_of_center",0],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH]]""", \
'did not get expected SRS'
ref_gt = [1036422.8453166834, 149.94543479697344, 0.0, 345474.28177222813, 0.0, -149.94543479697404]
for i in range(6):
assert gt[i] == pytest.approx(ref_gt[i], abs=1e-6), 'did not get expected geotransform'
###############################################################################
# Test reading TRE from DE segment
def test_nitf_61():
# Derived from http://www.gwg.nga.mil/ntb/baseline/software/testfile/rsm/SampleFiles/FrameSet1/NITF_Files/i_6130a.zip
# but hand edited to have just 1x1 imagery
ds = gdal.Open('data/nitf/i_6130a_truncated.ntf')
md = ds.GetMetadata('TRE')
xml_tre = ds.GetMetadata('xml:TRE')[0]
ds = None
assert md is not None and 'RSMDCA' in md and 'RSMECA' in md and 'RSMPCA' in md and 'RSMIDA' in md
assert xml_tre.find('<tre name="RSMDCA"') != -1, 'did not get expected xml:TRE'
###############################################################################
# Test creating & reading image comments
def test_nitf_62():
# 80+1 characters
comments = '012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678ZA'
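    # NITF image comments are stored in 80-character ICOMn fields, so an
    # 81-character string spills into a second field and reads back as a
    # 160-character (padded) value -- which is what the check below expects.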
ds = gdal.GetDriverByName('NITF').Create('tmp/nitf62.ntf', 1, 1, options=['ICOM=' + comments])
ds = None
ds = gdal.Open('tmp/nitf62.ntf')
md = ds.GetMetadata()
ds = None
got_comments = md['NITF_IMAGE_COMMENTS']
if len(got_comments) != 160 or got_comments.find(comments) == -1:
print("'%s'" % got_comments)
pytest.fail('did not get expected comments')
###############################################################################
# Test NITFReadImageLine() and NITFWriteImageLine() when nCols < nBlockWidth (#3551)
def test_nitf_63():
ds = gdal.GetDriverByName('NITF').Create('tmp/nitf63.ntf', 50, 25, 3, gdal.GDT_Int16, options=['BLOCKXSIZE=256'])
ds = None
try:
os.SEEK_SET
except AttributeError:
os.SEEK_SET, os.SEEK_CUR, os.SEEK_END = list(range(3))
# Patch IMODE at hand
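    # (IMODE 'P' means band-interleaved-by-pixel storage, which is the layout
    # exercised by the scanline read/write code paths tested here)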
f = open('tmp/nitf63.ntf', 'r+')
f.seek(820, os.SEEK_SET)
f.write('P')
f.close()
ds = gdal.Open('tmp/nitf63.ntf', gdal.GA_Update)
md = ds.GetMetadata()
assert md['NITF_IMODE'] == 'P', 'wrong IMODE'
ds.GetRasterBand(1).Fill(0)
ds.GetRasterBand(2).Fill(127)
ds.GetRasterBand(3).Fill(255)
ds = None
ds = gdal.Open('tmp/nitf63.ntf')
cs1 = ds.GetRasterBand(1).Checksum()
cs2 = ds.GetRasterBand(2).Checksum()
cs3 = ds.GetRasterBand(3).Checksum()
ds = None
assert cs1 == 0 and cs2 == 14186 and cs3 == 15301, \
('did not get expected checksums : (%d, %d, %d) instead of (0, 14186, 15301)' % (cs1, cs2, cs3))
###############################################################################
# Test SDE_TRE creation option
def test_nitf_64():
src_ds = gdal.GetDriverByName('GTiff').Create('/vsimem/nitf_64.tif', 256, 256, 1)
src_ds.SetGeoTransform([2.123456789, 0.123456789, 0, 49.123456789, 0, -0.123456789])
sr = osr.SpatialReference()
sr.SetWellKnownGeogCS('WGS84')
src_ds.SetProjection(sr.ExportToWkt())
ds = gdal.GetDriverByName('NITF').CreateCopy('/vsimem/nitf_64.ntf', src_ds, options=['ICORDS=D'])
ds = None
ds = gdal.Open('/vsimem/nitf_64.ntf')
# Note that the top-left location is only precise to the 3rd decimal!
expected_gt = (2.123270588235294, 0.12345882352941177, 0.0, 49.123729411764707, 0.0, -0.12345882352941176)
got_gt = ds.GetGeoTransform()
for i in range(6):
assert expected_gt[i] == pytest.approx(got_gt[i], abs=1e-10), \
'did not get expected GT in ICORDS=D mode'
ds = None
ds = gdal.GetDriverByName('NITF').CreateCopy('/vsimem/nitf_64.ntf', src_ds, options=['ICORDS=G'])
ds = None
ds = gdal.Open('/vsimem/nitf_64.ntf')
# Note that the top-left location is only precise to the 3rd decimal!
expected_gt = (2.1235495642701521, 0.12345642701525053, 0.0, 49.123394880174288, 0.0, -0.12345642701525052)
got_gt = ds.GetGeoTransform()
for i in range(6):
assert expected_gt[i] == pytest.approx(got_gt[i], abs=1e-10), \
'did not get expected GT in ICORDS=G mode'
ds = None
ds = gdal.GetDriverByName('NITF').CreateCopy('/vsimem/nitf_64.ntf', src_ds, options=['SDE_TRE=YES'])
ds = None
ds = gdal.Open('/vsimem/nitf_64.ntf')
# Note that the top-left location is precise up to the 9th decimal
expected_gt = (2.123456789, 0.1234567901234568, 0.0, 49.123456789000002, 0.0, -0.12345679012345678)
got_gt = ds.GetGeoTransform()
for i in range(6):
assert expected_gt[i] == pytest.approx(got_gt[i], abs=1e-10), \
'did not get expected GT in SDE_TRE mode'
ds = None
src_ds = None
gdal.Unlink('/vsimem/nitf_64.tif')
gdal.Unlink('/vsimem/nitf_64.ntf')
###############################################################################
# Test creating an image with block_width = image_width > 8192 (#3922)
def test_nitf_65():
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_65.ntf', 10000, 100, options=['BLOCKXSIZE=10000'])
ds = None
ds = gdal.Open('/vsimem/nitf_65.ntf')
(block_xsize, _) = ds.GetRasterBand(1).GetBlockSize()
ds.GetRasterBand(1).Checksum()
ds = None
gdal.Unlink('/vsimem/nitf_65.ntf')
assert block_xsize == 10000
###############################################################################
# Test creating an image with block_height = image_height > 8192 (#3922)
def test_nitf_66():
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_66.ntf', 100, 10000, options=['BLOCKYSIZE=10000', 'BLOCKXSIZE=50'])
ds = None
ds = gdal.Open('/vsimem/nitf_66.ntf')
(_, block_ysize) = ds.GetRasterBand(1).GetBlockSize()
ds.GetRasterBand(1).Checksum()
ds = None
gdal.Unlink('/vsimem/nitf_66.ntf')
assert block_ysize == 10000
###############################################################################
# Test that we don't use scanline access in illegal cases (#3926)
def test_nitf_67():
src_ds = gdal.Open('data/byte.tif')
gdal.PushErrorHandler('CPLQuietErrorHandler')
ds = gdal.GetDriverByName('NITF').CreateCopy('/vsimem/nitf_67.ntf', src_ds, options=['BLOCKYSIZE=1', 'BLOCKXSIZE=10'], strict=0)
gdal.PopErrorHandler()
ds = None
src_ds = None
ds = gdal.Open('/vsimem/nitf_67.ntf')
cs = ds.GetRasterBand(1).Checksum()
ds = None
gdal.Unlink('/vsimem/nitf_67.ntf')
gdal.Unlink('/vsimem/nitf_67.ntf.aux.xml')
assert cs == 4672
###############################################################################
# Test reading NITF_METADATA domain
def test_nitf_68():
ds = gdal.Open('data/nitf/rgb.ntf')
assert len(ds.GetMetadata('NITF_METADATA')) == 2
ds = None
ds = gdal.Open('data/nitf/rgb.ntf')
assert ds.GetMetadataItem('NITFFileHeader', 'NITF_METADATA')
ds = None
###############################################################################
# Test SetGCPs() support
def test_nitf_69():
vrt_txt = """<VRTDataset rasterXSize="20" rasterYSize="20">
<GCPList Projection='GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]'>
<GCP Id="" Pixel="0.5" Line="0.5" X="2" Y="49"/>
<GCP Id="" Pixel="0.5" Line="19.5" X="2" Y="48"/>
<GCP Id="" Pixel="19.5" Line="0.5" X="3" Y="49.5"/>
<GCP Id="" Pixel="19.5" Line="19.5" X="3" Y="48"/>
</GCPList>
<VRTRasterBand dataType="Byte" band="1">
<SimpleSource>
<SourceFilename relativeToVRT="1">data/byte.tif</SourceFilename>
<SourceProperties RasterXSize="20" RasterYSize="20" DataType="Byte" BlockXSize="20" BlockYSize="20" />
<SourceBand>1</SourceBand>
</SimpleSource>
</VRTRasterBand>
</VRTDataset>"""
# Test CreateCopy()
vrt_ds = gdal.Open(vrt_txt)
ds = gdal.GetDriverByName('NITF').CreateCopy('/vsimem/nitf_69_src.ntf', vrt_ds)
ds = None
vrt_ds = None
# Just in case a PAM .aux.xml side-car file was created
gdal.Unlink('/vsimem/nitf_69_src.ntf.aux.xml')
# Test Create() and SetGCPs()
src_ds = gdal.Open('/vsimem/nitf_69_src.ntf')
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_69_dest.ntf', 20, 20, 1, options=['ICORDS=G'])
ds.SetGCPs(src_ds.GetGCPs(), src_ds.GetGCPProjection())
ds.SetGCPs(src_ds.GetGCPs(), src_ds.GetGCPProjection()) # To check we can call it several times without error
ds = None
src_ds = None
# Now open again
ds = gdal.Open('/vsimem/nitf_69_dest.ntf')
got_gcps = ds.GetGCPs()
ds = None
gdal.Unlink('/vsimem/nitf_69_src.ntf')
gdal.Unlink('/vsimem/nitf_69_dest.ntf')
# Check
# Upper-left
assert (got_gcps[0].GCPPixel == pytest.approx(0.5, abs=1e-5) and got_gcps[0].GCPLine == pytest.approx(0.5, abs=1e-5) and \
got_gcps[0].GCPX == pytest.approx(2, abs=1e-5) and got_gcps[0].GCPY == pytest.approx(49, abs=1e-5)), \
'wrong gcp'
# Upper-right
assert (got_gcps[1].GCPPixel == pytest.approx(19.5, abs=1e-5) and got_gcps[1].GCPLine == pytest.approx(0.5, abs=1e-5) and \
got_gcps[1].GCPX == pytest.approx(3, abs=1e-5) and got_gcps[1].GCPY == pytest.approx(49.5, abs=1e-5)), \
'wrong gcp'
# Lower-right
assert (got_gcps[2].GCPPixel == pytest.approx(19.5, abs=1e-5) and got_gcps[2].GCPLine == pytest.approx(19.5, abs=1e-5) and \
got_gcps[2].GCPX == pytest.approx(3, abs=1e-5) and got_gcps[2].GCPY == pytest.approx(48, abs=1e-5)), \
'wrong gcp'
# Lower-left
assert (got_gcps[3].GCPPixel == pytest.approx(0.5, abs=1e-5) and got_gcps[3].GCPLine == pytest.approx(19.5, abs=1e-5) and \
got_gcps[3].GCPX == pytest.approx(2, abs=1e-5) and got_gcps[3].GCPY == pytest.approx(48, abs=1e-5)), \
'wrong gcp'
###############################################################################
# Create and read a JPEG encoded NITF file with NITF dimensions != JPEG dimensions
def test_nitf_70():
src_ds = gdal.Open('data/rgbsmall.tif')
ds = gdal.GetDriverByName('NITF').CreateCopy('tmp/nitf_70.ntf', src_ds,
options=['IC=C3', 'BLOCKXSIZE=64', 'BLOCKYSIZE=64'])
ds = None
# For comparison
ds = gdal.GetDriverByName('GTiff').CreateCopy('tmp/nitf_70.tif', src_ds,
options=['COMPRESS=JPEG', 'PHOTOMETRIC=YCBCR', 'TILED=YES', 'BLOCKXSIZE=64', 'BLOCKYSIZE=64'])
ds = None
src_ds = None
ds = gdal.Open('tmp/nitf_70.ntf')
cs = ds.GetRasterBand(1).Checksum()
ds = None
ds = gdal.Open('tmp/nitf_70.tif')
cs_ref = ds.GetRasterBand(1).Checksum()
ds = None
gdal.GetDriverByName('NITF').Delete('tmp/nitf_70.ntf')
gdal.GetDriverByName('GTiff').Delete('tmp/nitf_70.tif')
assert cs == cs_ref
###############################################################################
# Test reading ENGRDA TRE (#6285)
def test_nitf_71():
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_71.ntf', 1, 1, options=['TRE=ENGRDA=0123456789012345678900210012345678901230123X01200000002XY01X01230123X01200000001X'])
ds = None
ds = gdal.Open('/vsimem/nitf_71.ntf')
data = ds.GetMetadata('xml:TRE')[0]
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_71.ntf')
expected_data = """<tres>
<tre name="ENGRDA" location="image">
<field name="RESRC" value="01234567890123456789" />
<field name="RECNT" value="002" />
<repeated name="RECORDS" number="2">
<group index="0">
<field name="ENGLN" value="10" />
<field name="ENGLBL" value="0123456789" />
<field name="ENGMTXC" value="0123" />
<field name="ENGMTXR" value="0123" />
<field name="ENGTYP" value="X" />
<field name="ENGDTS" value="0" />
<field name="ENGDTU" value="12" />
<field name="ENGDATC" value="00000002" />
<field name="ENGDATA" value="XY" />
</group>
<group index="1">
<field name="ENGLN" value="01" />
<field name="ENGLBL" value="X" />
<field name="ENGMTXC" value="0123" />
<field name="ENGMTXR" value="0123" />
<field name="ENGTYP" value="X" />
<field name="ENGDTS" value="0" />
<field name="ENGDTU" value="12" />
<field name="ENGDATC" value="00000001" />
<field name="ENGDATA" value="X" />
</group>
</repeated>
</tre>
</tres>
"""
assert data == expected_data
###############################################################################
# Test writing and reading RPC00B
def compare_rpc(src_md, md):
# Check that we got data with the expected precision
for key in src_md:
if key == 'ERR_BIAS' or key == 'ERR_RAND':
continue
assert key in md, ('fail: %s missing' % key)
if 'COEFF' in key:
expected = [float(v) for v in src_md[key].strip().split(' ')]
found = [float(v) for v in md[key].strip().split(' ')]
if expected != found:
print(md)
pytest.fail('fail: %s value is not the one expected' % key)
elif float(src_md[key]) != float(md[key]):
print(md)
pytest.fail('fail: %s value is not the one expected' % key)
def test_nitf_72():
src_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
# Use full precision
src_md_max_precision = {
'ERR_BIAS': '1234.56',
'ERR_RAND': '2345.67',
'LINE_OFF': '345678',
'SAMP_OFF': '45678',
'LAT_OFF': '-89.8765',
'LONG_OFF': '-179.1234',
'HEIGHT_OFF': '-9876',
'LINE_SCALE': '987654',
'SAMP_SCALE': '67890',
'LAT_SCALE': '-12.3456',
'LONG_SCALE': '-123.4567',
'HEIGHT_SCALE': '-1234',
'LINE_NUM_COEFF': '0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9',
'LINE_DEN_COEFF': '1 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9',
'SAMP_NUM_COEFF': '2 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9',
'SAMP_DEN_COEFF': '3 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9',
}
src_md = src_md_max_precision
src_ds.SetMetadata(src_md, 'RPC')
gdal.GetDriverByName('NITF').CreateCopy('/vsimem/nitf_72.ntf', src_ds)
assert gdal.GetLastErrorMsg() == '', 'fail: did not expect warning'
if gdal.VSIStatL('/vsimem/nitf_72.ntf.aux.xml') is not None:
f = gdal.VSIFOpenL('/vsimem/nitf_72.ntf.aux.xml', 'rb')
data = gdal.VSIFReadL(1, 10000, f)
gdal.VSIFCloseL(f)
print(str(data))
pytest.fail('fail: PAM file not expected')
ds = gdal.Open('/vsimem/nitf_72.ntf')
md = ds.GetMetadata('RPC')
RPC00B = ds.GetMetadataItem('RPC00B', 'TRE')
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_72.ntf')
compare_rpc(src_md, md)
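# RPC00B packs SUCCESS, the error/offset/scale fields and the 4 x 20 polynomial coefficients
# into fixed-width values concatenated without separators, hence the single long string below.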
expected_RPC00B_max_precision = '11234.562345.6734567845678-89.8765-179.1234-987698765467890-12.3456-123.4567-1234+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+1.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+2.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+3.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9'
assert RPC00B == expected_RPC00B_max_precision, 'fail: did not get expected RPC00B'
# Test without ERR_BIAS and ERR_RAND
src_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
src_md = copy.copy(src_md_max_precision)
del src_md['ERR_BIAS']
del src_md['ERR_RAND']
src_ds.SetMetadata(src_md, 'RPC')
gdal.GetDriverByName('NITF').CreateCopy('/vsimem/nitf_72.ntf', src_ds)
assert gdal.GetLastErrorMsg() == '', 'fail: did not expect warning'
if gdal.VSIStatL('/vsimem/nitf_72.ntf.aux.xml') is not None:
f = gdal.VSIFOpenL('/vsimem/nitf_72.ntf.aux.xml', 'rb')
data = gdal.VSIFReadL(1, 10000, f)
gdal.VSIFCloseL(f)
print(str(data))
pytest.fail('fail: PAM file not expected')
ds = gdal.Open('/vsimem/nitf_72.ntf')
md = ds.GetMetadata('RPC')
RPC00B = ds.GetMetadataItem('RPC00B', 'TRE')
ds = None
expected_RPC00B = '10000.000000.0034567845678-89.8765-179.1234-987698765467890-12.3456-123.4567-1234+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+1.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+2.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+3.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9'
assert RPC00B == expected_RPC00B, 'fail: did not get expected RPC00B'
# Test that direct RPC00B copy works
src_nitf_ds = gdal.Open('/vsimem/nitf_72.ntf')
gdal.GetDriverByName('NITF').CreateCopy('/vsimem/nitf_72_copy.ntf', src_nitf_ds)
src_nitf_ds = None
ds = gdal.Open('/vsimem/nitf_72_copy.ntf')
md = ds.GetMetadata('RPC')
RPC00B = ds.GetMetadataItem('RPC00B', 'TRE')
ds = None
assert RPC00B == expected_RPC00B, 'fail: did not get expected RPC00B'
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_72.ntf')
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_72_copy.ntf')
# Test that RPC00B = NO works
gdal.GetDriverByName('NITF').CreateCopy('/vsimem/nitf_72.ntf', src_ds, options=['RPC00B=NO'])
assert gdal.VSIStatL('/vsimem/nitf_72.ntf.aux.xml') is not None, \
'fail: PAM file was expected'
ds = gdal.Open('/vsimem/nitf_72.ntf')
md = ds.GetMetadata('RPC')
RPC00B = ds.GetMetadataItem('RPC00B', 'TRE')
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_72.ntf')
assert RPC00B is None, 'fail: did not expect RPC00B'
src_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
# Test padding
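# Values shorter than their RPC00B field width are expected to be padded by the driver,
# e.g. ERR_BIAS '123' -> '0123.00' and LAT_OFF '8' -> '+08.0000' in the expected string below.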
src_md = {
'ERR_BIAS': '123',
'ERR_RAND': '234',
'LINE_OFF': '3456',
'SAMP_OFF': '4567',
'LAT_OFF': '8',
'LONG_OFF': '17',
'HEIGHT_OFF': '987',
'LINE_SCALE': '98765',
'SAMP_SCALE': '6789',
'LAT_SCALE': '12',
'LONG_SCALE': '109',
'HEIGHT_SCALE': '34',
'LINE_NUM_COEFF': '0 9.87e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9',
'LINE_DEN_COEFF': '1 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9',
'SAMP_NUM_COEFF': '2 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9',
'SAMP_DEN_COEFF': '3 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9',
}
src_ds.SetMetadata(src_md, 'RPC')
gdal.GetDriverByName('NITF').CreateCopy('/vsimem/nitf_72.ntf', src_ds)
assert gdal.GetLastErrorMsg() == '', 'fail: did not expect warning'
if gdal.VSIStatL('/vsimem/nitf_72.ntf.aux.xml') is not None:
f = gdal.VSIFOpenL('/vsimem/nitf_72.ntf.aux.xml', 'rb')
data = gdal.VSIFReadL(1, 10000, f)
gdal.VSIFCloseL(f)
print(str(data))
pytest.fail('fail: PAM file not expected')
ds = gdal.Open('/vsimem/nitf_72.ntf')
md = ds.GetMetadata('RPC')
RPC00B = ds.GetMetadataItem('RPC00B', 'TRE')
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_72.ntf')
compare_rpc(src_md, md)
expected_RPC00B = '10123.000234.0000345604567+08.0000+017.0000+098709876506789+12.0000+109.0000+0034+0.000000E+0+9.870000E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+1.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+2.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+3.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9'
assert RPC00B == expected_RPC00B, 'fail: did not get expected RPC00B'
# Test loss of precision
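# Appending one extra digit makes the value no longer exactly representable in its fixed-width
# RPC00B field: a warning is expected, the full-precision values go to a PAM .aux.xml side-car,
# and the TRE itself keeps the value rounded to the maximum representable precision.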
for key in ('LINE_OFF', 'SAMP_OFF', 'LAT_OFF', 'LONG_OFF', 'HEIGHT_OFF', 'LINE_SCALE', 'SAMP_SCALE', 'LAT_SCALE', 'LONG_SCALE', 'HEIGHT_SCALE'):
src_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
src_md = copy.copy(src_md_max_precision)
if src_md[key].find('.') < 0:
src_md[key] += '.1'
else:
src_md[key] += '1'
src_ds.SetMetadata(src_md, 'RPC')
with gdaltest.error_handler():
ds = gdal.GetDriverByName('NITF').CreateCopy('/vsimem/nitf_72.ntf', src_ds)
assert ds is not None, 'fail: expected a dataset'
ds = None
assert gdal.GetLastErrorMsg() != '', 'fail: expected a warning'
assert gdal.VSIStatL('/vsimem/nitf_72.ntf.aux.xml') is not None, \
'fail: PAM file was expected'
gdal.Unlink('/vsimem/nitf_72.ntf.aux.xml')
ds = gdal.Open('/vsimem/nitf_72.ntf')
md = ds.GetMetadata('RPC')
RPC00B = ds.GetMetadataItem('RPC00B', 'TRE')
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_72.ntf')
assert RPC00B == expected_RPC00B_max_precision, \
'fail: did not get expected RPC00B'
# Test loss of precision on coefficient lines
src_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
src_md = copy.copy(src_md_max_precision)
src_md['LINE_NUM_COEFF'] = '0 9.876543e-10 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9'
src_ds.SetMetadata(src_md, 'RPC')
with gdaltest.error_handler():
ds = gdal.GetDriverByName('NITF').CreateCopy('/vsimem/nitf_72.ntf', src_ds)
assert ds is not None, 'fail: expected a dataset'
ds = None
assert gdal.GetLastErrorMsg() != '', 'fail: expected a warning'
assert gdal.VSIStatL('/vsimem/nitf_72.ntf.aux.xml') is not None, \
'fail: PAM file was expected'
gdal.Unlink('/vsimem/nitf_72.ntf.aux.xml')
ds = gdal.Open('/vsimem/nitf_72.ntf')
md = ds.GetMetadata('RPC')
RPC00B = ds.GetMetadataItem('RPC00B', 'TRE')
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_72.ntf')
expected_RPC00B = '11234.562345.6734567845678-89.8765-179.1234-987698765467890-12.3456-123.4567-1234+0.000000E+0+0.000000E+0+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+1.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+2.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+3.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9+0.000000E+0+9.876543E+9+9.876543E-9-9.876543E+9-9.876543E-9'
assert RPC00B == expected_RPC00B, 'fail: did not get expected RPC00B'
# Test RPCTXT creation option
with gdaltest.error_handler():
gdal.GetDriverByName('NITF').CreateCopy('/vsimem/nitf_72.ntf', src_ds, options=['RPCTXT=YES'])
assert gdal.VSIStatL('/vsimem/nitf_72.ntf.aux.xml') is not None, \
'fail: PAM file was expected'
gdal.Unlink('/vsimem/nitf_72.ntf.aux.xml')
assert gdal.VSIStatL('/vsimem/nitf_72_RPC.TXT') is not None, \
'fail: rpc.txt file was expected'
ds = gdal.Open('/vsimem/nitf_72.ntf')
md = ds.GetMetadata('RPC')
RPC00B = ds.GetMetadataItem('RPC00B', 'TRE')
fl = ds.GetFileList()
ds = None
assert '/vsimem/nitf_72_RPC.TXT' in fl, \
'fail: _RPC.TXT file not reported in file list'
# Check that we get full precision from the _RPC.TXT file
compare_rpc(src_md, md)
assert RPC00B == expected_RPC00B, 'fail: did not get expected RPC00B'
# Test out of range
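# Adding a leading digit pushes the value outside the range representable by its fixed-width
# RPC00B field, so CreateCopy() is expected to fail rather than silently truncate.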
for key in ('LINE_OFF', 'SAMP_OFF', 'LAT_OFF', 'LONG_OFF', 'HEIGHT_OFF', 'LINE_SCALE', 'SAMP_SCALE', 'LAT_SCALE', 'LONG_SCALE', 'HEIGHT_SCALE'):
src_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
src_md = copy.copy(src_md_max_precision)
if src_md[key].find('-') >= 0:
src_md[key] = '-1' + src_md[key][1:]
else:
src_md[key] = '1' + src_md[key]
src_ds.SetMetadata(src_md, 'RPC')
with gdaltest.error_handler():
ds = gdal.GetDriverByName('NITF').CreateCopy('/vsimem/nitf_72.ntf', src_ds)
assert ds is None, ('fail: expected failure for %s' % key)
# Test out of range on coefficient lines
src_ds = gdal.GetDriverByName('MEM').Create('', 1, 1)
src_md = copy.copy(src_md_max_precision)
src_md['LINE_NUM_COEFF'] = '0 9.876543e10 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9 0 9.876543e+9 9.876543e-9 -9.876543e+9 -9.876543e-9'
src_ds.SetMetadata(src_md, 'RPC')
with gdaltest.error_handler():
ds = gdal.GetDriverByName('NITF').CreateCopy('/vsimem/nitf_72.ntf', src_ds)
assert ds is None, 'fail: expected failure'
###############################################################################
# Test case for https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=1525
def test_nitf_73():
with gdaltest.error_handler():
gdal.Open('data/nitf/oss_fuzz_1525.ntf')
###############################################################################
# Test cases for CCINFA
# - Simple case
def test_nitf_74():
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_74.ntf', 1, 1, options=['FILE_TRE=CCINFA=0012AS 17ge:GENC:3:3-5:AUS00000'])
ds = None
ds = gdal.Open('/vsimem/nitf_74.ntf')
data = ds.GetMetadata('xml:TRE')[0]
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_74.ntf')
expected_data = """<tres>
<tre name="CCINFA" location="file">
<field name="NUMCODE" value="001" />
<repeated name="CODES" number="1">
<group index="0">
<field name="CODE_LEN" value="2" />
<field name="CODE" value="AS" />
<field name="EQTYPE" value="" />
<field name="ESURN_LEN" value="17" />
<field name="ESURN" value="ge:GENC:3:3-5:AUS" />
<field name="DETAIL_LEN" value="00000" />
</group>
</repeated>
</tre>
</tres>
"""
assert data == expected_data
# - TABLE AG.2 case
def test_nitf_75():
listing_AG1 = """<?xml version="1.0" encoding="UTF-8"?>
<genc:GeopoliticalEntityEntry
xmlns:genc="http://api.nsgreg.nga.mil/schema/genc/3.0"
xmlns:genc-cmn="http://api.nsgreg.nga.mil/schema/genc/3.0/genc-cmn"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://api.nsgreg.nga.mil/schema/genc/3.0 http://api.nsgreg.nga.mil/schema/genc/3.0.0/genc.xsd">
<genc:encoding>
<genc-cmn:char3Code>MMR</genc-cmn:char3Code>
<genc-cmn:char3CodeURISet>
<genc-cmn:codespaceURL>http://api.nsgreg.nga.mil/geo-political/GENC/3/3-5</genc-cmn:codespaceURL>
<genc-cmn:codespaceURN>urn:us:gov:dod:nga:def:geo-political:GENC:3:3-5</genc-cmn:codespaceURN>
<genc-cmn:codespaceURNBased>geo-political:GENC:3:3-5</genc-cmn:codespaceURNBased>
<genc-cmn:codespaceURNBasedShort>ge:GENC:3:3-5</genc-cmn:codespaceURNBasedShort>
</genc-cmn:char3CodeURISet>
<genc-cmn:char2Code>MM</genc-cmn:char2Code>
<genc-cmn:char2CodeURISet>
<genc-cmn:codespaceURL>http://api.nsgreg.nga.mil/geo-political/GENC/2/3-5</genc-cmn:codespaceURL>
<genc-cmn:codespaceURN>urn:us:gov:dod:nga:def:geo-political:GENC:2:3-5</genc-cmn:codespaceURN>
<genc-cmn:codespaceURNBased>geo-political:GENC:2:3-5</genc-cmn:codespaceURNBased>
<genc-cmn:codespaceURNBasedShort>ge:GENC:2:3-5</genc-cmn:codespaceURNBasedShort>
</genc-cmn:char2CodeURISet>
<genc-cmn:numericCode>104</genc-cmn:numericCode>
<genc-cmn:numericCodeURISet>
<genc-cmn:codespaceURL>http://api.nsgreg.nga.mil/geo-political/GENC/n/3-5</genc-cmn:codespaceURL>
<genc-cmn:codespaceURN>urn:us:gov:dod:nga:def:geo-political:GENC:n:3-5</genc-cmn:codespaceURN>
<genc-cmn:codespaceURNBased>geo-political:GENC:n:3-5</genc-cmn:codespaceURNBased>
<genc-cmn:codespaceURNBasedShort>ge:GENC:n:3-5</genc-cmn:codespaceURNBasedShort>
</genc-cmn:numericCodeURISet>
</genc:encoding>
<genc:name><![CDATA[BURMA]]></genc:name>
<genc:shortName><![CDATA[Burma]]></genc:shortName>
<genc:fullName><![CDATA[Union of Burma]]></genc:fullName>
<genc:gencStatus>exception</genc:gencStatus>
<genc:entryDate>2016-09-30</genc:entryDate>
<genc:entryType>unchanged</genc:entryType>
<genc:usRecognition>independent</genc:usRecognition>
<genc:entryNotesOnNaming><![CDATA[
The GENC Standard specifies the name "BURMA" where instead ISO 3166-1 specifies "MYANMAR"; GENC specifies the short name "Burma" where instead ISO 3166-1 specifies "Myanmar"; and GENC specifies the full name "Union of Burma" where instead ISO 3166-1 specifies "the Republic of the Union of Myanmar". The GENC Standard specifies the local short name for 'my'/'mya' as "Myanma Naingngandaw" where instead ISO 3166-1 specifies "Myanma".
]]></genc:entryNotesOnNaming>
<genc:division codeSpace="as:GENC:6:3-5">MM-01</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-02</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-03</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-04</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-05</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-06</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-07</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-11</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-12</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-13</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-14</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-15</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-16</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-17</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-18</genc:division>
<genc:localShortName>
<genc:name><![CDATA[Myanma Naingngandaw]]></genc:name>
<genc:iso6393Char3Code>mya</genc:iso6393Char3Code>
</genc:localShortName>
</genc:GeopoliticalEntityEntry>"""
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_75.ntf', 1, 1, options=['TRE=CCINFA=0062RQ 17ge:GENC:3:3-5:PRI000002RQ 20as:ISO2:6:II-3:US-PR000002BM 17ge:GENC:3:3-5:MMR04108 ' +
listing_AG1 + '3MMR 19ge:ISO1:3:VII-7:MMR00000' + '2S1 19ge:GENC:3:3-alt:SCT000002YYC16gg:1059:2:ed9:3E00000'])
ds = None
ds = gdal.Open('/vsimem/nitf_75.ntf')
data = ds.GetMetadata('xml:TRE')[0]
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_75.ntf')
expected_data = """<tres>
<tre name="CCINFA" location="image">
<field name="NUMCODE" value="006" />
<repeated name="CODES" number="6">
<group index="0">
<field name="CODE_LEN" value="2" />
<field name="CODE" value="RQ" />
<field name="EQTYPE" value="" />
<field name="ESURN_LEN" value="17" />
<field name="ESURN" value="ge:GENC:3:3-5:PRI" />
<field name="DETAIL_LEN" value="00000" />
</group>
<group index="1">
<field name="CODE_LEN" value="2" />
<field name="CODE" value="RQ" />
<field name="EQTYPE" value="" />
<field name="ESURN_LEN" value="20" />
<field name="ESURN" value="as:ISO2:6:II-3:US-PR" />
<field name="DETAIL_LEN" value="00000" />
</group>
<group index="2">
<field name="CODE_LEN" value="2" />
<field name="CODE" value="BM" />
<field name="EQTYPE" value="" />
<field name="ESURN_LEN" value="17" />
<field name="ESURN" value="ge:GENC:3:3-5:MMR" />
<field name="DETAIL_LEN" value="04108" />
<field name="DETAIL_CMPR" value="" />
<field name="DETAIL" value="<?xml version="1.0" encoding="UTF-8"?>
<genc:GeopoliticalEntityEntry
xmlns:genc="http://api.nsgreg.nga.mil/schema/genc/3.0"
xmlns:genc-cmn="http://api.nsgreg.nga.mil/schema/genc/3.0/genc-cmn"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://api.nsgreg.nga.mil/schema/genc/3.0 http://api.nsgreg.nga.mil/schema/genc/3.0.0/genc.xsd">
<genc:encoding>
<genc-cmn:char3Code>MMR</genc-cmn:char3Code>
<genc-cmn:char3CodeURISet>
<genc-cmn:codespaceURL>http://api.nsgreg.nga.mil/geo-political/GENC/3/3-5</genc-cmn:codespaceURL>
<genc-cmn:codespaceURN>urn:us:gov:dod:nga:def:geo-political:GENC:3:3-5</genc-cmn:codespaceURN>
<genc-cmn:codespaceURNBased>geo-political:GENC:3:3-5</genc-cmn:codespaceURNBased>
<genc-cmn:codespaceURNBasedShort>ge:GENC:3:3-5</genc-cmn:codespaceURNBasedShort>
</genc-cmn:char3CodeURISet>
<genc-cmn:char2Code>MM</genc-cmn:char2Code>
<genc-cmn:char2CodeURISet>
<genc-cmn:codespaceURL>http://api.nsgreg.nga.mil/geo-political/GENC/2/3-5</genc-cmn:codespaceURL>
<genc-cmn:codespaceURN>urn:us:gov:dod:nga:def:geo-political:GENC:2:3-5</genc-cmn:codespaceURN>
<genc-cmn:codespaceURNBased>geo-political:GENC:2:3-5</genc-cmn:codespaceURNBased>
<genc-cmn:codespaceURNBasedShort>ge:GENC:2:3-5</genc-cmn:codespaceURNBasedShort>
</genc-cmn:char2CodeURISet>
<genc-cmn:numericCode>104</genc-cmn:numericCode>
<genc-cmn:numericCodeURISet>
<genc-cmn:codespaceURL>http://api.nsgreg.nga.mil/geo-political/GENC/n/3-5</genc-cmn:codespaceURL>
<genc-cmn:codespaceURN>urn:us:gov:dod:nga:def:geo-political:GENC:n:3-5</genc-cmn:codespaceURN>
<genc-cmn:codespaceURNBased>geo-political:GENC:n:3-5</genc-cmn:codespaceURNBased>
<genc-cmn:codespaceURNBasedShort>ge:GENC:n:3-5</genc-cmn:codespaceURNBasedShort>
</genc-cmn:numericCodeURISet>
</genc:encoding>
<genc:name><![CDATA[BURMA]]></genc:name>
<genc:shortName><![CDATA[Burma]]></genc:shortName>
<genc:fullName><![CDATA[Union of Burma]]></genc:fullName>
<genc:gencStatus>exception</genc:gencStatus>
<genc:entryDate>2016-09-30</genc:entryDate>
<genc:entryType>unchanged</genc:entryType>
<genc:usRecognition>independent</genc:usRecognition>
<genc:entryNotesOnNaming><![CDATA[
The GENC Standard specifies the name "BURMA" where instead ISO 3166-1 specifies "MYANMAR"; GENC specifies the short name "Burma" where instead ISO 3166-1 specifies "Myanmar"; and GENC specifies the full name "Union of Burma" where instead ISO 3166-1 specifies "the Republic of the Union of Myanmar". The GENC Standard specifies the local short name for 'my'/'mya' as "Myanma Naingngandaw" where instead ISO 3166-1 specifies "Myanma".
]]></genc:entryNotesOnNaming>
<genc:division codeSpace="as:GENC:6:3-5">MM-01</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-02</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-03</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-04</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-05</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-06</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-07</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-11</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-12</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-13</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-14</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-15</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-16</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-17</genc:division>
<genc:division codeSpace="as:GENC:6:3-5">MM-18</genc:division>
<genc:localShortName>
<genc:name><![CDATA[Myanma Naingngandaw]]></genc:name>
<genc:iso6393Char3Code>mya</genc:iso6393Char3Code>
</genc:localShortName>
</genc:GeopoliticalEntityEntry>" />
</group>
<group index="3">
<field name="CODE_LEN" value="3" />
<field name="CODE" value="MMR" />
<field name="EQTYPE" value="" />
<field name="ESURN_LEN" value="19" />
<field name="ESURN" value="ge:ISO1:3:VII-7:MMR" />
<field name="DETAIL_LEN" value="00000" />
</group>
<group index="4">
<field name="CODE_LEN" value="2" />
<field name="CODE" value="S1" />
<field name="EQTYPE" value="" />
<field name="ESURN_LEN" value="19" />
<field name="ESURN" value="ge:GENC:3:3-alt:SCT" />
<field name="DETAIL_LEN" value="00000" />
</group>
<group index="5">
<field name="CODE_LEN" value="2" />
<field name="CODE" value="YY" />
<field name="EQTYPE" value="C" />
<field name="ESURN_LEN" value="16" />
<field name="ESURN" value="gg:1059:2:ed9:3E" />
<field name="DETAIL_LEN" value="00000" />
</group>
</repeated>
</tre>
</tres>
"""
assert data == expected_data
###############################################################################
# Test parsing MATESA TRE (STDI-0002 App AK)
def test_nitf_76():
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_76.ntf', 1, 1, options=['FILE_TRE=MATESA=EO-1_HYPERION FTITLE 006307APR2005_Hyperion_331406N0442000E_SWIR172_001_L1R-01B-BIB-GLAS0005RADIOMTRC_CALIB 0001EO-1_HYPERION FILENAME 0020HypGain_revC.dat.svfPARENT 0001EO-1_HYPERION FILENAME 0032EO12005097_020D020C_r1_WPS_01.L0PRE_DARKCOLLECT 0001EO-1_HYPERION FILENAME 0032EO12005097_020A0209_r1_WPS_01.L0POST_DARKCOLLECT 0001EO-1_HYPERION FILENAME 0032EO12005097_020F020E_r1_WPS_01.L0PARENT 0003EO-1_HYPERION FILENAME 0026EO1H1680372005097110PZ.L1REO-1_HYPERION FILENAME 0026EO1H1680372005097110PZ.AUXEO-1_HYPERION FILENAME 0026EO1H1680372005097110PZ.MET'])
ds = None
ds = gdal.Open('/vsimem/nitf_76.ntf')
data = ds.GetMetadata('xml:TRE')[0]
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_76.ntf')
expected_data = """<tres>
<tre name="MATESA" location="file">
<field name="CUR_SOURCE" value="EO-1_HYPERION" />
<field name="CUR_MATE_TYPE" value="FTITLE" />
<field name="CUR_FILE_ID_LEN" value="0063" />
<field name="CUR_FILE_ID" value="07APR2005_Hyperion_331406N0442000E_SWIR172_001_L1R-01B-BIB-GLAS" />
<field name="NUM_GROUPS" value="0005" />
<repeated name="GROUPS" number="5">
<group index="0">
<field name="RELATIONSHIP" value="RADIOMTRC_CALIB" />
<field name="NUM_MATES" value="0001" />
<repeated name="MATES" number="1">
<group index="0">
<field name="SOURCE" value="EO-1_HYPERION" />
<field name="MATE_TYPE" value="FILENAME" />
<field name="MATE_ID_LEN" value="0020" />
<field name="MATE_ID" value="HypGain_revC.dat.svf" />
</group>
</repeated>
</group>
<group index="1">
<field name="RELATIONSHIP" value="PARENT" />
<field name="NUM_MATES" value="0001" />
<repeated name="MATES" number="1">
<group index="0">
<field name="SOURCE" value="EO-1_HYPERION" />
<field name="MATE_TYPE" value="FILENAME" />
<field name="MATE_ID_LEN" value="0032" />
<field name="MATE_ID" value="EO12005097_020D020C_r1_WPS_01.L0" />
</group>
</repeated>
</group>
<group index="2">
<field name="RELATIONSHIP" value="PRE_DARKCOLLECT" />
<field name="NUM_MATES" value="0001" />
<repeated name="MATES" number="1">
<group index="0">
<field name="SOURCE" value="EO-1_HYPERION" />
<field name="MATE_TYPE" value="FILENAME" />
<field name="MATE_ID_LEN" value="0032" />
<field name="MATE_ID" value="EO12005097_020A0209_r1_WPS_01.L0" />
</group>
</repeated>
</group>
<group index="3">
<field name="RELATIONSHIP" value="POST_DARKCOLLECT" />
<field name="NUM_MATES" value="0001" />
<repeated name="MATES" number="1">
<group index="0">
<field name="SOURCE" value="EO-1_HYPERION" />
<field name="MATE_TYPE" value="FILENAME" />
<field name="MATE_ID_LEN" value="0032" />
<field name="MATE_ID" value="EO12005097_020F020E_r1_WPS_01.L0" />
</group>
</repeated>
</group>
<group index="4">
<field name="RELATIONSHIP" value="PARENT" />
<field name="NUM_MATES" value="0003" />
<repeated name="MATES" number="3">
<group index="0">
<field name="SOURCE" value="EO-1_HYPERION" />
<field name="MATE_TYPE" value="FILENAME" />
<field name="MATE_ID_LEN" value="0026" />
<field name="MATE_ID" value="EO1H1680372005097110PZ.L1R" />
</group>
<group index="1">
<field name="SOURCE" value="EO-1_HYPERION" />
<field name="MATE_TYPE" value="FILENAME" />
<field name="MATE_ID_LEN" value="0026" />
<field name="MATE_ID" value="EO1H1680372005097110PZ.AUX" />
</group>
<group index="2">
<field name="SOURCE" value="EO-1_HYPERION" />
<field name="MATE_TYPE" value="FILENAME" />
<field name="MATE_ID_LEN" value="0026" />
<field name="MATE_ID" value="EO1H1680372005097110PZ.MET" />
</group>
</repeated>
</group>
</repeated>
</tre>
</tres>
"""
assert data == expected_data
###############################################################################
# Test parsing GRDPSB TRE
def test_nitf_77():
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_77.ntf', 1, 1, options=['TRE=GRDPSB=01+000027.81PIX_LATLON0000000000010000000000010000000000000000000000'])
ds = None
ds = gdal.Open('/vsimem/nitf_77.ntf')
data = ds.GetMetadata('xml:TRE')[0]
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_77.ntf')
expected_data = """<tres>
<tre name="GRDPSB" location="image">
<field name="NUM_GRDS" value="01" />
<repeated name="GRDS" number="1">
<group index="0">
<field name="ZVL" value="+000027.81" />
<field name="BAD" value="PIX_LATLON" />
<field name="LOD" value="000000000001" />
<field name="LAD" value="000000000001" />
<field name="LSO" value="00000000000" />
<field name="PSO" value="00000000000" />
</group>
</repeated>
</tre>
</tres>
"""
assert data == expected_data
###############################################################################
# Test parsing BANDSB TRE (STDI-0002 App X)
def test_nitf_78():
float_data = "40066666" # == struct.pack(">f", 2.1).hex()
bit_mask = "89800000" # Set bits 31, 27, 24, 23
tre_data = "TRE=HEX/BANDSB=" + hex_string("00001RADIANCE S") + float_data*2 + \
hex_string("0030.00M0030.00M-------M-------M ") + \
bit_mask + hex_string("DETECTOR ") + float_data + hex_string("U00.851920.01105")
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_78.ntf', 1, 1, options=[tre_data])
ds = None
ds = gdal.Open('/vsimem/nitf_78.ntf')
data = ds.GetMetadata('xml:TRE')[0]
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_78.ntf')
expected_data = """<tres>
<tre name="BANDSB" location="image">
<field name="COUNT" value="00001" />
<field name="RADIOMETRIC_QUANTITY" value="RADIANCE" />
<field name="RADIOMETRIC_QUANTITY_UNIT" value="S" />
<field name="SCALE_FACTOR" value="2.100000" />
<field name="ADDITIVE_FACTOR" value="2.100000" />
<field name="ROW_GSD" value="0030.00" />
<field name="ROW_GSD_UNIT" value="M" />
<field name="COL_GSD" value="0030.00" />
<field name="COL_GSD_UNIT" value="M" />
<field name="SPT_RESP_ROW" value="-------" />
<field name="SPT_RESP_UNIT_ROW" value="M" />
<field name="SPT_RESP_COL" value="-------" />
<field name="SPT_RESP_UNIT_COL" value="M" />
<field name="DATA_FLD_1" value="" />
<field name="EXISTENCE_MASK" value="2306867200" />
<field name="RADIOMETRIC_ADJUSTMENT_SURFACE" value="DETECTOR" />
<field name="ATMOSPHERIC_ADJUSTMENT_ALTITUDE" value="2.100000" />
<field name="WAVE_LENGTH_UNIT" value="U" />
<repeated name="BANDS" number="1">
<group index="0">
<field name="BAD_BAND" value="0" />
<field name="CWAVE" value="0.85192" />
<field name="FWHM" value="0.01105" />
</group>
</repeated>
</tre>
</tres>
"""
assert data == expected_data
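# A minimal sketch (not exercised by the test suite) of how the hexadecimal
# pieces used in test_nitf_78 above can be derived with the standard library.
# It assumes hex_string() (defined earlier in this file) simply hex-encodes
# ASCII text.
def _nitf_78_hex_pieces_sketch():
    import binascii
    import struct
    # IEEE-754 single-precision 2.1, big endian -> '40066666'
    float_data = binascii.hexlify(struct.pack('>f', 2.1)).decode('ascii')
    # Existence mask with bits 31, 27, 24 and 23 set -> '89800000'
    bit_mask = '%08x' % ((1 << 31) | (1 << 27) | (1 << 24) | (1 << 23))
    return float_data, bit_mask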
###############################################################################
# Test parsing ACCHZB TRE (STDI-0002-1-v5.0 Appendix P)
def test_nitf_79():
tre_data = "TRE=ACCHZB=01M 00129M 00129004+044.4130499724+33.69234401034+044.4945572008" \
"+33.67855217830+044.1731373448+32.79106350687+044.2538103407+32.77733592314"
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_79.ntf', 1, 1, options=[tre_data])
ds = None
ds = gdal.Open('/vsimem/nitf_79.ntf')
data = ds.GetMetadata('xml:TRE')[0]
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_79.ntf')
expected_data = """<tres>
<tre name="ACCHZB" location="image">
<field name="NUM_ACHZ" value="01" />
<repeated number="1">
<group index="0">
<field name="UNIAAH" value="M" />
<field name="AAH" value="00129" />
<field name="UNIAPH" value="M" />
<field name="APH" value="00129" />
<field name="NUM_PTS" value="004" />
<repeated number="4">
<group index="0">
<field name="LON" value="+044.4130499724" />
<field name="LAT" value="+33.69234401034" />
</group>
<group index="1">
<field name="LON" value="+044.4945572008" />
<field name="LAT" value="+33.67855217830" />
</group>
<group index="2">
<field name="LON" value="+044.1731373448" />
<field name="LAT" value="+32.79106350687" />
</group>
<group index="3">
<field name="LON" value="+044.2538103407" />
<field name="LAT" value="+32.77733592314" />
</group>
</repeated>
</group>
</repeated>
</tre>
</tres>
"""
assert data == expected_data
###############################################################################
# Test parsing ACCVTB TRE (STDI-0002-1-v5.0 Appendix P)
def test_nitf_80():
tre_data = "TRE=ACCVTB=01M 00095M 00095004+044.4130499724+33.69234401034+044.4945572008" \
"+33.67855217830+044.1731373448+32.79106350687+044.2538103407+32.77733592314"
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_80.ntf', 1, 1, options=[tre_data])
ds = None
ds = gdal.Open('/vsimem/nitf_80.ntf')
data = ds.GetMetadata('xml:TRE')[0]
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_80.ntf')
expected_data = """<tres>
<tre name="ACCVTB" location="image">
<field name="NUM_ACVT" value="01" />
<repeated number="1">
<group index="0">
<field name="UNIAAV" value="M" />
<field name="AAV" value="00095" />
<field name="UNIAPV" value="M" />
<field name="APV" value="00095" />
<field name="NUM_PTS" value="004" />
<repeated number="4">
<group index="0">
<field name="LON" value="+044.4130499724" />
<field name="LAT" value="+33.69234401034" />
</group>
<group index="1">
<field name="LON" value="+044.4945572008" />
<field name="LAT" value="+33.67855217830" />
</group>
<group index="2">
<field name="LON" value="+044.1731373448" />
<field name="LAT" value="+32.79106350687" />
</group>
<group index="3">
<field name="LON" value="+044.2538103407" />
<field name="LAT" value="+32.77733592314" />
</group>
</repeated>
</group>
</repeated>
</tre>
</tres>
"""
assert data == expected_data
###############################################################################
# Test parsing MSTGTA TRE (STDI-0002-1-v5.0 App E)
def test_nitf_81():
tre_data = "TRE=MSTGTA=012340123456789AB0123456789ABCDE0120123456789AB0123456789AB000123401234560123450TGT_LOC= "
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_81.ntf', 1, 1, options=[tre_data])
ds = None
ds = gdal.Open('/vsimem/nitf_81.ntf')
data = ds.GetMetadata('xml:TRE')[0]
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_81.ntf')
expected_data = """<tres>
<tre name="MSTGTA" location="image">
<field name="TGT_NUM" value="01234" />
<field name="TGT_ID" value="0123456789AB" />
<field name="TGT_BE" value="0123456789ABCDE" />
<field name="TGT_PRI" value="012" />
<field name="TGT_REQ" value="0123456789AB" />
<field name="TGT_LTIOV" value="0123456789AB" />
<field name="TGT_TYPE" value="0" />
<field name="TGT_COLL" value="0" />
<field name="TGT_CAT" value="01234" />
<field name="TGT_UTC" value="0123456" />
<field name="TGT_ELEV" value="012345" />
<field name="TGT_ELEV_UNIT" value="0" />
<field name="TGT_LOC" value="TGT_LOC=" />
</tre>
</tres>
"""
assert data == expected_data
###############################################################################
# Test parsing PIATGB TRE (STDI-0002-1-v5.0 App C)
def test_nitf_82():
tre_data = "TRE=PIATGB=0123456789ABCDE0123456789ABCDE01012340123456789ABCDE012" \
"TGTNAME= 012+01.234567-012.345678"
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_82.ntf', 1, 1, options=[tre_data])
ds = None
ds = gdal.Open('/vsimem/nitf_82.ntf')
data = ds.GetMetadata('xml:TRE')[0]
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_82.ntf')
expected_data = """<tres>
<tre name="PIATGB" location="image">
<field name="TGTUTM" value="0123456789ABCDE" />
<field name="PIATGAID" value="0123456789ABCDE" />
<field name="PIACTRY" value="01" />
<field name="PIACAT" value="01234" />
<field name="TGTGEO" value="0123456789ABCDE" />
<field name="DATUM" value="012" />
<field name="TGTNAME" value="TGTNAME=" />
<field name="PERCOVER" value="012" />
<field name="TGTLAT" value="+01.234567" />
<field name="TGTLON" value="-012.345678" />
</tre>
</tres>
"""
assert data == expected_data
###############################################################################
# Test parsing PIXQLA TRE (STDI-0002-1-v5.0 App AA)
def test_nitf_83():
tre_data = "TRE=PIXQLA=00100200031Dead " \
"Saturated Bad "
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_83.ntf', 1, 1, options=[tre_data])
ds = None
ds = gdal.Open('/vsimem/nitf_83.ntf')
data = ds.GetMetadata('xml:TRE')[0]
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_83.ntf')
expected_data = """<tres>
<tre name="PIXQLA" location="image">
<field name="NUMAIS" value="001" />
<repeated number="1">
<group index="0">
<field name="AISDLVL" value="002" />
</group>
</repeated>
<field name="NPIXQUAL" value="0003" />
<field name="PQ_BIT_VALUE" value="1" />
<repeated number="3">
<group index="0">
<field name="PQ_CONDITION" value="Dead" />
</group>
<group index="1">
<field name="PQ_CONDITION" value="Saturated" />
</group>
<group index="2">
<field name="PQ_CONDITION" value="Bad" />
</group>
</repeated>
</tre>
</tres>
"""
assert data == expected_data
###############################################################################
# Test parsing PIXMTA TRE (STDI-0002-1-v5.0 App AJ)
def test_nitf_84():
tre_data = "TRE=PIXMTA=0010020.00000000E+000.00000000E+001.00000000E+003.35200000E+03F00001P" \
"BAND_WAVELENGTH micron D00000"
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_84.ntf', 1, 1, options=[tre_data])
ds = None
ds = gdal.Open('/vsimem/nitf_84.ntf')
data = ds.GetMetadata('xml:TRE')[0]
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_84.ntf')
expected_data = """<tres>
<tre name="PIXMTA" location="image">
<field name="NUMAIS" value="001" />
<repeated number="1">
<group index="0">
<field name="AISDLVL" value="002" />
</group>
</repeated>
<field name="ORIGIN_X" value="0.00000000E+00" />
<field name="ORIGIN_Y" value="0.00000000E+00" />
<field name="SCALE_X" value="1.00000000E+00" />
<field name="SCALE_Y" value="3.35200000E+03" />
<field name="SAMPLE_MODE" value="F" />
<field name="NUMMETRICS" value="00001" />
<field name="PERBAND" value="P" />
<repeated number="1">
<group index="0">
<field name="DESCRIPTION" value="BAND_WAVELENGTH" />
<field name="UNIT" value="micron" />
<field name="FITTYPE" value="D" />
</group>
</repeated>
<field name="RESERVED_LEN" value="00000" />
</tre>
</tres>
"""
assert data == expected_data
###############################################################################
# Test creating a TRE with a hexadecimal string
def test_nitf_85():
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_85.ntf', 1, 1, options=["TRE=HEX/TSTTRE=414243"])
ds = None
ds = gdal.Open('/vsimem/nitf_85.ntf')
data = ds.GetMetadata('TRE')['TSTTRE']
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_85.ntf')
expected_data = "ABC"
assert data == expected_data
###############################################################################
# Test parsing CSEXRB TRE (STDI-0002-1-v5.0 App AH)
def test_nitf_86():
tre_data = "TRE=HEX/CSEXRB=" + hex_string("824ecf8e-1041-4cce-9edb-bc92d88624ca0047308e4b1-80e4-4777-b70f-f6e4a6881de9") + \
hex_string("17261ee9-2175-4ff2-86ad-dddda1f8270ccf306a0b-c47c-44fa-af63-463549f6bf98fd99a346-770e-4048-94d8-5a8b2e832b32") + \
hex_string("EO-1 HYPERNHYPERNF+03819809.03+03731961.77+03475785.73000000000120201012145900.000000000") + \
"0100000000000000" + "05" + "0000000100000001" "FFFFFFFFFF" + \
hex_string(" 1181.1 65535000335200256250.000") + \
hex_string(" 0000132.812+54.861 9991000000")
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_86.ntf', 1, 1, options=[tre_data])
ds = None
ds = gdal.Open('/vsimem/nitf_86.ntf')
data = ds.GetMetadata('xml:TRE')[0]
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_86.ntf')
expected_data = """<tres>
<tre name="CSEXRB" location="image">
<field name="IMAGE_UUID" value="824ecf8e-1041-4cce-9edb-bc92d88624ca" />
<field name="NUM_ASSOC_DES" value="004" />
<repeated number="4">
<group index="0">
<field name="ASSOC_DES_ID" value="7308e4b1-80e4-4777-b70f-f6e4a6881de9" />
</group>
<group index="1">
<field name="ASSOC_DES_ID" value="17261ee9-2175-4ff2-86ad-dddda1f8270c" />
</group>
<group index="2">
<field name="ASSOC_DES_ID" value="cf306a0b-c47c-44fa-af63-463549f6bf98" />
</group>
<group index="3">
<field name="ASSOC_DES_ID" value="fd99a346-770e-4048-94d8-5a8b2e832b32" />
</group>
</repeated>
<field name="PLATFORM_ID" value="EO-1" />
<field name="PAYLOAD_ID" value="HYPERN" />
<field name="SENSOR_ID" value="HYPERN" />
<field name="SENSOR_TYPE" value="F" />
<field name="GROUND_REF_POINT_X" value="+03819809.03" />
<field name="GROUND_REF_POINT_Y" value="+03731961.77" />
<field name="GROUND_REF_POINT_Z" value="+03475785.73" />
<field name="TIME_STAMP_LOC" value="0" />
<field name="REFERENCE_FRAME_NUM" value="000000001" />
<field name="BASE_TIMESTAMP" value="20201012145900.000000000" />
<field name="DT_MULTIPLIER" value="72057594037927936" />
<field name="DT_SIZE" value="5" />
<field name="NUMBER_FRAMES" value="1" />
<field name="NUMBER_DT" value="1" />
<repeated number="1">
<group index="0">
<field name="DT" value="1099511627775" />
</group>
</repeated>
<field name="MAX_GSD" value="" />
<field name="ALONG_SCAN_GSD" value="" />
<field name="CROSS_SCAN_GSD" value="" />
<field name="GEO_MEAN_GSD" value="1181.1" />
<field name="A_S_VERT_GSD" value="" />
<field name="C_S_VERT_GSD" value="" />
<field name="GEO_MEAN_VERT_GSD" value="" />
<field name="GSD_BETA_ANGLE" value="" />
<field name="DYNAMIC_RANGE" value="65535" />
<field name="NUM_LINES" value="0003352" />
<field name="NUM_SAMPLES" value="00256" />
<field name="ANGLE_TO_NORTH" value="250.000" />
<field name="OBLIQUITY_ANGLE" value="" />
<field name="AZ_OF_OBLIQUITY" value="" />
<field name="ATM_REFR_FLAG" value="0" />
<field name="VEL_ABER_FLAG" value="0" />
<field name="GRD_COVER" value="0" />
<field name="SNOW_DEPTH_CATEGORY" value="0" />
<field name="SUN_AZIMUTH" value="132.812" />
<field name="SUN_ELEVATION" value="+54.861" />
<field name="PREDICTED_NIIRS" value="" />
<field name="CIRCL_ERR" value="" />
<field name="LINEAR_ERR" value="" />
<field name="CLOUD_COVER" value="999" />
<field name="ROLLING_SHUTTER_FLAG" value="1" />
<field name="UE_TIME_FLAG" value="0" />
<field name="RESERVED_LEN" value="00000" />
</tre>
</tres>
"""
assert data == expected_data
###############################################################################
# Test parsing ILLUMB TRE (STDI-0002-1-v5.0 App AL)
def test_nitf_87():
bit_mask = "7A0000"
tre_data = "TRE=HEX/ILLUMB=" + hex_string("0001mm 8.5192000000E-01") + \
hex_string("2.5770800000E+00001NUM_BANDS=1 because ILLUMB has no band-dependent content ") + \
hex_string("World Geodetic System 1984 ") + \
hex_string("WGE World Geodetic System 1984 ") + \
hex_string("WE Geodetic ") + \
hex_string("GEOD") + \
bit_mask + hex_string("00120050407072410+33.234974+044.333405+27.8100000E+0132.8+54.9167.5+52.5") + \
hex_string("-163.4004099.2+84.0")
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_87.ntf', 1, 1, options=[tre_data])
ds = None
ds = gdal.Open('/vsimem/nitf_87.ntf')
data = ds.GetMetadata('xml:TRE')[0]
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_87.ntf')
expected_data = """<tres>
<tre name="ILLUMB" location="image">
<field name="NUM_BANDS" value="0001" />
<field name="BAND_UNIT" value="mm" />
<repeated number="1">
<group index="0">
<field name="LBOUND" value="8.5192000000E-01" />
<field name="UBOUND" value="2.5770800000E+00" />
</group>
</repeated>
<field name="NUM_OTHERS" value="00" />
<field name="NUM_COMS" value="1" />
<repeated number="1">
<group index="0">
<field name="COMMENT" value="NUM_BANDS=1 because ILLUMB has no band-dependent content" />
</group>
</repeated>
<field name="GEO_DATUM" value="World Geodetic System 1984" />
<field name="GEO_DATUM_CODE" value="WGE" />
<field name="ELLIPSOID_NAME" value="World Geodetic System 1984" />
<field name="ELLIPSOID_CODE" value="WE" />
<field name="VERTICAL_DATUM_REF" value="Geodetic" />
<field name="VERTICAL_REF_CODE" value="GEOD" />
<field name="EXISTENCE_MASK" value="7995392" />
<field name="NUM_ILLUM_SETS" value="001" />
<repeated number="1">
<group index="0">
<field name="DATETIME" value="20050407072410" />
<field name="TARGET_LAT" value="+33.234974" />
<field name="TARGET_LON" value="+044.333405" />
<field name="TARGET_HGT" value="+27.8100000E+0" />
<field name="SUN_AZIMUTH" value="132.8" />
<field name="SUN_ELEV" value="+54.9" />
<field name="MOON_AZIMUTH" value="167.5" />
<field name="MOON_ELEV" value="+52.5" />
<field name="MOON_PHASE_ANGLE" value="-163.4" />
<field name="MOON_ILLUM_PERCENT" value="004" />
<field name="SENSOR_AZIMUTH" value="099.2" />
<field name="SENSOR_ELEV" value="+84.0" />
<repeated number="1">
<group index="0" />
</repeated>
</group>
</repeated>
</tre>
</tres>
"""
assert data == expected_data
###############################################################################
# Test parsing CSWRPB TRE (STDI-0002-1-v5.0 App AH)
def test_nitf_88():
tre_data = "TRE=CSWRPB=1F199.9999999900000010000002000000300000040000005000000600000070000008" \
"1111-9.99999999999999E-99+9.99999999999999E+9900000"
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_88.ntf', 1, 1, options=[tre_data])
ds = None
ds = gdal.Open('/vsimem/nitf_88.ntf')
data = ds.GetMetadata('xml:TRE')[0]
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_88.ntf')
expected_data = """<tres>
<tre name="CSWRPB" location="image">
<field name="NUM_SETS_WARP_DATA" value="1" />
<field name="SENSOR_TYPE" value="F" />
<field name="WRP_INTERP" value="1" />
<repeated number="1">
<group index="0">
<field name="FL_WARP" value="99.99999999" />
<field name="OFFSET_LINE" value="0000001" />
<field name="OFFSET_SAMP" value="0000002" />
<field name="SCALE_LINE" value="0000003" />
<field name="SCALE_SAMP" value="0000004" />
<field name="OFFSET_LINE_UNWRP" value="0000005" />
<field name="OFFSET_SAMP_UNWRP" value="0000006" />
<field name="SCALE_LINE_UNWRP" value="0000007" />
<field name="SCALE_SAMP_UNWRP" value="0000008" />
<field name="LINE_POLY_ORDER_M1" value="1" />
<field name="LINE_POLY_ORDER_M2" value="1" />
<field name="SAMP_POLY_ORDER_N1" value="1" />
<field name="SAMP_POLY_ORDER_N2" value="1" />
<repeated number="1">
<group index="0">
<repeated number="1">
<group index="0">
<field name="A" value="-9.99999999999999E-99" />
</group>
</repeated>
</group>
</repeated>
<repeated number="1">
<group index="0">
<repeated number="1">
<group index="0">
<field name="B" value="+9.99999999999999E+99" />
</group>
</repeated>
</group>
</repeated>
</group>
</repeated>
<field name="RESERVED_LEN" value="00000" />
</tre>
</tres>
"""
assert data == expected_data
###############################################################################
# Test parsing CSRLSB TRE (STDI-0002-1-v5.0 App AH)
def test_nitf_89():
tre_data = "TRE=CSRLSB=0101+11111111.11-22222222.22+33333333.33-44444444.44"
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_89.ntf', 1, 1, options=[tre_data])
ds = None
ds = gdal.Open('/vsimem/nitf_89.ntf')
data = ds.GetMetadata('xml:TRE')[0]
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_89.ntf')
expected_data = """<tres>
<tre name="CSRLSB" location="image">
<field name="N_RS_ROW_BLOCKS" value="01" />
<field name="M_RS_COLUMN_BLOCKS" value="01" />
<repeated number="1">
<group index="0">
<repeated number="1">
<group index="0">
<field name="RS_DT_1" value="+11111111.11" />
<field name="RS_DT_2" value="-22222222.22" />
<field name="RS_DT_3" value="+33333333.33" />
<field name="RS_DT_4" value="-44444444.44" />
</group>
</repeated>
</group>
</repeated>
</tre>
</tres>
"""
assert data == expected_data
###############################################################################
# Test parsing SECURA TRE (STDI-0002-1-v5.0 App AI)
def test_nitf_90():
tre_data = "FILE_TRE=SECURA=20201020142500NITF02.10" + " "*207 + "ARH.XML 00068" + \
"<?xml version=\"1.0\" encoding=\"UTF-8\"?> <arh:Security></arh:Security>"
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_90.ntf', 1, 1, options=[tre_data])
ds = None
ds = gdal.Open('/vsimem/nitf_90.ntf')
data = ds.GetMetadata('xml:TRE')[0]
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_90.ntf')
expected_data = """<tres>
<tre name="SECURA" location="file">
<field name="FDATTIM" value="20201020142500" />
<field name="NITFVER" value="NITF02.10" />
<field name="NFSECFLDS" value="" />
<field name="SECSTD" value="ARH.XML" />
<field name="SECCOMP" value="" />
<field name="SECLEN" value="00068" />
<field name="SECURITY" value="<?xml version="1.0" encoding="UTF-8"?> <arh:Security></arh:Security>" />
</tre>
</tres>
"""
assert data == expected_data
###############################################################################
# Test parsing SNSPSB TRE (STDI-0002-1-v5.0 App P)
def test_nitf_91():
tre_data = "TRE=SNSPSB=010001111112222233333M 000001000001000001000001GSL " + \
"PLTFM INS MOD PRL SID ACT DEG0000001 +111111.11-222222.22" + \
" meters 000000000000000000000011111111111111111111112222222222222222222222001" + \
"API Imeters 0123456789"
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_91.ntf', 1, 1, options=[tre_data])
ds = None
ds = gdal.Open('/vsimem/nitf_91.ntf')
data = ds.GetMetadata('xml:TRE')[0]
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_91.ntf')
expected_data = """<tres>
<tre name="SNSPSB" location="image">
<field name="NUM_SNS" value="01" />
<repeated number="1">
<group index="0">
<field name="NUM_BP" value="00" />
<field name="NUM_BND" value="01" />
<repeated number="1">
<group index="0">
<field name="BID" value="11111" />
<field name="WS1" value="22222" />
<field name="WS2" value="33333" />
</group>
</repeated>
<field name="UNIRES" value="M" />
<field name="REX" value="000001" />
<field name="REY" value="000001" />
<field name="GSX" value="000001" />
<field name="GSY" value="000001" />
<field name="GSL" value="GSL" />
<field name="PLTFM" value="PLTFM" />
<field name="INS" value="INS" />
<field name="MOD" value="MOD" />
<field name="PRL" value="PRL" />
<field name="SID" value="SID" />
<field name="ACT" value="ACT" />
<field name="UNINOA" value="DEG" />
<field name="NOA" value="0000001" />
<field name="UNIANG" value="" />
<field name="UNIALT" value="" />
<field name="LONSCC" value="+111111.11" />
<field name="LATSCC" value="-222222.22" />
<field name="UNISAE" value="" />
<field name="UNIRPY" value="" />
<field name="UNIPXT" value="" />
<field name="UNISPE" value="meters" />
<field name="ROS" value="0000000000000000000000" />
<field name="PIS" value="1111111111111111111111" />
<field name="YAS" value="2222222222222222222222" />
<field name="NUM_AUX" value="001" />
<repeated number="1">
<group index="0">
<field name="API" value="API" />
<field name="APF" value="I" />
<field name="UNIAPX" value="meters" />
<field name="APN" value="0123456789" />
</group>
</repeated>
</group>
</repeated>
</tre>
</tres>
"""
assert data == expected_data
###############################################################################
# Test parsing RSMAPB TRE (STDI-0002-1-v5.0 App U)
def test_nitf_RSMAPB():
tre_data = "TRE=RSMAPB=iid " + \
"edition tid 01IG+9.99999999999999E+99" + \
"+9.99999999999999E+99+9.99999999999999E+99+9.99999999999999E+99+9.99999999999999E+99+9.99999999999999E+99" + \
"Y01011230001+9.99999999999999E+99+9.99999999999999E+99"
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_RSMAPB.ntf', 1, 1, options=[tre_data])
ds = None
ds = gdal.Open('/vsimem/nitf_RSMAPB.ntf')
data = ds.GetMetadata('xml:TRE')[0]
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_RSMAPB.ntf')
expected_data = """<tres>
<tre name="RSMAPB" location="image">
<field name="IID" value="iid" />
<field name="EDITION" value="edition" />
<field name="TID" value="tid" />
<field name="NPAR" value="01" />
<field name="APTYP" value="I" />
<field name="LOCTYP" value="G" />
<field name="NSFX" value="+9.99999999999999E+99" />
<field name="NSFY" value="+9.99999999999999E+99" />
<field name="NSFZ" value="+9.99999999999999E+99" />
<field name="NOFFX" value="+9.99999999999999E+99" />
<field name="NOFFY" value="+9.99999999999999E+99" />
<field name="NOFFZ" value="+9.99999999999999E+99" />
<field name="APBASE" value="Y" />
<field name="NISAP" value="01" />
<field name="NISAPR" value="01" />
<repeated number="1">
<group index="0">
<field name="XPWRR" value="1" />
<field name="YPWRR" value="2" />
<field name="ZPWRR" value="3" />
</group>
</repeated>
<field name="NISAPC" value="00" />
<field name="NBASIS" value="01" />
<repeated number="1">
<group index="0">
<repeated number="1">
<group index="0">
<field name="AEL" value="+9.99999999999999E+99" />
</group>
</repeated>
</group>
</repeated>
<repeated number="1">
<group index="0">
<field name="PARVAL" value="+9.99999999999999E+99" />
</group>
</repeated>
</tre>
</tres>
"""
assert data == expected_data
###############################################################################
# Test parsing RSMDCB TRE (STDI-0002-1-v5.0 App U)
def test_nitf_RSMDCB():
tre_data = "TRE=RSMDCB=iid " + \
"edition tid 01001iidi" + " "*76 + \
"01Y01GN" + "+9.99999999999999E+99"*6 + "N01ABCD+9.99999999999999E+99"
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_RSMDCB.ntf', 1, 1, options=[tre_data])
ds = None
ds = gdal.Open('/vsimem/nitf_RSMDCB.ntf')
data = ds.GetMetadata('xml:TRE')[0]
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_RSMDCB.ntf')
expected_data = """<tres>
<tre name="RSMDCB" location="image">
<field name="IID" value="iid" />
<field name="EDITION" value="edition" />
<field name="TID" value="tid" />
<field name="NROWCB" value="01" />
<field name="NIMGE" value="001" />
<repeated number="1">
<group index="0">
<field name="IIDI" value="iidi" />
<field name="NCOLCB" value="01" />
</group>
</repeated>
<field name="INCAPD" value="Y" />
<field name="NPAR" value="01" />
<field name="APTYP" value="G" />
<field name="LOCTYP" value="N" />
<field name="NSFX" value="+9.99999999999999E+99" />
<field name="NSFY" value="+9.99999999999999E+99" />
<field name="NSFZ" value="+9.99999999999999E+99" />
<field name="NOFFX" value="+9.99999999999999E+99" />
<field name="NOFFY" value="+9.99999999999999E+99" />
<field name="NOFFZ" value="+9.99999999999999E+99" />
<field name="APBASE" value="N" />
<field name="NGSAP" value="01" />
<repeated number="1">
<group index="0">
<field name="GSAPID" value="ABCD" />
</group>
</repeated>
<repeated number="1">
<group index="0">
<repeated number="1">
<group index="0">
<repeated number="1">
<group index="0">
<field name="CRSCOV" value="+9.99999999999999E+99" />
</group>
</repeated>
</group>
</repeated>
</group>
</repeated>
</tre>
</tres>
"""
assert data == expected_data
###############################################################################
# Test parsing RSMECB TRE (STDI-0002-1-v5.0 App U)
def test_nitf_RSMECB():
tre_data = "TRE=RSMECB=iid " + \
"edition tid " + \
"YY01012020110201GN" + "+9.99999999999999E+99"*6 + "N01ABCD02" + "+9.99999999999999E+99"*3 + \
"1N2" + "+9.99999999999999E+99"*8 + "N2" + "+9.99999999999999E+99"*4 + "2" + "+9.99999999999999E+99"*4
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_RSMECB.ntf', 1, 1, options=[tre_data])
ds = None
ds = gdal.Open('/vsimem/nitf_RSMECB.ntf')
data = ds.GetMetadata('xml:TRE')[0]
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_RSMECB.ntf')
expected_data = """<tres>
<tre name="RSMECB" location="image">
<field name="IID" value="iid" />
<field name="EDITION" value="edition" />
<field name="TID" value="tid" />
<field name="INCLIC" value="Y" />
<field name="INCLUC" value="Y" />
<field name="NPARO" value="01" />
<field name="IGN" value="01" />
<field name="CVDATE" value="20201102" />
<field name="NPAR" value="01" />
<field name="APTYP" value="G" />
<field name="LOCTYP" value="N" />
<field name="NSFX" value="+9.99999999999999E+99" />
<field name="NSFY" value="+9.99999999999999E+99" />
<field name="NSFZ" value="+9.99999999999999E+99" />
<field name="NOFFX" value="+9.99999999999999E+99" />
<field name="NOFFY" value="+9.99999999999999E+99" />
<field name="NOFFZ" value="+9.99999999999999E+99" />
<field name="APBASE" value="N" />
<field name="NGSAP" value="01" />
<repeated number="1">
<group index="0">
<field name="GSAPID" value="ABCD" />
</group>
</repeated>
<repeated number="1">
<group index="0">
<field name="NUMOPG" value="02" />
<repeated number="3">
<group index="0">
<field name="ERRCVG" value="+9.99999999999999E+99" />
</group>
<group index="1">
<field name="ERRCVG" value="+9.99999999999999E+99" />
</group>
<group index="2">
<field name="ERRCVG" value="+9.99999999999999E+99" />
</group>
</repeated>
<field name="TCDF" value="1" />
<field name="ACSMC" value="N" />
<field name="NCSEG" value="2" />
<repeated number="2">
<group index="0">
<field name="CORSEG" value="+9.99999999999999E+99" />
<field name="TAUSEG" value="+9.99999999999999E+99" />
</group>
<group index="1">
<field name="CORSEG" value="+9.99999999999999E+99" />
<field name="TAUSEG" value="+9.99999999999999E+99" />
</group>
</repeated>
</group>
</repeated>
<repeated number="1">
<group index="0">
<repeated number="1">
<group index="0">
<field name="MAP" value="+9.99999999999999E+99" />
</group>
</repeated>
</group>
</repeated>
<field name="URR" value="+9.99999999999999E+99" />
<field name="URC" value="+9.99999999999999E+99" />
<field name="UCC" value="+9.99999999999999E+99" />
<field name="UACSMC" value="N" />
<field name="UNCSR" value="2" />
<repeated number="2">
<group index="0">
<field name="UCORSR" value="+9.99999999999999E+99" />
<field name="UTAUSR" value="+9.99999999999999E+99" />
</group>
<group index="1">
<field name="UCORSR" value="+9.99999999999999E+99" />
<field name="UTAUSR" value="+9.99999999999999E+99" />
</group>
</repeated>
<field name="UNCSC" value="2" />
<repeated number="2">
<group index="0">
<field name="UCORSC" value="+9.99999999999999E+99" />
<field name="UTAUSC" value="+9.99999999999999E+99" />
</group>
<group index="1">
<field name="UCORSC" value="+9.99999999999999E+99" />
<field name="UTAUSC" value="+9.99999999999999E+99" />
</group>
</repeated>
</tre>
</tres>
"""
assert data == expected_data
###############################################################################
# Test creation and reading of Data Extension Segments (DES)
def test_nitf_des():
des_data = "02U" + " "*166 + r'0004ABCD1234567\0890'
ds = gdal.GetDriverByName("NITF").Create("/vsimem/nitf_DES.ntf", 1, 1, options=["DES=DES1=" + des_data, "DES=DES2=" + des_data])
ds = None
# DESDATA portion will be Base64 encoded on output
# base64.b64encode(bytes("1234567\x00890", "utf-8")) == b'MTIzNDU2NwA4OTA='
ds = gdal.Open("/vsimem/nitf_DES.ntf")
data = ds.GetMetadata("xml:DES")[0]
ds = None
gdal.GetDriverByName('NITF').Delete('/vsimem/nitf_DES.ntf')
expected_data = """<des_list>
<des name="DES1">
<field name="NITF_DESVER" value="02" />
<field name="NITF_DECLAS" value="U" />
<field name="NITF_DESCLSY" value="" />
<field name="NITF_DESCODE" value="" />
<field name="NITF_DESCTLH" value="" />
<field name="NITF_DESREL" value="" />
<field name="NITF_DESDCTP" value="" />
<field name="NITF_DESDCDT" value="" />
<field name="NITF_DESDCXM" value="" />
<field name="NITF_DESDG" value="" />
<field name="NITF_DESDGDT" value="" />
<field name="NITF_DESCLTX" value="" />
<field name="NITF_DESCATP" value="" />
<field name="NITF_DESCAUT" value="" />
<field name="NITF_DESCRSN" value="" />
<field name="NITF_DESSRDT" value="" />
<field name="NITF_DESCTLN" value="" />
<field name="NITF_DESSHL" value="0004" />
<field name="NITF_DESSHF" value="ABCD" />
<field name="NITF_DESDATA" value="MTIzNDU2NwA4OTA=" />
</des>
<des name="DES2">
<field name="NITF_DESVER" value="02" />
<field name="NITF_DECLAS" value="U" />
<field name="NITF_DESCLSY" value="" />
<field name="NITF_DESCODE" value="" />
<field name="NITF_DESCTLH" value="" />
<field name="NITF_DESREL" value="" />
<field name="NITF_DESDCTP" value="" />
<field name="NITF_DESDCDT" value="" />
<field name="NITF_DESDCXM" value="" />
<field name="NITF_DESDG" value="" />
<field name="NITF_DESDGDT" value="" />
<field name="NITF_DESCLTX" value="" />
<field name="NITF_DESCATP" value="" />
<field name="NITF_DESCAUT" value="" />
<field name="NITF_DESCRSN" value="" />
<field name="NITF_DESSRDT" value="" />
<field name="NITF_DESCTLN" value="" />
<field name="NITF_DESSHL" value="0004" />
<field name="NITF_DESSHF" value="ABCD" />
<field name="NITF_DESDATA" value="MTIzNDU2NwA4OTA=" />
</des>
</des_list>
"""
assert data == expected_data
###############################################################################
# Test reading C4 compressed file
def test_nitf_read_C4():
ds = gdal.Open('data/nitf/RPFTOC01.ON2')
cs = ds.GetRasterBand(1).Checksum()
assert cs == 53599
###############################################################################
# Test reading a file with a SENSRB TRE
def test_nitf_SENSRB():
ds = gdal.Open('data/nitf/SENSRB_TRE.ntf')
data = ds.GetMetadata('xml:TRE')[0]
ds = None
expected_data = """<tres>
<tre name="SENSRB" location="image">
<field name="GENERAL_DATA" value="Y" />
<field name="SENSOR" value="" />
<field name="SENSOR_URI" value="" />
<field name="PLATFORM" value=" UMS" />
<field name="PLATFORM_URI" value="" />
<field name="OPERATION_DOMAIN" value="" />
<field name="CONTENT_LEVEL" value="4" />
<field name="GEODETIC_SYSTEM" value="" />
<field name="GEODETIC_TYPE" value="" />
<field name="ELEVATION_DATUM" value="" />
<field name="LENGTH_UNIT" value=" m" />
<field name="ANGULAR_UNIT" value="deg" />
<field name="START_DATE" value="" />
<field name="START_TIME" value="00000000000000" />
<field name="END_DATE" value="20190507" />
<field name="END_TIME" value="0000084.059869" />
<field name="GENERATION_COUNT" value="00" />
<field name="GENERATION_DATE" value="" />
<field name="GENERATION_TIME" value="" />
<field name="SENSOR_ARRAY_DATA" value="" />
<field name="SENSOR_CALIBRATION_DATA" value="" />
<field name="IMAGE_FORMATION_DATA" value="Y" />
<field name="METHOD" value="" />
<field name="MODE" value="" />
<field name="ROW_COUNT" value="00000000" />
<field name="COLUMN_COUNT" value="00000000" />
<field name="ROW_SET" value="00000000" />
<field name="COLUMN_SET" value="00000000" />
<field name="ROW_RATE" value="0000000000" />
<field name="COLUMN_RATE" value="0000000000" />
<field name="FIRST_PIXEL_ROW" value="00000000" />
<field name="FIRST_PIXEL_COLUMN" value="00000000" />
<field name="TRANSFORM_PARAMS" value="3" />
<repeated name="TRANSFORM_PARAM" number="3">
<group index="0">
<field name="TRANSFORM_PARAM" value=" 470" />
</group>
<group index="1">
<field name="TRANSFORM_PARAM" value=" 471" />
</group>
<group index="2">
<field name="TRANSFORM_PARAM" value=" 472" />
</group>
</repeated>
<field name="REFERENCE_TIME" value="" />
<field name="REFERENCE_ROW" value="" />
<field name="REFERENCE_COLUMN" value="" />
<field name="LATITUDE_OR_X" value=" 43643267" />
<field name="LONGITUDE_OR_Y" value="" />
<field name="ALTITUDE_OR_Z" value="" />
<field name="SENSOR_X_OFFSET" value="00000000" />
<field name="SENSOR_Y_OFFSET" value="00000000" />
<field name="SENSOR_Z_OFFSET" value="00000000" />
<field name="ATTITUDE_EULER_ANGLES" value="" />
<field name="ATTITUDE_UNIT_VECTORS" value="" />
<field name="ATTITUDE_QUATERNION" value="" />
<field name="SENSOR_VELOCITY_DATA" value="" />
<field name="POINT_SET_DATA" value="00" />
<field name="TIME_STAMPED_DATA_SETS" value="02" />
<repeated name="TIME_STAMPED_SET" number="2">
<group index="0">
<field name="TIME_STAMP_TYPE_MM" value="06b" />
<field name="TIME_STAMP_COUNT_MM" value="0003" />
<repeated name="TIME_STAMP_COUNTS" number="3">
<group index="0">
<field name="TIME_STAMP_TIME_NNNN" value="111111111111" />
<field name="TIME_STAMP_VALUE_NNNN" value="111100001111" />
</group>
<group index="1">
<field name="TIME_STAMP_TIME_NNNN" value="222222222222" />
<field name="TIME_STAMP_VALUE_NNNN" value="222200001111" />
</group>
<group index="2">
<field name="TIME_STAMP_TIME_NNNN" value="333333333333" />
<field name="TIME_STAMP_VALUE_NNNN" value="333300001111" />
</group>
</repeated>
</group>
<group index="1">
<field name="TIME_STAMP_TYPE_MM" value="06e" />
<field name="TIME_STAMP_COUNT_MM" value="0002" />
<repeated name="TIME_STAMP_COUNTS" number="2">
<group index="0">
<field name="TIME_STAMP_TIME_NNNN" value="444444444444" />
<field name="TIME_STAMP_VALUE_NNNN" value="44440000" />
</group>
<group index="1">
<field name="TIME_STAMP_TIME_NNNN" value="555555555555" />
<field name="TIME_STAMP_VALUE_NNNN" value="55550000" />
</group>
</repeated>
</group>
</repeated>
<field name="PIXEL_REFERENCED_DATA_SETS" value="00" />
<field name="UNCERTAINTY_DATA" value="000" />
<field name="ADDITIONAL_PARAMETER_DATA" value="000" />
</tre>
</tres>
"""
assert data == expected_data, data
###############################################################################
# Verify we can read UDID metadata
def test_nitf_valid_udid():
ds = gdal.Open('data/nitf/valid_udid.ntf')
md = ds.GetMetadata()
assert md['NITF_CSDIDA_YEAR'] == '2019', \
'UDID CSDIDA metadata has unexpected value.'
assert md['NITF_BLOCKA_BLOCK_INSTANCE_01'] == '01', \
'BLOCKA metadata has unexpected value.'
###############################################################################
# Verify that bad UDID metadata doesn't prevent reading IXSHD metadata
def test_nitf_invalid_udid():
ds = gdal.Open('data/nitf/invalid_udid.ntf')
md = ds.GetMetadata()
assert 'NITF_CSDIDA_YEAR' not in md, \
        'Unexpected parsing of UDID CSDIDA metadata.'
assert md['NITF_BLOCKA_BLOCK_INSTANCE_01'] == '01', \
'BLOCKA metadata has unexpected value.'
###############################################################################
# Test NITF21_CGM_ANNO_Uncompressed_unmasked.ntf for bugs #1313 and #1714
def test_nitf_online_1():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/bugs/NITF21_CGM_ANNO_Uncompressed_unmasked.ntf', 'NITF21_CGM_ANNO_Uncompressed_unmasked.ntf'):
pytest.skip()
tst = gdaltest.GDALTest('NITF', 'tmp/cache/NITF21_CGM_ANNO_Uncompressed_unmasked.ntf', 1, 13123, filename_absolute=1)
# Shut up the warning about missing image segment
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = tst.testOpen()
gdal.PopErrorHandler()
return ret
###############################################################################
# Test NITF file with multiple images
def test_nitf_online_2():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/nitf1.1/U_0001a.ntf', 'U_0001a.ntf'):
pytest.skip()
ds = gdal.Open('tmp/cache/U_0001a.ntf')
md = ds.GetMetadata('SUBDATASETS')
assert 'SUBDATASET_1_NAME' in md, 'missing SUBDATASET_1_NAME metadata'
ds = None
###############################################################################
# Test ARIDPCM (C2) image
def test_nitf_online_3():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/nitf1.1/U_0001a.ntf', 'U_0001a.ntf'):
pytest.skip()
tst = gdaltest.GDALTest('NITF', 'NITF_IM:3:tmp/cache/U_0001a.ntf', 1, 23463, filename_absolute=1)
return tst.testOpen()
###############################################################################
# Test Vector Quantization (VQ) (C4) file
def test_nitf_online_4():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/cadrg/001zc013.on1', '001zc013.on1'):
pytest.skip()
# check that the RPF attribute metadata was carried through.
ds = gdal.Open('tmp/cache/001zc013.on1')
md = ds.GetMetadata()
assert md['NITF_RPF_CurrencyDate'] == '19950720' and md['NITF_RPF_ProductionDate'] == '19950720' and md['NITF_RPF_SignificantDate'] == '19890629', \
'RPF attribute metadata not captured (#3413)'
ds = None
tst = gdaltest.GDALTest('NITF', 'tmp/cache/001zc013.on1', 1, 53960, filename_absolute=1)
return tst.testOpen()
###############################################################################
# Test Vector Quantization (VQ) (M4) file
def test_nitf_online_5():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/cadrg/overview.ovr', 'overview.ovr'):
pytest.skip()
tst = gdaltest.GDALTest('NITF', 'tmp/cache/overview.ovr', 1, 60699, filename_absolute=1)
return tst.testOpen()
###############################################################################
# Test a JPEG compressed, single blocked 2048x2048 mono image
def test_nitf_online_6():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/nitf2.0/U_4001b.ntf', 'U_4001b.ntf'):
pytest.skip()
tst = gdaltest.GDALTest('NITF', 'tmp/cache/U_4001b.ntf', 1, 60030, filename_absolute=1)
return tst.testOpen()
###############################################################################
# Test all combinations of IMODE (S,P,B,R) for an image with 6 bands, 3 of which are RGB
def test_nitf_online_7():
for filename in ['ns3228b.nsf', 'i_3228c.ntf', 'ns3228d.nsf', 'i_3228e.ntf']:
if not gdaltest.download_file('http://www.gwg.nga.mil/ntb/baseline/software/testfile/Nitfv2_1/' + filename, filename):
pytest.skip()
ds = gdal.Open('tmp/cache/' + filename)
assert ds.RasterCount == 6
checksums = [48385, 48385, 40551, 54223, 48385, 33094]
colorInterpretations = [gdal.GCI_Undefined, gdal.GCI_Undefined, gdal.GCI_RedBand, gdal.GCI_BlueBand, gdal.GCI_Undefined, gdal.GCI_GreenBand]
for i in range(6):
cs = ds.GetRasterBand(i + 1).Checksum()
assert cs == checksums[i], ('got checksum %d for image %s'
% (cs, filename))
assert ds.GetRasterBand(i + 1).GetRasterColorInterpretation() == colorInterpretations[i], \
('got wrong color interp for image %s'
% filename)
ds = None
###############################################################################
# Test JPEG-compressed multi-block mono-band image with a data mask subheader (IC=M3, IMODE=B)
def test_nitf_online_8():
if not gdaltest.download_file('http://www.gwg.nga.mil/ntb/baseline/software/testfile/Nitfv2_1/ns3301j.nsf', 'ns3301j.nsf'):
pytest.skip()
tst = gdaltest.GDALTest('NITF', 'tmp/cache/ns3301j.nsf', 1, 56861, filename_absolute=1)
return tst.testOpen()
###############################################################################
# Test JPEG-compressed multi-block mono-band image without a data mask subheader (IC=C3, IMODE=B)
def test_nitf_online_9():
if not gdaltest.download_file('http://www.gwg.nga.mil/ntb/baseline/software/testfile/Nitfv2_1/ns3304a.nsf', 'ns3304a.nsf'):
pytest.skip()
tst = gdaltest.GDALTest('NITF', 'tmp/cache/ns3304a.nsf', 1, 32419, filename_absolute=1)
return tst.testOpen()
###############################################################################
# Verify CGM access on a file with 8 CGM segments
def test_nitf_online_10():
if not gdaltest.download_file('http://www.gwg.nga.mil/ntb/baseline/software/testfile/Nitfv2_1/ns3119b.nsf', 'ns3119b.nsf'):
pytest.skip()
# Shut up the warning about missing image segment
gdal.PushErrorHandler('CPLQuietErrorHandler')
ds = gdal.Open('tmp/cache/ns3119b.nsf')
gdal.PopErrorHandler()
mdCGM = ds.GetMetadata('CGM')
ds = None
assert mdCGM['SEGMENT_COUNT'] == '8', 'wrong SEGMENT_COUNT.'
tab = [
('SEGMENT_0_SLOC_ROW', '0'),
('SEGMENT_0_SLOC_COL', '0'),
('SEGMENT_0_CCS_COL', '0'),
('SEGMENT_0_CCS_COL', '0'),
('SEGMENT_0_SDLVL', '1'),
('SEGMENT_0_SALVL', '0'),
('SEGMENT_1_SLOC_ROW', '0'),
('SEGMENT_1_SLOC_COL', '684'),
('SEGMENT_2_SLOC_ROW', '0'),
('SEGMENT_2_SLOC_COL', '1364'),
('SEGMENT_3_SLOC_ROW', '270'),
('SEGMENT_3_SLOC_COL', '0'),
('SEGMENT_4_SLOC_ROW', '270'),
('SEGMENT_4_SLOC_COL', '684'),
('SEGMENT_5_SLOC_ROW', '270'),
('SEGMENT_5_SLOC_COL', '1364'),
('SEGMENT_6_SLOC_ROW', '540'),
('SEGMENT_6_SLOC_COL', '0'),
('SEGMENT_7_SLOC_ROW', '540'),
('SEGMENT_7_SLOC_COL', '1364'),
('SEGMENT_7_CCS_ROW', '540'),
('SEGMENT_7_CCS_COL', '1364'),
('SEGMENT_7_SDLVL', '8'),
('SEGMENT_7_SALVL', '0'),
]
for item in tab:
assert mdCGM[item[0]] == item[1], ('wrong value for %s.' % item[0])
###############################################################################
# 5 text files
def test_nitf_online_11():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/nitf2.0/U_1122a.ntf', 'U_1122a.ntf'):
pytest.skip()
ds = gdal.Open('tmp/cache/U_1122a.ntf')
mdTEXT = ds.GetMetadata('TEXT')
ds = None
assert mdTEXT['DATA_0'] == 'This is test text file 01.\r\n', \
'did not find expected DATA_0 from metadata.'
assert mdTEXT['DATA_1'] == 'This is test text file 02.\r\n', \
'did not find expected DATA_1 from metadata.'
assert mdTEXT['DATA_2'] == 'This is test text file 03.\r\n', \
'did not find expected DATA_2 from metadata.'
assert mdTEXT['DATA_3'] == 'This is test text file 04.\r\n', \
'did not find expected DATA_3 from metadata.'
assert mdTEXT['DATA_4'] == 'This is test text file 05.\r\n', \
'did not find expected DATA_4 from metadata.'
###############################################################################
# Test 12 bit uncompressed image.
def test_nitf_online_12():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/bugs/i_3430a.ntf', 'i_3430a.ntf'):
pytest.skip()
tst = gdaltest.GDALTest('NITF', 'tmp/cache/i_3430a.ntf', 1, 38647,
filename_absolute=1)
return tst.testOpen()
###############################################################################
# Test complex relative graphic/image attachment.
def test_nitf_online_13():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/u_3054a.ntf', 'u_3054a.ntf'):
pytest.skip()
# Shut up the warning about missing image segment
ds = gdal.Open('NITF_IM:2:tmp/cache/u_3054a.ntf')
mdCGM = ds.GetMetadata('CGM')
md = ds.GetMetadata()
ds = None
assert mdCGM['SEGMENT_COUNT'] == '3', 'wrong SEGMENT_COUNT.'
tab = [
('SEGMENT_2_SLOC_ROW', '0'),
('SEGMENT_2_SLOC_COL', '0'),
('SEGMENT_2_CCS_COL', '1100'),
('SEGMENT_2_CCS_COL', '1100'),
('SEGMENT_2_SDLVL', '6'),
('SEGMENT_2_SALVL', '3')
]
for item in tab:
assert mdCGM[item[0]] == item[1], ('wrong value for %s.' % item[0])
tab = [
('NITF_IDLVL', '3'),
('NITF_IALVL', '1'),
('NITF_ILOC_ROW', '1100'),
('NITF_ILOC_COLUMN', '1100'),
('NITF_CCS_ROW', '1100'),
('NITF_CCS_COLUMN', '1100'),
]
for item in tab:
assert md[item[0]] == item[1], ('wrong value for %s, got %s instead of %s.'
% (item[0], md[item[0]], item[1]))
###############################################################################
# Check reading a 12-bit JPEG compressed NITF (multi-block)
def test_nitf_online_14(not_jpeg_9b):
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/nitf2.0/U_4020h.ntf', 'U_4020h.ntf'):
pytest.skip()
try:
os.remove('tmp/cache/U_4020h.ntf.aux.xml')
except OSError:
pass
# Check if JPEG driver supports 12bit JPEG reading/writing
jpg_drv = gdal.GetDriverByName('JPEG')
md = jpg_drv.GetMetadata()
if md[gdal.DMD_CREATIONDATATYPES].find('UInt16') == -1:
sys.stdout.write('(12bit jpeg not available) ... ')
pytest.skip()
ds = gdal.Open('tmp/cache/U_4020h.ntf')
assert ds.GetRasterBand(1).DataType == gdal.GDT_UInt16
stats = ds.GetRasterBand(1).GetStatistics(0, 1)
assert stats[2] >= 2607 and stats[2] <= 2608
ds = None
try:
os.remove('tmp/cache/U_4020h.ntf.aux.xml')
except OSError:
pass
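###############################################################################
# Editorial sketch (not one of the original tests): the 12-bit capability probe
# used above, factored into a small helper. The helper name is illustrative
# only; it mirrors the DMD_CREATIONDATATYPES check performed in
# test_nitf_online_14.
def _jpeg_supports_12bit_sketch():
    jpg_drv = gdal.GetDriverByName('JPEG')
    if jpg_drv is None:
        return False
    # 12-bit JPEG support is advertised as UInt16 among the creation data types
    return 'UInt16' in jpg_drv.GetMetadata().get(gdal.DMD_CREATIONDATATYPES, '')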
###############################################################################
# Test opening an IC=C8 NITF file with the various JPEG2000 drivers
def nitf_online_15(driver_to_test, expected_cs=1054):
if not gdaltest.download_file('http://www.gwg.nga.mil/ntb/baseline/software/testfile/Jpeg2000/p0_01/p0_01a.ntf', 'p0_01a.ntf'):
pytest.skip()
jp2_drv = gdal.GetDriverByName(driver_to_test)
if jp2_drv is None:
pytest.skip()
# Deregister other potential conflicting JPEG2000 drivers
gdaltest.deregister_all_jpeg2000_drivers_but(driver_to_test)
ds = gdal.Open('tmp/cache/p0_01a.ntf')
if ds.GetRasterBand(1).Checksum() == expected_cs:
ret = 'success'
else:
print(ds.GetRasterBand(1).Checksum())
gdaltest.post_reason('Did not get expected checksums')
ret = 'fail'
gdaltest.reregister_all_jpeg2000_drivers()
return ret
def test_nitf_online_15_jp2ecw():
return nitf_online_15('JP2ECW')
def test_nitf_online_15_jp2mrsid():
return nitf_online_15('JP2MrSID')
def test_nitf_online_15_jp2kak():
return nitf_online_15('JP2KAK')
def test_nitf_online_15_jasper():
return nitf_online_15('JPEG2000')
def test_nitf_online_15_openjpeg():
return nitf_online_15('JP2OpenJPEG')
###############################################################################
# Test opening an IC=C8 NITF file which has a 256-entry palette/LUT in both the JP2 header and the image subheader
# We expect RGB expansion from some of the JPEG2000 drivers
def nitf_online_16(driver_to_test):
if not gdaltest.download_file('http://www.gwg.nga.mil/ntb/baseline/software/testfile/Jpeg2000/jp2_09/file9_jp2_2places.ntf', 'file9_jp2_2places.ntf'):
pytest.skip()
jp2_drv = gdal.GetDriverByName(driver_to_test)
if jp2_drv is None:
pytest.skip()
# Deregister other potential conflicting JPEG2000 drivers
gdaltest.deregister_all_jpeg2000_drivers_but(driver_to_test)
ds = gdal.Open('tmp/cache/file9_jp2_2places.ntf')
# JPEG2000 driver
if ds.RasterCount == 3 and \
ds.GetRasterBand(1).Checksum() == 48954 and \
ds.GetRasterBand(2).Checksum() == 4939 and \
ds.GetRasterBand(3).Checksum() == 17734:
ret = 'success'
elif ds.RasterCount == 1 and \
ds.GetRasterBand(1).Checksum() == 47664 and \
ds.GetRasterBand(1).GetRasterColorTable() is not None:
ret = 'success'
else:
print(ds.RasterCount)
for i in range(ds.RasterCount):
print(ds.GetRasterBand(i + 1).Checksum())
print(ds.GetRasterBand(1).GetRasterColorTable())
gdaltest.post_reason('Did not get expected checksums')
ret = 'fail'
gdaltest.reregister_all_jpeg2000_drivers()
return ret
def test_nitf_online_16_jp2ecw():
return nitf_online_16('JP2ECW')
def test_nitf_online_16_jp2mrsid():
return nitf_online_16('JP2MrSID')
def test_nitf_online_16_jp2kak():
return nitf_online_16('JP2KAK')
def test_nitf_online_16_jasper():
return nitf_online_16('JPEG2000')
def test_nitf_online_16_openjpeg():
return nitf_online_16('JP2OpenJPEG')
###############################################################################
# Test opening an IC=C8 NITF file which has a 256-entry LUT in the image subheader only, the JP2 header being completely removed
# We don't expect RGB expansion from the JPEG2000 driver
def nitf_online_17(driver_to_test):
if not gdaltest.download_file('http://www.gwg.nga.mil/ntb/baseline/software/testfile/Jpeg2000/jp2_09/file9_j2c.ntf', 'file9_j2c.ntf'):
pytest.skip()
jp2_drv = gdal.GetDriverByName(driver_to_test)
if jp2_drv is None:
pytest.skip()
# Deregister other potential conflicting JPEG2000 drivers
gdaltest.deregister_all_jpeg2000_drivers_but(driver_to_test)
ds = gdal.Open('tmp/cache/file9_j2c.ntf')
if ds.RasterCount == 1 and \
ds.GetRasterBand(1).Checksum() == 47664 and \
ds.GetRasterBand(1).GetRasterColorTable() is not None:
ret = 'success'
else:
print(ds.RasterCount)
for i in range(ds.RasterCount):
print(ds.GetRasterBand(i + 1).Checksum())
print(ds.GetRasterBand(1).GetRasterColorTable())
gdaltest.post_reason('Did not get expected checksums')
ret = 'fail'
gdaltest.reregister_all_jpeg2000_drivers()
return ret
def test_nitf_online_17_jp2ecw():
return nitf_online_17('JP2ECW')
def test_nitf_online_17_jp2mrsid():
return nitf_online_17('JP2MrSID')
def test_nitf_online_17_jp2kak():
return nitf_online_17('JP2KAK')
def test_nitf_online_17_jasper():
return nitf_online_17('JPEG2000')
def test_nitf_online_17_openjpeg():
return nitf_online_17('JP2OpenJPEG')
###############################################################################
# Test polar stereographic CADRG tile.
def test_nitf_online_18():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/bugs/bug3337.ntf', 'bug3337.ntf'):
pytest.skip()
ds = gdal.Open('tmp/cache/bug3337.ntf')
gt = ds.GetGeoTransform()
prj = ds.GetProjection()
    # If we have a functioning coordinate transformer.
if prj[:6] == 'PROJCS':
assert prj.find('Azimuthal_Equidistant') != -1, 'wrong projection?'
expected_gt = (-1669792.3618991028, 724.73626818537502, 0.0, -556597.45396636717, 0.0, -724.73626818537434)
assert gdaltest.geotransform_equals(gt, expected_gt, 1.0), \
'did not get expected geotransform.'
# If we do not have a functioning coordinate transformer.
else:
assert prj == '' and gdaltest.geotransform_equals(gt, (0, 1, 0, 0, 0, 1), 0.00000001), \
'did not get expected empty gt/projection'
prj = ds.GetGCPProjection()
assert prj[:6] == 'GEOGCS', 'did not get expected geographic srs'
gcps = ds.GetGCPs()
gcp3 = gcps[3]
assert gcp3.GCPPixel == 0 and gcp3.GCPLine == 1536 and abs(gcp3.GCPX + 45) <= 0.0000000001 and gcp3.GCPY == pytest.approx(68.78679656, abs=0.00000001), \
'did not get expected gcp.'
ds = None
###############################################################################
# Test CADRG tile crossing dateline (#3383)
def test_nitf_online_19():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/0000M033.GN3', '0000M033.GN3'):
pytest.skip()
tst = gdaltest.GDALTest('NITF', 'tmp/cache/0000M033.GN3', 1, 38928,
filename_absolute=1)
return tst.testOpen(check_gt=(174.375000000000000, 0.010986328125000, 0,
51.923076923076927, 0, -0.006760817307692))
###############################################################################
# Check that the RPF attribute metadata was carried through.
# Special case where the reported size of the attribute subsection is
# smaller than what is actually available
def test_nitf_online_20():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/0000M033.GN3', '0000M033.GN3'):
pytest.skip()
# check that the RPF attribute metadata was carried through.
# Special case where the reported size of the attribute subsection is
    # smaller than what is actually available
ds = gdal.Open('tmp/cache/0000M033.GN3')
md = ds.GetMetadata()
assert md['NITF_RPF_CurrencyDate'] == '19941201' and md['NITF_RPF_ProductionDate'] == '19980511' and md['NITF_RPF_SignificantDate'] == '19850305', \
'RPF attribute metadata not captured (#3413)'
###############################################################################
# Check that we can read NITF header located in STREAMING_FILE_HEADER DE
# segment when header at beginning of file is incomplete
def test_nitf_online_21():
if not gdaltest.download_file('http://www.gwg.nga.mil/ntb/baseline/software/testfile/Nitfv2_1/ns3321a.nsf', 'ns3321a.nsf'):
pytest.skip()
ds = gdal.Open('tmp/cache/ns3321a.nsf')
md = ds.GetMetadata()
ds = None
# If we get NS3321A, it means we are not exploiting the header from the STREAMING_FILE_HEADER DE segment
assert md['NITF_OSTAID'] == 'I_3321A', \
'did not get expected OSTAID value'
###############################################################################
# Test fix for #3002 (reconcile NITF file with LA segments)
#
def test_nitf_online_22():
if not gdaltest.download_file('http://www.gwg.nga.mil/ntb/baseline/software/testfile/Nitfv1_1/U_0001C.NTF', 'U_0001C.NTF'):
pytest.skip()
ds = gdal.Open('NITF_IM:1:tmp/cache/U_0001C.NTF')
md = ds.GetMetadata()
ds = None
tab = [
('NITF_IDLVL', '6'),
('NITF_IALVL', '1'),
('NITF_ILOC_ROW', '360'),
('NITF_ILOC_COLUMN', '380'),
('NITF_CCS_ROW', '425'),
('NITF_CCS_COLUMN', '410'),
]
for item in tab:
assert md[item[0]] == item[1], ('(1) wrong value for %s, got %s instead of %s.'
% (item[0], md[item[0]], item[1]))
ds = gdal.Open('NITF_IM:2:tmp/cache/U_0001C.NTF')
md = ds.GetMetadata()
ds = None
tab = [
('NITF_IDLVL', '11'),
('NITF_IALVL', '2'),
('NITF_ILOC_ROW', '360'),
('NITF_ILOC_COLUMN', '40'),
('NITF_CCS_ROW', '422'),
('NITF_CCS_COLUMN', '210'),
]
for item in tab:
assert md[item[0]] == item[1], ('(2) wrong value for %s, got %s instead of %s.'
% (item[0], md[item[0]], item[1]))
ds = gdal.Open('NITF_IM:3:tmp/cache/U_0001C.NTF')
md = ds.GetMetadata()
ds = None
tab = [
('NITF_IDLVL', '5'),
('NITF_IALVL', '3'),
('NITF_ILOC_ROW', '40'),
('NITF_ILOC_COLUMN', '240'),
('NITF_CCS_ROW', '-1'),
('NITF_CCS_COLUMN', '-1'),
]
for item in tab:
assert md[item[0]] == item[1], ('(3) wrong value for %s, got %s instead of %s.'
% (item[0], md[item[0]], item[1]))
ds = gdal.Open('NITF_IM:4:tmp/cache/U_0001C.NTF')
md = ds.GetMetadata()
ds = None
tab = [
('NITF_IDLVL', '1'),
('NITF_IALVL', '0'),
('NITF_ILOC_ROW', '65'),
('NITF_ILOC_COLUMN', '30'),
('NITF_CCS_ROW', '65'),
('NITF_CCS_COLUMN', '30'),
]
for item in tab:
assert md[item[0]] == item[1], ('(4) wrong value for %s, got %s instead of %s.'
% (item[0], md[item[0]], item[1]))
###############################################################################
# Test reading a M4 compressed file (fixed for #3848)
def test_nitf_online_23():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/nitf2.0/U_3058b.ntf', 'U_3058b.ntf'):
pytest.skip()
tst = gdaltest.GDALTest('NITF', 'tmp/cache/U_3058b.ntf', 1, 44748, filename_absolute=1)
return tst.testOpen()
###############################################################################
# Test reading ECRG frames
def test_nitf_online_24():
if not gdaltest.download_file('http://www.falconview.org/trac/FalconView/downloads/17', 'ECRG_Sample.zip'):
pytest.skip()
try:
os.stat('tmp/cache/ECRG_Sample.zip')
except OSError:
pytest.skip()
oldval = gdal.GetConfigOption('NITF_OPEN_UNDERLYING_DS')
gdal.SetConfigOption('NITF_OPEN_UNDERLYING_DS', 'NO')
ds = gdal.Open('/vsizip/tmp/cache/ECRG_Sample.zip/ECRG_Sample/EPF/clfc/2/000000009s0013.lf2')
gdal.SetConfigOption('NITF_OPEN_UNDERLYING_DS', oldval)
assert ds is not None
xml_tre = ds.GetMetadata('xml:TRE')[0]
ds = None
assert (not (xml_tre.find('<tre name="GEOPSB"') == -1 or \
xml_tre.find('<tre name="J2KLRA"') == -1 or \
xml_tre.find('<tre name="GEOLOB"') == -1 or \
xml_tre.find('<tre name="BNDPLB"') == -1 or \
xml_tre.find('<tre name="ACCPOB"') == -1 or \
xml_tre.find('<tre name="SOURCB"') == -1)), 'did not get expected xml:TRE'
###############################################################################
# Test reading a HRE file
def test_nitf_online_25():
if not gdaltest.download_file('http://www.gwg.nga.mil/ntb/baseline/docs/HRE_spec/Case1_HRE10G324642N1170747W_Uxx.hr5', 'Case1_HRE10G324642N1170747W_Uxx.hr5'):
pytest.skip()
tst = gdaltest.GDALTest('NITF', 'tmp/cache/Case1_HRE10G324642N1170747W_Uxx.hr5', 1, 7099, filename_absolute=1)
tst.testOpen()
ds = gdal.Open('tmp/cache/Case1_HRE10G324642N1170747W_Uxx.hr5')
xml_tre = ds.GetMetadata('xml:TRE')[0]
ds = None
assert xml_tre.find('<tre name="PIAPRD"') != -1, 'did not get expected xml:TRE'
###############################################################################
# Cleanup.
def test_nitf_cleanup():
try:
gdal.GetDriverByName('NITF').Delete('tmp/test_create.ntf')
except RuntimeError:
pass
try:
gdal.GetDriverByName('NITF').Delete('tmp/nitf9.ntf')
except RuntimeError:
pass
try:
gdal.GetDriverByName('NITF').Delete('tmp/test_13.ntf')
except RuntimeError:
pass
try:
gdal.GetDriverByName('NITF').Delete('tmp/test_29.ntf')
except RuntimeError:
pass
try:
gdal.GetDriverByName('NITF').Delete('tmp/test_29_copy.ntf')
except RuntimeError:
pass
try:
gdal.GetDriverByName('NITF').Delete('tmp/nitf36.ntf')
except RuntimeError:
pass
try:
gdal.GetDriverByName('NITF').Delete('tmp/nitf37.ntf')
except RuntimeError:
pass
try:
gdal.GetDriverByName('NITF').Delete('tmp/nitf38.ntf')
os.unlink('tmp/nitf38.ntf_0.ovr')
except (RuntimeError, OSError):
pass
try:
gdal.GetDriverByName('NITF').Delete('tmp/nitf39.ntf')
except (RuntimeError, OSError):
pass
try:
os.stat('tmp/nitf40.ntf')
gdal.GetDriverByName('NITF').Delete('tmp/nitf40.ntf')
except (RuntimeError, OSError):
pass
try:
os.stat('tmp/nitf42.ntf')
gdal.GetDriverByName('NITF').Delete('tmp/nitf42.ntf')
except (OSError, RuntimeError):
pass
try:
gdal.GetDriverByName('NITF').Delete('tmp/nitf44.ntf')
except RuntimeError:
pass
try:
gdal.GetDriverByName('NITF').Delete('tmp/nitf45.ntf')
os.unlink('tmp/nitf45.ntf_0.ovr')
except (RuntimeError, OSError):
pass
try:
os.stat('tmp/nitf46.ntf')
gdal.GetDriverByName('NITF').Delete('tmp/nitf46.ntf')
os.unlink('tmp/nitf46.ntf_0.ovr')
except (RuntimeError, OSError):
pass
try:
gdal.GetDriverByName('NITF').Delete('tmp/nitf49.ntf')
except RuntimeError:
pass
try:
gdal.GetDriverByName('NITF').Delete('tmp/nitf49_2.ntf')
except RuntimeError:
pass
try:
gdal.GetDriverByName('NITF').Delete('tmp/nitf50.ntf')
except RuntimeError:
pass
try:
gdal.GetDriverByName('NITF').Delete('tmp/nitf51.ntf')
except RuntimeError:
pass
try:
gdal.GetDriverByName('NITF').Delete('tmp/nitf52.ntf')
except RuntimeError:
pass
try:
gdal.GetDriverByName('NITF').Delete('tmp/nitf53.ntf')
except RuntimeError:
pass
try:
gdal.GetDriverByName('NITF').Delete('tmp/nitf54.ntf')
except RuntimeError:
pass
try:
gdal.GetDriverByName('NITF').Delete('tmp/nitf55.ntf')
except RuntimeError:
pass
try:
gdal.GetDriverByName('NITF').Delete('tmp/nitf56.ntf')
except RuntimeError:
pass
try:
gdal.GetDriverByName('NITF').Delete('tmp/nitf57.ntf')
except RuntimeError:
pass
try:
gdal.GetDriverByName('NITF').Delete('tmp/nitf58.ntf')
except RuntimeError:
pass
try:
os.remove('tmp/nitf59.hdr')
gdal.GetDriverByName('NITF').Delete('tmp/nitf59.ntf')
except (OSError, RuntimeError):
pass
try:
gdal.GetDriverByName('NITF').Delete('tmp/nitf62.ntf')
except RuntimeError:
pass
try:
gdal.GetDriverByName('NITF').Delete('tmp/nitf63.ntf')
except RuntimeError:
pass
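###############################################################################
# Editorial sketch (not an original test): the create-with-TRE / read-back
# pattern that most TRE tests above follow, shown in isolation. The filename
# and the CSRLSB payload are illustrative only.
def _tre_roundtrip_sketch():
    tre_data = "TRE=CSRLSB=0101+11111111.11-22222222.22+33333333.33-44444444.44"
    ds = gdal.GetDriverByName('NITF').Create('/vsimem/tre_sketch.ntf', 1, 1, options=[tre_data])
    ds = None
    data = gdal.Open('/vsimem/tre_sketch.ntf').GetMetadata('xml:TRE')[0]
    gdal.GetDriverByName('NITF').Delete('/vsimem/tre_sketch.ntf')
    return data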
| 37.993622 | 1,079 | 0.594426 |
0f228f25863905a12148bddbe4cf13a44edfe2fe | 22 | py | Python | consul/datadog_checks/consul/__about__.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | ["BSD-3-Clause"] | 445 | 2019-01-26T13:50:26.000Z | 2022-03-18T05:17:38.000Z | rasa_sdk/version.py | ClaudeCoulombe/rasa-sdk | 17eb70187555c8656336f9f21dcb2eb18655f455 | ["Apache-2.0"] | 242 | 2019-01-29T15:48:27.000Z | 2022-03-31T22:09:21.000Z | rasa_sdk/version.py | ClaudeCoulombe/rasa-sdk | 17eb70187555c8656336f9f21dcb2eb18655f455 | ["Apache-2.0"] | 31 | 2019-03-10T09:51:27.000Z | 2022-02-14T23:11:12.000Z |
__version__ = "1.5.1"
| 11 | 21 | 0.636364 |
9bb843cbec397759faee30373360260fb3503149 | 692 | py | Python | cpuTemperatureReading.py | bobjects/google-spreadsheet-temperature-logger | 91861ddc7f651288116a727b17c9b9ffdbd15ea3 | ["MIT"] | null | null | null | cpuTemperatureReading.py | bobjects/google-spreadsheet-temperature-logger | 91861ddc7f651288116a727b17c9b9ffdbd15ea3 | ["MIT"] | null | null | null | cpuTemperatureReading.py | bobjects/google-spreadsheet-temperature-logger | 91861ddc7f651288116a727b17c9b9ffdbd15ea3 | ["MIT"] | null | null | null |
from temperatureReading import TemperatureReading
class CPUTemperatureReading(TemperatureReading):
def acquireCelsius(self):
return self.parseRawString(self.procFileContents)
@property
def procFileName(self):
return ""
@property
def procFileContents(self):
try:
with open(self.procFileName, "r") as procFile:
return procFile.read()
except:
return ""
def parseRawString(self, aString):
# by default, we assume that the proc file reports the temp as thousandths of a degree
# celsius, with only one line in the file. Override as needed.
return float(aString) / 1000.0
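# Editorial sketch (not part of the original module): a hypothetical subclass
# that points procFileName at the usual Linux thermal-zone file. The class name
# and the path are illustrative assumptions, not taken from this project.
class ThermalZoneCPUTemperatureReading(CPUTemperatureReading):
    @property
    def procFileName(self):
        # reported in millidegrees Celsius on most Linux systems, which matches
        # the divide-by-1000 in parseRawString above
        return "/sys/class/thermal/thermal_zone0/temp"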
| 28.833333 | 94 | 0.654624 |
303573c9d577292f6b309f692aa48e563ebe5b9a | 3,875 | py | Python | tests/test_git_format_pkg_patch.py | openSUSE/git-packaging-tools | be81c7d207db185ddc0d2510bd025cfc2e9613c7 | ["MIT"] | 8 | 2017-08-15T12:51:34.000Z | 2020-10-07T09:58:34.000Z | tests/test_git_format_pkg_patch.py | openSUSE/git-packaging-tools | be81c7d207db185ddc0d2510bd025cfc2e9613c7 | ["MIT"] | 5 | 2017-02-04T12:32:16.000Z | 2020-07-01T14:13:19.000Z | tests/test_git_format_pkg_patch.py | openSUSE/git-packaging-tools | be81c7d207db185ddc0d2510bd025cfc2e9613c7 | ["MIT"] | 6 | 2017-02-07T13:31:21.000Z | 2021-02-10T23:14:03.000Z |
from gfp import get_diff_contents, unique
class TestUtils(object):
'''
Grouped tests for utility functions
'''
diff_a = """
From b338b21fe340ee4efa0045894315fcf20be1dc49 Mon Sep 17 00:00:00 2001
From: Test <info@suse.com>
Date: Wed, 14 Dec 2016 10:33:39 +0100
Subject: [PATCH] Avoid failures on SLES 12 SP2 because of new systemd
TaskMax limit (bsc#985112)
---
pkg/salt-master.service | 1 +
1 file changed, 1 insertion(+)
diff --git a/pkg/salt-master.service b/pkg/salt-master.service
index 59be50301a..ecd3edd467 100644
--- a/pkg/salt-master.service
+++ b/pkg/salt-master.service
@@ -6,6 +6,7 @@ After=network.target
LimitNOFILE=16384
Type=simple
ExecStart=/usr/bin/salt-master
+TasksMax=infinity
[Install]
WantedBy=multi-user.target
--
2.11.0
"""
diff_b = """
From 7bbbd3b6ebaf3988a4f97b905040b56be065f201 Mon Sep 17 00:00:00 2001
From: Test <info@suse.com>
Date: Fri, 29 Jul 2016 10:50:21 +0200
Subject: [PATCH] Run salt-api as user salt (bsc#990029)
---
pkg/salt-api.service | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/pkg/salt-api.service b/pkg/salt-api.service
index c3e67d510c..9be2cb8ee6 100644
--- a/pkg/salt-api.service
+++ b/pkg/salt-api.service
@@ -3,8 +3,8 @@ Description=The Salt API
After=network.target
[Service]
-Type=notify
-NotifyAccess=all
+User=salt
+Type=simple
LimitNOFILE=8192
ExecStart=/usr/bin/salt-api
TimeoutStopSec=3
--
2.11.0
"""
diff_c = """
From 0943872fab17ae5400acc5b66cdb338193291e9e Mon Sep 17 00:00:00 2001
From: Test <info@saltstack.com>
Date: Mon, 30 Jan 2017 16:43:40 -0700
Subject: [PATCH 350/351] Add 2016.11.3 release notes file (#39044)
---
doc/topics/releases/2016.11.3.rst | 5 +++++
1 file changed, 5 insertions(+)
create mode 100644 doc/topics/releases/2016.11.3.rst
diff --git a/doc/topics/releases/2016.11.3.rst b/doc/topics/releases/2016.11.3.rst
new file mode 100644
index 0000000000..cb2a5974ff
--- /dev/null
+++ b/doc/topics/releases/2016.11.3.rst
@@ -0,0 +1,5 @@
+============================
+Salt 2016.11.3 Release Notes
+============================
+
+Version 2016.11.3 is a bugfix release for :ref:`2016.11.0 <release-2016-11-0>`.
--
2.11.0
"""
def test_get_diff_contents(self):
'''
Test diff content extracted properly.
:return:
'''
sample_content_a = [' LimitNOFILE=16384\n Type=simple\n ExecStart=/usr/bin/salt-master\n'
'+TasksMax=infinity\n\n [Install]\n WantedBy=multi-user.target\n']
assert get_diff_contents(self.diff_a) == sample_content_a
sample_content_b = [' After=network.target\n\n [Service]\n-Type=notify\n-NotifyAccess=all'
'\n+User=salt\n+Type=simple\n LimitNOFILE=8192\n ExecStart=/usr/bin/salt-api'
'\n TimeoutStopSec=3\n']
assert get_diff_contents(self.diff_b) == sample_content_b
sample_content_c = ['+============================\n+Salt 2016.11.3 Release Notes\n'
'+============================\n+\n+Version 2016.11.3 is a bugfix release '
'for :ref:`2016.11.0 <release-2016-11-0>`.\n']
assert get_diff_contents(self.diff_c) == sample_content_c
def test_make_unique_filename(self):
'''
Test unique filename
:return:
'''
fname = 'file.patch'
for iter in range(10):
fname = unique(fname)
assert fname == 'file-{0}.patch'.format(iter + 1)
fname = 'file-something.patch'
for iter in range(10):
fname = unique(fname)
assert fname == 'file-something-{0}.patch'.format(iter + 1)
fname = 'some-archive-here.tar.gz'
for iter in range(10):
fname = unique(fname)
assert fname == 'some-archive-here-{0}.tar.gz'.format(iter + 1)
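# Editorial sketch, not the real gfp.unique(): one way to reproduce the
# behaviour asserted above (add or bump a numeric suffix before the first
# dotted extension). Provided for illustration only.
def _unique_sketch(fname):
    import re
    base, ext = fname.split('.', 1)   # keeps multi-part extensions such as 'tar.gz'
    match = re.match(r'^(.*)-(\d+)$', base)
    if match:
        return '{0}-{1}.{2}'.format(match.group(1), int(match.group(2)) + 1, ext)
    return '{0}-1.{1}'.format(base, ext)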
| 30.273438 | 105 | 0.626581 |
498c3fafbddb00616f420673aa6eb7436a022b6f | 853 | py | Python | options/test_options.py | IrinaM21/ChestXRayVis | 13e8f2767406b924f38695b79b7cf80abf0ef755 | ["MIT"] | 2 | 2021-04-06T03:45:12.000Z | 2021-04-12T17:27:05.000Z | options/test_options.py | IrinaM21/ChestXRayVis | 13e8f2767406b924f38695b79b7cf80abf0ef755 | ["MIT"] | null | null | null | options/test_options.py | IrinaM21/ChestXRayVis | 13e8f2767406b924f38695b79b7cf80abf0ef755 | ["MIT"] | null | null | null |
from .base_options import BaseOptions
class TestOptions(BaseOptions):
def initialize(self, parser):
parser = BaseOptions.initialize(self, parser)
parser.add_argument('--ntest', type=int, default=float("inf"), help='# of the test examples')
parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here')
parser.add_argument('--how_many', type=int, default=50, help='how many test images to run')
parser.add_argument('--phase', type=str, default='test', help='train, val, test')
        parser.add_argument('--nsampling', type=int, default=50, help='sampling # times for each image')
        parser.add_argument('--save_number', type=int, default=10, help='choose # reasonable results based on the discriminator score')
self.isTrain = False
return parser
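# Editorial sketch: BaseOptions is defined elsewhere in this code base and is
# not reproduced here, so the exact entry point below is an assumption. Typical
# usage would look roughly like:
#   opt = TestOptions().parse()
#   print(opt.results_dir, opt.how_many, opt.nsampling)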
| 47.388889 | 135 | 0.682298 |
29d0c6f250e078e9680104a36619da59f559cd8f | 10,129 | py | Python | trainer/trainer.py | justinge/DBnet.Pytorch | 00dd292f1e0091e71486616b0771ab978dbd0d28 | ["Apache-2.0"] | null | null | null | trainer/trainer.py | justinge/DBnet.Pytorch | 00dd292f1e0091e71486616b0771ab978dbd0d28 | ["Apache-2.0"] | null | null | null | trainer/trainer.py | justinge/DBnet.Pytorch | 00dd292f1e0091e71486616b0771ab978dbd0d28 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2019/8/23 21:58
# @Author : zhoujun
import time
import torch
import torchvision.utils as vutils
from tqdm import tqdm
from base import BaseTrainer
from utils import WarmupPolyLR, runningScore, cal_text_score
class Trainer(BaseTrainer):
def __init__(self, config, model, criterion, train_loader, validate_loader, metric_cls, post_process=None):
super(Trainer, self).__init__(config, model, criterion)
self.show_images_iter = self.config['trainer']['show_images_iter']
self.train_loader = train_loader
        if validate_loader is not None:
            assert post_process is not None and metric_cls is not None
        self.validate_loader = validate_loader
        self.post_process = post_process
        self.metric_cls = metric_cls
self.train_loader_len = len(train_loader)
if self.config['lr_scheduler']['type'] == 'WarmupPolyLR':
warmup_iters = config['lr_scheduler']['args']['warmup_epoch'] * self.train_loader_len
if self.start_epoch > 1:
self.config['lr_scheduler']['args']['last_epoch'] = (self.start_epoch - 1) * self.train_loader_len
self.scheduler = WarmupPolyLR(self.optimizer, max_iters=self.epochs * self.train_loader_len,
warmup_iters=warmup_iters, **config['lr_scheduler']['args'])
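            # Illustrative numbers (not from the original config): warmup_epoch=3
            # with a 1000-batch loader warms up for 3 * 1000 = 3000 iterations
            # before the polynomial decay of WarmupPolyLR begins.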
if self.validate_loader is not None:
self.logger_info(
'train dataset has {} samples,{} in dataloader, validate dataset has {} samples,{} in dataloader'.format(
len(self.train_loader.dataset), self.train_loader_len, len(self.validate_loader.dataset), len(self.validate_loader)))
else:
self.logger_info('train dataset has {} samples,{} in dataloader'.format(len(self.train_loader.dataset), self.train_loader_len))
def _train_epoch(self, epoch):
self.model.train()
epoch_start = time.time()
batch_start = time.time()
train_loss = 0.
running_metric_text = runningScore(2)
lr = self.optimizer.param_groups[0]['lr']
for i, batch in enumerate(self.train_loader):
if i >= self.train_loader_len:
break
self.global_step += 1
lr = self.optimizer.param_groups[0]['lr']
            # convert the batch tensors and move them to the GPU
for key, value in batch.items():
if value is not None:
if isinstance(value, torch.Tensor):
batch[key] = value.to(self.device)
cur_batch_size = batch['img'].size()[0]
preds = self.model(batch['img'])
loss_dict = self.criterion(preds, batch)
# backward
self.optimizer.zero_grad()
loss_dict['loss'].backward()
self.optimizer.step()
if self.config['lr_scheduler']['type'] == 'WarmupPolyLR':
self.scheduler.step()
# acc iou
score_shrink_map = cal_text_score(preds[:, 0, :, :], batch['shrink_map'], batch['shrink_mask'], running_metric_text,
thred=self.config['post_processing']['args']['thresh'])
            # log the loss and accuracy
loss_str = 'loss: {:.4f}, '.format(loss_dict['loss'].item())
for idx, (key, value) in enumerate(loss_dict.items()):
loss_dict[key] = value.item()
if key == 'loss':
continue
loss_str += '{}: {:.4f}'.format(key, loss_dict[key])
if idx < len(loss_dict) - 1:
loss_str += ', '
train_loss += loss_dict['loss']
acc = score_shrink_map['Mean Acc']
iou_shrink_map = score_shrink_map['Mean IoU']
if self.global_step % self.log_iter == 0:
batch_time = time.time() - batch_start
self.logger_info(
'[{}/{}], [{}/{}], global_step: {}, speed: {:.1f} samples/sec, acc: {:.4f}, iou_shrink_map: {:.4f}, {}, lr:{:.6}, time:{:.2f}'.format(
epoch, self.epochs, i + 1, self.train_loader_len, self.global_step, self.log_iter * cur_batch_size / batch_time, acc,
iou_shrink_map, loss_str, lr, batch_time))
batch_start = time.time()
if self.tensorboard_enable and self.config['local_rank'] == 0:
# write tensorboard
for key, value in loss_dict.items():
self.writer.add_scalar('TRAIN/LOSS/{}'.format(key), value, self.global_step)
self.writer.add_scalar('TRAIN/ACC_IOU/acc', acc, self.global_step)
self.writer.add_scalar('TRAIN/ACC_IOU/iou_shrink_map', iou_shrink_map, self.global_step)
self.writer.add_scalar('TRAIN/lr', lr, self.global_step)
if self.global_step % self.show_images_iter == 0:
# show images on tensorboard
self.inverse_normalize(batch['img'])
self.writer.add_images('TRAIN/imgs', batch['img'], self.global_step)
# shrink_labels and threshold_labels
shrink_labels = batch['shrink_map']
threshold_labels = batch['threshold_map']
shrink_labels[shrink_labels <= 0.5] = 0
shrink_labels[shrink_labels > 0.5] = 1
show_label = torch.cat([shrink_labels, threshold_labels])
show_label = vutils.make_grid(show_label.unsqueeze(1), nrow=cur_batch_size, normalize=False, padding=20, pad_value=1)
self.writer.add_image('TRAIN/gt', show_label, self.global_step)
# model output
show_pred = []
for kk in range(preds.shape[1]):
show_pred.append(preds[:, kk, :, :])
show_pred = torch.cat(show_pred)
show_pred = vutils.make_grid(show_pred.unsqueeze(1), nrow=cur_batch_size, normalize=False, padding=20, pad_value=1)
self.writer.add_image('TRAIN/preds', show_pred, self.global_step)
return {'train_loss': train_loss / self.train_loader_len, 'lr': lr, 'time': time.time() - epoch_start,
'epoch': epoch}
def _eval(self, epoch):
self.model.eval()
# torch.cuda.empty_cache() # speed up evaluating after training finished
raw_metrics = []
total_frame = 0.0
total_time = 0.0
for i, batch in tqdm(enumerate(self.validate_loader), total=len(self.validate_loader), desc='test model'):
with torch.no_grad():
                # convert the batch tensors and move them to the GPU
for key, value in batch.items():
if value is not None:
if isinstance(value, torch.Tensor):
batch[key] = value.to(self.device)
start = time.time()
preds = self.model(batch['img'])
boxes, scores = self.post_process(batch, preds,is_output_polygon=self.metric_cls.is_output_polygon)
total_frame += batch['img'].size()[0]
total_time += time.time() - start
raw_metric = self.metric_cls.validate_measure(batch, (boxes, scores))
raw_metrics.append(raw_metric)
metrics = self.metric_cls.gather_measure(raw_metrics)
self.logger_info('FPS:{}'.format(total_frame / total_time))
return metrics['recall'].avg, metrics['precision'].avg, metrics['fmeasure'].avg
def _on_epoch_finish(self):
self.logger_info('[{}/{}], train_loss: {:.4f}, time: {:.4f}, lr: {}'.format(
self.epoch_result['epoch'], self.epochs, self.epoch_result['train_loss'], self.epoch_result['time'],
self.epoch_result['lr']))
net_save_path = '{}/model_latest.pth'.format(self.checkpoint_dir)
net_save_path_best = '{}/model_best.pth'.format(self.checkpoint_dir)
if self.config['local_rank'] == 0:
self._save_checkpoint(self.epoch_result['epoch'], net_save_path)
save_best = False
            if self.validate_loader is not None and self.metric_cls is not None:  # use F1 (hmean) as the best-model criterion
recall, precision, hmean = self._eval(self.epoch_result['epoch'])
if self.tensorboard_enable:
self.writer.add_scalar('EVAL/recall', recall, self.global_step)
self.writer.add_scalar('EVAL/precision', precision, self.global_step)
self.writer.add_scalar('EVAL/hmean', hmean, self.global_step)
self.logger_info('test: recall: {:.6f}, precision: {:.6f}, f1: {:.6f}'.format(recall, precision, hmean))
if hmean >= self.metrics['hmean']:
save_best = True
self.metrics['train_loss'] = self.epoch_result['train_loss']
self.metrics['hmean'] = hmean
self.metrics['precision'] = precision
self.metrics['recall'] = recall
self.metrics['best_model_epoch'] = self.epoch_result['epoch']
else:
if self.epoch_result['train_loss'] <= self.metrics['train_loss']:
save_best = True
self.metrics['train_loss'] = self.epoch_result['train_loss']
self.metrics['best_model_epoch'] = self.epoch_result['epoch']
best_str = 'current best, '
for k, v in self.metrics.items():
best_str += '{}: {:.6f}, '.format(k, v)
self.logger_info(best_str)
if save_best:
import shutil
shutil.copy(net_save_path, net_save_path_best)
self.logger_info("Saving current best: {}".format(net_save_path_best))
else:
self.logger_info("Saving checkpoint: {}".format(net_save_path))
def _on_train_finish(self):
for k, v in self.metrics.items():
self.logger_info('{}:{}'.format(k, v))
self.logger_info('finish train')
| 52.21134
| 154
| 0.573897
|
1e5bab80845593f186a6d6c3af8e28946948b87a
| 12,191
|
py
|
Python
|
Lib/multiprocessing/queues.py
|
lotapp/cpython3
|
2c3125eba450f4f4ec649cae5613b3f6cbc4f110
|
[
"PSF-2.0"
] | 2
|
2019-07-05T09:19:52.000Z
|
2019-12-18T10:31:38.000Z
|
Lib/multiprocessing/queues.py
|
lotapp/cpython3
|
2c3125eba450f4f4ec649cae5613b3f6cbc4f110
|
[
"PSF-2.0"
] | null | null | null |
Lib/multiprocessing/queues.py
|
lotapp/cpython3
|
2c3125eba450f4f4ec649cae5613b3f6cbc4f110
|
[
"PSF-2.0"
] | null | null | null |
#
# Module implementing queues
#
# multiprocessing/queues.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = ['Queue', 'SimpleQueue', 'JoinableQueue']
import sys
import os
import threading
import collections
import time
import weakref
import errno
from queue import Empty, Full
import _multiprocessing
from . import connection
from . import context
_ForkingPickler = context.reduction.ForkingPickler
from .util import debug, info, Finalize, register_after_fork, is_exiting
# Queue type using a pipe, buffer and thread
class Queue(object):
# ctx = multiprocessing.get_context("xxx")
    # There are three kinds of context: spawn, fork and forkserver
def __init__(self, maxsize=0, *, ctx):
        # Use the maximum capacity by default
if maxsize <= 0:
from .synchronize import SEM_VALUE_MAX as maxsize
        self._maxsize = maxsize # the queue size limit
        # Create an anonymous pipe (one-way)
self._reader, self._writer = connection.Pipe(duplex=False)
# `multiprocessing/synchronize.py > Lock`
        self._rlock = ctx.Lock() # process lock for reading (non-recursive)
        self._opid = os.getpid() # record the PID of the creating process
if sys.platform == 'win32':
self._wlock = None
else:
            self._wlock = ctx.Lock() # process lock for writing (non-recursive)
        # A semaphore is typically used to guard a resource of limited capacity
        # The bounded semaphore tracks the item count; exceeding maxsize makes put() block or raise Full
        self._sem = ctx.BoundedSemaphore(maxsize)
        # Do not ignore broken-pipe (EPIPE) errors
        self._ignore_epipe = False
        # Thread-related setup
        self._after_fork()
        # Register with the `_afterfork_registry` dict
if sys.platform != 'win32':
register_after_fork(self, Queue._after_fork)
def __getstate__(self):
context.assert_spawning(self)
return (self._ignore_epipe, self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid)
def __setstate__(self, state):
(self._ignore_epipe, self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid) = state
self._after_fork()
def _after_fork(self):
debug('Queue._after_fork()')
self._notempty = threading.Condition(threading.Lock())
self._buffer = collections.deque()
self._thread = None
self._jointhread = None
self._joincancelled = False
self._closed = False
self._close = None
self._send_bytes = self._writer.send_bytes
self._recv_bytes = self._reader.recv_bytes
self._poll = self._reader.poll
def put(self, obj, block=True, timeout=None):
        # Raise if the Queue has already been closed
        assert not self._closed, "Queue {0!r} has been closed".format(self)
        # Acquire the bounded semaphore that counts queued items
        if not self._sem.acquire(block, timeout):
            raise Full # over capacity, raise
        # A condition variable lets one or more threads wait until another thread notifies them
with self._notempty:
if self._thread is None:
self._start_thread()
self._buffer.append(obj)
self._notempty.notify()
def get(self, block=True, timeout=None):
        # The default case blocks while holding the read lock
if block and timeout is None:
with self._rlock:
res = self._recv_bytes()
            self._sem.release() # semaphore +1
else:
if block:
deadline = time.monotonic() + timeout
            # raise on timeout
if not self._rlock.acquire(block, timeout):
raise Empty
try:
if block:
timeout = deadline - time.monotonic()
                    # poll for data regardless; raise Empty on timeout
if not self._poll(timeout):
raise Empty
elif not self._poll():
raise Empty
                # receive the raw data as a bytes object
res = self._recv_bytes()
                self._sem.release() # semaphore +1
finally:
                # release the read lock
                self._rlock.release()
# unserialize the data after having released the lock
return _ForkingPickler.loads(res)
def qsize(self):
# Raises NotImplementedError on Mac OSX because of broken sem_getvalue()
return self._maxsize - self._sem._semlock._get_value()
def empty(self):
return not self._poll()
def full(self):
return self._sem._semlock._is_zero()
def get_nowait(self):
return self.get(False)
def put_nowait(self, obj):
return self.put(obj, False)
def close(self):
self._closed = True
try:
self._reader.close()
finally:
close = self._close
if close:
self._close = None
close()
def join_thread(self):
debug('Queue.join_thread()')
assert self._closed, "Queue {0!r} not closed".format(self)
if self._jointhread:
self._jointhread()
def cancel_join_thread(self):
debug('Queue.cancel_join_thread()')
self._joincancelled = True
try:
self._jointhread.cancel()
except AttributeError:
pass
def _start_thread(self):
debug('Queue._start_thread()')
# Start thread which transfers data from buffer to pipe
self._buffer.clear()
self._thread = threading.Thread(
target=Queue._feed,
args=(self._buffer, self._notempty, self._send_bytes, self._wlock,
self._writer.close, self._ignore_epipe,
self._on_queue_feeder_error, self._sem),
name='QueueFeederThread')
self._thread.daemon = True
debug('doing self._thread.start()')
self._thread.start()
debug('... done self._thread.start()')
if not self._joincancelled:
self._jointhread = Finalize(
self._thread,
Queue._finalize_join, [weakref.ref(self._thread)],
exitpriority=-5)
# Send sentinel to the thread queue object when garbage collected
self._close = Finalize(
self,
Queue._finalize_close, [self._buffer, self._notempty],
exitpriority=10)
@staticmethod
def _finalize_join(twr):
debug('joining queue thread')
thread = twr()
if thread is not None:
thread.join()
debug('... queue thread joined')
else:
debug('... queue thread already dead')
@staticmethod
def _finalize_close(buffer, notempty):
debug('telling queue thread to quit')
with notempty:
buffer.append(_sentinel)
notempty.notify()
@staticmethod
def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe,
onerror, queue_sem):
debug('starting thread to feed data to pipe')
nacquire = notempty.acquire
nrelease = notempty.release
nwait = notempty.wait
bpopleft = buffer.popleft
sentinel = _sentinel
if sys.platform != 'win32':
wacquire = writelock.acquire
wrelease = writelock.release
else:
wacquire = None
while 1:
try:
nacquire()
try:
if not buffer:
nwait()
finally:
nrelease()
try:
while 1:
obj = bpopleft()
if obj is sentinel:
debug('feeder thread got sentinel -- exiting')
close()
return
# serialize the data before acquiring the lock
obj = _ForkingPickler.dumps(obj)
if wacquire is None:
send_bytes(obj)
else:
wacquire()
try:
send_bytes(obj)
finally:
wrelease()
except IndexError:
pass
except Exception as e:
if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
return
                # Since this runs in a daemon thread the resources it uses
                # may become unusable while the process is cleaning up.
# We ignore errors which happen after the process has
# started to cleanup.
if is_exiting():
info('error in queue thread: %s', e)
return
else:
# Since the object has not been sent in the queue, we need
# to decrease the size of the queue. The error acts as
# if the object had been silently removed from the queue
# and this step is necessary to have a properly working
# queue.
queue_sem.release()
onerror(e, obj)
@staticmethod
def _on_queue_feeder_error(e, obj):
"""
Private API hook called when feeding data in the background thread
raises an exception. For overriding by concurrent.futures.
"""
import traceback
traceback.print_exc()
_sentinel = object()
#
# A queue type which also supports join() and task_done() methods
#
# Note that if you do not call task_done() for each finished task then
# eventually the counter's semaphore may overflow causing Bad Things
# to happen.
#
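# A minimal usage sketch (added for illustration, not part of the module; in
# application code the queue comes from multiprocessing.JoinableQueue() and
# handle_item() is a hypothetical work function):
#
#     import multiprocessing as mp
#
#     def worker(q):
#         while True:
#             item = q.get()
#             try:
#                 handle_item(item)
#             finally:
#                 q.task_done()   # exactly one task_done() per get()
#
#     q = mp.JoinableQueue()
#     for item in range(10):
#         q.put(item)
#     mp.Process(target=worker, args=(q,), daemon=True).start()
#     q.join()                    # returns once every item has been task_done()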
class JoinableQueue(Queue):
def __init__(self, maxsize=0, *, ctx):
Queue.__init__(self, maxsize, ctx=ctx)
self._unfinished_tasks = ctx.Semaphore(0)
self._cond = ctx.Condition()
def __getstate__(self):
return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)
def __setstate__(self, state):
Queue.__setstate__(self, state[:-2])
self._cond, self._unfinished_tasks = state[-2:]
def put(self, obj, block=True, timeout=None):
assert not self._closed, "Queue {0!r} is closed".format(self)
if not self._sem.acquire(block, timeout):
raise Full
with self._notempty, self._cond:
if self._thread is None:
self._start_thread()
self._buffer.append(obj)
self._unfinished_tasks.release()
self._notempty.notify()
def task_done(self):
with self._cond:
if not self._unfinished_tasks.acquire(False):
raise ValueError('task_done() called too many times')
if self._unfinished_tasks._semlock._is_zero():
self._cond.notify_all()
def join(self):
with self._cond:
if not self._unfinished_tasks._semlock._is_zero():
self._cond.wait()
#
# Simplified Queue type -- really just a locked pipe
#
class SimpleQueue(object):
def __init__(self, *, ctx):
self._reader, self._writer = connection.Pipe(duplex=False)
self._rlock = ctx.Lock()
self._poll = self._reader.poll
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = ctx.Lock()
def empty(self):
return not self._poll()
def __getstate__(self):
context.assert_spawning(self)
return (self._reader, self._writer, self._rlock, self._wlock)
def __setstate__(self, state):
(self._reader, self._writer, self._rlock, self._wlock) = state
self._poll = self._reader.poll
def get(self):
with self._rlock:
res = self._reader.recv_bytes()
# unserialize the data after having released the lock
return _ForkingPickler.loads(res)
def put(self, obj):
# serialize the data before acquiring the lock
obj = _ForkingPickler.dumps(obj)
if self._wlock is None:
# writes to a message oriented win32 pipe are atomic
self._writer.send_bytes(obj)
else:
with self._wlock:
self._writer.send_bytes(obj)
| 32.33687
| 80
| 0.564269
|
9bebc880f97708d73aeaabe3fe878ec5f53343b4
| 9,506
|
py
|
Python
|
lib/fast_rcnn/config.py
|
yanxp/self_learning-framwork
|
df7902f83913ba709170a8e2e003abe89619f5cc
|
[
"MIT"
] | 1
|
2018-03-18T11:50:58.000Z
|
2018-03-18T11:50:58.000Z
|
lib/fast_rcnn/config.py
|
yanxp/self_learning-framwork
|
df7902f83913ba709170a8e2e003abe89619f5cc
|
[
"MIT"
] | null | null | null |
lib/fast_rcnn/config.py
|
yanxp/self_learning-framwork
|
df7902f83913ba709170a8e2e003abe89619f5cc
|
[
"MIT"
] | null | null | null |
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Fast R-CNN config system.
This file specifies default config options for Fast R-CNN. You should not
change values in this file. Instead, you should write a config file (in yaml)
and use cfg_from_file(yaml_file) to load it and override the default options.
Most tools in $ROOT/tools take a --cfg option to specify an override file.
- See tools/{train,test}_net.py for example code that uses cfg_from_file()
- See experiments/cfgs/*.yml for example YAML config override files
"""
import os
import os.path as osp
import numpy as np
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict
__C = edict()
# Consumers can get config by:
# from fast_rcnn_config import cfg
cfg = __C
#
# Training options
#
__C.TRAIN = edict()
# Scales to use during training (can list multiple scales)
# Each scale is the pixel size of an image's shortest side
__C.TRAIN.SCALES = (400,500,600,700,800)
# Max pixel size of the longest side of a scaled input image
__C.TRAIN.MAX_SIZE = 1000
# Images to use per minibatch
__C.TRAIN.IMS_PER_BATCH = 4
# Minibatch size (number of regions of interest [ROIs])
__C.TRAIN.BATCH_SIZE = 128
# Fraction of minibatch that is labeled foreground (i.e. class > 0)
__C.TRAIN.FG_FRACTION = 0.25
# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
__C.TRAIN.FG_THRESH = 0.5
# Overlap threshold for a ROI to be considered background (class = 0 if
# overlap in [LO, HI))
__C.TRAIN.BG_THRESH_HI = 0.5
__C.TRAIN.BG_THRESH_LO = 0.1
# Use horizontally-flipped images during training?
__C.TRAIN.USE_FLIPPED = True
# Train bounding-box regressors
__C.TRAIN.BBOX_REG = True
# Overlap required between a ROI and ground-truth box in order for that ROI to
# be used as a bounding-box regression training example
__C.TRAIN.BBOX_THRESH = 0.5
# Iterations between snapshots
__C.TRAIN.SNAPSHOT_ITERS = 10000
# solver.prototxt specifies the snapshot path prefix, this adds an optional
# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel
__C.TRAIN.SNAPSHOT_INFIX = ''
# Use a prefetch thread in roi_data_layer.layer
# So far I haven't found this useful; likely more engineering work is required
__C.TRAIN.USE_PREFETCH = False
# Normalize the targets (subtract empirical mean, divide by empirical stddev)
__C.TRAIN.BBOX_NORMALIZE_TARGETS = True
# Deprecated (inside weights)
__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Normalize the targets using "precomputed" (or made up) means and stdevs
# (BBOX_NORMALIZE_TARGETS must also be True)
__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = False
__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
__C.TRAIN.RPN_NORMALIZE_TARGETS = False
__C.TRAIN.RPN_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.RPN_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
# Train using these proposals
__C.TRAIN.PROPOSAL_METHOD = 'selective_search'
# Make minibatches from images that have similar aspect ratios (i.e. both
# tall and thin or both short and wide) in order to avoid wasting computation
# on zero-padding.
__C.TRAIN.ASPECT_GROUPING = True
# Use RPN to detect objects
__C.TRAIN.HAS_RPN = False
# IOU >= thresh: positive example
__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
# IOU < thresh: negative example
__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
# If an anchor satisfies both the positive and negative conditions, set it to negative
__C.TRAIN.RPN_CLOBBER_POSITIVES = False
# Max number of foreground examples
__C.TRAIN.RPN_FG_FRACTION = 0.5
# Total number of examples
__C.TRAIN.RPN_BATCHSIZE = 256
# NMS threshold used on RPN proposals
__C.TRAIN.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TRAIN.RPN_POST_NMS_TOP_N = 2000
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TRAIN.RPN_MIN_SIZE = 16
# Deprecated (outside weights)
__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Give the positive RPN examples weight of p * 1 / {num positives}
# and give negatives a weight of (1 - p)
# Set to -1.0 to use uniform example weighting
__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
# whether use class aware box or not
__C.TRAIN.AGNOSTIC = False
#
# Testing options
#
__C.TEST = edict()
# Scales to use during testing (can list multiple scales)
# Each scale is the pixel size of an image's shortest side
__C.TEST.SCALES = (200, 400, 600,800,)
#__C.TEST.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
__C.TEST.MAX_SIZE = 1000
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.5
__C.TEST.USE_FLIPPED=True
# Experimental: treat the (K+1) units in the cls_score layer as linear
# predictors (trained, eg, with one-vs-rest SVMs).
__C.TEST.SVM = False
# Test using bounding-box regressors
__C.TEST.BBOX_REG = True
# Propose boxes
__C.TEST.HAS_RPN = False
# Test using these proposals
__C.TEST.PROPOSAL_METHOD = 'selective_search'
## NMS threshold used on RPN proposals
__C.TEST.RPN_NMS_THRESH = 0.7
## Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TEST.RPN_PRE_NMS_TOP_N = 6000
## Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TEST.RPN_POST_NMS_TOP_N = 300
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TEST.RPN_MIN_SIZE = 16
# whether use class aware box or not
__C.TEST.AGNOSTIC = False
#
# MISC
#
# The mapping from image coordinates to feature map coordinates might cause
# some boxes that are distinct in image space to become identical in feature
# coordinates. If DEDUP_BOXES > 0, then DEDUP_BOXES is used as the scale factor
# for identifying duplicate boxes.
# 1/16 is correct for {Alex,Caffe}Net, VGG_CNN_M_1024, and VGG16
__C.DEDUP_BOXES = 1./16.
# Pixel mean values (BGR order) as a (1, 1, 3) array
# We use the same pixel mean for all networks even though it's not exactly what
# they were trained with
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# For reproducibility
__C.RNG_SEED = 3
# A small number that's used many times
__C.EPS = 1e-14
# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
# Data directory
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))
# Model directory
__C.MODELS_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'models', 'pascal_voc'))
# Name (or path to) the matlab executable
__C.MATLAB = 'matlab'
# Place outputs under an experiments directory
__C.EXP_DIR = 'default'
# Use GPU implementation of non-maximum suppression
__C.USE_GPU_NMS = True
# Default GPU device id
__C.GPU_ID = 0
def get_output_dir(imdb, net=None):
"""Return the directory where experimental artifacts are placed.
If the directory does not exist, it is created.
A canonical path is built using the name from an imdb and a network
(if not None).
"""
outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))
if net is not None:
outdir = osp.join(outdir, net.name)
if not os.path.exists(outdir):
os.makedirs(outdir)
return outdir
def _merge_a_into_b(a, b):
"""Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a.
"""
if type(a) is not edict:
return
for k, v in a.iteritems():
# a must specify keys that are in b
if not b.has_key(k):
raise KeyError('{} is not a valid config key'.format(k))
# the types must match, too
old_type = type(b[k])
if old_type is not type(v):
if isinstance(b[k], np.ndarray):
v = np.array(v, dtype=b[k].dtype)
else:
raise ValueError(('Type mismatch ({} vs. {}) '
'for config key: {}').format(type(b[k]),
type(v), k))
# recursively merge dicts
if type(v) is edict:
try:
_merge_a_into_b(a[k], b[k])
except:
print('Error under config key: {}'.format(k))
raise
else:
b[k] = v
def cfg_from_file(filename):
"""Load a config file and merge it into the default options."""
import yaml
with open(filename, 'r') as f:
yaml_cfg = edict(yaml.load(f))
_merge_a_into_b(yaml_cfg, __C)
def cfg_from_list(cfg_list):
"""Set config keys via list (e.g., from command line)."""
from ast import literal_eval
assert len(cfg_list) % 2 == 0
for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
key_list = k.split('.')
d = __C
for subkey in key_list[:-1]:
assert d.has_key(subkey)
d = d[subkey]
subkey = key_list[-1]
assert d.has_key(subkey)
try:
value = literal_eval(v)
except:
# handle the case when v is a string literal
value = v
assert type(value) == type(d[subkey]), \
'type {} does not match original type {}'.format(
type(value), type(d[subkey]))
d[subkey] = value
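# Illustrative call (added for clarity, not part of the original file): override
# settings from a CLI-style list of alternating key/value strings, e.g.
#
#     cfg_from_list(['TRAIN.IMS_PER_BATCH', '2', 'TEST.NMS', '0.3'])
#
# literal_eval() converts '2' to an int and '0.3' to a float, so the type check
# against the corresponding defaults passes.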
| 32.443686
| 91
| 0.695876
|
f2e18a6a6f8b14acc5b5214a2fec88b209b18e6b
| 141
|
py
|
Python
|
LinearModel/logistic_reg/test.py
|
Jarvis73/MachineLearning
|
a3add9eb0f45cbcc193ca72236373ae5b4befcb5
|
[
"MIT"
] | 2
|
2021-06-10T05:02:51.000Z
|
2021-09-16T14:23:26.000Z
|
LinearModel/logistic_reg/test.py
|
Jarvis73/MachineLearning
|
a3add9eb0f45cbcc193ca72236373ae5b4befcb5
|
[
"MIT"
] | null | null | null |
LinearModel/logistic_reg/test.py
|
Jarvis73/MachineLearning
|
a3add9eb0f45cbcc193ca72236373ae5b4befcb5
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(-3, 3)
y = -x**5 + x**3 + 4*x
plt.plot(x, y)
plt.ylim(-10, 10)
plt.show()
| 17.625
| 31
| 0.631206
|
98e9cb54c2f1d0b29f02b657b8c2e4d324dfc377
| 3,874
|
py
|
Python
|
injaz/hooks.py
|
erpcloudsystems/injaz
|
ed273d2ff179070e0b44dbc1932ac8ffb13e27de
|
[
"MIT"
] | null | null | null |
injaz/hooks.py
|
erpcloudsystems/injaz
|
ed273d2ff179070e0b44dbc1932ac8ffb13e27de
|
[
"MIT"
] | null | null | null |
injaz/hooks.py
|
erpcloudsystems/injaz
|
ed273d2ff179070e0b44dbc1932ac8ffb13e27de
|
[
"MIT"
] | null | null | null |
from . import __version__ as app_version
app_name = "injaz"
app_title = "Injaz"
app_publisher = "erpcloud.systems"
app_description = "injaztech.net customization"
app_icon = "octicon octicon-file-directory"
app_color = "grey"
app_email = "mg@erpcloud.systems"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/injaz/css/injaz.css"
# app_include_js = "/assets/injaz/js/injaz.js"
# include js, css files in header of web template
# web_include_css = "/assets/injaz/css/injaz.css"
# web_include_js = "/assets/injaz/js/injaz.js"
# include custom scss in every website theme (without file extension ".scss")
# website_theme_scss = "injaz/public/scss/website"
# include js, css files in header of web form
# webform_include_js = {"doctype": "public/js/doctype.js"}
# webform_include_css = {"doctype": "public/css/doctype.css"}
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "injaz.install.before_install"
# after_install = "injaz.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "injaz.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# DocType Class
# ---------------
# Override standard doctype classes
# override_doctype_class = {
# "ToDo": "custom_app.overrides.CustomToDo"
# }
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "injaz.tasks.all"
# ],
# "daily": [
# "injaz.tasks.daily"
# ],
# "hourly": [
# "injaz.tasks.hourly"
# ],
# "weekly": [
# "injaz.tasks.weekly"
# ]
# "monthly": [
# "injaz.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "injaz.install.before_tests"
# Overriding Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "injaz.event.get_events"
# }
#
# each overriding function accepts a `data` argument;
# generated from the base implementation of the doctype dashboard,
# along with any modifications made in other Frappe apps
# override_doctype_dashboards = {
# "Task": "injaz.task.get_dashboard_data"
# }
# exempt linked doctypes from being automatically cancelled
#
# auto_cancel_exempted_doctypes = ["Auto Repeat"]
# User Data Protection
# --------------------
user_data_fields = [
{
"doctype": "{doctype_1}",
"filter_by": "{filter_by}",
"redact_fields": ["{field_1}", "{field_2}"],
"partial": 1,
},
{
"doctype": "{doctype_2}",
"filter_by": "{filter_by}",
"partial": 1,
},
{
"doctype": "{doctype_3}",
"strict": False,
},
{
"doctype": "{doctype_4}"
}
]
# Authentication and authorization
# --------------------------------
# auth_hooks = [
# "injaz.auth.validate"
# ]
| 22.011364
| 78
| 0.653072
|
0120b8f77b81dc0d9be0dd8c17f9fa49544ff089
| 5,090
|
py
|
Python
|
servequnit/scripts.py
|
bnkr/selenit
|
bdbedd930a5d324ddfbebcc0be3998d7d517eced
|
[
"MIT"
] | 1
|
2015-03-04T22:45:52.000Z
|
2015-03-04T22:45:52.000Z
|
servequnit/scripts.py
|
bnkr/selenit
|
bdbedd930a5d324ddfbebcc0be3998d7d517eced
|
[
"MIT"
] | null | null | null |
servequnit/scripts.py
|
bnkr/selenit
|
bdbedd930a5d324ddfbebcc0be3998d7d517eced
|
[
"MIT"
] | null | null | null |
"""
Runs an HTTP server which serves up qunit unit tests.
"""
from __future__ import print_function
import argparse, sys, logging, subprocess, os
from six.moves import urllib
from servequnit.tester import QunitSeleniumTester
from servequnit.factory import ServerFactory
class CliCommand(object):
"""Command pattern converts cli settings into an operation to run."""
def __init__(self, settings):
self.settings = settings
def get_server_factory(self):
"""Turn settings into parameters for factory'ing a server."""
config = dict(
port=self.settings.port,
host=self.settings.host,
test_dir=self.settings.root,
base_dir=os.getcwd(),
)
factory = ServerFactory(**config)
# TODO:
# Present existence errors better.
for name in (self.settings.files or []):
if not name:
continue
if '=' in name:
ident, location = name.split("=")
if location.endswith(".css"):
factory.bind_style(ident, location)
else:
factory.bind_script(ident, location)
else:
name = urllib.parse.urljoin("/static/", name)
if name.endswith(".css"):
factory.style(name)
else:
factory.script(name)
return factory
class SeleniumCommand(CliCommand):
def get_tester_config(self, server):
caps = {}
for capability in (self.settings.capability or []):
name, value = capability.split("=")
caps[name] = value
return dict(url=server.url + "test/",
hub=self.settings.webdriver,
capabilities=caps)
def run(self):
try:
factory = self.get_server_factory()
with factory.server_context() as server:
tester_config = self.get_tester_config(server)
test = QunitSeleniumTester(**tester_config)
test.run()
except QunitSeleniumTester.FailureError as ex:
print("FAIL", ex)
return 1
except KeyboardInterrupt:
pass
print("PASS")
return 0
class BrowserCommand(CliCommand):
def run(self):
try:
factory = self.get_server_factory()
with factory.server_context() as server:
# could be a tester.BrowserTester?
subprocess.call([self.settings.browser, server.url + "test/"])
except KeyboardInterrupt:
pass
return 0
class ServerCommand(CliCommand):
def run(self):
factory = self.get_server_factory()
server = factory.create()
try:
# No need to thread; we just want the startup parts.
server.run_in_current_thread()
except KeyboardInterrupt:
pass
return 0
def get_settings(argv):
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--port", default=8081, help="Port to run on.",
type=int,)
parser.add_argument("-H", "--host", default="localhost",
help="Host to listen on (default localhost).")
parser.add_argument("-w", "--webdriver", default="http://127.0.0.1:4444/wd/hub",
help="Location of your webdriver HTTP endpoint.")
parser.add_argument("-s", "--selenium", action="store_true", default=False,
help="Run tests with selenium and exit.")
parser.add_argument("-b", "--browser", default="unset", nargs="?",
help="Run tests with a web browser command.")
parser.add_argument("-r", "--root", default=os.getcwd(),
help="Root for test /unit files (js test files). (default: pwd)")
parser.add_argument("-c", "--capability", action="append", default=[],
help="Capability of selenium node like 'browserName=firefox'. Add multiple times.")
parser.add_argument("files", nargs="*",
help="Stuff to source in the test file (css or js).",)
settings = parser.parse_args(argv[1:])
if settings.browser == "unset":
settings.browser = None
elif settings.browser == None:
settings.browser = "firefox"
return settings
def configure_logging(settings):
message_format = '%(asctime)s %(levelname)s %(name)s: %(message)s'
time_format = "%Y-%m-%d %H:%M:%S"
logging.basicConfig(level=logging.INFO, format=message_format,
datefmt=time_format)
def servequnit_main():
"""Command-line entry point. If your import paths are set right you can
just call main() as the entire script."""
settings = get_settings(sys.argv)
configure_logging(settings)
if settings.selenium:
command = SeleniumCommand(settings)
elif settings.browser:
command = BrowserCommand(settings)
else:
command = ServerCommand(settings)
sys.exit(command.run())
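# Example invocations (added for illustration; the actual console-script name
# depends on how servequnit_main is wired up in the package's setup):
#
#     servequnit -p 8081 tests/unit.js                       # just serve the tests
#     servequnit -b firefox tests/unit.js                    # open them in a browser
#     servequnit -s -c browserName=firefox tests/unit.js     # run through a Selenium hub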
| 34.62585
| 108
| 0.58389
|
caae465d794051bd8a665a1b3adc87d1f3ab8fb0
| 3,953
|
py
|
Python
|
h2o-py/tests/testdir_algos/glm/pyunit_grid_carsGLM.py
|
huamichaelchen/h2o-3
|
2b52f2240652a1c73c1708762248c0773d0c073e
|
[
"Apache-2.0"
] | null | null | null |
h2o-py/tests/testdir_algos/glm/pyunit_grid_carsGLM.py
|
huamichaelchen/h2o-3
|
2b52f2240652a1c73c1708762248c0773d0c073e
|
[
"Apache-2.0"
] | null | null | null |
h2o-py/tests/testdir_algos/glm/pyunit_grid_carsGLM.py
|
huamichaelchen/h2o-3
|
2b52f2240652a1c73c1708762248c0773d0c073e
|
[
"Apache-2.0"
] | null | null | null |
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import random
import copy
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.grid.grid_search import H2OGridSearch
def grid_cars_GLM():
cars = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
r = cars[0].runif(seed=42)
train = cars[r > .2]
validation_scheme = random.randint(1,3) # 1:none, 2:cross-validation, 3:validation set
print "Validation scheme: {0}".format(validation_scheme)
if validation_scheme == 2:
nfolds = 2
print "Nfolds: 2"
if validation_scheme == 3:
valid = cars[r <= .2]
grid_space = pyunit_utils.make_random_grid_space(algo="glm")
print "Grid space: {0}".format(grid_space)
predictors = ["displacement","power","weight","acceleration","year"]
if grid_space['family'][0] == 'binomial':
response_col = "economy_20mpg"
elif grid_space['family'][0] == 'gaussian':
response_col = "economy"
else:
response_col = "cylinders"
print "Predictors: {0}".format(predictors)
print "Response: {0}".format(response_col)
if grid_space['family'][0] in ['binomial', 'multinomial']:
print "Converting the response column to a factor..."
train[response_col] = train[response_col].asfactor()
if validation_scheme == 3:
valid[response_col] = valid[response_col].asfactor()
#grid_space.update({"lambda":[0.1,0.05,0.01]})
grid_space.pop('family')
print "Grid space: {0}".format(grid_space)
print "Constructing the grid of glm models..."
cars_glm_grid = H2OGridSearch(H2OGeneralizedLinearEstimator, hyper_params=grid_space)
if validation_scheme == 1:
cars_glm_grid.train(x=predictors,y=response_col,training_frame=train)
elif validation_scheme == 2:
cars_glm_grid.train(x=predictors,y=response_col,training_frame=train,nfolds=nfolds)
else:
cars_glm_grid.train(x=predictors,y=response_col,training_frame=train,validation_frame=valid)
print "Performing various checks of the constructed grid..."
print "Check cardinality of grid, that is, the correct number of models have been created..."
size_of_grid_space = 1
for v in grid_space.values():
size_of_grid_space = size_of_grid_space * len(v)
actual_size = len(cars_glm_grid)
assert size_of_grid_space == actual_size, "Expected size of grid to be {0}, but got {1}" \
"".format(size_of_grid_space,actual_size)
print "Duplicate-entries-in-grid-space check"
new_grid_space = copy.deepcopy(grid_space)
for name in grid_space.keys():
if not name == "family":
new_grid_space[name] = grid_space[name] + grid_space[name]
print "The new search space: {0}".format(new_grid_space)
print "Constructing the new grid of glm models..."
cars_glm_grid2 = H2OGridSearch(H2OGeneralizedLinearEstimator, hyper_params=new_grid_space)
if validation_scheme == 1:
cars_glm_grid2.train(x=predictors,y=response_col,training_frame=train)
elif validation_scheme == 2:
cars_glm_grid2.train(x=predictors,y=response_col,training_frame=train,nfolds=nfolds)
else:
cars_glm_grid2.train(x=predictors,y=response_col,training_frame=train,validation_frame=valid)
actual_size2 = len(cars_glm_grid2)
assert actual_size == actual_size2, "Expected duplicates to be ignored. Without dups grid size: {0}. With dups " \
"size: {1}".format(actual_size, actual_size2)
print "Check that the hyper_params that were passed to grid, were used to construct the models..."
for name in grid_space.keys():
print name
pyunit_utils.expect_model_param(cars_glm_grid, name, grid_space[name])
if __name__ == "__main__":
pyunit_utils.standalone_test(grid_cars_GLM)
else:
grid_cars_GLM()
| 42.505376
| 118
| 0.692133
|
9d49feb2bd372a9847d40a88e4e5c1561d0ac111
| 1,600
|
py
|
Python
|
samples/generated_samples/dialogflow_generated_dialogflow_v2_answer_records_list_answer_records_async.py
|
rkdfc93/python-dialogflow
|
a59cff0298ef18674c0b4133ef0a6ab82e288920
|
[
"Apache-2.0"
] | 171
|
2018-09-19T21:16:18.000Z
|
2020-12-07T17:41:10.000Z
|
samples/generated_samples/dialogflow_generated_dialogflow_v2_answer_records_list_answer_records_async.py
|
rkdfc93/python-dialogflow
|
a59cff0298ef18674c0b4133ef0a6ab82e288920
|
[
"Apache-2.0"
] | 150
|
2018-09-25T14:04:28.000Z
|
2020-12-09T21:45:43.000Z
|
samples/generated_samples/dialogflow_generated_dialogflow_v2_answer_records_list_answer_records_async.py
|
rkdfc93/python-dialogflow
|
a59cff0298ef18674c0b4133ef0a6ab82e288920
|
[
"Apache-2.0"
] | 75
|
2018-09-22T14:12:18.000Z
|
2020-12-08T07:12:12.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListAnswerRecords
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_generated_dialogflow_v2_AnswerRecords_ListAnswerRecords_async]
from google.cloud import dialogflow_v2
async def sample_list_answer_records():
# Create a client
client = dialogflow_v2.AnswerRecordsAsyncClient()
# Initialize request argument(s)
request = dialogflow_v2.ListAnswerRecordsRequest(
parent="parent_value",
filter="filter_value",
)
# Make the request
    page_result = await client.list_answer_records(request=request)
# Handle the response
async for response in page_result:
print(response)
# [END dialogflow_generated_dialogflow_v2_AnswerRecords_ListAnswerRecords_async]
| 33.333333
| 85
| 0.764375
|
ea4647a39c0aea1aacc7944f050b6ad6a266d6db
| 3,038
|
py
|
Python
|
kmip/core/messages/payloads/destroy.py
|
vbnmmnbv/PyKMIP
|
4617ae528006178c466fe3945a477f568b596940
|
[
"Apache-2.0"
] | 12
|
2016-09-14T21:59:10.000Z
|
2020-03-11T07:37:25.000Z
|
kmip/core/messages/payloads/destroy.py
|
vbnmmnbv/PyKMIP
|
4617ae528006178c466fe3945a477f568b596940
|
[
"Apache-2.0"
] | 1
|
2021-06-25T15:43:48.000Z
|
2021-06-25T15:43:48.000Z
|
kmip/core/messages/payloads/destroy.py
|
vbnmmnbv/PyKMIP
|
4617ae528006178c466fe3945a477f568b596940
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kmip.core import attributes
from kmip.core import enums
from kmip.core.enums import Tags
from kmip.core.primitives import Struct
from kmip.core.utils import BytearrayStream
# 4.21
class DestroyRequestPayload(Struct):
def __init__(self,
unique_identifier=None):
super(DestroyRequestPayload, self).__init__(enums.Tags.REQUEST_PAYLOAD)
self.unique_identifier = unique_identifier
self.validate()
def read(self, istream):
super(DestroyRequestPayload, self).read(istream)
tstream = BytearrayStream(istream.read(self.length))
if self.is_tag_next(Tags.UNIQUE_IDENTIFIER, tstream):
self.unique_identifier = attributes.UniqueIdentifier()
self.unique_identifier.read(tstream)
self.is_oversized(tstream)
self.validate()
def write(self, ostream):
tstream = BytearrayStream()
if self.unique_identifier is not None:
self.unique_identifier.write(tstream)
# Write the length and value of the request payload
self.length = tstream.length()
super(DestroyRequestPayload, self).write(ostream)
ostream.write(tstream.buffer)
def validate(self):
self.__validate()
def __validate(self):
# TODO (peter-hamilton) Finish implementation.
pass
class DestroyResponsePayload(Struct):
def __init__(self,
unique_identifier=None):
super(DestroyResponsePayload, self).__init__(
enums.Tags.RESPONSE_PAYLOAD)
self.unique_identifier = unique_identifier
self.validate()
def read(self, istream):
super(DestroyResponsePayload, self).read(istream)
tstream = BytearrayStream(istream.read(self.length))
self.unique_identifier = attributes.UniqueIdentifier()
self.unique_identifier.read(tstream)
self.is_oversized(tstream)
self.validate()
def write(self, ostream):
tstream = BytearrayStream()
self.unique_identifier.write(tstream)
# Write the length and value of the request payload
self.length = tstream.length()
super(DestroyResponsePayload, self).write(ostream)
ostream.write(tstream.buffer)
def validate(self):
self.__validate()
def __validate(self):
# TODO (peter-hamilton) Finish implementation.
pass
| 30.686869
| 79
| 0.693548
|
0c30ff1f70605a98959d29216e233b8cbdfccc28
| 19,043
|
py
|
Python
|
pymcr/mcr.py
|
francisco-dlp/pyMCR
|
358f4087dbaeb1fdba3b8c8e38122d2e934d01a1
|
[
"Python-2.0"
] | 1
|
2021-01-05T08:44:51.000Z
|
2021-01-05T08:44:51.000Z
|
pymcr/mcr.py
|
francisco-dlp/pyMCR
|
358f4087dbaeb1fdba3b8c8e38122d2e934d01a1
|
[
"Python-2.0"
] | null | null | null |
pymcr/mcr.py
|
francisco-dlp/pyMCR
|
358f4087dbaeb1fdba3b8c8e38122d2e934d01a1
|
[
"Python-2.0"
] | null | null | null |
""" MCR Main Class for Computation"""
import sys as _sys
import numpy as _np
import logging as _logging
from pymcr.regressors import OLS, NNLS
from pymcr.constraints import ConstraintNonneg
from pymcr.metrics import mse
# create logger for mcr.py and set default level
_logger = _logging.getLogger(__name__)
_logger.setLevel(_logging.INFO)
class McrAR:
"""
Multivariate Curve Resolution - Alternating Regression
D = CS^T
Parameters
----------
c_regr : str, class
Instantiated regression class (or string, see Notes) for calculating
the C matrix
st_regr : str, class
Instantiated regression class (or string, see Notes) for calculating
the S^T matrix
c_fit_kwargs : dict
kwargs sent to c_regr.fit method
st_fit_kwargs : dict
kwargs sent to st_regr.fit method
c_constraints : list
List of constraints applied to calculation of C matrix
st_constraints : list
List of constraints applied to calculation of S^T matrix
max_iter : int
Maximum number of iterations. One iteration calculates both C and S^T
err_fcn : function
Function to calculate error/differences after each least squares
calculation (ie twice per iteration). Outputs to err attribute.
tol_increase : float
Factor increase to allow in err attribute. Set to 0 for no increase
allowed. E.g., setting to 1.0 means the err can double per iteration.
tol_n_increase : int
Number of consecutive iterations for which the err attribute can
increase
tol_err_change : float
If err changes less than tol_err_change, per iteration, break.
tol_n_above_min : int
Number of half-iterations that can be performed without reaching a
new error-minimum
Attributes
----------
err : list
List of calculated errors (from err_fcn) after each least squares (ie
twice per iteration)
C_ : ndarray [n_samples, n_targets]
Most recently calculated C matrix (that did not cause a tolerance
failure)
ST_ : ndarray [n_targets, n_features]
Most recently calculated S^T matrix (that did not cause a tolerance
failure)
C_opt_ : ndarray [n_samples, n_targets]
[Optimal] C matrix for lowest err attribute
ST_opt_ : ndarray [n_targets, n_features]
[Optimal] ST matrix for lowest err attribute
n_iter : int
Total number of iterations performed
n_features : int
Total number of features, e.g. spectral frequencies.
n_samples : int
Total number of samples (e.g., pixels)
n_targets : int
Total number of targets (e.g., pure analytes)
n_iter_opt : int
Iteration when optimal C and ST calculated
exit_max_iter_reached : bool
Exited iterations due to maximum number of iteration reached (max_iter
parameter)
exit_tol_increase : bool
Exited iterations due to maximum fractional increase in error metric
(via err_fcn)
exit_tol_n_increase : bool
Exited iterations due to maximum number of consecutive increases in
error metric (via err fcn)
exit_tol_err_change : bool
Exited iterations due to error metric change that is smaller than
tol_err_change
exit_tol_n_above_min : bool
Exited iterations due to maximum number of half-iterations for which
the error metric increased above the minimum error
Notes
-----
- Built-in regressor classes (str can be used): OLS (ordinary least
squares), NNLS (non-negatively constrained least squares). See
mcr.regressors.
- Built-in regressor methods can be given as a string to c_regr, st_regr;
though instantiating an imported class gives more flexibility.
- Setting any tolerance to None turns that check off
"""
def __init__(self, c_regr=OLS(), st_regr=OLS(), c_fit_kwargs={},
st_fit_kwargs={}, c_constraints=[ConstraintNonneg()],
st_constraints=[ConstraintNonneg()],
max_iter=50, err_fcn=mse,
tol_increase=0.0, tol_n_increase=10, tol_err_change=None,
tol_n_above_min=10
):
"""
Multivariate Curve Resolution - Alternating Regression
"""
self.max_iter = max_iter
self.tol_increase = tol_increase
self.tol_n_increase = tol_n_increase
self.tol_err_change = tol_err_change
self.tol_n_above_min = tol_n_above_min
self.err_fcn = err_fcn
self.err = None
self.c_constraints = c_constraints
self.st_constraints = st_constraints
self.c_regressor = self._check_regr(c_regr)
self.st_regressor = self._check_regr(st_regr)
self.c_fit_kwargs = c_fit_kwargs
self.st_fit_kwargs = st_fit_kwargs
self.C_ = None
self.ST_ = None
self.C_opt_ = None
self.ST_opt_ = None
self.n_iter_opt = None
self.n_iter = None
self.n_increase = None
self.n_above_min = None
self.exit_max_iter_reached = False
self.exit_tol_increase = False
self.exit_tol_n_increase = False
self.exit_tol_err_change = False
self.exit_tol_n_above_min = False
# Saving every C or S^T matrix at each iteration
# Could create huge memory usage
self._saveall_st = False
self._saveall_c = False
self._saved_st = []
self._saved_c = []
def _check_regr(self, mth):
"""
Check regressor method. If acceptable strings, instantiate and
return object. If instantiated class, make sure it has a fit
attribute.
"""
if isinstance(mth, str):
if mth.upper() == 'OLS':
return OLS()
elif mth.upper() == 'NNLS':
return NNLS()
else:
raise ValueError('{} is unknown. Use NNLS or OLS.'.format(mth))
elif hasattr(mth, 'fit'):
return mth
else:
raise ValueError('Input class '
'{} does not have a \'fit\' method'.format(mth))
@property
def D_(self):
""" D matrix with current C and S^T matrices """
return _np.dot(self.C_, self.ST_)
@property
def D_opt_(self):
""" D matrix with optimal C and S^T matrices """
return _np.dot(self.C_opt_, self.ST_opt_)
@property
def n_features(self):
""" Number of features """
if self.ST_ is not None:
return self.ST_.shape[-1]
else:
return None
@property
def n_targets(self):
""" Number of targets """
if self.C_ is not None:
return self.C_.shape[1]
else:
return None
@property
def n_samples(self):
""" Number of samples """
if self.C_ is not None:
return self.C_.shape[0]
else:
return None
def _ismin_err(self, val):
""" Is the current error the minimum """
if len(self.err) == 0:
return True
else:
return ([val > x for x in self.err].count(True) == 0)
def fit(self, D, C=None, ST=None, st_fix=None, c_fix=None, c_first=True,
verbose=False, post_iter_fcn=None, post_half_fcn=None):
"""
Perform MCR-AR. D = CS^T. Solve for C and S^T iteratively.
Parameters
----------
D : ndarray
D matrix
C : ndarray
Initial C matrix estimate. Only provide initial C OR S^T.
ST : ndarray
Initial S^T matrix estimate. Only provide initial C OR S^T.
st_fix : list
The spectral component numbers to keep fixed.
c_fix : list
The concentration component numbers to keep fixed.
c_first : bool
Calculate C first when both C and ST are provided. c_fix and st_fix
must also be provided in this circumstance.
verbose : bool
Log iteration and per-least squares err results. See Notes.
post_iter_fcn : function
Function to perform after each iteration
post_half_fcn : function
Function to perform after half-iteration
Notes
-----
- pyMCR (>= 0.3.1) uses the native Python logging module
rather than print statements; thus, to see the messages, one will
need to log-to-file or stream to stdout. More info is available in
the docs.
"""
if verbose:
_logger.setLevel(_logging.DEBUG)
else:
_logger.setLevel(_logging.INFO)
# Ensure only C or ST provided
if (C is None) & (ST is None):
raise TypeError('C or ST estimate must be provided')
elif (C is not None) & (ST is not None) & ((c_fix is None) |
(st_fix is None)):
err_str1 = 'Only C or ST estimate must be provided, '
raise TypeError(
err_str1 + 'unless c_fix and st_fix are both provided')
else:
self.C_ = C
self.ST_ = ST
self.n_increase = 0
self.n_above_min = 0
self.err = []
# Both C and ST provided. special_skip_c comes into play below
both_condition = (self.ST_ is not None) & (self.C_ is not None)
for num in range(self.max_iter):
self.n_iter = num + 1
# Both st and c provided, but c_first is False
if both_condition & (num == 0) & (not c_first):
special_skip_c = True
else:
special_skip_c = False
if (self.ST_ is not None) & (not special_skip_c):
# Debugging feature -- saves every S^T matrix in a list
# Can create huge memory usage
if self._saveall_st:
self._saved_st.append(self.ST_)
# * Target is the feature of the regression
self.c_regressor.fit(self.ST_.T, D.T, **self.c_fit_kwargs)
C_temp = self.c_regressor.coef_
# Apply fixed C's
if c_fix:
C_temp[:, c_fix] = self.C_[:, c_fix]
# Apply c-constraints
for constr in self.c_constraints:
C_temp = constr.transform(C_temp)
# Apply fixed C's
if c_fix:
C_temp[:, c_fix] = self.C_[:, c_fix]
D_calc = _np.dot(C_temp, self.ST_)
err_temp = self.err_fcn(C_temp, self.ST_, D, D_calc)
if self._ismin_err(err_temp):
self.C_opt_ = 1 * C_temp
self.ST_opt_ = 1 * self.ST_
self.n_iter_opt = num + 1
self.n_above_min = 0
else:
self.n_above_min += 1
if self.tol_n_above_min is not None:
if self.n_above_min > self.tol_n_above_min:
err_str1 = 'Half-iterated {} times since ' \
'min '.format(self.n_above_min)
err_str2 = 'error. Exiting.'
_logger.info(err_str1 + err_str2)
self.exit_tol_n_above_min = True
break
# Calculate error fcn and check for tolerance increase
if len(self.err) == 0:
self.err.append(1 * err_temp)
self.C_ = 1 * C_temp
elif self.tol_increase is None:
self.err.append(1 * err_temp)
self.C_ = 1 * C_temp
elif err_temp <= self.err[-1] * (1 + self.tol_increase):
self.err.append(1 * err_temp)
self.C_ = 1 * C_temp
else:
                    err_str1 = 'Error increased above fractional ' \
                               'tol_increase (C iter). Exiting'
_logger.info(err_str1)
self.exit_tol_increase = True
break
# Check if err went up
if len(self.err) > 1:
if self.err[-1] > self.err[-2]: # Error increased
self.n_increase += 1
else:
self.n_increase *= 0
# Break if too many error-increases in a row
if self.tol_n_increase is not None:
if self.n_increase > self.tol_n_increase:
out_str1 = 'Maximum error increases reached '
_logger.info(
out_str1 + '({}) (C iter). '
'Exiting.'.format(self.tol_n_increase))
self.exit_tol_n_increase = True
break
_logger.debug('Iter: {} (C)\t{}: '
'{:.4e}'.format(self.n_iter,
self.err_fcn.__name__,
err_temp))
if post_half_fcn is not None:
post_half_fcn(self.C_, self.ST_, D, D_calc)
if self.C_ is not None:
# Debugging feature -- saves every C matrix in a list
# Can create huge memory usage
if self._saveall_c:
self._saved_c.append(self.C_)
# * Target is the feature of the regression
self.st_regressor.fit(self.C_, D, **self.st_fit_kwargs)
ST_temp = self.st_regressor.coef_.T
# Apply fixed ST's
if st_fix:
ST_temp[st_fix] = self.ST_[st_fix]
# Apply ST-constraints
for constr in self.st_constraints:
ST_temp = constr.transform(ST_temp)
# Apply fixed ST's
if st_fix:
ST_temp[st_fix] = self.ST_[st_fix]
D_calc = _np.dot(self.C_, ST_temp)
err_temp = self.err_fcn(self.C_, ST_temp, D, D_calc)
# Calculate error fcn and check for tolerance increase
if self._ismin_err(err_temp):
self.ST_opt_ = 1 * ST_temp
self.C_opt_ = 1 * self.C_
self.n_iter_opt = num + 1
self.n_above_min = 0
else:
self.n_above_min += 1
if self.tol_n_above_min is not None:
if self.n_above_min > self.tol_n_above_min:
err_str1 = 'Half-iterated {} times ' \
'since min '.format(self.n_above_min)
err_str2 = 'error. Exiting.'
_logger.info(err_str1 + err_str2)
self.exit_tol_n_above_min = True
break
if len(self.err) == 0:
self.err.append(1 * err_temp)
self.ST_ = 1 * ST_temp
elif self.tol_increase is None:
self.err.append(1 * err_temp)
self.ST_ = 1 * ST_temp
elif err_temp <= self.err[-1] * (1 + self.tol_increase):
self.err.append(1 * err_temp)
self.ST_ = 1 * ST_temp
else:
err_str1 = 'Error increased above fractional ' \
'tol_increase (ST iter). Exiting'
_logger.info(err_str1)
self.exit_tol_increase = True
break
# Check if err went up
if len(self.err) > 1:
if self.err[-1] > self.err[-2]: # Error increased
self.n_increase += 1
else:
self.n_increase *= 0
# Break if too many error-increases in a row
if self.tol_n_increase is not None:
if self.n_increase > self.tol_n_increase:
out_str = 'Maximum error increases reached '
_logger.info(out_str +
'({}) (ST iter). '
'Exiting.'.format(self.tol_n_increase))
self.exit_tol_n_increase = True
break
_logger.debug('Iter: {} (ST)\t{}: '
'{:.4e}'.format(self.n_iter,
self.err_fcn.__name__, err_temp))
if post_half_fcn is not None:
post_half_fcn(self.C_, self.ST_, D, D_calc)
if post_iter_fcn is not None:
post_iter_fcn(self.C_, self.ST_, D, D_calc)
if self.n_iter >= self.max_iter:
_logger.info('Max iterations reached ({}).'.format(num + 1))
self.exit_max_iter_reached = True
break
self.n_iter = num + 1
# Check if err changed (absolute value), per iteration, less
# than abs(tol_err_change)
if (self.tol_err_change is not None) & (len(self.err) > 2):
err_differ = _np.abs(self.err[-1] - self.err[-3])
if err_differ < _np.abs(self.tol_err_change):
_logger.info('Change in err below tol_err_change '
'({:.4e}). Exiting.'.format(err_differ))
self.exit_tol_err_change = True
break
if __name__ == '__main__': # pragma: no cover
# PyMCR uses the Logging facility to capture messaging
# Sends logging messages to stdout (prints them)
stdout_handler = _logging.StreamHandler(stream=_sys.stdout)
stdout_format = _logging.Formatter('%(message)s')
stdout_handler.setFormatter(stdout_format)
_logger.addHandler(stdout_handler)
M = 21
N = 21
P = 101
n_components = 2
C_img = _np.zeros((M, N, n_components))
C_img[..., 0] = _np.dot(_np.ones((M, 1)), _np.linspace(0, 1, N)[None, :])
C_img[..., 1] = 1 - C_img[..., 0]
St_known = _np.zeros((n_components, P))
St_known[0, 40:60] = 1
St_known[1, 60:80] = 2
C_known = C_img.reshape((-1, n_components))
D_known = _np.dot(C_known, St_known)
mcrar = McrAR()
mcrar.fit(D_known, ST=St_known, verbose=True)
# assert_equal(1, mcrar.n_iter_opt)
assert ((mcrar.D_ - D_known) ** 2).mean() < 1e-10
assert ((mcrar.D_opt_ - D_known) ** 2).mean() < 1e-10
mcrar = McrAR()
mcrar.fit(D_known, C=C_known)
# assert_equal(1, mcrar.n_iter_opt)
assert ((mcrar.D_ - D_known) ** 2).mean() < 1e-10
assert ((mcrar.D_opt_ - D_known) ** 2).mean() < 1e-10
| 34.435805
| 79
| 0.538623
|
d74864083f1fe6e61c107e538e978caf85306aed
| 6,847
|
py
|
Python
|
bootstrap3_datetime/widgets.py
|
Solanar/django-bootstrap3-datetimepicker
|
cc67aa79c2843f6c477a6b2f0053aee25cd8c313
|
[
"Apache-2.0"
] | 2
|
2018-04-29T05:31:59.000Z
|
2018-04-29T05:32:15.000Z
|
bootstrap3_datetime/widgets.py
|
AntycSolutions/django-bootstrap3-datetimepicker
|
cc67aa79c2843f6c477a6b2f0053aee25cd8c313
|
[
"Apache-2.0"
] | null | null | null |
bootstrap3_datetime/widgets.py
|
AntycSolutions/django-bootstrap3-datetimepicker
|
cc67aa79c2843f6c477a6b2f0053aee25cd8c313
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.forms.utils import flatatt
from django.forms.widgets import DateTimeInput
from django.utils import translation
from django.utils.safestring import mark_safe
from django.utils.html import conditional_escape
try:
import json
except ImportError:
from django.utils import simplejson as json
try:
from django.utils.encoding import force_unicode as force_text
except ImportError: # python3
from django.utils.encoding import force_text
class DateTimePicker(DateTimeInput):
class Media:
class JsFiles(object):
def __iter__(self):
yield 'bootstrap3_datetime/js/moment.min.js'
yield 'bootstrap3_datetime/js/bootstrap-datetimepicker.min.js'
lang = translation.get_language()
if lang:
lang = lang.lower()
                    # Some language codes are longer than two characters or contain uppercase letters.
lang_map = {
'ar-ma': 'ar-ma',
'en-au': 'en-au',
'en-ca': 'en-ca',
'en-gb': 'en-gb',
'en-us': 'en-us',
'fa-ir': 'fa-ir',
'fr-ca': 'fr-ca',
'ms-my': 'ms-my',
                        'pt-br': 'pt-BR',
'rs-latin': 'rs-latin',
'tzm-la': 'tzm-la',
'tzm': 'tzm',
'zh-cn': 'zh-CN',
'zh-tw': 'zh-TW',
'zh-hk': 'zh-TW',
}
if len(lang) > 2:
lang = lang_map.get(lang, 'en-us')
if lang not in ('en', 'en-us'):
yield 'bootstrap3_datetime/js/locales/bootstrap-datetimepicker.%s.js' % (lang)
# js = JsFiles()
# css = {'all': ('bootstrap3_datetime/css/bootstrap-datetimepicker.min.css',), }
# http://momentjs.com/docs/#/parsing/string-format/
# http://docs.python.org/2/library/datetime.html#strftime-strptime-behavior
format_map = (('DDD', r'%j'),
('DD', r'%d'),
('MMMM', r'%B'),
('MMM', r'%b'),
('MM', r'%m'),
('YYYY', r'%Y'),
('YY', r'%y'),
('HH', r'%H'),
('hh', r'%I'),
('mm', r'%M'),
('ss', r'%S'),
('a', r'%p'),
('ZZ', r'%z'),
)
@classmethod
def conv_datetime_format_py2js(cls, format):
for js, py in cls.format_map:
format = format.replace(py, js)
return format
@classmethod
def conv_datetime_format_js2py(cls, format):
for js, py in cls.format_map:
format = format.replace(js, py)
return format
html_template = '''
<div%(div_attrs)s>
<input%(input_attrs)s/>
<span class="input-group-addon">
<span%(icon_attrs)s></span>
</span>
</div>'''
js_template = '''
<script>
(function(window) {
var callback = function() {
$(function(){
$("#%(picker_id)s:has(input:not([readonly],[disabled]))").datetimepicker(%(options)s);
$("#%(input_id)s:not([readonly],[disabled])").datetimepicker(%(options)s);
});
};
if(window.addEventListener)
window.addEventListener("load", callback, false);
else if (window.attachEvent)
window.attachEvent("onload", callback);
else window.onload = callback;
})(window);
</script>'''
def __init__(self, attrs=None, format=None, options=None, div_attrs=None, icon_attrs=None):
if not icon_attrs:
icon_attrs = {'class': 'glyphicon glyphicon-calendar'}
if not div_attrs:
div_attrs = {'class': 'input-group date'}
if format is None and options and options.get('format'):
format = self.conv_datetime_format_js2py(options.get('format'))
super(DateTimePicker, self).__init__(attrs, format)
if 'class' not in self.attrs:
self.attrs['class'] = 'form-control'
self.div_attrs = div_attrs and div_attrs.copy() or {}
self.icon_attrs = icon_attrs and icon_attrs.copy() or {}
self.picker_id = self.div_attrs.get('id') or None
        if options == False:  # the datetimepicker is left uninitialized only when options is exactly False
self.options = False
else:
self.options = options and options.copy() or {}
if format and not self.options.get('format') and not self.attrs.get('date-format'):
self.options['format'] = self.conv_datetime_format_py2js(format)
def render(self, name, value, attrs=None, renderer=None):
if value is None:
value = ''
try:
input_attrs = self.build_attrs(
attrs, type=self.input_type, name=name
)
except TypeError:
input_attrs = self.build_attrs(
attrs, extra_attrs={'type': self.input_type, 'name': name}
)
if value != '':
# Only add the 'value' attribute if a value is non-empty.
input_attrs['value'] = force_text(self.format_value(value))
input_attrs = dict([(key, conditional_escape(val)) for key, val in input_attrs.items()]) # python2.6 compatible
if not self.picker_id:
self.picker_id = (input_attrs.get('id', '') +
'_pickers').replace(' ', '_')
self.div_attrs['id'] = self.picker_id
picker_id = conditional_escape(self.picker_id)
div_attrs = dict(
[(key, conditional_escape(val)) for key, val in self.div_attrs.items()]) # python2.6 compatible
icon_attrs = dict([(key, conditional_escape(val)) for key, val in self.icon_attrs.items()])
input_attrs.update(self.attrs)
html = self.html_template % dict(div_attrs=flatatt(div_attrs),
input_attrs=flatatt(input_attrs),
icon_attrs=flatatt(icon_attrs))
if self.options:
self.options['language'] = translation.get_language()
js = self.js_template % dict(picker_id=picker_id,
options=json.dumps(self.options or {}),
input_id=conditional_escape(input_attrs.get('id')))
else:
js = ''
        js = ''  # the inline initialisation script is dropped; the picker is wired up manually elsewhere
return mark_safe(force_text(html + js))
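# Illustrative form integration (a sketch assuming a standard Django form):
#
#   from django import forms
#
#   class EventForm(forms.Form):
#       start = forms.DateTimeField(
#           widget=DateTimePicker(options={'format': 'YYYY-MM-DD HH:mm'}))
#
# Note that render() emits only the html_template markup: `js` is cleared above, so
# the datetimepicker JavaScript is expected to be initialised manually elsewhere.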
| 42.006135
| 120
| 0.508398
|
3b441e47c08821267d3e93249b2f1f9673b11f56
| 5,197
|
py
|
Python
|
runs/Re200_St0.6_AR1.27_psi110/scripts/process_qcrit_wx_snapshot.py
|
mesnardo/petibm-rollingpitching
|
39f7ed9b88973727bed6955e31d99754d7627c9f
|
[
"BSD-3-Clause"
] | 2
|
2021-09-06T03:37:06.000Z
|
2021-12-01T02:39:13.000Z
|
runs/Re200_St0.6_AR1.27_psi110/scripts/process_qcrit_wx_snapshot.py
|
mesnardo/petibm-rollingpitching
|
39f7ed9b88973727bed6955e31d99754d7627c9f
|
[
"BSD-3-Clause"
] | 3
|
2020-03-30T21:52:01.000Z
|
2021-07-11T13:11:35.000Z
|
runs/Re200_St0.6_AR1.27_psi110/scripts/process_qcrit_wx_snapshot.py
|
mesnardo/petibm-rollingpitching
|
39f7ed9b88973727bed6955e31d99754d7627c9f
|
[
"BSD-3-Clause"
] | 4
|
2021-02-22T21:54:16.000Z
|
2022-01-18T18:39:34.000Z
|
"""Post-process images of Q-criterion to compute inclination angles."""
import collections
import math
from matplotlib import patches, pyplot
import pathlib
import rodney
Point = collections.namedtuple('Point', ['x', 'y'])
def get_midpoint(point1, point2):
"""Return the midpoint."""
return Point(0.5 * (point1.x + point2.x),
0.5 * (point1.y + point2.y))
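# For example, get_midpoint(Point(0.0, 0.0), Point(2.0, 4.0)) returns Point(x=1.0, y=2.0).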
class Line(object):
"""Define a line."""
def __init__(self, point1, point2):
"""Compute slope and intercept given two reference points."""
self.p1, self.p2 = point1, point2
self.a, self.b = self._slope_intercept(point1, point2)
def _slope_intercept(self, point1, point2):
"""Compute and return slope and intercept."""
a = (point2.y - point1.y) / (point2.x - point1.x)
b = point1.y - a * point1.x
return a, b
def y(self, x):
"""Compute y given x."""
if hasattr(x, "__iter__"):
return (self.a * xi + self.b for xi in x)
return self.a * x + self.b
def get_inclination(self, degrees=True):
"""Compute adn return inclination angle w.r.t. horizontal axis."""
x1, x2 = 0.0, 1.0
y1, y2 = self.y([x1, x2])
length = math.sqrt((y2 - y1)**2 + (x2 - x1)**2)
alpha = math.acos(abs(x2 - x1) / length)
if degrees:
alpha *= 180.0 / math.pi
return alpha
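    # Equivalently alpha = atan(|slope|): with the unit horizontal step (x2 - x1 = 1),
    # length = sqrt(1 + a**2) and acos(1 / sqrt(1 + a**2)) reduces to atan(|a|).
    # For example, a slope of 1 gives 45 degrees and a slope of 0.5 about 26.6 degrees.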
def extend(self, left=0.0, right=0.0):
"""Return a new extended line."""
x1, x2 = self.p1.x - left, self.p2.x + right
y1, y2 = self.y([x1, x2])
return Line(Point(x1, y1), Point(x2, y2))
def limits(self):
"""Return line limits as tuples."""
return (self.p1.x, self.p2.x), (self.p1.y, self.p2.y)
def annotate_angle(ax, text, xloc, line, hline, buf=50):
"""Annotate angle between two given lines."""
posA, posB = Point(xloc, line.y(xloc)), Point(xloc, hline.y(xloc))
arrowstyle = '<->,head_length=5,head_width=3'
arrow = patches.FancyArrowPatch(posA=(posA.x, posA.y),
posB=(posB.x, posB.y),
arrowstyle=arrowstyle,
color='black', linewidth=2.0,
connectionstyle='arc3,rad=-0.5')
ax.add_patch(arrow)
ax.annotate(text, xy=(xloc + buf, get_midpoint(posA, posB).y))
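# For example, annotate_angle(ax, r'$\alpha$', 500, line, hline) draws a curved
# double-headed arrow between the two lines at x = 500 and writes the label 50
# pixels to the right of the arrow's vertical midpoint.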
# Parse command line and set directories.
args = rodney.parse_command_line()
simudir = pathlib.Path(__file__).absolute().parents[1]
figdir = simudir / 'figures'
# Set default font family and size for Matplotlib figures.
pyplot.rc('font', family='serif', size=16)
# --------------------------------------------------------------
# Post-process lateral view: compute and add inclination angles.
# --------------------------------------------------------------
# Load PNG image from file.
filepath = figdir / 'qcrit_wx_lateral_view_0008500.png'
with open(filepath, 'rb') as infile:
img = pyplot.imread(infile)
# Plot the image.
fig, ax = pyplot.subplots(figsize=(6.0, 4.0))
ax.imshow(img)
xstart, xend, yend, ystart = ax.axis('scaled', adjustable='box')
ymid = 0.5 * (yend - ystart)
# Compute and plot inclination line for alpha.
ring1 = get_midpoint(Point(247, 168), Point(334, 215)) # first vortex ring
ring2 = get_midpoint(Point(399, 95), Point(506, 152)) # second vortex ring
alpha = Line(ring1, ring2) # line passing through the vortex ring centers
print(f'alpha: {alpha.get_inclination():.2f}')
line = alpha.extend(left=100, right=200)
ax.plot(*line.limits(), color='black', linestyle='--', linewidth=3.0)
hline = Line(Point(xstart, ymid), Point(xend, ymid))
ax.plot(*hline.limits(), color='black', linestyle='-.', linewidth=3.0)
annotate_angle(ax, r'$\alpha$', 500, line, hline, buf=50)
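# Rough sanity check (pixel coordinates, y increasing downward): the ring centers are
# about (290.5, 191.5) and (452.5, 123.5), a slope of roughly -0.42, so alpha should
# print as approximately 22.8 degrees.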
# Set limits and remove axes.
ax.axis((xstart, xend - 100, yend, ystart))
ax.axis('off')
fig.tight_layout()
# Save figure.
if args.save_figures:
filepath = figdir / 'qcrit_wx_lateral_view_0008500_post.png'
fig.savefig(filepath, dpi=300, bbox_inches='tight')
# ----------------------
# Post-process top view.
# ----------------------
# Load PNG image from file.
filepath = figdir / 'qcrit_wx_top_view_0008500.png'
with open(filepath, 'rb') as infile:
img = pyplot.imread(infile)
# Plot the image.
fig, ax = pyplot.subplots(figsize=(6.0, 4.0))
ax.imshow(img)
xstart, xend, yend, ystart = ax.axis('scaled', adjustable='box')
ax.axhline(0.5 * (yend - ystart), xmin=xstart, xmax=xend,
color='black', linestyle='-.', linewidth=3.0)
# Compute inclination angle gamma.
ring1 = get_midpoint(Point(274, 127), Point(274, 259)) # first vortex ring
ring2 = get_midpoint(Point(430, 103), Point(430, 249)) # second vortex ring
gamma = Line(ring1, ring2) # line passing through the vortex ring centers
print(f'gamma: {gamma.get_inclination():.2f}')
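# Rough sanity check: ring centers of about (274, 193) and (430, 176) give a slope of
# roughly -0.11, so gamma should print as approximately 6.2 degrees.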
# Set limits and remove axis.
ax.axis((xstart, xend - 100, yend - 50, ystart + 50))
ax.axis('off')
fig.tight_layout()
# Save figure.
if args.save_figures:
filepath = figdir / 'qcrit_wx_top_view_0008500_post.png'
fig.savefig(filepath, dpi=300, bbox_inches='tight')
# Display figures.
if args.show_figures:
pyplot.show()
| 33.96732
| 76
| 0.615932
|