# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the NetApp-specific NFS driver module."""
from lxml import etree
import mock
import mox
from mox import IgnoreArg
from mox import IsA
import os
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.netapp import api
from cinder.volume.drivers.netapp import nfs as netapp_nfs
from cinder.volume.drivers.netapp import utils
from oslo.config import cfg
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def create_configuration():
configuration = mox.MockObject(conf.Configuration)
configuration.append_config_values(mox.IgnoreArg())
configuration.nfs_mount_point_base = '/mnt/test'
configuration.nfs_mount_options = None
return configuration
class FakeVolume(object):
def __init__(self, size=0):
self.size = size
self.id = hash(self)
self.name = None
def __getitem__(self, key):
return self.__dict__[key]
def __setitem__(self, key, val):
self.__dict__[key] = val
class FakeSnapshot(object):
def __init__(self, volume_size=0):
self.volume_name = None
self.name = None
self.volume_id = None
self.volume_size = volume_size
self.user_id = None
self.status = None
def __getitem__(self, key):
return self.__dict__[key]
class FakeResponse(object):
def __init__(self, status):
"""Initialize FakeResponse.
:param status: Either 'failed' or 'passed'
"""
self.Status = status
if status == 'failed':
self.Reason = 'Sample error'
class NetappDirectCmodeNfsDriverTestCase(test.TestCase):
"""Test direct NetApp C Mode driver."""
def setUp(self):
super(NetappDirectCmodeNfsDriverTestCase, self).setUp()
self._custom_setup()
def test_create_snapshot(self):
"""Test snapshot can be created and deleted."""
mox = self.mox
drv = self._driver
mox.StubOutWithMock(drv, '_clone_volume')
drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
mox.ReplayAll()
drv.create_snapshot(FakeSnapshot())
mox.VerifyAll()
def test_create_volume_from_snapshot(self):
"""Tests volume creation from snapshot."""
drv = self._driver
mox = self.mox
volume = FakeVolume(1)
snapshot = FakeSnapshot(1)
location = '127.0.0.1:/nfs'
expected_result = {'provider_location': location}
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_get_volume_location')
mox.StubOutWithMock(drv, 'local_path')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
drv._get_volume_location(IgnoreArg()).AndReturn(location)
drv.local_path(IgnoreArg()).AndReturn('/mnt')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
drv._set_rw_permissions_for_all(IgnoreArg())
mox.ReplayAll()
loc = drv.create_volume_from_snapshot(volume, snapshot)
self.assertEqual(loc, expected_result)
mox.VerifyAll()
def _prepare_delete_snapshot_mock(self, snapshot_exists):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_get_provider_location')
mox.StubOutWithMock(drv, '_volume_not_present')
mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
if snapshot_exists:
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_get_volume_path')
drv._get_provider_location(IgnoreArg())
drv._get_provider_location(IgnoreArg())
drv._volume_not_present(IgnoreArg(), IgnoreArg())\
.AndReturn(not snapshot_exists)
if snapshot_exists:
drv._get_volume_path(IgnoreArg(), IgnoreArg())
drv._execute('rm', None, run_as_root=True)
drv._post_prov_deprov_in_ssc(IgnoreArg())
mox.ReplayAll()
return mox
def test_delete_existing_snapshot(self):
drv = self._driver
mox = self._prepare_delete_snapshot_mock(True)
drv.delete_snapshot(FakeSnapshot())
mox.VerifyAll()
def test_delete_missing_snapshot(self):
drv = self._driver
mox = self._prepare_delete_snapshot_mock(False)
drv.delete_snapshot(FakeSnapshot())
mox.VerifyAll()
def _custom_setup(self):
kwargs = {}
kwargs['netapp_mode'] = 'proxy'
kwargs['configuration'] = create_configuration()
self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver(**kwargs)
def test_check_for_setup_error(self):
mox = self.mox
drv = self._driver
required_flags = [
'netapp_transport_type',
'netapp_login',
'netapp_password',
'netapp_server_hostname',
'netapp_server_port']
# set required flags
for flag in required_flags:
setattr(drv.configuration, flag, None)
# check that an exception is raised when the flags are not set
self.assertRaises(exception.CinderException,
drv.check_for_setup_error)
# set required flags
for flag in required_flags:
setattr(drv.configuration, flag, 'val')
setattr(drv, 'ssc_enabled', False)
mox.StubOutWithMock(netapp_nfs.NetAppDirectNfsDriver, '_check_flags')
netapp_nfs.NetAppDirectNfsDriver._check_flags()
mox.ReplayAll()
drv.check_for_setup_error()
mox.VerifyAll()
# restore initial FLAGS
for flag in required_flags:
delattr(drv.configuration, flag)
def test_do_setup(self):
mox = self.mox
drv = self._driver
mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, 'do_setup')
mox.StubOutWithMock(drv, '_get_client')
mox.StubOutWithMock(drv, '_do_custom_setup')
netapp_nfs.NetAppNFSDriver.do_setup(IgnoreArg())
drv._get_client()
drv._do_custom_setup(IgnoreArg())
mox.ReplayAll()
drv.do_setup(IsA(context.RequestContext))
mox.VerifyAll()
def _prepare_clone_mock(self, status):
drv = self._driver
mox = self.mox
volume = FakeVolume()
setattr(volume, 'provider_location', '127.0.0.1:/nfs')
mox.StubOutWithMock(drv, '_get_host_ip')
mox.StubOutWithMock(drv, '_get_export_path')
mox.StubOutWithMock(drv, '_get_if_info_by_ip')
mox.StubOutWithMock(drv, '_get_vol_by_junc_vserver')
mox.StubOutWithMock(drv, '_clone_file')
mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
drv._get_host_ip(IgnoreArg()).AndReturn('127.0.0.1')
drv._get_export_path(IgnoreArg()).AndReturn('/nfs')
drv._get_if_info_by_ip('127.0.0.1').AndReturn(
self._prepare_info_by_ip_response())
drv._get_vol_by_junc_vserver('openstack', '/nfs').AndReturn('nfsvol')
drv._clone_file('nfsvol', 'volume_name', 'clone_name',
'openstack')
drv._post_prov_deprov_in_ssc(IgnoreArg())
return mox
def _prepare_info_by_ip_response(self):
res = """<attributes-list>
<net-interface-info>
<address>127.0.0.1</address>
<administrative-status>up</administrative-status>
<current-node>fas3170rre-cmode-01</current-node>
<current-port>e1b-1165</current-port>
<data-protocols>
<data-protocol>nfs</data-protocol>
</data-protocols>
<dns-domain-name>none</dns-domain-name>
<failover-group/>
<failover-policy>disabled</failover-policy>
<firewall-policy>data</firewall-policy>
<home-node>fas3170rre-cmode-01</home-node>
<home-port>e1b-1165</home-port>
<interface-name>nfs_data1</interface-name>
<is-auto-revert>false</is-auto-revert>
<is-home>true</is-home>
<netmask>255.255.255.0</netmask>
<netmask-length>24</netmask-length>
<operational-status>up</operational-status>
<role>data</role>
<routing-group-name>c10.63.165.0/24</routing-group-name>
<use-failover-group>disabled</use-failover-group>
<vserver>openstack</vserver>
</net-interface-info></attributes-list>"""
response_el = etree.XML(res)
return api.NaElement(response_el).get_children()
def test_clone_volume(self):
drv = self._driver
mox = self._prepare_clone_mock('pass')
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + str(hash(volume_name))
share = 'ip:/share'
drv._clone_volume(volume_name, clone_name, volume_id, share)
mox.VerifyAll()
def test_register_img_in_cache_noshare(self):
volume = {'id': '1', 'name': 'testvol'}
volume['provider_location'] = '10.61.170.1:/share/path'
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
drv._do_clone_rel_img_cache('testvol', 'img-cache-12345',
'10.61.170.1:/share/path',
'img-cache-12345')
mox.ReplayAll()
drv._register_image_in_cache(volume, '12345')
mox.VerifyAll()
def test_register_img_in_cache_with_share(self):
volume = {'id': '1', 'name': 'testvol'}
volume['provider_location'] = '10.61.170.1:/share/path'
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
drv._do_clone_rel_img_cache('testvol', 'img-cache-12345',
'10.61.170.1:/share/path',
'img-cache-12345')
mox.ReplayAll()
drv._register_image_in_cache(volume, '12345')
mox.VerifyAll()
def test_find_image_in_cache_no_shares(self):
drv = self._driver
drv._mounted_shares = []
result = drv._find_image_in_cache('image_id')
if not result:
pass
else:
self.fail('Return result is unexpected')
def test_find_image_in_cache_shares(self):
drv = self._driver
mox = self.mox
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(os.path, 'exists')
drv._get_mount_point_for_share('testshare').AndReturn('/mnt')
os.path.exists('/mnt/img-cache-id').AndReturn(True)
mox.ReplayAll()
result = drv._find_image_in_cache('id')
(share, file_name) = result[0]
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
if (share == 'testshare' and file_name == 'img-cache-id'):
pass
else:
LOG.warn(_("Share %(share)s and file name %(file_name)s")
% {'share': share, 'file_name': file_name})
self.fail('Return result is unexpected')
def test_find_old_cache_files_notexists(self):
drv = self._driver
mox = self.mox
cmd = ['find', '/mnt', '-maxdepth', '1', '-name',
'img-cache*', '-amin', '+720']
setattr(drv.configuration, 'expiry_thres_minutes', 720)
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_execute')
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
drv._execute(*cmd, run_as_root=True).AndReturn((None, ''))
mox.ReplayAll()
res = drv._find_old_cache_files('share')
mox.VerifyAll()
if len(res) == 0:
pass
else:
self.fail('No files expected but got return values.')
def test_find_old_cache_files_exists(self):
drv = self._driver
mox = self.mox
cmd = ['find', '/mnt', '-maxdepth', '1', '-name',
'img-cache*', '-amin', '+720']
setattr(drv.configuration, 'expiry_thres_minutes', '720')
files = '/mnt/img-id1\n/mnt/img-id2\n'
r_files = ['img-id1', 'img-id2']
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_shortlist_del_eligible_files')
drv._get_mount_point_for_share('share').AndReturn('/mnt')
drv._execute(*cmd, run_as_root=True).AndReturn((files, None))
drv._shortlist_del_eligible_files(
IgnoreArg(), r_files).AndReturn(r_files)
mox.ReplayAll()
res = drv._find_old_cache_files('share')
mox.VerifyAll()
if len(res) == len(r_files):
for f in res:
r_files.remove(f)
else:
self.fail('Returned files not same as expected.')
def test_delete_files_till_bytes_free_success(self):
drv = self._driver
mox = self.mox
files = [('img-cache-1', 230), ('img-cache-2', 380)]
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_delete_file')
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
drv._delete_file('/mnt/img-cache-2').AndReturn(True)
drv._delete_file('/mnt/img-cache-1').AndReturn(True)
mox.ReplayAll()
drv._delete_files_till_bytes_free(files, 'share', bytes_to_free=1024)
mox.VerifyAll()
def test_clean_image_cache_exec(self):
drv = self._driver
mox = self.mox
drv.configuration.thres_avl_size_perc_start = 20
drv.configuration.thres_avl_size_perc_stop = 50
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_find_old_cache_files')
mox.StubOutWithMock(drv, '_delete_files_till_bytes_free')
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._get_capacity_info('testshare').AndReturn((100, 19, 81))
drv._find_old_cache_files('testshare').AndReturn(['f1', 'f2'])
drv._delete_files_till_bytes_free(
['f1', 'f2'], 'testshare', bytes_to_free=31)
mox.ReplayAll()
drv._clean_image_cache()
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
if not drv.cleaning:
pass
else:
self.fail('Clean image cache failed.')
def test_clean_image_cache_noexec(self):
drv = self._driver
mox = self.mox
drv.configuration.thres_avl_size_perc_start = 20
drv.configuration.thres_avl_size_perc_stop = 50
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._get_capacity_info('testshare').AndReturn((100, 30, 70))
mox.ReplayAll()
drv._clean_image_cache()
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
if not drv.cleaning:
pass
else:
self.fail('Clean image cache failed.')
def test_clone_image_fromcache(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
mox.StubOutWithMock(drv, '_post_clone_image')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
drv._find_image_in_cache(IgnoreArg()).AndReturn(
[('share', 'file_name')])
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._do_clone_rel_img_cache('file_name', 'vol', 'share', 'file_name')
drv._post_clone_image(volume)
mox.ReplayAll()
drv.clone_image(volume, ('image_location', None), 'image_id', {})
mox.VerifyAll()
def get_img_info(self, format):
class img_info(object):
def __init__(self, fmt):
self.file_format = fmt
return img_info(format)
def test_clone_image_cloneableshare_nospace(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share(IgnoreArg()).AndReturn('127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(False)
mox.ReplayAll()
(prop, cloned) = drv.clone_image(
volume, ('nfs://127.0.0.1:/share/img-id', None), 'image_id', {})
mox.VerifyAll()
if not cloned and not prop['provider_location']:
pass
else:
self.fail('Expected not cloned, got cloned.')
def test_clone_image_cloneableshare_raw(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share(IgnoreArg()).AndReturn('127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id').AndReturn(
self.get_img_info('raw'))
drv._clone_volume(
'img-id', 'vol', share='127.0.0.1:/share', volume_id=None)
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
drv._set_rw_permissions_for_all('/mnt/vol')
drv._resize_image_file({'name': 'vol'}, IgnoreArg())
mox.ReplayAll()
drv.clone_image(
volume, ('nfs://127.0.0.1:/share/img-id', None), 'image_id', {})
mox.VerifyAll()
def test_clone_image_cloneableshare_notraw(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id').AndReturn(
self.get_img_info('notraw'))
image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
image_utils.qemu_img_info('/mnt/vol').AndReturn(
self.get_img_info('raw'))
drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
drv._set_rw_permissions_for_all('/mnt/vol')
drv._resize_image_file({'name': 'vol'}, IgnoreArg())
mox.ReplayAll()
drv.clone_image(
volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
mox.VerifyAll()
def test_clone_image_file_not_discovered(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
mox.StubOutWithMock(drv, 'local_path')
mox.StubOutWithMock(os.path, 'exists')
mox.StubOutWithMock(drv, '_delete_file')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id').AndReturn(
self.get_img_info('notraw'))
image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
image_utils.qemu_img_info('/mnt/vol').AndReturn(
self.get_img_info('raw'))
drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(False)
drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
os.path.exists('/mnt/vol').AndReturn(True)
drv._delete_file('/mnt/vol')
mox.ReplayAll()
vol_dict, result = drv.clone_image(
volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
mox.VerifyAll()
self.assertFalse(result)
self.assertFalse(vol_dict['bootable'])
self.assertIsNone(vol_dict['provider_location'])
def test_clone_image_resizefails(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
mox.StubOutWithMock(drv, 'local_path')
mox.StubOutWithMock(os.path, 'exists')
mox.StubOutWithMock(drv, '_delete_file')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id').AndReturn(
self.get_img_info('notraw'))
image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
image_utils.qemu_img_info('/mnt/vol').AndReturn(
self.get_img_info('raw'))
drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
drv._set_rw_permissions_for_all('/mnt/vol')
drv._resize_image_file(
IgnoreArg(), IgnoreArg()).AndRaise(exception.InvalidResults())
drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
os.path.exists('/mnt/vol').AndReturn(True)
drv._delete_file('/mnt/vol')
mox.ReplayAll()
vol_dict, result = drv.clone_image(
volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
mox.VerifyAll()
self.assertFalse(result)
self.assertFalse(vol_dict['bootable'])
self.assertIsNone(vol_dict['provider_location'])
def test_is_cloneable_share_badformats(self):
drv = self._driver
strgs = ['10.61.666.22:/share/img',
'nfs://10.61.666.22:/share/img',
'nfs://10.61.666.22//share/img',
'nfs://com.netapp.com:/share/img',
'nfs://com.netapp.com//share/img',
'com.netapp.com://share/im\\g',
'http://com.netapp.com://share/img',
'nfs://com.netapp.com:/share/img',
'nfs://com.netapp.com:8080//share/img',
'nfs://com.netapp.com//img',
'nfs://[ae::sr::ty::po]/img']
for strg in strgs:
res = drv._is_cloneable_share(strg)
if res:
msg = 'Invalid format matched for url %s.' % strg
self.fail(msg)
def test_is_cloneable_share_goodformat1(self):
drv = self._driver
mox = self.mox
strg = 'nfs://10.61.222.333/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat2(self):
drv = self._driver
mox = self.mox
strg = 'nfs://10.61.222.333:8080/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat3(self):
drv = self._driver
mox = self.mox
strg = 'nfs://com.netapp:8080/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat4(self):
drv = self._driver
mox = self.mox
strg = 'nfs://netapp.com/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat5(self):
drv = self._driver
mox = self.mox
strg = 'nfs://netapp.com/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_check_share_in_use_no_conn(self):
drv = self._driver
share = drv._check_share_in_use(None, '/dir')
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_invalid_conn(self):
drv = self._driver
share = drv._check_share_in_use(':8989', '/dir')
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_incorrect_host(self):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(utils, 'resolve_hostname')
utils.resolve_hostname(IgnoreArg()).AndRaise(Exception())
mox.ReplayAll()
share = drv._check_share_in_use('incorrect:8989', '/dir')
mox.VerifyAll()
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_success(self):
drv = self._driver
mox = self.mox
drv._mounted_shares = ['127.0.0.1:/dir/share']
mox.StubOutWithMock(utils, 'resolve_hostname')
mox.StubOutWithMock(drv, '_share_match_for_ip')
utils.resolve_hostname(IgnoreArg()).AndReturn('10.22.33.44')
drv._share_match_for_ip(
'10.22.33.44', ['127.0.0.1:/dir/share']).AndReturn('share')
mox.ReplayAll()
share = drv._check_share_in_use('127.0.0.1:8989', '/dir/share')
mox.VerifyAll()
if not share:
self.fail('Expected share not detected')
def test_construct_image_url_loc(self):
drv = self._driver
img_loc = (None,
[{'metadata':
{'share_location': 'nfs://host/path',
'mount_point': '/opt/stack/data/glance',
'type': 'nfs'},
'url': 'file:///opt/stack/data/glance/image-id'}])
location = drv._construct_image_nfs_url(img_loc)
if location != "nfs://host/path/image-id":
self.fail("Unexpected direct url.")
def test_construct_image_url_direct(self):
drv = self._driver
img_loc = ("nfs://host/path/image-id", None)
location = drv._construct_image_nfs_url(img_loc)
if location != "nfs://host/path/image-id":
self.fail("Unexpected direct url.")
class NetappDirectCmodeNfsDriverOnlyTestCase(test.TestCase):
"""Test direct NetApp C Mode driver only and not inherit."""
def setUp(self):
super(NetappDirectCmodeNfsDriverOnlyTestCase, self).setUp()
self._custom_setup()
def _custom_setup(self):
kwargs = {}
kwargs['netapp_mode'] = 'proxy'
kwargs['configuration'] = create_configuration()
self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver(**kwargs)
self._driver.ssc_enabled = True
self._driver.configuration.netapp_copyoffload_tool_path = 'cof_path'
@mock.patch.object(netapp_nfs, 'get_volume_extra_specs')
def test_create_volume(self, mock_volume_extra_specs):
drv = self._driver
drv.ssc_enabled = False
extra_specs = {}
mock_volume_extra_specs.return_value = extra_specs
fake_share = 'localhost:myshare'
with mock.patch.object(drv, '_ensure_shares_mounted'):
with mock.patch.object(drv, '_find_shares',
return_value=['localhost:myshare']):
with mock.patch.object(drv, '_do_create_volume'):
volume_info = self._driver.create_volume(FakeVolume(1))
self.assertEqual(volume_info.get('provider_location'),
fake_share)
@mock.patch.object(netapp_nfs, 'get_volume_extra_specs')
def test_create_volume_with_qos_policy(self, mock_volume_extra_specs):
drv = self._driver
drv.ssc_enabled = False
extra_specs = {'netapp:qos_policy_group': 'qos_policy_1'}
fake_volume = FakeVolume(1)
fake_share = 'localhost:myshare'
fake_qos_policy = 'qos_policy_1'
mock_volume_extra_specs.return_value = extra_specs
with mock.patch.object(drv, '_ensure_shares_mounted'):
with mock.patch.object(drv, '_find_shares',
return_value=['localhost:myshare']):
with mock.patch.object(drv, '_do_create_volume'):
with mock.patch.object(drv,
'_set_qos_policy_group_on_volume'
) as mock_set_qos:
volume_info = self._driver.create_volume(fake_volume)
self.assertEqual(volume_info.get('provider_location'),
'localhost:myshare')
mock_set_qos.assert_called_once_with(fake_volume,
fake_share,
fake_qos_policy)
def test_copy_img_to_vol_copyoffload_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._try_copyoffload = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='share')
drv._get_vol_for_share = mock.Mock(return_value='vol')
drv._update_stale_vols = mock.Mock()
drv.copy_image_to_volume(context, volume, image_service, image_id)
drv._try_copyoffload.assert_called_once_with(context, volume,
image_service,
image_id)
drv._update_stale_vols.assert_called_once_with('vol')
def test_copy_img_to_vol_copyoffload_failure(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._try_copyoffload = mock.Mock(side_effect=Exception())
netapp_nfs.NetAppNFSDriver.copy_image_to_volume = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='share')
drv._get_vol_for_share = mock.Mock(return_value='vol')
drv._update_stale_vols = mock.Mock()
drv.copy_image_to_volume(context, volume, image_service, image_id)
drv._try_copyoffload.assert_called_once_with(context, volume,
image_service,
image_id)
netapp_nfs.NetAppNFSDriver.copy_image_to_volume.\
assert_called_once_with(context, volume, image_service, image_id)
drv._update_stale_vols.assert_called_once_with('vol')
def test_copy_img_to_vol_copyoffload_nonexistent_binary_path(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = mock.Mock()
image_service.get_location.return_value = (mock.Mock(), mock.Mock())
image_service.show.return_value = {'size': 0}
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._find_image_in_cache = mock.Mock(return_value=[])
drv._construct_image_nfs_url = mock.Mock(return_value="")
drv._check_get_nfs_path_segs = mock.Mock(return_value=("test:test",
"dr"))
drv._get_ip_verify_on_cluster = mock.Mock(return_value="192.168.1.1")
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
drv._get_host_ip = mock.Mock()
drv._get_provider_location = mock.Mock()
drv._get_export_path = mock.Mock(return_value="dr")
drv._check_share_can_hold_size = mock.Mock()
# Raise error as if the copyoffload file can not be found
drv._clone_file_dst_exists = mock.Mock(side_effect=OSError())
# Verify the original error is propagated
self.assertRaises(OSError, drv._try_copyoffload,
context, volume, image_service, image_id)
def test_copyoffload_frm_cache_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._find_image_in_cache = mock.Mock(return_value=[('share', 'img')])
drv._copy_from_cache = mock.Mock(return_value=True)
drv._try_copyoffload(context, volume, image_service, image_id)
drv._copy_from_cache.assert_called_once_with(volume,
image_id,
[('share', 'img')])
def test_copyoffload_frm_img_service_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._find_image_in_cache = mock.Mock(return_value=[])
drv._copy_from_img_service = mock.Mock()
drv._try_copyoffload(context, volume, image_service, image_id)
drv._copy_from_img_service.assert_called_once_with(context,
volume,
image_service,
image_id)
def test_cache_copyoffload_workflow_success(self):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
cache_result = [('ip1:/openstack', 'img-cache-imgid')]
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._execute = mock.Mock()
drv._register_image_in_cache = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='/share')
drv._post_clone_image = mock.Mock()
copied = drv._copy_from_cache(volume, image_id, cache_result)
self.assertTrue(copied)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._execute.assert_called_once_with('cof_path', 'ip1', 'ip1',
'/openstack/img-cache-imgid',
'/exp_path/name',
run_as_root=False,
check_exit_code=0)
drv._post_clone_image.assert_called_with(volume)
drv._get_provider_location.assert_called_with('vol_id')
@mock.patch.object(image_utils, 'qemu_img_info')
def test_img_service_raw_copyoffload_workflow_success(self,
mock_qemu_img_info):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
context = object()
image_service = mock.Mock()
image_service.get_location.return_value = ('nfs://ip1/openstack/img',
None)
image_service.show.return_value = {'size': 1,
'disk_format': 'raw'}
drv._check_get_nfs_path_segs = mock.Mock(return_value=
('ip1', '/openstack'))
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._get_provider_location = mock.Mock(return_value='share')
drv._execute = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
drv._discover_file_till_timeout = mock.Mock(return_value=True)
img_inf = mock.Mock()
img_inf.file_format = 'raw'
mock_qemu_img_info.return_value = img_inf
drv._check_share_can_hold_size = mock.Mock()
drv._move_nfs_file = mock.Mock(return_value=True)
drv._delete_file = mock.Mock()
drv._clone_file_dst_exists = mock.Mock()
drv._post_clone_image = mock.Mock()
drv._copy_from_img_service(context, volume, image_service, image_id)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._check_share_can_hold_size.assert_called_with('share', 1)
assert drv._execute.call_count == 1
drv._post_clone_image.assert_called_with(volume)
@mock.patch.object(image_utils, 'convert_image')
@mock.patch.object(image_utils, 'qemu_img_info')
@mock.patch('os.path.exists')
def test_img_service_qcow2_copyoffload_workflow_success(self, mock_exists,
mock_qemu_img_info,
mock_cvrt_image):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
context = object()
image_service = mock.Mock()
image_service.get_location.return_value = ('nfs://ip1/openstack/img',
None)
image_service.show.return_value = {'size': 1,
'disk_format': 'qcow2'}
drv._check_get_nfs_path_segs = mock.Mock(return_value=
('ip1', '/openstack'))
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._get_provider_location = mock.Mock(return_value='share')
drv._execute = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
img_inf = mock.Mock()
img_inf.file_format = 'raw'
mock_qemu_img_info.return_value = img_inf
drv._check_share_can_hold_size = mock.Mock()
drv._move_nfs_file = mock.Mock(return_value=True)
drv._delete_file = mock.Mock()
drv._clone_file_dst_exists = mock.Mock()
drv._post_clone_image = mock.Mock()
drv._copy_from_img_service(context, volume, image_service, image_id)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._check_share_can_hold_size.assert_called_with('share', 1)
assert mock_cvrt_image.call_count == 1
assert drv._execute.call_count == 1
assert drv._delete_file.call_count == 2
assert drv._clone_file_dst_exists.call_count == 1
drv._post_clone_image.assert_called_with(volume)
class NetappDirect7modeNfsDriverTestCase(NetappDirectCmodeNfsDriverTestCase):
"""Test direct NetApp C Mode driver."""
def _custom_setup(self):
self._driver = netapp_nfs.NetAppDirect7modeNfsDriver(
configuration=create_configuration())
def _prepare_delete_snapshot_mock(self, snapshot_exists):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_get_provider_location')
mox.StubOutWithMock(drv, '_volume_not_present')
if snapshot_exists:
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_get_volume_path')
drv._get_provider_location(IgnoreArg())
drv._volume_not_present(IgnoreArg(), IgnoreArg())\
.AndReturn(not snapshot_exists)
if snapshot_exists:
drv._get_volume_path(IgnoreArg(), IgnoreArg())
drv._execute('rm', None, run_as_root=True)
mox.ReplayAll()
return mox
def test_check_for_setup_error_version(self):
drv = self._driver
drv._client = api.NaServer("127.0.0.1")
# check that an exception is raised when the version is not found
self.assertRaises(exception.VolumeBackendAPIException,
drv.check_for_setup_error)
drv._client.set_api_version(1, 8)
# check that an exception is raised for an unsupported version
self.assertRaises(exception.VolumeBackendAPIException,
drv.check_for_setup_error)
def test_check_for_setup_error(self):
mox = self.mox
drv = self._driver
drv._client = api.NaServer("127.0.0.1")
drv._client.set_api_version(1, 9)
required_flags = [
'netapp_transport_type',
'netapp_login',
'netapp_password',
'netapp_server_hostname',
'netapp_server_port']
# set required flags
for flag in required_flags:
setattr(drv.configuration, flag, None)
# check that an exception is raised when the flags are not set
self.assertRaises(exception.CinderException,
drv.check_for_setup_error)
# set required flags
for flag in required_flags:
setattr(drv.configuration, flag, 'val')
mox.ReplayAll()
drv.check_for_setup_error()
mox.VerifyAll()
# restore initial FLAGS
for flag in required_flags:
delattr(drv.configuration, flag)
def test_do_setup(self):
mox = self.mox
drv = self._driver
mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, 'do_setup')
mox.StubOutWithMock(drv, '_get_client')
mox.StubOutWithMock(drv, '_do_custom_setup')
netapp_nfs.NetAppNFSDriver.do_setup(IgnoreArg())
drv._get_client()
drv._do_custom_setup(IgnoreArg())
mox.ReplayAll()
drv.do_setup(IsA(context.RequestContext))
mox.VerifyAll()
def _prepare_clone_mock(self, status):
drv = self._driver
mox = self.mox
volume = FakeVolume()
setattr(volume, 'provider_location', '127.0.0.1:/nfs')
mox.StubOutWithMock(drv, '_get_export_ip_path')
mox.StubOutWithMock(drv, '_get_actual_path_for_export')
mox.StubOutWithMock(drv, '_start_clone')
mox.StubOutWithMock(drv, '_wait_for_clone_finish')
if status == 'fail':
mox.StubOutWithMock(drv, '_clear_clone')
drv._get_export_ip_path(
IgnoreArg(), IgnoreArg()).AndReturn(('127.0.0.1', '/nfs'))
drv._get_actual_path_for_export(IgnoreArg()).AndReturn('/vol/vol1/nfs')
drv._start_clone(IgnoreArg(), IgnoreArg()).AndReturn(('1', '2'))
if status == 'fail':
drv._wait_for_clone_finish('1', '2').AndRaise(
api.NaApiError('error', 'error'))
drv._clear_clone('1')
else:
drv._wait_for_clone_finish('1', '2')
return mox
def test_clone_volume_clear(self):
drv = self._driver
mox = self._prepare_clone_mock('fail')
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + str(hash(volume_name))
try:
drv._clone_volume(volume_name, clone_name, volume_id)
except Exception as e:
if isinstance(e, api.NaApiError):
pass
else:
raise
mox.VerifyAll()
|
Java
|
/* Copyright (c) The m-m-m Team, Licensed under the Apache License, Version 2.0
* http://www.apache.org/licenses/LICENSE-2.0 */
package net.sf.mmm.service.base.client;
import net.sf.mmm.service.api.RemoteInvocationCall;
import net.sf.mmm.util.lang.api.function.Consumer;
/**
* This is a simple container for the data corresponding to a {@link RemoteInvocationCall}.
*
* @param <RESULT> is the generic type of the method return-type.
* @param <CALL> is the generic type of the {@link #getCall() call} data.
* @author Joerg Hohwiller (hohwille at users.sourceforge.net)
* @since 1.0.0
*/
public class RemoteInvocationCallData<RESULT, CALL extends RemoteInvocationCall> {
/** The callback to receive the service result on success. */
private final Consumer<? extends RESULT> successCallback;
/** The callback to receive a potential service failure. */
private final Consumer<Throwable> failureCallback;
/** @see #getCall() */
private CALL call;
/**
* The constructor.
*
* @param successCallback is the callback that {@link Consumer#accept(Object) receives} the result on
* success.
* @param failureCallback is the callback that {@link Consumer#accept(Object) receives} the failure on
* error.
*/
public RemoteInvocationCallData(Consumer<? extends RESULT> successCallback, Consumer<Throwable> failureCallback) {
super();
this.successCallback = successCallback;
this.failureCallback = failureCallback;
}
/**
* @return the successCallback.
*/
public Consumer<? extends RESULT> getSuccessCallback() {
return this.successCallback;
}
/**
* @return the failureCallback.
*/
public Consumer<Throwable> getFailureCallback() {
return this.failureCallback;
}
/**
* @return the actual call data (either {@link net.sf.mmm.service.api.command.RemoteInvocationCommand}
* itself or {@link net.sf.mmm.service.base.rpc.GenericRemoteInvocationRpcCall}).
*/
public CALL getCall() {
return this.call;
}
/**
* @param call is the new value of {@link #getCall()}.
*/
public void setCall(CALL call) {
assert (this.call == null);
assert (call != null);
this.call = call;
}
}
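/*
 * Illustrative sketch, not part of the original class: a hypothetical helper showing the
 * intended lifecycle of RemoteInvocationCallData -- the success/failure callbacks are fixed
 * at construction time, and the call payload is attached exactly once via setCall(). The
 * helper name and generic parameters below are assumptions made for the example only.
 */
class RemoteInvocationCallDataExample {

  static <R, C extends RemoteInvocationCall> RemoteInvocationCallData<R, C> newCallData(
      Consumer<R> onSuccess, Consumer<Throwable> onFailure, C call) {

    // callbacks are immutable after construction
    RemoteInvocationCallData<R, C> data = new RemoteInvocationCallData<R, C>(onSuccess, onFailure);
    // the actual call data (command or RPC call) is attached later, exactly once
    data.setCall(call);
    return data;
  }
}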
|
Java
|
/**
*
*/
package jframe.core.plugin;
import java.util.EventListener;
/**
* @author dzh
* @date Sep 12, 2013 9:42:33 PM
* @since 1.0
*/
public interface PluginListener extends EventListener {
void pluginChanged(PluginEvent event);
}
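/*
 * Illustrative sketch, not part of the original interface: a hypothetical listener that
 * simply logs the received event, showing how the callback is meant to be implemented.
 */
class LoggingPluginListener implements PluginListener {

    @Override
    public void pluginChanged(PluginEvent event) {
        System.out.println("plugin changed: " + event);
    }
}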
|
Java
|
/*
* Created on May 17, 2004
*
* Paros and its related class files.
*
* Paros is an HTTP/HTTPS proxy for assessing web application security.
* Copyright (C) 2003-2004 Chinotec Technologies Company
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the Clarified Artistic License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* Clarified Artistic License for more details.
*
* You should have received a copy of the Clarified Artistic License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
// ZAP: 2013/01/16 Minor fix to prevent NPE
// ZAP: 2014/10/17 Issue 1308: Updated for latest icons
// ZAP: 2015/02/10 Issue 1528: Support user defined font size
// ZAP: 2015/09/07 Move icon loading to a utility class
package org.parosproxy.paros.view;
import java.awt.Dimension;
import java.awt.Frame;
import java.awt.Image;
import java.awt.Point;
import java.awt.Toolkit;
import java.awt.event.ComponentAdapter;
import java.awt.event.ComponentEvent;
import java.awt.event.WindowEvent;
import java.awt.event.WindowStateListener;
import java.util.ArrayList;
import java.util.List;
import java.util.prefs.BackingStoreException;
import java.util.prefs.Preferences;
import javax.swing.JFrame;
import org.apache.log4j.Logger;
import org.parosproxy.paros.Constant;
import org.zaproxy.zap.utils.DisplayUtils;
/**
* Generic Frame, which handles some basic properties.
* <ul>
* <li>Sets the icon(s) for the frame, which are the ZAP icons</li>
* <li>Centers the frame on screen</li>
* <li>Sets the frame to _not_ visible</li>
* <li>Sets a common font for the frame</li>
* <li>Sets a default title (ZAP application name)</li>
* <li>Preserves window state, location and size correctly (will survive multiple sessions)</li>
* </ul>
* Hint for implementers: If you use this class,
* don't use {@link #setSize(Dimension)}, but {@link #setPreferredSize(Dimension)}
* instead. Also, don't use {@link #setLocation(Point)}. This abstract class
* will automatically take care of size and position.
*/
public abstract class AbstractFrame extends JFrame {
private static final long serialVersionUID = 6751593232255236597L;
private static final String PREF_WINDOW_STATE = "window.state";
private static final String PREF_WINDOW_SIZE = "window.size";
private static final String PREF_WINDOW_POSITION = "window.position";
private static final int WINDOW_DEFAULT_WIDTH = 800;
private static final int WINDOW_DEFAULT_HEIGHT = 600;
/**
* Hint: Preferences are only saved by package.
* We have to use a prefix for separation.
*/
private final Preferences preferences;
private final String prefnzPrefix = this.getClass().getSimpleName()+".";
private final Logger logger = Logger.getLogger(AbstractFrame.class);
/**
* This is the default constructor
*/
public AbstractFrame() {
super();
this.preferences = Preferences.userNodeForPackage(getClass());
initialize();
}
/**
* Initializes this frame.
*/
private void initialize() {
// ZAP: Rebrand
this.setIconImages(DisplayUtils.getZapIconImages());
this.setVisible(false);
this.setTitle(Constant.PROGRAM_NAME);
final Dimension dim = restoreWindowSize();
if (dim == null) {
this.setSize(WINDOW_DEFAULT_WIDTH, WINDOW_DEFAULT_HEIGHT);
}
final Point point = restoreWindowLocation();
if (point == null) {
centerFrame();
}
restoreWindowState();
this.addWindowStateListener(new FrameWindowStateListener());
this.addComponentListener(new FrameResizedListener());
}
/**
* Centre this frame.
*
*/
public void centerFrame() {
final Dimension screenSize = Toolkit.getDefaultToolkit().getScreenSize();
final Dimension frameSize = this.getSize();
if (frameSize.height > screenSize.height) {
frameSize.height = screenSize.height;
}
if (frameSize.width > screenSize.width) {
frameSize.width = screenSize.width;
}
this.setLocation((screenSize.width - frameSize.width) / 2, (screenSize.height - frameSize.height) / 2);
}
/**
* @param windowstate integer value, see {@link JFrame#getExtendedState()}
*/
private void saveWindowState(int windowstate) {
if ((windowstate & Frame.ICONIFIED) == Frame.ICONIFIED) {
preferences.put(prefnzPrefix+PREF_WINDOW_STATE, SimpleWindowState.ICONFIED.toString());
if (logger.isDebugEnabled()) logger.debug("Saving preference "+PREF_WINDOW_STATE+"=" + SimpleWindowState.ICONFIED);
}
if ((windowstate & Frame.MAXIMIZED_BOTH) == Frame.MAXIMIZED_BOTH) {
preferences.put(prefnzPrefix+PREF_WINDOW_STATE, SimpleWindowState.MAXIMIZED.toString());
if (logger.isDebugEnabled()) logger.debug("Saving preference "+PREF_WINDOW_STATE+"=" + SimpleWindowState.MAXIMIZED);
}
if (windowstate == Frame.NORMAL) { // hint: Frame.NORMAL == 0, that's why no masking is needed
preferences.put(prefnzPrefix+PREF_WINDOW_STATE, SimpleWindowState.NORMAL.toString());
if (logger.isDebugEnabled()) logger.debug("Saving preference "+PREF_WINDOW_STATE+"=" + SimpleWindowState.NORMAL);
}
}
/**
* Loads and sets the last window state of the frame.
* Additionally, the last state will be returned.
*
* @return last window state OR null
*/
private SimpleWindowState restoreWindowState() {
SimpleWindowState laststate = null;
final String statestr = preferences.get(prefnzPrefix+PREF_WINDOW_STATE, null);
if (logger.isDebugEnabled()) logger.debug("Restoring preference "+PREF_WINDOW_STATE+"=" + statestr);
if (statestr != null) {
SimpleWindowState state = null;
try {
state = SimpleWindowState.valueOf(statestr);
} catch (final IllegalArgumentException e) { state = null; }
if (state != null) {
switch (state) {
case ICONFIED: this.setExtendedState(Frame.ICONIFIED); break;
case NORMAL: this.setExtendedState(Frame.NORMAL); break;
case MAXIMIZED: this.setExtendedState(Frame.MAXIMIZED_BOTH); break;
default:
logger.error("Invalid window state (nothing will changed): " + statestr);
}
}
laststate = state;
}
return laststate;
}
/**
* Saves the size of this frame, but only if the window state is 'normal'.
* If the window state is iconified or maximized, the size is not saved!
*
* @param size
*/
private void saveWindowSize(Dimension size) {
if (size != null) {
if (getExtendedState() == Frame.NORMAL) {
if (logger.isDebugEnabled()) logger.debug("Saving preference " + PREF_WINDOW_SIZE + "=" + size.width + "," + size.height);
this.preferences.put(prefnzPrefix+PREF_WINDOW_SIZE, size.width + "," + size.height);
} else {
if (logger.isDebugEnabled()) logger.debug("Preference " + PREF_WINDOW_SIZE + " not saved because the window state is not 'normal'.");
}
}
}
/**
* Loads and sets the saved size preferences for this frame.
*
* @return the size of the frame OR null, if there wasn't any preference.
*/
private Dimension restoreWindowSize() {
Dimension result = null;
final String sizestr = preferences.get(prefnzPrefix+PREF_WINDOW_SIZE, null);
if (sizestr != null) {
int width = 0;
int height = 0;
final String[] sizes = sizestr.split("[,]");
try {
width = Integer.parseInt(sizes[0].trim());
height = Integer.parseInt(sizes[1].trim());
} catch (final Exception e) {
// ignored: width/height keep their default value of 0 and are discarded by the check below
}
if (width > 0 && height > 0) {
result = new Dimension(width, height);
if (logger.isDebugEnabled()) logger.debug("Restoring preference " + PREF_WINDOW_SIZE + "=" + result.width + "," + result.height);
this.setSize(result);
}
}
return result;
}
/**
* Saves the location of this frame, but only if the window state is 'normal'.
* If the window state is iconified or maximized, the location is not saved!
*
* @param point
*/
private void saveWindowLocation(Point point) {
if (point != null) {
if (getExtendedState() == Frame.NORMAL) {
if (logger.isDebugEnabled()) logger.debug("Saving preference " + PREF_WINDOW_POSITION + "=" + point.x + "," + point.y);
this.preferences.put(prefnzPrefix+PREF_WINDOW_POSITION, point.x + "," + point.y);
} else {
if (logger.isDebugEnabled()) logger.debug("Preference " + PREF_WINDOW_POSITION + " not saved because the window state is not 'normal'.");
}
}
}
/**
* Loads and sets the saved position preferences for this frame.
*
* @return the location of the frame OR null, if there wasn't any preference.
*/
private Point restoreWindowLocation() {
Point result = null;
final String sizestr = preferences.get(prefnzPrefix+PREF_WINDOW_POSITION, null);
if (sizestr != null) {
int x = 0;
int y = 0;
final String[] sizes = sizestr.split("[,]");
try {
x = Integer.parseInt(sizes[0].trim());
y = Integer.parseInt(sizes[1].trim());
} catch (final Exception e) {
// ignored: x/y keep their default value of 0 and are discarded by the check below
}
if (x > 0 && y > 0) {
result = new Point(x, y);
if (logger.isDebugEnabled()) logger.debug("Restoring preference " + PREF_WINDOW_POSITION + "=" + result.x + "," + result.y);
this.setLocation(result);
}
}
return result;
}
/**
* @deprecated (2.4.2) Use {@link DisplayUtils#getZapIconImages()} instead. It will be removed in a future release.
*/
@Deprecated
@SuppressWarnings("javadoc")
protected List<Image> loadIconImages() {
return new ArrayList<>(DisplayUtils.getZapIconImages());
}
@Override
public void dispose() {
super.dispose();
try {
this.preferences.flush();
} catch (final BackingStoreException e) {
logger.error("Error while saving the preferences", e);
}
}
/*
* ========================================================================
*/
private final class FrameWindowStateListener implements WindowStateListener {
@Override
public void windowStateChanged(WindowEvent e) {
saveWindowState(e.getNewState());
}
}
private final class FrameResizedListener extends ComponentAdapter {
@Override
public void componentResized(ComponentEvent e) {
if (e.getComponent() != null) {
saveWindowSize(e.getComponent().getSize());
}
}
@Override
public void componentMoved(ComponentEvent e) {
if (e.getComponent() != null) {
saveWindowLocation(e.getComponent().getLocation());
}
}
}
/**
* Simplified version for easier handling of the states ...
*/
private enum SimpleWindowState {
ICONFIED,
NORMAL,
MAXIMIZED;
}
} // @jve:visual-info decl-index=0 visual-constraint="31,17"
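/*
 * Illustrative sketch, not part of the original file: a minimal subclass following the hint
 * in the class Javadoc -- declare a preferred size instead of calling setSize()/setLocation()
 * and let AbstractFrame restore any previously persisted geometry. The class name and the
 * dimensions are made up for the example.
 */
class ExampleZapFrame extends AbstractFrame {

    private static final long serialVersionUID = 1L;

    ExampleZapFrame() {
        super(); // sets icons and title, restores saved size/position/state if present
        setPreferredSize(new Dimension(900, 700)); // preferred size only, never setSize()/setLocation()
    }
}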
|
Java
|
/**
* FreeRDP: A Remote Desktop Protocol Client
* RDP Security
*
* Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "security.h"
/* 0x36 repeated 40 times */
static const uint8 pad1[40] =
{
"\x36\x36\x36\x36\x36\x36\x36\x36"
"\x36\x36\x36\x36\x36\x36\x36\x36"
"\x36\x36\x36\x36\x36\x36\x36\x36"
"\x36\x36\x36\x36\x36\x36\x36\x36"
"\x36\x36\x36\x36\x36\x36\x36\x36"
};
/* 0x5C repeated 48 times */
static const uint8 pad2[48] =
{
"\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C"
"\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C"
"\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C"
"\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C"
"\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C"
"\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C"
};
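/* Bit-reversal lookup table: entry i is the byte i with its 8 bits mirrored
 * (e.g. 0x01 -> 0x80, 0x02 -> 0x40). Presumably used for FIPS key derivation. */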
static const uint8
fips_reverse_table[256] =
{
0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff
};
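/* DES odd-parity lookup table: entry i is i with its least-significant bit adjusted
 * so that the byte has an odd number of set bits (as required for DES key bytes). */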
static const uint8
fips_oddparity_table[256] =
{
0x01, 0x01, 0x02, 0x02, 0x04, 0x04, 0x07, 0x07,
0x08, 0x08, 0x0b, 0x0b, 0x0d, 0x0d, 0x0e, 0x0e,
0x10, 0x10, 0x13, 0x13, 0x15, 0x15, 0x16, 0x16,
0x19, 0x19, 0x1a, 0x1a, 0x1c, 0x1c, 0x1f, 0x1f,
0x20, 0x20, 0x23, 0x23, 0x25, 0x25, 0x26, 0x26,
0x29, 0x29, 0x2a, 0x2a, 0x2c, 0x2c, 0x2f, 0x2f,
0x31, 0x31, 0x32, 0x32, 0x34, 0x34, 0x37, 0x37,
0x38, 0x38, 0x3b, 0x3b, 0x3d, 0x3d, 0x3e, 0x3e,
0x40, 0x40, 0x43, 0x43, 0x45, 0x45, 0x46, 0x46,
0x49, 0x49, 0x4a, 0x4a, 0x4c, 0x4c, 0x4f, 0x4f,
0x51, 0x51, 0x52, 0x52, 0x54, 0x54, 0x57, 0x57,
0x58, 0x58, 0x5b, 0x5b, 0x5d, 0x5d, 0x5e, 0x5e,
0x61, 0x61, 0x62, 0x62, 0x64, 0x64, 0x67, 0x67,
0x68, 0x68, 0x6b, 0x6b, 0x6d, 0x6d, 0x6e, 0x6e,
0x70, 0x70, 0x73, 0x73, 0x75, 0x75, 0x76, 0x76,
0x79, 0x79, 0x7a, 0x7a, 0x7c, 0x7c, 0x7f, 0x7f,
0x80, 0x80, 0x83, 0x83, 0x85, 0x85, 0x86, 0x86,
0x89, 0x89, 0x8a, 0x8a, 0x8c, 0x8c, 0x8f, 0x8f,
0x91, 0x91, 0x92, 0x92, 0x94, 0x94, 0x97, 0x97,
0x98, 0x98, 0x9b, 0x9b, 0x9d, 0x9d, 0x9e, 0x9e,
0xa1, 0xa1, 0xa2, 0xa2, 0xa4, 0xa4, 0xa7, 0xa7,
0xa8, 0xa8, 0xab, 0xab, 0xad, 0xad, 0xae, 0xae,
0xb0, 0xb0, 0xb3, 0xb3, 0xb5, 0xb5, 0xb6, 0xb6,
0xb9, 0xb9, 0xba, 0xba, 0xbc, 0xbc, 0xbf, 0xbf,
0xc1, 0xc1, 0xc2, 0xc2, 0xc4, 0xc4, 0xc7, 0xc7,
0xc8, 0xc8, 0xcb, 0xcb, 0xcd, 0xcd, 0xce, 0xce,
0xd0, 0xd0, 0xd3, 0xd3, 0xd5, 0xd5, 0xd6, 0xd6,
0xd9, 0xd9, 0xda, 0xda, 0xdc, 0xdc, 0xdf, 0xdf,
0xe0, 0xe0, 0xe3, 0xe3, 0xe5, 0xe5, 0xe6, 0xe6,
0xe9, 0xe9, 0xea, 0xea, 0xec, 0xec, 0xef, 0xef,
0xf1, 0xf1, 0xf2, 0xf2, 0xf4, 0xf4, 0xf7, 0xf7,
0xf8, 0xf8, 0xfb, 0xfb, 0xfd, 0xfd, 0xfe, 0xfe
};
static void security_salted_hash(uint8* salt, uint8* input, int length, uint8* salt1, uint8* salt2, uint8* output)
{
CryptoMd5 md5;
CryptoSha1 sha1;
uint8 sha1_digest[CRYPTO_SHA1_DIGEST_LENGTH];
/* SaltedHash(Salt, Input, Salt1, Salt2) = MD5(S + SHA1(Input + Salt + Salt1 + Salt2)) */
/* SHA1_Digest = SHA1(Input + Salt + Salt1 + Salt2) */
sha1 = crypto_sha1_init();
crypto_sha1_update(sha1, input, length); /* Input */
crypto_sha1_update(sha1, salt, 48); /* Salt (48 bytes) */
crypto_sha1_update(sha1, salt1, 32); /* Salt1 (32 bytes) */
crypto_sha1_update(sha1, salt2, 32); /* Salt2 (32 bytes) */
crypto_sha1_final(sha1, sha1_digest);
/* SaltedHash(Salt, Input, Salt1, Salt2) = MD5(S + SHA1_Digest) */
md5 = crypto_md5_init();
crypto_md5_update(md5, salt, 48); /* Salt (48 bytes) */
crypto_md5_update(md5, sha1_digest, sizeof(sha1_digest)); /* SHA1_Digest */
crypto_md5_final(md5, output);
}
static void security_premaster_hash(char* input, int length, uint8* premaster_secret, uint8* client_random, uint8* server_random, uint8* output)
{
/* PremasterHash(Input) = SaltedHash(PremasterSecret, Input, ClientRandom, ServerRandom) */
security_salted_hash(premaster_secret, (uint8*)input, length, client_random, server_random, output);
}
void security_master_secret(uint8* premaster_secret, uint8* client_random, uint8* server_random, uint8* output)
{
/* MasterSecret = PremasterHash('A') + PremasterHash('BB') + PremasterHash('CCC') */
security_premaster_hash("A", 1, premaster_secret, client_random, server_random, &output[0]);
security_premaster_hash("BB", 2, premaster_secret, client_random, server_random, &output[16]);
security_premaster_hash("CCC", 3, premaster_secret, client_random, server_random, &output[32]);
}
static void security_master_hash(char* input, int length, uint8* master_secret, uint8* client_random, uint8* server_random, uint8* output)
{
/* MasterHash(Input) = SaltedHash(MasterSecret, Input, ServerRandom, ClientRandom) */
security_salted_hash(master_secret, (uint8*)input, length, server_random, client_random, output);
}
void security_session_key_blob(uint8* master_secret, uint8* client_random, uint8* server_random, uint8* output)
{
/* MasterHash = MasterHash('A') + MasterHash('BB') + MasterHash('CCC') */
security_master_hash("A", 1, master_secret, client_random, server_random, &output[0]);
security_master_hash("BB", 2, master_secret, client_random, server_random, &output[16]);
security_master_hash("CCC", 3, master_secret, client_random, server_random, &output[32]);
}
void security_mac_salt_key(uint8* session_key_blob, uint8* client_random, uint8* server_random, uint8* output)
{
/* MacSaltKey = First128Bits(SessionKeyBlob) */
memcpy(output, session_key_blob, 16);
}
void security_md5_16_32_32(uint8* in0, uint8* in1, uint8* in2, uint8* output)
{
CryptoMd5 md5;
md5 = crypto_md5_init();
crypto_md5_update(md5, in0, 16);
crypto_md5_update(md5, in1, 32);
crypto_md5_update(md5, in2, 32);
crypto_md5_final(md5, output);
}
void security_licensing_encryption_key(uint8* session_key_blob, uint8* client_random, uint8* server_random, uint8* output)
{
/* LicensingEncryptionKey = MD5(Second128Bits(SessionKeyBlob) + ClientRandom + ServerRandom)) */
security_md5_16_32_32(&session_key_blob[16], client_random, server_random, output);
}
void security_uint32_le(uint8* output, uint32 value)
{
output[0] = (value) & 0xFF;
output[1] = (value >> 8) & 0xFF;
output[2] = (value >> 16) & 0xFF;
output[3] = (value >> 24) & 0xFF;
}
void security_mac_data(uint8* mac_salt_key, uint8* data, uint32 length, uint8* output)
{
CryptoMd5 md5;
CryptoSha1 sha1;
uint8 length_le[4];
uint8 sha1_digest[CRYPTO_SHA1_DIGEST_LENGTH];
/* MacData = MD5(MacSaltKey + pad2 + SHA1(MacSaltKey + pad1 + length + data)) */
security_uint32_le(length_le, length); /* length must be little-endian */
/* SHA1_Digest = SHA1(MacSaltKey + pad1 + length + data) */
sha1 = crypto_sha1_init();
crypto_sha1_update(sha1, mac_salt_key, 16); /* MacSaltKey */
crypto_sha1_update(sha1, pad1, sizeof(pad1)); /* pad1 */
crypto_sha1_update(sha1, length_le, sizeof(length_le)); /* length */
crypto_sha1_update(sha1, data, length); /* data */
crypto_sha1_final(sha1, sha1_digest);
/* MacData = MD5(MacSaltKey + pad2 + SHA1_Digest) */
md5 = crypto_md5_init();
crypto_md5_update(md5, mac_salt_key, 16); /* MacSaltKey */
crypto_md5_update(md5, pad2, sizeof(pad2)); /* pad2 */
crypto_md5_update(md5, sha1_digest, sizeof(sha1_digest)); /* SHA1_Digest */
crypto_md5_final(md5, output);
}
void security_mac_signature(rdpRdp *rdp, uint8* data, uint32 length, uint8* output)
{
CryptoMd5 md5;
CryptoSha1 sha1;
uint8 length_le[4];
uint8 md5_digest[CRYPTO_MD5_DIGEST_LENGTH];
uint8 sha1_digest[CRYPTO_SHA1_DIGEST_LENGTH];
security_uint32_le(length_le, length); /* length must be little-endian */
/* SHA1_Digest = SHA1(MACKeyN + pad1 + length + data) */
sha1 = crypto_sha1_init();
crypto_sha1_update(sha1, rdp->sign_key, rdp->rc4_key_len); /* MacKeyN */
crypto_sha1_update(sha1, pad1, sizeof(pad1)); /* pad1 */
crypto_sha1_update(sha1, length_le, sizeof(length_le)); /* length */
crypto_sha1_update(sha1, data, length); /* data */
crypto_sha1_final(sha1, sha1_digest);
/* MACSignature = First64Bits(MD5(MACKeyN + pad2 + SHA1_Digest)) */
md5 = crypto_md5_init();
crypto_md5_update(md5, rdp->sign_key, rdp->rc4_key_len); /* MacKeyN */
crypto_md5_update(md5, pad2, sizeof(pad2)); /* pad2 */
crypto_md5_update(md5, sha1_digest, sizeof(sha1_digest)); /* SHA1_Digest */
crypto_md5_final(md5, md5_digest);
memcpy(output, md5_digest, 8);
}
void security_salted_mac_signature(rdpRdp *rdp, uint8* data, uint32 length, boolean encryption, uint8* output)
{
CryptoMd5 md5;
CryptoSha1 sha1;
uint8 length_le[4];
uint8 use_count_le[4];
uint8 md5_digest[CRYPTO_MD5_DIGEST_LENGTH];
uint8 sha1_digest[CRYPTO_SHA1_DIGEST_LENGTH];
security_uint32_le(length_le, length); /* length must be little-endian */
if (encryption)
{
security_uint32_le(use_count_le, rdp->encrypt_checksum_use_count);
}
else
{
/*
* We calculate the checksum on the plain text, so we must have already
* decrypted it, which means decrypt_checksum_use_count is
* off by one.
*/
security_uint32_le(use_count_le, rdp->decrypt_checksum_use_count - 1);
}
/* SHA1_Digest = SHA1(MACKeyN + pad1 + length + data) */
sha1 = crypto_sha1_init();
crypto_sha1_update(sha1, rdp->sign_key, rdp->rc4_key_len); /* MacKeyN */
crypto_sha1_update(sha1, pad1, sizeof(pad1)); /* pad1 */
crypto_sha1_update(sha1, length_le, sizeof(length_le)); /* length */
crypto_sha1_update(sha1, data, length); /* data */
crypto_sha1_update(sha1, use_count_le, sizeof(use_count_le)); /* encryptionCount */
crypto_sha1_final(sha1, sha1_digest);
/* MACSignature = First64Bits(MD5(MACKeyN + pad2 + SHA1_Digest)) */
md5 = crypto_md5_init();
crypto_md5_update(md5, rdp->sign_key, rdp->rc4_key_len); /* MacKeyN */
crypto_md5_update(md5, pad2, sizeof(pad2)); /* pad2 */
crypto_md5_update(md5, sha1_digest, sizeof(sha1_digest)); /* SHA1_Digest */
crypto_md5_final(md5, md5_digest);
memcpy(output, md5_digest, 8);
}
static void security_A(uint8* master_secret, uint8* client_random, uint8* server_random, uint8* output)
{
security_premaster_hash("A", 1, master_secret, client_random, server_random, &output[0]);
security_premaster_hash("BB", 2, master_secret, client_random, server_random, &output[16]);
security_premaster_hash("CCC", 3, master_secret, client_random, server_random, &output[32]);
}
static void security_X(uint8* master_secret, uint8* client_random, uint8* server_random, uint8* output)
{
security_premaster_hash("X", 1, master_secret, client_random, server_random, &output[0]);
security_premaster_hash("YY", 2, master_secret, client_random, server_random, &output[16]);
security_premaster_hash("ZZZ", 3, master_secret, client_random, server_random, &output[32]);
}
static void fips_expand_key_bits(uint8* in, uint8* out)
{
uint8 buf[21], c;
int i, b, p, r;
/* reverse every byte in the key */
for (i = 0; i < 21; i++)
buf[i] = fips_reverse_table[in[i]];
/* insert a zero-bit after every 7th bit */
for (i = 0, b = 0; i < 24; i++, b += 7)
{
p = b / 8;
r = b % 8;
if (r == 0)
{
out[i] = buf[p] & 0xfe;
}
else
{
/* c is accumulator */
c = buf[p] << r;
c |= buf[p + 1] >> (8 - r);
out[i] = c & 0xfe;
}
}
/* reverse every byte */
/* alter lsb so the byte has odd parity */
for (i = 0; i < 24; i++)
out[i] = fips_oddparity_table[fips_reverse_table[out[i]]];
}
boolean security_establish_keys(uint8* client_random, rdpRdp* rdp)
{
uint8 pre_master_secret[48];
uint8 master_secret[48];
uint8 session_key_blob[48];
uint8* server_random;
uint8 salt40[] = { 0xD1, 0x26, 0x9E };
rdpSettings* settings;
settings = rdp->settings;
server_random = settings->server_random->data;
if (settings->encryption_method == ENCRYPTION_METHOD_FIPS)
{
CryptoSha1 sha1;
uint8 client_encrypt_key_t[CRYPTO_SHA1_DIGEST_LENGTH + 1];
uint8 client_decrypt_key_t[CRYPTO_SHA1_DIGEST_LENGTH + 1];
printf("FIPS Compliant encryption level.\n");
/* disable fastpath input; it doesn't handle FIPS encryption yet */
rdp->settings->fastpath_input = false;
sha1 = crypto_sha1_init();
crypto_sha1_update(sha1, client_random + 16, 16);
crypto_sha1_update(sha1, server_random + 16, 16);
crypto_sha1_final(sha1, client_encrypt_key_t);
client_encrypt_key_t[20] = client_encrypt_key_t[0];
fips_expand_key_bits(client_encrypt_key_t, rdp->fips_encrypt_key);
sha1 = crypto_sha1_init();
crypto_sha1_update(sha1, client_random, 16);
crypto_sha1_update(sha1, server_random, 16);
crypto_sha1_final(sha1, client_decrypt_key_t);
client_decrypt_key_t[20] = client_decrypt_key_t[0];
fips_expand_key_bits(client_decrypt_key_t, rdp->fips_decrypt_key);
sha1 = crypto_sha1_init();
crypto_sha1_update(sha1, client_decrypt_key_t, 20);
crypto_sha1_update(sha1, client_encrypt_key_t, 20);
crypto_sha1_final(sha1, rdp->fips_sign_key);
}
memcpy(pre_master_secret, client_random, 24);
memcpy(pre_master_secret + 24, server_random, 24);
security_A(pre_master_secret, client_random, server_random, master_secret);
security_X(master_secret, client_random, server_random, session_key_blob);
memcpy(rdp->sign_key, session_key_blob, 16);
if (rdp->settings->server_mode) {
security_md5_16_32_32(&session_key_blob[16], client_random,
server_random, rdp->encrypt_key);
security_md5_16_32_32(&session_key_blob[32], client_random,
server_random, rdp->decrypt_key);
} else {
security_md5_16_32_32(&session_key_blob[16], client_random,
server_random, rdp->decrypt_key);
security_md5_16_32_32(&session_key_blob[32], client_random,
server_random, rdp->encrypt_key);
}
if (settings->encryption_method == 1) /* 40 and 56 bit */
{
memcpy(rdp->sign_key, salt40, 3); /* TODO 56 bit */
memcpy(rdp->decrypt_key, salt40, 3); /* TODO 56 bit */
memcpy(rdp->encrypt_key, salt40, 3); /* TODO 56 bit */
rdp->rc4_key_len = 8;
}
else if (settings->encryption_method == 2) /* 128 bit */
{
rdp->rc4_key_len = 16;
}
memcpy(rdp->decrypt_update_key, rdp->decrypt_key, 16);
memcpy(rdp->encrypt_update_key, rdp->encrypt_key, 16);
rdp->decrypt_use_count = 0;
rdp->decrypt_checksum_use_count = 0;
rdp->encrypt_use_count = 0;
rdp->encrypt_checksum_use_count = 0;
return true;
}
boolean security_key_update(uint8* key, uint8* update_key, int key_len)
{
uint8 sha1h[CRYPTO_SHA1_DIGEST_LENGTH];
CryptoMd5 md5;
CryptoSha1 sha1;
CryptoRc4 rc4;
uint8 salt40[] = { 0xD1, 0x26, 0x9E };
sha1 = crypto_sha1_init();
crypto_sha1_update(sha1, update_key, key_len);
crypto_sha1_update(sha1, pad1, sizeof(pad1));
crypto_sha1_update(sha1, key, key_len);
crypto_sha1_final(sha1, sha1h);
md5 = crypto_md5_init();
crypto_md5_update(md5, update_key, key_len);
crypto_md5_update(md5, pad2, sizeof(pad2));
crypto_md5_update(md5, sha1h, sizeof(sha1h));
crypto_md5_final(md5, key);
rc4 = crypto_rc4_init(key, key_len);
crypto_rc4(rc4, key_len, key, key);
crypto_rc4_free(rc4);
if (key_len == 8)
memcpy(key, salt40, 3); /* TODO 56 bit */
return true;
}
boolean security_encrypt(uint8* data, int length, rdpRdp* rdp)
{
if (rdp->encrypt_use_count >= 4096)
{
security_key_update(rdp->encrypt_key, rdp->encrypt_update_key, rdp->rc4_key_len);
crypto_rc4_free(rdp->rc4_encrypt_key);
rdp->rc4_encrypt_key = crypto_rc4_init(rdp->encrypt_key, rdp->rc4_key_len);
rdp->encrypt_use_count = 0;
}
crypto_rc4(rdp->rc4_encrypt_key, length, data, data);
rdp->encrypt_use_count++;
rdp->encrypt_checksum_use_count++;
return true;
}
boolean security_decrypt(uint8* data, int length, rdpRdp* rdp)
{
if (rdp->decrypt_key == NULL)
return false;
if (rdp->decrypt_use_count >= 4096)
{
security_key_update(rdp->decrypt_key, rdp->decrypt_update_key, rdp->rc4_key_len);
crypto_rc4_free(rdp->rc4_decrypt_key);
rdp->rc4_decrypt_key = crypto_rc4_init(rdp->decrypt_key, rdp->rc4_key_len);
rdp->decrypt_use_count = 0;
}
crypto_rc4(rdp->rc4_decrypt_key, length, data, data);
rdp->decrypt_use_count++;
rdp->decrypt_checksum_use_count++;
return true;
}
void security_hmac_signature(uint8* data, int length, uint8* output, rdpRdp* rdp)
{
uint8 buf[20];
uint8 use_count_le[4];
security_uint32_le(use_count_le, rdp->encrypt_use_count);
crypto_hmac_sha1_init(rdp->fips_hmac, rdp->fips_sign_key, 20);
crypto_hmac_update(rdp->fips_hmac, data, length);
crypto_hmac_update(rdp->fips_hmac, use_count_le, 4);
crypto_hmac_final(rdp->fips_hmac, buf, 20);
memmove(output, buf, 8);
}
boolean security_fips_encrypt(uint8* data, int length, rdpRdp* rdp)
{
crypto_des3_encrypt(rdp->fips_encrypt, length, data, data);
rdp->encrypt_use_count++;
return true;
}
boolean security_fips_decrypt(uint8* data, int length, rdpRdp* rdp)
{
crypto_des3_decrypt(rdp->fips_decrypt, length, data, data);
return true;
}
boolean security_fips_check_signature(uint8* data, int length, uint8* sig, rdpRdp* rdp)
{
uint8 buf[20];
uint8 use_count_le[4];
security_uint32_le(use_count_le, rdp->decrypt_use_count);
crypto_hmac_sha1_init(rdp->fips_hmac, rdp->fips_sign_key, 20);
crypto_hmac_update(rdp->fips_hmac, data, length);
crypto_hmac_update(rdp->fips_hmac, use_count_le, 4);
crypto_hmac_final(rdp->fips_hmac, buf, 20);
rdp->decrypt_use_count++;
if (memcmp(sig, buf, 8))
return false;
return true;
}
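
For reference, the SaltedHash construction documented in the comments above, MD5(Salt + SHA1(Input + Salt + Salt1 + Salt2)), can be reproduced in a few lines of Java with java.security.MessageDigest. This is only an illustrative sketch of the byte layout, not part of the code above; the class and method names are invented for the example.

import java.security.MessageDigest;

public class SaltedHashSketch {
    /* SaltedHash(Salt, Input, Salt1, Salt2) = MD5(Salt + SHA1(Input + Salt + Salt1 + Salt2)) */
    static byte[] saltedHash(byte[] salt, byte[] input, byte[] salt1, byte[] salt2) throws Exception {
        MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
        sha1.update(input);      // Input
        sha1.update(salt);       // Salt (48 bytes)
        sha1.update(salt1);      // Salt1 (32 bytes)
        sha1.update(salt2);      // Salt2 (32 bytes)
        byte[] sha1Digest = sha1.digest();

        MessageDigest md5 = MessageDigest.getInstance("MD5");
        md5.update(salt);        // Salt (48 bytes)
        md5.update(sha1Digest);  // SHA1_Digest (20 bytes)
        return md5.digest();     // 16-byte result
    }
}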
|
Java
|
./clean.sh
javac -d build/modules \
--module-source-path src \
`find src -name "*.java"`
jar --create --file=mlib/com.acme.bids.db@1.0.jar \
--module-version=1.0 -C build/modules/com.acme.bids.db .
jar --create --file=mlib/com.acme.bids.service@1.0.jar \
--module-version=1.0 \
--main-class=com.acme.bids.service.api.UserService \
-C build/modules/com.acme.bids.service .
jar --create --file=mlib/com.acme.bids.app@1.0.jar \
--module-version=1.0 \
--main-class=com.acme.bids.app.App \
-C build/modules/com.acme.bids.app .
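
The javac/jar invocations above assume module declarations under src/<module-name>/module-info.java. A hypothetical descriptor for com.acme.bids.app is sketched below; the module names come from the script, but the requires clause is an assumption added for illustration. With the three jars in mlib, the app could then be launched with java --module-path mlib -m com.acme.bids.app, since its main class is recorded in the jar.

// Hypothetical src/com.acme.bids.app/module-info.java; the dependency on the
// service module is assumed for illustration and is not taken from the script.
module com.acme.bids.app {
    requires com.acme.bids.service;
}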
|
Java
|
/**
* Copyright 2020 LinkedIn Corp. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
package com.github.ambry.accountstats;
import com.codahale.metrics.MetricRegistry;
import com.github.ambry.config.AccountStatsMySqlConfig;
import com.github.ambry.config.ClusterMapConfig;
import com.github.ambry.config.VerifiableProperties;
import com.github.ambry.server.HostAccountStorageStatsWrapper;
import com.github.ambry.server.HostPartitionClassStorageStatsWrapper;
import com.github.ambry.server.StatsHeader;
import com.github.ambry.server.StatsReportType;
import com.github.ambry.server.StatsSnapshot;
import com.github.ambry.server.StatsWrapper;
import com.github.ambry.server.StorageStatsUtil;
import com.github.ambry.server.StorageStatsUtilTest;
import com.github.ambry.server.storagestats.AggregatedAccountStorageStats;
import com.github.ambry.server.storagestats.AggregatedPartitionClassStorageStats;
import com.github.ambry.server.storagestats.ContainerStorageStats;
import com.github.ambry.server.storagestats.HostAccountStorageStats;
import com.github.ambry.server.storagestats.HostPartitionClassStorageStats;
import com.github.ambry.utils.Pair;
import com.github.ambry.utils.TestUtils;
import com.github.ambry.utils.Utils;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Random;
import java.util.Set;
import java.util.stream.Collectors;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import static org.junit.Assert.*;
/**
* Integration tests for {@link AccountStatsMySqlStore}.
*/
@RunWith(Parameterized.class)
public class AccountStatsMySqlStoreIntegrationTest {
private static final String clusterName1 = "Ambry-test";
private static final String clusterName2 = "Ambry-random";
// hostname1 and hostname2 are the same, but with different port numbers
private static final String hostname1 = "ambry1.test.github.com";
private static final String hostname2 = "ambry1.test.github.com";
private static final String hostname3 = "ambry3.test.github.com";
private static final int port1 = 12345;
private static final int port2 = 12346;
private static final int port3 = 12347;
private final int batchSize;
private final AccountStatsMySqlStore mySqlStore;
@Parameterized.Parameters
public static List<Object[]> data() {
return Arrays.asList(new Object[][]{{0}, {17}});
}
public AccountStatsMySqlStoreIntegrationTest(int batchSize) throws Exception {
this.batchSize = batchSize;
mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
}
@Before
public void before() throws Exception {
mySqlStore.cleanupTables();
}
@After
public void after() {
mySqlStore.shutdown();
}
/**
* Tests to store multiple stats for multiple hosts and recover stats from database.
* @throws Exception
*/
@Test
public void testMultiStoreStats() throws Exception {
AccountStatsMySqlStore mySqlStore1 = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
AccountStatsMySqlStore mySqlStore2 = createAccountStatsMySqlStore(clusterName1, hostname2, port2);
AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
// Generating StatsWrappers, store StatsWrappers and retrieve StatsWrappers
StatsWrapper stats1 = generateStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
StatsWrapper stats2 = generateStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
StatsWrapper stats3 = generateStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
mySqlStore1.storeAccountStats(stats1);
mySqlStore2.storeAccountStats(stats2);
mySqlStore3.storeAccountStats(stats3);
assertTableSize(mySqlStore1, 3 * 10 * 10);
StatsWrapper obtainedStats1 = mySqlStore1.queryAccountStatsByHost(hostname1, port1);
StatsWrapper obtainedStats2 = mySqlStore2.queryAccountStatsByHost(hostname2, port2);
StatsWrapper obtainedStats3 = mySqlStore3.queryAccountStatsByHost(hostname3, port3);
assertTwoStatsSnapshots(obtainedStats1.getSnapshot(), stats1.getSnapshot());
assertTwoStatsSnapshots(obtainedStats2.getSnapshot(), stats2.getSnapshot());
assertTwoStatsSnapshots(obtainedStats3.getSnapshot(), stats3.getSnapshot());
// Generating HostAccountStorageStatsWrappers, store and retrieve them
HostAccountStorageStatsWrapper hostStats1 =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
HostAccountStorageStatsWrapper hostStats2 =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
HostAccountStorageStatsWrapper hostStats3 =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
mySqlStore1.storeHostAccountStorageStats(hostStats1);
mySqlStore2.storeHostAccountStorageStats(hostStats2);
mySqlStore3.storeHostAccountStorageStats(hostStats3);
HostAccountStorageStatsWrapper obtainedHostStats1 =
mySqlStore1.queryHostAccountStorageStatsByHost(hostname1, port1);
HostAccountStorageStatsWrapper obtainedHostStats2 =
mySqlStore2.queryHostAccountStorageStatsByHost(hostname2, port2);
HostAccountStorageStatsWrapper obtainedHostStats3 =
mySqlStore3.queryHostAccountStorageStatsByHost(hostname3, port3);
assertEquals(hostStats1.getStats().getStorageStats(), obtainedHostStats1.getStats().getStorageStats());
assertEquals(hostStats2.getStats().getStorageStats(), obtainedHostStats2.getStats().getStorageStats());
assertEquals(hostStats3.getStats().getStorageStats(), obtainedHostStats3.getStats().getStorageStats());
// Retrieve StatsWrappers
obtainedStats1 = mySqlStore1.queryAccountStatsByHost(hostname1, port1);
StatsSnapshot converted =
StorageStatsUtil.convertHostAccountStorageStatsToStatsSnapshot(hostStats1.getStats(), false);
assertTwoStatsSnapshots(converted, obtainedStats1.getSnapshot());
mySqlStore1.shutdown();
mySqlStore2.shutdown();
mySqlStore3.shutdown();
}
@Test
public void testEmptyStatsWhenReadingPreviousStatsFromMysqlDb() throws Exception {
// Write new stats into the database.
AccountStatsMySqlStore mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
HostAccountStorageStatsWrapper stats =
generateHostAccountStorageStatsWrapper(1, 1, 1, StatsReportType.ACCOUNT_REPORT);
mySqlStore.storeHostAccountStorageStats(stats);
HostAccountStorageStatsWrapper obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertTrue(obtainedStats.getStats().getStorageStats().containsKey((long) 0));
// Reinitialize the mySqlStore and write new stats for the same partition.
mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
assertTrue(
mySqlStore.getPreviousHostAccountStorageStatsWrapper().getStats().getStorageStats().containsKey((long) 0));
HostAccountStorageStatsWrapper stats2 =
generateHostAccountStorageStatsWrapper(0, 0, 0, StatsReportType.ACCOUNT_REPORT);
Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> newStorageStats =
new HashMap<>(stats2.getStats().getStorageStats());
newStorageStats.put((long) 0,
new HashMap<>()); // Clearing partition 0's storage stats removes the entire partition from the database
mySqlStore.storeHostAccountStorageStats(
new HostAccountStorageStatsWrapper(stats2.getHeader(), new HostAccountStorageStats(newStorageStats)));
// Empty stats for partition 0 should remove its data from the database
obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertFalse(obtainedStats.getStats().getStorageStats().containsKey((long) 0));
}
@Test
public void testEmptyStatsWhenReadingPreviousStatsFromLocalBackUpFile() throws Exception {
AccountStatsMySqlStore mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
HostAccountStorageStatsWrapper stats =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> newStorageStats =
new HashMap<>(stats.getStats().getStorageStats());
newStorageStats.put((long) 10, new HashMap<>());
mySqlStore.storeHostAccountStorageStats(
new HostAccountStorageStatsWrapper(stats.getHeader(), new HostAccountStorageStats(newStorageStats)));
HostAccountStorageStatsWrapper obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertFalse(obtainedStats.getStats().getStorageStats().containsKey((long) 10));
// Write a new stats with partition 10 still empty
HostAccountStorageStatsWrapper stats2 =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
newStorageStats = new HashMap<>(stats.getStats().getStorageStats());
newStorageStats.put((long) 10, new HashMap<>());
mySqlStore.storeHostAccountStorageStats(
new HostAccountStorageStatsWrapper(stats2.getHeader(), new HostAccountStorageStats(newStorageStats)));
HostAccountStorageStatsWrapper obtainedStats2 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertFalse(obtainedStats2.getStats().getStorageStats().containsKey((long) 10));
// Write a new stats with partition 10 not empty
HostAccountStorageStatsWrapper stats3 =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
newStorageStats = new HashMap<>(stats.getStats().getStorageStats());
newStorageStats.put((long) 10, stats.getStats().getStorageStats().get((long) 1));
mySqlStore.storeHostAccountStorageStats(
new HostAccountStorageStatsWrapper(stats3.getHeader(), new HostAccountStorageStats(newStorageStats)));
HostAccountStorageStatsWrapper obtainedStats3 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertTrue(obtainedStats3.getStats().getStorageStats().containsKey((long) 10));
// Write an empty HostAccountStorageStats
mySqlStore.storeHostAccountStorageStats(
new HostAccountStorageStatsWrapper(stats3.getHeader(), new HostAccountStorageStats()));
// Empty storage stats should remove all the data in the database
HostAccountStorageStatsWrapper obtainedStats4 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertTrue(obtainedStats4.getStats().getStorageStats().isEmpty());
// Write an empty HostAccountStorageStats again
mySqlStore.storeHostAccountStorageStats(
new HostAccountStorageStatsWrapper(stats3.getHeader(), new HostAccountStorageStats()));
HostAccountStorageStatsWrapper obtainedStats5 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertTrue(obtainedStats5.getStats().getStorageStats().isEmpty());
HostAccountStorageStatsWrapper stats6 =
generateHostAccountStorageStatsWrapper(20, 20, 20, StatsReportType.ACCOUNT_REPORT);
mySqlStore.storeHostAccountStorageStats(stats6);
HostAccountStorageStatsWrapper obtainedStats6 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(obtainedStats6.getStats().getStorageStats(), stats6.getStats().getStorageStats());
mySqlStore.shutdown();
}
/**
* Test to delete partition, account and container data from database
* @throws Exception
*/
@Test
public void testStatsDeletePartitionAccountContainer() throws Exception {
AccountStatsMySqlStore mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
HostAccountStorageStatsWrapper stats =
generateHostAccountStorageStatsWrapper(10, 10, 10, StatsReportType.ACCOUNT_REPORT);
mySqlStore.storeHostAccountStorageStats(stats);
// Now remove one partition from stats
HostAccountStorageStats storageStatsCopy = new HostAccountStorageStats(stats.getStats());
Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> newStorageStatsMap =
new HashMap<>(storageStatsCopy.getStorageStats());
newStorageStatsMap.remove((long) 1);
HostAccountStorageStatsWrapper stats2 = new HostAccountStorageStatsWrapper(new StatsHeader(stats.getHeader()),
new HostAccountStorageStats(newStorageStatsMap));
mySqlStore.storeHostAccountStorageStats(stats2);
HostAccountStorageStatsWrapper obtainedStats2 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(obtainedStats2.getStats().getStorageStats(), stats2.getStats().getStorageStats());
// Now remove one account from stats
storageStatsCopy = new HostAccountStorageStats(stats2.getStats());
newStorageStatsMap = new HashMap<>(storageStatsCopy.getStorageStats());
newStorageStatsMap.get((long) 3).remove((short) 1);
HostAccountStorageStatsWrapper stats3 = new HostAccountStorageStatsWrapper(new StatsHeader(stats2.getHeader()),
new HostAccountStorageStats(newStorageStatsMap));
mySqlStore.storeHostAccountStorageStats(stats3);
HostAccountStorageStatsWrapper obtainedStats3 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(obtainedStats3.getStats().getStorageStats(), stats3.getStats().getStorageStats());
// Now remove some containers
storageStatsCopy = new HostAccountStorageStats(stats3.getStats());
newStorageStatsMap = new HashMap<>(storageStatsCopy.getStorageStats());
for (short containerId : new short[]{0, 1, 2}) {
newStorageStatsMap.get((long) 3).get((short) 3).remove(containerId);
}
HostAccountStorageStatsWrapper stats4 = new HostAccountStorageStatsWrapper(new StatsHeader(stats3.getHeader()),
new HostAccountStorageStats(newStorageStatsMap));
mySqlStore.storeHostAccountStorageStats(stats4);
HostAccountStorageStatsWrapper obtainedStats4 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(obtainedStats4.getStats().getStorageStats(), stats4.getStats().getStorageStats());
// Now write the stats back
stats = generateHostAccountStorageStatsWrapper(10, 10, 10, StatsReportType.ACCOUNT_REPORT);
mySqlStore.storeHostAccountStorageStats(stats);
HostAccountStorageStatsWrapper obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(stats.getStats().getStorageStats(), obtainedStats.getStats().getStorageStats());
mySqlStore.shutdown();
}
/**
* Tests to store multiple stats for one host and recover stats from the database.
* @throws Exception
*/
@Test
public void testStoreMultipleWrites() throws Exception {
AccountStatsMySqlStore mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
HostAccountStorageStatsWrapper stats1 =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
mySqlStore.storeHostAccountStorageStats(stats1);
HostAccountStorageStats hostAccountStorageStatsCopy = new HostAccountStorageStats(stats1.getStats());
Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> newStorageStats =
new HashMap<>(hostAccountStorageStatsCopy.getStorageStats());
ContainerStorageStats origin = newStorageStats.get((long) 0).get((short) 0).get((short) 0);
newStorageStats.get((long) 0)
.get((short) 0)
.put((short) 0,
new ContainerStorageStats.Builder(origin).logicalStorageUsage(origin.getLogicalStorageUsage() + 1).build());
HostAccountStorageStatsWrapper stats2 = new HostAccountStorageStatsWrapper(new StatsHeader(stats1.getHeader()),
new HostAccountStorageStats(newStorageStats));
mySqlStore.storeHostAccountStorageStats(stats2);
HostAccountStorageStatsWrapper obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(stats2.getStats().getStorageStats(), obtainedStats.getStats().getStorageStats());
hostAccountStorageStatsCopy = new HostAccountStorageStats(stats1.getStats());
newStorageStats = new HashMap<>(hostAccountStorageStatsCopy.getStorageStats());
origin = newStorageStats.get((long) 0).get((short) 0).get((short) 0);
newStorageStats.get((long) 0)
.get((short) 0)
.put((short) 0,
new ContainerStorageStats.Builder(origin).physicalStorageUsage(origin.getPhysicalStorageUsage() + 1)
.build());
HostAccountStorageStatsWrapper stats3 = new HostAccountStorageStatsWrapper(new StatsHeader(stats1.getHeader()),
new HostAccountStorageStats(newStorageStats));
mySqlStore.storeHostAccountStorageStats(stats3);
obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(stats3.getStats().getStorageStats(), obtainedStats.getStats().getStorageStats());
hostAccountStorageStatsCopy = new HostAccountStorageStats(stats1.getStats());
newStorageStats = new HashMap<>(hostAccountStorageStatsCopy.getStorageStats());
origin = newStorageStats.get((long) 0).get((short) 0).get((short) 0);
newStorageStats.get((long) 0)
.get((short) 0)
.put((short) 0, new ContainerStorageStats.Builder(origin).numberOfBlobs(origin.getNumberOfBlobs() + 1).build());
HostAccountStorageStatsWrapper stats4 = new HostAccountStorageStatsWrapper(new StatsHeader(stats1.getHeader()),
new HostAccountStorageStats(newStorageStats));
mySqlStore.storeHostAccountStorageStats(stats4);
obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(stats4.getStats().getStorageStats(), obtainedStats.getStats().getStorageStats());
mySqlStore.shutdown();
}
/**
* Test the methods for storing, deleting and fetch aggregated account stats.
* @throws Exception
*/
@Test
public void testAggregatedAccountStats() throws Exception {
Map<String, Map<String, Long>> containerStorageUsages = TestUtils.makeStorageMap(10, 10, 100000, 1000);
StatsSnapshot snapshot = TestUtils.makeAccountStatsSnapshotFromContainerStorageMap(containerStorageUsages);
mySqlStore.storeAggregatedAccountStats(snapshot);
Map<String, Map<String, Long>> obtainedContainerStorageUsages = mySqlStore.queryAggregatedAccountStats(false);
assertEquals(containerStorageUsages, obtainedContainerStorageUsages);
StatsSnapshot obtainedSnapshot = mySqlStore.queryAggregatedAccountStatsByClusterName(clusterName1);
assertEquals(snapshot, obtainedSnapshot);
// Fetching aggregated account stats for clustername2 should result in empty stats
assertEquals(mySqlStore.queryAggregatedAccountStatsByClusterName(clusterName2).getSubMap().size(), 0);
// Change one value and store it to mysql database again
StatsSnapshot newSnapshot = new StatsSnapshot(snapshot);
newSnapshot.getSubMap()
.get(Utils.statsAccountKey((short) 1))
.getSubMap()
.get(Utils.statsContainerKey((short) 1))
.setValue(1);
newSnapshot.updateValue();
containerStorageUsages.get("1").put("1", 1L);
mySqlStore.storeAggregatedAccountStats(newSnapshot);
obtainedContainerStorageUsages = mySqlStore.queryAggregatedAccountStats(false);
assertEquals(containerStorageUsages, obtainedContainerStorageUsages);
// Delete account and container
newSnapshot = new StatsSnapshot(newSnapshot);
newSnapshot.getSubMap().remove(Utils.statsAccountKey((short) 1));
newSnapshot.getSubMap()
.get(Utils.statsAccountKey((short) 2))
.getSubMap()
.remove(Utils.statsContainerKey((short) 1));
newSnapshot.updateValue();
// Now remove all containers for account 1 and container 1 of account 2
for (String containerId : containerStorageUsages.get(String.valueOf(1)).keySet()) {
mySqlStore.deleteAggregatedAccountStatsForContainer((short) 1, Short.valueOf(containerId));
}
mySqlStore.deleteAggregatedAccountStatsForContainer((short) 2, (short) 1);
obtainedSnapshot = mySqlStore.queryAggregatedAccountStatsByClusterName(clusterName1);
assertEquals(newSnapshot, obtainedSnapshot);
mySqlStore.shutdown();
}
/**
* Test the methods for storing, deleting and fetch aggregated account storage stats.
* @throws Exception
*/
@Test
public void testAggregatedAccountStorageStats() throws Exception {
AggregatedAccountStorageStats aggregatedAccountStorageStats = new AggregatedAccountStorageStats(
StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 10, 10, 10000L, 2, 10));
mySqlStore.storeAggregatedAccountStorageStats(aggregatedAccountStorageStats);
// Compare container usage map
Map<String, Map<String, Long>> obtainedContainerStorageUsages = mySqlStore.queryAggregatedAccountStats(false);
assertEquals(StorageStatsUtil.convertAggregatedAccountStorageStatsToMap(aggregatedAccountStorageStats, false),
obtainedContainerStorageUsages);
// Compare StatsSnapshot
StatsSnapshot obtainedSnapshot = mySqlStore.queryAggregatedAccountStatsByClusterName(clusterName1);
assertEquals(
StorageStatsUtil.convertAggregatedAccountStorageStatsToStatsSnapshot(aggregatedAccountStorageStats, false),
obtainedSnapshot);
// Compare AggregatedAccountStorageStats
AggregatedAccountStorageStats obtainedStats = mySqlStore.queryAggregatedAccountStorageStats();
assertEquals(aggregatedAccountStorageStats.getStorageStats(), obtainedStats.getStorageStats());
obtainedStats = mySqlStore.queryAggregatedAccountStorageStatsByClusterName(clusterName1);
assertEquals(aggregatedAccountStorageStats.getStorageStats(), obtainedStats.getStorageStats());
// Fetching aggregated account stats for clusterName2 should result in empty stats
assertEquals(mySqlStore.queryAggregatedAccountStatsByClusterName(clusterName2).getSubMap().size(), 0);
assertEquals(mySqlStore.queryAggregatedAccountStorageStatsByClusterName(clusterName2).getStorageStats().size(), 0);
// Change one value and store it to mysql database again
Map<Short, Map<Short, ContainerStorageStats>> newStorageStatsMap =
new HashMap<>(aggregatedAccountStorageStats.getStorageStats());
ContainerStorageStats origin = newStorageStatsMap.get((short) 1).get((short) 1);
newStorageStatsMap.get((short) 1)
.put((short) 1,
new ContainerStorageStats.Builder(origin).logicalStorageUsage(origin.getLogicalStorageUsage() + 1).build());
aggregatedAccountStorageStats = new AggregatedAccountStorageStats(newStorageStatsMap);
mySqlStore.storeAggregatedAccountStorageStats(aggregatedAccountStorageStats);
obtainedStats = mySqlStore.queryAggregatedAccountStorageStats();
assertEquals(newStorageStatsMap, obtainedStats.getStorageStats());
// Delete account and container
newStorageStatsMap = new HashMap<>(aggregatedAccountStorageStats.getStorageStats());
newStorageStatsMap.remove((short) 1);
newStorageStatsMap.get((short) 2).remove((short) 1);
// Now remove all containers for account 1 and container 1 of account 2
for (short containerId : aggregatedAccountStorageStats.getStorageStats().get((short) 1).keySet()) {
mySqlStore.deleteAggregatedAccountStatsForContainer((short) 1, containerId);
}
mySqlStore.deleteAggregatedAccountStatsForContainer((short) 2, (short) 1);
obtainedStats = mySqlStore.queryAggregatedAccountStorageStatsByClusterName(clusterName1);
assertEquals(newStorageStatsMap, obtainedStats.getStorageStats());
mySqlStore.shutdown();
}
/**
* Test methods to store, delete and fetch monthly aggregated stats
* @throws Exception
*/
@Test
public void testMonthlyAggregatedStats() throws Exception {
String monthValue = "2020-01";
AggregatedAccountStorageStats currentAggregatedStats = mySqlStore.queryAggregatedAccountStorageStats();
if (currentAggregatedStats.getStorageStats().size() == 0) {
AggregatedAccountStorageStats aggregatedAccountStorageStats = new AggregatedAccountStorageStats(
StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 10, 10, 10000L, 2, 10));
mySqlStore.storeAggregatedAccountStorageStats(aggregatedAccountStorageStats);
currentAggregatedStats = mySqlStore.queryAggregatedAccountStorageStats();
}
// Fetch the month; it should return an empty string
Assert.assertEquals("", mySqlStore.queryRecordedMonth());
mySqlStore.takeSnapshotOfAggregatedAccountStatsAndUpdateMonth(monthValue);
Map<String, Map<String, Long>> monthlyContainerStorageUsages = mySqlStore.queryMonthlyAggregatedAccountStats(false);
assertEquals(StorageStatsUtil.convertAggregatedAccountStorageStatsToMap(currentAggregatedStats, false),
monthlyContainerStorageUsages);
String obtainedMonthValue = mySqlStore.queryRecordedMonth();
assertTrue(obtainedMonthValue.equals(monthValue));
// Change the value and store it back to mysql database
monthValue = "2020-02";
currentAggregatedStats = new AggregatedAccountStorageStats(
StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 10, 10, 10000L, 2, 10));
mySqlStore.storeAggregatedAccountStorageStats(currentAggregatedStats);
mySqlStore.takeSnapshotOfAggregatedAccountStatsAndUpdateMonth(monthValue);
monthlyContainerStorageUsages = mySqlStore.queryMonthlyAggregatedAccountStats(false);
assertEquals(StorageStatsUtil.convertAggregatedAccountStorageStatsToMap(currentAggregatedStats, false),
monthlyContainerStorageUsages);
obtainedMonthValue = mySqlStore.queryRecordedMonth();
assertTrue(obtainedMonthValue.equals(monthValue));
// Delete the snapshots
mySqlStore.deleteSnapshotOfAggregatedAccountStats();
assertTrue(mySqlStore.queryMonthlyAggregatedAccountStats(false).isEmpty());
}
/**
* Test methods to store and fetch partition class names, partition name/id mappings and partition class stats.
* @throws Exception
*/
@Test
public void testHostPartitionClassStats() throws Exception {
// First write some stats to account reports
testMultiStoreStats();
StatsWrapper accountStats1 = mySqlStore.queryAccountStatsByHost(hostname1, port1);
StatsWrapper accountStats2 = mySqlStore.queryAccountStatsByHost(hostname2, port2);
AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
StatsWrapper accountStats3 = mySqlStore3.queryAccountStatsByHost(hostname3, port3);
// From these account stats, create partition class stats
Set<String> allPartitionKeys = new HashSet<String>() {
{
addAll(accountStats1.getSnapshot().getSubMap().keySet());
addAll(accountStats2.getSnapshot().getSubMap().keySet());
addAll(accountStats3.getSnapshot().getSubMap().keySet());
}
};
List<String> partitionClassNames = Arrays.asList("default", "new");
Map<String, String> partitionKeyToClassName = new HashMap<>();
int ind = 0;
for (String partitionKey : allPartitionKeys) {
partitionKeyToClassName.put(partitionKey, partitionClassNames.get(ind % partitionClassNames.size()));
ind++;
}
StatsWrapper partitionClassStats1 =
convertAccountStatsToPartitionClassStats(accountStats1, partitionKeyToClassName);
StatsWrapper partitionClassStats2 =
convertAccountStatsToPartitionClassStats(accountStats2, partitionKeyToClassName);
StatsWrapper partitionClassStats3 =
convertAccountStatsToPartitionClassStats(accountStats3, partitionKeyToClassName);
mySqlStore.storePartitionClassStats(partitionClassStats1);
mySqlStore.storePartitionClassStats(partitionClassStats2);
mySqlStore3.storePartitionClassStats(partitionClassStats3);
Map<String, Set<Integer>> partitionNameAndIds = mySqlStore.queryPartitionNameAndIds();
assertEquals(new HashSet<>(partitionClassNames), partitionNameAndIds.keySet());
Map<String, String> dbPartitionKeyToClassName = partitionNameAndIds.entrySet()
.stream()
.flatMap(
ent -> ent.getValue().stream().map(pid -> new Pair<String, String>(ent.getKey(), "Partition[" + pid + "]")))
.collect(Collectors.toMap(Pair::getSecond, Pair::getFirst));
assertEquals(partitionKeyToClassName, dbPartitionKeyToClassName);
StatsWrapper obtainedStats1 = mySqlStore.queryPartitionClassStatsByHost(hostname1, port1, partitionNameAndIds);
assertEquals(partitionClassStats1.getSnapshot(), obtainedStats1.getSnapshot());
StatsWrapper obtainedStats2 = mySqlStore.queryPartitionClassStatsByHost(hostname2, port2, partitionNameAndIds);
assertEquals(partitionClassStats2.getSnapshot(), obtainedStats2.getSnapshot());
StatsWrapper obtainedStats3 = mySqlStore3.queryPartitionClassStatsByHost(hostname3, port3, partitionNameAndIds);
assertEquals(partitionClassStats3.getSnapshot(), obtainedStats3.getSnapshot());
mySqlStore3.shutdown();
}
/**
* Test methods to store and fetch partition class names, partition name/id mappings and partition class storage stats.
* @throws Exception
*/
@Test
public void testHostPartitionClassStorageStats() throws Exception {
// First write some stats to account reports
testMultiStoreStats();
HostAccountStorageStatsWrapper accountStats1 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
HostAccountStorageStatsWrapper accountStats2 = mySqlStore.queryHostAccountStorageStatsByHost(hostname2, port2);
AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
HostAccountStorageStatsWrapper accountStats3 = mySqlStore3.queryHostAccountStorageStatsByHost(hostname3, port3);
// From these account stats, create partition class storage stats
Set<Long> allPartitionKeys = new HashSet<Long>() {
{
addAll(accountStats1.getStats().getStorageStats().keySet());
addAll(accountStats2.getStats().getStorageStats().keySet());
addAll(accountStats3.getStats().getStorageStats().keySet());
}
};
List<String> partitionClassNames = Arrays.asList("default", "new");
Map<Long, String> partitionIdToClassName = new HashMap<>();
int ind = 0;
for (long partitionId : allPartitionKeys) {
partitionIdToClassName.put(partitionId, partitionClassNames.get(ind % partitionClassNames.size()));
ind++;
}
HostPartitionClassStorageStatsWrapper partitionClassStats1 =
convertHostAccountStorageStatsToHostPartitionClassStorageStats(accountStats1, partitionIdToClassName);
HostPartitionClassStorageStatsWrapper partitionClassStats2 =
convertHostAccountStorageStatsToHostPartitionClassStorageStats(accountStats2, partitionIdToClassName);
HostPartitionClassStorageStatsWrapper partitionClassStats3 =
convertHostAccountStorageStatsToHostPartitionClassStorageStats(accountStats3, partitionIdToClassName);
mySqlStore.storeHostPartitionClassStorageStats(partitionClassStats1);
mySqlStore.storeHostPartitionClassStorageStats(partitionClassStats2);
mySqlStore3.storeHostPartitionClassStorageStats(partitionClassStats3);
Map<String, Set<Integer>> partitionNameAndIds = mySqlStore.queryPartitionNameAndIds();
assertEquals(new HashSet<>(partitionClassNames), partitionNameAndIds.keySet());
Map<Long, String> dbPartitionKeyToClassName = partitionNameAndIds.entrySet()
.stream()
.flatMap(ent -> ent.getValue().stream().map(pid -> new Pair<>(ent.getKey(), (long) pid)))
.collect(Collectors.toMap(Pair::getSecond, Pair::getFirst));
assertEquals(partitionIdToClassName, dbPartitionKeyToClassName);
// Fetch HostPartitionClassStorageStats
HostPartitionClassStorageStatsWrapper obtainedStats1 =
mySqlStore.queryHostPartitionClassStorageStatsByHost(hostname1, port1, partitionNameAndIds);
assertEquals(partitionClassStats1.getStats().getStorageStats(), obtainedStats1.getStats().getStorageStats());
HostPartitionClassStorageStatsWrapper obtainedStats2 =
mySqlStore.queryHostPartitionClassStorageStatsByHost(hostname2, port2, partitionNameAndIds);
assertEquals(partitionClassStats2.getStats().getStorageStats(), obtainedStats2.getStats().getStorageStats());
HostPartitionClassStorageStatsWrapper obtainedStats3 =
mySqlStore3.queryHostPartitionClassStorageStatsByHost(hostname3, port3, partitionNameAndIds);
assertEquals(partitionClassStats3.getStats().getStorageStats(), obtainedStats3.getStats().getStorageStats());
// Fetch StatsSnapshot
StatsWrapper obtainedStats = mySqlStore.queryPartitionClassStatsByHost(hostname1, port1, partitionNameAndIds);
assertEquals(
StorageStatsUtil.convertHostPartitionClassStorageStatsToStatsSnapshot(obtainedStats1.getStats(), false),
obtainedStats.getSnapshot());
mySqlStore3.shutdown();
}
/**
* Test methods to store, delete and fetch aggregated partition class stats.
* @throws Exception
*/
@Test
public void testAggregatedPartitionClassStats() throws Exception {
testHostPartitionClassStats();
Map<String, Set<Integer>> partitionNameAndIds = mySqlStore.queryPartitionNameAndIds();
AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
// Now we should have partition class names and partition ids in database
// Construct an aggregated partition class report
StatsSnapshot aggregated =
TestUtils.makeAggregatedPartitionClassStats(partitionNameAndIds.keySet().toArray(new String[0]), 10, 10);
mySqlStore.storeAggregatedPartitionClassStats(aggregated);
partitionNameAndIds = mySqlStore3.queryPartitionNameAndIds();
StatsSnapshot aggregated3 =
TestUtils.makeAggregatedPartitionClassStats(partitionNameAndIds.keySet().toArray(new String[0]), 10, 10);
mySqlStore3.storeAggregatedPartitionClassStats(aggregated3);
StatsSnapshot obtained = mySqlStore.queryAggregatedPartitionClassStats();
assertEquals(aggregated, obtained);
assertEquals(mySqlStore.queryAggregatedPartitionClassStatsByClusterName("random-cluster").getSubMap().size(), 0);
StatsSnapshot obtained3 = mySqlStore3.queryAggregatedPartitionClassStats();
assertEquals(aggregated3, obtained3);
// Change one value and store it to mysql database again
StatsSnapshot newSnapshot = new StatsSnapshot(aggregated);
newSnapshot.getSubMap()
.get("default")
.getSubMap()
.get(Utils.partitionClassStatsAccountContainerKey((short) 1, (short) 1))
.setValue(1);
newSnapshot.updateValue();
mySqlStore.storeAggregatedPartitionClassStats(newSnapshot);
obtained = mySqlStore.queryAggregatedPartitionClassStats();
assertEquals(newSnapshot, obtained);
// Delete some account and container
newSnapshot = new StatsSnapshot(newSnapshot);
short accountId = (short) 1;
short containerId = (short) 1;
String accountContainerKey = Utils.partitionClassStatsAccountContainerKey(accountId, containerId);
for (String partitionClassName : partitionNameAndIds.keySet()) {
mySqlStore.deleteAggregatedPartitionClassStatsForAccountContainer(partitionClassName, accountId, containerId);
newSnapshot.getSubMap().get(partitionClassName).getSubMap().remove(accountContainerKey);
}
newSnapshot.updateValue();
obtained = mySqlStore.queryAggregatedPartitionClassStats();
assertEquals(newSnapshot, obtained);
mySqlStore3.shutdown();
}
@Test
public void testAggregatedPartitionClassStorageStats() throws Exception {
testHostPartitionClassStorageStats();
Map<String, Set<Integer>> partitionNameAndIds = mySqlStore.queryPartitionNameAndIds();
AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
// Now we should have partition class names and partition ids in database
// Construct an aggregated partition class report
AggregatedPartitionClassStorageStats aggregatedStats = new AggregatedPartitionClassStorageStats(
StorageStatsUtilTest.generateRandomAggregatedPartitionClassStorageStats(
partitionNameAndIds.keySet().toArray(new String[0]), (short) 0, 10, 10, 10000L, 2, 10));
mySqlStore.storeAggregatedPartitionClassStorageStats(aggregatedStats);
partitionNameAndIds = mySqlStore3.queryPartitionNameAndIds();
AggregatedPartitionClassStorageStats aggregatedStats3 = new AggregatedPartitionClassStorageStats(
StorageStatsUtilTest.generateRandomAggregatedPartitionClassStorageStats(
partitionNameAndIds.keySet().toArray(new String[0]), (short) 0, 10, 10, 10000L, 2, 10));
mySqlStore3.storeAggregatedPartitionClassStorageStats(aggregatedStats3);
AggregatedPartitionClassStorageStats obtained = mySqlStore.queryAggregatedPartitionClassStorageStats();
assertEquals(aggregatedStats.getStorageStats(), obtained.getStorageStats());
assertEquals(
mySqlStore.queryAggregatedPartitionClassStorageStatsByClusterName("random-cluster").getStorageStats().size(),
0);
AggregatedPartitionClassStorageStats obtained3 = mySqlStore3.queryAggregatedPartitionClassStorageStats();
assertEquals(aggregatedStats3.getStorageStats(), obtained3.getStorageStats());
// Fetch StatsSnapshot
StatsSnapshot obtainedSnapshot = mySqlStore.queryAggregatedPartitionClassStats();
assertEquals(StorageStatsUtil.convertAggregatedPartitionClassStorageStatsToStatsSnapshot(obtained, false),
obtainedSnapshot);
// Change one value and store it to mysql database again
Map<String, Map<Short, Map<Short, ContainerStorageStats>>> newStorageStatsMap =
new HashMap<>(aggregatedStats.getStorageStats());
ContainerStorageStats origin = newStorageStatsMap.get("default").get((short) 1).get((short) 1);
newStorageStatsMap.get("default")
.get((short) 1)
.put((short) 1,
new ContainerStorageStats.Builder(origin).logicalStorageUsage(origin.getLogicalStorageUsage() + 1).build());
mySqlStore.storeAggregatedPartitionClassStorageStats(new AggregatedPartitionClassStorageStats(newStorageStatsMap));
obtained = mySqlStore.queryAggregatedPartitionClassStorageStats();
assertEquals(newStorageStatsMap, obtained.getStorageStats());
// Delete some account and container
short accountId = (short) 1;
short containerId = (short) 1;
for (String partitionClassName : partitionNameAndIds.keySet()) {
mySqlStore.deleteAggregatedPartitionClassStatsForAccountContainer(partitionClassName, accountId, containerId);
newStorageStatsMap.get(partitionClassName).get(accountId).remove(containerId);
}
obtained = mySqlStore.queryAggregatedPartitionClassStorageStats();
assertEquals(newStorageStatsMap, obtained.getStorageStats());
mySqlStore3.shutdown();
}
private AccountStatsMySqlStore createAccountStatsMySqlStore(String clusterName, String hostname, int port)
throws Exception {
Path localBackupFilePath = createTemporaryFile();
Properties configProps = Utils.loadPropsFromResource("accountstats_mysql.properties");
configProps.setProperty(ClusterMapConfig.CLUSTERMAP_CLUSTER_NAME, clusterName);
configProps.setProperty(ClusterMapConfig.CLUSTERMAP_HOST_NAME, hostname);
configProps.setProperty(ClusterMapConfig.CLUSTERMAP_DATACENTER_NAME, "dc1");
configProps.setProperty(ClusterMapConfig.CLUSTERMAP_PORT, String.valueOf(port));
configProps.setProperty(AccountStatsMySqlConfig.DOMAIN_NAMES_TO_REMOVE, ".github.com");
configProps.setProperty(AccountStatsMySqlConfig.UPDATE_BATCH_SIZE, String.valueOf(batchSize));
configProps.setProperty(AccountStatsMySqlConfig.POOL_SIZE, String.valueOf(5));
configProps.setProperty(AccountStatsMySqlConfig.LOCAL_BACKUP_FILE_PATH, localBackupFilePath.toString());
VerifiableProperties verifiableProperties = new VerifiableProperties(configProps);
return (AccountStatsMySqlStore) new AccountStatsMySqlStoreFactory(verifiableProperties,
new ClusterMapConfig(verifiableProperties), new MetricRegistry()).getAccountStatsStore();
}
private static Path createTemporaryFile() throws IOException {
Path tempDir = Files.createTempDirectory("AccountStatsMySqlStoreTest");
return tempDir.resolve("localbackup");
}
private static StatsWrapper generateStatsWrapper(int numPartitions, int numAccounts, int numContainers,
StatsReportType reportType) {
Random random = new Random();
List<StatsSnapshot> storeSnapshots = new ArrayList<>();
for (int i = 0; i < numPartitions; i++) {
storeSnapshots.add(TestUtils.generateStoreStats(numAccounts, numContainers, random, reportType));
}
return TestUtils.generateNodeStats(storeSnapshots, 1000, reportType);
}
private static HostAccountStorageStatsWrapper generateHostAccountStorageStatsWrapper(int numPartitions,
int numAccounts, int numContainersPerAccount, StatsReportType reportType) {
HostAccountStorageStats hostAccountStorageStats = new HostAccountStorageStats(
StorageStatsUtilTest.generateRandomHostAccountStorageStats(numPartitions, numAccounts, numContainersPerAccount,
100000L, 2, 10));
StatsHeader statsHeader =
new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, 1000, numPartitions, numPartitions,
Collections.emptyList());
return new HostAccountStorageStatsWrapper(statsHeader, hostAccountStorageStats);
}
private void assertTableSize(AccountStatsMySqlStore mySqlStore, int expectedNumRows) throws SQLException {
int numRows = 0;
try (Connection connection = mySqlStore.getDataSource().getConnection()) {
try (Statement statement = connection.createStatement()) {
try (ResultSet resultSet = statement.executeQuery("SELECT * FROM " + AccountReportsDao.ACCOUNT_REPORTS_TABLE)) {
while (resultSet.next()) {
numRows++;
}
}
}
}
assertEquals(expectedNumRows, numRows);
}
private void assertTwoStatsSnapshots(StatsSnapshot snapshot1, StatsSnapshot snapshot2) {
assertEquals("Snapshot values are not equal", snapshot1.getValue(), snapshot2.getValue());
if (snapshot1.getSubMap() == null) {
assertNull(snapshot2.getSubMap());
} else {
assertEquals("Snapshot submap size mismatch", snapshot1.getSubMap().size(), snapshot2.getSubMap().size());
for (String key : snapshot1.getSubMap().keySet()) {
assertTrue(snapshot2.getSubMap().containsKey(key));
assertTwoStatsSnapshots(snapshot1.getSubMap().get(key), snapshot2.getSubMap().get(key));
}
}
}
private StatsWrapper convertAccountStatsToPartitionClassStats(StatsWrapper accountStats,
Map<String, String> partitionKeyToClassName) {
Map<String, StatsSnapshot> partitionClassSubMap = new HashMap<>();
StatsSnapshot originHostStats = accountStats.getSnapshot();
for (String partitionKey : originHostStats.getSubMap().keySet()) {
StatsSnapshot originPartitionStats = originHostStats.getSubMap().get(partitionKey);
String currentClassName = partitionKeyToClassName.get(partitionKey);
StatsSnapshot partitionClassStats =
partitionClassSubMap.computeIfAbsent(currentClassName, k -> new StatsSnapshot(0L, new HashMap<>()));
Map<String, StatsSnapshot> accountContainerSubMap = new HashMap<>();
for (String accountKey : originPartitionStats.getSubMap().keySet()) {
for (Map.Entry<String, StatsSnapshot> containerEntry : originPartitionStats.getSubMap()
.get(accountKey)
.getSubMap()
.entrySet()) {
String containerKey = containerEntry.getKey();
StatsSnapshot containerStats = new StatsSnapshot(containerEntry.getValue());
String accountContainerKey =
Utils.partitionClassStatsAccountContainerKey(Utils.accountIdFromStatsAccountKey(accountKey),
Utils.containerIdFromStatsContainerKey(containerKey));
accountContainerSubMap.put(accountContainerKey, containerStats);
}
}
long accountContainerValue = accountContainerSubMap.values().stream().mapToLong(StatsSnapshot::getValue).sum();
StatsSnapshot partitionStats = new StatsSnapshot(accountContainerValue, accountContainerSubMap);
partitionClassStats.getSubMap().put(partitionKey, partitionStats);
partitionClassStats.setValue(partitionClassStats.getValue() + accountContainerValue);
}
return new StatsWrapper(new StatsHeader(accountStats.getHeader()),
new StatsSnapshot(originHostStats.getValue(), partitionClassSubMap));
}
private HostPartitionClassStorageStatsWrapper convertHostAccountStorageStatsToHostPartitionClassStorageStats(
HostAccountStorageStatsWrapper accountStatsWrapper, Map<Long, String> partitionIdToClassName) {
HostPartitionClassStorageStats hostPartitionClassStorageStats = new HostPartitionClassStorageStats();
Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> storageStats =
accountStatsWrapper.getStats().getStorageStats();
for (long partitionId : storageStats.keySet()) {
Map<Short, Map<Short, ContainerStorageStats>> accountStorageStatsMap = storageStats.get(partitionId);
String partitionClassName = partitionIdToClassName.get(partitionId);
for (short accountId : accountStorageStatsMap.keySet()) {
accountStorageStatsMap.get(accountId)
.values()
.forEach(containerStats -> hostPartitionClassStorageStats.addContainerStorageStats(partitionClassName,
partitionId, accountId, containerStats));
}
}
return new HostPartitionClassStorageStatsWrapper(new StatsHeader(accountStatsWrapper.getHeader()),
hostPartitionClassStorageStats);
}
}
|
Java
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<HTML
><HEAD
><TITLE
>Planner/Optimizer</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.79"><LINK
REV="MADE"
HREF="mailto:pgsql-docs@postgresql.org"><LINK
REL="HOME"
TITLE="PostgreSQL 9.2.8 Documentation"
HREF="index.html"><LINK
REL="UP"
TITLE="Overview of PostgreSQL Internals"
HREF="overview.html"><LINK
REL="PREVIOUS"
TITLE="The PostgreSQL Rule System"
HREF="rule-system.html"><LINK
REL="NEXT"
TITLE="Executor"
HREF="executor.html"><LINK
REL="STYLESHEET"
TYPE="text/css"
HREF="stylesheet.css"><META
HTTP-EQUIV="Content-Type"
CONTENT="text/html; charset=ISO-8859-1"><META
NAME="creation"
CONTENT="2014-03-17T19:46:29"></HEAD
><BODY
CLASS="SECT1"
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="PLANNER-OPTIMIZER"
>44.5. Planner/Optimizer</A
></H1
><P
> The task of the <I
CLASS="FIRSTTERM"
>planner/optimizer</I
> is to
create an optimal execution plan. A given SQL query (and hence, a
query tree) can be actually executed in a wide variety of
different ways, each of which will produce the same set of
results. If it is computationally feasible, the query optimizer
will examine each of these possible execution plans, ultimately
selecting the execution plan that is expected to run the fastest.
</P
><DIV
CLASS="NOTE"
><BLOCKQUOTE
CLASS="NOTE"
><P
><B
>Note: </B
> In some situations, examining each possible way in which a query
can be executed would take an excessive amount of time and memory
space. In particular, this occurs when executing queries
involving large numbers of join operations. In order to determine
a reasonable (not necessarily optimal) query plan in a reasonable amount
of time, <SPAN
CLASS="PRODUCTNAME"
>PostgreSQL</SPAN
> uses a <I
CLASS="FIRSTTERM"
>Genetic
Query Optimizer</I
> (see <A
HREF="geqo.html"
>Chapter 51</A
>) when the number of joins
exceeds a threshold (see <A
HREF="runtime-config-query.html#GUC-GEQO-THRESHOLD"
>geqo_threshold</A
>).
</P
></BLOCKQUOTE
></DIV
><P
> The planner's search procedure actually works with data structures
called <I
CLASS="FIRSTTERM"
>paths</I
>, which are simply cut-down representations of
plans containing only as much information as the planner needs to make
its decisions. After the cheapest path is determined, a full-fledged
<I
CLASS="FIRSTTERM"
>plan tree</I
> is built to pass to the executor. This represents
the desired execution plan in sufficient detail for the executor to run it.
In the rest of this section we'll ignore the distinction between paths
and plans.
</P
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN87651"
>44.5.1. Generating Possible Plans</A
></H2
><P
> The planner/optimizer starts by generating plans for scanning each
individual relation (table) used in the query. The possible plans
are determined by the available indexes on each relation.
There is always the possibility of performing a
sequential scan on a relation, so a sequential scan plan is always
created. Assume an index is defined on a
relation (for example a B-tree index) and a query contains the
restriction
<TT
CLASS="LITERAL"
>relation.attribute OPR constant</TT
>. If
<TT
CLASS="LITERAL"
>relation.attribute</TT
> happens to match the key of the B-tree
index and <TT
CLASS="LITERAL"
>OPR</TT
> is one of the operators listed in
the index's <I
CLASS="FIRSTTERM"
>operator class</I
>, another plan is created using
the B-tree index to scan the relation. If there are further indexes
present and the restrictions in the query happen to match a key of an
index, further plans will be considered. Index scan plans are also
generated for indexes that have a sort ordering that can match the
query's <TT
CLASS="LITERAL"
>ORDER BY</TT
> clause (if any), or a sort ordering that
might be useful for merge joining (see below).
</P
><P
> If the query requires joining two or more relations,
plans for joining relations are considered
after all feasible plans have been found for scanning single relations.
The three available join strategies are:
<P
></P
></P><UL
><LI
><P
> <I
CLASS="FIRSTTERM"
>nested loop join</I
>: The right relation is scanned
once for every row found in the left relation. This strategy
is easy to implement but can be very time consuming. (However,
if the right relation can be scanned with an index scan, this can
be a good strategy. It is possible to use values from the current
row of the left relation as keys for the index scan of the right.)
</P
></LI
><LI
><P
> <I
CLASS="FIRSTTERM"
>merge join</I
>: Each relation is sorted on the join
attributes before the join starts. Then the two relations are
scanned in parallel, and matching rows are combined to form
join rows. This kind of join is more
attractive because each relation has to be scanned only once.
The required sorting might be achieved either by an explicit sort
step, or by scanning the relation in the proper order using an
index on the join key.
</P
></LI
><LI
><P
> <I
CLASS="FIRSTTERM"
>hash join</I
>: the right relation is first scanned
and loaded into a hash table, using its join attributes as hash keys.
Next the left relation is scanned and the
appropriate values of every row found are used as hash keys to
locate the matching rows in the table.
</P
></LI
></UL
><P>
</P
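><P
> Two of the three join strategies listed above (nested loop and hash join) can be
illustrated with a short, self-contained Java sketch. This is only an illustration of
the algorithms, not PostgreSQL source code; <TT CLASS="LITERAL">Row</TT> is a
hypothetical record type, and the right input of the hash join is assumed to have
unique join keys:
</P
><PRE
CLASS="PROGRAMLISTING"
>import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class JoinSketch {
    record Row(int key, String payload) {}

    // Nested loop join: scan the right input once for every row of the left input.
    static List&lt;String> nestedLoop(List&lt;Row> left, List&lt;Row> right) {
        List&lt;String> out = new ArrayList&lt;>();
        for (Row l : left)
            for (Row r : right)
                if (l.key() == r.key())
                    out.add(l.payload() + ", " + r.payload());
        return out;
    }

    // Hash join: load the right input into a hash table keyed on the join attribute,
    // then probe that table once for every row of the left input.
    static List&lt;String> hashJoin(List&lt;Row> left, List&lt;Row> right) {
        Map&lt;Integer, Row> table = new HashMap&lt;>();
        for (Row r : right)
            table.put(r.key(), r);
        List&lt;String> out = new ArrayList&lt;>();
        for (Row l : left) {
            Row r = table.get(l.key());
            if (r != null)
                out.add(l.payload() + ", " + r.payload());
        }
        return out;
    }
}
</PRE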
><P
> When the query involves more than two relations, the final result
must be built up by a tree of join steps, each with two inputs.
The planner examines different possible join sequences to find the
cheapest one.
</P
><P
> If the query uses fewer than <A
HREF="runtime-config-query.html#GUC-GEQO-THRESHOLD"
>geqo_threshold</A
>
relations, a near-exhaustive search is conducted to find the best
join sequence. The planner preferentially considers joins between any
two relations for which there exists a corresponding join clause in the
<TT
CLASS="LITERAL"
>WHERE</TT
> qualification (i.e., for
which a restriction like <TT
CLASS="LITERAL"
>where rel1.attr1=rel2.attr2</TT
>
exists). Join pairs with no join clause are considered only when there
is no other choice, that is, a particular relation has no available
join clauses to any other relation. All possible plans are generated for
every join pair considered by the planner, and the one that is
(estimated to be) the cheapest is chosen.
</P
><P
> When <TT
CLASS="VARNAME"
>geqo_threshold</TT
> is exceeded, the join
sequences considered are determined by heuristics, as described
in <A
HREF="geqo.html"
>Chapter 51</A
>. Otherwise the process is the same.
</P
><P
> The finished plan tree consists of sequential or index scans of
the base relations, plus nested-loop, merge, or hash join nodes as
needed, plus any auxiliary steps needed, such as sort nodes or
aggregate-function calculation nodes. Most of these plan node
types have the additional ability to do <I
CLASS="FIRSTTERM"
>selection</I
>
(discarding rows that do not meet a specified Boolean condition)
and <I
CLASS="FIRSTTERM"
>projection</I
> (computation of a derived column set
based on given column values, that is, evaluation of scalar
expressions where needed). One of the responsibilities of the
planner is to attach selection conditions from the
<TT
CLASS="LITERAL"
>WHERE</TT
> clause and computation of required
output expressions to the most appropriate nodes of the plan
tree.
</P
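><P
> The plan tree that the planner finally selects for a query can be inspected with
<TT CLASS="COMMAND">EXPLAIN</TT>. As a minimal illustration, the following Java program
prints the chosen plan over JDBC; the connection URL, the credentials, and the
<TT CLASS="LITERAL">rel1</TT>/<TT CLASS="LITERAL">rel2</TT> tables are placeholders,
and the PostgreSQL JDBC driver is assumed to be on the classpath:
</P
><PRE
CLASS="PROGRAMLISTING"
>import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ShowPlan {
    public static void main(String[] args) throws Exception {
        try (Connection con = DriverManager.getConnection(
                 "jdbc:postgresql://localhost/mydb", "user", "secret");
             Statement st = con.createStatement();
             ResultSet rs = st.executeQuery(
                 "EXPLAIN SELECT * FROM rel1 JOIN rel2 ON rel1.attr1 = rel2.attr2")) {
            while (rs.next()) {
                System.out.println(rs.getString(1));   // one line of the plan tree per row
            }
        }
    }
}
</PRE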
></DIV
></DIV
></BODY
></HTML
>
|
Java
|
/******************** (C) COPYRIGHT 2012 WildFire Team **************************
 * File name   : main.c
 * Description : Tests the on-board 2MB serial SPI flash (W25X16) and prints the
 *               test results to a PC terminal through USART1.
 * Platform    : WildFire STM32 development board
 * Library     : ST V3.5.0
 *
 * Author      : wildfire team
 * Forum       : http://www.amobbs.com/forum-1008-1.html
 * Taobao      : http://firestm32.taobao.com
 **********************************************************************************/
#include "stm32f10x.h"
#include "usart1.h"
#include "spi_flash.h"
typedef enum { FAILED = 0, PASSED = !FAILED} TestStatus;
/* Buffer length helpers (TxBufferSize1/RxBufferSize1 are not used in this example) */
#define TxBufferSize1 (countof(TxBuffer1) - 1)
#define RxBufferSize1 (countof(TxBuffer1) - 1)
#define countof(a) (sizeof(a) / sizeof(*(a)))
#define BufferSize (countof(Tx_Buffer)-1)
#define FLASH_WriteAddress 0x00000
#define FLASH_ReadAddress FLASH_WriteAddress
#define FLASH_SectorToErase FLASH_WriteAddress
#define sFLASH_ID 0xEF3015 //W25X16
//#define sFLASH_ID 0xEF4015 //W25Q16
/* Transmit buffer initialization */
uint8_t Tx_Buffer[] = " Thank you for choosing the WildFire STM32 board\r\n http://firestm32.taobao.com";
uint8_t Rx_Buffer[BufferSize];
__IO uint32_t DeviceID = 0;
__IO uint32_t FlashID = 0;
__IO TestStatus TransferStatus1 = FAILED;
/* Function prototypes */
void Delay(__IO uint32_t nCount);
TestStatus Buffercmp(uint8_t* pBuffer1, uint8_t* pBuffer2, uint16_t BufferLength);
/*
 * Function    : main
 * Description : main program
 * Input       : none
 * Output      : none
 */
int main(void)
{
  /* Configure USART1 as 115200 8-N-1 */
  USART1_Config();
  printf("\r\n This is a 2MB serial flash (W25X16) test \r\n");
  /* Initialize the 2MB serial flash W25X16 */
  SPI_FLASH_Init();
  /* Get SPI Flash Device ID */
  DeviceID = SPI_FLASH_ReadDeviceID();
  Delay( 200 );
  /* Get SPI Flash ID */
  FlashID = SPI_FLASH_ReadID();
  printf("\r\n FlashID is 0x%X, Manufacturer Device ID is 0x%X\r\n", FlashID, DeviceID);
  /* Check the SPI Flash ID */
  if (FlashID == sFLASH_ID) /* #define sFLASH_ID 0xEF3015 */
  {
    printf("\r\n Detected serial flash W25X16 !\r\n");
    /* Erase the SPI FLASH sector that will be written to */
    SPI_FLASH_SectorErase(FLASH_SectorToErase);
    /* Write the contents of the transmit buffer to the flash */
    SPI_FLASH_BufferWrite(Tx_Buffer, FLASH_WriteAddress, BufferSize);
    printf("\r\n Data written: %s \r\n", Tx_Buffer);
    /* Read the data just written back into the receive buffer */
    SPI_FLASH_BufferRead(Rx_Buffer, FLASH_ReadAddress, BufferSize);
    printf("\r\n Data read back: %s \r\n", Rx_Buffer);
    /* Check whether the data read back matches the data written */
    TransferStatus1 = Buffercmp(Tx_Buffer, Rx_Buffer, BufferSize);
    if( PASSED == TransferStatus1 )
    {
      printf("\r\n 2MB serial flash (W25X16) test passed!\n\r");
    }
    else
    {
      printf("\r\n 2MB serial flash (W25X16) test failed!\n\r");
    }
  }// if (FlashID == sFLASH_ID)
  else
  {
    printf("\r\n Could not read the W25X16 ID!\n\r");
  }
  SPI_Flash_PowerDown();
  while(1);
}
/*
 * Function    : Buffercmp
 * Description : Compares the contents of two buffers
 * Input       : -pBuffer1 pointer to the source buffer
 *               -pBuffer2 pointer to the destination buffer
 *               -BufferLength number of bytes to compare
 * Output      : none
 * Return      : -PASSED pBuffer1 is identical to pBuffer2
 *               -FAILED pBuffer1 differs from pBuffer2
 */
TestStatus Buffercmp(uint8_t* pBuffer1, uint8_t* pBuffer2, uint16_t BufferLength)
{
  while(BufferLength--)
  {
    if(*pBuffer1 != *pBuffer2)
    {
      return FAILED;
    }
    pBuffer1++;
    pBuffer2++;
  }
  return PASSED;
}
void Delay(__IO uint32_t nCount)
{
  for(; nCount != 0; nCount--);
}
/******************* (C) COPYRIGHT 2012 WildFire Team *****END OF FILE************/
|
Java
|
<?xml version='1.0' encoding='UTF-8'?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html>
<head>
<title>Conductor - ScalaTest 2.1.7 - org.scalatest.concurrent.Conductor</title>
<meta name="description" content="Conductor - ScalaTest 2.1.7 - org.scalatest.concurrent.Conductor" />
<meta name="keywords" content="Conductor ScalaTest 2.1.7 org.scalatest.concurrent.Conductor" />
<meta http-equiv="content-type" content="text/html; charset=UTF-8" />
<link href="../../../lib/template.css" media="screen" type="text/css" rel="stylesheet" />
<link href="../../../lib/diagrams.css" media="screen" type="text/css" rel="stylesheet" id="diagrams-css" />
<script type="text/javascript" src="../../../lib/jquery.js" id="jquery-js"></script>
<script type="text/javascript" src="../../../lib/jquery-ui.js"></script>
<script type="text/javascript" src="../../../lib/template.js"></script>
<script type="text/javascript" src="../../../lib/tools.tooltip.js"></script>
<script type="text/javascript">
if(top === self) {
var url = '../../../index.html';
var hash = 'org.scalatest.concurrent.Conductor';
var anchor = window.location.hash;
var anchor_opt = '';
if (anchor.length >= 1)
anchor_opt = '@' + anchor.substring(1);
window.location.href = url + '#' + hash + anchor_opt;
}
</script>
</head>
<body class="type">
<div id="definition">
<img src="../../../lib/class_big.png" />
<p id="owner"><a href="../../package.html" class="extype" name="org">org</a>.<a href="../package.html" class="extype" name="org.scalatest">scalatest</a>.<a href="package.html" class="extype" name="org.scalatest.concurrent">concurrent</a></p>
<h1>Conductor</h1>
</div>
<h4 id="signature" class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">class</span>
</span>
<span class="symbol">
<span class="name deprecated" title="Deprecated: org.scalatest.concurrent.Conductor has been deprecated and will be removed in a future version of ScalaTest. Please mix in trait Conductors, which now defines Conductor, instead of using Conductor directly.">Conductor</span><span class="result"> extends <span class="extype" name="scala.AnyRef">AnyRef</span></span>
</span>
</h4>
<div id="comment" class="fullcommenttop"><div class="comment cmt"><p><strong><code>org.scalatest.concurrent.Conductor</code> has been deprecated and will
be removed in a future version of ScalaTest. Please mix in or import the members
of trait <a href="Conductors.html"><code>Conductors</code></a>, into which <code>Conductor</code> has been moved, instead
of using this class directly.</strong></p><p><strong>The reason <code>Conductor</code> was moved into trait <code>Conductors</code>
was so that it can extend trait
<a href="PatienceConfiguration.html"><code>PatienceConfiguration</code></a>, which was
introduced in ScalaTest 1.8. This will make <code>Conductor</code> configurable in a
way consistent with traits <code>Eventually</code> and <code>AsyncAssertions</code>
(both of which were also introduced in ScalaTest 1.8), and scalable with the
<code>scaled</code> method of trait
<a href="ScaledTimeSpans.html"><code>ScaledTimeSpans</code></a>.</strong></p><p>Class that facilitates the testing of classes, traits, and libraries designed
to be used by multiple threads concurrently.</p><p>A <code>Conductor</code> conducts a multi-threaded scenario by maintaining
a clock of "beats." Beats are numbered starting with 0. You can ask a
<code>Conductor</code> to run threads that interact with the class, trait,
or library (the <em>subject</em>)
you want to test. A thread can call the <code>Conductor</code>'s
<code>waitForBeat</code> method, which will cause the thread to block
until that beat has been reached. The <code>Conductor</code> will advance
the beat only when all threads participating in the test are blocked. By
tying the timing of thread activities to specific beats, you can write
tests for concurrent systems that have deterministic interleavings of
threads.</p><p>A <code>Conductor</code> object has a three-phase lifecycle. It begins its life
in the <em>setup</em> phase. During this phase, you can start threads by
invoking the <code>thread</code> method on the <code>Conductor</code>.
When <code>conduct</code> is invoked on a <code>Conductor</code>, it enters
the <em>conducting</em> phase. During this phase it conducts the one multi-threaded
scenario it was designed to conduct. After all participating threads have exited, either by
returning normally or throwing an exception, the <code>conduct</code> method
will complete, either by returning normally or throwing an exception. As soon as
the <code>conduct</code> method completes, the <code>Conductor</code>
enters its <em>defunct</em> phase. Once the <code>Conductor</code> has conducted
a multi-threaded scenario, it is defunct and can't be reused. To run the same test again,
you'll need to create a new instance of <code>Conductor</code>.</p><p>Here's an example of the use of <code>Conductor</code> to test the <code>ArrayBlockingQueue</code>
class from <code>java.util.concurrent</code>:</p><p><pre class="stHighlighted">
<span class="stReserved">import</span> org.scalatest.fixture.FunSuite
<span class="stReserved">import</span> org.scalatest.matchers.ShouldMatchers
<span class="stReserved">import</span> java.util.concurrent.ArrayBlockingQueue
<br /><span class="stReserved">class</span> <span class="stType">ArrayBlockingQueueSuite</span> <span class="stReserved">extends</span> <span class="stType">FunSuite</span> <span class="stReserved">with</span> <span class="stType">ShouldMatchers</span> {
<br /> test(<span class="stQuotedString">"calling put on a full queue blocks the producer thread"</span>) {
<br /> <span class="stReserved">val</span> conductor = <span class="stReserved">new</span> <span class="stType">Conductor</span>
<span class="stReserved">import</span> conductor._
<br /> <span class="stReserved">val</span> buf = <span class="stReserved">new</span> <span class="stType">ArrayBlockingQueue[Int]</span>(<span class="stLiteral">1</span>)
<br /> thread(<span class="stQuotedString">"producer"</span>) {
buf put <span class="stLiteral">42</span>
buf put <span class="stLiteral">17</span>
beat should be (<span class="stLiteral">1</span>)
}
<br /> thread(<span class="stQuotedString">"consumer"</span>) {
waitForBeat(<span class="stLiteral">1</span>)
buf.take should be (<span class="stLiteral">42</span>)
buf.take should be (<span class="stLiteral">17</span>)
}
<br /> whenFinished {
buf should be (<span class="stQuotedString">'empty</span>)
}
}
}
</pre></p><p>When the test shown is run, it will create one thread named <em>producer</em> and another named
<em>consumer</em>. The producer thread will eventually execute the code passed as a by-name
parameter to <code>thread("producer")</code>:</p><p><pre class="stHighlighted">
buf put <span class="stLiteral">42</span>
buf put <span class="stLiteral">17</span>
beat should be (<span class="stLiteral">1</span>)
</pre></p><p>Similarly, the consumer thread will eventually execute the code passed as a by-name parameter
to <code>thread("consumer")</code>:</p><p><pre class="stHighlighted">
waitForBeat(<span class="stLiteral">1</span>)
buf.take should be (<span class="stLiteral">42</span>)
buf.take should be (<span class="stLiteral">17</span>)
</pre></p><p>The <code>thread</code> method invocations will create the threads and start the threads, but will not immediately
execute the by-name parameter passed to them. They will first block, waiting for the <code>Conductor</code>
to give them a green light to proceed.</p><p>The next call in the test is <code>whenFinished</code>. This method will first call <code>conduct</code> on
the <code>Conductor</code>, which will wait until all threads that were created (in this case, producer and consumer) are
at the "starting line", <em>i.e.</em>, they have all started and are blocked, waiting on the green light.
The <code>conduct</code> method will then give these threads the green light and they will
all start executing their blocks concurrently.</p><p>When the threads are given the green light, the beat is 0. The first thing the producer thread does is put 42
into the queue. As the queue is empty at this point, this succeeds. The producer thread next attempts to put a 17
into the queue, but because the queue has size 1, this can't succeed until the consumer thread has read the 42
from the queue. This hasn't happened yet, so producer blocks. Meanwhile, the consumer thread's first act is to
call <code>waitForBeat(1)</code>. Because the beat starts out at 0, this call will block the consumer thread.
As a result, once the producer thread has executed <code>buf put 17</code> and the consumer thread has executed
<code>waitForBeat(1)</code>, both threads will be blocked.</p><p>The <code>Conductor</code> maintains a clock that wakes up periodically and checks to see if all threads
participating in the multi-threaded scenario (in this case, producer and consumer) are blocked. If so, it
increments the beat. Thus sometime later the beat will be incremented, from 0 to 1. Because consumer was
waiting for beat 1, it will wake up (<em>i.e.</em>, the <code>waitForBeat(1)</code> call will return) and
execute the next line of code in its block, <code>buf.take should be (42)</code>. This will succeed, because
the producer thread had previously (during beat 0) put 42 into the queue. This act will also make
producer runnable again, because it was blocked on the second <code>put</code>, which was waiting for another
thread to read that 42.</p><p>Now both threads are unblocked and able to execute their next statement. The order is
non-deterministic, and can even be simultaneous if running on multiple cores. If the <code>consumer</code> thread
happens to execute <code>buf.take should be (17)</code> first, it will block (<code>buf.take</code> will not return), because the queue is
at that point empty. At some point later, the producer thread will execute <code>buf put 17</code>, which will
unblock the consumer thread. Again both threads will be runnable and the order non-deterministic and
possibly simultaneous. The producer thread may charge ahead and run its next statement, <code>beat should be (1)</code>.
This will succeed because the beat is indeed 1 at this point. As this is the last statement in the producer's block,
the producer thread will exit normally (it won't throw an exception). At some point later the consumer thread will
be allowed to complete its last statement, the <code>buf.take</code> call will return 17. The consumer thread will
execute <code>17 should be (17)</code>. This will succeed and as this was the last statement in its block, the consumer will return
normally.</p><p>If either the producer or consumer thread had completed abruptly with an exception, the <code>conduct</code> method
(which was called by <code>whenFinished</code>) would have completed abruptly with an exception to indicate the test
failed. However, since both threads returned normally, <code>conduct</code> will return. Because <code>conduct</code> doesn't
throw an exception, <code>whenFinished</code> will execute the block of code passed as a by-name parameter to it: <code>buf should be ('empty)</code>.
This will succeed, because the queue is indeed empty at this point. The <code>whenFinished</code> method will then return, and
because the <code>whenFinished</code> call was the last statement in the test and it didn't throw an exception, the test completes successfully.</p><p>This test tests <code>ArrayBlockingQueue</code>, to make sure it works as expected. If there were a bug in <code>ArrayBlockingQueue</code>
such that a <code>put</code> called on a full queue didn't block, but instead overwrote the previous value, this test would detect
it. However, if there were a bug in <code>ArrayBlockingQueue</code> such that a call to <code>take</code> called on an empty queue
never blocked and always returned 0, this test might not detect it. The reason is that whether the consumer thread will ever call
<code>take</code> on an empty queue during this test is non-deterministic. It depends on how the threads get scheduled during beat 1.
What is deterministic in this test, because the consumer thread blocks during beat 0, is that the producer thread will definitely
attempt to write to a full queue. To make sure the other scenario is tested, you'd need a different test:</p><p><pre class="stHighlighted">
test(<span class="stQuotedString">"calling take on an empty queue blocks the consumer thread"</span>) {
<br /> <span class="stReserved">val</span> conductor = <span class="stReserved">new</span> <span class="stType">Conductor</span>
<span class="stReserved">import</span> conductor._
<br /> <span class="stReserved">val</span> buf = <span class="stReserved">new</span> <span class="stType">ArrayBlockingQueue[Int]</span>(<span class="stLiteral">1</span>)
<br /> thread(<span class="stQuotedString">"producer"</span>) {
waitForBeat(<span class="stLiteral">1</span>)
buf put <span class="stLiteral">42</span>
buf put <span class="stLiteral">17</span>
}
<br /> thread(<span class="stQuotedString">"consumer"</span>) {
buf.take should be (<span class="stLiteral">42</span>)
buf.take should be (<span class="stLiteral">17</span>)
beat should be (<span class="stLiteral">1</span>)
}
<br /> whenFinished {
buf should be (<span class="stQuotedString">'empty</span>)
}
}
</pre></p><p>In this test, the producer thread will block, waiting for beat 1. The consumer thread will invoke <code>buf.take</code>
as its first act. This will block, because the queue is empty. Because both threads are blocked, the <code>Conductor</code>
will at some point later increment the beat to 1. This will awaken the producer thread. It will return from its
<code>waitForBeat(1)</code> call and execute <code>buf put 42</code>. This will unblock the consumer thread, which will
take the 42, and so on.</p><p>The problem that <code>Conductor</code> is designed to address is the difficulty, caused by the non-deterministic nature
of thread scheduling, of testing classes, traits, and libraries that are intended to be used by multiple threads.
If you just create a test in which one thread reads from an <code>ArrayBlockingQueue</code> and
another writes to it, you can't be sure that you have tested all possible interleavings of threads, no matter
how many times you run the test. The purpose of <code>Conductor</code>
is to enable you to write tests with deterministic interleavings of threads. If you write one test for each possible
interleaving of threads, then you can be sure you have all the scenarios tested. The two tests shown here, for example,
ensure that both the scenario in which a producer thread tries to write to a full queue and the scenario in which a
consumer thread tries to take from an empty queue are tested.</p><p>Class <code>Conductor</code> was inspired by the
<a href="http://www.cs.umd.edu/projects/PL/multithreadedtc/">MultithreadedTC project</a>,
created by Bill Pugh and Nat Ayewah of the University of Maryland, and was brought to ScalaTest with major
contributions by Josh Cough.</p><p>Although useful, bear in mind that a <code>Conductor</code>'s results are not guaranteed to be
accurate 100% of the time. The reason is that it uses <code>java.lang.Thread</code>'s <code>getState</code> method to
decide when to advance the beat. This type of use is advised against in the Javadoc documentation for
<code>getState</code>, which says, "This method is designed for use in monitoring of the system state, not for
synchronization." In short, sometimes the return value of <code>getState</code> may be inacurrate, which in turn means
that sometimes a <code>Conductor</code> may decide to advance the beat too early. The upshot is that while <code>Conductor</code>
can be quite helpful in developing a thread-safe class initially, once the class is done you may not want to run the resulting tests
all the time as regression tests because they may generate occasional false negatives. (<code>Conductor</code> should never generate
a false positive, though, so if a test passes you can believe that. If the test fails consistently, you can believe that as well. But
if a test fails only occasionally, it may or may not indicate an actual concurrency bug.)</p></div><dl class="attributes block"> <dt>Annotations</dt><dd>
<span class="name">@deprecated</span>
</dd><dt>Deprecated</dt><dd class="cmt"><p>org.scalatest.concurrent.Conductor has been deprecated and will be removed in a future version of ScalaTest. Please mix in trait Conductors, which now defines Conductor, instead of using Conductor directly.</p></dd><dt>Source</dt><dd><a href="https://github.com/scalatest/scalatest/tree/release-2.1.7-for-scala-2.10/src/main/scala/org/scalatest/concurrent/Conductor.scala" target="_blank">Conductor.scala</a></dd></dl><div class="toggleContainer block">
<span class="toggle">Linear Supertypes</span>
<div class="superTypes hiddenContent"><span class="extype" name="scala.AnyRef">AnyRef</span>, <span class="extype" name="scala.Any">Any</span></div>
</div></div>
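<div class="comment cmt"><p>As a rough illustration of the beat-clock mechanism described above (not ScalaTest's
actual implementation), the following Java sketch advances a beat counter only while every registered,
live thread is <code>BLOCKED</code>, <code>WAITING</code>, or <code>TIMED_WAITING</code> according to
<code>Thread.getState</code>; the class and method names are invented for the example.</p>
<pre>
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

final class BeatClock {
    private final List&lt;Thread> threads = new CopyOnWriteArrayList&lt;>();
    private int beat = 0;

    void register(Thread t) { threads.add(t); }

    synchronized int beat() { return beat; }

    // Block the calling thread until the clock has reached the given beat.
    synchronized void waitForBeat(int target) throws InterruptedException {
        while (beat &lt; target) {
            wait();
        }
    }

    // Poll every clockPeriod milliseconds; stop once all registered threads have exited.
    void run(long clockPeriodMillis) throws InterruptedException {
        while (threads.stream().anyMatch(Thread::isAlive)) {
            boolean allBlocked = threads.stream().filter(Thread::isAlive).allMatch(t -> {
                Thread.State s = t.getState();
                return s == Thread.State.BLOCKED
                    || s == Thread.State.WAITING
                    || s == Thread.State.TIMED_WAITING;
            });
            if (allBlocked) {
                synchronized (this) {
                    beat++;
                    notifyAll();   // wake any thread parked in waitForBeat
                }
            }
            Thread.sleep(clockPeriodMillis);
        }
    }
}
</pre></div>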
<div id="mbrsel">
<div id="textfilter"><span class="pre"></span><span class="input"><input id="mbrsel-input" type="text" accesskey="/" /></span><span class="post"></span></div>
<div id="order">
<span class="filtertype">Ordering</span>
<ol>
<li class="alpha in"><span>Alphabetic</span></li>
<li class="inherit out"><span>By inheritance</span></li>
</ol>
</div>
<div id="ancestors">
<span class="filtertype">Inherited<br />
</span>
<ol id="linearization">
<li class="in" name="org.scalatest.concurrent.Conductor"><span>Conductor</span></li><li class="in" name="scala.AnyRef"><span>AnyRef</span></li><li class="in" name="scala.Any"><span>Any</span></li>
</ol>
</div><div id="ancestors">
<span class="filtertype"></span>
<ol>
<li class="hideall out"><span>Hide All</span></li>
<li class="showall in"><span>Show all</span></li>
</ol>
<a href="http://docs.scala-lang.org/overviews/scaladoc/usage.html#members" target="_blank">Learn more about member selection</a>
</div>
<div id="visbl">
<span class="filtertype">Visibility</span>
<ol><li class="public in"><span>Public</span></li><li class="all out"><span>All</span></li></ol>
</div>
</div>
<div id="template">
<div id="allMembers">
<div id="constructors" class="members">
<h3>Instance Constructors</h3>
<ol><li name="org.scalatest.concurrent.Conductor#<init>" visbl="pub" data-isabs="false" fullComment="no" group="Ungrouped">
<a id="<init>():org.scalatest.concurrent.Conductor"></a>
<a id="<init>:Conductor"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">new</span>
</span>
<span class="symbol">
<span class="name">Conductor</span><span class="params">()</span>
</span>
</h4>
</li></ol>
</div>
<div id="values" class="values members">
<h3>Value Members</h3>
<ol><li name="scala.AnyRef#!=" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="!=(x$1:AnyRef):Boolean"></a>
<a id="!=(AnyRef):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span title="gt4s: $bang$eq" class="name">!=</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.AnyRef">AnyRef</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.Any#!=" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="!=(x$1:Any):Boolean"></a>
<a id="!=(Any):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span title="gt4s: $bang$eq" class="name">!=</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Any">Any</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>Any</dd></dl></div>
</li><li name="scala.AnyRef###" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="##():Int"></a>
<a id="##():Int"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span title="gt4s: $hash$hash" class="name">##</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Int">Int</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef โ Any</dd></dl></div>
</li><li name="scala.AnyRef#==" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="==(x$1:AnyRef):Boolean"></a>
<a id="==(AnyRef):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span title="gt4s: $eq$eq" class="name">==</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.AnyRef">AnyRef</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.Any#==" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="==(x$1:Any):Boolean"></a>
<a id="==(Any):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span title="gt4s: $eq$eq" class="name">==</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Any">Any</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>Any</dd></dl></div>
</li><li name="scala.Any#asInstanceOf" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="asInstanceOf[T0]:T0"></a>
<a id="asInstanceOf[T0]:T0"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">asInstanceOf</span><span class="tparams">[<span name="T0">T0</span>]</span><span class="result">: <span class="extype" name="scala.Any.asInstanceOf.T0">T0</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>Any</dd></dl></div>
</li><li name="org.scalatest.concurrent.Conductor#beat" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="beat:Int"></a>
<a id="beat:Int"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">beat</span><span class="result">: <span class="extype" name="scala.Int">Int</span></span>
</span>
</h4>
<p class="shortcomment cmt">The current value of the thread clock.</p><div class="fullcomment"><div class="comment cmt"><p>The current value of the thread clock.
</p></div><dl class="paramcmts block"><dt>returns</dt><dd class="cmt"><p>the current beat value
</p></dd></dl></div>
</li><li name="scala.AnyRef#clone" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="clone():Object"></a>
<a id="clone():AnyRef"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">clone</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.AnyRef">AnyRef</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected[<a href="../../../java$lang.html" class="extype" name="java.lang">java.lang</a>] </dd><dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">()</span>
</dd></dl></div>
</li><li name="org.scalatest.concurrent.Conductor#conduct" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="conduct(clockPeriod:Int,timeout:Int):Unit"></a>
<a id="conduct(Int,Int):Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">conduct</span><span class="params">(<span name="clockPeriod">clockPeriod: <span class="extype" name="scala.Int">Int</span></span>, <span name="timeout">timeout: <span class="extype" name="scala.Int">Int</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4>
<p class="shortcomment cmt">Conducts a multithreaded test with the specified clock period (in milliseconds)
and timeout (in seconds).</p><div class="fullcomment"><div class="comment cmt"><p>Conducts a multithreaded test with the specified clock period (in milliseconds)
and timeout (in seconds).</p><p>A <code>Conductor</code> instance maintains an internal clock, which will wake up
periodically and check to see if it should advance the beat, abort the test, or go back to sleep.
It sleeps <code>clockPeriod</code> milliseconds each time. It will abort the test
if either deadlock is suspected or the beat has not advanced for the number of
seconds specified as <code>timeout</code>. Suspected deadlock will be declared if
for some number of consecutive clock cycles, all test threads are in the <code>BLOCKED</code> or
<code>WAITING</code> states and none of them are waiting for a beat.</p></div><dl class="paramcmts block"><dt class="param">clockPeriod</dt><dd class="cmt"><p>The period (in ms) the clock will sleep each time it sleeps</p></dd><dt class="param">timeout</dt><dd class="cmt"><p>The maximum allowed time between successive advances of the beat. If this time
is exceeded, the Conductor will abort the test.</p></dd></dl><dl class="attributes block"> <dt>Exceptions thrown</dt><dd><span class="cmt">Throwable<p>The first error or exception that is thrown by one of the test threads, or
a <code>TestFailedException</code> if the test was aborted due to a timeout or suspected deadlock.
</p></span></dd></dl></div>
</li><li name="org.scalatest.concurrent.Conductor#conduct" visbl="pub" data-isabs="false" fullComment="no" group="Ungrouped">
<a id="conduct():Unit"></a>
<a id="conduct():Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">conduct</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4>
<p class="shortcomment cmt">Conducts a multithreaded test with a default clock period of 10 milliseconds
and default run limit of 5 seconds.</p>
</li><li name="org.scalatest.concurrent.Conductor#conductingHasBegun" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="conductingHasBegun:Boolean"></a>
<a id="conductingHasBegun:Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">conductingHasBegun</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4>
<p class="shortcomment cmt">Indicates whether either of the two overloaded <code>conduct</code> methods
have been invoked.</p><div class="fullcomment"><div class="comment cmt"><p>Indicates whether either of the two overloaded <code>conduct</code> methods
have been invoked.</p><p>This method returns true if either <code>conduct</code> method has been invoked. The
<code>conduct</code> method may have returned or not. (In other words, a <code>true</code>
result from this method does not mean the <code>conduct</code> method has returned,
just that it has already been invoked and, therefore, the multi-threaded scenario it
conducts has definitely begun.)</p></div></div>
</li><li name="scala.AnyRef#eq" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="eq(x$1:AnyRef):Boolean"></a>
<a id="eq(AnyRef):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">eq</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.AnyRef">AnyRef</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#equals" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="equals(x$1:Any):Boolean"></a>
<a id="equals(Any):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">equals</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Any">Any</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef โ Any</dd></dl></div>
</li><li name="scala.AnyRef#finalize" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="finalize():Unit"></a>
<a id="finalize():Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">finalize</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected[<a href="../../../java$lang.html" class="extype" name="java.lang">java.lang</a>] </dd><dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">()</span>
</dd></dl></div>
</li><li name="scala.AnyRef#getClass" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="getClass():Class[_]"></a>
<a id="getClass():Class[_]"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">getClass</span><span class="params">()</span><span class="result">: <span class="extype" name="java.lang.Class">Class</span>[_]</span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef โ Any</dd></dl></div>
</li><li name="scala.AnyRef#hashCode" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="hashCode():Int"></a>
<a id="hashCode():Int"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">hashCode</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Int">Int</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef โ Any</dd></dl></div>
</li><li name="org.scalatest.concurrent.Conductor#isConductorFrozen" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="isConductorFrozen:Boolean"></a>
<a id="isConductorFrozen:Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">isConductorFrozen</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4>
<p class="shortcomment cmt">Indicates whether the conductor has been frozen.</p><div class="fullcomment"><div class="comment cmt"><p>Indicates whether the conductor has been frozen.</p><p>Note: The only way a thread
can freeze the conductor is by calling <code>withConductorFrozen</code>.</p></div></div>
</li><li name="scala.Any#isInstanceOf" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="isInstanceOf[T0]:Boolean"></a>
<a id="isInstanceOf[T0]:Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">isInstanceOf</span><span class="tparams">[<span name="T0">T0</span>]</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>Any</dd></dl></div>
</li><li name="scala.AnyRef#ne" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="ne(x$1:AnyRef):Boolean"></a>
<a id="ne(AnyRef):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">ne</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.AnyRef">AnyRef</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#notify" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="notify():Unit"></a>
<a id="notify():Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">notify</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#notifyAll" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="notifyAll():Unit"></a>
<a id="notifyAll():Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">notifyAll</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#synchronized" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="synchronized[T0](x$1:=>T0):T0"></a>
<a id="synchronized[T0](โT0):T0"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">synchronized</span><span class="tparams">[<span name="T0">T0</span>]</span><span class="params">(<span name="arg0">arg0: โ <span class="extype" name="java.lang.AnyRef.synchronized.T0">T0</span></span>)</span><span class="result">: <span class="extype" name="java.lang.AnyRef.synchronized.T0">T0</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="org.scalatest.concurrent.Conductor#thread" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="thread(name:String)(fun:=>Unit):Thread"></a>
<a id="thread(String)(โUnit):Thread"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">thread</span><span class="params">(<span name="name">name: <span class="extype" name="scala.Predef.String">String</span></span>)</span><span class="params">(<span name="fun">fun: โ <span class="extype" name="scala.Unit">Unit</span></span>)</span><span class="result">: <span class="extype" name="java.lang.Thread">Thread</span></span>
</span>
</h4>
<p class="shortcomment cmt">Creates a new thread with the specified name that will execute the specified function.</p><div class="fullcomment"><div class="comment cmt"><p>Creates a new thread with the specified name that will execute the specified function.</p><p>This method may be safely called by any thread.</p></div><dl class="paramcmts block"><dt class="param">name</dt><dd class="cmt"><p>the name of the newly created thread</p></dd><dt class="param">fun</dt><dd class="cmt"><p>the function to be executed by the newly created thread</p></dd><dt>returns</dt><dd class="cmt"><p>the newly created thread
</p></dd></dl></div>
</li><li name="org.scalatest.concurrent.Conductor#thread" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="thread(fun:=>Unit):Thread"></a>
<a id="thread(โUnit):Thread"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">thread</span><span class="params">(<span name="fun">fun: โ <span class="extype" name="scala.Unit">Unit</span></span>)</span><span class="result">: <span class="extype" name="java.lang.Thread">Thread</span></span>
</span>
</h4>
<p class="shortcomment cmt">Creates a new thread that will execute the specified function.</p><div class="fullcomment"><div class="comment cmt"><p>Creates a new thread that will execute the specified function.</p><p>The name of the thread will be of the form Conductor-Thread-N, where N is some integer.</p><p>This method may be safely called by any thread.</p></div><dl class="paramcmts block"><dt class="param">fun</dt><dd class="cmt"><p>the function to be executed by the newly created thread</p></dd><dt>returns</dt><dd class="cmt"><p>the newly created thread
</p></dd></dl></div>
</li><li name="scala.AnyRef#toString" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="toString():String"></a>
<a id="toString():String"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">toString</span><span class="params">()</span><span class="result">: <span class="extype" name="java.lang.String">String</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef โ Any</dd></dl></div>
</li><li name="scala.AnyRef#wait" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="wait():Unit"></a>
<a id="wait():Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">wait</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">()</span>
</dd></dl></div>
</li><li name="scala.AnyRef#wait" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="wait(x$1:Long,x$2:Int):Unit"></a>
<a id="wait(Long,Int):Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">wait</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Long">Long</span></span>, <span name="arg1">arg1: <span class="extype" name="scala.Int">Int</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">()</span>
</dd></dl></div>
</li><li name="scala.AnyRef#wait" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="wait(x$1:Long):Unit"></a>
<a id="wait(Long):Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">wait</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Long">Long</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">()</span>
</dd></dl></div>
</li><li name="org.scalatest.concurrent.Conductor#waitForBeat" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="waitForBeat(beat:Int):Unit"></a>
<a id="waitForBeat(Int):Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">waitForBeat</span><span class="params">(<span name="beat">beat: <span class="extype" name="scala.Int">Int</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4>
<p class="shortcomment cmt">Blocks the current thread until the thread beat reaches the
specified value, at which point the current thread will be unblocked.</p><div class="fullcomment"><div class="comment cmt"><p>Blocks the current thread until the thread beat reaches the
specified value, at which point the current thread will be unblocked.
</p></div><dl class="paramcmts block"><dt class="param">beat</dt><dd class="cmt"><p>the tick value to wait for</p></dd></dl><dl class="attributes block"> <dt>Exceptions thrown</dt><dd><span class="cmt">NotAllowedException<p>if the a <code>beat</code> less than or equal to zero is passed
</p></span></dd></dl></div>
</li><li name="org.scalatest.concurrent.Conductor#whenFinished" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="whenFinished(fun:=>Unit):Unit"></a>
<a id="whenFinished(โUnit):Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">whenFinished</span><span class="params">(<span name="fun">fun: โ <span class="extype" name="scala.Unit">Unit</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4>
<p class="shortcomment cmt">Invokes <code>conduct</code> and after <code>conduct</code> method returns,
if <code>conduct</code> returns normally (<em>i.e.</em>, without throwing
an exception), invokes the passed function.</p><div class="fullcomment"><div class="comment cmt"><p>Invokes <code>conduct</code> and after <code>conduct</code> method returns,
if <code>conduct</code> returns normally (<em>i.e.</em>, without throwing
an exception), invokes the passed function.</p><p>If <code>conduct</code> completes abruptly with an exception, this method
will complete abruptly with the same exception and not execute the passed
function.</p><p>This method must be called by the thread that instantiated this <code>Conductor</code>,
and that same thread will invoke <code>conduct</code> and, if it returns normally, execute
the passed function.</p><p>Because <code>whenFinished</code> invokes <code>conduct</code>, it can only be invoked
once on a <code>Conductor</code> instance. As a result, if you need to pass a block of
code to <code>whenFinished</code> it should be the last statement of your test. If you
don't have a block of code that needs to be run once all the threads have finished
successfully, then you can simply invoke <code>conduct</code> and never invoke
<code>whenFinished</code>.</p></div><dl class="paramcmts block"><dt class="param">fun</dt><dd class="cmt"><p>the function to execute after <code>conduct</code> call returns</p></dd></dl><dl class="attributes block"> <dt>Exceptions thrown</dt><dd><span class="cmt">NotAllowedException<p>if the calling thread is not the thread that
instantiated this <code>Conductor</code>, or if <code>conduct</code> has already
been invoked on this conductor.
</p></span></dd></dl></div>
</li><li name="org.scalatest.concurrent.Conductor#withConductorFrozen" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="withConductorFrozen[T](fun:=>T):Unit"></a>
<a id="withConductorFrozen[T](โT):Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">withConductorFrozen</span><span class="tparams">[<span name="T">T</span>]</span><span class="params">(<span name="fun">fun: โ <span class="extype" name="org.scalatest.concurrent.Conductor.withConductorFrozen.T">T</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4>
<p class="shortcomment cmt">Executes the passed function with the <code>Conductor</code> <em>frozen</em> so that it
won't advance the clock.</p><div class="fullcomment"><div class="comment cmt"><p>Executes the passed function with the <code>Conductor</code> <em>frozen</em> so that it
won't advance the clock.</p><p>While the <code>Conductor</code> is frozen, the beat will not advance. Once the
passed function has completed executing, the <code>Conductor</code> will be unfrozen
so that the beat will advance when all threads are blocked, as normal.</p></div><dl class="paramcmts block"><dt class="param">fun</dt><dd class="cmt"><p>the function to execute while the <code>Conductor</code> is frozen.
</p></dd></dl></div>
</li></ol>
</div>
</div>
<div id="inheritedMembers">
<div class="parent" name="scala.AnyRef">
<h3>Inherited from <span class="extype" name="scala.AnyRef">AnyRef</span></h3>
</div><div class="parent" name="scala.Any">
<h3>Inherited from <span class="extype" name="scala.Any">Any</span></h3>
</div>
</div>
<div id="groupedMembers">
<div class="group" name="Ungrouped">
<h3>Ungrouped</h3>
</div>
</div>
</div>
<div id="tooltip"></div>
<div id="footer"> </div>
</body>
</html>
|
Java
|
package info.novatec.testit.webtester.support.assertj;
import static info.novatec.testit.webtester.support.assertj.WebTesterAssertions.assertThat;
import static org.mockito.Mockito.doReturn;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
import info.novatec.testit.webtester.pageobjects.RadioButton;
@RunWith(MockitoJUnitRunner.class)
public class RadioButtonAssertTest {
@Mock
RadioButton selectedRadioButton;
@Mock
RadioButton radioButton;
@Before
public void setUp() {
doReturn(true).when(selectedRadioButton).isSelected();
}
/* selected */
@Test
public void selectedTrueTest() {
assertThat(selectedRadioButton).isSelected(true);
}
@Test(expected = AssertionError.class)
public void selectedFalseTest() {
assertThat(radioButton).isSelected(true);
}
@Test
public void notSelectedTrueTest() {
assertThat(radioButton).isNotSelected(true);
}
@Test(expected = AssertionError.class)
public void notSelectedFalseTest() {
assertThat(selectedRadioButton).isNotSelected(true);
}
}
|
Java
|
## How to index PDF documents in a folder with PDF Extractor SDK in VB.NET using ByteScout Premium Suite
### This VB.NET tutorial shows how to index PDF documents in a folder with PDF Extractor SDK
Indexing PDF documents in a folder with PDF Extractor SDK is simple to implement in VB.NET using the source code below. ByteScout Premium Suite is a bundle of twelve ByteScout SDK products, including tools and components for PDF, barcodes, spreadsheets, and screen video recording, and you can use it to index PDF documents in a folder from VB.NET.
Want to save time? You will save a lot of time on writing and testing code by taking the VB.NET sample below and using it in your application. This VB.NET sample code is all you need for your app: just copy and paste the code, add the required references (if any), and you are all set. This sample code does the whole job of indexing PDF documents in a folder for you.
You can download a free trial version of ByteScout Premium Suite from our website to see and try many other source code samples for VB.NET.
## REQUEST FREE TECH SUPPORT
[Click here to get in touch](https://bytescout.zendesk.com/hc/en-us/requests/new?subject=ByteScout%20Premium%20Suite%20Question)
or just send email to [support@bytescout.com](mailto:support@bytescout.com?subject=ByteScout%20Premium%20Suite%20Question)
## ON-PREMISE OFFLINE SDK
[Get Your 60 Day Free Trial](https://bytescout.com/download/web-installer?utm_source=github-readme)
[Explore SDK Docs](https://bytescout.com/documentation/index.html?utm_source=github-readme)
[Sign Up For Online Training](https://academy.bytescout.com/)
## ON-DEMAND REST WEB API
[Get your API key](https://pdf.co/documentation/api?utm_source=github-readme)
[Explore Web API Documentation](https://pdf.co/documentation/api?utm_source=github-readme)
[Explore Web API Samples](https://github.com/bytescout/ByteScout-SDK-SourceCode/tree/master/PDF.co%20Web%20API)
## VIDEO REVIEW
[https://www.youtube.com/watch?v=NEwNs2b9YN8](https://www.youtube.com/watch?v=NEwNs2b9YN8)
<!-- code block begin -->
##### **IndexDocsInFolder.vbproj:**
```
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Import Project="$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props" Condition="Exists('$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props')" />
<PropertyGroup>
<Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
<Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
<ProjectGuid>{846F275E-BE99-4254-85ED-B8CBBB4546A9}</ProjectGuid>
<OutputType>Exe</OutputType>
<StartupObject>IndexDocsInFolder.Program</StartupObject>
<RootNamespace>IndexDocsInFolder</RootNamespace>
<AssemblyName>IndexDocsInFolder</AssemblyName>
<FileAlignment>512</FileAlignment>
<MyType>Console</MyType>
<TargetFrameworkVersion>v2.0</TargetFrameworkVersion>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
<PlatformTarget>AnyCPU</PlatformTarget>
<DebugSymbols>true</DebugSymbols>
<DebugType>full</DebugType>
<DefineDebug>true</DefineDebug>
<DefineTrace>true</DefineTrace>
<OutputPath>bin\Debug\</OutputPath>
<DocumentationFile>IndexDocsInFolder.xml</DocumentationFile>
<NoWarn>42016,41999,42017,42018,42019,42032,42036,42020,42021,42022</NoWarn>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
<PlatformTarget>AnyCPU</PlatformTarget>
<DebugType>pdbonly</DebugType>
<DefineDebug>false</DefineDebug>
<DefineTrace>true</DefineTrace>
<Optimize>true</Optimize>
<OutputPath>bin\Release\</OutputPath>
<DocumentationFile>IndexDocsInFolder.xml</DocumentationFile>
<NoWarn>42016,41999,42017,42018,42019,42032,42036,42020,42021,42022</NoWarn>
</PropertyGroup>
<PropertyGroup>
<OptionExplicit>On</OptionExplicit>
</PropertyGroup>
<PropertyGroup>
<OptionCompare>Binary</OptionCompare>
</PropertyGroup>
<PropertyGroup>
<OptionStrict>Off</OptionStrict>
</PropertyGroup>
<PropertyGroup>
<OptionInfer>On</OptionInfer>
</PropertyGroup>
<ItemGroup>
<Reference Include="Bytescout.PDFExtractor, Version=9.1.0.3170, Culture=neutral, PublicKeyToken=f7dd1bd9d40a50eb, processorArchitecture=MSIL">
<SpecificVersion>False</SpecificVersion>
<HintPath>..\..\..\..\..\..\..\..\..\..\Program Files\Bytescout PDF Extractor SDK\net2.00\Bytescout.PDFExtractor.dll</HintPath>
</Reference>
<Reference Include="System" />
<Reference Include="System.Data" />
<Reference Include="System.Deployment" />
<Reference Include="System.Drawing" />
<Reference Include="System.Xml" />
</ItemGroup>
<ItemGroup>
<Import Include="Microsoft.VisualBasic" />
<Import Include="System" />
<Import Include="System.Collections" />
<Import Include="System.Collections.Generic" />
<Import Include="System.Data" />
<Import Include="System.Diagnostics" />
</ItemGroup>
<ItemGroup>
<Compile Include="Program.vb" />
</ItemGroup>
<ItemGroup>
<Content Include="Files\ImageSample.png">
<CopyToOutputDirectory>Always</CopyToOutputDirectory>
</Content>
</ItemGroup>
<ItemGroup>
<Content Include="Files\SampleFile1.pdf">
<CopyToOutputDirectory>Always</CopyToOutputDirectory>
</Content>
<Content Include="Files\SampleFile2.pdf">
<CopyToOutputDirectory>Always</CopyToOutputDirectory>
</Content>
</ItemGroup>
<Import Project="$(MSBuildToolsPath)\Microsoft.VisualBasic.targets" />
</Project>
```
<!-- code block end -->
<!-- code block begin -->
##### **Program.vb:**
```
Imports System.IO
Imports Bytescout.PDFExtractor
Module Program
Sub Main()
Try
' Output file list
Dim lstAllFilesInfo = New List(Of FileIndexOutput)()
' Get all files inside directory
Dim allFiles = Directory.GetFiles(".\Files", "*.*")
' Iterate all files, and get details
For Each itmFile In allFiles
' Get basic file information
Dim fileInfo As FileInfo = New FileInfo(itmFile)
' Check whether file is supported
If _IsFileSupported(fileInfo) Then
' Fill file index model
Dim oFileIndex = New FileIndexOutput()
oFileIndex.fileName = fileInfo.Name
oFileIndex.fileDate = fileInfo.CreationTime
oFileIndex.content = _GetFileContent(fileInfo)
' Add to final list
lstAllFilesInfo.Add(oFileIndex)
End If
Next
' Print all output
Console.WriteLine("Total {0} files indexed" & vbLf, lstAllFilesInfo.Count)
For Each itmFileInfo In lstAllFilesInfo
Console.WriteLine("fileName: {0}", itmFileInfo.fileName)
Console.WriteLine("fileDate: {0}", itmFileInfo.fileDate.ToString("MMM dd yyyy hh:mm:ss"))
Console.WriteLine("content: {0}", itmFileInfo.content.Trim())
Console.WriteLine(vbLf)
Next
Catch ex As Exception
Console.WriteLine(("ERROR:" + ex.Message))
End Try
Console.WriteLine("Press any key to exit...")
Console.ReadLine()
End Sub
''' <summary>
''' Get File Content
''' </summary>
Private Function _GetFileContent(ByVal fileInfo As FileInfo) As String
Dim fileExtension As String = System.IO.Path.GetExtension(fileInfo.FullName)
If fileExtension = ".pdf" Then
Return _GetPdfFileContent(fileInfo)
ElseIf fileExtension = ".png" OrElse fileExtension = ".jpg" Then
Return _GetImageContent(fileInfo)
End If
Throw New Exception("File not supported.")
End Function
''' <summary>
''' Get PDF File Content
''' </summary>
Private Function _GetPdfFileContent(ByVal fileInfo As FileInfo) As String
' Read all file content...
Using textExtractor As TextExtractor = New TextExtractor("demo", "demo")
' Load Document
textExtractor.LoadDocumentFromFile(fileInfo.FullName)
Return textExtractor.GetText()
End Using
End Function
''' <summary>
''' Get Image Contents
''' </summary>
Private Function _GetImageContent(ByVal fileInfo As FileInfo) As String
' Read all file content...
Using extractor As TextExtractor = New TextExtractor()
' Load document
extractor.LoadDocumentFromFile(fileInfo.FullName)
' Enable Optical Character Recognition (OCR)
' in .Auto mode (the SDK automatically checks whether OCR is needed or not)
extractor.OCRMode = OCRMode.Auto
' Set the location of OCR language data files
extractor.OCRLanguageDataFolder = "c:\Program Files\Bytescout PDF Extractor SDK\ocrdata_best\"
' Set OCR language
extractor.OCRLanguage = "eng" '"eng" for english, "deu" for German, "fra" for French, "spa" for Spanish etc - according to files in "ocrdata" folder
' Find more language files at https://github.com/bytescout/ocrdata
' Set PDF document rendering resolution
extractor.OCRResolution = 300
' Read all text
Return extractor.GetText()
End Using
End Function
''' <summary>
''' Check whether file is valid
''' </summary>
Private Function _IsFileSupported(ByVal fileInfo As FileInfo) As Boolean
' Get File Extension
Dim fileExtension As String = Path.GetExtension(fileInfo.Name)
' Check whether file extension is valid
Return (fileExtension = ".pdf" OrElse fileExtension = ".png" OrElse fileExtension = ".jpg")
End Function
''' <summary>
''' FileIndexOutput class
''' </summary>
Public Class FileIndexOutput
Public Property fileName As String
Public Property fileDate As DateTime
Public Property content As String
End Class
End Module
```
<!-- code block end -->
|
Java
|
package com.jt.test.sort;
import java.util.Arrays;
import java.util.Random;
/**
* since 2016/10/19.
*/
public class Select {
public static void sort(Comparable[] data) {
for (int i = 0; i < data.length; i++) {
int min = i;
for (int j = i+1; j < data.length; j++) {
if (less(data, min, j)) {
min = j;
}
}
exch(data, i, min);
}
}
// Note: despite its name, this returns true when data[min] compares greater than data[j],
// i.e. when data[j] is the smaller of the two elements.
private static boolean less(Comparable[] data, int min, int j) {
return data[min].compareTo(data[j]) > 0;
}
private static void exch(Comparable[] data, int i, int min) {
Comparable tmp = data[i];
data[i] = data[min];
data[min] = tmp;
}
public static boolean isSort(Comparable[] data) {
for (int i = 0; i < data.length-1; i++) {
if (less(data, i, i + 1)) {
return false;
}
}
return true;
}
public static void main(String[] args) throws Exception {
Random random = new Random();
Integer[] datas = new Integer[10];
for (int i = 0; i < 10; i++) {
datas[i] = random.nextInt(100);
}
sort(datas);
if (!isSort(datas)) {
System.err.println("not sort");
}
System.out.println(Arrays.toString(datas));
}
}
|
Java
|
/*
* Copyright 2000-2008 JetBrains s.r.o.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jetbrains.plugins.groovy.lang.actions.updown;
import com.intellij.ide.DataManager;
import com.intellij.openapi.application.Result;
import com.intellij.openapi.command.WriteCommandAction;
import com.intellij.openapi.editor.Editor;
import com.intellij.openapi.editor.actionSystem.EditorActionHandler;
import com.intellij.openapi.editor.actionSystem.EditorActionManager;
import com.intellij.openapi.editor.ex.DocumentEx;
import com.intellij.testFramework.fixtures.LightCodeInsightFixtureTestCase;
import org.jetbrains.plugins.groovy.GroovyFileType;
import org.jetbrains.plugins.groovy.util.TestUtils;
import org.jetbrains.plugins.groovy.lang.editor.actions.GroovyEditorActionsManager;
import java.util.List;
/**
* @author ilyas
*/
public class GroovyMoveStatementTest extends LightCodeInsightFixtureTestCase {
@Override
protected String getBasePath() {
return TestUtils.getTestDataPath() + "groovy/actions/moveStatement/";
}
public void testClazz1() throws Throwable { downTest(); }
public void testClazz2() throws Throwable { upTest(); }
public void testClos2() throws Throwable { upTest(); }
public void testMeth1() throws Throwable { downTest(); }
public void testMeth2() throws Throwable { downTest(); }
public void testMeth3() throws Throwable { upTest(); }
public void testMeth4() throws Throwable { upTest(); }
public void testIfst() throws Throwable { downTest(); }
public void testIfst2() throws Throwable { upTest(); }
public void testSimple1() throws Throwable { downTest(); }
public void testSimple2() throws Throwable { upTest(); }
public void testTryst1() throws Throwable { downTest(); }
public void testTryst2() throws Throwable { downTest(); }
public void testStatementOutsideClosure() throws Throwable { downTest(); }
public void testVariableOutsideClosure() throws Throwable { upTest(); }
public void testVariableOutsideClosureDown() throws Throwable { downTest(); }
public void testStatementInsideClosure() throws Throwable { upTest(); }
public void testMoveGroovydocWithMethod() throws Throwable { downTest(); }
public void testMoveMethodWithGroovydoc() throws Throwable { downTest(); }
public void testMoveSecondFieldUp() throws Throwable { upTest(); }
public void testMoveFirstFieldDown() throws Throwable { downTest(); }
public void testVariableOverMethodInScript() throws Throwable { downTest(); }
public void testVariableOverClassInScript() throws Throwable { downTest(); }
public void testUpFromLastOffset() throws Throwable { upTest(); }
public void testClosureWithPrequel() throws Throwable { upTest(); }
public void testMultiLineVariable() throws Throwable { downTest(); }
public void testClosureVariableByRBrace() throws Throwable { upTest(); }
private void downTest() throws Exception {
doTest(GroovyEditorActionsManager.MOVE_STATEMENT_DOWN_ACTION);
}
private void upTest() throws Exception {
doTest(GroovyEditorActionsManager.MOVE_STATEMENT_UP_ACTION);
}
public void doTest(final String actionId) throws Exception {
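// The test data file <testName>.test is expected to contain two sections:
// the initial editor content (data.get(0)) and the expected content after
// the move action has been applied (data.get(1)).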
final List<String> data = TestUtils.readInput(getTestDataPath() + getTestName(true) + ".test");
myFixture.configureByText(GroovyFileType.GROOVY_FILE_TYPE, data.get(0));
final EditorActionHandler handler = EditorActionManager.getInstance().getActionHandler(actionId);
new WriteCommandAction(getProject()) {
protected void run(Result result) throws Throwable {
final Editor editor = myFixture.getEditor();
handler.execute(editor, DataManager.getInstance().getDataContext(editor.getContentComponent()));
((DocumentEx)editor.getDocument()).stripTrailingSpaces(false);
}
}.execute();
myFixture.checkResult(data.get(1));
}
}
|
Java
|
package org.apereo.cas.web.report;
import org.apereo.cas.web.report.util.ControllerUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.actuate.endpoint.mvc.AbstractNamedMvcEndpoint;
import org.springframework.cloud.bus.BusProperties;
import org.springframework.cloud.config.server.config.ConfigServerProperties;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.servlet.ModelAndView;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.util.HashMap;
import java.util.Map;
/**
* Controller that exposes the CAS internal state and beans
* as JSON. The report is available at {@code /status/config}.
*
* @author Misagh Moayyed
* @since 4.1
*/
public class ConfigurationStateController extends AbstractNamedMvcEndpoint {
private static final String VIEW_CONFIG = "monitoring/viewConfig";
@Autowired(required = false)
private BusProperties busProperties;
@Autowired
private ConfigServerProperties configServerProperties;
public ConfigurationStateController() {
super("configstate", "/config", true, true);
}
/**
* Handle request.
*
* @param request the request
* @param response the response
* @return the model and view
* @throws Exception the exception
*/
@GetMapping
protected ModelAndView handleRequestInternal(final HttpServletRequest request,
final HttpServletResponse response) throws Exception {
final Map<String, Object> model = new HashMap<>();
final String path = request.getContextPath();
ControllerUtils.configureModelMapForConfigServerCloudBusEndpoints(busProperties, configServerProperties, path, model);
return new ModelAndView(VIEW_CONFIG, model);
}
}
|
Java
|
/**
* Copyright (C) 2009 bdferris <bdferris@onebusaway.org>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#import "OBAApplicationDelegate.h"
NS_ASSUME_NONNULL_BEGIN
@interface OBANetworkErrorAlertViewDelegate : NSObject <UIAlertViewDelegate> {
OBAApplicationDelegate * _context;
}
- (id) initWithContext:(OBAApplicationDelegate*)context;
@end
NS_ASSUME_NONNULL_END
|
Java
|
package com.canoo.ant.table;
import com.canoo.ant.filter.AllEqualsFilter;
import com.canoo.ant.filter.AllFilter;
import com.canoo.ant.filter.ITableFilter;
import org.apache.log4j.Logger;
import java.io.File;
import java.io.IOException;
import java.util.*;
public abstract class APropertyTable implements IPropertyTable {
private static final Logger LOG = Logger.getLogger(APropertyTable.class);
private static final int MAX_DEPTH = 10; // max recursion depth
private static final ThreadLocal DEPTH = new ThreadLocal();
private File fContainer;
private String fTable;
private String fPrefix;
private ITableFilter fFilter;
private List fRawTable;
private List fMetaTable;
protected static final String EMPTY = "";
protected static final String KEY_JOIN = "JOIN";
protected APropertyTable() {
fFilter = new AllFilter();
if( DEPTH.get() == null ) {
setDepth(0);
}
}
private static void setDepth(int depth){
DEPTH.set(new Integer(depth));
}
private static int getDepth(){
return((Integer)DEPTH.get()).intValue();
}
/**
* @return columnName -> expander (Type IPropertyTable)
*/
public Map getColumnInfo() {
List meta = getMetaTable();
Map result = new HashMap(meta.size()); // smaller is likely
// find all properties for this table
List tableSpecificColumnInfo = new AllEqualsFilter(TableFactory.KEY_TABLE).filter(meta, getTable());
for (Iterator eachColumnInfo = tableSpecificColumnInfo.iterator(); eachColumnInfo.hasNext();) {
Properties colInfo = (Properties) eachColumnInfo.next();
try {
// tableClass defaults to the current class
IPropertyTable table = TableFactory.createTable(colInfo, getClass().getName());
ITableFilter filter = TableFactory.createFilter(colInfo);
final File container;
if (colInfo.getProperty(TableFactory.KEY_CONTAINER, "").length() > 0) {
container = new File(getContainer().getParentFile(), colInfo.getProperty(TableFactory.KEY_CONTAINER));
colInfo.remove(TableFactory.KEY_CONTAINER); // to be sure that it doesn't get used with wrong path
}
else {
container = getContainer();
}
String key = colInfo.getProperty(TableFactory.KEY_NAME); // no default possible
TableFactory.initOrDefault(table, filter, colInfo, container, key);
result.put(key, table);
} catch (Exception e) {
LOG.error("cannot work with Property: " + colInfo.toString(), e);
throw new RuntimeException("Cannot work with Property: " + colInfo.toString(), e);
}
}
return result;
}
public List getPropertiesList(final String filterValue, final String prefix) {
// start with copy of initial table
// if current filter concerns extension keys, filter before extending
// filtering in advance also lowers memory consumption in the average
List result = getFilter().filter(getRawTable(), filterValue);
if (getDepth() > MAX_DEPTH){
LOG.error("processing grounded due to excessive recursion calls: "+getDepth());
return result;
}
setDepth(getDepth()+1);
final Map colInfo = getColumnInfo();
// only go over entries in the colInfo.
// (property names without colInfo info are not expanded)
for (Iterator eachExpandable = colInfo.keySet().iterator(); eachExpandable.hasNext();) {
String expansionName = (String) eachExpandable.next();
expandName(result, expansionName, colInfo);
}
setDepth(getDepth()-1);
// filter a second time to allow filters to work on expansions
result = getFilter().filter(result, filterValue);
// prefix is processed after filtering
if (prefix!=null && prefix.length()>0){
result = mapPrefix(result, prefix);
}
return result;
}
// like a ruby map!
private List mapPrefix(List result, final String prefix) {
List collect = new ArrayList(result.size());
for (Iterator eachProps = result.iterator(); eachProps.hasNext();) {
Properties props = (Properties) eachProps.next();
Properties mapped = new Properties();
for (Iterator eachKey = props.keySet().iterator(); eachKey.hasNext();) {
String key = (String) eachKey.next();
String value = props.getProperty(key);
mapped.setProperty(prefix+"."+key, value);
}
collect.add(mapped);
}
return collect;
}
protected void expandName(List result, String expansionName, Map colInfo) {
List expansions = new LinkedList(); // cannot add while iterating. store and add later
for (Iterator eachProperties = result.iterator(); eachProperties.hasNext();) {
Properties props = (Properties) eachProperties.next();
List newExpansions = expandProps(props, expansionName, colInfo);
// default behaviour: like OUTER join, we do not shrink if nothing found
if (newExpansions.size() > 0) {
eachProperties.remove();
expansions.addAll(newExpansions);
}
}
result.addAll(expansions);
}
protected List expandProps(Properties props, String expansionName, Map colInfo) {
String value = props.getProperty(expansionName);
List propExpansions = new LinkedList();
IPropertyTable expansionTable = (IPropertyTable) colInfo.get(expansionName);
// recursive call
List expandWith = expansionTable.getPropertiesList(value, expansionTable.getPrefix());
for (Iterator eachExpansion = expandWith.iterator(); eachExpansion.hasNext();) {
Properties expandProps = (Properties) eachExpansion.next();
// merge expansion with current line
expandProps.putAll(props);
// store for later adding
propExpansions.add(expandProps);
}
return propExpansions;
}
//-------------- field accessors ------------------
public File getContainer() {
return fContainer;
}
public void setContainer(File container) {
fContainer = container;
}
public String getTable() {
return fTable;
}
public void setTable(String table) {
fTable = table;
}
public ITableFilter getFilter() {
return fFilter;
}
public void setFilter(ITableFilter filter) {
fFilter = filter;
}
public String getPrefix() {
return fPrefix;
}
public void setPrefix(String prefix) {
fPrefix = prefix;
}
//-------------- how to read specifics ------------------
/** lazy getter, cached */
public List getRawTable() {
fRawTable = getCachedTable(getTable(), fRawTable);
return fRawTable;
}
/** lazy getter, cached */
public List getMetaTable() {
if (hasJoinTable()) {
fMetaTable = getCachedTable(KEY_JOIN, fMetaTable);
}
else {
fMetaTable = Collections.EMPTY_LIST;
}
return fMetaTable;
}
/**
* Indicates if the table container has a JOIN table.
* @return default is <code>true</code>
*/
protected boolean hasJoinTable() {
return true;
}
protected List getCachedTable(final String table, List tableCache) {
if (tableCache != null) {
return tableCache;
}
try {
tableCache = read(table);
}
catch (final IOException e) {
LOG.error("Cannot read " + getContainer() + " " + table, e);
String message = "Cannot read container >" + getContainer() + "<";
if (table != null)
message += " (table " + table + ")";
message += ": " + e.getMessage();
throw new RuntimeException(message, e);
}
if (tableCache.isEmpty()) {
LOG.debug("no entry in " + getContainer() + "/" + table);
}
LOG.debug(tableCache.size()+" entries in "+getContainer()+ " " + table);
return tableCache;
}
protected abstract List read(String table) throws IOException;
}
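// Illustrative sketch, not part of the original canoo sources: a minimal, self-contained
// demonstration of the prefix mapping that mapPrefix() above performs, where every key of
// every Properties entry is re-emitted as "<prefix>.<key>" with its value unchanged.
// The property names and the "login" prefix are hypothetical example data.
class PrefixMappingSketch {
public static void main(String[] args) {
java.util.Properties props = new java.util.Properties();
props.setProperty("user", "alice");
props.setProperty("role", "admin");
String prefix = "login"; // as it would be configured via setPrefix(...)
java.util.Properties mapped = new java.util.Properties();
for (String key : props.stringPropertyNames()) {
mapped.setProperty(prefix + "." + key, props.getProperty(key));
}
// prints something like {login.role=admin, login.user=alice}
System.out.println(mapped);
}
}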
|
Java
|
// For conditions of distribution and use, see copyright notice in LICENSE
#include "StableHeaders.h"
#include "ZipAssetBundle.h"
#include "ZipHelpers.h"
#include "ZipWorker.h"
#include "CoreDefines.h"
#include "Framework.h"
#include "FrameAPI.h"
#include "AssetAPI.h"
#include "AssetCache.h"
#include "LoggingFunctions.h"
#include <Urho3D/IO/FileSystem.h>
#include <zzip/zzip.h>
namespace Tundra
{
ZipAssetBundle::ZipAssetBundle(AssetAPI *owner, const String &type, const String &name) :
IAssetBundle(owner, type, name),
worker_(0),
archive_(0),
fileCount_(-1),
done_(false),
success_(false)
{
}
ZipAssetBundle::~ZipAssetBundle()
{
Unload();
}
void ZipAssetBundle::DoUnload()
{
Close();
StopThread();
fileCount_ = -1;
}
void ZipAssetBundle::Close()
{
if (archive_)
{
zzip_dir_close(archive_);
archive_ = 0;
}
}
bool ZipAssetBundle::DeserializeFromDiskSource()
{
if (!assetAPI_->Cache())
{
LogError("ZipAssetBundle::DeserializeFromDiskSource: Cannot process archive, AssetAPI cache is null.");
return false;
}
else if (DiskSource().Empty())
{
LogError("ZipAssetBundle::DeserializeFromDiskSource: Cannot process archive, no disk source for " + Name());
return false;
}
/* We want to detect whether the extracted files are already up to date, to save time.
If the last modified date of a sub asset is the same as that of the parent zip file,
we don't extract it. If the zip is re-downloaded from its source, everything gets unpacked
even if only one file inside it has changed. We could compare uncompressed sizes,
but that is not an absolute guarantee that a file has not changed, so to be on the safe side
we unpack the whole zip file. Zip files are meant for deploying the scene and should be touched
rather rarely. Note that local:// refs are unpacked to the cache but the zip's disk source is not
in the cache, meaning that local:// zip files will always be extracted fully even if the disk
source has not changed; we have no way to get the last modified date reliably except from
the asset cache. For local scenes this should be fine, as there is no real need to
zip the scene up when the disk sources are already right there in the storage.
The last modified query will fail if the file is open with zziplib, so do it first. */
uint zipLastModified = assetAPI_->Cache()->LastModified(Name());
const String diskSourceInternal = Urho3D::GetInternalPath(DiskSource());
zzip_error_t error = ZZIP_NO_ERROR;
archive_ = zzip_dir_open(diskSourceInternal.CString(), &error);
if (CheckAndLogZzipError(error) || CheckAndLogArchiveError(archive_) || !archive_)
{
archive_ = 0;
return false;
}
int uncompressing = 0;
ZZIP_DIRENT archiveEntry;
while(zzip_dir_read(archive_, &archiveEntry))
{
String relativePath = Urho3D::GetInternalPath(archiveEntry.d_name);
if (!relativePath.EndsWith("/"))
{
String subAssetRef = GetFullAssetReference(relativePath);
ZipArchiveFile file;
file.relativePath = relativePath;
file.cachePath = Urho3D::GetInternalPath(assetAPI_->Cache()->DiskSourceByRef(subAssetRef));
file.lastModified = assetAPI_->Cache()->LastModified(subAssetRef);
file.compressedSize = archiveEntry.d_csize;
file.uncompressedSize = archiveEntry.st_size;
/* Mark this file for extraction. If both the zip's and the sub asset's last modified
dates are valid and they differ, extract; if they carry the same date stamp, skip extraction.
Note that file.lastModified will not be valid for non-cached files, so
missing files are covered as well. */
file.doExtract = (zipLastModified > 0 && file.lastModified > 0) ? (zipLastModified != file.lastModified) : true;
if (file.doExtract)
uncompressing++;
files_.Push(file);
fileCount_++;
}
}
// Close the zzip directory ptr
Close();
// If the zip file was empty we don't want IsLoaded to fail on the files_ check.
// The bundle loaded fine but there was no content, log a warning.
if (files_.Empty())
{
LogWarning("ZipAssetBundle: Bundle loaded but does not contain any files " + Name());
files_.Push(ZipArchiveFile());
Loaded.Emit(this);
return true;
}
// Don't spin the worker if all sub assets are up to date in cache.
if (uncompressing > 0)
{
// Now that the file info has been read, continue in a worker thread.
LogDebug("ZipAssetBundle: File information read for " + Name() + ". File count: " + String(files_.Size()) + ". Starting worker thread to uncompress " + String(uncompressing) + " files.");
// ZipWorker runs the uncompression in a background thread; it is stopped and deleted in StopThread() once the work is done.
worker_ = new ZipWorker(this, zipLastModified, diskSourceInternal, files_);
if (!worker_->Run())
{
LogError("ZipAssetBundle: Failed to start worker thread for " + Name());
files_.Clear();
return false;
}
assetAPI_->GetFramework()->Frame()->Updated.Connect(this, &ZipAssetBundle::CheckDone);
}
else
Loaded.Emit(this);
return true;
}
bool ZipAssetBundle::DeserializeFromData(const u8* /*data*/, uint /*numBytes*/)
{
/** @note At this point it seems zzip needs a disk source to do its processing,
so we require a disk source for the archive. This might change in the future if the library changes. */
return false;
}
Vector<u8> ZipAssetBundle::GetSubAssetData(const String &subAssetName)
{
/* Makes no sense to keep the whole zip file contents in memory, as only a
few files may be wanted from a 100 MB bundle. Additionally, every asset would take 2x the memory.
We could make this function also open the zip file and uncompress the data for every sub asset request.
But that would be rather pointless, not to mention slower, as we already have the unpacked individual
assets on disk. If the unpacking to disk changes we might need to rethink this. */
String filePath = GetSubAssetDiskSource(subAssetName);
if (filePath.Empty())
return Vector<u8>();
Vector<u8> data;
return LoadFileToVector(filePath, data) ? data : Vector<u8>();
}
String ZipAssetBundle::GetSubAssetDiskSource(const String &subAssetName)
{
return assetAPI_->Cache()->FindInCache(GetFullAssetReference(subAssetName));
}
String ZipAssetBundle::GetFullAssetReference(const String &subAssetName)
{
return Name() + "#" + subAssetName;
}
bool ZipAssetBundle::IsLoaded() const
{
return (archive_ != 0 || !files_.Empty());
}
void ZipAssetBundle::CheckDone(float /*frametime*/)
{
// Invoked in main thread context
{
Urho3D::MutexLock m(mutexDone_);
if (!done_)
return;
if (success_)
Loaded.Emit(this);
else
Failed.Emit(this);
}
StopThread();
assetAPI_->GetFramework()->Frame()->Updated.Disconnect(this, &ZipAssetBundle::CheckDone);
}
void ZipAssetBundle::WorkerDone(bool successful)
{
// Invoked in worker thread context
Urho3D::MutexLock m(mutexDone_);
done_ = true;
success_ = successful;
}
void ZipAssetBundle::StopThread()
{
if (worker_)
worker_->Stop();
SAFE_DELETE(worker_);
}
Urho3D::Context *ZipAssetBundle::Context() const
{
return assetAPI_->GetContext();
}
Urho3D::FileSystem *ZipAssetBundle::FileSystem() const
{
return assetAPI_->GetSubsystem<Urho3D::FileSystem>();
}
}
|
Java
|
import { Injectable } from "@angular/core";
import { InjectionFactory } from "../../L0/L0.injection-factory/injection-factory";
import { createSelector } from "../../L4/L4.ngrx/create-selector";
import { StatementsSelector } from "./statements.selector";
import { ExportDeclaration, SyntaxKind } from "typescript";
@Injectable()
export class ExportDeclarationsSelector implements InjectionFactory {
constructor(private readonly statementsSelector: StatementsSelector) {
return this.factory() as any;
}
factory() {
return createSelector(
this.statementsSelector,
statements => statements
.filter(({kind}) => kind === SyntaxKind.ExportDeclaration)
.map(item => item as ExportDeclaration)
);
}
}
|
Java
|
using System;
using System.Collections.Generic;
using System.Configuration;
using System.Linq;
using System.Net;
using System.Net.Mail;
using System.Web;
namespace FashionStones.Utils
{
public class EmailSettings
{
public string Link = "www.fashion-stones.com.ua";
public string MailFromAddress = "kapitoshka0777@gmail.com";
public string ServerName = "smtp.gmail.com";
public bool UseSsl = true;
public int ServerPort = 587; //465;
public string password = "8425999kapitoshka";
}
//public class GMailer
//{
// public static string GmailUsername { get { return "kapitoshka0777@gmail.com"; } }
// public static string GmailPassword { get {return "8425999kapitoshka";} }
// public static int GmailPort { get; set; }
// public static bool GmailSSL { get; set; }
// public string ToEmail { get; set; }
// public string Subject { get; set; }
// public string Body { get; set; }
// public bool IsHtml { get; set; }
// static GMailer()
// {
// GmailHost = "smtp.gmail.com";
// GmailPort = 587; // Gmail can use ports 25, 465 & 587; but must be 25 for medium trust environment.
// GmailSSL = true;
// }
//public void Send()
//{
// SmtpClient smtp = new SmtpClient();
// smtp.Host = GmailHost;
// smtp.Port = GmailPort;
// smtp.EnableSsl = GmailSSL;
// smtp.DeliveryMethod = SmtpDeliveryMethod.Network;
// smtp.UseDefaultCredentials = false;
// smtp.Credentials = new NetworkCredential(GmailUsername, GmailPassword);
// using (var message = new MailMessage(GmailUsername, ToEmail))
// {
// message.Subject = Subject;
// message.Body = Body;
// message.IsBodyHtml = IsHtml;
// smtp.Send(message);
// }
//}
// }
}
|
Java
|
/*
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.cloud.spanner;
import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerException;
import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerExceptionForCancellation;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import com.google.api.client.util.BackOff;
import com.google.api.client.util.ExponentialBackOff;
import com.google.api.gax.retrying.RetrySettings;
import com.google.cloud.ByteArray;
import com.google.cloud.Date;
import com.google.cloud.Timestamp;
import com.google.cloud.spanner.Type.StructField;
import com.google.cloud.spanner.spi.v1.SpannerRpc;
import com.google.cloud.spanner.v1.stub.SpannerStubSettings;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.AbstractIterator;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.Uninterruptibles;
import com.google.protobuf.ByteString;
import com.google.protobuf.ListValue;
import com.google.protobuf.Value.KindCase;
import com.google.spanner.v1.PartialResultSet;
import com.google.spanner.v1.ResultSetMetadata;
import com.google.spanner.v1.ResultSetStats;
import com.google.spanner.v1.Transaction;
import com.google.spanner.v1.TypeCode;
import io.grpc.Context;
import io.opencensus.common.Scope;
import io.opencensus.trace.AttributeValue;
import io.opencensus.trace.Span;
import io.opencensus.trace.Tracer;
import io.opencensus.trace.Tracing;
import java.io.IOException;
import java.io.Serializable;
import java.math.BigDecimal;
import java.util.AbstractList;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executor;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.annotation.Nullable;
/** Implementation of {@link ResultSet}. */
abstract class AbstractResultSet<R> extends AbstractStructReader implements ResultSet {
private static final Tracer tracer = Tracing.getTracer();
interface Listener {
/**
* Called when transaction metadata is seen. This method may be invoked at most once. If the
* method is invoked, it will precede {@link #onError(SpannerException)} or {@link #onDone()}.
*/
void onTransactionMetadata(Transaction transaction, boolean shouldIncludeId)
throws SpannerException;
/** Called when the read finishes with an error. Returns the error that should be thrown. */
SpannerException onError(SpannerException e, boolean withBeginTransaction);
/** Called when the read finishes normally. */
void onDone(boolean withBeginTransaction);
}
@VisibleForTesting
static class GrpcResultSet extends AbstractResultSet<List<Object>> {
private final GrpcValueIterator iterator;
private final Listener listener;
private GrpcStruct currRow;
private SpannerException error;
private ResultSetStats statistics;
private boolean closed;
GrpcResultSet(CloseableIterator<PartialResultSet> iterator, Listener listener) {
this.iterator = new GrpcValueIterator(iterator);
this.listener = listener;
}
@Override
protected GrpcStruct currRow() {
checkState(!closed, "ResultSet is closed");
checkState(currRow != null, "next() call required");
return currRow;
}
@Override
public boolean next() throws SpannerException {
if (error != null) {
throw newSpannerException(error);
}
try {
if (currRow == null) {
ResultSetMetadata metadata = iterator.getMetadata();
if (metadata.hasTransaction()) {
listener.onTransactionMetadata(
metadata.getTransaction(), iterator.isWithBeginTransaction());
} else if (iterator.isWithBeginTransaction()) {
// The query should have returned a transaction.
throw SpannerExceptionFactory.newSpannerException(
ErrorCode.FAILED_PRECONDITION, AbstractReadContext.NO_TRANSACTION_RETURNED_MSG);
}
currRow = new GrpcStruct(iterator.type(), new ArrayList<>());
}
boolean hasNext = currRow.consumeRow(iterator);
if (!hasNext) {
statistics = iterator.getStats();
}
return hasNext;
} catch (Throwable t) {
throw yieldError(
SpannerExceptionFactory.asSpannerException(t),
iterator.isWithBeginTransaction() && currRow == null);
}
}
@Override
@Nullable
public ResultSetStats getStats() {
return statistics;
}
@Override
public void close() {
listener.onDone(iterator.isWithBeginTransaction());
iterator.close("ResultSet closed");
closed = true;
}
@Override
public Type getType() {
checkState(currRow != null, "next() call required");
return currRow.getType();
}
private SpannerException yieldError(SpannerException e, boolean beginTransaction) {
SpannerException toThrow = listener.onError(e, beginTransaction);
close();
throw toThrow;
}
}
/**
* Adapts a stream of {@code PartialResultSet} messages into a stream of {@code Value} messages.
*/
private static class GrpcValueIterator extends AbstractIterator<com.google.protobuf.Value> {
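// How chunking is handled: a single logical value may be split across consecutive
// PartialResultSet messages when getChunkedValue() is set on the message. computeNext()
// keeps reading messages while the current one is chunked and its last value has been
// consumed, concatenating string chunks and merging list chunks via concatLists(), which
// joins the last element of the earlier chunk with the first element of the next one
// (e.g. string chunks "Wor" and "ld" are returned as the single value "World").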
private enum StreamValue {
METADATA,
RESULT,
}
private final CloseableIterator<PartialResultSet> stream;
private ResultSetMetadata metadata;
private Type type;
private PartialResultSet current;
private int pos;
private ResultSetStats statistics;
GrpcValueIterator(CloseableIterator<PartialResultSet> stream) {
this.stream = stream;
}
@SuppressWarnings("unchecked")
@Override
protected com.google.protobuf.Value computeNext() {
if (!ensureReady(StreamValue.RESULT)) {
endOfData();
return null;
}
com.google.protobuf.Value value = current.getValues(pos++);
KindCase kind = value.getKindCase();
if (!isMergeable(kind)) {
if (pos == current.getValuesCount() && current.getChunkedValue()) {
throw newSpannerException(ErrorCode.INTERNAL, "Unexpected chunked PartialResultSet.");
} else {
return value;
}
}
if (!current.getChunkedValue() || pos != current.getValuesCount()) {
return value;
}
Object merged =
kind == KindCase.STRING_VALUE
? value.getStringValue()
: new ArrayList<>(value.getListValue().getValuesList());
while (current.getChunkedValue() && pos == current.getValuesCount()) {
if (!ensureReady(StreamValue.RESULT)) {
throw newSpannerException(
ErrorCode.INTERNAL, "Stream closed in the middle of chunked value");
}
com.google.protobuf.Value newValue = current.getValues(pos++);
if (newValue.getKindCase() != kind) {
throw newSpannerException(
ErrorCode.INTERNAL,
"Unexpected type in middle of chunked value. Expected: "
+ kind
+ " but got: "
+ newValue.getKindCase());
}
if (kind == KindCase.STRING_VALUE) {
merged = merged + newValue.getStringValue();
} else {
concatLists(
(List<com.google.protobuf.Value>) merged, newValue.getListValue().getValuesList());
}
}
if (kind == KindCase.STRING_VALUE) {
return com.google.protobuf.Value.newBuilder().setStringValue((String) merged).build();
} else {
return com.google.protobuf.Value.newBuilder()
.setListValue(
ListValue.newBuilder().addAllValues((List<com.google.protobuf.Value>) merged))
.build();
}
}
ResultSetMetadata getMetadata() throws SpannerException {
if (metadata == null) {
if (!ensureReady(StreamValue.METADATA)) {
throw newSpannerException(ErrorCode.INTERNAL, "Stream closed without sending metadata");
}
}
return metadata;
}
/**
* Get the query statistics. Query statistics are delivered with the last PartialResultSet in
* the stream. Any attempt to call this method before the caller has finished consuming the
* results will return null.
*/
@Nullable
ResultSetStats getStats() {
return statistics;
}
Type type() {
checkState(type != null, "metadata has not been received");
return type;
}
private boolean ensureReady(StreamValue requiredValue) throws SpannerException {
while (current == null || pos >= current.getValuesCount()) {
if (!stream.hasNext()) {
return false;
}
current = stream.next();
pos = 0;
if (type == null) {
// This is the first message on the stream.
if (!current.hasMetadata() || !current.getMetadata().hasRowType()) {
throw newSpannerException(ErrorCode.INTERNAL, "Missing type metadata in first message");
}
metadata = current.getMetadata();
com.google.spanner.v1.Type typeProto =
com.google.spanner.v1.Type.newBuilder()
.setCode(TypeCode.STRUCT)
.setStructType(metadata.getRowType())
.build();
try {
type = Type.fromProto(typeProto);
} catch (IllegalArgumentException e) {
throw newSpannerException(
ErrorCode.INTERNAL, "Invalid type metadata: " + e.getMessage(), e);
}
}
if (current.hasStats()) {
statistics = current.getStats();
}
if (requiredValue == StreamValue.METADATA) {
return true;
}
}
return true;
}
void close(@Nullable String message) {
stream.close(message);
}
boolean isWithBeginTransaction() {
return stream.isWithBeginTransaction();
}
/** @param a is a mutable list and b will be concatenated into a. */
private void concatLists(List<com.google.protobuf.Value> a, List<com.google.protobuf.Value> b) {
if (a.size() == 0 || b.size() == 0) {
a.addAll(b);
return;
} else {
com.google.protobuf.Value last = a.get(a.size() - 1);
com.google.protobuf.Value first = b.get(0);
KindCase lastKind = last.getKindCase();
KindCase firstKind = first.getKindCase();
if (isMergeable(lastKind) && lastKind == firstKind) {
com.google.protobuf.Value merged;
if (lastKind == KindCase.STRING_VALUE) {
String lastStr = last.getStringValue();
String firstStr = first.getStringValue();
merged =
com.google.protobuf.Value.newBuilder().setStringValue(lastStr + firstStr).build();
} else { // List
List<com.google.protobuf.Value> mergedList = new ArrayList<>();
mergedList.addAll(last.getListValue().getValuesList());
concatLists(mergedList, first.getListValue().getValuesList());
merged =
com.google.protobuf.Value.newBuilder()
.setListValue(ListValue.newBuilder().addAllValues(mergedList))
.build();
}
a.set(a.size() - 1, merged);
a.addAll(b.subList(1, b.size()));
} else {
a.addAll(b);
}
}
}
private boolean isMergeable(KindCase kind) {
return kind == KindCase.STRING_VALUE || kind == KindCase.LIST_VALUE;
}
}
static class GrpcStruct extends Struct implements Serializable {
private final Type type;
private final List<Object> rowData;
/**
* Builds an immutable version of this struct using {@link Struct#newBuilder()} which is used as
* a serialization proxy.
*/
private Object writeReplace() {
Builder builder = Struct.newBuilder();
List<Type.StructField> structFields = getType().getStructFields();
for (int i = 0; i < structFields.size(); i++) {
Type.StructField field = structFields.get(i);
String fieldName = field.getName();
Object value = rowData.get(i);
Type fieldType = field.getType();
switch (fieldType.getCode()) {
case BOOL:
builder.set(fieldName).to((Boolean) value);
break;
case INT64:
builder.set(fieldName).to((Long) value);
break;
case FLOAT64:
builder.set(fieldName).to((Double) value);
break;
case NUMERIC:
builder.set(fieldName).to((BigDecimal) value);
break;
case STRING:
builder.set(fieldName).to((String) value);
break;
case JSON:
builder.set(fieldName).to(Value.json((String) value));
break;
case BYTES:
builder.set(fieldName).to((ByteArray) value);
break;
case TIMESTAMP:
builder.set(fieldName).to((Timestamp) value);
break;
case DATE:
builder.set(fieldName).to((Date) value);
break;
case ARRAY:
switch (fieldType.getArrayElementType().getCode()) {
case BOOL:
builder.set(fieldName).toBoolArray((Iterable<Boolean>) value);
break;
case INT64:
builder.set(fieldName).toInt64Array((Iterable<Long>) value);
break;
case FLOAT64:
builder.set(fieldName).toFloat64Array((Iterable<Double>) value);
break;
case NUMERIC:
builder.set(fieldName).toNumericArray((Iterable<BigDecimal>) value);
break;
case STRING:
builder.set(fieldName).toStringArray((Iterable<String>) value);
break;
case JSON:
builder.set(fieldName).toJsonArray((Iterable<String>) value);
break;
case BYTES:
builder.set(fieldName).toBytesArray((Iterable<ByteArray>) value);
break;
case TIMESTAMP:
builder.set(fieldName).toTimestampArray((Iterable<Timestamp>) value);
break;
case DATE:
builder.set(fieldName).toDateArray((Iterable<Date>) value);
break;
case STRUCT:
builder
.set(fieldName)
.toStructArray(fieldType.getArrayElementType(), (Iterable<Struct>) value);
break;
default:
throw new AssertionError(
"Unhandled array type code: " + fieldType.getArrayElementType());
}
break;
case STRUCT:
if (value == null) {
builder.set(fieldName).to(fieldType, null);
} else {
builder.set(fieldName).to((Struct) value);
}
break;
default:
throw new AssertionError("Unhandled type code: " + fieldType.getCode());
}
}
return builder.build();
}
GrpcStruct(Type type, List<Object> rowData) {
this.type = type;
this.rowData = rowData;
}
@Override
public String toString() {
return this.rowData.toString();
}
boolean consumeRow(Iterator<com.google.protobuf.Value> iterator) {
rowData.clear();
if (!iterator.hasNext()) {
return false;
}
for (Type.StructField fieldType : getType().getStructFields()) {
if (!iterator.hasNext()) {
throw newSpannerException(
ErrorCode.INTERNAL,
"Invalid value stream: end of stream reached before row is complete");
}
com.google.protobuf.Value value = iterator.next();
rowData.add(decodeValue(fieldType.getType(), value));
}
return true;
}
private static Object decodeValue(Type fieldType, com.google.protobuf.Value proto) {
if (proto.getKindCase() == KindCase.NULL_VALUE) {
return null;
}
switch (fieldType.getCode()) {
case BOOL:
checkType(fieldType, proto, KindCase.BOOL_VALUE);
return proto.getBoolValue();
case INT64:
checkType(fieldType, proto, KindCase.STRING_VALUE);
return Long.parseLong(proto.getStringValue());
case FLOAT64:
return valueProtoToFloat64(proto);
case NUMERIC:
return new BigDecimal(proto.getStringValue());
case STRING:
case JSON:
checkType(fieldType, proto, KindCase.STRING_VALUE);
return proto.getStringValue();
case BYTES:
checkType(fieldType, proto, KindCase.STRING_VALUE);
return ByteArray.fromBase64(proto.getStringValue());
case TIMESTAMP:
checkType(fieldType, proto, KindCase.STRING_VALUE);
return Timestamp.parseTimestamp(proto.getStringValue());
case DATE:
checkType(fieldType, proto, KindCase.STRING_VALUE);
return Date.parseDate(proto.getStringValue());
case ARRAY:
checkType(fieldType, proto, KindCase.LIST_VALUE);
ListValue listValue = proto.getListValue();
return decodeArrayValue(fieldType.getArrayElementType(), listValue);
case STRUCT:
checkType(fieldType, proto, KindCase.LIST_VALUE);
ListValue structValue = proto.getListValue();
return decodeStructValue(fieldType, structValue);
default:
throw new AssertionError("Unhandled type code: " + fieldType.getCode());
}
}
private static Struct decodeStructValue(Type structType, ListValue structValue) {
List<Type.StructField> fieldTypes = structType.getStructFields();
checkArgument(
structValue.getValuesCount() == fieldTypes.size(),
"Size mismatch between type descriptor and actual values.");
List<Object> fields = new ArrayList<>(fieldTypes.size());
List<com.google.protobuf.Value> fieldValues = structValue.getValuesList();
for (int i = 0; i < fieldTypes.size(); ++i) {
fields.add(decodeValue(fieldTypes.get(i).getType(), fieldValues.get(i)));
}
return new GrpcStruct(structType, fields);
}
static Object decodeArrayValue(Type elementType, ListValue listValue) {
switch (elementType.getCode()) {
case BOOL:
// Use a view: element conversion is virtually free.
return Lists.transform(
listValue.getValuesList(),
input -> input.getKindCase() == KindCase.NULL_VALUE ? null : input.getBoolValue());
case INT64:
// For int64/float64 types, use custom containers. These avoid wrapper object
// creation for non-null arrays.
return new Int64Array(listValue);
case FLOAT64:
return new Float64Array(listValue);
case NUMERIC:
{
// Materialize list: element conversion is expensive and should happen only once.
ArrayList<Object> list = new ArrayList<>(listValue.getValuesCount());
for (com.google.protobuf.Value value : listValue.getValuesList()) {
list.add(
value.getKindCase() == KindCase.NULL_VALUE
? null
: new BigDecimal(value.getStringValue()));
}
return list;
}
case STRING:
case JSON:
return Lists.transform(
listValue.getValuesList(),
input -> input.getKindCase() == KindCase.NULL_VALUE ? null : input.getStringValue());
case BYTES:
{
// Materialize list: element conversion is expensive and should happen only once.
ArrayList<Object> list = new ArrayList<>(listValue.getValuesCount());
for (com.google.protobuf.Value value : listValue.getValuesList()) {
list.add(
value.getKindCase() == KindCase.NULL_VALUE
? null
: ByteArray.fromBase64(value.getStringValue()));
}
return list;
}
case TIMESTAMP:
{
// Materialize list: element conversion is expensive and should happen only once.
ArrayList<Object> list = new ArrayList<>(listValue.getValuesCount());
for (com.google.protobuf.Value value : listValue.getValuesList()) {
list.add(
value.getKindCase() == KindCase.NULL_VALUE
? null
: Timestamp.parseTimestamp(value.getStringValue()));
}
return list;
}
case DATE:
{
// Materialize list: element conversion is expensive and should happen only once.
ArrayList<Object> list = new ArrayList<>(listValue.getValuesCount());
for (com.google.protobuf.Value value : listValue.getValuesList()) {
list.add(
value.getKindCase() == KindCase.NULL_VALUE
? null
: Date.parseDate(value.getStringValue()));
}
return list;
}
case STRUCT:
{
ArrayList<Struct> list = new ArrayList<>(listValue.getValuesCount());
for (com.google.protobuf.Value value : listValue.getValuesList()) {
if (value.getKindCase() == KindCase.NULL_VALUE) {
list.add(null);
} else {
ListValue structValue = value.getListValue();
list.add(decodeStructValue(elementType, structValue));
}
}
return list;
}
default:
throw new AssertionError("Unhandled type code: " + elementType.getCode());
}
}
private static void checkType(
Type fieldType, com.google.protobuf.Value proto, KindCase expected) {
if (proto.getKindCase() != expected) {
throw newSpannerException(
ErrorCode.INTERNAL,
"Invalid value for column type "
+ fieldType
+ " expected "
+ expected
+ " but was "
+ proto.getKindCase());
}
}
Struct immutableCopy() {
return new GrpcStruct(type, new ArrayList<>(rowData));
}
@Override
public Type getType() {
return type;
}
@Override
public boolean isNull(int columnIndex) {
return rowData.get(columnIndex) == null;
}
@Override
protected boolean getBooleanInternal(int columnIndex) {
return (Boolean) rowData.get(columnIndex);
}
@Override
protected long getLongInternal(int columnIndex) {
return (Long) rowData.get(columnIndex);
}
@Override
protected double getDoubleInternal(int columnIndex) {
return (Double) rowData.get(columnIndex);
}
@Override
protected BigDecimal getBigDecimalInternal(int columnIndex) {
return (BigDecimal) rowData.get(columnIndex);
}
@Override
protected String getStringInternal(int columnIndex) {
return (String) rowData.get(columnIndex);
}
@Override
protected String getJsonInternal(int columnIndex) {
return (String) rowData.get(columnIndex);
}
@Override
protected ByteArray getBytesInternal(int columnIndex) {
return (ByteArray) rowData.get(columnIndex);
}
@Override
protected Timestamp getTimestampInternal(int columnIndex) {
return (Timestamp) rowData.get(columnIndex);
}
@Override
protected Date getDateInternal(int columnIndex) {
return (Date) rowData.get(columnIndex);
}
@Override
protected Value getValueInternal(int columnIndex) {
final List<Type.StructField> structFields = getType().getStructFields();
final StructField structField = structFields.get(columnIndex);
final Type columnType = structField.getType();
final boolean isNull = rowData.get(columnIndex) == null;
switch (columnType.getCode()) {
case BOOL:
return Value.bool(isNull ? null : getBooleanInternal(columnIndex));
case INT64:
return Value.int64(isNull ? null : getLongInternal(columnIndex));
case NUMERIC:
return Value.numeric(isNull ? null : getBigDecimalInternal(columnIndex));
case FLOAT64:
return Value.float64(isNull ? null : getDoubleInternal(columnIndex));
case STRING:
return Value.string(isNull ? null : getStringInternal(columnIndex));
case BYTES:
return Value.bytes(isNull ? null : getBytesInternal(columnIndex));
case TIMESTAMP:
return Value.timestamp(isNull ? null : getTimestampInternal(columnIndex));
case DATE:
return Value.date(isNull ? null : getDateInternal(columnIndex));
case STRUCT:
return Value.struct(isNull ? null : getStructInternal(columnIndex));
case ARRAY:
switch (columnType.getArrayElementType().getCode()) {
case BOOL:
return Value.boolArray(isNull ? null : getBooleanListInternal(columnIndex));
case INT64:
return Value.int64Array(isNull ? null : getLongListInternal(columnIndex));
case NUMERIC:
return Value.numericArray(isNull ? null : getBigDecimalListInternal(columnIndex));
case FLOAT64:
return Value.float64Array(isNull ? null : getDoubleListInternal(columnIndex));
case STRING:
return Value.stringArray(isNull ? null : getStringListInternal(columnIndex));
case BYTES:
return Value.bytesArray(isNull ? null : getBytesListInternal(columnIndex));
case TIMESTAMP:
return Value.timestampArray(isNull ? null : getTimestampListInternal(columnIndex));
case DATE:
return Value.dateArray(isNull ? null : getDateListInternal(columnIndex));
case STRUCT:
return Value.structArray(
columnType.getArrayElementType(),
isNull ? null : getStructListInternal(columnIndex));
default:
throw new IllegalArgumentException(
"Invalid array value type " + columnType.getArrayElementType());
}
default:
throw new IllegalArgumentException("Invalid value type " + columnType);
}
}
@Override
protected Struct getStructInternal(int columnIndex) {
return (Struct) rowData.get(columnIndex);
}
@Override
protected boolean[] getBooleanArrayInternal(int columnIndex) {
@SuppressWarnings("unchecked") // We know ARRAY<BOOL> produces a List<Boolean>.
List<Boolean> values = (List<Boolean>) rowData.get(columnIndex);
boolean[] r = new boolean[values.size()];
for (int i = 0; i < values.size(); ++i) {
if (values.get(i) == null) {
throw throwNotNull(columnIndex);
}
r[i] = values.get(i);
}
return r;
}
@Override
@SuppressWarnings("unchecked") // We know ARRAY<BOOL> produces a List<Boolean>.
protected List<Boolean> getBooleanListInternal(int columnIndex) {
return Collections.unmodifiableList((List<Boolean>) rowData.get(columnIndex));
}
@Override
protected long[] getLongArrayInternal(int columnIndex) {
return getLongListInternal(columnIndex).toPrimitiveArray(columnIndex);
}
@Override
protected Int64Array getLongListInternal(int columnIndex) {
return (Int64Array) rowData.get(columnIndex);
}
@Override
protected double[] getDoubleArrayInternal(int columnIndex) {
return getDoubleListInternal(columnIndex).toPrimitiveArray(columnIndex);
}
@Override
protected Float64Array getDoubleListInternal(int columnIndex) {
return (Float64Array) rowData.get(columnIndex);
}
@Override
@SuppressWarnings("unchecked") // We know ARRAY<NUMERIC> produces a List<BigDecimal>.
protected List<BigDecimal> getBigDecimalListInternal(int columnIndex) {
return (List<BigDecimal>) rowData.get(columnIndex);
}
@Override
@SuppressWarnings("unchecked") // We know ARRAY<STRING> produces a List<String>.
protected List<String> getStringListInternal(int columnIndex) {
return Collections.unmodifiableList((List<String>) rowData.get(columnIndex));
}
@Override
@SuppressWarnings("unchecked") // We know ARRAY<JSON> produces a List<String>.
protected List<String> getJsonListInternal(int columnIndex) {
return Collections.unmodifiableList((List<String>) rowData.get(columnIndex));
}
@Override
@SuppressWarnings("unchecked") // We know ARRAY<BYTES> produces a List<ByteArray>.
protected List<ByteArray> getBytesListInternal(int columnIndex) {
return Collections.unmodifiableList((List<ByteArray>) rowData.get(columnIndex));
}
@Override
@SuppressWarnings("unchecked") // We know ARRAY<TIMESTAMP> produces a List<Timestamp>.
protected List<Timestamp> getTimestampListInternal(int columnIndex) {
return Collections.unmodifiableList((List<Timestamp>) rowData.get(columnIndex));
}
@Override
@SuppressWarnings("unchecked") // We know ARRAY<DATE> produces a List<Date>.
protected List<Date> getDateListInternal(int columnIndex) {
return Collections.unmodifiableList((List<Date>) rowData.get(columnIndex));
}
@Override
@SuppressWarnings("unchecked") // We know ARRAY<STRUCT<...>> produces a List<STRUCT>.
protected List<Struct> getStructListInternal(int columnIndex) {
return Collections.unmodifiableList((List<Struct>) rowData.get(columnIndex));
}
}
@VisibleForTesting
interface CloseableIterator<T> extends Iterator<T> {
/**
* Closes the iterator, freeing any underlying resources.
*
* @param message a message to include in the final RPC status
*/
void close(@Nullable String message);
boolean isWithBeginTransaction();
}
/** Adapts a streaming read/query call into an iterator over partial result sets. */
@VisibleForTesting
static class GrpcStreamIterator extends AbstractIterator<PartialResultSet>
implements CloseableIterator<PartialResultSet> {
private static final Logger logger = Logger.getLogger(GrpcStreamIterator.class.getName());
private static final PartialResultSet END_OF_STREAM = PartialResultSet.newBuilder().build();
private final ConsumerImpl consumer = new ConsumerImpl();
private final BlockingQueue<PartialResultSet> stream;
private final Statement statement;
private SpannerRpc.StreamingCall call;
private volatile boolean withBeginTransaction;
private SpannerException error;
@VisibleForTesting
GrpcStreamIterator(int prefetchChunks) {
this(null, prefetchChunks);
}
@VisibleForTesting
GrpcStreamIterator(Statement statement, int prefetchChunks) {
this.statement = statement;
// One extra to allow for END_OF_STREAM message.
this.stream = new LinkedBlockingQueue<>(prefetchChunks + 1);
}
protected final SpannerRpc.ResultStreamConsumer consumer() {
return consumer;
}
public void setCall(SpannerRpc.StreamingCall call, boolean withBeginTransaction) {
this.call = call;
this.withBeginTransaction = withBeginTransaction;
}
@Override
public void close(@Nullable String message) {
if (call != null) {
call.cancel(message);
}
}
@Override
public boolean isWithBeginTransaction() {
return withBeginTransaction;
}
@Override
protected final PartialResultSet computeNext() {
PartialResultSet next;
try {
// TODO: Ideally honor io.grpc.Context while blocking here. In practice,
// cancellation/deadline results in an error being delivered to "stream", which
// should mean that we do not block significantly longer afterwards, but it would
// be more robust to use poll() with a timeout.
next = stream.take();
} catch (InterruptedException e) {
// Treat interrupt as a request to cancel the read.
throw SpannerExceptionFactory.propagateInterrupt(e);
}
if (next != END_OF_STREAM) {
call.request(1);
return next;
}
// All done - close() no longer needs to cancel the call.
call = null;
if (error != null) {
throw SpannerExceptionFactory.newSpannerException(error);
}
endOfData();
return null;
}
private void addToStream(PartialResultSet results) {
// We assume that nothing from the user will interrupt gRPC event threads.
Uninterruptibles.putUninterruptibly(stream, results);
}
private class ConsumerImpl implements SpannerRpc.ResultStreamConsumer {
@Override
public void onPartialResultSet(PartialResultSet results) {
addToStream(results);
}
@Override
public void onCompleted() {
addToStream(END_OF_STREAM);
}
@Override
public void onError(SpannerException e) {
if (statement != null) {
if (logger.isLoggable(Level.FINEST)) {
// Include parameter values if logging level is set to FINEST or higher.
e =
SpannerExceptionFactory.newSpannerExceptionPreformatted(
e.getErrorCode(),
String.format("%s - Statement: '%s'", e.getMessage(), statement.toString()),
e);
logger.log(Level.FINEST, "Error executing statement", e);
} else {
e =
SpannerExceptionFactory.newSpannerExceptionPreformatted(
e.getErrorCode(),
String.format("%s - Statement: '%s'", e.getMessage(), statement.getSql()),
e);
}
}
error = e;
addToStream(END_OF_STREAM);
}
}
}
/**
* Wraps an iterator over partial result sets, supporting resuming RPCs on error. This class keeps
* track of the most recent resume token seen, and will buffer partial result set chunks that do
* not have a resume token until one is seen or buffer space is exceeded, which reduces the chance
* of yielding data to the caller that cannot be resumed.
*/
@VisibleForTesting
abstract static class ResumableStreamIterator extends AbstractIterator<PartialResultSet>
implements CloseableIterator<PartialResultSet> {
private static final RetrySettings STREAMING_RETRY_SETTINGS =
SpannerStubSettings.newBuilder().executeStreamingSqlSettings().getRetrySettings();
private static final Logger logger = Logger.getLogger(ResumableStreamIterator.class.getName());
private final BackOff backOff = newBackOff();
private final LinkedList<PartialResultSet> buffer = new LinkedList<>();
private final int maxBufferSize;
private final Span span;
private CloseableIterator<PartialResultSet> stream;
private ByteString resumeToken;
private boolean finished;
/**
* Indicates whether it is currently safe to retry RPCs. This will be {@code false} if we have
* reached the maximum buffer size without seeing a restart token; in this case, we will drain
* the buffer and remain in this state until we see a new restart token.
*/
private boolean safeToRetry = true;
protected ResumableStreamIterator(int maxBufferSize, String streamName, Span parent) {
checkArgument(maxBufferSize >= 0);
this.maxBufferSize = maxBufferSize;
this.span = tracer.spanBuilderWithExplicitParent(streamName, parent).startSpan();
}
private static ExponentialBackOff newBackOff() {
return new ExponentialBackOff.Builder()
.setMultiplier(STREAMING_RETRY_SETTINGS.getRetryDelayMultiplier())
.setInitialIntervalMillis(
Math.max(10, (int) STREAMING_RETRY_SETTINGS.getInitialRetryDelay().toMillis()))
.setMaxIntervalMillis(
Math.max(1000, (int) STREAMING_RETRY_SETTINGS.getMaxRetryDelay().toMillis()))
.setMaxElapsedTimeMillis(Integer.MAX_VALUE) // Prevent Backoff.STOP from getting returned.
.build();
}
private static void backoffSleep(Context context, BackOff backoff) throws SpannerException {
backoffSleep(context, nextBackOffMillis(backoff));
}
private static long nextBackOffMillis(BackOff backoff) throws SpannerException {
try {
return backoff.nextBackOffMillis();
} catch (IOException e) {
throw newSpannerException(ErrorCode.INTERNAL, e.getMessage(), e);
}
}
private static void backoffSleep(Context context, long backoffMillis) throws SpannerException {
tracer
.getCurrentSpan()
.addAnnotation(
"Backing off",
ImmutableMap.of("Delay", AttributeValue.longAttributeValue(backoffMillis)));
final CountDownLatch latch = new CountDownLatch(1);
final Context.CancellationListener listener =
ignored -> {
// Wakeup on cancellation / DEADLINE_EXCEEDED.
latch.countDown();
};
context.addListener(listener, DirectExecutor.INSTANCE);
try {
if (backoffMillis == BackOff.STOP) {
// Highly unlikely but we handle it just in case.
backoffMillis = STREAMING_RETRY_SETTINGS.getMaxRetryDelay().toMillis();
}
if (latch.await(backoffMillis, TimeUnit.MILLISECONDS)) {
// Woken by context cancellation.
throw newSpannerExceptionForCancellation(context, null);
}
} catch (InterruptedException interruptExcept) {
throw newSpannerExceptionForCancellation(context, interruptExcept);
} finally {
context.removeListener(listener);
}
}
private enum DirectExecutor implements Executor {
INSTANCE;
@Override
public void execute(Runnable command) {
command.run();
}
}
abstract CloseableIterator<PartialResultSet> startStream(@Nullable ByteString resumeToken);
@Override
public void close(@Nullable String message) {
if (stream != null) {
stream.close(message);
span.end(TraceUtil.END_SPAN_OPTIONS);
stream = null;
}
}
@Override
public boolean isWithBeginTransaction() {
return stream != null && stream.isWithBeginTransaction();
}
@Override
protected PartialResultSet computeNext() {
Context context = Context.current();
while (true) {
// Eagerly start stream before consuming any buffered items.
if (stream == null) {
span.addAnnotation(
"Starting/Resuming stream",
ImmutableMap.of(
"ResumeToken",
AttributeValue.stringAttributeValue(
resumeToken == null ? "null" : resumeToken.toStringUtf8())));
try (Scope s = tracer.withSpan(span)) {
// When starting a new stream, set the Span as current to make the gRPC Span a child of
// this Span.
stream = checkNotNull(startStream(resumeToken));
}
}
// Buffer contains items up to a resume token or has reached capacity: flush.
if (!buffer.isEmpty()
&& (finished || !safeToRetry || !buffer.getLast().getResumeToken().isEmpty())) {
return buffer.pop();
}
try {
if (stream.hasNext()) {
PartialResultSet next = stream.next();
boolean hasResumeToken = !next.getResumeToken().isEmpty();
if (hasResumeToken) {
resumeToken = next.getResumeToken();
safeToRetry = true;
}
// If the buffer is empty and this chunk has a resume token or we cannot resume safely
// anyway, we can yield it immediately rather than placing it in the buffer to be
// returned on the next iteration.
if ((hasResumeToken || !safeToRetry) && buffer.isEmpty()) {
return next;
}
buffer.add(next);
if (buffer.size() > maxBufferSize && buffer.getLast().getResumeToken().isEmpty()) {
// We need to flush without a restart token. Errors encountered until we see
// such a token will fail the read.
safeToRetry = false;
}
} else {
finished = true;
if (buffer.isEmpty()) {
endOfData();
return null;
}
}
} catch (SpannerException e) {
if (safeToRetry && e.isRetryable()) {
span.addAnnotation(
"Stream broken. Safe to retry", TraceUtil.getExceptionAnnotations(e));
logger.log(Level.FINE, "Retryable exception, will sleep and retry", e);
// Truncate any items in the buffer before the last retry token.
while (!buffer.isEmpty() && buffer.getLast().getResumeToken().isEmpty()) {
buffer.removeLast();
}
assert buffer.isEmpty() || buffer.getLast().getResumeToken().equals(resumeToken);
stream = null;
try (Scope s = tracer.withSpan(span)) {
long delay = e.getRetryDelayInMillis();
if (delay != -1) {
backoffSleep(context, delay);
} else {
backoffSleep(context, backOff);
}
}
continue;
}
span.addAnnotation("Stream broken. Not safe to retry");
TraceUtil.setWithFailure(span, e);
throw e;
} catch (RuntimeException e) {
span.addAnnotation("Stream broken. Not safe to retry");
TraceUtil.setWithFailure(span, e);
throw e;
}
}
}
}
static double valueProtoToFloat64(com.google.protobuf.Value proto) {
if (proto.getKindCase() == KindCase.STRING_VALUE) {
switch (proto.getStringValue()) {
case "-Infinity":
return Double.NEGATIVE_INFINITY;
case "Infinity":
return Double.POSITIVE_INFINITY;
case "NaN":
return Double.NaN;
default:
// Fall-through to handling below to produce an error.
}
}
if (proto.getKindCase() != KindCase.NUMBER_VALUE) {
throw newSpannerException(
ErrorCode.INTERNAL,
"Invalid value for column type "
+ Type.float64()
+ " expected NUMBER_VALUE or STRING_VALUE with value one of"
+ " \"Infinity\", \"-Infinity\", or \"NaN\" but was "
+ proto.getKindCase()
+ (proto.getKindCase() == KindCase.STRING_VALUE
? " with value \"" + proto.getStringValue() + "\""
: ""));
}
return proto.getNumberValue();
}
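// Declared with a return type so call sites can write "throw throwNotNull(...)",
// which keeps the compiler's flow analysis satisfied even though this method always throws.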
static NullPointerException throwNotNull(int columnIndex) {
throw new NullPointerException(
"Cannot call array getter for column " + columnIndex + " with null elements");
}
/**
* Memory-optimized base class for {@code ARRAY<INT64>} and {@code ARRAY<FLOAT64>} types. Both of
* these involve conversions from the type yielded by JSON parsing, which are {@code String} and
* {@code BigDecimal} respectively. Rather than construct new wrapper objects for each array
* element, we use primitive arrays and a {@code BitSet} to track nulls.
*/
abstract static class PrimitiveArray<T, A> extends AbstractList<T> {
private final A data;
private final BitSet nulls;
private final int size;
PrimitiveArray(ListValue protoList) {
this.size = protoList.getValuesCount();
A data = newArray(size);
BitSet nulls = new BitSet(size);
for (int i = 0; i < protoList.getValuesCount(); ++i) {
if (protoList.getValues(i).getKindCase() == KindCase.NULL_VALUE) {
nulls.set(i);
} else {
setProto(data, i, protoList.getValues(i));
}
}
this.data = data;
this.nulls = nulls;
}
PrimitiveArray(A data, BitSet nulls, int size) {
this.data = data;
this.nulls = nulls;
this.size = size;
}
abstract A newArray(int size);
abstract void setProto(A array, int i, com.google.protobuf.Value protoValue);
abstract T get(A array, int i);
@Override
public T get(int index) {
if (index < 0 || index >= size) {
throw new ArrayIndexOutOfBoundsException("index=" + index + " size=" + size);
}
return nulls.get(index) ? null : get(data, index);
}
@Override
public int size() {
return size;
}
A toPrimitiveArray(int columnIndex) {
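// BitSet#length() is one past the highest set bit, so a non-zero length means at least
// one element is null and the primitive array cannot be materialized.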
if (nulls.length() > 0) {
throw throwNotNull(columnIndex);
}
A r = newArray(size);
System.arraycopy(data, 0, r, 0, size);
return r;
}
}
static class Int64Array extends PrimitiveArray<Long, long[]> {
Int64Array(ListValue protoList) {
super(protoList);
}
Int64Array(long[] data, BitSet nulls) {
super(data, nulls, data.length);
}
@Override
long[] newArray(int size) {
return new long[size];
}
@Override
void setProto(long[] array, int i, com.google.protobuf.Value protoValue) {
array[i] = Long.parseLong(protoValue.getStringValue());
}
@Override
Long get(long[] array, int i) {
return array[i];
}
}
static class Float64Array extends PrimitiveArray<Double, double[]> {
Float64Array(ListValue protoList) {
super(protoList);
}
Float64Array(double[] data, BitSet nulls) {
super(data, nulls, data.length);
}
@Override
double[] newArray(int size) {
return new double[size];
}
@Override
void setProto(double[] array, int i, com.google.protobuf.Value protoValue) {
array[i] = valueProtoToFloat64(protoValue);
}
@Override
Double get(double[] array, int i) {
return array[i];
}
}
protected abstract GrpcStruct currRow();
@Override
public Struct getCurrentRowAsStruct() {
return currRow().immutableCopy();
}
@Override
protected boolean getBooleanInternal(int columnIndex) {
return currRow().getBooleanInternal(columnIndex);
}
@Override
protected long getLongInternal(int columnIndex) {
return currRow().getLongInternal(columnIndex);
}
@Override
protected double getDoubleInternal(int columnIndex) {
return currRow().getDoubleInternal(columnIndex);
}
@Override
protected BigDecimal getBigDecimalInternal(int columnIndex) {
return currRow().getBigDecimalInternal(columnIndex);
}
@Override
protected String getStringInternal(int columnIndex) {
return currRow().getStringInternal(columnIndex);
}
@Override
protected String getJsonInternal(int columnIndex) {
return currRow().getJsonInternal(columnIndex);
}
@Override
protected ByteArray getBytesInternal(int columnIndex) {
return currRow().getBytesInternal(columnIndex);
}
@Override
protected Timestamp getTimestampInternal(int columnIndex) {
return currRow().getTimestampInternal(columnIndex);
}
@Override
protected Date getDateInternal(int columnIndex) {
return currRow().getDateInternal(columnIndex);
}
@Override
protected Value getValueInternal(int columnIndex) {
return currRow().getValueInternal(columnIndex);
}
@Override
protected boolean[] getBooleanArrayInternal(int columnIndex) {
return currRow().getBooleanArrayInternal(columnIndex);
}
@Override
protected List<Boolean> getBooleanListInternal(int columnIndex) {
return currRow().getBooleanListInternal(columnIndex);
}
@Override
protected long[] getLongArrayInternal(int columnIndex) {
return currRow().getLongArrayInternal(columnIndex);
}
@Override
protected List<Long> getLongListInternal(int columnIndex) {
return currRow().getLongListInternal(columnIndex);
}
@Override
protected double[] getDoubleArrayInternal(int columnIndex) {
return currRow().getDoubleArrayInternal(columnIndex);
}
@Override
protected List<Double> getDoubleListInternal(int columnIndex) {
return currRow().getDoubleListInternal(columnIndex);
}
@Override
protected List<BigDecimal> getBigDecimalListInternal(int columnIndex) {
return currRow().getBigDecimalListInternal(columnIndex);
}
@Override
protected List<String> getStringListInternal(int columnIndex) {
return currRow().getStringListInternal(columnIndex);
}
@Override
protected List<String> getJsonListInternal(int columnIndex) {
return currRow().getJsonListInternal(columnIndex);
}
@Override
protected List<ByteArray> getBytesListInternal(int columnIndex) {
return currRow().getBytesListInternal(columnIndex);
}
@Override
protected List<Timestamp> getTimestampListInternal(int columnIndex) {
return currRow().getTimestampListInternal(columnIndex);
}
@Override
protected List<Date> getDateListInternal(int columnIndex) {
return currRow().getDateListInternal(columnIndex);
}
@Override
protected List<Struct> getStructListInternal(int columnIndex) {
return currRow().getStructListInternal(columnIndex);
}
@Override
public boolean isNull(int columnIndex) {
return currRow().isNull(columnIndex);
}
}
|
Java
|
/*
* Copyright 2010 The Closure Compiler Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.javascript.jscomp;
import com.google.common.collect.ImmutableSet;
import com.google.javascript.jscomp.NodeTraversal.AbstractPostOrderCallback;
import com.google.javascript.jscomp.SpecializeModule.SpecializationState;
import com.google.javascript.rhino.Node;
/**
* Tests for {@link SpecializeModule}.
*
* @author dcc@google.com (Devin Coughlin)
*/
public class SpecializeModuleTest extends CompilerTestCase {
private static final String SHARED_EXTERNS = "var alert = function() {}";
public SpecializeModuleTest() {
super(SHARED_EXTERNS);
}
private PassFactory inlineFunctions =
new PassFactory("inlineFunctions", true) {
@Override
protected CompilerPass createInternal(AbstractCompiler compiler) {
return new InlineFunctions(compiler,
compiler.getUniqueNameIdSupplier(), true, false, true);
}
};
private PassFactory removeUnusedPrototypeProperties =
new PassFactory("removeUnusedPrototypeProperties", true) {
@Override
protected CompilerPass createInternal(AbstractCompiler compiler) {
return new RemoveUnusedPrototypeProperties(compiler, false, false);
}
};
private PassFactory devirtualizePrototypeMethods =
new PassFactory("devirtualizePrototypeMethods", true) {
@Override
protected CompilerPass createInternal(AbstractCompiler compiler) {
return new DevirtualizePrototypeMethods(compiler);
}
};
@Override
protected CompilerPass getProcessor(final Compiler compiler) {
final SpecializeModule specializeModule = new SpecializeModule(compiler,
devirtualizePrototypeMethods, inlineFunctions,
removeUnusedPrototypeProperties);
return new CompilerPass() {
public void process(Node externs, Node root) {
specializeModule.process(externs, root);
/* Make sure variables are declared before used */
new VarCheck(compiler).process(externs, root);
}
};
}
@Override
public void setUp() throws Exception {
super.setUp();
enableNormalize();
}
public void testSpecializeInline() {
JSModule[] modules = createModuleStar(
// m1
/* Recursion in A() prevents inline of A*/
"var A = function() {alert(B());A()};" +
"var B = function() {return 6};" +
"A();",
// m2
"A();" +
"B();" +
"B = function() {return 7};" +
"A();" +
"B();"
);
test(modules, new String[] {
// m1
"var A = function() {alert(6);A()};" + /* Specialized A */
"A();" +
"var B;",
// m2
"A = function() {alert(B());A()};" + /* Unspecialized A */
"B = function() {return 6};" + /* Removed from m1, so add to m2 */
"A();" +
"B();" +
"B = function() {return 7};" +
"A();" +
"B();"
});
}
public void testSpecializeCascadedInline() {
JSModule[] modules = createModuleStar(
// m1
/* Recursion in A() prevents inline of A*/
"var A = function() {alert(B());A()};" +
"var B = function() {return C()};" +
"var C = function() {return 6};" +
"A();",
// m2
"B = function() {return 7};" +
"A();");
test(modules, new String[] {
// m1
"var A = function() {alert(6);A()};" + /* Specialized A */
"A();" +
"var B, C;",
// m2
"A = function() {alert(B());A()};" + /* Unspecialized A */
"B = function() {return C()};" + /* Removed from m1, so add to m2 */
"C = function() {return 6};" + /* Removed from m1, so add to m2 */
"B = function() {return 7};" +
"A();"
});
}
public void testSpecializeInlineWithMultipleDependents() {
JSModule[] modules = createModuleStar(
// m1
/* Recursion in A() prevents inline of A*/
"var A = function() {alert(B());A()};" +
"var B = function() {return 6};" +
"A();",
// m2
"B = function() {return 7};" +
"A();",
// m3
"A();"
);
test(modules, new String[] {
// m1
"var A = function() {alert(6);A()};" + /* Specialized A */
"A();" +
"var B;",
// m2
"A = function() {alert(B());A()};" + /* Unspecialized A */
"B = function() {return 6};" + /* Removed from m1, so add to m2 */
"B = function() {return 7};" +
"A();",
"A = function() {alert(B());A()};" + /* Unspecialized A */
"B = function() {return 6};" + /* Removed from m1, so add to m2 */
"A();",
});
}
public void testSpecializeInlineWithNamespaces() {
JSModule[] modules = createModuleStar(
// m1
"var ns = {};" +
/* Recursion in A() prevents inline of A*/
"ns.A = function() {alert(B());ns.A()};" +
"var B = function() {return 6};" +
"ns.A();",
// m2
"B = function() {return 7};" +
"ns.A();");
test(modules, new String[] {
// m1
"var ns = {};" +
"ns.A = function() {alert(6);ns.A()};" + /* Specialized A */
"ns.A();" +
"var B;",
// m2
"ns.A = function() {alert(B());ns.A()};" + /* Unspecialized A */
"B = function() {return 6};" + /* Removed from m1, so add to m2 */
"B = function() {return 7};" +
"ns.A();"
});
}
public void testSpecializeInlineWithRegularFunctions() {
JSModule[] modules = createModuleStar(
// m1
/* Recursion in A() prevents inline of A*/
"function A() {alert(B());A()}" +
"function B() {return 6}" +
"A();",
// m2
"B = function() {return 7};" +
"A();");
test(modules, new String[] {
// m1
"function A() {alert(6);A()}" + /* Specialized A */
"A();" +
"var B;",
// m2
"A = function() {alert(B());A()};" + /* Unspecialized A */
"B = function() {return 6};" + /* Removed from m1, so add to m2 */
/* Start of original m2 */
"B = function() {return 7};" +
"A();"
});
}
public void testDontSpecializeLocalNonAnonymousFunctions() {
/* normalize result, but not expected */
enableNormalize(false);
JSModule[] modules = createModuleStar(
// m1
"(function(){var noSpecialize = " +
"function() {alert(6)};noSpecialize()})()",
// m2
"");
test(modules, new String[] {
// m1
"(function(){var noSpecialize = " +
"function() {alert(6)};noSpecialize()})()",
// m2
""
});
}
public void testAddDummyVarsForRemovedFunctions() {
JSModule[] modules = createModuleStar(
// m1
/* Recursion in A() prevents inline of A*/
"var A = function() {alert(B() + C());A()};" +
"var B = function() {return 6};" +
"var C = function() {return 8};" +
"A();",
// m2
"" +
"A();");
test(modules, new String[] {
// m1
"var A = function() {alert(6 + 8);A()};" + /* Specialized A */
"A();" +
"var B, C;",
// m2
"A = function() {alert(B() + C());A()};" + /* Unspecialized A */
"B = function() {return 6};" + /* Removed from m1, so add to m2 */
"C = function() {return 8};" + /* Removed from m1, so add to m2 */
"A();"
});
}
public void testSpecializeRemoveUnusedProperties() {
JSModule[] modules = createModuleStar(
// m1
/* Recursion in A() prevents inline of A*/
"var Foo = function(){};" + /* constructor */
"Foo.prototype.a = function() {this.a()};" +
"Foo.prototype.b = function() {return 6};" +
"Foo.prototype.c = function() {return 7};" +
"var aliasA = Foo.prototype.a;" + // Prevents devirtualization of a
"var x = new Foo();" +
"x.a();",
// m2
"");
test(modules, new String[] {
// m1
"var Foo = function(){};" + /* constructor */
"Foo.prototype.a = function() {this.a()};" +
"var aliasA = Foo.prototype.a;" +
"var x = new Foo();" +
"x.a();",
// m2
"Foo.prototype.b = function() {return 6};" +
"Foo.prototype.c = function() {return 7};"
});
}
public void testDontSpecializeAliasedFunctions_inline() {
JSModule[] modules = createModuleStar(
// m1
/* Recursion in A() prevents inline of A*/
"function A() {alert(B());A()}" +
"function B() {return 6}" +
"var aliasA = A;" +
"A();",
// m2
"B = function() {return 7};" +
"B();");
test(modules, new String[] {
// m1
/* Recursion in A() prevents inline of A*/
"function A() {alert(B());A()}" +
"function B() {return 6}" +
"var aliasA = A;" +
"A();",
// m2
"B = function() {return 7};" +
"B();"
});
}
public void testDontSpecializeAliasedFunctions_remove_unused_properties() {
JSModule[] modules = createModuleStar(
// m1
"var Foo = function(){};" + /* constructor */
"Foo.prototype.a = function() {this.a()};" +
"Foo.prototype.b = function() {return 6};" +
"var aliasB = Foo.prototype.b;" +
"Foo.prototype.c = function() {return 7};" +
"Foo.prototype.d = function() {return 7};" +
"var aliasA = Foo.prototype.a;" + // Prevents devirtualization of a
"var x = new Foo();" +
"x.a();" +
"var aliasC = (new Foo).c",
// m2
"");
test(modules, new String[] {
// m1
"var Foo = function(){};" + /* constructor */
"Foo.prototype.a = function() {this.a()};" +
"Foo.prototype.b = function() {return 6};" +
"var aliasB = Foo.prototype.b;" +
"Foo.prototype.c = function() {return 7};" +
"var aliasA = Foo.prototype.a;" + // Prevents devirtualization of a
"var x = new Foo();" +
"x.a();" +
"var aliasC = (new Foo).c",
// m2
"Foo.prototype.d = function() {return 7};"
});
}
public void testSpecializeDevirtualizePrototypeMethods() {
JSModule[] modules = createModuleStar(
// m1
"/** @constructor */" +
"var Foo = function(){};" + /* constructor */
"Foo.prototype.a = function() {this.a();return 7};" +
"Foo.prototype.b = function() {this.a()};" +
"var x = new Foo();" +
"x.a();",
// m2
"");
test(modules, new String[] {
// m1
"var Foo = function(){};" + /* constructor */
"var JSCompiler_StaticMethods_a =" +
"function(JSCompiler_StaticMethods_a$self) {" +
"JSCompiler_StaticMethods_a(JSCompiler_StaticMethods_a$self);" +
"return 7" +
"};" +
"var x = new Foo();" +
"JSCompiler_StaticMethods_a(x);",
// m2
"Foo.prototype.a = function() {this.a();return 7};" +
"Foo.prototype.b = function() {this.a()};"
});
}
public void testSpecializeDevirtualizePrototypeMethodsWithInline() {
JSModule[] modules = createModuleStar(
// m1
"/** @constructor */" +
"var Foo = function(){};" + /* constructor */
"Foo.prototype.a = function() {return 7};" +
"var x = new Foo();" +
"var z = x.a();",
// m2
"");
test(modules, new String[] {
// m1
"var Foo = function(){};" + /* constructor */
"var x = new Foo();" +
"var z = 7;",
// m2
"Foo.prototype.a = function() {return 7};"
});
}
/**
* Tests for {@link SpecializeModule.SpecializationState}.
*/
public static class SpecializeModuleSpecializationStateTest
extends CompilerTestCase {
Compiler lastCompiler;
SpecializationState lastState;
@Override
public CompilerPass getProcessor(final Compiler compiler) {
lastCompiler = compiler;
return new CompilerPass() {
public void process(Node externs, Node root) {
SimpleDefinitionFinder defFinder =
new SimpleDefinitionFinder(compiler);
defFinder.process(externs, root);
SimpleFunctionAliasAnalysis functionAliasAnalysis =
new SimpleFunctionAliasAnalysis();
functionAliasAnalysis.analyze(defFinder);
lastState = new SpecializationState(functionAliasAnalysis);
}
};
}
public void testRemovedFunctions() {
testSame("function F(){}\nvar G = function(a){};");
assertEquals(ImmutableSet.of(), lastState.getRemovedFunctions());
Node functionF = findFunction("F");
lastState.reportRemovedFunction(functionF, functionF.getParent());
assertEquals(ImmutableSet.of(functionF), lastState.getRemovedFunctions());
Node functionG = findFunction("G");
lastState.reportRemovedFunction(functionG, functionF.getParent());
assertEquals(ImmutableSet.of(functionF, functionG),
lastState.getRemovedFunctions());
assertEquals(ImmutableSet.of(), lastState.getSpecializedFunctions());
}
public void testSpecializedFunctions() {
testSame("function F(){}\nvar G = function(a){};");
assertEquals(ImmutableSet.of(), lastState.getSpecializedFunctions());
Node functionF = findFunction("F");
lastState.reportSpecializedFunction(functionF);
assertEquals(ImmutableSet.of(functionF),
lastState.getSpecializedFunctions());
Node functionG = findFunction("G");
lastState.reportSpecializedFunction(functionG);
assertEquals(ImmutableSet.of(functionF, functionG),
lastState.getSpecializedFunctions());
assertEquals(ImmutableSet.of(), lastState.getRemovedFunctions());
}
public void testCanFixupFunction() {
testSame("function F(){}\n" +
"var G = function(a){};\n" +
"var ns = {};" +
"ns.H = function(){};" +
"var ns2 = {I : function anon1(){}};" +
"(function anon2(){})();");
assertTrue(lastState.canFixupFunction(findFunction("F")));
assertTrue(lastState.canFixupFunction(findFunction("G")));
assertTrue(lastState.canFixupFunction(findFunction("ns.H")));
assertFalse(lastState.canFixupFunction(findFunction("anon1")));
assertFalse(lastState.canFixupFunction(findFunction("anon2")));
// Can't guarantee safe fixup for aliased functions
testSame("function A(){}\n" +
"var aliasA = A;\n");
assertFalse(lastState.canFixupFunction(findFunction("A")));
}
private Node findFunction(String name) {
FunctionFinder f = new FunctionFinder(name);
new NodeTraversal(lastCompiler, f).traverse(lastCompiler.jsRoot);
assertNotNull("Couldn't find " + name, f.found);
return f.found;
}
/**
* Quick Traversal to find a given function in the AST.
*/
private class FunctionFinder extends AbstractPostOrderCallback {
Node found = null;
final String target;
FunctionFinder(String target) {
this.target = target;
}
public void visit(NodeTraversal t, Node n, Node parent) {
if (NodeUtil.isFunction(n)
&& target.equals(NodeUtil.getFunctionName(n))) {
found = n;
}
}
}
}
}
|
Java
|
/*
* Copyright 2015 Paul Horn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ff7.characters
import scalaz.NonEmptyList
sealed trait CharacterAction
object CharacterAction {
val attack: CharacterAction = Attack
val magic: CharacterAction = Magic
// val item: CharacterAction = Item
// val defend: CharacterAction = Defend
val skip: CharacterAction = Skip
// val actions = NonEmptyList(attack, magic, item, defend, skip)
val actions = NonEmptyList(attack, magic, skip)
case object Attack extends CharacterAction
case object Magic extends CharacterAction
// case object Item extends CharacterAction
// case object Defend extends CharacterAction
case object Skip extends CharacterAction
}
|
Java
|
'use strict';
const { extend } = require('underscore');
const dbclient = require('abacus-dbclient');
const { testCollectedUsageID, testResourceID, testOrganizationID, testSpaceID, testConsumerID, testPlanID,
testResourceType, testAccountID, testMeteringPlanID, testRatingPlanID,
testPricingPlanID } = require('./fixtures/usageDocumentFieldsConstants');
const _commonBlueprint = {
collected_usage_id: testCollectedUsageID,
resource_id: testResourceID,
organization_id: testOrganizationID,
space_id: testSpaceID,
consumer_id: testConsumerID,
plan_id: testPlanID,
resource_type: testResourceType,
account_id: testAccountID,
metering_plan_id: testMeteringPlanID,
rating_plan_id: testRatingPlanID,
pricing_plan_id: testPricingPlanID
};
const buildUsage = (...builders) => {
const usage = {};
for(let builder of builders)
builder(usage);
return extend(usage, {
id: dbclient.kturi(usage.resource_instance_id, usage.processed)
});
};
const withEndTimestamp = (timestamp) => (usage) => usage.end = timestamp;
const withStartTimestamp = (timestamp) => (usage) => usage.start = timestamp;
const withProcessedTimestamp = (timestamp) => (usage) => usage.processed = timestamp;
const withBlueprint = (blueprint) => (usage) => extend(usage, blueprint);
const withDefaultBlueprint = () => (usage) => extend(usage, _commonBlueprint);
const withResourceInstanceId = (resourceInstanceId) => (usage) => usage.resource_instance_id = resourceInstanceId;
const withAccumulatedUsage = (accumulatedUsage) => (usage) => usage.accumulated_usage = accumulatedUsage;
const buildAccumulatedUsage = (...builders) => {
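  // Five time windows; the daily (index 3) and monthly (index 4) windows are the ones
  // filled in by the withCurrentDayQuantity/withPreviousDayQuantity/withCurrentMonthQuantity builders below.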
const accumulatedUsage = { windows: [[null], [null], [null], [null, null, null, null, null, null], [null, null]] };
for(let builder of builders)
builder(accumulatedUsage);
return accumulatedUsage;
};
const withMetricName = (metricName) => (accumulatedUsage) => accumulatedUsage.metric = metricName;
const withCurrentDayQuantity = (quantity) => (accumulatedUsage) =>
accumulatedUsage.windows[3][0] = { quantity: quantity };
const withPreviousDayQuantity = (quantity) => (accumulatedUsage) =>
accumulatedUsage.windows[3][1] = { quantity: quantity };
const withCurrentMonthQuantity = (quantity) => (accumulatedUsage) =>
accumulatedUsage.windows[4][0] = { quantity: quantity };
module.exports = {
buildUsage, withEndTimestamp, withStartTimestamp, withProcessedTimestamp, withBlueprint, withDefaultBlueprint,
withResourceInstanceId, withAccumulatedUsage, buildAccumulatedUsage, withMetricName, withCurrentDayQuantity,
withCurrentMonthQuantity, withPreviousDayQuantity
};
|
Java
|
package com.rbmhtechnology.eventuate.chaos
import akka.actor.ActorSystem
import akka.actor.Props
import akka.pattern.BackoffSupervisor
import com.rbmhtechnology.eventuate.ReplicationConnection
import com.rbmhtechnology.eventuate.ReplicationEndpoint
import com.typesafe.config.ConfigFactory
import scala.concurrent.duration.DurationInt
trait ChaosSetup extends App {
def getSystem: ActorSystem
def getEndpoint(implicit system: ActorSystem): ReplicationEndpoint
protected def baseConfig(hostname: String) = ConfigFactory.parseString(
s"""
|akka.actor.provider = "akka.remote.RemoteActorRefProvider"
|akka.remote.enabled-transports = ["akka.remote.netty.tcp"]
|akka.remote.netty.tcp.hostname = "$hostname"
|akka.remote.netty.tcp.port = 2552
|akka.test.single-expect-default = 10s
|akka.loglevel = "INFO"
|eventuate.log.write-batch-size = 16
|eventuate.log.read-timeout = 3s
|eventuate.log.retry-delay = 3s
|akka.remote.netty.tcp.maximum-frame-size = 1024000b
""".stripMargin)
protected def quote(str: String) = "\"" + str + "\""
/** starts the actor watched by a `BackoffSupervisor` */
protected def supervised(props: Props, name: String): Props =
BackoffSupervisor.props(props, name, 1.second, 30.seconds, 0.1)
def name = {
if (args == null || args.length < 1) {
Console.err.println("no <nodename> specified")
sys.exit(1)
} else {
args(0)
}
}
def hostname = sys.env.getOrElse("HOSTNAME", s"$name.eventuate-chaos.docker")
// replication connection to other node(s)
def connections = args.drop(1).map { conn =>
conn.split(":") match {
case Array(host, port) =>
ReplicationConnection(host, port.toInt)
case Array(host) =>
ReplicationConnection(host, 2552)
}
}.toSet
}
|
Java
|
package me.tatarka.timesync.lib;
import android.content.Context;
import java.util.Arrays;
/**
* A class for interacting with a {@link TimeSync}. You can get and set its configuration, and
* force it to sync immediately. To get an instance of the class for a given {@link TimeSync}, use
* {@link TimeSync#get(android.content.Context, Class)}.
*/
public final class TimeSyncProxy {
private Context context;
private String name;
private TimeSync listener;
TimeSyncProxy(Context context, String name) {
this.context = context;
this.name = name;
listener = TimeSyncParser.parseListeners(context).get(name);
}
/**
* Syncs immediately. This is useful in response to a user action. Use this sparingly, as
* frequent syncs defeat the purpose of using this library.
*/
public void sync() {
TimeSyncService.sync(context, name);
}
/**
* Syncs sometime in the near future, randomizing per device. This is useful in response to a
* server message, using GCM for example, so that the server is not overwhelmed with all devices
* trying to sync at once.
*/
public void syncInexact() {
TimeSyncService.syncInexact(context, name);
}
/**
* Gets the current configuration for the {@link TimeSync}.
*
* @return the configuration
* @see TimeSync.Config
*/
public TimeSync.Config config() {
return listener.config();
}
/**
* Modifies the current configuration for the {@link TimeSync}.
*
* @param edits the edits
* @see TimeSync#edit(TimeSync.Edit...)
*/
public void edit(Iterable<TimeSync.Edit> edits) {
listener.edit(edits);
TimeSyncService.update(context, name);
}
/**
* Modifies the current configuration for the {@link TimeSync}.
*
* @param edits the edits
* @see TimeSync#edit(TimeSync.Edit...)
*/
public void edit(TimeSync.Edit... edits) {
edit(Arrays.asList(edits));
}
}
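A brief usage sketch follows. The `MySync` listener class is hypothetical, and `TimeSync.get(Context, Class)`, the lookup referenced in the class javadoc above, is assumed to return the matching `TimeSyncProxy`; this is an illustration, not part of the library code.
// Hypothetical usage sketch, not part of the library code above.
class TimeSyncProxyExample {
    void onUserRequestedRefresh(android.content.Context context) {
        // MySync is a hypothetical TimeSync subclass registered with the library.
        TimeSyncProxy proxy = TimeSync.get(context, MySync.class);
        proxy.sync();                             // sync immediately in response to the user action
        proxy.syncInexact();                      // or: sync in the near future, randomized per device
        TimeSync.Config config = proxy.config();  // inspect the current configuration
    }
}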
|
Java
|
#
# Copyright 2021 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package database::oracle::mode::dictionarycacheusage;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
use Digest::MD5 qw(md5_hex);
sub custom_hitratio_calc {
my ($self, %options) = @_;
my $delta_total = ($options{new_datas}->{$self->{instance} . '_gets'} - $options{old_datas}->{$self->{instance} . '_gets'});
my $delta_cache = ($options{new_datas}->{$self->{instance} . '_getmisses'} - $options{old_datas}->{$self->{instance} . '_getmisses'});
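    # Note: manage_selection() stores SUM(gets - getmisses), i.e. the number of cache hits,
    # under the 'getmisses' key, so delta_cache / delta_total below is indeed the hit ratio.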
$self->{result_values}->{hit_ratio} = $delta_total ? (100 * $delta_cache / $delta_total) : 0;
return 0;
}
sub set_counters {
my ($self, %options) = @_;
$self->{maps_counters_type} = [
{ name => 'global', cb_prefix_output => 'prefix_global_output', type => 0 },
];
$self->{maps_counters}->{global} = [
{ label => 'get-hits', nlabel => 'dictionary.cache.get.hitratio.percentage', set => {
key_values => [ { name => 'getmisses', diff => 1 }, { name => 'gets', diff => 1 } ],
closure_custom_calc => $self->can('custom_hitratio_calc'),
output_template => 'get hit ratio %.2f%%',
output_use => 'hit_ratio', threshold_use => 'hit_ratio',
perfdatas => [
{ label => 'get_hit_ratio', value => 'hit_ratio', template => '%.2f', min => 0, max => 100, unit => '%' },
],
}
},
];
}
sub prefix_global_output {
my ($self, %options) = @_;
return 'SGA dictionary cache ';
}
sub new {
my ($class, %options) = @_;
my $self = $class->SUPER::new(package => __PACKAGE__, %options, statefile => 1, force_new_perfdata => 1);
bless $self, $class;
$options{options}->add_options(arguments => {
});
return $self;
}
sub manage_selection {
my ($self, %options) = @_;
my $query = q{
SELECT SUM(gets), SUM(gets-getmisses) FROM v$rowcache
};
$options{sql}->connect();
$options{sql}->query(query => $query);
my @result = $options{sql}->fetchrow_array();
$options{sql}->disconnect();
$self->{global} = {
gets => $result[0],
getmisses => $result[1],
};
$self->{cache_name} = "oracle_" . $self->{mode} . '_' . $options{sql}->get_unique_id4save() . '_' .
(defined($self->{option_results}->{filter_counters}) ? md5_hex($self->{option_results}->{filter_counters}) : md5_hex('all'));
}
1;
__END__
=head1 MODE
Check Oracle dictionary cache usage.
=over 8
=item B<--warning-*> B<--critical-*>
Thresholds.
Can be: 'get-hits'.
=back
=cut
|
Java
|
#
# Copyright 2021 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package network::cisco::wlc::snmp::plugin;
use strict;
use warnings;
use base qw(centreon::plugins::script_snmp);
sub new {
my ($class, %options) = @_;
my $self = $class->SUPER::new(package => __PACKAGE__, %options);
bless $self, $class;
$self->{version} = '1.0';
$self->{modes} = {
'ap-channel-interference' => 'centreon::common::airespace::snmp::mode::apchannelinterference',
'ap-channel-noise' => 'centreon::common::airespace::snmp::mode::apchannelnoise',
'ap-status' => 'centreon::common::airespace::snmp::mode::apstatus',
'ap-users' => 'centreon::common::airespace::snmp::mode::apusers',
'cpu' => 'centreon::common::airespace::snmp::mode::cpu',
'discovery' => 'centreon::common::airespace::snmp::mode::discovery',
'hardware' => 'centreon::common::airespace::snmp::mode::hardware',
'interfaces' => 'snmp_standard::mode::interfaces',
'list-groups' => 'centreon::common::airespace::snmp::mode::listgroups',
'list-interfaces' => 'snmp_standard::mode::listinterfaces',
'list-radius-acc-servers' => 'centreon::common::airespace::snmp::mode::listradiusaccservers',
'list-radius-auth-servers' => 'centreon::common::airespace::snmp::mode::listradiusauthservers',
'memory' => 'centreon::common::airespace::snmp::mode::memory',
'radius-acc-servers' => 'centreon::common::airespace::snmp::mode::radiusaccservers',
'radius-auth-servers' => 'centreon::common::airespace::snmp::mode::radiusauthservers'
};
return $self;
}
1;
__END__
=head1 PLUGIN DESCRIPTION
Check Cisco Wireless Lan Controller in SNMP.
=cut
|
Java
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="de">
<head>
<!-- Generated by javadoc (version 1.7.0_17) on Tue May 14 03:45:03 CEST 2013 -->
<title>com.badlogic.gdx.maps (libgdx API)</title>
<meta name="date" content="2013-05-14">
<link rel="stylesheet" type="text/css" href="../../../../stylesheet.css" title="Style">
</head>
<body>
<h1 class="bar"><a href="../../../../com/badlogic/gdx/maps/package-summary.html" target="classFrame">com.badlogic.gdx.maps</a></h1>
<div class="indexContainer">
<h2 title="Interfaces">Interfaces</h2>
<ul title="Interfaces">
<li><a href="ImageResolver.html" title="interface in com.badlogic.gdx.maps" target="classFrame"><i>ImageResolver</i></a></li>
<li><a href="MapRenderer.html" title="interface in com.badlogic.gdx.maps" target="classFrame"><i>MapRenderer</i></a></li>
</ul>
<h2 title="Classes">Classes</h2>
<ul title="Classes">
<li><a href="ImageResolver.AssetManagerImageResolver.html" title="class in com.badlogic.gdx.maps" target="classFrame">ImageResolver.AssetManagerImageResolver</a></li>
<li><a href="ImageResolver.DirectImageResolver.html" title="class in com.badlogic.gdx.maps" target="classFrame">ImageResolver.DirectImageResolver</a></li>
<li><a href="ImageResolver.TextureAtlasImageResolver.html" title="class in com.badlogic.gdx.maps" target="classFrame">ImageResolver.TextureAtlasImageResolver</a></li>
<li><a href="Map.html" title="class in com.badlogic.gdx.maps" target="classFrame">Map</a></li>
<li><a href="MapLayer.html" title="class in com.badlogic.gdx.maps" target="classFrame">MapLayer</a></li>
<li><a href="MapLayers.html" title="class in com.badlogic.gdx.maps" target="classFrame">MapLayers</a></li>
<li><a href="MapObject.html" title="class in com.badlogic.gdx.maps" target="classFrame">MapObject</a></li>
<li><a href="MapObjects.html" title="class in com.badlogic.gdx.maps" target="classFrame">MapObjects</a></li>
<li><a href="MapProperties.html" title="class in com.badlogic.gdx.maps" target="classFrame">MapProperties</a></li>
</ul>
</div>
</body>
</html>
|
Java
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (1.8.0_101) on Wed Dec 14 17:18:32 GMT 2016 -->
<title>API Help (ExoPlayer library)</title>
<meta name="date" content="2016-12-14">
<link rel="stylesheet" type="text/css" href="stylesheet.css" title="Style">
<script type="text/javascript" src="script.js"></script>
</head>
<body>
<script type="text/javascript"><!--
try {
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="API Help (ExoPlayer library)";
}
}
catch(err) {
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar.top">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.top.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="overview-summary.html">Overview</a></li>
<li>Package</li>
<li>Class</li>
<li><a href="overview-tree.html">Tree</a></li>
<li><a href="deprecated-list.html">Deprecated</a></li>
<li><a href="index-all.html">Index</a></li>
<li class="navBarCell1Rev">Help</li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="index.html?help-doc.html" target="_top">Frames</a></li>
<li><a href="help-doc.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h1 class="title">How This API Document Is Organized</h1>
<div class="subTitle">This API (Application Programming Interface) document has pages corresponding to the items in the navigation bar, described as follows.</div>
</div>
<div class="contentContainer">
<ul class="blockList">
<li class="blockList">
<h2>Overview</h2>
<p>The <a href="overview-summary.html">Overview</a> page is the front page of this API document and provides a list of all packages with a summary for each. This page can also contain an overall description of the set of packages.</p>
</li>
<li class="blockList">
<h2>Package</h2>
<p>Each package has a page that contains a list of its classes and interfaces, with a summary for each. This page can contain six categories:</p>
<ul>
<li>Interfaces (italic)</li>
<li>Classes</li>
<li>Enums</li>
<li>Exceptions</li>
<li>Errors</li>
<li>Annotation Types</li>
</ul>
</li>
<li class="blockList">
<h2>Class/Interface</h2>
<p>Each class, interface, nested class and nested interface has its own separate page. Each of these pages has three sections consisting of a class/interface description, summary tables, and detailed member descriptions:</p>
<ul>
<li>Class inheritance diagram</li>
<li>Direct Subclasses</li>
<li>All Known Subinterfaces</li>
<li>All Known Implementing Classes</li>
<li>Class/interface declaration</li>
<li>Class/interface description</li>
</ul>
<ul>
<li>Nested Class Summary</li>
<li>Field Summary</li>
<li>Constructor Summary</li>
<li>Method Summary</li>
</ul>
<ul>
<li>Field Detail</li>
<li>Constructor Detail</li>
<li>Method Detail</li>
</ul>
<p>Each summary entry contains the first sentence from the detailed description for that item. The summary entries are alphabetical, while the detailed descriptions are in the order they appear in the source code. This preserves the logical groupings established by the programmer.</p>
</li>
<li class="blockList">
<h2>Annotation Type</h2>
<p>Each annotation type has its own separate page with the following sections:</p>
<ul>
<li>Annotation Type declaration</li>
<li>Annotation Type description</li>
<li>Required Element Summary</li>
<li>Optional Element Summary</li>
<li>Element Detail</li>
</ul>
</li>
<li class="blockList">
<h2>Enum</h2>
<p>Each enum has its own separate page with the following sections:</p>
<ul>
<li>Enum declaration</li>
<li>Enum description</li>
<li>Enum Constant Summary</li>
<li>Enum Constant Detail</li>
</ul>
</li>
<li class="blockList">
<h2>Tree (Class Hierarchy)</h2>
<p>There is a <a href="overview-tree.html">Class Hierarchy</a> page for all packages, plus a hierarchy for each package. Each hierarchy page contains a list of classes and a list of interfaces. The classes are organized by inheritance structure starting with <code>java.lang.Object</code>. The interfaces do not inherit from <code>java.lang.Object</code>.</p>
<ul>
<li>When viewing the Overview page, clicking on "Tree" displays the hierarchy for all packages.</li>
<li>When viewing a particular package, class or interface page, clicking "Tree" displays the hierarchy for only that package.</li>
</ul>
</li>
<li class="blockList">
<h2>Deprecated API</h2>
<p>The <a href="deprecated-list.html">Deprecated API</a> page lists all of the API that have been deprecated. A deprecated API is not recommended for use, generally due to improvements, and a replacement API is usually given. Deprecated APIs may be removed in future implementations.</p>
</li>
<li class="blockList">
<h2>Index</h2>
<p>The <a href="index-all.html">Index</a> contains an alphabetic list of all classes, interfaces, constructors, methods, and fields.</p>
</li>
<li class="blockList">
<h2>Prev/Next</h2>
<p>These links take you to the next or previous class, interface, package, or related page.</p>
</li>
<li class="blockList">
<h2>Frames/No Frames</h2>
<p>These links show and hide the HTML frames. All pages are available with or without frames.</p>
</li>
<li class="blockList">
<h2>All Classes</h2>
<p>The <a href="allclasses-noframe.html">All Classes</a> link shows all classes and interfaces except non-static nested types.</p>
</li>
<li class="blockList">
<h2>Serialized Form</h2>
<p>Each serializable or externalizable class has a description of its serialization fields and methods. This information is of interest to re-implementors, not to developers using the API. While there is no link in the navigation bar, you can get to this information by going to any serialized class and clicking "Serialized Form" in the "See also" section of the class description.</p>
</li>
<li class="blockList">
<h2>Constant Field Values</h2>
<p>The <a href="constant-values.html">Constant Field Values</a> page lists the static final fields and their values.</p>
</li>
</ul>
<span class="emphasizedPhrase">This help file applies to API documentation generated using the standard doclet.</span></div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar.bottom">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.bottom.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="overview-summary.html">Overview</a></li>
<li>Package</li>
<li>Class</li>
<li><a href="overview-tree.html">Tree</a></li>
<li><a href="deprecated-list.html">Deprecated</a></li>
<li><a href="index-all.html">Index</a></li>
<li class="navBarCell1Rev">Help</li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="index.html?help-doc.html" target="_top">Frames</a></li>
<li><a href="help-doc.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
</body>
</html>
|
Java
|
# Sniper
**Natural evolution of:** Engineer, Ranger.
## Rank 1
### Targeting
> You designate a target; even when that target hides, you can still spot it with a luck test.
### Scattershot
Z: ARM + Ag - 2
> An attack in an arc in front of the player that hits every enemy within the arc.
> Uses 1 projectile per enemy.
### Immobilization
AD: TA-[Ag], ARM + Ag + 2, immobilized if damage > 0
> Aims at the ankle.
## Rank 2
### Point-Blank
CAC: ARM + Fo
> A melee (CAC) attack performed with a ranged weapon.
> Ag is not counted if it is already part of ARM.
### Arcing Shot
AD: TA-[Ag, Int], ARM - 1d6 + Ag + Fo
## Rank 3
### Phantom Projectile
AD: ARM + Ag - 1d8 - 2, cannot be parried.
### Projection
AD: TA-[Fo, Ag], ARM + Ag + Fo - 1d8
> Knocks the target back if the parry fails.
### Head-Shot
AD: TS-[Ag], ARM + 1d12 + Fo + Ag
> Automatically strikes the target in the head.
Java
|
package semver
import (
"fmt"
"strings"
"github.com/blang/semver"
"github.com/pivotal-cf/go-pivnet/v7/logger"
)
type SemverConverter struct {
logger logger.Logger
}
func NewSemverConverter(logger logger.Logger) *SemverConverter {
return &SemverConverter{logger}
}
// ToValidSemver attempts to return the input as valid semver.
// If the input fails to parse as semver, it appends .0 or .0.0 to the input and retries
// If this is still not valid semver, it returns an error
func (s SemverConverter) ToValidSemver(input string) (semver.Version, error) {
v, err := semver.Parse(input)
if err == nil {
return v, nil
}
s.logger.Info(fmt.Sprintf(
"failed to parse semver: '%s', appending zeros and trying again",
input,
))
maybeSemver := input
segs := strings.SplitN(maybeSemver, ".", 3)
switch len(segs) {
case 2:
maybeSemver += ".0"
case 1:
maybeSemver += ".0.0"
}
v, err = semver.Parse(maybeSemver)
if err == nil {
return v, nil
}
s.logger.Info(fmt.Sprintf(
"still failed to parse semver: '%s', giving up",
maybeSemver,
))
return semver.Version{}, err
}
|
Java
|
# Default node behavior
The default behavior is implemented by the intermediate node types such as `SoftwareComponent`. This behavior looks at the standard lifecycle operations.
If all of them are present, they are added to the Dockerfile in the following order:
1. `create`
2. `configure`
3. `start`
The scripts and their dependencies for each phase get copied into the Dockerfile,
the corresponding properties get set as environment variables,
and the command gets executed (`create` and `configure`).
The `start` script does not get executed during the build process. It gets copied just like all other scripts.
Environment variables also get set normally; however, the script will not get executed using a `RUN` command.
Instead, it will be added to the entrypoint list that is responsible for running the `start` commands once the container gets created.
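As a rough sketch of that ordering (the script names, target paths, and property variable below are invented placeholders, not the tool's actual API), the generated Dockerfile fragment could be assembled like this:

```java
import java.util.ArrayList;
import java.util.List;

// Hypothetical sketch only: assembles Dockerfile lines for the standard lifecycle
// in the order create -> configure -> start. The create and configure scripts are
// executed with RUN during the build; start is only collected for the entrypoint.
public class DefaultLifecycleSketch {
    public static void main(String[] args) {
        String[] phases = {"create.sh", "configure.sh", "start.sh"}; // placeholder script names
        List<String> dockerfile = new ArrayList<>();
        List<String> entrypoint = new ArrayList<>();

        for (String script : phases) {
            // copy the script (the real tool also copies its dependencies)
            dockerfile.add("COPY " + script + " /opt/lifecycle/" + script);
            // expose the node's properties as environment variables (placeholder value)
            dockerfile.add("ENV SOME_PROPERTY=some_value");
            if (script.equals("start.sh")) {
                // start is not executed during the build; it runs once the container starts
                entrypoint.add("/opt/lifecycle/" + script);
            } else {
                dockerfile.add("RUN /opt/lifecycle/" + script);
            }
        }
        dockerfile.add("ENTRYPOINT [\"" + String.join("\", \"", entrypoint) + "\"]");
        dockerfile.forEach(System.out::println);
    }
}
```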
# Custom node behavior
**NOTE**: All current custom node implementations only work this way if they do not feature a custom standard lifecycle.
If they have one, the default behavior applies.
## Apache
We assume that Apache always comes with PHP, which is why we use the `library/php:httpd` image.
Furthermore, we expect that all child nodes (WebApplications) have a create or configure script that copies the contents to the `/var/www` folder.
These scripts are executed as root user.
## MySQL (including Database)
MySQL defaults to the `library/mysql:latest` image. The predefined properties (such as the root password) are taken and set as configuration environment variables.
If a child database contains a `.sql` artifact, the file will be copied into a special directory whose contents are executed when the container starts.
## Java Runtime and Application
The Java runtime and application types will use the `library/openjdk` image by default. The jar defined in the JavaApplication node template will be copied into the Dockerfile (including its dependencies).
A `java -jar <JAR_FILE>` command is triggered to launch the application.
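For illustration only (the jar name and paths are hypothetical placeholders, and the exact image tag may differ), the generated fragment for a JavaApplication could look roughly like this:

```java
// Hypothetical sketch of the Dockerfile fragment generated for a JavaApplication node.
// The jar name and paths are placeholders, not values taken from a real node template.
public class JavaApplicationDockerfileSketch {
    public static void main(String[] args) {
        String jar = "my-app.jar"; // placeholder for the jar defined in the node template
        System.out.println("FROM library/openjdk");
        System.out.println("COPY " + jar + " /opt/app/" + jar);
        System.out.println("ENTRYPOINT [\"java\", \"-jar\", \"/opt/app/" + jar + "\"]");
    }
}
```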
|
Java
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
import six
import yaml
from heat.common import config
from heat.common import exception
from heat.common import template_format
from heat.tests.common import HeatTestCase
from heat.tests import utils
class JsonToYamlTest(HeatTestCase):
def setUp(self):
super(JsonToYamlTest, self).setUp()
self.expected_test_count = 2
self.longMessage = True
self.maxDiff = None
def test_convert_all_templates(self):
path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'templates')
template_test_count = 0
for (json_str,
yml_str,
file_name) in self.convert_all_json_to_yaml(path):
self.compare_json_vs_yaml(json_str, yml_str, file_name)
template_test_count += 1
if template_test_count >= self.expected_test_count:
break
self.assertTrue(template_test_count >= self.expected_test_count,
'Expected at least %d templates to be tested, not %d' %
(self.expected_test_count, template_test_count))
def compare_json_vs_yaml(self, json_str, yml_str, file_name):
yml = template_format.parse(yml_str)
self.assertEqual(u'2012-12-12', yml[u'HeatTemplateFormatVersion'],
file_name)
self.assertFalse(u'AWSTemplateFormatVersion' in yml, file_name)
del(yml[u'HeatTemplateFormatVersion'])
jsn = template_format.parse(json_str)
if u'AWSTemplateFormatVersion' in jsn:
del(jsn[u'AWSTemplateFormatVersion'])
self.assertEqual(yml, jsn, file_name)
def convert_all_json_to_yaml(self, dirpath):
for path in os.listdir(dirpath):
if not path.endswith('.template') and not path.endswith('.json'):
continue
f = open(os.path.join(dirpath, path), 'r')
json_str = f.read()
yml_str = template_format.convert_json_to_yaml(json_str)
yield (json_str, yml_str, f.name)
class YamlMinimalTest(HeatTestCase):
def _parse_template(self, tmpl_str, msg_str):
parse_ex = self.assertRaises(ValueError,
template_format.parse,
tmpl_str)
self.assertIn(msg_str, six.text_type(parse_ex))
def test_long_yaml(self):
template = {'HeatTemplateFormatVersion': '2012-12-12'}
config.cfg.CONF.set_override('max_template_size', 1024)
template['Resources'] = ['a'] * (config.cfg.CONF.max_template_size // 3)
limit = config.cfg.CONF.max_template_size
long_yaml = yaml.safe_dump(template)
self.assertTrue(len(long_yaml) > limit)
ex = self.assertRaises(exception.RequestLimitExceeded,
template_format.parse, long_yaml)
msg = ('Request limit exceeded: Template exceeds maximum allowed size '
'(1024 bytes)')
self.assertEqual(msg, six.text_type(ex))
def test_parse_no_version_format(self):
yaml = ''
self._parse_template(yaml, 'Template format version not found')
yaml2 = '''Parameters: {}
Mappings: {}
Resources: {}
Outputs: {}
'''
self._parse_template(yaml2, 'Template format version not found')
def test_parse_string_template(self):
tmpl_str = 'just string'
msg = 'The template is not a JSON object or YAML mapping.'
self._parse_template(tmpl_str, msg)
def test_parse_invalid_yaml_and_json_template(self):
tmpl_str = '{test'
msg = 'line 1, column 1'
self._parse_template(tmpl_str, msg)
def test_parse_json_document(self):
tmpl_str = '["foo" , "bar"]'
msg = 'The template is not a JSON object or YAML mapping.'
self._parse_template(tmpl_str, msg)
def test_parse_empty_json_template(self):
tmpl_str = '{}'
msg = 'Template format version not found'
self._parse_template(tmpl_str, msg)
def test_parse_yaml_template(self):
tmpl_str = 'heat_template_version: 2013-05-23'
expected = {'heat_template_version': '2013-05-23'}
self.assertEqual(expected, template_format.parse(tmpl_str))
class YamlParseExceptions(HeatTestCase):
scenarios = [
('scanner', dict(raised_exception=yaml.scanner.ScannerError())),
('parser', dict(raised_exception=yaml.parser.ParserError())),
('reader',
dict(raised_exception=yaml.reader.ReaderError('', '', '', '', ''))),
]
def test_parse_to_value_exception(self):
text = 'not important'
with mock.patch.object(yaml, 'load') as yaml_loader:
yaml_loader.side_effect = self.raised_exception
self.assertRaises(ValueError,
template_format.parse, text)
class JsonYamlResolvedCompareTest(HeatTestCase):
def setUp(self):
super(JsonYamlResolvedCompareTest, self).setUp()
self.longMessage = True
self.maxDiff = None
def load_template(self, file_name):
filepath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'templates', file_name)
f = open(filepath)
t = template_format.parse(f.read())
f.close()
return t
def compare_stacks(self, json_file, yaml_file, parameters):
t1 = self.load_template(json_file)
t2 = self.load_template(yaml_file)
del(t1[u'AWSTemplateFormatVersion'])
t1[u'HeatTemplateFormatVersion'] = t2[u'HeatTemplateFormatVersion']
stack1 = utils.parse_stack(t1, parameters)
stack2 = utils.parse_stack(t2, parameters)
# compare resources separately so that resolved static data
# is compared
t1nr = dict(stack1.t.t)
del(t1nr['Resources'])
t2nr = dict(stack2.t.t)
del(t2nr['Resources'])
self.assertEqual(t1nr, t2nr)
self.assertEqual(set(stack1.keys()), set(stack2.keys()))
for key in stack1:
self.assertEqual(stack1[key].t, stack2[key].t)
def test_neutron_resolved(self):
self.compare_stacks('Neutron.template', 'Neutron.yaml', {})
def test_wordpress_resolved(self):
self.compare_stacks('WordPress_Single_Instance.template',
'WordPress_Single_Instance.yaml',
{'KeyName': 'test'})
|
Java
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="fr">
<head>
<!-- Generated by javadoc (version 1.7.0_04) on Fri Mar 15 01:08:46 CET 2013 -->
<title>U-Index</title>
<meta name="date" content="2013-03-15">
<link rel="stylesheet" type="text/css" href="../stylesheet.css" title="Style">
</head>
<body>
<script type="text/javascript"><!--
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="U-Index";
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar_top">
<!-- -->
</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../fr/ups/djapi/package-summary.html">Package</a></li>
<li>Class</li>
<li>Use</li>
<li><a href="../fr/ups/djapi/package-tree.html">Tree</a></li>
<li><a href="../deprecated-list.html">Deprecated</a></li>
<li class="navBarCell1Rev">Index</li>
<li><a href="../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="index-11.html">Prev Letter</a></li>
<li>Next Letter</li>
</ul>
<ul class="navList">
<li><a href="../index.html?index-filesindex-12.html" target="_top">Frames</a></li>
<li><a href="index-12.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="contentContainer"><a href="index-1.html">C</a> <a href="index-2.html">D</a> <a href="index-3.html">E</a> <a href="index-4.html">F</a> <a href="index-5.html">G</a> <a href="index-6.html">I</a> <a href="index-7.html">L</a> <a href="index-8.html">N</a> <a href="index-9.html">P</a> <a href="index-10.html">R</a> <a href="index-11.html">S</a> <a href="index-12.html">U</a> <a name="_U_">
<!-- -->
</a>
<h2 class="title">U</h2>
<dl>
<dt><span class="strong"><a href="../fr/ups/djapi/DJAPIConfiguration.html#url">url</a></span> - Variable in class fr.ups.djapi.<a href="../fr/ups/djapi/DJAPIConfiguration.html" title="class in fr.ups.djapi">DJAPIConfiguration</a></dt>
<dd> </dd>
</dl>
<a href="index-1.html">C</a> <a href="index-2.html">D</a> <a href="index-3.html">E</a> <a href="index-4.html">F</a> <a href="index-5.html">G</a> <a href="index-6.html">I</a> <a href="index-7.html">L</a> <a href="index-8.html">N</a> <a href="index-9.html">P</a> <a href="index-10.html">R</a> <a href="index-11.html">S</a> <a href="index-12.html">U</a> </div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar_bottom">
<!-- -->
</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../fr/ups/djapi/package-summary.html">Package</a></li>
<li>Class</li>
<li>Use</li>
<li><a href="../fr/ups/djapi/package-tree.html">Tree</a></li>
<li><a href="../deprecated-list.html">Deprecated</a></li>
<li class="navBarCell1Rev">Index</li>
<li><a href="../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="index-11.html">Prev Letter</a></li>
<li>Next Letter</li>
</ul>
<ul class="navList">
<li><a href="../index.html?index-filesindex-12.html" target="_top">Frames</a></li>
<li><a href="index-12.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
</body>
</html>
|
Java
|
/**
* jetbrick-template
* http://subchen.github.io/jetbrick-template/
*
* Copyright 2010-2014 Guoqiang Chen. All rights reserved.
* Email: subchen@gmail.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jetbrick.template.resource;
import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.atomic.AtomicLong;
import jetbrick.template.utils.UnsafeByteArrayInputStream;
/**
* A resource that exists in source-code form.
*
* @since 1.1.3
* @author Guoqiang Chen
*/
public class SourceCodeResource extends Resource {
private static final String ENCODING = "utf-8";
private static AtomicLong index = new AtomicLong();
private final String source;
public SourceCodeResource(String source) {
super("/unknown/file." + index.incrementAndGet(), ENCODING);
this.source = source;
}
@Override
public String getAbsolutePath() {
return "(unknown)";
}
@Override
public long lastModified() {
return 0;
}
@Override
public InputStream getInputStream() throws IOException {
return new UnsafeByteArrayInputStream(source.getBytes(ENCODING));
}
@Override
public char[] getSource() {
return source.toCharArray();
}
@Override
public char[] getSource(String encoding) {
return source.toCharArray();
}
}
|
Java
|
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import cfg
import requests
from mistral.actions import std_actions
from mistral.db.v2 import api as db_api
from mistral.services import workflows as wf_service
from mistral.tests.unit import base as test_base
from mistral.tests.unit.engine import base
from mistral.workflow import states
# Use the set_default method to set value otherwise in certain test cases
# the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')
ENV = {
'__actions': {
'std.http': {
'auth': 'librarian:password123',
'timeout': 30,
}
}
}
EXPECTED_ENV_AUTH = ('librarian', 'password123')
WORKFLOW1 = """
---
version: "2.0"
wf1:
type: direct
tasks:
task1:
action: std.http url="https://api.library.org/books"
publish:
result: <% $ %>
"""
WORKFLOW2 = """
---
version: "2.0"
wf2:
type: direct
tasks:
task1:
action: std.http url="https://api.library.org/books" timeout=60
publish:
result: <% $ %>
"""
WORKFLOW1_WITH_ITEMS = """
---
version: "2.0"
wf1_with_items:
type: direct
input:
- links
tasks:
task1:
with-items: link in <% $.links %>
action: std.http url=<% $.link %>
publish:
result: <% $ %>
"""
WORKFLOW2_WITH_ITEMS = """
---
version: "2.0"
wf2_with_items:
type: direct
input:
- links
tasks:
task1:
with-items: link in <% $.links %>
action: std.http url=<% $.link %> timeout=60
publish:
result: <% $ %>
"""
class ActionDefaultTest(base.EngineTestCase):
@mock.patch.object(
requests, 'request',
mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
@mock.patch.object(
std_actions.HTTPAction, 'is_sync',
mock.MagicMock(return_value=True))
def test_action_defaults_from_env(self):
wf_service.create_workflows(WORKFLOW1)
wf_ex = self.engine.start_workflow('wf1', env=ENV)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.SUCCESS, wf_ex.state)
self._assert_single_item(wf_ex.task_executions, name='task1')
requests.request.assert_called_with(
'GET', 'https://api.library.org/books',
params=None, data=None, headers=None, cookies=None,
allow_redirects=None, proxies=None, verify=None,
auth=EXPECTED_ENV_AUTH,
timeout=ENV['__actions']['std.http']['timeout'])
@mock.patch.object(
requests, 'request',
mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
@mock.patch.object(
std_actions.HTTPAction, 'is_sync',
mock.MagicMock(return_value=True))
def test_action_defaults_from_env_not_applied(self):
wf_service.create_workflows(WORKFLOW2)
wf_ex = self.engine.start_workflow('wf2', env=ENV)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.SUCCESS, wf_ex.state)
self._assert_single_item(wf_ex.task_executions, name='task1')
requests.request.assert_called_with(
'GET', 'https://api.library.org/books',
params=None, data=None, headers=None, cookies=None,
allow_redirects=None, proxies=None, verify=None,
auth=EXPECTED_ENV_AUTH,
timeout=60
)
@mock.patch.object(
requests, 'request',
mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
@mock.patch.object(
std_actions.HTTPAction, 'is_sync',
mock.MagicMock(return_value=True))
def test_with_items_action_defaults_from_env(self):
wf_service.create_workflows(WORKFLOW1_WITH_ITEMS)
wf_input = {
'links': [
'https://api.library.org/books',
'https://api.library.org/authors'
]
}
wf_ex = self.engine.start_workflow(
'wf1_with_items',
wf_input=wf_input,
env=ENV
)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.SUCCESS, wf_ex.state)
self._assert_single_item(wf_ex.task_executions, name='task1')
calls = [mock.call('GET', url, params=None, data=None,
headers=None, cookies=None,
allow_redirects=None, proxies=None,
auth=EXPECTED_ENV_AUTH, verify=None,
timeout=ENV['__actions']['std.http']['timeout'])
for url in wf_input['links']]
requests.request.assert_has_calls(calls, any_order=True)
@mock.patch.object(
requests, 'request',
mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
@mock.patch.object(
std_actions.HTTPAction, 'is_sync',
mock.MagicMock(return_value=True))
def test_with_items_action_defaults_from_env_not_applied(self):
wf_service.create_workflows(WORKFLOW2_WITH_ITEMS)
wf_input = {
'links': [
'https://api.library.org/books',
'https://api.library.org/authors'
]
}
wf_ex = self.engine.start_workflow(
'wf2_with_items',
wf_input=wf_input,
env=ENV
)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.SUCCESS, wf_ex.state)
self._assert_single_item(wf_ex.task_executions, name='task1')
calls = [mock.call('GET', url, params=None, data=None,
headers=None, cookies=None,
allow_redirects=None, proxies=None,
auth=EXPECTED_ENV_AUTH, verify=None,
timeout=60)
for url in wf_input['links']]
requests.request.assert_has_calls(calls, any_order=True)
|
Java
|
/*
* Copyright 2012 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#import "ZXReader.h"
@class ZXBinaryBitmap, ZXDecodeHints, ZXResult;
/**
* This implementation can detect and decode a MaxiCode in an image.
*/
@interface ZXMaxiCodeReader : NSObject <ZXReader>
@end
|
Java
|
<?php
namespace Bigbank\Gcm;
/**
* Gcm response parser
*/
class Response
{
/**
* Unique ID (number) identifying the multicast message.
*
* @var integer
*/
private $multicastId = null;
/**
* Unique id identifying the single message.
*
* Only have value if single or topic message is sent to google
*
* @var int
*/
private $messageId = null;
/**
* Number of messages that were processed without an error.
*
* @var integer
*/
private $success = null;
/**
* Number of messages that could not be processed.
*
* @var integer
*/
private $failure = null;
/**
* Number of results that contain a canonical registration ID.
*
* @var integer
*/
private $canonicalIds = null;
/**
* Holds single message error
*
* @var string
*/
private $error = null;
/**
* Array of objects representing the status of the messages processed.
* The objects are listed in the same order as the request
* (i.e., for each registration ID in the request, its result is listed in the same index in the response)
* and they can have these fields:
* message_id: String representing the message when it was successfully processed.
* registration_id: If set, means that GCM processed the message but it has another canonical
* registration ID for that device, so sender should replace the IDs on future requests
* (otherwise they might be rejected). This field is never set if there is an error in the request.
* error: String describing an error that occurred while processing the message for that recipient.
* The possible values are the same as documented in the above table, plus "Unavailable"
* (meaning GCM servers were busy and could not process the message for that particular recipient,
* so it could be retried).
*
* @var array
*/
private $results = [];
/**
* @param Message $message
* @param string $responseBody json string of google cloud message server response
*
* @throws Exception
*/
public function __construct(Message $message, $responseBody)
{
$data = \json_decode($responseBody, true);
if ($data === null) {
throw new Exception("Malformed response body. " . $responseBody, Exception::MALFORMED_RESPONSE);
}
if (empty($data['error'])) {
$this->messageId = (isset($data['message_id'])) ? $data['message_id'] : null;
$this->multicastId = $data['multicast_id'];
$this->failure = $data['failure'];
$this->success = (!$this->multicastId) ? 1 : $data['success'];
$this->canonicalIds = $data['canonical_ids'];
$this->results = [];
$this->parseResults($message, $data);
} else {
$this->error = $data['error'];
$this->messageId = (isset($data['message_id'])) ? $data['message_id'] : null;
$this->failure = (!isset($data['failure'])) ? 1 : $data['failure'];
}
}
/**
* @return int
*/
public function getMulticastId()
{
return $this->multicastId;
}
/**
* @return int|null
*/
public function getMessageId()
{
return $this->messageId;
}
/**
* @return int
*/
public function getSuccessCount()
{
return $this->success;
}
/**
* @return int
*/
public function getFailureCount()
{
return $this->failure;
}
/**
* @return int
*/
public function getNewRegistrationIdsCount()
{
return $this->canonicalIds;
}
/**
* @return array
*/
public function getResults()
{
return $this->results;
}
/**
* @return string
*/
public function getError()
{
return $this->error;
}
/**
* Return an array of expired registration ids linked to new id
* All old registration ids must be updated to new ones in DB
*
* @return array oldRegistrationId => newRegistrationId
*/
public function getNewRegistrationIds()
{
if ($this->getNewRegistrationIdsCount() == 0) {
return [];
}
$filteredResults = array_filter($this->results,
function ($result) {
return isset($result['registration_id']);
});
$data = array_map(function ($result) {
return $result['registration_id'];
}, $filteredResults);
return $data;
}
/**
* Returns an array containing invalid registration ids
* They must be removed from DB because the application was uninstalled from the device.
*
* @return array
*/
public function getInvalidRegistrationIds()
{
if ($this->getFailureCount() == 0) {
return [];
}
$filteredResults = array_filter($this->results,
function ($result) {
return (
isset($result['error'])
&&
(
($result['error'] == "NotRegistered")
||
($result['error'] == "InvalidRegistration")
)
);
});
return array_keys($filteredResults);
}
/**
* Returns an array of registration ids for which you must resend a message,
* cause devices are not available now.
*
* @return array
*/
public function getUnavailableRegistrationIds()
{
if ($this->getFailureCount() == 0) {
return [];
}
$filteredResults = array_filter($this->results,
function ($result) {
return (
isset($result['error'])
&&
($result['error'] == "Unavailable")
);
});
return array_keys($filteredResults);
}
/**
* Parse result array with correct data
*
* @param Message $message
* @param array $response
*/
private function parseResults(Message $message, array $response)
{
if (is_array($message->getRecipients())) {
foreach ($message->getRecipients() as $key => $registrationId) {
$this->results[$registrationId] = $response['results'][$key];
}
} else {
$this->results[$message->getRecipients()] = $response['results'];
}
}
}
|
Java
|
__Description__: If there is an `applyTo: <value>` key/value pair in the `option` object within a `common` and `static` `state` object, then that `applyTo` value should be applied to the scoped level at which it's declared, in both `<selector>:<state>` and `<selector>:not(:<state>)` (see the sketch after the notes below).
__Notes__
- `applyTo` differs from `appendTo` in that there is a space between the scoped level value and the `applyTo` value
+ `applyTo` -> `<.level> <applyTo>`
+ `appendTo` -> `<.level><appendTo>`
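A minimal sketch of that spacing difference, assuming a hypothetical `.level` scope and `.btn` value (neither name comes from the spec above):

```java
// Minimal sketch of the spacing difference between applyTo and appendTo.
// ".level" and ".btn" are hypothetical placeholder names, not part of the spec.
public class SelectorComposition {
    public static void main(String[] args) {
        String level = ".level";
        String value = ".btn";

        // applyTo: space-separated, so the value targets descendants of the scoped level
        String applied = level + " " + value;  // ".level .btn"

        // appendTo: no space, so the value is compounded onto the scoped level itself
        String appended = level + value;       // ".level.btn"

        System.out.println(applied);
        System.out.println(appended);
    }
}
```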
|
Java
|
package org.jboss.examples.ticketmonster.model;
import static javax.persistence.CascadeType.ALL;
import static javax.persistence.FetchType.EAGER;
import static javax.persistence.GenerationType.IDENTITY;
import java.io.Serializable;
import java.util.HashSet;
import java.util.Set;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.ManyToOne;
import javax.persistence.OneToMany;
import org.hibernate.validator.constraints.NotEmpty;
/**
* <p>
* Represents a single venue
* </p>
*
* @author Shane Bryzak
* @author Pete Muir
*/
/*
* We suppress the warning about not specifying a serialVersionUID, as we are still developing this app, and want the JVM to
* generate the serialVersionUID for us. When we put this app into production, we'll generate and embed the serialVersionUID
*/
@SuppressWarnings("serial")
@Entity
public class Venue implements Serializable {
/* Declaration of fields */
/**
* The synthetic id of the object.
*/
@Id
@GeneratedValue(strategy = IDENTITY)
private Long id;
/**
* <p>
* The name of the venue.
* </p>
*
* <p>
* The name of the venue forms its natural identity and cannot be shared between venues.
* </p>
*
* <p>
* The name must not be null and must be one or more characters; the Bean Validation constraint <code>@NotEmpty</code>
* enforces this.
* </p>
*/
@Column(unique = true)
@NotEmpty
private String name;
/**
* The address of the venue
*/
private Address address = new Address();
/**
* A description of the venue
*/
private String description;
/**
* <p>
* A set of sections in the venue
* </p>
*
* <p>
* The <code>@OneToMany</code> JPA mapping establishes this relationship. TODO Explain EAGER fetch.
* This relationship is bi-directional (a section knows which venue it is part of), and the <code>mappedBy</code>
* attribute establishes this. We cascade all persistence operations to the set of sections, so, for example, if a venue
* is removed, then all of its sections will also be removed.
* </p>
*/
@OneToMany(cascade = ALL, fetch = EAGER, mappedBy = "venue")
private Set<Section> sections = new HashSet<Section>();
/**
* The capacity of the venue
*/
private int capacity;
/**
* An optional media item to entice punters to the venue. The <code>@ManyToOne</code> establishes the relationship.
*/
@ManyToOne
private MediaItem mediaItem;
/* Boilerplate getters and setters */
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Address getAddress() {
return address;
}
public void setAddress(Address address) {
this.address = address;
}
public MediaItem getMediaItem() {
return mediaItem;
}
public void setMediaItem(MediaItem description) {
this.mediaItem = description;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public Set<Section> getSections() {
return sections;
}
public void setSections(Set<Section> sections) {
this.sections = sections;
}
public int getCapacity() {
return capacity;
}
public void setCapacity(int capacity) {
this.capacity = capacity;
}
/* toString(), equals() and hashCode() for Venue, using the natural identity of the object */
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
Venue venue = (Venue) o;
if (address != null ? !address.equals(venue.address) : venue.address != null)
return false;
if (name != null ? !name.equals(venue.name) : venue.name != null)
return false;
return true;
}
@Override
public int hashCode() {
int result = name != null ? name.hashCode() : 0;
result = 31 * result + (address != null ? address.hashCode() : 0);
return result;
}
@Override
public String toString() {
return name;
}
}
|
Java
|
# 0.2.1
Handle unspecified data bag items gracefully instead of raising an uncaught exception.
# 0.2.0
Added support for creating mappings per layer in addition to per hostname.
# 0.1.0
Initial release of aws-elb-registration
|
Java
|
# Dermatea pallidula Cooke SPECIES
#### Status
ACCEPTED
#### According to
Index Fungorum
#### Published in
null
#### Original name
Dermatea pallidula Cooke
### Remarks
null
|
Java
|
/*
* Copyright (C) 2011 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.hash;
import com.google.common.base.Charsets;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import junit.framework.TestCase;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
/**
* Tests for the MessageDigestHashFunction.
*
* @author Kurt Alfred Kluever
*/
public class MessageDigestHashFunctionTest extends TestCase {
private static final ImmutableSet<String> INPUTS = ImmutableSet.of("", "Z", "foobar");
// From "How Provider Implementations Are Requested and Supplied" from
// http://docs.oracle.com/javase/6/docs/technotes/guides/security/crypto/CryptoSpec.html
// - Some providers may choose to also include alias names.
// - For example, the "SHA-1" algorithm might be referred to as "SHA1".
// - The algorithm name is not case-sensitive.
private static final ImmutableMap<String, HashFunction> ALGORITHMS =
new ImmutableMap.Builder<String, HashFunction>()
.put("MD5", Hashing.md5())
.put("SHA", Hashing.sha1()) // Not the official name, but still works
.put("SHA1", Hashing.sha1()) // Not the official name, but still works
.put("sHa-1", Hashing.sha1()) // Not the official name, but still works
.put("SHA-1", Hashing.sha1())
.put("SHA-256", Hashing.sha256())
.put("SHA-384", Hashing.sha384())
.put("SHA-512", Hashing.sha512())
.build();
public void testHashing() {
for (String stringToTest : INPUTS) {
for (String algorithmToTest : ALGORITHMS.keySet()) {
assertMessageDigestHashing(HashTestUtils.ascii(stringToTest), algorithmToTest);
}
}
}
public void testPutAfterHash() {
Hasher sha1 = Hashing.sha1().newHasher();
assertEquals("2fd4e1c67a2d28fced849ee1bb76e7391b93eb12",
sha1.putString("The quick brown fox jumps over the lazy dog", Charsets.UTF_8)
.hash()
.toString());
try {
sha1.putInt(42);
fail();
} catch (IllegalStateException expected) {
}
}
public void testHashTwice() {
Hasher sha1 = Hashing.sha1().newHasher();
assertEquals("2fd4e1c67a2d28fced849ee1bb76e7391b93eb12",
sha1.putString("The quick brown fox jumps over the lazy dog", Charsets.UTF_8)
.hash()
.toString());
try {
HashCode unused = sha1.hash();
fail();
} catch (IllegalStateException expected) {
}
}
public void testToString() {
assertEquals("Hashing.md5()", Hashing.md5().toString());
assertEquals("Hashing.sha1()", Hashing.sha1().toString());
assertEquals("Hashing.sha256()", Hashing.sha256().toString());
assertEquals("Hashing.sha512()", Hashing.sha512().toString());
}
private static void assertMessageDigestHashing(byte[] input, String algorithmName) {
try {
MessageDigest digest = MessageDigest.getInstance(algorithmName);
assertEquals(
HashCode.fromBytes(digest.digest(input)),
ALGORITHMS.get(algorithmName).hashBytes(input));
for (int bytes = 4; bytes <= digest.getDigestLength(); bytes++) {
assertEquals(
HashCode.fromBytes(Arrays.copyOf(digest.digest(input), bytes)),
new MessageDigestHashFunction(algorithmName, bytes, algorithmName).hashBytes(input));
}
try {
int maxSize = digest.getDigestLength();
new MessageDigestHashFunction(algorithmName, maxSize + 1, algorithmName);
fail();
} catch (IllegalArgumentException expected) {
}
} catch (NoSuchAlgorithmException nsae) {
throw new AssertionError(nsae);
}
}
}
|
Java
|
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>OR-Tools</title>
<meta http-equiv="Content-Type" content="text/html;"/>
<meta charset="utf-8"/>
<!--<link rel='stylesheet' type='text/css' href="https://fonts.googleapis.com/css?family=Ubuntu:400,700,400italic"/>-->
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<link href="doxygen.css" rel="stylesheet" type="text/css" />
<link href="styleSheet.tmp.css" rel="stylesheet" type="text/css"/>
</head>
<body>
<div id="banner-container">
<div id="banner">
<span id="sfml">Google OR-Tools 9.2</span>
</div>
</div>
<div id="content" style="width: 100%; overflow: hidden;">
<div style="margin-left: 15px; margin-top: 5px; float: left; color: #145A32;">
<h2>Java Reference</h2>
<ul>
<li><a href="../java/namespacecom_1_1google_1_1ortools_1_1sat.html">CP-SAT</a></li>
<li><a href="../java/namespacecom_1_1google_1_1ortools_1_1graph.html">Graph</a></li>
<li><a href="../java/namespacecom_1_1google_1_1ortools_1_1algorithms.html">Knapsack solver</a></li>
<li><a href="../java/namespacecom_1_1google_1_1ortools_1_1linearsolver.html">Linear solver</a></li>
<li><a href="../java/namespacecom_1_1google_1_1ortools_1_1constraintsolver.html">Routing</a></li>
<li><a href="../java/namespacecom_1_1google_1_1ortools_1_1util.html">Util</a></li>
</ul>
</div>
<div id="content">
<div align="center">
<h1 style="color: #145A32;">Java Reference</h1>
</div>
<!-- Generated by Doxygen 1.9.2 -->
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:d3d9a9a6595521f9666a5e94cc830dab83b65699&dn=expat.txt MIT */
var searchBox = new SearchBox("searchBox", "search",'Search','.html');
/* @license-end */
</script>
<script type="text/javascript" src="menudata.js"></script>
<script type="text/javascript" src="menu.js"></script>
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:d3d9a9a6595521f9666a5e94cc830dab83b65699&dn=expat.txt MIT */
$(function() {
initMenu('',true,false,'search.php','Search');
$(document).ready(function() { init_search(); });
});
/* @license-end */
</script>
<div id="main-nav"></div>
</div><!-- top -->
<div id="side-nav" class="ui-resizable side-nav-resizable">
<div id="nav-tree">
<div id="nav-tree-contents">
<div id="nav-sync" class="sync"></div>
</div>
</div>
<div id="splitbar" style="-moz-user-select:none;"
class="ui-resizable-handle">
</div>
</div>
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:d3d9a9a6595521f9666a5e94cc830dab83b65699&dn=expat.txt MIT */
$(document).ready(function(){initNavTree('interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html',''); initResizable(); });
/* @license-end */
</script>
<div id="doc-content">
<!-- window showing the filter options -->
<div id="MSearchSelectWindow"
onmouseover="return searchBox.OnSearchSelectShow()"
onmouseout="return searchBox.OnSearchSelectHide()"
onkeydown="return searchBox.OnSearchSelectKey(event)">
</div>
<!-- iframe showing the search results (closed by default) -->
<div id="MSearchResultsWindow">
<iframe src="javascript:void(0)" frameborder="0"
name="MSearchResults" id="MSearchResults">
</iframe>
</div>
<div class="header">
<div class="summary">
<a href="#pub-methods">Public Member Functions</a> |
<a href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder-members.html">List of all members</a> </div>
<div class="headertitle"><div class="title">IntervalConstraintProtoOrBuilder</div></div>
</div><!--header-->
<div class="contents">
<a name="details" id="details"></a><h2 class="groupheader">Detailed Description</h2>
<div class="textblock">
<p class="definition">Definition at line <a class="el" href="IntervalConstraintProtoOrBuilder_8java_source.html#l00006">6</a> of file <a class="el" href="IntervalConstraintProtoOrBuilder_8java_source.html">IntervalConstraintProtoOrBuilder.java</a>.</p>
</div><table class="memberdecls">
<tr class="heading"><td colspan="2"><h2 class="groupheader"><a id="pub-methods" name="pub-methods"></a>
Public Member Functions</h2></td></tr>
<tr class="memitem:af9618a9e1f1a516f3afe9accf2f68e9e"><td class="memItemLeft" align="right" valign="top">boolean </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#af9618a9e1f1a516f3afe9accf2f68e9e">hasStart</a> ()</td></tr>
<tr class="separator:af9618a9e1f1a516f3afe9accf2f68e9e"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:a8471b7bf1bceb8a6b370d0b4f61cc6da"><td class="memItemLeft" align="right" valign="top"><a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1LinearExpressionProto.html">com.google.ortools.sat.LinearExpressionProto</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#a8471b7bf1bceb8a6b370d0b4f61cc6da">getStart</a> ()</td></tr>
<tr class="separator:a8471b7bf1bceb8a6b370d0b4f61cc6da"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:a5e52f9711ecacca9fc2b3b02f0a524bf"><td class="memItemLeft" align="right" valign="top"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1LinearExpressionProtoOrBuilder.html">com.google.ortools.sat.LinearExpressionProtoOrBuilder</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#a5e52f9711ecacca9fc2b3b02f0a524bf">getStartOrBuilder</a> ()</td></tr>
<tr class="separator:a5e52f9711ecacca9fc2b3b02f0a524bf"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:a9b0197a2b2718c7b0061d19d4b1fbcb4"><td class="memItemLeft" align="right" valign="top">boolean </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#a9b0197a2b2718c7b0061d19d4b1fbcb4">hasEnd</a> ()</td></tr>
<tr class="memdesc:a9b0197a2b2718c7b0061d19d4b1fbcb4"><td class="mdescLeft"> </td><td class="mdescRight"><code>.operations_research.sat.LinearExpressionProto end = 5;</code> <a href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#a9b0197a2b2718c7b0061d19d4b1fbcb4">More...</a><br /></td></tr>
<tr class="separator:a9b0197a2b2718c7b0061d19d4b1fbcb4"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:a13b3a6bdbc3183c45d0197e8d7171849"><td class="memItemLeft" align="right" valign="top"><a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1LinearExpressionProto.html">com.google.ortools.sat.LinearExpressionProto</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#a13b3a6bdbc3183c45d0197e8d7171849">getEnd</a> ()</td></tr>
<tr class="memdesc:a13b3a6bdbc3183c45d0197e8d7171849"><td class="mdescLeft"> </td><td class="mdescRight"><code>.operations_research.sat.LinearExpressionProto end = 5;</code> <a href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#a13b3a6bdbc3183c45d0197e8d7171849">More...</a><br /></td></tr>
<tr class="separator:a13b3a6bdbc3183c45d0197e8d7171849"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:aac9907139f4212fc3afeb8db5d2c6645"><td class="memItemLeft" align="right" valign="top"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1LinearExpressionProtoOrBuilder.html">com.google.ortools.sat.LinearExpressionProtoOrBuilder</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#aac9907139f4212fc3afeb8db5d2c6645">getEndOrBuilder</a> ()</td></tr>
<tr class="memdesc:aac9907139f4212fc3afeb8db5d2c6645"><td class="mdescLeft"> </td><td class="mdescRight"><code>.operations_research.sat.LinearExpressionProto end = 5;</code> <a href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#aac9907139f4212fc3afeb8db5d2c6645">More...</a><br /></td></tr>
<tr class="separator:aac9907139f4212fc3afeb8db5d2c6645"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:a3ad38ce6c081e909851785725d3c4f8a"><td class="memItemLeft" align="right" valign="top">boolean </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#a3ad38ce6c081e909851785725d3c4f8a">hasSize</a> ()</td></tr>
<tr class="memdesc:a3ad38ce6c081e909851785725d3c4f8a"><td class="mdescLeft"> </td><td class="mdescRight"><code>.operations_research.sat.LinearExpressionProto size = 6;</code> <a href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#a3ad38ce6c081e909851785725d3c4f8a">More...</a><br /></td></tr>
<tr class="separator:a3ad38ce6c081e909851785725d3c4f8a"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:aa15366d92d2522f2c4bbb87ccbda5047"><td class="memItemLeft" align="right" valign="top"><a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1LinearExpressionProto.html">com.google.ortools.sat.LinearExpressionProto</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#aa15366d92d2522f2c4bbb87ccbda5047">getSize</a> ()</td></tr>
<tr class="memdesc:aa15366d92d2522f2c4bbb87ccbda5047"><td class="mdescLeft"> </td><td class="mdescRight"><code>.operations_research.sat.LinearExpressionProto size = 6;</code> <a href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#aa15366d92d2522f2c4bbb87ccbda5047">More...</a><br /></td></tr>
<tr class="separator:aa15366d92d2522f2c4bbb87ccbda5047"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:a237a9bec82dc82d4048ff2ab810601e2"><td class="memItemLeft" align="right" valign="top"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1LinearExpressionProtoOrBuilder.html">com.google.ortools.sat.LinearExpressionProtoOrBuilder</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#a237a9bec82dc82d4048ff2ab810601e2">getSizeOrBuilder</a> ()</td></tr>
<tr class="memdesc:a237a9bec82dc82d4048ff2ab810601e2"><td class="mdescLeft"> </td><td class="mdescRight"><code>.operations_research.sat.LinearExpressionProto size = 6;</code> <a href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#a237a9bec82dc82d4048ff2ab810601e2">More...</a><br /></td></tr>
<tr class="separator:a237a9bec82dc82d4048ff2ab810601e2"><td class="memSeparator" colspan="2"> </td></tr>
</table>
<h2 class="groupheader">Member Function Documentation</h2>
<a id="a13b3a6bdbc3183c45d0197e8d7171849" name="a13b3a6bdbc3183c45d0197e8d7171849"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a13b3a6bdbc3183c45d0197e8d7171849">◆ </a></span>getEnd()</h2>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname"><a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1LinearExpressionProto.html">com.google.ortools.sat.LinearExpressionProto</a> getEnd </td>
<td>(</td>
<td class="paramname"></td><td>)</td>
<td></td>
</tr>
</table>
</div><div class="memdoc">
<p><code>.operations_research.sat.LinearExpressionProto end = 5;</code> </p>
<dl class="section return"><dt>Returns</dt><dd>The end. </dd></dl>
<p>Implemented in <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto.html#ac9171ca504d921151aeb477411c3b87d">IntervalConstraintProto</a>, and <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto_1_1Builder.html#a13b3a6bdbc3183c45d0197e8d7171849">IntervalConstraintProto.Builder</a>.</p>
</div>
</div>
<a id="aac9907139f4212fc3afeb8db5d2c6645" name="aac9907139f4212fc3afeb8db5d2c6645"></a>
<h2 class="memtitle"><span class="permalink"><a href="#aac9907139f4212fc3afeb8db5d2c6645">◆ </a></span>getEndOrBuilder()</h2>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1LinearExpressionProtoOrBuilder.html">com.google.ortools.sat.LinearExpressionProtoOrBuilder</a> getEndOrBuilder </td>
<td>(</td>
<td class="paramname"></td><td>)</td>
<td></td>
</tr>
</table>
</div><div class="memdoc">
<p><code>.operations_research.sat.LinearExpressionProto end = 5;</code> </p>
<p>Implemented in <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto.html#ab7d75ba562819ebaf4f3174a34bae7c1">IntervalConstraintProto</a>, and <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto_1_1Builder.html#aac9907139f4212fc3afeb8db5d2c6645">IntervalConstraintProto.Builder</a>.</p>
</div>
</div>
<a id="aa15366d92d2522f2c4bbb87ccbda5047" name="aa15366d92d2522f2c4bbb87ccbda5047"></a>
<h2 class="memtitle"><span class="permalink"><a href="#aa15366d92d2522f2c4bbb87ccbda5047">◆ </a></span>getSize()</h2>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname"><a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1LinearExpressionProto.html">com.google.ortools.sat.LinearExpressionProto</a> getSize </td>
<td>(</td>
<td class="paramname"></td><td>)</td>
<td></td>
</tr>
</table>
</div><div class="memdoc">
<p><code>.operations_research.sat.LinearExpressionProto size = 6;</code> </p>
<dl class="section return"><dt>Returns</dt><dd>The size. </dd></dl>
<p>Implemented in <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto.html#aaf089b475af5c0506025e946bb3cb054">IntervalConstraintProto</a>, and <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto_1_1Builder.html#aa15366d92d2522f2c4bbb87ccbda5047">IntervalConstraintProto.Builder</a>.</p>
</div>
</div>
<a id="a237a9bec82dc82d4048ff2ab810601e2" name="a237a9bec82dc82d4048ff2ab810601e2"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a237a9bec82dc82d4048ff2ab810601e2">◆ </a></span>getSizeOrBuilder()</h2>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1LinearExpressionProtoOrBuilder.html">com.google.ortools.sat.LinearExpressionProtoOrBuilder</a> getSizeOrBuilder </td>
<td>(</td>
<td class="paramname"></td><td>)</td>
<td></td>
</tr>
</table>
</div><div class="memdoc">
<p><code>.operations_research.sat.LinearExpressionProto size = 6;</code> </p>
<p>Implemented in <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto.html#aa3cd3b64451c6eb1510d64b4802d78e3">IntervalConstraintProto</a>, and <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto_1_1Builder.html#a237a9bec82dc82d4048ff2ab810601e2">IntervalConstraintProto.Builder</a>.</p>
</div>
</div>
<a id="a8471b7bf1bceb8a6b370d0b4f61cc6da" name="a8471b7bf1bceb8a6b370d0b4f61cc6da"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a8471b7bf1bceb8a6b370d0b4f61cc6da">◆ </a></span>getStart()</h2>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname"><a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1LinearExpressionProto.html">com.google.ortools.sat.LinearExpressionProto</a> getStart </td>
<td>(</td>
<td class="paramname"></td><td>)</td>
<td></td>
</tr>
</table>
</div><div class="memdoc">
<pre>
IMPORTANT: For now, this constraint does not enforce any relations on the
view, and a linear constraint must be added together with this to enforce
enforcement => start + size == end. An enforcement => size >=0 might also
be needed.
IMPORTANT: For now, we just support affine relation. We could easily
create an intermediate variable to support full linear expression, but this
isn't done currently.
</pre><p ><code>.operations_research.sat.LinearExpressionProto start = 4;</code> </p><dl class="section return"><dt>Returns</dt><dd>The start. </dd></dl>
<p>Implemented in <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto.html#a2c4b3e0b0fbe2599af27edb00d47b759">IntervalConstraintProto</a>, and <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto_1_1Builder.html#a8471b7bf1bceb8a6b370d0b4f61cc6da">IntervalConstraintProto.Builder</a>.</p>
</div>
</div>
<a id="a5e52f9711ecacca9fc2b3b02f0a524bf" name="a5e52f9711ecacca9fc2b3b02f0a524bf"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a5e52f9711ecacca9fc2b3b02f0a524bf">◆ </a></span>getStartOrBuilder()</h2>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1LinearExpressionProtoOrBuilder.html">com.google.ortools.sat.LinearExpressionProtoOrBuilder</a> getStartOrBuilder </td>
<td>(</td>
<td class="paramname"></td><td>)</td>
<td></td>
</tr>
</table>
</div><div class="memdoc">
<pre>
IMPORTANT: For now, this constraint does not enforce any relations on the
view, and a linear constraint must be added together with this to enforce
enforcement => start + size == end. An enforcement => size >=0 might also
be needed.
IMPORTANT: For now, we just support affine relation. We could easily
create an intermediate variable to support full linear expression, but this
isn't done currently.
</pre><p ><code>.operations_research.sat.LinearExpressionProto start = 4;</code> </p>
<p>Implemented in <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto.html#a3e71ab24003723fe61b18d77f826c001">IntervalConstraintProto</a>, and <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto_1_1Builder.html#a5e52f9711ecacca9fc2b3b02f0a524bf">IntervalConstraintProto.Builder</a>.</p>
</div>
</div>
<a id="a9b0197a2b2718c7b0061d19d4b1fbcb4" name="a9b0197a2b2718c7b0061d19d4b1fbcb4"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a9b0197a2b2718c7b0061d19d4b1fbcb4">◆ </a></span>hasEnd()</h2>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">boolean hasEnd </td>
<td>(</td>
<td class="paramname"></td><td>)</td>
<td></td>
</tr>
</table>
</div><div class="memdoc">
<p><code>.operations_research.sat.LinearExpressionProto end = 5;</code> </p>
<dl class="section return"><dt>Returns</dt><dd>Whether the end field is set. </dd></dl>
<p>Implemented in <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto.html#adb53e4a8cf21af1718b697ba52ee1a15">IntervalConstraintProto</a>, and <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto_1_1Builder.html#a9b0197a2b2718c7b0061d19d4b1fbcb4">IntervalConstraintProto.Builder</a>.</p>
</div>
</div>
<a id="a3ad38ce6c081e909851785725d3c4f8a" name="a3ad38ce6c081e909851785725d3c4f8a"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a3ad38ce6c081e909851785725d3c4f8a">◆ </a></span>hasSize()</h2>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">boolean hasSize </td>
<td>(</td>
<td class="paramname"></td><td>)</td>
<td></td>
</tr>
</table>
</div><div class="memdoc">
<p><code>.operations_research.sat.LinearExpressionProto size = 6;</code> </p>
<dl class="section return"><dt>Returns</dt><dd>Whether the size field is set. </dd></dl>
<p>Implemented in <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto.html#aae9643420ff88cb4c38c8e9181dd35ac">IntervalConstraintProto</a>, and <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto_1_1Builder.html#a3ad38ce6c081e909851785725d3c4f8a">IntervalConstraintProto.Builder</a>.</p>
</div>
</div>
<a id="af9618a9e1f1a516f3afe9accf2f68e9e" name="af9618a9e1f1a516f3afe9accf2f68e9e"></a>
<h2 class="memtitle"><span class="permalink"><a href="#af9618a9e1f1a516f3afe9accf2f68e9e">◆ </a></span>hasStart()</h2>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">boolean hasStart </td>
<td>(</td>
<td class="paramname"></td><td>)</td>
<td></td>
</tr>
</table>
</div><div class="memdoc">
<pre>
IMPORTANT: For now, this constraint does not enforce any relations on the
view, and a linear constraint must be added together with this to enforce
enforcement => start + size == end. An enforcement => size >=0 might also
be needed.
IMPORTANT: For now, we just support affine relation. We could easily
create an intermediate variable to support full linear expression, but this
isn't done currently.
</pre><p ><code>.operations_research.sat.LinearExpressionProto start = 4;</code> </p><dl class="section return"><dt>Returns</dt><dd>Whether the start field is set. </dd></dl>
<p>Implemented in <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto.html#af42348e54b4d3cb22d8020f260aa886c">IntervalConstraintProto</a>, and <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto_1_1Builder.html#af9618a9e1f1a516f3afe9accf2f68e9e">IntervalConstraintProto.Builder</a>.</p>
</div>
</div>
<hr/>The documentation for this interface was generated from the following file:<ul>
<li><a class="el" href="IntervalConstraintProtoOrBuilder_8java_source.html">IntervalConstraintProtoOrBuilder.java</a></li>
</ul>
</div><!-- contents -->
</div><!-- doc-content -->
</div>
</div>
<div id="footer-container">
<div id="footer">
</div>
</div>
</body>
</html>
|
Java
|
import re
import unicodedata
from collections import defaultdict
from typing import Any, Dict, List, Optional, Sequence, Union
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db.models.query import QuerySet
from django.forms.models import model_to_dict
from django.utils.translation import gettext as _
from typing_extensions import TypedDict
from zulip_bots.custom_exceptions import ConfigValidationError
from zerver.lib.avatar import avatar_url, get_avatar_field
from zerver.lib.cache import (
bulk_cached_fetch,
realm_user_dict_fields,
user_profile_by_id_cache_key,
user_profile_cache_key_id,
)
from zerver.lib.exceptions import OrganizationAdministratorRequired
from zerver.lib.request import JsonableError
from zerver.lib.timezone import canonicalize_timezone
from zerver.models import (
CustomProfileField,
CustomProfileFieldValue,
Realm,
Service,
UserProfile,
get_realm_user_dicts,
get_user_profile_by_id_in_realm,
)
def check_full_name(full_name_raw: str) -> str:
full_name = full_name_raw.strip()
if len(full_name) > UserProfile.MAX_NAME_LENGTH:
raise JsonableError(_("Name too long!"))
if len(full_name) < UserProfile.MIN_NAME_LENGTH:
raise JsonableError(_("Name too short!"))
for character in full_name:
if unicodedata.category(character)[0] == "C" or character in UserProfile.NAME_INVALID_CHARS:
raise JsonableError(_("Invalid characters in name!"))
# Names ending with e.g. `|15` could be ambiguous for
# sloppily-written parsers of our Markdown syntax for mentioning
# users with ambiguous names, and likely have no real use, so we
# ban them.
if re.search(r"\|\d+$", full_name_raw):
raise JsonableError(_("Invalid format!"))
return full_name
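# Hedged, self-contained sketch (not part of the original module) of the
# trailing "|<digits>" ban described in the comment above; it relies only on
# the `re` module already imported at the top of this file.
def _example_name_suffix_ban() -> None:
    assert re.search(r"\|\d+$", "Alice|15") is not None  # would be rejected
    assert re.search(r"\|\d+$", "Alice") is None  # passes this particular check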
# NOTE: We don't try to absolutely prevent 2 bots from having the same
# name (e.g. you can get there by reactivating a deactivated bot after
# making a new bot with the same name). This is just a check designed
# to make it unlikely to happen by accident.
def check_bot_name_available(realm_id: int, full_name: str) -> None:
dup_exists = UserProfile.objects.filter(
realm_id=realm_id,
full_name=full_name.strip(),
is_active=True,
).exists()
if dup_exists:
raise JsonableError(_("Name is already in use!"))
def check_short_name(short_name_raw: str) -> str:
short_name = short_name_raw.strip()
if len(short_name) == 0:
raise JsonableError(_("Bad name or username"))
return short_name
def check_valid_bot_config(bot_type: int, service_name: str, config_data: Dict[str, str]) -> None:
if bot_type == UserProfile.INCOMING_WEBHOOK_BOT:
from zerver.lib.integrations import WEBHOOK_INTEGRATIONS
config_options = None
for integration in WEBHOOK_INTEGRATIONS:
if integration.name == service_name:
# key: validator
config_options = {c[1]: c[2] for c in integration.config_options}
break
if not config_options:
raise JsonableError(_("Invalid integration '{}'.").format(service_name))
missing_keys = set(config_options.keys()) - set(config_data.keys())
if missing_keys:
raise JsonableError(
_("Missing configuration parameters: {}").format(
missing_keys,
)
)
for key, validator in config_options.items():
value = config_data[key]
error = validator(key, value)
if error:
raise JsonableError(_("Invalid {} value {} ({})").format(key, value, error))
elif bot_type == UserProfile.EMBEDDED_BOT:
try:
from zerver.lib.bot_lib import get_bot_handler
bot_handler = get_bot_handler(service_name)
if hasattr(bot_handler, "validate_config"):
bot_handler.validate_config(config_data)
except ConfigValidationError:
# The exception provides a specific error message, but that
# message is not tagged translatable, because it is
# triggered in the external zulip_bots package.
# TODO: Think of some clever way to provide a more specific
# error message.
raise JsonableError(_("Invalid configuration data!"))
# Adds an outgoing webhook or embedded bot service.
def add_service(
name: str,
user_profile: UserProfile,
base_url: Optional[str] = None,
interface: Optional[int] = None,
token: Optional[str] = None,
) -> None:
Service.objects.create(
name=name, user_profile=user_profile, base_url=base_url, interface=interface, token=token
)
def check_bot_creation_policy(user_profile: UserProfile, bot_type: int) -> None:
    # Realm administrators can always add bots
if user_profile.is_realm_admin:
return
if user_profile.realm.bot_creation_policy == Realm.BOT_CREATION_EVERYONE:
return
if user_profile.realm.bot_creation_policy == Realm.BOT_CREATION_ADMINS_ONLY:
raise OrganizationAdministratorRequired()
if (
user_profile.realm.bot_creation_policy == Realm.BOT_CREATION_LIMIT_GENERIC_BOTS
and bot_type == UserProfile.DEFAULT_BOT
):
raise OrganizationAdministratorRequired()
def check_valid_bot_type(user_profile: UserProfile, bot_type: int) -> None:
if bot_type not in user_profile.allowed_bot_types:
raise JsonableError(_("Invalid bot type"))
def check_valid_interface_type(interface_type: Optional[int]) -> None:
if interface_type not in Service.ALLOWED_INTERFACE_TYPES:
raise JsonableError(_("Invalid interface type"))
def is_administrator_role(role: int) -> bool:
return role in {UserProfile.ROLE_REALM_ADMINISTRATOR, UserProfile.ROLE_REALM_OWNER}
def bulk_get_users(
    emails: List[str], realm: Optional[Realm], base_query: Optional["QuerySet[UserProfile]"] = None
) -> Dict[str, UserProfile]:
if base_query is None:
assert realm is not None
query = UserProfile.objects.filter(realm=realm, is_active=True)
realm_id = realm.id
else:
# WARNING: Currently, this code path only really supports one
# version of `base_query` being used (because otherwise,
# they'll share the cache, which can screw up the filtering).
# If you're using this flow, you'll need to re-do any filters
# in base_query in the code itself; base_query is just a perf
# optimization.
query = base_query
realm_id = 0
def fetch_users_by_email(emails: List[str]) -> List[UserProfile]:
# This should be just
#
# UserProfile.objects.select_related("realm").filter(email__iexact__in=emails,
# realm=realm)
#
# But chaining __in and __iexact doesn't work with Django's
# ORM, so we have the following hack to construct the relevant where clause
where_clause = "upper(zerver_userprofile.email::text) IN (SELECT upper(email) FROM unnest(%s) AS email)"
return query.select_related("realm").extra(where=[where_clause], params=(emails,))
def user_to_email(user_profile: UserProfile) -> str:
return user_profile.email.lower()
return bulk_cached_fetch(
# Use a separate cache key to protect us from conflicts with
# the get_user cache.
lambda email: "bulk_get_users:" + user_profile_cache_key_id(email, realm_id),
fetch_users_by_email,
[email.lower() for email in emails],
id_fetcher=user_to_email,
)
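# Hedged illustration (not part of the original module): the raw WHERE clause
# in fetch_users_by_email above implements case-insensitive membership on the
# database side; this pure-Python equivalent shows the intended semantics.
def _example_case_insensitive_membership(
    stored_emails: List[str], requested_emails: List[str]
) -> List[str]:
    requested_upper = {email.upper() for email in requested_emails}
    return [email for email in stored_emails if email.upper() in requested_upper]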
def get_user_id(user: UserProfile) -> int:
return user.id
def user_ids_to_users(user_ids: Sequence[int], realm: Realm) -> List[UserProfile]:
# TODO: Consider adding a flag to control whether deactivated
# users should be included.
def fetch_users_by_id(user_ids: List[int]) -> List[UserProfile]:
return list(UserProfile.objects.filter(id__in=user_ids).select_related())
user_profiles_by_id: Dict[int, UserProfile] = bulk_cached_fetch(
cache_key_function=user_profile_by_id_cache_key,
query_function=fetch_users_by_id,
object_ids=user_ids,
id_fetcher=get_user_id,
)
found_user_ids = user_profiles_by_id.keys()
missed_user_ids = [user_id for user_id in user_ids if user_id not in found_user_ids]
if missed_user_ids:
raise JsonableError(_("Invalid user ID: {}").format(missed_user_ids[0]))
user_profiles = list(user_profiles_by_id.values())
for user_profile in user_profiles:
if user_profile.realm != realm:
raise JsonableError(_("Invalid user ID: {}").format(user_profile.id))
return user_profiles
def access_bot_by_id(user_profile: UserProfile, user_id: int) -> UserProfile:
try:
target = get_user_profile_by_id_in_realm(user_id, user_profile.realm)
except UserProfile.DoesNotExist:
raise JsonableError(_("No such bot"))
if not target.is_bot:
raise JsonableError(_("No such bot"))
if not user_profile.can_admin_user(target):
raise JsonableError(_("Insufficient permission"))
return target
def access_user_by_id(
user_profile: UserProfile,
target_user_id: int,
*,
allow_deactivated: bool = False,
allow_bots: bool = False,
for_admin: bool,
) -> UserProfile:
"""Master function for accessing another user by ID in API code;
verifies the user ID is in the same realm, and if requested checks
for administrative privileges, with flags for various special
cases.
"""
try:
target = get_user_profile_by_id_in_realm(target_user_id, user_profile.realm)
except UserProfile.DoesNotExist:
raise JsonableError(_("No such user"))
if target.is_bot and not allow_bots:
raise JsonableError(_("No such user"))
if not target.is_active and not allow_deactivated:
raise JsonableError(_("User is deactivated"))
if not for_admin:
# Administrative access is not required just to read a user.
return target
if not user_profile.can_admin_user(target):
raise JsonableError(_("Insufficient permission"))
return target
class Accounts(TypedDict):
realm_name: str
realm_id: int
full_name: str
avatar: Optional[str]
def get_accounts_for_email(email: str) -> List[Accounts]:
profiles = (
UserProfile.objects.select_related("realm")
.filter(
delivery_email__iexact=email.strip(),
is_active=True,
realm__deactivated=False,
is_bot=False,
)
.order_by("date_joined")
)
accounts: List[Accounts] = []
for profile in profiles:
accounts.append(
dict(
realm_name=profile.realm.name,
realm_id=profile.realm.id,
full_name=profile.full_name,
avatar=avatar_url(profile),
)
)
return accounts
def get_api_key(user_profile: UserProfile) -> str:
return user_profile.api_key
def get_all_api_keys(user_profile: UserProfile) -> List[str]:
# Users can only have one API key for now
return [user_profile.api_key]
def validate_user_custom_profile_field(
realm_id: int, field: CustomProfileField, value: Union[int, str, List[int]]
) -> Union[int, str, List[int]]:
validators = CustomProfileField.FIELD_VALIDATORS
field_type = field.field_type
var_name = f"{field.name}"
if field_type in validators:
validator = validators[field_type]
return validator(var_name, value)
elif field_type == CustomProfileField.SELECT:
choice_field_validator = CustomProfileField.SELECT_FIELD_VALIDATORS[field_type]
field_data = field.field_data
# Put an assertion so that mypy doesn't complain.
assert field_data is not None
return choice_field_validator(var_name, field_data, value)
elif field_type == CustomProfileField.USER:
user_field_validator = CustomProfileField.USER_FIELD_VALIDATORS[field_type]
return user_field_validator(realm_id, value, False)
else:
raise AssertionError("Invalid field type")
def validate_user_custom_profile_data(
realm_id: int, profile_data: List[Dict[str, Union[int, str, List[int]]]]
) -> None:
    # This function validates all custom field values according to their field type.
for item in profile_data:
field_id = item["id"]
try:
field = CustomProfileField.objects.get(id=field_id)
except CustomProfileField.DoesNotExist:
raise JsonableError(_("Field id {id} not found.").format(id=field_id))
try:
validate_user_custom_profile_field(realm_id, field, item["value"])
except ValidationError as error:
raise JsonableError(error.message)
def can_access_delivery_email(user_profile: UserProfile) -> bool:
realm = user_profile.realm
if realm.email_address_visibility == Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS:
return user_profile.is_realm_admin
if realm.email_address_visibility == Realm.EMAIL_ADDRESS_VISIBILITY_MODERATORS:
return user_profile.is_realm_admin or user_profile.is_moderator
return False
def format_user_row(
realm: Realm,
acting_user: Optional[UserProfile],
row: Dict[str, Any],
client_gravatar: bool,
user_avatar_url_field_optional: bool,
custom_profile_field_data: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
"""Formats a user row returned by a database fetch using
.values(*realm_user_dict_fields) into a dictionary representation
of that user for API delivery to clients. The acting_user
argument is used for permissions checks.
"""
is_admin = is_administrator_role(row["role"])
is_owner = row["role"] == UserProfile.ROLE_REALM_OWNER
is_guest = row["role"] == UserProfile.ROLE_GUEST
is_bot = row["is_bot"]
result = dict(
email=row["email"],
user_id=row["id"],
avatar_version=row["avatar_version"],
is_admin=is_admin,
is_owner=is_owner,
is_guest=is_guest,
is_billing_admin=row["is_billing_admin"],
role=row["role"],
is_bot=is_bot,
full_name=row["full_name"],
timezone=canonicalize_timezone(row["timezone"]),
is_active=row["is_active"],
date_joined=row["date_joined"].isoformat(),
)
# Zulip clients that support using `GET /avatar/{user_id}` as a
# fallback if we didn't send an avatar URL in the user object pass
# user_avatar_url_field_optional in client_capabilities.
#
# This is a major network performance optimization for
# organizations with 10,000s of users where we would otherwise
# send avatar URLs in the payload (either because most users have
# uploaded avatars or because EMAIL_ADDRESS_VISIBILITY_ADMINS
# prevents the older client_gravatar optimization from helping).
    # The performance impact is large, largely because the hashes in
# avatar URLs structurally cannot compress well.
#
# The user_avatar_url_field_optional gives the server sole
# discretion in deciding for which users we want to send the
    # avatar URL (which saves clients an RTT at the cost of some
    # bandwidth). At present, the server looks at `long_term_idle` to
    # decide which users to include avatars for, piggy-backing on a
    # different optimization for organizations with 10,000s of users.
    # (The inclusion rule is illustrated in the sketch after this function.)
include_avatar_url = not user_avatar_url_field_optional or not row["long_term_idle"]
if include_avatar_url:
result["avatar_url"] = get_avatar_field(
user_id=row["id"],
realm_id=realm.id,
email=row["delivery_email"],
avatar_source=row["avatar_source"],
avatar_version=row["avatar_version"],
medium=False,
client_gravatar=client_gravatar,
)
if acting_user is not None and can_access_delivery_email(acting_user):
result["delivery_email"] = row["delivery_email"]
if is_bot:
result["bot_type"] = row["bot_type"]
if row["email"] in settings.CROSS_REALM_BOT_EMAILS:
result["is_cross_realm_bot"] = True
# Note that bot_owner_id can be None with legacy data.
result["bot_owner_id"] = row["bot_owner_id"]
elif custom_profile_field_data is not None:
result["profile_data"] = custom_profile_field_data
return result
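# Hedged sketch (not part of the original module) of the avatar-URL inclusion
# rule implemented in format_user_row above: the URL is omitted only when the
# client opted into user_avatar_url_field_optional AND the user is long-term idle.
def _example_avatar_url_inclusion() -> None:
    expected = {
        (False, False): True,
        (False, True): True,
        (True, False): True,
        (True, True): False,  # opted-in client + long-term-idle user: URL omitted
    }
    for (optional, idle), include in expected.items():
        assert (not optional or not idle) == include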
def user_profile_to_user_row(user_profile: UserProfile) -> Dict[str, Any]:
# What we're trying to do is simulate the user_profile having been
# fetched from a QuerySet using `.values(*realm_user_dict_fields)`
# even though we fetched UserProfile objects. This is messier
# than it seems.
#
# What we'd like to do is just call model_to_dict(user,
# fields=realm_user_dict_fields). The problem with this is
# that model_to_dict has a different convention than
# `.values()` in its handling of foreign keys, naming them as
# e.g. `bot_owner`, not `bot_owner_id`; we work around that
# here.
#
# This could be potentially simplified in the future by
# changing realm_user_dict_fields to name the bot owner with
# the less readable `bot_owner` (instead of `bot_owner_id`).
user_row = model_to_dict(user_profile, fields=[*realm_user_dict_fields, "bot_owner"])
user_row["bot_owner_id"] = user_row["bot_owner"]
del user_row["bot_owner"]
return user_row
def get_cross_realm_dicts() -> List[Dict[str, Any]]:
users = bulk_get_users(
list(settings.CROSS_REALM_BOT_EMAILS),
None,
base_query=UserProfile.objects.filter(realm__string_id=settings.SYSTEM_BOT_REALM),
).values()
result = []
for user in users:
        # Important: We filter here, in addition to in
# `base_query`, because of how bulk_get_users shares its
# cache with other UserProfile caches.
if user.realm.string_id != settings.SYSTEM_BOT_REALM: # nocoverage
continue
user_row = user_profile_to_user_row(user)
        # Because we want to avoid clients being exposed to the
# implementation detail that these bots are self-owned, we
# just set bot_owner_id=None.
user_row["bot_owner_id"] = None
result.append(
format_user_row(
user.realm,
acting_user=user,
row=user_row,
client_gravatar=False,
user_avatar_url_field_optional=False,
custom_profile_field_data=None,
)
)
return result
def get_custom_profile_field_values(
custom_profile_field_values: List[CustomProfileFieldValue],
) -> Dict[int, Dict[str, Any]]:
profiles_by_user_id: Dict[int, Dict[str, Any]] = defaultdict(dict)
for profile_field in custom_profile_field_values:
user_id = profile_field.user_profile_id
if profile_field.field.is_renderable():
profiles_by_user_id[user_id][str(profile_field.field_id)] = {
"value": profile_field.value,
"rendered_value": profile_field.rendered_value,
}
else:
profiles_by_user_id[user_id][str(profile_field.field_id)] = {
"value": profile_field.value,
}
return profiles_by_user_id
def get_raw_user_data(
realm: Realm,
acting_user: Optional[UserProfile],
*,
target_user: Optional[UserProfile] = None,
client_gravatar: bool,
user_avatar_url_field_optional: bool,
include_custom_profile_fields: bool = True,
) -> Dict[int, Dict[str, str]]:
"""Fetches data about the target user(s) appropriate for sending to
acting_user via the standard format for the Zulip API. If
target_user is None, we fetch all users in the realm.
"""
profiles_by_user_id = None
custom_profile_field_data = None
# target_user is an optional parameter which is passed when user data of a specific user
# is required. It is 'None' otherwise.
if target_user is not None:
user_dicts = [user_profile_to_user_row(target_user)]
else:
user_dicts = get_realm_user_dicts(realm.id)
if include_custom_profile_fields:
base_query = CustomProfileFieldValue.objects.select_related("field")
# TODO: Consider optimizing this query away with caching.
if target_user is not None:
custom_profile_field_values = base_query.filter(user_profile=target_user)
else:
custom_profile_field_values = base_query.filter(field__realm_id=realm.id)
profiles_by_user_id = get_custom_profile_field_values(custom_profile_field_values)
result = {}
for row in user_dicts:
if profiles_by_user_id is not None:
custom_profile_field_data = profiles_by_user_id.get(row["id"], {})
result[row["id"]] = format_user_row(
realm,
acting_user=acting_user,
row=row,
client_gravatar=client_gravatar,
user_avatar_url_field_optional=user_avatar_url_field_optional,
custom_profile_field_data=custom_profile_field_data,
)
return result
|
Java
|
<!DOCTYPE HTML>
<html>
<head>
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<title>Test Page for sap.m.CheckBox</title>
<script src="../shared-config.js"></script>
<script id="sap-ui-bootstrap" data-sap-ui-noConflict="true"
data-sap-ui-libs="sap.m" src="../../../../resources/sap-ui-core.js">
</script>
<link rel="stylesheet" href="../../../../resources/sap/ui/thirdparty/qunit.css" type="text/css" media="screen">
<script src="../../../../resources/sap/ui/thirdparty/qunit.js"></script>
<script src="../../../../resources/sap/ui/qunit/qunit-junit.js"></script>
<script src="../../../../resources/sap/ui/qunit/QUnitUtils.js"></script>
<script src="../../../../resources/sap/ui/thirdparty/sinon.js"></script>
<script src="../../../../resources/sap/ui/thirdparty/sinon-qunit.js"></script>
<script>
jQuery.sap.require("sap.m.CheckBox");
jQuery.sap.require("sap.ui.core.ValueState");
QUnit.module("Properties");
/* --------------------------------------- */
/* Test: Default Values */
/* --------------------------------------- */
QUnit.test("Default Values", function(assert) {
var bEnabled = true;
var bEditable = true;
var bVisible = true;
var bSelected = false;
var sName = "";
var sText = "";
var sTextDirection = sap.ui.core.TextDirection.Inherit;
var sWidth = "";
// system under test
var oCheckBox = new sap.m.CheckBox();
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.strictEqual(oCheckBox.getEnabled(), bEnabled, "Property 'enabled': Default value should be '" + bEnabled + "'");
assert.strictEqual(oCheckBox.getEditable(), bEditable, "Property 'editable': Default value should be '" + bEditable + "'");
assert.strictEqual(oCheckBox.getVisible(), bVisible, "Property 'visible': Default value should be '" + bVisible + "'");
assert.strictEqual(oCheckBox.getSelected(), bSelected, "Property 'selected': Default value should be '" + bSelected + "'");
assert.strictEqual(oCheckBox.getName(), sName, "Property 'name': Default value should be '" + sName + "'");
assert.strictEqual(oCheckBox.getText(), sText, "Property 'text': Default value should be '" + sText + "'");
assert.strictEqual(oCheckBox.getTextDirection(), sTextDirection, "Property 'textDirection': Default value should be '" + sTextDirection + "'");
assert.strictEqual(oCheckBox.getWidth(), sWidth, "Property 'width': Default value should be '" + sWidth + "'");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: 'visible=true' */
/* ----------------------------------------------- */
QUnit.test("'visible=true'", function(assert) {
// system under test
var oCheckBox = new sap.m.CheckBox({visible: true});
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.ok(oCheckBox.getDomRef(), "visible=true: CheckBox should have been rendered");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: 'visible=false' */
/* ----------------------------------------------- */
QUnit.test("'visible=false'", function(assert) {
// system under test
var oCheckBox = new sap.m.CheckBox({visible: false});
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.ok(!oCheckBox.getDomRef(), "visible=false: CheckBox should not have been rendered");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: 'enabled=true' */
/* ----------------------------------------------- */
QUnit.test("'enabled=true'", function(assert) {
// system under test
var bEnabled = true;
var oCheckBox = new sap.m.CheckBox({enabled: bEnabled});
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.ok(!oCheckBox.$("CbBg").hasClass("sapMCbBgDis"), "enabled=" + bEnabled + ": CheckBox should not have class sapMCbBgDis");
assert.strictEqual(oCheckBox.$("CB").attr("disabled"), undefined, "enabled=" + bEnabled + ": CheckBox should not have attribute 'disabled'");
var iTabindex = oCheckBox.getTabIndex();
assert.strictEqual(oCheckBox.$().attr("tabindex"), iTabindex.toString() , "enabled=" + bEnabled + ": CheckBox should have attribute 'tabindex=" + iTabindex +"'");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: 'enabled=false' */
/* ----------------------------------------------- */
QUnit.test("'enabled=false'", function(assert) {
// system under test
var bEnabled = false;
var oCheckBox = new sap.m.CheckBox({enabled: bEnabled});
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.ok(!oCheckBox.$().hasClass("sapMPointer"), "enabled=" + bEnabled + ": CheckBox should not have class sapMPointer");
assert.ok(oCheckBox.$().hasClass("sapMCbBgDis"), "enabled=" + bEnabled + ": CheckBox should have class sapMCbBgDis");
assert.strictEqual(oCheckBox.$("CB").attr("disabled"), "disabled", "enabled=" + bEnabled + ": CheckBox should have attribute 'disabled=disabled'");
assert.strictEqual(oCheckBox.$().attr("aria-disabled"), "true", "Property 'aria-disabled' should be 'true'");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: 'editable=false' */
/* ----------------------------------------------- */
QUnit.test("'editable=false'", function(assert) {
// system under test
var bEditable = false;
var oCheckBox = new sap.m.CheckBox({editable: bEditable});
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.strictEqual(oCheckBox.getTabIndex(), 0 , "'getTabindex' should return 0");
assert.equal(oCheckBox.$().hasClass("sapMCbRo"), true, ": The CheckBox should have class sapMCbRo");
assert.strictEqual(oCheckBox.$("CB").attr("readonly"), "readonly", "The Checkbox should have attribute 'readonly=readonly'");
assert.strictEqual(oCheckBox.$().attr("aria-readonly"), "true", "Property 'aria-readonly' should be 'true'");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: 'selected=true' */
/* ----------------------------------------------- */
QUnit.test("'selected=true'", function(assert) {
// system under test
var oCheckBox = new sap.m.CheckBox({selected: true});
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.ok(oCheckBox.$("CbBg").hasClass("sapMCbMarkChecked"), "selected=true: CheckBox should have class sapMCbMarkChecked");
assert.ok(oCheckBox.$("CB").is(":checked"), "selected=false: CheckBox should have attribute 'checked'");
assert.strictEqual(oCheckBox.$().attr("aria-checked"), "true", "Property 'aria-checked': Default value should be 'true'");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: 'selected=false' */
/* ----------------------------------------------- */
QUnit.test("'selected=false'", function(assert) {
// system under test
var oCheckBox = new sap.m.CheckBox({selected: false});
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.ok(!oCheckBox.$("CbBg").hasClass("sapMCbMarkChecked"), "selected=false: CheckBox should not have class sapMCbMarkChecked");
assert.ok(!oCheckBox.$("CB").is(":checked"), "selected=false: CheckBox should not have attribute 'checked'");
assert.strictEqual(oCheckBox.$().attr("aria-checked"), "false", "Property 'aria-checked': Default value should be 'false'");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: 'ValueState=Error' */
/* ----------------------------------------------- */
QUnit.test("'ValueState=Error'", function(assert) {
// system under test
var oCheckBox = new sap.m.CheckBox({valueState: sap.ui.core.ValueState.Error});
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.ok(oCheckBox.$().hasClass("sapMCbErr"), "The CheckBox has value state error css class.");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: 'ValueState=Warning' */
/* ----------------------------------------------- */
QUnit.test("'ValueState=Warning'", function(assert) {
// system under test
var oCheckBox = new sap.m.CheckBox({valueState: sap.ui.core.ValueState.Warning});
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.ok(oCheckBox.$().hasClass("sapMCbWarn"), "The CheckBox has value state warning css class.");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: 'name' */
/* ----------------------------------------------- */
QUnit.test("'name'", function(assert) {
var sName = "my Name";
// system under test
var oCheckBox = new sap.m.CheckBox({name: sName});
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.strictEqual(oCheckBox.$("CB").attr("name"), sName, "Property 'name=" + sName + "': CheckBox input element should have attribute 'name=" + sName + "'");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: getTabIndex (enabled=true) */
/* ----------------------------------------------- */
QUnit.test("'getTabIndex (enabled=true)'", function(assert) {
// system under test
var oCheckBox = new sap.m.CheckBox({enabled: true});
// assertions
assert.strictEqual(oCheckBox.getTabIndex(), 0 , "'getTabindex' should return 0");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
		/* Test: getTabIndex (enabled=false)             */
/* ----------------------------------------------- */
QUnit.test("'getTabIndex (enabled=false)'", function(assert) {
// system under test
var oCheckBox = new sap.m.CheckBox({enabled: false});
// assertions
assert.strictEqual(oCheckBox.getTabIndex(), -1 , "'getTabindex' should return -1");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------------------------------- */
		/* Test: getTabIndex (tabIndex previously set explicitly via setTabIndex) */
		/* ----------------------------------------------------------------------- */
		QUnit.test("'getTabIndex (tabIndex previously set explicitly via setTabIndex)'", function(assert) {
// system under test
var oCheckBox = new sap.m.CheckBox();
// arrange
oCheckBox.setTabIndex(2);
// assertions
assert.strictEqual(oCheckBox.getTabIndex(), 2 , "'getTabindex' should return 2");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: 'tabIndex' */
/* ----------------------------------------------- */
QUnit.test("'tabIndex'", function(assert) {
var iTabIndex = 2;
// system under test
var oCheckBox = new sap.m.CheckBox();
// arrange
oCheckBox.placeAt("content");
oCheckBox.setTabIndex(iTabIndex);
sap.ui.getCore().applyChanges();
// assertions
assert.strictEqual(oCheckBox.$().attr("tabindex"), iTabIndex.toString() , "Property 'tabIndex=" + iTabIndex + "': CheckBox should have attribute 'tabindex=" + iTabIndex + "'");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: testSetLabelProperty */
/* ----------------------------------------------- */
function testSetLabelProperty(property, value, mode) {
var sPropertyCamelCase = property[0].toUpperCase() + property.slice(1);
var sSetterMethod = "set" + sPropertyCamelCase;
var oSpy = sinon.spy(sap.m.Label.prototype, sSetterMethod);
// system under test
switch (mode) {
case "Constructor":
					// set property via constructor
var args = {};
args[property] = value;
var oCheckBox = new sap.m.CheckBox(args);
break;
case "Setter":
// set property via setter method
var oCheckBox = new sap.m.CheckBox();
oCheckBox[sSetterMethod](value);
break;
				default: console.error("testSetLabelProperty: wrong argument for parameter 'mode'");
}
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
			assert.strictEqual(oSpy.lastCall.args[0], value, "Property '" + property + "=" + value + "': Corresponding setter method of the label control should have been called accordingly");
// cleanup
oCheckBox.destroy();
sap.m.Label.prototype[sSetterMethod].restore();
}
QUnit.test("Should render the text of a Checkbox after rendering the checkbox without setting label properties", function(assert) {
// Arrange
var oCheckBox = new sap.m.CheckBox();
// System under Test
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// Act
oCheckBox.setText("foo");
sap.ui.getCore().applyChanges();
// Assert
assert.ok(oCheckBox.$("label").length);
// Cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: 'text' - via Constructor */
/* ----------------------------------------------- */
QUnit.test("'text' - via Constructor", function(assert) {
testSetLabelProperty("text", "my Text", "Constructor");
});
/* ----------------------------------------------- */
/* Test: 'text' - via Setter Method */
/* ----------------------------------------------- */
QUnit.test("'text' - via Setter Method", function(assert) {
testSetLabelProperty("text", "my Text", "Setter");
});
/* ----------------------------------------------- */
/* Test: 'textDirection' - via Constructor */
/* ----------------------------------------------- */
QUnit.test("'textDirection' - via Constructor", function(assert) {
testSetLabelProperty("textDirection", "RTL", "Constructor");
testSetLabelProperty("textDirection", "LTR", "Constructor");
testSetLabelProperty("textDirection", "Inherit", "Constructor");
});
/* ----------------------------------------------- */
/* Test: 'textDirection' - via Setter Method */
/* ----------------------------------------------- */
QUnit.test("'textDirection' - via Setter Method", function(assert) {
testSetLabelProperty("textDirection", "RTL", "Setter");
testSetLabelProperty("textDirection", "LTR", "Setter");
testSetLabelProperty("textDirection", "Inherit", "Setter");
});
/* ----------------------------------------------- */
/* Test: 'textAlign' - via Constructor */
/* ----------------------------------------------- */
QUnit.test("'textAlign' - via Constructor", function(assert) {
testSetLabelProperty("textAlign", "Begin", "Constructor");
testSetLabelProperty("textAlign", "End", "Constructor");
testSetLabelProperty("textAlign", "Left", "Constructor");
testSetLabelProperty("textAlign", "Right", "Constructor");
testSetLabelProperty("textAlign", "Center", "Constructor");
testSetLabelProperty("textAlign", "Initial", "Constructor");
});
/* ----------------------------------------------- */
/* Test: 'textAlign' - via Setter Method */
/* ----------------------------------------------- */
QUnit.test("'textAlign' - via Setter Method", function(assert) {
testSetLabelProperty("textAlign", "Begin", "Setter");
testSetLabelProperty("textAlign", "End", "Setter");
testSetLabelProperty("textAlign", "Left", "Setter");
testSetLabelProperty("textAlign", "Right", "Setter");
testSetLabelProperty("textAlign", "Center", "Setter");
testSetLabelProperty("textAlign", "Initial", "Setter");
});
/* ----------------------------------------------- */
/* Test: 'width' - via Constructor */
/* ----------------------------------------------- */
QUnit.test("'width' - via Constructor", function(assert) {
testSetLabelProperty("width", "100px", "Constructor");
});
/* ----------------------------------------------- */
/* Test: 'width' - via Setter Method */
/* ----------------------------------------------- */
QUnit.test("'width' - via Setter Method", function(assert) {
testSetLabelProperty("width", "100px", "Setter");
});
QUnit.module("Basic CSS classes");
/* ----------------------------------------------- */
/* Test: Existence */
/* ----------------------------------------------- */
QUnit.test("Existence", function(assert) {
// system under test
var oCheckBox = new sap.m.CheckBox();
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.ok(oCheckBox.$().hasClass("sapMCb"), "CheckBox should have class sapMCb");
assert.ok(oCheckBox.$("CbBg").hasClass("sapMCbBg"), "CheckBox should have class sapMCbBg");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* function: sapMCbHoverable */
/* ----------------------------------------------- */
function testSapMCbHoverable(oThat, bDesktop, sMessage) {
var stub = oThat.stub(sap.ui.Device, "system", {desktop : bDesktop});
// system under test
var oCheckBox = new sap.m.CheckBox();
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
if (bDesktop){
assert.ok(oCheckBox.$("CbBg").hasClass("sapMCbHoverable"), sMessage);
} else {
assert.ok(!oCheckBox.$("CbBg").hasClass("sapMCbHoverable"), sMessage);
}
// cleanup
oCheckBox.destroy();
}
/* ----------------------------------------------- */
/* Test: sapMCbHoverable (non-desktop environment) */
/* ----------------------------------------------- */
QUnit.test("sapMCbHoverable (non-desktop environment)", function(assert) {
testSapMCbHoverable(this, false, "CheckBox should not have class sapMCbHoverable");
});
/* ----------------------------------------------- */
/* Test: sapMCbHoverable (desktop environment) */
/* ----------------------------------------------- */
QUnit.test("sapMCbHoverable (desktop environment)", function(assert) {
testSapMCbHoverable(this, true, "CheckBox should have class sapMCbHoverable");
});
QUnit.module("Events");
/* ----------------------------------------------- */
/* Test: tap */
/* ----------------------------------------------- */
QUnit.test("tap", function(assert) {
// system under test
var oCheckBox = new sap.m.CheckBox();
var oSpy = this.spy();
oCheckBox.attachSelect(oSpy);
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.equal(oCheckBox.getSelected(), false, "CheckBox should not be selected");
assert.strictEqual(oCheckBox.$().attr("aria-checked"), "false", "Property 'aria-checked': Default value should be 'false'");
qutils.triggerEvent("tap", oCheckBox.getId());
assert.ok(oSpy.calledOnce, "Event 'select' should have been fired");
assert.equal(oCheckBox.getSelected(), true, "CheckBox should be selected");
assert.strictEqual(oCheckBox.$().attr("aria-checked"), "true", "Property 'aria-checked': Default value should be 'true'");
qutils.triggerEvent("tap", oCheckBox.getId());
assert.ok(oSpy.calledTwice, "Event 'select' should have been fired");
assert.equal(oCheckBox.getSelected(), false, "CheckBox should not be selected");
oCheckBox.setEditable(false);
qutils.triggerEvent("tap", oCheckBox.getId());
			assert.ok(oSpy.calledTwice, "Event 'select' should not have been fired again");
assert.equal(oCheckBox.getSelected(), false, "CheckBox should not be selected");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: SPACE key */
/* ----------------------------------------------- */
function testSpaceKey(sTestName, oOptions) {
QUnit.test(sTestName, function(assert) {
//Arrange
var oSpy = this.spy();
var oCheckBox = new sap.m.CheckBox({select : oSpy, selected : oOptions.selected});
// System under Test
oCheckBox.placeAt("qunit-fixture");
sap.ui.getCore().applyChanges();
oCheckBox.$().focus(); // set focus on checkbox
sap.ui.test.qunit.triggerKeydown(oCheckBox.$(), jQuery.sap.KeyCodes.SPACE); // trigger Space on checkbox
assert.strictEqual(oSpy.callCount, 1, "SPACE is pressed, select event was fired");
assert.equal(oCheckBox.getSelected(), oOptions.expectedSelection, oOptions.expectedMessage);
assert.strictEqual(oCheckBox.$().attr("aria-checked"), "" + oOptions.expectedSelection, oOptions.expectedMessageAria);
// Clean up
oCheckBox.destroy();
});
}
testSpaceKey("Press Space on not selected checkBox", {
selected : false,
expectedSelection : true,
expectedMessage: "CheckBox should be selected",
expectedMessageAria: "Property 'aria-checked' should be 'true'"
});
testSpaceKey("Press Space on selected checkBox", {
selected : true,
expectedSelection : false,
expectedMessage: "CheckBox should be deselected",
expectedMessageAria: "Property 'aria-checked' should be 'false'"
});
/* ----------------------------------------------- */
/* Test: ENTER key */
/* ----------------------------------------------- */
function testEnterKey(sTestName, oOptions) {
QUnit.test(sTestName, function(assert) {
//Arrange
var oSpy = this.spy();
var oCheckBox = new sap.m.CheckBox({select : oSpy, selected : oOptions.selected});
// System under Test
oCheckBox.placeAt("qunit-fixture");
sap.ui.getCore().applyChanges();
oCheckBox.$().focus(); // set focus on checkbox
sap.ui.test.qunit.triggerKeydown(oCheckBox.$(), jQuery.sap.KeyCodes.ENTER); // trigger Enter on checkbox
assert.strictEqual(oSpy.callCount, 1, "Enter is pressed, select event was fired");
assert.equal(oCheckBox.getSelected(), oOptions.expectedSelection, oOptions.expectedMessage);
assert.strictEqual(oCheckBox.$().attr("aria-checked"), "" + oOptions.expectedSelection, oOptions.expectedMessageAria);
// Clean up
oCheckBox.destroy();
});
}
testEnterKey("Press Enter on not selected checkBox", {
selected : false,
expectedSelection : true,
expectedMessage: "CheckBox should be selected",
expectedMessageAria: "Property 'aria-checked' should be 'true'"
});
testEnterKey("Press Enter on selected checkBox", {
selected : true,
expectedSelection : false,
expectedMessage: "CheckBox should be deselected",
expectedMessageAria: "Property 'aria-checked' should be 'false'"
});
QUnit.module("Accessibility");
QUnit.test("getAccessibilityInfo", function(assert) {
var oControl = new sap.m.CheckBox({text: "Text"});
assert.ok(!!oControl.getAccessibilityInfo, "CheckBox has a getAccessibilityInfo function");
var oInfo = oControl.getAccessibilityInfo();
assert.ok(!!oInfo, "getAccessibilityInfo returns a info object");
assert.strictEqual(oInfo.role, "checkbox", "AriaRole");
assert.strictEqual(oInfo.type, sap.ui.getCore().getLibraryResourceBundle("sap.m").getText("ACC_CTR_TYPE_CHECKBOX"), "Type");
assert.strictEqual(oInfo.description, "Text", "Description");
assert.strictEqual(oInfo.focusable, true, "Focusable");
assert.strictEqual(oInfo.enabled, true, "Enabled");
assert.strictEqual(oInfo.editable, true, "Editable");
oControl.setSelected(true);
oControl.setEnabled(false);
oControl.setEditable(false);
oInfo = oControl.getAccessibilityInfo();
assert.strictEqual(oInfo.description, "Text " + sap.ui.getCore().getLibraryResourceBundle("sap.m").getText("ACC_CTR_STATE_CHECKED"), "Description");
assert.strictEqual(oInfo.focusable, false, "Focusable");
assert.strictEqual(oInfo.enabled, false, "Enabled");
assert.strictEqual(oInfo.editable, false, "Editable");
oControl.destroy();
});
</script>
</head>
<body id="body" class="sapUiBody">
<h1 id="qunit-header">QUnit Page for sap.m.CheckBox</h1>
<h2 id="qunit-banner"></h2>
<h2 id="qunit-userAgent"></h2>
<div id="qunit-testrunner-toolbar"></div>
<ol id="qunit-tests"></ol>
<div id="content"></div>
<div id="qunit-fixture"></div>
</body>
</html>
|
Java
|
<!DOCTYPE html >
<html>
<head>
<title>SeeStackDepthException - ScalaTest 3.0.2 - org.scalatest.events.SeeStackDepthException</title>
<meta name="description" content="SeeStackDepthException - ScalaTest 3.0.2 - org.scalatest.events.SeeStackDepthException" />
<meta name="keywords" content="SeeStackDepthException ScalaTest 3.0.2 org.scalatest.events.SeeStackDepthException" />
<meta http-equiv="content-type" content="text/html; charset=UTF-8" />
<link href="../../../lib/template.css" media="screen" type="text/css" rel="stylesheet" />
<link href="../../../lib/diagrams.css" media="screen" type="text/css" rel="stylesheet" id="diagrams-css" />
<script type="text/javascript" src="../../../lib/jquery.js" id="jquery-js"></script>
<script type="text/javascript" src="../../../lib/jquery-ui.js"></script>
<script type="text/javascript" src="../../../lib/template.js"></script>
<script type="text/javascript" src="../../../lib/tools.tooltip.js"></script>
<script type="text/javascript">
if(top === self) {
var url = '../../../index.html';
var hash = 'org.scalatest.events.SeeStackDepthException$';
var anchor = window.location.hash;
var anchor_opt = '';
if (anchor.length >= 1)
anchor_opt = '@' + anchor.substring(1);
window.location.href = url + '#' + hash + anchor_opt;
}
</script>
</head>
<body class="value">
<div id="definition">
<img alt="Object" src="../../../lib/object_big.png" />
<p id="owner"><a href="../../package.html" class="extype" name="org">org</a>.<a href="../package.html" class="extype" name="org.scalatest">scalatest</a>.<a href="package.html" class="extype" name="org.scalatest.events">events</a></p>
<h1>SeeStackDepthException</h1><h3><span class="morelinks"><div>Related Doc:
<a href="package.html" class="extype" name="org.scalatest.events">package events</a>
</div></span></h3><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
</div>
<h4 id="signature" class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">object</span>
</span>
<span class="symbol">
<span class="name">SeeStackDepthException</span><span class="result"> extends <a href="Location.html" class="extype" name="org.scalatest.events.Location">Location</a> with <span class="extype" name="scala.Product">Product</span> with <span class="extype" name="scala.Serializable">Serializable</span></span>
</span>
</h4>
<div id="comment" class="fullcommenttop"><div class="comment cmt"><p>Indicates the location should be taken from the stack depth exception, included elsewhere in
the event that contained this location.
</p></div><dl class="attributes block"> <dt>Source</dt><dd><a href="https://github.com/scalatest/scalatest/tree/release-3.0.2/scalatest//src/main/scala/org/scalatest/events/Location.scala" target="_blank">Location.scala</a></dd></dl><div class="toggleContainer block">
<span class="toggle">Linear Supertypes</span>
<div class="superTypes hiddenContent"><a href="Location.html" class="extype" name="org.scalatest.events.Location">Location</a>, <span class="extype" name="scala.Serializable">Serializable</span>, <span class="extype" name="java.io.Serializable">Serializable</span>, <span class="extype" name="scala.Product">Product</span>, <span class="extype" name="scala.Equals">Equals</span>, <span class="extype" name="scala.AnyRef">AnyRef</span>, <span class="extype" name="scala.Any">Any</span></div>
</div></div>
<div id="mbrsel">
<div id="textfilter"><span class="pre"></span><span class="input"><input id="mbrsel-input" type="text" accesskey="/" /></span><span class="post"></span></div>
<div id="order">
<span class="filtertype">Ordering</span>
<ol>
<li class="alpha in"><span>Alphabetic</span></li>
<li class="inherit out"><span>By Inheritance</span></li>
</ol>
</div>
<div id="ancestors">
<span class="filtertype">Inherited<br />
</span>
<ol id="linearization">
<li class="in" name="org.scalatest.events.SeeStackDepthException"><span>SeeStackDepthException</span></li><li class="in" name="org.scalatest.events.Location"><span>Location</span></li><li class="in" name="scala.Serializable"><span>Serializable</span></li><li class="in" name="java.io.Serializable"><span>Serializable</span></li><li class="in" name="scala.Product"><span>Product</span></li><li class="in" name="scala.Equals"><span>Equals</span></li><li class="in" name="scala.AnyRef"><span>AnyRef</span></li><li class="in" name="scala.Any"><span>Any</span></li>
</ol>
</div><div id="ancestors">
<span class="filtertype"></span>
<ol>
<li class="hideall out"><span>Hide All</span></li>
<li class="showall in"><span>Show All</span></li>
</ol>
</div>
<div id="visbl">
<span class="filtertype">Visibility</span>
<ol><li class="public in"><span>Public</span></li><li class="all out"><span>All</span></li></ol>
</div>
</div>
<div id="template">
<div id="allMembers">
<div id="values" class="values members">
<h3>Value Members</h3>
<ol><li name="scala.AnyRef#!=" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="!=(x$1:Any):Boolean"></a>
<a id="!=(Any):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span title="gt4s: $bang$eq" class="name">!=</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Any">Any</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@!=(x$1:Any):Boolean" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef โ Any</dd></dl></div>
</li><li name="scala.AnyRef###" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="##():Int"></a>
<a id="##():Int"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span title="gt4s: $hash$hash" class="name">##</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Int">Int</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@##():Int" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef โ Any</dd></dl></div>
</li><li name="scala.AnyRef#==" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="==(x$1:Any):Boolean"></a>
<a id="==(Any):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span title="gt4s: $eq$eq" class="name">==</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Any">Any</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@==(x$1:Any):Boolean" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef โ Any</dd></dl></div>
</li><li name="scala.Any#asInstanceOf" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="asInstanceOf[T0]:T0"></a>
<a id="asInstanceOf[T0]:T0"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">asInstanceOf</span><span class="tparams">[<span name="T0">T0</span>]</span><span class="result">: <span class="extype" name="scala.Any.asInstanceOf.T0">T0</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@asInstanceOf[T0]:T0" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>Any</dd></dl></div>
</li><li name="scala.AnyRef#clone" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="clone():Object"></a>
<a id="clone():AnyRef"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">clone</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.AnyRef">AnyRef</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@clone():Object" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected[<a href="../../../java$lang.html" class="extype" name="java.lang">java.lang</a>] </dd><dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">(<span>
<span class="defval" name="classOf[java.lang.CloneNotSupportedException]">...</span>
</span>)</span>
</dd></dl></div>
</li><li name="scala.AnyRef#eq" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="eq(x$1:AnyRef):Boolean"></a>
<a id="eq(AnyRef):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">eq</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.AnyRef">AnyRef</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@eq(x$1:AnyRef):Boolean" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#equals" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="equals(x$1:Any):Boolean"></a>
<a id="equals(Any):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">equals</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Any">Any</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@equals(x$1:Any):Boolean" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef โ Any</dd></dl></div>
</li><li name="scala.AnyRef#finalize" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="finalize():Unit"></a>
<a id="finalize():Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">finalize</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@finalize():Unit" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected[<a href="../../../java$lang.html" class="extype" name="java.lang">java.lang</a>] </dd><dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">(<span>
<span class="symbol">classOf[java.lang.Throwable]</span>
</span>)</span>
</dd></dl></div>
</li><li name="scala.AnyRef#getClass" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="getClass():Class[_]"></a>
<a id="getClass():Class[_]"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">getClass</span><span class="params">()</span><span class="result">: <span class="extype" name="java.lang.Class">Class</span>[_]</span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@getClass():Class[_]" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef โ Any</dd></dl></div>
</li><li name="scala.Any#isInstanceOf" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="isInstanceOf[T0]:Boolean"></a>
<a id="isInstanceOf[T0]:Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">isInstanceOf</span><span class="tparams">[<span name="T0">T0</span>]</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@isInstanceOf[T0]:Boolean" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>Any</dd></dl></div>
</li><li name="scala.AnyRef#ne" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="ne(x$1:AnyRef):Boolean"></a>
<a id="ne(AnyRef):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">ne</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.AnyRef">AnyRef</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@ne(x$1:AnyRef):Boolean" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#notify" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="notify():Unit"></a>
<a id="notify():Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">notify</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@notify():Unit" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#notifyAll" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="notifyAll():Unit"></a>
<a id="notifyAll():Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">notifyAll</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@notifyAll():Unit" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#synchronized" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="synchronized[T0](x$1:=>T0):T0"></a>
<a id="synchronized[T0](โT0):T0"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">synchronized</span><span class="tparams">[<span name="T0">T0</span>]</span><span class="params">(<span name="arg0">arg0: โ <span class="extype" name="java.lang.AnyRef.synchronized.T0">T0</span></span>)</span><span class="result">: <span class="extype" name="java.lang.AnyRef.synchronized.T0">T0</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@synchronized[T0](x$1:=>T0):T0" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#wait" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="wait():Unit"></a>
<a id="wait():Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">wait</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@wait():Unit" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">(<span>
<span class="defval" name="classOf[java.lang.InterruptedException]">...</span>
</span>)</span>
</dd></dl></div>
</li><li name="scala.AnyRef#wait" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="wait(x$1:Long,x$2:Int):Unit"></a>
<a id="wait(Long,Int):Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">wait</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Long">Long</span></span>, <span name="arg1">arg1: <span class="extype" name="scala.Int">Int</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@wait(x$1:Long,x$2:Int):Unit" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">(<span>
<span class="defval" name="classOf[java.lang.InterruptedException]">...</span>
</span>)</span>
</dd></dl></div>
</li><li name="scala.AnyRef#wait" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="wait(x$1:Long):Unit"></a>
<a id="wait(Long):Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">wait</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Long">Long</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@wait(x$1:Long):Unit" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">(<span>
<span class="defval" name="classOf[java.lang.InterruptedException]">...</span>
</span>)</span>
</dd></dl></div>
</li></ol>
</div>
</div>
<div id="inheritedMembers">
<div class="parent" name="org.scalatest.events.Location">
<h3>Inherited from <a href="Location.html" class="extype" name="org.scalatest.events.Location">Location</a></h3>
</div><div class="parent" name="scala.Serializable">
<h3>Inherited from <span class="extype" name="scala.Serializable">Serializable</span></h3>
</div><div class="parent" name="java.io.Serializable">
<h3>Inherited from <span class="extype" name="java.io.Serializable">Serializable</span></h3>
</div><div class="parent" name="scala.Product">
<h3>Inherited from <span class="extype" name="scala.Product">Product</span></h3>
</div><div class="parent" name="scala.Equals">
<h3>Inherited from <span class="extype" name="scala.Equals">Equals</span></h3>
</div><div class="parent" name="scala.AnyRef">
<h3>Inherited from <span class="extype" name="scala.AnyRef">AnyRef</span></h3>
</div><div class="parent" name="scala.Any">
<h3>Inherited from <span class="extype" name="scala.Any">Any</span></h3>
</div>
</div>
<div id="groupedMembers">
<div class="group" name="Ungrouped">
<h3>Ungrouped</h3>
</div>
</div>
</div>
<div id="tooltip"></div>
<div id="footer"> </div>
</body>
</html>
|
Java
|
# Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import hashlib
import os
from elasticsearch import Elasticsearch, TransportError
from elasticsearch.helpers import bulk_index
from warehouse.utils import AttributeDict
class Index(object):
_index = "warehouse"
def __init__(self, models, config):
self.models = models
self.config = config
self.es = Elasticsearch(
hosts=self.config.hosts,
**self.config.get("client_options", {})
)
self.types = AttributeDict()
def register(self, type_):
obj = type_(self)
self.types[obj._type] = obj
def reindex(self, index=None, alias=True, keep_old=False):
# Generate an Index Name for Warehouse
index = "".join([
index if index is not None else self._index,
hashlib.md5(os.urandom(16)).hexdigest()[:8],
])
# Create this index
self.es.indices.create(index, {
"mappings": {
doc_type._type: doc_type.get_mapping()
for doc_type in self.types.values()
},
})
# Index everything into the new index
for doc_type in self.types.values():
doc_type.index_all(index=index)
# Update the alias unless we've been told not to
if alias:
self.update_alias(self._index, index, keep_old=keep_old)
def update_alias(self, alias, index, keep_old=False):
# Get the old index from ElasticSearch
try:
old_index = self.es.indices.get_alias(self._index).keys()[0]
except TransportError as exc:
if not exc.status_code == 404:
raise
old_index = None
# Remove the alias to the old index if it exists
if old_index is not None:
actions = [{"remove": {"index": old_index, "alias": alias}}]
else:
actions = []
# Add the alias to the new index
actions += [{"add": {"index": index, "alias": alias}}]
# Update To the New Index
self.es.indices.update_aliases({"actions": actions})
# Delete the old index if it exists and unless we're keeping it
if not keep_old and old_index is not None:
self.es.indices.delete(old_index)
class BaseMapping(object):
SEARCH_LIMIT = 25
def __init__(self, index):
self.index = index
def get_mapping(self):
raise NotImplementedError
def get_indexable(self):
raise NotImplementedError
def extract_id(self, item):
raise NotImplementedError
def extract_document(self, item):
raise NotImplementedError
def index_all(self, index=None):
# Determine which index we are indexing into
_index = index if index is not None else self.index._index
# Bulk Index our documents
bulk_index(
self.index.es,
[
{
"_index": _index,
"_type": self._type,
"_id": self.extract_id(item),
"_source": self.extract_document(item),
}
for item in self.get_indexable()
],
)
def search(self, query):
raise NotImplementedError
|
Java
|
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.support.master;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.support.ThreadedActionListener;
import org.elasticsearch.action.support.replication.ClusterStateCreationUtils;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.NotMasterException;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.DummyTransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.MasterNotDiscoveredException;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.transport.CapturingTransport;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.TransportService;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.test.ClusterServiceUtils.createClusterService;
import static org.elasticsearch.test.ClusterServiceUtils.setState;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
public class TransportMasterNodeActionTests extends ESTestCase {
private static ThreadPool threadPool;
private ClusterService clusterService;
private TransportService transportService;
private CapturingTransport transport;
private DiscoveryNode localNode;
private DiscoveryNode remoteNode;
private DiscoveryNode[] allNodes;
@BeforeClass
public static void beforeClass() {
threadPool = new TestThreadPool("TransportMasterNodeActionTests");
}
@Override
@Before
public void setUp() throws Exception {
super.setUp();
transport = new CapturingTransport();
clusterService = createClusterService(threadPool);
transportService = new TransportService(clusterService.getSettings(), transport, threadPool);
transportService.start();
transportService.acceptIncomingRequests();
localNode = new DiscoveryNode("local_node", DummyTransportAddress.INSTANCE, Collections.emptyMap(),
Collections.singleton(DiscoveryNode.Role.MASTER), Version.CURRENT);
remoteNode = new DiscoveryNode("remote_node", DummyTransportAddress.INSTANCE, Collections.emptyMap(),
Collections.singleton(DiscoveryNode.Role.MASTER), Version.CURRENT);
allNodes = new DiscoveryNode[]{localNode, remoteNode};
}
@After
public void tearDown() throws Exception {
super.tearDown();
clusterService.close();
transportService.close();
}
@AfterClass
public static void afterClass() {
ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);
threadPool = null;
}
void assertListenerThrows(String msg, ActionFuture<?> listener, Class<?> klass) throws InterruptedException {
try {
listener.get();
fail(msg);
} catch (ExecutionException ex) {
assertThat(ex.getCause(), instanceOf(klass));
}
}
public static class Request extends MasterNodeRequest<Request> {
@Override
public ActionRequestValidationException validate() {
return null;
}
}
class Response extends ActionResponse {}
class Action extends TransportMasterNodeAction<Request, Response> {
Action(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) {
super(settings, actionName, transportService, clusterService, threadPool,
new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY), Request::new);
}
@Override
protected void doExecute(Task task, final Request request, ActionListener<Response> listener) {
// remove unneeded threading by wrapping listener with SAME to prevent super.doExecute from wrapping it with LISTENER
super.doExecute(task, request, new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.SAME, listener));
}
@Override
protected String executor() {
// very lightweight operation in memory, no need to fork to a thread
return ThreadPool.Names.SAME;
}
@Override
protected Response newResponse() {
return new Response();
}
@Override
protected void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
listener.onResponse(new Response()); // default implementation, overridden in specific tests
}
@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
return null; // default implementation, overridden in specific tests
}
}
public void testLocalOperationWithoutBlocks() throws ExecutionException, InterruptedException {
final boolean masterOperationFailure = randomBoolean();
Request request = new Request();
PlainActionFuture<Response> listener = new PlainActionFuture<>();
final Throwable exception = new Throwable();
final Response response = new Response();
setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes));
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
@Override
protected void masterOperation(Task task, Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
if (masterOperationFailure) {
listener.onFailure(exception);
} else {
listener.onResponse(response);
}
}
}.execute(request, listener);
assertTrue(listener.isDone());
if (masterOperationFailure) {
try {
listener.get();
fail("Expected exception but returned proper result");
} catch (ExecutionException ex) {
assertThat(ex.getCause(), equalTo(exception));
}
} else {
assertThat(listener.get(), equalTo(response));
}
}
public void testLocalOperationWithBlocks() throws ExecutionException, InterruptedException {
final boolean retryableBlock = randomBoolean();
final boolean unblockBeforeTimeout = randomBoolean();
Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(unblockBeforeTimeout ? 60 : 0));
PlainActionFuture<Response> listener = new PlainActionFuture<>();
ClusterBlock block = new ClusterBlock(1, "", retryableBlock, true,
randomFrom(RestStatus.values()), ClusterBlockLevel.ALL);
ClusterState stateWithBlock = ClusterState.builder(ClusterStateCreationUtils.state(localNode, localNode, allNodes))
.blocks(ClusterBlocks.builder().addGlobalBlock(block)).build();
setState(clusterService, stateWithBlock);
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
Set<ClusterBlock> blocks = state.blocks().global();
return blocks.isEmpty() ? null : new ClusterBlockException(blocks);
}
}.execute(request, listener);
if (retryableBlock && unblockBeforeTimeout) {
assertFalse(listener.isDone());
setState(clusterService, ClusterState.builder(ClusterStateCreationUtils.state(localNode, localNode, allNodes))
.blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).build());
assertTrue(listener.isDone());
listener.get();
return;
}
assertTrue(listener.isDone());
if (retryableBlock) {
try {
listener.get();
fail("Expected exception but returned proper result");
} catch (ExecutionException ex) {
assertThat(ex.getCause(), instanceOf(MasterNotDiscoveredException.class));
assertThat(ex.getCause().getCause(), instanceOf(ClusterBlockException.class));
}
} else {
assertListenerThrows("ClusterBlockException should be thrown", listener, ClusterBlockException.class);
}
}
public void testForceLocalOperation() throws ExecutionException, InterruptedException {
Request request = new Request();
PlainActionFuture<Response> listener = new PlainActionFuture<>();
setState(clusterService, ClusterStateCreationUtils.state(localNode, randomFrom(null, localNode, remoteNode), allNodes));
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
@Override
protected boolean localExecute(Request request) {
return true;
}
}.execute(request, listener);
assertTrue(listener.isDone());
listener.get();
}
public void testMasterNotAvailable() throws ExecutionException, InterruptedException {
Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(0));
setState(clusterService, ClusterStateCreationUtils.state(localNode, null, allNodes));
PlainActionFuture<Response> listener = new PlainActionFuture<>();
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool).execute(request, listener);
assertTrue(listener.isDone());
assertListenerThrows("MasterNotDiscoveredException should be thrown", listener, MasterNotDiscoveredException.class);
}
public void testMasterBecomesAvailable() throws ExecutionException, InterruptedException {
Request request = new Request();
setState(clusterService, ClusterStateCreationUtils.state(localNode, null, allNodes));
PlainActionFuture<Response> listener = new PlainActionFuture<>();
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool).execute(request, listener);
assertFalse(listener.isDone());
setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes));
assertTrue(listener.isDone());
listener.get();
}
public void testDelegateToMaster() throws ExecutionException, InterruptedException {
Request request = new Request();
setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes));
PlainActionFuture<Response> listener = new PlainActionFuture<>();
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool).execute(request, listener);
assertThat(transport.capturedRequests().length, equalTo(1));
CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0];
assertTrue(capturedRequest.node.isMasterNode());
assertThat(capturedRequest.request, equalTo(request));
assertThat(capturedRequest.action, equalTo("testAction"));
Response response = new Response();
transport.handleResponse(capturedRequest.requestId, response);
assertTrue(listener.isDone());
assertThat(listener.get(), equalTo(response));
}
public void testDelegateToFailingMaster() throws ExecutionException, InterruptedException {
boolean failsWithConnectTransportException = randomBoolean();
Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(failsWithConnectTransportException ? 60 : 0));
setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes));
PlainActionFuture<Response> listener = new PlainActionFuture<>();
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool).execute(request, listener);
assertThat(transport.capturedRequests().length, equalTo(1));
CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0];
assertTrue(capturedRequest.node.isMasterNode());
assertThat(capturedRequest.request, equalTo(request));
assertThat(capturedRequest.action, equalTo("testAction"));
if (failsWithConnectTransportException) {
transport.handleRemoteError(capturedRequest.requestId, new ConnectTransportException(remoteNode, "Fake error"));
assertFalse(listener.isDone());
setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes));
assertTrue(listener.isDone());
listener.get();
} else {
Throwable t = new Throwable();
transport.handleRemoteError(capturedRequest.requestId, t);
assertTrue(listener.isDone());
try {
listener.get();
fail("Expected exception but returned proper result");
} catch (ExecutionException ex) {
assertThat(ex.getCause().getCause(), equalTo(t));
}
}
}
public void testMasterFailoverAfterStepDown() throws ExecutionException, InterruptedException {
Request request = new Request().masterNodeTimeout(TimeValue.timeValueHours(1));
PlainActionFuture<Response> listener = new PlainActionFuture<>();
final Response response = new Response();
setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes));
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
@Override
protected void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
// The other node has become master, simulate failures of this node while publishing cluster state through ZenDiscovery
setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes));
Throwable failure = randomBoolean()
? new Discovery.FailedToCommitClusterStateException("Fake error")
: new NotMasterException("Fake error");
listener.onFailure(failure);
}
}.execute(request, listener);
assertThat(transport.capturedRequests().length, equalTo(1));
CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0];
assertTrue(capturedRequest.node.isMasterNode());
assertThat(capturedRequest.request, equalTo(request));
assertThat(capturedRequest.action, equalTo("testAction"));
transport.handleResponse(capturedRequest.requestId, response);
assertTrue(listener.isDone());
assertThat(listener.get(), equalTo(response));
}
}
|
Java
|
/*
* Copyright 2000-2016 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.ui.components;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.util.Key;
import com.intellij.openapi.util.SystemInfo;
import com.intellij.openapi.util.registry.Registry;
import com.intellij.openapi.wm.IdeGlassPane;
import com.intellij.ui.IdeBorderFactory;
import com.intellij.util.ArrayUtil;
import com.intellij.util.ReflectionUtil;
import com.intellij.util.ui.ButtonlessScrollBarUI;
import com.intellij.util.ui.JBInsets;
import com.intellij.util.ui.RegionPainter;
import com.intellij.util.ui.UIUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.swing.*;
import javax.swing.border.Border;
import javax.swing.border.LineBorder;
import javax.swing.plaf.ScrollBarUI;
import javax.swing.plaf.ScrollPaneUI;
import javax.swing.plaf.UIResource;
import javax.swing.plaf.basic.BasicScrollBarUI;
import javax.swing.plaf.basic.BasicScrollPaneUI;
import java.awt.*;
import java.awt.event.InputEvent;
import java.awt.event.MouseEvent;
import java.awt.event.MouseWheelEvent;
import java.awt.event.MouseWheelListener;
import java.lang.reflect.Field;
public class JBScrollPane extends JScrollPane {
/**
* This key is used to specify which colors the scroll bars on the pane should use.
* If a client property is set to {@code true} the bar's brightness
* will be modified according to the view's background.
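* <p>Illustrative usage (a sketch, not an API guarantee; {@code pane} stands for an
* existing {@code JBScrollPane} instance):
* {@code UIUtil.putClientProperty(pane, BRIGHTNESS_FROM_VIEW, true)}.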
*
* @see UIUtil#putClientProperty
* @see UIUtil#isUnderDarcula
*/
public static final Key<Boolean> BRIGHTNESS_FROM_VIEW = Key.create("JB_SCROLL_PANE_BRIGHTNESS_FROM_VIEW");
@Deprecated
public static final RegionPainter<Float> THUMB_PAINTER = ScrollPainter.EditorThumb.DEFAULT;
@Deprecated
public static final RegionPainter<Float> THUMB_DARK_PAINTER = ScrollPainter.EditorThumb.DARCULA;
@Deprecated
public static final RegionPainter<Float> MAC_THUMB_PAINTER = ScrollPainter.EditorThumb.Mac.DEFAULT;
@Deprecated
public static final RegionPainter<Float> MAC_THUMB_DARK_PAINTER = ScrollPainter.EditorThumb.Mac.DARCULA;
private static final Logger LOG = Logger.getInstance(JBScrollPane.class);
private int myViewportBorderWidth = -1;
private boolean myHasOverlayScrollbars;
private volatile boolean myBackgroundRequested; // avoid cyclic references
public JBScrollPane(int viewportWidth) {
init(false);
myViewportBorderWidth = viewportWidth;
updateViewportBorder();
}
public JBScrollPane() {
init();
}
public JBScrollPane(Component view) {
super(view);
init();
}
public JBScrollPane(int vsbPolicy, int hsbPolicy) {
super(vsbPolicy, hsbPolicy);
init();
}
public JBScrollPane(Component view, int vsbPolicy, int hsbPolicy) {
super(view, vsbPolicy, hsbPolicy);
init();
}
@Override
public Color getBackground() {
Color color = super.getBackground();
if (!myBackgroundRequested && EventQueue.isDispatchThread() && Registry.is("ide.scroll.background.auto")) {
if (!isBackgroundSet() || color instanceof UIResource) {
Component child = getViewport();
if (child != null) {
try {
myBackgroundRequested = true;
return child.getBackground();
}
finally {
myBackgroundRequested = false;
}
}
}
}
return color;
}
static Color getViewBackground(JScrollPane pane) {
if (pane == null) return null;
JViewport viewport = pane.getViewport();
if (viewport == null) return null;
Component view = viewport.getView();
if (view == null) return null;
return view.getBackground();
}
public static JScrollPane findScrollPane(Component c) {
if (c == null) return null;
if (!(c instanceof JViewport)) {
Container vp = c.getParent();
if (vp instanceof JViewport) c = vp;
}
c = c.getParent();
if (!(c instanceof JScrollPane)) return null;
return (JScrollPane)c;
}
private void init() {
init(true);
}
private void init(boolean setupCorners) {
setLayout(Registry.is("ide.scroll.new.layout") ? new Layout() : new ScrollPaneLayout());
if (setupCorners) {
setupCorners();
}
}
protected void setupCorners() {
setBorder(IdeBorderFactory.createBorder());
setCorner(UPPER_RIGHT_CORNER, new Corner(UPPER_RIGHT_CORNER));
setCorner(UPPER_LEFT_CORNER, new Corner(UPPER_LEFT_CORNER));
setCorner(LOWER_RIGHT_CORNER, new Corner(LOWER_RIGHT_CORNER));
setCorner(LOWER_LEFT_CORNER, new Corner(LOWER_LEFT_CORNER));
}
@Override
public void setUI(ScrollPaneUI ui) {
super.setUI(ui);
updateViewportBorder();
if (ui instanceof BasicScrollPaneUI) {
try {
Field field = BasicScrollPaneUI.class.getDeclaredField("mouseScrollListener");
field.setAccessible(true);
Object value = field.get(ui);
if (value instanceof MouseWheelListener) {
MouseWheelListener oldListener = (MouseWheelListener)value;
MouseWheelListener newListener = event -> {
if (isScrollEvent(event)) {
Object source = event.getSource();
if (source instanceof JScrollPane) {
JScrollPane pane = (JScrollPane)source;
if (pane.isWheelScrollingEnabled()) {
JScrollBar bar = event.isShiftDown() ? pane.getHorizontalScrollBar() : pane.getVerticalScrollBar();
if (bar != null && bar.isVisible()) oldListener.mouseWheelMoved(event);
}
}
}
};
field.set(ui, newListener);
// replace listener if field updated successfully
removeMouseWheelListener(oldListener);
addMouseWheelListener(newListener);
}
}
catch (Exception exception) {
LOG.warn(exception);
}
}
}
@Override
public boolean isOptimizedDrawingEnabled() {
if (getLayout() instanceof Layout) {
return isOptimizedDrawingEnabledFor(getVerticalScrollBar()) &&
isOptimizedDrawingEnabledFor(getHorizontalScrollBar());
}
return !myHasOverlayScrollbars;
}
/**
* Returns {@code false} for visible translucent scroll bars, or {@code true} otherwise.
* It is needed to repaint translucent scroll bars on viewport repainting.
*/
private static boolean isOptimizedDrawingEnabledFor(JScrollBar bar) {
return bar == null || bar.isOpaque() || !bar.isVisible();
}
private void updateViewportBorder() {
if (getViewportBorder() instanceof ViewportBorder) {
setViewportBorder(new ViewportBorder(myViewportBorderWidth >= 0 ? myViewportBorderWidth : 1));
}
}
public static ViewportBorder createIndentBorder() {
return new ViewportBorder(2);
}
@Override
public JScrollBar createVerticalScrollBar() {
return new MyScrollBar(Adjustable.VERTICAL);
}
@NotNull
@Override
public JScrollBar createHorizontalScrollBar() {
return new MyScrollBar(Adjustable.HORIZONTAL);
}
@Override
protected JViewport createViewport() {
return new JBViewport();
}
@SuppressWarnings("deprecation")
@Override
public void layout() {
LayoutManager layout = getLayout();
ScrollPaneLayout scrollLayout = layout instanceof ScrollPaneLayout ? (ScrollPaneLayout)layout : null;
// Now we let JScrollPane layout everything as necessary
super.layout();
if (layout instanceof Layout) return;
if (scrollLayout != null) {
// Now it's time to jump in and expand the viewport so it fits the whole area
// (taking into consideration corners, headers and other stuff).
myHasOverlayScrollbars = relayoutScrollbars(
this, scrollLayout,
myHasOverlayScrollbars // If last time we did relayouting, we should restore it back.
);
}
else {
myHasOverlayScrollbars = false;
}
}
private boolean relayoutScrollbars(@NotNull JComponent container, @NotNull ScrollPaneLayout layout, boolean forceRelayout) {
JViewport viewport = layout.getViewport();
if (viewport == null) return false;
JScrollBar vsb = layout.getVerticalScrollBar();
JScrollBar hsb = layout.getHorizontalScrollBar();
JViewport colHead = layout.getColumnHeader();
JViewport rowHead = layout.getRowHeader();
Rectangle viewportBounds = viewport.getBounds();
boolean extendViewportUnderVScrollbar = vsb != null && shouldExtendViewportUnderScrollbar(vsb);
boolean extendViewportUnderHScrollbar = hsb != null && shouldExtendViewportUnderScrollbar(hsb);
boolean hasOverlayScrollbars = extendViewportUnderVScrollbar || extendViewportUnderHScrollbar;
if (!hasOverlayScrollbars && !forceRelayout) return false;
container.setComponentZOrder(viewport, container.getComponentCount() - 1);
if (vsb != null) container.setComponentZOrder(vsb, 0);
if (hsb != null) container.setComponentZOrder(hsb, 0);
if (extendViewportUnderVScrollbar) {
int x2 = Math.max(vsb.getX() + vsb.getWidth(), viewportBounds.x + viewportBounds.width);
viewportBounds.x = Math.min(viewportBounds.x, vsb.getX());
viewportBounds.width = x2 - viewportBounds.x;
}
if (extendViewportUnderHScrollbar) {
int y2 = Math.max(hsb.getY() + hsb.getHeight(), viewportBounds.y + viewportBounds.height);
viewportBounds.y = Math.min(viewportBounds.y, hsb.getY());
viewportBounds.height = y2 - viewportBounds.y;
}
if (extendViewportUnderVScrollbar) {
if (hsb != null) {
Rectangle scrollbarBounds = hsb.getBounds();
scrollbarBounds.width = viewportBounds.x + viewportBounds.width - scrollbarBounds.x;
hsb.setBounds(scrollbarBounds);
}
if (colHead != null) {
Rectangle headerBounds = colHead.getBounds();
headerBounds.width = viewportBounds.width;
colHead.setBounds(headerBounds);
}
hideFromView(layout.getCorner(UPPER_RIGHT_CORNER));
hideFromView(layout.getCorner(LOWER_RIGHT_CORNER));
}
if (extendViewportUnderHScrollbar) {
if (vsb != null) {
Rectangle scrollbarBounds = vsb.getBounds();
scrollbarBounds.height = viewportBounds.y + viewportBounds.height - scrollbarBounds.y;
vsb.setBounds(scrollbarBounds);
}
if (rowHead != null) {
Rectangle headerBounds = rowHead.getBounds();
headerBounds.height = viewportBounds.height;
rowHead.setBounds(headerBounds);
}
hideFromView(layout.getCorner(LOWER_LEFT_CORNER));
hideFromView(layout.getCorner(LOWER_RIGHT_CORNER));
}
viewport.setBounds(viewportBounds);
return hasOverlayScrollbars;
}
private boolean shouldExtendViewportUnderScrollbar(@Nullable JScrollBar scrollbar) {
if (scrollbar == null || !scrollbar.isVisible()) return false;
return isOverlaidScrollbar(scrollbar);
}
protected boolean isOverlaidScrollbar(@Nullable JScrollBar scrollbar) {
if (!ButtonlessScrollBarUI.isMacOverlayScrollbarSupported()) return false;
ScrollBarUI vsbUI = scrollbar == null ? null : scrollbar.getUI();
return vsbUI instanceof ButtonlessScrollBarUI && !((ButtonlessScrollBarUI)vsbUI).alwaysShowTrack();
}
private static void hideFromView(Component component) {
if (component == null) return;
component.setBounds(-10, -10, 1, 1);
}
private class MyScrollBar extends ScrollBar implements IdeGlassPane.TopComponent {
public MyScrollBar(int orientation) {
super(orientation);
}
@Override
public void updateUI() {
ScrollBarUI ui = getUI();
if (ui instanceof DefaultScrollBarUI) return;
setUI(JBScrollBar.createUI(this));
}
@Override
public boolean canBePreprocessed(MouseEvent e) {
return JBScrollPane.canBePreprocessed(e, this);
}
}
public static boolean canBePreprocessed(MouseEvent e, JScrollBar bar) {
if (e.getID() == MouseEvent.MOUSE_MOVED || e.getID() == MouseEvent.MOUSE_PRESSED) {
ScrollBarUI ui = bar.getUI();
if (ui instanceof BasicScrollBarUI) {
BasicScrollBarUI bui = (BasicScrollBarUI)ui;
try {
Rectangle rect = (Rectangle)ReflectionUtil.getDeclaredMethod(BasicScrollBarUI.class, "getThumbBounds", ArrayUtil.EMPTY_CLASS_ARRAY).invoke(bui);
Point point = SwingUtilities.convertPoint(e.getComponent(), e.getX(), e.getY(), bar);
return !rect.contains(point);
}
catch (Exception e1) {
return true;
}
}
else if (ui instanceof DefaultScrollBarUI) {
DefaultScrollBarUI dui = (DefaultScrollBarUI)ui;
Point point = e.getLocationOnScreen();
SwingUtilities.convertPointFromScreen(point, bar);
return !dui.isThumbContains(point.x, point.y);
}
}
return true;
}
private static class Corner extends JPanel {
private final String myPos;
public Corner(String pos) {
myPos = pos;
ScrollColorProducer.setBackground(this);
ScrollColorProducer.setForeground(this);
}
@Override
protected void paintComponent(Graphics g) {
g.setColor(getBackground());
g.fillRect(0, 0, getWidth(), getHeight());
if (SystemInfo.isMac || !Registry.is("ide.scroll.track.border.paint")) return;
g.setColor(getForeground());
int x2 = getWidth() - 1;
int y2 = getHeight() - 1;
if (myPos == UPPER_LEFT_CORNER || myPos == UPPER_RIGHT_CORNER) {
g.drawLine(0, y2, x2, y2);
}
if (myPos == LOWER_LEFT_CORNER || myPos == LOWER_RIGHT_CORNER) {
g.drawLine(0, 0, x2, 0);
}
if (myPos == UPPER_LEFT_CORNER || myPos == LOWER_LEFT_CORNER) {
g.drawLine(x2, 0, x2, y2);
}
if (myPos == UPPER_RIGHT_CORNER || myPos == LOWER_RIGHT_CORNER) {
g.drawLine(0, 0, 0, y2);
}
}
}
private static class ViewportBorder extends LineBorder {
public ViewportBorder(int thickness) {
super(null, thickness);
}
@Override
public void paintBorder(Component c, Graphics g, int x, int y, int width, int height) {
updateColor(c);
super.paintBorder(c, g, x, y, width, height);
}
private void updateColor(Component c) {
if (!(c instanceof JScrollPane)) return;
lineColor = getViewBackground((JScrollPane)c);
}
}
/**
* These client properties modify a scroll pane layout.
* Use the class object as a property key.
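* <p>Illustrative usage (a sketch; the effect depends on the pane's internal layout):
* {@code pane.putClientProperty(Flip.class, Flip.HORIZONTAL)} asks a left-to-right
* pane to place its vertical scroll bar on the left edge.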
*
* @see #putClientProperty(Object, Object)
*/
public enum Flip {
NONE, VERTICAL, HORIZONTAL, BOTH
}
/**
* These client properties show a component's position on a scroll pane.
* They are set by the internal layout manager of the scroll pane.
*/
public enum Alignment {
TOP, LEFT, RIGHT, BOTTOM;
public static Alignment get(JComponent component) {
if (component != null) {
Object property = component.getClientProperty(Alignment.class);
if (property instanceof Alignment) return (Alignment)property;
Container parent = component.getParent();
if (parent instanceof JScrollPane) {
JScrollPane pane = (JScrollPane)parent;
if (component == pane.getColumnHeader()) {
return TOP;
}
if (component == pane.getHorizontalScrollBar()) {
return BOTTOM;
}
boolean ltr = pane.getComponentOrientation().isLeftToRight();
if (component == pane.getVerticalScrollBar()) {
return ltr ? RIGHT : LEFT;
}
if (component == pane.getRowHeader()) {
return ltr ? LEFT : RIGHT;
}
}
// assume alignment for a scroll bar,
// which is not contained in a scroll pane
if (component instanceof JScrollBar) {
JScrollBar bar = (JScrollBar)component;
switch (bar.getOrientation()) {
case Adjustable.HORIZONTAL:
return BOTTOM;
case Adjustable.VERTICAL:
return bar.getComponentOrientation().isLeftToRight()
? RIGHT
: LEFT;
}
}
}
return null;
}
}
/**
* ScrollPaneLayout implementation that supports
* ScrollBar flipping and non-opaque ScrollBars.
*/
private static class Layout extends ScrollPaneLayout {
private static final Insets EMPTY_INSETS = new Insets(0, 0, 0, 0);
@Override
public void layoutContainer(Container parent) {
JScrollPane pane = (JScrollPane)parent;
// Calculate inner bounds of the scroll pane
Rectangle bounds = new Rectangle(pane.getWidth(), pane.getHeight());
JBInsets.removeFrom(bounds, pane.getInsets());
// Determine positions of scroll bars on the scroll pane
Object property = pane.getClientProperty(Flip.class);
Flip flip = property instanceof Flip ? (Flip)property : Flip.NONE;
boolean hsbOnTop = flip == Flip.BOTH || flip == Flip.VERTICAL;
boolean vsbOnLeft = pane.getComponentOrientation().isLeftToRight()
? flip == Flip.BOTH || flip == Flip.HORIZONTAL
: flip == Flip.NONE || flip == Flip.VERTICAL;
// If there's a visible row header remove the space it needs.
// The row header is treated as if it were fixed width, arbitrary height.
Rectangle rowHeadBounds = new Rectangle(bounds.x, 0, 0, 0);
if (rowHead != null && rowHead.isVisible()) {
rowHeadBounds.width = min(bounds.width, rowHead.getPreferredSize().width);
bounds.width -= rowHeadBounds.width;
if (vsbOnLeft) {
rowHeadBounds.x += bounds.width;
}
else {
bounds.x += rowHeadBounds.width;
}
}
// If there's a visible column header remove the space it needs.
// The column header is treated as if it were fixed height, arbitrary width.
Rectangle colHeadBounds = new Rectangle(0, bounds.y, 0, 0);
if (colHead != null && colHead.isVisible()) {
colHeadBounds.height = min(bounds.height, colHead.getPreferredSize().height);
bounds.height -= colHeadBounds.height;
if (hsbOnTop) {
colHeadBounds.y += bounds.height;
}
else {
bounds.y += colHeadBounds.height;
}
}
// If there's a JScrollPane.viewportBorder, remove the space it occupies
Border border = pane.getViewportBorder();
Insets insets = border == null ? null : border.getBorderInsets(parent);
JBInsets.removeFrom(bounds, insets);
if (insets == null) insets = EMPTY_INSETS;
// At this point:
// colHeadBounds is correct except for its width and x
// rowHeadBounds is correct except for its height and y
// bounds - the space available for the viewport and scroll bars
// Once we're through computing the dimensions of these three parts
// we can go back and set the bounds for the corners and the dimensions of
// colHeadBounds.x, colHeadBounds.width, rowHeadBounds.y, rowHeadBounds.height.
boolean isEmpty = bounds.width < 0 || bounds.height < 0;
Component view = viewport == null ? null : viewport.getView();
Dimension viewPreferredSize = view == null ? new Dimension() : view.getPreferredSize();
if (view instanceof JComponent) JBViewport.fixPreferredSize(viewPreferredSize, (JComponent)view, vsb, hsb);
Dimension viewportExtentSize = viewport == null ? new Dimension() : viewport.toViewCoordinates(bounds.getSize());
// If the view is tracking the viewports width we don't bother with a horizontal scrollbar.
// If the view is tracking the viewports height we don't bother with a vertical scrollbar.
Scrollable scrollable = null;
boolean viewTracksViewportWidth = false;
boolean viewTracksViewportHeight = false;
// Don't bother checking the Scrollable methods if there is no room for the viewport,
// we aren't going to show any scroll bars in this case anyway.
if (!isEmpty && view instanceof Scrollable) {
scrollable = (Scrollable)view;
viewTracksViewportWidth = scrollable.getScrollableTracksViewportWidth();
viewTracksViewportHeight = scrollable.getScrollableTracksViewportHeight();
}
// If there's a vertical scroll bar and we need one, allocate space for it.
// A vertical scroll bar is considered to be fixed width, arbitrary height.
boolean vsbOpaque = false;
boolean vsbNeeded = false;
int vsbPolicy = pane.getVerticalScrollBarPolicy();
if (!isEmpty && vsbPolicy != VERTICAL_SCROLLBAR_NEVER) {
vsbNeeded = vsbPolicy == VERTICAL_SCROLLBAR_ALWAYS
|| !viewTracksViewportHeight && viewPreferredSize.height > viewportExtentSize.height;
}
Rectangle vsbBounds = new Rectangle(0, bounds.y - insets.top, 0, 0);
if (vsb != null) {
if (!SystemInfo.isMac && view instanceof JTable) vsb.setOpaque(true);
vsbOpaque = vsb.isOpaque();
if (vsbNeeded) {
adjustForVSB(bounds, insets, vsbBounds, vsbOpaque, vsbOnLeft);
if (vsbOpaque && viewport != null) {
viewportExtentSize = viewport.toViewCoordinates(bounds.getSize());
}
}
}
// If there's a horizontal scroll bar and we need one, allocate space for it.
// A horizontal scroll bar is considered to be fixed height, arbitrary width.
boolean hsbOpaque = false;
boolean hsbNeeded = false;
int hsbPolicy = pane.getHorizontalScrollBarPolicy();
if (!isEmpty && hsbPolicy != HORIZONTAL_SCROLLBAR_NEVER) {
hsbNeeded = hsbPolicy == HORIZONTAL_SCROLLBAR_ALWAYS
|| !viewTracksViewportWidth && viewPreferredSize.width > viewportExtentSize.width;
}
Rectangle hsbBounds = new Rectangle(bounds.x - insets.left, 0, 0, 0);
if (hsb != null) {
if (!SystemInfo.isMac && view instanceof JTable) hsb.setOpaque(true);
hsbOpaque = hsb.isOpaque();
if (hsbNeeded) {
adjustForHSB(bounds, insets, hsbBounds, hsbOpaque, hsbOnTop);
if (hsbOpaque && viewport != null) {
// If we added the horizontal scrollbar and reduced the vertical space
// we may have to add the vertical scrollbar, if that hasn't been done so already.
if (vsb != null && !vsbNeeded && vsbPolicy != VERTICAL_SCROLLBAR_NEVER) {
viewportExtentSize = viewport.toViewCoordinates(bounds.getSize());
vsbNeeded = viewPreferredSize.height > viewportExtentSize.height;
if (vsbNeeded) adjustForVSB(bounds, insets, vsbBounds, vsbOpaque, vsbOnLeft);
}
}
}
}
// Set the size of the viewport first, and then recheck the Scrollable methods.
// Some components base their return values for the Scrollable methods on the size of the viewport,
// so that if we don't ask after resetting the bounds we may have gotten the wrong answer.
if (viewport != null) {
viewport.setBounds(bounds);
if (scrollable != null && hsbOpaque && vsbOpaque) {
viewTracksViewportWidth = scrollable.getScrollableTracksViewportWidth();
viewTracksViewportHeight = scrollable.getScrollableTracksViewportHeight();
viewportExtentSize = viewport.toViewCoordinates(bounds.getSize());
boolean vsbNeededOld = vsbNeeded;
if (vsb != null && vsbPolicy == VERTICAL_SCROLLBAR_AS_NEEDED) {
boolean vsbNeededNew = !viewTracksViewportHeight && viewPreferredSize.height > viewportExtentSize.height;
if (vsbNeeded != vsbNeededNew) {
vsbNeeded = vsbNeededNew;
if (vsbNeeded) {
adjustForVSB(bounds, insets, vsbBounds, vsbOpaque, vsbOnLeft);
}
else if (vsbOpaque) {
bounds.width += vsbBounds.width;
}
if (vsbOpaque) viewportExtentSize = viewport.toViewCoordinates(bounds.getSize());
}
}
boolean hsbNeededOld = hsbNeeded;
if (hsb != null && hsbPolicy == HORIZONTAL_SCROLLBAR_AS_NEEDED) {
boolean hsbNeededNew = !viewTracksViewportWidth && viewPreferredSize.width > viewportExtentSize.width;
if (hsbNeeded != hsbNeededNew) {
hsbNeeded = hsbNeededNew;
if (hsbNeeded) {
adjustForHSB(bounds, insets, hsbBounds, hsbOpaque, hsbOnTop);
}
else if (hsbOpaque) {
bounds.height += hsbBounds.height;
}
if (hsbOpaque && vsb != null && !vsbNeeded && vsbPolicy != VERTICAL_SCROLLBAR_NEVER) {
viewportExtentSize = viewport.toViewCoordinates(bounds.getSize());
vsbNeeded = viewPreferredSize.height > viewportExtentSize.height;
if (vsbNeeded) adjustForVSB(bounds, insets, vsbBounds, vsbOpaque, vsbOnLeft);
}
}
}
if (hsbNeededOld != hsbNeeded || vsbNeededOld != vsbNeeded) {
viewport.setBounds(bounds);
// You could argue that we should recheck the Scrollable methods again until they stop changing,
// but they might never stop changing, so we stop here and don't do any additional checks.
}
}
}
// Set the bounds of the row header.
rowHeadBounds.y = bounds.y - insets.top;
rowHeadBounds.height = bounds.height + insets.top + insets.bottom;
if (rowHead != null) {
rowHead.setBounds(rowHeadBounds);
rowHead.putClientProperty(Alignment.class, vsbOnLeft ? Alignment.RIGHT : Alignment.LEFT);
}
// Set the bounds of the column header.
colHeadBounds.x = bounds.x - insets.left;
colHeadBounds.width = bounds.width + insets.left + insets.right;
if (colHead != null) {
colHead.setBounds(colHeadBounds);
colHead.putClientProperty(Alignment.class, hsbOnTop ? Alignment.BOTTOM : Alignment.TOP);
}
// Calculate overlaps for translucent scroll bars
int overlapWidth = 0;
int overlapHeight = 0;
if (vsbNeeded && !vsbOpaque && hsbNeeded && !hsbOpaque) {
overlapWidth = vsbBounds.width; // shrink horizontally
//overlapHeight = hsbBounds.height; // shrink vertically
}
// Set the bounds of the vertical scroll bar.
vsbBounds.y = bounds.y - insets.top;
vsbBounds.height = bounds.height + insets.top + insets.bottom;
if (vsb != null) {
vsb.setVisible(vsbNeeded);
if (vsbNeeded) {
if (vsbOpaque && colHead != null && UIManager.getBoolean("ScrollPane.fillUpperCorner")) {
if ((vsbOnLeft ? upperLeft : upperRight) == null) {
// This is used primarily for GTK L&F, which needs to extend
// the vertical scrollbar to fill the upper corner near the column header.
// Note that we skip this step (and use the default behavior)
// if the user has set a custom corner component.
if (!hsbOnTop) vsbBounds.y -= colHeadBounds.height;
vsbBounds.height += colHeadBounds.height;
}
}
int overlapY = !hsbOnTop ? 0 : overlapHeight;
vsb.setBounds(vsbBounds.x, vsbBounds.y + overlapY, vsbBounds.width, vsbBounds.height - overlapHeight);
vsb.putClientProperty(Alignment.class, vsbOnLeft ? Alignment.LEFT : Alignment.RIGHT);
}
// Modify the bounds of the translucent scroll bar.
if (!vsbOpaque) {
if (!vsbOnLeft) vsbBounds.x += vsbBounds.width;
vsbBounds.width = 0;
}
}
// Set the bounds of the horizontal scroll bar.
hsbBounds.x = bounds.x - insets.left;
hsbBounds.width = bounds.width + insets.left + insets.right;
if (hsb != null) {
hsb.setVisible(hsbNeeded);
if (hsbNeeded) {
if (hsbOpaque && rowHead != null && UIManager.getBoolean("ScrollPane.fillLowerCorner")) {
if ((vsbOnLeft ? lowerRight : lowerLeft) == null) {
// This is used primarily for GTK L&F, which needs to extend
// the horizontal scrollbar to fill the lower corner near the row header.
// Note that we skip this step (and use the default behavior)
// if the user has set a custom corner component.
if (!vsbOnLeft) hsbBounds.x -= rowHeadBounds.width;
hsbBounds.width += rowHeadBounds.width;
}
}
int overlapX = !vsbOnLeft ? 0 : overlapWidth;
hsb.setBounds(hsbBounds.x + overlapX, hsbBounds.y, hsbBounds.width - overlapWidth, hsbBounds.height);
hsb.putClientProperty(Alignment.class, hsbOnTop ? Alignment.TOP : Alignment.BOTTOM);
}
// Modify the bounds of the translucent scroll bar.
if (!hsbOpaque) {
if (!hsbOnTop) hsbBounds.y += hsbBounds.height;
hsbBounds.height = 0;
}
}
// Set the bounds of the corners.
if (lowerLeft != null) {
lowerLeft.setBounds(vsbOnLeft ? vsbBounds.x : rowHeadBounds.x,
hsbOnTop ? colHeadBounds.y : hsbBounds.y,
vsbOnLeft ? vsbBounds.width : rowHeadBounds.width,
hsbOnTop ? colHeadBounds.height : hsbBounds.height);
}
if (lowerRight != null) {
lowerRight.setBounds(vsbOnLeft ? rowHeadBounds.x : vsbBounds.x,
hsbOnTop ? colHeadBounds.y : hsbBounds.y,
vsbOnLeft ? rowHeadBounds.width : vsbBounds.width,
hsbOnTop ? colHeadBounds.height : hsbBounds.height);
}
if (upperLeft != null) {
upperLeft.setBounds(vsbOnLeft ? vsbBounds.x : rowHeadBounds.x,
hsbOnTop ? hsbBounds.y : colHeadBounds.y,
vsbOnLeft ? vsbBounds.width : rowHeadBounds.width,
hsbOnTop ? hsbBounds.height : colHeadBounds.height);
}
if (upperRight != null) {
upperRight.setBounds(vsbOnLeft ? rowHeadBounds.x : vsbBounds.x,
hsbOnTop ? hsbBounds.y : colHeadBounds.y,
vsbOnLeft ? rowHeadBounds.width : vsbBounds.width,
hsbOnTop ? hsbBounds.height : colHeadBounds.height);
}
if (!vsbOpaque && vsbNeeded || !hsbOpaque && hsbNeeded) {
fixComponentZOrder(vsb, 0);
fixComponentZOrder(viewport, -1);
}
}
private static void fixComponentZOrder(Component component, int index) {
if (component != null) {
Container parent = component.getParent();
synchronized (parent.getTreeLock()) {
if (index < 0) index += parent.getComponentCount();
parent.setComponentZOrder(component, index);
}
}
}
private void adjustForVSB(Rectangle bounds, Insets insets, Rectangle vsbBounds, boolean vsbOpaque, boolean vsbOnLeft) {
vsbBounds.width = !vsb.isEnabled() ? 0 : min(bounds.width, vsb.getPreferredSize().width);
if (vsbOnLeft) {
vsbBounds.x = bounds.x - insets.left/* + vsbBounds.width*/;
if (vsbOpaque) bounds.x += vsbBounds.width;
}
else {
vsbBounds.x = bounds.x + bounds.width + insets.right - vsbBounds.width;
}
if (vsbOpaque) bounds.width -= vsbBounds.width;
}
private void adjustForHSB(Rectangle bounds, Insets insets, Rectangle hsbBounds, boolean hsbOpaque, boolean hsbOnTop) {
hsbBounds.height = !hsb.isEnabled() ? 0 : min(bounds.height, hsb.getPreferredSize().height);
if (hsbOnTop) {
hsbBounds.y = bounds.y - insets.top/* + hsbBounds.height*/;
if (hsbOpaque) bounds.y += hsbBounds.height;
}
else {
hsbBounds.y = bounds.y + bounds.height + insets.bottom - hsbBounds.height;
}
if (hsbOpaque) bounds.height -= hsbBounds.height;
}
private static int min(int one, int two) {
return Math.max(0, Math.min(one, two));
}
}
/**
* Indicates whether the specified event is not consumed and does not have unexpected modifiers.
*
* @param event a mouse wheel event to check for validity
* @return {@code true} if the specified event is valid, {@code false} otherwise
*/
public static boolean isScrollEvent(@NotNull MouseWheelEvent event) {
if (event.isConsumed()) return false; // event should not be consumed already
if (event.getWheelRotation() == 0) return false; // any rotation expected (forward or backward)
return 0 == (SCROLL_MODIFIERS & event.getModifiers());
}
private static final int SCROLL_MODIFIERS = // event modifiers allowed during scrolling
~InputEvent.SHIFT_MASK & ~InputEvent.SHIFT_DOWN_MASK & // for horizontal scrolling
~InputEvent.BUTTON1_MASK & ~InputEvent.BUTTON1_DOWN_MASK; // for selection
}
|
Java
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html><head>
<title>SQLite Database File Format Requirements</title>
<style type="text/css">
body {
margin: auto;
font-family: "Verdana" "sans-serif";
padding: 8px 1%;
}
a { color: #45735f }
a:visited { color: #734559 }
.logo { position:absolute; margin:3px; }
.tagline {
float:right;
text-align:right;
font-style:italic;
width:240px;
margin:12px;
margin-top:58px;
}
.toolbar {
font-variant: small-caps;
text-align: center;
line-height: 1.6em;
margin: 0;
padding:1px 8px;
}
.toolbar a { color: white; text-decoration: none; padding: 6px 12px; }
.toolbar a:visited { color: white; }
.toolbar a:hover { color: #80a796; background: white; }
.content { margin: 5%; }
.content dt { font-weight:bold; }
.content dd { margin-bottom: 25px; margin-left:20%; }
.content ul { padding:0px; padding-left: 15px; margin:0px; }
/* rounded corners */
.se { background: url(images/se.png) 100% 100% no-repeat #80a796}
.sw { background: url(images/sw.png) 0% 100% no-repeat }
.ne { background: url(images/ne.png) 100% 0% no-repeat }
.nw { background: url(images/nw.png) 0% 0% no-repeat }
</style>
<meta http-equiv="content-type" content="text/html; charset=UTF-8">
</head>
<body>
<div><!-- container div to satisfy validator -->
<a href="index.html">
<img class="logo" src="images/SQLite.gif" alt="SQLite Logo"
border="0"></a>
<div><!-- IE hack to prevent disappearing logo--></div>
<div class="tagline">Small. Fast. Reliable.<br>Choose any three.</div>
<table width="100%" style="clear:both"><tr><td>
<div class="se"><div class="sw"><div class="ne"><div class="nw">
<div class="toolbar">
<a href="about.html">About</a>
<a href="sitemap.html">Sitemap</a>
<a href="docs.html">Documentation</a>
<a href="download.html">Download</a>
<a href="copyright.html">License</a>
<a href="news.html">News</a>
<a href="http://www.sqlite.org/cvstrac/index">Developers</a>
<a href="support.html">Support</a>
</div></div></div></div></div>
</td></tr></table>
<h1 align="center">
Requirements for the SQLite Database File Format
</h1>
<p>
This document contains detailed <a href="requirements.html">requirements</a> for the database
<a href="fileformat.html">file format</a> and the <a href="fileio.html">file I/O</a> characteristics of SQLite.
</p>
<hr>
<a name="H30010"></a>
<p><b>H30010:</b>
The system shall ensure that at the successful conclusion of a
database transaction the contents of the database file constitute
a <i>well-formed SQLite database file</i>.
<a name="H30020"></a>
<p><b>H30020:</b>
The system shall ensure that at the successful conclusion of a
database transaction the contents of the database file are a valid
serialization of the contents of the logical SQL database produced
by the transaction.
<a name="H30030"></a>
<p><b>H30030:</b>
The first 16 bytes of a well-formed database file contain the UTF-8
encoding of the string "SQLite format 3" followed by a single
nul-terminator byte.
<a name="H30040"></a>
<p><b>H30040:</b>
The 19th byte (byte offset 18), the <i>file-format write version</i>,
of a well-formed database file contains the value 0x01.
<a name="H30050"></a>
<p><b>H30050:</b>
The 20th byte (byte offset 19), the <i>file-format read version</i>,
of a well-formed database file contains the value 0x01.
<a name="H30060"></a>
<p><b>H30060:</b>
The 21st byte (byte offset 20), the number of unused bytes on each
page, of a well-formed database file shall contain the value 0x00.
<a name="H30070"></a>
<p><b>H30070:</b>
The 22nd byte (byte offset 21), the maximum fraction of an index
B-Tree page to use for embedded content, of a well-formed database
file shall contain the value 0x40.
<a name="H30080"></a>
<p><b>H30080:</b>
The 23rd byte (byte offset 22), the minimum fraction of an index
B-Tree page to use for embedded content when using overflow pages,
of a well-formed database file contains the value 0x20.
<a name="H30090"></a>
<p><b>H30090:</b>
The 24th byte (byte offset 23), the minimum fraction of a table
B-Tree page to use for embedded content when using overflow pages,
of a well-formed database file contains the value 0x20.
<a name="H30100"></a>
<p><b>H30100:</b>
The 4 byte block starting at byte offset 24 of a well-formed
database file contains the <i>file change counter</i> formatted
as a 4-byte big-endian integer.
<a name="H30110"></a>
<p><b>H30110:</b>
The 4 byte block starting at byte offset 40 of a well-formed
database file contains the <i>schema version</i> formatted
as a 4-byte big-endian integer.
<a name="H30120"></a>
<p><b>H30120:</b>
The 4 byte block starting at byte offset 44 of a well-formed
database file, the <i>schema layer file format</i>, contains a
big-endian integer value between 1 and 4, inclusive.
<a name="H30130"></a>
<p><b>H30130:</b>
The 4 byte block starting at byte offset 48 of a well-formed
database file contains the <i>default pager cache size</i> formatted
as a 4-byte big-endian integer.
<a name="H30140"></a>
<p><b>H30140:</b>
The 4 byte block starting at byte offset 52 of a well-formed
database file contains the <i>auto-vacuum last root-page</i>
formatted as a 4-byte big-endian integer. If this value is non-zero,
the database is said to be an <i>auto-vacuum database</i>.
<a name="H30150"></a>
<p><b>H30150:</b>
The 4 byte block starting at byte offset 56 of a well-formed
database file, the <i>text encoding</i> contains a big-endian integer
value between 1 and 3, inclusive.
<a name="H30160"></a>
<p><b>H30160:</b>
The 4 byte block starting at byte offset 60 of a well-formed
database file contains the <i>user cookie</i> formatted
as a 4-byte big-endian integer.
<a name="H30170"></a>
<p><b>H30170:</b>
The 4 byte block starting at byte offset 64 of a well-formed
database file, the <i>incremental vacuum flag</i>, contains a big-endian
integer value between 0 and 1, inclusive.
<a name="H30180"></a>
<p><b>H30180:</b>
In a well-formed non-autovacuum database (one with a zero stored
in the 4-byte big-endian integer value beginning at byte offset
52 of the database file header), the incremental vacuum flag is
set to 0.
<a name="H30190"></a>
<p><b>H30190:</b>
The <i>database page size</i> of a well-formed database, stored as a
2-byte big-endian unsigned integer at byte offset 16 of the file,
shall be an integer power of 2 between 512 and 32768, inclusive.
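<p>
The header fields enumerated in H30030 through H30190 can be illustrated
by the following informal sketch (Python; not part of SQLite and not
normative). The field names are labels chosen here for illustration, and
only the fields mentioned above are decoded.
<code>
import struct

def parse_db_header(buf):
    # buf holds at least the first 100 bytes of the database file.
    assert buf[0:16] == b"SQLite format 3\x00"                      # H30030
    return {
        "page_size":          struct.unpack(">H", buf[16:18])[0],   # H30190
        "write_version":      buf[18],                              # H30040 (0x01)
        "read_version":       buf[19],                              # H30050 (0x01)
        "change_counter":     struct.unpack(">I", buf[24:28])[0],   # H30100
        "schema_version":     struct.unpack(">I", buf[40:44])[0],   # H30110
        "schema_file_format": struct.unpack(">I", buf[44:48])[0],   # H30120 (1..4)
        "autovac_last_root":  struct.unpack(">I", buf[52:56])[0],   # H30140
        "text_encoding":      struct.unpack(">I", buf[56:60])[0],   # H30150 (1..3)
        "user_cookie":        struct.unpack(">I", buf[60:64])[0],   # H30160
        "incremental_vacuum": struct.unpack(">I", buf[64:68])[0],   # H30170 (0 or 1)
    }</code>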
<a name="H30200"></a>
<p><b>H30200:</b>
The size of a <i>well formed database file</i> shall be an integer
multiple of the <i>database page size</i>.
<a name="H30210"></a>
<p><b>H30210:</b>
Each page of a <i>well formed database file</i> is exactly one of a
<i>B-Tree page</i>, an <i>overflow page</i>, a <i>free page</i>, a
<i>pointer-map page</i> or the <i>locking page</i>.
<a name="H30220"></a>
<p><b>H30220:</b>
The database page that starts at byte offset 2<sup>30</sup>, the
<i>locking page</i>, is never used for any purpose.
<a name="H30230"></a>
<p><b>H30230:</b>
In a <i>well-formed database file</i>, the portion of the first
database page not consumed by the database file-header (all but the
first 100 bytes) contains the root node of a table B-Tree,
the <i>schema table</i>.
<a name="H30240"></a>
<p><b>H30240:</b>
All records stored in the <i>schema table</i> contain exactly five
fields.
<a name="H30250"></a>
<p><b>H30250:</b>
For each SQL table in the database apart from the schema table itself
("sqlite_master"), the <i>schema table</i> of a <i>well-formed
database file</i> contains an associated record.
<a name="H30260"></a>
<p><b>H30260:</b>
The first field of each <i>schema table</i> record associated with an
SQL table shall be the text value "table".
<a name="H30270"></a>
<p><b>H30270:</b>
The second field of each <i>schema table</i> record associated with an
SQL table shall be a text value set to the name of the SQL table.
<a name="H30280"></a>
<p><b>H30280:</b>
In a <i>well-formed database file</i>, the third field of all
<i>schema table</i> records associated with SQL tables shall contain
the same value as the second field.
<a name="H30290"></a>
<p><b>H30290:</b>
In a <i>well-formed database file</i>, the fourth field of all
<i>schema table</i> records associated with SQL tables that are not
virtual tables contains the page number (an integer value) of the root
page of the associated <i>table B-Tree</i> structure within the
database file.
<a name="H30300"></a>
<p><b>H30300:</b>
If the associated database table is a virtual table, the fourth
field of the <i>schema table</i> record shall contain an SQL NULL
value.
<a name="H30310"></a>
<p><b>H30310:</b>
In a well-formed database, the fifth field of all <i>schema table</i>
records associated with SQL tables shall contain a "CREATE TABLE"
or "CREATE VIRTUAL TABLE" statment (a text value). The details
of the statement shall be such that executing the statement
would create a table of precisely the same name and schema as the
existing database table.
<a name="H30320"></a>
<p><b>H30320:</b>
For each PRIMARY KEY or UNIQUE constraint present in the definition
of each SQL table in the database, the schema table of a well-formed
database shall contain a record with the first field set to the text
value "index", and the second field set to a text value containing a
string of the form "sqlite_autoindex_&lt;name&gt;_&lt;idx&gt;", where
&lt;name&gt; is the name of the SQL table and &lt;idx&gt; is an
integer value.
<a name="H30330"></a>
<p><b>H30330:</b>
In a well-formed database, the third field of all schema table
records associated with SQL PRIMARY KEY or UNIQUE constraints shall
contain the name of the table to which the constraint applies (a
text value).
<a name="H30340"></a>
<p><b>H30340:</b>
In a well-formed database, the fourth field of all schema table
records associated with SQL PRIMARY KEY or UNIQUE constraints shall
contain the page number (an integer value) of the root page of the
associated index B-Tree structure.
<a name="H30350"></a>
<p><b>H30350:</b>
In a well-formed database, the fifth field of all schema table
records associated with SQL PRIMARY KEY or UNIQUE constraints shall
contain an SQL NULL value.
<a name="H30360"></a>
<p><b>H30360:</b>
For each SQL index in the database, the schema table of a well-formed
database shall contain a record with the first field set to the text
value "index" and the second field set to a text value containing the
name of the SQL index.
<a name="H30370"></a>
<p><b>H30370:</b>
In a well-formed database, the third field of all schema table
records associated with SQL indexes shall contain the name of the
SQL table that the index applies to.
<a name="H30380"></a>
<p><b>H30380:</b>
In a well-formed database, the fourth field of all schema table
records associated with SQL indexes shall contain the page number
(an integer value) of the root page of the associated index B-Tree
structure.
<a name="H30390"></a>
<p><b>H30390:</b>
In a well-formed database, the fifth field of all schema table
records associated with SQL indexes shall contain an SQL "CREATE
INDEX" statement (a text value). The details of the statement shall
be such that executing the statement would create an index of
precisely the same name and content as the existing database index.
<a name="H30400"></a>
<p><b>H30400:</b>
For each SQL view in the database, the schema table of a well-formed
database shall contain a record with the first field set to the text
value "view" and the second field set to a text value containing the
name of the SQL view.
<a name="H30410"></a>
<p><b>H30410:</b>
In a well-formed database, the third field of all schema table
records associated with SQL views shall contain the same value as
the second field.
<a name="H30420"></a>
<p><b>H30420:</b>
In a well-formed database, the fourth field of all schema table
records associated with SQL views shall contain the integer value 0.
<a name="H30430"></a>
<p><b>H30430:</b>
In a well-formed database, the fifth field of all schema table
records associated with SQL views shall contain an SQL "CREATE
VIEW" statement (a text value). The details of the statement shall
be such that executing the statement would create a view of
precisely the same name and definition as the existing database view.
<a name="H30440"></a>
<p><b>H30440:</b>
For each SQL trigger in the database, the schema table of a well-formed
database shall contain a record with the first field set to the text
value "trigger" and the second field set to a text value containing the
name of the SQL trigger.
<a name="H30450"></a>
<p><b>H30450:</b>
In a well-formed database, the third field of all schema table
records associated with SQL triggers shall contain the name of the
database table or view to which the trigger applies.
<a name="H30460"></a>
<p><b>H30460:</b>
In a well-formed database, the fourth field of all schema table
records associated with SQL triggers shall contain the integer value 0.
<a name="H30470"></a>
<p><b>H30470:</b>
In a well-formed database, the fifth field of all schema table
records associated with SQL triggers shall contain an SQL "CREATE
TRIGGER" statement (a text value). The details of the statement shall
be such that executing the statement would create a trigger of
precisely the same name and definition as the existing database trigger.
<a name="H30480"></a>
<p><b>H30480:</b>
In an auto-vacuum database, all pages that occur before the page
number stored in the <i>auto-vacuum last root-page</i> field
of the database file header (see H30140) must be either B-Tree <i>root
pages</i>, <i>pointer-map pages</i> or the <i>locking page</i>.
<a name="H30490"></a>
<p><b>H30490:</b>
In an auto-vacuum database, no B-Tree <i>root pages</i> may occur
on or after the page number stored in the <i>auto-vacuum last
root-page</i> field of the database file header (see H30140).
<a name="H30500"></a>
<p><b>H30500:</b>
As well as the <i>schema table</i>, a <i>well-formed database file</i>
contains <i>N</i> table B-Tree structures, where <i>N</i> is the
number of non-virtual tables in the logical database, excluding the
sqlite_master table but including sqlite_sequence and other system
tables.
<a name="H30510"></a>
<p><b>H30510:</b>
A well-formed database file contains <i>N</i> index B-Tree structures,
where <i>N</i> is the number of indexes in the logical database,
including indexes created by UNIQUE or PRIMARY KEY clauses in the
declaration of SQL tables.
<a name="H30520"></a>
<p><b>H30520:</b>
A 64-bit signed integer value stored in <i>variable length integer</i>
format consumes from 1 to 9 bytes of space.
<a name="H30530"></a>
<p><b>H30530:</b>
The most significant bit of all bytes except the last in a serialized
<i>variable length integer</i> is always set. Unless the serialized
form consumes the maximum 9 bytes available, the most significant
bit of the final byte of the representation is always cleared.
<a name="H30540"></a>
<p><b>H30540:</b>
The eight least significant bits of the 64-bit twos-complement
representation of a value stored in a 9 byte <i>variable length
integer</i> are stored in the final byte (byte offset 8) of the
serialized <i>variable length integer</i>. The other 56 bits are
stored in the 7 least significant bits of each of the first 8 bytes
of the serialized <i>variable length integer</i>, in order from
most significant to least significant.
<a name="H30550"></a>
<p><b>H30550:</b>
A <i>variable length integer</i> that consumes less than 9 bytes of
space contains a value represented as an <i>N</i>-bit unsigned
integer, where <i>N</i> is equal to the number of bytes consumed by
the serial representation (between 1 and 8) multiplied by 7. The
<i>N</i> bits are stored in the 7 least significant bits of each
byte of the serial representation, from most to least significant.
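<p>
The encoding described by H30520 through H30550 can be decoded with the
following informal sketch (Python; illustrative only, not normative):
<code>
def decode_varint(buf, offset=0):
    # Returns (value, bytes_consumed). The value is the unsigned 64-bit
    # bit pattern; interpret it as twos-complement for the signed value.
    value = 0
    for i in range(9):
        byte = buf[offset + i]
        if i == 8:
            # A 9th byte contributes all 8 of its bits (H30540).
            return value * 256 + byte, 9
        value = value * 128 + (byte & 0x7F)
        if not (byte & 0x80):
            # A clear high bit terminates the varint (H30530, H30550).
            return value, i + 1</code>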
<a name="H30560"></a>
<p><b>H30560:</b>
A <i>database record</i> consists of a <i>database record header</i>,
followed by <i>database record data</i>. The first part of the
<i>database record header</i> is a <i>variable length integer</i>
containing the total size (including itself) of the header in bytes.
<a name="H30570"></a>
<p><b>H30570:</b>
Following the length field, the remainder of the <i>database record
header</i> is populated with <i>N</i> <i>variable length integer</i>
fields, where <i>N</i> is the number of database values stored in
the record.
<a name="H30580"></a>
<p><b>H30580:</b>
Following the <i>database record header</i>, the <i>database record
data</i> is made up of <i>N</i> variable length blobs of data, where
<i>N</i> is again the number of database values stored in the record.
The <i>n</i>th blob contains the data for the <i>n</i>th value in
the database record. The size and format of each blob of data is
encoded in the corresponding <i>variable length integer</i> field
in the <i>database record header</i>.
<a name="H30590"></a>
<p><b>H30590:</b>
A value of 0 stored within the <i>database record header</i> indicates
that the corresponding database value is an SQL NULL. In this case
the blob of data in the data area is 0 bytes in size.
<a name="H30600"></a>
<p><b>H30600:</b>
A value of 1 stored within the <i>database record header</i> indicates
that the corresponding database value is an SQL integer. In this case
the blob of data contains the integer value, formatted as a 1-byte
big-endian signed integer.
<a name="H30610"></a>
<p><b>H30610:</b>
A value of 2 stored within the <i>database record header</i> indicates
that the corresponding database value is an SQL integer. In this case
the blob of data contains the integer value, formatted as a 2-byte
big-endian signed integer.
<a name="H30620"></a>
<p><b>H30620:</b>
A value of 3 stored within the <i>database record header</i> indicates
that the corresponding database value is an SQL integer. In this case
the blob of data contains the integer value, formatted as a 3-byte
big-endian signed integer.
<a name="H30630"></a>
<p><b>H30630:</b>
A value of 4 stored within the <i>database record header</i> indicates
that the corresponding database value is an SQL integer. In this case
the blob of data contains the integer value, formatted as a 4-byte
big-endian signed integer.
<a name="H30640"></a>
<p><b>H30640:</b>
A value of 5 stored within the <i>database record header</i> indicates
that the corresponding database value is an SQL integer. In this case
the blob of data contains the integer value, formatted as a 6-byte
big-endian signed integer.
<a name="H30650"></a>
<p><b>H30650:</b>
A value of 6 stored within the <i>database record header</i> indicates
that the corresponding database value is an SQL integer. In this case
the blob of data contains the integer value, formatted as an 8-byte
big-endian signed integer.
<a name="H30660"></a>
<p><b>H30660:</b>
A value of 7 stored within the <i>database record header</i> indicates
that the corresponding database value is an SQL real (floating
point number). In this case the blob of data contains an 8-byte
IEEE floating point number, stored in big-endian byte order.
<a name="H30670"></a>
<p><b>H30670:</b>
A value of 8 stored within the <i>database record header</i> indicates
that the corresponding database value is an SQL integer, value 0.
In this case the blob of data in the data area is 0 bytes in size.
<a name="H30680"></a>
<p><b>H30680:</b>
A value of 9 stored within the <i>database record header</i> indicates
that the corresponding database value is an SQL integer, value 1.
In this case the blob of data in the data area is 0 bytes in size.
<a name="H30690"></a>
<p><b>H30690:</b>
An even value greater than or equal to 12 stored within the
<i>database record header</i> indicates that the corresponding
database value is an SQL blob field. The blob of data contains the
value data. The blob of data is exactly (<i>n</i>-12)/2 bytes
in size, where <i>n</i> is the integer value stored in the
<i>database record header</i>.
<a name="H30700"></a>
<p><b>H30700:</b>
An odd value greater than or equal to 13 stored within the
<i>database record header</i> indicates that the corresponding
database value is an SQL text field. The blob of data contains the
value text stored using the <i>database encoding</i>, with no
nul-terminator. The blob of data is exactly (<i>n</i>-13)/2 bytes
in size, where <i>n</i> is the integer value stored in the
<i>database record header</i>.
<a name="H30710"></a>
<p><b>H30710:</b>
In a well-formed database file, if the values 8 or 9 appear within
any <i>database record header</i> within the database, then the
<i>schema-layer file format</i> (stored at byte offset 44 of the
database file header) must be set to 4.
<a name="H30720"></a>
<p><b>H30720:</b>
In a well-formed database file, the values 10 and 11, and all
negative values may not appear within any <i>database record header</i>
in the database.
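<p>
The mapping from serial type values (H30590 through H30720) to the size of
the corresponding blob in the <i>database record data</i> area can be
summarised by the following informal sketch (Python; illustrative only):
<code>
def serial_type_content_size(t):
    fixed = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 6, 6: 8, 7: 8, 8: 0, 9: 0}
    if t in fixed:
        return fixed[t]                 # H30590 through H30680
    if t >= 12 and t % 2 == 0:
        return (t - 12) // 2            # BLOB value (H30690)
    if t >= 13:
        return (t - 13) // 2            # text value (H30700)
    raise ValueError("10, 11 and negative values are illegal (H30720)")</code>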
<a name="H30730"></a>
<p><b>H30730:</b>
The pages in an index B-Tree structure are arranged into a tree
structure such that all leaf pages are at the same depth.
<a name="H30740"></a>
<p><b>H30740:</b>
Each leaf node page in an index B-Tree contains one or more
B-Tree cells, where each cell contains a database record.
<a name="H30750"></a>
<p><b>H30750:</b>
Each internal node page in an index B-Tree contains one or more
B-Tree cells, where each cell contains a child page number, <i>C</i>,
and a database record <i>R</i>. All database records stored within
the sub-tree headed by page <i>C</i> are smaller than record <i>R</i>,
according to the index sort order (see below). Additionally, unless
<i>R</i> is the smallest database record stored on the internal node
page, all database records within the sub-tree headed by <i>C</i> are
greater than <i>R<sub>-1</sub></i>, where <i>R<sub>-1</sub></i> is the
largest database record on the internal node page that is smaller
than <i>R</i>.
<a name="H30760"></a>
<p><b>H30760:</b>
As well as child page numbers associated with B-Tree cells, each
internal node page in an index B-Tree contains the page number
of an extra child page, the <i>right-child page</i>. All database
records stored in all B-Tree cells within the sub-tree headed by the
<i>right-child page</i> are greater than all database records
stored within B-Tree cells on the internal node page.
<a name="H30770"></a>
<p><b>H30770:</b>
In a well-formed database, each index B-Tree contains a single entry
for each row in the indexed logical database table.
<a name="H30780"></a>
<p><b>H30780:</b>
Each <i>database record</i> (key) stored by an index B-Tree in a
well-formed database contains the same number of values, the number
of indexed columns plus one.
<a name="H30790"></a>
<p><b>H30790:</b>
The final value in each <i>database record</i> (key) stored by an
index B-Tree in a well-formed database contains the rowid (an integer
value) of the corresponding logical database row.
<a name="H30800"></a>
<p><b>H30800:</b>
The first <i>N</i> values in each <i>database record</i> (key)
stored in an index B-Tree, where <i>N</i> is the number of indexed
columns, contain the values of the indexed columns from the
corresponding logical database row, in the order specified for the
index.
<a name="H30810"></a>
<p><b>H30810:</b>
The <i>b-tree page flags</i> field (the first byte) of each database
page used as an internal node of an index B-Tree structure is set to
0x02.
<a name="H30820"></a>
<p><b>H30820:</b>
The <i>b-tree page flags</i> field (the first byte) of each database
page used as a leaf node of an index B-Tree structure is set to 0x0A.
<a name="H30830"></a>
<p><b>H30830:</b>
The first byte of each database page used as a B-Tree page contains
the <i>b-tree page flags</i> field. On page 1, the <i>b-tree page
flags</i> field is stored directly after the 100 byte file header
at byte offset 100.
<a name="H30840"></a>
<p><b>H30840:</b>
The number of B-Tree cells stored on a B-Tree page is stored as a
2-byte big-endian integer starting at byte offset 3 of the B-Tree
page. On page 1, this field is stored at byte offset 103.
<a name="H30850"></a>
<p><b>H30850:</b>
The 2-byte big-endian integer starting at byte offset 5 of each
B-Tree page contains the byte-offset from the start of the page
to the start of the <i>cell content area</i>, which consumes all space
from this offset to the end of the usable region of the page.
On page 1, this field is stored at byte offset 105. All B-Tree
cells on the page are stored within the cell-content area.
<a name="H30860"></a>
<p><b>H30860:</b>
On each page used as an internal node of a B-Tree structure, the
page number of the rightmost child node in the B-Tree structure is
stored as a 4-byte big-endian unsigned integer beginning at byte
offset 8 of the database page, or byte offset 108 on page 1.
<a name="H30870"></a>
<p><b>H30870:</b>
Immediately following the <i>page header</i> on each B-Tree page is the
<i>cell offset array</i>, consisting of <i>N</i> 2-byte big-endian
unsigned integers, where <i>N</i> is the number of cells stored
on the B-Tree page (H30840). On an internal node B-Tree page,
the cell offset array begins at byte offset 12, or on a leaf
page, byte offset 8. For the B-Tree node on page 1, these
offsets are 112 and 108, respectively.
<a name="H30880"></a>
<p><b>H30880:</b>
The <i>cell offset array</i> and the <i>cell content area</i> (H30850)
may not overlap.
<a name="H30890"></a>
<p><b>H30890:</b>
Each value stored in the <i>cell offset array</i> must be greater
than or equal to the offset to the <i>cell content area</i> (H30850),
and less than the database <i>page size</i>.
<a name="H30900"></a>
<p><b>H30900:</b>
The <i>N</i> values stored within the <i>cell offset array</i> are the
byte offsets from the start of the B-Tree page to the beginning of
each of the <i>N</i> cells stored on the page.
<a name="H30910"></a>
<p><b>H30910:</b>
No two B-Tree cells may overlap.
<a name="H30920"></a>
<p><b>H30920:</b>
Within the <i>cell content area</i>, all blocks of contiguous
free-space (space not used by B-Tree cells) greater than 3 bytes in
size are linked together into a linked list, the <i>free block list</i>.
Such blocks of free space are known as <i>free blocks</i>.
<a name="H30930"></a>
<p><b>H30930:</b>
The first two bytes of each <i>free block</i> contain the offset
of the next <i>free block</i> in the <i>free block list</i> formatted
as a 2-byte big-endian integer, relative to the start of the database
page. If there is no next <i>free block</i>, then the first two
bytes are set to 0x00.
<a name="H30940"></a>
<p><b>H30940:</b>
The second two bytes (byte offsets 2 and 3) of each <i>free block</i>
contain the total size of the <i>free block</i>, formatted as a 2-byte
big-endian integer.
<a name="H30950"></a>
<p><b>H30950:</b>
On all B-Tree pages, the offset of the first <i>free block</i> in the
<i>free block list</i>, relative to the start of the database page,
is stored as a 2-byte big-endian integer starting at byte offset
1 of the database page. If there is no first <i>free block</i>
(because the <i>free block list</i> is empty), then the two bytes
at offsets 1 and 2 of the database page are set to 0x00. On page 1,
this field is stored at byte offset 101 of the page.
<a name="H30960"></a>
<p><b>H30960:</b>
Within the cell-content area, all blocks of contiguous free-space
(space not used by B-Tree cells) less than or equal to 3 bytes in
size are known as <i>fragments</i>. The total size of all
<i>fragments</i> on a B-Tree page is stored as a 1-byte unsigned
integer at byte offset 7 of the database page. On page 1, this
field is stored at byte offset 107.
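<p>
The page-header layout described by H30830 through H30960 can be decoded
with the following informal sketch (Python; illustrative only, not
normative):
<code>
import struct

def parse_btree_page_header(page, pgno):
    base = 100 if pgno == 1 else 0                                   # H30830
    flags = page[base]                                               # 0x02, 0x05, 0x0A or 0x0D
    first_freeblock = struct.unpack(">H", page[base+1:base+3])[0]    # H30950
    ncell = struct.unpack(">H", page[base+3:base+5])[0]              # H30840
    content_offset = struct.unpack(">H", page[base+5:base+7])[0]     # H30850
    fragment_bytes = page[base+7]                                    # H30960
    is_leaf = flags in (0x0A, 0x0D)
    header_size = 8 if is_leaf else 12
    right_child = None
    if not is_leaf:
        right_child = struct.unpack(">I", page[base+8:base+12])[0]   # H30860
    # Cell offset array (H30870): ncell 2-byte offsets from the page start.
    cell_offsets = [struct.unpack(">H", page[base + header_size + 2*i:
                                             base + header_size + 2*i + 2])[0]
                    for i in range(ncell)]
    return (flags, first_freeblock, ncell, content_offset,
            fragment_bytes, right_child, cell_offsets)</code>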
<a name="H30970"></a>
<p><b>H30970:</b>
Each B-Tree cell belonging to an internal node page of an index
B-Tree consists of a 4-byte big-endian unsigned integer, the
<i>child page number</i>, followed by a <i>variable length integer</i>
field, followed by a <i>database record</i>. The
<i>variable length integer</i> field contains the length of the
database record in bytes.
<a name="H30980"></a>
<p><b>H30980:</b>
Each B-Tree cell belonging to a leaf page of an index B-Tree
consists of a <i>variable length integer</i> field, followed by
a <i>database record</i>. The <i>variable length integer</i> field
contains the length of the database record in bytes.
<a name="H30990"></a>
<p><b>H30990:</b>
If the database record stored in an index B-Tree page is
sufficiently small, then the entire cell is stored within the
index B-Tree page. Sufficiently small is defined as equal to or
less than <i>max-local</i>, where:
<code>
<i>max-local</i> := (<i>usable-size</i> - 12) * 64 / 255 - 23</code>
<a name="H31000"></a>
<p><b>H31000:</b>
If the database record stored as part of an index B-Tree cell is too
large to be stored entirely within the B-Tree page (as defined by
H30990), then only a prefix of the <i>database record</i> is stored
within the B-Tree page and the remainder stored in an <i>overflow
chain</i>. In this case, the database record prefix is immediately
followed by the page number of the first page of the
<i>overflow chain</i>, formatted as a 4-byte big-endian unsigned
integer.
<a name="H31010"></a>
<p><b>H31010:</b>
When a <i>database record</i> belonging to an index B-Tree cell is
stored partially within an <i>overflow page chain</i>, the size
of the prefix stored within the index B-Tree page is <i>N</i> bytes,
where <i>N</i> is calculated using the following algorithm:
<code>
<i>min-local</i> := (<i>usable-size</i> - 12) * 32 / 255 - 23
<i>max-local</i> := (<i>usable-size</i> - 12) * 64 / 255 - 23
<i>N</i> := <i>min-local</i> + ((<i>record-size</i> - <i>min-local</i>) % (<i>usable-size</i> - 4))
if( <i>N</i> > <i>max-local</i> ) <i>N</i> := <i>min-local</i></code>
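<p>
The division of an index B-Tree cell between the B-Tree page and its
overflow chain (H30990 through H31010) may be restated as the following
informal sketch (Python; integer division, illustrative only):
<code>
def index_cell_local_size(record_size, usable_size):
    max_local = (usable_size - 12) * 64 // 255 - 23           # H30990
    min_local = (usable_size - 12) * 32 // 255 - 23
    if record_size > max_local:                               # record overflows (H31000)
        n = min_local + (record_size - min_local) % (usable_size - 4)
        return min_local if n > max_local else n              # H31010
    return record_size                                        # whole record fits locally</code>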
<a name="H31020"></a>
<p><b>H31020:</b>
The pages in a table B-Tree structures are arranged into a tree
structure such that all leaf pages are at the same depth.
<a name="H31030"></a>
<p><b>H31030:</b>
Each leaf page in a table B-Tree structure contains one or more
B-Tree cells, where each cell contains a 64-bit signed integer key
value and a database record.
<a name="H31040"></a>
<p><b>H31040:</b>
Each internal node page in a table B-Tree structure contains one or
more B-Tree cells, where each cell contains a 64-bit signed integer
key value, <i>K</i>, and a child page number, <i>C</i>. All integer key
values in all B-Tree cells within the sub-tree headed by page <i>C</i>
are less than or equal to <i>K</i>. Additionally, unless <i>K</i>
is the smallest integer key value stored on the internal node page,
all integer keys within the sub-tree headed by <i>C</i> are greater
than <i>K<sub>-1</sub></i>, where <i>K<sub>-1</sub></i> is the largest
integer key on the internal node page that is smaller than <i>K</i>.
<a name="H31050"></a>
<p><b>H31050:</b>
As well as child page numbers associated with B-Tree cells, each
internal node page in a table B-Tree contains the page number
of an extra child page, the <i>right-child page</i>. All key values
in all B-Tree cells within the sub-tree headed by the <i>right-child
page</i> are greater than all key values stored within B-Tree cells
on the internal node page.
<a name="H31060"></a>
<p><b>H31060:</b>
In a well-formed database, each table B-Tree contains a single entry
for each row in the corresponding logical database table.
<a name="H31070"></a>
<p><b>H31070:</b>
The key value (a 64-bit signed integer) for each B-Tree entry is
the same as the value of the rowid field of the corresponding
logical database row.
<a name="H31080"></a>
<p><b>H31080:</b>
The SQL values serialized to make up each <i>database record</i>
stored as ancillary data in a table B-Tree shall be the equal to the
values taken by the <i>N</i> leftmost columns of the corresponding
logical database row, where <i>N</i> is the number of values in the
database record.
<a name="H31090"></a>
<p><b>H31090:</b>
If a logical database table column is declared as an "INTEGER
PRIMARY KEY", then instead of its integer value, an SQL NULL
shall be stored in its place in any database records used as
ancillary data in a table B-Tree.
<a name="H31100"></a>
<p><b>H31100:</b>
If the database <i>schema layer file-format</i> (the value stored
as a 4-byte integer at byte offset 44 of the file header) is 1,
then all database records stored as ancillary data in a table
B-Tree structure have the same number of fields as there are
columns in the corresponding logical database table.
<a name="H31110"></a>
<p><b>H31110:</b>
If the database <i>schema layer file-format</i> value is two or
greater and the rightmost <i>M</i> columns of a row contain SQL NULL
values, then the corresponding record stored as ancillary data in
the table B-Tree has between <i>N</i>-<i>M</i> and <i>N</i> fields,
where <i>N</i> is the number of columns in the logical database
table.
<a name="H31120"></a>
<p><b>H31120:</b>
If the database <i>schema layer file-format</i> value is three or
greater and the rightmost <i>M</i> columns of a row contain their
default values according to the logical table declaration, then the
corresponding record stored as ancillary data in the table B-Tree
may have as few as <i>N</i>-<i>M</i> fields, where <i>N</i> is the
number of columns in the logical database table.
<a name="H31130"></a>
<p><b>H31130:</b>
In a <i>well-formed database file</i>, the first byte of each page used
as an internal node of a table B-Tree structure is set to 0x05.
<a name="H31140"></a>
<p><b>H31140:</b>
In a <i>well-formed database file</i>, the first byte of each page used
as a leaf node of a table B-Tree structure is set to 0x0D.
<a name="H31150"></a>
<p><b>H31150:</b>
B-Tree cells belonging to table B-Tree internal node pages consist
of exactly two fields, a 4-byte big-endian unsigned integer
immediately followed by a <i>variable length integer</i>. These
fields contain the child page number and key value respectively
(see H31040).
<a name="H31160"></a>
<p><b>H31160:</b>
B-Tree cells belonging to table B-Tree leaf node pages consist
of three fields, two <i>variable length integer</i> values
followed by a database record. The size of the database record
in bytes is stored in the first of the two
<i>variable length integer</i> fields. The second of the two
<i>variable length integer</i> fields contains the 64-bit signed
integer key (see H31030).
<a name="H31170"></a>
<p><b>H31170:</b>
If the size of the record stored in a table B-Tree leaf page cell
is less than or equal to (<i>usable page size</i>-35) bytes, then
the entire cell is stored on the B-Tree leaf page. In a well-formed
database, <i>usable page size</i> is the same as the database
<i>page size</i>.
<a name="H31180"></a>
<p><b>H31180:</b>
If a table B-Tree cell is too large to be stored entirely on
a leaf page (as defined by H31170), then a prefix of the cell
is stored on the leaf page, and the remainder stored in an
<i>overflow page chain</i>. In this case the cell prefix
stored on the B-Tree leaf page is immediately followed by a
4-byte big-endian unsigned integer containing the page number
of the first overflow page in the chain.
<a name="H31190"></a>
<p><b>H31190:</b>
When a table B-Tree cell is stored partially in an
<i>overflow page chain</i>, the prefix stored on the B-Tree
leaf page consists of the two <i>variable length integer</i> fields,
followed by the first <i>N</i> bytes of the database record, where
<i>N</i> is determined by the following algorithm:
<code>
<i>min-local</i> := (<i>usable-size</i> - 12) * 32 / 255 - 23
<i>max-local</i> := (<i>usable-size</i> - 35)
<i>N</i> := <i>min-local</i> + (<i>record-size</i> - <i>min-local</i>) % (<i>usable-size</i> - 4)
if( <i>N</i> > <i>max-local</i> ) N := <i>min-local</i>
</code>
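<p>
The corresponding calculation for table B-Tree leaf cells (H31170 through
H31190) may be restated as the following informal sketch (Python; integer
division, illustrative only):
<code>
def table_cell_local_size(record_size, usable_size):
    max_local = usable_size - 35                              # H31170
    if record_size > max_local:                               # cell overflows (H31180)
        min_local = (usable_size - 12) * 32 // 255 - 23
        n = min_local + (record_size - min_local) % (usable_size - 4)
        return min_local if n > max_local else n              # H31190
    return record_size                                        # whole cell fits locally</code>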
<a name="H31200"></a>
<p><b>H31200:</b>
A single <i>overflow page</i> may store up to <i>available-space</i>
bytes of database record data, where <i>available-space</i> is equal
to (<i>usable-size</i> - 4).
<a name="H31210"></a>
<p><b>H31210:</b>
When a database record is too large to store within a B-Tree page
(see H31170 and H31000), a prefix of the record is stored within
the B-Tree page and the remainder stored across <i>N</i> overflow
pages. In this case <i>N</i> is the minimum number of pages required
to store the portion of the record not stored on the B-Tree page,
given the maximum payload per overflow page defined by H31200.
<a name="H31220"></a>
<p><b>H31220:</b>
The list of overflow pages used to store a single database record
are linked together in a singly linked list known as an
<i>overflow chain</i>. The first four bytes of each page except the
last in an <i>overflow chain</i> are used to store the page number
of the next page in the linked list, formatted as an unsigned
big-endian integer. The first four bytes of the last page in an
<i>overflow chain</i> are set to 0x00.
<a name="H31230"></a>
<p><b>H31230:</b>
Each overflow page except the last in an <i>overflow chain</i>
contains <i>N</i> bytes of record data starting at byte offset 4 of
the page, where <i>N</i> is the maximum payload per overflow page,
as defined by H31200. The final page in an <i>overflow chain</i>
contains the remaining data, also starting at byte offset 4.
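<p>
Reading the overflowed portion of a record back out of an <i>overflow
chain</i> (H31200 through H31230) amounts to the following informal sketch
(Python; read_page() is a hypothetical helper, assumed here to return a raw
page image; illustrative only):
<code>
import struct

def read_overflow_chain(read_page, first_pgno, nbytes, usable_size):
    per_page = usable_size - 4              # maximum payload per overflow page (H31200)
    parts = []
    remaining = nbytes
    pgno = first_pgno
    while pgno != 0 and remaining > 0:
        page = read_page(pgno)
        pgno = struct.unpack(">I", page[0:4])[0]      # next page, 0x00 on the last (H31220)
        chunk = page[4:4 + min(per_page, remaining)]  # record data begins at offset 4 (H31230)
        parts.append(chunk)
        remaining -= len(chunk)
    return b"".join(parts)</code>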
<a name="H31240"></a>
<p><b>H31240:</b>
All <i>free pages</i> in a <i>well-formed database file</i> are part of
the database <i>free page list</i>.
<a name="H31250"></a>
<p><b>H31250:</b>
Each free page is either a <i>free list trunk</i> page or a
<i>free list leaf</i> page.
<a name="H31260"></a>
<p><b>H31260:</b>
All <i>free list trunk</i> pages are linked together into a singly
linked list. The first 4 bytes of each page in the linked list
contains the page number of the next page in the list, formatted
as an unsigned big-endian integer. The first 4 bytes of the last
page in the linked list are set to 0x00.
<a name="H31270"></a>
<p><b>H31270:</b>
The second 4 bytes of each <i>free list trunk</i> page contains
the number of <i>free list leaf</i> page numbers stored on the free list
trunk page, formatted as an unsigned big-endian integer.
<a name="H31280"></a>
<p><b>H31280:</b>
Beginning at byte offset 8 of each <i>free list trunk</i> page are
<i>N</i> page numbers, each formatted as a 4-byte unsigned big-endian
integer, where <i>N</i> is the value described in requirement H31270.
<a name="H31290"></a>
<p><b>H31290:</b>
All page numbers stored on all <i>free list trunk</i> pages refer to
database pages that are <i>free list leaves</i>.
<a name="H31300"></a>
<p><b>H31300:</b>
The page number of each <i>free list leaf</i> page in a well-formed
database file appears exactly once within the set of page numbers
stored on <i>free list trunk</i> pages.
<a name="H31310"></a>
<p><b>H31310:</b>
The total number of pages in the free list, including all <i>free list
trunk</i> and <i>free list leaf</i> pages, is stored as a 4-byte unsigned
big-endian integer at offset 36 of the database file header.
<a name="H31320"></a>
<p><b>H31320:</b>
The page number of the first page in the linked list of <i>free list
trunk</i> pages is stored as a 4-byte big-endian unsigned integer at
offset 32 of the database file header. If there are no <i>free list
trunk</i> pages in the database file, then the value stored at
offset 32 of the database file header is 0.
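<p>
Walking the <i>free page list</i> (H31260 through H31320) may be sketched
as follows (Python; illustrative only, with read_page() a hypothetical
helper returning a raw page image and db_header the first 100 bytes of the
database file):
<code>
import struct

def free_list_leaves(read_page, db_header):
    trunk_pgno = struct.unpack(">I", db_header[32:36])[0]     # H31320
    total_pages = struct.unpack(">I", db_header[36:40])[0]    # H31310
    leaves = []
    while trunk_pgno != 0:
        trunk = read_page(trunk_pgno)
        next_trunk = struct.unpack(">I", trunk[0:4])[0]       # H31260
        n = struct.unpack(">I", trunk[4:8])[0]                # H31270
        for i in range(n):                                    # H31280
            leaves.append(struct.unpack(">I", trunk[8 + 4*i:12 + 4*i])[0])
        trunk_pgno = next_trunk
    return total_pages, leaves</code>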
<a name="H31330"></a>
<p><b>H31330:</b>
Non auto-vacuum databases do not contain pointer map pages.
<a name="H31340"></a>
<p><b>H31340:</b>
In an auto-vacuum database file, every <i>(num-entries + 1)</i>th
page beginning with page 2 is designated a pointer-map page, where
<i>num-entries</i> is calculated as:
<code>
<i>num-entries</i> := <i>database-usable-page-size</i> / 5
</code>
<a name="H31350"></a>
<p><b>H31350:</b>
In an auto-vacuum database file, each pointer-map page contains
a pointer map entry for each of the <i>num-entries</i> (defined by
H31340) pages that follow it, if they exist.
<a name="H31360"></a>
<p><b>H31360:</b>
Each pointer-map page entry consists of a 1-byte page type and a
4-byte page parent number, 5 bytes in total.
<a name="H31370"></a>
<p><b>H31370:</b>
Pointer-map entries are packed into the pointer-map page in order,
starting at offset 0. The entry associated with the database
page that immediately follows the pointer-map page is located at
offset 0. The entry for the next page is located at offset 5, and so on.
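<p>
Locating the pointer-map entry for a given page (H31340 through H31370)
can be expressed as the following informal sketch (Python; illustrative
only, not normative):
<code>
def ptrmap_entry_location(pgno, usable_size):
    # Returns (pointer-map page number, byte offset of the 5-byte entry),
    # or None for page 1 and for pointer-map pages themselves.
    num_entries = usable_size // 5                            # H31340
    if pgno == 1:
        return None
    group = (pgno - 2) // (num_entries + 1)
    ptrmap_pgno = 2 + group * (num_entries + 1)               # page 2, then every
    if pgno == ptrmap_pgno:                                   # (num-entries + 1) pages
        return None
    offset = 5 * (pgno - ptrmap_pgno - 1)                     # H31360, H31370
    return ptrmap_pgno, offset</code>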
<a name="H31380"></a>
<p><b>H31380:</b>
For each page except page 1 in an auto-vacuum database file that is
the root page of a B-Tree structure, the page type of the
corresponding pointer-map entry is set to the value 0x01 and the
parent page number is zero.
<a name="H31390"></a>
<p><b>H31390:</b>
For each page that is a part of an auto-vacuum database file free-list,
the page type of the corresponding pointer-map entry is set to the
value 0x02 and the parent page number is zero.
<a name="H31400"></a>
<p><b>H31400:</b>
For each page in a well-formed auto-vacuum database that is the first
page in an overflow chain, the page type of the corresponding
pointer-map entry is set to 0x03 and the parent page number field
is set to the page number of the B-Tree page that contains the start
of the B-Tree cell stored in the overflow-chain.
<a name="H31410"></a>
<p><b>H31410:</b>
For each page that is the second or a subsequent page in an overflow
chain, the page type of the corresponding pointer-map entry is set to
0x04 and the parent page number field is set to the page number of the
preceding page in the overflow chain.
<a name="H31420"></a>
<p><b>H31420:</b>
For each page that is not a root page but is a part of a B-Tree tree
structure (not part of an overflow chain), the page type of the
corresponding pointer-map entry is set to the value 0x05 and the parent
page number field is set to the page number of the parent node in the
B-Tree structure.
<a name="H32000"></a>
<p><b>H32000:</b>
If a <i>journal file</i> contains a well-formed <i>master-journal
pointer</i>, and the named <i>master-journal file</i> either does
not exist or does not contain the name of the <i>journal file</i>,
then the <i>journal file</i> shall be considered invalid.
<a name="H32010"></a>
<p><b>H32010:</b>
If the first 28 bytes of a <i>journal file</i> do not contain a well-formed
<i>journal header</i>, then the <i>journal file</i> shall be considered
invalid.
<a name="H32020"></a>
<p><b>H32020:</b>
If the journal file exists within the file-system and none of H32000,
H32010 or H33080 apply, then the journal file shall be considered valid.
<a name="H32030"></a>
<p><b>H32030:</b>
If there exists a valid <i>journal file</i> in the file-system, then the
database <i>page-size</i> in bytes used to interpret the <i>database image</i>
shall be the value stored as a 4-byte big-endian unsigned integer at byte
offset 24 of the <i>journal file</i>.
<a name="H32040"></a>
<p><b>H32040:</b>
If there exists a valid <i>journal file</i> in the file-system, then the
number of pages in the <i>database image</i> shall be the value stored as
a 4-byte big-endian unsigned integer at byte offset 16 of the
<i>journal file</i>.
<a name="H32050"></a>
<p><b>H32050:</b>
If there is no valid <i>journal file</i> in the file-system, then the
database <i>page-size</i> in bytes used to interpret the <i>database image</i>
shall be the value stored as a 2-byte big-endian unsigned integer at byte
offset 16 of the <i>database file</i>.
<a name="H32060"></a>
<p><b>H32060:</b>
If there is no valid <i>journal file</i> in the file-system, then the
number of pages in the <i>database image</i> shall be calculated by dividing
the size of the <i>database file</i> in bytes by the database <i>page-size</i>.
<a name="H32070"></a>
<p><b>H32070:</b>
If there exists a valid <i>journal file</i> in the file-system, then the
contents of each page of the <i>database image</i> for which there is a valid
<i>journal record</i> in the <i>journal file</i> shall be read from the
corresponding journal record.
<a name="H32080"></a>
<p><b>H32080:</b>
The contents of all <i>database image</i> pages for which there is no valid
<i>journal record</i> shall be read from the database file.
<a name="H32090"></a>
<p><b>H32090:</b>
A buffer of 28 bytes shall be considered a well-formed journal
header if it is not excluded by requirements H32180, H32190 or H32200.
<a name="H32180"></a>
<p><b>H32180:</b>
A buffer of 28 bytes shall only be considered a well-formed journal
header if the first eight bytes of the buffer contain the values 0xd9,
0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, and 0xd7, respectively.
<a name="H32190"></a>
<p><b>H32190:</b>
A buffer of 28 bytes shall only be considered a well-formed journal
header if the value stored in the sector size field (the 4-byte big-endian
unsigned integer at offset 20 of the buffer) contains a value that
is an integer power of two greater than 512.
<a name="H32200"></a>
<p><b>H32200:</b>
A buffer of 28 bytes shall only be considered a well-formed journal
header if the value stored in the page size field (the 4-byte big-endian
unsigned integer at offset 24 of the buffer) contains a value that
is an integer power of two greater than 512.
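<p>
A journal header satisfying H32180 through H32200 can be decoded with the
following informal sketch (Python). Only the magic, sector-size and
page-size offsets are fixed by the requirements above; the remaining field
offsets shown here are those used by SQLite's journal format and are
included purely for illustration.
<code>
import struct

JOURNAL_MAGIC = bytes([0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, 0xd7])

def parse_journal_header(buf):
    if buf[0:8] != JOURNAL_MAGIC:                             # H32180
        raise ValueError("not a journal header")
    record_count  = struct.unpack(">I", buf[8:12])[0]
    checksum_init = struct.unpack(">I", buf[12:16])[0]
    page_count    = struct.unpack(">I", buf[16:20])[0]        # see H32040
    sector_size   = struct.unpack(">I", buf[20:24])[0]        # H32190
    page_size     = struct.unpack(">I", buf[24:28])[0]        # H32200, H32030
    return record_count, checksum_init, page_count, sector_size, page_size</code>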
<a name="H32100"></a>
<p><b>H32100:</b>
A buffer of (8 + page size) bytes shall be considered a well-formed journal
record if it is not excluded by requirements H32110 or H32120.
<a name="H32110"></a>
<p><b>H32110:</b>
A journal record shall only be considered to be well-formed if the page number
field contains a value other than zero and other than the locking-page number,
calculated
using the page size found in the first journal header of the journal file that
contains the journal record.
<a name="H32120"></a>
<p><b>H32120:</b>
A journal record shall only be considered to be well-formed if the checksum
field contains a value equal to the sum of the value stored in the
checksum-initializer field of the journal header that precedes the record
and the value stored in every 200th byte of the page data field (interpreted
as an 8-bit unsigned integer), starting with byte offset (page-size % 200) and
ending with the byte at byte offset (page-size - 200).
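<p>
The checksum rule stated by H32120 (and restated by H35300 for journal
writes) amounts to the following informal sketch (Python; illustrative
only):
<code>
def journal_record_checksum(page_data, checksum_initializer, page_size):
    total = checksum_initializer
    # Every 200th byte, starting at offset (page-size % 200) and ending
    # with the byte at offset (page-size - 200).
    for i in range(page_size % 200, page_size - 200 + 1, 200):
        total += page_data[i]
    return total & 0xFFFFFFFF     # stored as a 4-byte big-endian integer</code>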
<a name="H32130"></a>
<p><b>H32130:</b>
A buffer shall be considered to contain a well-formed master journal pointer
record if it is not excluded from this category by requirements H32140,
H32150, H32160 or H32170.
<a name="H32140"></a>
<p><b>H32140:</b>
A buffer shall only be considered to be a well-formed master journal pointer
if the final eight bytes of the buffer contain the values 0xd9, 0xd5, 0x05,
0xf9, 0x20, 0xa1, 0x63, and 0xd7, respectively.
<a name="H32150"></a>
<p><b>H32150:</b>
A buffer shall only be considered to be a well-formed master journal pointer
if the size of the buffer in bytes is equal to the value stored as a 4-byte
big-endian unsigned integer starting 16 bytes before the end of the buffer.
<a name="H32160"></a>
<p><b>H32160:</b>
A buffer shall only be considered to be a well-formed master journal pointer
if the first four bytes of the buffer, interpreted as a big-endian unsigned
integer, contain the page number of the locking page (the value
(1 + 2<sup>30</sup> / page-size), where page-size is the value stored in
the page-size field of the first journal header of the journal file).
<a name="H32170"></a>
<p><b>H32170:</b>
A buffer shall only be considered to be a well-formed master journal pointer
if the value stored as a 4-byte big-endian integer starting 12 bytes before
the end of the buffer is equal to the sum of all bytes, each interpreted
as an 8-bit unsigned integer, starting at offset 4 of the buffer and continuing
until offset (buffer-size - 16) (the 17th last byte of the buffer).
<a name="H32210"></a>
<p><b>H32210:</b>
A buffer shall be considered to contain a well-formed journal section
if it is not excluded from this category by requirements H32220, H32230 or
H32240.
<a name="H32220"></a>
<p><b>H32220:</b>
A buffer shall only be considered to contain a well-formed journal section
if the first 28 bytes of it contain a well-formed journal header.
<a name="H32230"></a>
<p><b>H32230:</b>
A buffer shall only be considered to contain a well-formed journal section
if, beginning at byte offset sector-size, it contains a sequence of
record-count well-formed journal records. In this case sector-size and
record-count are the integer values stored in the sector size and record
count fields of the journal section's journal header.
<a name="H32240"></a>
<p><b>H32240:</b>
A buffer shall only be considered to contain a well-formed journal section
if it is an integer multiple of sector-size bytes in size, where sector-size
is the value stored in the sector size field of the journal section's journal
header.
<a name="H32250"></a>
<p><b>H32250:</b>
A journal record found within a valid journal file shall be considered a valid
journal record if it is not excluded from this category by requirements H32260,
H32270 or H32280.
<a name="H32260"></a>
<p><b>H32260:</b>
A journal record shall only be considered a valid journal record if it and any
other journal records that occur before it within the same journal section are
well-formed.
<a name="H32270"></a>
<p><b>H32270:</b>
A journal record shall only be considered a valid journal record if the journal
section to which it belongs begins with a well-formed journal header.
<a name="H32280"></a>
<p><b>H32280:</b>
A journal record shall only be considered a valid journal record if all journal
sections that occur before the journal section containing the journal record
are well-formed journal sections.
<a name="H32290"></a>
<p><b>H32290:</b>
Two database images shall be considered to be equivalent if they (a) have the
same page size, (b) contain the same number of pages and (c) the content of
each page of the first database image that is not a free-list leaf page is
the same as the content of the corresponding page in the second database image.
<a name="H32300"></a>
<p><b>H32300:</b>
When writing to an SQLite database file-system representation in order to
replace database image A with database image B, the file-system representation
shall at all times contain a database image equivalent to either A or B.
<a name="H32310"></a>
<p><b>H32310:</b>
If, while writing to an SQLite database file-system representation in
order to replace database image A with database image B, an operating
system or power failure should occur, then following recovery the database
file-system representation shall contain a database image equivalent to
either A or B.
<a name="H32320"></a>
<p><b>H32320:</b>
When using the rollback-journal method to modify the file-system representation
of a database image, the database writer shall ensure that before the size of
the database file is modified, the first 28 bytes of the journal file contain a
stable valid journal header with the page-size and page-count fields set to
values corresponding to the original database image.
<a name="H32330"></a>
<p><b>H32330:</b>
When using the rollback-journal method to modify the file-system representation
of a database image, the database writer shall ensure that the first 28 bytes
of the journal file do not become unstable at any point after the size of the
database file is modified until the journal file is invalidated to commit the
transaction.
<a name="H32340"></a>
<p><b>H32340:</b>
When using the rollback-journal method to modify the file-system representation
of a database image, the database writer shall ensure that before any part of
the database file that contained a page of the original database image that was
not a free-list leaf page is overwritten or made unstable, the journal file
contains a valid and stable journal record containing the original page data.
<a name="H32350"></a>
<p><b>H32350:</b>
When using the rollback-journal method to modify the file-system representation
of a database image, the database writer shall ensure that after any part of
the database file that contained a page of the original database image that was
not a free-list leaf page has been overwritten or made unstable, the corresponding
journal record (see H32340) is not modified or made unstable.
<a name="H32360"></a>
<p><b>H32360:</b>
When using the rollback-journal method to modify the file-system representation
of a database image, the database writer shall ensure that before the database
file is truncated, the journal file contains stable valid journal records
corresponding to all pages of the original database image that were part of the
region being discarded by the truncate operation and were not free-list leaf
pages.
<a name="H32370"></a>
<p><b>H32370:</b>
When using the rollback-journal method to modify the file-system representation
of a database image, the database writer shall ensure that after the database
file has been truncated the journal records corresponding to pages from the
original database image that were part of the truncated region and were not
free-list leaf pages are not modified or made unstable.
<a name="H33000"></a>
<p><b>H33000:</b>
Before reading from a database file, a database reader shall establish a
SHARED or greater lock on the database file-system representation.
<a name="H33010"></a>
<p><b>H33010:</b>
Before writing to a database file, a database writer shall establish
an EXCLUSIVE lock on the database file-system representation.
<a name="H33020"></a>
<p><b>H33020:</b>
Before writing to a journal file, a database writer shall establish
a RESERVED, PENDING or EXCLUSIVE lock on the database file-system
representation.
<a name="H33030"></a>
<p><b>H33030:</b>
Before establishing a RESERVED or PENDING lock on a database file, a
database writer shall ensure that the database file contains a valid
database image.
<a name="H33060"></a>
<p><b>H33060:</b>
Before establishing a RESERVED or PENDING lock on a database file, a
database writer shall ensure that any journal file that may be present
is not a valid journal file.
<a name="H33080"></a>
<p><b>H33080:</b>
If another database client holds either a RESERVED or PENDING lock on the
database file-system representation, then any journal file that exists within
the file system shall be considered invalid.
<a name="H33040"></a>
<p><b>H33040:</b>
A database writer shall increment the value of the database header change
counter field (H30100) as part of the first database image modification
that it performs after obtaining an EXCLUSIVE lock.
<a name="H33050"></a>
<p><b>H33050:</b>
A database writer shall increment the value of the database schema version
field (H30110) as part of the first database image modification that includes
a schema change that it performs after obtaining an EXCLUSIVE lock.
<a name="H33070"></a>
<p><b>H33070:</b>
If a database writer is required by either H33050 or H33040 to increment a
database header field, and that header field already contains the maximum
value possible (0xFFFFFFFF, or 4294967295 for 32-bit unsigned integer
fields), "incrementing" the field shall be interpreted to mean setting it to
zero.
<a name="H35010"></a>
<p><b>H35010:</b>
Except for the read operation required by H35070 and those reads made
as part of opening a read-only transaction, SQLite shall ensure that
a <i>database connection</i> has an open read-only or read/write
transaction when any data is read from the <i>database file</i>.
<a name="H35020"></a>
<p><b>H35020:</b>
Aside from those read operations described by H35070 and H21XXX, SQLite
shall read data from the database file in aligned blocks of
<i>page-size</i> bytes, where <i>page-size</i> is the database page size
used by the database file.
<a name="H35030"></a>
<p><b>H35030:</b>
While opening a <i>read-only transaction</i>, after successfully
obtaining a <i>shared lock</i> on the database file, SQLite shall
attempt to detect and roll back a <i>hot journal file</i> associated
with the same database file.
<a name="H35040"></a>
<p><b>H35040:</b>
Assuming no errors have occurred, then after attempting to detect and
roll back a <i>hot journal file</i>, if the <i>page cache</i> contains
any entries associated with the current <i>database connection</i>,
then SQLite shall validate the contents of the <i>page cache</i> by
testing the <i>file change counter</i>. This procedure is known as
<i>cache validation</i>.
<a name="H35050"></a>
<p><b>H35050:</b>
If the cache validation procedure prescribed by H35040 is required and
does not prove that the <i>page cache</i> entries associated with the
current <i>database connection</i> are valid, then SQLite shall discard
all entries associated with the current <i>database connection</i> from
the <i>page cache</i>.
<a name="H35060"></a>
<p><b>H35060:</b>
When a new <i>database connection</i> is required, SQLite shall attempt
to open a file-handle on the database file. If the attempt fails, then
no new <i>database connection</i> is created and an error returned.
<a name="H35070"></a>
<p><b>H35070:</b>
When a new <i>database connection</i> is required, after opening the
new file-handle, SQLite shall attempt to read the first 100 bytes
of the database file. If the attempt fails for any reason other than
that the opened file is less than 100 bytes in size, then
the file-handle is closed, no new <i>database connection</i> is created
and an error returned instead.
<a name="H35080"></a>
<p><b>H35080:</b>
If the <i>database file header</i> is successfully read from a newly
opened database file, the connection's <i>expected page-size</i> shall
be set to the value stored in the <i>page-size field</i> of the
database header.
<a name="H35090"></a>
<p><b>H35090:</b>
If the <i>database file header</i> cannot be read from a newly opened
database file (because the file is less than 100 bytes in size), the
connection's <i>expected page-size</i> shall be set to the compile time
value of the SQLITE_DEFAULT_PAGESIZE option.
<a name="H35100"></a>
<p><b>H35100:</b>
When required to open a <i>read-only transaction</i> using a
<i>database connection</i>, SQLite shall first attempt to obtain
a <i>shared-lock</i> on the file-handle open on the database file.
<a name="H35110"></a>
<p><b>H35110:</b>
If, while opening a <i>read-only transaction</i>, SQLite fails to obtain
the <i>shared-lock</i> on the database file, then the process is
abandoned, no transaction is opened and an error returned to the user.
<a name="H35120"></a>
<p><b>H35120:</b>
If, while opening a <i>read-only transaction</i>, SQLite encounters
an error while attempting to detect or roll back a <i>hot journal
file</i>, then the <i>shared-lock</i> on the database file is released,
no transaction is opened and an error returned to the user.
<a name="H35130"></a>
<p><b>H35130:</b>
When required to end a <i>read-only transaction</i>, SQLite shall
relinquish the <i>shared lock</i> held on the database file by
calling the xUnlock() method of the file-handle.
<a name="H35140"></a>
<p><b>H35140:</b>
When required to attempt to detect a <i>hot-journal file</i>, SQLite
shall first use the xAccess() method of the VFS layer to check if a
journal file exists in the file-system.
<a name="H35150"></a>
<p><b>H35150:</b>
When required to attempt to detect a <i>hot-journal file</i>, if the
call to xAccess() required by H35140 indicates that a journal file does
not exist, then SQLite shall conclude that there is no <i>hot-journal
file</i> in the file system and therefore that no <i>hot journal
rollback</i> is required.
<a name="H35160"></a>
<p><b>H35160:</b>
When required to attempt to detect a <i>hot-journal file</i>, if the
call to xAccess() required by H35140 indicates that a journal file
is present, then the xCheckReservedLock() method of the database file
file-handle is invoked to determine whether or not some other
process is holding a <i>reserved</i> or greater lock on the database
file.
<a name="H35170"></a>
<p><b>H35170:</b>
If the call to xCheckReservedLock() required by H35160 indicates that
some other <i>database connection</i> is holding a <i>reserved</i>
or greater lock on the database file, then SQLite shall conclude that
there is no <i>hot journal file</i>. In this case the attempt to detect
a <i>hot journal file</i> is concluded.
<a name="H35180"></a>
<p><b>H35180:</b>
When a file-handle open on a database file is unlocked, if the
<i>page cache</i> contains one or more entries belonging to the
associated <i>database connection</i>, SQLite shall store the value
of the <i>file change counter</i> internally.
<a name="H35190"></a>
<p><b>H35190:</b>
When required to perform <i>cache validation</i> as part of opening
a <i>read transaction</i>, SQLite shall read a 16 byte block
starting at byte offset 24 of the <i>database file</i> using the xRead()
method of the <i>database connection's</i> file handle.
<a name="H35200"></a>
<p><b>H35200:</b>
While performing <i>cache validation</i>, after loading the 16 byte
block as required by H35190, SQLite shall compare the 32-bit big-endian
integer stored in the first 4 bytes of the block to the most
recently stored value of the <i>file change counter</i> (see H35180).
If the values are not the same, then SQLite shall conclude that
the contents of the cache are invalid.
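<p>
The <i>cache validation</i> procedure of H35190 and H35200 can be
sketched as follows (Python; db_file is assumed to be an open binary file
object and stored_change_counter the value remembered under H35180;
illustrative only):
<code>
import struct

def cache_is_valid(db_file, stored_change_counter):
    db_file.seek(24)
    block = db_file.read(16)                           # H35190
    change_counter = struct.unpack(">I", block[0:4])[0]
    return change_counter == stored_change_counter     # H35200</code>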
<a name="H35210"></a>
<p><b>H35210:</b>
During the conclusion of a <i>read transaction</i>, before unlocking
the database file, SQLite shall set the connection's
<i>expected page size</i> to the current database <i>page-size</i>.
<a name="H35220"></a>
<p><b>H35220:</b>
As part of opening a new <i>read transaction</i>, immediately after
performing <i>cache validation</i>, if there is no data for database
page 1 in the <i>page cache</i>, SQLite shall read <i>N</i> bytes from
the start of the database file using the xRead() method of the
connection's file handle, where <i>N</i> is the connection's current
<i>expected page size</i> value.
<a name="H35230"></a>
<p><b>H35230:</b>
If page 1 data is read as required by H35220, and the value of the
<i>page-size</i> field that appears in the database file header that
consumes the first 100 bytes of the read block is not the same as the
connection's current <i>expected page size</i>, then the
<i>expected page size</i> is set to this value, the database file is
unlocked and the entire procedure to open a <i>read transaction</i>
is repeated.
<a name="H35240"></a>
<p><b>H35240:</b>
    If page 1 data is read as required by H35220 and the value of the
    <i>page-size</i> field that appears in the database file header that
    occupies the first 100 bytes of the read block is the same as the
    connection's current <i>expected page size</i>, then the block of data
    read is stored in the <i>page cache</i> as page 1.
<a name="H35270"></a>
<p><b>H35270:</b>
When required to <i>journal a database page</i>, SQLite shall first
append the <i>page number</i> of the page being journalled to the
<i>journal file</i>, formatted as a 4-byte big-endian unsigned integer,
using a single call to the xWrite method of the file-handle opened
on the journal file.
<a name="H35280"></a>
<p><b>H35280:</b>
When required to <i>journal a database page</i>, if the attempt to
append the <i>page number</i> to the journal file is successful,
then the current page data (<i>page-size</i> bytes) shall be appended
to the journal file, using a single call to the xWrite method of the
file-handle opened on the journal file.
<a name="H35290"></a>
<p><b>H35290:</b>
When required to <i>journal a database page</i>, if the attempt to
append the current page data to the journal file is successful,
then SQLite shall append a 4-byte big-endian integer checksum value
    to the journal file, using a single call to the xWrite method
of the file-handle opened on the journal file.
<a name="H35300"></a>
<p><b>H35300:</b>
The checksum value written to the <i>journal file</i> by the write
required by H35290 shall be equal to the sum of the <i>checksum
initializer</i> field stored in the <i>journal header</i> (H35700) and
every 200th byte of the page data, beginning with the
(<i>page-size</i> % 200)th byte.
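  <p>
    Requirements H35270 through H35300 together define the layout of a single
    journal record. The sketch below is illustrative only and not the SQLite
    implementation: xWrite stands for the xWrite() method of the file-handle
    open on the journal file, iOff is the offset at which the record is
    appended, and cksumInit is the checksum initializer taken from the
    journal header.
<pre>
typedef unsigned char u8;
typedef unsigned int  u32;
typedef int (*xWriteFn)(void *pFile, const void *pBuf, int iAmt, long long iOfst);

/* Encode a 32-bit big-endian integer. */
static void put32be(u8 *p, u32 v){
  p[0] = (u8)(v >> 24); p[1] = (u8)(v >> 16); p[2] = (u8)(v >> 8); p[3] = (u8)v;
}

/* Checksum as described by H35300: the checksum initializer plus every
** 200th byte of the page data, starting with byte (pageSize % 200). */
static u32 record_checksum(u32 cksumInit, const u8 *aData, int pageSize){
  u32 sum = cksumInit;
  int i;
  for(i = pageSize % 200; i < pageSize; i += 200) sum += aData[i];
  return sum;
}

/* Append one journal record (page number, page data, checksum). */
static int write_journal_record(xWriteFn xWrite, void *pJrnl, long long iOff,
                                u32 pgno, const u8 *aData, int pageSize,
                                u32 cksumInit){
  u8 a4[4];
  put32be(a4, pgno);
  if( xWrite(pJrnl, a4, 4, iOff) ) return 1;                    /* H35270 */
  if( xWrite(pJrnl, aData, pageSize, iOff + 4) ) return 1;      /* H35280 */
  put32be(a4, record_checksum(cksumInit, aData, pageSize));
  return xWrite(pJrnl, a4, 4, iOff + 4 + pageSize);             /* H35290 */
}
</pre>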
<a name="H35350"></a>
<p><b>H35350:</b>
When required to open a <i>write transaction</i> on the database,
SQLite shall first open a <i>read transaction</i>, if the <i>database
connection</i> in question has not already opened one.
<a name="H35360"></a>
<p><b>H35360:</b>
When required to open a <i>write transaction</i> on the database, after
ensuring a <i>read transaction</i> has already been opened, SQLite
shall obtain a <i>reserved lock</i> on the database file by calling
the xLock method of the file-handle open on the database file.
<a name="H35370"></a>
<p><b>H35370:</b>
When required to open a <i>write transaction</i> on the database, after
obtaining a <i>reserved lock</i> on the database file, SQLite shall
open a read/write file-handle on the corresponding <i>journal file</i>.
<a name="H35380"></a>
<p><b>H35380:</b>
When required to open a <i>write transaction</i> on the database, after
opening a file-handle on the <i>journal file</i>, SQLite shall append
a <i>journal header</i> to the (currently empty) <i>journal file</i>.
<a name="H35400"></a>
<p><b>H35400:</b>
When a <i>database connection</i> is closed, SQLite shall close the
associated file handle at the VFS level.
<a name="H35420"></a>
<p><b>H35420:</b>
SQLite shall ensure that a <i>database connection</i> has an open
read-only or read/write transaction before using data stored in the <i>page
cache</i> to satisfy user queries.
<a name="H35430"></a>
<p><b>H35430:</b>
When a <i>database connection</i> is closed, all associated <i>page
cache</i> entries shall be discarded.
<a name="H35440"></a>
<p><b>H35440:</b>
If while attempting to detect a <i>hot-journal file</i> the call to
xCheckReservedLock() indicates that no process holds a <i>reserved</i>
or greater lock on the <i>database file</i>, then SQLite shall open
a file handle on the potentially hot journal file using the VFS xOpen()
method.
<a name="H35450"></a>
<p><b>H35450:</b>
After successfully opening a file-handle on a potentially hot journal
file, SQLite shall query the file for its size in bytes using the
xFileSize() method of the open file handle.
<a name="H35460"></a>
<p><b>H35460:</b>
If the size of a potentially hot journal file is revealed to be zero
bytes by a query required by H35450, then SQLite shall close the
file handle opened on the journal file and delete the journal file using
a call to the VFS xDelete() method. In this case SQLite shall conclude
that there is no <i>hot journal file</i>.
<a name="H35470"></a>
<p><b>H35470:</b>
If the size of a potentially hot journal file is revealed to be greater
than zero bytes by a query required by H35450, then SQLite shall attempt
to upgrade the <i>shared lock</i> held by the <i>database connection</i>
on the <i>database file</i> directly to an <i>exclusive lock</i>.
<a name="H35480"></a>
<p><b>H35480:</b>
If an attempt to upgrade to an <i>exclusive lock</i> prescribed by
H35470 fails for any reason, then SQLite shall release all locks held by
the <i>database connection</i> and close the file handle opened on the
<i>journal file</i>. The attempt to open a <i>read-only transaction</i>
shall be deemed to have failed and an error returned to the user.
<a name="H35490"></a>
<p><b>H35490:</b>
If, as part of the <i>hot journal file</i> detection process, the
attempt to upgrade to an <i>exclusive lock</i> mandated by H35470 is
successful, then SQLite shall query the file-system using the xAccess()
method of the VFS implementation to test whether or not the journal
file is still present in the file-system.
<a name="H35500"></a>
<p><b>H35500:</b>
If the xAccess() query required by H35490 reveals that the journal
file is still present in the file system, then SQLite shall conclude
that the journal file is a <i>hot journal file</i> that needs to
be rolled back. SQLite shall immediately begin <i>hot journal
rollback</i>.
<a name="H35510"></a>
<p><b>H35510:</b>
If the call to xAccess() required by H35140 fails (due to an IO error or
similar), then SQLite shall abandon the attempt to open a <i>read-only
transaction</i>, relinquish the <i>shared lock</i> held on the database
file and return an error to the user.
<a name="H35520"></a>
<p><b>H35520:</b>
If the call to xCheckReservedLock() required by H35160 fails (due to an
IO or other internal VFS error), then SQLite shall abandon the attempt
to open a <i>read-only transaction</i>, relinquish the <i>shared lock</i>
held on the database file and return an error to the user.
<a name="H35530"></a>
<p><b>H35530:</b>
If the call to xOpen() required by H35440 fails (due to an IO or other
internal VFS error), then SQLite shall abandon the attempt to open a
<i>read-only transaction</i>, relinquish the <i>shared lock</i> held on
the database file and return an error to the user.
<a name="H35540"></a>
<p><b>H35540:</b>
If the call to xFileSize() required by H35450 fails (due to an IO or
other internal VFS error), then SQLite shall abandon the attempt to open
a <i>read-only transaction</i>, relinquish the <i>shared lock</i> held on
the database file, close the file handle opened on the journal file and
return an error to the user.
<a name="H35550"></a>
<p><b>H35550:</b>
    If the call to xDelete() required by H35460 fails (due to an IO or
other internal VFS error), then SQLite shall abandon the attempt to open
a <i>read-only transaction</i>, relinquish the <i>shared lock</i> held on
the database file and return an error to the user.
<a name="H35560"></a>
<p><b>H35560:</b>
If the call to xAccess() required by H35490 fails (due to an IO or
other internal VFS error), then SQLite shall abandon the attempt to open
a <i>read-only transaction</i>, relinquish the lock held on the
database file, close the file handle opened on the journal file and
return an error to the user.
<a name="H35570"></a>
<p><b>H35570:</b>
If the call to xAccess() required by H35490 reveals that the journal
file is no longer present in the file system, then SQLite shall abandon
the attempt to open a <i>read-only transaction</i>, relinquish the
lock held on the database file, close the file handle opened on the
journal file and return an SQLITE_BUSY error to the user.
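  <p>
    Taken together, H35140-H35170 and H35440-H35570 describe a single
    detection procedure. The sketch below condenses those steps into one
    hypothetical routine; the ops structure bundles the VFS and file-handle
    methods named above, the error paths omit the unlock and close steps that
    the requirements also mandate, and none of this is the actual SQLite
    pager code.
<pre>
typedef struct DetectOps DetectOps;
struct DetectOps {            /* each callback returns 0 on success */
  int (*xAccess)(const char *zJournal, int *pExists);            /* H35140 */
  int (*xCheckReservedLock)(void *pDbFd, int *pIsReserved);      /* H35160 */
  int (*xOpen)(const char *zJournal, void **ppJrnlFd);           /* H35440 */
  int (*xFileSize)(void *pJrnlFd, long long *pSize);             /* H35450 */
  int (*xDelete)(const char *zJournal);                          /* H35460 */
  int (*xUpgradeToExclusive)(void *pDbFd);                       /* H35470 */
};

/* Returns 1 if a hot journal requiring rollback exists, 0 if there is no
** hot journal, -1 on error (H35510-H35560) and -2 for the SQLITE_BUSY
** case of H35570. */
static int detect_hot_journal(DetectOps *p, const char *zJournal, void *pDbFd){
  int exists, reserved;
  long long sz;
  void *pJrnlFd;

  if( p->xAccess(zJournal, &exists) ) return -1;
  if( !exists ) return 0;                                        /* H35150 */
  if( p->xCheckReservedLock(pDbFd, &reserved) ) return -1;
  if( reserved ) return 0;                                       /* H35170 */
  if( p->xOpen(zJournal, &pJrnlFd) ) return -1;
  if( p->xFileSize(pJrnlFd, &sz) ) return -1;
  if( sz == 0 ){ p->xDelete(zJournal); return 0; }               /* H35460 */
  if( p->xUpgradeToExclusive(pDbFd) ) return -1;                 /* H35470/H35480 */
  if( p->xAccess(zJournal, &exists) ) return -1;                 /* H35490 */
  if( !exists ) return -2;                                       /* H35570 */
  return 1;                                                      /* H35500 */
}
</pre>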
<a name="H35580"></a>
<p><b>H35580:</b>
If an attempt to acquire a <i>reserved lock</i> prescribed by
requirement H35360 fails, then SQLite shall deem the attempt to
open a <i>write transaction</i> to have failed and return an error
to the user.
<a name="H35590"></a>
<p><b>H35590:</b>
When required to modify the contents of an existing database page that
existed and was not a <i>free-list leaf page</i> when the <i>write
transaction</i> was opened, SQLite shall journal the page if it has not
already been journalled within the current <i>write transaction</i>.
<a name="H35600"></a>
<p><b>H35600:</b>
When required to modify the contents of an existing database page,
SQLite shall update the cached version of the database page content
stored as part of the <i>page cache entry</i> associated with the page.
<a name="H35610"></a>
<p><b>H35610:</b>
When required to append a new database page to the database file,
SQLite shall create a new <i>page cache entry</i> corresponding to
the new page and insert it into the <i>page cache</i>. The <i>dirty
flag</i> of the new <i>page cache entry</i> shall be set.
<a name="H35620"></a>
<p><b>H35620:</b>
When required to truncate (remove) a database page that existed and was
not a <i>free-list leaf page</i> when the <i>write transaction</i> was
opened from the end of a database file, SQLite shall journal the page if
it has not already been journalled within the current <i>write
transaction</i>.
<a name="H35630"></a>
<p><b>H35630:</b>
When required to truncate a database page from the end of the database
file, SQLite shall discard the associated <i>page cache entry</i>
from the page cache.
<a name="H35640"></a>
<p><b>H35640:</b>
When required to purge a <i>non-writable dirty page</i> from the
<i>page cache</i>, SQLite shall <i>sync the journal file</i> before
    proceeding with the write operation required by H35670.
<a name="H35660"></a>
<p><b>H35660:</b>
After <i>syncing the journal file</i> as required by H35640, SQLite
shall append a new <i>journal header</i> to the <i>journal file</i>
    before proceeding with the write operation required by H35670.
<a name="H35670"></a>
<p><b>H35670:</b>
When required to purge a <i>page cache entry</i> that is a
    <i>dirty page</i>, SQLite shall write the page data into the database
file, using a single call to the xWrite method of the <i>database
connection</i> file handle.
<a name="H35680"></a>
<p><b>H35680:</b>
When required to append a <i>journal header</i> to the <i>journal
file</i>, SQLite shall do so by writing a block of <i>sector-size</i>
bytes using a single call to the xWrite method of the file-handle
open on the <i>journal file</i>. The block of data written shall begin
at the smallest sector-size aligned offset at or following the current
end of the <i>journal file</i>.
<a name="H35690"></a>
<p><b>H35690:</b>
The first 8 bytes of the <i>journal header</i> required to be written
by H35680 shall contain the following values, in order from byte offset 0
to 7: 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63 and 0xd7.
<a name="H35700"></a>
<p><b>H35700:</b>
Bytes 8-11 of the <i>journal header</i> required to be written by
H35680 shall contain 0x00.
<a name="H35710"></a>
<p><b>H35710:</b>
Bytes 12-15 of the <i>journal header</i> required to be written by
H35680 shall contain the number of pages that the database file
contained when the current <i>write-transaction</i> was started,
formatted as a 4-byte big-endian unsigned integer.
<a name="H35720"></a>
<p><b>H35720:</b>
Bytes 16-19 of the <i>journal header</i> required to be written by
H35680 shall contain pseudo-randomly generated values.
<a name="H35730"></a>
<p><b>H35730:</b>
Bytes 20-23 of the <i>journal header</i> required to be written by
H35680 shall contain the <i>sector size</i> used by the VFS layer,
formatted as a 4-byte big-endian unsigned integer.
<a name="H35740"></a>
<p><b>H35740:</b>
Bytes 24-27 of the <i>journal header</i> required to be written by
H35680 shall contain the <i>page size</i> used by the database at
the start of the <i>write transaction</i>, formatted as a 4-byte
big-endian unsigned integer.
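  <p>
    The journal-header requirements H35680 through H35740 amount to the byte
    layout sketched below. The helper names are hypothetical and the code is
    illustrative rather than the SQLite implementation; only the 28 bytes
    defined above are populated, and the rest of the sector-size block is
    left to the caller.
<pre>
typedef unsigned char u8;
typedef unsigned int  u32;

static void put32be(u8 *p, u32 v){
  p[0] = (u8)(v >> 24); p[1] = (u8)(v >> 16); p[2] = (u8)(v >> 8); p[3] = (u8)v;
}

/* Smallest sector-size aligned offset at or following the current end of
** the journal file (H35680). */
static long long journal_hdr_offset(long long journalSize, u32 sectorSize){
  return ((journalSize + sectorSize - 1) / sectorSize) * (long long)sectorSize;
}

/* Populate the defined fields of a journal header.  aHdr points to a
** buffer of sectorSize bytes. */
static void build_journal_header(u8 *aHdr, u32 nOrigPage, u32 randomValue,
                                 u32 sectorSize, u32 pageSize){
  static const u8 aMagic[8] =
      { 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, 0xd7 };       /* H35690 */
  int i;
  for(i = 0; i < 8; i++) aHdr[i] = aMagic[i];
  put32be(&aHdr[8],  0);             /* H35700: bytes 8-11 contain 0x00     */
  put32be(&aHdr[12], nOrigPage);     /* H35710: page count at txn start     */
  put32be(&aHdr[16], randomValue);   /* H35720: pseudo-random values        */
  put32be(&aHdr[20], sectorSize);    /* H35730: VFS sector size             */
  put32be(&aHdr[24], pageSize);      /* H35740: database page size          */
}
</pre>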
<a name="H35750"></a>
<p><b>H35750:</b>
When required to <i>sync the journal file</i>, SQLite shall invoke the
xSync method of the file handle open on the <i>journal file</i>.
<a name="H35760"></a>
<p><b>H35760:</b>
When required to <i>sync the journal file</i>, after invoking the
xSync method as required by H35750, SQLite shall update the <i>record
count</i> of the <i>journal header</i> most recently written to the
<i>journal file</i>. The 4-byte field shall be updated to contain
the number of <i>journal records</i> that have been written to the
<i>journal file</i> since the <i>journal header</i> was written,
formatted as a 4-byte big-endian unsigned integer.
<a name="H35770"></a>
<p><b>H35770:</b>
When required to <i>sync the journal file</i>, after updating the
<i>record count</i> field of a <i>journal header</i> as required by
H35760, SQLite shall invoke the xSync method of the file handle open
on the <i>journal file</i>.
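  <p>
    The sync sequence of H35750-H35770 can be sketched as follows. This is
    illustrative only and not the SQLite implementation; it assumes that the
    record count being updated is the 4-byte field at bytes 8-11 of the most
    recently written journal header (H35700), and xSync/xWrite stand for the
    corresponding methods of the file handle open on the journal file.
<pre>
typedef unsigned char u8;
typedef unsigned int  u32;
typedef int (*xWriteFn)(void *pFile, const void *pBuf, int iAmt, long long iOfst);
typedef int (*xSyncFn)(void *pFile);

static int sync_journal(xSyncFn xSync, xWriteFn xWrite, void *pJrnl,
                        long long lastHdrOffset, u32 nRecord){
  u8 a4[4];
  if( xSync(pJrnl) ) return 1;                                   /* H35750 */
  a4[0] = (u8)(nRecord >> 24); a4[1] = (u8)(nRecord >> 16);
  a4[2] = (u8)(nRecord >> 8);  a4[3] = (u8)nRecord;
  /* H35760: update the record count of the most recent journal header
  ** (assumed here to be the field at bytes 8-11 of that header). */
  if( xWrite(pJrnl, a4, 4, lastHdrOffset + 8) ) return 1;
  return xSync(pJrnl);                                           /* H35770 */
}
</pre>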
<a name="H35780"></a>
<p><b>H35780:</b>
When required to upgrade to an <i>exclusive lock</i> as part of a write
transaction, SQLite shall first attempt to obtain a <i>pending lock</i>
    on the database file, if one is not already held, by invoking the xLock
method of the file handle opened on the <i>database file</i>.
<a name="H35790"></a>
<p><b>H35790:</b>
When required to upgrade to an <i>exclusive lock</i> as part of a write
transaction, after successfully obtaining a <i>pending lock</i> SQLite
shall attempt to obtain an <i>exclusive lock</i> by invoking the
xLock method of the file handle opened on the <i>database file</i>.
<a name="H35800"></a>
<p><b>H35800:</b>
When required to <i>commit a write-transaction</i>, SQLite shall
modify page 1 to increment the value stored in the <i>change counter</i>
field of the <i>database file header</i>.
<a name="H35810"></a>
<p><b>H35810:</b>
When required to <i>commit a write-transaction</i>, after incrementing
the <i>change counter</i> field, SQLite shall <i>sync the journal
file</i>.
<a name="H35820"></a>
<p><b>H35820:</b>
When required to <i>commit a write-transaction</i>, after <i>syncing
the journal file</i> as required by H35810, if an <i>exclusive lock</i>
on the database file is not already held, SQLite shall attempt to
<i>upgrade to an exclusive lock</i>.
<a name="H35830"></a>
<p><b>H35830:</b>
When required to <i>commit a write-transaction</i>, after <i>syncing
the journal file</i> as required by H35810 and ensuring that an
    <i>exclusive lock</i> is held on the database file as required by
    H35820, SQLite shall copy the contents of all <i>dirty pages</i>
    stored in the <i>page cache</i> into the <i>database file</i> using
calls to the xWrite method of the <i>database connection</i> file
handle. Each call to xWrite shall write the contents of a single
<i>dirty page</i> (<i>page-size</i> bytes of data) to the database
file. Dirty pages shall be written in order of <i>page number</i>,
from lowest to highest.
<a name="H35840"></a>
<p><b>H35840:</b>
When required to <i>commit a write-transaction</i>, after copying the
contents of any <i>dirty pages</i> to the database file as required
by H35830, SQLite shall sync the database file by invoking the xSync
method of the <i>database connection</i> file handle.
<a name="H35850"></a>
<p><b>H35850:</b>
When required to <i>commit a write-transaction</i>, after syncing
the database file as required by H35840, SQLite shall close the
file-handle opened on the <i>journal file</i> and delete the
<i>journal file</i> from the file system via a call to the VFS
xDelete method.
<a name="H35860"></a>
<p><b>H35860:</b>
When required to <i>commit a write-transaction</i>, after deleting
the <i>journal file</i> as required by H35850, SQLite shall relinquish
all locks held on the <i>database file</i> by invoking the xUnlock
method of the <i>database connection</i> file handle.
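  <p>
    The commit steps H35800-H35860 can be summarised by the sketch below.
    Each function pointer stands in for the operation named by the
    corresponding requirement, the dirty pages are assumed to be supplied in
    ascending page-number order, and the code is illustrative rather than the
    actual SQLite pager.
<pre>
typedef unsigned char u8;
typedef unsigned int  u32;
typedef struct CommitOps CommitOps;
struct CommitOps {            /* each callback returns 0 on success */
  int (*xIncrementChangeCounter)(void *pPager);                  /* H35800 */
  int (*xSyncJournal)(void *pPager);                             /* H35810 */
  int (*xUpgradeToExclusive)(void *pPager);                      /* H35820 */
  int (*xWritePage)(void *pPager, u32 pgno, const u8 *aData);    /* H35830 */
  int (*xSyncDatabase)(void *pPager);                            /* H35840 */
  int (*xCloseAndDeleteJournal)(void *pPager);                   /* H35850 */
  int (*xUnlockDatabase)(void *pPager);                          /* H35860 */
};

static int commit_write_transaction(CommitOps *p, void *pPager,
                                    const u32 *aPgno, const u8 **apData,
                                    int nDirty, int haveExclusive){
  int i;
  if( p->xIncrementChangeCounter(pPager) ) return 1;
  if( p->xSyncJournal(pPager) ) return 1;
  if( !haveExclusive && p->xUpgradeToExclusive(pPager) ) return 1;
  for(i = 0; i < nDirty; i++){          /* lowest page number first */
    if( p->xWritePage(pPager, aPgno[i], apData[i]) ) return 1;
  }
  if( p->xSyncDatabase(pPager) ) return 1;
  if( p->xCloseAndDeleteJournal(pPager) ) return 1;
  return p->xUnlockDatabase(pPager);
}
</pre>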
<hr><small><i>
This page last modified 2009/02/19 14:35:32 UTC
</i></small></div></body></html>
|
Java
|
---
title: Schnapper Rock B
subtitle: Schnapper Rock Rd, Albany
layout: default
modal-id: 3
date: 2015-7-1
thumbnail: dreams-thumbnail.png
bedroom: 5
bathroom: 3
livingroom: 3
parking: 2
landarea: 786
floorarea: 370
description: Sold.
images:
- url: assets/img/portfolio/238csrlo1.jpg
alt: Schnapper Rock
title: Schnapper Rock Rd, Albany
- url: assets/img/portfolio/238csro2.jpg
alt: Schnapper Rock
title: Schnapper Rock Rd, Albany
- url: assets/img/portfolio/238csrl3.jpg
alt: Schnapper Rock
title: Schnapper Rock Rd, Albany
- url: assets/img/portfolio/238csrl6.jpg
alt: Schnapper Rock
title: Schnapper Rock Rd, Albany
- url: assets/img/portfolio/238csrbd2.jpg
alt: Schnapper Rock
title: Schnapper Rock Rd, Albany
- url: assets/img/portfolio/238csrb2.jpg
alt: Schnapper Rock
title: Schnapper Rock Rd, Albany
---
|
Java
|
package cn.xishan.oftenporter.porter.core.init;
import cn.xishan.oftenporter.porter.core.advanced.IConfigData;
import com.alibaba.fastjson.JSON;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* @author Created by https://github.com/CLovinr on 2018-12-21.
*/
public class DealSharpProperties
{
private static final Logger LOGGER = LoggerFactory.getLogger(DealSharpProperties.class);
private static class PropOne
{
private String propKey,
originValue;
private int startIndex, endIndex;
public PropOne(String propKey, String originValue, int startIndex, int endIndex)
{
this.propKey = propKey;
this.originValue = originValue;
this.startIndex = startIndex;
this.endIndex = endIndex;
}
public String getPropKey()
{
return propKey;
}
public String replace(String propValue)
{
String str = originValue.substring(0, startIndex) + propValue + originValue.substring(endIndex);
return str;
}
}
/**
* Replaces every #{propertyName} placeholder in the given string.
*
* @param string     the string containing #{...} placeholders
* @param properties the property values used for the replacement
* @param forEmpty   if not null, used to replace any placeholder whose property is not present.
* @return the string with all placeholders replaced
*/
public static String replaceSharpProperties(String string, Map<String, ?> properties, String forEmpty)
{
for (Map.Entry<String, ?> entry : properties.entrySet())
{
if (string.contains("#{" + entry.getKey() + "}"))
{
String rs;
// if (entry.getValue() instanceof Map || entry.getValue() instanceof Collection)
// {
// rs = JSON.toJSONString(entry.getValue());
// } else
// {
// rs = String.valueOf(entry.getValue());
// }
if (entry.getValue() instanceof CharSequence)
{
rs = String.valueOf(entry.getValue());
} else if (entry.getValue() == null)
{
rs = "";
} else
{
rs = JSON.toJSONString(entry.getValue());
}
string = string.replace("#{" + entry.getKey() + "}", rs);
}
}
if (forEmpty != null)
{
string = string.replaceAll("#\\{[^{}]+\\}", forEmpty);// replace placeholders that were not set
}
return string;
}
/**
* Replaces #{properName} variables.
*
* @param srcMap        map whose property values are to be replaced
* @param propertiesMap map providing the property values
*/
public static void dealSharpProperties(Map srcMap, Map propertiesMap)
{
dealSharpProperties(srcMap, propertiesMap, false);
}
/**
* Replaces #{properName} variables.
*
* @param srcMap        map whose property values are to be replaced
* @param propertiesMap map providing the property values
* @param keepNotFound  whether to keep variables whose property is not found.
*/
public static void dealSharpProperties(Map srcMap, Map propertiesMap, boolean keepNotFound)
{
Set<String> containsVar = null;
boolean isFirst = true;
boolean hasSet = true;
//process properties
while (hasSet)
{
hasSet = false;
Collection<String> nameCollection;
if (isFirst)
{
nameCollection = srcMap.keySet();
} else
{
nameCollection = containsVar;
}
containsVar = new HashSet<>();
for (String properName : nameCollection)
{
Object value = srcMap.get(properName);
if (!(value instanceof CharSequence))
{
continue;
}
String valueString = String.valueOf(value);
PropOne propOne = getPropertiesKey(String.valueOf(valueString));
if (propOne != null && propOne.getPropKey().equals(properName))
{
throw new RuntimeException(
"can not set property of \"" + properName + "\" with value \"" + valueString + "\",prop name eq value attr name");
} else if (propOne != null)
{
containsVar.add(properName);
if (LOGGER.isDebugEnabled())
{
LOGGER.debug("replace sharp property:key={},replace-attr={},origin-value={}", properName,
propOne.getPropKey(), valueString);
}
String replaceStr = null;
if (propertiesMap.containsKey(propOne.getPropKey()))
{
replaceStr = String.valueOf(propertiesMap.get(propOne.getPropKey()));
} else
{
if (keepNotFound)
{
containsVar.remove(properName);
} else
{
replaceStr = "";
LOGGER.warn("proper value with key '{}' is empty", propOne.getPropKey());
}
}
if (replaceStr != null)
{
String newValue = propOne.replace(replaceStr);
srcMap.put(properName, newValue);
if (LOGGER.isDebugEnabled())
{
LOGGER.debug("replace sharp property:key={},new-value={}", properName, newValue);
}
}
hasSet = true;
}
}
isFirst = false;
}
}
static void dealProperties(IConfigData configData)
{
Set<String> containsVar = null;
boolean isFirst = true;
boolean hasSet = true;
//process properties
while (hasSet)
{
hasSet = false;
Collection<String> nameCollection;
if (isFirst)
{
nameCollection = configData.propertyNames();
} else
{
nameCollection = containsVar;
}
containsVar = new HashSet<>();
for (String properName : nameCollection)
{
Object value = configData.get(properName);
if (!(value instanceof CharSequence))
{
continue;
}
String valueString = String.valueOf(value);
PropOne propOne = getPropertiesKey(String.valueOf(valueString));
if (propOne != null && propOne.getPropKey().equals(properName))
{
throw new RuntimeException(
"can not set property of " + properName + " with value \"" + valueString + "\"");
} else if (propOne != null)
{
containsVar.add(properName);
if (LOGGER.isDebugEnabled())
{
LOGGER.debug("replace sharp property:key={},replace-attr={},origin-value={}", properName,
propOne.getPropKey(), valueString);
}
String replaceStr;
if (configData.contains(propOne.getPropKey()))
{
replaceStr = configData.getString(propOne.getPropKey());
} else
{
replaceStr = "";
LOGGER.warn("proper value with key '{}' is empty", propOne.getPropKey());
}
String newValue = propOne.replace(replaceStr);
configData.set(properName, newValue);
if (LOGGER.isDebugEnabled())
{
LOGGER.debug("replace sharp property:key={},new-value={}", properName, newValue);
}
hasSet = true;
}
}
isFirst = false;
}
}
private static final Pattern PROPERTIES_PATTERN = Pattern.compile("#\\{([^{}]+)}");
private static PropOne getPropertiesKey(String value)
{
Matcher matcher = PROPERTIES_PATTERN.matcher(value);
if (matcher.find())
{
PropOne propOne = new PropOne(matcher.group(1).trim(), value, matcher.start(), matcher.end());
return propOne;
} else
{
return null;
}
}
}
|
Java
|
package dk.dbc.kafka.dispatch.sources;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.Optional;
/**
* Source for reading InputStreams line-by-line
* @author Adam Tulinius
*/
public class InputStreamSource extends Source<String> {
private BufferedReader reader;
public InputStreamSource(InputStream inputStream) {
this.reader = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8));
}
@Override
public Optional<String> next() throws IOException {
String line = reader.readLine();
if (line != null) {
return Optional.of(line);
} else {
return Optional.empty();
}
}
}
|
Java
|
/// <reference path="browser/ambient/angular-protractor/index.d.ts" />
/// <reference path="browser/ambient/angular/index.d.ts" />
/// <reference path="browser/ambient/assertion-error/index.d.ts" />
/// <reference path="browser/ambient/chai/index.d.ts" />
/// <reference path="browser/ambient/jquery/index.d.ts" />
/// <reference path="browser/ambient/lodash/index.d.ts" />
/// <reference path="browser/ambient/mocha/index.d.ts" />
/// <reference path="browser/ambient/selenium-webdriver/index.d.ts" />
/// <reference path="browser/ambient/sinon-chai/index.d.ts" />
/// <reference path="browser/ambient/sinon/index.d.ts" />
/// <reference path="browser/ambient/socket.io-client/index.d.ts" />
|
Java
|
# Piper paraense C.DC. SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
#### Remarks
null
|
Java
|
/*
Copyright 2022 Rancher Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by main. DO NOT EDIT.
package v3
import (
"context"
"time"
"github.com/rancher/lasso/pkg/client"
"github.com/rancher/lasso/pkg/controller"
v3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3"
"github.com/rancher/wrangler/pkg/generic"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
)
type PodSecurityPolicyTemplateHandler func(string, *v3.PodSecurityPolicyTemplate) (*v3.PodSecurityPolicyTemplate, error)
type PodSecurityPolicyTemplateController interface {
generic.ControllerMeta
PodSecurityPolicyTemplateClient
OnChange(ctx context.Context, name string, sync PodSecurityPolicyTemplateHandler)
OnRemove(ctx context.Context, name string, sync PodSecurityPolicyTemplateHandler)
Enqueue(name string)
EnqueueAfter(name string, duration time.Duration)
Cache() PodSecurityPolicyTemplateCache
}
type PodSecurityPolicyTemplateClient interface {
Create(*v3.PodSecurityPolicyTemplate) (*v3.PodSecurityPolicyTemplate, error)
Update(*v3.PodSecurityPolicyTemplate) (*v3.PodSecurityPolicyTemplate, error)
Delete(name string, options *metav1.DeleteOptions) error
Get(name string, options metav1.GetOptions) (*v3.PodSecurityPolicyTemplate, error)
List(opts metav1.ListOptions) (*v3.PodSecurityPolicyTemplateList, error)
Watch(opts metav1.ListOptions) (watch.Interface, error)
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v3.PodSecurityPolicyTemplate, err error)
}
type PodSecurityPolicyTemplateCache interface {
Get(name string) (*v3.PodSecurityPolicyTemplate, error)
List(selector labels.Selector) ([]*v3.PodSecurityPolicyTemplate, error)
AddIndexer(indexName string, indexer PodSecurityPolicyTemplateIndexer)
GetByIndex(indexName, key string) ([]*v3.PodSecurityPolicyTemplate, error)
}
type PodSecurityPolicyTemplateIndexer func(obj *v3.PodSecurityPolicyTemplate) ([]string, error)
type podSecurityPolicyTemplateController struct {
controller controller.SharedController
client *client.Client
gvk schema.GroupVersionKind
groupResource schema.GroupResource
}
func NewPodSecurityPolicyTemplateController(gvk schema.GroupVersionKind, resource string, namespaced bool, controller controller.SharedControllerFactory) PodSecurityPolicyTemplateController {
c := controller.ForResourceKind(gvk.GroupVersion().WithResource(resource), gvk.Kind, namespaced)
return &podSecurityPolicyTemplateController{
controller: c,
client: c.Client(),
gvk: gvk,
groupResource: schema.GroupResource{
Group: gvk.Group,
Resource: resource,
},
}
}
func FromPodSecurityPolicyTemplateHandlerToHandler(sync PodSecurityPolicyTemplateHandler) generic.Handler {
return func(key string, obj runtime.Object) (ret runtime.Object, err error) {
var v *v3.PodSecurityPolicyTemplate
if obj == nil {
v, err = sync(key, nil)
} else {
v, err = sync(key, obj.(*v3.PodSecurityPolicyTemplate))
}
if v == nil {
return nil, err
}
return v, err
}
}
func (c *podSecurityPolicyTemplateController) Updater() generic.Updater {
return func(obj runtime.Object) (runtime.Object, error) {
newObj, err := c.Update(obj.(*v3.PodSecurityPolicyTemplate))
if newObj == nil {
return nil, err
}
return newObj, err
}
}
func UpdatePodSecurityPolicyTemplateDeepCopyOnChange(client PodSecurityPolicyTemplateClient, obj *v3.PodSecurityPolicyTemplate, handler func(obj *v3.PodSecurityPolicyTemplate) (*v3.PodSecurityPolicyTemplate, error)) (*v3.PodSecurityPolicyTemplate, error) {
if obj == nil {
return obj, nil
}
copyObj := obj.DeepCopy()
newObj, err := handler(copyObj)
if newObj != nil {
copyObj = newObj
}
if obj.ResourceVersion == copyObj.ResourceVersion && !equality.Semantic.DeepEqual(obj, copyObj) {
return client.Update(copyObj)
}
return copyObj, err
}
func (c *podSecurityPolicyTemplateController) AddGenericHandler(ctx context.Context, name string, handler generic.Handler) {
c.controller.RegisterHandler(ctx, name, controller.SharedControllerHandlerFunc(handler))
}
func (c *podSecurityPolicyTemplateController) AddGenericRemoveHandler(ctx context.Context, name string, handler generic.Handler) {
c.AddGenericHandler(ctx, name, generic.NewRemoveHandler(name, c.Updater(), handler))
}
func (c *podSecurityPolicyTemplateController) OnChange(ctx context.Context, name string, sync PodSecurityPolicyTemplateHandler) {
c.AddGenericHandler(ctx, name, FromPodSecurityPolicyTemplateHandlerToHandler(sync))
}
func (c *podSecurityPolicyTemplateController) OnRemove(ctx context.Context, name string, sync PodSecurityPolicyTemplateHandler) {
c.AddGenericHandler(ctx, name, generic.NewRemoveHandler(name, c.Updater(), FromPodSecurityPolicyTemplateHandlerToHandler(sync)))
}
func (c *podSecurityPolicyTemplateController) Enqueue(name string) {
c.controller.Enqueue("", name)
}
func (c *podSecurityPolicyTemplateController) EnqueueAfter(name string, duration time.Duration) {
c.controller.EnqueueAfter("", name, duration)
}
func (c *podSecurityPolicyTemplateController) Informer() cache.SharedIndexInformer {
return c.controller.Informer()
}
func (c *podSecurityPolicyTemplateController) GroupVersionKind() schema.GroupVersionKind {
return c.gvk
}
func (c *podSecurityPolicyTemplateController) Cache() PodSecurityPolicyTemplateCache {
return &podSecurityPolicyTemplateCache{
indexer: c.Informer().GetIndexer(),
resource: c.groupResource,
}
}
func (c *podSecurityPolicyTemplateController) Create(obj *v3.PodSecurityPolicyTemplate) (*v3.PodSecurityPolicyTemplate, error) {
result := &v3.PodSecurityPolicyTemplate{}
return result, c.client.Create(context.TODO(), "", obj, result, metav1.CreateOptions{})
}
func (c *podSecurityPolicyTemplateController) Update(obj *v3.PodSecurityPolicyTemplate) (*v3.PodSecurityPolicyTemplate, error) {
result := &v3.PodSecurityPolicyTemplate{}
return result, c.client.Update(context.TODO(), "", obj, result, metav1.UpdateOptions{})
}
func (c *podSecurityPolicyTemplateController) Delete(name string, options *metav1.DeleteOptions) error {
if options == nil {
options = &metav1.DeleteOptions{}
}
return c.client.Delete(context.TODO(), "", name, *options)
}
func (c *podSecurityPolicyTemplateController) Get(name string, options metav1.GetOptions) (*v3.PodSecurityPolicyTemplate, error) {
result := &v3.PodSecurityPolicyTemplate{}
return result, c.client.Get(context.TODO(), "", name, result, options)
}
func (c *podSecurityPolicyTemplateController) List(opts metav1.ListOptions) (*v3.PodSecurityPolicyTemplateList, error) {
result := &v3.PodSecurityPolicyTemplateList{}
return result, c.client.List(context.TODO(), "", result, opts)
}
func (c *podSecurityPolicyTemplateController) Watch(opts metav1.ListOptions) (watch.Interface, error) {
return c.client.Watch(context.TODO(), "", opts)
}
func (c *podSecurityPolicyTemplateController) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (*v3.PodSecurityPolicyTemplate, error) {
result := &v3.PodSecurityPolicyTemplate{}
return result, c.client.Patch(context.TODO(), "", name, pt, data, result, metav1.PatchOptions{}, subresources...)
}
type podSecurityPolicyTemplateCache struct {
indexer cache.Indexer
resource schema.GroupResource
}
func (c *podSecurityPolicyTemplateCache) Get(name string) (*v3.PodSecurityPolicyTemplate, error) {
obj, exists, err := c.indexer.GetByKey(name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(c.resource, name)
}
return obj.(*v3.PodSecurityPolicyTemplate), nil
}
func (c *podSecurityPolicyTemplateCache) List(selector labels.Selector) (ret []*v3.PodSecurityPolicyTemplate, err error) {
err = cache.ListAll(c.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v3.PodSecurityPolicyTemplate))
})
return ret, err
}
func (c *podSecurityPolicyTemplateCache) AddIndexer(indexName string, indexer PodSecurityPolicyTemplateIndexer) {
utilruntime.Must(c.indexer.AddIndexers(map[string]cache.IndexFunc{
indexName: func(obj interface{}) (strings []string, e error) {
return indexer(obj.(*v3.PodSecurityPolicyTemplate))
},
}))
}
func (c *podSecurityPolicyTemplateCache) GetByIndex(indexName, key string) (result []*v3.PodSecurityPolicyTemplate, err error) {
objs, err := c.indexer.ByIndex(indexName, key)
if err != nil {
return nil, err
}
result = make([]*v3.PodSecurityPolicyTemplate, 0, len(objs))
for _, obj := range objs {
result = append(result, obj.(*v3.PodSecurityPolicyTemplate))
}
return result, nil
}
|
Java
|
---
layout: default.html.ejs
title: Set body class
akBodyClassAdd: 'addedClass'
---
|
Java
|
package com.planet_ink.coffee_mud.Commands;
import com.planet_ink.coffee_mud.core.interfaces.*;
import com.planet_ink.coffee_mud.core.*;
import com.planet_ink.coffee_mud.Libraries.interfaces.*;
import com.planet_ink.coffee_mud.Abilities.interfaces.*;
import com.planet_ink.coffee_mud.Areas.interfaces.*;
import com.planet_ink.coffee_mud.Behaviors.interfaces.*;
import com.planet_ink.coffee_mud.CharClasses.interfaces.*;
import com.planet_ink.coffee_mud.Commands.interfaces.*;
import com.planet_ink.coffee_mud.Common.interfaces.*;
import com.planet_ink.coffee_mud.Exits.interfaces.*;
import com.planet_ink.coffee_mud.Items.interfaces.*;
import com.planet_ink.coffee_mud.Locales.interfaces.*;
import com.planet_ink.coffee_mud.MOBS.interfaces.*;
import com.planet_ink.coffee_mud.Races.interfaces.*;
import java.util.*;
/*
Copyright 2000-2010 Bo Zimmerman
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
@SuppressWarnings("unchecked")
public class Go extends StdCommand
{
public Go(){}
private String[] access={"GO","WALK"};
public String[] getAccessWords(){return access;}
public int energyExpenseFactor(){return 1;}
public void ridersBehind(Vector riders,
Room sourceRoom,
Room destRoom,
int directionCode,
boolean flee)
{
if(riders!=null)
for(int r=0;r<riders.size();r++)
{
Rider rider=(Rider)riders.elementAt(r);
if(rider instanceof MOB)
{
MOB rMOB=(MOB)rider;
if((rMOB.location()==sourceRoom)
||(rMOB.location()==destRoom))
{
boolean fallOff=false;
if(rMOB.location()==sourceRoom)
{
if(rMOB.riding()!=null)
rMOB.tell("You ride "+rMOB.riding().name()+" "+Directions.getDirectionName(directionCode)+".");
if(!move(rMOB,directionCode,flee,false,true,false))
fallOff=true;
}
if(fallOff)
{
if(rMOB.riding()!=null)
rMOB.tell("You fall off "+rMOB.riding().name()+"!");
rMOB.setRiding(null);
}
}
else
rMOB.setRiding(null);
}
else
if(rider instanceof Item)
{
Item rItem=(Item)rider;
if((rItem.owner()==sourceRoom)
||(rItem.owner()==destRoom))
destRoom.bringItemHere(rItem,-1,false);
else
rItem.setRiding(null);
}
}
}
public static Vector addRiders(Rider theRider,
Rideable riding,
Vector riders)
{
if((riding!=null)&&(riding.mobileRideBasis()))
for(int r=0;r<riding.numRiders();r++)
{
Rider rider=riding.fetchRider(r);
if((rider!=null)
&&(rider!=theRider)
&&(!riders.contains(rider)))
{
riders.addElement(rider);
if(rider instanceof Rideable)
addRiders(theRider,(Rideable)rider,riders);
}
}
return riders;
}
public Vector ridersAhead(Rider theRider,
Room sourceRoom,
Room destRoom,
int directionCode,
boolean flee)
{
Vector riders=new Vector();
Rideable riding=theRider.riding();
Vector rideables=new Vector();
while((riding!=null)&&(riding.mobileRideBasis()))
{
rideables.addElement(riding);
addRiders(theRider,riding,riders);
if((riding instanceof Rider)&&((Rider)riding).riding()!=theRider.riding())
riding=((Rider)riding).riding();
else
riding=null;
}
if(theRider instanceof Rideable)
addRiders(theRider,(Rideable)theRider,riders);
for(int r=riders.size()-1;r>=0;r--)
{
Rider R=(Rider)riders.elementAt(r);
if((R instanceof Rideable)&&(((Rideable)R).numRiders()>0))
{
if(!rideables.contains(R))
rideables.addElement(R);
riders.removeElement(R);
}
}
for(int r=0;r<rideables.size();r++)
{
riding=(Rideable)rideables.elementAt(r);
if((riding instanceof Item)
&&((sourceRoom).isContent((Item)riding)))
destRoom.bringItemHere((Item)riding,-1,false);
else
if((riding instanceof MOB)
&&((sourceRoom).isInhabitant((MOB)riding)))
{
((MOB)riding).tell("You are ridden "+Directions.getDirectionName(directionCode)+".");
if(!move(((MOB)riding),directionCode,false,false,true,false))
{
if(theRider instanceof MOB)
((MOB)theRider).tell(((MOB)riding).name()+" won't seem to let you go that way.");
r=r-1;
for(;r>=0;r--)
{
riding=(Rideable)rideables.elementAt(r);
if((riding instanceof Item)
&&((destRoom).isContent((Item)riding)))
sourceRoom.bringItemHere((Item)riding,-1,false);
else
if((riding instanceof MOB)
&&(((MOB)riding).isMonster())
&&((destRoom).isInhabitant((MOB)riding)))
sourceRoom.bringMobHere((MOB)riding,false);
}
return null;
}
}
}
return riders;
}
public boolean move(MOB mob,
int directionCode,
boolean flee,
boolean nolook,
boolean noriders)
{
return move(mob,directionCode,flee,nolook,noriders,false);
}
public boolean move(MOB mob,
int directionCode,
boolean flee,
boolean nolook,
boolean noriders,
boolean always)
{
if(directionCode<0) return false;
if(mob==null) return false;
Room thisRoom=mob.location();
if(thisRoom==null) return false;
Room destRoom=thisRoom.getRoomInDir(directionCode);
Exit exit=thisRoom.getExitInDir(directionCode);
if(destRoom==null)
{
mob.tell("You can't go that way.");
return false;
}
Exit opExit=thisRoom.getReverseExit(directionCode);
String directionName=(directionCode==Directions.GATE)&&(exit!=null)?"through "+exit.name():Directions.getDirectionName(directionCode);
String otherDirectionName=(Directions.getOpDirectionCode(directionCode)==Directions.GATE)&&(exit!=null)?exit.name():Directions.getFromDirectionName(Directions.getOpDirectionCode(directionCode));
int generalMask=always?CMMsg.MASK_ALWAYS:0;
int leaveCode=generalMask|CMMsg.MSG_LEAVE;
if(flee)
leaveCode=generalMask|CMMsg.MSG_FLEE;
CMMsg enterMsg=null;
CMMsg leaveMsg=null;
if((mob.riding()!=null)&&(mob.riding().mobileRideBasis()))
{
enterMsg=CMClass.getMsg(mob,destRoom,exit,generalMask|CMMsg.MSG_ENTER,null,CMMsg.MSG_ENTER,null,CMMsg.MSG_ENTER,"<S-NAME> ride(s) "+mob.riding().name()+" in from "+otherDirectionName+".");
leaveMsg=CMClass.getMsg(mob,thisRoom,opExit,leaveCode,((flee)?"You flee "+directionName+".":null),leaveCode,null,leaveCode,((flee)?"<S-NAME> flee(s) with "+mob.riding().name()+" "+directionName+".":"<S-NAME> ride(s) "+mob.riding().name()+" "+directionName+"."));
}
else
{
enterMsg=CMClass.getMsg(mob,destRoom,exit,generalMask|CMMsg.MSG_ENTER,null,CMMsg.MSG_ENTER,null,CMMsg.MSG_ENTER,"<S-NAME> "+CMLib.flags().dispositionString(mob,CMFlagLibrary.flag_arrives)+" from "+otherDirectionName+".");
leaveMsg=CMClass.getMsg(mob,thisRoom,opExit,leaveCode,((flee)?"You flee "+directionName+".":null),leaveCode,null,leaveCode,((flee)?"<S-NAME> flee(s) "+directionName+".":"<S-NAME> "+CMLib.flags().dispositionString(mob,CMFlagLibrary.flag_leaves)+" "+directionName+"."));
}
boolean gotoAllowed=CMSecurity.isAllowed(mob,destRoom,"GOTO");
if((exit==null)&&(!gotoAllowed))
{
mob.tell("You can't go that way.");
return false;
}
else
if(exit==null)
thisRoom.showHappens(CMMsg.MSG_OK_VISUAL,"The area to the "+directionName+" shimmers and becomes transparent.");
else
if((!exit.okMessage(mob,enterMsg))&&(!gotoAllowed))
return false;
else
if(!leaveMsg.target().okMessage(mob,leaveMsg)&&(!gotoAllowed))
return false;
else
if((opExit!=null)&&(!opExit.okMessage(mob,leaveMsg))&&(!gotoAllowed))
return false;
else
if(!enterMsg.target().okMessage(mob,enterMsg)&&(!gotoAllowed))
return false;
else
if(!mob.okMessage(mob,enterMsg)&&(!gotoAllowed))
return false;
if(mob.riding()!=null)
{
if((!mob.riding().okMessage(mob,enterMsg))&&(!gotoAllowed))
return false;
}
else
{
if(!mob.isMonster())
for(int i=0;i<energyExpenseFactor();i++)
mob.curState().expendEnergy(mob,mob.maxState(),true);
if((!flee)&&(!mob.curState().adjMovement(-1,mob.maxState()))&&(!gotoAllowed))
{
mob.tell("You are too tired.");
return false;
}
if((mob.soulMate()==null)&&(mob.playerStats()!=null)&&(mob.riding()==null)&&(mob.location()!=null))
mob.playerStats().adjHygiene(mob.location().pointsPerMove(mob));
}
Vector riders=null;
if(!noriders)
{
riders=ridersAhead(mob,(Room)leaveMsg.target(),(Room)enterMsg.target(),directionCode,flee);
if(riders==null) return false;
}
Vector enterTrailersSoFar=null;
Vector leaveTrailersSoFar=null;
if((leaveMsg.trailerMsgs()!=null)&&(leaveMsg.trailerMsgs().size()>0))
{
leaveTrailersSoFar=new Vector();
leaveTrailersSoFar.addAll(leaveMsg.trailerMsgs());
leaveMsg.trailerMsgs().clear();
}
if((enterMsg.trailerMsgs()!=null)&&(enterMsg.trailerMsgs().size()>0))
{
enterTrailersSoFar=new Vector();
enterTrailersSoFar.addAll(enterMsg.trailerMsgs());
enterMsg.trailerMsgs().clear();
}
if(exit!=null) exit.executeMsg(mob,enterMsg);
if(mob.location()!=null) mob.location().delInhabitant(mob);
((Room)leaveMsg.target()).send(mob,leaveMsg);
if(enterMsg.target()==null)
{
((Room)leaveMsg.target()).bringMobHere(mob,false);
mob.tell("You can't go that way.");
return false;
}
mob.setLocation((Room)enterMsg.target());
((Room)enterMsg.target()).addInhabitant(mob);
((Room)enterMsg.target()).send(mob,enterMsg);
if(opExit!=null) opExit.executeMsg(mob,leaveMsg);
if(!nolook)
{
CMLib.commands().postLook(mob,true);
if((!mob.isMonster())
&&(CMath.bset(mob.getBitmap(),MOB.ATT_AUTOWEATHER))
&&(((Room)enterMsg.target())!=null)
&&((thisRoom.domainType()&Room.INDOORS)>0)
&&((((Room)enterMsg.target()).domainType()&Room.INDOORS)==0)
&&(((Room)enterMsg.target()).getArea().getClimateObj().weatherType(((Room)enterMsg.target()))!=Climate.WEATHER_CLEAR)
&&(((Room)enterMsg.target()).isInhabitant(mob)))
mob.tell("\n\r"+((Room)enterMsg.target()).getArea().getClimateObj().weatherDescription(((Room)enterMsg.target())));
}
if(!noriders)
ridersBehind(riders,(Room)leaveMsg.target(),(Room)enterMsg.target(),directionCode,flee);
if(!flee)
for(int f=0;f<mob.numFollowers();f++)
{
MOB follower=mob.fetchFollower(f);
if(follower!=null)
{
if((follower.amFollowing()==mob)
&&((follower.location()==thisRoom)||(follower.location()==destRoom)))
{
if((follower.location()==thisRoom)&&(CMLib.flags().aliveAwakeMobile(follower,true)))
{
if(CMath.bset(follower.getBitmap(),MOB.ATT_AUTOGUARD))
thisRoom.show(follower,null,null,CMMsg.MSG_OK_ACTION,"<S-NAME> remain(s) on guard here.");
else
{
follower.tell("You follow "+mob.name()+" "+Directions.getDirectionName(directionCode)+".");
if(!move(follower,directionCode,false,false,false,false))
{
//follower.setFollowing(null);
}
}
}
}
//else
// follower.setFollowing(null);
}
}
if((leaveTrailersSoFar!=null)&&(leaveMsg.target() instanceof Room))
for(int t=0;t<leaveTrailersSoFar.size();t++)
((Room)leaveMsg.target()).send(mob,(CMMsg)leaveTrailersSoFar.elementAt(t));
if((enterTrailersSoFar!=null)&&(enterMsg.target() instanceof Room))
for(int t=0;t<enterTrailersSoFar.size();t++)
((Room)enterMsg.target()).send(mob,(CMMsg)enterTrailersSoFar.elementAt(t));
return true;
}
protected Command stander=null;
protected Vector ifneccvec=null;
public void standIfNecessary(MOB mob, int metaFlags)
throws java.io.IOException
{
if((ifneccvec==null)||(ifneccvec.size()!=2))
{
ifneccvec=new Vector();
ifneccvec.addElement("STAND");
ifneccvec.addElement("IFNECESSARY");
}
if(stander==null) stander=CMClass.getCommand("Stand");
if((stander!=null)&&(ifneccvec!=null))
stander.execute(mob,ifneccvec,metaFlags);
}
public boolean execute(MOB mob, Vector commands, int metaFlags)
throws java.io.IOException
{
standIfNecessary(mob,metaFlags);
if((commands.size()>3)
&&(commands.firstElement() instanceof Integer))
{
return move(mob,
((Integer)commands.elementAt(0)).intValue(),
((Boolean)commands.elementAt(1)).booleanValue(),
((Boolean)commands.elementAt(2)).booleanValue(),
((Boolean)commands.elementAt(3)).booleanValue(),false);
}
String whereStr=CMParms.combine(commands,1);
Room R=mob.location();
int direction=-1;
if(whereStr.equalsIgnoreCase("OUT"))
{
if(!CMath.bset(R.domainType(),Room.INDOORS))
{
mob.tell("You aren't indoors.");
return false;
}
for(int d=Directions.NUM_DIRECTIONS()-1;d>=0;d--)
if((R.getExitInDir(d)!=null)
&&(R.getRoomInDir(d)!=null)
&&(!CMath.bset(R.getRoomInDir(d).domainType(),Room.INDOORS)))
{
if(direction>=0)
{
mob.tell("Which way out? Try North, South, East, etc..");
return false;
}
direction=d;
}
if(direction<0)
{
mob.tell("There is no direct way out of this place. Try a direction.");
return false;
}
}
if(direction<0)
direction=Directions.getGoodDirectionCode(whereStr);
if(direction<0)
{
Environmental E=null;
if(R!=null)
E=R.fetchFromRoomFavorItems(null,whereStr,Item.WORNREQ_UNWORNONLY);
if(E instanceof Rideable)
{
Command C=CMClass.getCommand("Enter");
return C.execute(mob,commands,metaFlags);
}
if((E instanceof Exit)&&(R!=null))
{
for(int d=Directions.NUM_DIRECTIONS()-1;d>=0;d--)
if(R.getExitInDir(d)==E)
{ direction=d; break;}
}
}
String doing=(String)commands.elementAt(0);
if(direction>=0)
move(mob,direction,false,false,false,false);
else
{
boolean doneAnything=false;
if(commands.size()>2)
for(int v=1;v<commands.size();v++)
{
int num=1;
String s=(String)commands.elementAt(v);
if(CMath.s_int(s)>0)
{
num=CMath.s_int(s);
v++;
if(v<commands.size())
s=(String)commands.elementAt(v);
}
else
if(("NSEWUDnsewud".indexOf(s.charAt(s.length()-1))>=0)
&&(CMath.s_int(s.substring(0,s.length()-1))>0))
{
num=CMath.s_int(s.substring(0,s.length()-1));
s=s.substring(s.length()-1);
}
direction=Directions.getGoodDirectionCode(s);
if(direction>=0)
{
doneAnything=true;
for(int i=0;i<num;i++)
{
if(mob.isMonster())
{
if(!move(mob,direction,false,false,false,false))
return false;
}
else
{
Vector V=new Vector();
V.addElement(doing);
V.addElement(Directions.getDirectionName(direction));
mob.enqueCommand(V,metaFlags,0);
}
}
}
else
break;
}
if(!doneAnything)
mob.tell(CMStrings.capitalizeAndLower(doing)+" which direction?\n\rTry north, south, east, west, up, or down.");
}
return false;
}
public double actionsCost(MOB mob, Vector cmds){
double cost=CMath.div(CMProps.getIntVar(CMProps.SYSTEMI_DEFCMDTIME),100.0);
if((mob!=null)&&(CMath.bset(mob.getBitmap(),MOB.ATT_AUTORUN)))
cost /= 4.0;
return cost;
}
public boolean canBeOrdered(){return true;}
}
|
Java
|
# hamcrest-string-matcher
Custom hamcrest matcher that counts occurrences of a substring.
## Current Release
The current release is 0.0.6.
## Basic Usage
```
import static io.zinx.hamcrest.string.pattern.OccurrenceMatcher.hasOccurrenceCount;
...
@Test
public void testMatch() {
String item = "Fred,Joe,John,Tim";
String searchString = ",";
int count = 3;
assertThat(item, hasOccurrenceCount(count, searchString));
}
```
## Dependency
The code uses the Apache Commons Lang3 library.
## Building with Gradle
- Clone the repo from github.
- gradlew build
## Artifact available through Jitpack.io
The artifact is available on <https://jitpack.io/>.
#### Gradle
To include it using gradle, do the following:
```
repositories {
maven {
url "https://jitpack.io"
}
}
```
```
dependencies {
compile 'com.github.zinx-io:hamcrest-string-matcher:0.0.6'
}
```
#### Maven
To include it using maven, do the following:
```
<repository>
<id>jitpack.io</id>
<url>https://jitpack.io</url>
</repository>
```
```
<dependency>
<groupId>com.github.zinx-io</groupId>
<artifactId>hamcrest-string-matcher</artifactId>
<version>0.0.6</version>
</dependency>
```
|
Java
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
from __future__ import print_function
import wx
import threading
import lcm
import random
import Forseti
import configurator
BLUE = (24, 25, 141)
GOLD = (241, 169, 50)
class TeamPanel(wx.Panel):
def __init__(self, remote, letter, number, name, colour, *args, **kwargs):
super(TeamPanel, self).__init__(*args, **kwargs)
self.remote = remote
self.InitUI(letter, number, name, colour)
def InitUI(self, letter, number, name, colour=None):
if colour is not None:
self.SetBackgroundColour(colour)
dc = wx.ScreenDC()
self.num_ctrl = wx.TextCtrl(self, size=(dc.GetCharWidth() * 2, dc.GetCharHeight()))
self.num_ctrl.AppendText(str(number))
self.get_button = wx.Button(self, label='Get', size=(dc.GetCharWidth() * 2, dc.GetCharHeight()))
self.get_button.Bind(wx.EVT_BUTTON, self.do_get_name)
self.name_ctrl = wx.TextCtrl(self, size=(dc.GetCharWidth() * 16,
dc.GetCharHeight()))
self.name_ctrl.AppendText(name)
name_num_box = wx.BoxSizer(wx.HORIZONTAL)
name_num_box.Add(wx.StaticText(self, label=letter,
size=(dc.GetCharWidth() * 0.6, dc.GetCharHeight())))
name_num_box.Add(self.num_ctrl)
name_num_box.Add(self.get_button)
name_num_box.Add(self.name_ctrl)
#button_box = wx.BoxSizer(wx.HORIZONTAL)
#button_box.Add(wx.Button(self, label='Reset'))
#button_box.Add(wx.Button(self, label='Configure'))
#button_box.Add(wx.Button(self, label='Disable'))
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(name_num_box, flag=wx.CENTER)
#vbox.Add(button_box, flag=wx.CENTER)
self.SetSizer(self.vbox)
self.Show(True)
def do_get_name(self, event):
self.name = configurator.get_team_name(self.number)
@property
def name(self):
return self.name_ctrl.GetValue()
@name.setter
def name(self, val):
self.name_ctrl.SetValue(val)
@property
def number(self):
try:
return int(self.num_ctrl.GetValue())
except ValueError:
return 0
@number.setter
def number(self, val):
self.num_ctrl.SetValue(str(val))
class MatchControl(wx.Panel):
def __init__(self, remote, *args, **kwargs):
super(MatchControl, self).__init__(*args, **kwargs)
self.remote = remote
self.InitUI()
def InitUI(self):
vbox = wx.BoxSizer(wx.VERTICAL)
dc = wx.ScreenDC()
match_number = wx.BoxSizer(wx.HORIZONTAL)
match_number.Add(wx.StaticText(self, label='Match #'.format(1)))
self.match_num_ctrl = wx.TextCtrl(self, size=(dc.GetCharWidth() * 2,
dc.GetCharHeight()))
match_number.Add(self.match_num_ctrl)
vbox.Add(match_number, flag=wx.CENTER)
teamSizer = wx.GridSizer(3, 2)
self.team_panels = [
TeamPanel(self.remote, 'A', 0, 'Unknown Team', BLUE, self),
TeamPanel(self.remote, 'C', 0, 'Unknown Team', GOLD, self),
TeamPanel(self.remote, 'B', 0, 'Unknown Team', BLUE, self),
TeamPanel(self.remote, 'D', 0, 'Unknown Team', GOLD, self),
]
teamSizer.AddMany(
[wx.StaticText(self, label='Blue Team'),
wx.StaticText(self, label='Gold Team')] +
[(panel, 0) for panel in self.team_panels])
vbox.Add(teamSizer, flag=wx.CENTER)
buttons = wx.BoxSizer(wx.HORIZONTAL)
self.init_button = wx.Button(self, label='Init')
self.init_button.Bind(wx.EVT_BUTTON, self.do_init)
self.go_button = wx.Button(self, label='GO!')
self.go_button.Bind(wx.EVT_BUTTON, self.do_go)
self.pause_button = wx.Button(self, label='Pause')
self.pause_button.Bind(wx.EVT_BUTTON, self.do_pause)
#self.save_button = wx.Button(self, label='Save')
#self.save_button.Bind(wx.EVT_BUTTON, self.do_save)
self.time_text = wx.StaticText(self, label='0:00')
self.stage_text = wx.StaticText(self, label='Unknown')
self.remote.time_text = self.time_text
#buttons.Add(self.save_button, flag=wx.LEFT)
buttons.Add(self.init_button)
buttons.Add(self.go_button)
buttons.Add(self.pause_button)
buttons.Add(self.time_text)
buttons.Add(self.stage_text)
vbox.Add(buttons, flag=wx.CENTER)
self.SetSizer(vbox)
self.Show(True)
def do_go(self, e):
self.remote.do_go()
def do_pause(self, e):
self.remote.do_pause()
def do_save(self, e):
self.remote.do_save(self.get_match())
def do_init(self, e):
self.remote.do_init(self.get_match())
def _set_match_panel(self, match, team_idx, panel_idx):
match.team_numbers[team_idx] = self.team_panels[panel_idx].number
match.team_names[team_idx] = self.team_panels[panel_idx].name
def _set_panel_match(self, match, team_idx, panel_idx):
self.team_panels[panel_idx].number = match.team_numbers[team_idx]
self.team_panels[panel_idx].name = match.team_names[team_idx]
def get_match(self):
match = Forseti.Match()
self._set_match_panel(match, 0, 0)
self._set_match_panel(match, 1, 2)
self._set_match_panel(match, 2, 1)
self._set_match_panel(match, 3, 3)
try:
match.match_number = int(self.match_num_ctrl.GetValue())
except ValueError:
match.match_number = random.getrandbits(31)
return match
def set_match(self, match):
self._set_panel_match(match, 0, 0)
self._set_panel_match(match, 1, 2)
self._set_panel_match(match, 2, 1)
self._set_panel_match(match, 3, 3)
self.match_num_ctrl.SetValue(str(match.match_number))
def set_time(self, match):
self.time_text.SetLabel(format_time(match.game_time_so_far))
self.stage_text.SetLabel(match.stage_name)
class ScheduleControl(wx.Panel):
def __init__(self, remote, match_control, *args, **kwargs):
self.remote = remote
super(ScheduleControl, self).__init__(*args, **kwargs)
self.InitUI()
self.remote.match_list_box = self.match_list
self.match_control = match_control
def InitUI(self):
self.match_list = wx.ListBox(self)
self.match_list.Bind(wx.EVT_LISTBOX, self.choose_match)
hbox = wx.BoxSizer(wx.HORIZONTAL)
self.load_button = wx.Button(self, label='Load All')
self.load_button.Bind(wx.EVT_BUTTON, self.do_load)
hbox.Add(self.load_button)
self.clear_first = wx.CheckBox(self, label='Clear first')
self.clear_first.SetValue(True)
hbox.Add(self.clear_first)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(self.match_list, 1, wx.EXPAND)
vbox.Add(hbox)
self.SetSizer(vbox)
self.Show(True)
def do_load(self, e):
self.remote.do_load(self.clear_first.GetValue())
def choose_match(self, event):
self.match_control.set_match(event.GetClientData())
class MainWindow(wx.Frame):
def __init__(self, remote, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.remote = remote
self.InitUI()
def InitUI(self):
menubar = wx.MenuBar()
fileMenu = wx.Menu()
fitem = fileMenu.Append(wx.ID_EXIT, 'Quit', 'Quit application')
menubar.Append(fileMenu, '&File')
self.SetMenuBar(menubar)
match_control = MatchControl(self.remote, self)
schedule_control = ScheduleControl(self.remote, match_control, self)
self.remote.match_control = match_control
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(match_control, 0, wx.ALIGN_CENTER | wx.ALIGN_TOP, 8)
vbox.Add(schedule_control, 1, wx.EXPAND | wx.ALIGN_CENTER | wx.ALL, 8)
self.Bind(wx.EVT_MENU, self.OnQuit, fitem)
self.SetSize((800, 600))
self.SetSizer(vbox)
self.SetTitle('Forseti Dashboard')
self.Centre()
self.Show(True)
def OnQuit(self, e):
self.Close()
def format_match(match):
print(match.match_number)
print(match.team_names)
print(match.team_numbers)
return '{}: {} ({}) & {} ({}) vs. {} ({}) & {} ({})'.format(
match.match_number,
match.team_names[0], match.team_numbers[0],
match.team_names[1], match.team_numbers[1],
match.team_names[2], match.team_numbers[2],
match.team_names[3], match.team_numbers[3],
)
class Remote(object):
def __init__(self):
self.lc = lcm.LCM('udpm://239.255.76.67:7667?ttl=1')
self.lc.subscribe('Schedule/Schedule', self.handle_schedule)
self.lc.subscribe('Timer/Time', self.handle_time)
self.match_list_box = None
self.match_control = None
self.thread = threading.Thread(target=self._loop)
self.thread.daemon = True
def start(self):
self.thread.start()
def _loop(self):
while True:
try:
self.lc.handle()
except Exception as ex:
print('Got exception while handling lcm message', ex)
def handle_schedule(self, channel, data):
msg = Forseti.Schedule.decode(data)
for i in range(msg.num_matches):
self.match_list_box.Insert(format_match(msg.matches[i]), i,
msg.matches[i])
def handle_time(self, channel, data):
msg = Forseti.Time.decode(data)
#wx.CallAfter(self.time_text.SetLabel, format_time(msg.game_time_so_far))
wx.CallAfter(self.match_control.set_time, msg)
def do_load(self, clear_first):
if clear_first:
self.match_list_box.Clear()
msg = Forseti.ScheduleLoadCommand()
msg.clear_first = clear_first
print('Requesting load')
self.lc.publish('Schedule/Load', msg.encode())
def do_save(self, match):
self.lc.publish('Match/Save', match.encode())
def do_init(self, match):
self.lc.publish('Match/Init', match.encode())
def do_time_ctrl(self, command):
msg = Forseti.TimeControl()
msg.command_name = command
self.lc.publish('Timer/Control', msg.encode())
def do_go(self):
self.do_time_ctrl('start')
def do_pause(self):
self.do_time_ctrl('pause')
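# Descriptive comment (added): formats an elapsed time given in whole seconds
# as M:SS; assumes an integer number of seconds.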
def format_time(seconds):
return '{}:{:02}'.format(seconds // 60,
seconds % 60)
def main():
app = wx.App()
remote = Remote()
MainWindow(remote, None)
remote.start()
remote.do_load(False)
app.MainLoop()
if __name__ == '__main__':
main()
|
Java
|
/*
* Copyright 2013-2020 consulo.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.ide.plugins;
import com.intellij.icons.AllIcons;
import com.intellij.ide.DataManager;
import com.intellij.openapi.actionSystem.ActionGroup;
import com.intellij.openapi.ui.popup.JBPopupFactory;
import com.intellij.ui.ClickListener;
import com.intellij.util.ui.JBUI;
import com.intellij.util.ui.UIUtil;
import consulo.awt.TargetAWT;
import consulo.localize.LocalizeValue;
import javax.annotation.Nonnull;
import javax.swing.*;
import java.awt.event.MouseEvent;
import java.util.function.Function;
/**
* @author VISTALL
* @since 03/12/2020
*/
public class LabelPopup extends JLabel {
private final LocalizeValue myPrefix;
public LabelPopup(LocalizeValue prefix, Function<LabelPopup, ? extends ActionGroup> groupBuilder) {
myPrefix = prefix;
setForeground(UIUtil.getLabelDisabledForeground());
setBorder(JBUI.Borders.empty(1, 1, 1, 5));
setIcon(TargetAWT.to(AllIcons.General.ComboArrow));
setHorizontalTextPosition(SwingConstants.LEADING);
new ClickListener() {
@Override
public boolean onClick(@Nonnull MouseEvent event, int clickCount) {
LabelPopup component = LabelPopup.this;
JBPopupFactory.getInstance()
.createActionGroupPopup(myPrefix.get(), groupBuilder.apply(component), DataManager.getInstance().getDataContext(component), JBPopupFactory.ActionSelectionAid.SPEEDSEARCH, true)
.showUnderneathOf(component);
return true;
}
}.installOn(this);
}
public void setPrefixedText(LocalizeValue tagValue) {
setText(LocalizeValue.join(myPrefix, LocalizeValue.space(), tagValue).get());
}
}
|
Java
|
/*
* nativescriptassert.h
*
* Created on: 12.11.2013
* Author: blagoev
*/
#ifndef NATIVESCRIPTASSERT_H_
#define NATIVESCRIPTASSERT_H_
#include <android/log.h>
namespace tns {
extern bool LogEnabled;
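// Illustrative note (added comment): DEBUG_WRITE logs to the "TNS.Native" tag only
// when tns::LogEnabled is true; DEBUG_WRITE_FORCE and DEBUG_WRITE_FATAL always log,
// at debug and fatal priority respectively.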
#define DEBUG_WRITE(fmt, args...) if (tns::LogEnabled) __android_log_print(ANDROID_LOG_DEBUG, "TNS.Native", fmt, ##args)
#define DEBUG_WRITE_FORCE(fmt, args...) __android_log_print(ANDROID_LOG_DEBUG, "TNS.Native", fmt, ##args)
#define DEBUG_WRITE_FATAL(fmt, args...) __android_log_print(ANDROID_LOG_FATAL, "TNS.Native", fmt, ##args)
}
#endif /* NATIVESCRIPTASSERT_H_ */
|
Java
|
package yaputil
import (
"io/ioutil"
"net"
"regexp"
)
var (
nsRegex = regexp.MustCompile(`(?m)^nameserver\s+([0-9a-fA-F\.:]+)`)
)
func LookupIP(host string) (ips []net.IP, err error) {
return net.LookupIP(host)
}
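// GetLocalNameServers parses /etc/resolv.conf and returns the nameserver
// addresses it finds (IPv4 or IPv6 literals). Descriptive comment added for
// clarity; not part of the original file.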
func GetLocalNameServers() ([]string, error) {
b, err := ioutil.ReadFile("/etc/resolv.conf")
if err != nil {
return nil, err
}
nameservers := make([]string, 0, 4)
for _, m := range nsRegex.FindAllStringSubmatch(string(b), -1) {
nameservers = append(nameservers, m[1])
}
return nameservers, nil
}
|
Java
|
// Copyright (C) 2014 Space Monkey, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build cgo
package openssl
/*
#include <openssl/crypto.h>
#include <openssl/ssl.h>
#include <openssl/err.h>
#include <openssl/conf.h>
static long SSL_set_options_not_a_macro(SSL* ssl, long options) {
return SSL_set_options(ssl, options);
}
static long SSL_get_options_not_a_macro(SSL* ssl) {
return SSL_get_options(ssl);
}
static long SSL_clear_options_not_a_macro(SSL* ssl, long options) {
return SSL_clear_options(ssl, options);
}
extern int verify_ssl_cb(int ok, X509_STORE_CTX* store);
*/
import "C"
import (
"os"
"unsafe"
)
type SSLTLSExtErr int
const (
SSLTLSExtErrOK SSLTLSExtErr = C.SSL_TLSEXT_ERR_OK
SSLTLSExtErrAlertWarning SSLTLSExtErr = C.SSL_TLSEXT_ERR_ALERT_WARNING
SSLTLSEXTErrAlertFatal SSLTLSExtErr = C.SSL_TLSEXT_ERR_ALERT_FATAL
SSLTLSEXTErrNoAck SSLTLSExtErr = C.SSL_TLSEXT_ERR_NOACK
)
var (
ssl_idx = C.SSL_get_ex_new_index(0, nil, nil, nil, nil)
)
//export get_ssl_idx
func get_ssl_idx() C.int {
return ssl_idx
}
type SSL struct {
ssl *C.SSL
verify_cb VerifyCallback
}
//export verify_ssl_cb_thunk
func verify_ssl_cb_thunk(p unsafe.Pointer, ok C.int, ctx *C.X509_STORE_CTX) C.int {
defer func() {
if err := recover(); err != nil {
logger.Critf("openssl: verify callback panic'd: %v", err)
os.Exit(1)
}
}()
verify_cb := (*SSL)(p).verify_cb
// set up defaults just in case verify_cb is nil
if verify_cb != nil {
store := &CertificateStoreCtx{ctx: ctx}
if verify_cb(ok == 1, store) {
ok = 1
} else {
ok = 0
}
}
return ok
}
// Wrapper around SSL_get_servername. Returns server name according to rfc6066
// http://tools.ietf.org/html/rfc6066.
func (s *SSL) GetServername() string {
return C.GoString(C.SSL_get_servername(s.ssl, C.TLSEXT_NAMETYPE_host_name))
}
// GetOptions returns SSL options. See
// https://www.openssl.org/docs/ssl/SSL_CTX_set_options.html
func (s *SSL) GetOptions() Options {
return Options(C.SSL_get_options_not_a_macro(s.ssl))
}
// SetOptions sets SSL options. See
// https://www.openssl.org/docs/ssl/SSL_CTX_set_options.html
func (s *SSL) SetOptions(options Options) Options {
return Options(C.SSL_set_options_not_a_macro(s.ssl, C.long(options)))
}
// ClearOptions clear SSL options. See
// https://www.openssl.org/docs/ssl/SSL_CTX_set_options.html
func (s *SSL) ClearOptions(options Options) Options {
return Options(C.SSL_clear_options_not_a_macro(s.ssl, C.long(options)))
}
// SetVerify controls peer verification settings. See
// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
func (s *SSL) SetVerify(options VerifyOptions, verify_cb VerifyCallback) {
s.verify_cb = verify_cb
if verify_cb != nil {
C.SSL_set_verify(s.ssl, C.int(options), (*[0]byte)(C.verify_ssl_cb))
} else {
C.SSL_set_verify(s.ssl, C.int(options), nil)
}
}
// SetVerifyMode controls peer verification setting. See
// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
func (s *SSL) SetVerifyMode(options VerifyOptions) {
s.SetVerify(options, s.verify_cb)
}
// SetVerifyCallback controls peer verification setting. See
// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
func (s *SSL) SetVerifyCallback(verify_cb VerifyCallback) {
s.SetVerify(s.VerifyMode(), verify_cb)
}
// GetVerifyCallback returns callback function. See
// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
func (s *SSL) GetVerifyCallback() VerifyCallback {
return s.verify_cb
}
// VerifyMode returns peer verification setting. See
// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
func (s *SSL) VerifyMode() VerifyOptions {
return VerifyOptions(C.SSL_get_verify_mode(s.ssl))
}
// SetVerifyDepth controls how many certificates deep the certificate
// verification logic is willing to follow a certificate chain. See
// https://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
func (s *SSL) SetVerifyDepth(depth int) {
C.SSL_set_verify_depth(s.ssl, C.int(depth))
}
// GetVerifyDepth controls how many certificates deep the certificate
// verification logic is willing to follow a certificate chain. See
// https://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
func (s *SSL) GetVerifyDepth() int {
return int(C.SSL_get_verify_depth(s.ssl))
}
// SetSSLCtx changes context to new one. Useful for Server Name Indication (SNI)
// rfc6066 http://tools.ietf.org/html/rfc6066. See
// http://stackoverflow.com/questions/22373332/serving-multiple-domains-in-one-box-with-sni
func (s *SSL) SetSSLCtx(ctx *Ctx) {
/*
* SSL_set_SSL_CTX() only changes certs as of 1.0.0d
* adjust other things we care about
*/
C.SSL_set_SSL_CTX(s.ssl, ctx.ctx)
}
//export sni_cb_thunk
func sni_cb_thunk(p unsafe.Pointer, con *C.SSL, ad unsafe.Pointer, arg unsafe.Pointer) C.int {
defer func() {
if err := recover(); err != nil {
logger.Critf("openssl: verify callback sni panic'd: %v", err)
os.Exit(1)
}
}()
sni_cb := (*Ctx)(p).sni_cb
s := &SSL{ssl: con}
// This attaches a pointer to our SSL struct into the SNI callback.
C.SSL_set_ex_data(s.ssl, get_ssl_idx(), unsafe.Pointer(s))
// Note: this is ctx.sni_cb, not C.sni_cb
return C.int(sni_cb(s))
}
|
Java
|
# Ranunculus gentryanus var. typicus L.D.Benson VARIETY
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null
|
Java
|
package org.support.project.knowledge.vo.notification.webhook;
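// Descriptive comment (added): minimal JSON payload object carrying a single
// numeric id for webhook notifications.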
public class WebhookLongIdJson {
public long id;
}
|
Java
|
package etri.sdn.controller.module.vxlanflowmapper;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.codehaus.jackson.map.ObjectMapper;
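// Descriptive comment (added): ad-hoc serialization check that builds sample
// VXLAN header-mapping request/response objects and prints them as
// pretty-printed JSON via the (legacy) Codehaus Jackson ObjectMapper.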
public class Tester {
public static void main(String[] args) {
testV2PRequest();
testV2PResponse();
}
public static void testV2PResponse() {
HeaderInfoPair pair1 = new HeaderInfoPair(
new OuterPacketHeader.Builder()
.srcMac("00:00:00:00:00:11")
.dstMac("00:00:00:00:00:22")
.srcIp("10.0.0.11")
.dstIp("10.0.0.22")
.udpPort("1001")
.build(),
new OrginalPacketHeader.Builder()
.srcMac("00:00:00:00:00:11")
.dstMac("00:00:00:00:00:22")
.srcIp("10.0.0.11")
.dstIp("10.0.0.22")
.vnid("1001")
.build());
List<HeaderInfoPair> pairs = Arrays.asList(pair1);
V2PResponse response = new V2PResponse(pairs);
ObjectMapper mapper = new ObjectMapper();
String output = null;
try {
output = mapper.defaultPrettyPrintingWriter().writeValueAsString(response);
System.out.println(output);
} catch (IOException e) {
e.printStackTrace();
}
}
public static void testV2PRequest() {
OuterPacketHeader orgHeader = new OuterPacketHeader("00:00:00:00:00:01", "00:00:00:00:00:02", "10.0.0.1", "10.0.0.2", "1234");
List<OuterPacketHeader> headers= Arrays.asList(orgHeader);
P2VRequest request = new P2VRequest(headers);
// request.outerList = headers;
ObjectMapper mapper = new ObjectMapper();
List<OuterPacketHeader> switchs = new ArrayList<>();
String output = null;
try {
output = mapper.defaultPrettyPrintingWriter().writeValueAsString(request);
System.out.println(output);
} catch (IOException e) {
e.printStackTrace();
}
}
}
|
Java
|
# Olax glabriflora Danguy SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null
|
Java
|
# Mortierella fusca E. Wolf, 1954 SPECIES
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
Zentbl. Bakt. ParasitKde, Abt. II 107: 534 (1954)
#### Original name
Mortierella fusca E. Wolf, 1954
### Remarks
null
|
Java
|
# Daucus commutatus Thell. SPECIES
#### Status
ACCEPTED
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null
|
Java
|
# Decaspermum raymundi Diels SPECIES
#### Status
ACCEPTED
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null
|
Java
|
# Sagittaria latifolia var. glabra VARIETY
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null
|
Java
|
# Ornithogalum xanthocodon (Hilliard & B.L.Burtt) J.C.Manning & Goldblatt SPECIES
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null
|
Java
|
# Mansoa schwackei Bureau & K.Schum. SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null
|
Java
|
# Juniperus coahuilensis (Martínez) Gaussen ex R.P. Adams SPECIES
#### Status
ACCEPTED
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
Phytologia 74:413. 1993 (Gaussen, Trav. Lab. Forest. Toulouse tome II(I,1,pt. II 2, fasc. 10:101, 154. 1968, nom. inval. )
#### Original name
Juniperus erythrocarpa var. coahuilensis Martínez
### Remarks
null
|
Java
|
# Nitzschia gruendleri Grunow, 1878 SPECIES
#### Status
ACCEPTED
#### According to
World Register of Marine Species
#### Published in
null
#### Original name
null
### Remarks
null
|
Java
|
# Petalomonas sulcata Stokes SPECIES
#### Status
ACCEPTED
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null
|
Java
|
# Pseudopeziza alismatis (W. Phillips & Trail) Sacc., 1889 SPECIES
#### Status
ACCEPTED
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
Syll. fung. (Abellini) 8: 728 (1889)
#### Original name
Mollisia alismatis W. Phillips & Trail, 1888
### Remarks
null
|
Java
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (version 1.7.0_45) on Mon Mar 03 10:44:38 EST 2014 -->
<title>Uses of Class org.hibernate.metamodel.source.annotations.EntityHierarchyBuilder (Hibernate JavaDocs)</title>
<meta name="date" content="2014-03-03">
<link rel="stylesheet" type="text/css" href="../../../../../../stylesheet.css" title="Style">
</head>
<body>
<script type="text/javascript"><!--
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Class org.hibernate.metamodel.source.annotations.EntityHierarchyBuilder (Hibernate JavaDocs)";
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar_top">
<!-- -->
</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../../org/hibernate/metamodel/source/annotations/EntityHierarchyBuilder.html" title="class in org.hibernate.metamodel.source.annotations">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../../../../../../overview-tree.html">Tree</a></li>
<li><a href="../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../index.html?org/hibernate/metamodel/source/annotations/class-use/EntityHierarchyBuilder.html" target="_top">Frames</a></li>
<li><a href="EntityHierarchyBuilder.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h2 title="Uses of Class org.hibernate.metamodel.source.annotations.EntityHierarchyBuilder" class="title">Uses of Class<br>org.hibernate.metamodel.source.annotations.EntityHierarchyBuilder</h2>
</div>
<div class="classUseContainer">No usage of org.hibernate.metamodel.source.annotations.EntityHierarchyBuilder</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar_bottom">
<!-- -->
</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../../org/hibernate/metamodel/source/annotations/EntityHierarchyBuilder.html" title="class in org.hibernate.metamodel.source.annotations">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../../../../../../overview-tree.html">Tree</a></li>
<li><a href="../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../index.html?org/hibernate/metamodel/source/annotations/class-use/EntityHierarchyBuilder.html" target="_top">Frames</a></li>
<li><a href="EntityHierarchyBuilder.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>Copyright © 2001-2014 <a href="http://redhat.com">Red Hat, Inc.</a> All Rights Reserved.</small></p>
</body>
</html>
|
Java
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (1.8.0_91) on Mon Aug 22 09:59:23 CST 2016 -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>play.data.parsing (Play! API)</title>
<meta name="date" content="2016-08-22">
<link rel="stylesheet" type="text/css" href="../../../stylesheet.css" title="Style">
<script type="text/javascript" src="../../../script.js"></script>
</head>
<body>
<h1 class="bar"><a href="../../../play/data/parsing/package-summary.html" target="classFrame">play.data.parsing</a></h1>
<div class="indexContainer">
<h2 title="Classes">Classes</h2>
<ul title="Classes">
<li><a href="ApacheMultipartParser.html" title="class in play.data.parsing" target="classFrame">ApacheMultipartParser</a></li>
<li><a href="ApacheMultipartParser.AutoFileItem.html" title="class in play.data.parsing" target="classFrame">ApacheMultipartParser.AutoFileItem</a></li>
<li><a href="DataParser.html" title="class in play.data.parsing" target="classFrame">DataParser</a></li>
<li><a href="DataParsers.html" title="class in play.data.parsing" target="classFrame">DataParsers</a></li>
<li><a href="MultipartStream.html" title="class in play.data.parsing" target="classFrame">MultipartStream</a></li>
<li><a href="TempFilePlugin.html" title="class in play.data.parsing" target="classFrame">TempFilePlugin</a></li>
<li><a href="TextParser.html" title="class in play.data.parsing" target="classFrame">TextParser</a></li>
<li><a href="UrlEncodedParser.html" title="class in play.data.parsing" target="classFrame">UrlEncodedParser</a></li>
</ul>
<h2 title="Exceptions">Exceptions</h2>
<ul title="Exceptions">
<li><a href="ApacheMultipartParser.SizeException.html" title="class in play.data.parsing" target="classFrame">ApacheMultipartParser.SizeException</a></li>
<li><a href="MultipartStream.IllegalBoundaryException.html" title="class in play.data.parsing" target="classFrame">MultipartStream.IllegalBoundaryException</a></li>
<li><a href="MultipartStream.MalformedStreamException.html" title="class in play.data.parsing" target="classFrame">MultipartStream.MalformedStreamException</a></li>
</ul>
</div>
</body>
</html>
|
Java
|
/**
* @license Apache-2.0
*
* Copyright (c) 2018 The Stdlib Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
// MODULES //
var tape = require( 'tape' );
var Readable = require( 'readable-stream' ).Readable;
var now = require( '@stdlib/time/now' );
var arcsine = require( '@stdlib/random/base/arcsine' ).factory;
var isBuffer = require( '@stdlib/assert/is-buffer' );
var isnan = require( '@stdlib/math/base/assert/is-nan' );
var isUint32Array = require( '@stdlib/assert/is-uint32array' );
var UINT32_MAX = require( '@stdlib/constants/uint32/max' );
var Uint32Array = require( '@stdlib/array/uint32' );
var minstd = require( '@stdlib/random/base/minstd' );
var inspectStream = require( '@stdlib/streams/node/inspect-sink' );
var randomStream = require( './../lib/main.js' );
// TESTS //
tape( 'main export is a function', function test( t ) {
t.ok( true, __filename );
t.equal( typeof randomStream, 'function', 'main export is a function' );
t.end();
});
tape( 'the function throws an error if minimum support `a` is not a number primitive', function test( t ) {
var values;
var i;
values = [
'5',
null,
true,
false,
void 0,
NaN,
[],
{},
function noop() {}
];
for ( i = 0; i < values.length; i++ ) {
t.throws( badValue( values[i] ), TypeError, 'throws an error when provided '+values[i] );
}
t.end();
function badValue( value ) {
return function badValue() {
randomStream( value, 2.0 );
};
}
});
tape( 'the function throws an error if maximum support `b` is not a number primitive', function test( t ) {
var values;
var i;
values = [
'5',
null,
true,
false,
void 0,
NaN,
[],
{},
function noop() {}
];
for ( i = 0; i < values.length; i++ ) {
t.throws( badValue( values[i] ), TypeError, 'throws an error when provided '+values[i] );
}
t.end();
function badValue( value ) {
return function badValue() {
randomStream( 2.0, value );
};
}
});
tape( 'the function throws an error if minimum support `a` is greater than or equal to maximum support `b`', function test( t ) {
var values;
var i;
values = [
[ 0.0, 0.0 ],
[ -2.0, -4.0 ],
[ 2.0, 1.0 ]
];
for ( i = 0; i < values.length; i++ ) {
t.throws( badValue( values[i] ), RangeError, 'throws an error when provided '+values[i] );
}
t.end();
function badValue( arr ) {
return function badValue() {
randomStream( arr[0], arr[1] );
};
}
});
tape( 'the function throws an error if provided an options argument which is not an object', function test( t ) {
var values;
var i;
values = [
'abc',
5,
null,
true,
false,
void 0,
NaN,
[],
function noop() {}
];
for ( i = 0; i < values.length; i++ ) {
t.throws( badValue( values[i] ), TypeError, 'throws an error when provided '+values[i] );
}
t.end();
function badValue( value ) {
return function badValue() {
randomStream( 2.0, 5.0, value );
};
}
});
tape( 'the function throws an error if provided an invalid `iter` option', function test( t ) {
var values;
var i;
values = [
'abc',
-5,
3.14,
null,
true,
false,
void 0,
NaN,
[],
{},
function noop() {}
];
for ( i = 0; i < values.length; i++ ) {
t.throws( badValue( values[i] ), TypeError, 'throws an error when provided '+values[i] );
}
t.end();
function badValue( value ) {
return function badValue() {
randomStream( 2.0, 5.0, {
'iter': value
});
};
}
});
tape( 'if provided a `prng` option which is not a function, the function throws an error', function test( t ) {
var values;
var i;
values = [
'5',
3.14,
NaN,
true,
false,
null,
void 0,
[],
{}
];
for ( i = 0; i < values.length; i++ ) {
t.throws( badValue( values[i] ), TypeError, 'throws a type error when provided '+values[i] );
}
t.end();
function badValue( value ) {
return function badValue() {
randomStream( 2.0, 5.0, {
'prng': value
});
};
}
});
tape( 'if provided a `copy` option which is not a boolean, the function throws an error', function test( t ) {
var values;
var i;
values = [
'5',
5,
NaN,
null,
void 0,
{},
[],
function noop() {}
];
for ( i = 0; i < values.length; i++ ) {
t.throws( badValue( values[i] ), TypeError, 'throws a type error when provided '+values[i] );
}
t.end();
function badValue( value ) {
return function badValue() {
randomStream( 2.0, 5.0, {
'copy': value
});
};
}
});
tape( 'if provided a `seed` which is not a positive integer or a non-empty array-like object, the function throws an error', function test( t ) {
var values;
var i;
values = [
'5',
3.14,
0.0,
-5.0,
NaN,
true,
false,
null,
void 0,
{},
[],
function noop() {}
];
for ( i = 0; i < values.length; i++ ) {
t.throws( badValue( values[i] ), TypeError, 'throws a type error when provided '+values[i] );
}
t.end();
function badValue( value ) {
return function badValue() {
randomStream( 2.0, 5.0, {
'seed': value
});
};
}
});
tape( 'the function throws a range error if provided a `seed` which is an integer greater than the maximum unsigned 32-bit integer', function test( t ) {
var values;
var i;
values = [
UINT32_MAX + 1,
UINT32_MAX + 2,
UINT32_MAX + 3
];
for ( i = 0; i < values.length; i++ ) {
t.throws( badValue( values[i] ), RangeError, 'throws a range error when provided '+values[i] );
}
t.end();
function badValue( value ) {
return function badValue() {
randomStream( 2.0, 5.0, {
'seed': value
});
};
}
});
tape( 'if provided a `state` option which is not a Uint32Array, the function throws an error', function test( t ) {
var values;
var i;
values = [
'5',
5,
NaN,
true,
false,
null,
void 0,
{},
[],
function noop() {}
];
for ( i = 0; i < values.length; i++ ) {
t.throws( badValue( values[i] ), TypeError, 'throws a type error when provided '+values[i] );
}
t.end();
function badValue( value ) {
return function badValue() {
randomStream( 2.0, 5.0, {
'state': value
});
};
}
});
tape( 'if provided an invalid `state` option, the function throws an error', function test( t ) {
var values;
var i;
values = [
new Uint32Array( 0 ),
new Uint32Array( 10 ),
new Uint32Array( 100 )
];
for ( i = 0; i < values.length; i++ ) {
t.throws( badValue( values[i] ), RangeError, 'throws an error when provided '+values[i] );
}
t.end();
function badValue( value ) {
return function badValue() {
randomStream( 2.0, 5.0, {
'state': value
});
};
}
});
tape( 'if provided an invalid readable stream option, the function throws an error', function test( t ) {
var values;
var i;
values = [
'5',
5,
NaN,
null,
void 0,
{},
[],
function noop() {}
];
for ( i = 0; i < values.length; i++ ) {
t.throws( badValue( values[i] ), TypeError, 'throws a type error when provided '+values[i] );
}
t.end();
function badValue( value ) {
return function badValue() {
randomStream( 2.0, 5.0, {
'objectMode': value
});
};
}
});
tape( 'the function is a constructor which returns a readable stream', function test( t ) {
var RandomStream = randomStream;
var s;
s = new RandomStream( 2.0, 5.0 );
t.equal( s instanceof Readable, true, 'returns expected value' );
t.end();
});
tape( 'the constructor does not require the `new` operator', function test( t ) {
var RandomStream = randomStream;
var s;
s = randomStream( 2.0, 5.0 );
t.equal( s instanceof RandomStream, true, 'returns expected value' );
t.end();
});
tape( 'the constructor returns a readable stream (no new)', function test( t ) {
var s = randomStream( 2.0, 5.0 );
t.equal( s instanceof Readable, true, 'returns expected value' );
t.end();
});
tape( 'the returned stream provides a method to destroy a stream (object)', function test( t ) {
var count = 0;
var s;
s = randomStream( 2.0, 5.0 );
t.equal( typeof s.destroy, 'function', 'has destroy method' );
s.on( 'error', onError );
s.on( 'close', onClose );
s.destroy({
'message': 'beep'
});
function onError( err ) {
count += 1;
if ( err ) {
t.ok( true, err.message );
} else {
t.ok( false, 'does not error' );
}
if ( count === 2 ) {
t.end();
}
}
function onClose() {
count += 1;
t.ok( true, 'stream closes' );
if ( count === 2 ) {
t.end();
}
}
});
tape( 'the returned stream provides a method to destroy a stream (error object)', function test( t ) {
var count = 0;
var s;
s = randomStream( 2.0, 5.0 );
t.equal( typeof s.destroy, 'function', 'has destroy method' );
s.on( 'error', onError );
s.on( 'close', onClose );
s.destroy( new Error( 'beep' ) );
function onError( err ) {
count += 1;
if ( err ) {
t.ok( true, err.message );
} else {
t.ok( false, 'does not error' );
}
if ( count === 2 ) {
t.end();
}
}
function onClose() {
count += 1;
t.ok( true, 'stream closes' );
if ( count === 2 ) {
t.end();
}
}
});
tape( 'the returned stream does not allow itself to be destroyed more than once', function test( t ) {
var s;
s = randomStream( 2.0, 5.0 );
s.on( 'error', onError );
s.on( 'close', onClose );
// If the stream is closed twice, the test will error...
s.destroy();
s.destroy();
function onClose() {
t.ok( true, 'stream closes' );
t.end();
}
function onError( err ) {
t.ok( false, err.message );
}
});
tape( 'attached to the returned stream is the underlying PRNG', function test( t ) {
var s = randomStream( 2.0, 5.0 );
t.equal( typeof s.PRNG, 'function', 'has property' );
s = randomStream( 2.0, 5.0, {
'prng': minstd.normalized
});
t.equal( s.PRNG, minstd.normalized, 'has property' );
t.end();
});
tape( 'attached to the returned stream is the generator seed', function test( t ) {
var s = randomStream( 2.0, 5.0, {
'seed': 12345
});
t.equal( isUint32Array( s.seed ), true, 'has property' );
t.equal( s.seed[ 0 ], 12345, 'equal to provided seed' );
s = randomStream( 2.0, 5.0, {
'seed': 12345,
'prng': minstd.normalized
});
t.equal( s.seed, null, 'equal to `null`' );
t.end();
});
tape( 'attached to the returned stream is the generator seed (array seed)', function test( t ) {
var actual;
var seed;
var s;
var i;
seed = [ 1234, 5678 ];
s = randomStream( 2.0, 5.0, {
'seed': seed
});
actual = s.seed;
t.equal( isUint32Array( actual ), true, 'has property' );
for ( i = 0; i < seed.length; i++ ) {
t.equal( actual[ i ], seed[ i ], 'returns expected value for word '+i );
}
t.end();
});
tape( 'attached to the returned stream is the generator seed length', function test( t ) {
var s = randomStream( 2.0, 5.0 );
t.equal( typeof s.seedLength, 'number', 'has property' );
s = randomStream( 2.0, 5.0, {
'prng': minstd.normalized
});
t.equal( s.seedLength, null, 'equal to `null`' );
t.end();
});
tape( 'attached to the returned stream is the generator state', function test( t ) {
var s = randomStream( 2.0, 5.0 );
t.equal( isUint32Array( s.state ), true, 'has property' );
s = randomStream( 2.0, 5.0, {
'prng': minstd.normalized
});
t.equal( s.state, null, 'equal to `null`' );
t.end();
});
tape( 'attached to the returned stream is the generator state length', function test( t ) {
var s = randomStream( 2.0, 5.0 );
t.equal( typeof s.stateLength, 'number', 'has property' );
s = randomStream( 2.0, 5.0, {
'prng': minstd.normalized
});
t.equal( s.stateLength, null, 'equal to `null`' );
t.end();
});
tape( 'attached to the returned stream is the generator state size', function test( t ) {
var s = randomStream( 2.0, 5.0 );
t.equal( typeof s.byteLength, 'number', 'has property' );
s = randomStream( 2.0, 5.0, {
'prng': minstd.normalized
});
t.equal( s.byteLength, null, 'equal to `null`' );
t.end();
});
tape( 'the constructor returns a stream for generating pseudorandom numbers from an arcsine distribution', function test( t ) {
var iStream;
var result;
var rand;
var opts;
var s;
// Note: we assume that the underlying generator is the following PRNG...
rand = arcsine( 2.0, 5.0, {
'seed': 12345
});
opts = {
'seed': 12345,
'iter': 10,
'sep': '\n'
};
s = randomStream( 2.0, 5.0, opts );
s.on( 'end', onEnd );
opts = {
'objectMode': true
};
iStream = inspectStream( opts, inspect );
result = '';
s.pipe( iStream );
function inspect( chunk ) {
t.equal( isBuffer( chunk ), true, 'returns a buffer' );
result += chunk.toString();
}
function onEnd() {
var i;
t.pass( 'stream ended' );
result = result.split( '\n' );
t.equal( result.length, 10, 'has expected length' );
for ( i = 0; i < result.length; i++ ) {
t.equal( parseFloat( result[ i ] ), rand(), 'returns expected value. i: ' + i + '.' );
}
t.end();
}
});
tape( 'the constructor returns a stream for generating pseudorandom numbers from an arcsine distribution (object mode)', function test( t ) {
var iStream;
var count;
var rand;
var opts;
var s;
// Note: we assume that the underlying generator is the following PRNG...
rand = arcsine( 2.0, 5.0, {
'seed': 12345
});
opts = {
'seed': 12345,
'objectMode': true
};
s = randomStream( 2.0, 5.0, opts );
s.on( 'close', onClose );
opts = {
'objectMode': true
};
iStream = inspectStream( opts, inspect );
count = 0;
s.pipe( iStream );
function inspect( v ) {
count += 1;
t.equal( rand(), v, 'returns expected value. i: '+count+'.' );
if ( count >= 10 ) {
s.destroy();
}
}
function onClose() {
t.pass( 'stream closed' );
t.end();
}
});
tape( 'the constructor supports limiting the number of iterations', function test( t ) {
var iStream;
var count;
var niter;
var opts;
var s;
niter = 10;
count = 0;
opts = {
'iter': niter,
'objectMode': true
};
s = randomStream( 2.0, 5.0, opts );
s.on( 'end', onEnd );
opts = {
'objectMode': true
};
iStream = inspectStream( opts, inspect );
s.pipe( iStream );
function inspect( v ) {
count += 1;
t.equal( typeof v, 'number', 'returns expected value' );
}
function onEnd() {
t.equal( count === niter, true, 'performs expected number of iterations' );
t.end();
}
});
tape( 'by default, the constructor generates newline-delimited pseudorandom numbers', function test( t ) {
var iStream;
var result;
var opts;
var s;
opts = {
'iter': 10
};
s = randomStream( 2.0, 5.0, opts );
s.on( 'end', onEnd );
iStream = inspectStream( inspect );
result = '';
s.pipe( iStream );
function inspect( chunk ) {
result += chunk.toString();
}
function onEnd() {
var v;
var i;
result = result.split( '\n' );
t.equal( result.length, opts.iter, 'has expected length' );
for ( i = 0; i < result.length; i++ ) {
v = parseFloat( result[ i ] );
t.equal( typeof v, 'number', 'returns expected value' );
t.equal( isnan( v ), false, 'is not NaN' );
}
t.end();
}
});
tape( 'the constructor supports providing a custom separator for streamed values', function test( t ) {
var iStream;
var result;
var opts;
var s;
opts = {
'iter': 10,
'sep': '--++--'
};
s = randomStream( 2.0, 5.0, opts );
s.on( 'end', onEnd );
iStream = inspectStream( inspect );
result = '';
s.pipe( iStream );
function inspect( chunk ) {
result += chunk.toString();
}
function onEnd() {
var v;
var i;
result = result.split( opts.sep );
t.equal( result.length, opts.iter, 'has expected length' );
for ( i = 0; i < result.length; i++ ) {
v = parseFloat( result[ i ] );
t.equal( typeof v, 'number', 'returns expected value' );
t.equal( isnan( v ), false, 'is not NaN' );
}
t.end();
}
});
tape( 'the constructor supports returning a seeded readable stream', function test( t ) {
var iStream;
var opts;
var seed;
var arr;
var s1;
var s2;
var i;
seed = now();
opts = {
'objectMode': true,
'seed': seed,
'iter': 10
};
s1 = randomStream( 2.0, 5.0, opts );
s1.on( 'end', onEnd1 );
s2 = randomStream( 2.0, 5.0, opts );
s2.on( 'end', onEnd2 );
t.notEqual( s1, s2, 'separate streams' );
opts = {
'objectMode': true
};
iStream = inspectStream( opts, inspect1 );
arr = [];
i = 0;
s1.pipe( iStream );
function inspect1( v ) {
arr.push( v );
}
function onEnd1() {
var iStream;
var opts;
opts = {
'objectMode': true
};
iStream = inspectStream( opts, inspect2 );
s2.pipe( iStream );
}
function inspect2( v ) {
t.equal( v, arr[ i ], 'returns expected value' );
i += 1;
}
function onEnd2() {
t.end();
}
});
tape( 'the constructor supports specifying the underlying PRNG', function test( t ) {
var iStream;
var opts;
var s;
opts = {
'prng': minstd.normalized,
'objectMode': true,
'iter': 10
};
s = randomStream( 2.0, 5.0, opts );
s.on( 'end', onEnd );
opts = {
'objectMode': true
};
iStream = inspectStream( opts, inspect );
s.pipe( iStream );
function inspect( v ) {
t.equal( typeof v, 'number', 'returns a number' );
}
function onEnd() {
t.end();
}
});
tape( 'the constructor supports providing a seeded underlying PRNG', function test( t ) {
var iStream1;
var iStream2;
var randu;
var seed;
var opts;
var FLG;
var s1;
var s2;
var r1;
var r2;
seed = now();
randu = minstd.factory({
'seed': seed
});
opts = {
'prng': randu.normalized,
'objectMode': true,
'iter': 10
};
s1 = randomStream( 2.0, 5.0, opts );
s1.on( 'end', onEnd );
randu = minstd.factory({
'seed': seed
});
opts = {
'prng': randu.normalized,
'objectMode': true,
'iter': 10
};
s2 = randomStream( 2.0, 5.0, opts );
s2.on( 'end', onEnd );
t.notEqual( s1, s2, 'separate streams' );
opts = {
'objectMode': true
};
iStream1 = inspectStream( opts, inspect1 );
iStream2 = inspectStream( opts, inspect2 );
r1 = [];
r2 = [];
s1.pipe( iStream1 );
s2.pipe( iStream2 );
function inspect1( v ) {
r1.push( v );
}
function inspect2( v ) {
r2.push( v );
}
function onEnd() {
if ( FLG ) {
t.deepEqual( r1, r2, 'streams expected values' );
return t.end();
}
FLG = true;
}
});
tape( 'the constructor supports specifying the underlying generator state', function test( t ) {
var iStream;
var state;
var count;
var opts;
var arr;
var s;
opts = {
'objectMode': true,
'iter': 10,
'siter': 5
};
s = randomStream( 2.0, 5.0, opts );
s.on( 'state', onState );
s.on( 'end', onEnd1 );
opts = {
'objectMode': true
};
iStream = inspectStream( opts, inspect1 );
count = 0;
arr = [];
// Move to a future state...
s.pipe( iStream );
function onState( s ) {
// Only capture the first emitted state...
if ( !state ) {
state = s;
}
}
function inspect1( v ) {
count += 1;
if ( count > 5 ) {
arr.push( v );
}
}
function onEnd1() {
var iStream;
var opts;
var s;
t.pass( 'first stream ended' );
// Create another stream using the captured state:
opts = {
'objectMode': true,
'iter': 5,
'state': state
};
s = randomStream( 2.0, 5.0, opts );
s.on( 'end', onEnd2 );
t.deepEqual( state, s.state, 'same state' );
// Create a new inspect stream:
opts = {
'objectMode': true
};
iStream = inspectStream( opts, inspect2 );
// Replay previously generated values...
count = 0;
s.pipe( iStream );
}
function inspect2( v ) {
count += 1;
t.equal( v, arr[ count-1 ], 'returns expected value. i: '+(count-1)+'.' );
}
function onEnd2() {
t.pass( 'second stream ended' );
t.end();
}
});
tape( 'the constructor supports specifying a shared underlying generator state', function test( t ) {
var iStream;
var shared;
var state;
var count;
var opts;
var arr;
var s;
opts = {
'objectMode': true,
'iter': 10,
'siter': 4
};
s = randomStream( 2.0, 5.0, opts );
s.on( 'state', onState );
s.on( 'end', onEnd1 );
opts = {
'objectMode': true
};
iStream = inspectStream( opts, inspect1 );
count = 0;
arr = [];
// Move to a future state...
s.pipe( iStream );
function onState( s ) {
// Only capture the first emitted state...
if ( !state ) {
state = s;
// Create a copy of the state (to prevent mutation) which will be shared by more than one PRNG:
shared = new Uint32Array( state );
}
}
function inspect1( v ) {
count += 1;
if ( count > 4 ) {
arr.push( v );
}
}
function onEnd1() {
var iStream;
var opts;
var s;
t.pass( 'first stream ended' );
// Create another stream using the captured state:
opts = {
'objectMode': true,
'iter': 3,
'state': shared,
'copy': false
};
s = randomStream( 2.0, 5.0, opts );
s.on( 'end', onEnd2 );
t.deepEqual( state, s.state, 'same state' );
// Create a new inspect stream:
opts = {
'objectMode': true
};
iStream = inspectStream( opts, inspect2 );
// Replay previously generated values...
count = 0;
s.pipe( iStream );
}
function inspect2( v ) {
count += 1;
t.equal( v, arr[ count-1 ], 'returns expected value. i: '+(count-1)+'.' );
}
function onEnd2() {
var iStream;
var opts;
var s;
t.pass( 'second stream ended' );
// Create another stream using the captured state:
opts = {
'objectMode': true,
'iter': 3,
'state': shared,
'copy': false
};
s = randomStream( 2.0, 5.0, opts );
s.on( 'end', onEnd3 );
t.notDeepEqual( state, s.state, 'different state' );
// Create a new inspect stream:
opts = {
'objectMode': true
};
iStream = inspectStream( opts, inspect3 );
// Continue replaying previously generated values...
s.pipe( iStream );
}
function inspect3( v ) {
count += 1;
t.equal( v, arr[ count-1 ], 'returns expected value. i: '+(count-1)+'.' );
}
function onEnd3() {
t.pass( 'third stream ended' );
t.end();
}
});
tape( 'the returned stream supports setting the underlying generator state', function test( t ) {
var iStream;
var state;
var rand;
var opts;
var arr;
var s;
var i;
rand = arcsine( 2.0, 5.0 );
// Move to a future state...
for ( i = 0; i < 5; i++ ) {
rand();
}
// Capture the current state:
state = rand.state;
// Move to a future state...
arr = [];
for ( i = 0; i < 5; i++ ) {
arr.push( rand() );
}
// Create a random stream:
opts = {
'objectMode': true,
'iter': 5
};
s = randomStream( 2.0, 5.0, opts );
s.on( 'end', onEnd );
// Set the PRNG state:
s.state = state;
// Create a new inspect stream:
opts = {
'objectMode': true
};
iStream = inspectStream( opts, inspect );
// Replay previously generated values:
i = 0;
s.pipe( iStream );
function inspect( v ) {
t.equal( v, arr[ i ], 'returns expected value. i: ' + i + '.' );
i += 1;
}
function onEnd() {
t.end();
}
});
|
Java
|
module ZendeskAPI
# Creates put, post, delete class methods for custom resource methods.
module Verbs
class << self
private
# @macro [attach] container.create_verb
# @method $1(method)
# Executes a $1 using the passed in method as a path.
# Reloads the resource's attributes if any are in the response body.
#
# Created method takes an optional options hash. Valid options to be passed in to the created method: reload (for caching, default: false)
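# Illustrative usage (added comment, not from the original source): a resource
# class that extends this module can declare `put :update_many`, gaining
# #update_many and #update_many! instance methods that issue PUT requests to
# "#{path}/update_many"; methods declared via `any` must be called with a
# :verb option, e.g. `record.some_action!(:verb => :post)` (hypothetical method name).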
def create_verb(method_verb)
define_method method_verb do |method|
define_method "#{method}!" do |*method_args|
opts = method_args.last.is_a?(Hash) ? method_args.pop : {}
if method_verb == :any
verb = opts.delete(:verb)
raise(ArgumentError, ":verb required for method defined as :any") unless verb
else
verb = method_verb
end
@response = @client.connection.send(verb, "#{path}/#{method}") do |req|
req.body = opts
end
return false unless @response.success?
return false unless @response.body
resource = nil
if @response.body.is_a?(Hash)
resource = @response.body[self.class.singular_resource_name]
resource ||= @response.body.fetch(self.class.resource_name, []).detect { |res| res["id"] == id }
end
@attributes.replace @attributes.deep_merge(resource || {})
@attributes.clear_changes
clear_associations
true
end
define_method method do |*method_args|
begin
send("#{method}!", *method_args)
rescue ZendeskAPI::Error::RecordInvalid => e
@errors = e.errors
false
rescue ZendeskAPI::Error::ClientError
false
end
end
end
end
end
create_verb :put
create_verb :post
create_verb :delete
create_verb :any
end
end
|
Java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
package fuzzy.internal.functions;
import static org.junit.Assert.assertEquals;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import org.junit.Test;
/**
* Tests for Max function.
*
* @since 0.2
* @see Max
*/
public class TestMax {
@Test
public void testMax() {
Collection<Double> list = Arrays.asList(-1.0, 1.0, 2.0, 3.5);
Double r = Max.of(list, false);
assertEquals(Double.valueOf(3.5), r);
}
@Test
public void testMaxEmpty() {
Double r = Max.of(Collections.<Double>emptyList(), false);
assertEquals(Double.valueOf(0.0), r);
}
@Test
public void testMaxAbs() {
Collection<Double> list = Arrays.asList(-10.0, -1.0, 1.0, 2.0, 3.5);
Double r = Max.of(list, true);
assertEquals(Double.valueOf(-10.0), r);
}
}
|
Java
|
/*
* Hibernate, Relational Persistence for Idiomatic Java
*
* License: GNU Lesser General Public License (LGPL), version 2.1 or later
* See the lgpl.txt file in the root directory or http://www.gnu.org/licenses/lgpl-2.1.html
*/
package org.hibernate.orm.type.descriptor.sql.internal;
import java.time.temporal.TemporalAccessor;
import javax.persistence.TemporalType;
import org.hibernate.dialect.Dialect;
import org.hibernate.orm.type.descriptor.internal.DateTimeUtils;
import org.hibernate.orm.type.descriptor.java.spi.TemporalJavaTypeDescriptor;
import org.hibernate.orm.type.descriptor.spi.WrapperOptions;
/**
* @author Steve Ebersole
*/
public class JdbcLiteralFormatterTemporal extends BasicJdbcLiteralFormatter {
private final TemporalType precision;
public JdbcLiteralFormatterTemporal(TemporalJavaTypeDescriptor javaTypeDescriptor, TemporalType precision) {
super( javaTypeDescriptor );
this.precision = precision;
// todo : add some validation of combos between javaTypeDescriptor#getPrecision and precision - log warnings
}
@Override
protected TemporalJavaTypeDescriptor getJavaTypeDescriptor() {
return (TemporalJavaTypeDescriptor) super.getJavaTypeDescriptor();
}
@Override
public String toJdbcLiteral(Object value, Dialect dialect, WrapperOptions wrapperOptions) {
// for performance reasons, avoid conversions if we can
if ( value instanceof java.util.Date ) {
return DateTimeUtils.formatJdbcLiteralUsingPrecision(
(java.util.Date) value,
precision
);
}
else if ( value instanceof java.util.Calendar ) {
return DateTimeUtils.formatJdbcLiteralUsingPrecision(
(java.util.Calendar) value,
precision
);
}
else if ( value instanceof TemporalAccessor ) {
return DateTimeUtils.formatJdbcLiteralUsingPrecision(
(TemporalAccessor) value,
precision
);
}
switch ( getJavaTypeDescriptor().getPrecision() ) {
case DATE: {
return DateTimeUtils.formatJdbcLiteralUsingPrecision(
unwrap( value, java.sql.Date.class, wrapperOptions ),
precision
);
}
case TIME: {
return DateTimeUtils.formatJdbcLiteralUsingPrecision(
unwrap( value, java.sql.Time.class, wrapperOptions ),
precision
);
}
default: {
return DateTimeUtils.formatJdbcLiteralUsingPrecision(
unwrap( value, java.util.Date.class, wrapperOptions ),
precision
);
}
}
}
}
|
Java
|
# Septoria ribis f. ribis (Lib.) Desm., 1842 FORM
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null
|
Java
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (version 1.7.0-google-v5) on Thu Dec 19 17:42:37 EST 2013 -->
<title>TechnologyTargetingErrorReason</title>
<meta name="date" content="2013-12-19">
<link rel="stylesheet" type="text/css" href="../../../../../../stylesheet.css" title="Style">
</head>
<body>
<script type="text/javascript"><!--
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="TechnologyTargetingErrorReason";
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar_top">
<!-- -->
</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingError.html" title="class in com.google.api.ads.dfp.v201306"><span class="strong">Prev Class</span></a></li>
<li><a href="../../../../../../com/google/api/ads/dfp/v201306/TemplateCreative.html" title="class in com.google.api.ads.dfp.v201306"><span class="strong">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../../../../index.html?com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" target="_top">Frames</a></li>
<li><a href="TechnologyTargetingErrorReason.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li><a href="#field_summary">Field</a> | </li>
<li><a href="#constructor_summary">Constr</a> | </li>
<li><a href="#method_summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li><a href="#field_detail">Field</a> | </li>
<li><a href="#constructor_detail">Constr</a> | </li>
<li><a href="#method_detail">Method</a></li>
</ul>
</div>
<a name="skip-navbar_top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<!-- ======== START OF CLASS DATA ======== -->
<div class="header">
<div class="subTitle">com.google.api.ads.dfp.v201306</div>
<h2 title="Class TechnologyTargetingErrorReason" class="title">Class TechnologyTargetingErrorReason</h2>
</div>
<div class="contentContainer">
<ul class="inheritance">
<li>java.lang.Object</li>
<li>
<ul class="inheritance">
<li>com.google.api.ads.dfp.v201306.TechnologyTargetingErrorReason</li>
</ul>
</li>
</ul>
<div class="description">
<ul class="blockList">
<li class="blockList">
<dl>
<dt>All Implemented Interfaces:</dt>
<dd>java.io.Serializable</dd>
</dl>
<hr>
<br>
<pre>public class <span class="strong">TechnologyTargetingErrorReason</span>
extends java.lang.Object
implements java.io.Serializable</pre>
<dl><dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../../serialized-form.html#com.google.api.ads.dfp.v201306.TechnologyTargetingErrorReason">Serialized Form</a></dd></dl>
</li>
</ul>
</div>
<div class="summary">
<ul class="blockList">
<li class="blockList">
<!-- =========== FIELD SUMMARY =========== -->
<ul class="blockList">
<li class="blockList"><a name="field_summary">
<!-- -->
</a>
<h3>Field Summary</h3>
<table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Field Summary table, listing fields, and an explanation">
<caption><span>Fields</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Field and Description</th>
</tr>
<tr class="altColor">
<td class="colFirst"><code>static java.lang.String</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#_DEVICE_CAPABILITY_TARGETING_FEATURE_NOT_ENABLED">_DEVICE_CAPABILITY_TARGETING_FEATURE_NOT_ENABLED</a></strong></code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>static java.lang.String</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#_DEVICE_CATEGORY_TARGETING_FEATURE_NOT_ENABLED">_DEVICE_CATEGORY_TARGETING_FEATURE_NOT_ENABLED</a></strong></code> </td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>static java.lang.String</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#_MOBILE_CARRIER_TARGETING_FEATURE_NOT_ENABLED">_MOBILE_CARRIER_TARGETING_FEATURE_NOT_ENABLED</a></strong></code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>static java.lang.String</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#_MOBILE_LINE_ITEM_CONTAINS_WEB_TECH_CRITERIA">_MOBILE_LINE_ITEM_CONTAINS_WEB_TECH_CRITERIA</a></strong></code> </td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>static java.lang.String</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#_UNKNOWN">_UNKNOWN</a></strong></code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>static java.lang.String</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#_WEB_LINE_ITEM_CONTAINS_MOBILE_TECH_CRITERIA">_WEB_LINE_ITEM_CONTAINS_MOBILE_TECH_CRITERIA</a></strong></code> </td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>static <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a></code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#DEVICE_CAPABILITY_TARGETING_FEATURE_NOT_ENABLED">DEVICE_CAPABILITY_TARGETING_FEATURE_NOT_ENABLED</a></strong></code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>static <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a></code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#DEVICE_CATEGORY_TARGETING_FEATURE_NOT_ENABLED">DEVICE_CATEGORY_TARGETING_FEATURE_NOT_ENABLED</a></strong></code> </td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>static <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a></code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#MOBILE_CARRIER_TARGETING_FEATURE_NOT_ENABLED">MOBILE_CARRIER_TARGETING_FEATURE_NOT_ENABLED</a></strong></code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>static <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a></code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#MOBILE_LINE_ITEM_CONTAINS_WEB_TECH_CRITERIA">MOBILE_LINE_ITEM_CONTAINS_WEB_TECH_CRITERIA</a></strong></code> </td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>static <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a></code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#UNKNOWN">UNKNOWN</a></strong></code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>static <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a></code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#WEB_LINE_ITEM_CONTAINS_MOBILE_TECH_CRITERIA">WEB_LINE_ITEM_CONTAINS_MOBILE_TECH_CRITERIA</a></strong></code> </td>
</tr>
</table>
</li>
</ul>
<!-- ======== CONSTRUCTOR SUMMARY ======== -->
<ul class="blockList">
<li class="blockList"><a name="constructor_summary">
<!-- -->
</a>
<h3>Constructor Summary</h3>
<table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Constructor Summary table, listing constructors, and an explanation">
<caption><span>Constructors</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier</th>
<th class="colLast" scope="col">Constructor and Description</th>
</tr>
<tr class="altColor">
<td class="colFirst"><code>protected </code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#TechnologyTargetingErrorReason(java.lang.String)">TechnologyTargetingErrorReason</a></strong>(java.lang.String value)</code> </td>
</tr>
</table>
</li>
</ul>
<!-- ========== METHOD SUMMARY =========== -->
<ul class="blockList">
<li class="blockList"><a name="method_summary">
<!-- -->
</a>
<h3>Method Summary</h3>
<table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Method Summary table, listing methods, and an explanation">
<caption><span>Methods</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Method and Description</th>
</tr>
<tr class="altColor">
<td class="colFirst"><code>boolean</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#equals(java.lang.Object)">equals</a></strong>(java.lang.Object obj)</code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>static <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a></code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#fromString(java.lang.String)">fromString</a></strong>(java.lang.String value)</code> </td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>static <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a></code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#fromValue(java.lang.String)">fromValue</a></strong>(java.lang.String value)</code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>static org.apache.axis.encoding.Deserializer</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#getDeserializer(java.lang.String, java.lang.Class, javax.xml.namespace.QName)">getDeserializer</a></strong>(java.lang.String mechType,
java.lang.Class _javaType,
javax.xml.namespace.QName _xmlType)</code> </td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>static org.apache.axis.encoding.Serializer</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#getSerializer(java.lang.String, java.lang.Class, javax.xml.namespace.QName)">getSerializer</a></strong>(java.lang.String mechType,
java.lang.Class _javaType,
javax.xml.namespace.QName _xmlType)</code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>static org.apache.axis.description.TypeDesc</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#getTypeDesc()">getTypeDesc</a></strong>()</code>
<div class="block">Return type metadata object</div>
</td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>java.lang.String</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#getValue()">getValue</a></strong>()</code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>int</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#hashCode()">hashCode</a></strong>()</code> </td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>java.lang.Object</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#readResolve()">readResolve</a></strong>()</code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>java.lang.String</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#toString()">toString</a></strong>()</code> </td>
</tr>
</table>
<ul class="blockList">
<li class="blockList"><a name="methods_inherited_from_class_java.lang.Object">
<!-- -->
</a>
<h3>Methods inherited from class java.lang.Object</h3>
<code>clone, finalize, getClass, notify, notifyAll, wait, wait, wait</code></li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
<div class="details">
<ul class="blockList">
<li class="blockList">
<!-- ============ FIELD DETAIL =========== -->
<ul class="blockList">
<li class="blockList"><a name="field_detail">
<!-- -->
</a>
<h3>Field Detail</h3>
<a name="_MOBILE_LINE_ITEM_CONTAINS_WEB_TECH_CRITERIA">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>_MOBILE_LINE_ITEM_CONTAINS_WEB_TECH_CRITERIA</h4>
<pre>public static final java.lang.String _MOBILE_LINE_ITEM_CONTAINS_WEB_TECH_CRITERIA</pre>
<dl><dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../../constant-values.html#com.google.api.ads.dfp.v201306.TechnologyTargetingErrorReason._MOBILE_LINE_ITEM_CONTAINS_WEB_TECH_CRITERIA">Constant Field Values</a></dd></dl>
</li>
</ul>
<a name="_WEB_LINE_ITEM_CONTAINS_MOBILE_TECH_CRITERIA">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>_WEB_LINE_ITEM_CONTAINS_MOBILE_TECH_CRITERIA</h4>
<pre>public static final java.lang.String _WEB_LINE_ITEM_CONTAINS_MOBILE_TECH_CRITERIA</pre>
<dl><dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../../constant-values.html#com.google.api.ads.dfp.v201306.TechnologyTargetingErrorReason._WEB_LINE_ITEM_CONTAINS_MOBILE_TECH_CRITERIA">Constant Field Values</a></dd></dl>
</li>
</ul>
<a name="_MOBILE_CARRIER_TARGETING_FEATURE_NOT_ENABLED">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>_MOBILE_CARRIER_TARGETING_FEATURE_NOT_ENABLED</h4>
<pre>public static final java.lang.String _MOBILE_CARRIER_TARGETING_FEATURE_NOT_ENABLED</pre>
<dl><dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../../constant-values.html#com.google.api.ads.dfp.v201306.TechnologyTargetingErrorReason._MOBILE_CARRIER_TARGETING_FEATURE_NOT_ENABLED">Constant Field Values</a></dd></dl>
</li>
</ul>
<a name="_DEVICE_CAPABILITY_TARGETING_FEATURE_NOT_ENABLED">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>_DEVICE_CAPABILITY_TARGETING_FEATURE_NOT_ENABLED</h4>
<pre>public static final java.lang.String _DEVICE_CAPABILITY_TARGETING_FEATURE_NOT_ENABLED</pre>
<dl><dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../../constant-values.html#com.google.api.ads.dfp.v201306.TechnologyTargetingErrorReason._DEVICE_CAPABILITY_TARGETING_FEATURE_NOT_ENABLED">Constant Field Values</a></dd></dl>
</li>
</ul>
<a name="_DEVICE_CATEGORY_TARGETING_FEATURE_NOT_ENABLED">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>_DEVICE_CATEGORY_TARGETING_FEATURE_NOT_ENABLED</h4>
<pre>public static final java.lang.String _DEVICE_CATEGORY_TARGETING_FEATURE_NOT_ENABLED</pre>
<dl><dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../../constant-values.html#com.google.api.ads.dfp.v201306.TechnologyTargetingErrorReason._DEVICE_CATEGORY_TARGETING_FEATURE_NOT_ENABLED">Constant Field Values</a></dd></dl>
</li>
</ul>
<a name="_UNKNOWN">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>_UNKNOWN</h4>
<pre>public static final java.lang.String _UNKNOWN</pre>
<dl><dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../../constant-values.html#com.google.api.ads.dfp.v201306.TechnologyTargetingErrorReason._UNKNOWN">Constant Field Values</a></dd></dl>
</li>
</ul>
<a name="MOBILE_LINE_ITEM_CONTAINS_WEB_TECH_CRITERIA">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>MOBILE_LINE_ITEM_CONTAINS_WEB_TECH_CRITERIA</h4>
<pre>public static final <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a> MOBILE_LINE_ITEM_CONTAINS_WEB_TECH_CRITERIA</pre>
</li>
</ul>
<a name="WEB_LINE_ITEM_CONTAINS_MOBILE_TECH_CRITERIA">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>WEB_LINE_ITEM_CONTAINS_MOBILE_TECH_CRITERIA</h4>
<pre>public static final <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a> WEB_LINE_ITEM_CONTAINS_MOBILE_TECH_CRITERIA</pre>
</li>
</ul>
<a name="MOBILE_CARRIER_TARGETING_FEATURE_NOT_ENABLED">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>MOBILE_CARRIER_TARGETING_FEATURE_NOT_ENABLED</h4>
<pre>public static final <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a> MOBILE_CARRIER_TARGETING_FEATURE_NOT_ENABLED</pre>
</li>
</ul>
<a name="DEVICE_CAPABILITY_TARGETING_FEATURE_NOT_ENABLED">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>DEVICE_CAPABILITY_TARGETING_FEATURE_NOT_ENABLED</h4>
<pre>public static final <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a> DEVICE_CAPABILITY_TARGETING_FEATURE_NOT_ENABLED</pre>
</li>
</ul>
<a name="DEVICE_CATEGORY_TARGETING_FEATURE_NOT_ENABLED">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>DEVICE_CATEGORY_TARGETING_FEATURE_NOT_ENABLED</h4>
<pre>public static final <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a> DEVICE_CATEGORY_TARGETING_FEATURE_NOT_ENABLED</pre>
</li>
</ul>
<a name="UNKNOWN">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>UNKNOWN</h4>
<pre>public static final <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a> UNKNOWN</pre>
</li>
</ul>
</li>
</ul>
<!-- ========= CONSTRUCTOR DETAIL ======== -->
<ul class="blockList">
<li class="blockList"><a name="constructor_detail">
<!-- -->
</a>
<h3>Constructor Detail</h3>
<a name="TechnologyTargetingErrorReason(java.lang.String)">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>TechnologyTargetingErrorReason</h4>
<pre>protected TechnologyTargetingErrorReason(java.lang.String value)</pre>
</li>
</ul>
</li>
</ul>
<!-- ============ METHOD DETAIL ========== -->
<ul class="blockList">
<li class="blockList"><a name="method_detail">
<!-- -->
</a>
<h3>Method Detail</h3>
<a name="getValue()">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>getValue</h4>
<pre>public java.lang.String getValue()</pre>
</li>
</ul>
<a name="fromValue(java.lang.String)">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>fromValue</h4>
<pre>public static <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a> fromValue(java.lang.String value)
throws java.lang.IllegalArgumentException</pre>
<dl><dt><span class="strong">Throws:</span></dt>
<dd><code>java.lang.IllegalArgumentException</code></dd></dl>
</li>
</ul>
<a name="fromString(java.lang.String)">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>fromString</h4>
<pre>public static <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a> fromString(java.lang.String value)
throws java.lang.IllegalArgumentException</pre>
<dl><dt><span class="strong">Throws:</span></dt>
<dd><code>java.lang.IllegalArgumentException</code></dd></dl>
</li>
</ul>
<a name="equals(java.lang.Object)">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>equals</h4>
<pre>public boolean equals(java.lang.Object obj)</pre>
<dl>
<dt><strong>Overrides:</strong></dt>
<dd><code>equals</code> in class <code>java.lang.Object</code></dd>
</dl>
</li>
</ul>
<a name="hashCode()">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>hashCode</h4>
<pre>public int hashCode()</pre>
<dl>
<dt><strong>Overrides:</strong></dt>
<dd><code>hashCode</code> in class <code>java.lang.Object</code></dd>
</dl>
</li>
</ul>
<a name="toString()">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>toString</h4>
<pre>public java.lang.String toString()</pre>
<dl>
<dt><strong>Overrides:</strong></dt>
<dd><code>toString</code> in class <code>java.lang.Object</code></dd>
</dl>
</li>
</ul>
<a name="readResolve()">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>readResolve</h4>
<pre>public java.lang.Object readResolve()
throws java.io.ObjectStreamException</pre>
<dl><dt><span class="strong">Throws:</span></dt>
<dd><code>java.io.ObjectStreamException</code></dd></dl>
</li>
</ul>
<a name="getSerializer(java.lang.String, java.lang.Class, javax.xml.namespace.QName)">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>getSerializer</h4>
<pre>public static org.apache.axis.encoding.Serializer getSerializer(java.lang.String mechType,
java.lang.Class _javaType,
javax.xml.namespace.QName _xmlType)</pre>
</li>
</ul>
<a name="getDeserializer(java.lang.String, java.lang.Class, javax.xml.namespace.QName)">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>getDeserializer</h4>
<pre>public static org.apache.axis.encoding.Deserializer getDeserializer(java.lang.String mechType,
java.lang.Class _javaType,
javax.xml.namespace.QName _xmlType)</pre>
</li>
</ul>
<a name="getTypeDesc()">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>getTypeDesc</h4>
<pre>public static org.apache.axis.description.TypeDesc getTypeDesc()</pre>
<div class="block">Return type metadata object</div>
</li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
</div>
<!-- ========= END OF CLASS DATA ========= -->
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar_bottom">
<!-- -->
</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingError.html" title="class in com.google.api.ads.dfp.v201306"><span class="strong">Prev Class</span></a></li>
<li><a href="../../../../../../com/google/api/ads/dfp/v201306/TemplateCreative.html" title="class in com.google.api.ads.dfp.v201306"><span class="strong">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../../../../index.html?com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" target="_top">Frames</a></li>
<li><a href="TechnologyTargetingErrorReason.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li><a href="#field_summary">Field</a> | </li>
<li><a href="#constructor_summary">Constr</a> | </li>
<li><a href="#method_summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li><a href="#field_detail">Field</a> | </li>
<li><a href="#constructor_detail">Constr</a> | </li>
<li><a href="#method_detail">Method</a></li>
</ul>
</div>
<a name="skip-navbar_bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
</body>
</html>
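The generated reference above lists only signatures, so here is a brief usage sketch of this enum-style class. It is a hedged illustration: the wrapping demo class, the main method, and the literal lookup string are assumptions, while fromString, getValue, UNKNOWN, and the IllegalArgumentException behaviour come from the documentation above.
import com.google.api.ads.dfp.v201306.TechnologyTargetingErrorReason;
// Hedged usage sketch; the demo class and the literal string below are assumptions.
public class TechnologyTargetingErrorReasonDemo {
    public static void main(String[] args) {
        TechnologyTargetingErrorReason reason;
        try {
            // fromString resolves a string value to the matching constant,
            // throwing IllegalArgumentException for unrecognized input.
            reason = TechnologyTargetingErrorReason.fromString(
                    "MOBILE_LINE_ITEM_CONTAINS_WEB_TECH_CRITERIA");
        } catch (IllegalArgumentException e) {
            // Fall back to the documented UNKNOWN constant.
            reason = TechnologyTargetingErrorReason.UNKNOWN;
        }
        // getValue returns the underlying string value of the constant.
        System.out.println(reason.getValue());
    }
}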
|
Java
|
## user-rest-service-2.0.0 / Swagger 2.0.0-SNAPSHOT
This demo uses version 2.0.0-SNAPSHOT of Swagger-Spring Integration and will always be updated to the latest head.
Swagger 2.0.0 is currently under development.
### IDE
Currently maven configuration files are supplied for all projects.
* Run Spring Boot Application: mvn spring-boot:run
* Generate Eclipse configuration: mvn eclipse:eclipse
### Features of Model-Classes
* enum (user.state)
* java.lang.Byte[] (user.photo)
* java.lang.Double (user.longitude)
* java.util.List (user.category, user.location)
* java.math.BigDecimal (location.longitude)
### Features of Controller-Classes
* HTTP GET/POST (UserController)
* Handling Error-codes using Exceptions (UserController)
* Handling Error-codes using ResponseEntity (UserControllerResponseEntity); see the sketch below
* Multiple Paths for the same HTTP verb (UserControllerForCodegenWithTwoGetPaths)
>>> The second path for the same verb is currently commented out because of errors during CodeGen
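### Sketch: error handling with ResponseEntity
As a rough illustration of the ResponseEntity-based error handling listed above, here is a minimal, hypothetical sketch. The endpoint path, the in-memory map, and the method name are assumptions and are not taken from the actual demo sources; only the class name UserControllerResponseEntity comes from the list above.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RestController;
// Hypothetical sketch: the path, map, and lookup logic are assumptions, not the demo's code.
@RestController
public class UserControllerResponseEntity {
    // In-memory stand-in for whatever persistence the real demo uses.
    private final Map<Long, String> users = new ConcurrentHashMap<>(Map.of(1L, "alice"));
    @GetMapping("/users/{id}")
    public ResponseEntity<String> getUser(@PathVariable("id") long id) {
        String name = users.get(id);
        if (name == null) {
            // The error code travels in the ResponseEntity status instead of an exception.
            return ResponseEntity.status(HttpStatus.NOT_FOUND).build();
        }
        return ResponseEntity.ok(name);
    }
}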
|
Java
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1", manifest={"SpecialistPool",},
)
class SpecialistPool(proto.Message):
r"""SpecialistPool represents customers' own workforce to work on
their data labeling jobs. It includes a group of specialist
managers and workers. Managers are responsible for managing the
workers in this pool as well as customers' data labeling jobs
associated with this pool. Customers create a specialist pool as
well as start data labeling jobs on Cloud; managers and workers
handle the jobs using the CrowdCompute console.
Attributes:
name (str):
Required. The resource name of the
SpecialistPool.
display_name (str):
Required. The user-defined name of the
SpecialistPool. The name can be up to 128
characters long and can consist of any UTF-8
characters.
This field should be unique at the project level.
specialist_managers_count (int):
Output only. The number of managers in this
SpecialistPool.
specialist_manager_emails (Sequence[str]):
The email addresses of the managers in the
SpecialistPool.
pending_data_labeling_jobs (Sequence[str]):
Output only. The resource name of the pending
data labeling jobs.
specialist_worker_emails (Sequence[str]):
The email addresses of workers in the
SpecialistPool.
"""
name = proto.Field(proto.STRING, number=1,)
display_name = proto.Field(proto.STRING, number=2,)
specialist_managers_count = proto.Field(proto.INT32, number=3,)
specialist_manager_emails = proto.RepeatedField(proto.STRING, number=4,)
pending_data_labeling_jobs = proto.RepeatedField(proto.STRING, number=5,)
specialist_worker_emails = proto.RepeatedField(proto.STRING, number=7,)
__all__ = tuple(sorted(__protobuf__.manifest))
|
Java
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace Microsoft.AspNetCore.Routing
{
/// <summary>
/// Indicates whether ASP.NET routing is processing a URL from an HTTP request or generating a URL.
/// </summary>
public enum RouteDirection
{
/// <summary>
/// A URL from a client is being processed.
/// </summary>
IncomingRequest,
/// <summary>
/// A URL is being created based on the route definition.
/// </summary>
UrlGeneration,
}
}
|
Java
|
data segment
x dw 0FFFFh
s db "00000",0Dh,0Ah,"$"
data ends
code segment
assume cs:code, ds:data
main:
mov ax, data
mov ds, ax
mov bx, 4; index of the last digit position in s
mov ax, [x]
next:
mov dx, 0; ensure the high 16 bits of the dividend are 0
mov cx, 10
div cx; (DX:AX)/CX=AX..DX
add dl, '0'
mov s[bx], dl
cmp ax, 0
je done
dec bx
jmp next
done:
mov ah, 9
mov dx, offset s
int 21h
mov ah, 4Ch
int 21h
code ends
end main
|
Java
|
---
layout: "fluid/docs_base"
version: "2.1.0"
versionHref: "/docs/v3/2.1.0"
path: ""
category: api
id: "img"
title: "Img"
header_sub_title: "Ionic API Documentation"
doc: "Img"
docType: "class"
---
<h1 class="api-title">
<a class="anchor" name="img" href="#img"></a>
Img
<h3><code>ion-img</code></h3>
</h1>
<a class="improve-v2-docs" href="http://github.com/ionic-team/ionic/edit/master//src/components/img/img.ts#L5">
Improve this doc
</a>
<p>Two of the biggest culprits of scroll jank are starting up a new HTTP
request and rendering images. These two culprits are largely why
<code>ion-img</code> was created. The standard HTML <code>img</code> element is often a large
source of these problems, and what makes matters worse is that the app
does not have fine-grained control of requests and rendering for each
<code>img</code> element.</p>
<p>The <code>ion-img</code> component is similar to the standard <code>img</code> element,
but it also adds features in order to provide improved performance.
Features include only loading images which are visible, using web workers
for HTTP requests, preventing jank while scrolling and in-memory caching.</p>
<p>Note that <code>ion-img</code> also comes with a few more restrictions in comparison
to the standard <code>img</code> element. A good rule is, if there are only a few
images to be rendered on a page, then the standard <code>img</code> is probably
best. However, if a page has the potential for hundreds or even thousands
of images within a scrollable area, then <code>ion-img</code> would be better suited
for the job.</p>
<blockquote>
<p>Note: <code>ion-img</code> is only meant to be used inside of <a href="/docs/api/components/virtual-scroll/VirtualScroll/">virtual-scroll</a></p>
</blockquote>
<h3 id="lazy-loading">Lazy Loading</h3>
<p>Lazy loading images refers to only loading images which are actually
visible within the user's viewport. This also means that images which are
not viewable on the initial load would not be downloaded or rendered. Next,
as the user scrolls, each image that becomes visible is requested and
then rendered on demand.</p>
<p>The benefits of this approach are that unnecessary and resource-intensive
HTTP requests are not started, valuable bandwidth isn't wasted, and this
allows the browser to free up resources which would be wasted on images
which are not even viewable. For example, animated GIFs are enormous
performance drains; however, with <code>ion-img</code> the app is able to dedicate
resources to just the viewable images. But again, if the problems listed
above are not problems within your app, then the standard <code>img</code> element
may be best.</p>
<h3 id="image-dimensions">Image Dimensions</h3>
<p>By providing image dimensions up front, Ionic is able to accurately size
up the image's location within the viewport, which helps lazy load only
images which are viewable. Image dimensions can either be set as
properties, inline styles, or external stylesheets. It doesn't matter
which method of setting dimensions is used, but it's important that somehow
each <code>ion-img</code> has been given an exact size.</p>
<p>For example, by default <code><ion-avatar></code> and <code><ion-thumbnail></code> already come
with exact sizes when placed within an <code><ion-item></code>. By giving each image
an exact size, this then further locks in the size of each <code>ion-item</code>,
which again helps improve scroll performance.</p>
<pre><code class="lang-html"><!-- dimensions set using attributes -->
<ion-img width="80" height="80" src="..."></ion-img>
<!-- dimensions set using input properties -->
<ion-img [width]="imgWidth" [height]="imgHeight" src="..."></ion-img>
<!-- dimensions set using inline styles -->
<ion-img style="width: 80px; height: 80px;" src="..."></ion-img>
</code></pre>
<p>Additionally, each <code>ion-img</code> uses the <code>object-fit: cover</code> CSS property.
What this means is that the actual rendered image will center itself within
its container. Or, to really get detailed: the image is sized to maintain
its aspect ratio while filling the containing element's entire content box.
Its concrete object size is resolved as a cover constraint against the
element's used width and height.</p>
<h3 id="future-optimizations">Future Optimizations</h3>
<p>Future goals are to place image requests within web workers, and cache
images in-memory as data URIs. This method has proven to be effective;
however, there are some current limitations with Cordova which we are
currently working on.</p>
<!-- @usage tag -->
<!-- @property tags -->
<!-- instance methods on the class -->
<!-- input methods on the class -->
<h2><a class="anchor" name="input-properties" href="#input-properties"></a>Input Properties</h2>
<table class="table param-table" style="margin:0;">
<thead>
<tr>
<th>Attr</th>
<th>Type</th>
<th>Details</th>
</tr>
</thead>
<tbody>
<tr>
<td>alt</td>
<td><code>string</code></td>
<td><p> Set the <code>alt</code> attribute which gets assigned to
the inner <code>img</code> element.</p>
</td>
</tr>
<tr>
<td>bounds</td>
<td><code>any</code></td>
<td><p> Sets the bounding rectangle of the element relative to the viewport.
When using <code>VirtualScroll</code>, each virtual item should pass its bounds to each
<code>ion-img</code>. The passed in data object should include <code>top</code> and <code>bottom</code> properties.</p>
</td>
</tr>
<tr>
<td>cache</td>
<td><code>boolean</code></td>
<td><p> After an image has been successfully downloaded, it can be cached
in-memory. This is useful for <code>VirtualScroll</code> by allowing image responses to be
cached, and not rendered, until after scrolling has completed, which allows for
smoother scrolling.</p>
</td>
</tr>
<tr>
<td>height</td>
<td><code>string</code></td>
<td><p> Image height. If this property is not set it's important that
the dimensions are still set using CSS. If the dimension is just a number it
will assume the <code>px</code> unit.</p>
</td>
</tr>
<tr>
<td>src</td>
<td><code>string</code></td>
<td><p> The source of the image.</p>
</td>
</tr>
<tr>
<td>width</td>
<td><code>string</code></td>
<td><p> Image width. If this property is not set it's important that
the dimensions are still set using CSS. If the dimension is just a number it
will assume the <code>px</code> unit.</p>
</td>
</tr>
</tbody>
</table>
<h2 id="sass-variable-header"><a class="anchor" name="sass-variables" href="#sass-variables"></a>Sass Variables</h2>
<div id="sass-variables" ng-controller="SassToggleCtrl">
<div class="sass-platform-toggle">
<h3 ng-init="setSassPlatform('base')">All</h3>
</div>
<table ng-show="active === 'base'" id="sass-base" class="table param-table" style="margin:0;">
<thead>
<tr>
<th>Property</th>
<th>Default</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td><code>$img-placeholder-background</code></td>
<td><code>#eee</code></td>
<td><p>Color of the image when it hasn't fully loaded yet</p>
</td>
</tr>
</tbody>
</table>
</div>
<!-- related link --><!-- end content block -->
<!-- end body block -->
|
Java
|
---
title: Manage access to multiple kubernetes clusters
date: 2018-10-30 00:00:00 Z
tags:
- docker
- truc&astuce
- k8s
subtitle: multiple config files? a single config file
comments: true
thumbnail: https://upload.wikimedia.org/wikipedia/commons/thumb/3/39/Kubernetes_logo_without_workmark.svg/1200px-Kubernetes_logo_without_workmark.svg.png
---
## Merging the configurations
1. Save every config file in `~/.kube/` as a file named `config.ENV`
2. Merge the configs: `KUBECONFIG=$(find ~/.kube -maxdepth 1 -type f -name "*config*" ! -name config | tr '\n' ':') kubectl config view --flatten > ~/.kube/config`
## Handy tools
- [kubectx](https://kubectx.dev/): switch clusters easily
- [kubens](https://kubectx.dev/): switch namespaces easily
|
Java
|
package com.etiennelawlor.loop.network.models.response;
import android.os.Parcel;
import android.os.Parcelable;
import com.google.gson.annotations.SerializedName;
/**
* Created by etiennelawlor on 5/23/15.
*/
public class Tag implements Parcelable {
// region Fields
@SerializedName("uri")
private String uri;
@SerializedName("name")
private String name;
@SerializedName("tag")
private String tag;
@SerializedName("canonical")
private String canonical;
// endregion
// region Constructors
public Tag() {
}
protected Tag(Parcel in) {
this.uri = in.readString();
this.name = in.readString();
this.tag = in.readString();
this.canonical = in.readString();
}
// endregion
// region Getters
public String getUri() {
return uri;
}
public String getName() {
return name;
}
public String getTag() {
return tag;
}
public String getCanonical() {
return canonical;
}
// endregion
// region Setters
public void setUri(String uri) {
this.uri = uri;
}
public void setName(String name) {
this.name = name;
}
public void setTag(String tag) {
this.tag = tag;
}
public void setCanonical(String canonical) {
this.canonical = canonical;
}
// endregion
// region Parcelable Methods
@Override
public int describeContents() {
return 0;
}
@Override
public void writeToParcel(Parcel dest, int flags) {
dest.writeString(this.uri);
dest.writeString(this.name);
dest.writeString(this.tag);
dest.writeString(this.canonical);
}
// endregion
public static final Parcelable.Creator<Tag> CREATOR = new Parcelable.Creator<Tag>() {
@Override
public Tag createFromParcel(Parcel source) {
return new Tag(source);
}
@Override
public Tag[] newArray(int size) {
return new Tag[size];
}
};
}
|
Java
|
# -*- coding: utf-8 -*-
# Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Prints the env_setup banner for cmd.exe.
This is done from Python as activating colors and printing ASCII art are not
easy to do in cmd.exe. Activated colors also don't persist in the parent
process.
"""
from __future__ import print_function
import argparse
import os
import sys
from .colors import Color, enable_colors # type: ignore
_PIGWEED_BANNER = u'''
 ▒█████▄   █▓  ▄███▒  ▒█    ▒█ ░▓████▒ ░▓████▒ ▒▓████▄
  ▒█░  █░ ░█▒ ██▒ ▀█▒ ▒█░ █ ▒█  ▒█   ▀  ▒█   ▀  ▒█  ▀█▌
  ▒█▄▄▄█░ ░█▒ █▓░ ▄▄░ ▒█░ █ ▒█  ▒███    ▒███    ░█   █▌
  ▒█▀     ░█░ ▓█   █▓ ░█░ █ ▒█  ▒█   ▄  ▒█   ▄  ░█  ▄█▌
  ▒█      ░█░ ░▓███▀   ▒█▓▀▓█░ ░▓████▒ ░▓████▒ ▒▓████▀
'''
def print_banner(bootstrap, no_shell_file):
"""Print the Pigweed or project-specific banner"""
enable_colors()
print(Color.green('\n WELCOME TO...'))
print(Color.magenta(_PIGWEED_BANNER))
if bootstrap:
print(
Color.green('\n BOOTSTRAP! Bootstrap may take a few minutes; '
'please be patient'))
print(
Color.green(
' On Windows, this stage is extremely slow (~10 minutes).\n'))
else:
print(
Color.green(
'\n ACTIVATOR! This sets your console environment variables.\n'
))
if no_shell_file:
print(Color.bold_red('Error!\n'))
print(
Color.red(' Your Pigweed environment does not seem to be'
' configured.'))
print(Color.red(' Run bootstrap.bat to perform initial setup.'))
return 0
def parse():
"""Parse command-line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('--bootstrap', action='store_true')
parser.add_argument('--no-shell-file', action='store_true')
return parser.parse_args()
def main():
"""Script entry point."""
if os.name != 'nt':
return 1
return print_banner(**vars(parse()))
if __name__ == '__main__':
sys.exit(main())
|
Java
|
package rvc.ann;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
import static java.lang.annotation.ElementType.METHOD;
import static java.lang.annotation.RetentionPolicy.RUNTIME;
/**
* @author nurmuhammad
*/
@Retention(RUNTIME)
@Target(METHOD)
public @interface OPTIONS {
String value() default Constants.NULL_VALUE;
boolean absolutePath() default false;
}
|
Java
|
package com.xiaojinzi.component.bean;
import javax.lang.model.element.Element;
/**
* time : 2018/07/26
*
* @author : xiaojinzi
*/
public class RouterDegradeAnnoBean {
/**
* Priority.
*/
private int priority;
/**
* Each such class implements the RouterDegrade interface.
*/
private Element rawType;
public int getPriority() {
return priority;
}
public void setPriority(int priority) {
this.priority = priority;
}
public Element getRawType() {
return rawType;
}
public void setRawType(Element rawType) {
this.rawType = rawType;
}
}
|
Java
|
package Paws::EC2::DhcpConfiguration;
use Moose;
has Key => (is => 'ro', isa => 'Str', request_name => 'key', traits => ['NameInRequest']);
has Values => (is => 'ro', isa => 'ArrayRef[Paws::EC2::AttributeValue]', request_name => 'valueSet', traits => ['NameInRequest']);
1;
### main pod documentation begin ###
=head1 NAME
Paws::EC2::DhcpConfiguration
=head1 USAGE
This class represents one of two things:
=head3 Arguments in a call to a service
Use the attributes of this class as arguments to methods. You shouldn't make instances of this class.
Each attribute should be used as a named argument in the calls that expect this type of object.
As an example, if Att1 is expected to be a Paws::EC2::DhcpConfiguration object:
$service_obj->Method(Att1 => { Key => $value, ..., Values => $value });
=head3 Results returned from an API call
Use accessors for each attribute. If Att1 is expected to be a Paws::EC2::DhcpConfiguration object:
$result = $service_obj->Method(...);
$result->Att1->Key
=head1 DESCRIPTION
This class has no description
=head1 ATTRIBUTES
=head2 Key => Str
The name of a DHCP option.
=head2 Values => ArrayRef[L<Paws::EC2::AttributeValue>]
One or more values for the DHCP option.
=head1 SEE ALSO
This class forms part of L<Paws>, describing an object used in L<Paws::EC2>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
|
Java
|
---
layout: article
title: "Constructing the Object Model"
description: "Before the browser can render content to the screen it needs to construct the DOM and CSSOM trees. As a result, we need to ensure that we deliver both the HTML and CSS to the browser as quickly as possible."
introduction: "Before the browser can render the page it needs to construct the DOM and CSSOM trees. As a result, we need to ensure that we deliver both the HTML and CSS to the browser as quickly as possible."
article:
written_on: 2014-04-01
updated_on: 2014-04-28
order: 1
collection: critical-rendering-path
key-takeaways:
construct-object-model:
- Bytes → characters → tokens → nodes → object model
- HTML markup is transformed into a Document Object Model (DOM), CSS markup is transformed into a CSS Object Model (CSSOM)
- DOM and CSSOM are independent data structures
- Chrome DevTools Timeline allows us to capture and inspect the construction and processing costs of DOM and CSSOM
notes:
devtools:
- We'll assume that you have basic familiarity with Chrome DevTools - i.e. you know how to capture a network waterfall, or record a timeline. If you need a quick refresher, check out the <a href="https://developers.google.com/chrome-developer-tools/">Chrome Developer Tools documentation</a>, or if you're new to DevTools, I recommend taking the Codeschool <a href="http://discover-devtools.codeschool.com/">Discover DevTools</a> course.
---
{% wrap content%}
<style>
img, video, object {
max-width: 100%;
}
img.center {
display: block;
margin-left: auto;
margin-right: auto;
}
</style>
{% include modules/toc.liquid %}
{% include modules/takeaway.liquid list=page.key-takeaways.construct-object-model %}
## Document Object Model (DOM)
{% include_code _code/basic_dom.html full %}
Let's start with the simplest possible case: a plain HTML page with some text and a single image. What does the browser need to do to process this simple page?
<img src="images/full-process.png" alt="DOM construction process">
1. **Conversion:** the browser reads the raw bytes of the HTML off the disk or network and translates them to individual characters based on specified encoding of the file (e.g. UTF-8).
1. **Tokenizing:** the browser converts strings of characters into distinct tokens specified by the [W3C HTML5 standard](http://www.w3.org/TR/html5/) - e.g. "<html>", "<body>" and other strings within the "angle brackets". Each token has a special meaning and a set of rules.
1. **Lexing:** the emitted tokens are converted into "objects" which define their properties and rules.
1. **DOM construction:** Finally, because the HTML markup defines relationships between different tags (some tags are contained within tags) the created objects are linked in a tree data structure that also captures the parent-child relationships defined in the original markup: _HTML_ object is a parent of the _body_ object, the _body_ is a parent of the _paragraph_ object, and so on.
<img src="images/dom-tree.png" class="center" alt="DOM tree">
**The final output of this entire process is the Document Object Model, or the "DOM" of our simple page, which the browser uses for all further processing of the page.**
Every time the browser has to process HTML markup it has to step through all of the steps above: convert bytes to characters, identify tokens, convert tokens to nodes, and build the DOM tree. This entire process can take some time, especially if we have a large amount of HTML to process.
<img src="images/dom-timeline.png" class="center" alt="Tracing DOM construction in DevTools">
{% include modules/remember.liquid title="Note" list=page.notes.devtools %}
If you open up Chrome DevTools and record a timeline while the page is loaded, you can see the actual time taken to perform this step — in the example above, it took us ~5ms to convert a chunk of HTML bytes into a DOM tree. Of course, if the page were larger, as most pages are, this process might take significantly longer. You will see in our future sections on creating smooth animations that this can easily become your bottleneck if the browser has to process large amounts of HTML. That said, let's not get ahead of ourselves...
With the DOM tree ready, do we have enough information to render the page to the screen? Not yet! The DOM tree captures the properties and relationships of the document markup, but it does not tell us anything about how the element should look when rendered. That's the responsibility of the CSSOM, which we turn to next!
## CSS Object Model (CSSOM)
While the browser was constructing the DOM of our simple page, it encountered a link tag in the head section of the document referencing an external CSS stylesheet: style.css. Anticipating that it will need this resource to render the page, it immediately dispatches a request for this resource, which comes back with the following content:
{% include_code _code/style.css full css %}
Of course, we could have declared our styles directly within the HTML markup (inline), but keeping our CSS independent of HTML allows us to treat content and design as separate concerns: the designers can work on CSS, developers can focus on HTML, and so on.
Just as with HTML, we need to convert the received CSS rules into something that the browser can understand and work with. Hence, once again, we repeat a very similar process as we did with HTML:
<img src="images/cssom-construction.png" class="center" alt="CSSOM construction steps">
The CSS bytes are converted into characters, then to tokens and nodes, and finally are linked into a tree structure known as the "CSS Object Model", or CSSOM for short:
<img src="images/cssom-tree.png" class="center" alt="CSSOM tree">
Why does the CSSOM have a tree structure? When computing the final set of styles for any object on the page, the browser starts with the most general rule applicable to that node (e.g. if it is a child of body element, then all body styles apply) and then recursively refines the computed styles by applying more specific rules - i.e. the rules "cascade down".
To make it more concrete, consider the CSSOM tree above. Any text contained within the _span_ tag that is placed within the body element will have a font size of 16 pixels and have red text - the font-size directive cascades down from body to the span. However, if a span tag is a child of a paragraph (p) tag, then its contents are not displayed.
Also, note that the above tree is not the complete CSSOM tree and only shows the styles we decided to override in our stylesheet. Every browser provides a default set of styles also known as "user agent styles" -- that's what we see when we don't provide any of our own -- and our styles simply override these defaults (e.g. [default IE styles](http://www.iecss.com/)). If you have ever inspected your "computed styles" in Chrome DevTools and wondered where all the styles are coming from, now you know!
Curious to know how long the CSS processing took? Record a timeline in DevTools and look for the "Recalculate Style" event: unlike DOM parsing, the timeline doesn't show a separate "Parse CSS" entry, and instead captures parsing and CSSOM tree construction, plus the recursive calculation of computed styles under this one event.
<img src="images/cssom-timeline.png" class="center" alt="Tracing CSSOM construction in DevTools">
Our trivial stylesheet takes ~0.6ms to process and affects 8 elements on the page -- not much, but once again, not free. However, where did the 8 elements come from? The CSSOM and DOM are independent data structures! Turns out, the browser is hiding an important step. Next, let's talk about the render tree that links the DOM and CSSOM together.
{% include modules/nextarticle.liquid %}
{% endwrap %}
|
Java
|