Dataset columns (one row per source file):
  code        string   (length 2 to 1.05M characters)
  repo_name   string   (length 5 to 104)
  path        string   (length 4 to 251)
  language    string   (1 distinct value)
  license     string   (15 distinct values)
  size        int32    (2 to 1.05M)
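The columns above describe one row per source file: the file contents (code), its origin (repo_name, path), and the language, license, and size fields. Below is a minimal sketch of how rows with this schema might be iterated, assuming a Hugging Face datasets-style dump; the dataset path is a placeholder, not taken from this export.

from datasets import load_dataset  # pip install datasets

# Placeholder path: substitute the actual location of this dump.
ds = load_dataset("org/python-apache-2.0-code", split="train")

# Preview a few rows: each row pairs a stored source file with its metadata.
for row in ds.select(range(3)):
    print(row["repo_name"], row["path"], row["license"], row["size"])
    print(row["code"][:200])  # first 200 characters of the stored file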
# Copyright (c) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import mock from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib.plugins import directory from oslo_utils import timeutils from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api from neutron.common import utils from neutron.db.agentschedulers_db import cfg from neutron.db.models import agent as agent_model from neutron.tests import base class TestDhcpAgentNotifyAPI(base.BaseTestCase): def setUp(self): super(TestDhcpAgentNotifyAPI, self).setUp() self.notifier = ( dhcp_rpc_agent_api.DhcpAgentNotifyAPI(plugin=mock.Mock())) mock_util_p = mock.patch.object(utils, 'is_extension_supported') mock_log_p = mock.patch.object(dhcp_rpc_agent_api, 'LOG') mock_fanout_p = mock.patch.object(self.notifier, '_fanout_message') mock_cast_p = mock.patch.object(self.notifier, '_cast_message') self.mock_util = mock_util_p.start() self.mock_log = mock_log_p.start() self.mock_fanout = mock_fanout_p.start() self.mock_cast = mock_cast_p.start() def _test__schedule_network(self, network, new_agents=None, existing_agents=None, expected_casts=0, expected_warnings=0): self.notifier.plugin.schedule_network.return_value = new_agents agents = self.notifier._schedule_network( mock.ANY, network, existing_agents) if new_agents is None: new_agents = [] self.assertEqual(new_agents + existing_agents, agents) self.assertEqual(expected_casts, self.mock_cast.call_count) self.assertEqual(expected_warnings, self.mock_log.warning.call_count) def test__schedule_network(self): agent = agent_model.Agent() agent.admin_state_up = True agent.heartbeat_timestamp = timeutils.utcnow() network = {'id': 'foo_net_id'} self._test__schedule_network(network, new_agents=[agent], existing_agents=[], expected_casts=1, expected_warnings=0) def test__schedule_network_no_existing_agents(self): agent = agent_model.Agent() agent.admin_state_up = True agent.heartbeat_timestamp = timeutils.utcnow() network = {'id': 'foo_net_id'} self._test__schedule_network(network, new_agents=None, existing_agents=[agent], expected_casts=0, expected_warnings=0) def test__schedule_network_no_new_agents(self): network = {'id': 'foo_net_id'} self._test__schedule_network(network, new_agents=None, existing_agents=[], expected_casts=0, expected_warnings=1) def _test__get_enabled_agents(self, network, agents=None, port_count=0, expected_warnings=0, expected_errors=0): self.notifier.plugin.get_ports_count.return_value = port_count enabled_agents = self.notifier._get_enabled_agents( mock.ANY, network, agents, mock.ANY, mock.ANY) if not cfg.CONF.enable_services_on_agents_with_admin_state_down: agents = [x for x in agents if x.admin_state_up] self.assertEqual(agents, enabled_agents) self.assertEqual(expected_warnings, self.mock_log.warning.call_count) self.assertEqual(expected_errors, self.mock_log.error.call_count) def test__get_enabled_agents(self): agent1 = agent_model.Agent() agent1.admin_state_up = 
True agent1.heartbeat_timestamp = timeutils.utcnow() agent2 = agent_model.Agent() agent2.admin_state_up = False agent2.heartbeat_timestamp = timeutils.utcnow() network = {'id': 'foo_network_id'} self._test__get_enabled_agents(network, agents=[agent1]) def test__get_enabled_agents_with_inactive_ones(self): agent1 = agent_model.Agent() agent1.admin_state_up = True agent1.heartbeat_timestamp = timeutils.utcnow() agent2 = agent_model.Agent() agent2.admin_state_up = True # This is effectively an inactive agent agent2.heartbeat_timestamp = datetime.datetime(2000, 1, 1, 0, 0) network = {'id': 'foo_network_id'} self._test__get_enabled_agents(network, agents=[agent1, agent2], expected_warnings=1, expected_errors=0) def test__get_enabled_agents_with_notification_required(self): network = {'id': 'foo_network_id', 'subnets': ['foo_subnet_id']} agent = agent_model.Agent() agent.admin_state_up = False agent.heartbeat_timestamp = timeutils.utcnow() self._test__get_enabled_agents(network, [agent], port_count=20, expected_warnings=0, expected_errors=1) def test__get_enabled_agents_with_admin_state_down(self): cfg.CONF.set_override( 'enable_services_on_agents_with_admin_state_down', True) agent1 = agent_model.Agent() agent1.admin_state_up = True agent1.heartbeat_timestamp = timeutils.utcnow() agent2 = agent_model.Agent() agent2.admin_state_up = False agent2.heartbeat_timestamp = timeutils.utcnow() network = {'id': 'foo_network_id'} self._test__get_enabled_agents(network, agents=[agent1, agent2]) def test__notify_agents_fanout_required(self): self.notifier._notify_agents(mock.ANY, 'network_delete_end', mock.ANY, 'foo_network_id') self.assertEqual(1, self.mock_fanout.call_count) def _test__notify_agents_with_function( self, function, expected_scheduling=0, expected_casts=0): with mock.patch.object(self.notifier, '_schedule_network') as f: with mock.patch.object(self.notifier, '_get_enabled_agents') as g: agent = agent_model.Agent() agent.admin_state_up = True agent.heartbeat_timestamp = timeutils.utcnow() g.return_value = [agent] function() self.assertEqual(expected_scheduling, f.call_count) self.assertEqual(expected_casts, self.mock_cast.call_count) def _test__notify_agents(self, method, expected_scheduling=0, expected_casts=0, payload=None): payload = payload or {'port': {}} self._test__notify_agents_with_function( lambda: self.notifier._notify_agents( mock.Mock(), method, payload, 'foo_network_id'), expected_scheduling, expected_casts) def test__notify_agents_cast_required_with_scheduling(self): self._test__notify_agents('port_create_end', expected_scheduling=1, expected_casts=1) def test__notify_agents_cast_required_wo_scheduling_on_port_update(self): self._test__notify_agents('port_update_end', expected_scheduling=0, expected_casts=1) def test__notify_agents_cast_required_with_scheduling_subnet_create(self): self._test__notify_agents('subnet_create_end', expected_scheduling=1, expected_casts=1, payload={'subnet': {}}) def test__notify_agents_cast_required_with_scheduling_segment(self): network_id = 'foo_network_id' segment_id = 'foo_segment_id' subnet = {'subnet': {'segment_id': segment_id}} segment = {'id': segment_id, 'network_id': network_id, 'hosts': ['host-a']} self.notifier.plugin.get_network.return_value = {'id': network_id} segment_sp = mock.Mock() segment_sp.get_segment.return_value = segment directory.add_plugin('segments', segment_sp) self._test__notify_agents('subnet_create_end', expected_scheduling=1, expected_casts=1, payload=subnet) get_agents = 
self.notifier.plugin.get_dhcp_agents_hosting_networks get_agents.assert_called_once_with( mock.ANY, [network_id], hosts=segment['hosts']) def test__notify_agents_no_action(self): self._test__notify_agents('network_create_end', expected_scheduling=0, expected_casts=0) def test__notify_agents_with_router_interface_add(self): self._test__notify_agents_with_function( lambda: self.notifier._after_router_interface_created( mock.ANY, mock.ANY, mock.ANY, context=mock.Mock(), port={'id': 'foo_port_id', 'network_id': 'foo_network_id'}), expected_scheduling=1, expected_casts=1) def test__notify_agents_with_router_interface_delete(self): self._test__notify_agents_with_function( lambda: self.notifier._after_router_interface_deleted( mock.ANY, mock.ANY, mock.ANY, context=mock.Mock(), port={'id': 'foo_port_id', 'network_id': 'foo_network_id'}), expected_scheduling=0, expected_casts=1) def test__fanout_message(self): self.notifier._fanout_message(mock.ANY, mock.ANY, mock.ANY) self.assertEqual(1, self.mock_fanout.call_count) def test__cast_message(self): self.notifier._cast_message(mock.ANY, mock.ANY, mock.ANY) self.assertEqual(1, self.mock_cast.call_count) def test__native_notification_unsubscribes(self): self.assertFalse(self.notifier._unsubscribed_resources) for res in (resources.PORT, resources.NETWORK, resources.SUBNET): self.notifier._unsubscribed_resources = [] kwargs = {res: {}} registry.notify(res, events.AFTER_CREATE, self, context=mock.Mock(), **kwargs) # don't unsubscribe until all three types are observed self.assertEqual([], self.notifier._unsubscribed_resources) registry.notify(res, events.AFTER_UPDATE, self, context=mock.Mock(), **kwargs) self.assertEqual([], self.notifier._unsubscribed_resources) registry.notify(res, events.AFTER_DELETE, self, context=mock.Mock(), **kwargs) self.assertEqual([res], self.notifier._unsubscribed_resources) # after first time, no further unsubscribing should happen registry.notify(res, events.AFTER_CREATE, self, context=mock.Mock(), **kwargs) self.assertEqual([res], self.notifier._unsubscribed_resources) def test__only_status_changed(self): p1 = {'id': 1, 'status': 'DOWN', 'updated_at': '10:00:00', 'revision_number': 1} p2 = dict(p1) p2['status'] = 'ACTIVE' p2['revision_number'] = 2 p2['updated_at'] = '10:00:01' self.assertTrue(self.notifier._only_status_changed(p1, p2)) p2['name'] = 'test' self.assertFalse(self.notifier._only_status_changed(p1, p2)) p1['name'] = 'test' self.assertTrue(self.notifier._only_status_changed(p1, p2)) p1['name'] = 'test1' self.assertFalse(self.notifier._only_status_changed(p1, p2))
eayunstack/neutron
neutron/tests/unit/api/rpc/agentnotifiers/test_dhcp_rpc_agent_api.py
Python
apache-2.0
12,113
from hubs.ha import haremote as ha
from hubs.ha.hasshub import HAnode, RegisterDomain
from controlevents import CEvent, PostEvent, ConsoleEvent, PostIfInterested
from utils import timers
import functools


# noinspection PyTypeChecker
class Thermostat(HAnode):  # deprecated version
    def __init__(self, HAitem, d):
        super(Thermostat, self).__init__(HAitem, **d)
        self.Hub.RegisterEntity('climate', self.entity_id, self)
        self.timerseq = 0
        # noinspection PyBroadException
        try:
            self.temperature = self.attributes['temperature']
            self.curtemp = self.attributes['current_temperature']
            self.target_low = self.attributes['target_temp_low']
            self.target_high = self.attributes['target_temp_high']
            self.mode = self.attributes['operation_mode']
            self.fan = self.attributes['fan_mode']
            self.fanstates = self.attributes['fan_list']
            self.modelist = self.attributes['operation_list']
        except:
            pass

    # noinspection PyUnusedLocal
    def ErrorFakeChange(self, param=None):
        PostEvent(ConsoleEvent(CEvent.HubNodeChange, hub=self.Hub.name,
                               node=self.entity_id, value=self.internalstate))

    def Update(self, **ns):
        if 'attributes' in ns:
            self.attributes = ns['attributes']
            self.temperature = self.attributes['temperature']
            self.curtemp = self.attributes['current_temperature']
            self.target_low = self.attributes['target_temp_low']
            self.target_high = self.attributes['target_temp_high']
            self.mode = self.attributes['operation_mode']
            self.fan = self.attributes['fan_mode']
        PostIfInterested(self.Hub, self.entity_id, self.internalstate)

    # noinspection DuplicatedCode
    def PushSetpoints(self, t_low, t_high):
        ha.call_service_async(self.Hub.api, 'climate', 'set_temperature',
                              {'entity_id': '{}'.format(self.entity_id),
                               'target_temp_high': str(t_high),
                               'target_temp_low': str(t_low)})
        self.timerseq += 1
        _ = timers.OnceTimer(5, start=True,
                             name='fakepushsetpoint-{}'.format(self.timerseq),
                             proc=self.ErrorFakeChange)

    def GetThermInfo(self):
        if self.target_low is not None:
            return self.curtemp, self.target_low, self.target_high, self.HVAC_state, self.mode, self.fan
        else:
            return self.curtemp, self.temperature, self.temperature, self.HVAC_state, self.mode, self.fan

    # noinspection PyUnusedLocal,PyUnusedLocal,PyUnusedLocal
    def _HVACstatechange(self, storeitem, old, new, param, chgsource):
        self.HVAC_state = new
        PostIfInterested(self.Hub, self.entity_id, new)

    def _connectsensors(self, HVACsensor):
        self.HVAC_state = HVACsensor.state
        # noinspection PyProtectedMember
        HVACsensor.SetSensorAlert(functools.partial(self._HVACstatechange))

    def GetModeInfo(self):
        return self.modelist, self.fanstates

    def PushFanState(self, mode):
        ha.call_service_async(self.Hub.api, 'climate', 'set_fan_mode',
                              {'entity_id': '{}'.format(self.entity_id), 'fan_mode': mode})
        self.timerseq += 1
        _ = timers.OnceTimer(5, start=True,
                             name='fakepushfanstate-{}'.format(self.timerseq),
                             proc=self.ErrorFakeChange)

    def PushMode(self, mode):
        # noinspection PyBroadException
        ha.call_service_async(self.Hub.api, 'climate', 'set_operation_mode',
                              {'entity_id': '{}'.format(self.entity_id), 'operation_mode': mode})
        self.timerseq += 1
        _ = timers.OnceTimer(5, start=True,
                             name='fakepushmode -{}'.format(self.timerseq),
                             proc=self.ErrorFakeChange)


RegisterDomain('climate', Thermostat)
kevinkahn/softconsole
hubs/ha/domains/__oldthermostat.py
Python
apache-2.0
3,386
import json
import time

import jps


class MessageHolder(object):
    def __init__(self):
        self.saved_msg = []

    def __call__(self, msg):
        self.saved_msg.append(msg)


def test_pubsub_with_serialize_json():
    holder = MessageHolder()
    sub = jps.Subscriber('/serialize_hoge1', holder, deserializer=json.loads)
    pub = jps.Publisher('/serialize_hoge1', serializer=json.dumps)
    time.sleep(0.1)
    obj = {'da1': 1, 'name': 'hoge'}
    pub.publish(obj)
    time.sleep(0.1)
    sub.spin_once()
    assert len(holder.saved_msg) == 1
    assert holder.saved_msg[0]['da1'] == 1
    assert holder.saved_msg[0]['name'] == 'hoge'
OTL/jps
test/test_serialize.py
Python
apache-2.0
697
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for QueryContextLineageSubgraph
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.

# To install the latest published package dependency, execute the following:
#   python3 -m pip install google-cloud-aiplatform


# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryContextLineageSubgraph_async]
from google.cloud import aiplatform_v1beta1


async def sample_query_context_lineage_subgraph():
    # Create a client
    client = aiplatform_v1beta1.MetadataServiceAsyncClient()

    # Initialize request argument(s)
    request = aiplatform_v1beta1.QueryContextLineageSubgraphRequest(
        context="context_value",
    )

    # Make the request
    response = await client.query_context_lineage_subgraph(request=request)

    # Handle the response
    print(response)

# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryContextLineageSubgraph_async]
googleapis/python-aiplatform
samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_context_lineage_subgraph_async.py
Python
apache-2.0
1,624
""" Network Config ============== Manage the configuration on a network device given a specific static config or template. :codeauthor: Mircea Ulinic <ping@mirceaulinic.net> & Jerome Fleury <jf@cloudflare.com> :maturity: new :depends: napalm :platform: unix Dependencies ------------ - :mod:`NAPALM proxy minion <salt.proxy.napalm>` - :mod:`Network-related basic features execution module <salt.modules.napalm_network>` .. versionadded:: 2017.7.0 """ import logging import salt.utils.napalm log = logging.getLogger(__name__) # ---------------------------------------------------------------------------------------------------------------------- # state properties # ---------------------------------------------------------------------------------------------------------------------- __virtualname__ = "netconfig" # ---------------------------------------------------------------------------------------------------------------------- # global variables # ---------------------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------------------- # property functions # ---------------------------------------------------------------------------------------------------------------------- def __virtual__(): """ NAPALM library must be installed for this module to work and run in a (proxy) minion. """ return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__) # ---------------------------------------------------------------------------------------------------------------------- # helper functions -- will not be exported # ---------------------------------------------------------------------------------------------------------------------- def _update_config( template_name, template_source=None, template_hash=None, template_hash_name=None, template_user="root", template_group="root", template_mode="755", template_attrs="--------------e----", saltenv=None, template_engine="jinja", skip_verify=False, defaults=None, test=False, commit=True, debug=False, replace=False, **template_vars ): """ Call the necessary functions in order to execute the state. For the moment this only calls the ``net.load_template`` function from the :mod:`Network-related basic features execution module <salt.modules.napalm_network>`, but this may change in time. """ return __salt__["net.load_template"]( template_name, template_source=template_source, template_hash=template_hash, template_hash_name=template_hash_name, template_user=template_user, template_group=template_group, template_mode=template_mode, template_attrs=template_attrs, saltenv=saltenv, template_engine=template_engine, skip_verify=skip_verify, defaults=defaults, test=test, commit=commit, debug=debug, replace=replace, **template_vars ) # ---------------------------------------------------------------------------------------------------------------------- # callable functions # ---------------------------------------------------------------------------------------------------------------------- def replace_pattern( name, pattern, repl, count=0, flags=8, bufsize=1, append_if_not_found=False, prepend_if_not_found=False, not_found_content=None, search_only=False, show_changes=True, backslash_literal=False, source="running", path=None, test=False, replace=True, debug=False, commit=True, ): """ .. versionadded:: 2019.2.0 Replace occurrences of a pattern in the configuration source. 
If ``show_changes`` is ``True``, then a diff of what changed will be returned, otherwise a ``True`` will be returned when changes are made, and ``False`` when no changes are made. This is a pure Python implementation that wraps Python's :py:func:`~re.sub`. pattern A regular expression, to be matched using Python's :py:func:`~re.search`. repl The replacement text. count: ``0`` Maximum number of pattern occurrences to be replaced. If count is a positive integer ``n``, only ``n`` occurrences will be replaced, otherwise all occurrences will be replaced. flags (list or int): ``8`` A list of flags defined in the ``re`` module documentation from the Python standard library. Each list item should be a string that will correlate to the human-friendly flag name. E.g., ``['IGNORECASE', 'MULTILINE']``. Optionally, ``flags`` may be an int, with a value corresponding to the XOR (``|``) of all the desired flags. Defaults to 8 (which supports 'MULTILINE'). bufsize (int or str): ``1`` How much of the configuration to buffer into memory at once. The default value ``1`` processes one line at a time. The special value ``file`` may be specified which will read the entire file into memory before processing. append_if_not_found: ``False`` If set to ``True``, and pattern is not found, then the content will be appended to the file. prepend_if_not_found: ``False`` If set to ``True`` and pattern is not found, then the content will be prepended to the file. not_found_content Content to use for append/prepend if not found. If None (default), uses ``repl``. Useful when ``repl`` uses references to group in pattern. search_only: ``False`` If set to true, this no changes will be performed on the file, and this function will simply return ``True`` if the pattern was matched, and ``False`` if not. show_changes: ``True`` If ``True``, return a diff of changes made. Otherwise, return ``True`` if changes were made, and ``False`` if not. backslash_literal: ``False`` Interpret backslashes as literal backslashes for the repl and not escape characters. This will help when using append/prepend so that the backslashes are not interpreted for the repl on the second run of the state. source: ``running`` The configuration source. Choose from: ``running``, ``candidate``, or ``startup``. Default: ``running``. path Save the temporary configuration to a specific path, then read from there. test: ``False`` Dry run? If set as ``True``, will apply the config, discard and return the changes. Default: ``False`` and will commit the changes on the device. commit: ``True`` Commit the configuration changes? Default: ``True``. debug: ``False`` Debug mode. Will insert a new key in the output dictionary, as ``loaded_config`` containing the raw configuration loaded on the device. replace: ``True`` Load and replace the configuration. Default: ``True``. If an equal sign (``=``) appears in an argument to a Salt command it is interpreted as a keyword argument in the format ``key=val``. That processing can be bypassed in order to pass an equal sign through to the remote shell command by manually specifying the kwarg: State SLS Example: .. 
code-block:: yaml update_policy_name: netconfig.replace_pattern: - pattern: OLD-POLICY-NAME - repl: new-policy-name - debug: true """ ret = salt.utils.napalm.default_ret(name) # the user can override the flags the equivalent CLI args # which have higher precedence test = test or __opts__["test"] debug = __salt__["config.merge"]("debug", debug) commit = __salt__["config.merge"]("commit", commit) replace = __salt__["config.merge"]("replace", replace) # this might be a bit risky replace_ret = __salt__["net.replace_pattern"]( pattern, repl, count=count, flags=flags, bufsize=bufsize, append_if_not_found=append_if_not_found, prepend_if_not_found=prepend_if_not_found, not_found_content=not_found_content, search_only=search_only, show_changes=show_changes, backslash_literal=backslash_literal, source=source, path=path, test=test, replace=replace, debug=debug, commit=commit, ) return salt.utils.napalm.loaded_ret(ret, replace_ret, test, debug) def saved( name, source="running", user=None, group=None, mode=None, attrs=None, makedirs=False, dir_mode=None, replace=True, backup="", show_changes=True, create=True, tmp_dir="", tmp_ext="", encoding=None, encoding_errors="strict", allow_empty=False, follow_symlinks=True, check_cmd=None, win_owner=None, win_perms=None, win_deny_perms=None, win_inheritance=True, win_perms_reset=False, **kwargs ): """ .. versionadded:: 2019.2.0 Save the configuration to a file on the local file system. name Absolute path to file where to save the configuration. To push the files to the Master, use :mod:`cp.push <salt.modules.cp.push>` Execution function. source: ``running`` The configuration source. Choose from: ``running``, ``candidate``, ``startup``. Default: ``running``. user The user to own the file, this defaults to the user salt is running as on the minion group The group ownership set for the file, this defaults to the group salt is running as on the minion. On Windows, this is ignored mode The permissions to set on this file, e.g. ``644``, ``0775``, or ``4664``. The default mode for new files and directories corresponds to the umask of the salt process. The mode of existing files and directories will only be changed if ``mode`` is specified. .. note:: This option is **not** supported on Windows. attrs The attributes to have on this file, e.g. ``a``, ``i``. The attributes can be any or a combination of the following characters: ``aAcCdDeijPsStTu``. .. note:: This option is **not** supported on Windows. makedirs: ``False`` If set to ``True``, then the parent directories will be created to facilitate the creation of the named file. If ``False``, and the parent directory of the destination file doesn't exist, the state will fail. dir_mode If directories are to be created, passing this option specifies the permissions for those directories. If this is not set, directories will be assigned permissions by adding the execute bit to the mode of the files. The default mode for new files and directories corresponds umask of salt process. For existing files and directories it's not enforced. replace: ``True`` If set to ``False`` and the file already exists, the file will not be modified even if changes would otherwise be made. Permissions and ownership will still be enforced, however. backup Overrides the default backup mode for this specific file. See :ref:`backup_mode documentation <file-state-backups>` for more details. show_changes: ``True`` Output a unified diff of the old file and the new file. If ``False`` return a boolean if any changes were made. 
create: ``True`` If set to ``False``, then the file will only be managed if the file already exists on the system. encoding If specified, then the specified encoding will be used. Otherwise, the file will be encoded using the system locale (usually UTF-8). See https://docs.python.org/3/library/codecs.html#standard-encodings for the list of available encodings. encoding_errors: ``'strict'`` Error encoding scheme. Default is ```'strict'```. See https://docs.python.org/2/library/codecs.html#codec-base-classes for the list of available schemes. allow_empty: ``True`` If set to ``False``, then the state will fail if the contents specified by ``contents_pillar`` or ``contents_grains`` are empty. follow_symlinks: ``True`` If the desired path is a symlink follow it and make changes to the file to which the symlink points. check_cmd The specified command will be run with an appended argument of a *temporary* file containing the new managed contents. If the command exits with a zero status the new managed contents will be written to the managed destination. If the command exits with a nonzero exit code, the state will fail and no changes will be made to the file. tmp_dir Directory for temp file created by ``check_cmd``. Useful for checkers dependent on config file location (e.g. daemons restricted to their own config directories by an apparmor profile). tmp_ext Suffix for temp file created by ``check_cmd``. Useful for checkers dependent on config file extension (e.g. the init-checkconf upstart config checker). win_owner: ``None`` The owner of the directory. If this is not passed, user will be used. If user is not passed, the account under which Salt is running will be used. win_perms: ``None`` A dictionary containing permissions to grant and their propagation. For example: ``{'Administrators': {'perms': 'full_control'}}`` Can be a single basic perm or a list of advanced perms. ``perms`` must be specified. ``applies_to`` does not apply to file objects. win_deny_perms: ``None`` A dictionary containing permissions to deny and their propagation. For example: ``{'Administrators': {'perms': 'full_control'}}`` Can be a single basic perm or a list of advanced perms. ``perms`` must be specified. ``applies_to`` does not apply to file objects. win_inheritance: ``True`` True to inherit permissions from the parent directory, False not to inherit permission. win_perms_reset: ``False`` If ``True`` the existing DACL will be cleared and replaced with the settings defined in this function. If ``False``, new entries will be appended to the existing DACL. Default is ``False``. State SLS Example: .. code-block:: yaml /var/backups/{{ opts.id }}/{{ salt.status.time('%s') }}.cfg: netconfig.saved: - source: running - makedirs: true The state SLS above would create a backup config grouping the files by the Minion ID, in chronological files. 
For example, if the state is executed at on the 3rd of August 2018, at 5:15PM, on the Minion ``core1.lon01``, the configuration would saved in the file: ``/var/backups/core01.lon01/1533316558.cfg`` """ ret = __salt__["net.config"](source=source) if not ret["result"]: return {"name": name, "changes": {}, "result": False, "comment": ret["comment"]} return __states__["file.managed"]( name, user=user, group=group, mode=mode, attrs=attrs, makedirs=makedirs, dir_mode=dir_mode, replace=replace, backup=backup, show_changes=show_changes, create=create, contents=ret["out"][source], tmp_dir=tmp_dir, tmp_ext=tmp_ext, encoding=encoding, encoding_errors=encoding_errors, allow_empty=allow_empty, follow_symlinks=follow_symlinks, check_cmd=check_cmd, win_owner=win_owner, win_perms=win_perms, win_deny_perms=win_deny_perms, win_inheritance=win_inheritance, win_perms_reset=win_perms_reset, **kwargs ) def managed( name, template_name=None, template_source=None, template_hash=None, template_hash_name=None, saltenv="base", template_engine="jinja", skip_verify=False, context=None, defaults=None, test=False, commit=True, debug=False, replace=False, commit_in=None, commit_at=None, revert_in=None, revert_at=None, **template_vars ): """ Manages the configuration on network devices. By default this state will commit the changes on the device. If there are no changes required, it does not commit and the field ``already_configured`` from the output dictionary will be set as ``True`` to notify that. To avoid committing the configuration, set the argument ``test`` to ``True`` (or via the CLI argument ``test=True``) and will discard (dry run). To preserve the changes, set ``commit`` to ``False`` (either as CLI argument, either as state parameter). However, this is recommended to be used only in exceptional cases when there are applied few consecutive states and/or configuration changes. Otherwise the user might forget that the config DB is locked and the candidate config buffer is not cleared/merged in the running config. To replace the config, set ``replace`` to ``True``. This option is recommended to be used with caution! template_name Identifies path to the template source. The template can be either stored on the local machine, either remotely. The recommended location is under the ``file_roots`` as specified in the master config file. For example, let's suppose the ``file_roots`` is configured as: .. code-block:: yaml file_roots: base: - /etc/salt/states Placing the template under ``/etc/salt/states/templates/example.jinja``, it can be used as ``salt://templates/example.jinja``. Alternatively, for local files, the user can specify the absolute path. If remotely, the source can be retrieved via ``http``, ``https`` or ``ftp``. Examples: - ``salt://my_template.jinja`` - ``/absolute/path/to/my_template.jinja`` - ``http://example.com/template.cheetah`` - ``https:/example.com/template.mako`` - ``ftp://example.com/template.py`` .. versionchanged:: 2019.2.0 This argument can now support a list of templates to be rendered. The resulting configuration text is loaded at once, as a single configuration chunk. template_source: None Inline config template to be rendered and loaded on the device. template_hash: None Hash of the template file. Format: ``{hash_type: 'md5', 'hsum': <md5sum>}`` template_hash_name: None When ``template_hash`` refers to a remote file, this specifies the filename to look for in that file. saltenv: base Specifies the template environment. This will influence the relative imports inside the templates. 
template_engine: jinja The following templates engines are supported: - :mod:`cheetah<salt.renderers.cheetah>` - :mod:`genshi<salt.renderers.genshi>` - :mod:`jinja<salt.renderers.jinja>` - :mod:`mako<salt.renderers.mako>` - :mod:`py<salt.renderers.py>` - :mod:`wempy<salt.renderers.wempy>` skip_verify: False If ``True``, hash verification of remote file sources (``http://``, ``https://``, ``ftp://``) will be skipped, and the ``source_hash`` argument will be ignored. .. versionchanged:: 2017.7.1 test: False Dry run? If set to ``True``, will apply the config, discard and return the changes. Default: ``False`` (will commit the changes on the device). commit: True Commit? Default: ``True``. debug: False Debug mode. Will insert a new key under the output dictionary, as ``loaded_config`` containing the raw result after the template was rendered. .. note:: This argument cannot be used directly on the command line. Instead, it can be passed through the ``pillar`` variable when executing either of the :py:func:`state.sls <salt.modules.state.sls>` or :py:func:`state.apply <salt.modules.state.apply>` (see below for an example). commit_in: ``None`` Commit the changes in a specific number of minutes / hours. Example of accepted formats: ``5`` (commit in 5 minutes), ``2m`` (commit in 2 minutes), ``1h`` (commit the changes in 1 hour)`, ``5h30m`` (commit the changes in 5 hours and 30 minutes). .. note:: This feature works on any platforms, as it does not rely on the native features of the network operating system. .. note:: After the command is executed and the ``diff`` is not satisfactory, or for any other reasons you have to discard the commit, you are able to do so using the :py:func:`net.cancel_commit <salt.modules.napalm_network.cancel_commit>` execution function, using the commit ID returned by this function. .. warning:: Using this feature, Salt will load the exact configuration you expect, however the diff may change in time (i.e., if an user applies a manual configuration change, or a different process or command changes the configuration in the meanwhile). .. versionadded:: 2019.2.0 commit_at: ``None`` Commit the changes at a specific time. Example of accepted formats: ``1am`` (will commit the changes at the next 1AM), ``13:20`` (will commit at 13:20), ``1:20am``, etc. .. note:: This feature works on any platforms, as it does not rely on the native features of the network operating system. .. note:: After the command is executed and the ``diff`` is not satisfactory, or for any other reasons you have to discard the commit, you are able to do so using the :py:func:`net.cancel_commit <salt.modules.napalm_network.cancel_commit>` execution function, using the commit ID returned by this function. .. warning:: Using this feature, Salt will load the exact configuration you expect, however the diff may change in time (i.e., if an user applies a manual configuration change, or a different process or command changes the configuration in the meanwhile). .. versionadded:: 2019.2.0 revert_in: ``None`` Commit and revert the changes in a specific number of minutes / hours. Example of accepted formats: ``5`` (revert in 5 minutes), ``2m`` (revert in 2 minutes), ``1h`` (revert the changes in 1 hour)`, ``5h30m`` (revert the changes in 5 hours and 30 minutes). .. note:: To confirm the commit, and prevent reverting the changes, you will have to execute the :mod:`net.confirm_commit <salt.modules.napalm_network.confirm_commit>` function, using the commit ID returned by this function. .. 
warning:: This works on any platform, regardless if they have or don't have native capabilities to confirming a commit. However, please be *very* cautious when using this feature: on Junos (as it is the only NAPALM core platform supporting this natively) it executes a commit confirmed as you would do from the command line. All the other platforms don't have this capability natively, therefore the revert is done via Salt. That means, your device needs to be reachable at the moment when Salt will attempt to revert your changes. Be cautious when pushing configuration changes that would prevent you reach the device. Similarly, if an user or a different process apply other configuration changes in the meanwhile (between the moment you commit and till the changes are reverted), these changes would be equally reverted, as Salt cannot be aware of them. .. versionadded:: 2019.2.0 revert_at: ``None`` Commit and revert the changes at a specific time. Example of accepted formats: ``1am`` (will commit and revert the changes at the next 1AM), ``13:20`` (will commit and revert at 13:20), ``1:20am``, etc. .. note:: To confirm the commit, and prevent reverting the changes, you will have to execute the :mod:`net.confirm_commit <salt.modules.napalm_network.confirm_commit>` function, using the commit ID returned by this function. .. warning:: This works on any platform, regardless if they have or don't have native capabilities to confirming a commit. However, please be *very* cautious when using this feature: on Junos (as it is the only NAPALM core platform supporting this natively) it executes a commit confirmed as you would do from the command line. All the other platforms don't have this capability natively, therefore the revert is done via Salt. That means, your device needs to be reachable at the moment when Salt will attempt to revert your changes. Be cautious when pushing configuration changes that would prevent you reach the device. Similarly, if an user or a different process apply other configuration changes in the meanwhile (between the moment you commit and till the changes are reverted), these changes would be equally reverted, as Salt cannot be aware of them. .. versionadded:: 2019.2.0 replace: False Load and replace the configuration. Default: ``False`` (will apply load merge). context: None Overrides default context variables passed to the template. .. versionadded:: 2019.2.0 defaults: None Default variables/context passed to the template. template_vars Dictionary with the arguments/context to be used when the template is rendered. Do not explicitly specify this argument. This represents any other variable that will be sent to the template rendering system. Please see an example below! In both ``ntp_peers_example_using_pillar`` and ``ntp_peers_example``, ``peers`` is sent as template variable. .. note:: It is more recommended to use the ``context`` argument instead, to avoid any conflicts with other arguments. SLS Example (e.g.: under salt://router/config.sls) : .. 
code-block:: yaml whole_config_example: netconfig.managed: - template_name: salt://path/to/complete_config.jinja - debug: True - replace: True bgp_config_example: netconfig.managed: - template_name: /absolute/path/to/bgp_neighbors.mako - template_engine: mako prefix_lists_example: netconfig.managed: - template_name: prefix_lists.cheetah - debug: True - template_engine: cheetah ntp_peers_example: netconfig.managed: - template_name: http://bit.ly/2gKOj20 - skip_verify: False - debug: True - peers: - 192.168.0.1 - 192.168.0.1 ntp_peers_example_using_pillar: netconfig.managed: - template_name: http://bit.ly/2gKOj20 - peers: {{ pillar.get('ntp.peers', []) }} Multi template example: .. code-block:: yaml hostname_and_ntp: netconfig.managed: - template_name: - https://bit.ly/2OhSgqP - https://bit.ly/2M6C4Lx - https://bit.ly/2OIWVTs - debug: true - context: hostname: {{ opts.id }} servers: - 172.17.17.1 - 172.17.17.2 peers: - 192.168.0.1 - 192.168.0.2 Usage examples: .. code-block:: bash $ sudo salt 'juniper.device' state.sls router.config test=True $ sudo salt -N all-routers state.sls router.config pillar="{'debug': True}" ``router.config`` depends on the location of the SLS file (see above). Running this command, will be executed all five steps from above. These examples above are not meant to be used in a production environment, their sole purpose is to provide usage examples. Output example: .. code-block:: bash $ sudo salt 'juniper.device' state.sls router.config test=True juniper.device: ---------- ID: ntp_peers_example_using_pillar Function: netconfig.managed Result: None Comment: Testing mode: Configuration discarded. Started: 12:01:40.744535 Duration: 8755.788 ms Changes: ---------- diff: [edit system ntp] peer 192.168.0.1 { ... } + peer 172.17.17.1; + peer 172.17.17.3; Summary for juniper.device ------------ Succeeded: 1 (unchanged=1, changed=1) Failed: 0 ------------ Total states run: 1 Total run time: 8.756 s Raw output example (useful when the output is reused in other states/execution modules): .. code-block:: bash $ sudo salt --out=pprint 'juniper.device' state.sls router.config test=True debug=True .. code-block:: python { 'juniper.device': { 'netconfig_|-ntp_peers_example_using_pillar_|-ntp_peers_example_using_pillar_|-managed': { '__id__': 'ntp_peers_example_using_pillar', '__run_num__': 0, 'already_configured': False, 'changes': { 'diff': '[edit system ntp] peer 192.168.0.1 { ... 
}+ peer 172.17.17.1;+ peer 172.17.17.3;' }, 'comment': 'Testing mode: Configuration discarded.', 'duration': 7400.759, 'loaded_config': 'system { ntp { peer 172.17.17.1; peer 172.17.17.3; } }', 'name': 'ntp_peers_example_using_pillar', 'result': None, 'start_time': '12:09:09.811445' } } } """ ret = salt.utils.napalm.default_ret(name) # the user can override the flags the equivalent CLI args # which have higher precedence test = test or __opts__["test"] debug = __salt__["config.merge"]("debug", debug) commit = __salt__["config.merge"]("commit", commit) replace = __salt__["config.merge"]("replace", replace) # this might be a bit risky skip_verify = __salt__["config.merge"]("skip_verify", skip_verify) commit_in = __salt__["config.merge"]("commit_in", commit_in) commit_at = __salt__["config.merge"]("commit_at", commit_at) revert_in = __salt__["config.merge"]("revert_in", revert_in) revert_at = __salt__["config.merge"]("revert_at", revert_at) config_update_ret = _update_config( template_name=template_name, template_source=template_source, template_hash=template_hash, template_hash_name=template_hash_name, saltenv=saltenv, template_engine=template_engine, skip_verify=skip_verify, context=context, defaults=defaults, test=test, commit=commit, commit_in=commit_in, commit_at=commit_at, revert_in=revert_in, revert_at=revert_at, debug=debug, replace=replace, **template_vars ) return salt.utils.napalm.loaded_ret(ret, config_update_ret, test, debug) def commit_cancelled(name): """ .. versionadded:: 2019.2.0 Cancel a commit scheduled to be executed via the ``commit_in`` and ``commit_at`` arguments from the :py:func:`net.load_template <salt.modules.napalm_network.load_template>` or :py:func:`net.load_config <salt.modules.napalm_network.load_config>` execution functions. The commit ID is displayed when the commit is scheduled via the functions named above. State SLS Example: .. code-block:: yaml '20180726083540640360': netconfig.commit_cancelled """ cancelled = {"name": name, "result": None, "changes": {}, "comment": ""} if __opts__["test"]: cancelled["comment"] = "It would cancel commit #{}".format(name) return cancelled ret = __salt__["net.cancel_commit"](name) cancelled.update(ret) return cancelled def commit_confirmed(name): """ .. versionadded:: 2019.2.0 Confirm a commit scheduled to be reverted via the ``revert_in`` and ``revert_at`` arguments from the :mod:`net.load_template <salt.modules.napalm_network.load_template>` or :mod:`net.load_config <salt.modules.napalm_network.load_config>` execution functions. The commit ID is displayed when the commit confirmed is scheduled via the functions named above. State SLS Example: .. code-block:: yaml '20180726083540640360': netconfig.commit_confirmed """ confirmed = {"name": name, "result": None, "changes": {}, "comment": ""} if __opts__["test"]: confirmed["comment"] = "It would confirm commit #{}".format(name) return confirmed ret = __salt__["net.confirm_commit"](name) confirmed.update(ret) return confirmed
saltstack/salt
salt/states/netconfig.py
Python
apache-2.0
34,222
# coding: utf-8 """ Kubernetes No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 The version of the OpenAPI document: release-1.23 Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from kubernetes.client.configuration import Configuration class V1RuleWithOperations(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'api_groups': 'list[str]', 'api_versions': 'list[str]', 'operations': 'list[str]', 'resources': 'list[str]', 'scope': 'str' } attribute_map = { 'api_groups': 'apiGroups', 'api_versions': 'apiVersions', 'operations': 'operations', 'resources': 'resources', 'scope': 'scope' } def __init__(self, api_groups=None, api_versions=None, operations=None, resources=None, scope=None, local_vars_configuration=None): # noqa: E501 """V1RuleWithOperations - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_groups = None self._api_versions = None self._operations = None self._resources = None self._scope = None self.discriminator = None if api_groups is not None: self.api_groups = api_groups if api_versions is not None: self.api_versions = api_versions if operations is not None: self.operations = operations if resources is not None: self.resources = resources if scope is not None: self.scope = scope @property def api_groups(self): """Gets the api_groups of this V1RuleWithOperations. # noqa: E501 APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required. # noqa: E501 :return: The api_groups of this V1RuleWithOperations. # noqa: E501 :rtype: list[str] """ return self._api_groups @api_groups.setter def api_groups(self, api_groups): """Sets the api_groups of this V1RuleWithOperations. APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required. # noqa: E501 :param api_groups: The api_groups of this V1RuleWithOperations. # noqa: E501 :type: list[str] """ self._api_groups = api_groups @property def api_versions(self): """Gets the api_versions of this V1RuleWithOperations. # noqa: E501 APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required. # noqa: E501 :return: The api_versions of this V1RuleWithOperations. # noqa: E501 :rtype: list[str] """ return self._api_versions @api_versions.setter def api_versions(self, api_versions): """Sets the api_versions of this V1RuleWithOperations. APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required. # noqa: E501 :param api_versions: The api_versions of this V1RuleWithOperations. # noqa: E501 :type: list[str] """ self._api_versions = api_versions @property def operations(self): """Gets the operations of this V1RuleWithOperations. 
# noqa: E501 Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required. # noqa: E501 :return: The operations of this V1RuleWithOperations. # noqa: E501 :rtype: list[str] """ return self._operations @operations.setter def operations(self, operations): """Sets the operations of this V1RuleWithOperations. Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required. # noqa: E501 :param operations: The operations of this V1RuleWithOperations. # noqa: E501 :type: list[str] """ self._operations = operations @property def resources(self): """Gets the resources of this V1RuleWithOperations. # noqa: E501 Resources is a list of resources this rule applies to. For example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources. If wildcard is present, the validation rule will ensure resources do not overlap with each other. Depending on the enclosing object, subresources might not be allowed. Required. # noqa: E501 :return: The resources of this V1RuleWithOperations. # noqa: E501 :rtype: list[str] """ return self._resources @resources.setter def resources(self, resources): """Sets the resources of this V1RuleWithOperations. Resources is a list of resources this rule applies to. For example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources. If wildcard is present, the validation rule will ensure resources do not overlap with each other. Depending on the enclosing object, subresources might not be allowed. Required. # noqa: E501 :param resources: The resources of this V1RuleWithOperations. # noqa: E501 :type: list[str] """ self._resources = resources @property def scope(self): """Gets the scope of this V1RuleWithOperations. # noqa: E501 scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\". # noqa: E501 :return: The scope of this V1RuleWithOperations. # noqa: E501 :rtype: str """ return self._scope @scope.setter def scope(self, scope): """Sets the scope of this V1RuleWithOperations. scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\". # noqa: E501 :param scope: The scope of this V1RuleWithOperations. 
# noqa: E501 :type: str """ self._scope = scope def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1RuleWithOperations): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1RuleWithOperations): return True return self.to_dict() != other.to_dict()
kubernetes-client/python
kubernetes/client/models/v1_rule_with_operations.py
Python
apache-2.0
9,436
#!/usr/bin/env python2.7
from __future__ import absolute_import, division, print_function, unicode_literals

import argparse
import os
import re
from collections import namedtuple

import buckutils as buck
import javautils as java

# Example:
#   timestamp                 level   command        tid      class                                              message
#   [2017-10-26 01:08:10.126][debug][command:null][tid:83][com.facebook.buck.jvm.java.Jsr199JavacInvocation] javac: /Users/jkeljo/buck/third-party/java/dx/src/com/android/dx/cf/code/LocalsArraySet.java:-1: note: Some input files use unchecked or unsafe operations.
BUCK_LOG_LINE_PATTERN = re.compile(
    r"^\[(?P<timestamp>[^]]+)\]\[(?P<level>[^]]+)\]\[command:(?P<command>[^]]+)\]\[tid:(?P<tid>\d+)\]\[(?P<class>[^]]+)\] (?P<message>.+)$"
)

# Example:
#   path                                                                                   line  message
#   javac: /Users/jkeljo/buck/third-party/java/dx/src/com/android/dx/cf/code/LocalsArraySet.java:-1: note: Some input files use unchecked or unsafe operations.
JAVAC_MESSAGE_PATTERN = re.compile(
    r"^javac: (?P<path>[^:]+):(?P<line>\d+): (?P<message>.+)$"
)

# Example (leading spaces followed by a caret):
#          ^
JAVAC_LOCATION_PATTERN = re.compile(r"^(?P<spaces> +)\^.*$")


def main():
    args = parse_args()
    with open(args.log_file) as log_file:
        migrate(log_file)


def parse_args():
    description = """Parses a Buck log file for source-only ABI migration warnings,
    and applies automatic fixes for those warnings.

    To generate a log file with these warnings present, build the targets to be
    migrated with
    buck build --config java.abi_generation_mode=migrating_to_source_only."""
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument(
        "--log-file",
        help="buck.log file to search for migration instructions "
        "(default ./buck-out/log/last_buildcommand/buck.log)",
        default=os.path.join(
            os.getcwd(), "buck-out", "log", "last_buildcommand", "buck.log"
        ),
    )
    return parser.parse_args()


def migrate(log_file):
    for message in javac_messages(log_file):
        for pattern, plugin in migration_plugins:
            if len(message.details) == 0:
                continue
            match = pattern.search(message.details[0])
            if match:
                plugin(message)
    java.write_all()
    buck.write_all()


JavacMessage = namedtuple("JavacMessage", ["path", "line", "col", "summary", "details"])


def javac_messages(log_file):
    for message in messages(log_file):
        match = JAVAC_MESSAGE_PATTERN.match(message[0])
        if not match:
            continue
        path = match.group("path")
        file_line = int(match.group("line"))
        summary = match.group("message")
        # Skip line 1; it's just the code
        match = JAVAC_LOCATION_PATTERN.match(message[2])
        if not match:
            continue
        col = len(match.group("spaces")) + 1
        details = [line.strip() for line in message[3:]]
        yield JavacMessage(path, file_line, col, summary, details)


def messages(log_file):
    message = None
    for line in log_file:
        log_line = BUCK_LOG_LINE_PATTERN.match(line)
        if log_line:
            if message:
                yield message
            message = [log_line.group("message")]
        else:
            message.append(line)


required_for_source_only_abi = set()


def add_required_for_source_only_abi(message):
    match = ADD_REQUIRED_FOR_SOURCE_ABI_PATTERN.search(message.details[0])
    if not match:
        return
    rule = match.group("rule")
    if rule in required_for_source_only_abi:
        return
    buck_target = buck.get_build_target(rule)
    buck_target["required_for_source_only_abi"] = "True"
    required_for_source_only_abi.add(rule)


def add_source_only_abi_deps(message):
    match = ADD_SOURCE_ONLY_ABI_DEPS_PATTERN.search(message.details[0])
    if not match:
        return
    rule = match.group("rule")
    rules = match.group("rules").split(", ")
    buck_target = buck.get_build_target(rule)
    if not "source_only_abi_deps" in buck_target:
        buck_target["source_only_abi_deps"] = buck.EditableList()
    source_only_abi_deps = buck_target["source_only_abi_deps"]
    for dep in rules:
        if not dep in source_only_abi_deps:
            source_only_abi_deps.prepend(dep)


def do_remediations(message):
    for remediation in message.details[1:]:
        apply_remediation(message.path, message.line, message.col, remediation)


def apply_remediation(path, line, col, remediation):
    for (pattern, fn) in remediations:
        match = pattern.match(remediation)
        if match:
            fn(path, line, col, **match.groupdict())
            return
    print("Manual: %s:%s,%s: %s" % (path, line, col, remediation))


def add_import(path, line, col, type):
    file = java.load(path)
    file.add_import(type)


def replace_name(path, line, col, old, new):
    file = java.load(path)
    file.replace_name(line, col, old, new)


ADD_REQUIRED_FOR_SOURCE_ABI_PATTERN = re.compile(
    r"add required_for_source_only_abi = True to (?P<rule>[^#.]*)."
)
ADD_SOURCE_ONLY_ABI_DEPS_PATTERN = re.compile(
    r"add the following rules to source_only_abi_deps in (?P<rule>[^:]+:[^#:]+)[^:]*: (?P<rules>.+)"
)
ADD_AN_IMPORT_PATTERN = re.compile(r'^Add an import for "(?P<type>[^"]+)"')
REPLACE_A_NAME_PATTERN = re.compile(
    r'^Use "(?P<new>[^"]+)" here instead of "(?P<old>[^"]+)"'
)

migration_plugins = [
    (re.compile(r"^To fix:$"), do_remediations),
    (ADD_REQUIRED_FOR_SOURCE_ABI_PATTERN, add_required_for_source_only_abi),
    (ADD_SOURCE_ONLY_ABI_DEPS_PATTERN, add_source_only_abi_deps),
]

remediations = [
    (ADD_AN_IMPORT_PATTERN, add_import),
    (REPLACE_A_NAME_PATTERN, replace_name),
]


if __name__ == "__main__":
    main()
shs96c/buck
programs/fixes/source_only_abi/autofix_source_only_abi_warnings.py
Python
apache-2.0
6,379
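To make the two regular expressions in the script above easier to follow, here is a small self-contained sketch that runs them against a shortened log line. The patterns are copied verbatim from the script; the path and line number in the sample are made up for illustration (the script's own example line uses ":-1:", which the \d+ line group would not accept).

import re

# Patterns copied from the script above.
BUCK_LOG_LINE_PATTERN = re.compile(
    r"^\[(?P<timestamp>[^]]+)\]\[(?P<level>[^]]+)\]\[command:(?P<command>[^]]+)\]"
    r"\[tid:(?P<tid>\d+)\]\[(?P<class>[^]]+)\] (?P<message>.+)$"
)
JAVAC_MESSAGE_PATTERN = re.compile(
    r"^javac: (?P<path>[^:]+):(?P<line>\d+): (?P<message>.+)$"
)

# Hypothetical sample line; path and line number are placeholders.
sample = (
    "[2017-10-26 01:08:10.126][debug][command:null][tid:83]"
    "[com.facebook.buck.jvm.java.Jsr199JavacInvocation] "
    "javac: /Users/jkeljo/Example.java:42: note: example diagnostic"
)

# First pass strips the Buck log framing and keeps only the message.
log_match = BUCK_LOG_LINE_PATTERN.match(sample)
print(log_match.group("level"), log_match.group("tid"))      # debug 83

# Second pass splits the javac message into path, line, and text.
javac_match = JAVAC_MESSAGE_PATTERN.match(log_match.group("message"))
print(javac_match.group("path"), javac_match.group("line"))  # /Users/jkeljo/Example.java 42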
#!/usr/bin/env python
"""Executable for the Earth Engine command line interface.

This executable starts a Python Cmd instance to receive and process command
line input entered by the user. If the executable is invoked with some command
line arguments, the Cmd is launched in the one-off mode, where the provided
arguments are processed as a single command after which the program is
terminated. Otherwise, this executable will launch the Cmd in the interactive
(looping) mode, where the user will be able to run multiple commands as in
a typical terminal program.
"""

from __future__ import print_function
import argparse
import sys

import ee
from ee.cli import commands
from ee.cli import utils


class CommandDispatcher(commands.Dispatcher):
    name = 'main'

    COMMANDS = [
        commands.AuthenticateCommand,
        commands.AclCommand,
        commands.AssetCommand,
        commands.CopyCommand,
        commands.CreateCommand,
        commands.ListCommand,
        commands.SizeCommand,
        commands.MoveCommand,
        commands.RmCommand,
        commands.TaskCommand,
        commands.UploadCommand,
        commands.UploadImageManifestCommand,
        commands.UploadTableManifestCommand,
    ]


def main():
    # Set the program name to 'earthengine' for proper help text display.
    parser = argparse.ArgumentParser(
        prog='earthengine', description='Earth Engine Command Line Interface.')
    parser.add_argument(
        '--ee_config', help='Path to the earthengine configuration file. '
        'Defaults to "~/%s".' % utils.DEFAULT_EE_CONFIG_FILE_RELATIVE)
    parser.add_argument(
        '--service_account_file', help='Path to a service account credentials '
        'file. Overrides any ee_config if specified.')

    dispatcher = CommandDispatcher(parser)

    # Print the list of commands if the user supplied no arguments at all.
    if len(sys.argv) == 1:
        parser.print_help()
        return

    args = parser.parse_args()
    config = utils.CommandLineConfig(args.ee_config, args.service_account_file)

    # Catch EEException errors, which wrap server-side Earth Engine
    # errors, and print the error message without the irrelevant local
    # stack trace. (Individual commands may also catch EEException if
    # they want to be able to continue despite errors.)
    try:
        dispatcher.run(args, config)
    except ee.EEException as e:
        print(e)
        sys.exit(1)


if __name__ == '__main__':
    main()
gena/earthengine-api
python/ee/cli/eecli.py
Python
apache-2.0
2,385
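The dispatcher above wires a set of command classes into a single argparse parser. As a rough, stand-alone illustration of that subcommand-dispatch pattern (this does not use the actual ee.cli.commands API; the subcommands and handlers are made up):

import argparse


def cmd_ls(args):
    # Toy handler standing in for a real "list" command.
    print("listing assets under", args.path)


def cmd_rm(args):
    # Toy handler standing in for a real "remove" command.
    print("removing", args.path)


parser = argparse.ArgumentParser(prog="earthengine-sketch")
subparsers = parser.add_subparsers(dest="command", required=True)

ls_parser = subparsers.add_parser("ls", help="List assets.")
ls_parser.add_argument("path")
ls_parser.set_defaults(func=cmd_ls)

rm_parser = subparsers.add_parser("rm", help="Remove an asset.")
rm_parser.add_argument("path")
rm_parser.set_defaults(func=cmd_rm)

# Parse a canned command line and route to the selected handler.
args = parser.parse_args(["ls", "users/example"])
args.func(args)  # -> listing assets under users/example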
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.

# snippet-sourcedescription:[MyCodeCommitFunction.py demonstrates how to use an AWS Lambda function to return the URLs used for cloning an AWS CodeCommit repository to a CloudWatch log.]
# snippet-service:[codecommit]
# snippet-keyword:[Python]
# snippet-sourcesyntax:[python]
# snippet-keyword:[AWS CodeCommit]
# snippet-keyword:[Code Sample]
# snippet-keyword:[GetRepository]
# snippet-sourcetype:[full-example]
# snippet-sourceauthor:[AWS]
# snippet-sourcedate:[2016-03-07]
# snippet-start:[codecommit.python.MyCodeCommitFunction.complete]
import json
import boto3

codecommit = boto3.client('codecommit')


def lambda_handler(event, context):
    # Log the updated references from the event
    references = {reference['ref'] for reference in event['Records'][0]['codecommit']['references']}
    print("References: " + str(references))

    # Get the repository from the event and show its git clone URL
    repository = event['Records'][0]['eventSourceARN'].split(':')[5]
    try:
        response = codecommit.get_repository(repositoryName=repository)
        print("Clone URL: " + response['repositoryMetadata']['cloneUrlHttp'])
        return response['repositoryMetadata']['cloneUrlHttp']
    except Exception as e:
        print(e)
        print('Error getting repository {}. Make sure it exists and that your '
              'repository is in the same region as this function.'.format(repository))
        raise e
# snippet-end:[codecommit.python.MyCodeCommitFunction.complete]
awsdocs/aws-doc-sdk-examples
lambda_functions/codecommit/MyCodeCommitFunction.py
Python
apache-2.0
2,083
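For reference, a minimal sketch of the trigger event shape the handler above consumes, showing only the parsing logic (the boto3 call is omitted). The ARN region, account ID, branch, and repository name are placeholder values.

# Hypothetical CodeCommit trigger event; only the fields read by the handler
# above are populated, and all values are made up.
sample_event = {
    "Records": [
        {
            "eventSourceARN": "arn:aws:codecommit:us-east-1:123456789012:MyDemoRepo",
            "codecommit": {
                "references": [
                    {"ref": "refs/heads/main"}
                ]
            },
        }
    ]
}

# The handler collects the updated refs and derives the repository name
# from the sixth colon-separated field of the ARN.
references = {r["ref"] for r in sample_event["Records"][0]["codecommit"]["references"]}
repository = sample_event["Records"][0]["eventSourceARN"].split(":")[5]
print(references)  # {'refs/heads/main'}
print(repository)  # MyDemoRepo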
# -*- coding: utf-8 -*- """ /*************************************************************************** GeepsSpStats A QGIS plugin Spatial Statistics by PySAL ------------------- begin : 2014-07-01 git sha : $Format:%H$ copyright : (C) 2014 by GEEPS / Gaia3D email : geeps.man@gmail.com ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ """ from PyQt4.QtGui import * from PyQt4.QtCore import * import os.path from Utility import * from Widget_MoransI import Widget_MoransI from Widget_GetisOrdsG import Widget_GetisOrdsG from Widget_NearestNeighbor import Widget_NearestNeighbor from Widget_KFunction import Widget_KFunction from Widget_KnoxStatistic import Widget_KnoxStatistic from Widget_SpatialScan import Widget_SpatialScan class WidgetContainer(object): def __init__(self, iface, classTemplet, dockType=Qt.RightDockWidgetArea): self.__iface = iface self.__dockwidget = None self.__oloWidget = None self.__classTemplet = classTemplet self.__title = classTemplet.title self.__objectName = classTemplet.objectName self.__dockType = dockType # Private def __setDocWidget(self): self.__dockwidget = QDockWidget(self.__title, self.__iface.mainWindow() ) self.__dockwidget.setObjectName(self.__objectName) self.__oloWidget = self.__classTemplet(self.__iface, self.__dockwidget) self.__dockwidget.setWidget(self.__oloWidget) self.__oloWidget.updateGuiLayerList() def __initGui(self): self.__setDocWidget() self.__iface.addDockWidget(self.__dockType, self.__dockwidget) def __unload(self): self.__dockwidget.close() self.__iface.removeDockWidget( self.__dockwidget ) # 이벤트 헨들러가 자동제거 되지 않아 강제로 제거 self.__oloWidget.disconnectGlobalSignal() del self.__oloWidget self.__dockwidget = None # Public def setVisible(self, visible): if visible: if self.__dockwidget is None: self.__initGui() else: if not self.__dockwidget is None: self.__unload() # TODO: reflash def repaint(self): if self.__dockwidget: self.__dockwidget.update() self.__dockwidget.repaint() ### QGIS Plugin Implementation. class GeepsSpStats: crrWidget = None def __init__(self, iface): """Constructor. :param iface: An interface instance that will be passed to this class which provides the hook by which you can manipulate the QGIS application at run time. :type iface: QgsInterface """ # Save reference to the QGIS interface self.iface = iface # reference to map canvas self.canvas = self.iface.mapCanvas() # initialize plugin directory self.plugin_dir = os.path.dirname(__file__) # initialize locale locale = QSettings().value('locale/userLocale')[0:2] locale_path = os.path.join( self.plugin_dir, 'i18n', 'GeepsSpStats_{}.qm'.format(locale)) # 한국어는 GeepsSpStats_ko.qm 파일이 필요 if os.path.exists(locale_path): self.translator = QTranslator() self.translator.load(locale_path) if qVersion() > '4.3.3': QCoreApplication.installTranslator(self.translator) # Overview #self.crrWidget = WidgetContainer(iface, Widget_MoransI) # noinspection PyMethodMayBeStatic def tr(self, message): """Get the translation for a string using Qt translation API. We implement this ourselves since we do not inherit QObject. :param message: String for translation. 
:type message: str, QString :returns: Translated version of message. :rtype: QString """ # noinspection PyTypeChecker,PyArgumentList,PyCallByClass return QCoreApplication.translate('GeepsSpStats', message) def initGui(self): """Create the menu entries and toolbar icons inside the QGIS GUI.""" # Qt에서는 Action이 메뉴의 최종 아이템이라 생각하면 됨 actions = self.iface.mainWindow().menuBar().actions() self.mainMenu = QMenu(self.iface.mainWindow()) self.mainMenu.setTitle(self.tr(u'Spatial Statics')) # 이미 메뉴가 있다면 그냥 있는 것 이용 for action in actions: if action.text() == self.tr(u'Spatial Statics'): self.mainMenu = action.menu() break ### MENU1 : spatial clusters detection icon = QIcon(os.path.dirname(__file__) + "/images/publish-to-geonode.png") self.menu1 = self.mainMenu.addMenu(icon, self.tr(u'Spatial Autocorrelation')) self.mainMenu.addMenu(self.menu1) # Moran's I Statistic Menu self.moransI_Action = QAction(self.tr("Moran's I Statistic"), self.iface.mainWindow()) self.menu1.addAction(self.moransI_Action) self.moransI_Action.triggered.connect(self.showWidgetMoransI) ### MENU2 : Spatial Clustering icon = QIcon(os.path.dirname(__file__) + "/images/tree.png") self.menu2 = self.mainMenu.addMenu(icon, self.tr(u'Spatial Clustering')) self.mainMenu.addMenu(self.menu2) # Getis-Ord's G Statistic Menu self.getisOrdsG_Action = QAction(self.tr("Getis-Ord's G Statistic"), self.iface.mainWindow()) self.menu2.addAction(self.getisOrdsG_Action) self.getisOrdsG_Action.triggered.connect(self.showWidgetGetisOrdsG) # Nearest neighbor statistic Menu self.nearestNeighborStatistic_Action = QAction( self.tr(u"Nearest Neighbor Statistic"), self.menu2) self.menu2.addAction(self.nearestNeighborStatistic_Action) self.nearestNeighborStatistic_Action.triggered.connect(self.showWidgetNearestNeighbor) # K-function Menu self.Kfunction_Action = QAction(self.tr(u"K-function"), self.menu2) self.menu2.addAction(self.Kfunction_Action) self.Kfunction_Action.triggered.connect(self.showWidgetKFunction) ### MENU3 : Spatiotemporal Clustering icon = QIcon(os.path.dirname(__file__) + "/images/workspace.png") self.menu3 = self.mainMenu.addMenu(icon, self.tr(u'Spatiotemporal Clustering')) self.mainMenu.addMenu(self.menu3) # Knox statistic Menu self.knoxStatistic_Action = QAction(self.tr(u"Knox Statistic"), self.menu3) self.menu3.addAction(self.knoxStatistic_Action) self.knoxStatistic_Action.triggered.connect(self.showWidgetKnoxStatistic) ### MENU4 : spatial clusters detection icon = QIcon(os.path.dirname(__file__) + "/images/view.png") self.menu4 = self.mainMenu.addMenu(icon, self.tr(u'Spatial Clusters Detection')) self.mainMenu.addMenu(self.menu4) # Knox statistic Menu self.spatialScanStatistic_Action = QAction(self.tr(u"Spatial Scan Statistic"), self.menu4) self.menu4.addAction(self.spatialScanStatistic_Action) self.spatialScanStatistic_Action.triggered.connect(self.showWidgetSpatialScan) # ### HELP # icon = QIcon(os.path.dirname(__file__) + "/images/help.png") # self.help_Action = QAction(icon, self.tr(u"About GEEPS Spatial Stats"), self.menu1) # self.mainMenu.addAction(self.help_Action) # self.help_Action.triggered.connect(self.run) ### Main Menu 등록 menuBar = self.iface.mainWindow().menuBar() menuBar.insertMenu(self.iface.firstRightStandardMenu().menuAction(), self.mainMenu) def unload(self): """Removes the plugin menu item and icon from QGIS GUI.""" self.mainMenu.deleteLater() if not self.crrWidget is None: self.crrWidget.setVisible( False ) del self.crrWidget self.crrWidget = None def getLayerList(self): retLayerList = [] for layer in 
self.canvas.layers(): retLayerList.append(layer.name()) return retLayerList def showWidgetMoransI(self): if not self.crrWidget is None: self.crrWidget.setVisible(False) del self.crrWidget self.crrWidget = None self.crrWidget = WidgetContainer(self.iface, Widget_MoransI) self.crrWidget.setVisible(True) # TODO: UI reflash self.crrWidget.repaint() pass def showWidgetGetisOrdsG(self): if not self.crrWidget is None: self.crrWidget.setVisible(False) del self.crrWidget self.crrWidget = None self.crrWidget = WidgetContainer(self.iface, Widget_GetisOrdsG) self.crrWidget.setVisible(True) pass def showWidgetNearestNeighbor(self): if not self.crrWidget is None: self.crrWidget.setVisible(False) del self.crrWidget self.crrWidget = None self.crrWidget = WidgetContainer(self.iface, Widget_NearestNeighbor) self.crrWidget.setVisible(True) pass def showWidgetKFunction(self): if not self.crrWidget is None: self.crrWidget.setVisible(False) del self.crrWidget self.crrWidget = None self.crrWidget = WidgetContainer(self.iface, Widget_KFunction) self.crrWidget.setVisible(True) pass def showWidgetKnoxStatistic(self): if not self.crrWidget is None: self.crrWidget.setVisible(False) del self.crrWidget self.crrWidget = None self.crrWidget = WidgetContainer(self.iface, Widget_KnoxStatistic) self.crrWidget.setVisible(True) pass def showWidgetSpatialScan(self): if not self.crrWidget is None: self.crrWidget.setVisible(False) del self.crrWidget self.crrWidget = None self.crrWidget = WidgetContainer(self.iface, Widget_SpatialScan) self.crrWidget.setVisible(True) pass def run(self): alert("Under Construction!!!")
Gaia3D/GeepsSpatialStatistic
mainPlugin.py
Python
apache-2.0
10,903
# Explains/tests Issues:
# https://github.com/ray-project/ray/issues/6928
# https://github.com/ray-project/ray/issues/6732

import argparse
from gym.spaces import Discrete, Box
import numpy as np

from ray.rllib.agents.ppo import PPOTrainer
from ray.rllib.examples.env.random_env import RandomEnv
from ray.rllib.examples.models.mobilenet_v2_with_lstm_models import \
    MobileV2PlusRNNModel, TorchMobileV2PlusRNNModel
from ray.rllib.models import ModelCatalog
from ray.rllib.utils.framework import try_import_tf

tf1, tf, tfv = try_import_tf()

cnn_shape = (4, 4, 3)
# The torch version of MobileNetV2 does channels first.
cnn_shape_torch = (3, 224, 224)

parser = argparse.ArgumentParser()
parser.add_argument("--torch", action="store_true")

if __name__ == "__main__":
    args = parser.parse_args()

    # Register our custom model.
    ModelCatalog.register_custom_model(
        "my_model",
        TorchMobileV2PlusRNNModel if args.torch else MobileV2PlusRNNModel)

    # Configure our Trainer.
    config = {
        "framework": "torch" if args.torch else "tf",
        "model": {
            "custom_model": "my_model",
            # Extra config passed to the custom model's c'tor as kwargs.
            "custom_model_config": {
                "cnn_shape": cnn_shape_torch if args.torch else cnn_shape,
            },
            "max_seq_len": 20,
        },
        "vf_share_layers": True,
        "num_workers": 0,  # no parallelism
        "env_config": {
            "action_space": Discrete(2),
            # Test a simple Image observation space.
            "observation_space": Box(
                0.0, 1.0,
                shape=cnn_shape_torch if args.torch else cnn_shape,
                dtype=np.float32)
        },
    }

    trainer = PPOTrainer(config=config, env=RandomEnv)
    print(trainer.train())
robertnishihara/ray
rllib/examples/mobilenet_v2_with_lstm.py
Python
apache-2.0
1,849
# Copyright 2018 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Blob-management tool application code.
"""
GoogleCloudPlatform/appengine-blobstoremgmt-python
src/app/__init__.py
Python
apache-2.0
644
# -*- coding: utf-8 -*-
from setuptools import setup

NAME = "apicaller"
DESCRIPTION = "APICaller makes the creating API client library easier."
AUTHOR = "Jan Češpivo"
AUTHOR_EMAIL = "jan.cespivo@gmail.com"
URL = "https://github.com/cespivo/apicaller"
VERSION = '0.1.2a'

setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    license="Apache 2.0",
    url=URL,
    py_modules=['apicaller'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP',
    ],
    install_requires=[
        "requests>=2.4.3",
    ],
)
jancespivo/apicaller
setup.py
Python
apache-2.0
869
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from mock import Mock, patch from nose.tools import assert_equal from pylons import app_globals as g from alluratest.controller import setup_unit_test from allura.model.repo import Commit from forgesvn.model.svn import SVNImplementation class TestSVNImplementation(object): def setUp(self): setup_unit_test() def test_compute_tree_new(self): self._test_compute_tree_new('/trunk/foo/') self._test_compute_tree_new('/trunk/foo') self._test_compute_tree_new('trunk/foo/') self._test_compute_tree_new('trunk/foo') @patch('allura.model.repo.LastCommitDoc.m.update_partial') @patch('allura.model.repo.TreesDoc.m.update_partial') @patch('allura.model.repo.Tree.upsert') @patch('allura.model.repo.Tree.query.get') def _test_compute_tree_new(self, path, tree_get, tree_upsert, treesdoc_partial, lcd_partial): repo = Mock(fs_path=g.tmpdir + '/') repo.name = 'code' impl = SVNImplementation(repo) impl._svn.info2 = Mock() impl._svn.info2.return_value = [('foo', Mock())] tree_get.return_value = None # no existing tree commit = Commit() commit._id = '5057636b9c1040636b81e4b1:6' tree_upsert.return_value = (Mock(), True) tree_id = impl.compute_tree_new(commit, path) assert_equal(impl._svn.info2.call_args[0] [0], 'file://' + g.tmpdir + '/code/trunk/foo') treesdoc_partial.assert_called() lcd_partial.assert_called() def test_last_commit_ids(self): self._test_last_commit_ids('/trunk/foo/') self._test_last_commit_ids('/trunk/foo') self._test_last_commit_ids('trunk/foo/') self._test_last_commit_ids('trunk/foo') def _test_last_commit_ids(self, path): repo = Mock(fs_path=g.tmpdir + '/') repo.name = 'code' repo._id = '5057636b9c1040636b81e4b1' impl = SVNImplementation(repo) impl._svn.info2 = Mock() impl._svn.info2.return_value = [('trunk', Mock()), ('foo', Mock())] impl._svn.info2.return_value[1][1].last_changed_rev.number = '1' commit = Commit() commit._id = '5057636b9c1040636b81e4b1:6' entries = impl.last_commit_ids(commit, [path]) assert_equal(entries, {path.strip('/'): '5057636b9c1040636b81e4b1:1'}) assert_equal(impl._svn.info2.call_args[0] [0], 'file://' + g.tmpdir + '/code/trunk') @patch('forgesvn.model.svn.svn_path_exists') def test__path_to_root(self, path_exists): repo = Mock(fs_path=g.tmpdir + '/') repo.name = 'code' repo._id = '5057636b9c1040636b81e4b1' impl = SVNImplementation(repo) path_exists.return_value = False # edge cases assert_equal(impl._path_to_root(None), '') assert_equal(impl._path_to_root(''), '') assert_equal(impl._path_to_root('/some/path/'), '') assert_equal(impl._path_to_root('some/path'), '') # tags assert_equal(impl._path_to_root('/some/path/tags/1.0/some/dir'), 'some/path/tags/1.0') assert_equal(impl._path_to_root('/some/path/tags/1.0/'), 'some/path/tags/1.0') assert_equal(impl._path_to_root('/some/path/tags/'), '') # branches 
assert_equal(impl._path_to_root('/some/path/branches/b1/dir'), 'some/path/branches/b1') assert_equal(impl._path_to_root('/some/path/branches/b1/'), 'some/path/branches/b1') assert_equal(impl._path_to_root('/some/path/branches/'), '') # trunk assert_equal(impl._path_to_root('/some/path/trunk/some/dir/'), 'some/path/trunk') assert_equal(impl._path_to_root('/some/path/trunk'), 'some/path/trunk') # with fallback to trunk path_exists.return_value = True assert_equal(impl._path_to_root(''), 'trunk') assert_equal(impl._path_to_root('/some/path/'), 'trunk') assert_equal(impl._path_to_root('/tags/'), 'trunk') assert_equal(impl._path_to_root('/branches/'), 'trunk') assert_equal(impl._path_to_root('/tags/1.0'), 'tags/1.0') assert_equal(impl._path_to_root('/branches/branch'), 'branches/branch') @patch('forgesvn.model.svn.svn_path_exists') def test_update_checkout_url(self, svn_path_exists): impl = SVNImplementation(Mock()) opts = impl._repo.app.config.options = {} svn_path_exists.side_effect = lambda path: False opts['checkout_url'] = 'invalid' impl.update_checkout_url() assert_equal(opts['checkout_url'], '') svn_path_exists.side_effect = lambda path: path.endswith('trunk') opts['checkout_url'] = 'invalid' impl.update_checkout_url() assert_equal(opts['checkout_url'], 'trunk') svn_path_exists.side_effect = lambda path: path.endswith('trunk') opts['checkout_url'] = '' impl.update_checkout_url() assert_equal(opts['checkout_url'], 'trunk')
apache/incubator-allura
ForgeSVN/forgesvn/tests/model/test_svnimplementation.py
Python
apache-2.0
5,921
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.baremetal.v1 import _common from openstack import exceptions from openstack import resource from openstack import utils class ValidationResult(object): """Result of a single interface validation. :ivar result: Result of a validation, ``True`` for success, ``False`` for failure, ``None`` for unsupported interface. :ivar reason: If ``result`` is ``False`` or ``None``, explanation of the result. """ def __init__(self, result, reason): self.result = result self.reason = reason class Node(_common.ListMixin, resource.Resource): resources_key = 'nodes' base_path = '/nodes' # capabilities allow_create = True allow_fetch = True allow_commit = True allow_delete = True allow_list = True allow_patch = True commit_method = 'PATCH' commit_jsonpatch = True _query_mapping = resource.QueryParameters( 'associated', 'conductor_group', 'driver', 'fault', 'provision_state', 'resource_class', fields={'type': _common.fields_type}, instance_id='instance_uuid', is_maintenance='maintenance', ) # The allocation_uuid field introduced in 1.52 (Stein). _max_microversion = '1.52' # Properties #: The UUID of the allocation associated with this node. Added in API #: microversion 1.52. allocation_id = resource.Body("allocation_uuid") #: A string or UUID of the tenant who owns the baremetal node. Added in API #: microversion 1.50. owner = resource.Body("owner") #: The UUID of the chassis associated wit this node. Can be empty or None. chassis_id = resource.Body("chassis_uuid") #: The current clean step. clean_step = resource.Body("clean_step") #: Hostname of the conductor currently handling this ndoe. Added in API # microversion 1.49. conductor = resource.Body("conductor") #: Conductor group this node is managed by. Added in API microversion 1.46. conductor_group = resource.Body("conductor_group") #: Timestamp at which the node was last updated. created_at = resource.Body("created_at") #: The current deploy step. Added in API microversion 1.44. deploy_step = resource.Body("deploy_step") #: The name of the driver. driver = resource.Body("driver") #: All the metadata required by the driver to manage this node. List of #: fields varies between drivers, and can be retrieved from the #: :class:`openstack.baremetal.v1.driver.Driver` resource. driver_info = resource.Body("driver_info", type=dict) #: Internal metadata set and stored by node's driver. This is read-only. driver_internal_info = resource.Body("driver_internal_info", type=dict) #: A set of one or more arbitrary metadata key and value pairs. extra = resource.Body("extra") #: Fault type that caused the node to enter maintenance mode. #: Introduced in API microversion 1.42. fault = resource.Body("fault") #: The UUID of the node resource. id = resource.Body("uuid", alternate_id=True) #: Information used to customize the deployed image, e.g. size of root #: partition, config drive in the form of base64 encoded string and other #: metadata. 
instance_info = resource.Body("instance_info") #: UUID of the nova instance associated with this node. instance_id = resource.Body("instance_uuid") #: Override enabling of automated cleaning. Added in API microversion 1.47. is_automated_clean_enabled = resource.Body("automated_clean", type=bool) #: Whether console access is enabled on this node. is_console_enabled = resource.Body("console_enabled", type=bool) #: Whether node is currently in "maintenance mode". Nodes put into #: maintenance mode are removed from the available resource pool. is_maintenance = resource.Body("maintenance", type=bool) # Whether the node is protected from undeploying. Added in API microversion # 1.48. is_protected = resource.Body("protected", type=bool) #: Any error from the most recent transaction that started but failed to #: finish. last_error = resource.Body("last_error") #: A list of relative links, including self and bookmark links. links = resource.Body("links", type=list) #: user settable description of the reason why the node was placed into #: maintenance mode. maintenance_reason = resource.Body("maintenance_reason") #: Human readable identifier for the node. May be undefined. Certain words #: are reserved. Added in API microversion 1.5 name = resource.Body("name") #: Links to the collection of ports on this node. ports = resource.Body("ports", type=list) #: Links to the collection of portgroups on this node. Available since #: API microversion 1.24. port_groups = resource.Body("portgroups", type=list) #: The current power state. Usually "power on" or "power off", but may be #: "None" if service is unable to determine the power state. power_state = resource.Body("power_state") #: Physical characteristics of the node. Content populated by the service #: during inspection. properties = resource.Body("properties", type=dict) # The reason why this node is protected. Added in API microversion 1.48. protected_reason = resource.Body("protected_reason") #: The current provisioning state of the node. provision_state = resource.Body("provision_state") #: The current RAID configuration of the node. raid_config = resource.Body("raid_config") #: The name of an service conductor host which is holding a lock on this #: node, if a lock is held. reservation = resource.Body("reservation") #: A string to be used by external schedulers to identify this node as a #: unit of a specific type of resource. Added in API microversion 1.21. resource_class = resource.Body("resource_class") #: Links to the collection of states. states = resource.Body("states", type=list) #: The requested state if a provisioning action has been requested. For #: example, ``AVAILABLE``, ``DEPLOYING``, ``DEPLOYWAIT``, ``DEPLOYING``, #: ``ACTIVE`` etc. target_provision_state = resource.Body("target_provision_state") #: The requested state during a state transition. target_power_state = resource.Body("target_power_state") #: The requested RAID configuration of the node which will be applied when #: the node next transitions through the CLEANING state. target_raid_config = resource.Body("target_raid_config") #: Traits of the node. Introduced in API microversion 1.37. traits = resource.Body("traits", type=list) #: Timestamp at which the node was last updated. updated_at = resource.Body("updated_at") # Hardware interfaces grouped together for convenience. #: BIOS interface to use when setting BIOS properties of the node. #: Introduced in API microversion 1.40. 
bios_interface = resource.Body("bios_interface") #: Boot interface to use when configuring boot of the node. #: Introduced in API microversion 1.31. boot_interface = resource.Body("boot_interface") #: Console interface to use when working with serial console. #: Introduced in API microversion 1.31. console_interface = resource.Body("console_interface") #: Deploy interface to use when deploying the node. #: Introduced in API microversion 1.31. deploy_interface = resource.Body("deploy_interface") #: Inspect interface to use when inspecting the node. #: Introduced in API microversion 1.31. inspect_interface = resource.Body("inspect_interface") #: Management interface to use for management actions on the node. #: Introduced in API microversion 1.31. management_interface = resource.Body("management_interface") #: Network interface provider to use when plumbing the network connections #: for this node. Introduced in API microversion 1.20. network_interface = resource.Body("network_interface") #: Power interface to use for power actions on the node. #: Introduced in API microversion 1.31. power_interface = resource.Body("power_interface") #: RAID interface to use for configuring RAID on the node. #: Introduced in API microversion 1.31. raid_interface = resource.Body("raid_interface") #: Rescue interface to use for rescuing of the node. #: Introduced in API microversion 1.38. rescue_interface = resource.Body("rescue_interface") #: Storage interface to use when attaching remote storage. #: Introduced in API microversion 1.33. storage_interface = resource.Body("storage_interface") #: Vendor interface to use for vendor-specific actions on the node. #: Introduced in API microversion 1.31. vendor_interface = resource.Body("vendor_interface") def _consume_body_attrs(self, attrs): if 'provision_state' in attrs and attrs['provision_state'] is None: # API version 1.1 uses None instead of "available". Make it # consistent. attrs['provision_state'] = 'available' return super(Node, self)._consume_body_attrs(attrs) def create(self, session, *args, **kwargs): """Create a remote resource based on this instance. The overridden version is capable of handling the populated ``provision_state`` field of one of three values: ``enroll``, ``manageable`` or ``available``. The default is currently ``available``, since it's the only state supported by all API versions. Note that Bare Metal API 1.4 is required for ``manageable`` and 1.11 is required for ``enroll``. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :return: This :class:`Resource` instance. :raises: ValueError if the Node's ``provision_state`` is not one of ``None``, ``enroll``, ``manageable`` or ``available``. :raises: :exc:`~openstack.exceptions.NotSupported` if the ``provision_state`` cannot be reached with any API version supported by the server. """ expected_provision_state = self.provision_state if expected_provision_state is None: expected_provision_state = 'available' if expected_provision_state not in ('enroll', 'manageable', 'available'): raise ValueError( "Node's provision_state must be one of 'enroll', " "'manageable' or 'available' for creation, got %s" % expected_provision_state) session = self._get_session(session) # Verify that the requested provision state is reachable with the API # version we are going to use. 
try: expected_version = _common.STATE_VERSIONS[expected_provision_state] except KeyError: pass else: self._assert_microversion_for( session, 'create', expected_version, error_message="Cannot create a node with initial provision " "state %s" % expected_provision_state) # Ironic cannot set provision_state itself, so marking it as unchanged self._clean_body_attrs({'provision_state'}) super(Node, self).create(session, *args, **kwargs) if (self.provision_state == 'enroll' and expected_provision_state != 'enroll'): self.set_provision_state(session, 'manage', wait=True) if (self.provision_state == 'manageable' and expected_provision_state == 'available'): self.set_provision_state(session, 'provide', wait=True) if (self.provision_state == 'available' and expected_provision_state == 'manageable'): self.set_provision_state(session, 'manage', wait=True) return self def commit(self, session, *args, **kwargs): """Commit the state of the instance to the remote resource. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :return: This :class:`Node` instance. """ # These fields have to be set through separate API. if ('maintenance_reason' in self._body.dirty or 'maintenance' in self._body.dirty): if not self.is_maintenance and self.maintenance_reason: if 'maintenance' in self._body.dirty: self.maintenance_reason = None else: raise ValueError('Maintenance reason cannot be set when ' 'maintenance is False') if self.is_maintenance: self._do_maintenance_action( session, 'put', {'reason': self.maintenance_reason}) else: # This corresponds to setting maintenance=False and # maintenance_reason=None in the same request. self._do_maintenance_action(session, 'delete') self._clean_body_attrs({'maintenance', 'maintenance_reason'}) if not self.requires_commit: # Other fields are not updated, re-fetch the node to reflect # the new status. return self.fetch(session) return super(Node, self).commit(session, *args, **kwargs) def set_provision_state(self, session, target, config_drive=None, clean_steps=None, rescue_password=None, wait=False, timeout=None): """Run an action modifying this node's provision state. This call is asynchronous, it will return success as soon as the Bare Metal service acknowledges the request. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param target: Provisioning action, e.g. ``active``, ``provide``. See the Bare Metal service documentation for available actions. :param config_drive: Config drive to pass to the node, only valid for ``active` and ``rebuild`` targets. You can use functions from :mod:`openstack.baremetal.configdrive` to build it. :param clean_steps: Clean steps to execute, only valid for ``clean`` target. :param rescue_password: Password for the rescue operation, only valid for ``rescue`` target. :param wait: Whether to wait for the target state to be reached. :param timeout: Timeout (in seconds) to wait for the target state to be reached. If ``None``, wait without timeout. :return: This :class:`Node` instance. :raises: ValueError if ``config_drive``, ``clean_steps`` or ``rescue_password`` are provided with an invalid ``target``. :raises: :class:`~openstack.exceptions.ResourceFailure` if the node reaches an error state while waiting for the state. :raises: :class:`~openstack.exceptions.ResourceTimeout` if timeout is reached while waiting for the state. 
""" session = self._get_session(session) version = None if target in _common.PROVISIONING_VERSIONS: version = '1.%d' % _common.PROVISIONING_VERSIONS[target] if config_drive: # Some config drive actions require a higher version. if isinstance(config_drive, dict): version = '1.56' elif target == 'rebuild': version = '1.35' version = utils.pick_microversion(session, version) body = {'target': target} if config_drive: if target not in ('active', 'rebuild'): raise ValueError('Config drive can only be provided with ' '"active" and "rebuild" targets') # Not a typo - ironic accepts "configdrive" (without underscore) body['configdrive'] = config_drive if clean_steps is not None: if target != 'clean': raise ValueError('Clean steps can only be provided with ' '"clean" target') body['clean_steps'] = clean_steps if rescue_password is not None: if target != 'rescue': raise ValueError('Rescue password can only be provided with ' '"rescue" target') body['rescue_password'] = rescue_password if wait: try: expected_state = _common.EXPECTED_STATES[target] except KeyError: raise ValueError('For target %s the expected state is not ' 'known, cannot wait for it' % target) request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'states', 'provision') response = session.put( request.url, json=body, headers=request.headers, microversion=version, retriable_status_codes=_common.RETRIABLE_STATUS_CODES) msg = ("Failed to set provision state for bare metal node {node} " "to {target}".format(node=self.id, target=target)) exceptions.raise_from_response(response, error_message=msg) if wait: return self.wait_for_provision_state(session, expected_state, timeout=timeout) else: return self.fetch(session) def wait_for_provision_state(self, session, expected_state, timeout=None, abort_on_failed_state=True): """Wait for the node to reach the expected state. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param expected_state: The expected provisioning state to reach. :param timeout: If ``wait`` is set to ``True``, specifies how much (in seconds) to wait for the expected state to be reached. The value of ``None`` (the default) means no client-side timeout. :param abort_on_failed_state: If ``True`` (the default), abort waiting if the node reaches a failure state which does not match the expected one. Note that the failure state for ``enroll`` -> ``manageable`` transition is ``enroll`` again. :return: This :class:`Node` instance. :raises: :class:`~openstack.exceptions.ResourceFailure` if the node reaches an error state and ``abort_on_failed_state`` is ``True``. :raises: :class:`~openstack.exceptions.ResourceTimeout` on timeout. """ for count in utils.iterate_timeout( timeout, "Timeout waiting for node %(node)s to reach " "target state '%(state)s'" % {'node': self.id, 'state': expected_state}): self.fetch(session) if self._check_state_reached(session, expected_state, abort_on_failed_state): return self session.log.debug( 'Still waiting for node %(node)s to reach state ' '"%(target)s", the current state is "%(state)s"', {'node': self.id, 'target': expected_state, 'state': self.provision_state}) def wait_for_reservation(self, session, timeout=None): """Wait for a lock on the node to be released. Bare metal nodes in ironic have a reservation lock that is used to represent that a conductor has locked the node while performing some sort of action, such as changing configuration as a result of a machine state change. 
This lock can occur during power syncronization, and prevents updates to objects attached to the node, such as ports. Note that nothing prevents a conductor from acquiring the lock again after this call returns, so it should be treated as best effort. Returns immediately if there is no reservation on the node. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param timeout: How much (in seconds) to wait for the lock to be released. The value of ``None`` (the default) means no timeout. :return: This :class:`Node` instance. """ if self.reservation is None: return self for count in utils.iterate_timeout( timeout, "Timeout waiting for the lock to be released on node %s" % self.id): self.fetch(session) if self.reservation is None: return self session.log.debug( 'Still waiting for the lock to be released on node ' '%(node)s, currently locked by conductor %(host)s', {'node': self.id, 'host': self.reservation}) def _check_state_reached(self, session, expected_state, abort_on_failed_state=True): """Wait for the node to reach the expected state. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param expected_state: The expected provisioning state to reach. :param abort_on_failed_state: If ``True`` (the default), abort waiting if the node reaches a failure state which does not match the expected one. Note that the failure state for ``enroll`` -> ``manageable`` transition is ``enroll`` again. :return: ``True`` if the target state is reached :raises: :class:`~openstack.exceptions.ResourceFailure` if the node reaches an error state and ``abort_on_failed_state`` is ``True``. """ # NOTE(dtantsur): microversion 1.2 changed None to available if (self.provision_state == expected_state or (expected_state == 'available' and self.provision_state is None)): return True elif not abort_on_failed_state: return False if (self.provision_state.endswith(' failed') or self.provision_state == 'error'): raise exceptions.ResourceFailure( "Node %(node)s reached failure state \"%(state)s\"; " "the last error is %(error)s" % {'node': self.id, 'state': self.provision_state, 'error': self.last_error}) # Special case: a failure state for "manage" transition can be # "enroll" elif (expected_state == 'manageable' and self.provision_state == 'enroll' and self.last_error): raise exceptions.ResourceFailure( "Node %(node)s could not reach state manageable: " "failed to verify management credentials; " "the last error is %(error)s" % {'node': self.id, 'error': self.last_error}) # TODO(dtantsur): waiting for power state def set_power_state(self, session, target): """Run an action modifying this node's power state. This call is asynchronous, it will return success as soon as the Bare Metal service acknowledges the request. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param target: Target power state, e.g. "rebooting", "power on". See the Bare Metal service documentation for available actions. 
""" session = self._get_session(session) if target.startswith("soft "): version = '1.27' else: version = None version = utils.pick_microversion(session, version) # TODO(dtantsur): server timeout support body = {'target': target} request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'states', 'power') response = session.put( request.url, json=body, headers=request.headers, microversion=version, retriable_status_codes=_common.RETRIABLE_STATUS_CODES) msg = ("Failed to set power state for bare metal node {node} " "to {target}".format(node=self.id, target=target)) exceptions.raise_from_response(response, error_message=msg) def attach_vif(self, session, vif_id, retry_on_conflict=True): """Attach a VIF to the node. The exact form of the VIF ID depends on the network interface used by the node. In the most common case it is a Network service port (NOT a Bare Metal port) ID. A VIF can only be attached to one node at a time. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param string vif_id: Backend-specific VIF ID. :param retry_on_conflict: Whether to retry HTTP CONFLICT errors. This can happen when either the VIF is already used on a node or the node is locked. Since the latter happens more often, the default value is True. :return: ``None`` :raises: :exc:`~openstack.exceptions.NotSupported` if the server does not support the VIF API. """ session = self._get_session(session) version = self._assert_microversion_for( session, 'commit', _common.VIF_VERSION, error_message=("Cannot use VIF attachment API")) request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'vifs') body = {'id': vif_id} retriable_status_codes = _common.RETRIABLE_STATUS_CODES if not retry_on_conflict: retriable_status_codes = set(retriable_status_codes) - {409} response = session.post( request.url, json=body, headers=request.headers, microversion=version, retriable_status_codes=retriable_status_codes) msg = ("Failed to attach VIF {vif} to bare metal node {node}" .format(node=self.id, vif=vif_id)) exceptions.raise_from_response(response, error_message=msg) def detach_vif(self, session, vif_id, ignore_missing=True): """Detach a VIF from the node. The exact form of the VIF ID depends on the network interface used by the node. In the most common case it is a Network service port (NOT a Bare Metal port) ID. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param string vif_id: Backend-specific VIF ID. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.ResourceNotFound` will be raised when the VIF does not exist. Otherwise, ``False`` is returned. :return: ``True`` if the VIF was detached, otherwise ``False``. :raises: :exc:`~openstack.exceptions.NotSupported` if the server does not support the VIF API. 
""" session = self._get_session(session) version = self._assert_microversion_for( session, 'commit', _common.VIF_VERSION, error_message=("Cannot use VIF attachment API")) request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'vifs', vif_id) response = session.delete( request.url, headers=request.headers, microversion=version, retriable_status_codes=_common.RETRIABLE_STATUS_CODES) if ignore_missing and response.status_code == 400: session.log.debug( 'VIF %(vif)s was already removed from node %(node)s', {'vif': vif_id, 'node': self.id}) return False msg = ("Failed to detach VIF {vif} from bare metal node {node}" .format(node=self.id, vif=vif_id)) exceptions.raise_from_response(response, error_message=msg) return True def list_vifs(self, session): """List IDs of VIFs attached to the node. The exact form of the VIF ID depends on the network interface used by the node. In the most common case it is a Network service port (NOT a Bare Metal port) ID. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :return: List of VIF IDs as strings. :raises: :exc:`~openstack.exceptions.NotSupported` if the server does not support the VIF API. """ session = self._get_session(session) version = self._assert_microversion_for( session, 'fetch', _common.VIF_VERSION, error_message=("Cannot use VIF attachment API")) request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'vifs') response = session.get( request.url, headers=request.headers, microversion=version) msg = ("Failed to list VIFs attached to bare metal node {node}" .format(node=self.id)) exceptions.raise_from_response(response, error_message=msg) return [vif['id'] for vif in response.json()['vifs']] def validate(self, session, required=('boot', 'deploy', 'power')): """Validate required information on a node. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param required: List of interfaces that are required to pass validation. The default value is the list of minimum required interfaces for provisioning. :return: dict mapping interface names to :class:`ValidationResult` objects. :raises: :exc:`~openstack.exceptions.ValidationException` if validation fails for a required interface. """ session = self._get_session(session) version = self._get_microversion_for(session, 'fetch') request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'validate') response = session.get(request.url, headers=request.headers, microversion=version) msg = ("Failed to validate node {node}".format(node=self.id)) exceptions.raise_from_response(response, error_message=msg) result = response.json() if required: failed = [ '%s (%s)' % (key, value.get('reason', 'no reason')) for key, value in result.items() if key in required and not value.get('result') ] if failed: raise exceptions.ValidationException( 'Validation failed for required interfaces of node {node}:' ' {failures}'.format(node=self.id, failures=', '.join(failed))) return {key: ValidationResult(value.get('result'), value.get('reason')) for key, value in result.items()} def set_maintenance(self, session, reason=None): """Enable maintenance mode on the node. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param reason: Optional reason for maintenance. :return: This :class:`Node` instance. 
""" self._do_maintenance_action(session, 'put', {'reason': reason}) return self.fetch(session) def unset_maintenance(self, session): """Disable maintenance mode on the node. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :return: This :class:`Node` instance. """ self._do_maintenance_action(session, 'delete') return self.fetch(session) def _do_maintenance_action(self, session, verb, body=None): session = self._get_session(session) version = self._get_microversion_for(session, 'commit') request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'maintenance') response = getattr(session, verb)( request.url, json=body, headers=request.headers, microversion=version) msg = ("Failed to change maintenance mode for node {node}" .format(node=self.id)) exceptions.raise_from_response(response, error_message=msg) def set_boot_device(self, session, boot_device, persistent=False): """Set node boot device :param session: The session to use for making this request. :param boot_device: Boot device to assign to the node. :param persistent: If the boot device change is maintained after node reboot :return: The updated :class:`~openstack.baremetal.v1.node.Node` """ session = self._get_session(session) version = self._get_microversion_for(session, 'commit') request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'management', 'boot_device') body = {'boot_device': boot_device, 'persistent': persistent} response = session.put( request.url, json=body, headers=request.headers, microversion=version, retriable_status_codes=_common.RETRIABLE_STATUS_CODES) msg = ("Failed to set boot device for node {node}" .format(node=self.id)) exceptions.raise_from_response(response, error_message=msg) def add_trait(self, session, trait): """Add a trait to a node. :param session: The session to use for making this request. :param trait: The trait to add to the node. :returns: The updated :class:`~openstack.baremetal.v1.node.Node` """ session = self._get_session(session) version = utils.pick_microversion(session, '1.37') request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'traits', trait) response = session.put( request.url, json=None, headers=request.headers, microversion=version, retriable_status_codes=_common.RETRIABLE_STATUS_CODES) msg = ("Failed to add trait {trait} for node {node}" .format(trait=trait, node=self.id)) exceptions.raise_from_response(response, error_message=msg) self.traits = list(set(self.traits or ()) | {trait}) def remove_trait(self, session, trait, ignore_missing=True): """Remove a trait from a node. :param session: The session to use for making this request. :param trait: The trait to remove from the node. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.ResourceNotFound` will be raised when the trait does not exist. Otherwise, ``False`` is returned. 
:returns: The updated :class:`~openstack.baremetal.v1.node.Node` """ session = self._get_session(session) version = utils.pick_microversion(session, '1.37') request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'traits', trait) response = session.delete( request.url, headers=request.headers, microversion=version, retriable_status_codes=_common.RETRIABLE_STATUS_CODES) if ignore_missing or response.status_code == 400: session.log.debug( 'Trait %(trait)s was already removed from node %(node)s', {'trait': trait, 'node': self.id}) return False msg = ("Failed to remove trait {trait} from bare metal node {node}" .format(node=self.id, trait=trait)) exceptions.raise_from_response(response, error_message=msg) self.traits = list(set(self.traits) - {trait}) return True def set_traits(self, session, traits): """Set traits for a node. Removes any existing traits and adds the traits passed in to this method. :param session: The session to use for making this request. :param traits: list of traits to add to the node. :returns: The updated :class:`~openstack.baremetal.v1.node.Node` """ session = self._get_session(session) version = utils.pick_microversion(session, '1.37') request = self._prepare_request(requires_id=True) request.url = utils.urljoin(request.url, 'traits') body = {'traits': traits} response = session.put( request.url, json=body, headers=request.headers, microversion=version, retriable_status_codes=_common.RETRIABLE_STATUS_CODES) msg = ("Failed to set traits for node {node}" .format(node=self.id)) exceptions.raise_from_response(response, error_message=msg) self.traits = traits NodeDetail = Node
openstack/python-openstacksdk
openstack/baremetal/v1/node.py
Python
apache-2.0
38,102
from __future__ import unicode_literals

import re

from setuptools import find_packages, setup


def get_version(filename):
    content = open(filename).read()
    metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", content))
    return metadata['version']


setup(
    name='Mopidy-Lcdplate',
    version=get_version('mopidy_lcdplate/__init__.py'),
    url='https://github.com/gimunu/mopidy-lcdplate',
    license='Apache License, Version 2.0',
    author='Umberto De Giovannini',
    author_email='umberto.degiovannini@gmail.com',
    description='Mopidy extension for Adafruit LCD plate',
    long_description=open('README.rst').read(),
    packages=find_packages(exclude=['tests', 'tests.*']),
    zip_safe=False,
    include_package_data=True,
    install_requires=[
        'setuptools',
        'Mopidy >= 0.18',
        'Pykka >= 1.1',
    ],
    test_suite='nose.collector',
    tests_require=[
        'nose',
        'mock >= 1.0',
    ],
    entry_points={
        'mopidy.ext': [
            'lcdplate = mopidy_lcdplate:Extension',
        ],
    },
    classifiers=[
        'Environment :: No Input/Output (Daemon)',
        'Intended Audience :: End Users/Desktop',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Topic :: Multimedia :: Sound/Audio :: Players',
    ],
)
gimunu/mopidy-lcdplate
setup.py
Python
apache-2.0
1,406
""" Calculate the sum of two integers a and b, but you are not allowed to use the operator + and -. Example: Given a = 1 and b = 2, return 3. """ class Solution(object): def getSum(self, a, b): """ :type a: int :type b: int :rtype: int """ while a != 0 and b != 0: a, b = a^b, (a&b)<<1 if a > 1<<31 or b > 1<<31: a %= 1<<31 b %= 1<<31 return a or b if __name__ == "__main__": a = Solution() print a.getSum(-14, 16)
danielsunzhongyuan/my_leetcode_in_python
sum_of_two_integers_371.py
Python
apache-2.0
536
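A short worked trace of the carry-propagation idea used in getSum above, mirroring its loop with small non-negative inputs. (The masking branch in the solution appears intended to cope with negative operands and Python's unbounded integers, and is not exercised here.)

# XOR gives the bitwise sum without carries; AND shifted left by one gives
# the carries. Repeating until a carry term reaches zero yields the sum.
a, b = 13, 7
while a != 0 and b != 0:
    a, b = a ^ b, (a & b) << 1
    print(a, b)
# 10 10   (partial sum, carries)
# 0 20
print(a or b)  # 20, i.e. 13 + 7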
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging
import sys

from kmip.core import enums
from kmip.demos import utils
from kmip.pie import client


if __name__ == '__main__':
    logger = utils.build_console_logger(logging.INFO)

    # Build and parse arguments
    parser = utils.build_cli_parser(enums.Operation.CREATE)
    opts, args = parser.parse_args(sys.argv[1:])

    config = opts.config
    algorithm = opts.algorithm
    length = opts.length

    # Exit early if the arguments are not specified
    if algorithm is None:
        logger.error('No algorithm provided, exiting early from demo')
        sys.exit()
    if length is None:
        logger.error("No key length provided, exiting early from demo")
        sys.exit()

    algorithm = getattr(enums.CryptographicAlgorithm, algorithm, None)

    # Build the client and connect to the server
    with client.ProxyKmipClient(config=config) as client:
        try:
            uid = client.create(algorithm, length)
            logger.info("Successfully created symmetric key with ID: "
                        "{0}".format(uid))
        except Exception as e:
            logger.error(e)
viktorTarasov/PyKMIP
kmip/demos/pie/create.py
Python
apache-2.0
1,766
import base64 import binascii import json import re import uuid import warnings import zlib from collections import deque from types import TracebackType from typing import ( # noqa TYPE_CHECKING, Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple, Type, Union, cast, ) from urllib.parse import parse_qsl, unquote, urlencode from multidict import CIMultiDict, CIMultiDictProxy, MultiMapping # noqa from .hdrs import ( CONTENT_DISPOSITION, CONTENT_ENCODING, CONTENT_LENGTH, CONTENT_TRANSFER_ENCODING, CONTENT_TYPE, ) from .helpers import CHAR, TOKEN, parse_mimetype, reify from .http import HeadersParser from .payload import ( JsonPayload, LookupError, Order, Payload, StringPayload, get_payload, payload_type, ) from .streams import StreamReader __all__ = ('MultipartReader', 'MultipartWriter', 'BodyPartReader', 'BadContentDispositionHeader', 'BadContentDispositionParam', 'parse_content_disposition', 'content_disposition_filename') if TYPE_CHECKING: # pragma: no cover from .client_reqrep import ClientResponse # noqa class BadContentDispositionHeader(RuntimeWarning): pass class BadContentDispositionParam(RuntimeWarning): pass def parse_content_disposition(header: Optional[str]) -> Tuple[Optional[str], Dict[str, str]]: def is_token(string: str) -> bool: return bool(string) and TOKEN >= set(string) def is_quoted(string: str) -> bool: return string[0] == string[-1] == '"' def is_rfc5987(string: str) -> bool: return is_token(string) and string.count("'") == 2 def is_extended_param(string: str) -> bool: return string.endswith('*') def is_continuous_param(string: str) -> bool: pos = string.find('*') + 1 if not pos: return False substring = string[pos:-1] if string.endswith('*') else string[pos:] return substring.isdigit() def unescape(text: str, *, chars: str=''.join(map(re.escape, CHAR))) -> str: return re.sub('\\\\([{}])'.format(chars), '\\1', text) if not header: return None, {} disptype, *parts = header.split(';') if not is_token(disptype): warnings.warn(BadContentDispositionHeader(header)) return None, {} params = {} # type: Dict[str, str] while parts: item = parts.pop(0) if '=' not in item: warnings.warn(BadContentDispositionHeader(header)) return None, {} key, value = item.split('=', 1) key = key.lower().strip() value = value.lstrip() if key in params: warnings.warn(BadContentDispositionHeader(header)) return None, {} if not is_token(key): warnings.warn(BadContentDispositionParam(item)) continue elif is_continuous_param(key): if is_quoted(value): value = unescape(value[1:-1]) elif not is_token(value): warnings.warn(BadContentDispositionParam(item)) continue elif is_extended_param(key): if is_rfc5987(value): encoding, _, value = value.split("'", 2) encoding = encoding or 'utf-8' else: warnings.warn(BadContentDispositionParam(item)) continue try: value = unquote(value, encoding, 'strict') except UnicodeDecodeError: # pragma: nocover warnings.warn(BadContentDispositionParam(item)) continue else: failed = True if is_quoted(value): failed = False value = unescape(value[1:-1].lstrip('\\/')) elif is_token(value): failed = False elif parts: # maybe just ; in filename, in any case this is just # one case fix, for proper fix we need to redesign parser _value = '%s;%s' % (value, parts[0]) if is_quoted(_value): parts.pop(0) value = unescape(_value[1:-1].lstrip('\\/')) failed = False if failed: warnings.warn(BadContentDispositionHeader(header)) return None, {} params[key] = value return disptype.lower(), params def content_disposition_filename(params: Mapping[str, str], name: str='filename') -> 
Optional[str]: name_suf = '%s*' % name if not params: return None elif name_suf in params: return params[name_suf] elif name in params: return params[name] else: parts = [] fnparams = sorted((key, value) for key, value in params.items() if key.startswith(name_suf)) for num, (key, value) in enumerate(fnparams): _, tail = key.split('*', 1) if tail.endswith('*'): tail = tail[:-1] if tail == str(num): parts.append(value) else: break if not parts: return None value = ''.join(parts) if "'" in value: encoding, _, value = value.split("'", 2) encoding = encoding or 'utf-8' return unquote(value, encoding, 'strict') return value class MultipartResponseWrapper: """Wrapper around the MultipartBodyReader. It takes care about underlying connection and close it when it needs in. """ def __init__(self, resp: 'ClientResponse', stream: Any) -> None: # TODO: add strong annotation to stream self.resp = resp self.stream = stream def __aiter__(self) -> 'MultipartResponseWrapper': return self async def __anext__(self) -> Any: part = await self.next() if part is None: raise StopAsyncIteration # NOQA return part def at_eof(self) -> bool: """Returns True when all response data had been read.""" return self.resp.content.at_eof() async def next(self) -> Any: """Emits next multipart reader object.""" item = await self.stream.next() if self.stream.at_eof(): await self.release() return item async def release(self) -> None: """Releases the connection gracefully, reading all the content to the void.""" await self.resp.release() class BodyPartReader: """Multipart reader for single body part.""" chunk_size = 8192 def __init__(self, boundary: bytes, headers: Mapping[str, Optional[str]], content: StreamReader) -> None: self.headers = headers self._boundary = boundary self._content = content self._at_eof = False length = self.headers.get(CONTENT_LENGTH, None) self._length = int(length) if length is not None else None self._read_bytes = 0 # TODO: typeing.Deque is not supported by Python 3.5 self._unread = deque() # type: Any self._prev_chunk = None # type: Optional[bytes] self._content_eof = 0 self._cache = {} # type: Dict[str, Any] def __aiter__(self) -> 'BodyPartReader': return self async def __anext__(self) -> Any: part = await self.next() if part is None: raise StopAsyncIteration # NOQA return part async def next(self) -> Any: item = await self.read() if not item: return None return item async def read(self, *, decode: bool=False) -> Any: """Reads body part data. decode: Decodes data following by encoding method from Content-Encoding header. If it missed data remains untouched """ if self._at_eof: return b'' data = bytearray() while not self._at_eof: data.extend((await self.read_chunk(self.chunk_size))) if decode: return self.decode(data) return data async def read_chunk(self, size: int=chunk_size) -> bytes: """Reads body part content chunk of the specified size. size: chunk size """ if self._at_eof: return b'' if self._length: chunk = await self._read_chunk_from_length(size) else: chunk = await self._read_chunk_from_stream(size) self._read_bytes += len(chunk) if self._read_bytes == self._length: self._at_eof = True if self._at_eof: clrf = await self._content.readline() assert b'\r\n' == clrf, \ 'reader did not read all the data or it is malformed' return chunk async def _read_chunk_from_length(self, size: int) -> bytes: # Reads body part content chunk of the specified size. # The body part must has Content-Length header with proper value. 
assert self._length is not None, \ 'Content-Length required for chunked read' chunk_size = min(size, self._length - self._read_bytes) chunk = await self._content.read(chunk_size) return chunk async def _read_chunk_from_stream(self, size: int) -> bytes: # Reads content chunk of body part with unknown length. # The Content-Length header for body part is not necessary. assert size >= len(self._boundary) + 2, \ 'Chunk size must be greater or equal than boundary length + 2' first_chunk = self._prev_chunk is None if first_chunk: self._prev_chunk = await self._content.read(size) chunk = await self._content.read(size) self._content_eof += int(self._content.at_eof()) assert self._content_eof < 3, "Reading after EOF" assert self._prev_chunk is not None window = self._prev_chunk + chunk sub = b'\r\n' + self._boundary if first_chunk: idx = window.find(sub) else: idx = window.find(sub, max(0, len(self._prev_chunk) - len(sub))) if idx >= 0: # pushing boundary back to content with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) self._content.unread_data(window[idx:]) if size > idx: self._prev_chunk = self._prev_chunk[:idx] chunk = window[len(self._prev_chunk):idx] if not chunk: self._at_eof = True result = self._prev_chunk self._prev_chunk = chunk return result async def readline(self) -> bytes: """Reads body part by line by line.""" if self._at_eof: return b'' if self._unread: line = self._unread.popleft() else: line = await self._content.readline() if line.startswith(self._boundary): # the very last boundary may not come with \r\n, # so set single rules for everyone sline = line.rstrip(b'\r\n') boundary = self._boundary last_boundary = self._boundary + b'--' # ensure that we read exactly the boundary, not something alike if sline == boundary or sline == last_boundary: self._at_eof = True self._unread.append(line) return b'' else: next_line = await self._content.readline() if next_line.startswith(self._boundary): line = line[:-2] # strip CRLF but only once self._unread.append(next_line) return line async def release(self) -> None: """Like read(), but reads all the data to the void.""" if self._at_eof: return while not self._at_eof: await self.read_chunk(self.chunk_size) async def text(self, *, encoding: Optional[str]=None) -> str: """Like read(), but assumes that body part contains text data.""" data = await self.read(decode=True) # see https://www.w3.org/TR/html5/forms.html#multipart/form-data-encoding-algorithm # NOQA # and https://dvcs.w3.org/hg/xhr/raw-file/tip/Overview.html#dom-xmlhttprequest-send # NOQA encoding = encoding or self.get_charset(default='utf-8') return data.decode(encoding) async def json(self, *, encoding: Optional[str]=None) -> Any: """Like read(), but assumes that body parts contains JSON data.""" data = await self.read(decode=True) if not data: return None encoding = encoding or self.get_charset(default='utf-8') return json.loads(data.decode(encoding)) async def form(self, *, encoding: Optional[str]=None) -> List[Tuple[str, str]]: """Like read(), but assumes that body parts contains form urlencoded data. 
""" data = await self.read(decode=True) if not data: return [] if encoding is not None: real_encoding = encoding else: real_encoding = self.get_charset(default='utf-8') return parse_qsl(data.rstrip().decode(real_encoding), keep_blank_values=True, encoding=real_encoding) def at_eof(self) -> bool: """Returns True if the boundary was reached or False otherwise.""" return self._at_eof def decode(self, data: bytes) -> bytes: """Decodes data according the specified Content-Encoding or Content-Transfer-Encoding headers value. """ if CONTENT_TRANSFER_ENCODING in self.headers: data = self._decode_content_transfer(data) if CONTENT_ENCODING in self.headers: return self._decode_content(data) return data def _decode_content(self, data: bytes) -> bytes: encoding = cast(str, self.headers[CONTENT_ENCODING]).lower() if encoding == 'deflate': return zlib.decompress(data, -zlib.MAX_WBITS) elif encoding == 'gzip': return zlib.decompress(data, 16 + zlib.MAX_WBITS) elif encoding == 'identity': return data else: raise RuntimeError('unknown content encoding: {}'.format(encoding)) def _decode_content_transfer(self, data: bytes) -> bytes: encoding = cast(str, self.headers[CONTENT_TRANSFER_ENCODING]).lower() if encoding == 'base64': return base64.b64decode(data) elif encoding == 'quoted-printable': return binascii.a2b_qp(data) elif encoding in ('binary', '8bit', '7bit'): return data else: raise RuntimeError('unknown content transfer encoding: {}' ''.format(encoding)) def get_charset(self, default: str) -> str: """Returns charset parameter from Content-Type header or default.""" ctype = self.headers.get(CONTENT_TYPE, '') mimetype = parse_mimetype(ctype) return mimetype.parameters.get('charset', default) @reify def name(self) -> Optional[str]: """Returns name specified in Content-Disposition header or None if missed or header is malformed. """ _, params = parse_content_disposition( self.headers.get(CONTENT_DISPOSITION)) return content_disposition_filename(params, 'name') @reify def filename(self) -> Optional[str]: """Returns filename specified in Content-Disposition header or None if missed or header is malformed. """ _, params = parse_content_disposition( self.headers.get(CONTENT_DISPOSITION)) return content_disposition_filename(params, 'filename') @payload_type(BodyPartReader, order=Order.try_first) class BodyPartReaderPayload(Payload): def __init__(self, value: BodyPartReader, *args: Any, **kwargs: Any) -> None: super().__init__(value, *args, **kwargs) params = {} # type: Dict[str, str] if value.name is not None: params['name'] = value.name if value.filename is not None: params['filename'] = value.filename if params: self.set_content_disposition('attachment', True, **params) async def write(self, writer: Any) -> None: field = self._value chunk = await field.read_chunk(size=2**16) while chunk: await writer.write(field.decode(chunk)) chunk = await field.read_chunk(size=2**16) class MultipartReader: """Multipart body reader.""" #: Response wrapper, used when multipart readers constructs from response. response_wrapper_cls = MultipartResponseWrapper #: Multipart reader class, used to handle multipart/* body parts. #: None points to type(self) multipart_reader_cls = None #: Body part reader class for non multipart/* content types. 
part_reader_cls = BodyPartReader def __init__(self, headers: Mapping[str, str], content: StreamReader) -> None: self.headers = headers self._boundary = ('--' + self._get_boundary()).encode() self._content = content self._last_part = None self._at_eof = False self._at_bof = True self._unread = [] # type: List[bytes] def __aiter__(self) -> 'MultipartReader': return self async def __anext__(self) -> Any: part = await self.next() if part is None: raise StopAsyncIteration # NOQA return part @classmethod def from_response(cls, response: 'ClientResponse') -> Any: """Constructs reader instance from HTTP response. :param response: :class:`~aiohttp.client.ClientResponse` instance """ obj = cls.response_wrapper_cls(response, cls(response.headers, response.content)) return obj def at_eof(self) -> bool: """Returns True if the final boundary was reached or False otherwise. """ return self._at_eof async def next(self) -> Any: """Emits the next multipart body part.""" # So, if we're at BOF, we need to skip till the boundary. if self._at_eof: return await self._maybe_release_last_part() if self._at_bof: await self._read_until_first_boundary() self._at_bof = False else: await self._read_boundary() if self._at_eof: # we just read the last boundary, nothing to do there return self._last_part = await self.fetch_next_part() return self._last_part async def release(self) -> None: """Reads all the body parts to the void till the final boundary.""" while not self._at_eof: item = await self.next() if item is None: break await item.release() async def fetch_next_part(self) -> Any: """Returns the next body part reader.""" headers = await self._read_headers() return self._get_part_reader(headers) def _get_part_reader(self, headers: 'CIMultiDictProxy[str]') -> Any: """Dispatches the response by the `Content-Type` header, returning suitable reader instance. 
:param dict headers: Response headers """ ctype = headers.get(CONTENT_TYPE, '') mimetype = parse_mimetype(ctype) if mimetype.type == 'multipart': if self.multipart_reader_cls is None: return type(self)(headers, self._content) return self.multipart_reader_cls(headers, self._content) else: return self.part_reader_cls(self._boundary, headers, self._content) def _get_boundary(self) -> str: mimetype = parse_mimetype(self.headers[CONTENT_TYPE]) assert mimetype.type == 'multipart', ( 'multipart/* content type expected' ) if 'boundary' not in mimetype.parameters: raise ValueError('boundary missed for Content-Type: %s' % self.headers[CONTENT_TYPE]) boundary = mimetype.parameters['boundary'] if len(boundary) > 70: raise ValueError('boundary %r is too long (70 chars max)' % boundary) return boundary async def _readline(self) -> bytes: if self._unread: return self._unread.pop() return await self._content.readline() async def _read_until_first_boundary(self) -> None: while True: chunk = await self._readline() if chunk == b'': raise ValueError("Could not find starting boundary %r" % (self._boundary)) chunk = chunk.rstrip() if chunk == self._boundary: return elif chunk == self._boundary + b'--': self._at_eof = True return async def _read_boundary(self) -> None: chunk = (await self._readline()).rstrip() if chunk == self._boundary: pass elif chunk == self._boundary + b'--': self._at_eof = True epilogue = await self._readline() next_line = await self._readline() # the epilogue is expected and then either the end of input or the # parent multipart boundary, if the parent boundary is found then # it should be marked as unread and handed to the parent for # processing if next_line[:2] == b'--': self._unread.append(next_line) # otherwise the request is likely missing an epilogue and both # lines should be passed to the parent for processing # (this handles the old behavior gracefully) else: self._unread.extend([next_line, epilogue]) else: raise ValueError('Invalid boundary %r, expected %r' % (chunk, self._boundary)) async def _read_headers(self) -> 'CIMultiDictProxy[str]': lines = [b''] while True: chunk = await self._content.readline() chunk = chunk.strip() lines.append(chunk) if not chunk: break parser = HeadersParser() headers, raw_headers = parser.parse_headers(lines) return headers async def _maybe_release_last_part(self) -> None: """Ensures that the last read body part is read completely.""" if self._last_part is not None: if not self._last_part.at_eof(): await self._last_part.release() self._unread.extend(self._last_part._unread) self._last_part = None _Part = Tuple[Payload, str, str] class MultipartWriter(Payload): """Multipart body writer.""" def __init__(self, subtype: str='mixed', boundary: Optional[str]=None) -> None: boundary = boundary if boundary is not None else uuid.uuid4().hex # The underlying Payload API demands a str (utf-8), not bytes, # so we need to ensure we don't lose anything during conversion. # As a result, require the boundary to be ASCII only. # In both situations. 
try: self._boundary = boundary.encode('ascii') except UnicodeEncodeError: raise ValueError('boundary should contain ASCII only chars') \ from None ctype = ('multipart/{}; boundary={}' .format(subtype, self._boundary_value)) super().__init__(None, content_type=ctype) self._parts = [] # type: List[_Part] # noqa self._headers = CIMultiDict() # type: CIMultiDict[str] assert self.content_type is not None self._headers[CONTENT_TYPE] = self.content_type def __enter__(self) -> 'MultipartWriter': return self def __exit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> None: pass def __iter__(self) -> Iterator[_Part]: return iter(self._parts) def __len__(self) -> int: return len(self._parts) _valid_tchar_regex = re.compile(br"\A[!#$%&'*+\-.^_`|~\w]+\Z") _invalid_qdtext_char_regex = re.compile(br"[\x00-\x08\x0A-\x1F\x7F]") @property def _boundary_value(self) -> str: """Wrap boundary parameter value in quotes, if necessary. Reads self.boundary and returns a unicode sting. """ # Refer to RFCs 7231, 7230, 5234. # # parameter = token "=" ( token / quoted-string ) # token = 1*tchar # quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE # qdtext = HTAB / SP / %x21 / %x23-5B / %x5D-7E / obs-text # obs-text = %x80-FF # quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text ) # tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" # / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~" # / DIGIT / ALPHA # ; any VCHAR, except delimiters # VCHAR = %x21-7E value = self._boundary if re.match(self._valid_tchar_regex, value): return value.decode('ascii') # cannot fail if re.search(self._invalid_qdtext_char_regex, value): raise ValueError("boundary value contains invalid characters") # escape %x5C and %x22 quoted_value_content = value.replace(b'\\', b'\\\\') quoted_value_content = quoted_value_content.replace(b'"', b'\\"') return '"' + quoted_value_content.decode('ascii') + '"' @property def boundary(self) -> str: return self._boundary.decode('ascii') def append( self, obj: Any, headers: Optional['MultiMapping[str]']=None ) -> Payload: if headers is None: headers = CIMultiDict() if isinstance(obj, Payload): if obj.headers is not None: obj.headers.update(headers) else: if isinstance(headers, CIMultiDict): obj._headers = headers else: obj._headers = CIMultiDict(headers) return self.append_payload(obj) else: try: return self.append_payload(get_payload(obj, headers=headers)) except LookupError: raise TypeError def append_payload(self, payload: Payload) -> Payload: """Adds a new body part to multipart writer.""" # content-type assert payload.headers is not None if CONTENT_TYPE not in payload.headers: assert payload.content_type is not None payload.headers[CONTENT_TYPE] = payload.content_type # compression encoding = payload.headers.get(CONTENT_ENCODING, '').lower() # type: Optional[str] # noqa if encoding and encoding not in ('deflate', 'gzip', 'identity'): raise RuntimeError('unknown content encoding: {}'.format(encoding)) if encoding == 'identity': encoding = None # te encoding te_encoding = payload.headers.get( CONTENT_TRANSFER_ENCODING, '').lower() # type: Optional[str] # noqa if te_encoding not in ('', 'base64', 'quoted-printable', 'binary'): raise RuntimeError('unknown content transfer encoding: {}' ''.format(te_encoding)) if te_encoding == 'binary': te_encoding = None # size size = payload.size if size is not None and not (encoding or te_encoding): payload.headers[CONTENT_LENGTH] = str(size) self._parts.append((payload, encoding, te_encoding)) # type: ignore 
return payload def append_json( self, obj: Any, headers: Optional['MultiMapping[str]']=None ) -> Payload: """Helper to append JSON part.""" if headers is None: headers = CIMultiDict() return self.append_payload(JsonPayload(obj, headers=headers)) def append_form( self, obj: Union[Sequence[Tuple[str, str]], Mapping[str, str]], headers: Optional['MultiMapping[str]']=None ) -> Payload: """Helper to append form urlencoded part.""" assert isinstance(obj, (Sequence, Mapping)) if headers is None: headers = CIMultiDict() if isinstance(obj, Mapping): obj = list(obj.items()) data = urlencode(obj, doseq=True) return self.append_payload( StringPayload(data, headers=headers, content_type='application/x-www-form-urlencoded')) @property def size(self) -> Optional[int]: """Size of the payload.""" if not self._parts: return 0 total = 0 for part, encoding, te_encoding in self._parts: if encoding or te_encoding or part.size is None: return None total += int( 2 + len(self._boundary) + 2 + # b'--'+self._boundary+b'\r\n' part.size + len(part._binary_headers) + 2 # b'\r\n' ) total += 2 + len(self._boundary) + 4 # b'--'+self._boundary+b'--\r\n' return total async def write(self, writer: Any, close_boundary: bool=True) -> None: """Write body.""" if not self._parts: return for part, encoding, te_encoding in self._parts: await writer.write(b'--' + self._boundary + b'\r\n') await writer.write(part._binary_headers) if encoding or te_encoding: w = MultipartPayloadWriter(writer) if encoding: w.enable_compression(encoding) if te_encoding: w.enable_encoding(te_encoding) await part.write(w) # type: ignore await w.write_eof() else: await part.write(writer) await writer.write(b'\r\n') if close_boundary: await writer.write(b'--' + self._boundary + b'--\r\n') class MultipartPayloadWriter: def __init__(self, writer: Any) -> None: self._writer = writer self._encoding = None # type: Optional[str] self._compress = None # type: Any self._encoding_buffer = None # type: Optional[bytearray] def enable_encoding(self, encoding: str) -> None: if encoding == 'base64': self._encoding = encoding self._encoding_buffer = bytearray() elif encoding == 'quoted-printable': self._encoding = 'quoted-printable' def enable_compression(self, encoding: str='deflate') -> None: zlib_mode = (16 + zlib.MAX_WBITS if encoding == 'gzip' else -zlib.MAX_WBITS) self._compress = zlib.compressobj(wbits=zlib_mode) async def write_eof(self) -> None: if self._compress is not None: chunk = self._compress.flush() if chunk: self._compress = None await self.write(chunk) if self._encoding == 'base64': if self._encoding_buffer: await self._writer.write(base64.b64encode( self._encoding_buffer)) async def write(self, chunk: bytes) -> None: if self._compress is not None: if chunk: chunk = self._compress.compress(chunk) if not chunk: return if self._encoding == 'base64': buf = self._encoding_buffer assert buf is not None buf.extend(chunk) if buf: div, mod = divmod(len(buf), 3) enc_chunk, self._encoding_buffer = ( buf[:div * 3], buf[div * 3:]) if enc_chunk: b64chunk = base64.b64encode(enc_chunk) await self._writer.write(b64chunk) elif self._encoding == 'quoted-printable': await self._writer.write(binascii.b2a_qp(chunk)) else: await self._writer.write(chunk)
arthurdarcet/aiohttp
aiohttp/multipart.py
Python
apache-2.0
32,819
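For orientation, a minimal client-side sketch of how the MultipartWriter defined above is typically driven; the URL, field name and payload values are placeholders, not part of aiohttp itself.

import asyncio

import aiohttp


async def upload():
    # Placeholder endpoint; any server accepting multipart POSTs will do.
    url = 'http://httpbin.org/post'

    with aiohttp.MultipartWriter('form-data') as mpwriter:
        part = mpwriter.append('plain body text')
        part.set_content_disposition('form-data', name='text')
        mpwriter.append_json({'key': 'value'})
        mpwriter.append_form([('field', 'value')])

    async with aiohttp.ClientSession() as session:
        async with session.post(url, data=mpwriter) as resp:
            print(resp.status)


asyncio.run(upload())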
__author__ = 'bett'

import MySQLdb as db
import pandas.io.sql as psql

from config import db_config


def getData(symbols, start, end):
    # Column names (symbol, trade_date) are assumptions; adjust to the
    # actual tbl_historical schema.
    database = db.connect(**db_config)
    try:
        query = ("SELECT * FROM tbl_historical "
                 "WHERE symbol = %s AND trade_date BETWEEN %s AND %s")
        return psql.read_sql(query, database, params=(symbols, start, end))
    finally:
        database.close()


if __name__ == '__main__':
    print(getData('000009.sz', '2013-1-1', '2015-4-8'))
dingmingliu/quanttrade
quanttrade/core/data.py
Python
apache-2.0
350
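The config.db_config imported by data.py above is not included in this record; a plausible shape, assuming the standard MySQLdb connection keywords, would be:

# Hypothetical config.py for the getData() helper above; values are placeholders.
db_config = {
    'host': 'localhost',
    'user': 'quant',
    'passwd': 'secret',
    'db': 'market',
    'charset': 'utf8',
}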
import SocketServer
from abc import ABCMeta, abstractmethod
import json

import requests
import six

from .. import LOG as _LOG
from ..signal.signal import DEFAULT_ORCHESTRATOR_URL
from ..signal.event import LogEvent

LOG = _LOG.getChild(__name__)


@six.add_metaclass(ABCMeta)
class SyslogInspectorBase(object):

    def __init__(
            self,
            udp_port=10514,
            orchestrator_rest_url=DEFAULT_ORCHESTRATOR_URL,
            entity_id='_earthquake_syslog_inspector'):
        LOG.info('Syslog UDP port: %d', udp_port)
        LOG.info('Orchestrator REST URL: %s', orchestrator_rest_url)
        self.orchestrator_rest_url = orchestrator_rest_url
        LOG.info('Inspector System Entity ID: %s', entity_id)
        self.entity_id = entity_id

        that = self

        class SyslogUDPHandler(SocketServer.BaseRequestHandler):
            def handle(self):
                data = bytes.decode(self.request[0].strip(), 'utf-8')
                that.on_syslog_recv(
                    self.client_address[0], self.client_address[1], data)

        self.syslog_server = SocketServer.UDPServer(
            ('0.0.0.0', udp_port), SyslogUDPHandler)

    def start(self):
        self.syslog_server.serve_forever()

    def on_syslog_recv(self, ip, port, data):
        LOG.info('SYSLOG from %s:%d: "%s"', ip, port, data)
        event = self.map_syslog_to_event(ip, port, data)
        assert event is None or isinstance(event, LogEvent)
        if event:
            try:
                self.send_event_to_orchestrator(event)
            except Exception as e:
                LOG.error('cannot send event: %s', event, exc_info=True)

    def send_event_to_orchestrator(self, event):
        event_jsdict = event.to_jsondict()
        headers = {'content-type': 'application/json'}
        post_url = self.orchestrator_rest_url + \
            '/events/' + self.entity_id + '/' + event.uuid
        # LOG.debug('POST %s', post_url)
        r = requests.post(
            post_url, data=json.dumps(event_jsdict), headers=headers)

    @abstractmethod
    def map_syslog_to_event(self, ip, port, data):
        """
        :param ip:
        :param port:
        :param data:
        :return: None or LogEvent
        """
        pass


class BasicSyslogInspector(SyslogInspectorBase):

    # @Override
    def map_syslog_to_event(self, ip, port, data):
        entity = 'entity-%s:%d' % (ip, port)
        event = LogEvent.from_message(entity, data)
        return event


if __name__ == "__main__":
    insp = BasicSyslogInspector()
    insp.start()
AkihiroSuda/earthquake
pyearthquake/inspector/syslog.py
Python
apache-2.0
2,555
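A sketch of a project-specific inspector built on SyslogInspectorBase above; the 'ERROR' filter and the import paths (taken from this record's module path) are illustrative assumptions.

from pyearthquake.inspector.syslog import SyslogInspectorBase
from pyearthquake.signal.event import LogEvent


class ErrorOnlySyslogInspector(SyslogInspectorBase):
    # @Override
    def map_syslog_to_event(self, ip, port, data):
        # Forward only lines that look like errors; everything else is dropped.
        if 'ERROR' not in data:
            return None
        entity = 'entity-%s:%d' % (ip, port)
        return LogEvent.from_message(entity, data)


if __name__ == '__main__':
    ErrorOnlySyslogInspector(udp_port=10514).start()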
#!/usr/bin/python


class Solution:
    # @param {string} s
    # @return {boolean}
    def isValid(self, s):
        slist = ' '.join(s).split(' ')
        print slist
        stack = []
        for item in slist:
            if item in ('[', '{', '('):
                stack.append(item)
            else:
                if len(stack) == 0:
                    return False
                elif stack[-1:][0] == self.rev(item):
                    stack = stack[:-1]
                else:
                    return False
        if len(stack) == 0:
            return True
        else:
            return False

    def rev(self, item):
        if item == ']':
            return '['
        elif item == '}':
            return '{'
        else:
            return '('


s = Solution()
print s.isValid(']')
aertoria/MiscCode
stack.py
Python
apache-2.0
585
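A few quick sanity checks for the bracket matcher above, meant to be appended to the bottom of stack.py; expected results are inferred from the code, not from an accompanying test suite.

# Quick sanity checks, appended to stack.py above (Python 2).
s = Solution()
assert s.isValid('()[]{}')
assert not s.isValid('([)]')
assert not s.isValid(']')
assert s.isValid('{[]}')
print 'all bracket checks passed'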
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import sys import unittest from copy import deepcopy from parameterized import parameterized from airflow.contrib.operators.ecs_operator import ECSOperator from airflow.exceptions import AirflowException from tests.compat import mock RESPONSE_WITHOUT_FAILURES = { "failures": [], "tasks": [ { "containers": [ { "containerArn": "arn:aws:ecs:us-east-1:012345678910:container/e1ed7aac-d9b2-4315-8726-d2432bf11868", "lastStatus": "PENDING", "name": "wordpress", "taskArn": "arn:aws:ecs:us-east-1:012345678910:task/d8c67b3c-ac87-4ffe-a847-4785bc3a8b55" } ], "desiredStatus": "RUNNING", "lastStatus": "PENDING", "taskArn": "arn:aws:ecs:us-east-1:012345678910:task/d8c67b3c-ac87-4ffe-a847-4785bc3a8b55", "taskDefinitionArn": "arn:aws:ecs:us-east-1:012345678910:task-definition/hello_world:11" } ] } class TestECSOperator(unittest.TestCase): @mock.patch('airflow.contrib.operators.ecs_operator.AwsHook') def setUp(self, aws_hook_mock): self.aws_hook_mock = aws_hook_mock self.ecs_operator_args = { 'task_id': 'task', 'task_definition': 't', 'cluster': 'c', 'overrides': {}, 'aws_conn_id': None, 'region_name': 'eu-west-1', 'group': 'group', 'placement_constraints': [{ 'expression': 'attribute:ecs.instance-type =~ t2.*', 'type': 'memberOf' }], 'network_configuration': { 'awsvpcConfiguration': { 'securityGroups': ['sg-123abc'], 'subnets': ['subnet-123456ab'] } } } self.ecs = ECSOperator(**self.ecs_operator_args) def test_init(self): self.assertEqual(self.ecs.region_name, 'eu-west-1') self.assertEqual(self.ecs.task_definition, 't') self.assertEqual(self.ecs.aws_conn_id, None) self.assertEqual(self.ecs.cluster, 'c') self.assertEqual(self.ecs.overrides, {}) self.assertEqual(self.ecs.hook, self.aws_hook_mock.return_value) self.aws_hook_mock.assert_called_once_with(aws_conn_id=None) def test_template_fields_overrides(self): self.assertEqual(self.ecs.template_fields, ('overrides',)) @parameterized.expand([ ['EC2', None], ['FARGATE', None], ['EC2', {'testTagKey': 'testTagValue'}], ]) @mock.patch.object(ECSOperator, '_wait_for_task_ended') @mock.patch.object(ECSOperator, '_check_success_task') @mock.patch('airflow.contrib.operators.ecs_operator.AwsHook') def test_execute_without_failures(self, launch_type, tags, aws_hook_mock, check_mock, wait_mock): client_mock = aws_hook_mock.return_value.get_client_type.return_value client_mock.run_task.return_value = RESPONSE_WITHOUT_FAILURES ecs = ECSOperator(launch_type=launch_type, tags=tags, **self.ecs_operator_args) ecs.execute(None) aws_hook_mock.return_value.get_client_type.assert_called_once_with('ecs', region_name='eu-west-1') extend_args = {} if launch_type == 'FARGATE': extend_args['platformVersion'] = 'LATEST' if tags: extend_args['tags'] = [{'key': k, 'value': v} for (k, v) in 
tags.items()] client_mock.run_task.assert_called_once_with( cluster='c', launchType=launch_type, overrides={}, startedBy=mock.ANY, # Can by 'airflow' or 'Airflow' taskDefinition='t', group='group', placementConstraints=[ { 'expression': 'attribute:ecs.instance-type =~ t2.*', 'type': 'memberOf' } ], networkConfiguration={ 'awsvpcConfiguration': { 'securityGroups': ['sg-123abc'], 'subnets': ['subnet-123456ab'] } }, **extend_args ) wait_mock.assert_called_once_with() check_mock.assert_called_once_with() self.assertEqual(ecs.arn, 'arn:aws:ecs:us-east-1:012345678910:task/d8c67b3c-ac87-4ffe-a847-4785bc3a8b55') def test_execute_with_failures(self): client_mock = self.aws_hook_mock.return_value.get_client_type.return_value resp_failures = deepcopy(RESPONSE_WITHOUT_FAILURES) resp_failures['failures'].append('dummy error') client_mock.run_task.return_value = resp_failures with self.assertRaises(AirflowException): self.ecs.execute(None) self.aws_hook_mock.return_value.get_client_type.assert_called_once_with('ecs', region_name='eu-west-1') client_mock.run_task.assert_called_once_with( cluster='c', launchType='EC2', overrides={}, startedBy=mock.ANY, # Can by 'airflow' or 'Airflow' taskDefinition='t', group='group', placementConstraints=[ { 'expression': 'attribute:ecs.instance-type =~ t2.*', 'type': 'memberOf' } ], networkConfiguration={ 'awsvpcConfiguration': { 'securityGroups': ['sg-123abc'], 'subnets': ['subnet-123456ab'], } } ) def test_wait_end_tasks(self): client_mock = mock.Mock() self.ecs.arn = 'arn' self.ecs.client = client_mock self.ecs._wait_for_task_ended() client_mock.get_waiter.assert_called_once_with('tasks_stopped') client_mock.get_waiter.return_value.wait.assert_called_once_with( cluster='c', tasks=['arn']) self.assertEqual( sys.maxsize, client_mock.get_waiter.return_value.config.max_attempts) def test_check_success_tasks_raises(self): client_mock = mock.Mock() self.ecs.arn = 'arn' self.ecs.client = client_mock client_mock.describe_tasks.return_value = { 'tasks': [{ 'containers': [{ 'name': 'foo', 'lastStatus': 'STOPPED', 'exitCode': 1 }] }] } with self.assertRaises(Exception) as e: self.ecs._check_success_task() # Ordering of str(dict) is not guaranteed. self.assertIn("This task is not in success state ", str(e.exception)) self.assertIn("'name': 'foo'", str(e.exception)) self.assertIn("'lastStatus': 'STOPPED'", str(e.exception)) self.assertIn("'exitCode': 1", str(e.exception)) client_mock.describe_tasks.assert_called_once_with( cluster='c', tasks=['arn']) def test_check_success_tasks_raises_pending(self): client_mock = mock.Mock() self.ecs.client = client_mock self.ecs.arn = 'arn' client_mock.describe_tasks.return_value = { 'tasks': [{ 'containers': [{ 'name': 'container-name', 'lastStatus': 'PENDING' }] }] } with self.assertRaises(Exception) as e: self.ecs._check_success_task() # Ordering of str(dict) is not guaranteed. 
self.assertIn("This task is still pending ", str(e.exception)) self.assertIn("'name': 'container-name'", str(e.exception)) self.assertIn("'lastStatus': 'PENDING'", str(e.exception)) client_mock.describe_tasks.assert_called_once_with( cluster='c', tasks=['arn']) def test_check_success_tasks_raises_multiple(self): client_mock = mock.Mock() self.ecs.client = client_mock self.ecs.arn = 'arn' client_mock.describe_tasks.return_value = { 'tasks': [{ 'containers': [{ 'name': 'foo', 'exitCode': 1 }, { 'name': 'bar', 'lastStatus': 'STOPPED', 'exitCode': 0 }] }] } self.ecs._check_success_task() client_mock.describe_tasks.assert_called_once_with( cluster='c', tasks=['arn']) def test_host_terminated_raises(self): client_mock = mock.Mock() self.ecs.client = client_mock self.ecs.arn = 'arn' client_mock.describe_tasks.return_value = { 'tasks': [{ 'stoppedReason': 'Host EC2 (instance i-1234567890abcdef) terminated.', "containers": [ { "containerArn": "arn:aws:ecs:us-east-1:012345678910:container/e1ed7aac-d9b2-4315-8726-d2432bf11868", # noqa: E501 "lastStatus": "RUNNING", "name": "wordpress", "taskArn": "arn:aws:ecs:us-east-1:012345678910:task/d8c67b3c-ac87-4ffe-a847-4785bc3a8b55" # noqa: E501 } ], "desiredStatus": "STOPPED", "lastStatus": "STOPPED", "taskArn": "arn:aws:ecs:us-east-1:012345678910:task/d8c67b3c-ac87-4ffe-a847-4785bc3a8b55", # noqa: E501 "taskDefinitionArn": "arn:aws:ecs:us-east-1:012345678910:task-definition/hello_world:11" # noqa: E501 }] } with self.assertRaises(AirflowException) as e: self.ecs._check_success_task() self.assertIn( "The task was stopped because the host instance terminated:", str(e.exception)) self.assertIn("Host EC2 (", str(e.exception)) self.assertIn(") terminated", str(e.exception)) client_mock.describe_tasks.assert_called_once_with( cluster='c', tasks=['arn']) def test_check_success_task_not_raises(self): client_mock = mock.Mock() self.ecs.client = client_mock self.ecs.arn = 'arn' client_mock.describe_tasks.return_value = { 'tasks': [{ 'containers': [{ 'name': 'container-name', 'lastStatus': 'STOPPED', 'exitCode': 0 }] }] } self.ecs._check_success_task() client_mock.describe_tasks.assert_called_once_with( cluster='c', tasks=['arn']) if __name__ == '__main__': unittest.main()
Fokko/incubator-airflow
tests/contrib/operators/test_ecs_operator.py
Python
apache-2.0
11,678
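For context, a sketch of how the operator under test is usually wired into a DAG; the cluster, task definition and connection values below are placeholders.

from datetime import datetime

from airflow import DAG
from airflow.contrib.operators.ecs_operator import ECSOperator

# Placeholder names: adjust cluster, task definition and connection ids.
with DAG(dag_id='ecs_example',
         start_date=datetime(2019, 1, 1),
         schedule_interval=None) as dag:
    run_task = ECSOperator(
        task_id='run_hello_world',
        task_definition='hello_world',
        cluster='c',
        overrides={},
        aws_conn_id='aws_default',
        region_name='eu-west-1',
        launch_type='EC2',
    )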
"""A spectral clusterer class to perform clustering.""" import numpy as np from spectralcluster import constraint from spectralcluster import custom_distance_kmeans from spectralcluster import laplacian from spectralcluster import refinement from spectralcluster import utils RefinementName = refinement.RefinementName LaplacianType = laplacian.LaplacianType ConstraintName = constraint.ConstraintName EigenGapType = utils.EigenGapType class SpectralClusterer: """Spectral clustering class.""" def __init__(self, min_clusters=None, max_clusters=None, refinement_options=None, autotune=None, laplacian_type=None, stop_eigenvalue=1e-2, row_wise_renorm=False, custom_dist="cosine", max_iter=300, constraint_options=None, eigengap_type=EigenGapType.Ratio, affinity_function=utils.compute_affinity_matrix, post_eigen_cluster_function=custom_distance_kmeans.run_kmeans): """Constructor of the clusterer. Args: min_clusters: minimal number of clusters allowed (only effective if not None) max_clusters: maximal number of clusters allowed (only effective if not None), can be used together with min_clusters to fix the number of clusters refinement_options: a RefinementOptions object that contains refinement arguments for the affinity matrix. If None, we will not refine autotune: an AutoTune object to automatically search p_percentile laplacian_type: a LaplacianType. If None, we do not use a laplacian matrix stop_eigenvalue: when computing the number of clusters using Eigen Gap, we do not look at eigen values smaller than this value row_wise_renorm: if True, perform row-wise re-normalization on the spectral embeddings custom_dist: str or callable. custom distance measure for k-means. If a string, "cosine", "euclidean", "mahalanobis", or any other distance functions defined in scipy.spatial.distance can be used max_iter: the maximum number of iterations for the custom k-means constraint_options: a ConstraintOptions object that contains constraint arguments eigengap_type: the type of the eigengap computation affinity_function: a function to compute the affinity matrix from the embeddings. This defaults to (cos(x,y)+1)/2 post_eigen_cluster_function: a function to cluster the spectral embeddings after the eigenvalue computations. This function must have the same signature as custom_distance_kmeans.run_kmeans """ self.min_clusters = min_clusters self.max_clusters = max_clusters if not refinement_options: self.refinement_options = refinement.RefinementOptions() else: self.refinement_options = refinement_options self.autotune = autotune self.laplacian_type = laplacian_type self.row_wise_renorm = row_wise_renorm self.stop_eigenvalue = stop_eigenvalue self.custom_dist = custom_dist self.max_iter = max_iter self.constraint_options = constraint_options self.eigengap_type = eigengap_type self.affinity_function = affinity_function self.post_eigen_cluster_function = post_eigen_cluster_function def _compute_eigenvectors_ncluster(self, affinity, constraint_matrix=None): """Perform eigen decomposition and estiamte the number of clusters. Perform affinity refinement, eigen decomposition and sort eigenvectors by the real part of eigenvalues. Estimate the number of clusters using EigenGap principle. Args: affinity: the affinity matrix of input data constraint_matrix: numpy array of shape (n_samples, n_samples). The constraint matrix with prior information Returns: eigenvectors: sorted eigenvectors. 
numpy array of shape (n_samples, n_samples) n_clusters: number of clusters as an integer max_delta_norm: normalized maximum eigen gap """ # Perform refinement operations on the affinity matrix. for refinement_name in self.refinement_options.refinement_sequence: refinement_operator = self.refinement_options.get_refinement_operator( refinement_name) affinity = refinement_operator.refine(affinity) if (self.constraint_options and not self.constraint_options.apply_before_refinement): # Perform the constraint operation after refinement affinity = self.constraint_options.constraint_operator.adjust_affinity( affinity, constraint_matrix) if not self.laplacian_type or self.laplacian_type == LaplacianType.Affinity: # Perform eigen decomposion. (eigenvalues, eigenvectors) = utils.compute_sorted_eigenvectors(affinity) # Get number of clusters. n_clusters, max_delta_norm = utils.compute_number_of_clusters( eigenvalues, max_clusters=self.max_clusters, stop_eigenvalue=self.stop_eigenvalue, eigengap_type=self.eigengap_type, descend=True) else: # Compute Laplacian matrix laplacian_norm = laplacian.compute_laplacian( affinity, laplacian_type=self.laplacian_type) # Perform eigen decomposion. Eigen values are sorted in an ascending # order (eigenvalues, eigenvectors) = utils.compute_sorted_eigenvectors( laplacian_norm, descend=False) # Get number of clusters. Eigen values are sorted in an ascending order n_clusters, max_delta_norm = utils.compute_number_of_clusters( eigenvalues, max_clusters=self.max_clusters, eigengap_type=self.eigengap_type, descend=False) return eigenvectors, n_clusters, max_delta_norm def predict(self, embeddings, constraint_matrix=None): """Perform spectral clustering on data embeddings. The spectral clustering is performed on an affinity matrix. Args: embeddings: numpy array of shape (n_samples, n_features) constraint_matrix: numpy array of shape (n_samples, n_samples). The constraint matrix with prior information Returns: labels: numpy array of shape (n_samples,) Raises: TypeError: if embeddings has wrong type ValueError: if embeddings has wrong shape """ if not isinstance(embeddings, np.ndarray): raise TypeError("embeddings must be a numpy array") if len(embeddings.shape) != 2: raise ValueError("embeddings must be 2-dimensional") # Compute affinity matrix. affinity = self.affinity_function(embeddings) if (self.constraint_options and self.constraint_options.apply_before_refinement): # Perform the constraint operation before refinement affinity = self.constraint_options.constraint_operator.adjust_affinity( affinity, constraint_matrix) if self.autotune: # Use Auto-tuning method to find a good p_percentile. if (RefinementName.RowWiseThreshold not in self.refinement_options.refinement_sequence): raise ValueError( "AutoTune is only effective when the refinement sequence" "contains RowWiseThreshold") def p_percentile_to_ratio(p_percentile): """Compute the `ratio` given a `p_percentile` value.""" self.refinement_options.p_percentile = p_percentile (eigenvectors, n_clusters, max_delta_norm) = self._compute_eigenvectors_ncluster( affinity, constraint_matrix) ratio = np.sqrt(1 - p_percentile) / max_delta_norm return ratio, eigenvectors, n_clusters eigenvectors, n_clusters, _ = self.autotune.tune(p_percentile_to_ratio) else: # Do not use Auto-tune. eigenvectors, n_clusters, _ = self._compute_eigenvectors_ncluster( affinity, constraint_matrix) if self.min_clusters is not None: n_clusters = max(n_clusters, self.min_clusters) # Get spectral embeddings. 
spectral_embeddings = eigenvectors[:, :n_clusters] if self.row_wise_renorm: # Perform row wise re-normalization. rows_norm = np.linalg.norm(spectral_embeddings, axis=1, ord=2) spectral_embeddings = spectral_embeddings / np.reshape( rows_norm, (spectral_embeddings.shape[0], 1)) # Run clustering algorithm on spectral embeddings. This defaults # to customized K-means. labels = self.post_eigen_cluster_function( spectral_embeddings=spectral_embeddings, n_clusters=n_clusters, custom_dist=self.custom_dist, max_iter=self.max_iter) return labels
wq2012/SpectralCluster
spectralcluster/spectral_clusterer.py
Python
apache-2.0
8,653
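A minimal usage sketch for SpectralClusterer above; the embeddings are synthetic stand-ins (two orthogonal directions) rather than real speaker embeddings.

import numpy as np

from spectralcluster import spectral_clusterer

# Synthetic embeddings: two groups of 50 samples pointing in orthogonal
# directions, so the default cosine affinity separates them cleanly.
rng = np.random.RandomState(0)
embeddings = np.concatenate([
    rng.normal(loc=[1.0] + [0.0] * 15, scale=0.05, size=(50, 16)),
    rng.normal(loc=[0.0] * 15 + [1.0], scale=0.05, size=(50, 16)),
])

clusterer = spectral_clusterer.SpectralClusterer(min_clusters=2, max_clusters=5)
labels = clusterer.predict(embeddings)
print(labels.shape, np.unique(labels))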
# -*- coding: utf-8 -*- # Copyright 2015 Spanish National Research Council # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import shlex from ooi import exception _MEDIA_TYPE_MAP = collections.OrderedDict([ ('text/plain', 'text'), ('text/occi', 'header') ]) def _quoted_split(s, separator=',', quotes='"'): """Splits a string considering quotes. e.g. _quoted_split('a,"b,c",d') -> ['a', '"b,c"', 'd'] """ splits = [] partial = [] in_quote = None for c in s: if in_quote: if c == in_quote: in_quote = None else: if c in quotes: in_quote = c if not in_quote and c in separator: if partial: splits.append(''.join(partial)) partial = [] else: partial.append(c) if partial: splits.append(''.join(partial)) return splits def _split_unquote(s, separator="="): """Splits a string considering quotes and removing them in the result. e.g. _split_unquote('a="b=d"') -> ['a', 'b=d'] """ lex = shlex.shlex(s, posix=True) lex.commenters = "" lex.whitespace = separator lex.whitespace_split = True return list(lex) class BaseParser(object): def __init__(self, headers, body): self.headers = headers self.body = body def parse(self): raise NotImplemented class TextParser(BaseParser): def parse_categories(self, headers): kind = action = None mixins = collections.Counter() schemes = collections.defaultdict(list) try: categories = headers["Category"] except KeyError: raise exception.OCCIInvalidSchema("No categories") for ctg in _quoted_split(categories): ll = _quoted_split(ctg, "; ") d = {"term": ll[0]} # assumes 1st element => term's value try: d.update(dict([_split_unquote(i) for i in ll[1:]])) except ValueError: raise exception.OCCIInvalidSchema("Unable to parse category") ctg_class = d.get("class", None) ctg_type = '%(scheme)s%(term)s' % d if ctg_class == "kind": if kind is not None: raise exception.OCCIInvalidSchema("Duplicated Kind") kind = ctg_type elif ctg_class == "action": if action is not None: raise exception.OCCIInvalidSchema("Duplicated action") action = ctg_type elif ctg_class == "mixin": mixins[ctg_type] += 1 schemes[d["scheme"]].append(d["term"]) if action and kind: raise exception.OCCIInvalidSchema("Action and kind together?") return { "category": kind or action, "mixins": mixins, "schemes": schemes, } def parse_attributes(self, headers): attrs = {} try: header_attrs = headers["X-OCCI-Attribute"] for attr in _quoted_split(header_attrs): l = _split_unquote(attr) attrs[l[0].strip()] = l[1] except KeyError: pass return attrs def parse_links(self, headers): links = {} try: header_links = headers["Link"] except KeyError: return links for link in _quoted_split(header_links): ll = _quoted_split(link, "; ") # remove the "<" and ">" if ll[0][1] != "<" and ll[0][-1] != ">": raise exception.OCCIInvalidSchema("Unable to parse link") link_dest = ll[0][1:-1] try: d = dict([_split_unquote(i) for i in ll[1:]]) except ValueError: raise exception.OCCIInvalidSchema("Unable to parse link") links[link_dest] = d return links def _convert_to_headers(self): if not self.body: raise exception.OCCIInvalidSchema("No schema found") 
hdrs = collections.defaultdict(list) for l in self.body.splitlines(): hdr, content = l.split(":", 1) hdrs[hdr].append(content) return {hdr: ','.join(hdrs[hdr]) for hdr in hdrs} def _parse(self, headers): obj = self.parse_categories(headers) obj['attributes'] = self.parse_attributes(headers) obj['links'] = self.parse_links(headers) return obj def parse(self): return self._parse(self._convert_to_headers()) class HeaderParser(TextParser): def parse(self): return self._parse(self.headers) _PARSERS_MAP = { "text": TextParser, "header": HeaderParser, } def get_media_map(): return _MEDIA_TYPE_MAP def get_default_parsers(): return _PARSERS_MAP def get_supported_content_types(): return _MEDIA_TYPE_MAP.keys()
LIP-Computing/occi-net
ooi/wsgi/parsers.py
Python
apache-2.0
5,430
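A quick illustration of how TextParser above consumes a text/plain OCCI rendering; the category scheme and attribute values are made up for the example.

from ooi.wsgi.parsers import TextParser

body = (
    'Category: compute; '
    'scheme="http://schemas.ogf.org/occi/infrastructure#"; class="kind"\n'
    'X-OCCI-Attribute: occi.core.title="my vm"\n'
)

parser = TextParser(headers={}, body=body)
print(parser.parse())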
import time


def progress(index, size, for_what='current progress', step=10):
    block_size = int(size / step)
    if index % block_size == 0:
        crt = int(index / block_size)
        print('%s ==> [%d / %d]' % (for_what, crt, step))


def log_time():
    def _log_time(func):
        def wrapper(*args, **kwargs):
            print("start")
            start_time = time.time()
            result = func() if len(args) == len(kwargs) == 0 else func(*args, **kwargs)
            end_time = time.time()
            cost_time = end_time - start_time
            print("[%s] cost time -> %s" % (func.__name__, cost_time))
            return result
        return wrapper
    return _log_time


def line(log_str, style='-'):
    print(style * 12 + str(log_str) + style * 12)


def block(style="-", w=100, h=5):
    for _ in range(h):
        print(style * w)
DannyLee1991/article_cosine_similarity
utils/log.py
Python
apache-2.0
873
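Usage of the logging helpers above, assuming the module is importable as utils.log (per this record's path):

import time

from utils.log import block, line, log_time, progress


@log_time()
def slow_add(a, b):
    time.sleep(0.1)
    return a + b


line('demo')
print(slow_add(1, 2))

total = 100
for i in range(total):
    progress(i, total, for_what='processing', step=10)

block(style='*', w=40, h=2)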
from pytz import utc from datetime import datetime from django.db import models from django.conf import settings from django.core.exceptions import ValidationError from django.dispatch import receiver from django.db.models.signals import pre_save, post_save from django_pgjson.fields import JsonBField from simple_history.models import HistoricalRecords from geokey.core.exceptions import InputError from .base import OBSERVATION_STATUS, COMMENT_STATUS, COMMENT_REVIEW from .manager import ObservationManager, CommentManager from django.contrib.gis.db import models as gis from .base import LOCATION_STATUS from .manager import LocationManager from .manager import MediaFileManager from .base import MEDIA_STATUS class Location(models.Model): """ Represents a location to which an arbitrary number of observations can be attached. """ name = models.CharField(max_length=100, blank=True, null=True) description = models.TextField(blank=True, null=True) geometry = gis.GeometryField(geography=True) created_at = models.DateTimeField(auto_now_add=True) creator = models.ForeignKey(settings.AUTH_USER_MODEL) version = models.IntegerField(default=1) private = models.BooleanField(default=False) private_for_project = models.ForeignKey('projects.Project', null=True) status = models.CharField( choices=LOCATION_STATUS, default=LOCATION_STATUS.active, max_length=20 ) objects = LocationManager() class Observation(models.Model): """ Stores a single observation. """ location = models.ForeignKey( Location, related_name='locations' ) project = models.ForeignKey( 'projects.Project', related_name='observations' ) category = models.ForeignKey('categories.Category') status = models.CharField( choices=OBSERVATION_STATUS, default=OBSERVATION_STATUS.active, max_length=20 ) properties = JsonBField(default={}) created_at = models.DateTimeField(auto_now_add=True) creator = models.ForeignKey( settings.AUTH_USER_MODEL, related_name='creator' ) updated_at = models.DateTimeField(null=True, auto_now_add=True) updator = models.ForeignKey( settings.AUTH_USER_MODEL, related_name='updator', null=True ) version = models.IntegerField(default=1) search_matches = models.TextField() display_field = models.TextField(null=True, blank=True) num_media = models.IntegerField(default=0) num_comments = models.IntegerField(default=0) history = HistoricalRecords() objects = ObservationManager() class Meta: ordering = ['-updated_at', 'id'] @classmethod def validate_partial(self, category, data): """ Validates the data against the category field definition. This is a partial validation, which is used to validate drafts, field values that are not provided are not validated. Parameter --------- category : geokey.categories.models.Category Category that the data is validated against data : dict Dictionary of key-value-pairs; incoming data that is validated Raises ------ ValidationError: when data is invalid """ is_valid = True error_messages = [] for field in category.fields.all().filter(status='active'): if field.key in data and data.get(field.key) is not None: try: field.validate_input(data.get(field.key)) except InputError, error: is_valid = False error_messages.append(error) if not is_valid: raise ValidationError(error_messages) @classmethod def validate_full(self, category, data): """ Validates the data against the category field definition. This is a full validation. 
Parameter --------- category : geokey.categories.models.Category Category that the data is validated against data : dict Dictionary of key-value-pairs; incoming data that is validated Raises ------ ValidationError: when data is invalid """ is_valid = True error_messages = [] for field in category.fields.all().filter(status='active'): try: field.validate_input(data.get(field.key)) except InputError, error: is_valid = False error_messages.append(error) if not is_valid: raise ValidationError(error_messages) @classmethod def create(cls, properties=None, creator=None, location=None, category=None, project=None, status=None): """ Creates and returns a new observation. Validates all fields first and raises a ValidationError if at least one field did not validate. Creates the object if all fields are valid. Parameter --------- properties : dict Attributes of the observation creator : geokey.users.models.User User who creates the observation location : geokey.contributions.models.Location Location of the contribution category : geokey.categories.models.Category Category of the contribution project : geokey.projects.models.Project Project the contribution is assigned to status : str Status of the contribution; one of active, review, pending or draft Return ------ geokey.contributions.models.Observation The observation created """ if not properties: properties = {} location.save() observation = cls.objects.create( location=location, category=category, project=project, properties=properties, creator=creator, status=status ) return observation def update(self, properties, updator, status=None): """ Updates data of the observation Parameter --------- properties : dict Attributes of the observation updator : geokey.users.models.User User who creates the observation status : str Status of the contribution; one of active, review, pending or draft Return ------ geokey.contributions.models.Observation The updated observation """ if status != 'draft': self.version = self.version + 1 if properties: self.properties = properties self.updator = updator self.status = status or self.status self.updated_at = datetime.utcnow().replace(tzinfo=utc) self.save() return self def update_display_field(self): """ Updates the display_field attribute. It uses the display field of the contributions category and adds a string line 'key:value' to the display field property """ display_field = self.category.display_field if display_field is not None: value = None if self.properties: value = self.properties.get(display_field.key) self.display_field = '%s:%s' % (display_field.key, value) def update_count(self): """ Updates the count of media files attached and comments. Should be called each time a file or comment is added/deleted. """ self.num_media = self.files_attached.count() self.num_comments = self.comments.count() self.save() def update_search_matches(self): """ Updates the search_matches property, which is used to filter contributions against a query string. 
It reads all fields from the category and creates a string like 'key1:value#####key2:value2' """ search_matches = [] for field in self.category.fields.all(): if self.properties and field.key in self.properties.keys(): if field.fieldtype == 'LookupField': l_id = self.properties.get(field.key) if l_id is not None: lookup = field.lookupvalues.get(pk=l_id) search_matches.append('%s:%s' % ( field.key, lookup.name )) elif field.fieldtype == 'MultipleLookupField': values = self.properties.get(field.key) if values is not None: lookups = [] for l_id in values: lookups.append( field.lookupvalues.get(pk=l_id).name ) search_matches.append('%s:%s' % ( field.key, ', '.join(lookups)) ) else: term = self.properties.get(field.key) if term is not None: search_matches.append( '%s:%s' % (field.key, term) ) self.search_matches = '#####'.join(search_matches) def delete(self): """ Deletes the observation by setting it's status to DELETED """ self.status = OBSERVATION_STATUS.deleted self.save() @receiver(pre_save, sender=Observation) def pre_save_observation_update(sender, **kwargs): """ Receiver that is called before an observation is saved. Updates search_matches and display_field properties. """ observation = kwargs.get('instance') observation.update_display_field() observation.update_search_matches() class Comment(models.Model): """ A comment that is added to a contribution. """ text = models.TextField() created_at = models.DateTimeField(auto_now_add=True) creator = models.ForeignKey(settings.AUTH_USER_MODEL) commentto = models.ForeignKey('Observation', related_name='comments') respondsto = models.ForeignKey( 'Comment', null=True, blank=True, related_name='responses' ) status = models.CharField( choices=COMMENT_STATUS, default=COMMENT_STATUS.active, max_length=20 ) review_status = models.CharField( choices=COMMENT_REVIEW, null=True, blank=True, max_length=10 ) objects = CommentManager() class Meta: ordering = ['id'] def delete(self): """ Deletes the comment by setting it's status to DELETED """ self.responses.all().delete() self.status = COMMENT_STATUS.deleted self.save() @receiver(post_save, sender=Comment) def post_save_comment_update(sender, **kwargs): """ Receiver that is called after a comment is saved. Updates num_media and num_comments properties. """ comment = kwargs.get('instance') comment.commentto.update_count() class MediaFile(models.Model): """ Base class for all media files. Not to be instaciate; instaciate one of the child classes instead. """ name = models.CharField(max_length=100) description = models.TextField(null=True, blank=True) contribution = models.ForeignKey( 'contributions.Observation', related_name='files_attached' ) creator = models.ForeignKey(settings.AUTH_USER_MODEL) created_at = models.DateTimeField(auto_now_add=True) status = models.CharField( choices=MEDIA_STATUS, default=MEDIA_STATUS.active, max_length=20 ) objects = MediaFileManager() class Meta: ordering = ['id'] @property def type_name(self): """ Returns the type of media file. To be implemented by child classes. Raises ------ NotImplementedError if called on MediaFile base class """ raise NotImplementedError( 'The property `type_name` has not been implemented for this ' 'subclass of `MediaFile`.' ) def delete(self): """ Deletes a file by setting its status to deleted """ self.status = MEDIA_STATUS.deleted self.save() @receiver(post_save, sender=MediaFile) def post_save_media_file_update(sender, **kwargs): """ Receiver that is called after a media file is saved. Updates num_media and num_comments properties. 
""" media_file = kwargs.get('instance') media_file.contribution.update_count() class ImageFile(MediaFile): """ Stores images uploaded by users. """ image = models.ImageField(upload_to='user-uploads/images') class Meta: ordering = ['id'] app_label = 'contributions' @property def type_name(self): """ Returns file type name Returns ------- str 'ImageFile' """ return 'ImageFile' class VideoFile(MediaFile): """ Stores images uploaded by users. """ video = models.ImageField(upload_to='user-uploads/videos') youtube_id = models.CharField(max_length=100) thumbnail = models.ImageField(upload_to='user-uploads/videos', null=True) youtube_link = models.URLField(max_length=255, null=True, blank=True) swf_link = models.URLField(max_length=255, null=True, blank=True) class Meta: ordering = ['id'] @property def type_name(self): """ Returns file type name Returns ------- str 'VideoFile' """ return 'VideoFile'
nagyistoce/geokey
geokey/contributions/models.py
Python
apache-2.0
13,888
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import unittest import numpy as np from operator import mul import paddle.fluid.core as core import paddle.fluid as fluid from op_test import OpTest from testsuite import create_op def group_norm_naive(x, scale, bias, epsilon, groups, data_layout): if data_layout == "NHWC": x = np.transpose(x, (0, 3, 1, 2)) # NHWC => NCHW N, C, H, W = x.shape G = groups x = x.reshape((N * G, -1)) mean = np.mean(x, axis=1, keepdims=True) var = np.var(x, axis=1, keepdims=True) output = (x - mean) / np.sqrt(var + epsilon) output = output.reshape((N, C, H, W)) * scale.reshape( (-1, 1, 1)) + bias.reshape((-1, 1, 1)) if data_layout == "NHWC": output = np.transpose(output, (0, 2, 3, 1)) # NCHW => NHWC return output, mean.reshape((N, G)), var.reshape((N, G)) class TestGroupNormOp(OpTest): def setUp(self): self.op_type = "group_norm" self.data_format = "NCHW" self.dtype = np.float32 self.shape = (2, 4, 3, 3) self.attrs = {'epsilon': 1e-5, 'groups': 2, 'data_layout': "NCHW"} self.compare_between_place = False self.init_test_case() input = np.random.random(self.shape).astype(self.dtype) if self.data_format == "NHWC": input = np.transpose(input, (0, 2, 3, 1)) scale = np.random.random([self.shape[1]]).astype(self.dtype) bias = np.random.random([self.shape[1]]).astype(self.dtype) output, mean, var = group_norm_naive( input, scale, bias, self.attrs['epsilon'], self.attrs['groups'], self.data_format) self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(input), 'Scale': OpTest.np_dtype_to_fluid_dtype(scale), 'Bias': OpTest.np_dtype_to_fluid_dtype(bias) } self.outputs = {'Y': output, 'Mean': mean, 'Variance': var} self.attrs['data_layout'] = self.data_format def test_check_output(self): atol = 1e-4 inplace_atol = 1e-4 place = core.CPUPlace() # add inplace_atol bacause group_norm doesn't ensure computational consistency self.check_output_with_place( place, atol=atol, inplace_atol=inplace_atol) if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) self.check_output_with_place( place, atol=atol, inplace_atol=inplace_atol) def do_compare_between_place(self): if not core.is_compiled_with_cuda(): return place = core.CPUPlace() place2 = core.CUDAPlace(0) self.scope = core.Scope() op_inputs = self.inputs if hasattr(self, "inputs") else dict() op_outputs = self.outputs if hasattr(self, "outputs") else dict() op_attrs = self.attrs if hasattr(self, "attrs") else dict() self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs, op_attrs) inputs_to_check = set(['X', 'Scale', 'Bias']) output_names = 'Y' cpu_grads = self._get_gradient(inputs_to_check, place, output_names, None) gpu_grads = self._get_gradient(inputs_to_check, place2, output_names, None) self._assert_is_close(cpu_grads, gpu_grads, inputs_to_check, 0.005, "Gradient Check On %s" % str(place)) def test_check_grad(self): if self.compare_between_place: self.do_compare_between_place() return place = core.CPUPlace() 
self.check_grad_with_place( place, set(['X', 'Scale', 'Bias']), 'Y', max_relative_error=0.01) if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) self.check_grad_with_place( place, set(['X', 'Scale', 'Bias']), 'Y', max_relative_error=0.01) def init_test_case(self): pass class TestGroupNormOp1(TestGroupNormOp): def init_test_case(self): self.attrs['groups'] = 1 class TestGroupNormOp2(TestGroupNormOp): def init_test_case(self): self.attrs['groups'] = 4 class TestGroupNormOpBigEps1(TestGroupNormOp): def init_test_case(self): self.attrs['groups'] = 1 self.attrs['epsilon'] = 0.5 class TestGroupNormOpBigEps2(TestGroupNormOp): def init_test_case(self): self.attrs['groups'] = 4 self.attrs['epsilon'] = 0.5 class TestGroupNormOpBigEps3(TestGroupNormOp): def init_test_case(self): self.attrs['epsilon'] = 0.5 class TestGroupNormOpLargeData(TestGroupNormOp): def init_test_case(self): self.shape = (2, 32, 64, 64) self.attrs['groups'] = 8 self.compare_between_place = True class TestGroupNormOp1_With_NHWC(TestGroupNormOp): def init_test_case(self): self.attrs['groups'] = 1 self.data_format = "NHWC" class TestGroupNormOp2_With_NHWC(TestGroupNormOp): def init_test_case(self): self.attrs['groups'] = 4 self.data_format = "NHWC" class TestGroupNormOpBigEps1_With_NHWC(TestGroupNormOp): def init_test_case(self): self.attrs['groups'] = 1 self.attrs['epsilon'] = 0.5 self.data_format = "NHWC" class TestGroupNormOpBigEps2_With_NHWC(TestGroupNormOp): def init_test_case(self): self.attrs['groups'] = 4 self.attrs['epsilon'] = 0.5 self.data_format = "NHWC" class TestGroupNormOpBigEps3_With_NHWC(TestGroupNormOp): def init_test_case(self): self.attrs['epsilon'] = 0.5 self.data_format = "NHWC" class TestGroupNormOpLargeData_With_NHWC(TestGroupNormOp): def init_test_case(self): self.shape = (2, 64, 32, 32) # NCHW self.attrs['groups'] = 8 self.data_format = "NHWC" self.compare_between_place = True class TestGroupNormAPI_With_NHWC(OpTest): def test_case1(self): data1 = fluid.data(name='data1', shape=[None, 3, 3, 4], dtype='float32') out1 = fluid.layers.group_norm( input=data1, groups=2, data_layout="NHWC") data2 = fluid.data(name='data2', shape=[None, 4, 3, 3], dtype='float32') out2 = fluid.layers.group_norm( input=data2, groups=2, data_layout="NCHW") data1_np = np.random.random((2, 3, 3, 4)).astype("float32") data2_np = np.random.random((2, 4, 3, 3)).astype("float32") scale = np.array([1]).astype("float32") bias = np.array([0]).astype("float32") place = core.CPUPlace() exe = fluid.Executor(place) results = exe.run(fluid.default_main_program(), feed={"data1": data1_np, "data2": data2_np}, fetch_list=[out1, out2], return_numpy=True) expect_res1 = group_norm_naive( data1_np, scale, bias, epsilon=1e-5, groups=2, data_layout="NHWC") expect_res2 = group_norm_naive( data2_np, scale, bias, epsilon=1e-5, groups=2, data_layout="NCHW") self.assertTrue(np.allclose(results[0], expect_res1[0])) self.assertTrue(np.allclose(results[1], expect_res2[0])) class TestGroupNormException(OpTest): # data_layout is not NHWC or NCHW def test_exception(self): data = fluid.data(name='data', shape=[None, 3, 3, 4], dtype="float32") def attr_data_format(): out = fluid.layers.group_norm( input=data, groups=2, data_layout="NDHW") self.assertRaises(ValueError, attr_data_format) if __name__ == '__main__': unittest.main()
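# A short, self-contained sketch (shapes chosen to match the default test case above,
# not part of the test itself) of the same group-norm arithmetic that group_norm_naive()
# implements for NCHW input: reshape to (N*G, -1), normalize each group, then apply the
# per-channel scale and bias.
import numpy as np

N, C, H, W, G, eps = 2, 4, 3, 3, 2, 1e-5
x = np.random.random((N, C, H, W)).astype(np.float32)
scale = np.ones(C, dtype=np.float32)
bias = np.zeros(C, dtype=np.float32)

g = x.reshape((N * G, -1))
mean = g.mean(axis=1, keepdims=True)
var = g.var(axis=1, keepdims=True)
y = ((g - mean) / np.sqrt(var + eps)).reshape((N, C, H, W))
y = y * scale.reshape((-1, 1, 1)) + bias.reshape((-1, 1, 1))
print(y.shape)  # (2, 4, 3, 3)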
chengduoZH/Paddle
python/paddle/fluid/tests/unittests/test_group_norm_op.py
Python
apache-2.0
8,301
#
# Copyright 2017 University of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from distutils.core import setup

setup(name='youtubecli',
      description='library for uploading a video file to YouTube',
      version='0.1',
      packages=['youtubecli'],
      classifiers=[
          'Intended Audience :: Developers',
          'License :: OSI Approved :: Apache Software License',
          'Programming Language :: Python',
          'Topic :: Internet :: WWW/HTTP',
          'Topic :: Software Development :: Libraries :: Python Modules'
      ])
informatics-isi-edu/microscopy
youtube/youtubecli/setup.py
Python
apache-2.0
1,075
from . import series
from . import images


def _setup():
    import logging
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('[%(name)s] %(levelname)s %(message)s')
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    logger.addHandler(ch)

_setup()

__version__ = '1.1.1'
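# A small standalone illustration (separate from the package above) of the log format
# that the _setup() call configures: messages come out as '[<logger name>] LEVEL message'.
# The logger name 'thunder' mirrors what __name__ resolves to for the package.
import logging

logger = logging.getLogger('thunder')
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('[%(name)s] %(levelname)s %(message)s'))
logger.addHandler(handler)
logger.info('loading series data')  # prints: [thunder] INFO loading series data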
jwittenbach/thunder
thunder/__init__.py
Python
apache-2.0
347
# -*- coding: utf-8 -*- import unittest from copy import deepcopy from openprocurement.api.tests.base import snitch from openprocurement.tender.belowthreshold.adapters import TenderBelowThersholdConfigurator from openprocurement.tender.belowthreshold.tests.base import ( TenderContentWebTest, test_bids, test_lots, test_organization ) from openprocurement.tender.belowthreshold.tests.award_blanks import ( # TenderAwardResourceTest create_tender_award_invalid, create_tender_award, patch_tender_award, patch_tender_award_unsuccessful, get_tender_award, patch_tender_award_Administrator_change, # TenderLotAwardCheckResourceTest check_tender_award, # TenderLotAwardResourceTest create_tender_lot_award, patch_tender_lot_award, patch_tender_lot_award_unsuccessful, # Tender2LotAwardResourceTest create_tender_lots_award, patch_tender_lots_award, # TenderAwardComplaintResourceTest create_tender_award_complaint_invalid, create_tender_award_complaint, patch_tender_award_complaint, review_tender_award_complaint, get_tender_award_complaint, get_tender_award_complaints, # TenderLotAwardComplaintResourceTest create_tender_lot_award_complaint, patch_tender_lot_award_complaint, get_tender_lot_award_complaint, get_tender_lot_award_complaints, # Tender2LotAwardComplaintResourceTest create_tender_lots_award_complaint, patch_tender_lots_award_complaint, # TenderAwardComplaintDocumentResourceTest not_found, create_tender_award_complaint_document, put_tender_award_complaint_document, patch_tender_award_complaint_document, # Tender2LotAwardComplaintDocumentResourceTest create_tender_lots_award_complaint_document, put_tender_lots_award_complaint_document, patch_tender_lots_award_complaint_document, # TenderAwardDocumentResourceTest not_found_award_document, create_tender_award_document, put_tender_award_document, patch_tender_award_document, create_award_document_bot, patch_not_author, # Tender2LotAwardDocumentResourceTest create_tender_lots_award_document, put_tender_lots_award_document, patch_tender_lots_award_document, ) class TenderAwardResourceTestMixin(object): test_create_tender_award_invalid = snitch(create_tender_award_invalid) test_get_tender_award = snitch(get_tender_award) test_patch_tender_award_Administrator_change = snitch(patch_tender_award_Administrator_change) class TenderAwardComplaintResourceTestMixin(object): test_create_tender_award_complaint_invalid = snitch(create_tender_award_complaint_invalid) test_get_tender_award_complaint = snitch(get_tender_award_complaint) test_get_tender_award_complaints = snitch(get_tender_award_complaints) class TenderAwardDocumentResourceTestMixin(object): test_not_found_award_document = snitch(not_found_award_document) test_create_tender_award_document = snitch(create_tender_award_document) test_put_tender_award_document = snitch(put_tender_award_document) test_patch_tender_award_document = snitch(patch_tender_award_document) test_create_award_document_bot = snitch(create_award_document_bot) test_patch_not_author = snitch(patch_not_author) class TenderAwardComplaintDocumentResourceTestMixin(object): test_not_found = snitch(not_found) test_create_tender_award_complaint_document = snitch(create_tender_award_complaint_document) test_put_tender_award_complaint_document = snitch(put_tender_award_complaint_document) class TenderLotAwardCheckResourceTestMixin(object): test_check_tender_award = snitch(check_tender_award) class Tender2LotAwardDocumentResourceTestMixin(object): test_create_tender_lots_award_document = snitch(create_tender_lots_award_document) 
test_put_tender_lots_award_document = snitch(put_tender_lots_award_document) test_patch_tender_lots_award_document = snitch(patch_tender_lots_award_document) class TenderAwardResourceTest(TenderContentWebTest, TenderAwardResourceTestMixin): initial_status = 'active.qualification' initial_bids = test_bids test_create_tender_award = snitch(create_tender_award) test_patch_tender_award = snitch(patch_tender_award) test_patch_tender_award_unsuccessful = snitch(patch_tender_award_unsuccessful) class TenderLotAwardCheckResourceTest(TenderContentWebTest, TenderLotAwardCheckResourceTestMixin): initial_status = 'active.auction' initial_lots = test_lots initial_bids = deepcopy(test_bids) initial_bids.append(deepcopy(test_bids[0])) initial_bids[1]['tenderers'][0]['name'] = u'Не зовсім Державне управління справами' initial_bids[1]['tenderers'][0]['identifier']['id'] = u'88837256' initial_bids[2]['tenderers'][0]['name'] = u'Точно не Державне управління справами' initial_bids[2]['tenderers'][0]['identifier']['id'] = u'44437256' reverse = TenderBelowThersholdConfigurator.reverse_awarding_criteria awarding_key = TenderBelowThersholdConfigurator.awarding_criteria_key def setUp(self): super(TenderLotAwardCheckResourceTest, self).setUp() self.app.authorization = ('Basic', ('auction', '')) response = self.app.get('/tenders/{}/auction'.format(self.tender_id)) auction_bids_data = response.json['data']['bids'] for lot_id in self.initial_lots: response = self.app.post_json('/tenders/{}/auction/{}'.format(self.tender_id, lot_id['id']), {'data': {'bids': auction_bids_data}}) self.assertEqual(response.status, "200 OK") self.assertEqual(response.content_type, 'application/json') response = self.app.get('/tenders/{}'.format(self.tender_id)) self.assertEqual(response.json['data']['status'], "active.qualification") class TenderLotAwardResourceTest(TenderContentWebTest): initial_status = 'active.qualification' initial_lots = test_lots initial_bids = test_bids test_create_tender_lot_award = snitch(create_tender_lot_award) test_patch_tender_lot_award = snitch(patch_tender_lot_award) test_patch_tender_lot_award_unsuccessful = snitch(patch_tender_lot_award_unsuccessful) class Tender2LotAwardResourceTest(TenderContentWebTest): initial_status = 'active.qualification' initial_lots = 2 * test_lots initial_bids = test_bids test_create_tender_lots_award = snitch(create_tender_lots_award) test_patch_tender_lots_award = snitch(patch_tender_lots_award) class TenderAwardComplaintResourceTest(TenderContentWebTest, TenderAwardComplaintResourceTestMixin): initial_status = 'active.qualification' initial_bids = test_bids def setUp(self): super(TenderAwardComplaintResourceTest, self).setUp() # Create award auth = self.app.authorization self.app.authorization = ('Basic', ('token', '')) response = self.app.post_json('/tenders/{}/awards'.format( self.tender_id), {'data': {'suppliers': [test_organization], 'status': 'pending', 'bid_id': self.initial_bids[0]['id']}}) award = response.json['data'] self.award_id = award['id'] self.app.authorization = auth test_create_tender_award_complaint = snitch(create_tender_award_complaint) test_patch_tender_award_complaint = snitch(patch_tender_award_complaint) test_review_tender_award_complaint = snitch(review_tender_award_complaint) class TenderLotAwardComplaintResourceTest(TenderContentWebTest): initial_status = 'active.qualification' initial_lots = test_lots initial_bids = test_bids def setUp(self): super(TenderLotAwardComplaintResourceTest, self).setUp() # Create award auth = self.app.authorization 
self.app.authorization = ('Basic', ('token', '')) bid = self.initial_bids[0] response = self.app.post_json('/tenders/{}/awards'.format( self.tender_id), {'data': {'suppliers': [test_organization], 'status': 'pending', 'bid_id': bid['id'], 'lotID': bid['lotValues'][0]['relatedLot']}}) award = response.json['data'] self.award_id = award['id'] self.app.authorization = auth test_create_tender_lot_award_complaint = snitch(create_tender_lot_award_complaint) test_patch_tender_lot_award_complaint = snitch(patch_tender_lot_award_complaint) test_get_tender_lot_award_complaint = snitch(get_tender_lot_award_complaint) test_get_tender_lot_award_complaints = snitch(get_tender_lot_award_complaints) class Tender2LotAwardComplaintResourceTest(TenderLotAwardComplaintResourceTest): initial_lots = 2 * test_lots test_create_tender_lots_award_complaint = snitch(create_tender_lots_award_complaint) test_patch_tender_lots_award_complaint = snitch(patch_tender_lots_award_complaint) class TenderAwardComplaintDocumentResourceTest(TenderContentWebTest, TenderAwardComplaintDocumentResourceTestMixin): initial_status = 'active.qualification' initial_bids = test_bids def setUp(self): super(TenderAwardComplaintDocumentResourceTest, self).setUp() # Create award auth = self.app.authorization self.app.authorization = ('Basic', ('token', '')) response = self.app.post_json('/tenders/{}/awards'.format( self.tender_id), {'data': {'suppliers': [test_organization], 'status': 'pending', 'bid_id': self.initial_bids[0]['id']}}) award = response.json['data'] self.award_id = award['id'] self.app.authorization = auth # Create complaint for award self.bid_token = self.initial_bids_tokens.values()[0] response = self.app.post_json('/tenders/{}/awards/{}/complaints?acc_token={}'.format( self.tender_id, self.award_id, self.bid_token), {'data': {'title': 'complaint title', 'description': 'complaint description', 'author': test_organization}}) complaint = response.json['data'] self.complaint_id = complaint['id'] self.complaint_owner_token = response.json['access']['token'] test_patch_tender_award_complaint_document = snitch(patch_tender_award_complaint_document) class Tender2LotAwardComplaintDocumentResourceTest(TenderContentWebTest): initial_status = 'active.qualification' initial_bids = test_bids initial_lots = 2 * test_lots def setUp(self): super(Tender2LotAwardComplaintDocumentResourceTest, self).setUp() # Create award bid = self.initial_bids[0] auth = self.app.authorization self.app.authorization = ('Basic', ('token', '')) response = self.app.post_json('/tenders/{}/awards'.format( self.tender_id), {'data': {'suppliers': [test_organization], 'status': 'pending', 'bid_id': bid['id'], 'lotID': bid['lotValues'][0]['relatedLot']}}) award = response.json['data'] self.award_id = award['id'] self.app.authorization = auth # Create complaint for award bid_token = self.initial_bids_tokens.values()[0] response = self.app.post_json('/tenders/{}/awards/{}/complaints?acc_token={}'.format( self.tender_id, self.award_id, bid_token), {'data': {'title': 'complaint title', 'description': 'complaint description', 'author': test_organization}}) complaint = response.json['data'] self.complaint_id = complaint['id'] self.complaint_owner_token = response.json['access']['token'] test_create_tender_lots_award_complaint_document = snitch(create_tender_lots_award_complaint_document) test_put_tender_lots_award_complaint_document = snitch(put_tender_lots_award_complaint_document) test_patch_tender_lots_award_complaint_document = 
snitch(patch_tender_lots_award_complaint_document) class TenderAwardDocumentResourceTest(TenderContentWebTest, TenderAwardDocumentResourceTestMixin): initial_status = 'active.qualification' initial_bids = test_bids def setUp(self): super(TenderAwardDocumentResourceTest, self).setUp() # Create award auth = self.app.authorization self.app.authorization = ('Basic', ('token', '')) response = self.app.post_json('/tenders/{}/awards'.format( self.tender_id), {'data': {'suppliers': [test_organization], 'status': 'pending', 'bid_id': self.initial_bids[0]['id']}}) award = response.json['data'] self.award_id = award['id'] self.app.authorization = auth class TenderAwardDocumentWithDSResourceTest(TenderAwardDocumentResourceTest): docservice = True class Tender2LotAwardDocumentResourceTest(TenderContentWebTest, Tender2LotAwardDocumentResourceTestMixin): initial_status = 'active.qualification' initial_bids = test_bids initial_lots = 2 * test_lots def setUp(self): super(Tender2LotAwardDocumentResourceTest, self).setUp() # Create award auth = self.app.authorization self.app.authorization = ('Basic', ('token', '')) bid = self.initial_bids[0] response = self.app.post_json('/tenders/{}/awards'.format( self.tender_id), {'data': {'suppliers': [test_organization], 'status': 'pending', 'bid_id': bid['id'], 'lotID': bid['lotValues'][0]['relatedLot']}}) award = response.json['data'] self.award_id = award['id'] self.app.authorization = auth class Tender2LotAwardDocumentWithDSResourceTest(Tender2LotAwardDocumentResourceTest): docservice = True def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(Tender2LotAwardComplaintDocumentResourceTest)) suite.addTest(unittest.makeSuite(Tender2LotAwardComplaintResourceTest)) suite.addTest(unittest.makeSuite(Tender2LotAwardDocumentResourceTest)) suite.addTest(unittest.makeSuite(Tender2LotAwardResourceTest)) suite.addTest(unittest.makeSuite(TenderAwardComplaintDocumentResourceTest)) suite.addTest(unittest.makeSuite(TenderAwardComplaintResourceTest)) suite.addTest(unittest.makeSuite(TenderAwardDocumentResourceTest)) suite.addTest(unittest.makeSuite(TenderAwardResourceTest)) suite.addTest(unittest.makeSuite(TenderLotAwardResourceTest)) return suite if __name__ == '__main__': unittest.main(defaultTest='suite')
openprocurement/openprocurement.tender.belowthreshold
openprocurement/tender/belowthreshold/tests/award.py
Python
apache-2.0
14,334
# Copyright 2014 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest class Test_PropertyMixin(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.storage._helpers import _PropertyMixin return _PropertyMixin def _make_one(self, *args, **kw): return self._get_target_class()(*args, **kw) def _derivedClass(self, path=None): class Derived(self._get_target_class()): client = None @property def path(self): return path return Derived def test_path_is_abstract(self): mixin = self._make_one() self.assertRaises(NotImplementedError, lambda: mixin.path) def test_client_is_abstract(self): mixin = self._make_one() self.assertRaises(NotImplementedError, lambda: mixin.client) def test_reload(self): connection = _Connection({'foo': 'Foo'}) client = _Client(connection) derived = self._derivedClass('/path')() # Make sure changes is not a set, so we can observe a change. derived._changes = object() derived.reload(client=client) self.assertEqual(derived._properties, {'foo': 'Foo'}) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]['method'], 'GET') self.assertEqual(kw[0]['path'], '/path') self.assertEqual(kw[0]['query_params'], {'projection': 'noAcl'}) # Make sure changes get reset by reload. self.assertEqual(derived._changes, set()) def test__set_properties(self): mixin = self._make_one() self.assertEqual(mixin._properties, {}) VALUE = object() mixin._set_properties(VALUE) self.assertEqual(mixin._properties, VALUE) def test__patch_property(self): derived = self._derivedClass()() derived._patch_property('foo', 'Foo') self.assertEqual(derived._properties, {'foo': 'Foo'}) def test_patch(self): connection = _Connection({'foo': 'Foo'}) client = _Client(connection) derived = self._derivedClass('/path')() # Make sure changes is non-empty, so we can observe a change. BAR = object() BAZ = object() derived._properties = {'bar': BAR, 'baz': BAZ} derived._changes = set(['bar']) # Ignore baz. derived.patch(client=client) self.assertEqual(derived._properties, {'foo': 'Foo'}) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]['method'], 'PATCH') self.assertEqual(kw[0]['path'], '/path') self.assertEqual(kw[0]['query_params'], {'projection': 'full'}) # Since changes does not include `baz`, we don't see it sent. self.assertEqual(kw[0]['data'], {'bar': BAR}) # Make sure changes get reset by patch(). 
self.assertEqual(derived._changes, set()) class Test__scalar_property(unittest.TestCase): def _call_fut(self, fieldName): from google.cloud.storage._helpers import _scalar_property return _scalar_property(fieldName) def test_getter(self): class Test(object): def __init__(self, **kw): self._properties = kw.copy() do_re_mi = self._call_fut('solfege') test = Test(solfege='Latido') self.assertEqual(test.do_re_mi, 'Latido') def test_setter(self): class Test(object): def _patch_property(self, name, value): self._patched = (name, value) do_re_mi = self._call_fut('solfege') test = Test() test.do_re_mi = 'Latido' self.assertEqual(test._patched, ('solfege', 'Latido')) class Test__base64_md5hash(unittest.TestCase): def _call_fut(self, bytes_to_sign): from google.cloud.storage._helpers import _base64_md5hash return _base64_md5hash(bytes_to_sign) def test_it(self): from io import BytesIO BYTES_TO_SIGN = b'FOO' BUFFER = BytesIO() BUFFER.write(BYTES_TO_SIGN) BUFFER.seek(0) SIGNED_CONTENT = self._call_fut(BUFFER) self.assertEqual(SIGNED_CONTENT, b'kBiQqOnIz21aGlQrIp/r/w==') def test_it_with_stubs(self): import mock class _Buffer(object): def __init__(self, return_vals): self.return_vals = return_vals self._block_sizes = [] def read(self, block_size): self._block_sizes.append(block_size) return self.return_vals.pop() BASE64 = _Base64() DIGEST_VAL = object() BYTES_TO_SIGN = b'BYTES_TO_SIGN' BUFFER = _Buffer([b'', BYTES_TO_SIGN]) MD5 = _MD5(DIGEST_VAL) patch = mock.patch.multiple( 'google.cloud.storage._helpers', base64=BASE64, md5=MD5) with patch: SIGNED_CONTENT = self._call_fut(BUFFER) self.assertEqual(BUFFER._block_sizes, [8192, 8192]) self.assertIs(SIGNED_CONTENT, DIGEST_VAL) self.assertEqual(BASE64._called_b64encode, [DIGEST_VAL]) self.assertEqual(MD5._called, [None]) self.assertEqual(MD5.hash_obj.num_digest_calls, 1) self.assertEqual(MD5.hash_obj._blocks, [BYTES_TO_SIGN]) class _Connection(object): def __init__(self, *responses): self._responses = responses self._requested = [] def api_request(self, **kw): self._requested.append(kw) response, self._responses = self._responses[0], self._responses[1:] return response class _MD5Hash(object): def __init__(self, digest_val): self.digest_val = digest_val self.num_digest_calls = 0 self._blocks = [] def update(self, block): self._blocks.append(block) def digest(self): self.num_digest_calls += 1 return self.digest_val class _MD5(object): def __init__(self, digest_val): self.hash_obj = _MD5Hash(digest_val) self._called = [] def __call__(self, data=None): self._called.append(data) return self.hash_obj class _Base64(object): def __init__(self): self._called_b64encode = [] def b64encode(self, value): self._called_b64encode.append(value) return value class _Client(object): def __init__(self, connection): self._connection = connection
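# A quick standalone check (stdlib only, separate from the test doubles above) of the
# value test_it expects from _base64_md5hash: the base64-encoded MD5 digest of b'FOO'.
import base64
import hashlib

digest = base64.b64encode(hashlib.md5(b'FOO').digest())
print(digest)  # b'kBiQqOnIz21aGlQrIp/r/w==' per the expected value in the test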
dstrockis/outlook-autocategories
lib/unit_tests/test__helpers.py
Python
apache-2.0
6,951
# -*- coding: utf-8 -*-
import pytest
from model.group import Group
from fixture.application import Application


@pytest.fixture
def app(request):
    fixture = Application()
    request.addfinalizer(fixture.destroy)
    return fixture


def test_add_group(app):
    app.session.login(username="admin", password="secret")
    app.group.create(Group(name="fgfg", header="fgfg", footer="fgfgfgfg"))
    app.session.logout()
fleksso99/python_training
test/test_add_group.py
Python
apache-2.0
422
# coding: utf-8 # # Esri start of added imports import sys, os, arcpy # Esri end of added imports # Esri start of added variables g_ESRI_variable_1 = 'lyrFC' g_ESRI_variable_2 = 'lyrTmp' g_ESRI_variable_3 = 'ID' g_ESRI_variable_4 = 'lyrOut' g_ESRI_variable_5 = ';' # Esri end of added variables #------------------------------------------------------------------------------ # Copyright 2014 Esri # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #------------------------------------------------------------------------------ # # ================================================== # PointTargetGRG.py # -------------------------------------------------- # Built on ArcGIS # ================================================== # # Creates a Gridded Reference Graphic # # # ================================================== # HISTORY: # # 8/25/2015 - mf - Needed to update script for non-ArcMap/Pro testing environment # # ================================================== import os, sys, math, traceback import arcpy from arcpy import env import Utilities # Read in the parameters targetPointOrigin = arcpy.GetParameterAsText(0) numberCellsHo = arcpy.GetParameterAsText(1) numberCellsVert = arcpy.GetParameterAsText(2) cellWidth = arcpy.GetParameterAsText(3) cellHeight = arcpy.GetParameterAsText(4) cellUnits = arcpy.GetParameterAsText(5) gridSize = arcpy.GetParameterAsText(6) labelStartPos = arcpy.GetParameterAsText(7) labelStyle = arcpy.GetParameterAsText(8) outputFeatureClass = arcpy.GetParameterAsText(9) tempOutput = os.path.join("in_memory", "tempFishnetGrid") sysPath = sys.path[0] appEnvironment = None DEBUG = True mxd = None mapList = None df, aprx = None, None def labelFeatures(layer, field): ''' set up labeling for layer ''' if appEnvironment == "ARCGIS_PRO": if layer.supports("SHOWLABELS"): for lblclass in layer.listLabelClasses(): lblclass.visible = True lblclass.expression = " [" + str(field) + "]" layer.showLabels = True elif appEnvironment == "ARCMAP": if layer.supports("LABELCLASSES"): for lblclass in layer.labelClasses: lblclass.showClassLabels = True lblclass.expression = " [" + str(field) + "]" layer.showLabels = True arcpy.RefreshActiveView() else: pass # if returns "OTHER" def findLayerByName(layerName): ''' find layer in app ''' global mapList global mxd #UPDATE # if isPro: if appEnvironment == "ARCGIS_PRO": for layer in mapList.listLayers(): if layer.name == layerName: arcpy.AddMessage("Found matching layer [" + layer.name + "]") return layer else: arcpy.AddMessage("Incorrect layer: [" + layer.name + "]") # else: elif appEnvironment == "ARCMAP": for layer in arcpy.mapping.ListLayers(mxd): if layer.name == layerName: arcpy.AddMessage("Found matching layer [" + layer.name + "]") return layer else: arcpy.AddMessage("Incorrect layer: [" + layer.name + "]") else: arcpy.AddMessage("Non-map application (ArcCatalog, stand-alone test, etc.") def RotateFeatureClass(inputFC, outputFC, angle=0, pivot_point=None): """Rotate Feature Class inputFC Input features outputFC Output feature class angle Angle to rotate, in 
degrees pivot_point X,Y coordinates (as space-separated string) Default is lower-left of inputFC As the output feature class no longer has a "real" xy locations, after rotation, it no coordinate system defined. """ def RotateXY(x, y, xc=0, yc=0, angle=0, units="DEGREES"): """Rotate an xy cooordinate about a specified origin x,y xy coordinates xc,yc center of rotation angle angle units "DEGREES" (default) or "RADIANS" """ import math x = x - xc y = y - yc # make angle clockwise (like Rotate_management) angle = angle * -1 if units == "DEGREES": angle = math.radians(angle) xr = (x * math.cos(angle)) - (y * math.sin(angle)) + xc yr = (x * math.sin(angle)) + (y * math.cos(angle)) + yc return xr, yr # temp names for cleanup env_file = None lyrFC, lyrTmp, lyrOut = [None] * 3 # layers tmpFC = None # temp dataset Row, Rows, oRow, oRows = [None] * 4 # cursors try: # process parameters try: xcen, ycen = [float(xy) for xy in pivot_point.split()] pivot_point = xcen, ycen except: # if pivot point was not specified, get it from # the lower-left corner of the feature class ext = arcpy.Describe(inputFC).extent xcen, ycen = ext.XMin, ext.YMin pivot_point = xcen, ycen angle = float(angle) # set up environment env_file = arcpy.CreateScratchName("xxenv",".xml","file", os.environ["TEMP"]) arcpy.SaveSettings(env_file) # Disable any GP environment clips or project on the fly arcpy.ClearEnvironment("extent") arcpy.ClearEnvironment("outputCoordinateSystem") WKS = env.workspace if not WKS: if os.path.dirname(outputFC): WKS = os.path.dirname(outputFC) else: WKS = os.path.dirname( arcpy.Describe(inputFC).catalogPath) env.workspace = env.scratchWorkspace = WKS # Disable GP environment clips or project on the fly arcpy.ClearEnvironment("extent") arcpy.ClearEnvironment("outputCoordinateSystem") # get feature class properties lyrFC = g_ESRI_variable_1 arcpy.MakeFeatureLayer_management(inputFC, lyrFC) dFC = arcpy.Describe(lyrFC) shpField = dFC.shapeFieldName shpType = dFC.shapeType FID = dFC.OIDFieldName # create temp feature class tmpFC = arcpy.CreateScratchName("xxfc", "", "featureclass") arcpy.CreateFeatureclass_management(os.path.dirname(tmpFC), os.path.basename(tmpFC), shpType) lyrTmp = g_ESRI_variable_2 arcpy.MakeFeatureLayer_management(tmpFC, lyrTmp) # set up id field (used to join later) TFID = "XXXX_FID" arcpy.AddField_management(lyrTmp, TFID, "LONG") arcpy.DeleteField_management(lyrTmp, g_ESRI_variable_3) # rotate the feature class coordinates # only points, polylines, and polygons are supported # open read and write cursors Rows = arcpy.SearchCursor(lyrFC, "", "", "%s;%s" % (shpField,FID)) oRows = arcpy.InsertCursor(lyrTmp) arcpy.AddMessage("Opened search cursor") if shpType == "Point": for Row in Rows: shp = Row.getValue(shpField) pnt = shp.getPart() pnt.X, pnt.Y = RotateXY(pnt.X, pnt.Y, xcen, ycen, angle) oRow = oRows.newRow() oRow.setValue(shpField, pnt) oRow.setValue(TFID, Row. 
getValue(FID)) oRows.insertRow(oRow) elif shpType in ["Polyline", "Polygon"]: parts = arcpy.Array() rings = arcpy.Array() ring = arcpy.Array() for Row in Rows: shp = Row.getValue(shpField) p = 0 for part in shp: for pnt in part: if pnt: x, y = RotateXY(pnt.X, pnt.Y, xcen, ycen, angle) ring.add(arcpy.Point(x, y, pnt.ID)) else: # if we have a ring, save it if len(ring) > 0: rings.add(ring) ring.removeAll() # we have our last ring, add it rings.add(ring) ring.removeAll() # if only one, remove nesting if len(rings) == 1: rings = rings.getObject(0) parts.add(rings) rings.removeAll() p += 1 # if only one, remove nesting if len(parts) == 1: parts = parts.getObject(0) if dFC.shapeType == "Polyline": shp = arcpy.Polyline(parts) else: shp = arcpy.Polygon(parts) parts.removeAll() oRow = oRows.newRow() oRow.setValue(shpField, shp) oRow.setValue(TFID,Row.getValue(FID)) oRows.insertRow(oRow) else: #raise Exception, "Shape type {0} is not supported".format(shpType) #UPDATE raise Exception("Shape type {0} is not supported".format(shpType)) del oRow, oRows # close write cursor (ensure buffer written) oRow, oRows = None, None # restore variables for cleanup # join attributes, and copy to output arcpy.AddJoin_management(lyrTmp, TFID, lyrFC, FID) env.qualifiedFieldNames = False arcpy.Merge_management(lyrTmp, outputFC) lyrOut = g_ESRI_variable_4 arcpy.MakeFeatureLayer_management(outputFC, lyrOut) # drop temp fields 2,3 (TFID, FID) fnames = [f.name for f in arcpy.ListFields(lyrOut)] dropList = g_ESRI_variable_5.join(fnames[2:4]) arcpy.DeleteField_management(lyrOut, dropList) #except MsgError, xmsg: #UPDATE except MsgError as xmsg: arcpy.AddError(str(xmsg)) except arcpy.ExecuteError: tbinfo = traceback.format_tb(sys.exc_info()[2])[0] arcpy.AddError(tbinfo.strip()) arcpy.AddError(arcpy.GetMessages()) numMsg = arcpy.GetMessageCount() for i in range(0, numMsg): arcpy.AddReturnMessage(i) #except Exception, xmsg: #UPDATE except Exception as xmsg: tbinfo = traceback.format_tb(sys.exc_info()[2])[0] arcpy.AddError(tbinfo + str(xmsg)) finally: # reset environment if env_file: arcpy.LoadSettings(env_file) # Clean up temp files for f in [lyrFC, lyrTmp, lyrOut, tmpFC, env_file]: try: if f: arcpy.Delete_management(f) except: pass # delete cursors try: for c in [Row, Rows, oRow, oRows]: del c except: pass # return pivot point try: pivot_point = "{0} {1}".format(*pivot_point) except: pivot_point = None return pivot_point def ColIdxToXlName(index): ''' Converts an index into a letter, labeled like excel columns, A to Z, AA to ZZ, etc. 
''' if index < 1: raise ValueError("Index is too small") result = "" while True: if index > 26: index, r = divmod(index - 1, 26) result = chr(r + ord('A')) + result else: return chr(index + ord('A') - 1) + result def main(): ''' main method ''' try: #UPDATE gisVersion = arcpy.GetInstallInfo()["Version"] global appEnvironment appEnvironment = Utilities.GetApplication() if DEBUG == True: arcpy.AddMessage("App environment: " + appEnvironment) global aprx global mapList global mxd global df isPro = False #if gisVersion == "1.0": #Pro: if appEnvironment == "ARCGIS_PRO": from arcpy import mp aprx = arcpy.mp.ArcGISProject("CURRENT") mapList = aprx.listMaps()[0] isPro = True #else: elif appEnvironment == "ARCMAP": from arcpy import mapping mxd = arcpy.mapping.MapDocument('CURRENT') df = arcpy.mapping.ListDataFrames(mxd)[0] isPro = False else: if DEBUG == True: arcpy.AddMessage("Non-map application...") # If grid size is drawn on the map, use this instead of cell width and cell height inputExtentDrawnFromMap = False angleDrawn = 0 workspace = arcpy.env.workspace topLeftDrawn = 0 global cellWidth global cellHeight if float(cellWidth) == 0 and float(cellHeight) == 0: inputExtentDrawnFromMap = True tempGridFC = os.path.join(arcpy.env.scratchWorkspace, "GridSize") arcpy.CopyFeatures_management(gridSize, tempGridFC) pts = None with arcpy.da.SearchCursor(tempGridFC, 'SHAPE@XY', explode_to_points=True) as cursor: pts = [r[0] for r in cursor][0:4] arcpy.Delete_management(tempGridFC) # Find the highest points in the drawn rectangle, to calculate the top left and top right coordinates. highestPoint = None nextHighestPoint = None for pt in pts: if highestPoint is None or pt[1] > highestPoint[1]: nextHighestPoint = highestPoint highestPoint = pt elif nextHighestPoint is None or pt[1] > nextHighestPoint[1]: nextHighestPoint = pt topLeft = highestPoint if highestPoint[0] < nextHighestPoint[0] else nextHighestPoint topRight = highestPoint if highestPoint[0] > nextHighestPoint[0] else nextHighestPoint topLeftDrawn = topLeft # Calculate the cell height and cell width cellWidth= math.sqrt((pts[0][0] - pts[1][0]) ** 2 + (pts[0][1] - pts[1][1]) ** 2) cellHeight = math.sqrt((pts[1][0] - pts[2][0]) ** 2 + (pts[1][1] - pts[2][1]) ** 2) # Calculate angle hypotenuse = math.sqrt(math.pow(topLeft[0] - topRight[0], 2) + math.pow(topLeft[1] - topRight[1], 2)) adjacent = topRight[0] - topLeft[0] numberToCos = float(adjacent)/float(hypotenuse) angleInRadians = math.acos(numberToCos) angleDrawn = math.degrees(angleInRadians) if (topRight[1] > topLeft[1]): angleDrawn = 360 - angleDrawn else: if (cellUnits == "Feet"): cellWidth = float(cellWidth) * 0.3048 cellHeight = float(cellHeight) * 0.3048 # Get the coordinates of the point inputExtentDrawnFromMap. rows = arcpy.SearchCursor(targetPointOrigin) extent = None for row in rows: shape = row.getValue("SHAPE") extent = shape.extent pointExtents = str.split(str(extent)) ''' This seemed to be shifting the grid when it was not required so commented out # Shift the grid center point if the rows and/or columns are even. 
if (float(numberCellsHo)%2 == 0.0): hoShiftAmt = float(cellHeight) / 2.0 # Determines shift up/down based on where box was inputExtentDrawnFromMap if inputExtentDrawnFromMap == False: pointExtents[1] = str(float(pointExtents[1]) - hoShiftAmt) elif (float(topLeftDrawn[1]) > float(pointExtents[1])): pointExtents[1] = str(float(pointExtents[1]) - hoShiftAmt) else: pointExtents[1] = str(float(pointExtents[1]) + hoShiftAmt) if (float(numberCellsVert)%2 == 0.0): vertShiftAmt = float(cellWidth) / 2.0 # Determines shift left/right based on where box was inputExtentDrawnFromMap if inputExtentDrawnFromMap == False: pointExtents[0] = str(float(pointExtents[0]) - vertShiftAmt) elif (float(topLeftDrawn[0]) > float(pointExtents[0])): pointExtents[0] = str(float(pointExtents[0]) - vertShiftAmt) else: pointExtents[0] = str(float(pointExtents[0]) + vertShiftAmt) ''' # From the template extent, get the origin, y axis, and opposite corner coordinates rightCorner = float(pointExtents[0]) + ((float(cellWidth) * float(numberCellsVert)) /2.0) leftCorner = float(pointExtents[0]) - ((float(cellWidth) * float(numberCellsVert)) /2.0) topCorner = float(pointExtents[1]) + ((float(cellHeight) * float(numberCellsHo)) /2.0) bottomCorner = float(pointExtents[1]) - ((float(cellHeight) * float(numberCellsHo)) /2.0) originCoordinate = str(leftCorner) + " " + str(bottomCorner) yAxisCoordinate = str(leftCorner) + " " + str(bottomCorner + 10) oppCornerCoordinate = str(rightCorner) + " " + str(topCorner) fullExtent = str(leftCorner) + " " + str(bottomCorner) + " " + str(rightCorner) + " " + str(topCorner) # If grid size is drawn on the map, then calculate the rotation of the grid if inputExtentDrawnFromMap: # Find the highest two points in the inputExtentDrawnFromMap shape highestPoint = None nextHighestPoint = None for pt in pts: if highestPoint is None or pt[1] > highestPoint[1]: nextHighestPoint = highestPoint highestPoint = pt elif nextHighestPoint is None or pt[1] > nextHighestPoint[1]: nextHighestPoint = pt topLeft = highestPoint if highestPoint[0] < nextHighestPoint[0] else nextHighestPoint topRight = highestPoint if highestPoint[0] > nextHighestPoint[0] else nextHighestPoint yDiff = topRight[1] - topLeft[1] xDiff = topRight[0] - topLeft[0] # Set the Y-Axis Coordinate so that the grid rotates properly extentHeight = float(topCorner) - float(bottomCorner) # Set the start position for labeling startPos = None if (labelStartPos == "Upper-Right"): startPos = "UR" elif (labelStartPos == "Upper-Left"): startPos = "UL" elif (labelStartPos == "Lower-Left"): startPos = "LL" elif (labelStartPos == "Lower-Right"): startPos = "LR" arcpy.AddMessage("Creating Fishnet Grid") arcpy.CreateFishnet_management(tempOutput, originCoordinate, yAxisCoordinate, 0, 0, str(numberCellsHo), str(numberCellsVert), oppCornerCoordinate, "NO_LABELS", fullExtent, "POLYGON") # Sort the grid upper left to lower right, and delete the in memory one arcpy.AddMessage("Sorting the grid for labeling") tempSort = os.path.join("in_memory", "tempSort") arcpy.Sort_management(tempOutput, tempSort, [["Shape", "ASCENDING"]], startPos) # arcpy.Delete_management("in_memory") #Not sure why we are trying to delete in_memory # Add a field which will be used to add the grid labels arcpy.AddMessage("Adding field for labeling the grid") gridField = "Grid" arcpy.AddField_management(tempSort, gridField, "TEXT") # Number the fields arcpy.AddMessage("Numbering the grids") letterIndex = 1 secondLetterIndex = 1 letter = 'A' secondLetter = 'A' number = 1 lastY = -9999 cursor = 
arcpy.UpdateCursor(tempSort) for row in cursor: yPoint = row.getValue("SHAPE").firstPoint.Y if (lastY != yPoint) and (lastY != -9999): letterIndex += 1 letter = ColIdxToXlName(letterIndex) if (labelStyle != "Numeric"): number = 1 secondLetter = 'A' secondLetterIndex = 1 lastY = yPoint if (labelStyle == "Alpha-Numeric"): row.setValue(gridField, str(letter) + str(number)) elif (labelStyle == "Alpha-Alpha"): row.setValue(gridField, str(letter) + str(secondLetter)) elif (labelStyle == "Numeric"): row.setValue(gridField, str(number)) cursor.updateRow(row) number += 1 secondLetterIndex += 1 secondLetter = ColIdxToXlName(secondLetterIndex) # Rotate the shape, if needed. if (inputExtentDrawnFromMap): arcpy.AddMessage("Rotating the grid") RotateFeatureClass(tempSort, outputFeatureClass, angleDrawn, pointExtents[0] + " " + pointExtents[1]) else: arcpy.CopyFeatures_management(tempSort, outputFeatureClass) arcpy.Delete_management(tempSort) # Get and label the output feature #UPDATE targetLayerName = os.path.basename(outputFeatureClass) if appEnvironment == "ARCGIS_PRO": #params = arcpy.GetParameterInfo() ## get the symbology from the GRG.lyr #scriptPath = sys.path[0] #layerFilePath = os.path.join(scriptPath,r"commondata\userdata\GRG.lyrx") #arcpy.AddMessage("Applying Symbology from {0}".format(layerFilePath)) #params[8].symbology = layerFilePath arcpy.AddMessage("Do not apply symbology it will be done in the next task step") elif appEnvironment == "ARCMAP": #arcpy.AddMessage("Adding features to map (" + str(targetLayerName) + ")...") #arcpy.MakeFeatureLayer_management(outputFeatureClass, targetLayerName) # create a layer object #layer = arcpy.mapping.Layer(targetLayerName) # get the symbology from the NumberedStructures.lyr #layerFilePath = os.path.join(os.getcwd(),"data\Layers\GRG.lyr") #layerFilePath = os.path.join(os.path.dirname(os.path.dirname(__file__)),"layers\GRG.lyr") # apply the symbology to the layer #arcpy.ApplySymbologyFromLayer_management(layer, layerFilePath) # add layer to map #arcpy.mapping.AddLayer(df, layer, "AUTO_ARRANGE") # find the target layer in the map #mapLyr = arcpy.mapping.ListLayers(mxd, targetLayerName)[0] #arcpy.AddMessage("Labeling output features (" + str(targetLayerName) + ")...") # Work around needed as ApplySymbologyFromLayer_management does not honour labels #labelLyr = arcpy.mapping.Layer(layerFilePath) # copy the label info from the source to the map layer #mapLyr.labelClasses = labelLyr.labelClasses # turn labels on #mapLyr.showLabels = True arcpy.AddMessage("Non-map environment, skipping labeling based on best practices") else: arcpy.AddMessage("Non-map environment, skipping labeling...") # Apply symbology to the GRG layer #UPDATE #symbologyPath = os.path.dirname(workspace) + "\\Layers\GRG.lyr" #arcpy.ApplySymbologyFromLayer_management(layer, symbologyPath) # Set tool output arcpy.SetParameter(8, outputFeatureClass) except arcpy.ExecuteError: # Get the tool error messages msgs = arcpy.GetMessages() arcpy.AddError(msgs) print(msgs) except: # Get the traceback object tb = sys.exc_info()[2] tbinfo = traceback.format_tb(tb)[0] # Concatenate information together concerning the error into a message string pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + \ "\nError Info:\n" + str(sys.exc_info()[1]) msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n" # Return python error messages for use in script tool or Python Window arcpy.AddError(pymsg) arcpy.AddError(msgs) # Print Python error messages for use in Python / Python Window print(pymsg + "\n") print(msgs) # 
MAIN ============================================= if __name__ == "__main__": main()
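# A standalone sketch (separate from the tool above, example values only) of the
# point-rotation arithmetic that RotateXY() uses: rotate (x, y) clockwise about
# (xc, yc) by the given angle in degrees.
import math

def rotate_xy(x, y, xc=0.0, yc=0.0, angle_deg=0.0):
    a = math.radians(-angle_deg)  # negate so positive angles rotate clockwise, as in the tool
    dx, dy = x - xc, y - yc
    return (dx * math.cos(a) - dy * math.sin(a) + xc,
            dx * math.sin(a) + dy * math.cos(a) + yc)

print(rotate_xy(1.0, 0.0, angle_deg=90.0))  # ~(0.0, -1.0): 90 degrees clockwise about the origin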
pshowalter/solutions-geoprocessing-toolbox
clearing_operations/scripts/PointTargetGRG.py
Python
apache-2.0
24,658
# -*- coding: utf-8 -*-
from datetime import datetime
from datetime import date
from datetime import timedelta
import re

import Entity.User as User
import Entity.Event as Event
import Entity.RepeatingEvent as RepeatingEvent
import Responder


def createSingleEvent(name, place, date):
    event = Event.getByDate(date)
    if event:
        return u'Am gegebenen Datum existiert bereits ein Event: \n' + event.toString()
    event = Event.create(name, place, date)
    return u'Event erstellt: \n' + event.toString()


def createRepeatingEvent(name, place, day, time, endDate):
    rep = RepeatingEvent.create(name, place, day, time, endDate)
    events = rep.createEvents()
    answer = u'Events an diesen Daten erstellt:'
    for created in events[0]:
        answer = answer + u'\n' + created.strftime("%d.%m.%Y")
    if events[1]:
        answer = answer + u'\nFolgende Daten übersprungen, da an diesen schon ein Event existiert:'
        for skipped in events[1]:
            answer = answer + u'\n' + skipped.strftime("%d.%m.%Y")
    return answer


def updateRepeating(user, additional):
    rep = RepeatingEvent.get(int(additional.split()[0]))
    events = Event.getByRepeating(rep.key)
    i = 0
    for event in events:
        event.date = event.date + timedelta(minutes=int(additional.split()[1]))
        event.put()
        i = i + 1
    return str(i) + ' events updated'


def create(user, additional):
    if not additional:
        return u'Um ein Event zu erstellen müssen entsprechende Angaben gemacht werden: Name;[Ort];Datum/Tag;Zeit;[EndDatum])'
    split = additional.split(';')
    if (len(split) != 4) and (len(split) != 5):
        return u'Anzahl der Argumente ist Falsch! Es müssen 4 oder 5 sein. (erstelle Name;[Ort];Datum/Tag;Zeit;[EndDatum])'
    if not split[0]:
        return u'Es muss ein Name angegeben werden'

    name = split[0].strip()
    place = split[1].strip()
    timeString = split[3].strip()
    time = None
    try:
        time = datetime.strptime(timeString, "%H:%M").time()
    except ValueError as err:
        return u'Die Zeitangabe ' + timeString + u' hat das falsche Format (HH:MM).'

    dateOrDayString = split[2].strip()
    try:
        date = datetime.strptime(dateOrDayString, "%d.%m.%Y").date()
        return createSingleEvent(name, place, datetime.combine(date, time))
    except ValueError as err:
        try:
            if dateOrDayString:
                dateOrDayString = dateOrDayString.lower()
                day = Event.DAY_DICT[dateOrDayString]
                try:
                    endDateString = split[4]
                    if endDateString:
                        endDate = datetime.strptime(endDateString, "%d.%m.%Y").date()
                        return createRepeatingEvent(name, place, day, time, endDate)
                except ValueError as err2:
                    return u'EndDatum hat falsches Format.'
        except KeyError:
            return u'Als drittes Argument muss entweder ein Datum(TT.MM.JJJJ) oder ein Wochentag eingegeben werden'


def delete(user, additional):
    result = Responder.parseEvent(user, additional)
    if isinstance(result, Event.Event):
        date = result.date
        name = result.name
        result.key.delete()
        return u' Event ' + name + u' am ' + date.strftime("%d.%m.%Y %H:%M") + u' gelöscht.'
    if isinstance(result, basestring):
        return result
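# A standalone sketch (example values only) of the 'Name;[Ort];Datum/Tag;Zeit;[EndDatum]'
# argument string that create() above parses; it exercises the same strptime formats.
from datetime import datetime

additional = u'Training;Sporthalle;24.12.2025;18:00'
name, place, date_or_day, time_string = [part.strip() for part in additional.split(';')]
event_time = datetime.strptime(time_string, "%H:%M").time()
event_date = datetime.strptime(date_or_day, "%d.%m.%Y").date()
print(datetime.combine(event_date, event_time))  # 2025-12-24 18:00:00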
ThomasPfeiffer/SCWKbot
Logic/EventController.py
Python
apache-2.0
3,381
# coding=utf-8
import logging
from unittest.mock import patch

from django.contrib.auth import get_user_model
from django.test import TestCase
from django.conf import settings

from accounts.authentication import PersonaAuthenticationBackend, PERSONA_VERIFY_URL

__author__ = 'peter'

User = get_user_model()


@patch('accounts.authentication.requests.post')
class AuthenticateTest(TestCase):
    def setUp(self):
        self.backend = PersonaAuthenticationBackend()
        user = User(email='other@user.com')
        user.username = 'otheruser'
        user.save()

    def test_sends_assertion_to_mozilla_with_domain(self, mock_post):
        self.backend.authenticate('an assertion')
        mock_post.assert_called_once_with(
            PERSONA_VERIFY_URL,
            data={'assertion': 'an assertion', 'audience': settings.DOMAIN}
        )

    def test_returns_none_if_response_errors(self, mock_post):
        mock_post.return_value.ok = False
        mock_post.return_value.json.return_value = {}
        user = self.backend.authenticate('an assertion')
        self.assertIsNone(user)

    def test_returns_none_if_status_not_okay(self, mock_post):
        mock_post.return_value.json.return_value = {'status': 'not okay!'}
        user = self.backend.authenticate('an assertion')
        self.assertIsNone(user)

    def test_finds_existing_user_with_email(self, mock_post):
        mock_post.return_value.json.return_value = {'status': 'okay', 'email': 'a@b.com'}
        actual_user = User.objects.create(email='a@b.com')
        found_user = self.backend.authenticate('an assertion')
        self.assertEqual(found_user, actual_user)

    def test_creates_new_user_if_necessary_for_valid_assertion(self, mock_post):
        mock_post.return_value.json.return_value = {'status': 'okay', 'email': 'a@b.com'}
        found_user = self.backend.authenticate('an assertion')
        new_user = User.objects.get(email='a@b.com')
        self.assertEqual(found_user, new_user)

    def test_logs_non_okay_responses_from_persona(self, mock_post):
        response_json = {
            'status': 'not okay', 'reason': 'eg, audience mismatch'
        }
        mock_post.return_value.ok = True
        mock_post.return_value.json.return_value = response_json

        logger = logging.getLogger('accounts.authentication')
        with patch.object(logger, 'warning') as mock_log_warning:
            self.backend.authenticate('an assertion')

        mock_log_warning.assert_called_once_with(
            'Persona says no. Json was: {}'.format(response_json)
        )


class GetUserTest(TestCase):
    def test_gets_user_by_email(self):
        backend = PersonaAuthenticationBackend()
        other_user = User(email='other@user.com')
        other_user.username = 'otheruser'
        other_user.save()
        desired_user = User.objects.create(email='a@b.com')
        found_user = backend.get_user('a@b.com')
        self.assertEqual(found_user, desired_user)

    def test_returns_none_if_no_user_with_that_email(self):
        backend = PersonaAuthenticationBackend()
        self.assertIsNone(backend.get_user('a@b.com'))
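# A hedged configuration sketch (this settings fragment and the DOMAIN value are
# assumptions, not taken from this repo) showing how a custom backend like
# PersonaAuthenticationBackend is typically wired into Django settings so that the
# authenticate() and get_user() methods exercised above get called.
AUTHENTICATION_BACKENDS = [
    'accounts.authentication.PersonaAuthenticationBackend',
    'django.contrib.auth.backends.ModelBackend',
]
DOMAIN = 'localhost'  # the audience the tests expect via settings.DOMAIN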
PeterHo/mysite
accounts/tests/test_authentication.py
Python
apache-2.0
3,148
from restclients.canvas import Canvas
# from restclients.models.canvas import ExternalTool


class ExternalTools(Canvas):
    def get_external_tools_in_account(self, account_id):
        """
        Return external tools for the passed canvas account id.

        https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index
        """
        url = "/api/v1/accounts/%s/external_tools" % account_id

        external_tools = []
        for data in self._get_resource(url):
            external_tools.append(self._external_tool_from_json(data))
        return external_tools

    def get_external_tools_in_account_by_sis_id(self, sis_id):
        """
        Return external tools for given account sis id.
        """
        return self.get_external_tools_in_account(
            self._sis_id(sis_id, "account"))

    def get_external_tools_in_course(self, course_id):
        """
        Return external tools for the passed canvas course id.

        https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index
        """
        url = "/api/v1/courses/%s/external_tools" % course_id

        external_tools = []
        for data in self._get_resource(url):
            external_tools.append(self._external_tool_from_json(data))
        return external_tools

    def get_external_tools_in_course_by_sis_id(self, sis_id):
        """
        Return external tools for given course sis id.
        """
        return self.get_external_tools_in_course(
            self._sis_id(sis_id, "course"))

    def _external_tool_from_json(self, data):
        return data
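# A hedged usage sketch for the wrapper above. The account and course identifiers are
# made up, and instantiating Canvas subclasses may require restclients configuration
# that is not shown here.
tools = ExternalTools()
for tool in tools.get_external_tools_in_account("12345"):
    print(tool)  # each entry is the raw JSON dict returned by Canvas

course_tools = tools.get_external_tools_in_course_by_sis_id("2013-spring-ABC-101")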
jeffFranklin/uw-restclients
restclients/canvas/external_tools.py
Python
apache-2.0
1,719
# Copyright 2015 Cloudbase Solutions.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from eventlet.green import subprocess
from eventlet import greenthread
from neutron_lib.utils import helpers
from oslo_log import log as logging
from oslo_utils import encodeutils

from neutron._i18n import _
from neutron.common import utils

LOG = logging.getLogger(__name__)


def create_process(cmd, addl_env=None):
    cmd = list(map(str, cmd))

    LOG.debug("Running command: %s", cmd)
    env = os.environ.copy()
    if addl_env:
        env.update(addl_env)

    obj = utils.subprocess_popen(cmd, shell=False,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 env=env,
                                 preexec_fn=None,
                                 close_fds=False)

    return obj, cmd


def execute(cmd, process_input=None, addl_env=None,
            check_exit_code=True, return_stderr=False, log_fail_as_error=True,
            extra_ok_codes=None, run_as_root=False, do_decode=True):

    try:
        if process_input is not None:
            _process_input = encodeutils.to_utf8(process_input)
        else:
            _process_input = None
        obj, cmd = create_process(cmd, addl_env=addl_env)
        _stdout, _stderr = obj.communicate(_process_input)
        obj.stdin.close()
        _stdout = helpers.safe_decode_utf8(_stdout)
        _stderr = helpers.safe_decode_utf8(_stderr)

        m = _("\nCommand: %(cmd)s\nExit code: %(code)s\nStdin: %(stdin)s\n"
              "Stdout: %(stdout)s\nStderr: %(stderr)s") % \
            {'cmd': cmd,
             'code': obj.returncode,
             'stdin': process_input or '',
             'stdout': _stdout,
             'stderr': _stderr}

        extra_ok_codes = extra_ok_codes or []
        if obj.returncode and obj.returncode in extra_ok_codes:
            obj.returncode = None

        log_msg = m.strip().replace('\n', '; ')
        if obj.returncode and log_fail_as_error:
            LOG.error(log_msg)
        else:
            LOG.debug(log_msg)

        if obj.returncode and check_exit_code:
            raise RuntimeError(m)
    finally:
        # NOTE(termie): this appears to be necessary to let the subprocess
        #               call clean something up in between calls, without
        #               it two execute calls in a row hangs the second one
        greenthread.sleep(0)

    return (_stdout, _stderr) if return_stderr else _stdout
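# A hedged usage sketch for execute() above (Windows-oriented, since this is the
# Windows agent helper); the command and the extra environment variable are example
# values, not something the module itself runs.
if __name__ == '__main__':
    out = execute(['cmd', '/c', 'echo', 'hello'],
                  addl_env={'EXAMPLE_VAR': '1'},
                  check_exit_code=True)
    print(out)  # decoded stdout of the command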
cloudbase/neutron
neutron/agent/windows/utils.py
Python
apache-2.0
3,141
#!/usr/bin/env python

### <command interpreter="python">send_to_cgd.py
### $pipeline_output $endpoint $cgd_url $output $runid $barcodeid $qcversion
### </command>

### Galaxy wrapper for cgd_client.jar.
### CGD_CLIENT is hard coded, but this is not expected to move.

import argparse
import subprocess
from subprocess import Popen, STDOUT, PIPE
import os
import sys
import shutil


def renameOutput(runid, barcodeid, endpoint):
    """
    CGD needs the filename to be restructured.
    """
    if endpoint == "uploadqcsheet":
        newfile = "/tmp/" + '_'.join([runid, barcodeid, "R1"]) + ".html"
    elif endpoint == "uploadqcsheetrtwo":
        newfile = "/tmp/" + '_'.join([runid, barcodeid, "R2"]) + ".html"
    else:
        print("Not sending FastQC.")
        return None

    return newfile


def splitUrl(url, n):
    return url.split('/')[-n:]


def main():

    # CGD_CLIENT = "/opt/installed/cgd_client-1.0.7.jar"
    CGD_CLIENT = "/home/exacloud/clinical/installedTest/cgd_client-1.0.7.jar"

    parser = argparse.ArgumentParser(description='')
    parser.add_argument("--pipeline_out", help='')
    parser.add_argument("--cgd_url", help='')
    parser.add_argument(dest='stdout_log', help='')
    parser.add_argument(dest='endpoint', help='')
    parser.add_argument("--runid", help='')
    parser.add_argument("--barcodeid", help='')
    parser.add_argument("--qcversion", help='')
    args = parser.parse_args()

    if args.endpoint != "none":
        newfile = renameOutput(args.runid, args.barcodeid, args.endpoint)
    else:
        id_list = splitUrl(args.cgd_url, 3)
        newfile = renameOutput(id_list[1], id_list[2], id_list[0])

    if args.endpoint == "uploadqcsheet" or args.endpoint == "uploadqcsheetrtwo":
        print("Copying to " + newfile)
        shutil.copyfile(args.pipeline_out, newfile)
        cmd = ["java", "-jar", CGD_CLIENT, "-f", newfile, "-n", args.endpoint]
    else:
        if args.pipeline_out:
            cmd = ["java", "-jar", CGD_CLIENT, "-f", args.pipeline_out, "-n", args.endpoint]
        else:
            cmd = ["java", "-jar", CGD_CLIENT, "-n", args.endpoint]

    if args.cgd_url:
        # cmd.append("-u")
        # cmd.append(args.cgd_url)
        cmd = ["java", "-jar", CGD_CLIENT, "-f", newfile, "-u", args.cgd_url]

    if args.runid:
        cmd.append("-r")
        cmd.append(args.runid)
    if args.barcodeid:
        cmd.append("-b")
        cmd.append(args.barcodeid)
    if args.qcversion:
        cmd.append("-v")
        cmd.append(args.qcversion)

    cmd.append("-d")

    print("We are running this command:")
    print(' '.join(cmd))
    proc = subprocess.call(cmd)

    outfile = open(args.stdout_log, 'w')
    outfile.write("The process has run.")
    outfile.close()

    ## Clean up temp file.
    if newfile is not None:
        os.remove(newfile)


if __name__ == "__main__":
    main()
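# A hedged example of how this Galaxy wrapper might be invoked from the command line;
# every path, id and version below is illustrative, not taken from a real run. The
# positional arguments are the stdout log and the endpoint name, in that order.
#
#   python send_to_cgd.py \
#       --pipeline_out /path/to/fastqc_report.html \
#       --cgd_url https://cgd.example.org/runid/barcode/uploadqcsheet \
#       --runid RUN001 --barcodeid BC01 --qcversion 0.11.5 \
#       stdout.log uploadqcsheet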
jhl667/galaxy_tools
tools/jhl_tools/send_to_cgd.py
Python
apache-2.0
2,897
from django.apps import AppConfig class InstaappConfig(AppConfig): name = 'instaapp'
rocity/dj-instagram
djinstagram/instaapp/apps.py
Python
apache-2.0
91
# Copyright 2015 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility for managing projects via the Cloud Resource Manager API.""" from gcloud.exceptions import NotFound class Project(object): """Projects are containers for your work on Google Cloud Platform. .. note:: A :class:`Project` can also be created via :meth:`Client.new_project() \ <gcloud.resource_manager.client.Client.new_project>` To manage labels on a :class:`Project`:: >>> from gcloud import resource_manager >>> client = resource_manager.Client() >>> project = client.new_project('purple-spaceship-123') >>> project.labels = {'color': 'purple'} >>> project.labels['environment'] = 'production' >>> project.update() See: https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects :type project_id: string :param project_id: The globally unique ID of the project. :type client: :class:`gcloud.resource_manager.client.Client` :param client: The Client used with this project. :type name: string :param name: The display name of the project. :type labels: dict :param labels: A list of labels associated with the project. """ def __init__(self, project_id, client, name=None, labels=None): self._client = client self.project_id = project_id self.name = name self.number = None self.labels = labels or {} self.status = None def __repr__(self): return '<Project: %r (%r)>' % (self.name, self.project_id) @classmethod def from_api_repr(cls, resource, client): """Factory: construct a project given its API representation. :type resource: dict :param resource: project resource representation returned from the API :type client: :class:`gcloud.resource_manager.client.Client` :param client: The Client used with this project. :rtype: :class:`gcloud.resource_manager.project.Project` :returns: The project created. """ project = cls(project_id=resource['projectId'], client=client) project.set_properties_from_api_repr(resource) return project def set_properties_from_api_repr(self, resource): """Update specific properties from its API representation.""" self.name = resource.get('name') self.number = resource['projectNumber'] self.labels = resource.get('labels', {}) self.status = resource['lifecycleState'] @property def full_name(self): """Fully-qualified name (ie, ``'projects/purple-spaceship-123'``).""" if not self.project_id: raise ValueError('Missing project ID.') return 'projects/%s' % (self.project_id) @property def path(self): """URL for the project (ie, ``'/projects/purple-spaceship-123'``).""" return '/%s' % (self.full_name) def _require_client(self, client): """Check client or verify over-ride. :type client: :class:`gcloud.resource_manager.client.Client` or ``NoneType`` :param client: the client to use. If not passed, falls back to the ``client`` stored on the current project. :rtype: :class:`gcloud.resource_manager.client.Client` :returns: The client passed in or the currently bound client. 
""" if client is None: client = self._client return client def create(self, client=None): """API call: create the project via a ``POST`` request. See https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/create :type client: :class:`gcloud.resource_manager.client.Client` or :data:`NoneType <types.NoneType>` :param client: the client to use. If not passed, falls back to the client stored on the current project. """ client = self._require_client(client) data = { 'projectId': self.project_id, 'name': self.name, 'labels': self.labels, } resp = client.connection.api_request(method='POST', path='/projects', data=data) self.set_properties_from_api_repr(resource=resp) def reload(self, client=None): """API call: reload the project via a ``GET`` request. This method will reload the newest metadata for the project. If you've created a new :class:`Project` instance via :meth:`Client.new_project() \ <gcloud.resource_manager.client.Client.new_project>`, this method will retrieve project metadata. .. warning:: This will overwrite any local changes you've made and not saved via :meth:`update`. See https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/get :type client: :class:`gcloud.resource_manager.client.Client` or :data:`NoneType <types.NoneType>` :param client: the client to use. If not passed, falls back to the client stored on the current project. """ client = self._require_client(client) # We assume the project exists. If it doesn't it will raise a NotFound # exception. resp = client.connection.api_request(method='GET', path=self.path) self.set_properties_from_api_repr(resource=resp) def exists(self, client=None): """API call: test the existence of a project via a ``GET`` request. See https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/get :type client: :class:`gcloud.resource_manager.client.Client` or :data:`NoneType <types.NoneType>` :param client: the client to use. If not passed, falls back to the client stored on the current project. :rtype: bool :returns: Boolean indicating existence of the project. """ client = self._require_client(client) try: # Note that we have to request the entire resource as the API # doesn't provide a way tocheck for existence only. client.connection.api_request(method='GET', path=self.path) except NotFound: return False else: return True def update(self, client=None): """API call: update the project via a ``PUT`` request. See https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/update :type client: :class:`gcloud.resource_manager.client.Client` or :data:`NoneType <types.NoneType>` :param client: the client to use. If not passed, falls back to the client stored on the current project. """ client = self._require_client(client) data = {'name': self.name, 'labels': self.labels} resp = client.connection.api_request(method='PUT', path=self.path, data=data) self.set_properties_from_api_repr(resp) def delete(self, client=None, reload_data=False): """API call: delete the project via a ``DELETE`` request. See: https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/delete This actually changes the status (``lifecycleState``) from ``ACTIVE`` to ``DELETE_REQUESTED``. Later (it's not specified when), the project will move into the ``DELETE_IN_PROGRESS`` state, which means the deleting has actually begun. :type client: :class:`gcloud.resource_manager.client.Client` or :data:`NoneType <types.NoneType>` :param client: the client to use. 
If not passed, falls back to the client stored on the current project. :type reload_data: bool :param reload_data: Whether to reload the project with the latest state. If you want to get the updated status, you'll want this set to :data:`True` as the DELETE method doesn't send back the updated project. Default: :data:`False`. """ client = self._require_client(client) client.connection.api_request(method='DELETE', path=self.path) # If the reload flag is set, reload the project. if reload_data: self.reload() def undelete(self, client=None, reload_data=False): """API call: undelete the project via a ``POST`` request. See https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/undelete This actually changes the project status (``lifecycleState``) from ``DELETE_REQUESTED`` to ``ACTIVE``. If the project has already reached a status of ``DELETE_IN_PROGRESS``, this request will fail and the project cannot be restored. :type client: :class:`gcloud.resource_manager.client.Client` or :data:`NoneType <types.NoneType>` :param client: the client to use. If not passed, falls back to the client stored on the current project. :type reload_data: bool :param reload_data: Whether to reload the project with the latest state. If you want to get the updated status, you'll want this set to :data:`True` as the DELETE method doesn't send back the updated project. Default: :data:`False`. """ client = self._require_client(client) client.connection.api_request(method='POST', path=self.path + ':undelete') # If the reload flag is set, reload the project. if reload_data: self.reload()
elibixby/gcloud-python
gcloud/resource_manager/project.py
Python
apache-2.0
10,587
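A hedged sketch of a typical lifecycle with the Project class above, assuming a configured resource_manager Client; the project ID, display name and labels are example values only.

# Hypothetical Project lifecycle; 'purple-spaceship-123' and the labels
# below are invented example values.
from gcloud import resource_manager

client = resource_manager.Client()
project = client.new_project('purple-spaceship-123')
project.name = 'Purple Spaceship'
project.labels = {'environment': 'staging'}
project.create()                    # POST /projects

project.reload()                    # GET; overwrites unsaved local changes
print(project.status)               # e.g. 'ACTIVE'

project.labels['color'] = 'purple'
project.update()                    # PUT with name and labels

project.delete(reload_data=True)    # lifecycleState becomes DELETE_REQUESTED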
import io import os import re import abc import csv import sys import email import pathlib import zipfile import operator import functools import itertools import collections from configparser import ConfigParser from contextlib import suppress from importlib import import_module from importlib.abc import MetaPathFinder from itertools import starmap __all__ = [ 'Distribution', 'DistributionFinder', 'PackageNotFoundError', 'distribution', 'distributions', 'entry_points', 'files', 'metadata', 'requires', 'version', ] class PackageNotFoundError(ModuleNotFoundError): """The package was not found.""" class EntryPoint(collections.namedtuple('EntryPointBase', 'name value group')): """An entry point as defined by Python packaging conventions. See `the packaging docs on entry points <https://packaging.python.org/specifications/entry-points/>`_ for more information. """ pattern = re.compile( r'(?P<module>[\w.]+)\s*' r'(:\s*(?P<attr>[\w.]+))?\s*' r'(?P<extras>\[.*\])?\s*$' ) """ A regular expression describing the syntax for an entry point, which might look like: - module - package.module - package.module:attribute - package.module:object.attribute - package.module:attr [extra1, extra2] Other combinations are possible as well. The expression is lenient about whitespace around the ':', following the attr, and following any extras. """ def load(self): """Load the entry point from its definition. If only a module is indicated by the value, return that module. Otherwise, return the named object. """ match = self.pattern.match(self.value) module = import_module(match.group('module')) attrs = filter(None, (match.group('attr') or '').split('.')) return functools.reduce(getattr, attrs, module) @property def extras(self): match = self.pattern.match(self.value) return list(re.finditer(r'\w+', match.group('extras') or '')) @classmethod def _from_config(cls, config): return [ cls(name, value, group) for group in config.sections() for name, value in config.items(group) ] @classmethod def _from_text(cls, text): config = ConfigParser(delimiters='=') # case sensitive: https://stackoverflow.com/q/1611799/812183 config.optionxform = str try: config.read_string(text) except AttributeError: # pragma: nocover # Python 2 has no read_string config.readfp(io.StringIO(text)) return EntryPoint._from_config(config) def __iter__(self): """ Supply iter so one may construct dicts of EntryPoints easily. """ return iter((self.name, self)) class PackagePath(pathlib.PurePosixPath): """A reference to a path in a package""" def read_text(self, encoding='utf-8'): with self.locate().open(encoding=encoding) as stream: return stream.read() def read_binary(self): with self.locate().open('rb') as stream: return stream.read() def locate(self): """Return a path-like object for this path""" return self.dist.locate_file(self) class FileHash: def __init__(self, spec): self.mode, _, self.value = spec.partition('=') def __repr__(self): return '<FileHash mode: {} value: {}>'.format(self.mode, self.value) class Distribution: """A Python distribution package.""" @abc.abstractmethod def read_text(self, filename): """Attempt to load metadata file given by the name. :param filename: The name of the file in the distribution info. :return: The text if found, otherwise None. """ @abc.abstractmethod def locate_file(self, path): """ Given a path to a file in this distribution, return a path to it. """ @classmethod def from_name(cls, name): """Return the Distribution for the given package name. :param name: The name of the distribution package to search for. 
:return: The Distribution instance (or subclass thereof) for the named package, if found. :raises PackageNotFoundError: When the named package's distribution metadata cannot be found. """ for resolver in cls._discover_resolvers(): dists = resolver(DistributionFinder.Context(name=name)) dist = next(dists, None) if dist is not None: return dist else: raise PackageNotFoundError(name) @classmethod def discover(cls, **kwargs): """Return an iterable of Distribution objects for all packages. Pass a ``context`` or pass keyword arguments for constructing a context. :context: A ``DistributionFinder.Context`` object. :return: Iterable of Distribution objects for all packages. """ context = kwargs.pop('context', None) if context and kwargs: raise ValueError("cannot accept context and kwargs") context = context or DistributionFinder.Context(**kwargs) return itertools.chain.from_iterable( resolver(context) for resolver in cls._discover_resolvers() ) @staticmethod def at(path): """Return a Distribution for the indicated metadata path :param path: a string or path-like object :return: a concrete Distribution instance for the path """ return PathDistribution(pathlib.Path(path)) @staticmethod def _discover_resolvers(): """Search the meta_path for resolvers.""" declared = ( getattr(finder, 'find_distributions', None) for finder in sys.meta_path ) return filter(None, declared) @property def metadata(self): """Return the parsed metadata for this Distribution. The returned object will have keys that name the various bits of metadata. See PEP 566 for details. """ text = ( self.read_text('METADATA') or self.read_text('PKG-INFO') # This last clause is here to support old egg-info files. Its # effect is to just end up using the PathDistribution's self._path # (which points to the egg-info file) attribute unchanged. or self.read_text('') ) return email.message_from_string(text) @property def version(self): """Return the 'Version' metadata for the distribution package.""" return self.metadata['Version'] @property def entry_points(self): return EntryPoint._from_text(self.read_text('entry_points.txt')) @property def files(self): """Files in this distribution. :return: List of PackagePath for this distribution or None Result is `None` if the metadata file that enumerates files (i.e. RECORD for dist-info or SOURCES.txt for egg-info) is missing. Result may be empty if the metadata exists but is empty. """ file_lines = self._read_files_distinfo() or self._read_files_egginfo() def make_file(name, hash=None, size_str=None): result = PackagePath(name) result.hash = FileHash(hash) if hash else None result.size = int(size_str) if size_str else None result.dist = self return result return file_lines and list(starmap(make_file, csv.reader(file_lines))) def _read_files_distinfo(self): """ Read the lines of RECORD """ text = self.read_text('RECORD') return text and text.splitlines() def _read_files_egginfo(self): """ SOURCES.txt might contain literal commas, so wrap each line in quotes. 
""" text = self.read_text('SOURCES.txt') return text and map('"{}"'.format, text.splitlines()) @property def requires(self): """Generated requirements specified for this Distribution""" reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs() return reqs and list(reqs) def _read_dist_info_reqs(self): return self.metadata.get_all('Requires-Dist') def _read_egg_info_reqs(self): source = self.read_text('requires.txt') return source and self._deps_from_requires_text(source) @classmethod def _deps_from_requires_text(cls, source): section_pairs = cls._read_sections(source.splitlines()) sections = { section: list(map(operator.itemgetter('line'), results)) for section, results in itertools.groupby(section_pairs, operator.itemgetter('section')) } return cls._convert_egg_info_reqs_to_simple_reqs(sections) @staticmethod def _read_sections(lines): section = None for line in filter(None, lines): section_match = re.match(r'\[(.*)\]$', line) if section_match: section = section_match.group(1) continue yield locals() @staticmethod def _convert_egg_info_reqs_to_simple_reqs(sections): """ Historically, setuptools would solicit and store 'extra' requirements, including those with environment markers, in separate sections. More modern tools expect each dependency to be defined separately, with any relevant extras and environment markers attached directly to that requirement. This method converts the former to the latter. See _test_deps_from_requires_text for an example. """ def make_condition(name): return name and 'extra == "{name}"'.format(name=name) def parse_condition(section): section = section or '' extra, sep, markers = section.partition(':') if extra and markers: markers = '({markers})'.format(markers=markers) conditions = list(filter(None, [markers, make_condition(extra)])) return '; ' + ' and '.join(conditions) if conditions else '' for section, deps in sections.items(): for dep in deps: yield dep + parse_condition(section) class DistributionFinder(MetaPathFinder): """ A MetaPathFinder capable of discovering installed distributions. """ class Context: name = None """ Specific name for which a distribution finder should match. """ def __init__(self, **kwargs): vars(self).update(kwargs) @property def path(self): """ The path that a distribution finder should search. """ return vars(self).get('path', sys.path) @property def pattern(self): return '.*' if self.name is None else re.escape(self.name) @abc.abstractmethod def find_distributions(self, context=Context()): """ Find distributions. Return an iterable of all Distribution instances capable of loading the metadata for packages matching the ``context``, a DistributionFinder.Context instance. """ class MetadataPathFinder(DistributionFinder): @classmethod def find_distributions(cls, context=DistributionFinder.Context()): """ Find distributions. Return an iterable of all Distribution instances capable of loading the metadata for packages matching ``context.name`` (or all names if ``None`` indicated) along the paths in the list of directories ``context.path``. 
""" found = cls._search_paths(context.pattern, context.path) return map(PathDistribution, found) @classmethod def _search_paths(cls, pattern, paths): """Find metadata directories in paths heuristically.""" return itertools.chain.from_iterable( cls._search_path(path, pattern) for path in map(cls._switch_path, paths) ) @staticmethod def _switch_path(path): PYPY_OPEN_BUG = False if not PYPY_OPEN_BUG or os.path.isfile(path): # pragma: no branch with suppress(Exception): return zipfile.Path(path) return pathlib.Path(path) @classmethod def _matches_info(cls, normalized, item): template = r'{pattern}(-.*)?\.(dist|egg)-info' manifest = template.format(pattern=normalized) return re.match(manifest, item.name, flags=re.IGNORECASE) @classmethod def _matches_legacy(cls, normalized, item): template = r'{pattern}-.*\.egg[\\/]EGG-INFO' manifest = template.format(pattern=normalized) return re.search(manifest, str(item), flags=re.IGNORECASE) @classmethod def _search_path(cls, root, pattern): if not root.is_dir(): return () normalized = pattern.replace('-', '_') return (item for item in root.iterdir() if cls._matches_info(normalized, item) or cls._matches_legacy(normalized, item)) class PathDistribution(Distribution): def __init__(self, path): """Construct a distribution from a path to the metadata directory. :param path: A pathlib.Path or similar object supporting .joinpath(), __div__, .parent, and .read_text(). """ self._path = path def read_text(self, filename): with suppress(FileNotFoundError, IsADirectoryError, KeyError, NotADirectoryError, PermissionError): return self._path.joinpath(filename).read_text(encoding='utf-8') read_text.__doc__ = Distribution.read_text.__doc__ def locate_file(self, path): return self._path.parent / path def distribution(distribution_name): """Get the ``Distribution`` instance for the named package. :param distribution_name: The name of the distribution package as a string. :return: A ``Distribution`` instance (or subclass thereof). """ return Distribution.from_name(distribution_name) def distributions(**kwargs): """Get all ``Distribution`` instances in the current environment. :return: An iterable of ``Distribution`` instances. """ return Distribution.discover(**kwargs) def metadata(distribution_name): """Get the metadata for the named package. :param distribution_name: The name of the distribution package to query. :return: An email.Message containing the parsed metadata. """ return Distribution.from_name(distribution_name).metadata def version(distribution_name): """Get the version string for the named package. :param distribution_name: The name of the distribution package to query. :return: The version string for the package as defined in the package's "Version" metadata key. """ return distribution(distribution_name).version def entry_points(): """Return EntryPoint objects for all installed packages. :return: EntryPoint objects for all installed packages. """ eps = itertools.chain.from_iterable( dist.entry_points for dist in distributions()) by_group = operator.attrgetter('group') ordered = sorted(eps, key=by_group) grouped = itertools.groupby(ordered, by_group) return { group: tuple(eps) for group, eps in grouped } def files(distribution_name): """Return a list of files for the named package. :param distribution_name: The name of the distribution package to query. :return: List of files composing the distribution. """ return distribution(distribution_name).files def requires(distribution_name): """ Return a list of requirements for the named package. 
:return: An iterator of requirements, suitable for packaging.requirement.Requirement. """ return distribution(distribution_name).requires
batermj/algorithm-challenger
code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/importlib/metadata.py
Python
apache-2.0
16,174
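A short sketch of the module-level helpers defined above; 'wheel' is an arbitrary example of an installed distribution name and would need to be present in the environment for the calls to succeed.

# Example queries against installed-package metadata; 'wheel' is only an
# illustrative package name.
from importlib import metadata

print(metadata.version('wheel'))        # version string from the metadata
print(metadata.requires('wheel'))       # list of requirement strings, or None

dist = metadata.distribution('wheel')
print(dist.metadata['Summary'])         # email.Message-style access

# entry_points() returns a dict mapping group name to a tuple of EntryPoint
for ep in metadata.entry_points().get('console_scripts', ()):
    print(ep.name, '->', ep.value)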
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('groups', '0001_initial'), ] operations = [ migrations.AddField( model_name='group', name='members', field=models.ManyToManyField(to=settings.AUTH_USER_MODEL), ), ]
yrchen/CommonRepo
commonrepo/groups/migrations/0002_group_members.py
Python
apache-2.0
508
from django.conf.urls import patterns, include, url from django.contrib import admin from django.conf import settings admin.autodiscover() from front.views import * from front.views import views as front_views from django.views.decorators.csrf import csrf_exempt if not settings.DEBUG: s = {'SSL': settings.ENABLE_SSL} else: s = {} urlpatterns = patterns('', # Examples: # url(r'^$', 'DisSoNet.views.home', name='home'), url(r'^$', HomeView.as_view(), name='home'), url(r'^test/', front_views.test, name='test'), url(r'^privacy/', front_views.privacy, name='privacy'), url(r'^stream_debug/', front_views.stream_debug, name='stream'), url(r'^admin/', include(admin.site.urls)), url(r'^github/setup/', GitHubView.as_view(), s, name='initGithub'), url(r'^accounts/login/', LoginView.as_view(), s, name='login'), url(r'^accounts/logout/', LogoutView.as_view(), s, name='logout'), url(r'^accounts/view/', UserView.as_view(), s, name='user_view'), url(r'^accounts/register/', RegisterView.as_view(), s, name='register'), url(r'^accounts/reset/$', front_views.reset, s, name='reset'), url(r'^accounts/reset/e/(?P<email>[\w-]+)/$', front_views.reset, s, name='reset'), url(r'^accounts/reset/done/$', front_views.reset_done, s, name='reset_done'), url(r'^accounts/reset/confirm/(?P<uidb64>[0-9A-Za-z]+)-(?P<token>[\w-]+)/$', front_views.reset_confirm, s, name='reset_confirm'), url(r'^accounts/reset/complete/$', front_views.reset_complete, name='reset_complete'), # urls for post(s)/ url(r'^post/?$', PublicPosts.as_view(), name='public_posts'), url(r'^posts/?$', PublicPosts.as_view(), name='public_posts'), # urls for post(s)/<post_id>/ url(r'^post/(?P<post_id>[\w-]+)/?$', PostResource.as_view(), name='post_resource'), url(r'^posts/(?P<post_id>[\w-]+)/?$', PostResource.as_view(), name='post_resource'), # urls for post(s)/<post_id>/comments/ url(r'^post/(?P<post_id>[\w-]+)/comments/?$', csrf_exempt(PostComments.as_view()), name='post_comments'), url(r'^posts/(?P<post_id>[\w-]+)/comments/?$', csrf_exempt(PostComments.as_view()), name='post_comments'), url(r'^author/posts/?$', AuthorStream.as_view(), name='author_posts'), url(r'^author/(?P<author_id>[\w-]+)/posts/?$', VisiblePostToUser.as_view(), name='visibile_posts'), url(r'^author/(?P<author_id>[\w-]+)/?$', AuthorProfile.as_view(), name='author_profile'), url(r'^friendrequest/$', csrf_exempt(FriendRequestView.as_view()), name='friend_request'), url(r'^friends/(?P<user_id_1>[\w-]+)/(?P<user_id_2>[\w-]+)/$', AreFriends.as_view(), name='are_friends'), url(r'^friends/?', FriendsView.as_view(), s, name='friends_view'), url(r'^test_rest/(?P<id>[\w-]+)/?$', front_views.test_rest, name="test_rest"), )
Solanar/CMPUT410-Project
DisSoNet/DisSoNet/urls.py
Python
apache-2.0
2,954
#!/usr/bin/env python3 """ Copyright 2015 Stefano Benvenuti <ste.benve86@gmail.com> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import sys import os import json import shutil # helper function for reading a file content def read_file(filename): f = None try: f = open(filename) content = json.load(f) except Exception as e: print("File \"%s\" cannot be opened or read: %s" % (filename, e)) sys.exit(1) finally: if f is not None: f.close() return content if len(sys.argv) != 2: print(""" USAGE: ./add_poem.py JSON_DELTA_FILE_PATH """) sys.exit(1) conf_file = os.path.join("..","poems","poems.json") # reads old configuration file and new content content = read_file(conf_file) new_content = read_file(sys.argv[1]) # merge the values content.update(new_content) # write file shutil.copyfile(conf_file, conf_file + ".bak") f = None try: f = open(conf_file,'w') json.dump(content, f) except Exception as e: print("File \"%s\" cannot be opened or written: %s" % (conf_file, e)) sys.exit(1) finally: if f is not None: f.close()
stebenve86/poem_reader
utilities/add_poem.py
Python
apache-2.0
1,540
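To illustrate the merge behaviour of the script above: the delta file's top-level keys are applied with dict.update(), so a key that already exists in poems.json is overwritten rather than merged. A small sketch with invented poem entries:

# Invented data showing how content.update(new_content) behaves.
existing = {"poem_1": {"title": "Old Title", "author": "A"}}
delta = {"poem_1": {"title": "New Title", "author": "A"},
         "poem_2": {"title": "Another Poem", "author": "B"}}

existing.update(delta)
assert existing["poem_1"]["title"] == "New Title"    # replaced, not merged
assert "poem_2" in existing                          # new entry added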
# -*- coding: utf-8 -*- # FOGLAMP_BEGIN # See: http://foglamp.readthedocs.io/ # FOGLAMP_END """ Storage layer python client """ __author__ = "Praveen Garg, Amarendra K Sinha" __copyright__ = "Copyright (c) 2017 OSIsoft, LLC" __license__ = "Apache 2.0" __version__ = "${VERSION}" import aiohttp import http.client import json import time from abc import ABC, abstractmethod from foglamp.common import logger from foglamp.common.service_record import ServiceRecord from foglamp.common.storage_client.exceptions import * from foglamp.common.storage_client.utils import Utils _LOGGER = logger.setup(__name__) class AbstractStorage(ABC): """ abstract class for storage client """ def __init__(self): super().__init__() @abstractmethod def connect(self): pass @abstractmethod def disconnect(self): pass # Allow with context def __enter__(self): return self.connect() def __exit__(self, *args): self.disconnect() class StorageClientAsync(AbstractStorage): def __init__(self, core_management_host, core_management_port, svc=None): try: if svc: self.service = svc else: self.connect(core_management_host, core_management_port) self.base_url = '{}:{}'.format(self.service._address, self.service._port) self.management_api_url = '{}:{}'.format(self.service._address, self.service._management_port) except Exception: raise InvalidServiceInstance @property def base_url(self): return self.__base_url @base_url.setter def base_url(self, url): self.__base_url = url @property def service(self): return self.__service @service.setter def service(self, svc): if not isinstance(svc, ServiceRecord): w_msg = 'Storage should be a valid FogLAMP micro-service instance' _LOGGER.warning(w_msg) raise InvalidServiceInstance if not getattr(svc, "_type") == "Storage": w_msg = 'Storage should be a valid *Storage* micro-service instance' _LOGGER.warning(w_msg) raise InvalidServiceInstance self.__service = svc def _get_storage_service(self, host, port): """ get Storage service """ conn = http.client.HTTPConnection("{0}:{1}".format(host, port)) # TODO: need to set http / https based on service protocol conn.request('GET', url='/foglamp/service?name=FogLAMP%20Storage') r = conn.getresponse() if r.status in range(400, 500): _LOGGER.error("Get Service: Client error code: %d, %s", r.status, r.reason) if r.status in range(500, 600): _LOGGER.error("Get Service: Server error code: %d, %s", r.status, r.reason) res = r.read().decode() conn.close() response = json.loads(res) svc = response["services"][0] return svc def connect(self, core_management_host, core_management_port): svc = self._get_storage_service(host=core_management_host, port=core_management_port) if len(svc) == 0: raise InvalidServiceInstance self.service = ServiceRecord(s_id=svc["id"], s_name=svc["name"], s_type=svc["type"], s_port=svc["service_port"], m_port=svc["management_port"], s_address=svc["address"], s_protocol=svc["protocol"]) return self def disconnect(self): pass # FIXME: As per JIRA-615 strict=false at python side (interim solution) # fix is required at storage layer (error message with escape sequence using a single quote) async def insert_into_tbl(self, tbl_name, data): """ insert json payload into given table :param tbl_name: :param data: JSON payload :return: :Example: curl -X POST http://0.0.0.0:8080/storage/table/statistics_history -d @payload2.json @payload2.json content: { "key" : "SENT_test", "history_ts" : "now()", "value" : 1 } """ if not tbl_name: raise ValueError("Table name is missing") if not data: raise ValueError("Data to insert is missing") if not 
Utils.is_json(data): raise TypeError("Provided data to insert must be a valid JSON") post_url = '/storage/table/{tbl_name}'.format(tbl_name=tbl_name) url = 'http://' + self.base_url + post_url async with aiohttp.ClientSession() as session: async with session.post(url, data=data) as resp: status_code = resp.status jdoc = await resp.json() if status_code not in range(200, 209): _LOGGER.info("POST %s, with payload: %s", post_url, data) _LOGGER.error("Error code: %d, reason: %s, details: %s", resp.status, resp.reason, jdoc) raise StorageServerError(code=resp.status, reason=resp.reason, error=jdoc) return jdoc async def update_tbl(self, tbl_name, data): """ update json payload for specified condition into given table :param tbl_name: :param data: JSON payload :return: :Example: curl -X PUT http://0.0.0.0:8080/storage/table/statistics_history -d @payload3.json @payload3.json content: { "condition" : { "column" : "key", "condition" : "=", "value" : "SENT_test" }, "values" : { "value" : 44444 } } """ if not tbl_name: raise ValueError("Table name is missing") if not data: raise ValueError("Data to update is missing") if not Utils.is_json(data): raise TypeError("Provided data to update must be a valid JSON") put_url = '/storage/table/{tbl_name}'.format(tbl_name=tbl_name) url = 'http://' + self.base_url + put_url async with aiohttp.ClientSession() as session: async with session.put(url, data=data) as resp: status_code = resp.status jdoc = await resp.json() if status_code not in range(200, 209): _LOGGER.info("PUT %s, with payload: %s", put_url, data) _LOGGER.error("Error code: %d, reason: %s, details: %s", resp.status, resp.reason, jdoc) raise StorageServerError(code=resp.status, reason=resp.reason, error=jdoc) return jdoc async def delete_from_tbl(self, tbl_name, condition=None): """ Delete for specified condition from given table :param tbl_name: :param condition: JSON payload :return: :Example: curl -X DELETE http://0.0.0.0:8080/storage/table/statistics_history -d @payload_del.json @payload_del.json content: "condition" : { "column" : "key", "condition" : "=", "value" : "SENT_test" } """ if not tbl_name: raise ValueError("Table name is missing") del_url = '/storage/table/{tbl_name}'.format(tbl_name=tbl_name) if condition and (not Utils.is_json(condition)): raise TypeError("condition payload must be a valid JSON") url = 'http://' + self.base_url + del_url async with aiohttp.ClientSession() as session: async with session.delete(url, data=condition) as resp: status_code = resp.status jdoc = await resp.json() if status_code not in range(200, 209): _LOGGER.info("DELETE %s, with payload: %s", del_url, condition if condition else '') _LOGGER.error("Error code: %d, reason: %s, details: %s", resp.status, resp.reason, jdoc) raise StorageServerError(code=resp.status, reason=resp.reason, error=jdoc) return jdoc async def query_tbl(self, tbl_name, query=None): """ Simple SELECT query for the specified table with optional query params :param tbl_name: :param query: query params in format k1=v1&k2=v2 :return: :Example: curl -X GET http://0.0.0.0:8080/storage/table/statistics_history curl -X GET http://0.0.0.0:8080/storage/table/statistics_history?key=PURGE """ if not tbl_name: raise ValueError("Table name is missing") get_url = '/storage/table/{tbl_name}'.format(tbl_name=tbl_name) if query: # else SELECT * FROM <tbl_name> get_url += '?{}'.format(query) url = 'http://' + self.base_url + get_url async with aiohttp.ClientSession() as session: async with session.get(url) as resp: status_code = resp.status jdoc = 
await resp.json() if status_code not in range(200, 209): _LOGGER.info("GET %s", get_url) _LOGGER.error("Error code: %d, reason: %s, details: %s", resp.status, resp.reason, jdoc) raise StorageServerError(code=resp.status, reason=resp.reason, error=jdoc) return jdoc async def query_tbl_with_payload(self, tbl_name, query_payload): """ Complex SELECT query for the specified table with a payload :param tbl_name: :param query_payload: payload in valid JSON format :return: :Example: curl -X PUT http://0.0.0.0:8080/storage/table/statistics_history/query -d @payload.json @payload.json content: "where" : { "column" : "key", "condition" : "=", "value" : "SENT_test" } """ if not tbl_name: raise ValueError("Table name is missing") if not query_payload: raise ValueError("Query payload is missing") if not Utils.is_json(query_payload): raise TypeError("Query payload must be a valid JSON") put_url = '/storage/table/{tbl_name}/query'.format(tbl_name=tbl_name) url = 'http://' + self.base_url + put_url async with aiohttp.ClientSession() as session: async with session.put(url, data=query_payload) as resp: status_code = resp.status jdoc = await resp.json() if status_code not in range(200, 209): _LOGGER.info("PUT %s, with query payload: %s", put_url, query_payload) _LOGGER.error("Error code: %d, reason: %s, details: %s", resp.status, resp.reason, jdoc) raise StorageServerError(code=resp.status, reason=resp.reason, error=jdoc) return jdoc async def post_snapshot(self, tbl_name): """Create a table snapshot :param tbl_name: :return: :Example: curl -X POST http://0.0.0.0:8080/storage/table/configuration/snapshot """ post_url = '/storage/table/{tbl_name}/snapshot'.format(tbl_name=tbl_name) data = {"id": str(int(time.time()))} url = 'http://' + self.base_url + post_url async with aiohttp.ClientSession() as session: async with session.post(url, data=json.dumps(data)) as resp: status_code = resp.status jdoc = await resp.text() if status_code not in range(200, 209): _LOGGER.info("POST %s", post_url) _LOGGER.error("Error code: %d, reason: %s, details: %s", resp.status, resp.reason, jdoc) raise StorageServerError(code=resp.status, reason=resp.reason, error=jdoc) return json.loads(jdoc) async def put_snapshot(self, tbl_name, snapshot_id): """Restore a table snapshot :param tbl_name: :param snapshot_id: :return: :Example: curl -X PUT http://0.0.0.0:8080/storage/table/configuration/snapshot/cea17db8-6ccc-11e7-907b-a6006ad3dba0 """ put_url = '/storage/table/{tbl_name}/snapshot/{id}'.format(tbl_name=tbl_name, id=snapshot_id) url = 'http://' + self.base_url + put_url async with aiohttp.ClientSession() as session: async with session.put(url) as resp: status_code = resp.status jdoc = await resp.text() if status_code not in range(200, 209): _LOGGER.info("PUT %s", put_url) _LOGGER.error("Error code: %d, reason: %s, details: %s", resp.status, resp.reason, jdoc) raise StorageServerError(code=resp.status, reason=resp.reason, error=jdoc) return json.loads(jdoc) async def delete_snapshot(self, tbl_name, snapshot_id): """Delete a table snapshot :param tbl_name: :param snapshot_id: :return: :Example: curl -X DELETE http://0.0.0.0:8080/storage/table/configuration/snapshot/cea17db8-6ccc-11e7-907b-a6006ad3dba0 """ delete_url = '/storage/table/{tbl_name}/snapshot/{id}'.format(tbl_name=tbl_name, id=snapshot_id) url = 'http://' + self.base_url + delete_url async with aiohttp.ClientSession() as session: async with session.delete(url) as resp: status_code = resp.status jdoc = await resp.text() if status_code not in range(200, 209): 
_LOGGER.info("DELETE %s", delete_url) _LOGGER.error("Error code: %d, reason: %s, details: %s", resp.status, resp.reason, jdoc) raise StorageServerError(code=resp.status, reason=resp.reason, error=jdoc) return json.loads(jdoc) async def get_snapshot(self, tbl_name): """Get a table snapshot :param tbl_name: :return: :Example: curl -X GET http://0.0.0.0:8080/storage/table/configuration/snapshot """ get_url = '/storage/table/{tbl_name}/snapshot'.format(tbl_name=tbl_name) url = 'http://' + self.base_url + get_url async with aiohttp.ClientSession() as session: async with session.get(url) as resp: status_code = resp.status jdoc = await resp.text() if status_code not in range(200, 209): _LOGGER.info("GET %s", get_url) _LOGGER.error("Error code: %d, reason: %s, details: %s", resp.status, resp.reason, jdoc) raise StorageServerError(code=resp.status, reason=resp.reason, error=jdoc) return json.loads(jdoc) class ReadingsStorageClientAsync(StorageClientAsync): """ Readings table operations """ _base_url = "" def __init__(self, core_mgt_host, core_mgt_port, svc=None): super().__init__(core_management_host=core_mgt_host, core_management_port=core_mgt_port, svc=svc) self.__class__._base_url = self.base_url async def append(self, readings): """ :param readings: :return: :Example: curl -X POST http://0.0.0.0:8080/storage/reading -d @payload.json { "readings" : [ { "asset_code": "MyAsset", "reading" : { "rate" : 18.4 }, "user_ts" : "2017-09-21 15:00:09.025655" }, { "asset_code": "MyAsset", "reading" : { "rate" : 45.1 }, "user_ts" : "2017-09-21 15:03:09.025655" } ] } """ if not readings: raise ValueError("Readings payload is missing") if not Utils.is_json(readings): raise TypeError("Readings payload must be a valid JSON") url = 'http://' + self._base_url + '/storage/reading' async with aiohttp.ClientSession() as session: async with session.post(url, data=readings) as resp: status_code = resp.status jdoc = await resp.json() if status_code not in range(200, 209): _LOGGER.error("POST url %s with payload: %s, Error code: %d, reason: %s, details: %s", '/storage/reading', readings, resp.status, resp.reason, jdoc) raise StorageServerError(code=resp.status, reason=resp.reason, error=jdoc) return jdoc async def fetch(self, reading_id, count): """ :param reading_id: the first reading ID in the block that is retrieved :param count: the number of readings to return, if available :return: :Example: curl -X GET http://0.0.0.0:8080/storage/reading?id=2&count=3 """ if reading_id is None: raise ValueError("first reading id to retrieve the readings block is required") if count is None: raise ValueError("count is required to retrieve the readings block") try: count = int(count) except ValueError: raise get_url = '/storage/reading?id={}&count={}'.format(reading_id, count) url = 'http://' + self._base_url + get_url async with aiohttp.ClientSession() as session: async with session.get(url) as resp: status_code = resp.status jdoc = await resp.json() if status_code not in range(200, 209): _LOGGER.error("GET url: %s, Error code: %d, reason: %s, details: %s", url, resp.status, resp.reason, jdoc) raise StorageServerError(code=resp.status, reason=resp.reason, error=jdoc) return jdoc async def query(self, query_payload): """ :param query_payload: :return: :Example: curl -X PUT http://0.0.0.0:8080/storage/reading/query -d @payload.json @payload.json content: { "where" : { "column" : "asset_code", "condition" : "=", "value" : "MyAsset" } } """ if not query_payload: raise ValueError("Query payload is missing") if not 
Utils.is_json(query_payload): raise TypeError("Query payload must be a valid JSON") url = 'http://' + self._base_url + '/storage/reading/query' async with aiohttp.ClientSession() as session: async with session.put(url, data=query_payload) as resp: status_code = resp.status jdoc = await resp.json() if status_code not in range(200, 209): _LOGGER.error("PUT url %s with query payload: %s, Error code: %d, reason: %s, details: %s", '/storage/reading/query', query_payload, resp.status, resp.reason, jdoc) raise StorageServerError(code=resp.status, reason=resp.reason, error=jdoc) return jdoc async def purge(self, age=None, sent_id=0, size=None, flag=None): """ Purge readings based on the age of the readings :param age: the maximum age of data to retain, expressed in hours :param sent_id: the id of the last reading to be sent out of FogLAMP :param size: the maximum size of data to retain, expressed in Kbytes :param flag: define what to do about unsent readings. Valid options are retain or purge :return: a JSON with the number of readings removed, the number of unsent readings removed and the number of readings that remain :Example: curl -X PUT "http://0.0.0.0:<storage_service_port>/storage/reading/purge?age=<age>&sent=<reading id>&flags=<flags>" curl -X PUT "http://0.0.0.0:<storage_service_port>/storage/reading/purge?age=24&sent=2&flags=PURGE" curl -X PUT "http://0.0.0.0:<storage_service_port>/storage/reading/purge?size=1024&sent=0&flags=PURGE" """ valid_flags = ['retain', 'purge'] if flag and flag.lower() not in valid_flags: raise InvalidReadingsPurgeFlagParameters if age and size: raise PurgeOnlyOneOfAgeAndSize if not age and not size: raise PurgeOneOfAgeAndSize # age should be int # size should be int # sent_id should again be int try: if age is not None: _age = int(age) if size is not None: _size = int(size) _sent_id = int(sent_id) except ValueError: raise if age: put_url = '/storage/reading/purge?age={}&sent={}'.format(_age, _sent_id) if size: put_url = '/storage/reading/purge?size={}&sent={}'.format(_size, _sent_id) if flag: put_url += "&flags={}".format(flag.lower()) url = 'http://' + self._base_url + put_url async with aiohttp.ClientSession() as session: async with session.put(url, data=None) as resp: status_code = resp.status jdoc = await resp.json() if status_code not in range(200, 209): _LOGGER.error("PUT url %s, Error code: %d, reason: %s, details: %s", put_url, resp.status, resp.reason, jdoc) raise StorageServerError(code=resp.status, reason=resp.reason, error=jdoc) return jdoc
foglamp/FogLAMP
python/foglamp/common/storage_client/storage_client.py
Python
apache-2.0
21,779
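A hedged sketch of driving StorageClientAsync from the module above; the core-management host and port, the table name and the payload are placeholders, and a reachable FogLAMP storage service is assumed.

# Hypothetical usage of StorageClientAsync; host, port and the payload are
# invented, and a running FogLAMP storage service is assumed.
import asyncio
import json

from foglamp.common.storage_client.storage_client import StorageClientAsync


async def main():
    storage = StorageClientAsync('localhost', 42225)   # core management API
    row = json.dumps({"key": "SENT_test", "history_ts": "now()", "value": 1})
    await storage.insert_into_tbl('statistics_history', row)

    result = await storage.query_tbl('statistics_history', 'key=SENT_test')
    print(result)

asyncio.get_event_loop().run_until_complete(main())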
# Copyright 2016 Sotera Defense Solutions Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import six if six.PY2: from ConfigParser import SafeConfigParser else: from configparser import SafeConfigParser class AggregateMicroPathConfig: config_file = "" table_name = "" table_schema_id = "" table_schema_dt = "" table_schema_lat = "" table_schema_lon = "" time_filter = 0 distance_filter = 0 tripLat1 = 0 tripLon1 = 0 tripLat2 = 0 tripLon2 = 0 tripname = "" resolutionLat = 0 resolutionLon = 0 tripLatMin = 0 tripLatMax = 0 tripLonMin = 0 tripLonMax = 0 triplineBlankets = [] def __init__(self, config, basePath = "./"): configParser = SafeConfigParser() configParser.read(basePath + config) self.config_file = config self.database_name = configParser.get("AggregateMicroPath", "database_name") self.table_name = configParser.get("AggregateMicroPath", "table_name") self.table_schema_id = configParser.get("AggregateMicroPath", "table_schema_id") self.table_schema_dt = configParser.get("AggregateMicroPath", "table_schema_dt") self.table_schema_lat = configParser.get("AggregateMicroPath", "table_schema_lat") self.table_schema_lon = configParser.get("AggregateMicroPath", "table_schema_lon") self.time_filter = int(configParser.get("AggregateMicroPath", "time_filter")) self.distance_filter = int(configParser.get("AggregateMicroPath", "distance_filter")) self.tripLat1 = float(configParser.get("AggregateMicroPath", "lower_left_lat")) self.tripLon1 = float(configParser.get("AggregateMicroPath", "lower_left_lon")) self.tripLat2 = float(configParser.get("AggregateMicroPath", "upper_right_lat")) self.tripLon2 = float(configParser.get("AggregateMicroPath", "upper_right_lon")) self.tripname = configParser.get("AggregateMicroPath", "trip_name") self.resolutionLat = float(configParser.get("AggregateMicroPath", "resolution_lat")) self.resolutionLon = float(configParser.get("AggregateMicroPath", "resolution_lon")) self.tripLatMin = int(math.floor(self.tripLat1/self.resolutionLat))#6 self.tripLatMax = int(math.ceil(self.tripLat2/self.resolutionLat)) #7 self.tripLonMin = int(math.floor(self.tripLon1/self.resolutionLon)) #8 self.tripLonMax = int(math.ceil(self.tripLon2/self.resolutionLon)) #9 self.triplineBlankets.append([self.tripLat1,self.tripLon1,self.tripLat2,self.tripLon2,self.tripname,self.resolutionLat,self.resolutionLon,self.tripLatMin,self.tripLatMax,self.tripLonMin,self.tripLonMax]) self.temporal_split = configParser.get("AggregateMicroPath", "temporal_split")
Sotera/aggregate-micro-paths
hive-streaming/conf/config.py
Python
apache-2.0
3,321
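A minimal sketch of the [AggregateMicroPath] INI file the class above reads; the section and option names come from the code, while every value below is a made-up example.

# Example configuration for AggregateMicroPathConfig; all values invented.
example_ini = """[AggregateMicroPath]
database_name = micro_path_db
table_name = tracks
table_schema_id = id
table_schema_dt = dt
table_schema_lat = latitude
table_schema_lon = longitude
time_filter = 3600
distance_filter = 100000
lower_left_lat = 29.0
lower_left_lon = -98.0
upper_right_lat = 30.0
upper_right_lon = -97.0
trip_name = example_trip
resolution_lat = 0.01
resolution_lon = 0.01
temporal_split = none
"""

with open('example.ini', 'w') as fh:
    fh.write(example_ini)

# config = AggregateMicroPathConfig('example.ini')  # assuming the class is importable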
#!/usr/bin/python # Copyright (c) 2016 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import jinja2 from jinja2.loaders import TemplateNotFound from jinja2.utils import open_if_exists import os def get_dist_templates_path(): return os.path.join(os.path.dirname(__file__), 'dist-templates') class RenderspecLoader(jinja2.BaseLoader): """A special template loader which allows rendering supplied .spec template with distro specific blocks maintained as part of renderspec. '.spec' returns the spec template (which you need to supply during init) while other strings map to corresponding child templates included in renderspec which simply extend the '.spec' template. """ base_ref = '.spec' template_postfix = '.spec.j2' def __init__(self, template_fn, encoding='utf-8'): self.base_fn = template_fn self.encoding = encoding self.disttemp_path = get_dist_templates_path() def get_source(self, environment, template): if template == self.base_ref: fn = self.base_fn else: fn = os.path.join(self.disttemp_path, template + self.template_postfix) f = open_if_exists(fn) if not f: raise TemplateNotFound(template) try: contents = f.read().decode(self.encoding) finally: f.close() mtime = os.path.getmtime(self.base_fn) def uptodate(): try: return os.path.getmtime(self.base_fn) == mtime except OSError: return False return contents, fn, uptodate def list_templates(self): found = set([self.base_ref]) walk_dir = os.walk(self.disttemp_path) for _, _, filenames in walk_dir: for fn in filenames: if fn.endswith(self.template_postfix): template = fn[:-len(self.template_postfix)] found.add(template) return sorted(found)
openstack/renderspec
renderspec/distloader.py
Python
apache-2.0
2,522
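A hedged sketch of plugging RenderspecLoader into a jinja2 Environment; 'example.spec.j2' is an invented path to a user-supplied spec template, and the child template name mentioned in the comment is an assumption about what ships in dist-templates.

# Hypothetical use of RenderspecLoader; the template path is invented.
import jinja2
from renderspec.distloader import RenderspecLoader

env = jinja2.Environment(loader=RenderspecLoader('example.spec.j2'))

# '.spec' resolves to the supplied base template ...
print(env.get_template('.spec').render())

# ... while other names resolve to bundled child templates that extend it,
# e.g. env.get_template('suse') if a suse.spec.j2 dist-template exists.
print(env.list_templates())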
#---------------------------------------------------------------------------------- # Introdução a Programação de Computadores - IPC # Universidade do Estado do Amazonas - UEA # # Adham Lucas da Silva Oliveira 1715310059 # Alexandre Marques Uchôa 1715310028 # André Luís Laborda Neves 1515070006 # Carlos Eduardo Tapudima de Oliveira 1715310030 # Diego Reis Figueira 1515070169 # # Write a program that calculates and shows the volume of a sphere given the value of its radius (R). # The formula for the volume is: (4/3) * pi * R^3. Use (assign) 3.14159 for pi. # # Input # The input contains one floating-point (double precision) value, the radius of the sphere. # # Output # The output must be the message "VOLUME" as in the example provided, # with one space before and one space after the equals sign. # The value must be presented with 3 digits after the decimal point. #---------------------------------------------------------------------------------- radius = float(input()) pi = 3.14159 volume = (4.0 / 3.0) * pi * radius**3 print('VOLUME = %.3f' % volume)
jucimarjr/IPC_2017-1
lista04/lista04_lista02_questao11.py
Python
apache-2.0
1,164
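As a quick check of the corrected formula: for a radius of 3.0, (4/3) * 3.14159 * 3.0**3 = 4.18879 * 27 ≈ 113.097, so the program prints VOLUME = 113.097.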
from django.conf.urls import patterns,url from main import views urlpatterns = patterns('', url(r'^$',views.index,name='index'), url(r'^tags/$',views.tags,name='tags'), url(r'^tags/(?P<tag_name>\w+)/$',views.tag,name='tag'), url(r'^add_link/$',views.add_link,name='add_link'), )
davischau/CMPUT410Lab6
bookmarks/main/urls.py
Python
apache-2.0
295
# Copyright 2016 NOKIA # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from logging import handlers import netaddr from nuage_neutron.plugins.common import constants from nuage_neutron.plugins.common import exceptions as nuage_exc from nuage_neutron.plugins.common.extensions import nuage_router from nuage_neutron.plugins.common import nuagedb from nuage_neutron.plugins.common.time_tracker import TimeTracker from nuage_neutron.plugins.common import utils as nuage_utils from nuage_neutron.vsdclient.common.helper import get_l2_and_l3_sub_id from oslo_config import cfg from oslo_log.formatters import ContextFormatter from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from sqlalchemy.orm import exc from neutron._i18n import _ from neutron.callbacks import resources from neutron.db import api as db from neutron.extensions import l3 from neutron_lib import constants as lib_constants from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from neutron_lib.utils import helpers from nuage_neutron.plugins.nuage_ml2.nuage_ml2_wrapper import NuageL3Wrapper LOG = logging.getLogger(__name__) class NuageL3Plugin(NuageL3Wrapper): supported_extension_aliases = ['router', 'nuage-router', 'nuage-floatingip', 'extraroute', 'ext-gw-mode'] def __init__(self): super(NuageL3Plugin, self).__init__() self._l2_plugin = None self._default_np_id = None self.init_fip_rate_log() @property def core_plugin(self): if self._l2_plugin is None: self._l2_plugin = directory.get_plugin() return self._l2_plugin @property def default_np_id(self): if self._default_np_id is None: self._default_np_id = directory.get_plugin( constants.NUAGE_APIS).default_np_id return self._default_np_id def get_plugin_type(self): return lib_constants.L3 def get_plugin_description(self): return "Plugin providing support for routers and floatingips." 
def init_fip_rate_log(self): self.def_fip_rate = cfg.CONF.FIPRATE.default_fip_rate self.def_ingress_rate_kbps = ( cfg.CONF.FIPRATE.default_ingress_fip_rate_kbps) self.def_egress_rate_kbps = ( cfg.CONF.FIPRATE.default_egress_fip_rate_kbps) self._validate_fip_rate_value(self.def_fip_rate, 'default_fip_rate') self._validate_fip_rate_value(self.def_ingress_rate_kbps, 'default_ingress_fip_rate_kbps', units='kbps') if cfg.CONF.FIPRATE.default_egress_fip_rate_kbps is not None: self._validate_fip_rate_value(self.def_egress_rate_kbps, 'default_egress_fip_rate_kbps', units='kbps') self.fip_rate_log = None if cfg.CONF.FIPRATE.fip_rate_change_log: formatter = ContextFormatter() formatter.conf.logging_context_format_string = ( '%(asctime)s %(levelname)s [%(user_name)s] %(message)s') self.fip_rate_log = logging.getLogger('neutron.nuage.fip.rate') handler = handlers.WatchedFileHandler( cfg.CONF.FIPRATE.fip_rate_change_log) handler.setFormatter(formatter) self.fip_rate_log.logger.addHandler(handler) else: self.fip_rate_log = LOG def _validate_fip_rate_value(self, fip_value, attribute, units='mbps'): if fip_value < -1: raise cfg.ConfigFileValueError(_('%s can not be < -1') % attribute) if self.def_fip_rate > constants.MAX_VSD_INTEGER: raise cfg.ConfigFileValueError(_('%(attr)s cannot be > %(max)s') % {'attr': attribute, 'max': constants.MAX_VSD_INTEGER}) if units == 'kbps' and int(fip_value) != fip_value: raise cfg.ConfigFileValueError(_('%s cannot be' ' in fraction') % attribute) @nuage_utils.handle_nuage_api_error @log_helpers.log_method_call @TimeTracker.tracked def add_router_interface(self, context, router_id, interface_info): session = context.session rtr_if_info = super(NuageL3Plugin, self).add_router_interface( context, router_id, interface_info) try: network = self.core_plugin.get_network(context, rtr_if_info['network_id']) if not self.is_vxlan_network(network): return rtr_if_info if network['router:external']: msg = _("Subnet in external network cannot be an interface of " "a router.") raise nuage_exc.NuageBadRequest(msg=msg) return self._nuage_add_router_interface(context, interface_info, router_id, rtr_if_info, session) except Exception: with excutils.save_and_reraise_exception(): super(NuageL3Plugin, self).remove_router_interface( context, router_id, interface_info) def _nuage_add_router_interface(self, context, interface_info, router_id, rtr_if_info, session): if 'port_id' in interface_info: port_id = interface_info['port_id'] port = self.core_plugin._get_port(context, port_id) subnet_id = port['fixed_ips'][0]['subnet_id'] subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(session, subnet_id) port_params = {'neutron_port_id': port['id']} if subnet_l2dom['nuage_l2dom_tmplt_id']: port_params['l2dom_id'] = subnet_l2dom['nuage_subnet_id'] else: port_params['l3dom_id'] = subnet_l2dom['nuage_subnet_id'] vport = self.vsdclient.get_nuage_vport_by_neutron_id( port_params, required=False) if vport: self.vsdclient.delete_nuage_vport(vport['ID']) else: subnet_id = rtr_if_info['subnet_id'] subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(session, subnet_id) l2domain_id = subnet_l2dom['nuage_subnet_id'] subnet = self.core_plugin.get_subnet(context, subnet_id) vsd_zone = self.vsdclient.get_zone_by_routerid( router_id, subnet['shared']) self._nuage_validate_add_rtr_itf( session, router_id, subnet, subnet_l2dom, vsd_zone) filters = { 'fixed_ips': {'subnet_id': [subnet_id]}, 'device_owner': [constants.DEVICE_OWNER_DHCP_NUAGE] } gw_ports = self.core_plugin.get_ports(context, filters=filters) for port in gw_ports: 
self.core_plugin.delete_port(context, port['id']) pnet_binding = nuagedb.get_network_binding(context.session, subnet['network_id']) with nuage_utils.rollback() as on_exc, \ session.begin(subtransactions=True): vsd_subnet = self.vsdclient.create_domain_subnet( vsd_zone, subnet, pnet_binding) on_exc(self.vsdclient.delete_domain_subnet, vsd_subnet['ID'], subnet['id'], pnet_binding) nuagedb.update_subnetl2dom_mapping( subnet_l2dom, {'nuage_subnet_id': vsd_subnet['ID'], 'nuage_l2dom_tmplt_id': None}) self.vsdclient.move_l2domain_to_l3subnet( l2domain_id, vsd_subnet['ID']) rollbacks = [] try: self.nuage_callbacks.notify(resources.ROUTER_INTERFACE, constants.AFTER_CREATE, self, context=context, router_id=router_id, subnet_id=subnet_id, rollbacks=rollbacks, subnet_mapping=subnet_l2dom) except Exception: with excutils.save_and_reraise_exception(): for rollback in reversed(rollbacks): rollback[0](*rollback[1], **rollback[2]) self.core_plugin.update_port_status(context, rtr_if_info['port_id'], lib_constants.PORT_STATUS_ACTIVE) return rtr_if_info def _nuage_validate_add_rtr_itf(self, session, router_id, subnet, subnet_l2dom, nuage_zone): subnet_id = subnet['id'] ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(session, router_id) if not nuage_zone or not ent_rtr_mapping: raise nuage_router.RtrItfAddIncompleteRouterOnVsd(id=router_id) if not subnet_l2dom: raise nuage_router.RtrItfAddVsdSubnetNotFound(subnet=subnet_id) if subnet_l2dom['nuage_managed_subnet']: raise nuage_router.RtrItfAddSubnetIsVsdManaged(subnet=subnet_id) if (subnet_l2dom['net_partition_id'] != ent_rtr_mapping['net_partition_id']): raise nuage_router.RtrItfAddDifferentNetpartitions( subnet=subnet_id, router=router_id) nuage_subnet_id = subnet_l2dom['nuage_subnet_id'] nuage_rtr_id = ent_rtr_mapping['nuage_router_id'] self.vsdclient.validate_create_domain_subnet( subnet, nuage_subnet_id, nuage_rtr_id) @nuage_utils.handle_nuage_api_error @log_helpers.log_method_call @TimeTracker.tracked def remove_router_interface(self, context, router_id, interface_info): if 'subnet_id' in interface_info: subnet_id = interface_info['subnet_id'] subnet = self.core_plugin.get_subnet(context, subnet_id) if not self.is_vxlan_network_by_id(context, subnet['network_id']): return super(NuageL3Plugin, self).remove_router_interface(context, router_id, interface_info) found = False try: filters = {'device_id': [router_id], 'device_owner': [lib_constants.DEVICE_OWNER_ROUTER_INTF], 'network_id': [subnet['network_id']]} ports = self.core_plugin.get_ports(context, filters) for p in ports: if p['fixed_ips'][0]['subnet_id'] == subnet_id: found = True break except exc.NoResultFound: msg = (_("No router interface found for Router %s. " "Router-IF delete failed") % router_id) raise n_exc.BadRequest(resource='router', msg=msg) if not found: msg = (_("No router interface found for Router %s. " "Router-IF delete failed") % router_id) raise n_exc.BadRequest(resource='router', msg=msg) elif 'port_id' in interface_info: port_db = self.core_plugin._get_port(context, interface_info['port_id']) if not self.is_vxlan_network_by_id(context, port_db['network_id']): return super(NuageL3Plugin, self).remove_router_interface(context, router_id, interface_info) if not port_db: msg = (_("No router interface found for Router %s. 
" "Router-IF delete failed") % router_id) raise n_exc.BadRequest(resource='router', msg=msg) subnet_id = port_db['fixed_ips'][0]['subnet_id'] subnet = self.core_plugin.get_subnet(context, subnet_id) else: return super(NuageL3Plugin, self).remove_router_interface(context, router_id, interface_info) session = context.session subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(session, subnet_id) if not subnet_l2dom: return super(NuageL3Plugin, self).remove_router_interface(context, router_id, interface_info) nuage_subn_id = subnet_l2dom['nuage_subnet_id'] neutron_subnet = self.core_plugin.get_subnet(context, subnet_id) ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid( context.session, router_id) if not ent_rtr_mapping: msg = (_("Router %s does not hold net_partition " "assoc on Nuage VSD. Router-IF delete failed") % router_id) raise n_exc.BadRequest(resource='router', msg=msg) with nuage_utils.rollback() as on_exc: last_address = neutron_subnet['allocation_pools'][-1]['end'] port = self._reserve_ip(self.core_plugin, context, neutron_subnet, last_address) pnet_binding = nuagedb.get_network_binding( context.session, neutron_subnet['network_id']) on_exc(self.core_plugin.delete_port, context, port['id']) self.vsdclient.confirm_router_interface_not_in_use( router_id, subnet) vsd_l2domain = self.vsdclient.create_l2domain_for_router_detach( subnet, subnet_l2dom) on_exc(self.vsdclient.delete_subnet, subnet['id']) result = super(NuageL3Plugin, self).remove_router_interface(context, router_id, interface_info) nuagedb.update_subnetl2dom_mapping( subnet_l2dom, {'nuage_subnet_id': vsd_l2domain['nuage_l2domain_id'], 'nuage_l2dom_tmplt_id': vsd_l2domain['nuage_l2template_id']}) self.vsdclient.move_l3subnet_to_l2domain( nuage_subn_id, vsd_l2domain['nuage_l2domain_id'], subnet_l2dom, pnet_binding) rollbacks = [] try: self.nuage_callbacks.notify(resources.ROUTER_INTERFACE, constants.AFTER_DELETE, self, context=context, router_id=router_id, subnet_id=subnet_id, rollbacks=rollbacks, subnet_mapping=subnet_l2dom) except Exception: with excutils.save_and_reraise_exception(): for rollback in reversed(rollbacks): rollback[0](*rollback[1], **rollback[2]) LOG.debug("Deleted nuage domain subnet %s", nuage_subn_id) return result @log_helpers.log_method_call def _get_net_partition_for_router(self, context, rtr): ent = rtr.get('net_partition', None) if not ent: net_partition = nuagedb.get_net_partition_by_id(context.session, self.default_np_id) else: net_partition = ( nuagedb.get_net_partition_by_id(context.session, rtr['net_partition']) or nuagedb.get_net_partition_by_name(context.session, rtr['net_partition']) ) if not net_partition: msg = _("Either net_partition is not provided with router OR " "default net_partition is not created at the start") raise n_exc.BadRequest(resource='router', msg=msg) return net_partition @nuage_utils.handle_nuage_api_error @db.retry_if_session_inactive() @log_helpers.log_method_call @TimeTracker.tracked def get_router(self, context, id, fields=None): router = super(NuageL3Plugin, self).get_router(context, id, fields) nuage_router = self.vsdclient.get_router_by_external(id) self._add_nuage_router_attributes(router, nuage_router) return self._fields(router, fields) def _add_nuage_router_attributes(self, router, nuage_router): if not nuage_router: return router['tunnel_type'] = nuage_router.get('tunnelType') router['rd'] = nuage_router.get('routeDistinguisher') router['rt'] = nuage_router.get('routeTarget') router['ecmp_count'] = nuage_router.get('ECMPCount') router['nuage_backhaul_vnid'] = 
nuage_router.get('backHaulVNID') router['nuage_backhaul_rd'] = (nuage_router.get( 'backHaulRouteDistinguisher')) router['nuage_backhaul_rt'] = nuage_router.get('backHaulRouteTarget') for route in router.get('routes', []): params = { 'address': route['destination'].split("/")[0], 'nexthop': route['nexthop'], 'nuage_domain_id': nuage_router['ID'] } nuage_route = self.vsdclient.get_nuage_static_route(params) if nuage_route: route['rd'] = nuage_route['rd'] @nuage_utils.handle_nuage_api_error @db.retry_if_session_inactive() @log_helpers.log_method_call @TimeTracker.tracked def create_router(self, context, router): req_router = copy.deepcopy(router['router']) net_partition = self._get_net_partition_for_router( context, router['router']) if 'ecmp_count' in router and not context.is_admin: msg = _("ecmp_count can only be set by an admin user.") raise nuage_exc.NuageNotAuthorized(resource='router', msg=msg) if (cfg.CONF.RESTPROXY.nuage_pat == constants.NUAGE_PAT_NOT_AVAILABLE and req_router.get('external_gateway_info')): msg = _("nuage_pat config is set to 'not_available'. " "Can't set external_gateway_info") raise nuage_exc.NuageBadRequest(resource='router', msg=msg) neutron_router = super(NuageL3Plugin, self).create_router(context, router) params = { 'net_partition': net_partition, 'tenant_id': neutron_router['tenant_id'], 'nuage_pat': cfg.CONF.RESTPROXY.nuage_pat } nuage_router = None try: nuage_router = self.vsdclient.create_router( neutron_router, req_router, params) except Exception: with excutils.save_and_reraise_exception(): super(NuageL3Plugin, self).delete_router( context, neutron_router['id']) if nuage_router: LOG.debug("Created nuage domain %s", nuage_router[ 'nuage_domain_id']) with context.session.begin(subtransactions=True): nuagedb.add_entrouter_mapping(context.session, net_partition['id'], neutron_router['id'], nuage_router['nuage_domain_id'], nuage_router['rt'], nuage_router['rd']) neutron_router['tunnel_type'] = nuage_router['tunnel_type'] neutron_router['rd'] = nuage_router['rd'] neutron_router['rt'] = nuage_router['rt'] neutron_router['ecmp_count'] = nuage_router['ecmp_count'] neutron_router['nuage_backhaul_vnid'] = \ nuage_router['nuage_backhaul_vnid'] neutron_router['nuage_backhaul_rd'] = \ nuage_router['nuage_backhaul_rd'] neutron_router['nuage_backhaul_rt'] = \ nuage_router['nuage_backhaul_rt'] return neutron_router @nuage_utils.handle_nuage_api_error @db.retry_if_session_inactive() @log_helpers.log_method_call @TimeTracker.tracked def update_router(self, context, id, router): updates = router['router'] original_router = self.get_router(context, id) self._validate_update_router(context, id, updates) ent_rtr_mapping = context.ent_rtr_mapping nuage_domain_id = ent_rtr_mapping['nuage_router_id'] curr_router = self.get_router(context, id) old_routes = self._get_extra_routes_by_router_id(context, id) with nuage_utils.rollback() as on_exc: router_updated = super(NuageL3Plugin, self).update_router( context, id, copy.deepcopy(router)) on_exc(super(NuageL3Plugin, self).update_router, context, id, {'router': copy.deepcopy(original_router)}) if (len(updates) == 1 and 'external_gateway_info' in updates and 'enable_snat' not in updates['external_gateway_info']): return router_updated if 'routes' in updates: self._update_nuage_router_static_routes( id, nuage_domain_id, old_routes, updates['routes']) on_exc(self._update_nuage_router_static_routes, id, nuage_domain_id, updates['routes'], old_routes) if 'routes' in updates and len(updates) == 1: pass else: 
self._update_nuage_router(nuage_domain_id, curr_router, updates, ent_rtr_mapping) on_exc(self._update_nuage_router, updates, curr_router, ent_rtr_mapping) nuage_router = self.vsdclient.get_router_by_external(id) self._add_nuage_router_attributes(router_updated, nuage_router) rollbacks = [] try: self.nuage_callbacks.notify( resources.ROUTER, constants.AFTER_UPDATE, self, context=context, updated_router=router_updated, original_router=original_router, request_router=updates, domain=nuage_router, rollbacks=rollbacks) except Exception: with excutils.save_and_reraise_exception(): for rollback in reversed(rollbacks): rollback[0](*rollback[1], **rollback[2]) return router_updated def _validate_update_router(self, context, id, router): if 'ecmp_count' in router and not context.is_admin: msg = _("ecmp_count can only be set by an admin user.") raise nuage_exc.NuageNotAuthorized(resource='router', msg=msg) if (cfg.CONF.RESTPROXY.nuage_pat == constants.NUAGE_PAT_NOT_AVAILABLE and router.get('external_gateway_info')): msg = _("nuage_pat config is set to 'notavailable'. " "Can't update ext-gw-info") raise nuage_exc.OperationNotSupported(resource='router', msg=msg) ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(context.session, id) if not ent_rtr_mapping: msg = (_("Router %s does not hold net-partition " "assoc on VSD. extra-route failed") % id) raise n_exc.BadRequest(resource='router', msg=msg) context.ent_rtr_mapping = ent_rtr_mapping def _update_nuage_router_static_routes(self, id, nuage_domain_id, old_routes, new_routes): added, removed = helpers.diff_list_of_dict(old_routes, new_routes) routes_removed = [] routes_added = [] try: for route in removed: self._delete_nuage_static_route(nuage_domain_id, route) routes_removed.append(route) for route in added: self._add_nuage_static_route(id, nuage_domain_id, route) routes_added.append(route) except Exception as e: for route in routes_added: self._delete_nuage_static_route(nuage_domain_id, route) for route in routes_removed: self._add_nuage_static_route(id, nuage_domain_id, route) raise e def _add_nuage_static_route(self, router_id, nuage_domain_id, route): params = { 'nuage_domain_id': nuage_domain_id, 'neutron_rtr_id': router_id, 'net': netaddr.IPNetwork(route['destination']), 'nexthop': route['nexthop'] } self.vsdclient.create_nuage_staticroute(params) def _delete_nuage_static_route(self, nuage_domain_id, route): destaddr = route['destination'] cidr = destaddr.split('/') params = { "address": cidr[0], "nexthop": route['nexthop'], "nuage_domain_id": nuage_domain_id } self.vsdclient.delete_nuage_staticroute(params) def _update_nuage_router(self, nuage_id, curr_router, router_updates, ent_rtr_mapping): params = { 'net_partition_id': ent_rtr_mapping['net_partition_id'], 'nuage_pat': cfg.CONF.RESTPROXY.nuage_pat } curr_router.update(router_updates) self.vsdclient.update_router(nuage_id, curr_router, params) ns_dict = { 'nuage_rtr_rt': router_updates.get('rt', ent_rtr_mapping.get('nuage_rtr_rt')), 'nuage_rtr_rd': router_updates.get('rd', ent_rtr_mapping.get('nuage_rtr_rd')) } nuagedb.update_entrouter_mapping(ent_rtr_mapping, ns_dict) @nuage_utils.handle_nuage_api_error @db.retry_if_session_inactive() @log_helpers.log_method_call @TimeTracker.tracked def delete_router(self, context, id): neutron_router = self.get_router(context, id) session = context.session ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(session, id) # Can probably be removed after blueprint enginefacade-switch reaches # router-delete code upstream. 
# https://blueprints.launchpad.net/neutron/+spec/enginefacade-switch session.expunge(ent_rtr_mapping) if ent_rtr_mapping: LOG.debug("Enterprise to router mapping found for router %s", id) filters = { 'device_id': [id], 'device_owner': [lib_constants.DEVICE_OWNER_ROUTER_INTF] } ports = self.core_plugin.get_ports(context, filters) if ports: raise l3.RouterInUse(router_id=id) nuage_domain_id = ent_rtr_mapping['nuage_router_id'] self.vsdclient.delete_router(nuage_domain_id) super(NuageL3Plugin, self).delete_router(context, id) if ent_rtr_mapping and not self._check_router_subnet_for_tenant( context, neutron_router['tenant_id']): LOG.debug("No router/subnet found for tenant %s", neutron_router['tenant_id']) user_id, group_id = self.vsdclient.get_usergroup( neutron_router['tenant_id'], ent_rtr_mapping['net_partition_id']) self.vsdclient.delete_user(user_id) self.vsdclient.delete_group(group_id) @log_helpers.log_method_call @TimeTracker.tracked def _check_floatingip_update(self, context, port, vport_type=constants.VM_VPORT, vport_id=None): filter = {'fixed_port_id': [port['id']]} local_fip = self.get_floatingips(context, filters=filter) if local_fip: fip = local_fip[0] self._create_update_floatingip(context, fip, port['id'], vport_type=vport_type, vport_id=vport_id, rate_update=False) @log_helpers.log_method_call def _create_update_floatingip(self, context, neutron_fip, port_id, last_known_router_id=None, vport_type=constants.VM_VPORT, vport_id=None, rate_update=True): if last_known_router_id: rtr_id = last_known_router_id else: rtr_id = neutron_fip['router_id'] net_id = neutron_fip['floating_network_id'] subn = nuagedb.get_ipalloc_for_fip(context.session, net_id, neutron_fip['floating_ip_address']) fip_pool = self.vsdclient.get_nuage_fip_pool_by_id(subn['subnet_id']) if not fip_pool: msg = _('sharedresource %s not found on VSD') % subn['subnet_id'] raise n_exc.BadRequest(resource='floatingip', msg=msg) ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(context.session, rtr_id) if not ent_rtr_mapping: msg = _('router %s is not associated with ' 'any net-partition') % rtr_id raise n_exc.BadRequest(resource='floatingip', msg=msg) params = { 'router_id': ent_rtr_mapping['nuage_router_id'], 'fip_id': neutron_fip['id'], 'neutron_fip': neutron_fip } fip = self.vsdclient.get_nuage_fip_by_id(params) if not fip: LOG.debug("Floating ip not found in VSD for fip %s", neutron_fip['id']) params = { 'nuage_rtr_id': ent_rtr_mapping['nuage_router_id'], 'nuage_fippool_id': fip_pool['nuage_fip_pool_id'], 'neutron_fip_ip': neutron_fip['floating_ip_address'], 'neutron_fip_id': neutron_fip['id'] } nuage_fip_id = self.vsdclient.create_nuage_floatingip(params) else: nuage_fip_id = fip['nuage_fip_id'] # Update VM if required nuage_vport = self._get_vport_for_fip(context, port_id, vport_type=vport_type, vport_id=vport_id, required=False) if nuage_vport: nuage_fip = self.vsdclient.get_nuage_fip(nuage_fip_id) if nuage_fip['assigned']: n_vport = self.vsdclient.get_vport_assoc_with_fip( nuage_fip_id) if n_vport: disassoc_params = { 'nuage_vport_id': n_vport['ID'], 'nuage_fip_id': None } self.vsdclient.update_nuage_vm_vport(disassoc_params) if (nuage_vport['domainID']) != ( ent_rtr_mapping['nuage_router_id']): fip_dict = { 'fip_id': neutron_fip['id'], 'fip_last_known_rtr_id': ent_rtr_mapping['router_id'] } fip = self.vsdclient.get_nuage_fip_by_id(fip_dict) if fip: self._delete_nuage_fip(context, fip_dict) # Now change the rtd_id to vport's router id rtr_id = neutron_fip['router_id'] ent_rtr_mapping = 
nuagedb.get_ent_rtr_mapping_by_rtrid( context.session, rtr_id ) if not ent_rtr_mapping: msg = _('router %s is not associated with ' 'any net-partition') % rtr_id raise n_exc.BadRequest(resource='floatingip', msg=msg) params = { 'router_id': ent_rtr_mapping['nuage_router_id'], 'fip_id': neutron_fip['id'], 'neutron_fip': neutron_fip } fip = self.vsdclient.get_nuage_fip_by_id(params) if not fip: LOG.debug("Floating ip not found in VSD for fip %s", neutron_fip['id']) params = { 'nuage_rtr_id': ent_rtr_mapping['nuage_router_id'], 'nuage_fippool_id': fip_pool['nuage_fip_pool_id'], 'neutron_fip_ip': neutron_fip['floating_ip_address'], 'neutron_fip_id': neutron_fip['id'] } nuage_fip_id = \ self.vsdclient.create_nuage_floatingip(params) else: nuage_fip_id = fip['nuage_fip_id'] params = { 'nuage_vport_id': nuage_vport['ID'], 'nuage_fip_id': nuage_fip_id } self.vsdclient.update_nuage_vm_vport(params) self.fip_rate_log.info( 'FIP %s (owned by tenant %s) associated to port %s' % (neutron_fip['id'], neutron_fip['tenant_id'], port_id)) # Check if we have to associate a FIP to a VIP self._process_fip_to_vip(context, port_id, nuage_fip_id) if not rate_update: return # Add QOS to port for rate limiting nuage_fip_rate = neutron_fip.get('nuage_fip_rate_values') nuage_fip_rate_configured = nuage_fip_rate.pop('cli_configured', None) if nuage_fip_rate_configured and not nuage_vport: msg = _('Rate limiting requires the floating ip to be ' 'associated to a port.') raise nuage_exc.NuageBadRequest(msg=msg) if nuage_fip_rate_configured and not nuage_vport: del neutron_fip['nuage_fip_rate_values'] if nuage_vport: self.vsdclient.create_update_rate_limiting( nuage_fip_rate, nuage_vport['ID'], neutron_fip['id']) for direction, value in nuage_fip_rate.iteritems(): if 'kbps' in direction: rate_unit = 'K' if 'ingress' in direction: neutron_fip['nuage_ingress_fip_rate_kbps'] = value else: neutron_fip['nuage_egress_fip_rate_kbps'] = value else: rate_unit = 'M' neutron_fip['nuage_egress_fip_rate_kbps'] = float( value) * 1000 if float(value) != -1 else -1 self.fip_rate_log.info( 'FIP %s (owned by tenant %s) %s updated to %s %sb/s' % (neutron_fip['id'], neutron_fip['tenant_id'], direction, value, rate_unit)) @nuage_utils.handle_nuage_api_error @db.retry_if_session_inactive() @log_helpers.log_method_call @TimeTracker.tracked def get_floatingip(self, context, id, fields=None): fip = super(NuageL3Plugin, self).get_floatingip(context, id) if (not fields or 'nuage_egress_fip_rate_kbps' in fields or 'nuage_ingress_fip_rate_kbps' in fields) and fip.get( 'port_id'): try: nuage_vport = self._get_vport_for_fip(context, fip['port_id']) nuage_rate_limit = self.vsdclient.get_rate_limit( nuage_vport['ID'], fip['id']) for direction, value in nuage_rate_limit.iteritems(): if 'ingress' in direction: fip['nuage_ingress_fip_rate_kbps'] = value elif 'egress' in direction: fip['nuage_egress_fip_rate_kbps'] = value except Exception as e: msg = (_('Got exception while retrieving fip rate from vsd: ' '%s') % e.message) LOG.error(msg) return self._fields(fip, fields) @nuage_utils.handle_nuage_api_error @db.retry_if_session_inactive() @log_helpers.log_method_call @TimeTracker.tracked def create_floatingip(self, context, floatingip, initial_status=lib_constants. 
FLOATINGIP_STATUS_ACTIVE): fip = floatingip['floatingip'] neutron_fip = super(NuageL3Plugin, self).create_floatingip( context, floatingip, initial_status=lib_constants.FLOATINGIP_STATUS_DOWN) if not self.is_vxlan_network_by_id(context, neutron_fip['floating_network_id']): return neutron_fip nuage_fip_rate = self._get_values_for_fip_rate( fip, for_update='port_id' not in fip) fip_rate_configured = nuage_fip_rate.get('cli_configured') if fip_rate_configured: if not fip.get('port_id'): msg = _('Rate limiting requires the floating ip to be ' 'associated to a port.') raise nuage_exc.NuageBadRequest(msg=msg) if not neutron_fip['router_id']: neutron_fip['nuage_egress_fip_rate_kbps'] = None neutron_fip['nuage_ingress_fip_rate_kbps'] = None return neutron_fip neutron_fip['nuage_fip_rate_values'] = nuage_fip_rate try: self._create_update_floatingip(context, neutron_fip, fip['port_id']) self.update_floatingip_status( context, neutron_fip['id'], lib_constants.FLOATINGIP_STATUS_ACTIVE) neutron_fip['status'] = lib_constants.FLOATINGIP_STATUS_ACTIVE except (nuage_exc.OperationNotSupported, n_exc.BadRequest): with excutils.save_and_reraise_exception(): super(NuageL3Plugin, self).delete_floatingip( context, neutron_fip['id']) return neutron_fip @nuage_utils.handle_nuage_api_error @log_helpers.log_method_call @TimeTracker.tracked def disassociate_floatingips(self, context, port_id, do_notify=True): fips = self.get_floatingips(context, filters={'port_id': [port_id]}) router_ids = super(NuageL3Plugin, self).disassociate_floatingips( context, port_id, do_notify=do_notify) if not fips: return router_ids # we can hav only 1 fip associated with a vPort at a time.fips[0] self.update_floatingip_status( context, fips[0]['id'], lib_constants.FLOATINGIP_STATUS_DOWN) # Disassociate only if nuage_port has a FIP associated with it. 
# Calling disassociate on a port with no FIP causes no issue in Neutron # but VSD throws an exception nuage_vport = self._get_vport_for_fip(context, port_id, required=False) if nuage_vport and nuage_vport.get('associatedFloatingIPID'): for fip in fips: self.vsdclient.delete_rate_limiting( nuage_vport['ID'], fip['id']) self.fip_rate_log.info('FIP %s (owned by tenant %s) ' 'disassociated from port %s' % (fip['id'], fip['tenant_id'], port_id)) params = { 'nuage_vport_id': nuage_vport['ID'], 'nuage_fip_id': None } self.vsdclient.update_nuage_vm_vport(params) LOG.debug("Disassociated floating ip from VM attached at port %s", port_id) return router_ids def _get_values_for_fip_rate(self, fip, for_update=False): fip_rate_values = {} egress_fip_rate_mbps = fip.get('nuage_fip_rate', lib_constants.ATTR_NOT_SPECIFIED) ingress_fip_rate_kbps = fip.get('nuage_ingress_fip_rate_kbps', lib_constants.ATTR_NOT_SPECIFIED) egress_fip_rate_kbps = fip.get('nuage_egress_fip_rate_kbps', lib_constants.ATTR_NOT_SPECIFIED) egress_fip_rate_mbps_configured = (egress_fip_rate_mbps is not lib_constants.ATTR_NOT_SPECIFIED) egress_fip_rate_kbps_configured = (egress_fip_rate_kbps is not lib_constants.ATTR_NOT_SPECIFIED) ingress_fip_rate_kbps_configured = (ingress_fip_rate_kbps is not lib_constants.ATTR_NOT_SPECIFIED) if egress_fip_rate_kbps_configured: fip_rate_values['egress_nuage_fip_rate_kbps'] = ( egress_fip_rate_kbps) fip_rate_values['cli_configured'] = True elif egress_fip_rate_mbps_configured: fip_rate_values['egress_nuage_fip_rate_mbps'] = ( egress_fip_rate_mbps) fip_rate_values['cli_configured'] = True if ingress_fip_rate_kbps_configured: fip_rate_values['ingress_nuage_fip_rate_kbps'] = ( ingress_fip_rate_kbps) fip_rate_values['cli_configured'] = True if for_update: return fip_rate_values return self._get_missing_rate_values(fip_rate_values) def _get_missing_rate_values(self, fip_rate_values): if not (fip_rate_values.get('egress_nuage_fip_rate_kbps') is not None or fip_rate_values.get( 'egress_nuage_fip_rate_mbps') is not None): if self.def_egress_rate_kbps is not None: fip_rate_values['egress_nuage_fip_rate_kbps'] = ( self.def_egress_rate_kbps) elif self.def_fip_rate is not None: fip_rate_values['egress_nuage_fip_rate_mbps'] = ( self.def_fip_rate) if not (fip_rate_values.get('ingress_nuage_fip_rate_kbps' ) is not None): fip_rate_values['ingress_nuage_fip_rate_kbps'] = ( self.def_ingress_rate_kbps) return fip_rate_values @nuage_utils.handle_nuage_api_error @db.retry_if_session_inactive() @log_helpers.log_method_call @TimeTracker.tracked def update_floatingip(self, context, id, floatingip): # Upstream Neutron disassociates port from fip if updated with None # so we simulate same behavior in our plugin as well if not floatingip['floatingip']: floatingip['floatingip'] = {'port_id': None} fip = floatingip['floatingip'] orig_fip = self._get_floatingip(context, id) if not self.is_vxlan_network_by_id(context, orig_fip['floating_network_id']): return super(NuageL3Plugin, self).update_floatingip(context, id, floatingip) port_id = orig_fip['fixed_port_id'] router_ids = [] neutron_fip = self._make_floatingip_dict(orig_fip) nuage_fip_rate = self._get_values_for_fip_rate( fip, for_update='port_id' not in fip) fip_rate_configured = nuage_fip_rate.get('cli_configured', None) with context.session.begin(subtransactions=True): if 'port_id' in fip or fip.get('description'): neutron_fip = super(NuageL3Plugin, self).update_floatingip( context, id, floatingip) last_known_router_id = orig_fip['last_known_router_id'] if fip.get('port_id'): 
if not neutron_fip['router_id']: ret_msg = 'floating-ip is not associated yet' raise n_exc.BadRequest(resource='floatingip', msg=ret_msg) neutron_fip['nuage_fip_rate_values'] = nuage_fip_rate try: self._create_update_floatingip(context, neutron_fip, fip['port_id'], last_known_router_id) self.update_floatingip_status( context, neutron_fip['id'], lib_constants.FLOATINGIP_STATUS_ACTIVE) neutron_fip['status'] = ( lib_constants.FLOATINGIP_STATUS_ACTIVE) except nuage_exc.OperationNotSupported: with excutils.save_and_reraise_exception(): router_ids = super( NuageL3Plugin, self).disassociate_floatingips( context, fip['port_id'], do_notify=False) except n_exc.BadRequest: with excutils.save_and_reraise_exception(): super(NuageL3Plugin, self).delete_floatingip( context, id) elif 'port_id' in fip: # This happens when {'port_id': null} is in request. # Disassociate if fip_rate_configured: ret_msg = _('Rate limiting requires the floating ip to be ' 'associated to a port.') raise n_exc.BadRequest(resource='floatingip', msg=ret_msg) # Check for disassociation of fip from vip, only if port_id # is not None if port_id: self._process_fip_to_vip(context, port_id) nuage_vport = self._get_vport_for_fip(context, port_id, required=False) if nuage_vport: params = { 'nuage_vport_id': nuage_vport['ID'], 'nuage_fip_id': None } self.vsdclient.update_nuage_vm_vport(params) ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid( context.session, last_known_router_id) if not ent_rtr_mapping: msg = _('router %s is not associated with ' 'any net-partition') % last_known_router_id raise n_exc.BadRequest(resource='floatingip', msg=msg) self.vsdclient.delete_rate_limiting( nuage_vport['ID'], id) self.fip_rate_log.info('FIP %s (owned by tenant %s) ' 'disassociated from port %s' % (id, neutron_fip['tenant_id'], port_id)) params = {'fip_id': id} nuage_fip = self.vsdclient.get_nuage_fip_by_id(params) if nuage_fip: self.vsdclient.delete_nuage_floatingip( nuage_fip['nuage_fip_id']) LOG.debug('Floating-ip %s deleted from VSD', id) self.update_floatingip_status( context, neutron_fip['id'], lib_constants.FLOATINGIP_STATUS_DOWN) neutron_fip['status'] = lib_constants.FLOATINGIP_STATUS_DOWN # purely rate limit update. Use existing port data. 
if 'port_id' not in fip and fip_rate_configured: if not port_id: msg = _('Rate limiting requires the floating ip to be ' 'associated to a port.') raise n_exc.BadRequest(resource='floatingip', msg=msg) # Add QOS to port for rate limiting nuage_vport = self._get_vport_for_fip(context, port_id) nuage_fip_rate.pop('cli_configured', None) orig_fip['nuage_fip_rate_values'] = nuage_fip_rate self.vsdclient.create_update_rate_limiting( nuage_fip_rate, nuage_vport['ID'], orig_fip['id']) for direction, value in nuage_fip_rate.iteritems(): if 'kbps' in direction: rate_unit = 'K' if 'ingress' in direction: neutron_fip['nuage_ingress_fip_rate_kbps'] = value else: neutron_fip['nuage_egress_fip_rate_kbps'] = value else: rate_unit = 'M' neutron_fip['nuage_egress_fip_rate_kbps'] = float( value) * 1000 if float(value) != -1 else -1 self.fip_rate_log.info( 'FIP %s (owned by tenant %s) %s updated to %s %sb/s' % (orig_fip['id'], orig_fip['tenant_id'], direction, value, rate_unit)) neutron_fip['nuage_fip_rate'] = orig_fip['nuage_fip_rate_values'] elif not fip_rate_configured: neutron_fip = self.get_floatingip(context, id) # now that we've left db transaction, we are safe to notify self.notify_routers_updated(context, router_ids) return neutron_fip @nuage_utils.handle_nuage_api_error @db.retry_if_session_inactive() @log_helpers.log_method_call @TimeTracker.tracked def delete_floatingip(self, context, fip_id): fip = self._get_floatingip(context, fip_id) if not self.is_vxlan_network_by_id(context, fip['floating_network_id']): return super(NuageL3Plugin, self).delete_floatingip(context, fip_id) port_id = fip['fixed_port_id'] if port_id: nuage_vport = self._get_vport_for_fip(context, port_id, required=False) if nuage_vport and nuage_vport['ID'] is not None: params = { 'nuage_vport_id': nuage_vport['ID'], 'nuage_fip_id': None } self.vsdclient.update_nuage_vm_vport(params) LOG.debug("Floating-ip %(fip)s is disassociated from " "vport %(vport)s", {'fip': fip_id, 'vport': nuage_vport['ID']}) self.vsdclient.delete_rate_limiting( nuage_vport['ID'], fip_id) self.fip_rate_log.info('FIP %s (owned by tenant %s) ' 'disassociated from port %s' % (fip_id, fip['tenant_id'], port_id)) else: # Could be vip-port (fip2vip feature) port = self.core_plugin.get_port(context, port_id) if (port.get('device_owner') in nuage_utils.get_device_owners_vip()): neutron_subnet_id = port['fixed_ips'][0]['subnet_id'] vip = port['fixed_ips'][0]['ip_address'] self.vsdclient.disassociate_fip_from_vips( neutron_subnet_id, vip) router_id = fip['router_id'] else: router_id = fip['last_known_router_id'] if router_id: ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid( context.session, router_id) if ent_rtr_mapping: params = { 'router_id': ent_rtr_mapping['nuage_router_id'], 'fip_id': fip_id } nuage_fip = self.vsdclient.get_nuage_fip_by_id(params) if nuage_fip: self.vsdclient.delete_nuage_floatingip( nuage_fip['nuage_fip_id']) LOG.debug('Floating-ip %s deleted from VSD', fip_id) super(NuageL3Plugin, self).delete_floatingip(context, fip_id) self.fip_rate_log.info('FIP %s (owned by tenant %s) deleted' % (fip_id, fip['tenant_id'])) def _get_vport_for_fip(self, context, port_id, vport_type=constants.VM_VPORT, vport_id=None, required=True): port = self.core_plugin.get_port(context, port_id) if not port['fixed_ips']: return vport = None params = { 'neutron_port_id': port_id, 'nuage_vport_type': vport_type, 'nuage_vport_id': vport_id } try: vport = self.vsdclient.get_nuage_port_by_id(params) except Exception: pass if vport: return vport subnet_id = 
port['fixed_ips'][0]['subnet_id'] subnet_mapping = nuagedb.get_subnet_l2dom_by_id(context.session, subnet_id) params['neutron_port_id'] = port['id'] l2_id, l3_id = get_l2_and_l3_sub_id(subnet_mapping) params['l2dom_id'] = l2_id params['l3dom_id'] = l3_id return self.vsdclient.get_nuage_vport_by_neutron_id( params, required=required) def _process_fip_to_vip(self, context, port_id, nuage_fip_id=None): port = self.core_plugin._get_port(context, port_id) neutron_subnet_id = port['fixed_ips'][0]['subnet_id'] vip = port['fixed_ips'][0]['ip_address'] self.vsdclient.associate_fip_to_vips( neutron_subnet_id, vip, nuage_fip_id) @log_helpers.log_method_call def _delete_nuage_fip(self, context, fip_dict): if fip_dict: fip_id = fip_dict['fip_id'] port_id = fip_dict.get('fip_fixed_port_id') if port_id: router_id = fip_dict['fip_router_id'] else: router_id = fip_dict['fip_last_known_rtr_id'] if router_id: ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid( context.session, router_id) if not ent_rtr_mapping: msg = _('router %s is not associated with ' 'any net-partition') % router_id raise n_exc.BadRequest(resource='floatingip', msg=msg) params = { 'router_id': ent_rtr_mapping['nuage_router_id'], 'fip_id': fip_id } nuage_fip = self.vsdclient.get_nuage_fip_by_id(params) if nuage_fip: self.vsdclient.delete_nuage_floatingip( nuage_fip['nuage_fip_id']) LOG.debug('Floating-ip %s deleted from VSD', fip_id)
naveensan1/nuage-openstack-neutron
nuage_neutron/plugins/common/service_plugins/l3.py
Python
apache-2.0
57,131
import re

from lxml import etree

from nxpy.util import tag_pattern, whitespace_pattern


class Flow(object):

    def __init__(self):
        self.routes = []

    def export(self):
        flow = etree.Element('flow')
        if len(self.routes):
            for route in self.routes:
                flow.append(route.export())
            return flow
        else:
            return False

    def build(self, node):
        for child in node:
            nodeName_ = tag_pattern.match(child.tag).groups()[-1]
            self.buildChildren(child, nodeName_)

    def buildChildren(self, child_, nodeName_, from_subclass=False):
        if nodeName_ == 'route':
            obj_ = Route()
            obj_.build(child_)
            self.routes.append(obj_)


class Route(object):

    def __init__(self):
        self.name = ''
        self.operation = None
        self.match = {
            "destination": [],
            "source": [],
            "protocol": [],
            "port": [],
            "destination-port": [],
            "source-port": [],
            "icmp-code": [],
            "icmp-type": [],
            "tcp-flags": [],
            "packet-length": [],
            "dscp": [],
            "fragment": []
        }
        '''
        Match is a dict with list values, example:
        self.match = {
            "destination": [<ip-prefix(es)>],
            "source": [<ip-prefix(es)>],
            "protocol": [<numeric-expression(s)>],
            "port": [<numeric-expression(s)>],
            "destination-port": [<numeric-expression(s)>],
            "source-port": [<numeric-expression(s)>],
            "icmp-code": [<numeric-expression(s)>],
            "icmp-type": [<numeric-expression(s)>],
            "tcp-flags": [<bitwise-expression(s)>],
            "packet-length": [<numeric-expression(s)>],
            "dscp": [<numeric-expression(s)>],
            "fragment": [
                "dont-fragment"
                "not-a-fragment"
                "is-fragment"
                "first-fragment"
                "last-fragment"
            ]
        }
        '''
        self.then = {
            "accept": False,
            "discard": False,
            "community": False,
            "next-term": False,
            "rate-limit": False,
            "sample": False,
            "routing-instance": False
        }
        '''
        Then is a dict (have to see about this in the future):
        self.then = {
            "accept": True/False,
            "discard": True/False,
            "community": "<name>"/False,
            "next-term": True/False,
            "rate-limit": <rate>/False,
            "sample": True/False,
            "routing-instance": "<RouteTarget extended community>"
        }
        '''

    def export(self):
        if self.operation:
            ro = etree.Element('route', {'operation': self.operation})
        else:
            ro = etree.Element('route')
        if self.name:
            etree.SubElement(ro, "name").text = self.name
        match = etree.Element("match")
        for key in self.match:
            if self.match[key]:
                for value in self.match[key]:
                    etree.SubElement(match, key).text = value
        if match.getchildren():
            ro.append(match)
        then = etree.Element("then")
        for key in self.then:
            if self.then[key]:
                if self.then[key] is not True and self.then[key] is not False:
                    etree.SubElement(then, key).text = self.then[key]
                else:
                    etree.SubElement(then, key)
        if then.getchildren():
            ro.append(then)
        if ro.getchildren():
            return ro
        else:
            return False

    def build(self, node):
        for child in node:
            nodeName_ = tag_pattern.match(child.tag).groups()[-1]
            self.buildChildren(child, nodeName_)

    def buildChildren(self, child_, nodeName_, from_subclass=False):
        if nodeName_ == 'name':
            name_ = child_.text
            name_ = re.sub(whitespace_pattern, " ", name_).strip()
            self.name = name_
        elif nodeName_ == 'match':
            for grandChild_ in child_:
                grandChildName_ = tag_pattern.match(
                    grandChild_.tag).groups()[-1]
                grandChildText = grandChild_.text
                grandChildText = re.sub(
                    whitespace_pattern, " ", grandChildText).strip()
                self.match[grandChildName_].append(grandChildText)
        elif nodeName_ == 'then':
            for grandChild_ in child_:
                grandChildName_ = tag_pattern.match(
                    grandChild_.tag).groups()[-1]
                self.then[grandChildName_] = True
Kent1/nxpy
nxpy/flow.py
Python
apache-2.0
4,735
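A minimal, hypothetical usage sketch for the Flow/Route classes in the nxpy/flow.py record above (not part of that file); it assumes the module is importable as nxpy.flow and that lxml is installed, and the route name, prefix, protocol and port values are illustrative only.

# Hypothetical example, not from the nxpy source: build one flowspec route,
# attach it to a Flow, and serialize the result to XML with lxml.
from lxml import etree

from nxpy.flow import Flow, Route

route = Route()
route.name = 'block-dns'                          # illustrative name
route.match['destination'].append('10.0.0.0/24')  # illustrative prefix
route.match['protocol'].append('udp')
route.match['destination-port'].append('53')
route.then['discard'] = True                      # emits an empty <discard/> element

flow = Flow()
flow.routes.append(route)
print(etree.tostring(flow.export(), pretty_print=True))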
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import glance_store
from oslo_log import log as logging
from oslo_utils import encodeutils
import webob.exc

from glance.api import policy
from glance.api.v2 import images as v2_api
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
import glance.db
import glance.gateway
from glance.i18n import _
import glance.notifier

LOG = logging.getLogger(__name__)


class Controller(object):
    def __init__(self, db_api=None, policy_enforcer=None, notifier=None,
                 store_api=None):
        self.db_api = db_api or glance.db.get_api()
        self.policy = policy_enforcer or policy.Enforcer()
        self.notifier = notifier or glance.notifier.Notifier()
        self.store_api = store_api or glance_store
        self.gateway = glance.gateway.Gateway(self.db_api, self.store_api,
                                              self.notifier, self.policy)

    @utils.mutating
    def update(self, req, image_id, tag_value):
        image_repo = self.gateway.get_repo(req.context)
        try:
            image = image_repo.get(image_id)
            image.tags.add(tag_value)
            image_repo.save(image)
        except exception.NotFound:
            msg = _("Image %s not found.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPNotFound(explanation=msg)
        except exception.Forbidden:
            msg = _("Not allowed to update tags for image %s.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
        except exception.Invalid as e:
            msg = (_("Could not update image: %s")
                   % encodeutils.exception_to_unicode(e))
            LOG.warning(msg)
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except exception.ImageTagLimitExceeded as e:
            msg = (_("Image tag limit exceeded for image %(id)s: %(e)s:")
                   % {"id": image_id,
                      "e": encodeutils.exception_to_unicode(e)})
            LOG.warning(msg)
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)

    @utils.mutating
    def delete(self, req, image_id, tag_value):
        image_repo = self.gateway.get_repo(req.context)
        try:
            image = image_repo.get(image_id)
            if tag_value not in image.tags:
                raise webob.exc.HTTPNotFound()
            image.tags.remove(tag_value)
            image_repo.save(image)
        except exception.NotFound:
            msg = _("Image %s not found.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPNotFound(explanation=msg)
        except exception.Forbidden:
            msg = _("Not allowed to delete tags for image %s.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)


class ResponseSerializer(wsgi.JSONResponseSerializer):
    def update(self, response, result):
        response.status_int = 204

    def delete(self, response, result):
        response.status_int = 204


class RequestDeserializer(wsgi.JSONRequestDeserializer):
    def update(self, request):
        try:
            schema = v2_api.get_schema()
            schema_format = {"tags": [request.urlvars.get('tag_value')]}
            schema.validate(schema_format)
        except exception.InvalidObject as e:
            raise webob.exc.HTTPBadRequest(explanation=e.msg)
        return super(RequestDeserializer, self).default(request)


def create_resource():
    """Images resource factory method"""
    serializer = ResponseSerializer()
    deserializer = RequestDeserializer()
    controller = Controller()
    return wsgi.Resource(controller, deserializer, serializer)
klmitch/glance
glance/api/v2/image_tags.py
Python
apache-2.0
4,348
'''
@author: Michael Wan
@since : 2014-11-08
'''
from math import log
import operator


def createDataSet():
    dataSet = [[1, 1, 'yes'],
               [1, 1, 'yes'],
               [1, 0, 'no'],
               [0, 1, 'no'],
               [0, 1, 'no']]
    labels = ['no surfacing', 'flippers']  # change to discrete values
    return dataSet, labels


def calcShannonEnt(dataSet):
    numEntries = len(dataSet)
    labelCounts = {}
    for featVec in dataSet:  # count the unique labels and their occurrence
        currentLabel = featVec[-1]
        if currentLabel not in labelCounts.keys():
            labelCounts[currentLabel] = 0
        labelCounts[currentLabel] += 1
    shannonEnt = 0.0
    for key in labelCounts:
        prob = float(labelCounts[key]) / numEntries
        shannonEnt -= prob * log(prob, 2)  # log base 2
    return shannonEnt


def splitDataSet(dataSet, axis, value):
    retDataSet = []
    for featVec in dataSet:
        if featVec[axis] == value:
            reducedFeatVec = featVec[:axis]  # chop out axis used for splitting
            reducedFeatVec.extend(featVec[axis + 1:])
            retDataSet.append(reducedFeatVec)
    return retDataSet


def chooseBestFeatureToSplit(dataSet):
    numFeatures = len(dataSet[0]) - 1  # the last column is used for the labels
    baseEntropy = calcShannonEnt(dataSet)
    bestInfoGain = 0.0
    bestFeature = -1
    for i in range(numFeatures):  # iterate over all the features
        featList = [example[i] for example in dataSet]  # all values of this feature
        uniqueVals = set(featList)  # get a set of unique values
        newEntropy = 0.0
        for value in uniqueVals:
            subDataSet = splitDataSet(dataSet, i, value)
            prob = len(subDataSet) / float(len(dataSet))
            newEntropy += prob * calcShannonEnt(subDataSet)
        infoGain = baseEntropy - newEntropy  # info gain, i.e. reduction in entropy
        if infoGain > bestInfoGain:  # compare this to the best gain so far
            bestInfoGain = infoGain  # if better than current best, set to best
            bestFeature = i
    return bestFeature  # returns an integer


def majorityCnt(classList):
    classCount = {}
    for vote in classList:
        if vote not in classCount.keys():
            classCount[vote] = 0
        classCount[vote] += 1
    sortedClassCount = sorted(classCount.iteritems(),
                              key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]


def createTree(dataSet, labels):
    classList = [example[-1] for example in dataSet]
    if classList.count(classList[0]) == len(classList):
        return classList[0]  # stop splitting when all of the classes are equal
    if len(dataSet[0]) == 1:  # stop splitting when there are no more features
        return majorityCnt(classList)
    bestFeat = chooseBestFeatureToSplit(dataSet)
    bestFeatLabel = labels[bestFeat]
    myTree = {bestFeatLabel: {}}
    del(labels[bestFeat])
    featValues = [example[bestFeat] for example in dataSet]
    uniqueVals = set(featValues)
    for value in uniqueVals:
        subLabels = labels[:]  # copy all of labels, so trees don't mess up existing labels
        myTree[bestFeatLabel][value] = createTree(
            splitDataSet(dataSet, bestFeat, value), subLabels)
    return myTree


def classify(inputTree, featLabels, testVec):
    firstStr = inputTree.keys()[0]
    secondDict = inputTree[firstStr]
    featIndex = featLabels.index(firstStr)
    key = testVec[featIndex]
    valueOfFeat = secondDict[key]
    if isinstance(valueOfFeat, dict):
        classLabel = classify(valueOfFeat, featLabels, testVec)
    else:
        classLabel = valueOfFeat
    return classLabel


def storeTree(inputTree, filename):
    import pickle
    fw = open(filename, 'w')
    pickle.dump(inputTree, fw)
    fw.close()


def grabTree(filename):
    import pickle
    fr = open(filename)
    return pickle.load(fr)
onehao/opensource
pyml/inaction/ch03/decisiontree/trees.py
Python
apache-2.0
4,087
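A hypothetical usage sketch for the ID3 decision-tree helpers in trees.py above (not part of that file); it is Python 2, matching the dict.keys()[0] and iteritems() idioms the module relies on, and the module name and the printed values are assumptions.

# Hypothetical example, not from the original file (Python 2).
import trees

dataSet, labels = trees.createDataSet()
print trees.calcShannonEnt(dataSet)          # entropy of the toy labels, ~0.971
tree = trees.createTree(dataSet, labels[:])  # pass a copy: createTree deletes used labels
print tree                                   # {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}
print trees.classify(tree, labels, [1, 0])   # sample with flippers=0 -> 'no'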
#!/usr/bin/env python3 # Copyright 2019 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # python ./show_status.py /home/rhel72/config-test.yml DEBUG import os import sys import subprocess import readline from lib.inventory import Inventory from lib.logger import Logger from lib.ssh import SSH_CONNECTION, SSH_Exception from lib import genesis GEN_PATH = genesis.GEN_PATH GEN_CONTAINER_NAME = genesis.container_name GEN_CONTAINER_RUNNING = genesis.container_running() GEN_CONTAINER_ADDR = genesis.container_addr() GEN_CONTAINER_SSH_KEY_PRIVATE = genesis.get_ssh_private_key_file() HOME_DIR = os.path.expanduser('~') FILE_PATH = os.path.dirname(os.path.abspath(__file__)) def rlinput(prompt, prefill=''): readline.set_startup_hook(lambda: readline.insert_text(prefill)) try: return input(prompt) finally: readline.set_startup_hook() def main(log, inv_file): inv = Inventory(log, inv_file) print('\nBridge Status: \n') vlan_mgmt = inv.get_vlan_mgmt_network() bridge_vlan_mgmt = 'br' + str(vlan_mgmt) vlan_mgmt_client = inv.get_vlan_mgmt_client_network() bridge_vlan_mgmt_client = 'br' + str(vlan_mgmt_client) output = subprocess.check_output(['bash', '-c', 'brctl show'] ).decode("utf-8") if bridge_vlan_mgmt not in output: print(' Management bridge {} not found\n'.format(bridge_vlan_mgmt)) else: print(subprocess.check_output( ['bash', '-c', 'brctl show ' + bridge_vlan_mgmt])) if bridge_vlan_mgmt_client not in output: print(' Client bridge {} not found\n'.format(bridge_vlan_mgmt_client)) else: print(subprocess.check_output( ['bash', '-c', 'brctl show ' + bridge_vlan_mgmt_client])) print('Container Status: \n') output = subprocess.check_output(['bash', '-c', 'sudo lxc-ls -f'] ).decode("utf-8") if GEN_CONTAINER_NAME + ' ' in output: print(output) else: print(' ' + GEN_CONTAINER_NAME + ' container does not exist\n') if GEN_CONTAINER_RUNNING: ssh_cont = None ssh_log_filename = GEN_PATH + '/gen_ssh.log' if os.path.isfile(GEN_CONTAINER_SSH_KEY_PRIVATE): try: ssh_cont = SSH_CONNECTION( GEN_CONTAINER_ADDR, log=log, ssh_log=ssh_log_filename, username='deployer', look_for_keys=False, key_filename=GEN_CONTAINER_SSH_KEY_PRIVATE) except SSH_Exception as exc: print('Failed to SSH to container {} using private key {}' .format(GEN_CONTAINER_NAME, GEN_CONTAINER_SSH_KEY_PRIVATE)) print(exc) if not ssh_cont: PASSWORD = 'ubuntu' print('Trying password "{}"'.format(PASSWORD)) while PASSWORD[-1:] != '.': try: ssh_cont = SSH_CONNECTION( GEN_CONTAINER_ADDR, log=log, ssh_log=ssh_log_filename, username='deployer', password=PASSWORD, look_for_keys=False) break except SSH_Exception as exc: print('Failed to SSH to container {} using password {}' .format(GEN_CONTAINER_NAME, PASSWORD)) print(exc) PASSWORD = rlinput("Enter a password for container (last char = '.' 
to terminate): ", PASSWORD) else: sys.exit(1) print() _, cobbler_running, _ = ssh_cont.send_cmd( 'ps aux|grep cobbler') if 'root' in cobbler_running: print('cobbler is running') _, cobbler_status, _ = ssh_cont.send_cmd( 'sudo cobbler status') print(cobbler_status) else: print('cobbler is not running') _, dnsmasq_running, _ = ssh_cont.send_cmd( 'ps aux|grep dnsmasq') if 'root' in dnsmasq_running: print('dnsmasq is running') _, dnsmasq_status, _ = ssh_cont.send_cmd( 'cat /var/lib/misc/dnsmasq.leases') print(dnsmasq_status) else: print('dnsmasq is not running') ssh_cont.close() else: print('Container not running {}'.format(GEN_CONTAINER_RUNNING)) def print_lines(str, line_list): """Splits str at newline (\n) characters, then prints the lines which contain elements from line_list. If line_list=[*] then all lines are printed.""" str = str.splitlines() index = 0 for _ in range(len(str)): for substr in line_list: if substr in str[index] or substr == '*': print(str[index]) index += 1 def get_int_input(prompt_str, minn, maxx): while 1: try: _input = int(input(prompt_str)) if not (minn <= _input <= maxx): raise ValueError() else: break except ValueError: print("enter an integer between " + str(minn) + ' and ' + str(maxx)) return _input if __name__ == '__main__': """Show status of the POWER-Up environment Args: INV_FILE (string): Inventory file. LOG_LEVEL (string): Log level. Raises: Exception: If parameter count is invalid. """ LOG = Logger(__file__) ARGV_MAX = 3 ARGV_COUNT = len(sys.argv) if ARGV_COUNT > ARGV_MAX: try: raise Exception() except: LOG.error('Invalid argument count') sys.exit(1) INV_FILE = sys.argv[1] LOG.set_level(sys.argv[2]) main(LOG, INV_FILE)
open-power-ref-design-toolkit/cluster-genesis
scripts/python/show_status.py
Python
apache-2.0
6,393
#
# Copyright 2016 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/publisher/prometheus.py"""

import datetime
from unittest import mock
import uuid

from oslotest import base
import requests
from urllib import parse as urlparse

from ceilometer.publisher import prometheus
from ceilometer import sample
from ceilometer import service


class TestPrometheusPublisher(base.BaseTestCase):

    resource_id = str(uuid.uuid4())

    sample_data = [
        sample.Sample(
            name='alpha',
            type=sample.TYPE_CUMULATIVE,
            unit='',
            volume=1,
            user_id='test',
            project_id='test',
            resource_id=resource_id,
            timestamp=datetime.datetime.utcnow().isoformat(),
            resource_metadata={'name': 'TestPublish'},
        ),
        sample.Sample(
            name='beta',
            type=sample.TYPE_DELTA,
            unit='',
            volume=3,
            user_id='test',
            project_id='test',
            resource_id=resource_id,
            timestamp=datetime.datetime.utcnow().isoformat(),
            resource_metadata={'name': 'TestPublish'},
        ),
        sample.Sample(
            name='gamma',
            type=sample.TYPE_GAUGE,
            unit='',
            volume=5,
            user_id='test',
            project_id='test',
            resource_id=resource_id,
            timestamp=datetime.datetime.now().isoformat(),
            resource_metadata={'name': 'TestPublish'},
        ),
        sample.Sample(
            name='delta.epsilon',
            type=sample.TYPE_GAUGE,
            unit='',
            volume=7,
            user_id='test',
            project_id='test',
            resource_id=resource_id,
            timestamp=datetime.datetime.now().isoformat(),
            resource_metadata={'name': 'TestPublish'},
        ),
    ]

    def setUp(self):
        super(TestPrometheusPublisher, self).setUp()
        self.CONF = service.prepare_service([], [])

    def test_post_samples(self):
        """Test publisher post."""
        parsed_url = urlparse.urlparse(
            'prometheus://localhost:90/metrics/job/os')
        publisher = prometheus.PrometheusPublisher(self.CONF, parsed_url)

        res = requests.Response()
        res.status_code = 200
        with mock.patch.object(requests.Session, 'post',
                               return_value=res) as m_req:
            publisher.publish_samples(self.sample_data)

        data = """# TYPE alpha counter
alpha{resource_id="%s", project_id="test"} 1
beta{resource_id="%s", project_id="test"} 3
# TYPE gamma gauge
gamma{resource_id="%s", project_id="test"} 5
# TYPE delta_epsilon gauge
delta_epsilon{resource_id="%s", project_id="test"} 7
""" % (self.resource_id, self.resource_id, self.resource_id,
       self.resource_id)

        expected = [
            mock.call('http://localhost:90/metrics/job/os',
                      auth=None,
                      cert=None,
                      data=data,
                      headers={'Content-type': 'plain/text'},
                      timeout=5,
                      verify=True)
        ]
        self.assertEqual(expected, m_req.mock_calls)

    def test_post_samples_ssl(self):
        """Test publisher post."""
        parsed_url = urlparse.urlparse(
            'prometheus://localhost:90/metrics/job/os?ssl=1')
        publisher = prometheus.PrometheusPublisher(self.CONF, parsed_url)

        res = requests.Response()
        res.status_code = 200
        with mock.patch.object(requests.Session, 'post',
                               return_value=res) as m_req:
            publisher.publish_samples(self.sample_data)

        data = """# TYPE alpha counter
alpha{resource_id="%s", project_id="test"} 1
beta{resource_id="%s", project_id="test"} 3
# TYPE gamma gauge
gamma{resource_id="%s", project_id="test"} 5
# TYPE delta_epsilon gauge
delta_epsilon{resource_id="%s", project_id="test"} 7
""" % (self.resource_id, self.resource_id, self.resource_id,
       self.resource_id)

        expected = [
            mock.call('https://localhost:90/metrics/job/os',
                      auth=None,
                      cert=None,
                      data=data,
                      headers={'Content-type': 'plain/text'},
                      timeout=5,
                      verify=True)
        ]
        self.assertEqual(expected, m_req.mock_calls)
openstack/ceilometer
ceilometer/tests/unit/publisher/test_prometheus.py
Python
apache-2.0
4,934
# Lint as: python3 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base models for point-cloud based detection.""" from lingvo import compat as tf from lingvo.core import metrics from lingvo.core import py_utils from lingvo.tasks.car import base_decoder from lingvo.tasks.car import detection_3d_metrics from lingvo.tasks.car import transform_util from lingvo.tasks.car.waymo import waymo_ap_metric from lingvo.tasks.car.waymo import waymo_metadata import numpy as np class WaymoOpenDatasetDecoder(base_decoder.BaseDecoder): """A decoder to use for decoding a detector model on Waymo.""" @classmethod def Params(cls): p = super().Params() p.Define( 'draw_visualizations', False, 'Boolean for whether to draw ' 'visualizations. This is independent of laser_sampling_rate.') p.ap_metric = waymo_ap_metric.WaymoAPMetrics.Params( waymo_metadata.WaymoMetadata()) p.Define( 'extra_ap_metrics', {}, 'Dictionary of extra AP metrics to run in the decoder. The key' 'is the name of the metric and the value is a sub-class of ' 'APMetric') p.Define( 'save_residuals', False, 'If True, this expects the residuals and ground-truth to be available ' 'in the decoder output dictionary, and it will save it to the decoder ' 'output file. See decode_include_residuals in PointDetectorBase ' 'for details.') return p def CreateDecoderMetrics(self): """Decoder metrics for WaymoOpenDataset.""" p = self.params waymo_metric_p = p.ap_metric.Copy().Set(cls=waymo_ap_metric.WaymoAPMetrics) waymo_metrics = waymo_metric_p.Instantiate() class_names = waymo_metrics.metadata.ClassNames() # TODO(bencaine,vrv): There's some code smell with this ap_metrics params # usage. We create local copies of the params to then instantiate them. # Failing to do this risks users editing the params after construction of # the object, making each object method call have the potential for side # effects. # Create a new dictionary with copies of the params converted to objects # so we can then add these to the decoder metrics. extra_ap_metrics = {} for k, metric_p in p.extra_ap_metrics.items(): extra_ap_metrics[k] = metric_p.Instantiate() waymo_metric_bev_p = waymo_metric_p.Copy() waymo_metric_bev_p.box_type = '2d' waymo_metrics_bev = waymo_metric_bev_p.Instantiate() # Convert the list of class names to a dictionary mapping class_id -> name. class_id_to_name = dict(enumerate(class_names)) # TODO(vrv): This uses the same top down transform as for KITTI; # re-visit these settings since detections can happen all around # the car. 
top_down_transform = transform_util.MakeCarToImageTransform( pixels_per_meter=32., image_ref_x=512., image_ref_y=1408., flip_axes=True) decoder_metrics = py_utils.NestedMap({ 'top_down_visualization': (detection_3d_metrics.TopDownVisualizationMetric( top_down_transform, image_height=1536, image_width=1024, class_id_to_name=class_id_to_name)), 'num_samples_in_batch': metrics.AverageMetric(), 'waymo_metrics': waymo_metrics, 'waymo_metrics_bev': waymo_metrics_bev, }) self._update_metrics_class_keys = ['waymo_metrics_bev', 'waymo_metrics'] for k, metric in extra_ap_metrics.items(): decoder_metrics[k] = metric self._update_metrics_class_keys.append(k) decoder_metrics.mesh = detection_3d_metrics.WorldViewer() return decoder_metrics def ProcessOutputs(self, input_batch, model_outputs): """Produce additional decoder outputs for WaymoOpenDataset. Args: input_batch: A .NestedMap of the inputs to the model. model_outputs: A .NestedMap of the outputs of the model, including:: - per_class_predicted_bboxes: [batch, num_classes, num_boxes, 7] float Tensor with per class 3D (7 DOF) bounding boxes. - per_class_predicted_bbox_scores: [batch, num_classes, num_boxes] float Tensor with per class, per box scores. - per_class_valid_mask: [batch, num_classes, num_boxes] masking Tensor indicating which boxes were still kept after NMS for each class. Returns: A NestedMap of additional decoder outputs needed for PostProcessDecodeOut. """ del model_outputs p = self.params input_labels = input_batch.labels input_metadata = input_batch.metadata source_ids = tf.strings.join([ input_metadata.run_segment, tf.as_string(input_metadata.run_start_offset) ], separator='_') ret = py_utils.NestedMap({ 'num_points_in_bboxes': input_batch.labels.bboxes_3d_num_points, # Ground truth. 'bboxes_3d': input_labels.bboxes_3d, 'bboxes_3d_mask': input_labels.bboxes_3d_mask, 'labels': input_labels.labels, 'label_ids': input_labels.label_ids, 'speed': input_labels.speed, 'acceleration': input_labels.acceleration, # Fill the following in. 'source_ids': source_ids, 'difficulties': input_labels.single_frame_detection_difficulties, 'unfiltered_bboxes_3d_mask': input_labels.unfiltered_bboxes_3d_mask, 'run_segment': input_metadata.run_segment, 'run_start_offset': input_metadata.run_start_offset, 'pose': input_metadata.pose, }) if p.draw_visualizations: laser_sample = self._SampleLaserForVisualization( input_batch.lasers.points_xyz, input_batch.lasers.points_padding) ret.update(laser_sample) return ret def PostProcessDecodeOut(self, dec_out_dict, dec_metrics_dict): """Post-processes the decoder outputs.""" p = self.params # Update num_samples_in_batch. batch_size, num_classes, num_boxes, _ = ( dec_out_dict.per_class_predicted_bboxes.shape) dec_metrics_dict.num_samples_in_batch.Update(batch_size) # Update decoder output by removing z-coordinate, thus reshaping the bboxes # to [batch, num_bboxes, 5] to be compatible with # TopDownVisualizationMetric. # Indices corresponding to the 2D bbox parameters (x, y, dx, dy, phi). 
bbox_2d_idx = np.asarray([1, 1, 0, 1, 1, 0, 1], dtype=np.bool) bboxes_2d = dec_out_dict.bboxes_3d[..., bbox_2d_idx] predicted_bboxes = dec_out_dict.per_class_predicted_bboxes[..., bbox_2d_idx] if p.draw_visualizations and dec_out_dict.points_sampled: tf.logging.info('Updating sample for top down visualization') dec_metrics_dict.mesh.Update( py_utils.NestedMap({ 'points_xyz': dec_out_dict.points_xyz, 'points_padding': dec_out_dict.points_padding, })) # Flatten our predictions/scores to match the API of the visualization # The last dimension of flattened_bboxes is 5 due to the mask # above using bbox_2d_idx. flattened_bboxes = np.reshape(predicted_bboxes, [batch_size, num_classes * num_boxes, 5]) flattened_visualization_weights = np.reshape( dec_out_dict.visualization_weights, [batch_size, num_classes * num_boxes]) # Create a label id mask for now to maintain compatibility. # TODO(bencaine): Refactor visualizations to reflect new structure. flattened_visualization_labels = np.tile( np.arange(0, num_classes)[np.newaxis, :, np.newaxis], [batch_size, 1, num_boxes]) flattened_visualization_labels = np.reshape( flattened_visualization_labels, [batch_size, num_classes * num_boxes]) dec_metrics_dict.top_down_visualization.Update( py_utils.NestedMap({ 'visualization_labels': flattened_visualization_labels, 'predicted_bboxes': flattened_bboxes, 'visualization_weights': flattened_visualization_weights, 'points_xyz': dec_out_dict.points_xyz, 'points_padding': dec_out_dict.points_padding, 'gt_bboxes_2d': bboxes_2d, 'gt_bboxes_2d_weights': dec_out_dict.bboxes_3d_mask, 'labels': dec_out_dict.labels, 'difficulties': dec_out_dict.difficulties, 'source_ids': dec_out_dict.source_ids, })) # Update AP metrics. # Skip zeroth step decoding. if dec_out_dict.global_step == 0: return None # TODO(bencaine/vrv): Refactor to unify Waymo code and KITTI # Returned values are saved in model_dir/decode_* directories. output_to_save = [] for batch_idx in range(batch_size): pred_bboxes = dec_out_dict.per_class_predicted_bboxes[batch_idx] pred_bbox_scores = dec_out_dict.per_class_predicted_bbox_scores[batch_idx] # The current API expects a 'height' matrix to be passed for filtering # detections based on height. This is a KITTI-ism that we need to remove, # but for now we just give a height of 1. The MinHeight metadata function # for non-KITTI datasets should have a threshold lower than this value. heights = np.ones((num_classes, num_boxes)).astype(np.float32) gt_mask = dec_out_dict.bboxes_3d_mask[batch_idx].astype(bool) gt_labels = dec_out_dict.labels[batch_idx][gt_mask] gt_bboxes = dec_out_dict.bboxes_3d[batch_idx][gt_mask] gt_difficulties = dec_out_dict.difficulties[batch_idx][gt_mask] gt_num_points = dec_out_dict.num_points_in_bboxes[batch_idx][gt_mask] # Note that this is not used in the KITTI evaluation. gt_speed = dec_out_dict.speed[batch_idx][gt_mask] # TODO(shlens): Update me for metric_key in self._update_metrics_class_keys: metric_cls = dec_metrics_dict[metric_key] metric_cls.Update( dec_out_dict.source_ids[batch_idx], py_utils.NestedMap( groundtruth_labels=gt_labels, groundtruth_bboxes=gt_bboxes, groundtruth_difficulties=gt_difficulties, groundtruth_num_points=gt_num_points, groundtruth_speed=gt_speed, detection_scores=pred_bbox_scores, detection_boxes=pred_bboxes, detection_heights_in_pixels=heights, )) # We still want to save all ground truth (even if it was filtered # in some way) so we use the unfiltered_bboxes_3d_mask here. 
gt_save_mask = dec_out_dict.unfiltered_bboxes_3d_mask[batch_idx].astype( bool) pd_save_mask = dec_out_dict.per_class_valid_mask[batch_idx] > 0 class_ids = np.tile(np.arange(num_classes)[:, np.newaxis], [1, num_boxes]) saved_results = py_utils.NestedMap( pose=dec_out_dict.pose[batch_idx], frame_id=dec_out_dict.source_ids[batch_idx], bboxes=pred_bboxes[pd_save_mask], scores=pred_bbox_scores[pd_save_mask], gt_labels=dec_out_dict.labels[batch_idx][gt_save_mask], gt_label_ids=dec_out_dict.label_ids[batch_idx][gt_save_mask], gt_speed=dec_out_dict.speed[batch_idx][gt_save_mask], gt_acceleration=dec_out_dict.acceleration[batch_idx][gt_save_mask], class_ids=class_ids[pd_save_mask], gt_bboxes=dec_out_dict.bboxes_3d[batch_idx][gt_save_mask], gt_difficulties=dec_out_dict.difficulties[batch_idx][gt_save_mask], ) if p.save_residuals: # The leading shapes of these tensors should match bboxes and scores. # These are the underlying tensors that can are used to compute score # and bboxes. saved_results.update({ 'bboxes_gt_residuals': dec_out_dict.per_class_gt_residuals[batch_idx][pd_save_mask], 'bboxes_gt_labels': dec_out_dict.per_class_gt_labels[batch_idx][pd_save_mask], 'bboxes_residuals': dec_out_dict.per_class_residuals[batch_idx][pd_save_mask], 'bboxes_logits': dec_out_dict.per_class_logits[batch_idx][pd_save_mask], 'bboxes_anchor_boxes': dec_out_dict.per_class_anchor_boxes[batch_idx][pd_save_mask], }) serialized = self.SaveTensors(saved_results) output_to_save += [(dec_out_dict.source_ids[batch_idx], serialized)] return output_to_save
tensorflow/lingvo
lingvo/tasks/car/waymo/waymo_decoder.py
Python
apache-2.0
13,038
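The 7-DOF to 5-DOF reduction performed in PostProcessDecodeOut above is driven entirely by the boolean mask bbox_2d_idx. A minimal standalone NumPy sketch (toy box values, not lingvo code) of what that masking does:

import numpy as np

# Toy boxes: [batch=1, num_boxes=2, 7] laid out as (x, y, z, dx, dy, dz, phi).
bboxes_3d = np.array([[[1.0, 2.0, 0.5, 4.0, 2.0, 1.5, 0.1],
                       [5.0, 6.0, 0.7, 3.0, 1.8, 1.4, 0.2]]])

# Keep (x, y, dx, dy, phi); drop the z and dz components.
bbox_2d_idx = np.asarray([1, 1, 0, 1, 1, 0, 1], dtype=bool)
bboxes_2d = bboxes_3d[..., bbox_2d_idx]

print(bboxes_2d.shape)  # (1, 2, 5)
print(bboxes_2d[0, 0])  # [1.  2.  4.  2.  0.1]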
# -*- coding: utf-8 -*- # Copyright 2017 GIG Technology NV # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # @@license_version:1.3@@ import mc_unittest from rogerthat.bizz.profile import create_user_profile from rogerthat.bizz.system import update_app_asset_response from rogerthat.capi.system import updateAppAsset from rogerthat.dal.mobile import get_mobile_settings_cached from rogerthat.models.properties.profiles import MobileDetails from rogerthat.rpc import users from rogerthat.rpc.models import Mobile from rogerthat.rpc.rpc import logError from rogerthat.to.app import UpdateAppAssetRequestTO class Test(mc_unittest.TestCase): def testSendNews(self): self.set_datastore_hr_probability(1) scale_x = 1 request = UpdateAppAssetRequestTO(u"kind", u"url", scale_x) app_user = users.User('geert@example.com') user_profile = create_user_profile(app_user, 'geert', language='en') mobile = users.get_current_mobile() user_profile.mobiles = MobileDetails() user_profile.mobiles.addNew(mobile.account, Mobile.TYPE_ANDROID_HTTP, None, u"rogerthat") user_profile.put() ms = get_mobile_settings_cached(mobile) ms.majorVersion = 0 ms.minorVersion = 2447 ms.put() updateAppAsset(update_app_asset_response, logError, app_user, request=request) ms.minorVersion = 2449 ms.put() updateAppAsset(update_app_asset_response, logError, app_user, request=request)
rogerthat-platform/rogerthat-backend
src-test/rogerthat_tests/mobicage/capi/test_feature_version.py
Python
apache-2.0
1,999
from __future__ import absolute_import, print_function, division from pony.py23compat import basestring from functools import wraps from contextlib import contextmanager from pony.orm.core import Database from pony.utils import import_module def raises_exception(exc_class, msg=None): def decorator(func): def wrapper(self, *args, **kwargs): try: func(self, *args, **kwargs) self.fail("expected exception %s wasn't raised" % exc_class.__name__) except exc_class as e: if not e.args: self.assertEqual(msg, None) elif msg is not None: self.assertEqual(e.args[0], msg, "incorrect exception message. expected '%s', got '%s'" % (msg, e.args[0])) wrapper.__name__ = func.__name__ return wrapper return decorator @contextmanager def raises_if(test, cond, exc_class, exc_msg=None): try: yield except exc_class as e: test.assertTrue(cond) if exc_msg is None: pass elif exc_msg.startswith('...') and exc_msg != '...': if exc_msg.endswith('...'): test.assertIn(exc_msg[3:-3], str(e)) else: test.assertTrue(str(e).endswith(exc_msg[3:])) elif exc_msg.endswith('...'): test.assertTrue(str(e).startswith(exc_msg[:-3])) else: test.assertEqual(str(e), exc_msg) else: test.assertFalse(cond) def flatten(x): result = [] for el in x: if hasattr(el, "__iter__") and not isinstance(el, basestring): result.extend(flatten(el)) else: result.append(el) return result class TestConnection(object): def __init__(con, database): con.database = database if database and database.provider_name == 'postgres': con.autocommit = True def commit(con): pass def rollback(con): pass def cursor(con): return test_cursor class TestCursor(object): def __init__(cursor): cursor.description = [] cursor.rowcount = 0 def execute(cursor, sql, args=None): pass def fetchone(cursor): return None def fetchmany(cursor, size): return [] def fetchall(cursor): return [] test_cursor = TestCursor() class TestPool(object): def __init__(pool, database): pool.database = database def connect(pool): return TestConnection(pool.database) def release(pool, con): pass def drop(pool, con): pass def disconnect(pool): pass class TestDatabase(Database): real_provider_name = None raw_server_version = None sql = None def bind(self, provider_name, *args, **kwargs): if self.real_provider_name is not None: provider_name = self.real_provider_name self.provider_name = provider_name provider_module = import_module('pony.orm.dbproviders.' 
+ provider_name) provider_cls = provider_module.provider_cls raw_server_version = self.raw_server_version if raw_server_version is None: if provider_name == 'sqlite': raw_server_version = '3.7.17' elif provider_name in ('postgres', 'pygresql'): raw_server_version = '9.2' elif provider_name == 'oracle': raw_server_version = '11.2.0.2.0' elif provider_name == 'mysql': raw_server_version = '5.6.11' else: assert False, provider_name # pragma: no cover t = [ int(component) for component in raw_server_version.split('.') ] if len(t) == 2: t.append(0) server_version = tuple(t) if provider_name in ('postgres', 'pygresql'): server_version = int('%d%02d%02d' % server_version) class TestProvider(provider_cls): def inspect_connection(provider, connection): pass TestProvider.server_version = server_version kwargs['pony_check_connection'] = False kwargs['pony_pool_mockup'] = TestPool(self) Database.bind(self, TestProvider, *args, **kwargs) def _execute(database, sql, globals, locals, frame_depth): assert False # pragma: no cover def _exec_sql(database, sql, arguments=None, returning_id=False): assert type(arguments) is not list and not returning_id database.sql = sql database.arguments = arguments return test_cursor def generate_mapping(database, filename=None, check_tables=True, create_tables=False): return Database.generate_mapping(database, filename, create_tables=False)
Ahmad31/Web_Flask_Cassandra
flask/lib/python2.7/site-packages/pony/orm/tests/testutils.py
Python
apache-2.0
4,752
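As a usage illustration for the test helpers above, here is a hedged sketch of wrapping a test method with the raises_exception decorator. The import path simply mirrors this file's location, and parse_positive is an invented example function, not part of Pony ORM:

import unittest

from pony.orm.tests.testutils import raises_exception  # assumed import path


def parse_positive(value):
    # Illustrative helper only: reject non-positive input.
    if value <= 0:
        raise ValueError('value must be positive')
    return value


class TestParsePositive(unittest.TestCase):

    @raises_exception(ValueError, 'value must be positive')
    def test_rejects_zero(self):
        parse_positive(0)


if __name__ == '__main__':
    unittest.main()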
"""Tabular QL agent""" import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import numpy as np import matplotlib.pyplot as plt from tqdm import tqdm import framework import utils DEBUG = False GAMMA = 0.5 # discounted factor TRAINING_EP = 0.5 # epsilon-greedy parameter for training TESTING_EP = 0.05 # epsilon-greedy parameter for testing NUM_RUNS = 10 NUM_EPOCHS = 300 NUM_EPIS_TRAIN = 25 # number of episodes for training at each epoch NUM_EPIS_TEST = 50 # number of episodes for testing ALPHA = 0.1 # learning rate for training ACTIONS = framework.get_actions() OBJECTS = framework.get_objects() NUM_ACTIONS = len(ACTIONS) NUM_OBJECTS = len(OBJECTS) model = None optimizer = None def epsilon_greedy(state_vector, epsilon): """Returns an action selected by an epsilon-greedy exploration policy Args: state_vector (torch.FloatTensor): extracted vector representation theta (np.ndarray): current weight matrix epsilon (float): the probability of choosing a random command Returns: (int, int): the indices describing the action/object to take """ if np.random.binomial(1, epsilon): action_index, object_index = np.random.randint(0, NUM_ACTIONS), np.random.randint(0, NUM_OBJECTS) else: act_arr, obj_arr = model(state_vector) action_index, object_index = torch.argmax(act_arr), torch.argmax(obj_arr) return (action_index, object_index) class DQN(nn.Module): """A simple deep Q network implementation. Computes Q values for each (action, object) tuple given an input state vector """ def __init__(self, state_dim, action_dim, object_dim, hidden_size=100): super(DQN, self).__init__() self.state_encoder = nn.Linear(state_dim, hidden_size) self.state2action = nn.Linear(hidden_size, action_dim) self.state2object = nn.Linear(hidden_size, object_dim) def forward(self, x): state = F.relu(self.state_encoder(x)) return self.state2action(state), self.state2object(state) # pragma: coderesponse template def deep_q_learning(current_state_vector, action_index, object_index, reward, next_state_vector, terminal): """Updates the weights of the DQN for a given transition Args: current_state_vector (torch.FloatTensor): vector representation of current state action_index (int): index of the current action object_index (int): index of the current object reward (float): the immediate reward the agent recieves from playing current command next_state_vector (torch.FloatTensor): vector representation of next state terminal (bool): True if this epsiode is over Returns: None """ with torch.no_grad(): q_values_action_next, q_values_object_next = model(next_state_vector) maxq_next = 1 / 2 * (q_values_action_next.max() + q_values_object_next.max()) q_value_cur_state = model(current_state_vector) Q_val_cur = 1/2 * (q_value_cur_state[0][action_index] + q_value_cur_state[1][object_index]) # Current Q value maxQ = 0.0 if terminal else maxq_next y = reward + GAMMA*maxQ # Target loss = 1/2 * (y - Q_val_cur)**2 optimizer.zero_grad() loss.backward() optimizer.step() # pragma: coderesponse end def run_episode(for_training): """ Runs one episode If for training, update Q function If for testing, computes and return cumulative discounted reward """ epsilon = TRAINING_EP if for_training else TESTING_EP # initialize for each episode i = 0 epi_reward = 0 (current_room_desc, current_quest_desc, terminal) = framework.newGame() while not terminal: # Choose next action and execute current_state = current_room_desc + current_quest_desc current_state_vector = torch.FloatTensor( utils.extract_bow_feature_vector(current_state, 
dictionary)) next_action_index, next_object_index = epsilon_greedy(current_state_vector, epsilon) next_room_desc, next_quest_desc, reward, terminal = framework.step_game( current_room_desc, current_quest_desc, next_action_index, next_object_index) next_state = next_room_desc + next_quest_desc next_state_vector = torch.FloatTensor( utils.extract_bow_feature_vector(next_state, dictionary)) if for_training: # update Q-function. deep_q_learning(current_state_vector, next_action_index, next_object_index, reward, next_state_vector, terminal) if not for_training: # update reward epi_reward += (GAMMA**i)*reward # prepare next step i+=1 current_room_desc, current_quest_desc = next_room_desc, next_quest_desc if not for_training: return epi_reward def run_epoch(): """Runs one epoch and returns reward averaged over test episodes""" rewards = [] for _ in range(NUM_EPIS_TRAIN): run_episode(for_training=True) for _ in range(NUM_EPIS_TEST): rewards.append(run_episode(for_training=False)) return np.mean(np.array(rewards)) def run(): """Returns array of test reward per epoch for one run""" global model global optimizer model = DQN(state_dim, NUM_ACTIONS, NUM_OBJECTS) optimizer = optim.SGD(model.parameters(), lr=ALPHA) single_run_epoch_rewards_test = [] pbar = tqdm(range(NUM_EPOCHS), ncols=80) for _ in pbar: single_run_epoch_rewards_test.append(run_epoch()) pbar.set_description( "Avg reward: {:0.6f} | Ewma reward: {:0.6f}".format( np.mean(single_run_epoch_rewards_test), utils.ewma(single_run_epoch_rewards_test))) return single_run_epoch_rewards_test if __name__ == '__main__': state_texts = utils.load_data('game.tsv') dictionary = utils.bag_of_words(state_texts) state_dim = len(dictionary) # set up the game framework.load_game_data() epoch_rewards_test = [] # shape NUM_RUNS * NUM_EPOCHS for _ in range(NUM_RUNS): epoch_rewards_test.append(run()) epoch_rewards_test = np.array(epoch_rewards_test) x = np.arange(NUM_EPOCHS) fig, axis = plt.subplots() axis.plot(x, np.mean(epoch_rewards_test, axis=0)) # plot reward per epoch averaged per run axis.set_xlabel('Epochs') axis.set_ylabel('reward') axis.set_title(('Linear: nRuns=%d, Epilon=%.2f, Epi=%d, alpha=%.4f' % (NUM_RUNS, TRAINING_EP, NUM_EPIS_TRAIN, ALPHA))) plt.show()
xunilrj/sandbox
courses/MITx/MITx 6.86x Machine Learning with Python-From Linear Models to Deep Learning/project5/rl/agent_dqn.py
Python
apache-2.0
6,756
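The bootstrap target assembled inside deep_q_learning above decomposes Q(s, (a, o)) into an action head and an object head and averages their maxima. A small numeric sketch of that target, using made-up head outputs:

GAMMA = 0.5

# Hypothetical head outputs for the next state s'.
q_action_next = [0.2, 0.9, 0.1]
q_object_next = [0.4, 0.3]
reward = 1.0
terminal = False

# maxQ(s') is the average of the per-head maxima: 0.5 * (0.9 + 0.4) = 0.65.
maxq_next = 0.5 * (max(q_action_next) + max(q_object_next))

# y = r + gamma * maxQ(s'), or just r when the episode terminated.
target = reward + (0.0 if terminal else GAMMA * maxq_next)
print(target)  # 1.325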
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras initializers for TF 2. """ # pylint: disable=g-classes-have-attributes from __future__ import absolute_import from __future__ import division from __future__ import print_function import math from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.keras import backend from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_linalg_ops from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import stateless_random_ops from tensorflow.python.util.tf_export import keras_export _PARTITION_SHAPE = 'partition_shape' _PARTITION_OFFSET = 'partition_offset' @keras_export('keras.initializers.Initializer') class Initializer(object): """Initializer base class: all Keras initializers inherit from this class. Initializers should implement a `__call__` method with the following signature: ```python def __call__(self, shape, dtype=None, **kwargs): # returns a tensor of shape `shape` and dtype `dtype` # containing values drawn from a distribution of your choice. ``` Optionally, you an also implement the method `get_config` and the class method `from_config` in order to support serialization -- just like with any Keras object. Here's a simple example: a random normal initializer. ```python import tensorflow as tf class ExampleRandomNormal(tf.keras.initializers.Initializer): def __init__(self, mean, stddev): self.mean = mean self.stddev = stddev def __call__(self, shape, dtype=None, **kwargs): return tf.random.normal( shape, mean=self.mean, stddev=self.stddev, dtype=dtype) def get_config(self): # To support serialization return {"mean": self.mean, "stddev": self.stddev} ``` Note that we don't have to implement `from_config` in the example above since the constructor arguments of the class the keys in the config returned by `get_config` are the same. In this case, the default `from_config` works fine. """ def __call__(self, shape, dtype=None, **kwargs): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. **kwargs: Additional keyword arguments. """ raise NotImplementedError def get_config(self): """Returns the configuration of the initializer as a JSON-serializable dict. Returns: A JSON-serializable Python dict. """ return {} @classmethod def from_config(cls, config): """Instantiates an initializer from a configuration dictionary. Example: ```python initializer = RandomUniform(-1, 1) config = initializer.get_config() initializer = RandomUniform.from_config(config) ``` Args: config: A Python dictionary, the output of `get_config`. Returns: A `tf.keras.initializers.Initializer` instance. 
""" config.pop('dtype', None) return cls(**config) @keras_export('keras.initializers.Zeros', 'keras.initializers.zeros', v1=[]) class Zeros(Initializer): """Initializer that generates tensors initialized to 0. Also available via the shortcut function `tf.keras.initializers.zeros`. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.Zeros() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.Zeros() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) """ def __call__(self, shape, dtype=None, **kwargs): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are supported. If not specified, `tf.keras.backend.floatx()` is used, which default to `float32` unless you configured it otherwise (via `tf.keras.backend.set_floatx(float_dtype)`). **kwargs: Additional keyword arguments. """ _validate_kwargs(self.__class__.__name__, kwargs) dtype = _get_dtype(dtype) if not dtype.is_numpy_compatible or dtype == dtypes.string: raise ValueError('Expected numeric or boolean dtype, got %s.' % dtype) if _PARTITION_SHAPE in kwargs: shape = kwargs[_PARTITION_SHAPE] return array_ops.zeros(shape, dtype) @keras_export('keras.initializers.Ones', 'keras.initializers.ones', v1=[]) class Ones(Initializer): """Initializer that generates tensors initialized to 1. Also available via the shortcut function `tf.keras.initializers.ones`. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.Ones() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.Ones() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) """ def __call__(self, shape, dtype=None, **kwargs): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are supported. If not specified, `tf.keras.backend.floatx()` is used, which default to `float32` unless you configured it otherwise (via `tf.keras.backend.set_floatx(float_dtype)`). **kwargs: Additional keyword arguments. """ _validate_kwargs(self.__class__.__name__, kwargs) dtype = _get_dtype(dtype) if not dtype.is_numpy_compatible or dtype == dtypes.string: raise ValueError('Expected numeric or boolean dtype, got %s.' % dtype) if _PARTITION_SHAPE in kwargs: shape = kwargs[_PARTITION_SHAPE] return array_ops.ones(shape, dtype) @keras_export('keras.initializers.Constant', 'keras.initializers.constant', v1=[]) class Constant(Initializer): """Initializer that generates tensors with constant values. Also available via the shortcut function `tf.keras.initializers.constant`. Only scalar values are allowed. The constant value provided must be convertible to the dtype requested when calling the initializer. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.Constant(3.) >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.Constant(3.) >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: value: A Python scalar. """ def __init__(self, value=0): self.value = value def __call__(self, shape, dtype=None, **kwargs): """Returns a tensor object initialized to `self.value`. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. 
If not specified, `tf.keras.backend.floatx()` is used, which default to `float32` unless you configured it otherwise (via `tf.keras.backend.set_floatx(float_dtype)`). **kwargs: Additional keyword arguments. """ del kwargs return constant_op.constant( self.value, dtype=_get_dtype(dtype), shape=shape) def get_config(self): return {'value': self.value} @keras_export('keras.initializers.RandomUniform', 'keras.initializers.random_uniform', v1=[]) class RandomUniform(Initializer): """Initializer that generates tensors with a uniform distribution. Also available via the shortcut function `tf.keras.initializers.random_uniform`. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.) >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.) >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: minval: A python scalar or a scalar tensor. Lower bound of the range of random values to generate (inclusive). maxval: A python scalar or a scalar tensor. Upper bound of the range of random values to generate (exclusive). seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype. """ def __init__(self, minval=-0.05, maxval=0.05, seed=None): self.minval = minval self.maxval = maxval self.seed = seed self._random_generator = _RandomGenerator(seed) def __call__(self, shape, dtype=None, **kwargs): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point and integer types are supported. If not specified, `tf.keras.backend.floatx()` is used, which default to `float32` unless you configured it otherwise (via `tf.keras.backend.set_floatx(float_dtype)`). **kwargs: Additional keyword arguments. """ _validate_kwargs(self.__class__.__name__, kwargs) dtype = _get_dtype(dtype) if not dtype.is_floating and not dtype.is_integer: raise ValueError('Expected float or integer dtype, got %s.' % dtype) if _PARTITION_SHAPE in kwargs: shape = kwargs[_PARTITION_SHAPE] return self._random_generator.random_uniform(shape, self.minval, self.maxval, dtype) def get_config(self): return { 'minval': self.minval, 'maxval': self.maxval, 'seed': self.seed } @keras_export('keras.initializers.RandomNormal', 'keras.initializers.random_normal', v1=[]) class RandomNormal(Initializer): """Initializer that generates tensors with a normal distribution. Also available via the shortcut function `tf.keras.initializers.random_normal`. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.) >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.) >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: mean: a python scalar or a scalar tensor. Mean of the random values to generate. stddev: a python scalar or a scalar tensor. Standard deviation of the random values to generate. seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype. 
""" def __init__(self, mean=0.0, stddev=0.05, seed=None): self.mean = mean self.stddev = stddev self.seed = seed self._random_generator = _RandomGenerator(seed) def __call__(self, shape, dtype=None, **kwargs): """Returns a tensor object initialized to random normal values. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. If not specified, `tf.keras.backend.floatx()` is used, which default to `float32` unless you configured it otherwise (via `tf.keras.backend.set_floatx(float_dtype)`) **kwargs: Additional keyword arguments. """ _validate_kwargs(self.__class__.__name__, kwargs) dtype = _assert_float_dtype(_get_dtype(dtype)) if _PARTITION_SHAPE in kwargs: shape = kwargs[_PARTITION_SHAPE] return self._random_generator.random_normal(shape, self.mean, self.stddev, dtype) def get_config(self): return { 'mean': self.mean, 'stddev': self.stddev, 'seed': self.seed } @keras_export('keras.initializers.TruncatedNormal', 'keras.initializers.truncated_normal', v1=[]) class TruncatedNormal(Initializer): """Initializer that generates a truncated normal distribution. Also available via the shortcut function `tf.keras.initializers.truncated_normal`. The values generated are similar to values from a `tf.keras.initializers.RandomNormal` initializer except that values more than two standard deviations from the mean are discarded and re-drawn. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=1.) >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=1.) >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: mean: a python scalar or a scalar tensor. Mean of the random values to generate. stddev: a python scalar or a scalar tensor. Standard deviation of the random values to generate before truncation. seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype. """ def __init__(self, mean=0.0, stddev=0.05, seed=None): self.mean = mean self.stddev = stddev self.seed = seed self._random_generator = _RandomGenerator(seed) def __call__(self, shape, dtype=None, **kwargs): """Returns a tensor object initialized to random normal values (truncated). Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. If not specified, `tf.keras.backend.floatx()` is used, which default to `float32` unless you configured it otherwise (via `tf.keras.backend.set_floatx(float_dtype)`) **kwargs: Additional keyword arguments. """ _validate_kwargs(self.__class__.__name__, kwargs) dtype = _assert_float_dtype(_get_dtype(dtype)) if _PARTITION_SHAPE in kwargs: shape = kwargs[_PARTITION_SHAPE] return self._random_generator.truncated_normal(shape, self.mean, self.stddev, dtype) def get_config(self): return { 'mean': self.mean, 'stddev': self.stddev, 'seed': self.seed } @keras_export('keras.initializers.VarianceScaling', 'keras.initializers.variance_scaling', v1=[]) class VarianceScaling(Initializer): """Initializer capable of adapting its scale to the shape of weights tensors. Also available via the shortcut function `tf.keras.initializers.variance_scaling`. 
With `distribution="truncated_normal" or "untruncated_normal"`, samples are drawn from a truncated/untruncated normal distribution with a mean of zero and a standard deviation (after truncation, if used) `stddev = sqrt(scale / n)`, where `n` is: - number of input units in the weight tensor, if `mode="fan_in"` - number of output units, if `mode="fan_out"` - average of the numbers of input and output units, if `mode="fan_avg"` With `distribution="uniform"`, samples are drawn from a uniform distribution within `[-limit, limit]`, where `limit = sqrt(3 * scale / n)`. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.VarianceScaling( ... scale=0.1, mode='fan_in', distribution='uniform') >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.VarianceScaling( ... scale=0.1, mode='fan_in', distribution='uniform') >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: scale: Scaling factor (positive float). mode: One of "fan_in", "fan_out", "fan_avg". distribution: Random distribution to use. One of "truncated_normal", "untruncated_normal" and "uniform". seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype. """ def __init__(self, scale=1.0, mode='fan_in', distribution='truncated_normal', seed=None): if scale <= 0.: raise ValueError('`scale` must be positive float.') if mode not in {'fan_in', 'fan_out', 'fan_avg'}: raise ValueError('Invalid `mode` argument:', mode) distribution = distribution.lower() # Compatibility with keras-team/keras. if distribution == 'normal': distribution = 'truncated_normal' if distribution not in {'uniform', 'truncated_normal', 'untruncated_normal'}: raise ValueError('Invalid `distribution` argument:', distribution) self.scale = scale self.mode = mode self.distribution = distribution self.seed = seed self._random_generator = _RandomGenerator(seed) def __call__(self, shape, dtype=None, **kwargs): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. If not specified, `tf.keras.backend.floatx()` is used, which default to `float32` unless you configured it otherwise (via `tf.keras.backend.set_floatx(float_dtype)`) **kwargs: Additional keyword arguments. """ _validate_kwargs(self.__class__.__name__, kwargs) dtype = _assert_float_dtype(_get_dtype(dtype)) scale = self.scale fan_in, fan_out = _compute_fans(shape) if _PARTITION_SHAPE in kwargs: shape = kwargs[_PARTITION_SHAPE] if self.mode == 'fan_in': scale /= max(1., fan_in) elif self.mode == 'fan_out': scale /= max(1., fan_out) else: scale /= max(1., (fan_in + fan_out) / 2.) if self.distribution == 'truncated_normal': # constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.) 
stddev = math.sqrt(scale) / .87962566103423978 return self._random_generator.truncated_normal(shape, 0.0, stddev, dtype) elif self.distribution == 'untruncated_normal': stddev = math.sqrt(scale) return self._random_generator.random_normal(shape, 0.0, stddev, dtype) else: limit = math.sqrt(3.0 * scale) return self._random_generator.random_uniform(shape, -limit, limit, dtype) def get_config(self): return { 'scale': self.scale, 'mode': self.mode, 'distribution': self.distribution, 'seed': self.seed } @keras_export('keras.initializers.Orthogonal', 'keras.initializers.orthogonal', v1=[]) class Orthogonal(Initializer): """Initializer that generates an orthogonal matrix. Also available via the shortcut function `tf.keras.initializers.orthogonal`. If the shape of the tensor to initialize is two-dimensional, it is initialized with an orthogonal matrix obtained from the QR decomposition of a matrix of random numbers drawn from a normal distribution. If the matrix has fewer rows than columns then the output will have orthogonal rows. Otherwise, the output will have orthogonal columns. If the shape of the tensor to initialize is more than two-dimensional, a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])` is initialized, where `n` is the length of the shape vector. The matrix is subsequently reshaped to give a tensor of the desired shape. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.Orthogonal() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.Orthogonal() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: gain: multiplicative factor to apply to the orthogonal matrix seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype. References: [Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C) ([pdf](https://arxiv.org/pdf/1312.6120.pdf)) """ def __init__(self, gain=1.0, seed=None): self.gain = gain self.seed = seed self._random_generator = _RandomGenerator(seed) def __call__(self, shape, dtype=None, **kwargs): """Returns a tensor object initialized to an orthogonal matrix. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. If not specified, `tf.keras.backend.floatx()` is used, which default to `float32` unless you configured it otherwise (via `tf.keras.backend.set_floatx(float_dtype)`) **kwargs: Additional keyword arguments. 
""" _validate_kwargs(self.__class__.__name__, kwargs, support_partition=False) dtype = _assert_float_dtype(_get_dtype(dtype)) # Check the shape if len(shape) < 2: raise ValueError('The tensor to initialize must be ' 'at least two-dimensional') # Flatten the input shape with the last dimension remaining # its original shape so it works for conv2d num_rows = 1 for dim in shape[:-1]: num_rows *= dim num_cols = shape[-1] flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows)) # Generate a random matrix a = self._random_generator.random_normal(flat_shape, dtype=dtype) # Compute the qr factorization q, r = gen_linalg_ops.qr(a, full_matrices=False) # Make Q uniform d = array_ops.tensor_diag_part(r) q *= math_ops.sign(d) if num_rows < num_cols: q = array_ops.matrix_transpose(q) return self.gain * array_ops.reshape(q, shape) def get_config(self): return {'gain': self.gain, 'seed': self.seed} @keras_export('keras.initializers.Identity', 'keras.initializers.identity', v1=[]) class Identity(Initializer): """Initializer that generates the identity matrix. Also available via the shortcut function `tf.keras.initializers.identity`. Only usable for generating 2D matrices. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.Identity() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.Identity() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: gain: Multiplicative factor to apply to the identity matrix. """ def __init__(self, gain=1.0): self.gain = gain def __call__(self, shape, dtype=None, **kwargs): """Returns a tensor object initialized to a 2D identity matrix. Args: shape: Shape of the tensor. It should have exactly rank 2. dtype: Optional dtype of the tensor. Only floating point types are supported. If not specified, `tf.keras.backend.floatx()` is used, which default to `float32` unless you configured it otherwise (via `tf.keras.backend.set_floatx(float_dtype)`) **kwargs: Additional keyword arguments. """ _validate_kwargs(self.__class__.__name__, kwargs, support_partition=False) dtype = _assert_float_dtype(_get_dtype(dtype)) if len(shape) != 2: raise ValueError( 'Identity matrix initializer can only be used for 2D matrices.') initializer = linalg_ops.eye(*shape, dtype=dtype) return self.gain * initializer def get_config(self): return {'gain': self.gain} @keras_export('keras.initializers.GlorotUniform', 'keras.initializers.glorot_uniform', v1=[]) class GlorotUniform(VarianceScaling): """The Glorot uniform initializer, also called Xavier uniform initializer. Also available via the shortcut function `tf.keras.initializers.glorot_uniform`. Draws samples from a uniform distribution within `[-limit, limit]`, where `limit = sqrt(6 / (fan_in + fan_out))` (`fan_in` is the number of input units in the weight tensor and `fan_out` is the number of output units). Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.GlorotUniform() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.GlorotUniform() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype. 
References: [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html) ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)) """ def __init__(self, seed=None): super(GlorotUniform, self).__init__( scale=1.0, mode='fan_avg', distribution='uniform', seed=seed) def get_config(self): return {'seed': self.seed} @keras_export('keras.initializers.GlorotNormal', 'keras.initializers.glorot_normal', v1=[]) class GlorotNormal(VarianceScaling): """The Glorot normal initializer, also called Xavier normal initializer. Also available via the shortcut function `tf.keras.initializers.glorot_normal`. Draws samples from a truncated normal distribution centered on 0 with `stddev = sqrt(2 / (fan_in + fan_out))` where `fan_in` is the number of input units in the weight tensor and `fan_out` is the number of output units in the weight tensor. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.GlorotNormal() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.GlorotNormal() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype. References: [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html) ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)) """ def __init__(self, seed=None): super(GlorotNormal, self).__init__( scale=1.0, mode='fan_avg', distribution='truncated_normal', seed=seed) def get_config(self): return {'seed': self.seed} @keras_export('keras.initializers.LecunNormal', 'keras.initializers.lecun_normal', v1=[]) class LecunNormal(VarianceScaling): """Lecun normal initializer. Also available via the shortcut function `tf.keras.initializers.lecun_normal`. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. Draws samples from a truncated normal distribution centered on 0 with `stddev = sqrt(1 / fan_in)` where `fan_in` is the number of input units in the weight tensor. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.LecunNormal() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.LecunNormal() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: seed: A Python integer. Used to seed the random generator. References: - Self-Normalizing Neural Networks, [Klambauer et al., 2017] (https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) ([pdf] (https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf)) - Efficient Backprop, [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf) """ def __init__(self, seed=None): super(LecunNormal, self).__init__( scale=1., mode='fan_in', distribution='truncated_normal', seed=seed) def get_config(self): return {'seed': self.seed} @keras_export('keras.initializers.LecunUniform', 'keras.initializers.lecun_uniform', v1=[]) class LecunUniform(VarianceScaling): """Lecun uniform initializer. Also available via the shortcut function `tf.keras.initializers.lecun_uniform`. Draws samples from a uniform distribution within `[-limit, limit]`, where `limit = sqrt(3 / fan_in)` (`fan_in` is the number of input units in the weight tensor). 
Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.LecunUniform() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.LecunUniform() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype. References: - Self-Normalizing Neural Networks, [Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) # pylint: disable=line-too-long ([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf)) - Efficient Backprop, [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf) """ def __init__(self, seed=None): super(LecunUniform, self).__init__( scale=1., mode='fan_in', distribution='uniform', seed=seed) def get_config(self): return {'seed': self.seed} @keras_export('keras.initializers.HeNormal', 'keras.initializers.he_normal', v1=[]) class HeNormal(VarianceScaling): """He normal initializer. Also available via the shortcut function `tf.keras.initializers.he_normal`. It draws samples from a truncated normal distribution centered on 0 with `stddev = sqrt(2 / fan_in)` where `fan_in` is the number of input units in the weight tensor. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.HeNormal() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.HeNormal() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype. References: [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf)) """ def __init__(self, seed=None): super(HeNormal, self).__init__( scale=2., mode='fan_in', distribution='truncated_normal', seed=seed) def get_config(self): return {'seed': self.seed} @keras_export('keras.initializers.HeUniform', 'keras.initializers.he_uniform', v1=[]) class HeUniform(VarianceScaling): """He uniform variance scaling initializer. Also available via the shortcut function `tf.keras.initializers.he_uniform`. Draws samples from a uniform distribution within `[-limit, limit]`, where `limit = sqrt(6 / fan_in)` (`fan_in` is the number of input units in the weight tensor). Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.HeUniform() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.HeUniform() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype. 
References: [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf)) """ def __init__(self, seed=None): super(HeUniform, self).__init__( scale=2., mode='fan_in', distribution='uniform', seed=seed) def get_config(self): return {'seed': self.seed} def _get_dtype(dtype): if dtype is None: dtype = backend.floatx() return dtypes.as_dtype(dtype) def _assert_float_dtype(dtype): """Validate and return floating point type based on `dtype`. `dtype` must be a floating point type. Args: dtype: The data type to validate. Returns: Validated type. Raises: ValueError: if `dtype` is not a floating point type. """ dtype = dtypes.as_dtype(dtype) if not dtype.is_floating: raise ValueError('Expected floating point type, got %s.' % dtype) return dtype class _RandomGenerator(object): """Random generator that selects appropriate random ops.""" def __init__(self, seed=None): super(_RandomGenerator, self).__init__() if seed is not None: # Stateless random ops requires 2-int seed. self.seed = [seed, 0] else: self.seed = None def random_normal(self, shape, mean=0.0, stddev=1, dtype=dtypes.float32): """A deterministic random normal if seed is passed.""" if self.seed: op = stateless_random_ops.stateless_random_normal else: op = random_ops.random_normal return op( shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed) def random_uniform(self, shape, minval, maxval, dtype): """A deterministic random uniform if seed is passed.""" if self.seed: op = stateless_random_ops.stateless_random_uniform else: op = random_ops.random_uniform return op( shape=shape, minval=minval, maxval=maxval, dtype=dtype, seed=self.seed) def truncated_normal(self, shape, mean, stddev, dtype): """A deterministic truncated normal if seed is passed.""" if self.seed: op = stateless_random_ops.stateless_truncated_normal else: op = random_ops.truncated_normal return op( shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed) def _compute_fans(shape): """Computes the number of input and output units for a weight shape. Args: shape: Integer shape tuple or TF tensor shape. Returns: A tuple of integer scalars (fan_in, fan_out). """ if len(shape) < 1: # Just to avoid errors for constants. fan_in = fan_out = 1 elif len(shape) == 1: fan_in = fan_out = shape[0] elif len(shape) == 2: fan_in = shape[0] fan_out = shape[1] else: # Assuming convolution kernels (2D, 3D, or more). # kernel shape: (..., input_depth, depth) receptive_field_size = 1 for dim in shape[:-2]: receptive_field_size *= dim fan_in = shape[-2] * receptive_field_size fan_out = shape[-1] * receptive_field_size return int(fan_in), int(fan_out) def _validate_kwargs(cls_name, kwargs, support_partition=True): for kwarg in kwargs: if kwarg not in [_PARTITION_SHAPE, _PARTITION_OFFSET]: raise TypeError('Unknown keyword arguments: %s' % kwarg) elif not support_partition: raise ValueError('%s initializer doesn\'t support partition-related ' 'arguments' % cls_name)
petewarden/tensorflow
tensorflow/python/keras/initializers/initializers_v2.py
Python
apache-2.0
35,507
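The fan-in/fan-out bookkeeping in _compute_fans above is what every VarianceScaling-derived initializer (Glorot, He, LeCun) builds on. A standalone sketch, with an illustrative 3x3 conv kernel shape, of that computation and the uniform limit GlorotUniform derives from it:

import math


def compute_fans(shape):
    # Mirrors the logic of _compute_fans above for the common cases.
    if len(shape) < 1:
        return 1, 1
    if len(shape) == 1:
        return shape[0], shape[0]
    if len(shape) == 2:
        return shape[0], shape[1]
    receptive_field_size = 1
    for dim in shape[:-2]:
        receptive_field_size *= dim
    return shape[-2] * receptive_field_size, shape[-1] * receptive_field_size


fan_in, fan_out = compute_fans((3, 3, 64, 128))  # 3x3 kernel, 64 -> 128 channels
print(fan_in, fan_out)                           # 576 1152

# GlorotUniform samples from [-limit, limit] with limit = sqrt(6 / (fan_in + fan_out)).
limit = math.sqrt(6.0 / (fan_in + fan_out))
print(round(limit, 4))                           # 0.0589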
import sys
import os
import pandas as pd
from collections import defaultdict
import numpy as np

dirname = sys.argv[1]
path = os.path.join(dirname, "weights.tsv")
with open(path, "r") as f:
    df = pd.read_csv(f, sep="\t")
df = df[df["iter"] == 5]

# Collect, per (feature, class) pair, the position of that feature's weight
# within each event's sorted weights.
fc2r = defaultdict(list)
features = set()
for event, event_df in df.groupby("event"):
    pos_df = event_df[event_df["class"] == "SELECT"]
    event_df.loc[event_df["class"] == "SELECT", "rank"] = pos_df["weight"].argsort()
    for _, row in event_df.loc[event_df["class"] == "SELECT"][["name", "weight", "rank"]].iterrows():
        clazz = "SELECT"
        feature = row["name"]
        rank = row["rank"]
        fc2r[(feature, clazz)].append(rank)
        features.add(feature)
    neg_df = event_df[event_df["class"] == "NEXT"]
    event_df.loc[event_df["class"] == "NEXT", "rank"] = neg_df["weight"].argsort()
    for _, row in event_df.loc[event_df["class"] == "NEXT"][["name", "weight", "rank"]].iterrows():
        clazz = "NEXT"
        feature = row["name"]
        rank = row["rank"]
        fc2r[(feature, clazz)].append(rank)
        features.add(feature)

# Score each feature by the gap between its mean rank under SELECT and NEXT.
f2d = {}
for feature in features:
    sel_u = np.mean(fc2r[(feature, "SELECT")])
    next_u = np.mean(fc2r[(feature, "NEXT")])
    diff = max(sel_u, next_u) - min(sel_u, next_u)
    f2d[feature] = diff

print()
feat_diff = sorted(f2d.items(), key=lambda x: x[1])
for feat, diff in feat_diff[-50:]:
    print(feat)
kedz/cuttsum
trec2015/sbin/cross-validation/best-feats.py
Python
apache-2.0
1,444
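One detail worth flagging in the ranking step above: Series.argsort() returns the positions that would sort the weights, not the rank of each row; if true per-row ranks are intended, pandas' rank() (or a double argsort) is the usual idiom. A tiny standalone comparison, independent of the weights.tsv data:

import pandas as pd

weights = pd.Series([0.3, 0.1, 0.2])

print(weights.argsort().tolist())     # [1, 2, 0] -> sorting order, not per-row ranks
print((weights.rank() - 1).tolist())  # [2.0, 0.0, 1.0] -> zero-based rank of each row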
from graphics import * from Button import * from CreateNewUserScreen import * from ChangePasswordScreen import * import os class StartScreen: def is_game_in_progress(self,gid): for filename in os.listdir("games"): if str(gid) == filename: return True return False def __init__(self): self.player = None def getPlayer(self): return self.player def login(self,db): win = GraphWin("CofC Scrabble",300,300) win.setCoords(0,0,100,100) Text(Point(17,80),"User: ").draw(win) Text(Point(17,70),"Password: ").draw(win) user_entry = Entry(Point(50,80),10) user_entry.draw(win) password_entry = Entry(Point(50,70),10) password_entry.draw(win) message = Text(Point(50,90),"") message.draw(win) # Create a login button and a quit button login_button = Button("New game",Point(5,50),Point(35,60)) login_button.draw(win) continue_button = Button("Continue game",Point(5,35),Point(50,45)) continue_button.draw(win) game_id_entry = Entry(Point(70,40),10) game_id_entry.draw(win) new_button = Button("New user",Point(5,2),Point(35,12)) new_button.draw(win) change_button = Button("Change Password",Point(45,2),Point(95,12)) change_button.draw(win) quit_button = Button("Quit",Point(5,15),Point(25,25)) quit_button.draw(win) while True: # Maximum number of clicks p = win.getMouse() if login_button.clicked(p): user1 = user_entry.getText() password1 = password_entry.getText() if db.valid_user(user1,password1): self.player = db.get_user(user1) win.close() return "new" else: message.setText("Invalid user and/or password") elif continue_button.clicked(p): user1 = user_entry.getText() password1 = password_entry.getText() gid = int(game_id_entry.getText()) in_progress = self.is_game_in_progress(gid) if db.valid_user(user1,password1) and in_progress: self.player = db.get_user(user1) win.close() return gid else: message.setText("Invalid user/password or game ID") elif new_button.clicked(p): screen = CreateNewUserScreen() screen.create_new_user(db) elif change_button.clicked(p): screen = ChangePasswordScreen() screen.change_password(db) elif quit_button.clicked(p): win.close() return "quit"
itsallvoodoo/csci-school
CSCI220/Week 15 - FINAL/Scrabble/StartScreen.py
Python
apache-2.0
2,963
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. "Test the function for mapping Terraform arguments." import pytest import tftest ARGS_TESTS = ( ({'auto_approve': True}, ['-auto-approve']), ({'auto_approve': False}, []), ({'backend': True}, []), ({'backend': None}, []), ({'backend': False}, ['-backend=false']), ({'color': True}, []), ({'color': False}, ['-no-color']), ({'color': False, 'input': False}, ['-no-color', '-input=false']), ({'force_copy': True}, ['-force-copy']), ({'force_copy': None}, []), ({'force_copy': False}, []), ({'input': True}, []), ({'input': False}, ['-input=false']), ({'json_format': True}, ['-json']), ({'json_format': False}, []), ({'lock': True}, []), ({'lock': False}, ['-lock=false']), ({'plugin_dir': ''}, []), ({'plugin_dir': 'abc'}, ['-plugin-dir', 'abc']), ({'refresh': True}, []), ({'refresh': None}, []), ({'refresh': False}, ['-refresh=false']), ({'upgrade': True}, ['-upgrade']), ({'upgrade': False}, []), ({'tf_var_file': None}, []), ({'tf_var_file': 'foo.tfvar'}, ['-var-file=foo.tfvar']), ) @pytest.mark.parametrize("kwargs, expected", ARGS_TESTS) def test_args(kwargs, expected): assert tftest.parse_args() == [] assert tftest.parse_args(**kwargs) == expected TERRAGRUNT_ARGS_TESTCASES = [ ({"tg_config": "Obama"}, ['--terragrunt-config', 'Obama']), ({"tg_tfpath": "Barrack"}, ['--terragrunt-tfpath', 'Barrack']), ({"tg_no_auto_init": True}, ['--terragrunt-no-auto-init']), ({"tg_no_auto_init": False}, []), ({"tg_no_auto_retry": True}, ['--terragrunt-no-auto-retry']), ({"tg_no_auto_retry": False}, []), ({"tg_non_interactive": True}, ['--terragrunt-non-interactive']), ({"tg_non_interactive": False}, []), ({"tg_working_dir": "George"}, ['--terragrunt-working-dir', 'George']), ({"tg_download_dir": "Bush"}, ['--terragrunt-download-dir', 'Bush']), ({"tg_source": "Clinton"}, ['--terragrunt-source', 'Clinton']), ({"tg_source_update": True}, ['--terragrunt-source-update']), ({"tg_source_update": False}, []), ({"tg_iam_role": "Bill"}, ['--terragrunt-iam-role', 'Bill']), ({"tg_ignore_dependency_errors": True}, ['--terragrunt-ignore-dependency-errors']), ({"tg_ignore_dependency_errors": False}, []), ({"tg_ignore_dependency_order": True}, ['--terragrunt-ignore-dependency-order']), ({"tg_ignore_dependency_order": False}, []), ({"tg_ignore_external_dependencies": "dont care what is here"}, ['--terragrunt-ignore-external-dependencies']), ({"tg_include_external_dependencies": True}, ['--terragrunt-include-external-dependencies']), ({"tg_include_external_dependencies": False}, []), ({"tg_parallelism": 20}, ['--terragrunt-parallelism 20']), ({"tg_exclude_dir": "Ronald"}, ['--terragrunt-exclude-dir', 'Ronald']), ({"tg_include_dir": "Reagan"}, ['--terragrunt-include-dir', 'Reagan']), ({"tg_check": True}, ['--terragrunt-check']), ({"tg_check": False}, []), ({"tg_hclfmt_file": "Biden"}, ['--terragrunt-hclfmt-file', 'Biden']), ({"tg_override_attr": {"Iron": "Man", "Captain": "America"}}, ['--terragrunt-override-attr=Iron=Man', '--terragrunt-override-attr=Captain=America']), 
({"tg_debug": True}, ['--terragrunt-debug']), ({"tg_debug": False}, []), ] @pytest.mark.parametrize("kwargs, expected", TERRAGRUNT_ARGS_TESTCASES) def test_terragrunt_args(kwargs, expected): assert tftest.parse_args(**kwargs) == expected def test_var_args(): assert sorted(tftest.parse_args(init_vars={'a': 1, 'b': '["2"]'})) == sorted( ["-backend-config=a=1", '-backend-config=b=["2"]']) assert sorted(tftest.parse_args(tf_vars={'a': 1, 'b': '["2"]'})) == sorted( ['-var', 'b=["2"]', '-var', 'a=1']) def test_targets(): assert tftest.parse_args(targets=['one', 'two']) == sorted( ['-target=one', '-target=two'])
GoogleCloudPlatform/terraform-python-testing-helper
test/test_args.py
Python
apache-2.0
4,448
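The parametrized cases above each exercise one keyword at a time; assuming the flags compose the same way when several keywords are passed together (as the color/input pair suggests), a quick combined check might look like this:

import tftest

args = tftest.parse_args(color=False, input=False, tf_var_file='foo.tfvar')
# Order-insensitive comparison, mirroring the style of test_var_args above.
assert sorted(args) == sorted(['-no-color', '-input=false', '-var-file=foo.tfvar'])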
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys import six from heat.common import template_format from heat.engine.clients.os import glance from heat.engine.clients.os import keystone from heat.engine.clients.os.keystone import fake_keystoneclient as fake_ks from heat.engine.clients.os import nova from heat.engine import environment from heat.engine.resources.aws.ec2 import instance as instances from heat.engine import stack as parser from heat.engine import template as templatem from heat.tests.openstack.nova import fakes as fakes_nova from heat.tests import utils wp_template = u''' heat_template_version: 2014-10-16 description: WordPress parameters: KeyName: description: KeyName type: string default: test\u2042 resources: WebServer: type: AWS::EC2::Instance properties: ImageId: F17-x86_64-gold InstanceType: m1.large KeyName: test UserData: wordpress ''' string_template_five = ''' heat_template_version: 2013-05-23 description: Random String templates parameters: salt: type: string default: "quickbrownfox" resources: A: type: OS::Heat::RandomString properties: salt: {get_param: salt} B: type: OS::Heat::RandomString properties: salt: {get_param: salt} C: type: OS::Heat::RandomString depends_on: [A, B] properties: salt: {get_attr: [A, value]} D: type: OS::Heat::RandomString depends_on: C properties: salt: {get_param: salt} E: type: OS::Heat::RandomString depends_on: C properties: salt: {get_param: salt} ''' string_template_five_update = ''' heat_template_version: 2013-05-23 description: Random String templates parameters: salt: type: string default: "quickbrownfox123" resources: A: type: OS::Heat::RandomString properties: salt: {get_param: salt} B: type: OS::Heat::RandomString properties: salt: {get_param: salt} F: type: OS::Heat::RandomString depends_on: [A, B] properties: salt: {get_param: salt} G: type: OS::Heat::RandomString depends_on: F properties: salt: {get_param: salt} H: type: OS::Heat::RandomString depends_on: F properties: salt: {get_param: salt} ''' attr_cache_template = ''' heat_template_version: 2016-04-08 resources: A: type: ResourceWithComplexAttributesType B: type: OS::Heat::RandomString properties: salt: {get_attr: [A, flat_dict, key2]} C: type: OS::Heat::RandomString depends_on: [A, B] properties: salt: {get_attr: [A, nested_dict, dict, a]} D: type: OS::Heat::RandomString depends_on: C properties: salt: {get_attr: [A, nested_dict, dict, b]} E: type: OS::Heat::RandomString depends_on: C properties: salt: {get_attr: [A, flat_dict, key3]} ''' def get_stack(stack_name, ctx, template=None, with_params=True, convergence=False, **kwargs): if template is None: t = template_format.parse(wp_template) if with_params: env = environment.Environment({'KeyName': 'test'}) tmpl = templatem.Template(t, env=env) else: tmpl = templatem.Template(t) else: t = template_format.parse(template) tmpl = templatem.Template(t) stack = parser.Stack(ctx, stack_name, tmpl, convergence=convergence, **kwargs) stack.thread_group_mgr = DummyThreadGroupManager() return stack def 
setup_keystone_mocks_with_mock(test_case, stack): fkc = fake_ks.FakeKeystoneClient() test_case.patchobject(keystone.KeystoneClientPlugin, '_create') keystone.KeystoneClientPlugin._create.return_value = fkc def setup_mock_for_image_constraint_with_mock(test_case, imageId_input, imageId_output=744): test_case.patchobject(glance.GlanceClientPlugin, 'find_image_by_name_or_id', return_value=imageId_output) def validate_setup_mocks_with_mock(stack, fc, mock_image_constraint=True, validate_create=True): instance = stack['WebServer'] metadata = instance.metadata_get() if mock_image_constraint: m_image = glance.GlanceClientPlugin.find_image_by_name_or_id m_image.assert_called_with( instance.properties['ImageId']) user_data = instance.properties['UserData'] server_userdata = instance.client_plugin().build_userdata( metadata, user_data, 'ec2-user') nova.NovaClientPlugin.build_userdata.assert_called_with( metadata, user_data, 'ec2-user') if not validate_create: return fc.servers.create.assert_called_once_with( image=744, flavor=3, key_name='test', name=utils.PhysName(stack.name, 'WebServer'), security_groups=None, userdata=server_userdata, scheduler_hints=None, meta=None, nics=None, availability_zone=None, block_device_mapping=None) def setup_mocks_with_mock(testcase, stack, mock_image_constraint=True, mock_keystone=True): fc = fakes_nova.FakeClient() testcase.patchobject(instances.Instance, 'client', return_value=fc) testcase.patchobject(nova.NovaClientPlugin, 'client', return_value=fc) instance = stack['WebServer'] metadata = instance.metadata_get() if mock_image_constraint: setup_mock_for_image_constraint_with_mock( testcase, instance.properties['ImageId']) if mock_keystone: setup_keystone_mocks_with_mock(testcase, stack) user_data = instance.properties['UserData'] server_userdata = instance.client_plugin().build_userdata( metadata, user_data, 'ec2-user') testcase.patchobject(nova.NovaClientPlugin, 'build_userdata', return_value=server_userdata) testcase.patchobject(fc.servers, 'create') fc.servers.create.return_value = fc.servers.list()[4] return fc def setup_stack_with_mock(test_case, stack_name, ctx, create_res=True, convergence=False): stack = get_stack(stack_name, ctx, convergence=convergence) stack.store() if create_res: fc = setup_mocks_with_mock(test_case, stack) stack.create() stack._persist_state() validate_setup_mocks_with_mock(stack, fc) return stack def clean_up_stack(test_case, stack, delete_res=True): if delete_res: fc = fakes_nova.FakeClient() test_case.patchobject(instances.Instance, 'client', return_value=fc) test_case.patchobject(fc.servers, 'delete', side_effect=fakes_nova.fake_exception()) stack.delete() def stack_context(stack_name, create_res=True, convergence=False): """Decorator for creating and deleting stack. Decorator which creates a stack by using the test case's context and deletes it afterwards to ensure tests clean up their stacks regardless of test success/failure. 
""" def stack_delete(test_fn): @six.wraps(test_fn) def wrapped_test(test_case, *args, **kwargs): def create_stack(): ctx = getattr(test_case, 'ctx', None) if ctx is not None: stack = setup_stack_with_mock(test_case, stack_name, ctx, create_res, convergence) setattr(test_case, 'stack', stack) def delete_stack(): stack = getattr(test_case, 'stack', None) if stack is not None and stack.id is not None: clean_up_stack(test_case, stack, delete_res=create_res) create_stack() try: test_fn(test_case, *args, **kwargs) except Exception: exc_class, exc_val, exc_tb = sys.exc_info() try: delete_stack() finally: six.reraise(exc_class, exc_val, exc_tb) else: delete_stack() return wrapped_test return stack_delete class DummyThread(object): def link(self, callback, *args): pass class DummyThreadGroup(object): def __init__(self): self.threads = [] def add_timer(self, interval, callback, initial_delay=None, *args, **kwargs): self.threads.append(callback) def stop_timers(self): pass def add_thread(self, callback, cnxt, trace, func, *args, **kwargs): # callback here is _start_with_trace(); func is the 'real' callback self.threads.append(func) return DummyThread() def stop(self, graceful=False): pass def wait(self): pass class DummyThreadGroupManager(object): def __init__(self): self.msg_queues = [] self.messages = [] def start(self, stack, func, *args, **kwargs): # Just run the function, so we know it's completed in the test func(*args, **kwargs) return DummyThread() def start_with_lock(self, cnxt, stack, engine_id, func, *args, **kwargs): # Just run the function, so we know it's completed in the test func(*args, **kwargs) return DummyThread() def start_with_acquired_lock(self, stack, lock, func, *args, **kwargs): # Just run the function, so we know it's completed in the test func(*args, **kwargs) return DummyThread() def send(self, stack_id, message): self.messages.append(message) def add_msg_queue(self, stack_id, msg_queue): self.msg_queues.append(msg_queue) def remove_msg_queue(self, gt, stack_id, msg_queue): for q in self.msg_queues.pop(stack_id, []): if q is not msg_queue: self.add_event(stack_id, q) class DummyThreadGroupMgrLogStart(DummyThreadGroupManager): def __init__(self): super(DummyThreadGroupMgrLogStart, self).__init__() self.started = [] def start_with_lock(self, cnxt, stack, engine_id, func, *args, **kwargs): self.started.append((stack.id, func)) return DummyThread() def start_with_acquired_lock(self, stack, lock, func, *args, **kwargs): self.started.append((stack.id, func)) return DummyThread() def start(self, stack_id, func, *args, **kwargs): # Here we only store the started task so it can be checked self.started.append((stack_id, func))
noironetworks/heat
heat/tests/engine/tools.py
Python
apache-2.0
11,355
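The stack_context decorator defined in tools.py above creates a stack from the test case's context before the test body runs and deletes it afterwards. A minimal usage sketch, assuming Heat's usual test helpers (heat.tests.common.HeatTestCase and heat.tests.utils.dummy_context); the test class and assertion here are hypothetical:

from heat.tests import common
from heat.tests.engine import tools
from heat.tests import utils


class StackContextExampleTest(common.HeatTestCase):
    def setUp(self):
        super(StackContextExampleTest, self).setUp()
        # stack_context reads the context from self.ctx.
        self.ctx = utils.dummy_context()

    @tools.stack_context('example_stack', create_res=False)
    def test_stack_is_stored(self):
        # The decorator stored the created stack on the test case.
        self.assertEqual('example_stack', self.stack.name)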
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from builtins import range
from airflow.operators import PythonOperator
from airflow.models import DAG
from datetime import datetime, timedelta
import time
from pprint import pprint

seven_days_ago = datetime.combine(
    datetime.today() - timedelta(7), datetime.min.time())

args = {
    'owner': 'airflow',
    'start_date': seven_days_ago,
}

dag = DAG(
    dag_id='example_python_operator', default_args=args,
    schedule_interval=None)


def my_sleeping_function(random_base):
    '''This is a function that will run within the DAG execution'''
    time.sleep(random_base)


def print_context(ds, **kwargs):
    pprint(kwargs)
    print(ds)
    return 'Whatever you return gets printed in the logs'


run_this = PythonOperator(
    task_id='print_the_context',
    provide_context=True,
    python_callable=print_context,
    dag=dag)

# Generate 10 sleeping tasks, sleeping from 0.0 to 0.9 seconds respectively.
for i in range(10):
    task = PythonOperator(
        task_id='sleep_for_' + str(i),
        python_callable=my_sleeping_function,
        op_kwargs={'random_base': float(i) / 10},
        dag=dag)

    task.set_upstream(run_this)
biln/airflow
airflow/example_dags/example_python_operator.py
Python
apache-2.0
1,774
# Copyright 2014 # The Cloudscaling Group, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from oslo_db import options from oslo_log import log from ec2api import paths from ec2api import version CONF = cfg.CONF _DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('ec2api.sqlite') _DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', 'oslo.messaging=INFO', 'iso8601=WARN', 'requests.packages.urllib3.connectionpool=WARN', 'urllib3.connectionpool=WARN', 'websocket=WARN', 'keystonemiddleware=WARN', 'routes.middleware=WARN', 'stevedore=WARN', 'glanceclient=WARN'] _DEFAULT_LOGGING_CONTEXT_FORMAT = ('%(asctime)s.%(msecs)03d %(process)d ' '%(levelname)s %(name)s [%(request_id)s ' '%(user_identity)s] %(instance)s' '%(message)s') def parse_args(argv, default_config_files=None): log.set_defaults(_DEFAULT_LOGGING_CONTEXT_FORMAT, _DEFAULT_LOG_LEVELS) log.register_options(CONF) options.set_defaults(CONF, connection=_DEFAULT_SQL_CONNECTION, sqlite_db='ec2api.sqlite') cfg.CONF(argv[1:], project='ec2api', version=version.version_info.version_string(), default_config_files=default_config_files)
MayankGo/ec2-api
ec2api/config.py
Python
apache-2.0
2,017
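parse_args() above registers oslo.log and oslo.db defaults before parsing the command line. A minimal sketch of how a service entry point might call it; the main() wrapper is hypothetical, not ec2api's actual launcher:

import sys

from oslo_log import log as logging

from ec2api import config


def main():
    # Register defaults and parse CLI/config files, then set up logging.
    config.parse_args(sys.argv)
    logging.setup(config.CONF, 'ec2api')
    # ... start the API service here ...


if __name__ == '__main__':
    main()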
# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import telnetlib from savanna.tests.integration import base import savanna.tests.integration.configs.parameters as param def _add_config(body, config): if config in [param.NAMENODE_CONFIG, param.DATANODE_CONFIG]: body['node_configs']['HDFS'] = config elif config == param.GENERAL_CONFIG: body['cluster_configs']['general'] = config elif config == param.CLUSTER_HDFS_CONFIG: body['cluster_configs']['HDFS'] = config elif config == param.CLUSTER_MAPREDUCE_CONFIG: body['cluster_configs']['MapReduce'] = config else: body['node_configs']['MapReduce'] = config class ClusterConfigTest(base.ITestCase): def setUp(self): super(ClusterConfigTest, self).setUp() telnetlib.Telnet(self.host, self.port) def assertConfigs(self, get_config, param_config): self.assertEqual(get_config, param_config, msg='configs are not equal: %s != %s' % (str(get_config), str(param_config))) def assertConfigOnNode(self, host, config, value): conf = config.replace(' ', '') com = self.execute_command(host, './script.sh %s -val %s -url %s' % (conf, value, param.OS_AUTH_URL)) self.assertEqual(com[0], 0, msg='host: %s, config %s is not equal: %s' % (host, config, value)) def _cluster_config_testing(self, cluster_body): cluster_id = None try: _add_config(cluster_body, param.GENERAL_CONFIG) _add_config(cluster_body, param.CLUSTER_HDFS_CONFIG) _add_config(cluster_body, param.CLUSTER_MAPREDUCE_CONFIG) cluster_id = self.create_cluster_and_get_id(cluster_body) get_data = self.get_object(self.url_cluster_with_slash, cluster_id, 200, True) get_data = get_data['cluster'] self.assertConfigs(get_data['cluster_configs']['general'], param.GENERAL_CONFIG) self.assertConfigs(get_data['cluster_configs']['HDFS'], param.CLUSTER_HDFS_CONFIG) self.assertConfigs(get_data['cluster_configs']['MapReduce'], param.CLUSTER_MAPREDUCE_CONFIG) node_groups = get_data['node_groups'] ip_instances = {} process_map = { 'namenode': { 'service': 'HDFS', 'param': param.NAMENODE_CONFIG}, 'jobtracker': { 'service': 'MapReduce', 'param': param.JOBTRACKER_CONFIG}, 'datanode': { 'service': 'HDFS', 'param': param.DATANODE_CONFIG}, 'tasktracker': { 'service': 'MapReduce', 'param': param.TASKTRACKER_CONFIG} } def get_node_configs(node_group, process): return \ node_group['node_configs'][process_map[process]['service']] def get_param(process): return process_map[process]['param'] for node_group in node_groups: for process in node_group['node_processes']: self.assertConfigs( get_node_configs(node_group, process), get_param(process)) instances = node_group['instances'] for instans in instances: management_ip = instans['management_ip'] self.transfer_script_to_node( management_ip, 'test_config/config_test_script.sh') ip_instances[management_ip] = node_group[ 'node_processes'] try: for key, processes in ip_instances.items(): telnetlib.Telnet(key, '22') for conf, value in param.CLUSTER_MAPREDUCE_CONFIG.items(): self.assertConfigOnNode(key, conf, value) for conf, value in param.CLUSTER_HDFS_CONFIG.items(): 
self.assertConfigOnNode(key, conf, value) for process in processes: for sec_key, sec_value in get_param(process).items(): self.assertConfigOnNode(key, sec_key, sec_value) if 'namenode' in processes: for sec_key, sec_value in param.GENERAL_CONFIG.items(): self.assertConfigOnNode( key, sec_key, sec_value) except Exception as e: self.fail(e.message) except Exception as e: self.fail(e.message) finally: self.del_object(self.url_cluster_with_slash, cluster_id, 204) def test_cluster_config_nnjt_ttdn(self): id_master_ngt = None id_worker_ngt = None try: master_ngt_body = self.make_node_group_template( 'master-ngt', 'qa probe', 'JT+NN') _add_config(master_ngt_body, param.NAMENODE_CONFIG) _add_config(master_ngt_body, param.JOBTRACKER_CONFIG) id_master_ngt = self.get_object_id( 'node_group_template', self.post_object(self.url_ngt, master_ngt_body, 202)) worker_ngt_body = self.make_node_group_template( 'worker-ngt', 'qa probe', 'TT+DN') _add_config(worker_ngt_body, param.DATANODE_CONFIG) _add_config(worker_ngt_body, param.TASKTRACKER_CONFIG) id_worker_ngt = self.get_object_id( 'node_group_template', self.post_object(self.url_ngt, worker_ngt_body, 202)) ngt_id_list = {id_master_ngt: 1, id_worker_ngt: 2} cl_body = self.make_cl_body_node_group_templates(ngt_id_list) self._cluster_config_testing(cl_body) except Exception as e: self.fail(str(e)) finally: self.del_object(self.url_ngt_with_slash, id_master_ngt, 204) self.del_object(self.url_ngt_with_slash, id_worker_ngt, 204)
rnirmal/savanna
savanna/tests/integration/test_config/test_cluster_config.py
Python
apache-2.0
6,861
from sqlalchemy import * from test.lib import * class FoundRowsTest(fixtures.TestBase, AssertsExecutionResults): """tests rowcount functionality""" __requires__ = ('sane_rowcount', ) @classmethod def setup_class(cls): global employees_table, metadata metadata = MetaData(testing.db) employees_table = Table('employees', metadata, Column('employee_id', Integer, Sequence('employee_id_seq', optional=True), primary_key=True), Column('name', String(50)), Column('department', String(1)), ) metadata.create_all() def setup(self): global data data = [ ('Angela', 'A'), ('Andrew', 'A'), ('Anand', 'A'), ('Bob', 'B'), ('Bobette', 'B'), ('Buffy', 'B'), ('Charlie', 'C'), ('Cynthia', 'C'), ('Chris', 'C') ] i = employees_table.insert() i.execute(*[{'name':n, 'department':d} for n, d in data]) def teardown(self): employees_table.delete().execute() @classmethod def teardown_class(cls): metadata.drop_all() def testbasic(self): s = employees_table.select() r = s.execute().fetchall() assert len(r) == len(data) def test_update_rowcount1(self): # WHERE matches 3, 3 rows changed department = employees_table.c.department r = employees_table.update(department=='C').execute(department='Z') print "expecting 3, dialect reports %s" % r.rowcount assert r.rowcount == 3 def test_update_rowcount2(self): # WHERE matches 3, 0 rows changed department = employees_table.c.department r = employees_table.update(department=='C').execute(department='C') print "expecting 3, dialect reports %s" % r.rowcount assert r.rowcount == 3 def test_delete_rowcount(self): # WHERE matches 3, 3 rows deleted department = employees_table.c.department r = employees_table.delete(department=='C').execute() print "expecting 3, dialect reports %s" % r.rowcount assert r.rowcount == 3
ioram7/keystone-federado-pgid2013
build/sqlalchemy/test/sql/test_rowcount.py
Python
apache-2.0
2,260
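The tests above rely on the dialect reporting matched rows through ResultProxy.rowcount. A sketch of one more check in the same implicit-execution style, written as if it were another method of FoundRowsTest reusing the data created in setup(); it is illustrative, not part of the original suite:

    def test_update_then_delete_rowcount(self):
        department = employees_table.c.department

        # UPDATE ... WHERE department = 'C' matches the three 'C' employees.
        r = employees_table.update(department == 'C').execute(department='Z')
        assert r.rowcount == 3

        # DELETE ... WHERE department = 'Z' now matches those same three rows.
        r = employees_table.delete(department == 'Z').execute()
        assert r.rowcount == 3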
#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging import os from lxml import etree from django.core import management from django.core.management.base import NoArgsCommand from django.utils.translation import ugettext as _ from hadoop import cluster from desktop.conf import USE_NEW_EDITOR from desktop.models import Directory, Document, Document2, Document2Permission from liboozie.submittion import create_directories from oozie.conf import LOCAL_SAMPLE_DATA_DIR, LOCAL_SAMPLE_DIR, REMOTE_SAMPLE_DIR, ENABLE_V2 from oozie.models import Workflow, Coordinator, Bundle from oozie.importlib.workflows import import_workflow_root from oozie.importlib.coordinators import import_coordinator_root from oozie.importlib.bundles import import_bundle_root from useradmin.models import get_default_user_group, install_sample_user LOG = logging.getLogger(__name__) class Command(NoArgsCommand): def _import_workflows(self, directory, managed=True): for example_directory_name in os.listdir(directory): if os.path.isdir(os.path.join(directory, example_directory_name)): with open(os.path.join(directory, example_directory_name, 'workflow.zip')) as fp: workflow_xml, metadata = Workflow.decompress(fp) workflow_root = etree.fromstring(workflow_xml) try: Workflow.objects.get(name=workflow_root.get('name'), managed=managed) except Workflow.DoesNotExist: LOG.info(_("Installing workflow %s") % workflow_root.get('name')) LOG.debug("Workflow definition:\n%s" % workflow_xml) workflow = Workflow.objects.new_workflow(owner=self.user) workflow.is_shared = True workflow.managed = managed workflow.name = workflow_root.get('name') workflow.save() Workflow.objects.initialize(workflow) import_workflow_root(workflow=workflow, workflow_definition_root=workflow_root, metadata=metadata, fs=self.fs) workflow.doc.all().delete() # Delete doc as it messes up the example sharing def _import_coordinators(self, directory): for example_directory_name in os.listdir(directory): if os.path.isdir(os.path.join(directory, example_directory_name)): with open(os.path.join(directory, example_directory_name, 'coordinator.zip')) as fp: coordinator_xml, metadata = Coordinator.decompress(fp) coordinator_root = etree.fromstring(coordinator_xml) try: Coordinator.objects.get(name=coordinator_root.get('name')) except Coordinator.DoesNotExist: LOG.info(_("Installing coordinator %s") % coordinator_root.get('name')) LOG.debug("Coordinator definition:\n%s" % coordinator_xml) coordinator = Coordinator(owner=self.user, is_shared=True) coordinator.name = coordinator_root.get('name') coordinator.save() import_coordinator_root(coordinator=coordinator, coordinator_definition_root=coordinator_root, metadata=metadata) def _import_bundles(self, directory): for example_directory_name in os.listdir(directory): if 
os.path.isdir(os.path.join(directory, example_directory_name)): with open(os.path.join(directory, example_directory_name, 'bundle.zip')) as fp: bundle_xml, metadata = Bundle.decompress(fp) bundle_root = etree.fromstring(bundle_xml) try: Bundle.objects.get(name=bundle_root.get('name')) except Bundle.DoesNotExist: LOG.info(_("Installing bundle %s") % bundle_root.get('name')) LOG.debug("Bundle definition:\n%s" % bundle_xml) bundle = Bundle(owner=self.user, is_shared=True) bundle.name = bundle_root.get('name') bundle.save() import_bundle_root(bundle=bundle, bundle_definition_root=bundle_root, metadata=metadata) def install_examples(self): data_dir = LOCAL_SAMPLE_DIR.get() unmanaged_dir = os.path.join(data_dir, 'unmanaged') self._import_workflows(unmanaged_dir, managed=False) def handle_noargs(self, **options): self.user = install_sample_user() self.fs = cluster.get_hdfs() LOG.info(_("Creating sample directory '%s' in HDFS") % REMOTE_SAMPLE_DIR.get()) create_directories(self.fs, [REMOTE_SAMPLE_DIR.get()]) remote_dir = REMOTE_SAMPLE_DIR.get() # Copy examples binaries for name in os.listdir(LOCAL_SAMPLE_DIR.get()): local_dir = self.fs.join(LOCAL_SAMPLE_DIR.get(), name) remote_data_dir = self.fs.join(remote_dir, name) LOG.info(_('Copying examples %(local_dir)s to %(remote_data_dir)s\n') % { 'local_dir': local_dir, 'remote_data_dir': remote_data_dir}) self.fs.do_as_user(self.fs.DEFAULT_USER, self.fs.copyFromLocal, local_dir, remote_data_dir) # Copy sample data local_dir = LOCAL_SAMPLE_DATA_DIR.get() remote_data_dir = self.fs.join(remote_dir, 'data') LOG.info(_('Copying data %(local_dir)s to %(remote_data_dir)s\n') % { 'local_dir': local_dir, 'remote_data_dir': remote_data_dir}) self.fs.do_as_user(self.fs.DEFAULT_USER, self.fs.copyFromLocal, local_dir, remote_data_dir) # Load jobs LOG.info(_("Installing examples...")) if ENABLE_V2.get(): management.call_command('loaddata', 'initial_oozie_examples.json', verbosity=2) # Get or create sample user directories home_dir = Directory.objects.get_home_directory(self.user) examples_dir, created = Directory.objects.get_or_create( parent_directory=home_dir, owner=self.user, name=Document2.EXAMPLES_DIR ) if USE_NEW_EDITOR.get(): docs = Document.objects.get_docs(self.user, Workflow).filter(owner=self.user) for doc in docs: if doc.content_object: data = doc.content_object.data_dict data.update({'content_type': doc.content_type.model, 'object_id': doc.object_id}) data = json.dumps(data) # Don't overwrite doc2, created = Document2.objects.get_or_create( owner=self.user, parent_directory=examples_dir, name=doc.name, type='link-workflow', description=doc.description, data=data ) LOG.info('Successfully installed sample link to jobsub: %s' % (doc2.name,)) # Share oozie examples with default group oozie_examples = Document2.objects.filter( type__in=['oozie-workflow2', 'oozie-coordinator2', 'oozie-bundle2'], owner=self.user, parent_directory=None ) oozie_examples.update(parent_directory=examples_dir) examples_dir.share(self.user, Document2Permission.READ_PERM, groups=[get_default_user_group()]) self.install_examples() Document.objects.sync()
jayceyxc/hue
apps/oozie/src/oozie/management/commands/oozie_setup.py
Python
apache-2.0
7,491
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for fedjax."""

import unittest

import fedjax


class FedjaxTest(unittest.TestCase):
  """Test fedjax can be imported correctly."""

  def test_import(self):
    self.assertTrue(hasattr(fedjax, 'FederatedAlgorithm'))
    self.assertTrue(hasattr(fedjax.aggregators, 'Aggregator'))
    self.assertTrue(hasattr(fedjax.algorithms, 'fed_avg'))
    self.assertTrue(hasattr(fedjax.datasets, 'emnist'))
    self.assertTrue(hasattr(fedjax.models, 'emnist'))
    self.assertTrue(hasattr(fedjax.training, 'save_checkpoint'))

  def test_no_core(self):
    self.assertFalse(hasattr(fedjax, 'core'))


if __name__ == '__main__':
  unittest.main()
google/fedjax
fedjax/fedjax_test.py
Python
apache-2.0
1,219
from __future__ import print_function import sys sys.path.insert(1,"../../../") from tests import pyunit_utils import h2o from h2o.utils.typechecks import assert_is_type from h2o.exceptions import H2OConnectionError, H2OServerError, H2OValueError import tempfile import shutil import os def h2oinit(): """ Python API test: h2o.init(url=None, ip=None, port=None, name = None, https=None, insecure=None, username=None, password=None, ookies=None, proxy=None, start_h2o=True, nthreads=-1, ice_root=None, enable_assertions=True, max_mem_size=None, min_mem_size=None, strict_version_check=None, **kwargs) """ start_h2o = False strict_version_check = False print("Testing h2o.init() command...") try: h2o.init(start_h2o=start_h2o) print("h2o.init() command works!") except Exception as e: # some errors are okay like version mismatch print("error message type is {0} and the error message is \n".format(e.__class__.__name__, e.args[0])) assert_is_type(e, H2OConnectionError) try: h2o.init(strict_version_check=strict_version_check, start_h2o=start_h2o) except Exception as e: print("error message type is {0} and the error message is \n".format(e.__class__.__name__, e.args[0])) assert_is_type(e, H2OConnectionError) # try to join a cluster and test out various command arguments ipS = "127.16.2.27" portS = "54321" nthread = 2 max_mem_size=10 min_mem_size=3 try: h2o.init(ip=ipS, port=portS, nthreads=nthread, max_mem_size=max_mem_size, min_mem_size=min_mem_size, start_h2o=start_h2o, strict_version_check=strict_version_check) print("Command h2o.init(ip=ipS, port=portS, nthreads=nthread, max_mem_size=max_mem_size, " "min_mem_size=min_mem_size,start_h2o=start_h2o, strict_version_check=strict_version_check) works!") except Exception as e: # make sure correct error message is received print("error message type is {0} and the error message is \n".format(e.__class__.__name__, e.args[0])) assert_is_type(e, H2OConnectionError) def h2oinitname(): """ Python API test for h2o.init :return: """ try: h2o.init(strict_version_check=False, name="test") # Should initialize h2o.init(strict_version_check=False, name="test") # Should just connect assert h2o.cluster().cloud_name == "test" except H2OConnectionError as e: # some errors are okay like version mismatch print("error message type is {0} and the error message is {1}\n".format(e.__class__.__name__, e.args[0])) try: h2o.init(strict_version_check=False, port=54321, name="test2", as_port=True) assert False, "Should fail to connect and the port should be used by previous invocation." 
except H2OServerError as e: print("error message type is {0} and the error message is {1}\n".format(e.__class__.__name__, e.args[0])) try: h2o.init(strict_version_check=False, port=54321, name="test2") # Should bump the port to next one assert h2o.cluster().cloud_name == "test2" except H2OConnectionError as e: print("error message type is {0} and the error message is {1}\n".format(e.__class__.__name__, e.args[0])) try: h2o.init(strict_version_check=False, port=60000, name="test3", as_port=True) assert h2o.cluster().cloud_name == "test3" except H2OConnectionError as e: print("error message type is {0} and the error message is {1}\n".format(e.__class__.__name__, e.args[0])) assert_is_type(e, H2OConnectionError) h2o.cluster().shutdown() def h2oinit_default_log_dir(): tmpdir = tempfile.mkdtemp() try: h2o.init(strict_version_check=False, name="default_log", ice_root=tmpdir) except H2OConnectionError as e: # some errors are okay like version mismatch print("error message type is {0} and the error message is {1}\n".format(e.__class__.__name__, e.args[0])) finally: assert os.path.exists(os.path.join(tmpdir, "h2ologs")) == True shutil.rmtree(tmpdir) h2o.cluster().shutdown() def h2oinit_custom_log_dir(): tmpdir = tempfile.mkdtemp() tmpdir_logs = tempfile.mkdtemp() try: h2o.init(strict_version_check=False, name="custom_log", ice_root=tmpdir, log_dir=tmpdir_logs) except H2OConnectionError as e: # some errors are okay like version mismatch print("error message type is {0} and the error message is {1}\n".format(e.__class__.__name__, e.args[0])) finally: assert os.path.exists(os.path.join(tmpdir, "h2ologs")) == False assert any(".log" in log for log in os.listdir(tmpdir_logs)) shutil.rmtree(tmpdir) shutil.rmtree(tmpdir_logs) h2o.cluster().shutdown() def h2oinit_fail_invalid_log_level(): try: h2o.init(strict_version_check=False, log_level="BAD_LOG_LEVEL") assert False, "Should fail to start an h2o instance with an invalid log level." except H2OConnectionError as e: # some errors are okay like version mismatch assert False, "Should fail to start an h2o instance with an invalid log level but H2OConnectionError was thrown." except H2OValueError: print("H2OValueError properly thrown") return finally: h2o.cluster().shutdown() def h2oinit_with_extra_classpath(): try: h2o.init(strict_version_check=False, extra_classpath=[os.path.realpath(__file__)], port=40000) finally: h2o.cluster().shutdown() # None of the tests below need a pre initialized instance h2oinit_default_log_dir() h2oinit_custom_log_dir() h2oinit_fail_invalid_log_level() h2oinitname() h2oinit_with_extra_classpath() if __name__ == "__main__": pyunit_utils.standalone_test(h2oinit) else: h2oinit()
h2oai/h2o-3
h2o-py/tests/testdir_apis/H2O_Init/h2o.init_test.py
Python
apache-2.0
5,839
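The tests above exercise many h2o.init() argument combinations. For reference, the common interactive pattern is simply to start (or attach to) a local cluster and shut it down when finished; a minimal sketch using only calls that appear in the test above:

import h2o

h2o.init(strict_version_check=False)   # start or connect to a local cluster
print(h2o.cluster().cloud_name)        # inspect the cluster that was joined
h2o.cluster().shutdown()               # stop it when done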
#!/usr/bin/env python """ Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ class PRIORITY: LOWEST = -100 LOWER = -50 LOW = -10 NORMAL = 0 HIGH = 10 HIGHER = 50 HIGHEST = 100 class SORT_ORDER: FIRST = 0 SECOND = 1 THIRD = 2 FOURTH = 3 FIFTH = 4 LAST = 100 class DBMS: ACCESS = "Microsoft Access" DB2 = "IBM DB2" FIREBIRD = "Firebird" MAXDB = "SAP MaxDB" MSSQL = "Microsoft SQL Server" MYSQL = "MySQL" ORACLE = "Oracle" PGSQL = "PostgreSQL" SQLITE = "SQLite" SYBASE = "Sybase" HSQLDB = "HSQLDB" INFORMIX = "Informix" class DBMS_DIRECTORY_NAME: ACCESS = "access" DB2 = "db2" FIREBIRD = "firebird" MAXDB = "maxdb" MSSQL = "mssqlserver" MYSQL = "mysql" ORACLE = "oracle" PGSQL = "postgresql" SQLITE = "sqlite" SYBASE = "sybase" HSQLDB = "hsqldb" INFORMIX = "informix" class CUSTOM_LOGGING: PAYLOAD = 9 TRAFFIC_OUT = 8 TRAFFIC_IN = 7 class OS: LINUX = "Linux" WINDOWS = "Windows" class PLACE: GET = "GET" POST = "POST" URI = "URI" COOKIE = "Cookie" USER_AGENT = "User-Agent" REFERER = "Referer" HOST = "Host" CUSTOM_POST = "(custom) POST" CUSTOM_HEADER = "(custom) HEADER" class POST_HINT: SOAP = "SOAP" JSON = "JSON" JSON_LIKE = "JSON-like" MULTIPART = "MULTIPART" XML = "XML (generic)" ARRAY_LIKE = "Array-like" class HTTPMETHOD: GET = "GET" POST = "POST" HEAD = "HEAD" PUT = "PUT" DELETE = "DELETE" TRACE = "TRACE" OPTIONS = "OPTIONS" CONNECT = "CONNECT" PATCH = "PATCH" class NULLCONNECTION: HEAD = "HEAD" RANGE = "Range" SKIP_READ = "skip-read" class REFLECTIVE_COUNTER: MISS = "MISS" HIT = "HIT" class CHARSET_TYPE: BINARY = 1 DIGITS = 2 HEXADECIMAL = 3 ALPHA = 4 ALPHANUM = 5 class HEURISTIC_TEST: CASTED = 1 NEGATIVE = 2 POSITIVE = 3 class HASH: MYSQL = r'(?i)\A\*[0-9a-f]{40}\Z' MYSQL_OLD = r'(?i)\A(?![0-9]+\Z)[0-9a-f]{16}\Z' POSTGRES = r'(?i)\Amd5[0-9a-f]{32}\Z' MSSQL = r'(?i)\A0x0100[0-9a-f]{8}[0-9a-f]{40}\Z' MSSQL_OLD = r'(?i)\A0x0100[0-9a-f]{8}[0-9a-f]{80}\Z' MSSQL_NEW = r'(?i)\A0x0200[0-9a-f]{8}[0-9a-f]{128}\Z' ORACLE = r'(?i)\As:[0-9a-f]{60}\Z' ORACLE_OLD = r'(?i)\A[01-9a-f]{16}\Z' MD5_GENERIC = r'(?i)\A[0-9a-f]{32}\Z' SHA1_GENERIC = r'(?i)\A[0-9a-f]{40}\Z' SHA224_GENERIC = r'(?i)\A[0-9a-f]{28}\Z' SHA384_GENERIC = r'(?i)\A[0-9a-f]{48}\Z' SHA512_GENERIC = r'(?i)\A[0-9a-f]{64}\Z' CRYPT_GENERIC = r'(?i)\A(?!\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z)(?![0-9]+\Z)[./0-9A-Za-z]{13}\Z' WORDPRESS = r'(?i)\A\$P\$[./0-9A-Za-z]{31}\Z' # Reference: http://www.zytrax.com/tech/web/mobile_ids.html class MOBILES: BLACKBERRY = ("BlackBerry 9900", "Mozilla/5.0 (BlackBerry; U; BlackBerry 9900; en) AppleWebKit/534.11+ (KHTML, like Gecko) Version/7.1.0.346 Mobile Safari/534.11+") GALAXY = ("Samsung Galaxy S", "Mozilla/5.0 (Linux; U; Android 2.2; en-US; SGH-T959D Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1") HP = ("HP iPAQ 6365", "Mozilla/4.0 (compatible; MSIE 4.01; Windows CE; PPC; 240x320; HP iPAQ h6300)") HTC = ("HTC Sensation", "Mozilla/5.0 (Linux; U; Android 4.0.3; de-ch; HTC Sensation Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30") IPHONE = ("Apple iPhone 4s", "Mozilla/5.0 (iPhone; CPU iPhone OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B179 Safari/7534.48.3") NEXUS = ("Google Nexus 7", "Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19") NOKIA = ("Nokia N97", "Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/10.0.012; Profile/MIDP-2.1 Configuration/CLDC-1.1; en-us) 
AppleWebKit/525 (KHTML, like Gecko) WicKed/7.1.12344") class PROXY_TYPE: HTTP = "HTTP" HTTPS = "HTTPS" SOCKS4 = "SOCKS4" SOCKS5 = "SOCKS5" class REGISTRY_OPERATION: READ = "read" ADD = "add" DELETE = "delete" class DUMP_FORMAT: CSV = "CSV" HTML = "HTML" SQLITE = "SQLITE" class HTTP_HEADER: ACCEPT = "Accept" ACCEPT_CHARSET = "Accept-Charset" ACCEPT_ENCODING = "Accept-Encoding" ACCEPT_LANGUAGE = "Accept-Language" AUTHORIZATION = "Authorization" CACHE_CONTROL = "Cache-Control" CONNECTION = "Connection" CONTENT_ENCODING = "Content-Encoding" CONTENT_LENGTH = "Content-Length" CONTENT_RANGE = "Content-Range" CONTENT_TYPE = "Content-Type" COOKIE = "Cookie" EXPIRES = "Expires" HOST = "Host" IF_MODIFIED_SINCE = "If-Modified-Since" LAST_MODIFIED = "Last-Modified" LOCATION = "Location" PRAGMA = "Pragma" PROXY_AUTHORIZATION = "Proxy-Authorization" PROXY_CONNECTION = "Proxy-Connection" RANGE = "Range" REFERER = "Referer" REFRESH = "Refresh" # Reference: http://stackoverflow.com/a/283794 SERVER = "Server" SET_COOKIE = "Set-Cookie" TRANSFER_ENCODING = "Transfer-Encoding" URI = "URI" USER_AGENT = "User-Agent" VIA = "Via" X_POWERED_BY = "X-Powered-By" class EXPECTED: BOOL = "bool" INT = "int" class OPTION_TYPE: BOOLEAN = "boolean" INTEGER = "integer" FLOAT = "float" STRING = "string" class HASHDB_KEYS: DBMS = "DBMS" DBMS_FORK = "DBMS_FORK" CHECK_WAF_RESULT = "CHECK_WAF_RESULT" CONF_TMP_PATH = "CONF_TMP_PATH" KB_ABS_FILE_PATHS = "KB_ABS_FILE_PATHS" KB_BRUTE_COLUMNS = "KB_BRUTE_COLUMNS" KB_BRUTE_TABLES = "KB_BRUTE_TABLES" KB_CHARS = "KB_CHARS" KB_DYNAMIC_MARKINGS = "KB_DYNAMIC_MARKINGS" KB_INJECTIONS = "KB_INJECTIONS" KB_ERROR_CHUNK_LENGTH = "KB_ERROR_CHUNK_LENGTH" KB_XP_CMDSHELL_AVAILABLE = "KB_XP_CMDSHELL_AVAILABLE" OS = "OS" class REDIRECTION: YES = "Y" NO = "N" class PAYLOAD: SQLINJECTION = { 1: "boolean-based blind", 2: "error-based", 3: "inline query", 4: "stacked queries", 5: "AND/OR time-based blind", 6: "UNION query", } PARAMETER = { 1: "Unescaped numeric", 2: "Single quoted string", 3: "LIKE single quoted string", 4: "Double quoted string", 5: "LIKE double quoted string", } RISK = { 0: "No risk", 1: "Low risk", 2: "Medium risk", 3: "High risk", } CLAUSE = { 0: "Always", 1: "WHERE", 2: "GROUP BY", 3: "ORDER BY", 4: "LIMIT", 5: "OFFSET", 6: "TOP", 7: "Table name", 8: "Column name", } class METHOD: COMPARISON = "comparison" GREP = "grep" TIME = "time" UNION = "union" class TECHNIQUE: BOOLEAN = 1 ERROR = 2 QUERY = 3 STACKED = 4 TIME = 5 UNION = 6 class WHERE: ORIGINAL = 1 NEGATIVE = 2 REPLACE = 3 class WIZARD: BASIC = ("getBanner", "getCurrentUser", "getCurrentDb", "isDba") INTERMEDIATE = ("getBanner", "getCurrentUser", "getCurrentDb", "isDba", "getUsers", "getDbs", "getTables", "getSchema", "excludeSysDbs") ALL = ("getBanner", "getCurrentUser", "getCurrentDb", "isDba", "getHostname", "getUsers", "getPasswordHashes", "getPrivileges", "getRoles", "dumpAll") class ADJUST_TIME_DELAY: DISABLE = -1 NO = 0 YES = 1 class WEB_API: PHP = "php" ASP = "asp" ASPX = "aspx" JSP = "jsp" class CONTENT_TYPE: TARGET = 0 TECHNIQUES = 1 DBMS_FINGERPRINT = 2 BANNER = 3 CURRENT_USER = 4 CURRENT_DB = 5 HOSTNAME = 6 IS_DBA = 7 USERS = 8 PASSWORDS = 9 PRIVILEGES = 10 ROLES = 11 DBS = 12 TABLES = 13 COLUMNS = 14 SCHEMA = 15 COUNT = 16 DUMP_TABLE = 17 SEARCH = 18 SQL_QUERY = 19 COMMON_TABLES = 20 COMMON_COLUMNS = 21 FILE_READ = 22 FILE_WRITE = 23 OS_CMD = 24 REG_READ = 25 PART_RUN_CONTENT_TYPES = { "checkDbms": CONTENT_TYPE.TECHNIQUES, "getFingerprint": CONTENT_TYPE.DBMS_FINGERPRINT, "getBanner": CONTENT_TYPE.BANNER, 
"getCurrentUser": CONTENT_TYPE.CURRENT_USER, "getCurrentDb": CONTENT_TYPE.CURRENT_DB, "getHostname": CONTENT_TYPE.HOSTNAME, "isDba": CONTENT_TYPE.IS_DBA, "getUsers": CONTENT_TYPE.USERS, "getPasswordHashes": CONTENT_TYPE.PASSWORDS, "getPrivileges": CONTENT_TYPE.PRIVILEGES, "getRoles": CONTENT_TYPE.ROLES, "getDbs": CONTENT_TYPE.DBS, "getTables": CONTENT_TYPE.TABLES, "getColumns": CONTENT_TYPE.COLUMNS, "getSchema": CONTENT_TYPE.SCHEMA, "getCount": CONTENT_TYPE.COUNT, "dumpTable": CONTENT_TYPE.DUMP_TABLE, "search": CONTENT_TYPE.SEARCH, "sqlQuery": CONTENT_TYPE.SQL_QUERY, "tableExists": CONTENT_TYPE.COMMON_TABLES, "columnExists": CONTENT_TYPE.COMMON_COLUMNS, "readFile": CONTENT_TYPE.FILE_READ, "writeFile": CONTENT_TYPE.FILE_WRITE, "osCmd": CONTENT_TYPE.OS_CMD, "regRead": CONTENT_TYPE.REG_READ } class CONTENT_STATUS: IN_PROGRESS = 0 COMPLETE = 1 class AUTH_TYPE: BASIC = "basic" DIGEST = "digest" NTLM = "ntlm" PKI = "pki" class AUTOCOMPLETE_TYPE: SQL = 0 OS = 1 SQLMAP = 2 class NOTE: FALSE_POSITIVE_OR_UNEXPLOITABLE = "false positive or unexploitable" class MKSTEMP_PREFIX: HASHES = "sqlmaphashes-" CRAWLER = "sqlmapcrawler-" IPC = "sqlmapipc-" TESTING = "sqlmaptesting-" RESULTS = "sqlmapresults-" COOKIE_JAR = "sqlmapcookiejar-" BIG_ARRAY = "sqlmapbigarray-" class TIMEOUT_STATE: NORMAL = 0 EXCEPTION = 1 TIMEOUT = 2
michaelhidalgo/7WCSQ
Tools/SQLMap/sqlmap/lib/core/enums.py
Python
apache-2.0
10,079
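These classes are plain namespaces of constants that other sqlmap modules reference by attribute. An illustrative sketch of consuming them (it assumes sqlmap's source root is on sys.path so lib.core.enums is importable; the checks are not sqlmap's real logic):

import re

from lib.core.enums import DBMS, HASH, PAYLOAD

fingerprinted = DBMS.MYSQL
if fingerprinted == DBMS.MYSQL:
    print("back-end DBMS: %s" % fingerprinted)

candidate = "5d41402abc4b2a76b9719d911017c592"   # md5("hello")
if re.match(HASH.MD5_GENERIC, candidate):
    print("looks like a generic MD5 hash")

print(PAYLOAD.SQLINJECTION[6])   # "UNION query"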
from __future__ import print_function, division import cPickle import gzip import os import sys import timeit import numpy import theano from theano import tensor import mdn_one_ahead # parameters batch_size = 100 L1_reg=0.00 L2_reg=0.0001 n_epochs=200 learning_rate = 0.001 momentum = 0.9 sigma_in = 320 mixing_in = 320 n_components = 5 EPS = numpy.finfo(theano.config.floatX).eps # load data datasets = mdn_one_ahead.load_data() train_set_x, train_set_y = datasets[0] valid_set_x, valid_set_y = datasets[1] test_set_x, test_set_y = datasets[2] X = train_set_x.get_value(borrow=True)[:20].copy() Y = train_set_y.get_value(borrow=True)[:20].copy() n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size print( '... building the model') # allocate symbolic variables for the data index = tensor.lscalar() # index to a [mini]batch x = tensor.matrix('x') # the data is presented as rasterized images y = tensor.vector('y') # the labels are presented as 1D vector of rng = numpy.random.RandomState(1234) classifier = mdn_one_ahead.MLP( rng=rng, input=x, n_in=320, n_hiddens=[300, 300, 300, 300] ) cost = ( classifier.negative_log_likelihood(y) + L2_reg * classifier.L2_sqr ) test_model = theano.function( inputs=[index], outputs=classifier.errors(y), givens={ x: test_set_x[index * batch_size:(index + 1) * batch_size], y: test_set_y[index * batch_size:(index + 1) * batch_size] } ) validate_model = theano.function( inputs=[index], outputs=classifier.errors(y), givens={ x: valid_set_x[index * batch_size:(index + 1) * batch_size], y: valid_set_y[index * batch_size:(index + 1) * batch_size] } ) gparams = [tensor.grad(cost, param) for param in classifier.params] updates = [ (param, param - learning_rate * gparam) for param, gparam in zip(classifier.params, gparams) ] model_gradients = theano.function( inputs = [x, y], outputs=gparams) train_gradients = theano.function( inputs=[index], outputs=gparams, givens={ x: train_set_x[index * batch_size: (index + 1) * batch_size], y: train_set_y[index * batch_size: (index + 1) * batch_size] } ) train_model = theano.function( inputs=[index], outputs=cost, updates=updates, givens={ x: train_set_x[index * batch_size: (index + 1) * batch_size], y: train_set_y[index * batch_size: (index + 1) * batch_size] } ) print('... training') # early-stopping parameters patience = 10000 # look as this many examples regardless patience_increase = 2 # wait this much longer when a new best is # found improvement_threshold = 0.99995 # a relative improvement of this much is # considered significant validation_frequency = min(n_train_batches, patience / 2) # go through this many # minibatche before checking the network # on the validation set; in this case we # check every epoch best_validation_loss = numpy.inf best_iter = 0 test_score = 0. 
start_time = timeit.default_timer() epoch = 0 done_looping = False while (epoch < n_epochs) and (not done_looping): epoch = epoch + 1 for minibatch_index in xrange(n_train_batches): gs = train_gradients(minibatch_index) if any(numpy.any(numpy.isnan(g)) for g in gs): import pdb; pdb.set_trace() minibatch_avg_cost = train_model(minibatch_index) # iteration number iter = (epoch - 1) * n_train_batches + minibatch_index if (iter + 1) % validation_frequency == 0: # compute zero-one loss on validation set validation_losses = [validate_model(i) for i in xrange(n_valid_batches)] this_validation_loss = numpy.mean(validation_losses) print( 'epoch %i, minibatch %i/%i, validation error %f %%' % ( epoch, minibatch_index + 1, n_train_batches, this_validation_loss * 100. ) ) # if we got the best validation score until now if this_validation_loss < best_validation_loss: #improve patience if loss improvement is good enough if ( this_validation_loss < best_validation_loss * improvement_threshold ): patience = max(patience, iter * patience_increase) best_validation_loss = this_validation_loss best_iter = iter # test it on the test set test_losses = [test_model(i) for i in xrange(n_test_batches)] test_score = numpy.mean(test_losses) print((' epoch %i, minibatch %i/%i, test error of ' 'best model %f %%') % (epoch, minibatch_index + 1, n_train_batches, test_score * 100.)) if patience <= iter: done_looping = True break end_time = timeit.default_timer() print(('Optimization complete. Best validation score of %f %% ' 'obtained at iteration %i, with test performance %f %%') % (best_validation_loss * 100., best_iter + 1, test_score * 100.)) print >> sys.stderr, ('The code for file ' + os.path.split(__file__)[1] + ' ran for %.2fm' % ((end_time - start_time) / 60.)) # l = 7.752, tanh, 3 components, 20 hid, 1 hidlayer, # l = 5.057, relu, 3 components, (100, 100) hid # l = 4.865, relu, 5 components, (150, 150, 150) hid
markstoehr/structured_gaussian_mixtures
structured_gaussian_mixtures/mdn_experiment_one_ahead.py
Python
apache-2.0
5,886
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """LSTM layer.""" from kws_streaming.layers import modes from kws_streaming.layers.compat import tf from kws_streaming.layers.compat import tf1 class LSTM(tf.keras.layers.Layer): """LSTM with support of streaming inference with internal/external state. In training mode we use LSTM. It receives input data [batch, time, feature] and returns [batch, time, units] if return_sequences==True or returns [batch, 1, units] if return_sequences==False In inference mode we use LSTMCell In streaming mode with internal state it receives input data [batch, 1, feature] In streaming mode with internal state it returns: [batch, 1, units] In streaming mode with external state it receives input data with states: [batch, 1, feature] + state1[batch, units] + state2[batch, units] In streaming mode with external state it returns: (output[batch, 1, units], state1[batch, units], state2[batch, units]) We use layer and parameter description from: https://www.tensorflow.org/api_docs/python/tf/keras/layers/LSTM https://www.tensorflow.org/api_docs/python/tf/compat/v1/nn/rnn_cell/LSTMCell https://www.tensorflow.org/api_docs/python/tf/keras/layers/RNN Attributes: units: dimensionality of the output space. mode: Training or inference modes: non streaming, streaming. inference_batch_size: batch size for inference mode return_sequences: Whether to return the last output. in the output sequence, or the full sequence. use_peepholes: True to enable diagonal/peephole connections num_proj: The output dimensionality for the projection matrices. If None, no projection is performed. It will be used only if use_peepholes is True. unroll: If True, the network will be unrolled, else a symbolic loop will be used. For any inference mode it will be set True inside. stateful: If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch. If model will be in streaming mode then it is better to train model with stateful=True This flag is about stateful training and applied during training only. 
""" def __init__(self, units=64, mode=modes.Modes.TRAINING, inference_batch_size=1, return_sequences=False, use_peepholes=False, num_proj=128, unroll=False, stateful=False, name='LSTM', **kwargs): super(LSTM, self).__init__(**kwargs) self.mode = mode self.inference_batch_size = inference_batch_size self.units = units self.return_sequences = return_sequences self.num_proj = num_proj self.use_peepholes = use_peepholes self.stateful = stateful if mode != modes.Modes.TRAINING: # in any inference mode # let's unroll lstm, so there is no symbolic loops / control flow unroll = True self.unroll = unroll if self.mode in (modes.Modes.TRAINING, modes.Modes.NON_STREAM_INFERENCE): if use_peepholes: self.lstm_cell = tf1.nn.rnn_cell.LSTMCell( num_units=units, use_peepholes=True, num_proj=num_proj, name='cell') self.lstm = tf.keras.layers.RNN( cell=self.lstm_cell, return_sequences=return_sequences, unroll=self.unroll, stateful=self.stateful) else: self.lstm = tf.keras.layers.LSTM( units=units, return_sequences=return_sequences, name='cell', unroll=self.unroll, stateful=self.stateful) if self.mode == modes.Modes.STREAM_INTERNAL_STATE_INFERENCE: # create state varaible for stateful streamable inference self.input_state1 = self.add_weight( name='input_state1', shape=[inference_batch_size, units], trainable=False, initializer=tf.zeros_initializer) if use_peepholes: # second state in peepholes LSTM has different dimensions with # the first state due to projection layer with dim num_proj self.input_state2 = self.add_weight( name='input_state2', shape=[inference_batch_size, num_proj], trainable=False, initializer=tf.zeros_initializer) self.lstm_cell = tf1.nn.rnn_cell.LSTMCell( num_units=units, use_peepholes=True, num_proj=num_proj, name='cell') else: # second state in the standard LSTM has the same dimensions with # the first state self.input_state2 = self.add_weight( name='input_state2', shape=[inference_batch_size, units], trainable=False, initializer=tf.zeros_initializer) self.lstm_cell = tf.keras.layers.LSTMCell(units=units, name='cell') self.lstm = None elif self.mode == modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE: # in streaming mode with external state state, # state becomes an input output placeholders self.input_state1 = tf.keras.layers.Input( shape=(units,), batch_size=inference_batch_size, name=self.name + 'input_state1') if use_peepholes: self.input_state2 = tf.keras.layers.Input( shape=(num_proj,), batch_size=inference_batch_size, name=self.name + 'input_state2') self.lstm_cell = tf1.nn.rnn_cell.LSTMCell( num_units=units, use_peepholes=True, num_proj=num_proj) else: self.input_state2 = tf.keras.layers.Input( shape=(units,), batch_size=inference_batch_size, name=self.name + 'input_state2') self.lstm_cell = tf.keras.layers.LSTMCell(units=units, name='cell') self.lstm = None self.output_state1 = None self.output_state2 = None def call(self, inputs): if inputs.shape.rank != 3: # [batch, time, feature] raise ValueError('inputs.shape.rank:%d must be 3' % inputs.shape.rank) if self.mode == modes.Modes.STREAM_INTERNAL_STATE_INFERENCE: # run streamable inference on input [batch, 1, features] # returns output [batch, 1, units] return self._streaming_internal_state(inputs) elif self.mode == modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE: # in streaming mode with external state # in addition to output we return output state output, self.output_state1, self.output_state2 = self._streaming_external_state( inputs, self.input_state1, self.input_state2) return output elif self.mode in (modes.Modes.TRAINING, 
modes.Modes.NON_STREAM_INFERENCE): # run non streamable training or non streamable inference # on input [batch, time, features], returns [batch, time, units] return self._non_streaming(inputs) else: raise ValueError(f'Encountered unexpected mode `{self.mode}`.') def get_config(self): config = { 'mode': self.mode, 'inference_batch_size': self.inference_batch_size, 'units': self.units, 'return_sequences': self.return_sequences, 'unroll': self.unroll, 'num_proj': self.num_proj, 'use_peepholes': self.use_peepholes, 'stateful': self.stateful, } base_config = super(LSTM, self).get_config() return dict(list(base_config.items()) + list(config.items())) def get_input_state(self): # input state is used only for STREAM_EXTERNAL_STATE_INFERENCE mode if self.mode == modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE: return [self.input_state1, self.input_state2] else: raise ValueError('Expected the layer to be in external streaming mode, ' f'not `{self.mode}`.') def get_output_state(self): # output state is used only for STREAM_EXTERNAL_STATE_INFERENCE mode if self.mode == modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE: return [self.output_state1, self.output_state2] else: raise ValueError('Expected the layer to be in external streaming mode, ' f'not `{self.mode}`.') def _streaming_internal_state(self, inputs): # first dimension is batch size if inputs.shape[0] != self.inference_batch_size: raise ValueError( 'inputs.shape[0]:%d must be = self.inference_batch_size:%d' % (inputs.shape[0], self.inference_batch_size)) # receive inputs: [batch, 1, feature] # convert it for lstm cell to inputs1: [batch, feature] inputs1 = tf.keras.backend.squeeze(inputs, axis=1) output, states = self.lstm_cell(inputs1, [self.input_state1, self.input_state2]) # update internal states assign_state1 = self.input_state1.assign(states[0]) assign_state2 = self.input_state2.assign(states[1]) with tf.control_dependencies([assign_state1, assign_state2]): # output [batch, 1, feature] output = tf.keras.backend.expand_dims(output, axis=1) return output def _streaming_external_state(self, inputs, state1, state2): # first dimension is batch size if inputs.shape[0] != self.inference_batch_size: raise ValueError( 'inputs.shape[0]:%d must be = self.inference_batch_size:%d' % (inputs.shape[0], self.inference_batch_size)) # receive inputs: [batch, 1, feature] # convert it for lstm cell to inputs1: [batch, feature] inputs1 = tf.keras.backend.squeeze(inputs, axis=1) output, states = self.lstm_cell(inputs1, [state1, state2]) # output [batch, 1, feature] output = tf.keras.backend.expand_dims(output, axis=1) return output, states[0], states[1] def _non_streaming(self, inputs): # inputs [batch, time, feature] output = self.lstm(inputs) # [batch, time, units] if not self.return_sequences: # if we do not return sequence the output will be [batch, units] # for consistency make it [batch, 1, units] output = tf.keras.backend.expand_dims(output, axis=1) return output
google-research/google-research
kws_streaming/layers/lstm.py
Python
apache-2.0
10,610
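The docstring above describes training and streaming modes for this LSTM wrapper. A minimal sketch of the non-streaming training path, wiring the layer into a small Keras functional model; the input shape and classification head are illustrative:

from kws_streaming.layers import lstm
from kws_streaming.layers import modes
from kws_streaming.layers.compat import tf

# [batch, time, feature] input, as required by LSTM.call().
inputs = tf.keras.layers.Input(shape=(16, 40))
net = lstm.LSTM(units=64, mode=modes.Modes.TRAINING,
                return_sequences=True)(inputs)    # [batch, time, units]
net = tf.keras.layers.Dense(10)(net[:, -1, :])    # last step -> logits
model = tf.keras.Model(inputs, net)
model.summary()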
#!/usr/bin/python # # Copyright 2018-2021 Polyaxon, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest import tempfile from polyaxon import settings from polyaxon.auxiliaries import ( get_default_init_container, get_default_sidecar_container, ) from polyaxon.connections.kinds import V1ConnectionKind from polyaxon.connections.schemas import V1BucketConnection, V1K8sResourceSchema from polyaxon.exceptions import PolyaxonCompilerError from polyaxon.managers.agent import AgentConfigManager from polyaxon.polyaxonfile.specs import kinds from polyaxon.polyflow import V1CompiledOperation, V1RunKind from polyaxon.polypod.compiler.resolver import BaseResolver from polyaxon.schemas.cli.agent_config import AgentConfig from polyaxon.schemas.types import V1ConnectionType, V1K8sResourceType from polyaxon.utils.test_utils import BaseTestCase @pytest.mark.polypod_mark class TestResolver(BaseTestCase): def setUp(self): super().setUp() self.compiled_operation = V1CompiledOperation.read( { "version": 1.1, "kind": kinds.COMPILED_OPERATION, "plugins": { "auth": False, "shm": False, "collectLogs": False, "collectArtifacts": False, "collectResources": False, }, "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}}, } ) def test_core_resolver_instance(self): resolver = BaseResolver( run=None, compiled_operation=self.compiled_operation, owner_name="user", project_name="p1", project_uuid=None, run_name="j1", run_uuid=None, run_path="test", params=None, ) assert resolver.project_uuid == resolver.project_name assert resolver.run_uuid == resolver.run_name resolver = BaseResolver( run=None, compiled_operation=self.compiled_operation, owner_name="user", project_name="p1", run_name="j1", run_path="test", project_uuid="some_uuid", run_uuid="some_uuid", params=None, ) assert resolver.project_uuid != resolver.project_name assert resolver.run_uuid != resolver.run_name def test_resolve_connections_with_no_config(self): settings.AGENT_CONFIG = None resolver = BaseResolver( run=None, compiled_operation=self.compiled_operation, owner_name="user", project_name="p1", project_uuid=None, run_name="j1", run_uuid=None, run_path="test", params=None, ) with self.assertRaises(PolyaxonCompilerError): resolver.resolve_connections() def test_resolve_without_compiled_operation(self): with self.assertRaises(PolyaxonCompilerError): BaseResolver( run=None, compiled_operation=None, owner_name="user", project_name="p1", project_uuid=None, run_name="j1", run_uuid=None, run_path="test", params=None, ) def test_resolve_connections_with_invalid_config(self): fpath = tempfile.mkdtemp() AgentConfigManager.CONFIG_PATH = fpath secret1 = V1K8sResourceType( name="secret1", schema=V1K8sResourceSchema(name="secret1"), is_requested=True, ) secret2 = V1K8sResourceType( name="secret2", schema=V1K8sResourceSchema(name="secret2"), is_requested=True, ) connection1 = V1ConnectionType( name="test_s3", kind=V1ConnectionKind.S3, schema=V1BucketConnection(bucket="s3//:foo"), secret=secret1.schema, ) connection2 = V1ConnectionType( name="test_gcs", 
kind=V1ConnectionKind.GCS, schema=V1BucketConnection(bucket="gcs//:foo"), secret=secret1.schema, ) connection3 = V1ConnectionType( name="test_wasb", kind=V1ConnectionKind.WASB, schema=V1BucketConnection(bucket="wasbs//:foo"), secret=secret2.schema, ) settings.AGENT_CONFIG = AgentConfig( namespace="foo", artifacts_store=connection1, connections=[connection2, connection3], ) resolver = BaseResolver( run=None, compiled_operation=self.compiled_operation, owner_name="user", project_name="p1", project_uuid=None, run_name="j1", run_uuid=None, run_path="test", params=None, ) resolver.resolve_connections() assert resolver.namespace == "foo" assert resolver.connection_by_names == {connection1.name: connection1} assert resolver.artifacts_store == connection1 assert [s.schema for s in resolver.secrets] == [secret1.schema, secret2.schema] assert resolver.polyaxon_sidecar == get_default_sidecar_container() assert resolver.polyaxon_init == get_default_init_container() # Add run spec to resolve connections compiled_operation = V1CompiledOperation.read( { "version": 1.1, "kind": kinds.COMPILED_OPERATION, "plugins": { "auth": False, "shm": False, "collectLogs": False, "collectArtifacts": False, "collectResources": False, }, "run": { "kind": V1RunKind.JOB, "container": {"image": "test"}, "connections": {connection3.name}, }, } ) resolver = BaseResolver( run=None, compiled_operation=compiled_operation, owner_name="user", project_name="p1", project_uuid=None, run_name="j1", run_uuid=None, run_path="test", params=None, ) resolver.resolve_connections() assert resolver.namespace == "foo" assert resolver.connection_by_names == { connection1.name: connection1, connection3.name: connection3, } assert [s.schema for s in resolver.secrets] == [secret1.schema, secret2.schema] assert resolver.artifacts_store == connection1 assert resolver.polyaxon_sidecar == get_default_sidecar_container() assert resolver.polyaxon_init == get_default_init_container() # Add run spec to resolve connections compiled_operation = V1CompiledOperation.read( { "version": 1.1, "kind": kinds.COMPILED_OPERATION, "plugins": { "auth": False, "shm": False, "collectLogs": False, "collectArtifacts": False, "collectResources": False, }, "run": { "kind": V1RunKind.JOB, "container": {"image": "test"}, "connections": { connection1.name, connection2.name, connection3.name, }, }, } ) resolver = BaseResolver( run=None, compiled_operation=compiled_operation, owner_name="user", project_name="p1", project_uuid=None, run_name="j1", run_uuid=None, run_path="test", params=None, ) resolver.resolve_connections() assert resolver.namespace == "foo" assert resolver.connection_by_names == { connection3.name: connection3, connection2.name: connection2, connection1.name: connection1, } assert [s.schema for s in resolver.secrets] == [secret1.schema, secret2.schema] assert resolver.artifacts_store == connection1 assert resolver.polyaxon_sidecar == get_default_sidecar_container() assert resolver.polyaxon_init == get_default_init_container()
polyaxon/polyaxon
core/tests/test_polypod/test_resolvers/test_core_resolver.py
Python
apache-2.0
9,111
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf def get_config(): return tf.contrib.training.HParams(**{ 'total_bs': 64, 'eval_total_bs': 16, 'dataset_name': 'imagenet32', 'dataset_config': tf.contrib.training.HParams(), 'model_name': 'SlicedChannelModel', 'model_config': tf.contrib.training.HParams(**{ 'optim': tf.contrib.training.HParams(**{ 'max_lr': 1e-4, 'warmup': 5000, 'grad_clip_norm': 1.0, 'ema': 0.99995, 'optimizer': 'adam', 'adam_beta1': 0.9, 'adam_beta2': 0.999, }), 'dropout': 0.04, 'img_size': 32, 'ardec': tf.contrib.training.HParams(**{ 'emb_dim': 1536, 'hdim_factor': 1, 'emb_init_scale': 5.0, 'num_heads': 16, 'num_exterior_layers': 8, 'num_outer_layers': 8, 'num_inner_layers': 8, 'res_init_scale': 1e-10, }), }) })
google-research/google-research
axial/config_imagenet32.py
Python
apache-2.0
1,760
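A note on the record above: the whole configuration is a tree of nested HParams objects, so values are read back with plain attribute access. A minimal sketch, assuming get_config() from config_imagenet32.py is importable in a TF1 environment; nothing here is part of the original file.

# Illustrative only: reading values back out of the nested HParams tree that
# get_config() above returns. Assumes a TensorFlow 1.x environment where
# tf.contrib.training.HParams exists, as the config itself already requires.
config = get_config()
print(config.total_bs)                      # 64
print(config.dataset_name)                  # 'imagenet32'
print(config.model_config.dropout)          # 0.04
print(config.model_config.optim.max_lr)     # 1e-4
print(config.model_config.ardec.num_heads)  # 16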
# Copyright 2017-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file # except in compliance with the License. A copy of the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed on an "AS IS" # BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under the License. import urllib import json import time from jose import jwk, jwt from jose.utils import base64url_decode region = 'ap-southeast-2' userpool_id = 'ap-southeast-2_xxxxxxxxx' app_client_id = '<ENTER APP CLIENT ID HERE>' keys_url = 'https://cognito-idp.{}.amazonaws.com/{}/.well-known/jwks.json'.format(region, userpool_id) # instead of re-downloading the public keys every time # we download them only on cold start # https://aws.amazon.com/blogs/compute/container-reuse-in-lambda/ response = urllib.urlopen(keys_url) keys = json.loads(response.read())['keys'] def lambda_handler(event, context): token = event['token'] # get the kid from the headers prior to verification headers = jwt.get_unverified_headers(token) kid = headers['kid'] # search for the kid in the downloaded public keys key_index = -1 for i in range(len(keys)): if kid == keys[i]['kid']: key_index = i break if key_index == -1: print('Public key not found in jwks.json'); return False # construct the public key public_key = jwk.construct(keys[key_index]) # get the last two sections of the token, # message and signature (encoded in base64) message, encoded_signature = str(token).rsplit('.', 1) # decode the signature decoded_signature = base64url_decode(encoded_signature.encode('utf-8')) # verify the signature if not public_key.verify(message.encode("utf8"), decoded_signature): print('Signature verification failed') return False print('Signature successfully verified') # since we passed the verification, we can now safely # use the unverified claims claims = jwt.get_unverified_claims(token) # additionally we can verify the token expiration if time.time() > claims['exp']: print('Token is expired') return False # and the Audience (use claims['client_id'] if verifying an access token) if claims['aud'] != app_client_id: print('Token was not issued for this audience') return False # now we can use the claims print(claims) return claims # the following is useful to make this script executable in both # AWS Lambda and any other local environments if __name__ == '__main__': # for testing locally you can enter the JWT ID Token here event = {'token': ''} lambda_handler(event, None)
rsavordelli/aws-support-tools
Cognito/decode-verify-jwt/decode-verify-jwt.py
Python
apache-2.0
2,958
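One caveat on the record above: it targets the Python 2 Lambda runtime, and urllib.urlopen does not exist in Python 3. Below is a minimal sketch of just the cold-start key download under Python 3, keeping the same keys_url layout; the helper name is ours, not part of the AWS sample.

# Sketch only: Python 3 replacement for the urllib.urlopen() call used above to
# fetch the Cognito JSON Web Key Set once per container cold start.
import json
from urllib.request import urlopen

def download_jwks(region, userpool_id):
    keys_url = ('https://cognito-idp.{}.amazonaws.com/{}'
                '/.well-known/jwks.json').format(region, userpool_id)
    with urlopen(keys_url) as response:
        return json.loads(response.read())['keys']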
# Lint as: python3 # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for MT Models.""" import lingvo.compat as tf from lingvo.core import base_input_generator from lingvo.core import base_layer from lingvo.core import cluster_factory from lingvo.core import layers from lingvo.core import optimizer from lingvo.core import py_utils from lingvo.core import schedule from lingvo.core import test_helper from lingvo.core import test_utils from lingvo.tasks.mt import decoder from lingvo.tasks.mt import encoder from lingvo.tasks.mt import input_generator from lingvo.tasks.mt import model import numpy as np FLAGS = tf.flags.FLAGS _TF_RANDOM_SEED = 93820986 class TestInputGenerator(base_input_generator.BaseSequenceInputGenerator): @classmethod def Params(cls): p = super().Params() p.Define('split', True, '') return p def __init__(self, params): super().__init__(params) self._step = 0 def InfeedBatchSize(self): if self.params.split: return 10 / 2 return 10 def _InputBatch(self): np.random.seed(1) bs, sl = 10, 7 src_ids = tf.constant( np.random.randint(low=0, high=8192 - 1, size=[bs, sl], dtype=np.int32)) tgt_ids = tf.constant( np.random.randint(low=0, high=8192 - 1, size=[bs, sl], dtype=np.int32)) tgt_labels = tf.constant( np.random.randint(low=0, high=8192 - 1, size=[bs, sl], dtype=np.int32)) tgt_weights = tf.constant(np.ones(shape=[bs, sl], dtype=np.float32)) src_paddings = tf.zeros([bs, sl]) tgt_paddings = tf.zeros([bs, sl]) ret = py_utils.NestedMap() ret.src = py_utils.NestedMap() ret.tgt = py_utils.NestedMap() if self.params.split: src_ids = tf.split(src_ids, 2, 0) src_paddings = tf.split(src_paddings, 2, 0) tgt_ids = tf.split(tgt_ids, 2, 0) tgt_labels = tf.split(tgt_labels, 2, 0) tgt_paddings = tf.split(tgt_paddings, 2, 0) tgt_weights = tf.split(tgt_weights, 2, 0) ret.src.ids = tf.cond( tf.equal(tf.math.floormod(py_utils.GetGlobalStep(), 2), 0), lambda: src_ids[0], lambda: src_ids[1]) ret.src.paddings = tf.cond( tf.equal(tf.math.floormod(py_utils.GetGlobalStep(), 2), 0), lambda: src_paddings[0], lambda: src_paddings[1]) ret.tgt.ids = tf.cond( tf.equal(tf.math.floormod(py_utils.GetGlobalStep(), 2), 0), lambda: tgt_ids[0], lambda: tgt_ids[1]) ret.tgt.labels = tf.cond( tf.equal(tf.math.floormod(py_utils.GetGlobalStep(), 2), 0), lambda: tgt_labels[0], lambda: tgt_labels[1]) ret.tgt.paddings = tf.cond( tf.equal(tf.math.floormod(py_utils.GetGlobalStep(), 2), 0), lambda: tgt_paddings[0], lambda: tgt_paddings[1]) ret.tgt.weights = tf.cond( tf.equal(tf.math.floormod(py_utils.GetGlobalStep(), 2), 0), lambda: tgt_weights[0], lambda: tgt_weights[1]) else: ret.src.ids = src_ids ret.src.paddings = src_paddings ret.tgt.ids = tgt_ids ret.tgt.labels = tgt_labels ret.tgt.paddings = tgt_paddings ret.tgt.weights = tgt_weights return ret class TransformerModelTest(test_utils.TestCase): def _InputParams(self): p = input_generator.NmtInput.Params() input_file = 
test_helper.test_src_dir_path( 'tasks/mt/testdata/wmt14_ende_wpm_32k_test.tfrecord') vocab_file = test_helper.test_src_dir_path( 'tasks/mt/testdata/wmt14_ende_wpm_32k_test.vocab') p.file_pattern = 'tfrecord:' + input_file p.file_random_seed = 31415 p.file_parallelism = 1 p.bucket_upper_bound = [40] p.bucket_batch_limit = [8] p.source_max_length = 200 p.target_max_length = 200 p.tokenizer.token_vocab_filepath = vocab_file p.tokenizer.vocab_size = 32000 return p def _EncoderParams(self): p = encoder.TransformerEncoder.Params() p.name = 'encoder' p.random_seed = 1234 p.model_dim = 4 p.token_emb.embedding_dim = 4 p.token_emb.max_num_shards = 1 p.token_emb.params_init = py_utils.WeightInit.GaussianSqrtDim( seed=p.random_seed) p.position_emb.embedding_dim = 4 p.transformer_stack.transformer_tpl.tr_atten_tpl.num_attention_heads = 2 p.transformer_stack.transformer_tpl.tr_fflayer_tpl.hidden_dim = 5 return p def _DecoderParams(self): p = decoder.TransformerDecoder.Params() p.name = 'decoder' p.random_seed = 1234 p.source_dim = 4 p.model_dim = 4 p.token_emb.embedding_dim = 4 p.token_emb.max_num_shards = 1 p.token_emb.params_init = py_utils.WeightInit.GaussianSqrtDim( seed=p.random_seed) p.position_emb.embedding_dim = 4 p.trans_tpl.source_dim = 4 p.trans_tpl.tr_atten_tpl.source_dim = 4 p.trans_tpl.tr_atten_tpl.num_attention_heads = 2 p.trans_tpl.tr_fflayer_tpl.input_dim = 4 p.trans_tpl.tr_fflayer_tpl.hidden_dim = 8 p.softmax.num_shards = 1 p.target_seq_len = 5 return p def _testParams(self): p = model.TransformerModel.Params() p.name = 'test_mdl' p.input = self._InputParams() p.encoder = self._EncoderParams() p.decoder = self._DecoderParams() p.train.learning_rate = 2e-4 return p def testConstruction(self): with self.session(): p = self._testParams() mdl = p.Instantiate() print('vars = ', mdl.vars) flatten_vars = mdl.vars.Flatten() print('vars flattened = ', flatten_vars) self.assertEqual(len(flatten_vars), 238) # Should match tf.trainable_variables(). 
self.assertEqual(len(tf.trainable_variables()), len(flatten_vars)) def testFProp(self, dtype=tf.float32, fprop_dtype=tf.float32): with self.session(): tf.random.set_seed(_TF_RANDOM_SEED) p = self._testParams() p.dtype = dtype if fprop_dtype: p.fprop_dtype = fprop_dtype p.input.dtype = fprop_dtype mdl = p.Instantiate() mdl.FPropDefaultTheta() loss = mdl.loss logp = mdl.eval_metrics['log_pplx'][0] self.evaluate(tf.global_variables_initializer()) vals = [] for _ in range(5): vals += [self.evaluate((loss, logp))] print('actual vals = %s' % np.array_repr(np.array(vals))) self.assertAllClose(vals, [[226.99771, 10.377038], [243.92978, 10.379991], [260.7751, 10.379107], [201.10846, 10.379791], [272.22006, 10.370288]]) def testFPropEvalMode(self): with self.session(), self.SetEval(True): tf.random.set_seed(_TF_RANDOM_SEED) p = self._testParams() mdl = p.Instantiate() mdl.FPropDefaultTheta() loss = mdl.loss logp = mdl.eval_metrics['log_pplx'][0] self.evaluate(tf.global_variables_initializer()) vals = [] for _ in range(5): vals += [self.evaluate((loss, logp))] print('actual vals = ', vals) self.assertAllClose(vals, [(226.99771, 10.377038), (243.92978, 10.379991), (260.7751, 10.379107), (201.10846, 10.379791), (272.22006, 10.370288)]) def testBProp(self): with self.session(): tf.random.set_seed(_TF_RANDOM_SEED) p = self._testParams() mdl = p.Instantiate() mdl.FPropDefaultTheta() mdl.BProp() loss = mdl.loss logp = mdl.eval_metrics['log_pplx'][0] self.evaluate(tf.global_variables_initializer()) vals = [] for _ in range(5): vals += [self.evaluate((loss, logp, mdl.train_op))[:2]] print('BProp actual vals = ', vals) expected_vals = [(226.99771, 10.377038), (243.87854, 10.3778105), (260.66788, 10.374841), (200.94312, 10.371258), (271.9328, 10.3593445)] self.assertAllClose(vals, expected_vals) def testBPropWithAccumComparison(self): def _SetDefaults(p): p.random_seed = 12345 p.decoder.input_dropout_prob = 0.0 mp = p.encoder.transformer_stack.transparent_merger_tpl mp.weighted_merger_dropout_prob = 0.0 disable_vn = py_utils.VariationalNoiseParams(1.0, False, False) for lp in base_layer.RecursiveFindLayerParams(p): # TODO(lepikhin): lp.dtype = dtype lp.params_init = py_utils.WeightInit.Gaussian(0.1, 12345) lp.vn = disable_vn tp = p.train assert tp.l2_regularizer_weight is None tp.clip_gradient_norm_to_value = False tp.grad_norm_to_clip_to_zero = False tp.optimizer = optimizer.SGD.Params() tp.learning_rate = 1e-2 tp.lr_schedule = schedule.ContinuousSchedule.Params() for l in p.ToText().split('\n'): print(l) return p with self.session(use_gpu=False, graph=tf.Graph()): tf.random.set_seed(_TF_RANDOM_SEED) p = self._testParams() p.input = TestInputGenerator.Params() p.input.split = True p = _SetDefaults(p) p.train.optimizer = optimizer.Accumulator.Params().Set( accum_steps=2, optimizer_tpl=p.train.optimizer) mdl = p.Instantiate() mdl.FPropDefaultTheta() mdl.BProp() self.evaluate(tf.global_variables_initializer()) for _ in range(2): self.evaluate(mdl.train_op) expected = self.evaluate(mdl.dec.softmax.vars['weight_0']) with self.session(use_gpu=False, graph=tf.Graph()): tf.random.set_seed(_TF_RANDOM_SEED) p = self._testParams() p.input = TestInputGenerator.Params() p.input.split = False p = _SetDefaults(p) mdl = p.Instantiate() mdl.FPropDefaultTheta() mdl.BProp() self.evaluate(tf.global_variables_initializer()) self.evaluate(mdl.train_op) actual = self.evaluate(mdl.dec.softmax.vars['weight_0']) self.assertAllClose(expected, actual, rtol=1e-2, atol=1e-2) def testBatchSplit(self): def Run(num_splits): with 
self.session(use_gpu=False, graph=tf.Graph()): tf.random.set_seed(93820981) p = self._testParams() p.input.bucket_batch_limit = [ b * 2 / num_splits for b in p.input.bucket_batch_limit ] with cluster_factory.ForTestingWorker(gpus=num_splits): mdl = p.Instantiate() metrics = mdl.FPropDefaultTheta()[0] self.evaluate(tf.global_variables_initializer()) return self.evaluate(metrics['loss']) res1, res2 = Run(1), Run(2) self.assertAllClose(res1[0], res2[0]) self.assertAllEqual(res1[1], res2[1]) def testBatchSizeInInputGenerator(self): with self.session(): tf.random.set_seed(_TF_RANDOM_SEED) p = self._testParams() with cluster_factory.ForTestingWorker( mode='sync', job='trainer_client', gpus=5): mdl = p.Instantiate() mdl.FPropDefaultTheta() loss = mdl.loss self.evaluate(tf.global_variables_initializer()) _ = self.evaluate(loss) self.assertEqual(mdl.input_generator.infeed_bucket_batch_limit, [40]) def testDecode(self): with self.session(use_gpu=False): tf.random.set_seed(93820985) p = self._testParams() mdl = p.Instantiate() input_batch = mdl.input_generator.GetPreprocessedInputBatch() dec_out_dict = mdl.Decode(input_batch) self.evaluate(tf.global_variables_initializer()) dec_out = self.evaluate(dec_out_dict) metrics_dict = mdl.CreateDecoderMetrics() key_value_pairs = mdl.PostProcessDecodeOut(dec_out, metrics_dict) self.assertNear(0.0, metrics_dict['corpus_bleu'].value, 1.0e-5) self.assertLen(key_value_pairs, 8) for k, v in key_value_pairs: self.assertIn(k, v) class RNMTModelTest(test_utils.TestCase): def _InputParams(self): p = input_generator.NmtInput.Params() input_file = test_helper.test_src_dir_path( 'tasks/mt/testdata/wmt14_ende_wpm_32k_test.tfrecord') vocab_file = test_helper.test_src_dir_path( 'tasks/mt/testdata/wmt14_ende_wpm_32k_test.vocab') p.file_pattern = 'tfrecord:' + input_file p.file_random_seed = 31415 p.file_parallelism = 1 p.bucket_upper_bound = [40] p.bucket_batch_limit = [8] p.source_max_length = 200 p.target_max_length = 200 p.tokenizer.token_vocab_filepath = vocab_file p.tokenizer.vocab_size = 32000 return p def _EncoderParams(self): p = encoder.MTEncoderBiRNN.Params() p.name = 'encoder' p.emb.vocab_size = 32000 p.emb.embedding_dim = 4 p.emb.max_num_shards = 1 p.lstm_cell_size = 4 p.num_lstm_layers = 3 p.encoder_out_dim = 4 return p def _DecoderParams(self): p = decoder.MTDecoderV1.Params() p.name = 'decoder' p.source_dim = 4 p.emb.vocab_size = 32000 p.emb.embedding_dim = 4 p.emb.max_num_shards = 1 p.rnn_cell_dim = 4 p.rnn_layers = 3 p.attention.hidden_dim = 2 p.softmax.num_classes = 32000 p.softmax.num_shards = 1 return p def _testParams(self): p = model.RNMTModel.Params() p.name = 'test_mdl' p.input = self._InputParams() p.encoder = self._EncoderParams() p.decoder = self._DecoderParams() p.train.learning_rate = 1.0 return p def testConstruction(self): with self.session(): p = self._testParams() mdl = p.Instantiate() flatten_vars = mdl.vars.Flatten() # encoder/embedding: 1 # encoder/lstms: 2 * (3 (forward) + 3 (backward)) # encoder/proj: 2 # decoder/embedding: 1 # decoder/atten: 3 # decoder/lstms: 2 * 3 # decoder/softmax: 2 self.assertEqual(len(flatten_vars), 1 + 12 + 2 + 1 + 3 + 6 + 2) # Should match tf.trainable_variables(). 
self.assertEqual(len(tf.trainable_variables()), len(flatten_vars)) def testFProp(self): with self.session(): tf.random.set_seed(_TF_RANDOM_SEED) p = self._testParams() mdl = p.Instantiate() mdl.FPropDefaultTheta() loss = mdl.loss logp = mdl.eval_metrics['log_pplx'][0] self.evaluate(tf.global_variables_initializer()) vals = [] for _ in range(5): vals += [self.evaluate((loss, logp))] print('actual vals = %s' % np.array_repr(np.array(vals))) self.assertAllClose(vals, [[226.92014, 10.373492], [243.77704, 10.373491], [260.63403, 10.373494], [200.98639, 10.373491], [272.30417, 10.373492]]) def testFPropEvalMode(self): with self.session(), self.SetEval(True): tf.random.set_seed(_TF_RANDOM_SEED) p = self._testParams() mdl = p.Instantiate() mdl.FPropDefaultTheta() loss = mdl.loss logp = mdl.eval_metrics['log_pplx'][0] self.evaluate(tf.global_variables_initializer()) vals = [] for _ in range(5): vals += [self.evaluate((loss, logp))] print('actual vals = %s' % np.array_repr(np.array(vals))) self.assertAllClose(vals, [[226.92014, 10.373492], [243.77704, 10.373491], [260.63403, 10.373494], [200.98639, 10.373491], [272.30417, 10.373492]]) def testBProp(self): with self.session(): tf.random.set_seed(_TF_RANDOM_SEED) p = self._testParams() mdl = p.Instantiate() mdl.FPropDefaultTheta() mdl.BProp() loss = mdl.loss logp = mdl.eval_metrics['log_pplx'][0] self.evaluate(tf.global_variables_initializer()) vals = [] for _ in range(5): vals += [self.evaluate((loss, logp, mdl.train_op))[:2]] print('bprop actual vals = %s' % np.array_repr(np.array(vals))) expected_vals = [ [226.92014, 10.373492], [225.25146, 9.585169], [248.49757, 9.8904505], [212.02884, 10.943424], [314.57098, 11.983657], ] self.assertAllClose(vals, expected_vals, atol=1e-3) def testDecode(self): with self.session(use_gpu=False), self.SetEval(True): tf.random.set_seed(93820985) p = self._testParams() mdl = p.Instantiate() input_batch = mdl.input_generator.GetPreprocessedInputBatch() dec_out_dict = mdl.Decode(input_batch) self.evaluate(tf.global_variables_initializer()) dec_out = self.evaluate(dec_out_dict) metrics_dict = mdl.CreateDecoderMetrics() key_value_pairs = mdl.PostProcessDecodeOut(dec_out, metrics_dict) self.assertNear(0.0, metrics_dict['corpus_bleu'].value, 1.0e-5) self.assertLen(key_value_pairs, 8) for k, v in key_value_pairs: self.assertIn(k, v) def testBatchSplit(self): def Run(num_splits): with self.session(use_gpu=False, graph=tf.Graph()): tf.random.set_seed(93820981) p = self._testParams() p.input.bucket_batch_limit = [ b * 2 / num_splits for b in p.input.bucket_batch_limit ] with cluster_factory.ForTestingWorker(gpus=num_splits): mdl = p.Instantiate() metrics = mdl.FPropDefaultTheta()[0] self.evaluate(tf.global_variables_initializer()) return self.evaluate(metrics['loss']) res1, res2 = Run(1), Run(2) self.assertAllClose(res1[0], res2[0]) self.assertAllEqual(res1[1], res2[1]) def testBatchSizeInInputGenerator(self): with self.session(): tf.random.set_seed(_TF_RANDOM_SEED) p = self._testParams() cluster_params = cluster_factory.Cluster.Params() cluster_params.mode = 'sync' cluster_params.job = 'trainer_client' cluster_params.worker.name = '/job:localhost' cluster_params.worker.gpus_per_replica = 5 cluster_params.input.name = '/job:localhost' cluster_params.input.replicas = 1 cluster_params.input.gpus_per_replica = 0 with cluster_params.Instantiate(): mdl = p.Instantiate() mdl.FPropDefaultTheta() loss = mdl.loss self.evaluate(tf.global_variables_initializer()) _ = self.evaluate(loss) 
self.assertEqual(mdl.input_generator.infeed_bucket_batch_limit, [40]) class HybridModelTest(test_utils.TestCase): def _InputParams(self): p = input_generator.NmtInput.Params() input_file = test_helper.test_src_dir_path( 'tasks/mt/testdata/wmt14_ende_wpm_32k_test.tfrecord') vocab_file = test_helper.test_src_dir_path( 'tasks/mt/testdata/wmt14_ende_wpm_32k_test.vocab') p.file_pattern = 'tfrecord:' + input_file p.file_random_seed = 31415 p.file_parallelism = 1 p.bucket_upper_bound = [40] p.bucket_batch_limit = [8] p.source_max_length = 200 p.target_max_length = 200 p.tokenizer.token_vocab_filepath = vocab_file p.tokenizer.vocab_size = 32000 return p def _EncoderParams(self): p = encoder.TransformerEncoder.Params() p.name = 'encoder' p.random_seed = 1234 p.model_dim = 4 p.token_emb.embedding_dim = 4 p.token_emb.max_num_shards = 1 p.token_emb.params_init = py_utils.WeightInit.GaussianSqrtDim( seed=p.random_seed) p.position_emb.embedding_dim = 4 p.transformer_stack.transformer_tpl.tr_atten_tpl.num_attention_heads = 2 p.transformer_stack.transformer_tpl.tr_fflayer_tpl.hidden_dim = 5 return p def _DecoderParams(self): p = decoder.MTDecoderV1.Params() p.name = 'decoder' p.source_dim = 4 p.emb.vocab_size = 32000 p.emb.embedding_dim = 4 p.emb.max_num_shards = 1 p.rnn_cell_dim = 4 p.rnn_layers = 3 p.attention.hidden_dim = 2 p.softmax.num_classes = 32000 p.softmax.num_shards = 1 return p def _testParams(self): p = model.HybridModel.Params() p.name = 'test_mdl' p.input = self._InputParams() p.encoder = self._EncoderParams() p.decoder = self._DecoderParams() p.train.learning_rate = 1.0 return p def testConstruction(self): with self.session(): p = self._testParams() mdl = p.Instantiate() flatten_vars = mdl.vars.Flatten() print('vars flattened = ', flatten_vars) # encoder: 91 (1 + 36 + 54) # encoder/embedding: 1 # encoder/ff_layer: 6 * 6 # encoder/attention: 9 * 6 # decoder: 12 (1 + 3 + 6 + 2) # decoder/embedding: 1 # decoder/atten: 3 # decoder/lstms: 2 * 3 # decoder/softmax: 2 self.assertEqual(len(flatten_vars), 91 + 12) # Should match tf.trainable_variables(). 
self.assertEqual(len(tf.trainable_variables()), len(flatten_vars)) def testFProp(self): with self.session(): tf.random.set_seed(_TF_RANDOM_SEED) p = self._testParams() mdl = p.Instantiate() mdl.FPropDefaultTheta() loss = mdl.loss logp = mdl.eval_metrics['log_pplx'][0] self.evaluate(tf.global_variables_initializer()) vals = [] for _ in range(5): vals += [self.evaluate((loss, logp))] print('actual vals = %s' % np.array_repr(np.array(vals))) self.assertAllClose(vals, [[226.91527, 10.373269], [243.76906, 10.373152], [260.62787, 10.373248], [200.98814, 10.373582], [272.297, 10.373219]]) def testFPropEvalMode(self): with self.session(), self.SetEval(True): tf.random.set_seed(_TF_RANDOM_SEED) p = self._testParams() mdl = p.Instantiate() mdl.FPropDefaultTheta() loss = mdl.loss logp = mdl.eval_metrics['log_pplx'][0] self.evaluate(tf.global_variables_initializer()) vals = [] for _ in range(5): vals += [self.evaluate((loss, logp))] print('actual vals = %s' % np.array_repr(np.array(vals))) self.assertAllClose(vals, [[226.91527, 10.373269], [243.76906, 10.373152], [260.62787, 10.373248], [200.98814, 10.373582], [272.297, 10.373219]]) def testBProp(self): with self.session(): tf.random.set_seed(_TF_RANDOM_SEED) p = self._testParams() mdl = p.Instantiate() mdl.FPropDefaultTheta() mdl.BProp() loss = mdl.loss logp = mdl.eval_metrics['log_pplx'][0] self.evaluate(tf.global_variables_initializer()) vals = [] for _ in range(5): vals += [self.evaluate((loss, logp, mdl.train_op))[:2]] print('bprop actual vals = %s' % np.array_repr(np.array(vals))) expected_vals = [[226.91527, 10.373269], [222.4018, 9.463906], [248.72293, 9.89942], [181.65323, 9.37565], [312.97754, 11.922954]] self.assertAllClose(vals, expected_vals, atol=1e-3) def testDecode(self): with self.session(use_gpu=False), self.SetEval(True): tf.random.set_seed(93820985) p = self._testParams() mdl = p.Instantiate() input_batch = mdl.input_generator.GetPreprocessedInputBatch() dec_out_dict = mdl.Decode(input_batch) self.evaluate(tf.global_variables_initializer()) dec_out = self.evaluate(dec_out_dict) metrics_dict = mdl.CreateDecoderMetrics() key_value_pairs = mdl.PostProcessDecodeOut(dec_out, metrics_dict) self.assertNear(0.0, metrics_dict['corpus_bleu'].value, 1.0e-5) self.assertLen(key_value_pairs, 8) for k, v in key_value_pairs: self.assertIn(k, v) def testBatchSplit(self): def Run(num_splits): with self.session(use_gpu=False, graph=tf.Graph()): tf.random.set_seed(93820981) p = self._testParams() p.input.bucket_batch_limit = [ b * 2 / num_splits for b in p.input.bucket_batch_limit ] with cluster_factory.ForTestingWorker(gpus=num_splits): mdl = p.Instantiate() metrics = mdl.FPropDefaultTheta()[0] self.evaluate(tf.global_variables_initializer()) return self.evaluate(metrics['loss']) res1, res2 = Run(1), Run(2) self.assertAllClose(res1[0], res2[0]) self.assertAllEqual(res1[1], res2[1]) def testBatchSizeInInputGenerator(self): with self.session(): tf.random.set_seed(_TF_RANDOM_SEED) p = self._testParams() cluster_params = cluster_factory.Cluster.Params() cluster_params.mode = 'sync' cluster_params.job = 'trainer_client' cluster_params.worker.name = '/job:localhost' cluster_params.worker.gpus_per_replica = 5 cluster_params.input.name = '/job:localhost' cluster_params.input.replicas = 1 cluster_params.input.gpus_per_replica = 0 with cluster_params.Instantiate(): mdl = p.Instantiate() mdl.FPropDefaultTheta() loss = mdl.loss self.evaluate(tf.global_variables_initializer()) _ = self.evaluate(loss) self.assertEqual(mdl.input_generator.infeed_bucket_batch_limit, 
[40]) class InsertionModelTest(test_utils.TestCase): def _InputParams(self): p = input_generator.NmtInput.Params() input_file = test_helper.test_src_dir_path( 'tasks/mt/testdata/wmt14_ende_wpm_32k_test.tfrecord') vocab_file = test_helper.test_src_dir_path( 'tasks/mt/testdata/wmt14_ende_wpm_32k_test.vocab') p.file_pattern = 'tfrecord:' + input_file p.file_random_seed = 31415 p.file_parallelism = 1 p.bucket_upper_bound = [40] p.bucket_batch_limit = [8] p.source_max_length = 200 p.target_max_length = 200 p.tokenizer.token_vocab_filepath = vocab_file p.tokenizer.vocab_size = 32000 return p def _DecoderParams(self): p = decoder.InsertionDecoder.Params() p.name = 'decoder' return p def _testParams(self): p = model.InsertionModel.Params() p.name = 'insertion' p.input = self._InputParams() p.decoder = self._DecoderParams() p.random_seed = 12345 return p def testSampleCanvasAndTargets(self): with self.session(): tf.random.set_seed(_TF_RANDOM_SEED) x = np.asarray([[10, 11, 12, 13, 14, 15, 2], [10, 11, 12, 13, 14, 15, 2], [2, 0, 0, 0, 0, 0, 0], [10, 11, 12, 13, 14, 2, 0]], np.int32) x_paddings = np.asarray([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 1]], np.float32) p = self._testParams() mdl = p.Instantiate() descriptor = mdl._SampleCanvasAndTargets( tf.convert_to_tensor(x), tf.convert_to_tensor(x_paddings)) canvas, canvas_paddings, target_indices, target_weights = self.evaluate([ descriptor.canvas, descriptor.canvas_paddings, descriptor.target_indices, descriptor.target_weights ]) canvas_gold = np.asarray([[13, 15, 2, 0, 0], [10, 11, 14, 2, 0], [2, 0, 0, 0, 0], [10, 11, 13, 14, 2]], np.int32) canvas_paddings_gold = np.asarray( [[0., 0., 0., 1., 1.], [0., 0., 0., 0., 1.], [0., 1., 1., 1., 1.], [0., 0., 0., 0., 0.]], np.float32) target_indices_gold = np.asarray( [[0, 0, 10], [0, 0, 11], [0, 0, 12], [0, 0, 2], [0, 1, 14], [0, 1, 2], [0, 2, 2], [1, 0, 2], [1, 1, 2], [1, 2, 12], [1, 2, 13], [1, 2, 2], [1, 3, 15], [1, 3, 2], [2, 0, 2], [3, 0, 2], [3, 1, 2], [3, 2, 12], [3, 2, 2], [3, 3, 2], [3, 4, 2]], np.int32) target_weights_gold = np.asarray([1, 1, 1, 0, 1, 0, 1] + [1, 1, 1, 1, 0, 1, 0] + [1] + [1, 1, 1, 0, 1, 1], np.float32) target_weights_gold = np.reshape(target_weights_gold, [target_weights_gold.shape[0], 1]) self.assertAllEqual(canvas, canvas_gold) self.assertAllEqual(canvas_paddings, canvas_paddings_gold) self.assertAllEqual(target_indices, target_indices_gold) self.assertAllEqual(target_weights, target_weights_gold) def testCreateCanvasAndTargets(self): with self.session(): tf.random.set_seed(_TF_RANDOM_SEED) batch = py_utils.NestedMap( src=py_utils.NestedMap( ids=tf.convert_to_tensor( np.asarray([ [10, 11, 12, 14, 2, 0], [20, 21, 22, 24, 25, 2], ], np.int32)), paddings=tf.convert_to_tensor( np.asarray([[0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0]], np.float32))), tgt=py_utils.NestedMap( ids=tf.convert_to_tensor( np.asarray([[100, 101, 102, 104, 2, 0], [200, 201, 202, 204, 205, 2]], np.int32)), paddings=tf.convert_to_tensor( np.asarray([[0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0]], np.float32)))) p = self._testParams() mdl = p.Instantiate() descriptor = mdl._CreateCanvasAndTargets(batch) canvas, canvas_paddings, target_indices, target_weights = self.evaluate([ descriptor.canvas, descriptor.canvas_paddings, descriptor.target_indices, descriptor.target_weights ]) canvas_gold = np.asarray([ [32014, 32002, 104, 2, 0, 0, 0, 0], [32020, 32021, 32022, 32002, 200, 201, 202, 2], ], np.int32) canvas_paddings_gold = np.asarray( [[0., 0., 0., 0., 1., 1., 1., 1.], [0., 0., 
0., 0., 0., 0., 0., 0.]], np.float32) target_indices_gold = np.asarray( [[0, 0, 10], [0, 0, 11], [0, 0, 12], [0, 0, 2], [0, 1, 2], [1, 0, 2], [1, 1, 2], [1, 2, 2], [1, 3, 24], [1, 3, 25], [1, 3, 2], [0, 2, 100], [0, 2, 101], [0, 2, 102], [0, 2, 2], [0, 3, 2], [1, 4, 2], [1, 5, 2], [1, 6, 2], [1, 7, 204], [1, 7, 205], [1, 7, 2]], np.int32) target_weights_gold = np.asarray([1, 1, 1, 0, 1] + [1, 1, 1, 1, 1, 0] + [1, 1, 1, 0, 1] + [1, 1, 1, 1, 1, 0], np.float32) target_weights_gold = np.reshape(target_weights_gold, [target_weights_gold.shape[0], 1]) self.assertAllEqual(canvas, canvas_gold) self.assertAllEqual(canvas_paddings, canvas_paddings_gold) self.assertAllEqual(target_indices, target_indices_gold) self.assertAllEqual(target_weights, target_weights_gold) def testConstruction(self): with self.session(): p = self._testParams() mdl = p.Instantiate() flatten_vars = mdl.vars.Flatten() self.assertEqual(len(flatten_vars), 122) self.assertEqual(len(tf.trainable_variables()), len(flatten_vars)) def testFPropGraph(self): """Test the construction of the fprop graph, then fprop the graph.""" with self.session(): p = self._testParams() mdl = p.Instantiate() mdl.FPropDefaultTheta() self.evaluate(tf.global_variables_initializer()) self.evaluate(mdl.loss) class TransformerXEnDecTest(test_utils.TestCase): def _InputParams(self): p = input_generator.NmtDoubleInput.Params() input_file = test_helper.test_src_dir_path( 'tasks/mt/testdata/wmt14_ende_wpm_32k_doublebatch_test-000-001') p.file_pattern = 'tfrecord:' + input_file p.tokenizer.token_vocab_filepath = test_helper.test_src_dir_path( 'tasks/mt/testdata/wmt14_ende_wpm_32k_test.vocab') p.file_random_seed = 31415 p.file_parallelism = 1 p.bucket_upper_bound = [10, 20] p.bucket_batch_limit = [4, 2] p.source_mask_ratio = -1 p.source_mask_ratio_beta = '2,6' p.mask_word_id = 31999 p.pad_id = 31998 p.mask_words_ratio = 0.25 p.permutation_distance = 3 p.vocab_file = p.tokenizer.token_vocab_filepath p.packed_input = False return p def _EncoderParams(self): p = encoder.TransformerXEncoder.Params() p.name = 'mix_encoder' p.token_emb.params_init = py_utils.WeightInit.GaussianSqrtDim() p.token_emb.vocab_size = 32000 p.token_emb.embedding_dim = 4 p.token_emb.max_num_shards = 1 p.token_emb.scale_sqrt_depth = True p.token_emb.vn = py_utils.VariationalNoiseParams(1.0, False, False) p.position_emb.embedding_dim = 4 p.position_emb.trainable_scaling = False p.model_dim = 4 ts = p.transformer_stack ts.model_dim = 4 ts.num_transformer_layers = 6 ts.transformer_tpl.tr_atten_tpl.num_attention_heads = 2 ts.transformer_tpl.tr_fflayer_tpl.hidden_dim = 4 p.random_seed = 54321 return p def _DecoderParams(self): p = decoder.TransformerXDecoder.Params() p.name = 'mix_decoder' p.token_emb.params_init = py_utils.WeightInit.GaussianSqrtDim() p.token_emb.vocab_size = 32000 p.token_emb.embedding_dim = 4 p.token_emb.max_num_shards = 1 p.token_emb.scale_sqrt_depth = True p.token_emb.vn = py_utils.VariationalNoiseParams(1.0, False, False) p.position_emb.embedding_dim = 4 p.position_emb.trainable_scaling = False p.model_dim = 4 p.source_dim = 4 p.num_trans_layers = 6 p.trans_tpl.source_dim = p.model_dim p.trans_tpl.tr_atten_tpl.source_dim = p.model_dim p.trans_tpl.tr_atten_tpl.num_attention_heads = 2 p.trans_tpl.tr_atten_tpl.atten_hidden_dim = 4 p.trans_tpl.tr_atten_tpl.atten_tpl.context_dim = p.model_dim p.trans_tpl.tr_fflayer_tpl.hidden_dim = 4 p.trans_tpl.tr_fflayer_tpl.input_dim = p.model_dim p.label_smoothing = layers.UniformLabelSmoother.Params() p.label_smoothing.uncertainty = 0.1 
p.per_word_avg_loss = True p.softmax.num_classes = 32000 p.softmax.num_shards = 1 p.random_seed = 54321 return p def _testParams(self): p = model.TransformerXEnDecModel.Params() p.name = 'xendec' p.input = self._InputParams() p.encoder = self._EncoderParams() p.decoder = self._DecoderParams() p.random_seed = 12345 return p def testFProp(self, dtype=tf.float32, fprop_dtype=tf.float32): with self.session(use_gpu=False): tf.random.set_seed(_TF_RANDOM_SEED) p = self._testParams() p.dtype = dtype if fprop_dtype: p.fprop_dtype = fprop_dtype p.input.dtype = fprop_dtype mdl = p.Instantiate() dec_metrics, _ = mdl.FPropDefaultTheta() self.evaluate(tf.global_variables_initializer()) vals = [] print(mdl) for _ in range(5): vals += [ self.evaluate( (dec_metrics['clean_loss'][0], dec_metrics['other_loss'][0], dec_metrics['mix_loss'][0], dec_metrics['loss'][0])) ] print('actual vals = %s' % np.array_repr(np.array(vals))) self.assertAllClose( vals, [[10.373864, 10.371083, 10.372491, 31.11744], [10.36428, 10.379262, 10.366394, 31.109936], [10.369206, 10.372709, 10.369126, 31.111042], [10.363656, 10.364362, 10.362683, 31.090702], [10.371622, 10.374066, 10.371591, 31.11728]], rtol=1e-02, atol=1e-02) if __name__ == '__main__': tf.test.main()
tensorflow/lingvo
lingvo/tasks/mt/model_test.py
Python
apache-2.0
35,425
from django.apps import AppConfig


class FundConfig(AppConfig):
    name = 'fund'
szu-stu/ezFund
fund/apps.py
Python
apache-2.0
83
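For context on the record above: an AppConfig only takes effect once the app is listed in the project's settings. A minimal sketch follows, where the dotted path 'fund.apps.FundConfig' is inferred from the file location rather than taken from the project itself.

# Sketch: wiring the app into settings.py so Django loads FundConfig.
# The dotted path is inferred from fund/apps.py; on Django 3.2+ a bare 'fund'
# entry would pick up the same AppConfig automatically.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'fund.apps.FundConfig',
]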
# https://www.reddit.com/r/learnpython/comments/82ucgu/calling_an_input_inside_a_def_function/


def main():
    while True:
        word = raw_input('Enter a word: ')
        if word == '-1':
            break
        not_ = '' if word[:] == word[::-1] else ' not'
        print "Word '%s' is%s a palindrome" % (word, not_)


main()
bandarji/lekhan
python/reddit/palindrome.py
Python
apache-2.0
332
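The snippet above is Python 2 (raw_input and the print statement). Purely as an illustrative sketch, the same loop written for Python 3 would look like this; it is not part of the original file.

# Sketch: Python 3 rendering of the palindrome loop above; behaviour is meant
# to be identical to the Python 2 original.
def main():
    while True:
        word = input('Enter a word: ')
        if word == '-1':
            break
        not_ = '' if word == word[::-1] else ' not'
        print("Word '%s' is%s a palindrome" % (word, not_))


if __name__ == '__main__':
    main()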
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012, Cisco Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # @author: Rohit Agarwalla, Cisco Systems, Inc. # @author: Arvind Somya, Cisco Systems, Inc. (asomya@cisco.com) # from sqlalchemy.orm import exc import quantum.db.api as db from quantum.openstack.common import log as logging from quantum.plugins.cisco.common import cisco_exceptions as c_exc from quantum.plugins.cisco.db import nexus_models_v2 LOG = logging.getLogger(__name__) def initialize(): """Establish database connection and load models""" db.configure_db() def get_all_nexusport_bindings(): """Lists all the nexusport bindings""" LOG.debug(_("get_all_nexusport_bindings() called")) session = db.get_session() try: bindings = session.query(nexus_models_v2.NexusPortBinding).all() return bindings except exc.NoResultFound: return [] def get_nexusport_binding(port_id, vlan_id, switch_ip, instance_id): """Lists a nexusport binding""" LOG.debug(_("get_nexusport_binding() called")) session = db.get_session() try: binding = (session.query(nexus_models_v2.NexusPortBinding). filter_by(vlan_id=vlan_id).filter_by(switch_ip=switch_ip). filter_by(port_id=port_id). filter_by(instance_id=instance_id).all()) return binding except exc.NoResultFound: raise c_exc.NexusPortBindingNotFound(vlan_id=vlan_id) def get_nexusvlan_binding(vlan_id, switch_ip): """Lists a vlan and switch binding""" LOG.debug(_("get_nexusvlan_binding() called")) session = db.get_session() try: binding = (session.query(nexus_models_v2.NexusPortBinding). filter_by(vlan_id=vlan_id).filter_by(switch_ip=switch_ip). all()) return binding except exc.NoResultFound: raise c_exc.NexusPortBindingNotFound(vlan_id=vlan_id) def add_nexusport_binding(port_id, vlan_id, switch_ip, instance_id): """Adds a nexusport binding""" LOG.debug(_("add_nexusport_binding() called")) session = db.get_session() binding = nexus_models_v2.NexusPortBinding( port_id, vlan_id, switch_ip, instance_id) session.add(binding) session.flush() return binding def remove_nexusport_binding(port_id, vlan_id, switch_ip, instance_id): """Removes a nexusport binding""" LOG.debug(_("remove_nexusport_binding() called")) session = db.get_session() try: binding = (session.query(nexus_models_v2.NexusPortBinding). filter_by(vlan_id=vlan_id).filter_by(switch_ip=switch_ip). filter_by(port_id=port_id). filter_by(instance_id=instance_id).all()) for bind in binding: session.delete(bind) session.flush() return binding except exc.NoResultFound: pass def update_nexusport_binding(port_id, new_vlan_id): """Updates nexusport binding""" LOG.debug(_("update_nexusport_binding called")) session = db.get_session() try: binding = (session.query(nexus_models_v2.NexusPortBinding). 
filter_by(port_id=port_id).one()) if new_vlan_id: binding["vlan_id"] = new_vlan_id session.merge(binding) session.flush() return binding except exc.NoResultFound: raise c_exc.NexusPortBindingNotFound() def get_nexusvm_binding(vlan_id, instance_id): """Lists nexusvm bindings""" LOG.debug(_("get_nexusvm_binding() called")) session = db.get_session() try: binding = (session.query(nexus_models_v2.NexusPortBinding). filter_by(instance_id=instance_id). filter_by(vlan_id=vlan_id).first()) return binding except exc.NoResultFound: raise c_exc.NexusPortBindingNotFound(vlan_id=vlan_id) def get_port_vlan_switch_binding(port_id, vlan_id, switch_ip): """Lists nexusvm bindings""" LOG.debug(_("get_port_vlan_switch_binding() called")) session = db.get_session() try: binding = (session.query(nexus_models_v2.NexusPortBinding). filter_by(port_id=port_id).filter_by(switch_ip=switch_ip). filter_by(vlan_id=vlan_id).all()) return binding except exc.NoResultFound: raise c_exc.NexusPortBindingNotFound(vlan_id=vlan_id)
rossella/neutron
quantum/plugins/cisco/db/nexus_db_v2.py
Python
apache-2.0
4,960
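As a rough usage sketch of the helpers in the record above: every function opens its own session via db.get_session(), so calls compose directly. All values below are placeholders, and the sketch assumes initialize() can reach a configured database; it is not taken from the plugin itself.

# Illustrative only: a typical call sequence for the Nexus port-binding helpers.
# Placeholder values; requires a configured Quantum database behind db.get_session().
from quantum.plugins.cisco.db import nexus_db_v2 as nexus_db

nexus_db.initialize()
nexus_db.add_nexusport_binding('ethernet:1/10', 100, '10.0.0.1', 'vm-uuid-1')
print(nexus_db.get_nexusvlan_binding(100, '10.0.0.1'))
nexus_db.update_nexusport_binding('ethernet:1/10', new_vlan_id=200)
nexus_db.remove_nexusport_binding('ethernet:1/10', 200, '10.0.0.1', 'vm-uuid-1')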
from django.test import TestCase from tenancy.filters import * from tenancy.models import Tenant, TenantGroup class TenantGroupTestCase(TestCase): queryset = TenantGroup.objects.all() filterset = TenantGroupFilterSet @classmethod def setUpTestData(cls): parent_tenant_groups = ( TenantGroup(name='Parent Tenant Group 1', slug='parent-tenant-group-1'), TenantGroup(name='Parent Tenant Group 2', slug='parent-tenant-group-2'), TenantGroup(name='Parent Tenant Group 3', slug='parent-tenant-group-3'), ) for tenantgroup in parent_tenant_groups: tenantgroup.save() tenant_groups = ( TenantGroup(name='Tenant Group 1', slug='tenant-group-1', parent=parent_tenant_groups[0], description='A'), TenantGroup(name='Tenant Group 2', slug='tenant-group-2', parent=parent_tenant_groups[1], description='B'), TenantGroup(name='Tenant Group 3', slug='tenant-group-3', parent=parent_tenant_groups[2], description='C'), ) for tenantgroup in tenant_groups: tenantgroup.save() def test_id(self): params = {'id': self.queryset.values_list('pk', flat=True)[:2]} self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2) def test_name(self): params = {'name': ['Tenant Group 1', 'Tenant Group 2']} self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2) def test_slug(self): params = {'slug': ['tenant-group-1', 'tenant-group-2']} self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2) def test_description(self): params = {'description': ['A', 'B']} self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2) def test_parent(self): parent_groups = TenantGroup.objects.filter(name__startswith='Parent')[:2] params = {'parent_id': [parent_groups[0].pk, parent_groups[1].pk]} self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2) params = {'parent': [parent_groups[0].slug, parent_groups[1].slug]} self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2) class TenantTestCase(TestCase): queryset = Tenant.objects.all() filterset = TenantFilterSet @classmethod def setUpTestData(cls): tenant_groups = ( TenantGroup(name='Tenant Group 1', slug='tenant-group-1'), TenantGroup(name='Tenant Group 2', slug='tenant-group-2'), TenantGroup(name='Tenant Group 3', slug='tenant-group-3'), ) for tenantgroup in tenant_groups: tenantgroup.save() tenants = ( Tenant(name='Tenant 1', slug='tenant-1', group=tenant_groups[0]), Tenant(name='Tenant 2', slug='tenant-2', group=tenant_groups[1]), Tenant(name='Tenant 3', slug='tenant-3', group=tenant_groups[2]), ) Tenant.objects.bulk_create(tenants) def test_id(self): params = {'id': self.queryset.values_list('pk', flat=True)[:2]} self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2) def test_name(self): params = {'name': ['Tenant 1', 'Tenant 2']} self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2) def test_slug(self): params = {'slug': ['tenant-1', 'tenant-2']} self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2) def test_group(self): group = TenantGroup.objects.all()[:2] params = {'group_id': [group[0].pk, group[1].pk]} self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2) params = {'group': [group[0].slug, group[1].slug]} self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
digitalocean/netbox
netbox/tenancy/tests/test_filters.py
Python
apache-2.0
3,818
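The tests in the record above all follow one django-filter pattern: build a params dict, construct the FilterSet with the queryset, and inspect .qs. A small standalone sketch of that pattern, assuming the same netbox tenancy app is importable; the helper name is ours.

# Sketch of the filterset pattern the tests above exercise; mirrors
# TenantTestCase.test_name and is not part of the original test module.
from tenancy.filters import TenantFilterSet
from tenancy.models import Tenant

def tenants_named(names):
    # FilterSet(data, queryset).qs returns the filtered queryset.
    return TenantFilterSet({'name': names}, Tenant.objects.all()).qs

matching = tenants_named(['Tenant 1', 'Tenant 2'])
print(matching.count())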
# Copyright 2017 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import glob import os import unittest from src.test.py.bazel import test_base class BazelWindowsCppTest(test_base.TestBase): def createProjectFiles(self): self.CreateWorkspaceWithDefaultRepos('WORKSPACE') self.ScratchFile('BUILD', [ 'package(', ' default_visibility = ["//visibility:public"],', ' features=["windows_export_all_symbols"]', ')', '', 'cc_library(', ' name = "A",', ' srcs = ["a.cc"],', ' hdrs = ["a.h"],', ' copts = ["/DCOMPILING_A_DLL"],', ' features = ["no_windows_export_all_symbols"],', ')', '', 'cc_library(', ' name = "B",', ' srcs = ["b.cc"],', ' hdrs = ["b.h"],', ' deps = [":A"],', ' copts = ["/DNO_DLLEXPORT"],', ')', '', 'cc_binary(', ' name = "C",', ' srcs = ["c.cc"],', ' deps = [":A", ":B" ],', ' linkstatic = 0,', ')', ]) self.ScratchFile('a.cc', [ '#include <stdio.h>', '#include "a.h"', 'int a = 0;', 'void hello_A() {', ' a++;', ' printf("Hello A, %d\\n", a);', '}', ]) self.ScratchFile('b.cc', [ '#include <stdio.h>', '#include "a.h"', '#include "b.h"', 'void hello_B() {', ' hello_A();', ' printf("Hello B\\n");', '}', ]) header_temp = [ '#ifndef %{name}_H', '#define %{name}_H', '', '#if NO_DLLEXPORT', ' #define DLLEXPORT', '#elif COMPILING_%{name}_DLL', ' #define DLLEXPORT __declspec(dllexport)', '#else', ' #define DLLEXPORT __declspec(dllimport)', '#endif', '', 'DLLEXPORT void hello_%{name}();', '', '#endif', ] self.ScratchFile('a.h', [line.replace('%{name}', 'A') for line in header_temp]) self.ScratchFile('b.h', [line.replace('%{name}', 'B') for line in header_temp]) c_cc_content = [ '#include <stdio.h>', '#include "a.h"', '#include "b.h"', '', 'void hello_C() {', ' hello_A();', ' hello_B();', ' printf("Hello C\\n");', '}', '', 'int main() {', ' hello_C();', ' return 0;', '}', ] self.ScratchFile('c.cc', c_cc_content) self.ScratchFile('lib/BUILD', [ 'cc_library(', ' name = "A",', ' srcs = ["dummy.cc"],', ' features = ["windows_export_all_symbols"],', ' visibility = ["//visibility:public"],', ')', ]) self.ScratchFile('lib/dummy.cc', ['void dummy() {}']) self.ScratchFile('main/main.cc', c_cc_content) def getBazelInfo(self, info_key): exit_code, stdout, stderr = self.RunBazel(['info', info_key]) self.AssertExitCode(exit_code, 0, stderr) return stdout[0] def testBuildDynamicLibraryWithUserExportedSymbol(self): self.createProjectFiles() bazel_bin = self.getBazelInfo('bazel-bin') # //:A export symbols by itself using __declspec(dllexport), so it doesn't # need Bazel to export symbols using DEF file. exit_code, _, stderr = self.RunBazel( ['build', '//:A', '--output_groups=dynamic_library']) self.AssertExitCode(exit_code, 0, stderr) # TODO(pcloudy): change suffixes to .lib and .dll after making DLL # extensions correct on Windows. 
import_library = os.path.join(bazel_bin, 'A.if.lib') shared_library = os.path.join(bazel_bin, 'A.dll') empty_def_file = os.path.join(bazel_bin, 'A.gen.empty.def') self.assertTrue(os.path.exists(import_library)) self.assertTrue(os.path.exists(shared_library)) # An empty DEF file should be generated for //:A self.assertTrue(os.path.exists(empty_def_file)) def testBuildDynamicLibraryWithExportSymbolFeature(self): self.createProjectFiles() bazel_bin = self.getBazelInfo('bazel-bin') # //:B doesn't export symbols by itself, so it need Bazel to export symbols # using DEF file. exit_code, _, stderr = self.RunBazel( ['build', '//:B', '--output_groups=dynamic_library']) self.AssertExitCode(exit_code, 0, stderr) # TODO(pcloudy): change suffixes to .lib and .dll after making DLL # extensions correct on Windows. import_library = os.path.join(bazel_bin, 'B.if.lib') shared_library = os.path.join(bazel_bin, 'B.dll') def_file = os.path.join(bazel_bin, 'B.gen.def') self.assertTrue(os.path.exists(import_library)) self.assertTrue(os.path.exists(shared_library)) # DEF file should be generated for //:B self.assertTrue(os.path.exists(def_file)) # Test build //:B if windows_export_all_symbols feature is disabled by # no_windows_export_all_symbols. exit_code, _, stderr = self.RunBazel([ 'build', '//:B', '--output_groups=dynamic_library', '--features=no_windows_export_all_symbols' ]) self.AssertExitCode(exit_code, 0, stderr) import_library = os.path.join(bazel_bin, 'B.if.lib') shared_library = os.path.join(bazel_bin, 'B.dll') empty_def_file = os.path.join(bazel_bin, 'B.gen.empty.def') self.assertTrue(os.path.exists(import_library)) self.assertTrue(os.path.exists(shared_library)) # An empty DEF file should be generated for //:B self.assertTrue(os.path.exists(empty_def_file)) self.AssertFileContentNotContains(empty_def_file, 'hello_B') def testBuildCcBinaryWithDependenciesDynamicallyLinked(self): self.createProjectFiles() bazel_bin = self.getBazelInfo('bazel-bin') # Since linkstatic=0 is specified for //:C, it's dependencies should be # dynamically linked. exit_code, _, stderr = self.RunBazel(['build', '//:C']) self.AssertExitCode(exit_code, 0, stderr) # TODO(pcloudy): change suffixes to .lib and .dll after making DLL # extensions correct on # Windows. 
# a_import_library self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'A.if.lib'))) # a_shared_library self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'A.dll'))) # a_def_file self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'A.gen.empty.def'))) # b_import_library self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'B.if.lib'))) # b_shared_library self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'B.dll'))) # b_def_file self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'B.gen.def'))) # c_exe self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'C.exe'))) def testBuildCcBinaryFromDifferentPackage(self): self.createProjectFiles() self.ScratchFile('main/BUILD', [ 'cc_binary(', ' name = "main",', ' srcs = ["main.cc"],', ' deps = ["//:B"],', ' linkstatic = 0,' ')', ]) bazel_bin = self.getBazelInfo('bazel-bin') exit_code, _, stderr = self.RunBazel(['build', '//main:main']) self.AssertExitCode(exit_code, 0, stderr) # Test if A.dll and B.dll are copied to the directory of main.exe main_bin = os.path.join(bazel_bin, 'main/main.exe') self.assertTrue(os.path.exists(main_bin)) self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'main/A.dll'))) self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'main/B.dll'))) # Run the binary to see if it runs successfully exit_code, stdout, stderr = self.RunProgram([main_bin]) self.AssertExitCode(exit_code, 0, stderr) self.assertEqual(['Hello A, 1', 'Hello A, 2', 'Hello B', 'Hello C'], stdout) def testBuildCcBinaryDependsOnConflictDLLs(self): self.createProjectFiles() self.ScratchFile( 'main/BUILD', [ 'cc_binary(', ' name = "main",', ' srcs = ["main.cc"],', ' deps = ["//:B", "//lib:A"],', # Transitively depends on //:A ' linkstatic = 0,' ')', ]) bazel_bin = self.getBazelInfo('bazel-bin') # //main:main depends on both //lib:A and //:A exit_code, _, stderr = self.RunBazel( ['build', '//main:main', '--incompatible_avoid_conflict_dlls']) self.AssertExitCode(exit_code, 0, stderr) # Run the binary to see if it runs successfully main_bin = os.path.join(bazel_bin, 'main/main.exe') exit_code, stdout, stderr = self.RunProgram([main_bin]) self.AssertExitCode(exit_code, 0, stderr) self.assertEqual(['Hello A, 1', 'Hello A, 2', 'Hello B', 'Hello C'], stdout) # There are 2 A_{hash}.dll since //main:main depends on both //lib:A and # //:A self.assertEqual( len(glob.glob(os.path.join(bazel_bin, 'main', 'A_*.dll'))), 2) # There is only 1 B_{hash}.dll self.assertEqual( len(glob.glob(os.path.join(bazel_bin, 'main', 'B_*.dll'))), 1) def testBuildDifferentCcBinariesDependOnConflictDLLs(self): self.createProjectFiles() self.ScratchFile( 'main/BUILD', [ 'cc_binary(', ' name = "main",', ' srcs = ["main.cc"],', ' deps = ["//:B"],', # Transitively depends on //:A ' linkstatic = 0,' ')', '', 'cc_binary(', ' name = "other_main",', ' srcs = ["other_main.cc"],', ' deps = ["//lib:A"],', ' linkstatic = 0,' ')', ]) bazel_bin = self.getBazelInfo('bazel-bin') self.ScratchFile('main/other_main.cc', ['int main() {return 0;}']) # Building //main:main should succeed exit_code, _, stderr = self.RunBazel( ['build', '//main:main', '--incompatible_avoid_conflict_dlls']) self.AssertExitCode(exit_code, 0, stderr) main_bin = os.path.join(bazel_bin, 'main/main.exe') # Run the main_bin binary to see if it runs successfully exit_code, stdout, stderr = self.RunProgram([main_bin]) self.AssertExitCode(exit_code, 0, stderr) self.assertEqual(['Hello A, 1', 'Hello A, 2', 'Hello B', 'Hello C'], stdout) # There is only 1 A_{hash}.dll since //main:main depends transitively on # //:A 
self.assertEqual( len(glob.glob(os.path.join(bazel_bin, 'main', 'A_*.dll'))), 1) # There is only 1 B_{hash}.dll self.assertEqual( len(glob.glob(os.path.join(bazel_bin, 'main', 'B_*.dll'))), 1) # Building //main:other_main should succeed exit_code, _, stderr = self.RunBazel([ 'build', '//main:main', '//main:other_main', '--incompatible_avoid_conflict_dlls' ]) self.AssertExitCode(exit_code, 0, stderr) other_main_bin = os.path.join(bazel_bin, 'main/other_main.exe') # Run the other_main_bin binary to see if it runs successfully exit_code, stdout, stderr = self.RunProgram([other_main_bin]) self.AssertExitCode(exit_code, 0, stderr) # There are 2 A_{hash}.dll since //main:main depends on //:A # and //main:other_main depends on //lib:A self.assertEqual( len(glob.glob(os.path.join(bazel_bin, 'main', 'A_*.dll'))), 2) def testDLLIsCopiedFromExternalRepo(self): self.ScratchFile('ext_repo/WORKSPACE') self.ScratchFile('ext_repo/BUILD', [ 'cc_library(', ' name = "A",', ' srcs = ["a.cc"],', ' features = ["windows_export_all_symbols"],', ' visibility = ["//visibility:public"],', ')', ]) self.ScratchFile('ext_repo/a.cc', [ '#include <stdio.h>', 'void hello_A() {', ' printf("Hello A\\n");', '}', ]) self.ScratchFile('WORKSPACE', [ 'local_repository(', ' name = "ext_repo",', ' path = "ext_repo",', ')', ]) self.ScratchFile('BUILD', [ 'cc_binary(', ' name = "main",', ' srcs = ["main.cc"],', ' deps = ["@ext_repo//:A"],', ' linkstatic = 0,', ')', ]) self.ScratchFile('main.cc', [ 'extern void hello_A();', '', 'int main() {', ' hello_A();', ' return 0;', '}', ]) bazel_bin = self.getBazelInfo('bazel-bin') exit_code, _, stderr = self.RunBazel(['build', '//:main']) self.AssertExitCode(exit_code, 0, stderr) # Test if A.dll is copied to the directory of main.exe main_bin = os.path.join(bazel_bin, 'main.exe') self.assertTrue(os.path.exists(main_bin)) self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'A.dll'))) # Run the binary to see if it runs successfully exit_code, stdout, stderr = self.RunProgram([main_bin]) self.AssertExitCode(exit_code, 0, stderr) self.assertEqual(['Hello A'], stdout) def testDynamicLinkingMSVCRT(self): self.createProjectFiles() bazel_output = self.getBazelInfo('output_path') # By default, it should link to msvcrt dynamically. exit_code, _, stderr = self.RunBazel( ['build', '//:A', '--output_groups=dynamic_library', '-s']) paramfile = os.path.join( bazel_output, 'x64_windows-fastbuild/bin/A.dll-2.params') self.AssertExitCode(exit_code, 0, stderr) self.assertIn('/MD', ''.join(stderr)) self.AssertFileContentContains(paramfile, '/DEFAULTLIB:msvcrt.lib') self.assertNotIn('/MT', ''.join(stderr)) self.AssertFileContentNotContains(paramfile, '/DEFAULTLIB:libcmt.lib') # Test build in debug mode. exit_code, _, stderr = self.RunBazel( ['build', '-c', 'dbg', '//:A', '--output_groups=dynamic_library', '-s']) paramfile = os.path.join(bazel_output, 'x64_windows-dbg/bin/A.dll-2.params') self.AssertExitCode(exit_code, 0, stderr) self.assertIn('/MDd', ''.join(stderr)) self.AssertFileContentContains(paramfile, '/DEFAULTLIB:msvcrtd.lib') self.assertNotIn('/MTd', ''.join(stderr)) self.AssertFileContentNotContains(paramfile, '/DEFAULTLIB:libcmtd.lib') def testStaticLinkingMSVCRT(self): self.createProjectFiles() bazel_output = self.getBazelInfo('output_path') # With static_link_msvcrt feature, it should link to msvcrt statically. 
exit_code, _, stderr = self.RunBazel([ 'build', '//:A', '--output_groups=dynamic_library', '--features=static_link_msvcrt', '-s' ]) paramfile = os.path.join( bazel_output, 'x64_windows-fastbuild/bin/A.dll-2.params') self.AssertExitCode(exit_code, 0, stderr) self.assertNotIn('/MD', ''.join(stderr)) self.AssertFileContentNotContains(paramfile, '/DEFAULTLIB:msvcrt.lib') self.assertIn('/MT', ''.join(stderr)) self.AssertFileContentContains(paramfile, '/DEFAULTLIB:libcmt.lib') # Test build in debug mode. exit_code, _, stderr = self.RunBazel([ 'build', '-c', 'dbg', '//:A', '--output_groups=dynamic_library', '--features=static_link_msvcrt', '-s' ]) paramfile = os.path.join(bazel_output, 'x64_windows-dbg/bin/A.dll-2.params') self.AssertExitCode(exit_code, 0, stderr) self.assertNotIn('/MDd', ''.join(stderr)) self.AssertFileContentNotContains(paramfile, '/DEFAULTLIB:msvcrtd.lib') self.assertIn('/MTd', ''.join(stderr)) self.AssertFileContentContains(paramfile, '/DEFAULTLIB:libcmtd.lib') def testBuildSharedLibraryFromCcBinaryWithStaticLink(self): self.createProjectFiles() self.ScratchFile( 'main/BUILD', [ 'cc_binary(', ' name = "main.dll",', ' srcs = ["main.cc"],', ' deps = ["//:B"],', # Transitively depends on //:A ' linkstatic = 1,' ' linkshared = 1,' ' features=["windows_export_all_symbols"]', ')', ]) bazel_bin = self.getBazelInfo('bazel-bin') exit_code, _, stderr = self.RunBazel([ 'build', '//main:main.dll', '--output_groups=default,runtime_dynamic_libraries,interface_library' ]) self.AssertExitCode(exit_code, 0, stderr) main_library = os.path.join(bazel_bin, 'main/main.dll') main_interface = os.path.join(bazel_bin, 'main/main.dll.if.lib') def_file = os.path.join(bazel_bin, 'main/main.dll.gen.def') self.assertTrue(os.path.exists(main_library)) self.assertTrue(os.path.exists(main_interface)) self.assertTrue(os.path.exists(def_file)) # A.dll and B.dll should not be copied. self.assertFalse(os.path.exists(os.path.join(bazel_bin, 'main/A.dll'))) self.assertFalse(os.path.exists(os.path.join(bazel_bin, 'main/B.dll'))) self.AssertFileContentContains(def_file, 'hello_A') self.AssertFileContentContains(def_file, 'hello_B') self.AssertFileContentContains(def_file, 'hello_C') def testBuildSharedLibraryFromCcBinaryWithDynamicLink(self): self.createProjectFiles() self.ScratchFile( 'main/BUILD', [ 'cc_binary(', ' name = "main.dll",', ' srcs = ["main.cc"],', ' deps = ["//:B"],', # Transitively depends on //:A ' linkstatic = 0,' ' linkshared = 1,' ' features=["windows_export_all_symbols"]', ')', '', 'genrule(', ' name = "renamed_main",', ' srcs = [":main.dll"],', ' outs = ["main_renamed.dll"],', ' cmd = "cp $< $@",', ')', ]) bazel_bin = self.getBazelInfo('bazel-bin') exit_code, _, stderr = self.RunBazel([ 'build', '//main:main.dll', '--output_groups=default,runtime_dynamic_libraries,interface_library' ]) self.AssertExitCode(exit_code, 0, stderr) main_library = os.path.join(bazel_bin, 'main/main.dll') main_interface = os.path.join(bazel_bin, 'main/main.dll.if.lib') def_file = os.path.join(bazel_bin, 'main/main.dll.gen.def') self.assertTrue(os.path.exists(main_library)) self.assertTrue(os.path.exists(main_interface)) self.assertTrue(os.path.exists(def_file)) # A.dll and B.dll should be built and copied because they belong to # runtime_dynamic_libraries output group. self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'main/A.dll'))) self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'main/B.dll'))) # hello_A and hello_B should not be exported. 
self.AssertFileContentNotContains(def_file, 'hello_A') self.AssertFileContentNotContains(def_file, 'hello_B') self.AssertFileContentContains(def_file, 'hello_C') # The copy should succeed since //main:main.dll is only supposed to refer to # main.dll, A.dll and B.dll should be in a separate output group. exit_code, _, stderr = self.RunBazel(['build', '//main:renamed_main']) self.AssertExitCode(exit_code, 0, stderr) def testGetDefFileOfSharedLibraryFromCcBinary(self): self.createProjectFiles() self.ScratchFile( 'main/BUILD', [ 'cc_binary(', ' name = "main.dll",', ' srcs = ["main.cc"],', ' deps = ["//:B"],', # Transitively depends on //:A ' linkstatic = 1,' ' linkshared = 1,' ')', ]) bazel_bin = self.getBazelInfo('bazel-bin') exit_code, _, stderr = self.RunBazel( ['build', '//main:main.dll', '--output_groups=def_file']) self.AssertExitCode(exit_code, 0, stderr) # Although windows_export_all_symbols is not specified for this target, # we should still be able to get the DEF file by def_file output group. def_file = os.path.join(bazel_bin, 'main/main.dll.gen.def') self.assertTrue(os.path.exists(def_file)) self.AssertFileContentContains(def_file, 'hello_A') self.AssertFileContentContains(def_file, 'hello_B') self.AssertFileContentContains(def_file, 'hello_C') def testBuildSharedLibraryWithoutAnySymbolExported(self): self.createProjectFiles() self.ScratchFile('BUILD', [ 'cc_binary(', ' name = "A.dll",', ' srcs = ["a.cc", "a.h"],', ' copts = ["/DNO_DLLEXPORT"],', ' linkshared = 1,' ')', ]) bazel_bin = self.getBazelInfo('bazel-bin') exit_code, _, stderr = self.RunBazel(['build', '//:A.dll']) self.AssertExitCode(exit_code, 0, stderr) # Although windows_export_all_symbols is not specified for this target, # we should still be able to build a DLL without any symbol exported. empty_def_file = os.path.join(bazel_bin, 'A.dll.gen.empty.def') self.assertTrue(os.path.exists(empty_def_file)) self.AssertFileContentNotContains(empty_def_file, 'hello_A') def testUsingDefFileGeneratedFromCcLibrary(self): self.CreateWorkspaceWithDefaultRepos('WORKSPACE') self.ScratchFile('lib_A.cc', ['void hello_A() {}']) self.ScratchFile('lib_B.cc', ['void hello_B() {}']) self.ScratchFile('BUILD', [ 'cc_library(', ' name = "lib_A",', ' srcs = ["lib_A.cc"],', ')', '', 'cc_library(', ' name = "lib_B",', ' srcs = ["lib_B.cc"],', ' deps = [":lib_A"]', ')', '', 'filegroup(', ' name = "lib_B_symbols",', ' srcs = [":lib_B"],', ' output_group = "def_file",', ')', '', 'cc_binary(', ' name = "lib.dll",', ' deps = [":lib_B"],', ' win_def_file = ":lib_B_symbols",', ' linkshared = 1,', ')', ]) # Test specifying DEF file in cc_binary bazel_bin = self.getBazelInfo('bazel-bin') exit_code, _, stderr = self.RunBazel(['build', '//:lib.dll', '-s']) self.AssertExitCode(exit_code, 0, stderr) def_file = bazel_bin + '/lib_B.gen.def' self.assertTrue(os.path.exists(def_file)) # hello_A should not be exported self.AssertFileContentNotContains(def_file, 'hello_A') # hello_B should be exported self.AssertFileContentContains(def_file, 'hello_B') def testWinDefFileAttribute(self): self.CreateWorkspaceWithDefaultRepos('WORKSPACE') self.ScratchFile('lib.cc', ['void hello() {}']) self.ScratchFile('my_lib.def', [ 'EXPORTS', ' ?hello@@YAXXZ', ]) self.ScratchFile('BUILD', [ 'cc_library(', ' name = "lib",', ' srcs = ["lib.cc"],', ' win_def_file = "my_lib.def",', ')', '', 'cc_binary(', ' name = "lib_dy.dll",', ' srcs = ["lib.cc"],', ' win_def_file = "my_lib.def",', ' linkshared = 1,', ')', ]) # Test exporting symbols using custom DEF file in cc_library. 
# Auto-generating DEF file should be disabled when custom DEF file specified exit_code, _, stderr = self.RunBazel([ 'build', '//:lib', '-s', '--output_groups=dynamic_library', '--features=windows_export_all_symbols' ]) self.AssertExitCode(exit_code, 0, stderr) bazel_bin = self.getBazelInfo('bazel-bin') lib_if = os.path.join(bazel_bin, 'lib.if.lib') lib_def = os.path.join(bazel_bin, 'lib.gen.def') self.assertTrue(os.path.exists(lib_if)) self.assertFalse(os.path.exists(lib_def)) # Test specifying DEF file in cc_binary exit_code, _, stderr = self.RunBazel(['build', '//:lib_dy.dll', '-s']) self.AssertExitCode(exit_code, 0, stderr) filepath = bazel_bin + '/lib_dy.dll-2.params' with open(filepath, 'r', encoding='latin-1') as param_file: self.assertIn('/DEF:my_lib.def', param_file.read()) def testCcImportRule(self): self.CreateWorkspaceWithDefaultRepos('WORKSPACE') self.ScratchFile('BUILD', [ 'cc_import(', ' name = "a_import",', ' static_library = "A.lib",', ' shared_library = "A.dll",', ' interface_library = "A.if.lib",', ' hdrs = ["a.h"],', ' alwayslink = 1,', ')', ]) exit_code, _, stderr = self.RunBazel([ 'build', '//:a_import', ]) self.AssertExitCode(exit_code, 0, stderr) def testCopyDLLAsSource(self): self.CreateWorkspaceWithDefaultRepos('WORKSPACE') self.ScratchFile('BUILD', [ 'cc_import(', ' name = "a_import",', ' shared_library = "A.dll",', ')', '', 'cc_binary(', ' name = "bin",', ' srcs = ["bin.cc"],', ' deps = ["//:a_import"],', ')', ]) self.ScratchFile('A.dll') self.ScratchFile('bin.cc', [ 'int main() {', ' return 0;', '}', ]) exit_code, _, stderr = self.RunBazel([ 'build', '//:bin', ]) self.AssertExitCode(exit_code, 0, stderr) bazel_bin = self.getBazelInfo('bazel-bin') a_dll = os.path.join(bazel_bin, 'A.dll') # Even though A.dll is in the same package as bin.exe, but it still should # be copied to the output directory of bin.exe. 
self.assertTrue(os.path.exists(a_dll)) def testCppErrorShouldBeVisible(self): self.CreateWorkspaceWithDefaultRepos('WORKSPACE') self.ScratchFile('BUILD', [ 'cc_binary(', ' name = "bad",', ' srcs = ["bad.cc"],', ')', ]) self.ScratchFile('bad.cc', [ 'int main(int argc, char** argv) {', ' this_is_an_error();', '}', ]) exit_code, stdout, stderr = self.RunBazel(['build', '//:bad']) self.AssertExitCode(exit_code, 1, stderr) self.assertIn('this_is_an_error', ''.join(stdout)) def testBuildWithClangClByCompilerFlag(self): self.CreateWorkspaceWithDefaultRepos('WORKSPACE') self.ScratchFile('BUILD', [ 'cc_binary(', ' name = "main",', ' srcs = ["main.cc"],', ')', ]) self.ScratchFile('main.cc', [ 'int main() {', ' return 0;', '}', ]) exit_code, _, stderr = self.RunBazel([ 'build', '-s', '--compiler=clang-cl', '--incompatible_enable_cc_toolchain_resolution=false', '//:main' ]) self.AssertExitCode(exit_code, 0, stderr) self.assertIn('clang-cl.exe', ''.join(stderr)) def testBuildWithClangClByToolchainResolution(self): self.CreateWorkspaceWithDefaultRepos('WORKSPACE', [ 'register_execution_platforms(', ' ":windows_clang"', ')', '', 'register_toolchains(', ' "@local_config_cc//:cc-toolchain-x64_windows-clang-cl",', ')', ]) self.ScratchFile('BUILD', [ 'platform(', ' name = "windows_clang",', ' constraint_values = [', ' "@platforms//cpu:x86_64",', ' "@platforms//os:windows",', ' "@bazel_tools//tools/cpp:clang-cl",', ' ]', ')', '', 'cc_binary(', ' name = "main",', ' srcs = ["main.cc"],', ')', ]) self.ScratchFile('main.cc', [ 'int main() {', ' return 0;', '}', ]) exit_code, _, stderr = self.RunBazel([ 'build', '-s', '--incompatible_enable_cc_toolchain_resolution=true', '//:main' ]) self.AssertExitCode(exit_code, 0, stderr) self.assertIn('clang-cl.exe', ''.join(stderr)) def createSimpleCppWorkspace(self, name): work_dir = self.ScratchDir(name) self.ScratchFile(name + '/WORKSPACE', ['workspace(name = "%s")' % name]) self.ScratchFile( name + '/BUILD', ['cc_library(name = "lib", srcs = ["lib.cc"], hdrs = ["lib.h"])']) self.ScratchFile(name + '/lib.h', ['void hello();']) self.ScratchFile(name + '/lib.cc', ['#include "lib.h"', 'void hello() {}']) return work_dir # Regression test for https://github.com/bazelbuild/bazel/issues/9172 def testCacheBetweenWorkspaceWithDifferentNames(self): cache_dir = self.ScratchDir('cache') dir_a = self.createSimpleCppWorkspace('A') dir_b = self.createSimpleCppWorkspace('B') exit_code, _, stderr = self.RunBazel( ['build', '--disk_cache=' + cache_dir, ':lib'], cwd=dir_a) self.AssertExitCode(exit_code, 0, stderr) exit_code, _, stderr = self.RunBazel( ['build', '--disk_cache=' + cache_dir, ':lib'], cwd=dir_b) self.AssertExitCode(exit_code, 0, stderr) # Regression test for https://github.com/bazelbuild/bazel/issues/9321 def testCcCompileWithTreeArtifactAsSource(self): self.CreateWorkspaceWithDefaultRepos('WORKSPACE') self.ScratchFile('BUILD', [ 'load(":genccs.bzl", "genccs")', '', 'genccs(', ' name = "gen_tree",', ')', '', 'cc_library(', ' name = "main",', ' srcs = [ "gen_tree" ]', ')', '', 'cc_binary(', ' name = "genccs",', ' srcs = [ "genccs.cpp" ],', ')', ]) self.ScratchFile('genccs.bzl', [ 'def _impl(ctx):', ' tree = ctx.actions.declare_directory(ctx.attr.name + ".cc")', ' ctx.actions.run(', ' inputs = [],', ' outputs = [ tree ],', ' arguments = [ tree.path ],', ' progress_message = "Generating cc files into \'%s\'" % tree.path,', ' executable = ctx.executable._tool,', ' )', '', ' return [ DefaultInfo(files = depset([ tree ])) ]', '', 'genccs = rule(', ' implementation = _impl,', ' attrs 
= {', ' "_tool": attr.label(', ' executable = True,', ' cfg = "host",', ' allow_files = True,', ' default = Label("//:genccs"),', ' )', ' }', ')', ]) self.ScratchFile('genccs.cpp', [ '#include <fstream>', '#include <Windows.h>', 'using namespace std;', '', 'int main (int argc, char *argv[]) {', ' CreateDirectory(argv[1], NULL);', ' ofstream myfile;', ' myfile.open(string(argv[1]) + string("/foo.cpp"));', ' myfile << "int main() { return 42; }";', ' return 0;', '}', ]) exit_code, _, stderr = self.RunBazel(['build', '//:main']) self.AssertExitCode(exit_code, 0, stderr) def testBuild32BitCppBinaryWithMsvcCL(self): self.CreateWorkspaceWithDefaultRepos('WORKSPACE') self.ScratchFile('BUILD', [ 'cc_binary(', ' name = "main",', ' srcs = ["main.cc"],', ')', ]) self.ScratchFile('main.cc', [ 'int main() {', ' return 0;', '}', ]) exit_code, _, stderr = self.RunBazel( ['build', '-s', '--cpu=x64_x86_windows', '//:main']) self.AssertExitCode(exit_code, 0, stderr) self.assertIn('x86/cl.exe', ''.join(stderr)) def testBuildArmCppBinaryWithMsvcCL(self): self.CreateWorkspaceWithDefaultRepos('WORKSPACE') self.ScratchFile('BUILD', [ 'cc_binary(', ' name = "main",', ' srcs = ["main.cc"],', ')', ]) self.ScratchFile('main.cc', [ 'int main() {', ' return 0;', '}', ]) exit_code, _, stderr = self.RunBazel( ['build', '-s', '--cpu=x64_arm_windows', '//:main']) self.AssertExitCode(exit_code, 0, stderr) self.assertIn('arm/cl.exe', ''.join(stderr)) def testBuildArm64CppBinaryWithMsvcCL(self): self.CreateWorkspaceWithDefaultRepos('WORKSPACE') self.ScratchFile('BUILD', [ 'cc_binary(', ' name = "main",', ' srcs = ["main.cc"],', ')', ]) self.ScratchFile('main.cc', [ 'int main() {', ' return 0;', '}', ]) exit_code, _, stderr = self.RunBazel( ['build', '-s', '--cpu=x64_arm64_windows', '//:main']) self.AssertExitCode(exit_code, 0, stderr) self.assertIn('arm64/cl.exe', ''.join(stderr)) if __name__ == '__main__': unittest.main()
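# Illustrative sketch (not part of the upstream test file). The tests above
# follow one pattern: run `bazel build -s <target>` via self.RunBazel, then
# look for compiler/linker flags in stderr or in the generated *.params files.
# A standalone equivalent, assuming `bazel` is on PATH and the current
# directory is a Windows workspace with an MSVC toolchain, could look like:
import subprocess

def build_and_check_flag(target, flag, extra_args=()):
  """Runs `bazel build -s <target>` and reports whether `flag` appears."""
  proc = subprocess.run(['bazel', 'build', '-s', target, *extra_args],
                        capture_output=True, text=True)
  if proc.returncode != 0:
    raise RuntimeError('bazel build failed:\n' + proc.stderr)
  # With -s, Bazel echoes every subcommand it runs to stderr, so options such
  # as /MT or /MD are visible there.
  return flag in proc.stderr

# Hypothetical usage mirroring the /MT assertion near the top of this file:
#   build_and_check_flag('//:A', '/MT',
#                        extra_args=('--features=static_link_msvcrt',
#                                    '--output_groups=dynamic_library'))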
werkt/bazel
src/test/py/bazel/bazel_windows_cpp_test.py
Python
apache-2.0
32,128
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility function for voxels.""" import gin import gin.tf import tensorflow as tf from tf3d.layers import sparse_voxel_net_utils from tf3d.utils import shape_utils compute_pooled_voxel_indices = sparse_voxel_net_utils.compute_pooled_voxel_indices pool_features_given_indices = sparse_voxel_net_utils.pool_features_given_indices def crop_and_pad_voxels(voxels, start_coordinates, end_coordinates): """Crops a voxel region and pads past the boundaries with zeros. This accepts start and end coordinates past the limits of the voxel grid, and uses it to calculate how much top/left/right/bottom padding to add. Args: voxels: A tf.float32 tensor of shape [x, y, z, f] to crop start_coordinates: A list of len 4 with the [x, y, z, f] starting location of our crop. This can be negative, which indicates left/top padding. end_coordinates: A list of len 4 with the [x, y, z, f] ending location of our crop. This can be beyond the size of the voxel tensor, which indicates padding. Returns: cropped_and_padded_voxels: A voxel grid with shape [end_coordinates[0] - start_coordinates[0], end_coordinates[1] - start_coordinates[1], end_coordinates[2] - start_coordinates[2], end_coordinates[3] - start_coordinates[3]] Raises: ValueError: If requested crop and pad is outside the bounds of what the function supports. """ if len(start_coordinates) != 4: raise ValueError('start_coordinates should be of length 4') if len(end_coordinates) != 4: raise ValueError('end_coordinates should be of length 4') if any([coord <= 0 for coord in end_coordinates]): raise ValueError('Requested end coordinates should be > 0') start_coordinates = tf.convert_to_tensor(start_coordinates, tf.int32) end_coordinates = tf.convert_to_tensor(end_coordinates, tf.int32) # Clip the coordinates to within the voxel grid clipped_start_coordinates = tf.maximum(0, start_coordinates) clipped_end_coordinates = tf.minimum(voxels.shape, end_coordinates) cropped_voxels = tf.slice(voxels, begin=clipped_start_coordinates, size=(clipped_end_coordinates - clipped_start_coordinates)) top_and_left_padding = tf.maximum(0, -start_coordinates) bottom_and_right_padding = tf.maximum(0, end_coordinates - voxels.shape) padding = tf.stack([top_and_left_padding, bottom_and_right_padding], axis=1) return tf.pad(cropped_voxels, padding) def pointcloud_to_voxel_grid(points, features, grid_cell_size, start_location, end_location, segment_func=tf.math.unsorted_segment_mean): """Converts a pointcloud into a voxel grid. Args: points: A tf.float32 tensor of size [N, 3]. features: A tf.float32 tensor of size [N, F]. grid_cell_size: A tf.float32 tensor of size [3]. start_location: A tf.float32 tensor of size [3]. end_location: A tf.float32 tensor of size [3]. segment_func: A tensorflow function that operates on segments. Expect one of tf.math.unsorted_segment_{min/max/mean/prod/sum}. 
Defaults to tf.math.unsorted_segment_mean Returns: voxel_features: A tf.float32 tensor of size [grid_x_len, grid_y_len, grid_z_len, F]. segment_ids: A tf.int32 tensor of IDs for each point indicating which (flattened) voxel cell its data was mapped to. point_indices: A tf.int32 tensor of size [num_points, 3] containing the location of each point in the 3d voxel grid. """ grid_cell_size = tf.convert_to_tensor(grid_cell_size, dtype=tf.float32) start_location = tf.convert_to_tensor(start_location, dtype=tf.float32) end_location = tf.convert_to_tensor(end_location, dtype=tf.float32) point_indices = tf.cast( (points - tf.expand_dims(start_location, axis=0)) / tf.expand_dims(grid_cell_size, axis=0), dtype=tf.int32) grid_size = tf.cast( tf.math.ceil((end_location - start_location) / grid_cell_size), dtype=tf.int32) # Note: all points outside the grid are added to the edges # Cap index at grid_size - 1 (so a 10x10x10 grid's max cell is (9,9,9)) point_indices = tf.minimum(point_indices, tf.expand_dims(grid_size - 1, axis=0)) # Don't allow any points below index (0, 0, 0) point_indices = tf.maximum(point_indices, 0) segment_ids = tf.reduce_sum( point_indices * tf.stack( [grid_size[1] * grid_size[2], grid_size[2], 1], axis=0), axis=1) voxel_features = segment_func( data=features, segment_ids=segment_ids, num_segments=(grid_size[0] * grid_size[1] * grid_size[2])) return (tf.reshape(voxel_features, [grid_size[0], grid_size[1], grid_size[2], features.get_shape().as_list()[1]]), segment_ids, point_indices) def voxels_to_points(voxels, segment_ids): """Convert voxels back to points given their segment id. Args: voxels: A tf.float32 tensor representing a voxel grid. Expect shape [x, y, z, f]. segment_ids: A tf.int32 tensor representing the segment id of each point in the original pointcloud we want to project voxel features back to. Returns: point_features: A tf.float32 tensor of shape [N, f] where each point now has the features in the associated voxel cell. """ flattened_voxels = tf.reshape(voxels, shape=(-1, voxels.shape[-1])) return tf.gather(flattened_voxels, segment_ids) def _points_offset_in_voxels_unbatched(points, grid_cell_size): """Converts points into offsets in voxel grid for a single batch. The values range from -0.5 to 0.5 Args: points: A tf.float32 tensor of size [N, 3]. grid_cell_size: The size of the grid cells in x, y, z dimensions in the voxel grid. It should be either a tf.float32 tensor, a numpy array or a list of size [3]. Returns: voxel_xyz_offsets: A tf.float32 tensor of size [N, 3]. """ min_points = tf.reduce_min(points, axis=0) points_index = tf.math.floordiv(points - min_points, grid_cell_size) points_offset = points - min_points - (points_index * grid_cell_size) return (points_offset / grid_cell_size) - 0.5 def points_offset_in_voxels(points, grid_cell_size): """Converts points into offsets in voxel grid. Args: points: A tf.float32 tensor of size [batch_size, N, 3]. grid_cell_size: The size of the grid cells in x, y, z dimensions in the voxel grid. It should be either a tf.float32 tensor, a numpy array or a list of size [3]. Returns: voxel_xyz_offsets: A tf.float32 tensor of size [batch_size, N, 3]. """ batch_size = points.get_shape().as_list()[0] def fn(i): return _points_offset_in_voxels_unbatched( points=points[i, :, :], grid_cell_size=grid_cell_size) return tf.map_fn(fn=fn, elems=tf.range(batch_size), dtype=tf.float32) def _points_to_voxel_indices(points, grid_cell_size): """Converts points into corresponding voxel indices. 
Maps each point into a voxel grid with cell size given by grid_cell_size. For each voxel, it computes a x, y, z index. Also converts the x, y, z index to a single number index where there is a one-on-one mapping between each x, y, z index value and its corresponding single number index value. Args: points: A tf.float32 tensor of size [N, 3]. grid_cell_size: The size of the grid cells in x, y, z dimensions in the voxel grid. It should be either a tf.float32 tensor, a numpy array or a list of size [3]. Returns: voxel_xyz_indices: A tf.int32 tensor of size [N, 3] containing the x, y, z index of the voxel corresponding to each given point. voxel_single_number_indices: A tf.int32 tensor of size [N] containing the single number index of the voxel corresponding to each given point. voxel_start_location: A tf.float32 tensor of size [3] containing the start location of the voxels. """ voxel_start_location = tf.reduce_min(points, axis=0) voxel_xyz_indices = tf.cast( tf.math.floordiv(points - voxel_start_location, grid_cell_size), dtype=tf.int32) voxel_xyz_indices, voxel_single_number_indices = compute_pooled_voxel_indices( voxel_xyz_indices=voxel_xyz_indices, pooling_size=(1, 1, 1)) return voxel_xyz_indices, voxel_single_number_indices, voxel_start_location def pointcloud_to_sparse_voxel_grid_unbatched(points, features, grid_cell_size, segment_func): """Converts a pointcloud into a voxel grid. This function does not handle batch size and only works for a single batch of points. The function `pointcloud_to_sparse_voxel_grid` below calls this function in a while loop to map a batch of points to a batch of voxels. A sparse voxel grid is represented by only keeping the voxels that have points in them in memory. Assuming that N' voxels have points in them, we represent a sparse voxel grid by (a) voxel_features, a [N', F] or [N', G, F] tensor containing the feature vector for each voxel. (b) voxel_indices, a [N', 3] tensor containing the x, y, z index of each voxel. Args: points: A tf.float32 tensor of size [N, 3]. features: A tf.float32 tensor of size [N, F]. grid_cell_size: The size of the grid cells in x, y, z dimensions in the voxel grid. It should be either a tf.float32 tensor, a numpy array or a list of size [3]. segment_func: A tensorflow function that operates on segments. Examples are one of tf.math.unsorted_segment_{min/max/mean/prod/sum}. Returns: voxel_features: A tf.float32 tensor of size [N', F] or [N', G, F] where G is the number of points sampled per voxel. voxel_indices: A tf.int32 tensor of size [N', 3]. segment_ids: A size [N] tf.int32 tensor of IDs for each point indicating which (flattened) voxel cell its data was mapped to. voxel_start_location: A tf.float32 tensor of size [3] containing the start location of the voxels. Raises: ValueError: If pooling method is unknown. 
""" grid_cell_size = tf.convert_to_tensor(grid_cell_size, dtype=tf.float32) voxel_xyz_indices, voxel_single_number_indices, voxel_start_location = ( _points_to_voxel_indices(points=points, grid_cell_size=grid_cell_size)) voxel_features, segment_ids, num_segments = pool_features_given_indices( features=features, indices=voxel_single_number_indices, segment_func=segment_func) voxel_xyz_indices = tf.math.unsorted_segment_max( data=voxel_xyz_indices, segment_ids=segment_ids, num_segments=num_segments) return voxel_features, voxel_xyz_indices, segment_ids, voxel_start_location def _pad_or_clip_voxels(voxel_features, voxel_indices, num_valid_voxels, segment_ids, voxels_pad_or_clip_size): """Pads or clips voxels.""" if voxels_pad_or_clip_size: num_valid_voxels = tf.minimum(num_valid_voxels, voxels_pad_or_clip_size) num_channels = voxel_features.get_shape().as_list()[-1] if len(voxel_features.shape.as_list()) == 2: output_shape = [voxels_pad_or_clip_size, num_channels] elif len(voxel_features.shape.as_list()) == 3: num_samples_per_voxel = voxel_features.get_shape().as_list()[1] if num_samples_per_voxel is None: num_samples_per_voxel = tf.shape(voxel_features)[1] output_shape = [ voxels_pad_or_clip_size, num_samples_per_voxel, num_channels ] else: raise ValueError('voxel_features should be either rank 2 or 3.') voxel_features = shape_utils.pad_or_clip_nd( tensor=voxel_features, output_shape=output_shape) voxel_indices = shape_utils.pad_or_clip_nd( tensor=voxel_indices, output_shape=[voxels_pad_or_clip_size, 3]) valid_segment_ids_mask = tf.cast( tf.less(segment_ids, num_valid_voxels), dtype=tf.int32) segment_ids *= valid_segment_ids_mask return voxel_features, voxel_indices, num_valid_voxels, segment_ids def pointcloud_to_sparse_voxel_grid(points, features, num_valid_points, grid_cell_size, voxels_pad_or_clip_size, segment_func): """Converts a pointcloud into a voxel grid. This function calls the `pointcloud_to_sparse_voxel_grid_unbatched` function above in a while loop to map a batch of points to a batch of voxels. Args: points: A tf.float32 tensor of size [batch_size, N, 3]. features: A tf.float32 tensor of size [batch_size, N, F]. num_valid_points: A tf.int32 tensor of size [num_batches] containing the number of valid points in each batch example. grid_cell_size: A tf.float32 tensor of size [3]. voxels_pad_or_clip_size: Number of target voxels to pad or clip to. If None, it will not perform the padding. segment_func: A tensorflow function that operates on segments. Examples are one of tf.math.unsorted_segment_{min/max/mean/prod/sum}. Returns: voxel_features: A tf.float32 tensor of size [batch_size, N', F] or [batch_size, N', G, F] where G is the number of points sampled per voxel. voxel_indices: A tf.int32 tensor of size [batch_size, N', 3]. num_valid_voxels: A tf.int32 tensor of size [batch_size]. segment_ids: A size [batch_size, N] tf.int32 tensor of IDs for each point indicating which (flattened) voxel cell its data was mapped to. voxel_start_location: A size [batch_size, 3] tf.float32 tensor of voxel start locations. Raises: ValueError: If pooling method is unknown. 
""" batch_size = points.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(points)[0] num_points = tf.shape(points)[1] def fn(i): """Map function.""" num_valid_points_i = num_valid_points[i] points_i = points[i, :num_valid_points_i, :] features_i = features[i, :num_valid_points_i, :] voxel_features_i, voxel_indices_i, segment_ids_i, voxel_start_location_i = ( pointcloud_to_sparse_voxel_grid_unbatched( points=points_i, features=features_i, grid_cell_size=grid_cell_size, segment_func=segment_func)) num_valid_voxels_i = tf.shape(voxel_features_i)[0] (voxel_features_i, voxel_indices_i, num_valid_voxels_i, segment_ids_i) = _pad_or_clip_voxels( voxel_features=voxel_features_i, voxel_indices=voxel_indices_i, num_valid_voxels=num_valid_voxels_i, segment_ids=segment_ids_i, voxels_pad_or_clip_size=voxels_pad_or_clip_size) segment_ids_i = tf.pad( segment_ids_i, paddings=[[0, num_points - num_valid_points_i]]) return (voxel_features_i, voxel_indices_i, num_valid_voxels_i, segment_ids_i, voxel_start_location_i) return tf.map_fn( fn=fn, elems=tf.range(batch_size), dtype=(tf.float32, tf.int32, tf.int32, tf.int32, tf.float32)) def sparse_voxel_grid_to_pointcloud(voxel_features, segment_ids, num_valid_voxels, num_valid_points): """Convert voxel features back to points given their segment ids. Args: voxel_features: A tf.float32 tensor of size [batch_size, N', F]. segment_ids: A size [batch_size, N] tf.int32 tensor of IDs for each point indicating which (flattened) voxel cell its data was mapped to. num_valid_voxels: A tf.int32 tensor of size [batch_size] containing the number of valid voxels in each batch example. num_valid_points: A tf.int32 tensor of size [batch_size] containing the number of valid points in each batch example. Returns: point_features: A tf.float32 tensor of size [batch_size, N, F]. Raises: ValueError: If batch_size is unknown at graph construction time. """ batch_size = voxel_features.shape[0] if batch_size is None: raise ValueError('batch_size is unknown at graph construction time.') num_points = tf.shape(segment_ids)[1] def fn(i): num_valid_voxels_i = num_valid_voxels[i] num_valid_points_i = num_valid_points[i] voxel_features_i = voxel_features[i, :num_valid_voxels_i, :] segment_ids_i = segment_ids[i, :num_valid_points_i] point_features = tf.gather(voxel_features_i, segment_ids_i) point_features_rank = len(point_features.get_shape().as_list()) point_features_paddings = [[0, num_points - num_valid_points_i]] for _ in range(point_features_rank - 1): point_features_paddings.append([0, 0]) point_features = tf.pad(point_features, paddings=point_features_paddings) return point_features return tf.map_fn(fn=fn, elems=tf.range(batch_size), dtype=tf.float32) @gin.configurable def per_voxel_point_sample_segment_func(data, segment_ids, num_segments, num_samples_per_voxel): """Samples features from the points within each voxel. Args: data: A tf.float32 tensor of size [N, F]. segment_ids: A tf.int32 tensor of size [N]. num_segments: Number of segments. num_samples_per_voxel: Number of features to sample per voxel. If the voxel has less number of points in it, the point features will be padded by 0. Returns: A tf.float32 tensor of size [num_segments, num_samples_per_voxel, F]. A tf.int32 indices of size [N, num_samples_per_voxel]. 
""" num_channels = data.get_shape().as_list()[1] if num_channels is None: raise ValueError('num_channels is None.') n = tf.shape(segment_ids)[0] def _body_fn(i, indices_range, indices): """Computes the indices of the i-th point feature in each segment.""" indices_i = tf.math.unsorted_segment_max( data=indices_range, segment_ids=segment_ids, num_segments=num_segments) indices_i_positive_mask = tf.greater(indices_i, 0) indices_i_positive = tf.boolean_mask(indices_i, indices_i_positive_mask) boolean_mask = tf.scatter_nd( indices=tf.cast( tf.expand_dims(indices_i_positive - 1, axis=1), dtype=tf.int64), updates=tf.ones_like(indices_i_positive, dtype=tf.int32), shape=(n,)) indices_range *= (1 - boolean_mask) indices_i *= tf.cast(indices_i_positive_mask, dtype=tf.int32) indices_i = tf.pad( tf.expand_dims(indices_i, axis=1), paddings=[[0, 0], [i, num_samples_per_voxel - i - 1]]) indices += indices_i i = i + 1 return i, indices_range, indices cond = lambda i, indices_range, indices: i < num_samples_per_voxel (_, _, indices) = tf.while_loop( cond=cond, body=_body_fn, loop_vars=(tf.constant(0, dtype=tf.int32), tf.range(n) + 1, tf.zeros([num_segments, num_samples_per_voxel], dtype=tf.int32))) data = tf.pad(data, paddings=[[1, 0], [0, 0]]) voxel_features = tf.gather(data, tf.reshape(indices, [-1])) return tf.reshape(voxel_features, [num_segments, num_samples_per_voxel, num_channels]) def compute_pointcloud_weights_based_on_voxel_density(points, grid_cell_size): """Computes pointcloud weights based on voxel density. Args: points: A tf.float32 tensor of size [num_points, 3]. grid_cell_size: The size of the grid cells in x, y, z dimensions in the voxel grid. It should be either a tf.float32 tensor, a numpy array or a list of size [3]. Returns: A tf.float32 tensor of size [num_points, 1] containing weights that are inverse proportional to the denisty of the points in voxels. """ num_points = tf.shape(points)[0] features = tf.ones([num_points, 1], dtype=tf.float32) voxel_features, _, segment_ids, _ = ( pointcloud_to_sparse_voxel_grid_unbatched( points=points, features=features, grid_cell_size=grid_cell_size, segment_func=tf.math.unsorted_segment_sum)) num_voxels = tf.shape(voxel_features)[0] point_features = sparse_voxel_grid_to_pointcloud( voxel_features=tf.expand_dims(voxel_features, axis=0), segment_ids=tf.expand_dims(segment_ids, axis=0), num_valid_voxels=tf.expand_dims(num_voxels, axis=0), num_valid_points=tf.expand_dims(num_points, axis=0)) inverse_point_densities = 1.0 / tf.squeeze(point_features, axis=0) total_inverse_density = tf.reduce_sum(inverse_point_densities) return (inverse_point_densities * tf.cast(num_points, dtype=tf.float32) / total_inverse_density)
google-research/google-research
tf3d/utils/voxel_utils.py
Python
apache-2.0
21,191
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

__author__ = "Ponzoni, Nelson"
__copyright__ = "Copyright 2015"
__credits__ = ["Ponzoni Nelson"]
__maintainer__ = "Ponzoni Nelson"
__contact__ = "npcuadra@gmail.com"
__email__ = "npcuadra@gmail.com"
__license__ = "GPL"
__version__ = "1.0.0"
__status__ = "Production"

"""
GRID search
"""

from collections.abc import Mapping
from functools import partial, reduce
import operator
from itertools import product
import numpy as np


class ParameterGrid(object):
    """Grid of parameters with a discrete number of values for each.

    Can be used to iterate over parameter value combinations with the
    Python built-in function iter.

    Read more in the :ref:`User Guide <grid_search>`.

    Parameters
    ----------
    param_grid : dict of string to sequence, or sequence of such
        The parameter grid to explore, as a dictionary mapping estimator
        parameters to sequences of allowed values.

        An empty dict signifies default parameters.

        A sequence of dicts signifies a sequence of grids to search, and is
        useful to avoid exploring parameter combinations that make no sense
        or have no effect. See the examples below.

    Examples
    --------
    >>> from cupydle.dnn.gridSearch import ParameterGrid
    >>> param_grid = {'a': [1, 2], 'b': [True, False]}
    >>> list(ParameterGrid(param_grid)) == (
    ...    [{'a': 1, 'b': True}, {'a': 1, 'b': False},
    ...     {'a': 2, 'b': True}, {'a': 2, 'b': False}])
    True

    >>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
    >>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
    ...                               {'kernel': 'rbf', 'gamma': 1},
    ...                               {'kernel': 'rbf', 'gamma': 10}]
    True
    >>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
    True

    See also
    --------
    :class:`GridSearchCV`:
        uses ``ParameterGrid`` to perform a full parallelized parameter
        search.
    """

    def __init__(self, param_grid):
        if isinstance(param_grid, Mapping):
            # wrap dictionary in a singleton list to support either dict
            # or list of dicts
            param_grid = [param_grid]
        self.param_grid = param_grid

    def __iter__(self):
        """Iterate over the points in the grid.

        Returns
        -------
        params : iterator over dict of string to any
            Yields dictionaries mapping each estimator parameter to one of
            its allowed values.
        """
        for p in self.param_grid:
            # Always sort the keys of a dictionary, for reproducibility
            items = sorted(p.items())
            if not items:
                yield {}
            else:
                keys, values = zip(*items)
                for v in product(*values):
                    params = dict(zip(keys, v))
                    yield params

    def __len__(self):
        """Number of points on the grid."""
        # Product function that can handle iterables (np.prod can't).
        product = partial(reduce, operator.mul)
        return sum(product(len(v) for v in p.values()) if p else 1
                   for p in self.param_grid)

    def __getitem__(self, ind):
        """Get the parameters that would be ``ind``th in iteration

        Parameters
        ----------
        ind : int
            The iteration index

        Returns
        -------
        params : dict of string to any
            Equal to list(self)[ind]
        """
        # This is used to make discrete sampling without replacement memory
        # efficient.
        for sub_grid in self.param_grid:
            # XXX: could memoize information used here
            if not sub_grid:
                if ind == 0:
                    return {}
                else:
                    ind -= 1
                    continue

            # Reverse so most frequent cycling parameter comes first
            keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
            sizes = [len(v_list) for v_list in values_lists]
            total = np.prod(sizes)

            if ind >= total:
                # Try the next grid
                ind -= total
            else:
                out = {}
                for key, v_list, n in zip(keys, values_lists, sizes):
                    ind, offset = divmod(ind, n)
                    out[key] = v_list[offset]
                return out

        raise IndexError('ParameterGrid index out of range')


if __name__ == '__main__':
    param_grid = {'a': [1, 2], 'b': [True, False]}
    a = ParameterGrid(param_grid)
    print(list(a))
    print(len(a))
    print(a[1])
    print(a)
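

# Illustrative sketch (not part of the original module): one typical way to
# drive a hyper-parameter sweep with ParameterGrid. `evaluate` is a
# hypothetical callable supplied by the caller, e.g. train a model with the
# given parameters and return a validation score.
def grid_search(param_grid, evaluate):
    """Tries every combination and returns (best_params, best_score)."""
    best_params, best_score = None, float('-inf')
    for params in ParameterGrid(param_grid):
        score = evaluate(**params)
        if score > best_score:
            best_params, best_score = params, score
    return best_params, best_score

# Example with a toy objective:
#   best, score = grid_search(
#       {'lr': [0.1, 0.01], 'momentum': [0.0, 0.9]},
#       evaluate=lambda lr, momentum: -(lr - 0.01) ** 2 + momentum)
#   # -> best == {'lr': 0.01, 'momentum': 0.9}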
lerker/cupydle
cupydle/dnn/gridSearch.py
Python
apache-2.0
4,741
from .responses import InstanceMetadataResponse url_bases = ["http://169.254.169.254"] instance_metadata = InstanceMetadataResponse() url_paths = {"{0}/(?P<path>.+)": instance_metadata.metadata_response}
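# Illustrative note (not part of moto itself): moto's core builds its routing
# table by substituting every entry of `url_bases` into the `{0}` placeholder
# of each `url_paths` key, roughly:
#
#   for base in url_bases:
#       for path_template, handler in url_paths.items():
#           pattern = path_template.format(base)
#           # -> "http://169.254.169.254/(?P<path>.+)" dispatched to
#           #    instance_metadata.metadata_response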
spulec/moto
moto/instance_metadata/urls.py
Python
apache-2.0
207
# Copyright (c) 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the License); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an AS IS BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and# # limitations under the License. import copy import pprint import ipaddr import netaddr from neutronclient.common import exceptions as neutron_exc from neutronclient.v2_0 import client as neutron_client from cloudferrylib.base import exception from cloudferrylib.base import network from cloudferrylib.os.identity import keystone as ksresource from cloudferrylib.utils import cache from cloudferrylib.utils import log from cloudferrylib.utils import utils as utl LOG = log.getLogger(__name__) DEFAULT_SECGR = 'default' @cache.Cached(getter='get_subnets_list', modifier='create_network') @cache.Cached(getter='get_networks_list', modifier='create_network') @cache.Cached(getter='get_ports_list', modifier='create_port') class NeutronNetwork(network.Network): """ The main class for working with OpenStack Neutron client """ def __init__(self, config, cloud): super(NeutronNetwork, self).__init__(config) self.cloud = cloud self.identity_client = cloud.resources[utl.IDENTITY_RESOURCE] self.filter_tenant_id = None self.ext_net_map = \ utl.read_yaml_file(self.config.migrate.ext_net_map) or {} self.mysql_connector = cloud.mysql_connector('neutron') @property def neutron_client(self): return self.proxy(self.get_client(), self.config) def get_client(self): return neutron_client.Client( username=self.config.cloud.user, password=self.config.cloud.password, tenant_name=self.config.cloud.tenant, auth_url=self.config.cloud.auth_url, cacert=self.config.cloud.cacert, insecure=self.config.cloud.insecure, region_name=self.config.cloud.region ) def read_info(self, **kwargs): """Get info about neutron resources: :rtype: Dictionary with all necessary neutron info """ if kwargs.get('tenant_id'): tenant_id = self.filter_tenant_id = kwargs['tenant_id'][0] else: tenant_id = '' nets = self.get_networks(tenant_id) subnets = self.get_subnets(tenant_id) detached_ports = self.get_detached_ports(tenant_id) LOG.debug('List of detached ports: %s', repr([p['id'] for p in detached_ports])) if self.filter_tenant_id is not None: shared_nets = self.get_shared_networks_raw() for net in shared_nets: # do not include the same network twice if net['id'] in [n['id'] for n in nets]: continue nets.append(self.convert_networks(net, self.cloud)) LOG.debug("Got shared network ID %s", net['id']) # Append subnets from the shared networks for subnet in net['subnets']: # do not include the same subnets twice if subnet['id'] in [sn['id'] for sn in subnets]: continue subnets.append(self.convert_subnets(subnet, self.cloud)) LOG.debug("Got shared subnet ID %s", subnet['id']) full_nets_list = self.get_networks() else: full_nets_list = nets # Get full list off busy segmentation IDs used_seg_ids = get_segmentation_ids_from_net_list(full_nets_list) routers = self.get_routers() subnet_ids = {sn['id'] for sn in subnets} for router in routers: router['subnet_ids'] = [sn_id for sn_id in router['subnet_ids'] if sn_id in subnet_ids] info = {'networks': nets, 'subnets': subnets, 'routers': routers, 'detached_ports': detached_ports, 
'floating_ips': self.get_floatingips(tenant_id), 'security_groups': self.get_sec_gr_and_rules(tenant_id), 'quota': self.get_quota(tenant_id), 'meta': { 'segmentation_ids': used_seg_ids }} if self.config.migrate.keep_lbaas: info['lbaas'] = dict() info['lb_pools'] = self.get_lb_pools(tenant_id) info['lb_monitors'] = self.get_lb_monitors(tenant_id) info['lb_members'] = self.get_lb_members(tenant_id) info['lb_vips'] = self.get_lb_vips(tenant_id) return info def show_quota(self, tenant_id=''): return self.neutron_client.show_quota(tenant_id) def list_quotas(self): return self.neutron_client.list_quotas()['quotas'] def get_quota(self, tenant_id): # return structure {'name_tenant': {'subnet': 10, ...}, ...} tenants = {} if not tenant_id: tenants_obj = self.identity_client.get_tenants_list() tenants = {t.id: t.name for t in tenants_obj} else: tenants[tenant_id] = self.identity_client.\ try_get_tenant_name_by_id(tenant_id) data = { } if self.config.network.get_all_quota: for t_id, t_val in tenants.iteritems(): data[t_val] = self.neutron_client.show_quota(t_id) else: for t in self.neutron_client.list_quotas()['quotas']: if (not tenant_id) or (tenant_id == t['tenant_id']): tenant_name = self.identity_client.\ try_get_tenant_name_by_id(t['tenant_id']) data[tenant_name] = {k: v for k, v in t.iteritems() if k != 'tenant_id'} return data def upload_quota(self, quota): identity = self.identity_client for q_name, q_val in quota.iteritems(): tenant_id = identity.get_tenant_id_by_name(q_name) self.neutron_client.update_quota(tenant_id, q_val) def create_quota(self, tenant_id, quota): return self.neutron_client.update_quota(tenant_id, quota) def required_tenants(self, filter_tenant_id=None): old_filter_tanant_id = self.filter_tenant_id self.filter_tenant_id = filter_tenant_id tenant_ids = set() for shared_net in self.get_shared_networks_raw(): tenant_ids.add(shared_net['tenant_id']) for router in self.get_routers_raw(): tenant_ids.add(router['tenant_id']) self.filter_tenant_id = old_filter_tanant_id return list(tenant_ids) def deploy(self, info): """ Deploy network resources to DST Have non trivial behavior when enabled keep_floatingip and change_router_ips. Example: Initial state: src cloud with router external ip 123.0.0.5 and FloatingIP 123.0.0.4 Migrate resources: 1. Move FloatingIP to dst. On dst we have FloatingIP 123.0.0.4 2. Create FloatingIP on dst as stub for router IP. On dst we have two FloatingIP [123.0.0.4, 123.0.0.5]. IP 123.0.0.5 exists only in OpenStack DB and not crush src network. 3. Create router on dst. (here is the main idea) As you see above, ips 123.0.0.4 and 123.0.0.5 already allocated, then OpenStack must allocate another ip for router (e.g. 123.0.0.6). 4. FloatingIP 123.0.0.5 is not needed anymore. We use it on 1.3. step for not allow OpenStack create router with this ip. 
It will be released if you enable clean_router_ips_stub in config After resource migration we have: src router external ip 123.0.0.5 and FloatingIP 123.0.0.4 dst router external ip 123.0.0.6 and FloatingIP 123.0.0.4 """ deploy_info = info self.upload_quota(deploy_info['quota']) self.upload_networks(deploy_info['networks'], deploy_info['meta']['segmentation_ids'], deploy_info['detached_ports']) dst_router_ip_ids = None if self.config.migrate.keep_floatingip: self.upload_floatingips(deploy_info['networks'], deploy_info['floating_ips']) if self.config.migrate.change_router_ips: subnets_map = {subnet['id']: subnet for subnet in deploy_info['subnets']} router_ips = self.extract_router_ips_as_floating_ips( subnets_map, deploy_info['routers']) dst_router_ip_ids = self.upload_floatingips( deploy_info['networks'], router_ips) self.upload_routers(deploy_info['networks'], deploy_info['subnets'], deploy_info['routers']) if self.config.migrate.clean_router_ips_stub and dst_router_ip_ids: for router_ip_stub in dst_router_ip_ids: self.neutron_client.delete_floatingip(router_ip_stub) self.upload_neutron_security_groups(deploy_info['security_groups']) self.upload_sec_group_rules(deploy_info['security_groups']) if self.config.migrate.keep_lbaas: self.upload_lb_pools(deploy_info['lb_pools'], deploy_info['subnets']) self.upload_lb_monitors(deploy_info['lb_monitors']) self.associate_lb_monitors(deploy_info['lb_pools'], deploy_info['lb_monitors']) self.upload_lb_members(deploy_info['lb_members'], deploy_info['lb_pools']) self.upload_lb_vips(deploy_info['lb_vips'], deploy_info['lb_pools'], deploy_info['subnets']) return deploy_info def extract_router_ips_as_floating_ips(self, subnets, routers_info): result = [] tenant = self.config.migrate.router_ips_stub_tenant for router_info in routers_info: router = Router(router_info, subnets) tenant_name = tenant if tenant else router.tenant_name if router.ext_net_id: result.append({'tenant_name': tenant_name, 'floating_network_id': router.ext_net_id, 'floating_ip_address': router.ext_ip}) return result def get_mac_by_ip(self, ip_address, instance_id): for port in self.get_ports_list(device_id=instance_id): for fixed_ip_info in port['fixed_ips']: if fixed_ip_info['ip_address'] == ip_address: return port["mac_address"] def get_instance_network_info(self, instance_id): ports = [] for port in self.get_ports_list(device_id=instance_id): ports.append({ 'ip_addresses': [x['ip_address'] for x in port['fixed_ips']], 'mac_address': port['mac_address'], 'floatingip': self.get_port_floating_ip(port['id']), 'allowed_address_pairs': port.get('allowed_address_pairs', []), }) return ports def get_port_floating_ip(self, port_id): floating_ips = self.neutron_client.list_floatingips( port_id=port_id)['floatingips'] if floating_ips: LOG.debug('Got %d floating IP for port %s', len(floating_ips), port_id) return floating_ips[0]['floating_ip_address'] else: return None def get_ports_list(self, **kwargs): return self.neutron_client.list_ports(**kwargs)['ports'] def create_port(self, net_id, mac_address, ip_addresses, tenant_id, keep_ip, sg_ids=None, allowed_address_pairs=None): param_create_port = {'network_id': net_id, 'tenant_id': tenant_id} if mac_address: param_create_port['mac_address'] = mac_address if sg_ids: param_create_port['security_groups'] = sg_ids if keep_ip: param_create_port['fixed_ips'] = [{"ip_address": ip} for ip in ip_addresses] if allowed_address_pairs is not None: param_create_port['allowed_address_pairs'] = allowed_address_pairs with 
ksresource.AddAdminUserToNonAdminTenant( self.identity_client.keystone_client, self.config.cloud.user, self.config.cloud.tenant): LOG.debug("Creating port IP '%s', MAC '%s' on net '%s'", param_create_port.get('fixed_ips'), mac_address, net_id) return self.neutron_client.create_port( {'port': param_create_port})['port'] def delete_port(self, port_id): return self.neutron_client.delete_port(port_id) def get_network(self, network_info, tenant_id, keep_ip=False): if keep_ip: addresses = [ipaddr.IPAddress(ip) for ip in network_info['ip_addresses']] private = self.neutron_client.list_networks( tenant_id=tenant_id)['networks'] shared = self.neutron_client.list_networks(shared=True)['networks'] for net in private + shared: subnets = self.neutron_client.list_subnets( network_id=net['id'])['subnets'] if all(any(ipaddr.IPNetwork(subnet['cidr']).Contains(ip) for subnet in subnets) for ip in addresses): return net if 'id' in network_info: networks = self.neutron_client.list_networks( id=network_info['id'])['networks'] if len(networks) > 0: return networks[0] if 'name' in network_info: networks = self.neutron_client.list_networks( name=network_info['name'])['networks'] if len(networks) > 0: return networks[0] LOG.error('Failed to find network %s in tenant %s; keep_ip = %s', repr(network_info), tenant_id, keep_ip) raise exception.AbortMigrationError("Can't find suitable network") def check_existing_port(self, network_id, mac=None, ip_address=None, ip_addresses=None, existing_ports=None): if ip_addresses is None: ip_addresses = [] if ip_address is not None and ip_address not in ip_addresses: ip_addresses.append(ip_address) if existing_ports is None: existing_ports = self.get_ports_list( fields=['network_id', 'mac_address', 'id', 'fixed_ips', 'device_owner'], network_id=network_id) for port in existing_ports: if port['network_id'] != network_id: continue if port['mac_address'] == mac: return port for fixed_ip in port['fixed_ips']: if fixed_ip['ip_address'] in ip_addresses: return port return None @staticmethod def convert(neutron_object, cloud, obj_name): """Convert OpenStack Neutron network object to CloudFerry object. :param neutron_object: Direct OS NeutronNetwork object to convert, :cloud: Cloud object, :obj_name: Name of NeutronNetwork object to convert. List of possible values: 'network', 'subnet', 'router', 'floating_ip', 'security_group', 'rule'. 
""" obj_map = { 'network': NeutronNetwork.convert_networks, 'subnet': NeutronNetwork.convert_subnets, 'router': NeutronNetwork.convert_routers, 'floating_ip': NeutronNetwork.convert_floatingips, 'security_group': NeutronNetwork.convert_security_groups, 'rule': NeutronNetwork.convert_rules, 'lb_pool': NeutronNetwork.convert_lb_pools, 'lb_member': NeutronNetwork.convert_lb_members, 'lb_monitor': NeutronNetwork.convert_lb_monitors, 'lb_vip': NeutronNetwork.convert_lb_vips } return obj_map[obj_name](neutron_object, cloud) def convert_networks(self, net, cloud): identity_res = cloud.resources[utl.IDENTITY_RESOURCE] net_res = cloud.resources[utl.NETWORK_RESOURCE] get_tenant_name = identity_res.get_tenants_func() subnets = [] subnets_hash = set() for subnet in net['subnets']: snet = self.convert_subnets(subnet, cloud) subnets.append(snet) subnets_hash.add(snet['res_hash']) result = { 'name': net['name'], 'id': net['id'], 'admin_state_up': net['admin_state_up'], 'shared': net['shared'], 'tenant_id': net['tenant_id'], 'tenant_name': get_tenant_name(net['tenant_id']), 'subnets': subnets, 'router:external': net['router:external'], 'provider:physical_network': net['provider:physical_network'], 'provider:network_type': net['provider:network_type'], 'provider:segmentation_id': net['provider:segmentation_id'], 'subnets_hash': subnets_hash, 'meta': {}, } res_hash = net_res.get_resource_hash(result, 'name', 'shared', 'tenant_name', 'router:external', 'admin_state_up', 'provider:physical_network', 'provider:network_type') result['res_hash'] = res_hash return result @staticmethod def convert_subnets(snet, cloud): identity_res = cloud.resources[utl.IDENTITY_RESOURCE] network_res = cloud.resources[utl.NETWORK_RESOURCE] get_tenant_name = identity_res.get_tenants_func() networks_list = network_res.get_networks_list() net = get_network_from_list_by_id(snet['network_id'], networks_list) cidr = str(netaddr.IPNetwork(snet['cidr']).cidr) result = { 'name': snet['name'], 'id': snet['id'], 'enable_dhcp': snet['enable_dhcp'], 'allocation_pools': snet['allocation_pools'], 'gateway_ip': snet['gateway_ip'], 'ip_version': snet['ip_version'], 'cidr': cidr, 'network_name': net['name'], 'external': net['router:external'], 'network_id': snet['network_id'], 'tenant_name': get_tenant_name(snet['tenant_id']), 'dns_nameservers': snet['dns_nameservers'], 'meta': {}, } res_hash = network_res.get_resource_hash(result, 'name', 'enable_dhcp', 'ip_version', 'gateway_ip', 'cidr', 'allocation_pools', 'tenant_name', 'network_name') result['res_hash'] = res_hash return result @staticmethod def convert_routers(router, cloud): identity_res = cloud.resources[utl.IDENTITY_RESOURCE] net_res = cloud.resources[utl.NETWORK_RESOURCE] get_tenant_name = identity_res.get_tenants_func() result = { 'name': router['name'], 'id': router['id'], 'admin_state_up': router['admin_state_up'], 'external_gateway_info': router['external_gateway_info'], 'tenant_name': get_tenant_name(router['tenant_id']), 'meta': {}, } result.update(net_res.get_ports_info(router)) if router['external_gateway_info']: networks_list = net_res.get_networks_list() ext_id = router['external_gateway_info']['network_id'] ext_net = get_network_from_list_by_id(ext_id, networks_list) result['ext_net_name'] = ext_net['name'] result['ext_net_tenant_name'] = get_tenant_name( ext_net['tenant_id']) result['ext_net_id'] = router['external_gateway_info'][ 'network_id'] res_hash = net_res.get_resource_hash(result, 'name', 'tenant_name') result['res_hash'] = res_hash return result @staticmethod def 
convert_floatingips(floating, cloud): identity_res = cloud.resources[utl.IDENTITY_RESOURCE] net_res = cloud.resources[utl.NETWORK_RESOURCE] get_tenant_name = identity_res.get_tenants_func() networks_list = net_res.get_networks_list() ext_id = floating['floating_network_id'] extnet = get_network_from_list_by_id(ext_id, networks_list) result = { 'id': floating['id'], 'tenant_id': floating['tenant_id'], 'floating_network_id': ext_id, 'network_name': extnet['name'], 'ext_net_tenant_name': get_tenant_name(extnet['tenant_id']), 'tenant_name': get_tenant_name(floating['tenant_id']), 'fixed_ip_address': floating['fixed_ip_address'], 'floating_ip_address': floating['floating_ip_address'], 'port_id': floating['port_id'], 'meta': {}, } return result @staticmethod def convert_rules(rule, cloud): net_res = cloud.resources[utl.NETWORK_RESOURCE] rule_hash = net_res.get_resource_hash(rule, 'direction', 'remote_ip_prefix', 'protocol', 'port_range_min', 'port_range_max', 'ethertype') result = { 'remote_group_id': rule['remote_group_id'], 'direction': rule['direction'], 'remote_ip_prefix': rule['remote_ip_prefix'], 'protocol': rule['protocol'], 'port_range_min': rule['port_range_min'], 'port_range_max': rule['port_range_max'], 'ethertype': rule['ethertype'], 'security_group_id': rule['security_group_id'], 'rule_hash': rule_hash, 'meta': dict() } return result @staticmethod def convert_security_groups(sec_gr, cloud): identity_res = cloud.resources[utl.IDENTITY_RESOURCE] net_res = cloud.resources[utl.NETWORK_RESOURCE] get_tenant_name = identity_res.get_tenants_func( return_default_tenant=False) result = { 'name': sec_gr['name'], 'id': sec_gr['id'], 'tenant_id': sec_gr['tenant_id'], 'tenant_name': get_tenant_name(sec_gr['tenant_id']), 'description': sec_gr['description'], 'security_group_rules': [NeutronNetwork.convert(gr, cloud, 'rule') for gr in sec_gr['security_group_rules']], 'meta': {}, } res_hash = net_res.get_resource_hash(result, 'name', 'tenant_name', 'description') result['res_hash'] = res_hash return result @staticmethod def convert_lb_pools(pool, cloud): identity_res = cloud.resources[utl.IDENTITY_RESOURCE] net_res = cloud.resources[utl.NETWORK_RESOURCE] get_tenant_name = identity_res.get_tenants_func( return_default_tenant=False) result = { 'name': pool['name'], 'id': pool['id'], 'description': pool['description'], 'lb_method': pool['lb_method'], 'protocol': pool['protocol'], 'subnet_id': pool['subnet_id'], 'provider': pool.get('provider'), 'tenant_id': pool['tenant_id'], 'tenant_name': get_tenant_name(pool['tenant_id']), 'health_monitors': pool['health_monitors'], 'members': pool['members'], 'meta': {} } res_hash = net_res.get_resource_hash(result, 'name', 'tenant_name', 'lb_method', 'protocol') result['res_hash'] = res_hash return result @staticmethod def convert_lb_monitors(monitor, cloud): identity_res = cloud.resources[utl.IDENTITY_RESOURCE] net_res = cloud.resources[utl.NETWORK_RESOURCE] get_tenant_name = identity_res.get_tenants_func( return_default_tenant=False) result = { 'id': monitor['id'], 'tenant_id': monitor['tenant_id'], 'tenant_name': get_tenant_name(monitor['tenant_id']), 'type': monitor['type'], 'delay': monitor['delay'], 'timeout': monitor['timeout'], 'max_retries': monitor['max_retries'], 'url_path': monitor.get('url_path', None), 'expected_codes': monitor.get('expected_codes', None), 'pools': monitor.get('pools'), 'meta': {} } res_hash = net_res.get_resource_hash(result, 'tenant_name', 'type', 'delay', 'timeout', 'max_retries') result['res_hash'] = res_hash return result 
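    # Note (illustrative, not part of the original CloudFerry code): each
    # convert_* helper above attaches a 'res_hash' computed from a stable
    # subset of fields (for a health monitor: tenant_name, type, delay,
    # timeout, max_retries). The upload_* methods below compare these hashes
    # between source and destination clouds, roughly:
    #
    #   existing = [m['res_hash'] for m in self.get_lb_monitors()]
    #   if monitor['res_hash'] not in existing:
    #       ...  # create the resource on the destination cloud
    #
    # so a resource is only created when no equivalent one already exists.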
@staticmethod def convert_lb_members(member, cloud): identity_res = cloud.resources[utl.IDENTITY_RESOURCE] net_res = cloud.resources[utl.NETWORK_RESOURCE] get_tenant_name = identity_res.get_tenants_func( return_default_tenant=False) result = { 'id': member['id'], 'pool_id': member['pool_id'], 'address': member['address'], 'protocol_port': member['protocol_port'], 'weight': member['weight'], 'tenant_id': member['tenant_id'], 'tenant_name': get_tenant_name(member['tenant_id']), 'meta': {} } res_hash = net_res.get_resource_hash(result, 'address', 'protocol_port', 'weight', 'tenant_name') result['res_hash'] = res_hash return result @staticmethod def convert_lb_vips(vip, cloud): identity_res = cloud.resources[utl.IDENTITY_RESOURCE] net_res = cloud.resources[utl.NETWORK_RESOURCE] get_tenant_name = identity_res.get_tenants_func( return_default_tenant=False) result = { 'name': vip['name'], 'id': vip['id'], 'description': vip['description'], 'address': vip['address'], 'protocol': vip['protocol'], 'protocol_port': vip['protocol_port'], 'pool_id': vip['pool_id'], 'connection_limit': vip['connection_limit'], 'session_persistence': vip.get('session_persistence', None), 'tenant_id': vip['tenant_id'], 'subnet_id': vip['subnet_id'], 'tenant_name': get_tenant_name(vip['tenant_id']), 'meta': {} } res_hash = net_res.get_resource_hash(result, 'name', 'address', 'protocol', 'protocol_port', 'tenant_name') result['res_hash'] = res_hash return result def get_shared_networks_raw(self): """Returns list of external and shared networks in raw neutron object format""" external = self.get_networks_raw({'router:external': True}) shared = self.get_networks_raw({'shared': True}) return external + shared def get_networks_raw(self, search_dict): """Groups networks with subnets in raw `NeutronClient` format""" neutron = self.neutron_client nets = neutron.list_networks(**search_dict)['networks'] subnets_list = self.get_subnets_list() for net in nets: subnets = [] for subnet_id in net['subnets']: subnets.append(get_subnet_from_list_by_id(subnet_id, subnets_list)) net['subnets'] = subnets return nets def get_networks(self, tenant_id=''): LOG.info("Get networks...") networks = self.get_networks_raw({'tenant_id': tenant_id}) networks_info = [] for net in networks: cf_net = self.convert_networks(net, self.cloud) LOG.debug("Getting info about network '%s' (%s):\n%s", cf_net['name'], cf_net['id'], pprint.pformat(cf_net)) networks_info.append(cf_net) LOG.info("Done.") return networks_info def get_networks_list(self, tenant_id=''): return self.neutron_client.list_networks( tenant_id=tenant_id)['networks'] def get_subnets_list(self, tenant_id=''): return self.neutron_client.list_subnets(tenant_id=tenant_id)['subnets'] def get_detached_ports(self, tenant_id=''): ports = self.neutron_client.list_ports(tenant_id=tenant_id)['ports'] return [p for p in ports if not p['device_owner']] def get_subnets(self, tenant_id=''): LOG.info("Get subnets...") subnets = self.get_subnets_list(tenant_id) subnets_info = [] for snet in subnets: subnet = self.convert(snet, self.cloud, 'subnet') subnets_info.append(subnet) LOG.info("Done") return subnets_info def reset_subnet_dhcp(self, subnet_id, dhcp_flag): LOG.debug('Setting enable_dhcp to %s for subnet %s', dhcp_flag, subnet_id) subnet_info = { 'subnet': { 'enable_dhcp': dhcp_flag } } return self.neutron_client.update_subnet(subnet_id, subnet_info) def get_ports_info(self, router): LOG.debug("Finding all ports connected to router '%s'", router['name']) ports_list = self.get_ports_list() ports = 
get_ports_by_device_id_from_list(router['id'], ports_list) subnet_ids = [] ips = [] for port in ports: for ip_info in port['fixed_ips']: ips.append(ip_info['ip_address']) subnet_ids.append(ip_info['subnet_id']) return {'ips': set(ips), 'subnet_ids': set(subnet_ids)} def get_routers_raw(self): routers = self.neutron_client.list_routers()['routers'] if self.filter_tenant_id: subnet_ids = { sn['id'] for sn in self.get_subnets_list(self.filter_tenant_id)} return [r for r in routers if (r['tenant_id'] == self.filter_tenant_id or subnet_ids & self.get_ports_info(r)['subnet_ids'])] return routers def get_routers(self): LOG.info("Get routers") return [self.convert_routers(r, self.cloud) for r in self.get_routers_raw()] def get_floatingips(self, tenant_id=''): LOG.info("Get floatingips...") floatings = self.neutron_client.list_floatingips( tenant_id=tenant_id)['floatingips'] floatingips_info = [] for floating in floatings: floatingip_info = self.convert(floating, self.cloud, 'floating_ip') floatingips_info.append(floatingip_info) LOG.info("Done") return floatingips_info def get_security_groups(self, tenant_id=''): return self.neutron_client.list_security_groups( tenant_id=tenant_id)['security_groups'] def get_sec_gr_and_rules(self, tenant_id=''): LOG.info("Getting security groups and rules...") service_tenant_name = self.config.cloud.service_tenant service_tenant_id = \ self.identity_client.get_tenant_id_by_name(service_tenant_name) sec_grs = self.get_security_groups(tenant_id) sec_groups_info = [] for sec_gr in sec_grs: if sec_gr['tenant_id'] != service_tenant_id: sec_gr_info = self.convert(sec_gr, self.cloud, 'security_group') if not sec_gr_info['tenant_name']: # Skip security group from undefined tenant LOG.warning("Security group '%s' (%s) from tenant %s " "has been skipped.", sec_gr['name'], sec_gr['id'], sec_gr['tenant_id']) continue sec_groups_info.append(sec_gr_info) LOG.info("Done") return sec_groups_info def get_lb_pools(self, tenant_id=''): LOG.info("Getting load balancer pools...") pools = self.neutron_client.list_pools(tenant_id=tenant_id)['pools'] pools_info = [] for pool in pools: pool_info = self.convert(pool, self.cloud, 'lb_pool') pools_info.append(pool_info) LOG.info("Done") return pools_info def get_lb_monitors(self, tenant_id=''): LOG.info("Getting load balancer monitors...") monitors = \ self.neutron_client.list_health_monitors( tenant_id=tenant_id)['health_monitors'] monitors_info = [] for mon in monitors: mon_info = self.convert(mon, self.cloud, 'lb_monitor') monitors_info.append(mon_info) LOG.info("Done") return monitors_info def get_lb_members(self, tenant_id=''): LOG.info("Getting load balancer members...") members = self.neutron_client.list_members( tenant_id=tenant_id)['members'] members_info = [] for member in members: member_info = self.convert(member, self.cloud, 'lb_member') members_info.append(member_info) LOG.info("Done") return members_info def get_lb_vips(self, tenant_id=''): LOG.info("Getting load balancer VIPs...") vips = self.neutron_client.list_vips( tenant_id=tenant_id)['vips'] vips_info = [] for vip in vips: vip_info = self.convert(vip, self.cloud, 'lb_vip') vips_info.append(vip_info) LOG.info("Done") return vips_info def upload_lb_vips(self, vips, pools, subnets): LOG.info("Creating load balancer VIPs on destination") existing_vips = self.get_lb_vips() existing_vips_hashlist = [ex_vip['res_hash'] for ex_vip in existing_vips] existing_pools = self.get_lb_pools() existing_snets = self.get_subnets() for vip in vips: if not vip['tenant_name']: continue if 
vip['res_hash'] not in existing_vips_hashlist: tenant_id = self.identity_client.get_tenant_id_by_name( vip['tenant_name']) pool_hash = self.get_res_hash_by_id(pools, vip['pool_id']) dst_pool = self.get_res_by_hash(existing_pools, pool_hash) snet_hash = self.get_res_hash_by_id(subnets, vip['subnet_id']) dst_subnet = self.get_res_by_hash(existing_snets, snet_hash) vip_info = { 'vip': { 'name': vip['name'], 'description': vip['description'], 'address': vip['address'], 'protocol': vip['protocol'], 'protocol_port': vip['protocol_port'], 'connection_limit': vip['connection_limit'], 'pool_id': dst_pool['id'], 'tenant_id': tenant_id, 'subnet_id': dst_subnet['id'] } } if vip['session_persistence']: vip_info['vip']['session_persistence'] = \ vip['session_persistence'] vip['meta']['id'] = self.neutron_client.create_vip( vip_info)['vip']['id'] else: LOG.info("| Dst cloud already has the same VIP " "with address %s in tenant %s", vip['address'], vip['tenant_name']) LOG.info("Done") def upload_lb_members(self, members, pools): LOG.info("Creating load balancer members...") existing_members = self.get_lb_members() existing_members_hashlist = \ [ex_member['res_hash'] for ex_member in existing_members] existing_pools = self.get_lb_pools() for member in members: if not member['tenant_name']: continue if member['res_hash'] not in existing_members_hashlist: tenant_id = self.identity_client.get_tenant_id_by_name( member['tenant_name']) pool_hash = self.get_res_hash_by_id(pools, member['pool_id']) dst_pool = self.get_res_by_hash(existing_pools, pool_hash) member_info = { 'member': { 'protocol_port': member["protocol_port"], 'address': member['address'], 'pool_id': dst_pool['id'], 'tenant_id': tenant_id } } member['meta']['id'] = self.neutron_client.create_member( member_info)['member']['id'] else: LOG.info("| Dst cloud already has the same member " "with address %s in tenant %s", member['address'], member['tenant_name']) LOG.info("Done") def upload_lb_monitors(self, monitors): LOG.info("Creating load balancer monitors on destination...") existing_mons = self.get_lb_monitors() existing_mons_hashlist = \ [ex_mon['res_hash'] for ex_mon in existing_mons] for mon in monitors: if not mon['tenant_name']: continue if mon['res_hash'] not in existing_mons_hashlist: tenant_id = self.identity_client.get_tenant_id_by_name( mon['tenant_name']) mon_info = { 'health_monitor': { 'tenant_id': tenant_id, 'type': mon['type'], 'delay': mon['delay'], 'timeout': mon['timeout'], 'max_retries': mon['max_retries'] } } if mon['url_path']: mon_info['health_monitor']['url_path'] = mon['url_path'] mon_info['health_monitor']['expected_codes'] = \ mon['expected_codes'] mon['meta']['id'] = self.neutron_client.create_health_monitor( mon_info)['health_monitor']['id'] else: LOG.info("| Dst cloud already has the same healthmonitor " "with type %s in tenant %s", mon['type'], mon['tenant_name']) LOG.info("Done") def associate_lb_monitors(self, pools, monitors): LOG.info("Associating balancer monitors on destination...") existing_pools = self.get_lb_pools() existing_monitors = self.get_lb_monitors() for pool in pools: if not pool['tenant_name']: continue pool_hash = self.get_res_hash_by_id(pools, pool['id']) dst_pool = self.get_res_by_hash(existing_pools, pool_hash) for monitor_id in pool['health_monitors']: monitor_hash = self.get_res_hash_by_id(monitors, monitor_id) dst_monitor = self.get_res_by_hash(existing_monitors, monitor_hash) if dst_monitor['id'] not in dst_pool['health_monitors']: dst_monitor_info = { 'health_monitor': { 'id': 
dst_monitor['id'] } } self.neutron_client.associate_health_monitor( dst_pool['id'], dst_monitor_info) else: LOG.info( "Dst pool with name %s already has associated the " "healthmonitor with id %s in tenant %s", dst_pool['name'], dst_monitor['id'], dst_monitor['tenant_name']) LOG.info("Done") def upload_lb_pools(self, pools, subnets): LOG.info("Creating load balancer pools on destination...") existing_pools = self.get_lb_pools() existing_pools_hashlist = \ [ex_pool['res_hash'] for ex_pool in existing_pools] existing_subnets = self.get_subnets() for pool in pools: if pool['res_hash'] not in existing_pools_hashlist and \ pool['tenant_name']: tenant_id = self.identity_client.get_tenant_id_by_name( pool['tenant_name']) snet_hash = self.get_res_hash_by_id(subnets, pool['subnet_id']) snet_id = self.get_res_by_hash(existing_subnets, snet_hash)['id'] pool_info = { 'pool': { 'name': pool['name'], 'description': pool['description'], 'tenant_id': tenant_id, 'subnet_id': snet_id, 'protocol': pool['protocol'], 'lb_method': pool['lb_method'] } } if pool.get('provider'): pool_info['pool']['provider'] = pool.get('provider') LOG.debug("Creating LB pool '%s'", pool['name']) pool['meta']['id'] = \ self.neutron_client.create_pool(pool_info)['pool']['id'] else: LOG.info("| Dst cloud already has the same pool " "with name %s in tenant %s", pool['name'], pool['tenant_name']) LOG.info("Done") def upload_neutron_security_groups(self, sec_groups): LOG.info("Creating neutron security groups on destination...") exist_secgrs = self.get_sec_gr_and_rules() exis_secgrs_hashlist = [ex_sg['res_hash'] for ex_sg in exist_secgrs] for sec_group in sec_groups: if sec_group['name'] != DEFAULT_SECGR: if sec_group['res_hash'] not in exis_secgrs_hashlist: tenant_id = \ self.identity_client.get_tenant_id_by_name( sec_group['tenant_name'] ) sg_info = \ { 'security_group': { 'name': sec_group['name'], 'tenant_id': tenant_id, 'description': sec_group['description'] } } sec_group['meta']['id'] = self.neutron_client.\ create_security_group(sg_info)['security_group']['id'] LOG.info("Done") def upload_sec_group_rules(self, sec_groups): LOG.info("Creating neutron security group rules on destination...") ex_secgrs = self.get_sec_gr_and_rules() for sec_gr in sec_groups: ex_secgr = \ self.get_res_by_hash(ex_secgrs, sec_gr['res_hash']) if ex_secgr: exrules_hlist = \ [r['rule_hash'] for r in ex_secgr['security_group_rules']] else: exrules_hlist = [] for rule in sec_gr['security_group_rules']: if rule['protocol'] \ and (rule['rule_hash'] not in exrules_hlist): rinfo = \ {'security_group_rule': { 'direction': rule['direction'], 'protocol': rule['protocol'], 'port_range_min': rule['port_range_min'], 'port_range_max': rule['port_range_max'], 'ethertype': rule['ethertype'], 'remote_ip_prefix': rule['remote_ip_prefix'], 'security_group_id': ex_secgr['id'], 'tenant_id': ex_secgr['tenant_id']}} if rule['remote_group_id']: remote_sghash = \ self.get_res_hash_by_id(sec_groups, rule['remote_group_id']) rem_ex_sec_gr = \ self.get_res_by_hash(ex_secgrs, remote_sghash) rinfo['security_group_rule']['remote_group_id'] = \ rem_ex_sec_gr['id'] LOG.debug("Creating security group %s", rinfo) new_rule = \ self.neutron_client.create_security_group_rule(rinfo) rule['meta']['id'] = new_rule['security_group_rule']['id'] LOG.info("Done") def upload_networks(self, networks, src_seg_ids, detached_ports): LOG.info("Creating networks on destination") identity = self.identity_client existing_networks = self.get_networks() # we need to handle duplicates in segmentation ids 
dst_seg_ids = get_segmentation_ids_from_net_list(existing_networks) for src_net in networks: network_detached_ports = [p for p in detached_ports if p['network_id'] == src_net['id']] # Check network for existence on destination cloud dst_net = self.get_dst_net_by_src_net(existing_networks, src_net) if dst_net: LOG.info("DST cloud already has the same " "network with name '%s' in tenant '%s'", src_net['name'], src_net['tenant_name']) self.deploy_detached_ports(dst_net, network_detached_ports) continue LOG.debug("Trying to create network '%s'", src_net['name']) tenant_id = identity.get_tenant_id_by_name(src_net['tenant_name']) if tenant_id is None: LOG.warning("Tenant '%s' is not available on destination! " "Make sure you migrated identity (keystone) " "resources! Skipping network '%s'.", src_net['tenant_name'], src_net['name']) continue no_extnet_migration = ( src_net.get('router:external') and not self.config.migrate.migrate_extnets or (src_net['id'] in self.ext_net_map)) if no_extnet_migration: LOG.debug("External networks migration is disabled in the " "config OR external networks mapping is enabled. " "Skipping external network: '%s (%s)'", src_net['name'], src_net['id']) continue # create dict, representing basic info about network network_info = { 'network': { 'tenant_id': tenant_id, 'admin_state_up': src_net["admin_state_up"], 'shared': src_net["shared"], 'name': src_net['name'], 'router:external': src_net['router:external'] } } phys_net = src_net["provider:physical_network"] network_type = src_net['provider:network_type'] seg_id = src_net["provider:segmentation_id"] if phys_net or (src_net['provider:network_type'] in ['gre', 'vxlan']): # Update network info with additional arguments. # We need to check if we have parameter # "provider:physical_network" or param # "provider:network_type" either is 'gre' or 'vxlan'. # If condition is satisfied, we need to specify 2 more params: # "provider:network_type" and "provider:segmentation_id". list_update_atr = ["provider:network_type"] if phys_net: list_update_atr.append("provider:physical_network") for atr in list_update_atr: network_info['network'].update({atr: src_net.get(atr)}) # Check segmentation ID for overlapping # If it doesn't overlap with DST, save the same segmentation ID # Otherwise pick free segmentation ID, which does not overlap # with ANY segmentation ID on SRC if seg_id is not None: # Segmentation ID exists; Check for overlapping seg_id_overlaps = (network_type in dst_seg_ids and seg_id in dst_seg_ids[network_type]) if seg_id_overlaps: # Choose the lowest free segmentation ID, that also # does not overlap with SRC new_seg_id = generate_new_segmentation_id(src_seg_ids, dst_seg_ids, network_type) LOG.debug("'%s' segmentation ID '%s' overlaps with " "DST. 
Generating new one: '%s'.", network_type, seg_id, new_seg_id) # Use it for network network_info['network']['provider:segmentation_id'] = ( new_seg_id) # Update DST segmentation IDs with the just created one dst_seg_ids[network_type].append(new_seg_id) else: # Otherwise use original segmentation ID from SRC network_info['network']['provider:segmentation_id'] = ( seg_id) created_network = self.create_network(src_net, network_info) self.deploy_detached_ports(created_network, network_detached_ports) def deploy_detached_ports(self, net, ports): for subnet in net['subnets']: self.reset_subnet_dhcp(subnet['id'], False) existing_ports = {p['id']: p for p in self.get_ports_list(network_id=net['id'])} for port in ports: ip_addresses = [fip['ip_address'] for fip in port['fixed_ips']] existing_port = self.check_existing_port( net['id'], port['mac_address'], ip_addresses=ip_addresses, existing_ports=existing_ports.values()) if existing_port is not None: if existing_port['mac_address'] == port['mac_address']: LOG.debug('Port %s already migrated to %s', port['id'], existing_port['id']) continue if existing_port['device_owner'].startswith('network:') or \ not existing_port['device_owner']: LOG.debug('Deleting port %s from DST', repr(existing_port)) self.delete_port(existing_port['id']) del existing_ports[existing_port['id']] else: raise exception.AbortMigrationError( 'Can\'t migrate port %s conflict with port %s' % (port['id'], existing_port['id'])) self.create_port(net['id'], port['mac_address'], ip_addresses, net['tenant_id'], True) for subnet in net['subnets']: if subnet['enable_dhcp']: self.reset_subnet_dhcp(subnet['id'], True) def create_network(self, src_net, network_info): try: LOG.debug("creating network with args: '%s'", pprint.pformat(network_info)) created_net = self.neutron_client.create_network(network_info) created_net = created_net['network'] LOG.info("Created net '%s'", created_net['name']) except neutron_exc.NeutronClientException as e: LOG.warning("Cannot create network on destination: %s. " "Destination cloud already has the same network. 
May " "result in port allocation errors, such as VM IP " "allocation, floating IP allocation, router IP " "allocation, etc.", e) return for snet in src_net['subnets']: subnet_info = { 'subnet': { 'name': snet['name'], 'enable_dhcp': snet['enable_dhcp'], 'network_id': created_net['id'], 'cidr': snet['cidr'], 'allocation_pools': snet['allocation_pools'], 'gateway_ip': snet['gateway_ip'], 'ip_version': snet['ip_version'], 'dns_nameservers': snet['dns_nameservers'], 'tenant_id': created_net['tenant_id'] } } try: created_subnet = self.neutron_client.create_subnet(subnet_info) created_subnet = created_subnet['subnet'] snet['meta']['id'] = created_subnet['id'] LOG.info("Created subnet '%s' in net '%s'", created_subnet['cidr'], created_net['name']) created_net['subnets'].append(created_subnet) except neutron_exc.NeutronClientException: LOG.info("Subnet '%s' (%s) already exists, skipping", snet['name'], snet['cidr']) return created_net def upload_routers(self, networks, subnets, routers): LOG.info("Creating routers on destination") existing_subnets = self.get_subnets() existing_routers = self.get_routers() for router in routers: tenant_id = self.identity_client.get_tenant_id_by_name( router['tenant_name']) r_info = {'router': {'name': router['name'], 'tenant_id': tenant_id}} existing_router = self.get_res_by_hash(existing_routers, router['res_hash']) if not existing_router: LOG.debug("Creating router %s", pprint.pformat(r_info)) existing_router = self.convert_routers( self.neutron_client.create_router(r_info)['router'], self.cloud) router['meta']['id'] = existing_router['id'] self.add_router_interfaces(router, existing_router, subnets, existing_subnets) ex_gw_info = router['external_gateway_info'] if ex_gw_info: self.add_router_gateway(existing_router, router['ext_net_id'], networks, ex_gw_info.get('enable_snat')) def add_router_gateway(self, dst_router, ext_net_id, src_nets, set_snat=None): """ :param set_snat: possible values: 1. `None` - do not update, useful in cases when destination cloud does not support SNAT for external networks (pre-icehouse); 2. `True` - enable SNAT 3. 
`False` - disable SNAT """ dst_nets = self.get_networks() dst_net_id = self.get_new_extnet_id(ext_net_id, src_nets, dst_nets) if dst_net_id: info = {'network_id': dst_net_id} if set_snat is not None: info['enable_snat'] = set_snat LOG.debug("Setting the external network (%s) gateway for a router " "'%s' (%s)", dst_net_id, dst_router['name'], dst_router['id']) self.neutron_client.add_gateway_router(dst_router['id'], info) else: LOG.warning('External (%s) network is not exists on destination', ext_net_id) def add_router_interfaces(self, src_router, dst_router, src_subnets, dst_subnets): for subnet_id in src_router['subnet_ids']: subnet_hash = self.get_res_hash_by_id(src_subnets, subnet_id) src_subnet = self.get_res_by_hash(src_subnets, subnet_hash) if src_subnet['external']: LOG.debug("NOT connecting subnet '%s' to router '%s' because " "it's connected to external network", subnet_id, dst_router['name']) continue existing_subnet = self.get_res_by_hash(dst_subnets, subnet_hash) if existing_subnet['id'] in dst_router['subnet_ids']: continue LOG.debug("Adding subnet '%s' to router '%s'", subnet_id, dst_router['name']) try: self.neutron_client.add_interface_router( dst_router['id'], {"subnet_id": existing_subnet['id']}) except neutron_exc.NeutronClientException as e: LOG.debug(e, exc_info=True) LOG.warning("Couldn't add interface to subnet %s to router %s:" "\n%s", existing_subnet['id'], dst_router['id'], e) def upload_floatingips(self, networks, src_floats): """Creates floating IPs on destination Process: 1. Create floating IP on destination using neutron APIs in particular tenant. This allocates first IP address available in external network. 2. If keep_floating_ips option is set: 2.1. Modify IP address of a floating IP to be the same as on destination. This is done from the DB level. 2.2. Else - do not modify floating IP address 3. Return list of ID of new floating IPs """ LOG.info("Uploading floating IPs...") existing_networks = self.get_networks() new_floating_ids = [] fips_dst = self.neutron_client.list_floatingips()['floatingips'] ipfloatings = {fip['floating_ip_address']: fip['id'] for fip in fips_dst} for fip in src_floats: ip = fip['floating_ip_address'] if ip in ipfloatings: new_floating_ids.append(ipfloatings[ip]) continue with ksresource.AddAdminUserToNonAdminTenant( self.identity_client.keystone_client, self.config.cloud.user, fip['tenant_name']): ext_net_id = self.get_new_extnet_id( fip['floating_network_id'], networks, existing_networks) if ext_net_id is None: LOG.info("No external net for floating IP, make sure all " "external networks migrated. 
Skipping floating " "IP '%s'", fip['floating_ip_address']) continue tenant = self.identity_client.keystone_client.tenants.find( name=fip['tenant_name']) new_fip = { 'floatingip': { 'floating_network_id': ext_net_id, 'tenant_id': tenant.id } } created_fip = self.create_floatingip(new_fip) if created_fip is None: continue fip_id = created_fip['id'] new_floating_ids.append(fip_id) sqls = [('UPDATE IGNORE floatingips ' 'SET floating_ip_address = "{ip}" ' 'WHERE id = "{fip_id}"').format(ip=ip, fip_id=fip_id), ('UPDATE IGNORE ipallocations ' 'SET ip_address = "{ip}" ' 'WHERE port_id = (' 'SELECT floating_port_id ' 'FROM floatingips ' 'WHERE id = "{fip_id}")').format( ip=ip, fip_id=fip_id), ('DELETE FROM ipavailabilityranges ' 'WHERE allocation_pool_id in ( ' 'SELECT id ' 'FROM ipallocationpools ' 'WHERE subnet_id = ( ' 'SELECT subnet_id ' 'FROM ipallocations ' 'WHERE port_id = ( ' 'SELECT floating_port_id ' 'FROM floatingips ' 'WHERE id = "{fip_id}")))').format( fip_id=fip_id)] LOG.debug(sqls) dst_mysql = self.mysql_connector dst_mysql.batch_execute(sqls) LOG.info("Done") return new_floating_ids def create_floatingip(self, fip): try: LOG.debug("Creating FIP on net '%s'", fip['floatingip']['floating_network_id']) created = self.neutron_client.create_floatingip(fip) return created['floatingip'] except neutron_exc.NeutronClientException as e: LOG.warning("Unable to create floating IP on destination: '%s'", e) def update_floatingip(self, floatingip_id, port_id=None): update_dict = {'floatingip': {'port_id': port_id}} LOG.debug("Associating floating IP '%s' with port '%s'", floatingip_id, port_id) return self.neutron_client.update_floatingip(floatingip_id, update_dict) @staticmethod def get_res_by_hash(existing_resources, resource_hash): for resource in existing_resources: if resource['res_hash'] == resource_hash: return resource @staticmethod def get_res_hash_by_id(resources, resource_id): for resource in resources: if resource['id'] == resource_id: return resource['res_hash'] @staticmethod def get_resource_hash(neutron_resource, *args): net_res = copy.deepcopy(neutron_resource) list_info = list() for arg in args: if not isinstance(net_res[arg], list): list_info.append(net_res[arg]) else: if arg == 'allocation_pools': pools = net_res[arg] net_res[arg] = [ip for pl in pools for ip in pl.values()] for argitem in net_res[arg]: if isinstance(argitem, basestring): argitem = argitem.lower() list_info.append(argitem) hash_list = \ [info.lower() if isinstance(info, basestring) else info for info in list_info] hash_list.sort() return hash(tuple(hash_list)) def get_new_extnet_id(self, src_net_id, src_nets, dst_nets): """ Get ID of similar external network form DST. :param src_net_id: External network ID from SRC cloud, :param src_nets: Networks list from SRC cloud, :param dst_nets: Networks list from DST cloud, :return unicode: External network ID from DST, that matches with the similar network from SRC. """ if src_net_id in self.ext_net_map: dst_net_id = self.ext_net_map[src_net_id] else: src_net = get_network_from_list_by_id(src_net_id, src_nets) dst_net = self.get_dst_net_by_src_net(dst_nets, src_net) if not dst_net: return dst_net_id = dst_net['id'] return dst_net_id @staticmethod def get_dst_net_by_src_net(existing_networks, src_net): """ Get the same Network object from DST cloud. :param existing_networks: Existing networks list on DST cloud, :param src_net: Network object from SRC, :return dict: Network object from DST, that matches with the same network from SRC. 
""" for net in existing_networks: if (net['res_hash'] == src_net['res_hash'] and net['subnets_hash'] == src_net['subnets_hash']): return net class Router(object): """ Represents router_info, extract external ip. Router_info contain list of ips only in different order. Impossible to define external router ip. """ def __init__(self, router_info, subnets): self.id = router_info['id'] self.ext_net_id = router_info.get('ext_net_id', None) self.int_cidr = [] self.tenant_name = router_info['tenant_name'] if self.ext_net_id: subnet_ids = router_info['subnet_ids'] for subnet_id in subnet_ids: subnet = subnets[subnet_id] if subnet['network_id'] == self.ext_net_id: self.ext_cidr = subnet['cidr'] self.ext_subnet_id = subnet_id else: self.int_cidr.append(subnet['cidr']) ext_network = ipaddr.IPNetwork(self.ext_cidr) for ip in router_info['ips']: if ext_network.Contains(ipaddr.IPAddress(ip)): self.ext_ip = ip break def get_network_from_list_by_id(network_id, networks_list): """Get Neutron network by id from provided networks list. :param network_id: Neutron network ID :param networks_list: List of Neutron networks, where target network should be searched """ for net in networks_list: if net['id'] == network_id: return net LOG.warning("Cannot obtain network with id='%s' from provided networks " "list", network_id) def get_subnet_from_list_by_id(subnet_id, subnets_list): """Get Neutron subnet by id from provided subnets list. :param subnet_id: Neutron subnet ID :param subnets_list: List of Neutron subnets, where target subnet should be searched """ for subnet in subnets_list: if subnet['id'] == subnet_id: return subnet LOG.warning("Cannot obtain subnet with id='%s' from provided subnets " "list", subnet_id) def get_ports_by_device_id_from_list(device_id, ports_list): """Get Neutron ports by device ID from provided ports list. :param device_id: Port device ID :param ports_list: List of Neutron ports, where target ports should be searched :result: List of ports, which are belong to specified device ID """ ports = [] for port in ports_list: if port['device_id'] == device_id: ports.append(port) if not ports: LOG.debug("There are no ports with device_id='%s' in provided list", device_id) return ports def get_network_from_list(ip, tenant_id, networks_list, subnets_list): """Get Neutron network by parameters from provided list. :param ip: IP address of VM from this network :param tenant_id: Tenant Id of VM in this network :param networks_list: List of Neutron networks, where target network should be searched :param subnets_list: List of Neutron subnets, where target network should be searched """ instance_ip = ipaddr.IPAddress(ip) for subnet in subnets_list: network_id = subnet['network_id'] net = get_network_from_list_by_id(network_id, networks_list) if subnet['tenant_id'] == tenant_id or net['shared']: if ipaddr.IPNetwork(subnet['cidr']).Contains(instance_ip): return get_network_from_list_by_id(network_id, networks_list) def get_segmentation_ids_from_net_list(networks): """Get busy segmentation IDs from provided networks list. We need to handle duplicates in segmentation ids. Neutron has different validation rules for different network types. For 'gre' and 'vxlan' network types there is no strong requirement for 'physical_network' attribute, if we want to have 'segmentation_id', because traffic is encapsulated in L3 packets. For 'vlan' network type there is a strong requirement for 'physical_network' attribute, if we want to have 'segmentation_id'. :result: Dictionary with busy segmentation IDs. 
Hash is used with structure {"gre": [1, 2, ...], "vlan": [1, 2, ...]} """ used_seg_ids = {} for net in networks: network_has_segmentation_id = ( net["provider:physical_network"] or (net["provider:network_type"] in ['gre', 'vxlan'])) if network_has_segmentation_id: if net["provider:network_type"] not in used_seg_ids: used_seg_ids[net['provider:network_type']] = [] if net["provider:segmentation_id"] is not None: used_seg_ids[net["provider:network_type"]].append( net["provider:segmentation_id"]) return used_seg_ids def generate_new_segmentation_id(src_seg_ids, dst_seg_ids, network_type): """Generate new segmentation ID based on provided info with busy ones. Search for the lowest free segmentation ID. IDs '0' and '1' are reserved in most of network types, so start searching from '2'. For 'vlan' network type ID '4095' is the last one in available range and besides also reserved. Raise AbortMigrationError if reach this ID. :param src_seg_ids: Dictionary with busy segmentation IDs on SRC :param dst_seg_ids: Dictionary with busy segmentation IDs on DST :param network_type: Network type ('vlan', 'vxlan' or 'gre') :result int: New generated free segmentation ID """ src_seg_ids = set(src_seg_ids.get(network_type, [])) dst_seg_ids = set(dst_seg_ids.get(network_type, [])) busy_seg_ids = src_seg_ids | dst_seg_ids free_seg_id = None counter = 2 while free_seg_id is None: if counter not in busy_seg_ids: free_seg_id = counter counter += 1 if free_seg_id >= 4095 and network_type == 'vlan': raise exception.AbortMigrationError("Segmentation IDs limit for 'vlan'" " network type has been exceeded") return free_seg_id
mgrygoriev/CloudFerry
cloudferrylib/os/network/neutron.py
Python
apache-2.0
73,241
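The free-segmentation-ID search in cloudferrylib/os/network/neutron.py above is easier to follow in isolation. The sketch below is a simplified, hypothetical restatement of that logic, assuming plain dicts of busy IDs keyed by network type; it is not the project's actual helper.

def pick_free_segmentation_id(src_seg_ids, dst_seg_ids, network_type):
    """Return the lowest segmentation ID unused on both clouds (illustrative only)."""
    busy = set(src_seg_ids.get(network_type, [])) | set(dst_seg_ids.get(network_type, []))
    candidate = 2  # IDs 0 and 1 are reserved for most network types
    while candidate in busy:
        candidate += 1
        if network_type == 'vlan' and candidate >= 4095:
            # Mirrors the AbortMigrationError raised above when 'vlan' IDs run out.
            raise ValueError("segmentation ID range for 'vlan' exhausted")
    return candidate

# ID 2 is busy on the source and 3 on the destination, so 4 is the first free one.
assert pick_free_segmentation_id({'vlan': [2]}, {'vlan': [3]}, 'vlan') == 4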
from suds.client import Client from nova import exception from nova import db import logging logging.getLogger('suds').setLevel(logging.INFO) def update_for_run_instance(service_url, region_name, server_port1, server_port2, dpid1, dpid2): # check region name client = Client(service_url + "?wsdl") client.service.setServerPort(dpid1, server_port1, region_name) client.service.setServerPort(dpid2, server_port2, region_name) client.service.save() def update_for_terminate_instance(service_url, region_name, server_port1, server_port2, dpid1, dpid2, vlan_id): client = Client(service_url + "?wsdl") client.service.clearServerPort(dpid1, server_port1) client.service.clearServerPort(dpid2, server_port2) client.service.save() dpid_datas = client.service.showSwitchDatapathId() for dpid_data in dpid_datas: ports = client.service.showPorts(dpid_data.dpid) for port in ports: if port.type != "ServerPort": continue if port.regionName == region_name: return remove_region(service_url, region_name, vlan_id) def create_region(service_url, region_name, vlan_id): client = Client(service_url + "?wsdl") try: client.service.createRegion(region_name) client.service.save() except: raise exception.OFCRegionCreationFailed(region_name=region_name) try: switches = db.switch_get_all(None) for switch in switches: client.service.setOuterPortAssociationSetting(switch["dpid"], switch["outer_port"], vlan_id, 65535, region_name) client.service.save() except: client.service.destroyRegion(region_name) client.service.save() raise exception.OFCRegionSettingOuterPortAssocFailed(region_name=region_name, vlan_id=vlan_id) def remove_region(service_url, region_name, vlan_id): client = Client(service_url + "?wsdl") try: switches = db.switch_get_all(None) for switch in switches: client.service.clearOuterPortAssociationSetting(switch["dpid"], switch["outer_port"], vlan_id) client.service.save() except: pass client.service.destroyRegion(region_name) client.service.save() def has_region(service_url, region_name): client = Client(service_url + "?wsdl") return region_name in [x.regionName for x in client.service.showRegion()]
nii-cloud/dodai-compute
nova/virt/dodai/ofc_utils.py
Python
apache-2.0
2,420
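For context on the SOAP helpers in nova/virt/dodai/ofc_utils.py above, here is a hypothetical caller built only from calls that already appear in that file (Client(url + "?wsdl"), showRegion(), regionName); the URL is a placeholder and a reachable OFC endpoint is assumed.

from suds.client import Client


def list_region_names(service_url):
    """Return the region names the OFC controller reports (sketch only)."""
    client = Client(service_url + "?wsdl")
    return [region.regionName for region in client.service.showRegion()]


# Placeholder endpoint; running this requires a live OFC SOAP service.
# print(list_region_names("http://ofc.example.org:8080/services/OFC"))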
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tensorflow input/output utilities.""" import collections import json import math import os import numpy as np import tensorflow.compat.v1 as tf class Features(object): """Feature keys.""" # Waveform(s) of audio observed at receiver(s). RECEIVER_AUDIO = 'receiver_audio' # Images of each source at each microphone, including reverberation. # Images are real valued with shape [sources, microphones, length]. SOURCE_IMAGES = 'source_images' # Boolean diarization labels of shape (sources, length) which indicates # whether a source is active or not. For nonexisting source, it is all zeros. DIARIZATION_LABELS = 'diarization_labels' # Speaker indices (global indices which are contiguous over all training data # starting with 0) that are present in this meeting or meeting chunk with # shape (sources,). If number of speakers present in the meeting is less # than sources, for a non-existing speaker/source, the speaker index is # set to -1. Note that, for a meeting sub-block, we still have all the # speaker indices in the meeting even if not all the speakers are present # in that meeting sub-block. SPEAKER_INDEX = 'speaker_indices' def get_inference_spec(num_receivers=1, num_samples=None): """Returns a specification of features in tf.Examples in roomsim format.""" spec = {} spec[Features.RECEIVER_AUDIO] = tf.FixedLenFeature( [num_receivers, num_samples], tf.float32) return spec def get_roomsim_spec(num_sources, num_receivers, num_samples): """Returns a specification of features in tf.Examples in roomsim format. Args: num_sources: Expected number of sources. num_receivers: Number of microphones in array. num_samples: Expected length of sources in samples. 'None' for variable. Returns: Feature specifications suitable to pass to tf.parse_example. """ spec = {} spec[Features.RECEIVER_AUDIO] = tf.FixedLenFeature( [num_receivers, num_samples], tf.float32) spec[Features.SOURCE_IMAGES] = tf.FixedLenFeature( [num_sources, num_receivers, num_samples], tf.float32) return spec def placeholders_from_spec(feature_spec): """Returns placeholders compatible with a given feature spec.""" placeholders = {} for key, feature in feature_spec.items(): placeholders[key] = tf.placeholder(dtype=feature.dtype, shape=[1] + feature.shape, name=key) return placeholders def _read_meeting_list(meeting_list, meeting_length_type): """Reads meeting list from json file to get necessary information. Args: meeting_list: A meeting list read from a json file. meeting_length_type: One of 'maximum', 'minimum' or 'average'. Since typically meeting lengths are not fixed, we can set the training/eval length to the maximum, minimum or average meeting length in the json file based on the value of this argument. We eventually pad or clip individual meetings to attain the desired constant meeting length in our data reading pipeline. Returns: num_meetings: Number of meetings. max_num_spk_per_meeting: Maximum number of speakers in a meeting. 
max_num_utt_per_spk: Maximum number of utterances per speaker. max_dia_seg_per_utt: Maximum diarization segments per utterance. max_utt_length: Maximum utterance length. meeting_length: Meeting length that will be used. speaker_ids: A list of speaker ids that appear in meetings. """ max_num_spk_per_meeting = 0 max_num_utt_per_meeting = 0 meeting_lengths = [] speaker_id_to_count = collections.defaultdict(int) num_meetings = len(meeting_list) total_spk = 0 total_utt = 0 max_utt_length = 0 max_num_utt_per_spk = 0 max_dia_seg_per_utt = 0 for one_meeting in meeting_list: sources_start_end = one_meeting['utterance_start_end'] meeting_length = int(one_meeting['duration']) num_utt_in_meeting = len(sources_start_end) max_num_utt_per_meeting = max(max_num_utt_per_meeting, num_utt_in_meeting) utt2spk = [] spk2wavs = collections.defaultdict(list) spk_utt_idx = collections.defaultdict(int) for start, end, spkid, wav_path in sources_start_end: max_utt_length = max(max_utt_length, end - start) utt2spk.append(spkid) spk2wavs[spkid].append(wav_path) speaker_id_to_count[spkid] += 1 spk_utt_idx[spkid] += 1 diarization_info = \ one_meeting['diarization_label'][spkid][spk_utt_idx[spkid] - 1] num_seg_in_utt = len(diarization_info) max_dia_seg_per_utt = max(max_dia_seg_per_utt, num_seg_in_utt) speakers_in_meeting = list(set(utt2spk)) num_spk = len(speakers_in_meeting) for spkid in speakers_in_meeting: max_num_utt_per_spk = max(max_num_utt_per_spk, len(set(spk2wavs[spkid]))) max_num_spk_per_meeting = max(max_num_spk_per_meeting, num_spk) total_spk += num_spk total_utt += num_utt_in_meeting meeting_lengths.append(meeting_length) if meeting_length_type == 'maximum': meeting_length = int(math.ceil(np.max(meeting_lengths))) elif meeting_length_type == 'minimum': meeting_length = int(math.floor(np.min(meeting_lengths))) elif meeting_length_type == 'average': meeting_length = int(round(np.mean(meeting_lengths))) elif isinstance(meeting_length_type, int): meeting_length = meeting_length_type else: raise ValueError(f'Unknown meeting_length_type={meeting_length_type}') speaker_ids = sorted(speaker_id_to_count.keys()) tf.logging.info('Read %s meetings from json file.', num_meetings) tf.logging.info('Average number of speakers per meeting = %f.', total_spk / num_meetings) tf.logging.info('Average number of utterances per speaker = %f.', total_utt / total_spk) return (num_meetings, max_num_spk_per_meeting, max_num_utt_per_spk, max_dia_seg_per_utt, max_utt_length, meeting_length, speaker_ids) def _pad_mics_tf(signal, new_mics): """Pads new mic channels to an input tensor and returns the updated tensor. Args: signal: A tf.tensor of shape (input_mics, samples) new_mics: The number of new mic channels to be added (integer scalar tensor) Returns: padded_signal: A tf.tensor of shape (input_mics + new_mics, samples) """ # Take first new_mics channels and shift them by 1 sample. new_inputs = tf.roll(signal[:new_mics, :], shift=1, axis=-1) # Add noise 1e-3 times the RMS value in the signal. 
noise_scale = 1e-3 * tf.sqrt(tf.reduce_mean(tf.square(new_inputs))) new_inputs += noise_scale * tf.random.normal(tf.shape(new_inputs)) return tf.concat((signal, new_inputs), axis=0) def json_to_dataset(json_file, batch_size, parallel_readers=tf.data.experimental.AUTOTUNE, randomize_order=False, num_examples=-1, prefetch_buffer_size=tf.data.experimental.AUTOTUNE, shuffle_buffer_size=5, repeat=True, num_mics=1, sample_rate=16000, use_relative_path=True, meeting_length_type='maximum', num_meeting_subdivisions=1, sensor_noise_range=(0.0, 0.0)): r"""Fetches features from a dictionary and source .wav files. Args: json_file: A json file containing meeting information. batch_size: The number of examples to read. parallel_readers: Number of dataset.map operations that should happen in parallel. randomize_order: Whether to randomly shuffle features. num_examples: Limit number of examples to this value. Unlimited if -1. prefetch_buffer_size: How many batches to prefecth. shuffle_buffer_size: The size of the shuffle buffer. repeat: If True, repeat the dataset. num_mics: The expected number of mics in source wav files. sample_rate: Sample rate of wav files read. use_relative_path: If True, the path for .wav files is relative to the json file, otherwise, the paths are absolute. meeting_length_type: 'maximum', 'minimum' or 'average'. Can also specify an integer value which is the length in samples, which will be used. num_meeting_subdivisions: If > 1, chop the meeting in time into this many chunks. sensor_noise_range: Range of standard deviation for sensor noise. If sensor_noise_range[1] <= 0.0, then no sensor noise is added. Otherwise, white Gaussian sensor noise with uniformly random standard deviation from the provided range is added as the first reference signal. Returns: A batch_size number of features constructed from wav files. Raises: ValueError if max_sources_override is less than assumed max number sources. """ tf.logging.info('Reading %s.', json_file) with open(json_file, 'r') as f: meeting_list = json.load(f) (num_meetings, max_num_spk, max_num_utt_per_spk, max_dia_seg_per_utt, max_utt_length, samples, speaker_id_list) = _read_meeting_list( meeting_list, meeting_length_type) tf.logging.info('Maximum number of speakers per meeting = %s', max_num_spk) tf.logging.info('Maximum number of utterances per speaker = %s', max_num_utt_per_spk) tf.logging.info('Maximum diarization segments per utterance = %s', max_dia_seg_per_utt) tf.logging.info('Maximum utterance length in seconds = %s', max_utt_length/sample_rate) tf.logging.info('Used meeting length in seconds = %s', samples/sample_rate) tf.logging.info('Number of speakers seen in all meetings = %s', len(speaker_id_list)) tf.logging.info('Using %s parallel readers.', parallel_readers) tf.logging.info('shuffle_buffer=%s, prefetch_buffer=%s, num_mics=%s, ' 'randomize=%s.', shuffle_buffer_size, prefetch_buffer_size, num_mics, randomize_order) if use_relative_path: base_path = os.path.dirname(json_file) spkid2idx = {key: idx for idx, key in enumerate(speaker_id_list)} def utterance_info_generator(): """Yields utterance informations from each meeting. Utterance info is in the form of a 6-tuple: wav_path, diarization, spkidx, meeting_scale, start, gain. 
""" default_diarization = np.zeros((max_dia_seg_per_utt, 2), dtype=np.int32) default_utt = ('0', default_diarization, -1, 0.0, 0, 0.0) for one_meeting in meeting_list: meeting_info = collections.defaultdict(list) sources_start_end = one_meeting['utterance_start_end'] num_utt_in_meeting = len(sources_start_end) spk_num_in_meeting = {} new_spknum = 0 spkids_in_meeting = [] spk_utt_idx = collections.defaultdict(int) meeting_scale = float(one_meeting['meeting_scale']) for utt_idx in range(num_utt_in_meeting): start, end, spkid, wav_path = sources_start_end[utt_idx] spkidx = spkid2idx[spkid] if start >= samples: continue if end >= samples: end = samples if spkidx in spk_num_in_meeting: spknum = spk_num_in_meeting[spkidx] else: spknum = new_spknum if spknum > max_num_spk: continue spkids_in_meeting.append(spkidx) spk_num_in_meeting[spkidx] = spknum new_spknum += 1 if use_relative_path: wav_path = os.path.join(base_path, wav_path) gain = one_meeting['utterance_gain'][utt_idx] # Make diarization_labels array. diarization = np.zeros((max_dia_seg_per_utt, 2), dtype=np.int32) spk_utt_idx[spknum] += 1 diarization_info = \ one_meeting['diarization_label'][spkid][spk_utt_idx[spknum] - 1] # Go over diarization segments in utterance. for i, segment_st_end in enumerate(diarization_info): segment_start, segment_end = segment_st_end if segment_start >= samples: continue if segment_end > samples: segment_end = samples adjusted_start = segment_start - start adjusted_end = segment_end - start diarization[i, 0] = adjusted_start diarization[i, 1] = adjusted_end meeting_info[spknum].append((wav_path, diarization, spkidx, meeting_scale, start, gain)) for spknum in range(max_num_spk): if spknum in meeting_info: for utt in range(max_num_utt_per_spk): if utt < len(meeting_info[spknum]): yield meeting_info[spknum][utt] else: yield default_utt else: for utt in range(max_num_utt_per_spk): yield default_utt utterance_info_list = list(utterance_info_generator()) # No need for the original meeting_list from now on. del meeting_list num_utterances = len(utterance_info_list) tensor_shape = [(num_utterances, 1), (num_utterances, max_dia_seg_per_utt, 2), (num_utterances, 1), (num_utterances, 1), (num_utterances, 1), (num_utterances, 1)] tensor_type = [np.string_, np.int32, np.int32, np.float32, np.int32, np.float32] (wav_paths, diarizations, spkindices, meeting_scales, start_samples, utterance_gains) = [np.reshape( tensor, tensor_shape[i]).astype(tensor_type[i]) for i, tensor in enumerate(list(zip(*utterance_info_list)))] dataset = tf.data.Dataset.from_tensor_slices( (wav_paths, diarizations, spkindices, meeting_scales, start_samples, utterance_gains)) if repeat: dataset = dataset.repeat() if randomize_order: # Randomize meeting order for each epoch through the dataset. dataset = dataset.batch(max_num_spk * max_num_utt_per_spk) dataset = dataset.shuffle(num_meetings) dataset = dataset.flat_map( lambda w, d, s, m, t, u: tf.data.Dataset.from_tensor_slices( (w, d, s, m, t, u))) # Read in wav files. 
def decode_wav(wav): audio_bytes = tf.read_file(wav) waveform, _ = tf.audio.decode_wav(audio_bytes, desired_samples=max_utt_length) waveform = tf.transpose(waveform) num_read_mics = tf.shape(waveform)[0] waveform = tf.cond(num_read_mics >= num_mics, lambda: waveform[:num_mics, :], lambda: _pad_mics_tf(waveform, num_mics - num_read_mics)) waveform = tf.reshape(waveform, (num_mics, max_utt_length)) return waveform def decode_wav_or_return_zeros(wav, gain=1.0): return tf.cond( tf.equal(wav, '0'), lambda: tf.zeros((num_mics, max_utt_length), dtype=tf.float32), lambda: gain * decode_wav(wav)) def utterance_reader(wav_path, diarization, spkidx, meet_scale, start, gain): """Reads wave file for utterance and scale it.""" utt_tensor = decode_wav_or_return_zeros(wav_path[0], gain=gain) return utt_tensor, diarization, spkidx, meet_scale, start # Sandwich heavy IO part between prefetch's. dataset = dataset.prefetch(parallel_readers) dataset = dataset.map(utterance_reader, num_parallel_calls=parallel_readers) dataset = dataset.prefetch(parallel_readers) def pad_utterance(utt_tensor, diarization, spkidx, meeting_scale, start): """Pads utterance to meeting length. Args: utt_tensor: Utterance with shape (num_mics, max_utt_length). diarization: Diarization with shape (max_dia_seg_per_utt, 2). spkidx: Speaker index (global) for the utterance. meeting_scale: Target meeting scale. start: Start index of utterance in the meeting. Returns: utt_tensor_padded: Padded utt tensor (num_mics, samples + max_utt_length) diarization_padded: Diarization updated using the start index. spkidx: Speaker index passed unchanged. meeting_scale: Target meeting scale passed unchanged. """ start = start[0] end_paddings = samples - start utt_tensor_padded = tf.pad(utt_tensor, ((0, 0), (start, end_paddings))) diarization_padded = start + diarization return utt_tensor_padded, diarization_padded, spkidx, meeting_scale dataset = dataset.map(pad_utterance, num_parallel_calls=parallel_readers) dataset = dataset.batch(max_num_utt_per_spk) def make_reference(utt_tensor, diarization, spkidx, meeting_scale): """Makes a reference from fixed length utterance tensors. Args: utt_tensor: Utterances with shape (max_num_utt_per_spk, num_mics, samples + max_utt_len) diarization: Diarization ranges with shape (max_num_utt_per_spk, max_dia_seg_per_utt, 2). spkidx: Speaker indices (repeated) with shape (max_num_utt_per_spk) meeting_scale: Target meeting scale (repeated). Returns: reference: Meeting audio with shape (num_mics, samples) diarization_labels: tf.bool with shape (samples) spkidx: Scalar speaker index. meeting_scale: Target meeting scale. 
""" reference_waveform = tf.reduce_sum(utt_tensor, axis=0) reference_waveform = reference_waveform[:, :samples] diarization = tf.reshape(diarization, (max_num_utt_per_spk * max_dia_seg_per_utt, 2)) active_samples_list = [ tf.range(diarization[i, 0], diarization[i, 1]) for i in range(max_num_utt_per_spk * max_dia_seg_per_utt)] active_samples = tf.reshape( tf.concat(active_samples_list, axis=0), (-1, 1)) dia_full_init = tf.zeros((samples + max_utt_length, 1), dtype=tf.int32) dia_full = tf.tensor_scatter_add( dia_full_init, active_samples, tf.ones(tf.shape(active_samples), dtype=tf.int32)) dia_full = tf.cast(dia_full[:samples, 0], dtype=tf.bool) spkidx = spkidx[0] meeting_scale = meeting_scale[0] return reference_waveform, dia_full, spkidx, meeting_scale dataset = dataset.map(make_reference, num_parallel_calls=parallel_readers) dataset = dataset.batch(max_num_spk) # If num_meeting_subdivisions > 1, split time-dependent meeting data in time # into num_meeting_subdivisions equal chunks. Note that speaker ids and # meeting_scale are repeated for each chunk. if num_meeting_subdivisions > 1: def chop_meeting_data(reference_waveforms, diarization_labels, speaker_ids, meeting_scale, nsplit=num_meeting_subdivisions): samples = tf.shape(reference_waveforms)[-1] new_samples = nsplit * (samples // nsplit) reference_waveforms = tf.stack( tf.split(reference_waveforms[..., :new_samples], nsplit, axis=-1), axis=0) diarization_labels = tf.stack( tf.split(diarization_labels[..., :new_samples], nsplit, axis=-1), axis=0) speaker_ids = tf.reshape(speaker_ids, (1, max_num_spk)) speaker_ids = tf.broadcast_to(speaker_ids, (nsplit, max_num_spk)) meeting_scale = meeting_scale[0] * tf.ones((nsplit, max_num_spk)) return tf.data.Dataset.from_tensor_slices((reference_waveforms, diarization_labels, speaker_ids, meeting_scale)) dataset = dataset.flat_map(chop_meeting_data) samples = (samples // num_meeting_subdivisions) # Build mixture and sources waveforms. def combine_mixture_and_sources(reference_waveforms, diarization_labels, speaker_ids, meeting_scale): # waveforms has shape (num_sources, num_mics, num_samples). speaker_ids = tf.reshape(speaker_ids, (max_num_spk,)) meeting_scale = meeting_scale[0] mixture_waveform = tf.reduce_sum(reference_waveforms, axis=0) current_mixture_scale = tf.reduce_max(tf.abs(mixture_waveform)) # Note that when meetings are chopped, we cannot apply a meeting level # scale. Instead, we apply the scale in the chunk level so that each # chunk has a maximum scale equal to the meeting_scale. However, we should # not apply any gain to an all noise chunk to avoid amplifying the noise, # so we try not to scale those chunks by checking the current_mixture_scale # value. 
scale_refs = tf.cond(current_mixture_scale > 0.005, lambda: meeting_scale / current_mixture_scale, lambda: 1.0) reference_waveforms *= scale_refs num_sources = max_num_spk if sensor_noise_range[1] > 0.0: num_sources += 1 sensor_noise_gain = tf.random.uniform((), minval=sensor_noise_range[0], maxval=sensor_noise_range[1]) sensor_noise = sensor_noise_gain * tf.random.normal( (1, num_mics, samples)) reference_waveforms = tf.concat( (sensor_noise, reference_waveforms), axis=0) mixture_waveform = tf.reduce_sum(reference_waveforms, axis=0) reference_waveforms.set_shape((num_sources, num_mics, samples)) mixture_waveform.set_shape((num_mics, samples)) diarization_labels.set_shape((max_num_spk, samples)) speaker_ids.set_shape((max_num_spk,)) return {'receiver_audio': mixture_waveform, 'source_images': reference_waveforms, 'diarization_labels': diarization_labels, 'speaker_indices': speaker_ids, } dataset = dataset.map(combine_mixture_and_sources, num_parallel_calls=parallel_readers) if randomize_order and num_meeting_subdivisions > 1: # It would be good to shuffle examples to avoid having all examples # coming from a single meeting when we split a meeting. dataset = dataset.shuffle(shuffle_buffer_size * num_meeting_subdivisions) dataset = dataset.prefetch(prefetch_buffer_size) dataset = dataset.take(num_examples) dataset = dataset.batch(batch_size, drop_remainder=True) iterator = dataset.make_one_shot_iterator() return iterator.get_next() def input_fn(params): """An input function that uses params['feature_spec']. Args: params: A dictionary of experiment params. Returns: Features specified by params['feature_spec']. If 'inference' exists and is True in params, then placeholders will be returned based on the spec in params['inference_spec'], otherwise a dataset of examples read from params['input_data'] will be returned. """ if params.get('inference', False): feature_spec = params['inference_spec'] with tf.variable_scope('input_audio'): return placeholders_from_spec(feature_spec) else: json_file = params.get('input_data', None) io_params = params.get('io_params', {}) batch_size = params.get('batch_size', None) randomize_order = params.get('randomize_order', False) io_params['randomize_order'] = randomize_order return json_to_dataset(json_file, batch_size, **io_params)
google-research/sound-separation
models/train/data_meeting_io.py
Python
apache-2.0
23,738
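The diarization handling in models/train/data_meeting_io.py above builds boolean activity labels from (start, end) segment ranges via tf.tensor_scatter_add. The NumPy sketch below restates that idea outside the TF graph; it is illustrative only and not part of the pipeline.

import numpy as np


def segments_to_activity(segments, num_samples):
    """Turn (start, end) sample ranges into a boolean speaker-activity vector."""
    activity = np.zeros(num_samples, dtype=bool)
    for start, end in segments:
        start = max(0, min(start, num_samples))
        end = max(0, min(end, num_samples))
        activity[start:end] = True
    return activity


# Two speech segments inside a 10-sample chunk.
print(segments_to_activity([(2, 5), (7, 9)], 10).astype(int))  # [0 0 1 1 1 0 0 1 1 0]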
# -*- coding: utf-8 -*- # # Copyright 2011-2018 Matt Austin # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import, unicode_literals import re from django.conf import settings from django.template import Library, Node, NodeList, TemplateSyntaxError from django.utils.encoding import smart_str from thummer.utils import get_thumbnail register = Library() kw_pat = re.compile(r'^(?P<key>[\w]+)=(?P<value>.+)$') class ThummerNodeBase(Node): """ A Node that renders safely """ nodelist_empty = NodeList() def render(self, context): try: return self._render(context) except Exception: if settings.DEBUG: raise # TODO: Log error return self.nodelist_empty.render(context) def _render(self, context): raise NotImplemented() @register.tag('thummer') class ThummerNode(ThummerNodeBase): child_nodelists = ('nodelist_url', 'nodelist_empty') error_msg = ('Syntax error. Expected: ``thummer url geometry ' '[key1=val1 key2=val2...] as var``') def __init__(self, parser, token): bits = token.split_contents() if len(bits) < 5 or bits[-2] != 'as': raise TemplateSyntaxError(self.error_msg) self.url = parser.compile_filter(bits[1]) self.geometry = parser.compile_filter(bits[2]) self.options = [] for bit in bits[3:-2]: m = kw_pat.match(bit) if not m: raise TemplateSyntaxError(self.error_msg) key = smart_str(m.group('key')) expr = parser.compile_filter(m.group('value')) self.options.append((key, expr)) self.as_var = bits[-1] self.nodelist_url = parser.parse(('empty', 'endthummer',)) if parser.next_token().contents == 'empty': self.nodelist_empty = parser.parse(('endthummer',)) parser.delete_first_token() def _render(self, context): url = self.url.resolve(context) geometry = self.geometry.resolve(context) options = {} for key, expr in self.options: noresolve = {'True': True, 'False': False, 'None': None} value = noresolve.get('{}'.format(expr), expr.resolve(context)) if key == 'options': options.update(value) else: options[key] = value if url: thumbnail = get_thumbnail(url, geometry, **options) else: return self.nodelist_empty.render(context) context.push() context[self.as_var] = thumbnail output = self.nodelist_url.render(context) context.pop() return output def __iter__(self): for node in self.nodelist_url: yield node for node in self.nodelist_empty: yield node
mattaustin/django-thummer
thummer/templatetags/thummer.py
Python
apache-2.0
3,380
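The {% thummer %} tag above parses its key=value arguments with the kw_pat regex. The snippet below shows that parsing step on plain strings with arbitrary example options; unlike the real tag, it does not compile the values as template filter expressions, so treat it as a sketch.

import re

kw_pat = re.compile(r'^(?P<key>[\w]+)=(?P<value>.+)$')


def parse_tag_options(bits):
    """Parse tag arguments such as ['crop=center', 'quality=80'] into a dict."""
    options = {}
    for bit in bits:
        match = kw_pat.match(bit)
        if not match:
            raise ValueError('expected key=value, got %r' % (bit,))
        options[match.group('key')] = match.group('value')
    return options


print(parse_tag_options(['crop=center', 'quality=80']))
# {'crop': 'center', 'quality': '80'}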
"""Support for RFXtrx covers.""" import logging from homeassistant.components.cover import CoverEntity from homeassistant.const import CONF_DEVICES, STATE_OPEN from homeassistant.core import callback from . import ( CONF_AUTOMATIC_ADD, CONF_DATA_BITS, CONF_SIGNAL_REPETITIONS, DEFAULT_SIGNAL_REPETITIONS, SIGNAL_EVENT, RfxtrxCommandEntity, get_device_id, get_rfx_object, ) from .const import COMMAND_OFF_LIST, COMMAND_ON_LIST _LOGGER = logging.getLogger(__name__) async def async_setup_entry( hass, config_entry, async_add_entities, ): """Set up config entry.""" discovery_info = config_entry.data device_ids = set() def supported(event): return event.device.known_to_be_rollershutter entities = [] for packet_id, entity_info in discovery_info[CONF_DEVICES].items(): event = get_rfx_object(packet_id) if event is None: _LOGGER.error("Invalid device: %s", packet_id) continue if not supported(event): continue device_id = get_device_id( event.device, data_bits=entity_info.get(CONF_DATA_BITS) ) if device_id in device_ids: continue device_ids.add(device_id) entity = RfxtrxCover( event.device, device_id, entity_info[CONF_SIGNAL_REPETITIONS] ) entities.append(entity) async_add_entities(entities) @callback def cover_update(event, device_id): """Handle cover updates from the RFXtrx gateway.""" if not supported(event): return if device_id in device_ids: return device_ids.add(device_id) _LOGGER.info( "Added cover (Device ID: %s Class: %s Sub: %s, Event: %s)", event.device.id_string.lower(), event.device.__class__.__name__, event.device.subtype, "".join(f"{x:02x}" for x in event.data), ) entity = RfxtrxCover( event.device, device_id, DEFAULT_SIGNAL_REPETITIONS, event=event ) async_add_entities([entity]) # Subscribe to main RFXtrx events if discovery_info[CONF_AUTOMATIC_ADD]: hass.helpers.dispatcher.async_dispatcher_connect(SIGNAL_EVENT, cover_update) class RfxtrxCover(RfxtrxCommandEntity, CoverEntity): """Representation of a RFXtrx cover.""" async def async_added_to_hass(self): """Restore device state.""" await super().async_added_to_hass() if self._event is None: old_state = await self.async_get_last_state() if old_state is not None: self._state = old_state.state == STATE_OPEN @property def is_closed(self): """Return if the cover is closed.""" return not self._state async def async_open_cover(self, **kwargs): """Move the cover up.""" await self._async_send(self._device.send_open) self._state = True self.async_write_ha_state() async def async_close_cover(self, **kwargs): """Move the cover down.""" await self._async_send(self._device.send_close) self._state = False self.async_write_ha_state() async def async_stop_cover(self, **kwargs): """Stop the cover.""" await self._async_send(self._device.send_stop) self._state = True self.async_write_ha_state() def _apply_event(self, event): """Apply command from rfxtrx.""" super()._apply_event(event) if event.values["Command"] in COMMAND_ON_LIST: self._state = True elif event.values["Command"] in COMMAND_OFF_LIST: self._state = False @callback def _handle_event(self, event, device_id): """Check if event applies to me and update.""" if device_id != self._device_id: return self._apply_event(event) self.async_write_ha_state()
tchellomello/home-assistant
homeassistant/components/rfxtrx/cover.py
Python
apache-2.0
3,963
from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys sys.path.insert(0,'..') import tensorflow as tf import numpy as np import itertools import pickle import os import re import inception_v4 os.environ['CUDA_VISIBLE_DEVICES'] = '' def atoi(text): return int(text) if text.isdigit() else text def natural_keys(myobject): return [ atoi(c) for c in re.split('(\d+)', myobject.name) ] def setWeights(layers, weights): for index, layer in enumerate(layers): if "dense" in layer.name: continue layer.set_weights(weights[index]) print(layer.name + " weights have been set!") print("Finished Setting Weights!") def get_layers(model): # Get Trainable layers layers = model.layers layers.sort(key=natural_keys) result = [] for i in range(len(layers)): try: layer = model.layers[i] if layer.trainable: bad = ["pooling", "flatten", "dropout", "activation", "concatenate"] if not any(word in layer.name for word in bad): result.append(layer) except: continue bn,cv,fn=result[:int((len(result)-1)/2)],result[int((len(result)-1)/2):],result[-1] res_zipped = zip(cv, bn) out_prep = [list(elem) for elem in res_zipped] out = out_prep + [[fn]] return out if __name__ == "__main__": model = inception_v4.create_model() with open('weights.p', 'rb') as fp: weights = pickle.load(fp) # Get layers to set layers = get_layers(model) layers = list(itertools.chain.from_iterable(layers)) # Set the layer weights setWeights(layers, weights) # Save model weights in h5 format model.save_weights("../weights/inception-v4_weights_tf_dim_ordering_tf_kernels_notop.h5") print("Finished saving weights in h5 format")
xwzy/triplet-deep-hash-pytorch
triplet-deep-hash-pytorch/src/extract_feature/convert_weights/convert_weights_to_keras.py
Python
apache-2.0
1,736
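The weight-conversion script above orders layers with natural_keys so that a name like 'conv_10' sorts after 'conv_2'. The standalone example below shows the same trick on plain strings rather than Keras layer objects.

import re


def natural_keys(name):
    """Split 'conv_10' into ['conv_', 10, ''] so numeric parts compare as numbers."""
    return [int(part) if part.isdigit() else part
            for part in re.split(r'(\d+)', name)]


layer_names = ['conv_10', 'conv_2', 'conv_1']
print(sorted(layer_names))                    # ['conv_1', 'conv_10', 'conv_2']
print(sorted(layer_names, key=natural_keys))  # ['conv_1', 'conv_2', 'conv_10']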
# ===============================================================================
# Copyright 2016 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================

# ============= enthought library imports =======================
from __future__ import absolute_import

from traits.api import Instance
# ============= standard library imports ========================
# ============= local library imports  ==========================
from twisted.internet import reactor
from twisted.internet.endpoints import TCP4ServerEndpoint
from twisted.internet.protocol import Factory

from pychron.headless_loggable import HeadlessLoggable
from pychron.tx.protocols.service import ServiceProtocol


class FurnaceFirmwareProtocol(ServiceProtocol):
    def __init__(self, manager, addr):
        self._manager = manager
        self._addr = addr
        ServiceProtocol.__init__(self)

        misc_services = (('GetLabTemperature', manager.get_lab_temperature),
                         ('GetLabHumidity', manager.get_lab_humidity),
                         ('SetFrameRate', manager.set_frame_rate),
                         ('GetVersion', manager.get_version),
                         ('GetDIState', manager.get_di_state),
                         ('GetHeartBeat', manager.get_heartbeat),
                         ('GetFullSummary', manager.get_full_summary))

        controller_services = (('GetTemperature', manager.get_temperature),
                               ('GetSetpoint', manager.get_setpoint),
                               ('SetSetpoint', manager.set_setpoint),
                               ('GetProcessValue', manager.get_temperature),
                               ('GetPercentOutput', manager.get_percent_output),
                               ('GetFurnaceSummary', manager.get_furnace_summary),
                               ('SetPID', manager.set_pid))

        valve_services = (('Open', manager.open_switch),
                          ('Close', manager.close_switch),
                          ('GetIndicatorState', manager.get_indicator_state),
                          # ('GetChannelDOState', manager.get_channel_do_state),
                          ('GetChannelState', manager.get_channel_state),
                          ('GetIndicatorComponentStates', manager.get_indicator_component_states))

        dump_services = (('LowerFunnel', manager.lower_funnel),
                         ('RaiseFunnel', manager.raise_funnel),
                         ('InUpPosition', manager.is_funnel_up),
                         ('InDownPosition', manager.is_funnel_down),
                         ('EnergizeMagnets', manager.energize_magnets),
                         ('IsEnergized', manager.is_energized),
                         ('RotaryDumperMoving', manager.rotary_dumper_moving),
                         ('DenergizeMagnets', manager.denergize_magnets),
                         ('MoveAbsolute', manager.move_absolute),
                         ('MoveRelative', manager.move_relative),
                         ('GetPosition', manager.get_position),
                         ('Slew', manager.slew),
                         ('Stalled', manager.stalled),
                         ('SetHome', manager.set_home),
                         ('StopDrive', manager.stop_drive),
                         ('Moving', manager.moving),
                         ('StartJitter', manager.start_jitter),
                         ('StopJitter', manager.stop_jitter))

        bakeout_services = (('GetBakeoutSetpoint', manager.get_bakeout_setpoint),
                            ('SetBakeoutControlMode', manager.set_bakeout_control_mode),
                            ('GetBakeoutTemperature', manager.get_bakeout_temperature),
                            ('SetBakeoutClosedLoopSetpoint', manager.set_bakeout_setpoint),
                            ('GetBakeoutTempPower', manager.get_bakeout_temp_and_power))

        gauge_services = (('GetPressure', manager.get_gauge_pressure),)

        for s in (misc_services, controller_services, valve_services, dump_services, bakeout_services,
                  gauge_services):
            self._register_services(s)


class FirmwareFactory(Factory):
    def __init__(self, manager):
        self._manager = manager

    def buildProtocol(self, addr):
        return FurnaceFirmwareProtocol(self._manager, addr)


class FirmwareServer(HeadlessLoggable):
    manager = Instance('pychron.furnace.firmware.manager.FirmwareManager')

    def bootstrap(self, port=None, **kw):
        self.debug('bootstrap')
        self._load_config(port)

        self.debug('starting reactor')
        reactor.run()

    def _load_config(self, port):
        self.debug('load config')
        if port is None:
            port = 8000
        self.add_endpoint(port, FirmwareFactory(self.manager))

    def add_endpoint(self, port, factory):
        self.debug('add endpoint port={} factory={}'.format(port, factory.__class__.__name__))
        endpoint = TCP4ServerEndpoint(reactor, port)
        endpoint.listen(factory)

# ============= EOF =============================================
UManPychron/pychron
pychron/furnace/firmware/server.py
Python
apache-2.0
5,682
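FirmwareServer.add_endpoint above follows the standard Twisted endpoint pattern: a Factory builds one protocol instance per connection, TCP4ServerEndpoint.listen binds that factory to a port, and reactor.run() blocks in the event loop. The sketch below illustrates the same wiring with a trivial line-echo protocol so it is self-contained; EchoProtocol and EchoFactory are placeholder names, and the real ServiceProtocol wire format is not reproduced here.

# Minimal sketch of the endpoint/factory/protocol wiring used by FirmwareServer.
# EchoProtocol and EchoFactory are illustrative only, not pychron classes.
from twisted.internet import reactor
from twisted.internet.endpoints import TCP4ServerEndpoint
from twisted.internet.protocol import Factory
from twisted.protocols.basic import LineReceiver


class EchoProtocol(LineReceiver):
    def lineReceived(self, line):
        # Echo each received line back to the client.
        self.sendLine(line)


class EchoFactory(Factory):
    def buildProtocol(self, addr):
        # Called once per incoming connection, mirroring FirmwareFactory.
        return EchoProtocol()


if __name__ == '__main__':
    endpoint = TCP4ServerEndpoint(reactor, 8000)  # same default port as _load_config
    endpoint.listen(EchoFactory())
    reactor.run()                                 # blocks, like FirmwareServer.bootstrap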
""" WSGI config for cg4 project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/ """ import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cg4.settings") from django.core.wsgi import get_wsgi_application application = get_wsgi_application()
cybojenix/projects_reward_system
cg4/wsgi.py
Python
apache-2.0
381
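The module above only exposes the WSGI callable; something still has to serve it. A rough local-development sketch, assuming the cg4 package and its Django settings are importable from the current directory (the serve_local.py name and port are made up for this illustration; a production deployment would sit behind gunicorn or uWSGI instead):

# serve_local.py -- hypothetical helper, not part of the cg4 repository.
import os
from wsgiref.simple_server import make_server

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cg4.settings")

from cg4.wsgi import application  # the callable defined above

if __name__ == "__main__":
    # Serve the Django app on localhost:8000 with the stdlib WSGI server.
    make_server("127.0.0.1", 8000, application).serve_forever()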
# -*- coding: utf-8 -*- from .database import BaseMessage from .records import RecordUpdateMessage, RecordDeleteMessage, RecordCreateMessage from ..exceptions import PyOrientBadMethodCallException from ..constants import COMMAND_OP, FIELD_BOOLEAN, FIELD_BYTE, FIELD_CHAR, \ FIELD_INT, FIELD_LONG, FIELD_SHORT, FIELD_STRING, QUERY_SYNC, FIELD_BYTES, \ TX_COMMIT_OP, QUERY_GREMLIN, QUERY_ASYNC, QUERY_CMD, QUERY_TYPES, \ QUERY_SCRIPT from ..utils import need_connected, need_db_opened, dlog __author__ = 'Ostico <ostico@gmail.com>' # # COMMAND_OP # # Executes remote commands: # # Request: (mode:byte)(class-name:string)(command-payload-length:int)(command-payload) # Response: # - synchronous commands: [(synch-result-type:byte)[(synch-result-content:?)]]+ # - asynchronous commands: [(asynch-result-type:byte)[(asynch-result-content:?)]*] # (pre-fetched-record-size.md)[(pre-fetched-record)]*+ # # Where the request: # # mode can be 'a' for asynchronous mode and 's' for synchronous mode # class-name is the class name of the command implementation. # There are short form for the most common commands: # q stands for query as idempotent command. It's like passing # com.orientechnologies.orient.core.sql.query.OSQLSynchQuery # c stands for command as non-idempotent command (insert, update, etc). # It's like passing com.orientechnologies.orient.core.sql.OCommandSQL # s stands for script. It's like passing # com.orientechnologies.orient.core.command.script.OCommandScript. # Script commands by using any supported server-side scripting like Javascript command. Since v1.0. # any other values is the class name. The command will be created via # reflection using the default constructor and invoking the fromStream() method against it # command-payload is the command's serialized payload (see Network-Binary-Protocol-Commands) # Response is different for synchronous and asynchronous request: # synchronous: # synch-result-type can be: # 'n', means null result # 'r', means single record returned # 'l', collection of records. The format is: # an integer to indicate the collection size # all the records one by one # 'a', serialized result, a byte[] is sent # synch-result-content, can only be a record # pre-fetched-record-size, as the number of pre-fetched records not # directly part of the result set but joined to it by fetching # pre-fetched-record as the pre-fetched record content # asynchronous: # asynch-result-type can be: # 0: no records remain to be fetched # 1: a record is returned as a resultset # 2: a record is returned as pre-fetched to be loaded in client's cache only. 
# It's not part of the result set but the client knows that it's available for later access # asynch-result-content, can only be a record # class CommandMessage(BaseMessage): def __init__(self, _orient_socket): super(CommandMessage, self).__init__(_orient_socket) self._query = '' self._limit = 20 self._fetch_plan = '*:0' self._command_type = QUERY_SYNC self._mod_byte = 's' self._append((FIELD_BYTE, COMMAND_OP)) @need_db_opened def prepare(self, params=None): if isinstance(params, tuple) or isinstance(params, list): try: self.set_command_type(params[0]) self._query = params[1] self._limit = params[2] self._fetch_plan = params[3] # callback function use to operate # over the async fetched records self.set_callback(params[4]) except IndexError: # Use default for non existent indexes pass if self._command_type == QUERY_CMD \ or self._command_type == QUERY_SYNC \ or self._command_type == QUERY_SCRIPT \ or self._command_type == QUERY_GREMLIN: self._mod_byte = 's' else: if self._callback is None: raise PyOrientBadMethodCallException("No callback was provided.", []) self._mod_byte = 'a' _payload_definition = [ (FIELD_STRING, self._command_type), (FIELD_STRING, self._query) ] if self._command_type == QUERY_ASYNC \ or self._command_type == QUERY_SYNC \ or self._command_type == QUERY_GREMLIN: # a limit specified in a sql string should always override a # limit parameter pass to prepare() if ' LIMIT ' not in self._query.upper() or self._command_type == QUERY_GREMLIN: _payload_definition.append((FIELD_INT, self._limit)) else: _payload_definition.append((FIELD_INT, -1)) _payload_definition.append((FIELD_STRING, self._fetch_plan)) if self._command_type == QUERY_SCRIPT: _payload_definition.insert(1, (FIELD_STRING, 'sql')) _payload_definition.append((FIELD_INT, 0)) payload = b''.join( self._encode_field(x) for x in _payload_definition ) self._append((FIELD_BYTE, self._mod_byte)) self._append((FIELD_STRING, payload)) return super(CommandMessage, self).prepare() def fetch_response(self): # skip execution in case of transaction if self._orientSocket.in_transaction is True: return self # decode header only super(CommandMessage, self).fetch_response() if self._command_type == QUERY_ASYNC: self._read_async_records() else: return self._read_sync() def set_command_type(self, _command_type): if _command_type in QUERY_TYPES: # user choice if present self._command_type = _command_type else: raise PyOrientBadMethodCallException( _command_type + ' is not a valid command type', [] ) return self def set_fetch_plan(self, _fetch_plan): self._fetch_plan = _fetch_plan return self def set_query(self, _query): self._query = _query return self def set_limit(self, _limit): self._limit = _limit return self def _read_sync(self): # type of response # decode body char with flag continue ( Header already read ) response_type = self._decode_field(FIELD_CHAR) if not isinstance(response_type, str): response_type = response_type.decode() res = [] if response_type == 'n': self._append(FIELD_CHAR) super(CommandMessage, self).fetch_response(True) # end Line \x00 return None elif response_type == 'r' or response_type == 'w': res = [self._read_record()] self._append(FIELD_CHAR) # end Line \x00 _res = super(CommandMessage, self).fetch_response(True) if response_type == 'w': res = [res[0].oRecordData['result']] elif response_type == 'a': self._append(FIELD_STRING) self._append(FIELD_CHAR) res = [super(CommandMessage, self).fetch_response(True)[0]] elif response_type == 'l': self._append(FIELD_INT) list_len = super(CommandMessage, 
self).fetch_response(True)[0] for n in range(0, list_len): res.append(self._read_record()) # async-result-type can be: # 0: no records remain to be fetched # 1: a record is returned as a result set # 2: a record is returned as pre-fetched to be loaded in client's # cache only. It's not part of the result set but the client # knows that it's available for later access cached_results = self._read_async_records() # cache = cached_results['cached'] else: # this should be never happen, used only to debug the protocol msg = b'' self._orientSocket._socket.setblocking(0) m = self._orientSocket.read(1) while m != "": msg += m m = self._orientSocket.read(1) return res def set_callback(self, func): if hasattr(func, '__call__'): self._callback = func else: raise PyOrientBadMethodCallException(func + " is not a callable " "function", []) return self # # TX COMMIT # # Commits a transaction. This operation flushes all the # pending changes to the server side. # # Request: (tx-id:int)(using-tx-log:byte)(tx-entry)*(0-byte indicating end-of-records) # tx-entry: (operation-type:byte)(cluster-id:short) # (cluster-position:long)(record-type:byte)(entry-content) # # entry-content for CREATE: (record-content:bytes) # entry-content for UPDATE: (version:record-version)(content-changed:boolean)(record-content:bytes) # entry-content for DELETE: (version:record-version) # Response: (created-record-count:int)[(client-specified-cluster-id:short) # (client-specified-cluster-position:long)(created-cluster-id:short) # (created-cluster-position:long)]*(updated-record-count:int)[(updated-cluster-id:short) # (updated-cluster-position:long)(new-record-version:int)]*(count-of-collection-changes:int) # [(uuid-most-sig-bits:long)(uuid-least-sig-bits:long)(updated-file-id:long)(updated-page-index:long) # (updated-page-offset:int)]* # # Where: # tx-id is the Transaction's Id # use-tx-log tells if the server must use the Transaction # Log to recover the transaction. 1 = true, 0 = false # operation-type can be: # 1, for UPDATES # 2, for DELETES # 3, for CREATIONS # # record-content depends on the operation type: # For UPDATED (1): (original-record-version:int)(record-content:bytes) # For DELETED (2): (original-record-version:int) # For CREATED (3): (record-content:bytes) # # This response contains two parts: a map of 'temporary' client-generated # record ids to 'real' server-provided record ids for each CREATED record, # and a map of UPDATED record ids to update record-versions. # # Look at Optimistic Transaction to know how temporary RecordIDs are managed. # # The last part or response is referred to RidBag management. # Take a look at the main page for more details. 
class _TXCommitMessage(BaseMessage): def __init__(self, _orient_socket): super(_TXCommitMessage, self).__init__(_orient_socket) self._tx_id = -1 self._operation_stack = [] self._pre_operation_records = {} self._operation_records = {} self._temp_cluster_position_seq = -2 # order matters self._append((FIELD_BYTE, TX_COMMIT_OP)) self._command = TX_COMMIT_OP @need_connected def prepare(self, params=None): self._append((FIELD_INT, self.get_transaction_id())) self._append((FIELD_BOOLEAN, True)) for k, v in enumerate(self._operation_stack): self._append((FIELD_BYTE, chr(1))) # start of records for field in v: self._append(field) self._append((FIELD_BYTE, chr(0))) self._append((FIELD_STRING, "")) return super(_TXCommitMessage, self).prepare() def send(self): return super(_TXCommitMessage, self).send() def fetch_response(self): # self.dump_streams() super(_TXCommitMessage, self).fetch_response() result = { 'created': [], 'updated': [], 'changes': [] } items = self._decode_field(FIELD_INT) for x in range(0, items): # (created-record-count:int) # [ # (client-specified-cluster-id:short) # (client-specified-cluster-position:long) # (created-cluster-id:short) # (created-cluster-position:long) # ]* result['created'].append( { 'client_c_id': self._decode_field(FIELD_SHORT), 'client_c_pos': self._decode_field(FIELD_LONG), 'created_c_id': self._decode_field(FIELD_SHORT), 'created_c_pos': self._decode_field(FIELD_LONG) } ) operation = self._pre_operation_records[ str(result['created'][-1]['client_c_pos']) ] rid = "#" + str(result['created'][-1]['created_c_id']) + \ ":" + str(result['created'][-1]['created_c_pos']) record = getattr(operation, "_record_content") record.update(__version=1, __rid=rid) self._operation_records[rid] = record items = self._decode_field(FIELD_INT) for x in range(0, items): # (updated-record-count:int) # [ # (updated-cluster-id:short) # (updated-cluster-position:long) # (new-record-version:int) # ]* result['updated'].append( { 'updated_c_id': self._decode_field(FIELD_SHORT), 'updated_c_pos': self._decode_field(FIELD_LONG), 'new_version': self._decode_field(FIELD_INT), } ) try: operation = self._pre_operation_records[ str(result['updated'][-1]['updated_c_pos']) ] record = getattr(operation, "_record_content") rid = "#" + str(result['updated'][-1]['updated_c_id']) + \ ":" + str(result['updated'][-1]['updated_c_pos']) record.update( __version=result['updated'][-1]['new_version'], __rid=rid ) self._operation_records[rid] = record except KeyError: pass if self.get_protocol() > 23: items = self._decode_field(FIELD_INT) for x in range(0, items): # (count-of-collection-changes:int) # [ # (uuid-most-sig-bits:long) # (uuid-least-sig-bits:long) # (updated-file-id:long) # (updated-page-index:long) # (updated-page-offset:int) # ]* result['updated'].append( { 'uuid_high': self._decode_field(FIELD_LONG), 'uuid_low': self._decode_field(FIELD_LONG), 'file_id': self._decode_field(FIELD_LONG), 'page_index': self._decode_field(FIELD_LONG), 'page_offset': self._decode_field(FIELD_INT), } ) self.dump_streams() return self._operation_records # [self._operation_records, result] def attach(self, operation): if not isinstance(operation, BaseMessage): # A Subclass of BaseMessage was expected raise AssertionError("A subclass of BaseMessage was expected") if isinstance(operation, RecordUpdateMessage): o_record_enc = self.get_serializer().encode(getattr(operation, "_record_content")) self._operation_stack.append(( (FIELD_BYTE, chr(1)), (FIELD_SHORT, int(getattr(operation, "_cluster_id"))), (FIELD_LONG, 
int(getattr(operation, "_cluster_position"))), (FIELD_BYTE, getattr(operation, "_record_type")), (FIELD_INT, int(getattr(operation, "_record_version"))), (FIELD_STRING, o_record_enc), )) if self.get_protocol() >= 23: self._operation_stack[-1] = \ self._operation_stack[-1] + \ ((FIELD_BOOLEAN, bool(getattr(operation, "_update_content"))),) self._pre_operation_records[ str(getattr(operation, "_cluster_position")) ] = operation elif isinstance(operation, RecordDeleteMessage): self._operation_stack.append(( (FIELD_BYTE, chr(2)), (FIELD_SHORT, int(getattr(operation, "_cluster_id"))), (FIELD_LONG, int(getattr(operation, "_cluster_position"))), (FIELD_BYTE, getattr(operation, "_record_type")), (FIELD_INT, int(getattr(operation, "_record_version"))), )) elif isinstance(operation, RecordCreateMessage): o_record_enc = self.get_serializer().encode(getattr(operation, "_record_content")) self._operation_stack.append(( (FIELD_BYTE, chr(3)), (FIELD_SHORT, int(-1)), (FIELD_LONG, int(self._temp_cluster_position_seq)), (FIELD_BYTE, getattr(operation, "_record_type")), (FIELD_STRING, o_record_enc), )) self._pre_operation_records[ str(self._temp_cluster_position_seq) ] = operation self._temp_cluster_position_seq -= 1 else: raise PyOrientBadMethodCallException( "Wrong command type " + operation.__class__.__name__, [] ) return self def get_transaction_id(self): if self._tx_id < 0: from datetime import datetime my_epoch = datetime(2014, 7, 1) now = datetime.now() delta = now - my_epoch # write in extended mode to make it easy to read # seconds * 1000000 to get the equivalent microseconds _sm = (delta.seconds + delta.days * 24 * 3600) * 10 ** 6 _ms = delta.microseconds _mstime = _sm + _ms # remove sign # treat as unsigned even when the INT is signed # and take 4 Bytes # ( 32 bit uniqueness is not ensured in any way, # but is surely unique in this session ) # we need only a transaction unique for this session # not a real UUID if _mstime & 0x80000000: self._tx_id = int((_mstime - 0x80000000) & 0xFFFFFFFF) else: self._tx_id = int(_mstime & 0xFFFFFFFF) return self._tx_id def begin(self): self._operation_stack = [] self._pre_operation_records = {} self._operation_records = {} self._temp_cluster_position_seq = -2 self._orientSocket.in_transaction = True self.get_transaction_id() return self def commit(self): self._orientSocket.in_transaction = False result = self.prepare().send().fetch_response() self._operation_stack = [] self._pre_operation_records = {} self._operation_records = {} self._tx_id = -1 self._temp_cluster_position_seq = -2 return result def rollback(self): self._operation_stack = [] self._pre_operation_records = {} self._operation_records = {} self._tx_id = -1 self._temp_cluster_position_seq = -2 self._orientSocket.in_transaction = False return self # # TX COMMIT facade # class TxCommitMessage: def __init__(self, _orient_socket): self._transaction = _TXCommitMessage(_orient_socket) pass def attach(self, operation): self._transaction.attach(operation) return self def begin(self): self._transaction.begin() return self def commit(self): return self._transaction.commit() def rollback(self): return self._transaction.rollback() def set_session_token(self, token): self._transaction.set_session_token(token) return self
orientechnologies/pyorient
pyorient/messages/commands.py
Python
apache-2.0
19,879
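For context, the command and transaction messages above are normally driven through pyorient's client facade rather than instantiated directly. A rough usage sketch, assuming a reachable OrientDB server and an existing database; host, database name, and credentials below are placeholders:

import pyorient

client = pyorient.OrientDB("localhost", 2424)             # binary protocol port
client.db_open("GratefulDeadConcerts", "admin", "admin")  # placeholder credentials

# Idempotent, synchronous query -- presumably routed through CommandMessage
# with a synchronous command type.
records = client.query("SELECT FROM V", 10)

# Non-idempotent command.
client.command("CREATE CLASS Animal EXTENDS V")

# Transactions use the TxCommitMessage facade shown above: record
# create/update/delete messages are attach()ed between begin() and commit().
# tx = client.tx_commit()
# tx.begin()
# ... tx.attach(record_message) ...
# tx.commit()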
import time
from typing import List

from paasta_tools.deployd.common import DelayDeadlineQueueProtocol
from paasta_tools.deployd.common import PaastaThread
from paasta_tools.deployd.workers import PaastaDeployWorker
from paasta_tools.metrics.metrics_lib import BaseMetrics


class MetricsThread(PaastaThread):
    def __init__(self, metrics_provider: BaseMetrics) -> None:
        super().__init__()
        self.metrics = metrics_provider

    def run_once(self) -> None:
        raise NotImplementedError()

    def run(self) -> None:
        while True:
            last_run_time = time.time()
            self.run_once()
            time.sleep(last_run_time + 20 - time.time())


class QueueAndWorkerMetrics(MetricsThread):
    def __init__(
        self,
        queue: DelayDeadlineQueueProtocol,
        workers: List[PaastaDeployWorker],
        cluster: str,
        metrics_provider: BaseMetrics,
    ) -> None:
        super().__init__(metrics_provider)
        self.daemon = True
        self.queue = queue

        self.instances_to_bounce_later_gauge = self.metrics.create_gauge(
            "instances_to_bounce_later", paasta_cluster=cluster
        )
        self.instances_to_bounce_now_gauge = self.metrics.create_gauge(
            "instances_to_bounce_now", paasta_cluster=cluster
        )
        self.instances_with_past_deadline_gauge = self.metrics.create_gauge(
            "instances_with_past_deadline", paasta_cluster=cluster
        )
        self.instances_with_deadline_in_next_n_seconds_gauges = {
            (available, n): self.metrics.create_gauge(
                f"{available}_instances_with_deadline_in_next_{n}s",
                paasta_cluster=cluster,
            )
            for n in [60, 300, 3600]
            for available in ["available", "unavailable"]
        }
        self.max_time_past_deadline_gauge = self.metrics.create_gauge(
            "max_time_past_deadline", paasta_cluster=cluster
        )
        self.sum_time_past_deadline_gauge = self.metrics.create_gauge(
            "sum_time_past_deadline", paasta_cluster=cluster
        )

        self.workers = workers

        self.workers_busy_gauge = self.metrics.create_gauge(
            "workers_busy", paasta_cluster=cluster
        )
        self.workers_idle_gauge = self.metrics.create_gauge(
            "workers_idle", paasta_cluster=cluster
        )
        self.workers_dead_gauge = self.metrics.create_gauge(
            "workers_dead", paasta_cluster=cluster
        )

    def run_once(self) -> None:
        currently_available_instances = tuple(
            self.queue.get_available_service_instances(fetch_service_instances=False)
        )
        currently_unavailable_instances = tuple(
            self.queue.get_unavailable_service_instances(fetch_service_instances=False)
        )
        self.instances_to_bounce_later_gauge.set(len(currently_available_instances))
        self.instances_to_bounce_now_gauge.set(len(currently_unavailable_instances))

        available_deadlines = [
            deadline for deadline, _ in currently_available_instances
        ]
        unavailable_deadlines = [
            deadline for _, deadline, _ in currently_unavailable_instances
        ]

        now = time.time()

        self.instances_with_past_deadline_gauge.set(
            len([1 for deadline in available_deadlines if deadline < now])
        )

        for (
            (available, n),
            gauge,
        ) in self.instances_with_deadline_in_next_n_seconds_gauges.items():
            if available == "available":
                deadlines = available_deadlines
            else:
                deadlines = unavailable_deadlines
            gauge.set(len([1 for deadline in deadlines if now < deadline < now + n]))

        self.max_time_past_deadline_gauge.set(
            max(
                [now - deadline for deadline in available_deadlines if deadline < now],
                default=0,
            )
        )
        self.sum_time_past_deadline_gauge.set(
            sum([max(0, now - deadline) for deadline in available_deadlines])
        )

        self.workers_busy_gauge.set(
            len([worker for worker in self.workers if worker.busy])
        )
        self.workers_idle_gauge.set(
            len([worker for worker in self.workers if not worker.busy])
        )
        self.workers_dead_gauge.set(
            len([worker for worker in self.workers if not worker.is_alive()])
        )
Yelp/paasta
paasta_tools/deployd/metrics.py
Python
apache-2.0
4,472
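MetricsThread.run above aims to call run_once roughly every 20 seconds, sleeping away whatever part of the interval the work itself did not consume. A self-contained sketch of that scheduling pattern (names here are illustrative, not paasta APIs); clamping the sleep at zero avoids the ValueError that time.sleep raises for negative durations when an iteration overruns the interval:

import time

INTERVAL_SECONDS = 20  # mirrors the hard-coded 20 in MetricsThread.run


def run_forever(run_once):
    """Call run_once() about once per INTERVAL_SECONDS, forever."""
    while True:
        started = time.time()
        run_once()
        # Sleep for whatever is left of the interval; clamp at 0 so a slow
        # iteration never passes a negative duration to time.sleep().
        time.sleep(max(0.0, started + INTERVAL_SECONDS - time.time()))


if __name__ == "__main__":
    run_forever(lambda: print("collected metrics at", time.time()))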
import os

import setuptools

module_path = os.path.join(os.path.dirname(__file__), 'url_for_s3.py')
version_line = [line for line in open(module_path)
                if line.startswith('__version__')][0]
__version__ = version_line.split('__version__ = ')[-1][1:][:-2]

setuptools.setup(
    name="url-for-s3",
    version=__version__,
    url="https://github.com/Jaza/url-for-s3",
    author="Jeremy Epstein",
    author_email="jazepstein@gmail.com",
    description="Python function that generates a URL to a given S3 resource.",
    long_description=open('README.rst').read(),
    py_modules=['url_for_s3'],
    zip_safe=False,
    platforms='any',
    install_requires=[],
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
    ],
)
Jaza/url-for-s3
setup.py
Python
apache-2.0
1,123
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-10-23 23:54
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('goods', '0002_auto_20171017_2017'),
        ('trade', '0003_auto_20171022_1507'),
    ]

    operations = [
        migrations.AlterField(
            model_name='ordergoods',
            name='order',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='goods', to='trade.OrderInfo', verbose_name='订单信息'),
        ),
        migrations.AlterField(
            model_name='orderinfo',
            name='order_sn',
            field=models.CharField(blank=True, max_length=30, null=True, unique=True, verbose_name='订单号'),
        ),
        migrations.AlterField(
            model_name='orderinfo',
            name='pay_status',
            field=models.CharField(choices=[('TRADE_SUCCESS', '成功'), ('TRADE_CLOSE', '超时关闭'), ('WAIT_BUYER_PAY', '交易创建,等待付款'), ('TRADE_FINISHED', '交易结束')], default='paying', max_length=30, verbose_name='订单状态'),
        ),
        migrations.AlterUniqueTogether(
            name='shoppingcart',
            unique_together=set([('user', 'goods')]),
        ),
    ]
LennonChin/Django-Practices
MxShop/apps/trade/migrations/0004_auto_20171023_2354.py
Python
apache-2.0
1,497
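A generated migration like the one above is applied with Django's migrate command. A minimal programmatic sketch, equivalent to running `python manage.py migrate trade` from the project root; the settings module name below is assumed from the repository layout, not confirmed by this file:

# Hypothetical helper to apply pending migrations for the trade app.
import os

import django
from django.core.management import call_command

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "MxShop.settings")  # assumed settings module
django.setup()
call_command("migrate", "trade")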
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

import os
import subprocess
import sys
from textwrap import dedent

from twitter.common.contextutil import pushd

from pex.testing import temporary_content


def assert_entry_points(entry_points):
    setup_py = dedent("""
        from setuptools import setup

        setup(
            name='my_app',
            version='0.0.0',
            zip_safe=True,
            packages=[''],
            entry_points=%(entry_points)r,
        )
    """ % dict(entry_points=entry_points))

    my_app = dedent("""
        def do_something():
            print("hello world!")
    """)

    with temporary_content({'setup.py': setup_py, 'my_app.py': my_app}) as project_dir:
        with pushd(project_dir):
            subprocess.check_call([sys.executable, 'setup.py', 'bdist_pex'])
            process = subprocess.Popen([os.path.join(project_dir, 'dist', 'my_app-0.0.0.pex')],
                                       stdout=subprocess.PIPE)
            stdout, _ = process.communicate()
            assert 0 == process.returncode
            assert stdout == b'hello world!\n'


def test_entry_points_dict():
    assert_entry_points({'console_scripts': ['my_app = my_app:do_something']})


def test_entry_points_ini_string():
    assert_entry_points(dedent("""
        [console_scripts]
        my_app=my_app:do_something
    """))
mzdanieltest/pex
tests/test_bdist_pex.py
Python
apache-2.0
1,373