text
stringlengths
4
1.02M
meta
dict
from airflow.contrib.hooks.imap_hook import ImapHook
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults


class ImapAttachmentSensor(BaseSensorOperator):
    """Waits until an attachment with the given name appears on a mail server.

    :param attachment_name: The name of the attachment to look for; rendered
        as a template field.
    :type attachment_name: str
    :param mail_folder: The mail folder searched for the attachment.
        The default value is 'INBOX'.
    :type mail_folder: str
    :param check_regex: If set to True the attachment's name is treated as a
        regular expression rather than an exact name, so a broader set of
        attachments can match. The default value is False.
    :type check_regex: bool
    :param conn_id: The connection to run the sensor against.
        The default value is 'imap_default'.
    :type conn_id: str
    """

    template_fields = ('attachment_name',)

    @apply_defaults
    def __init__(self,
                 attachment_name,
                 mail_folder='INBOX',
                 check_regex=False,
                 conn_id='imap_default',
                 *args,
                 **kwargs):
        super(ImapAttachmentSensor, self).__init__(*args, **kwargs)
        self.conn_id = conn_id
        self.mail_folder = mail_folder
        self.attachment_name = attachment_name
        self.check_regex = check_regex

    def poke(self, context):
        """Check the mail server once for the configured attachment.

        :param context: The context that is being provided when poking.
        :type context: dict
        :return: True if an attachment with the given name is present,
            False otherwise.
        :rtype: bool
        """
        self.log.info('Poking for %s', self.attachment_name)
        with ImapHook(imap_conn_id=self.conn_id) as imap_hook:
            found = imap_hook.has_mail_attachment(
                name=self.attachment_name,
                mail_folder=self.mail_folder,
                check_regex=self.check_regex,
            )
        return found
{ "content_hash": "4ea5a53d9ff5df65eaaecf42d2c0200a", "timestamp": "", "source": "github", "line_count": 57, "max_line_length": 98, "avg_line_length": 38.80701754385965, "alnum_prop": 0.6098553345388789, "repo_name": "fenglu-g/incubator-airflow", "id": "c0eb9b6cd22c3057c511c483b8191a9181553eb5", "size": "3024", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "airflow/contrib/sensors/imap_attachment_sensor.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "12126" }, { "name": "Dockerfile", "bytes": "3634" }, { "name": "HTML", "bytes": "129454" }, { "name": "JavaScript", "bytes": "22118" }, { "name": "Mako", "bytes": "1284" }, { "name": "Python", "bytes": "5852162" }, { "name": "Shell", "bytes": "41793" } ], "symlink_target": "" }
"""Tests for unicode_data.py."""

__author__ = "roozbeh@google.com (Roozbeh Pournader)"

import unittest

from nototools import unicode_data


class UnicodeDataTest(unittest.TestCase):
    """Unit tests for the unicode_data module."""

    def test_name(self):
        """name() maps a code point to its Unicode character name."""
        self.assertEqual(unicode_data.name(0x118FF), "WARANG CITI OM")

    def test_category(self):
        """category() returns the general category of a code point."""
        self.assertEqual(unicode_data.category(0xF0001), "Co")
        self.assertEqual(unicode_data.category(0xE01F0), "Cn")

    def test_canonical_decomposition(self):
        """canonical_decomposition() returns the mapping, or '' if none."""
        self.assertEqual(unicode_data.canonical_decomposition(0x0627), "")
        self.assertEqual(
            unicode_data.canonical_decomposition(0x0626), u"\u064A\u0654"
        )
        self.assertEqual(
            unicode_data.canonical_decomposition(0x2F8A4), u"\U000226D4"
        )

    def test_script(self):
        """script() returns the script code of a code point."""
        self.assertEqual(unicode_data.script(0xA794), "Latn")
        self.assertEqual(unicode_data.script(0xE006), "Zzzz")

    def test_block(self):
        """block() returns the Unicode block name of a code point."""
        self.assertEqual(unicode_data.block(0x1F600), "Emoticons")

    def test_default_ignorable(self):
        """is_default_ignorable() reflects the Default_Ignorable property."""
        self.assertTrue(unicode_data.is_default_ignorable(0x061C))
        self.assertFalse(unicode_data.is_default_ignorable(0x0020))

    def test_defined(self):
        """is_defined() distinguishes assigned from unassigned code points."""
        self.assertTrue(unicode_data.is_defined(0x20BD))
        self.assertFalse(unicode_data.is_defined(0xFDD0))
        self.assertFalse(unicode_data.is_defined(0x0870))
        # CJK ranges
        self.assertTrue(unicode_data.is_defined(0x3400))
        self.assertTrue(unicode_data.is_defined(0x4DB5))
        self.assertTrue(unicode_data.is_defined(0x4DB6))

    def test_defined_characters(self):
        """defined_characters() supports version and script restrictions."""
        all_defined = unicode_data.defined_characters()
        self.assertIn(0x20BD, all_defined)
        self.assertNotIn(0xFDD0, all_defined)
        # Version-restricted
        self.assertNotIn(0x20BD, unicode_data.defined_characters(6.3))
        self.assertIn(0x20BD, unicode_data.defined_characters(7.0))
        # Script restricted
        self.assertIn(0x1CD1, unicode_data.defined_characters(scr="Deva"))
        self.assertNotIn(
            0x1CD1, unicode_data.defined_characters(version=5.1, scr="Deva")
        )
        self.assertIn(0x0964, unicode_data.defined_characters(scr="Beng"))
        self.assertIn(0xA92E, unicode_data.defined_characters(scr="Latn"))
        self.assertNotIn(0x0363, unicode_data.defined_characters(scr="Arab"))

    def test_private_use(self):
        """is_private_use() covers the BMP and supplementary PUA ranges."""
        self.assertTrue(unicode_data.is_private_use(0xE000))
        self.assertTrue(unicode_data.is_private_use(0xF8FF))
        self.assertFalse(unicode_data.is_private_use(0x9000))
        self.assertTrue(unicode_data.is_private_use(0xF0000))
        self.assertTrue(unicode_data.is_private_use(0x10FFFD))
        self.assertFalse(unicode_data.is_private_use(0x10FFFE))

    def test_mirrored(self):
        """mirrored() returns an int flag (0/1), not a bool."""
        self.assertNotIsInstance(unicode_data.mirrored(0x0000), bool)
        self.assertEqual(unicode_data.mirrored(0x0028), 1)
        self.assertEqual(unicode_data.mirrored(0x2140), 1)
        self.assertEqual(unicode_data.mirrored(0x0029), 1)
        self.assertEqual(unicode_data.mirrored(0x1D7C3), 1)
        self.assertEqual(unicode_data.mirrored(0x0020), 0)
        self.assertEqual(unicode_data.mirrored(0x27CB), 1)
        self.assertEqual(unicode_data.mirrored(0x27CD), 1)

    def test_age(self):
        """age() returns the Unicode version a code point was assigned in."""
        self.assertEqual(unicode_data.age(0xE000), "1.1")
        self.assertEqual(unicode_data.age(0xE0021), "3.1")
        self.assertEqual(unicode_data.age(0x20BD), "7.0")
        self.assertEqual(unicode_data.age(0x2B820), "8.0")
        self.assertEqual(unicode_data.age(0x104B0), "9.0")

    def test_bidi_mirroring_glyph(self):
        """bidi_mirroring_glyph() returns the paired glyph, or None."""
        self.assertEqual(unicode_data.bidi_mirroring_glyph(0x0028), 0x0029)
        self.assertEqual(unicode_data.bidi_mirroring_glyph(0x0029), 0x0028)
        self.assertEqual(unicode_data.bidi_mirroring_glyph(0x27CB), 0x27CD)
        self.assertEqual(unicode_data.bidi_mirroring_glyph(0x27CD), 0x27CB)
        self.assertIsNone(unicode_data.bidi_mirroring_glyph(0x2140))

    def test_script_code(self):
        """script_code() resolves human-readable names to script codes."""
        self.assertEqual(unicode_data.script_code("NKo"), "Nkoo")
        self.assertEqual(unicode_data.script_code("N'Ko"), "Nkoo")
        self.assertEqual(unicode_data.script_code("NewTaiLue"), "Talu")
        self.assertEqual(unicode_data.script_code("Klingon"), "Piqd")

    def test_human_readable_script_name(self):
        """human_readable_script_name() resolves codes to display names."""
        self.assertEqual(unicode_data.human_readable_script_name("Grek"), "Greek")
        self.assertEqual(
            unicode_data.human_readable_script_name("Talu"), "New Tai Lue"
        )
        self.assertEqual(unicode_data.human_readable_script_name("Nkoo"), "N'Ko")
        self.assertEqual(unicode_data.human_readable_script_name("Zsye"), "Emoji")
        self.assertEqual(
            unicode_data.human_readable_script_name("Zsym"), "Symbols"
        )
        self.assertEqual(
            unicode_data.human_readable_script_name("Zzzz"), "Unknown"
        )

    def test_all_scripts(self):
        """all_scripts() contains real script codes and no bogus ones."""
        self.assertIn("Latn", unicode_data.all_scripts())
        self.assertNotIn("Japn", unicode_data.all_scripts())

    def test_parse_code_ranges(self):
        """_parse_code_ranges() parses UCD-style range lines into tuples."""
        range_lines = (
            "0000..001F ; Common # Cc [32] <control-0000>..<control-001F>\n"
            "0020 ; Common # Zs SPACE\n"
        )
        self.assertEqual(
            unicode_data._parse_code_ranges(range_lines),
            [(0, 31, "Common"), (32, 32, "Common")],
        )


if __name__ == "__main__":
    unittest.main()
{ "content_hash": "8333bdf750ee11f378b2e146dfe7f646", "timestamp": "", "source": "github", "line_count": 138, "max_line_length": 88, "avg_line_length": 44.85507246376812, "alnum_prop": 0.6583198707592892, "repo_name": "googlei18n/nototools", "id": "15ad71821be4a8cb9135e38c635afdfddb0668d0", "size": "6811", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tests/unicode_data_test.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "665" }, { "name": "HTML", "bytes": "620" }, { "name": "Makefile", "bytes": "3666" }, { "name": "Python", "bytes": "1158880" }, { "name": "Shell", "bytes": "15227" } ], "symlink_target": "" }
# Schema name used to qualify table names; None means "use the current schema".
league_schema_name = None


def tn(tablename: str) -> str:
    """Return ``tablename`` wrapped in backticks, prefixed with the
    backtick-quoted league schema when ``league_schema_name`` is set.
    """
    quoted = '`{0}`'.format(tablename)
    if league_schema_name is None:
        return quoted
    return '`{0}`.{1}'.format(league_schema_name, quoted)
{ "content_hash": "5b8b5c5c4c50ef97bee0d9cd943e58b5", "timestamp": "", "source": "github", "line_count": 8, "max_line_length": 66, "avg_line_length": 27, "alnum_prop": 0.6111111111111112, "repo_name": "incnone/necrobot", "id": "dbc68366610d58b392a7781da0696038447e1120", "size": "216", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "necrobot/database/dbutil.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "637785" } ], "symlink_target": "" }
import os

from logshipper.tail import Tail
from st2reactor.sensor.base import Sensor


class FileWatchSensor(Sensor):
    """Sensor that tails configured files and dispatches one trigger per line."""

    def __init__(self, sensor_service, config=None):
        super(FileWatchSensor, self).__init__(sensor_service=sensor_service,
                                              config=config)
        # Keep only this sensor's sub-section of the pack configuration.
        self._config = self._config['file_watch_sensor']
        self._file_paths = self._config.get('file_paths', [])
        self._trigger_ref = 'linux.file_watch.line'
        self._tail = None

    def setup(self):
        """Validate the configuration and prepare the tail follower."""
        if not self._file_paths:
            raise ValueError('No file_paths configured to monitor')
        follower = Tail(filenames=self._file_paths)
        follower.handler = self._handle_line
        follower.should_run = True
        self._tail = follower

    def run(self):
        """Follow the configured files, dispatching new lines as triggers."""
        self._tail.run()

    def cleanup(self):
        """Stop the tail follower; shutdown errors are deliberately ignored."""
        if not self._tail:
            return
        self._tail.should_run = False
        try:
            self._tail.notifier.stop()
        except Exception:
            # Best-effort shutdown: a failing notifier must not break cleanup.
            pass

    def add_trigger(self, trigger):
        # Dynamic trigger management is not used by this sensor.
        pass

    def update_trigger(self, trigger):
        pass

    def remove_trigger(self, trigger):
        pass

    def _handle_line(self, file_path, line):
        """Dispatch a single tailed line as a file-watch trigger payload."""
        payload = {
            'file_path': file_path,
            'file_name': os.path.basename(file_path),
            'line': line
        }
        self._sensor_service.dispatch(trigger=self._trigger_ref,
                                      payload=payload)
{ "content_hash": "caaa22e788cdfcf45fb22f6d39935e12", "timestamp": "", "source": "github", "line_count": 54, "max_line_length": 76, "avg_line_length": 27.25925925925926, "alnum_prop": 0.5665760869565217, "repo_name": "grengojbo/st2", "id": "9c4e32fb36d5d25a63b24814a0af38a60abf4a06", "size": "1472", "binary": false, "copies": "9", "ref": "refs/heads/master", "path": "contrib/linux/sensors/file_watch_sensor.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "198" }, { "name": "Makefile", "bytes": "21186" }, { "name": "PowerShell", "bytes": "299" }, { "name": "Python", "bytes": "2091976" }, { "name": "Shell", "bytes": "7518" }, { "name": "Slash", "bytes": "677" } ], "symlink_target": "" }
"""Packaging definition for the ``gitlint`` meta-package.

``gitlint`` itself is a thin wrapper: the implementation lives in the
``gitlint-core`` package, which is pinned below at exactly the same
version (with trusted dependency versions) so both are released in
lock-step.
"""
# NOTE: the previous `from __future__ import print_function` was a dead
# Python 2 compatibility import; this package declares python_requires>=3.6.
from setuptools import setup

description = "Git commit message linter written in python, checks your commit messages for style."
long_description = """
Great for use as a commit-msg git hook or as part of your gating script in a CI pipeline (e.g. jenkins, github actions).
Many of the gitlint validations are based on `well-known`_ community_ `standards`_, others are based on checks that
we've found useful throughout the years. Gitlint has sane defaults, but you can also easily customize it to your
own liking. Demo and full documentation on `jorisroovers.github.io/gitlint`_.
To see what's new in the latest release, visit the CHANGELOG_.
Source code on `github.com/jorisroovers/gitlint`_.

.. _well-known: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
.. _community: http://addamhardy.com/blog/2013/06/05/good-commit-messages-and-enforcing-them-with-git-hooks/
.. _standards: http://chris.beams.io/posts/git-commit/
.. _jorisroovers.github.io/gitlint: https://jorisroovers.github.io/gitlint
.. _CHANGELOG: https://github.com/jorisroovers/gitlint/blob/main/CHANGELOG.md
.. _github.com/jorisroovers/gitlint: https://github.com/jorisroovers/gitlint
"""

# Single source of truth for the release version; also used to pin gitlint-core.
version = "0.19.0dev"

setup(
    name="gitlint",
    version=version,
    description=description,
    long_description=long_description,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "Topic :: Software Development :: Quality Assurance",
        "Topic :: Software Development :: Testing",
        "License :: OSI Approved :: MIT License",
    ],
    python_requires=">=3.6",
    install_requires=[
        # Pin the core package to this exact version, including its
        # trusted-deps extra, so wrapper and core cannot drift apart.
        "gitlint-core[trusted-deps]==" + version,
    ],
    keywords="gitlint git lint",
    author="Joris Roovers",
    url="https://jorisroovers.github.io/gitlint",
    project_urls={
        "Documentation": "https://jorisroovers.github.io/gitlint",
        "Source": "https://github.com/jorisroovers/gitlint",
        "Changelog": "https://github.com/jorisroovers/gitlint/blob/main/CHANGELOG.md",
    },
    license="MIT",
)
{ "content_hash": "f9f502d9d4d038fc0768384cbaf77b0a", "timestamp": "", "source": "github", "line_count": 63, "max_line_length": 120, "avg_line_length": 42.63492063492063, "alnum_prop": 0.6809381980640358, "repo_name": "jorisroovers/gitlint", "id": "b94cd50e1cb06b0c5ba68a8c653472fc12b85b1e", "size": "2708", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "setup.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "1486" }, { "name": "Dockerfile", "bytes": "2011" }, { "name": "Python", "bytes": "435509" }, { "name": "Shell", "bytes": "21557" } ], "symlink_target": "" }
import os import tempfile import time import types from ironic_lib import disk_utils import mock from oslo_concurrency import processutils from oslo_config import cfg from oslo_utils import uuidutils import testtools from testtools import matchers from ironic.common import boot_devices from ironic.common import exception from ironic.common import image_service from ironic.common import states from ironic.common import utils as common_utils from ironic.conductor import task_manager from ironic.conductor import utils as manager_utils from ironic.drivers.modules import agent_client from ironic.drivers.modules import deploy_utils as utils from ironic.drivers.modules import fake from ironic.drivers.modules import image_cache from ironic.drivers.modules import pxe from ironic.drivers.modules.storage import cinder from ironic.drivers import utils as driver_utils from ironic.tests import base as tests_base from ironic.tests.unit.conductor import mgr_utils from ironic.tests.unit.db import base as db_base from ironic.tests.unit.db import utils as db_utils from ironic.tests.unit.objects import utils as obj_utils INST_INFO_DICT = db_utils.get_test_pxe_instance_info() DRV_INFO_DICT = db_utils.get_test_pxe_driver_info() DRV_INTERNAL_INFO_DICT = db_utils.get_test_pxe_driver_internal_info() _PXECONF_DEPLOY = b""" default deploy label deploy kernel deploy_kernel append initrd=deploy_ramdisk ipappend 3 label boot_partition kernel kernel append initrd=ramdisk root={{ ROOT }} label boot_whole_disk COM32 chain.c32 append mbr:{{ DISK_IDENTIFIER }} label trusted_boot kernel mboot append tboot.gz --- kernel root={{ ROOT }} --- ramdisk """ _PXECONF_BOOT_PARTITION = """ default boot_partition label deploy kernel deploy_kernel append initrd=deploy_ramdisk ipappend 3 label boot_partition kernel kernel append initrd=ramdisk root=UUID=12345678-1234-1234-1234-1234567890abcdef label boot_whole_disk COM32 chain.c32 append mbr:{{ DISK_IDENTIFIER }} label trusted_boot kernel mboot append tboot.gz 
--- kernel root=UUID=12345678-1234-1234-1234-1234567890abcdef \ --- ramdisk """ _PXECONF_BOOT_WHOLE_DISK = """ default boot_whole_disk label deploy kernel deploy_kernel append initrd=deploy_ramdisk ipappend 3 label boot_partition kernel kernel append initrd=ramdisk root={{ ROOT }} label boot_whole_disk COM32 chain.c32 append mbr:0x12345678 label trusted_boot kernel mboot append tboot.gz --- kernel root={{ ROOT }} --- ramdisk """ _PXECONF_TRUSTED_BOOT = """ default trusted_boot label deploy kernel deploy_kernel append initrd=deploy_ramdisk ipappend 3 label boot_partition kernel kernel append initrd=ramdisk root=UUID=12345678-1234-1234-1234-1234567890abcdef label boot_whole_disk COM32 chain.c32 append mbr:{{ DISK_IDENTIFIER }} label trusted_boot kernel mboot append tboot.gz --- kernel root=UUID=12345678-1234-1234-1234-1234567890abcdef \ --- ramdisk """ _IPXECONF_DEPLOY = b""" #!ipxe dhcp goto deploy :deploy kernel deploy_kernel initrd deploy_ramdisk boot :boot_partition kernel kernel append initrd=ramdisk root={{ ROOT }} boot :boot_whole_disk kernel chain.c32 append mbr:{{ DISK_IDENTIFIER }} boot """ _IPXECONF_BOOT_PARTITION = """ #!ipxe dhcp goto boot_partition :deploy kernel deploy_kernel initrd deploy_ramdisk boot :boot_partition kernel kernel append initrd=ramdisk root=UUID=12345678-1234-1234-1234-1234567890abcdef boot :boot_whole_disk kernel chain.c32 append mbr:{{ DISK_IDENTIFIER }} boot """ _IPXECONF_BOOT_WHOLE_DISK = """ #!ipxe dhcp goto boot_whole_disk :deploy kernel deploy_kernel initrd deploy_ramdisk boot :boot_partition kernel kernel append initrd=ramdisk root={{ ROOT }} boot :boot_whole_disk kernel chain.c32 append mbr:0x12345678 boot """ _IPXECONF_BOOT_ISCSI_NO_CONFIG = """ #!ipxe dhcp goto boot_iscsi :deploy kernel deploy_kernel initrd deploy_ramdisk boot :boot_partition kernel kernel append initrd=ramdisk root=UUID=0x12345678 boot :boot_whole_disk kernel chain.c32 append mbr:{{ DISK_IDENTIFIER }} boot """ _UEFI_PXECONF_DEPLOY = b""" default=deploy 
image=deploy_kernel label=deploy initrd=deploy_ramdisk append="ro text" image=kernel label=boot_partition initrd=ramdisk append="root={{ ROOT }}" image=chain.c32 label=boot_whole_disk append="mbr:{{ DISK_IDENTIFIER }}" """ _UEFI_PXECONF_BOOT_PARTITION = """ default=boot_partition image=deploy_kernel label=deploy initrd=deploy_ramdisk append="ro text" image=kernel label=boot_partition initrd=ramdisk append="root=UUID=12345678-1234-1234-1234-1234567890abcdef" image=chain.c32 label=boot_whole_disk append="mbr:{{ DISK_IDENTIFIER }}" """ _UEFI_PXECONF_BOOT_WHOLE_DISK = """ default=boot_whole_disk image=deploy_kernel label=deploy initrd=deploy_ramdisk append="ro text" image=kernel label=boot_partition initrd=ramdisk append="root={{ ROOT }}" image=chain.c32 label=boot_whole_disk append="mbr:0x12345678" """ _UEFI_PXECONF_DEPLOY_GRUB = b""" set default=deploy set timeout=5 set hidden_timeout_quiet=false menuentry "deploy" { linuxefi deploy_kernel "ro text" initrdefi deploy_ramdisk } menuentry "boot_partition" { linuxefi kernel "root=(( ROOT ))" initrdefi ramdisk } menuentry "boot_whole_disk" { linuxefi chain.c32 mbr:(( DISK_IDENTIFIER )) } """ _UEFI_PXECONF_BOOT_PARTITION_GRUB = """ set default=boot_partition set timeout=5 set hidden_timeout_quiet=false menuentry "deploy" { linuxefi deploy_kernel "ro text" initrdefi deploy_ramdisk } menuentry "boot_partition" { linuxefi kernel "root=UUID=12345678-1234-1234-1234-1234567890abcdef" initrdefi ramdisk } menuentry "boot_whole_disk" { linuxefi chain.c32 mbr:(( DISK_IDENTIFIER )) } """ _UEFI_PXECONF_BOOT_WHOLE_DISK_GRUB = """ set default=boot_whole_disk set timeout=5 set hidden_timeout_quiet=false menuentry "deploy" { linuxefi deploy_kernel "ro text" initrdefi deploy_ramdisk } menuentry "boot_partition" { linuxefi kernel "root=(( ROOT ))" initrdefi ramdisk } menuentry "boot_whole_disk" { linuxefi chain.c32 mbr:0x12345678 } """ @mock.patch.object(time, 'sleep', lambda seconds: None) class PhysicalWorkTestCase(tests_base.TestCase): 
def _mock_calls(self, name_list, module): patch_list = [mock.patch.object(module, name, spec_set=types.FunctionType) for name in name_list] mock_list = [patcher.start() for patcher in patch_list] for patcher in patch_list: self.addCleanup(patcher.stop) parent_mock = mock.MagicMock(spec=[]) for mocker, name in zip(mock_list, name_list): parent_mock.attach_mock(mocker, name) return parent_mock @mock.patch.object(disk_utils, 'work_on_disk', autospec=True) @mock.patch.object(disk_utils, 'is_block_device', autospec=True) @mock.patch.object(disk_utils, 'get_image_mb', autospec=True) @mock.patch.object(utils, 'logout_iscsi', autospec=True) @mock.patch.object(utils, 'login_iscsi', autospec=True) @mock.patch.object(utils, 'get_dev', autospec=True) @mock.patch.object(utils, 'discovery', autospec=True) @mock.patch.object(utils, 'delete_iscsi', autospec=True) def _test_deploy_partition_image(self, mock_delete_iscsi, mock_discovery, mock_get_dev, mock_login_iscsi, mock_logout_iscsi, mock_get_image_mb, mock_is_block_device, mock_work_on_disk, **kwargs): # Below are the only values we allow callers to modify for testing. # Check that values other than this aren't passed in. deploy_args = { 'boot_mode': None, 'boot_option': None, 'configdrive': None, 'disk_label': None, 'ephemeral_format': None, 'ephemeral_mb': None, 'image_mb': 1, 'preserve_ephemeral': False, 'root_mb': 128, 'swap_mb': 64 } disallowed_values = set(kwargs) - set(deploy_args) if disallowed_values: raise ValueError("Only the following kwargs are allowed in " "_test_deploy_partition_image: %(allowed)s. " "Disallowed values: %(disallowed)s." 
% {"allowed": ", ".join(deploy_args.keys()), "disallowed": ", ".join(disallowed_values)}) deploy_args.update(kwargs) address = '127.0.0.1' port = 3306 iqn = 'iqn.xyz' lun = 1 image_path = '/tmp/xyz/image' node_uuid = "12345678-1234-1234-1234-1234567890abcxyz" dev = '/dev/fake' root_uuid = '12345678-1234-1234-12345678-12345678abcdef' mock_get_dev.return_value = dev mock_is_block_device.return_value = True mock_get_image_mb.return_value = deploy_args['image_mb'] mock_work_on_disk.return_value = { 'root uuid': root_uuid, 'efi system partition uuid': None } deploy_kwargs = { 'boot_mode': deploy_args['boot_mode'], 'boot_option': deploy_args['boot_option'], 'configdrive': deploy_args['configdrive'], 'disk_label': deploy_args['disk_label'], 'preserve_ephemeral': deploy_args['preserve_ephemeral'] } utils.deploy_partition_image( address, port, iqn, lun, image_path, deploy_args['root_mb'], deploy_args['swap_mb'], deploy_args['ephemeral_mb'], deploy_args['ephemeral_format'], node_uuid, **deploy_kwargs) mock_get_dev.assert_called_once_with(address, port, iqn, lun) mock_discovery.assert_called_once_with(address, port) mock_login_iscsi.assert_called_once_with(address, port, iqn) mock_logout_iscsi.assert_called_once_with(address, port, iqn) mock_delete_iscsi.assert_called_once_with(address, port, iqn) mock_get_image_mb.assert_called_once_with(image_path) mock_is_block_device.assert_called_once_with(dev) work_on_disk_kwargs = { 'preserve_ephemeral': deploy_args['preserve_ephemeral'], 'configdrive': deploy_args['configdrive'], # boot_option defaults to 'netboot' if # not set 'boot_option': deploy_args['boot_option'] or 'netboot', 'boot_mode': deploy_args['boot_mode'], 'disk_label': deploy_args['disk_label'] } mock_work_on_disk.assert_called_once_with( dev, deploy_args['root_mb'], deploy_args['swap_mb'], deploy_args['ephemeral_mb'], deploy_args['ephemeral_format'], image_path, node_uuid, **work_on_disk_kwargs) def test_deploy_partition_image_without_boot_option(self): 
self._test_deploy_partition_image() def test_deploy_partition_image_netboot(self): self._test_deploy_partition_image(boot_option="netboot") def test_deploy_partition_image_localboot(self): self._test_deploy_partition_image(boot_option="local") def test_deploy_partition_image_wo_boot_option_and_wo_boot_mode(self): self._test_deploy_partition_image() def test_deploy_partition_image_netboot_bios(self): self._test_deploy_partition_image(boot_option="netboot", boot_mode="bios") def test_deploy_partition_image_localboot_bios(self): self._test_deploy_partition_image(boot_option="local", boot_mode="bios") def test_deploy_partition_image_netboot_uefi(self): self._test_deploy_partition_image(boot_option="netboot", boot_mode="uefi") def test_deploy_partition_image_disk_label(self): self._test_deploy_partition_image(disk_label='gpt') def test_deploy_partition_image_image_exceeds_root_partition(self): self.assertRaises(exception.InstanceDeployFailure, self._test_deploy_partition_image, image_mb=129, root_mb=128) def test_deploy_partition_image_localboot_uefi(self): self._test_deploy_partition_image(boot_option="local", boot_mode="uefi") def test_deploy_partition_image_without_swap(self): self._test_deploy_partition_image(swap_mb=0) def test_deploy_partition_image_with_ephemeral(self): self._test_deploy_partition_image(ephemeral_format='exttest', ephemeral_mb=256) def test_deploy_partition_image_preserve_ephemeral(self): self._test_deploy_partition_image(ephemeral_format='exttest', ephemeral_mb=256, preserve_ephemeral=True) def test_deploy_partition_image_with_configdrive(self): self._test_deploy_partition_image(configdrive='http://1.2.3.4/cd') @mock.patch.object(disk_utils, 'create_config_drive_partition', autospec=True) @mock.patch.object(disk_utils, 'get_disk_identifier', autospec=True) def test_deploy_whole_disk_image(self, mock_gdi, create_config_drive_mock): """Check loosely all functions are called with right args.""" address = '127.0.0.1' port = 3306 iqn = 'iqn.xyz' lun 
= 1 image_path = '/tmp/xyz/image' node_uuid = "12345678-1234-1234-1234-1234567890abcxyz" dev = '/dev/fake' utils_name_list = ['get_dev', 'discovery', 'login_iscsi', 'logout_iscsi', 'delete_iscsi'] disk_utils_name_list = ['is_block_device', 'populate_image'] utils_mock = self._mock_calls(utils_name_list, utils) utils_mock.get_dev.return_value = dev disk_utils_mock = self._mock_calls(disk_utils_name_list, disk_utils) disk_utils_mock.is_block_device.return_value = True mock_gdi.return_value = '0x12345678' utils_calls_expected = [mock.call.get_dev(address, port, iqn, lun), mock.call.discovery(address, port), mock.call.login_iscsi(address, port, iqn), mock.call.logout_iscsi(address, port, iqn), mock.call.delete_iscsi(address, port, iqn)] disk_utils_calls_expected = [mock.call.is_block_device(dev), mock.call.populate_image(image_path, dev)] uuid_dict_returned = utils.deploy_disk_image(address, port, iqn, lun, image_path, node_uuid, configdrive=None) self.assertEqual(utils_calls_expected, utils_mock.mock_calls) self.assertEqual(disk_utils_calls_expected, disk_utils_mock.mock_calls) self.assertFalse(create_config_drive_mock.called) self.assertEqual('0x12345678', uuid_dict_returned['disk identifier']) @mock.patch.object(disk_utils, 'create_config_drive_partition', autospec=True) @mock.patch.object(disk_utils, 'get_disk_identifier', autospec=True) def test_deploy_whole_disk_image_with_config_drive(self, mock_gdi, create_partition_mock): """Check loosely all functions are called with right args.""" address = '127.0.0.1' port = 3306 iqn = 'iqn.xyz' lun = 1 image_path = '/tmp/xyz/image' node_uuid = "12345678-1234-1234-1234-1234567890abcxyz" config_url = 'http://1.2.3.4/cd' dev = '/dev/fake' utils_list = ['get_dev', 'discovery', 'login_iscsi', 'logout_iscsi', 'delete_iscsi'] disk_utils_list = ['is_block_device', 'populate_image'] utils_mock = self._mock_calls(utils_list, utils) disk_utils_mock = self._mock_calls(disk_utils_list, disk_utils) utils_mock.get_dev.return_value = dev 
disk_utils_mock.is_block_device.return_value = True mock_gdi.return_value = '0x12345678' utils_calls_expected = [mock.call.get_dev(address, port, iqn, lun), mock.call.discovery(address, port), mock.call.login_iscsi(address, port, iqn), mock.call.logout_iscsi(address, port, iqn), mock.call.delete_iscsi(address, port, iqn)] disk_utils_calls_expected = [mock.call.is_block_device(dev), mock.call.populate_image(image_path, dev)] uuid_dict_returned = utils.deploy_disk_image(address, port, iqn, lun, image_path, node_uuid, configdrive=config_url) utils_mock.assert_has_calls(utils_calls_expected) disk_utils_mock.assert_has_calls(disk_utils_calls_expected) create_partition_mock.assert_called_once_with(node_uuid, dev, config_url) self.assertEqual('0x12345678', uuid_dict_returned['disk identifier']) @mock.patch.object(common_utils, 'execute', autospec=True) def test_verify_iscsi_connection_raises(self, mock_exec): iqn = 'iqn.xyz' mock_exec.return_value = ['iqn.abc', ''] self.assertRaises(exception.InstanceDeployFailure, utils.verify_iscsi_connection, iqn) self.assertEqual(3, mock_exec.call_count) @mock.patch.object(os.path, 'exists', autospec=True) def test_check_file_system_for_iscsi_device_raises(self, mock_os): iqn = 'iqn.xyz' ip = "127.0.0.1" port = "22" mock_os.return_value = False self.assertRaises(exception.InstanceDeployFailure, utils.check_file_system_for_iscsi_device, ip, port, iqn) self.assertEqual(3, mock_os.call_count) @mock.patch.object(os.path, 'exists', autospec=True) def test_check_file_system_for_iscsi_device(self, mock_os): iqn = 'iqn.xyz' ip = "127.0.0.1" port = "22" check_dir = "/dev/disk/by-path/ip-%s:%s-iscsi-%s-lun-1" % (ip, port, iqn) mock_os.return_value = True utils.check_file_system_for_iscsi_device(ip, port, iqn) mock_os.assert_called_once_with(check_dir) @mock.patch.object(common_utils, 'execute', autospec=True) def test_verify_iscsi_connection(self, mock_exec): iqn = 'iqn.xyz' mock_exec.return_value = ['iqn.xyz', ''] 
utils.verify_iscsi_connection(iqn) mock_exec.assert_called_once_with( 'iscsiadm', '-m', 'node', '-S', run_as_root=True, check_exit_code=[0]) @mock.patch.object(common_utils, 'execute', autospec=True) def test_force_iscsi_lun_update(self, mock_exec): iqn = 'iqn.xyz' utils.force_iscsi_lun_update(iqn) mock_exec.assert_called_once_with( 'iscsiadm', '-m', 'node', '-T', iqn, '-R', run_as_root=True, check_exit_code=[0]) @mock.patch.object(common_utils, 'execute', autospec=True) @mock.patch.object(utils, 'verify_iscsi_connection', autospec=True) @mock.patch.object(utils, 'force_iscsi_lun_update', autospec=True) @mock.patch.object(utils, 'check_file_system_for_iscsi_device', autospec=True) def test_login_iscsi_calls_verify_and_update(self, mock_check_dev, mock_update, mock_verify, mock_exec): address = '127.0.0.1' port = 3306 iqn = 'iqn.xyz' mock_exec.return_value = ['iqn.xyz', ''] utils.login_iscsi(address, port, iqn) mock_exec.assert_called_once_with( 'iscsiadm', '-m', 'node', '-p', '%s:%s' % (address, port), '-T', iqn, '--login', run_as_root=True, check_exit_code=[0], attempts=5, delay_on_retry=True) mock_verify.assert_called_once_with(iqn) mock_update.assert_called_once_with(iqn) mock_check_dev.assert_called_once_with(address, port, iqn) @mock.patch.object(utils, 'LOG', autospec=True) @mock.patch.object(common_utils, 'execute', autospec=True) @mock.patch.object(utils, 'verify_iscsi_connection', autospec=True) @mock.patch.object(utils, 'force_iscsi_lun_update', autospec=True) @mock.patch.object(utils, 'check_file_system_for_iscsi_device', autospec=True) @mock.patch.object(utils, 'delete_iscsi', autospec=True) @mock.patch.object(utils, 'logout_iscsi', autospec=True) def test_login_iscsi_calls_raises( self, mock_loiscsi, mock_discsi, mock_check_dev, mock_update, mock_verify, mock_exec, mock_log): address = '127.0.0.1' port = 3306 iqn = 'iqn.xyz' mock_exec.return_value = ['iqn.xyz', ''] mock_check_dev.side_effect = exception.InstanceDeployFailure('boom') 
self.assertRaises(exception.InstanceDeployFailure, utils.login_iscsi, address, port, iqn) mock_verify.assert_called_once_with(iqn) mock_update.assert_called_once_with(iqn) mock_loiscsi.assert_called_once_with(address, port, iqn) mock_discsi.assert_called_once_with(address, port, iqn) self.assertIsInstance(mock_log.error.call_args[0][1], exception.InstanceDeployFailure) @mock.patch.object(utils, 'LOG', autospec=True) @mock.patch.object(common_utils, 'execute', autospec=True) @mock.patch.object(utils, 'verify_iscsi_connection', autospec=True) @mock.patch.object(utils, 'force_iscsi_lun_update', autospec=True) @mock.patch.object(utils, 'check_file_system_for_iscsi_device', autospec=True) @mock.patch.object(utils, 'delete_iscsi', autospec=True) @mock.patch.object(utils, 'logout_iscsi', autospec=True) def test_login_iscsi_calls_raises_during_cleanup( self, mock_loiscsi, mock_discsi, mock_check_dev, mock_update, mock_verify, mock_exec, mock_log): address = '127.0.0.1' port = 3306 iqn = 'iqn.xyz' mock_exec.return_value = ['iqn.xyz', ''] mock_check_dev.side_effect = exception.InstanceDeployFailure('boom') mock_discsi.side_effect = processutils.ProcessExecutionError('boom') self.assertRaises(exception.InstanceDeployFailure, utils.login_iscsi, address, port, iqn) mock_verify.assert_called_once_with(iqn) mock_update.assert_called_once_with(iqn) mock_loiscsi.assert_called_once_with(address, port, iqn) mock_discsi.assert_called_once_with(address, port, iqn) self.assertIsInstance(mock_log.error.call_args[0][1], exception.InstanceDeployFailure) self.assertIsInstance(mock_log.warning.call_args[0][1], processutils.ProcessExecutionError) @mock.patch.object(disk_utils, 'is_block_device', lambda d: True) def test_always_logout_and_delete_iscsi(self): """Check if logout_iscsi() and delete_iscsi() are called. Make sure that logout_iscsi() and delete_iscsi() are called once login_iscsi() is invoked. 
""" address = '127.0.0.1' port = 3306 iqn = 'iqn.xyz' lun = 1 image_path = '/tmp/xyz/image' root_mb = 128 swap_mb = 64 ephemeral_mb = 256 ephemeral_format = 'exttest' node_uuid = "12345678-1234-1234-1234-1234567890abcxyz" dev = '/dev/fake' class TestException(Exception): pass utils_name_list = ['get_dev', 'discovery', 'login_iscsi', 'logout_iscsi', 'delete_iscsi'] disk_utils_name_list = ['get_image_mb', 'work_on_disk'] utils_mock = self._mock_calls(utils_name_list, utils) utils_mock.get_dev.return_value = dev disk_utils_mock = self._mock_calls(disk_utils_name_list, disk_utils) disk_utils_mock.get_image_mb.return_value = 1 disk_utils_mock.work_on_disk.side_effect = TestException utils_calls_expected = [mock.call.get_dev(address, port, iqn, lun), mock.call.discovery(address, port), mock.call.login_iscsi(address, port, iqn), mock.call.logout_iscsi(address, port, iqn), mock.call.delete_iscsi(address, port, iqn)] disk_utils_calls_expected = [mock.call.get_image_mb(image_path), mock.call.work_on_disk( dev, root_mb, swap_mb, ephemeral_mb, ephemeral_format, image_path, node_uuid, configdrive=None, preserve_ephemeral=False, boot_option="netboot", boot_mode="bios", disk_label=None)] self.assertRaises(TestException, utils.deploy_partition_image, address, port, iqn, lun, image_path, root_mb, swap_mb, ephemeral_mb, ephemeral_format, node_uuid) self.assertEqual(utils_calls_expected, utils_mock.mock_calls) self.assertEqual(disk_utils_calls_expected, disk_utils_mock.mock_calls) @mock.patch.object(common_utils, 'execute', autospec=True) @mock.patch.object(utils, 'verify_iscsi_connection', autospec=True) @mock.patch.object(utils, 'force_iscsi_lun_update', autospec=True) @mock.patch.object(utils, 'check_file_system_for_iscsi_device', autospec=True) def test_ipv6_address_wrapped(self, mock_check_dev, mock_update, mock_verify, mock_exec): address = '2001:DB8::1111' port = 3306 iqn = 'iqn.xyz' mock_exec.return_value = ['iqn.xyz', ''] utils.login_iscsi(address, port, iqn) 
mock_exec.assert_called_once_with( 'iscsiadm', '-m', 'node', '-p', '[%s]:%s' % (address, port), '-T', iqn, '--login', run_as_root=True, check_exit_code=[0], attempts=5, delay_on_retry=True) class SwitchPxeConfigTestCase(tests_base.TestCase): # NOTE(TheJulia): Remove elilo support after the deprecation period, # in the Queens release. def _create_config(self, ipxe=False, boot_mode=None, boot_loader='elilo'): (fd, fname) = tempfile.mkstemp() if boot_mode == 'uefi' and not ipxe: if boot_loader == 'grub': pxe_cfg = _UEFI_PXECONF_DEPLOY_GRUB else: pxe_cfg = _UEFI_PXECONF_DEPLOY else: pxe_cfg = _IPXECONF_DEPLOY if ipxe else _PXECONF_DEPLOY os.write(fd, pxe_cfg) os.close(fd) self.addCleanup(os.unlink, fname) return fname def test_switch_pxe_config_partition_image(self): boot_mode = 'bios' fname = self._create_config() utils.switch_pxe_config(fname, '12345678-1234-1234-1234-1234567890abcdef', boot_mode, False) with open(fname, 'r') as f: pxeconf = f.read() self.assertEqual(_PXECONF_BOOT_PARTITION, pxeconf) def test_switch_pxe_config_whole_disk_image(self): boot_mode = 'bios' fname = self._create_config() utils.switch_pxe_config(fname, '0x12345678', boot_mode, True) with open(fname, 'r') as f: pxeconf = f.read() self.assertEqual(_PXECONF_BOOT_WHOLE_DISK, pxeconf) def test_switch_pxe_config_trusted_boot(self): boot_mode = 'bios' fname = self._create_config() utils.switch_pxe_config(fname, '12345678-1234-1234-1234-1234567890abcdef', boot_mode, False, True) with open(fname, 'r') as f: pxeconf = f.read() self.assertEqual(_PXECONF_TRUSTED_BOOT, pxeconf) def test_switch_ipxe_config_partition_image(self): boot_mode = 'bios' cfg.CONF.set_override('ipxe_enabled', True, 'pxe') fname = self._create_config(ipxe=True) utils.switch_pxe_config(fname, '12345678-1234-1234-1234-1234567890abcdef', boot_mode, False) with open(fname, 'r') as f: pxeconf = f.read() self.assertEqual(_IPXECONF_BOOT_PARTITION, pxeconf) def test_switch_ipxe_config_whole_disk_image(self): boot_mode = 'bios' 
cfg.CONF.set_override('ipxe_enabled', True, 'pxe') fname = self._create_config(ipxe=True) utils.switch_pxe_config(fname, '0x12345678', boot_mode, True) with open(fname, 'r') as f: pxeconf = f.read() self.assertEqual(_IPXECONF_BOOT_WHOLE_DISK, pxeconf) # NOTE(TheJulia): Remove elilo support after the deprecation period, # in the Queens release. def test_switch_uefi_elilo_pxe_config_partition_image(self): boot_mode = 'uefi' fname = self._create_config(boot_mode=boot_mode) utils.switch_pxe_config(fname, '12345678-1234-1234-1234-1234567890abcdef', boot_mode, False) with open(fname, 'r') as f: pxeconf = f.read() self.assertEqual(_UEFI_PXECONF_BOOT_PARTITION, pxeconf) # NOTE(TheJulia): Remove elilo support after the deprecation period, # in the Queens release. def test_switch_uefi_elilo_config_whole_disk_image(self): boot_mode = 'uefi' fname = self._create_config(boot_mode=boot_mode) utils.switch_pxe_config(fname, '0x12345678', boot_mode, True) with open(fname, 'r') as f: pxeconf = f.read() self.assertEqual(_UEFI_PXECONF_BOOT_WHOLE_DISK, pxeconf) def test_switch_uefi_grub_pxe_config_partition_image(self): boot_mode = 'uefi' fname = self._create_config(boot_mode=boot_mode, boot_loader='grub') utils.switch_pxe_config(fname, '12345678-1234-1234-1234-1234567890abcdef', boot_mode, False) with open(fname, 'r') as f: pxeconf = f.read() self.assertEqual(_UEFI_PXECONF_BOOT_PARTITION_GRUB, pxeconf) def test_switch_uefi_grub_config_whole_disk_image(self): boot_mode = 'uefi' fname = self._create_config(boot_mode=boot_mode, boot_loader='grub') utils.switch_pxe_config(fname, '0x12345678', boot_mode, True) with open(fname, 'r') as f: pxeconf = f.read() self.assertEqual(_UEFI_PXECONF_BOOT_WHOLE_DISK_GRUB, pxeconf) def test_switch_uefi_ipxe_config_partition_image(self): boot_mode = 'uefi' cfg.CONF.set_override('ipxe_enabled', True, 'pxe') fname = self._create_config(boot_mode=boot_mode, ipxe=True) utils.switch_pxe_config(fname, '12345678-1234-1234-1234-1234567890abcdef', boot_mode, 
False) with open(fname, 'r') as f: pxeconf = f.read() self.assertEqual(_IPXECONF_BOOT_PARTITION, pxeconf) def test_switch_uefi_ipxe_config_whole_disk_image(self): boot_mode = 'uefi' cfg.CONF.set_override('ipxe_enabled', True, 'pxe') fname = self._create_config(boot_mode=boot_mode, ipxe=True) utils.switch_pxe_config(fname, '0x12345678', boot_mode, True) with open(fname, 'r') as f: pxeconf = f.read() self.assertEqual(_IPXECONF_BOOT_WHOLE_DISK, pxeconf) def test_switch_ipxe_iscsi_boot(self): boot_mode = 'iscsi' cfg.CONF.set_override('ipxe_enabled', True, 'pxe') fname = self._create_config(boot_mode=boot_mode, ipxe=True) utils.switch_pxe_config(fname, '0x12345678', boot_mode, False, False, True) with open(fname, 'r') as f: pxeconf = f.read() self.assertEqual(_IPXECONF_BOOT_ISCSI_NO_CONFIG, pxeconf) class GetPxeBootConfigTestCase(db_base.DbTestCase): def setUp(self): super(GetPxeBootConfigTestCase, self).setUp() self.node = obj_utils.get_test_node(self.context, driver='fake') self.config(pxe_bootfile_name='bios-bootfile', group='pxe') self.config(uefi_pxe_bootfile_name='uefi-bootfile', group='pxe') self.config(pxe_config_template='bios-template', group='pxe') self.config(uefi_pxe_config_template='uefi-template', group='pxe') self.bootfile_by_arch = {'aarch64': 'aarch64-bootfile', 'ppc64': 'ppc64-bootfile'} self.template_by_arch = {'aarch64': 'aarch64-template', 'ppc64': 'ppc64-template'} def test_get_pxe_boot_file_bios_without_by_arch(self): properties = {'cpu_arch': 'x86', 'capabilities': 'boot_mode:bios'} self.node.properties = properties self.config(pxe_bootfile_name_by_arch={}, group='pxe') result = utils.get_pxe_boot_file(self.node) self.assertEqual('bios-bootfile', result) def test_get_pxe_config_template_bios_without_by_arch(self): properties = {'cpu_arch': 'x86', 'capabilities': 'boot_mode:bios'} self.node.properties = properties self.config(pxe_config_template_by_arch={}, group='pxe') result = utils.get_pxe_config_template(self.node) 
self.assertEqual('bios-template', result) def test_get_pxe_boot_file_uefi_without_by_arch(self): properties = {'cpu_arch': 'x86_64', 'capabilities': 'boot_mode:uefi'} self.node.properties = properties self.config(pxe_bootfile_name_by_arch={}, group='pxe') result = utils.get_pxe_boot_file(self.node) self.assertEqual('uefi-bootfile', result) def test_get_pxe_config_template_uefi_without_by_arch(self): properties = {'cpu_arch': 'x86_64', 'capabilities': 'boot_mode:uefi'} self.node.properties = properties self.config(pxe_config_template_by_arch={}, group='pxe') result = utils.get_pxe_config_template(self.node) self.assertEqual('uefi-template', result) def test_get_pxe_boot_file_cpu_not_in_by_arch(self): properties = {'cpu_arch': 'x86', 'capabilities': 'boot_mode:bios'} self.node.properties = properties self.config(pxe_bootfile_name_by_arch=self.bootfile_by_arch, group='pxe') result = utils.get_pxe_boot_file(self.node) self.assertEqual('bios-bootfile', result) def test_get_pxe_config_template_cpu_not_in_by_arch(self): properties = {'cpu_arch': 'x86', 'capabilities': 'boot_mode:bios'} self.node.properties = properties self.config(pxe_config_template_by_arch=self.template_by_arch, group='pxe') result = utils.get_pxe_config_template(self.node) self.assertEqual('bios-template', result) def test_get_pxe_boot_file_cpu_in_by_arch(self): properties = {'cpu_arch': 'aarch64', 'capabilities': 'boot_mode:uefi'} self.node.properties = properties self.config(pxe_bootfile_name_by_arch=self.bootfile_by_arch, group='pxe') result = utils.get_pxe_boot_file(self.node) self.assertEqual('aarch64-bootfile', result) def test_get_pxe_config_template_cpu_in_by_arch(self): properties = {'cpu_arch': 'aarch64', 'capabilities': 'boot_mode:uefi'} self.node.properties = properties self.config(pxe_config_template_by_arch=self.template_by_arch, group='pxe') result = utils.get_pxe_config_template(self.node) self.assertEqual('aarch64-template', result) def test_get_pxe_boot_file_emtpy_property(self): 
self.node.properties = {} self.config(pxe_bootfile_name_by_arch=self.bootfile_by_arch, group='pxe') result = utils.get_pxe_boot_file(self.node) self.assertEqual('bios-bootfile', result) def test_get_pxe_config_template_emtpy_property(self): self.node.properties = {} self.config(pxe_config_template_by_arch=self.template_by_arch, group='pxe') result = utils.get_pxe_config_template(self.node) self.assertEqual('bios-template', result) @mock.patch('time.sleep', lambda sec: None) class OtherFunctionTestCase(db_base.DbTestCase): def setUp(self): super(OtherFunctionTestCase, self).setUp() mgr_utils.mock_the_extension_manager(driver="fake_pxe") self.node = obj_utils.create_test_node(self.context, driver='fake_pxe') def test_get_dev(self): expected = '/dev/disk/by-path/ip-1.2.3.4:5678-iscsi-iqn.fake-lun-9' actual = utils.get_dev('1.2.3.4', 5678, 'iqn.fake', 9) self.assertEqual(expected, actual) @mock.patch.object(utils, 'LOG', autospec=True) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) @mock.patch.object(task_manager.TaskManager, 'process_event', autospec=True) def _test_set_failed_state(self, mock_event, mock_power, mock_log, event_value=None, power_value=None, log_calls=None, poweroff=True, collect_logs=True): err_msg = 'some failure' mock_event.side_effect = event_value mock_power.side_effect = power_value with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: if collect_logs: utils.set_failed_state(task, err_msg) else: utils.set_failed_state(task, err_msg, collect_logs=collect_logs) mock_event.assert_called_once_with(task, 'fail') if poweroff: mock_power.assert_called_once_with(task, states.POWER_OFF) else: self.assertFalse(mock_power.called) self.assertEqual(err_msg, task.node.last_error) if (log_calls and poweroff): mock_log.exception.assert_has_calls(log_calls) else: self.assertFalse(mock_log.called) @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) def test_set_failed_state(self, mock_collect): 
exc_state = exception.InvalidState('invalid state') exc_param = exception.InvalidParameterValue('invalid parameter') mock_call = mock.call(mock.ANY) self._test_set_failed_state() calls = [mock_call] self._test_set_failed_state(event_value=iter([exc_state] * len(calls)), log_calls=calls) calls = [mock_call] self._test_set_failed_state(power_value=iter([exc_param] * len(calls)), log_calls=calls) calls = [mock_call, mock_call] self._test_set_failed_state(event_value=iter([exc_state] * len(calls)), power_value=iter([exc_param] * len(calls)), log_calls=calls) self.assertEqual(4, mock_collect.call_count) @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) def test_set_failed_state_no_poweroff(self, mock_collect): cfg.CONF.set_override('power_off_after_deploy_failure', False, 'deploy') exc_state = exception.InvalidState('invalid state') exc_param = exception.InvalidParameterValue('invalid parameter') mock_call = mock.call(mock.ANY) self._test_set_failed_state(poweroff=False) calls = [mock_call] self._test_set_failed_state(event_value=iter([exc_state] * len(calls)), log_calls=calls, poweroff=False) calls = [mock_call] self._test_set_failed_state(power_value=iter([exc_param] * len(calls)), log_calls=calls, poweroff=False) calls = [mock_call, mock_call] self._test_set_failed_state(event_value=iter([exc_state] * len(calls)), power_value=iter([exc_param] * len(calls)), log_calls=calls, poweroff=False) self.assertEqual(4, mock_collect.call_count) @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) def test_set_failed_state_collect_deploy_logs(self, mock_collect): for opt in ('always', 'on_failure'): cfg.CONF.set_override('deploy_logs_collect', opt, 'agent') self._test_set_failed_state() mock_collect.assert_called_once_with(mock.ANY) mock_collect.reset_mock() @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) def test_set_failed_state_collect_deploy_logs_never(self, mock_collect): 
cfg.CONF.set_override('deploy_logs_collect', 'never', 'agent') self._test_set_failed_state() self.assertFalse(mock_collect.called) @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) def test_set_failed_state_collect_deploy_logs_overide(self, mock_collect): cfg.CONF.set_override('deploy_logs_collect', 'always', 'agent') self._test_set_failed_state(collect_logs=False) self.assertFalse(mock_collect.called) def test_get_boot_option(self): self.node.instance_info = {'capabilities': '{"boot_option": "local"}'} result = utils.get_boot_option(self.node) self.assertEqual("local", result) def test_get_boot_option_default_value(self): self.node.instance_info = {} result = utils.get_boot_option(self.node) self.assertEqual("netboot", result) def test_get_boot_option_overriden_default_value(self): cfg.CONF.set_override('default_boot_option', 'local', 'deploy') self.node.instance_info = {} result = utils.get_boot_option(self.node) self.assertEqual("local", result) def test_get_boot_option_instance_info_priority(self): cfg.CONF.set_override('default_boot_option', 'local', 'deploy') self.node.instance_info = {'capabilities': '{"boot_option": "netboot"}'} result = utils.get_boot_option(self.node) self.assertEqual("netboot", result) @mock.patch.object(image_cache, 'clean_up_caches', autospec=True) def test_fetch_images(self, mock_clean_up_caches): mock_cache = mock.MagicMock( spec_set=['fetch_image', 'master_dir'], master_dir='master_dir') utils.fetch_images(None, mock_cache, [('uuid', 'path')]) mock_clean_up_caches.assert_called_once_with(None, 'master_dir', [('uuid', 'path')]) mock_cache.fetch_image.assert_called_once_with('uuid', 'path', ctx=None, force_raw=True) @mock.patch.object(image_cache, 'clean_up_caches', autospec=True) def test_fetch_images_fail(self, mock_clean_up_caches): exc = exception.InsufficientDiskSpace(path='a', required=2, actual=1) mock_cache = mock.MagicMock( spec_set=['master_dir'], master_dir='master_dir') 
        mock_clean_up_caches.side_effect = [exc]
        self.assertRaises(exception.InstanceDeployFailure,
                          utils.fetch_images,
                          None,
                          mock_cache,
                          [('uuid', 'path')])
        mock_clean_up_caches.assert_called_once_with(None, 'master_dir',
                                                     [('uuid', 'path')])

    @mock.patch.object(utils, '_get_ironic_session')
    @mock.patch('ironic.common.keystone.get_service_url')
    def test_get_ironic_api_url_from_config(self, mock_get_url, mock_ks):
        """A configured [conductor]api_url is used without asking keystone."""
        mock_sess = mock.Mock()
        mock_ks.return_value = mock_sess
        fake_api_url = 'http://foo/'
        mock_get_url.side_effect = exception.KeystoneFailure
        self.config(api_url=fake_api_url, group='conductor')
        url = utils.get_ironic_api_url()

        # also checking for stripped trailing slash
        self.assertEqual(fake_api_url[:-1], url)
        self.assertFalse(mock_get_url.called)

    @mock.patch.object(utils, '_get_ironic_session')
    @mock.patch('ironic.common.keystone.get_service_url')
    def test_get_ironic_api_url_from_keystone(self, mock_get_url, mock_ks):
        """With no api_url configured the URL comes from the catalog."""
        mock_sess = mock.Mock()
        mock_ks.return_value = mock_sess
        fake_api_url = 'http://foo/'
        mock_get_url.return_value = fake_api_url
        self.config(api_url=None, group='conductor')
        url = utils.get_ironic_api_url()

        # also checking for stripped trailing slash
        self.assertEqual(fake_api_url[:-1], url)
        mock_get_url.assert_called_with(mock_sess)

    @mock.patch.object(utils, '_get_ironic_session')
    @mock.patch('ironic.common.keystone.get_service_url')
    def test_get_ironic_api_url_fail(self, mock_get_url, mock_ks):
        """No config and a keystone failure raises InvalidParameterValue."""
        mock_sess = mock.Mock()
        mock_ks.return_value = mock_sess
        mock_get_url.side_effect = exception.KeystoneFailure()
        self.config(api_url=None, group='conductor')
        self.assertRaises(exception.InvalidParameterValue,
                          utils.get_ironic_api_url)


class VirtualMediaDeployUtilsTestCase(db_base.DbTestCase):
    """Tests for get_single_nic_with_vif_port_id.

    Each test creates one port carrying the VIF reference in a different
    field (internal_info keys or the legacy 'extra' dict) and expects the
    port's MAC address back.
    """

    def setUp(self):
        super(VirtualMediaDeployUtilsTestCase, self).setUp()
        mgr_utils.mock_the_extension_manager(driver="iscsi_ilo")
        info_dict = db_utils.get_test_ilo_info()
        self.node = obj_utils.create_test_node(
            self.context, driver='iscsi_ilo', driver_info=info_dict)

    def test_get_single_nic_with_vif_port_id(self):
        # VIF stored under internal_info['tenant_vif_port_id'].
        obj_utils.create_test_port(
            self.context, node_id=self.node.id, address='aa:bb:cc:dd:ee:ff',
            uuid=uuidutils.generate_uuid(),
            internal_info={'tenant_vif_port_id': 'test-vif-A'})
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            address = utils.get_single_nic_with_vif_port_id(task)
            self.assertEqual('aa:bb:cc:dd:ee:ff', address)

    def test_get_single_nic_with_vif_port_id_extra(self):
        # Legacy location: extra['vif_port_id'].
        obj_utils.create_test_port(
            self.context, node_id=self.node.id, address='aa:bb:cc:dd:ee:ff',
            uuid=uuidutils.generate_uuid(),
            extra={'vif_port_id': 'test-vif-A'})
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            address = utils.get_single_nic_with_vif_port_id(task)
            self.assertEqual('aa:bb:cc:dd:ee:ff', address)

    def test_get_single_nic_with_cleaning_vif_port_id(self):
        # VIF stored under internal_info['cleaning_vif_port_id'].
        obj_utils.create_test_port(
            self.context, node_id=self.node.id, address='aa:bb:cc:dd:ee:ff',
            uuid=uuidutils.generate_uuid(),
            internal_info={'cleaning_vif_port_id': 'test-vif-A'})
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            address = utils.get_single_nic_with_vif_port_id(task)
            self.assertEqual('aa:bb:cc:dd:ee:ff', address)

    def test_get_single_nic_with_provisioning_vif_port_id(self):
        # VIF stored under internal_info['provisioning_vif_port_id'].
        obj_utils.create_test_port(
            self.context, node_id=self.node.id, address='aa:bb:cc:dd:ee:ff',
            uuid=uuidutils.generate_uuid(),
            internal_info={'provisioning_vif_port_id': 'test-vif-A'})
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            address = utils.get_single_nic_with_vif_port_id(task)
            self.assertEqual('aa:bb:cc:dd:ee:ff', address)


class ParseInstanceInfoCapabilitiesTestCase(tests_base.TestCase):
    """Tests for capability parsing/validation helpers in deploy utils."""

    def setUp(self):
        super(ParseInstanceInfoCapabilitiesTestCase, self).setUp()
        self.node = obj_utils.get_test_node(self.context, driver='fake')

    def test_parse_instance_info_capabilities_string(self):
        # A JSON-string capabilities value is decoded into a dict.
        self.node.instance_info = {'capabilities': '{"cat": "meow"}'}
        expected_result = {"cat": "meow"}
result = utils.parse_instance_info_capabilities(self.node) self.assertEqual(expected_result, result) def test_parse_instance_info_capabilities(self): self.node.instance_info = {'capabilities': {"dog": "wuff"}} expected_result = {"dog": "wuff"} result = utils.parse_instance_info_capabilities(self.node) self.assertEqual(expected_result, result) def test_parse_instance_info_invalid_type(self): self.node.instance_info = {'capabilities': 'not-a-dict'} self.assertRaises(exception.InvalidParameterValue, utils.parse_instance_info_capabilities, self.node) def test_is_secure_boot_requested_true(self): self.node.instance_info = {'capabilities': {"secure_boot": "tRue"}} self.assertTrue(utils.is_secure_boot_requested(self.node)) def test_is_secure_boot_requested_false(self): self.node.instance_info = {'capabilities': {"secure_boot": "false"}} self.assertFalse(utils.is_secure_boot_requested(self.node)) def test_is_secure_boot_requested_invalid(self): self.node.instance_info = {'capabilities': {"secure_boot": "invalid"}} self.assertFalse(utils.is_secure_boot_requested(self.node)) def test_is_trusted_boot_requested_true(self): self.node.instance_info = {'capabilities': {"trusted_boot": "true"}} self.assertTrue(utils.is_trusted_boot_requested(self.node)) def test_is_trusted_boot_requested_false(self): self.node.instance_info = {'capabilities': {"trusted_boot": "false"}} self.assertFalse(utils.is_trusted_boot_requested(self.node)) def test_is_trusted_boot_requested_invalid(self): self.node.instance_info = {'capabilities': {"trusted_boot": "invalid"}} self.assertFalse(utils.is_trusted_boot_requested(self.node)) def test_get_boot_mode_for_deploy_using_capabilities(self): properties = {'capabilities': 'boot_mode:uefi,cap2:value2'} self.node.properties = properties result = utils.get_boot_mode_for_deploy(self.node) self.assertEqual('uefi', result) def test_get_boot_mode_for_deploy_using_instance_info_cap(self): instance_info = {'capabilities': {'secure_boot': 'True'}} 
self.node.instance_info = instance_info result = utils.get_boot_mode_for_deploy(self.node) self.assertEqual('uefi', result) instance_info = {'capabilities': {'trusted_boot': 'True'}} self.node.instance_info = instance_info result = utils.get_boot_mode_for_deploy(self.node) self.assertEqual('bios', result) instance_info = {'capabilities': {'trusted_boot': 'True'}, 'capabilities': {'secure_boot': 'True'}} self.node.instance_info = instance_info result = utils.get_boot_mode_for_deploy(self.node) self.assertEqual('uefi', result) def test_get_boot_mode_for_deploy_using_instance_info(self): instance_info = {'deploy_boot_mode': 'bios'} self.node.instance_info = instance_info result = utils.get_boot_mode_for_deploy(self.node) self.assertEqual('bios', result) def test_validate_boot_mode_capability(self): prop = {'capabilities': 'boot_mode:uefi,cap2:value2'} self.node.properties = prop result = utils.validate_capabilities(self.node) self.assertIsNone(result) def test_validate_boot_mode_capability_with_exc(self): prop = {'capabilities': 'boot_mode:UEFI,cap2:value2'} self.node.properties = prop self.assertRaises(exception.InvalidParameterValue, utils.validate_capabilities, self.node) def test_validate_boot_mode_capability_instance_info(self): inst_info = {'capabilities': {"boot_mode": "uefi", "cap2": "value2"}} self.node.instance_info = inst_info result = utils.validate_capabilities(self.node) self.assertIsNone(result) def test_validate_boot_mode_capability_instance_info_with_exc(self): inst_info = {'capabilities': {"boot_mode": "UEFI", "cap2": "value2"}} self.node.instance_info = inst_info self.assertRaises(exception.InvalidParameterValue, utils.validate_capabilities, self.node) def test_validate_trusted_boot_capability(self): properties = {'capabilities': 'trusted_boot:value'} self.node.properties = properties self.assertRaises(exception.InvalidParameterValue, utils.validate_capabilities, self.node) def test_all_supported_capabilities(self): self.assertEqual(('local', 
'netboot'), utils.SUPPORTED_CAPABILITIES['boot_option']) self.assertEqual(('bios', 'uefi'), utils.SUPPORTED_CAPABILITIES['boot_mode']) self.assertEqual(('true', 'false'), utils.SUPPORTED_CAPABILITIES['secure_boot']) self.assertEqual(('true', 'false'), utils.SUPPORTED_CAPABILITIES['trusted_boot']) def test_get_disk_label(self): inst_info = {'capabilities': {'disk_label': 'gpt', 'foo': 'bar'}} self.node.instance_info = inst_info result = utils.get_disk_label(self.node) self.assertEqual('gpt', result) class TrySetBootDeviceTestCase(db_base.DbTestCase): def setUp(self): super(TrySetBootDeviceTestCase, self).setUp() mgr_utils.mock_the_extension_manager(driver="fake") self.node = obj_utils.create_test_node(self.context, driver="fake") @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True) def test_try_set_boot_device_okay(self, node_set_boot_device_mock): with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: utils.try_set_boot_device(task, boot_devices.DISK, persistent=True) node_set_boot_device_mock.assert_called_once_with( task, boot_devices.DISK, persistent=True) @mock.patch.object(utils, 'LOG', autospec=True) @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True) def test_try_set_boot_device_ipmifailure_uefi( self, node_set_boot_device_mock, log_mock): self.node.properties = {'capabilities': 'boot_mode:uefi'} self.node.save() node_set_boot_device_mock.side_effect = exception.IPMIFailure(cmd='a') with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: utils.try_set_boot_device(task, boot_devices.DISK, persistent=True) node_set_boot_device_mock.assert_called_once_with( task, boot_devices.DISK, persistent=True) log_mock.warning.assert_called_once_with(mock.ANY, self.node.uuid) @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True) def test_try_set_boot_device_ipmifailure_bios( self, node_set_boot_device_mock): node_set_boot_device_mock.side_effect = 
exception.IPMIFailure(cmd='a') with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: self.assertRaises(exception.IPMIFailure, utils.try_set_boot_device, task, boot_devices.DISK, persistent=True) node_set_boot_device_mock.assert_called_once_with( task, boot_devices.DISK, persistent=True) @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True) def test_try_set_boot_device_some_other_exception( self, node_set_boot_device_mock): exc = exception.IloOperationError(operation="qwe", error="error") node_set_boot_device_mock.side_effect = exc with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: self.assertRaises(exception.IloOperationError, utils.try_set_boot_device, task, boot_devices.DISK, persistent=True) node_set_boot_device_mock.assert_called_once_with( task, boot_devices.DISK, persistent=True) class AgentMethodsTestCase(db_base.DbTestCase): def setUp(self): super(AgentMethodsTestCase, self).setUp() mgr_utils.mock_the_extension_manager(driver='fake_agent') self.clean_steps = { 'deploy': [ {'interface': 'deploy', 'step': 'erase_devices', 'priority': 20}, {'interface': 'deploy', 'step': 'update_firmware', 'priority': 30} ], 'raid': [ {'interface': 'raid', 'step': 'create_configuration', 'priority': 10} ] } n = {'driver': 'fake_agent', 'driver_internal_info': { 'agent_cached_clean_steps': self.clean_steps}} self.node = obj_utils.create_test_node(self.context, **n) self.ports = [obj_utils.create_test_port(self.context, node_id=self.node.id)] def test_agent_get_clean_steps(self): with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: response = utils.agent_get_clean_steps(task) # Since steps are returned in dicts, they have non-deterministic # ordering self.assertThat(response, matchers.HasLength(3)) self.assertIn(self.clean_steps['deploy'][0], response) self.assertIn(self.clean_steps['deploy'][1], response) self.assertIn(self.clean_steps['raid'][0], response) def 
test_get_clean_steps_custom_interface(self): with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: response = utils.agent_get_clean_steps(task, interface='raid') self.assertThat(response, matchers.HasLength(1)) self.assertEqual(self.clean_steps['raid'], response) def test_get_clean_steps_override_priorities(self): with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: new_priorities = {'create_configuration': 42} response = utils.agent_get_clean_steps( task, interface='raid', override_priorities=new_priorities) self.assertEqual(42, response[0]['priority']) def test_get_clean_steps_override_priorities_none(self): with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: # this is simulating the default value of a configuration option new_priorities = {'create_configuration': None} response = utils.agent_get_clean_steps( task, interface='raid', override_priorities=new_priorities) self.assertEqual(10, response[0]['priority']) def test_get_clean_steps_missing_steps(self): info = self.node.driver_internal_info del info['agent_cached_clean_steps'] self.node.driver_internal_info = info self.node.save() with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: self.assertRaises(exception.NodeCleaningFailure, utils.agent_get_clean_steps, task) @mock.patch('ironic.objects.Port.list_by_node_id', spec_set=types.FunctionType) @mock.patch.object(agent_client.AgentClient, 'execute_clean_step', autospec=True) def test_execute_clean_step(self, client_mock, list_ports_mock): client_mock.return_value = { 'command_status': 'SUCCEEDED'} list_ports_mock.return_value = self.ports with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: response = utils.agent_execute_clean_step( task, self.clean_steps['deploy'][0]) self.assertEqual(states.CLEANWAIT, response) @mock.patch('ironic.objects.Port.list_by_node_id', spec_set=types.FunctionType) 
@mock.patch.object(agent_client.AgentClient, 'execute_clean_step', autospec=True) def test_execute_clean_step_running(self, client_mock, list_ports_mock): client_mock.return_value = { 'command_status': 'RUNNING'} list_ports_mock.return_value = self.ports with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: response = utils.agent_execute_clean_step( task, self.clean_steps['deploy'][0]) self.assertEqual(states.CLEANWAIT, response) @mock.patch('ironic.objects.Port.list_by_node_id', spec_set=types.FunctionType) @mock.patch.object(agent_client.AgentClient, 'execute_clean_step', autospec=True) def test_execute_clean_step_version_mismatch( self, client_mock, list_ports_mock): client_mock.return_value = { 'command_status': 'RUNNING'} list_ports_mock.return_value = self.ports with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: response = utils.agent_execute_clean_step( task, self.clean_steps['deploy'][0]) self.assertEqual(states.CLEANWAIT, response) def test_agent_add_clean_params(self): cfg.CONF.set_override('shred_random_overwrite_iterations', 2, 'deploy') cfg.CONF.set_override('shred_final_overwrite_with_zeros', False, 'deploy') cfg.CONF.set_override('continue_if_disk_secure_erase_fails', True, 'deploy') with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: utils.agent_add_clean_params(task) self.assertEqual(2, task.node.driver_internal_info[ 'agent_erase_devices_iterations']) self.assertIs(False, task.node.driver_internal_info[ 'agent_erase_devices_zeroize']) self.assertIs(True, task.node.driver_internal_info[ 'agent_continue_if_ata_erase_failed']) @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True) @mock.patch('ironic.conductor.utils.node_power_action', autospec=True) @mock.patch.object(utils, 'build_agent_options', autospec=True) @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.' 
'add_cleaning_network') def _test_prepare_inband_cleaning( self, add_cleaning_network_mock, build_options_mock, power_mock, prepare_ramdisk_mock, manage_boot=True): build_options_mock.return_value = {'a': 'b'} with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: self.assertEqual( states.CLEANWAIT, utils.prepare_inband_cleaning(task, manage_boot=manage_boot)) add_cleaning_network_mock.assert_called_once_with(task) power_mock.assert_called_once_with(task, states.REBOOT) self.assertEqual(1, task.node.driver_internal_info[ 'agent_erase_devices_iterations']) self.assertIs(True, task.node.driver_internal_info[ 'agent_erase_devices_zeroize']) if manage_boot: prepare_ramdisk_mock.assert_called_once_with( mock.ANY, mock.ANY, {'a': 'b'}) build_options_mock.assert_called_once_with(task.node) else: self.assertFalse(prepare_ramdisk_mock.called) self.assertFalse(build_options_mock.called) def test_prepare_inband_cleaning(self): self._test_prepare_inband_cleaning() def test_prepare_inband_cleaning_manage_boot_false(self): self._test_prepare_inband_cleaning(manage_boot=False) @mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk', autospec=True) @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.' 
'remove_cleaning_network') @mock.patch('ironic.conductor.utils.node_power_action', autospec=True) def _test_tear_down_inband_cleaning( self, power_mock, remove_cleaning_network_mock, clean_up_ramdisk_mock, manage_boot=True): with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: utils.tear_down_inband_cleaning(task, manage_boot=manage_boot) power_mock.assert_called_once_with(task, states.POWER_OFF) remove_cleaning_network_mock.assert_called_once_with(task) if manage_boot: clean_up_ramdisk_mock.assert_called_once_with( task.driver.boot, task) else: self.assertFalse(clean_up_ramdisk_mock.called) def test_tear_down_inband_cleaning(self): self._test_tear_down_inband_cleaning(manage_boot=True) def test_tear_down_inband_cleaning_manage_boot_false(self): self._test_tear_down_inband_cleaning(manage_boot=False) def test_build_agent_options_conf(self): self.config(api_url='https://api-url', group='conductor') options = utils.build_agent_options(self.node) self.assertEqual('https://api-url', options['ipa-api-url']) self.assertEqual(0, options['coreos.configdrive']) @mock.patch.object(utils, '_get_ironic_session') def test_build_agent_options_keystone(self, session_mock): self.config(api_url=None, group='conductor') sess = mock.Mock() sess.get_endpoint.return_value = 'https://api-url' session_mock.return_value = sess options = utils.build_agent_options(self.node) self.assertEqual('https://api-url', options['ipa-api-url']) self.assertEqual(0, options['coreos.configdrive']) @mock.patch.object(disk_utils, 'is_block_device', autospec=True) @mock.patch.object(utils, 'login_iscsi', lambda *_: None) @mock.patch.object(utils, 'discovery', lambda *_: None) @mock.patch.object(utils, 'logout_iscsi', lambda *_: None) @mock.patch.object(utils, 'delete_iscsi', lambda *_: None) @mock.patch.object(utils, 'get_dev', lambda *_: '/dev/fake') class ISCSISetupAndHandleErrorsTestCase(tests_base.TestCase): def test_no_parent_device(self, mock_ibd): address = '127.0.0.1' port 
= 3306 iqn = 'iqn.xyz' lun = 1 mock_ibd.return_value = False expected_dev = '/dev/fake' with testtools.ExpectedException(exception.InstanceDeployFailure): with utils._iscsi_setup_and_handle_errors( address, port, iqn, lun) as dev: self.assertEqual(expected_dev, dev) mock_ibd.assert_called_once_with(expected_dev) def test_parent_device_yield(self, mock_ibd): address = '127.0.0.1' port = 3306 iqn = 'iqn.xyz' lun = 1 expected_dev = '/dev/fake' mock_ibd.return_value = True with utils._iscsi_setup_and_handle_errors( address, port, iqn, lun) as dev: self.assertEqual(expected_dev, dev) mock_ibd.assert_called_once_with(expected_dev) class ValidateImagePropertiesTestCase(db_base.DbTestCase): @mock.patch.object(image_service, 'get_image_service', autospec=True) def test_validate_image_properties_glance_image(self, image_service_mock): node = obj_utils.create_test_node( self.context, driver='fake_pxe', instance_info=INST_INFO_DICT, driver_info=DRV_INFO_DICT, driver_internal_info=DRV_INTERNAL_INFO_DICT, ) inst_info = utils.get_image_instance_info(node) image_service_mock.return_value.show.return_value = { 'properties': {'kernel_id': '1111', 'ramdisk_id': '2222'}, } utils.validate_image_properties(self.context, inst_info, ['kernel_id', 'ramdisk_id']) image_service_mock.assert_called_once_with( node.instance_info['image_source'], context=self.context ) @mock.patch.object(image_service, 'get_image_service', autospec=True) def test_validate_image_properties_glance_image_missing_prop( self, image_service_mock): node = obj_utils.create_test_node( self.context, driver='fake_pxe', instance_info=INST_INFO_DICT, driver_info=DRV_INFO_DICT, driver_internal_info=DRV_INTERNAL_INFO_DICT, ) inst_info = utils.get_image_instance_info(node) image_service_mock.return_value.show.return_value = { 'properties': {'kernel_id': '1111'}, } self.assertRaises(exception.MissingParameterValue, utils.validate_image_properties, self.context, inst_info, ['kernel_id', 'ramdisk_id']) 
image_service_mock.assert_called_once_with( node.instance_info['image_source'], context=self.context ) @mock.patch.object(image_service, 'get_image_service', autospec=True) def test_validate_image_properties_glance_image_not_authorized( self, image_service_mock): inst_info = {'image_source': 'uuid'} show_mock = image_service_mock.return_value.show show_mock.side_effect = exception.ImageNotAuthorized(image_id='uuid') self.assertRaises(exception.InvalidParameterValue, utils.validate_image_properties, self.context, inst_info, []) @mock.patch.object(image_service, 'get_image_service', autospec=True) def test_validate_image_properties_glance_image_not_found( self, image_service_mock): inst_info = {'image_source': 'uuid'} show_mock = image_service_mock.return_value.show show_mock.side_effect = exception.ImageNotFound(image_id='uuid') self.assertRaises(exception.InvalidParameterValue, utils.validate_image_properties, self.context, inst_info, []) def test_validate_image_properties_invalid_image_href(self): inst_info = {'image_source': 'emule://uuid'} self.assertRaises(exception.InvalidParameterValue, utils.validate_image_properties, self.context, inst_info, []) @mock.patch.object(image_service.HttpImageService, 'show', autospec=True) def test_validate_image_properties_nonglance_image( self, image_service_show_mock): instance_info = { 'image_source': 'http://ubuntu', 'kernel': 'kernel_uuid', 'ramdisk': 'file://initrd', 'root_gb': 100, } image_service_show_mock.return_value = {'size': 1, 'properties': {}} node = obj_utils.create_test_node( self.context, driver='fake_pxe', instance_info=instance_info, driver_info=DRV_INFO_DICT, driver_internal_info=DRV_INTERNAL_INFO_DICT, ) inst_info = utils.get_image_instance_info(node) utils.validate_image_properties(self.context, inst_info, ['kernel', 'ramdisk']) image_service_show_mock.assert_called_once_with( mock.ANY, instance_info['image_source']) @mock.patch.object(image_service.HttpImageService, 'show', autospec=True) def 
test_validate_image_properties_nonglance_image_validation_fail( self, img_service_show_mock): instance_info = { 'image_source': 'http://ubuntu', 'kernel': 'kernel_uuid', 'ramdisk': 'file://initrd', 'root_gb': 100, } img_service_show_mock.side_effect = exception.ImageRefValidationFailed( image_href='http://ubuntu', reason='HTTPError') node = obj_utils.create_test_node( self.context, driver='fake_pxe', instance_info=instance_info, driver_info=DRV_INFO_DICT, driver_internal_info=DRV_INTERNAL_INFO_DICT, ) inst_info = utils.get_image_instance_info(node) self.assertRaises(exception.InvalidParameterValue, utils.validate_image_properties, self.context, inst_info, ['kernel', 'ramdisk']) class ValidateParametersTestCase(db_base.DbTestCase): def _test__get_img_instance_info( self, instance_info=INST_INFO_DICT, driver_info=DRV_INFO_DICT, driver_internal_info=DRV_INTERNAL_INFO_DICT): # make sure we get back the expected things node = obj_utils.create_test_node( self.context, driver='fake_pxe', instance_info=instance_info, driver_info=driver_info, driver_internal_info=DRV_INTERNAL_INFO_DICT, ) info = utils.get_image_instance_info(node) self.assertIsNotNone(info['image_source']) return info def test__get_img_instance_info_good(self): self._test__get_img_instance_info() def test__get_img_instance_info_good_non_glance_image(self): instance_info = INST_INFO_DICT.copy() instance_info['image_source'] = 'http://image' instance_info['kernel'] = 'http://kernel' instance_info['ramdisk'] = 'http://ramdisk' info = self._test__get_img_instance_info(instance_info=instance_info) self.assertIsNotNone(info['ramdisk']) self.assertIsNotNone(info['kernel']) def test__get_img_instance_info_non_glance_image_missing_kernel(self): instance_info = INST_INFO_DICT.copy() instance_info['image_source'] = 'http://image' instance_info['ramdisk'] = 'http://ramdisk' self.assertRaises( exception.MissingParameterValue, self._test__get_img_instance_info, instance_info=instance_info) def 
test__get_img_instance_info_non_glance_image_missing_ramdisk(self): instance_info = INST_INFO_DICT.copy() instance_info['image_source'] = 'http://image' instance_info['kernel'] = 'http://kernel' self.assertRaises( exception.MissingParameterValue, self._test__get_img_instance_info, instance_info=instance_info) def test__get_img_instance_info_missing_image_source(self): instance_info = INST_INFO_DICT.copy() del instance_info['image_source'] self.assertRaises( exception.MissingParameterValue, self._test__get_img_instance_info, instance_info=instance_info) def test__get_img_instance_info_whole_disk_image(self): driver_internal_info = DRV_INTERNAL_INFO_DICT.copy() driver_internal_info['is_whole_disk_image'] = True self._test__get_img_instance_info( driver_internal_info=driver_internal_info) class InstanceInfoTestCase(db_base.DbTestCase): def test_parse_instance_info_good(self): # make sure we get back the expected things node = obj_utils.create_test_node( self.context, driver='fake_pxe', instance_info=INST_INFO_DICT, driver_internal_info=DRV_INTERNAL_INFO_DICT ) info = utils.parse_instance_info(node) self.assertIsNotNone(info['image_source']) self.assertIsNotNone(info['root_gb']) self.assertEqual(0, info['ephemeral_gb']) self.assertIsNone(info['configdrive']) def test_parse_instance_info_missing_instance_source(self): # make sure error is raised when info is missing info = dict(INST_INFO_DICT) del info['image_source'] node = obj_utils.create_test_node( self.context, instance_info=info, driver_internal_info=DRV_INTERNAL_INFO_DICT, ) self.assertRaises(exception.MissingParameterValue, utils.parse_instance_info, node) def test_parse_instance_info_missing_root_gb(self): # make sure error is raised when info is missing info = dict(INST_INFO_DICT) del info['root_gb'] node = obj_utils.create_test_node( self.context, instance_info=info, driver_internal_info=DRV_INTERNAL_INFO_DICT, ) self.assertRaises(exception.MissingParameterValue, utils.parse_instance_info, node) def 
test_parse_instance_info_invalid_root_gb(self): info = dict(INST_INFO_DICT) info['root_gb'] = 'foobar' node = obj_utils.create_test_node( self.context, instance_info=info, driver_internal_info=DRV_INTERNAL_INFO_DICT, ) self.assertRaises(exception.InvalidParameterValue, utils.parse_instance_info, node) def test_parse_instance_info_valid_ephemeral_gb(self): ephemeral_gb = 10 ephemeral_mb = 1024 * ephemeral_gb ephemeral_fmt = 'test-fmt' info = dict(INST_INFO_DICT) info['ephemeral_gb'] = ephemeral_gb info['ephemeral_format'] = ephemeral_fmt node = obj_utils.create_test_node( self.context, instance_info=info, driver_internal_info=DRV_INTERNAL_INFO_DICT, ) data = utils.parse_instance_info(node) self.assertEqual(ephemeral_mb, data['ephemeral_mb']) self.assertEqual(ephemeral_fmt, data['ephemeral_format']) def test_parse_instance_info_unicode_swap_mb(self): swap_mb = u'10' swap_mb_int = 10 info = dict(INST_INFO_DICT) info['swap_mb'] = swap_mb node = obj_utils.create_test_node( self.context, instance_info=info, driver_internal_info=DRV_INTERNAL_INFO_DICT, ) data = utils.parse_instance_info(node) self.assertEqual(swap_mb_int, data['swap_mb']) def test_parse_instance_info_invalid_ephemeral_gb(self): info = dict(INST_INFO_DICT) info['ephemeral_gb'] = 'foobar' info['ephemeral_format'] = 'exttest' node = obj_utils.create_test_node( self.context, instance_info=info, driver_internal_info=DRV_INTERNAL_INFO_DICT, ) self.assertRaises(exception.InvalidParameterValue, utils.parse_instance_info, node) def test_parse_instance_info_valid_ephemeral_missing_format(self): ephemeral_gb = 10 ephemeral_fmt = 'test-fmt' info = dict(INST_INFO_DICT) info['ephemeral_gb'] = ephemeral_gb info['ephemeral_format'] = None self.config(default_ephemeral_format=ephemeral_fmt, group='pxe') node = obj_utils.create_test_node( self.context, instance_info=info, driver_internal_info=DRV_INTERNAL_INFO_DICT, ) instance_info = utils.parse_instance_info(node) self.assertEqual(ephemeral_fmt, 
instance_info['ephemeral_format']) def test_parse_instance_info_valid_preserve_ephemeral_true(self): info = dict(INST_INFO_DICT) for opt in ['true', 'TRUE', 'True', 't', 'on', 'yes', 'y', '1']: info['preserve_ephemeral'] = opt node = obj_utils.create_test_node( self.context, uuid=uuidutils.generate_uuid(), instance_info=info, driver_internal_info=DRV_INTERNAL_INFO_DICT, ) data = utils.parse_instance_info(node) self.assertTrue(data['preserve_ephemeral']) def test_parse_instance_info_valid_preserve_ephemeral_false(self): info = dict(INST_INFO_DICT) for opt in ['false', 'FALSE', 'False', 'f', 'off', 'no', 'n', '0']: info['preserve_ephemeral'] = opt node = obj_utils.create_test_node( self.context, uuid=uuidutils.generate_uuid(), instance_info=info, driver_internal_info=DRV_INTERNAL_INFO_DICT, ) data = utils.parse_instance_info(node) self.assertFalse(data['preserve_ephemeral']) def test_parse_instance_info_invalid_preserve_ephemeral(self): info = dict(INST_INFO_DICT) info['preserve_ephemeral'] = 'foobar' node = obj_utils.create_test_node( self.context, instance_info=info, driver_internal_info=DRV_INTERNAL_INFO_DICT, ) self.assertRaises(exception.InvalidParameterValue, utils.parse_instance_info, node) def test_parse_instance_info_invalid_ephemeral_disk(self): info = dict(INST_INFO_DICT) info['ephemeral_gb'] = 10 info['swap_mb'] = 0 info['root_gb'] = 20 info['preserve_ephemeral'] = True drv_internal_dict = {'instance': {'ephemeral_gb': 9, 'swap_mb': 0, 'root_gb': 20}} drv_internal_dict.update(DRV_INTERNAL_INFO_DICT) node = obj_utils.create_test_node( self.context, instance_info=info, driver_internal_info=drv_internal_dict, ) self.assertRaises(exception.InvalidParameterValue, utils.parse_instance_info, node) def test__check_disk_layout_unchanged_fails(self): info = dict(INST_INFO_DICT) info['ephemeral_gb'] = 10 info['swap_mb'] = 0 info['root_gb'] = 20 info['preserve_ephemeral'] = True drv_internal_dict = {'instance': {'ephemeral_gb': 20, 'swap_mb': 0, 'root_gb': 20}} 
drv_internal_dict.update(DRV_INTERNAL_INFO_DICT) node = obj_utils.create_test_node( self.context, instance_info=info, driver_internal_info=drv_internal_dict, ) self.assertRaises(exception.InvalidParameterValue, utils._check_disk_layout_unchanged, node, info) def test__check_disk_layout_unchanged(self): info = dict(INST_INFO_DICT) info['ephemeral_gb'] = 10 info['swap_mb'] = 0 info['root_gb'] = 20 info['preserve_ephemeral'] = True drv_internal_dict = {'instance': {'ephemeral_gb': 10, 'swap_mb': 0, 'root_gb': 20}} drv_internal_dict.update(DRV_INTERNAL_INFO_DICT) node = obj_utils.create_test_node( self.context, instance_info=info, driver_internal_info=drv_internal_dict, ) self.assertIsNone(utils._check_disk_layout_unchanged(node, info)) def test_parse_instance_info_configdrive(self): info = dict(INST_INFO_DICT) info['configdrive'] = 'http://1.2.3.4/cd' node = obj_utils.create_test_node( self.context, instance_info=info, driver_internal_info=DRV_INTERNAL_INFO_DICT, ) instance_info = utils.parse_instance_info(node) self.assertEqual('http://1.2.3.4/cd', instance_info['configdrive']) def test_parse_instance_info_nonglance_image(self): info = INST_INFO_DICT.copy() info['image_source'] = 'file:///image.qcow2' info['kernel'] = 'file:///image.vmlinuz' info['ramdisk'] = 'file:///image.initrd' node = obj_utils.create_test_node( self.context, instance_info=info, driver_internal_info=DRV_INTERNAL_INFO_DICT, ) utils.parse_instance_info(node) def test_parse_instance_info_nonglance_image_no_kernel(self): info = INST_INFO_DICT.copy() info['image_source'] = 'file:///image.qcow2' info['ramdisk'] = 'file:///image.initrd' node = obj_utils.create_test_node( self.context, instance_info=info, driver_internal_info=DRV_INTERNAL_INFO_DICT, ) self.assertRaises(exception.MissingParameterValue, utils.parse_instance_info, node) def test_parse_instance_info_whole_disk_image(self): driver_internal_info = dict(DRV_INTERNAL_INFO_DICT) driver_internal_info['is_whole_disk_image'] = True node = 
obj_utils.create_test_node( self.context, instance_info=INST_INFO_DICT, driver_internal_info=driver_internal_info, ) instance_info = utils.parse_instance_info(node) self.assertIsNotNone(instance_info['image_source']) self.assertIsNotNone(instance_info['root_mb']) self.assertEqual(0, instance_info['swap_mb']) self.assertEqual(0, instance_info['ephemeral_mb']) self.assertIsNone(instance_info['configdrive']) def test_parse_instance_info_whole_disk_image_missing_root(self): info = dict(INST_INFO_DICT) del info['root_gb'] node = obj_utils.create_test_node(self.context, instance_info=info) self.assertRaises(exception.InvalidParameterValue, utils.parse_instance_info, node) class TestBuildInstanceInfoForDeploy(db_base.DbTestCase): def setUp(self): super(TestBuildInstanceInfoForDeploy, self).setUp() self.node = obj_utils.create_test_node(self.context, driver='fake_agent') @mock.patch.object(image_service.HttpImageService, 'validate_href', autospec=True) @mock.patch.object(image_service, 'GlanceImageService', autospec=True) def test_build_instance_info_for_deploy_glance_image(self, glance_mock, validate_mock): i_info = self.node.instance_info i_info['image_source'] = '733d1c44-a2ea-414b-aca7-69decf20d810' driver_internal_info = self.node.driver_internal_info driver_internal_info['is_whole_disk_image'] = True self.node.driver_internal_info = driver_internal_info self.node.instance_info = i_info self.node.save() image_info = {'checksum': 'aa', 'disk_format': 'qcow2', 'container_format': 'bare', 'properties': {}} glance_mock.return_value.show = mock.MagicMock(spec_set=[], return_value=image_info) glance_mock.return_value.swift_temp_url.return_value = ( 'http://temp-url') mgr_utils.mock_the_extension_manager(driver='fake_agent') with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: utils.build_instance_info_for_deploy(task) glance_mock.assert_called_once_with(version=2, context=task.context) glance_mock.return_value.show.assert_called_once_with( 
self.node.instance_info['image_source']) glance_mock.return_value.swift_temp_url.assert_called_once_with( image_info) validate_mock.assert_called_once_with(mock.ANY, 'http://temp-url', secret=True) @mock.patch.object(image_service.HttpImageService, 'validate_href', autospec=True) @mock.patch.object(utils, 'parse_instance_info', autospec=True) @mock.patch.object(image_service, 'GlanceImageService', autospec=True) def test_build_instance_info_for_deploy_glance_partition_image( self, glance_mock, parse_instance_info_mock, validate_mock): i_info = {} i_info['image_source'] = '733d1c44-a2ea-414b-aca7-69decf20d810' i_info['kernel'] = '13ce5a56-1de3-4916-b8b2-be778645d003' i_info['ramdisk'] = 'a5a370a8-1b39-433f-be63-2c7d708e4b4e' i_info['root_gb'] = 5 i_info['swap_mb'] = 4 i_info['ephemeral_gb'] = 0 i_info['ephemeral_format'] = None i_info['configdrive'] = 'configdrive' driver_internal_info = self.node.driver_internal_info driver_internal_info['is_whole_disk_image'] = False self.node.driver_internal_info = driver_internal_info self.node.instance_info = i_info self.node.save() image_info = {'checksum': 'aa', 'disk_format': 'qcow2', 'container_format': 'bare', 'properties': {'kernel_id': 'kernel', 'ramdisk_id': 'ramdisk'}} glance_mock.return_value.show = mock.MagicMock(spec_set=[], return_value=image_info) glance_obj_mock = glance_mock.return_value glance_obj_mock.swift_temp_url.return_value = 'http://temp-url' parse_instance_info_mock.return_value = {'swap_mb': 4} image_source = '733d1c44-a2ea-414b-aca7-69decf20d810' expected_i_info = {'root_gb': 5, 'swap_mb': 4, 'ephemeral_gb': 0, 'ephemeral_format': None, 'configdrive': 'configdrive', 'image_source': image_source, 'image_url': 'http://temp-url', 'kernel': 'kernel', 'ramdisk': 'ramdisk', 'image_type': 'partition', 'image_tags': [], 'image_properties': {'kernel_id': 'kernel', 'ramdisk_id': 'ramdisk'}, 'image_checksum': 'aa', 'image_container_format': 'bare', 'image_disk_format': 'qcow2'} 
mgr_utils.mock_the_extension_manager(driver='fake_agent') with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: info = utils.build_instance_info_for_deploy(task) glance_mock.assert_called_once_with(version=2, context=task.context) glance_mock.return_value.show.assert_called_once_with( self.node.instance_info['image_source']) glance_mock.return_value.swift_temp_url.assert_called_once_with( image_info) validate_mock.assert_called_once_with( mock.ANY, 'http://temp-url', secret=True) image_type = task.node.instance_info['image_type'] self.assertEqual('partition', image_type) self.assertEqual('kernel', info['kernel']) self.assertEqual('ramdisk', info['ramdisk']) self.assertEqual(expected_i_info, info) parse_instance_info_mock.assert_called_once_with(task.node) @mock.patch.object(image_service.HttpImageService, 'validate_href', autospec=True) def test_build_instance_info_for_deploy_nonglance_image( self, validate_href_mock): i_info = self.node.instance_info driver_internal_info = self.node.driver_internal_info i_info['image_source'] = 'http://image-ref' i_info['image_checksum'] = 'aa' i_info['root_gb'] = 10 i_info['image_checksum'] = 'aa' driver_internal_info['is_whole_disk_image'] = True self.node.instance_info = i_info self.node.driver_internal_info = driver_internal_info self.node.save() mgr_utils.mock_the_extension_manager(driver='fake_agent') with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: info = utils.build_instance_info_for_deploy(task) self.assertEqual(self.node.instance_info['image_source'], info['image_url']) validate_href_mock.assert_called_once_with( mock.ANY, 'http://image-ref', False) @mock.patch.object(utils, 'parse_instance_info', autospec=True) @mock.patch.object(image_service.HttpImageService, 'validate_href', autospec=True) def test_build_instance_info_for_deploy_nonglance_partition_image( self, validate_href_mock, parse_instance_info_mock): i_info = {} driver_internal_info = 
self.node.driver_internal_info i_info['image_source'] = 'http://image-ref' i_info['kernel'] = 'http://kernel-ref' i_info['ramdisk'] = 'http://ramdisk-ref' i_info['image_checksum'] = 'aa' i_info['root_gb'] = 10 i_info['configdrive'] = 'configdrive' driver_internal_info['is_whole_disk_image'] = False self.node.instance_info = i_info self.node.driver_internal_info = driver_internal_info self.node.save() mgr_utils.mock_the_extension_manager(driver='fake_agent') validate_href_mock.side_effect = ['http://image-ref', 'http://kernel-ref', 'http://ramdisk-ref'] parse_instance_info_mock.return_value = {'swap_mb': 5} expected_i_info = {'image_source': 'http://image-ref', 'image_url': 'http://image-ref', 'image_type': 'partition', 'kernel': 'http://kernel-ref', 'ramdisk': 'http://ramdisk-ref', 'image_checksum': 'aa', 'root_gb': 10, 'swap_mb': 5, 'configdrive': 'configdrive'} with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: info = utils.build_instance_info_for_deploy(task) self.assertEqual(self.node.instance_info['image_source'], info['image_url']) validate_href_mock.assert_called_once_with( mock.ANY, 'http://image-ref', False) self.assertEqual('partition', info['image_type']) self.assertEqual(expected_i_info, info) parse_instance_info_mock.assert_called_once_with(task.node) @mock.patch.object(image_service.HttpImageService, 'validate_href', autospec=True) def test_build_instance_info_for_deploy_nonsupported_image( self, validate_href_mock): validate_href_mock.side_effect = exception.ImageRefValidationFailed( image_href='file://img.qcow2', reason='fail') i_info = self.node.instance_info i_info['image_source'] = 'file://img.qcow2' i_info['image_checksum'] = 'aa' self.node.instance_info = i_info self.node.save() mgr_utils.mock_the_extension_manager(driver='fake_agent') with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: self.assertRaises(exception.ImageRefValidationFailed, utils.build_instance_info_for_deploy, task) class 
TestStorageInterfaceUtils(db_base.DbTestCase): def setUp(self): super(TestStorageInterfaceUtils, self).setUp() self.node = obj_utils.create_test_node(self.context, driver='fake') def test_check_interface_capability(self): class fake_driver(object): capabilities = ['foo', 'bar'] self.assertTrue(utils.check_interface_capability(fake_driver, 'foo')) self.assertFalse(utils.check_interface_capability(fake_driver, 'baz')) def test_get_remote_boot_volume(self): obj_utils.create_test_volume_target( self.context, node_id=self.node.id, volume_type='iscsi', boot_index=1, volume_id='4321') obj_utils.create_test_volume_target( self.context, node_id=self.node.id, volume_type='iscsi', boot_index=0, volume_id='1234', uuid=uuidutils.generate_uuid()) self.node.storage_interface = 'cinder' self.node.save() with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: volume = utils.get_remote_boot_volume(task) self.assertEqual('1234', volume['volume_id']) def test_get_remote_boot_volume_none(self): with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: self.assertIsNone(utils.get_remote_boot_volume(task)) obj_utils.create_test_volume_target( self.context, node_id=self.node.id, volume_type='iscsi', boot_index=1, volume_id='4321') with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: self.assertIsNone(utils.get_remote_boot_volume(task)) @mock.patch.object(fake, 'FakeBoot', autospec=True) @mock.patch.object(fake, 'FakeDeploy', autospec=True) @mock.patch.object(cinder.CinderStorage, 'should_write_image', autospec=True) def test_populate_storage_driver_internal_info_iscsi(self, mock_should_write, mock_deploy, mock_boot): mock_deploy.return_value = mock.Mock( capabilities=['iscsi_volume_deploy']) mock_boot.return_value = mock.Mock( capabilities=['iscsi_volume_boot']) mock_should_write.return_value = True vol_uuid = uuidutils.generate_uuid() obj_utils.create_test_volume_target( self.context, node_id=self.node.id, 
volume_type='iscsi', boot_index=0, volume_id='1234', uuid=vol_uuid) # NOTE(TheJulia): Since the default for the storage_interface # is a noop interface, we need to define another driver that # can be loaded by driver_manager in order to create the task # to test this method. self.node.storage_interface = "cinder" self.node.save() with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: driver_utils.add_node_capability(task, 'iscsi_boot', 'True') utils.populate_storage_driver_internal_info(task) self.assertEqual( vol_uuid, task.node.driver_internal_info.get('boot_from_volume', None)) self.assertEqual( vol_uuid, task.node.driver_internal_info.get('boot_from_volume_deploy', None)) @mock.patch.object(fake, 'FakeBoot', autospec=True) @mock.patch.object(fake, 'FakeDeploy', autospec=True) @mock.patch.object(cinder.CinderStorage, 'should_write_image', autospec=True) def test_populate_storage_driver_internal_info_fc(self, mock_should_write, mock_deploy, mock_boot): mock_deploy.return_value = mock.Mock( capabilities=['fibre_channel_volume_deploy']) mock_boot.return_value = mock.Mock( capabilities=['fibre_channel_volume_boot']) mock_should_write.return_value = True self.node.storage_interface = "cinder" self.node.save() vol_uuid = uuidutils.generate_uuid() obj_utils.create_test_volume_target( self.context, node_id=self.node.id, volume_type='fibre_channel', boot_index=0, volume_id='1234', uuid=vol_uuid) with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: driver_utils.add_node_capability(task, 'fibre_channel_boot', 'True') utils.populate_storage_driver_internal_info(task) self.assertEqual( vol_uuid, task.node.driver_internal_info.get('boot_from_volume', None)) self.assertEqual( vol_uuid, task.node.driver_internal_info.get('boot_from_volume_deploy', None)) @mock.patch.object(fake, 'FakeBoot', autospec=True) @mock.patch.object(fake, 'FakeDeploy', autospec=True) def test_populate_storage_driver_internal_info_error( self, 
mock_deploy, mock_boot): mock_deploy.return_value = mock.Mock( capabilities=['fibre_channel_volume_deploy']) mock_boot.return_value = mock.Mock( capabilities=['fibre_channel_volume_boot']) obj_utils.create_test_volume_target( self.context, node_id=self.node.id, volume_type='iscsi', boot_index=0, volume_id='1234') with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: self.assertRaises(exception.StorageError, utils.populate_storage_driver_internal_info, task) def test_tear_down_storage_configuration(self): vol_uuid = uuidutils.generate_uuid() obj_utils.create_test_volume_target( self.context, node_id=self.node.id, volume_type='iscsi', boot_index=0, volume_id='1234', uuid=vol_uuid) d_i_info = self.node.driver_internal_info d_i_info['boot_from_volume'] = vol_uuid d_i_info['boot_from_volume_deploy'] = vol_uuid self.node.driver_internal_info = d_i_info self.node.save() with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: node = task.node self.assertEqual(1, len(task.volume_targets)) self.assertEqual( vol_uuid, node.driver_internal_info.get('boot_from_volume')) self.assertEqual( vol_uuid, node.driver_internal_info.get('boot_from_volume_deploy')) utils.tear_down_storage_configuration(task) node.refresh() self.assertIsNone( node.driver_internal_info.get('boot_from_volume')) self.assertIsNone( node.driver_internal_info.get('boot_from_volume_deploy')) with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: self.assertEqual(0, len(task.volume_targets)) def test_is_iscsi_boot(self): vol_id = uuidutils.generate_uuid() obj_utils.create_test_volume_target( self.context, node_id=self.node.id, volume_type='iscsi', boot_index=0, volume_id='1234', uuid=vol_id) self.node.driver_internal_info = {'boot_from_volume': vol_id} self.node.save() with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: self.assertTrue(utils.is_iscsi_boot(task)) def test_is_iscsi_boot_exception(self): 
self.node.driver_internal_info = { 'boot_from_volume': uuidutils.generate_uuid()} with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: self.assertFalse(utils.is_iscsi_boot(task)) def test_is_iscsi_boot_false(self): with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: self.assertFalse(utils.is_iscsi_boot(task)) def test_is_iscsi_boot_false_fc_target(self): vol_id = uuidutils.generate_uuid() obj_utils.create_test_volume_target( self.context, node_id=self.node.id, volume_type='fibre_channel', boot_index=0, volume_id='3214', uuid=vol_id) self.node.driver_internal_info.update({'boot_from_volume': vol_id}) self.node.save() with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: self.assertFalse(utils.is_iscsi_boot(task))
{ "content_hash": "c9ff93f88dd1729bcf19827ae5a57bfa", "timestamp": "", "source": "github", "line_count": 2557, "max_line_length": 79, "avg_line_length": 42.03011341415721, "alnum_prop": 0.5856277507420606, "repo_name": "jiazichenzhan/Server_Manage_Plugin", "id": "a8e8d05dd054a52d84e1f676baee81f573b6be49", "size": "108163", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "ironic-plugin-pike/ironic/tests/unit/drivers/modules/test_deploy_utils.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Mako", "bytes": "349" }, { "name": "Python", "bytes": "5720362" }, { "name": "Ruby", "bytes": "986" }, { "name": "Shell", "bytes": "128352" } ], "symlink_target": "" }
"""This code example creates new labels. To determine which labels exist, run get_all_labels.py. This feature is only available to DFP premium solution networks.""" import uuid # Import appropriate modules from the client library. from googleads import dfp def main(client): # Initialize appropriate service. label_service = client.GetService('LabelService', version='v201508') # Create label objects. labels = [] for _ in xrange(5): label = { 'name': 'Label #%s' % uuid.uuid4(), 'isActive': 'true', 'types': ['COMPETITIVE_EXCLUSION'] } labels.append(label) # Add Labels. labels = label_service.createLabels(labels) # Display results. for label in labels: print ('Label with id \'%s\', name \'%s\', and types {%s} was found.' % (label['id'], label['name'], ','.join(label['types']))) if __name__ == '__main__': # Initialize client object. dfp_client = dfp.DfpClient.LoadFromStorage() main(dfp_client)
{ "content_hash": "b034a638723272f13c749355779c7cda", "timestamp": "", "source": "github", "line_count": 38, "max_line_length": 77, "avg_line_length": 25.973684210526315, "alnum_prop": 0.6474164133738601, "repo_name": "haveal/googleads-python-lib", "id": "c9f2d4a3abf9ebd598adf4ab8b5664b6429e4397", "size": "1605", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "examples/dfp/v201508/label_service/create_labels.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "168602" } ], "symlink_target": "" }
import os import json import datetime from pprint import pprint from copy import deepcopy import numpy as np import pandas as pd from tqdm.notebook import tqdm # #### Configuration # In[2]: ROOT = '..' # In[3]: datafile_date = '2020-03-27-v5' # In[4]: PROCESS_SMALL_DOCS = False # Small docs are the individual paragraphs in the text # In[5]: SOURCE_FILES = { 'COMM-USE': ROOT + f'/data/raw/{datafile_date}/comm_use_subset/', 'BioRxiv': ROOT + f'/data/raw/{datafile_date}/biorxiv_medrxiv/', 'NONCOMM': ROOT + f'/data/raw/{datafile_date}/noncomm_use_subset/', 'PMC': ROOT + f'/data/raw/{datafile_date}/custom_license/', } # In[6]: metadata_file = ROOT + f'/data/raw/{datafile_date}/metadata.csv' # In[7]: outdir = ROOT + f'/data/interim/{datafile_date}/' outfile = f'{outdir}{datafile_date}-covid19-combined.jsonl' outfile_small_docs = f'{outdir}{datafile_date}-covid19-combined-smalldocs.jsonl' outfile_abstracts = f'{outdir}{datafile_date}-covid19-combined-abstracts.jsonl' json_args = {'orient': 'records', 'lines': True} out_json_args = {'date_format': 'iso', **json_args} # In[8]: out_path_mode = 0o777 os.makedirs(outdir, mode = out_path_mode, exist_ok = True) # ## Helper Functions # Some functions taken and modified from https://www.kaggle.com/xhlulu/cord-19-eda-parse-json-and-generate-clean-csv # In[9]: def format_name(author): middle_name = " ".join(author['middle']) if author['middle']: return " ".join([author['first'], middle_name, author['last']]) else: return " ".join([author['first'], author['last']]) def format_affiliation(affiliation): text = [] location = affiliation.get('location') if location: text.extend(list(affiliation['location'].values())) institution = affiliation.get('institution') if institution: text = [institution] + text return ", ".join(text) def format_authors(authors, with_affiliation=False): name_ls = [] for author in authors: name = format_name(author) if with_affiliation: affiliation = format_affiliation(author['affiliation']) if affiliation: 
name_ls.append(f"{name} ({affiliation})") else: name_ls.append(name) else: name_ls.append(name) return ", ".join(name_ls) def format_body(body_text): texts = [(di['section'], di['text']) for di in body_text] texts_di = {di['section']: "" for di in body_text} for section, text in texts: texts_di[section] += text body = "" for section, text in texts_di.items(): body += section body += "\n\n" body += text body += "\n\n" return body def format_bib(bibs): if type(bibs) == dict: bibs = list(bibs.values()) bibs = deepcopy(bibs) formatted = [] for bib in bibs: bib['authors'] = format_authors( bib['authors'], with_affiliation=False ) doi = None pmid = None other_ids = bib.get('other_ids') if other_ids: doi = other_ids.get('DOI') pmid = other_ids.get('PMID') formatted_ls = [str(bib[k]) for k in ['title', 'authors', 'venue', 'year']] if doi: formatted_ls.extend(doi) if pmid: formatted_ls.extend(['PMID' + p for p in pmid]) formatted.append(", ".join(formatted_ls)) return "\n ".join(formatted) # In[10]: def bib_titles(bibs): result = {} for key, bib in bibs.items(): result[key] = bib['title'] return result def extract_small_docs(main_doc_id, body_text, bib_titles_dict): result = [] for i, di in enumerate(body_text): ref_titles = [] for ref in di['cite_spans']: title = bib_titles_dict.get(ref['ref_id']) if title: ref_titles.append(title) result.append((main_doc_id, i, di['text'], di['section'], ref_titles)) return result # In[11]: def load_files(dirname): filenames = os.listdir(dirname) raw_files = [] for filename in tqdm(filenames): filename = dirname + filename file = json.load(open(filename, 'rb')) raw_files.append(file) return raw_files def generate_clean_df(all_files, prepare_small_docs=False): cleaned_files = [] small_docs = [] for file in tqdm(all_files): if prepare_small_docs: bib_titles_dict = bib_titles(file['bib_entries']) docs = extract_small_docs(file['paper_id'], file['body_text'], bib_titles_dict) else: docs = [] features = [ file['paper_id'], 
file['metadata']['title'], format_authors(file['metadata']['authors']), format_authors(file['metadata']['authors'], with_affiliation=True), format_body(file['abstract']), format_body(file['body_text']), format_bib(file['bib_entries']), file['metadata']['authors'], file['bib_entries'], len(docs) ] cleaned_files.append(features) if prepare_small_docs: small_docs.extend(docs) col_names = ['paper_id', 'title', 'authors', 'affiliations', 'abstract', 'text', 'bibliography','raw_authors','raw_bibliography', 'num_small_docs'] clean_df = pd.DataFrame(cleaned_files, columns=col_names) clean_df.head() if prepare_small_docs: small_docs_df = pd.DataFrame(small_docs, columns=['paper_id', 'small_doc_num', 'text', 'section', 'ref_titles']) return clean_df, small_docs_df else: return clean_df # ## Load Data # ### Load Metadata # In[12]: metadata_df = pd.read_csv(metadata_file) # In[13]: metadata_df.head() # ### Clean Metadata # In[14]: metadata_df['publish_year'] = metadata_df['publish_time'].astype(str).apply(lambda d: d[:4] if d[0] in ('1', '2') else '19xx' if d == 'nan' else # d[2:6] if d.startswith("'[") else '') # In[15]: metadata_df['publish_year'].unique() # ### Load Data Files # In[16]: dfd = {} small_docs = {} for name, indir in SOURCE_FILES.items(): print(f'Loading {name} from {indir}') data_files = load_files(indir) print(f"Cleaning {name} {len(data_files)} files" ) if PROCESS_SMALL_DOCS: dfd[name], small_docs[name] = generate_clean_df(data_files, prepare_small_docs=True) else: dfd[name] = generate_clean_df(data_files) # In[17]: dfd['COMM-USE'].head() # ### Combine data from text files # In[18]: for name, df in dfd.items(): df['dataset'] = name # In[19]: df_combined = pd.concat(dfd.values(), ignore_index=True, sort=False) # In[20]: df_combined.head() # In[21]: if PROCESS_SMALL_DOCS: for name, df in small_docs.items(): df['dataset'] = name df_combined_small_docs = pd.concat(small_docs.values(), ignore_index=True, sort=False) print(df_combined_small_docs.shape) # In[22]: 
if PROCESS_SMALL_DOCS: print(df_combined_small_docs.columns) # ### Join Metadata and Data Files # In[23]: df = metadata_df.copy() # In[24]: df_joined = df.join(df_combined.set_index('paper_id'), how='left', on='sha', rsuffix='_ft') # In[25]: df_joined.head() # In[26]: df_joined_ft = df_joined[~ df_joined['sha'].isnull()].copy() # In[27]: df_joined_ft.shape # ### Clean abstract # In[28]: df_joined_ft['abstract_clean'] = df_joined_ft['abstract'].fillna('') # In[29]: df_joined_ft['abstract_clean'] = df_joined_ft['abstract_clean'].apply(lambda x: x[9:] if x.lower().startswith('abstract') else x) # ### Create citation ref # In[30]: df_joined_ft['cite_ad'] = df_joined_ft['authors'].fillna('').str.split(',').str[0].str.split(' ').str.join('_') + '_' + df_joined_ft['publish_year'] # ### Write data # In[31]: df_joined_ft.columns # In[32]: # Warning: This file is over 2GB df_joined_ft.to_json(outfile, **out_json_args) outfile # In[33]: if PROCESS_SMALL_DOCS: df_combined_small_docs.to_json(outfile_small_docs, **out_json_args) # In[34]: df_joined_ft.head() # In[35]: df_joined_ft.loc[:, ['cord_uid', 'sha', 'abstract_clean', 'cite_ad', 'title', 'authors', 'publish_year', 'publish_time', 'dataset', 'pmcid', 'pubmed_id', 'doi' ]].to_json(outfile_abstracts, **out_json_args)
{ "content_hash": "ad35791dabcd9c9a4b869a6a165b239c", "timestamp": "", "source": "github", "line_count": 419, "max_line_length": 148, "avg_line_length": 21.083532219570404, "alnum_prop": 0.5619198551052751, "repo_name": "roaminsight/roamresearch", "id": "0bb97b2e2403724c36f12b45f54930d1ce5ba040", "size": "8909", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "BlogPosts/CORD19_topics/cord19-2020-03-27-v5/notebooks/2020-03-27-covid19-data-preprocess-clean.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "286" }, { "name": "C++", "bytes": "14079" }, { "name": "Jupyter Notebook", "bytes": "4778821" }, { "name": "Python", "bytes": "181377" }, { "name": "R", "bytes": "8511" }, { "name": "TeX", "bytes": "10590" } ], "symlink_target": "" }
from ggrc import db from .mixins import BusinessObject, Timeboxed, CustomAttributable from .object_owner import Ownable from .object_person import Personable from .relationship import Relatable from .track_object_state import HasObjectState, track_state_for_class class OrgGroup(HasObjectState, CustomAttributable, Personable, Relatable, Timeboxed, Ownable, BusinessObject, db.Model): __tablename__ = 'org_groups' _aliases = {"url": "Org Group URL"} track_state_for_class(OrgGroup)
{ "content_hash": "6896d3e910d6666e44362993e103f732", "timestamp": "", "source": "github", "line_count": 15, "max_line_length": 69, "avg_line_length": 34.6, "alnum_prop": 0.7475915221579962, "repo_name": "josthkko/ggrc-core", "id": "02ae4a22d03ffd24480b017b7e383ee90ba66f77", "size": "632", "binary": false, "copies": "4", "ref": "refs/heads/develop", "path": "src/ggrc/models/org_group.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "163629" }, { "name": "Cucumber", "bytes": "136321" }, { "name": "HTML", "bytes": "1057288" }, { "name": "JavaScript", "bytes": "1492054" }, { "name": "Makefile", "bytes": "6161" }, { "name": "Mako", "bytes": "2178" }, { "name": "Python", "bytes": "2148568" }, { "name": "Shell", "bytes": "29929" } ], "symlink_target": "" }
from novaclient import extension from novaclient.tests import utils from novaclient.tests.v1_1 import fakes from novaclient.v1_1.contrib import migrations extensions = [ extension.Extension(migrations.__name__.split(".")[-1], migrations), ] cs = fakes.FakeClient(extensions=extensions) class MigrationsTest(utils.TestCase): def test_list_migrations(self): ml = cs.migrations.list() cs.assert_called('GET', '/os-migrations') for m in ml: self.assertTrue(isinstance(m, migrations.Migration)) def test_list_migrations_with_filters(self): ml = cs.migrations.list('host1', 'finished', 'child1') cs.assert_called('GET', '/os-migrations?status=finished&host=host1' '&cell_name=child1') for m in ml: self.assertTrue(isinstance(m, migrations.Migration))
{ "content_hash": "6d3ff7b81170f5d5b99c2caa44500904", "timestamp": "", "source": "github", "line_count": 28, "max_line_length": 68, "avg_line_length": 32.464285714285715, "alnum_prop": 0.6314631463146315, "repo_name": "neumerance/cloudloon2", "id": "7b49c4cc70e84ac13432dcc0c655d8de1a2a8e07", "size": "1527", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": ".venv/lib/python2.7/site-packages/novaclient/tests/v1_1/contrib/test_migrations.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "178040" }, { "name": "JavaScript", "bytes": "460971" }, { "name": "Perl", "bytes": "1954" }, { "name": "Python", "bytes": "3227734" }, { "name": "Ruby", "bytes": "76" }, { "name": "Shell", "bytes": "14108" } ], "symlink_target": "" }
"""Tests for the GTFlow split handler Ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import random from tensorflow.contrib.boosted_trees.proto import learner_pb2 from tensorflow.contrib.boosted_trees.proto import split_info_pb2 from tensorflow.contrib.boosted_trees.python.ops import split_handler_ops from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.platform import googletest class SplitHandlerOpsTest(test_util.TensorFlowTestCase): def testMakeDenseSplit(self): """Tests split handler op.""" with self.test_session() as sess: # The data looks like the following after dividing by number of steps (2). # Gradients | Partition | Dense Quantile | # (1.2, 0.2) | 0 | 0 | # (-0.3, 0.19) | 0 | 1 | # (4.0, 0.13) | 1 | 1 | partition_ids = array_ops.constant([0, 0, 1], dtype=dtypes.int32) bucket_ids = array_ops.constant( [[0, 0], [1, 0], [1, 0]], dtype=dtypes.int64) gradients = array_ops.constant([2.4, -0.6, 8.0]) hessians = array_ops.constant([0.4, 0.38, 0.26]) bucket_boundaries = [0.3, 0.52] partitions, gains, splits = ( split_handler_ops.build_dense_inequality_splits( num_minibatches=2, partition_ids=partition_ids, bucket_ids=bucket_ids, gradients=gradients, hessians=hessians, bucket_boundaries=bucket_boundaries, l1_regularization=0.1, l2_regularization=1, tree_complexity_regularization=0, min_node_weight=0, class_id=-1, feature_column_group_id=0, multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS, weak_learner_type=learner_pb2.LearnerConfig.NORMAL_DECISION_TREE)) partitions, gains, splits = sess.run([partitions, gains, splits]) self.assertAllEqual([0, 1], partitions) # Check the split on partition 0. 
# -(1.2 - 0.1) / (0.2 + 1) expected_left_weight = -0.91666 # expected_left_weight * -(1.2 - 0.1) expected_left_gain = 1.0083333333333331 # (-0.3 + 0.1) / (0.19 + 1) expected_right_weight = 0.1680672 # expected_right_weight * -(-0.3 + 0.1) expected_right_gain = 0.033613445378151252 # (-0.3 + 1.2 - 0.1) ** 2 / (0.19 + 0.2 + 1) expected_bias_gain = 0.46043165467625885 split_info = split_info_pb2.SplitInfo() split_info.ParseFromString(splits[0]) left_child = split_info.left_child.vector right_child = split_info.right_child.vector split_node = split_info.split_node.dense_float_binary_split self.assertAllClose( expected_left_gain + expected_right_gain - expected_bias_gain, gains[0], 0.00001) self.assertAllClose([expected_left_weight], left_child.value, 0.00001) self.assertAllClose([expected_right_weight], right_child.value, 0.00001) self.assertEqual(0, split_node.feature_column) self.assertAllClose(0.3, split_node.threshold, 0.00001) # Check the split on partition 1. # (-4 + 0.1) / (0.13 + 1) expected_left_weight = -3.4513274336283186 expected_right_weight = 0 split_info = split_info_pb2.SplitInfo() split_info.ParseFromString(splits[1]) left_child = split_info.left_child.vector right_child = split_info.right_child.vector split_node = split_info.split_node.dense_float_binary_split # There's only one active bucket here so zero gain is expected. 
self.assertAllClose(0.0, gains[1], 0.00001) self.assertAllClose([expected_left_weight], left_child.value, 0.00001) self.assertAllClose([expected_right_weight], right_child.value, 0.00001) self.assertEqual(0, split_node.feature_column) self.assertAllClose(0.52, split_node.threshold, 0.00001) def testMakeMulticlassDenseSplit(self): """Tests split handler op.""" with self.test_session() as sess: partition_ids = array_ops.constant([0, 0, 1], dtype=dtypes.int32) bucket_ids = array_ops.constant( [[0, 0], [1, 0], [1, 0]], dtype=dtypes.int64) gradients = array_ops.constant([[2.4, 3.0], [-0.6, 0.1], [8.0, 1.0]]) hessians = array_ops.constant([[[0.4, 1], [1, 1]], [[0.38, 1], [1, 1]], [[0.26, 1], [1, 1]]]) bucket_boundaries = [0.3, 0.52] partitions, gains, splits = ( split_handler_ops.build_dense_inequality_splits( num_minibatches=2, partition_ids=partition_ids, bucket_ids=bucket_ids, gradients=gradients, hessians=hessians, bucket_boundaries=bucket_boundaries, l1_regularization=0, l2_regularization=1, tree_complexity_regularization=0, min_node_weight=0, class_id=-1, feature_column_group_id=0, multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN, weak_learner_type=learner_pb2.LearnerConfig.NORMAL_DECISION_TREE)) partitions, gains, splits = sess.run([partitions, gains, splits]) self.assertAllEqual([0, 1], partitions) split_info = split_info_pb2.SplitInfo() split_info.ParseFromString(splits[0]) left_child = split_info.left_child.vector right_child = split_info.right_child.vector split_node = split_info.split_node.dense_float_binary_split # Each leaf has 2 element vector. 
self.assertEqual(2, len(left_child.value)) self.assertEqual(2, len(right_child.value)) self.assertEqual(0, split_node.feature_column) self.assertAllClose(0.3, split_node.threshold, 1e-6) def testMakeDenseSplitEmptyInputs(self): """Tests empty inputs op.""" with self.test_session() as sess: partition_ids = array_ops.constant([], dtype=dtypes.int32) bucket_ids = array_ops.constant([[]], dtype=dtypes.int64) gradients = array_ops.constant([]) hessians = array_ops.constant([]) bucket_boundaries = [0.3, 0.52] partitions, gains, splits = ( split_handler_ops.build_dense_inequality_splits( num_minibatches=0, partition_ids=partition_ids, bucket_ids=bucket_ids, gradients=gradients, hessians=hessians, bucket_boundaries=bucket_boundaries, l1_regularization=0.1, l2_regularization=1, tree_complexity_regularization=0, min_node_weight=0, class_id=-1, feature_column_group_id=0, multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS, weak_learner_type=learner_pb2.LearnerConfig.NORMAL_DECISION_TREE)) partitions, gains, splits = sess.run([partitions, gains, splits]) # .assertEmpty doesn't exist on ubuntu-contrib self.assertEqual(0, len(partitions)) self.assertEqual(0, len(gains)) self.assertEqual(0, len(splits)) def testMakeSparseSplit(self): """Tests split handler op.""" with self.test_session() as sess: # The data looks like the following after dividing by number of steps (2). # Gradients | Partition | bucket ID | # (0.9, 0.39) | 0 | -1 | # (1.2, 0.2) | 0 | 0 | # (0.2, 0.12) | 0 | 1 | # (4.0, 0.13) | 1 | -1 | # (4.0, 0.13) | 1 | 1 | partition_ids = array_ops.constant([0, 0, 0, 1, 1], dtype=dtypes.int32) # We have only 1 dimension in our sparse feature column. 
bucket_ids = array_ops.constant([-1, 0, 1, -1, 1], dtype=dtypes.int64) dimension_ids = array_ops.constant([0, 0, 0, 0, 0], dtype=dtypes.int64) bucket_ids = array_ops.stack([bucket_ids, dimension_ids], axis=1) gradients = array_ops.constant([1.8, 2.4, 0.4, 8.0, 8.0]) hessians = array_ops.constant([0.78, 0.4, 0.24, 0.26, 0.26]) bucket_boundaries = array_ops.constant([0.3, 0.52]) partitions, gains, splits = ( split_handler_ops.build_sparse_inequality_splits( num_minibatches=2, partition_ids=partition_ids, bucket_ids=bucket_ids, gradients=gradients, hessians=hessians, bucket_boundaries=bucket_boundaries, l1_regularization=0, l2_regularization=2, tree_complexity_regularization=0, min_node_weight=0, feature_column_group_id=0, bias_feature_id=-1, class_id=-1, multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)) partitions, gains, splits = (sess.run([partitions, gains, splits])) self.assertAllEqual([0, 1], partitions) self.assertEqual(2, len(splits)) # Check the split on partition 0. # -(0.2 + 1.2) / (0.12 + 0.2 + 2) expected_left_weight = -0.603448275862069 # (0.2 + 1.2) ** 2 / (0.12 + 0.2 + 2) expected_left_gain = 0.8448275862068965 # 0.5 / (0.07 + 2) expected_right_weight = 0.24154589371980678 # 0.5 ** 2 / (0.07 + 2) expected_right_gain = 0.12077294685990339 # (0.2 + 1.2 - 0.5) ** 2 / (0.12 + 0.2 + 0.07 + 2) expected_bias_gain = 0.3389121338912133 split_info = split_info_pb2.SplitInfo() split_info.ParseFromString(splits[0]) left_child = split_info.left_child.vector right_child = split_info.right_child.vector split_node = split_info.split_node.sparse_float_binary_split_default_right self.assertAllClose( expected_left_gain + expected_right_gain - expected_bias_gain, gains[0]) self.assertAllClose([expected_left_weight], left_child.value) self.assertAllClose([expected_right_weight], right_child.value) self.assertEqual(0, split_node.split.feature_column) # Sparse is one dimensional. 
self.assertEqual(0, split_node.split.dimension_id) self.assertAllClose(0.52, split_node.split.threshold) # Check the split on partition 1. expected_left_weight = -1.8779342723004695 expected_right_weight = 0 # Verify candidate for partition 1, there's only one active bucket here # so zero gain is expected. split_info.ParseFromString(splits[1]) left_child = split_info.left_child.vector right_child = split_info.right_child.vector split_node = split_info.split_node.sparse_float_binary_split_default_left self.assertAllClose(0.0, gains[1]) self.assertAllClose([expected_left_weight], left_child.value) self.assertAllClose([expected_right_weight], right_child.value) self.assertEqual(0, split_node.split.feature_column) # Sparse is one dimensional. self.assertEqual(0, split_node.split.dimension_id) self.assertAllClose(0.52, split_node.split.threshold) def testMakeSparseSplitAllEmptyDimensions(self): """Tests split handler op when all dimensions have only bias bucket id.""" with self.test_session() as sess: # The data looks like the following after dividing by number of steps (2). # Gradients | Partition | Dimension | bucket ID | # (0.9, 0.39) | 0 | 0 | -1 | # (4.0, 0.13) | 1 | 0 | -1 | partition_ids = array_ops.constant([0, 1], dtype=dtypes.int32) # We have only 1 dimension in our sparse feature column. 
bucket_ids = array_ops.constant([[-1, 0], [-1, 0]], dtype=dtypes.int64) gradients = array_ops.constant([1.8, 8.0]) hessians = array_ops.constant([0.78, 0.26]) bucket_boundaries = array_ops.constant([0.3, 0.52]) partitions, gains, splits = ( split_handler_ops.build_sparse_inequality_splits( num_minibatches=2, partition_ids=partition_ids, bucket_ids=bucket_ids, gradients=gradients, hessians=hessians, bucket_boundaries=bucket_boundaries, l1_regularization=0, l2_regularization=2, tree_complexity_regularization=0, min_node_weight=0, feature_column_group_id=0, bias_feature_id=-1, class_id=-1, multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)) partitions, gains, splits = (sess.run([partitions, gains, splits])) self.assertEqual(0, len(partitions)) self.assertEqual(0, len(splits)) def testMakeSparseMultidimensionalSplit(self): """Tests split handler op.""" with self.test_session() as sess: # Num of steps is 2. # The feature column is three dimensional. # First dimension has bias bucket only, the second has bias bucket and # two valid buckets, the third has just one bias bucket and one valid # bucket. # Gradients | Partition | Dimension | bucket ID | # (0.9, 0.39) | 0 | 0 | -1 | # (1.2, 0.2) | 0 | 1 | 0 | # (0.2, 0.12) | 0 | 1 | 2 | # (0.1, 0.1) | 0 | 2 | 3 | # Now second node - nothing interesting there, just one dimension. # Second node has the same bucket ids for all dimensions. # (4.0, 0.13) | 1 | 0 | -1 | # (4.0, 0.13) | 1 | 2 | 3 | # Tree node ids. 
partition_ids = array_ops.constant([0, 0, 0, 0, 1, 1], dtype=dtypes.int32) dimension_ids = array_ops.constant([0, 1, 1, 2, 0, 2], dtype=dtypes.int64) bucket_ids = array_ops.constant([-1, 0, 2, 3, -1, 3], dtype=dtypes.int64) bucket_ids = array_ops.stack([bucket_ids, dimension_ids], axis=1) gradients = array_ops.constant([1.8, 2.4, 0.4, 0.2, 8.0, 8.0]) hessians = array_ops.constant([0.78, 0.4, 0.24, 0.2, 0.26, 0.26]) bucket_boundaries = array_ops.constant([0.3, 0.52, 0.58, 0.6]) partitions, gains, splits = ( split_handler_ops.build_sparse_inequality_splits( num_minibatches=2, partition_ids=partition_ids, bucket_ids=bucket_ids, gradients=gradients, hessians=hessians, bucket_boundaries=bucket_boundaries, l1_regularization=0, l2_regularization=2, tree_complexity_regularization=0, min_node_weight=0, feature_column_group_id=0, bias_feature_id=-1, class_id=-1, multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)) partitions, gains, splits = (sess.run([partitions, gains, splits])) self.assertAllEqual([0, 1], partitions) self.assertEqual(2, len(splits)) # Check the split on node 0 - it should split on second dimension # -(0.2 + 1.2) / (0.12 + 0.2 + 2) expected_left_weight = -0.603448275862069 # (0.2 + 1.2) ** 2 / (0.12 + 0.2 + 2) expected_left_gain = 0.8448275862068965 # 0.5 / (0.07 + 2) expected_right_weight = 0.24154589371980678 # 0.5 ** 2 / (0.07 + 2) expected_right_gain = 0.12077294685990339 # (0.2 + 1.2 - 0.5) ** 2 / (0.12 + 0.2 + 0.07 + 2) expected_bias_gain = 0.3389121338912133 split_info = split_info_pb2.SplitInfo() split_info.ParseFromString(splits[0]) left_child = split_info.left_child.vector right_child = split_info.right_child.vector split_node = split_info.split_node.sparse_float_binary_split_default_right self.assertAllClose( expected_left_gain + expected_right_gain - expected_bias_gain, gains[0]) self.assertAllClose([expected_left_weight], left_child.value) self.assertAllClose([expected_right_weight], right_child.value) self.assertEqual(0, 
split_node.split.feature_column) # Split happened on second dimension. self.assertEqual(1, split_node.split.dimension_id) self.assertAllClose(0.58, split_node.split.threshold) # Check the split on partition 1. expected_left_weight = -1.8779342723004695 expected_right_weight = 0 # Verify candidate for partition 1, there's only one active bucket here # so zero gain is expected. split_info.ParseFromString(splits[1]) left_child = split_info.left_child.vector right_child = split_info.right_child.vector split_node = split_info.split_node.sparse_float_binary_split_default_left self.assertAllClose(0.0, gains[1]) self.assertAllClose([expected_left_weight], left_child.value) self.assertAllClose([expected_right_weight], right_child.value) self.assertEqual(0, split_node.split.feature_column) self.assertEqual(2, split_node.split.dimension_id) self.assertAllClose(0.6, split_node.split.threshold) def testMakeSparseSplitDefaultDirectionIsStable(self): """Tests default direction is stable when no sparsity.""" random.seed(1123) for _ in range(50): with self.test_session() as sess: grad = random.random() hessian = random.random() # The data looks like the following (divide by the num of steps 2). # Gradients | Partition | bucket ID | # (grad, hessian) | 0 | -1 | # And then 100 buckets of # (grad/100, hessian/100), so there is no sparsity. n_buckets = 100 # 1 for the overall sum, and 100 buckets. partition_ids = array_ops.constant( [0] * (n_buckets + 1), dtype=dtypes.int32) # We have only 1 dimension in our sparse feature column. 
bucket_ids = [-1] + [n for n in range(100)] bucket_ids = array_ops.constant(bucket_ids, dtype=dtypes.int64) dimension_ids = array_ops.constant( [0] * (n_buckets + 1), dtype=dtypes.int64) bucket_ids = array_ops.stack([bucket_ids, dimension_ids], axis=1) gradients = [grad] + [grad / n_buckets] * n_buckets gradients = array_ops.constant(gradients) hessians = [hessian] + [hessian / n_buckets] * n_buckets hessians = array_ops.constant(hessians) boundaries = [x * 1 for x in range(n_buckets + 1)] bucket_boundaries = array_ops.constant(boundaries, dtype=dtypes.float32) partitions, gains, splits = ( split_handler_ops.build_sparse_inequality_splits( num_minibatches=2, partition_ids=partition_ids, bucket_ids=bucket_ids, gradients=gradients, hessians=hessians, bucket_boundaries=bucket_boundaries, l1_regularization=0, l2_regularization=2, tree_complexity_regularization=0, min_node_weight=0, feature_column_group_id=0, bias_feature_id=-1, class_id=-1, multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)) partitions, gains, splits = (sess.run([partitions, gains, splits])) self.assertAllEqual([0], partitions) self.assertEqual(1, len(splits)) split_info = split_info_pb2.SplitInfo() split_info.ParseFromString(splits[0]) self.assertTrue( split_info.split_node.HasField( 'sparse_float_binary_split_default_left')) def testMakeMulticlassSparseSplit(self): """Tests split handler op.""" with self.test_session() as sess: partition_ids = array_ops.constant([0, 0, 0, 1, 1], dtype=dtypes.int32) bucket_ids = array_ops.constant( [[-1, 0], [0, 0], [1, 0], [-1, 0], [1, 0]], dtype=dtypes.int64) gradients = array_ops.constant([[1.8, 3.5], [2.4, 1.0], [0.4, 4.0], [8.0, 3.1], [8.0, 0.8]]) hessian_0 = [[0.78, 1], [12, 1]] hessian_1 = [[0.4, 1], [1, 1]] hessian_2 = [[0.24, 1], [1, 1]] hessian_3 = [[0.26, 1], [1, 1]] hessian_4 = [[0.26, 1], [1, 1]] hessians = array_ops.constant( [hessian_0, hessian_1, hessian_2, hessian_3, hessian_4]) bucket_boundaries = array_ops.constant([0.3, 0.52]) 
partitions, gains, splits = ( split_handler_ops.build_sparse_inequality_splits( num_minibatches=2, partition_ids=partition_ids, bucket_ids=bucket_ids, gradients=gradients, hessians=hessians, bucket_boundaries=bucket_boundaries, l1_regularization=0, l2_regularization=2, tree_complexity_regularization=0, min_node_weight=0, feature_column_group_id=0, bias_feature_id=-1, class_id=-1, multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN)) partitions, gains, splits = (sess.run([partitions, gains, splits])) split_info = split_info_pb2.SplitInfo() split_info.ParseFromString(splits[0]) left_child = split_info.left_child.vector right_child = split_info.right_child.vector split_node = split_info.split_node.sparse_float_binary_split_default_right # Each leaf has 2 element vector. self.assertEqual(2, len(left_child.value)) self.assertEqual(2, len(right_child.value)) self.assertEqual(0, split_node.split.feature_column) self.assertAllClose(0.52, split_node.split.threshold) def testMakeCategoricalEqualitySplit(self): """Tests split handler op for categorical equality split.""" with self.test_session() as sess: # The data looks like the following after dividing by number of steps (2). 
# Gradients | Partition | Feature ID | # (0.9, 0.39) | 0 | -1 | # (0.2, 0.12) | 0 | 1 | # (1.4, 0.32) | 0 | 2 | # (4.0, 0.13) | 1 | -1 | # (4.0, 0.13) | 1 | 1 | gradients = [1.8, 0.4, 2.8, 8.0, 8.0] hessians = [0.78, 0.24, 0.64, 0.26, 0.26] partition_ids = [0, 0, 0, 1, 1] feature_ids = array_ops.constant( [[-1, 0], [1, 0], [2, 0], [-1, 0], [1, 0]], dtype=dtypes.int64) partitions, gains, splits = ( split_handler_ops.build_categorical_equality_splits( num_minibatches=2, partition_ids=partition_ids, feature_ids=feature_ids, gradients=gradients, hessians=hessians, l1_regularization=0.1, l2_regularization=1, tree_complexity_regularization=0, min_node_weight=0, feature_column_group_id=0, bias_feature_id=-1, class_id=-1, multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)) partitions, gains, splits = sess.run([partitions, gains, splits]) self.assertAllEqual([0, 1], partitions) # Check the split on partition 0. # -(0.2 + 1.2 - 0.1) / (0.12 + 0.2 + 1) expected_left_weight = -0.9848484848484846 # (0.2 + 1.2 - 0.1) ** 2 / (0.12 + 0.2 + 1) expected_left_gain = 1.2803030303030298 # -(-0.5 + 0.1) / (0.07 + 1) expected_right_weight = 0.37383177570093457 # (-0.5 + 0.1) ** 2 / (0.07 + 1) expected_right_gain = 0.14953271028037385 # (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1) expected_bias_gain = 0.46043165467625885 split_info = split_info_pb2.SplitInfo() split_info.ParseFromString(splits[0]) left_child = split_info.left_child.vector right_child = split_info.right_child.vector split_node = split_info.split_node.categorical_id_binary_split self.assertEqual(0, split_node.feature_column) self.assertEqual(2, split_node.feature_id) self.assertAllClose( expected_left_gain + expected_right_gain - expected_bias_gain, gains[0], 0.00001) self.assertAllClose([expected_left_weight], left_child.value, 0.00001) self.assertAllClose([expected_right_weight], right_child.value, 0.00001) # Check the split on partition 1. 
# (-4 + 0.1) / (0.13 + 1) expected_left_weight = -3.4513274336283186 # (-4 + 0.1) ** 2 / (0.13 + 1) expected_left_gain = 13.460176991150442 expected_right_weight = 0 expected_right_gain = 0 # (-4 + 0.1) ** 2 / (0.13 + 1) expected_bias_gain = 13.460176991150442 # Verify candidate for partition 1, there's only one active feature here # so zero gain is expected. split_info = split_info_pb2.SplitInfo() split_info.ParseFromString(splits[1]) left_child = split_info.left_child.vector right_child = split_info.right_child.vector split_node = split_info.split_node.categorical_id_binary_split self.assertAllClose(0.0, gains[1], 0.00001) self.assertAllClose([expected_left_weight], left_child.value, 0.00001) self.assertAllClose([expected_right_weight], right_child.value, 0.00001) self.assertEqual(0, split_node.feature_column) self.assertEqual(1, split_node.feature_id) def testMakeMulticlassCategoricalEqualitySplit(self): """Tests split handler op for categorical equality split in multiclass.""" with self.test_session() as sess: gradients = array_ops.constant([[1.8, 3.5], [2.4, 1.0], [0.4, 4.0], [9.0, 3.1], [3.0, 0.8]]) hessian_0 = [[0.78, 1], [12, 1]] hessian_1 = [[0.4, 1], [1, 1]] hessian_2 = [[0.24, 1], [1, 1]] hessian_3 = [[0.16, 2], [-1, 1]] hessian_4 = [[0.6, 1], [2, 1]] hessians = array_ops.constant( [hessian_0, hessian_1, hessian_2, hessian_3, hessian_4]) partition_ids = [0, 0, 0, 1, 1] feature_ids = array_ops.constant( [[-1, 0], [1, 0], [2, 0], [-1, 0], [1, 0]], dtype=dtypes.int64) partitions, gains, splits = ( split_handler_ops.build_categorical_equality_splits( num_minibatches=2, partition_ids=partition_ids, feature_ids=feature_ids, gradients=gradients, hessians=hessians, l1_regularization=0.1, l2_regularization=1, tree_complexity_regularization=0, min_node_weight=0, feature_column_group_id=0, bias_feature_id=-1, class_id=-1, multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN)) partitions, gains, splits = sess.run([partitions, gains, splits]) 
self.assertAllEqual([0, 1], partitions) split_info = split_info_pb2.SplitInfo() split_info.ParseFromString(splits[1]) left_child = split_info.left_child.vector right_child = split_info.right_child.vector split_node = split_info.split_node.categorical_id_binary_split # Each leaf has 2 element vector. self.assertEqual(2, len(left_child.value)) self.assertEqual(2, len(right_child.value)) self.assertEqual(0, split_node.feature_column) self.assertEqual(1, split_node.feature_id) def testMakeCategoricalEqualitySplitEmptyInput(self): with self.test_session() as sess: gradients = [] hessians = [] partition_ids = [] feature_ids = [[]] partitions, gains, splits = ( split_handler_ops.build_categorical_equality_splits( num_minibatches=0, partition_ids=partition_ids, feature_ids=feature_ids, gradients=gradients, hessians=hessians, l1_regularization=0.1, l2_regularization=1, tree_complexity_regularization=0, min_node_weight=0, feature_column_group_id=0, bias_feature_id=-1, class_id=-1, multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)) partitions, gains, splits = (sess.run([partitions, gains, splits])) self.assertEqual(0, len(partitions)) self.assertEqual(0, len(gains)) self.assertEqual(0, len(splits)) if __name__ == "__main__": googletest.main()
{ "content_hash": "3b18d439e2fa5e3607eea56d9bdc309a", "timestamp": "", "source": "github", "line_count": 671, "max_line_length": 80, "avg_line_length": 41.83904619970194, "alnum_prop": 0.5997720310607679, "repo_name": "ZhangXinNan/tensorflow", "id": "2589504762787deaf598777650b8372320824c22", "size": "28763", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tensorflow/contrib/boosted_trees/python/kernel_tests/split_handler_ops_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "1286" }, { "name": "Batchfile", "bytes": "9258" }, { "name": "C", "bytes": "327005" }, { "name": "C#", "bytes": "8215" }, { "name": "C++", "bytes": "46648068" }, { "name": "CMake", "bytes": "206720" }, { "name": "Dockerfile", "bytes": "6978" }, { "name": "Go", "bytes": "1210133" }, { "name": "HTML", "bytes": "4681865" }, { "name": "Java", "bytes": "830576" }, { "name": "Jupyter Notebook", "bytes": "2632421" }, { "name": "LLVM", "bytes": "6536" }, { "name": "Makefile", "bytes": "51309" }, { "name": "Objective-C", "bytes": "15650" }, { "name": "Objective-C++", "bytes": "99243" }, { "name": "PHP", "bytes": "1357" }, { "name": "Perl", "bytes": "7536" }, { "name": "PureBasic", "bytes": "25356" }, { "name": "Python", "bytes": "40046802" }, { "name": "Ruby", "bytes": "553" }, { "name": "Shell", "bytes": "455624" }, { "name": "Smarty", "bytes": "6976" } ], "symlink_target": "" }
class ImageFinder:
    ''' Interface / Abstract Class concept for readability.

    Base "strategy" of a Strategy pattern: concrete finders subclass this
    and override find(); the base implementation always raises.
    '''
    def find(self, image):
        # explicitly set it up so this can't be called directly
        raise NotImplementedError('Exception raised, ImageFinder is supposed to be an interface / abstract class!')

class ImageFinderFlickr(ImageFinder):
    ''' Locates images in flickr (concrete strategy #1).'''
    def find(self, image):
        # in reality, query Flickr API for image path; here we just echo a label
        return "Found image in Flickr: " + image

class ImageFinderDatabase(ImageFinder):
    ''' Locates images in database (concrete strategy #2). '''
    def find(self, image):
        # in reality, query database for image path; here we just echo a label
        return "Found image in database: " + image

if __name__ == "__main__" :
    # Demo driver (Python 2 syntax): exercise the abstract base and both
    # concrete strategies through the same find() interface.
    finderBase = ImageFinder()
    finderFlickr = ImageFinderFlickr()
    finderDatabase = ImageFinderDatabase()

    try:
        # this is going to blow up! The base class find() always raises.
        print finderBase.find('chickens')
    except NotImplementedError as e:
        print "The following exception was expected:"
        print e

    print finderFlickr.find('chickens')
    print finderFlickr.find('rabbits')
    print finderDatabase.find('dogs')
    print finderDatabase.find('cats')
{ "content_hash": "e0538ede898910226c46fbcdad518fb7", "timestamp": "", "source": "github", "line_count": 43, "max_line_length": 115, "avg_line_length": 28.53488372093023, "alnum_prop": 0.6536267318663407, "repo_name": "lmokto/py-designpattern", "id": "89e00f2932823cd5f6f4596e98a1a577bd11bab2", "size": "1829", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "strategy/strategy_ex1.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "32464" } ], "symlink_target": "" }
from __future__ import absolute_import

from json import loads

from ...core.json_encoder import serialize_json
from ...document.util import references_json
from ..message import Message
from . import register

@register
class patch_doc_1(Message):
    ''' Define the ``PATCH-DOC`` message (revision 1) for sending Document
    patch events between remote documents.

    The ``content`` fragment for this message has the form:

    .. code-block:: python

        {
            'events'     : <protocol document events>
            'references' : <model references>
        }

    '''

    msgtype = 'PATCH-DOC'
    revision = 1

    def __init__(self, header, metadata, content):
        super(patch_doc_1, self).__init__(header, metadata, content)

    @classmethod
    def create(cls, events, use_buffers=True, **metadata):
        ''' Create a ``PATCH-DOC`` message

        Args:
            events (list) :
                A list of patch events to apply to a document

            use_buffers (bool, optional) :
                Whether to extract binary payloads into message buffers
                instead of inlining them (default: True)

        Any additional keyword arguments will be put into the message
        ``metadata`` fragment as-is.

        Raises:
            ValueError :
                If ``events`` is empty, or if the events do not all belong
                to a single document.

        '''
        header = cls.create_header()

        if not events:
            raise ValueError("PATCH-DOC message requires at least one event")

        # All events must target the same document; a PATCH-DOC patches
        # exactly one document on the other side of the connection.
        docs = { event.document for event in events }
        if len(docs) != 1:
            raise ValueError("PATCH-DOC message configured with events for more than one document")

        # this roundtrip is fortunate, but is needed because there are type conversions
        # in BokehJSONEncoder which keep us from easily generating non-string JSON
        patch_json, buffers = process_document_events(events, use_buffers)
        content = loads(patch_json)

        msg = cls(header, metadata, content)

        # Attach any binary payloads extracted during serialization.
        for (header, payload) in buffers:
            msg.add_buffer(header, payload)

        return msg

    def apply_to_document(self, doc, setter=None):
        ''' Apply this message's JSON patch to ``doc``.

        Args:
            doc (Document) : document to patch
            setter (optional) : event setter passed through to the document

        '''
        doc.apply_json_patch(self.content, setter)

def process_document_events(events, use_buffers=True):
    ''' Create a JSON string describing a patch to be applied as well as
    any optional buffers.

    Args:
      events : list of events to be translated into patches

      use_buffers (bool, optional) :
        Whether to collect binary payloads into the returned buffer list
        (default: True). When False, the buffer list is empty.

    Returns:
      str, list :
        JSON string which can be applied to make the given updates to obj
        as well as any optional buffers

    '''
    json_events = []
    references = set()

    # Each event serializes itself and accumulates the models it references
    # (and, optionally, binary buffers) into the shared collections.
    buffers = [] if use_buffers else None
    for event in events:
        json_events.append(event.generate(references, buffers))

    json = {
        'events'     : json_events,
        'references' : references_json(references),
    }

    return serialize_json(json), buffers if use_buffers else []
{ "content_hash": "75513499e585e2e546a35363d0f8b833", "timestamp": "", "source": "github", "line_count": 98, "max_line_length": 99, "avg_line_length": 27.76530612244898, "alnum_prop": 0.6277104005880191, "repo_name": "rs2/bokeh", "id": "ec65c7f89be671739b9af27c8f7813008c5970e4", "size": "2721", "binary": false, "copies": "7", "ref": "refs/heads/master", "path": "bokeh/protocol/messages/patch_doc.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "1442" }, { "name": "CSS", "bytes": "101858" }, { "name": "CoffeeScript", "bytes": "1220192" }, { "name": "HTML", "bytes": "48230" }, { "name": "JavaScript", "bytes": "57773" }, { "name": "Makefile", "bytes": "1150" }, { "name": "Python", "bytes": "2648330" }, { "name": "Shell", "bytes": "8519" }, { "name": "TypeScript", "bytes": "236495" } ], "symlink_target": "" }
from __future__ import division, print_function, absolute_import
import amitgroup as ag
import numpy as np
from pnet.layer import Layer

@Layer.register('intensity-threshold-layer')
class IntensityThresholdLayer(Layer):
    """Layer that binarizes its input by thresholding intensity values.

    Values strictly greater than ``threshold`` map to 1, all others to 0,
    and a trailing singleton axis is appended to the output.
    """
    def __init__(self, threshold=0.5):
        # Intensity cut-off used by _extract.
        self._threshold = threshold

    def _extract(self, phi, data):
        """Apply ``phi`` to ``data`` and binarize with the configured threshold.

        Returns a uint8 array with an extra trailing axis of size 1.
        """
        X = phi(data)
        # Bug fix: honor the configured threshold instead of the hard-coded
        # 0.5 that silently ignored the constructor argument. Behavior is
        # unchanged for the default threshold=0.5.
        return (X > self._threshold).astype(np.uint8)[..., np.newaxis]

    def save_to_dict(self):
        # Persist the only piece of configurable state.
        return dict(threshold=self._threshold)

    @classmethod
    def load_from_dict(cls, d):
        # Inverse of save_to_dict.
        return cls(threshold=d['threshold'])
{ "content_hash": "93c302a5f41920ed7e31423713cc770f", "timestamp": "", "source": "github", "line_count": 22, "max_line_length": 65, "avg_line_length": 26.727272727272727, "alnum_prop": 0.6615646258503401, "repo_name": "amitgroup/parts-net", "id": "e6ec518ffdf06f9717aa6741b0af0ad3c784f95e", "size": "588", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pnet/intensity_threshold_layer.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "533509" } ], "symlink_target": "" }
"""Modify Group Entry Message.""" from enum import IntEnum from pyof.foundation.base import GenericMessage from pyof.foundation.basic_types import ( FixedTypeList, Pad, UBInt8, UBInt16, UBInt32) from pyof.v0x04.common.header import Header, Type from pyof.v0x04.controller2switch.common import Bucket __all__ = ('GroupMod', 'GroupModCommand', 'GroupType', 'Group', 'ListOfBuckets') class Group(IntEnum): """Group numbering. Groups can use any number up to attr:`OFPG_MAX`.""" #: Last usable group number. OFPG_MAX = 0xffffff00 #: Fake groups. #: Represents all groups for group delete commands. OFPG_ALL = 0xfffffffc #: Wildcard group used only for flow stats requests. # Select all flows regardless of group (including flows with no group). OFPG_ANY = 0xffffffff class GroupModCommand(IntEnum): """Group commands.""" #: New group. OFPGC_ADD = 0 #: Modify all matching groups. OFPGC_MODIFY = 1 #: Delete all matching groups. OFPGC_DELETE = 2 class GroupType(IntEnum): """Group types. Range [128, 255] is reserved for experimental use.""" #: All (multicast/broadcast) group. OFPGT_ALL = 0 #: Select group. OFPGT_SELECT = 1 #: Indirect group. OFPGT_INDIRECT = 2 #: Fast failover group. OFPGT_FF = 3 class ListOfBuckets(FixedTypeList): """List of buckets. Represented by instances of Bucket. """ def __init__(self, items=None): """Create a ListOfBuckets with the optional parameters below. Args: items (Bucket): Instance or a list of instances. """ super().__init__(pyof_class=Bucket, items=items) class GroupMod(GenericMessage): """Group setup and teardown (controller -> datapath).""" header = Header(message_type=Type.OFPT_GROUP_MOD) command = UBInt16(enum_ref=GroupModCommand) group_type = UBInt8() #: Pad to 64 bits. pad = Pad(1) group_id = UBInt32() buckets = ListOfBuckets() def __init__(self, xid=None, command=None, group_type=None, group_id=None, buckets=None): """Create a GroupMod with the optional parameters below. Args: xid (int): Header's transaction id. Defaults to random. 
command (GroupModCommand): One of OFPGC_*. group_type (GroupType): One of OFPGT_*. group_id (int): Group identifier. buckets (:class:`ListOfBuckets`): The length of the bucket array is inferred from the length field in the header. """ super().__init__(xid) self.command = command self.group_type = group_type self.group_id = group_id self.buckets = buckets
{ "content_hash": "c9b8c8f60d313e7a7f8979575dfdc0e8", "timestamp": "", "source": "github", "line_count": 93, "max_line_length": 78, "avg_line_length": 29.397849462365592, "alnum_prop": 0.6349670811997074, "repo_name": "cemsbr/python-openflow", "id": "17771251d020b07aea0177b11eb23e68d818ca95", "size": "2734", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pyof/v0x04/controller2switch/group_mod.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "481730" } ], "symlink_target": "" }
from version import __VERSION__, VERSION
from _jep import *
from hook import *


class JavaException(Exception):
    """Python-side wrapper for an exception thrown in Java.

    The incoming error string is formatted as ``"classname: message"``;
    the message part becomes the Python exception text and the class name
    is kept on :attr:`java_name`.
    """

    def __init__(self, er):
        # Separate the Java class name from the human-readable message.
        # Only the first colon is significant; the message may contain more.
        java_class, detail = er.split(':', 1)
        # Expose the originating Java exception class to callers.
        self.java_name = java_class
        super(JavaException, self).__init__(detail)
{ "content_hash": "35f9f9fbe6403f6d009fa6cc4b0b4629", "timestamp": "", "source": "github", "line_count": 11, "max_line_length": 52, "avg_line_length": 31.727272727272727, "alnum_prop": 0.6246418338108882, "repo_name": "hydrosolutions/model_RRMDA_Themi", "id": "30a53d5aedbb0efbc0ef87bb188d2968850522a9", "size": "349", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "java/resources/jep-3.1.0/jep/__init__.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Batchfile", "bytes": "32964" }, { "name": "C", "bytes": "1010051" }, { "name": "C++", "bytes": "132758" }, { "name": "CSS", "bytes": "891" }, { "name": "FORTRAN", "bytes": "41162" }, { "name": "Groff", "bytes": "1047274" }, { "name": "HTML", "bytes": "3775235" }, { "name": "Java", "bytes": "348787" }, { "name": "Makefile", "bytes": "17582" }, { "name": "Matlab", "bytes": "23388" }, { "name": "Pascal", "bytes": "48437" }, { "name": "Perl", "bytes": "24353" }, { "name": "Python", "bytes": "13888" }, { "name": "Scilab", "bytes": "4470" }, { "name": "Shell", "bytes": "153711" } ], "symlink_target": "" }
import pexpect
import sys
from getpass import getpass
import time
import re

def main():
    # Target lab device and SSH login parameters; the password is prompted
    # interactively so it never appears in the source or shell history.
    DeviceIp = '184.105.247.71'
    User = 'pyclass'
    Port = 22
    Pass = getpass()

    # Spawn an interactive SSH session and authenticate.
    SSHconn = pexpect.spawn('ssh -l {} {} -p {}'.format(User, DeviceIp, Port))
    SSHconn.timeout = 5
    # 'assword:' matches the prompt regardless of capitalization of the
    # leading letter ('Password:' / 'password:').
    SSHconn.expect('assword:')
    SSHconn.sendline(Pass)
    # '#' is the privileged-exec prompt we wait for after login.
    SSHconn.expect('#')
    print SSHconn.before,SSHconn.after

    try:
        # Run a show command and echo everything up to the next prompt.
        SSHconn.sendline('show ip int br')
        SSHconn.expect('#')
        print SSHconn.before,SSHconn.after
        time.sleep(1)

    except pexpect.TIMEOUT:
        # expect() timed out (5s) without seeing the prompt again.
        print "No results were found for your given request!\n"

if __name__ == "__main__":
    main()
{ "content_hash": "090f1652f058c08939526de8e4010932", "timestamp": "", "source": "github", "line_count": 30, "max_line_length": 78, "avg_line_length": 22.8, "alnum_prop": 0.6140350877192983, "repo_name": "brutalic/pynet_brutal", "id": "93905e1c7d87b4e935389e1fbe427a9561c483a2", "size": "707", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "class4/C4E3Pexpect.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "144403" } ], "symlink_target": "" }
"""Tests for Renault selects.""" from unittest.mock import patch import pytest from renault_api.kamereon import schemas from homeassistant.components.select import ( ATTR_OPTION, DOMAIN as SELECT_DOMAIN, SERVICE_SELECT_OPTION, ) from homeassistant.config_entries import ConfigEntry from homeassistant.const import ATTR_ENTITY_ID, STATE_UNKNOWN, Platform from homeassistant.core import HomeAssistant from . import ( check_device_registry, check_entities, check_entities_no_data, check_entities_unavailable, ) from .const import MOCK_VEHICLES from tests.common import load_fixture, mock_device_registry, mock_registry pytestmark = pytest.mark.usefixtures("patch_renault_account", "patch_get_vehicles") @pytest.fixture(autouse=True) def override_platforms(): """Override PLATFORMS.""" with patch("homeassistant.components.renault.PLATFORMS", [Platform.SELECT]): yield @pytest.mark.usefixtures("fixtures_with_data") async def test_selects( hass: HomeAssistant, config_entry: ConfigEntry, vehicle_type: str ): """Test for Renault selects.""" entity_registry = mock_registry(hass) device_registry = mock_device_registry(hass) await hass.config_entries.async_setup(config_entry.entry_id) await hass.async_block_till_done() mock_vehicle = MOCK_VEHICLES[vehicle_type] check_device_registry(device_registry, mock_vehicle["expected_device"]) expected_entities = mock_vehicle[Platform.SELECT] assert len(entity_registry.entities) == len(expected_entities) check_entities(hass, entity_registry, expected_entities) @pytest.mark.usefixtures("fixtures_with_no_data") async def test_select_empty( hass: HomeAssistant, config_entry: ConfigEntry, vehicle_type: str ): """Test for Renault selects with empty data from Renault.""" entity_registry = mock_registry(hass) device_registry = mock_device_registry(hass) await hass.config_entries.async_setup(config_entry.entry_id) await hass.async_block_till_done() mock_vehicle = MOCK_VEHICLES[vehicle_type] check_device_registry(device_registry, 
mock_vehicle["expected_device"]) expected_entities = mock_vehicle[Platform.SELECT] assert len(entity_registry.entities) == len(expected_entities) check_entities_no_data(hass, entity_registry, expected_entities, STATE_UNKNOWN) @pytest.mark.usefixtures("fixtures_with_invalid_upstream_exception") async def test_select_errors( hass: HomeAssistant, config_entry: ConfigEntry, vehicle_type: str ): """Test for Renault selects with temporary failure.""" entity_registry = mock_registry(hass) device_registry = mock_device_registry(hass) await hass.config_entries.async_setup(config_entry.entry_id) await hass.async_block_till_done() mock_vehicle = MOCK_VEHICLES[vehicle_type] check_device_registry(device_registry, mock_vehicle["expected_device"]) expected_entities = mock_vehicle[Platform.SELECT] assert len(entity_registry.entities) == len(expected_entities) check_entities_unavailable(hass, entity_registry, expected_entities) @pytest.mark.usefixtures("fixtures_with_access_denied_exception") @pytest.mark.parametrize("vehicle_type", ["zoe_40"], indirect=True) async def test_select_access_denied( hass: HomeAssistant, config_entry: ConfigEntry, vehicle_type: str ): """Test for Renault selects with access denied failure.""" entity_registry = mock_registry(hass) device_registry = mock_device_registry(hass) await hass.config_entries.async_setup(config_entry.entry_id) await hass.async_block_till_done() mock_vehicle = MOCK_VEHICLES[vehicle_type] check_device_registry(device_registry, mock_vehicle["expected_device"]) assert len(entity_registry.entities) == 0 @pytest.mark.usefixtures("fixtures_with_not_supported_exception") @pytest.mark.parametrize("vehicle_type", ["zoe_40"], indirect=True) async def test_select_not_supported( hass: HomeAssistant, config_entry: ConfigEntry, vehicle_type: str ): """Test for Renault selects with access denied failure.""" entity_registry = mock_registry(hass) device_registry = mock_device_registry(hass) await 
hass.config_entries.async_setup(config_entry.entry_id) await hass.async_block_till_done() mock_vehicle = MOCK_VEHICLES[vehicle_type] check_device_registry(device_registry, mock_vehicle["expected_device"]) assert len(entity_registry.entities) == 0 @pytest.mark.usefixtures("fixtures_with_data") @pytest.mark.parametrize("vehicle_type", ["zoe_40"], indirect=True) async def test_select_charge_mode(hass: HomeAssistant, config_entry: ConfigEntry): """Test that service invokes renault_api with correct data.""" await hass.config_entries.async_setup(config_entry.entry_id) await hass.async_block_till_done() data = { ATTR_ENTITY_ID: "select.reg_number_charge_mode", ATTR_OPTION: "always", } with patch( "renault_api.renault_vehicle.RenaultVehicle.set_charge_mode", return_value=( schemas.KamereonVehicleHvacStartActionDataSchema.loads( load_fixture("renault/action.set_charge_mode.json") ) ), ) as mock_action: await hass.services.async_call( SELECT_DOMAIN, SERVICE_SELECT_OPTION, service_data=data, blocking=True ) assert len(mock_action.mock_calls) == 1 assert mock_action.mock_calls[0][1] == ("always",)
{ "content_hash": "19a953de2e8bc698557c4daa171af32b", "timestamp": "", "source": "github", "line_count": 155, "max_line_length": 83, "avg_line_length": 35.174193548387095, "alnum_prop": 0.7267057960381511, "repo_name": "nkgilley/home-assistant", "id": "8ab9f116dbab5c01efadc2ca46bb8e5e8f34981a", "size": "5452", "binary": false, "copies": "3", "ref": "refs/heads/dev", "path": "tests/components/renault/test_select.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2963" }, { "name": "PLSQL", "bytes": "840" }, { "name": "Python", "bytes": "51597279" }, { "name": "Shell", "bytes": "6252" } ], "symlink_target": "" }
from .visitor import unwrap


def cron(spec=None, **kwargs):
    """Decorator to mark a task as being on a cron schedule.

    Args:
        spec: A raw cron expression string (e.g. ``'0 * * * *'``).
        **kwargs: Alternatively, schedule fields given as keyword arguments.

    Raises:
        ValueError: If neither a spec string nor keyword arguments are given.
    """
    if not (spec or kwargs):
        raise ValueError('You must pass either a cron spec or keyword arguments')

    def decorator(fn):
        # Attach the schedule to the innermost wrapped function so the
        # marker survives regardless of decorator stacking order.
        original = unwrap(fn)
        original.rundeck_cron = spec or kwargs
        return fn
    return decorator


def hourly(fn):
    """Schedule ``fn`` at the top of every hour."""
    return cron('0 * * * *')(fn)


def daily(fn):
    """Schedule ``fn`` every day at midnight."""
    return cron('0 0 * * *')(fn)


def monthly(fn):
    """Schedule ``fn`` at midnight on the first day of each month.

    Bug fix: the cron day-of-month field ranges 1-31, so the previous
    spec ``'0 0 0 * *'`` (day 0) was invalid; day 1 is the correct
    "first of the month".
    """
    return cron('0 0 1 * *')(fn)
{ "content_hash": "98e76b1e4e1c140bd6835aa7c16495b1", "timestamp": "", "source": "github", "line_count": 23, "max_line_length": 81, "avg_line_length": 22.91304347826087, "alnum_prop": 0.6091081593927894, "repo_name": "coderanger/fabric-rundeck", "id": "80d83a2b05ecdd2265f9576cea41579980245bf3", "size": "1159", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "fabric_rundeck/cron.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "14972" } ], "symlink_target": "" }
""" DataObject is just a dictionary with only a name attribute and rooms for other hooks. $Id: DataObject.py 11 2005-12-24 09:13:33Z duan $ """ __version__='$Revision: 11 $'[11:-2] __author__ = "Duan Guoqiang (mattgduan@gmail.com)" import util class DataObject(dict): """ DataObject is_a dict. """ def __init__(self): dict.__init__(self) self.__name_vars = {'name':''} self.__path_vars = {'path':''} def getNameVars(self): return self.__name_vars def setNameVars(self, vars): assert type(vars) == type({}) self.__name_vars = vars name_vars = property(getNameVars, setNameVars) def getPathVars(self): return self.__path_vars def setPathVars(self, vars): assert type(vars) == type({}) self.__path_vars = vars path_vars = property(getPathVars, setPathVars) def __setitem__(self, key, value): """ All items in a DataObject are lists. """ if type(value) in (type([]), type(())): dict.__setitem__(self, key, value) elif dict.has_key(self, key): v = dict.get(self, key, []) if v: v.append(value) else: dict.__setitem__(self, key, [value]) else: dict.__setitem__(self, key, [value]) def DataSets(self, padding): """ A set of data dict filtered by specified padding. @return: A list of dicts. """ length = 0 data_sets = [] for k in self.keys(): if k.startswith(padding) and \ k[len(padding):].find(".")==-1: key = k.replace(padding, "") v_list = self[k] if not length: length = len(self[k]) if length != len(self[k]): self.debug("Wrong size in DataObject for padding " "'%s' when creating DataSet"%(padding), util.DEBUG_ERROR) return None for i in range(length): if len(data_sets) != length: data_sets.append({}) data_sets[i][key] = v_list[i] return data_sets debug = util.debug
{ "content_hash": "3b363fefeb8f140bd6ae253e1a70636d", "timestamp": "", "source": "github", "line_count": 84, "max_line_length": 70, "avg_line_length": 28.071428571428573, "alnum_prop": 0.4770992366412214, "repo_name": "mattduan/codegen", "id": "69134e747d17ef64eb3f9e051e3340d5c610b4de", "size": "2564", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "DataObject.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "52621" } ], "symlink_target": "" }
""" Test extension array for storing nested data in a pandas container. The ListArray stores an ndarray of lists. """ from __future__ import annotations import numbers import random import string import numpy as np from pandas._typing import type_t from pandas.core.dtypes.base import ExtensionDtype import pandas as pd from pandas.api.types import ( is_object_dtype, is_string_dtype, ) from pandas.core.arrays import ExtensionArray class ListDtype(ExtensionDtype): type = list name = "list" na_value = np.nan @classmethod def construct_array_type(cls) -> type_t[ListArray]: """ Return the array type associated with this dtype. Returns ------- type """ return ListArray class ListArray(ExtensionArray): dtype = ListDtype() __array_priority__ = 1000 def __init__(self, values, dtype=None, copy=False) -> None: if not isinstance(values, np.ndarray): raise TypeError("Need to pass a numpy array as values") for val in values: if not isinstance(val, self.dtype.type) and not pd.isna(val): raise TypeError("All values must be of type " + str(self.dtype.type)) self.data = values @classmethod def _from_sequence(cls, scalars, dtype=None, copy=False): data = np.empty(len(scalars), dtype=object) data[:] = scalars return cls(data) def __getitem__(self, item): if isinstance(item, numbers.Integral): return self.data[item] else: # slice, list-like, mask return type(self)(self.data[item]) def __len__(self) -> int: return len(self.data) def isna(self): return np.array( [not isinstance(x, list) and np.isnan(x) for x in self.data], dtype=bool ) def take(self, indexer, allow_fill=False, fill_value=None): # re-implement here, since NumPy has trouble setting # sized objects like UserDicts into scalar slots of # an ndarary. indexer = np.asarray(indexer) msg = ( "Index is out of bounds or cannot do a " "non-empty take from an empty array." 
) if allow_fill: if fill_value is None: fill_value = self.dtype.na_value # bounds check if (indexer < -1).any(): raise ValueError try: output = [ self.data[loc] if loc != -1 else fill_value for loc in indexer ] except IndexError as err: raise IndexError(msg) from err else: try: output = [self.data[loc] for loc in indexer] except IndexError as err: raise IndexError(msg) from err return self._from_sequence(output) def copy(self): return type(self)(self.data[:]) def astype(self, dtype, copy=True): if isinstance(dtype, type(self.dtype)) and dtype == self.dtype: if copy: return self.copy() return self elif is_string_dtype(dtype) and not is_object_dtype(dtype): # numpy has problems with astype(str) for nested elements return np.array([str(x) for x in self.data], dtype=dtype) return np.array(self.data, dtype=dtype, copy=copy) @classmethod def _concat_same_type(cls, to_concat): data = np.concatenate([x.data for x in to_concat]) return cls(data) def make_data(): # TODO: Use a regular dict. See _NDFrameIndexer._setitem_with_indexer data = np.empty(100, dtype=object) data[:] = [ [random.choice(string.ascii_letters) for _ in range(random.randint(0, 10))] for _ in range(100) ] return data
{ "content_hash": "2c137cc4c04f2d9334df53f95f870f1e", "timestamp": "", "source": "github", "line_count": 132, "max_line_length": 85, "avg_line_length": 28.984848484848484, "alnum_prop": 0.5852064819654992, "repo_name": "pandas-dev/pandas", "id": "f281a0f82e0e735a7c0aefa81a3266854cfebb13", "size": "3826", "binary": false, "copies": "2", "ref": "refs/heads/main", "path": "pandas/tests/extension/list/array.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "512" }, { "name": "C", "bytes": "366145" }, { "name": "CSS", "bytes": "1800" }, { "name": "Cython", "bytes": "1186787" }, { "name": "Dockerfile", "bytes": "1411" }, { "name": "HTML", "bytes": "456531" }, { "name": "Python", "bytes": "18778786" }, { "name": "Shell", "bytes": "10369" }, { "name": "Smarty", "bytes": "8486" }, { "name": "XSLT", "bytes": "1196" } ], "symlink_target": "" }
"""Sphinx configuration for the python-keystoneclient documentation build."""

from __future__ import unicode_literals

import os
import sys

import pbr.version

# Make the package importable from the docs build directory so autodoc works.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                '..', '..')))

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))

# -- General configuration ----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.todo',
              'sphinx.ext.coverage',
              'sphinx.ext.intersphinx',
              'oslosphinx',
              ]

todo_include_todos = True

# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'python-keystoneclient'
copyright = 'OpenStack Contributors'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
version_info = pbr.version.VersionInfo('python-keystoneclient')
# The short X.Y version.
version = version_info.version_string()
# The full version, including alpha/beta/rc tags.
release = version_info.release_string()

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['keystoneclient.']

# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', 'title', 'Authors name', 'manual'
man_pages = [
    ('man/keystone', 'keystone', 'Client for OpenStack Identity API',
     ['OpenStack Contributors'], 1),
]

# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme_path = ["."]
#html_theme = '_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# NOTE(review): shells out to git at build time; the command output becomes
# the page-footer timestamp. Behavior outside a git checkout is unverified
# here -- confirm before changing.
git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
html_last_updated_fmt = os.popen(git_cmd).read()

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_use_modindex = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'python-keystoneclientdoc'

# -- Options for LaTeX output -------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual])
# .
latex_documents = [
    ('index', 'python-keystoneclient.tex',
     'python-keystoneclient Documentation',
     'Nebula Inc, based on work by Rackspace and Jacob Kaplan-Moss',
     'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True

# Cross-project reference targets for sphinx.ext.intersphinx.
intersphinx_mapping = {
    'python': ('http://docs.python.org/', None),
    'osloconfig': ('http://docs.openstack.org/developer/oslo.config/', None),
}
{ "content_hash": "105fc2b8b0b77343bc6bf2056f3928d5", "timestamp": "", "source": "github", "line_count": 216, "max_line_length": 79, "avg_line_length": 32.36574074074074, "alnum_prop": 0.700328994421399, "repo_name": "magic0704/python-keystoneclient", "id": "593d7e235c13b306c8b63b1c5082c906f84f5b81", "size": "7425", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "doc/source/conf.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "1536497" }, { "name": "Shell", "bytes": "7107" } ], "symlink_target": "" }
""" Utilities that help wikibot scripts. This script should be imported in all scripts rather than directly importing pywikibot, as it handles exceptions when importing pywikibot appropriately. """ from __future__ import (division, absolute_import, unicode_literals, print_function) import logging import os import sys import tempfile from six import string_types from file_metadata.utilities import download, retry try: import pywikibot except ImportError: logging.error("To run the script, pywikibot is required. Please install " "it and try again. The nightly version of pywikibot can be " "installed with `pip install git+https://" "gerrit.wikimedia.org/r/pywikibot/core.git#egg=pywikibot`") sys.exit(1) except RuntimeError as err: if (len(err.args) > 0 and "No user-config.py found in directory" in err.args[0]): logging.error("A user-config.py is require to run the pywikibot " "script. To create the user-config.py run the " "command `wikibot-create-config`.") sys.exit(1) import pywikibot.editor from pywikibot import textlib from pywikibot.tools.formatter import color_format def stringify(val): """ Convert to val only if it is not already of type string. This is needed because strings like \xfa (ú) throw error when str() is used on them again. 
""" return val if isinstance(val, string_types) else str(val) @retry(IOError, tries=3) def download_page(page, timeout=None, cache_dir=tempfile.gettempdir()): fname = page.title(as_filename=True).encode('ascii', 'replace') fpath = os.path.join(cache_dir, fname) download(page.fileUrl(), fpath, timeout=timeout) return fpath def put_cats(page, new_cats, summary=None, always=False): line_sep = pywikibot.config.line_separator if not summary: summary = "Adding categories using catfiles" oldtext = page.get() old_cats = textlib.getCategoryLinks(oldtext) old_templates = textlib.extract_templates_and_params(oldtext) old_template_titles = [i[0].lower() for i in old_templates] templates, cats = [], [] for val in new_cats: if val.lower().startswith('category:'): tmp_cat = pywikibot.Category(pywikibot.Link(val, page.site)) if tmp_cat not in old_cats: cats.append(tmp_cat) elif val.lower().startswith('{{'): tmp_templates = textlib.extract_templates_and_params(val) if len(tmp_templates) != 1: logging.warn("There was an error when parsing the template " "'{0}'. Contact the developer, skipping it for " "now.".format(val)) tmp_template = tmp_templates[0] if tmp_template[0].lower() not in old_template_titles: templates.append(val) # Add templates to the top, and the categories to the bottom. 
newtext = oldtext if len(templates) > 0: newtext = line_sep.join(templates) + line_sep + newtext if len(cats) > 0: newtext = (newtext + line_sep + line_sep.join(c.title(asLink=True, underscore=False) for c in cats)) if oldtext == newtext: pywikibot.output("No changes to the page need to be made.") return while True: # Show the diff that has been created pywikibot.output(color_format( '\n\n>>> {lightpurple}{0}{default} <<<', page.title(underscore=False))) pywikibot.showDiff(oldtext, newtext) if always: choice = 'y' else: # Ask user whether to accept choice = pywikibot.input_choice( 'Do you want to accept these changes?', [('Yes', 'y'), ('No', 'n'), ('Edit', 'e'), ('Open browser', 'b')], 'n', automatic_quit=False) # Apply the choice from above if choice == 'n': break elif choice == 'b': pywikibot.bot.open_webbrowser(page) elif choice == 'e': editor = pywikibot.editor.TextEditor() as_edited = editor.edit(newtext) if as_edited and as_edited != newtext: newtext = as_edited elif choice == 'y': try: page.put_async(newtext, summary) except pywikibot.EditConflict: pywikibot.output('Edit conflict! Skipping') except pywikibot.ServerError: pywikibot.output('Server Error! Skipping') except pywikibot.SpamfilterError as e: pywikibot.output( 'Cannot change %s because of blacklist entry %s' % (page.title(), e.url)) except pywikibot.LockedPage: pywikibot.output('Skipping %s (locked page)' % page.title()) except pywikibot.PageNotSaved as error: pywikibot.output('Error putting page: %s' % error.args) break
{ "content_hash": "7f129c8826ceb9226086ab7da64eb618", "timestamp": "", "source": "github", "line_count": 136, "max_line_length": 78, "avg_line_length": 37.6985294117647, "alnum_prop": 0.5923542032377609, "repo_name": "pywikibot-catfiles/file-metadata", "id": "74db29d1c3525890b2cf2f46205c780ce2d82b59", "size": "5152", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "file_metadata/wikibot/utilities.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "278007" }, { "name": "Shell", "bytes": "3500" } ], "symlink_target": "" }
import json import math import multiprocessing import optparse import os from os.path import join import random import shlex import subprocess import sys import time from testrunner.local import execution from testrunner.local import progress from testrunner.local import testsuite from testrunner.local import utils from testrunner.local import verbose from testrunner.objects import context # Base dir of the v8 checkout to be used as cwd. BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) ARCH_GUESS = utils.DefaultArch() DEFAULT_TESTS = ["mjsunit", "webkit"] TIMEOUT_DEFAULT = 60 TIMEOUT_SCALEFACTOR = {"debug" : 4, "release" : 1 } MODE_FLAGS = { "debug" : ["--nohard-abort", "--nodead-code-elimination", "--nofold-constants", "--enable-slow-asserts", "--debug-code", "--verify-heap", "--noconcurrent-recompilation"], "release" : ["--nohard-abort", "--nodead-code-elimination", "--nofold-constants", "--noconcurrent-recompilation"]} SUPPORTED_ARCHS = ["android_arm", "android_ia32", "arm", "ia32", "ppc", "ppc64", "s390", "s390x", "mipsel", "nacl_ia32", "nacl_x64", "x64"] # Double the timeout for these: SLOW_ARCHS = ["android_arm", "android_ia32", "arm", "mipsel", "nacl_ia32", "nacl_x64"] MAX_DEOPT = 1000000000 DISTRIBUTION_MODES = ["smooth", "random"] class RandomDistribution: def __init__(self, seed=None): seed = seed or random.randint(1, sys.maxint) print "Using random distribution with seed %d" % seed self._random = random.Random(seed) def Distribute(self, n, m): if n > m: n = m return self._random.sample(xrange(1, m + 1), n) class SmoothDistribution: """Distribute n numbers into the interval [1:m]. F1: Factor of the first derivation of the distribution function. F2: Factor of the second derivation of the distribution function. With F1 and F2 set to 0, the distribution will be equal. 
""" def __init__(self, factor1=2.0, factor2=0.2): self._factor1 = factor1 self._factor2 = factor2 def Distribute(self, n, m): if n > m: n = m if n <= 1: return [ 1 ] result = [] x = 0.0 dx = 1.0 ddx = self._factor1 dddx = self._factor2 for i in range(0, n): result += [ x ] x += dx dx += ddx ddx += dddx # Project the distribution into the interval [0:M]. result = [ x * m / result[-1] for x in result ] # Equalize by n. The closer n is to m, the more equal will be the # distribution. for (i, x) in enumerate(result): # The value of x if it was equally distributed. equal_x = i / float(n - 1) * float(m - 1) + 1 # Difference factor between actual and equal distribution. diff = 1 - (x / equal_x) # Equalize x dependent on the number of values to distribute. result[i] = int(x + (i + 1) * diff) return result def Distribution(options): if options.distribution_mode == "random": return RandomDistribution(options.seed) if options.distribution_mode == "smooth": return SmoothDistribution(options.distribution_factor1, options.distribution_factor2) def BuildOptions(): result = optparse.OptionParser() result.add_option("--arch", help=("The architecture to run tests for, " "'auto' or 'native' for auto-detect"), default="ia32,x64,arm") result.add_option("--arch-and-mode", help="Architecture and mode in the format 'arch.mode'", default=None) result.add_option("--asan", help="Regard test expectations for ASAN", default=False, action="store_true") result.add_option("--buildbot", help="Adapt to path structure used on buildbots", default=False, action="store_true") result.add_option("--dcheck-always-on", help="Indicates that V8 was compiled with DCHECKs enabled", default=False, action="store_true") result.add_option("--command-prefix", help="Prepended to each shell command used to run a test", default="") result.add_option("--coverage", help=("Exponential test coverage " "(range 0.0, 1.0) -- 0.0: one test, 1.0 all tests (slow)"), default=0.4, type="float") 
result.add_option("--coverage-lift", help=("Lifts test coverage for tests " "with a small number of deopt points (range 0, inf)"), default=20, type="int") result.add_option("--download-data", help="Download missing test suite data", default=False, action="store_true") result.add_option("--distribution-factor1", help=("Factor of the first " "derivation of the distribution function"), default=2.0, type="float") result.add_option("--distribution-factor2", help=("Factor of the second " "derivation of the distribution function"), default=0.7, type="float") result.add_option("--distribution-mode", help=("How to select deopt points " "for a given test (smooth|random)"), default="smooth") result.add_option("--dump-results-file", help=("Dump maximum number of " "deopt points per test to a file")) result.add_option("--extra-flags", help="Additional flags to pass to each test command", default="") result.add_option("--isolates", help="Whether to test isolates", default=False, action="store_true") result.add_option("-j", help="The number of parallel tasks to run", default=0, type="int") result.add_option("-m", "--mode", help="The test modes in which to run (comma-separated)", default="release,debug") result.add_option("--outdir", help="Base directory with compile output", default="out") result.add_option("-p", "--progress", help=("The style of progress indicator" " (verbose, dots, color, mono)"), choices=progress.PROGRESS_INDICATORS.keys(), default="mono") result.add_option("--shard-count", help="Split testsuites into this number of shards", default=1, type="int") result.add_option("--shard-run", help="Run this shard from the split up tests.", default=1, type="int") result.add_option("--shell-dir", help="Directory containing executables", default="") result.add_option("--seed", help="The seed for the random distribution", type="int") result.add_option("-t", "--timeout", help="Timeout in seconds", default= -1, type="int") result.add_option("-v", "--verbose", help="Verbose 
output", default=False, action="store_true") result.add_option("--random-seed", default=0, dest="random_seed", help="Default seed for initializing random generator") return result def ProcessOptions(options): global VARIANT_FLAGS # Architecture and mode related stuff. if options.arch_and_mode: tokens = options.arch_and_mode.split(".") options.arch = tokens[0] options.mode = tokens[1] options.mode = options.mode.split(",") for mode in options.mode: if not mode.lower() in ["debug", "release"]: print "Unknown mode %s" % mode return False if options.arch in ["auto", "native"]: options.arch = ARCH_GUESS options.arch = options.arch.split(",") for arch in options.arch: if not arch in SUPPORTED_ARCHS: print "Unknown architecture %s" % arch return False # Special processing of other options, sorted alphabetically. options.command_prefix = shlex.split(options.command_prefix) options.extra_flags = shlex.split(options.extra_flags) if options.j == 0: options.j = multiprocessing.cpu_count() while options.random_seed == 0: options.random_seed = random.SystemRandom().randint(-2147483648, 2147483647) if not options.distribution_mode in DISTRIBUTION_MODES: print "Unknown distribution mode %s" % options.distribution_mode return False if options.distribution_factor1 < 0.0: print ("Distribution factor1 %s is out of range. Defaulting to 0.0" % options.distribution_factor1) options.distribution_factor1 = 0.0 if options.distribution_factor2 < 0.0: print ("Distribution factor2 %s is out of range. Defaulting to 0.0" % options.distribution_factor2) options.distribution_factor2 = 0.0 if options.coverage < 0.0 or options.coverage > 1.0: print ("Coverage %s is out of range. Defaulting to 0.4" % options.coverage) options.coverage = 0.4 if options.coverage_lift < 0: print ("Coverage lift %s is out of range. 
Defaulting to 0" % options.coverage_lift) options.coverage_lift = 0 return True def ShardTests(tests, shard_count, shard_run): if shard_count < 2: return tests if shard_run < 1 or shard_run > shard_count: print "shard-run not a valid number, should be in [1:shard-count]" print "defaulting back to running all tests" return tests count = 0 shard = [] for test in tests: if count % shard_count == shard_run - 1: shard.append(test) count += 1 return shard def Main(): # Use the v8 root as cwd as some test cases use "load" with relative paths. os.chdir(BASE_DIR) parser = BuildOptions() (options, args) = parser.parse_args() if not ProcessOptions(options): parser.print_help() return 1 exit_code = 0 suite_paths = utils.GetSuitePaths(join(BASE_DIR, "test")) if len(args) == 0: suite_paths = [ s for s in suite_paths if s in DEFAULT_TESTS ] else: args_suites = set() for arg in args: suite = arg.split(os.path.sep)[0] if not suite in args_suites: args_suites.add(suite) suite_paths = [ s for s in suite_paths if s in args_suites ] suites = [] for root in suite_paths: suite = testsuite.TestSuite.LoadTestSuite( os.path.join(BASE_DIR, "test", root)) if suite: suites.append(suite) if options.download_data: for s in suites: s.DownloadData() for mode in options.mode: for arch in options.arch: try: code = Execute(arch, mode, args, options, suites, BASE_DIR) exit_code = exit_code or code except KeyboardInterrupt: return 2 return exit_code def CalculateNTests(m, options): """Calculates the number of tests from m deopt points with exponential coverage. The coverage is expected to be between 0.0 and 1.0. The 'coverage lift' lifts the coverage for tests with smaller m values. 
""" c = float(options.coverage) l = float(options.coverage_lift) return int(math.pow(m, (m * c + l) / (m + l))) def Execute(arch, mode, args, options, suites, workspace): print(">>> Running tests for %s.%s" % (arch, mode)) dist = Distribution(options) shell_dir = options.shell_dir if not shell_dir: if options.buildbot: shell_dir = os.path.join(workspace, options.outdir, mode) mode = mode.lower() else: shell_dir = os.path.join(workspace, options.outdir, "%s.%s" % (arch, mode)) shell_dir = os.path.relpath(shell_dir) # Populate context object. mode_flags = MODE_FLAGS[mode] timeout = options.timeout if timeout == -1: # Simulators are slow, therefore allow a longer default timeout. if arch in SLOW_ARCHS: timeout = 2 * TIMEOUT_DEFAULT; else: timeout = TIMEOUT_DEFAULT; timeout *= TIMEOUT_SCALEFACTOR[mode] ctx = context.Context(arch, mode, shell_dir, mode_flags, options.verbose, timeout, options.isolates, options.command_prefix, options.extra_flags, False, # Keep i18n on by default. options.random_seed, True, # No sorting of test cases. 0, # Don't rerun failing tests. 0, # No use of a rerun-failing-tests maximum. False, # No predictable mode. False, # No no_harness mode. False, # Don't use perf data. False) # Coverage not supported. # Find available test suites and read test cases from them. variables = { "arch": arch, "asan": options.asan, "deopt_fuzzer": True, "gc_stress": False, "gcov_coverage": False, "ignition": False, "ignition_turbofan": False, "isolates": options.isolates, "mode": mode, "no_i18n": False, "no_snap": False, "simulator": utils.UseSimulator(arch), "system": utils.GuessOS(), "tsan": False, "msan": False, "dcheck_always_on": options.dcheck_always_on, "novfp3": False, "predictable": False, "byteorder": sys.byteorder, } all_tests = [] num_tests = 0 test_id = 0 # Remember test case prototypes for the fuzzing phase. 
test_backup = dict((s, []) for s in suites) for s in suites: s.ReadStatusFile(variables) s.ReadTestCases(ctx) if len(args) > 0: s.FilterTestCasesByArgs(args) all_tests += s.tests s.FilterTestCasesByStatus(False) test_backup[s] = s.tests analysis_flags = ["--deopt-every-n-times", "%d" % MAX_DEOPT, "--print-deopt-stress"] s.tests = [ t.CopyAddingFlags(t.variant, analysis_flags) for t in s.tests ] num_tests += len(s.tests) for t in s.tests: t.id = test_id test_id += 1 if num_tests == 0: print "No tests to run." return 0 print(">>> Collection phase") progress_indicator = progress.PROGRESS_INDICATORS[options.progress]() runner = execution.Runner(suites, progress_indicator, ctx) exit_code = runner.Run(options.j) print(">>> Analysis phase") num_tests = 0 test_id = 0 for s in suites: test_results = {} for t in s.tests: for line in t.output.stdout.splitlines(): if line.startswith("=== Stress deopt counter: "): test_results[t.path] = MAX_DEOPT - int(line.split(" ")[-1]) for t in s.tests: if t.path not in test_results: print "Missing results for %s" % t.path if options.dump_results_file: results_dict = dict((t.path, n) for (t, n) in test_results.iteritems()) with file("%s.%d.txt" % (dump_results_file, time.time()), "w") as f: f.write(json.dumps(results_dict)) # Reset tests and redistribute the prototypes from the collection phase. s.tests = [] if options.verbose: print "Test distributions:" for t in test_backup[s]: max_deopt = test_results.get(t.path, 0) if max_deopt == 0: continue n_deopt = CalculateNTests(max_deopt, options) distribution = dist.Distribute(n_deopt, max_deopt) if options.verbose: print "%s %s" % (t.path, distribution) for i in distribution: fuzzing_flags = ["--deopt-every-n-times", "%d" % i] s.tests.append(t.CopyAddingFlags(t.variant, fuzzing_flags)) num_tests += len(s.tests) for t in s.tests: t.id = test_id test_id += 1 if num_tests == 0: print "No tests to run." 
return 0 print(">>> Deopt fuzzing phase (%d test cases)" % num_tests) progress_indicator = progress.PROGRESS_INDICATORS[options.progress]() runner = execution.Runner(suites, progress_indicator, ctx) code = runner.Run(options.j) return exit_code or code if __name__ == "__main__": sys.exit(Main())
{ "content_hash": "a32026a853112aec0ec8e7c59997f300", "timestamp": "", "source": "github", "line_count": 469, "max_line_length": 80, "avg_line_length": 34.430703624733475, "alnum_prop": 0.5974114441416893, "repo_name": "kingland/runtime", "id": "928c71b743a44a5871c5434412f51c388f55c142", "size": "17745", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "deps/v8/tools/run-deopt-fuzzer.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "29659" }, { "name": "C", "bytes": "853" }, { "name": "C++", "bytes": "2140348" }, { "name": "JavaScript", "bytes": "508976" }, { "name": "Python", "bytes": "6268" }, { "name": "Shell", "bytes": "3888" } ], "symlink_target": "" }
""" Run unittests for a given set of functions """ import os import subprocess as sp import sys from termcolor import cprint from .utils import env_manager from .utils.findfunc import find_manifest def fancy_print(header, msg): cprint(header + ": ", 'blue', end='') cprint(msg, 'yellow') def setup_parser(parser): parser.add_argument('function_names', nargs='*', type=str, help='the base(s) name of the function') def run(args): original_path = list(sys.path) for func in args.function_names: manifest = find_manifest(func, fail_if_missing=True) env = env_manager.EnvManager(manifest.runtime) if args.verbose: fancy_print("Python", env.python) fancy_print("Lib dir", manifest.lib_dir) os.chdir(manifest.basedir) os.environ['PYTHONPATH'] = ':'.join([str(manifest.lib_dir)] + original_path) if args.verbose > 1: fancy_print("PYTHONPATH", os.environ['PYTHONPATH']) test_file = os.path.join(manifest.basedir, f'test_{manifest.short_name}.py') fancy_print("Testing", test_file) sp.call([env.python, '-m', 'unittest', test_file]) print("")
{ "content_hash": "287fbdda820f62d48f151dbb5b8384cc", "timestamp": "", "source": "github", "line_count": 42, "max_line_length": 103, "avg_line_length": 28.142857142857142, "alnum_prop": 0.6395939086294417, "repo_name": "balihoo/blambda", "id": "3bef3fdbbc4a75241f8ca38561cd97130f4d9169", "size": "1182", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "blambda/local_test.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "94064" }, { "name": "Shell", "bytes": "2510" } ], "symlink_target": "" }
print("################################################################################") print("# Implementation of a multivariate pattern analysis based on the scikitlearn ") print("# toolbox (http://scikit-learn.org/stable/). It reads a matlab file containing ") print("# Xm: a matrix of trials x chans x timepoint. ") print("# y: a vector indicating the class of each trial ") print("# The classification algorithm is based on a support vector machine. ") print("# (c) Jean-Remi King 2012, jeanremi.king [at] gmail.com ") print("################################################################################") ################################################################################ print("LIBRARY") import sys as sys import numpy as np from scipy import stats from sklearn import svm from sklearn.cross_validation import StratifiedKFold, LeaveOneOut, KFold from sklearn.feature_selection import SelectPercentile, f_classif import scipy.io as sio from sklearn.preprocessing import Scaler ################################################################################ print("INPUT DATA") #-- get argument to load specific file filenameX = str(sys.argv[1]) filenamey = str(sys.argv[2]) print(filenameX) print(filenamey) #-- Load data into python mat = sio.loadmat(filenameX) Xm_all = mat["Xm"] # data #-- load classification parameters mat = sio.loadmat(filenamey) path = mat["path"][0] nameX = mat["nameX"][0] namey = mat["namey"][0] folding = mat["folding"][0] n_splits = mat["n_splits"] # svm penalization parameter n_splits = np.reshape(n_splits, n_splits.size) n_folds = mat["n_folds"] # fold number n_folds = np.reshape(n_folds, n_folds.size) svm_C = mat["C"] # svm penalization parameter svm_C = np.reshape(svm_C, svm_C.size) compute_probas = mat["compute_probas"] # svm penalization parameter compute_probas = np.reshape(compute_probas, compute_probas.size) compute_predict = mat["compute_predict"] # svm penalization parameter compute_predict = np.reshape(compute_predict, 
compute_predict.size) generalize_time = mat["generalize_time"] # svm penalization parameter generalize_time = np.reshape(generalize_time, generalize_time.size) fs_n = mat["fs"] # feature selection fs_n = np.reshape(fs_n, fs_n.size) dims = mat["dims"] # select time windows to compute dims = np.reshape(dims, dims.size) - 1 # reshape for skl compatibility y_all = mat["y"] # class used for train and test y_all = np.reshape(y_all, y_all.size) # reshape for skl compatibility y2_all = mat["y2"] # class used for sample weights y2_all = np.reshape(y2_all, y2_all.size) # reshape for skl compatibility #-- build training and generalizing classes Xm = Xm_all[y_all > 0, :, :] # training categories Xmg = Xm_all[y_all < 0, :, :] # generalization categories y = y_all[y_all > 0] yg = y_all[y_all < 0] y2 = y2_all[y_all > 0] n_samples, n_features, n_dims = Xm.shape n_samplesg, n_featuresg, n_dimsg = Xmg.shape n_classes = np.unique(y).shape[0] #deal with sample_weight sample_weight = np.ones(y.shape[0]) classes = np.unique(y2) for c in range(classes.shape[0]): sample_weight[y2 == classes[c]] = 1. 
/ (np.sum(y2 == classes[c])) ################################################################################ print("PREPARE CLASSIFICATION") #--crossvalidation if folding == 'stratified': cv = StratifiedKFold(y, k=n_folds) elif folding == 'kfolding': cv = KFold(n=y.shape[0], k=n_folds) elif folding == 'leaveoneout': n_folds[0] = y.shape[0] cv = LeaveOneOut(n=y.shape[0]) else: print("unknown crossvalidation method!") #-- classifier clf = svm.SVC(kernel='linear', probability=True, C=svm_C) #-- normalizer scaler = Scaler() #-- feature selection fs = SelectPercentile(f_classif, percentile=fs_n) #-- grid search #parameters = {'svm__C': (1e-6,1e-3, 1e-1, .4)} #clf = GridSearchCV(svm, parameters,n_jobs=1) #-- initialize results predict = np.zeros([n_splits, n_samples, n_dims]) ** np.nan probas = np.zeros([n_splits, n_samples, n_dims, n_classes]) ** np.nan predictg = np.zeros([n_splits, n_samplesg, n_dimsg, n_folds]) ** np.nan probasg = np.zeros([n_splits, n_samplesg, n_dimsg, n_classes, n_folds]) ** np.nan coef = np.empty([n_splits, n_folds, n_dims, n_classes * (n_classes - 1) / 2, n_features]) ** 0 all_folds = np.zeros([n_splits, n_folds, n_samples]) ** np.nan y_shfl = np.copy(y) Xm_shfl = np.copy(Xm) sw_shfl = np.copy(sample_weight) ################################################################################ print("CLASSIFY...") #-- shufflesplit # repeat stratified kfolding for getting rid off the folding artefacts for split in range(n_splits): print(split) # shuffle order new_order = np.array(range(y.shape[0])) if split > 0: np.random.shuffle(new_order) y_shfl[new_order] = np.copy(y) Xm_shfl[new_order, :, :] = np.copy(Xm) sw_shfl[new_order] = np.copy(sample_weight) cv = StratifiedKFold(y_shfl, k=n_folds) # Stratified crossvalidation for fold, (train, test) in enumerate(cv): print(fold) all_folds[split, fold, train] = 1 all_folds[split, fold, test] = 0 for d in range(0, dims.shape[0]): Xtrain = Xm_shfl[train, :, dims[d]] ytrain = y_shfl[train] sw_train = 
sw_shfl[train] # (deal with NaN in training) ytrain = ytrain[~np.isnan(np.nansum(Xtrain, axis=1))] sw_train = sw_train[~np.isnan(np.nansum(Xtrain, axis=1))] Xtrain = Xtrain[~np.isnan(np.nansum(Xtrain, axis=1)), :] if np.unique(ytrain).shape[0] > 1: # feature selection (find the 50% most discriminative channels) fs.fit(Xtrain, ytrain) # find Xtrain = fs.transform(Xtrain) # remove unnecessary channels # normalization scaler.fit(Xtrain) # find Xtrain = scaler.transform(Xtrain) # apply zscore # SVM fit clf.fit(Xtrain, ytrain, sample_weight=sw_train) # retrieve hyperplan feature identification coef[split, fold, dims[d], :, :] = 0 # initialize #--- univariate uni_features = fs.pvalues_ <= stats.scoreatpercentile(fs.pvalues_, fs.percentile) #--- multivariate coef[split, fold, dims[d], :, uni_features] = clf.coef_.T # predict cross val (deal with NaN in testing) Xtest = Xm_shfl[test, :, dims[d]] test_nan = np.isnan(np.nansum(Xtest, axis=1)) Xtest = fs.transform(Xtest) Xtest = scaler.transform(Xtest) if (Xtest.shape[0] - np.sum(test_nan)) > 0: if compute_predict: predict[split, test[~test_nan], dims[d]] = clf.predict(Xtest[~test_nan, :]) if compute_probas: probas[split, test[~test_nan], dims[d], :] = clf.predict_proba(Xtest[~test_nan, :]) if np.sum(test_nan) > 0: probas[split, test[test_nan], dims[d], :] = np.nan # predict cross val on generalization sample (deal with NaN in testing) Xtestg = Xmg[:, :, dims[d]] test_nan = np.isnan(np.nansum(Xtestg, axis=1)) Xtestg = fs.transform(Xtestg) Xtestg = scaler.transform(Xtestg) if (Xtestg.shape[0] - np.sum(test_nan)) > 0: if compute_predict: predictg[split, ~test_nan, dims[d], fold] = clf.predict(Xtestg[~test_nan, :]) if compute_probas: probasg[split, ~test_nan, dims[d], :, fold] = clf.predict_proba(Xtestg[~test_nan, :]) if np.sum(test_nan) > 0: probasg[split, test_nan, dims[d], :, fold] = np.nan #-- reorder results predict[split, :, :] = predict[split, new_order, :] probas[split, :, :, :] = probas[split, new_order, :, :] 
all_folds[split, :, :] = all_folds[split, :, new_order].T ################################################################################ print("EXPORT DATA") mat['predict'] = predict mat['predictg'] = predictg mat['probas'] = probas mat['probasg'] = probasg mat['coef'] = coef mat['all_folds'] = all_folds mat['y_all'] = y_all mat['y'] = y mat['yg'] = yg mat['filenameX'] = filenameX mat['filenamey'] = filenamey output = path + nameX + '_' + namey + "_results.mat" print(output) sio.savemat(output, mat)
{ "content_hash": "05e224cbceda43287cf169cf31078855", "timestamp": "", "source": "github", "line_count": 204, "max_line_length": 109, "avg_line_length": 42.5, "alnum_prop": 0.5657439446366782, "repo_name": "cjayb/kingjr_natmeg_arhus", "id": "6c102ff8811f47292db141e11ffcdadbdee4f208", "size": "8670", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "JR_toolbox/skl_svm.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "2647" }, { "name": "C++", "bytes": "9492" }, { "name": "M", "bytes": "73352" }, { "name": "Matlab", "bytes": "982432" }, { "name": "Mercury", "bytes": "23766" }, { "name": "Objective-C", "bytes": "2404" }, { "name": "Python", "bytes": "182274" } ], "symlink_target": "" }
class Holder(object): def __init__(self, value, start_line=None, start_column=None, end_line=None, end_column=None, filename=None): self.value = value self.start_line = start_line self.start_column = start_column self.end_line = end_line self.end_column = end_column self.filename = filename def __str__(self): return '{0}<{1}.{2}-{3}.{4} [{5}]>'.format( self.value, self.start_line, self.start_column, self.end_line, self.end_column, self.filename) def __repr__(self): return self.__str__() def __hash__(self): return hash(self.value) def __eq__(self, other): return isinstance(other, Holder) and self.value == other.value def __contains__(self, key): key_holder, value_holder = self.get_item(key) return value_holder is not None def get_item(self, key): if not isinstance(self.value, dict): raise ValueError('Value is expected to be of type dict while it' 'is in fact of type {0}' .format(type(self.value).__name__)) for key_holder, value_holder in self.value.iteritems(): if key_holder.value == key: return key_holder, value_holder return None, None def restore(self): if isinstance(self.value, dict): return dict((key_holder.restore(), value_holder.restore()) for key_holder, value_holder in self.value.iteritems()) elif isinstance(self.value, list): return [value_holder.restore() for value_holder in self.value] elif isinstance(self.value, set): return set((value_holder.restore() for value_holder in self.value)) else: return self.value @staticmethod def of(obj, filename=None): if isinstance(obj, Holder): return obj if isinstance(obj, dict): result = dict((Holder.of(key, filename=filename), Holder.of(value, filename=filename)) for key, value in obj.iteritems()) elif isinstance(obj, list): result = [Holder.of(item, filename=filename) for item in obj] elif isinstance(obj, set): result = set((Holder.of(item, filename=filename) for item in obj)) else: result = obj return Holder(result, filename=filename) def copy(self): return Holder(value=self.value, start_line=self.start_line, 
start_column=self.start_column, end_line=self.end_line, end_column=self.end_column, filename=self.filename)
{ "content_hash": "9ea035c40619e7d4a5b97cb7fc683e57", "timestamp": "", "source": "github", "line_count": 82, "max_line_length": 79, "avg_line_length": 35.73170731707317, "alnum_prop": 0.5290102389078498, "repo_name": "cloudify-cosmo/cloudify-dsl-parser", "id": "1ccff6eda2308afea1b9c1541f2e10d790511879", "size": "3575", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "dsl_parser/holder.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Makefile", "bytes": "1040" }, { "name": "Python", "bytes": "929968" } ], "symlink_target": "" }
'''Logicni elementi za Boolove formule.''' class Var: '''Razred za spremenljivke.''' def __init__(self, name): self.name = name def evaluate(self, assignments={}): '''Funkcija, ki vraca vrednost spremenljivke.''' return (self.setVariables(assignments)) def setVariables(self, assignments={}): '''Vrne true(), false() ali kopijo sebe.''' if(self.name in assignments): if(assignments[self.name]): return true() else: return false() return Var(self.name) def simplify(self): '''Poenostavitev.''' return self def nnf(self): '''Negacijska normalna oblika.''' return self def cnf(self): '''Konjuktivna normalna oblika. Formula mora biti v negacijski normalni obliki.''' return And([Or([self])]) def __unicode__(self): return unicode(self.name) class Or: '''Razred za disjunkcijo.''' def __init__(self, l): self.clause = l def evaluate(self, assignments={}): '''Funkcija, ki delno evaluira formulo in jo poenostavi.''' return (self.setVariables(assignments)).simplify() def setVariables(self, assignments={}): '''Funkcija, ki delno evaluira formulo.''' return Or(map(lambda x: x.setVariables(assignments), self.clause)) def simplify(self): '''Poenostavitev.''' if len(self.clause) == 0: return false() ret = Or(map(lambda x: x.simplify(), self.clause)) #odstrani neresnice ret.clause = filter(lambda x: x.__class__.__name__ != "false", ret.clause) #absorpcija (A or True = True) if "true" in map(lambda x: x.__class__.__name__, ret.clause): return true() #identiteta za disjunkcijo if len(ret.clause) == 0: return false() return ret def deduplicate(self): '''Funkcija, ki odstrani podvojene literale.''' i = 0 varlist = set() negvars = set() while i<len(self.clause): if self.clause[i].__class__.__name__ == "Var": if self.clause[i].name in varlist: del self.clause[i] i-=1 else: varlist.add(self.clause[i].name) if self.clause[i].__class__.__name__ == "Not" and self.clause[i].clause.__class__.__name__ == "Var": if self.clause[i].clause.name in negvars: del self.clause[i] i-=1 else: 
negvars.add(self.clause[i].clause.name) i+=1 return self def nnf(self): '''Negacijska normalna oblika.''' return Or(map(lambda x: x.nnf(), self.clause)) def cnf(self): '''Konjuktivna normalna oblika. Formula mora biti v negacijski normalni obliki.''' #================================================= #PSEUDOCODE #================================================= #remove all nested or classes #apply distribution if there are and classes #if there are no and classes only thing which remains are literals #return and(or(literals)) #remove nested ors #================================================= newselfclauses = self.clause ors = True while ors: ors = False newclause = [] for i in xrange(len(newselfclauses)): if newselfclauses[i].__class__.__name__ == "Or": newclause += newselfclauses[i].clause ors = True else: newclause.append(newselfclauses[i]) newselfclauses = newclause #uporabi distributivni zakon for i in xrange(len(newclause)): if newclause[i].__class__.__name__ == "And": complement = newclause[:i] + newclause[i+1:] return And(map(lambda x: Or(complement+[x]).deduplicate(), newclause[i].clause)).cnf() #vrni formulo v konjuktivni normalni obliki return And([Or(newclause).deduplicate()]) def __unicode__(self): return "(" + u" \u2228 ".join(map(unicode, self.clause)) + u")" class And: '''Razred za konjunkcijo.''' def __init__(self, l): self.clause = l def evaluate(self, assignments={}): '''Funkcija, ki delno evaluira formulo in jo poenostavi.''' return (self.setVariables(assignments)).simplify() def setVariables(self, assignments={}): '''Funkcija, ki delno evaluira formulo.''' return And(map(lambda x: x.setVariables(assignments), self.clause)) def simplify(self): '''Poenostavitev.''' if len(self.clause) == 0: return true() ret = And(map(lambda x: x.simplify(), self.clause)) #odstrani resnice ret.clause = filter(lambda x: x.__class__.__name__ != "true", ret.clause) #absorpcija (A and False = False) if "false" in map(lambda x: x.__class__.__name__, ret.clause): 
return false() #identiteta za konjunkcijo if len(ret.clause) == 0: return true() return ret def deduplicate(self): '''Funkcija, ki odstrani podvojene literale.''' self.clause = map(lambda x: x.deduplicate(), self.clause) return self def nnf(self): '''Negacijska normalna oblika.''' return And(map(lambda x: x.nnf(), self.clause)) def cnf(self): '''Konjuktivna normalna oblika. Formula mora biti v negacijski normalni obliki.''' #================================================= #PSEUDOCODE #================================================= #propagate cnf #remove all nested ands #only thing which remains are or classes - return And(ors) #================================================= #pretvori stavke v konjuktivno normalno obliko newselfclauses = map(lambda x: x.cnf(), self.clause) #odstrani vgnezdene konjunkcije ands = True while ands: ands = False newclause = [] for i in xrange(len(newselfclauses)): if newselfclauses[i].__class__.__name__ == "And": newclause += newselfclauses[i].clause ands = True else: newclause.append(newselfclauses[i]) newselfclauses = newclause #vrni formulo v konjuktivni normalni obliki return And(newclause) def __unicode__(self): return u"(" + u" \u2227 ".join(map(unicode, self.clause)) + u")" class Not: '''Razred za negacijo.''' def __init__(self, l): self.clause = l def evaluate(self, assignments={}): '''Funkcija, ki delno evaluira formulo in jo poenostavi.''' return (self.setVariables(assignments)).simplify() def setVariables(self, assignments={}): '''Funkcija, ki delno evaluira formulo.''' return Not(self.clause.setVariables(assignments)) def simplify(self): '''Poenostavitev.''' formula = self.clause.simplify() name = formula.__class__.__name__ if name == "false": return true() elif name == "true": return false() else: return Not(formula) def deduplicate(self): return self def nnf(self): '''Negacijska normalna oblika.''' #DeMorganov zakon if self.clause.__class__.__name__ == "And": return Or(map(lambda x: Not(x), 
self.clause.clause)).nnf() #DeMorganov zakon if self.clause.__class__.__name__ == "Or": return And(map(lambda x: Not(x), self.clause.clause)).nnf() #dvojna negacija if self.clause.__class__.__name__ == "Not": return self.clause.clause.nnf() #neresnica postane resnica if self.clause.__class__.__name__ == "false": return true() #resnica postane neresnica if self.clause.__class__.__name__ == "true": return false() return Not(self.clause.nnf()) def cnf(self): '''Konjuktivna normalna oblika. Formula mora biti v negacijski normalni obliki.''' return And([Or([self])]) def __unicode__(self): return u'\u00ac(' + unicode(self.clause) + u")" class true: '''Razred za resnico.''' def evaluate(self, assignments={}): return true() def simplify(self): return self def deduplicate(self): return self def nnf(self): return self def cnf(self): return And([Or([self])]) def setVariables(self, assignments={}): return true() def __unicode__(self): return u"\u22a4" class false: '''Razred za neresnico.''' def evaluate(self, assignments={}): return false() def simplify(self): return self def deduplicate(self): return self def nnf(self): return self def cnf(self): return And([Or([self])]) def setVariables(self, assignments={}): return false() def __unicode__(self): return u"\u22a5" class FlatCNF: '''Ucinkovitejsa reprezentacija za formule v konjunktivni normalni obliki. 
Formulo predstavimo kot seznam mnozic stevil.''' def __init__(self, formula = None): self.clauses = [] self.name_mapping = {} if formula is None: return if formula.__class__.__name__ in ("file", "str", "list", "tuple"): self._makefromCNF(formula) elif formula.__class__.__name__ in ("And", "Or", "Not", "false", "true", "Var"): self._makefromformula(formula) else: raise Exception("FlatCNF can only be constructed from formulas, CNF files or CNF lists") self.clauses.sort(key = lambda x: len(x)) def _makefromformula(self, formula): '''Strukturo sestavimo iz dane poljubne formule.''' f = formula.nnf().cnf() #Nastejemo vse spremenljivke, jih preslikamo v stevila (negativna za negirane spremenljivke) #ter sestavimo seznam mnozic, ki predstavlja CNF dane formule. varnames = {} nextnumber = 1 for or_node in f.clause: node = set() tautology = False for var in or_node.clause: if var.__class__.__name__ == "Not": name = var.clause.name negate = -1 elif var.__class__.__name__ == "false": continue elif var.__class__.__name__ == "true": tautology = True continue else: name = var.name negate = 1 if name not in varnames: varnum = nextnumber nextnumber += 1 varnames[name] = varnum else: varnum = varnames[name] self.name_mapping[varnum] = name varnum *= negate node.add(varnum) if not tautology: self.clauses.append(node) def _makefromCNF(self, handle): '''Strukturo sestavimo iz formule, ki je ze v CNF obliki, z ostevilcenimi spremenljivkami. 
Formula je lahko dana v datoteki (po imenu ali file objektu) ali ze kot seznam seznamov stevil.''' if handle.__class__.__name__ == "str": handle = open(handle, "rt") if handle.__class__.__name__ == "file": for line in handle: self.clauses.append(set( map(int,line.split()) )) else: for or_node in handle: self.clauses.append(set(or_node)) def __unicode__(self): or_list = [] for or_node in self.clauses: var_list = [] for var in or_node: if abs(var) in self.name_mapping: name = self.name_mapping[abs(var)] else: name = str(abs(var)) var_list.append(u'\u00ac(' + name + u")" if var<0 else unicode(name)) or_list.append(u"(" + u" \u2228 ".join(var_list) + u")") return u"(" + u" \u2227 ".join(or_list) + u")" def evaluate(self, var, value): '''Delno evaluira formulo z dano spremenljivko in vrednostjo, ter pri tem pazi na morebitne prazne ali-stavke. Ce je nova formula neresljiva, vrne False, sicer pa novo formulo.''' if (var>=0) != value: var = -var for or_node in self.clauses: if var in or_node: # this entire or-clause becomes a tautology with the given evaluation or_node.clear() or_node.add('x') # (false OR expr...) == expr... 
if -var in or_node and len(or_node) == 1: return False or_node.discard(-var) self.clauses = filter(lambda x: 'x' not in x and len(x) > 0, self.clauses) return self def dump(self, filename): with open(filename,"wt") as f: f.write("p cnf %d %d\n"%(len(self.name_mapping), len(self.clauses))) f.write(("\n".join(" ".join(map(str, or_node))+" 0" for or_node in self.clauses))+"\n") def clone(self): '''Naredi globoko kopijo te formule kot nov objekt.''' ret = FlatCNF() ret.name_mapping = self.name_mapping.copy() ret.clauses = map(set.copy, self.clauses) return ret def rename(self, assignments): '''Preimenuje spremenljivke v dani resitvi iz internega imenskega prostora (stevilke) v izvornega (ce je bila formula konstruirana iz objekta z imenovanimi spremenljivkami).''' ret = {} for k in assignments: if k in self.name_mapping: ret[ self.name_mapping[k] ] = assignments[k] else: ret[k] = assignments[k] return ret
{ "content_hash": "b509e9b70f58ec2737bb06fb59f4b700", "timestamp": "", "source": "github", "line_count": 460, "max_line_length": 112, "avg_line_length": 30.854347826086958, "alnum_prop": 0.5239202423730007, "repo_name": "urska19/LVR-sat", "id": "d6adaf0a988581fe87bd0811f6df62b957993fd8", "size": "14238", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/logConstructs.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "102113" } ], "symlink_target": "" }
import atexit from operator import attrgetter from abc import ABCMeta, abstractmethod from .util import StrKeyDict HIGH = 'HIGH' LOW = 'LOW' # TODO: More implementations: IN, OUT, ANALOG, PWM, # ... IC2, SERIAL, SERVO, INTERRUPTION IN = 'IN' OUT = 'OUT' ANALOG = 'ANALOG' PWM = 'PWM' class WrongPinMode(Exception): value = 'Operation not supported in current mode.' class ModeNotSuported(Exception): value = 'Mode not supported by Pin or Board.' class ArgumentOutOfRange(Exception): value = 'Argument not in the range 0.0 to 1.0' class Board(object): """Abstract class defining common interface for all boards. Instance attributes of interest to end-users: ``«board».pins`` A ``dict`` with physical pin locations as keys and ``Pin`` instances as values. ``«board».cleanup()`` This should be called to release the pins for other applications on some boards. It is called automatically when the script finishes. Implementations of ``Board`` subclasses should: * Call ``super(«BoardSubclass», self).__init__()`` and ``self._add_pins(«pins»)`` in their ``__init__`` method. * Implement ``_set_digital_mode()`` and ``_set_pin_state()``. * Override ``cleanup()``, if the board needs it. """ __metaclass__ = ABCMeta def __init__(self): """Registers ``self.cleanup`` for calling at script exit. This ``__init__`` method should be called by the ``__init__`` of all ``Board`` subclasses using ``super(BoardSubclass, self).__init__()``. The ``__init__`` of board subclasses should also call ``self._add_pins(pins)`` with an iterable of ``Pin`` instances. """ atexit.register(self.cleanup) def filter_pins(self, *pin_types): """Get a list of pins that are instances of the given pin_types See the ``digital_pins`` property for an example of use. 
Arguments: ``pin_types``: an iterable of types (usually, ``Pin`` subclasses) """ filtered = [] for pin_type in pin_types: sub = [x for x in self.pins.values() if isinstance(x, pin_type)] filtered += sub return filtered def select_pins(self, locations): """Get list of pins from iterable of locations""" locations = list(locations) return [self.pins[location] for location in locations] @property def digital_pins(self): """[property] Get list of digital pins""" return sorted(self.filter_pins(DigitalPin), key=attrgetter('location')) def cleanup(self): """Releases pins for use by other applications. Overriding this stub may or may not be needed in specific drivers. For example, scripts running on boards using standard ``sysfs`` GPIO access should ``unexport`` the pins before exiting. """ pass ###################################################################### # the following methods are of interest only to implementers of # drivers, i.e. concrete Board subclasses def _add_pins(self, pins): """Populate ``board.pins`` mapping from ``Pin`` instances. The ``__init__`` method of concrete ``Board`` subclasses should call this method to populate the board instance ``pins`` mapping. Arguments: ``pins``: an iterable of ``Pin`` instances """ self.pins = StrKeyDict() self.gpio = StrKeyDict() for pin in pins: self.pins[pin.location] = pin if hasattr(pin, 'gpio_id'): self.gpio[pin.gpio_id] = pin @abstractmethod def _set_digital_mode(self, pin, mode): """Abstract method to be implemented by each ``Board`` subclass. The ``«pin».mode(…)`` property calls this method because the procedure to set pin mode changes from board to board. """ @abstractmethod def _set_pin_state(self, pin, state): """Abstract method to be implemented by each ``Board`` subclass The ``«pin».__change_state(…)`` method calls this method because the procedure to set pin state changes from board to board. 
""" @abstractmethod def _get_pin_state(self, pin): """Abstract method to be implemented by each ``Board`` subclass """ class AnalogInputCapable(object): """Mixin interface for boards that support AnalogInputPin Concrete ``AnalogInputCapable`` subclasses should implement ``_get_pin_value``, and `_set_analog_mode`` to read the values of analog pins. """ __metaclass__ = ABCMeta @abstractmethod def _get_pin_value(self, pin): """Abstract method to be implemented by each ``Board`` subclass. The ``«AnalogPin».value(…)`` method calls this method because the procedure to read pin analog signal changes from board to board. """ @abstractmethod def _set_analog_mode(self, pin, mode): """Abstract method to be implemented by each ``Board`` subclass. The ``«pin».mode(…)`` property calls this method because the procedure to set pin mode changes from board to board. """ class PwmOutputCapable(object): """Mixin interface for boards that support PwmOutputPin Concrete ``PwmOutputCapable`` subclasses should implement ``_get_pin_value`` to write the PWM signal of analog pins. """ __metaclass__ = ABCMeta @abstractmethod def _set_pwm_mode(self, pin): """Abstract method to be implemented by each ``Board`` subclass. The ``«pin».mode(…)`` property calls this method because the procedure to set pin mode changes from board to board. """ @abstractmethod def _set_pwm_frequency(self, pin, value): """Abstract method to be implemented by each ``Board`` subclass. The ``«PwmPin».frequency(…)`` method calls this method because the procedure to set the PWM's frequency changes from board to board. This method should always be overwritten! If its is not possible to set PWM's frequency, a ``NotImplementedError`` exception should be raised. """ @abstractmethod def _set_pwm_duty_cycle(self, pin, value): """Abstract method to be implemented by each ``Board`` subclass. The ``«PwmPin».value(…)`` method calls this method because the procedure to set PWM's duty cycle changes from board to board. 
""" def _get_pwm_duty_cycle(self, pin): """ This method should be overwritten if the ``Board`` subclass has this feature. """ if hasattr(pin, '_duty_cycle'): return pin._duty_cycle return 0.0 def _get_pwm_frequency(self, pin): """ This method should be overwritten if the ``Board`` subclass has this feature. """ if hasattr(pin, '_frequency'): return pin._frequency return 0.0 class Pin(object): """Abstract class defining common interface for all pins. Attributes: ``supported_modes`` Modes supported. Each ``Pin`` subclass supports different set of ``modes``. ``supported_modes`` is used by the ``Pin.mode`` property. Each subclass should over overwrite this attribute. """ __metaclass__ = ABCMeta supported_modes = [] def __init__(self, board, location, gpio_id=None): """Initialize ``Pin`` instance with Arguments: ``board`` The board to which the pin is attached. ``location`` Physical location of pin; ``int`` and ``str`` are acceptable. ``gpio_id`` Logical name of GPIO pin (e.g. ``sysfs`` file name). """ self.board = board self.location = location if gpio_id is not None: self.gpio_id = gpio_id self._mode = None def __repr__(self): cls_name = self.__class__.__name__ location = self.location if hasattr(self, 'gpio_id'): gpio_id = 'gpio%s' % self.gpio_id else: gpio_id = '' return '<{cls_name} {gpio_id}@{location}>'.format(**locals()) @property def mode(self): """[property] Get/set pin mode to ``pingo.IN``, ``pingo.OUT`` ``pingo.ANALOG`` or ``pingo.PWM``""" return self._mode @mode.setter def mode(self, value): if value not in self.supported_modes: raise ModeNotSuported() if value in [IN, OUT]: self.board._set_digital_mode(self, value) elif value == ANALOG: self.board._set_analog_mode(self, value) elif value == PWM: self.board._set_pwm_mode(self, value) self._mode = value class DigitalPin(Pin): """Defines common interface for all digital pins. 
The ``repr`` of a digital pin looks like ``<DigitalPin gpio21@40>`` where ``21`` is the logical pin identifier and ``40`` is the physical location of the pin in the connector. Implementers of board drivers do not need to subclass this class because pins delegate all board-dependent behavior to the board. """ supported_modes = [IN, OUT] def __init__(self, board, location, gpio_id=None): Pin.__init__(self, board, location, gpio_id) self._state = None @property def state(self): """[property] Get/set pin state to ``pingo.HIGH`` or ``pingo.LOW``""" if self.mode not in [IN, OUT]: raise WrongPinMode() if self.mode == IN: self._state = self.board._get_pin_state(self) return self._state @state.setter def state(self, value): if self.mode != OUT: raise WrongPinMode() self.board._set_pin_state(self, value) self._state = value def low(self): """Set voltage of pin to ``pingo.LOW`` (GND).""" self.state = LOW lo = low # shortcut for interactive use def high(self): """Set state of the pin to ``pingo.HIGH`` (Vcc).""" self.state = HIGH hi = high # shortcut for interactive use def toggle(self): """Change state of the pin.""" self.state = HIGH if self.state == LOW else LOW def pulse(self): """Generate a pulse in state of the pin.""" if self.state == LOW: self.state = HIGH self.state = LOW else: self.state = LOW self.state = HIGH class PwmPin(DigitalPin): supported_modes = [IN, OUT, PWM] def __init__(self, board, location, gpio_id=None, frequency=None): DigitalPin.__init__(self, board, location, gpio_id) self._frequency = frequency self._duty_cycle = None # TUDO: # Write a decorator to test mode == 'MODE' @property def value(self): if self.mode != PWM: raise WrongPinMode() return self.board._get_pwm_duty_cycle(self) @value.setter def value(self, value): if self.mode != PWM: raise WrongPinMode() if not 0.0 <= value <= 100.0: raise ArgumentOutOfRange() self.board._set_pwm_duty_cycle(self, value) self._duty_cycle = value @property def frequency(self): if self.mode != PWM: raise 
WrongPinMode() return self.board._get_pwm_frequency(self) @frequency.setter def frequency(self, new_frequency): if self.mode != PWM: raise WrongPinMode() if new_frequency <= 0.0: raise ArgumentOutOfRange() self.board._set_pwm_frequency(self, new_frequency) self._frequency = new_frequency class AnalogPin(Pin): """Defines common interface for all analog pins. Implementers of board drivers do not need to subclass this class because pins delegate all board-dependent behavior to the board. This pin type supports read operations only. """ supported_modes = [IN, ANALOG] def __init__(self, board, location, resolution, gpio_id=None): """ :param board: the board to which this ping belongs :param location: the physical location of the pin on the board :param resolution: resolution of the AD converter in bits :param gpio_id: the logical id for GPIO access, if applicable """ Pin.__init__(self, board, location, gpio_id) self.bits = resolution self._mode = None @property def value(self): """[property] Pin value as integer from 0 to 2 ** resolution - 1""" return self.board._get_pin_value(self) def ratio(self, from_min=0, from_max=None, to_min=0.0, to_max=1.0): """ Pin value as a float, by default from 0.0 to 1.0. The ``from...`` and ``to...`` parameters work like in the Arduino map_ function, converting values from an expected input range to a desired output range. .. _map: http://arduino.cc/en/reference/map """ if from_max is None: from_max = 2 ** self.bits - 1 _value = self.value return (float(_value - from_min) * (to_max - to_min) / (from_max - from_min) + to_min) @property def percent(self): """[property] Pin value as float from 0.0 to 100.0""" return self.ratio(to_max=100.0) class GroundPin(Pin): def __repr__(self): return '<%s>' % self.__class__.__name__ class VccPin(Pin): def __init__(self, board, location, voltage): Pin.__init__(self, board, location) self.voltage = voltage # e.g. 3.3, 5.0 def __repr__(self): return '<%s %0.1fV>' % (self.__class__.__name__, self.voltage)
{ "content_hash": "a0030230bc5f687a39dcb9ee42814b1c", "timestamp": "", "source": "github", "line_count": 454, "max_line_length": 84, "avg_line_length": 30.526431718061673, "alnum_prop": 0.5903023306154845, "repo_name": "Vido/pingo", "id": "b9f3c2e0f5a539664f6e6b9e90b6e0574e2405a9", "size": "13912", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pingo/board.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "416" }, { "name": "Python", "bytes": "106214" }, { "name": "Shell", "bytes": "221" }, { "name": "VimL", "bytes": "1062" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('ep', '0001_initial'), ] operations = [ migrations.CreateModel( name='PrefectDeviceParameterConfiguration', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('power_consumption', models.DecimalField(decimal_places=2, max_digits=7)), ('line', models.CharField(max_length=2)), ('device', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='configuration', to='ep.Device')), ], options={ 'verbose_name': 'Prefect Device Configuration', }, ), ]
{ "content_hash": "e6af5cac3c0d79a23ab90877c9571913", "timestamp": "", "source": "github", "line_count": 28, "max_line_length": 137, "avg_line_length": 32.17857142857143, "alnum_prop": 0.5937846836847946, "repo_name": "dschien/energy-aggregator", "id": "752252a6f24b65190d8976e2f377c0f62ab37ea6", "size": "973", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "prefect/migrations/0001_initial.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "10426" }, { "name": "HTML", "bytes": "54586" }, { "name": "JavaScript", "bytes": "770928" }, { "name": "Python", "bytes": "253587" }, { "name": "Shell", "bytes": "8664" } ], "symlink_target": "" }
""" Memory commands test module """ from tests.utils import ( GefUnitTestGeneric, gdb_run_cmd, gdb_start_silent_cmd, _target, ) class MemoryCommand(GefUnitTestGeneric): """ `memory` command testing module""" def test_cmd_memory_watch(self): self.assertFailIfInactiveSession(gdb_run_cmd("memory watch $pc")) res = gdb_start_silent_cmd("memory watch $pc 0x100 byte") self.assertNoException(res) res = gdb_start_silent_cmd("memory watch $pc 0x40 word") self.assertNoException(res) res = gdb_start_silent_cmd("memory watch $pc 0x30 dword") self.assertNoException(res) res = gdb_start_silent_cmd("memory watch $pc 0x20 qword") self.assertNoException(res) res = gdb_start_silent_cmd("memory watch $pc 0x8 pointers") self.assertNoException(res) res = gdb_start_silent_cmd("memory watch $pc") self.assertNoException(res) target = _target("memwatch") res = gdb_start_silent_cmd("memory watch &myglobal", before=["set args 0xdeadbeef"], after=["continue"], target=target, context='memory') self.assertIn("deadbeef", res) self.assertNotIn("cafebabe", res) res = gdb_start_silent_cmd("memory watch &myglobal", before=["set args 0xcafebabe"], after=["continue"], target=target, context="memory") self.assertIn("cafebabe", res) self.assertNotIn("deadbeef", res) def test_cmd_memory_unwatch(self): self.assertFailIfInactiveSession(gdb_run_cmd("memory unwatch $pc")) res = gdb_start_silent_cmd("memory unwatch $pc") self.assertNoException(res) def test_cmd_memory_list(self): self.assertFailIfInactiveSession(gdb_run_cmd("memory list")) res = gdb_start_silent_cmd("memory list") self.assertNoException(res) def test_cmd_memory_reset(self): self.assertFailIfInactiveSession(gdb_run_cmd("memory reset")) res = gdb_start_silent_cmd("memory reset") self.assertNoException(res)
{ "content_hash": "12e3fb45a7a5e6b69c50da1cc7b986e1", "timestamp": "", "source": "github", "line_count": 63, "max_line_length": 75, "avg_line_length": 36.76190476190476, "alnum_prop": 0.5785837651122625, "repo_name": "hugsy/gef", "id": "aed65ce704cd9355c7bde1bc2ab1f773e87b2699", "size": "2316", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "tests/commands/memory.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "11174" }, { "name": "Makefile", "bytes": "1234" }, { "name": "Python", "bytes": "485778" }, { "name": "Shell", "bytes": "7430" } ], "symlink_target": "" }
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
                        unicode_literals, with_statement)

import collections
import inspect
import sys
from abc import abstractmethod
from functools import update_wrapper

import six

from pants.build_graph.address import Address
from pants.engine.objects import Resolvable, Serializable
from pants.util.memo import memoized
from pants.util.meta import AbstractClass
from pants.util.objects import datatype


class Collection(object):
  """
  Singleton Collection Type. The ambition is to gain native support for flattening,
  so methods like <pants.engine.fs.merge_files> won't have to be defined separately.
  Related to: https://github.com/pantsbuild/pants/issues/3169
  """

  @classmethod
  @memoized
  def of(cls, *element_types):
    """Return a datatype-backed Collection subclass specialized to `element_types`."""
    union = '|'.join(element_type.__name__ for element_type in element_types)
    # NOTE: the bytes literal keeps the generated class name a native `str` on
    # Python 2 despite the `unicode_literals` future-import above.
    type_name = b'{}.of({})'.format(cls.__name__, union)
    supertypes = (cls, datatype('Collection', ['dependencies']))
    properties = {'element_types': element_types}
    collection_of_type = type(type_name, supertypes, properties)

    # Expose the custom class type at the module level to be pickle compatible.
    setattr(sys.modules[cls.__module__], type_name, collection_of_type)

    return collection_of_type


# Collection of Address objects; used as a first-class engine type.
Addresses = Collection.of(Address)


class TypeConstraint(AbstractClass):
  """Represents a type constraint.

  Not intended for direct use; instead, use one of :class:`SuperclassesOf`, :class:`Exactly` or
  :class:`SubclassesOf`.
  """

  def __init__(self, *types, **kwargs):
    """Creates a type constraint centered around the given types.

    The type constraint is satisfied as a whole if satisfied for at least one of the given types.

    :param type *types: The focus of this type constraint.
    :param str description: A description for this constraint if the list of types is too long.
    """
    if not types:
      raise ValueError('Must supply at least one type')
    if any(not isinstance(t, type) for t in types):
      raise TypeError('Supplied types must be types. {!r}'.format(types))
    self._types = types
    self._desc = kwargs.get('description', None)

  @property
  def types(self):
    """Return the subject types of this type constraint.

    :type: tuple of type
    """
    return self._types

  def satisfied_by(self, obj):
    """Return `True` if the given object satisfies this type constraint.

    :rtype: bool
    """
    return self.satisfied_by_type(type(obj))

  @abstractmethod
  def satisfied_by_type(self, obj_type):
    """Return `True` if the given object satisfies this type constraint.

    :rtype: bool
    """

  def __hash__(self):
    return hash((type(self), self._types))

  def __eq__(self, other):
    # Constraints are equal only for the exact same constraint class over the same types.
    return type(self) == type(other) and self._types == other._types

  def __ne__(self, other):
    return not (self == other)

  def __str__(self):
    if self._desc:
      constrained_type = '({})'.format(self._desc)
    else:
      if len(self._types) == 1:
        constrained_type = self._types[0].__name__
      else:
        constrained_type = '({})'.format(', '.join(t.__name__ for t in self._types))
    return '{variance_symbol}{constrained_type}'.format(variance_symbol=self._variance_symbol,
                                                        constrained_type=constrained_type)

  def __repr__(self):
    if self._desc:
      constrained_type = self._desc
    else:
      constrained_type = ', '.join(t.__name__ for t in self._types)
    return ('{type_constraint_type}({constrained_type})'
            .format(type_constraint_type=type(self).__name__,
                    constrained_type=constrained_type))


class SuperclassesOf(TypeConstraint):
  """Objects of the exact type as well as any super-types are allowed."""

  _variance_symbol = '-'

  def satisfied_by_type(self, obj_type):
    return any(issubclass(t, obj_type) for t in self._types)


class Exactly(TypeConstraint):
  """Only objects of the exact type are allowed."""

  _variance_symbol = '='

  def satisfied_by_type(self, obj_type):
    return obj_type in self._types


class SubclassesOf(TypeConstraint):
  """Objects of the exact type as well as any sub-types are allowed."""

  _variance_symbol = '+'

  def satisfied_by_type(self, obj_type):
    return issubclass(obj_type, self._types)


class NotSerializableError(TypeError):
  """Indicates an addressable descriptor is illegally installed in a non-Serializable type."""


class MutationError(AttributeError):
  """Indicates an illegal attempt to mutate an addressable attribute that already has a value."""


class TypeConstraintError(TypeError):
  """Indicates a :class:`TypeConstraint` violation."""


class AddressableDescriptor(object):
  """A data descriptor for fields containing one or more addressable items.

  An addressable descriptor has lifecycle expectations tightly coupled with the contract of
  Serializable objects and the 2-phase hydration of AddressMap.parse, Graph.resolve.

  Decorated accessors are write-once, and then read-only. They are intended to be written in a
  constructor such that objects containing them have immutable semantics. In other words, the
  descriptor is intended to be used like a type-checked `@property` with possibly lazily resolved
  values.

  The written value is type-checked against a :class:`TypeConstraint` and can only be one of 3
  types:

  1. An opaque string address.
  2. A Resolvable for the address that, when resolved, will meet the type constraint.
  3. A concrete value that meets the type constraint.

  The 1st type, an opaque string address, is also the type associated with the 1st stage of the
  2-stage lifecycle of Serializable objects containing addressable values. In the second and
  final stage, the Serializable object is re-constructed with addressable values of the second
  or third types; ie: reconstructed with either resolvables or concrete values in place of the
  first stage address.

  Two affordances are made in type constraint handling:

  1. Either a :class:`TypeConstraint` instance can be given if the type constraint is fully known
     or else a type constraint class can be given if the type constraint should apply to the type
     of the enclosing class. This is useful for declaring an addressable property in a baseclass
     that should be type-constrained based on the type of the derived class.
  2. Decorators for addressables (see `addressable`, `addressable_list` and `addressable_dict`)
     allow wrapping of either class functions - typical - or @property descriptors.
     The property descriptor case sets up an idiom for recursive addressables. The idiom looks
     like:

     >>> class Thing(Struct):
     ...   def __init__(self, thing):
     ...     super(Thing, self).__init__()
     ...     self.thing = thing
     ...   @property
     ...   def parent(self):
     ...     '''Return this thing's parent.
     ...
     ...     :rtype: :class:`Thing`
     ...     '''
     ...
     >>> Thing.parent = addressable(Exactly(Thing))(Thing.parent)

     Here the `Thing.parent` property is re-assigned with a type-constrained addressable
     descriptor after the class is defined so the class can be referred to in the type
     constraint.
  """

  # Registry of (owner type, attribute name) pairs that carry an addressable descriptor.
  _descriptors = set()

  @classmethod
  def is_addressable(cls, obj, key):
    """Return `True` if the given attribute of `obj` is an addressable attribute.

    :param obj: The object to inspect.
    :param string key: The name of the property on `obj` to check.
    """
    return (type(obj), key) in cls._descriptors

  @classmethod
  def _register(cls, obj, descriptor):
    # Record that `type(obj)` carries this descriptor under its attribute name.
    cls._descriptors.add((type(obj), descriptor._name))

  def __init__(self, name, type_constraint):
    self._name = name
    self._type_constraint = type_constraint

  def __set__(self, instance, value):
    if not Serializable.is_serializable(instance):
      raise NotSerializableError('The addressable descriptor {} can only be applied to methods or '
                                 'properties of Serializable objects, applied to method {} of '
                                 'type {}'.format(type(self).__name__,
                                                  self._name,
                                                  type(instance).__name__))

    instance_dict = instance._asdict()
    if self._name in instance_dict:
      # Write-once semantics: a second assignment is always an error.
      raise MutationError('Attribute {} of {} has already been set to {}, rejecting attempt to '
                          're-set with {}'.format(self._name,
                                                  instance,
                                                  instance_dict[self._name],
                                                  value))

    value = self._checked_value(instance, value)

    self._register(instance, self)

    # We mutate the instance dict, which is only OK if used in the conventional idiom of setting
    # the value via this data descriptor in the instance's constructor.
    instance_dict[self._name] = value

  def __get__(self, instance, unused_owner_type=None):
    # We know instance is a Serializable from the type-checking done in set.
    value = instance._asdict()[self._name]
    return self._resolve_value(instance, value)

  def _get_type_constraint(self, instance):
    # A constraint *class* (vs. instance) is instantiated against the enclosing object's type;
    # see affordance 1 in the class docstring.
    if inspect.isclass(self._type_constraint):
      return self._type_constraint(type(instance))
    else:
      return self._type_constraint

  def _checked_value(self, instance, value):
    # We allow five forms of value:
    # 0. None.
    # 1. An opaque (to us) address pointing to a value that can be resolved by external
    #    means.
    # 2. A `Resolvable` value that we can lazily resolve and type-check in `__get__`.
    # 3. A concrete instance that meets our type constraint.
    # 4. A dict when our type constraint has exactly one Serializable subject type - we convert
    #    the dict into an instance of that type.
    if value is None:
      return None

    if isinstance(value, (six.string_types, Address, Resolvable)):
      return value

    # Support untyped dicts that we deserialize on-demand here into the required type.
    # This feature allows for more brevity in the JSON form (local type inference) and an
    # alternate construction style in the python forms.
    type_constraint = self._get_type_constraint(instance)
    if (isinstance(value, dict) and
        len(type_constraint.types) == 1 and
        Serializable.is_serializable_type(type_constraint.types[0])):
      if not value:
        # TODO(John Sirois): Is this the right thing to do? Or should an empty serializable_type
        # be constructed?
        return None  # {} -> None.
      else:
        serializable_type = type_constraint.types[0]
        return serializable_type(**value)

    if not type_constraint.satisfied_by(value):
      raise TypeConstraintError('Got {} of type {} for {} attribute of {} but expected {!r}'
                                .format(value,
                                        type(value).__name__,
                                        self._name,
                                        instance,
                                        type_constraint))
    return value

  def _resolve_value(self, instance, value):
    if not isinstance(value, Resolvable):
      # The value is concrete which means we type-checked on set so no need to do so again, its a
      # raw address string or an instance that satisfies our type constraint.
      return value
    else:
      resolved_value = value.resolve()
      type_constraint = self._get_type_constraint(instance)

      if not type_constraint.satisfied_by(resolved_value):
        raise TypeConstraintError('The value resolved from {} did not meet the type constraint of '
                                  '{!r} for the {} property of {}: {}'
                                  .format(value.address,
                                          type_constraint,
                                          self._name,
                                          instance,
                                          resolved_value))

      return resolved_value


def _addressable_wrapper(addressable_descriptor, type_constraint):
  """Build a decorator that replaces an accessor with the given descriptor type."""
  def wrapper(func):
    # We allow for wrapping property objects to support the following idiom for defining
    # recursive addressables:
    #
    # class Thing(Struct):
    #   def __init__(self, thing):
    #      super(Thing, self).__init__()
    #      self.thing = thing
    #
    #   @property
    #   def parent(self):
    #     """Return this thing's parent.
    #
    #     :rtype: :class:`Thing`
    #     """"
    #
    # Thing.parent = addressable(Exactly(Thing))(Thing.parent)
    func = func.fget if isinstance(func, property) else func

    addressable_accessor = addressable_descriptor(func.__name__, type_constraint)
    return update_wrapper(addressable_accessor, func)
  return wrapper


def addressable(type_constraint):
  """Return an addressable attribute for Serializable classes.

  The attribute should have no implementation (it will be ignored), but can carry a docstring.
  The implementation is provided by this wrapper.

  Idiomatic use assigns the value, which can either be an opaque address string or a resolved
  value that meets the type constraint, in the constructor::

  >>> class Employee(Serializable):
  ...   def __init__(self, person):
  ...     self.person = person
  ...   @addressable(SubclassesOf(Person))
  ...   def person(self):
  ...     '''The person that is this employee.'''

  Addressable attributes are only assignable once, so this pattern yields an immutable `Employee`
  whose `person` attribute is either a `Person` instance or
  :class:`pants.engine.objects.Resolvable` person or else a string address pointing to one.

  See :class:`AddressableDescriptor` for more details.

  :param type_constraint: The type constraint the value must satisfy.
  :type type_constraint: :class:`TypeConstraint`
  """
  return _addressable_wrapper(AddressableDescriptor, type_constraint)


class AddressableList(AddressableDescriptor):
  """An addressable descriptor over a list whose items each satisfy the type constraint."""

  def _checked_value(self, instance, value):
    # Check each element through the single-value logic in the base class.
    if value is None:
      return None

    if not isinstance(value, collections.MutableSequence):
      raise TypeError('The {} property of {} must be a list, given {} of type {}'
                      .format(self._name, instance, value, type(value).__name__))
    return [super(AddressableList, self)._checked_value(instance, v) for v in value]

  def _resolve_value(self, instance, value):
    # Resolve each element; a falsy stored value yields an empty list.
    return [super(AddressableList, self)._resolve_value(instance, v)
            for v in value] if value else []


def addressable_list(type_constraint):
  """Marks a list's values as satisfying a given type constraint.

  Some (or all) elements of the list may be :class:`pants.engine.objects.Resolvable` elements
  to resolve later.

  See :class:`AddressableDescriptor` for more details.

  :param type_constraint: The type constraint the list's values must all satisfy.
  :type type_constraint: :class:`TypeConstraint`
  """
  return _addressable_wrapper(AddressableList, type_constraint)


class AddressableDict(AddressableDescriptor):
  """An addressable descriptor over a dict whose values each satisfy the type constraint."""

  def _checked_value(self, instance, value):
    # Check each value through the single-value logic in the base class; keys pass through.
    if value is None:
      return None

    if not isinstance(value, collections.MutableMapping):
      raise TypeError('The {} property of {} must be a dict, given {} of type {}'
                      .format(self._name, instance, value, type(value).__name__))
    return {k: super(AddressableDict, self)._checked_value(instance, v)
            for k, v in value.items()}

  def _resolve_value(self, instance, value):
    # Resolve each value; a falsy stored value yields an empty dict.
    return {k: super(AddressableDict, self)._resolve_value(instance, v)
            for k, v in value.items()} if value else {}


def addressable_dict(type_constraint):
  """Marks a dicts's values as satisfying a given type constraint.

  Some (or all) values in the dict may be :class:`pants.engine.objects.Resolvable` values to
  resolve later.

  See :class:`AddressableDescriptor` for more details.

  :param type_constraint: The type constraint the dict's values must all satisfy.
  :type type_constraint: :class:`TypeConstraint`
  """
  return _addressable_wrapper(AddressableDict, type_constraint)


# TODO(John Sirois): Move variants into Address 1st class as part of merging the engine/exp
# into the mainline (if they survive).
# TODO: Variants currently require an explicit name (and thus a `:`) in order to parse correctly.
def strip_variants(address):
  """Return a copy of the given address with the variants (if any) stripped from the name.

  :rtype: :class:`pants.build_graph.address.Address`
  """
  address, _ = parse_variants(address)
  return address


def _extract_variants(address, variants_str):
  """Return the variants (if any) represented by the given variants_str.

  :returns: The variants or else `None` if there are none.
  :rtype: tuple of tuples (key, value) strings
  """
  def entries():
    for entry in variants_str.split(','):
      key, _, value = entry.partition('=')
      if not key or not value:
        raise ValueError('Invalid variants after the @ in: {}'.format(address))
      yield (key, value)
  return tuple(entries())


def parse_variants(address):
  """Split any `@key=value,...` variants suffix off the given address's target name.

  :returns: A `(normalized_address, variants)` tuple where `normalized_address` has the suffix
    removed and `variants` is a tuple of (key, value) pairs, or `None` when the target name
    carries no `@` suffix.
  """
  target_name, _, variants_str = address.target_name.partition('@')
  variants = _extract_variants(address, variants_str) if variants_str else None
  normalized_address = Address(spec_path=address.spec_path, target_name=target_name)
  return normalized_address, variants
{ "content_hash": "4686c3b95f224ced98d8ca4303509527", "timestamp": "", "source": "github", "line_count": 461, "max_line_length": 100, "avg_line_length": 38.017353579175705, "alnum_prop": 0.6635284719844802, "repo_name": "mateor/pants", "id": "048d8d67de909e41b204b257125cb6dbbbefe2e2", "size": "17673", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "src/python/pants/engine/addressable.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C++", "bytes": "781" }, { "name": "CSS", "bytes": "9444" }, { "name": "GAP", "bytes": "2459" }, { "name": "Gherkin", "bytes": "919" }, { "name": "Go", "bytes": "1746" }, { "name": "HTML", "bytes": "79866" }, { "name": "Java", "bytes": "459514" }, { "name": "JavaScript", "bytes": "29992" }, { "name": "Protocol Buffer", "bytes": "4749" }, { "name": "Python", "bytes": "5386641" }, { "name": "Rust", "bytes": "70804" }, { "name": "Scala", "bytes": "80844" }, { "name": "Shell", "bytes": "64072" }, { "name": "Thrift", "bytes": "2046" } ], "symlink_target": "" }
import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'counterpartyd' copyright = u'2014, Counterparty Team' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.1.0' # The full version, including alpha/beta/rc tags. release = '0.1.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. 
#add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. 
#html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'counterpartyddoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'counterpartyd.tex', u'counterpartyd Documentation', u'Counterparty Team', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. 
#latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'counterpartyd', u'counterpartyd Documentation', [u'Counterparty Team'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'counterpartyd', u'counterpartyd Documentation', u'Counterparty Team', 'counterpartyd', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote'
{ "content_hash": "cb9aeb225950f640cc947ed39b660fb3", "timestamp": "", "source": "github", "line_count": 229, "max_line_length": 80, "avg_line_length": 32.23580786026201, "alnum_prop": 0.7037388241668925, "repo_name": "Ziftr/counterpartyd", "id": "152846cf94a3faf3556edff92907751427eae195", "size": "7806", "binary": false, "copies": "7", "ref": "refs/heads/master", "path": "docs/conf.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "5592" }, { "name": "Python", "bytes": "403611" }, { "name": "Shell", "bytes": "5110" } ], "symlink_target": "" }
import textwrap

from lymph.client import Client
from lymph.cli.base import Command, handle_request_errors


class InspectCommand(Command):
    """
    Usage: lymph inspect [--ip=<address> | --guess-external-ip | -g] <address> [options]

    Options:
      --ip=<address>               Use this IP for all sockets.
      --guess-external-ip, -g      Guess the public facing IP of this machine and
                                   use it instead of the provided address.

    {COMMON_OPTIONS}
    """

    short_description = 'Describes the RPC interface of a service'

    @handle_request_errors
    def run(self):
        """Query `lymph.inspect` on the target address and pretty-print each RPC method."""
        endpoint = self.args['<address>']
        rpc_client = Client.from_config(self.config)
        reply = rpc_client.request(endpoint, 'lymph.inspect', {}, timeout=5).body

        print('RPC interface of {}\n'.format(self.terminal.bold(endpoint)))

        # List methods alphabetically; wrap long help text for readability.
        methods = sorted(reply['methods'], key=lambda method: method['name'])
        for method in methods:
            colored_name = self.terminal.red(method['name'])
            colored_params = self.terminal.yellow(', '.join(method['params']))
            wrapped_help = '\n    '.join(textwrap.wrap(method['help'], 70))
            line = "rpc {name}({params})\n\t {help}\n".format(
                name=colored_name,
                params=colored_params,
                help=wrapped_help,
            )
            print(line)
{ "content_hash": "126feefaaf2cf349c9cbcf1f8a826eee", "timestamp": "", "source": "github", "line_count": 38, "max_line_length": 88, "avg_line_length": 33.21052631578947, "alnum_prop": 0.5689381933438986, "repo_name": "itakouna/lymph", "id": "4be48aecab7e3ecd7f3a6e7c73f6e020f38018d3", "size": "1262", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "lymph/cli/inspect.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "3410" }, { "name": "JavaScript", "bytes": "17495" }, { "name": "Makefile", "bytes": "245" }, { "name": "Python", "bytes": "235051" } ], "symlink_target": "" }
""" Sandwich demo based on code from http://nbviewer.ipython.org/6576096 """ import numpy as np from matplotlib import pyplot as plt from sklearn.metrics import pairwise_distances from sklearn.neighbors import NearestNeighbors from metric_learn import LMNN, ITML_Supervised, LSML_Supervised, SDML_Supervised def sandwich_demo(): x, y = sandwich_data() knn = nearest_neighbors(x, k=2) ax = plt.subplot(3, 1, 1) # take the whole top row plot_sandwich_data(x, y, ax) plot_neighborhood_graph(x, knn, y, ax) ax.set_title('input space') ax.set_aspect('equal') ax.set_xticks([]) ax.set_yticks([]) mls = [ LMNN(), ITML_Supervised(num_constraints=200), SDML_Supervised(num_constraints=200), LSML_Supervised(num_constraints=200), ] for ax_num, ml in enumerate(mls, start=3): ml.fit(x, y) tx = ml.transform() ml_knn = nearest_neighbors(tx, k=2) ax = plt.subplot(3, 2, ax_num) plot_sandwich_data(tx, y, axis=ax) plot_neighborhood_graph(tx, ml_knn, y, axis=ax) ax.set_title(ml.__class__.__name__) ax.set_xticks([]) ax.set_yticks([]) plt.show() # TODO: use this somewhere def visualize_class_separation(X, labels): _, (ax1,ax2) = plt.subplots(ncols=2) label_order = np.argsort(labels) ax1.imshow(pairwise_distances(X[label_order]), interpolation='nearest') ax2.imshow(pairwise_distances(labels[label_order,None]), interpolation='nearest') def nearest_neighbors(X, k=5): knn = NearestNeighbors(n_neighbors=k) knn.fit(X) return knn.kneighbors(X, return_distance=False) def sandwich_data(): # number of distinct classes num_classes = 6 # number of points per class num_points = 9 # distance between layers, the points of each class are in a layer dist = 0.7 data = np.zeros((num_classes, num_points, 2), dtype=float) labels = np.zeros((num_classes, num_points), dtype=int) x_centers = np.arange(num_points, dtype=float) - num_points / 2 y_centers = dist * (np.arange(num_classes, dtype=float) - num_classes / 2) for i, yc in enumerate(y_centers): for k, xc in enumerate(x_centers): data[i, k, 0] = 
np.random.normal(xc, 0.1) data[i, k, 1] = np.random.normal(yc, 0.1) labels[i,:] = i return data.reshape((-1, 2)), labels.ravel() def plot_sandwich_data(x, y, axis=plt, colors='rbgmky'): for idx, val in enumerate(np.unique(y)): xi = x[y==val] axis.scatter(*xi.T, s=50, facecolors='none', edgecolors=colors[idx]) def plot_neighborhood_graph(x, nn, y, axis=plt, colors='rbgmky'): for i, a in enumerate(x): b = x[nn[i,1]] axis.plot((a[0], b[0]), (a[1], b[1]), colors[y[i]]) if __name__ == '__main__': sandwich_demo()
{ "content_hash": "1e5e082ecde93367e216bdd59bd6f8c0", "timestamp": "", "source": "github", "line_count": 93, "max_line_length": 80, "avg_line_length": 28.967741935483872, "alnum_prop": 0.6551596139569413, "repo_name": "terrytangyuan/metric-learn", "id": "34b48a00b074587d5c8fbfd41ca7c75a59ae2f47", "size": "2694", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "examples/sandwich.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "98259" } ], "symlink_target": "" }
from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = ''' lookup: aws_ssm author: - Bill Wang <ozbillwang(at)gmail.com> - Marat Bakeev <hawara(at)gmail.com> - Michael De La Rue <siblemitcom.mddlr@spamgourmet.com> version_added: 2.5 requirements: - boto3 - botocore short_description: Get the value for a SSM parameter or all parameters under a path. description: - Get the value for an Amazon Simple Systems Manager parameter or a hierarchy of parameters. The first argument you pass the lookup can either be a parameter name or a hierarchy of parameters. Hierarchies start with a forward slash and end with the parameter name. Up to 5 layers may be specified. - If looking up an explicitly listed parameter by name which does not exist then the lookup will return a None value which will be interpreted by Jinja2 as an empty string. You can use the ```default``` filter to give a default value in this case but must set the second parameter to true (see examples below) - When looking up a path for parameters under it a dictionary will be returned for each path. If there is no parameter under that path then the return will be successful but the dictionary will be empty. - If the lookup fails due to lack of permissions or due to an AWS client error then the aws_ssm will generate an error, normally crashing the current ansible task. This is normally the right thing since ignoring a value that IAM isn't giving access to could cause bigger problems and wrong behaviour or loss of data. If you want to continue in this case then you will have to set up two ansible tasks, one which sets a variable and ignores failures one which uses the value of that variable with a default. See the examples below. options: decrypt: description: A boolean to indicate whether to decrypt the parameter. default: true type: boolean bypath: description: A boolean to indicate whether the parameter is provided as a hierarchy. 
default: false type: boolean recursive: description: A boolean to indicate whether to retrieve all parameters within a hierarchy. default: false type: boolean shortnames: description: Indicates whether to return the name only without path if using a parameter hierarchy. default: false type: boolean ''' EXAMPLES = ''' # lookup sample: - name: lookup ssm parameter store in the current region debug: msg="{{ lookup('aws_ssm', 'Hello' ) }}" - name: lookup ssm parameter store in nominated region debug: msg="{{ lookup('aws_ssm', 'Hello', region='us-east-2' ) }}" - name: lookup ssm parameter store without decrypted debug: msg="{{ lookup('aws_ssm', 'Hello', decrypt=False ) }}" - name: lookup ssm parameter store in nominated aws profile debug: msg="{{ lookup('aws_ssm', 'Hello', aws_profile='myprofile' ) }}" - name: lookup ssm parameter store using explicit aws credentials debug: msg="{{ lookup('aws_ssm', 'Hello', aws_access_key=my_aws_access_key, aws_secret_key=my_aws_secret_key, aws_security_token=my_security_token ) }}" - name: lookup ssm parameter store with all options. 
debug: msg="{{ lookup('aws_ssm', 'Hello', decrypt=false, region='us-east-2', aws_profile='myprofile') }}" - name: lookup a key which doesn't exist, returns "" debug: msg="{{ lookup('aws_ssm', 'NoKey') }}" - name: lookup a key which doesn't exist, returning a default ('root') debug: msg="{{ lookup('aws_ssm', 'AdminID') | default('root', true) }}" - name: lookup a key which doesn't exist failing to store it in a fact set_fact: temp_secret: "{{ lookup('aws_ssm', '/NoAccess/hiddensecret') }}" ignore_errors: true - name: show fact default to "access failed" if we don't have access debug: msg="{{ 'the secret was:' ~ temp_secret | default('could not access secret') }}" - name: return a dictionary of ssm parameters from a hierarchy path debug: msg="{{ lookup('aws_ssm', '/PATH/to/params', region='ap-southeast-2', bypath=true, recursive=true ) }}" - name: return a dictionary of ssm parameters from a hierarchy path with shortened names (param instead of /PATH/to/param) debug: msg="{{ lookup('aws_ssm', '/PATH/to/params', region='ap-southeast-2', shortnames=true, bypath=true, recursive=true ) }}" - name: Iterate over a parameter hierarchy debug: msg='key contains {{item.Name}} with value {{item.Value}} ' loop: '{{ query("aws_ssm", "/TEST/test-list", region="ap-southeast-2", bypath=true) }}' ''' from ansible.module_utils._text import to_native from ansible.module_utils.ec2 import HAS_BOTO3, boto3_tag_list_to_ansible_dict from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase from ansible.utils.display import Display try: from botocore.exceptions import ClientError import botocore import boto3 except ImportError: pass # will be captured by imported HAS_BOTO3 display = Display() def _boto3_conn(region, credentials): if 'boto_profile' in credentials: boto_profile = credentials.pop('boto_profile') else: boto_profile = None try: connection = boto3.session.Session(profile_name=boto_profile).client('ssm', region, **credentials) except 
(botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError): if boto_profile: try: connection = boto3.session.Session(profile_name=boto_profile).client('ssm', region) # FIXME: we should probably do better passing on of the error information except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError): raise AnsibleError("Insufficient credentials found.") else: raise AnsibleError("Insufficient credentials found.") return connection class LookupModule(LookupBase): def run(self, terms, variables=None, boto_profile=None, aws_profile=None, aws_secret_key=None, aws_access_key=None, aws_security_token=None, region=None, bypath=False, shortnames=False, recursive=False, decrypt=True): ''' :arg terms: a list of lookups to run. e.g. ['parameter_name', 'parameter_name_too' ] :kwarg variables: ansible variables active at the time of the lookup :kwarg aws_secret_key: identity of the AWS key to use :kwarg aws_access_key: AWS secret key (matching identity) :kwarg aws_security_token: AWS session key if using STS :kwarg decrypt: Set to True to get decrypted parameters :kwarg region: AWS region in which to do the lookup :kwarg bypath: Set to True to do a lookup of variables under a path :kwarg recursive: Set to True to recurse below the path (requires bypath=True) :returns: A list of parameter values or a list of dictionaries if bypath=True. 
''' if not HAS_BOTO3: raise AnsibleError('botocore and boto3 are required for aws_ssm lookup.') ret = [] response = {} ssm_dict = {} credentials = {} if aws_profile: credentials['boto_profile'] = aws_profile else: credentials['boto_profile'] = boto_profile credentials['aws_secret_access_key'] = aws_secret_key credentials['aws_access_key_id'] = aws_access_key credentials['aws_session_token'] = aws_security_token client = _boto3_conn(region, credentials) ssm_dict['WithDecryption'] = decrypt # Lookup by path if bypath: ssm_dict['Recursive'] = recursive for term in terms: ssm_dict["Path"] = term display.vvv("AWS_ssm path lookup term: %s in region: %s" % (term, region)) try: response = client.get_parameters_by_path(**ssm_dict) except ClientError as e: raise AnsibleError("SSM lookup exception: {0}".format(to_native(e))) paramlist = list() paramlist.extend(response['Parameters']) # Manual pagination, since boto doesn't support it yet for get_parameters_by_path while 'NextToken' in response: response = client.get_parameters_by_path(NextToken=response['NextToken'], **ssm_dict) paramlist.extend(response['Parameters']) # shorten parameter names. yes, this will return duplicate names with different values. if shortnames: for x in paramlist: x['Name'] = x['Name'][x['Name'].rfind('/') + 1:] display.vvvv("AWS_ssm path lookup returned: %s" % str(paramlist)) if len(paramlist): ret.append(boto3_tag_list_to_ansible_dict(paramlist, tag_name_key_name="Name", tag_value_key_name="Value")) else: ret.append({}) # Lookup by parameter name - always returns a list with one or no entry. 
else: display.vvv("AWS_ssm name lookup term: %s" % terms) ssm_dict["Names"] = terms try: response = client.get_parameters(**ssm_dict) except ClientError as e: raise AnsibleError("SSM lookup exception: {0}".format(to_native(e))) params = boto3_tag_list_to_ansible_dict(response['Parameters'], tag_name_key_name="Name", tag_value_key_name="Value") for i in terms: if i in params: ret.append(params[i]) elif i in response['InvalidParameters']: ret.append(None) else: raise AnsibleError("Ansible internal error: aws_ssm lookup failed to understand boto3 return value: {0}".format(str(response))) return ret display.vvvv("AWS_ssm path lookup returning: %s " % str(ret)) return ret
{ "content_hash": "90cf8b8d32f2720ea38fb2adc43d57e6", "timestamp": "", "source": "github", "line_count": 223, "max_line_length": 154, "avg_line_length": 45.72645739910314, "alnum_prop": 0.6429341963322546, "repo_name": "SergeyCherepanov/ansible", "id": "39d0175c394d4532bdfea66a9f9b86f3e09b5b1f", "size": "10482", "binary": false, "copies": "14", "ref": "refs/heads/master", "path": "ansible/ansible/plugins/lookup/aws_ssm.py", "mode": "33188", "license": "mit", "language": [ { "name": "Shell", "bytes": "824" } ], "symlink_target": "" }
from nodetraq.tests import * class TestDnsController(TestController): def test_index(self): response = self.app.get(url(controller='dns', action='index')) # Test response...
{ "content_hash": "50db722e9ffcd1c2f6ee42bd9fa0aa9a", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 70, "avg_line_length": 28, "alnum_prop": 0.673469387755102, "repo_name": "seryl/Nodetraq", "id": "27555016e75e0f7e5f4d7b2b33bb256757620e00", "size": "196", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "nodetraq/tests/functional/test_dns.py", "mode": "33188", "license": "mit", "language": [ { "name": "JavaScript", "bytes": "291106" }, { "name": "Python", "bytes": "295460" } ], "symlink_target": "" }
"""This module is deprecated. Please use :mod:`airflow.providers.docker.operators.docker_swarm`.""" import warnings from airflow.providers.docker.operators.docker_swarm import DockerSwarmOperator # noqa warnings.warn( "This module is deprecated. Please use `airflow.providers.docker.operators.docker_swarm`.", DeprecationWarning, stacklevel=2, )
{ "content_hash": "8f262407da895ca0f764a7024ef81d7e", "timestamp": "", "source": "github", "line_count": 11, "max_line_length": 99, "avg_line_length": 32.90909090909091, "alnum_prop": 0.7707182320441989, "repo_name": "danielvdende/incubator-airflow", "id": "b023da796a4c83c5f0795d440d94191548486793", "size": "1149", "binary": false, "copies": "7", "ref": "refs/heads/master", "path": "airflow/contrib/operators/docker_swarm_operator.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "25785" }, { "name": "Dockerfile", "bytes": "76693" }, { "name": "HCL", "bytes": "3786" }, { "name": "HTML", "bytes": "164512" }, { "name": "JavaScript", "bytes": "236992" }, { "name": "Jinja", "bytes": "37155" }, { "name": "Jupyter Notebook", "bytes": "2929" }, { "name": "Mako", "bytes": "1339" }, { "name": "Python", "bytes": "21824455" }, { "name": "R", "bytes": "313" }, { "name": "Shell", "bytes": "495567" }, { "name": "TypeScript", "bytes": "326556" } ], "symlink_target": "" }
from django.conf.urls import url from payments import views urlpatterns = [ url(r'^stripe/charge', views.stripe_charge_view, name='stripe'), url(r'^paypal/success', views.paypal_success_view, name='paypal_success'), url(r'^paypal/cancel', views.paypal_cancel_view, name='paypal_cancel'), ]
{ "content_hash": "59cd450c535f594765ef0c3d32bb46ab", "timestamp": "", "source": "github", "line_count": 8, "max_line_length": 78, "avg_line_length": 37.875, "alnum_prop": 0.7161716171617162, "repo_name": "pdiazv/makaw", "id": "522ba4d656fd8c2280baa33318addcc82c197787", "size": "327", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "payments/urls.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "45028" }, { "name": "HTML", "bytes": "29890" }, { "name": "JavaScript", "bytes": "16125" }, { "name": "Nginx", "bytes": "1171" }, { "name": "Python", "bytes": "56318" }, { "name": "Shell", "bytes": "5566" } ], "symlink_target": "" }
import os from datetime import datetime, timedelta from django.core.files import File from survey.services.csv_uploader import CSVUploader, UploadService from survey.tests.base_test import BaseTest from survey.models import UploadErrorLog from django.utils.timezone import utc class CSVUploaderTest(BaseTest): def setUp(self): self.filename = 'simple.csv' self.data = [["0","1","2"], ["0","1","2"], ["0","1","2"]] self.headers = ["Region", "District", "County"] def tearDown(self): os.system("rm -rf %s"%self.filename) def generate_csv_file(self, filename): data = [self.headers] data.extend(self.data) self.write_to_csv('wb', data, filename) def test_read_headers_and_data(self): self.generate_csv_file(self.filename) file = File(open(self.filename, 'rb')) headers, data = CSVUploader(file).split_content() self.assertEqual(self.headers, headers) self.assertEqual(self.data, data) class UploaderServiceTest(BaseTest): def setUp(self): self.data = [['RegionName', 'DistrictName', 'CountyName', 'Selection Probability'], ['region1', 'district1', 'county1', '0.01'], ['region2', 'district2', 'county2', '0.1']] self.write_to_csv('wb', self.data) self.filename = 'test.csv' self.file = open(self.filename, 'rb') def tearDown(self): os.system("rm -rf %s" % self.filename) def test_deletes_one_month_old_error_logs_every_time_an_instance_is_created(self): UploadErrorLog.objects.all().delete() error_location = UploadErrorLog.objects.create(model='LOCATION', filename=self.filename, error="Some errors location") error_now = UploadErrorLog.objects.create(filename=self.filename, error="Some errors now") two_months_old_error_log = UploadErrorLog.objects.create(filename=self.filename, error="Some different errors") two_months_old_error_log.created = datetime.utcnow().replace(tzinfo=utc) - timedelta(days=31) two_months_old_error_log.save() UploadService(self.file) two_months_old_error_log = UploadErrorLog.objects.filter(filename=self.filename, error="Some different errors") 
self.failIf(two_months_old_error_log) error_location = UploadErrorLog.objects.filter(model='LOCATION', filename=self.filename, error="Some errors location") self.failUnless(error_location) error_now = UploadErrorLog.objects.filter( filename=self.filename, error="Some errors now") self.failUnless(error_now) def test_error_logging(self): UploadErrorLog.objects.all().delete() uploader_service = UploadService(self.file) uploader_service.log_error(row_number=1, error="hahaha") retrieved_log = UploadErrorLog.objects.filter(filename=self.file.name, row_number=1, error="hahaha") self.assertEqual(1, retrieved_log.count()) def test_remove_trailing_name_in_headers(self): headers = ['heheName', 'somethingCode', 'hahaName', 'blablaCode', 'hihihi', 'hohoho'] self.assertEqual(['hehe','somethingCode', 'haha', 'blablaCode', 'hihihi', 'hohoho'], UploadService.remove_trailing('Name', in_array=headers)) self.assertEqual(['hehe', 'haha', 'hihihi', 'hohoho'], UploadService.remove_trailing('Name', in_array=headers, exclude='Code')) self.assertEqual(['hehe', 'haha', 'hihihi', 'hohoho'], UploadService.remove_trailing('Name', headers, 'Code'))
{ "content_hash": "0a116fb4750b6e63b1d0b434af960de2", "timestamp": "", "source": "github", "line_count": 84, "max_line_length": 126, "avg_line_length": 42.82142857142857, "alnum_prop": 0.6530442035029191, "repo_name": "unicefuganda/mics", "id": "17d9063c09a41dee5c98eef579a027ff1755b550", "size": "3597", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "survey/tests/services/test_csv_uploader.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "37725" }, { "name": "JavaScript", "bytes": "390607" }, { "name": "Python", "bytes": "5209696" }, { "name": "Shell", "bytes": "1277" } ], "symlink_target": "" }
"""\ Move files from "uploading" to "in progress". Example: %(prog)s --username ACCESS_KEY_ID --password SECRET_ACCESS_KEY \\ --input check_files.log https://www.encodeproject.org """ import json import requests import sys try: from urllib.parse import urljoin except ImportError: from urlparse import urljoin EPILOG = __doc__ HEADERS = { 'Accept': 'application/json', 'Content-Type': 'application/json', } def run(fp, url, username, password): for line in fp.readlines(): item, result, errors = json.loads(line) if errors: print('skipped %s: %s' % (item['@id'], errors)) continue item_url = urljoin(url, item['@id']) r = requests.get( item_url + '?frame=object&datastore=database', auth=(username, password), headers=HEADERS, ) if r.status_code != 200: print('ERROR retrieving %s: %s %s' % (item['@id'], r.status_code, r.reason)) print(r.text) continue if r.json()['status'] != 'uploading': print('skipped %s: status %r is not "uploading"' % (item['@id'], r.json()['status'])) continue data = { 'status': 'in progress', 'file_size': result['file_size'], } if 'content_md5sum' in result: data['content_md5sum'] = result['content_md5sum'] r = requests.patch( item_url, data=json.dumps(data), auth=(username, password), headers=HEADERS, ) if r.status_code != 200: print('ERROR patching %s: %s %s' % (item['@id'], r.status_code, r.reason)) print(r.text) continue print('patched %s' % item['@id']) def main(): import argparse parser = argparse.ArgumentParser( description="Update file status", epilog=EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument('--username', '-u', default='', help="HTTP username (access_key_id)") parser.add_argument('--password', '-p', default='', help="HTTP password (secret_access_key)") parser.add_argument( '--input', '-i', type=argparse.FileType('r'), default=sys.stdin, help="Input file.") parser.add_argument('url', help="server to post to") args = parser.parse_args() run(args.input, args.url, args.username, args.password) if __name__ 
== '__main__': main()
{ "content_hash": "fc5d5eb17056997602811383b221f034", "timestamp": "", "source": "github", "line_count": 80, "max_line_length": 97, "avg_line_length": 31, "alnum_prop": 0.5649193548387097, "repo_name": "kidaa/encoded", "id": "04e039750aa61c93f1e818bd75889a210b15671e", "size": "2480", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/encoded/commands/update_file_status.py", "mode": "33188", "license": "mit", "language": [ { "name": "ActionScript", "bytes": "741" }, { "name": "CSS", "bytes": "124947" }, { "name": "Cucumber", "bytes": "17721" }, { "name": "HTML", "bytes": "371787" }, { "name": "JavaScript", "bytes": "550757" }, { "name": "Python", "bytes": "863661" }, { "name": "Ruby", "bytes": "992" }, { "name": "Shell", "bytes": "2814" } ], "symlink_target": "" }
from setuptools import find_packages from setuptools import setup REQUIRED_PACKAGES = [ 'torch==1.11.0', 'transformers', 'datasets', 'tqdm' ] setup( name='trainer', version='0.1', install_requires=REQUIRED_PACKAGES, setup_requires=REQUIRED_PACKAGES, packages=find_packages(), include_package_data=True, description='AI Platform | Training | PyTorch | Text Classification | Python Package' )
{ "content_hash": "5152f17a0191cb2235c455d887fd9c9f", "timestamp": "", "source": "github", "line_count": 19, "max_line_length": 89, "avg_line_length": 23, "alnum_prop": 0.6864988558352403, "repo_name": "GoogleCloudPlatform/ai-platform-samples", "id": "97b25842e509cacb75ed002665ad6b85e7c4cef0", "size": "1115", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "ai-platform/tutorials/unofficial/pytorch-on-google-cloud/sentiment_classification/python_package/setup.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "19249" }, { "name": "HCL", "bytes": "8273" }, { "name": "Jupyter Notebook", "bytes": "68378139" }, { "name": "Python", "bytes": "338281" }, { "name": "Shell", "bytes": "124415" } ], "symlink_target": "" }
from django.template.defaultfilters import slugify from django.contrib.auth.models import User from django.core.validators import validate_email, ValidationError from django.db.models import EmailField from django.utils.http import urlencode from django.contrib.auth import REDIRECT_FIELD_NAME from django.utils import importlib from emailconfirmation.models import EmailAddress import app_settings def get_login_redirect_url(request, fallback=app_settings.LOGIN_REDIRECT_URL): """ Returns a url to redirect to after the login """ url = request.REQUEST.get(REDIRECT_FIELD_NAME) or fallback return url def passthrough_login_redirect_url(request, url): assert url.find("?") < 0 # TODO: Handle this case properly next = get_login_redirect_url(request, fallback=None) if next: url = url + '?' + urlencode({ REDIRECT_FIELD_NAME: next }) return url def generate_unique_username(txt): username = slugify(txt.split('@')[0]) max_length = User._meta.get_field('username').max_length i = 0 while True: try: if i: pfx = str(i + 1) else: pfx = '' ret = username[0:max_length - len(pfx)] + pfx User.objects.get(username=ret) i += 1 except User.DoesNotExist: return ret def valid_email_or_none(email): ret = None try: if email: validate_email(email) if len(email) <= EmailField().max_length: ret = email except ValidationError: pass return ret def email_address_exists(email, exclude_user=None): emailaddresses = EmailAddress.objects if exclude_user: emailaddresses = emailaddresses.exclude(user=exclude_user) ret = emailaddresses.filter(email__iexact=email).exists() if not ret: users = User.objects if exclude_user: users = users.exclude(user=exclude_user) ret = users.filter(email__iexact=email).exists() return ret def import_attribute(path): assert isinstance(path, str) pkg, attr = path.rsplit('.',1) ret = getattr(importlib.import_module(pkg), attr) return ret def import_callable(path_or_callable): if not hasattr(path_or_callable, '__call__'): ret = import_attribute(path_or_callable) else: ret = 
path_or_callable return ret
{ "content_hash": "d5110455b31bc1466bf0afc465d45cd2", "timestamp": "", "source": "github", "line_count": 85, "max_line_length": 69, "avg_line_length": 28.51764705882353, "alnum_prop": 0.6377887788778878, "repo_name": "1st/django-allauth", "id": "4df5197f15bd99bf6c2439a82b5751b5bde22a79", "size": "2424", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "allauth/utils.py", "mode": "33188", "license": "mit", "language": [], "symlink_target": "" }
import logging from redis import StrictRedis from django.conf import settings logger = logging.getLogger('django.request') redis_connection = StrictRedis.from_url(settings.REDIS_URL) def try_redis_call(method_name, *args, **kwargs): method = getattr(redis_connection, method_name) default = kwargs.pop('default', None) assert not kwargs try: return method(*args) except: # Ignore the error. Redis is just a cache, so if it's down or # something we'd rather continue than fail the whole request. params = "%s, %s" % (method_name, str(args)) logger.error('Redis call failed: %s' % params, exc_info=True) return default
{ "content_hash": "675fb8388e078468e1f3c1a35c835c8e", "timestamp": "", "source": "github", "line_count": 20, "max_line_length": 69, "avg_line_length": 34.4, "alnum_prop": 0.6773255813953488, "repo_name": "reverie/jotleaf.com", "id": "8f2887f2e8fec94693f30537b43ed669f41fa485", "size": "688", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "jotleaf/main/redis_helpers.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "260767" }, { "name": "CoffeeScript", "bytes": "236120" }, { "name": "HTML", "bytes": "133720" }, { "name": "JavaScript", "bytes": "279484" }, { "name": "PHP", "bytes": "865" }, { "name": "Python", "bytes": "442635" }, { "name": "Shell", "bytes": "1026" } ], "symlink_target": "" }
""" Django settings for server project. Generated by 'django-admin startproject' using Django 1.10.1. For more information on this file, see https://docs.djangoproject.com/en/1.10/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.10/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'g33t$l!mph9xkd&(g)l0cv3(!&2a2hw(fsz2ejfq=rsebvu_^h' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'rest_framework', 'app', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'server.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'server.wsgi.application' # Database # https://docs.djangoproject.com/en/1.10/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 
'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.10/howto/static-files/ STATIC_URL = '/static/'
{ "content_hash": "d57fb8719a5957e18b1dbf031d00cfe4", "timestamp": "", "source": "github", "line_count": 123, "max_line_length": 91, "avg_line_length": 25.447154471544714, "alnum_prop": 0.6846645367412141, "repo_name": "Likegram/study_run", "id": "e4c542fe3cdf10735c11570c2dbfc941db54317e", "size": "3130", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "server/server/settings.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "14328" } ], "symlink_target": "" }
def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('control', parent_package, top_path) config.add_subpackage('tests') return config
{ "content_hash": "667c1e2cb19611e252c0a68b84d574d3", "timestamp": "", "source": "github", "line_count": 5, "max_line_length": 63, "avg_line_length": 45.2, "alnum_prop": 0.7389380530973452, "repo_name": "roryyorke/python-control", "id": "3ed3e3a7e2f171af07a1f281be59802375a951d7", "size": "226", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "control/setup.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "126" }, { "name": "Python", "bytes": "1020841" } ], "symlink_target": "" }
import sys print('using print') sys.stdout.write('using sys.stdout.write') sys.stderr.write('using sys.stderr.write') import logging logging.debug('Debug') logging.info('Info') logging.warning('Warning') logging.error('Error') # # HTML outputs # # Using `pandas`. Here we find two representations: both text and HTML. import pandas as pd pd.DataFrame([4]) from IPython.display import display display(pd.DataFrame([5])) display(pd.DataFrame([6])) # # Images # %matplotlib inline # First plot from matplotlib import pyplot as plt import numpy as np w, h = 3, 3 data = np.zeros((h, w, 3), dtype=np.uint8) data[0,:] = [0,255,0] data[1,:] = [0,0,255] data[2,:] = [0,255,0] data[1:3,1:3] = [255, 0, 0] plt.imshow(data) plt.axis('off') plt.show() # Second plot data[1:3,1:3] = [255, 255, 0] plt.imshow(data) plt.axis('off') plt.show() # # Errors undefined_variable
{ "content_hash": "f7b8ae7fe05df38a5c411ed1017d1e53", "timestamp": "", "source": "github", "line_count": 47, "max_line_length": 71, "avg_line_length": 18.4468085106383, "alnum_prop": 0.6839677047289504, "repo_name": "mwouts/jupytext", "id": "76ef61020be2fe8e1048851c27d475cba458a8ed", "size": "1179", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "tests/notebooks/mirror/ipynb_to_script_vim_folding_markers/text_outputs_and_images.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "752" }, { "name": "C#", "bytes": "2752" }, { "name": "C++", "bytes": "26237" }, { "name": "Clojure", "bytes": "7920" }, { "name": "F#", "bytes": "1449" }, { "name": "Gnuplot", "bytes": "2067" }, { "name": "Groovy", "bytes": "6195" }, { "name": "Haskell", "bytes": "930" }, { "name": "Java", "bytes": "1670" }, { "name": "JavaScript", "bytes": "21654" }, { "name": "Julia", "bytes": "25322" }, { "name": "Jupyter Notebook", "bytes": "630468" }, { "name": "MATLAB", "bytes": "1316" }, { "name": "Makefile", "bytes": "581" }, { "name": "OCaml", "bytes": "1049" }, { "name": "PowerShell", "bytes": "8962" }, { "name": "Prolog", "bytes": "12028" }, { "name": "Python", "bytes": "832380" }, { "name": "R", "bytes": "6011" }, { "name": "RobotFramework", "bytes": "1275" }, { "name": "Rust", "bytes": "15459" }, { "name": "Sage", "bytes": "418" }, { "name": "Scala", "bytes": "1000" }, { "name": "Scheme", "bytes": "54543" }, { "name": "Shell", "bytes": "1014" }, { "name": "Tcl", "bytes": "791" }, { "name": "TypeScript", "bytes": "17816" }, { "name": "q", "bytes": "1866" } ], "symlink_target": "" }
"""slycot_convert_test.py - test SLICOT-based conversions RMM, 30 Mar 2011 (based on TestSlycot from v0.4a) """ import numpy as np import pytest from control import bode, rss, ss, tf from control.tests.conftest import slycotonly numTests = 5 maxStates = 10 maxI = 1 maxO = 1 @pytest.fixture(scope="module") def fixedseed(): """Get consistent test results""" np.random.seed(0) @slycotonly @pytest.mark.usefixtures("fixedseed") class TestSlycot: """Test Slycot system conversion TestSlycot compares transfer function and state space conversions for various numbers of inputs,outputs and states. 1. Usually passes for SISO systems of any state dim, occasonally, there will be a dimension mismatch if the original randomly generated ss system is not minimal because td04ad returns a minimal system. 2. For small systems with many inputs, n<<m, the tests fail because td04ad returns a minimal ss system which has fewer states than the original system. It is typical for systems with many more inputs than states to have extraneous states. 3. For systems with larger dimensions, n~>5 and with 2 or more outputs the conversion to statespace (td04ad) intermittently results in an equivalent realization of higher order than the original tf order. We think this has to do with minimu realization tolerances in the Fortran. The algorithm doesn't recognize that two denominators are identical and so it creates a system with nearly duplicate eigenvalues and double the state dimension. This should not be a problem in the python-control usage because the common_den() method finds repeated roots within a tolerance that we specify. Matlab: Matlab seems to force its statespace system output to have order less than or equal to the order of denominators provided, avoiding the problem of very large state dimension we describe in 3. 
It does however, still have similar problems with pole/zero cancellation such as we encounter in 2, where a statespace system may have fewer states than the original order of transfer function. """ @pytest.fixture def verbose(self): """Set to True and switch off pytest stdout capture to print info""" return False @pytest.mark.parametrize("testNum", np.arange(numTests) + 1) @pytest.mark.parametrize("inputs", np.arange(maxI) + 1) @pytest.mark.parametrize("outputs", np.arange(maxO) + 1) @pytest.mark.parametrize("states", np.arange(maxStates) + 1) def testTF(self, states, outputs, inputs, testNum, verbose): """Test transfer function conversion. Directly tests the functions tb04ad and td04ad through direct comparison of transfer function coefficients. Similar to convert_test, but tests at a lower level. """ from slycot import tb04ad, td04ad ssOriginal = rss(states, outputs, inputs) if (verbose): print('====== Original SS ==========') print(ssOriginal) print('states=', states) print('inputs=', inputs) print('outputs=', outputs) tfOriginal_Actrb, tfOriginal_Bctrb, tfOriginal_Cctrb,\ tfOrigingal_nctrb, tfOriginal_index,\ tfOriginal_dcoeff, tfOriginal_ucoeff =\ tb04ad(states, inputs, outputs, ssOriginal.A, ssOriginal.B, ssOriginal.C, ssOriginal.D, tol1=0.0) ssTransformed_nr, ssTransformed_A, ssTransformed_B,\ ssTransformed_C, ssTransformed_D\ = td04ad('R', inputs, outputs, tfOriginal_index, tfOriginal_dcoeff, tfOriginal_ucoeff, tol=0.0) tfTransformed_Actrb, tfTransformed_Bctrb,\ tfTransformed_Cctrb, tfTransformed_nctrb,\ tfTransformed_index, tfTransformed_dcoeff,\ tfTransformed_ucoeff = tb04ad( ssTransformed_nr, inputs, outputs, ssTransformed_A, ssTransformed_B, ssTransformed_C, ssTransformed_D, tol1=0.0) # print('size(Trans_A)=',ssTransformed_A.shape) if (verbose): print('===== Transformed SS ==========') print(ss(ssTransformed_A, ssTransformed_B, ssTransformed_C, ssTransformed_D)) # print('Trans_nr=',ssTransformed_nr # print('tfOrig_index=',tfOriginal_index) # 
print('tfOrig_ucoeff=',tfOriginal_ucoeff) # print('tfOrig_dcoeff=',tfOriginal_dcoeff) # print('tfTrans_index=',tfTransformed_index) # print('tfTrans_ucoeff=',tfTransformed_ucoeff) # print('tfTrans_dcoeff=',tfTransformed_dcoeff) # Compare the TF directly, must match # numerators # TODO test failing! # np.testing.assert_array_almost_equal( # tfOriginal_ucoeff, tfTransformed_ucoeff, decimal=3) # denominators # np.testing.assert_array_almost_equal( # tfOriginal_dcoeff, tfTransformed_dcoeff, decimal=3) @pytest.mark.parametrize("testNum", np.arange(numTests) + 1) @pytest.mark.parametrize("inputs", np.arange(1) + 1) # SISO only @pytest.mark.parametrize("outputs", np.arange(1) + 1) # SISO only @pytest.mark.parametrize("states", np.arange(maxStates) + 1) def testFreqResp(self, states, outputs, inputs, testNum, verbose): """Compare bode responses. Compare the bode reponses of the SS systems and TF systems to the original SS. They generally are different realizations but have same freq resp. Currently this test may only be applied to SISO systems. 
""" from slycot import tb04ad, td04ad ssOriginal = rss(states, outputs, inputs) tfOriginal_Actrb, tfOriginal_Bctrb, tfOriginal_Cctrb,\ tfOrigingal_nctrb, tfOriginal_index,\ tfOriginal_dcoeff, tfOriginal_ucoeff = tb04ad( states, inputs, outputs, ssOriginal.A, ssOriginal.B, ssOriginal.C, ssOriginal.D, tol1=0.0) ssTransformed_nr, ssTransformed_A, ssTransformed_B,\ ssTransformed_C, ssTransformed_D\ = td04ad('R', inputs, outputs, tfOriginal_index, tfOriginal_dcoeff, tfOriginal_ucoeff, tol=0.0) tfTransformed_Actrb, tfTransformed_Bctrb,\ tfTransformed_Cctrb, tfTransformed_nctrb,\ tfTransformed_index, tfTransformed_dcoeff,\ tfTransformed_ucoeff = tb04ad( ssTransformed_nr, inputs, outputs, ssTransformed_A, ssTransformed_B, ssTransformed_C, ssTransformed_D, tol1=0.0) numTransformed = np.array(tfTransformed_ucoeff) denTransformed = np.array(tfTransformed_dcoeff) numOriginal = np.array(tfOriginal_ucoeff) denOriginal = np.array(tfOriginal_dcoeff) ssTransformed = ss(ssTransformed_A, ssTransformed_B, ssTransformed_C, ssTransformed_D) for inputNum in range(inputs): for outputNum in range(outputs): [ssOriginalMag, ssOriginalPhase, freq] =\ bode(ssOriginal, plot=False) [tfOriginalMag, tfOriginalPhase, freq] =\ bode(tf(numOriginal[outputNum][inputNum], denOriginal[outputNum]), plot=False) [ssTransformedMag, ssTransformedPhase, freq] =\ bode(ssTransformed, freq, plot=False) [tfTransformedMag, tfTransformedPhase, freq] =\ bode(tf(numTransformed[outputNum][inputNum], denTransformed[outputNum]), freq, plot=False) # print('numOrig=', # numOriginal[outputNum][inputNum]) # print('denOrig=', # denOriginal[outputNum]) # print('numTrans=', # numTransformed[outputNum][inputNum]) # print('denTrans=', # denTransformed[outputNum]) np.testing.assert_array_almost_equal( ssOriginalMag, tfOriginalMag, decimal=3) np.testing.assert_array_almost_equal( ssOriginalPhase, tfOriginalPhase, decimal=3) np.testing.assert_array_almost_equal( ssOriginalMag, ssTransformedMag, decimal=3) 
np.testing.assert_array_almost_equal( ssOriginalPhase, ssTransformedPhase, decimal=3) np.testing.assert_array_almost_equal( tfOriginalMag, tfTransformedMag, decimal=3) np.testing.assert_array_almost_equal( tfOriginalPhase, tfTransformedPhase, decimal=2)
{ "content_hash": "86089b209d0e0533e03ed52c1c6d60ed", "timestamp": "", "source": "github", "line_count": 213, "max_line_length": 76, "avg_line_length": 42.83568075117371, "alnum_prop": 0.60861464270057, "repo_name": "murrayrm/python-control", "id": "edd355b3bc4a9a1cd48a3b8e85de67007ef69c95", "size": "9124", "binary": false, "copies": "2", "ref": "refs/heads/main", "path": "control/tests/slycot_convert_test.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "126" }, { "name": "Python", "bytes": "1562387" } ], "symlink_target": "" }
import optparse import os import time import unittest from webkitpy.common.system import executive_mock from webkitpy.common.system.executive_mock import MockExecutive2 from webkitpy.common.system.systemhost_mock import MockSystemHost from webkitpy.layout_tests.port import android from webkitpy.layout_tests.port import port_testcase from webkitpy.layout_tests.port import driver from webkitpy.layout_tests.port import driver_unittest from webkitpy.tool.mocktool import MockOptions # Type of tombstone test which the mocked Android Debug Bridge should execute. VALID_TOMBSTONE_TEST_TYPE = 0 NO_FILES_TOMBSTONE_TEST_TYPE = 1 NO_PERMISSION_TOMBSTONE_TEST_TYPE = 2 INVALID_ENTRY_TOMBSTONE_TEST_TYPE = 3 INVALID_ENTRIES_TOMBSTONE_TEST_TYPE = 4 # Any "adb" commands will be interpret by this class instead of executing actual # commands on the file system, which we don't want to do. class MockAndroidDebugBridge: def __init__(self, device_count): self._device_count = device_count self._last_command = None self._tombstone_output = None # Local public methods. 
def run_command(self, args): self._last_command = ' '.join(args) if args[0].startswith('path'): if args[0] == 'path1': return '' if args[0] == 'path2': return 'version 1.1' return 'version 1.0' if args[0] == 'adb': if len(args) > 1 and args[1] == 'version': return 'version 1.0' if len(args) > 1 and args[1] == 'devices': return self._get_device_output() if len(args) > 3 and args[3] == 'command': return 'mockoutput' if len(args) > 3 and args[3] == 'install': return 'Success' if len(args) > 3 and args[3] in ('push', 'wait-for-device'): return 'mockoutput' if len(args) > 5 and args[5] == 'battery': return 'level: 99' if len(args) > 5 and args[5] == 'force-stop': return 'mockoutput' if len(args) > 5 and args[5] == 'power': return 'mScreenOn=true' if len(args) > 5 and args[4] == 'cat' and args[5].find('tombstone') != -1: return 'tombstone content' if len(args) > 6 and args[4] == 'ls' and args[6].find('tombstone') != -1: assert self._tombstone_output, 'Tombstone output needs to have been set by the test.' return self._tombstone_output return '' def last_command(self): return self._last_command def set_tombstone_output(self, output): self._tombstone_output = output # Local private methods. 
def _get_device_output(self): serials = ['123456789ABCDEF0', '123456789ABCDEF1', '123456789ABCDEF2', '123456789ABCDEF3', '123456789ABCDEF4', '123456789ABCDEF5'] output = 'List of devices attached\n' for serial in serials[:self._device_count]: output += '%s\tdevice\n' % serial return output class AndroidCommandsTest(unittest.TestCase): def setUp(self): android.AndroidCommands._adb_command_path = None android.AndroidCommands._adb_command_path_options = ['adb'] def make_executive(self, device_count): self._mock_executive = MockAndroidDebugBridge(device_count) return MockExecutive2(run_command_fn=self._mock_executive.run_command) def make_android_commands(self, device_count, serial): return android.AndroidCommands(self.make_executive(device_count), serial, debug_logging=False) # The used adb command should include the device's serial number, and get_serial() should reflect this. def test_adb_command_and_get_serial(self): android_commands = self.make_android_commands(1, '123456789ABCDEF0') self.assertEquals(['adb', '-s', '123456789ABCDEF0'], android_commands.adb_command()) self.assertEquals('123456789ABCDEF0', android_commands.get_serial()) # Running an adb command should return the command's output. def test_run_command(self): android_commands = self.make_android_commands(1, '123456789ABCDEF0') output = android_commands.run(['command']) self.assertEquals('adb -s 123456789ABCDEF0 command', self._mock_executive.last_command()) self.assertEquals('mockoutput', output) # Test that the convenience methods create the expected commands. 
def test_convenience_methods(self): android_commands = self.make_android_commands(1, '123456789ABCDEF0') android_commands.file_exists('/some_directory') self.assertEquals('adb -s 123456789ABCDEF0 shell ls -d /some_directory', self._mock_executive.last_command()) android_commands.push('foo', 'bar') self.assertEquals('adb -s 123456789ABCDEF0 push %s bar' % os.path.realpath('foo'), self._mock_executive.last_command()) android_commands.pull('bar', 'foo') self.assertEquals('adb -s 123456789ABCDEF0 pull bar foo', self._mock_executive.last_command()) class AndroidPortTest(port_testcase.PortTestCase): port_name = 'android' port_maker = android.AndroidPort def make_port(self, **kwargs): port = super(AndroidPortTest, self).make_port(**kwargs) port._mock_adb = MockAndroidDebugBridge(kwargs.get('device_count', 1)) port._executive = MockExecutive2(run_command_fn=port._mock_adb.run_command) return port def test_check_build(self): host = MockSystemHost() port = self.make_port(host=host, options=MockOptions(child_processes=1)) host.filesystem.exists = lambda p: True port.check_build(needs_http=True, printer=port_testcase.FakePrinter()) def test_check_sys_deps(self): # FIXME: Do something useful here, but testing the full logic would be hard. pass def make_wdiff_available(self, port): port._wdiff_available = True port._host_port._wdiff_available = True # Test that content_shell currently is the only supported driver. def test_non_content_shell_driver(self): self.assertRaises(self.make_port, options=optparse.Values({'driver_name': 'foobar'})) # Test that the number of child processes to create depends on the devices. 
def test_default_child_processes(self): port_default = self.make_port(device_count=5) port_fixed_device = self.make_port(device_count=5, options=optparse.Values({'adb_device': '123456789ABCDEF9'})) self.assertEquals(5, port_default.default_child_processes()) self.assertEquals(1, port_fixed_device.default_child_processes()) # Test that an HTTP server indeed is required by Android (as we serve all tests over them) def test_requires_http_server(self): self.assertTrue(self.make_port(device_count=1).requires_http_server()) # Tests the default timeouts for Android, which are different than the rest of Chromium. def test_default_timeout_ms(self): self.assertEqual(self.make_port(options=optparse.Values({'configuration': 'Release'})).default_timeout_ms(), 10000) self.assertEqual(self.make_port(options=optparse.Values({'configuration': 'Debug'})).default_timeout_ms(), 10000) class ChromiumAndroidDriverTest(unittest.TestCase): def setUp(self): self._mock_adb = MockAndroidDebugBridge(1) self._mock_executive = MockExecutive2(run_command_fn=self._mock_adb.run_command) android_commands = android.AndroidCommands(self._mock_executive, '123456789ABCDEF0', debug_logging=False) self._port = android.AndroidPort(MockSystemHost(executive=self._mock_executive), 'android') self._driver = android.ChromiumAndroidDriver(self._port, worker_number=0, pixel_tests=True, driver_details=android.ContentShellDriverDetails(), android_devices=self._port._devices) # The cmd_line() method in the Android port is used for starting a shell, not the test runner. def test_cmd_line(self): self.assertEquals(['adb', '-s', '123456789ABCDEF0', 'shell'], self._driver.cmd_line(False, [])) # Test that the Chromium Android port can interpret Android's shell output. 
def test_read_prompt(self): self._driver._server_process = driver_unittest.MockServerProcess(lines=['root@android:/ # ']) self.assertIsNone(self._driver._read_prompt(time.time() + 1)) self._driver._server_process = driver_unittest.MockServerProcess(lines=['$ ']) self.assertIsNone(self._driver._read_prompt(time.time() + 1)) class ChromiumAndroidDriverTwoDriversTest(unittest.TestCase): # Test two drivers getting the right serial numbers, and that we disregard per-test arguments. def test_two_drivers(self): mock_adb = MockAndroidDebugBridge(2) mock_executive = MockExecutive2(run_command_fn=mock_adb.run_command) port = android.AndroidPort(MockSystemHost(executive=mock_executive), 'android') driver0 = android.ChromiumAndroidDriver(port, worker_number=0, pixel_tests=True, driver_details=android.ContentShellDriverDetails(), android_devices=port._devices) driver1 = android.ChromiumAndroidDriver(port, worker_number=1, pixel_tests=True, driver_details=android.ContentShellDriverDetails(), android_devices=port._devices) self.assertEqual(['adb', '-s', '123456789ABCDEF0', 'shell'], driver0.cmd_line(True, [])) self.assertEqual(['adb', '-s', '123456789ABCDEF1', 'shell'], driver1.cmd_line(True, ['anything'])) class ChromiumAndroidTwoPortsTest(unittest.TestCase): # Test that the driver's command line indeed goes through to the driver. 
def test_options_with_two_ports(self): mock_adb = MockAndroidDebugBridge(2) mock_executive = MockExecutive2(run_command_fn=mock_adb.run_command) port0 = android.AndroidPort(MockSystemHost(executive=mock_executive), 'android', options=MockOptions(additional_driver_flag=['--foo=bar'])) port1 = android.AndroidPort(MockSystemHost(executive=mock_executive), 'android', options=MockOptions(driver_name='content_shell')) self.assertEqual(1, port0.driver_cmd_line().count('--foo=bar')) self.assertEqual(0, port1.driver_cmd_line().count('--create-stdin-fifo')) class ChromiumAndroidDriverTombstoneTest(unittest.TestCase): EXPECTED_STACKTRACE = '-rw------- 1000 1000 3604 2013-11-19 16:16 tombstone_10\ntombstone content' def setUp(self): self._mock_adb = MockAndroidDebugBridge(1) self._mock_executive = MockExecutive2(run_command_fn=self._mock_adb.run_command) self._port = android.AndroidPort(MockSystemHost(executive=self._mock_executive), 'android') self._driver = android.ChromiumAndroidDriver(self._port, worker_number=0, pixel_tests=True, driver_details=android.ContentShellDriverDetails(), android_devices=self._port._devices) self._errors = [] self._driver._log_error = lambda msg: self._errors.append(msg) self._warnings = [] self._driver._log_warning = lambda msg: self._warnings.append(msg) # Tests that we return an empty string and log an error when no tombstones could be found. def test_no_tombstones_found(self): self._mock_adb.set_tombstone_output('/data/tombstones/tombstone_*: No such file or directory') stacktrace = self._driver._get_last_stacktrace() self.assertEqual(1, len(self._errors)) self.assertEqual('The driver crashed, but no tombstone found!', self._errors[0]) self.assertEqual('', stacktrace) # Tests that an empty string will be returned if we cannot read the tombstone files. 
def test_insufficient_tombstone_permission(self): self._mock_adb.set_tombstone_output('/data/tombstones/tombstone_*: Permission denied') stacktrace = self._driver._get_last_stacktrace() self.assertEqual(1, len(self._errors)) self.assertEqual('The driver crashed, but we could not read the tombstones!', self._errors[0]) self.assertEqual('', stacktrace) # Tests that invalid "ls" output will throw a warning when listing the tombstone files. def test_invalid_tombstone_list_entry_format(self): self._mock_adb.set_tombstone_output('-rw------- 1000 1000 3604 2013-11-19 16:15 tombstone_00\n' + '-- invalid entry --\n' + '-rw------- 1000 1000 3604 2013-11-19 16:16 tombstone_10') stacktrace = self._driver._get_last_stacktrace() self.assertEqual(1, len(self._warnings)) self.assertEqual(ChromiumAndroidDriverTombstoneTest.EXPECTED_STACKTRACE, stacktrace) # Tests the case in which we can't find any valid tombstone entries at all. The tombstone # output used for the mock misses the permission part. def test_invalid_tombstone_list(self): self._mock_adb.set_tombstone_output('1000 1000 3604 2013-11-19 16:15 tombstone_00\n' + '1000 1000 3604 2013-11-19 16:15 tombstone_01\n' + '1000 1000 3604 2013-11-19 16:15 tombstone_02') stacktrace = self._driver._get_last_stacktrace() self.assertEqual(3, len(self._warnings)) self.assertEqual(1, len(self._errors)) self.assertEqual('The driver crashed, but we could not find any valid tombstone!', self._errors[0]) self.assertEqual('', stacktrace) # Tests that valid tombstone listings will return the contents of the most recent file. 
def test_read_valid_tombstone_file(self): self._mock_adb.set_tombstone_output('-rw------- 1000 1000 3604 2013-11-19 16:15 tombstone_00\n' + '-rw------- 1000 1000 3604 2013-11-19 16:16 tombstone_10\n' + '-rw------- 1000 1000 3604 2013-11-19 16:15 tombstone_02') stacktrace = self._driver._get_last_stacktrace() self.assertEqual(0, len(self._warnings)) self.assertEqual(0, len(self._errors)) self.assertEqual(ChromiumAndroidDriverTombstoneTest.EXPECTED_STACKTRACE, stacktrace)
{ "content_hash": "6023ce7b786d5f2a36b6bf8bbd3653af", "timestamp": "", "source": "github", "line_count": 299, "max_line_length": 159, "avg_line_length": 48.39799331103679, "alnum_prop": 0.6548268951696496, "repo_name": "heke123/chromium-crosswalk", "id": "42a052d9b34b1674d5c3050e2ce2d928506aeb06", "size": "15998", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/android_unittest.py", "mode": "33188", "license": "bsd-3-clause", "language": [], "symlink_target": "" }
{ 'name': 'Audit Trail', 'version': '1.0', 'category': 'Tools', 'description': """ This module lets administrator track every user operation on all the objects of the system. =========================================================================================== The administrator can subscribe to rules for read, write and delete on objects and can check logs. """, 'author': 'OpenERP SA', 'website': 'http://www.openerp.com', 'depends': ['base'], 'data': [ 'wizard/audittrail_view_log_view.xml', 'audittrail_view.xml', 'security/ir.model.access.csv', ], 'demo': ['audittrail_demo.xml'], 'installable': True, 'auto_install': False, 'images': ['images/audittrail1.jpeg','images/audittrail2.jpeg','images/audittrail3.jpeg'], } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
{ "content_hash": "7bead34e680b948f1cc2cc803cdcdb98", "timestamp": "", "source": "github", "line_count": 25, "max_line_length": 94, "avg_line_length": 35.12, "alnum_prop": 0.5728929384965832, "repo_name": "ntiufalara/openerp7", "id": "41958c84f66f96f68e8ff7ea93e3fbdc7e59970a", "size": "1858", "binary": false, "copies": "55", "ref": "refs/heads/master", "path": "openerp/addons/audittrail/__openerp__.py", "mode": "33261", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "9611" }, { "name": "C#", "bytes": "93691" }, { "name": "C++", "bytes": "108790" }, { "name": "CSS", "bytes": "583265" }, { "name": "Groff", "bytes": "8138" }, { "name": "HTML", "bytes": "125159" }, { "name": "JavaScript", "bytes": "5109152" }, { "name": "Makefile", "bytes": "14036" }, { "name": "NSIS", "bytes": "14114" }, { "name": "PHP", "bytes": "14033" }, { "name": "Python", "bytes": "9373763" }, { "name": "Ruby", "bytes": "220" }, { "name": "Shell", "bytes": "6430" }, { "name": "XSLT", "bytes": "156761" } ], "symlink_target": "" }
import logging import _socket import types import os from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, ENOTCONN, ESHUTDOWN, EINTR, EISCONN, ENOENT, EAGAIN import _io from concurrence import Tasklet, FileDescriptorEvent, TIMEOUT_CURRENT from concurrence.io import IOStream DEFAULT_BACKLOG = 512 XMOD = 8 _interceptor = None class Socket(IOStream): log = logging.getLogger('Socket') __slots__ = ['socket', 'fd', '_readable', '_writable', 'state'] STATE_INIT = 0 STATE_LISTENING = 1 STATE_CONNECTING = 2 STATE_CONNECTED = 3 STATE_CLOSING = 4 STATE_CLOSED = 5 _x = 0 def __init__(self, socket, state = STATE_INIT): """don't call directly pls use one of the provided classmethod to create a socket""" self.socket = socket if _socket.AF_INET == socket.family: #always set the nodelay option on tcp sockets. This turns off the Nagle algorithm #we don't need this because in concurrence we are always buffering ourselves #before sending out data, so no need to let the tcp stack do it again and possibly delay #sending try: self.socket.setsockopt(_socket.IPPROTO_TCP, _socket.TCP_NODELAY, 1) except: self.log.warn("could not set TCP_NODELAY") #concurrence sockets are always non-blocking, this is the whole idea :-) : self.socket.setblocking(0) self.fd = self.socket.fileno() self._readable = None #will be created lazily self._writable = None #will be created lazily self.state = state @classmethod def set_interceptor(cls, interceptor): global _interceptor _interceptor = interceptor @classmethod def from_address(cls, addr): """Creates a new socket from the given address. If the addr is a tuple (host, port) a normal tcp socket is assumed. 
if addr is a string, a UNIX Domain socket is assumed""" if _interceptor is not None: return _interceptor(addr) elif type(addr) == types.StringType: return cls(_socket.socket(_socket.AF_UNIX, _socket.SOCK_STREAM)) else: return cls(_socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)) @classmethod def new(cls): return cls(_socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)) @classmethod def server(cls, addr, backlog = DEFAULT_BACKLOG, reuse_address = True): s = cls.from_address(addr) s.set_reuse_address(reuse_address) s.bind(addr) s.listen(backlog) return s @classmethod def connect(cls, addr, timeout = TIMEOUT_CURRENT): """creates a new socket and connects it to the given address. returns the connected socket""" socket = cls.from_address(addr) socket._connect(addr, timeout) return socket @classmethod def from_file_descriptor(cls, fd, socket_family = _socket.AF_UNIX, socket_type = _socket.SOCK_STREAM, socket_state = STATE_INIT): return cls(_socket.fromfd(fd, socket_family, socket_type), socket_state) def _get_readable(self): if self._readable is None: self._readable = FileDescriptorEvent(self.fd, 'r') return self._readable def _set_readable(self, readable): self._readable = readable readable = property(_get_readable, _set_readable) def _get_writable(self): if self._writable is None: self._writable = FileDescriptorEvent(self.fd, 'w') return self._writable def _set_writable(self, writable): self._writable = writable writable = property(_get_writable, _set_writable) def fileno(self): return self.fd def set_reuse_address(self, reuse_address): self.socket.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, int(reuse_address)) def set_send_buffer_size(self, n): self.socket.setsockopt(_socket.SOL_SOCKET, _socket.SO_SNDBUF, n) def set_recv_buffer_size(self, n): self.socket.setsockopt(_socket.SOL_SOCKET, _socket.SO_RCVBUF, n) def bind(self, addr): self.socket.bind(addr) def listen(self, backlog = DEFAULT_BACKLOG): self.socket.listen(backlog) self.state = self.STATE_LISTENING 
def accept(self): """waits on a listening socket, returns a new socket_class instance for the incoming connection""" assert self.state == self.STATE_LISTENING, "make sure socket is listening before calling accept" while True: #we need a loop because sometimes we become readable and still not a valid #connection was accepted, in which case we return here and wait some more. self.readable.wait() try: s, _ = self.socket.accept() except _socket.error, (errno, _): if errno in [EAGAIN, EWOULDBLOCK]: #this can happen when more than one process received readability on the same socket (forked/cloned/dupped) #in that case 1 process will do the accept, the others receive this error, and should continue waiting for #readability continue else: raise return self.__class__(s, self.STATE_CONNECTED) def accept_iter(self): while True: try: yield self.accept() except Exception: self.log.exception("in accept_iter") Tasklet.sleep(1.0) #prevent hogging def _connect(self, addr, timeout = TIMEOUT_CURRENT): assert self.state == self.STATE_INIT, "make sure socket is not already connected or closed" try: err = self.socket.connect_ex(addr) serr = self.socket.getsockopt(_socket.SOL_SOCKET, _socket.SO_ERROR) except: self.log.exception("unexpected exception thrown by connect_ex") raise if err == 0 and serr == 0: self.state = self.STATE_CONNECTED elif err == EINPROGRESS and serr != 0: raise IOError(serr, os.strerror(serr)) elif err == EINPROGRESS and serr == 0: self.state = self.STATE_CONNECTING try: self.writable.wait(timeout = timeout) self.state = self.STATE_CONNECTED except: self.state = self.STATE_INIT raise else: #some other error, #unix domain socket that does not exist, Cannot assign requested address etc etc raise _io.error_from_errno(IOError) def write(self, buffer, timeout = TIMEOUT_CURRENT, assume_writable = True): """Writes as many bytes as possible from the given buffer to this socket. The buffer position is updated according to the number of bytes succesfully written to the socket. 
        This method returns the total number of bytes written. This method could possible write 0 bytes"""
        assert self.state == self.STATE_CONNECTED, "socket must be connected in order to write to it"

        # NOTE(review): every XMOD-th call forces the slow (wait-first) path below
        # by clearing assume_writable -- presumably to give other tasklets a turn;
        # confirm intent against the scheduler.
        Socket._x += 1
        if Socket._x % XMOD == 0:
            assume_writable = False

        #by default assume that we can write to the socket without blocking
        if assume_writable:
            bytes_written, _ = buffer.send(self.fd) #write to fd from buffer
            if bytes_written < 0 and _io.get_errno() == EAGAIN:
                #nope, need to wait before sending our data
                assume_writable = False
            #else if error != EAGAIN, assume_writable will stay True, and we fall trough and raise error below

        #if we cannot assume write-ability we will wait until data can be written
        if not assume_writable:
            self.writable.wait(timeout = timeout)
            bytes_written, _ = buffer.send(self.fd) #write to fd from buffer

        #print 'bw', bytes_written, buffer.capacity #

        if bytes_written < 0:
            # send failed with an errno other than EAGAIN
            raise _io.error_from_errno(IOError)
        else:
            return bytes_written

    def read(self, buffer, timeout = TIMEOUT_CURRENT, assume_readable = True):
        """Reads as many bytes as possible the socket into the given buffer.
        The buffer position is updated according to the number of bytes read from the socket.
        This method could possible read 0 bytes. 
        The method returns the total number of bytes read"""
        assert self.state == self.STATE_CONNECTED, "socket must be connected in order to read from it"

        # NOTE(review): same XMOD throttle as in write() -- periodically take the
        # wait-first path instead of the optimistic non-blocking recv.
        Socket._x += 1
        if Socket._x % XMOD == 0:
            assume_readable = False

        #by default assume that we can read from the socket without blocking
        if assume_readable:
            bytes_read, _ = buffer.recv(self.fd) #read from fd to
            if bytes_read < 0 and _io.get_errno() == EAGAIN:
                #nope, need to wait before reading our data
                assume_readable = False
            #else if error != EAGAIN, assume_readable will stay True, and we fall trough and raise error below

        #if we cannot assume readability we will wait until data can be read again
        if not assume_readable:
            self.readable.wait(timeout = timeout)
            bytes_read, _ = buffer.recv(self.fd) #read from fd to

        #print 'br', bytes_read, buffer.capacity #

        if bytes_read < 0:
            # recv failed with an errno other than EAGAIN
            raise _io.error_from_errno(IOError)
        else:
            return bytes_read

    def write_socket(self, socket, timeout = TIMEOUT_CURRENT):
        """writes a socket trough this socket"""
        # Passes the raw file descriptor of *socket* over this connection.
        self.writable.wait(timeout = timeout)
        _io.msgsendfd(self.fd, socket.fd)

    def read_socket(self, socket_class = None, socket_family = _socket.AF_INET, socket_type = _socket.SOCK_STREAM, socket_state = STATE_INIT, timeout = TIMEOUT_CURRENT):
        """reads a socket from this socket"""
        # Receives a raw file descriptor and wraps it in socket_class
        # (defaults to this socket's own class).
        self.readable.wait(timeout = timeout)
        fd = _io.msgrecvfd(self.fd)
        return (socket_class or self.__class__).from_file_descriptor(fd, socket_family, socket_type, socket_state)

    def is_closed(self):
        # True once close() has completed.
        return self.state == self.STATE_CLOSED

    def close(self):
        """Closes the socket and releases its readable/writable event channels."""
        assert self.state in [self.STATE_CONNECTED, self.STATE_LISTENING]
        self.state = self.STATE_CLOSING
        if self._readable is not None:
            self._readable.close()
        if self._writable is not None:
            self._writable.close()
        self.socket.close()
        # Drop references so a closed Socket cannot be used accidentally.
        del self.socket
        del self._readable
        del self._writable
        self.state = self.STATE_CLOSED


class SocketServer(object):
    """Accepts connections on an address (or a pre-made listening Socket) and
    dispatches each accepted client to a handler callable in its own tasklet."""

    log = logging.getLogger('SocketServer')

    def __init__(self, endpoint, handler = None):
        # *endpoint* is either an address tuple to bind to, or an existing
        # Socket instance to accept on.  *handler* is called with each
        # accepted Socket.
        self._addr = None
        self._socket = None
        if isinstance(endpoint, Socket):
            self._socket = endpoint
        else:
            self._addr = endpoint
        self._handler = handler
        self._reuseaddress = True
        self._handler_task_name = 'socket_handler'
        self._accept_task = None
        self._accept_task_name = 'socket_acceptor'

    @property
    def socket(self):
        # The listening Socket (None until bind()/listen() created it).
        return self._socket

    def _handle_accept(self, accepted_socket):
        # Runs in a fresh tasklet per accepted client.  If the handler neither
        # returns a value nor closes the socket itself, close it here so
        # clients are never leaked.
        result = None
        try:
            result = self._handler(accepted_socket)
        except TaskletExit:
            raise
        except:
            self.log.exception("unhandled exception in socket handler")
        finally:
            if result is None and not accepted_socket.is_closed():
                try:
                    accepted_socket.close()
                except TaskletExit:
                    raise
                except:
                    self.log.exception("unhandled exception while forcefully closing client")

    def _create_socket(self):
        # Lazily creates the listening socket from the configured address.
        if self._socket is None:
            if self._addr is None:
                assert False, "address must be set or accepting socket must be explicitly set"
            self._socket = Socket.from_address(self._addr)
            self._socket.set_reuse_address(self._reuseaddress)
        return self._socket

    def _accept_task_loop(self):
        # One iteration per accepted connection; each client gets its own tasklet.
        accepted_socket = self._socket.accept()
        Tasklet.new(self._handle_accept, self._handler_task_name)(accepted_socket)

    def bind(self):
        """creates socket if needed, and binds it"""
        socket = self._create_socket()
        socket.bind(self._addr)

    def listen(self, backlog = DEFAULT_BACKLOG):
        """creates socket if needed, and listens it"""
        socket = self._create_socket()
        socket.listen(backlog)

    def serve(self):
        """listens and starts a new tasks accepting incoming connections on the configured address"""
        # NOTE(review): bind()+listen() are performed only when no pre-made
        # Socket was supplied; a supplied socket is assumed to be listening.
        if self._socket is None:
            self.bind()
            self.listen()
        if not callable(self._handler):
            assert False, "handler not set or not callable"
        self._accept_task = Tasklet.loop(self._accept_task_loop, name = self._accept_task_name, daemon = True)()

    def close(self):
        """Stops the acceptor tasklet and closes the listening socket."""
        self._accept_task.kill()
        self._socket.close()
{ "content_hash": "85b906bb3bdbc40734ca77ae3b794c37", "timestamp": "", "source": "github", "line_count": 349, "max_line_length": 170, "avg_line_length": 37.730659025787965, "alnum_prop": 0.6130012150668287, "repo_name": "toymachine/concurrence", "id": "d03b7dc66f45841926a858b2ccfa911f7e1e1240", "size": "13364", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "lib/concurrence/io/socket.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "42241" }, { "name": "JavaScript", "bytes": "19227" }, { "name": "Python", "bytes": "408521" }, { "name": "Shell", "bytes": "45" } ], "symlink_target": "" }
import os.path


def full_path(fname):
    """Return the absolute path of fixture *fname*, resolved relative to this
    package's directory (independent of the current working directory)."""
    dirpath = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(dirpath, fname)


def load(fname):
    """Return the text contents of fixture *fname*.

    The file is looked up relative to this package (see full_path).  Uses a
    context manager so the handle is closed deterministically -- the original
    implementation left the file open until garbage collection.
    """
    with open(full_path(fname)) as f:
        return f.read()
{ "content_hash": "3a5486409e67e910f30f9bc67dcf5a64", "timestamp": "", "source": "github", "line_count": 10, "max_line_length": 57, "avg_line_length": 19.7, "alnum_prop": 0.6751269035532995, "repo_name": "lukleh/Tiny-Stackbased-Virtual-Machine-in-Python", "id": "ec229d614179682270f1cde115a1f554c3bd2491", "size": "198", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "test/fixtures/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "89628" }, { "name": "Shell", "bytes": "171" } ], "symlink_target": "" }
# Old-style Django template loading machinery.
# NOTE: Python 2 only -- this module uses the ``except Exc, e`` and
# ``raise Exc, value`` statement forms, which are syntax errors on Python 3.
from django.core.exceptions import ImproperlyConfigured
from django.template import Origin, Template, Context, TemplateDoesNotExist, add_to_builtins
from django.utils.importlib import import_module
from django.conf import settings

# Lazily-built tuple of loader callables; populated on the first call to
# find_template_source() (see Django ticket #1292 for why it is not built
# at import time).
template_source_loaders = None

class LoaderOrigin(Origin):
    # Origin subclass that remembers which loader produced the template,
    # so the source can be re-fetched for debug tracebacks.
    def __init__(self, display_name, loader, name, dirs):
        super(LoaderOrigin, self).__init__(display_name)
        self.loader, self.loadname, self.dirs = loader, name, dirs

    def reload(self):
        # Re-run the loader and return just the template source (index 0).
        return self.loader(self.loadname, self.dirs)[0]

def make_origin(display_name, loader, name, dirs):
    # Origins are only tracked when TEMPLATE_DEBUG is on; otherwise return
    # None to avoid the bookkeeping cost.
    if settings.TEMPLATE_DEBUG:
        return LoaderOrigin(display_name, loader, name, dirs)
    else:
        return None

def find_template_source(name, dirs=None):
    """Return (source, origin) for the first loader that can load *name*.

    Raises TemplateDoesNotExist if no configured loader finds the template.
    """
    # Calculate template_source_loaders the first time the function is executed
    # because putting this logic in the module-level namespace may cause
    # circular import errors. See Django ticket #1292.
    # NOTE(review): this lazy initialisation is not lock-guarded; concurrent
    # first calls could build the tuple twice (same result) -- confirm that
    # is acceptable.
    global template_source_loaders
    if template_source_loaders is None:
        loaders = []
        for path in settings.TEMPLATE_LOADERS:
            # Split "package.module.callable" into module path and attribute.
            i = path.rfind('.')
            module, attr = path[:i], path[i+1:]
            try:
                mod = import_module(module)
            except ImportError, e:
                raise ImproperlyConfigured, 'Error importing template source loader %s: "%s"' % (module, e)
            try:
                func = getattr(mod, attr)
            except AttributeError:
                raise ImproperlyConfigured, 'Module "%s" does not define a "%s" callable template source loader' % (module, attr)
            if not func.is_usable:
                # Deliberately imported lazily: warnings is only needed on
                # this rarely-taken path.
                import warnings
                warnings.warn("Your TEMPLATE_LOADERS setting includes %r, but your Python installation doesn't support that type of template loading. Consider removing that line from TEMPLATE_LOADERS." % path)
            else:
                loaders.append(func)
        template_source_loaders = tuple(loaders)
    for loader in template_source_loaders:
        try:
            source, display_name = loader(name, dirs)
            return (source, make_origin(display_name, loader, name, dirs))
        except TemplateDoesNotExist:
            # Try the next loader in order.
            pass
    raise TemplateDoesNotExist, name

def get_template(template_name):
    """
    Returns a compiled Template object for the given template name,
    handling template inheritance recursively.
    """
    source, origin = find_template_source(template_name)
    template = get_template_from_string(source, origin, template_name)
    return template

def get_template_from_string(source, origin=None, name=None):
    """
    Returns a compiled Template object for the given template code,
    handling template inheritance recursively.
    """
    return Template(source, origin, name)

def render_to_string(template_name, dictionary=None, context_instance=None):
    """
    Loads the given template_name and renders it with the given dictionary as
    context. The template_name may be a string to load a single template using
    get_template, or it may be a tuple to use select_template to find one of
    the templates in the list. Returns a string.
    """
    dictionary = dictionary or {}
    if isinstance(template_name, (list, tuple)):
        t = select_template(template_name)
    else:
        t = get_template(template_name)
    if context_instance:
        # Push the extra variables onto the caller-supplied context.
        context_instance.update(dictionary)
    else:
        context_instance = Context(dictionary)
    return t.render(context_instance)

def select_template(template_name_list):
    "Given a list of template names, returns the first that can be loaded."
    for template_name in template_name_list:
        try:
            return get_template(template_name)
        except TemplateDoesNotExist:
            continue
    # If we get here, none of the templates could be loaded
    raise TemplateDoesNotExist, ', '.join(template_name_list)

# Make the {% block %}/{% extends %} tags available without explicit {% load %}.
add_to_builtins('django.template.loader_tags')
{ "content_hash": "71a24f95e494ef4490db80c7bf7e2476", "timestamp": "", "source": "github", "line_count": 99, "max_line_length": 209, "avg_line_length": 41.474747474747474, "alnum_prop": 0.6573307355090112, "repo_name": "greggian/TapdIn", "id": "127d857e2fa2ecdebc76afd07d6810a346e8d379", "size": "5115", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "django/template/loader.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "JavaScript", "bytes": "82525" }, { "name": "Python", "bytes": "3585862" }, { "name": "Shell", "bytes": "227" } ], "symlink_target": "" }
from allauth.account.signals import password_reset
from django.core.signals import request_finished
from django.dispatch import receiver


@receiver(request_finished)
def set_request_finished(sender, **kwargs):
    """Signal handler invoked after Django finishes processing any request."""
    print("Request finished!")


@receiver(password_reset)
def set_password_reset(sender, **kwargs):
    """Signal handler invoked after a user completes an allauth password reset."""
    print("Password has been reset!")
{ "content_hash": "4732de2971333e8c0ff88796469e9e50", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 50, "avg_line_length": 26.923076923076923, "alnum_prop": 0.7742857142857142, "repo_name": "groundupnews/gu", "id": "82da6c6c718c9c8e233b19b1af15ac53f7e90d06", "size": "350", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "newsroom/signals.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "222991" }, { "name": "HTML", "bytes": "563742" }, { "name": "JavaScript", "bytes": "790912" }, { "name": "PHP", "bytes": "2275" }, { "name": "Python", "bytes": "598998" }, { "name": "Roff", "bytes": "888" }, { "name": "Shell", "bytes": "803" }, { "name": "XSLT", "bytes": "870" } ], "symlink_target": "" }
""" Default key bindings.:: registry = load_key_bindings() app = Application(key_bindings_registry=registry) """ from __future__ import unicode_literals from prompt_toolkit.key_binding.registry import ConditionalRegistry, MergedRegistry from prompt_toolkit.key_binding.bindings.basic import load_basic_bindings, load_abort_and_exit_bindings, load_basic_system_bindings, load_auto_suggestion_bindings, load_mouse_bindings from prompt_toolkit.key_binding.bindings.emacs import load_emacs_bindings, load_emacs_system_bindings, load_emacs_search_bindings, load_emacs_open_in_editor_bindings, load_extra_emacs_page_navigation_bindings from prompt_toolkit.key_binding.bindings.vi import load_vi_bindings, load_vi_system_bindings, load_vi_search_bindings, load_vi_open_in_editor_bindings, load_extra_vi_page_navigation_bindings from prompt_toolkit.filters import to_cli_filter __all__ = ( 'load_key_bindings', 'load_key_bindings_for_prompt', ) def load_key_bindings( get_search_state=None, enable_abort_and_exit_bindings=False, enable_system_bindings=False, enable_search=False, enable_open_in_editor=False, enable_extra_page_navigation=False, enable_auto_suggest_bindings=False): """ Create a Registry object that contains the default key bindings. :param enable_abort_and_exit_bindings: Filter to enable Ctrl-C and Ctrl-D. :param enable_system_bindings: Filter to enable the system bindings (meta-! prompt and Control-Z suspension.) :param enable_search: Filter to enable the search bindings. :param enable_open_in_editor: Filter to enable open-in-editor. :param enable_open_in_editor: Filter to enable open-in-editor. :param enable_extra_page_navigation: Filter for enabling extra page navigation. (Bindings for up/down scrolling through long pages, like in Emacs or Vi.) :param enable_auto_suggest_bindings: Filter to enable fish-style suggestions. """ assert get_search_state is None or callable(get_search_state) # Accept both Filters and booleans as input. 
enable_abort_and_exit_bindings = to_cli_filter(enable_abort_and_exit_bindings) enable_system_bindings = to_cli_filter(enable_system_bindings) enable_search = to_cli_filter(enable_search) enable_open_in_editor = to_cli_filter(enable_open_in_editor) enable_extra_page_navigation = to_cli_filter(enable_extra_page_navigation) enable_auto_suggest_bindings = to_cli_filter(enable_auto_suggest_bindings) registry = MergedRegistry([ # Load basic bindings. load_basic_bindings(), load_mouse_bindings(), ConditionalRegistry(load_abort_and_exit_bindings(), enable_abort_and_exit_bindings), ConditionalRegistry(load_basic_system_bindings(), enable_system_bindings), # Load emacs bindings. load_emacs_bindings(), ConditionalRegistry(load_emacs_open_in_editor_bindings(), enable_open_in_editor), ConditionalRegistry(load_emacs_search_bindings(get_search_state=get_search_state), enable_search), ConditionalRegistry(load_emacs_system_bindings(), enable_system_bindings), ConditionalRegistry(load_extra_emacs_page_navigation_bindings(), enable_extra_page_navigation), # Load Vi bindings. load_vi_bindings(get_search_state=get_search_state), ConditionalRegistry(load_vi_open_in_editor_bindings(), enable_open_in_editor), ConditionalRegistry(load_vi_search_bindings(get_search_state=get_search_state), enable_search), ConditionalRegistry(load_vi_system_bindings(), enable_system_bindings), ConditionalRegistry(load_extra_vi_page_navigation_bindings(), enable_extra_page_navigation), # Suggestion bindings. # (This has to come at the end, because the Vi bindings also have an # implementation for the "right arrow", but we really want the # suggestion binding when a suggestion is available.) ConditionalRegistry(load_auto_suggestion_bindings(), enable_auto_suggest_bindings), ]) return registry def load_key_bindings_for_prompt(**kw): """ Create a ``Registry`` object with the defaults key bindings for an input prompt. 
This activates the key bindings for abort/exit (Ctrl-C/Ctrl-D), incremental search and auto suggestions. (Not for full screen applications.) """ kw.setdefault('enable_abort_and_exit_bindings', True) kw.setdefault('enable_search', True) kw.setdefault('enable_auto_suggest_bindings', True) return load_key_bindings(**kw)
{ "content_hash": "af21f54bb497dccc70c963329d16f0cb", "timestamp": "", "source": "github", "line_count": 119, "max_line_length": 208, "avg_line_length": 41.32773109243698, "alnum_prop": 0.675884505896706, "repo_name": "unnikrishnankgs/va", "id": "fb2c1070f7992bcccba7397cbf1d01b99a0d0adb", "size": "4918", "binary": false, "copies": "17", "ref": "refs/heads/master", "path": "venv/lib/python3.5/site-packages/prompt_toolkit/key_binding/defaults.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "C", "bytes": "1836035" }, { "name": "C++", "bytes": "12002305" }, { "name": "CMake", "bytes": "128" }, { "name": "CSS", "bytes": "64776" }, { "name": "Cuda", "bytes": "78890" }, { "name": "Fortran", "bytes": "8281" }, { "name": "GLSL", "bytes": "976" }, { "name": "HTML", "bytes": "297329" }, { "name": "JavaScript", "bytes": "4313047" }, { "name": "Jupyter Notebook", "bytes": "603900" }, { "name": "Makefile", "bytes": "7573" }, { "name": "Nginx", "bytes": "544" }, { "name": "Objective-C", "bytes": "567" }, { "name": "Protocol Buffer", "bytes": "72897" }, { "name": "PureBasic", "bytes": "134" }, { "name": "Python", "bytes": "51104955" }, { "name": "Shell", "bytes": "71646" }, { "name": "Smarty", "bytes": "28890" }, { "name": "XSLT", "bytes": "152770" } ], "symlink_target": "" }
"""Tests for tf.linalg.LinearOperatorComposition (square and non-square)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.platform import test

linalg = linalg_lib
# Fixed seed so every test run builds the same random matrices.
rng = np.random.RandomState(0)


class SquareLinearOperatorCompositionTest(
    linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
  """Most tests done in the base class LinearOperatorDerivedClassTest."""

  def setUp(self):
    # Increase from 1e-6 to 1e-4 and 2e-4.
    self._atol[dtypes.float32] = 2e-4
    self._atol[dtypes.complex64] = 1e-4
    self._rtol[dtypes.float32] = 2e-4
    self._rtol[dtypes.complex64] = 1e-4

  @staticmethod
  def skip_these_tests():
    # Cholesky not implemented.
    return ["cholesky"]

  def operator_and_matrix(self, build_info, dtype, use_placeholder):
    # Builds (operator, dense_matrix) pairs the base-class harness compares.
    shape = list(build_info.shape)

    # Either 1 or 2 matrices, depending.
    num_operators = rng.randint(low=1, high=3)
    matrices = [
        linear_operator_test_util.random_positive_definite_matrix(
            shape, dtype, force_well_conditioned=True)
        for _ in range(num_operators)
    ]

    lin_op_matrices = matrices

    if use_placeholder:
      lin_op_matrices = [
          array_ops.placeholder_with_default(
              matrix, shape=None) for matrix in matrices]

    operator = linalg.LinearOperatorComposition(
        [linalg.LinearOperatorFullMatrix(l) for l in lin_op_matrices],
        is_square=True)

    # Dense reference: multiply the factors in reverse so the product
    # matches the composition A = A1 A2 ... An.
    matmul_order_list = list(reversed(matrices))
    mat = matmul_order_list[0]
    for other_mat in matmul_order_list[1:]:
      mat = math_ops.matmul(other_mat, mat)

    return operator, mat

  def test_is_x_flags(self):
    # Matrix with two positive eigenvalues, 1, and 1.
    # The matrix values do not effect auto-setting of the flags.
    matrix = [[1., 0.], [1., 1.]]
    operator = linalg.LinearOperatorComposition(
        [linalg.LinearOperatorFullMatrix(matrix)],
        is_positive_definite=True,
        is_non_singular=True,
        is_self_adjoint=False)
    self.assertTrue(operator.is_positive_definite)
    self.assertTrue(operator.is_non_singular)
    self.assertFalse(operator.is_self_adjoint)

  def test_is_non_singular_auto_set(self):
    # Matrix with two positive eigenvalues, 11 and 8.
    # The matrix values do not effect auto-setting of the flags.
    matrix = [[11., 0.], [1., 8.]]
    operator_1 = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)
    operator_2 = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)

    operator = linalg.LinearOperatorComposition(
        [operator_1, operator_2],
        is_positive_definite=False,  # No reason it HAS to be False...
        is_non_singular=None)
    self.assertFalse(operator.is_positive_definite)
    self.assertTrue(operator.is_non_singular)

    # A composition of non-singular operators may not be flagged singular.
    with self.assertRaisesRegexp(ValueError, "always non-singular"):
      linalg.LinearOperatorComposition(
          [operator_1, operator_2], is_non_singular=False)

  def test_name(self):
    matrix = [[11., 0.], [1., 8.]]
    operator_1 = linalg.LinearOperatorFullMatrix(matrix, name="left")
    operator_2 = linalg.LinearOperatorFullMatrix(matrix, name="right")

    operator = linalg.LinearOperatorComposition([operator_1, operator_2])

    # Composition name is derived from the operand names.
    self.assertEqual("left_o_right", operator.name)

  def test_different_dtypes_raises(self):
    operators = [
        linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3)),
        linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3).astype(np.float32))
    ]
    with self.assertRaisesRegexp(TypeError, "same dtype"):
      linalg.LinearOperatorComposition(operators)

  def test_empty_operators_raises(self):
    with self.assertRaisesRegexp(ValueError, "non-empty"):
      linalg.LinearOperatorComposition([])


class NonSquareLinearOperatorCompositionTest(
    linear_operator_test_util.NonSquareLinearOperatorDerivedClassTest):
  """Most tests done in the base class LinearOperatorDerivedClassTest."""

  def setUp(self):
    # Increase from 1e-6 to 1e-4
    self._atol[dtypes.float32] = 1e-4
    self._atol[dtypes.complex64] = 1e-4
    self._rtol[dtypes.float32] = 1e-4
    self._rtol[dtypes.complex64] = 1e-4

  def operator_and_matrix(self, build_info, dtype, use_placeholder):
    # NOTE(review): `sess` appears unused below -- confirm and consider
    # removing in a follow-up.
    sess = ops.get_default_session()
    shape = list(build_info.shape)

    # Test only the case of 2 matrices.
    # The Square test uses either 1 or 2, so we have tested the case of 1
    # matrix sufficiently.
    num_operators = 2

    # Create 2 matrices/operators, A1, A2, which becomes A = A1 A2.
    # Use inner dimension of 2.
    k = 2
    batch_shape = shape[:-2]
    shape_1 = batch_shape + [shape[-2], k]
    shape_2 = batch_shape + [k, shape[-1]]

    matrices = [
        linear_operator_test_util.random_normal(
            shape_1, dtype=dtype),
        linear_operator_test_util.random_normal(
            shape_2, dtype=dtype)
    ]

    lin_op_matrices = matrices

    if use_placeholder:
      lin_op_matrices = [
          array_ops.placeholder_with_default(
              matrix, shape=None) for matrix in matrices]

    operator = linalg.LinearOperatorComposition(
        [linalg.LinearOperatorFullMatrix(l) for l in lin_op_matrices])

    # Dense reference product, factors multiplied in reverse order.
    matmul_order_list = list(reversed(matrices))
    mat = matmul_order_list[0]
    for other_mat in matmul_order_list[1:]:
      mat = math_ops.matmul(other_mat, mat)

    return operator, mat

  @test_util.run_deprecated_v1
  def test_static_shapes(self):
    operators = [
        linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 4)),
        linalg.LinearOperatorFullMatrix(rng.rand(2, 4, 5))
    ]
    operator = linalg.LinearOperatorComposition(operators)
    # (2, 3, 4) composed with (2, 4, 5) yields (2, 3, 5).
    self.assertAllEqual((2, 3, 5), operator.shape)

  @test_util.run_deprecated_v1
  def test_shape_tensors_when_statically_available(self):
    operators = [
        linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 4)),
        linalg.LinearOperatorFullMatrix(rng.rand(2, 4, 5))
    ]
    operator = linalg.LinearOperatorComposition(operators)
    with self.cached_session():
      self.assertAllEqual((2, 3, 5), operator.shape_tensor().eval())

  @test_util.run_deprecated_v1
  def test_shape_tensors_when_only_dynamically_available(self):
    mat_1 = rng.rand(1, 2, 3, 4)
    mat_2 = rng.rand(1, 2, 4, 5)
    # Fully-unknown shapes: only the fed values determine the result shape.
    mat_ph_1 = array_ops.placeholder(dtypes.float64)
    mat_ph_2 = array_ops.placeholder(dtypes.float64)
    feed_dict = {mat_ph_1: mat_1, mat_ph_2: mat_2}

    operators = [
        linalg.LinearOperatorFullMatrix(mat_ph_1),
        linalg.LinearOperatorFullMatrix(mat_ph_2)
    ]
    operator = linalg.LinearOperatorComposition(operators)
    with self.cached_session():
      self.assertAllEqual(
          (1, 2, 3, 5), operator.shape_tensor().eval(feed_dict=feed_dict))


if __name__ == "__main__":
  linear_operator_test_util.add_tests(SquareLinearOperatorCompositionTest)
  linear_operator_test_util.add_tests(NonSquareLinearOperatorCompositionTest)
  test.main()
{ "content_hash": "a3de8177ad74a76ed792141003517e4a", "timestamp": "", "source": "github", "line_count": 208, "max_line_length": 80, "avg_line_length": 34.99519230769231, "alnum_prop": 0.6903420799560379, "repo_name": "chemelnucfin/tensorflow", "id": "ba611a450c28c4c958ec8a24f5dd9a1184c1031a", "size": "7969", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tensorflow/python/kernel_tests/linalg/linear_operator_composition_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "4913" }, { "name": "Batchfile", "bytes": "16146" }, { "name": "C", "bytes": "825231" }, { "name": "C#", "bytes": "8562" }, { "name": "C++", "bytes": "75313939" }, { "name": "CMake", "bytes": "207856" }, { "name": "Dockerfile", "bytes": "80130" }, { "name": "Go", "bytes": "1670422" }, { "name": "HTML", "bytes": "4680032" }, { "name": "Java", "bytes": "881711" }, { "name": "Jupyter Notebook", "bytes": "1113647" }, { "name": "LLVM", "bytes": "6536" }, { "name": "MLIR", "bytes": "853297" }, { "name": "Makefile", "bytes": "109340" }, { "name": "Objective-C", "bytes": "105235" }, { "name": "Objective-C++", "bytes": "258793" }, { "name": "PHP", "bytes": "38007" }, { "name": "Pascal", "bytes": "3741" }, { "name": "Pawn", "bytes": "14380" }, { "name": "Perl", "bytes": "7536" }, { "name": "Python", "bytes": "50825074" }, { "name": "RobotFramework", "bytes": "891" }, { "name": "Ruby", "bytes": "4706" }, { "name": "Shell", "bytes": "532610" }, { "name": "Smarty", "bytes": "31460" }, { "name": "Swift", "bytes": "62814" } ], "symlink_target": "" }
from django.utils.translation import ugettext_lazy as _

import horizon

from openstack_dashboard.dashboards.newtouch import dashboard


class Server(horizon.Panel):
    """Horizon panel for the "Server" page of the Newtouch dashboard."""
    # Display name shown in the dashboard navigation (lazily translated).
    name = _("Server")
    # URL slug under which the panel is mounted.
    slug = "server"


# Attach the panel to the Newtouch dashboard at import time.
dashboard.Newtouch.register(Server)
{ "content_hash": "150755dcf0810fc12f94422a0c31c427", "timestamp": "", "source": "github", "line_count": 12, "max_line_length": 61, "avg_line_length": 20.5, "alnum_prop": 0.7642276422764228, "repo_name": "zouyapeng/horizon-newtouch", "id": "fde05017921d249656d3921555da143ad6514886", "size": "246", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "openstack_dashboard/dashboards/newtouch/server/panel.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "12134754" }, { "name": "HTML", "bytes": "436727" }, { "name": "JavaScript", "bytes": "785199" }, { "name": "Makefile", "bytes": "588" }, { "name": "Python", "bytes": "4085185" }, { "name": "Shell", "bytes": "17651" } ], "symlink_target": "" }
# dbbot package: version marker and public API re-exports.
__version__ = '0.2-devel'

# Re-export the package's two entry-point classes at the top level.
from .logger import Logger
from .robot_database import RobotDatabase
{ "content_hash": "98caebaa51b0f6e5ca4cc810274ec336", "timestamp": "", "source": "github", "line_count": 5, "max_line_length": 41, "avg_line_length": 19.4, "alnum_prop": 0.7422680412371134, "repo_name": "robotframework/DbBot", "id": "66520c936e100df1ce78691c58e36875976fe258", "size": "705", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "dbbot/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "31726" } ], "symlink_target": "" }
## Configure pluma's list of auto-detected text encodings via GSettings.
from gi.repository import Gio

pluma_settings = Gio.Settings('org.mate.pluma')

# Encoding candidates tried, in order, when pluma opens a file.
encodings = [
    'UTF-8',
    'BIG5',
    'BIG5-HKSCS',
    'EUC-TW',
    'GB18030',
    'GB2312',
    'GBK',
    'CURRENT',
    'ISO-8859-15',
    'UTF-16',
]

pluma_settings.set_strv('auto-detected-encodings', encodings)

## http://foreachsam.github.io/book-util-gsettings/book/content/case/pluma/encodings/
## https://lazka.github.io/pgi-docs/#Gio-2.0/classes/Settings.html
{ "content_hash": "3e026835edafe6b2926db4c2268af98d", "timestamp": "", "source": "github", "line_count": 10, "max_line_length": 131, "avg_line_length": 43.1, "alnum_prop": 0.7192575406032483, "repo_name": "foreachsam/book-lang-python", "id": "73a5504089ab9386b39c3e06e7152c4227026ae3", "size": "455", "binary": false, "copies": "1", "ref": "refs/heads/gh-pages", "path": "example/subject/gi/gsettings/prototype/demo-pluma-encodings/set.py", "mode": "33261", "license": "mit", "language": [ { "name": "CSS", "bytes": "8073" }, { "name": "HTML", "bytes": "12523" }, { "name": "JavaScript", "bytes": "1242" }, { "name": "Python", "bytes": "197822" }, { "name": "Shell", "bytes": "5208" }, { "name": "XSLT", "bytes": "3118" } ], "symlink_target": "" }
import logging
from io import BytesIO

import docker

from emuvim.api.util.path_utils import get_absolute_path


def build_dockerfile_dir(folder, tag):
    """Build the Dockerfile found in *folder* into an image tagged *tag*,
    logging every line of the docker build output."""
    client_api = docker.from_env().api
    build_output = client_api.build(get_absolute_path(folder), tag=tag)
    logging.info('Docker build result:')
    for output_line in build_output:
        logging.info(output_line)


def suffix_tag_name(tag, suffix):
    """Append *suffix* to *tag*; tags without an explicit version get ':latest'
    inserted first."""
    return "%s_%s" % (tag, suffix) if ":" in tag else "%s:latest_%s" % (tag, suffix)


def wrap_debian_like(image):
    """Derive a new image from *image* with basic networking tools installed
    (net-tools, ping, iproute) and return the wrapper image's tag."""
    dockerfile = '''
    FROM %s
    RUN apt update -y && apt install -y net-tools iputils-ping iproute
    ''' % image
    wrapper_name = suffix_tag_name(image, 'containernet_compatible')
    logging.info('wrapping image: %s->%s' % (image, wrapper_name))
    client_api = docker.from_env().api
    stream = client_api.build(fileobj=BytesIO(dockerfile.encode('utf-8')),
                              tag=wrapper_name)
    build_result = list(stream)
    logging.debug('Docker build result:' + '\n'.join(build_result))
    return wrapper_name


# 172.17.0.1 is the ip of the docker0 interface on the host
DOCKER_HOST_IP = '172.17.0.1'
{ "content_hash": "76fbbd78ede2a53e84cba7cc15f4f1f6", "timestamp": "", "source": "github", "line_count": 40, "max_line_length": 70, "avg_line_length": 29.075, "alnum_prop": 0.6620808254514188, "repo_name": "mpeuster/son-emu", "id": "39d8a2be37c607184843af3141df5773da530615", "size": "1163", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/emuvim/api/util/docker_utils.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "1766" }, { "name": "Dockerfile", "bytes": "2767" }, { "name": "HTML", "bytes": "4641" }, { "name": "JavaScript", "bytes": "9195" }, { "name": "Python", "bytes": "832407" }, { "name": "Shell", "bytes": "6423" } ], "symlink_target": "" }
from __future__ import print_function """ CParser.py - C parsing library Copyright 2010 Luke Campagnola Distributed under MIT/X11 license. See license.txt for more infomation. Used for extracting data such as macro definitions, variables, typedefs, and function signatures from C files (preferrably header files). """ import sys, re, os import six __all__ = ['winDefs', 'CParser'] def winDefs(verbose=False, architecture=None): """Convenience function. Returns a parser which loads a selection of windows headers included with CParser. These definitions can either be accessed directly or included before parsing another file like this: windefs = CParser.winDefs() p = CParser.CParser("headerFile.h", copyFrom=windefs) Definitions are pulled from a selection of header files included in Visual Studio (possibly not legal to distribute? Who knows.), some of which have been abridged because they take so long to parse. ============== ================================================================== **Arguments:** *verbose* If true, prints a lot of debugging info. Default is False. *architecture* Specify '32bit' or '64bit' to get an headers parsed as either 32 or 64-bit. If unspecified, we use sys.maxsize to determine whether to interpret headers for 32 or 64 bits. ============== ================================================================== """ headerFiles = ['WinNt.h', 'WinDef.h', 'WinBase.h', 'BaseTsd.h', 'WTypes.h', 'WinUser.h'] if architecture is None: if sys.maxsize > 2**32: architecture = '64bit' else: architecture = '32bit' if verbose: print("Getting winDefs for %s" %architecture) if architecture == '32bit': macros = {'_WIN32': '','_MSC_VER': '800', 'CONST': 'const', 'NO_STRICT': None} cache = 'WinDefs_32bit.cache' elif architecture == '64bit': macros = {'_WIN64': '', 'CONST': 'const', 'NO_STRICT': None} cache = 'WinDefs_64bit.cache' else: raise Exception("Not sure how to return headers for '%s' architecture; valid arguments are '32bit', '64bit', or None." 
% architecture) d = os.path.dirname(__file__) p = CParser( [os.path.join(d, 'headers', h) for h in headerFiles], types={'__int64': ('long long')}, macros=macros, processAll=False ) p.processAll(cache=os.path.join(d, 'headers', cache), noCacheWarning=True, verbose=verbose) return p class CParser(): """Class for parsing C code to extract variable, struct, enum, and function declarations as well as preprocessor macros. This is not a complete C parser; instead, it is meant to simplify the process of extracting definitions from header files in the absence of a complete build system. Many files will require some amount of manual intervention to parse properly (see 'replace' and extra arguments to __init__) Usage: ## create parser object, load two files p = CParser(['header1.h', 'header2.h']) ## remove comments, preprocess, and search for declarations p.processAll() ## just to see what was successfully parsed from the files p.printAll() ## access parsed declarations allValues = p.defs['values'] functionSignatures = p.defs['functions'] ... ## To see what was not successfully parsed: unp = p.processAll(returnUnparsed=True) for s in unp: print(s) """ cacheVersion = 22 ## increment every time cache structure or parsing changes to invalidate old cache files. def __init__(self, files=None, replace=None, copyFrom=None, processAll=True, cache=None, checkCache=False, verbose=False, **args): """Create a C parser object fiven a file or list of files. Files are read to memory and operated on from there. *copyFrom* may be another CParser object from which definitions should be copied. *replace* may be specified to perform string replacements before parsing. format is {'searchStr': 'replaceStr', ...} *cache* specifies a cache file where parsed definitions should be stored. *checkCache* specifies whether to attempt to reparse if it appears the header file is newer. Extra parameters may be used to specify the starting state of the parser. 
For example, one could provide a set of missing type declarations by types={'UINT': ('unsigned int'), 'STRING': ('char', 1)} Similarly, preprocessor macros can be specified: macros={'WINAPI': ''} """ self.defs = {} ## holds all definitions self.fileDefs = {} ## holds definitions grouped by the file they came from self.initOpts = args.copy() self.initOpts['files'] = [] self.initOpts['replace'] = {} self.dataList = ['types', 'variables', 'fnmacros', 'macros', 'structs', 'unions', 'enums', 'functions', 'values'] self.verbose = False # placeholders for definitions that change during parsing #if hasPyParsing: #self.macroExpr = Forward() #self.fnMacroExpr = Forward() #self.definedType = Forward() #self.definedStruct = Forward() #self.definedEnum = Forward() self.fileOrder = [] self.files = {} self.packList = {} ## list describing struct packing rules as defined by #pragma pack if files is not None: if type(files) is str: files = [files] for f in files: self.loadFile(f, replace) ## initialize empty definition lists for k in self.dataList: self.defs[k] = {} #for f in files: #self.fileDefs[f][k] = {} self.compiledTypes = {} ## holds translations from typedefs/structs/unions to fundamental types self.currentFile = None # Import extra arguments if specified for t in args: for k in args[t].keys(): self.addDef(t, k, args[t][k]) # Import from other CParsers if specified if copyFrom is not None: if type(copyFrom) not in [list, tuple]: copyFrom = [copyFrom] for p in copyFrom: self.importDict(p.fileDefs, p.fileOrder) if processAll: self.processAll(cache=cache, verbose=verbose, checkCache=checkCache) def processAll(self, cache=None, returnUnparsed=False, printAfterPreprocess=False, noCacheWarning=True, verbose=False, checkCache=False): """Remove comments, preprocess, and parse declarations from all files. (operates in memory; does not alter the original files) Returns a list of the results from parseDefs. 'cache' may specify a file where cached results are be stored or retrieved. 
The cache is automatically invalidated if any of the arguments to __init__ are changed, or if the C files are newer than the cache. 'returnUnparsed' is passed directly to parseDefs. 'printAfterPreprocess' is for debugging; prints the result of preprocessing each file.""" self.verbose = verbose if cache is not None and self.loadCache(cache, checkValidity=checkCache): if verbose: print("Loaded cached definitions; will skip parsing.") return ## cached values loaded successfully, nothing left to do here #else: #print "No cache.", cache results = [] if noCacheWarning or verbose: print("Parsing C header files (no valid cache found). This could take several minutes...") for f in self.fileOrder: #fn = os.path.basename(f) if self.files[f] is None: ## This means the file could not be loaded and there was no cache. raise Exception('Could not find header file "%s" or a suitable cache file.' % f) if verbose: print("Removing comments from file '%s'..." % f) self.removeComments(f) if verbose: print("Preprocessing file '%s'..." % f) self.preprocess(f) if printAfterPreprocess: print("===== PREPROCSSED %s =======" % f) print(self.files[f]) if verbose: print("Parsing definitions in file '%s'..." % f) results.append(self.parseDefs(f, returnUnparsed)) if cache is not None: if verbose: print("Writing cache file '%s'" % cache) self.writeCache(cache) return results def loadCache(self, cacheFile, checkValidity=False): """Load a cache file. Used internally if cache is specified in processAll(). 
if checkValidity=True, then run several checks before loading the cache: - cache file must not be older than any source files - cache file must not be older than this library file - options recorded in cache must match options used to initialize CParser""" ## make sure cache file exists if type(cacheFile) is not str: raise Exception("cache file option must be a string.") if not os.path.isfile(cacheFile): d = os.path.dirname(__file__) ## If file doesn't exist, search for it in this module's path cacheFile = os.path.join(d, "headers", cacheFile) if not os.path.isfile(cacheFile): if self.verbose: print("Can't find requested cache file.") return False ## make sure cache is newer than all input files canParse = hasPyParsing if checkValidity: mtime = os.stat(cacheFile).st_mtime for f in self.fileOrder: ## if file does not exist, then it does not count against the validity of the cache. if os.path.isfile(f): if os.stat(f).st_mtime > mtime: if self.verbose: print("Cache file is out of date.") return False else: if self.verbose: print("Header file is missing: %s" % f) canParse = False try: ## read cache file import pickle try: cache = pickle.load(open(cacheFile, 'r')) except ValueError: cache = pickle.load(open(cacheFile, 'rb')) ## make sure __init__ options match (unless we can't parse the headers anyway) if checkValidity: if cache['opts'] != self.initOpts: if self.verbose: print("Cache file is not valid--created using different initialization options.") print(cache['opts']) print(self.initOpts) if canParse: return False elif self.verbose: print("However, can't parse header files; will attempt to use the cache anyway.") elif self.verbose: print("Cache init opts are OK:") print(cache['opts']) if cache['version'] < self.cacheVersion: if self.verbose: print("Cache file is not valid--cache format has changed.") if canParse: return False elif self.verbose: print("However, can't parse header files; will attempt to use the cache anyway.") ## import all parse results 
self.importDict(cache['fileDefs'], cache['fileOrder']) return True except: print("Warning--cache read failed:") sys.excepthook(*sys.exc_info()) return False def importDict(self, data, order): """Import definitions from a dictionary. The dict format should be the same as CParser.fileDefs. Used internally; does not need to be called manually.""" for f in order: f = os.path.split(f)[1] self.currentFile = f for k in self.dataList: for n in data[f][k]: self.addDef(k, n, data[f][k][n]) def writeCache(self, cacheFile): """Store all parsed declarations to cache. Used internally.""" cache = {} cache['opts'] = self.initOpts cache['fileDefs'] = self.fileDefs cache['fileOrder'] = self.fileOrder cache['version'] = self.cacheVersion #for k in self.dataList: #cache[k] = getattr(self, k) import pickle pickle.dump(cache, open(cacheFile, 'wb')) def loadFile(self, file, replace=None): """Read a file, make replacements if requested. Called by __init__, should not be called manually.""" if not os.path.isfile(file): ## Not a fatal error since we might be able to function properly if there is a cache file.. #raise Exception("File %s not found" % file) print("Warning: C header '%s' is missing; this may cause trouble." % file) self.files[file] = None return False fd = open(file, 'rU') ## U causes all newline types to be converted to \n self.files[file] = fd.read() fd.close() if replace is not None: for s in replace: self.files[file] = re.sub(s, replace[s], self.files[file]) self.fileOrder.append(file) bn = os.path.basename(file) self.initOpts['replace'][bn] = replace self.initOpts['files'].append(bn) # only interested in the file names; the directory may change between systems. return True #### Beginning of processing functions def assertPyparsing(self): """Make sure pyparsing module is available.""" global hasPyParsing if not hasPyParsing: raise Exception("CParser class requires 'pyparsing' library for actual parsing work. 
Without this library, CParser can only be used with previously cached parse results.") def removeComments(self, file): """Remove all comments from file. (operates in memory; does not alter the original files)""" self.assertPyparsing() text = self.files[file] cplusplusLineComment = Literal("//") + restOfLine # match quoted strings first to prevent matching comments inside quotes self.files[file] = (quotedString | cStyleComment.suppress() | cplusplusLineComment.suppress()).transformString(text) def preprocess(self, file): """Scan named file for preprocessor directives, removing them while expanding macros. (operates in memory; does not alter the original files)""" self.assertPyparsing() self.buildParser() ## we need this so that evalExpr works properly self.currentFile = file packStack = [(None,None)] ## stack for #pragma pack push/pop self.packList[file] = [(0,None)] packing = None ## current packing value text = self.files[file] ## First join together lines split by \\n text = Literal('\\\n').suppress().transformString(text) #self.ppDirective = Combine("#" + Word(alphas).leaveWhitespace()) + restOfLine # define the structure of a macro definition name = Word(alphas+'_', alphanums+'_')('name') self.ppDefine = name.setWhitespaceChars(' \t')("macro") + Optional(lparen + delimitedList(name) + rparen).setWhitespaceChars(' \t')('args') + SkipTo(LineEnd())('value') self.ppDefine.setParseAction(self.processMacroDefn) #self.updateMacroDefns() #self.updateFnMacroDefns() # define pattern for scanning through the input string #self.macroExpander = (self.macroExpr | self.fnMacroExpr) ## Comb through lines, process all directives lines = text.split('\n') result = [] #macroExpander = (quotedString | self.macroExpander) directive = re.compile(r'\s*#([a-zA-Z]+)(.*)$') ifTrue = [True] ifHit = [] for i in range(len(lines)): line = lines[i] newLine = '' m = directive.match(line) if m is None: # regular code line if ifTrue[-1]: # only include if we are inside the correct section of an 
IF block #line = macroExpander.transformString(line) # expand all known macros newLine = self.expandMacros(line) else: # macro line d = m.groups()[0] rest = m.groups()[1] #print "PREPROCESS:", d, rest if d == 'ifdef': d = 'if' rest = 'defined '+rest elif d == 'ifndef': d = 'if' rest = '!defined '+rest ## Evaluate 'defined' operator before expanding macros if d in ['if', 'elif']: def pa(t): return ['0', '1'][t['name'] in self.defs['macros'] or t['name'] in self.defs['fnmacros']] rest = ( Keyword('defined') + (name | lparen + name + rparen) ).setParseAction(pa).transformString(rest) elif d in ['define', 'undef']: macroName, rest = re.match(r'\s*([a-zA-Z_][a-zA-Z0-9_]*)(.*)$', rest).groups() ## Expand macros if needed if rest is not None and (all(ifTrue) or d in ['if', 'elif']): rest = self.expandMacros(rest) if d == 'elif': if ifHit[-1] or not all(ifTrue[:-1]): ev = False else: ev = self.evalPreprocessorExpr(rest) if self.verbose: print(" "*(len(ifTrue)-2) + line, rest, ev) ifTrue[-1] = ev ifHit[-1] = ifHit[-1] or ev elif d == 'else': if self.verbose: print(" "*(len(ifTrue)-2) + line, not ifHit[-1]) ifTrue[-1] = (not ifHit[-1]) and all(ifTrue[:-1]) ifHit[-1] = True elif d == 'endif': ifTrue.pop() ifHit.pop() if self.verbose: print(" "*(len(ifTrue)-1) + line) elif d == 'if': if all(ifTrue): ev = self.evalPreprocessorExpr(rest) else: ev = False if self.verbose: print(" "*(len(ifTrue)-1) + line, rest, ev) ifTrue.append(ev) ifHit.append(ev) elif d == 'define': if not ifTrue[-1]: continue if self.verbose: print(" "*(len(ifTrue)) + "define:", macroName, rest) try: self.ppDefine.parseString(macroName+ ' ' + rest) ## macro is registered here except: print("Error processing macro definition:", macroName, rest) print(" ", sys.exc_info()[1]) elif d == 'undef': if not ifTrue[-1]: continue try: self.remDef('macros', macroName.strip()) #self.macroListString = '|'.join(list(self.defs['macros'].keys()) + self.defs['fnmacros'].keys()) #self.updateMacroDefns() except: if 
sys.exc_info()[0] is not KeyError: sys.excepthook(*sys.exc_info()) print("Error removing macro definition '%s'" % macroName.strip()) elif d == 'pragma': ## Check for changes in structure packing if not ifTrue[-1]: continue m = re.match(r'\s+pack\s*\(([^\)]+)\)', rest) if m is None: continue opts = [s.strip() for s in m.groups()[0].split(',')] pushpop = id = val = None for o in opts: if o in ['push', 'pop']: pushpop = o elif o.isdigit(): val = int(o) else: id = o if val is not None: packing = val if pushpop == 'push': packStack.append((packing, id)) elif opts[0] == 'pop': if id is None: packStack.pop() else: ind = None for i in range(len(packStack)): if packStack[i][1] == id: ind = i break if ind is not None: packStack = packStack[:ind] if val is None: packing = packStack[-1][0] else: packing = int(opts[0]) if self.verbose: print(">> Packing changed to %s at line %d" % (str(packing), i)) self.packList[file].append((i, packing)) else: pass ## Ignore any other directives result.append(newLine) self.files[file] = '\n'.join(result) def evalPreprocessorExpr(self, expr): ## make a few alterations so the expression can be eval'd macroDiffs = ( Literal('!').setParseAction(lambda: ' not ') | Literal('&&').setParseAction(lambda: ' and ') | Literal('||').setParseAction(lambda: ' or ') | Word(alphas+'_',alphanums+'_').setParseAction(lambda: '0')) expr2 = macroDiffs.transformString(expr) try: ev = bool(eval(expr2)) except: if self.verbose: print("Error evaluating preprocessor expression: %s [%s]" % (expr, expr2)) print(" ", sys.exc_info()[1]) ev = False return ev #def updateMacroDefns(self): ##self.macroExpr << MatchFirst( [Keyword(m)('macro') for m in self.defs['macros']] ) ##self.macroExpr.setParseAction(self.processMacroRef) ## regex is faster than pyparsing. 
## Matches quoted strings and macros ##names = list(self.defs['macros'].keys()) + self.defs['fnmacros'].keys() #if len(self.macroListString) == 0: #self.macroRegex = None #else: #self.macroRegex = re.compile( #r'("(\\"|[^"])*")|(\b(%s)\b)' % self.macroListString #) #def updateFnMacroDefns(self): #self.fnMacroExpr << MatchFirst( [(Keyword(m)('macro') + lparen + Group(delimitedList(expression))('args') + rparen) for m in self.defs['fnmacros']] ) #self.fnMacroExpr.setParseAction(self.processFnMacroRef) def processMacroDefn(self, t): """Parse a #define macro and register the definition""" if self.verbose: print("MACRO:", t) #macroVal = self.macroExpander.transformString(t.value).strip() #macroVal = Literal('\\\n').suppress().transformString(macroVal) ## remove escaped newlines macroVal = t.value.strip() if macroVal in self.defs['fnmacros']: self.addDef('fnmacros', t.macro, self.defs['fnmacros'][macroVal]) if self.verbose: print(" Copy fn macro %s => %s" % (macroVal, t.macro)) else: if t.args == '': val = self.evalExpr(macroVal) self.addDef('macros', t.macro, macroVal) self.addDef('values', t.macro, val) if self.verbose: print(" Add macro:", t.macro, "("+str(val)+")", self.defs['macros'][t.macro]) else: self.addDef('fnmacros', t.macro, self.compileFnMacro(macroVal, [x for x in t.args])) if self.verbose: print(" Add fn macro:", t.macro, t.args, self.defs['fnmacros'][t.macro]) #if self.macroListString == '': #self.macroListString = t.macro #else: #self.macroListString += '|' + t.macro #self.updateMacroDefns() #self.macroExpr << MatchFirst( map(Keyword,self.defs['macros'].keys()) ) return "#define " + t.macro + " " + macroVal def compileFnMacro(self, text, args): """Turn a function macro spec into a compiled description""" ## find all instances of each arg in text argRegex = re.compile(r'("(\\"|[^"])*")|(\b(%s)\b)' % ('|'.join(args))) start = 0 parts = [] argOrder = [] N = 3 for m in argRegex.finditer(text): arg = m.groups()[N] #print m, arg if arg is not None: 
parts.append(text[start:m.start(N)] + '%s') start = m.end(N) argOrder.append(args.index(arg)) parts.append(text[start:]) return (''.join(parts), argOrder) def expandMacros(self, line): reg = re.compile(r'("(\\"|[^"])*")|(\b(\w+)\b)') parts = [] start = 0 N = 3 ## the group number to check for macro names macros = self.defs['macros'] fnmacros = self.defs['fnmacros'] for m in reg.finditer(line): name = m.groups()[N] if name in macros: parts.append(line[start:m.start(N)]) start = m.end(N) parts.append(macros[name]) elif name in fnmacros: try: ## If function macro expansion fails, just ignore it. exp, end = self.expandFnMacro(name, line[m.end(N):]) parts.append(line[start:m.start(N)]) start = end + m.end(N) parts.append(exp) except: if sys.exc_info()[1][0] != 0: print("Function macro expansion failed:", name, line[m.end(N):]) raise parts.append(line[start:]) return ''.join(parts) #def expandMacros(self, line): #if self.macroRegex is None: #return line #parts = [] #start = 0 #N = 3 ## the group number to check for macro names #for m in self.macroRegex.finditer(line): #name = m.groups()[N] #if name is not None: #if name in self.defs['macros']: #parts.append(line[start:m.start(N)]) #start = m.end(N) #parts.append(self.defs['macros'][name]) #elif name in self.defs['fnmacros']: #try: ## If function macro expansion fails, just ignore it. 
#exp, end = self.expandFnMacro(name, line[m.end(N):]) #parts.append(line[start:m.start(N)]) #start = end + m.end(N) #parts.append(exp) #except: #if sys.exc_info()[1][0] != 0: #print "Function macro expansion failed:", name, line[m.end(N):] #raise #else: #raise Exception("Macro '%s' not found (internal error)" % name) #parts.append(line[start:]) #return ''.join(parts) def expandFnMacro(self, name, text): #print "expandMacro:", name, text defn = self.defs['fnmacros'][name] ## defn looks like ('%s + %s / %s', (0, 0, 1)) argList = stringStart + lparen + Group(delimitedList(expression))('args') + rparen res = [x for x in argList.scanString(text, 1)] if len(res) == 0: raise Exception(0, "Function macro '%s' not followed by (...)" % name) args, start, end = res[0] #print " ", res #print " ", args #print " ", defn newStr = defn[0] % tuple([args[0][i] for i in defn[1]]) #print " ", newStr return (newStr, end) # parse action to replace macro references with their respective definition #def processMacroRef(self, t): #return self.defs['macros'][t.macro] #def processFnMacroRef(self, t): #m = self.defs['fnmacros'][t.macro] ##print "=====>>" ##print "Process FN MACRO:", t ##print " macro defn:", t.macro, m ##print " macro call:", t.args ### m looks like ('a + b', ('a', 'b')) #newStr = m[0][:] ##print " starting str:", newStr #try: #for i in range(len(m[1])): ##print " step", i #arg = m[1][i] ##print " arg:", arg, '=>', t.args[i] #newStr = Keyword(arg).copy().setParseAction(lambda: t.args[i]).transformString(newStr) ##print " new str:", newStr #except: ##sys.excepthook(*sys.exc_info()) #raise ##print "<<=====" #return newStr def parseDefs(self, file, returnUnparsed=False): """Scan through the named file for variable, struct, enum, and function declarations. Returns the entire tree of successfully parsed tokens. 
If returnUnparsed is True, return a string of all lines that failed to match (for debugging).""" self.assertPyparsing() self.currentFile = file #self.definedType << kwl(self.defs['types'].keys()) parser = self.buildParser() if returnUnparsed: text = parser.suppress().transformString(self.files[file]) return re.sub(r'\n\s*\n', '\n', text) else: return [x[0] for x in parser.scanString(self.files[file])] def buildParser(self): """Builds the entire tree of parser elements for the C language (the bits we support, anyway). """ if hasattr(self, 'parser'): return self.parser self.assertPyparsing() self.structType = Forward() self.enumType = Forward() self.typeSpec = (typeQualifier + ( fundType | Optional(kwl(sizeModifiers + signModifiers)) + ident | self.structType | self.enumType ) + typeQualifier + msModifier).setParseAction(recombine) #self.argList = Forward() ### Abstract declarators for use in function pointer arguments # Thus begins the extremely hairy business of parsing C declarators. # Whomever decided this was a reasonable syntax should probably never breed. # The following parsers combined with the processDeclarator function # allow us to turn a nest of type modifiers into a correctly # ordered list of modifiers. self.declarator = Forward() self.abstractDeclarator = Forward() ## abstract declarators look like: # <empty string> # * # **[num] # (*)(int, int) # *( )(int, int)[10] # ...etc... 
self.abstractDeclarator << Group( typeQualifier + Group(ZeroOrMore('*'))('ptrs') + typeQualifier + ((Optional('&')('ref')) | (lparen + self.abstractDeclarator + rparen)('center')) + Optional(lparen + Optional(delimitedList(Group( self.typeSpec('type') + self.abstractDeclarator('decl') + Optional(Literal('=').suppress() + expression, default=None)('val') )), default=None) + rparen)('args') + Group(ZeroOrMore(lbrack + Optional(expression, default='-1') + rbrack))('arrays') ) ## Argument list may consist of declarators or abstract declarators #self.argList << delimitedList(Group( #self.typeSpec('type') + #(self.declarator('decl') | self.abstractDeclarator('decl')) + #Optional(Keyword('=')) + expression #)) ## declarators look like: # varName # *varName # **varName[num] # (*fnName)(int, int) # * fnName(int arg1=0)[10] # ...etc... self.declarator << Group( typeQualifier + callConv + Group(ZeroOrMore('*'))('ptrs') + typeQualifier + ((Optional('&')('ref') + ident('name')) | (lparen + self.declarator + rparen)('center')) + Optional(lparen + Optional(delimitedList(Group( self.typeSpec('type') + (self.declarator | self.abstractDeclarator)('decl') + Optional(Literal('=').suppress() + expression, default=None)('val') )), default=None) + rparen)('args') + Group(ZeroOrMore(lbrack + Optional(expression, default='-1') + rbrack))('arrays') ) self.declaratorList = Group(delimitedList(self.declarator)) ## typedef self.typeDecl = Keyword('typedef') + self.typeSpec('type') + self.declaratorList('declList') + semi self.typeDecl.setParseAction(self.processTypedef) ## variable declaration self.variableDecl = Group(self.typeSpec('type') + Optional(self.declaratorList('declList')) + Optional(Literal('=').suppress() + (expression('value') | (lbrace + Group(delimitedList(expression))('arrayValues') + rbrace)))) + semi self.variableDecl.setParseAction(self.processVariable) ## function definition #self.paramDecl = Group(self.typeSpec + (self.declarator | self.abstractDeclarator)) + 
Optional(Literal('=').suppress() + expression('value')) self.typelessFunctionDecl = self.declarator('decl') + nestedExpr('{', '}').suppress() self.functionDecl = self.typeSpec('type') + self.declarator('decl') + nestedExpr('{', '}').suppress() self.functionDecl.setParseAction(self.processFunction) ## Struct definition self.structDecl = Forward() structKW = (Keyword('struct') | Keyword('union')) #self.structType << structKW('structType') + ((Optional(ident)('name') + lbrace + Group(ZeroOrMore( Group(self.structDecl | self.variableDecl.copy().setParseAction(lambda: None)) ))('members') + rbrace) | ident('name')) self.structMember = ( Group(self.variableDecl.copy().setParseAction(lambda: None)) | (self.typeSpec + self.declarator + nestedExpr('{', '}')).suppress() | (self.declarator + nestedExpr('{', '}')).suppress() ) self.declList = lbrace + Group(OneOrMore(self.structMember))('members') + rbrace self.structType << (Keyword('struct') | Keyword('union'))('structType') + ((Optional(ident)('name') + self.declList) | ident('name')) self.structType.setParseAction(self.processStruct) #self.updateStructDefn() self.structDecl = self.structType + semi ## enum definition enumVarDecl = Group(ident('name') + Optional(Literal('=').suppress() + (integer('value') | ident('valueName')))) self.enumType << Keyword('enum') + (Optional(ident)('name') + lbrace + Group(delimitedList(enumVarDecl))('members') + rbrace | ident('name')) self.enumType.setParseAction(self.processEnum) self.enumDecl = self.enumType + semi #self.parser = (self.typeDecl | self.variableDecl | self.structDecl | self.enumDecl | self.functionDecl) self.parser = (self.typeDecl | self.variableDecl | self.functionDecl) return self.parser def processDeclarator(self, decl): """Process a declarator (without base type) and return a tuple (name, [modifiers]) See processType(...) 
for more information.""" toks = [] name = None #print "DECL:", decl if 'callConv' in decl and len(decl['callConv']) > 0: toks.append(decl['callConv']) if 'ptrs' in decl and len(decl['ptrs']) > 0: toks.append('*' * len(decl['ptrs'])) if 'arrays' in decl and len(decl['arrays']) > 0: #arrays = [] #for x in decl['arrays']: #n = self.evalExpr(x) #if n == -1: ## If an array was given as '[]', interpret it as '*' instead. #toks.append('*') #else: #arrays.append(n) #if len(arrays) > 0: #toks.append(arrays) toks.append([self.evalExpr(x) for x in decl['arrays']]) if 'args' in decl and len(decl['args']) > 0: #print " process args" if decl['args'][0] is None: toks.append(()) else: toks.append(tuple([self.processType(a['type'], a['decl']) + (a['val'][0],) for a in decl['args']])) if 'ref' in decl: toks.append('&') if 'center' in decl: (n, t) = self.processDeclarator(decl['center'][0]) if n is not None: name = n toks.extend(t) if 'name' in decl: name = decl['name'] return (name, toks) def processType(self, typ, decl): """Take a declarator + base type and return a serialized name/type description. The description will be a list of elements (name, [basetype, modifier, modifier, ...]) - name is the string name of the declarator or None for an abstract declarator - basetype is the string representing the base type - modifiers can be: '*' - pointer (multiple pointers "***" allowed) '&' - reference '__X' - calling convention (windows only). X can be 'cdecl' or 'stdcall' list - array. Value(s) indicate the length of each array, -1 for incomplete type. tuple - function, items are the output of processType for each function argument. 
Examples: int *x[10] => ('x', ['int', [10], '*']) char fn(int x) => ('fn', ['char', [('x', ['int'])]]) struct s (*)(int, int*) => (None, ["struct s", ((None, ['int']), (None, ['int', '*'])), '*']) """ #print "PROCESS TYPE/DECL:", typ, decl (name, decl) = self.processDeclarator(decl) return (name, [typ] + decl) def processEnum(self, s, l, t): try: if self.verbose: print("ENUM:", t) if t.name == '': n = 0 while True: name = 'anonEnum%d' % n if name not in self.defs['enums']: break n += 1 else: name = t.name[0] if self.verbose: print(" name:", name) if name not in self.defs['enums']: i = 0 enum = {} for v in t.members: if v.value != '': i = eval(v.value) if v.valueName != '': i = enum[v.valueName] enum[v.name] = i self.addDef('values', v.name, i) i += 1 if self.verbose: print(" members:", enum) self.addDef('enums', name, enum) self.addDef('types', 'enum '+name, ('enum', name)) return ('enum ' + name) except: if self.verbose: print("Error processing enum:", t) sys.excepthook(*sys.exc_info()) def processFunction(self, s, l, t): if self.verbose: print("FUNCTION", t, list(t.keys())) try: (name, decl) = self.processType(t.type, t.decl[0]) if len(decl) == 0 or type(decl[-1]) != tuple: print(t) raise Exception("Incorrect declarator type for function definition.") if self.verbose: print(" name:", name) print(" sig:", decl) self.addDef('functions', name, (decl[:-1], decl[-1])) except: if self.verbose: print("Error processing function:", t) sys.excepthook(*sys.exc_info()) def packingAt(self, line): """Return the structure packing value at the given line number""" packing = None for p in self.packList[self.currentFile]: if p[0] <= line: packing = p[1] else: break return packing def processStruct(self, s, l, t): try: strTyp = t.structType # struct or union ## check for extra packing rules packing = self.packingAt(lineno(l, s)) if self.verbose: print(strTyp.upper(), t.name, t) if t.name == '': n = 0 while True: sname = 'anon_%s%d' % (strTyp, n) if sname not in 
self.defs[strTyp+'s']: break n += 1 else: if type(t.name) is str: sname = t.name else: sname = t.name[0] if self.verbose: print(" NAME:", sname) if sname not in self.defs[strTyp+'s'] or self.defs[strTyp+'s'][sname].get('members', []) == []: if self.verbose: print(" NEW " + strTyp.upper()) struct = [] for m in t.members: typ = m[0].type val = self.evalExpr(m) if self.verbose: print(" member:", m, list(m[0].keys()), m[0].declList) if len(m[0].declList) == 0: ## anonymous member struct.append((None, [typ], None)) for d in m[0].declList: (name, decl) = self.processType(typ, d) struct.append((name, decl, val)) if self.verbose: print(" ", name, decl, val) self.addDef(strTyp+'s', sname, {'pack': packing, 'members': struct}) self.addDef('types', strTyp+' '+sname, (strTyp, sname)) #self.updateStructDefn() else: if self.verbose: print(" (already defined)") return strTyp+' '+sname except: #print t sys.excepthook(*sys.exc_info()) def processVariable(self, s, l, t): if self.verbose: print("VARIABLE:", t) try: val = self.evalExpr(t[0]) for d in t[0].declList: (name, typ) = self.processType(t[0].type, d) if type(typ[-1]) is tuple: ## this is a function prototype if self.verbose: print(" Add function prototype:", name, typ, val) self.addDef('functions', name, (typ[:-1], typ[-1])) else: if self.verbose: print(" Add variable:", name, typ, val) self.addDef('variables', name, (val, typ)) self.addDef('values', name, val) except: #print t, t[0].name, t.value sys.excepthook(*sys.exc_info()) def processTypedef(self, s, l, t): if self.verbose: print("TYPE:", t) typ = t.type #print t, t.type for d in t.declList: (name, decl) = self.processType(typ, d) if self.verbose: print(" ", name, decl) self.addDef('types', name, decl) #self.definedType << MatchFirst( map(Keyword,self.defs['types'].keys()) ) def evalExpr(self, toks): ## Evaluates expressions. Currently only works for expressions that also ## happen to be valid python expressions. 
## This function does not currently include previous variable ## declarations, but that should not be too difficult to implement.. #print "Eval:", toks try: if isinstance(toks, six.string_types): #print " as string" val = self.eval(toks, None, self.defs['values']) elif toks.arrayValues != '': #print " as list:", toks.arrayValues val = [self.eval(x, None, self.defs['values']) for x in toks.arrayValues] elif toks.value != '': #print " as value" val = self.eval(toks.value, None, self.defs['values']) else: #print " as None" val = None return val except: if self.verbose: print(" failed eval:", toks) print(" ", sys.exc_info()[1]) return None def eval(self, expr, *args): """Just eval with a little extra robustness.""" expr = expr.strip() cast = (lparen + self.typeSpec + self.abstractDeclarator + rparen).suppress() expr = (quotedString | number | cast).transformString(expr) if expr == '': return None return eval(expr, *args) def printAll(self, file=None): """Print everything parsed from files. Useful for debugging.""" from pprint import pprint for k in self.dataList: print("============== %s ==================" % k) if file is None: pprint(self.defs[k]) else: pprint(self.fileDefs[file][k]) def addDef(self, typ, name, val): """Add a definition of a specific type to both the definition set for the current file and the global definition set.""" self.defs[typ][name] = val if self.currentFile is None: baseName = None else: baseName = os.path.basename(self.currentFile) if baseName not in self.fileDefs: self.fileDefs[baseName] = {} for k in self.dataList: self.fileDefs[baseName][k] = {} self.fileDefs[baseName][typ][name] = val def remDef(self, typ, name): if self.currentFile is None: baseName = None else: baseName = os.path.basename(self.currentFile) del self.defs[typ][name] del self.fileDefs[baseName][typ][name] def isFundType(self, typ): """Return True if this type is a fundamental C type, struct, or union""" if typ[0][:7] == 'struct ' or typ[0][:6] == 'union ' or typ[0][:5] == 
'enum ': return True names = baseTypes + sizeModifiers + signModifiers for w in typ[0].split(): if w not in names: return False return True def evalType(self, typ): """evaluate a named type into its fundamental type""" used = [] while True: if self.isFundType(typ): ## remove 'signed' before returning evaluated type typ[0] = re.sub(r'\bsigned\b', '', typ[0]).strip() return typ parent = typ[0] if parent in used: raise Exception('Recursive loop while evaluating types. (typedefs are %s)' % (' -> '.join(used+[parent]))) used.append(parent) if not parent in self.defs['types']: raise Exception('Unknown type "%s" (typedefs are %s)' % (parent, ' -> '.join(used))) pt = self.defs['types'][parent] typ = pt + typ[1:] def find(self, name): """Search all definitions for the given name""" res = [] for f in self.fileDefs: fd = self.fileDefs[f] for t in fd: typ = fd[t] for k in typ: if isinstance(name, six.string_types): if k == name: res.append((f, t)) else: if re.match(name, k): res.append((f, t, k)) return res def findText(self, text): """Search all file strings for text, return matching lines.""" res = [] for f in self.files: l = self.files[f].split('\n') for i in range(len(l)): if text in l[i]: res.append((f, i, l[i])) return res hasPyParsing = False try: from pyparsing import * ParserElement.enablePackrat() hasPyParsing = True except: pass ## no need to do anything yet as we might not be using any parsing functions.. ## Define some common language elements if pyparsing is available. 
## Token classes used to categorize C identifiers while parsing.
numTypes = ['int', 'float', 'double', '__int64']
baseTypes = ['char', 'bool', 'void'] + numTypes
sizeModifiers = ['short', 'long']
signModifiers = ['signed', 'unsigned']
qualifiers = ['const', 'static', 'volatile', 'inline', 'restrict', 'near', 'far']
msModifiers = ['__based', '__declspec', '__fastcall', '__restrict', '__sptr', '__uptr', '__w64', '__unaligned', '__nullterminated']
keywords = ['struct', 'enum', 'union', '__stdcall', '__cdecl'] + qualifiers + baseTypes + sizeModifiers + signModifiers

if hasPyParsing:
    ## Some basic definitions
    # Forward declaration: `expression` is recursive (atoms may contain
    # parenthesized sub-expressions), so it is defined below with `<<`.
    expression = Forward()
    pexpr = '(' + expression + ')'

    def kwl(strs):
        """Generate a match-first list of keywords given a list of strings."""
        #return MatchFirst(map(Keyword,strs))
        return Regex(r'\b(%s)\b' % '|'.join(strs))

    keyword = kwl(keywords)
    wordchars = alphanums+'_$'
    # An identifier is a word that is NOT one of the C keywords above.
    ident = (WordStart(wordchars) + ~keyword + Word(alphas+"_",alphanums+"_$") + WordEnd(wordchars)).setParseAction(lambda t: t[0])
    #integer = Combine(Optional("-") + (Word( nums ) | Combine("0x" + Word(hexnums))))
    # Punctuation tokens; `.ignore(quotedString)` keeps the same characters
    # inside string literals from matching, and `.suppress()` drops them
    # from the parse output.
    semi   = Literal(";").ignore(quotedString).suppress()
    lbrace = Literal("{").ignore(quotedString).suppress()
    rbrace = Literal("}").ignore(quotedString).suppress()
    lbrack = Literal("[").ignore(quotedString).suppress()
    rbrack = Literal("]").ignore(quotedString).suppress()
    lparen = Literal("(").ignore(quotedString).suppress()
    rparen = Literal(")").ignore(quotedString).suppress()
    # Numeric literals; the parse actions drop C 'U'/'L' suffixes so the
    # remaining text is a plain literal.
    hexint = Regex('-?0x[%s]+[UL]*'%hexnums).setParseAction(lambda t: t[0].rstrip('UL'))
    decint = Regex(r'-?\d+[UL]*').setParseAction(lambda t: t[0].rstrip('UL'))
    integer = (hexint | decint)
    floating = Regex(r'-?((\d+(\.\d*)?)|(\.\d+))([eE]-?\d+)?')
    # Order matters: try hex first so '0x..' is not split, then floats so
    # '1.5' is not consumed as decimal int '1'.
    number = (hexint | floating | decint)
    bitfieldspec = ":" + integer
    biOperator = oneOf("+ - / * | & || && ! ~ ^ % == != > < >= <= -> . :: << >> = ? :")
    uniRightOperator = oneOf("++ --")
    uniLeftOperator = oneOf("++ -- - + * sizeof new")
    name = (WordStart(wordchars) + Word(alphas+"_",alphanums+"_$") + WordEnd(wordchars))
    #number = Word(hexnums + ".-+xUL").setParseAction(lambda t: t[0].rstrip('UL'))
    #stars = Optional(Word('*&'), default='')('ptrs')  ## may need to separate & from * later?
    callConv = Optional(Keyword('__cdecl')|Keyword('__stdcall'))('callConv')

    ## Removes '__name' from all type specs.. may cause trouble.
    underscore2Ident = (WordStart(wordchars) + ~keyword + '__' + Word(alphanums,alphanums+"_$") + WordEnd(wordchars)).setParseAction(lambda t: t[0])
    typeQualifier = ZeroOrMore((underscore2Ident + Optional(nestedExpr())) | kwl(qualifiers)).suppress()
    msModifier = ZeroOrMore(kwl(msModifiers) + Optional(nestedExpr())).suppress()
    pointerOperator = (
        '*' + typeQualifier |
        '&' + typeQualifier |
        '::' + ident + typeQualifier
    )

    ## language elements
    # A fundamental type is one or more sign/size/base keywords, joined
    # into a single space-separated string (e.g. 'unsigned long int').
    fundType = OneOrMore(kwl(signModifiers + sizeModifiers + baseTypes)).setParseAction(lambda t: ' '.join(t))

    ## Is there a better way to process expressions with cast operators??
    # castAtom allows an optional leading '(typename)' cast, which is
    # suppressed; uncastAtom is identical but without the cast.
    castAtom = (
        ZeroOrMore(uniLeftOperator) + Optional('('+ident+')').suppress() +
        ((
            ident + '(' + Optional(delimitedList(expression)) + ')' |
            ident + OneOrMore('[' + expression + ']') |
            ident | number | quotedString
        ) |
        ('(' + expression + ')')) +
        ZeroOrMore(uniRightOperator)
    )
    uncastAtom = (
        ZeroOrMore(uniLeftOperator) +
        ((
            ident + '(' + Optional(delimitedList(expression)) + ')' |
            ident + OneOrMore('[' + expression + ']') |
            ident | number | quotedString
        ) |
        ('(' + expression + ')')) +
        ZeroOrMore(uniRightOperator)
    )
    atom = castAtom | uncastAtom

    # Resolve the forward declaration: an expression is atoms joined by
    # binary operators.
    expression << Group(
        atom + ZeroOrMore(biOperator + atom)
    )

    arrayOp = lbrack + expression + rbrack

    def recombine(tok):
        """Flattens a tree of tokens and joins into one big string."""
        return " ".join(flatten(tok.asList()))
    expression.setParseAction(recombine)

    def flatten(lst):
        # Recursively flatten nested lists/tuples into a flat list of strings.
        res = []
        for i in lst:
            if type(i) in [list, tuple]:
                res.extend(flatten(i))
            else:
                res.append(str(i))
        return res

    def printParseResults(pr, depth=0, name=''):
        """For debugging; pretty-prints parse result objects."""
        start = name + " "*(20-len(name)) + ':'+ '..'*depth
        if isinstance(pr, ParseResults):
            print(start)
            for i in pr:
                # Recover the key under which this child is stored, if any.
                name = ''
                for k in pr.keys():
                    if pr[k] is i:
                        name = k
                        break
                printParseResults(i, depth+1, name)
        else:
            print(start + str(pr))

## Just for fun..
if __name__ == '__main__':
    files = sys.argv[1:]
    p = CParser(files)
    p.processAll()
    p.printAll()
{ "content_hash": "4656e0785f76ce9bd69fdc8fc3c3c259", "timestamp": "", "source": "github", "line_count": 1329, "max_line_length": 237, "avg_line_length": 42.090293453724605, "alnum_prop": 0.5099574528942759, "repo_name": "meganbkratz/acq4", "id": "7699f1c8ead4270a8beae17455d4c985b6a1628d", "size": "55962", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "acq4/util/clibrary/CParser.py", "mode": "33188", "license": "mit", "language": [ { "name": "AMPL", "bytes": "3037" }, { "name": "Arduino", "bytes": "18651" }, { "name": "Batchfile", "bytes": "64" }, { "name": "C", "bytes": "705091" }, { "name": "C++", "bytes": "321384" }, { "name": "CSS", "bytes": "716" }, { "name": "MATLAB", "bytes": "1752" }, { "name": "Objective-C", "bytes": "596020" }, { "name": "Processing", "bytes": "13403" }, { "name": "Python", "bytes": "5922488" } ], "symlink_target": "" }
"""Calculation methods related to volume based on cclib data.""" from __future__ import print_function import copy import numpy try: from PyQuante.CGBF import CGBF from cclib.bridge import cclib2pyquante module_pyq = True except: module_pyq = False try: from pyvtk import * from pyvtk.DataSetAttr import * module_pyvtk = True except: module_pyvtk = False from cclib.parser.utils import convertor class Volume(object): """Represent a volume in space. Required parameters: origin -- the bottom left hand corner of the volume topcorner -- the top right hand corner spacing -- the distance between the points in the cube Attributes: data -- a numpy array of values for each point in the volume (set to zero at initialisation) numpts -- the numbers of points in the (x,y,z) directions """ def __init__(self, origin, topcorner, spacing): self.origin = origin self.spacing = spacing self.topcorner = topcorner self.numpts = [] for i in range(3): self.numpts.append(int((self.topcorner[i]-self.origin[i])/self.spacing[i] + 1) ) self.data = numpy.zeros( tuple(self.numpts), "d") def __str__(self): """Return a string representation.""" return "Volume %s to %s (density: %s)" % (self.origin, self.topcorner, self.spacing) def write(self, filename, format="Cube"): """Write the volume to file.""" format = format.upper() if format.upper() not in ["VTK", "CUBE"]: raise "Format must be either VTK or Cube" elif format=="VTK": self.writeasvtk(filename) else: self.writeascube(filename) def writeasvtk(self, filename): if not module_pyvtk: raise Exception("You need to have pyvtk installed") ranges = (numpy.arange(self.data.shape[2]), numpy.arange(self.data.shape[1]), numpy.arange(self.data.shape[0])) v = VtkData(RectilinearGrid(*ranges), "Test", PointData(Scalars(self.data.ravel(), "from cclib", "default"))) v.tofile(filename) def integrate(self): boxvol = (self.spacing[0] * self.spacing[1] * self.spacing[2] * convertor(1, "Angstrom", "bohr")**3) return sum(self.data.ravel()) * boxvol def 
integrate_square(self): boxvol = (self.spacing[0] * self.spacing[1] * self.spacing[2] * convertor(1, "Angstrom", "bohr")**3) return sum(self.data.ravel()**2) * boxvol def writeascube(self, filename): # Remember that the units are bohr, not Angstroms convert = lambda x : convertor(x, "Angstrom", "bohr") ans = [] ans.append("Cube file generated by cclib") ans.append("") format = "%4d%12.6f%12.6f%12.6f" origin = [convert(x) for x in self.origin] ans.append(format % (0, origin[0], origin[1], origin[2])) ans.append(format % (self.data.shape[0], convert(self.spacing[0]), 0.0, 0.0)) ans.append(format % (self.data.shape[1], 0.0, convert(self.spacing[1]), 0.0)) ans.append(format % (self.data.shape[2], 0.0, 0.0, convert(self.spacing[2]))) line = [] for i in range(self.data.shape[0]): for j in range(self.data.shape[1]): for k in range(self.data.shape[2]): line.append(scinotation(self.data[i][j][k])) if len(line)==6: ans.append(" ".join(line)) line = [] if line: ans.append(" ".join(line)) line = [] outputfile = open(filename, "w") outputfile.write("\n".join(ans)) outputfile.close() def scinotation(num): """Write in scientific notation >>> scinotation(1./654) ' 1.52905E-03' >>> scinotation(-1./654) '-1.52905E-03' """ ans = "%10.5E" % num broken = ans.split("E") exponent = int(broken[1]) if exponent<-99: return " 0.000E+00" if exponent<0: sign="-" else: sign="+" return ("%sE%s%s" % (broken[0],sign,broken[1][-2:])).rjust(12) def getbfs(coords, gbasis): """Convenience function for both wavefunction and density based on PyQuante Ints.py.""" mymol = makepyquante(coords, [0 for x in coords]) sym2powerlist = { 'S' : [(0,0,0)], 'P' : [(1,0,0),(0,1,0),(0,0,1)], 'D' : [(2,0,0),(0,2,0),(0,0,2),(1,1,0),(0,1,1),(1,0,1)], 'F' : [(3,0,0),(2,1,0),(2,0,1),(1,2,0),(1,1,1),(1,0,2), (0,3,0),(0,2,1),(0,1,2), (0,0,3)] } bfs = [] for i,atom in enumerate(mymol): bs = gbasis[i] for sym,prims in bs: for power in sym2powerlist[sym]: bf = CGBF(atom.pos(),power) for expnt,coef in prims: 
bf.add_primitive(expnt,coef) bf.normalize() bfs.append(bf) return bfs def wavefunction(coords, mocoeffs, gbasis, volume): """Calculate the magnitude of the wavefunction at every point in a volume. Attributes: coords -- the coordinates of the atoms mocoeffs -- mocoeffs for one eigenvalue gbasis -- gbasis from a parser object volume -- a template Volume object (will not be altered) """ bfs = getbfs(coords, gbasis) wavefn = copy.copy(volume) wavefn.data = numpy.zeros( wavefn.data.shape, "d") conversion = convertor(1,"bohr","Angstrom") x = numpy.arange(wavefn.origin[0], wavefn.topcorner[0]+wavefn.spacing[0], wavefn.spacing[0]) / conversion y = numpy.arange(wavefn.origin[1], wavefn.topcorner[1]+wavefn.spacing[1], wavefn.spacing[1]) / conversion z = numpy.arange(wavefn.origin[2], wavefn.topcorner[2]+wavefn.spacing[2], wavefn.spacing[2]) / conversion for bs in range(len(bfs)): data = numpy.zeros( wavefn.data.shape, "d") for i,xval in enumerate(x): for j,yval in enumerate(y): for k,zval in enumerate(z): data[i, j, k] = bfs[bs].amp(xval,yval,zval) numpy.multiply(data, mocoeffs[bs], data) numpy.add(wavefn.data, data, wavefn.data) return wavefn def electrondensity(coords, mocoeffslist, gbasis, volume): """Calculate the magnitude of the electron density at every point in a volume. Attributes: coords -- the coordinates of the atoms mocoeffs -- mocoeffs for all of the occupied eigenvalues gbasis -- gbasis from a parser object volume -- a template Volume object (will not be altered) Note: mocoeffs is a list of numpy arrays. The list will be of length 1 for restricted calculations, and length 2 for unrestricted. 
""" bfs = getbfs(coords, gbasis) density = copy.copy(volume) density.data = numpy.zeros( density.data.shape, "d") conversion = convertor(1,"bohr","Angstrom") x = numpy.arange(density.origin[0], density.topcorner[0]+density.spacing[0], density.spacing[0]) / conversion y = numpy.arange(density.origin[1], density.topcorner[1]+density.spacing[1], density.spacing[1]) / conversion z = numpy.arange(density.origin[2], density.topcorner[2]+density.spacing[2], density.spacing[2]) / conversion for mocoeffs in mocoeffslist: for mocoeff in mocoeffs: wavefn = numpy.zeros( density.data.shape, "d") for bs in range(len(bfs)): data = numpy.zeros( density.data.shape, "d") for i,xval in enumerate(x): for j,yval in enumerate(y): tmp = [] for k,zval in enumerate(z): tmp.append(bfs[bs].amp(xval, yval, zval)) data[i,j,:] = tmp numpy.multiply(data, mocoeff[bs], data) numpy.add(wavefn, data, wavefn) density.data += wavefn**2 if len(mocoeffslist) == 1: density.data = density.data*2. # doubly-occupied return density if __name__=="__main__": try: import psyco psyco.full() except ImportError: pass from cclib.io import ccopen import logging a = ccopen("../../../data/Gaussian/basicGaussian03/dvb_sp_basis.log") a.logger.setLevel(logging.ERROR) c = a.parse() b = ccopen("../../../data/Gaussian/basicGaussian03/dvb_sp.out") b.logger.setLevel(logging.ERROR) d = b.parse() vol = Volume( (-3.0,-6,-2.0), (3.0, 6, 2.0), spacing=(0.25,0.25,0.25) ) wavefn = wavefunction(d.atomcoords[0], d.mocoeffs[0][d.homos[0]], c.gbasis, vol) assert abs(wavefn.integrate())<1E-6 # not necessarily true for all wavefns assert abs(wavefn.integrate_square() - 1.00)<1E-3 # true for all wavefns print(wavefn.integrate(), wavefn.integrate_square()) vol = Volume( (-3.0,-6,-2.0), (3.0, 6, 2.0), spacing=(0.25,0.25,0.25) ) frontierorbs = [d.mocoeffs[0][(d.homos[0]-3):(d.homos[0]+1)]] density = electrondensity(d.atomcoords[0], frontierorbs, c.gbasis, vol) assert abs(density.integrate()-8.00)<1E-2 print("Combined Density of 4 Frontier 
orbitals=",density.integrate())
{ "content_hash": "22346d3431adfbd7b420001d86ed51f4", "timestamp": "", "source": "github", "line_count": 261, "max_line_length": 126, "avg_line_length": 37.14942528735632, "alnum_prop": 0.5552805280528053, "repo_name": "Schamnad/cclib", "id": "8ef6f354b589160bc181d22232119b57f8efbaed", "size": "9899", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/cclib/method/volume.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Arc", "bytes": "18395" }, { "name": "DIGITAL Command Language", "bytes": "21581" }, { "name": "Python", "bytes": "836753" }, { "name": "Shell", "bytes": "867" }, { "name": "TeX", "bytes": "29388" } ], "symlink_target": "" }
import os
import sys
import time
import urllib
import tempfile
from shutil import rmtree, copyfile
from distutils.dir_util import mkpath

import yaml

from jinja2 import Environment, PackageLoader, FileSystemLoader, StrictUndefined

from hokusai import CWD
from hokusai.lib.command import command
from hokusai.lib.config import HOKUSAI_CONFIG_DIR, config
from hokusai.services.ecr import ECR
from hokusai.lib.common import print_green, clean_string, shout
from hokusai.lib.exceptions import HokusaiError


def _strip_j2_suffix(filename):
    """Return filename without a trailing '.j2' template suffix.

    BUGFIX: the original used str.rstrip('.j2'), which strips any trailing
    run of the characters '.', 'j' and '2' rather than the literal suffix
    (e.g. 'v2.j2'.rstrip('.j2') == 'v').
    """
    if filename.endswith('.j2'):
        return filename[:-len('.j2')]
    return filename


@command(config_check=False)
def setup(project_name, template_remote, template_dir, template_vars, allow_missing_vars):
    """Initialize a hokusai project: create config, ECR repo, and render templates.

    Parameters:
        project_name -- name of the project (cleaned before use)
        template_remote -- optional git remote (with optional '#branch') to
            clone templates from
        template_dir -- optional local (or in-remote) template directory
        template_vars -- iterable of 'key=value' strings added to the
            template rendering context
        allow_missing_vars -- when False, undefined template variables raise
            (jinja2 StrictUndefined)

    Raises:
        HokusaiError -- on malformed template_vars or a missing required
            template file.
    """
    mkpath(os.path.join(CWD, HOKUSAI_CONFIG_DIR))

    config.create(clean_string(project_name))

    ecr = ECR()
    if ecr.project_repo_exists():
        print_green("Project repo %s already exists. Skipping create." % ecr.project_repo)
    else:
        ecr.create_project_repo()
        print_green("Created project repo %s" % ecr.project_repo)

    # When a remote is given, clone it (optionally a specific branch after
    # '#') into a scratch dir; cleaned up in the finally block below.
    scratch_dir = None
    if template_remote:
        scratch_dir = tempfile.mkdtemp()
        git_repo_and_branch = template_remote.split('#', 1)
        git_repo = git_repo_and_branch[0]
        if len(git_repo_and_branch) == 2:
            git_branch = git_repo_and_branch[1]
        else:
            git_branch = "master"
        shout("git clone -b %s --single-branch %s %s" % (git_branch, git_repo, scratch_dir))

    custom_template_dir = None

    if allow_missing_vars:
        environment_kwargs = {}
    else:
        environment_kwargs = { "undefined": StrictUndefined }

    # Pick the template source: remote clone subdir > remote clone root >
    # local dir > templates bundled with hokusai (PyInstaller or package).
    if scratch_dir and template_dir:
        custom_template_dir = os.path.join(scratch_dir, os.path.basename(template_dir))
        env = Environment(loader=FileSystemLoader(os.path.join(scratch_dir, os.path.basename(template_dir))), **environment_kwargs)
    elif scratch_dir:
        custom_template_dir = scratch_dir
        env = Environment(loader=FileSystemLoader(scratch_dir), **environment_kwargs)
    elif template_dir:
        custom_template_dir = os.path.abspath(template_dir)
        env = Environment(loader=FileSystemLoader(os.path.abspath(template_dir)), **environment_kwargs)
    else:
        try:
            # sys._MEIPASS exists only inside a PyInstaller bundle.
            base_path = sys._MEIPASS
            env = Environment(loader=FileSystemLoader(os.path.join(base_path, 'hokusai', 'templates')))
        except:
            env = Environment(loader=PackageLoader('hokusai', 'templates'))

    required_templates = [
        'Dockerfile.j2',
        '.dockerignore.j2',
        'hokusai/build.yml.j2',
        'hokusai/development.yml.j2',
        'hokusai/test.yml.j2',
        'hokusai/staging.yml.j2',
        'hokusai/production.yml.j2'
    ]

    template_context = {
        "project_name": config.project_name,
        "project_repo": ecr.project_repo
    }

    for s in template_vars:
        if '=' not in s:
            raise HokusaiError("Error: template variables must be of the form 'key=value'")
        split = s.split('=', 1)
        template_context[split[0]] = split[1]

    try:
        for template in required_templates:
            if custom_template_dir and not os.path.isfile(os.path.join(custom_template_dir, template)):
                raise HokusaiError("Could not find required template file %s" % template)
            with open(os.path.join(CWD, _strip_j2_suffix(template)), 'w') as f:
                f.write(env.get_template(template).render(**template_context))
            print_green("Created %s" % _strip_j2_suffix(template))

        if custom_template_dir:
            for root, _, files in os.walk(custom_template_dir):
                subpath = os.path.relpath(root, custom_template_dir)
                # BUGFIX: was `subpath is not '.'` -- identity comparison
                # against a string literal relies on interning and is a
                # SyntaxWarning on modern Python; use equality.
                if subpath != '.':
                    mkpath(os.path.join(CWD, subpath))
                for filename in files:
                    if subpath != '.':
                        file_path = os.path.join(subpath, filename)
                    else:
                        file_path = filename
                    # Required templates were already rendered above.
                    if file_path in required_templates:
                        continue
                    if file_path.endswith('.j2'):
                        with open(os.path.join(CWD, _strip_j2_suffix(file_path)), 'w') as f:
                            f.write(env.get_template(file_path).render(**template_context))
                    else:
                        copyfile(os.path.join(custom_template_dir, file_path), os.path.join(CWD, file_path))
                    print_green("Created %s" % _strip_j2_suffix(file_path))
    finally:
        if scratch_dir:
            rmtree(scratch_dir)
{ "content_hash": "8c18c36e955dd3a3ab49b36cccfac27f", "timestamp": "", "source": "github", "line_count": 115, "max_line_length": 127, "avg_line_length": 35.77391304347826, "alnum_prop": 0.6737967914438503, "repo_name": "izakp/hokusai", "id": "df7d6745dbb639e74f3392465490f4e33ca4fdd5", "size": "4114", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "hokusai/commands/setup.py", "mode": "33188", "license": "mit", "language": [ { "name": "Dockerfile", "bytes": "599" }, { "name": "Makefile", "bytes": "3484" }, { "name": "Python", "bytes": "134404" } ], "symlink_target": "" }
import tarfile

import numpy as np

from paddle.io import Dataset
from paddle.dataset.common import _check_exists_and_download

__all__ = []

URL_DEV_TEST = (
    'http://www-lium.univ-lemans.fr/~schwenk/'
    'cslm_joint_paper/data/dev+test.tgz'
)
MD5_DEV_TEST = '7d7897317ddd8ba0ae5c5fa7248d3ff5'
# this is a small set of data for test. The original data is too large and
# will be added later.
URL_TRAIN = 'http://paddlemodels.bj.bcebos.com/wmt/wmt14.tgz'
MD5_TRAIN = '0791583d57d5beb693b9414c5b36798c'

# Special vocabulary tokens: sequence start, sequence end, unknown word.
START = "<s>"
END = "<e>"
UNK = "<unk>"
# Dictionary index used for out-of-vocabulary words.
UNK_IDX = 2


class WMT14(Dataset):
    """
    Implementation of `WMT14 <http://www.statmt.org/wmt14/>`_ test dataset.
    The original WMT14 dataset is too large and a small set of data for set is
    provided. This module will download dataset from
    http://paddlemodels.bj.bcebos.com/wmt/wmt14.tgz .

    Args:
        data_file(str): path to data tar file, can be set None if
            :attr:`download` is True. Default None
        mode(str): 'train', 'test' or 'gen'. Default 'train'
        dict_size(int): word dictionary size. Default -1.
        download(bool): whether to download dataset automatically if
            :attr:`data_file` is not set. Default True

    Returns:
        Dataset: Instance of WMT14 dataset

        - src_ids (np.array) - The sequence of token ids of source language.
        - trg_ids (np.array) - The sequence of token ids of target language.
        - trg_ids_next (np.array) - The next sequence of token ids of target language.

    Examples:

        .. code-block:: python

            import paddle
            from paddle.text.datasets import WMT14

            class SimpleNet(paddle.nn.Layer):
                def __init__(self):
                    super().__init__()

                def forward(self, src_ids, trg_ids, trg_ids_next):
                    return paddle.sum(src_ids), paddle.sum(trg_ids), paddle.sum(trg_ids_next)

            wmt14 = WMT14(mode='train', dict_size=50)

            for i in range(10):
                src_ids, trg_ids, trg_ids_next = wmt14[i]
                src_ids = paddle.to_tensor(src_ids)
                trg_ids = paddle.to_tensor(trg_ids)
                trg_ids_next = paddle.to_tensor(trg_ids_next)

                model = SimpleNet()
                src_ids, trg_ids, trg_ids_next = model(src_ids, trg_ids, trg_ids_next)
                print(src_ids.numpy(), trg_ids.numpy(), trg_ids_next.numpy())

    """

    def __init__(
        self, data_file=None, mode='train', dict_size=-1, download=True
    ):
        assert mode.lower() in [
            'train',
            'test',
            'gen',
        ], "mode should be 'train', 'test' or 'gen', but got {}".format(mode)
        self.mode = mode.lower()

        self.data_file = data_file
        if self.data_file is None:
            assert (
                download
            ), "data_file is not set and downloading automatically is disabled"
            self.data_file = _check_exists_and_download(
                data_file, URL_TRAIN, MD5_TRAIN, 'wmt14', download
            )

        # read dataset into memory
        assert dict_size > 0, "dict_size should be set as positive number"
        self.dict_size = dict_size
        self._load_data()

    def _load_data(self):
        # Build a word -> index mapping from the first `size` lines of a
        # dictionary file inside the tar archive.
        def __to_dict(fd, size):
            out_dict = dict()
            for line_count, line in enumerate(fd):
                if line_count < size:
                    out_dict[line.strip().decode()] = line_count
                else:
                    break
            return out_dict

        self.src_ids = []
        self.trg_ids = []
        self.trg_ids_next = []
        with tarfile.open(self.data_file, mode='r') as f:
            # The archive is expected to contain exactly one '*src.dict' and
            # one '*trg.dict' vocabulary file.
            names = [
                each_item.name
                for each_item in f
                if each_item.name.endswith("src.dict")
            ]
            assert len(names) == 1
            self.src_dict = __to_dict(f.extractfile(names[0]), self.dict_size)
            names = [
                each_item.name
                for each_item in f
                if each_item.name.endswith("trg.dict")
            ]
            assert len(names) == 1
            self.trg_dict = __to_dict(f.extractfile(names[0]), self.dict_size)

            # Data files live at '<mode>/<mode>' inside the archive; each
            # line is 'source sentence<TAB>target sentence'.
            file_name = "{}/{}".format(self.mode, self.mode)
            names = [
                each_item.name
                for each_item in f
                if each_item.name.endswith(file_name)
            ]
            for name in names:
                for line in f.extractfile(name):
                    line = line.decode()
                    line_split = line.strip().split('\t')
                    if len(line_split) != 2:
                        continue
                    src_seq = line_split[0]  # one source sequence
                    src_words = src_seq.split()
                    src_ids = [
                        self.src_dict.get(w, UNK_IDX)
                        for w in [START] + src_words + [END]
                    ]

                    trg_seq = line_split[1]  # one target sequence
                    trg_words = trg_seq.split()
                    trg_ids = [self.trg_dict.get(w, UNK_IDX) for w in trg_words]

                    # remove sequence whose length > 80 in training mode
                    if len(src_ids) > 80 or len(trg_ids) > 80:
                        continue
                    # Target output is shifted by one: input starts with
                    # <s>, expected-next-token sequence ends with <e>.
                    trg_ids_next = trg_ids + [self.trg_dict[END]]
                    trg_ids = [self.trg_dict[START]] + trg_ids

                    self.src_ids.append(src_ids)
                    self.trg_ids.append(trg_ids)
                    self.trg_ids_next.append(trg_ids_next)

    def __getitem__(self, idx):
        return (
            np.array(self.src_ids[idx]),
            np.array(self.trg_ids[idx]),
            np.array(self.trg_ids_next[idx]),
        )

    def __len__(self):
        return len(self.src_ids)

    def get_dict(self, reverse=False):
        """
        Get the source and target dictionary.

        Args:
            reverse (bool): whether to reverse key and value in dictionary,
                i.e. key: value to value: key.

        Returns:
            Two dictionaries, the source and target dictionary.

        Examples:

            .. code-block:: python

                from paddle.text.datasets import WMT14
                wmt14 = WMT14(mode='train', dict_size=50)
                src_dict, trg_dict = wmt14.get_dict()
        """
        src_dict, trg_dict = self.src_dict, self.trg_dict
        if reverse:
            src_dict = {v: k for k, v in src_dict.items()}
            trg_dict = {v: k for k, v in trg_dict.items()}
        return src_dict, trg_dict
{ "content_hash": "66a36d5a885dddc5508b31494decfa4b", "timestamp": "", "source": "github", "line_count": 192, "max_line_length": 93, "avg_line_length": 34.973958333333336, "alnum_prop": 0.5189873417721519, "repo_name": "luotao1/Paddle", "id": "8cf66c27f8c50e4d49c66387e104c18fa30e6af4", "size": "7328", "binary": false, "copies": "2", "ref": "refs/heads/develop", "path": "python/paddle/text/datasets/wmt14.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "58544" }, { "name": "C", "bytes": "210300" }, { "name": "C++", "bytes": "36771446" }, { "name": "CMake", "bytes": "903079" }, { "name": "Cuda", "bytes": "5200715" }, { "name": "Dockerfile", "bytes": "4361" }, { "name": "Go", "bytes": "49796" }, { "name": "Java", "bytes": "16630" }, { "name": "Jinja", "bytes": "23852" }, { "name": "MLIR", "bytes": "39982" }, { "name": "Python", "bytes": "36248258" }, { "name": "R", "bytes": "1332" }, { "name": "Shell", "bytes": "553175" } ], "symlink_target": "" }
import unittest from pyparsing import ParseException from tests.utils.grammar import get_record_grammar """ CWR Original Work Title for Versions grammar tests. The following cases are tested: """ __author__ = 'Bernardo Martínez Garrido' __license__ = 'MIT' __status__ = 'Development' class TestOriginalWorkTitleGrammar(unittest.TestCase): def setUp(self): self.grammar = get_record_grammar('original_work_title') def test_valid_full(self): record = 'VER0000123400000023THE TITLE T0123456789ESLAST NAME 1 FIRST NAME 1 THE SOURCE 00014107338I-000000229-7LAST NAME 2 FIRST NAME 2 00014107339I-000000230-7ABCD0123456789' result = self.grammar.parseString(record)[0] self.assertEqual('VER', result.record_type) self.assertEqual(1234, result.transaction_sequence_n) self.assertEqual(23, result.record_sequence_n) self.assertEqual('THE TITLE', result.title) self.assertEqual('T0123456789', result.iswc) self.assertEqual('ES', result.language_code) self.assertEqual('LAST NAME 1', result.writer_1_last_name) self.assertEqual('FIRST NAME 1', result.writer_1_first_name) self.assertEqual('THE SOURCE', result.source) self.assertEqual(14107338, result.writer_1_ipi_name_n) self.assertEqual('I-000000229-7', result.writer_1_ipi_base_n) self.assertEqual('LAST NAME 2', result.writer_2_last_name) self.assertEqual('FIRST NAME 2', result.writer_2_first_name) self.assertEqual(14107339, result.writer_2_ipi_name_n) self.assertEqual('I-000000230-7', result.writer_2_ipi_base_n) self.assertEqual('ABCD0123456789', result.submitter_work_n) class TestOriginalWorkTitleGrammarException(unittest.TestCase): def setUp(self): self.grammar = get_record_grammar('original_work_title') def test_empty(self): """ Tests that a exception is thrown when the the works number is zero. """ record = '' self.assertRaises(ParseException, self.grammar.parseString, record) def test_invalid(self): record = 'This is an invalid string' self.assertRaises(ParseException, self.grammar.parseString, record)
{ "content_hash": "b0ebaa4772eee9127bd78ba08131d24e", "timestamp": "", "source": "github", "line_count": 60, "max_line_length": 383, "avg_line_length": 40.28333333333333, "alnum_prop": 0.6412908564335954, "repo_name": "weso/CWR-DataApi", "id": "3a9b131057e44210005f6d78f7dd2a4a97a8f853", "size": "2442", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/grammar/factory/record/test_original_work_title.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "3830" }, { "name": "Makefile", "bytes": "2366" }, { "name": "Python", "bytes": "997385" } ], "symlink_target": "" }
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Initial migration: creates the ``TestModel`` table."""

    initial = True

    # First migration in the app -- nothing to depend on.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='TestModel',
            fields=[
                # Standard auto-created integer primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('field_a', models.CharField(max_length=255)),
                ('field_b', models.CharField(max_length=255)),
            ],
        ),
    ]
{ "content_hash": "4ae55e1b62916c3b3b5a218ee1c04d5d", "timestamp": "", "source": "github", "line_count": 22, "max_line_length": 114, "avg_line_length": 24.863636363636363, "alnum_prop": 0.56672760511883, "repo_name": "centralniak/learn-select-for-update", "id": "73ab63078fdca4076dad44e1443b521ff559b063", "size": "619", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "slowtask/migrations/0001_initial.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "11192" } ], "symlink_target": "" }
# xlwt package entry point: checks the interpreter version and re-exports
# the public Excel-writing API.
# NOTE: Python 2-only module -- it uses the py2 `print >>` statement and
# implicit relative imports below.
__VERSION__ = '0.7.2'

import sys
if sys.version_info[:2] < (2, 3):
    print >> sys.stderr, "Sorry, xlwt requires Python 2.3 or later"
    sys.exit(1)

# Re-export the main workbook objects and formatting helpers at package level.
from Workbook import Workbook
from Worksheet import Worksheet
from Row import Row
from Column import Column
from Formatting import Font, Alignment, Borders, Pattern, Protection
from Style import XFStyle, easyxf
from ExcelFormula import *
{ "content_hash": "72657e6b07e04fab7b6edf0f57f4c9d4", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 68, "avg_line_length": 27.928571428571427, "alnum_prop": 0.7442455242966752, "repo_name": "alextreme/Django-Bingo", "id": "dcc23f04e246e95737ebeab8723dffb9fb41535e", "size": "423", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "contrib/xlwt/xlwt/__init__.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "JavaScript", "bytes": "1197147" }, { "name": "Python", "bytes": "523530" }, { "name": "Shell", "bytes": "443" } ], "symlink_target": "" }
import sys
# Make the bundled web2py tree importable (provides db.Corpus_DB below).
sys.path.append("web2py")

import argparse
import os

from db.Corpus_DB import Corpus_DB


def ImportCorpus(corpus_filename_or_folder, database_path):
    """Import a corpus file or folder into ``<database_path>/corpus.db``.

    A single regular file is loaded via ``ImportFromFile``; anything else
    (assumed to be a directory) via ``ImportFromFolder``.  ``isInit=True``
    presumably creates/initializes the database on open -- confirm against
    Corpus_DB.
    """
    database_filename = '{}/corpus.db'.format(database_path)
    with Corpus_DB(database_path, isInit=True) as corpus_db:
        if os.path.isfile(corpus_filename_or_folder):
            print 'Importing file [{}] into database [{}]'.format(corpus_filename_or_folder, database_filename)
            corpus_db.ImportFromFile(corpus_filename_or_folder)
        else:
            print 'Importing folder [{}] into database [{}]'.format(corpus_filename_or_folder, database_filename)
            corpus_db.ImportFromFolder(corpus_filename_or_folder)


def main():
    """Parse command-line arguments and run the import."""
    parser = argparse.ArgumentParser( description = 'Import a file into a SQLite3 Database.' )
    parser.add_argument( 'database', type = str, help = 'Output database folder, containing a file "corpus.db"' )
    parser.add_argument( 'corpus' , type = str, help = 'Input corpus filename or folder' )
    args = parser.parse_args()
    ImportCorpus(args.corpus, args.database)


if __name__ == '__main__':
    main()
{ "content_hash": "65fb70ab3d85761a540308c5f3151ab6", "timestamp": "", "source": "github", "line_count": 27, "max_line_length": 110, "avg_line_length": 39.44444444444444, "alnum_prop": 0.7295774647887324, "repo_name": "maoxuxiang/termite_mallet_project", "id": "f0e2cdee26178d0dbfd56897a65761a68359c4e5", "size": "1112", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "bin/import_corpus.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "581895" }, { "name": "Java", "bytes": "4515" }, { "name": "JavaScript", "bytes": "2239800" }, { "name": "Perl", "bytes": "1688" }, { "name": "Python", "bytes": "6130120" }, { "name": "Shell", "bytes": "106587" } ], "symlink_target": "" }
""" compares two pdf files. """ import io import logging import sys import pdfminer.settings from pdfminer import high_level, layout pdfminer.settings.STRICT = False logging.basicConfig() def compare(file1, file2, **kwargs): # If any LAParams group arguments were passed, # create an LAParams object and # populate with given args. Otherwise, set it to None. if kwargs.get('laparams', None) is None: laparams = layout.LAParams() for param in ("all_texts", "detect_vertical", "word_margin", "char_margin", "line_margin", "boxes_flow"): paramv = kwargs.get(param, None) if paramv is not None: laparams[param] = paramv kwargs['laparams'] = laparams s1 = io.StringIO() with open(file1, "rb") as fp: high_level.extract_text_to_fp(fp, s1, **kwargs) s2 = io.StringIO() with open(file2, "rb") as fp: high_level.extract_text_to_fp(fp, s2, **kwargs) import difflib s1.seek(0) s2.seek(0) s1, s2 = s1.readlines(), s2.readlines() import os.path try: extension = os.path.splitext(kwargs['outfile'])[1][1:4] if extension.lower() == 'htm': return difflib.HtmlDiff().make_file(s1, s2) except KeyError: pass return difflib.unified_diff(s1, s2, n=kwargs['context_lines']) # main def main(args=None): import argparse P = argparse.ArgumentParser(description=__doc__) P.add_argument("file1", type=str, default=None, help="File 1 to compare.") P.add_argument("file2", type=str, default=None, help="File 2 to compare.") P.add_argument("-o", "--outfile", type=str, default="-", help="Output file(default/'-' is stdout) if .htm or .html," " create an HTML table (or a complete HTML file " "containing the table) showing a side by side, " "line by line comparison of text with inter-line and " "intra-line change highlights. 
The table can be " "generated in either full or " "contextual difference mode.") P.add_argument("-N", "--context-lines", default=3, type=int, help="context lines shown") P.add_argument("-d", "--debug", default=False, action="store_true", help="Debug output.") # params for pdf2txt P.add_argument("-p", "--pagenos", type=str, help="Comma-separated list of page numbers to parse. " "Included for legacy applications, " "use --page-numbers for more " "idiomatic argument entry.") P.add_argument("--page-numbers", type=int, default=None, nargs="+", help="Alternative to --pagenos with space-separated " "numbers; supercedes --pagenos where it is used.") P.add_argument("-m", "--maxpages", type=int, default=0, help="Maximum pages to parse") P.add_argument("-P", "--password", type=str, default="", help="Decryption password for both PDFs") P.add_argument("-t", "--output_type", type=str, default="text", help="pdf2txt type: text|html|xml|tag (default is text)") P.add_argument("-c", "--codec", type=str, default="utf-8", help="Text encoding") P.add_argument("-s", "--scale", type=float, default=1.0, help="Scale") P.add_argument("-A", "--all-texts", default=None, action="store_true", help="LAParams all texts") P.add_argument("-V", "--detect-vertical", default=None, action="store_true", help="LAParams detect vertical") P.add_argument("-W", "--word-margin", type=float, default=None, help="LAParams word margin") P.add_argument("-M", "--char-margin", type=float, default=None, help="LAParams char margin") P.add_argument("-L", "--line-margin", type=float, default=None, help="LAParams line margin") P.add_argument("-F", "--boxes-flow", type=float, default=None, help="LAParams boxes flow") P.add_argument("-Y", "--layoutmode", default="normal", type=str, help="HTML Layout Mode") P.add_argument("-n", "--no-laparams", default=False, action="store_true", help="Pass None as LAParams") P.add_argument("-R", "--rotation", default=0, type=int, help="Rotation") P.add_argument("-O", "--output-dir", 
default=None, help="Output directory for images") P.add_argument("-C", "--disable-caching", default=False, action="store_true", help="Disable caching") P.add_argument("-S", "--strip-control", default=False, action="store_true", help="Strip control in XML mode") A = P.parse_args(args=args) if A.debug: logging.getLogger().setLevel(logging.DEBUG) if A.page_numbers: A.page_numbers = {x-1 for x in A.page_numbers} if A.pagenos: A.page_numbers = {int(x)-1 for x in A.pagenos.split(",")} if A.output_type == "text" and A.outfile != "-": for override, alttype in ((".htm", "html"), (".html", "html"), (".xml", "xml"), (".tag", "tag")): if A.outfile.endswith(override): A.output_type = alttype if A.outfile == "-": outfp = sys.stdout else: outfp = open(A.outfile, "w", encoding='utf-8') outfp.writelines(compare(**vars(A))) outfp.close() return 0 if __name__ == '__main__': sys.exit(main())
{ "content_hash": "ceea7846e107a2304c568f2f73a91174", "timestamp": "", "source": "github", "line_count": 143, "max_line_length": 78, "avg_line_length": 40.34965034965035, "alnum_prop": 0.5523396880415945, "repo_name": "goulu/pdfminer", "id": "68478fb779353e6d872c5d6be83a6217d98243b0", "size": "5794", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tools/pdfdiff.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "1783" }, { "name": "Python", "bytes": "478562" } ], "symlink_target": "" }
# OpenCenter agent output plugin: executes "adventures" (orchestration DSL
# programs) against a set of nodes, records per-node status attributes, and
# runs per-node rollback plans for any nodes that fail.
#
# NOTE(review): Python 2 module (uses the `exec ... in ns, ns` statement).
# `LOG`, `global_config` and `register_action` are not defined here; they are
# presumably injected into the module namespace by the plugin loader -- confirm
# against the agent's plugin-loading code.
import base64
import json
import logging
import os
import random
import time

from opencenteragent.utils import detailed_exception
from opencenterclient.client import OpenCenterEndpoint
from state import StateMachine, StateMachineState
from primitives import OrchestratorTasks


name = 'adventurator'
endpoint = 'http://localhost:8080/admin'


def setup(config={}):
    """Resolve the admin endpoint and register the 'adventurate' action."""
    global endpoint

    endpoint = 'http://localhost:8080/admin'
    # Prefer an endpoint from the agent-wide configuration when present.
    if 'admin' in global_config.get('endpoints', {}):
        endpoint = global_config['endpoints']['admin']

    LOG.debug('doing setup for %s handler' % name)
    register_action('adventurate', handle_adventurate)


def handle_adventurate(input_data):
    """Run an adventure DSL program and return its result dict.

    input_data carries 'id' (parent task id), 'action', and 'payload'.  The
    payload must contain either 'adventure' (an adventure id to look up) or
    'adventure_dsl' (an inline DSL), plus a 'nodes' list; it may also carry
    'initial_state' and 'globals'.  Per-node 'last_task' attributes are
    written as ok/failed/rollback as execution proceeds.
    """
    global endpoint

    parent_id = input_data['id']
    action = input_data['action']
    payload = input_data['payload']

    adventure_dsl = None
    adventure_id = None

    ep = OpenCenterEndpoint(endpoint)

    # Resolve the DSL: either by adventure id, or passed inline.
    if 'adventure' in payload:
        adventure_obj = ep.adventures[int(payload['adventure'])]
        adventure_dsl = adventure_obj.dsl
        adventure_id = payload['adventure']
    elif 'adventure_dsl' in payload:
        adventure_dsl = payload['adventure_dsl']
        adventure_id = 0

    if not adventure_dsl:
        return _retval(1,
                       friendly_str='must specify adventure or adventure_dsl')

    if not 'nodes' in payload:
        return _retval(1, friendly_str='no "nodes" list in request')

    if 'initial_state' in payload:
        initial_state = payload['initial_state']
    else:
        initial_state = {}

    if not 'nodes' in initial_state:
        initial_state['nodes'] = payload['nodes']

    adv_globals = []
    if 'globals' in payload:
        adv_globals = payload['globals']

    LOG.debug('using globals %s' % adv_globals)

    # Build the namespace the DSL is exec'd in: logging, state-machine
    # classes, the orchestrator task helper, and default failure results.
    ns = {}
    ns['LOG'] = LOG
    ns['StateMachine'] = StateMachine
    ns['StateMachineState'] = StateMachineState
    ns['tasks'] = OrchestratorTasks(endpoint=endpoint,
                                    parent_task_id=parent_id,
                                    adventure_globals=adv_globals)
    ns['input_data'] = initial_state
    ns['result_str'] = 'fail'
    ns['result_code'] = 254
    ns['result_data'] = {}
    ns['sm_description'] = adventure_dsl

    LOG.debug('About to run the following dsl: %s' % adventure_dsl)

    node_list = {}
    ns['input_data']['fails'] = []
    # Mark every participating node as 'ok' before execution begins.
    for node in initial_state['nodes']:
        node_list[int(node)] = 'ok'
        attr_obj = ep.attrs.new()
        attr_obj.node_id = node
        attr_obj.key = 'last_task'
        attr_obj.value = 'ok'
        attr_obj.save()

    try:
        # Evaluate the state machine inside the prepared namespace.
        exec '(result_data, state_data) = ' \
            'tasks.sm_eval(sm_description, input_data)' in ns, ns
    except Exception as e:
        # Total failure: mark every node failed and report the traceback.
        for node in node_list.keys():
            attr_obj = ep.attrs.new()
            attr_obj.node_id = node
            attr_obj.key = 'last_task'
            attr_obj.value = 'failed'
            attr_obj.save()
        return _retval(1, result_data=detailed_exception())

    output_data = {'result_code': 1,
                   'result_str': 'no return data from adventure',
                   'result_data': {}}

    if 'result_data' in ns:
        output_data = ns['result_data']

    history = []
    if 'state_data' in ns and \
            'history' in ns['state_data']:
        history = ns['state_data']['history']

    # clean up any failed tasks.
    LOG.debug('Adventure terminated with state: %s' % ns['state_data'])

    rollbacks = {}

    # walk through the history and assemble a rollback plan
    for entry in history:
        # walk through the history and assemble rollback plans
        for k, v in entry['result_data'].items():
            # keys are node ids (possibly as strings); normalize to int
            k = int(k)
            if not k in rollbacks:
                rollbacks[k] = []

            if 'rollback' in v['result_data'] and \
                    len(v['result_data']['rollback']) > 0:
                if isinstance(v['result_data']['rollback'], list):
                    rollbacks[k] += v['result_data']['rollback']
                else:
                    rollbacks[k].append(v['result_data']['rollback'])

            # v['result_data'].pop('history')

    state_data = ns['state_data']

    output_data['result_data']['history'] = history
    output_data['result_data']['rollbacks'] = rollbacks

    if 'fails' in state_data:
        # we need to walk through all the failed nodes.
        for node in map(lambda x: int(x), state_data['fails']):
            node_list[node] = 'failed'
            if node in rollbacks and len(rollbacks[node]) > 0:
                LOG.debug('Running rollback plan for node %d: %s' %
                          (node, rollbacks[node]))
                # Re-use the same namespace to run just this node's
                # rollback plan as its own state machine.
                ns['sm_description'] = rollbacks[node]
                ns['input_data'] = {'nodes': [node]}
                try:
                    exec '(rollback_result, rollback_state) = tasks.sm_eval(' \
                        'sm_description, input_data)' in ns, ns
                    if 'rollback_result' in ns and \
                            'result_code' in ns['rollback_result']:
                        if ns['rollback_result']['result_code'] == 0:
                            node_list[node] = 'rollback'
                        else:
                            LOG.debug('Error in rollback: %s: %s' %
                                      (ns['rollback_result'],
                                       ns['rollback_state']))
                except Exception as e:
                    # Best-effort rollback: log and keep the node 'failed'.
                    LOG.debug('Exception running rollback: %s\n%s' %
                              (str(e), detailed_exception()))
            else:
                LOG.debug('No rollback plan for failed node %d' % node)

    # Persist the final per-node status (ok / failed / rollback).
    for node in node_list.keys():
        attr_obj = ep.attrs.new()
        attr_obj.node_id = int(node)
        attr_obj.key = 'last_task'
        attr_obj.value = node_list[node]
        attr_obj.save()

    return output_data


def _retval(result_code, friendly_str=None, result_data={}):
    """Build the standard result dict; derives the message from the code
    when none is given.

    NOTE(review): result_data={} is a shared mutable default -- safe only
    as long as callers never mutate the returned dict's 'result_data'.
    """
    if not friendly_str:
        friendly_str = 'success' if result_code == 0 else 'fail'

    return {'result_code': result_code,
            'result_str': friendly_str,
            'result_data': result_data}
{ "content_hash": "da9c88b2f2cfce9d7be70443736dbf67", "timestamp": "", "source": "github", "line_count": 193, "max_line_length": 79, "avg_line_length": 32.16062176165803, "alnum_prop": 0.5509908168197196, "repo_name": "rcbops/opencenter-agent", "id": "9b37e55ecaf701747e7beeeba194cc7f6ac2a340", "size": "7517", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "opencenteragent/plugins/output/plugin_adventurator.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "179557" }, { "name": "Shell", "bytes": "46184" } ], "symlink_target": "" }
import requests
import json
import pandas as pd


def QA_fetch_get_future_domain():
    """Fetch the underlying codes of all main-continuous ("主连") futures
    contracts from the kuaiqi/Shinny open market-data symbol list.

    return [list]
    """
    url = "https://openmd.shinnytech.com/t/md/symbols/latest.json"
    instruments = pd.DataFrame(json.loads(requests.get(url).text)).T
    # Keep only the synthetic "main continuous" instruments, then strip the
    # exchange prefix from each underlying symbol (e.g. "SHFE.cu" -> "cu").
    main_continuous = instruments.loc[instruments.ins_name.str.contains('主连')]
    underlying = main_continuous.underlying_symbol
    return underlying.apply(lambda symbol: symbol.split('.')[1]).tolist()


if __name__ == "__main__":
    print(QA_fetch_get_future_domain())
{ "content_hash": "075eb35d742cffa8fbe0acfd5732c092", "timestamp": "", "source": "github", "line_count": 19, "max_line_length": 114, "avg_line_length": 22.842105263157894, "alnum_prop": 0.6451612903225806, "repo_name": "QUANTAXIS/QUANTAXIS", "id": "2e34370976c0c32c75357001ea4ad0728e575209", "size": "456", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "QUANTAXIS/QAFetch/QAKQ.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "172" }, { "name": "Cython", "bytes": "13284" }, { "name": "Dockerfile", "bytes": "39129" }, { "name": "Python", "bytes": "1967828" }, { "name": "Rust", "bytes": "588524" }, { "name": "Shell", "bytes": "58705" }, { "name": "TeX", "bytes": "82015" } ], "symlink_target": "" }
""" compute_water_level_filter_saliniy.py ~~~~~~~~ This script is used to: 1- calculate water depth above pressure sensor from pressure reading and coincident barometric pressure measurements. 2- calculate total water depth 3- calculate water surface elevation 4- filter salinity when water level falls below conductivity proble as observed from corrected sonde depth (water depth above pressure sensor) measurement. 5- plot data """ import platform import time import os import copy import glob import numpy as np import sonde import matplotlib.pyplot as plt import pandas from sonde import find_tz if platform.system()=='Windows': base_dir = 'T:\\BaysEstuaries\\Data\\WQData' project_dir = os.path.join('T:\\BaysEstuaries\\PROJECTS\\NUECES', 'Nueces Delta Ecol. Modeling_USACE', '2011/Data2012-2013') else: base_dir = '/T/BaysEstuaries/Data/WQData' project_dir = os.path.join('/T/BaysEstuaries/PROJECTS/NUECES', 'Nueces Delta Ecol. Modeling_USACE', '2011/Data2012-2013') def drop_seconds(time_series): """this is to realign water level and barometer timseries for adjustment""" return [pandas.datetime(dt.year, dt.month, dt.day,dt.hour, dt.minute) for dt in time_series.index] def string_cleaner(string): if type(string) is str: return string.strip().lower() else: return string #the pressure in meters at which site_name = raw_input('Enter Site Name: ').lower() barologger_sitename = raw_input('Enter Barologger Site Name: ').lower() write_file = raw_input("Write output file? 
[yes/no]: ").lower() if barologger_sitename == 'nude3_baro': barologger_file_name = 'lower_delta_filled_barometer_data.csv' if barologger_sitename == 'nueces5_baro': barologger_file_name = 'upper_delta_filled_barometer_data.csv' log_file_dir = os.path.join('deployment_logs','original_files') log_file_name = 'NUECES_DELTA_Deployment_Log_2012' dep_log_file = os.path.join(base_dir, log_file_dir, log_file_name + '_deployment_temp.csv') cal_log_file = os.path.join(base_dir, log_file_dir, log_file_name + '_calibration.csv') site_dep_log_file = os.path.join(base_dir, log_file_dir, site_name+'_dep_log.csv') austin_baro_file = os.path.join(project_dir, 'mabry_station_and_sea_level_pressure.csv') sensor_to_gps_height_file = os.path.join(project_dir, 'site_coordinates.csv') sonde_site_dir = os.path.join(base_dir, 'sites', site_name) barologger_site_dir = os.path.join(base_dir, 'sites', barologger_sitename) sonde_data_file = os.path.join(sonde_site_dir, 'twdb_wq_' + site_name \ + '.csv') barologger_data_file = os.path.join(barologger_site_dir, barologger_file_name) deployment_data = pandas.read_csv(dep_log_file, header=0, delimiter=',', parse_dates={'dep_datetime':[1,5], 'ret_datetime':[1,2]}, na_values=['nd', '', ' ','n/a']) calibration_data = pandas.read_csv(cal_log_file, header=0, delimiter=',', parse_dates=[[0,2]], index_col=0, na_values=['nd', '', ' ','n/a']) deployment_data = deployment_data.applymap(string_cleaner) deployment_data.index = deployment_data.dep_datetime deployment_data.rename(columns={'Height from Base of Instrument to Base of GPS Receiver (m)': 'sonde_bottom_to_gps'},inplace=True) deployment_data.rename(columns= {'Depth from bottom of instrument to water surface (m) ': 'sonde_bottom_to_surface'}, inplace=True) deployment_data.rename(columns={'Total Water Depth (m) ': 'total_water_depth'}, inplace=True) site_deployment_data = deployment_data[deployment_data['SITE ID'] == site_name] site_deployment_data.index = site_deployment_data.dep_datetime 
site_deployment_data['spotcheck_sensor_depth'] = np.nan calibration_data = calibration_data.applymap(string_cleaner) sonde_data = sonde.Sonde(sonde_data_file) sonde_dates = [pandas.datetime.strptime(dt.strftime('%m-%d-%y %H:%M:%S'), '%m-%d-%y %H:%M:%S') for dt in sonde_data.dates] sensor_to_gps_height = pandas.read_csv(sensor_to_gps_height_file, sep=',', index_col=[0], na_values=['nd','\s*']) sonde_series = pandas.DataFrame(sonde_data.data,index=sonde_dates) sonde_series['file_name'] = [f.lower().strip().split('.')[0] for f in sonde_data.data_file] sonde_series['sonde_id'] = 'unknown' deploy_filename_list = np.unique(sonde_series.file_name) baro_series = pandas.read_csv(barologger_data_file, sep=',', parse_dates=[0], index_col=0) austin_lab_baro_series = pandas.read_csv(austin_baro_file, sep=',', parse_dates=[0], index_col=0) sonde_series.index = [pandas.datetime(dt.year, dt.month, dt.day,dt.hour, dt.minute) for dt in sonde_series.index] baro_series.index = [pandas.datetime(dt.year, dt.month, dt.day,dt.hour, dt.minute) for dt in baro_series.index] sonde_series = sonde_series.groupby(level=0).first() sonde_baro_series = pandas.concat((sonde_series, baro_series), axis=1) sonde_baro_series.water_depth_non_vented\ [sonde_baro_series.water_depth_non_vented < -900.] = np.nan sonde_baro_series.air_pressure[sonde_baro_series.air_pressure < -900] = np.nan sonde_baro_series['corrected_sonde_depth'] = np.nan sonde_baro_series['water_surface_elevation_one'] = np.nan sonde_baro_series['water_surface_elevation_two'] = np.nan sonde_baro_series['water_surface_elevation_three'] = np.nan sonde_baro_series['water_surface_elevation_med'] = np.nan sonde_baro_series['total_water_depth'] = np.nan sonde_baro_series = sonde_baro_series.resample('15min', how='first') ltc_zero_pressure = 9.5 ll_zero_pressure = 0. 
ysi_bottom_to_sensor = .102 solinst_bottom_to_sensor = 0.01 horizontal_orientation = ['nueces2', 'nueces5', 'nueces6', 'nueces11'] water_depth_det_limit = {'nueces1': (-.2, 100.), 'nueces2': (0.02, 5), 'nueces3': (0.02, 100.), 'nueces4': (0.03, 100.), 'nueces5': (0.03, 5.), 'nueces6': (0.03, 40.), 'nueces7': (0.03, 5.), 'nueces8': (0.03, 100.), 'nueces9': (0.03, 100.), 'nueces10': (0.03, 100.), 'nueces11': (0.02, 100.), 'nueces12': (0.03, 100.), 'nueces13': (0.03, 100.), 'nueces14': (0.03, 100.), 'nude1': (0.02, 100.)} water_depth_detection_limit = water_depth_det_limit[site_name][0] salinity_filter_limit = water_depth_det_limit[site_name][1] site_gps_elevation_one = sensor_to_gps_height.ix[site_name, 'survey_one_elevation'] site_gps_elevation_two = sensor_to_gps_height.ix[site_name, 'survey_two_elevation'] site_gps_elevation_three = sensor_to_gps_height.ix[site_name, 'survey_three_elevation'] site_gps_elevation_med = sensor_to_gps_height.ix[site_name,['survey_one_elevation', 'survey_two_elevation', 'survey_three_elevation']].quantile( 0.5) last_ret_start = pandas.datetime(2013,11,10) last_ret_end = pandas.datetime(2013,11,15) fig = plt.figure() sal_ax = fig.add_subplot(111) #sonde_baro_series.seawater_salinity.plot(style='c.', label='raw', ax=sal_ax, # markersize=4) cleaned_sal_ax = plt.figure().add_subplot(111) sonde_baro_series.seawater_salinity.plot(style='c.', label='removed', ax=cleaned_sal_ax, markersize=4) cleaned_sal_ax.set_ylim(0, np.ceil(np.max([sonde_baro_series.seawater_salinity.max(), site_deployment_data['SURFACE SALINITY (ppt)'].max()]))) cleaned_sal_ax.set_ylabel("salinity, psu") cleaned_sal_ax.set_title(site_name) plt.figure() for sonde_file in deploy_filename_list: #for sonde_file in ['0912nu02']: deployment_specific_data = site_deployment_data[site_deployment_data\ ['DEPLOYED FILENAME']==sonde_file] sonde_type = deployment_specific_data['DEPLOYED SONDE ID'] dep_file_mask = sonde_baro_series['file_name'] == sonde_file site_sonde_baro_series = 
sonde_baro_series[dep_file_mask] sonde_baro_series['sonde_id'][dep_file_mask] = sonde_type[0] if 'ltc' in sonde_type.values[0]: print "correcting deployment file ", sonde_file sonde_baro_series['corrected_sonde_depth'][dep_file_mask] = \ sonde_baro_series.water_depth_non_vented[dep_file_mask] - \ sonde_baro_series.air_pressure[dep_file_mask] + \ ltc_zero_pressure if site_name == 'nueces13': if sonde_file == '0712nu13': survey_solinst_sonde_to_gps = 1.48 print "solinst_to_gps,", sonde_file, survey_solinst_sonde_to_gps else: survey_solinst_sonde_to_gps = 0.905 print "solinst_to_gps,", sonde_file, survey_solinst_sonde_to_gps else: survey_solinst_sonde_to_gps = sensor_to_gps_height.ix[site_name, 'median_solinst_sonde_bottom_to_gps'] if site_name in horizontal_orientation: site_deployment_data['spotcheck_sensor_depth'][site_deployment_data\ ['DEPLOYED FILENAME']==sonde_file]= \ deployment_specific_data.sonde_bottom_to_surface try: site_deployment_data.ix[last_ret_start:last_ret_end, 'spotcheck_sensor_depth'] = \ site_deployment_data.sonde_bottom_to_surface.ix[last_ret_start: last_ret_end][0] except TypeError: pass sonde_baro_series['water_surface_elevation_one'][dep_file_mask] =\ site_gps_elevation_one + sonde_baro_series['corrected_sonde_depth'][dep_file_mask] sonde_baro_series['water_surface_elevation_two'][dep_file_mask] =\ site_gps_elevation_two + sonde_baro_series['corrected_sonde_depth'][dep_file_mask] sonde_baro_series['water_surface_elevation_three'][dep_file_mask] = \ site_gps_elevation_three + sonde_baro_series['corrected_sonde_depth'][dep_file_mask] sonde_baro_series['water_surface_elevation_med'][dep_file_mask] = \ site_gps_elevation_med + sonde_baro_series['corrected_sonde_depth'][dep_file_mask] if np.isnan(sensor_to_gps_height.ix[site_name, 'median_solinst_sensor_height']): solinst_sensor_height = sensor_to_gps_height.ix[site_name, 'median_ysi_sensor_height'] else: solinst_sensor_height = sensor_to_gps_height.ix[site_name, 'median_solinst_sensor_height'] 
else: site_deployment_data['spotcheck_sensor_depth'][site_deployment_data\ ['DEPLOYED FILENAME']==sonde_file]= \ deployment_specific_data.sonde_bottom_to_surface - \ solinst_bottom_to_sensor try: site_deployment_data.ix[last_ret_start:last_ret_end, 'spotcheck_sensor_depth'] = \ site_deployment_data.sonde_bottom_to_surface.ix[last_ret_start: last_ret_end][0] - solinst_bottom_to_sensor except TypeError: pass sonde_baro_series['water_surface_elevation_one'][dep_file_mask] = \ site_gps_elevation_one + solinst_bottom_to_sensor + \ sonde_baro_series['corrected_sonde_depth'][dep_file_mask] sonde_baro_series['water_surface_elevation_two'][dep_file_mask] = \ site_gps_elevation_two + solinst_bottom_to_sensor + \ sonde_baro_series['corrected_sonde_depth'][dep_file_mask] sonde_baro_series['water_surface_elevation_three'][dep_file_mask] = \ site_gps_elevation_three + solinst_bottom_to_sensor + \ sonde_baro_series['corrected_sonde_depth'][dep_file_mask] sonde_baro_series['water_surface_elevation_med'][dep_file_mask] = \ site_gps_elevation_med + solinst_bottom_to_sensor + \ sonde_baro_series['corrected_sonde_depth'][dep_file_mask] if site_name == 'nueces13': if sonde_file == '0712nu13': solinst_sensor_height = 0.29 else: solinst_sensor_height = 0.925 else: solinst_sensor_height = sensor_to_gps_height.ix[site_name, 'median_solinst_sensor_height'] \ + solinst_bottom_to_sensor sonde_baro_series['total_water_depth'][dep_file_mask] = \ solinst_sensor_height + sonde_baro_series['corrected_sonde_depth']\ [dep_file_mask] # water_depth_detection_mask = sonde_baro_series.corrected_sonde_depth\ # < water_depth_detection_limit # sonde_baro_series['corrected_salinity'][dep_file_mask * \ # water_depth_detection_mask] = np.nan # sonde_baro_series['total_water_depth'][dep_file_mask * \ # water_depth_detection_mask] = -888. 
if 'ysi' in sonde_type.values[0]: print "correcting deployment file ", sonde_file site_calibration_data = calibration_data\ [calibration_data['SITE ID']== site_name] site_dep_data = pandas.read_csv(site_dep_log_file, header=0, sep=',', parse_dates={'dep_datetime':[1,5], 'ret_datetime':[1,2]}, na_values=['nd', '', ' ']) site_dep_data = site_dep_data.applymap(string_cleaner) site_dep_data.index = site_dep_data.dep_datetime # site_cal_baro_data = pandas.concat((site_calibration_data, # austin_lab_baro_series), axis=1) # site_cal_baro_data.station_pressure = site_cal_baro_data.\ # station_pressure.interpolate(method='linear') # ysi_zero_pressure_mabry = site_cal_baro_data.station_pressure.ix[site_calibration_data.index[site_calibration_data['DEPLOYED FILENAME']==sonde_file]] ysi_zero_pressure = site_dep_data.zero_pressure.ix[site_dep_data.index[site_dep_data['DEPLOYED FILENAME']==sonde_file]] sonde_baro_series['corrected_sonde_depth'][dep_file_mask] = \ sonde_baro_series.water_depth_non_vented[dep_file_mask] - \ sonde_baro_series.air_pressure[dep_file_mask] + ysi_zero_pressure[0] survey_ysi_sonde_to_gps = sensor_to_gps_height.ix[site_name, 'median_ysi_sonde_bottom_to_gps'] # this assumes that the solinst was placed at the same level of if site_name in horizontal_orientation: site_deployment_data['spotcheck_sensor_depth'][site_deployment_data\ ['DEPLOYED FILENAME']==sonde_file]= \ site_deployment_data.sonde_bottom_to_surface try: site_deployment_data.ix[last_ret_start:last_ret_end, 'spotcheck_sensor_depth'] = \ site_deployment_data.sonde_bottom_to_surface.ix[last_ret_start: last_ret_end][0] except TypeError: pass sonde_baro_series['water_surface_elevation_one'][dep_file_mask] = \ site_gps_elevation_one + sonde_baro_series['corrected_sonde_depth'][dep_file_mask] sonde_baro_series['water_surface_elevation_two'][dep_file_mask] \ = site_gps_elevation_two + sonde_baro_series['corrected_sonde_depth'][dep_file_mask] 
sonde_baro_series['water_surface_elevation_three'][dep_file_mask] = \ site_gps_elevation_three + sonde_baro_series['corrected_sonde_depth'][dep_file_mask] sonde_baro_series['water_surface_elevation_med'][dep_file_mask] = \ site_gps_elevation_med + sonde_baro_series['corrected_sonde_depth'][dep_file_mask] if np.isnan(sensor_to_gps_height.ix[site_name, 'median_ysi_sensor_height']): ysi_sensor_height = sensor_to_gps_height.ix[site_name, 'median_solinst_sensor_height'] else: ysi_sensor_height = sensor_to_gps_height.ix[site_name, 'median_ysi_sensor_height'] else: site_deployment_data['spotcheck_sensor_depth'][site_deployment_data\ ['DEPLOYED FILENAME']==sonde_file]= \ deployment_specific_data.sonde_bottom_to_surface - \ ysi_bottom_to_sensor try: site_deployment_data.ix[last_ret_start:last_ret_end, 'spotcheck_sensor_depth'] = \ site_deployment_data.sonde_bottom_to_surface.ix[last_ret_start: last_ret_end][0] - ysi_bottom_to_sensor except TypeError: pass sonde_baro_series['water_surface_elevation_one'][dep_file_mask] = \ site_gps_elevation_one + ysi_bottom_to_sensor +\ sonde_baro_series['corrected_sonde_depth'][dep_file_mask] sonde_baro_series['water_surface_elevation_two'][dep_file_mask] = \ site_gps_elevation_two + ysi_bottom_to_sensor + \ sonde_baro_series['corrected_sonde_depth'][dep_file_mask] sonde_baro_series['water_surface_elevation_three'][dep_file_mask] = \ site_gps_elevation_three + ysi_bottom_to_sensor + \ sonde_baro_series['corrected_sonde_depth'][dep_file_mask] sonde_baro_series['water_surface_elevation_med'][dep_file_mask] = \ site_gps_elevation_med + ysi_bottom_to_sensor + \ sonde_baro_series['corrected_sonde_depth'][dep_file_mask] if np.isnan(sensor_to_gps_height.ix[site_name, 'median_ysi_sensor_height']): ysi_sensor_height = sensor_to_gps_height.ix[site_name, 'median_solinst_sensor_height'] else: ysi_sensor_height = sensor_to_gps_height.ix[site_name, 'median_ysi_sensor_height'] \ + ysi_bottom_to_sensor 
sonde_baro_series['total_water_depth'][sonde_baro_series\ ['file_name'] == sonde_file] = ysi_sensor_height + \ sonde_baro_series\ ['corrected_sonde_depth'][sonde_baro_series['file_name']== sonde_file] if 'll' in sonde_type.values[0]: sonde_baro_series['corrected_sonde_depth'][sonde_baro_series['file_name'] == sonde_file] = \ sonde_baro_series.water_depth_non_vented[sonde_baro_series['file_name'] == sonde_file] - \ sonde_baro_series.air_pressure[sonde_baro_series['file_name'] == sonde_file] if np.isnan(sensor_to_gps_height.ix[site_name, 'median_solinst_sonde_bottom_to_gps']): survey_solinst_sonde_to_gps = sensor_to_gps_height.ix[site_name, 'median_ysi_sonde_bottom_to_gps'] - \ .25 else : survey_solinst_sonde_to_gps = sensor_to_gps_height.ix[site_name, 'median_solinst_sonde_bottom_to_gps'] if site_name in horizontal_orientation: site_deployment_data['spotcheck_sensor_depth'][site_deployment_data\ ['DEPLOYED FILENAME']==sonde_file]= \ site_deployment_data.sonde_bottom_to_surface try: site_deployment_data.ix[last_ret_start:last_ret_end, 'spotcheck_sensor_depth'] = \ site_deployment_data.sonde_bottom_to_surface.ix[last_ret_start: last_ret_end][0] except TypeError: pass sonde_baro_series['water_surface_elevation_one'][dep_file_mask] = \ site_gps_elevation_one + sonde_baro_series['corrected_sonde_depth']\ [dep_file_mask] sonde_baro_series['water_surface_elevation_two'][dep_file_mask] = \ site_gps_elevation_two + sonde_baro_series['corrected_sonde_depth']\ [dep_file_mask] sonde_baro_series['water_surface_elevation_three'][dep_file_mask] = \ site_gps_elevation_three + sonde_baro_series['corrected_sonde_depth']\ [dep_file_mask] sonde_baro_series['water_surface_elevation_med'][dep_file_mask] = \ site_gps_elevation_med + sonde_baro_series['corrected_sonde_depth']\ [dep_file_mask] if np.isnan(sensor_to_gps_height.ix[site_name, 'median_solinst_sensor_height']): solinst_sensor_height = sensor_to_gps_height.ix[site_name, 'median_ysi_sensor_height'] else: solinst_sensor_height = 
sensor_to_gps_height.ix[site_name, 'median_solinst_sensor_height'] else: site_deployment_data['spotcheck_sensor_depth'][site_deployment_data\ ['DEPLOYED FILENAME']==sonde_file]= \ deployment_specific_data.sonde_bottom_to_surface- \ solinst_bottom_to_sensor try: site_deployment_data.ix[last_ret_start:last_ret_end, 'spotcheck_sensor_depth'] = \ site_deployment_data.sonde_bottom_to_surface.ix[last_ret_start: last_ret_end][0] - solinst_bottom_to_sensor except TypeError: pass sonde_baro_series['water_surface_elevation_one'][dep_file_mask] = \ site_gps_elevation_one + solinst_bottom_to_sensor + \ sonde_baro_series['corrected_sonde_depth'][dep_file_mask] sonde_baro_series['water_surface_elevation_two'][dep_file_mask] = \ site_gps_elevation_two + solinst_bottom_to_sensor + \ sonde_baro_series['corrected_sonde_depth'][dep_file_mask] sonde_baro_series['water_surface_elevation_three'][dep_file_mask] = \ site_gps_elevation_three + solinst_bottom_to_sensor + \ sonde_baro_series['corrected_sonde_depth'][dep_file_mask] sonde_baro_series['water_surface_elevation_med'][dep_file_mask] = \ site_gps_elevation_med + solinst_bottom_to_sensor + \ sonde_baro_series['corrected_sonde_depth'][dep_file_mask] if np.isnan(sensor_to_gps_height.ix[site_name, 'median_solinst_sensor_height']): solinst_sensor_height = sensor_to_gps_height.ix[site_name, 'median_ysi_sensor_height'] + .25 else: solinst_sensor_height = sensor_to_gps_height.ix[site_name, 'median_solinst_sensor_height'] \ + solinst_bottom_to_sensor sonde_baro_series['total_water_depth'][sonde_baro_series\ ['file_name'] == sonde_file] = solinst_sensor_height + \ sonde_baro_series\ ['corrected_sonde_depth'][sonde_baro_series['file_name']== sonde_file] sonde_baro_series['corrected_sonde_depth']\ [sonde_baro_series['file_name']==sonde_file].plot(style='.',label='sonde depth') plt.ylim(0,2) site_deployment_data.index = [t + pandas.datetools.Hour(-1) if find_tz(t).zone == 'UTC-5' else t for t in site_deployment_data.index] 
site_deployment_data.to_csv(os.path.join(sonde_site_dir, site_name + '_complete_dep_log.csv'), sep=',', index_label='datetime(utc-6)', float_format='%10.2f', na_rep=-999.99) site_deployment_data[site_deployment_data.columns[-3]].dropna().plot(style='r.', markersize=12) water_depth_detection_mask = sonde_baro_series.corrected_sonde_depth\ < water_depth_detection_limit salinity_threshold_mask = sonde_baro_series.seawater_salinity < salinity_filter_limit water_depth_detection_mask = water_depth_detection_mask * salinity_threshold_mask sonde_baro_series['raw_seawater_salinity'] = sonde_baro_series.seawater_salinity sonde_baro_series['seawater_salinity'][water_depth_detection_mask] = -888.88 sonde_baro_series['total_water_depth'][water_depth_detection_mask] = -888.88 sonde_baro_series['water_surface_elevation_one'][water_depth_detection_mask] = -888.88 sonde_baro_series['water_surface_elevation_two'][water_depth_detection_mask] = -888.88 sonde_baro_series['water_surface_elevation_three'][water_depth_detection_mask] = -888.88 sonde_baro_series['water_surface_elevation_med'][water_depth_detection_mask] = -888.88 sonde_baro_series['water_temperature'][water_depth_detection_mask] = -888.88 sonde_baro_series['water_electrical_conductivity'][water_depth_detection_mask] = -888.88 site_deployment_data['spotcheck_sensor_depth'].dropna().plot(style='r^', markersize=10, label='spot-check') plt.title(site_name) plt.ylabel('depth, m') plt.legend().set_visible(False) #plt.ylim(0,2) sonde_baro_series.rename(columns={'water_depth_non_vented': 'raw_pressure_reading'},inplace=True) sonde_depth_ax = plt.figure().add_subplot(111) sonde_baro_series.ix[:,'corrected_sonde_depth'].dropna().plot(style='b.', label='water depth above sensor', ax=sonde_depth_ax) sonde_baro_series.ix[:,'raw_pressure_reading'].dropna().plot(style='g.', label='raw pressure reading', ax=sonde_depth_ax) #site_deployment_data[site_deployment_data.columns[-3]].dropna().plot(style='r.', # markersize=12) 
site_deployment_data['spotcheck_sensor_depth'].dropna().plot(style='r.', markersize=10, label='spot-check', ax=sonde_depth_ax) sonde_depth_ax.set_title(site_name) sonde_depth_ax.set_ylim(0, 2) sonde_depth_ax.set_ylabel('water depth, m') sonde_depth_ax.legend() #plt.savefig(os.path.join(sonde_site_dir, # site_name + '_water_depth_above_sensor.png')) total_depth_ax = plt.figure().add_subplot(111) #sonde_baro_series.ix[:, 'corrected_sonde_depth'].plot(style='.', ax=ax, # label='water depth above sensor') sonde_baro_series.ix[:, 'total_water_depth'].plot(style='.', ax=total_depth_ax, label='total water depth') total_depth_ax.set_title(site_name) total_depth_ax.set_ylim(0,2) total_depth_ax.set_ylabel('water depth, m') site_deployment_data.total_water_depth.plot(style='r.', markersize=12, ax=total_depth_ax, label= 'spot-check') plt.legend() #plt.savefig(os.path.join(sonde_site_dir, site_name + '_water_depth.png')) sonde_baro_series.rename(columns={'water_depth_non_vented': 'raw_pressure_reading', 'seawater_salinity': 'filtered salinity', 'raw_seawater_salinity': 'unfiltered salinity', 'corrected_sonde_depth': 'water depth'},inplace=True) sonde_baro_series.ix[:,['water depth', 'unfiltered salinity','filtered salinity']].plot(style={'filtered salinity':'b.', 'unfiltered salinity':'c.', 'water depth': 'g.'}, ax=sal_ax, markersize=4,secondary_y='water depth') #site_deployment_data['SURFACE SALINITY (ppt)'].plot(style='ro', ax=sal_ax) sal_ax.set_ylim(0,80) sal_ax.set_ylabel('salinity, psu') sal_ax.set_title(site_name) #sal_ax.right_ax.set_ylim(0.01, np.ceil(sonde_baro_series.corrected_sonde_depth.max())) sal_ax.right_ax.set_ylim(0.01, 0.6) sal_ax.right_ax.set_ylabel('water depth above sensor, m') """ site_deployment_data.total_water_depth = site_deployment_data.total_water_depth.apply( lambda d: np.float(d)) site_deployment_data.sonde_bottom_to_surface = site_deployment_data.\ sonde_bottom_to_surface.apply( lambda d: np.float(d)) 
sonde_baro_series.seawater_salinity.plot(style='b.', ax=cleaned_sal_ax, markersize=4, label="cleaned") site_deployment_data['SURFACE SALINITY (ppt)'].plot(style='ro', ax=cleaned_sal_ax, label='spot-check') #plt.savefig(os.path.join(sonde_site_dir, site_name + '_salinity.png')) sonde_baro_series.ix[:, ['water_surface_elevation_one', 'water_surface_elevation_two', 'water_surface_elevation_three', 'water_surface_elevation_med']].plot(title=site_name, style={'water_surface_elevation_one': '.', 'water_surace_elevation_two': '.', 'water_survace_elevation_three': '.', 'water_surface_elevation_med': '^'},mew=0) plt.ylim(-2,2) plt.ylabel('water surface elevation,m from NAVD88') #plt.figure() #sonde_baro_series.ix[:, 'water_surface_elevation_med'].plot(style='.', # title=site_name) plt.ylabel('water surface elevation,m from NAVD88') plt.ylim(-3,1) plt.title(site_name) ll_bool = site_deployment_data.sonde_type == 'll' ltc_bool = site_deployment_data.sonde_type == 'ltc' #solinst_bool = [ll or ltc for ll,ltc in zip(ll_bool, ltc_bool)] #solinst_sensor_height = site_deployment_data.total_depth[solinst_bool] - \ site_deployment_data.sonde_bottom_to_surface[solinst_bool] ysi_sensor_height_series = site_deployment_data.total_depth\ [site_deployment_data.sonde_type == 'ysi'] - \ site_deployment_data.sonde_bottom_to_surface\ [site_deployment_data.sonde_type == 'ysi'] try: solinst_sensor_height.plot(style='.-', label='solinst') except TypeError: pass try: ysi_sensor_height.plot(style='.-', label='ysi') except TypeError: pass """ #plt.title(site_name + ' sensor height') #print site_name #print "ysi:", ysi_sensor_height.quantile(.5) #print "solinst:", solinst_sensor_height.quantile(.5) plt.show() metadata_file = os.path.join(base_dir,'swis_site_list_with_nueces_added.psv') metadata = np.genfromtxt(metadata_file, delimiter='|', dtype='|S20,|S100,|S20,|S20', usecols=(1,5,6,7), names='site_name,site_desc,lat,lon',skip_header=1) idx = np.where(metadata['site_name']==site_name.upper()) if 
write_file == 'yes': disclaimer_header = '' disclaimer_header += 'disclaimer: This data has been collected by a Texas Water Development Board datasonde.\n' disclaimer_header += 'disclaimer: Raw uncorrected data may contain errors. Provisional data has had anomalous \n' disclaimer_header += 'disclaimer: individual data points removed. Such data points typically are disconnected \n' disclaimer_header += 'disclaimer: from the recorded trend; nonetheless all removed data is retained in an associated \n' disclaimer_header += 'disclaimer: QA Rules file available upon request. However data that simply appear unusual are \n' disclaimer_header += 'disclaimer: not removed unless verifying information is obtained suggesting the data is not \n' disclaimer_header += 'disclaimer: representative of bay conditions. The Board makes no warranties (including no warranties \n' disclaimer_header += 'disclaimer: as to merchantability or fitness) either expressed or implied with respect to the data \n' disclaimer_header += 'disclaimer: or its fitness for any specific application. \n' disclaimer_header += 'fill_value for bad data: -999.99. \n' try: disclaimer_header += 'fill_value for parameters when water level falls to or below sensor level above delta bed of ' + str(solinst_sensor_height)[:5] + ' m: -888.88. \n' except NameError: disclaimer_header += 'fill_value for parameters when water level falls to or below sensor level above delta bed of ' + str(ysi_sensor_height)[:5] + ' m: -888.88. 
\n' disclaimer_header += 'site_description: ' + metadata['site_desc'][idx][0] + '\n' disclaimer_header += 'site_name: ' + site_name + '\n' disclaimer_header += 'latitude: ' + metadata['lat'][idx][0] + '\n' disclaimer_header += 'longitude: ' + metadata['lon'][idx][0] + '\n' disclaimer_header += 'timezone: UTC-6 \n' columnorder = ['raw_pressure_reading', 'air_pressure', 'corrected_sonde_depth', 'total_water_depth', 'water_surface_elevation_one', 'water_surface_elevation_two', 'water_surface_elevation_three', 'water_surface_elevation_med', 'water_temperature', 'water_electrical_conductivity', 'seawater_salinity', 'sonde_id', 'file_name'] processed_wq_file = os.path.join(sonde_site_dir, site_name + '_final_data_corrected.csv') sonde_baro_series = sonde_baro_series.ix[:, columnorder] fid = open(processed_wq_file, 'w') fid.write(disclaimer_header) sonde_baro_series.to_csv(fid, sep=',', index_label='datetimes', na_rep = -999.99, header=False) fid.close()
{ "content_hash": "8e1669a5c888ec159341da9202079cc4", "timestamp": "", "source": "github", "line_count": 670, "max_line_length": 177, "avg_line_length": 53.723880597014926, "alnum_prop": 0.562967078760939, "repo_name": "twdb/sonde", "id": "69a800726d21b99d96b1ff2ea341f9d446c5f414", "size": "35995", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "scripts/compute_water_level_filter_salinity.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "Mathematica", "bytes": "373923" }, { "name": "Python", "bytes": "338143" } ], "symlink_target": "" }
import webob
from lxml import etree

from nova.api.openstack import views
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.compute import instance_types
from nova import db
from nova import exception


class Controller(object):
    """Flavor controller for the OpenStack API."""

    def index(self, req):
        """Return all flavors in brief."""
        items = self._get_flavors(req, is_detail=False)
        return dict(flavors=items)

    def detail(self, req):
        """Return all flavors in detail."""
        items = self._get_flavors(req, is_detail=True)
        return dict(flavors=items)

    def _get_flavors(self, req, is_detail=True):
        """Return a list of flavor dicts, optionally filtered.

        Honors the optional ``minRam`` and ``minDisk`` query parameters;
        non-integer values are silently ignored, per the API spec.
        """
        filters = {}
        if 'minRam' in req.params:
            try:
                filters['min_memory_mb'] = int(req.params['minRam'])
            except ValueError:
                pass  # ignore bogus values per spec

        if 'minDisk' in req.params:
            try:
                filters['min_local_gb'] = int(req.params['minDisk'])
            except ValueError:
                pass  # ignore bogus values per spec

        inst_types = instance_types.get_all_types(filters=filters)
        builder = self._get_view_builder(req)
        items = [builder.build(inst_type, is_detail=is_detail)
                 for inst_type in inst_types.values()]
        return items

    def show(self, req, id):
        """Return data about the given flavor id.

        :raises webob.exc.HTTPNotFound: if no flavor matches ``id``.
        """
        try:
            flavor = instance_types.get_instance_type_by_flavor_id(id)
        except exception.NotFound:
            raise webob.exc.HTTPNotFound()

        builder = self._get_view_builder(req)
        values = builder.build(flavor, is_detail=True)
        return dict(flavor=values)

    def _get_view_builder(self, req):
        """Build a flavor view builder scoped to the request's project."""
        base_url = req.application_url
        project_id = getattr(req.environ['nova.context'], 'project_id', '')
        return views.flavors.ViewBuilder(base_url, project_id)


def make_flavor(elem, detailed=False):
    """Populate an XML template element with flavor attributes.

    When ``detailed`` is true the element also carries ram/disk and the
    optional attributes, which render as empty strings when absent.
    """
    elem.set('name')
    elem.set('id')
    if detailed:
        elem.set('ram')
        elem.set('disk')

        for attr in ("vcpus", "swap", "rxtx_quota", "rxtx_cap"):
            elem.set(attr, xmlutil.EmptyStringSelector(attr))

    xmlutil.make_links(elem, 'links')


flavor_nsmap = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM}


class FlavorTemplate(xmlutil.TemplateBuilder):
    """XML template for a single, detailed flavor."""

    def construct(self):
        root = xmlutil.TemplateElement('flavor', selector='flavor')
        make_flavor(root, detailed=True)
        return xmlutil.MasterTemplate(root, 1, nsmap=flavor_nsmap)


class MinimalFlavorsTemplate(xmlutil.TemplateBuilder):
    """XML template for a brief (index) flavor listing."""

    def construct(self):
        root = xmlutil.TemplateElement('flavors')
        elem = xmlutil.SubTemplateElement(root, 'flavor', selector='flavors')
        make_flavor(elem)
        return xmlutil.MasterTemplate(root, 1, nsmap=flavor_nsmap)


class FlavorsTemplate(xmlutil.TemplateBuilder):
    """XML template for a detailed flavor listing."""

    def construct(self):
        root = xmlutil.TemplateElement('flavors')
        elem = xmlutil.SubTemplateElement(root, 'flavor', selector='flavors')
        make_flavor(elem, detailed=True)
        return xmlutil.MasterTemplate(root, 1, nsmap=flavor_nsmap)


class FlavorXMLSerializer(xmlutil.XMLTemplateSerializer):
    """Maps controller actions to their XML response templates."""

    def show(self):
        return FlavorTemplate()

    def detail(self):
        return FlavorsTemplate()

    def index(self):
        return MinimalFlavorsTemplate()


def create_resource():
    """Create the WSGI resource for the flavors API."""
    body_serializers = {'application/xml': FlavorXMLSerializer()}
    serializer = wsgi.ResponseSerializer(body_serializers)
    return wsgi.Resource(Controller(), serializer=serializer)
{ "content_hash": "321ff21b8ee93a866847930c2d65313a", "timestamp": "", "source": "github", "line_count": 118, "max_line_length": 77, "avg_line_length": 32.38135593220339, "alnum_prop": 0.6443339439937189, "repo_name": "salv-orlando/MyRepo", "id": "7280ecbce855febf5247ee86057e9cacdbafc793", "size": "4496", "binary": false, "copies": "1", "ref": "refs/heads/bp/xenapi-security-groups", "path": "nova/api/openstack/flavors.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "JavaScript", "bytes": "7412" }, { "name": "Python", "bytes": "4477933" }, { "name": "Shell", "bytes": "34174" } ], "symlink_target": "" }
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

import data
import logger
import test_topic_model
import util

if __name__ == "__main__":
    # Set up logging first so every later step is recorded.
    logger.init('train_topic_model.log')
    logger.log.info("Topic Model Training start running...")

    util.text_process.init()

    # Dump the positive training texts from the database to a flat file,
    # then close the connection so the (long) model build below does not
    # hold it open.
    data.pg.init()
    util.text_process.save_texts(data.pg.get_train_pos(), "topic_tmp/pos_data.txt")
    data.pg.close()

    # Build the topic-model artifacts from the dumped file, in order:
    # dictionary -> bag-of-words corpus -> LDA model.  Each build_* call
    # is given a ``path`` — presumably where the artifact is persisted
    # under topic_tmp/; TODO confirm against util.topic_model.
    dictionary = util.topic_model.build_dictionary(source="topic_tmp/pos_data.txt", path="topic_tmp/pos_data.dict")
    corpus = util.topic_model.build_corpus(dictionary, source="topic_tmp/pos_data.txt", path="topic_tmp/pos_corpus.mm")
    model = util.topic_model.build_lda(corpus, dictionary, path="topic_tmp/lda.model")

    # Re-open the database to fetch the full set of positions (plus the
    # city and position_type lookup data used for preprocessing).
    data.pg.init()
    raw_pos = util.text_process.pos_process(data.pg.get_pos(), data.pg.get("city"), data.pg.get("position_type"))
    data.pg.close()

    # Index the full corpus against the trained model so similarity
    # queries can be run, then evaluate via test_topic_model.
    index_corpus = util.topic_model.get_corpus(dictionary, util.text_process.split_and_filter_texts(raw_pos))
    index = util.topic_model.build_index(index_corpus, model)

    test_topic_model.run(index, model, dictionary, raw_pos)

    logger.log.info("Topic Model Training finished.")
{ "content_hash": "d91f27981a9c81e922a5a38bb4127a8f", "timestamp": "", "source": "github", "line_count": 40, "max_line_length": 119, "avg_line_length": 30.8, "alnum_prop": 0.700487012987013, "repo_name": "wjfwzzc/train_model", "id": "55ca4723b70dd4e5ab7c06b99fa7ba2095957a6c", "size": "1257", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "train_topic_model.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "23203" }, { "name": "Shell", "bytes": "1581" } ], "symlink_target": "" }
from string import Template
from datetime import date

bitcoinDir = "./"

inFile = bitcoinDir + "/share/qt/Info.plist"
outFile = "Friendshipcoin-Qt.app/Contents/Info.plist"

# Fall back to "unknown" if no VERSION line is found in the .pro file.
version = "unknown"

fileForGrabbingVersion = bitcoinDir + "bitcoin-qt.pro"

# Scan the qmake project file for a "VERSION = x.y.z" assignment.
# (Fix: the original left this handle, fIn and fOut unclosed; all file
# access now uses context managers.)
with open(fileForGrabbingVersion) as proFile:
    for line in proFile:
        lineArr = line.replace(" ", "").split("=")
        if lineArr[0].startswith("VERSION"):
            version = lineArr[1].replace("\n", "")

# Read the Info.plist template and substitute the $VERSION and $YEAR
# placeholders.
with open(inFile, "r") as fIn:
    fileContent = fIn.read()

newFileContent = Template(fileContent).substitute(VERSION=version,
                                                  YEAR=date.today().year)

with open(outFile, "w") as fOut:
    fOut.write(newFileContent)

# print() is valid in both Python 2 and 3 for a single argument;
# the original Py2-only "print x" statement was a syntax error on Py3.
print("Info.plist fresh created")
{ "content_hash": "9353bf7425c1d628f603d83f8a5260b8", "timestamp": "", "source": "github", "line_count": 24, "max_line_length": 69, "avg_line_length": 27.5, "alnum_prop": 0.7121212121212122, "repo_name": "friendshipCoin/friendshipcoin", "id": "7791ad2db2ef5f053e8cad9fee3870d5ad2f6e88", "size": "909", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "share/qt/clean_mac_info_plist.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "32226" }, { "name": "C++", "bytes": "2600273" }, { "name": "CSS", "bytes": "1127" }, { "name": "Groff", "bytes": "18284" }, { "name": "HTML", "bytes": "50615" }, { "name": "Makefile", "bytes": "13426" }, { "name": "NSIS", "bytes": "6094" }, { "name": "Objective-C", "bytes": "858" }, { "name": "Objective-C++", "bytes": "5864" }, { "name": "Python", "bytes": "69744" }, { "name": "QMake", "bytes": "14720" }, { "name": "Shell", "bytes": "12868" } ], "symlink_target": "" }
from tigershark.facade import CompositeAccess
from tigershark.facade import ElementAccess
from tigershark.facade import ElementSequenceAccess
from tigershark.facade import Facade
from tigershark.facade import Money
from tigershark.facade import SegmentAccess
from tigershark.facade import SegmentConversion
from tigershark.facade import SegmentSequenceAccess
from tigershark.facade import X12LoopBridge
from tigershark.facade import X12SegmentBridge
from tigershark.facade import XDecimal
from tigershark.facade import boolean
from tigershark.facade import enum
from tigershark.facade.enums.common import delivery_or_calendar_pattern_code
from tigershark.facade.enums.common import delivery_time_pattern_code
from tigershark.facade.enums.common import quantity_qualifier
from tigershark.facade.enums.common import time_period_qualifier
from tigershark.facade.enums.common import place_of_service
from tigershark.facade.enums.eligibility import coverage_level
from tigershark.facade.enums.eligibility import eligibility_or_benefit_code
from tigershark.facade.enums.eligibility import insurance_type
from tigershark.facade.enums.eligibility import reject_reason_code
from tigershark.facade.enums.eligibility import service_type_codes
from tigershark.facade.f27x import Address
from tigershark.facade.f27x import ContactInformation
from tigershark.facade.f27x import DateOrTimePeriod
from tigershark.facade.f27x import DemographicInformation
from tigershark.facade.f27x import Diagnosis
from tigershark.facade.f27x import Hierarchy
from tigershark.facade.f27x import Header
from tigershark.facade.f27x import Location
from tigershark.facade.f27x import NamedEntity
from tigershark.facade.f27x import ProviderInformation
from tigershark.facade.f27x import ReferenceID
from tigershark.facade.f27x import Relationship
from tigershark.facade.f27x import TraceNumber
from tigershark.facade.utils import first


class RequestValidation(X12SegmentBridge):
    """AAA segment: request validity and, when rejected, why."""
    valid_request = ElementAccess("AAA", 1, x12type=boolean("Y"))
    reject_reason = ElementAccess("AAA", 3, x12type=enum(reject_reason_code))
    follow_up_action_code = ElementAccess("AAA", 4, x12type=enum({
        "C": "Please Correct and Resubmit",
        "N": "Resubmission Not Allowed",
        "P": "Please Resubmit Original Transaction",
        "R": "Resubmission Allowed",
        "S": "Do Not Resubmit; Inquiry Initiated to a Third Party",
        "W": "Please Wait 30 Days and Resubmit",
        "X": "Please Wait 10 Days and Resubmit",
        "Y": "Do Not Resubmit; We Will Hold Your Request and Respond Again "
             "Shortly"}))


class Source(Facade, X12LoopBridge):
    """The information source is the entity with the eligibility answers"""
    loopName = "2000A"

    hierarchy = SegmentAccess("HL", x12type=SegmentConversion(Hierarchy))
    request_validations = SegmentSequenceAccess("AAA",
            x12type=SegmentConversion(RequestValidation))

    class _Information(X12LoopBridge):
        """Loop 2100A: identifying details for the source."""
        loopName = "2100A"

        name = SegmentAccess("NM1", x12type=SegmentConversion(NamedEntity))
        contact_information = SegmentSequenceAccess("PER",
                x12type=SegmentConversion(ContactInformation))
        reference_ids = SegmentSequenceAccess("REF",
                x12type=SegmentConversion(ReferenceID))
        request_validations = SegmentSequenceAccess("AAA",
                x12type=SegmentConversion(RequestValidation))

    def __init__(self, anX12Message, *args, **kwargs):
        super(Source, self).__init__(anX12Message, *args, **kwargs)
        self.source_information = first(self.loops(
            self._Information, anX12Message))
        self.receivers = self.loops(Receiver, anX12Message)


class Receiver(Facade, X12LoopBridge):
    """The entity asking the questions"""
    loopName = "2000B"

    hierarchy = SegmentAccess("HL", x12type=SegmentConversion(Hierarchy))

    class _Information(X12LoopBridge):
        """Loop 2100B: identifying details for the receiver."""
        loopName = "2100B"

        name = SegmentAccess("NM1", x12type=SegmentConversion(NamedEntity))
        contact_information = SegmentSequenceAccess("PER",
                x12type=SegmentConversion(ContactInformation))
        reference_ids = SegmentSequenceAccess("REF",
                x12type=SegmentConversion(ReferenceID))
        request_validations = SegmentSequenceAccess("AAA",
                x12type=SegmentConversion(RequestValidation))

    def __init__(self, anX12Message, *args, **kwargs):
        super(Receiver, self).__init__(anX12Message, *args, **kwargs)
        self.receiver_information = first(self.loops(
            self._Information, anX12Message))
        self.subscribers = self.loops(Subscriber, anX12Message)


class EligibilityOrBenefitInformation(X12SegmentBridge):
    """Eligibility Information (EB segment)."""
    information_type = ElementAccess("EB", 1, x12type=enum(
        eligibility_or_benefit_code))
    coverage_level = ElementAccess("EB", 2, x12type=enum(
        coverage_level))
    service_type = ElementAccess("EB", 3, x12type=enum(
        service_type_codes))
    insurance_type = ElementAccess("EB", 4, x12type=enum(
        insurance_type))
    description = ElementAccess("EB", 5)
    time_period_type = ElementAccess("EB", 6,
            x12type=enum(time_period_qualifier))
    benefit_amount = ElementAccess("EB", 7, x12type=Money)
    benefit_percent = ElementAccess("EB", 8, x12type=XDecimal)
    quantity_type = ElementAccess("EB", 9, x12type=enum(quantity_qualifier))
    quantity = ElementAccess("EB", 10)
    authorization_or_certification = ElementAccess("EB", 11,
            x12type=boolean("Y"))
    # EB12 is exposed three ways as booleans plus once as a labeled enum.
    in_plan_network = ElementAccess("EB", 12, x12type=boolean("Y"))
    out_of_plan_network = ElementAccess("EB", 12, x12type=boolean("N"))
    both_in_out_network = ElementAccess("EB", 12, x12type=boolean("W"))
    in_plan_network_type = ElementAccess("EB", 12, x12type=enum({
        "Y": "In Plan Network",
        "N": "Out of Plan Network",
        "W": "Both In and Out of Plan Network",
        }))
    # Composite EB13: procedure/diagnosis code under its qualifier.
    ada_code = CompositeAccess("EB", "AD", 13)
    cpt_code = CompositeAccess("EB", "CJ", 13)
    hcpcs_code = CompositeAccess("EB", "HC", 13)
    icd_9_cm_code = CompositeAccess("EB", "ID", 13)
    ndc_code = CompositeAccess("EB", "ND", 13)
    zz_code = CompositeAccess("EB", "ZZ", 13)


class HealthCareServicesDelivery(X12LoopBridge):
    """HSD segment: benefit quantity and delivery pattern details."""
    benefit_quantity_type = ElementAccess("HSD", 1,
            x12type=enum(quantity_qualifier))
    benefit_quantity = ElementAccess("HSD", 2)
    unit_or_basis_for_measurement = ElementAccess("HSD", 3, x12type=enum({
        "DA": "Days",
        "MO": "Months",
        "VS": "Visit",
        "WK": "Week",
        "YR": "Years"}))
    sample_selection_modulus = ElementAccess("HSD", 4)
    time_period_type = ElementAccess("HSD", 5,
            x12type=enum(time_period_qualifier))
    period_count = ElementAccess("HSD", 6)
    delivery_frequency = ElementAccess("HSD", 7,
            x12type=enum(delivery_or_calendar_pattern_code))
    delivery_time = ElementAccess("HSD", 8,
            x12type=enum(delivery_time_pattern_code))


class Message(X12LoopBridge):
    """MSG segment: free-form message text."""
    message_text = ElementAccess("MSG", 1)


class EligibilityOrBenefitAdditionalInformation(X12LoopBridge):
    """III segment: place-of-service qualifier under the ZZ code."""
    additional_information = CompositeAccess("III", "ZZ", 2,
            x12type=enum(place_of_service))


class Subscriber(Facade, X12LoopBridge):
    """The person uniquely identified by the Source.

    This person was identified as a member of the Source. Subscriber may
    or may not be the patient.

    NOTE: Patient
    The Patient may be the Subscriber or the Dependent. There are several
    ways of identifying which type the Patient is, but the two most common
    are:

        1. The Source assigns a unique ID number to each member of the
        Subscriber's family. In this case all dependents of a Subscriber
        are uniquely addressable by the Source, and are thus considered
        proper Subscribers, not Dependents.

        2. The Source only gives a unique ID number to the Subscriber, and
        all family members are identifiable only by giving the
        Subscriber's and the Dependent's information. In this case both
        the Subscriber and Dependent will be defined.
    """
    loopName = "2000C"

    hierarchy = SegmentAccess("HL", x12type=SegmentConversion(Hierarchy))
    trace_numbers = SegmentSequenceAccess("TRN",
            x12type=SegmentConversion(TraceNumber))

    class _Information(X12LoopBridge):
        """Loop 2100C: subscriber name, demographics and dates."""
        loopName = "2100C"

        name = SegmentAccess("NM1", x12type=SegmentConversion(NamedEntity))
        address_street = SegmentAccess("N3",
                x12type=SegmentConversion(Address))
        address_location = SegmentAccess("N4",
                x12type=SegmentConversion(Location))
        reference_ids = SegmentSequenceAccess("REF",
                x12type=SegmentConversion(ReferenceID))
        contact_information = SegmentSequenceAccess("PER",
                x12type=SegmentConversion(ContactInformation))
        request_validations = SegmentSequenceAccess("AAA",
                x12type=SegmentConversion(RequestValidation))
        demographic_information = SegmentAccess("DMG",
                x12type=SegmentConversion(DemographicInformation))
        relationship = SegmentAccess("INS",
                x12type=SegmentConversion(Relationship))
        dates = SegmentSequenceAccess("DTP",
                x12type=SegmentConversion(DateOrTimePeriod))

    class _EligibilityOrBenefitInformation(Facade, X12LoopBridge):
        """Loop 2110C: one eligibility/benefit answer for the subscriber."""
        loopName = "2110C"

        coverage_information = SegmentAccess("EB",
                x12type=SegmentConversion(EligibilityOrBenefitInformation))
        services_deliveries = SegmentSequenceAccess("HSD",
                x12type=SegmentConversion(HealthCareServicesDelivery))
        reference_ids = SegmentSequenceAccess("REF",
                x12type=SegmentConversion(ReferenceID))
        dates = SegmentSequenceAccess("DTP",
                x12type=SegmentConversion(DateOrTimePeriod))
        request_validations = SegmentSequenceAccess("AAA",
                x12type=SegmentConversion(RequestValidation))
        messages = ElementSequenceAccess("MSG", 1)
        additional_information = ElementSequenceAccess("III", 2,
                x12type=enum(place_of_service))

        class _AdditionalInformation(X12LoopBridge):
            """Loop 2115C: diagnosis details for the benefit."""
            loopName = "2115C"
            diagnosis = SegmentAccess("III",
                    x12type=SegmentConversion(Diagnosis))

        class _RelatedEntityInformation(X12LoopBridge):
            """Loop 2120C: the entity related to this benefit."""
            loopName = "2120C"
            name = SegmentAccess("NM1",
                    x12type=SegmentConversion(NamedEntity))
            address_street = SegmentAccess("N3",
                    x12type=SegmentConversion(Address))
            address_location = SegmentAccess("N4",
                    x12type=SegmentConversion(Location))
            contact_information = SegmentSequenceAccess("PER",
                    x12type=SegmentConversion(ContactInformation))
            provider_information = SegmentAccess("PRV",
                    x12type=SegmentConversion(ProviderInformation))

        def __init__(self, anX12Message, *args, **kwargs):
            super(Subscriber._EligibilityOrBenefitInformation, self).__init__(
                    anX12Message, *args, **kwargs)
            # NOTE: this instance attribute shadows the class-level
            # ElementSequenceAccess of the same name.
            self.additional_information = self.loops(
                    self._AdditionalInformation, anX12Message)
            self.benefit_related_entity = first(self.loops(
                    self._RelatedEntityInformation, anX12Message))

    def __init__(self, anX12Message, *args, **kwargs):
        super(Subscriber, self).__init__(anX12Message, *args, **kwargs)
        self.personal_information = first(self.loops(
            self._Information, anX12Message))
        self.eligibility_or_benefit_information = \
                self.loops(self._EligibilityOrBenefitInformation,
                        anX12Message)
        self.dependents = self.loops(Dependent, anX12Message)


class Dependent(Facade, X12LoopBridge):
    """The Dependent.

    This person was *NOT* identified as a member of the Source. If this
    is populated, then this is the patient.
    """
    loopName = "2000D"

    hierarchy = SegmentAccess("HL", x12type=SegmentConversion(Hierarchy))
    trace_numbers = SegmentSequenceAccess("TRN",
            x12type=SegmentConversion(TraceNumber))

    class _Information(X12LoopBridge):
        """Loop 2100D: dependent name, demographics and dates."""
        loopName = "2100D"

        name = SegmentAccess("NM1", x12type=SegmentConversion(NamedEntity))
        address_street = SegmentAccess("N3",
                x12type=SegmentConversion(Address))
        address_location = SegmentAccess("N4",
                x12type=SegmentConversion(Location))
        reference_ids = SegmentSequenceAccess("REF",
                x12type=SegmentConversion(ReferenceID))
        contact_information = SegmentSequenceAccess("PER",
                x12type=SegmentConversion(ContactInformation))
        request_validations = SegmentSequenceAccess("AAA",
                x12type=SegmentConversion(RequestValidation))
        demographic_information = SegmentAccess("DMG",
                x12type=SegmentConversion(DemographicInformation))
        relationship = SegmentAccess("INS",
                x12type=SegmentConversion(Relationship))
        dates = SegmentSequenceAccess("DTP",
                x12type=SegmentConversion(DateOrTimePeriod))

    class _EligibilityOrBenefitInformation(Facade, X12LoopBridge):
        """Loop 2110D: one eligibility/benefit answer for the dependent."""
        loopName = "2110D"

        coverage_information = SegmentAccess("EB",
                x12type=SegmentConversion(EligibilityOrBenefitInformation))
        services_deliveries = SegmentSequenceAccess("HSD",
                x12type=SegmentConversion(HealthCareServicesDelivery))
        reference_ids = SegmentSequenceAccess("REF",
                x12type=SegmentConversion(ReferenceID))
        dates = SegmentSequenceAccess("DTP",
                x12type=SegmentConversion(DateOrTimePeriod))
        request_validations = SegmentSequenceAccess("AAA",
                x12type=SegmentConversion(RequestValidation))
        messages = ElementSequenceAccess("MSG", 1)
        additional_information = ElementSequenceAccess("III", 2,
                x12type=enum(place_of_service))

        class _AdditionalInformation(X12LoopBridge):
            """Loop 2115D: diagnosis details for the benefit.

            Bug fix: this previously carried the *subscriber* loop name
            "2115C" (copy-paste from Subscriber), so dependent
            additional-information loops could never match.
            """
            loopName = "2115D"
            diagnosis = SegmentAccess("III",
                    x12type=SegmentConversion(Diagnosis))

        class _RelatedEntityInformation(X12LoopBridge):
            """Loop 2120D: the entity related to this benefit."""
            loopName = "2120D"
            name = SegmentAccess("NM1",
                    x12type=SegmentConversion(NamedEntity))
            address_street = SegmentAccess("N3",
                    x12type=SegmentConversion(Address))
            address_location = SegmentAccess("N4",
                    x12type=SegmentConversion(Location))
            contact_information = SegmentSequenceAccess("PER",
                    x12type=SegmentConversion(ContactInformation))
            provider_information = SegmentAccess("PRV",
                    x12type=SegmentConversion(ProviderInformation))

        def __init__(self, anX12Message, *args, **kwargs):
            super(Dependent._EligibilityOrBenefitInformation, self).__init__(
                    anX12Message, *args, **kwargs)
            # NOTE: this instance attribute shadows the class-level
            # ElementSequenceAccess of the same name.
            self.additional_information = self.loops(
                    self._AdditionalInformation, anX12Message)
            self.benefit_related_entity = first(self.loops(
                    self._RelatedEntityInformation, anX12Message))

    def __init__(self, anX12Message, *args, **kwargs):
        super(Dependent, self).__init__(anX12Message, *args, **kwargs)
        self.personal_information = first(self.loops(
            self._Information, anX12Message))
        self.eligibility_or_benefit_information = \
                self.loops(self._EligibilityOrBenefitInformation,
                        anX12Message)


class F271_4010(Facade):
    """Top-level facade for a 4010 271 message (or a batch of ST loops).

    NOTE(review): unlike F271_5010 this only extracts the header and
    source; receivers/subscribers are reached through ``source`` — confirm
    this asymmetry is intentional.
    """
    transaction_set_identifier_code = '271'
    x12_version_string = '4010'

    def __init__(self, anX12Message):
        st_loops = anX12Message.descendant('LOOP', name='ST_LOOP')
        if len(st_loops) > 0:
            # Batched message: wrap each ST loop in its own facade.
            self.facades = []
            for loop in st_loops:
                self.facades.append(F271_4010(loop))
        else:
            self.header = first(self.loops(Header, anX12Message))
            self.source = first(self.loops(Source, anX12Message))


class F271_5010(Facade):
    """Top-level facade for a 5010 271 message (or a batch of ST loops)."""
    transaction_set_identifier_code = '271'
    x12_version_string = '5010'

    def __init__(self, anX12Message):
        st_loops = anX12Message.descendant('LOOP', name='ST_LOOP')
        if len(st_loops) > 0:
            # Batched message: wrap each ST loop in its own facade.
            self.facades = []
            for loop in st_loops:
                self.facades.append(F271_5010(loop))
        else:
            self.header = first(self.loops(Header, anX12Message))
            self.source = first(self.loops(Source, anX12Message))
            self.receivers = self.loops(Receiver, anX12Message)
            self.subscribers = self.loops(Subscriber, anX12Message)
            self.dependents = self.loops(Dependent, anX12Message)
{ "content_hash": "033dbb5a3ba30861e590d2e4437922a9", "timestamp": "", "source": "github", "line_count": 389, "max_line_length": 96, "avg_line_length": 44.01542416452442, "alnum_prop": 0.6722345520383133, "repo_name": "jdavisp3/TigerShark", "id": "ca593e3bbd8f8af8bf8f52ef3156e4787b41953b", "size": "17122", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tigershark/facade/f271.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "36926" }, { "name": "HTML", "bytes": "120474" }, { "name": "JavaScript", "bytes": "46605" }, { "name": "PLpgSQL", "bytes": "113" }, { "name": "Perl", "bytes": "23425" }, { "name": "Python", "bytes": "4426497" } ], "symlink_target": "" }
import factory import os import six import uuid from django.contrib.auth import get_user_model from django.core.files.storage import default_storage from ..models import WooeyJob, ScriptGroup, Script, ScriptParameter, ScriptParameterGroup, ScriptParameters from .. import settings as wooey_settings class ScriptGroupFactory(factory.DjangoModelFactory): class Meta: model = ScriptGroup group_name = 'test group' group_description = 'test desc' class ScriptFactory(factory.DjangoModelFactory): class Meta: model = Script script_name = 'test script' script_group = factory.SubFactory(ScriptGroupFactory) script_description = 'test script desc' class UserFactory(factory.DjangoModelFactory): class Meta: model = get_user_model() username = 'user' email = 'a@a.com' password = 'testuser' class BaseJobFactory(factory.DjangoModelFactory): class Meta: model = WooeyJob job_name = six.u('\xd0\xb9\xd1\x86\xd1\x83') job_description = six.u('\xd0\xb9\xd1\x86\xd1\x83\xd0\xb5\xd0\xba\xd0\xb5') def generate_script(script_path): filename = os.path.join(script_path)[1] filename = os.path.join(wooey_settings.WOOEY_SCRIPT_DIR, filename) new_file = default_storage.save(filename, open(script_path)) from ..backend import utils res = utils.add_wooey_script(script_path=new_file, group=None) return res['script'] def generate_job(script_version): return BaseJobFactory(script_version=script_version)
{ "content_hash": "62b293a3dd89c003c0038c2beae0566e", "timestamp": "", "source": "github", "line_count": 57, "max_line_length": 107, "avg_line_length": 26.63157894736842, "alnum_prop": 0.7200263504611331, "repo_name": "waytai/Wooey", "id": "a8bfd6882fd178fa1825bf0accb966a7893a3c72", "size": "1518", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "wooey/tests/factories.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "1924" }, { "name": "HTML", "bytes": "72919" }, { "name": "JavaScript", "bytes": "811" }, { "name": "Makefile", "bytes": "359" }, { "name": "Python", "bytes": "186980" } ], "symlink_target": "" }
__author__ = 'jules' import cProfile import deepThought.ORM.ORM as ORM from deepThought.scheduler.PPPolicies import PPPolicies import deepThought.simulator.simulator as simulator def main(): job = ORM.deserialize("/tmp/output.pickle") scheduler = PPPolicies(job) job.initialize() scheduler.initialize() command = "simulator.simulate_schedule(scheduler)" cProfile.runctx( command, globals(), locals(), filename="/tmp/simulator.profile" ) if __name__ == "__main__": main()
{ "content_hash": "54ce94ad3e4a753ff6c7a95289f3be2a", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 86, "avg_line_length": 29.470588235294116, "alnum_prop": 0.7145708582834331, "repo_name": "juliusf/Genetic-SRCPSP", "id": "aedf6c189099d3e7a70d7808c53f28a9cfe811d5", "size": "501", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tools/profileSimulator.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "128549" }, { "name": "Shell", "bytes": "1791" } ], "symlink_target": "" }
import unittest import textwrap from .. import Document, Section, DefinitionList, Paragraph, Container class DefinitionListTestCase(unittest.TestCase): __shortLorem = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque facilisis nisi vel nibh" def setUp(self): unittest.TestCase.setUp(self) self.maxDiff = None self.doc = Document() def test_definition_list(self): self.doc.add( DefinitionList() .add("Item 1", Paragraph("Definition 1")) .add("Item 2", Paragraph("Definition 2")) ) self.assertEqual( self.doc.format(), textwrap.dedent("""\ Item 1 Definition 1 Item 2 Definition 2""" ) ) def test_items_with_different_lengths(self): self.doc.add( DefinitionList() .add("Item 1", Paragraph("Definition 1")) .add("Longer item 2", Paragraph("Definition 2")) ) self.assertEqual( self.doc.format(), textwrap.dedent("""\ Item 1 Definition 1 Longer item 2 Definition 2""" ) ) def test_within_sub_section(self): self.doc.add( Section("Section") .add( Section("Sub-section") .add( DefinitionList() .add("Item 1", Paragraph("Definition 1")) .add("Longer item 2", Paragraph("Definition 2")) ) ) ) self.assertEqual( self.doc.format(), textwrap.dedent("""\ Section Sub-section Item 1 Definition 1 Longer item 2 Definition 2""" ) ) def test_empty_definition(self): self.doc.add( DefinitionList() .add("Longer item 1", Paragraph("Definition 1")) .add("Item 2", Paragraph("")) .add("Longer item 3", Paragraph("")) ) self.assertEqual( self.doc.format(), textwrap.dedent("""\ Longer item 1 Definition 1 Item 2 Longer item 3""" ) ) def test_wrapping_of_definition_with_only_short_items(self): self.doc.add( Section("Section") .add( DefinitionList() .add("Item 1 (short enought)", Paragraph("Definition 1 " + self.__shortLorem)) .add("Item 2", Paragraph("Definition 2 " + self.__shortLorem)) ) ) self.assertEqual( self.doc.format(), textwrap.dedent( # 70 chars ########################################################### """\ Section Item 1 (short enought) Definition 1 Lorem ipsum dolor sit amet, 
consectetur adipiscing elit. Pellentesque facilisis nisi vel nibh Item 2 Definition 2 Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque facilisis nisi vel nibh""" ) ) def test_wrapping_of_definition_with_short_and_long_items(self): self.doc.add( Section("Section") .add( DefinitionList() .add("Item 1 (just tooo long)", Paragraph("Definition 1 " + self.__shortLorem)) .add("Item 2", Paragraph("Definition 2 " + self.__shortLorem)) ) ) self.assertEqual( self.doc.format(), textwrap.dedent( # 70 chars ########################################################### """\ Section Item 1 (just tooo long) Definition 1 Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque facilisis nisi vel nibh Item 2 Definition 2 Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque facilisis nisi vel nibh""" ) ) def test_wrapping_of_definition_with_only_long_items(self): self.doc.add( Section("Section") .add( DefinitionList() .add("Item 1 (just tooo long)", Paragraph("Definition 1 " + self.__shortLorem)) .add("Item 2 (also too long, really)", Paragraph("Definition 2 " + self.__shortLorem)) ) ) self.assertEqual( self.doc.format(), textwrap.dedent( # 70 chars ########################################################### """\ Section Item 1 (just tooo long) Definition 1 Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque facilisis nisi vel nibh Item 2 (also too long, really) Definition 2 Lorem ipsum dolor sit amet, consectetur adipiscing elit. 
Pellentesque facilisis nisi vel nibh""" ) ) def test_container_as_definition(self): self.doc.add( Section("Section") .add( DefinitionList() .add("Item", Container().add(Paragraph("Para 1")).add(Paragraph("Para 2"))) ) ) self.assertEqual( self.doc.format(), textwrap.dedent("""\ Section Item Para 1 Para 2""" ) ) def test_definition_list_as_definition(self): self.doc.add( Section("Section") .add( DefinitionList() .add( "Item 1", DefinitionList() .add("Item A", Paragraph("Definition A")) .add("Item B", Paragraph("Definition B")) ) .add( "Item 2", DefinitionList() .add("Item C", Paragraph("Definition C")) .add("Item D", Paragraph("Definition D")) ) ) ) self.assertEqual( self.doc.format(), textwrap.dedent("""\ Section Item 1 Item A Definition A Item B Definition B Item 2 Item C Definition C Item D Definition D""" ) )
{ "content_hash": "622ace91f6241a35591c54d26b6a2e94", "timestamp": "", "source": "github", "line_count": 198, "max_line_length": 114, "avg_line_length": 34.58585858585859, "alnum_prop": 0.4420268691588785, "repo_name": "jacquev6/RecursiveDocument", "id": "f08715515a7f56333a5ec048c6015e3bfe844e79", "size": "6933", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "RecursiveDocument/tests/DefinitionListTestCase.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "31159" } ], "symlink_target": "" }
import bz2, datetime import unittest import sqlite3 as sqlite class SqliteTypeTests(unittest.TestCase): def setUp(self): self.con = sqlite.connect(":memory:") self.cur = self.con.cursor() self.cur.execute("create table test(i integer, s varchar, f number, b blob)") def tearDown(self): self.cur.close() self.con.close() def CheckString(self): self.cur.execute("insert into test(s) values (?)", (u"Österreich",)) self.cur.execute("select s from test") row = self.cur.fetchone() self.failUnlessEqual(row[0], u"Österreich") def CheckSmallInt(self): self.cur.execute("insert into test(i) values (?)", (42,)) self.cur.execute("select i from test") row = self.cur.fetchone() self.failUnlessEqual(row[0], 42) def CheckLargeInt(self): num = 2**40 self.cur.execute("insert into test(i) values (?)", (num,)) self.cur.execute("select i from test") row = self.cur.fetchone() self.failUnlessEqual(row[0], num) def CheckFloat(self): val = 3.14 self.cur.execute("insert into test(f) values (?)", (val,)) self.cur.execute("select f from test") row = self.cur.fetchone() self.failUnlessEqual(row[0], val) def CheckBlob(self): val = buffer("Guglhupf") self.cur.execute("insert into test(b) values (?)", (val,)) self.cur.execute("select b from test") row = self.cur.fetchone() self.failUnlessEqual(row[0], val) def CheckUnicodeExecute(self): self.cur.execute(u"select 'Österreich'") row = self.cur.fetchone() self.failUnlessEqual(row[0], u"Österreich") class DeclTypesTests(unittest.TestCase): class Foo: def __init__(self, _val): self.val = _val def __cmp__(self, other): if not isinstance(other, DeclTypesTests.Foo): raise ValueError if self.val == other.val: return 0 else: return 1 def __conform__(self, protocol): if protocol is sqlite.PrepareProtocol: return self.val else: return None def __str__(self): return "<%s>" % self.val def setUp(self): self.con = sqlite.connect(":memory:", detect_types=sqlite.PARSE_DECLTYPES) self.cur = self.con.cursor() self.cur.execute("create table test(i int, s str, f float, b 
bool, u unicode, foo foo, bin blob)") # override float, make them always return the same number sqlite.converters["FLOAT"] = lambda x: 47.2 # and implement two custom ones sqlite.converters["BOOL"] = lambda x: bool(int(x)) sqlite.converters["FOO"] = DeclTypesTests.Foo def tearDown(self): del sqlite.converters["FLOAT"] del sqlite.converters["BOOL"] del sqlite.converters["FOO"] self.cur.close() self.con.close() def CheckString(self): # default self.cur.execute("insert into test(s) values (?)", ("foo",)) self.cur.execute("select s from test") row = self.cur.fetchone() self.failUnlessEqual(row[0], "foo") def CheckSmallInt(self): # default self.cur.execute("insert into test(i) values (?)", (42,)) self.cur.execute("select i from test") row = self.cur.fetchone() self.failUnlessEqual(row[0], 42) def CheckLargeInt(self): # default num = 2**40 self.cur.execute("insert into test(i) values (?)", (num,)) self.cur.execute("select i from test") row = self.cur.fetchone() self.failUnlessEqual(row[0], num) def CheckFloat(self): # custom val = 3.14 self.cur.execute("insert into test(f) values (?)", (val,)) self.cur.execute("select f from test") row = self.cur.fetchone() self.failUnlessEqual(row[0], 47.2) def CheckBool(self): # custom self.cur.execute("insert into test(b) values (?)", (False,)) self.cur.execute("select b from test") row = self.cur.fetchone() self.failUnlessEqual(row[0], False) self.cur.execute("delete from test") self.cur.execute("insert into test(b) values (?)", (True,)) self.cur.execute("select b from test") row = self.cur.fetchone() self.failUnlessEqual(row[0], True) def CheckUnicode(self): # default val = u"\xd6sterreich" self.cur.execute("insert into test(u) values (?)", (val,)) self.cur.execute("select u from test") row = self.cur.fetchone() self.failUnlessEqual(row[0], val) def CheckFoo(self): val = DeclTypesTests.Foo("bla") self.cur.execute("insert into test(foo) values (?)", (val,)) self.cur.execute("select foo from test") row = self.cur.fetchone() 
self.failUnlessEqual(row[0], val) def CheckUnsupportedSeq(self): class Bar: pass val = Bar() try: self.cur.execute("insert into test(f) values (?)", (val,)) self.fail("should have raised an InterfaceError") except sqlite.InterfaceError: pass except: self.fail("should have raised an InterfaceError") def CheckUnsupportedDict(self): class Bar: pass val = Bar() try: self.cur.execute("insert into test(f) values (:val)", {"val": val}) self.fail("should have raised an InterfaceError") except sqlite.InterfaceError: pass except: self.fail("should have raised an InterfaceError") def CheckBlob(self): # default val = buffer("Guglhupf") self.cur.execute("insert into test(bin) values (?)", (val,)) self.cur.execute("select bin from test") row = self.cur.fetchone() self.failUnlessEqual(row[0], val) class ColNamesTests(unittest.TestCase): def setUp(self): self.con = sqlite.connect(":memory:", detect_types=sqlite.PARSE_COLNAMES|sqlite.PARSE_DECLTYPES) self.cur = self.con.cursor() self.cur.execute("create table test(x foo)") sqlite.converters["FOO"] = lambda x: "[%s]" % x sqlite.converters["BAR"] = lambda x: "<%s>" % x sqlite.converters["EXC"] = lambda x: 5/0 def tearDown(self): del sqlite.converters["FOO"] del sqlite.converters["BAR"] del sqlite.converters["EXC"] self.cur.close() self.con.close() def CheckDeclType(self): self.cur.execute("insert into test(x) values (?)", ("xxx",)) self.cur.execute("select x from test") val = self.cur.fetchone()[0] self.failUnlessEqual(val, "[xxx]") def CheckNone(self): self.cur.execute("insert into test(x) values (?)", (None,)) self.cur.execute("select x from test") val = self.cur.fetchone()[0] self.failUnlessEqual(val, None) def CheckColName(self): self.cur.execute("insert into test(x) values (?)", ("xxx",)) self.cur.execute('select x as "x [bar]" from test') val = self.cur.fetchone()[0] self.failUnlessEqual(val, "<xxx>") # Check if the stripping of colnames works. Everything after the first # whitespace should be stripped. 
self.failUnlessEqual(self.cur.description[0][0], "x") def CheckCursorDescriptionNoRow(self): """ cursor.description should at least provide the column name(s), even if no row returned. """ self.cur.execute("select * from test where 0 = 1") self.assert_(self.cur.description[0][0] == "x") class ObjectAdaptationTests(unittest.TestCase): def cast(obj): return float(obj) cast = staticmethod(cast) def setUp(self): self.con = sqlite.connect(":memory:") try: del sqlite.adapters[int] except: pass sqlite.register_adapter(int, ObjectAdaptationTests.cast) self.cur = self.con.cursor() def tearDown(self): del sqlite.adapters[(int, sqlite.PrepareProtocol)] self.cur.close() self.con.close() def CheckCasterIsUsed(self): self.cur.execute("select ?", (4,)) val = self.cur.fetchone()[0] self.failUnlessEqual(type(val), float) class BinaryConverterTests(unittest.TestCase): def convert(s): return bz2.decompress(s) convert = staticmethod(convert) def setUp(self): self.con = sqlite.connect(":memory:", detect_types=sqlite.PARSE_COLNAMES) sqlite.register_converter("bin", BinaryConverterTests.convert) def tearDown(self): self.con.close() def CheckBinaryInputForConverter(self): testdata = "abcdefg" * 10 result = self.con.execute('select ? 
as "x [bin]"', (buffer(bz2.compress(testdata)),)).fetchone()[0] self.failUnlessEqual(testdata, result) class DateTimeTests(unittest.TestCase): def setUp(self): self.con = sqlite.connect(":memory:", detect_types=sqlite.PARSE_DECLTYPES) self.cur = self.con.cursor() self.cur.execute("create table test(d date, ts timestamp)") def tearDown(self): self.cur.close() self.con.close() def CheckSqliteDate(self): d = sqlite.Date(2004, 2, 14) self.cur.execute("insert into test(d) values (?)", (d,)) self.cur.execute("select d from test") d2 = self.cur.fetchone()[0] self.failUnlessEqual(d, d2) def CheckSqliteTimestamp(self): ts = sqlite.Timestamp(2004, 2, 14, 7, 15, 0) self.cur.execute("insert into test(ts) values (?)", (ts,)) self.cur.execute("select ts from test") ts2 = self.cur.fetchone()[0] self.failUnlessEqual(ts, ts2) def CheckSqlTimestamp(self): # The date functions are only available in SQLite version 3.1 or later if sqlite.sqlite_version_info < (3, 1): return # SQLite's current_timestamp uses UTC time, while datetime.datetime.now() uses local time. 
now = datetime.datetime.now() self.cur.execute("insert into test(ts) values (current_timestamp)") self.cur.execute("select ts from test") ts = self.cur.fetchone()[0] self.failUnlessEqual(type(ts), datetime.datetime) self.failUnlessEqual(ts.year, now.year) def CheckDateTimeSubSeconds(self): ts = sqlite.Timestamp(2004, 2, 14, 7, 15, 0, 500000) self.cur.execute("insert into test(ts) values (?)", (ts,)) self.cur.execute("select ts from test") ts2 = self.cur.fetchone()[0] self.failUnlessEqual(ts, ts2) def suite(): sqlite_type_suite = unittest.makeSuite(SqliteTypeTests, "Check") decltypes_type_suite = unittest.makeSuite(DeclTypesTests, "Check") colnames_type_suite = unittest.makeSuite(ColNamesTests, "Check") adaptation_suite = unittest.makeSuite(ObjectAdaptationTests, "Check") bin_suite = unittest.makeSuite(BinaryConverterTests, "Check") date_suite = unittest.makeSuite(DateTimeTests, "Check") return unittest.TestSuite((sqlite_type_suite, decltypes_type_suite, colnames_type_suite, adaptation_suite, bin_suite, date_suite)) def test(): runner = unittest.TextTestRunner() runner.run(suite()) if __name__ == "__main__": test()
{ "content_hash": "1db4ff491b698626fc686dc23415a8b6", "timestamp": "", "source": "github", "line_count": 328, "max_line_length": 134, "avg_line_length": 34.8780487804878, "alnum_prop": 0.6002622377622377, "repo_name": "TathagataChakraborti/resource-conflicts", "id": "8da5722d55faff89d40272e3840a3e2f7eadc733", "size": "12475", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "PLANROB-2015/seq-sat-lama/py2.5/lib/python2.5/sqlite3/test/types.py", "mode": "33188", "license": "mit", "language": [ { "name": "Assembly", "bytes": "207947" }, { "name": "Batchfile", "bytes": "9764" }, { "name": "C", "bytes": "14253103" }, { "name": "C++", "bytes": "754817" }, { "name": "CSS", "bytes": "9779" }, { "name": "DIGITAL Command Language", "bytes": "13234" }, { "name": "Emacs Lisp", "bytes": "174752" }, { "name": "Groff", "bytes": "43625" }, { "name": "HTML", "bytes": "418642" }, { "name": "Inno Setup", "bytes": "18796" }, { "name": "Makefile", "bytes": "392287" }, { "name": "Matlab", "bytes": "918" }, { "name": "Objective-C", "bytes": "28604" }, { "name": "Perl", "bytes": "163937" }, { "name": "Prolog", "bytes": "66" }, { "name": "Python", "bytes": "38769203" }, { "name": "R", "bytes": "2349" }, { "name": "SAS", "bytes": "57249" }, { "name": "Shell", "bytes": "173594" }, { "name": "TeX", "bytes": "5169842" }, { "name": "VimL", "bytes": "9563" }, { "name": "Visual Basic", "bytes": "1443" } ], "symlink_target": "" }
import sys, os import fontforge import optparse # print 'ARGV :', sys.argv[1:] parser = optparse.OptionParser() parser.add_option('-w', '--woff', dest="woff", action="store_true", default=False, help='Save in WOFF format (.woff)' ) parser.add_option('-o', '--otf', dest="otf", action="store_true", default=False, help='Save in OpenType format (.otf)' ) parser.add_option('-t', '--ttf', dest="ttf", action="store_true", default=False, help='Save in TrueType format (.ttf)' ) parser.add_option('-s', '--svg', dest="svg", action="store_true", default=False, help='Save in SVG Font format (.svg)' ) parser.add_option('-e', '--eot', dest="eot", action="store_true", default=False, help='Save in Embedded OpenType format (.eot)' ) parser.add_option('-u', '--ufo', dest="ufo", action="store_true", default=False, help='Save in UFO format (.ufo)' ) options, remainder = parser.parse_args() if not len(remainder) == 1: print "Only one non-keyword argument allowed." sys.exit() fontname = remainder[0] font = fontforge.open(fontname) d = os.path.dirname(os.path.abspath(fontname)) + '/' filename = os.path.basename(fontname) basename, ext = os.path.splitext(filename) print d print filename print basename woff_filename = d + basename + '.woff' otf_filename = d + basename + '.otf' ttf_filename = d + basename + '.ttf' svg_filename = d + basename + '.svg' eot_filename = d + basename + '.eot' ufo_filename = d + basename + '.ufo' if options.woff: print woff_filename font.generate(woff_filename) if options.otf: print otf_filename font.generate(otf_filename) if options.ttf: print ttf_filename font.generate(ttf_filename) if options.svg: print svg_filename font.generate(svg_filename) if options.ufo: print ufo_filename font.generate(ufo_filename) if options.eot: print eot_filename if options.ttf: cmd = 'ttf2eot < %s > %s' % (ttf_filename, eot_filename) os.system(cmd) else: font.generate(ttf_filename) cmd = 'ttf2eot < %s > %s' % (ttf_filename, eot_filename) os.system(cmd) os.remove(ttf_filename)
{ "content_hash": "7818e9bc51816ce89f46a635ce6a0c4a", "timestamp": "", "source": "github", "line_count": 91, "max_line_length": 64, "avg_line_length": 29.252747252747252, "alnum_prop": 0.5274229902329076, "repo_name": "quitequinn/advanced-font-converter", "id": "af6996e3fab935815c6b2180aa09f6c5ee11f38c", "size": "2685", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "convertors/fontconvert.py", "mode": "33261", "license": "mit", "language": [ { "name": "C++", "bytes": "132694" }, { "name": "JavaScript", "bytes": "5870" }, { "name": "Makefile", "bytes": "982" }, { "name": "Python", "bytes": "7495" }, { "name": "Ruby", "bytes": "391" } ], "symlink_target": "" }
@app.route('/api/v1.0/<int:api_key>/scrape/rushhour/new',methods=['GET']) ########scrapes the Rush Hour index page def get_rush_hour_index(api_key): if str(api_key)!=the_api_key: return 401 #base_url = 'http://www.rushhour.nl/store_master.php?idxGroup=2&idxGenre=2&idxSubGenre=&app=250' week = datetime.datetime.utcnow().isocalendar()[1] year = datetime.datetime.utcnow().isocalendar()[0] base_url = 'http://www.rushhour.nl/store_master.php?blNew=1&bIsOutOfStock=1&numYear=%s&numWeek=%s&app=250' % (year,week) #for selenium display = Display(visible=0, size=(800, 600)) display.start() geckodriver_log_location = os.path.join(app.root_path, 'logs', 'geckodriver.log') print(geckodriver_log_location) # return geckodriver_log_location ####now get the HTML try: r = requests.get(base_url,timeout=5) except Exception as e: return "Failed to request the Rush Hour URL " + base_url, 405 #need to use selenium because of the popup browser = webdriver.Firefox(log_path=geckodriver_log_location) browser.get(base_url) try: alert = browser.switch_to_alert() alert.accept() print "alert accpted" except: print "no alert" html = browser.page_source browser.close() display.sendstop() soup = BeautifulSoup(html, "lxml") for product in soup.find_all("div","item_wrap1"): details = str() label_html = str() label = str() label_url = str() artist_title = str() split_a_t = str() artist = str() title = str() release_url = str() the_release = product.find("div","item_content") all_details = the_release.find("h2","title") #print all_details release_url = all_details.findAll("a")[0]['href'] url_split = release_url.split('=') store_release_id = url_split[1] print store_release_id all_details_reg = all_details.text.split(' - ') title = all_details_reg[1] label = all_details_reg[2] print title,label if len(store_release_id)<1: print('Didnt get the store id - skip') continue if len(label) < 3 or len(title) < 3: print('skipping ' + title + ' or ' + label + ' as less than 3 characters') continue #sql = ('SELECT 
id FROM releases_all WHERE label_no_country LIKE %s AND title LIKE %s') % ('%' + label + '%','%' + title + '%') try: query = db_insert('INSERT INTO store_mappings (release_id,store,store_url,unique_key,store_release_id) SELECT id,%s,%s, md5(concat(id,%s)),%s FROM releases_all WHERE label_no_country LIKE %s AND title LIKE %s ON DUPLICATE KEY UPDATE store_url=values(store_url),store_release_id=values(store_release_id)', ('rushhour','/' + release_url,'rushhour',store_release_id,label + '%','%' + title + '%')) data = query.fetchall() print(query,data) except Exception as e: print(str(e)) continue return base_url,201 @app.route('/api/v1.0/<int:api_key>/scrape/rushhour/release/<string:rushhour_id>',methods=['GET']) def get_rushhour_release(api_key,rushhour_id): if str(api_key)!=the_api_key: return 401 base_url = 'http://www.rushhour.nl/store_detailed.php?item=' + rushhour_id #for selenium display = Display(visible=0, size=(800, 600)) display.start() geckodriver_log_location = os.path.join(app.root_path, 'logs', 'geckodriver.log') print(geckodriver_log_location) # return geckodriver_log_location ####now get the HTML try: r = requests.get(base_url,timeout=5) except Exception as e: return "Failed to request the Rush Hour URL " + base_url, 405 #need to use selenium because of the popup browser = webdriver.Firefox(log_path=geckodriver_log_location) browser.get(base_url) try: alert = browser.switch_to_alert() alert.accept() print "alert accpted" except: print "no alert" html = browser.page_source browser.close() display.sendstop() soup = BeautifulSoup(html, "lxml") stock_details = soup.findAll("img",class_="cart_icon") print(stock_details) cart_url = 'http://www.rushhour.nl/store_detailed.php?action=add&item=' + rushhour_id if len(stock_details) > 0: return jsonify({'store':'rushhour','in_stock':'true','cart_url':cart_url}) else: return jsonify({'store':'rushhour','in_stock':'false','cart_url':cart_url})
{ "content_hash": "a037bc5dc832eb84c7e1e13d0dc22c75", "timestamp": "", "source": "github", "line_count": 136, "max_line_length": 406, "avg_line_length": 33.75735294117647, "alnum_prop": 0.6144630799390111, "repo_name": "siquick/ss_api", "id": "24d84abf886cde487b9f2f8a0601399d7d82f0e6", "size": "4591", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "api/retired_rushhour.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "2863" } ], "symlink_target": "" }
'''Autogenerated by get_gl_extensions script, do not edit!''' from OpenGL import platform as _p, constants as _cs, arrays from OpenGL.GL import glget import ctypes EXTENSION_NAME = 'GL_EXT_stencil_clear_tag' def _f( function ): return _p.createFunction( function,_p.GL,'GL_EXT_stencil_clear_tag',False) _p.unpack_constants( """GL_STENCIL_TAG_BITS_EXT 0x88F2 GL_STENCIL_CLEAR_TAG_VALUE_EXT 0x88F3""", globals()) @_f @_p.types(None,_cs.GLsizei,_cs.GLuint) def glStencilClearTagEXT( stencilTagBits,stencilClearTag ):pass def glInitStencilClearTagEXT(): '''Return boolean indicating whether this extension is available''' from OpenGL import extensions return extensions.hasGLExtension( EXTENSION_NAME )
{ "content_hash": "cc43829ff3131b04e6e70b30627b25b3", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 78, "avg_line_length": 39.833333333333336, "alnum_prop": 0.7545327754532776, "repo_name": "frederica07/Dragon_Programming_Process", "id": "117fbe415912208c7e3cc118b1aadeb03e65101c", "size": "717", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "PyOpenGL-3.0.2/OpenGL/raw/GL/EXT/stencil_clear_tag.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Makefile", "bytes": "1548" }, { "name": "Python", "bytes": "2558317" } ], "symlink_target": "" }
"""Offer time listening automation rules.""" import voluptuous as vol from homeassistant.const import CONF_PLATFORM from homeassistant.core import HassJob, callback from homeassistant.helpers import config_validation as cv from homeassistant.helpers.event import async_track_time_change # mypy: allow-untyped-defs, no-check-untyped-defs CONF_HOURS = "hours" CONF_MINUTES = "minutes" CONF_SECONDS = "seconds" class TimePattern: """Validate a time pattern value. :raises Invalid: If the value has a wrong format or is outside the range. """ def __init__(self, maximum): """Initialize time pattern.""" self.maximum = maximum def __call__(self, value): """Validate input.""" try: if value == "*": return value if isinstance(value, str) and value.startswith("/"): number = int(value[1:]) else: value = number = int(value) if not (0 <= number <= self.maximum): raise vol.Invalid(f"must be a value between 0 and {self.maximum}") except ValueError as err: raise vol.Invalid("invalid time_pattern value") from err return value TRIGGER_SCHEMA = vol.All( cv.TRIGGER_BASE_SCHEMA.extend( { vol.Required(CONF_PLATFORM): "time_pattern", CONF_HOURS: TimePattern(maximum=23), CONF_MINUTES: TimePattern(maximum=59), CONF_SECONDS: TimePattern(maximum=59), } ), cv.has_at_least_one_key(CONF_HOURS, CONF_MINUTES, CONF_SECONDS), ) async def async_attach_trigger(hass, config, action, automation_info): """Listen for state changes based on configuration.""" trigger_data = automation_info["trigger_data"] hours = config.get(CONF_HOURS) minutes = config.get(CONF_MINUTES) seconds = config.get(CONF_SECONDS) job = HassJob(action) # If larger units are specified, default the smaller units to zero if minutes is None and hours is not None: minutes = 0 if seconds is None and minutes is not None: seconds = 0 @callback def time_automation_listener(now): """Listen for time changes and calls action.""" hass.async_run_hass_job( job, { "trigger": { **trigger_data, "platform": "time_pattern", "now": now, "description": "time 
pattern", } }, ) return async_track_time_change( hass, time_automation_listener, hour=hours, minute=minutes, second=seconds )
{ "content_hash": "68934cf27cb3f06c5be48ee3593e6826", "timestamp": "", "source": "github", "line_count": 89, "max_line_length": 82, "avg_line_length": 29.853932584269664, "alnum_prop": 0.5957847196085811, "repo_name": "Danielhiversen/home-assistant", "id": "000d73b6cd191a6301c977f1ac8e886d48d5cf78", "size": "2657", "binary": false, "copies": "7", "ref": "refs/heads/dev", "path": "homeassistant/components/homeassistant/triggers/time_pattern.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2443" }, { "name": "Python", "bytes": "36870185" }, { "name": "Shell", "bytes": "4910" } ], "symlink_target": "" }
__author__ = 'mongolrgata' import os import struct import sys NUL_CHAR16 = chr(0).encode('utf-16le') def read_unsigned_int32(file): """ :param file: :type file: io.FileIO :return: :rtype: int """ return struct.unpack('<L', file.read(4))[0] def read_filename(file): """ :param file: :type file: io.FileIO :return: :rtype: str """ result = bytearray() while True: char = file.read(2) if char == NUL_CHAR16: break result.extend(char) return result.decode('utf-16le') def prepare_params(arc_filename): """ :param arc_filename: :type arc_filename: str :return: :rtype (str, str) """ return ( arc_filename, os.path.splitext(arc_filename)[0] ) def extract(arc_filename, directory): """ :param arc_filename: :type arc_filename: str :return: :rtype: list[str] """ with open(arc_filename, 'rb') as arc_file: file_count = read_unsigned_int32(arc_file) arc_file.seek(4, os.SEEK_CUR) # header_length file_lengths = [] file_names = [] if not os.path.exists(directory): os.mkdir(directory) for i in range(0, file_count): file_lengths.append(read_unsigned_int32(arc_file)) arc_file.seek(4, os.SEEK_CUR) # file_offset file_names.append(read_filename(arc_file)) for i in range(0, file_count): with open(os.path.join(directory, file_names[i]), 'wb') as file_out: file_out.write(arc_file.read(file_lengths[i])) with open(os.path.join(directory, 'order'), 'wt', encoding='utf-8') as order_file: order_file.write('\n'.join(file_names)) return file_names def main(): extract(*prepare_params(sys.argv[1])) if __name__ == '__main__': main()
{ "content_hash": "ced7844083da732e4aa4bceadf87c945", "timestamp": "", "source": "github", "line_count": 93, "max_line_length": 86, "avg_line_length": 20.182795698924732, "alnum_prop": 0.5652637187000533, "repo_name": "mongolrgata/cool-beauty-tools", "id": "79586e2b912a5d15043a95a10cdeb8a50432824d", "size": "1877", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": ".arc-tools/dearcer/dearcer.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "695" }, { "name": "CSS", "bytes": "416" }, { "name": "HTML", "bytes": "837" }, { "name": "JavaScript", "bytes": "5536" }, { "name": "Python", "bytes": "21186" } ], "symlink_target": "" }
"""Function for interpolating formatted errors from the TensorFlow runtime. Exposes the function `interpolate` to interpolate messages with tags of the form {{type name}}. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import itertools import os import re import six from tensorflow.python.util import tf_stack _NAME_REGEX = r"[A-Za-z0-9.][A-Za-z0-9_.\-/]*?" _TAG_REGEX = r"{{{{({name}) ({name})}}}}".format(name=_NAME_REGEX) _INTERPOLATION_REGEX = r"^(.*?)({tag})".format(tag=_TAG_REGEX) _INTERPOLATION_PATTERN = re.compile(_INTERPOLATION_REGEX, re.DOTALL) _ParseTag = collections.namedtuple("_ParseTag", ["type", "name"]) _BAD_FILE_SUBSTRINGS = [ os.path.join("tensorflow", "python"), "<embedded", ] def _parse_message(message): """Parses the message. Splits the message into separators and tags. Tags are named tuples representing the string {{type name}} and they are separated by separators. For example, in "123{{node Foo}}456{{node Bar}}789", there are two tags and three separators. The separators are the numeric characters. Args: message: String to parse Returns: (list of separator strings, list of _ParseTags). For example, if message is "123{{node Foo}}456" then this function returns (["123", "456"], [_ParseTag("node", "Foo")]) """ seps = [] tags = [] pos = 0 while pos < len(message): match = re.match(_INTERPOLATION_PATTERN, message[pos:]) if match: seps.append(match.group(1)) tags.append(_ParseTag(match.group(3), match.group(4))) pos += match.end() else: break seps.append(message[pos:]) return seps, tags def _compute_device_summary_from_list(name, device_assignment_list, prefix=""): """Return a summary of an op's device function stack. Args: name: The name of the op. device_assignment_list: The op._device_assignments list. prefix: An optional string prefix used before each line of the multi- line string returned by this function. 
Returns: A multi-line string similar to: Device assignments active during op 'foo' creation: with tf.device(/cpu:0): <test_1.py:27> with tf.device(some_func<foo.py, 123>): <test_2.py:38> The first line will have no padding to its left by default. Subsequent lines will have two spaces of left-padding. Use the prefix argument to increase indentation. """ if not device_assignment_list: message = "No device assignments were active during op '%s' creation." message %= name return prefix + message str_list = [] str_list.append( "%sDevice assignments active during op '%s' creation:" % (prefix, name)) for traceable_obj in device_assignment_list: location_summary = "<{file}:{line}>".format( file=traceable_obj.filename, line=traceable_obj.lineno) subs = { "prefix": prefix, "indent": " ", "dev_name": traceable_obj.obj, "loc": location_summary, } str_list.append( "{prefix}{indent}with tf.device({dev_name}): {loc}".format(**subs)) return "\n".join(str_list) def _compute_device_assignment_summary_from_op(op, prefix=""): # pylint: disable=protected-access return _compute_device_summary_from_list(op.name, op._device_assignments, prefix) # pylint: enable=protected-access def _compute_colocation_summary_from_dict(name, colocation_dict, prefix=""): """Return a summary of an op's colocation stack. Args: name: The op name. colocation_dict: The op._colocation_dict. prefix: An optional string prefix used before each line of the multi- line string returned by this function. Returns: A multi-line string similar to: Node-device colocations active during op creation: with tf.colocate_with(test_node_1): <test_1.py:27> with tf.colocate_with(test_node_2): <test_2.py:38> The first line will have no padding to its left by default. Subsequent lines will have two spaces of left-padding. Use the prefix argument to increase indentation. """ if not colocation_dict: message = "No node-device colocations were active during op '%s' creation." 
message %= name return prefix + message str_list = [] str_list.append("%sNode-device colocations active during op '%s' creation:" % (prefix, name)) for coloc_name, location in colocation_dict.items(): location_summary = "<{file}:{line}>".format( file=location.filename, line=location.lineno) subs = { "prefix": prefix, "indent": " ", "name": coloc_name, "loc": location_summary, } str_list.append( "{prefix}{indent}with tf.colocate_with({name}): {loc}".format(**subs)) return "\n".join(str_list) def _compute_colocation_summary_from_op(op, prefix=""): """Fetch colocation file, line, and nesting and return a summary string.""" # pylint: disable=protected-access return _compute_colocation_summary_from_dict(op.name, op._colocation_dict, prefix) # pylint: enable=protected-access def _find_index_of_defining_frame_for_op(op): """Return index in op._traceback with first 'useful' frame. This method reads through the stack stored in op._traceback looking for the innermost frame which (hopefully) belongs to the caller. It accomplishes this by rejecting frames whose filename appears to come from TensorFlow (see error_interpolation._BAD_FILE_SUBSTRINGS for the list of rejected substrings). Args: op: the Operation object for which we would like to find the defining location. Returns: Integer index into op._traceback where the first non-TF file was found (innermost to outermost), or 0 (for the outermost stack frame) if all files came from TensorFlow. """ # pylint: disable=protected-access # Index 0 of tf_traceback is the outermost frame. tf_traceback = tf_stack.convert_stack(op._traceback) size = len(tf_traceback) # pylint: enable=protected-access filenames = [frame[tf_stack.TB_FILENAME] for frame in tf_traceback] # We process the filenames from the innermost frame to outermost. 
for idx, filename in enumerate(reversed(filenames)): contains_bad_substrings = [ss in filename for ss in _BAD_FILE_SUBSTRINGS] if not any(contains_bad_substrings): return size - idx - 1 return 0 def _get_defining_frame_from_op(op): """Find and return stack frame where op was defined.""" frame_index = _find_index_of_defining_frame_for_op(op) # pylint: disable=protected-access frame = op._traceback[frame_index] # pylint: enable=protected-access return frame def compute_field_dict(op): """Return a dictionary mapping interpolation tokens to values. Args: op: op.Operation object having a _traceback member. Returns: A dictionary mapping string tokens to string values. The keys are shown below along with example values. { "file": "tool_utils.py", "line": "124", "defined_at": " (defined at tool_utils.py:124)", "colocations": '''Node-device colocations active during op creation: with tf.colocate_with(test_node_1): <test_1.py:27> with tf.colocate_with(test_node_2): <test_2.py:38>''' "devices": '''Device assignments active during op 'foo' creation: with tf.device(/cpu:0): <test_1.py:27> with tf.device(some_func<foo.py, 123>): <test_2.py:38>''' "devs_and_colocs": A concatenation of colocations and devices, e.g. 
'''Node-device colocations active during op creation: with tf.colocate_with(test_node_1): <test_1.py:27> with tf.colocate_with(test_node_2): <test_2.py:38>''' Device assignments active during op 'foo' creation: with tf.device(/cpu:0): <test_1.py:27> with tf.device(some_func<foo.py, 123>): <test_2.py:38>''' } """ frame = _get_defining_frame_from_op(op) filename = frame[tf_stack.TB_FILENAME] lineno = frame[tf_stack.TB_LINENO] defined_at = " (defined at %s:%d)" % (filename, lineno) colocation_summary = _compute_colocation_summary_from_op(op) device_summary = _compute_device_assignment_summary_from_op(op) combined_summary = "\n".join([colocation_summary, device_summary]) field_dict = { "file": filename, "line": lineno, "defined_at": defined_at, "colocations": colocation_summary, "devices": device_summary, "devs_and_colocs": combined_summary, } return field_dict def interpolate(error_message, graph): """Interpolates an error message. The error message can contain tags of the form ^^type:name^^ which will be replaced. Args: error_message: A string to interpolate. graph: ops.Graph object containing all nodes referenced in the error message. Returns: The string with tags of the form {{type name}} interpolated. """ seps, tags = _parse_message(error_message) subs = [] end_msg = "" for t in tags: try: op = graph.get_operation_by_name(t.name) except KeyError: op = None msg = "{{%s %s}}" % (t.type, t.name) if op is not None: field_dict = compute_field_dict(op) if t.type == "node": msg = "node %s%s " % (t.name, field_dict["defined_at"]) elif t.type == "colocation_node": msg = "node %s%s having device %s " % (t.name, field_dict["defined_at"], field_dict["devices"]) end_msg += "\n\n" + field_dict["devs_and_colocs"] subs.append(msg) subs.append(end_msg) return "".join( itertools.chain(*six.moves.zip_longest(seps, subs, fillvalue="")))
{ "content_hash": "8fc02a0848ce541128bd307c1f6c4866", "timestamp": "", "source": "github", "line_count": 290, "max_line_length": 80, "avg_line_length": 34.03793103448276, "alnum_prop": 0.6520109411407152, "repo_name": "snnn/tensorflow", "id": "bc3c81b2a2f19bfa89bb2e2a418ea8239a5075d9", "size": "10560", "binary": false, "copies": "9", "ref": "refs/heads/master", "path": "tensorflow/python/framework/error_interpolation.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "3325" }, { "name": "Batchfile", "bytes": "10132" }, { "name": "C", "bytes": "339398" }, { "name": "C#", "bytes": "8446" }, { "name": "C++", "bytes": "49741628" }, { "name": "CMake", "bytes": "195409" }, { "name": "Dockerfile", "bytes": "36386" }, { "name": "Go", "bytes": "1254047" }, { "name": "HTML", "bytes": "4681865" }, { "name": "Java", "bytes": "867093" }, { "name": "Jupyter Notebook", "bytes": "2604735" }, { "name": "LLVM", "bytes": "6536" }, { "name": "Makefile", "bytes": "58612" }, { "name": "Objective-C", "bytes": "15650" }, { "name": "Objective-C++", "bytes": "99243" }, { "name": "PHP", "bytes": "1357" }, { "name": "Perl", "bytes": "7536" }, { "name": "PureBasic", "bytes": "25356" }, { "name": "Python", "bytes": "41593453" }, { "name": "Ruby", "bytes": "553" }, { "name": "Shell", "bytes": "476832" }, { "name": "Smarty", "bytes": "6976" } ], "symlink_target": "" }
from __future__ import unicode_literals

from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin
from django.core.urlresolvers import reverse

from .models import Profile

User = get_user_model()


class UserProfileInline(admin.StackedInline):
    """Edit the related Profile inline on the user change page."""

    model = Profile


class NewUserAdmin(UserAdmin):
    """User admin that adds the profile inline and a permalink column."""

    inlines = [UserProfileInline]
    list_display = ('is_active', 'email', 'username', 'permalink',
                    'is_superuser', 'is_staff',)

    def permalink(self, obj):
        # The stock User model has no get_absolute_url, so the admin's
        # built-in 'View on site' link is unavailable; exposing a link
        # column in list_display is the workaround.
        url = reverse("profiles:show", kwargs={"slug": obj.profile.slug})
        pilcrow = '\xb6'  # U+00B6 PILCROW SIGN
        return '<a href="{}">{}</a>'.format(url, pilcrow)
    permalink.allow_tags = True


admin.site.unregister(User)
admin.site.register(User, NewUserAdmin)
{ "content_hash": "c36cd3c90386552e1e392a370c267880", "timestamp": "", "source": "github", "line_count": 31, "max_line_length": 71, "avg_line_length": 32.61290322580645, "alnum_prop": 0.6805143422354105, "repo_name": "jcda/django-edge-PSA-integration", "id": "927651ecdf2c8064637a428e32af4bcd0a8e18be", "size": "1011", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/profiles/admin.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "3378" }, { "name": "HTML", "bytes": "17993" }, { "name": "JavaScript", "bytes": "363" }, { "name": "Python", "bytes": "32893" } ], "symlink_target": "" }
import inspect

# Django's Manager class is only needed to filter manager objects (e.g.
# ``objects``) out of the attribute list; degrade gracefully when Django is
# absent or misconfigured, and import it once instead of on every attribute.
try:
    from django.db.models.manager import Manager
except Exception:
    Manager = None

# inspect.getargspec was deprecated in Python 3.0 and removed in 3.11;
# prefer getfullargspec when it exists, fall back for Python 2.
try:
    from inspect import getfullargspec as _getargspec
except ImportError:
    from inspect import getargspec as _getargspec

# list of methods that check everything other than attributes
ALL_BUT_ATTRIBUTES = [
    inspect.ismodule,
    inspect.isclass,
    inspect.ismethod,
    inspect.isfunction,
    inspect.isgeneratorfunction,
    inspect.isgenerator,
    inspect.istraceback,
    inspect.isframe,
    inspect.iscode,
    inspect.isroutine,
    inspect.isabstract,
    inspect.ismethoddescriptor,
    inspect.isdatadescriptor,
    inspect.isgetsetdescriptor,
    inspect.ismemberdescriptor,
]

# Methods Django adds to every model; excluded from the "methods" list.
DJANGO_GENERATED_METHODS = [
    'clean', 'clean_fields', 'delete', 'full_clean', 'save', 'save_base',
    'validate_unique'
]


class InspectModel(object):
    """Introspect a Django model (or plain class) and classify its members
    into fields, relations, attributes, methods and properties."""

    def __init__(self, model):
        self.model = model
        if not inspect.isclass(model):
            self.model = model.__class__

        self.fields = []           # standard django model fields
        self.relation_fields = []  # OneToOne or ForeignKey fields
        self.many_fields = []      # ManyToMany fields
        self.attributes = []       # standard python class attributes
        self.methods = []          # standard python class methods
        self.items = []            # groups all of the above for convenience
        self.properties = []       # python properties

        self.update_fields()
        self.update_attributes()
        self.update_methods()
        self.update_properties()

    def model_name(self):
        """Return the name of the inspected class."""
        return self.model.__name__

    def update_fields(self):
        """Set the list of django.db.models fields

        Three different types of fields:
         * standard model fields: Char, Integer...
         * relation fields: OneToOne (back and forth) and ForeignKey
         * many fields: ManyToMany (back and forth)
        """
        self.fields = []
        self.relation_fields = []
        self.many_fields = []
        # Plain (non-Django) classes have no _meta; leave all lists empty.
        opts = getattr(self.model, '_meta', None)
        if opts:
            for f in opts.get_all_field_names():
                field, model, direct, m2m = opts.get_field_by_name(f)
                name = field.name
                if not direct:  # relation or many field from another model
                    name = field.get_accessor_name()
                    field = field.field
                    if field.rel.multiple:  # m2m or fk to this model
                        self.add_item(name, self.many_fields)
                    else:  # o2o
                        self.add_item(name, self.relation_fields)
                else:  # relation, many or field from this model
                    if field.rel:  # relation or many field
                        if hasattr(field.rel, 'through'):  # m2m
                            self.add_item(name, self.many_fields)
                        else:
                            self.add_item(name, self.relation_fields)
                    else:  # standard field
                        self.add_item(name, self.fields)

    def update_attributes(self):
        """Update the list of class attributes which are not fields."""
        self.attributes = []
        for a in dir(self.model):
            if a.startswith('_') or a in self.fields:
                continue
            item = getattr(self.model, a, None)
            # Django managers are not plain attributes; skip them when
            # Django is importable (Manager is None otherwise).
            if Manager is not None and isinstance(item, Manager):
                continue
            if any(check(item) for check in ALL_BUT_ATTRIBUTES):
                continue
            self.add_item(a, self.attributes)

    def update_methods(self):
        """Update the list of public methods callable with only ``self``."""
        self.methods = []
        for m in dir(self.model):
            if m.startswith('_') or m in self.fields:
                continue
            if m in DJANGO_GENERATED_METHODS:
                continue
            if is_method_without_args(getattr(self.model, m, None)):
                self.add_item(m, self.methods)

    def update_properties(self):
        """Update the list of property names defined on the class."""
        self.properties = [name for name in dir(self.model)
                           if isinstance(getattr(self.model, name, None),
                                         property)]

    def add_item(self, item, item_type):
        """Append *item* to *item_type* and keep ``self.items`` a sorted,
        duplicate-free union of everything collected so far."""
        item_type.append(item)
        # we only want each item once
        s = set(self.items)
        s.add(item)
        self.items = list(sorted(s))


def is_method_without_args(func):
    """Check if func is a method callable with only one param (self).

    Accepts both bound/unbound methods and plain functions retrieved from a
    class: on Python 3, ``getattr(cls, name)`` returns a plain function for
    which ``inspect.ismethod`` is False, so a method-only check would find
    nothing there.
    """
    if not (inspect.ismethod(func) or inspect.isfunction(func)):
        return False
    spec = _getargspec(func)
    args, defaults = spec[0], spec[3]
    if defaults:
        args = args[:-len(defaults)]  # args with defaults don't count
    return len(args) == 1
{ "content_hash": "c3c73bc4dbcde8e7e85b5775dbe0f01a", "timestamp": "", "source": "github", "line_count": 137, "max_line_length": 116, "avg_line_length": 33.72992700729927, "alnum_prop": 0.5639471975762822, "repo_name": "20tab/upy", "id": "5ebfae42f85c0b56d4360c705d1825deb51b065c", "size": "4668", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "upy/contrib/inspect/utils.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "65284" }, { "name": "JavaScript", "bytes": "614193" }, { "name": "PHP", "bytes": "172931" }, { "name": "Python", "bytes": "357681" }, { "name": "Shell", "bytes": "1345" } ], "symlink_target": "" }
"""Common functionality for switches that aren't session oriented and have an API to talk to. This only works with the vlan_pool network allocator.""" import logging import re import requests from hil.errors import SwitchError from hil.model import SwitchSession from hil.network_allocator import get_network_allocator _CHANNEL_RE = re.compile(r'vlan/(\d+)') logger = logging.getLogger(__name__) class Session(SwitchSession): """Common base class for sessions in switches that are using an API""" def modify_port(self, port, channel, new_network): """This implements modify port that vlan centric switches can use""" if channel == 'vlan/native': if new_network is None: self._remove_native_vlan(port) self._port_shutdown(port) else: self._set_native_vlan(port, new_network) else: match = re.match(r'vlan/(\d+)', channel) assert match is not None, "Malformed channel: No VLAN ID found" vlan_id = match.groups()[0] legal = get_network_allocator(). \ is_legal_channel_for(channel, vlan_id) assert legal, "Invalid VLAN ID" if new_network is None: self._remove_vlan_from_trunk(port, vlan_id) else: assert new_network == vlan_id self._add_vlan_to_trunk(port, vlan_id) def revert_port(self, port): """Implements revert port for switches that use VLANs""" self._remove_all_vlans_from_trunk(port) if self._get_native_vlan(port) is not None: self._remove_native_vlan(port) self._port_shutdown(port) def disconnect(self): pass def get_port_networks(self, ports): """Implements get_port_networks. See hil/model.py for more details about this method""" response = {} for port in ports: native = self._get_native_vlan(port.label) if native is not None: response[port] = [native] else: response[port] = [] response[port] += self._get_vlans(port.label) return response def _make_request(self, method, url, data=None, acceptable_error_codes=()): """This can make the http request for you. 
Also accepts a list of acceptable error codes if you need.""" r = requests.request(method, url, data=data, auth=self._auth) if r.status_code >= 400 and \ r.status_code not in acceptable_error_codes: logger.error('Bad Request to switch. ' 'Response: %s and ' 'Reason: %s', r.text, r.reason) raise SwitchError('Bad Request to switch. ' 'Response: %s and ' 'Reason: %s', r.text, r.reason) return r @property def _auth(self): """Returns tuple for authentication""" return self.username, self.password def _remove_native_vlan(self, interface): """ Remove the native vlan from an interface. Args: interface: interface to remove the native vlan from """ assert False, "Subclasses MUST override _remove_native_vlan" def _port_shutdown(self, interface): """ Shuts down <interface> """ assert False, "Subclasses MUST override _port_shutdown" def _set_native_vlan(self, interface, vlan): """ Set the native vlan of an interface. Args: interface: interface to set the native vlan of vlan: vlan to set as the native vlan """ assert False, "Subclasses MUST override _set_native_vlan" def _remove_vlan_from_trunk(self, interface, vlan): """ Remove a vlan from a trunk port. Args: interface: interface to remove the vlan from vlan: vlan to remove """ assert False, "Subclasses MUST override _remove_vlan_from_trunk" def _add_vlan_to_trunk(self, interface, vlan): """ Add a vlan to a trunk port. Args: interface: interface to add the vlan to vlan: vlan to add """ assert False, "Subclasses MUST override _add_vlan_to_trunk" def _remove_all_vlans_from_trunk(self, interface): """ Remove all vlan from a trunk port. Args: interface: interface to remove the vlan from """ assert False, "Subclasses MUST override _remove_all_vlans_from_trunk" def _get_native_vlan(self, interface): """ Return the native vlan of an interface. 
Args: interface: interface to return the native vlan of Returns: Tuple of the form ('vlan/native', vlan) or None """ assert False, "Subclasses MUST override _get_native_vlan" def _get_vlans(self, interface): """ Return the vlans of a trunk port. Should not include the native vlan. `_get_native_vlan` should return that. Args: interface: interface to return the vlans of Returns: List containing the vlans of the form: [('vlan/vlan1', vlan1), ('vlan/vlan2', vlan2)] or an empy list if there are no trunked vlans. """ assert False, "Subclasses MUST override _get_vlans"
{ "content_hash": "8d12ab7f8d443a29b4744a09c85fe838", "timestamp": "", "source": "github", "line_count": 155, "max_line_length": 79, "avg_line_length": 34.76129032258064, "alnum_prop": 0.5861172976985894, "repo_name": "CCI-MOC/haas", "id": "df5151d5337980480cde33929752c8ccacdf76d8", "size": "5388", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "hil/ext/switches/_vlan_http.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "252862" } ], "symlink_target": "" }
from mod_pywebsocket import handshake


def _abort():
    """Raise the abort exception shared by both handler hooks."""
    raise handshake.AbortedByUserException("abort for test")


def web_socket_do_extra_handshake(request):
    # Deliberately abort the opening handshake so tests can exercise the
    # client-side failure path.
    _abort()


def web_socket_transfer_data(request):
    # Abort during the data-transfer phase as well.
    _abort()


# vi:sts=4 sw=4 et
{ "content_hash": "2e6f7554c55333066b4f6072bbe0b43c", "timestamp": "", "source": "github", "line_count": 12, "max_line_length": 60, "avg_line_length": 22.333333333333332, "alnum_prop": 0.7686567164179104, "repo_name": "GoogleChromeLabs/pywebsocket3", "id": "63cb541bb7ec4044745798ab97ae7ccba57e04f7", "size": "1797", "binary": false, "copies": "21", "ref": "refs/heads/master", "path": "test/testdata/handlers/abort_by_user_wsh.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "HTML", "bytes": "19422" }, { "name": "JavaScript", "bytes": "24155" }, { "name": "Python", "bytes": "419498" }, { "name": "SWIG", "bytes": "3312" } ], "symlink_target": "" }
from __future__ import unicode_literals

from django.apps import AppConfig


class OrdersConfig(AppConfig):
    """Application configuration for the ``orders`` app."""

    # Dotted path label Django uses to identify this application.
    name = 'orders'
{ "content_hash": "88767fa2dd0bd91cca089d89fdf541f3", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 39, "avg_line_length": 18.285714285714285, "alnum_prop": 0.75, "repo_name": "cmwaura/aura", "id": "f9fdc0c09929f6711efa5d002d0e4a5066cae6ba", "size": "128", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "orders/apps.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "127" }, { "name": "HTML", "bytes": "5614" }, { "name": "JavaScript", "bytes": "484" }, { "name": "Python", "bytes": "13126" } ], "symlink_target": "" }
from __future__ import unicode_literals

from django.test import TestCase

from .models import A, B, Bar, D, DataPoint, Foo, RelatedPoint


class SimpleTest(TestCase):
    def setUp(self):
        self.a1 = A.objects.create()
        self.a2 = A.objects.create()
        for x in range(20):
            B.objects.create(a=self.a1)
            D.objects.create(a=self.a1)

    def test_nonempty_update(self):
        """
        Test that update changes the right number of rows
        for a nonempty queryset
        """
        num_updated = self.a1.b_set.update(y=100)
        self.assertEqual(num_updated, 20)
        cnt = B.objects.filter(y=100).count()
        self.assertEqual(cnt, 20)

    def test_empty_update(self):
        """
        Test that update changes the right number of rows
        for an empty queryset
        """
        num_updated = self.a2.b_set.update(y=100)
        self.assertEqual(num_updated, 0)
        cnt = B.objects.filter(y=100).count()
        self.assertEqual(cnt, 0)

    def test_nonempty_update_with_inheritance(self):
        """
        Test that update changes the right number of rows
        for a nonempty queryset when the update affects only
        a base table
        """
        num_updated = self.a1.d_set.update(y=100)
        self.assertEqual(num_updated, 20)
        cnt = D.objects.filter(y=100).count()
        self.assertEqual(cnt, 20)

    def test_empty_update_with_inheritance(self):
        """
        Test that update changes the right number of rows
        for an empty queryset when the update affects only
        a base table
        """
        num_updated = self.a2.d_set.update(y=100)
        self.assertEqual(num_updated, 0)
        cnt = D.objects.filter(y=100).count()
        self.assertEqual(cnt, 0)

    def test_foreign_key_update_with_id(self):
        """
        Test that update works using <field>_id for foreign keys
        """
        num_updated = self.a1.d_set.update(a_id=self.a2)
        self.assertEqual(num_updated, 20)
        self.assertEqual(self.a2.d_set.count(), 20)


class AdvancedTests(TestCase):

    def setUp(self):
        self.d0 = DataPoint.objects.create(name="d0", value="apple")
        self.d2 = DataPoint.objects.create(name="d2", value="banana")
        self.d3 = DataPoint.objects.create(name="d3", value="banana")
        self.r1 = RelatedPoint.objects.create(name="r1", data=self.d3)

    def test_update(self):
        """
        Objects are updated by first filtering the candidates into a
        queryset and then calling the update() method. It executes
        immediately and returns the number of rows matched.
        """
        resp = DataPoint.objects.filter(value="apple").update(name="d1")
        self.assertEqual(resp, 1)
        resp = DataPoint.objects.filter(value="apple")
        self.assertEqual(list(resp), [self.d0])

    def test_update_multiple_objects(self):
        """
        We can update multiple objects at once.
        """
        resp = DataPoint.objects.filter(value="banana").update(
            value="pineapple")
        self.assertEqual(resp, 2)
        self.assertEqual(DataPoint.objects.get(name="d2").value, 'pineapple')

    def test_update_fk(self):
        """
        Foreign key fields can also be updated, although you can only update
        the object referred to, not anything inside the related object.
        """
        resp = RelatedPoint.objects.filter(name="r1").update(data=self.d0)
        self.assertEqual(resp, 1)
        resp = RelatedPoint.objects.filter(data__name="d0")
        self.assertEqual(list(resp), [self.r1])

    def test_update_multiple_fields(self):
        """
        Multiple fields can be updated at once
        """
        resp = DataPoint.objects.filter(value="apple").update(
            value="fruit", another_value="peach")
        self.assertEqual(resp, 1)
        d = DataPoint.objects.get(name="d0")
        self.assertEqual(d.value, 'fruit')
        self.assertEqual(d.another_value, 'peach')

    def test_update_all(self):
        """
        In the rare case you want to update every instance of a model, update()
        is also a manager method.
        """
        self.assertEqual(DataPoint.objects.update(value='thing'), 3)
        resp = DataPoint.objects.values('value').distinct()
        self.assertEqual(list(resp), [{'value': 'thing'}])

    def test_update_slice_fail(self):
        """
        We do not support update on already sliced query sets.
        """
        method = DataPoint.objects.all()[:2].update
        self.assertRaises(AssertionError, method,
                          another_value='another thing')

    def test_update_respects_to_field(self):
        """
        Update of an FK field which specifies a to_field works.
        """
        a_foo = Foo.objects.create(target='aaa')
        b_foo = Foo.objects.create(target='bbb')
        bar = Bar.objects.create(foo=a_foo)
        self.assertEqual(bar.foo_id, a_foo.target)
        bar_qs = Bar.objects.filter(pk=bar.pk)
        self.assertEqual(bar_qs[0].foo_id, a_foo.target)
        bar_qs.update(foo=b_foo)
        self.assertEqual(bar_qs[0].foo_id, b_foo.target)
{ "content_hash": "442cf43dac57cb3f7fa821dc23f292ea", "timestamp": "", "source": "github", "line_count": 140, "max_line_length": 81, "avg_line_length": 36.57857142857143, "alnum_prop": 0.6104276508494435, "repo_name": "52ai/django-ccsds", "id": "010151d61e9ccaceec55a043a9f01e04edd70b47", "size": "5121", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/update/tests.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "43623" }, { "name": "HTML", "bytes": "173769" }, { "name": "JavaScript", "bytes": "106416" }, { "name": "Makefile", "bytes": "125" }, { "name": "Python", "bytes": "10925166" }, { "name": "Shell", "bytes": "934" }, { "name": "Smarty", "bytes": "130" } ], "symlink_target": "" }
from speech_style import *
from kb import *
from task3 import get_restaurants, rank_restaurants


def modify_options(dialogs, kb, accept_prob=0.25, save='all'):
    """Expand bAbI task-5 dialogs into personalized accept/reject dialogs.

    Each source dialog is turned into two candidate dialogs — one opening
    with a 'veg <speciality>' preference and one with 'non-veg <speciality>'
    (speciality drawn at random from the dialog's candidate restaurants).
    The KB facts of every candidate restaurant are injected, then the bot
    proposes restaurants one at a time (ranked by ``rank_restaurants``) and
    the user accepts or rejects each proposal.

    :param dialogs: list of dialogs; a dialog is a list of turns, where a
        turn is ``[user_utterance]`` or ``[user_utterance, bot_utterance]``.
    :param kb: dict mapping restaurant name -> dict of ``R_*`` attributes.
    :param accept_prob: probability of accepting a non-final proposal.
    :param save: ``'random'`` keeps one of the two candidate dialogs per
        source dialog (chosen uniformly); any other value keeps both.
    :return: list of generated dialogs.
    """
    # Fixed set of KB attributes injected for every candidate restaurant.
    attrib_list = ['R_phone', 'R_cuisine', 'R_address', 'R_location',
                   'R_number', 'R_price', 'R_rating', 'R_type',
                   'R_speciality', 'R_social_media', 'R_parking',
                   'R_public_transport']
    # Canned user utterances for the accept/reject negotiation.
    responses = {
        'reject': ["do you have something else",
                   "no i don't like that",
                   "no this does not work for me"],
        'accept': ["that looks great", "i love that",
                   "let's do it", "it's perfect"],
    }
    # Bot utterances that mark the closing turns of the source dialog.
    closing_markers = ['here it is',
                       'is there anything i can help you with',
                       "you're welcome"]

    new_dialogs = []
    for dialog in dialogs:
        restaurants = get_restaurants(dialog)
        specialities = set(kb[restaurant]['R_speciality']
                           for restaurant in restaurants)
        # Two candidate dialogs per source dialog: veg and non-veg profiles.
        temp_dialog_set = [
            [['veg ' + np.random.choice(list(specialities))]],
            [['non-veg ' + np.random.choice(list(specialities))]],
        ]

        # Copy the original turns up to (excluding) the first turn that
        # mentions a concrete restaurant.
        for turn in dialog:
            if 'resto_' in turn[0]:
                break
            for temp_dialog in temp_dialog_set:
                temp_dialog.append(turn)

        # Inject the KB facts for every candidate restaurant.
        for restaurant in restaurants:
            for temp_dialog in temp_dialog_set:
                for attrib in attrib_list:
                    temp_dialog.append(
                        [restaurant + ' ' + attrib + ' ' + kb[restaurant][attrib]])

        # Generate the proposal/accept-reject exchange per candidate dialog.
        for temp_dialog in temp_dialog_set:
            first_turn = temp_dialog[0][0].split(' ')
            ranked_restaurants = rank_restaurants(
                restaurants, first_turn[0], first_turn[1], kb)
            for restaurant in ranked_restaurants:
                temp_dialog.append(
                    ['<SILENCE>',
                     'what do you think of this option: ' + restaurant])
                if restaurant == ranked_restaurants[-1]:
                    # The last remaining option is always accepted.
                    choice = 'accept'
                else:
                    choice = np.random.choice(['accept', 'reject'],
                                              p=[accept_prob, 1 - accept_prob])
                if choice == 'accept':
                    temp_dialog.append([np.random.choice(responses['accept']),
                                        'great let me do the reservation'])
                    break
                temp_dialog.append([np.random.choice(responses['reject']),
                                    'sure let me find an other option for you'])

        # Keep the closing turns (contact/directions/thanks) of the source.
        for turn in dialog:
            if len(turn) == 2:
                for marker in closing_markers:
                    if marker in turn[1]:
                        for temp_dialog in temp_dialog_set:
                            temp_dialog.append(turn)

        if save == 'random':
            new_dialogs.append(
                temp_dialog_set[np.random.choice(len(temp_dialog_set))])
        else:
            for temp_dialog in temp_dialog_set:
                new_dialogs.append(temp_dialog)
    return new_dialogs


def modify_info(dialogs, kb, utterences):
    """Personalize the contact/direction answers at the end of each dialog.

    Contact requests are answered with social media info for young users and
    a phone number otherwise; direction requests are answered with the
    address plus public-transport info for cheap restaurants and parking info
    otherwise.  Turns are modified in place.

    :param dialogs: dialogs whose first turn starts with '<gender> <age> ...'.
    :param kb: dict mapping restaurant name -> dict of ``R_*`` attributes.
    :param utterences: dict mapping a canonical bot utterance to its six
        per-profile phrasings (indexed by profile).
    :return: list of modified dialogs.
    """
    profiles = ['male young', 'female young', 'male middle-aged',
                'female middle-aged', 'male elderly', 'female elderly']
    # Replacement user queries for the post-reservation requests.
    queries = {
        'contact': ['do you have its contact details',
                    'may i have the contact details of the restaurant',
                    'what are the contact details of the restaurant'],
        'directions': ['do you have direction information',
                       'may i have the direction information to the restaurant',
                       'can you provide direction to the restaurant'],
    }

    new_dialogs = []
    for dialog in dialogs:
        profile = profiles.index(' '.join(dialog[0][0].split(' ')[:2]))
        # Find the accepted restaurant and the index of the reservation turn.
        # NOTE(review): assumes every dialog contains a reservation turn;
        # otherwise `i` ends up at the last turn and `restaurant` may stay ''.
        restaurant = ''
        for i, turn in enumerate(dialog):
            if len(turn) == 2:
                if utterences['what do you think of this option:'][profile] in turn[1]:
                    restaurant = turn[1].split(': ')[1]
                elif turn[1] == utterences['great let me do the reservation'][profile]:
                    break
        temp_dialog = dialog[:i]
        for turn in dialog[i:]:
            if 'phone number' in turn[0]:
                turn[0] = np.random.choice(queries['contact'])
                if temp_dialog[0][0].split(' ')[1] == 'young':
                    turn[1] = (utterences['here it is'][profile] + ' ' +
                               kb[restaurant]['R_social_media'])
                else:
                    turn[1] = (utterences['here it is'][profile] + ' ' +
                               kb[restaurant]['R_phone'])
            if 'address' in turn[0]:
                turn[0] = np.random.choice(queries['directions'])
                if kb[restaurant]['R_price'] == 'cheap':
                    turn[1] = (utterences['here it is'][profile] + ' ' +
                               kb[restaurant]['R_address'] + ' ' +
                               kb[restaurant]['R_public_transport'])
                else:
                    turn[1] = (utterences['here it is'][profile] + ' ' +
                               kb[restaurant]['R_address'] + ' ' +
                               kb[restaurant]['R_parking'])
            temp_dialog.append(turn)
        new_dialogs.append(temp_dialog)
    return new_dialogs


# Train/dev/test splits shared by every generated dataset variant.
_SPLITS = ('dev', 'trn', 'tst', 'tst-OOV')
_SRC_TEMPLATE = '../data/dialog-bAbI-tasks/dialog-babi-task5-full-dialogs-%s.txt'
_DST_TEMPLATE = 'personalized-dialog-task5-full-dialogs-%s.txt'


def _generate_dataset(out_dir, kb, utterences, options_save, style_save):
    """Run the full pipeline for every split and write results to *out_dir*.

    Pipeline: read bAbI dialogs -> modify_options -> modify_speech_style ->
    modify_info -> save.  ``style_save=None`` lets modify_speech_style use
    its default save behaviour (as the original full-dataset run did).
    """
    for split in _SPLITS:
        dialogs = read_babi(_SRC_TEMPLATE % split)
        new_dialogs = modify_options(dialogs, kb, save=options_save)
        if style_save is None:
            new_dialogs = modify_speech_style(new_dialogs, utterences, 'modified')
        else:
            new_dialogs = modify_speech_style(new_dialogs, utterences,
                                              setting='modified', save=style_save)
        new_dialogs = modify_info(new_dialogs, kb, utterences)
        save_babi(new_dialogs, out_dir + _DST_TEMPLATE % split)


if __name__ == '__main__':
    utterences = load_utterences()
    kb = read_kb('../data/personalized-dialog-dataset/personalized-dialog-kb-all.txt')

    # Full dataset: keep both diet profiles per source dialog.
    _generate_dataset('../data/personalized-dialog-dataset/',
                      kb, utterences, options_save='all', style_save=None)

    # Small dataset: keep one randomly chosen variant per source dialog.
    _generate_dataset('../data/personalized-dialog-dataset/small/',
                      kb, utterences, options_save='random', style_save='random')

    # Per-profile datasets: style_save is the profile index (as a string),
    # matching modify_speech_style's convention for selecting one profile.
    profile_dirs = ['male_young', 'female_young', 'male_middle-aged',
                    'female_middle-aged', 'male_elderly', 'female_elderly']
    for index, profile_dir in enumerate(profile_dirs):
        _generate_dataset(
            '../data/personalized-dialog-dataset/split-by-profile/%s/' % profile_dir,
            kb, utterences, options_save='random', style_save=str(index))
{ "content_hash": "4f4218897c6db521802943dc884310c2", "timestamp": "", "source": "github", "line_count": 307, "max_line_length": 191, "avg_line_length": 64.86644951140065, "alnum_prop": 0.6732951692276791, "repo_name": "chaitjo/personalized-dialog", "id": "e2bc999aadbe3d59e1e467533cac0cdc592c1361", "size": "19914", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "scripts/task5.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "129866" }, { "name": "Shell", "bytes": "4749" } ], "symlink_target": "" }
import scrapy from tutorial.items import QuoteItem class QuotesSpider(scrapy.Spider): name = "quotes" start_urls = [ 'http://quotes.toscrape.com/page/1/', ] def parse(self, response): for quote in response.xpath('//div[@class="quote"]'): item = QuoteItem() item['text'] = quote.xpath('span[@class="text"]/text()').extract_first() item['author'] = quote.xpath('span/small/text()').extract_first() yield item next_page = response.xpath('//li[@class="next"]/a/@href').extract_first() if next_page: next_page = response.urljoin(next_page) yield scrapy.Request(next_page, callback=self.parse)
{ "content_hash": "6a8dd9e1d7ae994713871af35bd4c69e", "timestamp": "", "source": "github", "line_count": 20, "max_line_length": 84, "avg_line_length": 35.55, "alnum_prop": 0.5921237693389592, "repo_name": "gnublet/py_explorations", "id": "95ade8866c03774d5758ddc29ace752d50f079b6", "size": "711", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "general/tutorial/tutorial/spiders/quotes_spider.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "11429" }, { "name": "Jupyter Notebook", "bytes": "2034669" }, { "name": "Python", "bytes": "21419" } ], "symlink_target": "" }
import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): db.alter_column('sentry_projectoptions', 'key', self.gf('django.db.models.fields.CharField')(max_length=64)) def backwards(self, orm): pass models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 
'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'sentry.event': { 'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"}, 'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), 'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}), 'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}), 'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'level': ('django.db.models.fields.PositiveIntegerField', [], 
{'default': '40', 'db_index': 'True', 'blank': 'True'}), 'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}), 'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}), 'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'}) }, 'sentry.filterkey': { 'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'FilterKey'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}) }, 'sentry.filtervalue': { 'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'sentry.group': { 'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"}, 'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), 'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 
'True', 'db_column': "'view'", 'blank': 'True'}), 'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}), 'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}) }, 'sentry.groupbookmark': { 'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'}, 'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'project': 
('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['auth.User']"}) }, 'sentry.groupmeta': { 'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'}, 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'value': ('django.db.models.fields.TextField', [], {}) }, 'sentry.lostpasswordhash': { 'Meta': {'object_name': 'LostPasswordHash'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}) }, 'sentry.messagecountbyminute': { 'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'}, 'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}), 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}) }, 'sentry.messagefiltervalue': { 'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'}, 'first_seen': 
('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'sentry.messageindex': { 'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'}, 'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'sentry.option': { 'Meta': {'object_name': 'Option'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}), 'value': ('picklefield.fields.PickledObjectField', [], {}) }, 'sentry.pendingteammember': { 'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}), 'type': 
('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'sentry.project': { 'Meta': {'object_name': 'Project'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['auth.User']"}), 'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'}), 'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'}) }, 'sentry.projectcountbyminute': { 'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'}, 'date': ('django.db.models.fields.DateTimeField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}) }, 'sentry.projectkey': { 'Meta': {'object_name': 'ProjectKey'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}), 'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 
'unique': 'True', 'null': 'True'}), 'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}), 'user_added': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['auth.User']"}) }, 'sentry.projectoption': { 'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}), 'value': ('picklefield.fields.PickledObjectField', [], {}) }, 'sentry.searchdocument': { 'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}), 'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}) }, 'sentry.searchtoken': { 'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'}, 'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}), 'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}), 'id': ('django.db.models.fields.AutoField', 
[], {'primary_key': 'True'}), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), 'token': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'sentry.team': { 'Meta': {'object_name': 'Team'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}) }, 'sentry.teammember': { 'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}), 'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['auth.User']"}) }, 'sentry.useroption': { 'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'value': ('picklefield.fields.PickledObjectField', [], {}) } } complete_apps = ['sentry']
{ "content_hash": "0c6a44fc773ddb52b03f1de9467b540a", "timestamp": "", "source": "github", "line_count": 249, "max_line_length": 182, "avg_line_length": 81.55020080321285, "alnum_prop": 0.5450605732295873, "repo_name": "NickPresta/sentry", "id": "79b6a027ca1ef579de6e9532fb7e9c15d3266fba", "size": "20330", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "src/sentry/migrations/0070_projectoption_key_length.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "JavaScript", "bytes": "779841" }, { "name": "Perl", "bytes": "618" }, { "name": "Python", "bytes": "2527496" }, { "name": "Shell", "bytes": "4106" } ], "symlink_target": "" }
""" Display the X selection. Configuration parameters: cache_timeout: how often we refresh this module in seconds (default is at every py3status configured interval) command: the xsel command to run (default 'xsel') max_size: stip the selection to this value (default 15) symmetric: show the beginning and the end of the selection string with respect to configured max_size. Requires: - xsel command line tool @author Sublim3 umbsublime@gamil.com @license BSD """ import shlex from time import time from subprocess import check_output class Py3status: """ """ # available configuration parameters cache_timeout = 0.5 command = 'xsel' max_size = 15 symmetric = True color = "#0066FF" def xsel(self, i3s_output_list, i3s_config): """ Display the content of xsel. """ current_value = check_output(shlex.split(self.command)) if len(current_value) >= self.max_size: if self.symmetric is True: split = int(self.max_size / 2) - 1 current_value = current_value[:split].decode( 'utf-8') + '..' + current_value[-split:].decode('utf-8') else: current_value = current_value[:self.max_size] response = { 'cached_until': time() + self.cache_timeout, 'full_text': current_value.decode('utf-8'), 'color': self.color } return response if __name__ == "__main__": """ Test this module by calling it directly. """ from time import sleep x = Py3status() config = { 'color_bad': '#FF0000', 'color_degraded': '#FFFF00', 'color_good': '#00FF00' } while True: print(x.xsel([], config)) sleep(1)
{ "content_hash": "2ea81e5f4e5edddc185934739341c860", "timestamp": "", "source": "github", "line_count": 68, "max_line_length": 76, "avg_line_length": 26.823529411764707, "alnum_prop": 0.5805921052631579, "repo_name": "UmBsublime/py3status", "id": "51515a333f2f0f602992abdaa6391cac828da26e", "size": "1848", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "py3status/modules/xsel.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "187156" } ], "symlink_target": "" }
from preggy import expect

from tests.base import FilterTestCase


class ContrastFilterTestCase(FilterTestCase):
    """Checks the contrast filter output against a reference fixture."""

    def test_contrast_filter(self):
        """Filtered image must be near-identical (SSIM > 0.98) to the fixture."""
        filtered = self.get_filtered(
            'source.jpg', 'thumbor.filters.contrast', 'contrast(20)')
        reference = self.get_fixture('contrast.jpg')
        similarity = self.get_ssim(filtered, reference)
        expect(similarity).to_be_greater_than(0.98)
{ "content_hash": "6fef767ec5f179dcb9c1aa4056f3cd36", "timestamp": "", "source": "github", "line_count": 12, "max_line_length": 91, "avg_line_length": 32.166666666666664, "alnum_prop": 0.7020725388601037, "repo_name": "BetterCollective/thumbor", "id": "51efa33ec65634f634887e75dec72a8a4a6ef4aa", "size": "637", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "tests/filters/test_contrast.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "58039" }, { "name": "JavaScript", "bytes": "409" }, { "name": "Makefile", "bytes": "2069" }, { "name": "Python", "bytes": "533298" } ], "symlink_target": "" }
""" rstblog.utils ~~~~~~~~~~~~~ Various utilities. :copyright: (c) 2010 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ from math import ceil from jinja2 import Markup class Pagination(object): """Internal helper class for paginations""" def __init__(self, builder, entries, page, per_page, url_key): self.builder = builder self.entries = entries self.page = page self.per_page = per_page self.url_key = url_key @property def total(self): return len(self.entries) @property def pages(self): return int(ceil(self.total / float(self.per_page))) def get_prev(self): return Pagination(self.builder, self.entries, self.page - 1, self.per_page, self.url_key) @property def prev_num(self): """Number of the previous page.""" return self.page - 1 @property def has_prev(self): """True if a previous page exists""" return self.page > 1 def get_next(self): return Pagination(self.builder, self.entries, self.page + 1, self.per_page, self.url_key) @property def has_next(self): """True if a next page exists.""" return self.page < self.pages @property def next_num(self): """Number of the next page""" return self.page + 1 def get_slice(self): return self.entries[(self.page - 1) * self.per_page: self.page * self.per_page] def iter_pages(self, left_edge=2, left_current=2, right_current=5, right_edge=2): """Iterates over the page numbers in the pagination. The four parameters control the thresholds how many numbers should be produced from the sides. Skipped page numbers are represented as `None`. """ last = 0 for num in xrange(1, self.pages + 1): valid = ( num <= left_edge or ( num > self.page - left_current - 1 and num < self.page + right_current ) or num > self.pages - right_edge ) if valid: if last + 1 != num: yield None yield num last = num def __str__(self): return unicode(self).encode('utf-8') def __unicode__(self): return self.builder.render_template('_pagination.html', { 'pagination': self }) def __html__(self): return Markup(unicode(self))
{ "content_hash": "a662d8e8fa30f7eabd2bf1ed90a235da", "timestamp": "", "source": "github", "line_count": 97, "max_line_length": 77, "avg_line_length": 27.24742268041237, "alnum_prop": 0.5331063185773742, "repo_name": "oddbird/rstblog", "id": "f6b2ced29175fa1f95d393e5f34d8a4c38e15c67", "size": "2667", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "rstblog/utils.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "HTML", "bytes": "4340" }, { "name": "Python", "bytes": "53117" } ], "symlink_target": "" }
"""engine.SCons.Tool.icc Tool-specific initialization for the OS/2 icc compiler. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/icc.py 3842 2008/12/20 22:59:52 scons" import cc def generate(env): """Add Builders and construction variables for the OS/2 to an Environment.""" cc.generate(env) env['CC'] = 'icc' env['CCCOM'] = '$CC $CFLAGS $CCFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS /c $SOURCES /Fo$TARGET' env['CXXCOM'] = '$CXX $CXXFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS /c $SOURCES /Fo$TARGET' env['CPPDEFPREFIX'] = '/D' env['CPPDEFSUFFIX'] = '' env['INCPREFIX'] = '/I' env['INCSUFFIX'] = '' env['CFILESUFFIX'] = '.c' env['CXXFILESUFFIX'] = '.cc' def exists(env): return env.Detect('icc')
{ "content_hash": "c9c94369b5f7e2980aeed72b0bb91d33", "timestamp": "", "source": "github", "line_count": 53, "max_line_length": 107, "avg_line_length": 39.056603773584904, "alnum_prop": 0.7169082125603865, "repo_name": "james-dibble/Embedded-Systems-Assignment", "id": "ac6d6aadeab9c62c3780cae5b7bb6b433d225155", "size": "2070", "binary": false, "copies": "12", "ref": "refs/heads/master", "path": "EmbeddedSystems.Applications/EmbeddedSystems.Client/jsoncpp-src-0.6.0-rc2/scons-local-1.2.0/SCons/Tool/icc.py", "mode": "33188", "license": "mit", "language": [ { "name": "ASP", "bytes": "480" }, { "name": "C", "bytes": "5203" }, { "name": "C#", "bytes": "207641" }, { "name": "C++", "bytes": "330101" }, { "name": "CSS", "bytes": "1175" }, { "name": "IDL", "bytes": "1618" }, { "name": "Python", "bytes": "1803809" } ], "symlink_target": "" }
from setuptools import setup, find_packages
from codecs import open
import os
import sys

from libmproxy import version

# Based on https://github.com/pypa/sampleproject/blob/master/setup.py
# and https://python-packaging-user-guide.readthedocs.org/

here = os.path.abspath(os.path.dirname(__file__))

# The PyPI long description is the project's README verbatim.
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

# Core dependencies
# Do not use the "~=" compatible release specifier.
# This will break `pip install` on systems with old setuptools versions.
deps = {
    "netlib>=%s, <%s" % (version.MINORVERSION, version.NEXT_MINORVERSION),
    "tornado>=4.3.0, <4.4",
    "configargparse>=0.10.0, <0.11",
    "pyperclip>=1.5.22, <1.6",
    "blinker>=1.4, <1.5",
    "pyparsing>=2.0.5, <2.1",
    "html2text==2015.11.4",
    "construct>=2.5.2, <2.6",
    "six>=1.10.0, <1.11",
    "lxml==3.4.4",  # there are no Windows wheels for newer versions, so we pin this.
    "Pillow>=3.0.0, <3.1",
    "watchdog>=0.8.3, <0.9",
}

# A script -> additional dependencies dict.
# Each key becomes a console entry point below; its value set holds extra
# requirements only that script needs.
scripts = {
    "mitmproxy": {
        "urwid>=1.3.1, <1.4",
    },
    "mitmdump": {
        "click>=6.2, <6.3",
    },
    "mitmweb": set()
}

# Developer dependencies
dev_deps = {
    "mock>=1.0.1",
    "pytest>=2.8.0",
    "pytest-xdist>=1.13.1",
    "pytest-cov>=2.1.0",
    "coveralls>=0.4.1",
    "pathod>=%s, <%s" % (version.MINORVERSION, version.NEXT_MINORVERSION),
    "sphinx>=1.3.1",
    "sphinx-autobuild>=0.5.2",
    "sphinxcontrib-documentedlist>=0.2",
}

example_deps = {
    "pytz==2015.7",
    "harparser>=0.2, <0.3",
    "beautifulsoup4>=4.4.1, <4.5",
}

# Add *all* script dependencies to developer dependencies.
for script_deps in scripts.values():
    dev_deps.update(script_deps)

# Remove mitmproxy for Windows support.
# (The curses UI does not run on Windows, so only mitmdump/mitmweb ship.)
if os.name == "nt":
    del scripts["mitmproxy"]
    deps.add("pydivert>=0.0.7")  # Transparent proxying on Windows

# Add dependencies for available scripts as core dependencies.
for script_deps in scripts.values():
    deps.update(script_deps)

# Backport of the enum module for the examples on Python < 3.4.
if sys.version_info < (3, 4):
    example_deps.add("enum34>=1.0.4, <1.1")

# "name = libmproxy.main:name" entry point for each remaining script.
console_scripts = ["%s = libmproxy.main:%s" % (s, s) for s in scripts.keys()]

setup(
    name="mitmproxy",
    version=version.VERSION,
    description="An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.",
    long_description=long_description,
    url="http://mitmproxy.org",
    author="Aldo Cortesi",
    author_email="aldo@corte.si",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Environment :: Console :: Curses",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: POSIX",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Topic :: Security",
        "Topic :: Internet",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: Proxy Servers",
        "Topic :: Software Development :: Testing"
    ],
    packages=find_packages(),
    include_package_data=True,
    entry_points={
        'console_scripts': console_scripts},
    install_requires=list(deps),
    # Optional feature sets installable as e.g. `pip install mitmproxy[dev]`.
    extras_require={
        'dev': list(dev_deps),
        'contentviews': [
            "pyamf>=0.7.2, <0.8",
            "protobuf>=2.6.1, <2.7",
            "cssutils>=1.0.1, <1.1"
        ],
        'examples': list(example_deps)
    }
)
{ "content_hash": "7d127af073cc100b288d1e9ee4a80a08", "timestamp": "", "source": "github", "line_count": 118, "max_line_length": 125, "avg_line_length": 31.347457627118644, "alnum_prop": 0.6082725060827251, "repo_name": "bazzinotti/mitmproxy", "id": "a59638078599891ef7148c3555d183727c1dd34e", "size": "3699", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "425" }, { "name": "CSS", "bytes": "194068" }, { "name": "HTML", "bytes": "2824" }, { "name": "JavaScript", "bytes": "1755960" }, { "name": "Python", "bytes": "664196" }, { "name": "Shell", "bytes": "2303" } ], "symlink_target": "" }