text stringlengths 4 1.02M | meta dict |
|---|---|
import OSC #de pyosc
import RPi.GPIO as GPIO
import time
import argparse
import errno
import datetime
import sys
__author__ = 'gllmAr'
# gestion des arguments
parser = argparse.ArgumentParser(description='RPI Gpio interrupt OSC')
parser.add_argument('-g','--gpioBoard', help='gpio Board Mode (0=GPIO.BCM, 1=GPIO.BOARD)', default='0', type=int)
parser.add_argument('-i','--inputPin', help='input Pin', default='23', type=int)
parser.add_argument('-d','--destination',help='destination ip address', default='127.0.0.1')
parser.add_argument('-p','--outputPort', help='Output Port ', default='9999', type=int)
parser.add_argument('-o','--oscPath', help='Osc path', default='/gpioOSC')
parser.add_argument('-b','--bouncetime', help='(de)bouncetime', default='200', type=int)
parser.add_argument('-r','--resistance', help='pull_up_down resistance (0=off, 1=pullUp, 2=pullDown)', default='2', type=int)
parser.add_argument('-t','--trigger', help='trigger mode (0=FALLING, 1=RISING, 2=BOTH)', default='0', type=int)
parser.add_argument('-D','--Debug', help='Debug mode on ', default='1', type=int)
args = parser.parse_args()
# definir le callback,
# sera envoye dans un thread par la fonction gpio.add_event_detect
flag = 0 # quand un interrupt est detecte ajouter 1 et envoyer le message OSC
def flagUp(channel):
global flag # rendre la variable globale (pas tres clair pourquoi ca ne communique pas )
flag += 1 # incrementer de 1 a chaque interrupt
gpioBoardMode = "0"
#PUD_OFF, PUD_UP or PUD_DOWN
if args.gpioBoard == 0:
gpioBoardMode = "GPIO.BCM"
GPIO.setmode(GPIO.BCM)
elif args.gpioBoard == 1:
gpioBoardMode = "GPIO.BOARD"
GPIO.setmode(GPIO.BOARD)
else:
print("gpioBoard number not valid(try 0/1)")
sys.exit()
resistanceType = "0"
#PUD_OFF, PUD_UP or PUD_DOWN
if args.resistance == 0:
resistanceType = "GPIO.PUD_OFF"
GPIO.setup(args.inputPin, GPIO.IN, pull_up_down=GPIO.PUD_OFF)
elif args.resistance == 1:
resistanceType = "GPIO.PUD_UP"
GPIO.setup(args.inputPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
elif args.resistance == 2:
resistanceType = "GPIO.PUD_DOWN"
GPIO.setup(args.inputPin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
else:
print("resistance number not valid(try 0/1/2)")
sys.exit()
triggerType = "0"
#GPIO.FALLING , GPIO.RISING, GPIO.BOTH
if args.trigger == 0:
triggerType = "GPIO.FALLING"
GPIO.add_event_detect(args.inputPin, GPIO.FALLING, callback=flagUp, bouncetime=args.bouncetime)
elif args.trigger == 1:
triggerType = "GPIO.RISING"
GPIO.add_event_detect(args.inputPin, GPIO.RISING, callback=flagUp, bouncetime=args.bouncetime)
elif args.trigger == 2:
triggerType = "GPIO.BOTH"
GPIO.add_event_detect(args.inputPin, GPIO.BOTH, callback=flagUp, bouncetime=args.bouncetime)
else:
print("trigger number not valid(try 0/1/2)")
sys.exit()
## Printer les arguments ##
print ("RPI Gpio interrupt OSC")
print ("destination Address: %s" % args.destination )
print ("outputPort: %s" % args.outputPort )
print ("InputPin: %s" % args.inputPin )
print ("gpioBoard: %s" % gpioBoardMode )
print ("oscPath: %s" % args.oscPath )
print ("bouncetime: %s" % args.bouncetime )
print ("resistance: %s" % resistanceType )
print ("trigger: %s" % triggerType )
print ("Debug: %s" % args.Debug )
## definir la fonction d envoi ##
c = OSC.OSCClient()
def sendOSC(value):
try:
c.connect((args.destination, args.outputPort)) # connection
oscmsg = OSC.OSCMessage()
oscmsg.setAddress(args.oscPath)
oscmsg.append(1)
c.send(oscmsg)
if args.Debug:
now = datetime.datetime.now()
print now.isoformat()
except OSC.OSCClientError:
print "Connection Refused"
# une loop infinie sur le thread principale qui attend
# et qui envois si la variable change
try:
while 1:
if flag > 0:
sendOSC(1)
flag = 0
time.sleep(.1)
# sortir du programme avec CTRL+C et cleaner le GPIO
except KeyboardInterrupt:
GPIO.cleanup() # clean up GPIO on CTRL+C exit
| {
"content_hash": "fde2cdd27e4315e4781ca891fffb72e6",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 126,
"avg_line_length": 33.260162601626014,
"alnum_prop": 0.6770960645318993,
"repo_name": "gllmAR/rpi-interuptOsc",
"id": "a5de18557b7e3f577d357a8fb088ec87301bcf33",
"size": "4178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "interuptOsc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4178"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
unified_strdate,
)
class NormalbootsIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?normalboots\.com/video/(?P<videoid>[0-9a-z-]*)/?$'
_TEST = {
'url': 'http://normalboots.com/video/home-alone-games-jontron/',
'md5': '8bf6de238915dd501105b44ef5f1e0f6',
'info_dict': {
'id': 'home-alone-games-jontron',
'ext': 'mp4',
'title': 'Home Alone Games - JonTron - NormalBoots',
'description': 'Jon is late for Christmas. Typical. Thanks to: Paul Ritchey for Co-Writing/Filming: http://www.youtube.com/user/ContinueShow Michael Azzi for Christmas Intro Animation: http://michafrar.tumblr.com/ Jerrod Waters for Christmas Intro Music: http://www.youtube.com/user/xXJerryTerryXx Casey Ormond for ‘Tense Battle Theme’:\xa0http://www.youtube.com/Kiamet/',
'uploader': 'JonTron',
'upload_date': '20140125',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('videoid')
webpage = self._download_webpage(url, video_id)
video_uploader = self._html_search_regex(r'Posted\sby\s<a\shref="[A-Za-z0-9/]*">(?P<uploader>[A-Za-z]*)\s</a>',
webpage, 'uploader')
raw_upload_date = self._html_search_regex('<span style="text-transform:uppercase; font-size:inherit;">[A-Za-z]+, (?P<date>.*)</span>',
webpage, 'date')
video_upload_date = unified_strdate(raw_upload_date)
player_url = self._html_search_regex(r'<iframe\swidth="[0-9]+"\sheight="[0-9]+"\ssrc="(?P<url>[\S]+)"', webpage, 'url')
player_page = self._download_webpage(player_url, video_id)
video_url = self._html_search_regex(r"file:\s'(?P<file>[^']+\.mp4)'", player_page, 'file')
return {
'id': video_id,
'url': video_url,
'title': self._og_search_title(webpage),
'description': self._og_search_description(webpage),
'thumbnail': self._og_search_thumbnail(webpage),
'uploader': video_uploader,
'upload_date': video_upload_date,
}
| {
"content_hash": "67c993a1307dcaac448beb29fbe21040",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 384,
"avg_line_length": 44.82,
"alnum_prop": 0.6010709504685409,
"repo_name": "Grassboy/plugin.video.plurkTrend",
"id": "25e71a56e196d9cf7f9d2423c47293b01e46cd24",
"size": "2263",
"binary": false,
"copies": "19",
"ref": "refs/heads/master",
"path": "youtube_dl/extractor/normalboots.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1059158"
},
{
"name": "Shell",
"bytes": "115"
}
],
"symlink_target": ""
} |
"""Unit tests for INFINIDAT InfiniBox volume driver."""
import mock
from oslo_utils import units
from cinder import exception
from cinder import test
from cinder.volume import configuration
from cinder.volume.drivers import infinidat
TEST_WWN_1 = '00:11:22:33:44:55:66:77'
TEST_WWN_2 = '11:11:22:33:44:55:66:77'
test_volume = mock.Mock(id=1, size=1, volume_type_id=1)
test_snapshot = mock.Mock(id=2, volume=test_volume, volume_id='1')
test_clone = mock.Mock(id=3, size=1)
test_group = mock.Mock(id=4)
test_snapgroup = mock.Mock(id=5, group=test_group)
test_connector = dict(wwpns=[TEST_WWN_1],
initiator='iqn.2012-07.org.fake:01')
class FakeInfinisdkException(Exception):
pass
class InfiniboxDriverTestCaseBase(test.TestCase):
def setUp(self):
super(InfiniboxDriverTestCaseBase, self).setUp()
# create mock configuration
self.configuration = mock.Mock(spec=configuration.Configuration)
self.configuration.infinidat_storage_protocol = 'fc'
self.configuration.san_ip = 'mockbox'
self.configuration.infinidat_pool_name = 'mockpool'
self.configuration.san_thin_provision = 'thin'
self.configuration.san_login = 'user'
self.configuration.san_password = 'pass'
self.configuration.volume_backend_name = 'mock'
self.configuration.volume_dd_blocksize = '1M'
self.configuration.use_multipath_for_image_xfer = False
self.configuration.enforce_multipath_for_image_xfer = False
self.configuration.num_volume_device_scan_tries = 1
self.configuration.san_is_local = False
self.configuration.chap_username = None
self.configuration.chap_password = None
self.configuration.infinidat_use_compression = None
self.driver = infinidat.InfiniboxVolumeDriver(
configuration=self.configuration)
self._system = self._infinibox_mock()
# mock external library dependencies
infinisdk = self.patch("cinder.volume.drivers.infinidat.infinisdk")
capacity = self.patch("cinder.volume.drivers.infinidat.capacity")
self.patch("cinder.volume.drivers.infinidat.iqn")
self.patch("cinder.volume.drivers.infinidat.wwn")
capacity.byte = 1
capacity.GiB = units.Gi
infinisdk.core.exceptions.InfiniSDKException = FakeInfinisdkException
infinisdk.InfiniBox.return_value = self._system
self.driver.do_setup(None)
def _infinibox_mock(self):
result = mock.Mock()
self._mock_volume = mock.Mock()
self._mock_volume.has_children.return_value = False
self._mock_volume.get_logical_units.return_value = []
self._mock_volume.create_child.return_value = self._mock_volume
self._mock_host = mock.Mock()
self._mock_host.get_luns.return_value = []
self._mock_host.map_volume().get_lun.return_value = 1
self._mock_pool = mock.Mock()
self._mock_pool.get_free_physical_capacity.return_value = units.Gi
self._mock_pool.get_physical_capacity.return_value = units.Gi
self._mock_ns = mock.Mock()
self._mock_ns.get_ips.return_value = [mock.Mock(ip_address='1.1.1.1')]
self._mock_group = mock.Mock()
self._mock_qos_policy = mock.Mock()
result.volumes.safe_get.return_value = self._mock_volume
result.volumes.create.return_value = self._mock_volume
result.pools.safe_get.return_value = self._mock_pool
result.hosts.safe_get.return_value = self._mock_host
result.cons_groups.safe_get.return_value = self._mock_group
result.cons_groups.create.return_value = self._mock_group
result.hosts.create.return_value = self._mock_host
result.network_spaces.safe_get.return_value = self._mock_ns
result.components.nodes.get_all.return_value = []
result.qos_policies.create.return_value = self._mock_qos_policy
result.qos_policies.safe_get.return_value = None
return result
def _raise_infinisdk(self, *args, **kwargs):
raise FakeInfinisdkException()
class InfiniboxDriverTestCase(InfiniboxDriverTestCaseBase):
def test_initialize_connection(self):
self._system.hosts.safe_get.return_value = None
result = self.driver.initialize_connection(test_volume, test_connector)
self.assertEqual(1, result["data"]["target_lun"])
def test_initialize_connection_host_exists(self):
result = self.driver.initialize_connection(test_volume, test_connector)
self.assertEqual(1, result["data"]["target_lun"])
def test_initialize_connection_mapping_exists(self):
mock_mapping = mock.Mock()
mock_mapping.get_volume.return_value = self._mock_volume
mock_mapping.get_lun.return_value = 888
self._mock_host.get_luns.return_value = [mock_mapping]
result = self.driver.initialize_connection(test_volume, test_connector)
self.assertEqual(888, result["data"]["target_lun"])
def test_initialize_connection_volume_doesnt_exist(self):
self._system.volumes.safe_get.return_value = None
self.assertRaises(exception.InvalidVolume,
self.driver.initialize_connection,
test_volume, test_connector)
def test_initialize_connection_create_fails(self):
self._system.hosts.safe_get.return_value = None
self._system.hosts.create.side_effect = self._raise_infinisdk
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
test_volume, test_connector)
def test_initialize_connection_map_fails(self):
self._mock_host.map_volume.side_effect = self._raise_infinisdk
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
test_volume, test_connector)
def test_initialize_connection_metadata(self):
self._system.hosts.safe_get.return_value = None
self.driver.initialize_connection(test_volume, test_connector)
self._mock_host.set_metadata_from_dict.assert_called_once()
def test_terminate_connection(self):
self.driver.terminate_connection(test_volume, test_connector)
def test_terminate_connection_delete_host(self):
self._mock_host.get_luns.return_value = [object()]
self.driver.terminate_connection(test_volume, test_connector)
self.assertEqual(0, self._mock_host.safe_delete.call_count)
self._mock_host.get_luns.return_value = []
self.driver.terminate_connection(test_volume, test_connector)
self.assertEqual(1, self._mock_host.safe_delete.call_count)
def test_terminate_connection_volume_doesnt_exist(self):
self._system.volumes.safe_get.return_value = None
self.assertRaises(exception.InvalidVolume,
self.driver.terminate_connection,
test_volume, test_connector)
def test_terminate_connection_api_fail(self):
self._mock_host.unmap_volume.side_effect = self._raise_infinisdk
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.terminate_connection,
test_volume, test_connector)
def test_get_volume_stats_refreshes(self):
result = self.driver.get_volume_stats()
self.assertEqual(1, result["free_capacity_gb"])
# change the "free space" in the pool
self._mock_pool.get_free_physical_capacity.return_value = 0
# no refresh - free capacity should stay the same
result = self.driver.get_volume_stats(refresh=False)
self.assertEqual(1, result["free_capacity_gb"])
# refresh - free capacity should change to 0
result = self.driver.get_volume_stats(refresh=True)
self.assertEqual(0, result["free_capacity_gb"])
def test_get_volume_stats_pool_not_found(self):
self._system.pools.safe_get.return_value = None
self.assertRaises(exception.VolumeDriverException,
self.driver.get_volume_stats)
@mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs")
def test_create_volume(self, *mocks):
self.driver.create_volume(test_volume)
def test_create_volume_pool_not_found(self):
self._system.pools.safe_get.return_value = None
self.assertRaises(exception.VolumeDriverException,
self.driver.create_volume, test_volume)
def test_create_volume_api_fail(self):
self._system.pools.safe_get.side_effect = self._raise_infinisdk
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, test_volume)
@mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs")
def test_create_volume_metadata(self, *mocks):
self.driver.create_volume(test_volume)
self._mock_volume.set_metadata_from_dict.assert_called_once()
@mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs")
def test_create_volume_compression_enabled(self, *mocks):
self.configuration.infinidat_use_compression = True
self.driver.create_volume(test_volume)
self.assertTrue(
self._system.volumes.create.call_args[1]["compression_enabled"]
)
@mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs")
def test_create_volume_compression_not_enabled(self, *mocks):
self.configuration.infinidat_use_compression = False
self.driver.create_volume(test_volume)
self.assertFalse(
self._system.volumes.create.call_args[1]["compression_enabled"]
)
@mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs")
def test_create_volume_compression_not_available(self, *mocks):
self._system.compat.has_compression.return_value = False
self.driver.create_volume(test_volume)
self.assertNotIn(
"compression_enabled",
self._system.volumes.create.call_args[1]
)
def test_delete_volume(self):
self.driver.delete_volume(test_volume)
def test_delete_volume_doesnt_exist(self):
self._system.volumes.safe_get.return_value = None
# should not raise an exception
self.driver.delete_volume(test_volume)
def test_delete_volume_with_children(self):
self._mock_volume.has_children.return_value = True
self.assertRaises(exception.VolumeIsBusy,
self.driver.delete_volume, test_volume)
def test_extend_volume(self):
self.driver.extend_volume(test_volume, 2)
def test_extend_volume_api_fail(self):
self._mock_volume.resize.side_effect = self._raise_infinisdk
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.extend_volume, test_volume, 2)
def test_create_snapshot(self):
self.driver.create_snapshot(test_snapshot)
def test_create_snapshot_metadata(self):
self._mock_volume.create_snapshot.return_value = self._mock_volume
self.driver.create_snapshot(test_snapshot)
self._mock_volume.set_metadata_from_dict.assert_called_once()
def test_create_snapshot_volume_doesnt_exist(self):
self._system.volumes.safe_get.return_value = None
self.assertRaises(exception.InvalidVolume,
self.driver.create_snapshot, test_snapshot)
def test_create_snapshot_api_fail(self):
self._mock_volume.create_snapshot.side_effect = self._raise_infinisdk
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot, test_snapshot)
@mock.patch("cinder.volume.utils.copy_volume")
@mock.patch("cinder.utils.brick_get_connector")
@mock.patch("cinder.utils.brick_get_connector_properties",
return_value=test_connector)
@mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs")
def test_create_volume_from_snapshot(self, *mocks):
self.driver.create_volume_from_snapshot(test_clone, test_snapshot)
def test_create_volume_from_snapshot_doesnt_exist(self):
self._system.volumes.safe_get.return_value = None
self.assertRaises(exception.InvalidSnapshot,
self.driver.create_volume_from_snapshot,
test_clone, test_snapshot)
def test_create_volume_from_snapshot_create_fails(self):
self._mock_volume.create_child.side_effect = self._raise_infinisdk
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
test_clone, test_snapshot)
@mock.patch("cinder.utils.brick_get_connector_properties",
return_value=test_connector)
@mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs")
def test_create_volume_from_snapshot_map_fails(self, *mocks):
self._mock_host.map_volume.side_effect = self._raise_infinisdk
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
test_clone, test_snapshot)
@mock.patch("cinder.volume.utils.copy_volume")
@mock.patch("cinder.utils.brick_get_connector")
@mock.patch("cinder.utils.brick_get_connector_properties",
return_value=test_connector)
def test_create_volume_from_snapshot_delete_clone_fails(self, *mocks):
self._mock_volume.delete.side_effect = self._raise_infinisdk
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
test_clone, test_snapshot)
def test_delete_snapshot(self):
self.driver.delete_snapshot(test_snapshot)
def test_delete_snapshot_doesnt_exist(self):
self._system.volumes.safe_get.return_value = None
# should not raise an exception
self.driver.delete_snapshot(test_snapshot)
def test_delete_snapshot_api_fail(self):
self._mock_volume.safe_delete.side_effect = self._raise_infinisdk
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_snapshot, test_snapshot)
@mock.patch("cinder.volume.utils.copy_volume")
@mock.patch("cinder.utils.brick_get_connector")
@mock.patch("cinder.utils.brick_get_connector_properties",
return_value=test_connector)
@mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs")
def test_create_cloned_volume(self, *mocks):
self.driver.create_cloned_volume(test_clone, test_volume)
def test_create_cloned_volume_volume_already_mapped(self):
mock_mapping = mock.Mock()
mock_mapping.get_volume.return_value = self._mock_volume
self._mock_volume.get_logical_units.return_value = [mock_mapping]
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
test_clone, test_volume)
def test_create_cloned_volume_create_fails(self):
self._system.volumes.create.side_effect = self._raise_infinisdk
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
test_clone, test_volume)
@mock.patch("cinder.utils.brick_get_connector_properties",
return_value=test_connector)
@mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs")
def test_create_cloned_volume_map_fails(self, *mocks):
self._mock_host.map_volume.side_effect = self._raise_infinisdk
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
test_clone, test_volume)
@mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type',
return_value=True)
def test_create_group(self, *mocks):
self.driver.create_group(None, test_group)
@mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type',
return_value=True)
def test_create_group_metadata(self, *mocks):
self.driver.create_group(None, test_group)
self._mock_group.set_metadata_from_dict.assert_called_once()
@mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type',
return_value=True)
def test_create_group_twice(self, *mocks):
self.driver.create_group(None, test_group)
self.driver.create_group(None, test_group)
@mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type',
return_value=True)
def test_create_group_api_fail(self, *mocks):
self._system.cons_groups.create.side_effect = self._raise_infinisdk
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_group,
None, test_group)
@mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type',
return_value=True)
def test_delete_group(self, *mocks):
self.driver.delete_group(None, test_group, [test_volume])
@mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type',
return_value=True)
def test_delete_group_doesnt_exist(self, *mocks):
self._system.cons_groups.safe_get.return_value = None
self.driver.delete_group(None, test_group, [test_volume])
@mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type',
return_value=True)
def test_delete_group_api_fail(self, *mocks):
self._mock_group.safe_delete.side_effect = self._raise_infinisdk
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_group,
None, test_group, [test_volume])
@mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type',
return_value=True)
def test_update_group_add_and_remove(self, *mocks):
self.driver.update_group(None, test_group,
[test_volume], [test_volume])
@mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type',
return_value=True)
def test_update_group_api_fail(self, *mocks):
self._mock_group.add_member.side_effect = self._raise_infinisdk
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.update_group,
None, test_group,
[test_volume], [test_volume])
@mock.patch("cinder.volume.utils.copy_volume")
@mock.patch("cinder.utils.brick_get_connector")
@mock.patch("cinder.utils.brick_get_connector_properties",
return_value=test_connector)
@mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type',
return_value=True)
@mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs")
def test_create_group_from_src_snaps(self, *mocks):
self.driver.create_group_from_src(None, test_group, [test_volume],
test_snapgroup, [test_snapshot],
None, None)
@mock.patch("cinder.volume.utils.copy_volume")
@mock.patch("cinder.utils.brick_get_connector")
@mock.patch("cinder.utils.brick_get_connector_properties",
return_value=test_connector)
@mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type',
return_value=True)
@mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs")
def test_create_group_from_src_vols(self, *mocks):
self.driver.create_group_from_src(None, test_group, [test_volume],
None, None,
test_group, [test_volume])
@mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type',
return_value=True)
def test_create_group_snap(self, *mocks):
mock_snapgroup = mock.Mock()
mock_snapgroup.get_members.return_value = [self._mock_volume]
self._mock_volume.get_parent.return_value = self._mock_volume
self._mock_volume.get_name.return_value = ''
self._mock_group.create_snapshot.return_value = mock_snapgroup
self.driver.create_group_snapshot(None,
test_snapgroup,
[test_snapshot])
@mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type',
return_value=True)
def test_create_group_snap_api_fail(self, *mocks):
self._mock_group.create_snapshot.side_effect = self._raise_infinisdk
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_group_snapshot, None,
test_snapgroup, [test_snapshot])
@mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type',
return_value=True)
def test_delete_group_snap(self, *mocks):
self.driver.delete_group_snapshot(None,
test_snapgroup,
[test_snapshot])
@mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type',
return_value=True)
def test_delete_group_snap_does_not_exist(self, *mocks):
self._system.cons_groups.safe_get.return_value = None
self.driver.delete_group_snapshot(None,
test_snapgroup,
[test_snapshot])
@mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type',
return_value=True)
def test_delete_group_snap_invalid_group(self, *mocks):
self._mock_group.is_snapgroup.return_value = False
self.assertRaises(exception.InvalidGroupSnapshot,
self.driver.delete_group_snapshot,
None, test_snapgroup, [test_snapshot])
@mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type',
return_value=True)
def test_delete_group_snap_api_fail(self, *mocks):
self._mock_group.safe_delete.side_effect = self._raise_infinisdk
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_group_snapshot,
None, test_snapgroup, [test_snapshot])
class InfiniboxDriverTestCaseFC(InfiniboxDriverTestCaseBase):
def test_initialize_connection_multiple_wwpns(self):
connector = {'wwpns': [TEST_WWN_1, TEST_WWN_2]}
result = self.driver.initialize_connection(test_volume, connector)
self.assertEqual(1, result["data"]["target_lun"])
class InfiniboxDriverTestCaseISCSI(InfiniboxDriverTestCaseBase):
def setUp(self):
super(InfiniboxDriverTestCaseISCSI, self).setUp()
self.configuration.infinidat_storage_protocol = 'iscsi'
self.configuration.infinidat_iscsi_netspaces = ['netspace1']
self.configuration.use_chap_auth = False
self.driver.do_setup(None)
def test_setup_without_netspaces_configured(self):
self.configuration.infinidat_iscsi_netspaces = []
self.assertRaises(exception.VolumeDriverException,
self.driver.do_setup, None)
def test_initialize_connection(self):
result = self.driver.initialize_connection(test_volume, test_connector)
self.assertEqual(1, result['data']['target_lun'])
def test_initialize_netspace_does_not_exist(self):
self._system.network_spaces.safe_get.return_value = None
self.assertRaises(exception.VolumeDriverException,
self.driver.initialize_connection,
test_volume, test_connector)
def test_initialize_netspace_has_no_ips(self):
self._mock_ns.get_ips.return_value = []
self.assertRaises(exception.VolumeDriverException,
self.driver.initialize_connection,
test_volume, test_connector)
def test_initialize_connection_with_chap(self):
self.configuration.use_chap_auth = True
result = self.driver.initialize_connection(test_volume, test_connector)
self.assertEqual(1, result['data']['target_lun'])
self.assertEqual('CHAP', result['data']['auth_method'])
self.assertIn('auth_username', result['data'])
self.assertIn('auth_password', result['data'])
def test_initialize_connection_multiple_netspaces(self):
self.configuration.infinidat_iscsi_netspaces = ['netspace1',
'netspace2']
result = self.driver.initialize_connection(test_volume, test_connector)
self.assertEqual(1, result['data']['target_lun'])
self.assertEqual(2, len(result['data']['target_luns']))
self.assertEqual(2, len(result['data']['target_iqns']))
self.assertEqual(2, len(result['data']['target_portals']))
def test_terminate_connection(self):
self.driver.terminate_connection(test_volume, test_connector)
class InfiniboxDriverTestCaseQoS(InfiniboxDriverTestCaseBase):
@mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs")
def test_qos_max_ipos(self, qos_specs):
qos_specs.return_value = {'qos_specs': {'id': 'qos_name',
'consumer': 'back-end',
'specs': {'maxIOPS': 1000,
'maxBWS': None}}}
self.driver.create_volume(test_volume)
self._system.qos_policies.create.assert_called_once()
self._mock_qos_policy.assign_entity.assert_called_once()
@mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs")
def test_qos_max_bws(self, qos_specs):
qos_specs.return_value = {'qos_specs': {'id': 'qos_name',
'consumer': 'back-end',
'specs': {'maxIOPS': None,
'maxBWS': 10000}}}
self.driver.create_volume(test_volume)
self._system.qos_policies.create.assert_called_once()
self._mock_qos_policy.assign_entity.assert_called_once()
@mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs")
def test_qos_no_compat(self, qos_specs):
qos_specs.return_value = {'qos_specs': {'id': 'qos_name',
'consumer': 'back-end',
'specs': {'maxIOPS': 1000,
'maxBWS': 10000}}}
self._system.compat.has_qos.return_value = False
self.driver.create_volume(test_volume)
self._system.qos_policies.create.assert_not_called()
self._mock_qos_policy.assign_entity.assert_not_called()
@mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs")
def test_qos_volume_type_id_none(self, qos_specs):
qos_specs.return_value = {'qos_specs': {'id': 'qos_name',
'consumer': 'back-end',
'specs': {'maxIOPS': 1000,
'maxBWS': 10000}}}
test_volume = mock.Mock(id=1, size=1, volume_type_id=None)
self.driver.create_volume(test_volume)
self._system.qos_policies.create.assert_not_called()
self._mock_qos_policy.assign_entity.assert_not_called()
@mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs")
def test_qos_no_specs(self, qos_specs):
qos_specs.return_value = {'qos_specs': None}
self.driver.create_volume(test_volume)
self._system.qos_policies.create.assert_not_called()
self._mock_qos_policy.assign_entity.assert_not_called()
@mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs")
def test_qos_front_end(self, qos_specs):
qos_specs.return_value = {'qos_specs': {'id': 'qos_name',
'consumer': 'front-end',
'specs': {'maxIOPS': 1000,
'maxBWS': 10000}}}
self.driver.create_volume(test_volume)
self._system.qos_policies.create.assert_not_called()
self._mock_qos_policy.assign_entity.assert_not_called()
@mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs")
def test_qos_specs_empty(self, qos_specs):
qos_specs.return_value = {'qos_specs': {'id': 'qos_name',
'consumer': 'back-end',
'specs': {'maxIOPS': None,
'maxBWS': None}}}
self.driver.create_volume(test_volume)
self._system.qos_policies.create.assert_not_called()
self._mock_qos_policy.assign_entity.assert_not_called()
@mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs")
def test_qos_policy_exists(self, qos_specs):
qos_specs.return_value = {'qos_specs': {'id': 'qos_name',
'consumer': 'back-end',
'specs': {'maxIOPS': 1000,
'maxBWS': 10000}}}
self._system.qos_policies.safe_get.return_value = self._mock_qos_policy
self.driver.create_volume(test_volume)
self._system.qos_policies.create.assert_not_called()
self._mock_qos_policy.assign_entity.assert_called()
| {
"content_hash": "f42dfdbc43bcf62941f99e42eabba3d0",
"timestamp": "",
"source": "github",
"line_count": 616,
"max_line_length": 79,
"avg_line_length": 48.62987012987013,
"alnum_prop": 0.6260849245560155,
"repo_name": "eharney/cinder",
"id": "76cd068fbd59b5080081912c7f4b3ce9d2c2fcea",
"size": "30585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/volume/drivers/test_infinidat.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "561"
},
{
"name": "Python",
"bytes": "19839107"
},
{
"name": "Shell",
"bytes": "6453"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from web_app import app
if __name__ == '__main__':
    import logging
    from logging.handlers import RotatingFileHandler
    # Application log: rotate at 100 MiB, keep 3 backups.
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    app_log = RotatingFileHandler('./play.log', maxBytes=100*1024*1024, backupCount=3)
    app_log.setFormatter(formatter)
    app_log.setLevel(logging.INFO)
    app.logger.addHandler(app_log)
    app.logger.setLevel(logging.INFO)
    # Request/error streams for the WSGI server.
    # NOTE(review): these files stay open for the whole process lifetime
    # and are never closed - acceptable for a run-forever server script.
    access_log = open('./access.log', "a")
    error_log = open('./error.log', "a")
    from gevent.pywsgi import WSGIServer
    host = '0.0.0.0'
    port = 44399
    # gevent's WSGIServer writes request lines to `log` and tracebacks
    # to `error_log`.
    http_server = WSGIServer((host, port),
                             app,
                             log=access_log,
                             error_log=error_log)
    print(f"http://{host}:{port}")
    http_server.serve_forever()
| {
"content_hash": "3ac390734a1148de4b0f919ddc3ac668",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 89,
"avg_line_length": 32.925925925925924,
"alnum_prop": 0.5973003374578177,
"repo_name": "qiyiping/othello",
"id": "b9d584adc2634f9eb3166982ab4f269f1090e751",
"size": "913",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/run_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "441"
},
{
"name": "HTML",
"bytes": "1135"
},
{
"name": "JavaScript",
"bytes": "7598"
},
{
"name": "Python",
"bytes": "32651"
}
],
"symlink_target": ""
} |
from gevent.server import StreamServer
from utils import green, red
from messages import parser, builder
# Accounts currently involved in an in-flight transfer; used as a
# cooperative lock so two concurrent transfers never touch the same account.
transactions = []
def transfer(message, socket):
    # Execute a money transfer between two accounts and report the result
    # to the client over `socket`.
    #
    # `message` is the parsed payload; its tail is (from, to, amount, date).
    # Account balances live in one file per client under clients/; every
    # transfer is also appended to the logs/now journal first.
    _from, to, how_many, date = message[1:]
    if _from in transactions or to in transactions:
        # One of the two accounts is already part of an in-flight
        # transfer: ask the client to retry later.
        socket.send(builder('retry'))
        # BUG FIX: without this return the function fell through and
        # applied the transfer anyway, right after telling the client
        # to retry.
        return
    # Mark both accounts busy for the duration of the transfer.
    transactions.append(_from)
    transactions.append(to)
    try:
        # Journal the operation before touching the balances.
        with open("logs/now", 'a') as f:
            f.write("-%s:%s:%s:%s\n" % (_from, to, how_many, date))
        # Debit the sender...
        with open("clients/%s" % _from, 'r+') as f:
            money = int(f.read() or '0')
            f.seek(0)
            f.write(str(money - int(how_many)))
            f.truncate()
        # ...and credit the receiver.
        with open("clients/%s" % to, 'r+') as f:
            money = int(f.read() or '0')
            f.seek(0)
            f.write(str(money + int(how_many)))
            f.truncate()
    finally:
        # Always release both accounts, even when a file operation fails,
        # so they are not locked forever.
        transactions.remove(_from)
        transactions.remove(to)
    socket.send(builder('success'))
def handle(socket, address):
    # Per-connection handler run by gevent's StreamServer (one greenlet
    # per connected client). Loops reading fixed-size chunks until the
    # peer disconnects or sends something unparseable.
    print green("Client connected")
    while True:
        data = socket.recv(2048)
        if not data:
            # Empty read: the peer closed the connection.
            print red("Client disconnected")
            return
        message = parser(data)
        if message is None:
            # Unparseable payload: drop the connection silently.
            return
        if message[0] == 'transfer':
            transfer(message[1], socket)
# Listen on localhost:6888; handle() runs in a new greenlet per client.
server = StreamServer(('127.0.0.1', 6888), handle)
server.serve_forever()
| {
"content_hash": "3abecefcd3f1f52d84bd8f29d789cef9",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 59,
"avg_line_length": 20.96551724137931,
"alnum_prop": 0.6291118421052632,
"repo_name": "vtemian/university_projects",
"id": "c6b3ab04befb4178d87761f62d3f4bb5996fcdcd",
"size": "1216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "practic_stage/hmw7/server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "15965"
},
{
"name": "C++",
"bytes": "9417"
},
{
"name": "Python",
"bytes": "69909"
}
],
"symlink_target": ""
} |
import os
import shelve
def save_game(player, entities, game_map, message_log, game_state):
    """Persist the current session to the 'savegame.dat' shelf.

    The player is stored as an index into ``entities`` so the same object
    identity can be re-established by load_game. Flag 'n' always creates
    a fresh save, overwriting any previous one.
    """
    snapshot = {
        'player_index': entities.index(player),
        'entities': entities,
        'game_map': game_map,
        'message_log': message_log,
        'game_state': game_state,
    }
    with shelve.open('savegame.dat', 'n') as data_file:
        for key, value in snapshot.items():
            data_file[key] = value
def load_game():
    """Restore a session previously written by save_game.

    Returns (player, entities, game_map, message_log, game_state).
    Raises FileNotFoundError when no save file exists.

    NOTE(review): some dbm backends store the shelf under a suffixed
    filename (e.g. savegame.dat.db), in which case the isfile() check
    reports a missing save even though one exists - confirm the target
    platform's dbm module.
    """
    if not os.path.isfile('savegame.dat'):
        raise FileNotFoundError
    with shelve.open('savegame.dat', 'r') as data_file:
        entities = data_file['entities']
        game_map = data_file['game_map']
        message_log = data_file['message_log']
        game_state = data_file['game_state']
        # Re-establish the player's identity from its saved index.
        player = entities[data_file['player_index']]
    return player, entities, game_map, message_log, game_state
| {
"content_hash": "d88090c3c0693ece6e635a1e43c1f037",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 67,
"avg_line_length": 32.96153846153846,
"alnum_prop": 0.6336056009334889,
"repo_name": "LordRhys/Game-Development",
"id": "997787847daa86067fe88577996b94aebd2f6fff",
"size": "857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyRL/loader_functions/data_loaders.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70833"
}
],
"symlink_target": ""
} |
""" Part of weight_app
:copyright: (c) 2012 by Andreas Madsack.
:license: BSD, see LICENSE for more details.
"""
from flask.ext.testing import TestCase
from flask import Flask
import datetime
from main import db, create_app
import models
import utils
import StringIO
def add_some_data(username):
    """Create `username` with a random password and import one fixture
    weight measurement (scale 'sid1', 50.1 kg on 2012-06-03) from an
    inline XML document."""
    user = models.User(username=username)
    user.set_password(utils.new_pw())
    db.session.add(user)
    db.session.flush()
    db.session.commit()
    xml_payload = (u"<w>"
                   "<scales><item id=\"sid1\">"
                   "<owner>o1</owner>"
                   "<model>m1</model>"
                   "</item></scales>"
                   "<days><day date=\"2012-06-03\">"
                   "<weight scale=\"sid1\">50.1</weight>"
                   "</day></days>"
                   "</w>")
    utils.import_weight_from_xml(StringIO.StringIO(xml_payload), username)
class ImportTest(TestCase):
    """Integration tests for utils.import_weight_from_xml.
    Each test runs against a fresh database populated by add_some_data
    (one user 'user1', one scale 'sid1', one weight on 2012-06-03).
    """
    username = u'user1'
    def setUp(self):
        # Fresh schema plus fixture data before every test.
        db.create_all()
        add_some_data(username=self.username)
    def create_app(self):
        # Required hook for flask-testing's TestCase.
        return create_app()
    def tearDown(self):
        # Drop everything so tests stay independent of each other.
        db.session.remove()
        db.drop_all()
    def test_scale_1(self):
        """ is the id imported?
        """
        sc1 = models.Scale.query.get(u'sid1')
        self.assertNotEqual(sc1, None)
    def test_scale_2(self):
        """ query the imported dataset
        """
        sc1 = models.Scale.query.get(u'sid1')
        self.assertEqual(sc1.owner, u'o1')
        self.assertEqual(sc1.model, u'm1')
    def test_weight_1(self):
        """ count data in weight table
        """
        we1 = models.Weight.query.all()
        self.assertNotEqual(we1[0], None)
    def test_weight_2(self):
        """ query the imported dataset
        """
        wx = datetime.datetime.strptime('2012-06-03',
                                        '%Y-%m-%d').date()
        we1 = models.Weight.query.filter_by(wdate=wx).all()
        self.assertEqual(len(we1), 1)
        we1 = we1[0]
        sc1 = models.Scale.query.get(u'sid1')
        self.assertEqual(we1.wdate, wx)
        self.assertEqual(we1.weight, 50.1)
        self.assertEqual(we1.scales, sc1)
    def test_weight_3(self):
        """ add some data
        """
        wx = datetime.datetime.strptime('2012-06-04',
                                        '%Y-%m-%d').date()
        we1 = models.Weight(wdate=wx)
        we1.weight=10
        sc1 = models.Scale.query.get(u'sid1')
        we1.scale_name = sc1.name
        u1 = models.User.query.get(u'user1')
        we1.user_username = u1.username
        db.session.add(we1)
        db.session.flush()
        self.assertEqual(we1.wdate, wx)
        self.assertEqual(we1.weight, 10)
        self.assertEqual(we1.scales, sc1)
        self.assertEqual(we1.user_username, u1.username)
    def test_import_1(self):
        """ empty weight field is ignored on import => no dataset
        """
        xdata = StringIO.StringIO(u"<w>"
                                  "<days><day date=\"2012-06-02\">"
                                  "</day></days>"
                                  "</w>")
        utils.import_weight_from_xml(xdata, u'user1')
        wx = datetime.datetime.strptime('2012-06-02',
                                        '%Y-%m-%d').date()
        we1 = models.Weight.query.filter_by(wdate=wx).all()
        self.assertEqual(len(we1), 0)
    def test_import_2(self):
        """ import without scale set. scales should be None.
        """
        xdata = StringIO.StringIO(u"<w>"
                                  "<days><day date=\"2012-06-02\">"
                                  "<weight>10</weight>"
                                  "</day></days>"
                                  "</w>")
        utils.import_weight_from_xml(xdata, u'user1')
        wx = datetime.datetime.strptime('2012-06-02',
                                        '%Y-%m-%d').date()
        we1 = models.Weight.query.filter_by(wdate=wx).all()
        self.assertEqual(len(we1), 1)
        self.assertEqual(we1[0].scales, None)
| {
"content_hash": "1d3a387f4f9aa5ea905c7d2d4a26238f",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 68,
"avg_line_length": 32.13076923076923,
"alnum_prop": 0.4986832655015561,
"repo_name": "mfa/weight-app",
"id": "92d8a59ef33d9593cb2865f05c821125f73928e5",
"size": "4178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "weight/tests/test_importer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "17520"
},
{
"name": "Python",
"bytes": "35296"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
import os
import re
from typing import (
Dict,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.oauth2 import service_account # type: ignore
import pkg_resources
# Newer google-api-core releases expose gapic_v1.method._MethodDefault;
# fall back to a plain `object` union on older versions where the
# attribute access raises AttributeError.
try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore
from google.cloud.apigeeconnect_v1.services.connection_service import pagers
from google.cloud.apigeeconnect_v1.types import connection
from .transports.base import DEFAULT_CLIENT_INFO, ConnectionServiceTransport
from .transports.grpc import ConnectionServiceGrpcTransport
from .transports.grpc_asyncio import ConnectionServiceGrpcAsyncIOTransport
class ConnectionServiceClientMeta(type):
    """Metaclass for the ConnectionService client.
    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """
    _transport_registry = (
        OrderedDict()
    )  # type: Dict[str, Type[ConnectionServiceTransport]]
    _transport_registry["grpc"] = ConnectionServiceGrpcTransport
    _transport_registry["grpc_asyncio"] = ConnectionServiceGrpcAsyncIOTransport
    def get_transport_class(
        cls,
        label: Optional[str] = None,
    ) -> Type[ConnectionServiceTransport]:
        """Returns an appropriate transport class.
        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.
        Returns:
            The transport class to use.
        """
        if not label:
            # No transport requested: default to the first registered one.
            return next(iter(cls._transport_registry.values()))
        # A specific transport was requested; unknown labels raise KeyError.
        return cls._transport_registry[label]
class ConnectionServiceClient(metaclass=ConnectionServiceClientMeta):
    """Service Interface for the Apigee Connect connection
    management APIs.
    """
    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Converts api endpoint to mTLS endpoint.
        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
        Args:
            api_endpoint (Optional[str]): the api endpoint to convert.
        Returns:
            str: converted mTLS api endpoint.
        """
        if not api_endpoint:
            return api_endpoint
        mtls_endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )
        m = mtls_endpoint_re.match(api_endpoint)
        name, mtls, sandbox, googledomain = m.groups()
        if mtls or not googledomain:
            return api_endpoint
        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )
        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
    DEFAULT_ENDPOINT = "apigeeconnect.googleapis.com"
    # The staticmethod is applied via __func__ because the class body is
    # still being executed at this point.
    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
        DEFAULT_ENDPOINT
    )
    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        info.
        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.
        Returns:
            ConnectionServiceClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_info(info)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)
    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        file.
        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.
        Returns:
            ConnectionServiceClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_file(filename)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)
    # Alias kept for backward compatibility with earlier generated clients.
    from_service_account_json = from_service_account_file
    @property
    def transport(self) -> ConnectionServiceTransport:
        """Returns the transport used by the client instance.
        Returns:
            ConnectionServiceTransport: The transport used by the client
                instance.
        """
        return self._transport
    @staticmethod
    def endpoint_path(
        project: str,
        endpoint: str,
    ) -> str:
        """Returns a fully-qualified endpoint string."""
        return "projects/{project}/endpoints/{endpoint}".format(
            project=project,
            endpoint=endpoint,
        )
    @staticmethod
    def parse_endpoint_path(path: str) -> Dict[str, str]:
        """Parses a endpoint path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)/endpoints/(?P<endpoint>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_billing_account_path(
        billing_account: str,
    ) -> str:
        """Returns a fully-qualified billing_account string."""
        return "billingAccounts/{billing_account}".format(
            billing_account=billing_account,
        )
    @staticmethod
    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
        """Parse a billing_account path into its component segments."""
        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_folder_path(
        folder: str,
    ) -> str:
        """Returns a fully-qualified folder string."""
        return "folders/{folder}".format(
            folder=folder,
        )
    @staticmethod
    def parse_common_folder_path(path: str) -> Dict[str, str]:
        """Parse a folder path into its component segments."""
        m = re.match(r"^folders/(?P<folder>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_organization_path(
        organization: str,
    ) -> str:
        """Returns a fully-qualified organization string."""
        return "organizations/{organization}".format(
            organization=organization,
        )
    @staticmethod
    def parse_common_organization_path(path: str) -> Dict[str, str]:
        """Parse a organization path into its component segments."""
        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_project_path(
        project: str,
    ) -> str:
        """Returns a fully-qualified project string."""
        return "projects/{project}".format(
            project=project,
        )
    @staticmethod
    def parse_common_project_path(path: str) -> Dict[str, str]:
        """Parse a project path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_location_path(
        project: str,
        location: str,
    ) -> str:
        """Returns a fully-qualified location string."""
        return "projects/{project}/locations/{location}".format(
            project=project,
            location=location,
        )
    @staticmethod
    def parse_common_location_path(path: str) -> Dict[str, str]:
        """Parse a location path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
        return m.groupdict() if m else {}
    @classmethod
    def get_mtls_endpoint_and_cert_source(
        cls, client_options: Optional[client_options_lib.ClientOptions] = None
    ):
        """Return the API endpoint and client cert source for mutual TLS.
        The client cert source is determined in the following order:
        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
        client cert source is None.
        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
        default client cert source exists, use the default one; otherwise the client cert
        source is None.
        The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
        use the default API endpoint.
        More details can be found at https://google.aip.dev/auth/4114.
        Args:
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. Only the `api_endpoint` and `client_cert_source` properties may be used
                in this method.
        Returns:
            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
                client cert source to use.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
        """
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
        use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
        if use_client_cert not in ("true", "false"):
            raise ValueError(
                "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
            )
        if use_mtls_endpoint not in ("auto", "never", "always"):
            raise MutualTLSChannelError(
                "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
            )
        # Figure out the client cert source to use.
        client_cert_source = None
        if use_client_cert == "true":
            if client_options.client_cert_source:
                client_cert_source = client_options.client_cert_source
            elif mtls.has_default_client_cert_source():
                client_cert_source = mtls.default_client_cert_source()
        # Figure out which api endpoint to use.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        elif use_mtls_endpoint == "always" or (
            use_mtls_endpoint == "auto" and client_cert_source
        ):
            api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
        else:
            api_endpoint = cls.DEFAULT_ENDPOINT
        return api_endpoint, client_cert_source
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Optional[Union[str, ConnectionServiceTransport]] = None,
        client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the connection service client.
        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, ConnectionServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        # Narrow the type for the checker; at runtime this is already a
        # ClientOptions instance after the conversions above.
        client_options = cast(client_options_lib.ClientOptions, client_options)
        api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
            client_options
        )
        api_key_value = getattr(client_options, "api_key", None)
        if api_key_value and credentials:
            raise ValueError(
                "client_options.api_key and credentials are mutually exclusive"
            )
        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, ConnectionServiceTransport):
            # transport is a ConnectionServiceTransport instance.
            if credentials or client_options.credentials_file or api_key_value:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = transport
        else:
            import google.auth._default  # type: ignore
            if api_key_value and hasattr(
                google.auth._default, "get_api_key_credentials"
            ):
                credentials = google.auth._default.get_api_key_credentials(
                    api_key_value
                )
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=True,
                api_audience=client_options.api_audience,
            )
    def list_connections(
        self,
        request: Optional[Union[connection.ListConnectionsRequest, dict]] = None,
        *,
        parent: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> pagers.ListConnectionsPager:
        r"""Lists connections that are currently active for the
        given Apigee Connect endpoint.
        .. code-block:: python
            # This snippet has been automatically generated and should be regarded as a
            # code template only.
            # It will require modifications to work:
            # - It may require correct/in-range values for request initialization.
            # - It may require specifying regional endpoints when creating the service
            # client as shown in:
            # https://googleapis.dev/python/google-api-core/latest/client_options.html
            from google.cloud import apigeeconnect_v1
            def sample_list_connections():
                # Create a client
                client = apigeeconnect_v1.ConnectionServiceClient()
                # Initialize request argument(s)
                request = apigeeconnect_v1.ListConnectionsRequest(
                    parent="parent_value",
                )
                # Make the request
                page_result = client.list_connections(request=request)
                # Handle the response
                for response in page_result:
                    print(response)
        Args:
            request (Union[google.cloud.apigeeconnect_v1.types.ListConnectionsRequest, dict]):
                The request object. The request for
                [ListConnections][Management.ListConnections].
            parent (str):
                Required. Parent name of the form:
                ``projects/{project_number or project_id}/endpoints/{endpoint}``.
                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            google.cloud.apigeeconnect_v1.services.connection_service.pagers.ListConnectionsPager:
                The response for
                   [ListConnections][Management.ListConnections].
                Iterating over this object will yield results and
                resolve additional pages automatically.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a connection.ListConnectionsRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, connection.ListConnectionsRequest):
            request = connection.ListConnectionsRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if parent is not None:
                request.parent = parent
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.list_connections]
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )
        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # This method is paged; wrap the response in a pager, which provides
        # an `__iter__` convenience method.
        response = pagers.ListConnectionsPager(
            method=rpc,
            request=request,
            response=response,
            metadata=metadata,
        )
        # Done; return the response.
        return response
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        """Releases underlying transport's resources.
        .. warning::
            ONLY use as a context manager if the transport is NOT shared
            with other clients! Exiting the with block will CLOSE the transport
            and may cause errors in other clients!
        """
        self.transport.close()
# Report the installed google-cloud-apigee-connect version in the client's
# user-agent; fall back to generic client info when the distribution
# metadata is unavailable (e.g. vendored or editable installs).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-apigee-connect",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("ConnectionServiceClient",)
| {
"content_hash": "9b234ecf08ed8fb471280caed3e93e9e",
"timestamp": "",
"source": "github",
"line_count": 565,
"max_line_length": 120,
"avg_line_length": 39.87433628318584,
"alnum_prop": 0.6207110834923876,
"repo_name": "googleapis/python-apigee-connect",
"id": "217c0405870fa508ba09ccdba710d91cfd80afa7",
"size": "23129",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/apigeeconnect_v1/services/connection_service/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "306158"
},
{
"name": "Shell",
"bytes": "30684"
}
],
"symlink_target": ""
} |
# Ansible module metadata: this module is deprecated (see DOCUMENTATION)
# and community-supported.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['deprecated'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_nic
short_description: Manages NICs and secondary IPs of an instance on Apache CloudStack based clouds.
description:
- Add and remove secondary IPs to and from a NIC.
version_added: "2.3"
author: "René Moser (@resmo)"
deprecated: Deprecated in 2.4. Use M(cs_instance_nic_secondaryip) instead.
options:
vm:
description:
- Name of instance.
required: true
aliases: ['name']
network:
description:
- Name of the network.
- Required to find the NIC if instance has multiple networks assigned.
required: false
default: null
vm_guest_ip:
description:
- Secondary IP address to be added to the instance nic.
- If not set, the API always returns a new IP address and idempotency is not given.
required: false
default: null
aliases: ['secondary_ip']
vpc:
description:
- Name of the VPC the C(vm) is related to.
required: false
default: null
domain:
description:
- Domain the instance is related to.
required: false
default: null
account:
description:
- Account the instance is related to.
required: false
default: null
project:
description:
- Name of the project the instance is deployed in.
required: false
default: null
zone:
description:
- Name of the zone in which the instance is deployed in.
- If not set, default zone is used.
required: false
default: null
state:
description:
- State of the ipaddress.
required: false
default: "present"
choices: [ 'present', 'absent' ]
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Assign a specific IP to the default NIC of the VM
- local_action:
module: cs_nic
vm: customer_xy
vm_guest_ip: 10.10.10.10
# Assign an IP to the default NIC of the VM
# Note: If vm_guest_ip is not set, you will get a new IP address on every run.
- local_action:
module: cs_nic
vm: customer_xy
# Remove a specific IP from the default NIC
- local_action:
module: cs_nic
vm: customer_xy
vm_guest_ip: 10.10.10.10
state: absent
'''
RETURN = '''
---
id:
description: UUID of the nic.
returned: success
type: string
sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8
vm:
description: Name of the VM.
returned: success
type: string
sample: web-01
ip_address:
description: Primary IP of the NIC.
returned: success
type: string
sample: 10.10.10.10
netmask:
description: Netmask of the NIC.
returned: success
type: string
sample: 255.255.255.0
mac_address:
description: MAC address of the NIC.
returned: success
type: string
sample: 02:00:33:31:00:e4
vm_guest_ip:
description: Secondary IP of the NIC.
returned: success
type: string
sample: 10.10.10.10
network:
description: Name of the network if not default.
returned: success
type: string
sample: sync network
domain:
description: Domain the VM is related to.
returned: success
type: string
sample: example domain
account:
description: Account the VM is related to.
returned: success
type: string
sample: example account
project:
description: Name of project the VM is related to.
returned: success
type: string
sample: Production
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackNic(AnsibleCloudStack):
    """CloudStack NIC handler: ensures a secondary IP is present on or
    absent from a VM's NIC (idempotent in both directions)."""

    def __init__(self, module):
        super(AnsibleCloudStackNic, self).__init__(module)
        self.vm_guest_ip = self.module.params.get('vm_guest_ip')
        self.nic = None
        # Map CloudStack API result keys to the names documented in RETURN.
        self.returns = {
            'ipaddress': 'ip_address',
            'macaddress': 'mac_address',
            'netmask': 'netmask',
        }

    def get_nic(self):
        """Return (and cache) the NIC of the VM in the requested network.

        Calls fail_json (which exits the module) when no NIC is found.
        """
        if self.nic:
            return self.nic
        args = {
            'virtualmachineid': self.get_vm(key='id'),
            'networkid': self.get_network(key='id'),
        }
        nics = self.cs.listNics(**args)
        if nics:
            self.nic = nics['nic'][0]
            return self.nic
        self.module.fail_json(msg="NIC for VM %s in network %s not found" % (self.get_vm(key='name'), self.get_network(key='name')))

    def get_secondary_ip(self):
        """Return the secondary IP entry matching vm_guest_ip, or None."""
        nic = self.get_nic()
        if self.vm_guest_ip:
            secondary_ips = nic.get('secondaryip') or []
            for secondary_ip in secondary_ips:
                if secondary_ip['ipaddress'] == self.vm_guest_ip:
                    return secondary_ip
        return None

    def present_nic(self):
        """Ensure the secondary IP is assigned to the NIC."""
        nic = self.get_nic()
        if not self.get_secondary_ip():
            self.result['changed'] = True
            args = {
                'nicid': nic['id'],
                'ipaddress': self.vm_guest_ip,
            }
            if not self.module.check_mode:
                res = self.cs.addIpToNic(**args)
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    nic = self.poll_job(res, 'nicsecondaryip')
                    # Save the (possibly API-assigned) IP for RETURN.
                    self.vm_guest_ip = nic['ipaddress']
        return nic

    def absent_nic(self):
        """Ensure the secondary IP is removed from the NIC."""
        nic = self.get_nic()
        secondary_ip = self.get_secondary_ip()
        if secondary_ip:
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.cs.removeIpFromNic(id=secondary_ip['id'])
                if 'errortext' in res:
                    # BUG FIX: the error text lives in the API response
                    # (res), not in the NIC dict; the old code raised a
                    # KeyError instead of reporting the API error.
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    self.poll_job(res, 'nicsecondaryip')
        return nic

    def get_result(self, nic):
        """Build the module result dict from the NIC and module params."""
        super(AnsibleCloudStackNic, self).get_result(nic)
        if nic and not self.module.params.get('network'):
            # Network was not given explicitly; resolve its name from the
            # NIC so the RETURN payload is complete.
            self.module.params['network'] = nic.get('networkid')
        self.result['network'] = self.get_network(key='name')
        self.result['vm'] = self.get_vm(key='name')
        self.result['vm_guest_ip'] = self.vm_guest_ip
        self.result['domain'] = self.get_domain(key='path')
        self.result['account'] = self.get_account(key='name')
        self.result['project'] = self.get_project(key='name')
        return self.result
def main():
    """Module entry point: build the argument spec, apply the requested
    state and exit with the resulting facts."""
    spec = cs_argument_spec()
    spec.update(dict(
        vm=dict(required=True, aliases=['name']),
        vm_guest_ip=dict(default=None, aliases=['secondary_ip']),
        network=dict(default=None),
        vpc=dict(default=None),
        state=dict(choices=['present', 'absent'], default='present'),
        domain=dict(default=None),
        account=dict(default=None),
        project=dict(default=None),
        zone=dict(default=None),
        poll_async=dict(type='bool', default=True),
    ))
    module = AnsibleModule(
        argument_spec=spec,
        required_together=cs_required_together(),
        supports_check_mode=True,
        # Removing a specific secondary IP requires knowing which one.
        required_if=[
            ('state', 'absent', ['vm_guest_ip']),
        ],
    )
    try:
        nic_handler = AnsibleCloudStackNic(module)
        if module.params.get('state') == 'absent':
            nic = nic_handler.absent_nic()
        else:
            nic = nic_handler.present_nic()
        result = nic_handler.get_result(nic)
    except CloudStackException as e:
        # fail_json exits the module process.
        module.fail_json(msg='CloudStackException: %s' % str(e))
    module.exit_json(**result)
if __name__ == '__main__':
    main()
| {
"content_hash": "a9780dc8cb81613f401d4294f3e6f4fa",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 131,
"avg_line_length": 28.42293906810036,
"alnum_prop": 0.6021437578814628,
"repo_name": "e-gob/plataforma-kioscos-autoatencion",
"id": "d5ce700df2c1c99185a911ac029050a170826e06",
"size": "8673",
"binary": false,
"copies": "19",
"ref": "refs/heads/master",
"path": "scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/cloud/cloudstack/_cs_nic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41110"
},
{
"name": "C++",
"bytes": "3804"
},
{
"name": "CSS",
"bytes": "34823"
},
{
"name": "CoffeeScript",
"bytes": "8521"
},
{
"name": "HTML",
"bytes": "61168"
},
{
"name": "JavaScript",
"bytes": "7206"
},
{
"name": "Makefile",
"bytes": "1347"
},
{
"name": "PowerShell",
"bytes": "584344"
},
{
"name": "Python",
"bytes": "25506593"
},
{
"name": "Ruby",
"bytes": "245726"
},
{
"name": "Shell",
"bytes": "5075"
}
],
"symlink_target": ""
} |
import os
import pandas as pd
from pathlib import Path
def pwd():
    """Return the current working directory as a ``pathlib.Path``."""
    return Path.cwd()
def ls(path='.', ls=False):
    """List directory entries.

    With ``ls=True`` return the raw ``os.listdir()`` list for the
    current directory; otherwise return a ``pandas.Series`` of the
    entries under ``path`` (relative to the current working directory).
    """
    if ls:
        return os.listdir()
    entries = os.listdir(Path.cwd() / path)
    return pd.Series(entries)
def cd(path, full_path=False):
    """Change the working directory and print the new location.

    If ``full_path`` is true, ``path`` is used verbatim; otherwise it is
    resolved relative to the current working directory.
    """
    # Resolve the target BEFORE chdir. The original recomputed
    # Path.cwd() / path after chdir in the relative branch, which
    # printed the path component twice (e.g. .../sub/sub).
    target = path if full_path else Path.cwd() / path
    os.chdir(target)
    print(target)
def tnsnames(open_file=True):
    """Open ``tnsnames.ora`` (default) or chdir into its directory.

    The file is located under ``$ORACLE_HOME/network/admin``.
    NOTE(review): os.startfile is Windows-only, and ORACLE_HOME must be
    set or Path(None) raises — presumably both hold in this environment.
    """
    path_tnsnames = Path(os.getenv('ORACLE_HOME')) / 'network' / 'admin'
    file_tnsnames = path_tnsnames / 'tnsnames.ora'
    if open_file:
        os.startfile(file_tnsnames)
    else:
        os.chdir(path_tnsnames)
"content_hash": "025b5f85dbf73b749a1b273eb6dc8801",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 73,
"avg_line_length": 25.555555555555557,
"alnum_prop": 0.6159420289855072,
"repo_name": "Duke-LeTran/practice-and-notes",
"id": "0f29321b063879258b222df0854ef8788c61339b",
"size": "690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pyrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PLSQL",
"bytes": "2708"
},
{
"name": "Python",
"bytes": "38002"
}
],
"symlink_target": ""
} |
import datetime
from blist import sortedlist
from elasticsearch.client import Elasticsearch
from util import dt_to_ts
from util import EAException
from util import elastalert_logger
from util import format_index
from util import hashable
from util import lookup_es_key
from util import pretty_ts
from util import ts_now
from util import ts_to_dt
class RuleType(object):
    """ The base class for a rule type.
    The class must implement add_data and add any matches to self.matches.
    :param rules: A rule configuration.
    """
    required_options = frozenset()

    def __init__(self, rules, args=None):
        # Matches found so far; consumers drain this list.
        self.matches = []
        self.rules = rules
        # Per-key bookkeeping used by subclasses (e.g. FrequencyRule).
        self.occurrences = {}

    def add_data(self, data):
        """ The function that the elastalert client calls with results from ES.
        Data is a list of dictionaries, from elasticsearch.
        :param data: A list of events, each of which is a dictionary of terms.
        """
        raise NotImplementedError()

    def add_match(self, event):
        """ This function is called on all matching events. Rules use it to add
        extra information about the context of a match. Event is a dictionary
        containing terms directly from elasticsearch and alerts will report
        all of the information.
        :param event: The matching event, a dictionary of terms.
        """
        # Convert datetime's back to timestamps
        ts = self.rules.get('timestamp_field')
        if ts in event:
            event[ts] = dt_to_ts(event[ts])
        self.matches.append(event)

    def get_match_str(self, match):
        """ Returns a string that gives more context about a match.
        :param match: The matching event, a dictionary of terms.
        :return: A user facing string describing the match.
        """
        return ''

    def garbage_collect(self, timestamp):
        """ Gets called periodically to remove old data that is useless beyond given timestamp.
        May also be used to compute things in the absence of new data.
        :param timestamp: A timestamp indicating the rule has been run up to that point.
        """
        pass

    def add_count_data(self, counts):
        """ Gets called when a rule has use_count_query set to True. Called to add data from querying to the rule.
        :param counts: A dictionary mapping timestamps to hit counts.
        """
        raise NotImplementedError()

    def add_terms_data(self, terms):
        """ Gets called when a rule has use_terms_query set to True.
        :param terms: A list of buckets with a key, corresponding to query_key, and the count """
        raise NotImplementedError()
class CompareRule(RuleType):
    """Base class for rules that match events via a compare() predicate."""

    required_options = frozenset(['compare_key'])

    def compare(self, event):
        """Return True iff *event* is a match. Subclasses must override."""
        raise NotImplementedError()

    def add_data(self, data):
        # Record every event for which the predicate holds.
        matching = (event for event in data if self.compare(event))
        for match in matching:
            self.add_match(match)
class BlacklistRule(CompareRule):
    """Matches when the compare_key's value appears in a configured blacklist."""

    required_options = frozenset(['compare_key', 'blacklist'])

    def compare(self, event):
        value = lookup_es_key(event, self.rules['compare_key'])
        return value in self.rules['blacklist']
class WhitelistRule(CompareRule):
    """Matches when the compare_key's value is missing or not whitelisted."""

    required_options = frozenset(['compare_key', 'whitelist', 'ignore_null'])

    def compare(self, event):
        value = lookup_es_key(event, self.rules['compare_key'])
        if value is None:
            # A missing field only matches when ignore_null is disabled.
            return not self.rules['ignore_null']
        return value not in self.rules['whitelist']
class ChangeRule(CompareRule):
    """Stores the last value seen per query_key and matches when it changes."""

    required_options = frozenset(['query_key', 'compare_key', 'ignore_null'])

    def __init__(self, rules, args=None):
        super(ChangeRule, self).__init__(rules, args)
        # Bug fix: these were class-level mutable dicts, so every
        # ChangeRule instance shared (and leaked) the same state.
        # Per-instance dicts keep independent rules independent.
        self.change_map = {}
        self.occurrence_time = {}

    def compare(self, event):
        key = hashable(lookup_es_key(event, self.rules['query_key']))
        val = lookup_es_key(event, self.rules['compare_key'])
        if not val and self.rules['ignore_null']:
            return False

        changed = False
        # If we have seen this key before, compare it to the new value
        if key in self.occurrences:
            changed = self.occurrences[key] != val
            if changed:
                self.change_map[key] = (self.occurrences[key], val)
                # If using timeframe, only return true if the time delta is < timeframe
                if key in self.occurrence_time:
                    changed = event[self.rules['timestamp_field']] - self.occurrence_time[key] <= self.rules['timeframe']

        # Update the current value and time
        self.occurrences[key] = val
        if 'timeframe' in self.rules:
            self.occurrence_time[key] = event[self.rules['timestamp_field']]
        return changed

    def add_match(self, match):
        # TODO this is not technically correct
        # if the term changes multiple times before an alert is sent
        # this data will be overwritten with the most recent change
        change = self.change_map.get(hashable(lookup_es_key(match, self.rules['query_key'])))
        extra = {}
        if change:
            extra = {'old_value': change[0],
                     'new_value': change[1]}
        # NOTE: items() + items() concatenation is a Python 2 idiom
        # (this module uses iteritems() elsewhere).
        super(ChangeRule, self).add_match(dict(match.items() + extra.items()))
class FrequencyRule(RuleType):
    """ A rule that matches if num_events number of events occur within a timeframe """
    required_options = frozenset(['num_events', 'timeframe'])

    def __init__(self, *args):
        super(FrequencyRule, self).__init__(*args)
        self.ts_field = self.rules.get('timestamp_field', '@timestamp')
        # Events are stored as (event_dict, count) pairs; the timestamp
        # comes from the event half of the pair.
        self.get_ts = lambda event: event[0][self.ts_field]
        self.attach_related = self.rules.get('attach_related', False)

    def add_count_data(self, data):
        """ Add count data to the rule. Data should be of the form {ts: count}. """
        if len(data) > 1:
            raise EAException('add_count_data can only accept one count at a time')
        (ts, count), = data.items()
        event = ({self.ts_field: ts}, count)
        self.occurrences.setdefault('all', EventWindow(self.rules['timeframe'], getTimestamp=self.get_ts)).append(event)
        self.check_for_match('all')

    def add_terms_data(self, terms):
        # NOTE: iteritems() — this module targets Python 2.
        for timestamp, buckets in terms.iteritems():
            for bucket in buckets:
                event = ({self.ts_field: timestamp,
                          self.rules['query_key']: bucket['key']}, bucket['doc_count'])
                self.occurrences.setdefault(bucket['key'], EventWindow(self.rules['timeframe'], getTimestamp=self.get_ts)).append(event)
                self.check_for_match(bucket['key'])

    def add_data(self, data):
        if 'query_key' in self.rules:
            qk = self.rules['query_key']
        else:
            qk = None

        for event in data:
            if qk:
                key = hashable(lookup_es_key(event, qk))
            else:
                # If no query_key, we use the key 'all' for all events
                key = 'all'

            # Store the timestamps of recent occurrences, per key
            self.occurrences.setdefault(key, EventWindow(self.rules['timeframe'], getTimestamp=self.get_ts)).append((event, 1))
            self.check_for_match(key)

    def check_for_match(self, key):
        # Match if, after removing old events, we hit num_events
        if self.occurrences[key].count() >= self.rules['num_events']:
            event = self.occurrences[key].data[-1][0]
            if self.attach_related:
                event['related_events'] = [data[0] for data in self.occurrences[key].data[:-1]]
            self.add_match(event)
            # Drop the window so the same events cannot re-alert.
            self.occurrences.pop(key)

    def garbage_collect(self, timestamp):
        """ Remove all occurrence data that is beyond the timeframe away """
        stale_keys = []
        for key, window in self.occurrences.iteritems():
            if timestamp - window.data[-1][0][self.ts_field] > self.rules['timeframe']:
                stale_keys.append(key)
        # NOTE(review): relies on Python 2's eager map(); under Python 3
        # this would be a no-op generator — confirm target runtime.
        map(self.occurrences.pop, stale_keys)

    def get_match_str(self, match):
        lt = self.rules.get('use_local_time')
        starttime = pretty_ts(dt_to_ts(ts_to_dt(match[self.ts_field]) - self.rules['timeframe']), lt)
        endtime = pretty_ts(match[self.ts_field], lt)
        message = 'At least %d events occurred between %s and %s\n\n' % (self.rules['num_events'],
                                                                         starttime,
                                                                         endtime)
        return message
class AnyRule(RuleType):
    """Matches unconditionally: every event it receives becomes a match."""

    def add_data(self, data):
        for item in data:
            self.add_match(item)
class EventWindow(object):
    """ A container for hold event counts for rules which need a chronological ordered event window. """

    def __init__(self, timeframe, onRemoved=None, getTimestamp=lambda e: e[0]['@timestamp']):
        self.timeframe = timeframe
        self.onRemoved = onRemoved
        self.get_ts = getTimestamp
        # sortedlist keeps (event, count) pairs ordered by timestamp.
        self.data = sortedlist(key=self.get_ts)
        self.running_count = 0

    def clear(self):
        """Empty the window."""
        self.data = sortedlist(key=self.get_ts)
        self.running_count = 0

    def append(self, event):
        """ Add an event to the window. Event should be of the form (dict, count).
        This will also pop the oldest events and call onRemoved on them until the
        window size is less than timeframe. """
        self.data.add(event)
        self.running_count += event[1]

        while self.duration() >= self.timeframe:
            oldest = self.data[0]
            self.data.remove(oldest)
            self.running_count -= oldest[1]
            self.onRemoved and self.onRemoved(oldest)

    def duration(self):
        """ Get the size in timedelta of the window. """
        if not self.data:
            return datetime.timedelta(0)
        return self.get_ts(self.data[-1]) - self.get_ts(self.data[0])

    def count(self):
        """ Count the number of events in the window. """
        return self.running_count

    def __iter__(self):
        return iter(self.data)

    def append_middle(self, event):
        """ Insert an event in timestamp order without expiring old events.

        Bug fix: the previous implementation used collections.deque
        operations (appendleft, rotate) that blist.sortedlist does not
        provide, so any call raised AttributeError. sortedlist.add
        already places the event at the correct position. """
        self.data.add(event)
        self.running_count += event[1]
class SpikeRule(RuleType):
    """ A rule that uses two sliding windows to compare relative event frequency. """
    required_options = frozenset(['timeframe', 'spike_height', 'spike_type'])

    def __init__(self, *args):
        super(SpikeRule, self).__init__(*args)
        self.timeframe = self.rules['timeframe']

        # Per-query-key sliding windows: ref(erence) trails cur(rent);
        # events expiring out of cur are appended into ref.
        self.ref_windows = {}
        self.cur_windows = {}

        self.ts_field = self.rules.get('timestamp_field', '@timestamp')
        self.get_ts = lambda e: e[0][self.ts_field]
        self.first_event = {}
        # Per-key timestamp before which alerting is suppressed after a match.
        self.skip_checks = {}

        self.ref_window_filled_once = False

    def add_count_data(self, data):
        """ Add count data to the rule. Data should be of the form {ts: count}. """
        if len(data) > 1:
            raise EAException('add_count_data can only accept one count at a time')
        for ts, count in data.iteritems():
            self.handle_event({self.ts_field: ts}, count, 'all')

    def add_terms_data(self, terms):
        for timestamp, buckets in terms.iteritems():
            for bucket in buckets:
                count = bucket['doc_count']
                event = {self.ts_field: timestamp,
                         self.rules['query_key']: bucket['key']}
                key = bucket['key']
                self.handle_event(event, count, key)

    def add_data(self, data):
        for event in data:
            qk = self.rules.get('query_key', 'all')
            if qk != 'all':
                qk = hashable(lookup_es_key(event, qk))
                if qk is None:
                    qk = 'other'
            self.handle_event(event, 1, qk)

    def clear_windows(self, qk, event):
        # Reset the state and prevent alerts until windows filled again
        self.cur_windows[qk].clear()
        self.ref_windows[qk].clear()
        self.first_event.pop(qk)
        self.skip_checks[qk] = event[self.ts_field] + self.rules['timeframe'] * 2

    def handle_event(self, event, count, qk='all'):
        self.first_event.setdefault(qk, event)

        self.ref_windows.setdefault(qk, EventWindow(self.timeframe, getTimestamp=self.get_ts))
        # cur's overflow callback feeds ref, chaining the two windows.
        self.cur_windows.setdefault(qk, EventWindow(self.timeframe, self.ref_windows[qk].append, self.get_ts))

        self.cur_windows[qk].append((event, count))

        # Don't alert if ref window has not yet been filled for this key AND
        if event[self.ts_field] - self.first_event[qk][self.ts_field] < self.rules['timeframe'] * 2:
            # ElastAlert has not been running long enough for any alerts OR
            if not self.ref_window_filled_once:
                return
            # This rule is not using alert_on_new_data (with query_key) OR
            if not (self.rules.get('query_key') and self.rules.get('alert_on_new_data')):
                return
            # An alert for this qk has recently fired
            if qk in self.skip_checks and event[self.ts_field] < self.skip_checks[qk]:
                return
        else:
            self.ref_window_filled_once = True

        if self.find_matches(self.ref_windows[qk].count(), self.cur_windows[qk].count()):
            # skip over placeholder events which have count=0
            for match, count in self.cur_windows[qk].data:
                if count:
                    break

            self.add_match(match, qk)
            self.clear_windows(qk, match)

    def add_match(self, match, qk):
        # NOTE: overrides the base add_match with an extra qk argument.
        extra_info = {}
        spike_count = self.cur_windows[qk].count()
        reference_count = self.ref_windows[qk].count()
        extra_info = {'spike_count': spike_count,
                      'reference_count': reference_count}

        # Python 2 idiom: items() returns lists, so + concatenates.
        match = dict(match.items() + extra_info.items())

        super(SpikeRule, self).add_match(match)

    def find_matches(self, ref, cur):
        """ Determines if an event spike or dip happening. """
        # Apply threshold limits
        if (cur < self.rules.get('threshold_cur', 0) or
                ref < self.rules.get('threshold_ref', 0)):
            return False

        spike_up, spike_down = False, False
        if cur <= ref / self.rules['spike_height']:
            spike_down = True
        if cur >= ref * self.rules['spike_height']:
            spike_up = True

        if (self.rules['spike_type'] in ['both', 'up'] and spike_up) or \
           (self.rules['spike_type'] in ['both', 'down'] and spike_down):
            return True
        return False

    def get_match_str(self, match):
        message = 'An abnormal number (%d) of events occurred around %s.\n' % (match['spike_count'],
                                                                               pretty_ts(match[self.rules['timestamp_field']], self.rules.get('use_local_time')))
        message += 'Preceding that time, there were only %d events within %s\n\n' % (match['reference_count'], self.rules['timeframe'])
        return message

    def garbage_collect(self, ts):
        # Windows are sized according to their newest event
        # This is a placeholder to accurately size windows in the absence of events
        for qk in self.cur_windows.keys():
            # If we havn't seen this key in a long time, forget it
            if qk != 'all' and self.ref_windows[qk].count() == 0 and self.cur_windows[qk].count() == 0:
                self.cur_windows.pop(qk)
                self.ref_windows.pop(qk)
                continue
            placeholder = {self.ts_field: ts}
            # The placeholder may trigger an alert, in which case, qk will be expected
            if qk != 'all':
                placeholder.update({self.rules['query_key']: qk})
            self.handle_event(placeholder, 0, qk)
class FlatlineRule(FrequencyRule):
    """ A rule that matches when there is a low number of events given a timeframe. """
    required_options = frozenset(['timeframe', 'threshold'])

    def __init__(self, *args):
        super(FlatlineRule, self).__init__(*args)
        self.threshold = self.rules['threshold']

        # Dictionary mapping query keys to the first events
        self.first_event = {}

    def check_for_match(self, key):
        most_recent_ts = self.get_ts(self.occurrences[key].data[-1])
        if self.first_event.get(key) is None:
            self.first_event[key] = most_recent_ts

        # Don't check for matches until timeframe has elapsed
        if most_recent_ts - self.first_event[key] < self.rules['timeframe']:
            return

        # Match if, after removing old events, we hit num_events
        count = self.occurrences[key].count()
        if count < self.rules['threshold']:
            event = self.occurrences[key].data[-1][0]
            event.update(key=key, count=count)
            self.add_match(event)
            # we after adding this match, let's remove this key so we don't realert on it
            self.occurrences.pop(key)
            del self.first_event[key]

    def get_match_str(self, match):
        ts = match[self.rules['timestamp_field']]
        lt = self.rules.get('use_local_time')
        message = 'An abnormally low number of events occurred around %s.\n' % (pretty_ts(ts, lt))
        message += 'Between %s and %s, there were less than %s events.\n\n' % (pretty_ts(dt_to_ts(ts_to_dt(ts) - self.rules['timeframe']), lt),
                                                                               pretty_ts(ts, lt),
                                                                               self.rules['threshold'])
        return message

    def garbage_collect(self, ts):
        # We add an event with a count of zero to the EventWindow for each key. This will cause the EventWindow
        # to remove events that occurred more than one `timeframe` ago, and call onRemoved on them.
        # NOTE(review): check_for_match may pop from self.occurrences while
        # iterating .keys(); safe on Python 2 (keys() copies), would raise
        # on Python 3 — confirm target runtime.
        for key in self.occurrences.keys():
            self.occurrences.setdefault(key, EventWindow(self.rules['timeframe'], getTimestamp=self.get_ts)).append(({self.ts_field: ts}, 0))
            self.check_for_match(key)
class NewTermsRule(RuleType):
    """ Alerts on a new value in a list of fields. """

    def __init__(self, rule, args=None):
        super(NewTermsRule, self).__init__(rule, args)
        # Maps each watched field to the list of values already seen.
        self.seen_values = {}
        # Allow the use of query_key or fields
        if 'fields' not in self.rules:
            if 'query_key' not in self.rules:
                raise EAException("fields or query_key must be specified")
            self.fields = self.rules['query_key']
        else:
            self.fields = self.rules['fields']
        if not self.fields:
            raise EAException("fields must not be an empty list")
        if type(self.fields) != list:
            self.fields = [self.fields]
        if self.rules.get('use_terms_query') and len(self.fields) != 1:
            raise EAException("use_terms_query can only be used with one field at a time")
        try:
            self.get_all_terms(args)
        except Exception as e:
            # Refuse to start if we cannot get existing terms
            raise EAException('Error searching for existing terms: %s' % (e))

    def get_all_terms(self, args):
        """ Performs a terms aggregation for each field to get every existing term. """
        self.es = Elasticsearch(host=self.rules['es_host'], port=self.rules['es_port'], timeout=self.rules.get('es_conn_timeout', 50))
        window_size = datetime.timedelta(**self.rules.get('terms_window_size', {'days': 30}))

        field_name = {"field": "", "size": 2147483647}  # Integer.MAX_VALUE
        query_template = {"aggs": {"values": {"terms": field_name}}}
        if args and args.start:
            end = ts_to_dt(args.start)
        else:
            end = ts_now()
        start = end - window_size
        if self.rules.get('use_strftime_index'):
            index = format_index(self.rules['index'], start, end)
        else:
            index = self.rules['index']
        time_filter = {self.rules['timestamp_field']: {'lte': dt_to_ts(end), 'gte': dt_to_ts(start)}}
        query_template['filter'] = {'bool': {'must': [{'range': time_filter}]}}
        query = {'aggs': {'filtered': query_template}}

        for field in self.fields:
            # Mutating field_name mutates the shared query in place.
            field_name['field'] = field
            res = self.es.search(body=query, index=index, ignore_unavailable=True, timeout='50s')
            if 'aggregations' in res:
                buckets = res['aggregations']['filtered']['values']['buckets']
                keys = [bucket['key'] for bucket in buckets]
                self.seen_values[field] = keys
                elastalert_logger.info('Found %s unique values for %s' % (len(keys), field))
            else:
                self.seen_values[field] = []
                elastalert_logger.info('Found no values for %s' % (field))

    def add_data(self, data):
        for document in data:
            for field in self.fields:
                value = lookup_es_key(document, field)
                if not value and self.rules.get('alert_on_missing_field'):
                    document['missing_field'] = field
                    self.add_match(document)
                elif value:
                    if value not in self.seen_values[field]:
                        document['new_field'] = field
                        self.add_match(document)
                        self.seen_values[field].append(value)

    def add_terms_data(self, terms):
        # With terms query, len(self.fields) is always 1
        field = self.fields[0]
        for timestamp, buckets in terms.iteritems():
            for bucket in buckets:
                if bucket['doc_count']:
                    if bucket['key'] not in self.seen_values[field]:
                        match = {field: bucket['key'],
                                 self.rules['timestamp_field']: timestamp,
                                 'new_field': field}
                        self.add_match(match)
                        self.seen_values[field].append(bucket['key'])
class CardinalityRule(RuleType):
    """ A rule that matches if cardinality of a field is above or below a threshold within a timeframe """
    required_options = frozenset(['timeframe', 'cardinality_field'])

    def __init__(self, *args):
        super(CardinalityRule, self).__init__(*args)
        if 'max_cardinality' not in self.rules and 'min_cardinality' not in self.rules:
            raise EAException("CardinalityRule must have one of either max_cardinality or min_cardinality")
        self.ts_field = self.rules.get('timestamp_field', '@timestamp')
        self.cardinality_field = self.rules['cardinality_field']
        # {query_key: {term_value: last_seen_timestamp}}
        self.cardinality_cache = {}
        self.first_event = {}
        self.timeframe = self.rules['timeframe']

    def add_data(self, data):
        qk = self.rules.get('query_key')
        for event in data:
            if qk:
                key = hashable(lookup_es_key(event, qk))
            else:
                # If no query_key, we use the key 'all' for all events
                key = 'all'
            self.cardinality_cache.setdefault(key, {})
            self.first_event.setdefault(key, event[self.ts_field])
            if self.cardinality_field in event:
                # Store this timestamp as most recent occurence of the term
                self.cardinality_cache[key][event[self.cardinality_field]] = event[self.ts_field]
                self.check_for_match(key, event)

    def check_for_match(self, key, event, gc=True):
        # Check to see if we are past max/min_cardinality for a given key
        timeframe_elapsed = event[self.ts_field] - self.first_event.get(key, event[self.ts_field]) > self.timeframe
        if (len(self.cardinality_cache[key]) > self.rules.get('max_cardinality', float('inf')) or
                (len(self.cardinality_cache[key]) < self.rules.get('min_cardinality', float('-inf')) and timeframe_elapsed)):
            # If there might be a match, run garbage collect first, as outdated terms are only removed in GC
            # Only run it if there might be a match so it doesn't impact performance
            if gc:
                self.garbage_collect(event[self.ts_field])
                self.check_for_match(key, event, False)
            else:
                self.first_event.pop(key, None)
                self.add_match(event)

    def garbage_collect(self, timestamp):
        """ Remove all occurrence data that is beyond the timeframe away """
        for qk, terms in self.cardinality_cache.items():
            for term, last_occurence in terms.items():
                if timestamp - last_occurence > self.rules['timeframe']:
                    self.cardinality_cache[qk].pop(term)

            # Create a placeholder event for if a min_cardinality match occured
            if 'min_cardinality' in self.rules:
                event = {self.ts_field: timestamp}
                if 'query_key' in self.rules:
                    event.update({self.rules['query_key']: qk})
                self.check_for_match(qk, event, False)

    def get_match_str(self, match):
        lt = self.rules.get('use_local_time')
        starttime = pretty_ts(dt_to_ts(ts_to_dt(match[self.ts_field]) - self.rules['timeframe']), lt)
        endtime = pretty_ts(match[self.ts_field], lt)
        if 'max_cardinality' in self.rules:
            message = ('A maximum of %d unique %s(s) occurred since last alert or between %s and %s\n\n' % (self.rules['max_cardinality'],
                                                                                                            self.rules['cardinality_field'],
                                                                                                            starttime, endtime))
        else:
            message = ('Less than %d unique %s(s) occurred since last alert or between %s and %s\n\n' % (self.rules['min_cardinality'],
                                                                                                         self.rules['cardinality_field'],
                                                                                                         starttime, endtime))
        return message
| {
"content_hash": "0952c12df5c4fc1f16a4381f7d1ee53c",
"timestamp": "",
"source": "github",
"line_count": 640,
"max_line_length": 161,
"avg_line_length": 43.109375,
"alnum_prop": 0.5789054005074302,
"repo_name": "iamrudra/elastalert",
"id": "c09c38aed50667b7743e7164e39933ddeb192fc2",
"size": "27614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elastalert/ruletypes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "365"
},
{
"name": "Python",
"bytes": "239880"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
# 3rd Party
from model_utils.managers import PassThroughManager
class SendableManager(PassThroughManager):
    """Model manager for Sendable models.

    Currently identical to model_utils' PassThroughManager; exists as an
    extension point for queryset helpers.
    """
    pass
| {
"content_hash": "aafcbe93ea0b14f2cb6bf67da11228ab",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 51,
"avg_line_length": 19.875,
"alnum_prop": 0.7987421383647799,
"repo_name": "craiglabenz/django-grapevine",
"id": "9f137934f987a9dbb92e4e840adbfa3957d97fe1",
"size": "159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grapevine/managers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "92191"
},
{
"name": "Python",
"bytes": "167458"
},
{
"name": "Shell",
"bytes": "78"
}
],
"symlink_target": ""
} |
class InlineClass(object):
    """Lightweight object whose attributes come from a given mapping."""

    def __init__(self, dict):
        # The parameter intentionally shadows the builtin 'dict'; the
        # name is kept for interface compatibility. The mapping becomes
        # the instance's attribute dict directly (no copy is made).
        self.__dict__ = dict
| {
"content_hash": "0e962d7b37751aa3764c02af0568ea05",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 29,
"avg_line_length": 28.666666666666668,
"alnum_prop": 0.5697674418604651,
"repo_name": "ingrammicro/fallball-connector",
"id": "04de875708dde6ef1f13b0c0c3805ac980c105b2",
"size": "86",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "API Blueprint",
"bytes": "4386"
},
{
"name": "Dockerfile",
"bytes": "382"
},
{
"name": "Python",
"bytes": "106160"
},
{
"name": "Shell",
"bytes": "475"
}
],
"symlink_target": ""
} |
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v12.services.types import google_ads_service
from .base import GoogleAdsServiceTransport, DEFAULT_CLIENT_INFO
class GoogleAdsServiceGrpcTransport(GoogleAdsServiceTransport):
"""gRPC backend transport for GoogleAdsService.
Service to fetch data and metrics across resources.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
    self,
    *,
    host: str = "googleads.googleapis.com",
    credentials: Optional[ga_credentials.Credentials] = None,
    credentials_file: Optional[str] = None,
    scopes: Optional[Sequence[str]] = None,
    channel: Optional[grpc.Channel] = None,
    api_mtls_endpoint: Optional[str] = None,
    client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
    ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
    client_cert_source_for_mtls: Optional[
        Callable[[], Tuple[bytes, bytes]]
    ] = None,
    quota_project_id: Optional[str] = None,
    client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    always_use_jwt_access: Optional[bool] = False,
) -> None:
    """Instantiate the transport.

    Args:
        host (Optional[str]):
             The hostname to connect to.
        credentials (Optional[google.auth.credentials.Credentials]): The
            authorization credentials to attach to requests. These
            credentials identify the application to the service; if none
            are specified, the client will attempt to ascertain the
            credentials from the environment.
            This argument is ignored if ``channel`` is provided.
        credentials_file (Optional[str]): A file with credentials that can
            be loaded with :func:`google.auth.load_credentials_from_file`.
            This argument is ignored if ``channel`` is provided.
        scopes (Optional(Sequence[str])): A list of scopes. This argument is
            ignored if ``channel`` is provided.
        channel (Optional[grpc.Channel]): A ``Channel`` instance through
            which to make calls.
        api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
            If provided, it overrides the ``host`` argument and tries to create
            a mutual TLS channel with client SSL credentials from
            ``client_cert_source`` or application default SSL credentials.
        client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
            Deprecated. A callback to provide client SSL certificate bytes and
            private key bytes, both in PEM format. It is ignored if
            ``api_mtls_endpoint`` is None.
        ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
            for the grpc channel. It is ignored if ``channel`` is provided.
        client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
            A callback to provide client certificate bytes and private key bytes,
            both in PEM format. It is used to configure a mutual TLS channel. It is
            ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
        quota_project_id (Optional[str]): An optional project to use for billing
            and quota.
        client_info (google.api_core.gapic_v1.client_info.ClientInfo):
            The client info used to send a user-agent string along with
            API requests. If ``None``, then default info will be used.
            Generally, you only need to set this if you're developing
            your own client library.
        always_use_jwt_access (Optional[bool]): Whether self signed JWT should
            be used for service account credentials.

    Raises:
        google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
            creation failed for any reason.
        google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
            and ``credentials_file`` are passed.
    """
    self._grpc_channel = None
    self._ssl_channel_credentials = ssl_channel_credentials
    self._stubs: Dict[str, Callable] = {}

    if api_mtls_endpoint:
        warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
    if client_cert_source:
        warnings.warn(
            "client_cert_source is deprecated", DeprecationWarning
        )

    if channel:
        # Ignore credentials if a channel was passed.
        credentials = False
        # If a channel was explicitly provided, set it.
        self._grpc_channel = channel
        self._ssl_channel_credentials = None
    else:
        # No pre-built channel: work out which SSL credentials to use.
        if api_mtls_endpoint:
            host = api_mtls_endpoint

            # Create SSL credentials with client_cert_source or application
            # default SSL credentials.
            if client_cert_source:
                cert, key = client_cert_source()
                self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
            else:
                self._ssl_channel_credentials = (
                    SslCredentials().ssl_credentials
                )
        else:
            if client_cert_source_for_mtls and not ssl_channel_credentials:
                cert, key = client_cert_source_for_mtls()
                self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )

    # The base transport sets the host, credentials and scopes
    super().__init__(
        host=host,
        credentials=credentials,
        credentials_file=credentials_file,
        scopes=scopes,
        quota_project_id=quota_project_id,
        client_info=client_info,
        always_use_jwt_access=always_use_jwt_access,
    )

    if not self._grpc_channel:
        self._grpc_channel = type(self).create_channel(
            self._host,
            # use the credentials which are saved
            credentials=self._credentials,
            # Set ``credentials_file`` to ``None`` here as
            # the credentials that we saved earlier should be used.
            credentials_file=None,
            scopes=self._scopes,
            ssl_credentials=self._ssl_channel_credentials,
            quota_project_id=quota_project_id,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )

    # Wrap messages. This must be done after self._grpc_channel exists
    self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def search(
self,
) -> Callable[
[google_ads_service.SearchGoogleAdsRequest],
google_ads_service.SearchGoogleAdsResponse,
]:
r"""Return a callable for the search method over gRPC.
Returns all rows that match the search query.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `ChangeEventError <>`__
`ChangeStatusError <>`__ `ClickViewError <>`__
`HeaderError <>`__ `InternalError <>`__ `QueryError <>`__
`QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.SearchGoogleAdsRequest],
~.SearchGoogleAdsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "search" not in self._stubs:
self._stubs["search"] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v12.services.GoogleAdsService/Search",
request_serializer=google_ads_service.SearchGoogleAdsRequest.serialize,
response_deserializer=google_ads_service.SearchGoogleAdsResponse.deserialize,
)
return self._stubs["search"]
@property
def search_stream(
self,
) -> Callable[
[google_ads_service.SearchGoogleAdsStreamRequest],
google_ads_service.SearchGoogleAdsStreamResponse,
]:
r"""Return a callable for the search stream method over gRPC.
Returns all rows that match the search stream query.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `ChangeEventError <>`__
`ChangeStatusError <>`__ `ClickViewError <>`__
`HeaderError <>`__ `InternalError <>`__ `QueryError <>`__
`QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.SearchGoogleAdsStreamRequest],
~.SearchGoogleAdsStreamResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "search_stream" not in self._stubs:
self._stubs["search_stream"] = self.grpc_channel.unary_stream(
"/google.ads.googleads.v12.services.GoogleAdsService/SearchStream",
request_serializer=google_ads_service.SearchGoogleAdsStreamRequest.serialize,
response_deserializer=google_ads_service.SearchGoogleAdsStreamResponse.deserialize,
)
return self._stubs["search_stream"]
@property
def mutate(
self,
) -> Callable[
[google_ads_service.MutateGoogleAdsRequest],
google_ads_service.MutateGoogleAdsResponse,
]:
r"""Return a callable for the mutate method over gRPC.
Creates, updates, or removes resources. This method supports
atomic transactions with multiple types of resources. For
example, you can atomically create a campaign and a campaign
budget, or perform up to thousands of mutates atomically.
This method is essentially a wrapper around a series of mutate
methods. The only features it offers over calling those methods
directly are:
- Atomic transactions
- Temp resource names (described below)
- Somewhat reduced latency over making a series of mutate calls
Note: Only resources that support atomic transactions are
included, so this method can't replace all calls to individual
services.
Atomic Transaction Benefits
---------------------------
Atomicity makes error handling much easier. If you're making a
series of changes and one fails, it can leave your account in an
inconsistent state. With atomicity, you either reach the chosen
state directly, or the request fails and you can retry.
Temp Resource Names
-------------------
Temp resource names are a special type of resource name used to
create a resource and reference that resource in the same
request. For example, if a campaign budget is created with
``resource_name`` equal to ``customers/123/campaignBudgets/-1``,
that resource name can be reused in the ``Campaign.budget``
field in the same request. That way, the two resources are
created and linked atomically.
To create a temp resource name, put a negative number in the
part of the name that the server would normally allocate.
Note:
- Resources must be created with a temp name before the name
can be reused. For example, the previous
CampaignBudget+Campaign example would fail if the mutate
order was reversed.
- Temp names are not remembered across requests.
- There's no limit to the number of temp names in a request.
- Each temp name must use a unique negative number, even if the
resource types differ.
Latency
-------
It's important to group mutates by resource type or the request
may time out and fail. Latency is roughly equal to a series of
calls to individual mutate methods, where each change in
resource type is a new call. For example, mutating 10 campaigns
then 10 ad groups is like 2 calls, while mutating 1 campaign, 1
ad group, 1 campaign, 1 ad group is like 4 calls.
List of thrown errors: `AdCustomizerError <>`__ `AdError <>`__
`AdGroupAdError <>`__ `AdGroupCriterionError <>`__
`AdGroupError <>`__ `AssetError <>`__ `AuthenticationError <>`__
`AuthorizationError <>`__ `BiddingError <>`__
`CampaignBudgetError <>`__ `CampaignCriterionError <>`__
`CampaignError <>`__ `CampaignExperimentError <>`__
`CampaignSharedSetError <>`__ `CollectionSizeError <>`__
`ContextError <>`__ `ConversionActionError <>`__
`CriterionError <>`__ `CustomerFeedError <>`__
`DatabaseError <>`__ `DateError <>`__ `DateRangeError <>`__
`DistinctError <>`__ `ExtensionFeedItemError <>`__
`ExtensionSettingError <>`__ `FeedAttributeReferenceError <>`__
`FeedError <>`__ `FeedItemError <>`__ `FeedItemSetError <>`__
`FieldError <>`__ `FieldMaskError <>`__
`FunctionParsingError <>`__ `HeaderError <>`__ `ImageError <>`__
`InternalError <>`__ `KeywordPlanAdGroupKeywordError <>`__
`KeywordPlanCampaignError <>`__ `KeywordPlanError <>`__
`LabelError <>`__ `ListOperationError <>`__
`MediaUploadError <>`__ `MutateError <>`__
`NewResourceCreationError <>`__ `NullError <>`__
`OperationAccessDeniedError <>`__ `PolicyFindingError <>`__
`PolicyViolationError <>`__ `QuotaError <>`__ `RangeError <>`__
`RequestError <>`__ `ResourceCountLimitExceededError <>`__
`SettingError <>`__ `SharedSetError <>`__ `SizeLimitError <>`__
`StringFormatError <>`__ `StringLengthError <>`__
`UrlFieldError <>`__ `UserListError <>`__
`YoutubeVideoRegistrationError <>`__
Returns:
Callable[[~.MutateGoogleAdsRequest],
~.MutateGoogleAdsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mutate" not in self._stubs:
self._stubs["mutate"] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v12.services.GoogleAdsService/Mutate",
request_serializer=google_ads_service.MutateGoogleAdsRequest.serialize,
response_deserializer=google_ads_service.MutateGoogleAdsResponse.deserialize,
)
return self._stubs["mutate"]
def close(self):
self.grpc_channel.close()
__all__ = ("GoogleAdsServiceGrpcTransport",)
| {
"content_hash": "26370de0f82213b196c39eaf83106f23",
"timestamp": "",
"source": "github",
"line_count": 409,
"max_line_length": 99,
"avg_line_length": 44.775061124694375,
"alnum_prop": 0.6107137006498116,
"repo_name": "googleads/google-ads-python",
"id": "a115263ec5f46c53b93cb89f8c05e21b7896cdff",
"size": "18913",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v12/services/services/google_ads_service/transports/grpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
} |
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import os
import sys
import threading
import time
import warnings
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as framework_device_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gen_control_flow_ops
# Import gradients to resolve circular imports
from tensorflow.python.ops import gradients # pylint: disable=unused-import
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
# `attr` is an optional dependency: tests that use it skip themselves when
# it is absent (see testFetchAttrs / testFetchNestedAttrs below).
try:
  import attr  # pylint:disable=g-import-not-at-top
except ImportError:
  attr = None
# NOTE(mrry): Dummy shape registration for ops used in the tests, since they
# don't have C++ op registrations on which to attach C++ shape fns.
ops.RegisterShape('ConstructionFails')(common_shapes.unknown_shape)
class SessionTest(test_util.TensorFlowTestCase):
  def setUp(self):
    """Configure warnings so every occurrence is raised during a test."""
    super(SessionTest, self).setUp()
    warnings.simplefilter('always')
def testUseExistingGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session(graph=g):
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testUseDefaultGraph(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session():
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testCreate(self):
with session.Session():
inp = constant_op.constant(10.0, shape=[2, 3], name='W1')
copy = array_ops.identity(inp)
# Test with feed.
# TODO(mrry): Investigate why order='F' didn't work.
arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
copy_val = copy.eval({'W1:0': arr})
self.assertAllEqual(arr, copy_val)
# Test without feed.
copy_val = copy.eval()
self.assertAllEqual(
np.asarray(
[[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]], dtype=np.float32),
copy_val)
def testManyCPUs(self):
with session.Session(
config=config_pb2.ConfigProto(device_count={
'CPU': 2, 'GPU': 0
})) as sess:
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
num_cpu_devices = 0
num_gpu_devices = 0
for device in sess.list_devices():
device_type = framework_device_lib.DeviceSpec.from_string(
device.name).device_type
if device_type == 'CPU':
num_cpu_devices += 1
elif device_type == 'GPU':
num_gpu_devices += 1
self.assertEqual(2, num_cpu_devices)
self.assertEqual(0, num_gpu_devices)
def testPerSessionThreads(self):
with session.Session(
config=config_pb2.ConfigProto(use_per_session_threads=True)):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
  def testSessionInterOpThreadPool(self):
    """Exercises session_inter_op_thread_pool across successive sessions.

    The same ConfigProto accretes one more pool before each session, so
    the three sessions run with 1, 2, and 3 configured pools respectively.
    """
    config = config_pb2.ConfigProto()
    # Pool 0: all fields left at their defaults.
    pool = config.session_inter_op_thread_pool.add()
    with session.Session(config=config) as s:
      inp = constant_op.constant(10.0, name='W1')
      results = s.run([inp])
      self.assertAllEqual([10.0], results)
    # Pool 1: an explicit single-thread pool.
    pool = config.session_inter_op_thread_pool.add()
    pool.num_threads = 1
    with session.Session(config=config) as s:
      inp = constant_op.constant(20.0, name='W2')
      results = s.run([inp])
      self.assertAllEqual([20.0], results)
    # Pool 2: a named single-thread pool, selected per-run through
    # RunOptions.inter_op_thread_pool (index of the last pool added).
    pool = config.session_inter_op_thread_pool.add()
    pool.num_threads = 1
    pool.global_name = 't1'
    run_options = config_pb2.RunOptions()
    run_options.inter_op_thread_pool = (
        len(config.session_inter_op_thread_pool) - 1)
    with session.Session(config=config) as s:
      inp = constant_op.constant(30.0, name='W2')
      results = s.run([inp], options=run_options)
      self.assertAllEqual([30.0], results)
def testErrorsReported(self):
with session.Session() as s:
constant_op.constant(10.0, name='W1')
with self.assertRaises(ValueError):
s.run('foo:0')
def testErrorPayload(self):
with session.Session():
a = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(lambda e: e.op == a.op):
a.eval()
def testErrorCodeWithNoNodeDef(self):
with session.Session() as s:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
def exc_predicate(e):
return (e.op is None and e.node_def is None and
e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
# Run with a bogus handle.
s.partial_run('foo', r1, feed_dict={a: 1, b: 2})
  def testErrorBasedOn(self):
    """Errors expose the chain of _original_op back-references (c -> b -> a)."""
    with session.Session() as sess:
      a = constant_op.constant(0.0, shape=[2, 3])
      # NOTE(mrry): The original_op is nonsense, but used here to test that the
      # errors are reported correctly.
      with sess.graph._original_op(a.op):
        b = array_ops.identity(a, name='id')
        with sess.graph._original_op(b.op):
          c = array_ops.placeholder(dtypes.float32)

          def exc_predicate(e):
            # Walk the original_op chain recorded by the nested contexts.
            return (e.op == c.op and e.op._original_op == b.op and
                    e.op._original_op._original_op == a.op)

          with self.assertRaisesOpError(exc_predicate):
            c.eval()
def testFetchNone(self):
with session.Session() as s:
a = constant_op.constant(1.0)
with self.assertRaises(TypeError):
s.run(None)
with self.assertRaises(TypeError):
s.run([None])
with self.assertRaises(TypeError):
s.run({'b': None})
with self.assertRaises(TypeError):
s.run({'a': a, 'b': None})
def testFetchSingleton(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
tensor_runner = sess.make_callable(a)
res = tensor_runner()
self.assertEqual(42.0, res)
op_runner = sess.make_callable(a.op)
res = op_runner()
self.assertEqual(None, res)
def testFetchSingletonByName(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a.name)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
def testFetchList(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
v = variables.Variable([54.0])
assign = v.assign([63.0])
res = sess.run([a, b, c, a.name, assign.op])
self.assertTrue(isinstance(res, list))
self.assertEqual([42.0, None, 44.0, 42.0, None], res)
list_runner = sess.make_callable([a, b, c, a.name, assign.op])
res = list_runner()
self.assertTrue(isinstance(res, list))
self.assertEqual([42.0, None, 44.0, 42.0, None], res)
def testFetchTuple(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run((a, b, c, a.name))
self.assertTrue(isinstance(res, tuple))
self.assertEqual((42.0, None, 44.0, 42.0), res)
tuple_runner = sess.make_callable((a, b, c, a.name))
res = tuple_runner()
self.assertTrue(isinstance(res, tuple))
self.assertEqual((42.0, None, 44.0, 42.0), res)
def testFetchNamedTuple(self):
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
# pylint: enable=invalid-name
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(ABC(a, b, c))
self.assertTrue(isinstance(res, ABC))
self.assertEqual(42.0, res.a)
self.assertEqual(None, res.b)
self.assertEqual(44.0, res.c)
namedtuple_runner = sess.make_callable(ABC(a, b, c))
res = namedtuple_runner()
self.assertTrue(isinstance(res, ABC))
self.assertEqual(42.0, res.a)
self.assertEqual(None, res.b)
self.assertEqual(44.0, res.c)
def testFetchDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run({'a': a, 'b': b, 'c': c})
self.assertTrue(isinstance(res, dict))
self.assertEqual(42.0, res['a'])
self.assertEqual(None, res['b'])
self.assertEqual(44.0, res['c'])
def testFetchOrderedDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(collections.OrderedDict([(3, a), (2, b), (1, c)]))
self.assertTrue(isinstance(res, collections.OrderedDict))
self.assertEqual([3, 2, 1], list(res.keys()))
self.assertEqual(42.0, res[3])
self.assertEqual(None, res[2])
self.assertEqual(44.0, res[1])
  @test_util.run_v1_only('b/120545219')
  def testFetchAttrs(self):
    """attrs-decorated instances are fetched field by field; a field can
    also serve as a feed target."""
    if attr is None:
      self.skipTest('attr module is unavailable.')

    @attr.s
    class SampleAttr(object):
      field1 = attr.ib()
      field2 = attr.ib()

    val1 = np.array([1.2, 3.4, 5.6])
    val2 = np.array([[1, 2], [4, 3]])
    val3 = np.array([10, 20, 30])
    t1 = constant_op.constant(val1)
    t2 = constant_op.constant(val2)
    sample = SampleAttr(t1, t2)
    with session.Session() as sess:
      result = sess.run(sample)
      self.assertIsInstance(result, SampleAttr)
      self.assertAllEqual(val1, result.field1)
      self.assertAllEqual(val2, result.field2)
      # Feeding field1 overrides its value; field2 is unaffected.
      result = sess.run(sample, feed_dict={sample.field1: val3})
      self.assertIsInstance(result, SampleAttr)
      self.assertAllEqual(val3, result.field1)
      self.assertAllEqual(val2, result.field2)
  @test_util.run_v1_only('b/120545219')
  def testFetchNestedAttrs(self):
    """attrs instances nested inside attrs/dicts/lists are fetched
    recursively, preserving each container type."""
    if attr is None:
      self.skipTest('attr module is unavailable.')

    @attr.s
    class SampleAttr(object):
      field0 = attr.ib()
      field1 = attr.ib()

    v1 = 10
    v2 = 20
    v3 = np.float32(1.2)
    v4 = np.float32(3.4)
    v5 = np.float64(100.001)
    v6 = np.float64(-23.451)
    arr1 = np.array([1.2, 6.7, 3.4])
    arr2 = np.array([7, 11, 3])
    # A three-level structure: attrs containing attrs, a dict, and a list.
    sample = SampleAttr(
        SampleAttr(
            SampleAttr(constant_op.constant(v1), constant_op.constant(v2)),
            SampleAttr(constant_op.constant(arr1), constant_op.constant(arr2))),
        {'A': SampleAttr(constant_op.constant(v3), constant_op.constant(v4)),
         'B': [SampleAttr(constant_op.constant(v5), constant_op.constant(v6))]})
    with session.Session() as sess:
      result = sess.run(sample)
      self.assertIsInstance(result, SampleAttr)
      self.assertIsInstance(result.field0, SampleAttr)
      self.assertIsInstance(result.field0.field0, SampleAttr)
      self.assertIsInstance(result.field0.field1, SampleAttr)
      self.assertIsInstance(result.field0.field1.field0, np.ndarray)
      self.assertAllEqual(arr1, result.field0.field1.field0)
      self.assertIsInstance(result.field0.field1.field1, np.ndarray)
      self.assertAllEqual(arr2, result.field0.field1.field1)
      self.assertIsInstance(result.field1, dict)
      self.assertIn('A', result.field1)
      self.assertIn('B', result.field1)
      self.assertIsInstance(result.field1['A'], SampleAttr)
      self.assertAllEqual(
          [v3, v4],
          [result.field1['A'].field0, result.field1['A'].field1])
      self.assertIsInstance(result.field1['B'], list)
      self.assertEqual(1, len(result.field1['B']))
      self.assertIsInstance(result.field1['B'][0], SampleAttr)
      self.assertAllEqual(
          [v5, v6],
          [result.field1['B'][0].field0, result.field1['B'][0].field1])
def testFetchNestingEmptyOneLevel(self):
with session.Session() as sess:
a_val = 11.0
a = constant_op.constant(a_val)
res = sess.run([[], tuple(), {}])
self.assertTrue(isinstance(res, list))
self.assertEquals(3, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(0, len(res[0]))
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(0, len(res[1]))
self.assertTrue(isinstance(res[2], dict))
self.assertEqual(0, len(res[2]))
res = sess.run([[], tuple(), {}, a])
self.assertTrue(isinstance(res, list))
self.assertEquals(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(0, len(res[0]))
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(0, len(res[1]))
self.assertTrue(isinstance(res[2], dict))
self.assertEqual(0, len(res[2]))
self.assertEqual(a_val, res[3])
  def testFetchNestingOneLevel(self):
    """One level of nesting — list/tuple/namedtuple/dict outer containers,
    each holding list/tuple/namedtuple/dict inner fetches — round-trips
    with container types preserved and ops mapped to None."""
    with session.Session() as sess:
      # pylint: disable=invalid-name
      ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
      DEFG = collections.namedtuple('DEFG', ['d', 'e', 'f', 'g'])
      # pylint: enable=invalid-name
      a_val = 42.0
      b_val = None
      c_val = 44.0
      a = constant_op.constant(a_val)
      b = control_flow_ops.no_op()  # An op, not a tensor.
      c = constant_op.constant(c_val)
      # List of lists, tuples, namedtuple, and dict
      res = sess.run([[a, b, c], (a, b, c),
                      ABC(a=a, b=b, c=c), {
                          'a': a.name,
                          'c': c,
                          'b': b
                      }])
      self.assertTrue(isinstance(res, list))
      self.assertEqual(4, len(res))
      self.assertTrue(isinstance(res[0], list))
      self.assertEqual(3, len(res[0]))
      self.assertEqual(a_val, res[0][0])
      self.assertEqual(b_val, res[0][1])
      self.assertEqual(c_val, res[0][2])
      self.assertTrue(isinstance(res[1], tuple))
      self.assertEqual(3, len(res[1]))
      self.assertEqual(a_val, res[1][0])
      self.assertEqual(b_val, res[1][1])
      self.assertEqual(c_val, res[1][2])
      self.assertTrue(isinstance(res[2], ABC))
      self.assertEqual(a_val, res[2].a)
      self.assertEqual(b_val, res[2].b)
      self.assertEqual(c_val, res[2].c)
      self.assertTrue(isinstance(res[3], dict))
      self.assertEqual(3, len(res[3]))
      self.assertEqual(a_val, res[3]['a'])
      self.assertEqual(b_val, res[3]['b'])
      self.assertEqual(c_val, res[3]['c'])
      # Tuple of lists, tuples, namedtuple, and dict
      res = sess.run(([a, b, c], (a.name, b, c), ABC(a=a, b=b, c=c), {
          'a': a,
          'c': c,
          'b': b
      }))
      self.assertTrue(isinstance(res, tuple))
      self.assertEqual(4, len(res))
      self.assertTrue(isinstance(res[0], list))
      self.assertEqual(3, len(res[0]))
      self.assertEqual(a_val, res[0][0])
      self.assertEqual(b_val, res[0][1])
      self.assertEqual(c_val, res[0][2])
      self.assertTrue(isinstance(res[1], tuple))
      self.assertEqual(3, len(res[1]))
      self.assertEqual(a_val, res[1][0])
      self.assertEqual(b_val, res[1][1])
      self.assertEqual(c_val, res[1][2])
      self.assertTrue(isinstance(res[2], ABC))
      self.assertEqual(a_val, res[2].a)
      self.assertEqual(b_val, res[2].b)
      self.assertEqual(c_val, res[2].c)
      self.assertTrue(isinstance(res[3], dict))
      self.assertEqual(3, len(res[3]))
      self.assertEqual(a_val, res[3]['a'])
      self.assertEqual(b_val, res[3]['b'])
      self.assertEqual(c_val, res[3]['c'])
      # Namedtuple of lists, tuples, namedtuples, and dict
      res = sess.run(
          DEFG(
              d=[a, b, c],
              e=(a, b, c),
              f=ABC(a=a.name, b=b, c=c),
              g={
                  'a': a,
                  'c': c,
                  'b': b
              }))
      self.assertTrue(isinstance(res, DEFG))
      self.assertTrue(isinstance(res.d, list))
      self.assertEqual(3, len(res.d))
      self.assertEqual(a_val, res.d[0])
      self.assertEqual(b_val, res.d[1])
      self.assertEqual(c_val, res.d[2])
      self.assertTrue(isinstance(res.e, tuple))
      self.assertEqual(3, len(res.e))
      self.assertEqual(a_val, res.e[0])
      self.assertEqual(b_val, res.e[1])
      self.assertEqual(c_val, res.e[2])
      self.assertTrue(isinstance(res.f, ABC))
      self.assertEqual(a_val, res.f.a)
      self.assertEqual(b_val, res.f.b)
      self.assertEqual(c_val, res.f.c)
      self.assertTrue(isinstance(res.g, dict))
      self.assertEqual(3, len(res.g))
      self.assertEqual(a_val, res.g['a'])
      self.assertEqual(b_val, res.g['b'])
      self.assertEqual(c_val, res.g['c'])
      # Dict of lists, tuples, namedtuples, and dict
      res = sess.run({
          'd': [a, b, c],
          'e': (a, b, c),
          'f': ABC(a=a, b=b, c=c),
          'g': {
              'a': a.name,
              'c': c,
              'b': b
          }
      })
      self.assertTrue(isinstance(res, dict))
      self.assertEqual(4, len(res))
      self.assertTrue(isinstance(res['d'], list))
      self.assertEqual(3, len(res['d']))
      self.assertEqual(a_val, res['d'][0])
      self.assertEqual(b_val, res['d'][1])
      self.assertEqual(c_val, res['d'][2])
      self.assertTrue(isinstance(res['e'], tuple))
      self.assertEqual(3, len(res['e']))
      self.assertEqual(a_val, res['e'][0])
      self.assertEqual(b_val, res['e'][1])
      self.assertEqual(c_val, res['e'][2])
      self.assertTrue(isinstance(res['f'], ABC))
      self.assertEqual(a_val, res['f'].a)
      self.assertEqual(b_val, res['f'].b)
      self.assertEqual(c_val, res['f'].c)
      self.assertTrue(isinstance(res['g'], dict))
      self.assertEqual(3, len(res['g']))
      self.assertEqual(a_val, res['g']['a'])
      self.assertEqual(b_val, res['g']['b'])
      self.assertEqual(c_val, res['g']['c'])
  def testFetchTensorObject(self):
    """Tensor objects can be fetched singly, in lists, dicts, and nested
    lists; equal fetches yield equal values across positions."""
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      results_with_list = s.run([c])
      self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
      results_with_single = s.run(c)
      self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
      results_with_get = c.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
      a_val, b_val = s.run([a, b])  # Test multiple fetches.
      self.assertAllEqual([[1.0, 1.0]], a_val)
      self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
      results_with_dict = s.run({'a': [a], 'b': b, 'z': [a, b]})
      self.assertAllEqual([[1.0, 1.0]], results_with_dict['a'][0])
      self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
                          results_with_dict['b'])
      self.assertAllEqual(results_with_dict['a'][0], results_with_dict['z'][0])
      self.assertAllEqual(results_with_dict['b'], results_with_dict['z'][1])
      # Test nested structures
      results_with_nested_list = s.run([[[a, b], b], a, [a, b]])
      self.assertAllEqual([[1.0, 1.0]], results_with_nested_list[0][0][0])
      self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
                          results_with_nested_list[0][0][1])
      self.assertAllEqual(results_with_nested_list[0][0][0],
                          results_with_nested_list[1])
      self.assertAllEqual(results_with_nested_list[1],
                          results_with_nested_list[2][0])
      self.assertAllEqual(results_with_nested_list[0][0][1],
                          results_with_nested_list[0][1])
      self.assertAllEqual(results_with_nested_list[0][1],
                          results_with_nested_list[2][1])
def testFetchScalar(self):
with session.Session() as s:
for scalar in np.int32, np.int64, np.float16, np.float32, np.float64:
x = scalar(7)
y = scalar(8)
tf_x = constant_op.constant(x, shape=[])
tf_y = constant_op.constant(y)
tf_xy = math_ops.add(tf_x, tf_y)
# Single fetch
xy = s.run(tf_xy)
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# List fetch
xy, = s.run([tf_xy])
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Dict fetch
xy = s.run({'xy': tf_xy})['xy']
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Nested list fetch
xy = s.run([[[tf_xy]], tf_xy, [tf_xy]])
self.assertAllEqual(xy, [[[x + y]], x + y, [x + y]])
self.assertEqual(scalar, type(xy[0][0][0]))
self.assertEqual(scalar, type(xy[1]))
self.assertEqual(scalar, type(xy[2][0]))
def testFetchOperationObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
v = variables.Variable(a, name='testFetchOperationObject_v')
s.run(v.initializer)
v_val = s.run(v)
self.assertAllEqual([[1.0, 1.0]], v_val)
  def testFetchSparseTensor(self):
    """A SparseTensor fetch yields a SparseTensorValue usable both as a
    (indices, values, shape) tuple and via named attributes, in every
    fetch structure (single, tuple, list, dict, nested list)."""
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      shape = np.array([7, 9, 2]).astype(np.int64)
      sp = sparse_tensor.SparseTensor(
          constant_op.constant(indices), constant_op.constant(values),
          constant_op.constant(shape))
      # Single fetch, use as tuple
      sp_out = s.run(sp)
      indices_out, values_out, shape_out = sp_out
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Single fetch, use as SparseTensorValue
      sp_out = s.run(sp)
      self.assertAllEqual(sp_out.indices, indices)
      self.assertAllEqual(sp_out.values, values)
      self.assertAllEqual(sp_out.dense_shape, shape)
      # Tuple fetch, use as tuple
      indices_out, values_out, shape_out = s.run(sp)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # List fetch, use as tuple
      (indices_out, values_out, shape_out), = s.run([sp])
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # List fetch, use as SparseTensorValue
      sp_out, = s.run([sp])
      self.assertAllEqual(sp_out.indices, indices)
      self.assertAllEqual(sp_out.values, values)
      self.assertAllEqual(sp_out.dense_shape, shape)
      # Dict fetch (single value), use as tuple
      indices_out, values_out, shape_out = s.run({'sp': sp})['sp']
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Dict fetch (list value), use as tuple
      (indices_out, values_out, shape_out), = s.run({'sp': [sp]})['sp']
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Dict fetch, use as SparseTensorValue
      sp_out = s.run({'sp': sp})['sp']
      self.assertAllEqual(sp_out.indices, indices)
      self.assertAllEqual(sp_out.values, values)
      self.assertAllEqual(sp_out.dense_shape, shape)
      # Nested list fetch use as tuple
      sp_out = s.run([[[sp]], sp])
      indices_out, values_out, shape_out = sp_out[0][0][0]
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      indices_out, values_out, shape_out = sp_out[1]
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Nested list fetch, use as SparseTensorValue
      sp_out = s.run([[[sp]], sp])
      self.assertAllEqual(sp_out[0][0][0].indices, indices)
      self.assertAllEqual(sp_out[0][0][0].values, values)
      self.assertAllEqual(sp_out[0][0][0].dense_shape, shape)
      self.assertAllEqual(sp_out[1].indices, indices)
      self.assertAllEqual(sp_out[1].values, values)
      self.assertAllEqual(sp_out[1].dense_shape, shape)
  def testFeedSparseTensor(self):
    """Feeds a SparseTensor of placeholders as a tuple or SparseTensorValue."""
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      shape = np.array([7, 9, 2]).astype(np.int64)
      # SparseTensor whose three components are all placeholders, so the
      # whole composite tensor can be used as a feed key.
      sp = sparse_tensor.SparseTensor(
          array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
          array_ops.placeholder(dtype=np.float32, shape=(2,)),
          array_ops.placeholder(dtype=np.int64, shape=(3,)),
      )
      sp_indices = array_ops.identity(sp.indices)
      sp_values = array_ops.identity(sp.values)
      sp_shape = array_ops.identity(sp.dense_shape)
      sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
      # Feed with tuple
      indices_out, values_out, shape_out = s.run(
          [sp_indices, sp_values, sp_shape], {
              sp: (indices, values, shape)
          })
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Feed with tuple, fetch sp directly
      sp_out = s.run(sp, {sp: (indices, values, shape)})
      self.assertAllEqual(sp_out.indices, indices)
      self.assertAllEqual(sp_out.values, values)
      self.assertAllEqual(sp_out.dense_shape, shape)
      # Feed with SparseTensorValue
      indices_out, values_out, shape_out = s.run(
          [sp_indices, sp_values, sp_shape], {
              sp: sparse_tensor.SparseTensorValue(indices, values, shape)
          })
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Feed with SparseTensorValue, fetch SparseTensorValue
      sp2_out = s.run(sp2, {
          sp: sparse_tensor.SparseTensorValue(indices, values, shape)
      })
      self.assertAllEqual(sp2_out.indices, indices)
      self.assertAllEqual(sp2_out.values, values)
      self.assertAllEqual(sp2_out.dense_shape, shape)
      # Feed SparseTensorValue and fetch sp directly.
      sp_out = s.run(sp, {
          sp: sparse_tensor.SparseTensorValue(indices, values, shape)
      })
      self.assertAllEqual(sp_out.indices, indices)
      self.assertAllEqual(sp_out.values, values)
      self.assertAllEqual(sp_out.dense_shape, shape)
def testFeedSparsePlaceholder(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
  def testFeedSparsePlaceholderPartialShape(self):
    """Feeds a sparse_placeholder created with a partially-known dense shape."""
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      shape = np.array([7, 9, 2]).astype(np.int64)
      # First dimension is unknown; the feed supplies the concrete shape.
      sp = array_ops.sparse_placeholder(
          shape=[None, 9, 2], dtype=np.float32, name='placeholder1')
      sp_indices = array_ops.identity(sp.indices)
      sp_values = array_ops.identity(sp.values)
      sp_shape = array_ops.identity(sp.dense_shape)
      sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
      # Feed with tuple
      indices_out, values_out, shape_out = s.run(
          [sp_indices, sp_values, sp_shape], {
              sp: (indices, values, shape)
          })
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Feed with SparseTensorValue
      indices_out, values_out, shape_out = s.run(
          [sp_indices, sp_values, sp_shape], {
              sp: sparse_tensor.SparseTensorValue(indices, values, shape)
          })
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Feed with SparseTensorValue, fetch SparseTensorValue
      sp2_out = s.run(sp2, {
          sp: sparse_tensor.SparseTensorValue(indices, values, shape)
      })
      self.assertAllEqual(sp2_out.indices, indices)
      self.assertAllEqual(sp2_out.values, values)
      self.assertAllEqual(sp2_out.dense_shape, shape)
  def testFeedSparsePlaceholderConstantShape(self):
    """A sparse_placeholder with a fully-constant shape only needs two feeds."""
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      shape = np.array([7, 9, 2]).astype(np.int64)
      sp = array_ops.sparse_placeholder(
          dtype=np.float32, shape=shape, name='placeholder1')
      # With a constant shape the dense_shape component is a constant tensor,
      # evaluable without any feed.
      self.assertAllEqual(sp.dense_shape.eval(session=s), shape)
      self.assertAllEqual(tensor_util.constant_value(sp.dense_shape), shape)
      sp_indices = array_ops.identity(sp.indices)
      sp_values = array_ops.identity(sp.values)
      sp_shape = array_ops.identity(sp.dense_shape)
      # Feed with tuple: only (indices, values) are needed, since the shape
      # is already baked into the graph.
      indices_out, values_out, shape_out = s.run(
          [sp_indices, sp_values, sp_shape], {
              sp: (indices, values)
          })
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
  def testFetchIndexedSlices(self):
    """Fetches an IndexedSlices in single, tuple and list fetch positions."""
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      dense_shape = np.array([7, 9, 2]).astype(np.int64)
      # Note IndexedSlices component order is (values, indices, dense_shape).
      ind = ops.IndexedSlices(
          constant_op.constant(values), constant_op.constant(indices),
          constant_op.constant(dense_shape))
      # Single fetch, use as tuple
      ind_out = s.run(ind)
      values_out, indices_out, dense_shape_out = ind_out
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # Single fetch, use as IndexedSlicesValue
      ind_out = s.run(ind)
      self.assertAllEqual(ind_out.values, values)
      self.assertAllEqual(ind_out.indices, indices)
      self.assertAllEqual(ind_out.dense_shape, dense_shape)
      # Tuple fetch, use as tuple
      values_out, indices_out, dense_shape_out = s.run(ind)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # List fetch, use as tuple
      (values_out, indices_out, dense_shape_out), = s.run([ind])
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # List fetch, use as IndexedSlicesValue
      ind_out, = s.run([ind])
      self.assertAllEqual(ind_out.values, values)
      self.assertAllEqual(ind_out.indices, indices)
      self.assertAllEqual(ind_out.dense_shape, dense_shape)
  def testFeedIndexedSlices(self):
    """Feeds an IndexedSlices of placeholders as tuple or IndexedSlicesValue."""
    with session.Session() as s:
      values = np.array([1.0, 2.0]).astype(np.float32)
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      dense_shape = np.array([7, 9, 2]).astype(np.int64)
      # IndexedSlices built entirely from placeholders so it can be fed.
      ind = ops.IndexedSlices(
          array_ops.placeholder(dtype=np.float32, shape=(2,)),
          array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
          array_ops.placeholder(dtype=np.int64, shape=(3,)),
      )
      ind_values = array_ops.identity(ind.values)
      ind_indices = array_ops.identity(ind.indices)
      ind_dense_shape = array_ops.identity(ind.dense_shape)
      ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
      # Feed with tuple
      values_out, indices_out, dense_shape_out = s.run(
          [ind_values, ind_indices, ind_dense_shape], {
              ind: (values, indices, dense_shape)
          })
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # Feed with IndexedSlicesValue
      values_out, indices_out, dense_shape_out = s.run(
          [ind_values, ind_indices, ind_dense_shape], {
              ind: ops.IndexedSlicesValue(values, indices, dense_shape)
          })
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # Feed with IndexedSlicesValue, fetch IndexedSlicesValue
      ind2_out = s.run(ind2, {
          ind: ops.IndexedSlicesValue(values, indices, dense_shape)
      })
      self.assertAllEqual(ind2_out.values, values)
      self.assertAllEqual(ind2_out.indices, indices)
      self.assertAllEqual(ind2_out.dense_shape, dense_shape)
  def testFetchIndexedSlicesWithoutDenseShape(self):
    """Fetches an IndexedSlices whose dense_shape component is None."""
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      # No dense_shape: the fetched value's third component must also be None.
      dense_shape = None
      ind = ops.IndexedSlices(
          constant_op.constant(values), constant_op.constant(indices), None)
      # Single fetch, use as tuple
      ind_out = s.run(ind)
      values_out, indices_out, dense_shape_out = ind_out
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # Single fetch, use as IndexedSlicesValue
      ind_out = s.run(ind)
      self.assertAllEqual(ind_out.values, values)
      self.assertAllEqual(ind_out.indices, indices)
      self.assertAllEqual(ind_out.dense_shape, dense_shape)
      # Tuple fetch, use as tuple
      values_out, indices_out, dense_shape_out = s.run(ind)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # List fetch, use as tuple
      (values_out, indices_out, dense_shape_out), = s.run([ind])
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # List fetch, use as IndexedSlicesValue
      ind_out, = s.run([ind])
      self.assertAllEqual(ind_out.values, values)
      self.assertAllEqual(ind_out.indices, indices)
      self.assertAllEqual(ind_out.dense_shape, dense_shape)
  def testFeedIndexedSlicesWithoutDenseShape(self):
    """Feeds an IndexedSlices that has no dense_shape component."""
    with session.Session() as s:
      values = np.array([1.0, 2.0]).astype(np.float32)
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      dense_shape = None
      ind = ops.IndexedSlices(
          array_ops.placeholder(dtype=np.float32, shape=(2,)),
          array_ops.placeholder(dtype=np.int64, shape=(2, 3)), None)
      ind_values = array_ops.identity(ind.values)
      ind_indices = array_ops.identity(ind.indices)
      ind2 = ops.IndexedSlices(ind_values, ind_indices)
      # Feed with tuple: only two components, matching the missing shape.
      values_out, indices_out = s.run([ind_values, ind_indices], {
          ind: (values, indices)
      })
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      # Feed with IndexedSlicesValue
      values_out, indices_out = s.run([ind_values, ind_indices], {
          ind: ops.IndexedSlicesValue(values, indices, dense_shape)
      })
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      # Feed with IndexedSlicesValue, fetch IndexedSlicesValue
      ind2_out = s.run(ind2, {
          ind: ops.IndexedSlicesValue(values, indices, dense_shape)
      })
      self.assertAllEqual(ind2_out.values, values)
      self.assertAllEqual(ind2_out.indices, indices)
      self.assertAllEqual(ind2_out.dense_shape, dense_shape)
  def testExtendWithStatelessOperations(self):
    """Ops added after a run() are picked up by a later run() (graph Extend)."""
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      c_val = s.run(c)
      self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
      # These ops are created after the first run, so the session must
      # extend its graph before executing them.
      d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
      e = math_ops.matmul(c, d)
      # Extend will happen here.
      e_val = s.run(e)
      self.assertAllEqual([[24.0]], e_val)
  def testExtendWithStatefulOperations(self):
    """Graph Extend works when stateful ops (variables/assigns) are added."""
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
      v.initializer.run()
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      d = constant_op.constant(3.0, shape=[2, 3])
      e = math_ops.matmul(a, d)
      assign_e_to_v = state_ops.assign(v, e)
      # Extend will happen here.
      e_val = e.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
      # Evaluating e must not have run the assign; v is unchanged until
      # assign_e_to_v is explicitly run.
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      s.run(assign_e_to_v)
      v_val = v.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
  def testExtendWithGroupBy(self):
    """Graph Extend works when a group() op spans pre- and post-Extend ops."""
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      p = variables.Variable(a, name='testExtendWithGroupBy_p')
      a_val = a.eval()  # Force an Extend after this op.
      self.assertAllEqual([[1.0, 1.0]], a_val)
      b = constant_op.constant(2.0, shape=[1, 2])
      q = variables.Variable(b, name='testExtendWithGroupBy_q')
      # Extend will happen here.
      init = control_flow_ops.group(p.initializer, q.initializer)
      s.run(init)
      p_val, q_val = s.run([p, q])
      self.assertAllEqual([[1.0, 1.0]], p_val)
      self.assertAllEqual([[2.0, 2.0]], q_val)
def testTensorGetMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
  @test_util.run_v1_only('b/120545219')
  def testOperationRunMethod(self):
    """Operation.run()/Tensor.eval() execute assigns, with optional feeds."""
    with session.Session():
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[1, 2], name='b')
      v = variables.VariableV1(a, a.dtype)
      assign_a_to_v = state_ops.assign(v, a)
      assign_a_to_v.eval()
      v_val = v.eval()
      self.assertAllEqual([[1.0, 1.0]], v_val)
      assign_b_to_v = state_ops.assign(v, b)
      assign_b_to_v.eval()
      v_val = v.eval()
      self.assertAllEqual([[2.0, 2.0]], v_val)
      # Feeding 'b:0' overrides the constant value used by the assign.
      assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
      v_val = v.eval()
      self.assertAllEqual([[3.0, 3.0]], v_val)
  def testDefaultGraph(self):
    """Entering a Session makes its graph the default for new ops."""
    with session.Session() as s:
      self.assertEqual(ops.get_default_graph(), s.graph)
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      # Ops created inside the session context land in the session's graph.
      self.assertEqual(ops.get_default_graph(), a.graph)
      self.assertEqual(ops.get_default_graph(), b.graph)
      c = math_ops.matmul(a, b)
      v = variables.Variable(c, name='testDefaultGraph_v')
      v.initializer.run()
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      d = constant_op.constant(3.0, shape=[2, 3])
      e = math_ops.matmul(a, d)
      assign_e_to_v = state_ops.assign(v, e)
      e_val = e.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
      # Evaluating e does not run the assign; v keeps its old value.
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      s.run(assign_e_to_v)
      v_val = v.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
      self.assertEqual(ops.get_default_graph(), s.graph)
  def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
    """Worker for testDefaultGraphWithThreads.

    Builds and runs a graph in this thread's thread-local default session.
    Signals `constructed_event` once its graph is built, then blocks on
    `continue_event` so all workers proceed together. `i` uniquifies the
    variable name across threads.
    """
    with session.Session() as s:
      self.assertEqual(ops.get_default_graph(), s.graph)
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      v = variables.Variable(c, name='var_%d' % i)
      # Block here until all threads have constructed their graph.
      constructed_event.set()
      continue_event.wait()
      assign_c_to_v = state_ops.assign(v, c)
      v.initializer.run()
      assign_c_to_v.eval()
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      d = constant_op.constant(3.0, shape=[2, 3])
      e = math_ops.matmul(a, d)
      assign_e_to_v = state_ops.assign(v, e)
      e_val = e.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      s.run(assign_e_to_v)
      v_val = v.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
      self.assertEqual(ops.get_default_graph(), s.graph)
def testDefaultGraphWithThreads(self):
# Fork ten threads that use their thread-local default graph.
threads = []
constructed_events = [threading.Event() for _ in range(10)]
continue_event = threading.Event()
for i, constructed_event in enumerate(constructed_events):
t = self.checkedThread(
target=self._testDefaultGraphInThread,
args=(constructed_event, continue_event, i))
threads.append(t)
for t in threads:
t.start()
for constructed_event in constructed_events:
constructed_event.wait()
continue_event.set()
for t in threads:
t.join()
def testParallelRun(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
ev = threading.Event()
def run_step():
ev.wait()
val = c.eval(session=sess)
self.assertEqual(val, 5.0)
threads = [self.checkedThread(target=run_step) for _ in range(100)]
for t in threads:
t.start()
ev.set()
for t in threads:
t.join()
  @staticmethod
  def _build_graph():
    """Performs non-trivial graph construction, for use from builder threads.

    Sleeps a random fraction of 100ms, then repeatedly creates placeholders,
    a while_loop, gradients, colocation/device/attr scopes, and a GraphDef
    import, exercising many graph-construction code paths concurrently with
    running sessions.
    """
    time.sleep(random.random() * 0.1)
    # Do some graph construction. Try to exercise non-trivial paths.
    graph = ops.get_default_graph()
    gdef = None
    for _ in range(10):
      x = array_ops.placeholder(dtype=dtypes.float32)
      with ops.colocate_with(x):
        y = array_ops.placeholder(dtype=dtypes.float32)
      with ops.device('/cpu:0'):
        z = control_flow_ops.while_loop(
            lambda x, y: x < 10, lambda x, y: (x + 1, x * y), [x, y])
      with graph._attr_scope({'_a': attr_value_pb2.AttrValue(b=False)}):
        gradients_impl.gradients(z, [x, y])
      # First iteration snapshots the graph; later ones re-import it.
      if gdef is None:
        gdef = graph.as_graph_def()
      else:
        importer.import_graph_def(gdef, name='import')
  @test_util.run_v1_only('b/120545219')
  def testParallelRunAndSingleBuild(self):
    """Graph construction in the main thread is safe while 10 threads run."""
    with session.Session() as sess:
      c = constant_op.constant(5.0)
      stop = threading.Event()

      def run_loop():
        # Keep running the session until the builder is done.
        while not stop.is_set():
          time.sleep(random.random() * 0.1)
          self.assertEqual(sess.run(c), 5.0)

      threads = [self.checkedThread(target=run_loop) for _ in range(10)]
      for t in threads:
        t.start()
      SessionTest._build_graph()
      stop.set()
      for t in threads:
        t.join()
  @test_util.run_v1_only('b/120545219')
  def testParallelRunAndParallelBuild(self):
    """Concurrent graph construction and session runs do not interfere."""
    with session.Session() as sess:
      c = constant_op.constant(5.0)
      stop = threading.Event()

      def run_loop():
        while not stop.is_set():
          time.sleep(random.random() * 0.1)
          self.assertEqual(sess.run(c), 5.0)

      run_threads = [self.checkedThread(target=run_loop) for _ in range(10)]
      for t in run_threads:
        t.start()
      # Ten more threads build graphs concurrently with the runs above.
      build_threads = [self.checkedThread(target=SessionTest._build_graph)
                       for _ in range(10)]
      for t in build_threads:
        t.start()
      for t in build_threads:
        t.join()
      # Let the run_threads run until the build threads are finished.
      stop.set()
      for t in run_threads:
        t.join()
  def testRunFeedDict(self):
    """feed_dict accepts Tensor keys, name keys, lists, and nested tuples."""
    with session.Session() as s:
      x = array_ops.zeros([2])
      y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
      self.assertAllEqual(y, 2 * np.ones(2))
      # Feeding by tensor name instead of the Tensor object.
      y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
      self.assertAllEqual(y, 2 * np.ones(2))
      # Feeding a plain Python list is converted to an ndarray.
      y = s.run(2 * x, feed_dict={x: [1, 1]})
      assert (y == 2 * np.ones(2)).all()
      # Test nested tuple keys
      z = (((array_ops.zeros([2]),),), array_ops.zeros([2]),
           (array_ops.zeros([2]),))
      result = [z[0][0][0] * 2, z[1] * 2, z[2][0] * 2]
      values = (((np.array([1, 1]),),), np.array([2, 2]), (np.array([3, 3]),))
      result_value = s.run(result, feed_dict={z: values})
      self.assertAllEqual(result_value[0], 2 * np.ones(2))
      self.assertAllEqual(result_value[1], 2 * np.array([2, 2]))
      self.assertAllEqual(result_value[2], 2 * np.array([3, 3]))
def testGraphDef(self):
with session.Session() as sess:
self.assertProtoEquals('versions { producer: %d min_consumer: %d }' %
(versions.GRAPH_DEF_VERSION,
versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
sess.graph_def)
c = constant_op.constant(5.0, name='c')
self.assertEquals(len(sess.graph_def.node), 1)
d = constant_op.constant(6.0, name='d')
self.assertEquals(len(sess.graph_def.node), 2)
self.assertAllEqual(c.eval(), 5.0)
self.assertAllEqual(d.eval(), 6.0)
e = constant_op.constant(7.0, name='e')
self.assertEquals(len(sess.graph_def.node), 3)
self.assertAllEqual(e.eval(), 7.0)
def testUseAfterClose(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
sess.run(c)
  def testUseAfterCloseConcurrent(self):
    """Closing a session while another thread runs it raises in that thread."""
    with session.Session() as sess:
      c = constant_op.constant(5.0)
      self.assertAllEqual(sess.run(c), 5.0)

      def update_thread():
        # Loops running the session until the main thread closes it, at
        # which point the run raises and the predicate match succeeds.
        with self.assertRaisesWithPredicateMatch(
            RuntimeError,
            lambda e: 'Attempted to use a closed Session.' in str(e)):
          while True:
            sess.run(c)

      t = threading.Thread(target=update_thread)
      t.start()
      time.sleep(0.1)
      sess.close()
      t.join()
def testUseEmptyGraph(self):
with session.Session() as sess:
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run([])
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run(())
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run({})
  @test_util.run_v1_only('b/120545219')
  def testNotEntered(self):
    """A session that was never entered is not the default session."""
    # pylint: disable=protected-access
    self.assertEqual(ops._default_session_stack.get_default(), None)
    # pylint: enable=protected-access
    with ops.device('/cpu:0'):
      sess = session.Session()
      c_1 = constant_op.constant(5.0)
      with sess.graph.as_default():
        c_2 = constant_op.constant(5.0)
      self.assertEqual(c_1.graph, c_2.graph)
      # Explicit sess.run works even without entering the session...
      self.assertEqual(sess.run(c_2), 5.0)
      # ...but eval() needs a default session, which was never registered.
      with self.assertRaisesWithPredicateMatch(
          ValueError, lambda e: 'No default session is registered.' in str(e)):
        c_2.eval()
  @test_util.run_v1_only('b/120545219')
  def testInteractive(self):
    """InteractiveSession installs itself as default, so eval() just works."""
    with ops.device('/cpu:0'):
      sess = session.InteractiveSession()
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval())
      d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
      e = math_ops.matmul(c, d)
      self.assertAllEqual([[24.0]], e.eval())
      sess.close()
  @test_util.run_v1_only('b/120545219')
  def testMultipleInteractiveSessionsWarning(self):
    """A second concurrently-active InteractiveSession emits one warning."""
    # Reinitialize the global state to ensure that the expected warnings will
    # be emitted.
    session.InteractiveSession._active_session_count = 0  # pylint: disable=protected-access

    sess = session.InteractiveSession()
    sess.run(constant_op.constant(4.0))  # Run so that the session is "opened".
    sess.close()
    # Opening and closing interactive sessions serially should not warn.
    with warnings.catch_warnings(record=True) as w:
      sess = session.InteractiveSession()
      sess.close()
    self.assertEqual(0, len(w))

    with warnings.catch_warnings(record=True) as w:
      sess = session.InteractiveSession()
    self.assertEqual(0, len(w))
    # Only the second, overlapping session triggers the warning.
    with warnings.catch_warnings(record=True) as w:
      sess2 = session.InteractiveSession()
    self.assertEqual(1, len(w))
    self.assertTrue('An interactive session is already active. This can cause '
                    'out-of-memory errors in some cases. You must explicitly '
                    'call `InteractiveSession.close()` to release resources '
                    'held by the other session(s).' in str(w[0].message))
    sess2.close()
    sess.close()
  @test_util.run_v1_only('b/120545219')
  def testInteractivePlacePrunedGraph(self):
    """InteractiveSession places only the pruned subgraph it actually runs."""
    sess = session.InteractiveSession()

    # Build a graph that has a bad op in it (no kernel).
    #
    # This test currently does not link in any GPU kernels,
    # which is why placing this is invalid. If at some point
    # GPU kernels are added to this test, some other different
    # op / device combo should be chosen.
    with ops.device('/device:GPU:0'):
      a = constant_op.constant(1.0, shape=[1, 2])

    b = constant_op.constant(1.0, shape=[1, 2])

    # Only run the valid op, this should work.
    b.eval()

    with self.assertRaises(errors.InvalidArgumentError):
      a.eval()
    sess.close()
  @test_util.run_v1_only('b/120545219')
  def testDefaultSessionPlacePrunedGraph(self):
    """A regular Session places the whole graph, so one bad op fails any run."""
    sess = session.Session()

    # Build a graph that has a bad op in it (no kernel).
    #
    # This test currently does not link in any GPU kernels,
    # which is why placing this is invalid. If at some point
    # GPU kernels are added to this test, some other different
    # op / device combo should be chosen.
    with ops.device('/device:GPU:0'):
      _ = constant_op.constant(1.0, shape=[1, 2])

    b = constant_op.constant(1.0, shape=[1, 2])

    with self.assertRaises(errors.InvalidArgumentError):
      # Even though we don't run the bad op, we place the entire
      # graph, which should fail with a non-interactive session.
      sess.run(b)

    sess.close()
def testSharedGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
with session.Session(graph=g) as sess1:
with session.Session(graph=g) as sess2:
self.assertAllEqual(sess1.run(c), sess2.run(c))
def testDuplicatedInputs(self):
with session.Session() as sess:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 3])
a_val, b_val, a2_val = sess.run([a, b, a])
self.assertAllEqual(a_val, [[1.0, 1.0]])
self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
self.assertAllEqual(a2_val, [[1.0, 1.0]])
def testFeedAndFetch(self):
with session.Session() as sess:
for dtype in [
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32,
dtypes.uint8, dtypes.int16, dtypes.int8, dtypes.int64, dtypes.bool,
dtypes.complex64, dtypes.complex128
]:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
np_dtype = dtype.as_numpy_dtype
feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
out_t = array_ops.identity(feed_t)
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes.bool:
np_array = np_array > 0
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
self.assertAllEqual(np_array,
sess.run(out_t, feed_dict={
feed_t: np_array
}))
# Check that we can also get the feed back.
self.assertAllEqual(np_array,
sess.run(feed_t, feed_dict={
feed_t: np_array
}))
# Also check that we can get both back.
out_v, feed_v = sess.run(
[out_t, feed_t], feed_dict={
feed_t: np_array
})
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
feed_fetch_runner = sess.make_callable([out_t, feed_t], [feed_t])
out_v, feed_v = feed_fetch_runner(np_array)
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
  def testMakeCallableOnTensorWithRunOptions(self):
    """A make_callable(accept_options=True) callable honors RunOptions."""
    with session.Session() as sess:
      a = constant_op.constant(42.0)
      tensor_runner = sess.make_callable(a, accept_options=True)
      run_options = config_pb2.RunOptions(
          trace_level=config_pb2.RunOptions.FULL_TRACE)
      run_metadata = config_pb2.RunMetadata()
      self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
      res = tensor_runner(options=run_options, run_metadata=run_metadata)
      self.assertEqual(42.0, res)
      # Full tracing must have populated per-device step stats.
      self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
  def testMakeCallableOnOperationWithRunOptions(self):
    """A callable made from an Operation runs its side effect with tracing."""
    with session.Session() as sess:
      a = variables.Variable(42.0)
      b = state_ops.assign_add(a, 1.0)
      sess.run(a.initializer)
      tensor_runner = sess.make_callable(b.op, accept_options=True)
      run_options = config_pb2.RunOptions(
          trace_level=config_pb2.RunOptions.FULL_TRACE)
      run_metadata = config_pb2.RunMetadata()
      self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
      tensor_runner(options=run_options, run_metadata=run_metadata)
      # The assign_add ran (42.0 -> 43.0) even though nothing was fetched.
      self.assertEqual(43.0, sess.run(a))
      self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
  def testMakeCallableWithFeedListAndRunOptions(self):
    """A callable with a feed_list accepts positional feeds plus options."""
    with session.Session() as sess:
      ph = array_ops.placeholder(dtypes.float32)
      a = math_ops.add(ph, 1.0)
      # Feeds are identified by tensor name in the feed_list.
      tensor_runner = sess.make_callable(
          a, feed_list=[ph.name], accept_options=True)
      run_options = config_pb2.RunOptions(
          trace_level=config_pb2.RunOptions.FULL_TRACE)
      run_metadata = config_pb2.RunMetadata()
      self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
      self.assertAllClose(42.0,
                          tensor_runner(
                              41.0,
                              options=run_options,
                              run_metadata=run_metadata))
      self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testOptimizedMakeCallable(self):
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32)
a = math_ops.add(ph, 1.0)
callable_opts = config_pb2.CallableOptions()
callable_opts.feed.append(ph.name)
callable_opts.fetch.append(a.name)
for _ in range(3):
callable_fn = sess._make_callable_from_options(callable_opts)
for _ in range(5):
self.assertEqual([2.0], callable_fn(np.array(1.0, dtype=np.float32)))
  def testOptimizedMakeCallableWithRunMetadata(self):
    """CallableOptions with FULL_TRACE populates the supplied RunMetadata."""
    with session.Session() as sess:
      ph = array_ops.placeholder(dtypes.float32)
      a = math_ops.add(ph, 1.0)
      callable_opts = config_pb2.CallableOptions()
      callable_opts.feed.append(ph.name)
      callable_opts.fetch.append(a.name)
      callable_opts.run_options.trace_level = config_pb2.RunOptions.FULL_TRACE
      callable_fn = sess._make_callable_from_options(callable_opts)
      run_metadata = config_pb2.RunMetadata()
      self.assertEqual([2.0], callable_fn(np.array(1.0, dtype=np.float32),
                                          run_metadata=run_metadata))
      self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
  def testFeedError(self):
    """Feeding a tf.Tensor as a feed value raises TypeError on every path."""
    with session.Session() as sess:
      feed_t = array_ops.placeholder(dtype=dtypes.float32)
      out_t = array_ops.identity(feed_t)
      # Feed values must be numpy-convertible; a symbolic Tensor is rejected
      # by sess.run, Tensor.eval, and Operation.run alike.
      feed_val = constant_op.constant(5.0)
      with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
        sess.run(out_t, feed_dict={feed_t: feed_val})
      with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
        out_t.eval(feed_dict={feed_t: feed_val})
      with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
        out_t.op.run(feed_dict={feed_t: feed_val})
  def testFeedPrecisionLossError(self):
    """Feeding an int64 that cannot fit an int32 tensor raises TypeError."""
    with session.Session() as sess:
      largest_int64 = np.iinfo(np.int64).max

      feed_int_implicit_int32 = constant_op.constant(1)
      feed_int_explicit_int32 = constant_op.constant(1, dtype=dtypes.int32)

      out_t = constant_op.constant(1.0)

      # Both implicitly- and explicitly-typed int32 tensors must reject a
      # value that would silently lose precision.
      with self.assertRaisesRegexp(TypeError,
                                   'is not compatible with Tensor type'):
        sess.run(out_t, feed_dict={feed_int_implicit_int32: largest_int64})
      with self.assertRaisesRegexp(TypeError,
                                   'is not compatible with Tensor type'):
        sess.run(out_t, feed_dict={feed_int_explicit_int32: largest_int64})
  def testStringFetch(self):
    """Fetches string constants of various shapes, including empty ones."""
    with session.Session():
      for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
        # Total element count for this shape.
        size = 1
        for s in shape:
          size *= s
        # Empty shapes get a plain empty list instead of a reshaped array.
        c_list = np.array(
            [compat.as_bytes(str(i)) for i in xrange(size)],
            dtype=np.object).reshape(shape) if size > 0 else []
        c = constant_op.constant(c_list)
        self.assertAllEqual(c.eval(), c_list)
  def testStringFeed(self):
    """Feeds string arrays of various shapes and fetches them back intact."""
    with session.Session() as sess:
      for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
        size = 1
        for s in shape:
          size *= s
        c_list = np.array(
            [compat.as_bytes(str(i)) for i in xrange(size)],
            dtype=np.object).reshape(shape)
        feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
        c = array_ops.identity(feed_t)
        self.assertAllEqual(sess.run(c, feed_dict={feed_t: c_list}), c_list)
        # The feed itself can also be fetched back.
        self.assertAllEqual(
            sess.run(feed_t, feed_dict={
                feed_t: c_list
            }), c_list)
        c_v, feed_v = sess.run([c, feed_t], feed_dict={feed_t: c_list})
        self.assertAllEqual(c_v, c_list)
        self.assertAllEqual(feed_v, c_list)
def testStringFeedWithNullCharacters(self):
with session.Session():
c_list = [b'\n\x01\x00', b'\n\x00\x01']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0])
self.assertEqual(c_list[1], out[1])
  def testStringFeedWithUnicode(self):
    """Unicode strings survive feeding as a list or as an object ndarray."""
    with session.Session():
      c_list = [
          u'\n\x01\x00', u'\n\x00\x01', u'\u26a3 unicode',
          u'\U0001f60e deal with it'
      ]
      feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[len(c_list)])
      c = array_ops.identity(feed_t)

      out = c.eval(feed_dict={feed_t: c_list})
      # Fetched strings come back as UTF-8 bytes.
      for i in range(len(c_list)):
        self.assertEqual(c_list[i], out[i].decode('utf-8'))

      out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)})
      for i in range(len(c_list)):
        self.assertEqual(c_list[i], out[i].decode('utf-8'))
def testInvalidTargetFails(self):
with self.assertRaisesRegexp(
errors.NotFoundError,
'No session factory registered for the given session options'):
session.Session('INVALID_TARGET')
  def testFetchByNameDifferentStringTypes(self):
    """Fetch names may be str, unicode, bytes, or raw string literals."""
    with session.Session() as sess:
      # Constants created with each string-literal flavor as the name.
      c = constant_op.constant(42.0, name='c')
      d = constant_op.constant(43.0, name=u'd')
      e = constant_op.constant(44.0, name=b'e')
      f = constant_op.constant(45.0, name=r'f')

      # Regardless of the input flavor, Tensor.name is always text.
      self.assertTrue(isinstance(c.name, six.text_type))
      self.assertTrue(isinstance(d.name, six.text_type))
      self.assertTrue(isinstance(e.name, six.text_type))
      self.assertTrue(isinstance(f.name, six.text_type))

      self.assertEqual(42.0, sess.run('c:0'))
      self.assertEqual(42.0, sess.run(u'c:0'))
      self.assertEqual(42.0, sess.run(b'c:0'))
      self.assertEqual(42.0, sess.run(r'c:0'))

      self.assertEqual(43.0, sess.run('d:0'))
      self.assertEqual(43.0, sess.run(u'd:0'))
      self.assertEqual(43.0, sess.run(b'd:0'))
      self.assertEqual(43.0, sess.run(r'd:0'))

      self.assertEqual(44.0, sess.run('e:0'))
      self.assertEqual(44.0, sess.run(u'e:0'))
      self.assertEqual(44.0, sess.run(b'e:0'))
      self.assertEqual(44.0, sess.run(r'e:0'))

      self.assertEqual(45.0, sess.run('f:0'))
      self.assertEqual(45.0, sess.run(u'f:0'))
      self.assertEqual(45.0, sess.run(b'f:0'))
      self.assertEqual(45.0, sess.run(r'f:0'))
  def testIncorrectGraph(self):
    # Running a tensor through a session bound to a *different* graph must
    # raise ValueError, even when both graphs contain an op of the same name.
    with ops.Graph().as_default() as g_1:
      c_1 = constant_op.constant(1.0, name='c')

    with ops.Graph().as_default() as g_2:
      c_2 = constant_op.constant(2.0, name='c')

    self.assertEqual('c', c_1.op.name)
    self.assertEqual('c', c_2.op.name)

    with session.Session(graph=g_1) as sess_1:
      self.assertEqual(1.0, sess_1.run(c_1))
      with self.assertRaises(ValueError):
        sess_1.run(c_2)
      with self.assertRaises(ValueError):
        sess_1.run(c_2.op)

    with session.Session(graph=g_2) as sess_2:
      with self.assertRaises(ValueError):
        sess_2.run(c_1)
      with self.assertRaises(ValueError):
        sess_2.run(c_1.op)
      self.assertEqual(2.0, sess_2.run(c_2))

  def testFeedDictKeyException(self):
    # feed_dict keys must be interpretable as graph elements, not arbitrary
    # strings mapped to values.
    with session.Session() as sess:
      a = constant_op.constant(1.0, dtypes.float32, name='a')
      with self.assertRaisesRegexp(TypeError, 'Cannot interpret feed_dict'):
        sess.run(a, feed_dict={'a': [2.0]})
def testPerStepTrace(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
sess.run(constant_op.constant(1.0))
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(
constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
self.assertEquals(len(run_metadata.step_stats.dev_stats), 1)
def testRunOptionsRunMetadata(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
# all combinations are valid
sess.run(constant_op.constant(1.0), options=None, run_metadata=None)
sess.run(
constant_op.constant(1.0), options=None, run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(
constant_op.constant(1.0), options=run_options, run_metadata=None)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(
constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
self.assertEquals(len(run_metadata.step_stats.dev_stats), 1)
  def testFeedShapeCompatibility(self):
    # A feed whose shape mismatches the placeholder fails client-side with
    # ValueError; an invalid reshape target fails at run time with
    # InvalidArgumentError.
    with session.Session() as sess:
      some_tensor = constant_op.constant([2.0, 2.0, 2.0, 2.0])
      new_shape = constant_op.constant([2, 2])
      reshaped_tensor = array_ops.reshape(some_tensor, new_shape)

      with self.assertRaisesRegexp(ValueError, 'Cannot feed value of shape'):
        sess.run(reshaped_tensor, feed_dict={some_tensor: [1.0, 2.0, 3.0]})

      with self.assertRaisesRegexp(
          errors.InvalidArgumentError,
          'Input to reshape is a tensor with 4 values, '
          'but the requested shape has 21'):
        sess.run(reshaped_tensor, feed_dict={new_shape: [3, 7]})

  def testInferShapesFalse(self):
    # By default no _output_shapes attr is serialized into the GraphDef.
    with ops.Graph().as_default(), ops.device('/cpu:0'):
      a = constant_op.constant([[1, 2]])
      sess = session.Session()
      self.assertFalse('_output_shapes' in sess.graph_def.node[0].attr)
      # Avoid lint error regarding 'unused' var a.
      self.assertTrue(a == a)

  def testInferShapesTrue(self):
    # With GraphOptions(infer_shapes=True) the attr must be present.
    config = config_pb2.ConfigProto(
        graph_options=config_pb2.GraphOptions(infer_shapes=True))
    with ops.Graph().as_default(), ops.device('/cpu:0'):
      a = constant_op.constant([[1, 2]])
      sess = session.Session(config=config)
      self.assertTrue('_output_shapes' in sess.graph_def.node[0].attr)
      # Avoid lint error regarding 'unused' var a.
      self.assertTrue(a == a)

  def testBuildCostModel(self):
    # build_cost_model=100: a cost graph is emitted only on the step where
    # the model is built (0-based step 99 here); soft placement lets the
    # GPU-pinned ops fall back to CPU when no GPU is present.
    run_options = config_pb2.RunOptions()
    config = config_pb2.ConfigProto(
        allow_soft_placement=True,
        graph_options=config_pb2.GraphOptions(build_cost_model=100))
    with session.Session(config=config) as sess:
      with ops.device('/device:GPU:0'):
        a = array_ops.placeholder(dtypes.float32, shape=[])
        b = math_ops.add(a, a)
        c = array_ops.identity(b)
        d = math_ops.multiply(c, c)
      for step in xrange(120):
        run_metadata = config_pb2.RunMetadata()
        sess.run(
            d,
            feed_dict={a: 1.0},
            options=run_options,
            run_metadata=run_metadata)
        if step == 99:
          self.assertTrue(run_metadata.HasField('cost_graph'))
        else:
          self.assertFalse(run_metadata.HasField('cost_graph'))

  def runTestOutputPartitionGraphs(self, sess):
    # Helper: partition graphs appear in metadata iff requested via options.
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    a = constant_op.constant(1)
    run_metadata = config_pb2.RunMetadata()
    sess.run(a, options=run_options, run_metadata=run_metadata)
    self.assertGreater(len(run_metadata.partition_graphs), 0)
    sess.run(a, run_metadata=run_metadata)
    self.assertEqual(len(run_metadata.partition_graphs), 0)
  @test_util.run_v1_only('b/120545219')
  def testOutputPartitionGraphsDirect(self):
    # In-process session variant of the partition-graph check.
    self.runTestOutputPartitionGraphs(session.Session())

  @test_util.run_v1_only('b/120545219')
  def testOutputPartitionGraphsDistributed(self):
    # Distributed (grpc) variant against a local server.
    server = server_lib.Server.create_local_server()
    self.runTestOutputPartitionGraphs(session.Session(server.target))

  def testNonInteractiveSessionNesting(self):
    # Plain sessions form a default-session stack; exiting out of LIFO order
    # must raise.
    sess1 = session.Session()
    sess1_controller = sess1.as_default()
    sess1_controller.__enter__()

    sess2 = session.Session()
    sess2_controller = sess2.as_default()
    sess2_controller.__enter__()

    with self.assertRaisesRegexp(AssertionError, 'Nesting violated'):
      sess1_controller.__exit__(None, None, None)

    # Clean the stack so later tests are unaffected.
    ops._default_session_stack.reset()

  def testInteractiveSessionNesting(self):
    # InteractiveSessions may be created and destroyed in any order.
    sess1 = session.InteractiveSession()
    sess2 = session.InteractiveSession()
    del sess1
    del sess2

  @test_util.run_v1_only('b/120545219')
  def testAsDefault(self):
    c = constant_op.constant(37)
    sess = session.Session()
    with sess.as_default():
      self.assertEqual(37, c.eval())

    # Ensure that the session remains valid even when it is not captured.
    with session.Session().as_default():
      self.assertEqual(37, c.eval())

  def testReentry(self):
    # A session context manager cannot be entered twice.
    sess = session.Session()
    with self.assertRaisesRegexp(RuntimeError, 'not re-entrant'):
      with sess:
        with sess:
          pass

  def testInvalidArgument(self):
    # Constructor arguments are type-checked eagerly.
    with self.assertRaisesRegexp(TypeError, 'target must be a string'):
      session.Session(37)
    with self.assertRaisesRegexp(TypeError, 'config must be a tf.ConfigProto'):
      session.Session(config=37)
    with self.assertRaisesRegexp(TypeError, 'graph must be a tf.Graph'):
      session.Session(graph=37)
  @test_util.run_v1_only('b/120545219')
  def testTimeoutWithShortOperations(self):
    # Ops that finish well within operation_timeout_in_ms must not time out.
    num_epochs = 5
    q = data_flow_ops.FIFOQueue(capacity=50, dtypes=[dtypes.int32], shapes=[()])
    enqueue_op = q.enqueue_many(constant_op.constant([1, 2]))

    # Use a 10-second timeout, which should be longer than any
    # non-blocking enqueue_many op.
    config = config_pb2.ConfigProto(operation_timeout_in_ms=10000)
    with session.Session(config=config) as sess:
      for _ in range(num_epochs):
        sess.run(enqueue_op)
      self.assertEqual(sess.run(q.size()), num_epochs * 2)

  @test_util.run_v1_only('b/120545219')
  def testRegisterFetchAndFeedConversionFunctions(self):
    # User-defined types become fetchable/feedable by registering conversion
    # functions; a second registration for the same type must raise.

    class SquaredTensor(object):

      def __init__(self, tensor):
        self.sq = math_ops.square(tensor)

    fetch_fn = lambda squared_tensor: ([squared_tensor.sq], lambda val: val[0])
    feed_fn1 = lambda feed, feed_val: [(feed.sq, feed_val)]
    feed_fn2 = lambda feed: [feed.sq]

    session.register_session_run_conversion_functions(SquaredTensor, fetch_fn,
                                                      feed_fn1, feed_fn2)
    with self.assertRaises(ValueError):
      session.register_session_run_conversion_functions(SquaredTensor, fetch_fn,
                                                        feed_fn1, feed_fn2)

    with self.cached_session() as sess:
      np1 = np.array([1.0, 1.5, 2.0, 2.5])
      np2 = np.array([3.0, 3.5, 4.0, 4.5])
      squared_tensor = SquaredTensor(np2)

      # Fetching goes through fetch_fn, feeding through feed_fn1, and
      # partial_run exercises the same conversion machinery.
      squared_eval = sess.run(squared_tensor)
      self.assertAllClose(np2 * np2, squared_eval)

      squared_eval = sess.run(
          squared_tensor, feed_dict={
              squared_tensor: np1 * np1
          })
      self.assertAllClose(np1 * np1, squared_eval)

      partial_run = sess.partial_run_setup([squared_tensor], [])
      squared_eval = sess.partial_run(partial_run, squared_tensor)
      self.assertAllClose(np2 * np2, squared_eval)
  @test_util.run_v1_only('b/120545219')
  def testDefaultLogDevicePlacement(self):

    class CaptureStderr(str):
      """Class to capture stderr from C++ shared library."""

      def __enter__(self):
        # A backspace character serves as an end-of-capture sentinel: it is
        # written to stderr on exit, and read() drains until it appears.
        self._esc = compat.as_str('\b')
        self._output = compat.as_str('')
        self._stderr = sys.stderr
        self._fd = self._stderr.fileno()
        self._out_pipe, in_pipe = os.pipe()
        # Save the original io stream.
        self._dup_fd = os.dup(self._fd)
        # Replace the original io stream with in pipe.
        os.dup2(in_pipe, self._fd)
        return self

      def __exit__(self, *args):
        self._stderr.write(self._esc)
        self._stderr.flush()
        self.read()
        os.close(self._out_pipe)
        # Restore the original io stream.
        os.dup2(self._dup_fd, self._fd)

      def read(self):
        # Drain the pipe one byte at a time until EOF or the sentinel.
        while True:
          data = os.read(self._out_pipe, 1)
          if not data or compat.as_str(data) == self._esc:
            break
          self._output += compat.as_str(data)

      def __str__(self):
        return self._output

    # Passing the config to the server, but not the session should still result
    # in logging device placement.
    config = config_pb2.ConfigProto(log_device_placement=True)
    server = server_lib.Server.create_local_server(config=config)
    a = constant_op.constant(1)
    b = constant_op.constant(2)
    c = a + b
    with session.Session(server.target) as sess:
      with CaptureStderr() as log:
        sess.run(c)

    # Ensure that we did log device placement.
    self.assertTrue('/job:local/replica:0/task:0/device:CPU:0' in str(log),
                    str(log))

  @test_util.run_v1_only('b/120545219')
  def testLocalMasterSessionTimeout(self):
    # Test that the timeout passed in a config to the session works correctly.
    config = config_pb2.ConfigProto(operation_timeout_in_ms=1000)
    server = server_lib.Server.create_local_server()
    q = data_flow_ops.FIFOQueue(1, dtypes.float32)
    dequeued_t = q.dequeue()

    with session.Session(server.target, config=config) as sess:
      # Intentionally do not run any enqueue_ops so that dequeue will block
      # until operation_timeout_in_ms.
      with self.assertRaises(errors.DeadlineExceededError):
        sess.run(dequeued_t)

  @test_util.run_v1_only('b/120545219')
  def testDefaultServerTimeout(self):
    # Test that the default server config timeout gets used when no Session
    # config is provided.
    config = config_pb2.ConfigProto(operation_timeout_in_ms=1000)
    server = server_lib.Server.create_local_server(config=config)
    q = data_flow_ops.FIFOQueue(1, dtypes.float32)
    dequeued_t = q.dequeue()

    with session.Session(server.target) as sess:
      # Intentionally do not run any enqueue_ops so that dequeue will block
      # until operation_timeout_in_ms.
      with self.assertRaises(errors.DeadlineExceededError):
        sess.run(dequeued_t)
  def runTestBuildGraphError(self, sess):
    # Ensure that errors from building the graph get propagated.
    data = array_ops.placeholder(dtypes.float32, shape=[])
    # pylint: disable=protected-access
    enter_1 = gen_control_flow_ops.enter(data, 'foo_1', False)
    enter_2 = gen_control_flow_ops.enter(data, 'foo_2', False)
    # pylint: enable=protected-access
    # Adding tensors that live in two different frames is a graph error.
    res = math_ops.add(enter_1, enter_2)
    with self.assertRaisesOpError('has inputs from different frames'):
      sess.run(res, feed_dict={data: 1.0})

  @test_util.run_v1_only('b/120545219')
  def testBuildGraphErrorDirect(self):
    self.runTestBuildGraphError(session.Session())

  @test_util.run_v1_only('b/120545219')
  def testBuildGraphErrorDist(self):
    server = server_lib.Server.create_local_server()
    self.runTestBuildGraphError(session.Session(server.target))

  def testDeviceAttributes(self):
    # _DeviceAttributes exposes name / device_type / memory_limit_bytes /
    # incarnation, and its repr starts with the class name.
    attrs = session._DeviceAttributes(
        '/job:worker/replica:0/task:3/device:CPU:2', 'TYPE', 1337, 1000000)
    self.assertEqual(1337, attrs.memory_limit_bytes)
    self.assertEqual('/job:worker/replica:0/task:3/device:CPU:2', attrs.name)
    self.assertEqual('TYPE', attrs.device_type)
    self.assertEqual(1000000, attrs.incarnation)
    str_repr = '%s' % attrs
    self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr)

  def testDeviceAttributesCanonicalization(self):
    # A legacy '/cpu:1' device spec is canonicalized to '/device:CPU:1'.
    attrs = session._DeviceAttributes('/job:worker/replica:0/task:3/cpu:1',
                                      'TYPE', 1337, 1000000)
    self.assertEqual(1337, attrs.memory_limit_bytes)
    self.assertEqual('/job:worker/replica:0/task:3/device:CPU:1', attrs.name)
    self.assertEqual('TYPE', attrs.device_type)
    self.assertEqual(1000000, attrs.incarnation)
    str_repr = '%s' % attrs
    self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr)
  def runTestAddFunctionToSession(self, target=''):
    """Add a function to a session after the graph has already been run."""

    @function.Defun(dtypes.float32)
    def foo(x):
      return x + 1

    x = constant_op.constant(1.0)
    with session.Session(target=target) as sess:
      sess.run(x)
      # Define and call the function only after the first run.
      f = foo(x)
      result = sess.run(f)
      self.assertEqual(result, 2.0)

  @test_util.run_v1_only('b/120545219')
  def testAddFunctionToSession(self):
    self.runTestAddFunctionToSession()

  @test_util.run_v1_only('b/120545219')
  def testAddFunctionToGrpcSession(self):
    server = server_lib.Server.create_local_server()
    self.runTestAddFunctionToSession(server.target)

  def testOpenAndCloseGrpcSession(self):
    # Opening and closing a grpc session must not raise.
    server = server_lib.Server.create_local_server()
    with session.Session(server.target):
      pass

  def testOpenAndCloseSession(self):
    with session.Session():
      pass

  @test_util.run_v1_only('b/120545219')
  def testAutoConvertAndCheckData(self):
    # Feed values are type-checked against the placeholder dtype.
    with self.cached_session() as sess:
      a = array_ops.placeholder(dtype=dtypes.string)
      with self.assertRaisesRegexp(
          TypeError, r'Type of feed value 1 with type <(\w+) \'int\'> is not'):
        sess.run(a, feed_dict={a: 1})


if __name__ == '__main__':
  googletest.main()
| {
"content_hash": "fa69f144988525793ee6b8da5e264bd4",
"timestamp": "",
"source": "github",
"line_count": 2030,
"max_line_length": 92,
"avg_line_length": 39.21527093596059,
"alnum_prop": 0.6252716469657191,
"repo_name": "jendap/tensorflow",
"id": "da6218663de8b02fcda3f3e67e68bb46e47e914a",
"size": "80296",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/client/session_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "2867"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "606044"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "55619540"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "78675"
},
{
"name": "Go",
"bytes": "1383418"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "900190"
},
{
"name": "Jupyter Notebook",
"bytes": "2510235"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "77367"
},
{
"name": "Objective-C",
"bytes": "16140"
},
{
"name": "Objective-C++",
"bytes": "102889"
},
{
"name": "PHP",
"bytes": "14644"
},
{
"name": "Pascal",
"bytes": "399"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "45358371"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "530065"
},
{
"name": "Smarty",
"bytes": "25609"
}
],
"symlink_target": ""
} |
"""
Emoji Example
===============
A simple example that shows how to include emoji. Note that this example does not seem to work on OS X, but does
work correctly in Ubuntu.
There are 3 important steps to follow to include emoji:
1) Read the text input with io.open instead of the built in open. This ensures that it is loaded as UTF-8
2) Override the regular expression used by word cloud to parse the text into words. The default expression
will only match ascii words
3) Override the default font to something that supports emoji. The included Symbola font includes black and
white outlines for most emoji. There are currently issues with the PIL/Pillow library that seem to prevent
it from functioning correctly on OS X (https://github.com/python-pillow/Pillow/issues/1774), so try this
on ubuntu if you are having problems.
"""
import io
import os
import string
from os import path
from wordcloud import WordCloud
# get data directory (using getcwd() is needed to support running example in generated IPython notebook)
d = path.dirname(__file__) if "__file__" in locals() else os.getcwd()
# It is important to use io.open to correctly load the file as UTF-8
text = io.open(path.join(d, 'happy-emoji.txt')).read()
# the regex used to detect words is a combination of normal words, ascii art, and emojis
# 2+ consecutive letters (also include apostrophes), e.x It's
normal_word = r"(?:\w[\w']+)"
# 2+ consecutive punctuations, e.x. :)
ascii_art = r"(?:[{punctuation}][{punctuation}]+)".format(punctuation=string.punctuation)
# a single character that is not alpha_numeric or other ascii printable
emoji = r"(?:[^\s])(?<![\w{ascii_printable}])".format(ascii_printable=string.printable)
regexp = r"{normal_word}|{ascii_art}|{emoji}".format(normal_word=normal_word, ascii_art=ascii_art,
emoji=emoji)
# Generate a word cloud image
# The Symbola font includes most emoji
font_path = path.join(d, 'fonts', 'Symbola', 'Symbola.ttf')
wc = WordCloud(font_path=font_path, regexp=regexp).generate(text)
# Display the generated image:
# the matplotlib way:
import matplotlib.pyplot as plt
plt.imshow(wc)
plt.axis("off")
plt.show()
| {
"content_hash": "40b3fac2da420dee473a53baa7e3db89",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 113,
"avg_line_length": 45.520833333333336,
"alnum_prop": 0.7263157894736842,
"repo_name": "amueller/word_cloud",
"id": "ab949cabd72d8adf6d32166f8af7fb56da9d9756",
"size": "2207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/emoji.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "1196"
},
{
"name": "Python",
"bytes": "168835"
}
],
"symlink_target": ""
} |
import requests
import sys
def main(args):
    # args: [script_name, month, year] -> builds a slug such as "april-2016".
    newStr = args[1] + "-" + args[2];
    host = "https://www.birchbox.com/shop/birchbox-1/{d}/{d}-bb"
    host = host.replace("{d}", newStr);
    # Probe item pages 1..70 with HEAD requests; print any URL that exists
    # (any status other than 404).  Python 2 script.
    for x in range(1,71):
        response = requests.head(host + str(x))
        if response.status_code != 404:
            print host + str(x) + "\n"
if __name__ == "__main__":
if len(sys.argv) != 3:
print "Usage: python getBBSpoilers.py month year \n example: python getBBSpoilers.py april 2016"
else:
main(sys.argv) | {
"content_hash": "3c5b53d08e062bfaa3648b3437a1b69f",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 104,
"avg_line_length": 30.555555555555557,
"alnum_prop": 0.5581818181818182,
"repo_name": "ThePolymath/PublicUtils",
"id": "7431ee84e3aa8d42b1943d16aced535944b750f3",
"size": "597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "getBBSpoilers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "597"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import conference.models
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a nullable `charge_id` CharField to StripePayment.  Nullable so
    # that rows created before this migration remain valid.

    dependencies = [
        ('conference', '0012_add_vototalk_timestamps'),
    ]

    operations = [
        migrations.AddField(
            model_name='stripepayment',
            name='charge_id',
            field=models.CharField(max_length=100, null=True),
        ),
    ]
| {
"content_hash": "27ad654cc4a3238e6b78fa2811f8647f",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 62,
"avg_line_length": 22.68421052631579,
"alnum_prop": 0.6241299303944315,
"repo_name": "EuroPython/epcon",
"id": "4c1099b4df00cc583458a728ae74445ac0a1a367",
"size": "505",
"binary": false,
"copies": "1",
"ref": "refs/heads/ep2021",
"path": "conference/migrations/0013_add_stripe_charge_id.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "6475"
},
{
"name": "Dockerfile",
"bytes": "609"
},
{
"name": "HTML",
"bytes": "412025"
},
{
"name": "JavaScript",
"bytes": "421281"
},
{
"name": "Makefile",
"bytes": "4679"
},
{
"name": "Python",
"bytes": "991334"
},
{
"name": "Shell",
"bytes": "1182"
}
],
"symlink_target": ""
} |
import numpy as np
import scipy.ndimage
#from scipy.signal import convolve2d
#from scipy.signal import fftconvolve
#from numpy.fft import rfftn
#from numpy.fft import irfftn
import mahotas
import time
import h5py
def _centered(arr, newsize):
# Return the center newsize portion of the array.
newsize = np.asarray(newsize)
currsize = np.array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
class MaxoutMaxpoolLayer(object):
    """Convolution layer followed by maxout (across kernels) and spatial maxpool."""

    def __init__(self, nkernels, ninputs, kernel_size, stride_in, maxpool_size, maxout_size, W, b):
        self.ninputs = ninputs
        self.nkernels = nkernels
        self.kernel_size = kernel_size
        self.maxpool_size = maxpool_size
        self.maxout_size = maxout_size
        self.stride_in = stride_in
        # Pooling multiplies the effective stride of the layer's output.
        self.stride_out = stride_in * maxpool_size
        self.noutputs = nkernels / maxpool_size

        # Size of previous convolution operation (for fft result cache)
        self.prev_conv_size = 0

        # Input / output footprint - set once full network has been constructed
        self.input_footprint = 0
        self.output_footprint = 0

        self.W = W
        self.b = b

    def apply_layer(self, input_image):
        # Calculate feed-forward result
        # input_image is (batch, channels, h, w); channels must match ninputs.
        assert(input_image.shape[1] == self.ninputs)

        #output = np.zeros(output_size, dtype=np.float32)
        # Start from the bias replicated across the batch dimension.
        output = np.tile(self.b, (input_image.shape[0], 1, 1, 1))

        # Margins to crop the full-size convolution down to 'valid' output.
        crop_low = (self.kernel_size - 1) / 2
        crop_high = (self.kernel_size) / 2

        for batchi in range(input_image.shape[0]):

            # Apply convolution
            for channeli in range(self.ninputs):
                channel_input = input_image[batchi, channeli, :, :]
                channel_filters = self.W[:,channeli,:,:]
                for filteri in range(self.nkernels):

                    # Space domain convolution (ndimage)
                    output[batchi, filteri, :, :] += scipy.ndimage.convolve(
                        channel_input,
                        channel_filters[filteri,:,:],
                        mode='constant')[crop_low:-crop_high, crop_low:-crop_high]

                    #output[batchi, filteri, :, :] += self.b[filteri, :, :]

            if batchi % 100 == 99:
                print "MO Layer: Convolution batch {0}, of {1} complete.".format(batchi + 1, input_image.shape[0])

        # Apply maxout
        if self.maxout_size != 1:
            maxout_temp = None
            # Elementwise max over groups of `maxout_size` interleaved kernels.
            for i in xrange(self.maxout_size):
                this_slice = output[:,i::self.maxout_size,:,:]
                if maxout_temp is None:
                    maxout_temp = this_slice
                else:
                    maxout_temp = np.maximum(maxout_temp, this_slice)

            output = maxout_temp

            print "MO Layer: Applied maxout."

        # Apply maxpool
        if self.maxpool_size != 1:
            maxpool_temp = None
            # Spatial max over maxpool_size x maxpool_size neighbourhoods,
            # taken via strided slices at each offset.
            for offset_x in range(self.maxpool_size):
                for offset_y in range(self.maxpool_size):
                    this_slice = output[:, :, offset_x::self.maxpool_size, offset_y::self.maxpool_size]
                    if maxpool_temp is None:
                        maxpool_temp = this_slice
                    else:
                        maxpool_temp = np.maximum(maxpool_temp, this_slice)

            output = maxpool_temp

            print "MO Layer: Applied maxpool."

        print "MO Layer: Complete."

        return output
class SoftmaxLayer(object):
    """Dense layer applied over each spatial window, followed by a softmax."""

    def __init__(self, ninputs, noutputs, kernel_size, stride, W, b):
        self.ninputs = ninputs
        self.noutputs = noutputs
        self.kernel_size = kernel_size
        self.stride_in = stride
        self.stride_out = stride

        # Input / output footprint - set once full network has been constructed
        self.input_footprint = 0
        self.output_footprint = 0

        self.W = W
        self.b = b

    def apply_layer(self, input_image):
        # Calculate feed-forward result
        assert(input_image.shape[1] == self.ninputs)

        nbatches = input_image.shape[0]
        # 'valid' output size after the kernel_size window.
        output_size = (nbatches, self.noutputs, input_image.shape[2] - self.kernel_size + 1, input_image.shape[3] - self.kernel_size + 1)
        output = np.zeros(output_size, dtype=np.float32)

        for batchi in range(nbatches):

            # Apply dot product
            # Roll (c, h, w) -> (h, w, c) so the flattened vector matches
            # the layout of W.
            rolled_input = np.rollaxis(np.rollaxis(input_image[batchi, :, :, :], 2), 2)
            output[batchi, :, :, :] = (np.dot(rolled_input.flatten(), self.W) + self.b).reshape(output_size[1:4])

            if batchi % 100 == 99:
                print "SM Layer: Done batch {0}, of {1}.".format(batchi + 1, nbatches)

        #Apply softmax
        #print output
        # Subtract the per-pixel channel max before exp for numerical
        # stability; esum then normalizes each channel pair to sum to 1.
        maxes = np.amax(output, axis=1).reshape((nbatches, 1, output_size[2], output_size[3]))
        maxes = np.tile(maxes, (1,2,1,1))
        e = np.exp(output - maxes)
        esum = np.sum(e, axis=1).reshape((nbatches, 1, output_size[2], output_size[3]))
        esum = np.tile(esum, (1,2,1,1))
        output = e / esum

        print "SM Layer: Complete."

        return output
class DeepNetwork(object):
    """A stack of layers loaded from an HDF5 network description file."""

    def __init__(self, filename):
        # The HDF5 file provides /layers, an optional /downsample_factor,
        # and one /layer{i}/ group per layer.
        network_h5 = h5py.File(filename, 'r')

        self.nlayers = network_h5['/layers'][...]

        print 'Network has {0} layers.'.format(self.nlayers)

        if '/downsample_factor' in network_h5:
            self.downsample = network_h5['/downsample_factor'][...]
        else:
            self.downsample = 1

        # Post-processing defaults (no blur, no shift) until calibrated.
        self.best_sigma = 0
        self.best_offset = (0,0)

        all_layers = []
        stride_in = 1

        for layer_i in range(self.nlayers):

            layer_string = '/layer{0}/'.format(layer_i)
            layer_type = network_h5[layer_string + 'type'][...]

            if layer_type == 'MaxoutConvC01B':

                layer_weights = network_h5[layer_string + 'weights'][...]
                layer_bias = network_h5[layer_string + 'bias'][...]
                layer_maxpoolsize = int(network_h5[layer_string + 'pool_shape'][...][0])
                layer_maxoutsize = int(network_h5[layer_string + 'num_pieces'][...])

                # Arrange weights as [kernels, inputs, ksize, ksize]
                # ([::-1, ::-1] flips each kernel spatially).
                layer_weights = np.rollaxis(layer_weights, 3, 0)[:,:,::-1,::-1]

                new_layer = MaxoutMaxpoolLayer(
                    layer_weights.shape[0], layer_weights.shape[1], layer_weights.shape[2],
                    stride_in, layer_maxpoolsize, layer_maxoutsize, W=layer_weights, b=layer_bias)

            elif layer_type == 'Softmax':

                layer_weights = network_h5[layer_string + 'weights'][...]
                layer_bias = network_h5[layer_string + 'bias'][...]
                layer_ksize = network_h5[layer_string + 'ksize'][...][0]

                new_layer = SoftmaxLayer(
                    layer_weights.shape[0] / (layer_ksize ** 2), layer_weights.shape[1], layer_ksize,
                    stride_in, W=layer_weights, b=layer_bias)

            else:
                raise Exception("Unknown layer type: {0}".format(layer_type))

            all_layers.append(new_layer)
            stride_in = new_layer.stride_out

        # Calculate network footprint and therefore pad size
        # (walk backwards so each layer records how much input it needs).
        footprint = 1
        for layer in range(self.nlayers-1, -1, -1):
            all_layers[layer].output_footprint = footprint
            if layer == self.nlayers - 1:
                footprint = all_layers[layer].kernel_size
            else:
                footprint = footprint * all_layers[layer].maxpool_size - 1 + all_layers[layer].kernel_size
            all_layers[layer].input_footprint = footprint

        self.all_layers = all_layers
        self.pad_by = int(self.downsample * (footprint // 2))

    def apply_net(self, input_image, perform_downsample=False, perform_pad=False, perform_upsample=False, perform_blur=False, perform_offset=False):
        # Run the whole network over a 2D image; the perform_* flags control
        # optional pre-/post-processing (pad, resample, blur, shift).

        if perform_pad:
            input_image = np.pad(input_image, ((self.pad_by, self.pad_by), (self.pad_by, self.pad_by)), 'symmetric')

        if perform_downsample and self.downsample != 1:
            input_image = np.float32(mahotas.imresize(input_image, 1.0/self.downsample))

        # One batch entry per output pixel: sliding windows over the input.
        nx = input_image.shape[0] - self.all_layers[0].input_footprint + 1
        ny = input_image.shape[1] - self.all_layers[0].input_footprint + 1
        nbatches = nx * ny

        layer_temp = np.zeros((nbatches, 1, self.all_layers[0].input_footprint, self.all_layers[0].input_footprint), dtype=np.float32)

        batchi = 0
        for x in range(nx):
            for y in range(ny):
                #print (x,y)
                layer_temp[batchi, :, :, :] = input_image[x:(x + self.all_layers[0].input_footprint), y:(y + self.all_layers[0].input_footprint)]
                batchi += 1

        assert batchi == nbatches

        for layeri in range(len(self.all_layers)):
            print 'Layer {0}.'.format(layeri)
            layer_temp = self.all_layers[layeri].apply_layer(layer_temp)

        output_image = layer_temp[:,0,0,0].reshape(nx, ny)

        if perform_upsample:
            output_image = np.float32(mahotas.imresize(output_image, self.downsample))

        if perform_blur and self.best_sigma != 0:
            output_image = scipy.ndimage.filters.gaussian_filter(output_image, self.best_sigma)

        if perform_offset:
            #Translate
            output_image = np.roll(output_image, self.best_offset[0], axis=0)
            output_image = np.roll(output_image, self.best_offset[1], axis=1)

        # Crop to valid size
        #output_image = output_image[self.pad_by:-self.pad_by,self.pad_by:-self.pad_by]

        return output_image
| {
"content_hash": "5acb763738850ee1e898a5855a4da6ea",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 148,
"avg_line_length": 37.87258687258687,
"alnum_prop": 0.5713120603527373,
"repo_name": "Rhoana/membrane_cnn",
"id": "22149eb7a2da9e5f5737f85c4738fd723cdc7766",
"size": "9850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maxout/custom_classify/lib_maxout_python.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "278821"
}
],
"symlink_target": ""
} |
from supplychainpy.inventory.units import SKU, Package
class Pallet:
    """A physical pallet: outer dimensions, weight, and the loaded packages."""

    def __init__(self):
        self.length = None
        self.width = None
        self.height = None
        self.weight = None
        self.package = []  # packages currently stacked on this pallet

    def recalculate_weight(self):
        # TODO: derive total weight from the loaded packages.
        pass

    def recalculate_height(self):
        # TODO: derive stack height from the loaded packages.
        pass


class PalletBuilder:
    """Builder that owns the Pallet instance under construction."""

    def __init__(self):
        self.pallet = None

    def make_new_pallet(self):
        self.pallet = Pallet()


class PalletDirector:
    """Directs a PalletBuilder to assemble and populate a Pallet."""

    def __init__(self, pallet_builder: PalletBuilder):
        self._pallet_builder = pallet_builder

    def construct_pallet(self, length: float, width: float, height: float, weight: float):
        """Create a fresh pallet and set all four attributes.

        Bug fix: the original assigned ``weight`` to ``pallet.width``
        (clobbering the width just set) and never stored ``weight`` at all;
        each attribute now receives its own argument.
        """
        self._pallet_builder.make_new_pallet()
        self._pallet_builder.pallet.height = height
        self._pallet_builder.pallet.length = length
        self._pallet_builder.pallet.width = width
        self._pallet_builder.pallet.weight = weight

    def add_package(self, package: "Package"):
        # Append a Package (see supplychainpy.inventory.units) to the load.
        self._pallet_builder.pallet.package.append(package)

    def recalculate_dimensions(self):
        self._pallet_builder.pallet.recalculate_weight()
        self._pallet_builder.pallet.recalculate_height()

    def __add__(self, other):
        self._pallet_builder.pallet.package.append(other)

    @property
    def pallet(self) -> Pallet:
        # Annotation fixed from `Pallet()` (an instance) to the class itself.
        return self._pallet_builder.pallet
class ShippingContainer(Pallet):
    """A shipping container modelled with the same attributes as a Pallet."""

    def __init__(self):
        super().__init__()

# TODO: how to represent a unit of packaging
# TODO: parameter for unit base shape (circular or square) to calculate fill
| {
"content_hash": "d16f1a0f717741262bc88692e8f69501",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 90,
"avg_line_length": 27.224137931034484,
"alnum_prop": 0.642811906269791,
"repo_name": "KevinFasusi/supplychainpy",
"id": "761164ec16568a2435151d8a82cfe78ac7e00dca",
"size": "3144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "supplychainpy/warehouse/storage_unit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13286"
},
{
"name": "Gherkin",
"bytes": "491"
},
{
"name": "HTML",
"bytes": "61502"
},
{
"name": "JavaScript",
"bytes": "691701"
},
{
"name": "Python",
"bytes": "624591"
},
{
"name": "Shell",
"bytes": "1456"
}
],
"symlink_target": ""
} |
import argparse
import os
import subprocess
import pandas as pd
import mummer_two_bins
def get_all_bin_paths():
    """Return the 'bin path' column listing every available reference bin."""
    bin_table = pd.read_csv('./support_files/available_bins.csv')
    return bin_table['bin path']
def query_bin_file_to_list(filepath):
    """Read a text file of bin paths and return them as a list of lines."""
    print('load paths at {}'.format(filepath))
    with open(filepath) as handle:
        contents = handle.read()
    return contents.splitlines()
def analyze_bin_pairs(partial_bins_list, results_dir,
                      preserve_existing=True):
    """Run MUMmer for each query bin against every available reference bin.

    Writes MUMmer output (including parsed .coords files) under
    ``results_dir``; returns nothing.
    """
    reference_bin_paths = get_all_bin_paths()

    for query_bin_path in partial_bins_list:
        # One sub-folder per query bin holds the results for all references.
        result_sub_dir = mummer_two_bins.strip_off_fasta_suffix(
            os.path.basename(query_bin_path))
        particular_results_dir = results_dir + "/" + result_sub_dir
        print("save result in {}".format(results_dir))

        for ref_bin_path in reference_bin_paths:
            expected_coords_path = \
                mummer_two_bins.file_prefix_from_fata_paths(
                    query_bin_path, ref_bin_path, particular_results_dir) + \
                '.coords'
            # Command handed off to a subprocess running mummer_two_bins.
            command = ['python', "./support_files/mummer_two_bins.py",
                       query_bin_path, ref_bin_path, particular_results_dir]

            if not preserve_existing:
                # Regenerate unconditionally, existing files or not.
                subprocess.check_call(command)
                continue

            if os.path.exists(expected_coords_path):
                print("not writing over existing mummer results: "
                      "{}".format(expected_coords_path))
                continue

            print('mummer comamnd to run: \n`{}`'.format(' '.join(command)))
            subprocess.check_call(command)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Run MUMmer using a subset of query bins, and all of the'
'reference bins.')
parser.add_argument("query_bin_path_list", type=str,
help='path to list of paths for query bins')
parser.add_argument("results_dir", type=str,
help='path to save results to')
parser.add_argument("preserve_existing", type=bool, default=True,
help='re-run Mummer even if file exists?')
args = parser.parse_args()
print(args)
query_bins_as_list = query_bin_file_to_list(args.query_bin_path_list)
analyze_bin_pairs(
partial_bins_list=query_bins_as_list,
results_dir=args.results_dir,
preserve_existing=True)
| {
"content_hash": "c59e7778ff70f1a8401acf4e6fef51a1",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 77,
"avg_line_length": 32.8,
"alnum_prop": 0.5810975609756097,
"repo_name": "JanetMatsen/meta4_bins_janalysis",
"id": "188bf1c97824a5a68ddabe9c4c609e9e0432ecd2",
"size": "3280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compare_bins/support_files/mummer_some_pairs.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2467677"
},
{
"name": "Python",
"bytes": "140897"
},
{
"name": "R",
"bytes": "22732"
},
{
"name": "Shell",
"bytes": "24478"
}
],
"symlink_target": ""
} |
from lxml.builder import E
# module packages
from jnpr.junos.cfg import Resource
from jnpr.junos import jxml as JXML
from jnpr.junos.cfg.user_ssh_key import UserSSHKey
class User(Resource):
    """
    [edit system login user <name>]

    Resource name: str
        <name> is the user login name

    Manages resources:
        sshkey, UserSSHKey
    """

    PROPERTIES = [
        "uid",
        "fullname",    # the full-name field
        "userclass",   # user class
        "password",    # write-only clear-text password, will get crypt'd
        "$password",   # read-only crypt'd password
        "$sshkeys",    # read-only names of ssh-keys
    ]

    MANAGES = {"sshkey": UserSSHKey}

    # -----------------------------------------------------------------------
    # XML readers
    # -----------------------------------------------------------------------

    def _xml_at_top(self):
        # <system><login><user><name>...</name></user></login></system>
        return E.system(E.login(E.user(E.name(self._name))))

    def _xml_at_res(self, xml):
        # locate this user's subtree within the reply
        return xml.find(".//user")

    def _xml_to_py(self, has_xml, has_py):
        # common existence bookkeeping is delegated to the base class
        Resource._r_has_xml_status(has_xml, has_py)
        has_py["userclass"] = has_xml.findtext("class")
        Resource.copyifexists(has_xml, "full-name", has_py, "fullname")
        Resource.copyifexists(has_xml, "uid", has_py)
        if "uid" in has_py:
            has_py["uid"] = int(has_py["uid"])
        auth = has_xml.find("authentication")
        if auth is None:
            return
        # crypt'd password, surfaced read-only as $password
        Resource.copyifexists(auth, "encrypted-password", has_py, "$password")
        # ssh public keys, surfaced read-only as (tag, key-name) pairs
        keys = auth.xpath("ssh-rsa | ssh-dsa")
        if keys is not None:
            has_py["$sshkeys"] = [
                (key.tag, key.findtext("name").strip()) for key in keys
            ]

    # -----------------------------------------------------------------------
    # XML property writers
    # -----------------------------------------------------------------------

    def _xml_change_fullname(self, xml):
        field = E("full-name", self["fullname"])
        xml.append(field)
        return True

    def _xml_change_userclass(self, xml):
        field = E("class", self["userclass"])
        xml.append(field)
        return True

    def _xml_change_password(self, xml):
        # clear-text value; the device crypts it on commit
        field = E.authentication(
            E("plain-text-password-value", self["password"]))
        xml.append(field)
        return True

    def _xml_change_uid(self, xml):
        xml.append(E.uid(str(self["uid"])))
        return True

    # -----------------------------------------------------------------------
    # Manager List, Catalog
    # -----------------------------------------------------------------------

    def _r_list(self):
        # names-only fetch keeps the config reply small
        request = E.system(E.login(E.user(JXML.NAMES_ONLY)))
        reply = self.R.get_config(request)
        self._rlist = [elem.text for elem in reply.xpath(".//user/name")]
| {
"content_hash": "3153529494a836ba9b8ff4d72b9a9886",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 87,
"avg_line_length": 31.119565217391305,
"alnum_prop": 0.4841075794621027,
"repo_name": "Juniper/py-junos-eznc",
"id": "549b92ebf683a8771f116dfffe2e4ceade0e1053",
"size": "2883",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/jnpr/junos/cfg/user.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "856"
},
{
"name": "Pascal",
"bytes": "408"
},
{
"name": "Puppet",
"bytes": "2263"
},
{
"name": "Python",
"bytes": "1101958"
},
{
"name": "Ruby",
"bytes": "134"
},
{
"name": "Shell",
"bytes": "1516"
}
],
"symlink_target": ""
} |
from . import scalar
from .exception import ProphyError
from .base_array import base_array
def optional(cls):
    """Wrap *cls* in a subclass flagged as an optional prophy field.

    The wrapper carries the _OPTIONAL_* layout metadata for the u32
    marker (scalar.u32) that precedes the payload.  Bytes, arrays and
    dynamic fields cannot be made optional.
    """
    # Guard order matters: bytes/array classes may not define _DYNAMIC.
    if issubclass(cls, bytes):
        raise ProphyError("optional bytes not implemented")
    if issubclass(cls, base_array):
        raise ProphyError("optional array not implemented")
    if cls._DYNAMIC:
        raise ProphyError("optional dynamic fields not implemented")

    class _optional(cls):
        pass

    # Alignment must accommodate both the u32 marker and the payload.
    marker_alignment = max(scalar.u32._ALIGNMENT, cls._ALIGNMENT)
    _optional._OPTIONAL = True
    _optional._OPTIONAL_ALIGNMENT = marker_alignment
    _optional._OPTIONAL_SIZE = marker_alignment + cls._SIZE
    _optional._optional_type = scalar.u32
    return _optional
| {
"content_hash": "9fafd53b80619b1cad7798e87c3e9d4a",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 78,
"avg_line_length": 33.75,
"alnum_prop": 0.7022222222222222,
"repo_name": "cislaa/prophy",
"id": "37154c34554dff38515b6823b5a113e964321f09",
"size": "675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prophy/optional.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "108198"
},
{
"name": "Python",
"bytes": "512886"
},
{
"name": "Shell",
"bytes": "131"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
import xml.etree.ElementTree as ET
import glob
import sys
import re
import os
import os.path
import argparse
import subprocess
import shutil
# One-line program description shown in the argparse --help output.
desc= """run tests"""
def warn(*messages):
    """Print *messages* to stderr, mirroring print() semantics."""
    print(*messages, file=sys.stderr)
# Placeholder patterns expanded by replaceVars().  Raw strings so the
# backslash reaches the regex engine literally -- the previous "\$dir"
# relied on an invalid escape sequence, which newer Pythons warn about
# and will eventually reject.
dirRe = re.compile(r"\$dir")
outdirRe = re.compile(r"\$outDir")

# Default test set: every test.xml one directory below the cwd.
tests = glob.glob("*/test.xml")

parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-debug', dest='debug', action='store_true', default=False,
                    help='Print additional information')
parser.add_argument(dest='tests', metavar='xmlPaths',
                    default=tests, nargs='*',
                    help='One or more full path to test.xml files to execute. '+
                    'If none given all will be executed.')
args = parser.parse_args()
debug = args.debug
def replaceVars(dir, str):
    """Expand the $outDir and $dir placeholders in *str*.

    $outDir becomes $dir/out first, so a single $dir substitution with
    *dir* afterwards resolves both placeholders.
    """
    return dirRe.sub(dir, outdirRe.sub('$dir/out', str))
def printResult(success, cpOpts, dir, wdir, out, err, ref):
    """Report one comparison outcome.

    On failure, print where the test ran, where stderr went, and a ready
    to paste `cp` command that would promote the new output to reference.
    """
    if success:
        print("Success\t%s %s" % (dir, out))
        return
    print("FAILED:\t%s %s" % (dir, out))
    if wdir:
        if not wdir.startswith("/"):
            wdir = "%s/%s" % (dir, wdir)
        print(" wdir: %s" % wdir)
    else:
        wdir = dir
    if "-R" in cpOpts:
        # Directory comparison: copy every produced file into the
        # reference directory instead of a single file.
        if out == '.':
            out = ""  # out was just "." -- drop it from the path
        ref = ref + '/'
        out = out + '/*'
    print(" stderr in: %s/%s" % (wdir, err))
    refPath = ref if ref.startswith("/") else wdir + "/" + ref
    print(" if correct issue: cp %s %s/%s %s" %
          (cpOpts, wdir, out, refPath))
# Remember the starting directory so each test can chdir back afterwards.
startDir = os.getcwd()
for tXml in args.tests:
    # Accept either a path to test.xml or its containing directory.
    if not tXml.endswith(".xml") : tXml = tXml + "/test.xml"
    dir=os.path.dirname(tXml)
    absDir=os.path.abspath(dir)
    # Start from a clean per-test output directory.
    shutil.rmtree(dir + '/out', True)
    os.mkdir(dir + '/out')
    try:
        root = ET.parse(tXml).getroot()
        # iterate over <test> elements
        for test in root.iter('test'):
            inp =replaceVars(absDir, test.attrib['in'] )
            out =replaceVars(absDir, test.attrib['out'] )
            # collect command from any innertext
            com = test.text.strip()
            for t in test.iter():
                com = com + " " + t.tail.strip()
            com =replaceVars(absDir, com.strip() )
            # stderr log: output filename with its extension replaced by .err
            err=re.sub('\.[^.]+$','.err', out)
            if not inp or not out or not com:
                print("%s: inp, out or command missing! (%s, %s, %s)\n"
                      % (test,inp,out,com))
                continue
            os.chdir(dir)
            wdir = ''
            if 'workDir' in test.attrib:
                wdir = replaceVars( absDir, test.attrib['workDir'] )
                os.chdir(replaceVars(absDir, test.attrib['workDir']))
            errf = open( err, 'w+' )
            # run all init scripts
            for init in test.iter('init'):
                icom = replaceVars(absDir, init.text.strip() )
                if debug: print("EXECUTING: %s" % icom)
                subprocess.call(["tcsh", "-fec", icom], stdout=errf, stderr=errf )
            # execute command
            # perl clears out 4th row in sdf file to avoid differences on timestamp
            com="cat %s \\\n|%s\\\n" % (inp, com)
            com="(%s)\\\n|perl -pe 's/^( -OEChem-).+/$1/'" % (com)
            if debug: print("EXECUTING: %s" % com)
            sout = open( out, 'w+' )
            retCode = subprocess.call(["tcsh", "-fec", com],
                                      stdout=sout, stderr=errf)
            sout.close()
            if retCode != 0:
                print("FAILED with status %d:\t%s %s" % (retCode, dir, out))
                if wdir :
                    if not wdir.startswith("/"):
                        fwdir = "%s/%s" % (dir,wdir)
                    # NOTE(review): when wdir is an absolute path, fwdir is
                    # never assigned before this print -- looks like a latent
                    # NameError; presumably `fwdir = wdir` was intended as
                    # the fallback (cf. printResult above).  TODO confirm.
                    print(" wdir: %s" % fwdir)
                else:
                    fwdir = dir
                print(" stderr in: %s/%s" % (fwdir,err))
                os.chdir(startDir)
                continue
            # execute postprocess script
            for post in test.iter('postprocess'):
                pcom = replaceVars(absDir, post.text.strip() )
                if debug: print("EXECUTING: %s" % pcom)
                subprocess.call(["tcsh", "-fec", pcom], stdout=errf, stderr=errf )
            print("\n\n===========================================", file=errf)
            print("Diffs and checks", file=errf)
            print("===========================================", file=errf)
            # iterate over <diff> elements
            for diffs in test.iter('diff'):
                ref =replaceVars(absDir, diffs.attrib['ref'] )
                if debug: print("EXECUTING: diff %s %s" % (ref, out))
                ret = subprocess.call(["diff", ref, out], stdout=errf, stderr=errf )
                printResult( ret == 0, "", dir, wdir, out, err, ref)
            # directory-level comparisons via <diffDir> elements
            for diffs in test.iter('diffDir'):
                refDir = replaceVars(absDir, diffs.attrib['refDir'] )
                outDir = replaceVars(absDir, diffs.attrib['outDir'] )
                opts = diffs.attrib.get('opts', '') # opts is optional
                opts = replaceVars(absDir, opts )
                com = "diff %s '%s' '%s'" % (opts, refDir, outDir)
                if debug: print("EXECUTING: %s" % com)
                ret = subprocess.call(["tcsh", "-fec", com],
                                      stdout=errf, stderr=errf )
                printResult( ret == 0, "-R", dir, wdir, outDir, err, refDir);
            errf.close()
            os.chdir(startDir)
    except ET.ParseError as e:
        warn("\nError parsing: " + tXml)
        warn(" %s" % e)
| {
"content_hash": "e8e20de4d937d3deb66ce57eec4d01a7",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 80,
"avg_line_length": 34.22981366459627,
"alnum_prop": 0.5200508074759572,
"repo_name": "chemalot/chemalot",
"id": "9f990647b27853829bd8195763855ee2fe419fcf",
"size": "5533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/runTests.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "GAP",
"bytes": "39505"
},
{
"name": "Java",
"bytes": "1494589"
},
{
"name": "Pawn",
"bytes": "17132"
},
{
"name": "Perl",
"bytes": "170498"
},
{
"name": "Python",
"bytes": "31307"
},
{
"name": "R",
"bytes": "14207"
},
{
"name": "Shell",
"bytes": "30114"
}
],
"symlink_target": ""
} |
""" @package antlr3
@brief ANTLR3 runtime package
This module contains all support classes, which are needed to use recognizers
generated by ANTLR3.
@mainpage
\note Please be warned that the line numbers in the API documentation do not
match the real locations in the source code of the package. This is an
unintended artifact of doxygen, which I could only convince to use the
correct module names by concatenating all files from the package into a single
module file...
Here is a little overview over the most commonly used classes provided by
this runtime:
@section recognizers Recognizers
These recognizers are baseclasses for the code which is generated by ANTLR3.
- BaseRecognizer: Base class with common recognizer functionality.
- Lexer: Base class for lexers.
- Parser: Base class for parsers.
- tree.TreeParser: Base class for %tree parser.
@section streams Streams
Each recognizer pulls its input from one of the stream classes below. Streams
handle stuff like buffering, look-ahead and seeking.
A character stream is usually the first element in the pipeline of a typical
ANTLR3 application. It is used as the input for a Lexer.
- ANTLRStringStream: Reads from a string objects. The input should be a unicode
object, or ANTLR3 will have trouble decoding non-ascii data.
- ANTLRFileStream: Opens a file and read the contents, with optional character
decoding.
- ANTLRInputStream: Reads the data from a file-like object, with optional
character decoding.
A Parser needs a TokenStream as input (which in turn is usually fed by a
Lexer):
- CommonTokenStream: A basic and most commonly used TokenStream
implementation.
- TokenRewriteStream: A modification of CommonTokenStream that allows the
stream to be altered (by the Parser). See the 'tweak' example for a usecase.
And tree.TreeParser finally fetches its input from a tree.TreeNodeStream:
- tree.CommonTreeNodeStream: A basic and most commonly used tree.TreeNodeStream
implementation.
@section tokenstrees Tokens and Trees
A Lexer emits Token objects which are usually buffered by a TokenStream. A
Parser can build a Tree, if the output=AST option has been set in the grammar.
The runtime provides these Token implementations:
- CommonToken: A basic and most commonly used Token implementation.
- ClassicToken: A Token object as used in ANTLR 2.x, used for %tree
construction.
Tree objects are wrapper for Token objects.
- tree.CommonTree: A basic and most commonly used Tree implementation.
A tree.TreeAdaptor is used by the parser to create tree.Tree objects for the
input Token objects.
- tree.CommonTreeAdaptor: A basic and most commonly used tree.TreeAdaptor
implementation.
@section Exceptions
RecognitionException are generated, when a recognizer encounters incorrect
or unexpected input.
- RecognitionException
- MismatchedRangeException
- MismatchedSetException
- MismatchedNotSetException
.
- MismatchedTokenException
- MismatchedTreeNodeException
- NoViableAltException
- EarlyExitException
- FailedPredicateException
.
.
A tree.RewriteCardinalityException is raised, when the parser hits a
cardinality mismatch during AST construction. Although this is basically a
bug in your grammar, it can only be detected at runtime.
- tree.RewriteCardinalityException
- tree.RewriteEarlyExitException
- tree.RewriteEmptyStreamException
.
.
"""
# tree.RewriteRuleElementStream
# tree.RewriteRuleSubtreeStream
# tree.RewriteRuleTokenStream
# CharStream
# DFA
# TokenSource
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__version__ = '3.1.3'


def version_str_to_tuple(version_str):
    """Parse a version string into a comparable 4-tuple.

    Accepts forms like '3.1', '3.1.3' or '3.1b2' and returns
    (major, minor, patch, beta).  A missing patch defaults to 0 and a
    missing beta marker to sys.maxsize, so a final release compares
    greater than any of its betas.  'HEAD' sorts after everything.

    Raises ValueError for unparseable strings.
    """
    import re
    import sys

    # Bug fix: sys.maxint is Python-2-only and crashes on Python 3;
    # sys.maxsize exists on Python 2.6+ and 3.x alike.
    if version_str == 'HEAD':
        return (sys.maxsize, sys.maxsize, sys.maxsize, sys.maxsize)

    m = re.match(r'(\d+)\.(\d+)(\.(\d+))?(b(\d+))?', version_str)
    if m is None:
        raise ValueError("Bad version string %r" % version_str)

    major = int(m.group(1))
    minor = int(m.group(2))
    patch = int(m.group(4) or 0)
    beta = int(m.group(6) or sys.maxsize)

    return (major, minor, patch, beta)


# The runtime's own version, as a string and as a comparable tuple.
runtime_version_str = __version__
runtime_version = version_str_to_tuple(runtime_version_str)
from .constants import *
from .dfa import *
from .exceptions import *
from .recognizers import *
from .streams import *
from .tokens import *
| {
"content_hash": "a1409a1deb0b6df63a124e515315624c",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 79,
"avg_line_length": 33.473684210526315,
"alnum_prop": 0.769916142557652,
"repo_name": "ateska/pyless",
"id": "da56d922de7850066bf4df8805d5864cd1b55758",
"size": "5724",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "pyless/antlr3/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1245136"
},
{
"name": "Shell",
"bytes": "210"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import sys
import traceback
from bloom.logging import debug
from bloom.logging import error
from bloom.logging import fmt
from bloom.logging import info
try:
from catkin_pkg.packages import find_packages
except ImportError:
debug(traceback.format_exc())
error("catkin_pkg was not detected, please install it.", exit=True)
formula_template = """\
# This formula is generated, you should not edit it directly.
require 'formula'
class :{camelcase_name} < Formula
homepage ':{homepage}'
url ':{url}'
version ':{version}'
sha1 ':{sha1}'
:{depends}
def install
system "cmake", *std_cmake_args
system "make install"
end
end
"""
def camelcase(name):
    """Return *name* converted to CamelCase for use as a Homebrew formula
    class name, e.g. 'ros-foo_bar' -> 'RosFooBar'.

    Bug fix: this was an unimplemented stub that returned None, while
    formula_template interpolates ``:{camelcase_name}`` as the Ruby class
    name.  Hyphens and underscores both act as word separators.
    """
    parts = name.replace('_', '-').split('-')
    return ''.join(part.capitalize() for part in parts if part)
def generate_substitutions_from_package(pkg, rosdistro):
    """Build the template substitution dict for *pkg*.

    :param pkg: catkin package object; only its ``name`` attribute is read
    :param rosdistro: ROS distribution name (currently unused here; kept
        for interface compatibility with the get_subs_fn hook)
    :returns: dict of substitutions for ``formula_template``
    """
    subs = {}
    # Bug fix: this used to be ``pkg.name.lower().split('-')``, which left
    # a *list* in the slot the template interpolates as the Ruby formula
    # class name.  Build the CamelCase name instead (computed inline so
    # this function stands alone).
    subs['camelcase_name'] = ''.join(
        part.capitalize()
        for part in pkg.name.replace('_', '-').split('-') if part)
    return subs
def prepare_arguments(parser):
    """Register this generator's command line options on *parser* and
    return it (for the command-loader protocol)."""
    parser.add_argument(
        'package_path', nargs='?',
        help="path to or containing the package.xml of a package")
    # --place and --process are mutually exclusive; passing neither means
    # "do both" (see main()).
    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument(
        '--place-template-files', '--place', action='store_true',
        help="places debian/* template files only")
    group.add_argument(
        '--process-template-files', '--process', action='store_true',
        help="processes templates in debian/* only")
    parser.add_argument(
        '--ros-distro', '--rosdistro', '-r',
        help='ROS distro, e.g. groovy, hydro (used for rosdep)')
    return parser
def get_subs(pkg, ros_distro):
    """Default substitution generator; a thin indirection point so main()
    can be handed an alternate get_subs_fn (e.g. for testing)."""
    return generate_substitutions_from_package(pkg, ros_distro)
def main(args=None, get_subs_fn=None):
    """Generate a Homebrew formula for exactly one catkin package.

    :param args: argparse namespace produced by prepare_arguments(), or
        None to run with defaults against the current working directory
    :param get_subs_fn: optional substitution-generator override;
        defaults to get_subs (mainly for testing)
    """
    get_subs_fn = get_subs_fn or get_subs
    _place_template_files = True
    _process_template_files = True
    package_path = os.getcwd()
    if args is not None:
        package_path = args.package_path or os.getcwd()
        _place_template_files = args.place_template_files
        _process_template_files = args.process_template_files
    pkgs_dict = find_packages(package_path)
    if len(pkgs_dict) == 0:
        sys.exit("No packages found in path: '{0}'".format(package_path))
    if len(pkgs_dict) > 1:
        sys.exit("Multiple packages found, this tool only supports one package at a time.")
    ros_distro = os.environ.get('ROS_DISTRO', 'groovy')
    # Allow args overrides.  Bug fix: args.ros_distro was previously read
    # unconditionally, raising AttributeError whenever main(None) was used.
    if args is not None and args.ros_distro:
        ros_distro = args.ros_distro
    # Summarize
    info(fmt("@!@{gf}==> @|") +
         fmt("Generating Homebrew formula for package(s) %s" %
             ([p.name for p in pkgs_dict.values()])))
    for path, pkg in pkgs_dict.items():
        template_files = None
        try:
            subs = get_subs_fn(pkg, ros_distro)
            # NOTE(review): place_template_files / process_template_files
            # are not defined or imported anywhere in this module --
            # presumably meant to come from bloom.generators; as written
            # these calls raise NameError.  TODO confirm the intended import.
            if _place_template_files:
                # Place template files
                place_template_files(path)
            if _process_template_files:
                # Just process existing template files
                template_files = process_template_files(path, subs)
            if not _place_template_files and not _process_template_files:
                # If neither, do both
                place_template_files(path)
                template_files = process_template_files(path, subs)
            if template_files is not None:
                # Clean up the raw templates once they have been rendered.
                for template_file in template_files:
                    os.remove(os.path.normpath(template_file))
        except Exception as exc:
            debug(traceback.format_exc())
            error(type(exc).__name__ + ": " + str(exc), exit=True)
        except (KeyboardInterrupt, EOFError):
            sys.exit(1)
# This describes this command to the loader: the bloom command framework
# discovers subcommands via dicts of this shape (title shown in help,
# main() as the entry point, prepare_arguments() to register CLI options).
description = dict(
    title='homebrew',
    description="Generates a Homebrew Formula for a catkin package",
    main=main,
    prepare_arguments=prepare_arguments
)
| {
"content_hash": "f49b186ec261ea6f95ff7228257c2660",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 91,
"avg_line_length": 30.975609756097562,
"alnum_prop": 0.6359580052493439,
"repo_name": "wjwwood/bloom_homebrew",
"id": "378c74a50fadcaaeee7688ec2ce6e1492c5de47e",
"size": "5456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bloom_homebrew/generate_cmd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6838"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .message import *
from .message_parser import *
from .message_serializer import *
from .ping_message import *
from .ping_message_parser import *
from .ping_message_serializer import *
from .pong_message import *
from .pong_message_parser import *
from .pong_message_serializer import*
from .client import *
from .server import *
| {
"content_hash": "7e0b707d533aaebed6e8fcb56e4a4b81",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 38,
"avg_line_length": 23.57894736842105,
"alnum_prop": 0.7589285714285714,
"repo_name": "foxdog-studios/pyddp",
"id": "2a341f6713e3906b88da8db86acc6edaf905eaf9",
"size": "1052",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "ddp/messages/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "109"
},
{
"name": "Python",
"bytes": "182982"
},
{
"name": "Shell",
"bytes": "3484"
}
],
"symlink_target": ""
} |
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class SchedulingApi(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """

    def __init__(self, api_client=None):
        # Use the injected client when given, otherwise a default-configured one.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_api_group(self, **kwargs):
        """
        get information of a group
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.get_api_group(async=True)
        >>> result = thread.get()

        :param async bool
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Always unwrap the (data, status_code, headers) tuple to just data.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async'):
            # Async mode: hand the worker thread back; caller joins via .get().
            return self.get_api_group_with_http_info(**kwargs)
        else:
            (data) = self.get_api_group_with_http_info(**kwargs)
            return data

    def get_api_group_with_http_info(self, **kwargs):
        """
        get information of a group
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.get_api_group_with_http_info(async=True)
        >>> result = thread.get()

        :param async bool
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Reject any keyword argument this endpoint does not understand.
        all_params = []
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

        # Authentication setting
        auth_settings = ['BearerToken']

        # NOTE(review): `async=` as a keyword argument below is a syntax
        # error on Python >= 3.7, where `async` became a reserved word.
        # This generated code targets older interpreters; newer swagger
        # generators renamed the parameter to `async_req`.
        return self.api_client.call_api('/apis/scheduling.k8s.io/', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='V1APIGroup',
                                        auth_settings=auth_settings,
                                        async=params.get('async'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
| {
"content_hash": "463e14895e436be333fd80f806b42e04",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 119,
"avg_line_length": 34.69672131147541,
"alnum_prop": 0.5379163713678242,
"repo_name": "mbohlool/client-python",
"id": "a49258c74cca71ce54d2e9cbaf280456da32fbe7",
"size": "4250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/client/apis/scheduling_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8417639"
},
{
"name": "Shell",
"bytes": "16830"
}
],
"symlink_target": ""
} |
this_version = '0.2.5'
stable_version = '0.2.5'
readme = '''-----------------------------
dill: serialize all of python
-----------------------------
About Dill
==========
Dill extends python's 'pickle' module for serializing and de-serializing
python objects to the majority of the built-in python types. Serialization
is the process of converting an object to a byte stream, and the inverse
of which is converting a byte stream back to on python object hierarchy.
Dill provides the user the same interface as the 'pickle' module, and
also includes some additional features. In addition to pickling python
objects, dill provides the ability to save the state of an interpreter
session in a single command. Hence, it would be feasible to save an
interpreter session, close the interpreter, ship the pickled file to
another computer, open a new interpreter, unpickle the session and
thus continue from the 'saved' state of the original interpreter
session.
Dill can be used to store python objects to a file, but the primary
usage is to send python objects across the network as a byte stream.
Dill is quite flexible, and allows arbitrary user defined classes
and functions to be serialized. Thus dill is not intended to be
secure against erroneously or maliciously constructed data. It is
left to the user to decide whether the data they unpickle is from
a trustworthy source.
Dill is part of pathos, a python framework for heterogeneous computing.
Dill is in active development, so any user feedback, bug reports, comments,
or suggestions are highly appreciated. A list of known issues is maintained
at http://trac.mystic.cacr.caltech.edu/project/pathos/query, with a public
ticket list at https://github.com/uqfoundation/dill/issues.
Major Features
==============
Dill can pickle the following standard types::
- none, type, bool, int, long, float, complex, str, unicode,
- tuple, list, dict, file, buffer, builtin,
- both old and new style classes,
- instances of old and new style classes,
- set, frozenset, array, functions, exceptions
Dill can also pickle more 'exotic' standard types::
- functions with yields, nested functions, lambdas,
- cell, method, unboundmethod, module, code, methodwrapper,
- dictproxy, methoddescriptor, getsetdescriptor, memberdescriptor,
- wrapperdescriptor, xrange, slice,
- notimplemented, ellipsis, quit
Dill cannot yet pickle these standard types::
- frame, generator, traceback
Dill also provides the capability to::
- save and load python interpreter sessions
- save and extract the source code from functions and classes
- interactively diagnose pickling errors
Current Release
===============
This version is dill-0.2.5.
The latest stable version of dill is available from::
http://trac.mystic.cacr.caltech.edu/project/pathos
or::
https://github.com/uqfoundation/dill/releases
or also::
https://pypi.python.org/pypi/dill
Dill is distributed under a 3-clause BSD license.
>>> import dill
>>> print (dill.license())
Development Version
===================
You can get the latest development version with all the shiny new features at::
https://github.com/uqfoundation
Feel free to fork the github mirror of our svn trunk. If you have a new
contribution, please submit a pull request.
Installation
============
Dill is packaged to install from source, so you must
download the tarball, unzip, and run the installer::
[download]
$ tar -xvzf dill-0.2.5.tgz
$ cd dill-0.2.5
$ python setup.py build
$ python setup.py install
You will be warned of any missing dependencies and/or settings
after you run the "build" step above.
Alternately, dill can be installed with pip or easy_install::
$ pip install dill
Requirements
============
Dill requires::
- python2, version >= 2.5 *or* python3, version >= 3.1
- pyreadline, version >= 1.7.1 (on windows)
Optional requirements::
- setuptools, version >= 0.6
- objgraph, version >= 1.7.2
More Information
================
Probably the best way to get started is to look at the tests that are
provided within dill. See `dill.tests` for a set of scripts that demonstrate
dill's ability to serialize different python objects. Since dill conforms
to the 'pickle' interface, the examples and documentation at
http://docs.python.org/library/pickle.html also apply to dill if one will
`import dill as pickle`. The source code is also generally well
documented, so further questions may be resolved by inspecting the code
itself. Please also feel free to submit a ticket on github, or ask a
question on stackoverflow (@Mike McKerns).
Dill is an active research tool. There are a growing number of publications
and presentations that discuss real-world examples and new features of dill
in greater detail than presented in the user's guide. If you would like to
share how you use dill in your work, please post a link or send an email
(to mmckerns at caltech dot edu).
Citation
========
If you use dill to do research that leads to publication, we ask that you
acknowledge use of dill by citing the following in your publication::
M.M. McKerns, L. Strand, T. Sullivan, A. Fang, M.A.G. Aivazis,
"Building a framework for predictive science", Proceedings of
the 10th Python in Science Conference, 2011;
http://arxiv.org/pdf/1202.1056
Michael McKerns and Michael Aivazis,
"pathos: a framework for heterogeneous computing", 2010- ;
http://trac.mystic.cacr.caltech.edu/project/pathos
Please see http://trac.mystic.cacr.caltech.edu/project/pathos or
http://arxiv.org/pdf/1202.1056 for further information.
'''
license = '''This software is part of the open-source mystic project at the California
Institute of Technology, and is available subject to the conditions and
terms laid out below. By downloading and using this software you are
agreeing to the following conditions.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met::
- Redistribution of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistribution in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentations and/or other materials provided with the distribution.
- Neither the name of the California Institute of Technology nor
the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Copyright (c) 2016 California Institute of Technology. All rights reserved.
'''
| {
"content_hash": "0f49261d4bb997c8b468f3de2e3c4934",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 86,
"avg_line_length": 35.66350710900474,
"alnum_prop": 0.7464451827242525,
"repo_name": "jlcarmic/producthunt_simulator",
"id": "e3304f82dbd466e6d7dae61a03acd2c1d8682b81",
"size": "7561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/dill/info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "497576"
},
{
"name": "C++",
"bytes": "3309990"
},
{
"name": "CSS",
"bytes": "20962"
},
{
"name": "FORTRAN",
"bytes": "10375"
},
{
"name": "HTML",
"bytes": "130243"
},
{
"name": "JavaScript",
"bytes": "32774"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "16272002"
},
{
"name": "Shell",
"bytes": "3821"
}
],
"symlink_target": ""
} |
from .util import DummyDict
from .util import tprint
import deepdish as dd
import numpy as np
# CAFFE WEIGHTS: O x I x H x W
# TFLOW WEIGHTS: H x W x I x O
def to_caffe(tfW, name=None, shape=None, color_layer='', conv_fc_transitionals=None, info=DummyDict()):
    """Convert a TensorFlow weight array into Caffe's memory layout.

    Caffe stores convolution kernels as O x I x H x W while TensorFlow uses
    H x W x I x O, so 4-d arrays are transposed accordingly.  The first color
    layer additionally has its input channels reversed (RGB <-> BGR order
    differs between the two frameworks).  2-d (fully connected) weights are
    simply transposed, except for layers listed in ``conv_fc_transitionals``,
    which were convolutional on the Caffe side and need their flattening
    order rewritten.  Notes about what was done are recorded in ``info``.
    """
    assert conv_fc_transitionals is None or name is not None
    if tfW.ndim != 4:
        # Fully connected weights.
        if conv_fc_transitionals is not None and name in conv_fc_transitionals:
            # Round-trip through the 4-d TensorFlow shape so the flattened
            # ordering matches what Caffe's conv-turned-fc layer expects.
            cf_shape = conv_fc_transitionals[name]
            tf_shape = (cf_shape[2], cf_shape[3], cf_shape[1], cf_shape[0])
            cfW = (tfW.reshape(tf_shape)
                      .transpose(3, 2, 0, 1)
                      .reshape(cf_shape[0], -1))
            info[name] = 'fc->c transitioned with caffe shape {}'.format(cf_shape)
            return cfW
        return tfW.T

    # Convolutional weights.
    if name in ('conv1_1', 'conv1', color_layer) and tfW.shape[2] == 3:
        # Reverse the input-channel axis (RGB <-> BGR) for the color layer.
        tfW = tfW[:, :, ::-1]
        info[name] = 'flipped'
    return tfW.transpose(3, 2, 0, 1)
def from_caffe(cfW, name=None, color_layer='', conv_fc_transitionals=None, info=DummyDict()):
    """Convert a Caffe weight array into TensorFlow's memory layout.

    Inverse of ``to_caffe``: 4-d Caffe kernels (O x I x H x W) become
    TensorFlow kernels (H x W x I x O) with the first color layer's input
    channels reversed (BGR <-> RGB), and 2-d (fully connected) weights are
    transposed, with special flattening-order handling for layers listed in
    ``conv_fc_transitionals``.  Notes about what was done are recorded in
    ``info``.
    """
    assert conv_fc_transitionals is None or name is not None
    if cfW.ndim == 4:
        # Convolutional weights.
        tfW = cfW.transpose(2, 3, 1, 0)
        if (name == 'conv1_1' or name == 'conv1' or name == color_layer) and tfW.shape[2] == 3:
            # Reverse the input-channel axis (BGR <-> RGB) for the color layer.
            tfW = tfW[:, :, ::-1]
            info[name] = 'flipped'
        return tfW
    else:
        if conv_fc_transitionals is not None and name in conv_fc_transitionals:
            # The layer was convolutional in Caffe but is fully connected in
            # the TensorFlow graph: rewrite the flattening order.
            cf_shape = conv_fc_transitionals[name]
            tfW = cfW.reshape(cf_shape).transpose(2, 3, 1, 0).reshape(-1, cf_shape[0])
            info[name] = 'c->fc transitioned with caffe shape {}'.format(cf_shape)
            return tfW
        else:
            return cfW.T
def load_caffemodel(path, session, prefix='', ignore=set(),
                    conv_fc_transitionals=None, renamed_layers=DummyDict(),
                    color_layer='', verbose=False, pre_adjust_batch_norm=False):
    """Load weights from a deepdish HDF5 caffemodel dump into a TF session.

    Parameters
    ----------
    path : str
        HDF5 file whose ``/data`` group maps Caffe layer names to dicts with
        keys '0' (weights) and '1' (biases).
    session : tf.Session
        Session whose trainable variables receive the converted weights.
    prefix : str
        Prepended to each (possibly renamed) layer name when looking up the
        TensorFlow variable.
    ignore : set
        Caffe layer names to skip.
    conv_fc_transitionals : dict or None
        Caffe shapes for layers that transition from conv to fc
        (see ``from_caffe``).
    renamed_layers : mapping
        Optional Caffe-name -> TensorFlow-name translation.
    color_layer : str
        Extra layer name whose input channels get color-flipped.
    verbose : bool
        If True, print a summary of every assignment made.
    pre_adjust_batch_norm : bool
        If True, fold stored batch-norm statistics (entries named
        'batch_<layer>') directly into the weights and biases.

    Returns
    -------
    list of str
        Human-readable descriptions of the assignments performed.
    """
    import tensorflow as tf

    def find_weights(name, which='weights'):
        # Locate the trainable variable '<name>/<which>' (None if absent).
        for tw in tf.trainable_variables():
            if tw.name.split(':')[0] == name + '/' + which:
                return tw
        return None

    data = dd.io.load(path, '/data')
    assigns = []
    loaded = []
    info = {}
    for key in data:
        local_key = prefix + renamed_layers.get(key, key)
        if key not in ignore:
            bn_name = 'batch_' + key
            if '0' in data[key]:  # '0' holds the layer's weights
                weights = find_weights(local_key, 'weights')
                if weights is not None:
                    W = from_caffe(data[key]['0'], name=key, info=info,
                                   conv_fc_transitionals=conv_fc_transitionals,
                                   color_layer=color_layer)
                    # BUGFIX: the original compared `W.ndim` (an int) against
                    # `as_list()` (a list), which is always unequal; compare
                    # the actual shapes and only reshape when they differ.
                    if list(W.shape) != weights.get_shape().as_list():
                        W = W.reshape(weights.get_shape().as_list())
                    init_str = ''
                    if pre_adjust_batch_norm and bn_name in data:
                        # Fold the batch-norm scale into the weights.
                        # Stored stats: '0' = mean sum, '1' = variance sum,
                        # '2' = counts (see save_caffemodel).
                        bn_data = data[bn_name]
                        sigma = np.sqrt(1e-5 + bn_data['1'] / bn_data['2'])
                        W /= sigma
                        init_str += ' batch-adjusted'
                    assigns.append(weights.assign(W))
                    loaded.append('{}:0 -> {}:weights{} {}'.format(key, local_key, init_str, info.get(key, '')))
            if '1' in data[key]:  # '1' holds the layer's biases
                biases = find_weights(local_key, 'biases')
                if biases is not None:
                    bias = data[key]['1']
                    init_str = ''
                    if pre_adjust_batch_norm and bn_name in data:
                        # Fold the batch-norm shift and scale into the bias.
                        bn_data = data[bn_name]
                        sigma = np.sqrt(1e-5 + bn_data['1'] / bn_data['2'])
                        mu = bn_data['0'] / bn_data['2']
                        bias = (bias - mu) / sigma
                        init_str += ' batch-adjusted'
                    assigns.append(biases.assign(bias))
                    loaded.append('{}:1 -> {}:biases{}'.format(key, local_key, init_str))
    session.run(assigns)
    if verbose:
        tprint('Loaded model from', path)
        for l in loaded:
            tprint('-', l)
    return loaded
def save_caffemodel(path, session, layers, prefix='',
                    conv_fc_transitionals=None, color_layer='', verbose=False,
                    save_batch_norm=False, lax_naming=False):
    """Export TensorFlow variables for ``layers`` to a Caffe-style HDF5 file.

    Each entry of ``layers`` is either a layer name or a
    ``(caffe_name, tf_name)`` tuple.  Weights and biases are converted with
    ``to_caffe`` and written under '/data' with keys '0' and '1'; with
    ``save_batch_norm`` the 'bn_mean'/'bn_var' tensors are stored under
    'batch_<name>'.  Returns a list of strings describing what was saved.
    """
    import tensorflow as tf

    def locate_variable(name, which='weights'):
        # Find the trainable variable '<name>/<which>'.  With lax_naming a
        # suffix match suffices.  Returns None when not found.
        target = name + '/' + which
        for candidate in tf.trainable_variables():
            base = candidate.name.split(':')[0]
            matched = base.endswith(target) if lax_naming else base == target
            if matched:
                return candidate
        return None

    def locate_batch_norm(name, which='mean'):
        # Find the batch-norm statistic tensor '<name>/bn_<which>:0'.
        suffix = name + '/bn_' + which + ':0'
        for candidate in tf.all_variables():
            if candidate.name.endswith(suffix):
                return candidate
        return None

    data = {}
    saved = []
    info = {}
    for entry in layers:
        # An entry is either a bare name or (caffe_name, tf_name).
        cf_name, tf_name = entry if isinstance(entry, tuple) else (entry, entry)
        record = {}
        weights = locate_variable(prefix + tf_name, 'weights')
        if weights is not None:
            cfW = to_caffe(session.run(weights), name=cf_name,
                           conv_fc_transitionals=conv_fc_transitionals,
                           info=info, color_layer=color_layer)
            record['0'] = cfW
            saved.append('{}:weights -> {}:0 {}'.format(prefix + tf_name, cf_name, info.get(cf_name, '')))
        biases = locate_variable(prefix + tf_name, 'biases')
        if biases is not None:
            record['1'] = session.run(biases)
            saved.append('{}:biases -> {}:1'.format(prefix + tf_name, cf_name))
        if record:
            data[cf_name] = record
        if save_batch_norm:
            mean = locate_batch_norm(cf_name, which='mean')
            variance = locate_batch_norm(cf_name, which='var')
            if mean is not None and variance is not None:
                data['batch_' + cf_name] = {
                    '0': np.squeeze(session.run(mean)),
                    '1': np.squeeze(session.run(variance)),
                    '2': np.array([1.0], dtype=np.float32),
                }
                saved.append('batch_norm({}) saved'.format(cf_name))
    dd.io.save(path, dict(data=data), compression=None)
    if verbose:
        tprint('Saved model to', path)
        for line in saved:
            tprint('-', line)
    return saved
| {
"content_hash": "3ab3f52d0cd752cb2d975fec29bbccff",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 112,
"avg_line_length": 37.544973544973544,
"alnum_prop": 0.5022547914317925,
"repo_name": "gustavla/self-supervision",
"id": "6de438d9a6150c8592a64f95df5b14db5bf0ab59",
"size": "7096",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "selfsup/caffe.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "402485"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from collections import namedtuple
from textwrap import dedent
from pants.backend.jvm.tasks.jvm_compile.zinc.zinc_compile import ZincCompile
from pants.base.build_environment import get_buildroot
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_mkdir, safe_open
from pants_test.backend.jvm.tasks.jvm_compile.base_compile_integration_test import BaseCompileIT
class Compile(namedtuple('Compile', ['srcfiles', 'config', 'artifact_count'])):
  """One compile step for _do_test_caching: `srcfiles` maps file name to
  content, `config` is the pants config dict for the run, and
  `artifact_count` is the expected number of cache entries afterwards."""
  pass
class CacheCompileIntegrationTest(BaseCompileIT):
  """Integration tests for zinc compile artifact caching behavior."""

  def run_compile(self, target_spec, config, workdir):
    """Run `./pants compile <target_spec>` in `workdir` and assert success."""
    args = ['compile', target_spec]
    pants_run = self.run_pants_with_workdir(args, workdir, config)
    self.assert_success(pants_run)

  def create_file(self, path, value):
    """Write `value` to `path`, creating parent directories as needed."""
    with safe_open(path, 'w') as f:
      f.write(value)

  def test_stale_artifacts_rmd_when_cache_used_with_zinc(self):
    # Compile the same target three times, flip-flopping one class name, and
    # check that stale classfiles are removed when results come from cache.
    with temporary_dir() as cache_dir, \
        self.temporary_workdir() as workdir, \
        temporary_dir(root_dir=get_buildroot()) as src_dir:
      config = {
        'cache.compile.zinc': {'write_to': [cache_dir], 'read_from': [cache_dir]},
        'compile.zinc': {'incremental_caching': True },
      }
      srcfile = os.path.join(src_dir, 'org', 'pantsbuild', 'cachetest', 'A.java')
      buildfile = os.path.join(src_dir, 'org', 'pantsbuild', 'cachetest', 'BUILD')
      self.create_file(srcfile,
                       dedent("""package org.pantsbuild.cachetest;
                          class A {}
                          class Main {}"""))
      self.create_file(buildfile,
                       dedent("""java_library(name='cachetest',
                       sources=['A.java']
                       )"""))
      cachetest_spec = os.path.join(os.path.basename(src_dir), 'org', 'pantsbuild',
                                    'cachetest:cachetest')
      # Caches values A.class, Main.class
      self.run_compile(cachetest_spec, config, workdir)
      self.create_file(srcfile,
                       dedent("""package org.pantsbuild.cachetest;
                            class A {}
                            class NotMain {}"""))
      # Caches values A.class, NotMain.class and leaves them on the filesystem
      self.run_compile(cachetest_spec, config, workdir)
      self.create_file(srcfile,
                       dedent("""package org.pantsbuild.cachetest;
                          class A {}
                          class Main {}"""))
      # Should cause NotMain.class to be removed
      self.run_compile(cachetest_spec, config, workdir)
      root = os.path.join(workdir, 'compile', 'zinc')
      # One target.
      self.assertEqual(len(os.listdir(root)), 1)
      target_workdir_root = os.path.join(root, os.listdir(root)[0])
      target_workdirs = os.listdir(target_workdir_root)
      # Two workdirs.
      self.assertEqual(len(target_workdirs), 2)

      def classfiles(d):
        # Classfiles produced under one of the per-compile workdirs.
        cd = os.path.join(target_workdir_root, d, 'classes', 'org', 'pantsbuild', 'cachetest')
        return sorted(os.listdir(cd))

      # One workdir should contain NotMain, and the other should contain Main.
      self.assertEquals(sorted(classfiles(w) for w in target_workdirs),
                        sorted([['A.class', 'Main.class'], ['A.class', 'NotMain.class']]))

  def test_incremental_caching(self):
    """Tests that with --no-incremental-caching, we don't write incremental artifacts."""
    srcfile = 'A.java'

    def config(incremental_caching):
      return { 'compile.zinc': {'incremental_caching': incremental_caching} }

    self._do_test_caching(
      Compile({srcfile: "class A {}"}, config(False), 1),
      Compile({srcfile: "final class A {}"}, config(False), 1),
      Compile({srcfile: "public final class A {}"}, config(True), 2),
    )

  def test_incremental(self):
    """Tests that with --no-incremental and --no-incremental-caching, we always write artifacts."""
    srcfile = 'A.java'
    config = {'compile.zinc': {'incremental': False, 'incremental_caching': False}}
    self._do_test_caching(
      Compile({srcfile: "class A {}"}, config, 1),
      Compile({srcfile: "final class A {}"}, config, 2),
      Compile({srcfile: "public final class A {}"}, config, 3),
    )

  def _do_test_caching(self, *compiles):
    """Tests that the given compiles within the same workspace produce the given artifact counts."""
    with temporary_dir() as cache_dir, \
        self.temporary_workdir() as workdir, \
        temporary_dir(root_dir=get_buildroot()) as src_dir:

      def complete_config(config):
        # Clone the input config and add cache settings.
        cache_settings = {'write_to': [cache_dir], 'read_from': [cache_dir]}
        return dict(config.items() + [('cache.compile.zinc', cache_settings)])

      buildfile = os.path.join(src_dir, 'BUILD')
      spec = os.path.join(src_dir, ':cachetest')
      artifact_dir = os.path.join(cache_dir,
                                  ZincCompile.stable_name(),
                                  '{}.cachetest'.format(os.path.basename(src_dir)))
      for c in compiles:
        # Clear the src directory and recreate the files.
        safe_mkdir(src_dir, clean=True)
        self.create_file(buildfile,
                         """java_library(name='cachetest', sources=rglobs('*.java', '*.scala'))""")
        for name, content in c.srcfiles.items():
          self.create_file(os.path.join(src_dir, name), content)
        # Compile, and confirm that we have the right count of artifacts.
        self.run_compile(spec, complete_config(c.config), workdir)
        self.assertEquals(c.artifact_count, len(os.listdir(artifact_dir)))
| {
"content_hash": "60d55140ffba5b4a2be3620fa0aaf64a",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 100,
"avg_line_length": 41.48936170212766,
"alnum_prop": 0.6095726495726496,
"repo_name": "dturner-tw/pants",
"id": "58d0d41a434df8fd001963606a50578cca4f4b05",
"size": "5997",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/backend/jvm/tasks/jvm_compile/java/test_cache_compile_integration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "11538"
},
{
"name": "Cucumber",
"bytes": "919"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1849"
},
{
"name": "HTML",
"bytes": "70358"
},
{
"name": "Java",
"bytes": "293253"
},
{
"name": "JavaScript",
"bytes": "31042"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "4404984"
},
{
"name": "Scala",
"bytes": "85217"
},
{
"name": "Shell",
"bytes": "50774"
},
{
"name": "Thrift",
"bytes": "2919"
}
],
"symlink_target": ""
} |
import re
import subprocess
def test_create_product():
    """Run the inline user-event import sample and verify its logged output."""
    # check_output returns bytes; str() gives their repr, so the whole log is
    # one line and re.match with '.*' can scan across it.
    output = str(
        subprocess.check_output("python import_user_events_inline.py", shell=True)
    )
    expected_fragments = (
        '.*import user events from inline source request.*?parent: "projects/.*?/locations/global/catalogs/default_catalog.*',
        ".*import user events from inline source request.*?input_config.*?user_event_inline_source.*",
        ".*the operation was started.*?projects/.*?/locations/global/catalogs/default_catalog/operations/import-user-events.*",
        ".*import user events operation is done.*",
        ".*number of successfully imported events.*?3.*",
        ".*number of failures during the importing.*?0.*",
    )
    for pattern in expected_fragments:
        assert re.match(pattern, output)
| {
"content_hash": "363476d62349598da3ce95f9ab1fa6df",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 127,
"avg_line_length": 36.708333333333336,
"alnum_prop": 0.6606129398410897,
"repo_name": "googleapis/python-retail",
"id": "79546b4038a0189a4b01d776e87766c44acc4791",
"size": "1479",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/interactive-tutorials/events/import_user_events_inline_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "7420556"
},
{
"name": "Shell",
"bytes": "30660"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from . import views, models
from Grundgeruest.views import ListeMitMenue
# URL namespace for reverse() lookups.
# NOTE(review): the value 'Veranstaltungen' looks copied from another app —
# this module serves Buch views (Bibliothek) — confirm before renaming, since
# templates may already reverse against this namespace.
app_name = 'Veranstaltungen'

urlpatterns = [
    # List all books, rendered through the shared menu wrapper view.
    url(r'^$',
        ListeMitMenue.as_view(
            template_name='Bibliothek/liste_alle.html',
            model=models.Buch,
            context_object_name='buecher',
        ),
        name='liste_alle'),
    # Import books from a file; the optional slug selects the source.
    # Raw strings fix the invalid '\w' escape in the original plain strings
    # (a DeprecationWarning on Python 3.6+, an error in later versions).
    url(r'^aus_datei_einlesen/([\w-]*)$', views.aus_datei_einlesen, name='einlesen'),
    url(r'^aus_datei_einlesen$', views.aus_datei_einlesen, name='einlesen'),
]
| {
"content_hash": "240f7fca1e509f297b9216f64363ed2e",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 84,
"avg_line_length": 29.36842105263158,
"alnum_prop": 0.6021505376344086,
"repo_name": "wmles/scholarium",
"id": "67e1400e5b84eb0b4d14846e8b99f1f9817654b6",
"size": "558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Bibliothek/urls.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "107176"
},
{
"name": "HTML",
"bytes": "33687"
},
{
"name": "JavaScript",
"bytes": "702"
},
{
"name": "Python",
"bytes": "64048"
}
],
"symlink_target": ""
} |
import unittest
import mock
import tempfile
from test import safe_repr
from test.unit import MockTrue
from swift.common.swob import HTTPBadRequest, Request, HTTPException
from swift.common.http import HTTP_REQUEST_ENTITY_TOO_LARGE, \
HTTP_BAD_REQUEST, HTTP_LENGTH_REQUIRED
from swift.common import constraints
class TestConstraints(unittest.TestCase):
    """Tests for the request-checking helpers in swift.common.constraints."""

    def assertIn(self, member, container, msg=None):
        """Copied from 2.7"""
        if member not in container:
            standardMsg = '%s not found in %s' % (safe_repr(member),
                                                  safe_repr(container))
            self.fail(self._formatMessage(msg, standardMsg))

    def test_check_metadata_empty(self):
        # No metadata headers at all is valid (check_metadata returns None).
        headers = {}
        self.assertEquals(constraints.check_metadata(Request.blank(
            '/', headers=headers), 'object'), None)

    def test_check_metadata_good(self):
        headers = {'X-Object-Meta-Name': 'Value'}
        self.assertEquals(constraints.check_metadata(Request.blank(
            '/', headers=headers), 'object'), None)

    def test_check_metadata_empty_name(self):
        # A metadata header with an empty name is rejected.
        headers = {'X-Object-Meta-': 'Value'}
        self.assert_(constraints.check_metadata(Request.blank(
            '/', headers=headers), 'object'), HTTPBadRequest)

    def test_check_metadata_name_length(self):
        # A name at the limit passes; one byte over returns 400 and the
        # response body names the offending header.
        name = 'a' * constraints.MAX_META_NAME_LENGTH
        headers = {'X-Object-Meta-%s' % name: 'v'}
        self.assertEquals(constraints.check_metadata(Request.blank(
            '/', headers=headers), 'object'), None)
        name = 'a' * (constraints.MAX_META_NAME_LENGTH + 1)
        headers = {'X-Object-Meta-%s' % name: 'v'}
        self.assertEquals(constraints.check_metadata(Request.blank(
            '/', headers=headers), 'object').status_int, HTTP_BAD_REQUEST)
        self.assertIn(
            ('X-Object-Meta-%s' % name).lower(),
            constraints.check_metadata(Request.blank(
                '/', headers=headers), 'object').body.lower())

    def test_check_metadata_value_length(self):
        # Same pattern as the name-length test, for the value side.
        value = 'a' * constraints.MAX_META_VALUE_LENGTH
        headers = {'X-Object-Meta-Name': value}
        self.assertEquals(constraints.check_metadata(Request.blank(
            '/', headers=headers), 'object'), None)
        value = 'a' * (constraints.MAX_META_VALUE_LENGTH + 1)
        headers = {'X-Object-Meta-Name': value}
        self.assertEquals(constraints.check_metadata(Request.blank(
            '/', headers=headers), 'object').status_int, HTTP_BAD_REQUEST)
        self.assertIn(
            'x-object-meta-name',
            constraints.check_metadata(Request.blank(
                '/', headers=headers),
                'object').body.lower())
        self.assertIn(
            str(constraints.MAX_META_VALUE_LENGTH),
            constraints.check_metadata(Request.blank(
                '/', headers=headers),
                'object').body)

    def test_check_metadata_count(self):
        # Exactly MAX_META_COUNT headers pass; one more returns 400.
        headers = {}
        for x in xrange(constraints.MAX_META_COUNT):
            headers['X-Object-Meta-%d' % x] = 'v'
        self.assertEquals(constraints.check_metadata(Request.blank(
            '/', headers=headers), 'object'), None)
        headers['X-Object-Meta-Too-Many'] = 'v'
        self.assertEquals(constraints.check_metadata(Request.blank(
            '/', headers=headers), 'object').status_int, HTTP_BAD_REQUEST)

    def test_check_metadata_size(self):
        # Fill headers to just under MAX_META_OVERALL_SIZE, then push over it.
        headers = {}
        size = 0
        chunk = constraints.MAX_META_NAME_LENGTH + \
            constraints.MAX_META_VALUE_LENGTH
        x = 0
        while size + chunk < constraints.MAX_META_OVERALL_SIZE:
            headers['X-Object-Meta-%04d%s' %
                    (x, 'a' * (constraints.MAX_META_NAME_LENGTH - 4))] = \
                'v' * constraints.MAX_META_VALUE_LENGTH
            size += chunk
            x += 1
        self.assertEquals(constraints.check_metadata(Request.blank(
            '/', headers=headers), 'object'), None)
        # add two more headers in case adding just one falls exactly on the
        # limit (eg one header adds 1024 and the limit is 2048)
        headers['X-Object-Meta-%04d%s' %
                (x, 'a' * (constraints.MAX_META_NAME_LENGTH - 4))] = \
            'v' * constraints.MAX_META_VALUE_LENGTH
        headers['X-Object-Meta-%04d%s' %
                (x + 1, 'a' * (constraints.MAX_META_NAME_LENGTH - 4))] = \
            'v' * constraints.MAX_META_VALUE_LENGTH
        self.assertEquals(constraints.check_metadata(Request.blank(
            '/', headers=headers), 'object').status_int, HTTP_BAD_REQUEST)

    def test_check_object_creation_content_length(self):
        # MAX_FILE_SIZE passes; one byte over is 413; chunked transfer needs
        # no Content-Length; a missing Content-Length otherwise is 411.
        headers = {'Content-Length': str(constraints.MAX_FILE_SIZE),
                   'Content-Type': 'text/plain'}
        self.assertEquals(constraints.check_object_creation(Request.blank(
            '/', headers=headers), 'object_name'), None)
        headers = {'Content-Length': str(constraints.MAX_FILE_SIZE + 1),
                   'Content-Type': 'text/plain'}
        self.assertEquals(constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name').status_int,
            HTTP_REQUEST_ENTITY_TOO_LARGE)
        headers = {'Transfer-Encoding': 'chunked',
                   'Content-Type': 'text/plain'}
        self.assertEquals(constraints.check_object_creation(Request.blank(
            '/', headers=headers), 'object_name'), None)
        headers = {'Content-Type': 'text/plain'}
        self.assertEquals(constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name').status_int,
            HTTP_LENGTH_REQUIRED)

    def test_check_object_creation_name_length(self):
        headers = {'Transfer-Encoding': 'chunked',
                   'Content-Type': 'text/plain'}
        name = 'o' * constraints.MAX_OBJECT_NAME_LENGTH
        self.assertEquals(constraints.check_object_creation(Request.blank(
            '/', headers=headers), name), None)
        name = 'o' * (constraints.MAX_OBJECT_NAME_LENGTH + 1)
        self.assertEquals(constraints.check_object_creation(
            Request.blank('/', headers=headers), name).status_int,
            HTTP_BAD_REQUEST)

    def test_check_object_creation_content_type(self):
        # A Content-Type header is mandatory for object creation.
        headers = {'Transfer-Encoding': 'chunked',
                   'Content-Type': 'text/plain'}
        self.assertEquals(constraints.check_object_creation(Request.blank(
            '/', headers=headers), 'object_name'), None)
        headers = {'Transfer-Encoding': 'chunked'}
        self.assertEquals(constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name').status_int,
            HTTP_BAD_REQUEST)

    def test_check_object_creation_bad_content_type(self):
        # Non-UTF-8 bytes in Content-Type are rejected with 400.
        headers = {'Transfer-Encoding': 'chunked',
                   'Content-Type': '\xff\xff'}
        resp = constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name')
        self.assertEquals(resp.status_int, HTTP_BAD_REQUEST)
        self.assert_('Content-Type' in resp.body)

    def test_check_mount(self):
        # check_mount also validates the drive name (no spaces, slashes, '?').
        self.assertFalse(constraints.check_mount('', ''))
        with mock.patch("swift.common.constraints.ismount", MockTrue()):
            self.assertTrue(constraints.check_mount('/srv', '1'))
            self.assertTrue(constraints.check_mount('/srv', 'foo-bar'))
            self.assertTrue(constraints.check_mount(
                '/srv', '003ed03c-242a-4b2f-bee9-395f801d1699'))
            self.assertFalse(constraints.check_mount('/srv', 'foo bar'))
            self.assertFalse(constraints.check_mount('/srv', 'foo/bar'))
            self.assertFalse(constraints.check_mount('/srv', 'foo?bar'))

    def test_check_float(self):
        self.assertFalse(constraints.check_float(''))
        self.assertTrue(constraints.check_float('0'))

    def test_check_utf8(self):
        unicode_sample = u'\uc77c\uc601'
        valid_utf8_str = unicode_sample.encode('utf-8')
        # Byte-reversing a multibyte UTF-8 sequence makes it invalid.
        invalid_utf8_str = unicode_sample.encode('utf-8')[::-1]
        unicode_with_null = u'abc\u0000def'
        utf8_with_null = unicode_with_null.encode('utf-8')
        for false_argument in [None,
                               '',
                               invalid_utf8_str,
                               unicode_with_null,
                               utf8_with_null]:
            self.assertFalse(constraints.check_utf8(false_argument))
        for true_argument in ['this is ascii and utf-8, too',
                              unicode_sample,
                              valid_utf8_str]:
            self.assertTrue(constraints.check_utf8(true_argument))

    def test_validate_bad_meta(self):
        # An oversized single header is rejected and named in the body.
        req = Request.blank(
            '/v/a/c/o',
            headers={'x-object-meta-hello':
                     'ab' * constraints.MAX_HEADER_SIZE})
        self.assertEquals(constraints.check_metadata(req, 'object').status_int,
                          HTTP_BAD_REQUEST)
        self.assertIn('x-object-meta-hello', constraints.check_metadata(req,
                      'object').body.lower())

    def test_validate_constraints(self):
        # Sanity relationships between the configured limits.
        c = constraints
        self.assertTrue(c.MAX_META_OVERALL_SIZE > c.MAX_META_NAME_LENGTH)
        self.assertTrue(c.MAX_META_OVERALL_SIZE > c.MAX_META_VALUE_LENGTH)
        self.assertTrue(c.MAX_HEADER_SIZE > c.MAX_META_NAME_LENGTH)
        self.assertTrue(c.MAX_HEADER_SIZE > c.MAX_META_VALUE_LENGTH)

    def test_validate_copy_from(self):
        # X-Copy-From is split into (container, object); a leading slash and
        # object names containing slashes are both accepted.
        req = Request.blank(
            '/v/a/c/o',
            headers={'x-copy-from': 'c/o2'})
        src_cont, src_obj = constraints.check_copy_from_header(req)
        self.assertEqual(src_cont, 'c')
        self.assertEqual(src_obj, 'o2')
        req = Request.blank(
            '/v/a/c/o',
            headers={'x-copy-from': 'c/subdir/o2'})
        src_cont, src_obj = constraints.check_copy_from_header(req)
        self.assertEqual(src_cont, 'c')
        self.assertEqual(src_obj, 'subdir/o2')
        req = Request.blank(
            '/v/a/c/o',
            headers={'x-copy-from': '/c/o2'})
        src_cont, src_obj = constraints.check_copy_from_header(req)
        self.assertEqual(src_cont, 'c')
        self.assertEqual(src_obj, 'o2')

    def test_validate_bad_copy_from(self):
        # A value without a container/object separator raises HTTPException.
        req = Request.blank(
            '/v/a/c/o',
            headers={'x-copy-from': 'bad_object'})
        self.assertRaises(HTTPException,
                          constraints.check_copy_from_header, req)
class TestConstraintsConfig(unittest.TestCase):
    """Tests for loading and overriding constraint values from swift.conf."""

    def test_default_constraints(self):
        for key in constraints.DEFAULT_CONSTRAINTS:
            # if there is local over-rides in swift.conf we just continue on
            if key in constraints.OVERRIDE_CONSTRAINTS:
                continue
            # module level attrs (that aren't in OVERRIDE) should have the
            # same value as the DEFAULT map
            module_level_value = getattr(constraints, key.upper())
            self.assertEquals(constraints.DEFAULT_CONSTRAINTS[key],
                              module_level_value)

    def test_effective_constraints(self):
        for key in constraints.DEFAULT_CONSTRAINTS:
            # module level attrs should always mirror the same value as the
            # EFFECTIVE map
            module_level_value = getattr(constraints, key.upper())
            self.assertEquals(constraints.EFFECTIVE_CONSTRAINTS[key],
                              module_level_value)
            # if there are local over-rides in swift.conf those should be
            # reflected in the EFFECTIVE, otherwise we expect the DEFAULTs
            self.assertEquals(constraints.EFFECTIVE_CONSTRAINTS[key],
                              constraints.OVERRIDE_CONSTRAINTS.get(
                                  key, constraints.DEFAULT_CONSTRAINTS[key]))

    def test_override_constraints(self):
        # Point SWIFT_CONF_FILE at a temp file that sets every constraint to
        # 1, reload, and verify all three maps/attrs reflect the override.
        try:
            with tempfile.NamedTemporaryFile() as f:
                f.write('[swift-constraints]\n')
                # set everything to 1
                for key in constraints.DEFAULT_CONSTRAINTS:
                    f.write('%s = 1\n' % key)
                f.flush()
                with mock.patch.object(constraints, 'SWIFT_CONF_FILE',
                                       f.name):
                    constraints.reload_constraints()
            for key in constraints.DEFAULT_CONSTRAINTS:
                # module level attrs should all be 1
                module_level_value = getattr(constraints, key.upper())
                self.assertEquals(module_level_value, 1)
                # all keys should be in OVERRIDE
                self.assertEquals(constraints.OVERRIDE_CONSTRAINTS[key],
                                  module_level_value)
                # module level attrs should always mirror the same value as
                # the EFFECTIVE map
                self.assertEquals(constraints.EFFECTIVE_CONSTRAINTS[key],
                                  module_level_value)
        finally:
            # Restore the real swift.conf-derived constraints for other tests.
            constraints.reload_constraints()

    def test_reload_reset(self):
        # After the temp conf file disappears, reload should fall back to the
        # defaults and clear all overrides.
        try:
            with tempfile.NamedTemporaryFile() as f:
                f.write('[swift-constraints]\n')
                # set everything to 1
                for key in constraints.DEFAULT_CONSTRAINTS:
                    f.write('%s = 1\n' % key)
                f.flush()
                with mock.patch.object(constraints, 'SWIFT_CONF_FILE',
                                       f.name):
                    constraints.reload_constraints()
                self.assertTrue(constraints.SWIFT_CONSTRAINTS_LOADED)
                self.assertEquals(sorted(constraints.DEFAULT_CONSTRAINTS.keys()),
                                  sorted(constraints.OVERRIDE_CONSTRAINTS.keys()))
            # file is now deleted...
            with mock.patch.object(constraints, 'SWIFT_CONF_FILE',
                                   f.name):
                constraints.reload_constraints()
                # no constraints have been loaded from non-existant swift.conf
                self.assertFalse(constraints.SWIFT_CONSTRAINTS_LOADED)
                # no constraints are in OVERRIDE
                self.assertEquals([], constraints.OVERRIDE_CONSTRAINTS.keys())
                # the EFFECTIVE constraints mirror DEFAULT
                self.assertEquals(constraints.EFFECTIVE_CONSTRAINTS,
                                  constraints.DEFAULT_CONSTRAINTS)
        finally:
            # Restore the real swift.conf-derived constraints for other tests.
            constraints.reload_constraints()
# Allow running this test module directly: `python test_constraints.py`.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "8a387c9892173a1f8c9fcd08e214634e",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 79,
"avg_line_length": 46.30793650793651,
"alnum_prop": 0.581408103105505,
"repo_name": "NeCTAR-RC/swift",
"id": "9abf153db6a4b38220fe9760fdf31141b8d6ed2d",
"size": "15182",
"binary": false,
"copies": "1",
"ref": "refs/heads/nectar/icehouse",
"path": "test/unit/common/test_constraints.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15048"
},
{
"name": "Python",
"bytes": "3512822"
},
{
"name": "Shell",
"bytes": "902"
}
],
"symlink_target": ""
} |
# Sphinx build configuration for the gcmstools documentation.
import sys
import os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Make the external numfig extension importable.
# NOTE(review): this hard-codes a home-directory checkout of sphinx_numfig;
# the docs only build on machines with it at ~/code/sphinx_numfig-r13.
sys.path.insert(0, os.path.expanduser('~/code/sphinx_numfig-r13'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.todo',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'numfig',
]

# Numfig: number figures and prefix their captions with "Figure".
numfig_number_figures = True
numfig_figure_caption_prefix = "Figure"

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'gcmstools'
copyright = '2015, Ryan Nelson'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '0.2.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
html_theme = 'haiku'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_templates',]

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'gcmstools Documentation'

# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'gcmstools'

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static', '_static/css']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'gcmstoolsdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  ('index', 'gcmstools.tex', 'gcmstools Documentation',
   'Ryan Nelson', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'gcmstools', 'gcmstools Documentation',
     ['Ryan Nelson'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'gcmstools', 'gcmstools Documentation',
   'Ryan Nelson', 'gcmstools', 'One line description of project.',
   'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "5d2d7487de1f71ecd4847aab6a5faa5f",
"timestamp": "",
"source": "github",
"line_count": 257,
"max_line_length": 79,
"avg_line_length": 31.486381322957197,
"alnum_prop": 0.7040286702916461,
"repo_name": "rnelsonchem/gcmstools",
"id": "3f96e9815625b53b85aef7345812ae36c5e9824b",
"size": "8537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "44048"
}
],
"symlink_target": ""
} |
"""SPC Outlook JSON service."""
import datetime
import json
from pymemcache.client import Client
import pytz
import pandas as pd
from pandas.io.sql import read_sql
from paste.request import parse_formvars
from pyiem.nws.products.spcpts import THRESHOLD_ORDER
from pyiem.util import get_dbconn, get_sqlalchemy_conn, html_escape
# NOTE(review): despite the name, this is an ISO 8601-style timestamp format
# (ISO 9660 is the CD-ROM filesystem standard). Kept as-is since callers use it.
ISO9660 = "%Y-%m-%dT%H:%MZ"
def get_order(threshold):
    """Return the rank of ``threshold`` in THRESHOLD_ORDER (-1 if unknown).

    Higher rank means a more extreme outlook threshold.
    """
    try:
        return THRESHOLD_ORDER.index(threshold)
    except ValueError:
        # Unknown thresholds sort below everything real.
        return -1
def get_dbcursor():
    """Open a connection to the 'postgis' database and return a cursor."""
    return get_dbconn("postgis").cursor()
def dotime(time, lon, lat, day, cat):
    """Query for the SPC outlook valid at a given timestamp and point.

    Args:
        time (str): timestamp in "%Y-%m-%dT%H:%MZ" form, or one of
            "", "current", "now" to mean the present time.
        lon (float): longitude of the point of interest.
        lat (float): latitude of the point of interest.
        day (int): outlook day (1 = today).
        cat (str): outlook category to match.

    Returns:
        str: JSON document; "outlook" is empty when nothing matches.
    """
    if time in ["", "current", "now"]:
        ts = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
        if day > 1:
            # For day-2+ outlooks, look at the time the outlook covers.
            ts += datetime.timedelta(days=(day - 1))
    else:
        # ISO formatting
        ts = datetime.datetime.strptime(time, "%Y-%m-%dT%H:%MZ")
        ts = ts.replace(tzinfo=pytz.utc)
    # NOTE(review): the subquery reads from `spc_outlook` while the outer
    # query reads `spc_outlooks` — confirm both tables exist as named.
    with get_sqlalchemy_conn("postgis") as conn:
        df = read_sql(
            """
            SELECT issue at time zone 'UTC' as i,
            expire at time zone 'UTC' as e,
            product_issue at time zone 'UTC' as v,
            threshold, category from spc_outlooks where
            product_issue = (
                select product_issue from spc_outlook where
                issue <= %s and expire > %s and day = %s
                and outlook_type = 'C' ORDER by product_issue DESC LIMIT 1)
            and ST_Contains(geom, ST_GeomFromEWKT('SRID=4326;POINT(%s %s)'))
            and day = %s and outlook_type = 'C' and category = %s
        """,
            conn,
            params=(ts, ts, day, lon, lat, day, cat),
            index_col=None,
        )
    res = {
        "generation_time": datetime.datetime.utcnow().strftime(ISO9660),
        "query_params": {
            "time": ts.strftime(ISO9660),
            "lon": lon,
            "lat": lat,
            "cat": cat,
            "day": day,
        },
        "outlook": {},
    }
    if df.empty:
        return json.dumps(res)
    # Keep only the most extreme threshold covering the point.
    df["threshold_rank"] = df["threshold"].apply(get_order)
    df = df.sort_values("threshold_rank", ascending=False)
    res["outlook"] = {
        "threshold": df.iloc[0]["threshold"],
        "utc_product_issue": pd.Timestamp(df.iloc[0]["v"]).strftime(ISO9660),
        "utc_issue": pd.Timestamp(df.iloc[0]["i"]).strftime(ISO9660),
        "utc_expire": pd.Timestamp(df.iloc[0]["e"]).strftime(ISO9660),
    }
    return json.dumps(res)
def dowork(lon, lat, last, day, cat):
    """List all outlooks of a given day/category covering a point.

    Args:
        lon (float): longitude of the point of interest.
        lat (float): latitude of the point of interest.
        last (int): if > 0, keep at most this many results per threshold.
        day (int): outlook day.
        cat (str): outlook category.

    Returns:
        str: JSON document with an "outlooks" list.
    """
    cursor = get_dbcursor()
    res = dict(outlooks=[])
    # Need to compute SIGN separately: the main query keeps only the
    # highest-priority non-TSTM/SIGN threshold per expiration, then the
    # single most recent SIGN outlook is appended via UNION ALL.
    cursor.execute(
        """
    WITH data as (
        SELECT issue at time zone 'UTC' as i,
        expire at time zone 'UTC' as e,
        product_issue at time zone 'UTC' as v,
        o.threshold, category, t.priority,
        row_number() OVER (PARTITION by expire
        ORDER by priority DESC NULLS last, issue ASC) as rank
        from spc_outlooks o, spc_outlook_thresholds t
        where o.threshold = t.threshold and
        ST_Contains(geom, ST_GeomFromEWKT('SRID=4326;POINT(%s %s)'))
        and day = %s and outlook_type = 'C' and category = %s
        and o.threshold not in ('TSTM', 'SIGN') ORDER by issue DESC),
    agg as (
        select i, e, v, threshold, category from data where rank = 1),
    sign as (
        SELECT issue at time zone 'UTC' as i,
        expire at time zone 'UTC' as e,
        product_issue at time zone 'UTC' as v,
        threshold, category from spc_outlooks
        where ST_Contains(geom, ST_GeomFromEWKT('SRID=4326;POINT(%s %s)'))
        and day = %s and outlook_type = 'C' and category = %s
        and threshold = 'SIGN' ORDER by expire DESC, issue ASC LIMIT 1)

    (SELECT i, e, v, threshold, category from agg
    ORDER by e DESC, threshold desc) UNION ALL
    (SELECT i, e, v, threshold, category from sign
    ORDER by e DESC, threshold desc)
    """,
        (lon, lat, day, cat, lon, lat, day, cat),
    )
    # Cap per-threshold result counts when `last` is positive.
    running = {}
    for row in cursor:
        if last > 0:
            running.setdefault(row[3], 0)
            running[row[3]] += 1
            if running[row[3]] > last:
                continue
        res["outlooks"].append(
            dict(
                day=day,
                utc_issue=row[0].strftime("%Y-%m-%dT%H:%M:%SZ"),
                utc_expire=row[1].strftime("%Y-%m-%dT%H:%M:%SZ"),
                utc_product_issue=row[2].strftime("%Y-%m-%dT%H:%M:%SZ"),
                threshold=row[3],
                category=row[4],
            )
        )
    return json.dumps(res)
def application(environ, start_response):
    """Answer request: WSGI entry point.

    Dispatches to dotime() when a `time` parameter is given, otherwise to
    dowork(). Results are cached in memcached for one hour under a key
    derived from all query parameters. Supports JSONP via `callback`.
    """
    fields = parse_formvars(environ)
    lat = float(fields.get("lat", 42.0))
    lon = float(fields.get("lon", -95.0))
    time = fields.get("time")
    last = int(fields.get("last", 0))
    day = int(fields.get("day", 1))
    cat = fields.get("cat", "categorical").upper()
    cb = fields.get("callback")
    mckey = f"/json/spcoutlook/{lon:.4f}/{lat:.4f}/{last}/{day}/{cat}/{time}"
    mc = Client("iem-memcached:11211")
    res = mc.get(mckey)
    if not res:
        if time is not None:
            res = dotime(time, lon, lat, day, cat)
        else:
            res = dowork(lon, lat, last, day, cat)
        # Cache the JSON string for one hour.
        mc.set(mckey, res, 3600)
    else:
        # memcached returns bytes; the handlers below want str.
        res = res.decode("utf-8")
    mc.close()
    if cb is not None:
        # JSONP: wrap the payload in the (escaped) callback name.
        res = f"{html_escape(cb)}({res})"
    headers = [("Content-type", "application/json")]
    start_response("200 OK", headers)
    return [res.encode("utf-8")]
| {
"content_hash": "2b5c2a15cb27f380204729d4ecfa8848",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 77,
"avg_line_length": 33.292397660818715,
"alnum_prop": 0.5678903917091165,
"repo_name": "akrherz/iem",
"id": "5b561a3f1a0563e5968c9209b25906532d945fde",
"size": "5693",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "htdocs/json/spcoutlook.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16912"
},
{
"name": "HTML",
"bytes": "1092923"
},
{
"name": "Hack",
"bytes": "7078"
},
{
"name": "JavaScript",
"bytes": "244253"
},
{
"name": "PHP",
"bytes": "3492474"
},
{
"name": "Python",
"bytes": "3279270"
},
{
"name": "Rich Text Format",
"bytes": "30075"
},
{
"name": "Shell",
"bytes": "72284"
}
],
"symlink_target": ""
} |
from ics_demo.dao.interfaces import base
from ics_demo.dao.orm.demo import Carrot
from ics_demo.helpers import uuidgen
def get_all():
    """Return all Carrot records serialized to dicts."""
    return base.class_get_all_to_dict(Carrot)
def get_one(uuid):
    """Return the Carrot with the given uuid serialized to a dict."""
    return base.class_get_one_by_uuid_to_dict(Carrot, uuid)
def get_obj(uuid):
    """Return the Carrot ORM object with the given uuid."""
    return base.class_get_one_by_uuid_to_obj(Carrot, uuid)
def get_obj_by_ID(ID):
    """Return the Carrot ORM object with the given numeric ID."""
    return base.class_get_one_by_ID_to_obj(Carrot, ID)
def get_keys():
    """Return the keys (field names) defined for the Carrot model."""
    return base.class_get_keys(Carrot)
def save(post_dict):
    """Build a new Carrot from a POST payload with 'name' and 'rabbit' keys.

    A fresh uuid is generated for the record; the new instance is returned.
    """
    return Carrot(
        uuid=uuidgen(),
        name=post_dict['name'],
        rabbit=post_dict['rabbit'],
    )
| {
"content_hash": "1109a6daa3bbc1fa1d354a0df56d9aef",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 59,
"avg_line_length": 25.833333333333332,
"alnum_prop": 0.7032258064516129,
"repo_name": "lielongxingkong/ics_demo",
"id": "1db4747531680bc6dafe8feaca0ac9d5ddf6e24a",
"size": "620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ics_demo/dao/interfaces/demo/carrot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1298"
},
{
"name": "Python",
"bytes": "24229"
},
{
"name": "Shell",
"bytes": "66"
}
],
"symlink_target": ""
} |
from collections import defaultdict
from prettytable import PrettyTable
import FirstYear
import Range
import URL
import MySQLdb
Details=defaultdict(lambda:defaultdict(lambda:'-'))
Branch_Switchers=defaultdict(lambda:defaultdict(lambda:'-'))
inputdata=open('InputFile.txt','r')
t=int(inputdata.readline().strip())
print "Number of Operations: "+str(t)+"\n"
db = MySQLdb.connect("localhost","root","","FIRST_YEAR")
if (db):
print "Connection successful"
else:
print "Connection Unsuccessful"
cursor = db.cursor()
# Rebuild all output tables from scratch on every run.
cursor.execute("DROP TABLE IF EXISTS DETAILED,PRECISE,BRANCH_CHANGERS,GRADE_STATUS,UFM")
# Full per-student record across six semesters, including carry-over (CO) runs.
sql = """CREATE TABLE DETAILED(
ROLL INT(10),
NAME CHAR(100),
FATHER_NAME CHAR(100),
SEM1 INT(10),
SEM2 INT(10),
PENDING1 CHAR(100),
STATUS1 CHAR(100),
SEM1_CO INT(10),
SEM2_CO INT(10),
PENDING1_CO CHAR(100),
STATUS1_CO CHAR(100),
SEM3 INT(10),
SEM4 INT(10),
PENDING2 CHAR(100),
STATUS2 CHAR(100),
SEM3_CO INT(10),
SEM4_CO INT(10),
PENDING2_CO CHAR(100),
STATUS2_CO CHAR(100),
SEM5 INT(10),
SEM6 INT(10),
PENDING3 CHAR(100),
STATUS3 CHAR(100)
)"""
cursor.execute(sql)
# Pass/fail status summary per student.
sql = """CREATE TABLE GRADE_STATUS(
ROLL INT(10),
NAME CHAR(100),
STATUS1 CHAR(100),
STATUS1_CO CHAR(100),
STATUS2 CHAR(100),
STATUS2_CO CHAR(100),
STATUS3 CHAR(100)
)"""
cursor.execute(sql)
# Students who changed branch after first year.
sql = """CREATE TABLE BRANCH_CHANGERS(
ROLL INT(10),
NAME CHAR(100),
FROM_BRANCH CHAR(100),
TO_BRANCH CHAR(100)
)"""
cursor.execute(sql)
# Unfair-means cases, one row per semester label.
sql="""CREATE TABLE UFM(
SEM CHAR(100),
ROLLS CHAR(100)
)"""
cursor.execute(sql)
# Each operation in InputFile.txt is a branch name plus a year number.
for i in range(t):
    branch=inputdata.readline().strip()
    year=int(inputdata.readline())
    if year==1:
        FirstYear.execute(Details,branch,Branch_Switchers,UFM)
# Pretty-printed mirrors of the SQL tables for console output.
Detailed=PrettyTable(["ROLL","NAME","FATHER_NAME","SEM1","SEM2","PENDING1","STATUS1","SEM1_CO","SEM2_CO","PENDING1_CO","STATUS1_CO","SEM3","SEM4",
"PENDING2","STATUS2","SEM3_CO","SEM4_CO","PENDING2_CO","STATUS2_CO","SEM5","SEM6","PENDING3","STATUS3"])
bswitch=PrettyTable(["ROLL","NAME","FROM BRANCH","TO BRANCH"])
grade_status=PrettyTable(["ROLL","NAME","STATUS1","STATUS1_CO","STATUS2","STATUS2_CO","STATUS3"])
ufm_table=PrettyTable(["SEMESTER","ROLL NUMBERS"])
li=Details.keys()
li.sort()
# Emit every student's record to both the console tables and MySQL.
for ROLL in li:
    Detailed.add_row([Details[ROLL]["ROLL"],Details[ROLL]['NAME'],Details[ROLL]["FATHER's NAME"],Details[ROLL]["SEM1"],Details[ROLL]["SEM2"],Details[ROLL]["PENDING1"],
    Details[ROLL]["STATUS1"],Details[ROLL]["SEM1_CO"],Details[ROLL]["SEM2_CO"],Details[ROLL]["PENDING1_CO"],Details[ROLL]["STATUS1_CO"],Details[ROLL]["SEM3"],
    Details[ROLL]["SEM4"],Details[ROLL]["PENDING2"],Details[ROLL]["STATUS2"],Details[ROLL]["SEM3_CO"],Details[ROLL]["SEM4_CO"],Details[ROLL]["PENDING2_CO"],
    Details[ROLL]["STATUS2_CO"],Details[ROLL]["SEM5"],Details[ROLL]["SEM6"],Details[ROLL]["PENDING3"],Details[ROLL]["STATUS3"]])
    cursor.execute("""INSERT INTO DETAILED(ROLL,NAME,FATHER_NAME,SEM1,SEM2,PENDING1,STATUS1,SEM1_CO,SEM2_CO,PENDING1_CO,STATUS1_CO,SEM3,SEM4,PENDING2,STATUS2,
    SEM3_CO,SEM4_CO,PENDING2_CO,STATUS2_CO,SEM5,SEM6,PENDING3,STATUS3)
    VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
    ,(Details[ROLL]["ROLL"],Details[ROLL]['NAME'],Details[ROLL]["FATHER's NAME"],Details[ROLL]["SEM1"],Details[ROLL]["SEM2"],Details[ROLL]["PENDING1"],
    Details[ROLL]["STATUS1"],Details[ROLL]["SEM1_CO"],Details[ROLL]["SEM2_CO"],Details[ROLL]["PENDING1_CO"],Details[ROLL]["STATUS1_CO"],Details[ROLL]["SEM3"],
    Details[ROLL]["SEM4"],Details[ROLL]["PENDING2"],Details[ROLL]["STATUS2"],Details[ROLL]["SEM3_CO"],Details[ROLL]["SEM4_CO"],Details[ROLL]["PENDING2_CO"],
    Details[ROLL]["STATUS2_CO"],Details[ROLL]["SEM5"],Details[ROLL]["SEM6"],Details[ROLL]["PENDING3"],Details[ROLL]["STATUS3"]))
    grade_status.add_row([Details[ROLL]["ROLL"],Details[ROLL]["NAME"],Details[ROLL]["STATUS1"],Details[ROLL]["STATUS1_CO"],
    Details[ROLL]["STATUS2"],Details[ROLL]["STATUS2_CO"],Details[ROLL]["STATUS3"]])
    cursor.execute("""INSERT INTO GRADE_STATUS(ROLL,NAME,STATUS1,STATUS1_CO,STATUS2,STATUS2_CO,STATUS3)
    VALUES(%s,%s,%s,%s,%s,%s,%s)"""
    ,(Details[ROLL]["ROLL"],Details[ROLL]["NAME"],Details[ROLL]["STATUS1"],Details[ROLL]["STATUS1_CO"],
    Details[ROLL]["STATUS2"],Details[ROLL]["STATUS2_CO"],Details[ROLL]["STATUS3"]))
# Emit branch-change records.
for ROLL in Branch_Switchers.keys():
    bswitch.add_row([Branch_Switchers[ROLL]["ROLL"],Branch_Switchers[ROLL]["NAME"],Branch_Switchers[ROLL]["FROM_BRANCH"],Branch_Switchers[ROLL]["TO_BRANCH"]])
    cursor.execute("""INSERT INTO BRANCH_CHANGERS(ROLL,NAME,FROM_BRANCH,TO_BRANCH)
    VALUES(%s,%s,%s,%s)"""
    ,(Branch_Switchers[ROLL]["ROLL"],Branch_Switchers[ROLL]["NAME"],Branch_Switchers[ROLL]["FROM_BRANCH"],Branch_Switchers[ROLL]["TO_BRANCH"]))
print "==================================================================================DETAILED DATABASE==================================================================================================="
print "\n"
print Detailed
print "\n====================================================CARRY OVER DETAILS DATABASE========================================================\n"
print Detailed.get_string(fields=["ROLL", "NAME","PENDING1","STATUS1","PENDING1_CO","STATUS1_CO","PENDING2","STATUS2","PENDING2_CO","STATUS2_CO","PENDING3","STATUS3"])
print "\n========================BRANCH CHANGERS=======================\n"
print bswitch
# Summary table derived from DETAILED: yearly totals per student.
sql = """CREATE TABLE PRECISE AS
SELECT ROLL,NAME,FATHER_NAME,(SEM1+SEM2) AS FIRST_YEAR,(SEM3+SEM4) AS SECOND_YEAR,(SEM5+SEM6) AS THIRD_YEAR
FROM DETAILED"""
cursor.execute(sql)
ufm_table.add_row(["ODD SEMESTER 2015-16",UFM["ODD SEMESTER 2015-16"]])
cursor.execute("""INSERT INTO UFM(SEM,ROLLS)
VALUES(%s,%s)"""
,("ODD SEMESTER 2015-16",UFM["ODD SEMESTER 2015-16"]))
db.commit()
raw_input("press enter to exit ;)")
| {
"content_hash": "8ba0f81e43d80dde2fa8b0d2542d6868",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 206,
"avg_line_length": 41.40251572327044,
"alnum_prop": 0.5726872246696035,
"repo_name": "atulac/KNIT-Training-And-Placement-Cell-Automation",
"id": "a0e9c3cc27ad42221c8eb5cfcd227366ad0727b0",
"size": "6583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DataGenerator/1stYear/Initiator.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "143810"
}
],
"symlink_target": ""
} |
from marshmallow_jsonapi import Schema, fields
from marshmallow import validate
from app.basemodels import db, CRUD_MixIn
from datetime import datetime
class Users(db.Model, CRUD_MixIn):
    """SQLAlchemy model for an application user account."""
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(250), nullable=False,unique=True)
    # NOTE(review): assumes the caller stores a hashed password here —
    # nothing in this model hashes it; confirm at the call sites.
    password = db.Column(db.String(250), nullable=False)
    name = db.Column(db.String(250), nullable=True, unique=True)
    # NOTE(review): createddate is nullable=True while updateddate is
    # nullable=False — confirm the asymmetry is intended.
    createddate = db.Column(db.DateTime, nullable=True, default=datetime.utcnow)
    createdby = db.Column(db.String(250), nullable=False)
    updateddate = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    updatedby = db.Column(db.String(250), nullable=False)

    def __init__(self, email, password, name, createdby, updatedby, ):
        """Create a user; createddate/updateddate fall back to the column
        defaults (utcnow) when the row is inserted."""
        self.email = email
        self.password = password
        self.name = name
        self.createdby = createdby
        self.updatedby = updatedby
class UsersSchema(Schema):
    """marshmallow-jsonapi schema for serializing Users records."""
    not_blank = validate.Length(min=1, error='Field cannot be blank')
    # add validate=not_blank in required fields
    id = fields.Integer(dump_only=True)
    email = fields.String(validate=not_blank)
    # load_only keeps the password out of serialized responses.
    password = fields.String(validate=not_blank, load_only=True)
    name = fields.String(validate=not_blank)
    createddate = fields.Date(dump_only=True)
    createdby = fields.String(validate=not_blank)
    updateddate = fields.Date(dump_only=True)
    updatedby = fields.String(validate=not_blank)

    # self links
    def get_top_level_links(self, data, many):
        """Return the JSON-API 'self' link: the collection URL for list
        responses, otherwise the individual resource URL."""
        if many:
            self_link = "/users/"
        else:
            self_link = "/users/{}".format(data['id'])
        return {'self': self_link}

    class Meta:
        # JSON-API resource type name.
        type_ = 'users'
| {
"content_hash": "e8dfb5582174f783bca2ba75bd3d67e3",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 81,
"avg_line_length": 33.61538461538461,
"alnum_prop": 0.6710526315789473,
"repo_name": "Leo-g/Flask-Scaffold",
"id": "42b80f93aa36d6ae2f838158ea1728492dd33122",
"size": "1748",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/users/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15274"
},
{
"name": "HTML",
"bytes": "14505"
},
{
"name": "JavaScript",
"bytes": "49098"
},
{
"name": "Nginx",
"bytes": "1545"
},
{
"name": "Python",
"bytes": "62750"
},
{
"name": "Shell",
"bytes": "21"
}
],
"symlink_target": ""
} |
from __future__ import division, absolute_import, print_function
from numpy.distutils.fcompiler import FCompiler
# Compiler classes this module exports to numpy.distutils.
compilers = ['HPUXFCompiler']
class HPUXFCompiler(FCompiler):
    """numpy.distutils wrapper for the HP-UX f90 Fortran compiler."""

    compiler_type = 'hpux'
    description = 'HP Fortran 90 Compiler'
    # Parses the version out of `f90 +version` output.
    version_pattern = r'HP F90 (?P<version>[^\s*,]*)'

    executables = {
        'version_cmd': ["f90", "+version"],
        'compiler_f77': ["f90"],
        'compiler_fix': ["f90"],
        'compiler_f90': ["f90"],
        'linker_so': ["ld", "-b"],
        'archiver': ["ar", "-cr"],
        'ranlib': ["ranlib"]
    }
    module_dir_switch = None  # XXX: fix me
    module_include_switch = None  # XXX: fix me
    pic_flags = ['+Z']

    def get_flags(self):
        """Base compile flags: PIC plus '+ppu +DD64'."""
        return self.pic_flags + ['+ppu', '+DD64']

    def get_flags_opt(self):
        """Optimization flags."""
        return ['-O3']

    def get_libraries(self):
        """Extra libraries to link against (libm)."""
        return ['m']

    def get_library_dirs(self):
        """Extra library search paths (64-bit HP-UX libs)."""
        opt = ['/usr/lib/hpux64']
        return opt

    def get_version(self, force=0, ok_status=[256, 0, 1]):
        # XXX status==256 may indicate 'unrecognized option' or
        # 'no input file'. So, version_cmd needs more work.
        # NOTE(review): mutable default list — appears read-only here, but
        # confirm FCompiler.get_version does not mutate ok_status.
        return FCompiler.get_version(self, force, ok_status)
if __name__ == '__main__':
    # Manual smoke test: detect the HP-UX compiler and report its version.
    from distutils import log
    log.set_verbosity(10)
    from numpy.distutils.fcompiler import new_fcompiler
    compiler = new_fcompiler(compiler='hpux')
    compiler.customize()
    print(compiler.get_version())
| {
"content_hash": "f30fd0e21e49281841d3a5b2c0aeb973",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 64,
"avg_line_length": 27.358490566037737,
"alnum_prop": 0.5875862068965517,
"repo_name": "DailyActie/Surrogate-Model",
"id": "32ffa8e662bcf790413a1884d29ad8f07ea4c745",
"size": "1450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "01-codes/numpy-master/numpy/distutils/fcompiler/hpux.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "345"
},
{
"name": "Batchfile",
"bytes": "18746"
},
{
"name": "C",
"bytes": "13004913"
},
{
"name": "C++",
"bytes": "14692003"
},
{
"name": "CMake",
"bytes": "72831"
},
{
"name": "CSS",
"bytes": "303488"
},
{
"name": "Fortran",
"bytes": "7339415"
},
{
"name": "HTML",
"bytes": "854774"
},
{
"name": "Java",
"bytes": "38854"
},
{
"name": "JavaScript",
"bytes": "2432846"
},
{
"name": "Jupyter Notebook",
"bytes": "829689"
},
{
"name": "M4",
"bytes": "1379"
},
{
"name": "Makefile",
"bytes": "48708"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "PHP",
"bytes": "93585"
},
{
"name": "Pascal",
"bytes": "1449"
},
{
"name": "Perl",
"bytes": "1152272"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "34668203"
},
{
"name": "Roff",
"bytes": "5925"
},
{
"name": "Ruby",
"bytes": "92498"
},
{
"name": "Shell",
"bytes": "94698"
},
{
"name": "TeX",
"bytes": "156540"
},
{
"name": "TypeScript",
"bytes": "41691"
}
],
"symlink_target": ""
} |
# BUGFIX: the original `import profile` bound the *module* named `profile`
# (Python resolves the stdlib profiler unless a local models/profile.py
# shadows it), and `class d_profile(profile)` then raised TypeError because
# a module is not a valid base class.  Import the `profile` class from that
# module instead.
# TODO(review): confirm the sibling module defines a class named `profile`.
from profile import profile


class d_profile(profile):
    """Default profile: a profile whose name is fixed to "default"."""

    def __init__(self):
        self.name = "default"

    def get(self):
        """Return this profile instance."""
        return self
"content_hash": "8394e9141e9edf1f09c8c99a7ce26cd7",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 39,
"avg_line_length": 20.7,
"alnum_prop": 0.5507246376811594,
"repo_name": "Ganben/parrot",
"id": "2c0c846a83c2513a19555e23884b067a5e61ccc6",
"size": "236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyrrot/chatserver/models/d_profile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "35293"
}
],
"symlink_target": ""
} |
# -- CLI setup: project paths, pandas display options, env, project imports.
from __future__ import print_function  # (at top of module)
import time
import sys
import os
import pandas as pd
import numpy as np
import pickle
# This file lives two directories below the project root.
PROJ_ROOT = os.path.join(os.getcwd(), os.pardir, os.pardir)
# Widen pandas console output so recommendation tables print on one line.
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.options.display.float_format = '{:,.3f}'.format
# load environment variables from .env file using dotenv.
from dotenv import load_dotenv
dotenv_path = os.path.join(PROJ_ROOT, '.env')
load_dotenv(dotenv_path)
# add the 'src' directory as one where we can import modules
src_dir = os.path.join(PROJ_ROOT, 'src')
sys.path.append(src_dir)
from data.spotipy_functions import *
from models.train_model import *
def define_seed():
    """Prompt for up to five seed track Spotify IDs.

    Stops early on an empty entry; returns the list of IDs entered.
    """
    print("Enter between 1 and 5 seed track spotify IDs")
    seeds = []
    while len(seeds) < 5:
        entry = input("Enter track seed (ENTER for none): ")
        if not entry:
            break
        seeds.append(entry)
    return seeds
# -- MAIN interactive loop --------------------------------------------------
# Seed the recommender, then repeatedly display the training set and the
# current suggestions, letting the user accept/reject tracks, save state,
# or reseed, until they quit.
init_seed = define_seed()
print("Generating recommendations...")
training_set = define_training_set(init_seed)
suggest_set = band_BPMs(get_new_recs_and_feats(init_seed,30),80,170)
gui_cols = ['artist_name','track_name','tempo','uri','status']
acc_rej = ''
while acc_rej != 'q':
    print(' ')
    print('TRAINING SET')
    print('------------')
    print(training_set[gui_cols])
    print(' ')
    print('RECOMMENDATIONS')
    print('---------------')
    print(suggest_set[gui_cols + ['P_accept']])
    print(' ')
    print('Type a# or r# to accept or reject a track.')
    print('(e.g. To accept track 5, type: a5)')
    print('To Save, type: s')
    print('To get new recommendations from a new seed, type: g')
    acc_rej = input('To quit, type: q > ')
    if acc_rej == 'q':
        break
    elif acc_rej == 's':
        # Persist both sets for later sessions.
        training_set.to_pickle(os.path.join(PROJ_ROOT,'data','interim','training_set.pkl'))
        suggest_set.to_pickle(os.path.join(PROJ_ROOT,'data','interim','track_set.pkl'))
    elif acc_rej == 'g':
        # Reseed and regenerate the suggestion pool.
        init_seed = define_seed()
        suggest_set = band_BPMs(get_new_recs_and_feats(init_seed,30),80,170)
    # BUGFIX: the original indexed acc_rej[0] before checking for an empty
    # string, so pressing ENTER raised IndexError. str.startswith() is
    # False for '' and handles that case safely.
    elif acc_rej.startswith(('a', 'r')):
        status = 1 if acc_rej.startswith('a') else -1
        # ROBUSTNESS: a bare 'a'/'r' or a non-numeric index used to crash
        # with ValueError; treat it as invalid input instead.
        try:
            idx = int(acc_rej[1:])
        except ValueError:
            print('Error, invalid input.')
        else:
            suggest_set, training_set = process_track(suggest_set, training_set, idx, status)
    else:
        print('Error, invalid input.')
    # only train the model when it has positive and negative examples
    if training_set.status.min() < 0:
        suggest_set = train_NB_model(suggest_set, training_set)
| {
"content_hash": "c6303faddd07e33027383fbb4b92998d",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 94,
"avg_line_length": 30.48780487804878,
"alnum_prop": 0.66,
"repo_name": "tomsyouruncle/DJ_suggest",
"id": "20a228596ed07e930cbaef7c835ff8c545552e2a",
"size": "2518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ui/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3123"
},
{
"name": "Jupyter Notebook",
"bytes": "471043"
},
{
"name": "Makefile",
"bytes": "1191"
},
{
"name": "Python",
"bytes": "16214"
}
],
"symlink_target": ""
} |
"""This module provides the default commands for beets' command-line
interface.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import os
import re
from collections import namedtuple, Counter
from itertools import chain
import beets
from beets import ui
from beets.ui import print_, input_, decargs, show_path_changes
from beets import autotag
from beets.autotag import Recommendation
from beets.autotag import hooks
from beets import plugins
from beets import importer
from beets import util
from beets.util import syspath, normpath, ancestry, displayable_path
from beets import library
from beets import config
from beets import logging
from beets.util.confit import _package_path
VARIOUS_ARTISTS = u'Various Artists'

# A choice presented to the user during interactive import prompts: a
# one-character shortcut, a long label, and a callback invoked on selection.
# BUGFIX: the namedtuple typename was 'ExtraChoice', which did not match the
# bound name; a mismatched typename breaks pickling and yields a misleading
# repr. The field layout is unchanged, so all callers keep working.
PromptChoice = namedtuple('PromptChoice', ['short', 'long', 'callback'])
# Global logger (beets' own logging wrapper, imported above as
# `from beets import logging` — not the stdlib module).
log = logging.getLogger('beets')

# The list of default subcommands. This is populated with Subcommand
# objects that can be fed to a SubcommandsOptionParser.
default_commands = []
# Utilities.
def _do_query(lib, query, album, also_items=True):
"""For commands that operate on matched items, performs a query
and returns a list of matching items and a list of matching
albums. (The latter is only nonempty when album is True.) Raises
a UserError if no items match. also_items controls whether, when
fetching albums, the associated items should be fetched also.
"""
if album:
albums = list(lib.albums(query))
items = []
if also_items:
for al in albums:
items += al.items()
else:
albums = []
items = list(lib.items(query))
if album and not albums:
raise ui.UserError('No matching albums found.')
elif not album and not items:
raise ui.UserError('No matching items found.')
return items, albums
# fields: Shows a list of available fields for queries and format strings.
def _print_keys(query):
    """Print the `key` field of each row in `query`, indented two spaces."""
    indent = ' ' * 2
    for row in query:
        print_(indent + row[b'key'])
def fields_func(lib, opts, args):
    """Implement the `fields` command: list fixed and flexible field names
    for both Items and Albums.
    """
    def _print_rows(names):
        names.sort()
        print_(" " + "\n ".join(names))

    # Fixed (schema-defined) fields for each model.
    for model in (library.Item, library.Album):
        print_("%s fields:" % model.__name__)
        _print_rows(model.all_keys())

    # Flexible attributes are stored as rows; DISTINCT collapses duplicates.
    with lib.transaction() as tx:
        unique_fields = 'SELECT DISTINCT key FROM (%s)'
        for model in (library.Item, library.Album):
            print_("%s flexible attributes:" % model.__name__)
            _print_keys(tx.query(unique_fields % model._flex_table))
# Register the `fields` subcommand.
fields_cmd = ui.Subcommand(
    'fields',
    help='show fields available for queries and format strings'
)
fields_cmd.func = fields_func
default_commands.append(fields_cmd)
# help: Print help text for commands
class HelpCommand(ui.Subcommand):
    """The `help` command: print help for a named sub-command, or the
    global help when no argument is given.
    """

    def __init__(self):
        super(HelpCommand, self).__init__(
            'help', aliases=('?',),
            help='give detailed help on a specific sub-command',
        )

    def func(self, lib, opts, args):
        if not args:
            self.root_parser.print_help()
            return
        cmdname = args[0]
        subcommand = self.root_parser._subcommand_for_name(cmdname)
        if not subcommand:
            raise ui.UserError("unknown command '{0}'".format(cmdname))
        subcommand.print_help()
default_commands.append(HelpCommand())  # register the built-in `help` command
# import: Autotagger and importer.
# Importer utilities and support.
def disambig_string(info):
    """Build a comma-joined context string for an AlbumInfo or TrackInfo
    that helps tell apart similar-looking candidates.

    Returns None when there is nothing to disambiguate with.
    """
    parts = []

    # Non-MusicBrainz sources are worth calling out.
    if info.data_source and info.data_source != 'MusicBrainz':
        parts.append(info.data_source)

    if isinstance(info, hooks.AlbumInfo):
        if info.media:
            if info.mediums > 1:
                parts.append(u'{0}x{1}'.format(info.mediums, info.media))
            else:
                parts.append(info.media)
        if info.year:
            parts.append(unicode(info.year))
        for attr in ('country', 'label', 'albumdisambig'):
            value = getattr(info, attr)
            if value:
                parts.append(value)

    if parts:
        return u', '.join(parts)
def dist_string(dist):
    """Format a distance (a float) as a colorized similarity percentage.

    Color reflects how strong the match is relative to the configured
    recommendation thresholds.
    """
    out = '%.1f%%' % ((1 - dist) * 100)
    if dist <= config['match']['strong_rec_thresh'].as_number():
        color = 'text_success'
    elif dist <= config['match']['medium_rec_thresh'].as_number():
        color = 'text_warning'
    else:
        color = 'text_error'
    return ui.colorize(color, out)
def penalty_string(distance, limit=None):
    """Return a colorized string naming the penalties applied to a
    distance object, or None when there are none.

    At most `limit` names are shown (with a trailing '...') when `limit`
    is given.
    """
    penalties = [
        key.replace('album_', '').replace('track_', '').replace('_', ' ')
        for key in distance.keys()
    ]
    if not penalties:
        return
    if limit and len(penalties) > limit:
        penalties = penalties[:limit] + ['...']
    return ui.colorize('text_warning', '(%s)' % ', '.join(penalties))
def show_change(cur_artist, cur_album, match):
    """Print out a representation of the changes that will be made if an
    album's tags are changed according to `match`, which must be an AlbumMatch
    object.

    Output covers: the album-level rename, data URL, similarity/penalty
    info line, a two-column per-track diff, and any missing/unmatched
    tracks.
    """
    def show_album(artist, album):
        # Print one "artist - album" line (or a placeholder).
        if artist:
            album_description = u'    %s - %s' % (artist, album)
        elif album:
            album_description = u'    %s' % album
        else:
            album_description = u'    (unknown album)'
        print_(album_description)

    def format_index(track_info):
        """Return a string representing the track index of the given
        TrackInfo or Item object.
        """
        if isinstance(track_info, hooks.TrackInfo):
            index = track_info.index
            medium_index = track_info.medium_index
            medium = track_info.medium
            mediums = match.info.mediums
        else:
            index = medium_index = track_info.track
            medium = track_info.disc
            mediums = track_info.disctotal
        if config['per_disc_numbering']:
            if mediums > 1:
                return u'{0}-{1}'.format(medium, medium_index)
            else:
                return unicode(medium_index)
        else:
            return unicode(index)

    # Identify the album in question.
    if cur_artist != match.info.artist or \
            (cur_album != match.info.album and
             match.info.album != VARIOUS_ARTISTS):
        artist_l, artist_r = cur_artist or '', match.info.artist
        album_l,  album_r = cur_album or '', match.info.album
        if artist_r == VARIOUS_ARTISTS:
            # Hide artists for VA releases.
            artist_l, artist_r = u'', u''
        artist_l, artist_r = ui.colordiff(artist_l, artist_r)
        album_l, album_r = ui.colordiff(album_l, album_r)
        print_("Correcting tags from:")
        show_album(artist_l, album_l)
        print_("To:")
        show_album(artist_r, album_r)
    else:
        print_(u"Tagging:\n    {0.artist} - {0.album}".format(match.info))

    # Data URL.
    if match.info.data_url:
        print_('URL:\n    %s' % match.info.data_url)

    # Info line.
    info = []
    # Similarity.
    info.append('(Similarity: %s)' % dist_string(match.distance))
    # Penalties.
    penalties = penalty_string(match.distance)
    if penalties:
        info.append(penalties)
    # Disambiguation.
    disambig = disambig_string(match.info)
    if disambig:
        info.append(ui.colorize('text_highlight_minor', '(%s)' % disambig))
    print_(' '.join(info))

    # Tracks.
    pairs = match.mapping.items()
    pairs.sort(key=lambda (_, track_info): track_info.index)

    # Build up LHS and RHS for track difference display. The `lines` list
    # contains ``(lhs, rhs, width)`` tuples where `width` is the length (in
    # characters) of the uncolorized LHS.
    lines = []
    medium = disctitle = None
    for item, track_info in pairs:

        # Medium number and title.
        if medium != track_info.medium or disctitle != track_info.disctitle:
            media = match.info.media or 'Media'
            if match.info.mediums > 1 and track_info.disctitle:
                lhs = '%s %s: %s' % (media, track_info.medium,
                                     track_info.disctitle)
            elif match.info.mediums > 1:
                lhs = '%s %s' % (media, track_info.medium)
            elif track_info.disctitle:
                lhs = '%s: %s' % (media, track_info.disctitle)
            else:
                lhs = None
            if lhs:
                lines.append((lhs, '', 0))
            medium, disctitle = track_info.medium, track_info.disctitle

        # Titles.
        new_title = track_info.title
        if not item.title.strip():
            # If there's no title, we use the filename.
            cur_title = displayable_path(os.path.basename(item.path))
            lhs, rhs = cur_title, new_title
        else:
            cur_title = item.title.strip()
            lhs, rhs = ui.colordiff(cur_title, new_title)
        lhs_width = len(cur_title)

        # Track number change.
        cur_track, new_track = format_index(item), format_index(track_info)
        if cur_track != new_track:
            if item.track in (track_info.index, track_info.medium_index):
                color = 'text_highlight_minor'
            else:
                color = 'text_highlight'
            templ = ui.colorize(color, u' (#{0})')
            lhs += templ.format(cur_track)
            rhs += templ.format(new_track)
            lhs_width += len(cur_track) + 4

        # Length change.
        if item.length and track_info.length and \
                abs(item.length - track_info.length) > \
                config['ui']['length_diff_thresh'].as_number():
            cur_length = ui.human_seconds_short(item.length)
            new_length = ui.human_seconds_short(track_info.length)
            templ = ui.colorize('text_highlight', u' ({0})')
            lhs += templ.format(cur_length)
            rhs += templ.format(new_length)
            lhs_width += len(cur_length) + 3

        # Penalties.
        penalties = penalty_string(match.distance.tracks[track_info])
        if penalties:
            rhs += ' %s' % penalties

        if lhs != rhs:
            lines.append((' * %s' % lhs, rhs, lhs_width))
        elif config['import']['detail']:
            lines.append((' * %s' % lhs, '', lhs_width))

    # Print each track in two columns, or across two lines.
    col_width = (ui.term_width() - len(''.join([' * ', ' -> ']))) // 2
    if lines:
        max_width = max(w for _, _, w in lines)
        for lhs, rhs, lhs_width in lines:
            if not rhs:
                print_(lhs)
            elif max_width > col_width:
                print_(u'%s ->\n   %s' % (lhs, rhs))
            else:
                pad = max_width - lhs_width
                print_(u'%s%s -> %s' % (lhs, ' ' * pad, rhs))

    # Missing and unmatched tracks.
    if match.extra_tracks:
        print_('Missing tracks ({0}/{1} - {2:.1%}):'.format(
            len(match.extra_tracks),
            len(match.info.tracks),
            len(match.extra_tracks) / len(match.info.tracks)
        ))
    for track_info in match.extra_tracks:
        line = ' ! %s (#%s)' % (track_info.title, format_index(track_info))
        if track_info.length:
            line += ' (%s)' % ui.human_seconds_short(track_info.length)
        print_(ui.colorize('text_warning', line))
    if match.extra_items:
        print_('Unmatched tracks ({0}):'.format(len(match.extra_items)))
    for item in match.extra_items:
        line = ' ! %s (#%s)' % (item.title, format_index(item))
        if item.length:
            line += ' (%s)' % ui.human_seconds_short(item.length)
        print_(ui.colorize('text_warning', line))
def show_item_change(item, match):
    """Print out the change that would occur by tagging `item` with the
    metadata from `match`, a TrackMatch object.
    """
    cur_artist = item.artist
    new_artist = match.info.artist
    cur_title = item.title
    new_title = match.info.title

    if cur_artist != new_artist or cur_title != new_title:
        # Highlight the differing portions of the tags before printing.
        cur_artist, new_artist = ui.colordiff(cur_artist, new_artist)
        cur_title, new_title = ui.colordiff(cur_title, new_title)
        print_("Correcting track tags from:")
        print_("    %s - %s" % (cur_artist, cur_title))
        print_("To:")
        print_("    %s - %s" % (new_artist, new_title))
    else:
        print_("Tagging track: %s - %s" % (cur_artist, cur_title))

    # Data URL.
    if match.info.data_url:
        print_('URL:\n    %s' % match.info.data_url)

    # Assemble a single info line: similarity, then optional penalty and
    # disambiguation annotations.
    info = ['(Similarity: %s)' % dist_string(match.distance)]
    penalties = penalty_string(match.distance)
    if penalties:
        info.append(penalties)
    disambig = disambig_string(match.info)
    if disambig:
        info.append(ui.colorize('text_highlight_minor', '(%s)' % disambig))
    print_(' '.join(info))
def summarize_items(items, singleton):
    """Produces a brief summary line describing a set of items. Used for
    manually resolving duplicates during import.

    `items` is a list of `Item` objects. `singleton` indicates whether
    this is an album or single-item import (if the latter, then `items`
    should only have one element).
    """
    summary_parts = []
    if not singleton:
        summary_parts.append("{0} items".format(len(items)))

    format_counts = {}
    for item in items:
        format_counts[item.format] = format_counts.get(item.format, 0) + 1
    if len(format_counts) == 1:
        # A single format.
        summary_parts.append(items[0].format)
    else:
        # Enumerate all the formats by decreasing frequencies.
        # NOTE: subscripting instead of a tuple-parameter lambda
        # (`lambda (f, c):`), which is a syntax error on Python 3
        # (PEP 3113).
        for fmt, count in sorted(format_counts.items(),
                                 key=lambda fc: (-fc[1], fc[0])):
            summary_parts.append('{0} {1}'.format(fmt, count))

    if items:
        average_bitrate = sum([item.bitrate for item in items]) / len(items)
        total_duration = sum([item.length for item in items])
        total_filesize = sum([item.filesize for item in items])
        summary_parts.append('{0}kbps'.format(int(average_bitrate / 1000)))
        summary_parts.append(ui.human_seconds_short(total_duration))
        summary_parts.append(ui.human_bytes(total_filesize))

    return ', '.join(summary_parts)
def _summary_judgment(rec):
    """Determines whether a decision should be made without even asking
    the user. This occurs in quiet mode and when an action is chosen for
    NONE recommendations. Return an action or None if the user should be
    queried. May also print to the console if a summary judgment is
    made.
    """
    if config['import']['quiet']:
        if rec == Recommendation.strong:
            # Quiet mode trusts strong matches outright.
            return importer.action.APPLY
        else:
            # Otherwise, use the configured quiet-mode fallback.
            action = config['import']['quiet_fallback'].as_choice({
                'skip': importer.action.SKIP,
                'asis': importer.action.ASIS,
            })

    elif rec == Recommendation.none:
        # No recommendation: consult the configured default, where
        # 'ask' (mapped to None) defers to the user.
        action = config['import']['none_rec_action'].as_choice({
            'skip': importer.action.SKIP,
            'asis': importer.action.ASIS,
            'ask': None,
        })

    else:
        return None

    # Report the automatic decision on the console.
    if action == importer.action.SKIP:
        print_('Skipping.')
    elif action == importer.action.ASIS:
        print_('Importing as-is.')
    return action
def choose_candidate(candidates, singleton, rec, cur_artist=None,
                     cur_album=None, item=None, itemcount=None,
                     extra_choices=None):
    """Given a sorted list of candidates, ask the user for a selection
    of which candidate to use. Applies to both full albums and
    singletons (tracks). Candidates are either AlbumMatch or TrackMatch
    objects depending on `singleton`. For albums, `cur_artist`,
    `cur_album`, and `itemcount` must be provided. For singletons,
    `item` must be provided.

    `extra_choices` is a list of `PromptChoice`s, containing the choices
    appended by the plugins after receiving the `before_choose_candidate`
    event. If not empty, the choices are appended to the prompt presented
    to the user.

    Returns one of the following:
    * the result of the choice, which may be SKIP, ASIS, TRACKS, or MANUAL
    * a candidate (an AlbumMatch/TrackMatch object)
    * the short letter of a `PromptChoice` (if the user selected one of
      the `extra_choices`).
    """
    # Use a None sentinel instead of a mutable default argument (shared
    # across calls); normalize to an empty list here.
    if extra_choices is None:
        extra_choices = []

    # Sanity check.
    if singleton:
        assert item is not None
    else:
        assert cur_artist is not None
        assert cur_album is not None

    # Build helper variables for extra choices.
    extra_opts = tuple(c.long for c in extra_choices)
    extra_actions = tuple(c.short for c in extra_choices)

    # Zero candidates.
    if not candidates:
        if singleton:
            print_("No matching recordings found.")
            opts = ('Use as-is', 'Skip', 'Enter search', 'enter Id',
                    'aBort')
        else:
            print_("No matching release found for {0} tracks."
                   .format(itemcount))
            print_('For help, see: '
                   'http://beets.readthedocs.org/en/latest/faq.html#nomatch')
            opts = ('Use as-is', 'as Tracks', 'Group albums', 'Skip',
                    'Enter search', 'enter Id', 'aBort')
        sel = ui.input_options(opts + extra_opts)
        if sel == 'u':
            return importer.action.ASIS
        elif sel == 't':
            assert not singleton
            return importer.action.TRACKS
        elif sel == 'e':
            return importer.action.MANUAL
        elif sel == 's':
            return importer.action.SKIP
        elif sel == 'b':
            raise importer.ImportAbort()
        elif sel == 'i':
            return importer.action.MANUAL_ID
        elif sel == 'g':
            return importer.action.ALBUMS
        elif sel in extra_actions:
            return sel
        else:
            assert False

    # Is the change good enough?
    bypass_candidates = False
    if rec != Recommendation.none:
        match = candidates[0]
        bypass_candidates = True

    while True:
        # Display and choose from candidates.
        require = rec <= Recommendation.low

        if not bypass_candidates:
            # Display list of candidates.
            print_(u'Finding tags for {0} "{1} - {2}".'.format(
                u'track' if singleton else u'album',
                item.artist if singleton else cur_artist,
                item.title if singleton else cur_album,
            ))
            print_(u'Candidates:')
            for i, match in enumerate(candidates):
                # Index, metadata, and distance.
                line = [
                    u'{0}.'.format(i + 1),
                    u'{0} - {1}'.format(
                        match.info.artist,
                        match.info.title if singleton else match.info.album,
                    ),
                    u'({0})'.format(dist_string(match.distance)),
                ]

                # Penalties.
                penalties = penalty_string(match.distance, 3)
                if penalties:
                    line.append(penalties)

                # Disambiguation.
                disambig = disambig_string(match.info)
                if disambig:
                    line.append(ui.colorize('text_highlight_minor',
                                            '(%s)' % disambig))

                print_(' '.join(line))

            # Ask the user for a choice.
            if singleton:
                opts = ('Skip', 'Use as-is', 'Enter search', 'enter Id',
                        'aBort')
            else:
                opts = ('Skip', 'Use as-is', 'as Tracks', 'Group albums',
                        'Enter search', 'enter Id', 'aBort')
            sel = ui.input_options(opts + extra_opts,
                                   numrange=(1, len(candidates)))
            if sel == 's':
                return importer.action.SKIP
            elif sel == 'u':
                return importer.action.ASIS
            elif sel == 'm':
                # "More candidates": stay in the loop and re-display.
                pass
            elif sel == 'e':
                return importer.action.MANUAL
            elif sel == 't':
                assert not singleton
                return importer.action.TRACKS
            elif sel == 'b':
                raise importer.ImportAbort()
            elif sel == 'i':
                return importer.action.MANUAL_ID
            elif sel == 'g':
                return importer.action.ALBUMS
            elif sel in extra_actions:
                return sel
            else:  # Numerical selection.
                match = candidates[sel - 1]
                if sel != 1:
                    # When choosing anything but the first match,
                    # disable the default action.
                    require = True
        bypass_candidates = False

        # Show what we're about to do.
        if singleton:
            show_item_change(item, match)
        else:
            show_change(cur_artist, cur_album, match)

        # Exact match => tag automatically if we're not in timid mode.
        if rec == Recommendation.strong and not config['import']['timid']:
            return match

        # Ask for confirmation.
        if singleton:
            opts = ('Apply', 'More candidates', 'Skip', 'Use as-is',
                    'Enter search', 'enter Id', 'aBort')
        else:
            opts = ('Apply', 'More candidates', 'Skip', 'Use as-is',
                    'as Tracks', 'Group albums', 'Enter search', 'enter Id',
                    'aBort')
        default = config['import']['default_action'].as_choice({
            'apply': 'a',
            'skip': 's',
            'asis': 'u',
            'none': None,
        })
        if default is None:
            require = True
        sel = ui.input_options(opts + extra_opts, require=require,
                               default=default)
        if sel == 'a':
            return match
        elif sel == 'g':
            return importer.action.ALBUMS
        elif sel == 's':
            return importer.action.SKIP
        elif sel == 'u':
            return importer.action.ASIS
        elif sel == 't':
            assert not singleton
            return importer.action.TRACKS
        elif sel == 'e':
            return importer.action.MANUAL
        elif sel == 'b':
            raise importer.ImportAbort()
        elif sel == 'i':
            return importer.action.MANUAL_ID
        elif sel in extra_actions:
            return sel
def manual_search(singleton):
    """Input either an artist and album (for full albums) or artist and
    track name (for singletons) for manual search.
    """
    # Prompt label depends on whether a single track or an album is
    # being matched.
    artist = input_('Artist:').strip()
    name = input_('Track:' if singleton else 'Album:').strip()
    return artist, name
def manual_id(singleton):
    """Input an ID, either for an album ("release") or a track ("recording").
    """
    entity = 'recording' if singleton else 'release'
    return input_(u'Enter {0} ID:'.format(entity)).strip()
class TerminalImportSession(importer.ImportSession):
    """An import session that runs in a terminal.

    Interactive decisions (candidate choice, duplicate resolution,
    resume prompts) are delegated to the user via the console helpers
    defined above.
    """
    def choose_match(self, task):
        """Given an initial autotagging of items, go through an interactive
        dance with the user to ask for a choice of metadata. Returns an
        AlbumMatch object, ASIS, or SKIP.
        """
        # Show what we're tagging.
        print_()
        print_(displayable_path(task.paths, u'\n') +
               u' ({0} items)'.format(len(task.items)))

        # Take immediate action if appropriate.
        action = _summary_judgment(task.rec)
        if action == importer.action.APPLY:
            match = task.candidates[0]
            show_change(task.cur_artist, task.cur_album, match)
            return match
        elif action is not None:
            return action

        # Loop until we have a choice.
        candidates, rec = task.candidates, task.rec
        while True:
            # Gather extra choices from plugins.
            extra_choices = self._get_plugin_choices(task)
            extra_ops = {c.short: c.callback for c in extra_choices}

            # Ask for a choice from the user.
            choice = choose_candidate(
                candidates, False, rec, task.cur_artist, task.cur_album,
                itemcount=len(task.items), extra_choices=extra_choices
            )

            # Choose which tags to use.
            if choice in (importer.action.SKIP, importer.action.ASIS,
                          importer.action.TRACKS, importer.action.ALBUMS):
                # Pass selection to main control flow.
                return choice
            elif choice is importer.action.MANUAL:
                # Try again with manual search terms.
                search_artist, search_album = manual_search(False)
                _, _, candidates, rec = autotag.tag_album(
                    task.items, search_artist, search_album
                )
            elif choice is importer.action.MANUAL_ID:
                # Try a manually-entered ID.
                search_id = manual_id(False)
                if search_id:
                    _, _, candidates, rec = autotag.tag_album(
                        task.items, search_ids=search_id.split()
                    )
            elif choice in extra_ops.keys():
                # Allow extra ops to automatically set the post-choice.
                post_choice = extra_ops[choice](self, task)
                if isinstance(post_choice, importer.action):
                    # MANUAL and MANUAL_ID have no effect, even if returned.
                    return post_choice
            else:
                # We have a candidate! Finish tagging. Here, choice is an
                # AlbumMatch object.
                assert isinstance(choice, autotag.AlbumMatch)
                return choice

    def choose_item(self, task):
        """Ask the user for a choice about tagging a single item. Returns
        either an action constant or a TrackMatch object.
        """
        print_()
        print_(task.item.path)
        candidates, rec = task.candidates, task.rec

        # Take immediate action if appropriate.
        action = _summary_judgment(task.rec)
        if action == importer.action.APPLY:
            match = candidates[0]
            show_item_change(task.item, match)
            return match
        elif action is not None:
            return action

        while True:
            extra_choices = self._get_plugin_choices(task)
            extra_ops = {c.short: c.callback for c in extra_choices}

            # Ask for a choice.
            choice = choose_candidate(candidates, True, rec, item=task.item,
                                      extra_choices=extra_choices)

            if choice in (importer.action.SKIP, importer.action.ASIS):
                return choice
            elif choice == importer.action.TRACKS:
                assert False  # TRACKS is only legal for albums.
            elif choice == importer.action.MANUAL:
                # Continue in the loop with a new set of candidates.
                search_artist, search_title = manual_search(True)
                candidates, rec = autotag.tag_item(task.item, search_artist,
                                                   search_title)
            elif choice == importer.action.MANUAL_ID:
                # Ask for a track ID.
                search_id = manual_id(True)
                if search_id:
                    candidates, rec = autotag.tag_item(
                        task.item, search_ids=search_id.split())
            elif choice in extra_ops.keys():
                # Allow extra ops to automatically set the post-choice.
                post_choice = extra_ops[choice](self, task)
                if isinstance(post_choice, importer.action):
                    # MANUAL and MANUAL_ID have no effect, even if returned.
                    return post_choice
            else:
                # Chose a candidate.
                assert isinstance(choice, autotag.TrackMatch)
                return choice

    def resolve_duplicate(self, task, found_duplicates):
        """Decide what to do when a new album or item seems similar to one
        that's already in the library.
        """
        log.warn(u"This {0} is already in the library!",
                 ("album" if task.is_album else "item"))

        if config['import']['quiet']:
            # In quiet mode, don't prompt -- just skip.
            log.info(u'Skipping.')
            sel = 's'
        else:
            # Print some detail about the existing and new items so the
            # user can make an informed decision.
            for duplicate in found_duplicates:
                print_("Old: " + summarize_items(
                    list(duplicate.items()) if task.is_album else [duplicate],
                    not task.is_album,
                ))

            print_("New: " + summarize_items(
                task.imported_items(),
                not task.is_album,
            ))

            sel = ui.input_options(
                ('Skip new', 'Keep both', 'Remove old')
            )

        if sel == 's':
            # Skip new.
            task.set_choice(importer.action.SKIP)
        elif sel == 'k':
            # Keep both. Do nothing; leave the choice intact.
            pass
        elif sel == 'r':
            # Remove old.
            task.should_remove_duplicates = True
        else:
            assert False

    def should_resume(self, path):
        # Confirm with the user before resuming an interrupted import.
        return ui.input_yn(u"Import of the directory:\n{0}\n"
                           "was interrupted. Resume (Y/n)?"
                           .format(displayable_path(path)))

    def _get_plugin_choices(self, task):
        """Get the extra choices appended to the plugins to the ui prompt.

        The `before_choose_candidate` event is sent to the plugins, with
        session and task as its parameters. Plugins are responsible for
        checking the right conditions and returning a list of `PromptChoice`s,
        which is flattened and checked for conflicts.

        Raises `ValueError` if two of the choices have the same short letter.

        Returns a list of `PromptChoice`s.
        """
        # Send the before_choose_candidate event and flatten list.
        extra_choices = list(chain(*plugins.send('before_choose_candidate',
                                                 session=self, task=task)))
        # Take into account default options, for duplicate checking.
        all_choices = [PromptChoice('a', 'Apply', None),
                       PromptChoice('s', 'Skip', None),
                       PromptChoice('u', 'Use as-is', None),
                       PromptChoice('t', 'as Tracks', None),
                       PromptChoice('g', 'Group albums', None),
                       PromptChoice('e', 'Enter search', None),
                       PromptChoice('i', 'enter Id', None),
                       PromptChoice('b', 'aBort', None)] +\
            extra_choices

        short_letters = [c.short for c in all_choices]
        if len(short_letters) != len(set(short_letters)):
            # Duplicate short letter has been found.
            duplicates = [i for i, count in Counter(short_letters).items()
                          if count > 1]
            for short in duplicates:
                # Keep the first of the choices, removing the rest.
                dup_choices = [c for c in all_choices if c.short == short]
                for c in dup_choices[1:]:
                    log.warn(u"Prompt choice '{0}' removed due to conflict "
                             u"with '{1}' (short letter: '{2}')",
                             c.long, dup_choices[0].long, c.short)
                    extra_choices.remove(c)
        return extra_choices
# The import command.
def import_files(lib, paths, query):
    """Import the files in the given list of paths or matching the
    query.

    Validates paths and config consistency, opens the optional import
    log, then runs a `TerminalImportSession` and emits the `import`
    plugin event.
    """
    # Check the user-specified directories.
    for path in paths:
        if not os.path.exists(syspath(normpath(path))):
            raise ui.UserError(u'no such file or directory: {0}'.format(
                displayable_path(path)))

    # Check parameter consistency.
    if config['import']['quiet'] and config['import']['timid']:
        raise ui.UserError("can't be both quiet and timid")

    # Open the log.
    if config['import']['log'].get() is not None:
        logpath = syspath(config['import']['log'].as_filename())
        try:
            loghandler = logging.FileHandler(logpath)
        except IOError:
            raise ui.UserError(u"could not open log file for writing: "
                               u"{0}".format(displayable_path(logpath)))
    else:
        loghandler = None

    # Never ask for input in quiet mode.
    if config['import']['resume'].get() == 'ask' and \
            config['import']['quiet']:
        config['import']['resume'] = False

    session = TerminalImportSession(lib, loghandler, paths, query)
    session.run()

    # Emit event.
    plugins.send('import', lib=lib, paths=paths)
def import_func(lib, opts, args):
    """CLI adapter for the `import` command: merge flags into config and
    dispatch to `import_files`.
    """
    config['import'].set_args(opts)

    # Special case: --copy flag suppresses import_move (which would
    # otherwise take precedence).
    if opts.copy:
        config['import']['move'] = False

    if opts.library:
        # Re-import items already in the library, selected by a query.
        query, paths = decargs(args), []
    else:
        # Import from filesystem paths given on the command line.
        query, paths = None, args
        if not paths:
            raise ui.UserError('no path specified')

    import_files(lib, paths, query)
# Build the `import` subcommand and its option flags.  Paired flags
# (-c/-C, -w/-W, -a/-A, -p/-P, -i/-I) share a dest with default=None so
# the config layer can distinguish "explicitly enabled", "explicitly
# disabled", and "unset".
import_cmd = ui.Subcommand(
    'import', help='import new music', aliases=('imp', 'im')
)
import_cmd.parser.add_option(
    '-c', '--copy', action='store_true', default=None,
    help="copy tracks into library directory (default)"
)
import_cmd.parser.add_option(
    '-C', '--nocopy', action='store_false', dest='copy',
    help="don't copy tracks (opposite of -c)"
)
import_cmd.parser.add_option(
    '-w', '--write', action='store_true', default=None,
    help="write new metadata to files' tags (default)"
)
import_cmd.parser.add_option(
    '-W', '--nowrite', action='store_false', dest='write',
    help="don't write metadata (opposite of -w)"
)
import_cmd.parser.add_option(
    '-a', '--autotag', action='store_true', dest='autotag',
    help="infer tags for imported files (default)"
)
import_cmd.parser.add_option(
    '-A', '--noautotag', action='store_false', dest='autotag',
    help="don't infer tags for imported files (opposite of -a)"
)
import_cmd.parser.add_option(
    '-p', '--resume', action='store_true', default=None,
    help="resume importing if interrupted"
)
import_cmd.parser.add_option(
    '-P', '--noresume', action='store_false', dest='resume',
    help="do not try to resume importing"
)
import_cmd.parser.add_option(
    '-q', '--quiet', action='store_true', dest='quiet',
    help="never prompt for input: skip albums instead"
)
import_cmd.parser.add_option(
    '-l', '--log', dest='log',
    help='file to log untaggable albums for later review'
)
import_cmd.parser.add_option(
    '-s', '--singletons', action='store_true',
    help='import individual tracks instead of full albums'
)
import_cmd.parser.add_option(
    '-t', '--timid', dest='timid', action='store_true',
    help='always confirm all actions'
)
import_cmd.parser.add_option(
    '-L', '--library', dest='library', action='store_true',
    help='retag items matching a query'
)
import_cmd.parser.add_option(
    '-i', '--incremental', dest='incremental', action='store_true',
    help='skip already-imported directories'
)
import_cmd.parser.add_option(
    '-I', '--noincremental', dest='incremental', action='store_false',
    help='do not skip already-imported directories'
)
import_cmd.parser.add_option(
    '--flat', dest='flat', action='store_true',
    help='import an entire tree as a single album'
)
import_cmd.parser.add_option(
    '-g', '--group-albums', dest='group_albums', action='store_true',
    help='group tracks in a folder into separate albums'
)
import_cmd.parser.add_option(
    '--pretend', dest='pretend', action='store_true',
    help='just print the files to import'
)
import_cmd.parser.add_option(
    '-S', '--search-id', dest='search_ids', action='append',
    metavar='BACKEND_ID',
    help='restrict matching to a specific metadata backend ID'
)
import_cmd.func = import_func
default_commands.append(import_cmd)
# list: Query and show library contents.
def list_items(lib, query, album, fmt=''):
    """Print out items in lib matching query. If album, then search for
    albums instead of single items.
    """
    # Pick the collection to search, then print one formatted line per
    # matching object.
    results = lib.albums(query) if album else lib.items(query)
    for obj in results:
        ui.print_(format(obj, fmt))
def list_func(lib, opts, args):
    # Thin CLI adapter: decode shell arguments into a library query.
    list_items(lib, decargs(args), opts.album)


list_cmd = ui.Subcommand('list', help='query the library', aliases=('ls',))
list_cmd.parser.usage += "\n" \
    'Example: %prog -f \'$album: $title\' artist:beatles'
list_cmd.parser.add_all_common_options()
list_cmd.func = list_func
default_commands.append(list_cmd)
# update: Update library contents according to on-disk tags.
def update_items(lib, query, album, move, pretend):
    """For all the items matched by the query, update the library to
    reflect the item's embedded tags.

    When `move` is true, relocate changed files inside the library
    directory; when `pretend` is true, only display changes.
    """
    # The whole pass runs in one transaction so album fix-ups see a
    # consistent view of the item changes.
    with lib.transaction():
        items, _ = _do_query(lib, query, album)

        # Walk through the items and pick up their changes.
        affected_albums = set()
        for item in items:
            # Item deleted?
            if not os.path.exists(syspath(item.path)):
                ui.print_(format(item))
                ui.print_(ui.colorize('text_error', u'  deleted'))
                if not pretend:
                    item.remove(True)
                affected_albums.add(item.album_id)
                continue

            # Did the item change since last checked?
            if item.current_mtime() <= item.mtime:
                log.debug(u'skipping {0} because mtime is up to date ({1})',
                          displayable_path(item.path), item.mtime)
                continue

            # Read new data.
            try:
                item.read()
            except library.ReadError as exc:
                log.error(u'error reading {0}: {1}',
                          displayable_path(item.path), exc)
                continue

            # Special-case album artist when it matches track artist. (Hacky
            # but necessary for preserving album-level metadata for non-
            # autotagged imports.)
            if not item.albumartist:
                old_item = lib.get_item(item.id)
                if old_item.albumartist == old_item.artist == item.artist:
                    item.albumartist = old_item.albumartist
                    item._dirty.discard('albumartist')

            # Check for and display changes.
            changed = ui.show_model_changes(item,
                                            fields=library.Item._media_fields)

            # Save changes.
            if not pretend:
                if changed:
                    # Move the item if it's in the library.
                    if move and lib.directory in ancestry(item.path):
                        item.move()

                    item.store()
                    affected_albums.add(item.album_id)
                else:
                    # The file's mtime was different, but there were no
                    # changes to the metadata. Store the new mtime,
                    # which is set in the call to read(), so we don't
                    # check this again in the future.
                    item.store()

        # Skip album changes while pretending.
        if pretend:
            return

        # Modify affected albums to reflect changes in their items.
        for album_id in affected_albums:
            if album_id is None:  # Singletons.
                continue
            album = lib.get_album(album_id)
            if not album:  # Empty albums have already been removed.
                log.debug(u'emptied album {0}', album_id)
                continue
            first_item = album.items().get()

            # Update album structure to reflect an item in it.
            for key in library.Album.item_keys:
                album[key] = first_item[key]
            album.store()

            # Move album art (and any inconsistent items).
            if move and lib.directory in ancestry(first_item.path):
                log.debug(u'moving album {0}', album_id)
                album.move()
def update_func(lib, opts, args):
    # Resolve the move flag against configuration before delegating.
    update_items(lib, decargs(args), opts.album, ui.should_move(opts.move),
                 opts.pretend)


update_cmd = ui.Subcommand(
    'update', help='update the library', aliases=('upd', 'up',)
)
update_cmd.parser.add_album_option()
update_cmd.parser.add_format_option()
update_cmd.parser.add_option(
    '-m', '--move', action='store_true', dest='move',
    help="move files in the library directory"
)
update_cmd.parser.add_option(
    '-M', '--nomove', action='store_false', dest='move',
    help="don't move files in library"
)
update_cmd.parser.add_option(
    '-p', '--pretend', action='store_true',
    help="show all changes but do nothing"
)
update_cmd.func = update_func
default_commands.append(update_cmd)
# remove: Remove items from library, delete files.
def remove_items(lib, query, album, delete):
    """Remove items matching query from lib. If album, then match and
    remove whole albums. If delete, also remove files from disk.
    """
    # Get the matching items.
    items, albums = _do_query(lib, query, album)

    # Build the confirmation prompt and display format depending on
    # whether files will actually be deleted from disk.
    print_()
    count = len(items)
    plural = 's' if count > 1 else ''
    if delete:
        fmt = u'$path - $title'
        prompt = 'Really DELETE %i file%s (y/n)?' % (count, plural)
    else:
        fmt = ''
        prompt = 'Really remove %i item%s from the library (y/n)?' % \
            (count, plural)

    # Show all the items.
    for item in items:
        ui.print_(format(item, fmt))

    # Confirm with user.
    if not ui.input_yn(prompt, True):
        return

    # Remove (and possibly delete) items.
    with lib.transaction():
        for obj in (albums if album else items):
            obj.remove(delete)
def remove_func(lib, opts, args):
    # Thin CLI adapter for `remove`.
    remove_items(lib, decargs(args), opts.album, opts.delete)


remove_cmd = ui.Subcommand(
    'remove', help='remove matching items from the library', aliases=('rm',)
)
remove_cmd.parser.add_option(
    "-d", "--delete", action="store_true",
    help="also remove files from disk"
)
remove_cmd.parser.add_album_option()
remove_cmd.func = remove_func
default_commands.append(remove_cmd)
# stats: Show library/query statistics.
def show_stats(lib, query, exact):
    """Shows some statistics about the matched items.

    If `exact`, stat files on disk for precise sizes and show exact
    durations; otherwise estimate size from each item's bitrate and
    length.
    """
    items = lib.items(query)

    total_size = 0
    total_time = 0.0
    total_items = 0
    artists = set()
    albums = set()
    album_artists = set()

    for item in items:
        if exact:
            try:
                total_size += os.path.getsize(item.path)
            except OSError as exc:
                # Use explicit {0}/{1} fields for consistency with the
                # rest of the file's log messages.
                log.info(u'could not get size of {0}: {1}', item.path, exc)
        else:
            # Estimate the file size from the audio stream parameters.
            total_size += int(item.length * item.bitrate / 8)
        total_time += item.length
        total_items += 1
        artists.add(item.artist)
        album_artists.add(item.albumartist)
        if item.album_id:
            albums.add(item.album_id)

    # (Dropped a pointless `'' +` prefix here.)
    size_str = ui.human_bytes(total_size)
    if exact:
        size_str += ' ({0} bytes)'.format(total_size)

    print_("""Tracks: {0}
Total time: {1}{2}
{3}: {4}
Artists: {5}
Albums: {6}
Album artists: {7}""".format(
        total_items,
        ui.human_seconds(total_time),
        ' ({0:.2f} seconds)'.format(total_time) if exact else '',
        'Total size' if exact else 'Approximate total size',
        size_str,
        len(artists),
        len(albums),
        len(album_artists)),
    )
def stats_func(lib, opts, args):
    # Thin CLI adapter for `stats`.
    show_stats(lib, decargs(args), opts.exact)


stats_cmd = ui.Subcommand(
    'stats', help='show statistics about the library or a query'
)
stats_cmd.parser.add_option(
    '-e', '--exact', action='store_true',
    help='exact size and time'
)
stats_cmd.func = stats_func
default_commands.append(stats_cmd)
# version: Show current beets version.
def show_version(lib, opts, args):
    # Print the beets version and the list of loaded plugins.
    print_('beets version %s' % beets.__version__)
    # Show plugins.
    names = sorted(p.name for p in plugins.find_plugins())
    if names:
        print_('plugins:', ', '.join(names))
    else:
        print_('no plugins loaded')


version_cmd = ui.Subcommand(
    'version', help='output version information'
)
version_cmd.func = show_version
default_commands.append(version_cmd)
# modify: Declaratively change metadata.
def modify_items(lib, mods, dels, query, write, move, album, confirm):
    """Modifies matching items according to user-specified assignments and
    deletions.

    `mods` is a dictionary of field and value pairs indicating
    assignments. `dels` is a list of fields to be deleted.
    """
    # Parse key=value specifications into a dictionary.
    model_cls = library.Album if album else library.Item

    for key, value in mods.items():
        mods[key] = model_cls._parse(key, value)

    # Get the items to modify.
    items, albums = _do_query(lib, query, album, False)
    objs = albums if album else items

    # Apply changes *temporarily*, preview them, and collect modified
    # objects.
    print_('Modifying {0} {1}s.'
           .format(len(objs), 'album' if album else 'item'))
    changed = set()
    for obj in objs:
        obj.update(mods)
        for field in dels:
            try:
                del obj[field]
            except KeyError:
                # Deleting a field the object doesn't have is a no-op.
                pass
        if ui.show_model_changes(obj):
            changed.add(obj)

    # Still something to do?
    if not changed:
        print_('No changes to make.')
        return

    # Confirm action.
    if confirm:
        if write and move:
            extra = ', move and write tags'
        elif write:
            extra = ' and write tags'
        elif move:
            extra = ' and move'
        else:
            extra = ''

        if not ui.input_yn('Really modify%s (Y/n)?' % extra):
            return

    # Apply changes to database and files
    with lib.transaction():
        for obj in changed:
            obj.try_sync(write, move)
def modify_parse_args(args):
    """Split the arguments for the modify subcommand into query parts,
    assignments (field=value), and deletions (field!). Returns the result
    as a three-tuple in that order.
    """
    query, mods, dels = [], {}, []
    for arg in args:
        if arg.endswith('!') and '=' not in arg and ':' not in arg:
            # Deletion: strip the trailing "!".
            dels.append(arg[:-1])
            continue
        eq_pos = arg.find('=')
        colon_pos = arg.find(':')
        if eq_pos >= 0 and (colon_pos < 0 or colon_pos > eq_pos):
            # Assignment: everything before the first "=" is the field.
            field, _, value = arg.partition('=')
            mods[field] = value
        else:
            # Anything else is part of the query.
            query.append(arg)
    return query, mods, dels
def modify_func(lib, opts, args):
    # Split the CLI arguments and refuse to run with nothing to change.
    query, mods, dels = modify_parse_args(decargs(args))
    if not mods and not dels:
        raise ui.UserError('no modifications specified')
    modify_items(lib, mods, dels, query, ui.should_write(opts.write),
                 ui.should_move(opts.move), opts.album, not opts.yes)


modify_cmd = ui.Subcommand(
    'modify', help='change metadata fields', aliases=('mod',)
)
modify_cmd.parser.add_option(
    '-m', '--move', action='store_true', dest='move',
    help="move files in the library directory"
)
modify_cmd.parser.add_option(
    '-M', '--nomove', action='store_false', dest='move',
    help="don't move files in library"
)
modify_cmd.parser.add_option(
    '-w', '--write', action='store_true', default=None,
    help="write new metadata to files' tags (default)"
)
modify_cmd.parser.add_option(
    '-W', '--nowrite', action='store_false', dest='write',
    help="don't write metadata (opposite of -w)"
)
modify_cmd.parser.add_album_option()
modify_cmd.parser.add_format_option(target='item')
modify_cmd.parser.add_option(
    '-y', '--yes', action='store_true',
    help='skip confirmation'
)
modify_cmd.func = modify_func
default_commands.append(modify_cmd)
# move: Move/copy files to the library or a new base directory.
def move_items(lib, dest, query, copy, album, pretend):
    """Moves or copies items to a new base directory, given by dest. If
    dest is None, then the library's base directory is used, making the
    command "consolidate" files.
    """
    items, albums = _do_query(lib, query, album, False)
    objs = albums if album else items

    # Filter out files that don't need to be moved.  Named functions
    # instead of lambda assignments (PEP 8 E731) for clearer tracebacks.
    def is_item_moved(item):
        # An item needs moving when its current path differs from its
        # computed destination.
        return item.path != item.destination(basedir=dest)

    def is_album_moved(album):
        # An album needs moving when any of its items does.
        return any(is_item_moved(i) for i in album.items())

    objs = [o for o in objs if (is_album_moved if album else is_item_moved)(o)]

    action = 'Copying' if copy else 'Moving'
    entity = 'album' if album else 'item'
    log.info(u'{0} {1} {2}{3}.', action, len(objs), entity,
             's' if len(objs) != 1 else '')
    if not objs:
        return

    if pretend:
        # Show what would happen without touching anything.
        if album:
            show_path_changes([(item.path, item.destination(basedir=dest))
                               for obj in objs for item in obj.items()])
        else:
            show_path_changes([(obj.path, obj.destination(basedir=dest))
                               for obj in objs])
    else:
        for obj in objs:
            log.debug(u'moving: {0}', util.displayable_path(obj.path))
            obj.move(copy, basedir=dest)
            obj.store()
def move_func(lib, opts, args):
    # Validate the destination directory (if any) before doing work.
    dest = opts.dest
    if dest is not None:
        dest = normpath(dest)
        if not os.path.isdir(dest):
            raise ui.UserError('no such directory: %s' % dest)

    move_items(lib, dest, decargs(args), opts.copy, opts.album, opts.pretend)


move_cmd = ui.Subcommand(
    'move', help='move or copy items', aliases=('mv',)
)
move_cmd.parser.add_option(
    '-d', '--dest', metavar='DIR', dest='dest',
    help='destination directory'
)
move_cmd.parser.add_option(
    '-c', '--copy', default=False, action='store_true',
    help='copy instead of moving'
)
move_cmd.parser.add_option(
    '-p', '--pretend', default=False, action='store_true',
    help='show how files would be moved, but don\'t touch anything')
move_cmd.parser.add_album_option()
move_cmd.func = move_func
default_commands.append(move_cmd)
# write: Write tags into files.
def write_items(lib, query, pretend, force):
    """Write tag information from the database to the respective files
    in the filesystem.

    With `pretend`, only display the would-be changes; with `force`,
    write even when the on-disk tags already match the database.
    """
    items, albums = _do_query(lib, query, False, False)

    for item in items:
        # Item deleted?
        if not os.path.exists(syspath(item.path)):
            log.info(u'missing file: {0}', util.displayable_path(item.path))
            continue

        # Get an Item object reflecting the "clean" (on-disk) state.
        try:
            clean_item = library.Item.from_path(item.path)
        except library.ReadError as exc:
            log.error(u'error reading {0}: {1}',
                      displayable_path(item.path), exc)
            continue

        # Check for and display changes.
        changed = ui.show_model_changes(item, clean_item,
                                        library.Item._media_tag_fields, force)
        if (changed or force) and not pretend:
            # We use `try_sync` here to keep the mtime up to date in the
            # database.
            item.try_sync(True, False)
def write_func(lib, opts, args):
    # Thin CLI adapter for `write`.
    write_items(lib, decargs(args), opts.pretend, opts.force)


write_cmd = ui.Subcommand('write', help='write tag information to files')
write_cmd.parser.add_option(
    '-p', '--pretend', action='store_true',
    help="show all changes but do nothing"
)
write_cmd.parser.add_option(
    '-f', '--force', action='store_true',
    help="write tags even if the existing tags match the database"
)
write_cmd.func = write_func
default_commands.append(write_cmd)
# config: Show and edit user configuration.
def config_func(lib, opts, args):
    """Show or edit the user configuration, depending on the options."""
    # Make sure lazy configuration is loaded before inspecting it.
    config.resolve()

    if opts.paths:
        # List every file the configuration was loaded from, honouring
        # --defaults for the built-in default source.
        filenames = [source.filename for source in config.sources
                     if source.filename
                     and (opts.defaults or not source.default)]

        # In case the user config file does not exist, prepend it to the
        # list.
        user_path = config.user_config_path()
        if user_path not in filenames:
            filenames.insert(0, user_path)

        for filename in filenames:
            print_(filename)
    elif opts.edit:
        # Open the configuration in the user's editor.
        config_edit()
    else:
        # Dump the merged configuration to stdout.
        print_(config.dump(full=opts.defaults, redact=opts.redact))
def config_edit():
    """Launch an editor on the user's configuration file.

    An empty config file is created first if none exists yet.
    """
    path = config.user_config_path()
    editor = util.editor_command()
    try:
        # Touch the file so the editor has something to open.
        if not os.path.isfile(path):
            with open(path, 'w+'):
                pass
        util.interactive_open([path], editor)
    except OSError as exc:
        message = "Could not edit configuration: {0}".format(exc)
        if not editor:
            message += ". Please set the EDITOR environment variable"
        raise ui.UserError(message)
# Register the `config` subcommand and its options.
config_cmd = ui.Subcommand('config',
                           help='show or edit the user configuration')
config_cmd.parser.add_option(
    '-p', '--paths', action='store_true',
    help='show files that configuration was loaded from'
)
config_cmd.parser.add_option(
    '-e', '--edit', action='store_true',
    help='edit user configuration with $EDITOR'
)
config_cmd.parser.add_option(
    '-d', '--defaults', action='store_true',
    help='include the default configuration'
)
# `--clear` stores False into `redact` (default True): showing sensitive
# fields requires explicitly opting out of redaction.
config_cmd.parser.add_option(
    '-c', '--clear', action='store_false',
    dest='redact', default=True,
    help='do not redact sensitive fields'
)
config_cmd.func = config_func
default_commands.append(config_cmd)
# completion: print completion script
def print_completion(*args):
    """Print the bash completion script for beets to stdout."""
    # Lines from the generator already contain their newlines.
    for line in completion_script(default_commands + plugins.commands()):
        print_(line, end='')
    # Warn when the bash-completion helper package cannot be found; the
    # generated script relies on it.
    # NOTE: BASH_COMPLETION_PATHS is defined below at module level; it is
    # only resolved here at call time, so the forward reference is fine.
    if not any(map(os.path.isfile, BASH_COMPLETION_PATHS)):
        log.warn(u'Warning: Unable to find the bash-completion package. '
                 u'Command line completion might not work.')
# Candidate locations of the bash-completion helper script on various
# platforms. Built as a list (not `map`) so the sequence can be iterated
# more than once: on Python 3, `map` returns a one-shot iterator that
# `print_completion` would exhaust on its first call.
BASH_COMPLETION_PATHS = [syspath(p) for p in [
    u'/etc/bash_completion',
    u'/usr/share/bash-completion/bash_completion',
    u'/usr/share/local/bash-completion/bash_completion',
    u'/opt/local/share/bash-completion/bash_completion',  # SmartOS
    u'/usr/local/etc/bash_completion',  # Homebrew
]]
def completion_script(commands):
    """Yield the full completion shell script as strings.

    ``commands`` is a list of ``ui.Subcommand`` instances to generate
    completion data for.
    """
    # Start with the static base script that defines the helper functions.
    base_script = os.path.join(_package_path('beets.ui'), 'completion_base.sh')
    with open(base_script, 'r') as base_script:
        yield base_script.read()

    options = {}
    aliases = {}
    command_names = []

    # Collect subcommands
    for cmd in commands:
        name = cmd.name
        command_names.append(name)

        for alias in cmd.aliases:
            # Only single-word aliases can be completed in bash.
            if re.match(r'^\w+$', alias):
                aliases[alias] = name

        options[name] = {'flags': [], 'opts': []}
        # Skip the first parser option (the implicit help option) and
        # partition the rest into boolean flags vs. value-taking options.
        for opts in cmd.parser._get_all_options()[1:]:
            if opts.action in ('store_true', 'store_false'):
                option_type = 'flags'
            else:
                option_type = 'opts'
            options[name][option_type].extend(
                opts._short_opts + opts._long_opts
            )

    # Add global options
    options['_global'] = {
        'flags': ['-v', '--verbose'],
        'opts': '-l --library -c --config -d --directory -h --help'.split(' ')
    }

    # Add flags common to all commands
    options['_common'] = {
        'flags': ['-h', '--help']
    }

    # Start generating the script
    yield "_beet() {\n"

    # Command names
    yield " local commands='%s'\n" % ' '.join(command_names)
    yield "\n"

    # Command aliases
    yield " local aliases='%s'\n" % ' '.join(aliases.keys())
    for alias, cmd in aliases.items():
        yield " local alias__%s=%s\n" % (alias, cmd)
    yield '\n'

    # Fields completable for queries/modifications.
    # NOTE(review): `dict.keys() + dict.keys()` requires Python 2 list
    # semantics — confirm before running under Python 3.
    yield " fields='%s'\n" % ' '.join(
        set(library.Item._fields.keys() + library.Album._fields.keys())
    )

    # Command options
    for cmd, opts in options.items():
        for option_type, option_list in opts.items():
            if option_list:
                option_list = ' '.join(option_list)
                yield " local %s__%s='%s'\n" % (option_type, cmd, option_list)

    yield ' _beet_dispatch\n'
    yield '}\n'
# Register the `completion` subcommand.
completion_cmd = ui.Subcommand(
    'completion',
    help='print shell script that provides command line completion'
)
completion_cmd.func = print_completion
# Hidden: not listed in help output, but still invocable.
completion_cmd.hide = True
default_commands.append(completion_cmd)
| {
"content_hash": "1333a13702a0703b7be78c5fe878dda3",
"timestamp": "",
"source": "github",
"line_count": 1709,
"max_line_length": 79,
"avg_line_length": 34.153306026916326,
"alnum_prop": 0.5760176809210527,
"repo_name": "parapente/beets",
"id": "c6846e92f4ad685fe0249f4e4f6f4f3f751d2eba",
"size": "59039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beets/ui/commands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2951"
},
{
"name": "HTML",
"bytes": "3307"
},
{
"name": "JavaScript",
"bytes": "85950"
},
{
"name": "Python",
"bytes": "1693553"
},
{
"name": "Shell",
"bytes": "7413"
}
],
"symlink_target": ""
} |
import random
from typing import List
from parlai.core.message import Message
from parlai.core.mutators import ManyEpisodeMutator, register_mutator
@register_mutator("flatten")
class FlattenMutator(ManyEpisodeMutator):
    """
    Flattens the entire conversation history.

    Simply concatenates all turns in the conversation with a newline. Frequently useful
    when composed with other mutators.
    """

    def many_episode_mutation(self, episode: List[Message]) -> List[List[Message]]:
        # Accumulated dialogue so far: context turns interleaved with
        # (randomly sampled) label turns.
        turns_so_far = []
        for turn in episode:
            turns_so_far.append(turn.pop('text'))
            # Replace this turn's text with the whole flattened history
            # and emit it as a stand-alone single-turn episode.
            turn['text'] = '\n'.join(turns_so_far)
            yield [turn]
            # Append one of the gold labels so later turns see a response.
            turns_so_far.append(random.choice(turn['labels']))
| {
"content_hash": "a5e8ad8a56f547fb80ffa2abfe4251fe",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 87,
"avg_line_length": 33.40909090909091,
"alnum_prop": 0.689795918367347,
"repo_name": "facebookresearch/ParlAI",
"id": "741825c735e8c21ff14baa4c0439741d9af4ac7c",
"size": "935",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "parlai/mutators/flatten.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2000"
},
{
"name": "CSS",
"bytes": "38474"
},
{
"name": "Cuda",
"bytes": "4118"
},
{
"name": "Dockerfile",
"bytes": "1218"
},
{
"name": "HTML",
"bytes": "645771"
},
{
"name": "JavaScript",
"bytes": "405110"
},
{
"name": "Makefile",
"bytes": "289"
},
{
"name": "Python",
"bytes": "6802410"
},
{
"name": "Shell",
"bytes": "26147"
}
],
"symlink_target": ""
} |
from .ips120 import Ips120
from .itc503 import Itc503
from .mercuryips import MercuryIps
from .triton200 import Triton200
| {
"content_hash": "4f428901cd6f4d2b20af3019dc4dba7c",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 34,
"avg_line_length": 30.5,
"alnum_prop": 0.8360655737704918,
"repo_name": "masonlab/labdrivers",
"id": "d4998372ea2f407921f15f77745ddab290d3e3cb",
"size": "122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "labdrivers/oxford/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "195295"
},
{
"name": "Python",
"bytes": "89960"
}
],
"symlink_target": ""
} |
""" file: rotation.py
author: Jess Robertson
CSIRO Mineral Resources Flagship
date: Wednesday April 01, 2015
description: rotation functions
"""
import numpy
import scipy.sparse
def rotation_matrix(angles_list=None, angles_array=None):
    r"""Return a rotation matrix in n dimensions.

    The combined rotation array is built up by left-multiplying the
    preexisting rotation array by the rotation around a given axis
    (a Givens rotation). For a $d$-dimensional array, this is given by:

    $$ C(\theta) = R(\theta_{d, d-1})R(\theta_{d, d-2})\times\ldots\times
          R(\theta_{i, j})\times\ldots R(\theta_{1, 2})$$

    where $i$ and $j$ are positive integers ranging from 1 to $d$, and
    satisfy $i \leq j$.

    Parameters:
        angles_list - a flat sequence of the d*(d-1)/2 rotation angles
            (in radians) for a d-dimensional rotation.
        angles_array - alternatively, a (d, d) array whose strictly
            lower-triangular entries are the rotation angles.
        Exactly one of the two arguments must be supplied.

    Returns:
        a (d, d) numpy array containing the combined rotation.

    Raises:
        ValueError - when both or neither argument is supplied, or when
            angles_list has a length matching no dimension.
    """
    # Check inputs
    if angles_list is not None and angles_array is not None:
        raise ValueError('You should only supply one of the angles_list'
                         ' or angles_array arguments to rotation_matrix')
    elif angles_list is None and angles_array is None:
        # BUG FIX: previously this case fell through to an
        # UnboundLocalError on `dim`; fail with a clear message instead.
        raise ValueError('You must supply one of the angles_list or '
                         'angles_array arguments to rotation_matrix')
    elif angles_list is not None:
        # Guess the dimension d by inverting len == d*(d-1)/2.
        dimension_estimate = int(1 + numpy.sqrt(1 + 8 * len(angles_list))) // 2
        checks = [int(dimension_estimate) - 1, int(dimension_estimate)]
        # BUG FIX: this was `map(...)`, which on Python 3 is a one-shot,
        # non-subscriptable iterator — the membership test exhausted it
        # and the error format below raised TypeError. Use a list.
        allowed_angles = [d * (d - 1) // 2 for d in checks]
        if len(angles_list) not in allowed_angles:
            # BUG FIX: the angle counts ({1}) and dimensions ({2}) were
            # swapped in the original .format() call.
            err_string = (
                'Wrong number of angles ({0}) supplied to rotation_matrix - '
                'you should specify d*(d-1)/2 angles for a d-dimensional '
                'rotation matrix (i.e. {1[0]} angles for d={2[0]} or {1[1]} '
                'angles for d={2[1]})'
            ).format(len(angles_list), allowed_angles, checks)
            raise ValueError(err_string)
        else:
            dim = dimension_estimate

        # Generate angles array from the flat list: fill the strictly
        # lower-triangular entries in ndenumerate (row-major) order.
        angles_array = numpy.zeros((dim, dim))
        angles_gen = (a for a in angles_list)
        for idx, _ in numpy.ndenumerate(angles_array):
            if idx[0] > idx[1]:
                angles_array[idx] = next(angles_gen)
    else:
        angles_array = numpy.asarray(angles_array)
        dim = angles_array.shape[0]

    # Generate rotation matrix
    identity = scipy.sparse.identity(dim, format='lil')
    combined = identity.copy()
    for idx, angle in numpy.ndenumerate(angles_array):
        # Make sure we're on the lower-diagonal part of the angles array
        if idx[0] <= idx[1]:
            continue

        # Build non-zero elements of rotation matrix using Givens rotations
        # see: https://en.wikipedia.org/wiki/Givens_rotation
        rotation = identity.copy()
        rotation[idx[0], idx[0]] = numpy.cos(angle)
        rotation[idx[1], idx[1]] = numpy.cos(angle)
        rotation[idx[0], idx[1]] = numpy.sin(angle)
        rotation[idx[1], idx[0]] = -numpy.sin(angle)

        # Build combined rotation matrix
        combined = combined.dot(rotation)
    return numpy.asarray(combined.todense())
def rotate(points, angle_x, angle_y=None, angle_z=None):
    """Rotate points through some angle about the given axes.

    Parameters:
        points - 2-by-N or 3-by-N array of points to rotate
        angle_x, angle_y, angle_z - rotation angle about the x/y/z axis
            in degrees. The y and z rotations are optional and should
            only be specified for three-dimensional points.

    Returns:
        an array of the same shape containing the rotated points.

    Raises:
        ValueError - when points is neither 2- nor 3-dimensional, or a
            3D rotation is requested without all three angles.
    """
    if points.shape[0] == 3:
        if any(a is None for a in (angle_y, angle_z)):
            raise ValueError('You have to specify three rotations for 3D data')
        # Angle order matches the Givens-rotation ordering expected by
        # rotation_matrix.
        angles = numpy.radians([angle_z, angle_y, angle_x])
        return numpy.dot(points.T, rotation_matrix(angles).T).T
    elif points.shape[0] == 2:
        angle = numpy.radians([angle_x])
        return numpy.dot(points.T, rotation_matrix(angle).T).T
    else:
        # BUG FIX: other shapes previously fell through and returned
        # None silently.
        raise ValueError('points must be a 2-by-N or 3-by-N array')
def test():
    """Visual smoke test: plot a right triangle and its 45-degree rotation."""
    import matplotlib.pyplot as plt

    original = numpy.array([[1, 0], [0, 0], [0, 1]]).transpose()
    rotated = rotate(original, 45)
    print(type(rotated))

    axes = plt.gca()
    axes.plot(original[0], original[1], color='red')
    axes.plot(rotated[0], rotated[1], color='blue')
    axes.set_xlim(-1.1, 1.1)
    axes.set_ylim(-1.1, 1.1)
    axes.set_aspect("equal")
    plt.show()
if __name__ == '__main__':
    # Run the visual smoke test when executed as a script.
    test()
| {
"content_hash": "fa599426ea83501d94e9daded73a8b75",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 79,
"avg_line_length": 37.63793103448276,
"alnum_prop": 0.6003206596426935,
"repo_name": "jesserobertson/pymaxion",
"id": "0e6522e6816b4f9bcc57fd3fe3d9915801c9215f",
"size": "4388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymaxion/rotations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1185"
},
{
"name": "Python",
"bytes": "21471"
}
],
"symlink_target": ""
} |
"""Shows the crash in the faucet log produced by given input"""
import logging
import os
import sys
from faucet import faucet
from ryu.controller import dpset
from faucet import faucet_experimental_api
import Fake
def main():
    """Replay a single AFL crash file against a freshly started Faucet
    instance so the resulting error appears in the faucet log."""
    # Read the captured packet (hex text) named on the command line.
    with open(sys.argv[1]) as pkt_file:
        packet_data = pkt_file.read()

    # Start faucet with a fresh datapath set and experimental API.
    application = faucet.Faucet(
        dpset=dpset.DPSet(),
        faucet_experimental_api=faucet_experimental_api.FaucetExperimentalAPI())
    application.start()

    # Mark every datapath as running so packets are actually processed.
    for dp_id, valve in list(application.valves.items()):
        valve.dp.running = True

    # Decode the hex dump; files that are not valid hex are skipped.
    try:
        byte_data = bytearray.fromhex(packet_data)
    except (ValueError, TypeError):
        byte_data = None

    if byte_data is not None:
        # Wrap the bytes in a fake packet-in event and hand it to faucet
        # so any crash is reproduced.
        datapath = Fake.Datapath(1)
        message = Fake.Message(datapath=datapath, cookie=1524372928,
                               port=1, data=byte_data, in_port=1)
        application.packet_in_handler(Fake.RyuEvent(message))
if __name__ == "__main__":
    # make sure user specifies the afl crash file
    if len(sys.argv) == 2:
        main()
    else:
        print('USAGE: python3 display_packet_crash.py <AFL_CRASH_FILE>')
    # NOTE(review): os._exit skips interpreter cleanup (atexit handlers,
    # buffer flushes); presumably used here to avoid hanging on lingering
    # Ryu controller threads — confirm before changing to sys.exit.
    os._exit(0)
| {
"content_hash": "a18dfb18224e851235b51563d349b504",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 125,
"avg_line_length": 28.479166666666668,
"alnum_prop": 0.662033650329188,
"repo_name": "wackerly/faucet",
"id": "1a3b0364b77c3415bf6f9be676f40ca8ea091de4",
"size": "1391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/fuzzer/display_packet_crash.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2525"
},
{
"name": "Python",
"bytes": "1022623"
},
{
"name": "Shell",
"bytes": "10272"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import random
import string
from datetime import timedelta
from django.utils import timezone
from django import forms
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from django.forms.util import flatatt
try:
    from django.utils.encoding import force_unicode
except ImportError:
    # Fall back to an identity function when force_unicode is not
    # available in the installed Django version.
    def force_unicode(x):
        return(x)
from django.utils.html import escape, conditional_escape
from itertools import chain
from wiki import models
from wiki.conf import settings
from wiki.editors import getEditor
from wiki.core.diff import simple_merge
from django.forms.widgets import HiddenInput
from wiki.core.plugins.base import PluginSettingsFormMixin
from django.contrib.auth.forms import UserCreationForm
from wiki.core import permissions
from wiki.core.compat import get_user_model
User = get_user_model()
class SpamProtectionMixin():
    """Check a form for spam. Only works if properties 'request' and 'revision_model' are set."""

    revision_model = models.ArticleRevision

    def check_spam(self):
        """Check that user or IP address does not perform content edits that
        are not allowed.

        current_revision can be any object inheriting from models.BaseRevisionMixin

        Raises forms.ValidationError when the revision rate limit for the
        current user (or anonymous IP address) is exceeded.
        """
        request = self.request
        user = None
        ip_address = None
        if request.user.is_authenticated():
            user = request.user
        else:
            ip_address = request.META.get('REMOTE_ADDR', None)

        if not (user or ip_address):
            raise forms.ValidationError(_('Spam protection failed to find both a logged in user and an IP address.'))

        def check_interval(from_time, max_count, interval_name):
            # Count this user's (or IP's) revisions created since
            # `from_time` and refuse the edit when the quota is used up.
            # BUG FIX: a previous version immediately shadowed the
            # `from_time` argument with the minutes-lookback window here,
            # which made the hourly check below use the wrong interval.
            revisions = self.revision_model.objects.filter(
                created__gte=from_time,
            )
            if user:
                revisions = revisions.filter(user=user)
            if ip_address:
                revisions = revisions.filter(ip_address=ip_address)
            revisions = revisions.count()
            if revisions >= max_count:
                raise forms.ValidationError(_('Spam protection: You are only allowed to create or edit %(revisions)d article(s) per %(interval_name)s.') %
                                            {'revisions': max_count,
                                             'interval_name': interval_name, })

        if not settings.LOG_IPS_ANONYMOUS:
            return
        if request.user.has_perm('wiki.moderator'):
            return

        # Short-window (minutes) rate limit.
        from_time = timezone.now() - timedelta(minutes=settings.REVISIONS_MINUTES_LOOKBACK)
        if request.user.is_authenticated():
            per_minute = settings.REVISIONS_PER_MINUTES
        else:
            per_minute = settings.REVISIONS_PER_MINUTES_ANONYMOUS
        check_interval(from_time, per_minute,
                       _('minute') if settings.REVISIONS_MINUTES_LOOKBACK == 1 else (_('%d minutes') % settings.REVISIONS_MINUTES_LOOKBACK),)

        # Hourly rate limit. Prefer the dedicated hourly settings when the
        # configuration defines them; fall back to the per-minutes values
        # (the original behaviour) otherwise.
        from_time = timezone.now() - timedelta(minutes=60)
        if request.user.is_authenticated():
            per_hour = getattr(settings, 'REVISIONS_PER_HOUR',
                               settings.REVISIONS_PER_MINUTES)
        else:
            per_hour = getattr(settings, 'REVISIONS_PER_HOUR_ANONYMOUS',
                               settings.REVISIONS_PER_MINUTES_ANONYMOUS)
        check_interval(from_time, per_hour, _('hour'))
class CreateRootForm(forms.Form):
    """Form used to create the wiki's root article."""

    title = forms.CharField(label=_('Title'), help_text=_('Initial title of the article. May be overridden with revision titles.'))
    content = forms.CharField(label=_('Type in some contents'),
                              help_text=_('This is just the initial contents of your article. After creating it, you can use more complex features like adding plugins, meta data, related articles etc...'),
                              required=False, widget=getEditor().get_widget())  # @UndefinedVariable
class EditForm(forms.Form, SpamProtectionMixin):
    """Form for editing an existing article revision.

    Detects mid-air collisions: if someone else saved a new revision while
    this form was open, the posted content is automatically merged with
    the latest revision and the user is asked to review the result.
    """

    title = forms.CharField(label=_('Title'),)
    content = forms.CharField(label=_('Contents'),
                              required=False, widget=getEditor().get_widget())  # @UndefinedVariable
    summary = forms.CharField(label=_('Summary'), help_text=_('Give a short reason for your edit, which will be stated in the revision log.'),
                              required=False)
    # Hidden id of the revision the user started editing from; compared
    # against the article's actual current revision to detect collisions.
    current_revision = forms.IntegerField(required=False, widget=forms.HiddenInput())

    def __init__(self, request, current_revision, *args, **kwargs):
        # `no_clean` skips validation entirely; `preview` skips the
        # collision/no-op checks but still renders the form.
        self.request = request
        self.no_clean = kwargs.pop('no_clean', False)
        self.preview = kwargs.pop('preview', False)
        self.initial_revision = current_revision
        # Revision id the submitted data claims to be based on (set below
        # when bound data is present).
        self.presumed_revision = None
        if current_revision:
            initial = {'content': current_revision.content,
                       'title': current_revision.title,
                       'current_revision': current_revision.id}
            initial.update(kwargs.get('initial', {}))

            # Manipulate any data put in args[0] such that the current_revision
            # is reset to match the actual current revision.
            data = None
            if len(args) > 0:
                data = args[0]
                args = args[1:]
            if data is None:
                data = kwargs.get('data', None)
            if data:
                self.presumed_revision = data.get('current_revision', None)
                if not str(self.presumed_revision) == str(self.initial_revision.id):
                    # Someone else saved meanwhile: merge their content
                    # with the posted content and rebase on the new
                    # revision.
                    newdata = {}
                    for k, v in data.items():
                        newdata[k] = v
                    newdata['current_revision'] = self.initial_revision.id
                    newdata['content'] = simple_merge(self.initial_revision.content,
                                                      data.get('content', ""))
                    newdata['title'] = current_revision.title
                    kwargs['data'] = newdata
                else:
                    # Always pass as kwarg
                    kwargs['data'] = data

            kwargs['initial'] = initial

        super(EditForm, self).__init__(*args, **kwargs)

    def clean(self):
        """Reject collisions, no-op edits, and spam."""
        cd = self.cleaned_data
        if self.no_clean or self.preview:
            return cd
        if not str(self.initial_revision.id) == str(self.presumed_revision):
            raise forms.ValidationError(_('While you were editing, someone else changed the revision. Your contents have been automatically merged with the new contents. Please review the text below.'))
        if cd['title'] == self.initial_revision.title and cd['content'] == self.initial_revision.content:
            raise forms.ValidationError(_('No changes made. Nothing to save.'))
        # Rate-limit check from SpamProtectionMixin.
        self.check_spam()
        return cd
class SelectWidgetBootstrap(forms.Select):
    """
    http://twitter.github.com/bootstrap/components.html#buttonDropdowns
    Needs bootstrap and jquery

    Renders a <select> as a Bootstrap button dropdown, keeping a plain
    <select> inside <noscript> as a JavaScript-free fallback.
    """

    def __init__(self, attrs=None, choices=(), disabled=False):
        # BUG FIX: `attrs` previously defaulted to a shared mutable dict
        # ({}) that was mutated below, leaking state between all widget
        # instances (and mutating callers' dicts). Default to None and
        # create a fresh dict when none is given.
        if attrs is None:
            attrs = {}
        attrs['class'] = 'btn-group pull-left btn-group-form'
        self.disabled = disabled
        self.noscript_widget = forms.Select(attrs={}, choices=choices)
        super(SelectWidgetBootstrap, self).__init__(attrs, choices)

    def __setattr__(self, k, value):
        # Mirror attribute updates (e.g. `choices`) onto the noscript
        # fallback widget so both stay in sync.
        super(SelectWidgetBootstrap, self).__setattr__(k, value)
        if k != 'attrs' and k != 'disabled':
            self.noscript_widget.__setattr__(k, value)

    def render(self, name, value, attrs=None, choices=()):
        """Render the dropdown markup plus a hidden input carrying the value."""
        if value is None:
            value = ''
        final_attrs = self.build_attrs(attrs, name=name)
        # BUG FIX: the <ul> class was misspelled "dropdown-men", which is
        # not a Bootstrap class and left the menu unstyled/hidden; the
        # correct class is "dropdown-menu".
        output = ["""<div%(attrs)s>"""
                  """ <button class="btn btn-group-label%(disabled)s" type="button">%(label)s</button>"""
                  """ <button class="btn btn-default dropdown-toggle%(disabled)s" type="button" data-toggle="dropdown">"""
                  """ <span class="caret"></span>"""
                  """ </button>"""
                  """ <ul class="dropdown-menu">"""
                  """ %(options)s"""
                  """ </ul>"""
                  """ <input type="hidden" name="%(name)s" value="" class="btn-group-value" />"""
                  """</div>"""
                  """<noscript>%(noscript)s</noscript>"""
                  % {'attrs': flatatt(final_attrs),
                     'options': self.render_options(choices, [value]),
                     'label': _('Select an option'),
                     'name': name,
                     'disabled': ' disabled' if self.disabled else '',
                     'noscript': self.noscript_widget.render(name, value, {}, choices)}]
        return mark_safe('\n'.join(output))

    def render_option(self, selected_choices, option_value, option_label):
        # Render one dropdown entry; the JS picks up data-value on click.
        option_value = force_unicode(option_value)
        selected_html = (option_value in selected_choices) and ' selected="selected"' or ''
        return '<li><a href="javascript:void(0)" data-value="%s"%s>%s</a></li>' % (
            escape(option_value), selected_html,
            conditional_escape(force_unicode(option_label)))

    def render_options(self, choices, selected_choices):
        # Normalize to strings.
        selected_choices = set([force_unicode(v) for v in selected_choices])
        output = []
        for option_value, option_label in chain(self.choices, choices):
            if isinstance(option_label, (list, tuple)):
                # Option group: emit a divider, then the grouped options.
                output.append('<li class="divider" label="%s"></li>' % escape(force_unicode(option_value)))
                for option in option_label:
                    output.append(self.render_option(selected_choices, *option))
            else:
                output.append(self.render_option(selected_choices, option_value, option_label))
        return '\n'.join(output)

    class Media(forms.Media):
        # JS hook that copies the clicked entry into the hidden input.
        js = ("wiki/js/forms.js",)
class TextInputPrepend(forms.TextInput):
    """Text input rendered inside a Bootstrap input-group.

    Accepts an extra ``prepend`` keyword argument: the text shown in the
    addon box in front of the input field.
    """

    def __init__(self, *args, **kwargs):
        self.prepend = kwargs.pop('prepend', "")
        super(TextInputPrepend, self).__init__(*args, **kwargs)

    def render(self, *args, **kwargs):
        field_html = super(TextInputPrepend, self).render(*args, **kwargs)
        return mark_safe(
            '<div class="input-group"><span class="input-group-addon">%s</span>%s</div>'
            % (self.prepend, field_html))
class CreateForm(forms.Form, SpamProtectionMixin):
    """Form for creating a new article beneath a given URL path."""

    def __init__(self, request, urlpath_parent, *args, **kwargs):
        super(CreateForm, self).__init__(*args, **kwargs)
        # Kept for the spam check and the sibling-slug uniqueness check.
        self.request = request
        self.urlpath_parent = urlpath_parent

    title = forms.CharField(label=_('Title'),)
    slug = forms.SlugField(label=_('Slug'), help_text=_("This will be the address where your article can be found. Use only alphanumeric characters and - or _. Note that you cannot change the slug after creating the article."),
                           max_length=models.URLPath.SLUG_MAX_LENGTH)
    content = forms.CharField(label=_('Contents'),
                              required=False, widget=getEditor().get_widget())  # @UndefinedVariable
    summary = forms.CharField(label=_('Summary'), help_text=_("Write a brief message for the article's history log."),
                              required=False)

    def clean_slug(self):
        """Reject reserved slugs and collisions with sibling articles."""
        slug = self.cleaned_data['slug']
        if slug.startswith("_"):
            raise forms.ValidationError(_('A slug may not begin with an underscore.'))
        if slug == 'admin':
            raise forms.ValidationError(_("'admin' is not a permitted slug name."))

        if settings.URL_CASE_SENSITIVE:
            already_existing_slug = models.URLPath.objects.filter(slug=slug, parent=self.urlpath_parent)
        else:
            # Normalise the slug before the case-insensitive lookup.
            slug = slug.lower()
            slug = slug.replace('-', '_')
            already_existing_slug = models.URLPath.objects.filter(slug__iexact=slug, parent=self.urlpath_parent)
        if already_existing_slug:
            already_urlpath = already_existing_slug[0]
            # Give a more specific message when the clash is with a
            # deleted (but not purged) article.
            if already_urlpath.article and already_urlpath.article.current_revision.deleted:
                raise forms.ValidationError(_('A deleted article with slug "%s" already exists.') % already_urlpath.slug)
            else:
                raise forms.ValidationError(_('A slug named "%s" already exists.') % already_urlpath.slug)

        return slug

    def clean(self):
        # Rate-limit check from SpamProtectionMixin.
        self.check_spam()
        return self.cleaned_data
class DeleteForm(forms.Form):
    """Confirmation form for deleting (or purging) an article."""

    def __init__(self, *args, **kwargs):
        self.article = kwargs.pop('article')
        self.has_children = kwargs.pop('has_children')
        super(DeleteForm, self).__init__(*args, **kwargs)

    confirm = forms.BooleanField(required=False,
                                 label=_('Yes, I am sure'))
    purge = forms.BooleanField(widget=HiddenInput(), required=False,
                               label=_('Purge'),
                               help_text=_('Purge the article: Completely remove it (and all its contents) with no undo. Purging is a good idea if you want to free the slug such that users can create new articles in its place.'))
    # Hidden revision id, used to detect concurrent modification.
    revision = forms.ModelChoiceField(models.ArticleRevision.objects.all(),
                                      widget=HiddenInput(), required=False)

    def clean(self):
        cd = self.cleaned_data
        if not cd['confirm']:
            raise forms.ValidationError(_('You are not sure enough!'))
        # Refuse to delete if the article changed since the form was shown.
        if cd['revision'] != self.article.current_revision:
            raise forms.ValidationError(_('While you tried to delete this article, it was modified. TAKE CARE!'))
        return cd
class PermissionsForm(PluginSettingsFormMixin, forms.ModelForm):
    """Settings-tab form for editing an article's permissions, ownership,
    group and lock state.

    Fields are progressively hidden or made read-only depending on what
    the requesting user is allowed to assign (see __init__).
    """

    locked = forms.BooleanField(label=_('Lock article'), help_text=_('Deny all users access to edit this article.'),
                                required=False)

    # Placement of this form in the article settings view.
    settings_form_headline = _('Permissions')
    settings_order = 5
    settings_write_access = False

    owner_username = forms.CharField(required=False, label=_('Owner'),
                                     help_text=_('Enter the username of the owner.'))
    group = forms.ModelChoiceField(models.Group.objects.all(), empty_label=_('(none)'),
                                   label=_('Group'), required=False)
    if settings.USE_BOOTSTRAP_SELECT_WIDGET:
        group.widget = SelectWidgetBootstrap()

    recursive = forms.BooleanField(label=_('Inherit permissions'), help_text=_('Check here to apply the above permissions (excluding group and owner of the article) recursively to articles below this one.'),
                                   required=False)
    recursive_owner = forms.BooleanField(label=_('Inherit owner'), help_text=_('Check here to apply the ownership setting recursively to articles below this one.'),
                                         required=False)
    recursive_group = forms.BooleanField(label=_('Inherit group'), help_text=_('Check here to apply the group setting recursively to articles below this one.'),
                                         required=False)

    def get_usermessage(self):
        # Message shown to the user after the settings form is saved.
        if self.changed_data:
            return _('Permission settings for the article were updated.')
        else:
            return _('Your permission settings were unchanged, so nothing saved.')

    def __init__(self, article, request, *args, **kwargs):
        self.article = article
        self.user = request.user
        self.request = request
        kwargs['instance'] = article
        kwargs['initial'] = {'locked': article.current_revision.locked}

        super(PermissionsForm, self).__init__(*args, **kwargs)

        self.can_change_groups = False
        self.can_assign = False

        # Tailor the available fields to the caller's privileges.
        if permissions.can_assign(article, request.user):
            # Full assignment rights: any group may be chosen.
            self.can_assign = True
            self.can_change_groups = True
            self.fields['group'].queryset = models.Group.objects.all()
        elif permissions.can_assign_owner(article, request.user):
            # May only assign groups the requesting user belongs to.
            self.fields['group'].queryset = models.Group.objects.filter(user=request.user)
            self.can_change_groups = True
        else:
            # Quick-fix...
            # Set the group dropdown to readonly and with the current
            # group as only selectable option
            self.fields['group'] = forms.ModelChoiceField(
                queryset=models.Group.objects.filter(id=self.instance.group.id) if self.instance.group else models.Group.objects.none(),
                empty_label=_('(none)'),
                required=False,
                widget=SelectWidgetBootstrap(disabled=True) if settings.USE_BOOTSTRAP_SELECT_WIDGET else forms.Select(attrs={'disabled': True})
            )
            self.fields['group_read'].widget = forms.HiddenInput()
            self.fields['group_write'].widget = forms.HiddenInput()

        if not self.can_assign:
            # Lock down owner, lock and recursive options entirely.
            self.fields['owner_username'].widget = forms.TextInput(attrs={'readonly': 'true'})
            self.fields['recursive'].widget = forms.HiddenInput()
            self.fields['recursive_group'].widget = forms.HiddenInput()
            self.fields['recursive_owner'].widget = forms.HiddenInput()
            self.fields['locked'].widget = forms.HiddenInput()

        self.fields['owner_username'].initial = getattr(article.owner, User.USERNAME_FIELD) if article.owner else ""

    def clean_owner_username(self):
        # Resolve the typed username into a User instance; keep the
        # existing owner when the caller may not reassign ownership.
        if self.can_assign:
            username = self.cleaned_data['owner_username']
            if username:
                try:
                    user = User.objects.get(username=username)
                except User.DoesNotExist:
                    raise forms.ValidationError(_('No user with that username'))
            else:
                user = None
        else:
            user = self.article.owner
        return user

    def save(self, commit=True):
        article = super(PermissionsForm, self).save(commit=False)

        # Alter the owner according to the form field owner_username
        # TODO: Why not rename this field to 'owner' so this happens automatically?
        article.owner = self.cleaned_data['owner_username']

        # Revert any changes to group permissions if the
        # current user is not allowed (see __init__)
        # TODO: Write clean methods for this instead!
        if not self.can_change_groups:
            article.group = self.article.group
            article.group_read = self.article.group_read
            article.group_write = self.article.group_write

        if self.can_assign:
            # Optionally propagate the settings down the article tree.
            if self.cleaned_data['recursive']:
                article.set_permissions_recursive()
            if self.cleaned_data['recursive_owner']:
                article.set_owner_recursive()
            if self.cleaned_data['recursive_group']:
                article.set_group_recursive()

        # Record lock-state transitions as automatic revisions.
        if self.cleaned_data['locked'] and not article.current_revision.locked:
            revision = models.ArticleRevision()
            revision.inherit_predecessor(self.article)
            revision.set_from_request(self.request)
            revision.automatic_log = _('Article locked for editing')
            revision.locked = True
            self.article.add_revision(revision)
        elif not self.cleaned_data['locked'] and article.current_revision.locked:
            revision = models.ArticleRevision()
            revision.inherit_predecessor(self.article)
            revision.set_from_request(self.request)
            revision.automatic_log = _('Article unlocked for editing')
            revision.locked = False
            self.article.add_revision(revision)

        article.save()

    class Meta:
        model = models.Article
        fields = ('locked', 'owner_username', 'recursive_owner', 'group', 'recursive_group', 'group_read', 'group_write', 'other_read', 'other_write',
                  'recursive')
        widgets = {}
class DirFilterForm(forms.Form):
    # Free-text filter box used on directory listings.
    query = forms.CharField(widget=forms.TextInput(attrs={'placeholder': _('Filter...'),
                                                          'class': 'search-query'}), required=False)
class SearchForm(forms.Form):
    # Site-wide search box.
    q = forms.CharField(widget=forms.TextInput(attrs={'placeholder': _('Search...'),
                                                      'class': 'search-query'}), required=False)
class UserCreationForm(UserCreationForm):
    # NOTE: deliberately shadows django.contrib.auth.forms.UserCreationForm,
    # which it extends with a required email field and honeypot spam traps.
    email = forms.EmailField(required=True)

    def __init__(self, *args, **kwargs):
        super(UserCreationForm, self).__init__(*args, **kwargs)

        # Add honeypots: extra fields that are hidden via a randomly named
        # CSS class, so only bots will fill them in.
        self.honeypot_fieldnames = "address", "phone"
        self.honeypot_class = ''.join(random.choice(string.ascii_uppercase + string.digits) for __ in range(10))
        self.honeypot_jsfunction = 'f' + ''.join(random.choice(string.ascii_uppercase + string.digits) for __ in range(10))

        for fieldname in self.honeypot_fieldnames:
            self.fields[fieldname] = forms.CharField(
                widget=forms.TextInput(attrs={'class': self.honeypot_class}),
                required=False,
            )

    def clean(self):
        cd = super(UserCreationForm, self).clean()
        # Any value in a honeypot field means a bot filled in the form.
        for fieldname in self.honeypot_fieldnames:
            if cd[fieldname]:
                raise forms.ValidationError("Thank you, non-human visitor. Please keep trying to fill in the form.")
        return cd

    class Meta:
        model = User
        fields = ("username", "email")
| {
"content_hash": "289474e8be150c7c2abf34a00ee41b91",
"timestamp": "",
"source": "github",
"line_count": 473,
"max_line_length": 229,
"avg_line_length": 46.23467230443975,
"alnum_prop": 0.5893730851890804,
"repo_name": "powellc/hacklabs-wiki",
"id": "9839e5bf061229962c475b1f394dc3edce3f51a5",
"size": "21893",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hacklabs-wiki/apps/wiki/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "230955"
},
{
"name": "JavaScript",
"bytes": "35874"
},
{
"name": "Python",
"bytes": "590927"
},
{
"name": "Shell",
"bytes": "1098"
}
],
"symlink_target": ""
} |
from google.appengine.api import memcache
from webapp2_extras.security import generate_random_string
import jinja2
import json
import os
import webapp2
# Cached JSON entries expire after one day.
MEMCACHE_EXPIRE = 24 * 60 * 60  # seconds

# Jinja templates are loaded relative to this file's directory.
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
)
class MainPage(webapp2.RequestHandler):
    """Accepts JSON via POST, caches it, and replies with a short URL."""

    def get(self):
        # Usage hint for visitors hitting the root URL directly.
        self.response.write('Make a POST with json=your_stringified_json to have it beautified.')

    def post(self):
        try:
            # Detect empty request and valid JSON
            stripped_json = json.dumps(json.loads(self.request.get('json')))
        except ValueError:
            self.response.write('ERROR: You must provide a valid json.')
            return

        # Store the normalised JSON under a random key for one day.
        cache_key = generate_random_string(length=16)
        if memcache.add(cache_key, stripped_json, MEMCACHE_EXPIRE):
            self.response.write('%s/%s' % (self.request.host, cache_key))
        else:
            self.response.write('ERROR: Could not set cache.')
class CacheHandler(webapp2.RequestHandler):
    """Render a cached JSON document through the beautifier template."""

    def get(self, key):
        cached = memcache.get(key)
        if cached:
            template = JINJA_ENVIRONMENT.get_template('templates/beautified.html')
            self.response.write(template.render(json=cached))
        else:
            self.response.set_status(404)
            # BUG FIX: the response object was being *called*
            # (`self.response(...)`) instead of written to, which raised
            # a TypeError on every cache miss.
            self.response.write('ERROR: Could not find element "%s" in cache.' % key)
class RawCacheHandler(webapp2.RequestHandler):
    """Serve a cached JSON blob verbatim with an application/json content type."""

    def get(self, key):
        cached = memcache.get(key)
        if cached:
            self.response.headers['Content-Type'] = 'application/json'
            self.response.write(cached)
        else:
            self.response.set_status(404)
            # BUG FIX: self.response is not callable; the error body was never
            # written (it raised TypeError instead). Use .write().
            self.response.write('ERROR: Could not find element "%s" in cache.' % key)
# Route table, tried in order. A \w+ group cannot match '/', so '/raw/<key>'
# does not fully match the second route and falls through to RawCacheHandler
# (NOTE(review): relies on webapp2 matching the full path -- confirm).
urls = (
    (r'/', MainPage),
    (r'/(\w+)', CacheHandler),
    (r'/raw/(\w+)', RawCacheHandler),
)
# debug=True renders stack traces to clients; disable for production.
application = webapp2.WSGIApplication(urls, debug=True)
| {
"content_hash": "141c02a86c3e0bfc9ab32a7fd3d5bf36",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 97,
"avg_line_length": 32.96666666666667,
"alnum_prop": 0.6359959555106168,
"repo_name": "seguri/json-beautifier",
"id": "d15b39dd6150f0654858c0ba21aef007c3a22e3f",
"size": "1978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "json-beautifier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "227"
},
{
"name": "HTML",
"bytes": "431"
},
{
"name": "JavaScript",
"bytes": "797"
},
{
"name": "Python",
"bytes": "1978"
}
],
"symlink_target": ""
} |
import requests, time, re
from lxml import html
import logging, os, platform, sys
import xml.dom.minidom, xml.sax.saxutils
#ENVIRONMENTAL INFORMATION
__author__ = "george@georgestarcher.com (George Starcher)"
_MI_APP_NAME = 'TA-laundryview'
# Resolve the Splunk installation root, falling back to the default path.
_SPLUNK_HOME = os.getenv("SPLUNK_HOME")
if _SPLUNK_HOME == None:
    _SPLUNK_HOME = os.getenv("SPLUNKHOME")
if _SPLUNK_HOME == None:
    _SPLUNK_HOME = "/opt/splunk"
_OPERATING_SYSTEM = platform.system()
_APP_HOME = _SPLUNK_HOME + "/etc/apps/TA-laundryview"
# BUG FIX: missing path separator previously produced ".../TA-laundryviewbin/lib".
_LIB_PATH = _APP_HOME + "/bin/lib"
_PID = os.getpid()
_IS_WINDOWS = False
if _OPERATING_SYSTEM.lower() == "windows":
    _IS_WINDOWS = True
    # BUG FIX: str.replace returns a new string; the results were discarded
    # before, leaving the paths unchanged on Windows.
    _LIB_PATH = _LIB_PATH.replace("/", "\\")
    _APP_HOME = _APP_HOME.replace("/", "\\")
#SYSTEM EXIT CODES
_SYS_EXIT_FAILED_LAUNDRY = 7
_SYS_EXIT_GPARENT_PID_ONE = 8
#Setup logging
logging.root
logging.root.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logging.root.addHandler(handler)
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S%z')
# Log timestamps in UTC.
logging.Formatter.converter = time.gmtime
#Set timestamp and define machine error types
logTime = time.strftime("%Y-%m-%d %H:%M:%S%z")
machineError = ['Offline','Out of service','Unknown']
# Splunk modular-input scheme, printed in response to --scheme.
SCHEME = """<scheme>
    <title>TA-laundryview</title>
    <description>Collect laundry room machine data.</description>
    <use_external_validation>true</use_external_validation>
    <streaming_mode>simple</streaming_mode>
    <endpoint>
        <args>
            <arg name="siteCode">
                <title>LaundyView SiteCode</title>
                <description>The LaundryView SiteCode</description>
            </arg>
        </args>
    </endpoint>
</scheme>
"""
def doPrint(s):
    """Write *s* verbatim to stdout (keeps all script output on one code path)."""
    stream = sys.stdout
    stream.write(s)
def do_scheme():
    """Print the Splunk modular-input scheme XML (response to --scheme)."""
    doPrint(SCHEME)
def get_source(config):
    """Build the Splunk source value for events from the given config string."""
    return ''.join(["laundryview:", config])
def validate_arguments():
    """No-op: the scheme declares external validation but nothing is checked here."""
    pass
def getSiteCode():
    """Read the Splunk modular-input XML configuration from stdin.

    Returns the value of the `siteCode` param, or "" when it is absent.
    Raises Exception if the configuration cannot be read or parsed.
    """
    try:
        # read everything from stdin
        config_str = sys.stdin.read()
        # parse the config XML
        doc = xml.dom.minidom.parseString(config_str)
        root = doc.documentElement
        conf_node = root.getElementsByTagName("configuration")[0]
        if conf_node:
            stanza = conf_node.getElementsByTagName("stanza")[0]
            if stanza:
                stanza_name = stanza.getAttribute("name")
                if stanza_name:
                    params = stanza.getElementsByTagName("param")
                    for param in params:
                        param_name = param.getAttribute("name")
                        if param_name and param.firstChild and \
                           param.firstChild.nodeType == param.firstChild.TEXT_NODE and \
                           param_name == "siteCode":
                            return param.firstChild.data
    # BUG FIX: `except Exception, e` / `raise Exception, "..."` are Python-2-only
    # syntax (a SyntaxError on Python 3); this form works on Python 2.6+ and 3.
    except Exception as e:
        raise Exception("Error getting Splunk configuration via STDIN: %s" % str(e))
    return ""
def getSitePage(siteCode):
    """ fetch the web page contents for the site """
    # NOTE(review): no timeout is passed to requests.get, so a hung server
    # blocks this input indefinitely -- confirm whether a timeout is wanted.
    page = requests.get('http://lite.laundryview.com/laundry_room.php?lr='+siteCode)
    tree = html.fromstring(page.text)
    return(tree)
def getSiteName(pageTree):
    """Return the location site name (school) from the parsed room page tree."""
    headings = pageTree.xpath('//table/tr//h1/text()')
    return headings[0]
def getSiteRoom(pageTree):
    """Return the laundry room name from the parsed room page tree."""
    strongs = pageTree.xpath('//table/tr//strong/text()')
    return strongs[0]
def isInUse(machineStatus):
    """Map a raw machine status string to a 'Yes'/'No' in-use flag.

    Error states (machineError) and 'Avail' count as not in use; any other
    status means the machine is busy.
    """
    idle = machineStatus in machineError or machineStatus == "Avail"
    return 'No' if idle else 'Yes'
def getMachineID(machine):
    """Split a machine label like "WASHER (05)" into (machineID, machineNumber).

    Raises AttributeError when the label does not match the expected
    "<id> (<number>)" pattern (same failure mode as the original).
    """
    # BUG FIX (perf/idiom): the same regex was searched twice; run it once
    # and use a raw string for the pattern.
    match = re.search(r'^(.*?)\s\((.*?)\)', machine)
    return match.group(1), match.group(2)
def outputWashers(pageTree, siteName, siteRoom):
    """ parse the site page tree for the washers and their status """
    # Table column 1 holds washers: span[1] is the label, span[2] the status text.
    washers = pageTree.xpath('//tr/td[1]/div/span[1]/text()')
    washer_status = pageTree.xpath('//tr/td[1]/div/span[2]/text()')
    washer_dict = zip(washers, washer_status)
    for washer,status in washer_dict:
        inUse = isInUse(status)
        machineID, machineNumber = getMachineID(washer)
        # Emit one key=value event line per machine (Python 2 print statement).
        print logTime+" site_name=\""+siteName+"\" room_name=\""+siteRoom+"\" type=washer machineID="+machineID+" machineNumber="+machineNumber+" inUse="+inUse+" status=\""+status+"\""
def outputDryers(pageTree, siteName, siteRoom):
    """ parse the site page tree for the dryers and their status """
    # Table column 3 holds dryers: span[1] is the label, span[2] the status text.
    dryers = pageTree.xpath('//tr/td[3]/div/span[1]/text()')
    dryer_status = pageTree.xpath('//tr/td[3]/div/span[2]/text()')
    dryer_dict = zip(dryers, dryer_status)
    for dryer,status in dryer_dict:
        inUse = isInUse(status)
        machineID, machineNumber = getMachineID(dryer)
        # Emit one key=value event line per machine (Python 2 print statement).
        print logTime+" site_name=\""+siteName+"\" room_name=\""+siteRoom+"\" type=dryer machineID="+machineID+" machineNumber="+machineNumber+" inUse="+inUse+" status=\""+status+"\""
def getSite(siteCode):
    """Fetch one site's page and emit washer/dryer status events.

    On any failure the error is logged and the process exits with
    _SYS_EXIT_FAILED_LAUNDRY.
    """
    try:
        sitePageTree = getSitePage(siteCode)
        siteName = getSiteName(sitePageTree)
        siteRoom = getSiteRoom(sitePageTree)
        outputWashers(sitePageTree, siteName, siteRoom)
        outputDryers(sitePageTree, siteName, siteRoom)
    # BUG FIX: `except Exception, e` is Python-2-only syntax; this form is
    # valid on Python 2.6+ and Python 3.
    except Exception as e:
        logging.debug("script="+_MI_APP_NAME+" %s" % str(e))
        exit(_SYS_EXIT_FAILED_LAUNDRY)
if __name__ == "__main__":
    # NOTE(review): this list of site codes is never used below -- confirm
    # whether it was meant to drive multi-site collection.
    UoAsiteCodes = ['455621', '1937659', '1937660', '1937645', '1937611', '19376136', '1937663', '1937664', '1937671', '1937672', '1937669', '1937670', '1937665', '1937666', '1937656', '1937655', '1937651', '1937652', '1937653', '1937654', '1937618']
    if len(sys.argv) > 1:
        if sys.argv[1] == "--scheme":
            do_scheme()
        # NOTE(review): not an elif -- after --scheme this second test still
        # runs and its else branch executes (harmlessly, as it is a pass).
        if sys.argv[1] == "--validate-arguments":
            validate_arguments()
        else:
            pass
    else:
        # Normal run: read the siteCode from the Splunk config on stdin.
        siteCode = getSiteCode()
        getSite(siteCode)
    sys.exit(0)
| {
"content_hash": "47931975236114c454f84acf92cebdc0",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 250,
"avg_line_length": 35.87640449438202,
"alnum_prop": 0.633103664265581,
"repo_name": "georgestarcher/TA-laundryview",
"id": "504c63b93e3dd88447ad95b9156e14eb8ad72f83",
"size": "6386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/laundryview.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "942"
},
{
"name": "Python",
"bytes": "6386"
}
],
"symlink_target": ""
} |
class SDoc(object):
    """Base class (marker type) for all simple-document nodes."""
    pass
class SLine(SDoc):
    """A line break in the output stream, followed by `indent` spaces."""

    __slots__ = ('indent', )

    def __init__(self, indent):
        assert isinstance(indent, int)
        self.indent = indent

    def __repr__(self):
        return f'SLine({self.indent!r})'
class SAnnotationPush(SDoc):
    """Marks the start of an annotation's scope in the SDoc stream."""

    __slots__ = ('value', )

    def __init__(self, value):
        self.value = value

    def __repr__(self):
        return f'SAnnotationPush({self.value!r})'
class SAnnotationPop(SDoc):
    """Marks the end of the most recent annotation's scope in the SDoc stream."""

    __slots__ = ('value', )

    def __init__(self, value):
        self.value = value

    def __repr__(self):
        # BUG FIX: repr previously reported 'SAnnotationPush' (copy-paste error),
        # which made debug output indistinguishable from the push marker.
        return f'SAnnotationPop({repr(self.value)})'
| {
"content_hash": "55ed1b35b04fa78fe57e9a55d9c96857",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 53,
"avg_line_length": 19.484848484848484,
"alnum_prop": 0.5567651632970451,
"repo_name": "tommikaikkonen/peprint",
"id": "75c4fb8036bdd2d043b7b1ef13fe3a5689646106",
"size": "644",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "peprint/sdoc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2269"
},
{
"name": "Python",
"bytes": "95629"
}
],
"symlink_target": ""
} |
import re
import hashlib
import uuid
from decimal import Decimal
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings as django_settings
from django.utils.datastructures import SortedDict
from django.core.exceptions import ImproperlyConfigured
from form_designer.fields import TemplateTextField, TemplateCharField, ModelNameField, RegexpExpressionField
from form_designer.utils import get_class
from form_designer import settings
if settings.VALUE_PICKLEFIELD:
try:
from picklefield.fields import PickledObjectField
except ImportError:
raise ImproperlyConfigured('FORM_DESIGNER_VALUE_PICKLEFIELD is True, but django-picklefield is not installed.')
class FormValueDict(dict):
    """A dict with fixed 'name'/'value'/'label' keys describing one submitted field."""

    def __init__(self, name, value, label):
        super(FormValueDict, self).__init__()
        self['name'] = name
        self['value'] = value
        self['label'] = label
class FormDefinition(models.Model):
    """An admin-configurable form: fields, presentation, mail delivery and logging."""
    name = models.SlugField(_('name'), max_length=255, unique=True)
    require_hash = models.BooleanField(_('obfuscate URL to this form'), default=False, help_text=_('If enabled, the form can only be reached via a secret URL.'))
    private_hash = models.CharField(editable=False, max_length=40, default='')
    public_hash = models.CharField(editable=False, max_length=40, default='')
    title = models.CharField(_('title'), max_length=255, blank=True, null=True)
    body = models.TextField(_('body'), blank=True, null=True)
    action = models.CharField(_('target URL'), help_text=_('If you leave this empty, the page where the form resides will be requested, and you can use the mail form and logging features. You can also send data to external sites: For instance, enter "http://www.google.ch/search" to create a search form.'), max_length=255, blank=True, null=True)
    mail_to = TemplateCharField(_('send form data to e-mail address'), help_text=('Separate several addresses with a comma. Your form fields are available as template context. Example: "admin@domain.com, {{ from_email }}" if you have a field named `from_email`.'), max_length=255, blank=True, null=True)
    mail_from = TemplateCharField(_('sender address'), max_length=255, help_text=('Your form fields are available as template context. Example: "{{ first_name }} {{ last_name }} <{{ from_email }}>" if you have fields named `first_name`, `last_name`, `from_email`.'), blank=True, null=True)
    mail_subject = TemplateCharField(_('email subject'), max_length=255, help_text=('Your form fields are available as template context. Example: "Contact form {{ subject }}" if you have a field named `subject`.'), blank=True, null=True)
    mail_uploaded_files = models.BooleanField(_('Send uploaded files as email attachments'), default=True)
    method = models.CharField(_('method'), max_length=10, default="POST", choices=(('POST', 'POST'), ('GET', 'GET')))
    success_message = models.CharField(_('success message'), max_length=255, blank=True, null=True)
    error_message = models.CharField(_('error message'), max_length=255, blank=True, null=True)
    submit_label = models.CharField(_('submit button label'), max_length=255, blank=True, null=True)
    log_data = models.BooleanField(_('log form data'), help_text=_('Logs all form submissions to the database.'), default=True)
    save_uploaded_files = models.BooleanField(_('save uploaded files'), help_text=_('Saves all uploaded files using server storage.'), default=True)
    success_redirect = models.BooleanField(_('HTTP redirect after successful submission'), default=True)
    success_clear = models.BooleanField(_('clear form after successful submission'), default=True)
    allow_get_initial = models.BooleanField(_('allow initial values via URL'), help_text=_('If enabled, you can fill in form fields by adding them to the query string.'), default=True)
    message_template = TemplateTextField(_('message template'), help_text=_('Your form fields are available as template context. Example: "{{ message }}" if you have a field named `message`. To iterate over all fields, use the variable `data` (a list containing a dictionary for each form field, each containing the elements `name`, `label`, `value`).'), blank=True, null=True)
    form_template_name = models.CharField(_('form template'), max_length=255, choices=settings.FORM_TEMPLATES, blank=True, null=True)
    display_logged = models.BooleanField(_('display logged submissions with form'), default=False)

    class Meta:
        verbose_name = _('Form')
        verbose_name_plural = _('Forms')

    def save(self, *args, **kwargs):
        """Populate the secret/public hashes on first save, then persist."""
        if not self.private_hash:
            self.private_hash = hashlib.sha1(str(uuid.uuid4())).hexdigest()
        if not self.public_hash:
            self.public_hash = hashlib.sha1(str(uuid.uuid4())).hexdigest()
        # BUG FIX: forward *args/**kwargs (e.g. using=, force_insert=) to the
        # parent save; they were silently dropped before.
        super(FormDefinition, self).save(*args, **kwargs)

    def get_field_dict(self):
        """Return the form's field definitions keyed by field name, in order."""
        field_dict = SortedDict()
        for field in self.formdefinitionfield_set.all():
            field_dict[field.name] = field
        return field_dict

    @models.permalink
    def get_absolute_url(self):
        """URL of the form; the obfuscated hash URL when require_hash is set."""
        if self.require_hash:
            return ('form_designer.views.detail_by_hash', [str(self.public_hash)])
        return ('form_designer.views.detail', [str(self.name)])

    def get_form_data(self, form):
        """Collect cleaned values of all included fields as FormValueDicts."""
        # TODO: refactor, move to utils or views
        data = []
        field_dict = self.get_field_dict()
        form_keys = form.fields.keys()
        def_keys = field_dict.keys()
        for key in form_keys:
            if key in def_keys and field_dict[key].include_result:
                value = form.cleaned_data[key]
                # Values may define __form_data__ to control their logged form.
                if getattr(value, '__form_data__', False):
                    value = value.__form_data__()
                data.append(FormValueDict(key, value, form.fields[key].label))
        return data

    def get_form_data_context(self, form_data):
        """Flatten form data into a {name: value} dict for template rendering."""
        # TODO: refactor, move to utils
        context = {}  # renamed from `dict`, which shadowed the builtin
        if form_data:
            for field in form_data:
                context[field['name']] = field['value']
        return context

    def compile_message(self, form_data, template=None):
        """Render the notification message for a submission.

        Uses `template` if given, else the stored message_template, else the
        bundled default template.
        """
        # TODO: refactor, move to utils
        from django.template.loader import get_template
        from django.template import Context, Template
        if template:
            t = get_template(template)
        elif not self.message_template:
            t = get_template('txt/formdefinition/data_message.txt')
        else:
            t = Template(self.message_template)
        context = Context(self.get_form_data_context(form_data))
        context['data'] = form_data
        return t.render(context)

    def count_fields(self):
        return self.formdefinitionfield_set.count()
    count_fields.short_description = _('Fields')

    def __unicode__(self):
        return self.title or self.name

    def log(self, form, user=None):
        """Persist one submission of `form`, attributing it to `user` if logged in."""
        form_data = self.get_form_data(form)
        created_by = None
        if user and user.is_authenticated():
            created_by = user
        FormLog(form_definition=self, data=form_data, created_by=created_by).save()

    def string_template_replace(self, text, context_dict):
        """Render `text` as a Django template; fall back to the raw text on syntax errors."""
        # TODO: refactor, move to utils
        from django.template import Context, Template, TemplateSyntaxError
        try:
            t = Template(text)
            return t.render(Context(context_dict))
        except TemplateSyntaxError:
            return text

    def send_mail(self, form, files=None):
        """Email the submitted form data; `files` is an optional list of attachment paths."""
        # TODO: refactor, move to utils
        # BUG FIX: avoid a mutable default argument ([]).
        if files is None:
            files = []
        form_data = self.get_form_data(form)
        message = self.compile_message(form_data)
        context_dict = self.get_form_data_context(form_data)
        # Recipients may be separated by commas/semicolons and may themselves
        # be templates referencing form fields.
        mail_to = re.compile(r'\s*[,;]+\s*').split(self.mail_to)
        for key, email in enumerate(mail_to):
            mail_to[key] = self.string_template_replace(email, context_dict)
        mail_from = self.mail_from or None
        if mail_from:
            mail_from = self.string_template_replace(mail_from, context_dict)
        if self.mail_subject:
            mail_subject = self.string_template_replace(self.mail_subject, context_dict)
        else:
            mail_subject = self.title
        from django.core.mail import EmailMessage
        message = EmailMessage(mail_subject, message, mail_from or None, mail_to)
        if self.mail_uploaded_files:
            for file_path in files:
                message.attach_file(file_path)
        message.send(fail_silently=False)

    @property
    def submit_flag_name(self):
        """Name of the hidden submit marker, guaranteed not to clash with a field."""
        name = settings.SUBMIT_FLAG_NAME % self.name
        # make sure we are not overriding one of the actual form fields
        while self.formdefinitionfield_set.filter(name__exact=name).count() > 0:
            name += '_'
        return name
class FormDefinitionField(models.Model):
    """One field of a FormDefinition, mapped to a concrete django.forms field class."""
    form_definition = models.ForeignKey(FormDefinition)
    field_class = models.CharField(_('field class'), choices=settings.FIELD_CLASSES, max_length=100)
    position = models.IntegerField(_('position'), blank=True, null=True)
    name = models.SlugField(_('name'), max_length=255)
    label = models.CharField(_('label'), max_length=255, blank=True, null=True)
    required = models.BooleanField(_('required'), default=True)
    include_result = models.BooleanField(_('include in result'), help_text=('If this is disabled, the field value will not be included in logs and e-mails generated from form data.'), default=True)
    widget = models.CharField(_('widget'), default='', choices=settings.WIDGET_CLASSES, max_length=255, blank=True, null=True)
    initial = models.TextField(_('initial value'), blank=True, null=True)
    help_text = models.CharField(_('help text'), max_length=255, blank=True, null=True)
    choice_values = models.TextField(_('values'), help_text=_('One value per line'), blank=True, null=True)
    choice_labels = models.TextField(_('labels'), help_text=_('One label per line'), blank=True, null=True)
    max_length = models.IntegerField(_('max. length'), blank=True, null=True)
    min_length = models.IntegerField(_('min. length'), blank=True, null=True)
    max_value = models.FloatField(_('max. value'), blank=True, null=True)
    min_value = models.FloatField(_('min. value'), blank=True, null=True)
    max_digits = models.IntegerField(_('max. digits'), blank=True, null=True)
    decimal_places = models.IntegerField(_('decimal places'), blank=True, null=True)
    regex = RegexpExpressionField(_('regular Expression'), max_length=255, blank=True, null=True)
    choice_model_choices = settings.CHOICE_MODEL_CHOICES
    choice_model = ModelNameField(_('data model'), max_length=255, blank=True, null=True, choices=choice_model_choices, help_text=('your_app.models.ModelName' if not choice_model_choices else None))
    choice_model_empty_label = models.CharField(_('empty label'), max_length=255, blank=True, null=True)

    class Meta:
        verbose_name = _('field')
        verbose_name_plural = _('fields')
        ordering = ['position']

    def save(self, *args, **kwargs):
        """Default the sort position to 0 before persisting."""
        if self.position is None:
            self.position = 0
        super(FormDefinitionField, self).save(*args, **kwargs)

    # NOTE(review): dead code -- the four leading underscores mean this never
    # overrides __init__. Left in place (enabling it would break the Django
    # model constructor); confirm whether it can be deleted.
    def ____init__(self, field_class=None, name=None, required=None, widget=None, label=None, initial=None, help_text=None, *args, **kwargs):
        super(FormDefinitionField, self).__init__(*args, **kwargs)
        self.name = name
        self.field_class = field_class
        self.required = required
        self.widget = widget
        self.label = label
        self.initial = initial
        self.help_text = help_text

    def get_form_field_init_args(self):
        """Build the kwargs dict for instantiating the configured form field class."""
        args = {
            'required': self.required,
            'label': self.label if self.label else '',
            'initial': self.initial if self.initial else None,
            'help_text': self.help_text,
        }

        if self.field_class in ('django.forms.CharField', 'django.forms.EmailField', 'django.forms.RegexField'):
            args.update({
                'max_length': self.max_length,
                'min_length': self.min_length,
            })

        if self.field_class in ('django.forms.IntegerField', 'django.forms.DecimalField'):
            args.update({
                'max_value': int(self.max_value) if self.max_value is not None else None,
                'min_value': int(self.min_value) if self.min_value is not None else None,
            })

        if self.field_class == 'django.forms.DecimalField':
            # Overrides the int bounds set just above with exact Decimal bounds.
            args.update({
                'max_value': Decimal(str(self.max_value)) if self.max_value is not None else None,
                'min_value': Decimal(str(self.min_value)) if self.max_value is not None else None,
                'max_digits': self.max_digits,
                'decimal_places': self.decimal_places,
            })

        if self.field_class == 'django.forms.RegexField':
            if self.regex:
                args.update({
                    'regex': self.regex
                })

        if self.field_class in ('django.forms.ChoiceField', 'django.forms.MultipleChoiceField'):
            if self.choice_values:
                choices = []
                regex = re.compile('[\s]*\n[\s]*')
                values = regex.split(self.choice_values)
                labels = regex.split(self.choice_labels) if self.choice_labels else []
                for index, value in enumerate(values):
                    try:
                        label = labels[index]
                    # BUG FIX: was a bare `except:`; only a missing label
                    # (IndexError) should fall back to using the value.
                    except IndexError:
                        label = value
                    choices.append((value, label))
                args.update({
                    'choices': tuple(choices)
                })

        if self.field_class in ('django.forms.ModelChoiceField', 'django.forms.ModelMultipleChoiceField'):
            args.update({
                'queryset': ModelNameField.get_model_from_string(self.choice_model).objects.all()
            })

        if self.field_class == 'django.forms.ModelChoiceField':
            args.update({
                'empty_label': self.choice_model_empty_label
            })

        if self.widget:
            args.update({
                'widget': get_class(self.widget)()
            })

        return args

    def __unicode__(self):
        return self.label if self.label else self.name
class FormLog(models.Model):
    """One logged submission of a FormDefinition; values stored as FormValue rows."""
    form_definition = models.ForeignKey(FormDefinition, related_name='logs')
    # NOTE(review): auto_now refreshes the timestamp on *every* save; for a
    # creation timestamp auto_now_add is usually intended -- confirm.
    created = models.DateTimeField(_('Created'), auto_now=True)
    created_by = models.ForeignKey(django_settings.AUTH_USER_MODEL, null=True, blank=True)
    # Temporary holder for data assigned before the instance is saved.
    _data = None

    def __unicode__(self):
        return "%s (%s)" % (self.form_definition.title or self.form_definition.name, self.created)

    def get_data(self):
        """Return logged values as FormValueDicts, ordered by the current field layout.

        Values whose field definition was removed are appended at the end with
        a None label; fields with no logged value appear with value None.
        """
        if self._data:
            # before instance is saved
            return self._data
        data = []
        fields = self.form_definition.get_field_dict()
        values_with_header = {}
        values_without_header = []
        for item in self.values.all():
            field = fields.get(item.field_name, None)
            if field:
                # get field label if field definition still exists
                label = field.label
            else:
                # field may have been removed
                label = None
            value_dict = FormValueDict(item.field_name, item.value, label)
            if item.field_name in fields:
                values_with_header[item.field_name] = value_dict
            else:
                values_without_header.append(value_dict)
        for field_name, field in fields.items():
            if field_name in values_with_header:
                data.append(values_with_header[field_name])
            else:
                data.append(FormValueDict(field.name, None, field.label))
        for value in values_without_header:
            data.append(value)
        return data

    def set_data(self, form_data):
        # keep form data in temporary variable since instance must
        # be saved before saving values
        self._data = form_data

    data = property(get_data, set_data)

    def save(self, *args, **kwargs):
        """Save the log row, then replace its FormValue rows from the pending data."""
        super(FormLog, self).save(*args, **kwargs)
        if self._data:
            # save form data and then clear temporary variable
            for value in self.values.all():
                value.delete()
            for item in self._data:
                value = FormValue()
                value.field_name = item['name']
                value.value = item['value']
                self.values.add(value)
            self._data = None
class FormValue(models.Model):
    """A single field value belonging to one FormLog entry."""
    form_log = models.ForeignKey(FormLog, related_name='values')
    field_name = models.SlugField(_('field name'), max_length=255)
    # Storage backend chosen once at import time based on project settings.
    if settings.VALUE_PICKLEFIELD:
        # use PickledObjectField if available because it preserves the
        # original data type
        value = PickledObjectField(_('value'), null=True, blank=True)
    else:
        # otherwise just use a TextField, with the drawback that
        # all values will just be stored as unicode strings,
        # but you can easily query the database for form results.
        value = models.TextField(_('value'), null=True, blank=True)

    def __unicode__(self):
        return u'%s = %s' % (self.field_name, self.value)
# Register the app's custom model fields with South's inspector so legacy
# (pre-Django-1.7) schema migrations can introspect them.
if 'south' in django_settings.INSTALLED_APPS:
    from south.modelsinspector import add_introspection_rules
    add_introspection_rules([], ["^form_designer\.fields\..*"])
| {
"content_hash": "0a5bd14a81739bbccc6d5154476707c8",
"timestamp": "",
"source": "github",
"line_count": 379,
"max_line_length": 377,
"avg_line_length": 46.32981530343008,
"alnum_prop": 0.6314140896406402,
"repo_name": "housleyjk/django-form-designer",
"id": "724c2a2db32c12a78348526c25d96c65c259c777",
"size": "17559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "form_designer/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "7151"
},
{
"name": "Python",
"bytes": "135007"
}
],
"symlink_target": ""
} |
import os
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import datetime
print ("Packages loaded")
# Load them!
cwd = os.getcwd()
loadpath = cwd + "/custom_data_signs.npz"
# npz archive produced by the data-preparation step.
l = np.load(loadpath)
# See what's in here
print (l.files)
# Parse data
trainimg = l['trainimg']
trainlabel = l['trainlabel']
testimg = l['testimg']
testlabel = l['testlabel']
imgsize = l['imgsize']
use_gray = l['use_gray']
# Derived dataset dimensions: rows are samples, columns are flattened pixels.
ntrain = trainimg.shape[0]
nclass = trainlabel.shape[1]
dim = trainimg.shape[1]
ntest = testimg.shape[0]
print ("%d train images loaded" % (ntrain))
print ("%d test images loaded" % (ntest))
print ("%d dimensional input" % (dim))
print ("Image size is %s" % (imgsize))
print ("%d classes" % (nclass))
#define network
tf.set_random_seed(0)
n_input = dim
n_output = nclass
if use_gray:
    # Two fully-connected layers: flattened image -> 128 hidden units -> logits.
    weights = {
        'wd1': tf.Variable(tf.random_normal(
            [(int)(imgsize[0]*imgsize[1]), 128], stddev=0.1),name="wd1"),
        'wd2': tf.Variable(tf.random_normal([128, n_output], stddev=0.1),name="wd2")
    }
else:
    # NOTE(review): only a warning is printed here; `weights` is never defined
    # in this branch, so the script later fails with a NameError.
    print "You should use gray images!!"
biases = {
    'bd1': tf.Variable(tf.random_normal([128], stddev=0.1),name="bd1"),
    'bd2': tf.Variable(tf.random_normal([n_output], stddev=0.1),name="bd2")
}
def conv_basic(_input, _w, _b, _keepratio, _use_gray):
    """Build the two-layer fully-connected network over flattened input images.

    Returns a dict with key 'out' holding the logits tensor.
    """
    # Reshape flat rows into image tensors (1 channel for gray, 3 otherwise).
    channels = 1 if _use_gray else 3
    images = tf.reshape(_input, shape=[-1, imgsize[0], imgsize[1], channels])
    # Flatten back to match the first weight matrix's input dimension.
    flat = tf.reshape(images, [-1, _w['wd1'].get_shape().as_list()[0]])
    # Fully connected layer 1 with ReLU and dropout.
    hidden = tf.nn.relu(tf.add(tf.matmul(flat, _w['wd1']), _b['bd1']))
    dropped = tf.nn.dropout(hidden, _keepratio)
    # Fully connected layer 2 produces the logits.
    logits = tf.add(tf.matmul(dropped, _w['wd2']), _b['bd2'])
    return {'out': logits}
print ("NETWORK READY")
#define functions
# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_output])
keepratio = tf.placeholder(tf.float32)
# Functions!
_pred = conv_basic(x, weights, biases, keepratio, use_gray)['out']
# NOTE(review): positional (logits, labels) arguments -- pre-TF-1.0 API;
# newer TensorFlow requires keyword arguments here. Confirm the pinned version.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(_pred, y))
WEIGHT_DECAY_FACTOR = 0.0001
# L2 weight decay summed over every trainable variable (weights and biases).
l2_loss = tf.add_n([tf.nn.l2_loss(v)
    for v in tf.trainable_variables()])
cost = cost + WEIGHT_DECAY_FACTOR*l2_loss
optm = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
_corr = tf.equal(tf.argmax(_pred,1), tf.argmax(y,1)) # Count corrects
accr = tf.reduce_mean(tf.cast(_corr, tf.float32)) # Accuracy
init = tf.initialize_all_variables()
print ("FUNCTIONS READY")
#optimize
# Parameters
training_epochs = 400
batch_size = 100
display_step = 1
# Launch the graph
sess = tf.Session()
sess.run(init)
# Training cycle
#Saver
save_step = 1;
#savedir = "nets/"
saver = tf.train.Saver(max_to_keep=3)
print('Start time: {:[%H:%M:%S]}'.format(datetime.datetime.now()))
for epoch in range(training_epochs):
    avg_cost = 0.
    # Batches are sampled with replacement (np.random.randint below), so the
    # "+1" just rounds the per-epoch batch count up.
    num_batch = int(ntrain/batch_size)+1
    # Loop over all batches
    for i in range(num_batch):
        randidx = np.random.randint(ntrain, size=batch_size)
        batch_xs = trainimg[randidx, :]
        batch_ys = trainlabel[randidx, :]
        # Fit training using batch data (dropout active: keep 70%)
        sess.run(optm, feed_dict={x: batch_xs, y: batch_ys
            , keepratio:0.7})
        # Compute average loss (dropout disabled for evaluation)
        avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys
            , keepratio:1.})/num_batch
    # Display logs per epoch step
    if epoch % display_step == 0 or epoch == training_epochs-1:
        print ('{:[%H:%M:%S] }'.format(datetime.datetime.now())+"Epoch: %03d/%03d cost: %.9f" %
            (epoch, training_epochs, avg_cost))
        # Training accuracy is measured on the *last* batch only.
        train_acc = sess.run(accr, feed_dict={x: batch_xs
            , y: batch_ys, keepratio:1.})
        print (" Training accuracy: %.3f" % (train_acc))
        test_acc = sess.run(accr, feed_dict={x: testimg
            , y: testlabel, keepratio:1.})
        print (" Test accuracy: %.3f" % (test_acc))
    #save epoch (every epoch, keeping only the 3 most recent checkpoints)
    if epoch % save_step == 0:
        saver.save(sess, cwd + "/nets/signs_fc.ckpt-" + str(epoch))
print ("Optimization Finished!")
sess.close()
print ("Session closed.")
| {
"content_hash": "c1fee3df5b3691281073fb540d9b0213",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 97,
"avg_line_length": 29.993243243243242,
"alnum_prop": 0.6010362694300518,
"repo_name": "stefanbo92/maleChildren",
"id": "5c633c0845def009824c21e99aacd8c795ec04a7",
"size": "4439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DeepLearning/trainDataNamesSimple.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CMake",
"bytes": "506"
},
{
"name": "Java",
"bytes": "19480"
},
{
"name": "Python",
"bytes": "71742"
}
],
"symlink_target": ""
} |
import collections

class defaultdict_noinsert(collections.defaultdict):
    """defaultdict variant whose missing-key lookups do NOT insert the default.

    A lookup of an absent key returns a freshly built default value but leaves
    the mapping unchanged; without a factory it raises KeyError like a dict.
    """

    def __missing__(self, key):
        factory = self.default_factory
        if factory is None:
            raise KeyError((key,))
        return factory()
| {
"content_hash": "482436438fbdc89aef87db512ca56114",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 52,
"avg_line_length": 27.5,
"alnum_prop": 0.6636363636363637,
"repo_name": "bigblindbais/pytk",
"id": "4b444a14d93651ef8a294471625f0e30e9651bc8",
"size": "220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pytk/more_collections/noinsert.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "91840"
}
],
"symlink_target": ""
} |
__author__ = 'huanpc'
import constant
import argparse
from random import randint
# Path to the output directory for the generated data-seed file
DIR_OUTPUT_PATH = './output'
# Starting value for the id column in the customer table
customer_id_begin = 5
product_id_begin = 0
# Number of rows to generate
num_of_row = 100
#def createProductData():
def createCustomerData():
    """Write `num_of_row` synthetic customer rows to customer_data_seed.csv.

    Names are drawn randomly from `constant.FIRST_NAME`/`constant.LAST_NAME`;
    ids start at `customer_id_begin`.
    """
    first_name_list = constant.FIRST_NAME
    last_name_list = constant.LAST_NAME
    # NOTE(review): column_heading is built but never written to the file;
    # a header row was presumably intended -- confirm with the CSV consumers
    # before changing the output format.
    column_heading = ['customer_id','customer_group_id','store_id','first_name','last_name','email','telephone','fax','password','salt','cart','whistlist',
                      'newsleter','address_id','custom_field','ip','status','approves','safe','token','date_added']
    # Template row; indexes 0 and 3-7 are overwritten for each record.
    row = ['1',constant.CUSTOMER_GROUP_ID,constant.STORE_ID,'1','1','1','1','1',constant.PASSWORD,constant.SALT,constant.CART,constant.WHISTLIST,constant.NEWSLETTER,constant.ADDRESS_ID,
           constant.CUSTOM_FIELD,constant.IP,constant.STATUS,constant.APPROVED,constant.SAFE,constant.TOKEN,constant.DATE_ADDED]
    # BUG FIX: use a context manager so the file is closed even if a write
    # fails (the handle leaked on exception before).
    with open(DIR_OUTPUT_PATH + '/customer_data_seed.csv', 'w') as f:
        for i in range(num_of_row):
            first_name = first_name_list[randint(0, len(constant.FIRST_NAME) - 1)]
            last_name = last_name_list[randint(0, len(constant.LAST_NAME) - 1)]
            row[0] = str(i + customer_id_begin)
            row[3] = first_name
            row[4] = last_name
            row[5] = str(first_name + '.' + last_name + '@gmail.com').lower()
            # Random 10-digit phone number, duplicated into the fax column.
            row[6] = str(randint(11111, 99999)) + str(randint(11111, 99999))
            row[7] = row[6]
            f.write(','.join(row) + '\n')
def main():
    """Entry point: generate the customer seed file (argparse wiring is disabled)."""
    # parser = argparse.ArgumentParser(description='Sinh du lieu mau cho tap test')
    # parser.add_argument('integers', metavar='N', type=int, nargs='+',
    #                     help='an integer for the accumulator')
    createCustomerData()

if __name__ == '__main__':
    main()
| {
"content_hash": "74ee44b1ace7ec3f6757c031ed77dd62",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 185,
"avg_line_length": 40.869565217391305,
"alnum_prop": 0.6420212765957447,
"repo_name": "huanpc/lab_cloud_computing",
"id": "30ef5d15a62b0f308f6865d41a8dfe7d47c74e38",
"size": "1880",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Jmeter Test Plan/Seeding_Data/Main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6106"
}
],
"symlink_target": ""
} |
import os.path as osp
from grasp_fusion_lib.contrib import grasp_fusion
import train_common
here = osp.dirname(osp.abspath(__file__))
def main():
    """Configure and launch instance-segmentation training.

    Trains on synthetic composites and evaluates on the real-image
    dataset, using the COCO evaluator.
    """
    parser = train_common.get_parser()
    parser.add_argument(
        '--exclude-arc2017',
        action='store_true',
        help='Exclude ARC2017 objects from synthetic',
    )
    parser.add_argument(
        '--background',
        choices=['tote', 'tote+shelf'],
        default='tote',
        help='background image in 2D synthesis',
    )
    args = parser.parse_args()

    args.logs_dir = osp.join(here, 'logs')

    # Datasets: synthetic for training, real captures for evaluation.
    args.dataset = 'synthetic'
    train_data = grasp_fusion.datasets.SyntheticInstanceSegmentationDataset(
        augmentation=True,
        augmentation_level='all',
        exclude_arc2017=args.exclude_arc2017,
        background=args.background,
    )
    test_data = grasp_fusion.datasets.RealInstanceSegmentationDataset()
    args.class_names = tuple(test_data.class_names.tolist())

    # Model input geometry and RPN anchor configuration.
    args.min_size = 600
    args.max_size = 1000
    args.anchor_scales = (4, 8, 16, 32)

    train_common.train(
        args=args,
        train_data=train_data,
        test_data=test_data,
        evaluator_type='coco',
    )


if __name__ == '__main__':
    main()
| {
"content_hash": "15248165b96bb64cb7b2ff4006a29fb6",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 67,
"avg_line_length": 23.732142857142858,
"alnum_prop": 0.5989465763732129,
"repo_name": "pazeshun/jsk_apc",
"id": "b63e20b5ad4bd69b846e06ff47db7ab288b96092",
"size": "1352",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "demos/grasp_fusion/examples/grasp_fusion/instance_segmentation/train.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "101871"
},
{
"name": "CMake",
"bytes": "42995"
},
{
"name": "Common Lisp",
"bytes": "695864"
},
{
"name": "Dockerfile",
"bytes": "1503"
},
{
"name": "HTML",
"bytes": "6364"
},
{
"name": "Python",
"bytes": "406153"
},
{
"name": "Shell",
"bytes": "4475"
}
],
"symlink_target": ""
} |
from google.cloud import tpu_v1
async def sample_reimage_node():
    """Reimage a TPU node and wait for the long-running operation to finish."""
    # Create a client
    client = tpu_v1.TpuAsyncClient()

    # Initialize request argument(s)
    request = tpu_v1.ReimageNodeRequest(
    )

    # Make the request. The async client's method is a coroutine that
    # resolves to an AsyncOperation; without this await the call returned
    # a bare coroutine object, and `operation.result()` below raised
    # AttributeError.
    operation = await client.reimage_node(request=request)

    print("Waiting for operation to complete...")

    # AsyncOperation.result() is itself a coroutine and must be awaited.
    response = await operation.result()

    # Handle the response
    print(response)
# [END tpu_v1_generated_Tpu_ReimageNode_async]
| {
"content_hash": "e698cba0ad0d6aedd56b8c31c3d30c27",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 52,
"avg_line_length": 21.59090909090909,
"alnum_prop": 0.6884210526315789,
"repo_name": "googleapis/python-tpu",
"id": "a19f27082ff0be1e6c14c34b6d9b1c61b02a418a",
"size": "1839",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/tpu_v1_generated_tpu_reimage_node_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "828092"
},
{
"name": "Shell",
"bytes": "30651"
}
],
"symlink_target": ""
} |
import hashlib
import json
import logging
import optparse
import os
import re
import sys
import unittest
import urllib2
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
import local_gae
class TestCase(unittest.TestCase):
  """Base harness: boots a fresh local GAE dev server for every test."""

  def setUp(self):
    super(TestCase, self).setUp()
    # Restart the server on each test. It's a bit slow but safer.
    self.local_gae = local_gae.LocalGae()
    self.local_gae.start_server(logging.getLogger().isEnabledFor(logging.DEBUG))
    self.url = 'http://127.0.0.1:%d/' % self.local_gae.port
    self.clear_cookies()

  def tearDown(self):
    # Shut the dev server down even if the test failed mid-way.
    if self.local_gae:
      self.local_gae.stop_server()
    self.local_gae = None
    super(TestCase, self).tearDown()

  def get(self, suburl):
    # GET |suburl| relative to the app root; returns the response body.
    return self.local_gae.get(suburl)

  def post(self, suburl, data):
    # POST |data| to |suburl|; returns the response body.
    return self.local_gae.post(suburl, data)

  def clear_cookies(self):
    self.local_gae.clear_cookies()

  def login(self, username, admin=False):
    # Fake a GAE user login on the dev server.
    self.local_gae.login(username, admin)

  def set_admin_pwd(self, password):
    # Overwrite the app's single Passwords entity with sha1(|password|).
    # There will be no entities until main() has been called. So do a dummy
    # request first.
    hashvalue = hashlib.sha1(password).hexdigest()
    try:
      self.get('doesnt_exist')
    except urllib2.HTTPError:
      pass
    # The query below runs inside the dev server; the trailing count is
    # echoed back so we can verify the script executed.
    output = self.local_gae.query(
        'import base_page\n'
        # First verify the default value exists.
        'n = db.GqlQuery("SELECT * FROM Passwords").count()\n'
        'assert n == 1, "n == 1"\n'
        # Then override its value with |password|.
        'p = db.GqlQuery("SELECT * FROM Passwords").get()\n'
        + ('p.password_sha1 = %r\n' % hashvalue) +
        'p.put()\n'
        'print db.GqlQuery("SELECT * FROM Passwords").count(),\n')
    self.assertEqual(output, '1')

  def set_global_config(self, app_name, public_access):
    # Replace the GlobalConfig entity to toggle anonymous (public) access.
    cmd = (
        'import base_page\n'
        # Verify the default config exists.
        'n = db.GqlQuery("SELECT * FROM GlobalConfig").count()\n'
        'assert n == 1, "n == 1"\n'
        # Then make sure access is sane.
        'config = base_page.GlobalConfig(app_name=%r)\n' % app_name +
        'config.public_access = %r\n' % public_access +
        'config.put()\n'
        'print "ok",\n'
    )
    output = self.local_gae.query(cmd)
    self.assertEqual(output, 'ok')
class PublicTestCase(TestCase):
  """TestCase variant whose app instance permits anonymous (public) access."""

  def setUp(self):
    super(PublicTestCase, self).setUp()
    self.set_global_config(public_access=True, app_name='bogus_app')
class StatusTest(PublicTestCase):
  """Exercises the tree-status endpoints (/allstatus, /status, /current)."""

  def test_all_status(self):
    out = self.get('allstatus').splitlines()
    out = [i for i in out if i]
    # Expect the CSV header plus the single default status row.
    self.assertEquals(2, len(out))
    self.assertEquals('Who,When,GeneralStatus,Message', out[0])
    self.assertTrue(
        re.match('none,.+?, \d+?, .+?,open,welcome to status', out[1]), out[1])

  def test_status(self):
    # '1' means the tree is open.
    self.assertEqual('1', self.get('status'))

  def test_current(self):
    out = self.get('current')
    self.assertTrue(100 < len(out))
    self.assertTrue(out.startswith('<html>'))

  def test_current_raw(self):
    # Default value.
    self.assertEqual('welcome to status', self.get('current?format=raw'))

  def test_current_json(self):
    # pylint: disable=E1103
    out = json.loads(self.get('current?format=json'))
    expected = [
        'date', 'username', 'message', 'general_state', 'can_commit_freely',
    ]
    # TODO(maruel): Test actual values.
    self.assertEqual(sorted(expected), sorted(out.keys()))

  def test_status_push(self):
    self.assertEqual('welcome to status', self.get('current?format=raw'))
    self.assertEqual('welcome to status', self.get('current?format=raw'))
    # Set a password, force status with password.
    self.set_admin_pwd('bleh')
    data = {
        'message': 'foo',
        'password': 'bleh',
        'username': 'user1',
    }
    self.assertEqual('OK', self.post('status', data))
    self.assertEqual('foo', self.get('current?format=raw'))
    # A bad password must be rejected with an HTTP error.
    data['message'] = 'bar'
    data['password'] = 'wrong password'
    self.assertRaises(urllib2.HTTPError, self.post, 'status', data)
    # Wasn't updated since the password was wrong.
    self.assertEqual('foo', self.get('current?format=raw'))
    data['message'] = 'boo'
    data['password'] = 'bleh'
    self.assertEqual('OK', self.post('status', data))
    self.assertEqual('boo', self.get('current?format=raw'))

  def test_root(self):
    # The main page renders something substantial.
    self.assertTrue(100 < len(self.get('')))
class LkgrTest(PublicTestCase):
  """Exercises the last-known-good-revision endpoints (/lkgr, /git-lkgr)."""

  def test_lkgr(self):
    # No revision has been pushed yet, so the endpoint returns empty.
    self.assertEqual('', self.get('lkgr'))

  def test_lkgr_set(self):
    self.set_admin_pwd('bleh')
    data = {
        'revision': 42,
        'password': 'bleh',
        'success': '1',
        'steps': '',
    }
    out = self.post('revisions', data)
    self.assertEqual('', out)
    self.assertEqual('42', self.get('lkgr'))
    # No git hash has been pushed yet, so git-lkgr errors out.
    self.assertRaises(urllib2.HTTPError, self.get, 'git-lkgr')
    data['git_hash'] = 'c305f265aba93cc594a0fece50346c3af7fe3301'
    out = self.post('revisions', data)
    self.assertEqual('', out)
    self.assertEqual('c305f265aba93cc594a0fece50346c3af7fe3301',
                     self.get('git-lkgr'))
    # A wrong password must leave both stored values untouched.
    data['password'] = 'wrongpassword'
    data['revision'] = 23
    self.assertRaises(urllib2.HTTPError, self.post, 'revisions', data)
    self.assertEqual('42', self.get('lkgr'))
    self.assertEqual('c305f265aba93cc594a0fece50346c3af7fe3301',
                     self.get('git-lkgr'))
    # Updating the SVN revision alone keeps the previous git hash.
    data['password'] = 'bleh'
    data['revision'] = 31337
    out = self.post('revisions', data)
    self.assertEqual('', out)
    self.assertEqual('31337', self.get('lkgr'))
    self.assertEqual('c305f265aba93cc594a0fece50346c3af7fe3301',
                     self.get('git-lkgr'))
    # And updating the git hash keeps the previous SVN revision.
    data['git_hash'] = '988881adc9fc3655077dc2d4d757d480b5ea0e11'
    out = self.post('revisions', data)
    self.assertEqual('', out)
    self.assertEqual('31337', self.get('lkgr'))
    self.assertEqual('988881adc9fc3655077dc2d4d757d480b5ea0e11',
                     self.get('git-lkgr'))
class AccessControl(TestCase):
  """Verifies read/write access depending on login domain and public config."""

  def _check_post_thru_ui(self, fails=False, fails_main_page=False):
    # Posting via the web UI requires the last_status_key hidden form field
    # scraped from the main page.
    if fails_main_page:
      self.assertRaises(urllib2.HTTPError, self.get, '')
      self.assertRaises(
          urllib2.HTTPError, self.post, '',
          {'message': 'foo', 'last_status_key': 'junk'})
    else:
      main_page = self.get('')
      last_status_key = re.search(
          r'name="last_status_key" value="(.*?)"', main_page)
      if fails:
        # last_status_key doesn't appear if you aren't an admin.
        self.assertEqual(None, last_status_key)
        self.assertRaises(
            urllib2.HTTPError, self.post, '',
            {'message': 'foo', 'last_status_key': 'junk'})
      else:
        self.post('', {'message': 'foo',
                       'last_status_key': last_status_key.group(1)})
        self.assertEqual('foo', self.get('current?format=raw'))

  def _check_current_page(self, fails=False, seeks_login=False):
    # /current either 403s, renders a login interstitial, or renders status.
    if fails:
      self.assertRaises(urllib2.HTTPError, self.get, 'current')
    elif seeks_login:
      out = self.get('current')
      self.assertTrue(100 < len(out))
      self.assertTrue(out.startswith('<html>'))
      self.assertTrue('Login Required' in out)
    else:
      out = self.get('current')
      self.assertTrue(100 < len(out))
      self.assertTrue(out.startswith('<html>'))
      self.assertTrue('<title>Login</title>' not in out)
      self.assertTrue('Login Required' not in out)

  def _check_current_raw_page(self, fails=False, seeks_login=False):
    # Same as above but for the raw format, which must never be HTML when
    # access is granted.
    if fails:
      self.assertRaises(urllib2.HTTPError, self.get, 'current?format=raw')
    elif seeks_login:
      out = self.get('current?format=raw')
      self.assertTrue(100 < len(out))
      self.assertTrue(out.startswith('<html>'))
      self.assertTrue('<title>Login</title>' in out)
    else:
      out = self.get('current?format=raw')
      self.assertTrue(not out.startswith('<html>'))
      self.assertTrue('<title>Login</title>' not in out)
      self.assertTrue('Login Required' not in out)

  def _check_post_thru_status_fails(self):
    # The bot endpoint always rejects posts without the bot password.
    self.assertRaises(urllib2.HTTPError, self.post,
                      'status', {'message': 'foo'})

  def test_default_denies_chromium(self):
    # Confirm default config does not allow chromium.org access.
    self.login('bob@chromium.org')
    self._check_current_page(fails=True)
    self._check_current_raw_page(fails=True)
    self._check_post_thru_ui(fails=True, fails_main_page=True)
    self._check_post_thru_status_fails()

  def test_private_requires_login(self):
    # Confirm private access redirects to a login screen.
    self._check_current_page(seeks_login=True)
    self._check_current_raw_page(seeks_login=True)

  def test_private_allows_google(self):
    self.login('bob@google.com')
    self._check_current_page()
    self._check_current_raw_page()
    self._check_post_thru_ui()
    # Status, however, requires bot login.
    self._check_post_thru_status_fails()

  def test_private_denies_other(self):
    self.login('bob@example.com')
    self._check_current_page(fails=True)
    self._check_current_raw_page(fails=True)
    self._check_post_thru_ui(fails=True, fails_main_page=True)
    self._check_post_thru_status_fails()

  def test_public_allows_chromium(self):
    self.set_global_config(app_name='foo', public_access=True)
    self.login('bob@chromium.org')
    self._check_current_page()
    self._check_current_raw_page()
    self._check_post_thru_ui()
    # Status, however, requires bot login.
    self._check_post_thru_status_fails()

  def test_public_is_limited(self):
    # Public instances are readable by anyone but writable only by
    # privileged domains.
    self.set_global_config(app_name='foo', public_access=True)
    self.login('bar@baz.com')
    self._check_current_page()
    self._check_current_raw_page()
    self._check_post_thru_ui(fails=True)
    self._check_post_thru_status_fails()

  def test_non_bot_admins_cant_forge(self):
    # Even an admin may not impersonate another user via the bot endpoint.
    self.login('admin@google.com')
    data = {
        'message': 'foo',
        'username': 'bogus@google.com',
    }
    self.assertRaises(urllib2.HTTPError, self.post, 'status', data)
    self.assertNotEqual('foo', self.get('current?format=raw'))

  def test_update_global_config(self):
    """Verify updating the global config affects the active instance"""
    result = self.local_gae.query(
        'import base_page\n'
        # Verify the default config exists.
        'n = base_page.GlobalConfig.all().count()\n'
        'assert n == 1, "n == 1"\n'
        # Verify there is a config, and shows False.
        'q = base_page.GlobalConfig.all()\n'
        'assert q.count() == 1, "q.count() == 1"\n'
        'config = q.get()\n'
        'assert not config.public_access, "not config.public_access"\n'
        # Make the instance public.
        'config.public_access = True\n'
        'config.put()\n'
        'print "ok",\n')
    self.assertEqual('ok', result)
    # Login and try various operations.
    self.login('bob@chromium.org')
    self._check_current_page()
    self._check_current_raw_page()
    self._check_post_thru_ui()
    # Verify the config now shows True.
    result = self.local_gae.query(
        'import base_page\n'
        'q = base_page.GlobalConfig.all()\n'
        'assert q.count() == 1, "q.count() == 1"\n'
        'print q.get().public_access\n')
    self.assertEqual('True\n', result)
def _init_logging(argv):
"""Set up our logging by re-using some of the unittest flags"""
parser = optparse.OptionParser()
parser.add_option('-v', action='count', default=0)
(opts, _) = parser.parse_args([x for x in argv if x.startswith('-v')])
levels = [logging.WARNING, logging.INFO, logging.DEBUG]
logging.basicConfig(level=levels[min(2, opts.v)])
if __name__ == '__main__':
_init_logging(sys.argv)
unittest.main()
| {
"content_hash": "ec67cfeecd8e142e961efe4dd44edba0",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 80,
"avg_line_length": 34.15743440233236,
"alnum_prop": 0.6341754865141687,
"repo_name": "nicko96/Chrome-Infra",
"id": "d601aff895103e4afb13ff5de8e42fd1613f0117",
"size": "11905",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "appengine/chromium_status/tests/main_test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "100398"
},
{
"name": "Go",
"bytes": "648467"
},
{
"name": "HTML",
"bytes": "7323317"
},
{
"name": "JavaScript",
"bytes": "913960"
},
{
"name": "Makefile",
"bytes": "11281"
},
{
"name": "Protocol Buffer",
"bytes": "2730"
},
{
"name": "Python",
"bytes": "4034630"
},
{
"name": "Shell",
"bytes": "21687"
}
],
"symlink_target": ""
} |
from guardian.shortcuts import remove_perm
from core.tests.base import LoggedTestCase
from core.tests.fixtures import ServerRoleFactory, DepartmentFactory, EnvironmentFactory
from .fixtures import *
class TaskTest(LoggedTestCase):
    """Permission and rendering tests for task views as a regular user."""

    def setUp(self):
        super(TaskTest, self).setUp()
        self.application = ApplicationFactory(department=self.department)
        self.task = TaskFactory(application=self.application)

    def test_form_create_forbidden(self):
        # Regular users may not open the task-creation form.
        application = ApplicationFactory(department=self.department)
        response = self.client.get('/application/%d/task/' % application.id)
        self.assertForbidden(response)

    def test_view(self):
        # The detail page shows the execute link but not the edit link.
        response = self.client.get('/task/%d/' % self.task.id)
        self.assertContains(response, self.task.name)
        self.assertContains(response, reverse('task_execute_page', kwargs={'task_id': self.task.id}))
        self.assertNotContains(response, reverse('task_form_page', kwargs={'task_id': self.task.id}))

    def test_view_execute_forbidden(self):
        # Without execute permission the execute link disappears.
        self.remove_perm_from_user_group('task.execute_task', self.task)
        response = self.client.get('/task/%d/' % self.task.id)
        self.assertContains(response, self.task.name)
        self.assertNotContains(response, reverse('task_execute_page', kwargs={'task_id': self.task.id}))

    def test_view_forbidden_department(self):
        # Tasks from another department are invisible.
        application = ApplicationFactory(department=DepartmentFactory())
        task = TaskFactory(application=application)
        response = self.client.get('/task/%d/' % task.id)
        self.assertForbidden(response)

    def test_view_forbidden_task(self):
        self.remove_perm_from_user_group('task.view_task', self.task)
        response = self.client.get('/task/%d/' % self.task.id)
        self.assertForbidden(response)

    def test_execute(self):
        # Only environments the user may execute on are offered in the form.
        environment_valid = EnvironmentFactory(application=self.application)
        environment_invalid = EnvironmentFactory(application=self.application)
        self.remove_perm_from_user_group('core.execute_environment', environment_invalid)
        response = self.client.get('/task/%d/execute/' % self.task.id)
        self.assertContains(response, self.task.name)
        self.assertContains(response, environment_valid.name+"</option>")
        self.assertNotContains(response, environment_invalid.name+"</option>")
        self.assertNotContains(response, reverse('task_form_page', kwargs={'task_id': self.task.id}))

    def test_execute_forbidden(self):
        self.remove_perm_from_user_group('task.execute_task', self.task)
        response = self.client.get('/task/%d/execute/' % self.task.id)
        self.assertForbidden(response)

    def test_execute_forbidden_change(self):
        # No change permission hides the edit link on the execute page.
        self.remove_perm_from_user_group('task.change_task', self.task)
        response = self.client.get('/task/%d/execute/' % self.task.id)
        self.assertNotContains(response, reverse('task_form_page', kwargs={'task_id': self.task.id}))

    def test_list(self):
        response = self.client.get('/application/%d/' % self.task.application.id)
        self.assertContains(response, self.task.name)

    def test_execution(self):
        environment = EnvironmentFactory(application=self.application)
        execution = ExecutionFactory(task=self.task, environment=environment, user=self.user)
        response = self.client.get('/execution/%d/' % execution.id)
        self.assertContains(response, self.task.name)
        self.assertContains(response, environment.name)
        self.assertContains(response, reverse('task_execute_page', kwargs={'task_id': self.task.id}))
        self.assertNotContains(response, reverse('task_form_page', kwargs={'task_id': self.task.id}))

    def test_execution_forbidden_change(self):
        # Without change/execute permission neither action link is shown.
        self.remove_perm_from_user_group('task.change_task', self.task)
        self.remove_perm_from_user_group('task.execute_task', self.task)
        environment = EnvironmentFactory(application=self.application)
        execution = ExecutionFactory(task=self.task, environment=environment, user=self.user)
        response = self.client.get('/execution/%d/' % execution.id)
        self.assertContains(response, self.task.name)
        self.assertContains(response, environment.name)
        self.assertNotContains(response, reverse('task_execute_page', kwargs={'task_id': self.task.id}))
        self.assertNotContains(response, reverse('task_form_page', kwargs={'task_id': self.task.id}))

    def test_execution_forbidden(self):
        environment = EnvironmentFactory(application=self.application)
        execution = ExecutionFactory(task=self.task, environment=environment, user=self.user)
        # NOTE(review): uses 'core.view_task' while other tests use
        # 'task.view_task' -- confirm which permission string is intended.
        self.remove_perm_from_user_group('core.view_task', self.task)
        response = self.client.get('/execution/%d/' % execution.id)
        self.assertForbidden(response)

    def test_form_edit_forbidden(self):
        response = self.client.get('/task/%d/edit/' % self.task.id)
        self.assertForbidden(response)
class TaskManagerTest(LoggedTestCase):
    """Same task views exercised as a manager, who also gets edit rights."""

    # Flag consumed by LoggedTestCase to log in with manager privileges.
    logged_is_manager = True

    def setUp(self):
        super(TaskManagerTest, self).setUp()
        self.application = ApplicationFactory(department=self.department)
        self.task = TaskFactory(application=self.application)

    def test_view(self):
        # Managers see the edit link on the detail page.
        response = self.client.get('/task/%d/' % self.task.id)
        self.assertContains(response, reverse('task_form_page', kwargs={'task_id': self.task.id}))

    def test_execute(self):
        response = self.client.get('/task/%d/execute/' % self.task.id)
        self.assertContains(response, reverse('task_form_page', kwargs={'task_id': self.task.id}))

    def test_execution(self):
        environment = EnvironmentFactory(application=self.application)
        execution = ExecutionFactory(task=self.task, environment=environment, user=self.user)
        response = self.client.get('/execution/%d/' % execution.id)
        self.assertContains(response, reverse('task_form_page', kwargs={'task_id': self.task.id}))

    def test_delete(self):
        # Delete endpoint responds with a JSON-ish 'true' on success.
        response = self.client.post('/task/%d/delete/' % self.task.id)
        self.assertContains(response, 'true')

    def test_form_edit(self):
        response = self.client.get('/task/%d/edit/' % self.task.id)
        self.assertContains(response, 'Save')

    def test_form_create(self):
        response = self.client.get('/application/%d/task/' % self.task.application.id)
        self.assertContains(response, 'Add task')

    def test_create(self):
        # Posting a full formset payload must create the task.
        server_role = ServerRoleFactory(department=self.department)
        data = {'name': 'TaskName',
                'TaskCommand-0-ORDER': '1',
                'TaskCommand-0-command': 'echo 123',
                'TaskCommand-0-roles': server_role.id,
                'application': self.application.id,
                'TaskParameter-TOTAL_FORMS': 1,
                'TaskParameter-INITIAL_FORMS': 0,
                'TaskParameter-MAX_NUM_FORMS': 1000,
                'TaskCommand-TOTAL_FORMS': 2,
                'TaskCommand-INITIAL_FORMS': 0,
                'TaskCommand-MAX_NUM_FORMS': 1000}
        self.client.post('/application/%d/task/' % self.task.application.id, data)
        try:
            Task.objects.get(name='TaskName')
        except Task.DoesNotExist:
            self.fail('Task not created')
"content_hash": "d5d20651520266cf592a185a282195de",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 104,
"avg_line_length": 49.91156462585034,
"alnum_prop": 0.6731634182908546,
"repo_name": "senkal/gunnery",
"id": "d55d44b364584a1fd2b17a48aa5443e9b1407464",
"size": "7337",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gunnery/task/tests/test_views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "11576"
},
{
"name": "HTML",
"bytes": "85547"
},
{
"name": "JavaScript",
"bytes": "10378"
},
{
"name": "Makefile",
"bytes": "567"
},
{
"name": "Puppet",
"bytes": "9374"
},
{
"name": "Python",
"bytes": "269673"
},
{
"name": "Shell",
"bytes": "12056"
}
],
"symlink_target": ""
} |
from distantbes.proto.proto import build_event_stream_pb2 as bes
from distantbes.utils import dict_to_workspace_status, get_current_timestamp
from distantbes.enums import CPU, CompilationMode, EXIT_CODES
from platform import node
def file_message(name, contents):
    """Build a BES File message carrying *contents* inline, UTF-8 encoded."""
    msg = bes.File()
    msg.name = name
    msg.contents = contents.encode()
    return msg
def started(uuid, command, build_tool_version="0.0.1", start_time=None, pattern=()):
    """Build the BuildStarted event.

    Args:
        uuid: invocation UUID reported to the BES backend.
        command: the build command string (e.g. "build", "test").
        build_tool_version: version string advertised for the tool.
        start_time: epoch millis; defaults to the current time when None.
        pattern: optional iterable of target patterns to announce as a
            child Pattern event. Default changed from the mutable ``[]``
            to an immutable ``()`` (same observable behavior; the list
            default was the classic shared-mutable-default trap).

    Returns:
        A populated ``bes.BuildEvent``.
    """
    if start_time is None:
        start_time = get_current_timestamp()

    event = bes.BuildEvent()
    event.id.started.SetInParent()
    event.started.uuid = uuid
    event.started.start_time_millis = start_time
    event.started.build_tool_version = build_tool_version
    event.started.command = command

    # Announce the target pattern as a child event only when one was given.
    if len(pattern) > 0:
        pattern_e = bes.BuildEventId()
        pattern_e.pattern.SetInParent()
        pattern_e.pattern.pattern.extend(pattern)
        event.children.append(pattern_e)

    return event
def workspace_status(build_user="distant", build_host=node()):
    """Build a WorkspaceStatus event reporting BUILD_USER and BUILD_HOST."""
    # NOTE(review): node() is evaluated once at import time, so the default
    # hostname is frozen for the process lifetime -- fine for this use case.
    event = bes.BuildEvent()
    event.id.workspace_status.SetInParent()
    status = dict_to_workspace_status(
        {"BUILD_USER": build_user, "BUILD_HOST": build_host})
    event.workspace_status.CopyFrom(status)
    return event
def progress(count, stdout):
    """Build Progress event number *count* carrying captured stdout."""
    event = bes.BuildEvent()
    event.progress.stdout = stdout
    event.id.progress.opaque_count = count
    return event
def config(
    cpu=CPU.k8,
    compilation_mode=CompilationMode.fastbuild,
    gendir="/usr/bin/distant-bes",
    bindir="/usr/bin/distant-bes"
):
    """Build a Configuration event for the given CPU and compilation mode."""
    # Enum values are stringified so they can be used as make variables
    # and in the mnemonic.
    cpu = str(cpu)
    compilation_mode = str(compilation_mode)

    event = bes.BuildEvent()
    event.id.configuration.SetInParent()
    event.id.configuration.id = ""
    event.configuration.cpu = cpu
    event.configuration.platform_name = cpu
    event.configuration.make_variable.update({
        'TARGET_CPU': cpu,
        'COMPILATION_MODE': compilation_mode,
        'GENDIR': gendir,
        'BINDIR': bindir,
    })
    event.configuration.mnemonic = "{}-{}".format(cpu, compilation_mode)
    return event
def target_announce(label, kind="distant_target"):
    """Build a TargetConfigured event announcing *label* of rule kind *kind*."""
    event = bes.BuildEvent()
    event.configured.target_kind = kind
    event.id.target_configured.label = label
    return event
def target_complete(label, success, artifacts=None):
    """Build a TargetCompleted event for *label*.

    *artifacts* (a list of bes.File, or None) is attached as important
    output when provided.
    """
    event = bes.BuildEvent()
    event.id.target_completed.label = label
    event.completed.SetInParent()
    if isinstance(artifacts, list):
        event.completed.important_output.extend(artifacts)
    # Proto bools default to False, so the flag is only set on success.
    if success:
        event.completed.success = success
    return event
def test_events(label, status=1, duration=1, total_duration=None, log=None):
    """Build the (TestResult, TestSummary) event pair for one test target."""
    if total_duration is None:
        total_duration = duration
    epoch = get_current_timestamp()

    # Single run / single shard / first attempt.
    result = bes.BuildEvent()
    result.id.test_result.label = label
    result.id.test_result.run = 1
    result.id.test_result.shard = 1
    result.id.test_result.attempt = 1
    result.test_result.test_attempt_duration_millis = duration
    result.test_result.status = status
    result.test_result.test_attempt_start_millis_epoch = epoch
    if log is not None:
        result.test_result.test_action_output.append(log)

    summary = bes.BuildEvent()
    summary.id.test_summary.label = label
    summary.test_summary.overall_status = status
    summary.test_summary.total_run_count = 1
    if log is not None:
        summary.test_summary.passed.append(log)
    summary.test_summary.total_num_cached = 1
    summary.test_summary.first_start_time_millis = epoch
    summary.test_summary.last_stop_time_millis = epoch + total_duration
    summary.test_summary.total_run_duration_millis = total_duration

    return result, summary
def status_code(status_code):
    """Build the BuildFinished event for the given Bazel exit code."""
    event = bes.BuildEvent()
    event.id.build_finished.SetInParent()
    exit_code = event.finished.exit_code
    exit_code.name = EXIT_CODES[status_code]
    exit_code.code = status_code
    return event
def build_metrics(a_created, a_executed, t_loaded, t_configured, p_loaded):
    """Build a BuildMetrics event from action/target/package counters."""
    event = bes.BuildEvent()
    event.id.build_metrics.SetInParent()
    event.build_metrics.SetInParent()
    metrics = event.build_metrics
    metrics.action_summary.actions_created = a_created
    metrics.action_summary.actions_executed = a_executed
    metrics.target_metrics.targets_loaded = t_loaded
    metrics.target_metrics.targets_configured = t_configured
    metrics.package_metrics.packages_loaded = p_loaded
    return event
def build_metadata(dictionary):
    """Build a BuildMetadata event carrying *dictionary* as metadata."""
    event = bes.BuildEvent()
    metadata_id = event.id.build_metadata
    metadata_id.SetInParent()
    event.build_metadata.metadata.update(dictionary)
    return event
def last_event(elapsed_time):
    """Build the terminal BuildToolLogs event (flags last_message=True)."""
    event = bes.BuildEvent()
    event.id.build_tool_logs.SetInParent()
    event.last_message = True
    # Consumers expect an elapsed-time log entry and a profile placeholder.
    event.build_tool_logs.log.append(
        bes.File(name="elapsed time", contents=str(elapsed_time).encode()))
    event.build_tool_logs.log.append(
        bes.File(name="command.profile.gz", uri="file:///dev/null"))
    return event
| {
"content_hash": "b699962549415eae4f4cd403e5c74abc",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 84,
"avg_line_length": 30.75,
"alnum_prop": 0.6951219512195121,
"repo_name": "antmicro/distant-bes",
"id": "ec687ebff0ec19b97e1a384d668fcaa4dcf7ac74",
"size": "5412",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "distantbes/events.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "111621"
},
{
"name": "Shell",
"bytes": "55"
}
],
"symlink_target": ""
} |
def prime_or_not(x=None):
    """Report whether a number is prime, printing the divisibility trace.

    Args:
        x: integer to test. When None (the default, matching the original
           behavior) the number is read interactively from stdin.

    Returns:
        True if x is prime, False otherwise. The original returned nothing;
        returning a bool is backward-compatible and makes the function
        testable.

    Fixes: the original printed no verdict at all for x < 2 (e.g. 1 or 0
    fell through both the loop and the final check silently).
    """
    if x is None:
        x = int(input("Enter a number: "))
    if x < 2:
        # 0, 1 and negative numbers are not prime by definition.
        print("Ooops!.....It is not a prime")
        return False
    # Start at 2: every integer is divisible by 1 and by itself.
    y = 2
    while y < x:
        if x % y != 0:
            print(x, "is not divisible by", y)
        else:
            print(x, "is divisible by", y)
            print("Ooops!.....It is not a prime")
            return False
        y = y + 1
    # No divisor strictly between 1 and x was found.
    print("Yay!...it is a prime")
    return True
# Fix: the original read "def __name__ == ..." which is a SyntaxError and
# prevented the whole module from importing or running.
if __name__ == "__main__":
    prime_or_not()
| {
"content_hash": "96c7000e9a2b6bdb62251212740aa40f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 116,
"avg_line_length": 46.35,
"alnum_prop": 0.4919093851132686,
"repo_name": "NaveenK11/prime_or_not",
"id": "a5ad80eef0e3eab0188da8a6bb73c3e0d110def4",
"size": "971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prime.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1061"
}
],
"symlink_target": ""
} |
"""
Display Cores per socket options based on CPU selection
"""
from functools import reduce
def factors(n):
    """Return the set of all positive divisors of n.

    Walks candidate divisors up to sqrt(n); each hit i contributes the
    complementary pair (i, n // i).
    """
    divisor_pairs = [[i, n // i]
                     for i in range(1, int(pow(n, 0.5) + 1))
                     if n % i == 0]
    return set(reduce(list.__add__, divisor_pairs))
def get_options_list(field, **kwargs):
    """Return the valid cores-per-socket choices for the selected CPU count.

    CloudBolt passes the controlling field's value as 'control_value'; any
    divisor of the CPU count is a legal cores-per-socket setting. Returns
    None when no CPU count has been selected yet.
    """
    cpu_count = kwargs.get('control_value')
    if not cpu_count:
        return None
    return list(factors(int(cpu_count)))
| {
"content_hash": "d7c5a0efe4f4c1863f99ff037d31fd65",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 105,
"avg_line_length": 19.8,
"alnum_prop": 0.6060606060606061,
"repo_name": "CloudBoltSoftware/cloudbolt-forge",
"id": "d1a15a55ba08958a4173e2fece7ce96f4861225c",
"size": "396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "actions/cloudbolt_plugins/cores_and_sockets/cores_per_socket_options.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1665"
},
{
"name": "HTML",
"bytes": "165828"
},
{
"name": "JavaScript",
"bytes": "1871"
},
{
"name": "PowerShell",
"bytes": "5779"
},
{
"name": "Python",
"bytes": "1742154"
},
{
"name": "Shell",
"bytes": "16836"
}
],
"symlink_target": ""
} |
"""r2
This file loads the finished app from r2.config.middleware.
"""
# _strptime is imported with PyImport_ImportModuleNoBlock which can fail
# miserably when multiple threads try to import it simultaneously.
# import this here to get it over with
# see "Non Blocking Module Imports" in:
# http://code.google.com/p/modwsgi/wiki/ApplicationIssues
import _strptime
# defer the (hefty) import until it's actually needed. this allows
# modules below r2 to be imported before cython files are built, also
# provides a hefty speed boost to said imports when they don't need
# the app initialization.
def make_app(*args, **kwargs):
    """Build and return the WSGI app, importing the heavy middleware lazily."""
    # Import inside the call so merely importing this package stays cheap.
    from r2.config import middleware
    return middleware.make_app(*args, **kwargs)
| {
"content_hash": "9bed427d35a618f81ca1ad8fe278887c",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 72,
"avg_line_length": 38.63157894736842,
"alnum_prop": 0.7615803814713896,
"repo_name": "h2oloopan/easymerge",
"id": "0deeaaf9b3bd5e02ee8475f3aab0afb3468ed6cb",
"size": "1858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "EasyMerge/tests/reddit/r2/r2/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "13487"
},
{
"name": "CSS",
"bytes": "416664"
},
{
"name": "D",
"bytes": "2012"
},
{
"name": "Java",
"bytes": "583078"
},
{
"name": "JavaScript",
"bytes": "285692"
},
{
"name": "Python",
"bytes": "4212549"
},
{
"name": "Ruby",
"bytes": "920"
},
{
"name": "Shell",
"bytes": "40508"
},
{
"name": "TeX",
"bytes": "114952"
}
],
"symlink_target": ""
} |
def wait_for_enabled(self, timeout=None):
    """
    DEPRECATED! Use wait_until_enabled command instead.
    """
    # Warn on every call, then delegate to the replacement command and
    # return self so chained calls keep working.
    deprecation_msg = ('"wait_for_enabled" command is deprecated, '
                       'use "wait_until_enabled" instead!')
    self.logger.warning(deprecation_msg)
    self.wait_until_enabled(timeout=timeout)
    return self
| {
"content_hash": "6a6efe54d506e36d36615fa0c2e151f7",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 102,
"avg_line_length": 34.875,
"alnum_prop": 0.6989247311827957,
"repo_name": "lukas-linhart/pageobject",
"id": "a94956d2dadee4ae7d38adc01c72cf7afe4082cf",
"size": "279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pageobject/commands/wait_for_enabled.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "103810"
}
],
"symlink_target": ""
} |
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "Apache Kafka APIs for Amazon MSK clusters"
prefix = "kafka-cluster"
class Action(BaseAction):
def __init__(self, action: str = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
    """An ARN for kafka-cluster resources; the service field is fixed to the prefix."""

    def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
        super().__init__(
            service=prefix, resource=resource, region=region, account=account
        )
# Pre-built Action instances, one per IAM action in the kafka-cluster namespace.
AlterCluster = Action("AlterCluster")
AlterClusterDynamicConfiguration = Action("AlterClusterDynamicConfiguration")
AlterGroup = Action("AlterGroup")
AlterTopic = Action("AlterTopic")
AlterTopicDynamicConfiguration = Action("AlterTopicDynamicConfiguration")
AlterTransactionalId = Action("AlterTransactionalId")
Connect = Action("Connect")
CreateTopic = Action("CreateTopic")
DeleteGroup = Action("DeleteGroup")
DeleteTopic = Action("DeleteTopic")
DescribeCluster = Action("DescribeCluster")
DescribeClusterDynamicConfiguration = Action("DescribeClusterDynamicConfiguration")
DescribeGroup = Action("DescribeGroup")
DescribeTopic = Action("DescribeTopic")
DescribeTopicDynamicConfiguration = Action("DescribeTopicDynamicConfiguration")
DescribeTransactionalId = Action("DescribeTransactionalId")
ReadData = Action("ReadData")
WriteData = Action("WriteData")
WriteDataIdempotently = Action("WriteDataIdempotently")
| {
"content_hash": "bad9c24961294cee498462099814ac21",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 88,
"avg_line_length": 37.10526315789474,
"alnum_prop": 0.7595744680851064,
"repo_name": "cloudtools/awacs",
"id": "79d6a36c87edc1dd6186e46bd987e360742836af",
"size": "1526",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "awacs/kafka_cluster.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "343"
},
{
"name": "Python",
"bytes": "963483"
}
],
"symlink_target": ""
} |
"""The Coolmaster integration."""
import logging
from pycoolmasternet_async import CoolMasterNet
from homeassistant.components.climate import SCAN_INTERVAL
from homeassistant.const import CONF_HOST, CONF_PORT
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import DATA_COORDINATOR, DATA_INFO, DOMAIN
_LOGGER = logging.getLogger(__name__)
# Entity platforms this integration forwards config entries to.
PLATFORMS = ["climate"]
async def async_setup_entry(hass, entry):
    """Set up Coolmaster from a config entry."""
    host, port = entry.data[CONF_HOST], entry.data[CONF_PORT]
    coolmaster = CoolMasterNet(host, port)

    # Probe the unit; any connection problem means "not ready yet".
    try:
        info = await coolmaster.info()
    except (OSError, ConnectionRefusedError, TimeoutError) as err:
        raise ConfigEntryNotReady() from err
    if not info:
        raise ConfigEntryNotReady

    coordinator = CoolmasterDataUpdateCoordinator(hass, coolmaster)
    hass.data.setdefault(DOMAIN, {})
    await coordinator.async_config_entry_first_refresh()
    entry_data = {
        DATA_INFO: info,
        DATA_COORDINATOR: coordinator,
    }
    hass.data[DOMAIN][entry.entry_id] = entry_data
    hass.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
async def async_unload_entry(hass, entry):
    """Unload a Coolmaster config entry, dropping its stored data on success."""
    unloaded = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
    if not unloaded:
        return unloaded
    hass.data[DOMAIN].pop(entry.entry_id)
    return unloaded
class CoolmasterDataUpdateCoordinator(DataUpdateCoordinator):
    """Coordinator that periodically polls the Coolmaster unit for status."""

    def __init__(self, hass, coolmaster):
        """Store the client and configure the shared polling interval."""
        self._coolmaster = coolmaster
        super().__init__(
            hass, _LOGGER, name=DOMAIN, update_interval=SCAN_INTERVAL
        )

    async def _async_update_data(self):
        """Poll the unit, translating connection problems into UpdateFailed."""
        try:
            return await self._coolmaster.status()
        except (OSError, ConnectionRefusedError, TimeoutError) as err:
            raise UpdateFailed from err
| {
"content_hash": "76a010f1ef9ca1e4580bf2f47ffe9dd4",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 88,
"avg_line_length": 31.970588235294116,
"alnum_prop": 0.6895124195032198,
"repo_name": "kennedyshead/home-assistant",
"id": "e6cf6f362777a188455f8649342243d509c426ee",
"size": "2174",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/coolmaster/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "33970989"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import time
import numpy as np
import os
import keras
from keras.datasets import cifar10
from keras import backend as K
import tensorflow as tf
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants, signature_constants
np.random.seed(42)
# Random stand-in for a batch of 299x299 RGB images (Inception input shape).
input_data = np.random.rand(250, 299, 299, 3)

tmp_dir = os.path.join('/', 'tmp', 'inception_saved_models')
full_model_dir = 'full_saved_model'
optimized_model_dir = 'optimized_saved_model'
version = '1'
full_model_path = os.path.join(tmp_dir, full_model_dir, version)
optimized_model_path = os.path.join(tmp_dir, optimized_model_dir, version)

# NOTE(review): unused below; kept so the module attribute still exists.
PATH_TO_LOAD = full_model_path


def _make_batches(num_samples, batch_size):
    """Return (start, end) index pairs covering num_samples in batch_size chunks.

    The final pair is shorter when num_samples is not a multiple of batch_size.
    """
    return [(start, min(start + batch_size, num_samples))
            for start in range(0, num_samples, batch_size)]


def _benchmark_saved_model(export_dir, label, num_runs=5, batch_size=1):
    """Load a SavedModel and time batched inference over ``input_data``.

    This replaces two near-identical ~40-line blocks that differed only in
    the export directory and the printed label.

    Args:
        export_dir: SavedModel directory to load.
        label: prefix used in the timing print ('optimized'/'unoptimized').
        num_runs: number of timed passes over the whole dataset.
        batch_size: samples fed per session.run call.

    Returns:
        The concatenated predictions from the last timed run.
    """
    with tf.Session(graph=tf.Graph()) as sess:
        start_time = time.time()
        populated_graph = loader.load(
            sess=sess,
            tags=[tag_constants.SERVING],
            export_dir=export_dir)
        print('loading the saved_model took {}s'.format(time.time()-start_time))

        signature = populated_graph.signature_def['inception_signature']
        input_tensor_name = signature.inputs['in_images'].name
        output_tensor_name = signature.outputs['out_images'].name

        # Build all feed dicts up front so only sess.run is timed below.
        feed_dict_list = [
            {input_tensor_name: input_data[start:end]}
            for start, end in _make_batches(input_data.shape[0], batch_size)
        ]
        prediction = None
        for _ in range(num_runs):
            start_time = time.time()
            batch_predictions = [sess.run(output_tensor_name, feed_dict)
                                 for feed_dict in feed_dict_list]
            prediction = np.concatenate(batch_predictions)
            print('{} prediction took {}s'.format(label, time.time()-start_time))
        return prediction


# Same execution order and printed labels as before: the optimized model
# first, then the full (unoptimized) model.
_benchmark_saved_model(optimized_model_path, 'optimized')
_benchmark_saved_model(full_model_path, 'unoptimized')
| {
"content_hash": "6990b409cd2697ce150414dad235fd22",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 79,
"avg_line_length": 33.43801652892562,
"alnum_prop": 0.6638655462184874,
"repo_name": "eth-n/keras-in-tf",
"id": "3e184b123668922ebcac85a50e45aa1de072a84d",
"size": "4046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inception/mnist_test_savedmodel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25189"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class MotorsportConfig(AppConfig):
    """Django application configuration for the ``motorsport`` app."""
    name = 'motorsport'
| {
"content_hash": "19d46e4910870563da3e63b9235d1534",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 34,
"avg_line_length": 19,
"alnum_prop": 0.7684210526315789,
"repo_name": "SphinxNZ/game-on",
"id": "8f5701a0faa2951de2ee93ba07e64c2eac4f9afe",
"size": "95",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compete/motorsport/apps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "49133"
}
],
"symlink_target": ""
} |
"""
Description : Simple Python implementation of the Apriori Algorithm
Usage:
$python apriori.py -f DATASET.csv -s minSupport -c minConfidence
$python apriori.py -f DATASET.csv -s 0.15 -c 0.6
"""
import sys
from itertools import chain, combinations
from collections import defaultdict
from optparse import OptionParser
def subsets(arr):
    """Yield every non-empty subset of arr as a tuple, smallest sizes first."""
    return chain.from_iterable(
        combinations(arr, size + 1) for size, _ in enumerate(arr)
    )
def returnItemsWithMinSupport(itemSet, transactionList, minSupport, freqSet):
    """Return the candidates from itemSet whose support reaches minSupport.

    Side effect: increments freqSet (the global frequency counter) once for
    every (candidate, containing transaction) pair encountered.
    """
    localCounts = defaultdict(int)
    for candidate in itemSet:
        for transaction in transactionList:
            if candidate.issubset(transaction):
                freqSet[candidate] += 1
                localCounts[candidate] += 1

    numTransactions = len(transactionList)
    return {
        candidate
        for candidate, count in localCounts.items()
        if float(count) / numTransactions >= minSupport
    }
def joinSet(itemSet, length):
    """Join itemSet with itself, keeping only unions of exactly `length` elements."""
    return {
        first | second
        for first in itemSet
        for second in itemSet
        if len(first | second) == length
    }
def getItemSetTransactionList(data_iterator):
    """Build the transaction list and the set of candidate 1-itemsets."""
    transactionList = []
    itemSet = set()
    for record in data_iterator:
        transaction = frozenset(record)
        transactionList.append(transaction)
        # Generate 1-itemSets
        itemSet.update(frozenset([item]) for item in transaction)
    return itemSet, transactionList
def runApriori(data_iter, minSupport, minConfidence):
    """Run the apriori algorithm over data_iter (a record iterator).

    Returns both:
        - items: list of (itemset_tuple, support)
        - rules: list of ((antecedent_tuple, consequent_tuple), confidence)
    """
    itemSet, transactionList = getItemSetTransactionList(data_iter)

    # Maps every candidate itemset to its absolute frequency across all
    # transactions (shared/updated by returnItemsWithMinSupport).
    freqSet = defaultdict(int)
    # Maps itemset size k -> the frequent k-itemsets satisfying minSupport.
    largeSet = {}

    currentLSet = returnItemsWithMinSupport(
        itemSet, transactionList, minSupport, freqSet
    )
    k = 2
    while currentLSet:
        largeSet[k - 1] = currentLSet
        candidates = joinSet(currentLSet, k)
        currentLSet = returnItemsWithMinSupport(
            candidates, transactionList, minSupport, freqSet
        )
        k += 1

    def getSupport(item):
        """Relative support of item over all transactions."""
        return float(freqSet[item]) / len(transactionList)

    toRetItems = [
        (tuple(item), getSupport(item))
        for frequent in largeSet.values()
        for item in frequent
    ]

    toRetRules = []
    # Rules only make sense for itemsets of size >= 2, hence skipping the
    # first (size-1) entry.
    for frequent in list(largeSet.values())[1:]:
        for item in frequent:
            for element in map(frozenset, subsets(item)):
                remain = item.difference(element)
                if remain:
                    confidence = getSupport(item) / getSupport(element)
                    if confidence >= minConfidence:
                        toRetRules.append(
                            ((tuple(element), tuple(remain)), confidence)
                        )
    return toRetItems, toRetRules
def printResults(items, rules):
    """Print itemsets sorted by support, then rules sorted by confidence."""
    def by_score(pair):
        return pair[1]

    for itemset, support in sorted(items, key=by_score):
        print("item: %s , %.3f" % (str(itemset), support))
    print("\n------------------------ RULES:")
    for (pre, post), confidence in sorted(rules, key=by_score):
        print("Rule: %s ==> %s , %.3f" % (str(pre), str(post), confidence))
def to_str_results(items, rules):
    """Format itemsets and rules as strings, in the same order printResults prints them."""
    item_lines = [
        "item: %s , %.3f" % (str(itemset), support)
        for itemset, support in sorted(items, key=lambda pair: pair[1])
    ]
    rule_lines = [
        "Rule: %s ==> %s , %.3f" % (str(pre), str(post), confidence)
        for (pre, post), confidence in sorted(rules, key=lambda pair: pair[1])
    ]
    return item_lines, rule_lines
def dataFromFile(fname):
    """Read fname line by line, yielding each line as a frozenset of its
    comma-separated values (a trailing comma is stripped first)."""
    # BUG FIX: mode "rU" was deprecated since Python 3.4 and removed in 3.11;
    # plain "r" already gives universal-newline behavior in Python 3.
    with open(fname, "r") as file_iter:
        for line in file_iter:
            line = line.strip().rstrip(",")  # Remove trailing comma
            record = frozenset(line.split(","))
            yield record
if __name__ == "__main__":
    optparser = OptionParser()
    optparser.add_option(
        "-f", "--inputFile", dest="input", help="filename containing csv", default=None
    )
    optparser.add_option(
        "-s",
        "--minSupport",
        dest="minS",
        help="minimum support value",
        default=0.15,
        type="float",
    )
    optparser.add_option(
        "-c",
        "--minConfidence",
        dest="minC",
        help="minimum confidence value",
        default=0.6,
        type="float",
    )
    (options, args) = optparser.parse_args()

    # BUG FIX: the original `if input is None / elif input is not None / else`
    # chain covered every case in the first two branches, so the
    # "No dataset filename specified" error exit was unreachable dead code.
    # Actual behavior is preserved: stdin is the fallback when -f is omitted.
    if options.input is None:
        inFile = sys.stdin
    else:
        inFile = dataFromFile(options.input)

    minSupport = options.minS
    minConfidence = options.minC

    items, rules = runApriori(inFile, minSupport, minConfidence)
    printResults(items, rules)
| {
"content_hash": "dbed55b7fda8de10cb7a6f875843c5e8",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 103,
"avg_line_length": 30.924324324324324,
"alnum_prop": 0.6198217094913476,
"repo_name": "asaini/Apriori",
"id": "ff77470b349d27dc465e2efd35820b438cad5a43",
"size": "5721",
"binary": false,
"copies": "1",
"ref": "refs/heads/python3",
"path": "apriori.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14977"
}
],
"symlink_target": ""
} |
import os
from distutils.core import setup
import platform
from datetime import datetime
import py2exe
# Leave out any package not strictly necessary.
# TODO: There are almost certainly some more that can be removed.
# Definitely leave in the _ssl module, because CherryPy relies on
# it even though we're not using it.
excludes = [
    '_tkinter', 'Tkconstants', 'Tkinter', 'tcl',
    'win32com.gen_py', #leave this out to dynamically generate typelibs!
]
# These are Windows system DLLs, and we don't want to (and
# aren't allowed to) redistribute.
# NOTE(review): the API-MS-Win-* entries look like Windows "API set" stub
# DLLs; excluding them keeps py2exe from bundling OS components — confirm.
dll_excludes = [
    'msvcr71.dll',
    'mfc90.dll',
    'API-MS-Win-Core-ErrorHandling-L1-1-0.dll',
    'API-MS-Win-Core-LibraryLoader-L1-1-0.dll',
    'API-MS-Win-Core-LocalRegistry-L1-1-0.dll',
    'API-MS-Win-Core-Misc-L1-1-0.dll',
    'API-MS-Win-Core-ProcessThreads-L1-1-0.dll',
    'API-MS-Win-Core-Profile-L1-1-0.dll',
    'API-MS-Win-Core-Synch-L1-1-0.dll',
    'API-MS-Win-Core-SysInfo-L1-1-0.dll',
    'API-MS-Win-Security-Base-L1-1-0.dll',
    'CFGMGR32.dll',
    'DEVOBJ.dll',
    'MPR.dll',
    'POWRPROF.dll',
    'SETUPAPI.dll',
]
# List of packages that need to be included but that py2exe is
# missing. (placeholder)
#mod_includes = []
package_includes = []
# Options passed to py2exe via setup(options={'py2exe': ...}).
py2exe_opts= {
    'excludes': excludes,
    'dll_excludes': dll_excludes,
    'packages': package_includes,
    'compressed': True, #compresses Library
    'optimize': 2, #generate .pyo w/ -OO options
}
# bundle_files merges the library into fewer files; per the comment below,
# py2exe does not support it on 64-bit Windows builds.
if not platform.architecture()[0] == '64bit':
    py2exe_opts['bundle_files'] = 2 #not supported on win64
'''
This ugly section is needed to copy all the static files and
various distribution files to the directories included with
the py2exe dist directory.
'''
setup_data_files = []
# Pairs of (destination dir inside the dist, source dir relative to cwd).
extra_dirs = [
    ('licenses', 'distribution'),
    ('config', os.path.join('gilamon','config')),
    ('templates', os.path.join('gilamon','templates')),
    ('static', os.path.join('gilamon','static'))]
cwd = os.getcwd()
for extra_dir in extra_dirs:
    extra_path = os.path.join(cwd, extra_dir[1])
    # Walk each source tree so files in subdirectories keep their relative
    # layout under the destination directory.
    for root, dirs, files in os.walk(extra_path):
        # need this to make sure subdirectories are added by py2exe
        if os.path.abspath(root) == os.path.abspath(extra_dir[1]):
            root_path = extra_dir[0]
        else:
            root_path = os.path.join(
                extra_dir[0], os.path.relpath(root, extra_dir[1]))
        if files:
            extra_files = []
            for filename in files:
                extra_files.append(os.path.join(root, filename))
            setup_data_files.append((root_path, extra_files))
# Version's last component is the day-of-year (%j), so builds made on the
# same day share a version string.
setup (
    version = '0.8.2.' + datetime.now().strftime('%j'),
    description = 'GilaMon DFSR Monitor',
    name = 'GilaMon',
    author = 'Tim Gross',
    author_email = 'gross.timothy@gmail.com',
    url = 'https://bitbucket.org/tgross/gilamon',
    options = { 'py2exe': py2exe_opts },
    service = ['gilamon.gilamon_service'],
    data_files = setup_data_files,
)
| {
"content_hash": "092b270d924232d260806c9f539f54b2",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 80,
"avg_line_length": 32.104166666666664,
"alnum_prop": 0.6164828033744322,
"repo_name": "tgross/gilamon",
"id": "11290cac5c10e152702174fa9259c94c6fdced3e",
"size": "3082",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "1530"
},
{
"name": "Python",
"bytes": "40009"
},
{
"name": "Shell",
"bytes": "29"
}
],
"symlink_target": ""
} |
from django import forms
from django.contrib import auth
from models import UserInfo, RoleList, PermissionList
from django.contrib.auth.forms import ReadOnlyPasswordHashField, PasswordResetForm
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
class LoginUserForm(forms.Form):
    """Login form that authenticates an email/password pair during cleaning."""

    email = forms.CharField(
        label='Email',
        error_messages={'required': 'email should not be empty'},
        widget=forms.TextInput(attrs={'class': 'string email optional', 'size': 35}))
    password = forms.CharField(
        label='Password',
        error_messages={'required': 'password should not be empty'},
        widget=forms.PasswordInput(attrs={'class': 'password optional', 'size': 35}))

    def __init__(self, request=None, *args, **kwargs):
        """Remember the request and start with no authenticated user cached."""
        self.request = request
        self.user_cache = None
        super(LoginUserForm, self).__init__(*args, **kwargs)

    def clean_password(self):
        """Authenticate the credentials; reject unknown or inactive accounts."""
        email = self.cleaned_data.get('email')
        password = self.cleaned_data.get('password')
        if not (email and password):
            return self.cleaned_data
        self.user_cache = auth.authenticate(email=email, password=password)
        if self.user_cache is None:
            raise forms.ValidationError('email and password not matched !!!')
        if not self.user_cache.is_active:
            raise forms.ValidationError('this account has been forbit')
        return self.cleaned_data

    def get_user(self):
        """Return the user authenticated during cleaning, or None."""
        return self.user_cache
class UserCreationForm(forms.ModelForm):
    """User-creation form with a repeated-password confirmation check."""

    password1 = forms.CharField(widget=forms.PasswordInput)
    password2 = forms.CharField(widget=forms.PasswordInput)

    class Meta:
        model = UserInfo
        fields = ('email', 'username')

    def clean_password2(self):
        """Reject the form when the two password entries differ."""
        first = self.cleaned_data.get('password1')
        second = self.cleaned_data.get('password2')
        if first and second and first != second:
            raise forms.ValidationError('password not correct')
        return second

    def save(self, commit=True):
        """Hash the chosen password onto the user before optionally saving."""
        user = super(UserCreationForm, self).save(commit=False)
        user.set_password(self.cleaned_data['password1'])
        if commit:
            user.save()
        return user
class UserChangeForm(forms.ModelForm):
    """Admin form for editing a user; displays the password hash read-only."""
    password = ReadOnlyPasswordHashField()
    class Meta:
        model = UserInfo
        fields = ('email', 'username', 'password')
    def clean_password(self):
        # Always return the initial hash so this form can never change it.
        return self.initial['password']
class UserAdmin(BaseUserAdmin):
    """Admin configuration wiring the custom user forms onto UserInfo."""
    # Form used when editing an existing user.
    form = UserChangeForm
    # Form used when creating a new user.
    add_form = UserCreationForm
    list_display = ('email', 'username', 'is_superuser', 'is_active')
    list_filter = ('is_superuser', 'is_active')
    fieldsets = (
        (None, {'fields': ('email', 'username', 'password')}),
        ('Personal info', {'fields': ('last_login',)}),
        ('Permissions', {'fields': ('is_superuser','is_active')}),
    )
    # Field layout used only on the "add user" page.
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('email', 'username', 'last_login', 'password1', 'password2', 'is_superuser', 'is_active')}
        ),
    )
    search_fields = ('email', 'username')
    ordering = ('email', 'username')
    # No many-to-many fields to edit on this admin.
    filter_horizontal = ()
class ChangePasswordForm(forms.Form):
    """Form letting an authenticated user change their own password.

    Validates the current password against the bound user and requires the
    new password to be entered twice and be at least 6 characters long.
    """

    old_password = forms.CharField(
        label='Current Password',
        error_messages={'required': 'please input Current password'},
        widget=forms.PasswordInput(attrs={'class': 'form-control'}))
    new_password1 = forms.CharField(
        label='New Password',
        error_messages={'required': 'please input the new password'},
        widget=forms.PasswordInput(attrs={'class': 'form-control'}))
    new_password2 = forms.CharField(
        label='New Password Confirm',
        error_messages={'required': 'please input the new password again'},
        widget=forms.PasswordInput(attrs={'class': 'form-control'}))

    def __init__(self, user, *args, **kwargs):
        """Bind the form to the user whose password is being changed."""
        self.user = user
        super(ChangePasswordForm, self).__init__(*args, **kwargs)

    def clean_old_password(self):
        """Check the supplied current password against the bound user."""
        old_password = self.cleaned_data["old_password"]
        if not self.user.check_password(old_password):
            raise forms.ValidationError(u'current password incorrect')
        return old_password

    def clean_new_password2(self):
        """Validate length and equality of the two new-password entries."""
        password1 = self.cleaned_data.get('new_password1')
        password2 = self.cleaned_data.get('new_password2')
        # BUG FIX: the original called len(password1) unconditionally, which
        # raised TypeError (HTTP 500) whenever new_password1 failed its own
        # validation and was therefore missing (None) from cleaned_data.
        if password1 is not None and len(password1) < 6:
            raise forms.ValidationError('New Password should be bit than 6 bit')
        if password1 and password2:
            if password1 != password2:
                raise forms.ValidationError('password is not same with before')
        return password2

    def save(self, commit=True):
        """Set the new password on the user and optionally persist it."""
        self.user.set_password(self.cleaned_data['new_password1'])
        if commit:
            self.user.save()
        return self.user
class PasswordResetForm(PasswordResetForm):
    """Password-reset form intended to customize reset-email sending.

    BUG FIX: the original override was dead code on three counts — it was
    named ``end_email`` (Django's hook is ``send_mail``), its receiver was
    misspelled ``slef``, and it called ``super.send_email(...)``: ``super``
    was never called, and the parent defines no ``send_email``.  The method
    is renamed and repaired so it actually runs and delegates correctly;
    the parameter list matches Django's ``PasswordResetForm.send_mail``.
    """

    def send_mail(self, subject_template_name, email_template_name, context, from_email, to_email,
                  html_email_template_name=None):
        """Delegate to Django's standard password-reset mail sending."""
        super(PasswordResetForm, self).send_mail(
            subject_template_name, email_template_name, context,
            from_email, to_email, html_email_template_name)
| {
"content_hash": "9a919ee36ed21aa92d7423d7d0481839",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 119,
"avg_line_length": 40.16541353383459,
"alnum_prop": 0.6233620366903782,
"repo_name": "JiaLei123/Tuna",
"id": "845db10c13a5e7e95377955dcb1447053a2bcbe4",
"size": "5390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "accounts/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "259614"
},
{
"name": "HTML",
"bytes": "1869737"
},
{
"name": "JavaScript",
"bytes": "2383675"
},
{
"name": "PHP",
"bytes": "1684"
},
{
"name": "Python",
"bytes": "39403"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from math import log, ceil
import myhdl
from myhdl import (Signal, ResetSignal, intbv, modbv, concat,
always_comb, always_seq,)
from rhea.system import FIFOBus
from .fifo_mem import fifo_mem
from .fifo_syncers import sync_reset, sync_mbits
@myhdl.block
def fifo_async(clock_write, clock_read, fifobus, reset, size=128):
    """
    The following is a general purpose, platform independent
    asynchronous FIFO (dual clock domains).

    Cross-clock boundary FIFO, based on:
    "Simulation and Synthesis Techniques for Asynchronous FIFO Design"

    Typically in the "rhea" package the FIFOBus interface is used to
    interface with the FIFOs
    """
    # @todo: use the clock_write and clock_read from the FIFOBus
    # @todo: interface, make this interface compliant with the
    # @todo: fifos: fifo_async(reset, clock, fifobus)

    # for simplification the memory size is forced to a power of
    # two - full address range, ptr (mem indexes) will wrap
    asz = int(ceil(log(size, 2)))
    fbus = fifobus  # alias

    # an extra bit is used to determine full vs. empty (see paper)
    waddr = Signal(modbv(0)[asz:])
    raddr = Signal(modbv(0)[asz:])
    wptr = Signal(modbv(0)[asz+1:])
    rptr = Signal(modbv(0)[asz+1:])
    wq2_rptr = Signal(intbv(0)[asz+1:])
    rq2_wptr = Signal(intbv(0)[asz+1:])

    wfull = Signal(bool(0))
    rempty = Signal(bool(1))

    # sync'd resets, the input reset is more than likely sync'd to one
    # of the clock domains, sync both regardless ...
    # BUG FIX: ``async`` became a reserved keyword in Python 3.7, so the
    # original ``async=reset.async`` no longer even parses; MyHDL renamed
    # the ResetSignal argument/attribute to ``isasync``.
    wrst = ResetSignal(reset.active, active=reset.active, isasync=reset.isasync)
    rrst = ResetSignal(reset.active, active=reset.active, isasync=reset.isasync)

    # @todo: if ResetSignal use the active attribute to determine
    #        if 'not reset' or 'reset'.  If the active==0 then
    #        set invert=False
    sr1_inst = sync_reset(clock_write, reset, wrst)
    sr2_inst = sync_reset(clock_read, reset, rrst)

    # pointers crossing clock domains go through two-register synchronizers
    mb1_inst = sync_mbits(clock_write, wrst, rptr, wq2_rptr)
    mb2_inst = sync_mbits(clock_read, rrst, wptr, rq2_wptr)

    @always_comb
    def beh_assigns():
        fbus.empty.next = rempty
        fbus.full.next = wfull

    _we = Signal(bool(0))
    _re = Signal(bool(0))

    @always_comb
    def beh_wr():
        _we.next = fbus.write and not fbus.full
        # NOTE(review): the memory read enable is held False here, so the
        # read data path is driven by raddr alone — confirm fifo_mem reads
        # combinationally/registered without an enable.
        _re.next = False

    # unused but needed for the fifo_mem block
    wad = Signal(waddr.val)

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Memory for the FIFO
    fifomem_inst = fifo_mem(
        clock_write, _we, fbus.write_data, waddr,
        clock_read, _re, fbus.read_data, raddr, wad
    )

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # --Text from the paper--
    # The read pointer is a dual nbit Gray code counter.  The nbit
    # pointer (rptr) is passed to the write clock domain through the
    # syncs.  The (n-1)bit pointer (raddr) is used to address the FIFO
    # buffer.  The FIFO empty output is registered and is asserted on
    # the next rising rclk edge when the next rptr value equals the
    # sync wptr value.
    rbin = Signal(modbv(0)[asz+1:])

    @always_seq(clock_read.posedge, reset=rrst)
    def beh_rptrs():
        # increment when read and not empty
        rbn = rbin + (fbus.read and not rempty)
        rbin.next = rbn
        rpn = (rbn >> 1) ^ rbn    # gray counter
        rptr.next = rpn
        # FIFO empty when the next rptr == sync'd wptr or on reset
        rempty.next = (rpn == rq2_wptr)
        # the data is register from the memory, the data is delayed
        fbus.read_valid.next = fbus.read and not rempty

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # --Text from the paper--
    # The write pointer is a dual nbit gray code conter.  The nbit
    # pointer (wptr) is passed to the read clock domain through the
    # sync'ers.  The (n-1)-bit pointer (waddr) is used ot address the
    # FIFO buffer.  The FIFO full asserted when the next modified
    # value equals the sync'd and modified wrptr2 value (except MSBs).
    wbin = Signal(modbv(0)[asz+1:])

    @always_seq(clock_write.posedge, reset=wrst)
    def beh_wptrs():
        # increment when write and not full
        wbn = wbin + (fbus.write and not wfull)
        wbin.next = wbn
        wpn = (wbn >> 1) ^ wbn    # gray counter
        wptr.next = wpn
        # full when the next gray pointer equals the sync'd read pointer
        # with its top two bits inverted (see paper)
        wfull.next = (wpn == concat(~wq2_rptr[asz+1:asz-1],
                                    wq2_rptr[asz-1:0]))

    @always_comb
    def beh_addrs():
        waddr.next = wbin[asz:0]
        raddr.next = rbin[asz:0]

    return myhdl.instances()

# Advertise the bus interface type this block expects.
fifo_async.fbus_intf = FIFOBus
"content_hash": "dca4dc56682601623c6d916a8d1c6bb9",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 76,
"avg_line_length": 34.838235294117645,
"alnum_prop": 0.6076403545799915,
"repo_name": "NickShaffner/rhea",
"id": "93c7c0b7beb1728c67ec7d4ec8ccb86c1753c5bc",
"size": "4835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rhea/cores/fifo/fifo_async.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2185"
},
{
"name": "Python",
"bytes": "671881"
},
{
"name": "Shell",
"bytes": "1590"
},
{
"name": "VHDL",
"bytes": "10452"
},
{
"name": "Verilog",
"bytes": "22193"
}
],
"symlink_target": ""
} |
import wx
from cairis.core.armid import *
from ComponentViewPanel import ComponentViewPanel
from cairis.core.ComponentViewParameters import ComponentViewParameters
import DialogClassParameters
__author__ = 'Shamal Faily'
class ComponentViewDialog(wx.Dialog):
  """Dialog for creating or editing a component view."""

  def __init__(self, parent, parameters):
    wx.Dialog.__init__(
        self, parent, parameters.id(), parameters.label(),
        style=wx.DEFAULT_DIALOG_STYLE | wx.MAXIMIZE_BOX | wx.THICK_FRAME | wx.RESIZE_BORDER,
        size=(400, 500))
    self.theName = ''
    self.theSynopsis = ''
    self.theComponents = []
    self.theConnectors = []
    self.theComponentViewId = -1
    self.panel = 0
    self.buildControls(parameters)
    self.commitVerb = 'Add'

  def buildControls(self, parameters):
    """Lay out the component-view panel and wire up the commit button."""
    sizer = wx.BoxSizer(wx.VERTICAL)
    self.panel = ComponentViewPanel(self)
    self.panel.buildControls(parameters.createFlag())
    sizer.Add(self.panel, 1, wx.EXPAND)
    self.SetSizer(sizer)
    wx.EVT_BUTTON(self, COMPONENTVIEW_BUTTONCOMMIT_ID, self.onCommit)

  def load(self, cv):
    """Populate the dialog from an existing component view and switch to edit mode."""
    self.theComponentViewId = cv.id()
    self.panel.loadControls(cv)
    self.commitVerb = 'Edit'

  def onCommit(self, evt):
    """Pull values from the controls, validate them, and close on success."""
    commitLabel = self.commitVerb + ' component view'
    self.theName = self.FindWindowById(COMPONENTVIEW_TEXTNAME_ID).GetValue()
    self.theSynopsis = self.FindWindowById(COMPONENTVIEW_TEXTSYNOPSIS_ID).GetValue()
    self.theComponents = self.FindWindowById(COMPONENTVIEW_LISTCOMPONENTS_ID).dimensions()
    self.theConnectors = self.FindWindowById(COMPONENTVIEW_LISTCONNECTORS_ID).dimensions()
    # Table-driven validation: first failing check reports and aborts.
    checks = (
        (len(self.theName) == 0, 'Name cannot be empty'),
        (len(self.theSynopsis) == 0, 'Synopsis cannot be empty'),
        (len(self.theComponents) == 0, 'Components need to be defined for component views'),
    )
    for failed, message in checks:
      if failed:
        dlg = wx.MessageDialog(self, message, commitLabel, wx.OK)
        dlg.ShowModal()
        dlg.Destroy()
        return
    self.EndModal(COMPONENTVIEW_BUTTONCOMMIT_ID)

  def parameters(self):
    """Return ComponentViewParameters reflecting the current dialog state."""
    parameters = ComponentViewParameters(
        self.theName, self.theSynopsis, [], [], [], [], [],
        self.theComponents, self.theConnectors)
    parameters.setId(self.theComponentViewId)
    return parameters
| {
"content_hash": "c8247b85e1689034fd651a5ee51b88cc",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 163,
"avg_line_length": 37.13636363636363,
"alnum_prop": 0.7168502651978784,
"repo_name": "nathanbjenx/cairis",
"id": "dbd43e7be733183c21f75c578a1aeb859c8b2bda",
"size": "3250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cairis/gui/ComponentViewDialog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "588306"
},
{
"name": "Dockerfile",
"bytes": "829"
},
{
"name": "Gherkin",
"bytes": "1615"
},
{
"name": "HTML",
"bytes": "1664076"
},
{
"name": "JavaScript",
"bytes": "416319"
},
{
"name": "Mako",
"bytes": "13226"
},
{
"name": "PLpgSQL",
"bytes": "1494775"
},
{
"name": "Python",
"bytes": "4006311"
},
{
"name": "Shell",
"bytes": "7035"
}
],
"symlink_target": ""
} |
# Package version string for the Flask application.
__version__ = '1.0.0'
from flask import Flask
# Module-level application instance the view modules register routes on.
app = Flask(__name__)
# Imported at the bottom so the views module can import ``app`` from this
# package without a circular-import failure at load time.
from app.views import main_views
| {
"content_hash": "4bd59c4fb7ec2aa539e76b22f089b033",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 32,
"avg_line_length": 11.777777777777779,
"alnum_prop": 0.6509433962264151,
"repo_name": "yangwii/graphql_py_client",
"id": "4653cbbe2811a753eaed464f1207948b93c7e402",
"size": "131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2200"
},
{
"name": "HTML",
"bytes": "11800"
},
{
"name": "JavaScript",
"bytes": "10271"
},
{
"name": "Python",
"bytes": "4008"
}
],
"symlink_target": ""
} |
import copy
from .. import create_aiexperiment
from .constants import (
DEFAULT_CLASSIFICATION_METRIC,
DEFAULT_EARLY_STOPPING_ROUNDS,
DEFAULT_EVALS_NAME,
DEFAULT_ITERS_PER_DIM,
DEFAULT_NUM_BOOST_ROUND,
DEFAULT_REGRESSION_METRIC,
DEFAULT_SEARCH_PARAMS,
MAX_BO_ITERATIONS,
METRICS_OPTIMIZATION_STRATEGY,
PARAMETER_INFORMATION,
SIGOPT_DEFAULTS_SOURCE_NAME,
SIGOPT_DEFAULTS_SOURCE_PRIORITY,
SUPPORTED_AUTOBOUND_PARAMS,
SUPPORTED_METRICS_TO_OPTIMIZE,
)
from .run import parse_run_options
from .run import run as XGBRunWrapper
# Marker key used to tag experiments created by this XGBoost integration.
XGB_EXPERIMENT_KEYWORD = '_IS_XGB_EXPERIMENT'
# Sentinel value to distinguish between default early_stopping_rounds and input early_stopping_rounds w/ default value
class DefaultEarlyStoppingRounds:
  # Sentinel type only — carries no data; a single instance is created below.
  pass
# Module-private singleton sentinel that XGBExperiment compares against.
_default_early_stopping_rounds = DefaultEarlyStoppingRounds()
class XGBExperiment:
  """Parse an XGBoost experiment config and drive the SigOpt optimization loop.

  The user-supplied ``experiment_config`` dict is deep-copied, then filled in
  with defaults (metrics, search parameters, budget, parallel bandwidth,
  type, metadata) before being handed to ``create_aiexperiment``.
  """

  def __init__(self, experiment_config, dtrain, evals, params, num_boost_round, early_stopping_rounds, run_options):
    """Store training inputs and resolve the early-stopping default.

    ``early_stopping_rounds`` arrives as a module-level sentinel when the
    caller omitted it, so an explicit ``None`` (deactivate early stopping)
    can be told apart from "argument not given".
    """
    self.experiment_config_parsed = copy.deepcopy(experiment_config)
    self.dtrain = dtrain
    self.evals = evals
    self.params = params
    self.num_boost_round = num_boost_round
    self.run_options = run_options
    # Identity check: `is` cannot be confused by user values with unusual
    # __eq__ semantics (the previous `==` could even raise for array-likes).
    if early_stopping_rounds is _default_early_stopping_rounds:
      self.early_stopping_rounds = DEFAULT_EARLY_STOPPING_ROUNDS
      self.early_stopping_round_used_sigopt_default = True
    else:
      self.early_stopping_rounds = early_stopping_rounds  # if None, deactivate early stopping
      self.early_stopping_round_used_sigopt_default = False
    self.sigopt_experiment = None

  def parse_and_create_metrics(self):
    """Validate user-supplied metrics or install a sensible default, then
    prefix the optimized metric name with the eval-set name XGBoost reports.
    """
    if 'metrics' in self.experiment_config_parsed and isinstance(self.experiment_config_parsed['metrics'], list):
      for metric in self.experiment_config_parsed['metrics']:
        if metric['strategy'] == 'optimize' and metric['name'] not in SUPPORTED_METRICS_TO_OPTIMIZE:
          raise ValueError(
            f"The chosen metric to optimize, {metric['name']}, is not supported."
          )
    else:
      if 'metrics' not in self.experiment_config_parsed:  # pick a default metric
        if 'objective' in self.params:
          objective = self.params['objective']
          if objective.split(':')[0] in ['binary', 'multi']:
            metric_to_optimize = DEFAULT_CLASSIFICATION_METRIC
          else:
            metric_to_optimize = DEFAULT_REGRESSION_METRIC  # do regression if anything else (including ranking)
        else:
          metric_to_optimize = DEFAULT_REGRESSION_METRIC
      else:
        # 'metrics' is present but not a list: treat it as a single metric name.
        if self.experiment_config_parsed['metrics'] not in SUPPORTED_METRICS_TO_OPTIMIZE:
          raise ValueError(
            f"The chosen metric to optimize, {self.experiment_config_parsed['metrics']}, is not supported."
          )
        metric_to_optimize = self.experiment_config_parsed['metrics']
      optimization_strategy = METRICS_OPTIMIZATION_STRATEGY[metric_to_optimize]
      self.experiment_config_parsed['metrics'] = [{
        'name': metric_to_optimize,
        'strategy': 'optimize',
        'objective': optimization_strategy
      }]
    # change optimized metric to reflect updated name
    for metric in self.experiment_config_parsed['metrics']:
      if metric['strategy'] == 'optimize':
        if isinstance(self.evals, list):
          metric['name'] = self.evals[0][1] + '-' + metric['name']  # optimize metric on first eval set by default
        else:
          metric['name'] = DEFAULT_EVALS_NAME + '-' + metric['name']

  def check_and_fill_parameter_types(self):
    """Fill in the canonical type for known parameters; reject mismatches."""
    params_to_check = [p for p in self.experiment_config_parsed['parameters'] if p['name'] in PARAMETER_INFORMATION]
    for parameter in params_to_check:
      parameter_name = parameter['name']
      proper_parameter_type = PARAMETER_INFORMATION[parameter_name]['type']
      if 'type' in parameter:
        experiment_config_parameter_type = parameter['type']
        if experiment_config_parameter_type != proper_parameter_type:
          raise ValueError(
            f'Parameter {parameter_name} type listed incorrectly as {experiment_config_parameter_type} '
            f'in experiment config, and should be listed as having type {proper_parameter_type}.'
          )
      else:
        parameter['type'] = proper_parameter_type

  def check_and_fill_parameter_bounds(self):
    """Autofill bounds for supported numeric parameters and validate
    user-supplied categorical values.
    """
    params_to_check = [p for p in self.experiment_config_parsed['parameters'] if p['name'] in PARAMETER_INFORMATION]
    for parameter in params_to_check:
      parameter_name = parameter['name']
      if 'bounds' not in parameter and PARAMETER_INFORMATION[parameter_name]['type'] in ['double', 'int']:
        if parameter_name not in SUPPORTED_AUTOBOUND_PARAMS:
          # BUG FIX: the message previously lacked the f-prefix (and used a
          # nonexistent name), so users saw a literal '{param_name}'.
          raise ValueError(f'We do not support autoselection of bounds for {parameter_name}.')
        param_info = PARAMETER_INFORMATION[parameter_name]
        transformation = param_info.get('transformation')
        parameter.update(
          dict(
            name=parameter_name,
            type=param_info['type'],
            bounds=param_info['bounds'],
            transformation=transformation
          )
        )
      else:
        if parameter['type'] == 'categorical':
          if 'categorical_values' not in parameter:
            raise ValueError(f'We do not support autoselection of categorical_values for {parameter_name}.')
          proper_parameter_values = PARAMETER_INFORMATION[parameter_name]['values']
          config_parameter_values = parameter['categorical_values']
          # BUG FIX: use >= (superset-or-equal). The previous strict `>`
          # rejected configs listing every permissible value, although such
          # a config is a valid (improper) subset per the error message.
          if not set(proper_parameter_values) >= set(config_parameter_values):
            raise ValueError(
              f'The set of possible categorical values {config_parameter_values} is not a subset of '
              f'the permissible categorical values {proper_parameter_values}.'
            )
        else:
          pass  # TODO: check bounds for double, int, and grid parameters in later PR

  def parse_and_create_parameters(self):
    """Install the default search space when none is given, otherwise
    validate/complete the user's; then reject conflicts with fixed params.
    """
    if 'parameters' not in self.experiment_config_parsed:
      default_search_space = []
      for parameter_name in DEFAULT_SEARCH_PARAMS:
        param_info = PARAMETER_INFORMATION[parameter_name]
        transformation = param_info.get('transformation')
        default_search_space.append(
          dict(
            name=parameter_name,
            type=param_info['type'],
            bounds=param_info['bounds'],
            transformation=transformation
          )
        )
      self.experiment_config_parsed['parameters'] = default_search_space
    else:
      self.check_and_fill_parameter_types()
      self.check_and_fill_parameter_bounds()
    # Check key overlap between parameters to be optimized and parameters that are set
    params_optimized = [param['name'] for param in self.experiment_config_parsed['parameters']]
    params_overlap = set(params_optimized) & set(self.params.keys())
    if len(params_overlap) != 0:
      # BUG FIX: trailing space added — the two literals previously joined as
      # "...{params_overlap}.Parameter names...".
      raise ValueError(
        f'There is overlap between tuned parameters and user-set parameters: {params_overlap}. '
        'Parameter names cannot be defined in both locations'
      )
    # Check that num_boost_round is not set by both sigopt experiment and user
    if self.num_boost_round and 'num_boost_round' in params_optimized:
      # BUG FIX: trailing space added — previously joined as "argumentsto".
      raise ValueError(
        'num_boost_round has been denoted as an optimization parameter, but also has been fixed in the input arguments '
        f'to have value {self.num_boost_round}. Please remove it from either the search space or the input arguments.'
      )

  def parse_and_create_aiexperiment(self):
    """Finish filling defaults (budget, bandwidth, type, metadata) and
    create the SigOpt AIExperiment.
    """
    self.parse_and_create_metrics()
    self.parse_and_create_parameters()
    if 'budget' not in self.experiment_config_parsed:
      # Scale the budget with search-space dimension, capped globally.
      chosen_budget = DEFAULT_ITERS_PER_DIM * len(self.experiment_config_parsed['parameters'])
      self.experiment_config_parsed['budget'] = min(chosen_budget, MAX_BO_ITERATIONS)
    if 'parallel_bandwidth' not in self.experiment_config_parsed:
      self.experiment_config_parsed['parallel_bandwidth'] = 1
    if 'type' not in self.experiment_config_parsed:
      self.experiment_config_parsed['type'] = 'offline'
    # Tag the experiment so downstream tooling can identify XGB integrations.
    self.experiment_config_parsed['metadata'] = {XGB_EXPERIMENT_KEYWORD: 'True'}
    self.sigopt_experiment = create_aiexperiment(**self.experiment_config_parsed)

  def run_experiment(self):
    """Execute the optimization loop, training one XGBoost model per run."""
    for run in self.sigopt_experiment.loop():
      with run:
        # num_boost_round priority: fixed input arg > SigOpt suggestion > default.
        if self.num_boost_round:
          num_boost_round_run = self.num_boost_round
        elif 'num_boost_round' in run.params:
          num_boost_round_run = run.params['num_boost_round']
        else:
          num_boost_round_run = DEFAULT_NUM_BOOST_ROUND
        self.run_options['run'] = run
        XGBRunWrapper(
          self.params,
          self.dtrain,
          num_boost_round=num_boost_round_run,
          evals=self.evals,
          early_stopping_rounds=self.early_stopping_rounds,
          verbose_eval=False,
          run_options=self.run_options,
        )
        # mark early stopping rounds as SigOpt Default
        if self.early_stopping_round_used_sigopt_default:
          run.set_parameters_sources_meta(
            SIGOPT_DEFAULTS_SOURCE_NAME,
            sort=SIGOPT_DEFAULTS_SOURCE_PRIORITY,
            default_show=True,
          )
          run.set_parameters_source(
            {'early_stopping_rounds': DEFAULT_EARLY_STOPPING_ROUNDS},
            SIGOPT_DEFAULTS_SOURCE_NAME,
          )
def experiment(
  experiment_config,
  dtrain,
  evals,
  params,
  num_boost_round=None,
  early_stopping_rounds=_default_early_stopping_rounds,
  run_options=None,
):
  """Create and run a SigOpt-driven XGBoost hyperparameter experiment.

  Parses ``run_options``, builds an :class:`XGBExperiment`, creates the
  underlying AIExperiment, runs the optimization loop, and returns the
  resulting SigOpt experiment object.
  """
  xgb_experiment = XGBExperiment(
    experiment_config,
    dtrain,
    evals,
    params,
    num_boost_round,
    early_stopping_rounds,
    parse_run_options(run_options),
  )
  xgb_experiment.parse_and_create_aiexperiment()
  xgb_experiment.run_experiment()
  return xgb_experiment.sigopt_experiment
| {
"content_hash": "9bdd1c64b2b8dce237da67dbac8a201b",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 119,
"avg_line_length": 41.63598326359833,
"alnum_prop": 0.6721937493719224,
"repo_name": "sigopt/sigopt-python",
"id": "885472abbf179549815836a382da81bba66b5ad5",
"size": "10022",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sigopt/xgboost/experiment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2411"
},
{
"name": "Makefile",
"bytes": "545"
},
{
"name": "Python",
"bytes": "542280"
},
{
"name": "Shell",
"bytes": "868"
}
],
"symlink_target": ""
} |
import pytest
from rdflib.graph import Graph
from rdflib.namespace import RDFS
from rdflib.term import BNode, Literal
# Triple inserted by the fixture so removal tests have known data to delete.
remove_me = (BNode(), RDFS.label, Literal("remove_me"))


@pytest.fixture(scope="function")
def get_store(request):
    """Yield a freshly opened default-backend Graph pre-loaded with `remove_me`;
    the graph is closed on teardown."""
    graph = Graph(store="default")
    graph.open("store")
    graph.add(remove_me)
    yield graph
    graph.close()
def test_add(get_store):
    """Adding a fresh (BNode, rdfs:label, Literal) triple must not raise."""
    graph = get_store
    node = BNode()
    graph.add((node, RDFS.label, Literal("foo")))
def test_remove(get_store):
    """Removing a specific triple and then wildcard-removing all must not raise."""
    graph = get_store
    graph.remove(remove_me)
    graph.remove((None, None, None))
def test_triples(get_store):
    """Iterating the store as (s, p, o) triples must succeed."""
    for _subject, _predicate, _object in get_store:
        pass
| {
"content_hash": "78a5d8a4a3342820c17384e2cfc7aabd",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 55,
"avg_line_length": 19.13888888888889,
"alnum_prop": 0.6632801161103048,
"repo_name": "RDFLib/rdflib",
"id": "e37530647a00cc3f756b3871939ed767cc7412cf",
"size": "689",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/test_store/test_store_triple_store.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "635"
},
{
"name": "HTML",
"bytes": "41303"
},
{
"name": "Python",
"bytes": "2828721"
},
{
"name": "Ruby",
"bytes": "31777"
},
{
"name": "Shell",
"bytes": "6030"
},
{
"name": "XSLT",
"bytes": "1588"
}
],
"symlink_target": ""
} |
import sys
import pyodbc
import codecs
import re
# Python 2 only: reload() re-exposes sys.setdefaultencoding (hidden by
# site.py) so UTF-8 can be forced as the implicit str/unicode codec.
# NOTE(review): this is a known hack that can mask encoding bugs and has
# no Python 3 equivalent.
reload(sys)
sys.setdefaultencoding('utf8')
class FileMakerFmpDoc:
    """Whole FMPXMLRESULT document: field metadata plus a list of rows."""

    def __init__(self, meta, data):
        """meta: list of field names; data: iterable of row sequences."""
        # BUG FIX: both attributes are now instance attributes. The original
        # class-level ``data = []`` list was shared by every instance, so rows
        # accumulated across documents. The unused ``fmpMeta`` alias is gone.
        self.meta = FileMakerFmpMeta(meta)
        self.data = []
        for record in data:
            self.data.append(FileMakerFmpRow(record))

    def toXml(self):
        """Serialize the document in FileMaker's FMPXMLRESULT XML format."""
        # Build via join instead of quadratic string concatenation.
        parts = ['<?xml version="1.0" encoding="UTF-8" ?><FMPXMLRESULT xmlns="http://www.filemaker.com/fmpxmlresult">']
        parts.append(self.meta.toXml())
        parts.append('<RESULTSET>')
        for row in self.data:
            parts.append(row.toXml())
        parts.append('</RESULTSET></FMPXMLRESULT>')
        return ''.join(parts)
class FileMakerFmpMeta:
    """METADATA section of an FMPXMLRESULT document: ordered field names."""

    def __init__(self, metaFields):
        # Instance attribute; the original class-level ``meta = []`` default
        # was a shared-mutable-state trap (removed for safety).
        self.meta = metaFields

    def toXml(self):
        """Return the <METADATA> element with one <FIELD NAME=.../> per field."""
        # join() builds the string in one pass instead of repeated '+'.
        fields = ''.join('<FIELD NAME="' + field + '"/>' for field in self.meta)
        return '<METADATA>' + fields + '</METADATA>'
class FileMakerFmpRow:
    """RESULTSET row: one <COL><DATA>...</DATA></COL> element per column."""

    def __init__(self, rowData):
        # Instance attribute; the original class-level ``row = []`` default
        # was a shared-mutable-state trap (removed for safety).
        self.row = rowData

    def toXml(self):
        """Return the <ROW> element; None columns become empty <DATA> tags."""
        cols = []
        for col in self.row:
            # `is None` instead of `== None` (identity test is the Python idiom).
            value = '' if col is None else col
            cols.append('<COL><DATA>' + value + '</DATA></COL>')
        return '<ROW>' + ''.join(cols) + '</ROW>'
# Script entry point (Python 2): expects exactly two CLI arguments,
# <configFile> <resultFile>; silently does nothing otherwise.
if len(sys.argv) == 3:
    configFile = sys.argv[1]
    resultFile = sys.argv[2]
    # Pull ODBC credentials out of a "key = value" style config file.
    config = open(configFile, 'r').read()
    dsn = re.search('filemaker_database *= *(.+)', config).group(1)
    uid = re.search('filemaker_username *= *(.+)', config).group(1)
    pwd = re.search('filemaker_password *= *(.+)', config).group(1)
    con = 'DSN='+ dsn +';UID='+ uid +';PWD='+ pwd
    conn = pyodbc.connect(con) # the DSN value should be the name of the entry in odbc.ini, not freetds.conf
    crsr = conn.cursor()
    # Fetch the register rows and wrap them in the FMPXMLRESULT builder classes.
    rows = crsr.execute("select TMNr_plus_texLett, ddbSerIDP, ddbSer, ddbVol, ddbDoc from Hauptregister").fetchall()
    fields = ['TMNr_plus_texLett', 'ddbSerIDP', 'ddbSer', 'ddbVol', 'ddbDoc']
    fmpDoc = FileMakerFmpDoc(fields, rows)
    print fmpDoc.toXml()
    # Re-encode to UTF-8; the cp1252 decode suggests the ODBC driver returns
    # Windows-1252 bytes -- NOTE(review): confirm against the driver setup.
    file = codecs.open(resultFile, "w", "utf-8")
    file.write(fmpDoc.toXml().decode('cp1252'))
    file.close()
    crsr.close()
    conn.close()
| {
"content_hash": "59069700dddb66ac53ff9278c9b83ed4",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 147,
"avg_line_length": 29.02857142857143,
"alnum_prop": 0.6161417322834646,
"repo_name": "Edelweiss/hgv",
"id": "7385a4f435beaebb4eb56c6b4ae872719d104d81",
"size": "2049",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Papyrillio/HgvBundle/Script/ddbSer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "30023"
},
{
"name": "JavaScript",
"bytes": "1316"
},
{
"name": "PHP",
"bytes": "161830"
},
{
"name": "Python",
"bytes": "2049"
},
{
"name": "Shell",
"bytes": "2535"
},
{
"name": "Twig",
"bytes": "397946"
},
{
"name": "XSLT",
"bytes": "8000"
}
],
"symlink_target": ""
} |
"""Unit tests for the bytes and bytearray types.
XXX This is a mess. Common tests should be moved to buffer_tests.py,
which itself ought to be unified with string_tests.py (and the latter
should be modernized).
"""
import os
import re
import sys
import copy
import functools
import pickle
import tempfile
import unittest
import test.support
import test.string_tests
import test.buffer_tests
def check_bytes_warnings(func):
    """Decorator: when -b is in effect, assert that *func* raises BytesWarning.

    ``sys.flags.bytes_warning`` is fixed at interpreter startup, so checking
    it at decoration time is equivalent to choosing the decorator at import
    time. Without -b the function is returned unchanged.
    """
    if not sys.flags.bytes_warning:
        return func

    @functools.wraps(func)
    def wrapper(*args, **kw):
        with test.support.check_warnings(('', BytesWarning)):
            return func(*args, **kw)
    return wrapper
class Indexable:
    """Helper exposing only the __index__ protocol (operator.index support)."""

    def __init__(self, value=0):
        self.value = value

    def __index__(self):
        return self.value
class BaseBytesTest:
def test_basics(self):
b = self.type2test()
self.assertEqual(type(b), self.type2test)
self.assertEqual(b.__class__, self.type2test)
def test_copy(self):
a = self.type2test(b"abcd")
for copy_method in (copy.copy, copy.deepcopy):
b = copy_method(a)
self.assertEqual(a, b)
self.assertEqual(type(a), type(b))
def test_empty_sequence(self):
b = self.type2test()
self.assertEqual(len(b), 0)
self.assertRaises(IndexError, lambda: b[0])
self.assertRaises(IndexError, lambda: b[1])
self.assertRaises(IndexError, lambda: b[sys.maxsize])
self.assertRaises(IndexError, lambda: b[sys.maxsize+1])
self.assertRaises(IndexError, lambda: b[10**100])
self.assertRaises(IndexError, lambda: b[-1])
self.assertRaises(IndexError, lambda: b[-2])
self.assertRaises(IndexError, lambda: b[-sys.maxsize])
self.assertRaises(IndexError, lambda: b[-sys.maxsize-1])
self.assertRaises(IndexError, lambda: b[-sys.maxsize-2])
self.assertRaises(IndexError, lambda: b[-10**100])
def test_from_list(self):
ints = list(range(256))
b = self.type2test(i for i in ints)
self.assertEqual(len(b), 256)
self.assertEqual(list(b), ints)
def test_from_index(self):
b = self.type2test([Indexable(), Indexable(1), Indexable(254),
Indexable(255)])
self.assertEqual(list(b), [0, 1, 254, 255])
self.assertRaises(ValueError, self.type2test, [Indexable(-1)])
self.assertRaises(ValueError, self.type2test, [Indexable(256)])
def test_from_ssize(self):
self.assertEqual(self.type2test(0), b'')
self.assertEqual(self.type2test(1), b'\x00')
self.assertEqual(self.type2test(5), b'\x00\x00\x00\x00\x00')
self.assertRaises(ValueError, self.type2test, -1)
self.assertEqual(self.type2test('0', 'ascii'), b'0')
self.assertEqual(self.type2test(b'0'), b'0')
self.assertRaises(OverflowError, self.type2test, sys.maxsize + 1)
def test_constructor_type_errors(self):
self.assertRaises(TypeError, self.type2test, 0.0)
class C:
pass
self.assertRaises(TypeError, self.type2test, ["0"])
self.assertRaises(TypeError, self.type2test, [0.0])
self.assertRaises(TypeError, self.type2test, [None])
self.assertRaises(TypeError, self.type2test, [C()])
def test_constructor_value_errors(self):
self.assertRaises(ValueError, self.type2test, [-1])
self.assertRaises(ValueError, self.type2test, [-sys.maxsize])
self.assertRaises(ValueError, self.type2test, [-sys.maxsize-1])
self.assertRaises(ValueError, self.type2test, [-sys.maxsize-2])
self.assertRaises(ValueError, self.type2test, [-10**100])
self.assertRaises(ValueError, self.type2test, [256])
self.assertRaises(ValueError, self.type2test, [257])
self.assertRaises(ValueError, self.type2test, [sys.maxsize])
self.assertRaises(ValueError, self.type2test, [sys.maxsize+1])
self.assertRaises(ValueError, self.type2test, [10**100])
def test_compare(self):
b1 = self.type2test([1, 2, 3])
b2 = self.type2test([1, 2, 3])
b3 = self.type2test([1, 3])
self.assertEqual(b1, b2)
self.assertTrue(b2 != b3)
self.assertTrue(b1 <= b2)
self.assertTrue(b1 <= b3)
self.assertTrue(b1 < b3)
self.assertTrue(b1 >= b2)
self.assertTrue(b3 >= b2)
self.assertTrue(b3 > b2)
self.assertFalse(b1 != b2)
self.assertFalse(b2 == b3)
self.assertFalse(b1 > b2)
self.assertFalse(b1 > b3)
self.assertFalse(b1 >= b3)
self.assertFalse(b1 < b2)
self.assertFalse(b3 < b2)
self.assertFalse(b3 <= b2)
@check_bytes_warnings
def test_compare_to_str(self):
# Byte comparisons with unicode should always fail!
# Test this for all expected byte orders and Unicode character
# sizes.
self.assertEqual(self.type2test(b"\0a\0b\0c") == "abc", False)
self.assertEqual(self.type2test(b"\0\0\0a\0\0\0b\0\0\0c") == "abc",
False)
self.assertEqual(self.type2test(b"a\0b\0c\0") == "abc", False)
self.assertEqual(self.type2test(b"a\0\0\0b\0\0\0c\0\0\0") == "abc",
False)
self.assertEqual(self.type2test() == str(), False)
self.assertEqual(self.type2test() != str(), True)
def test_reversed(self):
input = list(map(ord, "Hello"))
b = self.type2test(input)
output = list(reversed(b))
input.reverse()
self.assertEqual(output, input)
def test_getslice(self):
def by(s):
return self.type2test(map(ord, s))
b = by("Hello, world")
self.assertEqual(b[:5], by("Hello"))
self.assertEqual(b[1:5], by("ello"))
self.assertEqual(b[5:7], by(", "))
self.assertEqual(b[7:], by("world"))
self.assertEqual(b[7:12], by("world"))
self.assertEqual(b[7:100], by("world"))
self.assertEqual(b[:-7], by("Hello"))
self.assertEqual(b[-11:-7], by("ello"))
self.assertEqual(b[-7:-5], by(", "))
self.assertEqual(b[-5:], by("world"))
self.assertEqual(b[-5:12], by("world"))
self.assertEqual(b[-5:100], by("world"))
self.assertEqual(b[-100:5], by("Hello"))
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
L = list(range(255))
b = self.type2test(L)
indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
self.assertEqual(b[start:stop:step], self.type2test(L[start:stop:step]))
def test_encoding(self):
sample = "Hello world\n\u1234\u5678\u9abc"
for enc in ("utf-8", "utf-16"):
b = self.type2test(sample, enc)
self.assertEqual(b, self.type2test(sample.encode(enc)))
self.assertRaises(UnicodeEncodeError, self.type2test, sample, "latin-1")
b = self.type2test(sample, "latin-1", "ignore")
self.assertEqual(b, self.type2test(sample[:-3], "utf-8"))
def test_decode(self):
sample = "Hello world\n\u1234\u5678\u9abc\def0\def0"
for enc in ("utf-8", "utf-16"):
b = self.type2test(sample, enc)
self.assertEqual(b.decode(enc), sample)
sample = "Hello world\n\x80\x81\xfe\xff"
b = self.type2test(sample, "latin-1")
self.assertRaises(UnicodeDecodeError, b.decode, "utf-8")
self.assertEqual(b.decode("utf-8", "ignore"), "Hello world\n")
self.assertEqual(b.decode(errors="ignore", encoding="utf-8"),
"Hello world\n")
# Default encoding is utf-8
self.assertEqual(self.type2test(b'\xe2\x98\x83').decode(), '\u2603')
def test_from_int(self):
b = self.type2test(0)
self.assertEqual(b, self.type2test())
b = self.type2test(10)
self.assertEqual(b, self.type2test([0]*10))
b = self.type2test(10000)
self.assertEqual(b, self.type2test([0]*10000))
def test_concat(self):
b1 = self.type2test(b"abc")
b2 = self.type2test(b"def")
self.assertEqual(b1 + b2, b"abcdef")
self.assertEqual(b1 + bytes(b"def"), b"abcdef")
self.assertEqual(bytes(b"def") + b1, b"defabc")
self.assertRaises(TypeError, lambda: b1 + "def")
self.assertRaises(TypeError, lambda: "abc" + b2)
def test_repeat(self):
for b in b"abc", self.type2test(b"abc"):
self.assertEqual(b * 3, b"abcabcabc")
self.assertEqual(b * 0, b"")
self.assertEqual(b * -1, b"")
self.assertRaises(TypeError, lambda: b * 3.14)
self.assertRaises(TypeError, lambda: 3.14 * b)
# XXX Shouldn't bytes and bytearray agree on what to raise?
with self.assertRaises((OverflowError, MemoryError)):
c = b * sys.maxsize
with self.assertRaises((OverflowError, MemoryError)):
b *= sys.maxsize
def test_repeat_1char(self):
self.assertEqual(self.type2test(b'x')*100, self.type2test([ord('x')]*100))
def test_contains(self):
b = self.type2test(b"abc")
self.assertIn(ord('a'), b)
self.assertIn(int(ord('a')), b)
self.assertNotIn(200, b)
self.assertRaises(ValueError, lambda: 300 in b)
self.assertRaises(ValueError, lambda: -1 in b)
self.assertRaises(TypeError, lambda: None in b)
self.assertRaises(TypeError, lambda: float(ord('a')) in b)
self.assertRaises(TypeError, lambda: "a" in b)
for f in bytes, bytearray:
self.assertIn(f(b""), b)
self.assertIn(f(b"a"), b)
self.assertIn(f(b"b"), b)
self.assertIn(f(b"c"), b)
self.assertIn(f(b"ab"), b)
self.assertIn(f(b"bc"), b)
self.assertIn(f(b"abc"), b)
self.assertNotIn(f(b"ac"), b)
self.assertNotIn(f(b"d"), b)
self.assertNotIn(f(b"dab"), b)
self.assertNotIn(f(b"abd"), b)
def test_fromhex(self):
self.assertRaises(TypeError, self.type2test.fromhex)
self.assertRaises(TypeError, self.type2test.fromhex, 1)
self.assertEqual(self.type2test.fromhex(''), self.type2test())
b = bytearray([0x1a, 0x2b, 0x30])
self.assertEqual(self.type2test.fromhex('1a2B30'), b)
self.assertEqual(self.type2test.fromhex(' 1A 2B 30 '), b)
self.assertEqual(self.type2test.fromhex('0000'), b'\0\0')
self.assertRaises(TypeError, self.type2test.fromhex, b'1B')
self.assertRaises(ValueError, self.type2test.fromhex, 'a')
self.assertRaises(ValueError, self.type2test.fromhex, 'rt')
self.assertRaises(ValueError, self.type2test.fromhex, '1a b cd')
self.assertRaises(ValueError, self.type2test.fromhex, '\x00')
self.assertRaises(ValueError, self.type2test.fromhex, '12 \x00 34')
def test_join(self):
self.assertEqual(self.type2test(b"").join([]), b"")
self.assertEqual(self.type2test(b"").join([b""]), b"")
for lst in [[b"abc"], [b"a", b"bc"], [b"ab", b"c"], [b"a", b"b", b"c"]]:
lst = list(map(self.type2test, lst))
self.assertEqual(self.type2test(b"").join(lst), b"abc")
self.assertEqual(self.type2test(b"").join(tuple(lst)), b"abc")
self.assertEqual(self.type2test(b"").join(iter(lst)), b"abc")
dot_join = self.type2test(b".:").join
self.assertEqual(dot_join([b"ab", b"cd"]), b"ab.:cd")
self.assertEqual(dot_join([memoryview(b"ab"), b"cd"]), b"ab.:cd")
self.assertEqual(dot_join([b"ab", memoryview(b"cd")]), b"ab.:cd")
self.assertEqual(dot_join([bytearray(b"ab"), b"cd"]), b"ab.:cd")
self.assertEqual(dot_join([b"ab", bytearray(b"cd")]), b"ab.:cd")
# Stress it with many items
seq = [b"abc"] * 1000
expected = b"abc" + b".:abc" * 999
self.assertEqual(dot_join(seq), expected)
# Error handling and cleanup when some item in the middle of the
# sequence has the wrong type.
with self.assertRaises(TypeError):
dot_join([bytearray(b"ab"), "cd", b"ef"])
with self.assertRaises(TypeError):
dot_join([memoryview(b"ab"), "cd", b"ef"])
def test_count(self):
b = self.type2test(b'mississippi')
i = 105
p = 112
w = 119
self.assertEqual(b.count(b'i'), 4)
self.assertEqual(b.count(b'ss'), 2)
self.assertEqual(b.count(b'w'), 0)
self.assertEqual(b.count(i), 4)
self.assertEqual(b.count(w), 0)
self.assertEqual(b.count(b'i', 6), 2)
self.assertEqual(b.count(b'p', 6), 2)
self.assertEqual(b.count(b'i', 1, 3), 1)
self.assertEqual(b.count(b'p', 7, 9), 1)
self.assertEqual(b.count(i, 6), 2)
self.assertEqual(b.count(p, 6), 2)
self.assertEqual(b.count(i, 1, 3), 1)
self.assertEqual(b.count(p, 7, 9), 1)
def test_startswith(self):
b = self.type2test(b'hello')
self.assertFalse(self.type2test().startswith(b"anything"))
self.assertTrue(b.startswith(b"hello"))
self.assertTrue(b.startswith(b"hel"))
self.assertTrue(b.startswith(b"h"))
self.assertFalse(b.startswith(b"hellow"))
self.assertFalse(b.startswith(b"ha"))
with self.assertRaises(TypeError) as cm:
b.startswith([b'h'])
exc = str(cm.exception)
self.assertIn('bytes', exc)
self.assertIn('tuple', exc)
def test_endswith(self):
b = self.type2test(b'hello')
self.assertFalse(bytearray().endswith(b"anything"))
self.assertTrue(b.endswith(b"hello"))
self.assertTrue(b.endswith(b"llo"))
self.assertTrue(b.endswith(b"o"))
self.assertFalse(b.endswith(b"whello"))
self.assertFalse(b.endswith(b"no"))
with self.assertRaises(TypeError) as cm:
b.endswith([b'o'])
exc = str(cm.exception)
self.assertIn('bytes', exc)
self.assertIn('tuple', exc)
def test_find(self):
b = self.type2test(b'mississippi')
i = 105
w = 119
self.assertEqual(b.find(b'ss'), 2)
self.assertEqual(b.find(b'w'), -1)
self.assertEqual(b.find(b'mississippian'), -1)
self.assertEqual(b.find(i), 1)
self.assertEqual(b.find(w), -1)
self.assertEqual(b.find(b'ss', 3), 5)
self.assertEqual(b.find(b'ss', 1, 7), 2)
self.assertEqual(b.find(b'ss', 1, 3), -1)
self.assertEqual(b.find(i, 6), 7)
self.assertEqual(b.find(i, 1, 3), 1)
self.assertEqual(b.find(w, 1, 3), -1)
for index in (-1, 256, sys.maxsize + 1):
self.assertRaisesRegex(
ValueError, r'byte must be in range\(0, 256\)',
b.find, index)
def test_rfind(self):
b = self.type2test(b'mississippi')
i = 105
w = 119
self.assertEqual(b.rfind(b'ss'), 5)
self.assertEqual(b.rfind(b'w'), -1)
self.assertEqual(b.rfind(b'mississippian'), -1)
self.assertEqual(b.rfind(i), 10)
self.assertEqual(b.rfind(w), -1)
self.assertEqual(b.rfind(b'ss', 3), 5)
self.assertEqual(b.rfind(b'ss', 0, 6), 2)
self.assertEqual(b.rfind(i, 1, 3), 1)
self.assertEqual(b.rfind(i, 3, 9), 7)
self.assertEqual(b.rfind(w, 1, 3), -1)
def test_index(self):
b = self.type2test(b'mississippi')
i = 105
w = 119
self.assertEqual(b.index(b'ss'), 2)
self.assertRaises(ValueError, b.index, b'w')
self.assertRaises(ValueError, b.index, b'mississippian')
self.assertEqual(b.index(i), 1)
self.assertRaises(ValueError, b.index, w)
self.assertEqual(b.index(b'ss', 3), 5)
self.assertEqual(b.index(b'ss', 1, 7), 2)
self.assertRaises(ValueError, b.index, b'ss', 1, 3)
self.assertEqual(b.index(i, 6), 7)
self.assertEqual(b.index(i, 1, 3), 1)
self.assertRaises(ValueError, b.index, w, 1, 3)
def test_rindex(self):
b = self.type2test(b'mississippi')
i = 105
w = 119
self.assertEqual(b.rindex(b'ss'), 5)
self.assertRaises(ValueError, b.rindex, b'w')
self.assertRaises(ValueError, b.rindex, b'mississippian')
self.assertEqual(b.rindex(i), 10)
self.assertRaises(ValueError, b.rindex, w)
self.assertEqual(b.rindex(b'ss', 3), 5)
self.assertEqual(b.rindex(b'ss', 0, 6), 2)
self.assertEqual(b.rindex(i, 1, 3), 1)
self.assertEqual(b.rindex(i, 3, 9), 7)
self.assertRaises(ValueError, b.rindex, w, 1, 3)
def test_replace(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.replace(b'i', b'a'), b'massassappa')
self.assertEqual(b.replace(b'ss', b'x'), b'mixixippi')
def test_split(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.split(b'i'), [b'm', b'ss', b'ss', b'pp', b''])
self.assertEqual(b.split(b'ss'), [b'mi', b'i', b'ippi'])
self.assertEqual(b.split(b'w'), [b])
# with keyword args
b = self.type2test(b'a|b|c|d')
self.assertEqual(b.split(sep=b'|'), [b'a', b'b', b'c', b'd'])
self.assertEqual(b.split(b'|', maxsplit=1), [b'a', b'b|c|d'])
self.assertEqual(b.split(sep=b'|', maxsplit=1), [b'a', b'b|c|d'])
self.assertEqual(b.split(maxsplit=1, sep=b'|'), [b'a', b'b|c|d'])
b = self.type2test(b'a b c d')
self.assertEqual(b.split(maxsplit=1), [b'a', b'b c d'])
def test_split_whitespace(self):
for b in (b' arf barf ', b'arf\tbarf', b'arf\nbarf', b'arf\rbarf',
b'arf\fbarf', b'arf\vbarf'):
b = self.type2test(b)
self.assertEqual(b.split(), [b'arf', b'barf'])
self.assertEqual(b.split(None), [b'arf', b'barf'])
self.assertEqual(b.split(None, 2), [b'arf', b'barf'])
for b in (b'a\x1Cb', b'a\x1Db', b'a\x1Eb', b'a\x1Fb'):
b = self.type2test(b)
self.assertEqual(b.split(), [b])
self.assertEqual(self.type2test(b' a bb c ').split(None, 0), [b'a bb c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 1), [b'a', b'bb c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 2), [b'a', b'bb', b'c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 3), [b'a', b'bb', b'c'])
def test_split_string_error(self):
self.assertRaises(TypeError, self.type2test(b'a b').split, ' ')
def test_split_unicodewhitespace(self):
b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
self.assertEqual(b.split(), [b'\x1c\x1d\x1e\x1f'])
def test_rsplit(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rsplit(b'i'), [b'm', b'ss', b'ss', b'pp', b''])
self.assertEqual(b.rsplit(b'ss'), [b'mi', b'i', b'ippi'])
self.assertEqual(b.rsplit(b'w'), [b])
# with keyword args
b = self.type2test(b'a|b|c|d')
self.assertEqual(b.rsplit(sep=b'|'), [b'a', b'b', b'c', b'd'])
self.assertEqual(b.rsplit(b'|', maxsplit=1), [b'a|b|c', b'd'])
self.assertEqual(b.rsplit(sep=b'|', maxsplit=1), [b'a|b|c', b'd'])
self.assertEqual(b.rsplit(maxsplit=1, sep=b'|'), [b'a|b|c', b'd'])
b = self.type2test(b'a b c d')
self.assertEqual(b.rsplit(maxsplit=1), [b'a b c', b'd'])
def test_rsplit_whitespace(self):
for b in (b' arf barf ', b'arf\tbarf', b'arf\nbarf', b'arf\rbarf',
b'arf\fbarf', b'arf\vbarf'):
b = self.type2test(b)
self.assertEqual(b.rsplit(), [b'arf', b'barf'])
self.assertEqual(b.rsplit(None), [b'arf', b'barf'])
self.assertEqual(b.rsplit(None, 2), [b'arf', b'barf'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 0), [b' a bb c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 1), [b' a bb', b'c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 2), [b' a', b'bb', b'c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 3), [b'a', b'bb', b'c'])
def test_rsplit_string_error(self):
self.assertRaises(TypeError, self.type2test(b'a b').rsplit, ' ')
def test_rsplit_unicodewhitespace(self):
b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
self.assertEqual(b.rsplit(), [b'\x1c\x1d\x1e\x1f'])
def test_partition(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.partition(b'ss'), (b'mi', b'ss', b'issippi'))
self.assertEqual(b.partition(b'w'), (b'mississippi', b'', b''))
def test_rpartition(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rpartition(b'ss'), (b'missi', b'ss', b'ippi'))
self.assertEqual(b.rpartition(b'i'), (b'mississipp', b'i', b''))
self.assertEqual(b.rpartition(b'w'), (b'', b'', b'mississippi'))
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for b in b"", b"a", b"abc", b"\xffab\x80", b"\0\0\377\0\0":
b = self.type2test(b)
ps = pickle.dumps(b, proto)
q = pickle.loads(ps)
self.assertEqual(b, q)
def test_iterator_pickling(self):
for b in b"", b"a", b"abc", b"\xffab\x80", b"\0\0\377\0\0":
it = itorg = iter(self.type2test(b))
data = list(self.type2test(b))
d = pickle.dumps(it)
it = pickle.loads(d)
self.assertEqual(type(itorg), type(it))
self.assertEqual(list(it), data)
it = pickle.loads(d)
try:
next(it)
except StopIteration:
continue
d = pickle.dumps(it)
it = pickle.loads(d)
self.assertEqual(list(it), data[1:])
def test_strip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.strip(b'i'), b'mississipp')
self.assertEqual(b.strip(b'm'), b'ississippi')
self.assertEqual(b.strip(b'pi'), b'mississ')
self.assertEqual(b.strip(b'im'), b'ssissipp')
self.assertEqual(b.strip(b'pim'), b'ssiss')
self.assertEqual(b.strip(b), b'')
def test_lstrip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.lstrip(b'i'), b'mississippi')
self.assertEqual(b.lstrip(b'm'), b'ississippi')
self.assertEqual(b.lstrip(b'pi'), b'mississippi')
self.assertEqual(b.lstrip(b'im'), b'ssissippi')
self.assertEqual(b.lstrip(b'pim'), b'ssissippi')
def test_rstrip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rstrip(b'i'), b'mississipp')
self.assertEqual(b.rstrip(b'm'), b'mississippi')
self.assertEqual(b.rstrip(b'pi'), b'mississ')
self.assertEqual(b.rstrip(b'im'), b'mississipp')
self.assertEqual(b.rstrip(b'pim'), b'mississ')
def test_strip_whitespace(self):
b = self.type2test(b' \t\n\r\f\vabc \t\n\r\f\v')
self.assertEqual(b.strip(), b'abc')
self.assertEqual(b.lstrip(), b'abc \t\n\r\f\v')
self.assertEqual(b.rstrip(), b' \t\n\r\f\vabc')
def test_strip_bytearray(self):
self.assertEqual(self.type2test(b'abc').strip(memoryview(b'ac')), b'b')
self.assertEqual(self.type2test(b'abc').lstrip(memoryview(b'ac')), b'bc')
self.assertEqual(self.type2test(b'abc').rstrip(memoryview(b'ac')), b'ab')
def test_strip_string_error(self):
self.assertRaises(TypeError, self.type2test(b'abc').strip, 'b')
self.assertRaises(TypeError, self.type2test(b'abc').lstrip, 'b')
self.assertRaises(TypeError, self.type2test(b'abc').rstrip, 'b')
def test_center(self):
# Fill character can be either bytes or bytearray (issue 12380)
b = self.type2test(b'abc')
for fill_type in (bytes, bytearray):
self.assertEqual(b.center(7, fill_type(b'-')),
self.type2test(b'--abc--'))
def test_ljust(self):
# Fill character can be either bytes or bytearray (issue 12380)
b = self.type2test(b'abc')
for fill_type in (bytes, bytearray):
self.assertEqual(b.ljust(7, fill_type(b'-')),
self.type2test(b'abc----'))
def test_rjust(self):
# Fill character can be either bytes or bytearray (issue 12380)
b = self.type2test(b'abc')
for fill_type in (bytes, bytearray):
self.assertEqual(b.rjust(7, fill_type(b'-')),
self.type2test(b'----abc'))
def test_ord(self):
b = self.type2test(b'\0A\x7f\x80\xff')
self.assertEqual([ord(b[i:i+1]) for i in range(len(b))],
[0, 65, 127, 128, 255])
def test_maketrans(self):
transtable = b'\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`xyzdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377'
self.assertEqual(self.type2test.maketrans(b'abc', b'xyz'), transtable)
transtable = b'\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374xyz'
self.assertEqual(self.type2test.maketrans(b'\375\376\377', b'xyz'), transtable)
self.assertRaises(ValueError, self.type2test.maketrans, b'abc', b'xyzq')
self.assertRaises(TypeError, self.type2test.maketrans, 'abc', 'def')
    def test_none_arguments(self):
        # issue 11828: None as a start/end argument must behave exactly
        # like omitting the argument for the search/compare methods.
        b = self.type2test(b'hello')
        l = self.type2test(b'l')
        h = self.type2test(b'h')
        x = self.type2test(b'x')
        o = self.type2test(b'o')
        # find
        self.assertEqual(2, b.find(l, None))
        self.assertEqual(3, b.find(l, -2, None))
        self.assertEqual(2, b.find(l, None, -2))
        self.assertEqual(0, b.find(h, None, None))
        # rfind
        self.assertEqual(3, b.rfind(l, None))
        self.assertEqual(3, b.rfind(l, -2, None))
        self.assertEqual(2, b.rfind(l, None, -2))
        self.assertEqual(0, b.rfind(h, None, None))
        # index
        self.assertEqual(2, b.index(l, None))
        self.assertEqual(3, b.index(l, -2, None))
        self.assertEqual(2, b.index(l, None, -2))
        self.assertEqual(0, b.index(h, None, None))
        # rindex
        self.assertEqual(3, b.rindex(l, None))
        self.assertEqual(3, b.rindex(l, -2, None))
        self.assertEqual(2, b.rindex(l, None, -2))
        self.assertEqual(0, b.rindex(h, None, None))
        # count
        self.assertEqual(2, b.count(l, None))
        self.assertEqual(1, b.count(l, -2, None))
        self.assertEqual(1, b.count(l, None, -2))
        self.assertEqual(0, b.count(x, None, None))
        # endswith
        self.assertEqual(True, b.endswith(o, None))
        self.assertEqual(True, b.endswith(o, -2, None))
        self.assertEqual(True, b.endswith(l, None, -2))
        self.assertEqual(False, b.endswith(x, None, None))
        # startswith
        self.assertEqual(True, b.startswith(h, None))
        self.assertEqual(True, b.startswith(l, -2, None))
        self.assertEqual(True, b.startswith(h, None, -2))
        self.assertEqual(False, b.startswith(x, None, None))
def test_integer_arguments_out_of_byte_range(self):
b = self.type2test(b'hello')
for method in (b.count, b.find, b.index, b.rfind, b.rindex):
self.assertRaises(ValueError, method, -1)
self.assertRaises(ValueError, method, 256)
self.assertRaises(ValueError, method, 9999)
def test_find_etc_raise_correct_error_messages(self):
# issue 11828
b = self.type2test(b'hello')
x = self.type2test(b'x')
self.assertRaisesRegex(TypeError, r'\bfind\b', b.find,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'\brfind\b', b.rfind,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'\bindex\b', b.index,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'\brindex\b', b.rindex,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'\bcount\b', b.count,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'\bstartswith\b', b.startswith,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'\bendswith\b', b.endswith,
x, None, None, None)
class BytesTest(BaseBytesTest, unittest.TestCase):
    # Run the shared BaseBytesTest suite against the immutable bytes type.
    type2test = bytes
    def test_buffer_is_readonly(self):
        # readinto() must reject an immutable bytes object as its buffer.
        fd = os.open(__file__, os.O_RDONLY)
        with open(fd, "rb", buffering=0) as f:
            self.assertRaises(TypeError, f.readinto, b"")
    def test_custom(self):
        # bytes() consults __bytes__, which must return an actual bytes
        # object; otherwise TypeError is raised.
        class A:
            def __bytes__(self):
                return b'abc'
        self.assertEqual(bytes(A()), b'abc')
        class A: pass
        self.assertRaises(TypeError, bytes, A())
        class A:
            def __bytes__(self):
                return None
        self.assertRaises(TypeError, bytes, A())
        # __bytes__ takes precedence over __index__.
        class A:
            def __bytes__(self):
                return b'a'
            def __index__(self):
                return 42
        self.assertEqual(bytes(A()), b'a')
    # Test PyBytes_FromFormat()
    def test_from_format(self):
        # Exercise the C-level formatting API through ctypes.
        test.support.import_module('ctypes')
        from ctypes import pythonapi, py_object, c_int, c_char_p
        PyBytes_FromFormat = pythonapi.PyBytes_FromFormat
        PyBytes_FromFormat.restype = py_object
        self.assertEqual(PyBytes_FromFormat(b'format'),
                         b'format')
        # '%%' escapes to a literal '%'; a lone trailing '%' passes through.
        self.assertEqual(PyBytes_FromFormat(b'%'), b'%')
        self.assertEqual(PyBytes_FromFormat(b'%%'), b'%')
        self.assertEqual(PyBytes_FromFormat(b'%%s'), b'%s')
        self.assertEqual(PyBytes_FromFormat(b'[%%]'), b'[%]')
        self.assertEqual(PyBytes_FromFormat(b'%%%c', c_int(ord('_'))), b'%_')
        self.assertEqual(PyBytes_FromFormat(b'c:%c', c_int(255)),
                         b'c:\xff')
        self.assertEqual(PyBytes_FromFormat(b's:%s', c_char_p(b'cstr')),
                         b's:cstr')
        # Issue #19969: %c arguments outside 0-255 must raise OverflowError.
        self.assertRaises(OverflowError,
                          PyBytes_FromFormat, b'%c', c_int(-1))
        self.assertRaises(OverflowError,
                          PyBytes_FromFormat, b'%c', c_int(256))
class ByteArrayTest(BaseBytesTest, unittest.TestCase):
    """Run the shared BaseBytesTest suite against the mutable bytearray
    type, plus tests for its mutating API (item/slice assignment, append,
    extend, insert, remove, pop, etc.).
    """
    type2test = bytearray
    def test_nohash(self):
        # Mutable objects must not be hashable.
        self.assertRaises(TypeError, hash, bytearray())
    def test_bytearray_api(self):
        short_sample = b"Hello world\n"
        sample = short_sample + b"\0"*(20 - len(short_sample))
        # Use mkstemp() instead of the deprecated, race-prone mktemp():
        # it creates the file atomically.  Close the fd immediately; the
        # test reopens the file by name below (open(tfn, "wb") truncates,
        # so the empty pre-created file does not change behavior).
        fd, tfn = tempfile.mkstemp()
        os.close(fd)
        try:
            # Prepare
            with open(tfn, "wb") as f:
                f.write(short_sample)
            # Test readinto
            with open(tfn, "rb") as f:
                b = bytearray(20)
                n = f.readinto(b)
            self.assertEqual(n, len(short_sample))
            self.assertEqual(list(b), list(sample))
            # Test writing in binary mode
            with open(tfn, "wb") as f:
                f.write(b)
            with open(tfn, "rb") as f:
                self.assertEqual(f.read(), sample)
            # Text mode is ambiguous; don't test
        finally:
            try:
                os.remove(tfn)
            except OSError:
                pass
    def test_reverse(self):
        b = bytearray(b'hello')
        # reverse() works in place and returns None (mutable-sequence
        # convention).
        self.assertEqual(b.reverse(), None)
        self.assertEqual(b, b'olleh')
        b = bytearray(b'hello1') # test even number of items
        b.reverse()
        self.assertEqual(b, b'1olleh')
        b = bytearray()
        b.reverse()
        self.assertFalse(b)
    def test_clear(self):
        b = bytearray(b'python')
        b.clear()
        self.assertEqual(b, b'')
        b = bytearray(b'')
        b.clear()
        self.assertEqual(b, b'')
        # A cleared bytearray must remain fully usable.
        b = bytearray(b'')
        b.append(ord('r'))
        b.clear()
        b.append(ord('p'))
        self.assertEqual(b, b'p')
    def test_copy(self):
        b = bytearray(b'abc')
        bb = b.copy()
        self.assertEqual(bb, b'abc')
        b = bytearray(b'')
        bb = b.copy()
        self.assertEqual(bb, b'')
        # test that it's indeed a copy and not a reference
        b = bytearray(b'abc')
        bb = b.copy()
        self.assertEqual(b, bb)
        self.assertIsNot(b, bb)
        bb.append(ord('d'))
        self.assertEqual(bb, b'abcd')
        self.assertEqual(b, b'abc')
    def test_regexps(self):
        # The re module must accept bytearray subjects and return
        # bytes matches.
        def by(s):
            return bytearray(map(ord, s))
        b = by("Hello, world")
        self.assertEqual(re.findall(br"\w+", b), [by("Hello"), by("world")])
    def test_setitem(self):
        b = bytearray([1, 2, 3])
        b[1] = 100
        self.assertEqual(b, bytearray([1, 100, 3]))
        b[-1] = 200
        self.assertEqual(b, bytearray([1, 100, 200]))
        # Any object with __index__ is accepted as a byte value.
        b[0] = Indexable(10)
        self.assertEqual(b, bytearray([10, 100, 200]))
        # Out-of-range indexes raise IndexError; out-of-range byte values
        # raise ValueError; non-integers raise TypeError.
        try:
            b[3] = 0
            self.fail("Didn't raise IndexError")
        except IndexError:
            pass
        try:
            b[-10] = 0
            self.fail("Didn't raise IndexError")
        except IndexError:
            pass
        try:
            b[0] = 256
            self.fail("Didn't raise ValueError")
        except ValueError:
            pass
        try:
            b[0] = Indexable(-1)
            self.fail("Didn't raise ValueError")
        except ValueError:
            pass
        try:
            b[0] = None
            self.fail("Didn't raise TypeError")
        except TypeError:
            pass
    def test_delitem(self):
        b = bytearray(range(10))
        del b[0]
        self.assertEqual(b, bytearray(range(1, 10)))
        del b[-1]
        self.assertEqual(b, bytearray(range(1, 9)))
        del b[4]
        self.assertEqual(b, bytearray([1, 2, 3, 4, 6, 7, 8]))
    def test_setslice(self):
        b = bytearray(range(10))
        self.assertEqual(list(b), list(range(10)))
        b[0:5] = bytearray([1, 1, 1, 1, 1])
        self.assertEqual(b, bytearray([1, 1, 1, 1, 1, 5, 6, 7, 8, 9]))
        del b[0:-5]
        self.assertEqual(b, bytearray([5, 6, 7, 8, 9]))
        b[0:0] = bytearray([0, 1, 2, 3, 4])
        self.assertEqual(b, bytearray(range(10)))
        b[-7:-3] = bytearray([100, 101])
        self.assertEqual(b, bytearray([0, 1, 2, 100, 101, 7, 8, 9]))
        b[3:5] = [3, 4, 5, 6]
        self.assertEqual(b, bytearray(range(10)))
        # An empty slice target inserts before the start index.
        b[3:0] = [42, 42, 42]
        self.assertEqual(b, bytearray([0, 1, 2, 42, 42, 42, 3, 4, 5, 6, 7, 8, 9]))
        b[3:] = b'foo'
        self.assertEqual(b, bytearray([0, 1, 2, 102, 111, 111]))
        b[:3] = memoryview(b'foo')
        self.assertEqual(b, bytearray([102, 111, 111, 102, 111, 111]))
        b[3:4] = []
        self.assertEqual(b, bytearray([102, 111, 111, 111, 111]))
        # Non-iterables and iterables of non-ints raise TypeError;
        # out-of-range byte values raise ValueError.
        for elem in [5, -5, 0, int(10e20), 'str', 2.3,
                     ['a', 'b'], [b'a', b'b'], [[]]]:
            with self.assertRaises(TypeError):
                b[3:4] = elem
        for elem in [[254, 255, 256], [-256, 9000]]:
            with self.assertRaises(ValueError):
                b[3:4] = elem
    def test_setslice_extend(self):
        # Exercise the resizing logic (see issue #19087)
        b = bytearray(range(100))
        self.assertEqual(list(b), list(range(100)))
        del b[:10]
        self.assertEqual(list(b), list(range(10, 100)))
        b.extend(range(100, 110))
        self.assertEqual(list(b), list(range(10, 110)))
    def test_extended_set_del_slice(self):
        # Extended (stepped) slice assignment/deletion must match the
        # behavior of list for every start/stop/step combination.
        indices = (0, None, 1, 3, 19, 300, 1<<333, -1, -2, -31, -300)
        for start in indices:
            for stop in indices:
                # Skip invalid step 0
                for step in indices[1:]:
                    L = list(range(255))
                    b = bytearray(L)
                    # Make sure we have a slice of exactly the right length,
                    # but with different data.
                    data = L[start:stop:step]
                    data.reverse()
                    L[start:stop:step] = data
                    b[start:stop:step] = data
                    self.assertEqual(b, bytearray(L))
                    del L[start:stop:step]
                    del b[start:stop:step]
                    self.assertEqual(b, bytearray(L))
    def test_setslice_trap(self):
        # This test verifies that we correctly handle assigning self
        # to a slice of self (the old Lambert Meertens trap).
        b = bytearray(range(256))
        b[8:] = b
        self.assertEqual(b, bytearray(list(range(8)) + list(range(256))))
    def test_iconcat(self):
        b = bytearray(b"abc")
        b1 = b
        b += b"def"
        self.assertEqual(b, b"abcdef")
        self.assertEqual(b, b1)
        # += must mutate in place, not rebind to a new object.
        self.assertTrue(b is b1)
        b += b"xyz"
        self.assertEqual(b, b"abcdefxyz")
        try:
            b += ""
        except TypeError:
            pass
        else:
            self.fail("bytes += unicode didn't raise TypeError")
    def test_irepeat(self):
        b = bytearray(b"abc")
        b1 = b
        b *= 3
        self.assertEqual(b, b"abcabcabc")
        self.assertEqual(b, b1)
        # *= must also mutate in place.
        self.assertTrue(b is b1)
    def test_irepeat_1char(self):
        # Single-byte repeat takes a separate fast path in C; cover it too.
        b = bytearray(b"x")
        b1 = b
        b *= 100
        self.assertEqual(b, b"x"*100)
        self.assertEqual(b, b1)
        self.assertTrue(b is b1)
    def test_alloc(self):
        b = bytearray()
        alloc = b.__alloc__()
        self.assertTrue(alloc >= 0)
        seq = [alloc]
        for i in range(100):
            b += b"x"
            alloc = b.__alloc__()
            # The allocated buffer must always cover the current length.
            self.assertTrue(alloc >= len(b))
            if alloc not in seq:
                seq.append(alloc)
    def test_extend(self):
        orig = b'hello'
        a = bytearray(orig)
        # Extending with self must work (no infinite loop / corruption).
        a.extend(a)
        self.assertEqual(a, orig + orig)
        self.assertEqual(a[5:], orig)
        a = bytearray(b'')
        # Test iterators that don't have a __length_hint__
        a.extend(map(int, orig * 25))
        a.extend(int(x) for x in orig * 25)
        self.assertEqual(a, orig * 50)
        self.assertEqual(a[-5:], orig)
        a = bytearray(b'')
        a.extend(iter(map(int, orig * 50)))
        self.assertEqual(a, orig * 50)
        self.assertEqual(a[-5:], orig)
        a = bytearray(b'')
        a.extend(list(map(int, orig * 50)))
        self.assertEqual(a, orig * 50)
        self.assertEqual(a[-5:], orig)
        a = bytearray(b'')
        # A failed extend must leave the target unchanged.
        self.assertRaises(ValueError, a.extend, [0, 1, 2, 256])
        self.assertRaises(ValueError, a.extend, [0, 1, 2, -1])
        self.assertEqual(len(a), 0)
        a = bytearray(b'')
        a.extend([Indexable(ord('a'))])
        self.assertEqual(a, b'a')
    def test_remove(self):
        b = bytearray(b'hello')
        b.remove(ord('l'))
        self.assertEqual(b, b'helo')
        b.remove(ord('l'))
        self.assertEqual(b, b'heo')
        self.assertRaises(ValueError, lambda: b.remove(ord('l')))
        self.assertRaises(ValueError, lambda: b.remove(400))
        self.assertRaises(TypeError, lambda: b.remove('e'))
        # remove first and last
        b.remove(ord('o'))
        b.remove(ord('h'))
        self.assertEqual(b, b'e')
        self.assertRaises(TypeError, lambda: b.remove(b'e'))
        b.remove(Indexable(ord('e')))
        self.assertEqual(b, b'')
    def test_pop(self):
        b = bytearray(b'world')
        self.assertEqual(b.pop(), ord('d'))
        self.assertEqual(b.pop(0), ord('w'))
        self.assertEqual(b.pop(-2), ord('r'))
        self.assertRaises(IndexError, lambda: b.pop(10))
        self.assertRaises(IndexError, lambda: bytearray().pop())
        # test for issue #6846
        self.assertEqual(bytearray(b'\xff').pop(), 0xff)
    def test_nosort(self):
        # bytearray deliberately has no sort() method.
        self.assertRaises(AttributeError, lambda: bytearray().sort())
    def test_append(self):
        b = bytearray(b'hell')
        b.append(ord('o'))
        self.assertEqual(b, b'hello')
        # append() returns None (mutable-sequence convention).
        self.assertEqual(b.append(100), None)
        b = bytearray()
        b.append(ord('A'))
        self.assertEqual(len(b), 1)
        self.assertRaises(TypeError, lambda: b.append(b'o'))
        b = bytearray()
        b.append(Indexable(ord('A')))
        self.assertEqual(b, b'A')
    def test_insert(self):
        b = bytearray(b'msssspp')
        b.insert(1, ord('i'))
        b.insert(4, ord('i'))
        b.insert(-2, ord('i'))
        # Out-of-range insert indexes clip to the end, as with list.
        b.insert(1000, ord('i'))
        self.assertEqual(b, b'mississippi')
        self.assertRaises(TypeError, lambda: b.insert(0, b'1'))
        b = bytearray()
        b.insert(0, Indexable(ord('A')))
        self.assertEqual(b, b'A')
    def test_copied(self):
        # Issue 4348. Make sure that operations that don't mutate the array
        # copy the bytes.
        b = bytearray(b'abc')
        self.assertFalse(b is b.replace(b'abc', b'cde', 0))
        t = bytearray([i for i in range(256)])
        x = bytearray(b'')
        self.assertFalse(x is x.translate(t))
    def test_partition_bytearray_doesnt_share_nullstring(self):
        # The empty tail objects returned by (r)partition must be distinct
        # mutable objects, never shared.
        a, b, c = bytearray(b"x").partition(b"y")
        self.assertEqual(b, b"")
        self.assertEqual(c, b"")
        self.assertTrue(b is not c)
        b += b"!"
        self.assertEqual(c, b"")
        a, b, c = bytearray(b"x").partition(b"y")
        self.assertEqual(b, b"")
        self.assertEqual(c, b"")
        # Same for rpartition
        b, c, a = bytearray(b"x").rpartition(b"y")
        self.assertEqual(b, b"")
        self.assertEqual(c, b"")
        self.assertTrue(b is not c)
        b += b"!"
        self.assertEqual(c, b"")
        c, b, a = bytearray(b"x").rpartition(b"y")
        self.assertEqual(b, b"")
        self.assertEqual(c, b"")
    def test_resize_forbidden(self):
        # #4509: can't resize a bytearray when there are buffer exports, even
        # if it wouldn't reallocate the underlying buffer.
        # Furthermore, no destructive changes to the buffer may be applied
        # before raising the error.
        b = bytearray(range(10))
        v = memoryview(b)
        def resize(n):
            b[1:-1] = range(n + 1, 2*n - 1)
        # Same-size slice assignment does not resize, so it is allowed
        # even while the memoryview is alive.
        resize(10)
        orig = b[:]
        self.assertRaises(BufferError, resize, 11)
        self.assertEqual(b, orig)
        self.assertRaises(BufferError, resize, 9)
        self.assertEqual(b, orig)
        self.assertRaises(BufferError, resize, 0)
        self.assertEqual(b, orig)
        # Other operations implying resize
        self.assertRaises(BufferError, b.pop, 0)
        self.assertEqual(b, orig)
        self.assertRaises(BufferError, b.remove, b[1])
        self.assertEqual(b, orig)
        def delitem():
            del b[1]
        self.assertRaises(BufferError, delitem)
        self.assertEqual(b, orig)
        # deleting a non-contiguous slice
        def delslice():
            b[1:-1:2] = b""
        self.assertRaises(BufferError, delslice)
        self.assertEqual(b, orig)
class AssortedBytesTest(unittest.TestCase):
    #
    # Test various combinations of bytes and bytearray
    #
    @check_bytes_warnings
    def test_repr_str(self):
        # str() and repr() must both yield the canonical literal form.
        for f in str, repr:
            self.assertEqual(f(bytearray()), "bytearray(b'')")
            self.assertEqual(f(bytearray([0])), "bytearray(b'\\x00')")
            self.assertEqual(f(bytearray([0, 1, 254, 255])),
                             "bytearray(b'\\x00\\x01\\xfe\\xff')")
            self.assertEqual(f(b"abc"), "b'abc'")
            # Quote-style selection: a single quote in the data switches
            # the repr to double quotes.
            self.assertEqual(f(b"'"), '''b"'"''') # '''
            self.assertEqual(f(b"'\""), r"""b'\'"'""") # '
    def test_compare_bytes_to_bytearray(self):
        # All six rich comparisons must agree regardless of operand order.
        self.assertEqual(b"abc" == bytes(b"abc"), True)
        self.assertEqual(b"ab" != bytes(b"abc"), True)
        self.assertEqual(b"ab" <= bytes(b"abc"), True)
        self.assertEqual(b"ab" < bytes(b"abc"), True)
        self.assertEqual(b"abc" >= bytes(b"ab"), True)
        self.assertEqual(b"abc" > bytes(b"ab"), True)
        self.assertEqual(b"abc" != bytes(b"abc"), False)
        self.assertEqual(b"ab" == bytes(b"abc"), False)
        self.assertEqual(b"ab" > bytes(b"abc"), False)
        self.assertEqual(b"ab" >= bytes(b"abc"), False)
        self.assertEqual(b"abc" < bytes(b"ab"), False)
        self.assertEqual(b"abc" <= bytes(b"ab"), False)
        self.assertEqual(bytes(b"abc") == b"abc", True)
        self.assertEqual(bytes(b"ab") != b"abc", True)
        self.assertEqual(bytes(b"ab") <= b"abc", True)
        self.assertEqual(bytes(b"ab") < b"abc", True)
        self.assertEqual(bytes(b"abc") >= b"ab", True)
        self.assertEqual(bytes(b"abc") > b"ab", True)
        self.assertEqual(bytes(b"abc") != b"abc", False)
        self.assertEqual(bytes(b"ab") == b"abc", False)
        self.assertEqual(bytes(b"ab") > b"abc", False)
        self.assertEqual(bytes(b"ab") >= b"abc", False)
        self.assertEqual(bytes(b"abc") < b"ab", False)
        self.assertEqual(bytes(b"abc") <= b"ab", False)
    @test.support.requires_docstrings
    def test_doc(self):
        self.assertIsNotNone(bytearray.__doc__)
        self.assertTrue(bytearray.__doc__.startswith("bytearray("), bytearray.__doc__)
        self.assertIsNotNone(bytes.__doc__)
        self.assertTrue(bytes.__doc__.startswith("bytes("), bytes.__doc__)
    def test_from_bytearray(self):
        # bytearray() must accept a memoryview source.
        sample = bytes(b"Hello world\n\x80\x81\xfe\xff")
        buf = memoryview(sample)
        b = bytearray(buf)
        self.assertEqual(b, bytearray(sample))
    @check_bytes_warnings
    def test_to_str(self):
        self.assertEqual(str(b''), "b''")
        self.assertEqual(str(b'x'), "b'x'")
        self.assertEqual(str(b'\x80'), "b'\\x80'")
        self.assertEqual(str(bytearray(b'')), "bytearray(b'')")
        self.assertEqual(str(bytearray(b'x')), "bytearray(b'x')")
        self.assertEqual(str(bytearray(b'\x80')), "bytearray(b'\\x80')")
    def test_literal(self):
        # bytes literals correspond byte-for-byte to latin-1 encoded str
        # literals; non-ASCII characters in a bytes literal are a
        # SyntaxError.
        tests = [
            (b"Wonderful spam", "Wonderful spam"),
            (br"Wonderful spam too", "Wonderful spam too"),
            (b"\xaa\x00\000\200", "\xaa\x00\000\200"),
            (br"\xaa\x00\000\200", r"\xaa\x00\000\200"),
        ]
        for b, s in tests:
            self.assertEqual(b, bytearray(s, 'latin-1'))
        for c in range(128, 256):
            self.assertRaises(SyntaxError, eval,
                              'b"%s"' % chr(c))
    def test_translate(self):
        # translate() applies the 256-byte table and drops the bytes
        # listed in the optional delete argument; None means identity.
        b = b'hello'
        ba = bytearray(b)
        rosetta = bytearray(range(0, 256))
        rosetta[ord('o')] = ord('e')
        c = b.translate(rosetta, b'l')
        self.assertEqual(b, b'hello')
        self.assertEqual(c, b'hee')
        c = ba.translate(rosetta, b'l')
        self.assertEqual(ba, b'hello')
        self.assertEqual(c, b'hee')
        c = b.translate(None, b'e')
        self.assertEqual(c, b'hllo')
        c = ba.translate(None, b'e')
        self.assertEqual(c, b'hllo')
        self.assertRaises(TypeError, b.translate, None, None)
        self.assertRaises(TypeError, ba.translate, None, None)
    def test_split_bytearray(self):
        self.assertEqual(b'a b'.split(memoryview(b' ')), [b'a', b'b'])
    def test_rsplit_bytearray(self):
        self.assertEqual(b'a b'.rsplit(memoryview(b' ')), [b'a', b'b'])
    def test_return_self(self):
        # bytearray.replace must always return a new bytearray
        b = bytearray()
        self.assertFalse(b.replace(b'', b'') is b)
    def test_compare(self):
        # Comparing bytes with str must emit BytesWarning when the
        # interpreter was started with -b/-bb.
        if sys.flags.bytes_warning:
            def bytes_warning():
                return test.support.check_warnings(('', BytesWarning))
            with bytes_warning():
                b'' == ''
            with bytes_warning():
                b'' != ''
            with bytes_warning():
                bytearray(b'') == ''
            with bytes_warning():
                bytearray(b'') != ''
        else:
            self.skipTest("BytesWarning is needed for this test: use -bb option")
# Optimizations:
# __iter__? (optimization)
# __reversed__? (optimization)
# XXX More string methods? (Those that don't use character properties)
# There are tests in string_tests.py that are more
# comprehensive for things like split, partition, etc.
# Unfortunately they are all bundled with tests that
# are not appropriate for bytes
# I've started porting some of those into bytearray_tests.py, we should port
# the rest that make sense (the code can be cleaned up to use modern
# unittest methods at the same time).
class BytearrayPEP3137Test(unittest.TestCase,
                           test.buffer_tests.MixinBytesBufferCommonTests):
    def marshal(self, x):
        # Hook for the mixin: build objects of the type under test.
        return bytearray(x)
    def test_returns_new_copy(self):
        val = self.marshal(b'1234')
        # On immutable types these MAY return a reference to themselves
        # but on mutable types like bytearray they MUST return a new copy.
        for methname in ('zfill', 'rjust', 'ljust', 'center'):
            method = getattr(val, methname)
            newval = method(3)
            self.assertEqual(val, newval)
            self.assertTrue(val is not newval,
                            methname+' returned self on a mutable object')
        for expr in ('val.split()[0]', 'val.rsplit()[0]',
                     'val.partition(b".")[0]', 'val.rpartition(b".")[2]',
                     'val.splitlines()[0]', 'val.replace(b"", b"")'):
            newval = eval(expr)
            self.assertEqual(val, newval)
            self.assertTrue(val is not newval,
                            expr+' returned val on a mutable object')
        # join() on an empty separator must also copy.
        sep = self.marshal(b'')
        newval = sep.join([val])
        self.assertEqual(val, newval)
        self.assertIsNot(val, newval)
class FixedStringTest(test.string_tests.BaseTest):
    def fixtype(self, obj):
        # Convert str fixtures from the shared string tests into bytes so
        # the suite can run against bytes/bytearray.
        if isinstance(obj, str):
            return obj.encode("utf-8")
        return super().fixtype(obj)
    # Currently the bytes containment testing uses a single integer
    # value. This may not be the final design, but until then these
    # str-oriented tests are not valid for bytes, so they are disabled.
    def test_contains(self):
        pass
    def test_expandtabs(self):
        pass
    def test_upper(self):
        pass
    def test_lower(self):
        pass
class ByteArrayAsStringTest(FixedStringTest, unittest.TestCase):
    # Run the shared string tests against bytearray.
    type2test = bytearray
    contains_bytes = True
class BytesAsStringTest(FixedStringTest, unittest.TestCase):
    # Run the shared string tests against bytes.
    type2test = bytes
    contains_bytes = True
class SubclassTest:
    # Shared tests for subclasses of bytes/bytearray.  Concrete test
    # classes set type2test (the base type) and subclass2test (the
    # subclass under test).
    def test_basic(self):
        self.assertTrue(issubclass(self.subclass2test, self.type2test))
        self.assertIsInstance(self.subclass2test(), self.type2test)
        a, b = b"abcd", b"efgh"
        _a, _b = self.subclass2test(a), self.subclass2test(b)
        # test comparison operators with subclass instances
        self.assertTrue(_a == _a)
        self.assertTrue(_a != _b)
        self.assertTrue(_a < _b)
        self.assertTrue(_a <= _b)
        self.assertTrue(_b >= _a)
        self.assertTrue(_b > _a)
        self.assertTrue(_a is not a)
        # test concat of subclass instances
        self.assertEqual(a + b, _a + _b)
        self.assertEqual(a + b, a + _b)
        self.assertEqual(a + b, _a + b)
        # test repeat
        self.assertTrue(a*5 == _a*5)
    def test_join(self):
        # Make sure join returns a NEW object for single item sequences
        # involving a subclass.
        # Make sure that it is of the appropriate type.
        s1 = self.subclass2test(b"abcd")
        s2 = self.type2test().join([s1])
        self.assertTrue(s1 is not s2)
        self.assertTrue(type(s2) is self.type2test, type(s2))
        # Test reverse, calling join on subclass
        s3 = s1.join([b"abcd"])
        self.assertTrue(type(s3) is self.type2test)
    def test_pickle(self):
        # Pickling must preserve the subclass type and instance attributes
        # under every protocol.
        a = self.subclass2test(b"abcd")
        a.x = 10
        a.y = self.subclass2test(b"efgh")
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            b = pickle.loads(pickle.dumps(a, proto))
            self.assertNotEqual(id(a), id(b))
            self.assertEqual(a, b)
            self.assertEqual(a.x, b.x)
            self.assertEqual(a.y, b.y)
            self.assertEqual(type(a), type(b))
            self.assertEqual(type(a.y), type(b.y))
    def test_copy(self):
        # copy.copy/deepcopy must also preserve subclass type and
        # instance attributes.
        a = self.subclass2test(b"abcd")
        a.x = 10
        a.y = self.subclass2test(b"efgh")
        for copy_method in (copy.copy, copy.deepcopy):
            b = copy_method(a)
            self.assertNotEqual(id(a), id(b))
            self.assertEqual(a, b)
            self.assertEqual(a.x, b.x)
            self.assertEqual(a.y, b.y)
            self.assertEqual(type(a), type(b))
            self.assertEqual(type(a.y), type(b.y))
class ByteArraySubclass(bytearray):
    # Trivial subclass used by the SubclassTest cases below.
    pass
class BytesSubclass(bytes):
    # Trivial subclass used by the SubclassTest cases below.
    pass
class ByteArraySubclassTest(SubclassTest, unittest.TestCase):
    type2test = bytearray
    subclass2test = ByteArraySubclass
    def test_init_override(self):
        # A subclass may define __init__ with a different signature;
        # construction must honor it (here the extra leading argument is
        # consumed by the subclass and the rest forwarded to bytearray).
        class subclass(bytearray):
            def __init__(me, newarg=1, *args, **kwargs):
                bytearray.__init__(me, *args, **kwargs)
        x = subclass(4, b"abcd")
        x = subclass(4, source=b"abcd")
        self.assertEqual(x, b"abcd")
        x = subclass(newarg=4, source=b"abcd")
        self.assertEqual(x, b"abcd")
class BytesSubclassTest(SubclassTest, unittest.TestCase):
    # Same subclass checks, run against the immutable bytes type.
    type2test = bytes
    subclass2test = BytesSubclass
# Standard unittest entry point.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "59b63c06d73b04c45682589902b1904e",
"timestamp": "",
"source": "github",
"line_count": 1433,
"max_line_length": 765,
"avg_line_length": 38.65317515701326,
"alnum_prop": 0.5657699945838599,
"repo_name": "OptimusGitEtna/RestSymf",
"id": "43b6c824a8383a33567b734c9434d8895e9d795f",
"size": "55390",
"binary": false,
"copies": "58",
"ref": "refs/heads/master",
"path": "Python-3.4.2/Lib/test/test_bytes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "594205"
},
{
"name": "C",
"bytes": "15348597"
},
{
"name": "C++",
"bytes": "65109"
},
{
"name": "CSS",
"bytes": "12039"
},
{
"name": "Common Lisp",
"bytes": "24481"
},
{
"name": "JavaScript",
"bytes": "10597"
},
{
"name": "Makefile",
"bytes": "9444"
},
{
"name": "Objective-C",
"bytes": "1390141"
},
{
"name": "PHP",
"bytes": "93070"
},
{
"name": "PowerShell",
"bytes": "1420"
},
{
"name": "Prolog",
"bytes": "557"
},
{
"name": "Python",
"bytes": "24018306"
},
{
"name": "Shell",
"bytes": "440753"
},
{
"name": "TeX",
"bytes": "323102"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand
from django.db.models import Q
from contactbox.models import Message
from datetime import datetime, timedelta
class Command(BaseCommand):
    """Send reminder notifications for unread contact messages."""
    help = "Remind about Contact message"
    args = ''
    def handle(self, *args, **options):
        # A message qualifies when it is unread and was either never
        # notified or last notified at least one day ago.
        cutoff = datetime.now() - timedelta(days=1)
        never_notified = Q(notification_date__isnull=True)
        stale = Q(notification_date__lte=cutoff)
        pending = Message.objects.filter(never_notified | stale, unread=True)
        for message in pending:
            message.notify()
| {
"content_hash": "aa4407849218cd30256bf0fba76a96a2",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 114,
"avg_line_length": 31.555555555555557,
"alnum_prop": 0.6496478873239436,
"repo_name": "ArabellaTech/contactBox",
"id": "d42bf44259f964ba13428cd5fd743266a3b32c81",
"size": "592",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contactbox/management/commands/remind_contact.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "904"
},
{
"name": "Python",
"bytes": "18589"
}
],
"symlink_target": ""
} |
"""
Implements a Nexus-OS NETCONF over SSHv2 API Client
"""
import re
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from neutron.i18n import _LW
from networking_cisco.plugins.ml2.drivers.cisco.nexus import (
config as conf)
from networking_cisco.plugins.ml2.drivers.cisco.nexus import (
constants as const)
from networking_cisco.plugins.ml2.drivers.cisco.nexus import (
exceptions as cexc)
from networking_cisco.plugins.ml2.drivers.cisco.nexus import (
nexus_snippets as snipp)
LOG = logging.getLogger(__name__)
class CiscoNexusDriver(object):
"""Nexus Driver Main Class."""
    def __init__(self):
        # ncclient.manager module; imported lazily (see _import_ncclient).
        self.ncclient = None
        # Per-switch configuration loaded from the ml2 config file.
        self.nexus_switches = conf.ML2MechCiscoConfig.nexus_dict
        # Cache of open ncclient sessions, keyed by switch IP address.
        self.connections = {}
        # Close each ssh session after use when the total number of
        # neutron workers could otherwise exceed the switch's limit on
        # concurrent ssh sessions.
        self._close_ssh_session = False if (
            (cfg.CONF.rpc_workers + cfg.CONF.api_workers) <
            const.MAX_NEXUS_SSH_SESSIONS) else True
    def _import_ncclient(self):
        """Import the NETCONF client (ncclient) module.
        The ncclient module is not installed as part of the normal Neutron
        distributions. It is imported dynamically in this module so that
        the import can be mocked, allowing unit testing without requiring
        the installation of ncclient.
        :returns: the ncclient.manager module
        """
        return importutils.import_module('ncclient.manager')
    def _get_close_ssh_session(self):
        # True when ssh sessions should be torn down after each config
        # request (see the worker-count check in __init__).
        return self._close_ssh_session
def _close_session(self, mgr, nexus_host):
"""Close the connection to the nexus switch."""
if mgr:
self.connections.pop(nexus_host, None)
mgr.close_session()
    def _get_config(self, nexus_host, filter=''):
        """Get Nexus Host Configuration:
        :param nexus_host: IP address of switch
        :param filter: filter string in XML format
        :returns: Configuration requested in string format
        :raises: NexusConfigFailed: wraps any error raised while talking
                                    to the switch
        """
        try:
            # If exception raised in connect, mgr left unassigned
            # resulting in error during exception handling
            mgr = None
            mgr = self.nxos_connect(nexus_host)
            data_xml = mgr.get(filter=('subtree', filter)).data_xml
            return data_xml
        except Exception as e:
            # Best-effort close of the (possibly broken) session before
            # re-raising as a Neutron exception.
            try:
                self._close_session(mgr, nexus_host)
            except Exception:
                pass
            raise cexc.NexusConfigFailed(nexus_host=nexus_host, config=filter,
                                         exc=e)
    def _edit_config(self, nexus_host, target='running', config='',
                     allowed_exc_strs=None, check_to_close_session=True):
        """Modify switch config for a target config type.
        :param nexus_host: IP address of switch to configure
        :param target: Target config type
        :param config: Configuration string in XML format
        :param allowed_exc_strs: Exceptions which have any of these strings
                                 as a subset of their exception message
                                 (str(exception)) can be ignored
        :param check_to_close_session: Set to False when configured to close
                                       the ssh session is not to be checked.
        :raises: NexusConfigFailed: if _edit_config() encountered an exception
                                    not containing one of allowed_exc_strs
        """
        if not allowed_exc_strs:
            allowed_exc_strs = []
        try:
            # If exception raised in connect, mgr left unassigned
            # resulting in error during exception handling
            mgr = None
            mgr = self.nxos_connect(nexus_host)
            LOG.debug("NexusDriver edit config: %s", config)
            if mgr:
                mgr.edit_config(target=target, config=config)
        except Exception as e:
            # Swallow the error entirely when its message matches one of
            # the caller-supplied allowable strings.
            for exc_str in allowed_exc_strs:
                # NOTE(review): 'unicode' is the Python 2 text type; a
                # Python 3 port would need str here.
                if exc_str in unicode(e):
                    return
            try:
                self._close_session(mgr, nexus_host)
            except Exception:
                pass
            # Raise a Neutron exception. Include a description of
            # the original ncclient exception.
            raise cexc.NexusConfigFailed(nexus_host=nexus_host, config=config,
                                         exc=e)
        # if configured, close the ncclient ssh session.
        if check_to_close_session and self._get_close_ssh_session():
            self._close_session(mgr, nexus_host)
    def nxos_connect(self, nexus_host):
        """Make SSH connection to the Nexus Switch.
        Reuses a cached, still-connected session when one exists;
        otherwise opens a new NETCONF-over-SSH session and caches it.
        :param nexus_host: IP address of the switch
        :returns: an ncclient manager connected to the switch
        :raises: NexusConnectFailed: wraps any connection error
        """
        if getattr(self.connections.get(nexus_host), 'connected', None):
            return self.connections[nexus_host]
        if not self.ncclient:
            self.ncclient = self._import_ncclient()
        # Per-switch credentials come from the ml2 config file.
        nexus_ssh_port = int(self.nexus_switches[nexus_host, 'ssh_port'])
        nexus_user = self.nexus_switches[nexus_host, const.USERNAME]
        nexus_password = self.nexus_switches[nexus_host, const.PASSWORD]
        hostkey_verify = cfg.CONF.ml2_cisco.host_key_checks
        try:
            try:
                # With new ncclient version, we can pass device_params...
                man = self.ncclient.connect(host=nexus_host,
                                            port=nexus_ssh_port,
                                            username=nexus_user,
                                            password=nexus_password,
                                            hostkey_verify=hostkey_verify,
                                            device_params={"name": "nexus"})
            except TypeError:
                # ... but if that causes an error, we appear to have the old
                # ncclient installed, which doesn't understand this parameter.
                man = self.ncclient.connect(host=nexus_host,
                                            port=nexus_ssh_port,
                                            username=nexus_user,
                                            password=nexus_password,
                                            hostkey_verify=hostkey_verify)
        except Exception as e:
            # Raise a Neutron exception. Include a description of
            # the original ncclient exception.
            raise cexc.NexusConnectFailed(nexus_host=nexus_host, exc=e)
        self.connections[nexus_host] = man
        return self.connections[nexus_host]
def create_xml_snippet(self, customized_config):
"""Create XML snippet.
Creates the Proper XML structure for the Nexus Switch Configuration.
and also does 'copy run start' if configured to do so. This
latter command allows configuration to persist on the switch after
reboot.
"""
if conf.cfg.CONF.ml2_cisco.persistent_switch_config:
customized_config += (snipp.EXEC_SAVE_CONF_SNIPPET)
conf_xml_snippet = snipp.EXEC_CONF_SNIPPET % (customized_config)
return conf_xml_snippet
def get_interface_switch_trunk_allowed(self, nexus_host,
intf_type, interface):
"""Given the nexus host and specific interface data, get the
interface data from host and determine if
'switchport trunk allowed vlan' is configured.
:param nexus_host: IP address of Nexus switch
:param intf_type: String which specifies interface type.
example: ethernet
:param interface: String indicating which interface.
example: 1/19
:returns False: On error or when config CLI not present.
True: When config CLI is present.
"""
confstr = snipp.EXEC_GET_INTF_SNIPPET % (intf_type, interface)
response = self._get_config(nexus_host, confstr)
LOG.debug("GET call returned interface %(if_type)s %(interface)s "
"config", {'if_type': intf_type, 'interface': interface})
if response and re.search("switchport trunk allowed vlan", response):
return True
return False
def get_version(self, nexus_host):
"""Given the nexus host, get the version data.
:param nexus_host: IP address of Nexus switch
:returns version number
"""
confstr = snipp.EXEC_GET_VERSION_SNIPPET
response = self._get_config(nexus_host, confstr)
LOG.debug("GET call returned version")
version = None
if response:
version = re.findall(
"\<sys_ver_str\>([\x20-\x7e]+)\<\/sys_ver_str\>", response)
return version
def get_nexus_type(self, nexus_host):
"""Given the nexus host, get the type of Nexus switch.
:param nexus_host: IP address of Nexus switch
:returns Nexus type
"""
confstr = snipp.EXEC_GET_INVENTORY_SNIPPET
response = self._get_config(nexus_host, confstr)
if response:
nexus_type = re.findall(
"\<[mod:]*desc\>\"*Nexus\s*(\d)\d+\s*[0-9A-Z]+\s*"
"[cC]hassis\s*\"*\<\/[mod:]*desc\>",
response)
if len(nexus_type) > 0:
LOG.debug("GET call returned Nexus type %d",
int(nexus_type[0]))
return int(nexus_type[0])
LOG.warn(_LW("GET call failed to return Nexus type"))
return -1
def create_vlan(self, nexus_host, vlanid, vlanname, vni):
"""Create a VLAN on a Nexus Switch.
Creates a VLAN given the VLAN ID, name and possible VxLAN ID.
"""
if vni:
snippet = (snipp.CMD_VLAN_CONF_VNSEGMENT_SNIPPET %
(vlanid, vlanname, vni))
else:
snippet = snipp.CMD_VLAN_CONF_SNIPPET % (vlanid, vlanname)
confstr = self.create_xml_snippet(snippet)
LOG.debug("NexusDriver: ")
self._edit_config(nexus_host, target='running', config=confstr,
allowed_exc_strs=["VLAN with the same name exists"],
check_to_close_session=False)
# Enable VLAN active and no-shutdown states. Some versions of
# Nexus switch do not allow state changes for the extended VLAN
# range (1006-4094), but these errors can be ignored (default
# values are appropriate).
for snippet in [snipp.CMD_VLAN_ACTIVE_SNIPPET,
snipp.CMD_VLAN_NO_SHUTDOWN_SNIPPET]:
try:
check_to_close_session = False if (
snippet == snipp.CMD_VLAN_ACTIVE_SNIPPET) else True
confstr = self.create_xml_snippet(snippet % vlanid)
self._edit_config(
nexus_host,
target='running',
config=confstr,
allowed_exc_strs=["Can't modify state for extended",
"Command is only allowed on VLAN"],
check_to_close_session=check_to_close_session)
except cexc.NexusConfigFailed:
with excutils.save_and_reraise_exception():
self.delete_vlan(nexus_host, vlanid)
def delete_vlan(self, nexus_host, vlanid):
"""Delete a VLAN on Nexus Switch given the VLAN ID."""
confstr = snipp.CMD_NO_VLAN_CONF_SNIPPET % vlanid
confstr = self.create_xml_snippet(confstr)
self._edit_config(nexus_host, target='running', config=confstr,
allowed_exc_strs=["None of the VLANs exist"])
def build_intf_confstr(self, snippet, intf_type, interface, vlanid):
"""Build the VLAN config string xml snippet to be used."""
confstr = snippet % (intf_type, interface, vlanid, intf_type)
confstr = self.create_xml_snippet(confstr)
return confstr
def enable_vlan_on_trunk_int(self, nexus_host, vlanid, intf_type,
interface):
"""Enable a VLAN on a trunk interface.
:param nexus_host: IP address of Nexus switch
:param vlanid: Vlanid to add to interface
:param intf_type: String which specifies interface type.
example: ethernet
:param interface: String indicating which interface.
example: 1/19
:returns None: if config was successfully
Exception object: See _edit_config for details
"""
response = self.get_interface_switch_trunk_allowed(nexus_host,
intf_type, interface)
#
# If 'switchport trunk allowed vlan' not configured on the
# switch, configure the VLAN onto the interface without the
# 'add' keyword to define initial vlan config; otherwise
# include the 'add' keyword.
#
snippet = (snipp.CMD_INT_VLAN_SNIPPET if (response is False)
else snipp.CMD_INT_VLAN_ADD_SNIPPET)
confstr = self.build_intf_confstr(
snippet=snippet,
intf_type=intf_type,
interface=interface,
vlanid=vlanid
)
self._edit_config(nexus_host, target='running',
config=confstr)
LOG.debug("Successfully added switchport trunk vlan %(vlanid)s "
"on int %(if_type)s %(interface)s.",
{'vlanid': vlanid, 'if_type': intf_type,
'interface': interface})
def disable_vlan_on_trunk_int(self, nexus_host, vlanid, intf_type,
interface):
"""Disable a VLAN on a trunk interface."""
confstr = (snipp.CMD_NO_VLAN_INT_SNIPPET %
(intf_type, interface, vlanid, intf_type))
confstr = self.create_xml_snippet(confstr)
self._edit_config(nexus_host, target='running', config=confstr)
def create_and_trunk_vlan(self, nexus_host, vlan_id, vlan_name,
intf_type, nexus_port, vni):
"""Create VLAN and trunk it on the specified ports."""
self.create_vlan(nexus_host, vlan_id, vlan_name, vni)
LOG.debug("NexusDriver created VLAN: %s", vlan_id)
if nexus_port:
self.enable_vlan_on_trunk_int(nexus_host, vlan_id, intf_type,
nexus_port)
def create_vlan_svi(self, nexus_host, vlan_id, gateway_ip):
"""Create VLAN vn_segment."""
confstr = snipp.CMD_VLAN_SVI_SNIPPET % (vlan_id, gateway_ip)
confstr = self.create_xml_snippet(confstr)
LOG.debug("NexusDriver: ")
self._edit_config(nexus_host, target='running', config=confstr)
def delete_vlan_svi(self, nexus_host, vlan_id):
"""Delete VLAN vn_segment."""
confstr = snipp.CMD_NO_VLAN_SVI_SNIPPET % vlan_id
confstr = self.create_xml_snippet(confstr)
LOG.debug("NexusDriver: ")
self._edit_config(nexus_host, target='running', config=confstr)
def enable_vxlan_feature(self, nexus_host, nve_int_num, src_intf):
"""Enable VXLAN on the switch."""
# Configure the "feature" commands and NVE interface
# (without "member" subcommand configuration).
# The Nexus 9K will not allow the "interface nve" configuration
# until the "feature nv overlay" command is issued and installed.
# To get around the N9K failing on the "interface nve" command
# send the two XML snippets down separately.
confstr = self.create_xml_snippet(snipp.CMD_FEATURE_VXLAN_SNIPPET)
LOG.debug("NexusDriver: ")
self._edit_config(nexus_host, config=confstr)
confstr = self.create_xml_snippet((snipp.CMD_INT_NVE_SNIPPET %
(nve_int_num, src_intf)))
LOG.debug("NexusDriver: ")
self._edit_config(nexus_host, config=confstr)
def disable_vxlan_feature(self, nexus_host):
"""Disable VXLAN on the switch."""
# Removing the "feature" commands also removes the NVE interface.
confstr = self.create_xml_snippet(snipp.CMD_NO_FEATURE_VXLAN_SNIPPET)
LOG.debug("NexusDriver: ")
self._edit_config(nexus_host, config=confstr)
def create_nve_member(self, nexus_host, nve_int_num, vni, mcast_group):
"""Add a member configuration to the NVE interface."""
confstr = self.create_xml_snippet((snipp.CMD_INT_NVE_MEMBER_SNIPPET %
(nve_int_num, vni, mcast_group)))
LOG.debug("NexusDriver: ")
self._edit_config(nexus_host, config=confstr)
def delete_nve_member(self, nexus_host, nve_int_num, vni):
"""Delete a member configuration on the NVE interface."""
confstr = self.create_xml_snippet((snipp.CMD_INT_NVE_NO_MEMBER_SNIPPET
% (nve_int_num, vni)))
LOG.debug("NexusDriver: ")
self._edit_config(nexus_host, config=confstr)
| {
"content_hash": "5664798774c067dc647bbd9e6807ff04",
"timestamp": "",
"source": "github",
"line_count": 397,
"max_line_length": 78,
"avg_line_length": 42.82367758186398,
"alnum_prop": 0.5771425210281748,
"repo_name": "hareeshpc/networking-cisco",
"id": "e937ccce1efd9c3747e9c5c797a80462f0df6119",
"size": "17637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "networking_cisco/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "1234061"
},
{
"name": "Shell",
"bytes": "42688"
}
],
"symlink_target": ""
} |
# Sphinx build configuration for the django-extensions documentation.
# Settings left commented out below document Sphinx's defaults; uncomment
# and edit them to override.
# No Sphinx extensions are enabled for this build.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'django-extensions'
copyright = 'Copyright (C) 2008-2015 Michael Trier, Bas van Oostveen and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.2'
# The full version, including alpha/beta/rc tags.
release = '3.2.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-extensionsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [(
    'index', 'django-extensions.tex', 'django-extensions Documentation',
    'Michael Trier, Bas van Oostveen, and contributors', 'manual'
), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
| {
"content_hash": "fe4f0e4fda035ce4c3704aa3aaba93e9",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 86,
"avg_line_length": 32.45294117647059,
"alnum_prop": 0.7107123436650353,
"repo_name": "django-extensions/django-extensions",
"id": "442c1986e18cd861bf932ad765350b5bc1812a2d",
"size": "6470",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "740"
},
{
"name": "HTML",
"bytes": "2126"
},
{
"name": "JavaScript",
"bytes": "41410"
},
{
"name": "Makefile",
"bytes": "1257"
},
{
"name": "Python",
"bytes": "826197"
}
],
"symlink_target": ""
} |
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.25
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1ISCSIVolumeSource(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type string (drives to_dict()).
    openapi_types = {
        'chap_auth_discovery': 'bool',
        'chap_auth_session': 'bool',
        'fs_type': 'str',
        'initiator_name': 'str',
        'iqn': 'str',
        'iscsi_interface': 'str',
        'lun': 'int',
        'portals': 'list[str]',
        'read_only': 'bool',
        'secret_ref': 'V1LocalObjectReference',
        'target_portal': 'str'
    }
    # Python attribute name -> JSON key in the API definition.
    attribute_map = {
        'chap_auth_discovery': 'chapAuthDiscovery',
        'chap_auth_session': 'chapAuthSession',
        'fs_type': 'fsType',
        'initiator_name': 'initiatorName',
        'iqn': 'iqn',
        'iscsi_interface': 'iscsiInterface',
        'lun': 'lun',
        'portals': 'portals',
        'read_only': 'readOnly',
        'secret_ref': 'secretRef',
        'target_portal': 'targetPortal'
    }
    def __init__(self, chap_auth_discovery=None, chap_auth_session=None, fs_type=None, initiator_name=None, iqn=None, iscsi_interface=None, lun=None, portals=None, read_only=None, secret_ref=None, target_portal=None, local_vars_configuration=None):  # noqa: E501
        """V1ISCSIVolumeSource - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._chap_auth_discovery = None
        self._chap_auth_session = None
        self._fs_type = None
        self._initiator_name = None
        self._iqn = None
        self._iscsi_interface = None
        self._lun = None
        self._portals = None
        self._read_only = None
        self._secret_ref = None
        self._target_portal = None
        self.discriminator = None
        # Optional fields are assigned only when supplied; iqn, lun and
        # target_portal are assigned unconditionally (their setters
        # reject None under client-side validation).
        if chap_auth_discovery is not None:
            self.chap_auth_discovery = chap_auth_discovery
        if chap_auth_session is not None:
            self.chap_auth_session = chap_auth_session
        if fs_type is not None:
            self.fs_type = fs_type
        if initiator_name is not None:
            self.initiator_name = initiator_name
        self.iqn = iqn
        if iscsi_interface is not None:
            self.iscsi_interface = iscsi_interface
        self.lun = lun
        if portals is not None:
            self.portals = portals
        if read_only is not None:
            self.read_only = read_only
        if secret_ref is not None:
            self.secret_ref = secret_ref
        self.target_portal = target_portal
    @property
    def chap_auth_discovery(self):
        """Gets the chap_auth_discovery of this V1ISCSIVolumeSource.  # noqa: E501
        chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication  # noqa: E501
        :return: The chap_auth_discovery of this V1ISCSIVolumeSource.  # noqa: E501
        :rtype: bool
        """
        return self._chap_auth_discovery
    @chap_auth_discovery.setter
    def chap_auth_discovery(self, chap_auth_discovery):
        """Sets the chap_auth_discovery of this V1ISCSIVolumeSource.
        chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication  # noqa: E501
        :param chap_auth_discovery: The chap_auth_discovery of this V1ISCSIVolumeSource.  # noqa: E501
        :type: bool
        """
        self._chap_auth_discovery = chap_auth_discovery
    @property
    def chap_auth_session(self):
        """Gets the chap_auth_session of this V1ISCSIVolumeSource.  # noqa: E501
        chapAuthSession defines whether support iSCSI Session CHAP authentication  # noqa: E501
        :return: The chap_auth_session of this V1ISCSIVolumeSource.  # noqa: E501
        :rtype: bool
        """
        return self._chap_auth_session
    @chap_auth_session.setter
    def chap_auth_session(self, chap_auth_session):
        """Sets the chap_auth_session of this V1ISCSIVolumeSource.
        chapAuthSession defines whether support iSCSI Session CHAP authentication  # noqa: E501
        :param chap_auth_session: The chap_auth_session of this V1ISCSIVolumeSource.  # noqa: E501
        :type: bool
        """
        self._chap_auth_session = chap_auth_session
    @property
    def fs_type(self):
        """Gets the fs_type of this V1ISCSIVolumeSource.  # noqa: E501
        fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi  # noqa: E501
        :return: The fs_type of this V1ISCSIVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._fs_type
    @fs_type.setter
    def fs_type(self, fs_type):
        """Sets the fs_type of this V1ISCSIVolumeSource.
        fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi  # noqa: E501
        :param fs_type: The fs_type of this V1ISCSIVolumeSource.  # noqa: E501
        :type: str
        """
        self._fs_type = fs_type
    @property
    def initiator_name(self):
        """Gets the initiator_name of this V1ISCSIVolumeSource.  # noqa: E501
        initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface <target portal>:<volume name> will be created for the connection.  # noqa: E501
        :return: The initiator_name of this V1ISCSIVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._initiator_name
    @initiator_name.setter
    def initiator_name(self, initiator_name):
        """Sets the initiator_name of this V1ISCSIVolumeSource.
        initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface <target portal>:<volume name> will be created for the connection.  # noqa: E501
        :param initiator_name: The initiator_name of this V1ISCSIVolumeSource.  # noqa: E501
        :type: str
        """
        self._initiator_name = initiator_name
    @property
    def iqn(self):
        """Gets the iqn of this V1ISCSIVolumeSource.  # noqa: E501
        iqn is the target iSCSI Qualified Name.  # noqa: E501
        :return: The iqn of this V1ISCSIVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._iqn
    @iqn.setter
    def iqn(self, iqn):
        """Sets the iqn of this V1ISCSIVolumeSource.
        iqn is the target iSCSI Qualified Name.  # noqa: E501
        :param iqn: The iqn of this V1ISCSIVolumeSource.  # noqa: E501
        :type: str
        """
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and iqn is None:  # noqa: E501
            raise ValueError("Invalid value for `iqn`, must not be `None`")  # noqa: E501
        self._iqn = iqn
    @property
    def iscsi_interface(self):
        """Gets the iscsi_interface of this V1ISCSIVolumeSource.  # noqa: E501
        iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).  # noqa: E501
        :return: The iscsi_interface of this V1ISCSIVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._iscsi_interface
    @iscsi_interface.setter
    def iscsi_interface(self, iscsi_interface):
        """Sets the iscsi_interface of this V1ISCSIVolumeSource.
        iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).  # noqa: E501
        :param iscsi_interface: The iscsi_interface of this V1ISCSIVolumeSource.  # noqa: E501
        :type: str
        """
        self._iscsi_interface = iscsi_interface
    @property
    def lun(self):
        """Gets the lun of this V1ISCSIVolumeSource.  # noqa: E501
        lun represents iSCSI Target Lun number.  # noqa: E501
        :return: The lun of this V1ISCSIVolumeSource.  # noqa: E501
        :rtype: int
        """
        return self._lun
    @lun.setter
    def lun(self, lun):
        """Sets the lun of this V1ISCSIVolumeSource.
        lun represents iSCSI Target Lun number.  # noqa: E501
        :param lun: The lun of this V1ISCSIVolumeSource.  # noqa: E501
        :type: int
        """
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and lun is None:  # noqa: E501
            raise ValueError("Invalid value for `lun`, must not be `None`")  # noqa: E501
        self._lun = lun
    @property
    def portals(self):
        """Gets the portals of this V1ISCSIVolumeSource.  # noqa: E501
        portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).  # noqa: E501
        :return: The portals of this V1ISCSIVolumeSource.  # noqa: E501
        :rtype: list[str]
        """
        return self._portals
    @portals.setter
    def portals(self, portals):
        """Sets the portals of this V1ISCSIVolumeSource.
        portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).  # noqa: E501
        :param portals: The portals of this V1ISCSIVolumeSource.  # noqa: E501
        :type: list[str]
        """
        self._portals = portals
    @property
    def read_only(self):
        """Gets the read_only of this V1ISCSIVolumeSource.  # noqa: E501
        readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.  # noqa: E501
        :return: The read_only of this V1ISCSIVolumeSource.  # noqa: E501
        :rtype: bool
        """
        return self._read_only
    @read_only.setter
    def read_only(self, read_only):
        """Sets the read_only of this V1ISCSIVolumeSource.
        readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.  # noqa: E501
        :param read_only: The read_only of this V1ISCSIVolumeSource.  # noqa: E501
        :type: bool
        """
        self._read_only = read_only
    @property
    def secret_ref(self):
        """Gets the secret_ref of this V1ISCSIVolumeSource.  # noqa: E501
        :return: The secret_ref of this V1ISCSIVolumeSource.  # noqa: E501
        :rtype: V1LocalObjectReference
        """
        return self._secret_ref
    @secret_ref.setter
    def secret_ref(self, secret_ref):
        """Sets the secret_ref of this V1ISCSIVolumeSource.
        :param secret_ref: The secret_ref of this V1ISCSIVolumeSource.  # noqa: E501
        :type: V1LocalObjectReference
        """
        self._secret_ref = secret_ref
    @property
    def target_portal(self):
        """Gets the target_portal of this V1ISCSIVolumeSource.  # noqa: E501
        targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).  # noqa: E501
        :return: The target_portal of this V1ISCSIVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._target_portal
    @target_portal.setter
    def target_portal(self, target_portal):
        """Sets the target_portal of this V1ISCSIVolumeSource.
        targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).  # noqa: E501
        :param target_portal: The target_portal of this V1ISCSIVolumeSource.  # noqa: E501
        :type: str
        """
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and target_portal is None:  # noqa: E501
            raise ValueError("Invalid value for `target_portal`, must not be `None`")  # noqa: E501
        self._target_portal = target_portal
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1ISCSIVolumeSource):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1ISCSIVolumeSource):
            return True
        return self.to_dict() != other.to_dict()
| {
"content_hash": "a589db44e4e05deeb75e01e0ccebc2c0",
"timestamp": "",
"source": "github",
"line_count": 401,
"max_line_length": 328,
"avg_line_length": 35.52618453865337,
"alnum_prop": 0.6228414993682437,
"repo_name": "kubernetes-client/python",
"id": "8a55ca2011a1cbac8d018380a772397cbf1c331b",
"size": "14263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/client/models/v1_iscsi_volume_source.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "356"
},
{
"name": "Python",
"bytes": "11454299"
},
{
"name": "Shell",
"bytes": "43108"
}
],
"symlink_target": ""
} |
from datetime import datetime
from sqlalchemy import desc
from flask_login import UserMixin
from werkzeug.security import check_password_hash, generate_password_hash
from reason import db
from sqlalchemy.util import KeyedTuple
class User(db.Model, UserMixin):
    """Application user with salted-hash password authentication."""
    __bind_key__ = 'userDb'
    __tablename__ = 'User'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), unique=True)
    email = db.Column(db.String(120), unique=True)
    password_hash = db.Column(db.String)
    # One-to-many: a user may file multiple reports.
    reports = db.relationship('Report', backref='User', lazy='dynamic')
    @property
    def password(self):
        """The plain-text password is never stored and cannot be read."""
        raise AttributeError('password: write-only field')
    @password.setter
    def password(self, password):
        # Persist only a salted hash, never the plain-text password.
        self.password_hash = generate_password_hash(password)
    def check_password(self, password):
        """Return True when *password* matches the stored hash."""
        return check_password_hash(self.password_hash, password)
    @staticmethod
    def get_by_username(username):
        """Look up a user by username; returns None when not found."""
        # Removed stray debug `print username`: Python 2-only syntax
        # (SyntaxError under Python 3) that also leaked queried
        # usernames to stdout.
        return User.query.filter_by(username=username).first()
    def __repr__(self):
        return " {} ".format(self.username)
class Report(db.Model):
    """A report filed by a user."""
    __bind_key__ = 'userDb'
    __tablename__ = 'Report'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('User.id'), nullable=False)
    # Pass the callable, not its result: `datetime.utcnow()` was evaluated
    # once at import time, stamping every row with the same frozen time.
    date = db.Column(db.DateTime, default=datetime.utcnow)
    description = db.Column(db.String(300))
    def __repr__(self):
        # Fixed AttributeError: the column is `user_id`, not `uid`.
        return "Report ID{} for UserID {}".format(self.id, self.user_id)
| {
"content_hash": "c1c262f94635995d27436ebdfca9317b",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 77,
"avg_line_length": 31.708333333333332,
"alnum_prop": 0.6780551905387647,
"repo_name": "pombredanne/reason",
"id": "180a661a4adc1d27301ebebc815e791b9796339f",
"size": "1522",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "reason/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23856"
},
{
"name": "HTML",
"bytes": "1672687"
},
{
"name": "Python",
"bytes": "118565"
},
{
"name": "Shell",
"bytes": "65"
}
],
"symlink_target": ""
} |
from daemonx.daemon import Daemon
class Daemon1(Daemon):
    """Example daemon that logs three configured strings each cycle."""
    def __init__(self, *args, **kwargs):
        super(Daemon1, self).__init__(*args, **kwargs)
        # string1/string2 come from this daemon's own conf section;
        # string3 is shared via the [common1] section of the global conf.
        self.string1 = self.conf['string1']
        self.string2 = self.conf['string2']
        self.string3 = self.global_conf['common1']['string3']
    def run_once(self):
        """Log the three configured strings once."""
        # Lazy %-style logger args: the message is only interpolated when
        # the INFO level is actually emitted.
        self.logger.info('%s %s %s',
                         self.string1, self.string2, self.string3)
| {
"content_hash": "feb8fd199f80b25ba21873770e12f4d1",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 61,
"avg_line_length": 32.61538461538461,
"alnum_prop": 0.5966981132075472,
"repo_name": "greglange/daemonx",
"id": "dcf32f1dd23e3230d2a9bf89e2bec6061efff367",
"size": "1002",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "daemonx/example/daemon1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26674"
}
],
"symlink_target": ""
} |
from flask import make_response, current_app
from ultrajson import dumps
def output_html(data, code=200, headers=None):
    """Make a Flask response with a text/html body.

    :param data: a dict (serialized to JSON text) or a ready string body
    :param code: HTTP status code
    :param headers: optional mapping of extra response headers
    :returns: a Flask response with Content-Type text/html
    """
    # isinstance instead of `type(data) is dict` so dict subclasses
    # (e.g. OrderedDict) are serialized as well.
    if isinstance(data, dict):
        data = dumps(data)
    resp = make_response(data, code)
    resp.headers.extend(headers or {})
    resp.headers['Content-Type'] = 'text/html; charset=utf-8'
    return resp
def output_json(data, code, headers=None):
    """Makes a Flask response with a JSON encoded body"""
    settings = current_app.config.get('RESTFUL_JSON', {})
    # In debug mode default to pretty-printed, key-sorted output unless
    # the application has already configured those settings.
    if current_app.debug:
        settings.setdefault('indent', 4)
        settings.setdefault('sort_keys', True)
    # always end the json dumps with a new line
    # see https://github.com/mitsuhiko/flask/pull/1262
    body = "%s\n" % dumps(data, **settings)
    resp = make_response(body, code)
    resp.headers.extend(headers or {})
    return resp
| {
"content_hash": "65eb6138de56d922ed7bdf9efda8af80",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 78,
"avg_line_length": 35.166666666666664,
"alnum_prop": 0.6691943127962086,
"repo_name": "openmaraude/APITaxi_utils",
"id": "b69410b64f2d4b2b1f95b1481fc066d569113be0",
"size": "1056",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "APITaxi_utils/representations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33634"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python3
# Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
from idlnode import *
def render(idl_node, indent_str='  '):
    """Render an IDLNode tree back to IDL source text.

    Args:
        idl_node: root node to render (typically an IDLFile).
        indent_str: string appended to the output once per indentation level.

    Returns:
        The rendered IDL as a single string.
    """
    output = []        # accumulated text fragments, joined at the end
    indent_stack = []  # one indent_str entry per open indentation level

    def begin_indent():
        indent_stack.append(indent_str)

    def end_indent():
        indent_stack.pop()

    def sort(nodes):
        # Deterministic output: order members by their id.
        return sorted(nodes, key=lambda node: node.id)

    def wln(node=None):
        """Writes the given node and adds a new line."""
        w(node)
        output.append('\n')

    def wsp(node):
        """Writes the given node and adds a space if there was output."""
        mark = len(output)
        w(node)
        if mark != len(output):
            w(' ')

    def w(node, list_separator=None):
        """Writes the given node.
        Args:
          node -- a string, IDLNode instance or a list of such.
          list_separator -- if provided, and node is a list,
            list_separator will be written between the list items.
        """
        if node is None:
            return
        elif isinstance(node, str):
            # Auto-indent only at the start of a fresh line.
            if output and output[-1].endswith('\n'):
                # Auto-indent.
                output.extend(indent_stack)
            output.append(node)
        elif isinstance(node, list):
            for i in range(0, len(node)):
                if i > 0:
                    w(list_separator)
                w(node[i])
        elif isinstance(node, IDLFile):
            w(node.interfaces)
            w(node.enums)
            w(node.typeDefs)
        elif isinstance(node, IDLModule):
            wsp(node.annotations)
            wsp(node.ext_attrs)
            wln('module %s {' % node.id)
            begin_indent()
            w(node.interfaces)
            w(node.enums)
            w(node.typeDefs)
            end_indent()
            wln('};')
        elif isinstance(node, IDLEnum):
            w('enum %s {}' % node.id)
            # TODO(antonm): emit values as well.
        elif isinstance(node, IDLInterface):
            if node.annotations:
                wln(node.annotations)
            if node.ext_attrs:
                wln(node.ext_attrs)
            w('interface %s' % node.id)
            # Double indent: one level for the parent-interface list, one for
            # the members; the first is popped right after the opening brace.
            begin_indent()
            begin_indent()
            if node.parents:
                wln(' :')
                w(node.parents, ',\n')
            wln(' {')
            end_indent()
            if node.constants:
                wln()
                wln('/* Constants */')
                w(sort(node.constants))
            if node.attributes:
                wln()
                wln('/* Attributes */')
                w(sort(node.attributes))
            if node.operations:
                wln()
                wln('/* Operations */')
                w(sort(node.operations))
            end_indent()
            wln('};')
        elif isinstance(node, IDLParentInterface):
            wsp(node.annotations)
            w(node.type.id)
        elif isinstance(node, IDLAnnotations):
            # Emit '@Name' or '@Name(arg=value, ...)' entries, space-separated.
            sep = ''
            for (name, annotation) in sorted(node.items()):
                w(sep)
                sep = ' '
                if annotation and len(annotation):
                    subRes = []
                    for (argName, argValue) in sorted(annotation.items()):
                        if argValue is None:
                            subRes.append(argName)
                        else:
                            subRes.append('%s=%s' % (argName, argValue))
                    w('@%s(%s)' % (name, ', '.join(subRes)))
                else:
                    w('@%s' % name)
        elif isinstance(node, IDLExtAttrs):
            # Extended attributes: '[A, B=value, ...]'.
            if len(node):
                w('[')
                i = 0
                for k in sorted(node):
                    if i > 0:
                        w(', ')
                    w(k)
                    v = node[k]
                    if v is not None:
                        if isinstance(v, IDLExtAttrFunctionValue):
                            if v.id:
                                w('=')
                            w(v)
                        elif isinstance(v, list):
                            # Only 'Constructor' may carry a list of values.
                            assert k == 'Constructor'
                            w(v[0])
                            for c in v[1:]:
                                w(', ')
                                w(k)
                                w(c)
                        else:
                            w('=%s' % v.__str__())
                    i += 1
                w(']')
        elif isinstance(node, IDLExtAttrFunctionValue):
            if node.id:
                w(node.id)
            w('(')
            w(node.arguments, ', ')
            w(')')
        elif isinstance(node, IDLAttribute):
            wsp(node.annotations)
            wsp(node.ext_attrs)
            if node.is_read_only:
                w('readonly ')
            w('attribute ')
            w(node.type.id)
            if (node.type.nullable):
                w('?')
            w(' ')
            w(node.id)
            wln(';')
        elif isinstance(node, IDLConstant):
            wsp(node.annotations)
            wsp(node.ext_attrs)
            wln('const %s %s = %s;' % (node.type.id, node.id, node.value))
        elif isinstance(node, IDLOperation):
            wsp(node.annotations)
            wsp(node.ext_attrs)
            if node.is_static:
                w('static ')
            if node.specials:
                w(node.specials, ' ')
                w(' ')
            w(node.type.id)
            if (node.type.nullable):
                w('?')
            w(' ')
            w(node.id)
            w('(')
            w(node.arguments, ', ')
            w(')')
            wln(';')
        elif isinstance(node, IDLArgument):
            wsp(node.ext_attrs)
            if (node.optional):
                w('optional ')
            w(node.type.id)
            if node.type.nullable:
                w('?')
            w(' %s' % node.id)
        else:
            raise TypeError("Expected str or IDLNode but %s found" % type(node))

    w(idl_node)
    return ''.join(output)
| {
"content_hash": "3490ff7433877c7c125a697696831f3b",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 80,
"avg_line_length": 32.5,
"alnum_prop": 0.41971153846153847,
"repo_name": "dart-lang/sdk",
"id": "ba095dd2871b456ec8f45df0ba2ed65efde9a83b",
"size": "6240",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tools/dom/scripts/idlrenderer.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "7081"
},
{
"name": "Batchfile",
"bytes": "4407"
},
{
"name": "C",
"bytes": "1393784"
},
{
"name": "C++",
"bytes": "22197561"
},
{
"name": "CMake",
"bytes": "1598"
},
{
"name": "CSS",
"bytes": "142589"
},
{
"name": "Dart",
"bytes": "142185469"
},
{
"name": "Dockerfile",
"bytes": "495"
},
{
"name": "GAP",
"bytes": "43658"
},
{
"name": "HTML",
"bytes": "746243"
},
{
"name": "Java",
"bytes": "637086"
},
{
"name": "JavaScript",
"bytes": "238808"
},
{
"name": "Makefile",
"bytes": "9320"
},
{
"name": "Objective-C++",
"bytes": "854"
},
{
"name": "Python",
"bytes": "1227015"
},
{
"name": "SCSS",
"bytes": "23561"
},
{
"name": "Shell",
"bytes": "139201"
},
{
"name": "TeX",
"bytes": "75730"
}
],
"symlink_target": ""
} |
"""
Provides features to support a Tintri storage array.
Servers in environments that have a Tintri store will have a "Tintri" tab on
their detail view. This tab exposes storage performance metrics as well as some
actions users can take on the server, such as taking a snapshot.
"""
import os
import datetime
import json
import time
import logging
from dateutil import parser
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.utils.html import format_html
from django.utils.translation import ugettext as _
from cbhooks.views import _format_action_html_response
from extensions.views import admin_extension, tab_extension, TabExtensionDelegate
from infrastructure.models import Server
from resourcehandlers.vmware.models import VsphereResourceHandler
from tabs.views import TabGroup
from utilities.decorators import dialog_view
from utilities.exceptions import CloudBoltException
from utilities.logger import ThreadLogger
from utilities.permissions import cbadmin_required
from utilities.templatetags import helper_tags
from xui.tintri.forms import TintriCloneSnapshotForm, TintriEndpointForm, TintriSnapshotForm
from xui.tintri.tintri_api import Tintri
logger = ThreadLogger(__name__)
"""
UI Extension view using Tintri rest API
"""
COLOR1 = '#2A17B1'
COLOR2 = '#FF9E00'
COLOR3 = '#00A67C'
def get_ci(server):
    """Return the stored Tintri connection info as a plain dict, or None.

    The ``server`` argument is unused but kept for interface compatibility
    with the callers in this module.
    """
    conn = Tintri().get_connection_info()
    if not conn:
        return None
    return {
        'ip': conn.ip,
        'username': conn.username,
        'password': conn.password,
    }
def get_session(server):
    """
    Get authenticated Tintri Session for the given server.

    Requires a ConnectionInfo object to be configured for the appliance;
    returns None when none is found.

    Args:
        server (obj): CB server object (unused; kept for interface parity)
    Returns:
        tintri: Tintri object, or None when no connection info exists
    """
    appliance = Tintri()
    # Authenticate and cache the session id on the instance itself.
    appliance.get_session_id(None, save_to_self=True)
    if not appliance.get_connection_info():
        return None
    return appliance
def get_appliance_info(tintri):
    """
    Get Tintri Appliance details.

    Args:
        tintri (obj): authenticated Tintri object
    Returns:
        dict with 'product' (human-readable product name, or None when the
        appliance type is unrecognized) and 'model' keys
    """
    details = tintri.get_appliance_info()
    if tintri.is_vmstore():
        product_name = 'Tintri VMstore'
    elif tintri.is_tgc():
        product_name = 'Tintri Global Center'
    else:
        product_name = None
    return {'product': product_name, 'model': details.get('modelName')}
def get_vm(tintri, server, save_to_server=True):
    """Look up the Tintri VM record backing a CloudBolt server.

    The old docstring was left unterminated by a stray ``# \"\"\"`` line and
    contained commented-out PySDK leftovers; both are cleaned up here.

    Args:
        tintri (obj): authenticated Tintri object.
        server (Server): CloudBolt Server; ``server.tintri_vm_uuid`` is used
            for the lookup when already known, otherwise the VM name is.
        save_to_server (bool): when True, store the discovered UUID on
            ``server.tintri_vm_uuid``. NOTE(review): the attribute is only
            assigned in memory -- ``server.save()`` is never called here.

    Returns:
        dict with 'name', 'uuid' and 'maxNormalizedIops' keys.
    """
    vm_name = server.get_vm_name()
    logging.info('Requesting VM details from Tintri for VM: "{}"'.format(vm_name))
    # Prefer the cached UUID; a name lookup is only needed the first time.
    if server.tintri_vm_uuid:
        tintri_vm = tintri.get_vm_by_uuid(server.tintri_vm_uuid)
    else:
        tintri_vm = tintri.get_vm_by_name(vm_name)
    vm = {}
    vm["name"] = tintri_vm.get("vmware").get("name")
    vm["uuid"] = tintri_vm.get("uuid").get("uuid")
    vm['maxNormalizedIops'] = tintri_vm.get("qosConfig").get("maxNormalizedIops")
    logging.info(f"Found Tintri VM with Name: {vm.get('name')} and UUID: {vm.get('uuid')}")
    if save_to_server:
        server.tintri_vm_uuid = vm.get("uuid", None)
    return vm
def get_vm_stats(tintri, vm_uuid, days):
    """
    Get all Tintri Virtual Machine stats for the past X days (from now).

    Args:
        tintri (obj): Tintri object with session_id
        vm_uuid (str): Virtual Machine's UUID
        days (int): size of the time window, ending now
    Returns:
        list of per-interval stat dicts (the API's 'sortedStats')
    """
    now = datetime.datetime.now()
    window_start = now - datetime.timedelta(days=days)

    def _fmt(ts):
        # ISO8601 with milliseconds and an explicit offset, as the Tintri
        # REST API expects: 'YYYY-MM-DDThh:mm:ss.mmm-00:00'.
        return ts.isoformat()[:-3] + '-00:00'

    results = tintri.get_vm_historic_stats(
        uuid=vm_uuid, since=_fmt(window_start), until=_fmt(now))
    return results.get('items')[0].get('sortedStats')
def get_chart_plotline(vm_stats, attr, name, color):
    """
    Generate a series dictionary for a highcharts graph.

    Args:
        vm_stats (obj): results of get_vm_stats; each entry must carry a
            'timeEnd' ISO timestamp plus the requested *attr*
        attr (str): key of the stat value to plot
        name (str): series label
        color (str): series line color
    Returns:
        dict in highcharts series format with [timestamp_ms, value] pairs
    """
    points = []
    for sample in vm_stats:
        ended = parser.parse(sample.get('timeEnd'))
        # Highcharts expects epoch milliseconds on the x axis.
        epoch_ms = int(time.mktime(ended.utctimetuple())) * 1000
        points.append([epoch_ms, sample.get(attr)])
    return {
        'data': points,
        'name': name,
        'color': color,
        'type': "area",
    }
class TintriTabDelegate(TabExtensionDelegate):
    """Show the Tintri tabs only for servers on a vSphere resource handler."""

    def should_display(self):
        handler = self.instance.resource_handler
        if not handler:
            return False
        return isinstance(handler.cast(), VsphereResourceHandler)
def dict_to_vmstat(statdict):
    """Convert a raw stat dict into an attribute-access object.

    The demo-data path feeds the metrics template objects whose fields are
    read as attributes. ``VirtualMachineStat`` (from the removed Tintri
    PySDK) is not defined or imported anywhere in this module, so the old
    implementation always raised NameError; a SimpleNamespace provides the
    same per-key attribute access.

    Args:
        statdict: mapping of stat field name -> value (string keys).

    Returns:
        types.SimpleNamespace with one attribute per dict key.
    """
    from types import SimpleNamespace  # local import keeps module imports untouched
    return SimpleNamespace(**statdict)
@tab_extension(model=Server,
               title='Tintri Metrics',
               description='Tintri Metrics Tab',
               delegate=TintriTabDelegate)
def server_metrics_tintri(request, obj_id=None):
    """
    Tintri Server Tab Extension
    Requires:
        Install Tintri PySDK
        ConnectionInfo object with name 'tintri'
        VCenter cluster with Tintri VMStore
    """
    server = get_object_or_404(Server, pk=obj_id)
    vm_name = server.get_vm_name()
    # Fallback appliance details, shown when the API call below fails.
    appliance_info = {
        'product': 'Tintri VMstore',
        'model': 'T5000'
    }
    # NOTE(review): tag name 'demdo' looks like a typo for 'demo' -- confirm
    # against the tags actually used before changing it.
    if server.tags.filter(name='demdo'):
        # Demo mode: read canned stats from demo.json next to this file.
        mydir = os.path.dirname(os.path.realpath(__file__))
        with open(os.path.join(mydir, 'demo.json')) as data_file:
            # When using the demo JSON, to get the graphs to appear, the 'time' and 'endTime'
            # values need to be updated to be within the last day. TODO: automate this here
            vm_stat_dicts = json.load(data_file)
        vm_stats = []
        for statdict in vm_stat_dicts:
            vm_stats.append(dict_to_vmstat(statdict))
        maxNormalizedIops = 1000
        # NOTE(review): this branch never assigns no_vm_message, sorted_stats,
        # latency, iops or throughput, all of which are read below -- the demo
        # path appears to raise NameError. Verify before relying on it.
    else:
        no_vm_message = None
        # get real stats from Tintri
        vm = None
        error = None
        try:
            tintri = get_session(server)
            appliance_info = get_appliance_info(tintri)
            vm = get_vm(tintri, server, save_to_server=True)
        except CloudBoltException as e:
            error = str(e)
        if vm:
            vm_stats = get_vm_stats(tintri, server.tintri_vm_uuid, days=1)
            # Most recent sample; feeds the space-usage figures below.
            sorted_stats = vm_stats[-1]
            latency = [
                get_chart_plotline(vm_stats,
                                   attr='latencyNetworkMs',
                                   name='Network',
                                   color=COLOR1),
                get_chart_plotline(vm_stats,
                                   attr='latencyHostMs',
                                   name='Host',
                                   color=COLOR2),
                get_chart_plotline(vm_stats,
                                   attr='latencyDiskMs',
                                   name='Storage',
                                   color=COLOR3),
            ]
            iops = [
                get_chart_plotline(vm_stats,
                                   attr='normalizedTotalIops',
                                   name='Total',
                                   color=COLOR1),
            ]
            throughput = [
                get_chart_plotline(vm_stats,
                                   attr='throughputReadMBps',
                                   name='Read',
                                   color=COLOR3),
                get_chart_plotline(vm_stats,
                                   attr='throughputWriteMBps',
                                   name='Write',
                                   color=COLOR2),
            ]
            maxNormalizedIops = vm.get('maxNormalizedIops')
        else:
            # VM not found (or session/API failure): render an alert and
            # zeroed-out chart data instead.
            status = 'warning'
            msg = 'Could not find server \'{}\' in the Tintri Appliance '.format(vm_name)
            if error:
                status = "danger"  # make sure the message is red
                msg = f"Error finding server '{vm_name}': {error}"
            maxNormalizedIops = 0
            no_vm_message = helper_tags.alert(status, msg)
            sorted_stats = {
                'spaceUsedGiB': 0, 'spaceProvisionedGiB': 0, 'spaceUsedChangeGiB': 0
            }
            iops = []
            throughput = []
            latency = []
    tintri_data = {
        "disk_used": format(sorted_stats.get('spaceUsedGiB'),
                            '.1f'),
        "disk_provisioned": format(sorted_stats.get('spaceProvisionedGiB'),
                                   '.1f'),
        "disk_changed": format(sorted_stats.get('spaceUsedChangeGiB'),
                               '.1f'),
        "chart_latency": latency,
        "chart_iops": iops,
        "max_iops": maxNormalizedIops,
        "chart_throughput": throughput,
        "max_line_color": "red",
    }
    return render(
        request, 'tintri/templates/server_metrics.html', dict(
            appliance_info=appliance_info,
            tintri_data=tintri_data,
            server=server,
            connection_info=get_ci(server),
            no_vm_message=no_vm_message
        )
    )
def epoch_ms_to_html(epoch_ms):
    """Render an epoch-milliseconds timestamp as friendly HTML.

    Negative values are treated as "no timestamp" and render as an
    italicized 'Never'.
    """
    from django.utils.html import format_html
    from utilities.templatetags.helper_tags import when
    if epoch_ms >= 0:
        from datetime import datetime
        return when(datetime.fromtimestamp(epoch_ms / 1000))
    return format_html("<i>Never</i>")
@tab_extension(model=Server,
               title='Tintri Snapshots',
               description='Tintri Snapshots Tab',
               delegate=TintriTabDelegate)
def server_snapshots_tintri(request, obj_id=None):
    """
    Tintri Server Tab Extension listing the snapshots of a server's VM.

    Requires:
        Install Tintri PySDK
        ConnectionInfo object with name 'tintri'
        VCenter cluster with Tintri VMStore
    """
    server = get_object_or_404(Server, pk=obj_id)
    # Bug fix: vm_name was referenced in the not-found branch below but was
    # never assigned in this function (NameError).
    vm_name = server.get_vm_name()
    appliance_info = {
        'product': 'Tintri VMstore',
        'model': 'T5000'
    }
    no_vm_message = None
    can_manage_snapshots = False
    # Bug fix: snapshots was unbound when the VM was not found, crashing the
    # render() call below.
    snapshots = []
    vm = None
    error = None
    try:
        tintri = get_session(server)
        appliance_info = get_appliance_info(tintri)
        # Bug fix: 'vm = get_vm' assigned the function object itself (always
        # truthy) instead of calling it, so the error branch was unreachable.
        vm = get_vm(tintri, server, save_to_server=True)
    except CloudBoltException as e:
        error = str(e)
    if vm:
        profile = request.get_user_profile()
        can_manage_snapshots = profile.has_permission("server.manage_snapshots", server)
        snapshots = tintri.get_snapshots(f"vmUuid={server.tintri_vm_uuid}")
        for snap in snapshots:
            snap["created_as_html"] = epoch_ms_to_html(snap["createTime"])
            snap["expire_as_html"] = epoch_ms_to_html(snap["expirationTime"])
    else:
        status = 'warning'
        msg = 'Could not find server \'{}\' in the Tintri Appliance '.format(vm_name)
        if error:
            status = "danger"  # make sure the message is red
            msg = f"Error finding server '{vm_name}': {error}"
        no_vm_message = helper_tags.alert(status, msg)
    return render(
        request, 'tintri/templates/server_snapshots.html', dict(
            server=server,
            snapshots=snapshots,
            can_manage_snapshots=can_manage_snapshots,
            no_vm_message=no_vm_message
        )
    )
@dialog_view
@cbadmin_required
def edit_tintri_endpoint(request, endpoint_id=None):
    """
    Create and edit dialog for a Tintri Appliance endpoint.
    If `endpoint_id` is None, creates a new one
    """
    endpoint = Tintri().get_connection_info()
    # Route the form POST to 'edit' when an endpoint already exists,
    # otherwise to 'create'.
    if endpoint or endpoint_id:
        action_url = reverse('edit_tintri_endpoint', args=[endpoint.id])
    else:
        action_url = reverse('create_tintri_endpoint')
    if request.method == 'POST':
        form = TintriEndpointForm(request.POST, instance=endpoint)
        if form.is_valid():
            form.save()
            messages.success(
                request, "The Tintri Appliance Endpoint settings have been saved.")
            profile = request.get_user_profile()
            logger.info("Endpoint set to {} by {}.".format(endpoint, profile.user.username))
            return HttpResponseRedirect(request.META['HTTP_REFERER'])
    else:
        form = TintriEndpointForm(instance=endpoint)
    return {
        'title': "Modify Tintri Appliance's Endpoint Settings",
        'form': form,
        'use_ajax': True,
        'action_url': action_url,
        'top_content': "Tintri Appliance Endpoint, Used to support advanced server metrics and actions",
        'submit': 'Save',
    }
@dialog_view
@cbadmin_required
def delete_tintri_endpoint(request):
    """Confirmation dialog that removes the stored Tintri endpoint."""
    endpoint = Tintri().get_connection_info()
    if request.method == 'POST':
        endpoint.delete()
        messages.success(
            request, "The Tintri Appliance's Endpoint has been deleted.")
        return HttpResponseRedirect(request.META['HTTP_REFERER'])
    return {
        'title': "Remove Tintri Appliance's Endpoint?",
        'content': 'Are you sure you want to delete Tintri Appliance\'s endpoint \'{}\'?'.format(endpoint),
        'use_ajax': True,
        'action_url': reverse('delete_tintri_endpoint'),
        'submit': 'Remove'
    }
@dialog_view
@cbadmin_required
def verify_tintri_endpoint(request):
    """Dialog that test-connects to the configured Tintri endpoint.

    Shows a success or danger alert depending on whether the appliance
    could be reached; redirects away when no endpoint is configured.
    """
    tintri = Tintri()
    endpoint = tintri.get_connection_info()
    if not endpoint:
        # Bug fix: django.contrib.messages has no 'warn' function; the
        # correct name is 'warning'. The old call raised AttributeError.
        messages.warning(
            request, "No Tintri Connection Endpoint found! Nothing to verify")
        return HttpResponseRedirect(request.META['HTTP_REFERER'])
    try:
        tintri.verify_connection()
    except Exception as err:
        status = 'danger'
        msg = format_html('Could not make a connection to the Tintri Appliance '
                          '<b>"{}"</b>:<br>{}', endpoint, str(err))
    else:
        status = 'success'
        msg = format_html('Successfully connected to the Tintri Appliance at '
                          '<b>"{}"</b>.', endpoint)
    # The alert helper tag will escape the state and msg if they aren't properly
    # marked safe (after any necessary escaping with format_html) here
    content = helper_tags.alert(status, msg)
    return {
        'title': "Verify connection to Tintri Appliance's Connection Endpoint",
        'content': content,
        'submit': None,
        'cancel': "OK",
    }
@admin_extension(title='Tintri Integration', description='Set-up appliance connection, manage hypervisors, etc')
def admin_page(request):
    """Admin extension page for configuring the Tintri integration."""
    tintri = Tintri()
    tab_context = {
        "endpoint": tintri.get_connection_info(),
        "clone_from_snapshot": tintri.get_or_create_clone_from_snapshot_server_action(),
        "take_snapshot": tintri.get_or_create_take_snapshot_server_action(),
        "delete_snapshot": tintri.get_or_create_delete_snapshot_server_action()
    }
    tintri_context = {
        'tabs': TabGroup(
            template_dir='tintri/templates',
            context=tab_context,
            request=request,
            tabs=[
                # First tab uses template 'groups/tabs/tab-main.html'
                #(_("Configuration"), 'configuration', {}),
                # Tab 2 is conditionally-shown in this slot and
                # uses template 'groups/tabs/tab-related-items.html'
                (_("Overview"), 'overview', {}),
            ],
        )
    }
    return render(request, 'tintri/templates/admin_page.html', tintri_context)
@dialog_view
def create_tintri_snapshot(request, server_id):
    """Dialog that takes a Tintri snapshot of a server via a CB server action."""
    profile = request.get_user_profile()
    server = get_object_or_404(Server, pk=server_id)
    if request.method == 'POST':
        form = TintriSnapshotForm(request.POST, server=server)
        action = Tintri().get_or_create_take_snapshot_server_action()
        if form.is_valid():
            context = form.save()
            action_response = action.run_hook_as_job(
                owner=profile, servers=[server], context=context
            )
            _format_action_html_response(
                action_response=action_response,
                action=action,
                server=server,
                profile=profile,
                request=request,
            )
            return HttpResponseRedirect(request.META['HTTP_REFERER'])
    else:
        form = TintriSnapshotForm(server=server)
    return {
        'title': "Create Tintri Snapshot",
        'form': form,
        'use_ajax': True,
        'action_url': reverse('create_tintri_snapshot', args=[server_id]),
        'submit': 'Take Snapshot',
    }
@dialog_view
def delete_tintri_snapshot(request, server_id, snapshot_uuid):
    """Confirmation dialog that deletes a Tintri snapshot via a CB server action."""
    profile = request.get_user_profile()
    server = get_object_or_404(Server, pk=server_id)
    if request.method == 'POST':
        action = Tintri().get_or_create_delete_snapshot_server_action()
        action_response = action.run_hook_as_job(
            owner=profile, servers=[server],
            context={'snapshot_uuid': snapshot_uuid}
        )
        _format_action_html_response(
            action_response=action_response,
            action=action,
            server=server,
            profile=profile,
            request=request,
        )
        return HttpResponseRedirect(request.META['HTTP_REFERER'])
    return {
        'title': "Delete Tintri Snapshot",
        "theme": "danger",
        "content": (
            f"Are you sure you want to delete snapshot with uuid '{snapshot_uuid}' from Tintri?"
            " This action cannot be undone!"
        ),
        'use_ajax': True,
        'action_url': reverse('delete_tintri_snapshot', args=[server_id, snapshot_uuid]),
        'submit': 'Delete',
    }
@dialog_view
def clone_from_tintri_snapshot(request, server_id, snapshot_uuid):
    """Dialog that clones a new VM from a Tintri snapshot via a CB server action."""
    profile = request.get_user_profile()
    server = get_object_or_404(Server, pk=server_id)
    if request.method == 'POST':
        form = TintriCloneSnapshotForm(request.POST, server=server)
        action = Tintri().get_or_create_clone_from_snapshot_server_action()
        if form.is_valid():
            context = form.save()
            context["snapshot_uuid"] = snapshot_uuid
            action_response = action.run_hook_as_job(
                owner=profile, servers=[server], context=context
            )
            _format_action_html_response(
                action_response=action_response,
                action=action,
                server=server,
                profile=profile,
                request=request,
            )
            return HttpResponseRedirect(request.META['HTTP_REFERER'])
    else:
        form = TintriCloneSnapshotForm(server=server)
    return {
        'title': "Clone VM from Tintri Snapshot",
        'content': f"Cloning from snapshot with UUID {snapshot_uuid}",
        'form': form,
        'use_ajax': True,
        'action_url': reverse('clone_from_tintri_snapshot', args=[server_id, snapshot_uuid]),
        'submit': 'Clone',
    }
"content_hash": "40a732e324b570740db2c79e7a52a3f4",
"timestamp": "",
"source": "github",
"line_count": 613,
"max_line_length": 112,
"avg_line_length": 32.52691680261012,
"alnum_prop": 0.5907517929685541,
"repo_name": "CloudBoltSoftware/cloudbolt-forge",
"id": "8b65ea8e0cbbb6a8f08488fbfb68e2e54da5804f",
"size": "19939",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ui_extensions/tintri/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1665"
},
{
"name": "HTML",
"bytes": "165828"
},
{
"name": "JavaScript",
"bytes": "1871"
},
{
"name": "PowerShell",
"bytes": "5779"
},
{
"name": "Python",
"bytes": "1742154"
},
{
"name": "Shell",
"bytes": "16836"
}
],
"symlink_target": ""
} |
__author__ = "George Chantzialexiou"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
""" A Simple Mandelbrot Fractal Generator.
We use this example to explore the distributed capabilites of
the Pilot Job and Filesystem APIs. The mandelbrot module
calculates partial mandelbrot set fractal and writes it to a
PNG image file. To determine which part of the fracat we create
we use the iterations variable.
It requires the Python Image Library (PIL) which can be easily
installed with 'easy_install PIL'.
This programm is required by the mandelbrot_pilot.py programm
which creates the mandelbrot fractal using the capabilites of
Pilot Job API.
on the command line::
python mandelbrot.py imgX imgY xBeg xEnd yBeg yEnd cores iterations
The parameters are as follows:
imgX, imgY: the dimensions of the mandelbrot image, e.g. 1024, 1024
xBeg, xEnd: the x-axis portion of the (sub-)image to calculate
yBeg, yEnd: the y-axis portion of the (sub-)image to calculate
cores : the number of the system's cores
iterations: the part of the image we create. Can vary from 1 to cores.
"""
__author__ = "Ole Christian Weidner"
__copyright__ = "Copyright 2012, Ole Christian Weidner"
__license__ = "MIT"
import sys
from PIL import Image
################################################################################
##
def makemandel(mandelx, mandely, xbeg, xend, ybeg, yend, cores, iterations):
    """Compute one horizontal slice of the Mandelbrot set and save it as a GIF.

    Args:
        mandelx, mandely: overall fractal resolution, used to scale pixel
            coordinates into the complex plane.
        xbeg, xend: x-axis pixel range of the (sub-)image to calculate.
        ybeg, yend: y-axis pixel range used to size the output image.
        cores: total number of slices the full image is divided into.
        iterations: 1-based index of the slice rendered by this call.

    Returns:
        The PIL Image object (also saved as 'mandel_<iterations>.gif').
    """
    # drawing area in the complex plane (xa < xb and ya < yb)
    xa = -2.0
    xb = 1.0
    ya = -1.5
    yb = 1.5
    # maximum iterations per point before treating it as inside the set
    maxIt = 128
    # the output image
    image = Image.new("RGB", (xend-xbeg, yend-ybeg))
    # Row range of the slice handled by this call. NOTE(review): these bounds
    # are derived from yend/cores/iterations only -- ybeg is ignored here, so
    # the math assumes ybeg == 0. Confirm against mandelbrot_pilot.py.
    ybeg2 = int(((yend*(iterations-1))/cores))
    yend2 = int(((yend*(iterations))/cores))
    for y in range(ybeg2, yend2):
        cy = y * (yb - ya) / (mandely - 1) + ya
        for x in range(xbeg, xend):
            cx = x * (xb - xa) / (mandelx - 1) + xa
            c = complex(cx, cy)
            z = 0
            # escape-time iteration: point diverges once |z| > 2
            for i in range(maxIt):
                if abs(z) > 2.0: break
                z = z * z + c
            # map the escape iteration count to r/g/b components
            r = i % 4 * 16
            g = i % 6 * 16
            b = i % 16 * 16
            # pack the components into the single int form putpixel accepts
            image.putpixel((x-xbeg, y-ybeg), b * 65536 + g * 256 + r)
    image.save('mandel_%d.gif' % iterations , "GIF")
    return image
################################################################################
##
if __name__ == "__main__":
args = sys.argv[1:]
imgX = int(sys.argv[1])
imgY = int(sys.argv[2])
xBeg = int(sys.argv[3])
xEnd = int(sys.argv[4])
yBeg = int(sys.argv[5])
yEnd = int(sys.argv[6])
cores = int(sys.argv[7])
iterations = int(sys.argv[8])
makemandel(imgX, imgY, xBeg, xEnd, yBeg, yEnd, cores, iterations)
sys.exit(0)
| {
"content_hash": "ca3adc55b002a21c817aa0b8a5547145",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 80,
"avg_line_length": 29.1010101010101,
"alnum_prop": 0.556056924678931,
"repo_name": "georgeha/mandelbrot",
"id": "d4fd957b331e2e23bcba4a1acb7f1785c4fc49f5",
"size": "2882",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mandelbrot_core/mandel_lines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37917"
},
{
"name": "Shell",
"bytes": "6732"
}
],
"symlink_target": ""
} |
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship
from base import Base
class Address(Base):
    """An email address row, linked to its owning User."""
    __tablename__ = 'addresses'

    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    email_address = Column(String, nullable=False)
    # Owning user; pairs with User.addresses via back_populates.
    user_id = Column(Integer, ForeignKey('users.id'))
    user = relationship("User", back_populates="addresses")

    def __repr__(self):
        # Bug fix: the old format string "<Address(name='%s'>" labelled the
        # field 'name' (it holds email_address) and never closed the paren.
        return "<Address(email_address='%s')>" % (
            self.email_address
        )
"content_hash": "1167413bbf0b099d62d034c33a7a2db9",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 59,
"avg_line_length": 30.625,
"alnum_prop": 0.6632653061224489,
"repo_name": "b-ritter/python-notes",
"id": "fb6fac1dec130b12a43f46b7edb85a1edb355c05",
"size": "490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "database_abstraction/address.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15367"
},
{
"name": "Shell",
"bytes": "289"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: creates the 'comments_comment' table."""

    def forwards(self, orm):
        """Apply the migration: create the table for the Comment model."""
        # Adding model 'Comment'
        db.create_table('comments_comment', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('link', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['links.Link'])),
            ('body', self.gf('django.db.models.fields.TextField')()),
            ('as_html', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('posted_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='posted_by', to=orm['auth.User'])),
            ('posted_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal('comments', ['Comment'])

    def backwards(self, orm):
        """Reverse the migration: drop the Comment model's table."""
        # Deleting model 'Comment'
        db.delete_table('comments_comment')

    # Frozen ORM state captured by South when this migration was generated;
    # it builds the 'orm' object passed to forwards()/backwards().
    # Auto-generated -- do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 5, 1, 10, 17, 15, 913076)'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 5, 1, 10, 17, 15, 912950)'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'channels.channel': {
            'Meta': {'object_name': 'Channel'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_official': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
        },
        'comments.comment': {
            'Meta': {'object_name': 'Comment'},
            'as_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'body': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['links.Link']"}),
            'posted_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'posted_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posted_by'", 'to': "orm['auth.User']"})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'links.language': {
            'Meta': {'object_name': 'Language'},
            'code': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '16'})
        },
        'links.link': {
            'Meta': {'object_name': 'Link'},
            'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['channels.Channel']"}),
            'comment_score': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_banned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_sponsored': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['links.Language']"}),
            'player': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'posted_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'posted_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'rating': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'shown': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'thumbnail_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '144'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
            'vote_score': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        }
    }

    complete_apps = ['comments']
| {
"content_hash": "5b090532c31c691ba17f396e83a21565",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 182,
"avg_line_length": 69.54545454545455,
"alnum_prop": 0.549281045751634,
"repo_name": "linkfloyd/linkfloyd",
"id": "c43a4d8b486ec725d599e45e5af0a80d7925fa9a",
"size": "7668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "linkfloyd/comments/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "27118"
},
{
"name": "JavaScript",
"bytes": "60925"
},
{
"name": "Python",
"bytes": "234160"
}
],
"symlink_target": ""
} |
from typing import Optional, TypedDict
from backend.common.sitevars.sitevar import Sitevar
# Structure of the stored sitevar value: a single Google API key string.
ContentType = TypedDict("ContentType", {"api_key": str})
class GoogleApiSecret(Sitevar[ContentType]):
    """Sitevar holding the secret key used when calling Google APIs."""

    @staticmethod
    def key() -> str:
        return "google.secrets"

    @staticmethod
    def description() -> str:
        return "For Google API Calls"

    @staticmethod
    def default_value() -> ContentType:
        return ContentType(api_key="")

    @classmethod
    def secret_key(cls) -> Optional[str]:
        # An empty string means "not configured", so collapse it to None.
        return cls.get().get("api_key") or None
| {
"content_hash": "843b454da2bd0c92cb0ab84849fddc03",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 71,
"avg_line_length": 23.96153846153846,
"alnum_prop": 0.6645264847512039,
"repo_name": "the-blue-alliance/the-blue-alliance",
"id": "2074644cf2b42f92e0a8337a41c6b7108eb8fb96",
"size": "623",
"binary": false,
"copies": "1",
"ref": "refs/heads/py3",
"path": "src/backend/common/sitevars/google_api_secret.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "359032"
},
{
"name": "Dockerfile",
"bytes": "2503"
},
{
"name": "HTML",
"bytes": "5877313"
},
{
"name": "JavaScript",
"bytes": "755910"
},
{
"name": "Less",
"bytes": "244218"
},
{
"name": "PHP",
"bytes": "10727"
},
{
"name": "Pug",
"bytes": "1857"
},
{
"name": "Python",
"bytes": "4321885"
},
{
"name": "Ruby",
"bytes": "4677"
},
{
"name": "Shell",
"bytes": "27698"
}
],
"symlink_target": ""
} |
# Maps Jira field identifiers to YouTrack field type descriptors.
# The commented entries record the raw Jira Java/plugin type names this
# table was originally derived from:
#   u'java.lang.String': 'string',
#   u'java.util.Date': 'date',
#   u'com.atlassian.jira.project.version.Version': 'version[*]',
#   u'com.atlassian.jira.issue.issuetype.IssueType': 'enum[1]',
#   u'com.atlassian.jira.issue.priority.Priority': 'enum[1]',
#   u'com.atlassian.jira.issue.status.Status': 'state[1]',
#   u'com.opensymphony.user.User': 'user[1]',
#   u'com.atlassian.jira.bc.project.component.ProjectComponent': 'ownedField[*]',
#   u'com.atlassian.jira.plugin.system.customfieldtypes:importid': 'string',
#   u'com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons': 'enum[1]',
#   u'com.atlassian.jira.toolkit:multikeyfield': 'enum[*]',
#   u'com.atlassian.jira.toolkit:participants': 'user[*]',
#   u'com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes': 'enum[*]',
#   u'com.atlassian.jira.plugin.system.customfieldtypes:textfield': 'string'
FIELD_TYPES = {
    u'aggregatetimespent': u'integer',
    u'aggregatetimeestimate': u'integer',
    u'Fix versions': u'version[*]',
    u'priority': u'enum[1]',
    u'timespent': u'integer',
    u'State': u'state[1]',
    u'Affected versions': u'version[*]',
    u'Type': u'enum[*]',
    u'customfield_10550': u'string',
    u'assignee': u'user[1]',
    u'customfield_10250': u'string',
    u'timeestimate': u'integer',
    u'components': u'ownedField[*]',
    u'resolution': u'state[1]',
    u'timeoriginalestimate': u'integer',
    u'aggregatetimeoriginalestimate': u'integer',
    u'customfield_10153': u'date',
    u'customfield_10051': u'date',
    u'customfield_10050': u'date',
    u'customfield_10156': u'string',
    u'customfield_10154': u'string',
    u'Estimation': u'period',
}
# Maps Jira attribute names to the corresponding YouTrack field names.
FIELD_NAMES = {
    u'reporter': 'reporterName',
    u'fixVersions': 'Fix versions',
    u'versions': 'Affected versions',
    u'status': 'State',
    u'issuetype': 'Type',
    u'resolutiondate': 'resolved',
    u'timeoriginalestimate': 'Estimation',
}
# Fields that already exist on every YouTrack issue and therefore need no
# custom field definition when importing.
EXISTING_FIELDS = [
    'numberInProject',
    'projectShortName',
    'summary',
    'description',
    'created',
    'updated',
    'updaterName',
    'resolved',
    'reporterName',
]
class JiraException(Exception):
    """Raised when a Jira import/export operation fails.

    The previous ``__init__`` only delegated to ``super().__init__`` with
    the same arguments, which is exactly what ``Exception`` does by
    default, so the override was redundant and has been removed.
    """
| {
"content_hash": "45bc06689197a26b9023f560ac9e7907",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 94,
"avg_line_length": 44.57142857142857,
"alnum_prop": 0.5885416666666666,
"repo_name": "softintouch/python-youtrack-api",
"id": "86b0090dbd4bdb9912395f5bb77679a216bedc14",
"size": "2496",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sit_youtrack/jira/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "766130"
}
],
"symlink_target": ""
} |
import os

# Filesystem directory containing this module; relative whenever
# __file__ itself is relative.  Presumably used by callers to locate
# resources shipped next to the package -- confirm with call sites.
path = os.path.dirname(__file__)
| {
"content_hash": "9174f76718a867cbef09c1bcf5273993",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 32,
"avg_line_length": 14.666666666666666,
"alnum_prop": 0.6590909090909091,
"repo_name": "westernx/sgfs",
"id": "5d73876c7051fef48e61e90fd889bf37a00a47b2",
"size": "44",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sgfs/nuke/setup/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "236515"
},
{
"name": "Shell",
"bytes": "148"
}
],
"symlink_target": ""
} |
import numpy as np
import matplotlib.pylab as plt
from MulLayer import MulLayer
class AddLayer:
    """Computational-graph node performing elementwise addition."""

    def __init__(self):
        # Addition keeps no state between the forward and backward passes.
        pass

    def forward(self, x, y):
        """Return the sum of the two inputs."""
        return x + y

    def backward(self, dout):
        """Route the upstream gradient unchanged to both inputs.

        d(x + y)/dx = d(x + y)/dy = 1, so each branch receives ``dout``.
        The ``* 1`` preserves the original's copy semantics for
        array-like gradients.
        """
        return dout * 1, dout * 1
# Worked backpropagation example:
#   price = (apple * apple_num + orange * orange_num) * tax
apple = 100
apple_num = 2
orange = 150
orange_num = 3
tax = 1.1

# Layers making up the computational graph.
mul_apple_layer = MulLayer()
mul_orange_layer = MulLayer()
add_apple_orange_layer = AddLayer()
mul_tax_layer = MulLayer()

# Forward pass: compute the taxed total price.
apple_price = mul_apple_layer.forward( apple, apple_num)
orange_price = mul_orange_layer.forward( orange, orange_num)
all_price = add_apple_orange_layer.forward( apple_price, orange_price)
price = mul_tax_layer.forward( all_price, tax)

# Backward pass: start from d(price)/d(price) = 1 and propagate the
# gradient back through each layer in reverse order.
dprice = 1
dall_price, dtax = mul_tax_layer.backward( dprice)
dapple_price, dorange_price = add_apple_orange_layer.backward(dall_price)
dorange, dorange_num = mul_orange_layer.backward(dorange_price)
dapple, dapple_num = mul_apple_layer.backward( dapple_price)

print("price=", price)
print(dapple_num, dapple, dorange, dorange_num, dtax)
| {
"content_hash": "c33b39bd02cfc50d1f790e4ca3327128",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 73,
"avg_line_length": 24.23404255319149,
"alnum_prop": 0.6453028972783144,
"repo_name": "leejw51/BumblebeeNet",
"id": "62d42a9436f1d10dfd3bd547d3921a8c811e5679",
"size": "1139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Test/BackwardPropagation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "39413"
},
{
"name": "Go",
"bytes": "19053"
},
{
"name": "Jupyter Notebook",
"bytes": "604049"
},
{
"name": "Protocol Buffer",
"bytes": "615"
},
{
"name": "Python",
"bytes": "72792"
},
{
"name": "Swift",
"bytes": "115174"
}
],
"symlink_target": ""
} |
import ddt
import mock
from rally import exceptions
from rally.plugins.openstack.scenarios.vm import vmtasks
from tests.unit import test
BASE = "rally.plugins.openstack.scenarios.vm.vmtasks"
@ddt.ddt
class VMTasksTestCase(test.ScenarioTestCase):
    # Unit tests for the VM task scenarios.  Every OpenStack interaction
    # is replaced by a mock, so only the scenario orchestration logic
    # (call order, arguments, error propagation) is exercised.

    def setUp(self):
        super(VMTasksTestCase, self).setUp()
        # Scenario code reads the keypair name and credential from the
        # benchmark context.
        self.context.update({"user": {"keypair": {"name": "keypair_name"},
                                      "credential": mock.MagicMock()}})

    def create_env(self, scenario):
        # Stub out every helper the scenario calls: server boot/delete,
        # ping wait, volume creation, remote command execution and output
        # reporting.  Returns the instrumented scenario.
        self.ip = {"id": "foo_id", "ip": "foo_ip", "is_floating": True}
        scenario._boot_server_with_fip = mock.Mock(
            return_value=("foo_server", self.ip))
        scenario._wait_for_ping = mock.Mock()
        scenario._delete_server_with_fip = mock.Mock()
        scenario._create_volume = mock.Mock(
            return_value=mock.Mock(id="foo_volume"))
        scenario._run_command = mock.MagicMock(
            return_value=(0, "{\"foo\": 42}", "foo_err"))
        scenario.add_output = mock.Mock()
        return scenario

    # Happy path: volume created, server booted, command run over SSH,
    # output parsed as JSON and reported, server deleted.
    def test_boot_runcommand_delete(self):
        scenario = self.create_env(vmtasks.BootRuncommandDelete(self.context))
        scenario._run_command = mock.MagicMock(
            return_value=(0, "{\"foo\": 42}", "foo_err"))
        scenario.run("foo_image", "foo_flavor",
                     command={"script_file": "foo_script",
                              "interpreter": "foo_interpreter"},
                     username="foo_username",
                     password="foo_password",
                     use_floating_ip="use_fip",
                     floating_network="ext_network",
                     force_delete="foo_force",
                     volume_args={"size": 16},
                     foo_arg="foo_value")
        scenario._create_volume.assert_called_once_with(16, imageRef=None)
        scenario._boot_server_with_fip.assert_called_once_with(
            "foo_image", "foo_flavor", key_name="keypair_name",
            use_floating_ip="use_fip", floating_network="ext_network",
            block_device_mapping={"vdrally": "foo_volume:::1"},
            foo_arg="foo_value")
        scenario._wait_for_ping.assert_called_once_with("foo_ip")
        scenario._run_command.assert_called_once_with(
            "foo_ip", 22, "foo_username", "foo_password",
            command={"script_file": "foo_script",
                     "interpreter": "foo_interpreter"})
        scenario._delete_server_with_fip.assert_called_once_with(
            "foo_server", self.ip, force_delete="foo_force")
        scenario.add_output.assert_called_once_with(
            additive={"title": "Command output", "chart_plugin": "Lines",
                      "data": [["foo", 42.0]]})

    # Each dict drives one run: "output" is the (code, stdout, stderr)
    # triple returned by the remote command; either "expected" add_output
    # calls or a "raises" exception type is asserted.
    @ddt.data(
        {"output": (0, "", ""), "raises": exceptions.ScriptError},
        {"output": (0, "{\"foo\": 42}", ""),
         "expected": [{"additive": {"chart_plugin": "Lines",
                                    "data": [["foo", 42.0]],
                                    "title": "Command output"}}]},
        {"output": (1, "{\"foo\": 42}", ""), "raises": exceptions.ScriptError},
        {"output": ("", 1, ""), "raises": TypeError},
        {"output": (0, "{\"additive\": [1, 2]}", ""),
         "expected": [{"additive": 1}, {"additive": 2}]},
        {"output": (0, "{\"complete\": [3, 4]}", ""),
         "expected": [{"complete": 3}, {"complete": 4}]},
        {"output": (0, "{\"additive\": [1, 2], \"complete\": [3, 4]}", ""),
         "expected": [{"additive": 1}, {"additive": 2},
                      {"complete": 3}, {"complete": 4}]}
    )
    @ddt.unpack
    def test_boot_runcommand_delete_add_output(self, output,
                                               expected=None, raises=None):
        scenario = self.create_env(vmtasks.BootRuncommandDelete(self.context))
        scenario._run_command.return_value = output
        kwargs = {"command": {"remote_path": "foo"},
                  "username": "foo_username",
                  "password": "foo_password",
                  "use_floating_ip": "use_fip",
                  "floating_network": "ext_network",
                  "force_delete": "foo_force",
                  "volume_args": {"size": 16},
                  "foo_arg": "foo_value"}
        if raises:
            self.assertRaises(raises, scenario.run,
                              "foo_image", "foo_flavor", **kwargs)
            self.assertFalse(scenario.add_output.called)
        else:
            scenario.run("foo_image", "foo_flavor", **kwargs)
            calls = [mock.call(**kw) for kw in expected]
            scenario.add_output.assert_has_calls(calls, any_order=True)
            scenario._create_volume.assert_called_once_with(16, imageRef=None)
            scenario._boot_server_with_fip.assert_called_once_with(
                "foo_image", "foo_flavor", key_name="keypair_name",
                use_floating_ip="use_fip", floating_network="ext_network",
                block_device_mapping={"vdrally": "foo_volume:::1"},
                foo_arg="foo_value")
            scenario._run_command.assert_called_once_with(
                "foo_ip", 22, "foo_username", "foo_password",
                command={"remote_path": "foo"})
            scenario._delete_server_with_fip.assert_called_once_with(
                "foo_server", self.ip, force_delete="foo_force")

    # SSH timeouts must propagate, but the server is still cleaned up.
    def test_boot_runcommand_delete_command_timeouts(self):
        scenario = self.create_env(vmtasks.BootRuncommandDelete(self.context))
        scenario._run_command.side_effect = exceptions.SSHTimeout()
        self.assertRaises(exceptions.SSHTimeout,
                          scenario.run,
                          "foo_image", "foo_flavor", "foo_interpreter",
                          "foo_script", "foo_username")
        scenario._delete_server_with_fip.assert_called_once_with(
            "foo_server", self.ip, force_delete=False)
        self.assertFalse(scenario.add_output.called)

    # Ping-wait timeouts propagate with their diagnostic kwargs intact,
    # and the server is still cleaned up.
    def test_boot_runcommand_delete_ping_wait_timeouts(self):
        scenario = self.create_env(vmtasks.BootRuncommandDelete(self.context))
        scenario._wait_for_ping.side_effect = exceptions.TimeoutException(
            resource_type="foo_resource",
            resource_name="foo_name",
            resource_id="foo_id",
            desired_status="foo_desired_status",
            resource_status="foo_resource_status")
        exc = self.assertRaises(exceptions.TimeoutException,
                                scenario.run,
                                "foo_image", "foo_flavor", "foo_interpreter",
                                "foo_script", "foo_username",
                                wait_for_ping=True)
        self.assertEqual(exc.kwargs["resource_type"], "foo_resource")
        self.assertEqual(exc.kwargs["resource_name"], "foo_name")
        self.assertEqual(exc.kwargs["resource_id"], "foo_id")
        self.assertEqual(exc.kwargs["desired_status"], "foo_desired_status")
        self.assertEqual(exc.kwargs["resource_status"], "foo_resource_status")
        scenario._delete_server_with_fip.assert_called_once_with(
            "foo_server", self.ip, force_delete=False)
        self.assertFalse(scenario.add_output.called)

    # Unparseable command output becomes a ScriptError; cleanup still runs.
    @mock.patch("%s.json" % BASE)
    def test_boot_runcommand_delete_json_fails(self, mock_json):
        scenario = self.create_env(vmtasks.BootRuncommandDelete(self.context))
        mock_json.loads.side_effect = ValueError()
        self.assertRaises(exceptions.ScriptError,
                          scenario.run,
                          "foo_image", "foo_flavor", "foo_interpreter",
                          "foo_script", "foo_username")
        scenario._delete_server_with_fip.assert_called_once_with(
            "foo_server", self.ip, force_delete=False)
        self.assertFalse(scenario.add_output.called)

    # The custom-image variant forwards the tenant's custom image id to
    # the base scenario's run().
    @mock.patch("%s.BootRuncommandDelete.run" % BASE)
    def test_boot_runcommand_delete_custom_image(self, mock_scenario):
        context = {
            "user": {
                "tenant_id": "tenant_id",
                "credential": mock.Mock()
            },
            "tenant": {
                "custom_image": {"id": "image_id"}
            }
        }
        scenario = vmtasks.BootRuncommandDeleteCustomImage(context)
        scenario.run(flavor="flavor_id",
                     command={
                         "script_file": "foo_script",
                         "interpreter": "bar_interpreter"},
                     username="username")
        mock_scenario.assert_called_once_with(
            image="image_id", flavor="flavor_id", username="username",
            command={
                "script_file": "foo_script",
                "interpreter": "bar_interpreter"}
        )

    # RuncommandHeat: stack outputs drive SSH execution; the "key:val"
    # stdout is reported as a summary table.
    @mock.patch("%s.heat" % BASE)
    @mock.patch("%s.sshutils" % BASE)
    def test_runcommand_heat(self, mock_sshutils, mock_heat):
        fake_ssh = mock.Mock()
        fake_ssh.execute.return_value = [0, "key:val", ""]
        mock_sshutils.SSH.return_value = fake_ssh
        fake_stack = mock.Mock()
        fake_stack.stack.outputs = [{"output_key": "gate_node",
                                     "output_value": "ok"}]
        mock_heat.main.Stack.return_value = fake_stack
        context = {
            "user": {"keypair": {"name": "name", "private": "pk"},
                     "credential": "ok"},
            "tenant": {"networks": [{"router_id": "1"}]}
        }
        scenario = vmtasks.RuncommandHeat(context)
        scenario.generate_random_name = mock.Mock(return_value="name")
        scenario.add_output = mock.Mock()
        workload = {"username": "admin",
                    "resource": ["foo", "bar"]}
        scenario.run(workload, "template",
                     {"file_key": "file_value"},
                     {"param_key": "param_value"})
        expected = {"chart_plugin": "Table",
                    "data": {"rows": [["key", "val"]],
                             "cols": ["key", "value"]},
                    "description": "Data generated by workload",
                    "title": "Workload summary"}
        scenario.add_output.assert_called_once_with(complete=expected)
| {
"content_hash": "54e0530a1da0ea18c3de6a77862945db",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 79,
"avg_line_length": 46.09502262443439,
"alnum_prop": 0.5354864042406989,
"repo_name": "vganapath/rally",
"id": "c0cc01b2bc39b97782303a462aa197efa473b56d",
"size": "10816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/plugins/openstack/scenarios/vm/test_vmtasks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "52008"
},
{
"name": "JavaScript",
"bytes": "8550"
},
{
"name": "Mako",
"bytes": "18645"
},
{
"name": "Python",
"bytes": "3621510"
},
{
"name": "Shell",
"bytes": "43808"
}
],
"symlink_target": ""
} |
# Written as function calls so the script runs under both Python 2 and
# Python 3 (a single parenthesized argument prints identically on both).
print("Hello")
print("World")
print '' | {
"content_hash": "db2a8ff6b8b6aba4302b343345cdcf2f",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 13,
"avg_line_length": 9.25,
"alnum_prop": 0.6756756756756757,
"repo_name": "janusnic/dj-21v",
"id": "7afc8aba1d6dd6e7d616eaee49a971f1e5fdacc1",
"size": "60",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unit_01/benchmarks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "607197"
},
{
"name": "HTML",
"bytes": "352620"
},
{
"name": "JavaScript",
"bytes": "4098502"
},
{
"name": "Python",
"bytes": "1906453"
}
],
"symlink_target": ""
} |
from pandaloginvestigator.core.workers import worker_translator
from pandaloginvestigator.core.utils import utils
from multiprocessing import Pool
import logging
import time
logger = logging.getLogger(__name__)
def translate_logs(dir_pandalogs_path, dir_unpacked_path, syscall_dict, dir_translated_path, core_num, max_num=None,
                   file_list=None):
    """Convert system call numbers into explicit system call names.

    Gathers the log files from the configured folders, splits them into
    evenly sized sublists, and hands each sublist to a worker_translator
    worker running in a multiprocessing pool.  When ``max_num`` is None
    every log file is translated.  Total elapsed time is logged.

    :param dir_pandalogs_path: directory containing the pandalog files
    :param dir_unpacked_path: directory containing the unpacked logs
    :param syscall_dict: mapping used to resolve system call numbers
    :param dir_translated_path: destination directory for translated logs
    :param core_num: number of worker processes to use
    :param max_num: optional cap on how many logs to translate
    :param file_list: optional explicit list of files to process
    :return: None
    """
    logger.info('Starting translating operation with max_num = ' + str(max_num))
    start_time = time.time()
    # Resolve the effective file set and cap, honoring file_list/max_num.
    file_names, max_num = utils.input_with_modifiers(dir_unpacked_path, dir_pandalogs_path, file_list=file_list,
                                                     max_num=max_num)
    # One sublist per worker process.
    work_sublists = utils.divide_workload(file_names, core_num, max_num)
    shared_args = (dir_unpacked_path, dir_translated_path, syscall_dict)
    worker_args = utils.format_worker_input(core_num, work_sublists, shared_args)
    worker_pool = Pool(processes=core_num)
    worker_pool.map(worker_translator.work, worker_args)
    worker_pool.close()
    worker_pool.join()
    end_time = time.time()
    logger.info('Total translating time: ' + str(end_time - start_time))
| {
"content_hash": "76ab09c1978d3cf5250a8c4a99ef4420",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 120,
"avg_line_length": 37.130434782608695,
"alnum_prop": 0.6627634660421545,
"repo_name": "ClonedOne/pandalog_investigator",
"id": "9e6589e5467dabe536d0b280ffca04f154ac769e",
"size": "1708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandaloginvestigator/core/analysis/log_translator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "87851"
}
],
"symlink_target": ""
} |
import unittest
from telemetry.timeline import chrome_trace_category_filter
class ChromeTraceCategoryFilterTest(unittest.TestCase):
  # Tests for ChromeTraceCategoryFilter: filter-string parsing, category
  # add/exclude mutation, and the IsSubset relation used when comparing
  # trace configurations.

  def CheckBasicCategoryFilters(self, cf):
    # Shared assertions for a filter built from
    # 'x,-y,disabled-by-default-z,DELAY(7;foo)'.
    self.assertEquals(set(['x']), set(cf.included_categories))
    self.assertEquals(set(['y']), set(cf.excluded_categories))
    self.assertEquals(
        set(['disabled-by-default-z']), set(cf.disabled_by_default_categories))
    self.assertEquals(set(['DELAY(7;foo)']), set(cf.synthetic_delays))
    self.assertTrue('x' in cf.filter_string)
    self.assertEquals(
        'x,disabled-by-default-z,-y,DELAY(7;foo)',
        cf.stable_filter_string)

  def test_basic(self):
    cf = chrome_trace_category_filter.ChromeTraceCategoryFilter(
        'x,-y,disabled-by-default-z,DELAY(7;foo)')
    self.CheckBasicCategoryFilters(cf)

  # Surrounding whitespace (spaces, newlines, tabs) must be stripped.
  def testBasicWithSpace(self):
    cf = chrome_trace_category_filter.ChromeTraceCategoryFilter(
        '  x   ,\n-y\t,disabled-by-default-z  ,DELAY(7;foo)')
    self.CheckBasicCategoryFilters(cf)

  def testNoneAndEmptyCategory(self):
    a = chrome_trace_category_filter.ChromeTraceCategoryFilter()
    self.assertEquals(a.stable_filter_string, '')
    self.assertEquals(a.filter_string, '')
    self.assertEquals(str(a.GetDictForChromeTracing()), '{}')

    # Initializing chrome trace category filter with empty string is the same
    # as initialization with None.
    b = chrome_trace_category_filter.ChromeTraceCategoryFilter(filter_string='')
    self.assertEquals(b.stable_filter_string, '')
    self.assertEquals(b.filter_string, '')
    self.assertEquals(str(b.GetDictForChromeTracing()), '{}')

  # Duplicates are deduplicated and the stable string is sorted.
  def testAddIncludedCategory(self):
    a = chrome_trace_category_filter.ChromeTraceCategoryFilter()
    a.AddIncludedCategory('foo')
    a.AddIncludedCategory('bar')
    a.AddIncludedCategory('foo')
    self.assertEquals(a.stable_filter_string, 'bar,foo')

  def testAddExcludedCategory(self):
    a = chrome_trace_category_filter.ChromeTraceCategoryFilter()
    a.AddExcludedCategory('foo')
    a.AddExcludedCategory('bar')
    a.AddExcludedCategory('foo')
    self.assertEquals(a.stable_filter_string, '-bar,-foo')

  # A category may not be both included and excluded, whether added via
  # the mutators or parsed from a filter string.
  def testIncludeAndExcludeCategoryRaisesAssertion(self):
    a = chrome_trace_category_filter.ChromeTraceCategoryFilter()
    a.AddIncludedCategory('foo')
    self.assertRaises(AssertionError, a.AddExcludedCategory, 'foo')

    a = chrome_trace_category_filter.ChromeTraceCategoryFilter()
    a.AddExcludedCategory('foo')
    self.assertRaises(AssertionError, a.AddIncludedCategory, 'foo')

    self.assertRaises(AssertionError,
                      chrome_trace_category_filter.ChromeTraceCategoryFilter,
                      'foo,-foo')
    self.assertRaises(AssertionError,
                      chrome_trace_category_filter.ChromeTraceCategoryFilter,
                      '-foo,foo')

  # IsSubset is tri-valued: True, False, or None when wildcards/patterns
  # make the relation undecidable.
  def testIsSubset(self):
    b = chrome_trace_category_filter.ChromeTraceCategoryFilter()
    a = chrome_trace_category_filter.ChromeTraceCategoryFilter()
    self.assertEquals(a.IsSubset(b), True)

    b = chrome_trace_category_filter.ChromeTraceCategoryFilter()
    a = chrome_trace_category_filter.ChromeTraceCategoryFilter("test1,test2")
    self.assertEquals(a.IsSubset(b), True)

    b = chrome_trace_category_filter.ChromeTraceCategoryFilter()
    a = chrome_trace_category_filter.ChromeTraceCategoryFilter("-test1,-test2")
    self.assertEquals(a.IsSubset(b), True)

    b = chrome_trace_category_filter.ChromeTraceCategoryFilter("test1,test2")
    a = chrome_trace_category_filter.ChromeTraceCategoryFilter()
    self.assertEquals(a.IsSubset(b), None)

    b = chrome_trace_category_filter.ChromeTraceCategoryFilter()
    a = chrome_trace_category_filter.ChromeTraceCategoryFilter("test*")
    self.assertEquals(a.IsSubset(b), None)

    b = chrome_trace_category_filter.ChromeTraceCategoryFilter("test?")
    a = chrome_trace_category_filter.ChromeTraceCategoryFilter()
    self.assertEquals(a.IsSubset(b), None)

    b = chrome_trace_category_filter.ChromeTraceCategoryFilter("test1")
    a = chrome_trace_category_filter.ChromeTraceCategoryFilter("test1,test2")
    self.assertEquals(a.IsSubset(b), False)

    b = chrome_trace_category_filter.ChromeTraceCategoryFilter("-test1")
    a = chrome_trace_category_filter.ChromeTraceCategoryFilter("test1")
    self.assertEquals(a.IsSubset(b), False)

    b = chrome_trace_category_filter.ChromeTraceCategoryFilter("test1,test2")
    a = chrome_trace_category_filter.ChromeTraceCategoryFilter("test2,test1")
    self.assertEquals(a.IsSubset(b), True)

    b = chrome_trace_category_filter.ChromeTraceCategoryFilter("-test1,-test2")
    a = chrome_trace_category_filter.ChromeTraceCategoryFilter("-test2")
    self.assertEquals(a.IsSubset(b), False)

    b = chrome_trace_category_filter.ChromeTraceCategoryFilter(
        "disabled-by-default-test1")
    a = chrome_trace_category_filter.ChromeTraceCategoryFilter(
        "disabled-by-default-test1,disabled-by-default-test2")
    self.assertEquals(a.IsSubset(b), False)

    b = chrome_trace_category_filter.ChromeTraceCategoryFilter(
        "disabled-by-default-test1")
    a = chrome_trace_category_filter.ChromeTraceCategoryFilter(
        "disabled-by-default-test2")
    self.assertEquals(a.IsSubset(b), False)

  # Synthetic delays must match exactly (name, duration and mode) for the
  # subset relation to hold; order does not matter.
  def testIsSubsetWithSyntheticDelays(self):
    b = chrome_trace_category_filter.ChromeTraceCategoryFilter(
        "DELAY(foo;0.016)")
    a = chrome_trace_category_filter.ChromeTraceCategoryFilter(
        "DELAY(foo;0.016)")
    self.assertEquals(a.IsSubset(b), True)

    b = chrome_trace_category_filter.ChromeTraceCategoryFilter(
        "DELAY(foo;0.016)")
    a = chrome_trace_category_filter.ChromeTraceCategoryFilter()
    self.assertEquals(a.IsSubset(b), True)

    b = chrome_trace_category_filter.ChromeTraceCategoryFilter()
    a = chrome_trace_category_filter.ChromeTraceCategoryFilter(
        "DELAY(foo;0.016)")
    self.assertEquals(a.IsSubset(b), False)

    b = chrome_trace_category_filter.ChromeTraceCategoryFilter(
        "DELAY(foo;0.016)")
    a = chrome_trace_category_filter.ChromeTraceCategoryFilter(
        "DELAY(foo;0.032)")
    self.assertEquals(a.IsSubset(b), False)

    b = chrome_trace_category_filter.ChromeTraceCategoryFilter(
        "DELAY(foo;0.016;static)")
    a = chrome_trace_category_filter.ChromeTraceCategoryFilter(
        "DELAY(foo;0.016;oneshot)")
    self.assertEquals(a.IsSubset(b), False)

    b = chrome_trace_category_filter.ChromeTraceCategoryFilter(
        "DELAY(foo;0.016),DELAY(bar;0.1)")
    a = chrome_trace_category_filter.ChromeTraceCategoryFilter(
        "DELAY(bar;0.1),DELAY(foo;0.016)")
    self.assertEquals(a.IsSubset(b), True)

    b = chrome_trace_category_filter.ChromeTraceCategoryFilter(
        "DELAY(foo;0.016),DELAY(bar;0.1)")
    a = chrome_trace_category_filter.ChromeTraceCategoryFilter(
        "DELAY(bar;0.1)")
    self.assertEquals(a.IsSubset(b), True)

    b = chrome_trace_category_filter.ChromeTraceCategoryFilter(
        "DELAY(foo;0.016),DELAY(bar;0.1)")
    a = chrome_trace_category_filter.ChromeTraceCategoryFilter(
        "DELAY(foo;0.032),DELAY(bar;0.1)")
    self.assertEquals(a.IsSubset(b), False)
| {
"content_hash": "a8d94fb877bc4af177aa5b55c4d2f6fd",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 80,
"avg_line_length": 41.651162790697676,
"alnum_prop": 0.7205471803461754,
"repo_name": "catapult-project/catapult-csm",
"id": "df7e3667d77d5d447d75469a275ffd7444216ab1",
"size": "7327",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "telemetry/telemetry/timeline/chrome_trace_category_filter_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "C++",
"bytes": "43728"
},
{
"name": "CSS",
"bytes": "24873"
},
{
"name": "Go",
"bytes": "80325"
},
{
"name": "HTML",
"bytes": "11817766"
},
{
"name": "JavaScript",
"bytes": "518002"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "Python",
"bytes": "6207634"
},
{
"name": "Shell",
"bytes": "2558"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import *
from django.conf import settings
from manager import VarnishManager
# Catch-all route: every URL under this include is handled by the
# 'management' view of varnishapp.views.
urlpatterns = patterns('varnishapp.views',
    (r'', 'management'),
)
| {
"content_hash": "1674db7d2836c0f34d4fe72fbd247930",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 42,
"avg_line_length": 22.5,
"alnum_prop": 0.7555555555555555,
"repo_name": "vdmann/cse-360-image-hosting-website",
"id": "643d8b2d6957ba67e551b8af395dd902be276e3a",
"size": "180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django-varnish-master/varnishapp/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "393656"
},
{
"name": "C++",
"bytes": "8206"
},
{
"name": "CSS",
"bytes": "156932"
},
{
"name": "JavaScript",
"bytes": "460188"
},
{
"name": "Objective-C",
"bytes": "10168"
},
{
"name": "PHP",
"bytes": "6649"
},
{
"name": "Python",
"bytes": "6922419"
},
{
"name": "Shell",
"bytes": "241220"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.views.generic import TemplateView
from django.contrib import admin
# Register ModelAdmin classes from all installed apps.
admin.autodiscover()

urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'cloudfuzzy.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),

    # Template-only pages (no view logic).
    url(r'^$', TemplateView.as_view(template_name="index.html"), name='index'),
    url(r'^teste/$', TemplateView.as_view(template_name="teste.html"), name='index'),
    url(r'^systems$', TemplateView.as_view(template_name="systems/base.html"), name='systems'),
    # REST API endpoints.
    (r'^api/', include('api_rest.urls')),
    url(r'^admin/', include(admin.site.urls)),
)

# Authentication views from django.contrib.auth, both rendered with the
# project's login template.
urlpatterns += patterns('django.contrib.auth.views',
    url(r'^login/$', 'login', {'template_name': 'users/login.html', }, name='login'),
    url(r'^logout/$', 'logout', {'template_name': 'users/login.html'}, name='logout'),
)

# Expose django-debug-toolbar routes only while DEBUG is enabled.
if settings.DEBUG:
    import debug_toolbar
    urlpatterns += patterns('',
        url(r'^__debug__/', include(debug_toolbar.urls)),
    )
| {
"content_hash": "a0360aa0cb97b2dfd148de3e0952ff3c",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 95,
"avg_line_length": 32.06060606060606,
"alnum_prop": 0.6512287334593573,
"repo_name": "arruda/cloudfuzzy",
"id": "239e30736fcf71fa45c63b142f847306ea1ea4c4",
"size": "1058",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudfuzzy/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "189161"
},
{
"name": "CoffeeScript",
"bytes": "4335"
},
{
"name": "JavaScript",
"bytes": "50500"
},
{
"name": "Python",
"bytes": "166778"
},
{
"name": "Ruby",
"bytes": "956"
},
{
"name": "Shell",
"bytes": "6348"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.