text stringlengths 4 1.02M | meta dict |
|---|---|
"""
This file is part of WSQL-SDK
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__author__ = "@bg"
from unittest import TestCase, mock
from io import StringIO
from warnings import catch_warnings
from wsql_sdk import translator
# Virtual file tree served by the mocked builtins.open()/os.listdir() in the
# tests below.  Keys are the paths the translator will request; values are
# the SQL/preprocessor sources it should receive.
_TEST_FILES = {
"./main.sql": """
#include "common/func.sql"
select $_var1 from $_var2 where $_var3;
$f1(MAX(*), `test2`);
$f2(*, as23!@#%4);
$f3("a", "b c");
""",
# NOTE: the trailing backslash inside the f2 #define continues the macro
# body onto the next line.
"./common/func.sql": """
#include "vars.sql"
#define f1(a,b) select $a from $b
#define f2(d, e) select $e from $d;\
select $d from $e
#define f3(a, b) CALL p($a, $b)
""",
"./common/vars.sql": """
#define _var1 "var1"
#define _var2 'var2'
#define _var3 `var3`
#define _var4 1
""",
# Includes itself -- used to exercise duplicate-include warnings.
"./recursive.sql": """
#include "recursive.sql"
""",
# Opens an #if that is never closed -- used to check mismatch detection.
"./invalid_if.sql": """
#if defined("TEST")
#include "/common/vars.sql"
"""
}
# Expected translator output for compiling "./main.sql" above; compared
# against the stripped output buffer in test_include.
_EXPECTED = """\
select "var1" from 'var2' where `var3`;
select MAX(*) from `test2`;
select as23!@#%4 from *; select * from as23!@#%4;
CALL p("a", "b c");\
"""
def _open_mock(filename):
    """Stand-in for builtins.open(): serve file content from _TEST_FILES."""
    return StringIO(_TEST_FILES[filename])
def _listdir_mock(dirname):
    """Stand-in for os.listdir(): list _TEST_FILES names under *dirname*."""
    return [path.rpartition('/')[-1] for path in _TEST_FILES if path.startswith(dirname)]
class TestTranslator(TestCase):
    """Behavioural tests for the wsql_sdk SQL macro preprocessor."""

    def setUp(self):
        """Give every test a fresh translator writing to a memory buffer."""
        self.output = StringIO()
        self.trans = translator.Translator(self.output)

    def _written(self):
        """Rewind the output buffer and return everything emitted so far."""
        self.output.seek(0)
        return self.output.read()

    def test_define_variable(self):
        """#DEFINE stores a variable's replacement text verbatim."""
        self.trans.parse(StringIO('#DEFINE a "1" '))
        self.assertEqual('"1"', self.trans.variables["a"])
        self.trans.parse(StringIO('#DEFINE b `12%&89qa@`'))
        self.assertEqual('`12%&89qa@`', self.trans.variables["b"])
        self.trans.parse(StringIO('#DEFINE c "1" '))
        self.assertEqual('"1"', self.trans.variables["c"])
        self.trans.parse(StringIO("#DEFINE d '*'"))
        self.assertEqual("'*'", self.trans.variables["d"])

    def test_expand_variable(self):
        """$var occurrences are replaced everywhere they appear."""
        self.trans.variables["var1"] = "`12%&89qa@`"
        self.trans.parse(StringIO('select $var1; select $var1;'))
        self.assertEqual("select `12%&89qa@`; select `12%&89qa@`;\n", self._written())

    def test_define_macros(self):
        """A macro definition records its arguments, body and AST offsets."""
        self.trans.parse(StringIO('#DEFINE f1(a) select * \\ \nfrom $a'))
        macro = self.trans.functions['f1']
        self.assertEqual(['a'], list(macro.args))
        self.assertEqual('select * from $a', macro.body)
        self.assertEqual(14, macro.ast[0][1])
        self.assertEqual(16, macro.ast[0][2])

    def test_undefine_function(self):
        """#undef removes a previously defined macro function."""
        self.trans.parse(StringIO('#define f1(a) select * from $a\n$f1(b);\n#undef f1'))
        self.assertEqual("select * from b;\n", self._written())
        self.assertNotIn('f1', self.trans.functions)

    def test_undefine_variable(self):
        """#undef removes a variable; undefining an unknown name warns."""
        self.trans.parse(StringIO('#define _v 1\nselect $_v from a;\n#undef _v'))
        self.assertEqual("select 1 from a;\n", self._written())
        self.assertNotIn('_v', self.trans.variables)
        self.trans.reset()
        with catch_warnings(record=True) as log:
            self.trans.parse(StringIO("#undef v\n"))
            self.assertEqual(1, len(log))
            self.assertIn("0: macros v is not defined!", str(log[0]))

    def test_expand_macros(self):
        """Macros expand with their arguments; wrong arity raises."""
        self.trans.parse(StringIO('#DEFINE f1(a) select * \\ \nfrom $a\n$f1(`table1`);'))
        self.assertEqual("select * from `table1`;\n", self._written())
        self.trans.reset()
        self.assertRaises(ValueError, self.trans.parse,
                          StringIO('#DEFINE f2(a,b) select * \\ \nfrom $a\n$f2(`table1`);'))

    def test_expand_function(self):
        """A macro argument that is itself a variable gets expanded too."""
        self.trans.parse(StringIO('#DEFINE v 1\n#DEFINE f(a) select $a\n$f($v);'))
        self.assertEqual("-- CONSTANT v 1\nselect 1;\n", self._written())

    def test_include(self):
        """#include pulls files in recursively and expands everything."""
        with mock.patch('builtins.open', lambda f, *args, **kwargs: _open_mock(f)):
            with mock.patch('os.listdir', _listdir_mock):
                self.trans.compile('main.sql')
        self.assertEqual(_EXPECTED, self._written().strip())

    def test_if(self):
        """ test conditions """
        self.trans.parse(StringIO("#define var4 1\n"
                                  "#if var4 == 0\n"
                                  "select $var4 from t1;\n"
                                  "#define var5 2\n"
                                  "#include \"./var1.sql\"\n"
                                  "#else\n"
                                  "select $var4 from t2;\n"
                                  "#endif"))
        self.assertEqual("-- CONSTANT var4 1\nselect 1 from t2;\n", self._written())
        self.trans.reset()
        self.output.seek(0)
        self.output.truncate(0)
        self.trans.parse(StringIO("#if defined('VAR')\n"
                                  "select TRUE\n"
                                  "#else\n"
                                  "select FALSE\n"
                                  "#endif\n"))
        self.assertEqual("select FALSE\n", self._written())
        with mock.patch('builtins.open', lambda f, *args, **kwargs: _open_mock(f)):
            with mock.patch('os.listdir', _listdir_mock):
                self.trans.reset()
                self.assertRaisesRegex(ValueError, "mismatch if/endif",
                                       self.trans.compile, "invalid_if.sql")

    def test_redefine_macros(self):
        """ test warning generated if macros was redefined """
        with catch_warnings(record=True) as log:
            self.trans.parse(StringIO("#define var4 1\n#define var4 2\n"))
            self.assertEqual(1, len(log))
            self.assertIn("1: macros var4 already defined!", str(log[0]))
        self.trans.reset()
        with catch_warnings(record=True) as log:
            self.trans.parse(StringIO("#define f(a) 1\n#define f(a) 2\n"))
            self.assertEqual(1, len(log))
            self.assertIn("1: macros f already defined!", str(log[0]))

    def test_recursion(self):
        """ test recursive includes"""
        with mock.patch('builtins.open', lambda f, *args, **kwargs: _open_mock(f)):
            with mock.patch('os.listdir', _listdir_mock):
                with catch_warnings(record=True) as log:
                    self.trans.compile("recursive.sql")
                    self.assertEqual(2, len(log))
                    self.assertIn("Already included: ./recursive.sql", str(log[0]))

    def test_no_inclusion_warning(self):
        """ test warning if there is no include files found"""
        with catch_warnings(record=True) as log:
            with mock.patch('builtins.open', lambda f, *args, **kwargs: _open_mock(f)):
                self.trans.compile("recursive.sql")
            self.assertEqual(1, len(log))
            self.assertIn("Not included: ./recursive.sql", str(log[0]))

    def test_arguments_parse(self):
        """ test cmdline arguments parsing """
        parsed = translator.parse_arguments(["test.sql", "test_o.sql", "-d", "k:v", "-d", "k1:v1 v2"])
        self.assertEqual("test.sql", parsed.input[0])
        self.assertEqual("test_o.sql", parsed.output)
        self.assertEqual(["k:v", "k1:v1 v2"], parsed.defines)
        parsed = translator.parse_arguments(["test.sql"])
        self.assertEqual("test.sql", parsed.input[0])
        self.assertIsNone(parsed.output)

    def test_global_defines(self):
        """Variables injected before parsing behave like #define'd ones."""
        self.trans.variables["DB_NAME"] = "test"
        self.trans.parse(StringIO("use `$DB_NAME`;"))
        self.assertEqual("use `test`;\n", self._written())

    def test_recursive_expand_macros(self):
        """Macros may reference variables and other macros in their bodies."""
        self.trans.parse(StringIO(
            """\
#define _G a
#define _K $_G
#define f1(t, g) $t WHERE $g
#define f2(v, t) SELECT $v FROM $f1($t, $_K)
$f2(1, t);"""
        ))
        self.assertEqual("SELECT 1 FROM t WHERE a;", self._written().strip())

    def test_define_empty_macros(self):
        """A macro with an empty body expands to nothing."""
        self.trans.parse(StringIO('#DEFINE f1(a)\n$f1(1)\n'))
        macro = self.trans.functions['f1']
        self.assertEqual(['a'], list(macro.args))
        self.assertEqual(0, len(macro.body))
        self.assertEqual('', self._written())
| {
"content_hash": "8bdc0bd3252db65c4156390e35d67f9d",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 122,
"avg_line_length": 38.276595744680854,
"alnum_prop": 0.5797665369649806,
"repo_name": "WebSQL/sdk",
"id": "51683e55ff9cae84ddc8d4a50aecc00a8f5e5f80",
"size": "8995",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_translator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "92864"
}
],
"symlink_target": ""
} |
"""Support for Open-Meteo weather."""
from __future__ import annotations
from open_meteo import Forecast as OpenMeteoForecast
from homeassistant.components.weather import Forecast, WeatherEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from homeassistant.helpers.device_registry import DeviceEntryType
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from .const import DOMAIN, WMO_TO_HA_CONDITION_MAP
async def async_setup_entry(
    hass: HomeAssistant,
    entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up Open-Meteo weather entity based on a config entry."""
    coordinator = hass.data[DOMAIN][entry.entry_id]
    entity = OpenMeteoWeatherEntity(entry=entry, coordinator=coordinator)
    async_add_entities([entity])
class OpenMeteoWeatherEntity(CoordinatorEntity, WeatherEntity):
    """Defines an Open-Meteo weather entity."""

    _attr_temperature_unit = TEMP_CELSIUS
    coordinator: DataUpdateCoordinator[OpenMeteoForecast]

    def __init__(
        self, *, entry: ConfigEntry, coordinator: DataUpdateCoordinator
    ) -> None:
        """Initialize Open-Meteo weather entity."""
        super().__init__(coordinator=coordinator)
        self._attr_unique_id = entry.entry_id
        self._attr_name = entry.title
        # One service-type device is registered per config entry.
        self._attr_device_info = DeviceInfo(
            entry_type=DeviceEntryType.SERVICE,
            identifiers={(DOMAIN, entry.entry_id)},
            manufacturer="Open-Meteo",
            name=entry.title,
        )

    @property
    def condition(self) -> str | None:
        """Return the current condition."""
        current = self.coordinator.data.current_weather
        if not current:
            return None
        return WMO_TO_HA_CONDITION_MAP.get(current.weather_code)

    @property
    def temperature(self) -> float | None:
        """Return the platform temperature."""
        current = self.coordinator.data.current_weather
        return current.temperature if current else None

    @property
    def wind_speed(self) -> float | None:
        """Return the wind speed."""
        current = self.coordinator.data.current_weather
        return current.wind_speed if current else None

    @property
    def wind_bearing(self) -> float | str | None:
        """Return the wind bearing."""
        current = self.coordinator.data.current_weather
        return current.wind_direction if current else None

    @property
    def forecast(self) -> list[Forecast] | None:
        """Return the forecast in native units."""
        daily = self.coordinator.data.daily
        if daily is None:
            return None
        entries: list[Forecast] = []
        for index, time in enumerate(daily.time):
            entry = Forecast(datetime=time.isoformat())
            if daily.weathercode is not None:
                entry["condition"] = WMO_TO_HA_CONDITION_MAP.get(
                    daily.weathercode[index]
                )
            # Optional series are only copied when the API returned them.
            for key, series in (
                ("precipitation", daily.precipitation_sum),
                ("temperature", daily.temperature_2m_max),
                ("templow", daily.temperature_2m_min),
                ("wind_bearing", daily.wind_direction_10m_dominant),
                ("wind_speed", daily.wind_speed_10m_max),
            ):
                if series is not None:
                    entry[key] = series[index]
            entries.append(entry)
        return entries
| {
"content_hash": "cf4bf49d2702b7261982fdc7d823fea0",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 86,
"avg_line_length": 34.932203389830505,
"alnum_prop": 0.655992236778263,
"repo_name": "GenericStudent/home-assistant",
"id": "bb7170bb5da1c65562b43b87ff612fca630dd862",
"size": "4122",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/open_meteo/weather.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3070"
},
{
"name": "Python",
"bytes": "44491729"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
} |
"""
Support to interact with a Music Player Daemon.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.mpd/
"""
import logging
import socket
import voluptuous as vol
from homeassistant.components.media_player import (
MEDIA_TYPE_MUSIC, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, PLATFORM_SCHEMA,
SUPPORT_PREVIOUS_TRACK, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
SUPPORT_VOLUME_SET, SUPPORT_PLAY_MEDIA, MEDIA_TYPE_PLAYLIST,
MediaPlayerDevice)
from homeassistant.const import (
STATE_OFF, STATE_PAUSED, STATE_PLAYING, CONF_PORT, CONF_PASSWORD,
CONF_HOST)
import homeassistant.helpers.config_validation as cv
# Client library for talking to the MPD daemon (installed on demand).
REQUIREMENTS = ['python-mpd2==0.5.5']
_LOGGER = logging.getLogger(__name__)
# Configuration keys and defaults; "location" is the display name shown in
# the frontend, defaulting to the daemon's standard port 6600.
CONF_LOCATION = 'location'
DEFAULT_LOCATION = 'MPD'
DEFAULT_PORT = 6600
# Feature bitmask advertised to the media_player component.
SUPPORT_MPD = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_TURN_OFF | \
SUPPORT_TURN_ON | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | \
SUPPORT_PLAY_MEDIA
# Platform schema: host is required, everything else optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_LOCATION, default=DEFAULT_LOCATION): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the MPD platform."""
    daemon = config.get(CONF_HOST)
    port = config.get(CONF_PORT)
    location = config.get(CONF_LOCATION)
    password = config.get(CONF_PASSWORD)

    import mpd

    # Probe the daemon once up front so a bad host or password fails the
    # platform setup instead of surfacing later at runtime.
    # pylint: disable=no-member
    try:
        probe = mpd.MPDClient()
        probe.connect(daemon, port)
        if password is not None:
            probe.password(password)
        probe.close()
        probe.disconnect()
    except socket.error:
        _LOGGER.error("Unable to connect to MPD")
        return False
    except mpd.CommandError as error:
        if "incorrect password" not in str(error):
            raise
        _LOGGER.error("MPD reported incorrect password")
        return False

    add_devices([MpdDevice(daemon, port, location, password)])
class MpdDevice(MediaPlayerDevice):
    """Representation of a MPD server."""

    # pylint: disable=no-member, too-many-public-methods, abstract-method
    def __init__(self, server, port, location, password):
        """Initialize the MPD device.

        server -- host name or IP of the MPD daemon
        port -- TCP port the daemon listens on
        location -- display name for this media player entity
        password -- optional MPD password, or None
        """
        import mpd
        self.server = server
        self.port = port
        self._name = location
        self.password = password
        # Latest snapshots of the daemon's status()/currentsong() replies.
        self.status = None
        self.currentsong = None

        self.client = mpd.MPDClient()
        self.client.timeout = 10
        self.client.idletimeout = None
        self.update()

    def update(self):
        """Get the latest data and update the state."""
        import mpd
        try:
            self.status = self.client.status()
            self.currentsong = self.client.currentsong()
        except (mpd.ConnectionError, BrokenPipeError, ValueError):
            # Cleanly disconnect in case connection is not in valid state
            try:
                self.client.disconnect()
            except mpd.ConnectionError:
                pass
            # Reconnect (re-authenticating if needed) and retry once.
            self.client.connect(self.server, self.port)
            if self.password is not None:
                self.client.password(self.password)
            self.status = self.client.status()
            self.currentsong = self.client.currentsong()

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def state(self):
        """Return the media state."""
        if self.status['state'] == 'play':
            return STATE_PLAYING
        elif self.status['state'] == 'pause':
            return STATE_PAUSED
        else:
            return STATE_OFF

    @property
    def media_content_id(self):
        """Content ID of current playing media."""
        # .get(): streams may not carry an 'id' tag; consistent with the
        # other currentsong getters below which already guard this way.
        return self.currentsong.get('id')

    @property
    def media_content_type(self):
        """Content type of current playing media."""
        return MEDIA_TYPE_MUSIC

    @property
    def media_duration(self):
        """Duration of current playing media in seconds."""
        # Time does not exist for streams
        return self.currentsong.get('time')

    @property
    def media_title(self):
        """Title of current playing media."""
        name = self.currentsong.get('name', None)
        title = self.currentsong.get('title', None)
        if name is None and title is None:
            return "None"
        elif name is None:
            return title
        elif title is None:
            return name
        else:
            return '{}: {}'.format(name, title)

    @property
    def media_artist(self):
        """Artist of current playing media (Music track only)."""
        return self.currentsong.get('artist')

    @property
    def media_album_name(self):
        """Album of current playing media (Music track only)."""
        return self.currentsong.get('album')

    @property
    def volume_level(self):
        """Return the volume level as a float in [0, 1]."""
        return int(self.status['volume'])/100

    @property
    def supported_media_commands(self):
        """Flag of media commands that are supported."""
        return SUPPORT_MPD

    def turn_off(self):
        """Service to send the MPD the command to stop playing."""
        self.client.stop()

    def turn_on(self):
        """Service to send the MPD the command to start playing."""
        self.client.play()

    def set_volume_level(self, volume):
        """Set volume of media player; *volume* is a float in [0, 1]."""
        self.client.setvol(int(volume * 100))

    def volume_up(self):
        """Service to send the MPD the command for volume up.

        Fix: the previous ``<= 100`` check allowed requesting volume 105
        when already at 100; the target is now clamped to MPD's 0-100.
        """
        current_volume = int(self.status['volume'])
        if current_volume < 100:
            self.client.setvol(min(current_volume + 5, 100))

    def volume_down(self):
        """Service to send the MPD the command for volume down.

        Fix: the previous ``>= 0`` check allowed requesting volume -5 when
        already at 0; the target is now clamped to MPD's 0-100.
        """
        current_volume = int(self.status['volume'])
        if current_volume > 0:
            self.client.setvol(max(current_volume - 5, 0))

    def media_play(self):
        """Service to send the MPD the command for play/pause."""
        self.client.pause(0)

    def media_pause(self):
        """Service to send the MPD the command for play/pause."""
        self.client.pause(1)

    def media_next_track(self):
        """Service to send the MPD the command for next track."""
        self.client.next()

    def media_previous_track(self):
        """Service to send the MPD the command for previous track."""
        self.client.previous()

    def play_media(self, media_type, media_id, **kwargs):
        """Send the media player the command for playing a playlist."""
        _LOGGER.info(str.format("Playing playlist: {0}", media_id))
        if media_type == MEDIA_TYPE_PLAYLIST:
            self.client.clear()
            self.client.load(media_id)
            self.client.play()
        else:
            _LOGGER.error(str.format("Invalid media type. Expected: {0}",
                                     MEDIA_TYPE_PLAYLIST))
| {
"content_hash": "ae42b6ded505ef61fd8aeb85d61eedfb",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 74,
"avg_line_length": 30.56595744680851,
"alnum_prop": 0.6164555199777252,
"repo_name": "hexxter/home-assistant",
"id": "56af3cd88f96f97789fee0319e5c86f9084baedf",
"size": "7183",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/media_player/mpd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1371597"
},
{
"name": "Python",
"bytes": "3699472"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "7255"
}
],
"symlink_target": ""
} |
import numpy as np
import pandas as pd
def datetime64_to_microseconds(dt):
    """Reinterpret a numpy datetime64 as its raw unsigned integer ticks.

    NOTE(review): the result is microseconds only when *dt* already has
    microsecond ('us') resolution -- astype does not rescale other units.
    """
    ticks = dt.astype('uint64')
    return ticks
def travel_time(start_time, path, measurements_by_station, station_metadata, time_granularity=60*60):
    """Calculate the travel time along the given path at the given start time

    Args:
        path - list of station IDs that must be traversed to reach the destination
        start_time - start time datetime64
        measurements_by_station - per-station DataFrames with 'timestamp' and 'avgspeed' columns
        station_metadata - DataFrame indexed by station ID with an 'Abs_PM' mile-post column
        time_granularity - granularity of samples in seconds

    Returns:
        (total_distance, elapsed) tuple, or (nan, nan) when a required
        speed sample is missing.
    """
    granularity_us = time_granularity * 1000000  # convert to microseconds
    clock = datetime64_to_microseconds(start_time)
    total_dist = 0
    for origin, destination in zip(path, path[1:]):
        # Look up the origin station's speed sample for the current
        # (quantized) point in time.
        samples = measurements_by_station[origin]
        bucket = np.datetime64(clock - clock % granularity_us)
        at_bucket = samples[samples['timestamp'] == bucket]
        speed = at_bucket.iloc[0]['avgspeed']
        if np.isnan(speed):
            # TODO: what if speed is NAN? interpolate
            return (np.nan, np.nan)
        origin_meta = station_metadata.loc[origin]
        destination_meta = station_metadata.loc[destination]
        dist = abs(origin_meta['Abs_PM'] - destination_meta['Abs_PM'])
        total_dist += dist
        # Advance the clock by this hop's duration (hours -> microseconds).
        clock += 1000000 * 60 * 60 * dist / speed
    return (total_dist, np.datetime64(clock) - start_time)
def test_travel_time():
    """Smoke-test travel_time on a tiny hand-built network.

    Fix: the final ``print`` used Python-2-only statement syntax;
    ``print(...)`` with a single argument behaves identically on
    Python 2 and 3.
    """
    path = [213, 224, 285, 485]
    station_metadata = pd.DataFrame({'Abs_PM' : pd.Series([0, 60, 75, 85], index=[213, 224, 285, 485])})
    base_time = np.datetime64('2013-01-01')
    hour = np.timedelta64(1000000 * 60 * 60)
    times = pd.Series([base_time, base_time + hour], index=range(2))
    speeds = [[40, np.nan], [np.nan, 60], [np.nan, 120], [np.nan, np.nan]]
    samples_by_station = {path[i] : pd.DataFrame({'timestamp' : times, 'avgspeed' : speeds[i]}) for i in range(len(path))}
    start_time = base_time + np.timedelta64(5 * 1000000 * 60)  # start at 5 minutes past the hour

    # Traveling 60 miles at 40 MPH should put us in the next hour (total time = 1:35)
    # Then traveling 15 miles at 60 MPH should keep us in the same hour (total time = 1:50)
    # Then 10 miles at 120 MPH should get us to our destination (total time = 1:55)
    # Travel time is 1:55 minus the 5 minutes past the hour we started at, so 1:50
    print(travel_time(start_time, path, samples_by_station, station_metadata))


if __name__ == '__main__':
    test_travel_time()
| {
"content_hash": "b83bccab199cf0a712872a2186b83ecf",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 120,
"avg_line_length": 41.71666666666667,
"alnum_prop": 0.6815821014782262,
"repo_name": "sryza/freewaydata",
"id": "6d007d5e98fc1ee0f9d55e7afd5acad534ad5144",
"size": "2503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/traveltime.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10846"
}
],
"symlink_target": ""
} |
"""
fs.memoryfs
===========
A Filesystem that exists in memory only. Which makes them extremely fast, but non-permanent.
If you open a file from a `memoryfs` you will get back a StringIO object from the standard library.
"""
import datetime
import stat
from fs.path import iteratepath, pathsplit, normpath
from fs.base import *
from fs.errors import *
from fs import _thread_synchronize_default
from fs.filelike import StringIO
from fs import iotools
from os import SEEK_END
import threading
import six
from six import b
def _check_mode(mode, mode_chars):
for c in mode_chars:
if c not in mode:
return False
return True
class MemoryFile(object):
    """File-like handle onto the in-memory buffer of a MemoryFS entry.

    Several handles may be open on the same underlying buffer, so each
    operation takes the entry's lock, seeks the shared buffer to this
    handle's own position, and records the new position afterwards.
    """

    def seek_and_lock(method):
        # Decorator: run *method* with the entry lock held and the shared
        # buffer positioned at this handle's offset, then save the offset.
        def wrapper(self, *args, **kwargs):
            with self._lock:
                self.mem_file.seek(self.pos)
                result = method(self, *args, **kwargs)
                self.pos = self.mem_file.tell()
                return result
        return wrapper

    def __init__(self, path, memory_fs, mem_file, mode, lock):
        self.closed = False
        self.path = path
        self.memory_fs = memory_fs
        self.mem_file = mem_file
        self.mode = mode
        self._lock = lock
        self.pos = 0

        if _check_mode(mode, 'a'):
            # Append: start writing at the end of the existing data.
            with lock:
                self.mem_file.seek(0, SEEK_END)
                self.pos = self.mem_file.tell()
        elif _check_mode(mode, 'w'):
            # Write: discard any existing data.
            with lock:
                self.mem_file.seek(0)
                self.mem_file.truncate()

        assert self.mem_file is not None, "self.mem_file should have a value"

    def __str__(self):
        return "<MemoryFile in %s %s>" % (self.memory_fs, self.path)

    def __repr__(self):
        return u"<MemoryFile in %s %s>" % (self.memory_fs, self.path)

    def __unicode__(self):
        return u"<MemoryFile in %s %s>" % (self.memory_fs, self.path)

    def __del__(self):
        if not self.closed:
            self.close()

    def flush(self):
        # All data already lives in memory; nothing to flush.
        pass

    def __iter__(self):
        if 'r' not in self.mode and '+' not in self.mode:
            raise IOError("File not open for reading")
        self.mem_file.seek(self.pos)
        for line in self.mem_file:
            yield line

    @seek_and_lock
    def next(self):
        if 'r' not in self.mode and '+' not in self.mode:
            raise IOError("File not open for reading")
        return self.mem_file.next()

    @seek_and_lock
    def readline(self, *args, **kwargs):
        if 'r' not in self.mode and '+' not in self.mode:
            raise IOError("File not open for reading")
        return self.mem_file.readline(*args, **kwargs)

    def close(self):
        # Decide under the lock whether this call performs the close, but
        # notify the owning filesystem outside the lock.
        should_close = False
        with self._lock:
            should_close = not self.closed and self.mem_file is not None
            if should_close:
                self.closed = True
        if should_close:
            self.memory_fs._on_close_memory_file(self, self.path)

    @seek_and_lock
    def read(self, size=None):
        if 'r' not in self.mode and '+' not in self.mode:
            raise IOError("File not open for reading")
        return self.mem_file.read(-1 if size is None else size)

    @seek_and_lock
    def seek(self, *args, **kwargs):
        return self.mem_file.seek(*args, **kwargs)

    @seek_and_lock
    def tell(self):
        return self.pos

    @seek_and_lock
    def truncate(self, *args, **kwargs):
        if 'r' in self.mode and '+' not in self.mode:
            raise IOError("File not open for writing")
        return self.mem_file.truncate(*args, **kwargs)

    def write(self, data):
        # Not decorated: the filesystem must be notified of the
        # modification before the entry lock is taken.
        if 'r' in self.mode and '+' not in self.mode:
            raise IOError("File not open for writing")
        self.memory_fs._on_modify_memory_file(self.path)
        with self._lock:
            self.mem_file.seek(self.pos)
            self.mem_file.write(data)
            self.pos = self.mem_file.tell()

    @seek_and_lock
    def writelines(self, *args, **kwargs):
        return self.mem_file.writelines(*args, **kwargs)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
        return False
class DirEntry(object):
    """A node of the MemoryFS tree: either a directory or a file.

    Directory entries hold a ``contents`` dict of child entries; file
    entries hold their data in an in-memory buffer guarded by an RLock.
    """

    def sync(method):
        # Decorator: run under the entry's lock when one exists; directory
        # entries have no lock and call straight through.
        def wrapper(self, *args, **kwargs):
            if self.lock is not None:
                with self.lock:
                    return method(self, *args, **kwargs)
            return method(self, *args, **kwargs)
        return wrapper

    def __init__(self, type, name, contents=None):
        assert type in ("dir", "file"), "Type must be dir or file!"
        self.type = type
        self.name = name

        if contents is None and type == "dir":
            contents = {}

        self.open_files = []        # MemoryFile handles currently open
        self.contents = contents    # child entries (directories only)
        self.mem_file = None        # data buffer (files only)
        self.created_time = datetime.datetime.now()
        self.modified_time = self.created_time
        self.accessed_time = self.created_time
        self.xattrs = {}

        self.lock = None
        if self.type == 'file':
            self.mem_file = StringIO()
            self.lock = threading.RLock()

    def get_value(self):
        """Return the file's full contents."""
        with self.lock:
            return self.mem_file.getvalue()
    data = property(get_value)

    def desc_contents(self):
        if self.isfile():
            return "<file %s>" % self.name
        elif self.isdir():
            return "<dir %s>" % "".join("%s: %s" % (k, v.desc_contents()) for k, v in self.contents.iteritems())

    def isdir(self):
        return self.type == "dir"

    def isfile(self):
        return self.type == "file"

    def __str__(self):
        return "%s: %s" % (self.name, self.desc_contents())

    @sync
    def __getstate__(self):
        # Locks cannot be pickled; replace the buffer with its raw data.
        state = self.__dict__.copy()
        state.pop('lock')
        if self.mem_file is not None:
            state['mem_file'] = self.data
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        if self.type == 'file':
            self.lock = threading.RLock()
        else:
            self.lock = None
        # Rebuild the buffer object from the pickled raw data.
        if self.mem_file is not None:
            data = self.mem_file
            self.mem_file = StringIO()
            self.mem_file.write(data)
class MemoryFS(FS):
"""An in-memory filesystem.
"""
# Capability flags for this filesystem: local, writable, case-sensitive,
# with atomic directory creation/rename but non-atomic data operations.
# NOTE(review): presumably consumed by the FS base class's getmeta() --
# confirm against fs.base.
_meta = {'thread_safe': True,
'network': False,
'virtual': False,
'read_only': False,
'unicode_paths': True,
'case_insensitive_paths': False,
'atomic.move': False,
'atomic.copy': False,
'atomic.makedir': True,
'atomic.rename': True,
'atomic.setcontents': False}
def _make_dir_entry(self, *args, **kwargs):
    """Create a new tree node through the pluggable DirEntry factory."""
    factory = self.dir_entry_factory
    return factory(*args, **kwargs)
def __init__(self, file_factory=None):
    """Create the filesystem with an empty root directory.

    file_factory -- optional callable used instead of MemoryFile to build
    file objects; must accept the same constructor arguments.
    """
    super(MemoryFS, self).__init__(thread_synchronize=_thread_synchronize_default)
    self.dir_entry_factory = DirEntry
    self.file_factory = file_factory or MemoryFile
    if not callable(self.file_factory):
        raise ValueError("file_factory should be callable")
    self.root = self._make_dir_entry('dir', 'root')
def __str__(self):
    """Return the fixed display form of this filesystem."""
    return "<MemoryFS>"
def __repr__(self):
    """Return a constructor-style representation."""
    return "MemoryFS()"
def __unicode__(self):
    """Return the display form (Python 2 unicode protocol)."""
    return "<MemoryFS>"
@synchronize
def _get_dir_entry(self, dirpath):
    """Walk the tree and return the DirEntry at *dirpath*, or None."""
    dirpath = normpath(dirpath)
    node = self.root
    for component in iteratepath(dirpath):
        if node.contents is None:
            # Reached a file entry mid-path: the path cannot exist.
            return None
        child = node.contents.get(component, None)
        if child is None:
            return None
        node = child
    return node
@synchronize
def _dir_entry(self, path):
    """Like _get_dir_entry, but raise ResourceNotFoundError when absent."""
    entry = self._get_dir_entry(path)
    if entry is None:
        raise ResourceNotFoundError(path)
    return entry
@synchronize
def desc(self, path):
    """Return a short human-readable description of *path*."""
    if self.isdir(path):
        return "Memory dir"
    if self.isfile(path):
        return "Memory file object"
    return "No description available"
@synchronize
def isdir(self, path):
    """Return True if *path* names a directory (the root always does)."""
    path = normpath(path)
    if path in ('', '/'):
        return True
    entry = self._get_dir_entry(path)
    return entry is not None and entry.isdir()
@synchronize
def isfile(self, path):
    """Return True if *path* names a file (the root never is one)."""
    path = normpath(path)
    if path in ('', '/'):
        return False
    entry = self._get_dir_entry(path)
    return entry is not None and entry.isfile()
@synchronize
def exists(self, path):
    """Return True if *path* names any entry; the root always exists."""
    path = normpath(path)
    return path in ('', '/') or self._get_dir_entry(path) is not None
@synchronize
def makedir(self, dirname, recursive=False, allow_recreate=False):
    """Create a directory.

    recursive -- also create missing intermediate directories.
    allow_recreate -- do not fail when the directory already exists.
    """
    if not dirname and not allow_recreate:
        raise PathError(dirname)
    fullpath = normpath(dirname)
    if fullpath in ('', '/'):
        # The root always exists; recreating it is a no-op at best.
        if allow_recreate:
            return
        raise DestinationExistsError(dirname)
    dirpath, dirname = pathsplit(dirname.rstrip('/'))

    if recursive:
        parent_dir = self._get_dir_entry(dirpath)
        if parent_dir is not None:
            if parent_dir.isfile():
                raise ResourceInvalidError(dirname, msg="Can not create a directory, because path references a file: %(path)s")
            if not allow_recreate and dirname in parent_dir.contents:
                raise DestinationExistsError(dirname, msg="Can not create a directory that already exists (try allow_recreate=True): %(path)s")

        # First pass: verify no existing intermediate component is a file.
        current_dir = self.root
        for path_component in iteratepath(dirpath)[:-1]:
            dir_item = current_dir.contents.get(path_component, None)
            if dir_item is None:
                break
            if not dir_item.isdir():
                raise ResourceInvalidError(dirname, msg="Can not create a directory, because path references a file: %(path)s")
            current_dir = dir_item

        # Second pass: create every missing intermediate directory.
        current_dir = self.root
        for path_component in iteratepath(dirpath):
            dir_item = current_dir.contents.get(path_component, None)
            if dir_item is None:
                new_dir = self._make_dir_entry("dir", path_component)
                current_dir.contents[path_component] = new_dir
                current_dir = new_dir
            else:
                current_dir = dir_item
        parent_dir = current_dir
    else:
        parent_dir = self._get_dir_entry(dirpath)
        if parent_dir is None:
            raise ParentDirectoryMissingError(dirname, msg="Could not make dir, as parent dir does not exist: %(path)s")

    dir_item = parent_dir.contents.get(dirname, None)
    if dir_item is not None:
        if dir_item.isdir():
            if not allow_recreate:
                raise DestinationExistsError(dirname)
        else:
            raise ResourceInvalidError(dirname, msg="Can not create a directory, because path references a file: %(path)s")
    if dir_item is None:
        parent_dir.contents[dirname] = self._make_dir_entry("dir", dirname)
#@synchronize
#def _orphan_files(self, file_dir_entry):
# for f in file_dir_entry.open_files[:]:
# f.close()
@synchronize
@iotools.filelike_to_stream
def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
    """Open the file at *path* and return a file-like object.

    'r'/'a' modes require the file to exist; 'w' creates it when missing.
    """
    path = normpath(path)
    filepath, filename = pathsplit(path)
    parent = self._get_dir_entry(filepath)
    if parent is None or not parent.isdir():
        raise ResourceNotFoundError(path)

    entry = None
    if 'r' in mode or 'a' in mode:
        if filename not in parent.contents:
            raise ResourceNotFoundError(path)
        entry = parent.contents[filename]
        if entry.isdir():
            raise ResourceInvalidError(path)
    elif 'w' in mode:
        entry = parent.contents.get(filename)
        if entry is None:
            entry = self._make_dir_entry("file", filename)
            parent.contents[filename] = entry

    if entry is not None:
        entry.accessed_time = datetime.datetime.now()
        handle = self.file_factory(path, self, entry.mem_file, mode, entry.lock)
        entry.open_files.append(handle)
        return handle

    # NOTE(review): preserved from the original -- modes without 'r'/'a'/'w'
    # fall through to here and return None; the guard below can never fire
    # because *parent* was already validated above.
    if parent is None:
        raise ResourceNotFoundError(path)
@synchronize
def remove(self, path):
    """Delete the file at *path*; directories must use removedir()."""
    entry = self._get_dir_entry(path)
    if entry is None:
        raise ResourceNotFoundError(path)
    if entry.isdir():
        raise ResourceInvalidError(path, msg="That's a directory, not a file: %(path)s")
    parent_path, leaf_name = pathsplit(path)
    parent = self._get_dir_entry(parent_path)
    del parent.contents[leaf_name]
    @synchronize
    def removedir(self, path, recursive=False, force=False):
        """Remove the directory at ``path``.

        :param recursive: after removing ``path``, also remove now-empty
            ancestor directories, walking upwards until a non-empty
            parent is met.
        :param force: allow removal even when the directory still has
            contents (the whole subtree goes with it).
        """
        path = normpath(path)
        # The filesystem root can never be removed.
        if path in ('', '/'):
            raise RemoveRootError(path)
        dir_entry = self._get_dir_entry(path)
        if dir_entry is None:
            raise ResourceNotFoundError(path)
        if not dir_entry.isdir():
            raise ResourceInvalidError(path, msg="Can't remove resource, its not a directory: %(path)s" )
        if dir_entry.contents and not force:
            raise DirectoryNotEmptyError(path)
        if recursive:
            rpathname = path
            # Walk up the tree, unlinking each directory from its parent,
            # until a parent that still has other entries is found.
            while rpathname:
                rpathname, dirname = pathsplit(rpathname)
                parent_dir = self._get_dir_entry(rpathname)
                if not dirname:
                    raise RemoveRootError(path)
                del parent_dir.contents[dirname]
                # stop recursing if the directory has other contents
                if parent_dir.contents:
                    break
        else:
            # Non-recursive: unlink just this directory from its parent.
            pathname, dirname = pathsplit(path)
            parent_dir = self._get_dir_entry(pathname)
            if not dirname:
                raise RemoveRootError(path)
            del parent_dir.contents[dirname]
    @synchronize
    def rename(self, src, dst):
        """Rename/move the entry at ``src`` to ``dst``.

        Open file handles on ``src`` are flushed and re-pointed at the
        new path.  Raises ResourceNotFoundError if ``src`` is missing,
        DestinationExistsError if ``dst`` already exists, and
        ParentDirectoryMissingError if ``dst``'s parent does not exist.
        """
        src = normpath(src)
        dst = normpath(dst)
        src_dir, src_name = pathsplit(src)
        src_entry = self._get_dir_entry(src)
        if src_entry is None:
            raise ResourceNotFoundError(src)
        # Keep already-open files usable under the new path.
        open_files = src_entry.open_files[:]
        for f in open_files:
            f.flush()
            f.path = dst
        dst_dir,dst_name = pathsplit(dst)
        dst_entry = self._get_dir_entry(dst)
        if dst_entry is not None:
            raise DestinationExistsError(dst)
        src_dir_entry = self._get_dir_entry(src_dir)
        # NOTE(review): these xattrs are taken from src's *parent*
        # directory entry and merged into dst's parent below -- confirm
        # this is intended rather than copying the moved entry's own
        # xattrs.
        src_xattrs = src_dir_entry.xattrs.copy()
        dst_dir_entry = self._get_dir_entry(dst_dir)
        if dst_dir_entry is None:
            raise ParentDirectoryMissingError(dst)
        # Re-home the entry under the destination parent, fix its name,
        # then drop the old link.
        dst_dir_entry.contents[dst_name] = src_dir_entry.contents[src_name]
        dst_dir_entry.contents[dst_name].name = dst_name
        dst_dir_entry.xattrs.update(src_xattrs)
        del src_dir_entry.contents[src_name]
@synchronize
def settimes(self, path, accessed_time=None, modified_time=None):
now = datetime.datetime.now()
if accessed_time is None:
accessed_time = now
if modified_time is None:
modified_time = now
dir_entry = self._get_dir_entry(path)
if dir_entry is not None:
dir_entry.accessed_time = accessed_time
dir_entry.modified_time = modified_time
return True
return False
@synchronize
def _on_close_memory_file(self, open_file, path):
dir_entry = self._get_dir_entry(path)
if dir_entry is not None and open_file in dir_entry.open_files:
dir_entry.open_files.remove(open_file)
@synchronize
def _on_modify_memory_file(self, path):
dir_entry = self._get_dir_entry(path)
if dir_entry is not None:
dir_entry.modified_time = datetime.datetime.now()
    @synchronize
    def listdir(self, path="/", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
        """List the names contained in the directory at ``path``.

        Filtering and formatting of the result is delegated to
        ``self._listdir_helper``.  Raises ResourceNotFoundError for a
        missing path and ResourceInvalidError if ``path`` is a file.
        """
        dir_entry = self._get_dir_entry(path)
        if dir_entry is None:
            raise ResourceNotFoundError(path)
        if dir_entry.isfile():
            raise ResourceInvalidError(path, msg="not a directory: %(path)s")
        # NOTE(review): Python 2 only -- keys() must return a list for
        # the index assignment below, and `unicode` is the py2 text type.
        paths = dir_entry.contents.keys()
        for (i,p) in enumerate(paths):
            if not isinstance(p,unicode):
                paths[i] = unicode(p)
        return self._listdir_helper(path, paths, wildcard, full, absolute, dirs_only, files_only)
@synchronize
def getinfo(self, path):
dir_entry = self._get_dir_entry(path)
if dir_entry is None:
raise ResourceNotFoundError(path)
info = {}
info['created_time'] = dir_entry.created_time
info['modified_time'] = dir_entry.modified_time
info['accessed_time'] = dir_entry.accessed_time
if dir_entry.isdir():
info['st_mode'] = 0755 | stat.S_IFDIR
else:
info['size'] = len(dir_entry.data or b(''))
info['st_mode'] = 0666 | stat.S_IFREG
return info
@synchronize
def copydir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=1024*64):
src_dir_entry = self._get_dir_entry(src)
if src_dir_entry is None:
raise ResourceNotFoundError(src)
src_xattrs = src_dir_entry.xattrs.copy()
super(MemoryFS, self).copydir(src, dst, overwrite, ignore_errors=ignore_errors, chunk_size=chunk_size)
dst_dir_entry = self._get_dir_entry(dst)
if dst_dir_entry is not None:
dst_dir_entry.xattrs.update(src_xattrs)
@synchronize
def movedir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=1024*64):
src_dir_entry = self._get_dir_entry(src)
if src_dir_entry is None:
raise ResourceNotFoundError(src)
src_xattrs = src_dir_entry.xattrs.copy()
super(MemoryFS, self).movedir(src, dst, overwrite, ignore_errors=ignore_errors, chunk_size=chunk_size)
dst_dir_entry = self._get_dir_entry(dst)
if dst_dir_entry is not None:
dst_dir_entry.xattrs.update(src_xattrs)
@synchronize
def copy(self, src, dst, overwrite=False, chunk_size=1024*64):
src_dir_entry = self._get_dir_entry(src)
if src_dir_entry is None:
raise ResourceNotFoundError(src)
src_xattrs = src_dir_entry.xattrs.copy()
super(MemoryFS, self).copy(src, dst, overwrite, chunk_size)
dst_dir_entry = self._get_dir_entry(dst)
if dst_dir_entry is not None:
dst_dir_entry.xattrs.update(src_xattrs)
@synchronize
def move(self, src, dst, overwrite=False, chunk_size=1024*64):
src_dir_entry = self._get_dir_entry(src)
if src_dir_entry is None:
raise ResourceNotFoundError(src)
src_xattrs = src_dir_entry.xattrs.copy()
super(MemoryFS, self).move(src, dst, overwrite, chunk_size)
dst_dir_entry = self._get_dir_entry(dst)
if dst_dir_entry is not None:
dst_dir_entry.xattrs.update(src_xattrs)
@synchronize
def getcontents(self, path, mode="rb", encoding=None, errors=None, newline=None):
dir_entry = self._get_dir_entry(path)
if dir_entry is None:
raise ResourceNotFoundError(path)
if not dir_entry.isfile():
raise ResourceInvalidError(path, msg="not a file: %(path)s")
data = dir_entry.data or b('')
if 'b' not in mode:
return iotools.decode_binary(data, encoding=encoding, errors=errors, newline=newline)
return data
    @synchronize
    def setcontents(self, path, data=b'', encoding=None, errors=None, chunk_size=1024*64):
        """Replace the contents of the file at ``path`` with ``data``.

        Byte strings are written directly into a fresh in-memory buffer
        (creating the file entry if needed) and the byte count is
        returned; any other data type is handed to the base-class
        implementation.
        """
        if isinstance(data, six.binary_type):
            if not self.exists(path):
                # Create an empty file entry first.
                self.open(path, 'wb').close()
            dir_entry = self._get_dir_entry(path)
            if not dir_entry.isfile():
                raise ResourceInvalidError('Not a directory %(path)s', path)
            # Swap in a brand-new buffer rather than truncating the old
            # one, so existing readers keep their view.
            new_mem_file = StringIO()
            new_mem_file.write(data)
            dir_entry.mem_file = new_mem_file
            return len(data)
        return super(MemoryFS, self).setcontents(path, data=data, encoding=encoding, errors=errors, chunk_size=chunk_size)
        # if isinstance(data, six.text_type):
        #     return super(MemoryFS, self).setcontents(path, data, encoding=encoding, errors=errors, chunk_size=chunk_size)
        # if not self.exists(path):
        #     self.open(path, 'wb').close()
        # dir_entry = self._get_dir_entry(path)
        # if not dir_entry.isfile():
        #     raise ResourceInvalidError('Not a directory %(path)s', path)
        # new_mem_file = StringIO()
        # new_mem_file.write(data)
        # dir_entry.mem_file = new_mem_file
@synchronize
def setxattr(self, path, key, value):
dir_entry = self._dir_entry(path)
key = unicode(key)
dir_entry.xattrs[key] = value
@synchronize
def getxattr(self, path, key, default=None):
key = unicode(key)
dir_entry = self._dir_entry(path)
return dir_entry.xattrs.get(key, default)
@synchronize
def delxattr(self, path, key):
dir_entry = self._dir_entry(path)
try:
del dir_entry.xattrs[key]
except KeyError:
pass
@synchronize
def listxattrs(self, path):
dir_entry = self._dir_entry(path)
return dir_entry.xattrs.keys()
| {
"content_hash": "800c680f049fdc291152996b40538588",
"timestamp": "",
"source": "github",
"line_count": 693,
"max_line_length": 155,
"avg_line_length": 33.331890331890335,
"alnum_prop": 0.5696350491363262,
"repo_name": "Konubinix/pyfilesystem",
"id": "5688c40cf2c859379f21233309a21a78969e6bed",
"size": "23121",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fs/memoryfs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1048053"
}
],
"symlink_target": ""
} |
'''
@author: Frank
'''
import unittest
from virtualrouter import virtualrouter
from virtualrouter.plugins import dns
from zstacklib.utils import jsonobject
from zstacklib.utils import uuidhelper
from zstacklib.utils import http
import time
class Test(unittest.TestCase):
    """Manual integration test for the virtual router's DNS plugin.

    Boots the agent's HTTP server, POSTs a SetDnsCmd to /setdns and
    prints whatever the agent POSTs back to the local callback URL.
    """
    # URL on this host that the agent calls back with its async reply.
    CALLBACK_URL = 'http://localhost:7272/testcallback'
    def setUp(self):
        # Start the virtual-router service with the callback endpoint
        # registered; give the server a moment to come up.
        self.service = virtualrouter.VirtualRouter()
        self.service.http_server.register_sync_uri('/testcallback', self.callback)
        self.service.start()
        time.sleep(1)
    def callback(self, req):
        # Dump the agent's async response body for manual inspection.
        rsp = jsonobject.loads(req[http.REQUEST_BODY])
        print jsonobject.dumps(rsp)
    def testName(self):
        # Ask the agent to configure 72.72.72.72 as DNS, then wait long
        # enough for the async callback before stopping the service.
        cmd = dns.SetDnsCmd()
        info = dns.DnsInfo()
        info.dnsAddress = '72.72.72.72'
        cmd.dns = [info]
        rsp = http.json_dump_post('http://localhost:7272/setdns', cmd, headers={http.TASK_UUID:uuidhelper.uuid(), http.CALLBACK_URI:self.CALLBACK_URL})
        time.sleep(10)
        self.service.stop()
if __name__ == "__main__":
    # e.g. sys.argv = ['', 'Test.testName'] to run a single test
    unittest.main()
"content_hash": "a1f8950c7d0ba5625e783d2033b621e8",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 151,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.64198606271777,
"repo_name": "mingjian2049/zstack-utility",
"id": "ee875fe05107feb1675c2816f3b60630e81936d3",
"size": "1148",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "virtualrouter/test/test_configure_dns.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "AGS Script",
"bytes": "1147"
},
{
"name": "HTML",
"bytes": "4445"
},
{
"name": "Pascal",
"bytes": "187"
},
{
"name": "Puppet",
"bytes": "10417"
},
{
"name": "Python",
"bytes": "2380992"
},
{
"name": "Shell",
"bytes": "235730"
}
],
"symlink_target": ""
} |
"""
This is part of shot detector.
Produced by w495 at 2017.05.04 04:18:27
"""
from __future__ import absolute_import, division, print_function
import logging
from shot_detector.handlers import BaseVideoHandler, BaseEventHandler
class BaseShotDetector(BaseVideoHandler, BaseEventHandler):
    """Entry-point facade combining video handling and event handling
    for shot detection."""

    __logger = logging.getLogger(__name__)

    def detect(self, input_uri='', format_name=None, **kwargs):
        """Run shot detection over the video at *input_uri*.

        :param str input_uri:
            file name of the input video or a resource path/URL, e.g.
            'http://localhost:8090/live.flv' -- anything acceptable as
            an ffmpeg input parameter.
        :param str format_name:
            name of the video format; use it for hardware devices.
        :param dict kwargs:
            extra options, passed through to ``handle_video`` untouched.
        :return: whatever ``handle_video`` returns (the final video state).
        """
        return self.handle_video(input_uri=input_uri,
                                 format_name=format_name,
                                 **kwargs)
| {
"content_hash": "7eff77fc7523a7739ac163e55bfad76d",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 69,
"avg_line_length": 30.342105263157894,
"alnum_prop": 0.6027753686036427,
"repo_name": "w495/python-video-shot-detector",
"id": "2f0e5e581f289a1ac9f92b0264fea7544cdfdc41",
"size": "1176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shot_detector/detectors/base_shot_detector.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "Makefile",
"bytes": "1751"
},
{
"name": "Python",
"bytes": "599048"
},
{
"name": "Shell",
"bytes": "89"
}
],
"symlink_target": ""
} |
from configs.module import Module
from irc.utils import formatcodes
def init():
    """Build and return the 'echo' Module with all its command hooks.

    Registers: echo, absorb, qecho, bold, italic, underline, strike,
    color, reset and the 'give' alias.
    """
    m = Module('echo')
    m.set_help('Echo text and provide various text functions.')
    # 'noquote': emit the command output without the usual quoting.
    m.add_command_hook('echo',
        {
            'function': echo,
            'help': 'Echo text.',
            'noquote': True,
            'args': [
                {
                    'name': 'text',
                    'optional': False,
                    'help': 'Text to echo.',
                    'end': True,
                },
            ],
        })
    m.add_command_hook('absorb',
        {
            'function': absorb,
            'help': 'Absorb text.',
            'args': [
                {
                    'name': 'return',
                    'optional': False,
                    'help': 'Text to return.'
                },
                {
                    'name': 'text',
                    'optional': False,
                    'help': 'Text to echo.',
                    'end': True,
                },
            ],
        })
    # qecho is echo without 'noquote' -- the quoting is kept.
    m.add_command_hook('qecho',
        {
            'function': echo,
            'help': 'Echo text with quote.',
            'args': [
                {
                    'name': 'text',
                    'optional': False,
                    'help': 'Text to echo.',
                    'end': True,
                },
            ],
        })
    m.add_command_hook('bold',
        {
            'function': bold,
            'help': 'Echo bold text.',
            'noquote': True,
            'args': [
                {
                    'name': 'text',
                    'optional': False,
                    'help': 'Text to echo.',
                    'end': True,
                },
            ],
        })
    # Compact registration form: 'name::help' plus 'arg[...]::help' specs.
    m.add_short_command_hook(italic,
        'italic::Echo italic text.',
        ['text...::Text to echo.'], noquote=True)
    m.add_short_command_hook(underline,
        'underline::Echo underlined text.',
        ['text...::Text to echo.'], noquote=True)
    m.add_short_command_hook(strike,
        'strike::Echo strikthroughed text.',
        ['text...::Text to echo.'], noquote=True)
    m.add_short_command_hook(color,
        'color::Echo colored text.',
        ['color::Color: fgnd[,bgnd].',
        'text...::Text to echo.'], noquote=True)
    m.add_short_command_hook(lambda fp, args: formatcodes.reset,
        'reset::Echo the reset code.',
        [])
    m.add_alias('give', 'echo $#: <*$*>')
    return m
def echo(fp, args):
    """Return the 'text' argument unchanged (empty string when absent)."""
    text = args.getlinstr('text', '')
    return "%s" % text
def absorb(fp, args):
    """Swallow the 'text' argument and return only the 'return' argument."""
    kept = args.getlinstr('return', '')
    return "%s" % kept
def bold(fp, args):
    """Wrap the 'text' argument in IRC bold codes; the reset code is
    appended only when the text is non-empty."""
    text = args.getlinstr('text', '')
    suffix = formatcodes.reset if text else ''
    return formatcodes.bold + "%s%s" % (text, suffix)
def italic(fp, args):
    """Wrap the 'text' argument in IRC italic codes; the reset code is
    appended only when the text is non-empty."""
    text = args.getlinstr('text', '')
    suffix = formatcodes.reset if text else ''
    return formatcodes.italic + "%s%s" % (text, suffix)
def underline(fp, args):
    """Wrap the 'text' argument in IRC underline codes; the reset code
    is appended only when the text is non-empty."""
    text = args.getlinstr('text', '')
    suffix = formatcodes.reset if text else ''
    return formatcodes.underline + "%s%s" % (text, suffix)
def strike(fp, args):
    """Wrap the 'text' argument in IRC strikethrough codes; the reset
    code is appended only when the text is non-empty."""
    text = args.getlinstr('text', '')
    suffix = formatcodes.reset if text else ''
    return formatcodes.strike + "%s%s" % (text, suffix)
def color(fp, args):
    """Prefix the 'text' argument with the IRC color code and the
    'color' argument; the reset code is appended only when the text is
    non-empty."""
    spec = args.getlinstr('color')
    text = args.getlinstr('text', '')
    suffix = formatcodes.reset if text else ''
    return formatcodes.color + "%s%s%s" % (spec, text, suffix)
"content_hash": "590810a484cfec85ed39856e772baa1e",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 64,
"avg_line_length": 29.040650406504064,
"alnum_prop": 0.4333706606942889,
"repo_name": "shacknetisp/fourthevaz",
"id": "b2824531e68f905b27d9a8a2e3a050162d292d42",
"size": "3596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/core/echo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3159"
},
{
"name": "Python",
"bytes": "252429"
},
{
"name": "Shell",
"bytes": "928"
}
],
"symlink_target": ""
} |
from github import Github
import requests
from bs4 import BeautifulSoup
from st2actions.runners.pythonrunner import Action
__all__ = [
'BaseGithubAction'
]
BASE_URL = 'https://github.com'
class BaseGithubAction(Action):
    """Common base for the GitHub pack actions: holds the API client and
    helpers for scraping pages that the API does not expose."""

    def __init__(self, config):
        super(BaseGithubAction, self).__init__(config=config)
        # An empty/None token yields an anonymous client.
        api_token = self.config.get('token', None) or None
        self._client = Github(api_token)

    def _web_session(self):
        '''Returns a requests session to scrape off the web'''
        session = requests.Session()
        # Fetch the login form to pick up the CSRF token and form target.
        login_page = session.get(BASE_URL + '/login').text
        html = BeautifulSoup(login_page)
        auth_token = html.find('input', {'name': 'authenticity_token'}).attrs['value']
        commit_value = html.find('input', {'name': 'commit'}).attrs['value']
        session_path = html.find('form', {'method': 'post'}).attrs['action']
        credentials = {
            'login': self.config['user'],
            'password': self.config['password'],
            'commit': commit_value,
            'authenticity_token': auth_token
        }
        # Submitting the form stores the auth cookies on the session.
        session.post(BASE_URL + session_path, data=credentials)
        return session

    def _get_analytics(self, category, repo):
        """Fetch the JSON backing the /graphs/<category> page of *repo*
        using a logged-in web session."""
        url = 'https://github.com/' + repo + '/graphs/' + category
        session = self._web_session()
        response = session.get(url)
        return response.json()
| {
"content_hash": "c5249a112986b71815c61c2ced4f6eab",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 81,
"avg_line_length": 31.630434782608695,
"alnum_prop": 0.5958762886597938,
"repo_name": "lmEshoo/st2contrib",
"id": "7f15b31fb9628b5ddae04bf8b34245495db006ff",
"size": "1455",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "packs/github/actions/lib/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "8530"
},
{
"name": "Makefile",
"bytes": "2262"
},
{
"name": "Python",
"bytes": "444890"
},
{
"name": "Shell",
"bytes": "3635"
}
],
"symlink_target": ""
} |
'''A module containing a class for storing Creature objects in a
SQLite database.'''
import csv
import sqlite3
__all__ = ['CreatureDB']
class CreatureDB(object):
    '''Class for storing Creature objects in a SQLite database.'''
    def __init__(self, name='creature.db', use_nominal_cr=False):
        '''Opens (or creates) the SQLite database and ensures the
        "creatures" table exists.

        :param name: file name of the SQLite database
        :param use_nominal_cr: if True, store CR as a text label
            ("CR <value>") instead of a real number
        '''
        # creatures whose CR falls outside [min_cr, max_cr] are skipped
        self.min_cr = 0.0
        self.max_cr = float('inf')
        # set flags
        self.using_nominal_cr = use_nominal_cr
        # initialize database
        self.connection = sqlite3.connect(name)
        self.connection.text_factory = str
        self._create_table()
    def _construct_table_columns(self):
        '''Constructs a tuple that defines the columns in
        the "creatures" table

        :returns tuple that defines the columns in "creatures" table
        '''
        columns = ('id integer primary key autoincrement',
                   'name varchar(45)')
        # set type of CR column depending on flag
        if self.using_nominal_cr:
            columns = columns + ('CR varchar(10)',)
        else:
            columns = columns + ('CR real',)
        # add the remaining database fields to column tuple
        main_entry_columns = (
            'hp integer', 'HD integer',
            'ac integer', 'touch_ac integer', 'flatfooted_ac integer',
            'Fort integer', 'Ref integer', 'Will integer',
            'Str integer', 'Dex integer', 'Con integer',
            'Int integer', 'Wis integer', 'Cha integer',
            'BAB integer', 'CMB integer', 'CMD integer'
        )
        columns = columns + main_entry_columns
        return columns
    def _construct_tuple_insert_values(self, creature):
        '''Constructs a tuple of Creature values for insertion into
        the "creatures" table

        :returns tuple of values for insertion into "creatures" table
        '''
        values = (creature.name,)
        # set value of CR column depending on flag
        if self.using_nominal_cr:
            values = values + ('CR ' + creature.cr,)
        else:
            values = values + (creature.cr,)
        # add the remaining database fields to values tuple
        main_entry_values = (
            creature.hp,
            creature.hd,
            creature.ac['AC'],
            creature.ac['touch'],
            creature.ac['flat-footed'],
            creature.saves['Fort'],
            creature.saves['Ref'],
            creature.saves['Will'],
            creature.ability_scores['Str'],
            creature.ability_scores['Dex'],
            creature.ability_scores['Con'],
            creature.ability_scores['Int'],
            creature.ability_scores['Wis'],
            creature.ability_scores['Cha'],
            creature.bab,
            creature.cmb,
            creature.cmd
        )
        values = values + main_entry_values
        return values
    def _create_table(self):
        '''Creates a SQLite table named "creatures" for storing
        Creature objects if it does not already exist
        '''
        # column definitions are fixed strings built above, so the
        # %-substitution here is safe (no user-controlled input)
        columns = self._construct_table_columns()
        query = '''create table if not exists creatures
                (
                  %s,%s,
                  %s,%s,
                  %s,%s,%s,
                  %s,%s,%s,
                  %s,%s,%s,%s,%s,%s,%s,
                  %s, %s, %s
                )''' % columns
        self.connection.execute(query)
    def add_creature(self, creature):
        '''Adds a Creature object as a row in the appropriate table
        of the SQLite database

        :param creature: a Creature object to be added to the database
        '''
        # check that creature CR is within desired range
        # NOTE(review): float() assumes creature.cr is numeric text;
        # fractional labels such as '1/2' would raise ValueError here
        creature_cr = float(creature.cr)
        if creature_cr < self.min_cr or creature_cr > self.max_cr:
            return
        # ignore duplicate creatures
        if self.is_creature_in_db(creature):
            return
        # insert creature into database (parameterized -- safe)
        values = self._construct_tuple_insert_values(creature)
        query = '''insert into creatures
                (
                  name,CR,
                  hp,HD,
                  ac,touch_ac,flatfooted_ac,
                  Fort, Ref, Will,
                  Str,Dex,Con,Int,Wis,Cha,
                  BAB,CMB,CMD
                )
                values
                (
                  ?,?,
                  ?,?,
                  ?,?,?,
                  ?,?,?,
                  ?,?,?,?,?,?,
                  ?,?,?
                )'''
        self.connection.execute(query, values)
    def commit_and_close(self):
        '''Commits any uncommitted changes to the SQLite database and
        closes the connection
        '''
        self.connection.commit()
        self.connection.close()
    def export_as_csv(self, file_name='creature.csv'):
        '''Exports the data in this object as a .csv file.

        :param file_name: the name of the output csv file
        '''
        cursor = self.connection.cursor()
        data = cursor.execute('select * from creatures')
        # 'with' guarantees the output file is closed even if writing fails
        with open(file_name, 'w') as csv_file:
            writer = csv.writer(csv_file)
            writer.writerow([
                'id',
                'name', 'CR',
                'hp', 'HD',
                'ac', 'touch_ac', 'flatfooted_ac',
                'Fort', 'Ref', 'Will',
                'Str', 'Dex', 'Con', 'Int', 'Wis', 'Cha',
                'BAB', 'CMB', 'CMD'
            ])
            writer.writerows(data)
    def is_creature_in_db(self, creature):
        ''' Determines whether or not a datbase entry exists for a
        given creature

        :returns True if entry exists, False otherwise
        '''
        # set value of CR column depending on flag
        creature_cr = creature.cr
        if self.using_nominal_cr:
            creature_cr = 'CR ' + creature.cr
        # query database for creature (parameterized -- safe)
        values = (creature.name, creature_cr)
        query = '''select * from creatures where name=? and cr=?'''
        cursor = self.connection.cursor()
        cursor.execute(query, values)
        return cursor.fetchone() is not None
| {
"content_hash": "3d2594d19714ffa4f3bcf715c9015310",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 70,
"avg_line_length": 34.79891304347826,
"alnum_prop": 0.5092925191316571,
"repo_name": "lot9s/pathfinder-rpg-utils",
"id": "66a8219a037a7e8683c03fc2bc823490a1f72d46",
"size": "6403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data-mining/bestiary/db/creatureDB.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47955"
},
{
"name": "TeX",
"bytes": "22739"
}
],
"symlink_target": ""
} |
from .util import GeneralLicense
class License(GeneralLicense):
_labels = ['mit']
name = 'MIT'
description = '''
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
text = '''
MIT License
Copyright (c) YEAR AUTHOR
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
| {
"content_hash": "5910ef2ca91ce5c830a33759a1493d61",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 78,
"avg_line_length": 46.6530612244898,
"alnum_prop": 0.7729658792650919,
"repo_name": "nullhack/python-template",
"id": "c4a2e408d118331aeb78b89da554177ef5fa20cd",
"size": "2286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "licenses/mit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7743"
},
{
"name": "Makefile",
"bytes": "7619"
},
{
"name": "Python",
"bytes": "27378"
},
{
"name": "Smarty",
"bytes": "1284"
}
],
"symlink_target": ""
} |
"""
Bombolone
~~~~~~~~~~~~~~~~~~~~~
Bombolone is a tasty Content Management System for Python based on Flask,
MongoDB, AngularJS, Sass and Bootstrap. It's designed to be a simple,
flexible toolset for projects of any size.
:copyright: (c) 2014 by @zizzamia
:license: BSD (See LICENSE for details)
"""
__title__ = 'bombolone'
__version__ = '0.3.3'
__author__ = '@zizzamia'
__copyright__ = 'Copyright 2014 Bombolone' | {
"content_hash": "d1b8998811110b3ebead16a1bf64bf28",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 74,
"avg_line_length": 26.1875,
"alnum_prop": 0.6730310262529833,
"repo_name": "Opentaste/bombolone",
"id": "14479bc6a90f4c8a209c0df35254474a2d4ec1c2",
"size": "443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "28486"
},
{
"name": "HTML",
"bytes": "60964"
},
{
"name": "JavaScript",
"bytes": "45325"
},
{
"name": "Python",
"bytes": "149118"
},
{
"name": "Ruby",
"bytes": "1239"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from chaco.scales.api import *
| {
"content_hash": "45dbb437f725cb555581fc8000155c9c",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 38,
"avg_line_length": 35,
"alnum_prop": 0.7714285714285715,
"repo_name": "enthought/etsproxy",
"id": "5a3849c2f60e353c87b99e4b0954972f231da0a8",
"size": "85",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/chaco/scales/api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "363714"
}
],
"symlink_target": ""
} |
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography import x509 as c_x509
from cryptography.x509.oid import NameOID
import mock
import six
from magnum.common import exception
from magnum.common.x509 import operations
from magnum.tests import base
class TestX509(base.BaseTestCase):
def setUp(self):
super(TestX509, self).setUp()
self.issuer_name = six.u("fake-issuer")
self.subject_name = six.u("fake-subject")
self.ca_encryption_password = six.b("fake-ca-password")
self.encryption_password = six.b("fake-password")
def _load_pems(self, keypairs, encryption_password):
private_key = serialization.load_pem_private_key(
keypairs['private_key'],
password=encryption_password,
backend=default_backend(),
)
certificate = c_x509.load_pem_x509_certificate(
keypairs['certificate'], default_backend())
return certificate, private_key
def _generate_ca_certificate(self, issuer_name=None):
issuer_name = issuer_name or self.issuer_name
keypairs = operations.generate_ca_certificate(
issuer_name, encryption_password=self.ca_encryption_password)
return self._load_pems(keypairs, self.ca_encryption_password)
def _generate_client_certificate(self, issuer_name, subject_name):
ca = operations.generate_ca_certificate(
self.issuer_name, encryption_password=self.ca_encryption_password)
keypairs = operations.generate_client_certificate(
self.issuer_name,
self.subject_name,
ca['private_key'],
encryption_password=self.encryption_password,
ca_key_password=self.ca_encryption_password,
)
return self._load_pems(keypairs, self.encryption_password)
def _public_bytes(self, public_key):
return public_key.public_bytes(
serialization.Encoding.PEM,
serialization.PublicFormat.SubjectPublicKeyInfo
)
def _generate_private_key(self):
return rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
def _build_csr(self, private_key):
csr = c_x509.CertificateSigningRequestBuilder()
csr = csr.subject_name(c_x509.Name([
c_x509.NameAttribute(NameOID.COMMON_NAME, self.subject_name)
]))
return csr.sign(private_key, hashes.SHA256(), default_backend())
def assertHasPublicKey(self, keypairs):
key = keypairs[1]
cert = keypairs[0]
self.assertEqual(self._public_bytes(key.public_key()),
self._public_bytes(cert.public_key()))
def assertHasSubjectName(self, cert, subject_name):
actual_subject_name = cert.subject.get_attributes_for_oid(
c_x509.NameOID.COMMON_NAME)
actual_subject_name = actual_subject_name[0].value
self.assertEqual(subject_name, actual_subject_name)
def assertHasIssuerName(self, cert, issuer_name):
actual_issuer_name = cert.issuer.get_attributes_for_oid(
c_x509.NameOID.COMMON_NAME)
actual_issuer_name = actual_issuer_name[0].value
self.assertEqual(issuer_name, actual_issuer_name)
def assertInClientExtensions(self, cert):
key_usage = c_x509.KeyUsage(True, False, True, False, False, False,
False, False, False)
key_usage = c_x509.Extension(key_usage.oid, True, key_usage)
extended_key_usage = c_x509.ExtendedKeyUsage([c_x509.OID_CLIENT_AUTH])
extended_key_usage = c_x509.Extension(extended_key_usage.oid, False,
extended_key_usage)
basic_constraints = c_x509.BasicConstraints(ca=False, path_length=None)
basic_constraints = c_x509.Extension(basic_constraints.oid, True,
basic_constraints)
self.assertIn(key_usage, cert.extensions)
self.assertIn(extended_key_usage, cert.extensions)
self.assertIn(basic_constraints, cert.extensions)
def test_generate_ca_certificate_with_bytes_issuer_name(self):
issuer_name = six.b("bytes-issuer-name")
cert, _ = self._generate_ca_certificate(issuer_name)
issuer_name = issuer_name.decode('utf-8')
self.assertHasSubjectName(cert, issuer_name)
self.assertHasIssuerName(cert, issuer_name)
def test_generate_ca_certificate_has_publickey(self):
keypairs = self._generate_ca_certificate(self.issuer_name)
self.assertHasPublicKey(keypairs)
def test_generate_ca_certificate_set_subject_name(self):
cert, _ = self._generate_ca_certificate(self.issuer_name)
self.assertHasSubjectName(cert, self.issuer_name)
def test_generate_ca_certificate_set_issuer_name(self):
cert, _ = self._generate_ca_certificate(self.issuer_name)
self.assertHasIssuerName(cert, self.issuer_name)
def test_generate_ca_certificate_set_extentions_as_ca(self):
cert, _ = self._generate_ca_certificate(self.issuer_name)
key_usage = c_x509.KeyUsage(False, False, False, False, False, True,
False, False, False)
key_usage = c_x509.Extension(key_usage.oid, True, key_usage)
basic_constraints = c_x509.BasicConstraints(ca=True, path_length=0)
basic_constraints = c_x509.Extension(basic_constraints.oid, True,
basic_constraints)
self.assertIn(key_usage, cert.extensions)
self.assertIn(basic_constraints, cert.extensions)
def test_generate_client_certificate_has_publickey(self):
keypairs = self._generate_client_certificate(
self.issuer_name, self.subject_name)
self.assertHasPublicKey(keypairs)
def test_generate_client_certificate_set_subject_name(self):
cert, _ = self._generate_client_certificate(
self.issuer_name, self.subject_name)
self.assertHasSubjectName(cert, self.subject_name)
def test_generate_client_certificate_set_issuer_name(self):
cert, key = self._generate_client_certificate(
self.issuer_name, self.subject_name)
self.assertHasIssuerName(cert, self.issuer_name)
def test_generate_client_certificate_set_extentions_as_client(self):
cert, key = self._generate_client_certificate(
self.issuer_name, self.subject_name)
self.assertInClientExtensions(cert)
@mock.patch('cryptography.x509.load_pem_x509_csr')
@mock.patch('six.b')
def test_sign_with_unicode_csr(self, mock_six, mock_load_pem):
ca_key = self._generate_private_key()
private_key = self._generate_private_key()
csr_obj = self._build_csr(private_key)
csr = csr_obj.public_bytes(serialization.Encoding.PEM)
csr = six.text_type(csr.decode('utf-8'))
mock_load_pem.return_value = csr_obj
operations.sign(csr, self.issuer_name, ca_key,
skip_validation=True)
mock_six.assert_called_once_with(csr)
def test_sign_with_invalid_csr(self):
    """sign() raises InvalidCsr for input that is not a valid PEM CSR."""
    ca_key = self._generate_private_key()
    invalid_csr = six.u('test')
    self.assertRaises(exception.InvalidCsr,
                      operations.sign,
                      invalid_csr, self.issuer_name, ca_key,
                      skip_validation=True)
| {
"content_hash": "dc4e404df0622a58eff8ce5d8abb5c4d",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 79,
"avg_line_length": 39.66321243523316,
"alnum_prop": 0.6496407576747224,
"repo_name": "jay-lau/magnum",
"id": "459e23625781f5c7d1bafa58de772fa040aef1d5",
"size": "8259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "magnum/tests/unit/common/x509/test_sign.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "569"
},
{
"name": "Python",
"bytes": "393112"
}
],
"symlink_target": ""
} |
import boinc_path_config
from sched_messages import SchedMessages, CRITICAL
from subprocess import CalledProcessError, check_output as _check_output, STDOUT
from itertools import chain
import os, os.path as osp
from uuid import uuid4
from time import time
import argparse
import re
# Scheduler logger shared by all helpers in this module.
log = SchedMessages()

# Raised by check_output() after the underlying failure has been logged.
class CheckOutputError(Exception): pass

# Absolute path of the BOINC project root (two directories above this file).
projdir = osp.realpath(osp.join(osp.dirname(__file__), '..', '..'))
def _get_create_work_args():
    """Discover the arguments accepted by BOINC's bin/create_work binary.

    Runs create_work with no arguments and scrapes its usage text for
    ``--name type`` lines, mapping each argument name to a Python converter
    ('n' -> int, 'x' -> float, anything else -> str).

    Returns:
        dict mapping create_work argument name to a type converter.
    """
    try:
        # create_work normally exits non-zero when run without arguments and
        # prints its usage text; previously the success path left ``doc``
        # unbound (NameError), so capture the output in both cases.
        doc = _check_output([osp.join(projdir, 'bin', 'create_work')],
                            stderr=STDOUT)
    except CalledProcessError as e:
        doc = e.output
    matches = [g.groups() for g in
               [re.search('--(.*?) (.*?) ', l) for l in doc.splitlines()] if g]
    args = {k: {'n': int, 'x': float}.get(v, str) for k, v in matches}
    # additional_xml is accepted by create_work even though it is absent from
    # the scraped usage text.
    args['additional_xml'] = str
    return args

# Computed once at import time (invokes the create_work binary).
create_work_args = _get_create_work_args()
def check_output(cmd, *args, **kwargs):
    """
    Wraps subprocess.check_output and logs errors to BOINC
    """
    try:
        return _check_output(cmd, stderr=STDOUT, *args, **kwargs)
    except Exception as err:
        # A CalledProcessError carries the child's combined output; any other
        # failure is reported via its string form.
        detail = err.output if isinstance(err, CalledProcessError) else str(err)
        log.printf(CRITICAL, "Error calling %s:\n%s\n", str(cmd), detail)
        raise CheckOutputError
def dir_hier_path(filename):
    """Return the download-hierarchy path for filename via bin/dir_hier_path."""
    raw_path = check_output(['bin/dir_hier_path', filename], cwd=projdir)
    return raw_path.strip()
def stage_file(name, contents, perm=None):
    """Stage an input file into the BOINC download hierarchy.

    Writes ``contents`` under a uniquified version of ``name`` (a uuid is
    inserted before the extension) and optionally chmods it to ``perm``.

    Returns:
        The uniquified file name (not the full download path).
    """
    base, ext = osp.splitext(name)
    fullname = base + '_' + uuid4().hex + ext
    # Reuse dir_hier_path() instead of duplicating its bin/dir_hier_path call.
    download_path = dir_hier_path(fullname)
    with open(download_path, 'w') as f:
        f.write(contents)
    if perm:
        os.chmod(download_path, perm)
    return fullname
def create_work(appname, create_work_args, input_files):
    """
    Creates and stages input files based on a list of (name,contents) in input_files,
    and calls bin/create_work with extra args specified by create_work_args
    """
    extra_args = []
    for key, value in create_work_args.items():
        extra_args += ['--%s' % key, '%s' % value]
    staged_names = [stage_file(*entry) if isinstance(entry, tuple) else entry
                    for entry in input_files]
    command = ['bin/create_work', '--appname', appname] + extra_args + staged_names
    return check_output(command, cwd=projdir)
def add_create_work_args(parser, exclude=None):
    """
    Add BOINC's bin/create_work arguments to a Python argparse parser
    exclude can be a list of args not to add
    """
    metavars = {int: 'n', float: 'x', str: 'string'}
    for name, converter in sorted(create_work_args.items()):
        if exclude is not None and name in exclude:
            continue
        parser.add_argument('--%s' % name, type=converter,
                            metavar=metavars[converter])
    parser.add_argument('--credit', type=float, metavar='x')
def read_create_work_args(args):
    """
    Read create_work_args from Python argparse args

    Accepts either an argparse.Namespace or a plain dict. A truthy 'credit'
    entry is folded into additional_xml as a <credit> tag.
    """
    if isinstance(args, argparse.Namespace):
        args = vars(args)
    cwargs = {k: v for k, v in args.items()
              if k in create_work_args and v is not None}
    if args.get('credit'):
        # Use .get() so a plain dict without an explicit additional_xml key
        # does not raise KeyError (argparse always supplies the key, callers
        # passing dicts may not).
        cwargs['additional_xml'] = ((args.get('additional_xml') or '') +
                                    '<credit>%s</credit>' % args['credit'])
    return cwargs
| {
"content_hash": "0bcd4278687c7ea504a66425836a9284",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 102,
"avg_line_length": 35.825581395348834,
"alnum_prop": 0.6517364492048037,
"repo_name": "grctest/project-rain-site",
"id": "2c4cb9b463f34d1a55e09ca25011b46f4407c555",
"size": "3081",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ProjectRain_Docker/images/makeproject/boinc2docker/py/Boinc/create_work.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "103"
},
{
"name": "C++",
"bytes": "134254"
},
{
"name": "CSS",
"bytes": "83476"
},
{
"name": "Go",
"bytes": "146"
},
{
"name": "HTML",
"bytes": "111578"
},
{
"name": "JavaScript",
"bytes": "177392"
},
{
"name": "PHP",
"bytes": "757095"
},
{
"name": "Python",
"bytes": "24653"
},
{
"name": "Ruby",
"bytes": "4719"
},
{
"name": "Shell",
"bytes": "33478"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class OutlinecolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for the densitymapbox.colorbar.outlinecolor property."""

    def __init__(
        self, plotly_name="outlinecolor", parent_name="densitymapbox.colorbar", **kwargs
    ):
        # Default the edit type to "colorbars" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "colorbars")
        super(OutlinecolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
| {
"content_hash": "5d6411c1a164d9b97f8e57097d2d4adc",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 88,
"avg_line_length": 34.23076923076923,
"alnum_prop": 0.6292134831460674,
"repo_name": "plotly/plotly.py",
"id": "adb140cd1d7fd4d713db03b127e8586cee4e42f0",
"size": "445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/densitymapbox/colorbar/_outlinecolor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
"""
This module provides helper code to make type annotation within Airflow
codebase easier.
"""
try:
# Protocol and TypedDict are only added to typing module starting from
# python 3.8 we can safely remove this shim import after Airflow drops
# support for <3.8
from typing import ( # type: ignore # noqa # pylint: disable=unused-import
Protocol,
TypedDict,
runtime_checkable,
)
except ImportError:
from typing_extensions import Protocol, TypedDict, runtime_checkable # type: ignore # noqa
| {
"content_hash": "6e3050c5ba474762b0fdab170100b57e",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 95,
"avg_line_length": 33.875,
"alnum_prop": 0.7047970479704797,
"repo_name": "DinoCow/airflow",
"id": "6fd6d8c8252f2e26e2cab251b7e61144888f8c4d",
"size": "1330",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "airflow/typing_compat.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "56963"
},
{
"name": "HTML",
"bytes": "140781"
},
{
"name": "JavaScript",
"bytes": "1370838"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "1473771"
},
{
"name": "Shell",
"bytes": "18638"
}
],
"symlink_target": ""
} |
""" Defines the ImagePlot class.
"""
from __future__ import with_statement
# Standard library imports
from math import ceil, floor, pi
# Enthought library imports.
from traits.api import Bool, Either, Enum, Instance, \
List, Range, Trait, Tuple
from kiva.agg import GraphicsContextArray
# Local relative imports
from base_2d_plot import Base2DPlot
class ImagePlot(Base2DPlot):
    """ A plot based on an image.
    """

    #------------------------------------------------------------------------
    # Data-related traits
    #------------------------------------------------------------------------

    # Overall alpha value of the image. Ranges from 0.0 for transparent to 1.0
    # for full intensity.
    alpha = Trait(1.0, Range(0.0, 1.0))

    # The interpolation method to use when rendering an image onto the GC.
    interpolation = Enum("nearest", "bilinear", "bicubic")

    #------------------------------------------------------------------------
    # Private traits
    #------------------------------------------------------------------------

    # Are the cache traits valid? If False, new ones need to be computed.
    _image_cache_valid = Bool(False)

    # Cached image of the bmp data (not the bmp data in self.data.value).
    _cached_image = Instance(GraphicsContextArray)

    # Tuple-defined rectangle (x, y, dx, dy) in screen space in which the
    # **_cached_image** is to be drawn.
    _cached_dest_rect = Either(Tuple, List)

    #------------------------------------------------------------------------
    # Base2DPlot interface
    #------------------------------------------------------------------------

    def _render(self, gc):
        """ Actually draws the plot.

        Implements the Base2DPlot interface.
        """
        if not self._image_cache_valid:
            self._compute_cached_image()

        # Axis direction signs, derived from the origin corner.
        if "bottom" in self.origin:
            sy = -1
        else:
            sy = 1
        if "left" in self.origin:
            sx = 1
        else:
            sx = -1

        # If the orientation is flipped, the BR and TL cases are swapped
        if self.orientation == "v" and sx == sy:
            sx, sy = -sx, -sy

        with gc:
            gc.clip_to_rect(self.x, self.y, self.width, self.height)
            gc.set_alpha(self.alpha)

            # Kiva image interpolation note:
            # Kiva's Agg backend uses the interpolation setting of the *source*
            # image to determine the type of interpolation to use when drawing the
            # image. The mac backend uses the interpolation setting on the
            # destination GC.
            old_interp = self._cached_image.get_image_interpolation()
            if hasattr(gc, "set_interpolation_quality"):
                from kiva.quartz.ABCGI import InterpolationQuality
                interp_quality_dict = {"nearest": InterpolationQuality.none,
                                       "bilinear": InterpolationQuality.low,
                                       "bicubic": InterpolationQuality.high}
                gc.set_interpolation_quality(
                    interp_quality_dict[self.interpolation])
            elif hasattr(gc, "set_image_interpolation"):
                self._cached_image.set_image_interpolation(self.interpolation)

            x, y, w, h = self._cached_dest_rect
            if self.orientation == "h":          # for horizontal orientation:
                gc.translate_ctm(x + w / 2, y + h / 2)   # translate back normally
            else:                                # for vertical orientation:
                gc.translate_ctm(y + h / 2, x + w / 2)   # translate back with dx,dy swap
            gc.scale_ctm(sx, sy)                 # flip axes as appropriate
            if self.orientation == "v":          # for vertical orientation:
                gc.scale_ctm(1, -1)              # restore origin to lower left
                gc.rotate_ctm(pi / 2)            # rotate 1/4 turn clockwise
            gc.translate_ctm(-x - w / 2, -y - h / 2)  # translate image center to origin
            gc.draw_image(self._cached_image, self._cached_dest_rect)
            self._cached_image.set_image_interpolation(old_interp)

    def map_index(self, screen_pt, threshold=0.0, outside_returns_none=True,
                  index_only=False):
        """ Maps a screen space point to an index into the plot's index array(s).

        Implements the AbstractPlotRenderer interface. Uses 0.0 for *threshold*,
        regardless of the passed value.
        """
        # For image plots, treat hittesting threshold as 0.0, because it's
        # the only thing that really makes sense.
        return Base2DPlot.map_index(self, screen_pt, 0.0, outside_returns_none,
                                    index_only)

    #------------------------------------------------------------------------
    # Private methods
    #------------------------------------------------------------------------

    def _compute_cached_image(self, data=None, mapper=None):
        """ Computes the correct sub-image coordinates and renders an image
        into self._cached_image.

        The parameter *data* is for subclasses that might not store an RGB(A)
        image as the value, but need to compute one to display (colormaps, etc.).

        The parameter *mapper* is also for subclasses that might not store an
        RGB(A) image as their value, and gives an opportunity to produce the
        values only for the visible region, rather than for the whole plot,
        at the expense of more frequent computation.
        """
        if data is None:
            data = self.value.data

        (lpt, upt) = self.index.get_bounds()
        ll_x, ll_y = self.map_screen([lpt])[0]
        ur_x, ur_y = self.map_screen([upt])[0]
        if "right" in self.origin:
            ll_x, ur_x = ur_x, ll_x
        if "top" in self.origin:
            ll_y, ur_y = ur_y, ll_y
        virtual_width = ur_x - ll_x
        virtual_height = ur_y - ll_y

        args = self.position \
             + self.bounds \
             + [ll_x, ll_y, virtual_width, virtual_height]
        img_pixels, gc_rect = self._calc_zoom_coords(*args)

        # Grab the appropriate sub-image, if necessary
        if img_pixels is not None:
            i1, j1, i2, j2 = img_pixels
            if "top" in self.origin:
                y_length = self.value.get_array_bounds()[1][1]
                j1 = y_length - j1
                j2 = y_length - j2
                # swap so that j1 < j2
                j1, j2 = j2, j1
            if "right" in self.origin:
                x_length = self.value.get_array_bounds()[0][1]
                i1 = x_length - i1
                i2 = x_length - i2
                # swap so that i1 < i2
                i1, i2 = i2, i1

            # Since data is row-major, j1 and j2 go first
            data = data[j1:j2, i1:i2]
            if mapper is not None:
                data = mapper(data)

        # Furthermore, the data presented to the GraphicsContextArray needs to
        # be contiguous.  If it is not, we need to make a copy.
        if not data.flags['C_CONTIGUOUS']:
            data = data.copy()

        if data.shape[2] == 3:
            kiva_depth = "rgb24"
        elif data.shape[2] == 4:
            kiva_depth = "rgba32"
        else:
            # BUG FIX: previously read ``data.value_depth`` here, but ``data``
            # is a numpy array and has no such attribute, so this error path
            # itself raised AttributeError.  Report the actual channel count
            # instead (and use the call form of raise, valid in Py2 and Py3).
            raise RuntimeError("Unknown colormap depth value: %i"
                               % data.shape[2])

        self._cached_image = GraphicsContextArray(data, pix_format=kiva_depth)
        if gc_rect is not None:
            self._cached_dest_rect = gc_rect
        else:
            self._cached_dest_rect = (ll_x, ll_y, virtual_width, virtual_height)

        self._image_cache_valid = True

    def _calc_zoom_coords(self, px, py, plot_width, plot_height,
                          ix, iy, image_width, image_height):
        """ Calculates the coordinates of a zoomed sub-image.

        Because of floating point limitations, it is not advisable to request a
        extreme level of zoom, e.g., idx or idy > 10^10.

        Parameters
        ----------
        px : number
            X-coordinate of plot pixel bounds
        py : number
            Y-coordinate of plot pixel bounds
        plot_width : number
            Width of plot pixel bounds
        plot_height : number
            Height of plot pixel bounds
        ix : number
            X-coordinate of image pixel bounds
        iy : number
            Y-coordinate of image pixel bounds
        image_width : number
            Width of image pixel bounds
        image_height : number
            Height of image pixel bounds

        Returns
        -------
        ((i1, j1, i2, j2), (x, y, dx, dy))
            Lower left and upper right indices of the sub-image to be extracted,
            and graphics context origin and extents to draw the sub-image into.
        (None, None)
            No image extraction is necessary.
        """
        # Only extract a sub-image when the virtual image is substantially
        # larger than the plot area; otherwise draw the whole image.
        if (image_width < 1.5 * plot_width) and \
                (image_height < 1.5 * plot_height):
            return (None, None)

        if 0 in (plot_width, plot_height, image_width, image_height):
            return (None, None)

        # We figure out the subimage coordinates using a two-step process:
        # 1. convert the plot boundaries from screen space into pixel offsets
        #    in the virtual image
        # 2. convert the coordinates in the virtual image into indices
        #    into the image data array
        # 3. from the data array indices, compute the screen coordinates of
        #    the corners of the data array sub-indices
        # in all the cases below, x1,y1 refers to the lower-left corner, and
        # x2,y2 refers to the upper-right corner.

        # 1. screen space -> pixel offsets
        if self.orientation == "h":
            x1 = px - ix
            x2 = (px + plot_width) - ix
            y1 = py - iy
            y2 = (py + plot_height) - iy
        else:
            x1 = px - ix
            x2 = (px + plot_height) - ix
            y1 = py - iy
            y2 = (py + plot_width) - iy

        # 2. pixel offsets -> data array indices
        # X and Y are transposed because for image plot data
        pixel_bounds = self.value.get_array_bounds()
        xpixels = pixel_bounds[0][1] - pixel_bounds[0][0]
        ypixels = pixel_bounds[1][1] - pixel_bounds[1][0]
        i1 = max(floor(float(x1) / image_width * xpixels), 0)
        i2 = min(ceil(float(x2) / image_width * xpixels), xpixels)
        j1 = max(floor(float(y1) / image_height * ypixels), 0)
        j2 = min(ceil(float(y2) / image_height * ypixels), ypixels)

        # 3. array indices -> new screen space coordinates
        x1 = float(i1) / xpixels * image_width + ix
        x2 = float(i2) / xpixels * image_width + ix
        y1 = float(j1) / ypixels * image_height + iy
        y2 = float(j2) / ypixels * image_height + iy

        # Handle really, really, subpixel cases
        subimage_index = [i1, j1, i2, j2]
        subimage_coords = [x1, y1, x2 - x1, y2 - y1]
        plot_dimensions = (px, py, plot_width, plot_height)
        xparams = (0, 2)
        yparams = (1, 3)
        for pos_index, size_index in (xparams, yparams):
            if subimage_index[pos_index] == subimage_index[pos_index + 2] - 1:
                # xcoords lie inside the same pixel, so set the subimage
                # coords to be the width of the image
                subimage_coords[pos_index] = plot_dimensions[pos_index]
                subimage_coords[size_index] = plot_dimensions[size_index]
            elif subimage_index[pos_index] == subimage_index[pos_index + 2] - 2:
                # coords span across a pixel boundary.  Find the scaling
                # factor of the virtual (and potentially large) subimage
                # size to the image size, and scale it down.  We can do
                # this without distortion b/c we are straddling only one
                # pixel boundary.
                #
                # If we scale down the extent to twice the screen size, we can
                # be sure that no matter what the offset, we will cover the
                # entire screen, since we are only straddling one pixel boundary.
                # The formula for calculating the new origin can be worked out
                # on paper.
                extent = subimage_coords[size_index]
                pixel_extent = extent / 2   # we are indexed into two pixels
                origin = subimage_coords[pos_index]
                scale = float(2 * plot_dimensions[size_index] / extent)
                subimage_coords[size_index] *= scale
                subimage_coords[pos_index] = origin + (1 - scale) * pixel_extent

        subimage_index = map(int, subimage_index)
        return [subimage_index, subimage_coords]

    #------------------------------------------------------------------------
    # Event handlers
    #------------------------------------------------------------------------

    def _index_data_changed_fired(self):
        self._image_cache_valid = False
        self.request_redraw()

    def _index_mapper_changed_fired(self):
        self._image_cache_valid = False
        self.request_redraw()

    def _value_data_changed_fired(self):
        self._image_cache_valid = False
        self.request_redraw()
| {
"content_hash": "8272de1c68ad6fc4b848ff4262ef8d72",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 85,
"avg_line_length": 41.30124223602485,
"alnum_prop": 0.5351530190239868,
"repo_name": "burnpanck/chaco",
"id": "d1df84072e6f2f8599160fa0d4acea4d2a0ff7f0",
"size": "13465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chaco/image_plot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "57089"
},
{
"name": "C++",
"bytes": "9881"
},
{
"name": "Gnuplot",
"bytes": "611"
},
{
"name": "Python",
"bytes": "1761203"
}
],
"symlink_target": ""
} |
"""Process handling utilities."""
import os
import subprocess
import sys
import tempfile
import threading
import time
import urllib.request
try:
import psutil
except ImportError:
psutil = None
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment
# Extra command-line arguments prepended for each supported wrapper tool.
TOOL_ARGS = {
    'unshare': [
        '-c',  # Map current user to same user in user namespace.
        '-n',  # Enter network namespace.
    ],
}

# Download locations for tools fetched on demand when missing locally.
TOOL_URLS = {
    'extra_sanitizers':
        'https://storage.googleapis.com/oss-fuzz-sanitizers/latest'
}
def _end_process(terminate_function, process_result):
  """Ends a running process.

  Ignores exceptions.

  Args:
    terminate_function: The function to terminate the process
      (e.g. Popen.terminate or Popen.kill).
    process_result: A ProcessResult object where timeout information will be
      written to.
  """
  try:
    terminate_function()
  except OSError:
    logs.log('Process already killed.')

  # This helper only runs from a timer that has fired, so record the timeout.
  process_result.timed_out = True
def wait_process(process,
                 timeout,
                 input_data=None,
                 terminate_before_kill=False,
                 terminate_wait_time=None):
  """Waits until either the process exits or times out.

  Args:
    process: A subprocess.Popen object.
    timeout: Maximum number of seconds to wait for before sending a signal.
    input_data: Input to be sent to the process.
    terminate_before_kill: A bool indicating that SIGTERM should be sent to
      the process first before SIGKILL (to let the SIGTERM handler run).
    terminate_wait_time: Maximum number of seconds to wait for the SIGTERM
      handler.

  Returns:
    A ProcessResult.
  """
  result = ProcessResult()
  is_windows = environment.platform() == 'WINDOWS'

  # On Windows, terminate() just calls Win32 API function TerminateProcess()
  # which is equivalent to process kill. So, skip terminate_before_kill.
  if terminate_before_kill and not is_windows:
    first_timeout_function = process.terminate

    # Use a second timer to send the process kill.
    second_timer = threading.Timer(timeout + terminate_wait_time, _end_process,
                                   [process.kill, result])
  else:
    first_timeout_function = process.kill
    second_timer = None

  first_timer = threading.Timer(timeout, _end_process,
                                [first_timeout_function, result])

  output = None
  start_time = time.time()

  try:
    first_timer.start()

    if second_timer:
      second_timer.start()

    # Blocks until the process exits; if a timer fires first, _end_process
    # ends the child, which unblocks communicate().
    output = process.communicate(input_data)[0]
  finally:
    # Always cancel pending timers so they cannot fire after a normal exit.
    first_timer.cancel()

    if second_timer:
      second_timer.cancel()

  result.return_code = process.poll()
  result.output = output
  result.time_executed = time.time() - start_time
  return result
def kill_process_tree(root_pid):
  """Kill process tree."""
  try:
    root = psutil.Process(root_pid)
    descendants = root.children(recursive=True)
  except (psutil.AccessDenied, psutil.NoSuchProcess, OSError):
    logs.log_warn('Failed to find or access process.')
    return

  # Kill the descendants first, then the root; tolerate races with processes
  # that exit on their own.
  for descendant in descendants:
    try:
      descendant.kill()
    except (psutil.AccessDenied, psutil.NoSuchProcess, OSError):
      logs.log_warn('Failed to kill process child.')

  try:
    root.kill()
  except (psutil.AccessDenied, psutil.NoSuchProcess, OSError):
    logs.log_warn('Failed to kill process.')
class ChildProcess(object):
  """A class representing a process that's running."""

  def __init__(self,
               popen,
               command,
               max_stdout_len=None,
               stdout_file=None,
               interactive=False):
    """Inits the ChildProcess.

    Args:
      popen: The underlying subprocess.Popen object.
      command: The command line (list) the process was started with.
      max_stdout_len: Optional maximum number of bytes of stdout to keep.
      stdout_file: Optional file object that receives stdout.
      interactive: Whether stdout is echoed line by line to sys.stdout.
    """
    self._popen = popen
    self._command = command
    self._max_stdout_len = max_stdout_len
    self._stdout_file = stdout_file
    self._interactive = interactive

  @property
  def command(self):
    # Command line used to start the process.
    return self._command

  @property
  def popen(self):
    # Underlying subprocess.Popen instance.
    return self._popen

  def communicate(self, input=None):  # pylint: disable=redefined-builtin
    """subprocess.Popen.communicate.

    In interactive mode, stdout is read line by line and echoed to
    sys.stdout as it arrives; otherwise Popen.communicate is used directly.
    When max_stdout_len is set, the collected stdout is truncated by
    reading back from the stdout file.
    """
    stdout = b''
    stderr = b''
    if self._interactive:
      if input:
        self._popen.stdin.write(input)

      while True:
        line = self._popen.stdout.readline()
        if not line:
          break

        # Capture the line either into the stdout file or in memory.
        if self._stdout_file:
          self._stdout_file.write(line)
        else:
          stdout += line

        # Echo to the console for interactive runs.
        sys.stdout.write(utils.decode_to_unicode(line))

      self._popen.wait()
    else:
      stdout, stderr = self._popen.communicate(input)

    if not self._max_stdout_len:
      return stdout, stderr

    with self._stdout_file:
      return utils.read_from_handle_truncated(self._stdout_file,
                                              self._max_stdout_len), stderr

  def poll(self):
    """subprocess.Popen.poll."""
    return self._popen.poll()

  def kill(self):
    """Kills running process and all of its associated children."""
    kill_process_tree(self._popen.pid)

  def terminate(self):
    """subprocess.Popen.terminate."""
    try:
      self._popen.terminate()
    except OSError:
      logs.log_warn('Failed to terminate process.')
class ProcessResult(object):
  """Result of a single process execution.

  Returned by ProcessRunner.run_and_wait().

  Attributes:
    command: A list of arguments representing the command line that was run.
    return_code: Exit code of the process.
    output: Process output.
    time_executed: Number of seconds process ran for.
    timed_out: Whether or not the process timed out.
  """

  def __init__(self,
               command=None,
               return_code=None,
               output=None,
               time_executed=None,
               timed_out=False):
    """Stores the execution details as plain attributes."""
    self.command = command
    self.timed_out = timed_out
    self.return_code = return_code
    self.time_executed = time_executed
    self.output = output
class ProcessRunner(object):
  """Generic process runner class.

  Attributes:
    executable_path: Path to the executable to be run.
    default_args: An optional sequence of arguments that are always passed to
      the executable when run.
  """

  def __init__(self, executable_path, default_args=None):
    """Inits ProcessRunner."""
    self._executable_path = executable_path
    self._default_args = []

    if default_args:
      self.default_args.extend(default_args)

  @property
  def executable_path(self):
    # Path of the binary this runner executes.
    return self._executable_path

  @property
  def default_args(self):
    # Mutable list of arguments always passed before any additional args.
    return self._default_args

  def get_command(self, additional_args=None):
    """Returns the command line for running the executable.

    Args:
      additional_args: A sequence of additional arguments to be passed to the
        executable.

    Returns:
      A list containing the command arguments to be passed to subprocess.Popen.
    """
    command = [self._executable_path]
    command.extend(self._default_args)

    if additional_args:
      command.extend(additional_args)

    return command

  def run(self,
          additional_args=None,
          max_stdout_len=None,
          extra_env=None,
          stdin=subprocess.PIPE,
          stdout=subprocess.PIPE,
          stderr=subprocess.STDOUT,
          **popen_args):
    """Runs the executable.

    Does not block the caller.

    Args:
      additional_args: A sequence of additional arguments to be passed to the
        executable.
      max_stdout_len: Optional. Maximum number of bytes to collect in stdout.
      extra_env: Optional. A dictionary containing environment variables and
        their values. These will be set in the environment of the new process.
      stdin: Optional. Passed to subprocess.Popen, defaults to subprocess.PIPE,
      stdout: Optional. Passed to subprocess.Popen, defaults to subprocess.PIPE
      stderr: Optional. Passed to subprocess.Popen, defaults to
        subprocess.STDOUT
      **popen_args: Additional arguments that are passed to subprocess.Popen.

    Returns:
      A ChildProcess wrapping the subprocess.Popen object for the process.
    """
    # TODO: Rename popen_args to popen_kwargs.
    command = self.get_command(additional_args)

    # Route stdout through a temp file when truncation is requested.
    stdout_file = None
    if stdout == subprocess.PIPE and max_stdout_len:
      stdout_file = tempfile.TemporaryFile()
      stdout = stdout_file

    interactive = environment.get_value('CF_INTERACTIVE')
    if interactive:
      # Unbuffered so interactive output is echoed as soon as it arrives.
      popen_args['bufsize'] = 0

      if stdout != subprocess.PIPE:
        # If the provided stdout is a file object, (i.e. not subprocess.PIPE),
        # we need to pipe writes through to there to ensure consistent
        # behaviour.
        stdout_file = stdout
        stdout = subprocess.PIPE

    env = popen_args.pop('env', os.environ.copy())
    if extra_env is not None:
      env.update(extra_env)

    return ChildProcess(
        subprocess.Popen(
            command,
            env=env,
            stdin=stdin,
            stdout=stdout,
            stderr=stderr,
            **popen_args),
        command,
        max_stdout_len=max_stdout_len,
        stdout_file=stdout_file,
        interactive=interactive)

  # Note: changes to this function may require changes to
  # untrusted_runner.proto.
  def run_and_wait(self,
                   additional_args=None,
                   timeout=None,
                   terminate_before_kill=False,
                   terminate_wait_time=None,
                   input_data=None,
                   max_stdout_len=None,
                   extra_env=None,
                   stdin=subprocess.PIPE,
                   stdout=subprocess.PIPE,
                   stderr=subprocess.STDOUT,
                   **popen_args) -> ProcessResult:
    """Runs the executable.

    Blocks the caller until the process exits.

    Args:
      additional_args: A sequence of additional arguments to be passed to the
        executable.
      timeout: Maximum number of seconds to run the process for.
      terminate_before_kill: A bool indicating that SIGTERM should be sent to
        the process first before SIGKILL (to let the SIGTERM handler run).
      terminate_wait_time: Maximum number of seconds to wait for the SIGTERM
        handler.
      input_data: Optional. A string to be passed as input to the process.
      max_stdout_len: Optional. Maximum number of bytes to collect in stdout.
      extra_env: Optional. A dictionary containing environment variables and
        their values. These will be added to the environment of the new
        process.
      stdin: Optional. Passed to subprocess.Popen, defaults to subprocess.PIPE
      stdout: Optional. Passed to subprocess.Popen, defaults to subprocess.PIPE
      stderr: Optional. Passed to subprocess.Popen, defaults to
        subprocess.STDOUT
      **popen_args: Additional arguments that are passed to subprocess.Popen.

    Returns:
      A ProcessResult (command, return code, output, execution time, timeout).
    """
    process = self.run(
        additional_args,
        max_stdout_len=max_stdout_len,
        extra_env=extra_env,
        stdin=stdin,
        stdout=stdout,
        stderr=stderr,
        **popen_args)

    start_time = time.time()

    if not timeout:
      # No timeout requested: block until the process exits on its own.
      output = process.communicate(input_data)[0]
      return ProcessResult(process.command, process.poll(), output,
                           time.time() - start_time, False)

    result = wait_process(
        process,
        timeout=timeout,
        input_data=input_data,
        terminate_before_kill=terminate_before_kill,
        terminate_wait_time=terminate_wait_time)
    result.command = process.command

    return result
class UnicodeProcessRunnerMixin(object):
  """Mixin for process runner subclasses to output unicode output."""

  def run_and_wait(self, *args, **kwargs) -> ProcessResult:  # pylint: disable=arguments-differ
    """Overridden run_and_wait which always decodes the output."""
    # Calls the base implementation explicitly, then decodes its output.
    result = ProcessRunner.run_and_wait(self, *args, **kwargs)
    if result.output is None:
      return result

    result.output = utils.decode_to_unicode(result.output)
    return result
class UnicodeProcessRunner(UnicodeProcessRunnerMixin, ProcessRunner):
  """ProcessRunner which always returns unicode output.

  Combines ProcessRunner with UnicodeProcessRunnerMixin so that
  run_and_wait() results carry decoded (str) output.
  """
class ModifierProcessRunnerMixin(object):
  """ProcessRunner mixin with modifiers."""

  def tool_prefix(self, tool):
    """Prefix the command with a tool and its args"""
    # Each tool is opt-in via a USE_<TOOL> environment variable.
    if not environment.get_value(f'USE_{tool.upper()}'):
      return []

    if environment.platform() != 'LINUX':
      raise RuntimeError('Modifiers only supported on Linux')

    tool_path = environment.get_default_tool_path(tool)

    # Download the tool on first use when a download URL is known, and make
    # it executable.
    if not os.path.exists(tool_path) and tool in TOOL_URLS:
      urllib.request.urlretrieve(TOOL_URLS.get(tool), tool_path)
      if os.path.exists(tool_path):
        os.chmod(tool_path, 0o755)

    if not os.path.exists(tool_path):
      raise RuntimeError(f'{tool} not found')

    return [tool_path] + TOOL_ARGS.get(tool, [])

  def get_command(self, additional_args=None):
    """Overridden get_command."""
    # NOTE(review): this duplicates ProcessRunner.get_command's body rather
    # than delegating — presumably deliberate; confirm before refactoring.
    command = [self._executable_path]
    command.extend(self._default_args)

    if additional_args:
      command.extend(additional_args)

    return self.tool_prefix('unshare') + self.tool_prefix(
        'extra_sanitizers') + command
class ModifierProcessRunner(ModifierProcessRunnerMixin, ProcessRunner):
  """ProcessRunner with modifiers.

  A ProcessRunner whose commands are prefixed with the enabled modifier
  tools (unshare / extra sanitizers) via ModifierProcessRunnerMixin.
  """
class UnicodeModifierRunner(ModifierProcessRunnerMixin, UnicodeProcessRunner):
  """Unicode modifiers runner.

  Combines modifier tool prefixing with unicode (decoded) output.
  """
| {
"content_hash": "81e05438311f7b3c03d11d6e9d8be06f",
"timestamp": "",
"source": "github",
"line_count": 455,
"max_line_length": 95,
"avg_line_length": 29.58901098901099,
"alnum_prop": 0.6516378221793062,
"repo_name": "google/clusterfuzz",
"id": "ad68ee4345ee68b63eaa3497edd3caada24b1df0",
"size": "14038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/clusterfuzz/_internal/system/new_process.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "21721"
},
{
"name": "C",
"bytes": "3485"
},
{
"name": "C++",
"bytes": "16326"
},
{
"name": "CSS",
"bytes": "16789"
},
{
"name": "Dockerfile",
"bytes": "25218"
},
{
"name": "Go",
"bytes": "16253"
},
{
"name": "HTML",
"bytes": "503044"
},
{
"name": "JavaScript",
"bytes": "9433"
},
{
"name": "Jinja",
"bytes": "3308"
},
{
"name": "PowerShell",
"bytes": "17307"
},
{
"name": "Python",
"bytes": "5085058"
},
{
"name": "Ruby",
"bytes": "93"
},
{
"name": "Shell",
"bytes": "80910"
},
{
"name": "Starlark",
"bytes": "1951"
}
],
"symlink_target": ""
} |
import simplejson
import json
import logging
import urllib2
from google.appengine.api import users
from google.appengine.api import urlfetch
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render, redirect
from rest_framework import status
from rest_framework.decorators import api_view, renderer_classes
from rest_framework.response import Response
from rest_framework.renderers import JSONRenderer
from iupds import settings
from iupdsmanager.views import is_logged_in, ServiceUnavailable, get_object
from iupdsmanager.models import Application, Profile, AccessToken, Grant
# ouath2
from iupdsmanager.authorization_code import AuthorizationCodeGrantPds
from iupdsmanager.user_data import UserData
# Logger shared by the OAuth views below.
log = logging.getLogger('oauth2_provider')
# NOTE(review): basicConfig(DEBUG) at import time configures logging for the
# whole process — presumably a development setting; confirm for production.
logging.basicConfig(level=logging.DEBUG)
# log = logging.getLogger(__name__)

# Module-level helper; get_user_id() is used when building OAuth payloads.
user_data = UserData()
def oauth_login(request):
    """OAuth2 authorization endpoint backed by the Tyk gateway.

    GET:  look up the client application by ``client_id`` and either render
          the consent page (when an App Engine session exists) or redirect
          to the App Engine login with a post-login return URL.
    POST: if the user clicked 'Authorize', request an authorization code
          from Tyk, persist the grant locally and redirect back to the
          client's ``redirect_uri``; otherwise redirect back with an error.
    """
    try:
        if request.method == 'GET':
            client_id = str(request.GET['client_id'])
            app = Application.objects.get(client_id=client_id)
            redirect_uri = str(app.redirect_uris)
            post_login_redirect_url = settings.APPSCALE_PDS_HOST + "/oauth/login/?client_id=" + client_id + \
                "&redirect_uri=" + redirect_uri + "&state=random_state_string&response_type=code"
            if is_logged_in():
                application = {'name': str(app.name), 'scopes_descriptions': settings.SCOPES,
                               'scope': " ".join(settings.SCOPES), 'redirect_uri': redirect_uri,
                               'client_id': client_id}
                return render(request, "oauth2_provider/authorize.html", application)
            else:
                return redirect(users.create_login_url(post_login_redirect_url))
        elif request.method == 'POST':
            if 'allow' in request.POST and request.POST.get('allow') == 'Authorize':
                # Build the x-www-form-urlencoded body expected by Tyk's OAuth
                # authorize endpoint; key_rules embeds the per-token rate
                # limits and the per-user allowed-URL ACL as a JSON string.
                payload = 'response_type=code&client_id=' + str(request.POST.get('client_id')).strip()+'&redirect_uri='\
                    + str(request.POST.get('redirect_uri')).strip() + '&state=' \
                    + str(request.POST.get('state')).strip() + '&scope='\
                    + str(request.POST.get('scope')).strip() \
                    + '&key_rules={"allowance":1000,"rate":1000,"per":60,"expires":' \
                    + str(settings.ACCESS_TOKEN_EXPIRE_SECONDS) \
                    + ',"quota_max":-1,"quota_renews":1406121006,"quota_remaining":0,' \
                    '"quota_renewal_rate":60,"access_rights":{"' + settings.PDS_API_ID \
                    + '":{"api_name":"' + settings.PDS_API_NAME + '","api_id":"' \
                    + settings.PDS_API_ID \
                    + '","versions":["Default"],"allowed_urls":[{"url":"/api/v1/users/' \
                    + str(user_data.get_user_id()).strip() \
                    + '/emails/(.*)","methods":["GET"]},{"url":"/api/v1/users/' \
                    + str(user_data.get_user_id()).strip()+'/telephones/(.*)","methods":["GET"]},{"url":"/api/v1/users/' \
                    + str(user_data.get_user_id()).strip()+'/addresses/(.*)","methods":["GET"]},{"url":"/api/v1/users/' \
                    + str(user_data.get_user_id()).strip()+'/persons/(.*)","methods":["GET"]}]}},' \
                    '"org_id":"' + settings.TYK_API_ORG_ID+'","oauth_client_id":"' \
                    + str(request.POST.get('client_id')).strip() \
                    + '","hmac_enabled":false,"hmac_string":"","apply_policy_id":"' \
                    + settings.TYK_API_POLICY_ID+'"}'
                headers = {'Content-Type': 'application/x-www-form-urlencoded',
                           'x-tyk-authorization': settings.TYK_AUTHORIZATION_NODE_SECRET, 'cache-control': "no-cache"}
                # Ask Tyk for an authorization code.
                r = urlfetch.fetch(url=settings.TYK_OAUTH_AUTHORIZE_ENDPOINT, payload=payload, method=urlfetch.POST,
                                   headers=headers)
                if r.status_code == 200:
                    response = simplejson.loads(r.content)
                    # Persist the grant locally so the token webhook can
                    # later associate tokens with this user/application.
                    grant = AuthorizationCodeGrantPds()
                    user_profile = Profile.objects.get(email=user_data.get_user_email())
                    # BUG FIX: this branch handles a POST, so the client_id
                    # must come from request.POST (request.GET is empty here
                    # and the old lookup always produced "None").
                    client_id = str(request.POST.get('client_id'))
                    application = Application.objects.get(client_id=client_id)
                    request_ = {'client_id': request.POST.get('client_id'),
                                'redirect_uri': request.POST.get('redirect_uri'),
                                'response_type': request.POST.get('response_type', "code"),
                                'state': request.POST.get('state', None), 'client': application, 'user': user_profile,
                                'scopes': request.POST.get('scope')}
                    code = {'code': response['code']}
                    grant.save_authorization_client_code(get_object(request_), code)
                    return redirect(response['redirect_to']+"&user_id="+str(user_profile.id))
                else:
                    response = {'message': r.content, 'status_code': r.status_code}
                    print("Error " + str(r.content) + " - " + str(r.status_code))
                    return render(request, "oauth2_provider/authorize_error.html", response)
            else:
                # User denied the authorization request.
                log.debug("Redirecting " + request.POST.get('redirect_uri') + "?error=access_denied")
                return redirect(request.POST.get('redirect_uri') + "?error=access_denied")
        else:
            return redirect(request.POST.get('redirect_uri') + "?error=method_not_allowed")
    # BUG FIX: ``except A or B:`` evaluates ``A or B`` as a boolean expression
    # and only ever catches A; a tuple is required to catch both classes.
    except (ServiceUnavailable, TypeError):
        return redirect(request.POST.get('redirect_uri') + "?error=internal_server_error")
    except Application.DoesNotExist:
        return redirect(request.GET.get('redirect_uri') + "?error=Application with the client_id does not exist!")
def oauth_tyk_notify(request):
    """Webhook called by Tyk when a token is issued or refreshed.

    Looks up the local grant via the authorization code in the notification
    body and persists the new access/refresh token pair against it.
    Returns HTTP 200 on success (and for non-POST requests, matching the
    original behaviour), HTTP 404 when the referenced records do not exist.
    """
    print("oauth_tyk_notify")
    try:
        if request.method == 'POST':
            received_json_data = json.loads(request.body)
            refresh_token = received_json_data['refresh_token']
            auth_code = received_json_data['auth_code']
            new_oauth_token = received_json_data['new_oauth_token']
            old_refresh_token = received_json_data['old_refresh_token']
            notification_type = received_json_data['notification_type']
            grant = Grant.objects.get(code=auth_code)
            print("Saving access_token")
            token = {
                'access_token': new_oauth_token,
                'scope': grant.scope,
                'refresh_token': refresh_token,
                'auth_code': auth_code,
                'old_refresh_token': old_refresh_token,
                'notification_type': notification_type,
                'new_oauth_token': new_oauth_token
            }
            request_ = {
                'client': grant.application,
                'user': grant.user,
                'refresh_token': "",
                'grant_type': 'authorization_code'
            }
            pds_auth = AuthorizationCodeGrantPds()
            pds_auth.save_bearer_token(token, get_object(request_))
        return HttpResponse(status=200)
    # BUG FIX: ``except A or B or C:`` only catches A (the ``or`` chain is a
    # boolean expression over classes); a tuple catches all three.
    except (Grant.DoesNotExist, Profile.DoesNotExist, Application.DoesNotExist):
        print("exception")
        return HttpResponse(status=404)
def oauth_create_client(request):
    """Register a new OAuth client with Tyk and mirror it locally.

    Expects a POST body with ``redirect_uri`` and ``client_name``. On
    success returns the client credentials as JSON; otherwise HTTP 404.
    """
    try:
        if request.method == 'POST':
            received_data = json.loads(request.body)
            pds_api_id = str(settings.PDS_API_ID)
            token_callback_ = received_data['redirect_uri']
            payload = "{\"api_id\":\"%s\",\"redirect_uri\":\"%s\"}" % (pds_api_id, token_callback_)
            headers = {
                'content-type': "application/json",
                'cache-control': "no-cache",
                'X-Tyk-Authorization': settings.TYK_AUTHORIZATION_NODE_SECRET,
            }
            print("Sending client creation request")
            req = urllib2.Request(settings.TYK_CREATE_CLIENT_ENDPOINT, data=payload, headers=headers)
            response = urllib2.urlopen(req)
            # BUG FIX: a urllib2 response body can only be read once; the
            # original called response.read() several times, so the error
            # branch printed the body but returned an *empty* 404 response.
            body = response.read()
            if response.getcode() == 200:
                response_data = simplejson.loads(body)
                client = Application(
                    client_id=response_data['client_id'],
                    client_secret=response_data['secret'],
                    redirect_uris=response_data['redirect_uri'],
                    authorization_grant_type='authorization-code',
                    client_type='public',
                    name=received_data['client_name']
                )
                client.save()
                print("Client saved, returning!")
                return JsonResponse({
                    'client_id': response_data['client_id'],
                    'client_secret': response_data['secret'],
                    'redirect_uri': response_data['redirect_uri']})
            else:
                # NOTE(review): urllib2.urlopen raises HTTPError for non-2xx
                # responses, so this branch is rarely reached — confirm.
                print(body)
                return HttpResponse(body, status=404)
    except Exception as e:
        # BUG FIX: ``e.message`` is deprecated and absent on many exception
        # types (it would raise AttributeError inside the handler).
        print(str(e))
        return HttpResponse(status=404)
@api_view(['GET'])
@renderer_classes((JSONRenderer,))
def application_list(request):
    """Return the applications the current user has granted access to."""
    try:
        if request.method == 'GET':
            if is_logged_in():
                # NOTE(review): filter() returns a QuerySet, not a single
                # Profile — confirm whether .get()/.first() was intended for
                # the Grant lookup below.
                user_profile = Profile.objects.filter(email=user_data.get_user_email())
                apps = Grant.objects.filter(user=user_profile).values_list('application__pk', flat=True)
                applications = Application.objects.filter(pk__in=apps).values()
                return Response({'user_applications': applications})
            else:
                # BUG FIX: the original computed the login URL but neither
                # returned nor redirected, so the view returned None (500).
                return Response({'status': False, 'login_url': users.create_login_url('/')},
                                status=status.HTTP_401_UNAUTHORIZED)
        else:
            return Response({'status': False, 'message': 'Method not allowed'},
                            status=status.HTTP_405_METHOD_NOT_ALLOWED)
    # BUG FIX: tuple form is required to catch both exception classes.
    except (Application.DoesNotExist, Profile.DoesNotExist):
        return Response({'status': False, 'message': 'Not Found'}, status=status.HTTP_404_NOT_FOUND)
@api_view(['DELETE'])
@renderer_classes((JSONRenderer,))
def revoke_application(request, pk):
    """Revoke the current user's access tokens for application ``pk``.

    Revokes each token locally, deletes it from Tyk
    (DELETE /tyk/oauth/refresh/{key}?api_id={api_id}) and removes the
    user's grants for the application.
    """
    try:
        if request.method == 'DELETE':
            if is_logged_in():
                headers = {'Content-Type': 'application/x-www-form-urlencoded',
                           'x-tyk-authorization': settings.TYK_AUTHORIZATION_NODE_SECRET, 'cache-control': "no-cache"}
                application = Application.objects.get(pk=pk)
                tokens = AccessToken.objects.filter(application=application, user=user_data.get_profile()).values()
                pds_auth = AuthorizationCodeGrantPds()
                for token in tokens:
                    pds_auth.revoke_token(token['token'], 'access_token', request)
                    r = urlfetch.fetch(url=settings.TYK_DELETE_ACCESS_TOKEN + "/" +
                                       token['token'] + "?api_id=" + settings.PDS_API_ID,
                                       method=urlfetch.DELETE, headers=headers)
                    if r.status_code == 200:
                        print("Delete token from tyk")
                        print("Deleting Grants")
                        # NOTE(review): returns after the first successfully
                        # revoked token, leaving any remaining tokens in Tyk
                        # — confirm whether every token should be processed.
                        Grant.objects.filter(application=application, user=user_data.get_profile()).delete()
                        return Response()
                    else:
                        print("unable to delete token from tyk")
                        print(simplejson.loads(r.content))
                # BUG FIX: previously fell off the end (None response, HTTP
                # 500) when there were no tokens or every delete failed.
                return Response({'status': False, 'message': 'Unable to revoke tokens'},
                                status=status.HTTP_404_NOT_FOUND)
            else:
                # BUG FIX: the original discarded the login URL and returned
                # None; surface it to the caller instead.
                return Response({'status': False, 'login_url': users.create_login_url('/')},
                                status=status.HTTP_401_UNAUTHORIZED)
        else:
            return Response({'status': False, 'message': 'Method not allowed'},
                            status=status.HTTP_405_METHOD_NOT_ALLOWED)
    # BUG FIX: tuple form is required to catch all three exception classes.
    except (Application.DoesNotExist, Profile.DoesNotExist, AccessToken.DoesNotExist):
        return Response({'status': False, 'message': 'Not Found'}, status=status.HTTP_404_NOT_FOUND)
@api_view(['POST'])
def logout(request):
    """Return the App Engine logout URL for the currently logged-in user.

    Responds with HTTP 410 when no user session exists.
    """
    if not is_logged_in():
        return Response({'status': 'Bad request', 'message': 'The user is not logged in'}, status=status.HTTP_410_GONE)
    return Response({'logout_url': users.create_logout_url("/", _auth_domain=None)}, status=status.HTTP_200_OK)
| {
"content_hash": "b48776ae66a28010a89090c383fa876d",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 120,
"avg_line_length": 45.13732394366197,
"alnum_prop": 0.5527732272408145,
"repo_name": "Sunnepah/iupds-appscale",
"id": "3a7f0af76b751a357fffe9c9bdbfd2902d92cca5",
"size": "12835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pdsoauth/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "API Blueprint",
"bytes": "14969"
},
{
"name": "CSS",
"bytes": "254897"
},
{
"name": "HTML",
"bytes": "41274"
},
{
"name": "JavaScript",
"bytes": "360163"
},
{
"name": "Python",
"bytes": "141661"
},
{
"name": "Shell",
"bytes": "85"
}
],
"symlink_target": ""
} |
import rospy
import time
import os
import math
from sensor_msgs.msg import Imu, NavSatFix
from barc.msg import ECU, Encoder, Z_KinBkMdl
from numpy import pi, cos, sin, eye, array, zeros, unwrap
from ekf import ekf
from system_models import f_KinBkMdl, h_KinBkMdl
from tf import transformations
from numpy import unwrap
# input variables [default values] — latest commanded inputs from the ECU topic
d_f = 0 # steering angle [deg]
acc = 0 # acceleration [m/s]
# raw measurement variables (updated by the IMU callback)
yaw_prev = 0
(roll, pitch, yaw, a_x, a_y, a_z, w_x, w_y, w_z) = zeros(9)
yaw_prev = 0  # NOTE(review): redundant — already initialised two lines above
yaw_local = 0  # yaw rebased on the first IMU sample (heading since start-up)
read_yaw0 = False  # True once the initial yaw reference has been captured
psi = 0
psi_meas = 0  # heading measurement fed to the EKF
# from encoder
v = 0
v_meas = 0  # speed estimate [m/s] fed to the EKF
t0 = time.time()  # timestamp of the last encoder speed computation
n_FL = 0 # counts in the front left tire
n_FR = 0 # counts in the front right tire
n_BL = 0 # counts in the back left tire
n_BR = 0 # counts in the back right tire
n_FL_prev = 0
n_FR_prev = 0
n_BL_prev = 0
n_BR_prev = 0
r_tire = 0.036 # radius from tire center to perimeter along magnets [m]
dx_qrt = 2.0*pi*r_tire/4.0 # distance along quarter tire edge [m]
# from gps (flat-Earth coordinates produced by gps_callback)
x_local = 0.0
y_local = 0.0
z_local = 0.0
def lla2flat(lla, llo, psio, href):
    '''
    Convert geodetic coordinates to a local flat-Earth frame (mirrors
    MATLAB's lla2flat).

    lla  -- (latitude, longitude, altitude) in [degrees, degrees, meters].
            Latitudes of +/-90 may behave unexpectedly (pole singularity).
    llo  -- (latitude, longitude) in degrees of the flat-Earth origin.
    psio -- angular direction of the flat-Earth x-axis, in degrees
            clockwise from north.
    href -- reference height of the Earth surface above the flat frame [m].

    Returns (Xp, Yp, Zp) in meters.

    usage: print(lla2flat((0.1, 44.95, 1000.0), (0.0, 45.0), 5.0, -100.0))
    '''
    equator_radius = 6378137.0           # WGS-84 equatorial radius [m]
    flattening = 0.00335281066474748071  # 1/298.257223563, inverse flattening
    # Point of interest, converted to radians.
    lat_p = lla[0] * math.pi / 180.0
    lon_p = lla[1] * math.pi / 180.0
    alt_p = lla[2]  # meters
    # Reference origin, converted to radians.
    lat_o = llo[0] * math.pi / 180.0
    lon_o = llo[1] * math.pi / 180.0
    psio = psio * math.pi / 180.0
    d_lat = lat_p - lat_o
    d_lon = lon_p - lon_o
    ff = (2.0 * flattening) - (flattening ** 2)  # can be precomputed
    sin_lat = math.sin(lat_o)
    # Radii of curvature: prime vertical (Rn) and meridian (Rm).
    prime_vertical_r = equator_radius / math.sqrt(1 - (ff * (sin_lat ** 2)))
    meridian_r = prime_vertical_r * ((1 - ff) / (1 - (ff * (sin_lat ** 2))))
    # Small-angle arc lengths north/east of the origin.
    north = (d_lat) / math.atan2(1, meridian_r)
    east = (d_lon) / math.atan2(1, (prime_vertical_r * math.cos(lat_o)))
    # Rotate the north/east offsets clockwise by psio into the x/y frame.
    x_flat = (north * math.cos(psio)) + (east * math.sin(psio))
    y_flat = (-north * math.sin(psio)) + (east * math.cos(psio))
    z_flat = -alt_p - href
    return x_flat, y_flat, z_flat
# ecu command update
def ecu_callback(data):
    """ECU topic callback: record the latest commanded inputs."""
    global acc, d_f
    # data.motor carries the commanded acceleration, data.servo the steering.
    acc, d_f = data.motor, data.servo
# GPS measurement update
def gps_callback(data):
    """GPS fix callback: project (lat, lon, alt) into the local flat frame.

    Updates the module-level x_local/y_local/z_local used by the estimator.
    """
    global x_local, y_local, z_local
    gps_latitude = data.latitude
    gps_longitude = data.longitude
    gps_altitude = data.altitude
    # Reference origin/offsets presumably calibrate the test track frame —
    # TODO confirm the hard-coded origin and +14 m x offset.
    (x_gps, y_gps, z_gps) = lla2flat((gps_latitude, gps_longitude, gps_altitude),(37.87459266,-122.260241555),0,100)
    x_local = x_gps + 14
    y_local = y_gps
    # BUG FIX: the original read ``z_gps = z_gps`` (a self-assignment), so
    # the declared global z_local was never updated.
    z_local = z_gps
    # rospy.logwarn("x = {}, y = {}".format(x_local,y_local))
# imu measurement update
def imu_callback(data):
    """IMU topic callback: update global orientation/rate/acceleration state.

    Converts the quaternion orientation to Euler angles, unwraps yaw so it
    stays continuous across the +/-pi boundary, and rebases it on the first
    sample so that ``psi_meas`` is a heading relative to start-up.
    """
    # units: [rad] and [rad/s]
    global roll, pitch, yaw, a_x, a_y, a_z, w_x, w_y, w_z
    global yaw_prev, yaw0, read_yaw0, yaw_local, psi_meas
    # get orientation from quaternion data, and convert to roll, pitch, yaw
    ori = data.orientation
    quaternion = (ori.x, ori.y, ori.z, ori.w)
    (roll, pitch, yaw) = transformations.euler_from_quaternion(quaternion)
    # save initial measurements: the first message defines the yaw reference
    if not read_yaw0:
        read_yaw0 = True
        yaw_prev = yaw
        yaw0 = yaw
    # unwrap measurement relative to the previous yaw sample
    yaw = unwrap(array([yaw_prev, yaw]), discont = pi)[1]
    yaw_prev = yaw
    yaw_local = yaw - yaw0
    psi_meas = yaw_local
    # extract angular velocity and linear acceleration data
    w_x = data.angular_velocity.x
    w_y = data.angular_velocity.y
    w_z = data.angular_velocity.z
    a_x = data.linear_acceleration.x
    a_y = data.linear_acceleration.y
    a_z = data.linear_acceleration.z
# encoder measurement update
def enc_callback(data):
    """Encoder topic callback: estimate vehicle speed from wheel counts.

    Stores the latest magnet counts and, once at least ``dt_v_enc`` seconds
    have elapsed since the previous estimate, computes per-wheel speeds from
    the count deltas and updates the global ``v_meas``.
    """
    global v, t0, dt_v_enc, v_meas
    global n_FL, n_FR, n_FL_prev, n_FR_prev
    global n_BL, n_BR, n_BL_prev, n_BR_prev
    n_FL = data.FL
    n_FR = data.FR
    n_BL = data.BL
    n_BR = data.BR
    # compute time elapsed since the last speed estimate
    tf = time.time()
    dt = tf - t0
    # if enough time has elapsed, estimate v_x
    if dt >= dt_v_enc:
        # compute speed : speed = distance / time
        v_FL = float(n_FL - n_FL_prev)*dx_qrt/dt
        v_FR = float(n_FR - n_FR_prev)*dx_qrt/dt
        v_BL = float(n_BL - n_BL_prev)*dx_qrt/dt
        v_BR = float(n_BR - n_BR_prev)*dx_qrt/dt
        # Uncomment/modify according to your encoder setup
        # v_meas = (v_FL + v_FR)/2.0
        # Modification for 3 working encoders (front-right encoder ignored)
        v_meas = (v_FL + v_BL + v_BR)/3.0
        # Modification for bench testing (driven wheels only)
        # v = (v_BL + v_BR)/2.0
        # update old data so the next delta starts from the current counts
        n_FL_prev = n_FL
        n_FR_prev = n_FR
        n_BL_prev = n_BL
        n_BR_prev = n_BR
        t0 = time.time()
# state estimation node
def state_estimation():
    """ROS node entry point: run an EKF over the kinematic bicycle model.

    Subscribes to IMU, encoder, ECU and GPS topics (whose callbacks update
    module-level measurement globals) and publishes the estimated state
    (x, y, psi, v) on 'state_estimate' at 50 Hz.
    """
    global dt_v_enc
    global v_meas, psi_meas
    global x_local, y_local
    # initialize node
    rospy.init_node('state_estimation', anonymous=True)
    # topic subscriptions / publications
    rospy.Subscriber('imu/data', Imu, imu_callback)
    rospy.Subscriber('encoder', Encoder, enc_callback)
    rospy.Subscriber('ecu', ECU, ecu_callback)
    rospy.Subscriber('fix', NavSatFix, gps_callback)
    state_pub = rospy.Publisher('state_estimate', Z_KinBkMdl, queue_size = 10)
    # get vehicle dimension parameters
    L_a = rospy.get_param("L_a") # distance from CoG to front axel
    L_b = rospy.get_param("L_b") # distance from CoG to rear axel
    vhMdl = (L_a, L_b)
    # get encoder parameters
    dt_v_enc = rospy.get_param("state_estimation/dt_v_enc") # time interval to compute v_x from encoders
    # get EKF observer properties
    q_std = rospy.get_param("state_estimation/q_std") # std of process noise
    r_std = rospy.get_param("state_estimation/r_std") # std of measurement noise
    # set node rate
    loop_rate = 50
    dt = 1.0 / loop_rate
    rate = rospy.Rate(loop_rate)
    t0 = time.time()
    # estimation variables for Luemberger observer
    z_EKF = zeros(4)
    # estimation variables for EKF
    P = eye(4) # initial dynamics covariance matrix
    Q = (q_std**2)*eye(4) # process noise covariance matrix
    R = array([[0.1,0.0,0.0,0.0],
               [0.0,0.1,0.0,0.0],
               [0.0,0.0,r_std,0.0],
               [0.0,0.0,0.0,r_std]]) # measurement noise covariance matrix
    while not rospy.is_shutdown():
        # publish state estimate
        (x, y, psi, v) = z_EKF
        # publish information
        state_pub.publish( Z_KinBkMdl(x, y, psi, v) )
        # collect measurements, inputs, system properties
        # NOTE(review): ``y`` here shadows the y-position unpacked above —
        # harmless (the position was already published) but confusing.
        y = array([x_local, y_local, psi_meas, v_meas])
        u = array([ d_f, acc ])
        args = (u,vhMdl,dt)
        # apply EKF and get each state estimate
        (z_EKF,P) = ekf(f_KinBkMdl, z_EKF, P, h_KinBkMdl, y, Q, R, args )
        # wait
        rate.sleep()
if __name__ == '__main__':
    try:
        state_estimation()
    except rospy.ROSInterruptException:
        # normal shutdown path when ROS interrupts the node
        pass
| {
"content_hash": "d10d1b2f8a4edffdd92b0c4a5ee2b20b",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 116,
"avg_line_length": 32.28838951310861,
"alnum_prop": 0.5777752000927967,
"repo_name": "BARCproject/barc",
"id": "0deaef1934e8056523acbaff88b81f24d1bd7999",
"size": "9403",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "workspace/src/barc/src/estimation/ekf_Kin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "37857"
},
{
"name": "C++",
"bytes": "34556"
},
{
"name": "CMake",
"bytes": "25703"
},
{
"name": "CSS",
"bytes": "143"
},
{
"name": "HTML",
"bytes": "27848"
},
{
"name": "JavaScript",
"bytes": "10764902"
},
{
"name": "Julia",
"bytes": "117617"
},
{
"name": "Less",
"bytes": "69047"
},
{
"name": "MATLAB",
"bytes": "9115"
},
{
"name": "Python",
"bytes": "343196"
},
{
"name": "SCSS",
"bytes": "69934"
},
{
"name": "Shell",
"bytes": "13578"
},
{
"name": "Vim script",
"bytes": "370"
}
],
"symlink_target": ""
} |
'''
Collect the individual per-tweet attribute files and assemble them into
feature matrices (written in sparse-ARFF "{index value,...}" row format).
The generated feature matrices fall into three categories:
1. with non-public words, with non-public words zeroed, and without
   non-public words (plus a variant extended with relation attributes).
'''
if __name__ == "__main__":
    # Inputs: one-line-per-tweet attribute files from earlier pipeline stages.
    precontentfilename = "../all_asc_tweetsOutput/Preprocess/precontent"
    sentimentfilename = "../all_asc_tweetsOutput/sentiment"
    yearfilename = "../all_asc_tweetsOutput/year"
    monthfilename = "../all_asc_tweetsOutput/month"
    dayfilename = "../all_asc_tweetsOutput/day"
    hourfilename = "../all_asc_tweetsOutput/hour"
    minfilename = "../all_asc_tweetsOutput/min"
    publicwordPMIfilename = "../Dictionary/publicwordPMI"
    nonpublicwordpmifilename = "../all_asc_tweetsOutput/Feature/nonpublicwordfreqge2"
    relationattributefilename = "../all_asc_tweetsOutput/Feature/RelationAtt"
    # Outputs: the four feature-matrix variants.
    FMatrixWithNPWfilename = "../all_asc_tweetsOutput/Feature/FeatureMatrixWithNPW"
    FMatrixWithNPWsetZerofilename = "../all_asc_tweetsOutput/Feature/FeatureMatrixWithNPWsetZero"
    FMatrixWithoutNPWfilename = "../all_asc_tweetsOutput/Feature/FeatureMatrixWithoutNPW"
    FMatrixWithNPWsetzeroRelationfilename = "../all_asc_tweetsOutput/Feature/FeatureMatrixWithNPWRelation"
    arffwithNPWriter = open(FMatrixWithNPWfilename,"w")
    arffwithNPsetzeroWriter = open(FMatrixWithNPWsetZerofilename,"w")
    arffwithoutNPWriter = open(FMatrixWithoutNPWfilename,"w")
    FMatrixWithNPWsetzeroRelationWriter = open(FMatrixWithNPWsetzeroRelationfilename,"w")
    # Fixed attribute indices for the non-text features (text features
    # occupy indices below 6962).
    emocNo=6962
    negNo = 6963
    yearNo = 6964
    monthNo = 6965
    dayNo = 6966
    hourNo = 6967
    minNo = 6968
    ParentsAttNo = 6969
    ChildrenAttNo = 6970
    TopicNo = 6971
    TopicName = "obama"
    sentimentNo = 6972
    try:
        sentimentreader = open(sentimentfilename,"r")
        yearreader = open(yearfilename,"r")
        monthreader = open(monthfilename,"r")
        dayreader = open(dayfilename,"r")
        hourreader = open(hourfilename,"r")
        minreader = open(minfilename,"r")
        publicwordreader = open(publicwordPMIfilename,"r")
        nonpublicwordreader = open(nonpublicwordpmifilename,"r")
    except:
        print "error IO"
    # Load the per-tweet relation attributes, keyed by 1-based tweet number.
    TweetNo=1
    DicTweetRelationAtt={}
    with open(relationattributefilename,"r") as relationattreader:
        for relationatts in relationattreader:
            relationattarr = relationatts.strip().split(" ")
            DicTweetRelationAtt[TweetNo] = relationattarr
            TweetNo = TweetNo+1
        relationattreader.close()
    # Zip the sentiment and timestamp files line-by-line into
    # tweetdic[n] = [sentiment, year, month, day, hour, min].
    tweetNo=1
    tweetdic={}
    for sentiment in sentimentreader:
        tweetlis=[]
        tweetlis.append(sentiment.strip())
        year = yearreader.readline().strip()
        month = monthreader.readline().strip()
        day = dayreader.readline().strip()
        hour = hourreader.readline().strip()
        # NOTE(review): ``min`` shadows the builtin within this scope.
        min = minreader.readline().strip()
        tweetlis.extend([year,month,day,hour,min])
        tweetdic[tweetNo]=tweetlis
        tweetNo = tweetNo +1
    sentimentreader.close()
    yearreader.close(),monthreader.close(),dayreader.close(),hourreader.close(),minreader.close()
    # Build word -> [PMI value, attribute index] maps; non-public words
    # continue the numbering after the public words.
    wordNO=1
    publicworddic = {}
    for publicword in publicwordreader:
        wordvalue = publicword.strip().split()
        publicworddic[wordvalue[0]] = [wordvalue[1],wordNO]
        wordNO = wordNO+1
    publicwordreader.close()
    nonpublicworddic = {}
    for nonpublicword in nonpublicwordreader:
        wordvalue=nonpublicword.strip().split()
        nonpublicworddic[wordvalue[0]] = [wordvalue[1],wordNO]
        wordNO = wordNO+1
    nonpublicwordreader.close()
    num_publicword = publicworddic.__len__()
    num_nonpublicword = nonpublicworddic.__len__()
    numtextfeature = publicworddic.__len__()+nonpublicworddic.__len__()
    # Stream the preprocessed tweet content and emit one sparse-ARFF row
    # per tweet into each of the four output files.
    tweetNo = 0
    with open(precontentfilename,"r") as contentreader:
        for tweetcontent in contentreader:
            tweetNo = tweetNo+1
            arffwithNPWriter.write("{"), arffwithoutNPWriter.write("{")
            arffwithNPsetzeroWriter.write("{")
            FMatrixWithNPWsetzeroRelationWriter.write("{")
            wordarr = tweetcontent.strip().split()
            num_EMO=0
            num_NEG=0
            num_ADD_MIN=0
            wordarrdic={}
            wordwithNPsetzerodic={}
            wordwithoutNPdic={}
            for word in wordarr:
                # public sentiment word
                if word in publicworddic.keys():
                    wordarrdic[publicworddic[word][1]]=publicworddic[word][0]
                    wordwithNPsetzerodic[publicworddic[word][1]]=publicworddic[word][0]
                    wordwithoutNPdic[publicworddic[word][1]]=publicworddic[word][0]
                    #arffwithNPWriter.write(str(publicworddic[word][1])+" "+str(publicworddic[word][0])+",")
                # non-public word: kept with its PMI in one matrix, zeroed in another
                elif word in nonpublicworddic.keys():
                    wordarrdic[nonpublicworddic[word][1]]=nonpublicworddic[word][0]
                    wordwithNPsetzerodic[nonpublicworddic[word][1]]=0
                    #arffwithNPWriter.write(str(nonpublicworddic[word][1])+" "+str(nonpublicworddic[word][0])+",")
                # emoticon / negation / add-minimize marker tokens
                elif word == "POSEMOC":
                    num_EMO = num_EMO+1;
                elif word == "NEGEMOC":
                    num_EMO = num_EMO - 1;
                elif word == "NEGWORD":
                    num_NEG = num_NEG+1
                elif word == "POSADD":
                    num_ADD_MIN = num_ADD_MIN +1
                elif word == "NEGMIS":
                    num_ADD_MIN = num_ADD_MIN -1
            # Sparse-ARFF rows must list attribute indices in ascending order.
            wordarrdiclist = sorted(wordarrdic.items())
            for wordNo,value in wordarrdiclist:
                arffwithNPWriter.write(str(wordNo)+" "+str(value)+",")
            wordwithoutNPWdiclist = sorted(wordwithoutNPdic.items())
            for wordNo,value in wordwithoutNPWdiclist:
                arffwithoutNPWriter.write(str(wordNo)+" "+str(value)+",")
            wordwithNPsetzerodiclist = sorted(wordwithNPsetzerodic.items())
            for wordNo,value in wordwithNPsetzerodiclist:
                arffwithNPsetzeroWriter.write(str(wordNo)+" "+str(value)+",")
                FMatrixWithNPWsetzeroRelationWriter.write(str(wordNo)+" "+str(value)+",")
            # Append the shared non-text attributes and close each row.
            arffwithNPWriter.write(str(emocNo)+" "+str(num_EMO)+","+str(negNo)+" "+\
                                str(num_NEG)+","+str(yearNo)+" "+str(tweetdic[tweetNo][1])+\
                                ","+str(monthNo)+" "+str(tweetdic[tweetNo][2])+","+\
                                str(dayNo)+" "+str(tweetdic[tweetNo][3])+","+\
                                str(hourNo)+" "+str(tweetdic[tweetNo][4])+","+\
                                str(minNo)+" "+str(tweetdic[tweetNo][5])+","+\
                                str(sentimentNo)+" "+tweetdic[tweetNo][0]+"}\n")
            arffwithoutNPWriter.write(str(emocNo)+" "+str(num_EMO)+","+str(negNo)+" "+str(num_NEG)+","+\
                                str(yearNo)+" "+str(tweetdic[tweetNo][1])+","+\
                                str(monthNo)+" "+str(tweetdic[tweetNo][2])+","+\
                                str(dayNo)+" "+str(tweetdic[tweetNo][3])+","+\
                                str(hourNo)+" "+str(tweetdic[tweetNo][4])+","+\
                                str(minNo)+" "+str(tweetdic[tweetNo][5])+","+\
                                str(sentimentNo)+" "+tweetdic[tweetNo][0]+"}\n")
            arffwithNPsetzeroWriter.write(str(emocNo)+" "+str(num_EMO)+","+str(negNo)+" "+str(num_NEG)+","+\
                                str(yearNo)+" "+str(tweetdic[tweetNo][1])+","+\
                                str(monthNo)+" "+str(tweetdic[tweetNo][2])+","+\
                                str(dayNo)+" "+str(tweetdic[tweetNo][3])+","+\
                                str(hourNo)+" "+str(tweetdic[tweetNo][4])+","+\
                                str(minNo)+" "+str(tweetdic[tweetNo][5])+","+\
                                str(sentimentNo)+" "+tweetdic[tweetNo][0]+"}\n")
            FMatrixWithNPWsetzeroRelationWriter.write(str(emocNo)+" "+str(num_EMO)+","+str(negNo)+" "+str(num_NEG)+\
                                ","+str(yearNo)+" "+str(tweetdic[tweetNo][1])+","+\
                                str(monthNo)+" "+str(tweetdic[tweetNo][2])+","+\
                                str(dayNo)+" "+str(tweetdic[tweetNo][3])+","+\
                                str(hourNo)+" "+str(tweetdic[tweetNo][4])+","+\
                                str(minNo)+" "+str(tweetdic[tweetNo][5])+","+\
                                str(ParentsAttNo)+" "+str(DicTweetRelationAtt[tweetNo][0])+","+\
                                str(ChildrenAttNo)+" "+str(DicTweetRelationAtt[tweetNo][1])+","+\
                                str(TopicNo)+" "+TopicName+","+\
                                str(sentimentNo)+" "+tweetdic[tweetNo][0]+"}\n")
        contentreader.close()
    arffwithNPWriter.flush(),arffwithNPWriter.close()
    arffwithoutNPWriter.flush(),arffwithoutNPWriter.close()
    arffwithNPsetzeroWriter.flush(),arffwithNPsetzeroWriter.close()
    FMatrixWithNPWsetzeroRelationWriter.flush(),FMatrixWithNPWsetzeroRelationWriter.close()
| {
"content_hash": "a8bb0a19826a83f3a44ac886bd8bd89c",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 119,
"avg_line_length": 48.68062827225131,
"alnum_prop": 0.5491503549150355,
"repo_name": "opinion-extraction-propagation/TASC-FeatureExtraction",
"id": "382d869e579b6babab9c141596899fd5ad595f8d",
"size": "9465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "FeatureProcess/MergeToGetFM.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "60633"
}
],
"symlink_target": ""
} |
from distutils.core import setup
import distutils.cmd
import sys, os, subprocess, traceback, re
def main():
    """Determine the package version and run the distutils setup.

    The version is taken from ``git describe`` when building from a git
    checkout and cached in RELEASE-VERSION; when git is unavailable (e.g.
    installing from an sdist), the cached file is read instead.
    """
    rv_filename = 'RELEASE-VERSION'
    try:
        version = get_version()
        version_file = open(rv_filename, 'w')
        version_file.write(version)
        version_file.close() # ensure sdist build process sees new contents
    except Exception:
        # Not a git checkout (or git failed): fall back to the cached file.
        try:
            version_file = open(rv_filename, 'r')
            version = version_file.read()
            # BUG FIX: close the fallback handle too (was leaked).
            version_file.close()
        except IOError as e:
            if e.errno == 2 and e.filename == rv_filename:
                sys.stderr.write(
                    """This does not appear to be a git repository, and the file %s is not
present. In order to build or install PySB, please either download a
distribution from http://pypi.python.org/pypi/pysb or clone the git repository
at https://github.com/pysb/pysb.git\n""" % rv_filename)
                sys.exit(1)
            # BUG FIX: any other IOError used to be swallowed, leaving
            # ``version`` unbound and crashing with a NameError below.
            raise
    setup(name='pysb',
          version=version,
          description='Python Systems Biology modeling framework',
          long_description='PySB (pronounced "Pie Ess Bee") is a framework ' + \
              'for building rule-based mathematical models of biochemical ' + \
              'systems. It works nicely with scientific Python libraries ' + \
              'such as NumPy, SciPy and SymPy for model simulation and ' + \
              'analysis.',
          author='Jeremy Muhlich',
          author_email='jmuhlich@bitflood.org',
          url='http://pysb.org/',
          packages=['pysb', 'pysb.generator', 'pysb.tools', 'pysb.examples',
                    'pysb.export', 'pysb.testing'],
          scripts=['scripts/pysb_export'],
          requires=['numpy', 'scipy', 'sympy'],
          keywords=['systems', 'biology', 'model', 'rules'],
          classifiers=[
              'Development Status :: 4 - Beta',
              'Environment :: Console',
              'Intended Audience :: Science/Research',
              'License :: OSI Approved :: BSD License',
              'Operating System :: OS Independent',
              'Programming Language :: Python :: 2',
              'Topic :: Scientific/Engineering :: Bio-Informatics',
              'Topic :: Scientific/Engineering :: Chemistry',
              'Topic :: Scientific/Engineering :: Mathematics',
              ],
          )
class GitError(Exception):
    # Raised when `git describe` exits with a non-zero status.
    pass
def get_version():
    """Get a nice version number from git-describe.

    Runs ``git describe`` in the repository containing this file and
    returns the description with any leading 'v' stripped. Raises
    Exception when not run from a git checkout or when git fails.
    """
    # ensure that we are working in a pysb git repo
    setup_path = os.path.abspath(os.path.dirname(__file__))
    if not os.path.exists(os.path.join(setup_path, '.git')):
        raise Exception("setup.py is not in the root of a git repository; "
                        "aborting")
    os.chdir(setup_path)
    # run git describe
    gitcmd = ['git', 'describe', '--always', '--abbrev=4']
    try:
        gitproc = subprocess.Popen(gitcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        retcode = gitproc.wait()
        if retcode:
            raise GitError(gitproc.stderr.read())
        # BUG FIX: file.next() is Python-2-only; the next() builtin is
        # equivalent on Python 2.6+ and also works on Python 3.
        version = next(gitproc.stdout).strip()
        version = re.sub(r'^v', '', version)
        return version
    except (OSError, GitError) as e:
        raise Exception("Error running 'git describe' to determine version:\n\n" +
                        "command\n=====\n" + " ".join(gitcmd) + "\n\n" +
                        "error\n====\n" + str(e) + "\n")
# Script entry point for `python setup.py ...`.
if __name__ == '__main__':
    main()
| {
"content_hash": "0f8271db04c7c2ddf0223b44e3d7ef05",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 90,
"avg_line_length": 40.48809523809524,
"alnum_prop": 0.5777712437518377,
"repo_name": "neurord/pysb",
"id": "32d389d2c720fe07c5d3f674166446226eae5a9c",
"size": "3424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "4582"
},
{
"name": "Puppet",
"bytes": "3461"
},
{
"name": "Python",
"bytes": "494227"
},
{
"name": "Ruby",
"bytes": "2124"
},
{
"name": "Shell",
"bytes": "4507"
}
],
"symlink_target": ""
} |
"""## Arithmetic Operators
TensorFlow provides several operations that you can use to add basic arithmetic
operators to your graph.
@@add
@@sub
@@mul
@@div
@@truediv
@@floordiv
@@mod
@@cross
## Basic Math Functions
TensorFlow provides several operations that you can use to add basic
mathematical functions to your graph.
@@add_n
@@abs
@@neg
@@sign
@@inv
@@square
@@round
@@sqrt
@@rsqrt
@@pow
@@exp
@@log
@@ceil
@@floor
@@maximum
@@minimum
@@cos
@@sin
@@lbeta
@@lgamma
@@digamma
@@erf
@@erfc
@@squared_difference
@@igamma
@@igammac
@@zeta
@@polygamma
## Matrix Math Functions
TensorFlow provides several operations that you can use to add linear algebra
functions on matrices to your graph.
@@batch_matrix_diag
@@batch_matrix_diag_part
@@batch_matrix_band_part
@@diag
@@diag_part
@@trace
@@transpose
@@matmul
@@batch_matmul
@@matrix_determinant
@@batch_matrix_determinant
@@matrix_inverse
@@batch_matrix_inverse
@@cholesky
@@batch_cholesky
@@cholesky_solve
@@batch_cholesky_solve
@@self_adjoint_eig
@@batch_self_adjoint_eig
@@matrix_solve
@@batch_matrix_solve
@@matrix_triangular_solve
@@batch_matrix_triangular_solve
@@matrix_solve_ls
@@batch_matrix_solve_ls
## Complex Number Functions
TensorFlow provides several operations that you can use to add complex number
functions to your graph.
@@complex
@@complex_abs
@@conj
@@imag
@@real
@@fft
@@ifft
@@fft2d
@@ifft2d
@@fft3d
@@ifft3d
@@batch_fft
@@batch_ifft
@@batch_fft2d
@@batch_ifft2d
@@batch_fft3d
@@batch_ifft3d
## Reduction
TensorFlow provides several operations that you can use to perform
common math computations that reduce various dimensions of a tensor.
@@reduce_sum
@@reduce_prod
@@reduce_min
@@reduce_max
@@reduce_mean
@@reduce_all
@@reduce_any
@@accumulate_n
## Segmentation
TensorFlow provides several operations that you can use to perform common
math computations on tensor segments.
Here a segmentation is a partitioning of a tensor along
the first dimension, i.e. it defines a mapping from the first dimension onto
`segment_ids`. The `segment_ids` tensor should be the size of
the first dimension, `d0`, with consecutive IDs in the range `0` to `k`,
where `k<d0`.
In particular, a segmentation of a matrix tensor is a mapping of rows to
segments.
For example:
```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.segment_sum(c, tf.constant([0, 0, 1]))
==> [[0 0 0 0]
[5 6 7 8]]
```
@@segment_sum
@@segment_prod
@@segment_min
@@segment_max
@@segment_mean
@@unsorted_segment_sum
@@sparse_segment_sum
@@sparse_segment_mean
@@sparse_segment_sqrt_n
## Sequence Comparison and Indexing
TensorFlow provides several operations that you can use to add sequence
comparison and index extraction to your graph. You can use these operations to
determine sequence differences and determine the indexes of specific values in
a tensor.
@@argmin
@@argmax
@@listdiff
@@where
@@unique
@@edit_distance
@@invert_permutation
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six.moves
from tensorflow.python.client import graph_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import common_shapes
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import state_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=wildcard-import
# Aliases for some automatically-generated names.
# These re-export the generated kernels under the public snake_case names
# documented at the top of this module.
argmax = gen_math_ops.arg_max
argmin = gen_math_ops.arg_min
linspace = gen_math_ops.lin_space
# pylint: disable=anomalous-backslash-in-string,protected-access
def abs(x, name=None):
  """Computes the absolute value of a tensor.

  Given a tensor of real numbers `x`, this operation returns a tensor
  containing the absolute value of each element in `x`. For example, if x is
  an input element and y is an output element, this operation computes
  \\\\(y = |x|\\\\).

  Complex tensors are dispatched to the `ComplexAbs` kernel; see
  [`tf.complex_abs()`](#tf_complex_abs) for details on complex input.

  Args:
    x: A `Tensor` of type `float`, `double`, `int32`, or `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` the same size and type as `x` with absolute values.
  """
  with ops.op_scope([x], name, "Abs") as name:
    x = ops.convert_to_tensor(x, name="x")
    # Real dtypes take the plain Abs kernel; complex dtypes fall through to
    # ComplexAbs, which yields the matching real dtype.
    if x.dtype not in (dtypes.complex64, dtypes.complex128):
      return gen_math_ops._abs(x, name=name)
    return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)
def complex_abs(x, name=None):
  r"""Computes the complex absolute value of a tensor.

  Given a tensor `x` of complex numbers, this operation returns a tensor of
  type `float` or `double` that is the absolute value of each element in `x`.
  All elements in `x` must be complex numbers of the form \\(a + bj\\). The
  absolute value is computed as \\( \sqrt{a^2 + b^2}\\).

  For example:

  ```
  # tensor 'x' is [[-2.25 + 4.75j], [-3.25 + 5.75j]]
  tf.complex_abs(x) ==> [5.25594902, 6.60492229]
  ```

  Args:
    x: A `Tensor` of type `complex64` or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  # Convert first so `.dtype` is available even when `x` arrives as a numpy
  # array or Python sequence; the sibling ops (abs, real, imag) do the same.
  x = ops.convert_to_tensor(x, name="x")
  return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)
def scalar_mul(scalar, x):
  """Multiplies a scalar times a `Tensor` or `IndexedSlices` object.

  Intended for use in gradient code which might deal with `IndexedSlices`
  objects, which are easy to multiply by a scalar but more expensive to
  multiply with arbitrary tensors.

  Args:
    scalar: A 0-D scalar `Tensor`. Must have known shape.
    x: A `Tensor` or `IndexedSlices` to be scaled.

  Returns:
    `scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.

  Raises:
    ValueError: if scalar is not a 0-D `scalar`.
  """
  scalar = ops.convert_to_tensor(scalar, dtype=x.dtype, name="scalar")
  shape = scalar.get_shape()
  # Reject anything with rank > 0 (or unknown rank) up front.
  if shape.ndims != 0:
    raise ValueError("Only scalar multiply works, got shape %s" % shape)
  if isinstance(x, ops.IndexedSlices):
    # Scale only the materialized values; indices and shape are unchanged.
    return ops.IndexedSlices(scalar * x.values, x.indices, x.dense_shape)
  return scalar * x
def pow(x, y, name=None):
  """Computes the power of one value to another.

  Given a tensor `x` and a tensor `y`, this operation computes \\\\(x^y\\\\) for
  corresponding elements in `x` and `y`. For example:

  ```
  # tensor 'x' is [[2, 2], [3, 3]]
  # tensor 'y' is [[8, 16], [2, 3]]
  tf.pow(x, y) ==> [[256, 65536], [9, 27]]
  ```

  Args:
    x: A `Tensor` of type `float`, `double`, `int32`, `complex64`, or `int64`.
    y: A `Tensor` of type `float`, `double`, `int32`, `complex64`, or `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.
  """
  # Pass both operands to op_scope (the original only passed `x`), matching
  # every other binary op in this file so `y` participates in graph selection
  # and name scoping.
  with ops.op_scope([x, y], name, "Pow") as name:
    return gen_math_ops._pow(x, y, name=name)
def complex(real, imag, name=None):
  """Converts two real numbers to a complex number.

  Given a tensor `real` representing the real part of a complex number, and a
  tensor `imag` representing the imaginary part of a complex number, this
  operation returns complex numbers elementwise of the form \\(a + bj\\), where
  *a* represents the `real` part and *b* represents the `imag` part.

  The input tensors `real` and `imag` must have the same shape.

  For example:

  ```
  # tensor 'real' is [2.25, 3.25]
  # tensor `imag` is [4.75, 5.75]
  tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
  ```

  Args:
    real: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    imag: A `Tensor`. Must have the same type as `real`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `complex64` or `complex128`.
  """
  real = ops.convert_to_tensor(real, name="real")
  imag = ops.convert_to_tensor(imag, name="imag")
  with ops.op_scope([real, imag], name, "Complex") as name:
    # The two supported (real, imag) dtype pairs and their output dtypes.
    tout_by_inputs = {
        (dtypes.float64, dtypes.float64): dtypes.complex128,
        (dtypes.float32, dtypes.float32): dtypes.complex64,
    }
    input_types = (real.dtype, imag.dtype)
    if input_types not in tout_by_inputs:
      raise TypeError("Types of real and imag don't match: "
                      "{} {}".format(real.dtype.name, imag.dtype.name))
    return gen_math_ops._complex(real, imag, Tout=tout_by_inputs[input_types],
                                 name=name)
def real(input, name=None):
  """Returns the real part of a complex number.

  Given a tensor `input` of complex numbers, this operation returns a tensor of
  type `float` or `double` that is the real part of each element in `input`.
  All elements in `input` must be complex numbers of the form \\(a + bj\\),
  where *a* is the real part returned by this operation and *b* is the
  imaginary part.

  For example:

  ```
  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
  tf.real(input) ==> [-2.25, 3.25]
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `complex64`,
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float` or `double`.
  """
  with ops.op_scope([input], name, "Real") as name:
    # Tout picks the real dtype paired with the complex input dtype.
    return gen_math_ops.real(input, Tout=input.dtype.real_dtype, name=name)
def imag(input, name=None):
  """Returns the imaginary part of a complex number.

  Given a tensor `input` of complex numbers, this operation returns a tensor of
  type `float` or `double` that is the imaginary part of each element in
  `input`. All elements in `input` must be complex numbers of the form \\(a +
  bj\\), where *a* is the real part and *b* is the imaginary part returned by
  this operation.

  For example:

  ```
  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
  tf.imag(input) ==> [4.75, 5.75]
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `complex64`,
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float` or `double`.
  """
  with ops.op_scope([input], name, "Imag") as name:
    # Tout picks the real dtype paired with the complex input dtype.
    return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)
def round(x, name=None):
  """Rounds the values of a tensor to the nearest integer, element-wise.

  For example:

  ```python
  # 'a' is [0.9, 2.5, 2.3, -4.4]
  tf.round(a) ==> [ 1.0, 3.0, 2.0, -4.0 ]
  ```

  Args:
    x: A `Tensor` of type `float` or `double`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as `x`.
  """
  x = ops.convert_to_tensor(x, name="x")
  # Integer tensors are already rounded; for floats, floor(x + 0.5) rounds
  # halves upward (2.5 -> 3.0).
  return x if x.dtype.is_integer else gen_math_ops.floor(x + 0.5, name=name)
def cast(x, dtype, name=None):
  """Casts a tensor to a new type.

  The operation casts `x` (in case of `Tensor`) or `x.values`
  (in case of `SparseTensor`) to `dtype`.

  For example:

  ```python
  # tensor `a` is [1.8, 2.2], dtype=tf.float
  tf.cast(a, tf.int32) ==> [1, 2] # dtype=tf.int32
  ```

  Args:
    x: A `Tensor` or `SparseTensor`.
    dtype: The destination type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x`.

  Raises:
    TypeError: If `x` cannot be cast to the `dtype`.
  """
  with ops.op_scope([x], name, "Cast") as name:
    if isinstance(x, ops.SparseTensor):
      # Recurse on the values only; indices and shape are metadata and keep
      # their original dtypes.
      values_cast = cast(x.values, dtype, name=name)
      return ops.SparseTensor(x.indices, values_cast, x.shape)
    else:
      # TODO(touts): Handle what Josh said.
      #
      # Could return ops.convert_to_tensor(x, dtype=dtype, ...) here, but that
      # allows some conversions that cast() can't do, e.g. casting numbers to
      # strings.
      x = ops.convert_to_tensor(x, name="x")
      # No-op shortcut: already the requested (base) dtype.
      if x.dtype.base_dtype == dtype:
        return x
      return gen_math_ops.cast(x, dtype, name=name)
def saturate_cast(value, dtype, name=None):
  """Performs a safe saturating cast of `value` to `dtype`.

  This function casts the input to `dtype` without applying any scaling. If
  there is a danger that values would over or underflow in the cast, this op
  applies the appropriate clamping before the cast.

  Args:
    value: A `Tensor`.
    dtype: The desired output `DType`.
    name: A name for the operation (optional).

  Returns:
    `value` safely cast to `dtype`.
  """
  # When casting to a type with smaller representable range, clamp.
  # Note that this covers casting to unsigned types as well.
  with ops.op_scope([value], name, "saturate_cast") as name:
    value = ops.convert_to_tensor(value, name="value")
    dtype = dtypes.as_dtype(dtype).base_dtype
    # Each bound is clamped independently, so a target range that is narrower
    # on only one side clamps only that side.
    if value.dtype.min < dtype.min:
      value = gen_math_ops.maximum(value, ops.convert_to_tensor(
          dtype.min, dtype=value.dtype, name="min"))
    if value.dtype.max > dtype.max:
      value = gen_math_ops.minimum(value, ops.convert_to_tensor(
          dtype.max, dtype=value.dtype, name="max"))
    return cast(value, dtype, name=name)
def to_float(x, name="ToFloat"):
  """Casts a tensor to type `float32`.

  Thin convenience wrapper around `cast()` pinned to `float32`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `float32`.

  Raises:
    TypeError: If `x` cannot be cast to the `float32`.
  """
  return cast(x, dtypes.float32, name=name)
def to_double(x, name="ToDouble"):
  """Casts a tensor to type `float64`.

  Thin convenience wrapper around `cast()` pinned to `float64`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `float64`.

  Raises:
    TypeError: If `x` cannot be cast to the `float64`.
  """
  return cast(x, dtypes.float64, name=name)
def to_int32(x, name="ToInt32"):
  """Casts a tensor to type `int32`.

  Thin convenience wrapper around `cast()` pinned to `int32`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `int32`.

  Raises:
    TypeError: If `x` cannot be cast to the `int32`.
  """
  return cast(x, dtypes.int32, name=name)
def to_int64(x, name="ToInt64"):
  """Casts a tensor to type `int64`.

  Thin convenience wrapper around `cast()` pinned to `int64`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `int64`.

  Raises:
    TypeError: If `x` cannot be cast to the `int64`.
  """
  return cast(x, dtypes.int64, name=name)
def to_bfloat16(x, name="ToBFloat16"):
  """Casts a tensor to type `bfloat16`.

  Thin convenience wrapper around `cast()` pinned to `bfloat16`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `bfloat16`.

  Raises:
    TypeError: If `x` cannot be cast to the `bfloat16`.
  """
  return cast(x, dtypes.bfloat16, name=name)
# Unary operator overloads on Tensor: -x and abs(x) dispatch to the graph ops.
ops.Tensor._override_operator("__neg__", gen_math_ops.neg)
ops.Tensor._override_operator("__abs__", abs)
# __invert__ corresponds to the ~ operator. Here we follow the numpy convention
# ~ marks an elementwise bit-wise inverse. This is only implemented for boolean
# tensors and will throw a TypeError if used on nonboolean arrays
ops.Tensor._override_operator("__invert__", gen_math_ops.logical_not)
def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):
  """Register operators with different tensor and scalar versions.

  If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,
  sp_values, sp_shape, dense)` and outputs `(new_sp_values)`.

  Args:
    func: the operator
    op_name: name of the operator being overridden
    clazz_object: class to override for. Either `Tensor` or `SparseTensor`.
  """
  def binary_op_wrapper(x, y):
    # Dense case: coerce the right operand to x's base dtype (unless it is a
    # SparseTensor, which func must accept directly).
    with ops.op_scope([x, y], None, op_name) as name:
      if not isinstance(y, ops.SparseTensor):
        y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
      return func(x, y, name=name)
  def binary_op_wrapper_sparse(sp_x, y):
    # Sparse case: func returns only the new values; rebuild the
    # SparseTensor around the original indices and shape.
    with ops.op_scope([sp_x, y], None, op_name) as name:
      y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y")
      return ops.SparseTensor(sp_x.indices, func(sp_x.indices, sp_x.values,
                                                 sp_x.shape, y, name=name),
                              sp_x.shape)
  def r_binary_op_wrapper(y, x):
    # Reflected variant (__radd__ etc.): the non-Tensor left operand arrives
    # as x and is converted to y's base dtype.
    with ops.op_scope([x, y], None, op_name) as name:
      x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
      return func(x, y, name=name)
  if clazz_object is ops.Tensor:
    clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper)
    del binary_op_wrapper  # explicit cleanup; the class holds the reference
    clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
    del r_binary_op_wrapper
  else:
    # SparseTensor only gets the forward operator; no reflected version.
    clazz_object._override_operator("__%s__" % op_name,
                                    binary_op_wrapper_sparse)
    del binary_op_wrapper_sparse
# Conversion table for __truediv__. None entries mean no conversion required.
# Integer dtypes are upcast to float before dividing: float32 for the 8/16-bit
# types, float64 for int32/int64 (matching numpy, per truediv's docstring).
_TRUEDIV_TABLE = {
    dtypes.uint8: dtypes.float32,
    dtypes.int8: dtypes.float32,
    dtypes.int16: dtypes.float32,
    dtypes.int32: dtypes.float64,
    dtypes.int64: dtypes.float64,
    dtypes.float16: None,
    dtypes.float32: None,
    dtypes.float64: None,
    dtypes.complex64: None,
    dtypes.complex128: None,
}
# NOTE: the support of "sparse (true)div dense" is currently not baked in into
# "tf.(true_)div()". Until such an API decision is made, the supported usage is
# to explicitly use the "/" operator to invoke either truediv or div.
def _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None):
  """Internal helper function for 'sp_t / dense_t'.

  Mirrors truediv(): both operands must share a dtype; integer dtypes are
  upcast via _TRUEDIV_TABLE before the componentwise division.
  """
  with ops.op_scope([sp_indices, sp_values, sp_shape, y],
                    name, "truediv") as name:
    sp_values = ops.convert_to_tensor(sp_values, name="sp_values")
    y = ops.convert_to_tensor(y, name="y")
    x_dtype = sp_values.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (x_dtype, y_dtype))
    try:
      # None means the dtype is already floating/complex; no cast needed.
      dtype = _TRUEDIV_TABLE[x_dtype]
    except KeyError:
      raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
    if dtype is not None:
      sp_values = cast(sp_values, dtype)
      y = cast(y, dtype)
    return gen_sparse_ops.sparse_dense_cwise_div(sp_indices, sp_values,
                                                 sp_shape, y, name=name)
def truediv(x, y, name=None):
  """Divides x / y elementwise, always producing floating point results.

  The same as `tf.div` for floating point arguments, but casts integer
  arguments to floating point before dividing so that the result is always
  floating point. This op is generated by normal `x / y` division in Python 3
  and in Python 2.7 with `from __future__ import division`. If you want
  integer division that rounds down, use `x // y` or `tf.floordiv`.

  `x` and `y` must have the same numeric type. If the inputs are floating
  point, the output will have the same type. If the inputs are integral, the
  inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`
  and `int64` (matching the behavior of Numpy).

  Args:
    x: `Tensor` numerator of numeric type.
    y: `Tensor` denominator of numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` evaluated in floating point.

  Raises:
    TypeError: If `x` and `y` have different dtypes.
  """
  with ops.op_scope([x, y], name, "truediv") as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y")
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (x_dtype, y_dtype))
    try:
      # _TRUEDIV_TABLE maps each supported dtype to the float dtype to cast
      # to; None means the input is already floating/complex.
      dtype = _TRUEDIV_TABLE[x_dtype]
    except KeyError:
      raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
    if dtype is not None:
      x = cast(x, dtype)
      y = cast(y, dtype)
    return gen_math_ops.div(x, y, name=name)
def floordiv(x, y, name=None):
  """Divides `x / y` elementwise, rounding down for floating point.

  The same as `tf.div(x,y)` for integers, but uses `tf.floor(tf.div(x,y))` for
  floating point arguments so that the result is always an integer (though
  possibly an integer represented as floating point). This op is generated by
  `x // y` floor division in Python 3 and in Python 2.7 with
  `from __future__ import division`.

  Note that for efficiency, `floordiv` uses C semantics for negative numbers
  (unlike Python and Numpy).

  `x` and `y` must have the same type, and the result will have the same type
  as well.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` rounded down (except possibly towards zero for negative integers).

  Raises:
    TypeError: If the inputs are complex.
  """
  with ops.op_scope([x, y], name, "floordiv") as name:
    x = ops.convert_to_tensor(x, name="x")
    dtype = x.dtype
    if dtype.is_floating:
      # Floating point: divide first, then drop the fractional part.
      return gen_math_ops.floor(gen_math_ops.div(x, y), name=name)
    if not dtype.is_integer:
      raise TypeError("Expected floating point or integer, got %r" % dtype)
    # Integer division already truncates (C semantics); no floor needed.
    return gen_math_ops.div(x, y, name=name)
def _mul_dispatch(x, y, name=None):
  """Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse"."""
  if isinstance(y, ops.Tensor):
    # Dense * Dense: plain elementwise multiply.
    return gen_math_ops.mul(x, y, name=name)
  assert isinstance(y, ops.SparseTensor)  # Case: Dense * Sparse.
  new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,
                                                   y.shape, x, name)
  return ops.SparseTensor(y.indices, new_vals, y.shape)
# SparseTensor operator overloads: these kernels take
# (indices, values, shape, dense) per _OverrideBinaryOperatorHelper's
# sparse contract.
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div",
                              ops.SparseTensor)
_OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv",
                              ops.SparseTensor)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul",
                              ops.SparseTensor)
# Dense Tensor arithmetic operator overloads (+, -, *, /, //, %, **).
_OverrideBinaryOperatorHelper(gen_math_ops.add, "add")
_OverrideBinaryOperatorHelper(gen_math_ops.sub, "sub")
_OverrideBinaryOperatorHelper(_mul_dispatch, "mul")  # also handles Dense*Sparse
_OverrideBinaryOperatorHelper(gen_math_ops.div, "div")
_OverrideBinaryOperatorHelper(truediv, "truediv")
_OverrideBinaryOperatorHelper(floordiv, "floordiv")
_OverrideBinaryOperatorHelper(gen_math_ops.mod, "mod")
_OverrideBinaryOperatorHelper(pow, "pow")
def logical_xor(x, y, name="LogicalXor"):
  """x ^ y = (x | y) & ~(x & y)."""
  # TODO(alemi) Make this a cwise op if people end up relying on it.
  either = gen_math_ops.logical_or(x, y)
  both = gen_math_ops.logical_and(x, y)
  return gen_math_ops.logical_and(either, gen_math_ops.logical_not(both),
                                  name=name)
# Boolean operator overloads (&, |, ^) ...
_OverrideBinaryOperatorHelper(gen_math_ops.logical_and, "and")
_OverrideBinaryOperatorHelper(gen_math_ops.logical_or, "or")
_OverrideBinaryOperatorHelper(logical_xor, "xor")
# ... and rich comparisons (<, <=, >, >=) on Tensor.
ops.Tensor._override_operator("__lt__", gen_math_ops.less)
ops.Tensor._override_operator("__le__", gen_math_ops.less_equal)
ops.Tensor._override_operator("__gt__", gen_math_ops.greater)
ops.Tensor._override_operator("__ge__", gen_math_ops.greater_equal)
def range(start, limit=None, delta=1, name="range"):
  """Creates a sequence of integers.

  Creates a sequence of integers that begins at `start` and extends by
  increments of `delta` up to but not including `limit`.

  Like the Python builtin `range`, `start` defaults to 0, so that
  `range(n) = range(0, n)`.

  For example:

  ```
  # 'start' is 3
  # 'limit' is 18
  # 'delta' is 3
  tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
  # 'limit' is 5
  tf.range(limit) ==> [0, 1, 2, 3, 4]
  ```

  Args:
    start: A 0-D (scalar) of type `int32`. First entry in sequence.
      Defaults to 0.
    limit: A 0-D (scalar) of type `int32`. Upper limit of sequence,
      exclusive.
    delta: A 0-D `Tensor` (scalar) of type `int32`. Optional. Default is 1.
      Number that increments `start`.
    name: A name for the operation (optional).

  Returns:
    An 1-D `int32` `Tensor`.
  """
  # NOTE: this shadows the Python builtin `range` in this module; helpers
  # below (_ReductionDims, _as_indexed_slices) resolve to this tf.range.
  if limit is None:
    # Single-argument form, mirroring the builtin: range(n) == range(0, n).
    start, limit = 0, start
  return gen_math_ops._range(start, limit, delta, name=name)
@ops.RegisterShape("Range")
def _RangeShape(op):
  """Shape function for Range: length is known only if all inputs are const."""
  start_value = tensor_util.constant_value(op.inputs[0])
  limit_value = tensor_util.constant_value(op.inputs[1])
  delta_value = tensor_util.constant_value(op.inputs[2])
  if start_value is None or limit_value is None or delta_value is None:
    # Any non-constant input makes the output length unknown.
    return [tensor_shape.vector(None)]
  else:
    # ceil((limit - start) / delta) via integer arithmetic.
    return [tensor_shape.vector((limit_value - start_value + delta_value - 1) //
                                delta_value)]
# Reduction operations
def _ReductionDims(x, reduction_indices):
  """Returns range(0, rank(x)) if reduction_indices is None."""
  if reduction_indices is not None:
    return reduction_indices
  else:
    # TODO(zongheng): remove this once rank() supports SparseTensor.
    if isinstance(x, ops.SparseTensor):
      # `range` here is the tf.range defined above (it shadows the builtin),
      # so this emits a Range op covering all dimensions of x.
      return range(0, array_ops.size(x.shape))
    return range(0, array_ops.rank(x))
def reduce_sum(input_tensor, reduction_indices=None, keep_dims=False,
               name=None):
  """Computes the sum of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1.

  If `reduction_indices` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  ```python
  # 'x' is [[1, 1, 1]
  #         [1, 1, 1]]
  tf.reduce_sum(x) ==> 6
  tf.reduce_sum(x, 0) ==> [2, 2, 2]
  tf.reduce_sum(x, 1) ==> [3, 3]
  tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]]
  tf.reduce_sum(x, [0, 1]) ==> 6
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  # None reduction_indices means "reduce everything".
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._sum(input_tensor, dims, keep_dims, name=name)
def reduce_mean(input_tensor, reduction_indices=None, keep_dims=False,
                name=None):
  """Computes the mean of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1.

  If `reduction_indices` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  ```python
  # 'x' is [[1., 1.]
  #         [2., 2.]]
  tf.reduce_mean(x) ==> 1.5
  tf.reduce_mean(x, 0) ==> [1.5, 1.5]
  tf.reduce_mean(x, 1) ==> [1., 2.]
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  # None reduction_indices means "reduce everything".
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._mean(input_tensor, dims, keep_dims, name=name)
def reduce_prod(input_tensor, reduction_indices=None, keep_dims=False,
                name=None):
  """Computes the product of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1.

  If `reduction_indices` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  # None reduction_indices means "reduce everything".
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._prod(input_tensor, dims, keep_dims, name=name)
def reduce_min(input_tensor, reduction_indices=None, keep_dims=False,
               name=None):
  """Computes the minimum of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1.

  If `reduction_indices` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  # None reduction_indices means "reduce everything".
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._min(input_tensor, dims, keep_dims, name=name)
def reduce_max(input_tensor, reduction_indices=None, keep_dims=False,
               name=None):
  """Computes the maximum of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1.

  If `reduction_indices` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  # None reduction_indices means "reduce everything".
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._max(input_tensor, dims, keep_dims, name=name)
def reduce_all(input_tensor, reduction_indices=None, keep_dims=False,
               name=None):
  """Computes the "logical and" of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1.

  If `reduction_indices` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  ```python
  # 'x' is [[True, True]
  #         [False, False]]
  tf.reduce_all(x) ==> False
  tf.reduce_all(x, 0) ==> [False, False]
  tf.reduce_all(x, 1) ==> [True, False]
  ```

  Args:
    input_tensor: The boolean tensor to reduce.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  # None reduction_indices means "reduce everything".
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._all(input_tensor, dims, keep_dims, name=name)
def reduce_any(input_tensor, reduction_indices=None, keep_dims=False,
               name=None):
  """Computes the "logical or" of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1.

  If `reduction_indices` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  ```python
  # 'x' is [[True, True]
  #         [False, False]]
  tf.reduce_any(x) ==> True
  tf.reduce_any(x, 0) ==> [True, True]
  tf.reduce_any(x, 1) ==> [True, False]
  ```

  Args:
    input_tensor: The boolean tensor to reduce.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  # None reduction_indices means "reduce everything".
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._any(input_tensor, dims, keep_dims, name=name)
def trace(x, name=None):
  """Compute the trace of a tensor `x`.

  `trace(x)` returns the sum of the elements along the main diagonal.

  For example:

  ```python
  # 'x' is [[1, 1],
  #         [1, 1]]
  tf.trace(x) ==> 2
  # 'x' is [[1,2,3],
  #         [4,5,6],
  #         [7,8,9]]
  tf.trace(x) ==> 15
  ```

  Args:
    x: 2-D tensor.
    name: A name for the operation (optional).

  Returns:
    The trace of input tensor.
  """
  with ops.op_scope([x], name, "Trace") as name:
    x = ops.convert_to_tensor(x, name="x")
    # Only square-agnostic rank check here; diag_part enforces the rest.
    if len(x.get_shape()) != 2:
      raise ValueError("Expected a tensor with rank 2, rank %d tensor received"
                       % len(x.get_shape()))
    return reduce_sum(array_ops.diag_part(x), name=name)
def matmul(a, b,
           transpose_a=False, transpose_b=False,
           a_is_sparse=False, b_is_sparse=False,
           name=None):
  """Multiplies matrix `a` by matrix `b`, producing `a` * `b`.

  The inputs must be two-dimensional matrices, with matching inner dimensions,
  possibly after transposition.

  Both matrices must be of the same type. The supported types are:
  `float`, `double`, `int32`, `complex64`.

  Either matrix can be transposed on the fly by setting the corresponding flag
  to `True`. This is `False` by default.

  If one or both of the matrices contain a lot of zeros, a more efficient
  multiplication algorithm can be used by setting the corresponding
  `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.

  For example:

  ```python
  # 2-D tensor `a`
  a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) => [[1. 2. 3.]
                                                        [4. 5. 6.]]
  # 2-D tensor `b`
  b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2]) => [[7. 8.]
                                                           [9. 10.]
                                                           [11. 12.]]
  c = tf.matmul(a, b) => [[58 64]
                          [139 154]]
  ```

  Args:
    a: `Tensor` of type `float`, `double`, `int32` or `complex64`.
    b: `Tensor` with same type as `a`.
    transpose_a: If `True`, `a` is transposed before multiplication.
    transpose_b: If `True`, `b` is transposed before multiplication.
    a_is_sparse: If `True`, `a` is treated as a sparse matrix.
    b_is_sparse: If `True`, `b` is treated as a sparse matrix.
    name: Name for the operation (optional).

  Returns:
    A `Tensor` of the same type as `a`.
  """
  with ops.op_scope([a, b], name, "MatMul") as name:
    a = ops.convert_to_tensor(a, name="a")
    b = ops.convert_to_tensor(b, name="b")
    sparse_matmul_types = [dtypes.bfloat16, dtypes.float32]
    # Use the SparseMatMul kernel only when the caller flagged an operand as
    # sparse AND both dtypes are supported by that kernel.
    use_sparse_matmul = (a.dtype in sparse_matmul_types and
                         b.dtype in sparse_matmul_types and
                         (a_is_sparse or b_is_sparse))
    if dtypes.bfloat16 in (a.dtype, b.dtype):
      # matmul currently doesn't handle bfloat16 inputs.
      use_sparse_matmul = True
    if use_sparse_matmul:
      return sparse_matmul(a, b,
                           transpose_a=transpose_a,
                           transpose_b=transpose_b,
                           a_is_sparse=a_is_sparse,
                           b_is_sparse=b_is_sparse,
                           name=name)
    else:
      return gen_math_ops._mat_mul(a, b,
                                   transpose_a=transpose_a,
                                   transpose_b=transpose_b,
                                   name=name)
# Public aliases for the generated matmul variants.
sparse_matmul = gen_math_ops._sparse_mat_mul
batch_matmul = gen_math_ops._batch_mat_mul
# Both matmul flavors share the same shape function.
ops.RegisterShape("MatMul")(common_shapes.matmul_shape)
ops.RegisterShape("SparseMatMul")(common_shapes.matmul_shape)
@ops.RegisterStatistics("MatMul", "flops")
def _calc_mat_mul_flops(graph, node):
  """Calculates the compute resources needed for MatMul."""
  transpose_a = node.attr["transpose_a"].b
  a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  a_shape.assert_is_fully_defined()
  # k is the shared inner dimension of the product: axis 0 of `a` when `a`
  # is transposed, axis 1 otherwise.
  if transpose_a:
    k = int(a_shape[0])
  else:
    k = int(a_shape[1])
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  output_count = np.prod(output_shape.as_list())
  # Each output element costs k multiplies plus k adds -> 2*k flops.
  return ops.OpStats("flops", (k * output_count * 2))
@ops.RegisterStatistics("MatMul", "weight_parameters")
def _calc_mat_mul_weight_parameters(graph, node):
  """Calculates the on-disk size of the weights for MatMul."""
  # We assume here that the weights are always in the second input to the op,
  # which is generally true by convention for fully-connected layers, but not
  # enforced or checked.
  weights_shape = graph_util.tensor_shape_from_node_def_name(graph,
                                                             node.input[1])
  weights_shape.assert_is_fully_defined()
  # Parameter count is simply rows * cols of the weight matrix.
  return ops.OpStats("weight_parameters",
                     (int(weights_shape[1]) * int(weights_shape[0])))
def _as_indexed_slices(x):
  """Convert 'x' to IndexedSlices.

  Convert a dense Tensor to a block-sparse IndexedSlices.

  Args:
    x: Either a Tensor object, or an IndexedSlices object.

  Returns:
    An IndexedSlices object.

  Raises:
    TypeError: If 'x' is not a Tensor or an IndexedSlices object.
  """
  # TODO(touts): op_scope
  if not isinstance(x, (ops.Tensor, ops.IndexedSlices)):
    raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
  if isinstance(x, ops.IndexedSlices):
    return x
  x_shape = array_ops.shape(x)
  # `range` is the tf.range defined in this module, so the indices tensor
  # covers every row of x: the dense tensor becomes "fully indexed" slices.
  return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)
def _as_indexed_slices_list(inputs):
  """Convert all elements of 'inputs' to IndexedSlices.

  Additionally, homogenize the types of all the indices to either int32 or
  int64: if the index dtypes are mixed, every int32 index tensor is widened
  to int64.

  Args:
    inputs: List containing either Tensor or IndexedSlices objects.

  Returns:
    A list of IndexedSlices objects.

  Raises:
    TypeError: If 'inputs' is not a list or a tuple.
  """
  if not isinstance(inputs, (list, tuple)):
    raise TypeError("Expected a list or tuple, not a %s" % type(inputs))
  slices = [_as_indexed_slices(i) for i in inputs]
  num_int32 = sum(1 for s in slices if s.indices.dtype == dtypes.int32)
  # Already homogeneous (all int64 or all int32): nothing to do.
  if num_int32 == 0 or num_int32 == len(slices):
    return slices
  # Mixed dtypes: promote the int32 index tensors to int64.
  homogenized = []
  for s in slices:
    if s.indices.dtype == dtypes.int32:
      homogenized.append(
          ops.IndexedSlices(s.values, cast(s.indices, dtypes.int64),
                            s.dense_shape))
    else:
      homogenized.append(s)
  return homogenized
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
  """Returns the element-wise sum of a list of tensors.

  Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
  otherwise, these are inferred.

  For example:

  ```python
  # tensor 'a' is [[1, 2], [3, 4]]
  # tensor `b` is [[5, 0], [0, 6]]
  tf.accumulate_n([a, b, a]) ==> [[7, 4], [6, 14]]

  # Explicitly pass shape and type
  tf.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
    ==> [[7, 4], [6, 14]]
  ```

  Args:
    inputs: A list of `Tensor` objects, each with same shape and type.
    shape: Shape of elements of `inputs`.
    tensor_dtype: The type of `inputs`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
    cannot be inferred.
  """
  # Infer the dtype when it was not supplied; this also validates that the
  # inputs are a non-empty homogeneous list of Tensors.
  if tensor_dtype is None:
    if not inputs or not isinstance(inputs, (list, tuple)):
      raise ValueError("inputs must be a list of at least one Tensor with the "
                       "same dtype and shape")
    inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
    if not all(isinstance(x, ops.Tensor) for x in inputs):
      raise ValueError("inputs must be a list of at least one Tensor with the "
                       "same dtype and shape")
    if not all(x.dtype == inputs[0].dtype for x in inputs):
      raise ValueError("inputs must be a list of at least one Tensor with the "
                       "same dtype and shape")
    tensor_dtype = inputs[0].dtype
  # Determine the accumulator shape: either the caller-supplied one, or the
  # merge of all statically-known input shapes.
  if shape is not None:
    shape = tensor_shape.as_shape(shape)
  else:
    shape = tensor_shape.unknown_shape()
    for input_tensor in inputs:
      if isinstance(input_tensor, ops.Tensor):
        shape = shape.merge_with(input_tensor.get_shape())
  if not shape.is_fully_defined():
    # TODO(pbar): Make a version of assign_add that accepts an uninitialized
    # lvalue, and takes its shape from that? This would allow accumulate_n to
    # work in all situations that add_n currently works.
    raise ValueError("Cannot infer the shape of the accumulator for "
                     "accumulate_n. Pass the shape argument, or set the shape "
                     "of at least one of the inputs.")
  with ops.op_scope(inputs, name, "AccumulateN") as name:
    # A single input needs no accumulation at all.
    if len(inputs) == 1:
      return inputs[0]
    # Accumulate into a zero-initialized temporary variable via locked
    # assign_add ops, then destroy the temporary once all updates have run.
    var = gen_state_ops._temporary_variable(shape=shape, dtype=tensor_dtype)
    var_name = var.op.name
    var = state_ops.assign(var, array_ops.zeros_like(inputs[0]))
    update_ops = []
    for input_tensor in inputs:
      op = state_ops.assign_add(var, input_tensor, use_locking=True)
      update_ops.append(op)
    # The control dependency guarantees every partial sum has been applied
    # before the temporary is read back and destroyed.
    with ops.control_dependencies(update_ops):
      return gen_state_ops._destroy_temporary_variable(var,
                                                       var_name=var_name,
                                                       name=name)
@ops.RegisterShape("BatchMatMul")
def _BatchMatMulShape(op):
  """Shape function for BatchMatMul op."""
  x_shape = op.inputs[0].get_shape()
  adjoint_x = op.get_attr("adj_x")
  y_shape = op.inputs[1].get_shape()
  adjoint_y = op.get_attr("adj_y")
  if x_shape.dims is None and y_shape.dims is None:
    return [tensor_shape.unknown_shape()]
  # All leading axes are batch dimensions and must agree between the inputs.
  batch = x_shape[:-2].merge_with(y_shape[:-2])
  # Adjoint swaps the roles of the last two axes of the respective input.
  out_rows = x_shape[-1] if adjoint_x else x_shape[-2]
  out_cols = y_shape[-2] if adjoint_y else y_shape[-1]
  contraction_x = x_shape[-2] if adjoint_x else x_shape[-1]
  contraction_y = y_shape[-1] if adjoint_y else y_shape[-2]
  contraction_x.assert_is_compatible_with(contraction_y)
  return [batch.concatenate([out_rows, out_cols])]
def sigmoid(x, name=None):
  """Computes sigmoid of `x` element-wise.

  Specifically, `y = 1 / (1 + exp(-x))`.

  Args:
    x: A Tensor with type `float`, `double`, `int32`, `complex64`, `int64`,
      or `qint32`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x` if `x.dtype != qint32`
      otherwise the return type is `quint8`.
  """
  with ops.op_scope([x], name, "Sigmoid") as scope_name:
    tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops._sigmoid(tensor, name=scope_name)
def tanh(x, name=None):
  """Computes hyperbolic tangent of `x` element-wise.

  Args:
    x: A Tensor with type `float`, `double`, `int32`, `complex64`, `int64`,
      or `qint32`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x` if `x.dtype != qint32` otherwise
      the return type is `quint8`.
  """
  with ops.op_scope([x], name, "Tanh") as scope_name:
    tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops._tanh(tensor, name=scope_name)
# Element-wise unary ops, casts and FFT variants all produce an output with
# exactly the shape of their input.
ops.RegisterShape("Abs")(common_shapes.unchanged_shape)
ops.RegisterShape("Ceil")(common_shapes.unchanged_shape)
ops.RegisterShape("Conj")(common_shapes.unchanged_shape)
ops.RegisterShape("Cos")(common_shapes.unchanged_shape)
ops.RegisterShape("Cross")(common_shapes.unchanged_shape)
ops.RegisterShape("Exp")(common_shapes.unchanged_shape)
ops.RegisterShape("Floor")(common_shapes.unchanged_shape)
ops.RegisterShape("Imag")(common_shapes.unchanged_shape)
ops.RegisterShape("Inv")(common_shapes.unchanged_shape)
ops.RegisterShape("IsFinite")(common_shapes.unchanged_shape)
ops.RegisterShape("IsInf")(common_shapes.unchanged_shape)
ops.RegisterShape("IsNan")(common_shapes.unchanged_shape)
ops.RegisterShape("Log")(common_shapes.unchanged_shape)
ops.RegisterShape("LogicalNot")(common_shapes.unchanged_shape)
ops.RegisterShape("Neg")(common_shapes.unchanged_shape)
ops.RegisterShape("Real")(common_shapes.unchanged_shape)
ops.RegisterShape("Rsqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Sign")(common_shapes.unchanged_shape)
ops.RegisterShape("Sin")(common_shapes.unchanged_shape)
ops.RegisterShape("Sqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Square")(common_shapes.unchanged_shape)
ops.RegisterShape("Sigmoid")(common_shapes.unchanged_shape)
ops.RegisterShape("Tanh")(common_shapes.unchanged_shape)
ops.RegisterShape("Lgamma")(common_shapes.unchanged_shape)
ops.RegisterShape("Digamma")(common_shapes.unchanged_shape)
ops.RegisterShape("Erf")(common_shapes.unchanged_shape)
ops.RegisterShape("Erfc")(common_shapes.unchanged_shape)
ops.RegisterShape("Cast")(common_shapes.unchanged_shape)
ops.RegisterShape("ComplexAbs")(common_shapes.unchanged_shape)
ops.RegisterShape("FFT")(common_shapes.unchanged_shape)
ops.RegisterShape("IFFT")(common_shapes.unchanged_shape)
ops.RegisterShape("FFT2D")(common_shapes.unchanged_shape)
ops.RegisterShape("IFFT2D")(common_shapes.unchanged_shape)
ops.RegisterShape("FFT3D")(common_shapes.unchanged_shape)
ops.RegisterShape("IFFT3D")(common_shapes.unchanged_shape)
ops.RegisterShape("BatchFFT")(common_shapes.unchanged_shape)
ops.RegisterShape("BatchIFFT")(common_shapes.unchanged_shape)
ops.RegisterShape("BatchFFT2D")(common_shapes.unchanged_shape)
ops.RegisterShape("BatchIFFT2D")(common_shapes.unchanged_shape)
ops.RegisterShape("BatchFFT3D")(common_shapes.unchanged_shape)
ops.RegisterShape("BatchIFFT3D")(common_shapes.unchanged_shape)
@ops.RegisterShape("Add")
@ops.RegisterShape("Complex")
@ops.RegisterShape("Div")
@ops.RegisterShape("Equal")
@ops.RegisterShape("Greater")
@ops.RegisterShape("GreaterEqual")
@ops.RegisterShape("Igamma")
@ops.RegisterShape("Igammac")
@ops.RegisterShape("Zeta")
@ops.RegisterShape("Polygamma")
@ops.RegisterShape("Less")
@ops.RegisterShape("LessEqual")
@ops.RegisterShape("LogicalAnd")
@ops.RegisterShape("LogicalOr")
@ops.RegisterShape("Maximum")
@ops.RegisterShape("Minimum")
@ops.RegisterShape("Mod")
@ops.RegisterShape("Mul")
@ops.RegisterShape("NotEqual")
@ops.RegisterShape("Pow")
@ops.RegisterShape("Sub")
@ops.RegisterShape("SquaredDifference")
def _BroadcastShape(op):
  """Common shape function for binary operators that broadcast their inputs."""
  shape_x = op.inputs[0].get_shape()
  shape_y = op.inputs[1].get_shape()
  if shape_x.ndims is None or shape_y.ndims is None:
    return [tensor_shape.unknown_shape()]
  # Align the shapes on their trailing axes, padding the shorter one with 1s,
  # then combine dimension pairs according to the numpy broadcasting rules:
  # http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
  one = tensor_shape.Dimension(1)
  aligned_pairs = reversed(list(six.moves.zip_longest(
      reversed(shape_x.dims), reversed(shape_y.dims), fillvalue=one)))
  output_dims = []
  for (dim_x, dim_y) in aligned_pairs:
    if dim_x.value is None or dim_y.value is None:
      # One or both dimensions is unknown. If either dimension is greater than
      # 1, we assume that the program is correct, and the other dimension will
      # be broadcast to match it.
      # TODO(mrry): If we eliminate the shape checks in C++, we must still
      # assert that the unknown dim is either 1 or the same as the known dim.
      if dim_x.value is not None and dim_x.value > 1:
        output_dims.append(dim_x)
      elif dim_y.value is not None and dim_y.value > 1:
        output_dims.append(dim_y)
      else:
        output_dims.append(None)
    elif dim_x.value == 1:
      # dim_x broadcasts up to dim_y.
      output_dims.append(dim_y)
    elif dim_y.value == 1:
      # dim_y broadcasts up to dim_x.
      output_dims.append(dim_x)
    elif dim_x.value == dim_y.value:
      # Equal known sizes: the output keeps that size.
      output_dims.append(dim_x.merge_with(dim_y))
    else:
      raise ValueError("Incompatible shapes for broadcasting: %s and %s"
                       % (shape_x, shape_y))
  return [tensor_shape.TensorShape(output_dims)]
@ops.RegisterShape("SparseDenseCwiseMul")
@ops.RegisterShape("SparseDenseCwiseDiv")
@ops.RegisterShape("SparseDenseCwiseAdd")
def _SparseDenseBinaryOpShape(op):  # pylint: disable=invalid-name
  """Common shape for 'sparse <binary cwise op> dense -> sparse' operators."""
  # The output carries one value per non-zero entry of the sparse operand.
  num_nonzero = op.inputs[1].get_shape()[0]
  return [tensor_shape.TensorShape(num_nonzero)]
@ops.RegisterShape("AddN")
def _AddNShape(op):
  """Shape function for AddN: the merge of all input shapes."""
  result = tensor_shape.unknown_shape()
  for addend in op.inputs:
    result = result.merge_with(addend.get_shape())
  return [result]
@ops.RegisterShape("Select")
def _SelectShape(op):
  """Shape function for SelectOp.

  The inputs 'then' and 'else' must have the same shape. The input 'cond'
  must either have the same shape as 'then' and 'else', or be a vector whose
  length matches their first dimension when 'then'/'else' are at least
  vectors.
  """
  # The inputs 'then' and 'else' must have the same shape.
  # The input 'cond' must either have the same shape as 'then' and
  # 'else', or be a vector if 'then' and 'else' are at least vectors.
  c_shape = op.inputs[0].get_shape()
  t_shape = op.inputs[1].get_shape()
  e_shape = op.inputs[2].get_shape()
  t_e_shape = t_shape.merge_with(e_shape)
  c_shape_list = c_shape.as_list() if c_shape.ndims is not None else None
  t_e_shape_list = t_e_shape.as_list() if t_e_shape.ndims is not None else None
  if c_shape_list is not None and t_e_shape_list is not None:
    if len(c_shape_list) != 1:
      # If the rank of 'cond' is != 1, the shape must match 'then' and 'else'
      t_e_shape = t_e_shape.merge_with(c_shape)
    if t_e_shape_list:
      # If then and else are not scalars, then cond must be at least
      # a vector, and its first value must match that of 'else'
      c_shape = c_shape.with_rank_at_least(1)
      if len(c_shape.as_list()) == 1:
        # merge_with is called only for its compatibility check (it raises on
        # mismatch); the merged result is intentionally discarded.
        c_shape.merge_with(tensor_shape.vector(t_e_shape_list[0]))
  return [t_e_shape]
@ops.RegisterShape("ArgMax")
@ops.RegisterShape("ArgMin")
def _ArgOpShape(op):
  """Common shape function for arg-reduction ops."""
  # The reduction dimension argument must be a scalar.
  op.inputs[1].get_shape().assert_is_compatible_with(tensor_shape.scalar())
  input_shape = op.inputs[0].get_shape()
  if input_shape.ndims is None:
    return [tensor_shape.unknown_shape()]
  if input_shape.ndims <= 1:
    # Reducing a scalar or vector always yields a scalar.
    return [tensor_shape.scalar()]
  dimension = tensor_util.constant_value(op.inputs[1])
  if dimension is None:
    # Axis unknown at graph-construction time: rank is known, sizes are not.
    return [tensor_shape.unknown_shape(ndims=input_shape.ndims - 1)]
  if not 0 <= dimension < input_shape.ndims:
    raise ValueError(
        "dimension (%d) must be in the range [0, %d), where %d is the number "
        "of dimensions in the input"
        % (dimension, input_shape.ndims, input_shape.ndims))
  # Drop the reduced axis and keep every other dimension.
  kept_dims = [dim for i, dim in enumerate(input_shape.dims) if i != dimension]
  return [tensor_shape.TensorShape(kept_dims)]
@ops.RegisterShape("All")
@ops.RegisterShape("Any")
@ops.RegisterShape("Max")
@ops.RegisterShape("Mean")
@ops.RegisterShape("Min")
@ops.RegisterShape("Prod")
@ops.RegisterShape("Sum")
def _ReductionShape(op):
  """Common shape function for reduction ops."""
  input_shape = op.inputs[0].get_shape()
  axes = tensor_util.constant_value(op.inputs[1])
  keep_dims = op.get_attr("keep_dims")
  if axes is None or input_shape.ndims is None:
    # Non-constant axes (or unknown input rank): the sizes are unknown, but
    # keep_dims at least preserves the rank when it is known.
    if keep_dims:
      return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
    return [tensor_shape.unknown_shape()]
  # A scalar axis argument is treated as a one-element vector.
  axes = np.ravel(axes)
  for axis in axes:
    if (axis < -input_shape.ndims or
        axis >= input_shape.ndims):
      raise ValueError("Invalid reduction dimension %d for input with %d "
                       "dimensions" % (axis, input_shape.ndims))
  # Normalize negative axes and deduplicate.
  axes = set((axis + input_shape.ndims) % input_shape.ndims for axis in axes)
  if keep_dims:
    # Reduced axes collapse to size 1 but stay in place.
    output_dims = [1 if i in axes else dim
                   for i, dim in enumerate(input_shape.dims)]
  else:
    # Reduced axes disappear entirely.
    output_dims = [dim for i, dim in enumerate(input_shape.dims)
                   if i not in axes]
  return [tensor_shape.TensorShape(output_dims)]
@ops.RegisterShape("SegmentMax")
@ops.RegisterShape("SegmentMean")
@ops.RegisterShape("SegmentMin")
@ops.RegisterShape("SegmentProd")
@ops.RegisterShape("SegmentSum")
def _SegmentReductionShape(op):
  """Common shape function for segment reduction ops."""
  data_shape = op.inputs[0].get_shape()
  # segment_ids must be a vector; the number of distinct segments is
  # data-dependent, so the output's leading dimension is unknown.
  op.inputs[1].get_shape().assert_has_rank(1)
  leading = tensor_shape.TensorShape([None])
  return [leading.concatenate(data_shape[1:])]
@ops.RegisterShape("SparseSegmentMean")
@ops.RegisterShape("SparseSegmentSqrtN")
@ops.RegisterShape("SparseSegmentSum")
def _SparseSegmentReductionShape(op):
  """Common shape function for sparse segment reduction ops."""
  data_shape = op.inputs[0].get_shape()
  # indices and segment_ids are parallel rank-1 tensors of equal length.
  indices_shape = op.inputs[1].get_shape().with_rank(1)
  segment_ids_shape = op.inputs[2].get_shape().with_rank(1)
  indices_shape.assert_is_compatible_with(segment_ids_shape)
  # The number of output segments is data-dependent, hence unknown.
  return [tensor_shape.TensorShape([None]).concatenate(data_shape[1:])]
@ops.RegisterShape("SparseSegmentMeanGrad")
@ops.RegisterShape("SparseSegmentSqrtNGrad")
# pylint: disable=invalid-name
def _SparseSegmentReductionGradShape(op):
  """Shape function for the SparseSegment[Mean|SqrtN]Grad ops."""
  input_shape = op.inputs[0].get_shape()
  # indices and segment_ids are rank-1 and must agree with each other; the
  # merges below exist only for their compatibility checks (they raise on
  # mismatch).
  indices_shape = op.inputs[1].get_shape().with_rank(1)
  op.inputs[2].get_shape().merge_with(indices_shape)
  # output_dim0 is a scalar holding the size of the gradient's leading axis.
  op.inputs[3].get_shape().merge_with(tensor_shape.scalar())
  dim0 = tensor_util.constant_value(op.inputs[3])
  return [tensor_shape.TensorShape([dim0]).concatenate(input_shape[1:])]
# pylint: enable=invalid-name
@ops.RegisterShape("UnsortedSegmentSum")
def _UnsortedSegmentSumShape(op):
  """Shape function for UnsortedSegmentSum."""
  data_shape = op.inputs[0].get_shape()
  segment_rank = op.inputs[1].get_shape().ndims
  if segment_rank is None:
    return [tensor_shape.unknown_shape()]
  # The first `segment_rank` axes of `data` collapse into a single axis of
  # size `num_segments`.
  num_segments = tensor_util.constant_value(op.inputs[2])
  return [tensor_shape.TensorShape([num_segments]).concatenate(
      data_shape[segment_rank:])]
@ops.RegisterShape("LinSpace")
def _LinspaceShape(op):
  """Shape function for LinSpace: a vector of `num` evenly spaced values."""
  length = tensor_util.constant_value(op.inputs[2])
  return [tensor_shape.vector(length)]
def reduced_shape(input_shape, axes):
  """Helper function for reduction ops.

  Args:
    input_shape: 1-D Tensor, the shape of the Tensor being reduced.
    axes: 1-D Tensor, the reduction axes.

  Returns:
    A 1-D Tensor, the output shape as if keep_dims were set to True.
  """
  # Worked example below: input_shape=[2, 3, 5, 7], axes=[1, 2].
  # The casts are needed for SparseTensor reductions.
  input_shape = to_int32(input_shape)
  axes = to_int32(axes)
  rank = array_ops.size(input_shape)               # 4
  axes = (axes + rank) % rank                      # normalize negative axes
  # Scatter 1s into the reduced positions and keep input_shape elsewhere:
  # dynamic_stitch([[0, 1, 2, 3], [1, 2]],
  #                [[2, 3, 5, 7], [1, 1]])  ->  [2, 1, 1, 7]
  return gen_data_flow_ops.dynamic_stitch(
      [range(rank), axes],
      [input_shape, array_ops.fill(array_ops.shape(axes), 1)])
| {
"content_hash": "15980d1c9a1df58d4c0a6b244e768590",
"timestamp": "",
"source": "github",
"line_count": 1718,
"max_line_length": 86,
"avg_line_length": 33.5430733410943,
"alnum_prop": 0.6589445919447481,
"repo_name": "ninotoshi/tensorflow",
"id": "9312a84f3eb463c065c198e0f9670b752f05830d",
"size": "58305",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/math_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "151630"
},
{
"name": "C++",
"bytes": "6579490"
},
{
"name": "CMake",
"bytes": "29325"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "657597"
},
{
"name": "Java",
"bytes": "50361"
},
{
"name": "JavaScript",
"bytes": "16175"
},
{
"name": "Jupyter Notebook",
"bytes": "777942"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "66333"
},
{
"name": "Python",
"bytes": "3809695"
},
{
"name": "Shell",
"bytes": "66697"
},
{
"name": "TypeScript",
"bytes": "329009"
}
],
"symlink_target": ""
} |
import os
import sys
# Django management entry point: selects the project's settings module and
# hands argv to Django's command-line dispatcher (runserver, migrate, ...).
if __name__ == "__main__":
    # setdefault keeps an externally supplied DJANGO_SETTINGS_MODULE intact.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "teach.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| {
"content_hash": "b0a2b11198cdfed46272c5f700ef5073",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 69,
"avg_line_length": 25.11111111111111,
"alnum_prop": 0.7079646017699115,
"repo_name": "selenamarie/teach-today",
"id": "6088206c52d4b0ebbe25766f19d9d4e458a2e297",
"size": "248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "23705"
}
],
"symlink_target": ""
} |
"""
Stakeholder engagement API
This API enables Intelligent Engagement for your Business. iEngage is a platform that combines process, augmented intelligence and rewards to help you intelligently engage customers.
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import iengage_client
from iengage_client.rest import ApiException
from iengage_client.models.request_for_me import RequestForMe
class TestRequestForMe(unittest.TestCase):
    """ RequestForMe unit test stubs """
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testRequestForMe(self):
        """
        Test RequestForMe
        """
        # Smoke test: constructing the generated model must not raise.
        # NOTE(review): no assertions here — this is a swagger-codegen stub;
        # flesh it out with real field checks when the model is exercised.
        model = iengage_client.models.request_for_me.RequestForMe()
# Allow running this test module directly with `python test_request_for_me.py`.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "dcbe214dbfcb274615e2c63e6a1adf62",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 186,
"avg_line_length": 22.5,
"alnum_prop": 0.6988888888888889,
"repo_name": "iEngage/python-sdk",
"id": "eb9e3a140a6e504c8a621761eb4b852c6a9c1ebf",
"size": "917",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_request_for_me.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2373684"
},
{
"name": "Shell",
"bytes": "1664"
}
],
"symlink_target": ""
} |
import pytest
from tc_common import BaseTest
class TestMySQLBackUp(BaseTest):
    """Tests for the tencentcloud.mysql-backup Cloud Custodian resource."""
    @pytest.mark.vcr
    def test_mysql_backup_create_time(self):
        # Age filter on the 'Date' field with a threshold of 0 days matches
        # any backup created in the past, i.e. effectively all backups; the
        # recorded VCR cassette must therefore return at least one resource.
        policy = self.load_policy(
            {
                "name": "test_mysql_backup_create_time",
                "resource": "tencentcloud.mysql-backup",
                "filters": [
                    {
                        "type": "value",
                        "key": "Date",
                        "value": 0,
                        "value_type": "age",
                        "op": "greater-than"
                    }
                ]
            }
        )
        resources = policy.run()
        assert len(resources) > 0
| {
"content_hash": "c62d665d44d519dbeb5125438f6769bd",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 56,
"avg_line_length": 27.92,
"alnum_prop": 0.4040114613180516,
"repo_name": "thisisshi/cloud-custodian",
"id": "0768787cb8507863182f862ccb52b9c74dca6aca",
"size": "777",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/c7n_tencentcloud/tests/test_tc_mysql_backup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2126"
},
{
"name": "Go",
"bytes": "146637"
},
{
"name": "HCL",
"bytes": "62085"
},
{
"name": "Jinja",
"bytes": "19775"
},
{
"name": "Makefile",
"bytes": "14242"
},
{
"name": "PowerShell",
"bytes": "1804"
},
{
"name": "Python",
"bytes": "6684814"
},
{
"name": "Shell",
"bytes": "15323"
},
{
"name": "Smarty",
"bytes": "359"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import os
import signal
import subprocess
import sys
from six import print_
from ccmlib import common, repository
from ccmlib.cluster import Cluster
from ccmlib.cluster_factory import ClusterFactory
from ccmlib.cmds.command import Cmd
from ccmlib.common import ArgumentError, get_default_signals
from ccmlib.dse_cluster import DseCluster
from ccmlib.dse_node import DseNode
from ccmlib.node import Node, NodeError
# Names of the ccm cluster subcommands implemented in this module; each entry
# maps to a Cluster<Name>Cmd class resolved by the ccm command dispatcher.
CLUSTER_CMDS = [
    "create",
    "add",
    "populate",
    "list",
    "switch",
    "status",
    "remove",
    "clear",
    "liveset",
    "start",
    "stop",
    "flush",
    "compact",
    "stress",
    "updateconf",
    "updatedseconf",
    "updatelog4j",
    "cli",
    "setdir",
    "bulkload",
    "setlog",
    "scrub",
    "verify",
    "invalidatecache",
    "checklogerror",
    "showlastlog",
    "jconsole",
    "setworkload"
]
def commands():
    # Registry hook: the ccm driver calls this to discover the cluster
    # subcommands this module provides.
    return CLUSTER_CMDS
def parse_populate_count(v):
    """Parse a ccm node-count specification.

    `None` passes through unchanged; a plain number such as "3" yields the
    int 3; a colon-separated list such as "2:3" (one count per datacenter)
    yields the list of ints [2, 3].
    """
    if v is None:
        return None
    counts = [int(part) for part in v.split(':')]
    return counts[0] if len(counts) == 1 else counts
class ClusterCreateCmd(Cmd):
    """`ccm create`: build a new (Cassandra or DSE) cluster definition."""

    options_list = [
        (['--no-switch'], {'action': "store_true", 'dest': "no_switch", 'help': "Don't switch to the newly created cluster", 'default': False}),
        (['-p', '--partitioner'], {'type': "string", 'dest': "partitioner", 'help': "Set the cluster partitioner class"}),
        (['-v', "--version"], {'type': "string", 'dest': "version", 'help': "Download and use provided cassandra or dse version. If version is of the form 'git:<branch name>', then the specified cassandra branch will be downloaded from the git repo and compiled. (takes precedence over --install-dir)", 'default': None}),
        (['-o', "--opsc"], {'type': "string", 'dest': "opscenter", 'help': "Download and use provided opscenter version to install with DSE. Will have no effect on cassandra installs)", 'default': None}),
        (["--dse"], {'action': "store_true", 'dest': "dse", 'help': "Use with -v to indicate that the version being loaded is DSE"}),
        (["--dse-username"], {'type': "string", 'dest': "dse_username", 'help': "The username to use to download DSE with", 'default': None}),
        (["--dse-password"], {'type': "string", 'dest': "dse_password", 'help': "The password to use to download DSE with", 'default': None}),
        (["--dse-credentials"], {'type': "string", 'dest': "dse_credentials_file", 'help': "An ini-style config file containing the dse_username and dse_password under a dse_credentials section. [default to {}/.dse.ini if it exists]".format(common.get_default_path_display_name()), 'default': None}),
        (["--install-dir"], {'type': "string", 'dest': "install_dir", 'help': "Path to the cassandra or dse directory to use [default %default]", 'default': "./"}),
        (['-n', '--nodes'], {'type': "string", 'dest': "nodes", 'help': "Populate the new cluster with that number of nodes (a single int or a colon-separate list of ints for multi-dc setups)"}),
        (['-i', '--ipprefix'], {'type': "string", 'dest': "ipprefix", 'help': "Ipprefix to use to create the ip of a node while populating"}),
        (['-I', '--ip-format'], {'type': "string", 'dest': "ipformat", 'help': "Format to use when creating the ip of a node (supports enumerating ipv6-type addresses like fe80::%d%lo0)"}),
        (['-s', "--start"], {'action': "store_true", 'dest': "start_nodes", 'help': "Start nodes added through -s", 'default': False}),
        (['-d', "--debug"], {'action': "store_true", 'dest': "debug", 'help': "If -s is used, show the standard output when starting the nodes", 'default': False}),
        (['-b', "--binary-protocol"], {'action': "store_true", 'dest': "binary_protocol", 'help': "Enable the binary protocol (starting from C* 1.2.5 the binary protocol is started by default and this option is a no-op)", 'default': False}),
        (['-D', "--debug-log"], {'action': "store_true", 'dest': "debug_log", 'help': "With -n, sets debug logging on the new nodes", 'default': False}),
        (['-T', "--trace-log"], {'action': "store_true", 'dest': "trace_log", 'help': "With -n, sets trace logging on the new nodes", 'default': False}),
        (["--vnodes"], {'action': "store_true", 'dest': "vnodes", 'help': "Use vnodes (256 tokens). Must be paired with -n.", 'default': False}),
        (['--jvm_arg'], {'action': "append", 'dest': "jvm_args", 'help': "Specify a JVM argument", 'default': []}),
        (['--profile'], {'action': "store_true", 'dest': "profile", 'help': "Start the nodes with yourkit agent (only valid with -s)", 'default': False}),
        (['--profile-opts'], {'type': "string", 'action': "store", 'dest': "profile_options", 'help': "Yourkit options when profiling", 'default': None}),
        (['--ssl'], {'type': "string", 'dest': "ssl_path", 'help': "Path to keystore.jks and cassandra.crt files (and truststore.jks [not required])", 'default': None}),
        (['--require_client_auth'], {'action': "store_true", 'dest': "require_client_auth", 'help': "Enable client authentication (only vaid with --ssl)", 'default': False}),
        (['--node-ssl'], {'type': "string", 'dest': "node_ssl_path", 'help': "Path to keystore.jks and truststore.jks for internode encryption", 'default': None}),
        (['--pwd-auth'], {'action': "store_true", 'dest': "node_pwd_auth", 'help': "Change authenticator to PasswordAuthenticator (default credentials)", 'default': False}),
        (['--byteman'], {'action': "store_true", 'dest': "install_byteman", 'help': "Start nodes with byteman agent running", 'default': False}),
        (['--root'], {'action': "store_true", 'dest': "allow_root", 'help': "Allow CCM to start cassandra as root", 'default': False}),
        (['--datadirs'], {'type': "int", 'dest': "datadirs", 'help': "Number of data directories to use", 'default': 1}),
    ]
    descr_text = "Create a new cluster"
    usage = "usage: ccm create [options] cluster_name"

    def validate(self, parser, options, args):
        """Check option combinations and the install dir before `run`."""
        Cmd.validate(self, parser, options, args, cluster_name=True)
        # -i and -I are two competing ways of deriving node IPs.
        if options.ipprefix and options.ipformat:
            parser.print_help()
            parser.error("%s and %s may not be used together" % (parser.get_option('-i'), parser.get_option('-I')))
        self.nodes = parse_populate_count(options.nodes)
        # --vnodes only makes sense when this command also populates nodes.
        if self.options.vnodes and self.nodes is None:
            print_("Can't set --vnodes if not populating cluster in this command.")
            parser.print_help()
            exit(1)
        # Without an explicit version, the install dir must be a valid local
        # cassandra/dse tree whose required JDK matches the one installed.
        if not options.version:
            try:
                common.validate_install_dir(options.install_dir)
            except ArgumentError:
                parser.print_help()
                parser.error("%s is not a valid cassandra directory. You must define a cassandra dir or version." % options.install_dir)
            if common.get_dse_version(options.install_dir) is not None:
                common.assert_jdk_valid_for_cassandra_version(common.get_dse_cassandra_version(options.install_dir))
            else:
                common.assert_jdk_valid_for_cassandra_version(common.get_version_from_build(options.install_dir))
        # NOTE(review): the path string below relies on '\w', '\s', '\j' not
        # being recognized escapes; it is kept byte-for-byte as authored.
        if common.is_win() and os.path.exists('c:\windows\system32\java.exe'):
            print_("""WARN: c:\windows\system32\java.exe exists.
This may cause registry issues, and jre7 to be used, despite jdk8 being installed.
""")

    def run(self):
        """Create the cluster, optionally populate and start its nodes."""
        try:
            # A DSE cluster is requested explicitly (--dse) or detected from
            # the install dir when no version was given.
            if self.options.dse or (not self.options.version and common.isDse(self.options.install_dir)):
                cluster = DseCluster(self.path, self.name, install_dir=self.options.install_dir, version=self.options.version, dse_username=self.options.dse_username, dse_password=self.options.dse_password, dse_credentials_file=self.options.dse_credentials_file, opscenter=self.options.opscenter, verbose=True)
            else:
                cluster = Cluster(self.path, self.name, install_dir=self.options.install_dir, version=self.options.version, verbose=True)
        except OSError as e:
            import traceback
            print_('Cannot create cluster: %s\n%s' % (str(e), traceback.format_exc()), file=sys.stderr)
            exit(1)
        if self.options.partitioner:
            cluster.set_partitioner(self.options.partitioner)
        # From C* 1.2.5 on, the binary protocol is always enabled.
        if cluster.cassandra_version() >= "1.2.5":
            self.options.binary_protocol = True
        if self.options.binary_protocol:
            cluster.set_configuration_options({'start_native_transport': True})
        if cluster.cassandra_version() >= "1.2" and self.options.vnodes:
            cluster.set_configuration_options({'num_tokens': 256})
        if not self.options.no_switch:
            common.switch_cluster(self.path, self.name)
            print_('Current cluster is now: %s' % self.name)
        # Default node-IP scheme when the user specified neither -i nor -I.
        if not (self.options.ipprefix or self.options.ipformat):
            self.options.ipformat = '127.0.0.%d'
        if self.options.ssl_path:
            cluster.enable_ssl(self.options.ssl_path, self.options.require_client_auth)
        if self.options.node_ssl_path:
            cluster.enable_internode_ssl(self.options.node_ssl_path)
        if self.options.node_pwd_auth:
            cluster.enable_pwd_auth()
        if self.options.datadirs:
            cluster.set_datadir_count(self.options.datadirs)
        # Populate (and optionally start) nodes only when -n was given.
        if self.nodes is not None:
            try:
                if self.options.debug_log:
                    cluster.set_log_level("DEBUG")
                if self.options.trace_log:
                    cluster.set_log_level("TRACE")
                cluster.populate(self.nodes, self.options.debug, use_vnodes=self.options.vnodes, ipprefix=self.options.ipprefix, ipformat=self.options.ipformat, install_byteman=self.options.install_byteman)
                if self.options.start_nodes:
                    profile_options = None
                    if self.options.profile:
                        profile_options = {}
                        if self.options.profile_options:
                            profile_options['options'] = self.options.profile_options
                    if cluster.start(verbose=self.options.debug, wait_for_binary_proto=self.options.binary_protocol, jvm_args=self.options.jvm_args, profile_options=profile_options, allow_root=self.options.allow_root) is None:
                        details = ""
                        if not self.options.debug_log:
                            details = " (you can use --debug for more information)"
                        print_("Error starting nodes, see above for details%s" % details, file=sys.stderr)
            except common.ArgumentError as e:
                print_(str(e), file=sys.stderr)
                exit(1)
class ClusterAddCmd(Cmd):
    """`ccm add`: add a single node to the current cluster."""

    options_list = [
        (['-b', '--auto-bootstrap'], {'action': "store_true", 'dest': "bootstrap", 'help': "Set auto bootstrap for the node", 'default': False}),
        (['-s', '--seeds'], {'action': "store_true", 'dest': "is_seed", 'help': "Configure this node as a seed", 'default': False}),
        (['-i', '--itf'], {'type': "string", 'dest': "itfs", 'help': "Set host and port for thrift, the binary protocol and storage (format: host[:port])"}),
        (['-t', '--thrift-itf'], {'type': "string", 'dest': "thrift_itf", 'help': "Set the thrift host and port for the node (format: host[:port])"}),
        (['-l', '--storage-itf'], {'type': "string", 'dest': "storage_itf", 'help': "Set the storage (cassandra internal) host and port for the node (format: host[:port])"}),
        (['--binary-itf'], {'type': "string", 'dest': "binary_itf", 'help': "Set the binary protocol host and port for the node (format: host[:port])."}),
        (['-j', '--jmx-port'], {'type': "string", 'dest': "jmx_port", 'help': "JMX port for the node", 'default': "7199"}),
        (['-r', '--remote-debug-port'], {'type': "string", 'dest': "remote_debug_port", 'help': "Remote Debugging Port for the node", 'default': "2000"}),
        (['-n', '--token'], {'type': "string", 'dest': "initial_token", 'help': "Initial token for the node", 'default': None}),
        (['-d', '--data-center'], {'type': "string", 'dest': "data_center", 'help': "Datacenter name this node is part of", 'default': None}),
        (['--dse'], {'action': "store_true", 'dest': "dse_node", 'help': "Add node to DSE Cluster", 'default': False}),
    ]
    descr_text = "Add a new node to the current cluster"
    usage = "usage: ccm add [options] node_name"

    def validate(self, parser, options, args):
        """Resolve interfaces/ports and reject name or port collisions."""
        Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True, load_node=False)
        # Either the catch-all -i or each individual interface must be given.
        if options.itfs is None and (options.thrift_itf is None or options.storage_itf is None or options.binary_itf is None):
            print_('Missing thrift and/or storage and/or binary protocol interfaces or jmx port', file=sys.stderr)
            parser.print_help()
            exit(1)
        if self.name in self.cluster.nodes:
            print_("This name is already in use. Choose another.", file=sys.stderr)
            parser.print_help()
            exit(1)
        used_jmx_ports = [node.jmx_port for node in self.cluster.nodelist()]
        if options.jmx_port in used_jmx_ports:
            print_("This JMX port is already in use. Choose another.", file=sys.stderr)
            parser.print_help()
            exit(1)
        # Any interface not set explicitly falls back to the -i value.
        if options.thrift_itf is None:
            options.thrift_itf = options.itfs
        if options.storage_itf is None:
            options.storage_itf = options.itfs
        if options.binary_itf is None:
            options.binary_itf = options.itfs
        # parse_interface supplies the protocol's default port when missing.
        self.thrift = common.parse_interface(options.thrift_itf, 9160)
        self.storage = common.parse_interface(options.storage_itf, 7000)
        self.binary = common.parse_interface(options.binary_itf, 9042)
        # Thrift and the binary protocol must share the same address.
        if self.binary[0] != self.thrift[0]:
            print_('Cannot set a binary address different from the thrift one', file=sys.stderr)
            exit(1)
        self.jmx_port = options.jmx_port
        self.remote_debug_port = options.remote_debug_port
        self.initial_token = options.initial_token

    def run(self):
        """Instantiate the (Dse)Node and register it with the cluster."""
        try:
            if self.options.dse_node:
                node = DseNode(self.name, self.cluster, self.options.bootstrap, self.thrift, self.storage, self.jmx_port, self.remote_debug_port, self.initial_token, binary_interface=self.binary)
            else:
                node = Node(self.name, self.cluster, self.options.bootstrap, self.thrift, self.storage, self.jmx_port, self.remote_debug_port, self.initial_token, binary_interface=self.binary)
            self.cluster.add(node, self.options.is_seed, self.options.data_center)
        except common.ArgumentError as e:
            print_(str(e), file=sys.stderr)
            exit(1)
class ClusterPopulateCmd(Cmd):
    """``ccm populate``: create a group of new nodes with default options."""
    options_list = [
        (['-n', '--nodes'], {'type': "string", 'dest': "nodes", 'help': "Number of nodes to populate with (a single int or a colon-separate list of ints for multi-dc setups)"}),
        (['-d', '--debug'], {'action': "store_true", 'dest': "debug", 'help': "Enable remote debugging options", 'default': False}),
        (['--vnodes'], {'action': "store_true", 'dest': "vnodes", 'help': "Populate using vnodes", 'default': False}),
        (['-i', '--ipprefix'], {'type': "string", 'dest': "ipprefix", 'help': "Ipprefix to use to create the ip of a node"}),
        (['-I', '--ip-format'], {'type': "string", 'dest': "ipformat", 'help': "Format to use when creating the ip of a node (supports enumerating ipv6-type addresses like fe80::%d%lo0)"}),
    ]
    descr_text = "Add a group of new nodes with default options"
    usage = "usage: ccm populate -n <node count> {-d}"
    def validate(self, parser, options, args):
        """Validate the node count and the mutually exclusive ip options.

        ``parser.error`` raises SystemExit itself, so no explicit ``exit``
        call is needed after it (the original trailing ``exit(1)`` was
        unreachable dead code and has been removed).
        """
        Cmd.validate(self, parser, options, args, load_cluster=True)
        if options.ipprefix and options.ipformat:
            parser.print_help()
            parser.error("%s and %s may not be used together" % (parser.get_option('-i'), parser.get_option('-I')))
        self.nodes = parse_populate_count(options.nodes)
        if self.nodes is None:
            parser.print_help()
            parser.error("Not a valid number of nodes. Did you use -n?")
    def run(self):
        """Populate the cluster, defaulting the ip format when none is given."""
        try:
            # vnodes are only supported from Cassandra 1.2 onwards.
            if self.cluster.cassandra_version() >= "1.2" and self.options.vnodes:
                self.cluster.set_configuration_options({'num_tokens': 256})
            if not (self.options.ipprefix or self.options.ipformat):
                self.options.ipformat = '127.0.0.%d'
            self.cluster.populate(self.nodes, self.options.debug, use_vnodes=self.options.vnodes, ipprefix=self.options.ipprefix, ipformat=self.options.ipformat)
        except common.ArgumentError as e:
            print_(str(e), file=sys.stderr)
            exit(1)
class ClusterListCmd(Cmd):
    """``ccm list``: list existing clusters, marking the active one with '*'."""
    descr_text = "List existing clusters"
    usage = "usage: ccm list [options]"
    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args)
    def run(self):
        """Print one line per directory under self.path holding a cluster.conf."""
        try:
            current = common.current_cluster_name(self.path)
        except Exception:
            # No current cluster (or unreadable CURRENT marker): mark nothing.
            current = ''
        # Local renamed from `dir` to avoid shadowing the builtin.
        for cluster_dir in os.listdir(self.path):
            if os.path.exists(os.path.join(self.path, cluster_dir, 'cluster.conf')):
                print_(" %s%s" % ('*' if current == cluster_dir else ' ', cluster_dir))
class ClusterSwitchCmd(Cmd):
    """``ccm switch``: make another existing cluster the active one."""
    descr_text = "Switch of current (active) cluster"
    usage = "usage: ccm switch [options] cluster_name"
    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, cluster_name=True)
        # A directory only counts as a cluster if it contains a cluster.conf.
        conf_file = os.path.join(self.path, self.name, 'cluster.conf')
        if not os.path.exists(conf_file):
            print_("%s does not appear to be a valid cluster (use ccm list to view valid clusters)" % self.name, file=sys.stderr)
            exit(1)
    def run(self):
        common.switch_cluster(self.path, self.name)
class ClusterStatusCmd(Cmd):
    """``ccm status``: display the status of every node of the cluster."""
    options_list = [
        (['-v', '--verbose'], {'action': "store_true", 'dest': "verbose", 'help': "Print full information on all nodes", 'default': False}),
    ]
    descr_text = "Display status on the current cluster"
    usage = "usage: ccm status [options]"
    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)
    def run(self):
        # Delegates entirely to the cluster object's own display logic.
        self.cluster.show(self.options.verbose)
class ClusterRemoveCmd(Cmd):
    """``ccm remove``: delete a cluster and all its data.

    With a positional argument, removes that cluster; without one, removes
    the currently active cluster.
    """
    descr_text = "Remove the current or specified cluster (delete all data)"
    usage = "usage: ccm remove [options] [cluster_name]"
    def validate(self, parser, options, args):
        # `other_cluster` stays None when the current cluster is the target.
        self.other_cluster = None
        if len(args) > 0:
            # Setup to remove the specified cluster:
            Cmd.validate(self, parser, options, args)
            self.other_cluster = args[0]
            if not os.path.exists(os.path.join(
                    self.path, self.other_cluster, 'cluster.conf')):
                print_("%s does not appear to be a valid cluster"
                       " (use ccm list to view valid clusters)"
                       % self.other_cluster, file=sys.stderr)
                exit(1)
        else:
            # Setup to remove the current cluster:
            Cmd.validate(self, parser, options, args, load_cluster=True)
    def run(self):
        if self.other_cluster:
            # Remove the specified cluster:
            cluster = ClusterFactory.load(self.path, self.other_cluster)
            cluster.remove()
            # Remove CURRENT flag if the specified cluster is the current cluster:
            if self.other_cluster == common.current_cluster_name(self.path):
                os.remove(os.path.join(self.path, 'CURRENT'))
        else:
            # Remove the current cluster:
            self.cluster.remove()
            os.remove(os.path.join(self.path, 'CURRENT'))
class ClusterClearCmd(Cmd):
    """``ccm clear``: stop all nodes and wipe the current cluster's data."""
    descr_text = "Clear the current cluster data (and stop all nodes)"
    usage = "usage: ccm clear [options]"
    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)
    def run(self):
        self.cluster.clear()
class ClusterLivesetCmd(Cmd):
    """``ccm liveset``: print comma-separated storage addresses of live nodes."""
    descr_text = "Print a comma-separated list of addresses of running nodes (helpful in scripts)"
    usage = "usage: ccm liveset [options]"
    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)
    def run(self):
        # Renamed from the ambiguous single-letter `l` (easily confused with
        # `1`/`I`, flagged by pycodestyle E741). Index 0 of the storage
        # interface pair is the host address.
        live_addresses = [node.network_interfaces['storage'][0] for node in list(self.cluster.nodes.values()) if node.is_live()]
        print_(",".join(live_addresses))
class ClusterSetdirCmd(Cmd):
    """``ccm setdir``: point the cluster (or one node) at a cassandra/dse install."""
    options_list = [
        (['-v', "--version"], {'type': "string", 'dest': "version", 'help': "Download and use provided cassandra or dse version. If version is of the form 'git:<branch name>', then the specified cassandra branch will be downloaded from the git repo and compiled. (takes precedence over --install-dir)", 'default': None}),
        (["--install-dir"], {'type': "string", 'dest': "install_dir", 'help': "Path to the cassandra or dse directory to use [default %default]", 'default': "./"}),
        (['-n', '--node'], {'type': "string", 'dest': "node", 'help': "Set directory only for the specified node"}),
    ]
    descr_text = "Set the install directory (cassandra or dse) to use"
    usage = "usage: ccm setdir [options]"
    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)
    def run(self):
        try:
            # Default target is the whole cluster; -n narrows it to one node.
            target = self.cluster
            if self.options.node:
                target = self.cluster.nodes.get(self.options.node)
                if not target:
                    print_("Node not found: %s" % self.options.node)
                    return
            target.set_install_dir(install_dir=self.options.install_dir, version=self.options.version, verbose=True)
        except common.ArgumentError as e:
            print_(str(e), file=sys.stderr)
            exit(1)
class ClusterClearrepoCmd(Cmd):
    """``ccm clearrepo``: delete all cassandra sources downloaded by ccm."""
    descr_text = "Cleanup downloaded cassandra sources"
    usage = "usage: ccm clearrepo [options]"
    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args)
    def run(self):
        repository.clean_all()
class ClusterStartCmd(Cmd):
    """``ccm cluster start``: start every node of the cluster that is not running."""
    options_list = [
        (['-v', '--verbose'], {'action': "store_true", 'dest': "verbose", 'help': "Print standard output of cassandra process", 'default': False}),
        (['--no-wait'], {'action': "store_true", 'dest': "no_wait", 'help': "Do not wait for cassandra node to be ready. Overrides all other wait options.", 'default': False}),
        # This option (wait-other-notice) is now deprecated, as it was never respected
        (['--wait-other-notice'], {'action': "store_true", 'dest': "deprecate", 'help': "DEPRECATED/IGNORED: Use '--skip-wait-other-notice' instead. This is now on by default.", 'default': False}),
        (['--skip-wait-other-notice'], {'action': "store_false", 'dest': "wait_other_notice", 'help': "Skip waiting until all live nodes of the cluster have marked the other nodes UP", 'default': True}),
        (['--wait-for-binary-proto'], {'action': "store_true", 'dest': "wait_for_binary_proto", 'help': "Wait for the binary protocol to start", 'default': False}),
        (['--jvm_arg'], {'action': "append", 'dest': "jvm_args", 'help': "Specify a JVM argument", 'default': []}),
        (['--profile'], {'action': "store_true", 'dest': "profile", 'help': "Start the nodes with yourkit agent (only valid with -s)", 'default': False}),
        (['--profile-opts'], {'type': "string", 'action': "store", 'dest': "profile_options", 'help': "Yourkit options when profiling", 'default': None}),
        (['--quiet-windows'], {'action': "store_true", 'dest': "quiet_start", 'help': "Pass -q on Windows 2.2.4+ and 3.0+ startup. Ignored on linux.", 'default': False}),
        (['--root'], {'action': "store_true", 'dest': "allow_root", 'help': "Allow CCM to start cassandra as root", 'default': False}),
    ]
    descr_text = "Start all the non started nodes of the current cluster"
    usage = "usage: ccm cluster start [options]"
    def validate(self, parser, options, args):
        """Reject contradictory wait options; warn on the deprecated one."""
        Cmd.validate(self, parser, options, args, load_cluster=True)
        if self.options.deprecate:
            print_("WARN: --wait-other-notice is deprecated. Please see the help text.")
        # --no-wait overrides all waiting, so combining it with an explicit
        # wait option is a user error.
        if self.options.no_wait and (self.options.wait_for_binary_proto or self.options.deprecate):
            print_("ERROR: --no-wait was specified alongside one or more wait options. This is invalid.")
            exit(1)
    def run(self):
        """Start the cluster, exiting non-zero if startup fails."""
        try:
            # profile_options stays None unless --profile was given; the
            # optional --profile-opts string is forwarded under 'options'.
            profile_options = None
            if self.options.profile:
                profile_options = {}
                if self.options.profile_options:
                    profile_options['options'] = self.options.profile_options
            if len(self.cluster.nodes) == 0:
                print_("No node in this cluster yet. Use the populate command before starting.")
                exit(1)
            # cluster.start returns None on failure.
            if self.cluster.start(no_wait=self.options.no_wait,
                                  wait_other_notice=self.options.wait_other_notice,
                                  wait_for_binary_proto=self.options.wait_for_binary_proto,
                                  verbose=self.options.verbose,
                                  jvm_args=self.options.jvm_args,
                                  profile_options=profile_options,
                                  quiet_start=self.options.quiet_start,
                                  allow_root=self.options.allow_root) is None:
                details = ""
                if not self.options.verbose:
                    details = " (you can use --verbose for more information)"
                print_("Error starting nodes, see above for details%s" % details, file=sys.stderr)
                exit(1)
        except NodeError as e:
            # Relay the failing node's stderr to help diagnosis.
            print_(str(e), file=sys.stderr)
            print_("Standard error output is:", file=sys.stderr)
            for line in e.process.stderr:
                print_(line.rstrip('\n'), file=sys.stderr)
            exit(1)
class ClusterStopCmd(Cmd):
    """``ccm cluster stop``: stop every node, with a selectable shutdown signal."""
    options_list = [
        (['-v', '--verbose'], {'action': "store_true", 'dest': "verbose", 'help': "Print nodes that were not running", 'default': False}),
        (['--no-wait'], {'action': "store_true", 'dest': "no_wait", 'help': "Do not wait for the node to be stopped", 'default': False}),
        (['-g', '--gently'], {'action': "store_const", 'dest': "signal_event", 'help': "Shut down gently (default)", 'const': signal.SIGTERM, 'default': signal.SIGTERM}),
        (['--hang-up'], {'action': "store_const", 'dest': "signal_event", 'help': "Shut down via hang up (kill -1)", 'const': get_default_signals()['1']}),
        (['--not-gently'], {'action': "store_const", 'dest': "signal_event", 'help': "Shut down immediately (kill -9)", 'const': get_default_signals()['9']}),
    ]
    descr_text = "Stop all the nodes of the cluster"
    usage = "usage: ccm cluster stop [options] name"
    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)
    def run(self):
        try:
            # cluster.stop returns the nodes that were not running to begin with.
            not_running = self.cluster.stop(wait=not self.options.no_wait, signal_event=self.options.signal_event)
            if self.options.verbose and len(not_running) > 0:
                sys.stdout.write("The following nodes were not running: ")
                for node in not_running:
                    sys.stdout.write(node.name + " ")
                print_("")
        except NodeError as e:
            print_(str(e), file=sys.stderr)
            exit(1)
class _ClusterNodetoolCmd(Cmd):
    """Base class for commands that run one fixed nodetool command cluster-wide.

    Subclasses only set :attr:`nodetool_cmd` plus their usage/descr_text.
    """
    usage = "This is a private class, how did you get here?"
    descr_text = "This is a private class, how did you get here?"
    nodetool_cmd = ''
    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)
    def run(self):
        self.cluster.nodetool(self.nodetool_cmd)
class ClusterFlushCmd(_ClusterNodetoolCmd):
    # ``ccm cluster flush``: run `nodetool flush` on every running node.
    usage = "usage: ccm cluster flush [options] name"
    nodetool_cmd = 'flush'
    descr_text = "Flush all (running) nodes of the cluster"
class ClusterCompactCmd(_ClusterNodetoolCmd):
    # ``ccm cluster compact``: run `nodetool compact` on every running node.
    usage = "usage: ccm cluster compact [options] name"
    nodetool_cmd = 'compact'
    descr_text = "Compact all (running) node of the cluster"
class ClusterDrainCmd(_ClusterNodetoolCmd):
    # ``ccm cluster drain``: run `nodetool drain` on every running node.
    usage = "usage: ccm cluster drain [options] name"
    nodetool_cmd = 'drain'
    descr_text = "Drain all (running) node of the cluster"
class ClusterStressCmd(Cmd):
    """``ccm stress``: run the stress tool against all live nodes."""
    descr_text = "Run stress using all live nodes"
    usage = "usage: ccm stress [options] [stress_options]"
    ignore_unknown_options = True
    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)
        # Unknown options are forwarded verbatim to the stress tool.
        self.stress_options = args + parser.get_ignored()
    def run(self):
        try:
            rc = self.cluster.stress(self.stress_options)
            # Propagate the stress tool's exit code.
            exit(rc)
        except Exception as e:
            print_(e, file=sys.stderr)
            exit(1)
class ClusterUpdateconfCmd(Cmd):
    """``ccm updateconf``: update cassandra.yaml settings on every node."""
    options_list = [
        (['--no-hh', '--no-hinted-handoff'], {'action': "store_false", 'dest': "hinted_handoff", 'default': True, 'help': "Disable hinted handoff"}),
        (['--batch-cl', '--batch-commit-log'], {'action': "store_true", 'dest': "cl_batch", 'default': None, 'help': "Set commit log to batch mode"}),
        (['--periodic-cl', '--periodic-commit-log'], {'action': "store_true", 'dest': "cl_periodic", 'default': None, 'help': "Set commit log to periodic mode"}),
        (['--rt', '--rpc-timeout'], {'action': "store", 'type': 'int', 'dest': "rpc_timeout", 'help': "Set rpc timeout"}),
        (['-y', '--yaml'], {'action': "store_true", 'dest': "literal_yaml", 'default': False, 'help': "If enabled, treat argument as yaml, not kv pairs. Option syntax looks like ccm updateconf -y 'a: [b: [c,d]]'"}),
    ]
    descr_text = "Update the cassandra config files for all nodes"
    usage = "usage: ccm updateconf [options] [ new_setting | ... ], where new_setting should be a string of the form 'compaction_throughput_mb_per_sec: 32'; nested options can be separated with a period like 'client_encryption_options.enabled: false'"
    def validate(self, parser, options, args):
        """Parse the settings; batch and periodic commitlog are mutually exclusive."""
        Cmd.validate(self, parser, options, args, load_cluster=True)
        try:
            self.setting = common.parse_settings(args, literal_yaml=self.options.literal_yaml)
            if self.options.cl_batch and self.options.cl_periodic:
                print_("Can't set commitlog to be both batch and periodic.{}".format(os.linesep))
                parser.print_help()
                exit(1)
        except common.ArgumentError as e:
            print_(str(e), file=sys.stderr)
            exit(1)
    def run(self):
        """Apply settings, mapping --rpc-timeout to version-specific yaml keys."""
        self.setting['hinted_handoff_enabled'] = self.options.hinted_handoff
        if self.options.rpc_timeout is not None:
            if self.cluster.cassandra_version() < "1.2":
                # Pre-1.2 Cassandra has a single rpc timeout knob ...
                self.setting['rpc_timeout_in_ms'] = self.options.rpc_timeout
            else:
                # ... while 1.2+ splits it into per-operation timeouts.
                self.setting['read_request_timeout_in_ms'] = self.options.rpc_timeout
                self.setting['range_request_timeout_in_ms'] = self.options.rpc_timeout
                self.setting['write_request_timeout_in_ms'] = self.options.rpc_timeout
                self.setting['truncate_request_timeout_in_ms'] = self.options.rpc_timeout
                self.setting['request_timeout_in_ms'] = self.options.rpc_timeout
        self.cluster.set_configuration_options(values=self.setting)
        if self.options.cl_batch:
            self.cluster.set_batch_commitlog(True)
        if self.options.cl_periodic:
            self.cluster.set_batch_commitlog(False)
class ClusterUpdatedseconfCmd(Cmd):
    """``ccm updatedseconf``: update dse.yaml settings on every node."""
    options_list = [
        (['-y', '--yaml'], {'action': "store_true", 'dest': "literal_yaml", 'default': False, 'help': "Pass in literal yaml string. Option syntax looks like ccm updatedseconf -y 'a: [b: [c,d]]'"}),
    ]
    descr_text = "Update the dse config files for all nodes"
    usage = "usage: ccm updatedseconf [options] [ new_setting | ... ], where new_setting should be a string of the form 'max_solr_concurrency_per_core: 2'; nested options can be separated with a period like 'cql_slow_log_options.enabled: true'"
    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)
        try:
            self.setting = common.parse_settings(args, literal_yaml=self.options.literal_yaml)
        except common.ArgumentError as e:
            print_(str(e), file=sys.stderr)
            exit(1)
    def run(self):
        self.cluster.set_dse_configuration_options(values=self.setting)
#
# Class implements the functionality of updating log4j-server.properties
# on ALL nodes by copying the given config into
# ~/.ccm/name-of-cluster/nodeX/conf/log4j-server.properties
#
class ClusterUpdatelog4jCmd(Cmd):
    """``ccm updatelog4j``: install a new log4j-server.properties on all nodes."""
    options_list = [
        (['-p', '--path'], {'type': "string", 'dest': "log4jpath", 'help': "Path to new Cassandra log4j configuration file"}),
    ]
    descr_text = "Update the Cassandra log4j-server.properties configuration file on all nodes"
    usage = "usage: ccm updatelog4j -p <log4j config>"
    ignore_unknown_options = True
    def validate(self, parser, options, args):
        """Require -p/--path; exit with status 1 when it is missing."""
        Cmd.validate(self, parser, options, args, load_cluster=True)
        try:
            self.log4jpath = options.log4jpath
            if self.log4jpath is None:
                # Fixed typo in the user-facing message ("congiguration").
                raise KeyError("[Errno] -p or --path <path of new log4j configuration file> is not provided")
        except common.ArgumentError as e:
            print_(str(e), file=sys.stderr)
            exit(1)
        except KeyError as e:
            print_(str(e), file=sys.stderr)
            exit(1)
    def run(self):
        """Copy the given config into every node's conf directory."""
        try:
            self.cluster.update_log4j(self.log4jpath)
        except common.ArgumentError as e:
            print_(str(e), file=sys.stderr)
            exit(1)
class ClusterCliCmd(Cmd):
    """``ccm cli``: launch cassandra-cli connected to a live node."""
    options_list = [
        (['-x', '--exec'], {'type': "string", 'dest': "cmds", 'default': None, 'help': "Execute the specified commands and exit"}),
        (['-v', '--verbose'], {'action': "store_true", 'dest': "verbose", 'help': "With --exec, show cli output after completion", 'default': False}),
    ]
    descr_text = "Launch cassandra cli connected to some live node (if any)"
    usage = "usage: ccm cli [options] [cli_options]"
    ignore_unknown_options = True
    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)
        # Forward unrecognized options (and everything after the subcommand)
        # straight to the cli binary.
        self.cli_options = parser.get_ignored() + args[1:]
    def run(self):
        out, err, rc = self.cluster.run_cli(self.options.cmds, self.cli_options)
        if self.options.verbose:
            print_("CLI OUTPUT:\n-------------------------------")
            print_(out)
            print_("-------------------------------\nCLI ERROR:\n-------------------------------")
            print_(err)
class ClusterBulkloadCmd(Cmd):
    """``ccm bulkload``: stream sstables into the cluster via a live node."""
    descr_text = "Bulkload files into the cluster by connecting to some live node (if any)"
    usage = "usage: ccm bulkload [options] [sstable_dir]"
    ignore_unknown_options = True
    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)
        # Unknown options are forwarded verbatim to the loader.
        self.loader_options = parser.get_ignored() + args
    def run(self):
        self.cluster.bulkload(self.loader_options)
class ClusterScrubCmd(Cmd):
    """``ccm scrub``: scrub sstables for a keyspace/column family."""
    descr_text = "Scrub files"
    usage = "usage: ccm scrub [options] <keyspace> <cf>"
    ignore_unknown_options = True
    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)
        # Unknown options are forwarded verbatim to the scrub tool.
        self.scrub_options = parser.get_ignored() + args
    def run(self):
        self.cluster.scrub(self.scrub_options)
class ClusterVerifyCmd(Cmd):
    """``ccm verify``: verify sstables for a keyspace/column family."""
    descr_text = "Verify files"
    usage = "usage: ccm verify [options] <keyspace> <cf>"
    ignore_unknown_options = True
    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)
        # Unknown options are forwarded verbatim to the verify tool.
        self.verify_options = parser.get_ignored() + args
    def run(self):
        self.cluster.verify(self.verify_options)
class ClusterSetlogCmd(Cmd):
    """``ccm setlog``: set the log level (optionally per class) on all nodes."""
    options_list = [
        (['-c', '--class'], {'type': "string", 'dest': "class_name", 'default': None, 'help': "Optional java class/package. Logging will be set for only this class/package if set"}),
    ]
    descr_text = "Set log level (INFO, DEBUG, ...) with/without Java class for all node of the cluster - require a node restart"
    usage = "usage: ccm setlog [options] level"
    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)
        # The level (e.g. INFO, DEBUG) is a required positional argument.
        if len(args) == 0:
            print_('Missing log level', file=sys.stderr)
            parser.print_help()
            exit(1)
        self.level = args[0]
    def run(self):
        try:
            self.cluster.set_log_level(self.level, self.options.class_name)
        except common.ArgumentError as e:
            print_(str(e), file=sys.stderr)
            exit(1)
class ClusterInvalidatecacheCmd(Cmd):
    """``ccm invalidatecache``: destroy ccm's local git cache."""
    descr_text = "Destroys ccm's local git cache."
    usage = "usage: ccm invalidatecache"
    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args)
    def run(self):
        try:
            common.invalidate_cache()
        except Exception as e:
            # Deletion can fail for filesystem reasons; report and suggest
            # a manual cleanup rather than crashing with a traceback.
            print_(str(e), file=sys.stderr)
            print_("Error while deleting cache. Please attempt manually.")
            exit(1)
class ClusterChecklogerrorCmd(Cmd):
    """``ccm checklogerror``: scan every node's log for error entries."""
    descr_text = "Check for errors in log file of each node."
    usage = "usage: ccm checklogerror"
    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)
    def run(self):
        # grep_log_for_errors() yields one list of lines per matched error.
        for node in self.cluster.nodelist():
            for error_lines in node.grep_log_for_errors():
                for line in error_lines:
                    print_(line)
class ClusterShowlastlogCmd(Cmd):
    """``ccm showlastlog``: open the most recent build log in $PAGER."""
    descr_text = "Show the last.log for the most recent build through your $PAGER"
    usage = "usage: ccm showlastlog"
    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)
    def run(self):
        log = repository.lastlogfilename()
        pager = os.environ.get('PAGER', common.platform_pager())
        # exec replaces the current process with the pager; no return.
        os.execvp(pager, (pager, log))
class ClusterJconsoleCmd(Cmd):
    """``ccm jconsole``: launch jconsole attached to every node's JMX port."""
    descr_text = "Opens jconsole client and connects to all running nodes"
    usage = "usage: ccm jconsole"
    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)
    def run(self):
        # One "localhost:port" argument per node in the cluster.
        jmx_targets = ["localhost:%s" % node.jmx_port for node in self.cluster.nodes.values()]
        try:
            subprocess.call(["jconsole"] + jmx_targets, stderr=sys.stderr)
        except OSError:
            print_("Could not start jconsole. Please make sure jconsole can be found in your $PATH.")
            exit(1)
class ClusterSetworkloadCmd(Cmd):
    """``ccm setworkload``: set DSE workloads on every node of the cluster."""
    descr_text = "Sets the workloads for a DSE cluster"
    usage = "usage: ccm setworkload [cassandra|solr|hadoop|spark|dsefs|cfs|graph],..."
    def validate(self, parser, options, args):
        """Split and validate the comma-separated workload list."""
        Cmd.validate(self, parser, options, args, load_cluster=True)
        # Guard against a missing argument: args[0] would raise IndexError.
        if len(args) == 0:
            print_('Missing workload(s)', file=sys.stderr)
            parser.print_help()
            exit(1)
        self.workloads = args[0].split(',')
        valid_workloads = ['cassandra', 'solr', 'hadoop', 'spark', 'dsefs', 'cfs', 'graph']
        for workload in self.workloads:
            if workload not in valid_workloads:
                # Single formatted string: the old two-argument print_ call
                # produced a double space before "is not a valid workload".
                print_("%s is not a valid workload" % workload)
                exit(1)
    def run(self):
        """Apply the workloads to every node of a populated cluster."""
        try:
            if len(self.cluster.nodes) == 0:
                print_("No node in this cluster yet. Use the populate command before starting.")
                exit(1)
            for node in list(self.cluster.nodes.values()):
                node.set_workloads(workloads=self.workloads)
        except common.ArgumentError as e:
            print_(str(e), file=sys.stderr)
            exit(1)
| {
"content_hash": "6557dc2341a82e85058521885a37bb53",
"timestamp": "",
"source": "github",
"line_count": 857,
"max_line_length": 321,
"avg_line_length": 47.38973162193699,
"alnum_prop": 0.6069977593381429,
"repo_name": "mike-tr-adamson/ccm",
"id": "a1478935d51639edb570d9d09c5e49522c283b72",
"size": "40614",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ccmlib/cmds/cluster_cmds.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "321504"
},
{
"name": "Ruby",
"bytes": "3527"
},
{
"name": "Shell",
"bytes": "2634"
}
],
"symlink_target": ""
} |
from .AlternatingAttention import *
| {
"content_hash": "5977ebac2896e9074cd8bcbeb5bbb886",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 35,
"avg_line_length": 36,
"alnum_prop": 0.8333333333333334,
"repo_name": "nschuc/alternating-reader-tf",
"id": "7fe8d42b431ca1661b3072c51f30b8e9b6673332",
"size": "36",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31171"
}
],
"symlink_target": ""
} |
import unittest
from google.appengine.api import memcache
from google.appengine.ext import db
from google.appengine.ext import testbed
from google.appengine.datastore import datastore_stub_util
import webapp2
import web
class TestHandlers(unittest.TestCase):
    """App Engine request-handler tests backed by the testbed service stubs."""
    def setUp(self):
        # First, create an instance of the Testbed class.
        self.testbed = testbed.Testbed()
        # Then activate the testbed, which prepares the service stubs for use.
        self.testbed.activate()
        # Create a consistency policy that will simulate the High Replication consistency model.
        self.policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(probability=0)
        # Initialize the datastore stub with this policy.
        self.testbed.init_datastore_v3_stub(consistency_policy=self.policy)
    def tearDown(self):
        # Deactivate the testbed so stubs do not leak between tests.
        self.testbed.deactivate()
    def testHello(self):
        """The root URL responds 200 and contains the expected page text."""
        # Build a request object passing the URI path to be tested.
        # You can also pass headers, query arguments etc.
        request = webapp2.Request.blank('/')
        # Get a response for that request.
        response = request.get_response(web.application)
        # Let's check if the response is correct.
        self.assertEqual(response.status_int, 200)
        self.assertIn('In the Cloud or Not', response.body)
if __name__ == '__main__':
unittest.main() | {
"content_hash": "0789b0278eba006394615e01266a2330",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 92,
"avg_line_length": 33.64102564102564,
"alnum_prop": 0.7370426829268293,
"repo_name": "vishy16/test_repo",
"id": "315403c7fd3d8ac90095e9cd47164e3a0a5d494b",
"size": "1312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "handlers_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6272"
},
{
"name": "JavaScript",
"bytes": "466"
},
{
"name": "Python",
"bytes": "5125"
}
],
"symlink_target": ""
} |
"""Adds functionality for handling data problems.
A data problem is a database integrity problem which is not
visible by the DBMS because detecting it requires higher business
intelligence. Some data problems can be fixed automatically,
others need human interaction.
The application developer writes **data checkers**,
i.e. pieces of code which contain that business intelligence and which
are attached to a given model.
Examples of data problems are:
- :class:`lino_xl.lib.countries.models.PlaceChecker`
- :class:`lino_xl.lib.beid.mixins.BeIdCardHolderChecker`
- :class:`lino_xl.lib.addresses.mixins.AddressOwnerChecker`
- :class:`lino.mixins.dupable.DupableChecker`
- :class:`lino_welfare.modlib.pcsw.models.SSINChecker`
- :class:`lino_welfare.modlib.pcsw.models.ClientCoachingsChecker`
- :class:`lino_welfare.modlib.isip.mixins.OverlappingContractsChecker`
- :class:`lino_welfare.modlib.dupable_clients.models.SimilarClientsChecker`
Users automatically get a button "Update data problems" on
objects for which there is at least one checker available.
The application developer can also add a :class:`ProblemsByOwner`
table to the `detail_layout` of any model.
.. autosummary::
:toctree:
roles
choicelists
models
fixtures.checkdata
management.commands.checkdata
"""
from lino.api import ad, _
class Plugin(ad.Plugin):
    """See :doc:`/dev/plugins`.

    .. attribute:: responsible_user

        The :attr:`username <lino.modlib.users.User.username>`
        of the **main checkdata responsible**, i.e. a designated
        user who will be attributed to data problems for which
        no *specific responsible* could be designated (returned by the
        checker's :meth:`get_responsible_user
        <lino.modlib.checkdata.choicelists.Checker.get_responsible_user>`
        method).

        The default value for this is `None`, except on a demo site
        (i.e. which has :attr:`is_demo_site
        <lino.core.site.Site.is_demo_site>` set to `True`) where it is
        ``'robin'``.
    """
    verbose_name = _("Checkdata")
    needs_plugins = ['lino.modlib.users', 'lino.modlib.gfks']
    # plugin settings
    responsible_user = None  # the username (a string)
    _responsible_user = None  # the cached User object
    def get_responsible_user(self, checker, obj):
        """Return the site-wide fallback responsible User (cached), or None.

        ``checker`` and ``obj`` are unused here but belong to the hook
        signature so overrides can decide per problem.
        Raises an Exception when :attr:`responsible_user` names a user
        that does not exist.
        """
        if self.responsible_user is None:
            return None
        if self._responsible_user is None:
            User = self.site.models.users.User
            try:
                self._responsible_user = User.objects.get(
                    username=self.responsible_user)
            except User.DoesNotExist:
                msg = "Invalid username '{0}' in `responsible_user` "
                msg = msg.format(self.responsible_user)
                raise Exception(msg)
        return self._responsible_user
    def on_plugins_loaded(self, site):
        """Set :attr:`responsible_user` to ``'robin'`` if this is a demo site
        (:attr:`is_demo_site <lino.core.site.Site.is_demo_site>`).
        """
        super(Plugin, self).on_plugins_loaded(site)
        if site.is_demo_site:
            self.configure(responsible_user='robin')
    def setup_main_menu(self, site, user_type, m):
        g = site.plugins.office
        m = m.add_menu(g.app_label, g.verbose_name)
        m.add_action('checkdata.MyProblems')
    def setup_explorer_menu(self, site, user_type, m):
        # First parameter renamed from `config` to the conventional `self`
        # (it receives the plugin instance, matching setup_main_menu).
        g = site.plugins.system
        m = m.add_menu(g.app_label, g.verbose_name)
        m.add_action('checkdata.Checkers')
        m.add_action('checkdata.AllProblems')
        # m.add_action('checkdata.Severities')
        # m.add_action('checkdata.Feedbacks')
| {
"content_hash": "d9d5f91c9f149629e8372682ef106a4f",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 78,
"avg_line_length": 34.44859813084112,
"alnum_prop": 0.6692892023874119,
"repo_name": "khchine5/lino",
"id": "dd8032a16330bb3425c86a9c47bbbb885d4b1bb2",
"size": "3785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lino/modlib/checkdata/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "704"
},
{
"name": "CSS",
"bytes": "1372581"
},
{
"name": "Emacs Lisp",
"bytes": "277895"
},
{
"name": "HTML",
"bytes": "1146746"
},
{
"name": "Hack",
"bytes": "3416"
},
{
"name": "JavaScript",
"bytes": "1212734"
},
{
"name": "PHP",
"bytes": "56272"
},
{
"name": "Python",
"bytes": "2484371"
},
{
"name": "Shell",
"bytes": "5752"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class Shanghai(object):
    """
    A main class for a Shanghai application.

    Keeps a registry of resource instances keyed by their name and can
    emit the combined URL patterns for all of them.
    """
    name = 'shanghai'
    verbose_name = 'Shanghai'
    def __init__(self):
        self._registry = {}
    def register(self, resource):
        """
        Registers a resource in the application by it's name.

        :param resource: A resource class to register
        """
        obj = resource(self)
        self._registry[obj.name] = obj
    def resource_for(self, name):
        """
        Lookups for a resource by the provided name.

        :param name: Name of the resource
        :return: Resource or `None` if not found
        """
        return self._registry.get(name, None)
    def inspect(self):
        # Primary keys, attributes and belongs-to relations first ...
        for res in self._registry.values():
            res.inspector.inspect_primary_key()
            res.inspector.inspect_attributes()
            res.inspector.inspect_belongs_to()
        # ... then has-many relations, which may rely on the first pass.
        for res in self._registry.values():
            res.inspector.inspect_has_many()
    def get_urls(self):
        """
        Iterates over the registered resources and ask for their URLs.

        :return: A list of URL patterns from registered resources
        """
        from django.conf.urls import patterns, include
        included = [('', include(self._registry[key].urls))
                    for key in sorted(self._registry.keys())]
        return patterns('', *included)
    @property
    def urls(self):
        return self.get_urls()
class ShanghaiConfig(AppConfig):
    """
    A default configuration for a Shanghai application.
    """
    # Mirror the application object's identity so Django and Shanghai
    # agree on the app label.
    name = Shanghai.name
    verbose_name = Shanghai.verbose_name

    def ready(self):
        # Let Django finish its own setup first, then discover the
        # registered resources and inspect them on the app module.
        super(ShanghaiConfig, self).ready()
        self.module.autodiscover()
        self.module.autoinspect()
| {
"content_hash": "5f06c5ccf7e3888e90fb7e67b3e9ef6f",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 70,
"avg_line_length": 26,
"alnum_prop": 0.6070686070686071,
"repo_name": "bobisjan/django-shanghai",
"id": "3316fef62db1813f9383823f75cfb5e4cbec009c",
"size": "1924",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shanghai/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "285"
},
{
"name": "Python",
"bytes": "122674"
}
],
"symlink_target": ""
} |
from datetime import datetime, date, timedelta
from django.db import models
class Category(models.Model):
    """
    A category grouping services.
    """
    name = models.CharField(max_length=100)
    slug = models.SlugField()
    description = models.CharField(max_length=255)

    class Meta:
        ordering = ('name',)
        verbose_name_plural = 'categories'

    def __unicode__(self):
        # Python 2 string representation: the category name.
        return self.name
class Service(models.Model):
    """
    A service to track.
    """
    name = models.CharField(max_length=100)
    slug = models.SlugField()
    description = models.CharField(max_length=255)
    category = models.ForeignKey(Category, related_name='services', null=True)

    class Meta:
        ordering = ('name',)

    def __unicode__(self):
        return self.name

    @models.permalink
    def get_absolute_url(self):
        return 'service', [self.slug]

    def last_known_event(self, event_date):
        """
        Return the most recent event recorded on ``event_date``, or
        ``None`` if no event happened that day.
        """
        try:
            return Event.objects.filter(service=self,
                                        start__year=event_date.year,
                                        start__month=event_date.month,
                                        start__day=event_date.day)[0]
        except IndexError:
            # No event on that day.
            return None

    def last_five_days(self):
        """
        Build a per-day status summary for the five days preceding today.
        Used on home page.

        :return: list of ``{'image': ..., 'day': ...}`` dicts, most
            recent day first.
        """
        lowest = Status.objects.default()
        severity = lowest.severity
        yesterday = date.today() - timedelta(days=1)
        ago = yesterday - timedelta(days=5)
        events = self.events.select_related().filter(start__gt=ago,
                                                     start__lt=date.today())
        stats = {}
        while yesterday > ago:
            temp = self.last_known_event(yesterday)
            if temp:
                image = temp.status.image
            else:
                # No event that day: fall back to the default status image.
                image = lowest.image
            stats["%s-%s" % (yesterday.month, yesterday.day)] = {
                "image": image,
                "day": yesterday,
            }
            yesterday = yesterday - timedelta(days=1)
        for event in events:
            if event.status.severity > severity:
                issue_date = '%s-%s' % (event.start.month, event.start.day)
                # Membership test on the dict itself instead of
                # materializing .keys().
                if issue_date in stats:
                    stats[issue_date]["image"] = "information"
                    stats[issue_date]["information"] = True
        # sorted(..., reverse=True) replaces keys.sort()/keys.reverse(),
        # which breaks on Python 3 where dict.keys() is a view.
        return [stats[k] for k in sorted(stats, reverse=True)]

    def current_event(self):
        """
        Return a dict describing the latest event for this service, or
        ``None`` when the service has no events yet.
        """
        try:
            t_event = self.events.latest()
        except Event.DoesNotExist:
            # .latest() raises DoesNotExist on an empty queryset; the
            # original bare ``except:`` also hid unrelated errors.
            return None
        return {
            'service': t_event.service,
            'status': t_event.status,
            'message': t_event.message,
            'start': t_event.start,
            'informational': t_event.informational,
        }
class StatusManager(models.Manager):
    def default(self):
        """Return the baseline status, i.e. the first one with
        severity == 10 (NORMAL)."""
        return self.get_query_set().filter(severity=10)[0]
class Status(models.Model):
    """
    A possible system status.
    """
    name = models.CharField(max_length=100)
    slug = models.SlugField()
    description = models.CharField(max_length=255)
    # Higher number means more severe; 10 (NORMAL) is the baseline
    # returned by StatusManager.default().
    SEVERITY_CHOICES = (
        (10, 'NORMAL'),
        (30, 'WARNING'),
        (40, 'ERROR'),
        (50, 'CRITICAL'),
    )
    severity = models.IntegerField(choices=SEVERITY_CHOICES)
    # Identifier of the image used when rendering this status.
    image = models.CharField(max_length=100)
    objects = StatusManager()

    class Meta:
        ordering = ('severity',)
        verbose_name_plural = 'statuses'

    def __unicode__(self):
        return self.name
class Event(models.Model):
    # A point-in-time status record for a service.
    service = models.ForeignKey(Service, related_name='events')
    status = models.ForeignKey(Status, related_name='events')
    message = models.TextField()
    start = models.DateTimeField(default=datetime.now)
    # presumably marks purely informational (non-outage) events — confirm
    # against the views that set it.
    informational = models.BooleanField(default=False)

    class Meta:
        # Newest first; Service.current_event() relies on get_latest_by.
        ordering = ('-start',)
        get_latest_by = 'start'
| {
"content_hash": "9cc3a6b17bbc2bdee64154d54fce4d0c",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 78,
"avg_line_length": 27.7027027027027,
"alnum_prop": 0.5463414634146342,
"repo_name": "sijis/django-plank",
"id": "f5642c6117d1097edff5a3e614bb1b9c7aeb5614",
"size": "4100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plank/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7811"
},
{
"name": "Python",
"bytes": "17197"
}
],
"symlink_target": ""
} |
import argparse
import tempfile
from pathlib import Path
from typing import Union
from .fmi2slave import FMI2_MODEL_OPTIONS
from .builder import FmuBuilder
# Path-like argument accepted by the public helpers in this module.
FilePath = Union[str, Path]
def create_csv_slave(csv_file: FilePath):
    """
    Generate the source code of an ``Fmi2Slave`` subclass that replays
    the rows of ``csv_file`` as FMU outputs.

    :param csv_file: path to the CSV file; its stem (capitalized)
        becomes the generated class name
    :return: Python source code of the generated slave, as a string

    Fix: the generated ``read_csv`` now opens ``{filename}`` (the CSV's
    own name inside the FMU resources); the previous literal file name
    never matched and the ``filename`` local was unused.
    """
    classname = csv_file.stem.capitalize()
    filename = csv_file.name
    return f"""
import re
import csv
from math import isclose  # requires >= python 3.5
from pythonfmu.fmi2slave import Fmi2Type, Fmi2Slave, Fmi2Causality, Fmi2Variability, Integer, Real, Boolean, String


def lerp(v0: float, v1: float, t: float) -> float:
    return (1 - t) * v0 + t * v1


def normalize(x: float, in_min: float, in_max: float, out_min: float, out_max: float) -> float:
    x = max(min(x, in_max), in_min)
    return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min


def get_fmi2_type(s: str) -> Fmi2Type:
    s_lower = s.lower()
    for type in Fmi2Type:
        if type.name in s_lower:
            if type == Fmi2Type.enumeration:
                raise NotImplementedError(f"Unsupported type: {{Fmi2Type.enumeration.name}}")
            else:
                return type
    raise TypeError(f"Could not process type from input string: {{s}}")


TYPE2OBJ = {{
    Fmi2Type.integer: Integer,
    Fmi2Type.real: Real,
    Fmi2Type.boolean: Boolean,
    Fmi2Type.string: String
}}


class Header:

    def __init__(self, s):
        matches = re.findall(r"\\[(.*?)\\]", s)
        if len(matches) > 0:
            match = matches[-1]
            self.name = s.replace("[" + match + "]", "").rstrip()
            self.type = get_fmi2_type(match)
        else:
            self.name = s
            self.type = Fmi2Type.real

    def __repr__(self):
        return f"Header(name={{self.name}}, type={{self.type.name}})"


class {classname}(Fmi2Slave):

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.current_index = 0
        self.next_index = None
        self.current_time = 0.0
        self.interpolate = True

        data = dict()

        def read_csv():
            with open(self.resources + '/' + "{filename}") as f:
                return list(csv.reader(f, skipinitialspace=True, delimiter=',', quotechar='"'))

        read = read_csv()
        header_row = read[0]
        headers = list(map(lambda h: Header(h.strip()), header_row[1:len(header_row)]))
        rows = read[1:len(read)]
        self.num_rows = len(rows)
        self.times = []

        for header in headers:
            data[header.name] = []

            def get_value(header):
                current_value = data[header.name][self.current_index]
                if self.next_index is None or header.type is not Fmi2Type.real:
                    return current_value
                next_value = data[header.name][self.next_index]
                if current_value == next_value:
                    return current_value
                current_value_t = self.times[self.current_index]
                next_value_t = self.times[self.next_index]
                t = normalize(self.current_time, current_value_t, next_value_t, 0, 1)
                return lerp(current_value, next_value, t)

            self.register_variable(
                TYPE2OBJ[header.type](header.name,
                                      causality=Fmi2Causality.output,
                                      variability=Fmi2Variability.constant,
                                      getter=lambda header=header: get_value(header)), nested=False)

        for i in range(0, self.num_rows):
            row = rows[i]
            self.times.append(float(row[0]))
            for j in range(1, len(row)):
                header = headers[j-1]
                if header.type == Fmi2Type.integer:
                    data[header.name].append(int(row[j]))
                elif header.type == Fmi2Type.real:
                    data[header.name].append(float(row[j]))
                elif header.type == Fmi2Type.boolean:
                    data[header.name].append(row[j] == 'true')
                elif header.type == Fmi2Type.string:
                    data[header.name].append(row[j])

        self.register_variable(Integer("num_rows",
                                       causality=Fmi2Causality.output,
                                       variability=Fmi2Variability.constant))
        self.register_variable(Real("end_time",
                                    causality=Fmi2Causality.output,
                                    variability=Fmi2Variability.constant,
                                    getter=lambda: self.times[-1]))
        self.register_variable(Boolean("interpolate",
                                       causality=Fmi2Causality.parameter,
                                       variability=Fmi2Variability.tunable))

    def find_indices(self, t, dt):
        current_t = self.times[self.current_index]
        while current_t < t:
            if self.current_index == self.num_rows-1:
                break
            self.current_index += 1
            current_t = self.times[self.current_index]
        if current_t > t and not isclose(current_t, t, rel_tol=1e-6):
            self.current_index -= 1
            current_t = self.times[self.current_index]
        if self.interpolate and self.current_index <= self.num_rows-2:
            self.next_index = self.current_index+1
            next_t = self.times[self.next_index]
            while t+dt >= next_t and not isclose(t+dt, next_t, abs_tol=1e-6):
                if self.next_index + 1 < self.num_rows:
                    self.next_index += 1
                    next_t = self.times[self.next_index]

    def setup_experiment(self, start_time: float):
        self.current_time = start_time
        self.find_indices(start_time, 0)

    def do_step(self, current_time: float, step_size: float) -> bool:
        if (self.current_index == self.num_rows):
            return False
        self.current_time = current_time + step_size
        self.find_indices(self.current_time, step_size)
        return True
"""
class CsvFmuBuilder:
    """Build an FMU that replays a CSV file via a generated slave script."""

    @staticmethod
    def build_FMU(
        csv_file: FilePath,
        dest: FilePath = ".",
        **options,
    ) -> Path:
        """
        Generate the CSV replay slave for ``csv_file`` and hand it to
        :class:`FmuBuilder`.

        :param csv_file: path to an existing ``.csv`` file
        :param dest: directory where the FMU is written
        :return: path of the built FMU
        :raises ValueError: if the file is missing or not a ``.csv``
        """
        csv_file = Path(csv_file)
        # Validate the input before doing any work.
        if not csv_file.exists():
            raise ValueError(f"No such file {csv_file!s}")
        if not csv_file.suffix.endswith(".csv"):
            raise ValueError(f"File {csv_file!s} must have extension '.csv'!")
        options.update(dest=dest, project_files={csv_file})
        with tempfile.TemporaryDirectory(prefix="pythonfmu_") as tmp_name:
            # Materialize the generated slave script in a scratch folder
            # that only lives for the duration of the build.
            slave_script = Path(tmp_name) / (csv_file.stem + ".py")
            slave_script.write_text(create_csv_slave(csv_file))
            options["script_file"] = slave_script
            return FmuBuilder.build_FMU(**options)
def create_command_parser(parser: argparse.ArgumentParser):
    """Register the CSV-to-FMU command line arguments on *parser*."""
    parser.add_argument(
        "-f", "--file",
        dest="csv_file",
        help="Path to the CSV file.",
        required=True,
    )
    parser.add_argument(
        "-d", "--dest",
        dest="dest",
        help="Where to save the FMU.",
        default=".",
    )
    parser.add_argument(
        "--doc",
        dest="documentation_folder",
        help="Documentation folder to include in the FMU.",
        default=None,
    )
    # One boolean flag per FMI 2.0 model option; the flag stores the
    # inverse of the option's default value.
    for model_option in FMI2_MODEL_OPTIONS:
        if model_option.value:
            action = "store_false"
        else:
            action = "store_true"
        parser.add_argument(
            f"--{model_option.cli}",
            dest=model_option.name,
            help=f"If given, {model_option.name}={action[6:]}",
            action=action,
        )
    parser.set_defaults(execute=CsvFmuBuilder.build_FMU)
| {
"content_hash": "d95884de2a2ce2f5680f0c0724dcfe82",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 115,
"avg_line_length": 34.734234234234236,
"alnum_prop": 0.550641940085592,
"repo_name": "joshua-cogliati-inl/raven",
"id": "c36c513b1fa48722059d29c190945bf213b9459b",
"size": "7711",
"binary": false,
"copies": "2",
"ref": "refs/heads/devel",
"path": "ravenframework/contrib/PythonFMU/pythonfmu/csvbuilder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1556080"
},
{
"name": "Batchfile",
"bytes": "1095"
},
{
"name": "C",
"bytes": "148504"
},
{
"name": "C++",
"bytes": "48279546"
},
{
"name": "CMake",
"bytes": "9998"
},
{
"name": "Jupyter Notebook",
"bytes": "84202"
},
{
"name": "MATLAB",
"bytes": "202335"
},
{
"name": "Makefile",
"bytes": "2399"
},
{
"name": "Perl",
"bytes": "1297"
},
{
"name": "Python",
"bytes": "6952659"
},
{
"name": "R",
"bytes": "67"
},
{
"name": "SWIG",
"bytes": "8574"
},
{
"name": "Shell",
"bytes": "124279"
},
{
"name": "TeX",
"bytes": "479725"
}
],
"symlink_target": ""
} |
from org.gluu.service.cdi.util import CdiUtil
from org.gluu.oxauth.security import Identity
from org.gluu.model.custom.script.type.auth import PersonAuthenticationType
from org.gluu.oxauth.service import AuthenticationService
from org.gluu.util import StringHelper
import java
class PersonAuthentication(PersonAuthenticationType):
    """Basic username/password authentication for oxAuth, with an
    external (RP-side) logout URL.

    NOTE(review): this is a Jython 2.x custom script (print statements,
    org.gluu/java imports) executed inside the Gluu oxAuth server, not
    plain CPython.
    """

    def __init__(self, currentTimeMillis):
        self.currentTimeMillis = currentTimeMillis

    def init(self, customScript, configurationAttributes):
        # No configuration attributes are used by this script.
        print "Basic (with external logout). Initialization"
        print "Basic (with external logout). Initialized successfully"
        return True

    def destroy(self, configurationAttributes):
        print "Basic (with external logout). Destroy"
        print "Basic (with external logout). Destroyed successfully"
        return True

    def getApiVersion(self):
        # Custom-script API version expected by this oxAuth release.
        return 11

    def getAuthenticationMethodClaims(self, requestParameters):
        return None

    def isValidAuthenticationMethod(self, usageType, configurationAttributes):
        return True

    def getAlternativeAuthenticationMethod(self, usageType, configurationAttributes):
        return None

    def authenticate(self, configurationAttributes, requestParameters, step):
        # Single-step flow: validate the submitted username/password
        # against the server's AuthenticationService.
        authenticationService = CdiUtil.bean(AuthenticationService)
        if (step == 1):
            print "Basic (with external logout). Authenticate for step 1"
            identity = CdiUtil.bean(Identity)
            credentials = identity.getCredentials()
            user_name = credentials.getUsername()
            user_password = credentials.getPassword()
            logged_in = False
            # Only attempt authentication when both fields are present.
            if (StringHelper.isNotEmptyString(user_name) and StringHelper.isNotEmptyString(user_password)):
                logged_in = authenticationService.authenticate(user_name, user_password)
            if (not logged_in):
                return False
            return True
        else:
            # Any step other than 1 is unexpected in this flow.
            return False

    def prepareForStep(self, configurationAttributes, requestParameters, step):
        if (step == 1):
            print "Basic (with external logout). Prepare for Step 1"
            return True
        else:
            return False

    def getExtraParametersForStep(self, configurationAttributes, step):
        return None

    def getCountAuthenticationSteps(self, configurationAttributes):
        return 1

    def getPageForStep(self, configurationAttributes, step):
        # Empty string: use the default login page.
        return ""

    def getNextStep(self, configurationAttributes, requestParameters, step):
        return -1

    def getLogoutExternalUrl(self, configurationAttributes, requestParameters):
        # URL the browser is redirected to after local logout.
        print "Basic (with external logout). Get external logout URL call"
        return "https://www.dummy.org/app/logout.htm"

    # In order to get this method call RP should end_session request to https://<server>/oxauth/logout.htm enpoint
    # instead of https://<server>/oxauth/restv1/end_session endpoint
    def logout(self, configurationAttributes, requestParameters):
        print "Basic (with external logout). Logout call"
        return True
| {
"content_hash": "e14eae7c8488f97e4b1e0fe0152c85c0",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 114,
"avg_line_length": 36.67058823529412,
"alnum_prop": 0.6952197625922362,
"repo_name": "GluuFederation/oxAuth",
"id": "8ebfaa3c59ecda7fcf40a9f79d405183244edf40",
"size": "3279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Server/integrations/basic.external_logout/BasicExternalAuthenticatorWithExternalLogout.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "78"
},
{
"name": "CSS",
"bytes": "91820"
},
{
"name": "HTML",
"bytes": "689156"
},
{
"name": "Java",
"bytes": "7932142"
},
{
"name": "JavaScript",
"bytes": "1475711"
},
{
"name": "Mustache",
"bytes": "2244"
},
{
"name": "Python",
"bytes": "1023958"
},
{
"name": "Shell",
"bytes": "302"
}
],
"symlink_target": ""
} |
'''
Scroll View
===========
.. versionadded:: 1.0.4
The :class:`ScrollView` widget provides a scrollable/pannable viewport that is
clipped at the scrollview's bounding box.
Scrolling Behavior
------------------
ScrollView accepts only one child, and applies a viewport/window to it
according to the :data:`scroll_x` and :data:`scroll_y` properties. Touches are
analyzed to determine if the user wants to scroll or control the child in some
other manner - you cannot do both at the same time. To determine if interaction
is a scrolling gesture, these properties are used:
- :data:`ScrollView.scroll_distance` a minimum distance to travel, default
to 20 pixels.
- :data:`ScrollView.scroll_timeout` a maximum time period, default to 250
milliseconds.
If a touch travels :data:`~ScrollView.scroll_distance` pixels within the
:data:`~ScrollView.scroll_timeout` period, it is recognized as a scrolling
gesture and translation (scroll/pan) will begin. If the timeout occurs, the
touch down event is dispatched to the child instead (no translation).
The default value for those settings can be changed in the configuration file::
[widgets]
scroll_timeout = 250
scroll_distance = 20
.. versionadded:: 1.1.1
Scrollview now animates scrolling in Y when a mousewheel is used.
Limiting to X or Y Axis
-----------------------
By default, ScrollView allows scrolling in both the X and Y axes. You can
explicitly disable scrolling on an axis by setting
:data:`ScrollView.do_scroll_x` or :data:`ScrollView.do_scroll_y` to False.
Managing the Content Size
-------------------------
ScrollView manages the position of the child content, not the size. You must
carefully specify the `size_hint` of your content to get the desired
scroll/pan effect.
By default, size_hint is (1, 1), so the content size will fit your ScrollView
exactly (you will have nothing to scroll). You must deactivate at least one of
the size_hint instructions (x or y) of the child to enable scrolling.
To scroll a :class:`GridLayout` on Y-axis/vertically, set the child's width
identical to that of the ScrollView (size_hint_x=1, default), and set the
size_hint_y property to None::
layout = GridLayout(cols=1, spacing=10, size_hint_y=None)
# Make sure the height is such that there is something to scroll.
layout.bind(minimum_height=layout.setter('height'))
for i in range(30):
btn = Button(text=str(i), size_hint_y=None, height=40)
layout.add_widget(btn)
root = ScrollView(size_hint=(None, None), size=(400, 400))
root.add_widget(layout)
Effects
-------
.. versionadded:: 1.7.0
An effect is a subclass of :class:`~kivy.effects.scroll.ScrollEffect` that
computes information while dragging and applies transformations to the
:class:`ScrollView`. Depending on the effect, more computation can be done to
calculate over-scroll, bouncing, etc.
All the effects are located in the :mod:`kivy.effects`.
'''
__all__ = ('ScrollView', )
from functools import partial
from kivy.animation import Animation
from kivy.config import Config
from kivy.clock import Clock
from kivy.uix.stencilview import StencilView
from kivy.metrics import sp
from kivy.effects.dampedscroll import DampedScrollEffect
from kivy.properties import NumericProperty, BooleanProperty, AliasProperty, \
ObjectProperty, ListProperty, OptionProperty
# When we are generating documentation, Config doesn't exist
# Fallback defaults used when Config is unavailable (e.g. while the
# documentation is being generated).
_scroll_timeout = _scroll_distance = 0
if Config:
    # Pull the user-configured values; scroll_distance is converted to
    # scale-independent pixels.
    _scroll_timeout = Config.getint('widgets', 'scroll_timeout')
    _scroll_distance = sp(Config.getint('widgets', 'scroll_distance'))
class ScrollView(StencilView):
'''ScrollView class. See module documentation for more information.
.. versionchanged:: 1.7.0
    `auto_scroll`, `scroll_friction`, `scroll_moves` and `scroll_stoptime` have
    been deprecated; use :data:`effect_cls` instead.
'''
scroll_distance = NumericProperty(_scroll_distance)
'''Distance to move before scrolling the :class:`ScrollView`, in pixels. As
soon as the distance has been traveled, the :class:`ScrollView` will start
to scroll, and no touch event will go to children.
It is advisable that you base this value on the dpi of your target device's
screen.
:data:`scroll_distance` is a :class:`~kivy.properties.NumericProperty`,
default to 20 (pixels), according to the default value in user
configuration.
'''
scroll_timeout = NumericProperty(_scroll_timeout)
'''Timeout allowed to trigger the :data:`scroll_distance`, in milliseconds.
If the user has not moved :data:`scroll_distance` within the timeout,
the scrolling will be disabled, and the touch event will go to the children.
:data:`scroll_timeout` is a :class:`~kivy.properties.NumericProperty`,
default to 55 (milliseconds), according to the default value in user
configuration.
.. versionchanged:: 1.5.0
Default value changed from 250 to 55.
'''
scroll_x = NumericProperty(0.)
'''X scrolling value, between 0 and 1. If 0, the content's left side will
touch the left side of the ScrollView. If 1, the content's right side will
touch the right side.
This property is controlled by :class:`ScrollView` only if
:data:`do_scroll_x` is True.
:data:`scroll_x` is a :class:`~kivy.properties.NumericProperty`,
default to 0.
'''
scroll_y = NumericProperty(1.)
'''Y scrolling value, between 0 and 1. If 0, the content's bottom side will
touch the bottom side of the ScrollView. If 1, the content's top side will
touch the top side.
This property is controlled by :class:`ScrollView` only if
:data:`do_scroll_y` is True.
:data:`scroll_y` is a :class:`~kivy.properties.NumericProperty`,
default to 1.
'''
do_scroll_x = BooleanProperty(True)
'''Allow scroll on X axis.
:data:`do_scroll_x` is a :class:`~kivy.properties.BooleanProperty`,
default to True.
'''
do_scroll_y = BooleanProperty(True)
'''Allow scroll on Y axis.
:data:`do_scroll_y` is a :class:`~kivy.properties.BooleanProperty`,
default to True.
'''
    def _get_do_scroll(self):
        # Getter for the `do_scroll` alias property.
        return (self.do_scroll_x, self.do_scroll_y)

    def _set_do_scroll(self, value):
        # Accept either an (x, y) pair or a single truthy value applied
        # to both axes.
        if type(value) in (list, tuple):
            self.do_scroll_x, self.do_scroll_y = value
        else:
            self.do_scroll_x = self.do_scroll_y = bool(value)

    do_scroll = AliasProperty(_get_do_scroll, _set_do_scroll,
                              bind=('do_scroll_x', 'do_scroll_y'))
    '''Allow scroll on X or Y axis.

    :data:`do_scroll` is a :class:`~kivy.properties.AliasProperty` of
    (:data:`do_scroll_x` + :data:`do_scroll_y`)
    '''
    def _get_vbar(self):
        # must return (y, height) in %
        # calculate the viewport size / scrollview size %
        if self._viewport is None:
            return 0, 1.
        vh = self._viewport.height
        h = self.height
        # Content fits (or has no height yet): the bar covers the whole
        # track.
        if vh < h or vh == 0:
            return 0, 1.
        ph = max(0.01, h / float(vh))
        # Clamp scroll_y into [0, 1] before positioning the bar.
        sy = min(1.0, max(0.0, self.scroll_y))
        py = (1. - ph) * sy
        return (py, ph)

    vbar = AliasProperty(_get_vbar, None, bind=(
        'scroll_y', '_viewport', 'viewport_size'))
    '''Return a tuple of (position, size) of the vertical scrolling bar.

    .. versionadded:: 1.2.0

    The position and size are normalized between 0-1, and represent a
    percentage of the current scrollview height. This property is used
    internally for drawing the little vertical bar when you're scrolling.

    :data:`vbar` is a :class:`~kivy.properties.AliasProperty`, readonly.
    '''

    def _get_hbar(self):
        # must return (x, width) in %
        # calculate the viewport size / scrollview size %
        if self._viewport is None:
            return 0, 1.
        vw = self._viewport.width
        w = self.width
        if vw < w or vw == 0:
            return 0, 1.
        pw = max(0.01, w / float(vw))
        sx = min(1.0, max(0.0, self.scroll_x))
        px = (1. - pw) * sx
        return (px, pw)

    hbar = AliasProperty(_get_hbar, None, bind=(
        'scroll_x', '_viewport', 'viewport_size'))
    '''Return a tuple of (position, size) of the horizontal scrolling bar.

    .. versionadded:: 1.2.0

    The position and size are normalized between 0-1, and represent a
    percentage of the current scrollview width. This property is used
    internally for drawing the little horizontal bar when you're scrolling.

    :data:`hbar` is a :class:`~kivy.properties.AliasProperty`, readonly.
    '''
bar_color = ListProperty([.7, .7, .7, .9])
'''Color of horizontal / vertical scroll bar, in RGBA format.
.. versionadded:: 1.2.0
:data:`bar_color` is a :class:`~kivy.properties.ListProperty`, default to
[.7, .7, .7, .9].
'''
bar_width = NumericProperty('2dp')
'''Width of the horizontal / vertical scroll bar. The width is interpreted
as a height for the horizontal bar.
.. versionadded:: 1.2.0
:data:`bar_width` is a :class:`~kivy.properties.NumericProperty`, default
to 2
'''
bar_margin = NumericProperty(0)
'''Margin between the bottom / right side of the scrollview when drawing
the horizontal / vertical scroll bar.
.. versionadded:: 1.2.0
:data:`bar_margin` is a :class:`~kivy.properties.NumericProperty`, default
to 0
'''
effect_cls = ObjectProperty(DampedScrollEffect, allownone=True)
'''Class effect to instantiate for the X and Y axes.
.. versionadded:: 1.7.0
:data:`effect_cls` is a :class:`~kivy.properties.ObjectProperty`, default to
:class:`DampedScrollEffect`.
'''
effect_x = ObjectProperty(None, allownone=True)
'''Effect to apply for the X axis. If None is set, an instance of
:data:`effect_cls` will be created.
.. versionadded:: 1.7.0
:data:`effect_x` is a :class:`~kivy.properties.ObjectProperty`, default to
None
'''
effect_y = ObjectProperty(None, allownone=True)
'''Effect to apply for the Y axis. If None is set, an instance of
:data:`effect_cls` will be created.
.. versionadded:: 1.7.0
:data:`effect_y` is a :class:`~kivy.properties.ObjectProperty`, default to
None, read-only.
'''
viewport_size = ListProperty([0, 0])
'''(internal) Size of the internal viewport. This is the size of your only
child in the scrollview.
'''
scroll_type = OptionProperty('content', options=('content', 'bars', 'both'))
'''Sets the type of scrolling to use for the content of the scrollview.
.. versionadded:: 1.8.0
:data:`scroll_type` is a :class:`~kivy.properties.OptionProperty`, default
to content.
'''
    def on_scroll_type(self, instance, value):
        # When bars are involved ('bars'/'both'), make sure the bar is
        # wide enough to grab with a finger.
        # NOTE(review): max('9dp', self.bar_width) compares a string with
        # a number (bar_width is a NumericProperty) and relies on Python 2
        # mixed-type comparison — confirm the intent (likely sp(9)).
        self.bar_width = max('9dp', self.bar_width)\
            if value[0] == 'b' else self.bar_width

    # private, for internal use only
    # _viewport holds the single child widget; bar_alpha drives the
    # fade-out animation of the scroll bars.
    _viewport = ObjectProperty(None, allownone=True)
    bar_alpha = NumericProperty(1.)

    def _set_viewport_size(self, instance, value):
        # Keep viewport_size in sync with the child's size.
        self.viewport_size = value

    def on__viewport(self, instance, value):
        # A new child was attached: follow its size from now on.
        if value:
            value.bind(size=self._set_viewport_size)
            self.viewport_size = value.size
    def __init__(self, **kwargs):
        self._touch = None
        # Coalesce reposition requests into one call per frame
        # (-1 schedules before the next frame).
        self._trigger_update_from_scroll = Clock.create_trigger(
            self.update_from_scroll, -1)
        super(ScrollView, self).__init__(**kwargs)
        # Instantiate default scroll effects if none were provided.
        if self.effect_x is None and self.effect_cls is not None:
            self.effect_x = self.effect_cls(target_widget=self._viewport)
        if self.effect_y is None and self.effect_cls is not None:
            self.effect_y = self.effect_cls(target_widget=self._viewport)
        # Keep effect bounds and the content position in sync with any
        # geometry or scroll change.
        self.bind(
            width=self._update_effect_x_bounds,
            height=self._update_effect_y_bounds,
            viewport_size=self._update_effect_bounds,
            _viewport=self._update_effect_widget,
            scroll_x=self._trigger_update_from_scroll,
            scroll_y=self._trigger_update_from_scroll,
            pos=self._trigger_update_from_scroll,
            size=self._trigger_update_from_scroll)
        self._update_effect_widget()
        self._update_effect_x_bounds()
        self._update_effect_y_bounds()
    def on_effect_x(self, instance, value):
        # A new X effect was assigned: listen to its scroll value and
        # point it at the current viewport.
        if value:
            value.bind(scroll=self._update_effect_x)
            value.target_widget = self._viewport

    def on_effect_y(self, instance, value):
        # Same as on_effect_x, for the Y axis.
        if value:
            value.bind(scroll=self._update_effect_y)
            value.target_widget = self._viewport

    def on_effect_cls(self, instance, cls):
        # Changing the effect class rebuilds both axis effects.
        self.effect_x = self.effect_cls(target_widget=self._viewport)
        self.effect_x.bind(scroll=self._update_effect_x)
        self.effect_y = self.effect_cls(target_widget=self._viewport)
        self.effect_y.bind(scroll=self._update_effect_y)

    def _update_effect_widget(self, *args):
        # Re-target both effects at the (possibly new) viewport.
        if self.effect_x:
            self.effect_x.target_widget = self._viewport
        if self.effect_y:
            self.effect_y.target_widget = self._viewport

    def _update_effect_x_bounds(self, *args):
        # Scrollable range on X: effect values run from -(overflow) to 0.
        if not self._viewport or not self.effect_x:
            return
        self.effect_x.min = -(self.viewport_size[0] - self.width)
        self.effect_x.max = 0
        self.effect_x.value = self.effect_x.min * self.scroll_x

    def _update_effect_y_bounds(self, *args):
        # Scrollable range on Y, mirroring _update_effect_x_bounds.
        if not self._viewport or not self.effect_y:
            return
        self.effect_y.min = -(self.viewport_size[1] - self.height)
        self.effect_y.max = 0
        self.effect_y.value = self.effect_y.min * self.scroll_y

    def _update_effect_bounds(self, *args):
        # Viewport size changed: refresh the bounds of both effects.
        if not self._viewport:
            return
        if self.effect_x:
            self._update_effect_x_bounds()
        if self.effect_y:
            self._update_effect_y_bounds()

    def _update_effect_x(self, *args):
        # The effect moved: convert its pixel scroll into a 0-1 scroll_x.
        vp = self._viewport
        if not vp or not self.effect_x:
            return
        sw = vp.width - self.width
        if sw < 1:
            # Nothing to scroll horizontally.
            return
        sx = self.effect_x.scroll / float(sw)
        self.scroll_x = -sx
        self._trigger_update_from_scroll()

    def _update_effect_y(self, *args):
        # The effect moved: convert its pixel scroll into a 0-1 scroll_y.
        vp = self._viewport
        if not vp or not self.effect_y:
            return
        sh = vp.height - self.height
        if sh < 1:
            return
        sy = self.effect_y.scroll / float(sh)
        self.scroll_y = -sy
        self._trigger_update_from_scroll()
    def on_touch_down(self, touch):
        if not self.collide_point(*touch.pos):
            # Touch outside of us: flag it so our other handlers skip it.
            touch.ud[self._get_uid('svavoid')] = True
            return
        if self.disabled:
            return True
        if self._touch or (not (self.do_scroll_x or self.do_scroll_y)):
            # Already tracking a touch, or scrolling fully disabled:
            # let the children handle it.
            return super(ScrollView, self).on_touch_down(touch)

        # handle mouse scrolling, only if the viewport size is bigger than the
        # scrollview size, and if the user allowed to do it
        vp = self._viewport
        if vp and 'button' in touch.profile and \
                touch.button.startswith('scroll'):
            btn = touch.button
            m = self.scroll_distance
            e = None
            # NOTE(review): these guards pair effect_x with do_scroll_y
            # (and vice versa) — looks like a swapped condition; confirm
            # against upstream before relying on it.
            if (self.effect_x and self.do_scroll_y and vp.height > self.height
                    and btn in ('scrolldown', 'scrollup')):
                e = self.effect_y
            elif (self.effect_y and self.do_scroll_x and vp.width > self.width
                    and btn in ('scrollleft', 'scrollright')):
                e = self.effect_x
            if e:
                # Nudge the chosen effect by scroll_distance per wheel
                # notch, clamped to its bounds.
                if btn in ('scrolldown', 'scrollleft'):
                    e.value = max(e.value - m, e.min)
                    e.velocity = 0
                elif btn in ('scrollup', 'scrollright'):
                    e.value = min(e.value + m, e.max)
                    e.velocity = 0
                touch.ud[self._get_uid('svavoid')] = True
                e.trigger_velocity_update()
            return True

        scroll_type = self.scroll_type
        # no mouse scrolling, so the user is going to drag the scrollview with
        # this touch.
        self._touch = touch
        uid = self._get_uid()
        touch.grab(self)
        ud = touch.ud
        ud[uid] = {
            'mode': 'unknown',
            'dx': 0,
            'dy': 0,
            'user_stopped': False,
            'time': touch.time_start}
        if self.do_scroll_x and self.effect_x:
            # Did the touch start inside the horizontal bar?
            if scroll_type[0] == 'b' and touch.y < self.bar_width:
                ud['in_bar_x'] = True
            else:
                if scroll_type != 'bars':
                    self.effect_x.start(touch.x)
        if self.do_scroll_y and self.effect_y:
            # Did the touch start inside the vertical bar?
            if scroll_type[0] == 'b' and touch.x > self.right - self.bar_width:
                ud['in_bar_y'] = True
            else:
                if scroll_type != 'bars':
                    self.effect_y.start(touch.y)
        if (ud.get('in_bar_x', False) or ud.get('in_bar_y', False)):
            return
        if scroll_type == 'bars':
            # Content can only be moved via the bars: decide immediately.
            self._change_touch_mode()
        else:
            # Give the child a chance to claim the touch if the user does
            # not move far enough within scroll_timeout.
            Clock.schedule_once(self._change_touch_mode,
                                self.scroll_timeout / 1000.)
        return True
    def on_touch_move(self, touch):
        if self._get_uid('svavoid') in touch.ud:
            # The touch was flagged as not-ours in on_touch_down.
            return
        if self._touch is not touch:
            super(ScrollView, self).on_touch_move(touch)
            return self._get_uid() in touch.ud
        if touch.grab_current is not self:
            return True

        uid = self._get_uid()
        ud = touch.ud[uid]
        mode = ud['mode']

        # check if the minimum distance has been travelled
        if mode == 'unknown' or mode == 'scroll':
            if self.do_scroll_x and self.effect_x:
                width = self.width
                if touch.ud.get('in_bar_x', False):
                    # Dragging the horizontal bar: convert the bar delta
                    # into a scroll_x delta (bar track is the width not
                    # covered by the bar itself).
                    dx = touch.dx / float(width - width * self.hbar[1])
                    self.scroll_x = min(max(self.scroll_x + dx, 0.), 1.)
                    self._trigger_update_from_scroll()
                else:
                    if self.scroll_type != 'bars':
                        self.effect_x.update(touch.x)
            if self.do_scroll_y and self.effect_y:
                height = self.height
                if touch.ud.get('in_bar_y', False):
                    # Dragging the vertical bar, same conversion on Y.
                    dy = touch.dy / float(height - height * self.vbar[1])
                    self.scroll_y = min(max(self.scroll_y + dy, 0.), 1.)
                    self._trigger_update_from_scroll()
                else:
                    if self.scroll_type != 'bars':
                        self.effect_y.update(touch.y)

        if mode == 'unknown':
            # Accumulate movement; once scroll_distance is exceeded on an
            # enabled axis, commit to scroll mode.
            ud['dx'] += abs(touch.dx)
            ud['dy'] += abs(touch.dy)
            if ud['dx'] > self.scroll_distance:
                if not self.do_scroll_x:
                    # Moved mostly on a disabled axis: hand the touch
                    # back to the child.
                    self._change_touch_mode()
                    return
                mode = 'scroll'
            if ud['dy'] > self.scroll_distance:
                if not self.do_scroll_y:
                    self._change_touch_mode()
                    return
                mode = 'scroll'
            ud['mode'] = mode

        if mode == 'scroll':
            # Track timing so velocity can be derived, and remember that
            # the user is actively dragging.
            ud['dt'] = touch.time_update - ud['time']
            ud['time'] = touch.time_update
            ud['user_stopped'] = True
        return True
    def on_touch_up(self, touch):
        if self._get_uid('svavoid') in touch.ud:
            return

        if self in [x() for x in touch.grab_list]:
            # This is the touch we grabbed in on_touch_down.
            touch.ungrab(self)
            self._touch = None
            uid = self._get_uid()
            ud = touch.ud[uid]
            # NOTE(review): the X and Y branches place the
            # scroll_type != 'bars' check at different nesting levels —
            # verify the asymmetry is intentional.
            if self.do_scroll_x and self.effect_x:
                if not touch.ud.get('in_bar_x', False) and\
                        self.scroll_type != 'bars':
                    self.effect_x.stop(touch.x)
            if self.do_scroll_y and self.effect_y and\
                    self.scroll_type != 'bars':
                if not touch.ud.get('in_bar_y', False):
                    self.effect_y.stop(touch.y)
            if ud['mode'] == 'unknown':
                # we must do the click at least..
                # only send the click if it was not a click to stop
                # autoscrolling
                if not ud['user_stopped']:
                    super(ScrollView, self).on_touch_down(touch)
                Clock.schedule_once(partial(self._do_touch_up, touch), .2)
        else:
            if self._touch is not touch and self.uid not in touch.ud:
                super(ScrollView, self).on_touch_up(touch)

        # if we do mouse scrolling, always accept it
        if 'button' in touch.profile and touch.button.startswith('scroll'):
            return True

        return self._get_uid() in touch.ud
def update_from_scroll(self, *largs):
    '''Force the reposition of the content, according to current value of
    :data:`scroll_x` and :data:`scroll_y`.

    This method is automatically called when one of the :data:`scroll_x`,
    :data:`scroll_y`, :data:`pos` or :data:`size` properties change, or
    if the size of the content changes.
    '''
    if not self._viewport:
        return
    vp = self._viewport

    # update from size_hint
    if vp.size_hint_x is not None:
        vp.width = vp.size_hint_x * self.width
    if vp.size_hint_y is not None:
        vp.height = vp.size_hint_y * self.height

    # Content wider than the view: offset left edge by the scrolled
    # fraction of the overflow; otherwise pin content to our left edge.
    if vp.width > self.width:
        sw = vp.width - self.width
        x = self.x - self.scroll_x * sw
    else:
        x = self.x
    # Same vertically — note the else-branch pins the content's TOP to
    # our top (content shorter than the view hangs from the top edge).
    if vp.height > self.height:
        sh = vp.height - self.height
        y = self.y - self.scroll_y * sh
    else:
        y = self.top - vp.height
    vp.pos = x, y

    # new in 1.2.0, show bar when scrolling happen
    # and slowly remove them when no scroll is happening.
    self.bar_alpha = 1.
    Animation.stop_all(self, 'bar_alpha')
    Clock.unschedule(self._start_decrease_alpha)
    Clock.schedule_once(self._start_decrease_alpha, .5)
def _start_decrease_alpha(self, *largs):
    """Fade the scroll bars out once scrolling has been idle.

    Bars settle at a faint alpha, except when scroll_type is
    'content' — then they disappear completely.
    """
    self.bar_alpha = 1.
    if self.scroll_type[0] == 'c':
        target = 0
    else:
        target = .2
    anim = Animation(bar_alpha=target, d=.5, t='out_quart')
    anim.start(self)
#
# Private
#
def add_widget(self, widget, index=0):
    """Install *widget* as the single scrollable viewport.

    ScrollView supports exactly one child; adding a second raises.
    """
    if self._viewport is not None:
        raise Exception('ScrollView accept only one widget')
    super(ScrollView, self).add_widget(widget, index)
    self._viewport = widget
    # reposition the content whenever its size changes
    widget.bind(size=self._trigger_update_from_scroll)
    self._trigger_update_from_scroll()
def remove_widget(self, widget):
    """Detach *widget*, clearing the viewport reference if it was it."""
    super(ScrollView, self).remove_widget(widget)
    if self._viewport is widget:
        self._viewport = None
def _get_uid(self, prefix='sv'):
return '{0}.{1}'.format(prefix, self.uid)
def _change_touch_mode(self, *largs):
    # Timeout handler: the touch is still in 'unknown' mode (no scroll
    # gesture detected), so give it back to the children as a plain
    # touch-down.
    if not self._touch:
        return
    uid = self._get_uid()
    touch = self._touch
    ud = touch.ud[uid]
    # Already decided (scrolling) or the user tapped to stop an
    # autoscroll — nothing to re-dispatch.
    if ud['mode'] != 'unknown' or ud['user_stopped']:
        return
    if self.do_scroll_x and self.effect_x:
        self.effect_x.cancel()
    if self.do_scroll_y and self.effect_y:
        self.effect_y.cancel()
    # XXX the next line was in the condition. But this stop
    # the possibily to "drag" an object out of the scrollview in the
    # non-used direction: if you have an horizontal scrollview, a
    # vertical gesture will not "stop" the scroll view to look for an
    # horizontal gesture, until the timeout is done.
    # and touch.dx + touch.dy == 0:
    touch.ungrab(self)
    self._touch = None
    # correctly calculate the position of the touch inside the
    # scrollview
    touch.push()
    touch.apply_transform_2d(self.to_widget)
    touch.apply_transform_2d(self.to_parent)
    super(ScrollView, self).on_touch_down(touch)
    touch.pop()
    return
def _do_touch_up(self, touch, *largs):
    # Deferred touch-up: deliver the "up" half of a click that was
    # withheld while we waited to see if the touch became a scroll.
    super(ScrollView, self).on_touch_up(touch)
    # don't forget about grab event!
    # grab_list holds weakrefs; iterate over a copy since we mutate it.
    for x in touch.grab_list[:]:
        touch.grab_list.remove(x)
        x = x()  # dereference the weakref
        if not x:
            continue
        touch.grab_current = x
        super(ScrollView, self).on_touch_up(touch)
    touch.grab_current = None
if __name__ == '__main__':
    # Demo app: two scrollable 4-column grids of buttons side by side —
    # the left one scrolls by content drag, the right one by bars.
    from kivy.app import App
    from kivy.uix.gridlayout import GridLayout
    from kivy.uix.button import Button

    class ScrollViewApp(App):

        def build(self):
            # Grid sized to its children so it can overflow the view.
            layout1 = GridLayout(cols=4, spacing=10, size_hint=(None, None))
            layout1.bind(minimum_height=layout1.setter('height'),
                         minimum_width=layout1.setter('width'))
            for i in range(40):
                btn = Button(text=str(i), size_hint=(None, None),
                             size=(200, 100))
                layout1.add_widget(btn)
            scrollview1 = ScrollView(bar_width='2dp',)
            scrollview1.add_widget(layout1)

            layout2 = GridLayout(cols=4, spacing=10, size_hint=(None, None))
            layout2.bind(minimum_height=layout2.setter('height'),
                         minimum_width=layout2.setter('width'))
            for i in range(40):
                btn = Button(text=str(i), size_hint=(None, None),
                             size=(200, 100))
                layout2.add_widget(btn)
            scrollview2 = ScrollView(scroll_type='bars')
            scrollview2.add_widget(layout2)

            root = GridLayout(cols=2)
            root.add_widget(scrollview1)
            root.add_widget(scrollview2)
            return root

    ScrollViewApp().run()
| {
"content_hash": "11137988142a9100b55d195eddcd9bc3",
"timestamp": "",
"source": "github",
"line_count": 730,
"max_line_length": 80,
"avg_line_length": 35.586301369863016,
"alnum_prop": 0.5905766417738086,
"repo_name": "5y/kivy",
"id": "26974a30398caeaa8d07dad57567f9b17e183f4a",
"size": "25978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kivy/uix/scrollview.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import argparse
import logging
import math
import os
import sqlite3
from collections import defaultdict
from pathlib import Path
from struct import unpack
import cv2
import matplotlib.pyplot as pl
import numpy as np
import opensfm.actions.undistort as osfm_u
from matplotlib import cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
from opensfm import dataset, features, pygeometry, pymap, types
# Directory (created next to the COLMAP database) that receives the
# generated OpenSfM dataset.
EXPORT_DIR_NAME = "opensfm_export"
logger = logging.getLogger(__name__)

# COLMAP camera model id -> (model name, number of parameters).
camera_models = {
    0: ("SIMPLE_PINHOLE", 3),
    1: ("PINHOLE", 4),
    2: ("SIMPLE_RADIAL", 4),
    3: ("RADIAL", 5),
    4: ("OPENCV", 8),
    5: ("OPENCV_FISHEYE", 8),
    6: ("FULL_OPENCV", 12),
    7: ("FOV", 5),
    8: ("SIMPLE_RADIAL_FISHEYE", 4),
    9: ("RADIAL_FISHEYE", 5),
    10: ("THIN_PRISM_FISHEYE", 12),
}
def compute_and_save_undistorted_reconstruction(
    reconstruction, tracks_manager, data, udata
):
    """Build and persist an undistorted (perspective-only) copy of
    *reconstruction*, undistorting every shot's image along the way.

    Returns the undistorted reconstruction.
    """
    image_format = data.config["undistorted_image_format"]
    urec = types.Reconstruction()
    utracks_manager = pymap.TracksManager()
    undistorted_shots = []
    for shot in reconstruction.shots.values():
        # Map each supported projection model to a perspective camera.
        if shot.camera.projection_type == "perspective":
            ucamera = osfm_u.perspective_camera_from_perspective(shot.camera)
        elif shot.camera.projection_type == "brown":
            ucamera = osfm_u.perspective_camera_from_brown(shot.camera)
        elif shot.camera.projection_type == "fisheye":
            ucamera = osfm_u.perspective_camera_from_fisheye(shot.camera)
        else:
            raise ValueError
        urec.add_camera(ucamera)
        ushot = osfm_u.get_shot_with_different_camera(urec, shot, image_format)
        if tracks_manager:
            osfm_u.add_subshot_tracks(tracks_manager, utracks_manager, shot, ushot)
        # NOTE(review): undistorted_shots accumulates across iterations, so
        # every shot processed so far is handed to undistort_image below —
        # confirm whether only [ushot] was intended.
        undistorted_shots.append(ushot)

        image = data.load_image(shot.id, unchanged=True, anydepth=True)
        if image is not None:
            max_size = data.config["undistorted_image_max_size"]
            undistorted = osfm_u.undistort_image(
                shot, undistorted_shots, image, cv2.INTER_AREA, max_size
            )
            for k, v in undistorted.items():
                udata.save_undistorted_image(k, v)

    udata.save_undistorted_reconstruction([urec])
    if tracks_manager:
        udata.save_undistorted_tracks_manager(utracks_manager)
    return urec
def small_colorbar(ax, mappable=None):
    """Attach a slim (5% wide) colorbar to the right-hand side of *ax*."""
    cax = make_axes_locatable(ax).append_axes("right", size="5%", pad=0.05)
    pl.colorbar(cax=cax, mappable=mappable)
def depth_colormap(d, cmap=None, invalid_val=0, invalid_color=(0.5, 0.5, 0.5)):
    """
    Color-map the array *d*, painting entries equal to *invalid_val*
    with *invalid_color*. Returns (rgb, scalar_mappable).
    """
    mappable = cm.ScalarMappable(cmap=cm.get_cmap(cmap))
    mappable.set_array(d)
    # drop the alpha channel, keep RGB only
    rgb = mappable.to_rgba(d)[:, :, :3]
    rgb[d == invalid_val] = invalid_color
    return rgb, mappable
def import_cameras_images(db, data):
    """Import camera models and per-image EXIF stubs from a COLMAP database.

    Parameters
    ----------
    db : sqlite3.Connection to the COLMAP database.
    data : OpenSfM DataSet receiving camera models and exif records.

    Returns
    -------
    (cameras, images_map) where cameras maps camera_id -> camera object and
    images_map maps image_id -> (filename, camera_id).
    """
    cursor = db.cursor()
    cursor.execute(
        "SELECT camera_id, model, width, height, prior_focal_length, params FROM "
        "cameras;"
    )
    cameras = {}
    for row in cursor:
        camera_id, camera_model_id, width, height, prior_focal, params = row
        # np.frombuffer replaces the deprecated np.fromstring (binary mode);
        # the params blob is only read, so a read-only view is fine.
        params = np.frombuffer(params, dtype=np.double)
        cam = cam_from_colmap_params(
            camera_model_id, width, height, params, prior_focal
        )
        cam.id = str(camera_id)
        cameras[camera_id] = cam
    data.save_camera_models(cameras)

    images_map = {}
    cursor.execute("SELECT image_id, camera_id, name FROM images;")
    for row in cursor:
        image_id, camera_id, filename = int(row[0]), int(row[1]), row[2]
        images_map[image_id] = (filename, camera_id)
        cam = cameras[camera_id]
        focal_ratio = cam.focal_x if cam.projection_type == "brown" else cam.focal
        # Minimal EXIF stub — COLMAP stores no capture metadata.
        exif_data = {
            "make": "unknown",
            "model": "unknown",
            "width": cam.width,
            "height": cam.height,
            "projection_type": cam.projection_type,
            "focal_ratio": focal_ratio,
            "orientation": 1,
            "camera": "{}".format(camera_id),
            "skey": "TheSequence",
            "capture_time": 0.0,
            "gps": {},
        }
        data.save_exif(filename, exif_data)
    cursor.close()
    return cameras, images_map
def pair_id_to_image_ids(pair_id):
    """Invert COLMAP's pair encoding: pair_id = id1 * 2147483647 + id2."""
    image_id1, image_id2 = divmod(pair_id, 2147483647)
    return image_id1, image_id2
def get_scale_orientation_from_affine(arr):
    """Recover per-keypoint scale and orientation from affine parameters.

    *arr* rows are (x, y, a_11, a_12, a_21, a_22); scale is the mean of
    the two column norms of the 2x2 affine part, orientation the angle
    of its first column.
    """
    a11, a12, a21, a22 = (arr[:, i] for i in range(2, 6))
    scale_x = np.sqrt(a11 * a11 + a21 * a21)
    scale_y = np.sqrt(a12 * a12 + a22 * a22)
    # shear = np.arctan2(-a12, a22) - orientation
    return (scale_x + scale_y) / 2, np.arctan2(a21, a11)
def import_features(db, data, image_map, camera_map):
    """Import keypoints (with sampled colors) and descriptors from the
    COLMAP database into the OpenSfM dataset.

    Returns a dict image_id -> (N, 4) array of (x, y, scale, orientation)
    in normalized image coordinates.
    """
    cursor = db.cursor()
    cursor.execute("SELECT image_id, rows, cols, data FROM keypoints;")
    keypoints = {}
    colors = {}
    for row in cursor:
        image_id, n_rows, n_cols, arr = row
        filename, camera_id = image_map[image_id]
        cam = camera_map[camera_id]
        # np.frombuffer replaces the deprecated np.fromstring; .copy()
        # restores a writable array because the normalization below
        # mutates it in place.
        arr = np.frombuffer(arr, dtype=np.float32).reshape((n_rows, n_cols)).copy()

        # Sample keypoint colors from the image (note the row/col swap:
        # arr[:, 0] is x -> image column, arr[:, 1] is y -> image row).
        rgb = data.load_image(filename).astype(np.float32)
        xc = np.clip(arr[:, 1].astype(int), 0, rgb.shape[0] - 1)
        yc = np.clip(arr[:, 0].astype(int), 0, rgb.shape[1] - 1)
        colors[image_id] = rgb[xc, yc, :]

        arr[:, :2] = features.normalized_image_coordinates(
            arr[:, :2], cam.width, cam.height
        )
        # COLMAP keypoint layouts: 4 = (x, y, scale, orientation),
        # 6 = (x, y, affine 2x2), 2 = (x, y) only.
        if n_cols == 4:
            x, y, s, o = arr[:, 0], arr[:, 1], arr[:, 2], arr[:, 3]
        elif n_cols == 6:
            x, y = arr[:, 0], arr[:, 1]
            s, o = get_scale_orientation_from_affine(arr)
        elif n_cols == 2:
            x, y = arr[:, 0], arr[:, 1]
            s = np.zeros_like(x)
            o = np.zeros_like(x)
        else:
            raise ValueError
        s = s / max(cam.width, cam.height)
        keypoints[image_id] = np.vstack((x, y, s, o)).T

    cursor.execute("SELECT image_id, rows, cols, data FROM descriptors;")
    for row in cursor:
        image_id, n_rows, n_cols, arr = row
        filename, _ = image_map[image_id]
        # Read-only view is fine here: descriptors are only written out.
        descriptors = np.frombuffer(arr, dtype=np.uint8).reshape((n_rows, n_cols))
        kp = keypoints[image_id]
        features_data = features.FeaturesData(kp, descriptors, colors[image_id], None)
        data.save_features(filename, features_data)
    cursor.close()
    return keypoints
def import_matches(db, data, image_map):
    """Import two-view inlier matches from the COLMAP database and save
    them grouped by first image name."""
    cursor = db.cursor()
    min_matches = 1
    cursor.execute(
        "SELECT pair_id, data FROM two_view_geometries WHERE rows>=?;", (min_matches,)
    )
    # One (possibly empty) match dict per image, keyed by image name.
    matches_per_im1 = {m[0]: {} for m in image_map.values()}
    for row in cursor:
        pair_id = row[0]
        # np.frombuffer replaces the deprecated np.fromstring; the match
        # array is only read, never mutated.
        inlier_matches = np.frombuffer(row[1], dtype=np.uint32).reshape(-1, 2)
        image_id1, image_id2 = pair_id_to_image_ids(pair_id)
        image_name1 = image_map[image_id1][0]
        image_name2 = image_map[image_id2][0]
        matches_per_im1[image_name1][image_name2] = inlier_matches

    for image_name1, matches in matches_per_im1.items():
        data.save_matches(image_name1, matches)
    cursor.close()
def import_cameras_reconstruction(path_cameras, rec):
    """
    Imports cameras from a COLMAP reconstruction cameras.bin file
    """
    logger.info("Importing cameras from {}".format(path_cameras))
    with open(path_cameras, "rb") as f:
        (n_cameras,) = unpack("<Q", f.read(8))
        for _ in range(n_cameras):
            # Fixed-size record header: id, model id, width, height.
            camera_id, camera_model_id = unpack("<ii", f.read(8))
            width, height = unpack("<QQ", f.read(16))
            # Variable-length tail: one double per model parameter.
            n_params = camera_models[camera_model_id][1]
            params = list(unpack("<{}d".format(n_params), f.read(8 * n_params)))
            cam = cam_from_colmap_params(camera_model_id, width, height, params)
            cam.id = str(camera_id)
            rec.add_camera(cam)
def cam_from_colmap_params(camera_model_id, width, height, params, prior_focal=1):
    """
    Helper function to map from colmap parameters to an OpenSfM camera
    """
    # Only PINHOLE, RADIAL and RADIAL_FISHEYE are handled.
    mapping = {1: "pinhole", 3: "perspective", 9: "fisheye"}
    if camera_model_id not in mapping:
        raise ValueError("Not supported: " + camera_models[camera_model_id][0])
    projection_type = mapping[camera_model_id]
    # Without a focal prior, fall back to the 0.85 default focal ratio.
    focal = params[0] / max(width, height) if prior_focal else 0.85
    if projection_type == "perspective":
        cam = pygeometry.Camera.create_perspective(focal, params[3], params[4])
    elif projection_type == "pinhole":
        cam = pygeometry.Camera.create_perspective(focal, 0, 0)
    else:  # projection_type == 'fisheye'
        cam = pygeometry.Camera.create_fisheye(focal, params[3], 0)
    cam.width = width
    cam.height = height
    return cam
def import_points_reconstruction(path_points, rec):
    """Import 3D points (position and color) from COLMAP's points3D.bin."""
    logger.info("Importing points from {}".format(path_points))
    with open(path_points, "rb") as f:
        (n_points,) = unpack("<Q", f.read(8))
        for _ in range(n_points):
            # Record: id (u64), xyz (3 x f64), rgb (3 x u8),
            # reprojection error (f64) = 43 bytes, little-endian, packed.
            pid, x, y, z, r, g, b, _err = unpack("<Q3d3Bd", f.read(43))
            (track_len,) = unpack("<Q", f.read(8))
            # Ignore track info
            f.seek(8 * track_len, 1)
            p = rec.create_point(str(pid), (x, y, z))
            p.color = (r, g, b)
def read_colmap_ply(path_ply):
    """
    Reads the ply output from COLMAP.
    This is not a generic ply binary reader but a quick hack to read only this file
    """
    logger.info("Reading fused pointcloud {}".format(path_ply))
    # Exact header this reader supports (the vertex count is canonicalized
    # away before comparison, see below).
    header_should_be = [
        "ply\n",
        "format binary_little_endian 1.0\n",
        "element vertex\n",
        "property float x\n",
        "property float y\n",
        "property float z\n",
        "property float nx\n",
        "property float ny\n",
        "property float nz\n",
        "property uchar red\n",
        "property uchar green\n",
        "property uchar blue\n",
        "end_header\n",
    ]
    # Structured dtype matching the binary vertex record above.
    properties = [
        ("x", "<f4"),
        ("y", "<f4"),
        ("z", "<f4"),
        ("nx", "<f4"),
        ("ny", "<f4"),
        ("nz", "<f4"),
        ("red", "<u1"),
        ("green", "<u1"),
        ("blue", "<u1"),
    ]
    n_vertices = 0
    with open(path_ply, "rb") as f:
        header = []
        for line in f:
            line = line.decode()
            if line.startswith("element vertex"):
                # Remember the count, then strip it so the header compares
                # equal to the template.
                n_vertices = int(line.strip().split()[-1])
                line = "element vertex\n"
            header.append(line)
            if line == header_should_be[-1]:
                break
        assert header == header_should_be
        # NOTE(review): this relies on the buffered file position sitting
        # exactly after "end_header" when line iteration stops, so that
        # np.fromfile starts at the binary payload — confirm with the
        # numpy/io versions in use.
        data = np.fromfile(f, dtype=properties, count=n_vertices)
    points, normals, colors = [], [], []
    for row in data:
        points.append(np.array([row[0], row[1], row[2]]))
        normals.append(np.array([row[3], row[4], row[5]]))
        colors.append(np.array([row[6], row[7], row[8]]))
    return np.array(points), np.array(normals), np.array(colors)
def import_images_reconstruction(path_images, keypoints, rec):
    """
    Read images.bin, building shots and tracks graph
    """
    logger.info("Importing images from {}".format(path_images))
    tracks_manager = pymap.TracksManager()
    # COLMAP's positional image index -> OpenSfM shot id, needed later to
    # decode the .vis file which refers to images by position.
    image_ix_to_shot_id = {}
    with open(path_images, "rb") as f:
        n_ims = unpack("<Q", f.read(8))[0]
        for image_ix in range(n_ims):
            image_id = unpack("<I", f.read(4))[0]
            # Pose: quaternion (w, x, y, z) followed by translation.
            q0 = unpack("<d", f.read(8))[0]
            q1 = unpack("<d", f.read(8))[0]
            q2 = unpack("<d", f.read(8))[0]
            q3 = unpack("<d", f.read(8))[0]
            t0 = unpack("<d", f.read(8))[0]
            t1 = unpack("<d", f.read(8))[0]
            t2 = unpack("<d", f.read(8))[0]
            camera_id = unpack("<I", f.read(4))[0]
            # Image name is stored as a NUL-terminated string.
            filename = ""
            while True:
                c = f.read(1).decode()
                if c == "\0":
                    break
                filename += c
            q = np.array([q0, q1, q2, q3])
            q /= np.linalg.norm(q)
            t = np.array([t0, t1, t2])
            pose = pygeometry.Pose(rotation=quaternion_to_angle_axis(q), translation=t)
            shot = rec.create_shot(filename, str(camera_id), pose)
            image_ix_to_shot_id[image_ix] = shot.id

            # 2D observations; uint64 max marks a keypoint without a
            # 3D track, which is skipped.
            n_points_2d = unpack("<Q", f.read(8))[0]
            for point2d_ix in range(n_points_2d):
                x = unpack("<d", f.read(8))[0]
                y = unpack("<d", f.read(8))[0]
                point3d_id = unpack("<Q", f.read(8))[0]
                if point3d_id != np.iinfo(np.uint64).max:
                    kp = keypoints[image_id][point2d_ix]
                    r, g, b = rec.points[str(point3d_id)].color
                    obs = pymap.Observation(
                        x,
                        y,
                        kp[2],
                        int(r),
                        int(g),
                        int(b),
                        point2d_ix,
                    )
                    tracks_manager.add_observation(shot.id, str(point3d_id), obs)
    return tracks_manager, image_ix_to_shot_id
def read_vis(path_vis, image_ix_to_shot_id):
    """Parse COLMAP's .vis file: for each fused point, the images seeing it.

    Returns a dict shot_id -> list of point indices visible in that shot.
    """
    logger.info("Reading visibility file {}".format(path_vis))
    points_seen = defaultdict(list)
    with open(path_vis, "rb") as f:
        (n_points,) = unpack("<Q", f.read(8))
        for point_ix in range(n_points):
            (n_images,) = unpack("<I", f.read(4))
            for _ in range(n_images):
                (image_ix,) = unpack("<I", f.read(4))
                points_seen[image_ix_to_shot_id[image_ix]].append(point_ix)
    # sanity check: a point should be listed at most once per shot
    for seen in points_seen.values():
        assert len(seen) == len(set(seen))
    return points_seen
def import_depthmaps_from_fused_pointcloud(udata, urec, image_ix_to_shot_id, path_ply):
    """
    Imports the depthmaps by reprojecting the fused pointcloud
    """
    points, _normals, _colors = read_colmap_ply(path_ply)
    # COLMAP writes the visibility data alongside as <name>.ply.vis.
    points_seen = read_vis(path_ply.with_suffix(".ply.vis"), image_ix_to_shot_id)
    max_size = udata.config["depthmap_resolution"]
    # Project each shot's visible subset of the cloud into a depth map.
    for shot_id, seen_ixs in points_seen.items():
        logger.info("Projecting shot {}".format(shot_id))
        project_pointcloud_save_depth(
            udata, urec, points[seen_ixs], shot_id, max_size
        )
def project_pointcloud_save_depth(udata, urec, points, shot_id, max_sz):
    """Project *points* into the shot and save the result as a depth map.

    Writes both the .npz depth map (OpenSfM 'clean' format) and a PNG
    visualization under plot_depthmaps/.
    """
    shot = urec.shots[shot_id]
    w, h = shot.camera.width, shot.camera.height
    # Cap the longest side at max_sz, preserving the aspect ratio.
    large = max(w, h)
    if large > max_sz:
        ar = w / h
        if w > h:
            w = max_sz
            h = int(w / ar)
        else:
            h = max_sz
            w = int(ar * h)
    points_2d = shot.project_many(points)
    pixel_coords = features.denormalized_image_coordinates(points_2d, w, h).astype(int)
    # Filter out points that fall out of the image
    # <<< aren't we supposed to have points that are visible from this image only??!?!
    mask = np.ones(pixel_coords.shape[0], dtype=bool)
    mask[pixel_coords[:, 0] < 0] = 0
    mask[pixel_coords[:, 1] < 0] = 0
    mask[pixel_coords[:, 0] >= w] = 0
    mask[pixel_coords[:, 1] >= h] = 0
    pixel_coords = pixel_coords[mask]

    # Depth = distance along the optical axis: range to the point times
    # the cosine of its viewing angle.
    distances = np.linalg.norm(points - shot.pose.get_origin(), axis=1)
    viewing_angles = np.arctan2(np.linalg.norm(points_2d, axis=1), shot.camera.focal)
    depths = distances * np.cos(viewing_angles)
    depths[depths > udata.config["depthmap_max_depth"]] = 0

    # Create depth image
    depth_image = np.zeros([h, w])
    depth_image[pixel_coords[:, 1], pixel_coords[:, 0]] = depths[mask]

    # Save numpy (plane/score are placeholders expected by the format)
    filepath = Path(udata.depthmap_file(shot_id, "clean.npz"))
    filepath.parent.mkdir(exist_ok=True, parents=True)
    np.savez_compressed(
        filepath, depth=depth_image, plane=np.zeros(1), score=np.zeros(1)
    )

    # Save png for visualization. Use the module-level pyplot import (pl)
    # instead of re-importing matplotlib locally under a second alias.
    fig = pl.figure()
    rgb, sm = depth_colormap(depth_image)
    pl.imshow(rgb)
    small_colorbar(pl.gca(), mappable=sm)
    filepath = Path(udata.data_path) / "plot_depthmaps" / "{}.png".format(shot_id)
    filepath.parent.mkdir(exist_ok=True, parents=True)
    pl.savefig(filepath, dpi=300)
    pl.close(fig)
def quaternion_to_angle_axis(quaternion):
    """Convert a (w, x, y, z) quaternion to an angle-axis rotation vector.

    The input is normalized unconditionally: the original code normalized
    only when qw > 1, so a slightly denormalized quaternion with qw < -1
    (or float noise pushing 1 - qw*qw negative) crashed math.sqrt with a
    domain error. The acos argument is clamped for the same reason.
    """
    quaternion = np.asarray(quaternion, dtype=float)
    norm = np.linalg.norm(quaternion)
    if norm > 0:
        quaternion = quaternion / norm
    qw, qx, qy, qz = quaternion
    # Floor s to avoid dividing by ~0 for (near-)identity rotations.
    s = max(0.001, math.sqrt(max(0.0, 1 - qw * qw)))
    angle = 2 * math.acos(min(1.0, max(-1.0, qw)))
    return [angle * qx / s, angle * qy / s, angle * qz / s]
def main():
    """Convert a COLMAP database (and, when present, its binary
    reconstruction and fused pointcloud) into an OpenSfM dataset created
    next to the database file."""
    parser = argparse.ArgumentParser(
        description="Convert COLMAP database to OpenSfM dataset"
    )
    parser.add_argument("database", help="path to the database to be processed")
    parser.add_argument("images", help="path to the images")
    args = parser.parse_args()

    # Fixed message direction: this tool converts COLMAP -> OpenSfM.
    logger.info(f"Converting {args.database} from COLMAP to OpenSfM")
    p_db = Path(args.database)
    assert p_db.is_file()
    export_folder = p_db.parent / EXPORT_DIR_NAME
    export_folder.mkdir(exist_ok=True)
    images_path = export_folder / "images"
    if not images_path.exists():
        os.symlink(os.path.abspath(args.images), images_path, target_is_directory=True)

    # Copy the config if this is an colmap export of an opensfm export
    if (
        p_db.parent.name == "colmap_export"
        and not (export_folder / "config.yaml").exists()
    ):
        os.symlink(p_db.parent.parent / "config.yaml", export_folder / "config.yaml")

    data = dataset.DataSet(export_folder)
    db = sqlite3.connect(p_db.as_posix())
    camera_map, image_map = import_cameras_images(db, data)

    # Create image_list.txt
    with open(export_folder / "image_list.txt", "w") as f:
        for filename, _ in image_map.values():
            f.write("images/" + filename + "\n")
    data.load_image_list()

    keypoints = import_features(db, data, image_map, camera_map)
    import_matches(db, data, image_map)

    rec_cameras = p_db.parent / "cameras.bin"
    rec_points = p_db.parent / "points3D.bin"
    rec_images = p_db.parent / "images.bin"
    if rec_cameras.exists() and rec_images.exists() and rec_points.exists():
        reconstruction = types.Reconstruction()
        import_cameras_reconstruction(rec_cameras, reconstruction)
        import_points_reconstruction(rec_points, reconstruction)
        tracks_manager, _ = import_images_reconstruction(
            rec_images, keypoints, reconstruction
        )
        data.save_reconstruction([reconstruction])
        data.save_tracks_manager(tracks_manager)

        # Save undistorted reconstruction as well
        udata = dataset.UndistortedDataSet(data, io_handler=data.io_handler)
        urec = compute_and_save_undistorted_reconstruction(
            reconstruction, tracks_manager, data, udata
        )

        # Project colmap's fused pointcloud to save depths in opensfm format
        path_ply = p_db.parent / "dense/fused.ply"
        if path_ply.is_file():
            rec_cameras = p_db.parent / "dense/sparse/cameras.bin"
            rec_images = p_db.parent / "dense/sparse/images.bin"
            # NOTE(review): the dense branch re-reads the top-level
            # points3D.bin instead of dense/sparse/points3D.bin — confirm
            # whether that is intentional.
            rec_points = p_db.parent / "points3D.bin"
            reconstruction = types.Reconstruction()
            import_cameras_reconstruction(rec_cameras, reconstruction)
            import_points_reconstruction(rec_points, reconstruction)
            _, image_ix_to_shot_id = import_images_reconstruction(
                rec_images, keypoints, reconstruction
            )
            logger.info(f"Projecting {path_ply} to depth images")
            import_depthmaps_from_fused_pointcloud(
                udata, urec, image_ix_to_shot_id, path_ply
            )
        else:
            logger.info(
                "Not importing dense reconstruction: Didn't find {}".format(path_ply)
            )
    else:
        logger.info(
            "Didn't find some of the reconstruction files at {}".format(p_db.parent)
        )
    db.close()
# Script entry point.
if __name__ == "__main__":
    main()
| {
"content_hash": "1d14af3b7a6aef0e9d0648db1376983c",
"timestamp": "",
"source": "github",
"line_count": 590,
"max_line_length": 87,
"avg_line_length": 35.271186440677965,
"alnum_prop": 0.5747717443536761,
"repo_name": "mapillary/OpenSfM",
"id": "9388883f4a8673404148c80f4a11a3a393f5056c",
"size": "21093",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "bin/import_colmap.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "396"
},
{
"name": "C++",
"bytes": "648986"
},
{
"name": "CMake",
"bytes": "78367"
},
{
"name": "CSS",
"bytes": "6426"
},
{
"name": "Dockerfile",
"bytes": "642"
},
{
"name": "HTML",
"bytes": "63144"
},
{
"name": "JavaScript",
"bytes": "1054984"
},
{
"name": "Python",
"bytes": "1141169"
},
{
"name": "Shell",
"bytes": "4006"
}
],
"symlink_target": ""
} |
from functools import lru_cache
def maxgain(n, weights, values, capacity):
    """0/1 knapsack: maximum total value of items 0..n-1 within capacity.

    Bottom-up dynamic programming over a single capacity row; each item
    is considered at most once by sweeping capacities downwards.
    """
    best = [0] * (capacity + 1)
    for ind in range(n):
        w = weights[ind]
        v = values[ind]
        for cap in range(capacity, w - 1, -1):
            candidate = best[cap - w] + v
            if candidate > best[cap]:
                best[cap] = candidate
    return best[capacity]
# Driver: reads `t` test cases from stdin. Each case is four lines:
# item count, space-separated weights, space-separated values, capacity.
t = int(input())
for _ in range(t):
    n = int(input())
    weights = input().split()
    weights = [int(w) for w in weights]
    values = input().split()
    values = [int(v) for v in values]
    capacity = int(input())
    print(maxgain(n, weights, values, capacity))
| {
"content_hash": "818013564a736a525879dd5a6a8d943b",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 80,
"avg_line_length": 30.73076923076923,
"alnum_prop": 0.6157697121401752,
"repo_name": "sayak1711/coding_solutions",
"id": "ca5c54a879960d5b90f5ea4eb321ac4d21e12e34",
"size": "870",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coding-practice/Dynamic Programming/0 1 knapsack recursive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "15194"
},
{
"name": "Java",
"bytes": "518"
},
{
"name": "Python",
"bytes": "77655"
},
{
"name": "Ruby",
"bytes": "2076"
}
],
"symlink_target": ""
} |
"""
Msgpack serializer support for reading and writing pandas data structures
to disk
portions of msgpack_numpy package, by Lev Givon were incorporated
into this module (and tests_packers.py)
License
=======
Copyright (c) 2013, Lev Givon.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Lev Givon nor the names of any
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from datetime import datetime, date, timedelta
from dateutil.parser import parse
import os
from textwrap import dedent
import warnings
import numpy as np
from pandas import compat
from pandas.compat import u, u_safe
from pandas.core.dtypes.common import (
is_categorical_dtype, is_object_dtype,
needs_i8_conversion, pandas_dtype)
from pandas import (Timestamp, Period, Series, DataFrame, # noqa
Index, MultiIndex, Float64Index, Int64Index,
Panel, RangeIndex, PeriodIndex, DatetimeIndex, NaT,
Categorical, CategoricalIndex)
from pandas._libs.tslib import NaTType
from pandas.core.sparse.api import SparseSeries, SparseDataFrame
from pandas.core.sparse.array import BlockIndex, IntIndex
from pandas.core.generic import NDFrame
from pandas.errors import PerformanceWarning
from pandas.io.common import get_filepath_or_buffer, _stringify_path
from pandas.core.internals import BlockManager, make_block, _safe_reshape
import pandas.core.internals as internals
from pandas.io.msgpack import Unpacker as _Unpacker, Packer as _Packer, ExtType
from pandas.util._move import (
BadMove as _BadMove,
move_into_mutable_buffer as _move_into_mutable_buffer,
)
# check which compression libs we have installed
# Import zlib if available; otherwise install a stub that raises lazily,
# so missing compression support only fails when actually requested.
try:
    import zlib

    def _check_zlib():
        pass
except ImportError:
    def _check_zlib():
        raise ImportError('zlib is not installed')

_check_zlib.__doc__ = dedent(
    """\
    Check if zlib is installed.

    Raises
    ------
    ImportError
        Raised when zlib is not installed.
    """,
)
# Same lazy-failure pattern for the optional blosc compressor.
try:
    import blosc

    def _check_blosc():
        pass
except ImportError:
    def _check_blosc():
        raise ImportError('blosc is not installed')

_check_blosc.__doc__ = dedent(
    """\
    Check if blosc is installed.

    Raises
    ------
    ImportError
        Raised when blosc is not installed.
    """,
)
# Module-level compression choice ('zlib', 'blosc' or None); written by
# to_msgpack() and read by convert().
# until we can pass this into our conversion functions,
# this is pretty hacky
compressor = None
def to_msgpack(path_or_buf, *args, **kwargs):
    """
    msgpack (serialize) object to input file path

    THIS IS AN EXPERIMENTAL LIBRARY and the storage format
    may not be stable until a future release.

    Parameters
    ----------
    path_or_buf : string File path, buffer-like, or None
        if None, return generated string
    args : an object or objects to serialize
    encoding : encoding for unicode objects
    append : boolean whether to append to an existing msgpack
        (default is False)
    compress : type of compressor (zlib or blosc), default to None (no
        compression)
    """
    # The chosen compressor is stashed in the module-level `compressor`
    # global so convert() can see it (acknowledged hack, see above).
    global compressor
    compressor = kwargs.pop('compress', None)
    if compressor:
        compressor = u(compressor)
    append = kwargs.pop('append', None)
    if append:
        mode = 'a+b'
    else:
        mode = 'wb'

    def writer(fh):
        # pack each requested object to the file handle in sequence
        for a in args:
            fh.write(pack(a, **kwargs))

    path_or_buf = _stringify_path(path_or_buf)
    if isinstance(path_or_buf, compat.string_types):
        with open(path_or_buf, mode) as fh:
            writer(fh)
    elif path_or_buf is None:
        # no target: serialize into memory and return the bytes
        buf = compat.BytesIO()
        writer(buf)
        return buf.getvalue()
    else:
        # assume an open, writable buffer
        writer(path_or_buf)
def read_msgpack(path_or_buf, encoding='utf-8', iterator=False, **kwargs):
    """
    Load msgpack pandas object from the specified file path.

    THIS IS AN EXPERIMENTAL LIBRARY and the storage format
    may not be stable until a future release.

    Parameters
    ----------
    path_or_buf : string File path, BytesIO like or string
    encoding : Encoding for decoding msgpack str type
    iterator : boolean, if True, return an iterator to the unpacker
        (default is False)

    Returns
    -------
    obj : type of object stored in file
    """
    path_or_buf, _, _ = get_filepath_or_buffer(path_or_buf)
    if iterator:
        return Iterator(path_or_buf)

    def read(fh):
        # a single packed object is returned bare, several as a list
        unpacked = list(unpack(fh, encoding=encoding, **kwargs))
        return unpacked[0] if len(unpacked) == 1 else unpacked

    # see if we have an actual file
    if isinstance(path_or_buf, compat.string_types):
        try:
            exists = os.path.exists(path_or_buf)
        except (TypeError, ValueError):
            exists = False
        if exists:
            with open(path_or_buf, 'rb') as fh:
                return read(fh)

    # treat as a binary-like
    if isinstance(path_or_buf, compat.binary_type):
        fh = None
        try:
            fh = compat.BytesIO(path_or_buf)
            return read(fh)
        finally:
            if fh is not None:
                fh.close()

    # a buffer like
    if hasattr(path_or_buf, 'read') and compat.callable(path_or_buf.read):
        return read(path_or_buf)

    raise ValueError('path_or_buf needs to be a string file path or file-like')
# Deserialization mapping: numeric codes / dtype names found in the
# msgpack stream -> numpy dtype to rebuild the array with.
dtype_dict = {21: np.dtype('M8[ns]'),
              u('datetime64[ns]'): np.dtype('M8[ns]'),
              u('datetime64[us]'): np.dtype('M8[us]'),
              22: np.dtype('m8[ns]'),
              u('timedelta64[ns]'): np.dtype('m8[ns]'),
              u('timedelta64[us]'): np.dtype('m8[us]'),

              # this is platform int, which we need to remap to np.int64
              # for compat on windows platforms
              7: np.dtype('int64'),
              'category': 'category'
              }
def dtype_for(t):
    """ return my dtype mapping, whether number or name """
    if t in dtype_dict:
        return dtype_dict[t]
    # NOTE(review): np.typeDict was deprecated and removed in numpy >= 1.24;
    # np.sctypeDict is the long-standing alias — confirm the supported
    # numpy range before switching.
    return np.typeDict.get(t, t)
# Complex dtype name -> float type of its real/imag components, used by
# c2f() to rebuild complex scalars from their stringified parts.
c2f_dict = {'complex': np.float64,
            'complex128': np.float64,
            'complex64': np.float32}

# numpy 1.6.1 compat
if hasattr(np, 'float128'):
    c2f_dict['complex256'] = np.float128
def c2f(r, i, ctype_name):
    """
    Convert strings to complex number instance with specified numpy type.

    Parameters
    ----------
    r, i : str or number
        Real and imaginary parts.
    ctype_name : str
        Complex dtype name (key of c2f_dict, e.g. 'complex128').
    """
    ftype = c2f_dict[ctype_name]
    # np.dtype(name).type replaces np.typeDict[name], which was removed
    # from numpy (>= 1.24); it resolves to the same scalar type.
    return np.dtype(ctype_name).type(ftype(r) + 1j * ftype(i))
def convert(values):
    """ convert the numpy values to a list (or packed ExtType blob)

    Categoricals pass through, object arrays become plain lists, and
    numeric arrays are serialized as raw bytes — optionally compressed
    according to the module-level `compressor` global.
    """
    dtype = values.dtype
    if is_categorical_dtype(values):
        return values
    elif is_object_dtype(dtype):
        return values.ravel().tolist()
    if needs_i8_conversion(dtype):
        # datetimes/timedeltas travel as their int64 representation
        values = values.view('i8')
    v = values.ravel()

    if compressor == 'zlib':
        _check_zlib()

        # return string arrays like they are
        if dtype == np.object_:
            return v.tolist()

        # convert to a bytes array; tobytes() is the non-deprecated
        # spelling of the removed tostring()
        v = v.tobytes()
        return ExtType(0, zlib.compress(v))

    elif compressor == 'blosc':
        _check_blosc()

        # return string arrays like they are
        if dtype == np.object_:
            return v.tolist()

        # convert to a bytes array
        v = v.tobytes()
        return ExtType(0, blosc.compress(v, typesize=dtype.itemsize))

    # ndarray (on original dtype)
    return ExtType(0, v.tobytes())
def unconvert(values, dtype, compress=None):
    """Inverse of convert(): rebuild an ndarray from packed values.

    Handles the ExtType(0, ...) raw-bytes blobs written by convert(),
    optionally decompressing with zlib or blosc.
    """
    as_is_ext = isinstance(values, ExtType) and values.code == 0

    if as_is_ext:
        values = values.data

    if is_categorical_dtype(dtype):
        return values

    elif is_object_dtype(dtype):
        return np.array(values, dtype=object)

    dtype = pandas_dtype(dtype).base

    if not as_is_ext:
        values = values.encode('latin1')

    if compress:
        if compress == u'zlib':
            _check_zlib()
            decompress = zlib.decompress
        elif compress == u'blosc':
            _check_blosc()
            decompress = blosc.decompress
        else:
            raise ValueError("compress must be one of 'zlib' or 'blosc'")

        try:
            # Zero-copy path: steal the decompressed buffer when possible.
            return np.frombuffer(
                _move_into_mutable_buffer(decompress(values)),
                dtype=dtype,
            )
        except _BadMove as e:
            # Pull the decompressed data off of the `_BadMove` exception.
            # We don't just store this in the locals because we want to
            # minimize the risk of giving users access to a `bytes` object
            # whose data is also given to a mutable buffer.
            values = e.args[0]
            if len(values) > 1:
                # The empty string and single characters are memoized in many
                # string creating functions in the capi. This case should not
                # warn even though we need to make a copy because we are only
                # copying at most 1 byte.
                warnings.warn(
                    'copying data after decompressing; this may mean that'
                    ' decompress is caching its result',
                    PerformanceWarning,
                )
                # fall through to the copying path below

    # Copy the bytes into a numpy array. frombuffer(...).copy() replaces
    # the removed np.fromstring while keeping its writable-copy semantics
    # (a bare frombuffer view would be read-only and alias `values`).
    return np.frombuffer(values, dtype=dtype).copy()
def encode(obj):
    """
    Data encoder

    Dispatches on the type of ``obj`` and returns a msgpack-serializable
    dict tagged with a ``'typ'`` key (read back by ``decode``); objects
    with no special handling are returned unchanged.
    """
    tobj = type(obj)
    if isinstance(obj, Index):
        if isinstance(obj, RangeIndex):
            # RangeIndex is fully described by start/stop/step; no data block
            return {u'typ': u'range_index',
                    u'klass': u(obj.__class__.__name__),
                    u'name': getattr(obj, 'name', None),
                    u'start': getattr(obj, '_start', None),
                    u'stop': getattr(obj, '_stop', None),
                    u'step': getattr(obj, '_step', None)}
        elif isinstance(obj, PeriodIndex):
            return {u'typ': u'period_index',
                    u'klass': u(obj.__class__.__name__),
                    u'name': getattr(obj, 'name', None),
                    u'freq': u_safe(getattr(obj, 'freqstr', None)),
                    u'dtype': u(obj.dtype.name),
                    u'data': convert(obj.asi8),
                    u'compress': compressor}
        elif isinstance(obj, DatetimeIndex):
            tz = getattr(obj, 'tz', None)
            # store tz info and data as UTC
            if tz is not None:
                tz = u(tz.zone)
                obj = obj.tz_convert('UTC')
            return {u'typ': u'datetime_index',
                    u'klass': u(obj.__class__.__name__),
                    u'name': getattr(obj, 'name', None),
                    u'dtype': u(obj.dtype.name),
                    u'data': convert(obj.asi8),
                    u'freq': u_safe(getattr(obj, 'freqstr', None)),
                    u'tz': tz,
                    u'compress': compressor}
        elif isinstance(obj, MultiIndex):
            return {u'typ': u'multi_index',
                    u'klass': u(obj.__class__.__name__),
                    u'names': getattr(obj, 'names', None),
                    u'dtype': u(obj.dtype.name),
                    u'data': convert(obj.values),
                    u'compress': compressor}
        else:
            # generic Index fallback
            return {u'typ': u'index',
                    u'klass': u(obj.__class__.__name__),
                    u'name': getattr(obj, 'name', None),
                    u'dtype': u(obj.dtype.name),
                    u'data': convert(obj.values),
                    u'compress': compressor}
    elif isinstance(obj, Categorical):
        return {u'typ': u'category',
                u'klass': u(obj.__class__.__name__),
                u'name': getattr(obj, 'name', None),
                u'codes': obj.codes,
                u'categories': obj.categories,
                u'ordered': obj.ordered,
                u'compress': compressor}
    elif isinstance(obj, Series):
        if isinstance(obj, SparseSeries):
            raise NotImplementedError(
                'msgpack sparse series is not implemented'
            )
        # d = {'typ': 'sparse_series',
        #      'klass': obj.__class__.__name__,
        #      'dtype': obj.dtype.name,
        #      'index': obj.index,
        #      'sp_index': obj.sp_index,
        #      'sp_values': convert(obj.sp_values),
        #      'compress': compressor}
        # for f in ['name', 'fill_value', 'kind']:
        #     d[f] = getattr(obj, f, None)
        # return d
        else:
            return {u'typ': u'series',
                    u'klass': u(obj.__class__.__name__),
                    u'name': getattr(obj, 'name', None),
                    u'index': obj.index,
                    u'dtype': u(obj.dtype.name),
                    u'data': convert(obj.values),
                    u'compress': compressor}
    elif issubclass(tobj, NDFrame):
        if isinstance(obj, SparseDataFrame):
            raise NotImplementedError(
                'msgpack sparse frame is not implemented'
            )
        # d = {'typ': 'sparse_dataframe',
        #      'klass': obj.__class__.__name__,
        #      'columns': obj.columns}
        # for f in ['default_fill_value', 'default_kind']:
        #     d[f] = getattr(obj, f, None)
        # d['data'] = dict([(name, ss)
        #                  for name, ss in compat.iteritems(obj)])
        # return d
        else:
            # frames serialize through their (consolidated) BlockManager
            data = obj._data
            if not data.is_consolidated():
                data = data.consolidate()
            # the block manager
            return {u'typ': u'block_manager',
                    u'klass': u(obj.__class__.__name__),
                    u'axes': data.axes,
                    u'blocks': [{u'locs': b.mgr_locs.as_array,
                                 u'values': convert(b.values),
                                 u'shape': b.values.shape,
                                 u'dtype': u(b.dtype.name),
                                 u'klass': u(b.__class__.__name__),
                                 u'compress': compressor} for b in data.blocks]
                    }
    elif isinstance(obj, (datetime, date, np.datetime64, timedelta,
                          np.timedelta64, NaTType)):
        if isinstance(obj, Timestamp):
            tz = obj.tzinfo
            if tz is not None:
                tz = u(tz.zone)
            freq = obj.freq
            if freq is not None:
                freq = u(freq.freqstr)
            return {u'typ': u'timestamp',
                    u'value': obj.value,
                    u'freq': freq,
                    u'tz': tz}
        if isinstance(obj, NaTType):
            return {u'typ': u'nat'}
        elif isinstance(obj, np.timedelta64):
            return {u'typ': u'timedelta64',
                    u'data': obj.view('i8')}
        elif isinstance(obj, timedelta):
            return {u'typ': u'timedelta',
                    u'data': (obj.days, obj.seconds, obj.microseconds)}
        elif isinstance(obj, np.datetime64):
            return {u'typ': u'datetime64',
                    u'data': u(str(obj))}
        elif isinstance(obj, datetime):
            return {u'typ': u'datetime',
                    u'data': u(obj.isoformat())}
        elif isinstance(obj, date):
            return {u'typ': u'date',
                    u'data': u(obj.isoformat())}
        raise Exception("cannot encode this datetimelike object: %s" % obj)
    elif isinstance(obj, Period):
        return {u'typ': u'period',
                u'ordinal': obj.ordinal,
                u'freq': u(obj.freq)}
    elif isinstance(obj, BlockIndex):
        return {u'typ': u'block_index',
                u'klass': u(obj.__class__.__name__),
                u'blocs': obj.blocs,
                u'blengths': obj.blengths,
                u'length': obj.length}
    elif isinstance(obj, IntIndex):
        return {u'typ': u'int_index',
                u'klass': u(obj.__class__.__name__),
                u'indices': obj.indices,
                u'length': obj.length}
    elif isinstance(obj, np.ndarray):
        return {u'typ': u'ndarray',
                u'shape': obj.shape,
                u'ndim': obj.ndim,
                u'dtype': u(obj.dtype.name),
                u'data': convert(obj),
                u'compress': compressor}
    elif isinstance(obj, np.number):
        if np.iscomplexobj(obj):
            return {u'typ': u'np_scalar',
                    u'sub_typ': u'np_complex',
                    u'dtype': u(obj.dtype.name),
                    u'real': u(obj.real.__repr__()),
                    u'imag': u(obj.imag.__repr__())}
        else:
            return {u'typ': u'np_scalar',
                    u'dtype': u(obj.dtype.name),
                    u'data': u(obj.__repr__())}
    elif isinstance(obj, complex):
        return {u'typ': u'np_complex',
                u'real': u(obj.real.__repr__()),
                u'imag': u(obj.imag.__repr__())}
    # unhandled types pass through for msgpack to serialize natively
    return obj
def decode(obj):
    """
    Decoder for deserializing numpy data types.

    Inverse of ``encode``: inspects the ``'typ'`` tag written by ``encode``
    and rebuilds the corresponding pandas/numpy object.  Dicts without a
    ``'typ'`` key (and any other unrecognized input) are returned unchanged.
    """
    typ = obj.get(u'typ')
    if typ is None:
        return obj
    elif typ == u'timestamp':
        # older files stored the frequency under 'offset'
        freq = obj[u'freq'] if 'freq' in obj else obj[u'offset']
        return Timestamp(obj[u'value'], tz=obj[u'tz'], freq=freq)
    elif typ == u'nat':
        return NaT
    elif typ == u'period':
        return Period(ordinal=obj[u'ordinal'], freq=obj[u'freq'])
    elif typ == u'index':
        dtype = dtype_for(obj[u'dtype'])
        data = unconvert(obj[u'data'], dtype,
                         obj.get(u'compress'))
        return globals()[obj[u'klass']](data, dtype=dtype, name=obj[u'name'])
    elif typ == u'range_index':
        return globals()[obj[u'klass']](obj[u'start'],
                                        obj[u'stop'],
                                        obj[u'step'],
                                        name=obj[u'name'])
    elif typ == u'multi_index':
        dtype = dtype_for(obj[u'dtype'])
        data = unconvert(obj[u'data'], dtype,
                         obj.get(u'compress'))
        data = [tuple(x) for x in data]
        return globals()[obj[u'klass']].from_tuples(data, names=obj[u'names'])
    elif typ == u'period_index':
        data = unconvert(obj[u'data'], np.int64, obj.get(u'compress'))
        d = dict(name=obj[u'name'], freq=obj[u'freq'])
        return globals()[obj[u'klass']]._from_ordinals(data, **d)
    elif typ == u'datetime_index':
        data = unconvert(obj[u'data'], np.int64, obj.get(u'compress'))
        d = dict(name=obj[u'name'], freq=obj[u'freq'], verify_integrity=False)
        result = globals()[obj[u'klass']](data, **d)
        tz = obj[u'tz']
        # reverse tz conversion: encode() stored the data as UTC
        if tz is not None:
            result = result.tz_localize('UTC').tz_convert(tz)
        return result
    elif typ == u'category':
        from_codes = globals()[obj[u'klass']].from_codes
        return from_codes(codes=obj[u'codes'],
                          categories=obj[u'categories'],
                          ordered=obj[u'ordered'])
    elif typ == u'series':
        dtype = dtype_for(obj[u'dtype'])
        pd_dtype = pandas_dtype(dtype)
        index = obj[u'index']
        result = globals()[obj[u'klass']](unconvert(obj[u'data'], dtype,
                                                    obj[u'compress']),
                                          index=index,
                                          dtype=pd_dtype,
                                          name=obj[u'name'])
        return result
    elif typ == u'block_manager':
        axes = obj[u'axes']

        def create_block(b):
            values = _safe_reshape(unconvert(
                b[u'values'], dtype_for(b[u'dtype']),
                b[u'compress']), b[u'shape'])
            # locs handles duplicate column names, and should be used instead
            # of items; see GH 9618
            if u'locs' in b:
                placement = b[u'locs']
            else:
                placement = axes[0].get_indexer(b[u'items'])
            return make_block(values=values,
                              klass=getattr(internals, b[u'klass']),
                              placement=placement,
                              dtype=b[u'dtype'])

        blocks = [create_block(b) for b in obj[u'blocks']]
        return globals()[obj[u'klass']](BlockManager(blocks, axes))
    elif typ == u'datetime':
        return parse(obj[u'data'])
    elif typ == u'datetime64':
        return np.datetime64(parse(obj[u'data']))
    elif typ == u'date':
        return parse(obj[u'data']).date()
    elif typ == u'timedelta':
        return timedelta(*obj[u'data'])
    elif typ == u'timedelta64':
        return np.timedelta64(int(obj[u'data']))
    elif typ == u'block_index':
        return globals()[obj[u'klass']](obj[u'length'], obj[u'blocs'],
                                        obj[u'blengths'])
    elif typ == u'int_index':
        return globals()[obj[u'klass']](obj[u'length'], obj[u'indices'])
    elif typ == u'ndarray':
        return unconvert(obj[u'data'], np.typeDict[obj[u'dtype']],
                         obj.get(u'compress')).reshape(obj[u'shape'])
    elif typ == u'np_scalar':
        if obj.get(u'sub_typ') == u'np_complex':
            return c2f(obj[u'real'], obj[u'imag'], obj[u'dtype'])
        else:
            dtype = dtype_for(obj[u'dtype'])
            try:
                return dtype(obj[u'data'])
            except Exception:
                # ``dtype`` may be a numpy dtype instance rather than a
                # callable scalar type; fall back to its scalar constructor.
                # (Narrowed from a bare ``except:``, which also swallowed
                # SystemExit and KeyboardInterrupt.)
                return dtype.type(obj[u'data'])
    elif typ == u'np_complex':
        return complex(obj[u'real'] + u'+' + obj[u'imag'] + u'j')
    # anything else (including plain dicts, lists and sets) passes through
    return obj
def pack(o, default=encode,
         encoding='utf-8', unicode_errors='strict', use_single_float=False,
         autoreset=1, use_bin_type=1):
    """
    Pack an object and return the packed bytes.
    """
    packer = Packer(default=default,
                    encoding=encoding,
                    unicode_errors=unicode_errors,
                    use_single_float=use_single_float,
                    autoreset=autoreset,
                    use_bin_type=use_bin_type)
    return packer.pack(o)
def unpack(packed, object_hook=decode,
           list_hook=None, use_list=False, encoding='utf-8',
           unicode_errors='strict', object_pairs_hook=None,
           max_buffer_size=0, ext_hook=ExtType):
    """
    Unpack a packed object, return an iterator
    Note: packed lists will be returned as tuples
    """
    unpacker = Unpacker(packed,
                        object_hook=object_hook,
                        list_hook=list_hook,
                        use_list=use_list,
                        encoding=encoding,
                        unicode_errors=unicode_errors,
                        object_pairs_hook=object_pairs_hook,
                        max_buffer_size=max_buffer_size,
                        ext_hook=ext_hook)
    return unpacker
class Packer(_Packer):
    """msgpack ``Packer`` whose defaults wire in this module's ``encode``
    hook so pandas/numpy objects are serialized automatically."""
    def __init__(self, default=encode,
                 encoding='utf-8',
                 unicode_errors='strict',
                 use_single_float=False,
                 autoreset=1,
                 use_bin_type=1):
        super(Packer, self).__init__(default=default,
                                     encoding=encoding,
                                     unicode_errors=unicode_errors,
                                     use_single_float=use_single_float,
                                     autoreset=autoreset,
                                     use_bin_type=use_bin_type)
class Unpacker(_Unpacker):
    """msgpack ``Unpacker`` whose defaults wire in this module's ``decode``
    hook so tagged dicts are rebuilt into pandas/numpy objects."""
    def __init__(self, file_like=None, read_size=0, use_list=False,
                 object_hook=decode,
                 object_pairs_hook=None, list_hook=None, encoding='utf-8',
                 unicode_errors='strict', max_buffer_size=0, ext_hook=ExtType):
        super(Unpacker, self).__init__(file_like=file_like,
                                       read_size=read_size,
                                       use_list=use_list,
                                       object_hook=object_hook,
                                       object_pairs_hook=object_pairs_hook,
                                       list_hook=list_hook,
                                       encoding=encoding,
                                       unicode_errors=unicode_errors,
                                       max_buffer_size=max_buffer_size,
                                       ext_hook=ext_hook)
class Iterator(object):
    """ manage the unpacking iteration,
        close the file on completion """

    def __init__(self, path, **kwargs):
        # ``path`` may be a filesystem path, a raw packed payload, or an
        # already-open file-like object
        self.path = path
        self.kwargs = kwargs

    def __iter__(self):
        """Yield unpacked objects from ``self.path``.

        Only handles opened here are closed afterwards; a caller-supplied
        file-like object is left open.
        """
        needs_closing = True
        # initialize fh so the finally clause cannot hit an unbound name if
        # setup below raises (was a latent NameError masking the real error)
        fh = None
        try:
            # see if we have an actual file
            if isinstance(self.path, compat.string_types):
                try:
                    path_exists = os.path.exists(self.path)
                except TypeError:
                    path_exists = False

                if path_exists:
                    fh = open(self.path, 'rb')
                else:
                    # not a real path: treat the value as a packed payload
                    fh = compat.BytesIO(self.path)
            else:
                if not hasattr(self.path, 'read'):
                    fh = compat.BytesIO(self.path)
                else:
                    # a file-like
                    needs_closing = False
                    fh = self.path

            unpacker = unpack(fh)
            for o in unpacker:
                yield o
        finally:
            if needs_closing and fh is not None:
                fh.close()
| {
"content_hash": "b686265a92817eef80381e24e1656eb0",
"timestamp": "",
"source": "github",
"line_count": 790,
"max_line_length": 79,
"avg_line_length": 34.821518987341776,
"alnum_prop": 0.5294267330691773,
"repo_name": "nmartensen/pandas",
"id": "a2fc4db23700c2405eae522eafa7ff44e2c2f651",
"size": "27509",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pandas/io/packers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4071"
},
{
"name": "C",
"bytes": "492947"
},
{
"name": "C++",
"bytes": "17353"
},
{
"name": "HTML",
"bytes": "551706"
},
{
"name": "Makefile",
"bytes": "907"
},
{
"name": "PowerShell",
"bytes": "2970"
},
{
"name": "Python",
"bytes": "12132382"
},
{
"name": "R",
"bytes": "1177"
},
{
"name": "Shell",
"bytes": "22662"
},
{
"name": "Smarty",
"bytes": "2045"
}
],
"symlink_target": ""
} |
from os import listdir
from os.path import isfile, isdir, join, dirname
from re import sub
from urllib import urlopen
from subprocess import Popen, PIPE
from configparser import ConfigParser
import fcntl, socket, struct
def getHwAddr(ifname):
    """The pure python solution for this problem under Linux to get the MAC for a specific local interface,
    originally posted as a comment by vishnubob and improved by on Ben Mackey in http://code.activestate.com/recipes/439094-get-the-ip-address-associated-with-a-network-inter/"""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # 0x8927 is SIOCGIFHWADDR on Linux; the hardware address occupies
    # bytes 18-24 of the returned ifreq structure
    packed = fcntl.ioctl(sock.fileno(), 0x8927, struct.pack('256s', ifname[:15]))
    octets = ['%02x' % ord(char) for char in packed[18:24]]
    return ':'.join(octets)
def bonding(config):
    """Collect network-bonding details and report them to MagDB.

    Parses each bond's status file from the configured bonding directory
    into per-slave sections, POSTs the result to the MagDB bonding URL, and
    returns a dict mapping slave interface names to their permanent MAC
    addresses (used later by ``lldp``).  The dict is empty when no system
    id or no bonds are found.
    """
    try:
        out, err = Popen([config['binaries']['quattor-query'], "/hardware/name"], stdout=PIPE).communicate()
        # last output line looks like "... 'systemNNNN'"; keep just NNNN
        system_id = out.splitlines()[-1].split("'")[1].replace('system', '')
    except Exception:
        # narrowed from a bare except: still best-effort, but no longer
        # swallows SystemExit/KeyboardInterrupt
        system_id = False
    mac_addresses = {}
    if system_id:
        record = {
            "systemId": system_id,
            "bonds": {}
        }
        if isdir(config['paths']['bonding']):
            bonds = listdir(config['paths']['bonding'])
            if bonds:
                for bond in bonds:
                    bond_file = join(config['paths']['bonding'], bond)
                    if isfile(bond_file) and 'bond' in bond:
                        # Read bond information and tokenise
                        fh = open(bond_file)
                        data = fh.read()
                        fh.close()
                        data = data.splitlines()
                        data = [l.split(': ', 1) for l in data]
                        # Initialise structure
                        sections = [{}]
                        for line in data:
                            if len(line) == 2:
                                key, value = line
                                # Munge the keys slightly: drop parenthesised
                                # qualifiers and TitleCase the words
                                key = sub(r'\(.+\)', '', key)
                                key = key.title().replace(' ', '')
                                sections[-1][key] = value
                            else:
                                # a non key/value line starts a new section
                                sections.append({})
                        record["bonds"][bond] = sections
                        # Store the mac addresses behind bonded links for later use
                        for section in sections:
                            if 'PermanentHwAddr' in section:
                                mac_addresses[section['SlaveInterface']] = section['PermanentHwAddr']
                # print() form works on both Python 2 and 3 for single args
                print("Submitting bonding data to MagDB.")
                record = str(record).replace("'", '"')
                try:
                    f = urlopen(config['urls']['bonding'], "system=" + system_id + "&record=" + record)
                    print("MagDB says: " + f.read())
                except IOError:
                    print("Unable to submit results to MagDB")
            else:
                print("No network bonds found.")
        else:
            print("No bonding information on system.")
    else:
        print("Unable to determine systemId, will not look for network bonds.")
    return mac_addresses
def lldp(config, mac_addresses):
    """Collect LLDP neighbour data via ``lldpctl`` and report it to MagDB.

    ``mac_addresses`` maps slave interface names (from ``bonding``) to their
    permanent MAC addresses, used instead of the live address when a port is
    enslaved to a bond.
    """
    try:
        out, err = Popen([config['binaries']['lldpctl'], "-f", "keyvalue"], stdout=PIPE).communicate()
    except Exception:
        # narrowed from a bare except; lldpctl may simply be unavailable
        out = False
    if out:
        out = out.split('\n')[:-1]
        data = []
        for line in out:
            # each neighbour record starts with a 'via=LLDP' line
            if 'via=LLDP' in line:
                data.append({})
            if 'unknown-tlvs' in line:
                continue
            key, value = line.split('=')
            key = key.split('.')[1:]
            # build a nested dict from the dotted key path
            leaf = data[-1]
            for k in key[:-1]:
                if k not in leaf:
                    leaf[k] = {}
                leaf = leaf[k]
            leaf[key[-1]] = value.replace("'", "`")
        # Initialise structure
        record = []
        for d in data:
            link = {}
            rid = 0
            # items() works on both Python 2 and 3 (was iteritems())
            for k, v in d.items():
                rid = int(v['rid'])
                # If the port is a member of a bonded link, the apparent mac address may have changed therefore we should use the mac address behind the bond
                if k in mac_addresses:
                    mac = mac_addresses[k]
                else:
                    mac = getHwAddr(k)
                link[mac] = v
                link[mac]['name'] = k
            if rid <= 1:
                record.append(link)
        print("Submitting LLDP data to MagDB.")
        record = str(record).replace("'", '"')
        try:
            f = urlopen(config['urls']['lldp'], "record=" + record)
            print("MagDB says: " + f.read())
        except IOError:
            print("Unable to submit results to MagDB")
    else:
        print("No LLDP data found.")
    print("Complete.")
def main():
    # Read config from /etc (preferred), falling back to a file that sits
    # next to this script.
    config = ConfigParser()
    config.read(['/etc/magdb-discover.conf', join(dirname(__file__), 'magdb-discover.conf')])
    # bonding() returns the slave-interface MAC map that lldp() consumes.
    mac_addresses = bonding(config)
    lldp(config, mac_addresses)
# Script entry point.
if __name__ == "__main__":
    main()
| {
"content_hash": "860bcc8610f8470b676ac95d57bbdb9d",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 181,
"avg_line_length": 35.013513513513516,
"alnum_prop": 0.48552682362022387,
"repo_name": "stfc/MagDB",
"id": "581034365100d2df7c40cf8de31c1c37c279efb6",
"size": "5206",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "client/magdb-discover.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "136359"
},
{
"name": "Python",
"bytes": "70002"
}
],
"symlink_target": ""
} |
import json
""" Input: the lyrics list, the tf-idf scores list, and a list of song and line indices
    Output: a list of (word, score) tuples sorted by tf-idf score
    Given a list of song-line tuples (song_index, line_index),
    returns a list of (word, score) tuples, with the highest-scoring word
    at the head of the list.
"""
def score_lookup(lyrics, scores, songs_lst):
    """Return (word, average tf-idf score) tuples, highest score first.

    :param lyrics: nested list of words, indexed [song][line][word]
    :param scores: parallel nested list of tf-idf scores
    :param songs_lst: iterable of (song_index, line_index) pairs to scan
    """
    tfidf_sum = {}
    tfidf_count = {}
    tfidf_scores = []
    for song, line in songs_lst:
        for word_idx in range(len(scores[song][line])):
            word = lyrics[song][line][word_idx]
            score = scores[song][line][word_idx]
            # skip tokens with punctuation/digits; fold case for aggregation
            if word.isalpha():
                key = word.lower()
                # ``in`` replaces dict.has_key(), which was removed in
                # Python 3 and deprecated in Python 2
                if key in tfidf_sum:
                    tfidf_sum[key] += score
                    tfidf_count[key] += 1
                else:
                    tfidf_sum[key] = score
                    tfidf_count[key] = 1
    for word, sum_score in tfidf_sum.items():
        # average the score over all occurrences of the word
        tfidf_scores.append((word, sum_score / tfidf_count[word]))
    return sorted(tfidf_scores, key=lambda x: x[1], reverse=True)
| {
"content_hash": "66f0771600593535c4fb3499c1bbd958",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 82,
"avg_line_length": 41.22222222222222,
"alnum_prop": 0.5831087151841868,
"repo_name": "warrencrowell/cs4300sp2016-TweetBeat",
"id": "2fd8a53da744b8e7b1dcf6c0f05ac9a1f5c7ada0",
"size": "1113",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project_template/dataset_score_lookup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3592"
},
{
"name": "HTML",
"bytes": "12259"
},
{
"name": "OpenEdge ABL",
"bytes": "15885355"
},
{
"name": "Python",
"bytes": "39539"
},
{
"name": "TeX",
"bytes": "212"
}
],
"symlink_target": ""
} |
"""
WSGI config for smarthumidor project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
# Must be set before importing get_wsgi_application so Django can configure.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "smarthumidor.settings")
from django.core.wsgi import get_wsgi_application
# The WSGI callable used by application servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| {
"content_hash": "a0f5502af778cb4478f0a1d03bdc519b",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 28.5,
"alnum_prop": 0.7794486215538847,
"repo_name": "bkillenit/smarthumidor",
"id": "9bce7ce09b0c63bfaec1b661689590d6ddb57320",
"size": "399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/smarthumidor/smarthumidor/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "9684"
},
{
"name": "C++",
"bytes": "4738"
},
{
"name": "Python",
"bytes": "4335"
}
],
"symlink_target": ""
} |
"""
$ ./example5_mpl.py [fits file]
This example program shows how to capture button and keypress events
for your own use. After loading a FITS file use the following keys:
Press 'x' to turn on capture of events and bypass most normal keystroke
processing. Press it again to resume normal processing. An on-screen
message will tell you which mode you are in.
While in 'capture mode' you can draw points with the right mouse button.
Press 'c' to clear the canvas of drawn points.
"""
import sys, os
# just in case you want to use qt
os.environ['QT_API'] = 'pyqt'
import matplotlib
options = ['Qt4Agg', 'GTK', 'GTKAgg', 'MacOSX', 'GTKCairo', 'WXAgg',
'TkAgg', 'QtAgg', 'FltkAgg', 'WX']
# Force a specific toolkit, if you leave commented matplotlib will choose
# an appropriate one for your system
#matplotlib.use('Qt4Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
from ginga.mplw.ImageViewCanvasMpl import ImageViewCanvas
from ginga.mplw.ImageViewCanvasTypesMpl import DrawingCanvas
from ginga.AstroImage import AstroImage
from ginga.misc import log
from ginga import cmap
# Set to True to get diagnostic logging output
use_logger = False
class MyGingaFigure(object):
    """Embed a Ginga image viewer in a matplotlib figure and demonstrate
    intercepting key/button events via an overlay drawing canvas."""
    def __init__(self, logger, fig):
        self.logger = logger
        # create a ginga object and tell it about the figure
        fi = ImageViewCanvas(logger)
        fi.enable_autocuts('on')
        fi.set_autocut_params('zscale')
        fi.add_callback('key-press', self.key_press_ginga)
        fi.set_figure(fig)
        self.fitsimage = fi
        # enable all interactive features
        fi.get_bindings().enable_all(True)
        # overlay canvas used while in "capture" mode; draws cyan points
        canvas = DrawingCanvas()
        canvas.enable_draw(True)
        canvas.set_callback('button-press', self.btn_down)
        #canvas.set_callback('motion', self.drag)
        canvas.set_callback('button-release', self.btn_up)
        canvas.set_drawtype('point', color='cyan')
        canvas.set_callback('draw-event', self.draw_event)
        canvas.add_callback('key-press', self.key_press)
        canvas.setSurface(self.fitsimage)
        canvas.ui_setActive(True)
        self.canvas = canvas
    def load(self, fitspath):
        # load an image
        image = AstroImage(self.logger)
        image.load_file(fitspath)
        self.fitsimage.set_image(image)
    def capture(self):
        """
        Insert our canvas so that we intercept all events before they reach
        processing by the bindings layer of Ginga.
        """
        # insert the canvas
        self.fitsimage.add(self.canvas, tag='mycanvas')
    def release(self):
        """
        Remove our canvas so that we no longer intercept events.
        """
        # retract the canvas
        self.fitsimage.deleteObjectByTag('mycanvas')
    def clear(self):
        """
        Clear the canvas of any drawing made on it.
        """
        self.canvas.deleteAllObjects()
    def get_wcs(self, data_x, data_y):
        """Return (ra_deg, dec_deg) for the (data_x, data_y) position
        based on any WCS associated with the loaded image.
        """
        img = self.fitsimage.get_image()
        ra, dec = img.pixtoradec(data_x, data_y)
        return ra, dec
    # CALLBACKS
    # NOTE: return values on callbacks are important: if True then lower
    # layer Ginga canvas items will not get events
    def key_press(self, canvas, keyname):
        # active while in capture mode: 'x' leaves capture, 'c' clears points
        if keyname == 'x':
            self.fitsimage.onscreen_message("Moving to regular mode",
                                            delay=1.0)
            self.release()
        elif keyname == 'c':
            self.clear()
            return True
        fi = canvas.fitsimage
        data_x, data_y = fi.get_last_data_xy()
        ra, dec = self.get_wcs(data_x, data_y)
        print "key %s pressed at data %d,%d ra=%s dec=%s" % (
            keyname, data_x, data_y, ra, dec)
        return True
    def key_press_ginga(self, fitsimage, keyname):
        # active in regular mode: 'x' enters capture mode
        if keyname == 'x':
            self.fitsimage.onscreen_message("Moving to capture mode",
                                            delay=1.0)
            self.capture()
        return True
    def btn_down(self, canvas, button, data_x, data_y):
        ra, dec = self.get_wcs(data_x, data_y)
        print "button %s pressed at data %d,%d ra=%s dec=%s" % (
            button, data_x, data_y, ra, dec)
        return False
    def btn_up(self, canvas, button, data_x, data_y):
        ra, dec = self.get_wcs(data_x, data_y)
        print "button %s released at data %d,%d ra=%s dec=%s" % (
            button, data_x, data_y, ra, dec)
        return False
    def draw_event(self, canvas, tag):
        obj = canvas.getObjectByTag(tag)
        data_x, data_y = obj.x, obj.y
        ra, dec = self.get_wcs(data_x, data_y)
        print "A %s was drawn at data %d,%d ra=%s dec=%s" % (
            obj.kind, data_x, data_y, ra, dec)
        return True
# create a regular matplotlib figure
fig = plt.figure()
# Here is our object
logger = log.get_logger(null=not use_logger, log_stderr=True)
foo = MyGingaFigure(logger, fig)
# load an image, if one was provided
if len(sys.argv) > 1:
    foo.load(sys.argv[1])
# Press 'x' to turn on capture of events.  Press it again to resume normal
# processing of events.
# Press 'c' to clear the canvas of drawn points.
# Blocks until the matplotlib window is closed.
plt.show()
| {
"content_hash": "2ea6d118648ccb94a98671da0a2596ac",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 75,
"avg_line_length": 33.56875,
"alnum_prop": 0.6196239061627258,
"repo_name": "Rbeaty88/ginga",
"id": "1d061ec7356ec567b0f76dc963cf67d16ab448d8",
"size": "5695",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/example5_mpl.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2102613"
}
],
"symlink_target": ""
} |
from socket import *
# Minimal hand-rolled SMTP dialogue over a raw TCP socket: HELO, MAIL FROM,
# RCPT TO, DATA, QUIT -- checking the server's reply code after each step.
# NOTE(review): no STARTTLS or authentication is performed after connecting
# on submission port 587; confirm the target server accepts MAIL FROM
# without it (Gmail normally does not).
# Message to send
msg = '\r\nI love computer networks!'
endmsg = '\r\n.\r\n'
# Choose a mail server (e.g. Google mail server) and call it mailserver
mailserver = 'smtp.gmail.com'
# Create socket called clientSocket and establish a TCP connection with mailserver
clientSocket = socket(AF_INET, SOCK_STREAM)
# Port number may change according to the mail server
clientSocket.connect((mailserver, 587))
recv = clientSocket.recv(1024).decode()
print(recv)
# 220 = service-ready greeting
if recv[:3] != '220':
    print('220 reply not received from server.')
# Send HELO command and print server response.
heloCommand = 'HELO gmail.com\r\n'
clientSocket.send(heloCommand.encode())
recv1 = clientSocket.recv(1024).decode()
print(recv1)
if recv1[:3] != '250':
    print('250 reply not received from server.')
# Send MAIL FROM command and print server response.
mailfrom = 'MAIL FROM: <alice@gmail.com>\r\n'
clientSocket.send(mailfrom.encode())
recv2 = clientSocket.recv(1024).decode()
print(recv2)
if recv2[:3] != '250':
    print('250 reply not received from server.')
# Send RCPT TO command and print server response.
rcptto = 'RCPT TO: <bob@yahoo.com>\r\n'
clientSocket.send(rcptto.encode())
recv3 = clientSocket.recv(1024).decode()
print(recv3)
if recv3[:3] != '250':
    print('250 reply not received from server.')
# Send DATA command and print server response.
data = 'DATA\r\n'
clientSocket.send(data.encode())
recv4 = clientSocket.recv(1024).decode()
print(recv4)
# 354 = start mail input
if recv4[:3] != '354':
    print('354 reply not received from server.')
# Send message data.
clientSocket.send('SUBJECT: Greeting To you!\r\n'.encode())
clientSocket.send('test again'.encode())
clientSocket.send(msg.encode())
# Message ends with a single period.
clientSocket.send(endmsg.encode())
recv5 = clientSocket.recv(1024).decode()
print(recv5)
if recv5[:3] != '250':
    print('250 reply not received from server.')
# Send QUIT command and get server response.
quitcommand = 'QUIT\r\n'
clientSocket.send(quitcommand.encode())
recv6 = clientSocket.recv(1024).decode()
print(recv6)
# 221 = closing transmission channel
if recv6[:3] != '221':
    print('221 reply not received from server.')
| {
"content_hash": "d8059444cc7c6b09dcdc77377f6ab153",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 82,
"avg_line_length": 29.319444444444443,
"alnum_prop": 0.7280909521553766,
"repo_name": "moranzcw/Computer-Networking-A-Top-Down-Approach-NOTES",
"id": "3b3ab3b56e02388becde68d8d46d63dd08bfdb9c",
"size": "2111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Resource/7th-Python-Solution/Solutions/SMTPClient/SMTPClient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "223430941"
}
],
"symlink_target": ""
} |
def tsplit(string, delimiters):
    """Behaves like str.split but supports multiple delimiters.

    :param string: the string to split
    :param delimiters: an iterable of delimiter strings
    :return: list of the pieces, in order

    >>> tsplit('a,b;c', (',', ';'))
    ['a', 'b', 'c']
    """
    pieces = [string]
    for delimiter in delimiters:
        # Re-split every piece produced so far on the current delimiter.
        # (The original popped/inserted into the list while enumerating it,
        # which only worked by accident and re-scanned finished pieces;
        # rebuilding the list is simpler and linear per delimiter.)
        next_pieces = []
        for piece in pieces:
            next_pieces.extend(piece.split(delimiter))
        pieces = next_pieces
    return pieces
| {
"content_hash": "5861f0505946261f693c9c4e582e20fb",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 61,
"avg_line_length": 31.785714285714285,
"alnum_prop": 0.5910112359550562,
"repo_name": "ActiveState/code",
"id": "4a6560c2e50cc3382bf535bd14286d0948f1a572",
"size": "445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/577616_Split_Strings_w_Multiple/recipe-577616.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
from django.utils.translation import ugettext as _
from dragoman_blog.model_bases import BaseEntry, BaseEntryTranslation
from cms.models.fields import PlaceholderField
class Entry(BaseEntry):
    """Concrete blog entry with a django-cms placeholder for body content."""
    placeholder = PlaceholderField('dragoman_placeholder')

    class Meta:
        verbose_name = _('Entry')
        # fixed misspelled plural: was _('Entrys')
        verbose_name_plural = _('Entries')
        abstract = False
        app_label = 'cmsplugin_dragoman'
"content_hash": "5744ac099f347c99f31be2edd0457ec2",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 69,
"avg_line_length": 31.923076923076923,
"alnum_prop": 0.7108433734939759,
"repo_name": "fivethreeo/cmsplugin-dragoman",
"id": "11eae2e9dc213d888fe5ac3a6b87fa529009ddc5",
"size": "415",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cmsplugin_dragoman/dragoman_models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "5403"
}
],
"symlink_target": ""
} |
"""
requests_cache.core
~~~~~~~~~~~~~~~~~~~
Core functions for configuring cache and monkey patching ``requests``
"""
from contextlib import contextmanager
from datetime import datetime, timedelta
from alp.request import requests
from alp.request.requests import Session as OriginalSession
from alp.request.requests.hooks import dispatch_hook
from . import backends
from .compat import str, basestring
try:
    ver = tuple(map(int, requests.__version__.split(".")))
except ValueError:
    # non-numeric version component (e.g. a dev build): assume a modern
    # requests and keep the real dispatch_hook
    pass
else:
    # We don't need to dispatch hook in Requests <= 1.1.0
    if ver < (1, 2, 0):
        dispatch_hook = lambda key, hooks, hook_data, *a, **kw: hook_data
    # ``del`` moved inside the else-branch: previously an unparsable version
    # left ``ver`` unbound and the module-level ``del ver`` raised NameError
    del ver
class CachedSession(OriginalSession):
    """ Requests ``Sessions`` with caching support.
    """
    def __init__(self, cache_name='cache', backend='sqlite', expire_after=None,
                 allowable_codes=(200,), allowable_methods=('GET',),
                 **backend_options):
        """
        :param cache_name: for ``sqlite`` backend: cache file will start with this prefix,
                           e.g ``cache.sqlite``
                           for ``mongodb``: it's used as database name
                           for ``redis``: it's used as the namespace. This means all keys
                           are prefixed with ``'cache_name:'``
        :param backend: cache backend name e.g ``'sqlite'``, ``'mongodb'``, ``'redis'``, ``'memory'``.
                        (see :ref:`persistence`). Or instance of backend implementation.
        :param expire_after: number of seconds after cache will be expired
                             or `None` (default) to ignore expiration
        :type expire_after: float
        :param allowable_codes: limit caching only for response with this codes (default: 200)
        :type allowable_codes: tuple
        :param allowable_methods: cache only requests of this methods (default: 'GET')
        :type allowable_methods: tuple
        :kwarg backend_options: options for chosen backend. See corresponding
                                :ref:`sqlite <backends_sqlite>`, :ref:`mongo <backends_mongo>`
                                and :ref:`redis <backends_redis>` backends API documentation
        """
        if isinstance(backend, basestring):
            # look up the backend class by name in the registry
            try:
                self.cache = backends.registry[backend](cache_name, **backend_options)
            except KeyError:
                raise ValueError('Unsupported backend "%s" try one of: %s' %
                                 (backend, ', '.join(backends.registry.keys())))
        else:
            # caller supplied a ready-made backend instance
            self.cache = backend
        self._cache_expire_after = expire_after
        self._cache_allowable_codes = allowable_codes
        self._cache_allowable_methods = allowable_methods
        self._is_cache_disabled = False
        super(CachedSession, self).__init__()
    def send(self, request, **kwargs):
        """Send ``request``, serving from / populating the cache.

        Every returned response gains a ``from_cache`` boolean attribute.
        """
        # bypass the cache when disabled or for non-cacheable methods
        if (self._is_cache_disabled
            or request.method not in self._cache_allowable_methods):
            response = super(CachedSession, self).send(request, **kwargs)
            response.from_cache = False
            return response
        cache_key = self.cache.create_key(request)
        def send_request_and_cache_response():
            # actually hit the network, then store qualifying responses
            response = super(CachedSession, self).send(request, **kwargs)
            if response.status_code in self._cache_allowable_codes:
                self.cache.save_response(cache_key, response)
            response.from_cache = False
            return response
        response, timestamp = self.cache.get_response_and_time(cache_key)
        if response is None:
            return send_request_and_cache_response()
        if self._cache_expire_after is not None:
            # evict stale entries and refetch
            difference = datetime.utcnow() - timestamp
            if difference > timedelta(seconds=self._cache_expire_after):
                self.cache.delete(cache_key)
                return send_request_and_cache_response()
        # dispatch hook here, because we've removed it before pickling
        response = dispatch_hook('response', request.hooks, response, **kwargs)
        response.from_cache = True
        return response
    def request(self, method, url, params=None, data=None, headers=None,
                cookies=None, files=None, auth=None, timeout=None,
                allow_redirects=True, proxies=None, hooks=None, stream=None,
                verify=None, cert=None):
        """Issue a request; additionally maps every redirect hop's cache key
        to the final response so future hits on any hop come from cache.
        """
        response = super(CachedSession, self).request(method, url, params, data,
                                                      headers, cookies, files,
                                                      auth, timeout,
                                                      allow_redirects, proxies,
                                                      hooks, stream, verify, cert)
        if self._is_cache_disabled:
            return response
        main_key = self.cache.create_key(response.request)
        for r in response.history:
            # alias each intermediate (redirect) request to the final response
            self.cache.add_key_mapping(
                self.cache.create_key(r.request), main_key
            )
        return response
    @contextmanager
    def cache_disabled(self):
        """
        Context manager for temporary disabling cache
        ::
            >>> s = CachedSession()
            >>> with s.cache_disabled():
            ...     s.get('http://httpbin.org/ip')
        """
        self._is_cache_disabled = True
        try:
            yield
        finally:
            self._is_cache_disabled = False
def install_cache(cache_name='cache', backend='sqlite', expire_after=None,
                  allowable_codes=(200,), allowable_methods=('GET',),
                  session_factory=CachedSession, **backend_options):
    """
    Installs cache for all ``Requests`` requests by monkey-patching ``Session``
    Parameters are the same as in :class:`CachedSession`. Additional parameters:
    :param session_factory: Session factory. It should inherit :class:`CachedSession` (default)
    """
    def _make_session():
        # Invoked each time requests.Session() is instantiated after patching.
        return session_factory(cache_name=cache_name,
                               backend=backend,
                               expire_after=expire_after,
                               allowable_codes=allowable_codes,
                               allowable_methods=allowable_methods,
                               **backend_options)
    _patch_session_factory(_make_session)
# backward compatibility
configure = install_cache
def uninstall_cache():
    """ Restores ``requests.Session`` and disables cache
    """
    # Re-install the unpatched Session class captured at import time.
    _patch_session_factory(OriginalSession)
@contextmanager
def disabled():
    """
    Context manager for temporary disabling globally installed cache
    .. warning:: not thread-safe
    ::
        >>> with requests_cache.disabled():
        ...     requests.get('http://httpbin.org/ip')
        ...     requests.get('http://httpbin.org/get')
    """
    # Remember whatever factory is currently installed (may itself be a
    # patched one) so nesting restores the correct state.
    previous = requests.Session
    uninstall_cache()
    try:
        yield
    finally:
        _patch_session_factory(previous)
def get_cache():
    """ Returns internal cache object from globally installed ``CachedSession``
    """
    # Relies on install_cache() having patched requests.Session to a
    # CachedSession factory; a plain Session has no ``cache`` attribute.
    return requests.Session().cache
def clear():
    """ Clears globally installed cache
    """
    # Delegates to the backend's own clear(); raises if no cache is installed.
    get_cache().clear()
def _patch_session_factory(session_factory=CachedSession):
    # Monkey-patch both public aliases so requests.Session() and
    # requests.sessions.Session() produce the same (cached) session type.
    requests.Session = requests.sessions.Session = session_factory
| {
"content_hash": "0f9b179600b90a9148d4b49f12693e8e",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 102,
"avg_line_length": 36.82673267326733,
"alnum_prop": 0.5765559887081597,
"repo_name": "bfontaine/alfred-pp",
"id": "e567b16b908d2157f9336c30a91a6a92343530dc",
"size": "7485",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "alp/request/requests_cache/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1713901"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
    # Adds a nullable one-to-one link from StandupUser to the (swappable)
    # Django auth user model; nullable so pre-existing rows migrate cleanly.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('status', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='standupuser',
            name='user',
            field=models.OneToOneField(
                null=True,
                to=settings.AUTH_USER_MODEL,
                on_delete=models.CASCADE,
                related_name='profile'
            ),
        ),
    ]
| {
"content_hash": "8f51fc80a52c577e5e057f9f04f688f6",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 66,
"avg_line_length": 24.96,
"alnum_prop": 0.5689102564102564,
"repo_name": "mozilla/standup",
"id": "720caf6df8eb8b2a38268a6059505179fa444b4c",
"size": "648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "standup/status/migrations/0002_standupuser_user.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "32606"
},
{
"name": "Dockerfile",
"bytes": "1165"
},
{
"name": "HTML",
"bytes": "24348"
},
{
"name": "JavaScript",
"bytes": "4703"
},
{
"name": "Makefile",
"bytes": "2434"
},
{
"name": "Python",
"bytes": "113036"
},
{
"name": "Shell",
"bytes": "4920"
}
],
"symlink_target": ""
} |
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
VERSION = "unknown"
class TextAnalyticsClientConfiguration(Configuration):  # pylint: disable=too-many-instance-attributes
    """Configuration for TextAnalyticsClient.
    Note that all parameters used to create this instance are saved as instance
    attributes.
    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials.TokenCredential
    :param endpoint: Supported Cognitive Services endpoint (e.g.,
     https://:code:`<resource-name>`.api.cognitiveservices.azure.com). Required.
    :type endpoint: str
    :keyword api_version: Api Version. Default value is "2022-05-01". Note that overriding this
     default value may result in unsupported behavior.
    :paramtype api_version: str
    """
    def __init__(
        self,
        credential: "TokenCredential",
        endpoint: str,
        **kwargs: Any
    ) -> None:
        super(TextAnalyticsClientConfiguration, self).__init__(**kwargs)
        api_version = kwargs.pop('api_version', "2022-05-01")  # type: str
        # Both parameters are required; fail fast with a clear message.
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if endpoint is None:
            raise ValueError("Parameter 'endpoint' must not be None.")
        self.credential = credential
        self.endpoint = endpoint
        self.api_version = api_version
        # Default OAuth scope for Cognitive Services; callers may override.
        self.credential_scopes = kwargs.pop('credential_scopes', ['https://cognitiveservices.azure.com/.default'])
        kwargs.setdefault('sdk_moniker', 'ai-textanalytics/{}'.format(VERSION))
        self._configure(**kwargs)
    def _configure(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Build the standard azure-core pipeline policies, honoring any
        # caller-supplied overrides passed via kwargs.
        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get('authentication_policy')
        # Only synthesize an auth policy when a credential exists and the
        # caller did not provide their own policy.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| {
"content_hash": "cbf9b7f38d48656433cccb7c8212a4d0",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 129,
"avg_line_length": 47.296875,
"alnum_prop": 0.6957383548067394,
"repo_name": "Azure/azure-sdk-for-python",
"id": "e6046dcdb4f9190eff67ca928e9fac33d60d8be0",
"size": "3495",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v2022_05_01/_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""
Administration docstring
"""
| {
"content_hash": "a18f7fa974a6ef48c4259d6d3fc3e39e",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 24,
"avg_line_length": 11,
"alnum_prop": 0.696969696969697,
"repo_name": "nuwainfo/treeio",
"id": "276b15f89f59721c67937e4eb65e608b767949bd",
"size": "146",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "core/administration/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "400811"
},
{
"name": "JavaScript",
"bytes": "2137384"
},
{
"name": "Makefile",
"bytes": "4598"
},
{
"name": "PHP",
"bytes": "25856"
},
{
"name": "Python",
"bytes": "2827961"
},
{
"name": "Shell",
"bytes": "6488"
}
],
"symlink_target": ""
} |
from sympy.core import Basic, C, Dict, sympify
from sympy.matrices import zeros
from sympy.functions import floor
from sympy.utilities.misc import default_sort_key
from sympy.utilities.iterables import has_dups, flatten
from sympy.ntheory.residue_ntheory import int_tested
import random
from collections import defaultdict
class Partition(C.FiniteSet):
    """
    This class represents an abstract partition.
    A partition is a set of disjoint sets whose union equals a given set.
    See Also
    ========
    sympy.utilities.iterables.partitions,
    sympy.utilities.iterables.multiset_partitions
    """
    # Cached rank and sorted-list form; computed lazily on first access.
    _rank = None
    _partition = None
    def __new__(cls, partition):
        """
        Generates a new partition object.
        This method also verifies if the arguments passed are
        valid and raises a ValueError if they are not.
        Examples
        ========
        >>> from sympy.combinatorics.partitions import Partition
        >>> a = Partition([[1, 2], [3]])
        >>> a
        {{1, 2}, {3}}
        >>> a.partition
        [[1, 2], [3]]
        >>> len(a)
        2
        >>> a.members
        (1, 2, 3)
        """
        args = partition
        if not all(isinstance(part, list) for part in args):
            raise ValueError("Partition should be a list of lists.")
        # sort so we have a canonical reference for RGS
        partition = sorted(sum(partition, []), key=default_sort_key)
        if has_dups(partition):
            raise ValueError("Partition contained duplicated elements.")
        obj = C.FiniteSet.__new__(cls, map(C.FiniteSet, args))
        obj.members = tuple(partition)
        obj.size = len(partition)
        return obj
    def sort_key(self, order=None):
        """Return a canonical key that can be used for sorting.
        Ordering is based on the size and sorted elements of the partition
        and ties are broken with the rank.
        Examples
        ========
        >>> from sympy.utilities.iterables import default_sort_key
        >>> from sympy.combinatorics.partitions import Partition
        >>> from sympy.abc import x
        >>> a = Partition([[1, 2]])
        >>> b = Partition([[3, 4]])
        >>> c = Partition([[1, x]])
        >>> d = Partition([range(4)])
        >>> l = [d, b, a + 1, a, c]
        >>> l.sort(key=default_sort_key); l
        [{{1, 2}}, {{1}, {2}}, {{1, x}}, {{3, 4}}, {{0, 1, 2, 3}}]
        """
        if order is None:
            members = self.members
        else:
            members = tuple(sorted(self.members,
                            key=lambda w: default_sort_key(w, order)))
        return self.size, members, self.rank
    @property
    def partition(self):
        """Return partition as a sorted list of lists.
        Examples
        ========
        >>> from sympy.combinatorics.partitions import Partition
        >>> Partition([[1], [2, 3]]).partition
        [[1], [2, 3]]
        """
        if self._partition is None:
            self._partition = sorted(sorted(p) for p in self.args)
        return self._partition
    def __add__(self, other):
        """
        Return permutation whose rank is ``other`` greater than current rank,
        (mod the maximum rank for the set).
        Examples
        ========
        >>> from sympy.combinatorics.partitions import Partition
        >>> a = Partition([[1, 2], [3]])
        >>> a.rank
        1
        >>> (a + 1).rank
        2
        >>> (a + 100).rank
        1
        """
        other = int_tested(other)
        offset = self.rank + other
        # Wrap around the total number of set partitions of this size.
        result = RGS_unrank((offset) %
                            RGS_enum(self.size),
                            self.size)
        return Partition.from_rgs(result, self.members)
    def __sub__(self, other):
        """
        Return permutation whose rank is ``other`` less than current rank,
        (mod the maximum rank for the set).
        Examples
        ========
        >>> from sympy.combinatorics.partitions import Partition
        >>> a = Partition([[1, 2], [3]])
        >>> a.rank
        1
        >>> (a - 1).rank
        0
        >>> (a - 100).rank
        1
        """
        return self.__add__(-other)
    def __le__(self, other):
        """
        Checks if a partition is less than or equal to
        the other based on rank.
        Examples
        ========
        >>> from sympy.combinatorics.partitions import Partition
        >>> a = Partition([[1, 2], [3, 4, 5]])
        >>> b = Partition([[1], [2, 3], [4], [5]])
        >>> a.rank, b.rank
        (9, 34)
        >>> a <= a
        True
        >>> a <= b
        True
        """
        return self.sort_key() <= sympify(other).sort_key()
    def __lt__(self, other):
        """
        Checks if a partition is less than the other.
        Examples
        ========
        >>> from sympy.combinatorics.partitions import Partition
        >>> a = Partition([[1, 2], [3, 4, 5]])
        >>> b = Partition([[1], [2, 3], [4], [5]])
        >>> a.rank, b.rank
        (9, 34)
        >>> a < b
        True
        """
        return self.sort_key() < sympify(other).sort_key()
    @property
    def rank(self):
        """
        Gets the rank of a partition.
        Examples
        ========
        >>> from sympy.combinatorics.partitions import Partition
        >>> a = Partition([[1, 2], [3], [4, 5]])
        >>> a.rank
        13
        """
        # was ``self._rank != None`` — identity comparison is the correct
        # (and PEP 8 mandated) way to test for None
        if self._rank is not None:
            return self._rank
        self._rank = RGS_rank(self.RGS)
        return self._rank
    @property
    def RGS(self):
        """
        Returns the "restricted growth string" of the partition.
        The RGS is returned as a list of indices, L, where L[i] indicates
        the block in which element i appears. For example, in a partition
        of 3 elements (a, b, c) into 2 blocks ([c], [a, b]) the RGS is
        [1, 1, 0]: "a" is in block 1, "b" is in block 1 and "c" is in block 0.
        Examples
        ========
        >>> from sympy.combinatorics.partitions import Partition
        >>> a = Partition([[1, 2], [3], [4, 5]])
        >>> a.members
        (1, 2, 3, 4, 5)
        >>> a.RGS
        (0, 0, 1, 2, 2)
        >>> a + 1
        {{1, 2}, {3}, {4}, {5}}
        >>> _.RGS
        (0, 0, 1, 2, 3)
        """
        rgs = {}
        partition = self.partition
        for i, part in enumerate(partition):
            for j in part:
                rgs[j] = i
        return tuple([rgs[i] for i in sorted(i for p in partition for i in p)])
    @classmethod
    def from_rgs(cls, rgs, elements):
        """
        Creates a set partition from a restricted growth string.
        The indices given in rgs are assumed to be the index
        of the element as given in elements *as provided* (the
        elements are not sorted by this routine). Block numbering
        starts from 0. If any block was not referenced in ``rgs``
        an error will be raised.
        Examples
        ========
        >>> from sympy.combinatorics.partitions import Partition
        >>> Partition.from_rgs([0, 1, 2, 0, 1], list('abcde'))
        {{c}, {a, d}, {b, e}}
        >>> Partition.from_rgs([0, 1, 2, 0, 1], list('cbead'))
        {{e}, {a, c}, {b, d}}
        >>> a = Partition([[1, 4], [2], [3, 5]])
        >>> Partition.from_rgs(a.RGS, a.members)
        {{1, 4}, {2}, {3, 5}}
        """
        if len(rgs) != len(elements):
            raise ValueError('mismatch in rgs and element lengths')
        max_elem = max(rgs) + 1
        partition = [[] for i in xrange(max_elem)]
        j = 0
        for i in rgs:
            partition[i].append(elements[j])
            j += 1
        if not all(p for p in partition):
            raise ValueError('some blocks of the partition were empty.')
        # use ``cls`` (this is a classmethod) so subclasses round-trip
        return cls(partition)
class IntegerPartition(Basic):
    """
    This class represents an integer partition.
    In number theory and combinatorics, a partition of a positive integer,
    ``n``, also called an integer partition, is a way of writing ``n`` as a
    list of positive integers that sum to n. Two partitions that differ only
    in the order of summands are considered to be the same partition; if order
    matters then the partitions are referred to as compositions. For example,
    4 has five partitions: [4], [3, 1], [2, 2], [2, 1, 1], and [1, 1, 1, 1];
    the compositions [1, 2, 1] and [1, 1, 2] are the same as partition
    [2, 1, 1].
    See Also
    ========
    sympy.utilities.iterables.partitions,
    sympy.utilities.iterables.multiset_partitions
    Reference: http://en.wikipedia.org/wiki/Partition_(number_theory)
    """
    # Lazily-built {summand: multiplicity} dict and its insertion-order keys.
    _dict = None
    _keys = None
    def __new__(cls, partition, integer=None):
        """
        Generates a new IntegerPartition object from a list or dictionary.
        The partition can be given as a list of positive integers or a
        dictionary of (integer, multiplicity) items. If the partition is
        preceded by an integer an error will be raised if the partition
        does not sum to that given integer.
        Examples
        ========
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([5, 4, 3, 1, 1])
        >>> a
        IntegerPartition(14, (5, 4, 3, 1, 1))
        >>> print a
        [5, 4, 3, 1, 1]
        >>> IntegerPartition({1:3, 2:1})
        IntegerPartition(5, (2, 1, 1, 1))
        If the value that the partition should sum to is given first, a check
        will be made and an error will be raised if there is a discrepancy:
        >>> IntegerPartition(10, [5, 4, 3, 1])
        Traceback (most recent call last):
        ...
        ValueError: The partition is not valid
        """
        if integer is not None:
            # Support the documented IntegerPartition(n, partition) calling
            # order by swapping back into (partition, integer) form.
            integer, partition = partition, integer
        if isinstance(partition, (dict, Dict)):
            _ = []
            # expand {summand: multiplicity} into a descending summand tuple
            for k, v in sorted(partition.items(), reverse=True):
                if not v:
                    continue
                k, v = int_tested(k, v)
                _.extend([k]*v)
            partition = tuple(_)
        else:
            partition = tuple(sorted(int_tested(partition), reverse=True))
        sum_ok = False
        if integer is None:
            integer = sum(partition)
            sum_ok = True
        else:
            integer = int_tested(integer)
        if not sum_ok and sum(partition) != integer:
            raise ValueError("Partition did not add to %s" % integer)
        if any(i < 1 for i in partition):
            raise ValueError("The summands must all be positive.")
        obj = Basic.__new__(cls, integer, partition)
        obj.partition = list(partition)
        obj.integer = integer
        return obj
    def prev_lex(self):
        """Return the previous partition of the integer, n, in lexical order,
        wrapping around to [1, ..., 1] if the partition is [n].
        Examples
        ========
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> p = IntegerPartition([4])
        >>> print p.prev_lex()
        [3, 1]
        >>> p.partition > p.prev_lex().partition
        True
        """
        d = defaultdict(int)
        d.update(self.as_dict())
        keys = self._keys
        if keys == [1]:
            # [1, 1, ..., 1] wraps around to [n]
            return IntegerPartition({self.integer: 1})
        if keys[-1] != 1:
            d[keys[-1]] -= 1
            if keys[-1] == 2:
                d[1] = 2
            else:
                d[keys[-1] - 1] = d[1] = 1
        else:
            d[keys[-2]] -= 1
            left = d[1] + keys[-2]
            new = keys[-2]
            d[1] = 0
            # redistribute the freed amount greedily into the largest
            # summands smaller than the one that was decremented
            while left:
                new -= 1
                if left - new >= 0:
                    d[new] += left//new
                    left -= d[new]*new
        return IntegerPartition(self.integer, d)
    def next_lex(self):
        """Return the next partition of the integer, n, in lexical order,
        wrapping around to [n] if the partition is [1, ..., 1].
        Examples
        ========
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> p = IntegerPartition([3, 1])
        >>> print p.next_lex()
        [4]
        >>> p.partition < p.next_lex().partition
        True
        """
        d = defaultdict(int)
        d.update(self.as_dict())
        key = self._keys
        a = key[-1]
        if a == self.integer:
            # [n] wraps around to [1, 1, ..., 1]
            d.clear()
            d[1] = self.integer
        elif a == 1:
            if d[a] > 1:
                d[a + 1] += 1
                d[a] -= 2
            else:
                b = key[-2]
                d[b + 1] += 1
                d[1] = (d[b] - 1)*b
                d[b] = 0
        else:
            if d[a] > 1:
                if len(key) == 1:
                    d.clear()
                    d[a + 1] = 1
                    d[1] = self.integer - a - 1
                else:
                    a1 = a + 1
                    d[a1] += 1
                    d[1] = d[a]*a - a1
                    d[a] = 0
            else:
                b = key[-2]
                b1 = b + 1
                d[b1] += 1
                need = d[b]*b + d[a]*a - b1
                d[a] = d[b] = 0
                d[1] = need
        return IntegerPartition(self.integer, d)
    def as_dict(self):
        """Return the partition as a dictionary whose keys are the
        partition integers and the values are the multiplicity of that
        integer.
        Examples
        ========
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> IntegerPartition([1]*3 + [2] + [3]*4).as_dict()
        {1: 3, 2: 1, 3: 4}
        """
        if self._dict is None:
            d = {}
            self._keys = []
            for i in self.partition:
                if i not in d:
                    d[i] = 0
                    self._keys.append(i)
                d[i] += 1
            self._dict = d
        return self._dict
    @property
    def conjugate(self):
        """
        Computes the conjugate partition of itself.
        Examples
        ========
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([6, 3, 3, 2, 1])
        >>> a.conjugate
        [5, 4, 3, 1, 1, 1]
        """
        j = 1
        temp_arr = list(self.partition) + [0]
        k = temp_arr[0]
        b = [0]*k
        # b[k-1] counts how many summands are >= k (the transposed diagram)
        while k > 0:
            while k > temp_arr[j]:
                b[k - 1] = j
                k -= 1
            j += 1
        return b
    def __lt__(self, other):
        """Return True if self is less than other when the partition
        is listed from smallest to biggest.
        Examples
        ========
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([3, 1])
        >>> a < a
        False
        >>> b = a.next_lex()
        >>> a < b
        True
        >>> a == b
        False
        """
        return list(reversed(self.partition)) < list(reversed(other.partition))
    def __le__(self, other):
        """Return True if self is less than other when the partition
        is listed from smallest to biggest.
        Examples
        ========
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([4])
        >>> a <= a
        True
        """
        return list(reversed(self.partition)) <= list(reversed(other.partition))
    def as_ferrers(self, char='#'):
        """
        Prints the ferrer diagram of a partition.
        Examples
        ========
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> print IntegerPartition([1, 1, 5]).as_ferrers()
        #####
        #
        #
        """
        return "\n".join([char*i for i in self.partition])
    def __str__(self):
        return str(list(self.partition))
def random_integer_partition(n, seed=None):
    """
    Generates a random integer partition summing to ``n`` as a list
    of reverse-sorted integers.
    Examples
    ========
    >>> from sympy.combinatorics.partitions import random_integer_partition
    For the following, a seed is given so a known value can be shown; in
    practice, the seed would not be given.
    >>> random_integer_partition(100, seed=[1, 1, 12, 1, 2, 1, 85, 1])
    [85, 12, 2, 1]
    >>> random_integer_partition(10, seed=[1, 2, 3, 1, 5, 1])
    [5, 3, 1, 1]
    >>> random_integer_partition(1)
    [1]
    """
    from sympy.utilities.randtest import _randint
    remaining = int_tested(n)
    if remaining < 1:
        raise ValueError('n must be a positive integer')
    randint = _randint(seed)
    # Draw (summand, multiplicity) pairs until the whole of n is consumed.
    pairs = []
    while remaining > 0:
        summand = randint(1, remaining)
        multiplicity = randint(1, remaining//summand)
        pairs.append((summand, multiplicity))
        remaining -= summand*multiplicity
    pairs.sort(reverse=True)
    return flatten([[summand]*count for summand, count in pairs])
def RGS_generalized(m):
    """
    Computes the m + 1 generalized unrestricted growth strings
    and returns them as rows in matrix.
    Examples
    ========
    >>> from sympy.combinatorics.partitions import RGS_generalized
    >>> RGS_generalized(6)
    [  1,   1,   1,  1,  1, 1, 1]
    [  1,   2,   3,  4,  5, 6, 0]
    [  2,   5,  10, 17, 26, 0, 0]
    [  5,  15,  37, 77,  0, 0, 0]
    [ 15,  52, 151,  0,  0, 0, 0]
    [ 52, 203,   0,  0,  0, 0, 0]
    [203,   0,   0,  0,  0, 0, 0]
    """
    table = zeros(m + 1)
    # Base row: a single-element suffix always yields exactly one string.
    for col in xrange(0, m + 1):
        table[0, col] = 1
    # Each row is built from the previous one; entries past the diagonal
    # band (col > m - row) are unused and stay zero.
    for row in xrange(1, m + 1):
        for col in xrange(m):
            if col > m - row:
                table[row, col] = 0
            else:
                table[row, col] = col * table[row - 1, col] \
                    + table[row - 1, col + 1]
    return table
def RGS_enum(m):
    """
    RGS_enum computes the total number of restricted growth strings
    possible for a superset of size m.
    Examples
    ========
    >>> from sympy.combinatorics.partitions import RGS_enum
    >>> from sympy.combinatorics.partitions import Partition
    >>> RGS_enum(4)
    15
    >>> RGS_enum(5)
    52
    >>> RGS_enum(6)
    203
    We can check that the enumeration is correct by actually generating
    the partitions. Here, the 15 partitions of 4 items are generated:
    >>> a = Partition([range(4)])
    >>> s = set()
    >>> for i in range(20):
    ...     s.add(a)
    ...     a += 1
    ...
    >>> assert len(s) == 15
    """
    if m < 1:
        return 0
    if m == 1:
        return 1
    # Bell-number recurrence: B(j) = sum_i C(j-1, i) * B(i).
    size = m + 1
    bell = [1] * size
    for j in xrange(1, size):
        for i in xrange(1, j):
            bell[j] += C.binomial(j - 1, i) * bell[i]
    return bell[size - 1]
def RGS_unrank(rank, m):
    """
    Gives the unranked restricted growth string for a given
    superset size.
    Examples
    ========
    >>> from sympy.combinatorics.partitions import RGS_unrank
    >>> RGS_unrank(14, 4)
    [0, 1, 2, 3]
    >>> RGS_unrank(0, 4)
    [0, 0, 0, 0]
    """
    if m < 1:
        raise ValueError("The superset size must be >= 1")
    if rank < 0 or RGS_enum(m) <= rank:
        raise ValueError("Invalid arguments")
    labels = [1] * (m + 1)
    blocks = 1
    table = RGS_generalized(m)
    for pos in xrange(2, m + 1):
        count = table[m - pos, blocks]
        threshold = blocks * count
        if threshold <= rank:
            # the remainder falls past all strings reusing existing blocks,
            # so this position opens a new block
            labels[pos] = blocks + 1
            rank -= threshold
            blocks += 1
        else:
            labels[pos] = int(rank / count + 1)
            rank %= count
    # shift to 0-based block labels, dropping the unused leading slot
    return [x - 1 for x in labels[1:]]
def RGS_rank(rgs):
    """
    Computes the rank of a restricted growth string.
    Examples
    ========
    >>> from sympy.combinatorics.partitions import RGS_rank, RGS_unrank
    >>> RGS_rank([0, 1, 2, 1, 3])
    42
    >>> RGS_rank(RGS_unrank(4, 7))
    4
    """
    size = len(rgs)
    table = RGS_generalized(size)
    total = 0
    for pos in xrange(1, size):
        suffix_len = len(rgs[(pos + 1):])
        max_label = max(rgs[0:pos])
        total += table[suffix_len, max_label + 1] * rgs[pos]
    return total
| {
"content_hash": "5a29eab0f06c239a9b64ef21e2a1d93f",
"timestamp": "",
"source": "github",
"line_count": 704,
"max_line_length": 80,
"avg_line_length": 28.515625,
"alnum_prop": 0.49778331257783315,
"repo_name": "flacjacket/sympy",
"id": "5beb710f4aff5964fb9a8bbe84374b3bab63a0a6",
"size": "20075",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sympy/combinatorics/partitions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10293116"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
from subprocess import Popen, PIPE
# Decryption-oracle attack (Python 2 script): recovers the 4-byte
# version/reserved header of a ciphertext one byte at a time by observing
# which distinct error message ./baddecrypt.py emits for each guess.
# proc = Popen(["./badencrypt.py", "poop"],stdout=PIPE)
# hexCiphertext = proc.communicate()[0].strip()
hexCiphertext = "1caaeb57ac2d4af7f0b7fce4e7238427d80721572ab7756552cecce8b3b35f30b098ba91594575af78cfaa06e282f53e286ce54345ea5dc244d20c2c370d4a332fcc462d463aa505ec31ec2c79d784bf"
xor = 0x0
versionCiphertext = ""
# b selects which of the four header bytes is being attacked; hex offsets
# 32..40 address those bytes within the hex-encoded ciphertext.
for b in range(4):
    hexGuess = b*2
    # Try every possible XOR mask for this byte until the oracle's error
    # message changes (meaning the field check for this byte passed).
    for i in range(256):
        ciphertextGuess = int(hexCiphertext[32+hexGuess:34+hexGuess], 16) ^ i
        # Add a leading 0 if hex is less than 16
        if ciphertextGuess < 16:
            ciphertextGuess = "0%x" % ciphertextGuess
        else:
            ciphertextGuess = "%x" % ciphertextGuess
        proc = Popen(["./baddecrypt.py", versionCiphertext+ciphertextGuess+hexCiphertext[34+hexGuess:96]],stdout=PIPE)
        output = proc.communicate()[0].strip()
        if b == 0 and "Wrong version!" != output:
            mask = i << 24
            xor |= mask
            versionCiphertext += ciphertextGuess
            break
        elif b == 1 and "Wrong subversion!" != output:
            mask = i << 16
            xor |= mask
            versionCiphertext += ciphertextGuess
            break
        elif b == 2 and "First reserved byte error!" != output:
            mask = i << 8
            xor |= mask
            versionCiphertext += ciphertextGuess
            break
        elif b == 3 and "Second reserved byte error!" != output:
            xor |= i
            versionCiphertext += ciphertextGuess
            break
# The recovered XOR mask, combined with the known expected header value
# 0x01010000, yields the plaintext header bytes.
cracked = xor ^ 0x01010000
plaintext = ""
for i in range(4):
    plaintext = chr(cracked % 256) + plaintext
    cracked = cracked >> 8
print "The message is " + plaintext
# Forget about guessing the last three bytes, only first four are important
# proc = Popen(["./baddecrypt.py", versionCiphertext+hexCiphertext[40:96]],stdout=PIPE)
# output = proc.communicate()[0].strip()
# import re
# pattern = re.compile(r'(\d+)')
# # Capture msglen from "Length of ", msglen, "is too large!"
# msglen = pattern.search(output).group()
# # Check message length wasn't actually valid
# if msglen:
#     msg = int(msglen)
#     # Can't figure out the fourth to last char
#     for i in range(3):
#         print chr(msg % 256)
# msg = msg >> 8 | {
"content_hash": "1597a555b074aa608d6acaf11669695b",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 178,
"avg_line_length": 30.746268656716417,
"alnum_prop": 0.6898058252427185,
"repo_name": "somethingnew2-0/CS642-HW4",
"id": "d22f83b6c627eddd19a7101ebb4f19eb103420f4",
"size": "2060",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oracle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "1716"
},
{
"name": "Python",
"bytes": "5164"
}
],
"symlink_target": ""
} |
"""
:mod:`tests` -- Utility methods for tests.
===================================
.. automodule:: utils
:platform: Unix
:synopsis: Tests for Nova.
.. moduleauthor:: Nirmal Ranganathan <nirmal.ranganathan@rackspace.com>
.. moduleauthor:: Tim Simpson <tim.simpson@rackspace.com>
"""
from proboscis import asserts
from trove_guestagent.tests.config import CONFIG
from troveclient.compat.xml import TroveXmlClient
from trove_guestagent.openstack.common import processutils
def add_report_event_to(home, name):
    """Takes a module, class, etc, and an attribute name to decorate.
    The named attribute is replaced with a wrapper that logs the call and
    its return value to the configured test reporter, then delegates to the
    original callable.
    """
    func = getattr(home, name)
    def __cb(*args, **kwargs):
        # While %s turns a var into a string but in some rare cases explicit
        # str() is less likely to raise an exception.
        arg_strs = [repr(arg) for arg in args]
        arg_strs += ['%s=%s' % (repr(key), repr(value))
                     for (key, value) in kwargs.items()]
        CONFIG.get_reporter().log("[RDC] Calling : %s(%s)..."
                                  % (name, ','.join(arg_strs)))
        value = func(*args, **kwargs)
        # Fix: get_reporter is a method and must be called (as above);
        # ``CONFIG.get_reporter.log`` raised AttributeError on every return.
        CONFIG.get_reporter().log("[RDC] returned %s." % str(value))
        return value
    setattr(home, name, __cb)
class TestClient(object):
    """Decorates the rich clients with some extra methods.
    These methods are filled with test asserts, meaning if you use this you
    get the tests for free.
    """
    def __init__(self, real_client):
        """Accepts a normal client."""
        self.real_client = real_client
    def assert_http_code(self, expected_http_code):
        # Assert on the status of the most recent request made by the client.
        resp, body = self.real_client.client.last_response
        asserts.assert_equal(resp.status, expected_http_code)
    @property
    def last_http_code(self):
        # HTTP status code of the most recent response.
        resp, body = self.real_client.client.last_response
        return resp.status
    @staticmethod
    def find_flavor_self_href(flavor):
        # A flavor's canonical URL is its 'self' link; fail the test if missing.
        self_links = [link for link in flavor.links if link['rel'] == 'self']
        asserts.assert_true(len(self_links) > 0, "Flavor had no self href!")
        flavor_href = self_links[0]['href']
        asserts.assert_false(flavor_href is None,
                             "Flavor link self href missing.")
        return flavor_href
    def find_flavors_by(self, condition, flavor_manager=None):
        # Generic filter over the flavor listing; ``condition`` is a predicate.
        flavor_manager = flavor_manager or self.flavors
        flavors = flavor_manager.list()
        return [flavor for flavor in flavors if condition(flavor)]
    def find_flavors_by_name(self, name, flavor_manager=None):
        return self.find_flavors_by(lambda flavor: flavor.name == name,
                                    flavor_manager)
    def find_flavors_by_ram(self, ram, flavor_manager=None):
        return self.find_flavors_by(lambda flavor: flavor.ram == ram,
                                    flavor_manager)
    def find_flavor_and_self_href(self, flavor_id, flavor_manager=None):
        """Given an ID, returns flavor and its self href."""
        flavor_manager = flavor_manager or self.flavors
        asserts.assert_false(flavor_id is None)
        flavor = flavor_manager.get(flavor_id)
        asserts.assert_false(flavor is None)
        flavor_href = self.find_flavor_self_href(flavor)
        return flavor, flavor_href
    def __getattr__(self, item):
        # Delegate everything else (including ``self.flavors``) to the
        # wrapped real client.
        return getattr(self.real_client, item)
def call_xmllint(name, body):
    """Validate ``body`` as XML by writing it to a temp file and running
    the ``xmllint`` binary (optionally against the configured XSD schema).
    Fails the current test if validation does not succeed.
    :param name: label for the payload being checked (e.g. "request")
    :param body: the raw XML text to validate
    """
    try:
        # 'xml_file' instead of 'file' — the original shadowed the py2 builtin.
        with open(CONFIG.xml_temp_file, 'w') as xml_file:
            xml_file.write(body)
        args = [CONFIG.xml_temp_file]
        if CONFIG.get('xml_xsd', None):
            args += ["--schema", CONFIG.xml_xsd]
        processutils.execute(CONFIG.xmllint_bin, *args,
                             check_exit_code=0, shell=False)
    except processutils.ProcessExecutionError as pe:
        # Fix: bare ``fail`` was an undefined name — only ``asserts`` is
        # imported from proboscis, so this raised NameError instead of
        # reporting the validation failure.
        asserts.fail("Error validating XML! %s" % pe)
class XmlLintClient(TroveXmlClient):
    """XML client that validates every request/response body with xmllint."""
    content_type = 'xml'
    def http_log(self, args, kwargs, resp, body):
        # Piggy-back on the logging hook to validate both directions of traffic.
        #self.pretty_log(args, kwargs, resp, body)
        if kwargs.get('body', None):
            call_xmllint("request", kwargs['body'])
        if body:
            call_xmllint("response", body)
| {
"content_hash": "133c3d53f4618aa51f3151e2995ccd56",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 77,
"avg_line_length": 35.41525423728814,
"alnum_prop": 0.6111509930605408,
"repo_name": "denismakogon/trove-guestagent",
"id": "389ef2089379d70f1edb500db9cdb52cf7b02074",
"size": "4864",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trove_guestagent/tests/util/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "19900"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "1023022"
}
],
"symlink_target": ""
} |
"""
.. module: security_monkey.accounts.sample_extended_aws
:platform: Unix
:synopsis: Extends an AWS account with additional qualifiers that may be
used in the applied_to_account method in custom auditors
.. version:: $$VERSION$$
.. moduleauthor:: Bridgewater OSS <opensource@bwater.com>
"""
# from security_monkey.account_manager import CustomFieldConfig
# from security_monkey.account_managers.aws_account import AWSAccountManager
# from security_monkey.datastore import Account, AccountTypeCustomValues
# from security_monkey import app
#
#
# class ExtendedAWSAccountManager(AWSAccountManager):
# account_type = 'EXTENDED_AWS'
# compatable_account_types = ['AWS']
# custom_field_configs = AWSAccountManager.custom_field_configs + [
# CustomFieldConfig('security_level', 'Security Level', False,
# 'A numeric value used to indicated the risk'),
# ]
#
# def __init__(self):
# super(ExtendedAWSAccountManager, self).__init__()
#
# def _load(self, account):
# """
# Demonstrates ability to retrieve data from some other system of record
# """
# account.custom_fields.append(AccountTypeCustomValues(name='security_level',
# value=self._getFromConfig(account.identifier)))
# return account
#
# def _getFromConfig(self, account_number):
# """
# Currently hard coded but could access some other datasource
# """
# return "5"
| {
"content_hash": "9d0a2654433c2ec9ae33f5e639362ab3",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 110,
"avg_line_length": 36.404761904761905,
"alnum_prop": 0.6546762589928058,
"repo_name": "markofu/security_monkey",
"id": "adf2a69c78461611a8fa9e18e63ebb33b2f4680a",
"size": "2155",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "security_monkey/account_managers/custom/sample_extended_aws.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22116"
},
{
"name": "Dart",
"bytes": "86565"
},
{
"name": "HTML",
"bytes": "80747"
},
{
"name": "JavaScript",
"bytes": "8629"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "483322"
},
{
"name": "Shell",
"bytes": "19151"
}
],
"symlink_target": ""
} |
"""Test code for the Face layer of RPC Framework."""
import abc
import contextlib
import threading
import unittest
# test_interfaces is referenced from specification in this module.
from grpc.framework.foundation import logging_pool
from grpc.framework.interfaces.face import face
from grpc_test.framework.common import test_constants
from grpc_test.framework.common import test_control
from grpc_test.framework.common import test_coverage
from grpc_test.framework.interfaces.face import _3069_test_constant
from grpc_test.framework.interfaces.face import _digest
from grpc_test.framework.interfaces.face import _stock_service
from grpc_test.framework.interfaces.face import test_interfaces # pylint: disable=unused-import
class _PauseableIterator(object):
    """Iterator wrapper whose delivery of values can be temporarily paused.

    While paused (via the ``pause`` context manager), any consumer blocked in
    ``next`` waits on an internal condition until the pause is lifted.
    """

    def __init__(self, upstream):
        self._upstream = upstream
        self._condition = threading.Condition()
        self._paused = False

    @contextlib.contextmanager
    def pause(self):
        """Context manager that blocks iteration for the duration of the block."""
        with self._condition:
            self._paused = True
        yield
        with self._condition:
            self._paused = False
            self._condition.notify_all()

    def __iter__(self):
        return self

    def next(self):
        """Block while paused, then return the next upstream value."""
        with self._condition:
            while self._paused:
                self._condition.wait()
        return next(self._upstream)

    # Python 3 compatibility: the iterator protocol calls __next__ there.
    # Aliasing keeps the original Python 2 `next` entry point intact.
    __next__ = next
class TestCase(test_coverage.Coverage, unittest.TestCase):
  """A test of the Face layer of RPC Framework.

  Concrete subclasses must have an "implementation" attribute of type
  test_interfaces.Implementation and an "invoker_constructor" attribute of type
  _invocation.InvokerConstructor.
  """
  # NOTE: Python 2 metaclass syntax; under Python 3 this line has no effect.
  __metaclass__ = abc.ABCMeta

  NAME = 'FutureInvocationAsynchronousEventServiceTest'

  def setUp(self):
    """See unittest.TestCase.setUp for full specification.

    Overriding implementations must call this implementation.
    """
    # Pause/fail control shared with the stock service so tests can force
    # expiration and failure conditions deterministically.
    self._control = test_control.PauseFailControl()
    self._digest_pool = logging_pool.pool(test_constants.POOL_SIZE)
    self._digest = _digest.digest(
        _stock_service.STOCK_TEST_SERVICE, self._control, self._digest_pool)
    generic_stub, dynamic_stubs, self._memo = self.implementation.instantiate(
        self._digest.methods, self._digest.event_method_implementations, None)
    self._invoker = self.invoker_constructor.construct_invoker(
        generic_stub, dynamic_stubs, self._digest.methods)

  def tearDown(self):
    """See unittest.TestCase.tearDown for full specification.

    Overriding implementations must call this implementation.
    """
    self._invoker = None
    self.implementation.destantiate(self._memo)
    self._digest_pool.shutdown(wait=True)

  # ---- Success-path tests: one per cardinality combination. --------------
  # NOTE: iteritems() below is Python 2 only; this module predates Python 3.

  def testSuccessfulUnaryRequestUnaryResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()

        response_future = self._invoker.future(group, method)(
            request, test_constants.LONG_TIMEOUT)
        response = response_future.result()

        test_messages.verify(request, response, self)

  def testSuccessfulUnaryRequestStreamResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()

        response_iterator = self._invoker.future(group, method)(
            request, test_constants.LONG_TIMEOUT)
        responses = list(response_iterator)

        test_messages.verify(request, responses, self)

  def testSuccessfulStreamRequestUnaryResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.stream_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        requests = test_messages.requests()
        request_iterator = _PauseableIterator(iter(requests))

        # Use of a paused iterator of requests allows us to test that control is
        # returned to calling code before the iterator yields any requests.
        with request_iterator.pause():
          response_future = self._invoker.future(group, method)(
              request_iterator, test_constants.LONG_TIMEOUT)
        response = response_future.result()

        test_messages.verify(requests, response, self)

  def testSuccessfulStreamRequestStreamResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.stream_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        requests = test_messages.requests()
        request_iterator = _PauseableIterator(iter(requests))

        # Use of a paused iterator of requests allows us to test that control is
        # returned to calling code before the iterator yields any requests.
        with request_iterator.pause():
          response_iterator = self._invoker.future(group, method)(
              request_iterator, test_constants.LONG_TIMEOUT)
        responses = list(response_iterator)

        test_messages.verify(requests, responses, self)

  def testSequentialInvocations(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        first_request = test_messages.request()
        second_request = test_messages.request()

        # Second invocation starts only after the first has fully completed.
        first_response_future = self._invoker.future(group, method)(
            first_request, test_constants.LONG_TIMEOUT)
        first_response = first_response_future.result()

        test_messages.verify(first_request, first_response, self)

        second_response_future = self._invoker.future(group, method)(
            second_request, test_constants.LONG_TIMEOUT)
        second_response = second_response_future.result()

        test_messages.verify(second_request, second_response, self)

  def testParallelInvocations(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        first_request = test_messages.request()
        second_request = test_messages.request()

        # Both invocations are started before either result is awaited.
        first_response_future = self._invoker.future(group, method)(
            first_request, test_constants.LONG_TIMEOUT)
        second_response_future = self._invoker.future(group, method)(
            second_request, test_constants.LONG_TIMEOUT)
        first_response = first_response_future.result()
        second_response = second_response_future.result()

        test_messages.verify(first_request, first_response, self)
        test_messages.verify(second_request, second_response, self)

  @unittest.skip('TODO(nathaniel): implement.')
  def testWaitingForSomeButNotAllParallelInvocations(self):
    raise NotImplementedError()

  # ---- Cancellation tests: the RPC is cancelled while the servicer is
  # paused, so it cannot have completed before the cancel call. ------------

  def testCancelledUnaryRequestUnaryResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()

        with self._control.pause():
          response_future = self._invoker.future(group, method)(
              request, test_constants.LONG_TIMEOUT)
          cancel_method_return_value = response_future.cancel()

        # NOTE(review): cancel() returning False here appears to follow the
        # futures convention that cancellation of a running RPC is not
        # guaranteed — confirm against the face interface specification.
        self.assertFalse(cancel_method_return_value)
        self.assertTrue(response_future.cancelled())

  def testCancelledUnaryRequestStreamResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()

        with self._control.pause():
          response_iterator = self._invoker.future(group, method)(
              request, test_constants.LONG_TIMEOUT)
          response_iterator.cancel()

        with self.assertRaises(face.CancellationError):
          next(response_iterator)

  def testCancelledStreamRequestUnaryResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.stream_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        requests = test_messages.requests()

        with self._control.pause():
          response_future = self._invoker.future(group, method)(
              iter(requests), test_constants.LONG_TIMEOUT)
          cancel_method_return_value = response_future.cancel()

        self.assertFalse(cancel_method_return_value)
        self.assertTrue(response_future.cancelled())

  def testCancelledStreamRequestStreamResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.stream_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        requests = test_messages.requests()

        with self._control.pause():
          response_iterator = self._invoker.future(group, method)(
              iter(requests), test_constants.LONG_TIMEOUT)
          response_iterator.cancel()

        with self.assertRaises(face.CancellationError):
          next(response_iterator)

  # ---- Expiration tests: the servicer is paused and a very short timeout
  # is used, so the RPC must expire before any response is produced. -------

  def testExpiredUnaryRequestUnaryResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()

        with self._control.pause():
          response_future = self._invoker.future(
              group, method)(request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
          self.assertIsInstance(
              response_future.exception(), face.ExpirationError)
          with self.assertRaises(face.ExpirationError):
            response_future.result()

  def testExpiredUnaryRequestStreamResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()

        with self._control.pause():
          response_iterator = self._invoker.future(group, method)(
              request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
          with self.assertRaises(face.ExpirationError):
            list(response_iterator)

  def testExpiredStreamRequestUnaryResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.stream_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        requests = test_messages.requests()

        with self._control.pause():
          response_future = self._invoker.future(group, method)(
              iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
          self.assertIsInstance(
              response_future.exception(), face.ExpirationError)
          with self.assertRaises(face.ExpirationError):
            response_future.result()

  def testExpiredStreamRequestStreamResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.stream_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        requests = test_messages.requests()

        with self._control.pause():
          response_iterator = self._invoker.future(group, method)(
              iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
          with self.assertRaises(face.ExpirationError):
            list(response_iterator)

  # ---- Failure tests: the servicer is made to fail; from the invocation
  # side this is indistinguishable from expiration (see comments below). ---

  def testFailedUnaryRequestUnaryResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()

        with self._control.fail():
          response_future = self._invoker.future(group, method)(
              request, _3069_test_constant.REALLY_SHORT_TIMEOUT)

          # Because the servicer fails outside of the thread from which the
          # servicer-side runtime called into it its failure is
          # indistinguishable from simply not having called its
          # response_callback before the expiration of the RPC.
          self.assertIsInstance(
              response_future.exception(), face.ExpirationError)
          with self.assertRaises(face.ExpirationError):
            response_future.result()

  def testFailedUnaryRequestStreamResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()

        # Because the servicer fails outside of the thread from which the
        # servicer-side runtime called into it its failure is indistinguishable
        # from simply not having called its response_consumer before the
        # expiration of the RPC.
        with self._control.fail(), self.assertRaises(face.ExpirationError):
          response_iterator = self._invoker.future(group, method)(
              request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
          list(response_iterator)

  def testFailedStreamRequestUnaryResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.stream_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        requests = test_messages.requests()

        with self._control.fail():
          response_future = self._invoker.future(group, method)(
              iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)

          # Because the servicer fails outside of the thread from which the
          # servicer-side runtime called into it its failure is
          # indistinguishable from simply not having called its
          # response_callback before the expiration of the RPC.
          self.assertIsInstance(
              response_future.exception(), face.ExpirationError)
          with self.assertRaises(face.ExpirationError):
            response_future.result()

  def testFailedStreamRequestStreamResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.stream_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        requests = test_messages.requests()

        # Because the servicer fails outside of the thread from which the
        # servicer-side runtime called into it its failure is indistinguishable
        # from simply not having called its response_consumer before the
        # expiration of the RPC.
        with self._control.fail(), self.assertRaises(face.ExpirationError):
          response_iterator = self._invoker.future(group, method)(
              iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
          list(response_iterator)
| {
"content_hash": "20b0dd76697abf50c85531ce1bd572db",
"timestamp": "",
"source": "github",
"line_count": 351,
"max_line_length": 96,
"avg_line_length": 42.0997150997151,
"alnum_prop": 0.6923597482574271,
"repo_name": "doubi-workshop/grpc",
"id": "272a37f15f818c7cf81788eece97ec836fa0caf6",
"size": "16306",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/python/grpcio_test/grpc_test/framework/interfaces/face/_future_invocation_asynchronous_event_service.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4084"
},
{
"name": "C",
"bytes": "3713043"
},
{
"name": "C#",
"bytes": "656533"
},
{
"name": "C++",
"bytes": "1046929"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "JavaScript",
"bytes": "199121"
},
{
"name": "Makefile",
"bytes": "1215058"
},
{
"name": "Objective-C",
"bytes": "254993"
},
{
"name": "PHP",
"bytes": "71664"
},
{
"name": "Protocol Buffer",
"bytes": "110494"
},
{
"name": "Python",
"bytes": "1503052"
},
{
"name": "Ruby",
"bytes": "351984"
},
{
"name": "Shell",
"bytes": "25857"
},
{
"name": "Swift",
"bytes": "5275"
}
],
"symlink_target": ""
} |
'''
Created on Apr 29, 2010
@author: jnaous
'''
from django import forms
from models import OpenFlowAggregate, OpenFlowSliceInfo, OpenFlowConnection
from openflow.plugin.models import OpenFlowInterface, NonOpenFlowConnection
from expedient.common.utils import create_or_update
from django.forms.models import ModelChoiceField
class OpenFlowAggregateForm(forms.ModelForm):
    '''
    A form to create and edit OpenFlow Aggregates.
    '''
    class Meta:
        model = OpenFlowAggregate
        # Internal/ownership fields are managed by the application, not the user.
        exclude = ['client', 'owner', 'users', "leaf_name"]
class OpenFlowSliceInfoForm(forms.ModelForm):
    """A form to edit slice-level OpenFlow info; the owning slice is set by the caller."""
    class Meta:
        model = OpenFlowSliceInfo
        exclude = ["slice"]
class OpenFlowConnectionSelectionForm(forms.Form):
    """
    A form to select multiple openflow connections.
    """
    # The .all() querysets below are placeholders; __init__ narrows them
    # to the caller-supplied querysets before rendering.
    of_connections = forms.ModelMultipleChoiceField(
        OpenFlowConnection.objects.all(),
        widget=forms.CheckboxSelectMultiple,
        required=False,
        label="Existing OpenFlow Connections")
    non_of_connections = forms.ModelMultipleChoiceField(
        NonOpenFlowConnection.objects.all(),
        widget=forms.CheckboxSelectMultiple,
        required=False,
        label="Existing Non-OpenFlow Connections")

    def __init__(self, of_cnxn_qs, non_of_cnxn_qs, *args, **kwargs):
        """Restrict the selectable connections to the given querysets."""
        super(OpenFlowConnectionSelectionForm, self).__init__(*args, **kwargs)
        self.fields["of_connections"].queryset = of_cnxn_qs
        self.fields["non_of_connections"].queryset = non_of_cnxn_qs
class AsLeafClassModelChoiceField(ModelChoiceField):
    """ModelChoiceField that labels each choice with its leaf-class representation.

    Falls back to the parent class's labeling when the object does not
    provide ``as_leaf_class`` (previously this branch implicitly returned
    None, producing blank labels in the rendered widget).
    """
    def label_from_instance(self, obj):
        if hasattr(obj, "as_leaf_class"):
            return "%s" % obj.as_leaf_class()
        return super(AsLeafClassModelChoiceField, self).label_from_instance(obj)
class OpenFlowStaticConnectionForm(forms.Form):
    """
    A form for selecting a local and a remote interface to create a static
    bi-directional connection.
    """
    local_interface = AsLeafClassModelChoiceField(
        OpenFlowInterface.objects.all())
    remote_interface = AsLeafClassModelChoiceField(
        OpenFlowInterface.objects.all())

    def __init__(self, aggregate, *args, **kwargs):
        """Limit local choices to *aggregate*'s interfaces and remote choices to all others."""
        super(OpenFlowStaticConnectionForm, self).__init__(*args, **kwargs)
        self.fields["local_interface"].queryset = \
            OpenFlowInterface.objects.filter(aggregate__id=aggregate.id)
        self.fields["remote_interface"].queryset = \
            OpenFlowInterface.objects.exclude(aggregate__id=aggregate.id)

    def save_connections(self):
        """
        Create two unique unidirectional links between the two interfaces.

        @return: tuple of the created connections. If a connection is not
            created, None is returned for it.
        """
        # One connection per direction; create_or_update returns
        # (instance, created) — a connection that already existed is
        # reported as None in the result tuple.
        cnxn1, created = create_or_update(
            OpenFlowConnection,
            filter_attrs=dict(
                src_iface = self.cleaned_data["local_interface"],
                dst_iface = self.cleaned_data["remote_interface"],
            ),
        )
        if not created: cnxn1 = None
        cnxn2, created = create_or_update(
            OpenFlowConnection,
            filter_attrs=dict(
                dst_iface = self.cleaned_data["local_interface"],
                src_iface = self.cleaned_data["remote_interface"],
            ),
        )
        if not created: cnxn2 = None
        return (cnxn1, cnxn2)
class NonOpenFlowStaticConnectionForm(forms.ModelForm):
    """
    A form for selecting a local interface and a non-openflow resource to
    create a static connection.
    """
    class Meta:
        # NOTE(review): no fields/exclude declared — all model fields are
        # exposed; newer Django versions require an explicit fields list.
        model = NonOpenFlowConnection

    def __init__(self, aggregate, resource_qs, *args, **kwargs):
        """Limit interface choices to *aggregate* and resources to *resource_qs*."""
        super(NonOpenFlowStaticConnectionForm, self).__init__(*args, **kwargs)
        self.fields["of_iface"].queryset = \
            OpenFlowInterface.objects.filter(aggregate__id=aggregate.id)
        self.fields["resource"].queryset = resource_qs
| {
"content_hash": "f0f9bd8db4807136588912334c574f62",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 78,
"avg_line_length": 33.27731092436975,
"alnum_prop": 0.6436868686868686,
"repo_name": "avlach/univbris-ocf",
"id": "1ff57f8d4bd7172a109e7260720001f2a4c44733",
"size": "3960",
"binary": false,
"copies": "1",
"ref": "refs/heads/ofelia.opticaldevelopment",
"path": "expedient/src/python/plugins/openflow/plugin/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "127542"
},
{
"name": "JavaScript",
"bytes": "289680"
},
{
"name": "Perl",
"bytes": "4421"
},
{
"name": "Python",
"bytes": "3446617"
},
{
"name": "Racket",
"bytes": "32770"
},
{
"name": "Shell",
"bytes": "7609"
}
],
"symlink_target": ""
} |
import sys
import json
from datetime import datetime
from dateutil.parser import parse
import paho.mqtt.client as mqtt
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.streaming.mqtt import MQTTUtils
# Some useful stuff
brokerHost = "mqtt.freo.me"  # MQTT broker to subscribe to
brokerPort = 1883  # default unencrypted MQTT port
brokerUrl = "tcp://"+brokerHost+":"+str(brokerPort)
listenTopic = "/tfl/"  # topic carrying the train-arrival events
cpDir = "/home/oxclo/cp"  # Spark Streaming checkpoint directory
def update(ds, state):
    """State-update function for updateStateByKey.

    ds is the batch of new events for one train; each event is a dict with
    keys trainNumber, stationId and expArrival (a parseable timestamp
    string).  state maps trainNumber -> dict(stationId, expArrival,
    delayed, delay) from the previous batch.

    A train is marked delayed when a later event for the *same* station
    pushes its expected arrival back by more than 60 seconds; reaching a
    new station resets the delayed flag.

    Fixes over the original version:
    - previously unseen trains are now added to the state (before, only
      trains already in the state were updated, so the state never grew
      past empty and every batch's data was dropped);
    - uses total_seconds() instead of .seconds, which misbehaves when the
      new expected arrival is earlier than the previous one.
    """
    if state is None:
        state = dict()
    for current in ds:
        trainNumber = current['trainNumber']
        stationId = current['stationId']
        exp = parse(current['expArrival'])
        previous = state.get(trainNumber)
        if previous is None or previous['stationId'] != stationId:
            # First sighting of this train, or it has moved on to a new
            # station: (re)initialise its record as on-time.
            state[trainNumber] = dict(stationId=stationId, expArrival=exp,
                                      delayed=False, delay=0)
        else:
            delay = (exp - previous['expArrival']).total_seconds()
            if delay > 60:  # anything less than a minute is not "delayed"
                state[trainNumber] = dict(stationId=stationId, expArrival=exp,
                                          delayed=True, delay=delay)
            else:
                state[trainNumber] = dict(stationId=stationId, expArrival=exp,
                                          delayed=False, delay=0)
    return state
# Build and run the streaming pipeline.
# NOTE: the lambda tuple-unpacking used below (lambda (r, d): ...) is
# Python 2 only syntax.
sc = SparkContext(appName="TFLStreaming")
ssc = StreamingContext(sc, 5) # batch interval 5 sec
ssc.checkpoint(cpDir)
lines = MQTTUtils.createStream(ssc, brokerUrl, listenTopic)
# NOTE(review): 'windowed' is never used downstream — the rest of the
# pipeline operates on 'lines' directly; confirm whether windowing was
# meant to feed the map below.
windowed = lines.window(600,5) # look at the last 10 minutes worth with a sliding window of 5 seconds
dicts = lines.map(lambda js: json.loads(js)) # convert from json into a Python dict
mapped = dicts.map(lambda d: (d['trainNumber'],d)) # make the train number the key
ds = mapped.updateStateByKey(update) # compare against previous data
info = ds.filter(lambda (r, d): bool(d)) # ignore if there is no previous data
# the state from the update is a dict (train -> info)
# this is then mapped with a key so we have (train, (train->info))
# so let's get rid of the redundancy
unpack = info.map(lambda (r, d): (r, d[r]))
# now let's swap this over so that the key is whether the train is delayed or not, and assign a count
remap = unpack.map(lambda (r,d): ('delayed', 1) if d['delayed'] else ('ontime', 1))
#now let's count the results with a reducer
counts = remap.reduceByKey(lambda a,b: a+b)
# and print the result to the console
counts.pprint()
#start the processing
ssc.start()
# keep running forever (until Ctrl-C)
ssc.awaitTermination() | {
"content_hash": "aef3d8a1fa70db5d7814b63d5ec8e136",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 121,
"avg_line_length": 41.078947368421055,
"alnum_prop": 0.6668802049967969,
"repo_name": "pzfreo/auto-deploy-node-js",
"id": "861c52ca35cffae820ce2f3a36b7d3d2ffe1d7e0",
"size": "3122",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tfl/tfl-streaming-spark.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "3073"
},
{
"name": "Python",
"bytes": "7528"
},
{
"name": "Shell",
"bytes": "907"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from sentry.web.frontend.base import BaseView
class HomeView(BaseView):
    """Redirects the root URL to the active organization's home page,
    or to organization creation when the user has none."""

    def get(self, request):
        # TODO(dcramer): deal with case when the user cannot create orgs
        organization = self.get_active_organization(request)
        if organization is not None:
            destination = reverse('sentry-organization-home',
                                  args=[organization.slug])
        else:
            destination = reverse('sentry-create-organization')
        return HttpResponseRedirect(destination)
| {
"content_hash": "6496ef18db52ded3ef1e9f976081c204",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 79,
"avg_line_length": 34.76470588235294,
"alnum_prop": 0.7089678510998308,
"repo_name": "jokey2k/sentry",
"id": "032133900256fa53b8045331848d9d91bf910873",
"size": "591",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/sentry/web/frontend/home.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "580459"
},
{
"name": "Gettext Catalog",
"bytes": "2933595"
},
{
"name": "HTML",
"bytes": "292821"
},
{
"name": "JavaScript",
"bytes": "608760"
},
{
"name": "Makefile",
"bytes": "2710"
},
{
"name": "Python",
"bytes": "5105385"
}
],
"symlink_target": ""
} |
"""Utilities of test case class."""
import uuid
from contextlib import contextmanager
from . import ctx
from web.models.issue import Issue
from web.models.comment import Comment
from web.models.attached_file import AttachedFile
from web.models.user import User
def create_issue(subject=None, comments=None, state_id=1):
    """Create an Issue; missing subject/comments are filled with random defaults."""
    resolved_subject = str(uuid.uuid4()) if subject is None else subject
    resolved_comments = [create_comment()] if comments is None else comments
    return Issue(resolved_subject, resolved_comments, state_id)
def create_comment(issue=None, user=None, body=None, pub_date=None, attached_files=None):
    """Create a Comment; missing body/user are filled with random defaults."""
    resolved_body = str(uuid.uuid4()) if body is None else body
    resolved_user = create_user() if user is None else user
    return Comment(issue, resolved_user, resolved_body, pub_date, attached_files)
def create_attached_file(comment=None, name=None, data=None):
    """Create an AttachedFile; missing name/data are filled with random defaults."""
    resolved_name = (str(uuid.uuid4()) + '.txt') if name is None else name
    resolved_data = uuid.uuid4().bytes if data is None else data
    return AttachedFile(comment, resolved_name, resolved_data)
def create_user(id=None, name=None, password=None):
    """Create, persist (via .add()) and return a User; missing fields are randomized."""
    user = User(
        str(uuid.uuid4()) if id is None else id,
        str(uuid.uuid4()) if name is None else name,
        str(uuid.uuid4()) if password is None else password,
    )
    user.add()
    return user
def delete_all_issues():
    """Deletes all rows of issue table."""
    # Bulk delete through the model's query interface; commit handling is
    # presumably the caller's/session's responsibility — confirm.
    Issue.query.delete()
@contextmanager
def login(user=None, password=None, do_logout=True):
    """Login as 'user'. if 'user' is None, new user is created.

    Yields (user, login_response).  On exit, logs out again unless
    do_logout is False.
    """
    if password is None:
        password = 'test'
    if user is None:
        user = create_user(password=password)
    res = ctx['TEST_APP'].post('/user/login/', data={
        'csrf_token': ctx['CSRF_TOKEN'],
        'user_id': user.id,
        'password': password
    }, follow_redirects=True)
    yield (user, res)
    # NOTE(review): no try/finally — if the with-body raises, logout() is
    # skipped and the session leaks into the next test; confirm intended.
    if do_logout:
        logout()
def logout():
    """Logout.  Returns the (redirect-followed) response."""
    return ctx['TEST_APP'].get('/user/logout/', follow_redirects=True)
| {
"content_hash": "2a1004bce80072f6794a9e8764774417",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 89,
"avg_line_length": 25.841463414634145,
"alnum_prop": 0.633317602642756,
"repo_name": "mmktomato/zenmai-bts",
"id": "4eb121afdaf96b344c637b1c9a22bc4195814c4d",
"size": "2119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/zenmai_test_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "201"
},
{
"name": "HTML",
"bytes": "9387"
},
{
"name": "Python",
"bytes": "40137"
}
],
"symlink_target": ""
} |
"""
This module is all programs entrance.
It have menu.
"""
import tempfile
import uuid
from flask import Flask, request, render_template, send_file
from ciphers import (additive_cipher, multiply_cipher,
affine_cipher, Playfair,
Foursquare, Vigenere, Swaper,
ElGamal, check_sign, generate_key)
# URL paths for the menu page and the individual lab pages.
PATH_ROOT = "/"
PATH_LAB = "/lab/"
PATH_LAB_1 = PATH_LAB + "1"
PATH_LAB_2 = PATH_LAB + "2"
PATH_LAB_3 = PATH_LAB + "3"
PATH_LAB_4 = PATH_LAB + "4"
PATH_LAB_6 = PATH_LAB + "6"
# (form field name, default value) pairs used to seed each lab page's
# template context; *_result entries are filled in after a POST.
# Lab 1: additive, multiplicative and affine ciphers.
PARAMS = (
    ("additive_key", 8),
    ("additive_data", "TEST STRING"),
    ("additive_result", None),
    ("radditive_data", "AS ASRRFW FF!"),
    ("radditive_result", None),
    ("mult_key", 11),
    ("mult_data", "test string".upper()),
    ("mult_result", None),
    ("rmult_data", "ADIAOSJDIJ AIOJSDIOAJOI"),
    ("rmult_result", None),
    ("affine_key", 8),
    ("affine_key2", 11),
    ("affine_data", "TEST STRING"),
    ("affine_result", None),
    ("raffine_data", "TEST STRING"),
    ("raffine_result", None),
)
# Lab 2: Playfair and four-square ciphers.
PARAMS_2 = (
    ("playfair_key", ""),
    ("playfair_data", "TEST STRING"),
    ("playfair_result", None),
    ("rplayfair_key", ""),
    ("rplayfair_data", "TEST STRING"),
    ("rplayfair_result", None),
    ("foursquare_key1", ""),
    ("foursquare_key2", ""),
    ("foursquare_data", "TEST STRING"),
    ("foursquare_result", None),
    ("rfoursquare_data", "TEST STRING"),
    ("rfoursquare_key1", ""),
    ("rfoursquare_key2", ""),
    ("rfoursquare_result", None),
)
# Lab 3: Vigenere cipher (with a user-supplied alphabet).
PARAMS_3 = (
    ("vigenere_key", ""),
    ("vigenere_alph", ""),
    ("vigenere_data", "ЗАХИСТ_ІНФОРМАЦІЇ"),
    ("vigenere_result", None),
    ("rvigenere_key", ""),
    ("rvigenere_alph", ""),
    ("rvigenere_data", "ФОЧИВЕБІЯЖРРЮОШІЧ"),
    ("rvigenere_result", None),
)
# Lab 4: columnar transposition (Swaper).
PARAMS_4 = (
    ("swaper_key", "3, 1, 4, 5, 2"),
    ("swaper_data", "ENEMYATTACKSTONIGHT"),
    ("swaper_result", None),
    ("rswaper_key", "3, 1, 4, 5, 2"),
    ("rswaper_data", "ETTHEAKIMAOTYCNXNTSG"),
    ("rswaper_result", None),
)
app = Flask(__name__)


@app.route(PATH_ROOT)
def root_page():
    """Render the main menu page."""
    return render_template('main_page.html')
@app.route(PATH_LAB_1, methods=['GET', 'POST'])
def lab1_page():
    """Render lab 1 and run the selected additive/multiplicative/affine
    cipher operation on POST.

    The 'action' form field selects which cipher (a leading 'r' means the
    reverse/decryption direction); results are stored back into the
    template context under '<action>_result'.
    """
    default_context = {}
    default_context['last_action'] = request.form.get("action", "additive")
    # Seed every known field with the submitted value or its default.
    for param in PARAMS:
        default_context[param[0]] = request.form.get(param[0], param[1])
    if request.method == 'POST':
        try:
            if default_context['last_action'] == 'additive':
                default_context['additive_result'] = additive_cipher(
                    default_context['additive_data'],
                    int(default_context['additive_key']),
                    reverse=False)
            elif default_context['last_action'] == 'radditive':
                # Key is hard-coded to 1 here — presumably the key argument
                # is ignored (brute-forced) when reverse=True; confirm in
                # the ciphers module.
                default_context['radditive_result'] = additive_cipher(
                    default_context['radditive_data'],
                    1,
                    reverse=True)
            elif default_context['last_action'] == 'mult':
                default_context['mult_result'] = multiply_cipher(
                    default_context['mult_data'],
                    int(default_context['mult_key']),
                    reverse=False)
            elif default_context['last_action'] == 'rmult':
                default_context['rmult_result'] = multiply_cipher(
                    default_context['rmult_data'],
                    1,
                    reverse=True)
            elif default_context['last_action'] == 'affine':
                default_context['affine_result'] = affine_cipher(
                    default_context['affine_data'],
                    int(default_context['affine_key']),
                    int(default_context['affine_key2']),
                    reverse=False)
            elif default_context['last_action'] == 'raffine':
                default_context['raffine_result'] = affine_cipher(
                    default_context['raffine_data'],
                    1,
                    2,
                    reverse=True)
        except Exception as err:
            # Deliberate best-effort: bad user input is logged and the page
            # re-renders without a result rather than erroring out.
            print("exception: ", err)
    return render_template('labs/lab1.html', **default_context)
@app.route(PATH_LAB_2, methods=['GET', 'POST'])
def lab2_page():
    """Render lab 2 and run the selected Playfair/four-square cipher
    operation on POST (a leading 'r' in the action means decryption)."""
    default_context = {}
    default_context['last_action'] = request.form.get("action", "playfair")
    # Seed every known field with the submitted value or its default.
    for param in PARAMS_2:
        default_context[param[0]] = request.form.get(param[0], param[1])
    if request.method == 'POST':
        try:
            if default_context['last_action'] == 'playfair':
                p = Playfair(default_context['playfair_key'])
                default_context['playfair_result'] = p.encipher(default_context['playfair_data'])
            elif default_context['last_action'] == 'rplayfair':
                p = Playfair(default_context['rplayfair_key'])
                default_context['rplayfair_result'] = p.decipher(default_context['rplayfair_data'])
            elif default_context['last_action'] == 'foursquare':
                f = Foursquare(default_context['foursquare_key1'], default_context['foursquare_key2'])
                default_context['foursquare_result'] = f.encipher(default_context['foursquare_data'])
            elif default_context['last_action'] == 'rfoursquare':
                f = Foursquare(default_context['rfoursquare_key1'], default_context['rfoursquare_key2'])
                default_context['rfoursquare_result'] = f.decipher(default_context['rfoursquare_data'])
        except Exception as err:
            # Deliberate best-effort: log and re-render without a result.
            print("exception: ", err)
    return render_template("labs/lab2.html", **default_context)
@app.route(PATH_LAB_3, methods=['GET', 'POST'])
def lab3_page():
    """Render lab 3: Vigenere cipher plus pattern/alphabet diagnostics."""
    ctx = {}
    ctx['last_action'] = request.form.get("action", "vigenere")
    for param in PARAMS_3:
        name, default = param[0], param[1]
        ctx[name] = request.form.get(name, default)
    if request.method == 'POST':
        try:
            action = ctx['last_action']
            v = None
            if action == 'vigenere':
                v = Vigenere(ctx['vigenere_alph'], ctx['vigenere_key'])
                ctx['vigenere_result'] = v.encipher(ctx['vigenere_data'])
            elif action == 'rvigenere':
                v = Vigenere(ctx['rvigenere_alph'], ctx['rvigenere_key'])
                ctx['rvigenere_result'] = v.decipher(ctx['rvigenere_data'])
            if v is not None:
                # Both directions expose the same diagnostics to the template.
                ctx['last_pattern'] = v.last_pattern
                ctx['alph'] = v.alph
                ctx['sep'] = "*" * len(v.last_pattern)
        except Exception as err:
            print("exception: ", err)
    return render_template("labs/lab3.html", **ctx)
@app.route(PATH_LAB_4, methods=['GET', 'POST'])
def lab4_page():
    """Render lab 4: 'swaper' transposition encode/decode."""
    ctx = {}
    ctx['last_action'] = request.form.get("action", "swaper")
    for param in PARAMS_4:
        name, default = param[0], param[1]
        ctx[name] = request.form.get(name, default)
    if request.method == 'POST':
        try:
            action = ctx['last_action']
            if action == 'swaper':
                coder = Swaper(ctx['swaper_key'])
                ctx['swaper_result'] = coder.encode_data(ctx['swaper_data'])
            elif action == 'rswaper':
                coder = Swaper(ctx['rswaper_key'])
                ctx['rswaper_result'] = coder.decode_data(ctx['rswaper_data'])
        except Exception as err:
            print("exception: ", err)
    return render_template("labs/lab4.html", **ctx)
# Form-field names and template keys for the El-Gamal signature page (lab 6).
EL_DOCUMENT = "el-gamal-document"  # uploaded document to sign/verify
EL_SIGN = "el-gamal-sign"  # uploaded key/signature file
EL_ACTION = "action"  # requested operation (sign/check/generate)
EL_ERROR = "error"  # template context key: error message
EL_SUCCESS = "success"  # template context key: verification succeeded
EL_ACTION_TYPE_SIGN = 'sign'
EL_ACTION_TYPE_CHECK = 'check'
EL_ACTION_TYPE_GEN = 'generate'
@app.route(PATH_LAB_6, methods=['GET', 'POST'])
def lab6_page():
    """Render lab 6: El-Gamal digital signatures.

    POST actions:
      * generate -- create a fresh key (p, g, x, y) and send it as a file;
      * sign -- sign the uploaded document with the uploaded key file and
        send back (a, b, p, g, y) as a downloadable file;
      * anything else (normally 'check') -- verify the uploaded signature
        file against the uploaded document.
    Any exception is reported to stdout and the page is re-rendered.
    """
    context = {}
    if request.method == 'POST':
        try:
            action = request.form.get(EL_ACTION, EL_ACTION_TYPE_SIGN)
            document = request.files.get(EL_DOCUMENT, None)
            key = request.files.get(EL_SIGN, None)
            if action == EL_ACTION_TYPE_GEN:
                # TODO(review): generated files are never removed after
                # send_file -- consider cleaning them up in an after_request
                # hook or writing to a tempfile.
                path = str(uuid.uuid4()) + '.txt'
                # 'with' guarantees the handle is closed before send_file
                # reads the file back (original left handles open).
                with open(path, 'w') as f:
                    f.write("%s\n%s\n%s\n%s" % (*generate_key(), ))
                return send_file(path, as_attachment=True)
            if not document or not key:
                context[EL_ERROR] = "Відсутні потрібні файли!"
            elif action == EL_ACTION_TYPE_SIGN:
                # Key file layout: p, g, x, y -- one integer per line.
                p, g, x, y = (int(line) for line in key.readlines())
                (a, b) = ElGamal(p, g, x, y).sign(document.read())
                path = str(uuid.uuid4()) + '.txt'
                with open(path, 'w') as f:
                    f.write("%s\n%s\n%s\n%s\n%s" % (a, b, p, g, y, ))
                return send_file(path, as_attachment=True)
            else:
                # Verify: signature file layout is a, b, p, g, y.
                a, b, p, g, y = (int(line) for line in key.readlines())
                if check_sign(document.read(), a, b, p, g, y):
                    context[EL_SUCCESS] = True
                else:
                    context[EL_ERROR] = "Не правильний підпис!"
        except Exception as err:
            print("Exception: %s" % (err, ))
    return render_template("labs/lab6.html", **context)
# NOTE(review): debug=True combined with host='0.0.0.0' exposes the Werkzeug
# debugger to the whole network -- disable debug (or bind to localhost)
# anywhere other than a development box.
app.run(port=8002, debug=True, host='0.0.0.0', threaded=True)
| {
"content_hash": "7a5c52b5a74579db2e144e6e683500cc",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 104,
"avg_line_length": 35.55,
"alnum_prop": 0.5427968655816757,
"repo_name": "alexei-alexov/information_protection",
"id": "c941943ebe37034f6d5b8328c56541e79572b1eb",
"size": "10050",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "29232"
},
{
"name": "Python",
"bytes": "44355"
}
],
"symlink_target": ""
} |
"""Module provider for Namesilo"""
import logging
from xml.etree import ElementTree
import requests
from lexicon.exceptions import AuthenticationError
from lexicon.providers.base import Provider as BaseProvider
# Module-level logger used by Provider's debug output below.
LOGGER = logging.getLogger(__name__)
# Nameserver suffixes for Namesilo-hosted zones -- presumably consumed by
# lexicon's provider auto-discovery; confirm against the framework.
NAMESERVER_DOMAINS = ["namesilo.com"]
def provider_parser(subparser):
    """Attach the Namesilo-specific CLI options to *subparser*."""
    auth_help = "specify key for authentication"
    subparser.add_argument("--auth-token", help=auth_help)
class Provider(BaseProvider):
    """Provider class for Namesilo.

    All operations -- including mutations -- are issued as GET requests
    against the XML API (see ``_request``).
    """
    def __init__(self, config):
        super(Provider, self).__init__(config)
        self.domain_id = None
        # Endpoint can be overridden via provider options (e.g. a sandbox);
        # defaults to the production API.
        self.api_endpoint = (
            self._get_provider_option("api_endpoint") or "https://www.namesilo.com/api"
        )
    def _authenticate(self):
        """Validate credentials/domain by fetching the domain's info."""
        try:
            self._get("/getDomainInfo", {"domain": self.domain})
        except Exception as e:
            raise AuthenticationError(str(e))
        # Namesilo addresses zones by name, so the domain doubles as its id.
        self.domain_id = self.domain
    # Create record. If record already exists with the same content, do nothing'
    def _create_record(self, rtype, name, content):
        """Create a DNS record; an already-existing record is a no-op."""
        record = {
            "domain": self.domain_id,
            "rrhost": self._relative_name(name),
            "rrtype": rtype,
            "rrvalue": content,
        }
        if self._get_lexicon_option("ttl"):
            record["rrttl"] = self._get_lexicon_option("ttl")
        try:
            self._get("/dnsAddRecord", record)
        except ValueError as err:
            # noop if attempting to create record that already exists.
            # (_request raises ValueError for API reply code 280.)
            LOGGER.debug("Ignoring error: %s", err)
        LOGGER.debug("create_record: %s", True)
        return True
    # List all records. Return an empty list if no records found
    # type, name and content are used to filter records.
    # If possible filter during the query, otherwise filter after response is received.
    def _list_records(self, rtype=None, name=None, content=None):
        """Return the domain's records, optionally filtered client-side."""
        query = {"domain": self.domain_id}
        payload = self._get("/dnsListRecords", query)
        records = []
        for record in payload.find("reply").findall("resource_record"):
            processed_record = {
                "type": record.find("type").text,
                "name": record.find("host").text,
                "ttl": record.find("ttl").text,
                "content": record.find("value").text,
                "id": record.find("record_id").text,
            }
            records.append(processed_record)
        # dnsListRecords offers no server-side filters, so filter here.
        if rtype:
            records = [record for record in records if record["type"] == rtype]
        if name:
            records = [
                record for record in records if record["name"] == self._full_name(name)
            ]
        if content:
            records = [record for record in records if record["content"] == content]
        LOGGER.debug("list_records: %s", records)
        return records
    # Create or update a record.
    def _update_record(self, identifier, rtype=None, name=None, content=None):
        """Update the record addressed by *identifier* in place."""
        data = {"domain": self.domain_id, "rrid": identifier}
        # if rtype:
        #     data['type'] = rtype
        if name:
            data["rrhost"] = self._relative_name(name)
        if content:
            data["rrvalue"] = content
        if self._get_lexicon_option("ttl"):
            data["rrttl"] = self._get_lexicon_option("ttl")
        self._get("/dnsUpdateRecord", data)
        LOGGER.debug("update_record: %s", True)
        return True
    # Delete an existing record.
    # If record does not exist, do nothing.
    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        """Delete by id, or by (rtype, name, content) filter when no id given."""
        data = {"domain": self.domain_id}
        delete_record_id = []
        if not identifier:
            # No id supplied: resolve all matching record ids first.
            records = self._list_records(rtype, name, content)
            delete_record_id = [record["id"] for record in records]
        else:
            delete_record_id.append(identifier)
        LOGGER.debug("delete_records: %s", delete_record_id)
        for record_id in delete_record_id:
            data["rrid"] = record_id
            self._get("/dnsDeleteRecord", data)
        LOGGER.debug("delete_record: %s", True)
        return True
    # Helpers
    def _request(self, action="GET", url="/", data=None, query_params=None):
        """Perform an API call and return the parsed XML root element.

        Raises ValueError for reply code 280 (treated as "record already
        exists" by _create_record) and a generic Exception for any code
        other than 300 (success).
        """
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        query_params["version"] = 1
        query_params["type"] = "xml"
        query_params["key"] = self._get_provider_option("auth_token")
        # NOTE: the request body (`data`) is intentionally unused -- the
        # Namesilo API takes everything as query parameters.
        response = requests.request(
            action, self.api_endpoint + url, params=query_params
        )
        # data=json.dumps(data))
        # if the request fails for any reason, throw an error.
        response.raise_for_status()
        tree = ElementTree.ElementTree(ElementTree.fromstring(response.content))
        root = tree.getroot()
        if root.find("reply").find("code").text == "280":
            raise ValueError(
                "An error occurred: {0}, {1}".format(
                    root.find("reply").find("detail").text,
                    root.find("reply").find("code").text,
                )
            )
        if root.find("reply").find("code").text != "300":
            raise Exception(
                "An error occurred: {0}, {1}".format(
                    root.find("reply").find("detail").text,
                    root.find("reply").find("code").text,
                )
            )
        return root
| {
"content_hash": "399d53f90a47279594ca2c8916ee11b3",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 87,
"avg_line_length": 34.742138364779876,
"alnum_prop": 0.5677045619116582,
"repo_name": "AnalogJ/lexicon",
"id": "4639d16bb5a696d9d8b474c1ff999c9c4504f9da",
"size": "5524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lexicon/providers/namesilo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1008"
},
{
"name": "Python",
"bytes": "899115"
},
{
"name": "Shell",
"bytes": "1980"
}
],
"symlink_target": ""
} |
"""Tools for creating Raw objects from numpy arrays"""
# Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
from ..base import _BaseRaw
from ...utils import verbose, logger
class RawArray(_BaseRaw):
    """Raw object from numpy array
    Parameters
    ----------
    data : array, shape (n_channels, n_times)
        The channels' time series.
    info : instance of Info
        Info dictionary. Consider using `create_info` to populate
        this structure.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    See Also
    --------
    EpochsArray, EvokedArray, create_info
    """
    @verbose
    def __init__(self, data, info, verbose=None):
        # Preserve complex data when present; otherwise promote to float64.
        dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64
        data = np.asanyarray(data, dtype=dtype)
        if data.ndim != 2:
            # Fix: message previously ended "n_samples" with an unbalanced
            # opening parenthesis.
            raise ValueError('Data must be a 2D array of shape (n_channels, '
                             'n_samples)')
        logger.info('Creating RawArray with %s data, n_channels=%s, n_times=%s'
                    % (dtype.__name__, data.shape[0], data.shape[1]))
        if len(data) != len(info['ch_names']):
            raise ValueError('len(data) does not match len(info["ch_names"])')
        # Internal consistency of the caller-supplied info structure.
        assert len(info['ch_names']) == info['nchan']
        super(RawArray, self).__init__(info, data, verbose=verbose)
        logger.info(' Range : %d ... %d = %9.3f ... %9.3f secs' % (
            self.first_samp, self.last_samp,
            float(self.first_samp) / info['sfreq'],
            float(self.last_samp) / info['sfreq']))
        logger.info('Ready.')
| {
"content_hash": "d4a56083f7697f37780a941190fd0aaa",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 79,
"avg_line_length": 34.16,
"alnum_prop": 0.5743559718969555,
"repo_name": "yousrabk/mne-python",
"id": "8231c612317cbba1855ad825a21e86169de60a66",
"size": "1708",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "mne/io/array/array.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3171"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "4489354"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
from PyQt5.QtGui import QPainter, QColor, QPen
from PyQt5.QtCore import QPoint
import math
class QTurtle():
    """Minimal turtle-graphics helper that draws onto a QPainter.

    Angles are in radians; in Qt's y-down coordinate system ``right()``
    turns clockwise on screen.
    """
    # Class-level defaults kept for backward compatibility; actual state is
    # created per instance in __init__.
    direction = 0
    posstack = []
    ppos = pos = None
    delta = None
    speed = None
    def __init__(self, qp, x, y, d=math.pi/2, speed=5):
        """Start at (x, y) with direction 0, turning by *d* per left/right step."""
        self.ppos = self.pos = QPoint(x, y)
        self.qp = qp
        self.delta = d
        self.speed = speed
        self.direction = 0
        # Fix: posstack used to be a shared class attribute, so every
        # QTurtle instance pushed/popped the same list.
        self.posstack = []
    def left(self):
        """Turn by one delta step (counter-clockwise in screen coordinates)."""
        self.direction -= self.delta
    def right(self):
        """Turn by one delta step (clockwise in screen coordinates)."""
        self.direction += self.delta
    def store(self):
        """Push the current (direction, position) for a later recall()."""
        self.posstack.append((self.direction, self.pos))
    def recall(self):
        """Pop and restore the most recently stored (direction, position)."""
        if len(self.posstack) <= 0:
            print("ERROR: popped empty posstack!")
        else:
            (self.direction, self.pos) = self.posstack.pop()
    def move(self, draw=True):
        """Advance one ``speed``-length step along ``direction``; draw by default."""
        self.ppos = self.pos
        nx = self.ppos.x() + self.speed * math.cos(self.direction)
        ny = self.ppos.y() + self.speed * math.sin(self.direction)
        self.pos = QPoint(nx, ny)
        if draw:
            self.draw()
    def draw(self):
        '''Print a line between this and the previous pos'''
        self.qp.drawLine(self.ppos.x(), self.ppos.y(), self.pos.x(), self.pos.y())
| {
"content_hash": "67b7c93b774b351766c903f6ad13982b",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 76,
"avg_line_length": 22.933333333333334,
"alnum_prop": 0.6550387596899225,
"repo_name": "ax-rwnd/lparser-python",
"id": "2cecb0eb80a93964b9a00d8829e4985dd083bff1",
"size": "1032",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qturtle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7364"
}
],
"symlink_target": ""
} |
"""Common test objects."""
import copy
from datetime import datetime
import json
from unittest.mock import ANY, patch
from homeassistant.components import mqtt
from homeassistant.components.mqtt import debug_info
from homeassistant.components.mqtt.const import MQTT_DISCONNECTED
from homeassistant.components.mqtt.mixins import MQTT_ATTRIBUTES_BLOCKED
from homeassistant.const import ATTR_ASSUMED_STATE, STATE_UNAVAILABLE
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.setup import async_setup_component
from tests.common import async_fire_mqtt_message, mock_registry
# Canonical device-info payloads reused by the device-registry helpers below.
# Variant identified by an "identifiers" list.
DEFAULT_CONFIG_DEVICE_INFO_ID = {
    "identifiers": ["helloworld"],
    "manufacturer": "Whatever",
    "name": "Beer",
    "model": "Glass",
    "sw_version": "0.1-beta",
    "suggested_area": "default_area",
    "configuration_url": "http://example.com",
}
# Variant identified by a MAC-address connection instead of identifiers.
DEFAULT_CONFIG_DEVICE_INFO_MAC = {
    "connections": [[dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12"]],
    "manufacturer": "Whatever",
    "name": "Beer",
    "model": "Glass",
    "sw_version": "0.1-beta",
    "suggested_area": "default_area",
    "configuration_url": "http://example.com",
}
async def help_test_availability_when_connection_lost(hass, mqtt_mock, domain, config):
    """Test that an entity turns unavailable after an MQTT disconnect."""
    assert await async_setup_component(hass, domain, config)
    await hass.async_block_till_done()
    entity_id = f"{domain}.test"
    # Entity starts out available while the mock broker is connected.
    assert hass.states.get(entity_id).state != STATE_UNAVAILABLE
    # Simulate losing the broker connection and let the dispatcher run.
    mqtt_mock.connected = False
    async_dispatcher_send(hass, MQTT_DISCONNECTED)
    await hass.async_block_till_done()
    assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
async def help_test_availability_without_topic(hass, mqtt_mock, domain, config):
    """Test that an entity is available when no availability topic is set."""
    # This helper is only meaningful for configs without an availability topic.
    assert "availability_topic" not in config[domain]
    assert await async_setup_component(hass, domain, config)
    await hass.async_block_till_done()
    entity_id = f"{domain}.test"
    assert hass.states.get(entity_id).state != STATE_UNAVAILABLE
async def help_test_default_availability_payload(
    hass,
    mqtt_mock,
    domain,
    config,
    no_assumed_state=False,
    state_topic=None,
    state_message=None,
):
    """Test availability by default payload with defined topic.
    This is a test helper for the MqttAvailability mixin.
    """
    # Add availability settings to config
    config = copy.deepcopy(config)
    config[domain]["availability_topic"] = "availability-topic"
    assert await async_setup_component(
        hass,
        domain,
        config,
    )
    await hass.async_block_till_done()
    # Entity is unavailable until the first availability message arrives.
    state = hass.states.get(f"{domain}.test")
    assert state.state == STATE_UNAVAILABLE
    async_fire_mqtt_message(hass, "availability-topic", "online")
    state = hass.states.get(f"{domain}.test")
    assert state.state != STATE_UNAVAILABLE
    if no_assumed_state:
        assert not state.attributes.get(ATTR_ASSUMED_STATE)
    async_fire_mqtt_message(hass, "availability-topic", "offline")
    state = hass.states.get(f"{domain}.test")
    assert state.state == STATE_UNAVAILABLE
    if state_topic:
        # A state update must not resurrect an unavailable entity.
        async_fire_mqtt_message(hass, state_topic, state_message)
        state = hass.states.get(f"{domain}.test")
        assert state.state == STATE_UNAVAILABLE
        async_fire_mqtt_message(hass, "availability-topic", "online")
        state = hass.states.get(f"{domain}.test")
        assert state.state != STATE_UNAVAILABLE
async def help_test_default_availability_list_payload(
    hass,
    mqtt_mock,
    domain,
    config,
    no_assumed_state=False,
    state_topic=None,
    state_message=None,
):
    """Test availability by default payload with a list of topics.
    This is a test helper for the MqttAvailability mixin.
    """
    # Add availability settings to config
    config = copy.deepcopy(config)
    config[domain]["availability"] = [
        {"topic": "availability-topic1"},
        {"topic": "availability-topic2"},
    ]
    assert await async_setup_component(
        hass,
        domain,
        config,
    )
    await hass.async_block_till_done()
    state = hass.states.get(f"{domain}.test")
    assert state.state == STATE_UNAVAILABLE
    # No availability_mode set: the assertions below show latest-message-wins
    # behaviour (see the _all/_any variants for explicit modes).
    async_fire_mqtt_message(hass, "availability-topic1", "online")
    state = hass.states.get(f"{domain}.test")
    assert state.state != STATE_UNAVAILABLE
    if no_assumed_state:
        assert not state.attributes.get(ATTR_ASSUMED_STATE)
    async_fire_mqtt_message(hass, "availability-topic1", "offline")
    state = hass.states.get(f"{domain}.test")
    assert state.state == STATE_UNAVAILABLE
    async_fire_mqtt_message(hass, "availability-topic2", "online")
    state = hass.states.get(f"{domain}.test")
    assert state.state != STATE_UNAVAILABLE
    if no_assumed_state:
        assert not state.attributes.get(ATTR_ASSUMED_STATE)
    async_fire_mqtt_message(hass, "availability-topic2", "offline")
    state = hass.states.get(f"{domain}.test")
    assert state.state == STATE_UNAVAILABLE
    if state_topic:
        # A state update must not resurrect an unavailable entity.
        async_fire_mqtt_message(hass, state_topic, state_message)
        state = hass.states.get(f"{domain}.test")
        assert state.state == STATE_UNAVAILABLE
        async_fire_mqtt_message(hass, "availability-topic1", "online")
        state = hass.states.get(f"{domain}.test")
        assert state.state != STATE_UNAVAILABLE
async def help_test_default_availability_list_payload_all(
    hass,
    mqtt_mock,
    domain,
    config,
    no_assumed_state=False,
    state_topic=None,
    state_message=None,
):
    """Test availability_mode "all": every topic must report online.
    This is a test helper for the MqttAvailability mixin.
    """
    # Add availability settings to config
    config = copy.deepcopy(config)
    config[domain]["availability_mode"] = "all"
    config[domain]["availability"] = [
        {"topic": "availability-topic1"},
        {"topic": "availability-topic2"},
    ]
    assert await async_setup_component(
        hass,
        domain,
        config,
    )
    await hass.async_block_till_done()
    state = hass.states.get(f"{domain}.test")
    assert state.state == STATE_UNAVAILABLE
    # A single topic reporting online is not enough in mode "all" ...
    async_fire_mqtt_message(hass, "availability-topic1", "online")
    state = hass.states.get(f"{domain}.test")
    assert state.state == STATE_UNAVAILABLE
    if no_assumed_state:
        assert not state.attributes.get(ATTR_ASSUMED_STATE)
    # ... the entity becomes available only once both topics are online.
    async_fire_mqtt_message(hass, "availability-topic2", "online")
    state = hass.states.get(f"{domain}.test")
    assert state.state != STATE_UNAVAILABLE
    async_fire_mqtt_message(hass, "availability-topic2", "offline")
    state = hass.states.get(f"{domain}.test")
    assert state.state == STATE_UNAVAILABLE
    if no_assumed_state:
        assert not state.attributes.get(ATTR_ASSUMED_STATE)
    async_fire_mqtt_message(hass, "availability-topic2", "online")
    state = hass.states.get(f"{domain}.test")
    assert state.state != STATE_UNAVAILABLE
    async_fire_mqtt_message(hass, "availability-topic1", "offline")
    state = hass.states.get(f"{domain}.test")
    assert state.state == STATE_UNAVAILABLE
    if no_assumed_state:
        assert not state.attributes.get(ATTR_ASSUMED_STATE)
    async_fire_mqtt_message(hass, "availability-topic1", "online")
    state = hass.states.get(f"{domain}.test")
    assert state.state != STATE_UNAVAILABLE
async def help_test_default_availability_list_payload_any(
    hass,
    mqtt_mock,
    domain,
    config,
    no_assumed_state=False,
    state_topic=None,
    state_message=None,
):
    """Test availability_mode "any": one online topic is sufficient.
    This is a test helper for the MqttAvailability mixin.
    """
    # Add availability settings to config
    config = copy.deepcopy(config)
    config[domain]["availability_mode"] = "any"
    config[domain]["availability"] = [
        {"topic": "availability-topic1"},
        {"topic": "availability-topic2"},
    ]
    assert await async_setup_component(
        hass,
        domain,
        config,
    )
    await hass.async_block_till_done()
    state = hass.states.get(f"{domain}.test")
    assert state.state == STATE_UNAVAILABLE
    # One online topic already makes the entity available in mode "any".
    async_fire_mqtt_message(hass, "availability-topic1", "online")
    state = hass.states.get(f"{domain}.test")
    assert state.state != STATE_UNAVAILABLE
    if no_assumed_state:
        assert not state.attributes.get(ATTR_ASSUMED_STATE)
    async_fire_mqtt_message(hass, "availability-topic2", "online")
    state = hass.states.get(f"{domain}.test")
    assert state.state != STATE_UNAVAILABLE
    # Topic 2 going offline does not matter while topic 1 is still online.
    async_fire_mqtt_message(hass, "availability-topic2", "offline")
    state = hass.states.get(f"{domain}.test")
    assert state.state != STATE_UNAVAILABLE
    if no_assumed_state:
        assert not state.attributes.get(ATTR_ASSUMED_STATE)
    async_fire_mqtt_message(hass, "availability-topic1", "offline")
    state = hass.states.get(f"{domain}.test")
    assert state.state == STATE_UNAVAILABLE
    async_fire_mqtt_message(hass, "availability-topic1", "online")
    state = hass.states.get(f"{domain}.test")
    assert state.state != STATE_UNAVAILABLE
    if no_assumed_state:
        assert not state.attributes.get(ATTR_ASSUMED_STATE)
async def help_test_default_availability_list_single(
    hass,
    mqtt_mock,
    caplog,
    domain,
    config,
    no_assumed_state=False,
    state_topic=None,
    state_message=None,
):
    """Test availability list and availability_topic are mutually exclusive.
    This is a test helper for the MqttAvailability mixin.
    """
    # Add availability settings to config
    config = copy.deepcopy(config)
    config[domain]["availability"] = [
        {"topic": "availability-topic1"},
    ]
    config[domain]["availability_topic"] = "availability-topic"
    assert await async_setup_component(
        hass,
        domain,
        config,
    )
    await hass.async_block_till_done()
    # Setup must reject the config: no entity created, error logged.
    state = hass.states.get(f"{domain}.test")
    assert state is None
    # NOTE(review): the expected message hard-codes "sensor.mqtt" -- confirm
    # this helper is only invoked with domain="sensor".
    assert (
        "Invalid config for [sensor.mqtt]: two or more values in the same group of exclusion 'availability'"
        in caplog.text
    )
async def help_test_custom_availability_payload(
    hass,
    mqtt_mock,
    domain,
    config,
    no_assumed_state=False,
    state_topic=None,
    state_message=None,
):
    """Test availability by custom payload with defined topic.
    This is a test helper for the MqttAvailability mixin.
    """
    # Add availability settings to config, overriding the default
    # online/offline payloads with custom strings.
    config = copy.deepcopy(config)
    config[domain]["availability_topic"] = "availability-topic"
    config[domain]["payload_available"] = "good"
    config[domain]["payload_not_available"] = "nogood"
    assert await async_setup_component(
        hass,
        domain,
        config,
    )
    await hass.async_block_till_done()
    state = hass.states.get(f"{domain}.test")
    assert state.state == STATE_UNAVAILABLE
    async_fire_mqtt_message(hass, "availability-topic", "good")
    state = hass.states.get(f"{domain}.test")
    assert state.state != STATE_UNAVAILABLE
    if no_assumed_state:
        assert not state.attributes.get(ATTR_ASSUMED_STATE)
    async_fire_mqtt_message(hass, "availability-topic", "nogood")
    state = hass.states.get(f"{domain}.test")
    assert state.state == STATE_UNAVAILABLE
    if state_topic:
        # A state update must not resurrect an unavailable entity.
        async_fire_mqtt_message(hass, state_topic, state_message)
        state = hass.states.get(f"{domain}.test")
        assert state.state == STATE_UNAVAILABLE
        async_fire_mqtt_message(hass, "availability-topic", "good")
        state = hass.states.get(f"{domain}.test")
        assert state.state != STATE_UNAVAILABLE
async def help_test_discovery_update_availability(
    hass,
    mqtt_mock,
    domain,
    config,
    no_assumed_state=False,
    state_topic=None,
    state_message=None,
):
    """Test update of discovered MQTTAvailability.
    This is a test helper for the MQTTAvailability mixin.
    """
    # Add availability settings to config: config1 uses a single topic,
    # config2 replaces it with a two-topic list, config3 switches back to a
    # single (different) topic.
    config1 = copy.deepcopy(config)
    config1[domain]["availability_topic"] = "availability-topic1"
    config2 = copy.deepcopy(config)
    config2[domain]["availability"] = [
        {"topic": "availability-topic2"},
        {"topic": "availability-topic3"},
    ]
    config3 = copy.deepcopy(config)
    config3[domain]["availability_topic"] = "availability-topic4"
    data1 = json.dumps(config1[domain])
    data2 = json.dumps(config2[domain])
    data3 = json.dumps(config3[domain])
    async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data1)
    await hass.async_block_till_done()
    state = hass.states.get(f"{domain}.test")
    assert state.state == STATE_UNAVAILABLE
    async_fire_mqtt_message(hass, "availability-topic1", "online")
    state = hass.states.get(f"{domain}.test")
    assert state.state != STATE_UNAVAILABLE
    async_fire_mqtt_message(hass, "availability-topic1", "offline")
    state = hass.states.get(f"{domain}.test")
    assert state.state == STATE_UNAVAILABLE
    # Change availability_topic
    async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data2)
    await hass.async_block_till_done()
    # Verify we are no longer subscribing to the old topic
    async_fire_mqtt_message(hass, "availability-topic1", "online")
    state = hass.states.get(f"{domain}.test")
    assert state.state == STATE_UNAVAILABLE
    # Verify we are subscribing to the new topic
    async_fire_mqtt_message(hass, "availability-topic2", "online")
    state = hass.states.get(f"{domain}.test")
    assert state.state != STATE_UNAVAILABLE
    # Verify we are subscribing to the new topic
    async_fire_mqtt_message(hass, "availability-topic3", "offline")
    state = hass.states.get(f"{domain}.test")
    assert state.state == STATE_UNAVAILABLE
    # Change availability_topic
    async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data3)
    await hass.async_block_till_done()
    # Verify we are no longer subscribing to the old topic
    async_fire_mqtt_message(hass, "availability-topic2", "online")
    state = hass.states.get(f"{domain}.test")
    assert state.state == STATE_UNAVAILABLE
    # Verify we are no longer subscribing to the old topic
    async_fire_mqtt_message(hass, "availability-topic3", "online")
    state = hass.states.get(f"{domain}.test")
    assert state.state == STATE_UNAVAILABLE
    # Verify we are subscribing to the new topic
    async_fire_mqtt_message(hass, "availability-topic4", "online")
    state = hass.states.get(f"{domain}.test")
    assert state.state != STATE_UNAVAILABLE
async def help_test_setting_attribute_via_mqtt_json_message(
    hass, mqtt_mock, domain, config
):
    """Test that a JSON payload on json_attributes_topic sets attributes.
    This is a test helper for the MqttAttributes mixin.
    """
    # Point the entity at a JSON attributes topic.
    conf = copy.deepcopy(config)
    conf[domain]["json_attributes_topic"] = "attr-topic"
    assert await async_setup_component(hass, domain, conf)
    await hass.async_block_till_done()
    async_fire_mqtt_message(hass, "attr-topic", '{ "val": "100" }')
    assert hass.states.get(f"{domain}.test").attributes.get("val") == "100"
async def help_test_setting_blocked_attribute_via_mqtt_json_message(
    hass, mqtt_mock, domain, config, extra_blocked_attributes
):
    """Test the setting of blocked attribute via MQTT with JSON payload.
    This is a test helper for the MqttAttributes mixin.
    """
    extra_blocked_attributes = extra_blocked_attributes or []
    # Add JSON attributes settings to config
    config = copy.deepcopy(config)
    config[domain]["json_attributes_topic"] = "attr-topic"
    data = json.dumps(config[domain])
    # The entity is created via MQTT discovery here, not async_setup_component.
    async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
    await hass.async_block_till_done()
    val = "abc123"
    # Attributes on the global blocklist must never be set from the payload.
    for attr in MQTT_ATTRIBUTES_BLOCKED:
        async_fire_mqtt_message(hass, "attr-topic", json.dumps({attr: val}))
        state = hass.states.get(f"{domain}.test")
        assert state.attributes.get(attr) != val
    # Platform-specific blocked attributes behave the same way.
    for attr in extra_blocked_attributes:
        async_fire_mqtt_message(hass, "attr-topic", json.dumps({attr: val}))
        state = hass.states.get(f"{domain}.test")
        assert state.attributes.get(attr) != val
async def help_test_setting_attribute_with_template(hass, mqtt_mock, domain, config):
    """Test extracting attributes through json_attributes_template.
    This is a test helper for the MqttAttributes mixin.
    """
    conf = copy.deepcopy(config)
    conf[domain]["json_attributes_topic"] = "attr-topic"
    conf[domain]["json_attributes_template"] = "{{ value_json['Timer1'] | tojson }}"
    assert await async_setup_component(hass, domain, conf)
    await hass.async_block_till_done()
    # Only the nested "Timer1" object should be extracted as attributes.
    payload = json.dumps({"Timer1": {"Arm": 0, "Time": "22:18"}})
    async_fire_mqtt_message(hass, "attr-topic", payload)
    attrs = hass.states.get(f"{domain}.test").attributes
    assert attrs.get("Arm") == 0
    assert attrs.get("Time") == "22:18"
async def help_test_update_with_json_attrs_not_dict(
    hass, mqtt_mock, caplog, domain, config
):
    """Test that a non-dict JSON payload is rejected with a warning.
    This is a test helper for the MqttAttributes mixin.
    """
    conf = copy.deepcopy(config)
    conf[domain]["json_attributes_topic"] = "attr-topic"
    assert await async_setup_component(hass, domain, conf)
    await hass.async_block_till_done()
    # A JSON list is valid JSON but not a dict -- it must be ignored.
    async_fire_mqtt_message(hass, "attr-topic", '[ "list", "of", "things"]')
    assert hass.states.get(f"{domain}.test").attributes.get("val") is None
    assert "JSON result was not a dictionary" in caplog.text
async def help_test_update_with_json_attrs_bad_JSON(
    hass, mqtt_mock, caplog, domain, config
):
    """Test that malformed JSON on the attributes topic is ignored.
    This is a test helper for the MqttAttributes mixin.
    """
    conf = copy.deepcopy(config)
    conf[domain]["json_attributes_topic"] = "attr-topic"
    assert await async_setup_component(hass, domain, conf)
    await hass.async_block_till_done()
    # Unparseable payload: no attribute is set and the error is logged.
    async_fire_mqtt_message(hass, "attr-topic", "This is not JSON")
    assert hass.states.get(f"{domain}.test").attributes.get("val") is None
    assert "Erroneous JSON: This is not JSON" in caplog.text
async def help_test_discovery_update_attr(hass, mqtt_mock, caplog, domain, config):
    """Test update of discovered MQTTAttributes.
    This is a test helper for the MqttAttributes mixin.
    """
    # Add JSON attributes settings to config; the second discovery payload
    # moves the entity to a different attributes topic.
    config1 = copy.deepcopy(config)
    config1[domain]["json_attributes_topic"] = "attr-topic1"
    config2 = copy.deepcopy(config)
    config2[domain]["json_attributes_topic"] = "attr-topic2"
    data1 = json.dumps(config1[domain])
    data2 = json.dumps(config2[domain])
    async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data1)
    await hass.async_block_till_done()
    async_fire_mqtt_message(hass, "attr-topic1", '{ "val": "100" }')
    state = hass.states.get(f"{domain}.test")
    assert state.attributes.get("val") == "100"
    # Change json_attributes_topic
    async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data2)
    await hass.async_block_till_done()
    # Verify we are no longer subscribing to the old topic
    async_fire_mqtt_message(hass, "attr-topic1", '{ "val": "50" }')
    state = hass.states.get(f"{domain}.test")
    assert state.attributes.get("val") == "100"
    # Verify we are subscribing to the new topic
    async_fire_mqtt_message(hass, "attr-topic2", '{ "val": "75" }')
    state = hass.states.get(f"{domain}.test")
    assert state.attributes.get("val") == "75"
async def help_test_unique_id(hass, mqtt_mock, domain, config):
    """Test that entities sharing a unique_id collapse to a single entity."""
    assert await async_setup_component(hass, domain, config)
    await hass.async_block_till_done()
    entity_ids = hass.states.async_entity_ids(domain)
    assert len(entity_ids) == 1
async def help_test_discovery_removal(hass, mqtt_mock, caplog, domain, data):
    """Test removal of a discovered component via an empty config payload.
    This is a test helper for the MqttDiscoveryUpdate mixin.
    """
    discovery_topic = f"homeassistant/{domain}/bla/config"
    async_fire_mqtt_message(hass, discovery_topic, data)
    await hass.async_block_till_done()
    state = hass.states.get(f"{domain}.test")
    assert state is not None
    assert state.name == "test"
    # An empty payload on the discovery topic removes the entity.
    async_fire_mqtt_message(hass, discovery_topic, "")
    await hass.async_block_till_done()
    assert hass.states.get(f"{domain}.test") is None
async def help_test_discovery_update(
    hass,
    mqtt_mock,
    caplog,
    domain,
    discovery_config1,
    discovery_config2,
    state_data1=None,
    state_data2=None,
):
    """Test update of discovered component.
    This is a test helper for the MqttDiscoveryUpdate mixin.

    state_data1/state_data2 are optional sequences of
    (mqtt_messages, expected_state, attributes) checks to run after each
    discovery payload is processed.
    """
    # Add some future configuration to the configurations
    # (unknown keys must be tolerated by the discovery handler).
    config1 = copy.deepcopy(discovery_config1)
    config1["some_future_option_1"] = "future_option_1"
    config2 = copy.deepcopy(discovery_config2)
    config2["some_future_option_2"] = "future_option_2"
    discovery_data1 = json.dumps(config1)
    discovery_data2 = json.dumps(config2)
    async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", discovery_data1)
    await hass.async_block_till_done()
    state = hass.states.get(f"{domain}.beer")
    assert state is not None
    assert state.name == "Beer"
    if state_data1:
        for (mqtt_messages, expected_state, attributes) in state_data1:
            for (topic, data) in mqtt_messages:
                async_fire_mqtt_message(hass, topic, data)
            state = hass.states.get(f"{domain}.beer")
            if expected_state:
                assert state.state == expected_state
            if attributes:
                for (attr, value) in attributes:
                    assert state.attributes.get(attr) == value
    # The second payload updates the existing entity in place: the entity id
    # stays {domain}.beer, only the friendly name changes to "Milk".
    async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", discovery_data2)
    await hass.async_block_till_done()
    state = hass.states.get(f"{domain}.beer")
    assert state is not None
    assert state.name == "Milk"
    if state_data2:
        for (mqtt_messages, expected_state, attributes) in state_data2:
            for (topic, data) in mqtt_messages:
                async_fire_mqtt_message(hass, topic, data)
            state = hass.states.get(f"{domain}.beer")
            if expected_state:
                assert state.state == expected_state
            if attributes:
                for (attr, value) in attributes:
                    assert state.attributes.get(attr) == value
    state = hass.states.get(f"{domain}.milk")
    assert state is None
async def help_test_discovery_update_unchanged(
    hass, mqtt_mock, caplog, domain, data1, discovery_update
):
    """Test update of discovered component without changes.

    This is a test helper for the MqttDiscoveryUpdate mixin.
    """
    discovery_topic = f"homeassistant/{domain}/bla/config"
    # First discovery creates the entity.
    async_fire_mqtt_message(hass, discovery_topic, data1)
    await hass.async_block_till_done()
    entity_state = hass.states.get(f"{domain}.beer")
    assert entity_state is not None
    assert entity_state.name == "Beer"
    # Publishing the identical payload again must not trigger an update.
    async_fire_mqtt_message(hass, discovery_topic, data1)
    await hass.async_block_till_done()
    assert not discovery_update.called
async def help_test_discovery_broken(hass, mqtt_mock, caplog, domain, data1, data2):
    """Test handling of bad discovery message."""
    discovery_topic = f"homeassistant/{domain}/bla/config"
    # A broken payload must not create an entity.
    async_fire_mqtt_message(hass, discovery_topic, data1)
    await hass.async_block_till_done()
    assert hass.states.get(f"{domain}.beer") is None
    # A subsequent good payload on the same topic must recover cleanly.
    async_fire_mqtt_message(hass, discovery_topic, data2)
    await hass.async_block_till_done()
    milk_state = hass.states.get(f"{domain}.milk")
    assert milk_state is not None
    assert milk_state.name == "Milk"
    assert hass.states.get(f"{domain}.beer") is None
async def help_test_entity_device_info_with_identifier(hass, mqtt_mock, domain, config):
    """Test device registry integration.

    This is a test helper for the MqttDiscoveryUpdate mixin.
    """
    # Attach device info (with an identifier) and a unique_id to the config.
    entity_config = copy.deepcopy(config[domain])
    entity_config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
    entity_config["unique_id"] = "veryunique"
    device_registry = dr.async_get(hass)
    async_fire_mqtt_message(
        hass, f"homeassistant/{domain}/bla/config", json.dumps(entity_config)
    )
    await hass.async_block_till_done()
    # Discovery must have registered the device with all advertised info.
    device = device_registry.async_get_device({("mqtt", "helloworld")})
    assert device is not None
    assert device.identifiers == {("mqtt", "helloworld")}
    assert device.manufacturer == "Whatever"
    assert device.name == "Beer"
    assert device.model == "Glass"
    assert device.sw_version == "0.1-beta"
    assert device.suggested_area == "default_area"
    assert device.configuration_url == "http://example.com"
async def help_test_entity_device_info_with_connection(hass, mqtt_mock, domain, config):
    """Test device registry integration.

    This is a test helper for the MqttDiscoveryUpdate mixin.
    """
    # Attach device info (with a MAC connection) and a unique_id to the config.
    entity_config = copy.deepcopy(config[domain])
    entity_config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_MAC)
    entity_config["unique_id"] = "veryunique"
    device_registry = dr.async_get(hass)
    async_fire_mqtt_message(
        hass, f"homeassistant/{domain}/bla/config", json.dumps(entity_config)
    )
    await hass.async_block_till_done()
    # Look the device up by its network connection rather than an identifier.
    device = device_registry.async_get_device(
        set(), {(dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12")}
    )
    assert device is not None
    assert device.connections == {(dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12")}
    assert device.manufacturer == "Whatever"
    assert device.name == "Beer"
    assert device.model == "Glass"
    assert device.sw_version == "0.1-beta"
    assert device.suggested_area == "default_area"
    assert device.configuration_url == "http://example.com"
async def help_test_entity_device_info_remove(hass, mqtt_mock, domain, config):
    """Test device registry remove."""
    # Attach device info and a unique_id to the config.
    entity_config = copy.deepcopy(config[domain])
    entity_config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
    entity_config["unique_id"] = "veryunique"
    device_registry = dr.async_get(hass)
    entity_registry = er.async_get(hass)
    discovery_topic = f"homeassistant/{domain}/bla/config"
    # Discovery registers both the device and the entity.
    async_fire_mqtt_message(hass, discovery_topic, json.dumps(entity_config))
    await hass.async_block_till_done()
    assert device_registry.async_get_device({("mqtt", "helloworld")}) is not None
    assert entity_registry.async_get_entity_id(domain, mqtt.DOMAIN, "veryunique")
    # An empty payload removes the entity and, with it, the device.
    async_fire_mqtt_message(hass, discovery_topic, "")
    await hass.async_block_till_done()
    assert device_registry.async_get_device({("mqtt", "helloworld")}) is None
    assert not entity_registry.async_get_entity_id(domain, mqtt.DOMAIN, "veryunique")
async def help_test_entity_device_info_update(hass, mqtt_mock, domain, config):
    """Test device registry update.

    This is a test helper for the MqttDiscoveryUpdate mixin.
    """
    # Attach device info and a unique_id to the config.
    entity_config = copy.deepcopy(config[domain])
    entity_config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
    entity_config["unique_id"] = "veryunique"
    device_registry = dr.async_get(hass)
    discovery_topic = f"homeassistant/{domain}/bla/config"
    async_fire_mqtt_message(hass, discovery_topic, json.dumps(entity_config))
    await hass.async_block_till_done()
    device = device_registry.async_get_device({("mqtt", "helloworld")})
    assert device is not None
    assert device.name == "Beer"
    # Re-discover with a changed device name; the registry entry updates.
    entity_config["device"]["name"] = "Milk"
    async_fire_mqtt_message(hass, discovery_topic, json.dumps(entity_config))
    await hass.async_block_till_done()
    device = device_registry.async_get_device({("mqtt", "helloworld")})
    assert device is not None
    assert device.name == "Milk"
async def help_test_entity_id_update_subscriptions(
    hass, mqtt_mock, domain, config, topics=None
):
    """Test MQTT subscriptions are managed when entity_id is updated.

    ``topics`` defaults to an availability topic plus a state topic added to
    the config; every listed topic must be (re)subscribed.
    """
    # Add unique_id to config
    config = copy.deepcopy(config)
    config[domain]["unique_id"] = "TOTALLY_UNIQUE"
    if topics is None:
        # Add default topics to config
        config[domain]["availability_topic"] = "avty-topic"
        config[domain]["state_topic"] = "test-topic"
        topics = ["avty-topic", "test-topic"]
    assert len(topics) > 0
    registry = mock_registry(hass, {})
    assert await async_setup_component(
        hass,
        domain,
        config,
    )
    await hass.async_block_till_done()
    state = hass.states.get(f"{domain}.test")
    assert state is not None
    # One subscription per configured topic.
    assert mqtt_mock.async_subscribe.call_count == len(topics)
    for topic in topics:
        mqtt_mock.async_subscribe.assert_any_call(topic, ANY, ANY, ANY)
    mqtt_mock.async_subscribe.reset_mock()
    # Renaming the entity recreates it; all topics must be re-subscribed.
    registry.async_update_entity(f"{domain}.test", new_entity_id=f"{domain}.milk")
    await hass.async_block_till_done()
    state = hass.states.get(f"{domain}.test")
    assert state is None
    state = hass.states.get(f"{domain}.milk")
    assert state is not None
    for topic in topics:
        mqtt_mock.async_subscribe.assert_any_call(topic, ANY, ANY, ANY)
async def help_test_entity_id_update_discovery_update(
    hass, mqtt_mock, domain, config, topic=None
):
    """Test MQTT discovery update after entity_id is updated."""
    # Add unique_id to config
    config = copy.deepcopy(config)
    config[domain]["unique_id"] = "TOTALLY_UNIQUE"
    if topic is None:
        # Add default topic to config
        config[domain]["availability_topic"] = "avty-topic"
        topic = "avty-topic"
    ent_registry = mock_registry(hass, {})
    data = json.dumps(config[domain])
    async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
    await hass.async_block_till_done()
    # Verify the availability topic is honored before the rename.
    async_fire_mqtt_message(hass, topic, "online")
    state = hass.states.get(f"{domain}.test")
    assert state.state != STATE_UNAVAILABLE
    async_fire_mqtt_message(hass, topic, "offline")
    state = hass.states.get(f"{domain}.test")
    assert state.state == STATE_UNAVAILABLE
    # Rename the entity, then re-discover with a new availability topic.
    ent_registry.async_update_entity(f"{domain}.test", new_entity_id=f"{domain}.milk")
    await hass.async_block_till_done()
    config[domain]["availability_topic"] = f"{topic}_2"
    data = json.dumps(config[domain])
    async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
    await hass.async_block_till_done()
    # Still a single entity; the renamed entity follows the new topic.
    assert len(hass.states.async_entity_ids(domain)) == 1
    async_fire_mqtt_message(hass, f"{topic}_2", "online")
    state = hass.states.get(f"{domain}.milk")
    assert state.state != STATE_UNAVAILABLE
async def help_test_entity_debug_info(hass, mqtt_mock, domain, config):
    """Test debug_info.

    This is a test helper for MQTT debug_info.
    """
    # Attach device info and a unique_id to the config.
    entity_config = copy.deepcopy(config[domain])
    entity_config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
    entity_config["unique_id"] = "veryunique"
    device_registry = dr.async_get(hass)
    discovery_topic = f"homeassistant/{domain}/bla/config"
    async_fire_mqtt_message(hass, discovery_topic, json.dumps(entity_config))
    await hass.async_block_till_done()
    device = device_registry.async_get_device({("mqtt", "helloworld")})
    assert device is not None
    # The device's debug info must expose exactly this one entity, its
    # discovery data and its single (still idle) subscription.
    debug_info_data = await debug_info.info_for_device(hass, device.id)
    entities = debug_info_data["entities"]
    assert len(entities) == 1
    assert entities[0]["discovery_data"]["topic"] == discovery_topic
    assert entities[0]["discovery_data"]["payload"] == entity_config
    assert len(entities[0]["subscriptions"]) == 1
    assert {"topic": "test-topic", "messages": []} in entities[0]["subscriptions"]
    assert len(debug_info_data["triggers"]) == 0
async def help_test_entity_debug_info_max_messages(hass, mqtt_mock, domain, config):
    """Test debug_info message overflow.

    This is a test helper for MQTT debug_info.
    """
    # Add device settings to config
    config = copy.deepcopy(config[domain])
    config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
    config["unique_id"] = "veryunique"
    registry = dr.async_get(hass)
    data = json.dumps(config)
    async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
    await hass.async_block_till_done()
    device = registry.async_get_device({("mqtt", "helloworld")})
    assert device is not None
    debug_info_data = await debug_info.info_for_device(hass, device.id)
    assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
    assert {"topic": "test-topic", "messages": []} in debug_info_data["entities"][0][
        "subscriptions"
    ]
    # Freeze the timestamp so the recorded message times are predictable.
    start_dt = datetime(2019, 1, 1, 0, 0, 0)
    with patch("homeassistant.util.dt.utcnow") as dt_utcnow:
        dt_utcnow.return_value = start_dt
        # Publish one more message than the debug buffer holds.
        for i in range(0, debug_info.STORED_MESSAGES + 1):
            async_fire_mqtt_message(hass, "test-topic", f"{i}")
    debug_info_data = await debug_info.info_for_device(hass, device.id)
    assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
    assert (
        len(debug_info_data["entities"][0]["subscriptions"][0]["messages"])
        == debug_info.STORED_MESSAGES
    )
    # The buffer drops the oldest message (payload "0") and keeps 1..N.
    messages = [
        {
            "payload": f"{i}",
            "qos": 0,
            "retain": False,
            "time": start_dt,
            "topic": "test-topic",
        }
        for i in range(1, debug_info.STORED_MESSAGES + 1)
    ]
    assert {"topic": "test-topic", "messages": messages} in debug_info_data["entities"][
        0
    ]["subscriptions"]
async def help_test_entity_debug_info_message(
    hass, mqtt_mock, domain, config, topic=None, payload=None
):
    """Test a message received on a subscribed topic is recorded in debug_info.

    This is a test helper for MQTT debug_info.
    """
    # Add device settings to config
    config = copy.deepcopy(config[domain])
    config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
    config["unique_id"] = "veryunique"
    if topic is None:
        # Add default topic to config
        config["state_topic"] = "state-topic"
        topic = "state-topic"
    if payload is None:
        payload = "ON"
    registry = dr.async_get(hass)
    data = json.dumps(config)
    async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
    await hass.async_block_till_done()
    device = registry.async_get_device({("mqtt", "helloworld")})
    assert device is not None
    debug_info_data = await debug_info.info_for_device(hass, device.id)
    assert len(debug_info_data["entities"][0]["subscriptions"]) >= 1
    assert {"topic": topic, "messages": []} in debug_info_data["entities"][0][
        "subscriptions"
    ]
    # Freeze the timestamp so the recorded message time is predictable.
    start_dt = datetime(2019, 1, 1, 0, 0, 0)
    with patch("homeassistant.util.dt.utcnow") as dt_utcnow:
        dt_utcnow.return_value = start_dt
        async_fire_mqtt_message(hass, topic, payload)
    debug_info_data = await debug_info.info_for_device(hass, device.id)
    assert len(debug_info_data["entities"][0]["subscriptions"]) >= 1
    assert {
        "topic": topic,
        "messages": [
            {
                "payload": payload,
                "qos": 0,
                "retain": False,
                "time": start_dt,
                "topic": topic,
            }
        ],
    } in debug_info_data["entities"][0]["subscriptions"]
async def help_test_entity_debug_info_remove(hass, mqtt_mock, domain, config):
    """Test debug_info.

    This is a test helper for MQTT debug_info.
    """
    # Attach device info and a unique_id to the config.
    entity_config = copy.deepcopy(config[domain])
    entity_config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
    entity_config["unique_id"] = "veryunique"
    device_registry = dr.async_get(hass)
    discovery_topic = f"homeassistant/{domain}/bla/config"
    async_fire_mqtt_message(hass, discovery_topic, json.dumps(entity_config))
    await hass.async_block_till_done()
    device = device_registry.async_get_device({("mqtt", "helloworld")})
    assert device is not None
    # Debug info initially exposes the discovered entity.
    debug_info_data = await debug_info.info_for_device(hass, device.id)
    entities = debug_info_data["entities"]
    assert len(entities) == 1
    assert entities[0]["discovery_data"]["topic"] == discovery_topic
    assert entities[0]["discovery_data"]["payload"] == entity_config
    assert len(entities[0]["subscriptions"]) == 1
    assert {"topic": "test-topic", "messages": []} in entities[0]["subscriptions"]
    assert len(debug_info_data["triggers"]) == 0
    assert entities[0]["entity_id"] == f"{domain}.test"
    entity_id = entities[0]["entity_id"]
    # Removing the config must purge the entity from debug info entirely.
    async_fire_mqtt_message(hass, discovery_topic, "")
    await hass.async_block_till_done()
    debug_info_data = await debug_info.info_for_device(hass, device.id)
    assert len(debug_info_data["entities"]) == 0
    assert len(debug_info_data["triggers"]) == 0
    assert entity_id not in hass.data[debug_info.DATA_MQTT_DEBUG_INFO]["entities"]
async def help_test_entity_debug_info_update_entity_id(hass, mqtt_mock, domain, config):
    """Test debug_info.

    This is a test helper for MQTT debug_info.
    """
    # Add device settings to config
    config = copy.deepcopy(config[domain])
    config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
    config["unique_id"] = "veryunique"
    dev_registry = dr.async_get(hass)
    ent_registry = mock_registry(hass, {})
    data = json.dumps(config)
    async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
    await hass.async_block_till_done()
    device = dev_registry.async_get_device({("mqtt", "helloworld")})
    assert device is not None
    # Debug info initially tracks the entity under its original entity_id.
    debug_info_data = await debug_info.info_for_device(hass, device.id)
    assert len(debug_info_data["entities"]) == 1
    assert (
        debug_info_data["entities"][0]["discovery_data"]["topic"]
        == f"homeassistant/{domain}/bla/config"
    )
    assert debug_info_data["entities"][0]["discovery_data"]["payload"] == config
    assert debug_info_data["entities"][0]["entity_id"] == f"{domain}.test"
    assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
    assert {"topic": "test-topic", "messages": []} in debug_info_data["entities"][0][
        "subscriptions"
    ]
    assert len(debug_info_data["triggers"]) == 0
    # Rename the entity; debug info must follow the new entity_id.
    ent_registry.async_update_entity(f"{domain}.test", new_entity_id=f"{domain}.milk")
    await hass.async_block_till_done()
    # NOTE(review): flushed twice — presumably the rename schedules
    # follow-up work that needs a second settle; confirm before removing.
    await hass.async_block_till_done()
    debug_info_data = await debug_info.info_for_device(hass, device.id)
    assert len(debug_info_data["entities"]) == 1
    assert (
        debug_info_data["entities"][0]["discovery_data"]["topic"]
        == f"homeassistant/{domain}/bla/config"
    )
    assert debug_info_data["entities"][0]["discovery_data"]["payload"] == config
    assert debug_info_data["entities"][0]["entity_id"] == f"{domain}.milk"
    assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
    assert {"topic": "test-topic", "messages": []} in debug_info_data["entities"][0][
        "subscriptions"
    ]
    assert len(debug_info_data["triggers"]) == 0
    # The stale entity_id must no longer be tracked.
    assert (
        f"{domain}.test" not in hass.data[debug_info.DATA_MQTT_DEBUG_INFO]["entities"]
    )
async def help_test_entity_disabled_by_default(hass, mqtt_mock, domain, config):
    """Test a discovered entity disabled by default is registered but not added."""
    # Add device settings to config
    config = copy.deepcopy(config[domain])
    config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
    config["enabled_by_default"] = False
    config["unique_id"] = "veryunique1"
    dev_registry = dr.async_get(hass)
    ent_registry = er.async_get(hass)
    # Discover a disabled entity
    data = json.dumps(config)
    async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla1/config", data)
    await hass.async_block_till_done()
    # Present in the entity registry, but no state object is created.
    entity_id = ent_registry.async_get_entity_id(domain, mqtt.DOMAIN, "veryunique1")
    assert not hass.states.get(entity_id)
    assert dev_registry.async_get_device({("mqtt", "helloworld")})
    # Discover an enabled entity, tied to the same device
    config["enabled_by_default"] = True
    config["unique_id"] = "veryunique2"
    data = json.dumps(config)
    async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla2/config", data)
    await hass.async_block_till_done()
    entity_id = ent_registry.async_get_entity_id(domain, mqtt.DOMAIN, "veryunique2")
    assert hass.states.get(entity_id)
    # Remove the enabled entity, both entities and the device should be removed
    async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla2/config", "")
    await hass.async_block_till_done()
    assert not ent_registry.async_get_entity_id(domain, mqtt.DOMAIN, "veryunique1")
    assert not ent_registry.async_get_entity_id(domain, mqtt.DOMAIN, "veryunique2")
    assert not dev_registry.async_get_device({("mqtt", "helloworld")})
async def help_test_entity_category(hass, mqtt_mock, domain, config):
    """Test discovery of entities with and without an entity category."""
    # Add device settings to config
    config = copy.deepcopy(config[domain])
    config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
    ent_registry = er.async_get(hass)
    # Discover an entity without entity category
    unique_id = "veryunique1"
    config["unique_id"] = unique_id
    data = json.dumps(config)
    async_fire_mqtt_message(hass, f"homeassistant/{domain}/{unique_id}/config", data)
    await hass.async_block_till_done()
    entity_id = ent_registry.async_get_entity_id(domain, mqtt.DOMAIN, unique_id)
    assert hass.states.get(entity_id)
    entry = ent_registry.async_get(entity_id)
    assert entry.entity_category is None
    # Discover an entity with entity category set to "config"
    unique_id = "veryunique2"
    config["entity_category"] = "config"
    config["unique_id"] = unique_id
    data = json.dumps(config)
    async_fire_mqtt_message(hass, f"homeassistant/{domain}/{unique_id}/config", data)
    await hass.async_block_till_done()
    entity_id = ent_registry.async_get_entity_id(domain, mqtt.DOMAIN, unique_id)
    assert hass.states.get(entity_id)
    entry = ent_registry.async_get(entity_id)
    assert entry.entity_category == "config"
    # Discover an entity with entity category set to "no_such_category"
    unique_id = "veryunique3"
    config["entity_category"] = "no_such_category"
    config["unique_id"] = unique_id
    data = json.dumps(config)
    async_fire_mqtt_message(hass, f"homeassistant/{domain}/{unique_id}/config", data)
    await hass.async_block_till_done()
    # Invalid category: discovery rejects the config and no entity appears.
    assert not ent_registry.async_get_entity_id(domain, mqtt.DOMAIN, unique_id)
| {
"content_hash": "6bf8d01b00156e18b8eb93ba7ca6c048",
"timestamp": "",
"source": "github",
"line_count": 1268,
"max_line_length": 108,
"avg_line_length": 34.28627760252366,
"alnum_prop": 0.6645198389879241,
"repo_name": "home-assistant/home-assistant",
"id": "16af5b8e48484603e294def1b6cda7fc3f740eea",
"size": "43475",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/mqtt/test_common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.settings.apipassword import views

# URL routes for the "API password" settings panel: a single index view.
# NOTE(review): `patterns()` was deprecated in Django 1.8 and removed in
# 1.10; when upgrading, replace with a plain list of url()/path() entries.
urlpatterns = patterns('',
    url(r'^$', views.ApiPasswordView.as_view(), name='index'))
| {
"content_hash": "1563be2bed911060a198796e5f5d7c9f",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 69,
"avg_line_length": 29.25,
"alnum_prop": 0.7649572649572649,
"repo_name": "trystack/python-django-horizon-facebook",
"id": "e2fb5ce6e0dbcca5b5db87b438c641f5b229184b",
"size": "897",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/settings/apipassword/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3011"
},
{
"name": "Python",
"bytes": "19739"
},
{
"name": "Shell",
"bytes": "499"
}
],
"symlink_target": ""
} |
from rest_framework import generics, permissions, authentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from rest_framework.response import Response
from django.shortcuts import get_object_or_404
from django.db.models import Q
from .serializers import *
from .permissions import *
from .models import Account as User
class UserList(generics.ListAPIView):
    """Paginated list of all accounts; authenticated users only."""

    serializer_class = UserSerializer
    permission_classes = (permissions.IsAuthenticated, )
    paginate_by = 100
    queryset = User.objects.all()
    model = User
class UserDetail(generics.RetrieveAPIView):
    """Retrieve a single account, looked up by username."""

    serializer_class = UserSerializer
    permission_classes = (permissions.IsAuthenticated, )
    queryset = User.objects.all()
    lookup_field = 'username'
class CurrentUser(APIView):
    """Return the serialized account of the requesting user."""

    lookup_field = 'username'
    permission_classes = (permissions.IsAuthenticated,)

    def get(self, request, format=None):
        # The authenticated user is the resource; no lookup required.
        return Response(UserSerializer(request.user).data)
| {
"content_hash": "6cef28d8a96d12c48fc76d9d1e358077",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 67,
"avg_line_length": 33.42424242424242,
"alnum_prop": 0.7633726201269265,
"repo_name": "underlost/coreExtend",
"id": "b577f948c9f4c72ab5d9bef54390e775905afda4",
"size": "1103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coreExtend/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "13222"
},
{
"name": "Python",
"bytes": "20723"
}
],
"symlink_target": ""
} |
from itertools import count
import re, os, cStringIO, time, cgi, urlparse
from xml.dom import minidom as dom
from xml.sax.handler import ErrorHandler, feature_validation
from xml.dom.pulldom import SAX2DOM
from xml.sax import make_parser
from xml.sax.xmlreader import InputSource
from twisted.python import htmlizer, text
from twisted.python.filepath import FilePath
from twisted.web import domhelpers
import process, latex, indexer, numberer, htmlbook
# relative links to html files
def fixLinks(document, ext):
    """
    Rewrite links to XHTML lore input documents so they point to lore XHTML
    output documents.

    Relative C{href} values ending in C{html} have their extension replaced
    with C{ext}.  Absolute links (C{http}, C{https}, C{ftp}, C{mailto}),
    links with C{class="absolute"} and listing links are left untouched.

    @type document: A DOM Node or Document
    @param document: The input document which contains all of the content to
        be presented.

    @type ext: C{str}
    @param ext: The extension to use when selecting an output file name.

    @return: C{None}
    """
    absoluteSchemes = ['http', 'https', 'ftp', 'mailto']
    for node in domhelpers.findElementsWithAttribute(document, 'href'):
        href = node.getAttribute("href")
        if urlparse.urlparse(href)[0] in absoluteSchemes:
            continue
        cssClass = node.getAttribute("class")
        if cssClass == "absolute" or 'listing' in cssClass:
            continue
        # This is a relative link, so it should be munged.
        if href.endswith('html') or href[:href.rfind('#')].endswith('html'):
            base, extension = os.path.splitext(href)
            if '#' in extension:
                extension = ext + '#' + extension.split('#', 1)[1]
            else:
                extension = ext
            node.setAttribute("href", base + extension)
def addMtime(document, fullpath):
    """
    Set the last modified time of the given document.

    @type document: A DOM Node or Document
    @param document: The output template which defines the presentation of
        the last modified time.

    @type fullpath: C{str}
    @param fullpath: The file name from which to take the last modified time.

    @return: C{None}
    """
    mtimeText = time.ctime(os.path.getmtime(fullpath))
    for node in domhelpers.findElementsWithAttribute(document, "class", "mtime"):
        textNode = dom.Text()
        textNode.data = mtimeText
        node.appendChild(textNode)
def _getAPI(node):
"""
Retrieve the fully qualified Python name represented by the given node.
The name is represented by one or two aspects of the node: the value of the
node's first child forms the end of the name. If the node has a C{base}
attribute, that attribute's value is prepended to the node's value, with
C{.} separating the two parts.
@rtype: C{str}
@return: The fully qualified Python name.
"""
base = ""
if node.hasAttribute("base"):
base = node.getAttribute("base") + "."
return base+node.childNodes[0].nodeValue
def fixAPI(document, url):
    """
    Replace API references with links to API documentation.

    @type document: A DOM Node or Document
    @param document: The input document which contains all of the content to
        be presented.

    @type url: C{str}
    @param url: A format string interpolated with the fully qualified Python
        name of each API reference encountered; the result is used as the
        link target in the output document.

    @return: C{None}
    """
    # API references
    for node in domhelpers.findElementsWithAttribute(document, "class", "API"):
        fullname = _getAPI(node)
        anchor = dom.Element('a')
        anchor.setAttribute('href', url % (fullname,))
        anchor.setAttribute('title', fullname)
        # Reparent all of the reference's children under the new anchor.
        while node.childNodes:
            anchor.appendChild(node.removeChild(node.childNodes[0]))
        node.appendChild(anchor)
        if node.hasAttribute('base'):
            node.removeAttribute('base')
def fontifyPython(document):
    """
    Syntax color any node in the given document which contains a Python
    source listing.

    @type document: A DOM Node or Document
    @param document: The input document which contains all of the content to
        be presented.

    @return: C{None}
    """
    def isPythonListing(node):
        return (node.nodeName == 'pre' and node.hasAttribute('class')
                and node.getAttribute('class') == 'python')

    for listing in domhelpers.findElements(document, isPythonListing):
        fontifyPythonNode(listing)
def fontifyPythonNode(node):
    """
    Syntax color the given node containing Python source code.

    The node must have a parent.

    @return: C{None}
    """
    # Extract the plain source text from the node, undoing XML entity
    # escaping, and normalize it to end with exactly one newline.
    oldio = cStringIO.StringIO()
    latex.getLatexText(node, oldio.write,
                       entities={'lt': '<', 'gt': '>', 'amp': '&'})
    oldio = cStringIO.StringIO(oldio.getvalue().strip()+'\n')
    howManyLines = len(oldio.getvalue().splitlines())
    newio = cStringIO.StringIO()
    # Colorize the source into HTML markup.
    htmlizer.filter(oldio, newio, writer=htmlizer.SmallerHTMLWriter)
    lineLabels = _makeLineNumbers(howManyLines)
    newel = dom.parseString(newio.getvalue()).documentElement
    newel.setAttribute("class", "python")
    # Swap the colorized markup in for the original listing, then prepend
    # the line-number gutter.
    node.parentNode.replaceChild(newel, node)
    newel.insertBefore(lineLabels, newel.firstChild)
def addPyListings(document, dir):
    """
    Insert Python source listings into the given document from files in the
    given directory based on C{py-listing} nodes.

    Any node in C{document} with a C{class} attribute set to C{py-listing}
    will have source lines taken from the file named in that node's C{href}
    attribute (searched for in C{dir}) inserted in place of that node.

    If a node has a C{skipLines} attribute, its value will be parsed as an
    integer and that many lines will be skipped at the beginning of the
    source file.

    @type document: A DOM Node or Document
    @param document: The document within which to make listing replacements.

    @type dir: C{str}
    @param dir: The directory in which to find source files containing the
        referenced Python listings.

    @return: C{None}
    """
    for node in domhelpers.findElementsWithAttribute(document, "class",
                                                     "py-listing"):
        filename = node.getAttribute("href")
        outfile = cStringIO.StringIO()
        # Close the source file explicitly instead of leaking the handle
        # until garbage collection.
        listingFile = open(os.path.join(dir, filename))
        try:
            lines = map(str.rstrip, listingFile.readlines())
        finally:
            listingFile.close()
        skip = node.getAttribute('skipLines') or 0
        lines = lines[int(skip):]
        howManyLines = len(lines)
        data = '\n'.join(lines)
        data = cStringIO.StringIO(text.removeLeadingTrailingBlanks(data))
        htmlizer.filter(data, outfile, writer=htmlizer.SmallerHTMLWriter)
        sourceNode = dom.parseString(outfile.getvalue()).documentElement
        sourceNode.insertBefore(_makeLineNumbers(howManyLines),
                                sourceNode.firstChild)
        _replaceWithListing(node, sourceNode.toxml(), filename, "py-listing")
def _makeLineNumbers(howMany):
"""
Return an element which will render line numbers for a source listing.
@param howMany: The number of lines in the source listing.
@type howMany: C{int}
@return: An L{dom.Element} which can be added to the document before
the source listing to add line numbers to it.
"""
# Figure out how many digits wide the widest line number label will be.
width = len(str(howMany))
# Render all the line labels with appropriate padding
labels = ['%*d' % (width, i) for i in range(1, howMany + 1)]
# Create a p element with the right style containing the labels
p = dom.Element('p')
p.setAttribute('class', 'py-linenumber')
t = dom.Text()
t.data = '\n'.join(labels) + '\n'
p.appendChild(t)
return p
def _replaceWithListing(node, val, filename, class_):
    """
    Replace C{node} with a C{class_}-styled listing div wrapping C{val},
    captioned with the node's text (or 'Source listing') and a link to
    C{filename}.
    """
    captionTitle = domhelpers.getNodeText(node)
    if captionTitle == os.path.basename(filename):
        captionTitle = 'Source listing'
    # Renamed from `text` to avoid shadowing the twisted.python.text import.
    markup = ('<div class="%s">%s<div class="caption">%s - '
              '<a href="%s"><span class="filename">%s</span></a></div></div>' %
              (class_, val, captionTitle, filename, filename))
    replacement = dom.parseString(markup).documentElement
    node.parentNode.replaceChild(replacement, node)
def addHTMLListings(document, dir):
    """
    Insert HTML source listings into the given document from files in the given
    directory based on C{html-listing} nodes.

    Any node in C{document} with a C{class} attribute set to C{html-listing}
    will have source lines taken from the file named in that node's C{href}
    attribute (searched for in C{dir}) inserted in place of that node.

    @type document: A DOM Node or Document
    @param document: The document within which to make listing replacements.

    @type dir: C{str}
    @param dir: The directory in which to find source files containing the
        referenced HTML listings.

    @return: C{None}
    """
    for node in domhelpers.findElementsWithAttribute(document, "class",
                                                     "html-listing"):
        filename = node.getAttribute("href")
        # Close the listing file explicitly instead of leaking the handle
        # until garbage collection.
        listingFile = open(os.path.join(dir, filename))
        try:
            source = listingFile.read()
        finally:
            listingFile.close()
        val = ('<pre class="htmlsource">\n%s</pre>' %
               cgi.escape(source))
        _replaceWithListing(node, val, filename, "html-listing")
def addPlainListings(document, dir):
    """
    Insert text listings into the given document from files in the given
    directory based on C{listing} nodes.

    Any node in C{document} with a C{class} attribute set to C{listing} will
    have source lines taken from the file named in that node's C{href}
    attribute (searched for in C{dir}) inserted in place of that node.

    @type document: A DOM Node or Document
    @param document: The document within which to make listing replacements.

    @type dir: C{str}
    @param dir: The directory in which to find source files containing the
        referenced text listings.

    @return: C{None}
    """
    for node in domhelpers.findElementsWithAttribute(document, "class",
                                                     "listing"):
        filename = node.getAttribute("href")
        # Close the listing file explicitly instead of leaking the handle
        # until garbage collection.
        listingFile = open(os.path.join(dir, filename))
        try:
            source = listingFile.read()
        finally:
            listingFile.close()
        val = ('<pre>\n%s</pre>' %
               cgi.escape(source))
        _replaceWithListing(node, val, filename, "listing")
def getHeaders(document):
    """
    Return all H2 and H3 nodes in the given document.

    @type document: A DOM Node or Document

    @rtype: C{list}
    """
    headerPattern = re.compile('h[23]$')
    return domhelpers.findElements(
        document, lambda node: headerPattern.match(node.nodeName))
def generateToC(document):
    """
    Create a table of contents for the given document.

    @type document: A DOM Node or Document

    @rtype: A DOM Node
    @return: a Node containing a table of contents based on the headers of
        the given document.
    """
    # Group each H2 with the H3s that follow it.
    sections = []
    currentSubHeaders = None
    for header in getHeaders(document):
        if header.tagName == 'h2':
            currentSubHeaders = []
            sections.append((header, currentSubHeaders))
        elif currentSubHeaders is None:
            raise ValueError(
                "No H3 element is allowed until after an H2 element")
        else:
            currentSubHeaders.append(header)

    counter = count()

    def addItem(headerElement, parent):
        # Add a linking entry to the ToC...
        name = 'auto%d' % (next(counter),)
        anchor = dom.Element('a')
        anchor.setAttribute('href', '#' + name)
        linkText = dom.Text()
        linkText.data = domhelpers.getNodeText(headerElement)
        anchor.appendChild(linkText)
        headerNameItem = dom.Element('li')
        headerNameItem.appendChild(anchor)
        parent.appendChild(headerNameItem)
        # ...and a matching named anchor at the header itself.
        target = dom.Element('a')
        target.setAttribute('name', name)
        headerElement.appendChild(target)

    toc = dom.Element('ol')
    for headerElement, subHeaders in sections:
        addItem(headerElement, toc)
        if subHeaders:
            subtoc = dom.Element('ul')
            toc.appendChild(subtoc)
            for subHeaderElement in subHeaders:
                addItem(subHeaderElement, subtoc)
    return toc
def putInToC(document, toc):
    """
    Insert the given table of contents into the given document.

    The first node with a C{class} attribute set to C{toc} has its children
    replaced with C{toc}.  If no such node exists, the document is left
    unchanged.

    @type document: A DOM Node or Document
    @type toc: A DOM Node
    """
    holders = domhelpers.findElementsWithAttribute(document, 'class', 'toc')
    if holders:
        holders[0].childNodes = [toc]
def removeH1(document):
    """
    Replace all C{h1} nodes in the given document with empty C{span} nodes.

    C{h1} nodes mark up document sections and the output template is given an
    opportunity to present this information in a different way.

    @type document: A DOM Node or Document

    @param document: The input document which contains all of the content to be
        presented.

    @return: C{None}
    """
    h1 = domhelpers.findNodesNamed(document, 'h1')
    empty = dom.Element('span')
    # NOTE(review): the same `empty` element instance is used as the
    # replacement for every h1 node.  If this DOM implementation enforces a
    # single parent per node, only the last replacement would retain it --
    # confirm the implementation tolerates sharing, or that documents only
    # ever contain one h1.
    for node in h1:
        node.parentNode.replaceChild(empty, node)
def footnotes(document):
    """
    Find footnotes in the given document, move them to the end of the body, and
    generate links to them.

    A footnote is any node with a C{class} attribute set to C{footnote}.
    Footnote links are generated as superscript.  Footnotes are collected in a
    C{ol} node at the end of the document.

    @type document: A DOM Node or Document

    @param document: The input document which contains all of the content to be
        presented.

    @return: C{None}
    """
    footnotes = domhelpers.findElementsWithAttribute(document, "class",
                                                     "footnote")
    if not footnotes:
        return
    footnoteElement = dom.Element('ol')
    # `id` numbers footnotes from 1; note it shadows the `id` builtin for the
    # duration of this function.
    id = 1
    for footnote in footnotes:
        # `% vars()` interpolates the local `id` into the link markup.
        href = dom.parseString('<a href="#footnote-%(id)d">'
                               '<super>%(id)d</super></a>'
                               % vars()).documentElement
        # Collapse the footnote's text onto one line for the link tooltip.
        text = ' '.join(domhelpers.getNodeText(footnote).split())
        href.setAttribute('title', text)
        # Wrap the footnote body in a named anchor so the link can target it.
        target = dom.Element('a')
        target.setAttribute('name', 'footnote-%d' % (id,))
        target.childNodes = [footnote]
        footnoteContent = dom.Element('li')
        footnoteContent.childNodes = [target]
        footnoteElement.childNodes.append(footnoteContent)
        # Replace the inline footnote with the superscript link to it.
        footnote.parentNode.replaceChild(href, footnote)
        id += 1
    body = domhelpers.findNodesNamed(document, "body")[0]
    header = dom.parseString('<h2>Footnotes</h2>').documentElement
    body.childNodes.append(header)
    body.childNodes.append(footnoteElement)
def notes(document):
    """
    Find notes in the given document and mark them up as such.

    A note is any node with a C{class} attribute set to C{note}.

    (I think this is a very stupid feature.  When I found it I actually
    exclaimed out loud. -exarkun)

    @type document: A DOM Node or Document

    @param document: The input document which contains all of the content to be
        presented.

    @return: C{None}
    """
    notes = domhelpers.findElementsWithAttribute(document, "class", "note")
    notePrefix = dom.parseString('<strong>Note: </strong>').documentElement
    # NOTE(review): the single `notePrefix` node is inserted into every note;
    # if this DOM implementation enforces one parent per node, only the last
    # insertion would keep it -- confirm sharing is tolerated here.
    for note in notes:
        note.childNodes.insert(0, notePrefix)
def findNodeJustBefore(target, nodes):
    """
    Find the last Element which is a sibling of C{target} and is in C{nodes}.

    The search walks from C{target} towards the document root: at each level
    the previous siblings are examined nearest-first, and the first one found
    in C{nodes} is returned.

    @param target: A node the previous sibling of which to return.

    @param nodes: A list of nodes which might be the right node.

    @return: The previous sibling of C{target}.

    @raise RuntimeError: If no node from C{nodes} precedes C{target} at any
        level of the tree.
    """
    while target is not None:
        sibling = target.previousSibling
        while sibling is not None:
            if sibling in nodes:
                return sibling
            sibling = sibling.previousSibling
        target = target.parentNode
    # A descriptive message instead of the previous "Oops".
    raise RuntimeError("no candidate node found before the target node")
def getFirstAncestorWithSectionHeader(entry):
    """
    Visit the ancestors of C{entry} until one with at least one C{h2} child
    node is found, then return all of that node's C{h2} child nodes.

    @type entry: A DOM Node

    @param entry: The node from which to begin traversal.  This node itself is
        excluded from consideration.

    @rtype: C{list} of DOM Nodes

    @return: All C{h2} nodes of the ultimately selected parent node, or an
        empty list if no ancestor has any.
    """
    # [1:] skips entry itself; only true ancestors are considered.
    for ancestor in domhelpers.getParents(entry)[1:]:
        headers = domhelpers.findNodesNamed(ancestor, "h2")
        if headers:
            return headers
    return []
def getSectionNumber(header):
    """
    Retrieve the section number of the given node.

    This is probably intended to interact in a rather specific way with
    L{numberDocument}.

    @type header: A DOM Node or L{None}

    @param header: The section from which to extract a number.  The section
        number is the value of this node's first child.

    @return: C{None} or a C{str} giving the section number.
    """
    if not header:
        return None
    numberNode = header.childNodes[0]
    return domhelpers.gatherTextNodes(numberNode)
def getSectionReference(entry):
    """
    Find the section number which contains the given node.

    This function looks at the given node's ancestry until it finds a node
    which defines a section, then returns that section's number.

    @type entry: A DOM Node

    @param entry: The node for which to determine the section.

    @rtype: C{str}

    @return: The section number, as returned by C{getSectionNumber} of the
        first ancestor of C{entry} which defines a section, as determined by
        L{getFirstAncestorWithSectionHeader}.
    """
    candidates = getFirstAncestorWithSectionHeader(entry)
    return getSectionNumber(findNodeJustBefore(entry, candidates))
def index(document, filename, chapterReference):
    """
    Extract index entries from the given document and store them for later use
    and insert named anchors so that the index can link back to those entries.

    Any node with a C{class} attribute set to C{index} is considered an index
    entry.

    @type document: A DOM Node or Document

    @param document: The input document which contains all of the content to be
        presented.

    @type filename: C{str}

    @param filename: A link to the output for the given document which will be
        included in the index to link to any index entry found here.

    @type chapterReference: ???

    @param chapterReference: ???

    @return: C{None}
    """
    entries = domhelpers.findElementsWithAttribute(document, "class", "index")
    if not entries:
        return
    # enumerate replaces the previous manual `i = 0; i += 1` counter.
    for i, entry in enumerate(entries, 1):
        anchor = 'index%02d' % i
        if chapterReference:
            ref = getSectionReference(entry) or chapterReference
        else:
            ref = 'link'
        indexer.addEntry(filename, anchor, entry.getAttribute('value'), ref)
        # Turn the entry into a named anchor so the index can link back here.
        # (Assigning nodeName as well as tagName mirrors the original code;
        # whether nodeName affects serialization depends on the DOM in use.)
        entry.nodeName = entry.tagName = entry.endTagName = 'a'
        for attrName in entry.attributes.keys():
            entry.removeAttribute(attrName)
        entry.setAttribute('name', anchor)
def setIndexLink(template, indexFilename):
    """
    Insert a link to an index document.

    Any node with a C{class} attribute set to C{index-link} will have its tag
    name changed to C{a} and its C{href} attribute set to C{indexFilename}.

    @type template: A DOM Node or Document

    @param template: The output template which defines the presentation of the
        version information.

    @type indexFilename: C{str} or C{None}

    @param indexFilename: The address of the index document to which to link.
        If C{None}, this function will remove all index-link nodes instead.
        (Only C{None} triggers removal; other false values such as C{''} are
        still assigned to C{href}.)

    @return: C{None}
    """
    indexLinks = domhelpers.findElementsWithAttribute(template,
                                                      "class",
                                                      "index-link")
    for link in indexLinks:
        if indexFilename is None:
            link.parentNode.removeChild(link)
        else:
            # Convert the placeholder node into a plain <a href=...> link,
            # dropping all of its previous attributes.
            link.nodeName = link.tagName = link.endTagName = 'a'
            for attrName in link.attributes.keys():
                link.removeAttribute(attrName)
            link.setAttribute('href', indexFilename)
def numberDocument(document, chapterNumber):
    """
    Number the sections of the given document.

    A dot-separated chapter.section number is added to the beginning of each
    section, as defined by C{h2} nodes.

    This is probably intended to interact in a rather specific way with
    L{getSectionNumber}.

    @type document: A DOM Node or Document

    @param document: The input document which contains all of the content to be
        presented.

    @type chapterNumber: C{int}

    @param chapterNumber: The chapter number of this content in an overall
        document.

    @return: C{None}
    """
    for sectionNumber, node in enumerate(
            domhelpers.findNodesNamed(document, "h2"), 1):
        label = dom.Text()
        label.data = "%s.%d " % (chapterNumber, sectionNumber)
        node.insertBefore(label, node.firstChild)
def fixRelativeLinks(document, linkrel):
    """
    Replace relative links in C{src} and C{href} attributes with links relative
    to C{linkrel}.

    @type document: A DOM Node or Document

    @param document: The output template.

    @type linkrel: C{str}

    @param linkrel: An prefix to apply to all relative links in C{src} or
        C{href} attributes in the input document when generating the output
        document.
    """
    for attributeName in ('src', 'href'):
        for node in domhelpers.findElementsWithAttribute(document,
                                                         attributeName):
            target = node.getAttribute(attributeName)
            # Absolute URLs (http...) and root-relative paths are untouched.
            if not target.startswith(('http', '/')):
                node.setAttribute(attributeName, linkrel + target)
def setTitle(template, title, chapterNumber):
    """
    Add title and chapter number information to the template document.

    The title is added to the end of the first C{title} tag and the end of the
    first tag with a C{class} attribute set to C{title}.  If specified, the
    chapter is inserted before the title.

    @type template: A DOM Node or Document

    @param template: The output template which defines the presentation of the
        version information.

    @type title: C{list} of DOM Nodes

    @param title: Nodes from the input document defining its title.  Note that
        this list is mutated in place when a chapter number is prepended.

    @type chapterNumber: C{int}

    @param chapterNumber: The chapter number of this content in an overall
        document.  If not applicable, any C{False} value will result in this
        information being omitted.

    @return: C{None}
    """
    if numberer.getNumberSections() and chapterNumber:
        titleNode = dom.Text()
        # This is necessary in order for cloning below to work. See Python
        # issue 4851.
        titleNode.ownerDocument = template.ownerDocument
        titleNode.data = '%s. ' % (chapterNumber,)
        title.insert(0, titleNode)
    # Copy the title nodes into both the <title> tag and any class="title"
    # element; cloneNode keeps each copy independent.
    for nodeList in (domhelpers.findNodesNamed(template, "title"),
                     domhelpers.findElementsWithAttribute(template, "class",
                                                          'title')):
        if nodeList:
            for titleNode in title:
                nodeList[0].appendChild(titleNode.cloneNode(True))
def setAuthors(template, authors):
    """
    Add author information to the template document.

    Names and contact information for authors are added to each node with a
    C{class} attribute set to C{authors} and to the template head as C{link}
    nodes.

    @type template: A DOM Node or Document

    @param template: The output template which defines the presentation of the
        version information.

    @type authors: C{list} of two-tuples of C{str}

    @param authors: List of names and contact information for the authors of
        the input document.

    @return: C{None}
    """
    for node in domhelpers.findElementsWithAttribute(template,
                                                     "class", 'authors'):
        # First, similarly to setTitle, insert text into an <div
        # class="authors">
        container = dom.Element('span')
        for name, href in authors:
            anchor = dom.Element('a')
            anchor.setAttribute('href', href)
            anchorText = dom.Text()
            anchorText.data = name
            anchor.appendChild(anchorText)
            # English-style list punctuation: "A", "A and B", "A, B, and C".
            if (name, href) == authors[-1]:
                if len(authors) == 1:
                    container.appendChild(anchor)
                else:
                    andText = dom.Text()
                    andText.data = 'and '
                    container.appendChild(andText)
                    container.appendChild(anchor)
            else:
                container.appendChild(anchor)
                commaText = dom.Text()
                commaText.data = ', '
                container.appendChild(commaText)
        node.appendChild(container)
    # Second, add appropriate <link rel="author" ...> tags to the <head>.
    # Note that `authors` is rebound here from tuples to DOM nodes.
    head = domhelpers.findNodesNamed(template, 'head')[0]
    authors = [dom.parseString('<link rel="author" href="%s" title="%s"/>'
                               % (href, name)).childNodes[0]
               for name, href in authors]
    head.childNodes.extend(authors)
def setVersion(template, version):
    """
    Add a version indicator to the given template.

    @type template: A DOM Node or Document

    @param template: The output template which defines the presentation of the
        version information.

    @type version: C{str}

    @param version: The version string to add to the template.

    @return: C{None}
    """
    versionHolders = domhelpers.findElementsWithAttribute(template, "class",
                                                          "version")
    for holder in versionHolders:
        versionText = dom.Text()
        versionText.data = version
        holder.appendChild(versionText)
def getOutputFileName(originalFileName, outputExtension, index=None):
    """
    Return a filename which is the same as C{originalFileName} except for the
    extension, which is replaced with C{outputExtension}.

    For example, if C{originalFileName} is C{'/foo/bar.baz'} and
    C{outputExtension} is C{'.quux'}, the return value will be
    C{'/foo/bar.quux'}.

    @type originalFileName: C{str}
    @type outputExtension: C{str}
    @param index: ignored, never passed.
    @rtype: C{str}
    """
    base, _ = os.path.splitext(originalFileName)
    return base + outputExtension
def munge(document, template, linkrel, dir, fullpath, ext, url, config, outfileGenerator=getOutputFileName):
    """
    Mutate C{template} until it resembles C{document}.

    @type document: A DOM Node or Document

    @param document: The input document which contains all of the content to be
        presented.

    @type template: A DOM Node or Document

    @param template: The template document which defines the desired
        presentation format of the content.

    @type linkrel: C{str}

    @param linkrel: An prefix to apply to all relative links in C{src} or
        C{href} attributes in the input document when generating the output
        document.

    @type dir: C{str}

    @param dir: The directory in which to search for source listing files.

    @type fullpath: C{str}

    @param fullpath: The file name which contained the input document.

    @type ext: C{str}

    @param ext: The extension to use when selecting an output file name.  This
        replaces the extension of the input file name.

    @type url: C{str}

    @param url: A string which will be interpolated with the fully qualified
        Python name of any API reference encountered in the input document, the
        result of which will be used as a link to API documentation for that
        name in the output document.

    @type config: C{dict}

    @param config: Further specification of the desired form of the output.
        Valid keys in this dictionary::

            noapi: If present and set to a True value, links to API
                documentation will not be generated.

            version: A string which will be included in the output to indicate
                the version of this documentation.

    @type outfileGenerator: Callable of C{str}, C{str} returning C{str}

    @param outfileGenerator: Output filename factory.  This is invoked with the
        input filename and C{ext} and the output document is serialized to the
        file with the name returned.

    @return: C{None}
    """
    # Each helper below mutates either `template` or `document` in place;
    # the order of these calls is significant.
    fixRelativeLinks(template, linkrel)
    addMtime(template, fullpath)
    removeH1(document)
    if not config.get('noapi', False):
        fixAPI(document, url)
    fontifyPython(document)
    fixLinks(document, ext)
    addPyListings(document, dir)
    addHTMLListings(document, dir)
    addPlainListings(document, dir)
    putInToC(template, generateToC(document))
    footnotes(document)
    notes(document)
    setIndexLink(template, indexer.getIndexFilename())
    setVersion(template, config.get('version', ''))
    # Insert the document into the template
    chapterNumber = htmlbook.getNumber(fullpath)
    title = domhelpers.findNodesNamed(document, 'title')[0].childNodes
    setTitle(template, title, chapterNumber)
    if numberer.getNumberSections() and chapterNumber:
        numberDocument(document, chapterNumber)
    index(document, outfileGenerator(os.path.split(fullpath)[1], ext),
          htmlbook.getReference(fullpath))
    # Collect author information from <link rel="author"> nodes.
    authors = domhelpers.findNodesNamed(document, 'link')
    authors = [(node.getAttribute('title') or '',
                node.getAttribute('href') or '')
               for node in authors
               if node.getAttribute('rel') == 'author']
    setAuthors(template, authors)
    # Graft the document body into the template's body placeholder.
    body = domhelpers.findNodesNamed(document, "body")[0]
    tmplbody = domhelpers.findElementsWithAttribute(template, "class",
                                                    "body")[0]
    tmplbody.childNodes = body.childNodes
    tmplbody.setAttribute("class", "content")
class _LocationReportingErrorHandler(ErrorHandler):
    """
    Define a SAX error handler which can report the location of fatal
    errors.

    Unlike the errors reported during parsing by other APIs in the xml
    package, this one tries to improve mismatched tag errors by including the
    location of both the relevant opening and closing tags.
    """
    def __init__(self, contentHandler):
        # The content handler tracks open-tag locations; see
        # _TagTrackingContentHandler._locationStack.
        self.contentHandler = contentHandler
    def fatalError(self, err):
        # Unfortunately, the underlying expat error code is only exposed as
        # a string. I surely do hope no one ever goes and localizes expat.
        if err.getMessage() == 'mismatched tag':
            expect, begLine, begCol = self.contentHandler._locationStack[-1]
            endLine, endCol = err.getLineNumber(), err.getColumnNumber()
            raise process.ProcessingFailure(
                "mismatched close tag at line %d, column %d; expected </%s> "
                "(from line %d, column %d)" % (
                    endLine, endCol, expect, begLine, begCol))
        raise process.ProcessingFailure(
            '%s at line %d, column %d' % (err.getMessage(),
                                          err.getLineNumber(),
                                          err.getColumnNumber()))
class _TagTrackingContentHandler(SAX2DOM):
    """
    Define a SAX content handler which keeps track of the start location of
    all open tags.  This information is used by the above defined error
    handler to report useful locations when a fatal error is encountered.
    """
    def __init__(self):
        SAX2DOM.__init__(self)
        # Stack of (tag name, line, column) for currently-open elements.
        self._locationStack = []
    def setDocumentLocator(self, locator):
        # Keep the locator so start/end element handlers can query positions.
        self._docLocator = locator
        SAX2DOM.setDocumentLocator(self, locator)
    def startElement(self, name, attrs):
        self._locationStack.append((name, self._docLocator.getLineNumber(), self._docLocator.getColumnNumber()))
        SAX2DOM.startElement(self, name, attrs)
    def endElement(self, name):
        self._locationStack.pop()
        SAX2DOM.endElement(self, name)
class _LocalEntityResolver(object):
    """
    Implement DTD loading (from a local source) for the limited number of
    DTDs which are allowed for Lore input documents.

    @ivar filename: The name of the file containing the lore input
        document.

    @ivar knownDTDs: A mapping from DTD system identifiers to L{FilePath}
        instances pointing to the corresponding DTD.
    """
    # Local copies of the permitted DTDs and entity files, shipped next to
    # this module.  The None key serves documents with no DTD declaration.
    s = FilePath(__file__).sibling
    knownDTDs = {
        None: s("xhtml1-strict.dtd"),
        "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd": s("xhtml1-strict.dtd"),
        "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd": s("xhtml1-transitional.dtd"),
        "xhtml-lat1.ent": s("xhtml-lat1.ent"),
        "xhtml-symbol.ent": s("xhtml-symbol.ent"),
        "xhtml-special.ent": s("xhtml-special.ent"),
    }
    del s
    def __init__(self, filename):
        self.filename = filename
    def resolveEntity(self, publicId, systemId):
        source = InputSource()
        source.setSystemId(systemId)
        try:
            dtdPath = self.knownDTDs[systemId]
        except KeyError:
            # NOTE(review): this message mentions only the transitional DTD,
            # but knownDTDs also accepts the strict DTD and several entity
            # files -- the wording understates what is actually allowed.
            raise process.ProcessingFailure(
                "Invalid DTD system identifier (%r) in %s. Only "
                "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd "
                "is allowed." % (systemId, self.filename))
        source.setByteStream(dtdPath.open())
        return source
def parseFileAndReport(filename, _open=open):
    """
    Parse and return the contents of the given lore XHTML document.

    @type filename: C{str}

    @param filename: The name of a file containing a lore XHTML document to
        load.

    @param _open: The callable used to open the named file; overridable for
        testing.  Defaults to the C{open} builtin (previously the Python
        2-only C{file} builtin, which behaves identically when called).

    @raise process.ProcessingFailure: When the contents of the specified file
        cannot be parsed.

    @rtype: A DOM Document

    @return: The document contained in the file named by C{filename}.
    """
    content = _TagTrackingContentHandler()
    error = _LocationReportingErrorHandler(content)
    parser = make_parser()
    parser.setContentHandler(content)
    parser.setErrorHandler(error)
    # In order to call a method on the expat parser which will be used by this
    # parser, we need the expat parser to be created. This doesn't happen
    # until reset is called, normally by the parser's parse method. That's too
    # late for us, since it will then go on to parse the document without
    # letting us do any extra set up. So, force the expat parser to be created
    # here, and then disable reset so that the parser created is the one
    # actually used to parse our document. Resetting is only needed if more
    # than one document is going to be parsed, and that isn't the case here.
    parser.reset()
    parser.reset = lambda: None
    # This is necessary to make the xhtml1 transitional declaration optional.
    # It causes LocalEntityResolver.resolveEntity(None, None) to be called.
    # LocalEntityResolver handles that case by giving out the xhtml1
    # transitional dtd. Unfortunately, there is no public API for manipulating
    # the expat parser when using xml.sax. Using the private _parser attribute
    # may break. It's also possible that make_parser will return a parser
    # which doesn't use expat, but uses some other parser. Oh well. :(
    # -exarkun
    parser._parser.UseForeignDTD(True)
    parser.setEntityResolver(_LocalEntityResolver(filename))
    # This is probably no-op because expat is not a validating parser. Who
    # knows though, maybe you figured out a way to not use expat.
    parser.setFeature(feature_validation, False)
    fObj = _open(filename)
    try:
        try:
            parser.parse(fObj)
        except IOError as e:
            # "except IOError, e" was Python 2-only syntax; "as" works on
            # Python 2.6+ and Python 3.
            raise process.ProcessingFailure(
                e.strerror + ", filename was '" + filename + "'")
    finally:
        fObj.close()
    return content.document
def makeSureDirectoryExists(filename):
    """
    Create the directory which will contain C{filename}, if it does not
    already exist.

    @type filename: C{str}
    @param filename: The path of a file which is about to be written.
    """
    parent = os.path.dirname(os.path.abspath(filename))
    if not os.path.exists(parent):
        os.makedirs(parent)
def doFile(filename, linkrel, ext, url, templ, options=None, outfileGenerator=getOutputFileName):
    """
    Process the input document at C{filename} and write an output document.

    @type filename: C{str}

    @param filename: The path to the input file which will be processed.

    @type linkrel: C{str}

    @param linkrel: An prefix to apply to all relative links in C{src} or
        C{href} attributes in the input document when generating the output
        document.

    @type ext: C{str}

    @param ext: The extension to use when selecting an output file name.  This
        replaces the extension of the input file name.

    @type url: C{str}

    @param url: A string which will be interpolated with the fully qualified
        Python name of any API reference encountered in the input document, the
        result of which will be used as a link to API documentation for that
        name in the output document.

    @type templ: A DOM Node or Document

    @param templ: The template on which the output document will be based.
        This is mutated and then serialized to the output file.

    @type options: C{dict} or C{None}

    @param options: Further specification of the desired form of the output.
        Valid keys in this dictionary::

            noapi: If present and set to a True value, links to API
                documentation will not be generated.

            version: A string which will be included in the output to indicate
                the version of this documentation.

    @type outfileGenerator: Callable of C{str}, C{str} returning C{str}

    @param outfileGenerator: Output filename factory.  This is invoked with the
        input filename and C{ext} and the output document is serialized to the
        file with the name returned.

    @return: C{None}
    """
    if options is None:
        # Avoid a shared mutable default argument; behaves exactly as the
        # previous `options={}` default when the caller omits the argument.
        options = {}
    doc = parseFileAndReport(filename)
    clonedNode = templ.cloneNode(1)
    munge(doc, clonedNode, linkrel, os.path.dirname(filename), filename, ext,
          url, options, outfileGenerator)
    newFilename = outfileGenerator(filename, ext)
    _writeDocument(newFilename, clonedNode)
def _writeDocument(newFilename, clonedNode):
    """
    Serialize the given node to XML into the named file.

    @param newFilename: The name of the file to which the XML will be
        written.  If this is in a directory which does not exist, the
        directory will be created.

    @param clonedNode: The root DOM node which will be serialized.

    @return: C{None}
    """
    makeSureDirectoryExists(newFilename)
    # Binary mode: toxml('utf-8') yields encoded bytes, which binary mode
    # writes unmodified on every platform (and which text mode would reject
    # on Python 3).  The context manager guarantees the file is closed even
    # if serialization or the write fails.
    with open(newFilename, 'wb') as f:
        f.write(clonedNode.toxml('utf-8'))
| {
"content_hash": "cc862e7423126cae5cc9e6e76086f7d8",
"timestamp": "",
"source": "github",
"line_count": 1118,
"max_line_length": 112,
"avg_line_length": 34.97406082289803,
"alnum_prop": 0.6552006342548784,
"repo_name": "biddisco/VTK",
"id": "5cc71aa77e17013fce6cdfc0c174c80b4266d77b",
"size": "39175",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "ThirdParty/Twisted/twisted/lore/tree.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "37444"
},
{
"name": "C",
"bytes": "45542302"
},
{
"name": "C++",
"bytes": "60467840"
},
{
"name": "CSS",
"bytes": "157961"
},
{
"name": "Cuda",
"bytes": "28721"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "IDL",
"bytes": "4406"
},
{
"name": "Java",
"bytes": "184678"
},
{
"name": "JavaScript",
"bytes": "978324"
},
{
"name": "Objective-C",
"bytes": "121232"
},
{
"name": "Objective-C++",
"bytes": "101052"
},
{
"name": "Pascal",
"bytes": "3255"
},
{
"name": "Perl",
"bytes": "177007"
},
{
"name": "Python",
"bytes": "13262355"
},
{
"name": "Shell",
"bytes": "41929"
},
{
"name": "Tcl",
"bytes": "1894036"
}
],
"symlink_target": ""
} |
import logging
import mimetypes
import os
import sys
from bottle import get, default_app, static_file
from bson import json_util as json
from greenlet import greenlet as Greenlet
from six import StringIO
from lxml import html
from lxml.html import builder as E
from sockjs.tornado import router as _router, SockJSRouter
from sockjs.tornado import SockJSConnection
from tornado import gen
from tornado.escape import xhtml_unescape as unescape
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.web import Application, FallbackHandler
from tornado.wsgi import WSGIContainer
from . import build, client, compiler, _log
from .model import model
# Registered SockJS/Tornado URL routes, extended by the `channel` decorator.
_routes = []
_root_path = os.path.dirname(__file__)
# Defaults; overridable via serve(view_path=..., controller_path=..., cdn=...).
_view_path = 'views'
_controller_path = 'controllers'
_cdn = True
# Client-side script bundle.  Each entry is either
# (presence-check expression, CDN URL, local fallback path) or
# (presence-check expression, local path); see the bundle loop in _index.
_bundle_files = [
    (
        'SockJS',
        '//cdnjs.cloudflare.com/ajax/libs/sockjs-client/0.3.4/sockjs.min.js',
        'sockjs-0.3.4.min.js'
    ),
    (
        'jQuery',
        '//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js',
        'jquery-1.10.2/jquery.min.js'
    ),
    (
        'angular',
        '//ajax.googleapis.com/ajax/libs/angularjs/1.2.6/angular.min.js',
        'angular-1.2.6/angular.min.js'
    ),
    (
        'check(angular.module, ["ngAnimate"])',
        '//ajax.googleapis.com/ajax/libs/angularjs/1.2.6/'
        'angular-animate.min.js',
        'angular-1.2.6/angular-animate.min.js'
    ),
    (
        'angulate',
        'angulate-0.1.0/angulate.js'
    ),
    (
        'avalon',
        'avalon.js'
    )
]
# Serve the SockJS client library locally rather than from its default CDN.
_router.DEFAULT_SETTINGS['sockjs_url'] = '/bundle/sockjs-0.3.4.min.js'
# Registry of RPC methods exposed to clients; populated by `method`.
_methods = {}
# Fix mimetypes
mimetypes.add_type('image/png', '.png', True)
mimetypes.add_type('audio/mpeg', '.mp3', True)
mimetypes.add_type('application/x-font-ttf', '.ttf', True)
mimetypes.add_type('application/x-font-woff', '.woff', True)
class ChannelConnection(SockJSConnection):
    """
    Base SockJS connection for avalon channels.

    The `channel` decorator subclasses this, filling in `route` and `func`;
    incoming messages are dispatched to `func` on a greenlet.
    """
    # Set on subclasses created by the `channel` decorator.
    route = None
    func = None
    def __init__(self, *args, **kwargs):
        super(ChannelConnection, self).__init__(*args, **kwargs)
        # Connection info (e.g. client IP); populated in on_open.
        self.info = None
    def on_open(self, info):
        self.info = info
        _log.info('OPEN Channel {0} ({1})'.format(self.route, info.ip))
    @gen.coroutine
    def on_message(self, message):
        try:
            # Run the handler on a fresh greenlet so it may cooperatively
            # yield; exceptions are logged rather than closing the channel.
            yield Greenlet(gen.coroutine(self.func)).switch(message)
        except Exception as e:
            _log.exception(e)
    def on_close(self):
        _log.info('CLOSE Channel {0} ({1})'.format(self.route, self.info.ip))
def channel(route):
    """
    Decorator factory: register a function as the message handler for a
    SockJS channel mounted at ``route``.

    The handler is wrapped in a dynamically created ``ChannelConnection``
    subclass whose URLs are appended to the module route table; the function
    itself is returned unchanged.
    """
    def decorator(handler):
        subclass = type(
            'ChannelConnection',
            (ChannelConnection, ),
            {'route': route, 'func': handler})
        _routes.extend(SockJSRouter(subclass, route).urls)
        return handler
    return decorator
def method(func_or_str=None):
    """
    Register a function as a server method callable over the avalon channel.

    Usable either bare (``@method``) or with an explicit name
    (``@method('pkg.name')``).  The default name is
    ``<module>.<function name>``.  The registered name is also stored on the
    function as ``__server_method__``.

    @raise AssertionError: if a method with the same name is already
        registered.
    """
    def _register(f, method_name):
        # Single registration path shared by both decorator forms; the
        # previous implementation duplicated this logic.
        assert method_name not in _methods, \
            "Server method '{0}' already exists".format(method_name)
        _methods[method_name] = f
        f.__server_method__ = method_name
        return f
    if callable(func_or_str):
        # Bare @method usage: func_or_str is the function itself.
        f = func_or_str
        return _register(f, '{0}.{1}'.format(f.__module__, f.__name__))
    def _d(f):
        method_name = func_or_str or '{0}.{1}'.format(
            f.__module__, f.__name__)
        return _register(f, method_name)
    return _d
@channel('/_avalon')
def _server(request, message):
    """
    Built-in avalon control channel.

    Dispatches incoming JSON messages: ``subscribe`` and ``update`` operate on
    the shared model; ``rpc`` invokes a registered server method and sends the
    result back to the requesting connection.
    """
    payload = json.loads(message)
    action = payload['method']
    params = payload['params']
    if action == 'subscribe':
        model.subscribe(request, *params)
    elif action == 'update':
        model[params[0]].update(query=params[1], **params[2])
    elif action == 'rpc':
        if not _methods.get(params[0]):
            raise ValueError('Method {0} not found'.format(params[0]))
        request.send(json.dumps({
            'id': payload['id'],
            'response': 'rpc',
            'result': _methods[params[0]](*params[1:])
        }))
@get('/')
def _index():
    """
    Build and return the single-page application's index document.

    Walks the view directory, inlining styles, collecting <head>/<body>
    content and <template> elements, then appends the compiled client
    runtime, the script bundle, registered templates and the angular
    bootstrap script.
    """
    # Gather, convert and process assets
    DOCTYPE = '<!DOCTYPE html>'
    style = StringIO()
    head = E.HEAD()
    body = E.BODY()
    templates = []
    template_names = []
    def visit(node, f):
        # Recursively extract <template> children of `node`; `f` is the
        # source filename, used only for log messages.
        for c in node.getchildren():
            visit(c, f)
            if c.tag != 'template':
                continue
            # Template names are declared as attributes starting with ':'.
            names = [n[1:] for n in c.keys() if n and n[0] == ':']
            if not names:
                _log.error('Unbound template found (%s)', f)
                continue
            for name in names:
                if name in template_names:
                    _log.error('Duplicate template "%s" found (%s)', name, f)
                    continue
                template = E.SCRIPT(
                    id='template-{0}'.format(name),
                    type='text/x-angulate-template'
                )
                template.text = c.text
                # lxml extend() MOVES the children into the new element.
                template.extend(c.getchildren())
                templates.append(template)
            template_names.extend(names)
            node.remove(c)
        return
    for dirpath, dirnames, filenames in os.walk(_view_path):
        for filename in filenames:
            ext = os.path.splitext(filename)[-1]
            filename = os.path.join(dirpath, filename)
            # Style assets are concatenated into one inline <style> block.
            handler = build.style_handler.get(ext)
            if handler:
                style.write(handler(filename))
                continue
            handler = build.template_handler.get(ext)
            if not handler:
                continue
            contents = handler(filename)
            if not contents:
                _log.warning('View is empty (%s)', filename)
                continue
            try:
                dom = html.fromstring('<head></head>' + contents)
            except Exception as e:
                _log.error('Parse error (%s) %s', filename, e)
                continue
            for e in dom.getchildren():
                if e.tag == 'head':
                    head.extend(e.getchildren())
                elif e.tag == 'body':
                    visit(e, filename)
                    body.text = (body.text or '') + (e.text or '')
                    body.extend(e.getchildren())
                elif e.tag == 'template':
                    # Wrap a bare top-level template so visit() can detach it.
                    visit(E.BODY(e), filename)
                else:
                    _log.error('View is invalid (%s)', filename)
                    continue
    # Emit one registration call per collected template name.
    s = 'angulate.registerTemplate("{0}", "{1}");'
    templates.append(
        E.SCRIPT(
            '\n'.join([
                s.format(name, 'template-{0}'.format(name))
                for name in template_names
            ]),
            type='text/javascript'))
    # Append styles
    head.append(E.STYLE(style.getvalue()))
    # Append compiled runtime and Javascript functions
    body.extend([
        E.SCRIPT(
            compiler.runtime(),
            type='text/javascript'),
        E.SCRIPT(
            '\n'.join(f for f in client.compiled()),
            type='text/javascript')
    ])
    # Append bundle
    for b in _bundle_files:
        assert len(b) in [2, 3], 'Invalid bundle file config'
        if len(b) == 2:
            # (check, local_path): always serve the local copy.
            body.append(E.SCRIPT(
                src='bundle/{0}'.format(b[1]),
                type='text/javascript'))
        elif _cdn:
            # (check, cdn_url, local_path): load from the CDN, falling back
            # to the local copy via document.write when the check fails.
            link = html.tostring(E.SCRIPT(
                src='bundle/{0}'.format(b[2]),
                type='text/javascript'
            ), encoding='utf-8')
            link = link.decode('utf-8').replace('</script>', '<\/script>')
            body.extend([
                E.SCRIPT(
                    src=b[1],
                    type='text/javascript'),
                E.SCRIPT(
                    "window.{0} || document.write('{1}')".format(b[0], link),
                    type='text/javascript')
            ])
        else:
            body.append(E.SCRIPT(
                src='bundle/{0}'.format(b[2]),
                type='text/javascript'))
    # Append templates
    body.extend(templates)
    # Bootstrap angular
    body.append(E.SCRIPT(
        '\n'.join([
            'window.app = angular.module("app", ["ngAnimate", "angulate"]);',
            'window.app.run(["$rootScope", function($rootScope) {',
            ' $rootScope._session = avalon.session;',
            # NOTE(review): there is no comma after the next literal, so it is
            # concatenated with '}])'.  The emitted JS is still valid, but the
            # formatting is likely unintended -- confirm.
            ' avalon.scope = $rootScope;'
            '}])',
            'angular.bootstrap(document, ["app"]);'
        ]),
        type='text/javascript'))
    return unescape(html.tostring(E.HTML(head, body), doctype=DOCTYPE,
                                  encoding='utf-8'))
# Raw string: '\.' in a plain literal is an invalid escape sequence
# (DeprecationWarning on modern Python); the raw form has the same value.
@get(r'/bundle/<filename:re:(?!\.).+>')
def _bundle(filename):
    """
    Serve a file from the package's bundled static assets.

    The route regex rejects filenames beginning with a dot (hidden files).
    """
    return static_file(filename, root=os.path.join(_root_path, 'bundle'))
# Raw string: '\.' in a plain literal is an invalid escape sequence
# (DeprecationWarning on modern Python); the raw form has the same value.
@get(r'/<filename:re:(?!\.).+>')
def _static(filename):
    """
    Serve a static file from the configured view directory.

    The route regex rejects filenames beginning with a dot (hidden files).
    """
    return static_file(filename, root=_view_path)
def serve(db=None, mount_app=None, port=8080, verbose=False,
          view_path=None, controller_path=None, cdn=True):
    """
    Configure and start the avalon HTTP/SockJS server (blocks forever).

    :param db: optional database spec passed to model.connect.
    :param mount_app: optional (url_pattern, wsgi_app) pair mounted before
        the default bottle fallback.
    :param port: TCP port to listen on.
    :param verbose: enable INFO-level logging when true.
    :param view_path: overrides the module-level view directory.
    :param controller_path: overrides the module-level controller directory.
    :param cdn: serve bundle files from CDNs with local fallback when true.
    """
    global _view_path, _controller_path, _cdn
    _view_path = view_path or _view_path
    _controller_path = controller_path or _controller_path
    _cdn = cdn
    if verbose:
        _log.setLevel(logging.INFO)
    if mount_app:
        r = _routes + [(mount_app[0], FallbackHandler, {
            'fallback': WSGIContainer(mount_app[1])
        })]
    else:
        r = _routes
    # Connect to db
    if db:
        model.connect(db)
    # All unmatched requests fall through to the bottle WSGI app.
    wsgi_app = WSGIContainer(default_app())
    app = Application(r + [
        ('.*', FallbackHandler, {'fallback': wsgi_app})
    ])
    # Import controllers
    module_path = os.path.join(_controller_path, '..')
    if module_path not in sys.path:
        sys.path.append(module_path)
    for dirpath, dirnames, filenames in os.walk(_controller_path):
        for f in filenames:
            module, ext = os.path.splitext(f)
            if ext != '.py':
                continue
            # NOTE(review): dirpath is used directly as the package name;
            # this only yields a valid module path for the top-level
            # controller directory (subdirectories would contain os.sep) --
            # confirm nested controller directories are unsupported.
            Greenlet(__import__).switch('{0}.{1}'.format(dirpath, module))
    server = HTTPServer(app)
    server.listen(port)
    IOLoop.instance().start()
| {
"content_hash": "6c32d961f97a69832cea40295674db05",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 77,
"avg_line_length": 30.625,
"alnum_prop": 0.5187470336971998,
"repo_name": "nehz/avalon",
"id": "6354ebe51b2b4f88a4649ce4362e8a1da3c9389c",
"size": "10780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "avalon/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "16739"
},
{
"name": "Python",
"bytes": "59853"
}
],
"symlink_target": ""
} |
"""Tests and benchmarks for the trace_type module."""
import collections
import timeit
from absl.testing import parameterized
from tensorflow.core.function import trace_type
from tensorflow.core.function.trace_type import default_types
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import function
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
from tensorflow.python.types import trace
class TestAttr:
  """Helps test attrs collections."""

  # Minimal attribute stand-in: only the name is stored, matching its use
  # in __attrs_attrs__ tuples within these tests.
  def __init__(self, name):
    self.name = name
class TestAttrsClass:
  """Hand-rolled attrs-style class (via __attrs_attrs__) for testing."""

  __attrs_attrs__ = (TestAttr('a'), TestAttr('b'))

  def __init__(self, a, b):
    self.a = a
    self.b = b

  def __eq__(self, other):
    # Guard clause instead of a single chained boolean expression.
    if not isinstance(other, TestAttrsClass):
      return False
    return self.a == other.a and self.b == other.b
class DummyGenericClass:
  """Empty class used to check GenericType tracing for memory leaks."""
  # The docstring alone forms the class body; no `pass` needed.
class TraceTypeBuilderTest(test.TestCase, parameterized.TestCase):
  """Tests equality/subtyping semantics of trace_type.from_value()."""

  @combinations.generate(combinations.combine(mode=['eager']))
  def testIteratorAliasing(self):
    # Aliasing matters: a pair that references one iterator twice must not
    # match a pair of two distinct iterators.
    it1 = iter(dataset_ops.DatasetV2.from_tensor_slices([1, 2, 3]))
    it2 = iter(dataset_ops.DatasetV2.from_tensor_slices([1, 2, 3]))
    self.assertEqual(
        trace_type.from_value((it1, it1)), trace_type.from_value((it2, it2)))
    self.assertEqual(
        trace_type.from_value((it1, it2)), trace_type.from_value((it2, it1)))
    self.assertNotEqual(
        trace_type.from_value((it1, it1)), trace_type.from_value((it1, it2)))

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def testIteratorTypesImplementTracing(self):
    # Both the iterator class and its spec must implement the protocol.
    self.assertTrue(
        issubclass(iterator_ops.OwnedIterator, trace.SupportsTracingProtocol))
    self.assertTrue(
        issubclass(iterator_ops.IteratorSpec, trace.SupportsTracingProtocol))

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def testCompositeAndSpec(self):
    # A composite tensor and its matching TypeSpec give equal trace types.
    composite_tensor = ragged_tensor.RaggedTensor.from_row_splits(
        values=[1, 2, 3], row_splits=[0, 2, 3])
    spec = ragged_tensor.RaggedTensorSpec([2, None], dtypes.int32)
    self.assertEqual(
        trace_type.from_value(composite_tensor), trace_type.from_value(spec))

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def testVariableAliasing(self):
    # Variables are also tracked by their aliasing pattern, not identity:
    # any all-unique triple equals any other all-unique triple.
    v1 = resource_variable_ops.ResourceVariable([1])
    v2 = resource_variable_ops.ResourceVariable([1])
    v3 = resource_variable_ops.ResourceVariable([1])
    all_unique = trace_type.from_value((v1, v2, v3))
    all_same = trace_type.from_value((v1, v1, v1))
    self.assertNotEqual(all_unique, all_same)
    # v3 is deliberately rebound to a fresh variable here.
    v3 = resource_variable_ops.ResourceVariable([2])
    v4 = resource_variable_ops.ResourceVariable([2])
    v5 = resource_variable_ops.ResourceVariable([2])
    all_unique_again = trace_type.from_value((v3, v4, v5))
    all_same_again = trace_type.from_value((v4, v4, v4))
    self.assertEqual(all_unique, all_unique_again)
    self.assertEqual(all_same, all_same_again)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def testTensorEquality(self):
    # Tensor trace types compare by shape and dtype, not by element values
    # (zeros vs ones with identical shape/dtype are equal below).
    context = trace_type.InternalTracingContext()
    tensor_a = array_ops.zeros([11, 3, 5],
                               dtype=dtypes.int32).__tf_tracing_type__(context)
    tensor_b = array_ops.zeros([11, 4, 5],
                               dtype=dtypes.int32).__tf_tracing_type__(context)
    tensor_c = array_ops.zeros(
        [11, 3, 5], dtype=dtypes.float32).__tf_tracing_type__(context)
    tensor_d = array_ops.ones([11, 3, 5],
                              dtype=dtypes.int32).__tf_tracing_type__(context)
    self.assertNotEqual(tensor_a, tensor_b)
    self.assertNotEqual(tensor_a, tensor_c)
    self.assertNotEqual(tensor_b, tensor_c)
    self.assertEqual(tensor_a, tensor_d)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def testTensorAndSpecEquality(self):
    # A concrete tensor equals an unnamed spec of the same shape/dtype,
    # but a named spec is a distinct trace type.
    context = trace_type.InternalTracingContext()
    tensor = array_ops.zeros([11, 3, 5],
                             dtype=dtypes.int32).__tf_tracing_type__(context)
    spec = tensor_spec.TensorSpec(
        [11, 3, 5], dtype=dtypes.int32).__tf_tracing_type__(context)
    spec_with_name = tensor_spec.TensorSpec(
        [11, 3, 5], dtype=dtypes.int32,
        name='name').__tf_tracing_type__(context)
    self.assertEqual(tensor, spec)
    self.assertNotEqual(tensor, spec_with_name)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def testTensorShapeUnknown(self):
    # Fully-unknown shapes with the same dtype still compare equal.
    context = trace_type.InternalTracingContext()
    spec_1 = tensor_spec.TensorSpec(
        None, dtype=dtypes.int32).__tf_tracing_type__(context)
    spec_2 = tensor_spec.TensorSpec(
        None, dtype=dtypes.int32).__tf_tracing_type__(context)
    self.assertEqual(spec_1, spec_2)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def testAttrsTraceTypeGeneration(self):
    # attrs-style classes map onto default_types.Attrs with a per-attribute
    # trace type (Literal ints here).
    trace_a = trace_type.from_value(TestAttrsClass(1, 2))
    expected = default_types.Attrs.from_type_and_attributes(
        TestAttrsClass, (default_types.Literal(1), default_types.Literal(2)))
    self.assertEqual(trace_a, expected)
    self.assertTrue(trace_a.is_subtype_of(trace_a))

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def testTupleEquality(self):
    # Tuples compare element-wise; differing length or element differs.
    trace_a = trace_type.from_value((1, 2, 3, 4))
    trace_b = trace_type.from_value((1, 2, 2, 4))
    trace_c = trace_type.from_value((1, 2, 3))
    trace_d = trace_type.from_value((1, 2, 3, 4))
    self.assertNotEqual(trace_a, trace_b)
    self.assertNotEqual(trace_a, trace_c)
    self.assertNotEqual(trace_b, trace_c)
    self.assertEqual(trace_a, trace_d)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def testListEquality(self):
    # Same semantics as tuples, for lists.
    trace_a = trace_type.from_value([1, 2, 3, 4])
    trace_b = trace_type.from_value([1, 2, 2, 4])
    trace_c = trace_type.from_value([1, 2, 3])
    trace_d = trace_type.from_value([1, 2, 3, 4])
    self.assertNotEqual(trace_a, trace_b)
    self.assertNotEqual(trace_a, trace_c)
    self.assertNotEqual(trace_b, trace_c)
    self.assertEqual(trace_a, trace_d)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def testDictEquality(self):
    # Dict trace types are insensitive to insertion order (trace_a vs
    # trace_d) but sensitive to values.
    trace_a = trace_type.from_value({1: 2, 3: 4})
    trace_b = trace_type.from_value({1: 2, 3: 2})
    trace_c = trace_type.from_value({1: 2, 3: 0})
    trace_d = trace_type.from_value({3: 4, 1: 2})
    self.assertNotEqual(trace_a, trace_b)
    self.assertNotEqual(trace_a, trace_c)
    self.assertNotEqual(trace_b, trace_c)
    self.assertEqual(trace_a, trace_d)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def testComplexStruct(self):
    # Deeply nested containers must be deterministic and self-subtyping.
    struct = {(1, 2, 3): {(1, 2): {12: 2}}, (3, 2, 3): (2, {2: 3})}
    trace_a = trace_type.from_value(struct)
    trace_b = trace_type.from_value(struct)
    self.assertEqual(trace_a, trace_b)
    self.assertTrue(trace_a.is_subtype_of(trace_b))
    self.assertTrue(trace_b.is_subtype_of(trace_a))

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def testCustomUnequableTypeSucceeds(self):

    class CustomUnequable:

      def __eq__(self, o):
        raise ValueError

      def __hash__(self):
        return 0

    object_a = CustomUnequable()
    object_b = CustomUnequable()
    trace_a_1 = trace_type.from_value(object_a)
    trace_a_2 = trace_type.from_value(object_a)
    trace_b = trace_type.from_value(object_b)
    # Same underlying object => equal trace types without calling __eq__.
    self.assertEqual(trace_a_1, trace_a_2)
    with self.assertRaises(ValueError):
      trace_a_1.__eq__(trace_b)
    del object_a
    # After the original object is deleted, its trace types no longer
    # compare equal. NOTE(review): this suggests the generic trace type
    # holds only a weak reference to the value - confirm.
    self.assertNotEqual(trace_a_1, trace_a_2)
    self.assertNotEqual(trace_a_2, trace_a_1)
    del object_b
    self.assertNotEqual(trace_a_1, trace_a_2)
    self.assertNotEqual(trace_a_2, trace_a_1)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def testCustomUnhashableTypeFailsGracefully(self):

    class CustomUnhashable:

      def __eq__(self, o):
        return True

    obj = CustomUnhashable()
    # Defining __eq__ without __hash__ makes the class unhashable, which
    # the generic tracing type must report with a clear error.
    with self.assertRaisesRegex(
        TypeError,
        r'could not be represented through the generic tracing type'):
      trace_type.from_value(obj)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def testGetPlaceholderValue(self):
    # For pure-Python structures the placeholder equals the original value.
    composite_value = [1, 2, (3, [4, 5]), {6: [7]}, TestAttrsClass(8, (10, 11))]
    composite_type = trace_type.from_value(composite_value)
    placeholder_value = composite_type._placeholder_value()
    self.assertEqual(composite_value, placeholder_value)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def testWrappedNamedTuple(self):
    ActualType = collections.namedtuple('ActualType', ['a', 'b', 'c'])

    class MockWrapper(tuple):
      # Generated through trackable data structures:
      # //tensorflow/python/training/tracking/data_structures.py
      # With design pattern similar to Python functools:
      # https://docs.python.org/3/library/functools.html?highlight=__wrapped__#functools.update_wrapper
      __wrapped__ = ActualType(1, 2, 3)

    # The wrapper is traced as the type it wraps (__wrapped__ is followed).
    self.assertEqual(
        trace_type.from_value(MockWrapper()),
        trace_type.from_value(ActualType(1, 2, 3)))
class SignatureToTraceTypeTest(test.TestCase):
  """Legacy input_signature specs map to trace types built from the specs."""

  def testTensorSpecs(self):
    # With is_legacy_signature=True, a TensorSpec's trace type is the spec
    # itself.
    self.assertEqual(
        trace_type.from_value(
            tensor_spec.TensorSpec(shape=None),
            trace_type.InternalTracingContext(is_legacy_signature=True)),
        tensor_spec.TensorSpec(shape=None))

  def testListofTensorSpecs(self):
    # Lists of specs become default_types.List wrapping the specs.
    self.assertEqual(
        trace_type.from_value([
            tensor_spec.TensorSpec(shape=None),
            tensor_spec.TensorSpec(shape=None)
        ], trace_type.InternalTracingContext(is_legacy_signature=True)),
        default_types.List(
            tensor_spec.TensorSpec(shape=None),
            tensor_spec.TensorSpec(shape=None)))

  def testDictofTensorSpecs(self):
    # Dicts of specs become default_types.Dict keyed like the input.
    self.assertEqual(
        trace_type.from_value(
            {
                'a': tensor_spec.TensorSpec(shape=None),
                'b': tensor_spec.TensorSpec(shape=None)
            }, trace_type.InternalTracingContext(is_legacy_signature=True)),
        default_types.Dict({
            'a': tensor_spec.TensorSpec(shape=None),
            'b': tensor_spec.TensorSpec(shape=None)
        }))
class TraceTypeMemoryTest(test.TestCase):
  """Checks that from_value() leaks no Python objects per value kind."""

  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testGeneric(self):
    trace_type.from_value(1)
    trace_type.from_value(DummyGenericClass())

  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testTensor(self):
    tensor = array_ops.zeros([10])
    trace_type.from_value(tensor)

  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testTuple(self):
    trace_type.from_value((1, 2, 3))

  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testDict(self):
    trace_type.from_value({1: 1, 2: 2, 3: 3})

  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testList(self):
    trace_type.from_value([1, 2, 3])

  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testAttrs(self):
    trace_type.from_value(TestAttrsClass(1, 2))
class TraceTypeGenerationBenchmark(test.Benchmark):
  """Benchmarks trace type generation/lookup for common argument kinds."""

  def benchmarkTensor(self):
    shapes = [[1], [2, 19], [5, 11, 24], [4, 5, 9, 23]]
    tensors = []
    for s in shapes:
      tensors.append(array_ops.zeros(s))

    def encode_tensors(tensors):
      trace_type.from_value(tensors)

    iterations = 100000
    t = timeit.timeit(lambda: encode_tensors(tensors), number=iterations)
    # Report total wall time plus the per-call average in milliseconds.
    self.report_benchmark(
        name='tensor_cache_key_generation',
        iters=iterations,
        wall_time=t,
        metrics=[{
            'name': 'tensor_cache_key_generation_avg_ms',
            'value': t / iterations * 1000
        }])

  def benchmarkTensorSpec(self):
    shapes = [[1], [2, 19], [5, 11, 24], [4, 5, 9, 23]]
    tensor_specs = []
    for s in shapes:
      tensor_specs.append(tensor_spec.TensorSpec(s, dtypes.int32))

    def encode_tensor_specs(tensor_specs):
      trace_type.from_value(tensor_specs)

    iterations = 100000
    t = timeit.timeit(
        lambda: encode_tensor_specs(tensor_specs), number=iterations)
    self.report_benchmark(
        name='tensor_spec_cache_key_generation',
        iters=iterations,
        wall_time=t,
        metrics=[{
            'name': 'tensor_spec_cache_key_generation_avg_ms',
            'value': t / iterations * 1000
        }])

  def benchmarkVariable(self):
    # Mixed dtypes/shapes to cover the variable encoding paths.
    var_list = [
        variables.Variable(1.0),
        variables.Variable(1),
        variables.Variable([1])
    ]

    def encode_variables(var_list):
      trace_type.from_value(var_list)

    iterations = 10000
    t = timeit.timeit(lambda: encode_variables(var_list), number=iterations)
    self.report_benchmark(
        name='variable_cache_key_generation',
        iters=iterations,
        wall_time=t,
        metrics=[{
            'name': 'variable_cache_key_generation_avg_ms',
            'value': t / iterations * 1000
        }])

  def benchmarkTraceTypeLookup(self):

    @function.defun
    def defined(t):
      return t

    # Pre-populate the function cache with several concrete signatures,
    # then time a lookup that hits one of them.
    call_arg_list = [
        1,
        array_ops.zeros([5, 13]),
        array_ops.zeros([9, 22, 24]),
        array_ops.zeros([5, 13, 2])
    ]

    for c in call_arg_list:
      defined(c)

    lookup_call_arg = array_ops.zeros([5, 13])

    iterations = 10000
    t = timeit.timeit(stmt=lambda: defined(lookup_call_arg), number=iterations)

    self.report_benchmark(
        name='cache_key_lookup',
        iters=iterations,
        wall_time=t,
        metrics=[{
            'name': 'cache_key_lookup_avg_ms',
            'value': t / iterations * 1000
        }])

  def benchmarkNestedStruct(self):
    struct = {(1, 2, 3): {(1, 2): {12: 2}}, (3, 2, 3): (2, {2: 3})}

    def encode_struct(struct):
      trace_type.from_value(struct)

    iterations = 100000
    t = timeit.timeit(lambda: encode_struct(struct), number=iterations)
    self.report_benchmark(
        name='nested_struct_cache_key_generation',
        iters=iterations,
        wall_time=t,
        metrics=[{
            'name': 'nested_struct_cache_key_generation_avg_ms',
            'value': t / iterations * 1000
        }])

  def benchmarkFunctionInvocation(self):
    struct = (variables.Variable(1.0), array_ops.zeros([5, 13]), {
        'tensor': array_ops.zeros([5, 20]),
        'variable': variables.Variable(1.0)
    })

    @function.defun
    def defined(t):
      return t

    defined(struct)  # Get it traced and cached.

    iterations = 10000
    t = timeit.timeit(lambda: defined(struct), number=iterations)
    self.report_benchmark(
        name='function_invocation',
        iters=iterations,
        wall_time=t,
        metrics=[{
            'name': 'function_invocation_time_avg_ms',
            'value': t / iterations * 1000
        }])
if __name__ == '__main__':
  # Run with TF2 behavior enabled so eager-mode combinations use the
  # default stack.
  v2_compat.enable_v2_behavior()
  test.main()
| {
"content_hash": "ac86358e034a865163c875feb362d4d1",
"timestamp": "",
"source": "github",
"line_count": 453,
"max_line_length": 103,
"avg_line_length": 34.147902869757175,
"alnum_prop": 0.656797465899541,
"repo_name": "tensorflow/tensorflow-experimental_link_static_libraries_once",
"id": "216032306706e5f8cc4b3aaed901dc2d78267954",
"size": "16158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/core/function/trace_type/trace_type_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "36962"
},
{
"name": "C",
"bytes": "1343737"
},
{
"name": "C#",
"bytes": "13584"
},
{
"name": "C++",
"bytes": "123969891"
},
{
"name": "CMake",
"bytes": "182027"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "416070"
},
{
"name": "Go",
"bytes": "2095490"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1074471"
},
{
"name": "Jupyter Notebook",
"bytes": "789401"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "11067751"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "169288"
},
{
"name": "Objective-C++",
"bytes": "294177"
},
{
"name": "Pawn",
"bytes": "5552"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42585406"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "9199"
},
{
"name": "Shell",
"bytes": "620507"
},
{
"name": "Smarty",
"bytes": "89545"
},
{
"name": "SourcePawn",
"bytes": "14577"
},
{
"name": "Starlark",
"bytes": "7486225"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from future.builtins import open, range, str
from _ast import PyCF_ONLY_AST
import os
from shutil import copyfile, copytree
from django.contrib.auth import get_user_model
from django.db import connection
from django.template import Context, Template
from django.test import TestCase as BaseTestCase
from django.test.client import RequestFactory
from mezzanine.conf import settings
from mezzanine.utils.importing import path_for_import
User = get_user_model()

# Ignore these warnings in pyflakes - if added to, please comment why.
# Matching is by substring (see _run_checker_for_package below).
IGNORE_ERRORS = (

    # local_settings import.
    "'from local_settings import *' used",

    # Used to version subpackages.
    "'__version__' imported but unused",

    # No caching fallback.
    "redefinition of function 'nevercache'",

    # Dummy fallback in templates for django-compressor.
    "redefinition of function 'compress'",

    # Fabric config fallback.
    "redefinition of unused 'conf'",

    # Fixing these would make the code uglier IMO.
    "continuation line",
    "closing bracket does not match",

    # Jython compatibility.
    # NOTE(review): the unterminated quote in the string below looks
    # deliberate (substring match catches 'Image' and variants) - confirm.
    "redefinition of unused 'Image",

    # Django custom user compatibility.
    "'get_user_model' imported but unused",

    # Actually a Python template file.
    "live_settings.py",

    # lambdas are OK.
    "do not assign a lambda",

    # Imported during deprecation grace period after moving to utils.admin.
    "'SingletonAdmin' imported but unused",
)
class TestCase(BaseTestCase):
    """
    Base test case shared by tests across Mezzanine's different apps.
    """

    def setUp(self):
        """
        Create a superuser for admin-related tests, turn on the debug
        cursor so database queries can be counted, and prepare a request
        factory for view tests.
        """
        self._username = "test"
        self._password = "test"
        self._emailaddress = "example@example.com"
        credentials = (self._username, self._emailaddress, self._password)
        self._user = User.objects.create_superuser(*credentials)
        self._request_factory = RequestFactory()
        self._debug_cursor = connection.force_debug_cursor
        connection.force_debug_cursor = True

    def tearDown(self):
        """
        Remove the superuser and restore the original debug cursor flag.
        """
        self._user.delete()
        connection.force_debug_cursor = self._debug_cursor

    def queries_used_for_template(self, template, **context):
        """
        Render the given template string and return the number of
        database queries the render performed.
        """
        connection.queries_log.clear()
        Template(template).render(Context(context))
        return len(connection.queries)

    def create_recursive_objects(self, model, parent_field, **kwargs):
        """
        Create three levels of recursive objects, three objects per level.
        """
        for _ in range(3):
            kwargs[parent_field] = None
            level1 = model.objects.create(**kwargs)
            for _ in range(3):
                kwargs[parent_field] = level1
                level2 = model.objects.create(**kwargs)
                for _ in range(3):
                    kwargs[parent_field] = level2
                    model.objects.create(**kwargs)
def copy_test_to_media(module, name):
    """
    Copies a file from Mezzanine's test data path to MEDIA_ROOT.
    Used in tests and demo fixtures.
    """
    source = os.path.join(path_for_import(module), "static", "test", name)
    destination = os.path.join(settings.MEDIA_ROOT, name)
    destination_dir = os.path.dirname(destination)
    if not os.path.exists(destination_dir):
        os.makedirs(destination_dir)
    # Directories are copied recursively, single files directly.
    copy = copytree if os.path.isdir(source) else copyfile
    try:
        copy(source, destination)
    except OSError:
        # Best-effort: ignore copy failures (e.g. target already exists).
        pass
def _run_checker_for_package(checker, package_name, extra_ignore=None):
    """
    Runs the checker function across every Python module in the
    given package, yielding each warning not matched by the ignore list.
    """
    ignore_strings = IGNORE_ERRORS
    if extra_ignore:
        ignore_strings += extra_ignore
    package_path = path_for_import(package_name)
    for root, dirs, files in os.walk(str(package_path)):
        in_migrations = root.split(os.sep)[-1] in ["migrations"]
        for name in files:
            # Skip non-Python files, generated settings and migrations.
            if name == "local_settings.py" or not name.endswith(".py") \
                    or in_migrations:
                continue
            for warning in checker(os.path.join(root, name)):
                if not any(ignore in warning for ignore in ignore_strings):
                    # Report paths relative to the package name.
                    yield warning.replace(package_path, package_name, 1)
def run_pyflakes_for_package(package_name, extra_ignore=None):
    """
    If pyflakes is installed, run it across the given package name
    returning any warnings found.
    """
    from pyflakes.checker import Checker

    def pyflakes_checker(path):
        # NOTE(review): the "U" (universal newlines) open mode is deprecated
        # and removed in Python 3.11 - confirm the supported Python range.
        with open(path, "U") as source_file:
            source = source_file.read()
        try:
            # Parse only (PyCF_ONLY_AST): the module is never executed.
            tree = compile(source, path, "exec", PyCF_ONLY_AST)
        except (SyntaxError, IndentationError) as value:
            info = (path, value.lineno, value.args[0])
            yield "Invalid syntax in %s:%d: %s" % info
        else:
            # Pyflakes collects its findings on the checker's messages list.
            result = Checker(tree, path)
            for warning in result.messages:
                yield str(warning)
    args = (pyflakes_checker, package_name, extra_ignore)
    return _run_checker_for_package(*args)
def run_pep8_for_package(package_name, extra_ignore=None):
    """
    If pep8 is installed, run it across the given package name
    returning any warnings or errors found.
    """
    import pep8

    class Checker(pep8.Checker):
        """
        Subclass pep8's Checker to hook into error reporting.
        """
        def __init__(self, *args, **kwargs):
            super(Checker, self).__init__(*args, **kwargs)
            # Redirect pep8's error callback to our collector below.
            self.report_error = self._report_error

        def _report_error(self, line_number, offset, text, check):
            """
            Store pairs of line numbers and errors.
            """
            # ``text`` is "E123 message"; keep only the message part.
            self.errors.append((line_number, text.split(" ", 1)[1]))

        def check_all(self, *args, **kwargs):
            """
            Assign the errors attribute and return it after running.
            """
            self.errors = []
            super(Checker, self).check_all(*args, **kwargs)
            return self.errors

    def pep8_checker(path):
        for line_number, text in Checker(path).check_all():
            yield "%s:%s: %s" % (path, line_number, text)
    args = (pep8_checker, package_name, extra_ignore)
    return _run_checker_for_package(*args)
| {
"content_hash": "3a5c9f0f70086d7d65ceede72923b81f",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 75,
"avg_line_length": 31.685185185185187,
"alnum_prop": 0.6113383985973115,
"repo_name": "spookylukey/mezzanine",
"id": "d2a33169c9c511ad0d894538006e480d960096cf",
"size": "6844",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mezzanine/utils/tests.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "60127"
},
{
"name": "HTML",
"bytes": "89327"
},
{
"name": "JavaScript",
"bytes": "453729"
},
{
"name": "Python",
"bytes": "659453"
}
],
"symlink_target": ""
} |
"""
Expectation Propagation
=======================
Introduction
------------
This module implements the building-blocks for EP inference: EP parameter
fitting, log of the marginal likelihood, and derivative of the log of the
marginal likelihood.
"""
from .ep import EP
from .linear_kernel import EPLinearKernel
__all__ = ["EP", "EPLinearKernel"]
| {
"content_hash": "0c769d697181d540068127e22a04e1f0",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 73,
"avg_line_length": 21.9375,
"alnum_prop": 0.6894586894586895,
"repo_name": "limix/glimix-core",
"id": "38733f0d9b09d50e2183816ae4f84e0ef15cf7da",
"size": "351",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "glimix_core/_ep/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "300302"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from transactions.models import *
class EndUserAdmin(admin.ModelAdmin):
    # Registered below, but hidden from the admin index page via the
    # empty perms dict returned here.
    def get_model_perms(self, request):
        '''
        Return empty perms dict thus hiding the model from admin index.
        '''
        return {}
class StudentAdmin(admin.ModelAdmin):
    """Admin options for Student records."""
    fields = ('name', 'card_number', 'gender', 'phone_number', 'roll_number', 'branch', 'batch', 'semester',)
    list_display = ('name', 'roll_number', 'branch')
    list_filter = ('branch', 'batch', 'semester',)
    # BUG FIX: 'rollno' is not a declared field name - every other option
    # here uses 'roll_number', so the admin search previously errored.
    search_fields = ('name', 'roll_number', 'phone_number',)
    ordering = ('name', 'roll_number')
class EmployeeAdmin(admin.ModelAdmin):
    # Admin options for Employee records: editable fields, list columns,
    # sidebar filters, search and default ordering.
    fields = ('name', 'card_number', 'gender', 'phone_number', 'employee_id', 'joining_date', 'department', 'designation',)
    list_display = ('name', 'employee_id', 'department', 'designation')
    list_filter = ('joining_date', 'department', 'designation')
    search_fields = ('employee_id', 'name', 'phone_number')
    ordering = ('name', 'employee_id')
# Wire the admin options to their models; EndUser is registered with an
# admin class whose get_model_perms() hides it from the index page.
admin.site.register(EndUser, EndUserAdmin)
admin.site.register(Student, StudentAdmin)
admin.site.register(Employee, EmployeeAdmin)
| {
"content_hash": "31ea003d90836df48c2dc494a4f788e5",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 120,
"avg_line_length": 41.074074074074076,
"alnum_prop": 0.6807935076645627,
"repo_name": "sourabhv/Cope-Old",
"id": "d66136ec61ecd5fd9f06e9e0e61ef95de3ff8262",
"size": "1109",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "transactions/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9557"
},
{
"name": "JavaScript",
"bytes": "14970"
},
{
"name": "Python",
"bytes": "26821"
}
],
"symlink_target": ""
} |
import formatter
import unittest
from test import test_support
htmllib = test_support.import_module('htmllib', deprecated=True)
class AnchorCollector(htmllib.HTMLParser):
    """Parser that records the arguments of every anchor_bgn() call."""

    def __init__(self, *args, **kw):
        # Collected before the base __init__ so early callbacks are safe.
        self.__anchors = []
        htmllib.HTMLParser.__init__(self, *args, **kw)

    def get_anchor_info(self):
        """Return the recorded anchor argument tuples, in document order."""
        return self.__anchors

    def anchor_bgn(self, *args):
        # Each anchor's positional arguments are stored as one tuple.
        self.__anchors.append(args)
class DeclCollector(htmllib.HTMLParser):
    """Parser that records the payload of every unknown declaration."""

    def __init__(self, *args, **kw):
        # Collected before the base __init__ so early callbacks are safe.
        self.__decls = []
        htmllib.HTMLParser.__init__(self, *args, **kw)

    def get_decl_info(self):
        """Return the recorded declaration strings, in document order."""
        return self.__decls

    def unknown_decl(self, data):
        self.__decls.append(data)
class HTMLParserTestCase(unittest.TestCase):
    """Regression tests for htmllib.HTMLParser callbacks."""

    def test_anchor_collection(self):
        # See SF bug #467059.
        parser = AnchorCollector(formatter.NullFormatter(), verbose=1)
        parser.feed(
            """<a href='http://foo.org/' name='splat'> </a>
            <a href='http://www.python.org/'> </a>
            <a name='frob'> </a>
            """)
        parser.close()
        # Anchors are reported as 3-tuples in document order, with empty
        # strings for missing attributes.
        self.assertEqual(parser.get_anchor_info(),
                         [('http://foo.org/', 'splat', ''),
                          ('http://www.python.org/', '', ''),
                          ('', 'frob', ''),
                          ])

    def test_decl_collection(self):
        # See SF patch #545300
        parser = DeclCollector(formatter.NullFormatter(), verbose=1)
        parser.feed(
            """<html>
            <body>
            hallo
            <![if !supportEmptyParas]> <![endif]>
            </body>
            </html>
            """)
        parser.close()
        # Only the unknown declarations' payloads are recorded.
        self.assertEqual(parser.get_decl_info(),
                         ["if !supportEmptyParas",
                          "endif"
                          ])
def test_main():
    # htmllib was imported with deprecated=True above; run the single case.
    test_support.run_unittest(HTMLParserTestCase)

if __name__ == "__main__":
    test_main()
| {
"content_hash": "e646de647c906d8df6a4fd897bbae5e8",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 70,
"avg_line_length": 29.666666666666668,
"alnum_prop": 0.4924279433317049,
"repo_name": "Jeff-Tian/mybnb",
"id": "f45e87112539a279d5ac542e9f9f9468881f4c89",
"size": "2047",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "Python27/Lib/test/test_htmllib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "455330"
},
{
"name": "Batchfile",
"bytes": "6263"
},
{
"name": "C",
"bytes": "2304983"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "31815"
},
{
"name": "CSS",
"bytes": "30628"
},
{
"name": "Cucumber",
"bytes": "248616"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "Groff",
"bytes": "31983"
},
{
"name": "HTML",
"bytes": "376863"
},
{
"name": "JavaScript",
"bytes": "20239"
},
{
"name": "M4",
"bytes": "67848"
},
{
"name": "Makefile",
"bytes": "142926"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Python",
"bytes": "19913027"
},
{
"name": "REXX",
"bytes": "3862"
},
{
"name": "Ruby",
"bytes": "14954382"
},
{
"name": "Shell",
"bytes": "366205"
},
{
"name": "Tcl",
"bytes": "2150972"
},
{
"name": "TeX",
"bytes": "230259"
},
{
"name": "Visual Basic",
"bytes": "494"
},
{
"name": "XSLT",
"bytes": "3736"
},
{
"name": "Yacc",
"bytes": "14342"
}
],
"symlink_target": ""
} |
import sys
import pytest
import typeconverter
if sys.version_info < (3, 0, 0):
    # Python 2: PY2 is a truthy sentinel, PY3 is None; basestring covers
    # both str and unicode.
    PY3 = None
    PY2 = object()
    string_basetype = basestring
    def to_str(x):
        return unicode(x)
else:
    # Python 3: str is the only string base type.
    PY3 = object()
    PY2 = None
    string_basetype = str
    def to_str(x):
        return str(x)
def test_converter():
    """Basic conversion: handlers for list/tuple/number plus a default."""
    converter = typeconverter.Converter(string_basetype)

    @converter.handle(list)
    def handle_list(items):
        return ', '.join(map(converter.convert, items))

    @converter.handle(tuple)
    def handle_tuple(items):
        return '(' + ', '.join(map(converter.convert, items)) + ')'

    if PY2:
        @converter.handle(int, float, long)
        def handle_number(value):
            return 'n' + str(value)
    else:
        @converter.handle(int, float)
        def handle_number(value):
            return 'n' + str(value)

    @converter.default
    def handle_default(obj):
        return str(obj)

    assert converter.convert(['a', 'b', 'c']) == 'a, b, c'
    assert converter.convert(('a', 'b')) == '(a, b)'
    assert converter.convert(123) == 'n123'
    if PY2:
        assert converter.convert(long(1)) == 'n1'
    assert converter.convert({}) == '{}'
    assert converter.convert([1, 2, 3]) == 'n1, n2, n3'
def test_multiple():
    """One handler may serve several registered input types."""
    converter = typeconverter.Converter(list)

    @converter.handle(tuple, set)
    def handle_iterable(iterable):
        return list(iterable)

    source = {1, 2, 3}
    converted = converter.convert(source)
    assert len(source) == len(converted)
    assert isinstance(converted, list)
    for element in source:
        assert element in converted
    # Strings have no handler, so conversion must fail.
    with pytest.raises(TypeError):
        converter.convert('str')
def test_chain():
    """Handlers may return unconverted values, which are converted again."""
    converter = typeconverter.Converter((list, dict, int, string_basetype))

    class First(object):
        def __init__(self, v):
            self.v = v

    @converter.handle(First)
    def handle_first(obj):
        return obj.v

    class Second(object):
        def __init__(self, v):
            self.v = v

    @converter.handle(Second)
    def handle_second(obj):
        # Returns another convertible type, forcing a second pass.
        return First(obj.v)

    assert converter.convert(First(1)) == 1
    assert converter.convert(Second(2)) == 2
    assert converter.convert(First('1')) == '1'
    assert converter.convert(Second('2')) == '2'
def test_assert():
    """assert_type() may be overridden to validate containers recursively."""

    class DeepConverter(typeconverter.Converter):
        def assert_type(self, obj):
            super(DeepConverter, self).assert_type(obj)
            if isinstance(obj, list):
                for i in obj:
                    self.assert_type(i)
            elif isinstance(obj, dict):
                # BUG FIX: dicts have no `iter_items` method on any Python
                # version; `items()` exists on both Python 2 and 3.
                for k, v in obj.items():
                    self.assert_type(k)
                    self.assert_type(v)

    converter = DeepConverter((list, dict, string_basetype))

    @converter.handle(set, tuple)
    def convert_iterable(i):
        return list(i)

    @converter.handle(list)
    def convert_list(li):
        return [converter.convert(x) for x in li]

    @converter.handle(dict)
    def convert_dict(d):
        converted = {}
        # BUG FIX: `iter_items` -> `items` (same latent AttributeError).
        for k, v in d.items():
            converted[converter.convert(k)] = converter.convert(v)
        return converted

    @converter.default
    def convert(obj):
        return to_str(obj)

    assert [['1', '2', '3'], 'b', 'c'] == converter.convert(
        ({1, 2, 3}, 'b', 'c')
    )
| {
"content_hash": "2e190e3f2707b54dbcaf93e4cf8c626a",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 75,
"avg_line_length": 23.773722627737225,
"alnum_prop": 0.5618667485416027,
"repo_name": "Hardtack/TypeConverter",
"id": "023de3a613b083cc418b31d01a6f61b0d8d15b69",
"size": "3283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_typeconverter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7598"
}
],
"symlink_target": ""
} |
"""ScopedAnnotation allows the profiler to annotate device (e.g., GPU) events.
Usage:
with scoped_annotation.ScopedAnnotation('name'):
...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.profiler.internal import _pywrap_scoped_annotation
class ScopedAnnotation(object):
  """Context manager that generates an annotation for the profiler."""

  def __init__(self, name, **kwargs):
    # When profiling is disabled, keep a null handle so enter/exit no-op.
    if not _pywrap_scoped_annotation.ScopedAnnotation.IsEnabled():
      self._scoped_annotation = None
      return
    if kwargs:
      # Encode extra metadata as '#key=value,...#' appended to the name.
      details = ','.join(key + '=' + str(value)
                         for key, value in six.iteritems(kwargs))
      name += '#' + details + '#'
    self._scoped_annotation = _pywrap_scoped_annotation.ScopedAnnotation(name)

  def __enter__(self):
    if self._scoped_annotation:
      self._scoped_annotation.Enter()

  def __exit__(self, exc_type, exc_val, exc_tb):
    if self._scoped_annotation:
      self._scoped_annotation.Exit()
| {
"content_hash": "492f3c3700af3eb0ec7d0ea1e8deed5f",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 80,
"avg_line_length": 29.942857142857143,
"alnum_prop": 0.6612595419847328,
"repo_name": "renyi533/tensorflow",
"id": "1d7e2b024b4e1a789e9bcfda306d865a00f77cd9",
"size": "1737",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tensorflow/python/profiler/scoped_annotation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "31572"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "903309"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "82507951"
},
{
"name": "CMake",
"bytes": "6967"
},
{
"name": "Dockerfile",
"bytes": "113964"
},
{
"name": "Go",
"bytes": "1871425"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "988219"
},
{
"name": "Jupyter Notebook",
"bytes": "550861"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "2073744"
},
{
"name": "Makefile",
"bytes": "66796"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "319021"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "20422"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37811412"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "6846"
},
{
"name": "Shell",
"bytes": "696058"
},
{
"name": "Smarty",
"bytes": "35725"
},
{
"name": "Starlark",
"bytes": "3655758"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.auth.models import User
from entrez.models import EntrezEntry
class Room(models.Model):
    """A chat/collaboration room owned by one user with many members."""
    # Human-readable room name (also used as the unicode representation).
    name = models.CharField(max_length=255)
    desc = models.TextField()
    # on_delete made explicit: CASCADE was the implicit default before
    # Django 2.0, where the argument became mandatory.
    # NOTE(review): related_name='creator' is the *reverse* accessor on
    # User, which reads oddly ("user.creator.all()") -- confirm before
    # renaming, other code may rely on it.
    creator = models.ForeignKey(User, related_name='creator',
                                on_delete=models.CASCADE)
    members = models.ManyToManyField(User, related_name='member')
    def __unicode__(self):
        return self.name
class RoomEntry(models.Model):
    """Links an EntrezEntry into a Room, newest link first."""
    # on_delete made explicit: CASCADE was the implicit default before
    # Django 2.0, where the argument became mandatory.
    room = models.ForeignKey(Room, on_delete=models.CASCADE)
    entry = models.ForeignKey(EntrezEntry, on_delete=models.CASCADE)
    # When the entry was linked into the room; drives default ordering.
    link_time = models.DateTimeField()
    class Meta:
        ordering = ['-link_time']
    def __unicode__(self):
        return self.entry.title
| {
"content_hash": "d432571c381265f1f7b37d0ce787b54e",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 65,
"avg_line_length": 26.12,
"alnum_prop": 0.6937212863705973,
"repo_name": "indexofire/django-entrez",
"id": "785925d17a1cb0a231f215ed8977424c46e10459",
"size": "677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/apps/room/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "197757"
},
{
"name": "Python",
"bytes": "150089"
},
{
"name": "Shell",
"bytes": "6705"
}
],
"symlink_target": ""
} |
"""Urls for the Zinnia feeds"""
from django.conf.urls.defaults import url
from django.conf.urls.defaults import patterns
from zinnia.feeds import LatestEntries
from zinnia.feeds import EntryDiscussions
from zinnia.feeds import EntryComments
from zinnia.feeds import EntryTrackbacks
from zinnia.feeds import EntryPingbacks
from zinnia.feeds import SearchEntries
from zinnia.feeds import TagEntries
from zinnia.feeds import CategoryEntries
from zinnia.feeds import AuthorEntries
# Feed URL routes. The route names below are part of the public API
# (used by reverse() and templates) -- do not rename.
urlpatterns = patterns('',
    url(r'^latest/$',
        LatestEntries(),
        name='zinnia_entry_latest_feed'),
    url(r'^search/$',
        SearchEntries(),
        name='zinnia_entry_search_feed'),
    # Tag slugs may contain spaces (note the space in the class).
    url(r'^tags/(?P<slug>[- \w]+)/$',
        TagEntries(),
        name='zinnia_tag_feed'),
    # NOTE(review): in [.+-@\w] the '+-@' is a character *range*
    # (0x2B through 0x40, which includes digits and punctuation like
    # '/', ';', '<'). Probably [-.+@\w] was intended -- confirm before
    # changing, since it would narrow the usernames matched.
    url(r'^authors/(?P<username>[.+-@\w]+)/$',
        AuthorEntries(),
        name='zinnia_author_feed'),
    url(r'^categories/(?P<path>[-\/\w]+)/$',
        CategoryEntries(),
        name='zinnia_category_feed'),
    url(r'^discussions/(?P<slug>[-\w]+)/$',
        EntryDiscussions(),
        name='zinnia_entry_discussion_feed'),
    url(r'^comments/(?P<slug>[-\w]+)/$',
        EntryComments(),
        name='zinnia_entry_comment_feed'),
    url(r'^pingbacks/(?P<slug>[-\w]+)/$',
        EntryPingbacks(),
        name='zinnia_entry_pingback_feed'),
    url(r'^trackbacks/(?P<slug>[-\w]+)/$',
        EntryTrackbacks(),
        name='zinnia_entry_trackback_feed'),
    )
| {
"content_hash": "82c3ec835a78c58e6347f3b0dce6c466",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 65,
"avg_line_length": 45.18181818181818,
"alnum_prop": 0.46277665995975853,
"repo_name": "jfdsmit/django-blog-zinnia",
"id": "7a0b8eba4340c8a97fd8b1d57861665bf7a5ffec",
"size": "1988",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zinnia/urls/feeds.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "235050"
},
{
"name": "PHP",
"bytes": "1052"
},
{
"name": "Python",
"bytes": "462213"
},
{
"name": "Ruby",
"bytes": "244"
}
],
"symlink_target": ""
} |
import requests
from requests.compat import urljoin
from agent import Agent
from multiprocessing import Pool
def spawn(request):
    # Module-level (not a method) so multiprocessing.Pool can pickle it.
    # Builds one Agent per request dict and runs it to completion.
    # Expects keys: 'sid', 'server', 'term', 'crn'.
    Agent(request['sid'], request['server'], request['term'], request['crn']).run()
class Spawner:
    """Fetches pending requests from the server and fans them out to
    Agent workers via a multiprocessing pool."""

    def __init__(self, server, pool_size):
        self.server = server        # base URL of the request server
        self.pool_size = pool_size  # number of worker processes

    def get_requests(self):
        """Return the list of pending requests from GET <server>/requests."""
        url = urljoin(self.server, 'requests')
        # NOTE(review): no timeout -- a hung server blocks forever; confirm
        # whether adding requests.get(url, timeout=...) is acceptable.
        resp = requests.get(url)
        return resp.json().get('requests')

    def run(self, spawn_id):
        """Tag each request with this run's id/server and process in parallel."""
        reqs = self.get_requests()
        for r in reqs:
            r['sid'] = spawn_id
            r['server'] = self.server
        # Use the pool as a context manager so worker processes are cleaned
        # up when map() returns -- the original created the Pool inline and
        # never closed/joined it, leaking worker processes.
        with Pool(self.pool_size) as pool:
            pool.map(spawn, reqs)
if __name__ == '__main__':
    # Standalone entry point: spawn agents against a local server.
    import time
    import sys

    run_id = str(time.time())  # unique id for this spawner run
    port = int(sys.argv[1]) if len(sys.argv) == 2 else 8257
    Spawner('http://0.0.0.0:%d' % port, 5).run(run_id)
| {
"content_hash": "d4fcf00a841939e97ad411131ffa8929",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 81,
"avg_line_length": 23.555555555555557,
"alnum_prop": 0.6356132075471698,
"repo_name": "chidoli/agent-pete",
"id": "07fbb22489847c5939f0ff8f867ee1c5def79e3d",
"size": "848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spawner.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "10525"
},
{
"name": "HTML",
"bytes": "12910"
},
{
"name": "JavaScript",
"bytes": "594"
},
{
"name": "Python",
"bytes": "8517"
}
],
"symlink_target": ""
} |
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import errno
import re
from typing import Optional
import msgfy
import pyparsing as pp
import subprocrunner as spr
from ._const import Network, Tc, TcCommandOutput, TrafficDirection
try:
import ujson as json
except ImportError:
import json # type: ignore
# Matches a device key of the form "<12-hex-char id> (device=<ifname>)",
# the notation used for docker containers in the config file.
RE_CONTAINER_ID = re.compile(r"[a-z0-9]{12}\s+\(device=[a-z0-9]+\)")
# e.g. edfd9dbb3969 (device=veth6f7b798)
class TcConfigLoader:
    """Loads a tcconfig JSON file and converts each entry into the
    equivalent ``tcset``/``tcdel`` command lines."""

    def __init__(self, logger):
        self.__logger = logger
        self.__config_table = None  # parsed JSON config, set by load_tcconfig()
        self.is_overwrite = False  # when True, emit a "tcdel --all" per device
        self.tc_command_output = TcCommandOutput.NOT_SET

    def load_tcconfig(self, config_file_path):
        """Read and schema-validate the JSON config at *config_file_path*.

        Raises OSError if the file cannot be read and a voluptuous error
        if the content does not match the expected structure.
        """
        from voluptuous import ALLOW_EXTRA, Any, Required, Schema

        schema = Schema(
            {Required(str): {Any(*TrafficDirection.LIST): {str: {str: Any(str, int, float)}}}},
            extra=ALLOW_EXTRA,
        )

        with open(config_file_path, encoding="utf-8") as fp:
            self.__config_table = json.load(fp)

        schema(self.__config_table)
        self.__logger.debug(
            "tc config file: {:s}".format(json.dumps(self.__config_table, indent=4))
        )

    def get_tcconfig_commands(self):
        """Build the list of command strings for the loaded config table."""
        from .tcset import get_arg_parser

        command_list = []

        for device, device_table in self.__config_table.items():
            # Container keys look like "<id> (device=<veth>)"; only the id is
            # put on the command line, together with --docker.
            is_container = RE_CONTAINER_ID.search(device) is not None
            if is_container:
                device = device.split()[0]

            if self.is_overwrite:
                command_list.append(
                    " ".join(
                        [Tc.Command.TCDEL, device, "--all"] + (["--docker"] if is_container else [])
                    )
                )

            for direction, direction_table in device_table.items():
                is_first_set = True

                for tc_filter, filter_table in direction_table.items():
                    self.__logger.debug(
                        "is_first_set={}, filter='{}', table={}".format(
                            is_first_set, tc_filter, filter_table
                        )
                    )
                    if not filter_table:
                        continue

                    option_list = [device, "--direction={:s}".format(direction)] + (
                        ["--docker"] if is_container else []
                    )
                    for key, value in filter_table.items():
                        arg_item = "--{:s}={}".format(key, value)
                        # Keys tcset does not recognize are skipped, not fatal.
                        parse_result = get_arg_parser().parse_known_args(["dummy", arg_item])
                        if parse_result[1]:
                            self.__logger.debug(
                                "unknown parameter: key={}, value={}".format(key, value)
                            )
                            continue

                        option_list.append(arg_item)

                    # Networks embedded in the filter key; the catch-all
                    # "anywhere" networks are implied and therefore omitted.
                    for parse, option_format in (
                        (self.__parse_tc_filter_src_network, "--src-network={:s}"),
                        (self.__parse_tc_filter_dst_network, "--dst-network={:s}"),
                    ):
                        try:
                            network = parse(tc_filter)
                        except pp.ParseException:
                            continue

                        if network not in (Network.Ipv4.ANYWHERE, Network.Ipv6.ANYWHERE):
                            option_list.append(option_format.format(network))

                    # Ports embedded in the filter key, when present.
                    for parse, option_format in (
                        (self.__parse_tc_filter_src_port, "--src-port={}"),
                        (self.__parse_tc_filter_dst_port, "--dst-port={}"),
                    ):
                        try:
                            option_list.append(option_format.format(parse(tc_filter)))
                        except pp.ParseException:
                            pass

                    if not is_first_set:
                        # Later rules for the same device/direction must be
                        # added on top of, not replace, the first one.
                        option_list.append("--add")

                    if self.tc_command_output == TcCommandOutput.STDOUT:
                        option_list.append("--tc-command")
                    elif self.tc_command_output == TcCommandOutput.SCRIPT:
                        option_list.append("--tc-script")

                    is_first_set = False

                    command_list.append(" ".join([Tc.Command.TCSET] + option_list))

        return command_list

    @classmethod
    def __parse_filter_param(cls, text, param, charset):
        """Return the value following "<param>=" in *text*.

        Shared implementation for the four wrappers below, which used to be
        four near-identical static methods. Raises pp.ParseException when
        *param* does not occur in *text*.
        """
        pattern = pp.SkipTo("{:s}=".format(param), include=True) + pp.Word(charset)
        return pattern.parseString(text)[-1]

    @classmethod
    def __parse_tc_filter_src_network(cls, text):
        return cls.__parse_filter_param(text, Tc.Param.SRC_NETWORK, pp.alphanums + "./")

    @classmethod
    def __parse_tc_filter_dst_network(cls, text):
        return cls.__parse_filter_param(text, Tc.Param.DST_NETWORK, pp.alphanums + "./")

    @classmethod
    def __parse_tc_filter_src_port(cls, text):
        return cls.__parse_filter_param(text, Tc.Param.SRC_PORT, pp.nums)

    @classmethod
    def __parse_tc_filter_dst_port(cls, text):
        return cls.__parse_filter_param(text, Tc.Param.DST_PORT, pp.nums)
def set_tc_from_file(
    logger, config_file_path: str, is_overwrite: bool, tc_command_output: Optional[str]
) -> int:
    """Apply the traffic-control settings described in a tcconfig file.

    Returns 0 on success, errno.EIO when the config file cannot be read,
    or the OR-ed non-zero exit codes of any failed tc commands.
    """
    loader = TcConfigLoader(logger)
    loader.is_overwrite = is_overwrite
    loader.tc_command_output = tc_command_output

    try:
        loader.load_tcconfig(config_file_path)
    except OSError as e:
        logger.error(msgfy.to_error_message(e))
        return errno.EIO

    return_code = 0
    for command in loader.get_tcconfig_commands():
        runner = spr.SubprocessRunner(command)
        return_code |= runner.run()

        if return_code != 0:
            logger.error(runner.stderr)
        elif tc_command_output == TcCommandOutput.STDOUT:
            print(runner.stdout.strip())

    return return_code
| {
"content_hash": "842180ee52409ee587b190f04c2aa408",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 100,
"avg_line_length": 34.21808510638298,
"alnum_prop": 0.5230840976216384,
"repo_name": "thombashi/tcconfig",
"id": "8d979b9653ac9938a5deefbfc2011adfb1b4c7b2",
"size": "6433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tcconfig/_importer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1071"
},
{
"name": "Python",
"bytes": "279792"
},
{
"name": "Shell",
"bytes": "3172"
}
],
"symlink_target": ""
} |
from system.log_support import init_logger
import time
from system.motion_detection import MotionDetectorV3Traced
import config
import datetime as dts
import numpy as np
from system.camera_support import CameraConnectionSupport
import cv2 as cv
import imutils
class MotionDetectionTester(CameraConnectionSupport):
    """Interactive tester: reads frames from a camera, runs the motion
    detector on each one, and displays the annotated stream in a window.
    Quit with 'q' or Esc."""
    def __init__(self, camConnectionString, logger):
        CameraConnectionSupport.__init__(self, camConnectionString, logger)
        # initializing motion detector
        self.detector = MotionDetectorV3Traced()
        self.detector.resizeBeforeDetect = False
        self.detector.multiFrameDetection = False
        # True while we consider motion to be in progress (incl. prolongation)
        self.inMotionDetectedState = False
        # UTC timestamp of the last successful camera (re)connection
        self.__camConnectionDts = None
        # latches to True once the initial warm-up interval has elapsed
        self.__canDetectMotion = False
    def canDetectMotion(self):
        """Return True once INITIAL_WAIT_INTERVAL_BEFORE_MOTION_DETECTION_SECS
        have passed since the camera connected; latches True afterwards."""
        if self.__canDetectMotion:
            return True
        if self.__camConnectionDts is None:
            return False
        minDts = self.__camConnectionDts + dts.timedelta(seconds=config.INITIAL_WAIT_INTERVAL_BEFORE_MOTION_DETECTION_SECS)
        if minDts > self.utcNow():
            return False
        self.__canDetectMotion = True
        return True
    def loop(self): # noqa
        """
        Main loop for motion detection tester
        :return:
        """
        self.logger.info("main loop started")
        emptyFrame = None
        while True:
            # initializing connection to camera
            if self.cap is None:
                if not self._initCamera():
                    continue
                self.__camConnectionDts = self.utcNow()
            # reading frames from camera
            ret, current_frame = self.cap.read()
            # if can't read current frame - going to the next loop
            if (ret == False) or (current_frame is None): # the connection broke, or the stream came to an end
                continue
            current_frame = imutils.resize(current_frame, width=500, height=500)
            instant = time.time() # get timestamp of the frame
            ############################################################
            # calculating width and height of current video stream #
            ############################################################
            frameWidth = np.size(current_frame, 0)
            frameHeight = np.size(current_frame, 1)
            # NOTE(review): emptyFrame is allocated but never used below --
            # confirm whether it is dead code left from an earlier version.
            if emptyFrame is None:
                emptyFrame = np.zeros((frameHeight, frameWidth, 3), np.uint8)
            resolutionChanged = False
            if None in [self.frameWidth, self.frameHeight]:
                # first frame: record the stream resolution
                self.frameWidth = frameWidth
                self.frameHeight = frameHeight
                self.nb_pixels = self.frameWidth * self.frameHeight
                self.logger.info("self.width = {}".format(self.frameWidth))
                self.logger.info("self.height = {}".format(self.frameHeight))
                resolutionChanged = True
            else:
                resolutionChanged = ((self.frameWidth != frameWidth) or (self.frameHeight != frameHeight))
            if resolutionChanged:
                self.onFrameSizeUpdate(frameWidth, frameHeight)
            ########################
            # detecting motion #
            ########################
            motionDetected = False
            # detection motion if can do it now
            if self.canDetectMotion():
                if self.detector.motionDetected(current_frame):
                    self.trigger_time = instant # Update the trigger_time
                    if not self.inMotionDetectedState:
                        self.logger.info("something moved!")
                    motionDetected = True
                    self.inMotionDetectedState = True
            now = self.utcNow()
            # prolongating motion for minimal motion duration
            if (not motionDetected) and (self.detector.motionDetectionDts is not None):
                minDuration = self.detector.motionDetectionDts + dts.timedelta(seconds=config.MINIMAL_MOTION_DURATION)
                if minDuration > now:
                    motionDetected = True
            # clearing motion detection flag when needed
            if not motionDetected:
                self.inMotionDetectedState = False
            # calculating left seconds for motion (for further use in label)
            dx = 0
            if motionDetected:
                dx = now - self.detector.motionDetectionDts
                dx = config.MINIMAL_MOTION_DURATION - dx.seconds
            # adding label for frame with detected motion
            if motionDetected:
                text = "MOTION DETECTED [{}]".format(dx)
                cv.putText(
                    current_frame,
                    text,
                    (10, 20),
                    cv.FONT_HERSHEY_SIMPLEX,
                    0.5,
                    (0, 0, 255), # b g r
                    2
                )
            # show current frame
            cv.imshow("frame", current_frame)
            # reading key and breaking loop when Esc or "q" key pressed
            key = cv.waitKey(1)
            if (key & 0xFF == ord("q")) or (key == 27):
                break
        if self.cap is not None:
            self.cap.release()
        cv.destroyAllWindows()
        self.logger.info("main loop finished")
def main():
    """Entry point: set up logging, then run the motion-detection loop."""
    logger = init_logger()
    logger.info("app started")
    MotionDetectionTester(config.cam, logger).loop()
    logger.info("app finished")
if __name__ == "__main__":
    main()
| {
"content_hash": "06e51dd9a26eb91e93280ac7203c6a6e",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 123,
"avg_line_length": 33.48795180722892,
"alnum_prop": 0.5509983810037776,
"repo_name": "JFF-Bohdan/pynvr",
"id": "e5031a901d8ecc9270c279cd08bb069988c75538",
"size": "5559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "motion_detection_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "325"
},
{
"name": "Python",
"bytes": "53105"
}
],
"symlink_target": ""
} |
"""
[path]
cd /Users/brunoflaven/Documents/01_work/blog_articles/extending_streamlit_usage/simple_app_spacy_example/
[file]
streamlit run simple_app_spacy_example_11.py
# more on infos and apps on
https://streamlit.io/
https://streamlit.io/gallery
https://docs.streamlit.io/en/stable/
Source: https://github.com/Jcharis/
Source: https://github.com/explosion/spacy-streamlit
python -m spacy download en_core_web_lg
"""
import spacy
from spacy_streamlit import visualize_similarity
# Load the large English model; per the header note it must be installed
# first with: python -m spacy download en_core_web_lg
nlp = spacy.load("en_core_web_lg")
# Render spacy-streamlit's interactive similarity widget for the two words.
visualize_similarity(nlp, ("pizza", "fries"))
| {
"content_hash": "0a0075159cae084af970657e5edef48d",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 105,
"avg_line_length": 19.548387096774192,
"alnum_prop": 0.7112211221122112,
"repo_name": "bflaven/BlogArticlesExamples",
"id": "e07f73afc02a33e9a44a19c16bafb496c6ff1108",
"size": "650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "extending_streamlit_usage/008_simple_app_spacy_example/simple_app_spacy_example_11.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AppleScript",
"bytes": "2756"
},
{
"name": "CSS",
"bytes": "3497"
},
{
"name": "CoffeeScript",
"bytes": "1785"
},
{
"name": "Dockerfile",
"bytes": "993"
},
{
"name": "HTML",
"bytes": "23687927"
},
{
"name": "JavaScript",
"bytes": "12838"
},
{
"name": "Jupyter Notebook",
"bytes": "2918640"
},
{
"name": "Makefile",
"bytes": "4058"
},
{
"name": "PHP",
"bytes": "223161"
},
{
"name": "Python",
"bytes": "1461699"
},
{
"name": "Shell",
"bytes": "12291"
}
],
"symlink_target": ""
} |
"""
WSGI config for restaurant project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# setdefault: point Django at the project settings, but let an already-set
# DJANGO_SETTINGS_MODULE in the environment win. Must run before
# get_wsgi_application(), which reads the settings.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "restaurant.settings")
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| {
"content_hash": "7e54e62a190189f1876ef32485723420",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.8125,
"alnum_prop": 0.7732997481108312,
"repo_name": "PI-Technology/django-main-app",
"id": "1dbb67b9ca28b9808fd85197e18ae3df306add03",
"size": "397",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "restaurant/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "78853"
},
{
"name": "HTML",
"bytes": "62553"
},
{
"name": "JavaScript",
"bytes": "19267"
},
{
"name": "Python",
"bytes": "24890"
}
],
"symlink_target": ""
} |
import os
import tensorflow as tf
import tempfile
from datetime import datetime, timedelta
import numpy as np
import argparse
import logging
import shutil
import subprocess
import apache_beam as beam
import random
import sys
from . import train_autoencoder as trainer
class Predict(beam.DoFn):
    """Beam DoFn that replaces each element's 'ref' value with the
    embedding produced by the saved autoencoder's 'refc_embedding' layer."""
    def __init__(self, model_dir):
        # Only the path is stored here; the model itself is loaded lazily
        # in process() (see comment there).
        self.model_dir = model_dir
        self.embedder = None
    def process(self, x):
        # Create a model for every worker only once
        # The Model is not pickleable, so it can not be created
        # in the constructor
        if not self.embedder:
            # create embedder: truncate the autoencoder at its
            # 'refc_embedding' layer so predict() yields embeddings.
            print('Loading model into worker TensorFlow version = ', tf.__version__)
            model = tf.keras.models.load_model(self.model_dir)
            embed_output = model.get_layer('refc_embedding').output
            self.embedder = tf.keras.Model(model.input, embed_output, name='embedder')
        # embed; copy so the input element is not mutated in place
        result = x.copy()
        refc = tf.expand_dims(tf.expand_dims(x['ref'], 0), -1) # [h,w] to [1, h, w, 1]
        emb = self.embedder.predict(refc)
        # drop the batch dimension again before emitting
        result['ref'] = tf.squeeze(emb, axis=0)
        yield result
def convert_types(x):
    """Convert one example's tensor values into plain Python types for BigQuery.

    Args:
      x: dict whose 'size' and 'ref' values expose .numpy() arrays, and whose
         'time'/'valid_time' values expose .numpy() ISO-8601 byte strings
         such as b'2019-09-18T11:00:00'.

    Returns:
      dict with lists under 'size'/'ref' and 'YYYY-MM-DD HH:MM:SS' strings
      under 'time'/'valid_time', matching the schema declared in run_job.
    """
    result = {}
    for key in ['size', 'ref']:
        result[key] = x[key].numpy().tolist()
    for key in ['time', 'valid_time']:
        # Decode the bytes value instead of str()-ing it. The previous
        # str(...).replace('b', '') hack removed *every* 'b' character,
        # which would corrupt any timestamp text containing one.
        value = x[key].numpy()
        if isinstance(value, bytes):
            value = value.decode('utf-8')
        # BigQuery TIMESTAMP accepts "date time" with a space separator.
        result[key] = value.replace('T', ' ')
    return result
def run_job(options):
    """Run the Beam pipeline: TFRecords -> embeddings -> BigQuery table."""
    # create objects we need: BigQuery schema matching convert_types output
    schema = {'fields': [
        {'name': 'size', 'type': 'INTEGER', 'mode': 'REPEATED'},
        {'name': 'ref', 'type': 'FLOAT', 'mode': 'REPEATED'},
        {'name': 'time', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'},
        {'name': 'valid_time', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'},
    ]}
    # start the pipeline; 'runner' selects DirectRunner vs DataflowRunner
    opts = beam.pipeline.PipelineOptions(flags=[], **options)
    with beam.Pipeline(options['runner'], options=opts) as p:
        # read examples, embed each one, then truncate-and-write to BigQuery
        (
            p
            | 'read_tfr' >> beam.io.tfrecordio.ReadFromTFRecord(os.path.join(options['input']))
            | 'parse_tfr' >> beam.Map(trainer.parse_tfrecord)
            | 'compute_embed' >> beam.ParDo(Predict(options['savedmodel']))
            | 'convert_types' >> beam.Map(convert_types)
            | 'write_bq' >> beam.io.gcp.bigquery.WriteToBigQuery(
                table=options['output_table'], schema=schema,
                write_disposition=beam.io.gcp.bigquery.BigQueryDisposition.WRITE_TRUNCATE)
        )
def main(args):
    """Parse CLI flags and launch the embedding pipeline (local or Dataflow).

    Args:
      args: unused. NOTE(review): flags are read from sys.argv via
        argparse.parse_args(), not from this parameter -- check callers
        before wiring it through.
    """
    parser = argparse.ArgumentParser(
        description='Create embeddings of TF records')
    parser.add_argument(
        '--project',
        default='',
        help='Specify GCP project to bill to run on cloud')
    parser.add_argument(
        '--output_table', required=True, help='PROJECT:dataset.table')
    parser.add_argument(
        '--savedmodel', required=True, help='location of saved autoencoder model')
    parser.add_argument(
        '--input', required=True, help='TF Record pattern')
    parser.add_argument(
        '--outdir', required=True, help='For staging etc.')
    # parse command-line args and add a few more pipeline/Dataflow options
    logging.basicConfig(level=getattr(logging, 'INFO', None))
    options = parser.parse_args().__dict__
    outdir = options['outdir']
    options.update({
        'staging_location':
            os.path.join(outdir, 'tmp', 'staging'),
        'temp_location':
            os.path.join(outdir, 'tmp'),
        'job_name':
            'wxsearch-' + datetime.now().strftime('%y%m%d-%H%M%S'),
        'teardown_policy':
            'TEARDOWN_ALWAYS',
        'max_num_workers':
            5,
        'machine_type':
            'n1-standard-2',
        'region':
            'us-central1',
        'setup_file':
            os.path.join(os.path.dirname(os.path.abspath(__file__)), '../setup.py'),
        'save_main_session':
            False,
    })
    if not options['project']:
        # No project -> run locally with a clean output directory.
        print('Launching local job ... hang on')
        shutil.rmtree(outdir, ignore_errors=True)
        os.makedirs(outdir)
        options['runner'] = 'DirectRunner'
    else:
        print('Launching Dataflow job {} ... hang on'.format(options['job_name']))
        try:
            subprocess.check_call('gsutil -m rm -r {}'.format(outdir).split())
        except Exception:
            # Best-effort cleanup of the GCS output dir; narrowed from a bare
            # except so KeyboardInterrupt/SystemExit are no longer swallowed.
            pass
        options['runner'] = 'DataflowRunner'
    print('Local TensorFlow version = ', tf.__version__)
    run_job(options)
| {
"content_hash": "433ad6a08f530c58e5b8e68f3e6e0906",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 93,
"avg_line_length": 35.06201550387597,
"alnum_prop": 0.5878841476895865,
"repo_name": "GoogleCloudPlatform/ml-design-patterns",
"id": "2e1afd33ffdac99fb5d35d5ab8ebe7a7f6581c29",
"size": "4523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "02_data_representation/weather_search/wxsearch/compute_embedding.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "96"
},
{
"name": "Jupyter Notebook",
"bytes": "21558412"
},
{
"name": "Python",
"bytes": "50168"
},
{
"name": "Shell",
"bytes": "2356"
}
],
"symlink_target": ""
} |
"""
Here we try to safely use crispy_form if installed
"""
from django.utils.translation import ugettext as _
from bazar.utils.imports import safe_import_module
# Try to import "crispy-forms" base stuff to use for the default helper
try:
    from crispy_forms.helper import FormHelper
    from crispy_forms.layout import Submit
except ImportError:
    # Dummy objects when crispy-forms is not available
    # NOTE(review): this fallback default_helper() takes no form_tag
    # argument, unlike the real one below -- harmless today because only
    # the else-branch code calls it with form_tag, but confirm.
    def default_helper():
        return None
    class CrispyFormMixin(object): pass
else:
    # Ok, crispy-forms is installed
    # Build a minimal helper: POST to the current URL with a Submit button.
    def default_helper(form_tag=True):
        helper = FormHelper()
        helper.form_action = '.'
        helper.form_tag = form_tag
        helper.add_input(Submit('submit', _('Submit')))
        return helper
    class CrispyFormMixin(object):
        """
        Embed the technic in a form mixin to use "crispy-forms" and safely fallback if not installed
        Mixin attributes that you can define to change behavior :
        * crispy_form_helper_path: Python path to the helper;
        * crispy_form_helper_kwargs: Kwargs dict to give to the helper when initialized;
        * crispy_form_tag: A boolean, add <form> tag if True;
        """
        crispy_form_helper_path = None # Custom layout method path
        crispy_form_helper_kwargs = {}
        crispy_form_tag = True
        def __init__(self, *args, **kwargs):
            # Specified helper if any (and import succeed)
            # NOTE(review): safe_import_module falls back to default_helper
            # (a function, never None), so the else branch below looks
            # unreachable -- confirm safe_import_module's contract.
            helper = safe_import_module(self.crispy_form_helper_path, default=default_helper)
            if helper is not None:
                self.helper = helper(form_tag=self.crispy_form_tag, **self.crispy_form_helper_kwargs)
            else:
                # Default helper
                self.helper = default_helper(form_tag=self.crispy_form_tag)
| {
"content_hash": "cc629081bd7bed5c6beff4df6aae9587",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 101,
"avg_line_length": 38.59574468085106,
"alnum_prop": 0.6411245865490628,
"repo_name": "emencia/emencia-django-bazar",
"id": "abf25030bb93ffb4fcf64033e3adfa6fa2c15d87",
"size": "1814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bazar/forms/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "340534"
},
{
"name": "HTML",
"bytes": "12135"
},
{
"name": "JavaScript",
"bytes": "11842"
},
{
"name": "Makefile",
"bytes": "995"
},
{
"name": "Python",
"bytes": "83916"
},
{
"name": "Ruby",
"bytes": "981"
}
],
"symlink_target": ""
} |
import codecs
import os
import re
from setuptools import find_packages, setup
# Absolute directory containing this setup.py; used to resolve the
# version-file path in find_version() below.
here = os.path.abspath(os.path.dirname(__file__))
# Read the version number from a source file.
# Why read it, and not import?
# see https://groups.google.com/d/topic/pypa-dev/0PkjVpcxTzQ/discussion
def find_version(*file_paths):
    """Extract __version__ from the file at *file_paths (joined under `here`).

    The file is read rather than imported so package code is not executed
    at build time. Raises RuntimeError when the file cannot be read or no
    version assignment is found.
    """
    # Open in Latin-1 so that we avoid encoding errors.
    # Use codecs.open for Python 2 compatibility; 'with' guarantees the
    # handle is closed even if read() fails (the old code leaked it).
    try:
        with codecs.open(os.path.join(here, *file_paths), 'r', 'latin1') as f:
            version_file = f.read()
    except (IOError, OSError):
        # Narrowed from a bare except; the old message ("Unable to find
        # version string") misreported read failures.
        raise RuntimeError("Unable to read version file.")
    # The version line must have the form
    # __version__ = 'ver'
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                              version_file, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError("Unable to find version string.")
# Get the long description and requirements from their files, falling back
# to empty values so a build from a bare checkout/sdist still works.
try:
    # 'with' closes the file even if read() raises (the old open/close
    # pair leaked the handle on error).
    with codecs.open('README.rst', encoding='utf-8') as f:
        long_description = f.read()
except Exception:  # narrowed from bare except: don't swallow SystemExit
    long_description = ''
try:
    with codecs.open('requirements.txt', encoding='utf-8') as f:
        requirements = f.read().splitlines()
except Exception:
    requirements = []
# Package metadata; version is scraped from the package's __init__.py and
# dependencies come from requirements.txt (both read above).
setup(
    name='ansible_template_ui',
    version=find_version('ansible_template_ui/__init__.py'),
    description='Web UI for testing ansible templates',
    long_description=long_description,
    keywords='ansible jinja jinja2 template ansible-template-ui',
    author='Matt Martz',
    author_email='matt@sivel.net',
    url='https://github.com/sivel/ansible-template-ui',
    license='Apache License, Version 2.0',
    packages=find_packages(exclude=['tests', 'tests.*']),
    install_requires=requirements,
    # Ship the bundled web client assets inside the package.
    package_data={
        '': [
            'client/*',
        ]
    },
)
| {
"content_hash": "836e50c05c0f7a4598ba611140b3b4b8",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 71,
"avg_line_length": 28.53846153846154,
"alnum_prop": 0.6339622641509434,
"repo_name": "sivel/ansible-template-ui",
"id": "f1c72e14b868b545ae5a418a35adfc30f245e0aa",
"size": "2532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "913"
},
{
"name": "HTML",
"bytes": "4154"
},
{
"name": "JavaScript",
"bytes": "1473"
},
{
"name": "Python",
"bytes": "6909"
},
{
"name": "Shell",
"bytes": "767"
}
],
"symlink_target": ""
} |
from distutils.core import setup
# Distribution metadata for the single-module "uri" package.
setup(
    name = "uri",
    version = "1.0",
    description = "A library for URI handling featuring an implementation of URI-Templates",
    author = 'Jacob Kaplan-Moss',
    # NOTE(review): "jaobian.org" looks like a typo for "jacobian.org" --
    # confirm with the author before changing the metadata.
    author_email = 'jacob@jaobian.org',
    py_modules = ['uri'],
    classifiers = [
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet',
    ]
) | {
"content_hash": "ba73726719ac0ba5a6a7fbf6d028b862",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 92,
"avg_line_length": 30.666666666666668,
"alnum_prop": 0.6032608695652174,
"repo_name": "jacobian-archive/uri",
"id": "f840b41d64166c57a7d4d24c4469940c2b809b65",
"size": "552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10084"
}
],
"symlink_target": ""
} |
from flask import Flask, render_template, request
import search_engine
# Dining-hall name -> menu id consumed by search_engine.get_menu_items.
m = {"crossroads": 1, "cafe3": 3, "ckc": 4, "foothill": 6}
# Known library names; search_post matches user queries by substring.
libraries = ['anthropology library',
             'art history/classics library',
             'bancroft library/university archives',
             'berkeley law library',
             'bioscience & natural resources library',
             'business library',
             'career counseling library',
             'ced visual resources center',
             'chemistry library',
             'copy center',
             'data lab',
             'doe library',
             'earth sciences & map library',
             'east asian library',
             'education psychology library',
             'engineering library',
             'environmental design archives',
             'environmental design library',
             'ethnic studies library',
             'graduate services',
             'graduate theological union library',
             'institute for research on labor and employment library',
             'institute of governmental studies library',
             'institute of transportation studies library',
             'interlibrary services',
             'lawrence berkeley national laboratory library',
             'main stacks (gardner)',
             'mark twain papers & project',
             'mathematics statistics library',
             'media resources center',
             'moffitt library',
             'morrison library',
             'music library',
             'newspapers & microforms library',
             'northern regional library facility',
             'optometry and health sciences library',
             'pacific earthquake research (peer) center library',
             'pacific film archive library & film study center',
             'physics-astronomy library',
             'privileges desk',
             'public health library',
             'robbins collection',
             'social research library',
             'south/southeast asia library']
# WSGI application object; the route handlers below attach to it.
app = Flask(__name__)
@app.route('/')
def home():
    """Render the search home page."""
    return render_template('home.html')
@app.route('/', methods=['POST'])
def search_post():
    """Dispatch the submitted query to the matching data source."""
    query = request.form['text'].lower()

    # Dining-hall menus (to-do: can only search non-duplicates).
    if query in m:
        return render_template('home_menu.html',
                               items=search_engine.get_menu_items(m[query]),
                               name=query)

    # Library hours: substring match against the known library names.
    if any(query in library_name for library_name in libraries):
        return render_template('home_lib.html',
                               text=search_engine.get_library_hour(query))

    # Local current weather.
    if query == "weather":
        return render_template('home_weather.html',
                               list=search_engine.get_weather())

    return render_template('home_default.html')
if __name__ == '__main__':
    # debug=True enables the reloader and interactive debugger --
    # development only, never for production deployment.
    app.run(debug = True)
#to-do: pre-load the information before search to save time
#to-do: push to live
#to-do: use API Central to create personalized information search
| {
"content_hash": "f4e31691832f52caddbdcb823d9ba23c",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 73,
"avg_line_length": 30.25,
"alnum_prop": 0.7032664305391578,
"repo_name": "yishuangl/cal",
"id": "0656fcef841c3759264d80df7ed501f5b831a1f4",
"size": "2541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "505"
},
{
"name": "HTML",
"bytes": "3214"
},
{
"name": "Python",
"bytes": "5480"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class DtickValidator(_plotly_utils.basevalidators.AnyValidator):
    # Auto-generated validator for the `dtick` property of
    # `choropleth.colorbar`. Setting `dtick` implies `tickmode="linear"`
    # (see implied_edits below); edits are applied via the "colorbars"
    # edit type.
    def __init__(
        self, plotly_name="dtick", parent_name="choropleth.colorbar", **kwargs
    ):
        super(DtickValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "colorbars"),
            implied_edits=kwargs.pop("implied_edits", {"tickmode": "linear"}),
            **kwargs,
        )
| {
"content_hash": "c58a088220bb9b7c9f58131953a10c43",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 35.57142857142857,
"alnum_prop": 0.6044176706827309,
"repo_name": "plotly/plotly.py",
"id": "a1ad58a5e86670c7805d436d9ad6fafaea6dd64d",
"size": "498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/choropleth/colorbar/_dtick.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
"""Utilities for dynamically importing python files."""
import importlib
import pkgutil
def LoadModulesForPath(path, package_prefix=None):
    """Import and yield every top-level module found on 'path'.

    Example usage:
      LoadModulesForPath(__path__, __name__)

    Args:
      path: Path containing python modules.
      package_prefix: prefix (e.g., package name) to prefix all modules.
          'path' and 'package_prefix' will be joined with a '.'.

    Yields:
      Imported modules.
    """
    # When iter_modules runs inside a zip file, the zipimporter prefixes the
    # names of archived modules but not archived packages.  Since the prefix
    # is required to import a package correctly, we never hand the prefix to
    # iter_modules and instead apply it ourselves afterward.
    qualifier = f"{package_prefix}." if package_prefix else ""
    for _, module_name, _ in pkgutil.iter_modules(path):
        # Ignore recursively listed modules such as 'subpackage.module'.
        if "." in module_name:
            continue
        yield importlib.import_module(qualifier + module_name)
| {
"content_hash": "2ec2f3cc220c97849ff95282f98cbd8e",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 79,
"avg_line_length": 37.172413793103445,
"alnum_prop": 0.7077922077922078,
"repo_name": "GoogleCloudPlatform/PerfKitBenchmarker",
"id": "197462a1e0421bbff159fcf6bbc9d1c3d4584982",
"size": "1689",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "perfkitbenchmarker/import_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3420"
},
{
"name": "HTML",
"bytes": "113073"
},
{
"name": "Jinja",
"bytes": "62005"
},
{
"name": "Lua",
"bytes": "1547"
},
{
"name": "Python",
"bytes": "6076512"
},
{
"name": "R",
"bytes": "1017"
},
{
"name": "Shell",
"bytes": "76164"
},
{
"name": "Tcl",
"bytes": "14601"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import warnings
from typing import Sequence
from airflow.providers.common.sql.operators.sql import SQLExecuteQueryOperator
class ExasolOperator(SQLExecuteQueryOperator):
    """
    Executes sql code in a specific Exasol database

    :param sql: the SQL code to be executed as a single string, or
        a list of str (sql statements), or a reference to a template file.
        template references are recognized by str ending in '.sql'
    :param exasol_conn_id: reference to a specific Exasol database
    :param autocommit: if True, each command is automatically committed.
        (default value: False)
    :param parameters: (optional) the parameters to render the SQL query with.
    :param schema: (optional) name of the schema which overwrite defined one in connection
    """

    template_fields: Sequence[str] = ("sql",)
    template_ext: Sequence[str] = (".sql",)
    template_fields_renderers = {"sql": "sql"}
    ui_color = "#ededed"

    def __init__(
        self, *, exasol_conn_id: str = "exasol_default", schema: str | None = None, **kwargs
    ) -> None:
        # Merge the schema override into hook_params, keeping any keys the
        # caller already supplied there.
        if schema is not None:
            kwargs["hook_params"] = {"schema": schema, **kwargs.pop("hook_params", {})}
        super().__init__(conn_id=exasol_conn_id, **kwargs)
        warnings.warn(
            """This class is deprecated.
            Please use `airflow.providers.common.sql.operators.sql.SQLExecuteQueryOperator`.""",
            DeprecationWarning,
            stacklevel=2,
        )
| {
"content_hash": "0f1fba03a1962ef6e535c037c2603f5d",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 96,
"avg_line_length": 38.146341463414636,
"alnum_prop": 0.6540920716112532,
"repo_name": "apache/airflow",
"id": "253e443b8ee6c0b873e310010b6cff39d61b3103",
"size": "2351",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "airflow/providers/exasol/operators/exasol.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "71458"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "172957"
},
{
"name": "JavaScript",
"bytes": "143915"
},
{
"name": "Jinja",
"bytes": "38911"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23697738"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211306"
},
{
"name": "TypeScript",
"bytes": "521019"
}
],
"symlink_target": ""
} |
import argparse
import collections
import operator
import os
import sys
import time
from typing import Dict, Generator, Tuple, Optional
import mxnet as mx
import numpy as np
from . import arguments
from . import constants as C
from . import vocab
from .data_io import smart_open, get_tokens, tokens2ids
from .log import setup_main_logger, log_sockeye_version
from .utils import check_condition
# Module-level logger; create() and inspect() rebind it with CLI-specific
# settings when this module is run as an entry point.
logger = setup_main_logger(__name__, console=True, file_logging=False)
class Lexicon:
    """
    Lexicon model component. Stores lexicon and supports two operations:
    (1) Given source batch, lookup translation distributions in the lexicon
    (2) Given attention score vector and lexicon lookups, compute the lexical bias for the decoder

    :param source_vocab_size: Source vocabulary size.
    :param target_vocab_size: Target vocabulary size.
    :param learn: Whether to adapt lexical biases during training.
    """

    def __init__(self, source_vocab_size: int, target_vocab_size: int, learn: bool = False) -> None:
        self.source_vocab_size = source_vocab_size
        self.target_vocab_size = target_vocab_size
        # TODO: once half-precision works, use float16 for this variable to save memory
        self.lexicon = mx.sym.Variable(
            name=C.LEXICON_NAME,
            shape=(self.source_vocab_size, self.target_vocab_size),
        )
        if learn:
            logger.info("Learning lexicon bias terms")
        else:
            logger.info("Fixed lexicon bias terms")
            # BlockGrad stops gradients, freezing the bias terms.
            self.lexicon = mx.sym.BlockGrad(self.lexicon)

    def lookup(self, source: mx.sym.Symbol) -> mx.sym.Symbol:
        """
        Lookup lexicon distributions for source.

        :param source: Input. Shape: (batch_size, source_seq_len).
        :return: Lexicon distributions for input. Shape: (batch_size, target_vocab_size, source_seq_len).
        """
        embedded = mx.sym.Embedding(
            data=source,
            input_dim=self.source_vocab_size,
            weight=self.lexicon,
            output_dim=self.target_vocab_size,
            name=C.LEXICON_NAME + "_lookup",
        )
        # (batch, src_len, trg_vocab) -> (batch, trg_vocab, src_len)
        return mx.sym.swapaxes(data=embedded, dim1=1, dim2=2)

    @staticmethod
    def calculate_lex_bias(source_lexicon: mx.sym.Symbol, attention_prob_score: mx.sym.Symbol) -> mx.sym.Symbol:
        """
        Given attention/alignment scores, calculates a weighted sum over lexical distributions
        that serve as a bias for the decoder softmax.

        * https://arxiv.org/pdf/1606.02006.pdf
        * http://www.aclweb.org/anthology/W/W16/W16-4610.pdf

        :param source_lexicon: Lexical biases for sentence Shape: (batch_size, target_vocab_size, source_seq_len).
        :param attention_prob_score: Attention score. Shape: (batch_size, source_seq_len).
        :return: Lexical bias. Shape: (batch_size, 1, target_vocab_size).
        """
        # (batch, src_len) -> (batch, src_len, 1) so batch_dot can consume it.
        scores = mx.sym.expand_dims(attention_prob_score, axis=2)
        # (batch, trg_vocab, 1): per-target-word weighted sum of lexicon rows.
        weighted = mx.sym.batch_dot(source_lexicon, scores)
        # (batch, 1, trg_vocab)
        return mx.sym.swapaxes(data=weighted, dim1=1, dim2=2)
def initialize_lexicon(cmdline_arg: str, vocab_source: Dict[str, int], vocab_target: Dict[str, int]) -> mx.nd.NDArray:
    """
    Reads a probabilistic word lexicon as given by the commandline argument and converts
    to log probabilities.
    If specified, smooths with custom value, uses 0.001 otherwise.

    :param cmdline_arg: Commandline argument of the form "path" or "path:eps".
    :param vocab_source: Source vocabulary.
    :param vocab_target: Target vocabulary.
    :return: Lexicon array. Shape: (vocab_source_size, vocab_target_size).
    """
    fields = cmdline_arg.split(":", 1)
    lexicon = read_lexicon(fields[0], vocab_source, vocab_target)
    assert lexicon.shape == (len(vocab_source), len(vocab_target)), "Invalid lexicon shape"
    # Optional ":eps" suffix overrides the default smoothing constant.
    eps = float(fields[1]) if len(fields) == 2 else 0.001
    check_condition(eps > 0, "epsilon must be >0")
    logger.info("Smoothing lexicon with eps=%.4f", eps)
    return mx.nd.array(np.log(lexicon + eps))
def lexicon_iterator(path: str,
                     vocab_source: Dict[str, int],
                     vocab_target: Dict[str, int]) -> Generator[Tuple[int, int, float], None, None]:
    """
    Yields entries from a translation table of format: src, trg, logprob (tab-separated).

    :param path: Path to lexicon file.
    :param vocab_source: Source vocabulary.
    :param vocab_target: Target vocabulary.
    :return: Generator returning tuples (src_id, trg_id, prob).
    """
    assert C.UNK_SYMBOL in vocab_source
    assert C.UNK_SYMBOL in vocab_target
    unk_src = vocab_source[C.UNK_SYMBOL]
    unk_trg = vocab_target[C.UNK_SYMBOL]
    with smart_open(path) as fin:
        for line in fin:
            src_token, trg_token, logprob = line.rstrip("\n").split("\t")
            # Out-of-vocabulary tokens map to the UNK id on either side;
            # the table stores log probabilities, so convert back.
            yield (vocab_source.get(src_token, unk_src),
                   vocab_target.get(trg_token, unk_trg),
                   np.exp(float(logprob)))
def read_lexicon(path: str, vocab_source: Dict[str, int], vocab_target: Dict[str, int]) -> np.ndarray:
    """
    Loads lexical translation probabilities from a translation table of format: src, trg, logprob.
    Source words unknown to vocab_source are discarded.
    Target words unknown to vocab_target contribute to p(unk|source_word).
    See Incorporating Discrete Translation Lexicons into Neural Machine Translation, Section 3.1 & Equation 5
    (https://arxiv.org/pdf/1606.02006.pdf))

    :param path: Path to lexicon file.
    :param vocab_source: Source vocabulary.
    :param vocab_target: Target vocabulary.
    :return: Lexicon array. Shape: (vocab_source_size, vocab_target_size).
    """
    unk_src = vocab_source[C.UNK_SYMBOL]
    unk_trg = vocab_target[C.UNK_SYMBOL]
    lexicon = np.zeros((len(vocab_source), len(vocab_target)))
    num_entries = 0
    for src_id, trg_id, prob in lexicon_iterator(path, vocab_source, vocab_target):
        # Unknown source words are dropped entirely.
        if src_id == unk_src:
            continue
        if trg_id == unk_trg:
            # Unknown target words accumulate into p(unk | source_word).
            lexicon[src_id, unk_trg] += prob
        else:
            lexicon[src_id, trg_id] = prob
        num_entries += 1
    logger.info("Loaded lexicon from '%s' with %d entries", path, num_entries)
    return lexicon
class LexiconInitializer(mx.initializer.Initializer):
    """
    Initializer that fills the parameter variable named C.LEXICON_NAME with a
    pre-computed lexicon array.

    :param lexicon: Lexicon array.
    """

    def __init__(self, lexicon: mx.nd.NDArray) -> None:
        super().__init__()
        self.lexicon = lexicon

    def _init_default(self, sym_name, arr):
        # This initializer is single-purpose: it must only ever see the
        # lexicon parameter, and that parameter must be a 2d matrix.
        assert sym_name == C.LEXICON_NAME, "This initializer should only be used for a lexicon parameter variable"
        logger.info("Initializing '%s' with lexicon.", sym_name)
        assert len(arr.shape) == 2, "Only 2d weight matrices supported."
        self.lexicon.copyto(arr)
class TopKLexicon:
    """
    Lexicon component that stores the k most likely target words for each source word. Used during
    decoding to restrict target vocabulary for each source sequence.

    :param vocab_source: Trained model source vocabulary.
    :param vocab_target: Trained mode target vocabulary.
    """

    def __init__(self,
                 vocab_source: Dict[str, int],
                 vocab_target: Dict[str, int]) -> None:
        self.vocab_source = vocab_source
        self.vocab_target = vocab_target
        # Shape: (vocab_source_size, k), k determined at create() or load()
        self.lex = None  # type: np.ndarray
        # Always allow special vocab symbols in target vocab.
        # NOTE: np.int was removed in NumPy 1.24; the builtin int is the exact
        # type it used to alias.
        self.always_allow = np.array([vocab_target[symbol] for symbol in C.VOCAB_SYMBOLS], dtype=int)

    def create(self, path: str, k: int = 20):
        """
        Create from a scored lexicon file (fast_align format) using vocab from a trained Sockeye model.

        :param path: Path to lexicon file.
        :param k: Number of target entries per source to keep.
        """
        self.lex = np.zeros((len(self.vocab_source), k), dtype=int)
        # Read lexicon
        src_unk_id = self.vocab_source[C.UNK_SYMBOL]
        trg_unk_id = self.vocab_target[C.UNK_SYMBOL]
        _lex = collections.defaultdict(dict)  # type: Dict[int, Dict[int, float]]
        for src_id, trg_id, prob in lexicon_iterator(path, self.vocab_source, self.vocab_target):
            # Unk token will always be part of target vocab, so no need to track it here
            if src_id == src_unk_id or trg_id == trg_unk_id:
                continue
            _lex[src_id][trg_id] = prob
        # Sort and copy top-k trg_ids to lex array row src_id
        for src_id, trg_entries in _lex.items():
            top_k = list(sorted(trg_entries.items(), key=operator.itemgetter(1), reverse=True))[:k]
            self.lex[src_id, :len(top_k)] = list(trg_id for trg_id, _ in top_k)
            # Free memory after copy
            trg_entries.clear()
        logger.info("Created top-k lexicon from \"%s\", k=%d.", path, k)

    def save(self, path: str):
        """
        Save lexicon in Numpy array format. Lexicon will be specific to Sockeye model.

        :param path: Path to Numpy array output file.
        """
        with open(path, 'wb') as out:
            np.save(out, self.lex)
        logger.info("Saved top-k lexicon to \"%s\"", path)

    def load(self, path: str, k: Optional[int] = None):
        """
        Load lexicon from Numpy array file. The top-k target ids will be sorted by increasing target id.

        :param path: Path to Numpy array file.
        :param k: Optionally load less items than stored in path.
        """
        load_time_start = time.time()
        with open(path, 'rb') as inp:
            _lex = np.load(inp)
        loaded_k = _lex.shape[1]
        if k is not None:
            top_k = min(k, loaded_k)
            if k > loaded_k:
                logger.warning("Can not load top-%d translations from lexicon that "
                               "contains at most %d entries per source.", k, loaded_k)
        else:
            top_k = loaded_k
        self.lex = np.zeros((len(self.vocab_source), top_k), dtype=_lex.dtype)
        for src_id, trg_ids in enumerate(_lex):
            # Sorting by target id speeds up later union1d/set operations.
            self.lex[src_id, :] = np.sort(trg_ids[:top_k])
        load_time = time.time() - load_time_start
        logger.info("Loaded top-%d lexicon from \"%s\" in %.4fs.", top_k, path, load_time)

    def get_trg_ids(self, src_ids: np.ndarray) -> np.ndarray:
        """
        Lookup possible target ids for input sequence of source ids.

        :param src_ids: Sequence(s) of source ids (any shape).
        :return: Possible target ids for source (unique sorted, always includes special symbols).
        """
        # TODO: When MXNet adds support for set operations, we can migrate to avoid conversions to/from NumPy.
        # np.lib.arraysetops was removed from the public API in NumPy 2.0;
        # np.unique / np.union1d are the supported equivalents.
        unique_src_ids = np.unique(src_ids)
        trg_ids = np.union1d(self.always_allow, self.lex[unique_src_ids, :].reshape(-1))
        return trg_ids
def create(args):
    """CLI handler: build a top-k lexicon from a lexical table and save it."""
    global logger
    logger = setup_main_logger('create', console=not args.quiet, file_logging=True, path=args.output + ".log")
    log_sockeye_version(logger)
    logger.info("Creating top-k lexicon from \"%s\"", args.input)
    logger.info("Reading source and target vocab from \"%s\"", args.model)
    src_vocab = vocab.load_source_vocabs(args.model)[0]
    trg_vocab = vocab.load_target_vocab(args.model)
    logger.info("Building top-%d lexicon", args.k)
    top_k_lexicon = TopKLexicon(src_vocab, trg_vocab)
    top_k_lexicon.create(args.input, args.k)
    top_k_lexicon.save(args.output)
def inspect(args):
    """CLI handler: interactively look up a saved top-k lexicon from STDIN."""
    global logger
    logger = setup_main_logger('inspect', console=True, file_logging=False)
    log_sockeye_version(logger)
    logger.info("Inspecting top-k lexicon at \"%s\"", args.lexicon)
    src_vocab = vocab.load_source_vocabs(args.model)[0]
    trg_vocab = vocab.vocab_from_json(os.path.join(args.model, C.VOCAB_TRG_NAME))
    trg_vocab_inv = vocab.reverse_vocab(trg_vocab)
    top_k_lexicon = TopKLexicon(src_vocab, trg_vocab)
    top_k_lexicon.load(args.lexicon, args.k)
    logger.info("Reading from STDIN...")
    for line in sys.stdin:
        tokens = list(get_tokens(line))
        if not tokens:
            continue
        ids = tokens2ids(tokens, src_vocab)
        print("Input: n=%d" % len(tokens), " ".join("%s(%d)" % (tok, i) for tok, i in zip(tokens, ids)))
        trg_ids = top_k_lexicon.get_trg_ids(np.array(ids))
        # Map ids back to surface forms, falling back to UNK for unknown ids.
        trg_tokens = [trg_vocab_inv.get(trg_id, C.UNK_SYMBOL) for trg_id in trg_ids]
        print("Output: n=%d" % len(trg_tokens), " ".join("%s(%d)" % (tok, i) for tok, i in zip(trg_tokens, trg_ids)))
        print()
def main():
    """
    Commandline interface for building/inspecting top-k lexicons using during decoding.
    """
    parser = argparse.ArgumentParser(description="Create or inspect a top-k lexicon for use during decoding.")
    subparsers = parser.add_subparsers(title="Commands")

    create_parser = subparsers.add_parser('create', description="Create top-k lexicon for use during decoding. See contrib/fast_align/README.md for information on creating input lexical tables.")
    arguments.add_lexicon_args(create_parser)
    arguments.add_lexicon_create_args(create_parser)
    arguments.add_logging_args(create_parser)
    create_parser.set_defaults(func=create)

    inspect_parser = subparsers.add_parser('inspect', description="Inspect top-k lexicon for use during decoding.")
    arguments.add_lexicon_inspect_args(inspect_parser)
    arguments.add_lexicon_args(inspect_parser)
    inspect_parser.set_defaults(func=inspect)

    args = parser.parse_args()
    # A subcommand sets 'func'; with no subcommand, show usage and fail.
    if 'func' in args:
        args.func(args)
    else:
        parser.print_help()
        return 1


if __name__ == "__main__":
    main()
| {
"content_hash": "ed677ff9848c4469d8afe59f2fb4ae6d",
"timestamp": "",
"source": "github",
"line_count": 329,
"max_line_length": 194,
"avg_line_length": 43.41337386018237,
"alnum_prop": 0.6328502415458938,
"repo_name": "mlperf/training_results_v0.6",
"id": "e18071658ab553ae44ff8f217cab7ac14941611e",
"size": "14849",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Fujitsu/benchmarks/resnet/implementations/mxnet/sockeye/sockeye/lexicon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13941"
},
{
"name": "C",
"bytes": "208630"
},
{
"name": "C++",
"bytes": "10999411"
},
{
"name": "CMake",
"bytes": "129712"
},
{
"name": "CSS",
"bytes": "64767"
},
{
"name": "Clojure",
"bytes": "396764"
},
{
"name": "Cuda",
"bytes": "2272433"
},
{
"name": "Dockerfile",
"bytes": "67820"
},
{
"name": "Groovy",
"bytes": "62557"
},
{
"name": "HTML",
"bytes": "19753082"
},
{
"name": "Java",
"bytes": "166294"
},
{
"name": "JavaScript",
"bytes": "71846"
},
{
"name": "Julia",
"bytes": "408765"
},
{
"name": "Jupyter Notebook",
"bytes": "2713169"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "115694"
},
{
"name": "Perl",
"bytes": "1535873"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "PowerShell",
"bytes": "6150"
},
{
"name": "Python",
"bytes": "24905683"
},
{
"name": "R",
"bytes": "351865"
},
{
"name": "Roff",
"bytes": "293052"
},
{
"name": "Scala",
"bytes": "1189019"
},
{
"name": "Shell",
"bytes": "794096"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "TypeScript",
"bytes": "361164"
}
],
"symlink_target": ""
} |
import asyncio
import json
from typing import Any
from collections import OrderedDict
from plenum.common.constants import NONCE, TYPE, IDENTIFIER, DATA
from plenum.common.types import f
from plenum.common.util import getCryptonym
from anoncreds.protocol.prover import Prover
from anoncreds.protocol.types import SchemaKey, ID, Claims, ClaimAttributeValues, ProofRequest
from sovrin_client.agent.msg_constants import CLAIM_REQUEST, PROOF, CLAIM_FIELD, \
CLAIM_REQ_FIELD, PROOF_FIELD, \
REQ_AVAIL_CLAIMS, ISSUER_DID, SCHEMA_SEQ_NO, PROOF_REQUEST_FIELD
from sovrin_client.client.wallet.connection import Connection
from sovrin_common.exceptions import LinkNotReady
class AgentProver:
    """Prover-side behaviour for an agent: requesting claims and sending proofs.

    NOTE(review): this class is used mixin-style — ``self.loop``,
    ``self.signAndSendToLink``, ``self.notifyMsgListener``,
    ``self.notifyResponseFromMsg``, ``self._getLinkByTarget`` and
    ``self.wallet`` are provided by the agent class it is combined with;
    confirm against the composing class.
    """

    def __init__(self, prover: Prover):
        self.prover = prover

    def sendRequestForAvailClaims(self, link: Connection):
        # Schedule the async request on the running loop, or drive it to
        # completion ourselves when no loop is running.
        if self.loop.is_running():
            self.loop.call_soon(asyncio.ensure_future,
                                self.sendRequestForAvailClaimsAsync(link))
        else:
            self.loop.run_until_complete(
                self.sendRequestForAvailClaimsAsync(link))

    async def sendRequestForAvailClaimsAsync(self, link: Connection):
        """Ask the agent behind ``link`` which claims it can issue."""
        op = {
            TYPE: REQ_AVAIL_CLAIMS,
            NONCE: link.request_nonce
        }
        try:
            self.signAndSendToLink(msg=op, linkName=link.name)
        except LinkNotReady as ex:
            # Surface the problem to the user instead of crashing the task.
            self.notifyMsgListener(str(ex))

    def sendReqClaim(self, link: Connection, schemaKey):
        # Same sync/async bridging pattern as sendRequestForAvailClaims.
        if self.loop.is_running():
            self.loop.call_soon(asyncio.ensure_future,
                                self.send_claim(link, schemaKey))
        else:
            self.loop.run_until_complete(
                self.send_claim(link, schemaKey))

    # async def send_claim(self, link, claim_to_request):
    #     return await self.sendReqClaimAsync(link, claim_to_request)

    async def send_claim(self, link: Connection, schema_key):
        """Build a claim request for ``schema_key`` and send it over ``link``."""
        name, version, origin = schema_key
        schema_key = SchemaKey(name, version, origin)

        claimReq = await self.prover.createClaimRequest(
            schemaId=ID(schema_key),
            proverId=link.request_nonce,
            reqNonRevoc=False)

        # It has served its purpose by this point. Claim Requests do not need a
        # nonce.
        schema = await self.prover.wallet.getSchema(ID(schema_key))

        claimRequestDetails = {
            SCHEMA_SEQ_NO: schema.seqId,
            ISSUER_DID: origin,
            CLAIM_REQ_FIELD: claimReq.to_str_dict()
        }

        op = {
            TYPE: CLAIM_REQUEST,
            NONCE: link.request_nonce,
            DATA: claimRequestDetails
        }

        self.signAndSendToLink(msg=op, linkName=link.name)

    def handleProofRequest(self, msg):
        """Record an incoming proof request on the matching connection."""
        body, _ = msg
        link = self._getLinkByTarget(getCryptonym(body.get(IDENTIFIER)))
        proofRequest = body.get(PROOF_REQUEST_FIELD)
        proofRequest = ProofRequest.from_str_dict(proofRequest)
        proofReqExist = False

        # Deduplicate by proof request name.
        for request in link.proofRequests:
            if request.name == proofRequest.name:
                proofReqExist = True
                break

        self.notifyMsgListener('    Proof request {} received from {}.\n'
                               .format(proofRequest.name, link.name))

        if not proofReqExist:
            link.proofRequests.append(proofRequest)
        else:
            self.notifyMsgListener('    Proof request {} already exist.\n'
                                   .format(proofRequest.name))

    async def handleReqClaimResponse(self, msg):
        """Process an issued claim received in response to a claim request."""
        body, _ = msg
        issuerId = body.get(IDENTIFIER)
        claim = body[DATA]
        li = self._getLinkByTarget(getCryptonym(issuerId))
        if li:
            schemaId = ID(schemaId=claim[SCHEMA_SEQ_NO])
            schema = await self.prover.wallet.getSchema(schemaId)
            self.notifyResponseFromMsg(li.name, body.get(f.REQ_ID.nm))
            self.notifyMsgListener('    Received claim "{}".\n'.format(schema.name))
            pk = await self.prover.wallet.getPublicKey(schemaId)
            # Deserialize attributes and signature before handing the claim
            # to the anoncreds prover for storage/processing.
            claim_attributes = {k: ClaimAttributeValues.from_str_dict(
                v) for k, v in json.loads(claim[CLAIM_FIELD]).items()}
            claim_signature = Claims.from_str_dict(claim[f.SIG.nm], pk.N)
            await self.prover.processClaim(schemaId, claim_attributes, claim_signature)
        else:
            self.notifyMsgListener("No matching connection found")

    def sendProof(self, link: Connection, proofReq: ProofRequest):
        # Same sync/async bridging pattern as sendRequestForAvailClaims.
        if self.loop.is_running():
            self.loop.call_soon(asyncio.ensure_future,
                                self.sendProofAsync(link, proofReq))
        else:
            self.loop.run_until_complete(self.sendProofAsync(link, proofReq))

    async def sendProofAsync(self, link: Connection, proofRequest: ProofRequest):
        """Generate a proof for ``proofRequest`` and send it over ``link``."""
        # TODO _F_ this nonce should be from the Proof Request, not from an
        # invitation
        # TODO rename presentProof to buildProof or generateProof
        proof = await self.prover.presentProof(proofRequest)
        proof.requestedProof.self_attested_attrs.update(
            proofRequest.selfAttestedAttrs)

        op = {
            TYPE: PROOF,
            NONCE: link.request_nonce,
            PROOF_FIELD: proof.to_str_dict(),
            PROOF_REQUEST_FIELD: proofRequest.to_str_dict()
        }

        self.signAndSendToLink(msg=op, linkName=link.name)

    def handleProofStatusResponse(self, msg: Any):
        """Relay a proof status message to the user."""
        body, _ = msg
        data = body.get(DATA)
        identifier = body.get(IDENTIFIER)
        li = self._getLinkByTarget(getCryptonym(identifier))
        self.notifyResponseFromMsg(li.name, body.get(f.REQ_ID.nm))
        self.notifyMsgListener(data)

    async def getMatchingConnectionsWithReceivedClaimAsync(self, claimName=None):
        """Return (link, claim, attributes) triples for claims already received.

        Attribute values are ``None`` for claims that were offered but never
        issued.
        """
        matchingLinkAndAvailableClaim = self.wallet.getMatchingConnectionsWithAvailableClaim(
            claimName)
        matchingLinkAndReceivedClaim = []
        for li, cl in matchingLinkAndAvailableClaim:
            name, version, origin = cl
            schemaKeyId = ID(
                SchemaKey(name=name, version=version, issuerId=origin))
            schema = await self.prover.wallet.getSchema(schemaKeyId)
            claimAttrs = OrderedDict()
            for attr in schema.attrNames:
                claimAttrs[attr] = None
            attrs = None
            try:
                attrs = await self.prover.wallet.getClaimAttributes(schemaKeyId)
            except ValueError:
                pass  # it means no claim was issued
            if attrs:
                if set(claimAttrs.keys()).intersection(attrs.keys()):
                    for k in claimAttrs.keys():
                        claimAttrs[k] = attrs[k].raw
            matchingLinkAndReceivedClaim.append((li, cl, claimAttrs))
        return matchingLinkAndReceivedClaim

    async def getMatchingRcvdClaimsAsync(self, attributes):
        """Filter received claims down to those covering any of ``attributes``."""
        linksAndReceivedClaim = await self.getMatchingConnectionsWithReceivedClaimAsync()

        attributes = set(attributes)

        matchingLinkAndRcvdClaim = []
        for li, cl, issuedAttrs in linksAndReceivedClaim:
            if attributes.intersection(issuedAttrs.keys()):
                matchingLinkAndRcvdClaim.append((li, cl, issuedAttrs))
        return matchingLinkAndRcvdClaim

    async def getClaimsUsedForAttrs(self, attributes):
        """Greedily pick a set of received claims covering the requested attributes.

        Each attribute key is satisfied by the first claim that provides it;
        a claim is added to the result at most once.
        """
        allMatchingClaims = await self.getMatchingConnectionsWithReceivedClaimAsync()
        alreadySatisfiedKeys = {}
        claimsToUse = []
        alreadyAddedClaims = []

        for li, cl, issuedAttrs in allMatchingClaims:
            issuedClaimKeys = issuedAttrs.keys()
            for key in attributes.keys():
                if key not in alreadySatisfiedKeys and key in issuedClaimKeys:
                    if li not in alreadyAddedClaims:
                        claimsToUse.append((li, cl, issuedAttrs))
                    alreadySatisfiedKeys[key] = True
                    alreadyAddedClaims.append(li)

        return claimsToUse
| {
"content_hash": "bafe5dc8e652443f20051ef029ee51a6",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 94,
"avg_line_length": 39.37378640776699,
"alnum_prop": 0.6272962643323882,
"repo_name": "keenondrums/sovrin-node",
"id": "12ccac52ea00f462ed8f25b9466dd1e106db3ae1",
"size": "8111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sovrin_client/agent/agent_prover.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3195"
},
{
"name": "Python",
"bytes": "1088655"
},
{
"name": "Rust",
"bytes": "25532"
},
{
"name": "Shell",
"bytes": "15720"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand
from salt_observer.models import Minion
from . import ApiCommand
import json
class Command(ApiCommand, BaseCommand):
    help = 'Fetch and save packagedata'

    def save_packages(self, api):
        """Fetch installed-package and upgrade data from the salt API and
        store it on each known Minion.

        Minions reported by salt that are not present in the database are
        skipped (the original code crashed with AttributeError on None).
        """
        packages = api.get('pkg.list_pkgs')
        upgrades = api.get('pkg.list_upgrades')

        for minion_fqdn, minion_packages in packages.items():
            minion = Minion.objects.filter(fqdn=minion_fqdn).first()
            if minion is None:
                # Salt knows this minion but we do not track it; nothing to do.
                continue

            # Some salt responses carry an error string instead of a dict of
            # upgradable packages; drop those entries once (this check is
            # loop-invariant per minion, so it does not belong in the
            # per-package loop) so .get() below falls back to {}.
            if not isinstance(upgrades.get(minion_fqdn, {}), dict):
                del upgrades[minion_fqdn]
            available_upgrades = upgrades.get(minion_fqdn, {})

            minion_package_data = {
                package_name: {
                    'version': package_version,
                    'latest_version': available_upgrades.get(package_name, ''),
                }
                for package_name, package_version in minion_packages.items()
            }

            minion.update_data({'packages': minion_package_data})
            minion.save()

    def handle(self, *args, **kwargs):
        """Log in via the parent ApiCommand, persist package data, log out."""
        api = super().handle(*args, **kwargs)
        self.save_packages(api)
        api.logout()
| {
"content_hash": "6d551990a6541a31ab71c3c33174edc8",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 100,
"avg_line_length": 32.39473684210526,
"alnum_prop": 0.5751421608448416,
"repo_name": "hs-hannover/salt-observer",
"id": "bdf99d6de022ec69f3337a2ae77622ef3aa99710",
"size": "1231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salt_observer/management/commands/fetchpackages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "89802"
},
{
"name": "HTML",
"bytes": "59732"
},
{
"name": "JavaScript",
"bytes": "2187488"
},
{
"name": "Python",
"bytes": "52706"
}
],
"symlink_target": ""
} |
from tkinter import *
class App:
    """Demo window: a colour Listbox beside a Text area that absorbs resizes."""

    def __init__(self, master):
        frame = Frame(master)
        frame.pack(fill=BOTH, expand=1)

        # Listbox
        colour_list = Listbox(frame)
        for colour in ('red', 'green', 'blue', 'yellow', 'pink'):
            colour_list.insert(END, colour)
        colour_list.grid(row=0, column=0, sticky=W + E + N + S)

        # Message
        message = Text(frame, relief=SUNKEN)
        message.grid(row=0, column=1, sticky=W + E + N + S)
        message.insert(END, 'word ' * 100)

        # Column 1 and row 0 take up any extra space when the window grows.
        frame.columnconfigure(1, weight=1)
        frame.rowconfigure(0, weight=1)
# Build the main window, place it as 400x300 at the screen's top-left
# corner, and hand control to Tk's event loop.
root = Tk()
app = App(root)
root.geometry("400x300+0+0")
root.mainloop()
| {
"content_hash": "022e79119e41b95196a9f2d63d372791",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 63,
"avg_line_length": 28.304347826086957,
"alnum_prop": 0.5745007680491552,
"repo_name": "simonmonk/prog_pi_ed2",
"id": "e1a92b11f5295ef0e0f132aa0a52a83c587726ee",
"size": "671",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "07_06_resizing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "394"
},
{
"name": "Python",
"bytes": "44229"
}
],
"symlink_target": ""
} |
"""Manage Robinhood Sessions."""
import uuid
from datetime import datetime, timedelta
from typing import TYPE_CHECKING, Any, Dict, Optional, Union, cast
from urllib.request import getproxies
import certifi
import pytz
import requests
from marshmallow import Schema, fields, post_load
from requests.exceptions import HTTPError
from requests.structures import CaseInsensitiveDict
from yarl import URL
from pyrh import urls
from pyrh.exceptions import AuthenticationError, PyrhValueError
from .base import JSON, BaseModel, BaseSchema
from .oauth import CHALLENGE_TYPE_VAL, OAuth, OAuthSchema
# TODO: merge get and post duplicated code into a single function.

# Types
# requests' CaseInsensitiveDict is only subscriptable for type checkers,
# so alias the generic form under TYPE_CHECKING and the bare class at runtime.
if TYPE_CHECKING:  # pragma: no cover
    CaseInsensitiveDictType = CaseInsensitiveDict[str]
else:
    CaseInsensitiveDictType = CaseInsensitiveDict
# Mapping of protocol name -> proxy URL, as accepted by requests.
Proxies = Dict[str, str]

# Constants
CLIENT_ID: str = "c82SH0WZOsabOXGP2sxqcj34FxkvfnWRZBKlBjFS"
"""Robinhood client id."""

# Default headers attached to every session request; individual calls may
# override them.
HEADERS: CaseInsensitiveDictType = CaseInsensitiveDict(
    {
        "Accept": "*/*",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "en;q=1, fr;q=0.9, de;q=0.8, ja;q=0.7, nl;q=0.6, it;q=0.5",
        "Content-Type": "application/x-www-form-urlencoded; charset=utf-8",
        "X-Robinhood-API-Version": "1.0.0",
        "Connection": "keep-alive",
        "User-Agent": "Robinhood/823 (iPhone; iOS 7.1.2; Scale/2.00)",
    }
)
"""Headers used when performing requests with robinhood api."""

# 8.5 days (you have a small window to refresh after this)
# I would refresh the token proactively every day in a script
EXPIRATION_TIME: int = 734000
"""Default expiration time for requests."""

TIMEOUT: int = 1
"""Default timeout in seconds"""
class SessionManager(BaseModel):
"""Mange connectivity with Robinhood API.
Once logged into the session, this class will manage automatic oauth token update
requests allowing for the automation systems to only require multi-factor
authentication on initialization.
Example:
>>> sm = SessionManager(username="USERNAME", password="PASSWORD")
>>> sm.login() # xdoctest: +SKIP
>>> sm.logout() # xdoctest: +SKIP
If you want to cache your session (you should) then you can use the following
functions. This will allow you to re-cover from a script crash without having to
manually re-enter multi-factor authentication codes.
Example:
>>> dump_session(sm) # xdoctest: +SKIP
>>> load_session(sm) # xdoctest: +SKIP
Args:
username: The username to login to Robinhood.
password: The password to login to Robinhood.
challenge_type: Either sms or email. (only if not using mfa)
headers: Any optional header dict modifications for the session.
proxies: Any optional proxy dict modification for the session.
**kwargs: Any other passed parameters as converted to instance attributes.
Attributes:
session: A requests session instance.
expires_at: The time the oauth token will expire at, default is
1970-01-01 00:00:00.
certs: The path to the desired certs to check against.
device_token: A random guid representing the current device.
access_token: An oauth2 token to connect to the Robinhood API.
refresh_token: An oauth2 refresh token to refresh the access_token when
required.
username: The username to login to Robinhood.
password: The password to login to Robinhood.
challenge_type: Either sms or email. (only if not using mfa)
headers: Any optional header dict modifications for the session.
proxies: Any optional proxy dict modification for the session.
"""
def __init__(
self,
username: str,
password: str,
challenge_type: Optional[str] = "email",
headers: Optional[CaseInsensitiveDictType] = None,
proxies: Optional[Proxies] = None,
**kwargs: Any,
) -> None:
self.session: requests.Session = requests.session()
self.session.headers = HEADERS if headers is None else headers
self.session.proxies = getproxies() if proxies is None else proxies
self.session.verify = certifi.where()
self.expires_at = datetime.strptime("1970", "%Y").replace(
tzinfo=pytz.UTC
) # some time in the past
self.username: str = username
self.password: str = password
if challenge_type not in ["email", "sms"]:
raise ValueError("challenge_type must be email or sms")
self.challenge_type: str = challenge_type
self.device_token: str = kwargs.pop("device_token", str(uuid.uuid4()))
self.oauth: OAuth = kwargs.pop("ouath", OAuth())
super().__init__(**kwargs)
@property
def token_expired(self) -> bool:
"""Check if the issued auth token has expired.
Returns:
True if expired otherwise False
"""
return datetime.now(tz=pytz.UTC) > self.expires_at
@property
def login_set(self) -> bool:
"""Check if login info is properly configured.
Returns:
Whether or not username and password are set.
"""
return self.password is not None and self.username is not None
@property
def authenticated(self) -> bool:
"""Check if the session is authenticated.
Returns:
Whether or not the session is logged in.
"""
return "Authorization" in self.session.headers and not self.token_expired
def login(self, force_refresh: bool = False) -> None:
"""Login to the session.
This method logs the user in if they are not already and otherwise refreshes
the oauth token if it is expired.
Args:
force_refresh: If already logged in, whether or not to force a oauth token
refresh.
"""
if "Authorization" not in self.session.headers:
self._login_oauth2()
elif self.oauth.is_valid and (self.token_expired or force_refresh):
self._refresh_oauth2()
def get(
self,
url: Union[str, URL],
params: Optional[Dict[str, Any]] = None,
headers: Optional[CaseInsensitiveDictType] = None,
raise_errors: bool = True,
return_response: bool = False,
auto_login: bool = True,
schema: Optional[Schema] = None,
many: bool = False,
) -> Any:
"""Run a wrapped session HTTP GET request.
Note:
This method automatically prompts the user to log in if not already logged
in.
Args:
url: The url to get from.
params: query string parameters
headers: A dict adding to and overriding the session headers.
raise_errors: Whether or not raise errors on GET request result.
return_response: Whether or not return a `requests.Response` object or the
JSON response from the request.
auto_login: Whether or not to automatically login on restricted endpoint
errors.
schema: An instance of a `marshmallow.Schema` that represents the object
to build.
many: Whether to treat the output as a list of the passed schema.
Returns:
A JSON dictionary or a constructed object if a schema is passed. If \
`return_response` is set then a tuple of (response, data) is passed.
Raises:
PyrhValueError: If the schema is not an instance of `Schema` and is instead
a class.
"""
# Guard against common gotcha, passing schema class instead of instance.
if isinstance(schema, type):
raise PyrhValueError("Passed Schema should be an instance not a class.")
params = {} if params is None else params
res = self.session.get(
str(url),
params=params,
timeout=TIMEOUT,
headers={} if headers is None else headers,
)
if res.status_code == 401 and auto_login:
self.login(force_refresh=True)
res = self.session.get(
str(url),
params=params,
timeout=TIMEOUT,
headers={} if headers is None else headers,
)
if raise_errors:
res.raise_for_status()
data = res.json() if schema is None else schema.load(res.json(), many=many)
return (data, res) if return_response else data
def post(
self,
url: Union[str, URL],
data: Optional[JSON] = None,
headers: Optional[CaseInsensitiveDictType] = None,
raise_errors: bool = True,
return_response: bool = False,
auto_login: bool = True,
schema: Optional[Schema] = None,
many: bool = False,
) -> Any:
"""Run a wrapped session HTTP POST request.
Note:
This method automatically prompts the user to log in if not already logged
in.
Args:
url: The url to post to.
data: The payload to POST to the endpoint.
headers: A dict adding to and overriding the session headers.
return_response: Whether or not return a `requests.Response` object or the
JSON response from the request.
raise_errors: Whether or not raise errors on POST request.
auto_login: Whether or not to automatically login on restricted endpoint
errors.
schema: An instance of a `marshmallow.Schema` that represents the object
to build.
many: Whether to treat the output as a list of the passed schema.
Returns:
A JSON dictionary or a constructed object if a schema is passed. If \
`return_response` is set then a tuple of (response, data) is passed.
Raises:
PyrhValueError: If the schema is not an instance of `Schema` and is instead
a class.
"""
# Guard against common gotcha, passing schema class instead of instance.
if isinstance(schema, type):
raise PyrhValueError("Passed Schema should be an instance not a class.")
res = self.session.post(
str(url),
data=data,
timeout=TIMEOUT,
headers={} if headers is None else headers,
)
if (res.status_code == 401) and auto_login:
self.login(force_refresh=True)
res = self.session.post(
str(url),
data=data,
timeout=TIMEOUT,
headers={} if headers is None else headers,
)
if raise_errors:
res.raise_for_status()
data = res.json() if schema is None else schema.load(res.json(), many=many)
return (data, res) if return_response else data
def _configure_manager(self, oauth: OAuth) -> None:
"""Process an authentication response dictionary.
This method updates the internal state of the session based on a login or
token refresh request.
Args:
oauth: An oauth response model from a login request.
"""
self.oauth = oauth
self.expires_at = datetime.now(tz=pytz.UTC) + timedelta(
seconds=self.oauth.expires_in
)
self.session.headers.update(
{"Authorization": f"Bearer {self.oauth.access_token}"}
)
    def _challenge_oauth2(self, oauth: OAuth, oauth_payload: JSON) -> OAuth:
        """Process the oauth challenge flow.

        Prompts the user for the challenge code sent by sms/email, posts it to
        the challenge endpoint, and on success re-posts the original payload
        to finalize the token. Recurses to re-prompt while retries remain.

        Args:
            oauth: An oauth response model from a login request.
            oauth_payload: The payload to use once the challenge has been processed.

        Returns:
            An OAuth response model from the login request.

        Raises:
            AuthenticationError: If there is an error in the initial challenge response.

        .. # noqa: DAR202

        .. https://github.com/terrencepreilly/darglint/issues/81
        """
        # login challenge
        challenge_url = urls.build_challenge(oauth.challenge.id)
        print(
            f"Input challenge code from {oauth.challenge.type.capitalize()} "
            f"({oauth.challenge.remaining_attempts}/"
            f"{oauth.challenge.remaining_retries}):"
        )
        challenge_code = input()
        challenge_payload = {"response": str(challenge_code)}
        # The API requires the challenge id echoed back in this header.
        challenge_header = CaseInsensitiveDict(
            {"X-ROBINHOOD-CHALLENGE-RESPONSE-ID": str(oauth.challenge.id)}
        )
        oauth_inner, res = self.post(
            challenge_url,
            data=challenge_payload,
            raise_errors=False,
            headers=challenge_header,
            auto_login=False,
            return_response=True,
            schema=OAuthSchema(),
        )
        if res.status_code == requests.codes.ok:
            # Challenge accepted: replay the original login payload to obtain
            # the actual token.
            try:
                # the cast is required for mypy
                return cast(
                    OAuth,
                    self.post(
                        urls.OAUTH,
                        data=oauth_payload,
                        headers=challenge_header,
                        auto_login=False,
                        schema=OAuthSchema(),
                    ),
                )
            except HTTPError:
                raise AuthenticationError("Error in finalizing auth token")
        elif oauth_inner.is_challenge and oauth_inner.challenge.can_retry:
            # Wrong code but attempts remain: re-prompt recursively.
            print("Invalid code entered")
            return self._challenge_oauth2(oauth, oauth_payload)
        else:
            raise AuthenticationError("Exceeded available attempts or code expired")
def _mfa_oauth2(self, oauth_payload: JSON, attempts: int = 3) -> OAuth:
"""Mfa auth flow.
For people with 2fa.
Args:
oauth_payload: JSON payload to send on mfa approval.
attempts: The number of attempts to allow for mfa approval.
Returns:
An OAuth response model object.
Raises:
AuthenticationError: If the mfa code is incorrect more than specified \
number of attempts.
"""
print(f"Input mfa code:")
mfa_code = input()
oauth_payload["mfa_code"] = mfa_code
oauth, res = self.post(
urls.OAUTH,
data=oauth_payload,
raise_errors=False,
auto_login=False,
return_response=True,
schema=OAuthSchema(),
)
attempts -= 1
if (res.status_code != requests.codes.ok) and (attempts > 0):
print("Invalid mfa code")
return self._mfa_oauth2(oauth_payload, attempts)
elif res.status_code == requests.codes.ok:
# TODO: Write mypy issue on why this needs to be casted?
return cast(OAuth, oauth)
else:
raise AuthenticationError("Too many incorrect mfa attempts")
    def _login_oauth2(self) -> None:
        """Create a new oauth2 token.

        Posts the stored credentials, walks the challenge or mfa flow if the
        API demands one, and installs the resulting token on the session.

        Raises:
            AuthenticationError: If the login credentials are not set, if a challenge
                wasn't accepted, or if an mfa code is not accepted.
        """
        # Drop any stale token before requesting a fresh one.
        self.session.headers.pop("Authorization", None)
        oauth_payload = {
            "password": self.password,
            "username": self.username,
            "grant_type": "password",
            "client_id": CLIENT_ID,
            "expires_in": EXPIRATION_TIME,
            "scope": "internal",
            "device_token": self.device_token,
            "challenge_type": self.challenge_type,
        }
        oauth = self.post(
            urls.OAUTH,
            data=oauth_payload,
            raise_errors=False,
            auto_login=False,
            schema=OAuthSchema(),
        )
        # The API may answer with a secondary auth step instead of a token.
        if oauth.is_challenge:
            oauth = self._challenge_oauth2(oauth, oauth_payload)
        elif oauth.is_mfa:
            oauth = self._mfa_oauth2(oauth_payload)
        if not oauth.is_valid:
            # Surface whichever error field the API populated.
            if hasattr(oauth, "error"):
                msg = f"{oauth.error}"
            elif hasattr(oauth, "detail"):
                msg = f"{oauth.detail}"
            else:
                msg = "Unknown login error"
            raise AuthenticationError(msg)
        else:
            self._configure_manager(oauth)
    def _refresh_oauth2(self) -> None:
        """Refresh an oauth2 token.

        Raises:
            AuthenticationError: If refresh_token is missing or if there is an error
                when trying to refresh a token.
        """
        if not self.oauth.is_valid:
            raise AuthenticationError("Cannot refresh login with unset refresh token")
        relogin_payload = {
            "grant_type": "refresh_token",
            "refresh_token": self.oauth.refresh_token,
            "scope": "internal",
            "client_id": CLIENT_ID,
            "expires_in": EXPIRATION_TIME,
        }
        # The stale bearer token must not accompany the refresh request.
        self.session.headers.pop("Authorization", None)
        try:
            oauth = self.post(
                urls.OAUTH,
                data=relogin_payload,
                auto_login=False,  # avoid recursing back into login()
                schema=OAuthSchema(),
            )
        except HTTPError:
            raise AuthenticationError("Failed to refresh token")
        self._configure_manager(oauth)
def logout(self) -> None:
"""Logout from the session.
Raises:
AuthenticationError: If there is an error when logging out.
"""
logout_payload = {"client_id": CLIENT_ID, "token": self.oauth.refresh_token}
try:
self.post(urls.OAUTH_REVOKE, data=logout_payload, auto_login=False)
self.oauth = OAuth()
self.session.headers.pop("Authorization", None)
except HTTPError:
raise AuthenticationError("Could not log out")
def __repr__(self) -> str:
"""Return the object as a string.
Returns:
The string representation of the object.
"""
return f"SessionManager<{self.username}>"
class SessionManagerSchema(BaseSchema):
    """Schema class for the SessionManager model."""

    __model__ = SessionManager

    # "Email" is untyped upstream, hence the ignore in this typed context.
    username = fields.Email()  # type: ignore
    password = fields.Str()
    challenge_type = fields.Str(validate=CHALLENGE_TYPE_VAL)
    oauth = fields.Nested(OAuthSchema)
    expires_at = fields.AwareDateTime()
    device_token = fields.Str()
    headers = fields.Dict()
    proxies = fields.Dict()

    @post_load
    def make_object(self, data: JSON, **kwargs: Any) -> SessionManager:
        """Override default method to configure SessionManager object on load.

        Args:
            data: The JSON dictionary to process
            **kwargs: Not used but matches signature of `BaseSchema.make_object`

        Returns:
            A configured instance of SessionManager.
        """
        # oauth/expires_at are not constructor kwargs: pull them out before
        # instantiating the model and re-apply them afterwards.
        oauth = data.pop("oauth", None)
        expires_at = data.pop("expires_at", None)
        session_manager = self.__model__(**data)
        if oauth is not None and oauth.is_valid:
            session_manager.oauth = oauth
            # Re-install the Authorization header for the restored token.
            session_manager.session.headers.update(
                {"Authorization": f"Bearer {session_manager.oauth.access_token}"}
            )
        if expires_at:
            session_manager.expires_at = expires_at
        return session_manager
| {
"content_hash": "e900897faa0ebbf726c235ceef191d56",
"timestamp": "",
"source": "github",
"line_count": 554,
"max_line_length": 88,
"avg_line_length": 35.26353790613718,
"alnum_prop": 0.5951576576576577,
"repo_name": "Jamonek/Robinhood",
"id": "3614d7750c0d3b54a6722083c7e6e4a01de316b8",
"size": "19536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyrh/models/sessionmanager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "71745"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
import torch
import torch.distributed as dist
import logging
import ray
from ray.tune import Trainable
from ray.tune.resources import Resources
from ray.experimental.sgd.pytorch.pytorch_runner import PyTorchRunner
from ray.experimental.sgd.pytorch.distributed_pytorch_runner import (
DistributedPyTorchRunner)
from ray.experimental.sgd.pytorch import pytorch_utils
from ray.experimental.sgd import utils
logger = logging.getLogger(__name__)
class PyTorchTrainer(object):
    """Train a PyTorch model using distributed PyTorch.

    Launches a set of actors which connect via distributed PyTorch and
    coordinate gradient updates to train the provided model.
    """

    def __init__(self,
                 model_creator,
                 data_creator,
                 optimizer_creator=pytorch_utils.sgd_mse_optimizer,
                 config=None,
                 num_replicas=1,
                 use_gpu=False,
                 batch_size=16,
                 backend="auto"):
        """Sets up the PyTorch trainer.

        Args:
            model_creator (dict -> torch.nn.Module): creates the model
                using the config.
            data_creator (dict -> Dataset, Dataset): creates the training
                and validation data sets using the config.
            optimizer_creator (torch.nn.Module, dict -> loss, optimizer):
                creates the loss and optimizer using the model and the config.
            config (dict): configuration passed to 'model_creator',
                'data_creator', and 'optimizer_creator'.
            num_replicas (int): the number of workers used in distributed
                training.
            use_gpu (bool): Sets resource allocation for workers to 1 GPU
                if true.
            batch_size (int): batch size for an update.
            backend (string): backend used by distributed PyTorch.
        """
        # TODO: add support for mixed precision
        # TODO: add support for callbacks
        if num_replicas > 1 and not dist.is_available():
            raise ValueError(
                ("Distributed PyTorch is not supported on macOS. "
                 "To run without distributed PyTorch, set 'num_replicas=1'. "
                 "For more information, see "
                 "https://github.com/pytorch/examples/issues/467."))
        self.model_creator = model_creator
        self.config = {} if config is None else config
        self.optimizer_timer = utils.TimerStat(window_size=1)
        # "auto" maps to NCCL when training on GPUs, otherwise Gloo.
        if backend == "auto":
            backend = "nccl" if use_gpu else "gloo"
        logger.info("Using {} as backend.".format(backend))
        if num_replicas == 1:
            # Generate actor class (non-distributed, single-worker runner)
            Runner = ray.remote(
                num_cpus=1, num_gpus=int(use_gpu))(PyTorchRunner)
            # Start workers
            self.workers = [
                Runner.remote(model_creator, data_creator, optimizer_creator,
                              self.config, batch_size)
            ]
            # Get setup tasks in order to throw errors on failure
            ray.get(self.workers[0].setup.remote())
        else:
            # Generate actor class (distributed runner)
            Runner = ray.remote(
                num_cpus=1, num_gpus=int(use_gpu))(DistributedPyTorchRunner)
            # Compute batch size per replica; the effective global batch size
            # becomes batch_size_per_replica * num_replicas.
            batch_size_per_replica = batch_size // num_replicas
            if batch_size % num_replicas > 0:
                new_batch_size = batch_size_per_replica * num_replicas
                logger.warning(
                    ("Changing batch size from {old_batch_size} to "
                     "{new_batch_size} to evenly distribute batches across "
                     "{num_replicas} replicas.").format(
                         old_batch_size=batch_size,
                         new_batch_size=new_batch_size,
                         num_replicas=num_replicas))
            # Start workers
            self.workers = [
                Runner.remote(model_creator, data_creator, optimizer_creator,
                              self.config, batch_size_per_replica, backend)
                for i in range(num_replicas)
            ]
            # Compute URL for initializing distributed PyTorch
            ip = ray.get(self.workers[0].get_node_ip.remote())
            port = ray.get(self.workers[0].find_free_port.remote())
            address = "tcp://{ip}:{port}".format(ip=ip, port=port)
            # Get setup tasks in order to throw errors on failure
            ray.get([
                worker.setup.remote(address, i, len(self.workers))
                for i, worker in enumerate(self.workers)
            ])

    def train(self):
        """Runs a training epoch.

        Returns the first worker's stats dict with "train_loss" replaced by
        the mean train loss across all workers.
        """
        with self.optimizer_timer:
            worker_stats = ray.get([w.step.remote() for w in self.workers])
        train_stats = worker_stats[0].copy()
        train_stats["train_loss"] = np.mean(
            [s["train_loss"] for s in worker_stats])
        return train_stats

    def validate(self):
        """Evaluates the model on the validation data set.

        Returns the first worker's stats dict with "validation_loss" replaced
        by the mean validation loss across all workers.
        """
        worker_stats = ray.get([w.validate.remote() for w in self.workers])
        validation_stats = worker_stats[0].copy()
        validation_stats["validation_loss"] = np.mean(
            [s["validation_loss"] for s in worker_stats])
        return validation_stats

    def get_model(self):
        """Returns the learned model (rebuilt from worker 0's state)."""
        model = self.model_creator(self.config)
        state = ray.get(self.workers[0].get_state.remote())
        model.load_state_dict(state["model"])
        return model

    def save(self, checkpoint):
        """Saves the model at the provided checkpoint.

        Args:
            checkpoint (str): Path to target checkpoint file.
        """
        state = ray.get(self.workers[0].get_state.remote())
        torch.save(state, checkpoint)
        return checkpoint

    def restore(self, checkpoint):
        """Restores the model from the provided checkpoint.

        Args:
            checkpoint (str): Path to target checkpoint file.
        """
        state = torch.load(checkpoint)
        # Put the state in the object store once so all workers share it.
        state_id = ray.put(state)
        ray.get([worker.set_state.remote(state_id) for worker in self.workers])

    def shutdown(self):
        """Shuts down workers and releases resources."""
        for worker in self.workers:
            worker.shutdown.remote()
            worker.__ray_terminate__.remote()
class PyTorchTrainable(Trainable):
    """Tune-compatible wrapper around ``PyTorchTrainer``."""

    @classmethod
    def default_resource_request(cls, config):
        """Describe the resources needed for ``num_replicas`` workers."""
        num_replicas = config["num_replicas"]
        gpus_per_replica = int(config["use_gpu"])
        return Resources(
            cpu=0,
            gpu=0,
            extra_cpu=num_replicas,
            extra_gpu=gpus_per_replica * num_replicas)

    def _setup(self, config):
        """Instantiate the trainer from the Tune config dict."""
        self._trainer = PyTorchTrainer(
            model_creator=config["model_creator"],
            data_creator=config["data_creator"],
            optimizer_creator=config["optimizer_creator"],
            config=config,
            num_replicas=config["num_replicas"],
            use_gpu=config["use_gpu"],
            batch_size=config["batch_size"],
            backend=config["backend"])

    def _train(self):
        """Run one training epoch followed by validation; merge the stats."""
        stats = self._trainer.train()
        stats.update(self._trainer.validate())
        return stats

    def _save(self, checkpoint_dir):
        """Checkpoint the model under ``checkpoint_dir``."""
        checkpoint_path = os.path.join(checkpoint_dir, "model.pth")
        return self._trainer.save(checkpoint_path)

    def _restore(self, checkpoint_path):
        """Restore trainer state from ``checkpoint_path``."""
        return self._trainer.restore(checkpoint_path)

    def _stop(self):
        """Shut down all remote workers."""
        self._trainer.shutdown()
| {
"content_hash": "053b85d2b25e5c6169bdb4360407b70e",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 79,
"avg_line_length": 37.29807692307692,
"alnum_prop": 0.5845578757411704,
"repo_name": "ujvl/ray-ng",
"id": "91fc2b63ec9fada18aad2ac0363eb09b407df0dd",
"size": "7758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/experimental/sgd/pytorch/pytorch_trainer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "349753"
},
{
"name": "C++",
"bytes": "6547"
},
{
"name": "CMake",
"bytes": "4927"
},
{
"name": "Makefile",
"bytes": "5285"
},
{
"name": "Python",
"bytes": "260095"
},
{
"name": "Shell",
"bytes": "6666"
}
],
"symlink_target": ""
} |
from PIL import Image
import requests
from io import BytesIO
import webbrowser # This module can control the browser
import json # Json encoder/decoder
from bs4 import BeautifulSoup # Module to sort through HTML
import lxml # Module to prepare html for BeautifulSoup
from urllib.request import urlopen
import sys # Allow more control over printing
import string # More ways to manipulate strings
import unidecode # Decodes weird characters
import youtube_dl # For downloading YouTube videos/audio
import eyed3 # For editing ID3 tags for mp3 file
import os # More control over Mac file system
numShow = 5  # How many search results to show the user

# Prompt User for Keywords for Song
userSearch = input("Search for song: ")  # Reads input as a string
userSearch = userSearch.strip()  # Remove extraneous white space

# Search for song in iTunes Store
# Documentation: http://www.apple.com/itunes/affiliates/resources/documentation/itunes-store-web-service-search-api.html
baseURL = "https://itunes.apple.com/search?"
searchKeys = [
    ["term", userSearch],
    ["country", "US"],
    ["media", "music"],
    ["entity", "song"],
    ["limit", "50"],
    ["lang", "en_us"],
    ["explicit", "yes"]
]
# Build the query string; spaces must be encoded as %20.
finalURL = baseURL + "&".join(
    key + "=" + str(value).replace(" ", "%20") for key, value in searchKeys
)

# Retrieve and Save iTunes JSON Data
response = urlopen(finalURL)  # Get HTML source code
html = response.read()  # HTML source code
soup = BeautifulSoup(html, "lxml")  # Using lxml parser
print("")
print("*********** Found iTunes data ***********")
print("")
rawJSON = soup.find('p').text  # Just the json text
# BUG FIX: str.strip() returns a new string; the result was previously
# discarded ("rawJSON.strip()" on its own line had no effect).
rawJSON = rawJSON.strip()

# Parse iTunes JSON Data
iTunesObj = json.loads(rawJSON)  # Decode JSON
results = iTunesObj['results']
# Show at most numShow results (fewer if the API returned fewer).
b = min(numShow, len(results))
for i in range(b):
    sys.stdout.write("(%i) Track Name: " % i)
    sys.stdout.flush()  # No line break
    print(results[i]['trackName'])  # Adds a line break after
    print(" Artist: %s" % results[i]['artistName'])
    print(" Album: %s" % results[i]['collectionName'])
    print(" Genre: %s" % results[i]['primaryGenreName'])
    print("")
print("Which song is the one you were looking for?")
iTunesSearchSelection = input("Type the respective index: ")
songData = results[int(iTunesSearchSelection)]
print()  # Line break
print("Selected:")
print("%s by %s" % (songData['trackName'], songData['artistName']))
print(songData)
print()  # Line break
# ******************* Find song on YouTube *******************
searchAudio = input("Search for audio video? (y/n) ")  # Ask if want to search for audio on YouTube
extra = ""
# BUG FIX: "is" compares object identity, not string contents; CPython only
# sometimes interns short strings, so `searchAudio is "y"` is unreliable.
if searchAudio == "y":  # If only want to search for audio videos
    extra = " Audio"  # add on 'audio' to search
baseURL = "https://www.youtube.com/results?search_query="
YouTubeSearch = songData['trackName'] + " " + songData['artistName'] + extra
print()  # Line break
YouTubeSearch = unidecode.unidecode(YouTubeSearch)  # Remove complex unicode characters
print("Searching for '%s' on YouTube" % YouTubeSearch)
print()  # Line break
YouTubeSearch = YouTubeSearch.replace(" ", "+")  # Replace spaces with '+'
finalURL = baseURL + YouTubeSearch  # Final URL
print(finalURL)
"""
response = urllib.urlopen(finalURL) #Get HTML source code
html = response.read() #HTML source code
soup = BeautifulSoup(html, "lxml") # Using lxml parser
links = soup.find_all("a")
print(links)
videoLinks = [] # Start empty
# videoTitleElements = soup.findAll("h3", { "class": "title-and-badge style-scope ytd-video-renderer" }) # Get video titles then get video links
videoTitleElements = soup.findAll("a", { "id": "video-title" }) # Get video titles then get video links
print(videoTitleElements)
for title in videoTitleElements:
print("Found title %s" % title)
link = title.findAll("a") # Get link within the title
videoLinks.append(link[0]) # Add link to master list
videoUploaders = [] # Start empty
videoUploaderElements = soup.findAll("div", { "class": "yt-lockup-byline " }) # Get video uploader divs
for element in videoUploaderElements:
uploader = element.findAll("a") # Extract the uploader link
if len(uploader) is not 0:
videoUploaders.append(uploader[0]) # Append to master list
videoTimes = soup.findAll("div", { "class": "ytd-thumbnail-overlay-time-status-renderer" }) # In case there are playlists, find the div
videos = [];
# Stores all the results on the page except for the last 3 hits on the page
upper = len(videoTimes) - 3
numPlaylists = 0
for i in range(0, upper):
# print i
# print(videoTimes[i])
time = videoTimes[i].findAll("span", { "class": "video-time" }) # Find within the larger div
if not time: # If array is empty (ie. no time found for that video)
numPlaylists += 1
# print "Found a playlist"
else: # If not a playlists
# The video must be a playlist
time = time[0] # First result
link = "https://www.youtube.com" + videoLinks[i].get('href')
# print(videoLinks[i].contents[0])
# print(link)
# print videoUploaders[i]
# Structure of array:
# [name, link, uploader, length]
videos.append(
[
videoLinks[i].contents[0],
link,
videoUploaders[i].contents[0],
time.text
]
)
# Only returns up to specified number
print "Found %s playlist(s)" % numPlaylists
for i in range(0, numShow):
video = videos[i]
sys.stdout.write("(%i) Video name: " % i)
sys.stdout.flush() # No line break
print video[0] # Adds a line break after
print " Link: %s" % video[1]
print " Uploader: %s" % video[2]
print " Length: %s" % video[3]
print("")
milliseconds = songData['trackTimeMillis']
x = milliseconds / 1000
seconds = x % 60
x /= 60
minutes = x % 60
time = str(minutes) + ":" + str(seconds)
print "Which video is the one you were looking for?"
print "The iTunes version is: %s" % time
YouTubeSelection = input("Type the respective index: ")
print "" # Line break
data = videos[YouTubeSelection]
"""
# Manual link input (the scraper above is disabled).
print("Which video is the one you were looking for?")
videoLink = input("copy paste the link: ")
data = ['', videoLink]

fileName = songData['artistName'] + " - " + songData['trackName']  # Declare file name
filePath = "~/Desktop/"  # Declare file path
outputTemplate = filePath + fileName + ".%(ext)s"
# Post-process the downloaded stream into a 192kbps mp3.
mp3Postprocessor = {
    'key': 'FFmpegExtractAudio',
    'preferredcodec': 'mp3',
    'preferredquality': '192'  # 128, 160, 192, 210, 256
}
ydl_opts = {  # Set options
    'format': 'bestaudio/best',
    'outtmpl': outputTemplate,
    'postprocessors': [mp3Postprocessor],
    'quiet': False
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    print(data[1])
    ydl.download([data[1]])  # Download the song
# ******************* Find Image Artwork *******************
# print("Finding Google Image for album artwork")
# Add code here:

# ******************* Update ID3 Tags *******************
mp3Path = os.path.expanduser(filePath + fileName + ".mp3")
year = str(songData['releaseDate'])
year = int(year[:4])  # Keep only the 4-digit release year
audiofile = eyed3.load(mp3Path)
audiofile.tag.title = songData['trackName']
audiofile.tag.artist = songData['artistName']
audiofile.tag.album = songData['collectionName']
audiofile.tag.album_artist = songData['artistName']  # This needs to be changed - need to be able to find album artist, not song artist
audiofile.tag.track_num = (songData['trackNumber'], songData['trackCount'])
audiofile.tag.disc_num = (songData['discNumber'], songData['discCount'])
audiofile.tag.genre = songData['primaryGenreName']
audiofile.tag.release_date = year
audiofile.tag.orig_release_date = year
audiofile.tag.recording_date = year
audiofile.tag.encoding_date = year
# BUG FIX: this attribute was misspelled "taggin_date", which silently set an
# unused attribute instead of eyeD3's real tagging_date field.
audiofile.tag.tagging_date = year
# Append Image
# Reference: http://tuxpool.blogspot.com/2013/02/how-to-store-images-in-mp3-files-using.html
image_url = songData['artworkUrl100'].replace('100x100', '500x500')  # request a larger rendition
response = requests.get(image_url)
audiofile.tag.images.set(3, BytesIO(response.content).read(), "image/jpeg", "Description")  # 3 for front cover, 4 for back, 0 for other
audiofile.tag.save()
print()  # Line break
print("Updated ID3 Tags")
print()  # Line break
print("************** Complete **************")
| {
"content_hash": "3abc2b15aa436b0cb3bd754a012350a5",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 144,
"avg_line_length": 35.17307692307692,
"alnum_prop": 0.6649535265172225,
"repo_name": "khou22/MusicDownloader",
"id": "f0d944d0c86d8b4a44b4d9c302ca5f53c8803986",
"size": "9303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MusicDownloader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "24404"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.test import TestCase
from django.utils import unittest
from django.conf.urls import patterns
from rest_framework import permissions, status
from rest_framework.authentication import OAuth2Authentication
from rest_framework.compat import oauth2_provider
from rest_framework.test import APIRequestFactory, APIClient
from rest_framework.views import APIView
from rest_framework_jwt import utils
from rest_framework_jwt.settings import api_settings, DEFAULTS
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
DJANGO_OAUTH2_PROVIDER_NOT_INSTALLED = 'django-oauth2-provider not installed'
factory = APIRequestFactory()
class MockView(APIView):
    """Minimal protected view used as the target endpoint in the auth tests."""

    permission_classes = (permissions.IsAuthenticated,)

    # Shared dummy payload returned by both handlers.
    _payload = {'a': 1, 'b': 2, 'c': 3}

    def get(self, request):
        return HttpResponse(self._payload)

    def post(self, request):
        return HttpResponse(self._payload)
# Route each auth-class permutation to its own endpoint so the tests can
# exercise JWT alone, and JWT combined with OAuth2 in either priority order.
urlpatterns = patterns(
    '',
    (r'^jwt/$', MockView.as_view(
        authentication_classes=[JSONWebTokenAuthentication])),
    (r'^jwt-oauth2/$', MockView.as_view(
        authentication_classes=[
            JSONWebTokenAuthentication, OAuth2Authentication])),
    (r'^oauth2-jwt/$', MockView.as_view(
        authentication_classes=[
            OAuth2Authentication, JSONWebTokenAuthentication])),
)
class JSONWebTokenAuthenticationTests(TestCase):
"""JSON Web Token Authentication"""
urls = 'tests.test_authentication'
def setUp(self):
self.csrf_client = APIClient(enforce_csrf_checks=True)
self.username = 'jpueblo'
self.email = 'jpueblo@example.com'
self.user = User.objects.create_user(self.username, self.email)
def test_post_form_passing_jwt_auth(self):
"""
Ensure POSTing form over JWT auth with correct credentials
passes and does not require CSRF
"""
payload = utils.jwt_payload_handler(self.user)
token = utils.jwt_encode_handler(payload)
auth = 'JWT {0}'.format(token)
response = self.csrf_client.post(
'/jwt/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_post_json_passing_jwt_auth(self):
"""
Ensure POSTing JSON over JWT auth with correct credentials
passes and does not require CSRF
"""
payload = utils.jwt_payload_handler(self.user)
token = utils.jwt_encode_handler(payload)
auth = 'JWT {0}'.format(token)
response = self.csrf_client.post(
'/jwt/', {'example': 'example'},
HTTP_AUTHORIZATION=auth, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_post_form_failing_jwt_auth(self):
"""
Ensure POSTing form over JWT auth without correct credentials fails
"""
response = self.csrf_client.post('/jwt/', {'example': 'example'})
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_post_json_failing_jwt_auth(self):
"""
Ensure POSTing json over JWT auth without correct credentials fails
"""
response = self.csrf_client.post('/jwt/', {'example': 'example'},
format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response['WWW-Authenticate'], 'JWT realm="api"')
def test_post_no_jwt_header_failing_jwt_auth(self):
"""
Ensure POSTing over JWT auth without credentials fails
"""
auth = 'JWT'
response = self.csrf_client.post(
'/jwt/', {'example': 'example'},
HTTP_AUTHORIZATION=auth, format='json')
msg = 'Invalid Authorization header. No credentials provided.'
self.assertEqual(response.data['detail'], msg)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response['WWW-Authenticate'], 'JWT realm="api"')
def test_post_invalid_jwt_header_failing_jwt_auth(self):
"""
Ensure POSTing over JWT auth without correct credentials fails
"""
auth = 'JWT abc abc'
response = self.csrf_client.post(
'/jwt/', {'example': 'example'},
HTTP_AUTHORIZATION=auth, format='json')
msg = ('Invalid Authorization header. Credentials string '
'should not contain spaces.')
self.assertEqual(response.data['detail'], msg)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response['WWW-Authenticate'], 'JWT realm="api"')
def test_post_expired_token_failing_jwt_auth(self):
"""
Ensure POSTing over JWT auth with expired token fails
"""
payload = utils.jwt_payload_handler(self.user)
payload['exp'] = 1
token = utils.jwt_encode_handler(payload)
auth = 'JWT {0}'.format(token)
response = self.csrf_client.post(
'/jwt/', {'example': 'example'},
HTTP_AUTHORIZATION=auth, format='json')
msg = 'Signature has expired.'
self.assertEqual(response.data['detail'], msg)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response['WWW-Authenticate'], 'JWT realm="api"')
def test_post_invalid_token_failing_jwt_auth(self):
    """Ensure POSTing over JWT auth with invalid token fails."""
    # 'abc123' is not a decodable JWT, so signature decoding fails.
    rsp = self.csrf_client.post(
        '/jwt/', {'example': 'example'},
        HTTP_AUTHORIZATION='JWT abc123', format='json')
    self.assertEqual(rsp.data['detail'], 'Error decoding signature.')
    self.assertEqual(rsp.status_code, status.HTTP_401_UNAUTHORIZED)
    self.assertEqual(rsp['WWW-Authenticate'], 'JWT realm="api"')
@unittest.skipUnless(oauth2_provider, DJANGO_OAUTH2_PROVIDER_NOT_INSTALLED)
def test_post_passing_jwt_auth_with_oauth2_priority(self):
    """
    Ensure POSTing over JWT auth with correct credentials
    passes and does not require CSRF when OAuth2Authentication
    has priority on authentication_classes
    """
    # Build a valid JWT for self.user and send it to the endpoint that
    # lists OAuth2 authentication first ('/oauth2-jwt/'); JWT should still
    # succeed as the fallback authenticator.
    payload = utils.jwt_payload_handler(self.user)
    token = utils.jwt_encode_handler(payload)
    auth = 'JWT {0}'.format(token)
    response = self.csrf_client.post(
        '/oauth2-jwt/', {'example': 'example'},
        HTTP_AUTHORIZATION=auth, format='json')
    # The trailing `response` argument includes the full response in the
    # failure message if the status code does not match.
    self.assertEqual(response.status_code, status.HTTP_200_OK, response)
@unittest.skipUnless(oauth2_provider, DJANGO_OAUTH2_PROVIDER_NOT_INSTALLED)
def test_post_passing_oauth2_with_jwt_auth_priority(self):
    """
    Ensure POSTing over OAuth2 with correct credentials
    passes and does not require CSRF when JSONWebTokenAuthentication
    has priority on authentication_classes
    """
    # Create a real OAuth2 client + access token in the test database so a
    # Bearer token can authenticate against '/jwt-oauth2/', where JWT auth
    # is listed first and OAuth2 acts as the fallback authenticator.
    Client = oauth2_provider.oauth2.models.Client
    AccessToken = oauth2_provider.oauth2.models.AccessToken
    oauth2_client = Client.objects.create(
        user=self.user,
        client_type=0,
    )
    access_token = AccessToken.objects.create(
        user=self.user,
        client=oauth2_client,
    )
    auth = 'Bearer {0}'.format(access_token.token)
    response = self.csrf_client.post(
        '/jwt-oauth2/', {'example': 'example'},
        HTTP_AUTHORIZATION=auth, format='json')
    # The trailing `response` argument includes the full response in the
    # failure message if the status code does not match.
    self.assertEqual(response.status_code, status.HTTP_200_OK, response)
def test_post_form_passing_jwt_invalid_payload(self):
    """Ensure POSTing json over JWT auth with invalid payload fails."""
    # A payload with a null email encodes fine but is rejected as invalid.
    token = utils.jwt_encode_handler({'email': None})
    rsp = self.csrf_client.post(
        '/jwt/', {'example': 'example'},
        HTTP_AUTHORIZATION='JWT {0}'.format(token))
    self.assertEqual(rsp.data['detail'], 'Invalid payload')
    self.assertEqual(rsp.status_code, status.HTTP_401_UNAUTHORIZED)
def test_different_auth_header_prefix(self):
    """
    Ensure using a different setting for `JWT_AUTH_HEADER_PREFIX` and
    with correct credentials passes.
    """
    api_settings.JWT_AUTH_HEADER_PREFIX = 'Bearer'
    try:
        payload = utils.jwt_payload_handler(self.user)
        token = utils.jwt_encode_handler(payload)
        auth = 'Bearer {0}'.format(token)
        response = self.csrf_client.post(
            '/jwt/', {'example': 'example'},
            HTTP_AUTHORIZATION=auth, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    finally:
        # Restore the original setting even when the assertion fails, so a
        # failing run cannot leak the 'Bearer' prefix into later tests.
        api_settings.JWT_AUTH_HEADER_PREFIX = DEFAULTS['JWT_AUTH_HEADER_PREFIX']
| {
"content_hash": "88375af39f77cddd3ea3fa9be1dea031",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 80,
"avg_line_length": 36.50813008130081,
"alnum_prop": 0.6412426233158891,
"repo_name": "vforgione/django-rest-framework-jwt",
"id": "f12778520d363d8b24d0bdb7f5ab5758a0a83401",
"size": "8981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_authentication.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44130"
}
],
"symlink_target": ""
} |
import os
import mock
from pecan.testing import load_test_app
from tuskar.manager import models as manager_models
from tuskar.storage import exceptions as storage_exceptions
from tuskar.tests import base
# Collection endpoint for all roles, and the per-plan roles collection used
# to add/remove roles for the stubbed plan id 'plan_uuid'.
URL_ROLES = '/v2/roles'
URL_PLAN_ROLES = '/v2/plans/plan_uuid/roles'
class RolesTests(base.TestCase):
    """API tests for the v2 role controllers.

    The manager layer (RoleManager / PlansManager) is mocked in every test,
    so these tests exercise only routing, serialization, and HTTP status
    mapping of the Pecan application.
    """

    def setUp(self):
        super(RolesTests, self).setUp()
        # Load the Pecan test app from the API config module shipped in the
        # source tree, four directories up from this test module.
        config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                   '..', '..', '..', '..', 'api', 'config.py')
        self.app = load_test_app(config_file)

    @mock.patch('tuskar.manager.role.RoleManager.list_roles')
    def test_get_all(self, mock_list):
        """GET /v2/roles returns all roles serialized as a JSON list."""
        # Setup
        mock_list.return_value = [
            manager_models.Role('a', 'n1', 1, 'd1', 't1'),
            manager_models.Role('b', 'n2', 2, 'd2', 't2'),
        ]

        # Test
        response = self.app.get(URL_ROLES)
        result = response.json

        # Verify
        mock_list.assert_called_once_with(only_latest=False)
        self.assertEqual(response.status_int, 200)
        self.assertTrue(isinstance(result, list))
        self.assertEqual(2, len(result))
        self.assertEqual(result[0]['uuid'], 'a')
        self.assertEqual(result[0]['name'], 'n1')
        self.assertEqual(result[0]['description'], 'd1')
        self.assertEqual(result[1]['uuid'], 'b')
        self.assertEqual(result[1]['name'], 'n2')
        self.assertEqual(result[1]['description'], 'd2')

    @mock.patch('tuskar.manager.plan.PlansManager.add_role_to_plan')
    def test_post(self, mock_add):
        """POST to a plan's roles adds the role and returns 201 + the plan."""
        # Setup
        p = manager_models.DeploymentPlan('a', 'n', 'd')
        mock_add.return_value = p

        # Test
        role_data = {'uuid': 'qwerty12345'}
        response = self.app.post_json(URL_PLAN_ROLES, params=role_data)
        result = response.json

        # Verify
        mock_add.assert_called_once_with('plan_uuid', 'qwerty12345')
        self.assertEqual(response.status_int, 201)
        self.assertEqual(result['uuid'], 'a')
        self.assertEqual(result['name'], 'n')

    @mock.patch('tuskar.manager.plan.PlansManager.add_role_to_plan')
    def test_post_duplicate(self, mock_add):
        """A ValueError from the manager maps to HTTP 409 Conflict."""
        # Setup
        mock_add.side_effect = ValueError()

        # Test
        role_data = {'uuid': 'qwerty12345'}
        response = self.app.post_json(URL_PLAN_ROLES, params=role_data,
                                      status=409)

        # Verify
        mock_add.assert_called_once_with('plan_uuid', 'qwerty12345')
        self.assertEqual(response.status_int, 409)

    @mock.patch('tuskar.manager.plan.PlansManager.add_role_to_plan')
    def test_post_unkown_uuid(self, mock_add):
        """An UnknownUUID from the manager maps to HTTP 404 Not Found."""
        # NOTE: 'unkown' is a typo for 'unknown'; kept to preserve the
        # recorded test name.
        # Setup
        mock_add.side_effect = storage_exceptions.UnknownUUID()

        # Test
        role_data = {'uuid': 'qwerty12345'}
        response = self.app.post_json(URL_PLAN_ROLES, params=role_data,
                                      status=404)

        # Verify
        mock_add.assert_called_once_with('plan_uuid', 'qwerty12345')
        self.assertEqual(response.status_int, 404)

    @mock.patch('tuskar.manager.plan.PlansManager.remove_role_from_plan')
    def test_delete(self, mock_remove):
        """DELETE of a plan role removes it and returns 200 + the plan."""
        # Setup
        p = manager_models.DeploymentPlan('a', 'n', 'd')
        mock_remove.return_value = p

        # Test
        response = self.app.delete_json(URL_PLAN_ROLES + '/role_uuid')
        result = response.json

        # Verify
        mock_remove.assert_called_once_with('plan_uuid', 'role_uuid')
        self.assertEqual(response.status_int, 200)
        self.assertEqual(result['uuid'], 'a')
        self.assertEqual(result['name'], 'n')

    @mock.patch('tuskar.manager.plan.PlansManager.remove_role_from_plan')
    def test_delete_unkown_uuid(self, mock_remove):
        """Deleting a role with an unknown UUID maps to HTTP 404."""
        # NOTE: 'unkown' is a typo for 'unknown'; kept to preserve the
        # recorded test name.
        # Setup
        mock_remove.side_effect = storage_exceptions.UnknownUUID()

        # Test
        response = self.app.delete_json(URL_PLAN_ROLES + '/qwerty12345',
                                        status=404)

        # Verify
        mock_remove.assert_called_once_with('plan_uuid', 'qwerty12345')
        self.assertEqual(response.status_int, 404)
| {
"content_hash": "29ea56d383a723999cf88d5bff6a5b7e",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 78,
"avg_line_length": 34.71666666666667,
"alnum_prop": 0.5960153624579932,
"repo_name": "rdo-management/tuskar",
"id": "fbe0fa30911ae04c1415cb359e34a0f6322229b0",
"size": "4709",
"binary": false,
"copies": "1",
"ref": "refs/heads/mgt-master",
"path": "tuskar/tests/api/controllers/v2/test_roles.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "115"
},
{
"name": "Mako",
"bytes": "5046"
},
{
"name": "Python",
"bytes": "564511"
},
{
"name": "Shell",
"bytes": "4469"
}
],
"symlink_target": ""
} |
import argparse
import copy
import logging
import os
from typing import Any, Dict, Iterator, List
import torch
from omegaconf import open_dict
from torch import nn
from fairseq import utils
from fairseq.data import encoders
logger = logging.getLogger(__name__)
def from_pretrained(
    model_name_or_path,
    checkpoint_file="model.pt",
    data_name_or_path=".",
    archive_map=None,
    **kwargs
):
    """Load a pre-trained model ensemble together with its task and args.

    Args:
        model_name_or_path: local path or archive name of the model; may be
            remapped through *archive_map* to either a path or a dict with a
            "path" key plus default argument overrides.
        checkpoint_file: checkpoint filename(s) inside the model directory;
            multiple checkpoints are separated by ``os.pathsep``.
        data_name_or_path: dataset path, resolved relative to the model
            directory when it starts with ".".
        archive_map: optional mapping of shorthand names to paths/configs.
        **kwargs: forwarded as argument overrides to checkpoint loading.

    Returns:
        dict with keys "args", "task", and "models" (the loaded ensemble).
    """
    from fairseq import checkpoint_utils, file_utils

    if archive_map is not None:
        if model_name_or_path in archive_map:
            model_name_or_path = archive_map[model_name_or_path]
        if data_name_or_path is not None and data_name_or_path in archive_map:
            data_name_or_path = archive_map[data_name_or_path]

        # allow archive_map to set default arg_overrides (e.g., tokenizer, bpe)
        # for each model
        if isinstance(model_name_or_path, dict):
            for k, v in model_name_or_path.items():
                if k == "checkpoint_file":
                    checkpoint_file = v
                elif (
                    k != "path"
                    # only set kwargs that don't already have overrides
                    and k not in kwargs
                ):
                    kwargs[k] = v
            model_name_or_path = model_name_or_path["path"]

    model_path = file_utils.load_archive_file(model_name_or_path)

    # convenience hack for loading data and BPE codes from model archive
    if data_name_or_path.startswith("."):
        kwargs["data"] = os.path.abspath(os.path.join(model_path, data_name_or_path))
    else:
        kwargs["data"] = file_utils.load_archive_file(data_name_or_path)
    # Pick up well-known tokenizer/BPE artifact files if present in the
    # model directory and expose them as argument overrides.
    for file, arg in {
        "code": "bpe_codes",
        "bpecodes": "bpe_codes",
        "sentencepiece.bpe.model": "sentencepiece_model",
        "merges.txt": "bpe_merges",
        "vocab.json": "bpe_vocab",
    }.items():
        path = os.path.join(model_path, file)
        if os.path.exists(path):
            kwargs[arg] = path

    # Import any user-provided module before loading checkpoints so custom
    # tasks/architectures are registered.
    if "user_dir" in kwargs:
        utils.import_user_module(argparse.Namespace(user_dir=kwargs["user_dir"]))

    models, args, task = checkpoint_utils.load_model_ensemble_and_task(
        [os.path.join(model_path, cpt) for cpt in checkpoint_file.split(os.pathsep)],
        arg_overrides=kwargs,
    )

    return {
        "args": args,
        "task": task,
        "models": models,
    }
class GeneratorHubInterface(nn.Module):
    """
    PyTorch Hub interface for generating sequences from a pre-trained
    translation or language model.
    """

    def __init__(self, cfg, task, models):
        """Wrap *models* (an ensemble) with *task* and config *cfg*."""
        super().__init__()
        self.cfg = cfg
        self.task = task
        self.models = nn.ModuleList(models)
        self.src_dict = task.source_dictionary
        self.tgt_dict = task.target_dictionary

        # optimize model for generation
        for model in self.models:
            model.prepare_for_inference_(cfg)

        # Load alignment dictionary for unknown word replacement
        # (None if no unknown word replacement, empty if no path to align dictionary)
        self.align_dict = utils.load_align_dict(cfg.generation.replace_unk)

        self.tokenizer = encoders.build_tokenizer(cfg.tokenizer)
        self.bpe = encoders.build_bpe(cfg.bpe)

        self.max_positions = utils.resolve_max_positions(
            self.task.max_positions(), *[model.max_positions() for model in models]
        )

        # this is useful for determining the device
        self.register_buffer("_float_tensor", torch.tensor([0], dtype=torch.float))

    @property
    def device(self):
        """Device the interface lives on (tracked via the dummy buffer)."""
        return self._float_tensor.device

    def translate(
        self, sentences: List[str], beam: int = 5, verbose: bool = False, **kwargs
    ) -> List[str]:
        """Translate *sentences*; alias for :meth:`sample` with beam=5."""
        return self.sample(sentences, beam, verbose, **kwargs)

    def sample(
        self, sentences: List[str], beam: int = 1, verbose: bool = False, **kwargs
    ) -> List[str]:
        """Generate one output string per input sentence (best hypothesis)."""
        # A single string is handled by recursing on a one-element batch.
        if isinstance(sentences, str):
            return self.sample([sentences], beam=beam, verbose=verbose, **kwargs)[0]
        tokenized_sentences = [self.encode(sentence) for sentence in sentences]
        batched_hypos = self.generate(tokenized_sentences, beam, verbose, **kwargs)
        return [self.decode(hypos[0]["tokens"]) for hypos in batched_hypos]

    def score(
        self, sentences: List[str], replace_newline_with_eos: bool = False, **kwargs
    ):
        """Score *sentences* against the model; returns the top hypothesis
        dict (with positional scores) per sentence."""
        if isinstance(sentences, str):
            return self.score(
                [sentences], replace_newline_with_eos=replace_newline_with_eos, **kwargs
            )[0]

        def encode(sentence):
            # Optionally treat each line of a sentence as EOS-terminated.
            if replace_newline_with_eos:
                return torch.cat([self.encode(line) for line in sentence.splitlines()])
            else:
                return self.encode(sentence)

        # NOTE: this doesn't support translation tasks currently
        tokenized_sentences = [encode(sentence) for sentence in sentences]
        return [
            hypos[0]
            for hypos in self.generate(
                tokenized_sentences, score_reference=True, **kwargs
            )
        ]

    def generate(
        self,
        tokenized_sentences: List[torch.LongTensor],
        beam: int = 5,
        verbose: bool = False,
        skip_invalid_size_inputs=False,
        inference_step_args=None,
        prefix_allowed_tokens_fn=None,
        **kwargs
    ) -> List[List[Dict[str, torch.Tensor]]]:
        """Run generation over pre-tokenized inputs.

        Returns, for each input, a list of hypothesis dicts in input order.
        """
        # A single 1-D tensor is treated as a batch of one.
        if torch.is_tensor(tokenized_sentences) and tokenized_sentences.dim() == 1:
            return self.generate(
                tokenized_sentences.unsqueeze(0), beam=beam, verbose=verbose, **kwargs
            )[0]

        # build generator using current args as well as any kwargs
        gen_args = copy.deepcopy(self.cfg.generation)
        with open_dict(gen_args):
            gen_args.beam = beam
            for k, v in kwargs.items():
                setattr(gen_args, k, v)
        generator = self.task.build_generator(
            self.models,
            gen_args,
            prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
        )

        inference_step_args = inference_step_args or {}
        results = []
        for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs):
            batch = utils.apply_to_sample(lambda t: t.to(self.device), batch)
            translations = self.task.inference_step(
                generator, self.models, batch, **inference_step_args
            )
            for id, hypos in zip(batch["id"].tolist(), translations):
                results.append((id, hypos))

        # sort output to match input order
        outputs = [hypos for _, hypos in sorted(results, key=lambda x: x[0])]

        if verbose:

            def getarg(name, default):
                # Prefer the generation args, falling back to the top-level cfg.
                return getattr(gen_args, name, getattr(self.cfg, name, default))

            # Log source (S), hypotheses (H), positional scores (P) and,
            # when available, alignments (A) for each input.
            for source_tokens, target_hypotheses in zip(tokenized_sentences, outputs):
                src_str_with_unk = self.string(source_tokens)
                logger.info("S\t{}".format(src_str_with_unk))
                for hypo in target_hypotheses:
                    hypo_str = self.decode(hypo["tokens"])
                    logger.info("H\t{}\t{}".format(hypo["score"], hypo_str))
                    logger.info(
                        "P\t{}".format(
                            " ".join(
                                map(
                                    lambda x: "{:.4f}".format(x),
                                    hypo["positional_scores"].tolist(),
                                )
                            )
                        )
                    )
                    if hypo["alignment"] is not None and getarg(
                        "print_alignment", False
                    ):
                        logger.info(
                            "A\t{}".format(
                                " ".join(
                                    [
                                        "{}-{}".format(src_idx, tgt_idx)
                                        for src_idx, tgt_idx in hypo["alignment"]
                                    ]
                                )
                            )
                        )
        return outputs

    def encode(self, sentence: str) -> torch.LongTensor:
        """String -> token tensor: tokenize, apply BPE, then binarize."""
        sentence = self.tokenize(sentence)
        sentence = self.apply_bpe(sentence)
        return self.binarize(sentence)

    def decode(self, tokens: torch.LongTensor) -> str:
        """Token tensor -> string: stringify, strip BPE, then detokenize."""
        sentence = self.string(tokens)
        sentence = self.remove_bpe(sentence)
        return self.detokenize(sentence)

    def tokenize(self, sentence: str) -> str:
        """Apply the configured tokenizer, if any (no-op otherwise)."""
        if self.tokenizer is not None:
            sentence = self.tokenizer.encode(sentence)
        return sentence

    def detokenize(self, sentence: str) -> str:
        """Invert :meth:`tokenize`, if a tokenizer is configured."""
        if self.tokenizer is not None:
            sentence = self.tokenizer.decode(sentence)
        return sentence

    def apply_bpe(self, sentence: str) -> str:
        """Apply the configured BPE encoder, if any (no-op otherwise)."""
        if self.bpe is not None:
            sentence = self.bpe.encode(sentence)
        return sentence

    def remove_bpe(self, sentence: str) -> str:
        """Invert :meth:`apply_bpe`, if a BPE encoder is configured."""
        if self.bpe is not None:
            sentence = self.bpe.decode(sentence)
        return sentence

    def binarize(self, sentence: str) -> torch.LongTensor:
        """Map a BPE'd string to source-dictionary token ids."""
        return self.src_dict.encode_line(sentence, add_if_not_exist=False).long()

    def string(self, tokens: torch.LongTensor) -> str:
        """Map target-dictionary token ids back to a string."""
        return self.tgt_dict.string(tokens)

    def _build_batches(
        self, tokens: List[List[int]], skip_invalid_size_inputs: bool
    ) -> Iterator[Dict[str, Any]]:
        """Yield mini-batches over *tokens* using the task's batch iterator."""
        lengths = torch.LongTensor([t.numel() for t in tokens])
        batch_iterator = self.task.get_batch_iterator(
            dataset=self.task.build_dataset_for_inference(tokens, lengths),
            max_tokens=self.cfg.dataset.max_tokens,
            max_sentences=self.cfg.dataset.batch_size,
            max_positions=self.max_positions,
            ignore_invalid_inputs=skip_invalid_size_inputs,
            disable_iterator_cache=True,
        ).next_epoch_itr(shuffle=False)
        return batch_iterator
class BPEHubInterface(object):
    """PyTorch Hub interface for Byte-Pair Encoding (BPE)."""

    def __init__(self, bpe, **kwargs):
        super().__init__()
        # Bundle the BPE name and any extra options into a namespace, the
        # shape that encoders.build_bpe expects.
        ns = argparse.Namespace(bpe=bpe, **kwargs)
        self.bpe = encoders.build_bpe(ns)
        assert self.bpe is not None

    def encode(self, sentence: str) -> str:
        """Apply BPE to *sentence*."""
        return self.bpe.encode(sentence)

    def decode(self, sentence: str) -> str:
        """Remove BPE from *sentence*."""
        return self.bpe.decode(sentence)
class TokenizerHubInterface(object):
    """PyTorch Hub interface for tokenization."""

    def __init__(self, tokenizer, **kwargs):
        super().__init__()
        # Bundle the tokenizer name and any extra options into a namespace,
        # the shape that encoders.build_tokenizer expects.
        ns = argparse.Namespace(tokenizer=tokenizer, **kwargs)
        self.tokenizer = encoders.build_tokenizer(ns)
        assert self.tokenizer is not None

    def encode(self, sentence: str) -> str:
        """Tokenize *sentence*."""
        return self.tokenizer.encode(sentence)

    def decode(self, sentence: str) -> str:
        """Detokenize *sentence*."""
        return self.tokenizer.decode(sentence)
| {
"content_hash": "8af6dbdbf8090df4d05fd4c2fb261f97",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 88,
"avg_line_length": 36.188311688311686,
"alnum_prop": 0.5700699802619774,
"repo_name": "pytorch/fairseq",
"id": "b6fa2cb97d713be5b81f6bad345be3f12cc02d70",
"size": "11350",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "fairseq/hub_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "21106"
},
{
"name": "Cuda",
"bytes": "38166"
},
{
"name": "Cython",
"bytes": "13294"
},
{
"name": "Lua",
"bytes": "4210"
},
{
"name": "Python",
"bytes": "3699357"
},
{
"name": "Shell",
"bytes": "2182"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.