text stringlengths 4 1.02M | meta dict |
|---|---|
"""\
==================================
Parsing of Linux-DVB channels.conf
==================================
ParseChannelsConf parses lines from a channels.conf file and, for each
line, outputs a tuple containing the parsed tuning information.
Example Usage
-------------
Extract from a channels.conf file the tuning parameters needed for a specific
channel and display them::
channelName = "BBC ONE"
channelsConfFile = "path/to/my/channels.conf"
def chooseChannelName(tuningInfo):
name, params, ids = tuningInfo
if name == channelName:
return (name,params,ids)
else:
return None
Pipeline(
RateControlledFileReader(channelsConfFile, readmode="lines", rate=1000, chunksize=1),
ParseChannelsConf(),
PureTransformer(chooseChannelName),
ConsoleEchoer(),
).run()
A slightly more complex example that actually records the specified channel to disk::
channelName = "BBC ONE"
channelsConfFile = "path/to/my/channels.conf"
outFileName = "my_recording_of_BBC_ONE.ts"
def chooseChannelName(tuningInfo):
name, params, ids = tuningInfo
if name == channelName:
return (name,params,ids)
else:
return None
def makeAddPidsMessage(tuningInfo):
name, params, pids = tuningInfo
return ("ADD", [ pids["audio_pid"], pids["video_pid"] ])
def createTuner(tuningInfo):
name, (freq, frontend_params), pids = tuningInfo
return Tuner(freq, frontend_params)
Pipeline(
RateControlledFileReader(channelsConfFile, readmode="lines", rate=1000, chunksize=1),
ParseChannelsConf(),
PureTransformer(chooseChannelName),
Graphline(
SPLIT = TwoWaySplitter(),
TUNER = Carousel(createTuner),
PID_REQ = PureTransformer(makeAddPidsMessage),
linkages = {
("", "inbox") : ("SPLIT", "inbox"),
("SPLIT", "outbox") : ("TUNER", "next"), # trigger creation of tuner
("SPLIT", "outbox2") : ("PID_REQ", "inbox"),
("PID_REQ", "outbox" ) : ("TUNER", "inbox"), # ask tuner to output right packets
("TUNER", "outbox") : ("", "outbox"),
}
),
SimpleFileWriter(outFileName),
).run()
In the above example, when we get the tuning info for the channel we are interested in, it is sent
to two places using a TwoWaySplitter: to a Carousel that creates a Tuner tuned to the correct
frequency; and also to be transformed into a message to request the Tuner outputs packets with the
packet IDs (PIDs) for the particular channel we're interested in.
More detail
-----------
Send strings containing lines from a channels.conf file to the "inbox" inbox of ParseChannelsConf.
Each string must contain a single line.
For each line received, a tuple containing a parsed version of the information in that line will
be sent out of the "outbox" outbox. This tuple is of the form::
( "Channel name", (frequency_MHz, tuning_parameters), packet/service_ids )
Tuning parameters are a dict of the form::
{
"inversion" : dvb3.frontend.INVERSION_????
"bandwidth" : dvb3.frontend.BANDWIDTH_????
"code_rate_HP" : dvb3.frontend.FEC_???
"code_rate_LP" : dvb3.frontend.FEC_???
"constellation" : dvb3.frontend.Q???
"transmission_mode" : dvb3.frontend.TRANSMISSION_MODE_???
"guard_interval" : dvb3.frontend.GUARD_INTERVAL_???
"hierarchy_information" : dvb3.frontend.HIERARCHY_???
}
In practice you do not need to worry about the actual values. You can pass this dict directly to
most Kamaelia DVB tuning/receiving components as the 'feparams' (front-end tuning parameters)
Packet/Service IDs are a dict of the form::
{ "video_pid" : packet_id_number,
"audio_pid" : packet_id_number,
"service_id" : service_id_number,
}
The video and audio PIDs are the packet ids of packets carrying video and audio for this service.
The service id is the id number associated with this service/channel.
If a producerFinished or shutdownMicroprocess message is sent to ParseChannelsConf's "control" inbox,
then the message will be sent on out of the "signal" outbox and this component will terminate.
Any pending strings in its "inbox" inbox will be processed and sent out before termination.
"""
from Axon.AxonExceptions import noSpaceInBox
from Axon.Component import component
from Axon.Ipc import shutdownMicroprocess, producerFinished

import dvb3.frontend
class ParseChannelsConf(component):
    """\
    ParseChannelsConf() -> new ParseChannelsConf component

    Parses channels.conf file fed, line by line, as strings into the
    "inbox" inbox and outputs (channelname, dict(tuning params),dict(pids))
    pairs out of the "outbox" outbox.
    """
    def __init__(self):
        super(ParseChannelsConf,self).__init__()
        # Most recent shutdown message received; echoed out of "signal" when
        # this component terminates.
        self.shutdownMsg = None

    def main(self):
        """Main loop: parse each incoming line, emit results, honour shutdown."""
        while 1:
            # Drain the inbox completely before checking for shutdown, so
            # pending lines are still parsed after a producerFinished arrives.
            while self.dataReady("inbox"):
                line = self.recv("inbox")
                data = self.parse(line)
                if data is not None:
                    for _ in self.safesend(data,"outbox"):
                        yield _
            if self.checkShutdown():
                self.send(self.shutdownMsg,"signal")
                return
            self.pause()
            yield 1

    def parse(self,line):
        """\
        Parse a single channels.conf line.

        Returns (name, (frequency_MHz, feparams_dict), pids_dict), or None if
        the line is blank or malformed.
        """
        try:
            line = line.strip()
            if not line:
                return None
            name, freq, inv, bw, fec_hi, fec_lo, qam, tm, gi, h, vpid, apid, sid = line.split(":")
            return name, ( float(freq)/1000.0/1000.0,    # Hz -> MHz
                           { "inversion"             : _inversion[inv.upper()],
                             "bandwidth"             : _bandwidth[bw.upper()],
                             "code_rate_HP"          : _fec[fec_hi.upper()],
                             "code_rate_LP"          : _fec[fec_lo.upper()],
                             "constellation"         : _qam[qam.upper()],
                             "transmission_mode"     : _tm[tm.upper()],
                             "guard_interval"        : _gi[gi.upper()],
                             "hierarchy_information" : _h[h.upper()],
                           }, ), \
                         { "video_pid"  : int(vpid),
                           "audio_pid"  : int(apid),
                           "service_id" : int(sid),
                         }
        except (ValueError, KeyError):
            # Previously a bare 'except:'. Narrowed so that only the expected
            # failure modes (wrong field count, non-numeric value, unknown
            # enum name) cause the line to be skipped; real bugs now surface.
            return None

    def checkShutdown(self):
        """\
        Collects any new shutdown messages arriving at the "control" inbox, and
        returns "NOW" if immediate shutdown is required, or "WHENEVER" if the
        component can shutdown when it has finished processing pending data.
        """
        while self.dataReady("control"):
            newMsg = self.recv("control")
            if isinstance(newMsg, shutdownMicroprocess):
                # shutdownMicroprocess always takes precedence over
                # producerFinished.
                self.shutdownMsg = newMsg
            elif self.shutdownMsg is None and isinstance(newMsg, producerFinished):
                self.shutdownMsg = newMsg
        if isinstance(self.shutdownMsg, shutdownMicroprocess):
            return "NOW"
        elif self.shutdownMsg is not None:
            return "WHENEVER"
        else:
            return None

    def safesend(self, data, boxname):
        """\
        Generator.

        Sends data out of the named outbox. If the destination is full
        (noSpaceInBox exception) then it waits until there is space and retries
        until it succeeds.

        If a shutdownMicroprocess message is received, returns early.
        """
        while 1:
            try:
                self.send(data, boxname)
                return
            except noSpaceInBox:
                # noSpaceInBox was referenced here without ever being imported
                # (a NameError waiting to happen); it is now imported from
                # Axon.AxonExceptions at the top of the file.
                if self.checkShutdown() == "NOW":
                    return
                self.pause()
                yield 1
def _makeTable(names):
    """Map each symbolic channels.conf token to its dvb3.frontend constant."""
    return dict((name, getattr(dvb3.frontend, name)) for name in names)

# Lookup tables translating the textual fields of a channels.conf line into
# the corresponding dvb3.frontend enumeration values (keys are matched after
# upper-casing the field, see ParseChannelsConf.parse()).
_inversion = _makeTable(["INVERSION_OFF", "INVERSION_ON", "INVERSION_AUTO"])

_bandwidth = _makeTable(["BANDWIDTH_8_MHZ", "BANDWIDTH_7_MHZ",
                         "BANDWIDTH_6_MHZ", "BANDWIDTH_AUTO"])

_fec = _makeTable(["FEC_NONE", "FEC_1_2", "FEC_2_3", "FEC_3_4", "FEC_4_5",
                   "FEC_5_6", "FEC_6_7", "FEC_7_8", "FEC_8_9", "FEC_AUTO"])

_qam = _makeTable(["QPSK", "QAM_16", "QAM_32", "QAM_64",
                   "QAM_128", "QAM_256", "QAM_AUTO"])

_tm = _makeTable(["TRANSMISSION_MODE_2K", "TRANSMISSION_MODE_8K",
                  "TRANSMISSION_MODE_AUTO"])

_gi = _makeTable(["GUARD_INTERVAL_1_32", "GUARD_INTERVAL_1_16",
                  "GUARD_INTERVAL_1_8", "GUARD_INTERVAL_1_4",
                  "GUARD_INTERVAL_AUTO"])

_h = _makeTable(["HIERARCHY_NONE", "HIERARCHY_1", "HIERARCHY_2",
                 "HIERARCHY_4", "HIERARCHY_AUTO"])
if __name__ == "__main__":
    from Kamaelia.Chassis.Pipeline import Pipeline
    from Kamaelia.File.Reading import RateControlledFileReader
    from Kamaelia.Util.PureTransformer import PureTransformer
    from Kamaelia.Util.TwoWaySplitter import TwoWaySplitter
    from Kamaelia.File.Writing import SimpleFileWriter
    from Kamaelia.Chassis.Carousel import Carousel
    from Kamaelia.Chassis.Graphline import Graphline
    from Kamaelia.Util.Console import ConsoleEchoer
    from Kamaelia.Device.DVB.Tuner import Tuner

    import sys

    if len(sys.argv) != 4:
        usage = ("Usage:\n"
                 "\n"
                 " %s <channels.conf file> \"channel name\" <output ts filename>\n"
                 "\n") % sys.argv[0]
        sys.stdout.write(usage)
        sys.exit(1)

    channelsConfFile = sys.argv[1]
    channelName = sys.argv[2].upper().strip()
    outFileName = sys.argv[3]

    # Pass through only the tuning info for the channel the user asked for.
    def chooseChannelName(tuningInfo):
        name, params, ids = tuningInfo
        if name == channelName:
            return (name, params, ids)
        return None

    # Build a Tuner for the frequency/front-end params of the chosen channel.
    def makeTuner(tuningInfo):
        _, (freq, feparams), _ = tuningInfo
        return Tuner(freq, feparams)

    # Request the audio+video packet streams of the chosen channel.
    def makePidRequest(tuningInfo):
        _, _, pids = tuningInfo
        return ("ADD", [pids["audio_pid"], pids["video_pid"]])

    Pipeline(
        RateControlledFileReader(channelsConfFile, readmode="lines", rate=1000, chunksize=1),
        ParseChannelsConf(),
        PureTransformer(chooseChannelName),
        Graphline(
            Router = TwoWaySplitter(),
            DVBReceiver = Carousel(makeTuner),
            PidReq = PureTransformer(makePidRequest),
            linkages = {
                ("", "inbox") : ("Router", "inbox"),
                ("Router", "outbox") : ("DVBReceiver", "next"),
                ("Router", "outbox2") : ("PidReq", "inbox"),
                ("PidReq", "outbox") : ("DVBReceiver", "inbox"),
                ("DVBReceiver", "outbox") : ("", "outbox"),
            }
        ),
        SimpleFileWriter(outFileName),
    ).run()
| {
"content_hash": "5a5b9a60cb7ce755326e9a7d086771d4",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 106,
"avg_line_length": 34.91740412979351,
"alnum_prop": 0.5894229956914758,
"repo_name": "bbc/kamaelia",
"id": "9ec8cac8acdd6d6631da69ffe8b856d00e8b6b77",
"size": "12740",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Sketches/MH/DVB_PSI/ParseChannelsConf.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "62985"
},
{
"name": "C",
"bytes": "212854"
},
{
"name": "C++",
"bytes": "327546"
},
{
"name": "CSS",
"bytes": "114434"
},
{
"name": "ChucK",
"bytes": "422"
},
{
"name": "Diff",
"bytes": "483"
},
{
"name": "Gettext Catalog",
"bytes": "3919909"
},
{
"name": "HTML",
"bytes": "1288960"
},
{
"name": "Java",
"bytes": "31832"
},
{
"name": "JavaScript",
"bytes": "829491"
},
{
"name": "Makefile",
"bytes": "5768"
},
{
"name": "NSIS",
"bytes": "18867"
},
{
"name": "PHP",
"bytes": "49059"
},
{
"name": "Perl",
"bytes": "31234"
},
{
"name": "Processing",
"bytes": "2885"
},
{
"name": "Pure Data",
"bytes": "7485482"
},
{
"name": "Python",
"bytes": "18896320"
},
{
"name": "Ruby",
"bytes": "4165"
},
{
"name": "Shell",
"bytes": "711244"
}
],
"symlink_target": ""
} |
"""Test the flow classes."""
import asyncio
from unittest.mock import patch
import pytest
import voluptuous as vol
from homeassistant import config_entries, data_entry_flow
from homeassistant.core import HomeAssistant
from homeassistant.util.decorator import Registry
from tests.common import async_capture_events
@pytest.fixture
def manager():
    """Return a flow manager."""
    handlers = Registry()
    entries = []

    class FlowManager(data_entry_flow.FlowManager):
        """Test flow manager."""

        async def async_create_flow(self, handler_key, *, context, data):
            """Test create flow."""
            handler = handlers.get(handler_key)
            if handler is None:
                raise data_entry_flow.UnknownHandler
            flow = handler()
            # Allow tests to start at a step other than "init" via context.
            flow.init_step = context.get("init_step", "init")
            return flow

        async def async_finish_flow(self, flow, result):
            """Test finish flow."""
            if result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY:
                result["source"] = flow.context.get("source")
                entries.append(result)
            return result

    mgr = FlowManager(None)
    # Expose test hooks: created entries plus a decorator to register handlers.
    mgr.mock_created_entries = entries
    mgr.mock_reg_handler = handlers.register
    return mgr
async def test_configure_reuses_handler_instance(manager):
    """Test that we reuse instances."""

    @manager.mock_reg_handler("test")
    class TestFlow(data_entry_flow.FlowHandler):
        handle_count = 0

        async def async_step_init(self, user_input=None):
            self.handle_count += 1
            return self.async_show_form(
                errors={"base": str(self.handle_count)}, step_id="init"
            )

    form = await manager.async_init("test")
    assert form["errors"]["base"] == "1"

    # A second configure call must hit the same TestFlow instance, so the
    # per-instance counter advances rather than resetting.
    form = await manager.async_configure(form["flow_id"])
    assert form["errors"]["base"] == "2"

    assert manager.async_progress() == [
        {
            "flow_id": form["flow_id"],
            "handler": "test",
            "step_id": "init",
            "context": {},
        }
    ]
    assert len(manager.mock_created_entries) == 0
async def test_configure_two_steps(manager):
    """Test that we reuse instances."""

    @manager.mock_reg_handler("test")
    class TestFlow(data_entry_flow.FlowHandler):
        VERSION = 1

        async def async_step_first(self, user_input=None):
            if user_input is not None:
                # Stash the first step's input for the second step to combine.
                self.init_data = user_input
                return await self.async_step_second()
            return self.async_show_form(step_id="first", data_schema=vol.Schema([str]))

        async def async_step_second(self, user_input=None):
            if user_input is not None:
                return self.async_create_entry(
                    title="Test Entry", data=self.init_data + user_input
                )
            return self.async_show_form(step_id="second", data_schema=vol.Schema([str]))

    form = await manager.async_init("test", context={"init_step": "first"})

    # Input not matching the step's schema must raise before the step runs.
    with pytest.raises(vol.Invalid):
        form = await manager.async_configure(form["flow_id"], "INCORRECT-DATA")

    form = await manager.async_configure(form["flow_id"], ["INIT-DATA"])
    form = await manager.async_configure(form["flow_id"], ["SECOND-DATA"])

    # Data from both steps is concatenated into the created entry.
    assert form["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert len(manager.async_progress()) == 0
    assert len(manager.mock_created_entries) == 1
    result = manager.mock_created_entries[0]
    assert result["handler"] == "test"
    assert result["data"] == ["INIT-DATA", "SECOND-DATA"]
async def test_show_form(manager):
    """Test that we can show a form."""
    schema = vol.Schema({vol.Required("username"): str, vol.Required("password"): str})

    @manager.mock_reg_handler("test")
    class TestFlow(data_entry_flow.FlowHandler):
        async def async_step_init(self, user_input=None):
            # Surface a per-field error alongside the schema.
            errors = {"username": "Should be unique."}
            return self.async_show_form(
                step_id="init", data_schema=schema, errors=errors
            )

    form = await manager.async_init("test")
    # The schema object must be passed through untouched (identity, not copy).
    assert form["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert form["data_schema"] is schema
    assert form["errors"] == {"username": "Should be unique."}
async def test_abort_removes_instance(manager):
    """Test that abort removes the flow from progress."""

    @manager.mock_reg_handler("test")
    class TestFlow(data_entry_flow.FlowHandler):
        is_new = True

        async def async_step_init(self, user_input=None):
            old = self.is_new
            self.is_new = False
            return self.async_abort(reason=str(old))

    # Each async_init builds a fresh handler instance, so the reported reason
    # is "True" both times; an aborted flow must leave nothing in progress
    # and create no entries.
    for _ in range(2):
        form = await manager.async_init("test")
        assert form["reason"] == "True"
        assert len(manager.async_progress()) == 0
        assert len(manager.mock_created_entries) == 0
async def test_create_saves_data(manager):
    """Test creating a config entry."""

    @manager.mock_reg_handler("test")
    class TestFlow(data_entry_flow.FlowHandler):
        VERSION = 5

        async def async_step_init(self, user_input=None):
            return self.async_create_entry(title="Test Title", data="Test Data")

    await manager.async_init("test")

    # Creating an entry finishes the flow and records it via the fixture's
    # async_finish_flow.
    assert len(manager.async_progress()) == 0
    assert len(manager.mock_created_entries) == 1

    entry = manager.mock_created_entries[0]
    assert entry["version"] == 5
    assert entry["handler"] == "test"
    assert entry["title"] == "Test Title"
    assert entry["data"] == "Test Data"
    # No source in the init context, so the fixture records None.
    assert entry["source"] is None
async def test_discovery_init_flow(manager):
    """Test a flow initialized by discovery."""

    @manager.mock_reg_handler("test")
    class TestFlow(data_entry_flow.FlowHandler):
        VERSION = 5

        async def async_step_init(self, info):
            return self.async_create_entry(title=info["id"], data=info)

    data = {"id": "hello", "token": "secret"}
    await manager.async_init(
        "test", context={"source": config_entries.SOURCE_DISCOVERY}, data=data
    )

    assert len(manager.async_progress()) == 0
    assert len(manager.mock_created_entries) == 1

    entry = manager.mock_created_entries[0]
    assert entry["version"] == 5
    assert entry["handler"] == "test"
    assert entry["title"] == "hello"
    assert entry["data"] == data
    # The discovery source in the init context is propagated onto the entry.
    assert entry["source"] == config_entries.SOURCE_DISCOVERY
async def test_finish_callback_change_result_type(hass):
    """Test finish callback can change result type."""

    class TestFlow(data_entry_flow.FlowHandler):
        VERSION = 1

        async def async_step_init(self, input):
            """Return init form with one input field 'count'."""
            if input is not None:
                return self.async_create_entry(title="init", data=input)
            return self.async_show_form(
                step_id="init", data_schema=vol.Schema({"count": int})
            )

    class FlowManager(data_entry_flow.FlowManager):
        async def async_create_flow(self, handler_name, *, context, data):
            """Create a test flow."""
            return TestFlow()

        async def async_finish_flow(self, flow, result):
            """Redirect to init form if count <= 1."""
            if result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY:
                if result["data"] is None or result["data"].get("count", 0) <= 1:
                    return flow.async_show_form(
                        step_id="init", data_schema=vol.Schema({"count": int})
                    )
                else:
                    result["result"] = result["data"]["count"]
            return result

    manager = FlowManager(hass)

    result = await manager.async_init("test")
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "init"

    # count <= 1: finish callback converts the created entry back into a form.
    result = await manager.async_configure(result["flow_id"], {"count": 0})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "init"
    assert "result" not in result

    # count > 1: the entry is accepted and annotated with "result".
    result = await manager.async_configure(result["flow_id"], {"count": 2})
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["result"] == 2
async def test_external_step(hass, manager):
    """Test external step logic."""
    manager.hass = hass

    @manager.mock_reg_handler("test")
    class TestFlow(data_entry_flow.FlowHandler):
        VERSION = 5
        data = None

        async def async_step_init(self, user_input=None):
            if not user_input:
                # First call: hand the user off to an external site.
                return self.async_external_step(
                    step_id="init", url="https://example.com"
                )
            self.data = user_input
            return self.async_external_step_done(next_step_id="finish")

        async def async_step_finish(self, user_input=None):
            return self.async_create_entry(title=self.data["title"], data=self.data)

    events = async_capture_events(
        hass, data_entry_flow.EVENT_DATA_ENTRY_FLOW_PROGRESSED
    )

    result = await manager.async_init("test")
    assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
    assert len(manager.async_progress()) == 1
    assert len(manager.async_progress_by_handler("test")) == 1
    assert manager.async_get(result["flow_id"])["handler"] == "test"

    # Mimic external step
    # Called by integrations: `hass.config_entries.flow.async_configure(…)`
    result = await manager.async_configure(result["flow_id"], {"title": "Hello"})
    assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP_DONE

    # Completing the external step fires a PROGRESSED event telling the
    # frontend to refresh the flow.
    await hass.async_block_till_done()
    assert len(events) == 1
    assert events[0].data == {
        "handler": "test",
        "flow_id": result["flow_id"],
        "refresh": True,
    }

    # Frontend refreshes the flow
    result = await manager.async_configure(result["flow_id"])
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == "Hello"
async def test_show_progress(hass, manager):
    """Test show progress logic."""
    manager.hass = hass

    @manager.mock_reg_handler("test")
    class TestFlow(data_entry_flow.FlowHandler):
        VERSION = 5
        data = None
        task_one_done = False

        async def async_step_init(self, user_input=None):
            if not user_input:
                # First call reports task_one, subsequent calls task_two.
                if not self.task_one_done:
                    self.task_one_done = True
                    progress_action = "task_one"
                else:
                    progress_action = "task_two"
                return self.async_show_progress(
                    step_id="init",
                    progress_action=progress_action,
                )
            self.data = user_input
            return self.async_show_progress_done(next_step_id="finish")

        async def async_step_finish(self, user_input=None):
            return self.async_create_entry(title=self.data["title"], data=self.data)

    events = async_capture_events(
        hass, data_entry_flow.EVENT_DATA_ENTRY_FLOW_PROGRESSED
    )

    result = await manager.async_init("test")
    assert result["type"] == data_entry_flow.RESULT_TYPE_SHOW_PROGRESS
    assert result["progress_action"] == "task_one"
    assert len(manager.async_progress()) == 1
    assert len(manager.async_progress_by_handler("test")) == 1
    assert manager.async_get(result["flow_id"])["handler"] == "test"

    # Mimic task one done and moving to task two
    # Called by integrations: `hass.config_entries.flow.async_configure(…)`
    result = await manager.async_configure(result["flow_id"])
    assert result["type"] == data_entry_flow.RESULT_TYPE_SHOW_PROGRESS
    assert result["progress_action"] == "task_two"
    # Each progress transition fires a PROGRESSED event for the frontend.
    await hass.async_block_till_done()
    assert len(events) == 1
    assert events[0].data == {
        "handler": "test",
        "flow_id": result["flow_id"],
        "refresh": True,
    }

    # Mimic task two done and continuing step
    # Called by integrations: `hass.config_entries.flow.async_configure(…)`
    result = await manager.async_configure(result["flow_id"], {"title": "Hello"})
    assert result["type"] == data_entry_flow.RESULT_TYPE_SHOW_PROGRESS_DONE
    await hass.async_block_till_done()
    assert len(events) == 2
    assert events[1].data == {
        "handler": "test",
        "flow_id": result["flow_id"],
        "refresh": True,
    }

    # Frontend refreshes the flow
    result = await manager.async_configure(result["flow_id"])
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == "Hello"
async def test_abort_flow_exception(manager):
    """Test that the AbortFlow exception works."""

    @manager.mock_reg_handler("test")
    class TestFlow(data_entry_flow.FlowHandler):
        async def async_step_init(self, user_input=None):
            raise data_entry_flow.AbortFlow("mock-reason", {"placeholder": "yo"})

    form = await manager.async_init("test")
    # AbortFlow raised inside a step is converted into an abort result.
    assert form["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert form["reason"] == "mock-reason"
    assert form["description_placeholders"] == {"placeholder": "yo"}
async def test_initializing_flows_canceled_on_shutdown(hass, manager):
    """Test that initializing flows are canceled on shutdown."""

    @manager.mock_reg_handler("test")
    class TestFlow(data_entry_flow.FlowHandler):
        async def async_step_init(self, user_input=None):
            # Simulate a slow first step so the flow is still initializing
            # when the manager shuts down.
            await asyncio.sleep(1)

    task = asyncio.create_task(manager.async_init("test"))
    await hass.async_block_till_done()
    await manager.async_shutdown()

    # Shutdown must cancel the still-initializing flow's task.
    with pytest.raises(asyncio.exceptions.CancelledError):
        await task
async def test_init_unknown_flow(manager):
    """Test that UnknownFlow is raised when async_create_flow returns None."""

    # Patch the factory to return no flow object at all.
    with pytest.raises(data_entry_flow.UnknownFlow), patch.object(
        manager, "async_create_flow", return_value=None
    ):
        await manager.async_init("test")
async def test_async_get_unknown_flow(manager):
    """Test that UnknownFlow is raised when async_get is called with a flow_id that does not exist."""

    # No flow was ever started with this id.
    with pytest.raises(data_entry_flow.UnknownFlow):
        await manager.async_get("does_not_exist")
async def test_async_has_matching_flow(
    hass: HomeAssistant, manager: data_entry_flow.FlowManager
):
    """Test we can check for matching flows."""
    manager.hass = hass

    @manager.mock_reg_handler("test")
    class TestFlow(data_entry_flow.FlowHandler):
        VERSION = 5

        async def async_step_init(self, user_input=None):
            return self.async_show_progress(
                step_id="init",
                progress_action="task_one",
            )

    result = await manager.async_init(
        "test",
        context={"source": config_entries.SOURCE_HOMEKIT},
        data={"properties": {"id": "aa:bb:cc:dd:ee:ff"}},
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_SHOW_PROGRESS
    assert result["progress_action"] == "task_one"
    assert len(manager.async_progress()) == 1
    assert len(manager.async_progress_by_handler("test")) == 1
    assert manager.async_get(result["flow_id"])["handler"] == "test"

    # A flow matches only when handler, context and data all agree; vary the
    # handler and the context source to exercise each mismatch.
    payload = {"properties": {"id": "aa:bb:cc:dd:ee:ff"}}
    cases = [
        ("test", config_entries.SOURCE_HOMEKIT, True),
        ("test", config_entries.SOURCE_SSDP, False),
        ("other", config_entries.SOURCE_HOMEKIT, False),
    ]
    for handler, source, expected in cases:
        assert (
            manager.async_has_matching_flow(handler, {"source": source}, payload)
            is expected
        )
async def test_move_to_unknown_step_raises_and_removes_from_in_progress(manager):
    """Test that moving to an unknown step raises and removes the flow from in progress."""

    @manager.mock_reg_handler("test")
    class TestFlow(data_entry_flow.FlowHandler):
        VERSION = 1

    # TestFlow defines no async_step_does_not_exist, so init must fail...
    with pytest.raises(data_entry_flow.UnknownStep):
        await manager.async_init("test", context={"init_step": "does_not_exist"})

    # ...and the failed flow must not linger in progress.
    assert manager.async_progress() == []
async def test_configure_raises_unknown_flow_if_not_in_progress(manager):
    """Test configure raises UnknownFlow if the flow is not in progress."""

    # No flow was ever started with this id.
    with pytest.raises(data_entry_flow.UnknownFlow):
        await manager.async_configure("wrong_flow_id")
async def test_abort_raises_unknown_flow_if_not_in_progress(manager):
    """Test abort raises UnknownFlow if the flow is not in progress."""

    # Aborting an id that was never started must also raise.
    with pytest.raises(data_entry_flow.UnknownFlow):
        await manager.async_abort("wrong_flow_id")
| {
"content_hash": "9b1194d024fe654a5f8e76fb432377e9",
"timestamp": "",
"source": "github",
"line_count": 489,
"max_line_length": 102,
"avg_line_length": 34.8200408997955,
"alnum_prop": 0.6182533623069243,
"repo_name": "jawilson/home-assistant",
"id": "b4b40b6b6c61839e205aa290aee0ffc3f750a3c7",
"size": "17033",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/test_data_entry_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2782"
},
{
"name": "Python",
"bytes": "40129467"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
from . import browser
from . import users
from . import messages
from . import rooms
from . import events
from . import client
from . import _utils

# Convenience aliases so callers can use the package-level names
# (e.g. chatexchange.Client) without importing the submodules directly.
Browser = browser.Browser
Client = client.Client

# Public API of the package; _utils is intentionally omitted.
__all__ = [
    'browser', 'users', 'messages', 'rooms', 'events', 'client',
    'Browser', 'Client']
| {
"content_hash": "07efa7c8a34e9931253fcdcce3b87478",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 64,
"avg_line_length": 17.88235294117647,
"alnum_prop": 0.6710526315789473,
"repo_name": "ByteCommander/ChatExchange6",
"id": "743a9aeb6b2b36d89fa6b93258003c659ea34e5c",
"size": "304",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "chatexchange/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "886"
},
{
"name": "Python",
"bytes": "82364"
},
{
"name": "Shell",
"bytes": "911"
}
],
"symlink_target": ""
} |
import re
from marshmallow import Schema, fields, ValidationError
def must_not_be_blank(data):
    """Marshmallow validator: reject empty/falsy values."""
    if data:
        return
    raise ValidationError('Data not provided.')
class KahaResourceSchema(Schema):
    """Serializes a Kaha resource record, mapping model attribute names
    (is_active -> active, contactname -> contact_name) onto API field names."""

    active = fields.Boolean(attribute='is_active')
    contact_number = fields.Method('format_contactnumber')
    contact_name = fields.Str(attribute='contactname')
    created = fields.DateTime()
    updated = fields.DateTime()
    # NOTE(review): `only` given as a plain string relies on the installed
    # marshmallow version accepting a single field name here - confirm
    # against the pinned marshmallow release.
    types = fields.Nested('KahaResourceTypeSchema', many=True, only='resource_type')
    props = fields.Nested('KahaResourcePropSchema', many=True, exclude=('resource',))

    def format_contactnumber(self, resource):
        """Split a free-form contact-number string into a list of ints.

        Returns [] when no number is stored, or None when any fragment is
        not numeric.
        """
        try:
            if resource.contactnumber:
                # Split on any run of whitespace and/or commas. The previous
                # pattern '\s|,' produced an empty fragment for "123, 456"
                # (comma followed by space), so int('') raised and the whole
                # value collapsed to None. It was also a non-raw string with
                # a deprecated escape sequence.
                parts = re.split(r'[\s,]+', resource.contactnumber.strip())
                return [int(d) for d in parts if d]
            return []
        except ValueError:
            return None

    class Meta:
        # Explicit field order/whitelist for serialized output.
        fields = ('resource_id',
                  'uuid',
                  'title',
                  'district',
                  'district_code',
                  'tole',
                  'vdc_code',
                  'description',
                  'contact_name',
                  'contact_number',
                  'updated',
                  'created',
                  'types',
                  'props'
                  )
class KahaResourceTypeSchema(Schema):
    """Serializes a resource-type row, with a back-reference to its resource."""

    resource = fields.Nested(KahaResourceSchema)

    class Meta:
        fields = (
            'resource_type',
            'resource',
        )
class KahaResourcePropSchema(Schema):
    """Serializes a key/value property row, with a back-reference to its resource."""

    resource = fields.Nested(KahaResourceSchema)

    class Meta:
        fields = (
            'key',
            'value',
            'resource',
        )
| {
"content_hash": "c33230d6a6c4b2064d797518c44758c8",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 85,
"avg_line_length": 29.413793103448278,
"alnum_prop": 0.5310668229777257,
"repo_name": "kahaco/kahadata",
"id": "cb75c72621b3212793aa82d25c14124ef60a6341",
"size": "1706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kaha/schemas.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "39501"
},
{
"name": "Shell",
"bytes": "526"
}
],
"symlink_target": ""
} |
import os
import sys
import arcpy
import _winreg
from datetime import datetime
from xml.dom import minidom
# Folder containing this script; used to resolve parameter paths in Base.
scriptPath = os.path.dirname(__file__)

try:
    import MDCS_UC
except Exception as inf:
    # The user-code module is optional; MDCS still runs without it.
    print 'User-Code functions disabled.'
class Base(object):
    """Common base for MDCS components: configuration constants, XML config
    state, and workspace/geodatabase bookkeeping."""

    # begin - constants
    # Severity levels understood by self.log().
    const_general_text = 0
    const_warning_text = 1
    const_critical_text = 2
    const_status_text = 3

    # Parameter folders resolved relative to this script's location.
    const_statistics_path_ = os.path.join(scriptPath, '..\\..\\parameter\\Statistics')
    const_raster_function_templates_path_ = os.path.join(scriptPath, '..\\..\\parameter\\RasterFunctionTemplates')
    const_raster_type_path_ = os.path.join(scriptPath, '..\\..\\parameter\\Rastertype')
    const_workspace_path_ = os.path.join(scriptPath, '..\\..\\')  # .gdb output
    const_import_geometry_features_path_ = os.path.join(scriptPath, '..\\..\\parameter')

    const_cmd_default_text = "#defaults"
    const_geodatabase_ext = '.GDB'
    const_geodatabase_SDE_ext = '.SDE'

    # base init codes (const_strings): failure reasons returned by init().
    const_init_ret_version = 'version'
    const_init_ret_sde = 'sde'
    const_init_ret_patch = 'patch'
    # ends

    # version specific: indices into a [major, minor, sp, build] version list.
    const_ver_len = 4
    CMAJOR = 0
    CMINOR = 1
    CSP = 2
    CBUILD = 3
    CVERSION_ATTRIB = 'version'
    # ends

    # externally user defined functions specific
    CCLASS_NAME = 'UserCode'
    CMODULE_NAME = 'MDCS_UC'
    # ends
    # ends
    def __init__(self):
        self.m_log = None   # logger object, expected to be attached externally
        self.m_doc = None   # parsed XML config (minidom document)

        # The following variables could be overridden by the command-line to
        # replace respective values in the XML config file.
        self.m_workspace = ''
        self.m_geodatabase = ''
        self.m_mdName = ''  # mosaic dataset name.
        # ends

        self.m_sources = ''
        self.m_gdbName = ''
        self.m_geoPath = ''
        self.m_config = ''
        self.m_commands = ''
        # NOTE(review): m_sources is assigned twice (also above) - harmless,
        # but one of the two is redundant.
        self.m_sources = ''  # source data paths for adding new rasters.
        self.m_dynamic_params = {}

        # art file update specific variables
        self.m_art_apply_changes = ''
        self.m_art_ws = ''
        self.m_art_ds = ''
        # ends

        # To keep track of the last objectID before any new data items could be added.
        self.m_last_AT_ObjectID = 0  # by default, take in all the previous records for any operation.

        # SDE specific variables
        self.m_IsSDE = False
        self.m_SDE_database_user = ''
        # ends
    def init(self):  # return (status [true|false], reason)
        # NOTE(review): this early exit returns a bare False while the other
        # failure paths return a (bool, reason) tuple; confirm how callers
        # test the return value before unifying.
        if (self.m_doc == None):
            return False
        # version check.
        try:
            # NOTE: 'min'/'max' shadow the builtins within this method.
            min = self.getXMLXPathValue("Application/ArcGISVersion/Product/Min", "Min").split('.')
            max = self.getXMLXPathValue("Application/ArcGISVersion/Product/Max", "Max").split('.')
            if (len(min) == self.const_ver_len):  # version check is disabled if no values have been defined in the MDCS for min and max.
                CMAJOR = 0
                CBUILD = self.const_ver_len
                if (len(max) != self.const_ver_len):
                    max = [0, 0, 0, 0]  # zero up max if max version isn't defined / has errors.
                for n in range(CMAJOR, CBUILD):
                    # Empty version fields default to 0 before int conversion.
                    if (min[n] == ''):
                        min[n] = 0
                    if (max[n] == ''):
                        max[n] = 0
                    min[n] = int(min[n])
                    max[n] = int(max[n])
                if (self.CheckMDCSVersion(min, max) == False):
                    return (False, self.const_init_ret_version)  # version check failed.
        except Exception as inst:
            self.log('Version check failure/' + str(inst), self.const_critical_text)
            # NOTE(review): bare False again (no reason code) - see above.
            return False
        # ends
        # ArcGIS patch test.
        if (self.isArcGISPatched() == False):
            self.log('An ArcGIS patch required to run MDCS is not yet installed. Unable to proceed.', self.const_critical_text)
            return (False, self.const_init_ret_patch)
        # ends
        self.setUserDefinedValues()  # replace user defined dynamic variables in config file with values provided at the command-line.
        # Command-line overrides take precedence; fall back to XML values.
        if (self.m_workspace == ''):
            self.m_workspace = self.prefixFolderPath(self.getAbsPath(self.getXMLNodeValue(self.m_doc, "WorkspacePath")), self.const_workspace_path_)
        if (self.m_geodatabase == ''):
            self.m_geodatabase = self.getXMLNodeValue(self.m_doc, "Geodatabase")
        if (self.m_mdName == ''):
            self.m_mdName = self.getXMLXPathValue("Application/Workspace/MosaicDataset/Name", "Name")
        const_len_ext = len(self.const_geodatabase_ext)
        ext = self.m_geodatabase[-const_len_ext:].upper()
        # Append '.gdb' when no recognised geodatabase extension was given.
        if (ext != self.const_geodatabase_ext and
            ext != self.const_geodatabase_SDE_ext):
            self.m_geodatabase += self.const_geodatabase_ext.lower()  # if no extension specified, defaults to '.gdb'
        self.m_gdbName = self.m_geodatabase[:len(self.m_geodatabase) - const_len_ext]  # .gdb
        self.m_geoPath = os.path.join(self.m_workspace, self.m_geodatabase)
        self.m_commands = self.getXMLNodeValue(self.m_doc, "Command")
        if (ext == self.const_geodatabase_SDE_ext):
            self.m_IsSDE = True
            try:
                self.log('Reading SDE connection properties from (%s)' % (self.m_geoPath))
                conProperties = arcpy.Describe(self.m_geoPath).connectionProperties
                self.m_SDE_database_user = ('%s.%s.') % (conProperties.database, conProperties.user)
            except Exception as inst:
                self.log(str(inst), self.const_critical_text)
                return (False, self.const_init_ret_sde)
        return (True, 'OK')
def getXMLXPathValue(self, xPath, key):
    """Return the stripped text of the first <key> element whose
    root-to-node ancestor path (joined with '/') equals xPath;
    '' when no match or the element has no children."""
    for candidate in self.m_doc.getElementsByTagName(key):
        chain = []
        cursor = candidate
        while cursor.parentNode is not None:
            chain.insert(0, cursor.nodeName)
            cursor = cursor.parentNode
        if '/'.join(chain) != xPath:
            continue
        if not candidate.hasChildNodes():
            return ''
        return str(candidate.firstChild.data).strip()
    return ''
def setLog(self, log):
    """Attach the external logger consumed by log(); always returns True."""
    self.m_log = log
    return True
def isLog(self):
    """Return True when a logger has been attached via setLog()."""
    return self.m_log is not None
def log(self, msg, level = const_general_text):
    """Route msg to the attached logger when present, else print to stdout.

    Fixes:
    - The 'critical' label was unreachable whenever const_critical_text >
      const_general_text, because the '> general' test matched first;
      the critical check now runs first.
    - print statement (Python 2 only) replaced with a parenthesized call
      that works on both Python 2 and 3.
    """
    if (self.m_log != None):
        return self.m_log.Message(msg, level)
    errorTypeText = 'msg'
    if (level == self.const_critical_text):
        errorTypeText = 'critical'
    elif (level > self.const_general_text):
        errorTypeText = 'warning'
    print('log-' + errorTypeText + ': ' + msg)
    return True
# user defined functions implementation code
def isUser_Function(self, name):
    """Return True if the user-code module (self.CMODULE_NAME) present in
    this frame's globals exposes attribute 'name' on an instance of
    self.CCLASS_NAME; False on any lookup/instantiation failure.

    Fix: the bare 'except:' also swallowed SystemExit/KeyboardInterrupt;
    narrowed to Exception. The unused local bindings were dropped.
    """
    try:
        frame = sys._getframe(0).f_globals
        module = frame[self.CMODULE_NAME]
        cls = getattr(module, self.CCLASS_NAME)
        instance = cls()
        getattr(instance, name)
    except Exception:
        return False
    return True
def invoke_user_function(self, name, data): # MDCS is always passed on which is the MD Configuration Script XML DOM
    """Invoke user-defined function 'name' on an instance of the user-code
    class, passing it 'data'. Returns the function's result, or False on
    any failure (which is logged).

    Fix: the outer error message referenced CCLASS_NAME without 'self.',
    raising NameError inside the handler and masking the original error.
    """
    ret = False
    try:
        frame = sys._getframe(0).f_globals # default to first stack.
        module = frame[self.CMODULE_NAME]
        cls = getattr(module, self.CCLASS_NAME)
        instance = cls()
        fnc = getattr(instance, name)
        try:
            ret = fnc(data)
        except Exception as inf:
            self.log('Error: executing user defined function (%s)' % (name), self.const_critical_text)
            self.log(str(inf), self.const_critical_text)
            return False
    except Exception as inf:
        self.log('Error: please check if user function (%s) is found in class (%s) of MDCS_UC module.' % (name, self.CCLASS_NAME), self.const_critical_text)
        self.log(str(inf), self.const_critical_text)
        return False
    return ret
#ends
def processEnv(self, node, pos, json): #support fnc for 'SE' command.
    """Walk 'node' and its following siblings, collecting element
    name/value/type triples into json[str(pos)]; recurses one level
    into elements that contain child elements (marked type 'p').

    Fix: dict.has_key() was removed in Python 3; replaced with 'in'.
    NOTE(review): the parameter name 'json' shadows the stdlib module;
    kept for interface compatibility.
    """
    while(node.nextSibling != None):
        if(node.nodeType != minidom.Node.TEXT_NODE):
            k = str(pos)
            if (k not in json):
                json[k] = {'key' : [], 'val' : [], 'type' : [] }
            json[k]['key'].append(node.nodeName)
            v = ''
            if (node.firstChild != None):
                v = node.firstChild.nodeValue.strip()
            json[k]['val'].append(v)
            json[k]['parent'] = node.parentNode.nodeName
            json[k]['type'].append('c')
            if (node.firstChild != None):
                if (node.firstChild.nextSibling != None):
                    pos = len(json)
                    # re-tag the current entry as a parent before recursing
                    json[k]['type'][len(json[k]['type']) - 1] = 'p'
                    self.processEnv(node.firstChild.nextSibling, pos, json)
                    pos = 0 # defaults to root always, assuming only 1 level deep xml.
        node = node.nextSibling
    return True
def getAbsPath(self, input):
    """Return the absolute form of 'input' when the path exists on disk;
    otherwise return it unchanged."""
    if os.path.exists(input):
        return os.path.abspath(input)
    return input
def prefixFolderPath(self, input, prefix):
    """Prepend 'prefix' to bare file names and to paths located inside a
    .gdb geodatabase; regular paths with a directory part are returned
    unchanged (stripped)."""
    path = input.strip()
    head, tail = os.path.split(path)
    if head.lower().find('.gdb') >= 0:
        # keep the geodatabase-internal path together under the prefix
        return os.path.join(prefix, head + '\\' + tail)
    if head == '':
        return os.path.join(prefix, tail)
    return path
def isArcGISPatched(self): # return values [true | false]
    """Return True when the installed ArcGIS meets the <Patch> requirement
    in the config (either the installed version exceeds the patch's
    target version, or the patch name appears under the ESRI Updates
    registry key).

    Fixes:
    - NamedNodeMap.has_key() (Python 2 only) replaced with getNamedItem().
    - 'CVERSION in setupInfo.keys() == False' is a chained comparison
      that always evaluated False, so missing keys were never detected;
      replaced with proper 'not in' membership tests.
    """
    # if the patch XML node is not properly formatted in structure/with values, MDCS returns an error and will abort the operation.
    patch_node = self.getXMLNode(self.m_doc, "Patch")
    if (patch_node == ''):
        return True  # no <Patch> requirement configured
    if (patch_node.attributes.length == 0):
        return False
    if (patch_node.attributes.getNamedItem(self.CVERSION_ATTRIB) is None):
        return False
    target_ver = patch_node.attributes.getNamedItem(self.CVERSION_ATTRIB).nodeValue.strip()
    if (len(target_ver) == 0):
        return False
    search_key = ''
    patch_desc_node = patch_node.firstChild.nextSibling
    while (patch_desc_node != None):
        node_name = patch_desc_node.nodeName
        if (node_name == 'Name'):
            if (patch_desc_node.hasChildNodes() == True):
                search_key = patch_desc_node.firstChild.nodeValue
            break
        patch_desc_node = patch_desc_node.nextSibling.nextSibling
    if (len(search_key) == 0): # if no patch description could be found, return False
        return False
    # normalize the target version into 4 ints (major, minor, sp, build)
    ver = (target_ver + '.0.0.0.0').split('.')
    for n in range(self.CMAJOR, self.CBUILD + 1):
        if (ver[n] == ''):
            ver[n] = 0
        ver[n] = int(ver[n])
    ver = ver[:4] # accept only the first 4 digits.
    target_v_str = installed_v_str = ''
    for i in range(self.CMAJOR, self.CBUILD + 1):
        target_v_str += "%04d" % ver[i]
    installed_ver = self.getDesktopVersion()
    for i in range(self.CMAJOR, self.CBUILD + 1):
        installed_v_str += "%04d" % installed_ver[i]
    tVersion = int(target_v_str)
    iVersion = int(installed_v_str)
    if (iVersion > tVersion): # the first priority is to check for the patch version against the installed version
        return True # if the installed ArcGIS version is greater than the patch's, it's OK to proceed.
    # if the installed ArcGIS version is lower than the intended target patch version, continue with the registry key check for the
    # possible patches installed.
    #HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\ESRI\Desktop10.2\Updates
    CPRODUCT_NAME = 'ProductName'
    CVERSION = 'Version'
    setupInfo = arcpy.GetInstallInfo()
    if (CVERSION not in setupInfo or
        CPRODUCT_NAME not in setupInfo):
        return False
    key = setupInfo[CPRODUCT_NAME] + setupInfo[CVERSION]
    try:
        reg_path = "Software\\Wow6432Node\\ESRI\\%s\\Updates" % (key)
        arcgis = _winreg.OpenKey(
            _winreg.HKEY_LOCAL_MACHINE, reg_path)
        i = 0
        while 1:
            name = _winreg.EnumKey(arcgis, i)
            arcgis_sub = _winreg.OpenKey(
                _winreg.HKEY_LOCAL_MACHINE, reg_path + '\\' + name)
            try:
                value, type = _winreg.QueryValueEx(arcgis_sub, "Name")
                if (type == 1): # reg_sz
                    if (value.lower().find(search_key.lower()) >= 0):
                        return True # return true if the value is found!
            except Exception:
                pass  # unreadable subkey value -- keep scanning
            i += 1
    except Exception:
        pass  # EnumKey raises when no more subkeys remain; fall through
    return False
def getDesktopVersion(self): #returns major, minor, sp and the build number.
    """Query arcpy.GetInstallInfo() and return the installed ArcGIS
    Desktop version as a list [major, minor, (sp,) build].

    Fixes:
    - A list comprehension used purely for its append side effect was
      replaced with an explicit extend().
    - The write-only 'ValError' flag was removed; conversion errors are
      simply skipped (e.g. spnumber can be 'N/A').
    """
    d = arcpy.GetInstallInfo()
    version = []
    buildNumber = 0
    spNumber = 0
    CVERSION = 'version'
    CBUILDNUMBER = 'buildnumber'
    CSPNUMBER = 'spnumber'
    for k in d:
        key = k.lower()
        if (key == CVERSION or
            key == CBUILDNUMBER or
            key == CSPNUMBER):
            try:
                if (key == CVERSION):
                    version.extend(int(x) for x in d[k].split("."))
                elif (key == CBUILDNUMBER):
                    buildNumber = int(d[k])
                elif (key == CSPNUMBER):
                    spNumber = int(d[k]) # could be N/A
            except Exception:
                pass
    CMAJOR_MINOR_REVISION = 3
    if (len(version) < CMAJOR_MINOR_REVISION): # On a system with full-install, ArcGIS version piece of information could return 3 numbers (major, minor, revision/SP)
        version.append(spNumber) # and thus the SP number shouldn't be added to the version sperately.
    version.append(buildNumber)
    return version
def CheckMDCSVersion(self, min, max, print_err_msg = True):
    """Check the installed ArcGIS Desktop version against the inclusive
    [min, max] window (each a [major, minor, sp, build] list of ints).
    A max component of 0 disables that upper-bound check. Returns True
    when compatible, False otherwise or on any error.

    NOTE(review): 'max_cp' below is presumably a typo for 'max_sp'
    (service pack) -- behavior is unaffected.
    """
    if (len(min) != self.const_ver_len or
        len(max) != self.const_ver_len):
        return False
    # index aliases into the version lists (shadow the class constants)
    CMAJOR = 0
    CMINOR = 1
    CSP = 2
    CBUILD = 3
    min_major = min[CMAJOR]
    min_minor = min[CMINOR]
    min_sp = min[CSP]
    min_build = min[CBUILD]
    max_major = max[CMAJOR]
    max_minor = max[CMINOR]
    max_cp = max[CSP]
    max_build = max[CBUILD]
    try:
        version = self.getDesktopVersion()
        if (len(version) >= self.const_ver_len): # major, minor, sp, build
            inst_major = version[CMAJOR]
            inst_minor = version[CMINOR]
            inst_sp = version[CSP]
            inst_build = version[CBUILD]
            ver_failed = False
            # upper-bound checks first (each skipped when the max component is 0)
            if (max_major > 0 and
                inst_major > max_major):
                ver_failed = True
            elif (max_minor > 0 and
                inst_minor > max_minor):
                ver_failed = True
            elif (max_cp > 0 and
                inst_sp > max_cp):
                ver_failed = True
            elif (max_build > 0 and
                inst_build > max_build):
                ver_failed = True
            # then lower-bound checks
            elif (inst_major < min_major):
                ver_failed = True
            elif (inst_minor < min_minor):
                ver_failed = True
            elif (inst_sp < min_sp):
                ver_failed = True
            elif (min_build > 0 and
                inst_build < min_build):
                ver_failed = True
            if (ver_failed):
                if (print_err_msg == True):
                    self.log('MDCS can\'t proceed due to ArcGIS version incompatiblity.', self.const_critical_text)
                    self.log('ArcGIS Desktop version is (%s.%s.%s.%s). MDCS min and max versions are (%s.%s.%s.%s) and (%s.%s.%s.%s) respectively.' % \
                    (inst_major, inst_minor, inst_sp, inst_build, min_major, min_minor, min_sp, min_build, max_major, max_minor, max_cp, max_build), self.const_critical_text)
                return False
    except Exception as inst:
        self.log('Version check failed: (%s)' % (str(inst)), self.const_critical_text)
        return False
    return True
def getXMLNodeValue(self, doc, nodeName):
    """Return the text content of the first <nodeName> element in doc,
    or '' when doc is None, the element is absent, or its first child
    is not a text node."""
    if doc is None:
        return ''
    matches = doc.getElementsByTagName(nodeName)
    if (matches is None
            or matches.length == 0
            or not matches[0].hasChildNodes()
            or matches[0].firstChild.nodeType != minidom.Node.TEXT_NODE):
        return ''
    return matches[0].firstChild.data
def updateART(self, doc, workspace, dataset):
    """Rewrite the Workspace/RasterDataset references inside an ART
    (add-rasters template) XML DOM in place.

    Updates the ';'-separated key=value pairs in every <NameString>
    node, the adjacent <PathName>/<Name> sibling nodes, and the
    'Database' entry under <ConnectionProperties>. Empty workspace or
    dataset arguments leave the corresponding values untouched.
    Returns True on success, False when doc is None, both args are
    empty, or an exception occurs (logged as critical).
    """
    if (doc == None):
        return False
    if (workspace.strip() == ''
        and dataset.strip() == ''):
        return False # nothing to do.
    try:
        nodeName = 'NameString'
        node_list = doc.getElementsByTagName(nodeName)
        for node in node_list:
            if (node.hasChildNodes() == True):
                vals = node.firstChild.nodeValue.split(';')
                upd_buff = []
                for v in vals:
                    vs = v.split('=')
                    for vs_ in vs:
                        vs_ = vs_.lower()
                        if (vs_.find('workspace') > 0):
                            if (workspace != ''):
                                vs[1] = ' ' + workspace
                                # keep the sibling <PathName> node in sync
                                if (node.nextSibling != None):
                                    if (node.nextSibling.nodeName == 'PathName'):
                                        node.nextSibling.firstChild.nodeValue = workspace
                        elif (vs_.find('rasterdataset') > 0):
                            if (dataset != ''):
                                vs[1] = ' ' + dataset
                                # keep the sibling <Name> node in sync
                                if (node.previousSibling != None):
                                    if (node.previousSibling.nodeName == 'Name'):
                                        node.previousSibling.firstChild.nodeValue = dataset
                    upd_buff.append('='.join(vs))
                if (len(upd_buff) > 0):
                    upd_nodeValue = ';'.join(upd_buff)
                    node.firstChild.nodeValue = upd_nodeValue
        nodeName = 'ConnectionProperties'
        node_list = doc.getElementsByTagName(nodeName)
        found = False
        for node in node_list: # only one node should exist.
            for n in node.firstChild.childNodes:
                if (n.firstChild != None):
                    if (n.firstChild.firstChild != None):
                        if (n.firstChild.nodeName.lower() == 'key'):
                            if (n.firstChild.firstChild.nodeValue.lower() == 'database'):
                                n.firstChild.nextSibling.firstChild.nodeValue = workspace
                                found = True
                                break;
            if (found == True):
                break
    except Exception as inst:
        self.log(str(inst), self.const_critical_text)
        return False
    return True
def getInternalPropValue(self, dic, key):
    """Return dic[key], or '' when the key is absent.

    Fix: dict.has_key() was removed in Python 3; dict.get() covers both
    branches in one call.
    """
    return dic.get(key, '')
def setUserDefinedValues(self):
    """Substitute $KEY$ placeholders in every XML text node with values
    from self.m_dynamic_params (command-line supplied). Text of the
    form 'default;$KEY$' supplies a fallback; '\\$' escapes a literal
    dollar sign.

    Fix: dict.has_key() (removed in Python 3) replaced with 'in'.
    """
    nodes = self.m_doc.getElementsByTagName('*')
    for node in nodes:
        if (node.firstChild != None):
            v = node.firstChild.data.strip()
            if (v.find('$') == -1):
                continue  # no placeholder -- leave the node alone
            usr_key = v
            default = ''
            d = v.split(';')
            if (len(d) > 1):
                default = d[0].strip()
                usr_key = d[1].strip()
            revalue = []
            first = usr_key.find('$')
            first += 1
            second = first + usr_key[first+1:].find('$') + 1
            if (first > 1):
                revalue.append(usr_key[0:first - 1])  # text before the first '$'
            while(second >= 0):
                uValue = usr_key[first:second]
                if (uValue.upper() in self.m_dynamic_params):
                    revalue.append(self.m_dynamic_params[uValue.upper()])
                else:
                    if (uValue.find('\$') >= 0):
                        uValue = uValue.replace('\$', '$')
                    else:
                        if (default == ''):
                            default = uValue
                        if (first == 1
                            and second == (len(usr_key) - 1)):
                            uValue = default
                    revalue.append(uValue)
                first = second + 1
                indx = usr_key[first+1:].find('$')
                if (indx == -1):
                    if (first != len(usr_key)):
                        revalue.append(usr_key[first:len(usr_key)])
                    break
                second = first + indx + 1
            updateVal = ''.join(revalue)
            node.firstChild.data = updateVal
def getXMLNode(self, doc, nodeName):
    """Like getXMLNodeValue, but return the first matching element node
    itself rather than its text; '' on any miss."""
    if doc is None:
        return ''
    found = doc.getElementsByTagName(nodeName)
    if (found is None
            or found.length == 0
            or not found[0].hasChildNodes()
            or found[0].firstChild.nodeType != minidom.Node.TEXT_NODE):
        return ''
    return found[0]
def foundLockFiles(self, folder_path):
    """Return True if folder_path contains a '.lock' file whose fourth
    dot-separated field matches the current process id."""
    own_pid = os.getpid()
    for entry in os.listdir(folder_path):
        if entry[-5:].lower() != '.lock':
            continue
        fields = entry.split('.')
        if own_pid == int(fields[3]):  # field index 3 carries the process id
            return True
    return False
def waitForLockRelease(self, folder_path_):
    """Busy-wait until no lock file owned by this process remains in
    folder_path_. Returns the (approximate) seconds waited, -1 on
    timeout, or -2 when the folder does not exist."""
    if not os.path.exists(folder_path_):
        self.log('lock file path does not exist!. Quitting...', self.const_critical_text)
        return -2 #path does not exist error code!
    t0 = datetime.now()
    duration_req_sec_ = 3        # recheck interval once waiting has begun
    max_time_to_wait_sec_ = 10   # give up after this much accumulated waiting
    tot_count_sec_ = 0
    while True:
        # until the first interval elapses, probe continuously in the hope
        # of an early exit
        if tot_count_sec_ == 0 and not self.foundLockFiles(folder_path_):
            break
        elapsed = datetime.now() - t0
        if elapsed.seconds > duration_req_sec_:
            if not self.foundLockFiles(folder_path_):
                break
            tot_count_sec_ += duration_req_sec_
            if tot_count_sec_ > max_time_to_wait_sec_:
                self.log('lock file release timed out!. Quitting...', self.const_critical_text)
                tot_count_sec_ = -1
                break
            t0 = datetime.now()
    return tot_count_sec_
| {
"content_hash": "a4610bf6df8b816124b6dea1d0abf85f",
"timestamp": "",
"source": "github",
"line_count": 715,
"max_line_length": 178,
"avg_line_length": 33.71608391608392,
"alnum_prop": 0.4985688804081802,
"repo_name": "rkelson/weighted-raster-overlay-service-toolbox",
"id": "c968a2cfb328b6680e96cddd7e9c4ee54377942f",
"size": "25111",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "WROS/scripts/Base/Base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "196"
},
{
"name": "Python",
"bytes": "202404"
}
],
"symlink_target": ""
} |
"""
Tests of errors that are common across a wide group of pywbemcli groups and
commands.
This includes:
1. Tests of connection timeout on bad server with all commands. Note that the
command options in these tests are just sufficient to pass command parsing
since all tests should fail with connection error.
2. Test of the --namespace option with namespace that is not in the target
wbem server.
"""
from __future__ import absolute_import, print_function
import os
import pytest
from .cli_test_extensions import CLITestsBase
SCRIPT_DIR = os.path.dirname(__file__)
# Mock MOF model consumed by the namespace-error tests below.
SIMPLE_MOCK_FILE = 'simple_mock_model.mof'
SIMPLE_MOCK_FILE_PATH = os.path.join(SCRIPT_DIR, SIMPLE_MOCK_FILE)
# Readable aliases for the per-testcase 'condition' flag.
OK = True
RUN = True
FAIL = False
SKIP = False
TEST_CASES_CONNECTION_FAIL = [
    # Each entry is [group, cmd, condition]:
    # group - String, defining the pywbemcli command group to be executed.
    # cmd - string/required arguments defining the command string
    # condition - If True, the test is executed, Otherwise it is skipped.
    ['class', 'enumerate', OK],
    ['class', 'get CIM_BLAH', OK],
    ['class', 'delete CIM_BLAH', OK],
    ['class', 'associators CIM_BLAH', OK],
    ['class', 'references CIM_BLAH', OK],
    ['class', 'invokemethod CIM_BLAH methodx', OK],
    ['class', 'find CIM_*', OK],
    ['instance', 'enumerate CIM_Blah', OK],
    ['instance', 'get CIM_BLAH.x=3', OK],
    ['instance', 'create CIM_blah -p x=3', OK],
    ['instance', 'modify CIM_blah.x=3 -p x=4', OK],
    ['instance', 'delete CIM_BLAH.x=3', OK],
    ['instance', 'associators CIM_BLAH.x=3', OK],
    ['instance', 'references CIM_BLAH.x=4', OK],
    ['instance', 'invokemethod CIM_BLAH.x=4 methodx', OK],
    ['instance', 'query select', FAIL],
    ['instance', 'count CIM_*', OK],
    ['connection', 'test', OK],
    # The other connection commands do not connect to a server
    ['qualifier', 'get qualblah', OK],
    ['qualifier', 'enumerate', OK],
    ['server', 'brand', OK],
    ['server', 'info', OK],
    ['profile', 'list', OK],
    ['profile', 'centralinsts', OK],
]
class TestConnectionFail(CLITestsBase):
    """
    Verify that commands requiring a live WBEM server fail with
    ConnectionError when the server address is unreachable.
    """
    @pytest.mark.parametrize(
        "grp, cmd, condition", TEST_CASES_CONNECTION_FAIL)
    def test_execute_pywbemcli(self, grp, cmd, condition):
        """
        Execute pywbemcli with the defined input and test output.
        This test builds the inputs dictionary and exp_response dictionary
        from the cmd line inputs.
        """
        desc = "Verify {} args {} fails with connection error".format(grp, cmd)
        # Build inputs dictionary for the test with bad svr name and cmd/args
        inputs = {'general': ['--server', 'http://blahblah', '--timeout', '1'],
                  'args': cmd.split(' ')}
        # Build expected response dictionary that tests for ConnectionError
        exp_response = {'stderr': ['ConnectionError'],
                        'rc': 1,
                        'test': 'innows'}
        mock = None
        self.command_test(desc, grp, inputs, exp_response,
                          mock, condition)
TEST_CASES_NAMESPACE_ERR = [
    # Each entry is [group, cmd, condition]:
    # group - String, defining the pywbemcli command group to be executed.
    # cmd - string/required arguments defining the command string
    # condition - If True, the test is executed, Otherwise it is skipped.
    ['class', 'enumerate --namespace blah', OK],
    ['class', 'get CIM_Foo --namespace blah', OK],
    ['class', 'delete CIM_Foo --namespace blah', OK],
    ['class', 'associators CIM_Foo --namespace blah', OK],
    ['class', 'references CIM_Foo --namespace blah', OK],
    ['class', 'invokemethod CIM_Foo methodx --namespace blah', OK],
    ['class', 'find CIM_* --namespace blah', OK],
    ['instance', 'enumerate CIM_Foo --namespace blah', OK],
    ['instance', 'get CIM_Foo.x=3 --namespace blah', OK],
    ['instance', 'create CIM_Foo -p x=3 --namespace blah', OK],
    ['instance', 'modify CIM_Foo.x=3 -p x=4 --namespace blah', OK],
    ['instance', 'delete CIM_Foo.x=3 --namespace blah', OK],
    ['instance', 'associators CIM_Foo.x=3 --namespace blah', OK],
    ['instance', 'references CIM_Foo.x=4 --namespace blah', OK],
    ['instance', 'invokemethod CIM_Foo.x=4 methodx --namespace blah', OK],
    # pywbem issue # 2313 - Fails with QueryLanguage, not namespace error
    # ['instance', 'query select --namespace blah', OK],
    ['instance', 'count CIM_* --namespace blah', OK],
    ['qualifier', 'get qualblah --namespace blah', OK],
    ['qualifier', 'enumerate --namespace blah', OK],
]
class TestNamespaceError(CLITestsBase):
    """
    Verify that commands targeting a namespace that does not exist on the
    (mocked) server fail with CIM_ERR_INVALID_NAMESPACE.
    """
    @pytest.mark.parametrize(
        "grp, cmd, condition", TEST_CASES_NAMESPACE_ERR)
    def test_execute_pywbemcli(self, grp, cmd, condition):
        """
        Execute pywbemcli with the defined input and test output.
        This test builds the inputs dictionary and exp_response dictionary
        from the cmd line inputs.
        """
        desc = "Verify {} args {} fails with namespace error".format(grp, cmd)
        # Build inputs dictionary for the test with bad svr name and cmd/args
        inputs = {'args': cmd.split(' ')}
        # Build expected response dictionary that tests for the CIM error
        exp_response = {'stderr': ['CIMError', 'CIM_ERR_INVALID_NAMESPACE'],
                        'rc': 1,
                        'test': 'innows'}
        mock = SIMPLE_MOCK_FILE_PATH
        self.command_test(desc, grp, inputs, exp_response,
                          mock, condition)
| {
"content_hash": "93dab4c71f35bc0899c6fc8db86abaae",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 79,
"avg_line_length": 36.07096774193548,
"alnum_prop": 0.6206403147916294,
"repo_name": "pywbem/pywbemtools",
"id": "b67fb9451963e9797a9c20e713a68c2dd8cd17eb",
"size": "5592",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/pywbemcli/test_misc_errors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "595"
},
{
"name": "Makefile",
"bytes": "32698"
},
{
"name": "Python",
"bytes": "1960612"
},
{
"name": "Shell",
"bytes": "18849"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages

# Packaging metadata for the django-redis-sessions backend.
setup(
    name='django-redis-sessions',
    version='0.1',
    author='David Paccoud',
    author_email='dpaccoud@gmail.com',
    description='Redis Sessions Backend for Django',
    license='BSD',
    url='http://bitbucket.org/dpaccoud/django-redis-sessions/',
    keywords=['django', 'redis', 'sessions'],
    packages=find_packages(),
    zip_safe=False,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Application Frameworks',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Utilities',
    ],
)
| {
"content_hash": "f17c3e7318d725782f0667e3c58fee98",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 79,
"avg_line_length": 34.370370370370374,
"alnum_prop": 0.6120689655172413,
"repo_name": "dpaccoud/django-redis-sessions",
"id": "dd75d616e24fb9f8ea978f51e77c1dbc5c867ee9",
"size": "928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "3836"
}
],
"symlink_target": ""
} |
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
    """Return this scenario's description: initial formation, checking
    point, and the ordered list of test actions to execute."""
    actions = [
        [TestAction.create_vm, 'vm1', ],
        [TestAction.create_volume, 'volume1', 'flag=scsi'],
        [TestAction.attach_volume, 'vm1', 'volume1'],
        [TestAction.create_volume, 'volume2', 'flag=scsi'],
        [TestAction.attach_volume, 'vm1', 'volume2'],
        [TestAction.create_volume, 'volume3', 'flag=scsi'],
        [TestAction.attach_volume, 'vm1', 'volume3'],
        [TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot1'],
        [TestAction.resize_volume, 'vm1', 5*1024*1024],
        [TestAction.create_volume_backup, 'volume1', 'volume1-backup1'],
        [TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot5'],
        [TestAction.clone_vm, 'vm1', 'vm2'],
        [TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot9'],
        [TestAction.batch_delete_volume_snapshot, ['vm1-snapshot5','volume2-snapshot1',]],
        [TestAction.stop_vm, 'vm1'],
        [TestAction.use_volume_snapshot, 'volume2-snapshot5'],
        [TestAction.start_vm, 'vm1'],
        [TestAction.delete_vm_snapshot, 'vm1-snapshot9'],
        [TestAction.create_vm_snapshot, 'vm2', 'vm2-snapshot13'],
        [TestAction.delete_vm_snapshot, 'vm2-snapshot13'],
    ]
    return dict(initial_formation="template5", checking_point=8, path_list=actions)
'''
The final status:
Running:['vm2', 'vm1']
Stopped:[]
Enabled:['vm1-snapshot1', 'volume1-snapshot1', 'volume3-snapshot1', 'volume1-snapshot5', 'volume2-snapshot5', 'volume3-snapshot5', 'volume1-backup1']
attached:['volume1', 'volume2', 'volume3']
Detached:[]
Deleted:['vm1-snapshot5', 'volume2-snapshot1', 'vm1-snapshot9', 'volume1-snapshot9', 'volume2-snapshot9', 'volume3-snapshot9', 'vm2-snapshot13']
Expunged:[]
Ha:[]
Group:
'''
| {
"content_hash": "fcc7be53adadfea3809a244db515c111",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 150,
"avg_line_length": 39.714285714285715,
"alnum_prop": 0.7014388489208633,
"repo_name": "zstackio/zstack-woodpecker",
"id": "43c7fdd5a15e6f1a0cc3a74523157f22ca8f1980",
"size": "1668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "integrationtest/vm/multihosts/vm_snapshots/paths/ceph_path52.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2356"
},
{
"name": "Go",
"bytes": "49822"
},
{
"name": "Makefile",
"bytes": "687"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "13070596"
},
{
"name": "Shell",
"bytes": "177861"
}
],
"symlink_target": ""
} |
import traceback
from dateutil.parser import parse as dateparse
import string
import datetime
import time
import praw
import sqlite3
import re
""" USER CONFIG """
USERAGENT = ""
#Describe the bot and what it does. Include your username
APP_ID = ""
APP_SECRET = ""
APP_URI = ""
APP_REFRESH = ""
# https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/
SUBREDDIT = "Goldtesting"
#This is the subreddit where the bot finds the schedules
#It should be private with only the team of moderators
TITLESEPARATOR = "||"
#This is what demarcates the timestamp from the sub from the title
#This should not be a naturally occuring part of any title
#Example: "15 December 2014 ||| GoldTesting ||| Welcome to the subreddit"
# ^Time to post ^Sub to post ^Title of post
IGNORE_FLAG = "#"
#If this character is THE FIRST CHARACTER IN THE TITLE,
#The bot will ignore that post. Used for meta / discussion.
SCHEDULEDFLAIR_TEXT = "Scheduled!"
SCHEDULEDFLAIR_CSS = "scheduled"
#This flair will be assigned to the source when the source is scheduled
POSTEDFLAIR_TEXT = "Post made!"
POSTEDFLAIR_CSS = "posted"
#This flair will be assigned to the source when the post is made
MAXPOSTS = 3
#The number of items you want to get from /new. Recommended 100
ALLOWOTHEREDITS = False
#Are users allowed to edit other peoples' post schedules?
WAIT = 30
#How many seconds in between loop cycles. Completely inactive during this time.
ADMINS = ["ApexRedditr", "GoldenSights"]
#These are the people who will get tracebacks when the bot has problems.
TRACEBACK_SUBJECT = "SchedulizerM Error traceback"
POSTEDCOMMENT = "Your post to /r/%s has been created. %s"
#Made in the source when the post is made
FOOTER = """
_____
If any information is incorrect, reply to this comment with the incorrect key,
a colon, and new value. See the
[Bot code](https://github.com/voussoir/reddit/tree/master/Schedulizer-ModTeam)
page for examples. Only make 1 edit per line.
A foolproof time format is
"DD Monthname YYYY HH:MM". All times are in UTC
([Timezone map](http://www.timeanddate.com/time/map/))
Deleting your post will cause it to be removed from the schedule.
If you think the bot is down, send it
[this message](http://www.reddit.com/message/compose?to=??????&subject=Ping&message=Ping).
""" # Don't forget to put your username in this message
SCHEDULECOMMENT = """
Your post has been scheduled. Please check that this information is correct:
"""
#Made in the source when the source is made
ERRORCOMMENT = """
Encountered the following errors:
%s
The post will use placeholder values until you correct the information
_______
"""
ERRORDISTINGUISHFAIL = "Attempted to distinguish post and failed."
ERRORSTICKYFAIL = "Attempted to sticky post and failed."
ERRORDATETIME = '!! DateTime: Could not understand time format, or date is invalid. You entered: `%s`'
ERRORTOOEARLY = '!! DateTime: The time you have entered is before present time. You entered `%s`'
ERRORTITLEFORM = '!! Title: Title expected 3 attributes separated by `' + TITLESEPARATOR + '`'
ERRORLONGTITLE = "!! Title: Your title is too long. Max 300 characters, you have %d"
ERRORSUBREDDIT = '!! Reddit: Subbreddit /r/%s could not be found'
ERRORNOTALLOWED = "!! Reddit: Bot is not allowed to submit to /r/%s."
ERRORUNKNOWNCOMMAND = "Did not understand the command: `%s`"
ERRORCRITICAL = '\n\nBecause of a critical post error, your chosen timestamp has been forfeited. You will need to edit it along with the other keys.\n\n'
IMPOSSIBLETIME = 2147483646
""" All done! """
try:
    # Optional local 'bot' module may override the user agent
    # (keeps credentials out of this file).
    import bot
    USERAGENT = bot.aG
except ImportError:
    pass
print('Loading database')
sql = sqlite3.connect('sql.db')
cur = sql.cursor()
# One row per scheduled post; POST holds the created submission id ("None" until made).
cur.execute('CREATE TABLE IF NOT EXISTS schedules(ID TEXT, TIME INT, REDDIT TEXT, TITLE TEXT, DIST INT, STICKY INT, FLAIR TEXT, FLCSS TEXT, POST TEXT)')
#                                                 0        1         2            3           4         5           6           7          *
cur.execute('CREATE INDEX IF NOT EXISTS schedule_idindex ON schedules(id)')
cur.execute('CREATE INDEX IF NOT EXISTS schedule_postindex ON schedules(post)')
sql.commit()
print('Logging in')
# OAuth2 login (see the reddit link in the config section above)
r = praw.Reddit(USERAGENT)
r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI)
r.refresh_access_information(APP_REFRESH)
def getTime(bool):
timeNow = datetime.datetime.now(datetime.timezone.utc)
timeUnix = timeNow.timestamp()
if bool is False:
return timeNow
else:
return timeUnix
def processpost(inputpost):
    """Parse a schedule-source submission's title
    ('time || sub [flags] || title') and insert a row into the schedules
    table, then acknowledge with a comment and flair.

    'inputpost' may be a praw submission or a submission id string.
    Recognized flags in the subreddit segment: [d] distinguish,
    [s] sticky, [f:...] flair text, [fc:...] flair CSS class.
    """
    if isinstance(inputpost, str):
        if 't3_' not in inputpost:
            inputpost = 't3_' + inputpost
        inputpost = r.get_info(thing_id=inputpost)
    sourceid = inputpost.id
    print('Schedulizing post ' + sourceid)
    nowstamp = getTime(True)
    sourcetitle = inputpost.title
    sourcesplit = sourcetitle.split(TITLESEPARATOR)
    errors = []
    critical = False
    dosticky = 0
    dodist = 0
    try:
        # placeholder values survive into the error comment if parsing fails
        posttime = "?"
        postsub = "?"
        posttitle = "?"
        postflair = ""
        postflcss = ""
        posttime = sourcesplit[0]
        postsub = sourcesplit[1]
        postsub = postsub.replace('/r/', '')
        if '[d]' in postsub.lower():
            dodist = 1
        if '[s]' in postsub.lower():
            dosticky = 1
        regex = re.search("\[f:[^\]]*\]", postsub, re.IGNORECASE)
        if regex:
            postflair = regex.group(0)
            postflair = postflair[3:-1]
        regex = re.search("\[fc:[^\]]*\]", postsub, re.IGNORECASE)
        if regex:
            postflcss = regex.group(0)
            postflcss = postflcss[4:-1]
        elif postflair != "":
            # no explicit CSS class: derive one from the flair text
            postflcss = removespecial(postflair)
        postsubsplit = postsub.split(' ')
        while '' in postsubsplit:
            postsubsplit.remove('')
        postsub = postsubsplit[0]
        posttitle = '||'.join(sourcesplit[2:])
    except IndexError:
        errors.append(ERRORTITLEFORM)
        critical = True
    try:
        posttimerender = dateparse(posttime)
        posttimerender = posttimerender.replace(tzinfo=datetime.timezone.utc)
        posttimestamp = posttimerender.timestamp()
        if posttimestamp < nowstamp:
            errors.append(ERRORTOOEARLY % posttime)
            critical = True
    except:
        #December 31, 2500
        posttimestamp = IMPOSSIBLETIME
        errors.append(ERRORDATETIME % posttime)
        critical = True
    try:
        validatesubreddit(postsub)
    except:
        errors.append(ERRORSUBREDDIT % postsub)
        critical = True
    #ID TEXT, TIME INT, REDDIT TEXT, TITLE TEXT, DIST INT, STICKY INT, FLAIR TEXT, FLCSS TEXT, POST TEXT
    # 0        1         2            3           4         5           6           7           8
    if critical:
        # forfeit the timestamp so the post cannot fire with bad data
        posttimestamp = IMPOSSIBLETIME
    datalist = [sourceid, posttimestamp, postsub, posttitle, dodist, dosticky, postflair, postflcss, "None"]
    cur.execute('SELECT * FROM schedules WHERE ID=?', [sourceid])
    fetch = cur.fetchone()
    if not fetch:
        # new schedule only: insert, acknowledge, and flair the source post
        # NOTE(review): nesting reconstructed from a whitespace-stripped dump --
        # confirm whether the comment/flair lines belong inside this guard.
        cur.execute('INSERT INTO schedules VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)', datalist)
        sql.commit()
        schedulecomment = buildcomment(datalist, errors, critical)
        print('Writing comment')
        inputpost.add_comment(schedulecomment)
        inputpost.set_flair(flair_text=SCHEDULEDFLAIR_TEXT, flair_css_class=SCHEDULEDFLAIR_CSS)
def updatepost(comment):
    """Apply 'key: value' edit lines from a reply comment to the stored
    schedule row of its parent submission, re-validate, and acknowledge.

    Fixes:
    - The time-parse failure branch referenced an undefined name
      'posttime' (NameError); it now reports the offending 'arg'.
    - distinguish/sticky used two consecutive 'if arg.lower()' tests; once
      the first coerced arg to int 0, the second crashed with
      AttributeError. The second test is now an elif.
    """
    source = comment.submission
    print('Updating schedule for ' + source.id + ' via comment ' + comment.id)
    pauthor = source.author.name
    cauthor = comment.author.name
    # NOTE(review): the admin check compares pauthor (post author), not
    # cauthor (comment author), against ADMINS -- confirm this is intended.
    if ALLOWOTHEREDITS or (pauthor == cauthor) or any(pauthor.lower() == admin.lower() for admin in ADMINS):
        cur.execute('SELECT * FROM schedules WHERE ID=?', [source.id])
        data = cur.fetchone()
        if data:
            data = list(data)
            errors = []
            commentsplit = comment.body.split('\n')
            while '' in commentsplit:
                commentsplit.remove('')
            for line in commentsplit:
                line = line.split(':')
                line[0] = line[0].replace(' ', '')
                command = line[0].lower()
                arg = ':'.join(line[1:])
                if command in ['time', 'timestamp']:
                    try:
                        posttimerender = dateparse(arg)
                        posttimerender = posttimerender.replace(tzinfo=datetime.timezone.utc)
                        posttimestamp = posttimerender.timestamp()
                    except:
                        #December 31, 2500
                        posttimestamp = IMPOSSIBLETIME
                        errors.append(ERRORDATETIME % arg)
                    data[1] = posttimestamp
                elif command in ['reddit', 'subreddit', 'sr']:
                    try:
                        arg = arg.replace(' ', '')
                        arg = arg.replace('/r/', '')
                        validatesubreddit(arg)
                    except:
                        # This will be errored in the upcoming `ispostvalid` line
                        pass
                    data[2] = arg
                elif command in ['title']:
                    data[3] = arg
                elif command in ['distinguish', 'dist', 'd']:
                    if arg.lower() in ['0', 'no', 'false', 'off']:
                        arg = 0
                    elif arg.lower() in ['1', 'yes', 'true', 'on']:
                        arg = 1
                    data[4] = arg
                elif command in ['sticky', 's']:
                    if arg.lower() in ['0', 'no', 'false', 'off']:
                        arg = 0
                    elif arg.lower() in ['1', 'yes', 'true', 'on']:
                        arg = 1
                    data[5] = arg
                elif command in ['flair-text', 'flairtext', 'flair_text']:
                    data[6] = arg
                elif command in ['flair-css', 'flaircss', 'flair_css']:
                    data[7] = removespecial(arg)
                else:
                    errors.append(ERRORUNKNOWNCOMMAND % command)
            print('\tChecking schedule validity')
            status = ispostvalid(data, errors)
            if status[0] is False:
                # invalid schedule: forfeit the timestamp so it cannot fire
                data[1] = IMPOSSIBLETIME
                critical = True
            else:
                critical = False
            schedulecomment = buildcomment(data[:], errors, critical)
            print('\tWriting comment')
            comment.reply(schedulecomment)
            cur.execute('DELETE FROM schedules WHERE ID=?', [source.id])
            cur.execute('INSERT INTO schedules VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)', data)
            sql.commit()
            print('\tDone.')
    else:
        print(cauthor + ' may not edit ' + pauthor + "'s post")
def validatesubreddit(sr):
    """Probe /r/<sr>; praw raises (deliberately uncaught) if it doesn't exist."""
    # Strip the same prefixes, in the same order, as before.
    for fragment in ('/r/', 'r/', '/'):
        sr = sr.replace(fragment, '')
    r.get_subreddit(sr, fetch=True)
def ispostvalid(inputdata, errors):
    """Validate a schedules row; append messages to `errors`, return [ok, errors].

    Checks that the scheduled time is in the future, that the target
    subreddit exists, and that the title fits reddit's 300-char limit.
    """
    nowstamp = getTime(True)
    status = True
    if inputdata[1] < nowstamp:
        n = datetime.datetime.utcfromtimestamp(inputdata[1])
        n = datetime.datetime.strftime(n, "%B %d %Y %H:%M")
        errors.append(ERRORTOOEARLY % n)
        status = False
    try:
        validatesubreddit(inputdata[2])
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.
        print('\tBad subreddit: ' + inputdata[2])
        errors.append(ERRORSUBREDDIT % inputdata[2])
        status = False
    if len(inputdata[3]) > 300:
        errors.append(ERRORLONGTITLE % len(inputdata[3]))
        status = False
    return [status, errors]
def buildcomment(datalist, errors, critical=False):
    """Compose the bot's status reply: header, errors, schedule table, footer."""
    body = SCHEDULECOMMENT
    if errors:
        body = ERRORCOMMENT % "\n\n".join(errors)
    if critical:
        body += ERRORCRITICAL
    body += buildtable(datalist)
    return body + FOOTER
#ID TEXT, TIME INT, REDDIT TEXT, TITLE TEXT, DIST INT, STICKY INT, FLAIR TEXT, FLCSS TEXT, POST TEXT
# 0 1 2 3 4 5 6 7 8
def buildtable(inputdata):
    """Render a schedules row as a markdown key/value table.

    `inputdata` is a full row:
    [ID, TIME, REDDIT, TITLE, DIST, STICKY, FLAIR, FLCSS, POST].
    The input list is NOT modified (previously it was reformatted in place,
    forcing callers to pass defensive copies).  The leftover troubleshooting
    print of the raw timestamp has been removed.
    """
    row = list(inputdata)
    timeobj = datetime.datetime.utcfromtimestamp(row[1])
    row[1] = datetime.datetime.strftime(timeobj, "%B %d %Y %H:%M UTC")
    row[2] = '/r/' + row[2]
    row[3] = '`' + row[3] + '`'
    row[4] = "True" if row[4] == 1 else "False"
    row[5] = "True" if row[5] == 1 else "False"
    # Drop the ID and POST columns; the table shows the middle seven fields.
    row = row[1:-1]
    table = """
Key | Value
:- | :-
Time | {0}
Subreddit | {1}
Title | {2}
Distinguish | {3}
Sticky | {4}
Flair-text | {5}
Flair-CSS | {6}
""".format(*row)
    return table
def removespecial(inputstr):
    """Strip every character that is not an ASCII letter or digit."""
    allowed = set(string.ascii_letters + string.digits)
    return ''.join(ch for ch in inputstr if ch in allowed)
def manage_new():
    """Scan SUBREDDIT/new and register any submission not yet in the database."""
    print('Managing ' + SUBREDDIT + '/new')
    subreddit = r.get_subreddit(SUBREDDIT)
    for post in list(subreddit.get_new(limit=MAXPOSTS)):
        cur.execute('SELECT * FROM schedules WHERE ID=?', [post.id])
        if cur.fetchone():
            continue
        if post.title[0] != IGNORE_FLAG:
            processpost(post)
        else:
            # Flagged posts get a placeholder row so they are never revisited.
            placeholder = [post.id, 1, "", "", 0, 0, "", "", "meta"]
            cur.execute('INSERT INTO schedules VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)', placeholder)
            sql.commit()
def manage_unread():
    """Process the inbox: answer pings, honor an admin 'kill', apply comment edits."""
    print('Managing inbox')
    inbox = list(r.get_unread(limit=100))
    for message in inbox:
        if isinstance(message, praw.objects.Message):
            if "ping" in message.subject.lower():
                message.reply("Pong")
                print('Responding to ping')
            try:
                mauthor = message.author.name
                if any(mauthor.lower() == admin.lower() for admin in ADMINS):
                    if "kill" in message.subject.lower():
                        # Admin-triggered hard shutdown of the whole process.
                        alertadmins("Hard shutdown", "The bot is being killed by " + mauthor)
                        quit()
            except AttributeError:
                # Deleted accounts / system messages have no .author.name.
                pass
        elif isinstance(message, praw.objects.Comment):
            commentsub = message.subreddit.display_name
            if commentsub.lower() == SUBREDDIT.lower():
                updatepost(message)
        # Mark read last, so a crash above leaves the message to be retried.
        message.mark_as_read()
def manage_schedule():
    """Submit any schedules whose time has arrived; prune deleted sources first."""
    print('Managing schedules')
    # Rows with POST='None' have not been submitted yet.
    cur.execute('SELECT * FROM schedules WHERE POST =?', ['None'])
    fetch = cur.fetchall()
    fetch = list(fetch)
    fetch.sort(key=lambda x: x[1])
    reread = False
    idlist = ['t3_'+i[0] for i in fetch]
    submissionlist = []
    print('Checking for deletions')
    # r.get_info accepts at most 100 fullnames per call, so batch.
    while len(idlist) > 0:
        submissionlist += r.get_info(thing_id=idlist[:100])
        idlist = idlist[100:]
    for item in submissionlist:
        # No author => author-deleted; banned_by set => removed by a mod.
        if (not item.author) or (item.banned_by):
            print('\t' + item.id + ' has been deleted')
            cur.execute('DELETE FROM schedules WHERE ID=?', [item.id])
            sql.commit()
            reread = True
    if reread:
        # Something was pruned; reload the remaining pending rows.
        cur.execute('SELECT * FROM schedules WHERE POST =?', ['None'])
        fetch = cur.fetchall()
        fetch = list(fetch)
        fetch.sort(key=lambda x: x[1])
    nowstamp = getTime(True)
    for schedule in fetch:
        postid = schedule[0]
        print('Checking schedule ' + postid, end="")
        posttime = int(schedule[1])
        if posttime < nowstamp:
            print()
            print('\tPreparing to post')
            post = r.get_info(thing_id="t3_" + postid)
            ptitle = schedule[3]
            psub = schedule[2]
            print('\tSubmitting post')
            try:
                # Mirror the source: self-posts carry text, links carry the URL.
                if post.is_self:
                    pbody = post.selftext
                    newpost = r.submit(psub, ptitle, text=pbody)
                else:
                    purl = post.url
                    newpost = r.submit(psub, ptitle, url=purl, resubmit=True)
                # Post-submit extras are best-effort; collect their failures.
                errors = []
                if schedule[4] == 1:
                    try:
                        print('\tDistinguishing')
                        newpost.distinguish()
                    except:
                        print('\tDistinguish failed')
                        errors.append(ERRORDISTINGUISHFAIL)
                if schedule[5] == 1:
                    try:
                        print('\tStickying')
                        newpost.sticky()
                    except:
                        print('\tSticky failed')
                        errors.append(ERRORSTICKYFAIL)
                if schedule[6] != "" or schedule[7] != "":
                    try:
                        print('\tFlairing')
                        newpost.set_flair(flair_text=schedule[6], flair_css_class=schedule[7])
                    except:
                        print('\tFlair failed')
                newsub = newpost.subreddit.display_name
                newlink = newpost.short_link
                newid = newpost.id
                newcomment = POSTEDCOMMENT % (newsub, newlink)
                newcomment += '\n\n'.join(errors)
                # Record the new submission id so this row is never reposted.
                cur.execute('UPDATE schedules SET POST=? WHERE ID=?', [newid, postid])
                sql.commit()
                print('Flairing source.')
                post.add_comment(newcomment)
                post.set_flair(flair_text=POSTEDFLAIR_TEXT, flair_css_class=POSTEDFLAIR_CSS)
            except praw.errors.APIException as error:
                if error.error_type == "SUBREDDIT_NOTALLOWED":
                    print("\tNOT ALLOWED IN SUBREDDIT!")
                    # Park the row at IMPOSSIBLETIME so it is never retried.
                    cur.execute('UPDATE schedules SET TIME=? WHERE ID=?', [IMPOSSIBLETIME, postid])
                    sql.commit()
                    scheduledata = list(schedule)
                    scheduledata[1] = IMPOSSIBLETIME
                    comment = buildcomment(scheduledata, [ERRORNOTALLOWED % psub], critical=True)
                    post.add_comment(comment)
        else:
            # Not due yet: print the countdown on the same line as the id.
            print(" : T-" + str(round(posttime - nowstamp)))
def alertadmins(messagesubject, messagetext):
    """Best-effort PM to every configured admin; failures are logged, not raised."""
    for admin in ADMINS:
        print('Messaging ' + admin)
        try:
            r.send_message(admin, messagesubject, messagetext)
        except Exception:
            # Narrowed from a bare `except:`; keep the best-effort semantics
            # but let KeyboardInterrupt/SystemExit propagate.
            print('COULD NOT MESSAGE ADMIN')
# Main loop: run each manager, report any crash to the admins, then sleep.
while True:
    try:
        manage_new()
        manage_unread()
        manage_schedule()
    except Exception as e:
        # Format the traceback, indent it, timestamp it, and PM it to the
        # admins instead of crashing the bot.
        error_message = traceback.format_exc()
        print(error_message)
        now = getTime(False)
        now = datetime.datetime.strftime(now, "%B %d %H:%M:%S UTC")
        # NOTE(review): reddit markdown code blocks need a 4-space indent;
        # this adds a single space -- confirm the intended formatting.
        error_message = ' ' + error_message
        error_message = error_message.replace('\n', '\n ')
        error_message += '\n' + str(now)
        alertadmins(TRACEBACK_SUBJECT, error_message)
    print("Sleeping\n")
    time.sleep(WAIT)
"content_hash": "da331be7d7ec4f50f0798e370754c285",
"timestamp": "",
"source": "github",
"line_count": 516,
"max_line_length": 153,
"avg_line_length": 31.436046511627907,
"alnum_prop": 0.6672831514703162,
"repo_name": "voussoir/reddit",
"id": "42a8c50adc6ec8df6a495284eba9863676794e1f",
"size": "16238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_old/Schedulizer-ModTeam/schedulizerm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "69"
},
{
"name": "CSS",
"bytes": "22797"
},
{
"name": "HTML",
"bytes": "1078988"
},
{
"name": "Procfile",
"bytes": "38"
},
{
"name": "Python",
"bytes": "513914"
}
],
"symlink_target": ""
} |
"""
Classes representing uploaded files.
"""
import os
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = ('UploadedFile', 'TemporaryUploadedFile', 'InMemoryUploadedFile')
class UploadedFile(object):
    """
    An abstract uploaded file (``TemporaryUploadedFile`` and
    ``InMemoryUploadedFile`` are the built-in concrete subclasses).

    An ``UploadedFile`` object behaves somewhat like a file object and
    represents some file data that the user submitted with a form.
    """
    DEFAULT_CHUNK_SIZE = 64 * 2**10

    def __init__(self, file_name=None, content_type=None, file_size=None, charset=None):
        self.file_name = file_name
        self.file_size = file_size
        self.content_type = content_type
        self.charset = charset

    def __repr__(self):
        return "<%s: %s (%s)>" % (self.__class__.__name__, self.file_name, self.content_type)

    def _set_file_name(self, name):
        # Sanitize the file name so that it can't be dangerous.
        if name is not None:
            # Just use the basename of the file -- anything else is dangerous.
            name = os.path.basename(name)
            # File names longer than 255 characters can cause problems on older OSes.
            if len(name) > 255:
                name, ext = os.path.splitext(name)
                name = name[:255 - len(ext)] + ext
        self._file_name = name

    def _get_file_name(self):
        return self._file_name

    file_name = property(_get_file_name, _set_file_name)

    def chunk(self, chunk_size=None):
        """
        Read the file and yield chunks of ``chunk_size`` bytes (defaults to
        ``UploadedFile.DEFAULT_CHUNK_SIZE``).
        """
        if not chunk_size:
            chunk_size = UploadedFile.DEFAULT_CHUNK_SIZE
        if hasattr(self, 'seek'):
            self.seek(0)
        # Assume the pointer is at zero...
        counter = self.file_size
        while counter > 0:
            yield self.read(chunk_size)
            counter -= chunk_size

    def multiple_chunks(self, chunk_size=None):
        """
        Returns ``True`` if you can expect multiple chunks.

        NB: If a particular file representation is in memory, subclasses should
        always return ``False`` -- there's no good reason to read from memory in
        chunks.
        """
        if not chunk_size:
            chunk_size = UploadedFile.DEFAULT_CHUNK_SIZE
        # BUG FIX: was ``self.file_size < chunk_size``, which inverted the
        # test -- a file *larger* than one chunk is the case that requires
        # multiple chunks.
        return self.file_size > chunk_size

    # Abstract methods; subclasses *must* define read() and probably should
    # define open/close.
    def read(self, num_bytes=None):
        raise NotImplementedError()

    def open(self):
        pass

    def close(self):
        pass

    # Backwards-compatible support for uploaded-files-as-dictionaries.
    def __getitem__(self, key):
        import warnings
        warnings.warn(
            message = "The dictionary access of uploaded file objects is deprecated. Use the new object interface instead.",
            category = DeprecationWarning,
            stacklevel = 2
        )
        # (A dead, unused key-translation dict was removed here.)
        if key == 'content':
            return self.read()
        elif key == 'filename':
            return self.file_name
        elif key == 'content-type':
            return self.content_type
        else:
            return getattr(self, key)
class TemporaryUploadedFile(UploadedFile):
    """A file uploaded to a temporary location (i.e. stream-to-disk)."""

    def __init__(self, file, file_name, content_type, file_size, charset):
        super(TemporaryUploadedFile, self).__init__(
            file_name, content_type, file_size, charset)
        # Keep the underlying temp file and remember where it lives on disk,
        # then rewind so reads start from the beginning.
        self.file = file
        self.path = file.name
        file.seek(0)

    def temporary_file_path(self):
        """Return the full on-disk path of this file."""
        return self.path

    def open(self):
        self.seek(0)

    def seek(self, *args, **kwargs):
        self.file.seek(*args, **kwargs)

    def read(self, *args, **kwargs):
        return self.file.read(*args, **kwargs)
class InMemoryUploadedFile(UploadedFile):
    """A file uploaded into memory (i.e. stream-to-memory)."""

    def __init__(self, file, field_name, file_name, content_type, file_size, charset):
        super(InMemoryUploadedFile, self).__init__(
            file_name, content_type, file_size, charset)
        self.file = file
        self.field_name = field_name
        file.seek(0)

    def open(self):
        self.seek(0)

    def seek(self, *args, **kwargs):
        self.file.seek(*args, **kwargs)

    def read(self, *args, **kwargs):
        return self.file.read(*args, **kwargs)

    def chunk(self, chunk_size=None):
        # Everything is already in memory: hand the payload back in one piece.
        self.file.seek(0)
        yield self.read()

    def multiple_chunks(self, chunk_size=None):
        # In-memory data never needs chunked reads.
        return False
class SimpleUploadedFile(InMemoryUploadedFile):
    """
    A simple representation of a file, which just has content, size, and a name.
    """

    def __init__(self, name, content, content_type='text/plain'):
        # Treat missing content as empty; note the parent __init__ is
        # intentionally bypassed.
        content = content or ''
        self.file = StringIO(content)
        self.file_name = name
        self.field_name = None
        self.file_size = len(content)
        self.content_type = content_type
        self.charset = None
        self.file.seek(0)

    @classmethod
    def from_dict(cls, file_dict):
        """
        Creates a SimpleUploadedFile object from
        a dictionary object with the following keys:
           - filename
           - content-type
           - content
        """
        return cls(file_dict['filename'],
                   file_dict['content'],
                   file_dict.get('content-type', 'text/plain'))
| {
"content_hash": "22b33e53ba86d73860f9d4e956fd14d7",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 124,
"avg_line_length": 31.189473684210526,
"alnum_prop": 0.5892676341545731,
"repo_name": "diofeher/django-nfa",
"id": "637609d0850567c4dd7439c778d46ce0dcaadb8a",
"size": "5926",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "build/lib/django/core/files/uploadedfile.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "66105"
},
{
"name": "Python",
"bytes": "5174003"
},
{
"name": "Shell",
"bytes": "804"
}
],
"symlink_target": ""
} |
import os
import aiohttp_jinja2
import jinja2
from aiohttp import web
from aiohttp.test_utils import AioHTTPTestCase
from app.routes import routes
from config import UPLOAD_FOLDER, TEMPLATES_FOLDER
class BaseTestCase(AioHTTPTestCase):
    """Shared aiohttp test base: builds the app and cleans uploaded fixtures."""

    # Fixture file names; their source paths under tests/_src/ are precomputed
    # at class-definition time for use by subclasses.
    test_files = ["somebook.pdf", "someotherbook.pdf"]
    test_paths = []
    for item in test_files:
        test_paths.append(os.path.join("./tests/_src/", item))

    def setUp(self):
        super().setUp()

    def tearDown(self):
        # Remove any fixture files a test copied into UPLOAD_FOLDER so each
        # test starts with a clean upload directory.
        super().tearDown()
        for item in self.test_files:
            full_path = os.path.join(UPLOAD_FOLDER, item)
            if os.path.isfile(full_path):
                os.remove(full_path)

    def get_app(self, loop):
        """Override the get_app method to return your application.
        """
        # it's important to use the loop passed here.
        app = web.Application(loop=loop, debug=True)
        # Define the views path
        aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader(TEMPLATES_FOLDER))
        # route part
        for route in routes:
            app.router.add_route(route[0], route[1], route[2], name=route[3])
        return app
"content_hash": "1a4483a94a6be802c543d6e7c3eb2aa9",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 83,
"avg_line_length": 28.24390243902439,
"alnum_prop": 0.6355785837651122,
"repo_name": "bndr/basic-uploader",
"id": "fbf4af1fd6370fa838bcb7415704a81a380a67ee",
"size": "1158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "6289"
},
{
"name": "Python",
"bytes": "11674"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds AuthorLink.exclude_from_authorship, a flag that hides an author
    # from the page's author list (companion to 0006's
    # exclude_from_contributions).

    dependencies = [
        ('ik_links', '0006_authorlink_exclude_from_contributions'),
    ]

    operations = [
        migrations.AddField(
            model_name='authorlink',
            name='exclude_from_authorship',
            field=models.BooleanField(default=False, help_text=b'Exclude this author from the list of authors on the page.'),
        ),
    ]
| {
"content_hash": "01d6080e468bad0cf456bf5c77fc344c",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 125,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.6443089430894309,
"repo_name": "ic-labs/django-icekit",
"id": "d686d9f57d24cb1e4afbec6de17b5af50e40a9e2",
"size": "516",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "icekit/plugins/links/migrations/0007_authorlink_exclude_from_authorship.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18019"
},
{
"name": "HTML",
"bytes": "92605"
},
{
"name": "JavaScript",
"bytes": "27803"
},
{
"name": "Python",
"bytes": "1476354"
},
{
"name": "Shell",
"bytes": "37850"
}
],
"symlink_target": ""
} |
import krpc
import json
import time
from collections import OrderedDict, defaultdict
import threading
class TelemetryController(threading.Thread):
    """Background thread that samples vessel telemetry and forwards it.

    `in_telems` is a list of (metric_name, callback) pairs; each callback
    takes the vessel object and returns the value to report via the
    graphite sender `a_graphite`.
    """

    def __init__(self, a_ship, a_graphite, in_telems, interval=1):
        super(TelemetryController, self).__init__(group=None, target=None, name=None, args=(), kwargs={})
        self.vessel = a_ship
        self.g = a_graphite
        self.telemetries = in_telems
        self.interval = interval
        print("created a telemetry controller")

    def run(self):
        # Sample forever; there is no shutdown flag, so the thread lives
        # until the process exits.
        print("started thread")
        while True:
            self.read_and_forward()
            time.sleep(self.interval)

    def read_and_forward(self):
        """Evaluate every telemetry callback once and send the readings."""
        #cur_game_time = self.vessel.met*1000
        for one_telem_name, one_telem_callback in self.telemetries:
            self.g.send(one_telem_name, one_telem_callback(self.vessel))
        # BUG FIX: was the Python 2 statement `print "forwarded"`, a
        # SyntaxError under Python 3; the rest of the file uses print().
        print("forwarded")
import graphitesend

if __name__ == "__main__":
    # Forward telemetry from the active KSP vessel to a graphite server.
    #GRAPHITE
    g = graphitesend.init(graphite_server="192.168.254.100", system_name="active_vessel", prefix="ksp")
    #KRPC
    conn = krpc.connect(address="localhost", name="telemetry_monitor")
    # (metric name, callback) pairs sampled once per controller interval.
    list_o_telems = [
        ("resource.ElectricCharge", lambda x: x.resources.amount("ElectricCharge")),
        ("resource.LiquidFuel", lambda x: x.resources.amount("LiquidFuel")),
        ("resource.Oxidizer", lambda x: x.resources.amount("Oxidizer")),
        ("resource.Monopropellant", lambda x: x.resources.amount("Monopropellant")),
        ("resource.Ablator", lambda x: x.resources.amount("Ablator")),
        ("resource.Ore", lambda x: x.resources.amount("Ore")),
        ("physics.thrust", lambda x: x.thrust)
    ]
    t_control = TelemetryController(conn.space_center.active_vessel, g, list_o_telems)
    t_control.start()
    # The controller thread keeps the process alive; the krpc connection is
    # deliberately left open.
    #conn.close()
| {
"content_hash": "59125f14233095b9c1c8631aa63a0663",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 101,
"avg_line_length": 34.96,
"alnum_prop": 0.6664759725400458,
"repo_name": "uiuc-cs-ksp/krpc_telemetry_server",
"id": "f6f634e281a19fd0123346cd0a81e86ae030986f",
"size": "1748",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphite/src/basic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16619"
}
],
"symlink_target": ""
} |
import os
import copy
from robot.errors import DataError
from robot.variables import is_var
from robot.output import LOGGER
from robot.writer import DataFileWriter
from robot.utils import abspath, is_string, normalize, NormalizedDict
from .comments import Comment
from .populators import FromFilePopulator, FromDirectoryPopulator
from .settings import (Documentation, Fixture, Timeout, Tags, Metadata,
Library, Resource, Variables, Arguments, Return,
Template, MetadataList, ImportList)
def TestData(parent=None, source=None, include_suites=None,
             warn_on_skipped=False):
    """Parses a file or directory to a corresponding model object.

    :param parent: (optional) parent to be used in creation of the model object.
    :param source: path where test data is read from.
    :returns: :class:`~.model.TestDataDirectory` if `source` is a directory,
        :class:`~.model.TestCaseFile` otherwise.
    """
    if not os.path.isdir(source):
        return TestCaseFile(parent, source).populate()
    directory = TestDataDirectory(parent, source)
    return directory.populate(include_suites, warn_on_skipped)
class _TestData(object):
    """Base class for parsed test data files and directories.

    Subclasses create the four table attributes before calling __init__,
    which builds a normalized lookup from every accepted header alias to
    its table object.
    """
    _setting_table_names = 'Setting', 'Settings', 'Metadata'
    _variable_table_names = 'Variable', 'Variables'
    _testcase_table_names = 'Test Case', 'Test Cases'
    _keyword_table_names = 'Keyword', 'Keywords', 'User Keyword', 'User Keywords'

    def __init__(self, parent=None, source=None):
        self.parent = parent
        self.source = abspath(source) if source else None
        self.children = []
        # Case/space-insensitive mapping: header name -> table object.
        self._tables = NormalizedDict(self._get_tables())

    def _get_tables(self):
        # Yield (alias, table) pairs for every recognized header name.
        for names, table in [(self._setting_table_names, self.setting_table),
                             (self._variable_table_names, self.variable_table),
                             (self._testcase_table_names, self.testcase_table),
                             (self._keyword_table_names, self.keyword_table)]:
            for name in names:
                yield name, table

    def start_table(self, header_row):
        """Return the table matching `header_row`, or None if unknown/forbidden."""
        try:
            table = self._tables[header_row[0]]
        except (KeyError, IndexError):
            return None
        if not self._table_is_allowed(table):
            return None
        table.set_header(header_row)
        return table

    @property
    def name(self):
        # Suite name derived from the file/directory basename.
        return self._format_name(self._get_basename()) if self.source else None

    def _get_basename(self):
        return os.path.splitext(os.path.basename(self.source))[0]

    def _format_name(self, name):
        # '01__Name' -> 'Name'; underscores to spaces; title-case all-lowercase.
        name = self._strip_possible_prefix_from_name(name)
        name = name.replace('_', ' ').strip()
        return name.title() if name.islower() else name

    def _strip_possible_prefix_from_name(self, name):
        return name.split('__', 1)[-1]

    @property
    def keywords(self):
        return self.keyword_table.keywords

    @property
    def imports(self):
        return self.setting_table.imports

    def report_invalid_syntax(self, message, level='ERROR'):
        # Directories report against their init file when one exists.
        initfile = getattr(self, 'initfile', None)
        path = os.path.join(self.source, initfile) if initfile else self.source
        LOGGER.write("Error in file '%s': %s" % (path, message), level)

    def save(self, **options):
        """Writes this datafile to disk.

        :param options: Configuration for writing. These are passed to
            :py:class:`~robot.writer.datafilewriter.WritingContext` as
            keyword arguments.

        See also :py:class:`robot.writer.datafilewriter.DataFileWriter`
        """
        return DataFileWriter(**options).write(self)
class TestCaseFile(_TestData):
    """The parsed test case file object.

    :param parent: parent object to be used in creation of the model object.
    :param source: path where test data is read from.
    """

    def __init__(self, parent=None, source=None):
        self.directory = os.path.dirname(source) if source else None
        # Tables must exist before _TestData.__init__ builds its lookup.
        self.setting_table = TestCaseFileSettingTable(self)
        self.variable_table = VariableTable(self)
        self.testcase_table = TestCaseTable(self)
        self.keyword_table = KeywordTable(self)
        _TestData.__init__(self, parent, source)

    def populate(self):
        FromFilePopulator(self).populate(self.source)
        self._validate()
        return self

    def _validate(self):
        # A test case file without a test case table is not valid test data.
        if not self.testcase_table.is_started():
            raise DataError('File has no test case table.')

    def _table_is_allowed(self, table):
        # Test case files may contain any of the four tables.
        return True

    def has_tests(self):
        return True

    def __iter__(self):
        for table in [self.setting_table, self.variable_table,
                      self.testcase_table, self.keyword_table]:
            yield table
class ResourceFile(_TestData):
    """The parsed resource file object.

    :param source: path where resource file is read from.
    """

    def __init__(self, source=None):
        self.directory = os.path.dirname(source) if source else None
        self.setting_table = ResourceFileSettingTable(self)
        self.variable_table = VariableTable(self)
        self.testcase_table = TestCaseTable(self)
        self.keyword_table = KeywordTable(self)
        _TestData.__init__(self, source=source)

    def populate(self):
        FromFilePopulator(self).populate(self.source)
        self._report_status()
        return self

    def _report_status(self):
        # Log what was imported; warn when the resource contributes nothing.
        if self.setting_table or self.variable_table or self.keyword_table:
            LOGGER.info("Imported resource file '%s' (%d keywords)."
                        % (self.source, len(self.keyword_table.keywords)))
        else:
            LOGGER.warn("Imported resource file '%s' is empty." % self.source)

    def _table_is_allowed(self, table):
        # Resource files must never contain test cases.
        if table is self.testcase_table:
            raise DataError("Resource file '%s' contains a test case table "
                            "which is not allowed." % self.source)
        return True

    def __iter__(self):
        for table in [self.setting_table, self.variable_table, self.keyword_table]:
            yield table
class TestDataDirectory(_TestData):
    """The parsed test data directory object. Contains hierarchical structure
    of other :py:class:`.TestDataDirectory` and :py:class:`.TestCaseFile`
    objects.

    :param parent: parent object to be used in creation of the model object.
    :param source: path where test data is read from.
    """

    def __init__(self, parent=None, source=None):
        self.directory = source
        self.initfile = None
        self.setting_table = InitFileSettingTable(self)
        self.variable_table = VariableTable(self)
        self.testcase_table = TestCaseTable(self)
        self.keyword_table = KeywordTable(self)
        _TestData.__init__(self, parent, source)

    def populate(self, include_suites=None, warn_on_skipped=False, recurse=True):
        FromDirectoryPopulator().populate(self.source, self, include_suites,
                                          warn_on_skipped, recurse)
        # Drop children (files/subdirectories) that contain no tests.
        self.children = [ch for ch in self.children if ch.has_tests()]
        return self

    def _get_basename(self):
        return os.path.basename(self.source)

    def _table_is_allowed(self, table):
        # Init files may not contain test cases; report but keep parsing.
        if table is self.testcase_table:
            LOGGER.error("Test suite init file in '%s' contains a test case "
                         "table which is not allowed." % self.source)
            return False
        return True

    def add_child(self, path, include_suites):
        self.children.append(TestData(parent=self, source=path,
                                      include_suites=include_suites))

    def has_tests(self):
        return any(ch.has_tests() for ch in self.children)

    def __iter__(self):
        for table in [self.setting_table, self.variable_table, self.keyword_table]:
            yield table
class _Table(object):
    """Base class for data tables; tracks the header row and the parent file."""

    def __init__(self, parent):
        self.parent = parent
        self._header = None

    def set_header(self, header):
        self._header = self._prune_old_style_headers(header)

    def _prune_old_style_headers(self, header):
        # Old-style headers repeat column names after the table name;
        # collapse such rows to just the table name.
        if len(header) < 3:
            return header
        if self._old_header_matcher.match(header):
            return [header[0]]
        return header

    @property
    def header(self):
        # Fall back to a default header, e.g. 'Settings', when none was seen.
        return self._header or [self.type.title() + 's']

    @property
    def name(self):
        return self.header[0]

    @property
    def source(self):
        return self.parent.source

    @property
    def directory(self):
        return self.parent.directory

    def report_invalid_syntax(self, message, level='ERROR'):
        self.parent.report_invalid_syntax(message, level)

    def __bool__(self):
        # Truthy once a header has been seen or any content exists.
        return bool(self._header or len(self))

    #PY2
    def __nonzero__(self):
        return self.__bool__()

    def __len__(self):
        # Counts whatever the subclass's __iter__ yields.
        return sum(1 for item in self)
class _WithSettings(object):
    """Mixin mapping normalized setting names to their populate callables."""

    def get_setter(self, setting_name):
        key = self.normalize(setting_name)
        if key not in self._setters:
            self.report_invalid_syntax("Non-existing setting '%s'." % setting_name)
            return None
        return self._setters[key](self)

    def is_setting(self, setting_name):
        return self.normalize(setting_name) in self._setters

    def normalize(self, setting):
        # Normalize and drop a single trailing colon ('Library:' -> 'library').
        result = normalize(setting)
        if result and result.endswith(':'):
            return result[:-1]
        return result
class _SettingTable(_Table, _WithSettings):
    """Base setting table holding the fixed settings plus metadata and imports."""
    type = 'setting'

    def __init__(self, parent):
        _Table.__init__(self, parent)
        self.doc = Documentation('Documentation', self)
        self.suite_setup = Fixture('Suite Setup', self)
        self.suite_teardown = Fixture('Suite Teardown', self)
        self.test_setup = Fixture('Test Setup', self)
        self.test_teardown = Fixture('Test Teardown', self)
        self.force_tags = Tags('Force Tags', self)
        self.default_tags = Tags('Default Tags', self)
        self.test_template = Template('Test Template', self)
        self.test_timeout = Timeout('Test Timeout', self)
        self.metadata = MetadataList(self)
        self.imports = ImportList(self)

    @property
    def _old_header_matcher(self):
        return OldStyleSettingAndVariableTableHeaderMatcher()

    def add_metadata(self, name, value='', comment=None):
        self.metadata.add(Metadata(self, name, value, comment))
        return self.metadata[-1]

    def add_library(self, name, args=None, comment=None):
        self.imports.add(Library(self, name, args, comment=comment))
        return self.imports[-1]

    def add_resource(self, name, invalid_args=None, comment=None):
        self.imports.add(Resource(self, name, invalid_args, comment=comment))
        return self.imports[-1]

    def add_variables(self, name, args=None, comment=None):
        self.imports.add(Variables(self, name, args, comment=comment))
        return self.imports[-1]

    def __len__(self):
        # Only settings that actually have a value count toward the length.
        return sum(1 for setting in self if setting.is_set())
class TestCaseFileSettingTable(_SettingTable):
    """Setting table of a test case file: all suite/test settings are allowed."""

    # Normalized setting name -> populate method, including deprecated
    # aliases such as 'suiteprecondition'/'testpostcondition'.
    _setters = {'documentation': lambda s: s.doc.populate,
                'document': lambda s: s.doc.populate,
                'suitesetup': lambda s: s.suite_setup.populate,
                'suiteprecondition': lambda s: s.suite_setup.populate,
                'suiteteardown': lambda s: s.suite_teardown.populate,
                'suitepostcondition': lambda s: s.suite_teardown.populate,
                'testsetup': lambda s: s.test_setup.populate,
                'testprecondition': lambda s: s.test_setup.populate,
                'testteardown': lambda s: s.test_teardown.populate,
                'testpostcondition': lambda s: s.test_teardown.populate,
                'forcetags': lambda s: s.force_tags.populate,
                'defaulttags': lambda s: s.default_tags.populate,
                'testtemplate': lambda s: s.test_template.populate,
                'testtimeout': lambda s: s.test_timeout.populate,
                'library': lambda s: s.imports.populate_library,
                'resource': lambda s: s.imports.populate_resource,
                'variables': lambda s: s.imports.populate_variables,
                'metadata': lambda s: s.metadata.populate}

    def __iter__(self):
        for setting in [self.doc, self.suite_setup, self.suite_teardown,
                        self.test_setup, self.test_teardown, self.force_tags,
                        self.default_tags, self.test_template, self.test_timeout] \
                        + self.metadata.data + self.imports.data:
            yield setting
class ResourceFileSettingTable(_SettingTable):
    """Setting table of a resource file: only Documentation and imports."""

    _setters = {'documentation': lambda s: s.doc.populate,
                'document': lambda s: s.doc.populate,
                'library': lambda s: s.imports.populate_library,
                'resource': lambda s: s.imports.populate_resource,
                'variables': lambda s: s.imports.populate_variables}

    def __iter__(self):
        return iter([self.doc] + self.imports.data)
class InitFileSettingTable(_SettingTable):
    """Setting table of a suite init file: no default tags or test template."""

    # Normalized setting name -> populate method (deprecated aliases included).
    _setters = {'documentation': lambda s: s.doc.populate,
                'document': lambda s: s.doc.populate,
                'suitesetup': lambda s: s.suite_setup.populate,
                'suiteprecondition': lambda s: s.suite_setup.populate,
                'suiteteardown': lambda s: s.suite_teardown.populate,
                'suitepostcondition': lambda s: s.suite_teardown.populate,
                'testsetup': lambda s: s.test_setup.populate,
                'testprecondition': lambda s: s.test_setup.populate,
                'testteardown': lambda s: s.test_teardown.populate,
                'testpostcondition': lambda s: s.test_teardown.populate,
                'testtimeout': lambda s: s.test_timeout.populate,
                'forcetags': lambda s: s.force_tags.populate,
                'library': lambda s: s.imports.populate_library,
                'resource': lambda s: s.imports.populate_resource,
                'variables': lambda s: s.imports.populate_variables,
                'metadata': lambda s: s.metadata.populate}

    def __iter__(self):
        for setting in [self.doc, self.suite_setup, self.suite_teardown,
                        self.test_setup, self.test_teardown, self.force_tags,
                        self.test_timeout] + self.metadata.data + self.imports.data:
            yield setting
class VariableTable(_Table):
    """Table holding variable definitions."""
    type = 'variable'

    def __init__(self, parent):
        _Table.__init__(self, parent)
        self.variables = []

    @property
    def _old_header_matcher(self):
        return OldStyleSettingAndVariableTableHeaderMatcher()

    def add(self, name, value, comment=None):
        variable = Variable(self, name, value, comment)
        self.variables.append(variable)

    def __iter__(self):
        return iter(self.variables)
class TestCaseTable(_Table):
    """Table holding test cases; always truthy so empty tables still count."""
    type = 'test case'

    def __init__(self, parent):
        _Table.__init__(self, parent)
        self.tests = []

    @property
    def _old_header_matcher(self):
        return OldStyleTestAndKeywordTableHeaderMatcher()

    def add(self, name):
        test = TestCase(self, name)
        self.tests.append(test)
        return test

    def __iter__(self):
        return iter(self.tests)

    def is_started(self):
        # A header row has been seen, even if no tests followed it yet.
        return bool(self._header)

    def __bool__(self):
        return True

    #PY2
    def __nonzero__(self):
        return self.__bool__()
class KeywordTable(_Table):
    """Table holding user keyword definitions."""
    type = 'keyword'

    def __init__(self, parent):
        _Table.__init__(self, parent)
        self.keywords = []

    @property
    def _old_header_matcher(self):
        return OldStyleTestAndKeywordTableHeaderMatcher()

    def add(self, name):
        keyword = UserKeyword(self, name)
        self.keywords.append(keyword)
        return keyword

    def __iter__(self):
        return iter(self.keywords)
class Variable(object):
    """A single variable-table row: name, value list, and trailing comment."""

    def __init__(self, parent, name, value, comment=None):
        self.parent = parent
        self.name = name.rstrip('= ')
        if name.startswith('$') and value == []:
            # An empty scalar variable means an empty string.
            value = ''
        if is_string(value):
            value = [value]
        self.value = value
        self.comment = Comment(comment)

    def as_list(self):
        if not self.has_data():
            return self.comment.as_list()
        return [self.name] + self.value + self.comment.as_list()

    def is_set(self):
        return True

    def is_for_loop(self):
        return False

    def has_data(self):
        return bool(self.name or ''.join(self.value))

    def __bool__(self):
        return self.has_data()

    #PY2
    def __nonzero__(self):
        return self.__bool__()

    def report_invalid_syntax(self, message, level='ERROR'):
        template = "Setting variable '%s' failed: %s"
        self.parent.report_invalid_syntax(template % (self.name, message), level)
class _WithSteps(object):
    """Mixin for items (tests/keywords) that contain a list of steps."""

    def add_step(self, content, comment=None):
        step = Step(content, comment)
        self.steps.append(step)
        return step

    def copy(self, name):
        # Deep-copy this item under a new name and register it with the parent.
        duplicate = copy.deepcopy(self)
        duplicate.name = name
        self._add_to_parent(duplicate)
        return duplicate
class TestCase(_WithSteps, _WithSettings):
    """A single test case: its settings ([Documentation], [Setup], ...) and steps."""

    def __init__(self, parent, name):
        self.parent = parent
        self.name = name
        self.doc = Documentation('[Documentation]', self)
        self.template = Template('[Template]', self)
        self.tags = Tags('[Tags]', self)
        self.setup = Fixture('[Setup]', self)
        self.teardown = Fixture('[Teardown]', self)
        self.timeout = Timeout('[Timeout]', self)
        self.steps = []

    # Maps normalized setting names (including aliases such as 'precondition'
    # for setup) to a callable that returns the matching populate method.
    _setters = {'documentation': lambda s: s.doc.populate,
                'document': lambda s: s.doc.populate,
                'template': lambda s: s.template.populate,
                'setup': lambda s: s.setup.populate,
                'precondition': lambda s: s.setup.populate,
                'teardown': lambda s: s.teardown.populate,
                'postcondition': lambda s: s.teardown.populate,
                'tags': lambda s: s.tags.populate,
                'timeout': lambda s: s.timeout.populate}

    @property
    def source(self):
        # The source file is resolved via the owning table/data file.
        return self.parent.source

    @property
    def directory(self):
        return self.parent.directory

    def add_for_loop(self, declaration, comment=None):
        """Append a ForLoop step and return it."""
        self.steps.append(ForLoop(declaration, comment))
        return self.steps[-1]

    def report_invalid_syntax(self, message, level='ERROR'):
        # Shared with the UserKeyword subclass, hence the runtime type check.
        type_ = 'test case' if type(self) is TestCase else 'keyword'
        message = "Invalid syntax in %s '%s': %s" % (type_, self.name, message)
        self.parent.report_invalid_syntax(message, level)

    def _add_to_parent(self, test):
        self.parent.tests.append(test)

    @property
    def settings(self):
        return [self.doc, self.tags, self.setup, self.template, self.timeout,
                self.teardown]

    def __iter__(self):
        # Iterates settings first, then steps, with teardown last.
        for element in [self.doc, self.tags, self.setup,
                        self.template, self.timeout] \
                + self.steps + [self.teardown]:
            yield element
class UserKeyword(TestCase):
    """A user keyword; reuses TestCase's step handling but owns a different
    settings set ([Arguments] and [Return] instead of [Setup]/[Template])."""

    def __init__(self, parent, name):
        self.parent = parent
        self.name = name
        self.doc = Documentation('[Documentation]', self)
        self.args = Arguments('[Arguments]', self)
        self.return_ = Return('[Return]', self)
        self.timeout = Timeout('[Timeout]', self)
        self.teardown = Fixture('[Teardown]', self)
        self.tags = Tags('[Tags]', self)
        self.steps = []

    # Normalized setting names accepted in keyword tables.
    _setters = {'documentation': lambda s: s.doc.populate,
                'document': lambda s: s.doc.populate,
                'arguments': lambda s: s.args.populate,
                'return': lambda s: s.return_.populate,
                'timeout': lambda s: s.timeout.populate,
                'teardown': lambda s: s.teardown.populate,
                'tags': lambda s: s.tags.populate}

    def _add_to_parent(self, test):
        self.parent.keywords.append(test)

    @property
    def settings(self):
        return [self.args, self.doc, self.tags, self.timeout, self.teardown, self.return_]

    def __iter__(self):
        # Iterates settings first, then steps, with teardown and return last.
        for element in [self.args, self.doc, self.tags, self.timeout] \
                + self.steps + [self.teardown, self.return_]:
            yield element
class ForLoop(_WithSteps):
    """The parsed representation of a for-loop.

    :param list declaration: The literal cell values that declare the loop
        (excluding ":FOR").
    :param str comment: A comment, default None.
    :ivar str flavor: The value of the 'IN' item, uppercased.
        Typically 'IN', 'IN RANGE', 'IN ZIP', or 'IN ENUMERATE'.
    :ivar list vars: Variables set per-iteration by this loop.
    :ivar list items: Loop values that come after the 'IN' item.
    :ivar str comment: A comment, or None.
    :ivar list steps: A list of steps in the loop.
    """

    def __init__(self, declaration, comment=None):
        flavor, separator_index = self._get_flavors_and_index(declaration)
        self.flavor = flavor
        self.vars = declaration[:separator_index]
        self.items = declaration[separator_index + 1:]
        self.comment = Comment(comment)
        self.steps = []

    def _get_flavors_and_index(self, declaration):
        # The first cell that looks like an 'IN' separator (IN, IN RANGE,
        # IN ZIP, ...) splits loop variables from loop values.
        for position, cell in enumerate(declaration):
            normalized = cell.upper()
            if normalized.replace(' ', '').startswith('IN'):
                return normalized, position
        # No separator found: every cell is treated as a loop variable.
        return 'IN', len(declaration)

    def is_comment(self):
        return False

    def is_for_loop(self):
        return True

    def as_list(self, indent=False, include_comment=True):
        """Return the loop declaration as a flat list of cells."""
        cells = [': FOR'] + self.vars + [self.flavor] + self.items
        if include_comment:
            cells = cells + self.comment.as_list()
        return cells

    def __iter__(self):
        return iter(self.steps)

    def is_set(self):
        return True
class Step(object):
    """A single step (keyword call) inside a test, keyword or for-loop."""

    def __init__(self, content, comment=None):
        # Leading cells that are variables (optionally with an '=' mark)
        # are assignment targets; the next cell is the keyword name.
        self.assign = list(self._get_assigned_vars(content))
        remainder = content[len(self.assign):]
        self.name = remainder[0] if remainder else None
        self.args = remainder[1:]
        self.comment = Comment(comment)

    def _get_assigned_vars(self, content):
        # Yield cells from the start while they look like variables;
        # stop at the first cell that is not one.
        for cell in content:
            if not is_var(cell.rstrip('= ')):
                return
            yield cell

    def is_comment(self):
        # A row with no assignment, name or arguments is a pure comment row.
        return not (self.assign or self.name or self.args)

    def is_for_loop(self):
        return False

    def is_set(self):
        return True

    def as_list(self, indent=False, include_comment=True):
        """Return the step as a flat list of cells."""
        cells = list(self.assign)
        if self.name is not None:
            cells.append(self.name)
        cells.extend(self.args)
        if include_comment:
            cells.extend(self.comment.as_list())
        if indent:
            cells.insert(0, '')
        return cells
class OldStyleSettingAndVariableTableHeaderMatcher(object):
    """Recognizes old-style setting/variable table headers.

    A header matches when every cell after the first one is 'Value'
    (case-insensitively); a header with no extra cells matches trivially.
    """

    def match(self, header):
        """Return True if *header* is an old-style setting/variable header."""
        # The original `True if ... else False` conditional was redundant:
        # the comparison already yields a boolean for all() to consume.
        return all(cell.lower() == 'value' for cell in header[1:])
class OldStyleTestAndKeywordTableHeaderMatcher(object):
    """Recognizes old-style test/keyword table headers: the second cell must
    be 'Action' and every following cell must start with 'Arg'
    (both case-insensitively)."""

    def match(self, header):
        """Return True if *header* is an old-style test/keyword header."""
        if header[1].lower() != 'action':
            return False
        return all(cell.lower().startswith('arg') for cell in header[2:])
| {
"content_hash": "4634a0c56b544d2b372680563718e53f",
"timestamp": "",
"source": "github",
"line_count": 693,
"max_line_length": 90,
"avg_line_length": 34.08080808080808,
"alnum_prop": 0.6019984757388432,
"repo_name": "userzimmermann/robotframework",
"id": "fb78ffa55e6059597c40f4b4513eccfde2e09912",
"size": "24226",
"binary": false,
"copies": "1",
"ref": "refs/heads/python3",
"path": "src/robot/parsing/model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "210"
},
{
"name": "CSS",
"bytes": "23490"
},
{
"name": "HTML",
"bytes": "140949"
},
{
"name": "Java",
"bytes": "59815"
},
{
"name": "JavaScript",
"bytes": "160761"
},
{
"name": "Python",
"bytes": "2179296"
},
{
"name": "RobotFramework",
"bytes": "2033202"
},
{
"name": "Shell",
"bytes": "281"
}
],
"symlink_target": ""
} |
import matplotlib.pyplot as plt
import pytest
import seaborn as sns
class TestPlotterMixin(object):
    """Placeholder for PlotterMixin tests (not yet implemented)."""
    pass
class TestScatterPlotter(object):
    """Smoke tests for cupcake's high-level scatterplot API."""

    @pytest.fixture
    def iris(self):
        # Standard seaborn example dataset (downloaded on first use).
        return sns.load_dataset('iris')

    def test_scatterplots(self, iris):
        """Smoke test of high level scatterplot options"""
        # Local import -- presumably so test collection succeeds even when
        # cupcake itself fails to import; confirm intent.
        from cupcake.scatter import scatterplot
        # Only checks that each option combination runs without raising;
        # figures are closed after each call to free memory.
        scatterplot(iris)
        plt.close('all')
        scatterplot(iris, text=True)
        plt.close('all')
        scatterplot(iris, hue='species')
        plt.close('all')
        scatterplot(iris, linewidth='species')
        plt.close('all')
        scatterplot(iris, edgecolor='species')
        plt.close('all')
| {
"content_hash": "a57dbd7fc2659a55c0e43e2582816f39",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 58,
"avg_line_length": 20.941176470588236,
"alnum_prop": 0.6306179775280899,
"repo_name": "olgabot/cupcake",
"id": "366dd1fe183044e6b2fad0c5ada00decbcc027e0",
"size": "712",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cupcake/tests/test_scatter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1652"
},
{
"name": "Python",
"bytes": "2057"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Machinecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test longpolling with getblocktemplate."""
from decimal import Decimal
from test_framework.test_framework import MachinecoinTestFramework
from test_framework.util import get_rpc_proxy, random_transaction
import threading
class LongpollThread(threading.Thread):
    """Thread that issues a blocking getblocktemplate longpoll request."""

    def __init__(self, node):
        threading.Thread.__init__(self)
        # query current longpollid
        templat = node.getblocktemplate()
        self.longpollid = templat['longpollid']
        # create a new connection to the node, we can't use the same
        # connection from two threads
        self.node = get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)

    def run(self):
        # Blocks until the node reports a template change for this longpollid.
        self.node.getblocktemplate({'longpollid': self.longpollid})
class GetBlockTemplateLPTest(MachinecoinTestFramework):
    """Functional test for getblocktemplate long polling (see file docstring)."""

    def set_test_params(self):
        self.num_nodes = 2

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.")
        self.nodes[0].generate(10)
        templat = self.nodes[0].getblocktemplate()
        longpollid = templat['longpollid']
        # longpollid should not change between successive invocations if nothing else happens
        templat2 = self.nodes[0].getblocktemplate()
        assert(templat2['longpollid'] == longpollid)

        # Test 1: test that the longpolling wait if we do nothing
        thr = LongpollThread(self.nodes[0])
        thr.start()
        # check that thread still lives
        thr.join(5)  # wait 5 seconds or until thread exits
        assert(thr.is_alive())

        # Test 2: test that longpoll will terminate if another node generates a block
        self.nodes[1].generate(1)  # generate a block on another node
        # check that thread will exit now that new transaction entered mempool
        thr.join(5)  # wait 5 seconds or until thread exits
        assert(not thr.is_alive())

        # Test 3: test that longpoll will terminate if we generate a block ourselves
        thr = LongpollThread(self.nodes[0])
        thr.start()
        self.nodes[0].generate(1)  # generate a block on another node
        thr.join(5)  # wait 5 seconds or until thread exits
        assert(not thr.is_alive())

        # Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
        thr = LongpollThread(self.nodes[0])
        thr.start()
        # generate a random transaction and submit it
        min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
        # min_relay_fee is fee per 1000 bytes, which should be more than enough.
        (txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), min_relay_fee, Decimal("0.001"), 20)
        # after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
        thr.join(60 + 20)
        assert(not thr.is_alive())


if __name__ == '__main__':
    GetBlockTemplateLPTest().main()
| {
"content_hash": "22320cca04c1ea0eaae2f51369a3bb40",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 112,
"avg_line_length": 43.17333333333333,
"alnum_prop": 0.6738727609635577,
"repo_name": "machinecoin-project/machinecoin",
"id": "0c74b38ff957fd8d0fe80a85f147db174d9335eb",
"size": "3240",
"binary": false,
"copies": "2",
"ref": "refs/heads/0.17",
"path": "test/functional/mining_getblocktemplate_longpoll.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "7639"
},
{
"name": "C",
"bytes": "342684"
},
{
"name": "C++",
"bytes": "3521961"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "18048"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "Makefile",
"bytes": "66797"
},
{
"name": "Objective-C",
"bytes": "2023"
},
{
"name": "Objective-C++",
"bytes": "7246"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "211880"
},
{
"name": "QMake",
"bytes": "2019"
},
{
"name": "Shell",
"bytes": "40513"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from moderation.views import FlagObjectView
# Moderation URL map: a single endpoint for flagging an object for review.
# NOTE(review): patterns() is deprecated since Django 1.8; kept as-is for
# compatibility with the project's Django version.
urlpatterns = patterns('',
    url(r'^flag/$', FlagObjectView.as_view(), name='moderation_flagobject'),
)
"content_hash": "eb9a3b45bf2e41dfca1d5403dd280270",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 76,
"avg_line_length": 32.166666666666664,
"alnum_prop": 0.7512953367875648,
"repo_name": "nikdoof/vapemap",
"id": "1b19c5dce192b1385b351072b5155981f8b57708",
"size": "193",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/moderation/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "107"
},
{
"name": "JavaScript",
"bytes": "3216"
},
{
"name": "Puppet",
"bytes": "7746"
},
{
"name": "Python",
"bytes": "204060"
}
],
"symlink_target": ""
} |
from frappe.tests.utils import FrappeTestCase
class TestLogSettingUser(FrappeTestCase):
    """Placeholder test class for the Log Setting User doctype."""
    pass
| {
"content_hash": "c52d502004e61adc4c9f3ef39e01320e",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 45,
"avg_line_length": 19.2,
"alnum_prop": 0.84375,
"repo_name": "StrellaGroup/frappe",
"id": "556dc36dc908ee0d0588794042c69a4638e1f735",
"size": "199",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "frappe/core/doctype/log_setting_user/test_log_setting_user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "65093"
},
{
"name": "HTML",
"bytes": "250858"
},
{
"name": "JavaScript",
"bytes": "2515308"
},
{
"name": "Less",
"bytes": "10921"
},
{
"name": "Python",
"bytes": "3605011"
},
{
"name": "SCSS",
"bytes": "261492"
},
{
"name": "Vue",
"bytes": "98456"
}
],
"symlink_target": ""
} |
from gfregression import (
Family,
Font,
FontStyle,
family_from_googlefonts,
family_from_github_dir,
familyname_from_filename,
get_families,
diff_families,
families_glyphs_all,
)
import tempfile
import os
import shutil
import unittest
from glob import glob
class TestFamily(unittest.TestCase):
    """Tests for building a Family from font files on disk."""

    def setUp(self):
        current_dir = os.path.dirname(__file__)
        roboto_fonts_dir = os.path.join(current_dir, "data", "Roboto")
        self.roboto_fonts = glob(os.path.join(roboto_fonts_dir, "*.ttf"))

    def test_append(self):
        family = Family()
        for path in self.roboto_fonts:
            family.append(path)
        # Re-appending an already-added font -- presumably checks that a
        # duplicate append is tolerated; confirm intent against upstream.
        family.append(path)
        self.assertEqual(family.name, 'Roboto')

    # def test_append_font_which_does_not_belong_to_family(self):
    #     family = Family()
    #     family.append(self.path_1)
    #     family.append(self.path_2)
    #     self.failureException(family.append(self.path_3))
class TestFont(unittest.TestCase):
    """Tests for Font name and style extraction from static and variable fonts."""

    def setUp(self):
        cwd = os.path.dirname(__file__)
        font_path = os.path.join(cwd, 'data', 'Roboto', 'Roboto-BoldItalic.ttf')
        self.font = Font(font_path)
        vf_font_path = os.path.join(cwd, 'data', 'Cabin', 'Cabin-VF.ttf')
        self.vf_font = Font(vf_font_path)

    def test_get_family_name(self):
        """Test case taken from Roboto Black, https://www.github.com/google/roboto
        For fonts which are non RIBBI, the family name field (id 1) often includes
        the style name. The font will also have the preferred family name (id 16)
        included. For RIBBI fonts, nameid 1 is fine, for non RIBBI we want nameid 16
        """
        self.assertEqual(self.font.family_name, u'Roboto')

    def test_static_get_style(self):
        # A static font exposes exactly its own style.
        self.assertEqual(self.font.styles[0].name, 'BoldItalic')

    def test_vf_get_styles(self):
        # A variable font exposes multiple named instances as styles.
        styles = [s.name for s in self.vf_font.styles]
        self.assertIn("CondensedRegular", styles)
        self.assertIn("Regular", styles)
class TestFontStyle(unittest.TestCase):
    """Tests mapping style names to CSS weight/width values and italic flags."""

    def setUp(self):
        cwd = os.path.dirname(__file__)
        font_path = os.path.join(cwd, 'data', 'Roboto')
        font_file = os.path.join(font_path, "Roboto-Regular.ttf")
        self.font = Font(font_file)

    def test_weight_class(self):
        # Style names map onto the CSS weight scale (100-900); Regular is 400.
        style = FontStyle('Italic', self.font)
        self.assertEqual(400, style.css_weight)
        style = FontStyle('Black Italic', self.font)
        self.assertEqual(900, style.css_weight)
        style = FontStyle('Condensed Medium', self.font)
        self.assertEqual(500, style.css_weight)
        style = FontStyle('Expanded Thin', self.font)
        self.assertEqual(100, style.css_weight)

    def test_width_class(self):
        # Width keywords are recognised both with and without internal spaces.
        style = FontStyle("SemiExpanded Black Italic", self.font)
        self.assertEqual(112.5, style.css_width_val)
        style = FontStyle("Semi Expanded Black Italic", self.font)
        self.assertEqual(112.5, style.css_width_val)
        style = FontStyle("Ultra Condensed Thin", self.font)
        self.assertEqual(50, style.css_width_val)
        style = FontStyle("UltraCondensed Thin Italic", self.font)
        self.assertEqual(50, style.css_width_val)

    def test_is_italic(self):
        # 'Italic' is detected whether separated by a space or fused in.
        style = FontStyle('Italic', self.font)
        self.assertEqual(True, style.italic)
        style = FontStyle('Bold Italic', self.font)
        self.assertEqual(True, style.italic)
        style = FontStyle('BoldItalic', self.font)
        self.assertEqual(True, style.italic)
class TestFromFamily(unittest.TestCase):
    """TODO (M Foley) these tests should not use network requests.
    They should be replaced with mock objects"""

    def test_family_from_googlefonts(self):
        with tempfile.TemporaryDirectory() as fp:
            family = family_from_googlefonts('Amatic SC', fp)
            self.assertEqual('Amatic SC', family.name)

    def test_family_from_googlefonts_with_width_families(self):
        with tempfile.TemporaryDirectory() as fp:
            family = family_from_googlefonts("Cabin", fp, include_width_families=True)
            styles = [f.styles[0].name for f in family.fonts]
            # Width sibling families (e.g. Cabin Condensed) should be merged in.
            self.assertIn("CondensedBold", styles)

    def test_family_from_github_dir(self):
        with tempfile.TemporaryDirectory() as fp:
            family = family_from_github_dir('https://github.com/googlefonts/comfortaa/tree/main/fonts/TTF', fp)
            self.assertEqual('Comfortaa', family.name)
class TestGetFamilies(unittest.TestCase):
    """Tests matching before/after families and family-name filename parsing."""

    def setUp(self):
        current_dir = os.path.dirname(__file__)
        roboto_fonts_dir = os.path.join(current_dir, "data", "Roboto")
        self.roboto_fonts = glob(os.path.join(roboto_fonts_dir, "*.ttf"))

    def test_matching_styles(self):
        family_before = Family()
        for path in self.roboto_fonts:
            family_before.append(path)
        family_after = Family()
        for path in self.roboto_fonts:
            if "Italic" in path:
                continue  # drop italics so only the shared styles should match
            family_after.append(path)
        uuid = "1234"
        family_match = get_families(family_before, family_after, uuid)
        self.assertEqual(sorted(["Regular", "Bold"]), sorted(family_match["styles"]))

    def test_familyname_from_filename(self):
        # Covers VF bracket notation, RIBBI names and the '-VF' suffix.
        filename = "Kreon[wght].ttf"
        self.assertEqual("Kreon", familyname_from_filename(filename))
        filename = "Kreon-Regular.ttf"
        self.assertEqual("Kreon", familyname_from_filename(filename))
        filename = "Kreon-Italic-VF.ttf"
        self.assertEqual("Kreon", familyname_from_filename(filename))

    def test_matching_styles_with_widths_from_googlefonts(self):
        # NOTE(review): hits the network (downloads from Google Fonts).
        with tempfile.TemporaryDirectory() as fp_before, tempfile.TemporaryDirectory() as fp_after:
            family_before = family_from_googlefonts("Cabin", fp_before, include_width_families=True)
            family_after = family_from_googlefonts("Cabin Condensed", fp_after)
            uuid = "1234"
            family_match = get_families(family_before, family_after, uuid)
            styles = ["CondensedRegular", "CondensedMedium", "CondensedSemiBold", "CondensedBold"]
            self.assertEqual(sorted(styles), sorted(family_match["styles"]), uuid)
class TestDiffFamilies(unittest.TestCase):
    """Tests diffing two copies of the same on-disk family."""

    def setUp(self):
        current_dir = os.path.dirname(__file__)
        roboto_fonts_dir = os.path.join(current_dir, "data", "Roboto")
        self.roboto_fonts = glob(os.path.join(roboto_fonts_dir, "*.ttf"))
        self.family_before = Family()
        self.family_after = Family()
        for path in self.roboto_fonts:
            self.family_before.append(path)
            self.family_after.append(path)

    def test_diff_families(self):
        # Only asserts that some diff structure is produced, not its contents.
        uuid = '1234'
        diff = diff_families(self.family_before, self.family_after, uuid)
        self.assertNotEqual(0, len(diff))

    def test_families_glyphs_all(self):
        uuid = '1234'
        diff = families_glyphs_all(self.family_before, self.family_after, uuid)
        self.assertNotEqual(0, len(diff))
class TestGoogleFontsAPI(unittest.TestCase):
    """Tests for the GoogleFonts download helper (network-dependent)."""

    def setUp(self):
        from gfregression.downloadfonts import GoogleFonts
        self.googlefonts = GoogleFonts()

    def test_download_family(self):
        with tempfile.TemporaryDirectory() as fp:
            fonts = self.googlefonts.download_family("Comfortaa", fp)
            # NOTE(review): assertGreaterEqual(6, len(fonts)) asserts
            # len(fonts) <= 6 -- confirm the argument order is intended.
            self.assertGreaterEqual(6, len(fonts))

    def test_sibling_families(self):
        families = self.googlefonts.related_families("Cabin")
        self.assertIn("Cabin Sketch", families)

    def test_width_families(self):
        families = self.googlefonts.width_families("Cabin")
        self.assertIn("Cabin Condensed", families)

    def test_has_family(self):
        family = self.googlefonts.has_family("Some Generic Family")
        self.assertEqual(False, family)
        family = self.googlefonts.has_family("Roboto")
        self.assertEqual(True, family)


if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "c648535b422b9bf307e1e6b1fef20970",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 111,
"avg_line_length": 35.157205240174676,
"alnum_prop": 0.6435225437833809,
"repo_name": "googlefonts/gfregression",
"id": "55717ae1fae79a1f92ef2de76850c695627331b0",
"size": "8051",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_family.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3289"
},
{
"name": "Dockerfile",
"bytes": "211"
},
{
"name": "HTML",
"bytes": "12643"
},
{
"name": "Python",
"bytes": "44502"
},
{
"name": "Shell",
"bytes": "3386"
}
],
"symlink_target": ""
} |
import re
import mechanize
from bs4 import BeautifulSoup
import smtplib
import os.path
from time import *
lt = localtime()
print "################## Neuer Durchgang ######################"
print strftime("Datum: %d.%m.%Y", lt)
print strftime("Uhrzeit: %H:%M:%S", lt)

# ---------------------------------------------------------------------------
# Fetch the HTML page (log in to the Dualis grade portal)
# ---------------------------------------------------------------------------
br = mechanize.Browser()
# Open the welcome page
response = br.open("https://dualis.dhbw.de/scripts/mgrqcgi?APPNAME=CampusNet&PRGNAME=EXTERNALPAGES&ARGUMENTS=-N000000000000001,-N000324,-Awelcome")
# Select the login form
br.select_form('cn_loginForm')
# ... and fill it in
control = br.form.find_control("usrname")
control.value = "#######USR############"
control = br.form.find_control("pass")
control.value = "#########PASS##############"
response = br.submit()
# From the links on the post-login page, pick the one whose text starts with
# 'Leistungs' -- avoids matching on the umlaut in the full link text.
for link in br.links():
    str = link.text  # NOTE(review): shadows the builtin ``str``; rename when touching this code
    if str.startswith('Leistungs'):
        # Follow the link to the grade overview
        response1 = br.follow_link(link)
# Read the grade overview page
html = response1.read()

# ---------------------------------------------------------------------------
# Extract subjects and grades from the HTML
# ---------------------------------------------------------------------------
soup = BeautifulSoup(html)
table = soup.find("table", { "class" : "nb list students_results" })
i = 0
faecher = []  # subject names
noten = []    # [subject, pass-status, grade] triples
for row in table.findAll("tr"):
    cells = row.findAll("td")
    if len(cells) > 5:
        best = cells[5]
        img = best.find('img')
        bestanden = img.get('title')  # pass status is encoded in the icon's title
        if bestanden != 'Offen':  # skip results that are still open ('Offen')
            loknoten = []
            fachname = cells[1].find('a').text
            ausgabeString = cells
            note = cells[4].text
            faecher.append(fachname)
            loknoten.append(fachname)
            loknoten.append(bestanden)
            loknoten.append(note)
            noten.append(loknoten)
faecherStr = ';'.join(faecher)
# Transliterate German umlauts so the stored data is plain ASCII
faecherStr = faecherStr.replace(u"ü","ue")
faecherStr = faecherStr.replace(u"ö","oe")
faecherStr = faecherStr.replace(u"ä","ae")
faecherStr = faecherStr.replace(u"ß","ss")
faecherStr = faecherStr.replace(u"Ä","Ae")
faecherStr = faecherStr.replace(u"Ö","Oe")
faecherStr = faecherStr.replace(u"Ü","Ue")

# ---------------------------------------------------------------------------
# Read/store state to decide whether notification mails must be sent
# ---------------------------------------------------------------------------
neueFaecher = []
if os.path.isfile("./fachspeicher.dat"):
    f = open('./fachspeicher.dat', 'r+')
    dateiText = f.read()
    print len(dateiText)
    # NOTE(review): only the lengths are compared, so a change that keeps the
    # total length identical goes undetected -- comparing the strings
    # themselves would be safer.
    if len(dateiText) == len(faecherStr):
        # Data in the file is up to date
        print "Datei ist aktuell"
        mailSenden = False
    else:
        # Data is stale; write the current list to the file.
        # NOTE(review): after f.read() the file position is at EOF, so this
        # write appends instead of replacing the old content -- confirm intent.
        f.write(faecherStr)
        print "Datei neu geschrieben"
        mailSenden = True
        # Find out which subjects are new
        faecherArr = faecherStr.split(";")
        dateiArr = dateiText.split(";")
        i = 0
        for fach in faecherArr:
            # NOTE(review): raises IndexError when the stored list is shorter
            # than the current one.
            if dateiArr[i] == fach:
                i = i + 1
            else:
                neueFaecher.append(fach)
else:
    f = open('./fachspeicher.dat', 'a')
    mailSenden = False
    print "Datei neu erstellt, Skript muss erneut laufen"

# ---------------------------------------------------------------------------
# Send notification emails
# ---------------------------------------------------------------------------
if mailSenden:
    msgIch = "\n Hallo Lasse,\n Na, alles klar? \n \n Es gab neue Noten: \n \n \n "
    for note in noten:
        # Transliterate umlauts in the subject name before composing the mail
        note[0] = note[0].replace(u"ü","ue")
        note[0] = note[0].replace(u"ö","oe")
        note[0] = note[0].replace(u"ä","ae")
        note[0] = note[0].replace(u"ß","ss")
        note[0] = note[0].replace(u"Ä","Ae")
        note[0] = note[0].replace(u"Ö","Oe")
        note[0] = note[0].replace(u"Ü","Ue")
        msgIch = msgIch + note[0] + " hast du mit einer Note von " + note[2] + " " + note[1] + "\n \n"
    msgIch = msgIch + "Gruss, \n\nDein Lasse :)"
    to = '#################@bla.de'
    user = '#################@bla.de'
    pwd = '##########pass#############'
    to2 = '#################@bla.de'
    msg = "\n" + "Hallo,"+"\n\n" + "Es sind Noten in den folgenden Fächern online:\n"
    print neueFaecher
    for fach in neueFaecher:
        msg = msg +"\n"+ fach.encode('ascii', 'ignore')+ "\n"
    smtpserver = smtplib.SMTP("smtp.1und1.de",587)
    smtpserver.ehlo()
    smtpserver.starttls()
    smtpserver.ehlo  # NOTE(review): missing call parentheses -- this line is a no-op attribute access
    smtpserver.login(user, pwd)
    header = 'To:' + to + '\n' + 'From: ' + user + '\n' + 'Subject:Neue Noten online! \n'
    msgIchk = header + msgIch
    smtpserver.sendmail(user, to,msgIchk)
    header = 'To:' + to2 + '\n' + 'From: ' + user + '\n' + 'Subject:Neue Noten online! \n'
    msgk = header + msg
    smtpserver.sendmail(user, to2,msgk)
    smtpserver.close()
| {
"content_hash": "245bd5c4060cf730102338d6caa65a2c",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 186,
"avg_line_length": 36.6764705882353,
"alnum_prop": 0.5396952686447474,
"repo_name": "lasrie/Noten",
"id": "d7fcbe9d54929e3bac416d6f21fd870eada45653",
"size": "5079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "noten.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5079"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import django
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Email.messaging_subevent nullable (FK kept with on_delete=PROTECT)."""

    dependencies = [
        ('sms', '0042_infobip_pinpoint_backends'),
    ]

    operations = [
        migrations.AlterField(
            model_name='email',
            name='messaging_subevent',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT,
                                    to='sms.MessagingSubEvent'),
        ),
    ]
| {
"content_hash": "5386064fd0b1bbf354458e1a8133aa13",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 91,
"avg_line_length": 25.6,
"alnum_prop": 0.595703125,
"repo_name": "dimagi/commcare-hq",
"id": "57857b1b9302e9ee79c364b168608d689d4c021a",
"size": "586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/sms/migrations/0043_auto_20200622_1243.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
} |
"""
Root url's map for application
"""
from django.conf.urls import *
from datebook.views import IndexView
from datebook.views.author import DatebookAuthorView
from datebook.views.year import DatebookYearView
from datebook.views.month import DatebookMonthView, DatebookMonthGetOrCreateView, DatebookMonthCurrentView, DatebookMonthFormView, DatebookNotesFormView
from datebook.views.day import DayEntryFormCreateView, DayEntryDetailView, DayEntryFormEditView, DayEntryCurrentView, DayEntryDeleteFormView
from datebook.views.daymodel import DayModelListView, DayEntryToDayModelFormView, DayModelFormEditView
# NOTE(review): patterns() is deprecated since Django 1.8; kept as-is for
# compatibility with the project's Django version.
urlpatterns = patterns('',
    # Index and month creation
    url(r'^$', IndexView.as_view(), name='index'),
    url(r'^create/$', DatebookMonthFormView.as_view(), name='create'),
    # Per-author views
    url(r'^(?P<author>\w+)/$', DatebookAuthorView.as_view(), name='author-detail'),
    url(r'^(?P<author>\w+)/day-models/$', DayModelListView.as_view(), name='day-models'),
    url(r'^(?P<author>\w+)/day-models/(?P<pk>\d+)/$', DayModelFormEditView.as_view(), name='day-model-edit'),
    url(r'^(?P<author>\w+)/current-day/$', DayEntryCurrentView.as_view(), name='current-day'),
    url(r'^(?P<author>\w+)/current-month/$', DatebookMonthCurrentView.as_view(), name='current-month'),
    # Year and month views
    url(r'^(?P<author>\w+)/(?P<year>\d{4})/$', DatebookYearView.as_view(), name='year-detail'),
    url(r'^(?P<author>\w+)/(?P<year>\d{4})/add/(?P<month>\d{1,2})/$', DatebookMonthGetOrCreateView.as_view(), name='month-add'),
    url(r'^(?P<author>\w+)/(?P<year>\d{4})/(?P<month>\d{1,2})/$', DatebookMonthView.as_view(), name='month-detail'),
    url(r'^(?P<author>\w+)/(?P<year>\d{4})/(?P<month>\d{1,2})/notes/$', DatebookNotesFormView.as_view(), name='month-notes'),
    # Day entry views
    url(r'^(?P<author>\w+)/(?P<year>\d{4})/(?P<month>\d{1,2})/add/(?P<day>\d{1,2})/$', DayEntryFormCreateView.as_view(), name='day-add'),
    url(r'^(?P<author>\w+)/(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/$', DayEntryDetailView.as_view(), name='day-detail'),
    url(r'^(?P<author>\w+)/(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/edit/$', DayEntryFormEditView.as_view(), name='day-edit'),
    url(r'^(?P<author>\w+)/(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/remove/$', DayEntryDeleteFormView.as_view(), name='day-remove'),
    url(r'^(?P<author>\w+)/(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/to-daymodel/$', DayEntryToDayModelFormView.as_view(), name='dayentry-to-daymodel'),
)
| {
"content_hash": "ce88458bbbe8df1af1509b47e539f9d4",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 162,
"avg_line_length": 64.89473684210526,
"alnum_prop": 0.6516626115166261,
"repo_name": "sveetch/django-datebook",
"id": "987c21a3cf2bd0e7b72d8ae1b7cf0f44b66bba9a",
"size": "2490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datebook/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "399014"
},
{
"name": "HTML",
"bytes": "28769"
},
{
"name": "JavaScript",
"bytes": "25408"
},
{
"name": "Python",
"bytes": "120935"
},
{
"name": "Ruby",
"bytes": "990"
}
],
"symlink_target": ""
} |
"""A simple filter that thresholds on input data.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005, Enthought, Inc.
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Copyright (c) 2010, Enthought, Inc.
# License: BSD Style.
import numpy as np
# Enthought library imports.
from traits.api import Instance, Range, Float, Bool, \
Property, Enum
from traitsui.api import View, Group, Item
from tvtk.api import tvtk
# Local imports
from mayavi.core.filter import Filter
from mayavi.core.pipeline_info import PipelineInfo
######################################################################
# `Threshold` class.
######################################################################
class Threshold(Filter):
# The version of this class. Used for persistence.
__version__ = 0
# The threshold filter used.
threshold_filter = Property(Instance(tvtk.Object, allow_none=False), record=True)
# The filter type to use, specifies if the cells or the points are
# cells filtered via a threshold.
filter_type = Enum('cells', 'points',
desc='if thresholding is done on cells or points')
# Lower threshold (this is a dynamic trait that is changed when
# input data changes).
lower_threshold = Range(value=-1.0e20,
low='_data_min',
high='_data_max',
enter_set=True,
auto_set=False,
desc='the lower threshold of the filter')
# Upper threshold (this is a dynamic trait that is changed when
# input data changes).
upper_threshold = Range(value=1.0e20,
low='_data_min',
high='_data_max',
enter_set=True,
auto_set=False,
desc='the upper threshold of the filter')
# Automatically reset the lower threshold when the upstream data
# changes.
auto_reset_lower = Bool(True, desc='if the lower threshold is '
'automatically reset when upstream '
'data changes')
# Automatically reset the upper threshold when the upstream data
# changes.
auto_reset_upper = Bool(True, desc='if the upper threshold is '
'automatically reset when upstream '
'data changes')
input_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
output_info = PipelineInfo(datasets=['poly_data',
'unstructured_grid'],
attribute_types=['any'],
attributes=['any'])
# Our view.
view = View(Group(Group(Item(name='filter_type'),
Item(name='lower_threshold'),
Item(name='auto_reset_lower'),
Item(name='upper_threshold'),
Item(name='auto_reset_upper')),
Item(name='_'),
Group(Item(name='threshold_filter',
show_label=False,
visible_when='object.filter_type == "cells"',
style='custom', resizable=True)),
),
resizable=True
)
########################################
# Private traits.
# These traits are used to set the limits for the thresholding.
# They store the minimum and maximum values of the input data.
_data_min = Float(-1e20)
_data_max = Float(1e20)
# The threshold filter for cell based filtering
_threshold = Instance(tvtk.Threshold, args=(), allow_none=False)
# The threshold filter for points based filtering.
_threshold_points = Instance(tvtk.ThresholdPoints, args=(), allow_none=False)
# Internal data to
_first = Bool(True)
######################################################################
# `object` interface.
######################################################################
def __get_pure_state__(self):
d = super(Threshold, self).__get_pure_state__()
# These traits are dynamically created.
for name in ('_first', '_data_min', '_data_max'):
d.pop(name, None)
return d
######################################################################
# `Filter` interface.
######################################################################
def setup_pipeline(self):
attrs = ['all_scalars', 'attribute_mode',
'component_mode', 'selected_component']
self._threshold.on_trait_change(self._threshold_filter_edited,
attrs)
    def update_pipeline(self):
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.

        This method is invoked (automatically) when the input fires a
        `pipeline_changed` event.
        """
        if len(self.inputs) == 0:
            return
        # By default we set the input to the first output of the first
        # input.
        fil = self.threshold_filter
        self.configure_connection(fil, self.inputs[0])
        # Refresh the threshold limits from the (possibly new) input data.
        self._update_ranges()
        self._set_outputs([self.threshold_filter.output])
    def update_data(self):
        """Override this method to do what is necessary when upstream
        data changes.

        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        if len(self.inputs) == 0:
            return
        # Re-derive the threshold limits from the changed data.
        self._update_ranges()

        # Propagate the data_changed event.
        self.data_changed = True
######################################################################
# Non-public interface
######################################################################
def _lower_threshold_changed(self, new_value):
fil = self.threshold_filter
fil.threshold_between(new_value, self.upper_threshold)
fil.update()
self.data_changed = True
def _upper_threshold_changed(self, new_value):
fil = self.threshold_filter
fil.threshold_between(self.lower_threshold, new_value)
fil.update()
self.data_changed = True
    def _update_ranges(self):
        """Updates the ranges of the input.

        On the first call both thresholds simply adopt the data range.
        Afterwards a threshold is moved only when its auto_reset_* flag is
        set, and the two bounds are updated in an order that never leaves
        the Range traits with max < min in between.
        """
        data_range = self._get_data_range()
        if len(data_range) == 0:
            return
        dr = data_range
        if self._first:
            # First time through: the lower threshold is set silently so
            # that only one filter update fires (via upper_threshold).
            self._data_min, self._data_max = dr
            self.set(lower_threshold = dr[0], trait_change_notify=False)
            self.upper_threshold = dr[1]
            self._first = False
            return

        # Decide whether to change 'lower' or 'upper' first, to avoid
        # ending up with inconsistent bounds (max < min) in the lower_threshold
        # and upper_threshold Range traits.
        if dr[0] <= self._data_min:
            # Safe to change lower bound first: intermediate range is [dr[0],
            # self._data_max], and dr[0] <= self._data_min <= self._data_max.
            change_lower_first = True
        else:
            # Safe to change upper bound first: intermediate range is [self._data_min, dr[1]],
            # and self._data_min < dr[0] <= dr[1].
            change_lower_first = False

        if change_lower_first:
            if self.auto_reset_lower:
                self._data_min = dr[0]
                # Only notify from the lower threshold when the upper one
                # will not notify anyway (avoids a redundant update).
                notify = not self.auto_reset_upper
                self.set(lower_threshold = dr[0],
                         trait_change_notify=notify)
            if self.auto_reset_upper:
                self._data_max = dr[1]
                self.upper_threshold = dr[1]
        else:
            if self.auto_reset_upper:
                self._data_max = dr[1]
                notify = not self.auto_reset_lower
                self.set(upper_threshold = dr[1],
                         trait_change_notify=notify)
            if self.auto_reset_lower:
                self._data_min = dr[0]
                self.lower_threshold = dr[0]
def _get_data_range(self):
"""Returns the range of the input scalar data."""
input = self.inputs[0].outputs[0]
data_range = []
ps = input.point_data.scalars
cs = input.cell_data.scalars
# FIXME: need to be able to handle cell and point data
# together.
if ps is not None:
data_range = list(ps.range)
if np.isnan(data_range[0]):
data_range[0] = float(np.nanmin(ps.to_array()))
if np.isnan(data_range[1]):
data_range[1] = float(np.nanmax(ps.to_array()))
elif cs is not None:
data_range = cs.range
if np.isnan(data_range[0]):
data_range[0] = float(np.nanmin(cs.to_array()))
if np.isnan(data_range[1]):
data_range[1] = float(np.nanmax(cs.to_array()))
return data_range
def _auto_reset_lower_changed(self, value):
if len(self.inputs) == 0:
return
if value:
dr = self._get_data_range()
self._data_min = dr[0]
self.lower_threshold = dr[0]
def _auto_reset_upper_changed(self, value):
if len(self.inputs) == 0:
return
if value:
dr = self._get_data_range()
self._data_max = dr[1]
self.upper_threshold = dr[1]
def _get_threshold_filter(self):
if self.filter_type == 'cells':
return self._threshold
else:
return self._threshold_points
def _filter_type_changed(self, value):
if value == 'cells':
old = self._threshold_points
new = self._threshold
else:
old = self._threshold
new = self._threshold_points
self.trait_property_changed('threshold_filter', old, new)
    def _threshold_filter_changed(self, old, new):
        """Handler for the 'threshold_filter' property change (fired from
        `_filter_type_changed`): reconnects the new filter to the input,
        re-applies the current thresholds and republishes the output.
        """
        if len(self.inputs) == 0:
            return
        fil = new
        self.configure_connection(fil, self.inputs[0])
        fil.threshold_between(self.lower_threshold,
                              self.upper_threshold)
        fil.update()
        self._set_outputs([fil.output])
    def _threshold_filter_edited(self):
        """Called when any of the cell-threshold filter's configuration
        traits is edited (hooked up in `setup_pipeline`)."""
        self.threshold_filter.update()
        self.data_changed = True
| {
"content_hash": "9ac45564dc92504a837942d0c897743a",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 94,
"avg_line_length": 36.9204152249135,
"alnum_prop": 0.510215557638238,
"repo_name": "liulion/mayavi",
"id": "66b12a709a0a874f28233e439bc02d6bcc926ae5",
"size": "10670",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mayavi/filters/threshold.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1054"
},
{
"name": "GAP",
"bytes": "34817"
},
{
"name": "Python",
"bytes": "2511883"
},
{
"name": "Shell",
"bytes": "147"
}
],
"symlink_target": ""
} |
import SokoMap
class HashTable:
    """Records Sokoban game states that have already been visited."""

    def __init__(self):
        # Maps a state signature string to True once the state is seen.
        self.table = {}

    def checkAdd(self, sm):
        """Return True if *sm*'s state was seen before, else record it
        and return False.

        The state signature combines the block positions with the player
        position of the given SokoMap.
        """
        signature = str(sm.getBlocks() + [sm.getPlayer()])
        seen = signature in self.table
        if not seen:
            self.table[signature] = True
        return seen
| {
"content_hash": "a71c67c632b82be837b3b1657c97662f",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 52,
"avg_line_length": 19.5625,
"alnum_prop": 0.5111821086261981,
"repo_name": "lrei/willy",
"id": "b3a16f186c0cfff1fc49d6b399dcac51903b2f37",
"size": "313",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "HashTable.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "91"
},
{
"name": "Python",
"bytes": "24963"
}
],
"symlink_target": ""
} |
from __future__ import division
import numpy as np
from .uiOmBase import *
########################################################################
class ScenarioValueMonitor(QtWidgets.QTableWidget):
    """Scenario-analysis monitor table for a single value (one of the
    keys produced by the analysis, e.g. pnl/delta/gamma/theta/vega)."""

    #----------------------------------------------------------------------
    def __init__(self, key, parent=None):
        """Constructor"""
        super(ScenarioValueMonitor, self).__init__(parent)
        # Key into each per-scenario result dict (see updateData).
        self.key = key
        self.initUi()

    #----------------------------------------------------------------------
    def initUi(self):
        """Initialize the table widget."""
        self.setEditTriggers(self.NoEditTriggers)
        self.setMinimumHeight(600)

    #----------------------------------------------------------------------
    def updateData(self, result, priceChangeArray, impvChangeArray):
        """Refresh the table.

        *result* maps (priceChange, impvChange) tuples to dicts of values;
        *priceChangeArray* / *impvChangeArray* are the scenario axes as
        fractions (e.g. 0.01 for a 1% change).
        """
        # Clear the previous contents.
        self.clearContents()

        # Headers: columns are price changes, rows are implied-volatility
        # changes.
        self.setColumnCount(len(priceChangeArray))
        priceChangeHeaders = [('price %s%%' %(priceChange*100)) for priceChange in priceChangeArray]
        self.setHorizontalHeaderLabels(priceChangeHeaders)

        self.setRowCount(len(impvChangeArray))
        impvChangeHeaders = [('impv %s%%' %(impvChange*100)) for impvChange in impvChangeArray]
        self.setVerticalHeaderLabels(impvChangeHeaders)

        # Fill in the data.
        l = [d[self.key] for d in result.values()]
        maxValue = max(l)
        minValue = min(l)

        # If the maximum equals the minimum the calculation logic must be
        # broken, so bail out.
        if maxValue == minValue:
            return
        midValue = (maxValue + minValue) / 2
        colorRatio = 255*2/(maxValue-minValue)

        for column, priceChange in enumerate(priceChangeArray):
            for row, impvChange in enumerate(impvChangeArray):
                value = result[(priceChange, impvChange)][self.key]

                # Compute the cell colour: shades toward green for values
                # below the midpoint, toward red above it.
                red = 255
                green = 255
                colorValue = (value - midValue) * colorRatio
                if colorValue <= 0:
                    red -= abs(colorValue)
                else:
                    green -= abs(colorValue)
                color = QtGui.QColor(red, green, 0)

                # Insert the cell into the table.
                cell = QtWidgets.QTableWidgetItem('%.1f' %value)
                cell.setBackground(color)
                cell.setForeground(COLOR_BLACK)
                self.setItem(row, column, cell)

        self.resizeColumnsToContents()
        self.resizeRowsToContents()
########################################################################
class ScenarioAnalysisMonitor(QtWidgets.QTabWidget):
    """Scenario-analysis monitor: a tab widget with one value monitor per
    tracked quantity (pnl and the greeks)."""

    #----------------------------------------------------------------------
    def __init__(self, parent=None):
        """Constructor"""
        super(ScenarioAnalysisMonitor, self).__init__(parent)
        self.initUi()

    #----------------------------------------------------------------------
    def initUi(self):
        """Create one ScenarioValueMonitor tab per monitored key."""
        self.valueMonitorList = []
        for monitored_key in ('pnl', 'delta', 'gamma', 'theta', 'vega'):
            monitor = ScenarioValueMonitor(monitored_key)
            self.addTab(monitor, monitored_key)
            self.valueMonitorList.append(monitor)

    #----------------------------------------------------------------------
    def updateData(self, result, priceChangeArray, impvChangeArray):
        """Forward fresh scenario results to every tab."""
        for monitor in self.valueMonitorList:
            monitor.updateData(result, priceChangeArray, impvChangeArray)
########################################################################
class AnalysisManager(QtWidgets.QWidget):
    """Research/analysis manager: runs a price/implied-volatility scenario
    analysis over the portfolio and displays the result."""

    #----------------------------------------------------------------------
    def __init__(self, omEngine, parent=None):
        """Constructor"""
        super(AnalysisManager, self).__init__(parent)

        self.omEngine = omEngine
        self.portfolio = omEngine.portfolio

        self.initUi()

    #----------------------------------------------------------------------
    def initUi(self):
        """Initialize the UI."""
        self.setWindowTitle(u'持仓分析')

        self.scenarioAnalysisMonitor = ScenarioAnalysisMonitor()

        self.buttonScenarioAnalysis = QtWidgets.QPushButton(u'情景分析')
        self.buttonScenarioAnalysis.clicked.connect(self.updateData)

        hbox = QtWidgets.QHBoxLayout()
        hbox.addWidget(self.buttonScenarioAnalysis)
        hbox.addStretch()

        vbox = QtWidgets.QVBoxLayout()
        vbox.addLayout(hbox)
        vbox.addWidget(self.scenarioAnalysisMonitor)

        self.setLayout(vbox)

    #----------------------------------------------------------------------
    def updateData(self):
        """Run the scenario analysis and push the result to the monitor."""
        result, priceChangeArray, impvChangeArray = self.runScenarioAnalysis()
        if result:
            self.scenarioAnalysisMonitor.updateData(result, priceChangeArray, impvChangeArray)

    #----------------------------------------------------------------------
    def runScenarioAnalysis(self):
        """Run the scenario analysis.

        Returns (result, priceChangeArray, impvChangeArray), where result
        maps (priceChange, impvChange) to a dict of the portfolio's pnl
        and greeks, or (None, None, None) when there is no portfolio or
        the pricing model fails.
        """
        portfolio = self.portfolio
        # Fixed: guard against a missing portfolio *before* dereferencing
        # portfolio.model -- previously a None portfolio raised
        # AttributeError here instead of returning cleanly.
        if not portfolio:
            return None, None, None
        calculateGreeks = portfolio.model.calculateGreeks

        # Scenario axes: -5%..+5% in 1% steps for both the underlying
        # price and the implied volatility.
        changeRange = 5
        priceChangeArray = np.arange(-changeRange, changeRange+1) / 100
        impvChangeArray = np.arange(-changeRange, changeRange+1) / 100
        expiryChange = 1/240            # time decay of one trading day

        result = {}                     # analysis result

        for priceChange in priceChangeArray:
            for impvChange in impvChangeArray:
                portfolioPnl = 0
                portfolioDelta = 0
                portfolioGamma = 0
                portfolioTheta = 0
                portfolioVega = 0

                # Underlyings contribute linear pnl and delta only.
                for underlying in portfolio.underlyingDict.values():
                    portfolioPnl += underlying.midPrice * underlying.netPos * priceChange
                    portfolioDelta += underlying.theoDelta * underlying.netPos

                try:
                    for option in portfolio.optionDict.values():
                        if not option.netPos:
                            continue
                        price, delta, gamma, theta, vega = calculateGreeks(option.underlying.midPrice*(1+priceChange),
                                                                           option.k,
                                                                           option.r,
                                                                           max(option.t-expiryChange, 0),
                                                                           option.pricingImpv*(1+impvChange),
                                                                           option.cp)
                        portfolioPnl += (price - option.theoPrice) * option.netPos * option.size
                        portfolioDelta += delta * option.netPos * option.size
                        portfolioGamma += gamma * option.netPos * option.size
                        portfolioTheta += theta * option.netPos * option.size
                        portfolioVega += vega * option.netPos * option.size
                except ZeroDivisionError:
                    # The pricing model can divide by zero (e.g. at
                    # expiry); treat the whole analysis as failed.
                    return None, None, None

                d = {
                    'pnl': portfolioPnl,
                    'delta': portfolioDelta,
                    'gamma': portfolioGamma,
                    'theta': portfolioTheta,
                    'vega': portfolioVega
                }
                result[(priceChange, impvChange)] = d

        return result, priceChangeArray, impvChangeArray
| {
"content_hash": "eb567b221da3aabdb822599c646134fd",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 118,
"avg_line_length": 38.863414634146345,
"alnum_prop": 0.45186393874733277,
"repo_name": "wisfern/vnpy",
"id": "156486dd88e28950acae42412619a95998415c19",
"size": "8234",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "vnpy/trader/app/optionMaster/uiOmAnalysisManager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "341"
},
{
"name": "C",
"bytes": "3151559"
},
{
"name": "C++",
"bytes": "8866606"
},
{
"name": "CMake",
"bytes": "44564"
},
{
"name": "HTML",
"bytes": "807"
},
{
"name": "Makefile",
"bytes": "99693"
},
{
"name": "Objective-C",
"bytes": "22505"
},
{
"name": "PHP",
"bytes": "4107"
},
{
"name": "Python",
"bytes": "5367161"
},
{
"name": "Shell",
"bytes": "3722"
}
],
"symlink_target": ""
} |
"""
A simple sample of using a wx.Overlay to draw a rubberband effect
"""
import wx
print(wx.version())
class TestPanel(wx.Panel):
    """Demo panel that paints some sample content and draws a rubber-band
    rectangle with wx.Overlay while the left mouse button is dragged."""

    def __init__(self, *args, **kw):
        wx.Panel.__init__(self, *args, **kw)
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
        self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
        self.Bind(wx.EVT_MOTION, self.OnMouseMove)
        # Anchor point of the rubber-band; None while not dragging.
        self.startPos = None
        self.overlay = wx.Overlay()
        # A native widget, to show that the overlay draws on top of it too.
        wx.TextCtrl(self, pos=(140,20))

    def OnPaint(self, evt):
        # Just some simple stuff to paint in the window for an example
        dc = wx.PaintDC(self)
        dc.SetBackground(wx.Brush("sky blue"))
        dc.Clear()
        dc.DrawLabel("Drag the mouse across this window to see \n"
                     "a rubber-band effect using wx.Overlay",
                     (140, 50, -1, -1))

        coords = ((40,40),(200,220),(210,120),(120,300))
        # Switch to a GCDC so the polygon is drawn anti-aliased.
        dc = wx.GCDC(dc)
        dc.SetPen(wx.Pen("red", 2))
        dc.SetBrush(wx.CYAN_BRUSH)
        dc.DrawPolygon(coords)

    def OnLeftDown(self, evt):
        # Capture the mouse and save the starting position for the
        # rubber-band
        self.CaptureMouse()
        self.startPos = evt.GetPosition()

    def OnMouseMove(self, evt):
        if evt.Dragging() and evt.LeftIsDown():
            rect = wx.Rect(topLeft=self.startPos, bottomRight=evt.GetPosition())

            # Draw the rubber-band rectangle using an overlay so it
            # will manage keeping the rectangle and the former window
            # contents separate.
            dc = wx.ClientDC(self)
            odc = wx.DCOverlay(self.overlay, dc)
            odc.Clear()

            # Mac's DC is already the same as a GCDC, and it causes
            # problems with the overlay if we try to use an actual
            # wx.GCDC so don't try it.
            if 'wxMac' not in wx.PlatformInfo:
                dc = wx.GCDC(dc)

            dc.SetPen(wx.Pen("black", 2))
            # Semi-transparent grey fill (alpha 0x80).
            dc.SetBrush(wx.Brush(wx.Colour(0xC0, 0xC0, 0xC0, 0x80)))
            dc.DrawRectangle(rect)

    def OnLeftUp(self, evt):
        if self.HasCapture():
            self.ReleaseMouse()
        self.startPos = None

        # When the mouse is released we reset the overlay and it
        # restores the former content to the window.
        dc = wx.ClientDC(self)
        odc = wx.DCOverlay(self.overlay, dc)
        odc.Clear()
        # The DCOverlay must be destroyed before resetting the overlay:
        # its destructor flushes the drawing back to the window.
        del odc
        self.overlay.Reset()
# Demo driver: create the application and a frame hosting the TestPanel.
app = wx.App(redirect=False)
frm = wx.Frame(None, title="wx.Overlay Test", size=(450,450))
#frm.SetDoubleBuffered(True)
pnl = TestPanel(frm)
frm.Show()
app.MainLoop()
| {
"content_hash": "5820d7a522d3095eac2d5afcfd681d61",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 80,
"avg_line_length": 30.25,
"alnum_prop": 0.581893313298272,
"repo_name": "dnxbjyj/python-basic",
"id": "861f701b299f325409bac46bf423472e68c93065",
"size": "2662",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gui/wxpython/wxPython-demo-4.0.1/samples/overlay/overlay.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "70"
},
{
"name": "HTML",
"bytes": "274934"
},
{
"name": "Jupyter Notebook",
"bytes": "868723"
},
{
"name": "Python",
"bytes": "4032747"
},
{
"name": "Shell",
"bytes": "446"
}
],
"symlink_target": ""
} |
import copy
from nailgun.db.sqlalchemy.models import NeutronConfig
from nailgun.db.sqlalchemy.models import NovaNetworkConfig
from nailgun.objects import ClusterCollection
from nailgun.objects import MasterNodeSettings
from nailgun.objects import NodeCollection
from nailgun.settings import settings
from nailgun.statistics.utils import get_attr_value
from nailgun.statistics.utils import WhiteListRule
from nailgun import utils
class InstallationInfo(object):
    """Collects info about Fuel installation

    Master nodes, clusters, networks, e.t.c.
    Used for collecting info for fuel statistics
    """

    # Whitelist of cluster attribute values that may be included in the
    # collected statistics; anything not listed here is never reported.
    attributes_white_list = (
        # ((path, to, property), 'map_to_name', transform_function)
        WhiteListRule(('common', 'libvirt_type', 'value'),
                      'libvirt_type', None),
        WhiteListRule(('common', 'debug', 'value'), 'debug_mode', None),
        WhiteListRule(('common', 'use_cow_images', 'value'),
                      'use_cow_images', None),
        WhiteListRule(('common', 'auto_assign_floating_ip', 'value'),
                      'auto_assign_floating_ip', None),
        WhiteListRule(('common', 'nova_quota', 'value'), 'nova_quota', None),
        WhiteListRule(('common', 'puppet_debug', 'value'),
                      'puppet_debug', None),
        WhiteListRule(('common', 'resume_guests_state_on_host_boot', 'value'),
                      'resume_guests_state_on_host_boot', None),
        WhiteListRule(('corosync', 'verified', 'value'),
                      'corosync_verified', None),
        WhiteListRule(('public_network_assignment', 'assign_to_all_nodes',
                       'value'), 'assign_public_to_all_nodes', None),
        WhiteListRule(('neutron_advanced_configuration', 'neutron_l2_pop',
                       'value'), 'neutron_l2_pop', None),
        WhiteListRule(('neutron_advanced_configuration', 'neutron_dvr',
                       'value'), 'neutron_dvr', None),
        WhiteListRule(('syslog', 'syslog_transport', 'value'),
                      'syslog_transport', None),
        WhiteListRule(('provision', 'method', 'value'),
                      'provision_method', None),
        WhiteListRule(('kernel_params', 'kernel', 'value'),
                      'kernel_params', None),
        # bool transforms below report only *whether* a value is set,
        # not the value itself.
        WhiteListRule(('external_mongo', 'mongo_replset', 'value'),
                      'external_mongo_replset', bool),
        WhiteListRule(('external_ntp', 'ntp_list', 'value'),
                      'external_ntp_list', bool),
        WhiteListRule(('repo_setup', 'repos', 'value'), 'repos', bool),
        WhiteListRule(('storage', 'volumes_lvm', 'value'),
                      'volumes_lvm', None),
        WhiteListRule(('storage', 'iser', 'value'), 'iser', None),
        WhiteListRule(('storage', 'volumes_ceph', 'value'),
                      'volumes_ceph', None),
        WhiteListRule(('storage', 'images_ceph', 'value'),
                      'images_ceph', None),
        WhiteListRule(('storage', 'images_vcenter', 'value'),
                      'images_vcenter', None),
        WhiteListRule(('storage', 'ephemeral_ceph', 'value'),
                      'ephemeral_ceph', None),
        WhiteListRule(('storage', 'objects_ceph', 'value'),
                      'objects_ceph', None),
        WhiteListRule(('storage', 'osd_pool_size', 'value'),
                      'osd_pool_size', None),
        WhiteListRule(('neutron_mellanox', 'plugin', 'value'),
                      'mellanox', None),
        WhiteListRule(('neutron_mellanox', 'vf_num', 'value'),
                      'mellanox_vf_num', None),
        WhiteListRule(('additional_components', 'sahara', 'value'),
                      'sahara', None),
        WhiteListRule(('additional_components', 'murano', 'value'),
                      'murano', None),
        WhiteListRule(('additional_components', 'heat', 'value'),
                      'heat', None),
        WhiteListRule(('additional_components', 'ceilometer', 'value'),
                      'ceilometer', None),
        WhiteListRule(('additional_components', 'mongo', 'value'),
                      'mongo', None),
        WhiteListRule(('workloads_collector', 'enabled', 'value'),
                      'workloads_collector_enabled', None),
        WhiteListRule(('public_ssl', 'horizon', 'value'),
                      'public_ssl_horizon', None),
        WhiteListRule(('public_ssl', 'services', 'value'),
                      'public_ssl_services', None),
        WhiteListRule(('public_ssl', 'cert_source', 'value'),
                      'public_ssl_cert_source', None),
    )

    # Whitelist of VMware attribute values that may be reported.
    vmware_attributes_white_list = (
        # ((path, to, property), 'map_to_name', transform_function)
        WhiteListRule(('value', 'availability_zones', 'cinder', 'enable'),
                      'vmware_az_cinder_enable', None),
        # We add 'vsphere_cluster' into path for enter into nested list.
        # Private value of 'vsphere_cluster' is not collected, we only
        # computes length of the nested list
        WhiteListRule(('value', 'availability_zones', 'nova_computes',
                       'vsphere_cluster'), 'vmware_az_nova_computes_num', len),
    )

    def fuel_release_info(self):
        """Return release info for the current Fuel version, falling back
        to settings.VERSION when the version file lacks the key."""
        versions = utils.get_fuel_release_versions(settings.FUEL_VERSION_FILE)
        if settings.FUEL_VERSION_KEY not in versions:
            versions[settings.FUEL_VERSION_KEY] = settings.VERSION
        return versions[settings.FUEL_VERSION_KEY]

    def get_network_configuration_info(self, cluster):
        """Extract the nova-network or neutron specific settings from the
        cluster's network configuration model."""
        network_config = cluster.network_config
        result = {}
        if isinstance(network_config, NovaNetworkConfig):
            result['net_manager'] = network_config.net_manager
            result['fixed_networks_vlan_start'] = \
                network_config.fixed_networks_vlan_start
            result['fixed_network_size'] = network_config.fixed_network_size
            result['fixed_networks_amount'] = \
                network_config.fixed_networks_amount
        elif isinstance(network_config, NeutronConfig):
            result['segmentation_type'] = network_config.segmentation_type
            result['net_l23_provider'] = network_config.net_l23_provider
        return result

    def get_clusters_info(self):
        """Build the per-cluster statistics records for all clusters."""
        clusters = ClusterCollection.all()
        clusters_info = []
        for cluster in clusters:
            release = cluster.release
            nodes_num = NodeCollection.filter_by(
                None, cluster_id=cluster.id).count()
            # VMware attributes may be absent entirely.
            vmware_attributes_editable = None
            if cluster.vmware_attributes:
                vmware_attributes_editable = cluster.vmware_attributes.editable
            cluster_info = {
                'id': cluster.id,
                'nodes_num': nodes_num,
                'release': {
                    'os': release.operating_system,
                    'name': release.name,
                    'version': release.version
                },
                'mode': cluster.mode,
                'nodes': self.get_nodes_info(cluster.nodes),
                'node_groups': self.get_node_groups_info(cluster.node_groups),
                'status': cluster.status,
                'extensions': cluster.extensions,
                # Only whitelisted attributes are collected.
                'attributes': self.get_attributes(cluster.attributes.editable,
                                                  self.attributes_white_list),
                'vmware_attributes': self.get_attributes(
                    vmware_attributes_editable,
                    self.vmware_attributes_white_list
                ),
                'net_provider': cluster.net_provider,
                'fuel_version': cluster.fuel_version,
                'is_customized': cluster.is_customized,
                'network_configuration': self.get_network_configuration_info(
                    cluster),
                'installed_plugins': self.get_cluster_plugins_info(cluster)
            }
            clusters_info.append(cluster_info)
        return clusters_info

    def get_cluster_plugins_info(self, cluster):
        """Describe the plugins installed into *cluster*."""
        plugins_info = []
        for plugin_inst in cluster.plugins:
            plugin_info = {
                "id": plugin_inst.id,
                "name": plugin_inst.name,
                "version": plugin_inst.version,
                "releases": plugin_inst.releases,
                "fuel_version": plugin_inst.fuel_version,
                "package_version": plugin_inst.package_version,
            }
            plugins_info.append(plugin_info)
        return plugins_info

    def get_attributes(self, attributes, white_list):
        """Filter *attributes* through *white_list*, returning a dict of
        mapped names to (optionally transformed) values.

        Attributes whose path is missing or of the wrong shape are
        silently skipped.
        """
        result_attrs = {}
        for path, map_to_name, func in white_list:
            try:
                result_attrs[map_to_name] = get_attr_value(
                    path, func, attributes)
            except (KeyError, TypeError):
                pass
        return result_attrs

    def get_node_meta(self, node):
        """Return a sanitized copy of a node's hardware metadata.

        fqdn, serial and interface MACs are removed -- presumably to avoid
        reporting identifying hardware information (TODO confirm intent).
        """
        meta = copy.deepcopy(node.meta)
        result = {}

        if not meta:
            return result

        to_copy = ['cpu', 'memory', 'disks']
        for param in to_copy:
            result[param] = meta.get(param)

        system = meta.get('system', {})
        system.pop('fqdn', None)
        system.pop('serial', None)
        result['system'] = system

        interfaces = meta.get('interfaces', [])
        result['interfaces'] = []
        for interface in interfaces:
            interface.pop('mac')
            result['interfaces'].append(interface)

        return result

    def get_nodes_info(self, nodes):
        """Build the statistics record for every node in *nodes*."""
        nodes_info = []
        for node in nodes:
            node_info = {
                'id': node.id,
                'group_id': node.group_id,
                'roles': node.roles,
                'os': node.os_platform,
                'status': node.status,
                'error_type': node.error_type,
                'online': node.online,
                'manufacturer': node.manufacturer,
                'platform_name': node.platform_name,
                'meta': self.get_node_meta(node),
                'pending_addition': node.pending_addition,
                'pending_deletion': node.pending_deletion,
                'pending_roles': node.pending_roles,
                'nic_interfaces':
                self.get_node_intefaces_info(node.nic_interfaces, bond=False),
                'bond_interfaces':
                self.get_node_intefaces_info(node.bond_interfaces, bond=True),
            }
            nodes_info.append(node_info)
        return nodes_info

    # NOTE: the historical 'intefaces' spelling is kept -- renaming the
    # method would break external callers.
    def get_node_intefaces_info(self, interfaces, bond):
        """Describe a node's NIC or bond interfaces (ids only; bonds also
        list their slave interface ids)."""
        ifs_info = []
        for interface in interfaces:
            if_info = {
                'id': interface.id
            }
            if bond:
                if_info['slaves'] = [s.id for s in interface.slaves]
            ifs_info.append(if_info)
        return ifs_info

    def get_node_groups_info(self, node_groups):
        """Describe node groups as (group id, member node ids)."""
        groups_info = []
        for group in node_groups:
            group_info = {
                'id': group.id,
                'nodes': [n.id for n in group.nodes]
            }
            groups_info.append(group_info)
        return groups_info

    def get_installation_info(self):
        """Assemble the top-level installation statistics record."""
        clusters_info = self.get_clusters_info()
        allocated_nodes_num = sum([c['nodes_num'] for c in clusters_info])
        unallocated_nodes_num = NodeCollection.filter_by(
            None, cluster_id=None).count()

        info = {
            'user_information': self.get_user_info(),
            'master_node_uid': self.get_master_node_uid(),
            'fuel_release': self.fuel_release_info(),
            'clusters': clusters_info,
            'clusters_num': len(clusters_info),
            'allocated_nodes_num': allocated_nodes_num,
            'unallocated_nodes_num': unallocated_nodes_num
        }

        return info

    def get_master_node_uid(self):
        """Return the master node uid, or None when settings are absent."""
        return getattr(MasterNodeSettings.get_one(), 'master_node_uid', None)

    def get_user_info(self):
        """Return the user's contact info, but only when the user both
        saved a choice and opted in to sending it."""
        try:
            stat_settings = MasterNodeSettings.get_one(). \
                settings.get("statistics", {})
            result = {
                "contact_info_provided":
                stat_settings.get("user_choice_saved", {}).get("value", False)
                and stat_settings.get("send_user_info", {}).get("value", False)
            }
            if result["contact_info_provided"]:
                result["name"] = stat_settings.get("name", {}).get("value")
                result["email"] = stat_settings.get("email", {}).get("value")
                result["company"] = stat_settings.get("company", {}).\
                    get("value")
            return result
        except AttributeError:
            # No master node settings row at all.
            return {"contact_info_provided": False}
| {
"content_hash": "feb45b85786443d77a49c6de231a1f67",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 79,
"avg_line_length": 41.62662337662338,
"alnum_prop": 0.5455892676078309,
"repo_name": "SmartInfrastructures/fuel-web-dev",
"id": "d7f7121d220c6e99b6087e0aceb4829cee940f37",
"size": "13431",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nailgun/nailgun/statistics/fuel_statistics/installation_info.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "91131"
},
{
"name": "HTML",
"bytes": "7949"
},
{
"name": "JavaScript",
"bytes": "945307"
},
{
"name": "Mako",
"bytes": "1943"
},
{
"name": "Python",
"bytes": "3961568"
},
{
"name": "Ruby",
"bytes": "14701"
},
{
"name": "Shell",
"bytes": "24392"
}
],
"symlink_target": ""
} |
"""Unit tests for wx.Colour and wx.Color.
Methods yet to test:
__del__, __eq__, __getitem__, __len__, __ne__, __nonzero__, __reduce__,
__repr__, __str__, GetAsString, GetPixel"""
import unittest
import wx
import wxtest
def getColourEquivalents():
    """All (equivalent, wx.Colour) pairs: colour names, hex strings and
    RGB tuples.

    Doesn't include wx.Colour instances as equivalents, only the forms
    that stand in for them.
    """
    pairs = (getColourEquivalentNames() +
             getColourEquivalentHexValues() +
             getColourEquivalentTuples())
    return tuple(pairs)
def getColourEquivalentNames():
    """Pairs of (colour name, wx.Colour) for every known colour name."""
    database = wx.TheColourDatabase
    return tuple((name, database.FindColour(name))
                 for name in getColourNames())
def getColourEquivalentHexValues():
    """Pairs of ('#rrggbb' hex string, wx.Colour)."""
    return tuple((hexify(colour), colour)
                 for _name, colour in getColourEquivalentNames())
def getColourEquivalentTuples():
    """Pairs of ((r, g, b) tuple, wx.Colour)."""
    return tuple((colour.Get(), colour)
                 for _name, colour in getColourEquivalentNames())
def hexify(col):
    """Return *col* as an HTML-style '#rrggbb' lowercase hex string.

    *col* is anything with a Get() method returning an (r, g, b) tuple of
    0-255 ints, e.g. a wx.Colour.
    """
    # '%02x' zero-pads each channel to two lowercase hex digits, replacing
    # the original manual hex()-slicing and per-channel padding.
    (r, g, b) = col.Get()
    return '#%02x%02x%02x' % (r, g, b)
def getColourNames():
    """Colour names from inspection of wx.TheColourDatabase.

    These appear to be identical on all platforms"""
    # Hard-coded snapshot of the database's contents; order matters only
    # for reproducibility of the derived pair tuples.
    return ('BLACK','BLUE','SLATE BLUE','GREEN','SPRING GREEN','CYAN','NAVY',
        'STEEL BLUE','FOREST GREEN','SEA GREEN','DARK GREY','MIDNIGHT BLUE',
        'DARK GREEN','DARK SLATE GREY','MEDIUM BLUE','SKY BLUE','LIME GREEN',
        'MEDIUM AQUAMARINE','CORNFLOWER BLUE','MEDIUM SEA GREEN','INDIAN RED',
        'VIOLET','DARK OLIVE GREEN','DIM GREY','CADET BLUE','MEDIUM GREY',
        'DARK SLATE BLUE','MEDIUM FOREST GREEN','SALMON','DARK TURQUOISE',
        'AQUAMARINE','MEDIUM TURQUOISE','MEDIUM SLATE BLUE','MEDIUM SPRING GREEN',
        'GREY','FIREBRICK','MAROON','SIENNA','LIGHT STEEL BLUE','PALE GREEN',
        'MEDIUM ORCHID','GREEN YELLOW','DARK ORCHID','YELLOW GREEN','BLUE VIOLET',
        'KHAKI','BROWN','TURQUOISE','PURPLE','LIGHT BLUE','LIGHT GREY','ORANGE',
        'VIOLET RED','GOLD','THISTLE','WHEAT','MEDIUM VIOLET RED','ORCHID',
        'TAN','GOLDENROD','PLUM','MEDIUM GOLDENROD','RED','ORANGE RED',
        'LIGHT MAGENTA','CORAL','PINK','YELLOW','WHITE')
# -----------------------------------------------------------
class ColourTest(unittest.TestCase):
    """Tests for wx.Colour construction, accessors and validity.

    NOTE(review): assertEquals/assert_ are deprecated aliases in modern
    unittest; kept here for this Python 2-era wx test suite.
    """

    def setUp(self):
        # Each test gets its own wx application instance.
        self.app = wx.PySimpleApp()

    def tearDown(self):
        self.app.Destroy()

    def testColorColourAlias_wxColourOnly(self):
        """wx.Color is an alias of wx.Colour."""
        self.assertEquals(wx.Color, wx.Colour)

    def testSetFromName(self):
        """SetFromName"""
        for name,colour in getColourEquivalentNames():
            newcol = wx.Colour()
            newcol.SetFromName(name)
            self.assertEquals(colour, newcol)

    def testConstructor(self):
        """__init__"""
        # Channel values outside 0-255 must raise.
        self.assertRaises(OverflowError, wx.Colour, -1)
        self.assertRaises(OverflowError, wx.Colour, 256)

    def testGetSetRGB(self):
        """SetRGB, GetRGB"""
        # Round-trip the packed RGB value through a fresh colour.
        for tup,color in getColourEquivalentTuples():
            sludge = color.GetRGB()
            del color
            color = wx.Colour()
            color.SetRGB(sludge)
            self.assertEquals(sludge, color.GetRGB())

    def testMultipleAccessors(self):
        """Get, Set"""
        for i in range(256):
            color = wx.Color()
            color.Set(i,i,i,i)
            self.assertEquals((i,i,i), color.Get())
            self.assertEquals(i, color.Alpha())

    def testOk(self):
        """IsOk, Ok"""
        c1 = wx.Colour(255,255,255,255)
        c2 = wx.Colour(0,0,0,0)
        c3 = wx.Colour()
        for color in (c1, c2, c3):
            self.assert_(color.IsOk())
            self.assert_(color.Ok())

    def testOkFalse(self):
        """IsOk, Ok"""
        # HACK: to generate an invalid wx.Colour instance
        # NOTE: cannot access colBg directly without crashing the interpreter
        attr = wx.VisualAttributes()
        self.assert_(not attr.colBg.Ok())
        self.assert_(not attr.colBg.IsOk())

    def testSingleAccessors(self):
        """Red, Green, Blue, Alpha"""
        for i in range(256):
            colour = wx.Colour(i,i,i,i)
            self.assertEquals(i, colour.Red())
            self.assertEquals(i, colour.Green())
            self.assertEquals(i, colour.Blue())
            self.assertEquals(i, colour.Alpha())
if __name__ == '__main__':
    # Self-test of getColourEquivalents: every equivalent form must set
    # the same background colour as the wx.Colour it is paired with.
    print "Testing getColourEquivalents... ",
    app = wx.PySimpleApp()
    f = wx.Frame(None)
    for test, colour in getColourEquivalents():
        f.SetBackgroundColour(test)
        #print 'Test: ', test
        #print 'Colour: ', colour
        assert colour == f.GetBackgroundColour()
    print "Done"
    unittest.main()
"content_hash": "3172b3663713ff92805ba80ff38e8aad",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 92,
"avg_line_length": 36.43703703703704,
"alnum_prop": 0.5741004269160398,
"repo_name": "ifwe/wxpy",
"id": "c39969f11f35517e5faa6b9d0a550af74c762ead",
"size": "4919",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tests/wxPythonTests/testColour.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import functools
import threading
import time
import traceback
from pystachio import Default, Integer, String
from thrift.protocol import TJSONProtocol
from thrift.transport import TTransport
from twitter.common import log
from twitter.common.concurrent import Timeout, deadline
from twitter.common.quantity import Amount, Time
from twitter.common.zookeeper.kazoo_client import TwitterKazooClient
from twitter.common.zookeeper.serverset import ServerSet
from apache.aurora.common.auth.auth_module_manager import (
SessionKeyError,
get_auth_handler,
make_session_key
)
from apache.aurora.common.cluster import Cluster
from apache.aurora.common.transport import TRequestsTransport
from gen.apache.aurora.api import AuroraAdmin, ReadOnlyScheduler
from gen.apache.aurora.api.constants import THRIFT_API_VERSION
from gen.apache.aurora.api.ttypes import ResponseCode
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
class SchedulerClientTrait(Cluster.Trait):
  """Cluster trait schema describing how to reach the scheduler: either
  through a ZooKeeper serverset (zk/zk_port/scheduler_zk_path) or a
  direct endpoint (scheduler_uri)."""
  # ZooKeeper ensemble host used for serverset-based discovery.
  zk = String  # noqa
  zk_port = Default(Integer, 2181)  # noqa
  # Path of the scheduler serverset within ZooKeeper.
  scheduler_zk_path = String  # noqa
  # Direct scheduler endpoint, used when no ZooKeeper ensemble is given.
  scheduler_uri = String  # noqa
  # NOTE(review): not referenced in the visible code -- presumably a URL
  # for proxying scheduler links; confirm against callers.
  proxy_url = String  # noqa
  # Authentication mechanism name resolved via get_auth_handler().
  auth_mechanism = Default(String, 'UNAUTHENTICATED')  # noqa
class SchedulerClient(object):
  """Base class for thrift RPC clients of the Aurora scheduler.

  Subclasses resolve the scheduler endpoint (e.g. via ZooKeeper serverset
  or a direct URI) by overriding _connect().
  """
  # Number of attempts to open the thrift transport before giving up.
  THRIFT_RETRIES = 5
  # Pause between connection attempts.
  RETRY_TIMEOUT = Amount(1, Time.SECONDS)

  class Error(Exception): pass
  class CouldNotConnect(Error): pass

  # TODO(wickman) Refactor per MESOS-3005 into two separate classes with separate traits:
  #   ZookeeperClientTrait
  #   DirectClientTrait
  @classmethod
  def get(cls, cluster, auth_factory=get_auth_handler, **kwargs):
    """Factory: build the appropriate client for *cluster* (ZooKeeper
    serverset discovery if 'zk' is set, otherwise a direct URI).

    Raises TypeError for a non-Cluster argument and ValueError when the
    cluster specifies neither zk nor scheduler_uri.
    """
    if not isinstance(cluster, Cluster):
      raise TypeError('"cluster" must be an instance of Cluster, got %s' % type(cluster))
    cluster = cluster.with_trait(SchedulerClientTrait)
    auth_handler = auth_factory(cluster.auth_mechanism)
    if cluster.zk:
      return ZookeeperSchedulerClient(cluster, port=cluster.zk_port, auth=auth_handler, **kwargs)
    elif cluster.scheduler_uri:
      return DirectSchedulerClient(cluster.scheduler_uri, auth=auth_handler, **kwargs)
    else:
      raise ValueError('"cluster" does not specify zk or scheduler_uri')

  def __init__(self, auth, user_agent, verbose=False):
    self._client = None
    self._auth = auth
    self._user_agent = user_agent
    self._verbose = verbose

  def get_thrift_client(self):
    """Lazily connect on first use and cache the thrift client."""
    if self._client is None:
      self._client = self._connect()
    return self._client

  # per-class implementation -- mostly meant to set up a valid host/port
  # pair and then delegate the opening to SchedulerClient._connect_scheduler
  def _connect(self):
    return None

  def _connect_scheduler(self, uri, clock=time):
    """Open a TJSON-over-HTTP thrift client to *uri*, retrying transient
    transport errors up to THRIFT_RETRIES times.

    Raises CouldNotConnect when all retries are exhausted or a
    non-transport error occurs.  *clock* is injectable for testing.
    """
    transport = TRequestsTransport(uri, auth=self._auth, user_agent=self._user_agent)
    protocol = TJSONProtocol.TJSONProtocol(transport)
    schedulerClient = AuroraAdmin.Client(protocol)
    for _ in range(self.THRIFT_RETRIES):
      try:
        transport.open()
        return schedulerClient
      except TTransport.TTransportException:
        # Transient transport failure: back off and retry.
        clock.sleep(self.RETRY_TIMEOUT.as_(Time.SECONDS))
        continue
      except Exception as e:
        # Monkey-patched proxies, like socks, can generate a proxy error here.
        # without adding a dependency, we can't catch those in a more specific way.
        raise self.CouldNotConnect('Connection to scheduler failed: %s' % e)
    raise self.CouldNotConnect('Could not connect to %s' % uri)
class ZookeeperSchedulerClient(SchedulerClient):
  """SchedulerClient that discovers the scheduler endpoint through a
  ZooKeeper-hosted serverset."""

  # How long to wait for serverset membership before giving up.
  SERVERSET_TIMEOUT = Amount(10, Time.SECONDS)

  @classmethod
  def get_scheduler_serverset(cls, cluster, port=2181, verbose=False, **kw):
    """Build a (kazoo client, ServerSet) pair for `cluster`.

    Raises ValueError if the cluster lacks a zk ensemble or a scheduler
    zk path.
    """
    if cluster.zk is None:
      raise ValueError('Cluster has no associated zookeeper ensemble!')
    if cluster.scheduler_zk_path is None:
      raise ValueError('Cluster has no defined scheduler path, must specify scheduler_zk_path '
                       'in your cluster config!')
    # Append the same port to every comma-separated ensemble host.
    hosts = [h + ':{p}' for h in cluster.zk.split(',')]
    zk = TwitterKazooClient.make(str(','.join(hosts).format(p=port)), verbose=verbose)
    return zk, ServerSet(zk, cluster.scheduler_zk_path, **kw)

  def __init__(self, cluster, port=2181, verbose=False, _deadline=deadline, **kwargs):
    """`_deadline` is injectable for testing; defaults to
    twitter.common.concurrent.deadline."""
    SchedulerClient.__init__(self, verbose=verbose, **kwargs)
    self._cluster = cluster
    self._zkport = port
    # NOTE(review): _endpoint is assigned but never read within this class --
    # presumably kept for external/API parity; confirm before removing.
    self._endpoint = None
    self._uri = None
    self._deadline = _deadline

  def _resolve(self):
    """Resolve the uri associated with this scheduler from zookeeper."""
    joined = threading.Event()
    def on_join(elements):
      joined.set()
    zk, serverset = self.get_scheduler_serverset(self._cluster, verbose=self._verbose,
                                                 port=self._zkport, on_join=on_join)
    # Wait (bounded) for the serverset watch to fire before reading members.
    joined.wait(timeout=self.SERVERSET_TIMEOUT.as_(Time.SECONDS))
    try:
      # Need to perform this operation in a separate thread, because kazoo will wait for the
      # result of this serverset evaluation indefinitely, which will prevent people killing
      # the client with keyboard interrupts.
      serverset_endpoints = self._deadline(lambda: list(serverset),
          timeout=self.SERVERSET_TIMEOUT.as_(Time.SECONDS), daemon=True, propagate=True)
    except Timeout:
      raise self.CouldNotConnect("Failed to connect to Zookeeper within %d seconds." %
          self.SERVERSET_TIMEOUT.as_(Time.SECONDS))
    if len(serverset_endpoints) == 0:
      raise self.CouldNotConnect('No schedulers detected in %s!' % self._cluster.name)
    # Only the first member is consulted; prefer its https endpoint if
    # advertised, falling back to http.
    instance = serverset_endpoints[0]
    if 'https' in instance.additional_endpoints:
      endpoint = instance.additional_endpoints['https']
      self._uri = 'https://%s:%s' % (endpoint.host, endpoint.port)
    elif 'http' in instance.additional_endpoints:
      endpoint = instance.additional_endpoints['http']
      self._uri = 'http://%s:%s' % (endpoint.host, endpoint.port)
    zk.stop()

  def _connect(self):
    # Lazily resolve the endpoint, then connect to its /api path.
    if self._uri is None:
      self._resolve()
    if self._uri is not None:
      return self._connect_scheduler(urljoin(self._uri, 'api'))

  @property
  def url(self):
    """Scheduler UI url, honoring any configured proxy_url."""
    proxy_url = self._cluster.proxy_url
    if proxy_url:
      return proxy_url
    return self.raw_url

  @property
  def raw_url(self):
    """The resolved scheduler uri itself (no proxy indirection)."""
    if self._uri is None:
      self._resolve()
    if self._uri:
      return self._uri
class DirectSchedulerClient(SchedulerClient):
  """SchedulerClient for a statically configured scheduler uri."""

  def __init__(self, uri, verbose=True, **kwargs):
    super(DirectSchedulerClient, self).__init__(verbose=verbose, **kwargs)
    self._uri = uri

  def _connect(self):
    # The thrift endpoint lives under /api on the configured uri.
    return self._connect_scheduler(urljoin(self._uri, 'api'))

  @property
  def url(self):
    return self._uri

  @property
  def raw_url(self):
    # No discovery or proxying here: raw and proxied urls coincide.
    return self._uri
class SchedulerProxy(object):
  """
  This class is responsible for creating a reliable thrift client to the
  twitter scheduler. Basically all the dirty work needed by the
  AuroraClientAPI.
  """
  # Bound on the initial connect loop and on each RPC's retry window.
  CONNECT_MAXIMUM_WAIT = Amount(1, Time.MINUTES)
  RPC_RETRY_INTERVAL = Amount(5, Time.SECONDS)
  RPC_MAXIMUM_WAIT = Amount(10, Time.MINUTES)

  # Exception hierarchy rooted at Error so callers can catch broadly.
  class Error(Exception): pass
  class TimeoutError(Error): pass
  class TransientError(Error): pass
  class AuthError(Error): pass
  class APIVersionError(Error): pass
  class ThriftInternalError(Error): pass

  def __init__(self, cluster, verbose=False, session_key_factory=make_session_key, **kwargs):
    """A callable session_key_factory should be provided for authentication"""
    self.cluster = cluster
    # TODO(Sathya): Make this a part of cluster trait when authentication is pushed to the transport
    # layer.
    self._session_key_factory = session_key_factory
    self._client = self._scheduler_client = None
    self.verbose = verbose
    # Serializes all proxied RPCs (see method_wrapper in __getattr__).
    self._lock = threading.RLock()
    self._terminating = threading.Event()
    # Extra kwargs are forwarded to SchedulerClient.get().
    self._kwargs = kwargs

  # NOTE: deliberately defined without @staticmethod -- it is used as a
  # decorator on the methods below while the class body executes.
  def with_scheduler(method):
    """Decorator magic to make sure a connection is made to the scheduler"""
    def _wrapper(self, *args, **kwargs):
      if not self._scheduler_client:
        self._construct_scheduler()
      return method(self, *args, **kwargs)
    return _wrapper

  def invalidate(self):
    """Drop the cached clients so the next call reconnects."""
    self._client = self._scheduler_client = None

  def terminate(self):
    """Requests immediate termination of any retry attempts and invalidates client."""
    self._terminating.set()
    self.invalidate()

  @with_scheduler
  def client(self):
    """The connected AuroraAdmin thrift client (connects on demand)."""
    return self._client

  @with_scheduler
  def scheduler_client(self):
    """The underlying SchedulerClient (connects on demand)."""
    return self._scheduler_client

  def session_key(self):
    """Create a fresh session key, translating factory failures to AuthError."""
    try:
      return self._session_key_factory(self.cluster.auth_mechanism)
    except SessionKeyError as e:
      raise self.AuthError('Unable to create session key %s' % e)

  def _construct_scheduler(self):
    """
      Populates:
        self._scheduler_client
        self._client
    """
    self._scheduler_client = SchedulerClient.get(self.cluster, verbose=self.verbose, **self._kwargs)
    assert self._scheduler_client, "Could not find scheduler (cluster = %s)" % self.cluster.name
    start = time.time()
    while (time.time() - start) < self.CONNECT_MAXIMUM_WAIT.as_(Time.SECONDS):
      try:
        # this can wind up generating any kind of error, because it turns into
        # a call to a dynamically set authentication module.
        self._client = self._scheduler_client.get_thrift_client()
        break
      except SchedulerClient.CouldNotConnect as e:
        log.warning('Could not connect to scheduler: %s' % e)
      except Exception as e:
        # turn any auth module exception into an auth error.
        log.debug('Warning: got an unknown exception during authentication:')
        log.debug(traceback.format_exc())
        raise self.AuthError('Error connecting to scheduler: %s' % e)
    if not self._client:
      raise self.TimeoutError('Timed out trying to connect to scheduler at %s' % self.cluster.name)

  def __getattr__(self, method_name):
    """Proxy unknown attributes to AuroraAdmin.Client methods, wrapping
    each call with locking, auth injection, retry and error translation."""
    # If the method does not exist, getattr will return AttributeError for us.
    method = getattr(AuroraAdmin.Client, method_name)
    if not callable(method):
      return method
    @functools.wraps(method)
    def method_wrapper(*args):
      with self._lock:
        start = time.time()
        # Retry until success, a fatal error, termination, or the RPC window
        # (RPC_MAXIMUM_WAIT) is exhausted.
        while not self._terminating.is_set() and (
            time.time() - start) < self.RPC_MAXIMUM_WAIT.as_(Time.SECONDS):
          # Only automatically append a SessionKey if this is not part of the read-only API.
          auth_args = () if hasattr(ReadOnlyScheduler.Iface, method_name) else (self.session_key(),)
          try:
            method = getattr(self.client(), method_name)
            if not callable(method):
              return method
            resp = method(*(args + auth_args))
            # ERROR_TRANSIENT responses are raised so the retry loop below
            # reconnects and tries again.
            if resp is not None and resp.responseCode == ResponseCode.ERROR_TRANSIENT:
              raise self.TransientError(", ".join(
                  [m.message for m in resp.details] if resp.details else []))
            if resp.serverInfo.thriftAPIVersion != THRIFT_API_VERSION:
              raise self.APIVersionError("Client Version: %s, Server Version: %s" %
                                         (THRIFT_API_VERSION, resp.serverInfo.thriftAPIVersion))
            return resp
          except TRequestsTransport.AuthError as e:
            raise self.AuthError(e)
          except (TTransport.TTransportException, self.TimeoutError, self.TransientError) as e:
            # Recoverable: drop the connection and retry after a pause.
            if not self._terminating.is_set():
              log.warning('Connection error with scheduler: %s, reconnecting...' % e)
              self.invalidate()
              self._terminating.wait(self.RPC_RETRY_INTERVAL.as_(Time.SECONDS))
          except Exception as e:
            # Take any error that occurs during the RPC call, and transform it
            # into something clients can handle.
            if not self._terminating.is_set():
              raise self.ThriftInternalError("Error during thrift call %s to %s: %s" %
                                             (method_name, self.cluster.name, e))
        if not self._terminating.is_set():
          raise self.TimeoutError('Timed out attempting to issue %s to %s' % (
              method_name, self.cluster.name))
    return method_wrapper
| {
"content_hash": "6655c05b813802cbf7dc8f732c6dc247",
"timestamp": "",
"source": "github",
"line_count": 320,
"max_line_length": 100,
"avg_line_length": 38,
"alnum_prop": 0.6804276315789474,
"repo_name": "rosmo/aurora",
"id": "8e91788d8cb69ef21df6b045cd07f8cb111b95b3",
"size": "12709",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/main/python/apache/aurora/client/api/scheduler_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7070"
},
{
"name": "Groovy",
"bytes": "13037"
},
{
"name": "HTML",
"bytes": "56610"
},
{
"name": "Java",
"bytes": "3365999"
},
{
"name": "JavaScript",
"bytes": "105302"
},
{
"name": "Makefile",
"bytes": "1372"
},
{
"name": "Python",
"bytes": "1414935"
},
{
"name": "Ruby",
"bytes": "4315"
},
{
"name": "Shell",
"bytes": "59236"
},
{
"name": "Smalltalk",
"bytes": "79"
},
{
"name": "Smarty",
"bytes": "25233"
},
{
"name": "Thrift",
"bytes": "56144"
}
],
"symlink_target": ""
} |
import abc
import bisect
import copy
import math
import six
from rally.common import costilius
from rally.common import streaming_algorithms as streaming
from rally import exceptions
from rally.task.processing import utils
@six.add_metaclass(abc.ABCMeta)
class Chart(object):
    """Base class for charts."""

    def __init__(self, benchmark_info, zipped_size=1000):
        """Setup initial values.

        :param benchmark_info: dict, generalized info about iterations.
                               The key `iterations_count' is expected to
                               hold the total data size as an int
        :param zipped_size: int maximum number of points on scale
        """
        self._data = costilius.OrderedDict()  # Container for results
        self._benchmark_info = benchmark_info
        self.base_size = benchmark_info.get("iterations_count", 0)
        self.zipped_size = zipped_size

    def add_iteration(self, iteration):
        """Add iteration data.

        Must be called once per iteration. Data is processed in a
        streaming fashion (one GraphZipper per series), so an unlimited
        number of iterations can be handled with low memory usage.
        """
        for key, point in self._map_iteration_values(iteration):
            zipper = self._data.get(key)
            if zipper is None:
                zipper = utils.GraphZipper(self.base_size, self.zipped_size)
                self._data[key] = zipper
            zipper.add_point(point)

    def render(self):
        """Generate chart data ready for drawing."""
        rendered = []
        for key, zipper in self._data.items():
            rendered.append((key, zipper.get_zipped_graph()))
        return rendered

    def _fix_atomic_actions(self, iteration):
        """Normalize atomic actions, defaulting missing ones to 0.

        Atomic actions can be absent from some iterations due to
        failures, so all atomic-action processing must go through this
        helper first.
        """
        atomic = iteration["atomic_actions"]
        for action_name in self._benchmark_info["atomic"]:
            atomic.setdefault(action_name, 0)
        return iteration

    @abc.abstractmethod
    def _map_iteration_values(self, iteration):
        """Get values for processing, from given iteration."""
class MainStackedAreaChart(Chart):

    def _map_iteration_values(self, iteration):
        """Map an iteration onto duration/idle(/failed) stacked series."""
        failed = bool(iteration["error"])
        duration = iteration["duration"]
        idle = iteration["idle_duration"]
        if failed:
            values = [("duration", 0), ("idle_duration", 0)]
        else:
            values = [("duration", duration), ("idle_duration", idle)]
        if self._benchmark_info["iterations_failed"]:
            # A failed iteration contributes its whole wall time to the
            # failed series; a successful one contributes zero.
            values.append(
                ("failed_duration", duration + idle if failed else 0))
        return values
class AtomicStackedAreaChart(Chart):

    def _map_iteration_values(self, iteration):
        """Map an iteration onto per-atomic-action stacked series."""
        iteration = self._fix_atomic_actions(iteration)
        values = list(iteration["atomic_actions"].items())
        if self._benchmark_info["iterations_failed"]:
            failed_duration = 0
            if iteration["error"]:
                # Wall time not accounted for by atomic actions is
                # charged to the failed series.
                accounted = sum((duration or 0) for _, duration in values)
                failed_duration = (iteration["duration"]
                                   + iteration["idle_duration"] - accounted)
            values.append(("failed_duration", failed_duration))
        return values
class OutputStackedAreaChart(Chart):

    def _map_iteration_values(self, iteration):
        """Pick scenario output values for each configured output name."""
        data = iteration["scenario_output"]["data"]
        return [(output, data.get(output, 0))
                for output in self._benchmark_info["output_names"]]
class AvgChart(Chart):
    """Base class for charts with average results."""

    def add_iteration(self, iteration):
        """Feed every mapped value into its series' running mean."""
        for key, value in self._map_iteration_values(iteration):
            tracker = self._data.get(key)
            if tracker is None:
                tracker = self._data[key] = streaming.MeanComputation()
            # None values count as zero toward the mean.
            tracker.add(value or 0)

    def render(self):
        """Return (name, mean) pairs for every collected series."""
        return [(key, tracker.result())
                for key, tracker in self._data.items()]
class AtomicAvgChart(AvgChart):

    def _map_iteration_values(self, iteration):
        """Average over atomic actions (missing ones normalized to 0)."""
        normalized = self._fix_atomic_actions(iteration)
        return list(normalized["atomic_actions"].items())
class LoadProfileChart(Chart):
    """Chart for parallel durations."""

    def __init__(self, benchmark_info, name="parallel iterations",
                 scale=200):
        """Setup chart with graph name and scale.

        :param benchmark_info: dict, generalized info about iterations
        :param name: str name for X axis
        :param scale: int number of X points
        """
        super(LoadProfileChart, self).__init__(benchmark_info)
        self._name = name
        self._duration = benchmark_info["load_duration"]
        self._tstamp_start = benchmark_info["tstamp_start"]
        # NOTE(amaretskiy): Determine a chart `step' - duration between
        # two X points, rounded with minimal accuracy (digits after point)
        # to improve JavaScript drawing performance.
        # Examples:
        #   scale duration       step (initial)    accuracy  step
        #   200   30.8043010235  0.154021505117    1         0.2
        #   200   1.25884699821  0.00629423499107  3         0.006
        step = self._duration / float(scale)
        if step == 0:
            accuracy = 0
        else:
            accuracy = max(-int(math.floor(math.log10(step))), 0)
        step = round(step, accuracy)
        self._time_axis = [step * x
                           for x in six.moves.range(1, int(scale))
                           if (step * x) < self._duration]
        self._time_axis.append(self._duration)
        self._started = [0] * len(self._time_axis)
        self._stopped = [0] * len(self._time_axis)

    def _map_iteration_values(self, iteration):
        """Return (timestamp, duration); failed iterations get duration 0."""
        return (iteration["timestamp"],
                0 if iteration["error"] else iteration["duration"])

    def _bucket_index(self, timestamp):
        """Return the time-axis bucket index for `timestamp`, clamped.

        BUG FIX: bisect.bisect() returns len(self._time_axis) for values
        greater than or equal to the last axis point (e.g. an iteration
        ending exactly at load_duration, or slightly past it because of
        the rounded step), which used to raise IndexError on the counter
        lists - clamp such values into the last bucket instead.
        """
        return min(bisect.bisect(self._time_axis, timestamp),
                   len(self._time_axis) - 1)

    def add_iteration(self, iteration):
        """Account one iteration's start/stop into per-bucket counters."""
        timestamp, duration = self._map_iteration_values(iteration)
        ts_start = timestamp - self._tstamp_start
        ts_stop = ts_start + duration
        self._started[self._bucket_index(ts_start)] += 1
        self._stopped[self._bucket_index(ts_stop)] += 1

    def render(self):
        """Return the running count of parallel iterations per time point."""
        data = []
        running = 0
        for ts, started, ended in zip(self._time_axis,
                                      self._started, self._stopped):
            running += started
            data.append([ts, running])
            running -= ended
        return [(self._name, data)]
class HistogramChart(Chart):
    """Base class for chart with histograms.

    This chart is relatively complex, because actually it is a set
    of histograms, that usually can be switched by dropdown select.
    And each histogram has several data views.
    """

    def _init_views(self, min_value, max_value):
        """Generate initial data for each histogram view."""
        if not self.base_size:
            return []
        min_value, max_value = min_value or 0, max_value or 0
        views = []
        # Three standard bin-count heuristics; each becomes a selectable
        # "view" over the same underlying data.
        for view, bins in [
                ("Square Root Choice",
                 int(math.ceil(math.sqrt(self.base_size)))),
                ("Sturges Formula",
                 int(math.ceil(math.log(self.base_size, 2) + 1))),
                ("Rice Rule",
                 int(math.ceil(2 * self.base_size ** (1.0 / 3))))]:
            # x holds each bin's upper edge; y holds its count (starts at 0).
            bin_width = float(max_value - min_value) / bins
            x_axis = [min_value + (bin_width * x) for x in range(1, bins + 1)]
            views.append({"view": view, "bins": bins,
                          "x": x_axis, "y": [0] * len(x_axis)})
        return views

    def add_iteration(self, iteration):
        """Bucket one iteration's values into every view's bin counters."""
        for name, value in self._map_iteration_values(iteration):
            if name not in self._data:
                raise KeyError("Unexpected histogram name: %s" % name)
            for i, view in enumerate(self._data[name]["views"]):
                # Increment the first bin whose upper edge covers the value.
                for bin_i, bin_v in enumerate(view["x"]):
                    if (value or 0) <= bin_v:
                        self._data[name]["views"][i]["y"][bin_i] += 1
                        break

    def render(self):
        """Return, per histogram, a list of view dicts ready for drawing."""
        data = []
        for name, hist in self._data.items():
            data.append(
                [{"key": name, "view": v["view"], "disabled": hist["disabled"],
                  "values": [{"x": x, "y": y} for x, y in zip(v["x"], v["y"])]}
                 for v in hist["views"]])
        return data
class MainHistogramChart(HistogramChart):

    def __init__(self, benchmark_info):
        """Create a single 'task' histogram over overall durations."""
        super(MainHistogramChart, self).__init__(benchmark_info)
        duration_range = (self._benchmark_info["min_duration"],
                          self._benchmark_info["max_duration"])
        self._data["task"] = {"views": self._init_views(*duration_range),
                              "disabled": None}

    def _map_iteration_values(self, iteration):
        """Failed iterations are recorded as zero duration."""
        duration = 0 if iteration["error"] else iteration["duration"]
        return [("task", duration)]
class AtomicHistogramChart(HistogramChart):

    def __init__(self, benchmark_info):
        """Create one histogram per atomic action."""
        super(AtomicHistogramChart, self).__init__(benchmark_info)
        atomic_items = self._benchmark_info["atomic"].items()
        for idx, (action_name, stats) in enumerate(atomic_items):
            views = self._init_views(stats["min_duration"],
                                     stats["max_duration"])
            self._data[action_name] = {"views": views, "disabled": idx}

    def _map_iteration_values(self, iteration):
        """One value per atomic action (missing ones normalized to 0)."""
        normalized = self._fix_atomic_actions(iteration)
        return list(normalized["atomic_actions"].items())
class MainStatsTable(Chart):
    """Streaming summary table: one row per atomic action plus 'total'.

    Each row is a list of cells; a cell is ("Column name", computation)
    or ("Column name", computation, formatter). Positional indexing is
    relied upon below: [-1] is the Count cell, [-2] is Success, and
    [1:-2] are the duration statistics.
    """

    def _init_row(self, name, iterations_count):
        """Build the cell list for one row of the table."""

        def round_3(stream, no_result):
            if no_result:
                return "n/a"
            return round(stream.result(), 3)
        return [
            ("Action", name),
            ("Min (sec)", streaming.MinComputation(), round_3),
            ("Median (sec)",
             streaming.PercentileComputation(0.5, iterations_count), round_3),
            ("90%ile (sec)",
             streaming.PercentileComputation(0.9, iterations_count), round_3),
            ("95%ile (sec)",
             streaming.PercentileComputation(0.95, iterations_count), round_3),
            ("Max (sec)", streaming.MaxComputation(), round_3),
            ("Avg (sec)", streaming.MeanComputation(), round_3),
            # Success is the mean of 0/1 outcomes, rendered as a percentage.
            ("Success", streaming.MeanComputation(),
             lambda stream, no_result:
             "%.1f%%" % (stream.result() * 100) if not no_result else "n/a"),
            ("Count", streaming.IncrementComputation(),
             lambda x, no_result: x.result())
        ]

    def __init__(self, benchmark_info, zipped_size=1000):
        # NOTE(review): deliberately does not call Chart.__init__ -- this
        # class overrides add_iteration/render and never touches self._data.
        # `zipped_size` is accepted for signature parity but unused here.
        self.rows = list(benchmark_info["atomic"].keys())
        self.rows.append("total")
        self.rows_index = dict((name, i) for i, name in enumerate(self.rows))
        self.table = [self._init_row(name, benchmark_info["iterations_count"])
                      for name in self.rows]

    def add_iteration(self, iteration):
        """Feed one iteration into every row it contributes to."""
        data = copy.copy(iteration["atomic_actions"])
        data["total"] = iteration["duration"]
        for name, value in data.items():
            index = self.rows_index[name]
            # [-1] is Count: always incremented (argument is ignored).
            self.table[index][-1][1].add(None)
            # [-2] is Success: add 1 for success, 0 for failure.
            if iteration["error"]:
                self.table[index][-2][1].add(0)
            else:
                self.table[index][-2][1].add(1)
            # [1:-2] are the duration statistics cells.
            for elem in self.table[index][1:-2]:
                elem[1].add(value)

    def render(self):
        """Render the table as {"cols": [...], "rows": [[...], ...]}."""
        rows = []
        for i in range(len(self.table)):
            row = [self.table[i][0][1]]
            # no results if all iterations failed
            try:
                no_result = self.table[i][-2][1].result() == 0.0
            except exceptions.RallyException:
                no_result = True
            row.extend(x[2](x[1], no_result) for x in self.table[i][1:])
            rows.append(row)
        return {"cols": list(map(lambda x: x[0], self.table[0])),
                "rows": rows}

    def _map_iteration_values(self, iteration):
        # Unused: this chart consumes iterations directly in add_iteration,
        # but the abstract base requires an implementation.
        pass
| {
"content_hash": "daa7777ead188b2760f8520b99a43afc",
"timestamp": "",
"source": "github",
"line_count": 324,
"max_line_length": 79,
"avg_line_length": 37.94753086419753,
"alnum_prop": 0.5628304188694592,
"repo_name": "group-policy/rally",
"id": "a38293b265426965a4b33fa64ee9fe52c2a7ba73",
"size": "12893",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rally/task/processing/charts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "35771"
},
{
"name": "Mako",
"bytes": "17389"
},
{
"name": "Python",
"bytes": "2926625"
},
{
"name": "Shell",
"bytes": "40843"
}
],
"symlink_target": ""
} |
"""
This module contains classes that help to emulate xcodebuild behavior on top of
other build systems, such as make and ninja.
"""
import gyp.common
import os.path
import re
import shlex
import subprocess
import sys
from gyp.common import GypError
class XcodeSettings(object):
"""A class that understands the gyp 'xcode_settings' object."""
# Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached
# at class-level for efficiency.
_sdk_path_cache = {}
  def __init__(self, spec):
    """Capture the gyp target dict and pre-split per-config settings.

    `spec` is a single gyp target dict; its 'configurations' entry maps
    config names to config dicts.
    """
    self.spec = spec
    # Per-target 'xcode_settings' are pushed down into configs earlier by gyp.
    # This means self.xcode_settings[config] always contains all settings
    # for that config -- the per-target settings as well. Settings that are
    # the same for all configs are implicitly per-target settings.
    self.xcode_settings = {}
    configs = spec['configurations']
    for configname, config in configs.iteritems():
      self.xcode_settings[configname] = config.get('xcode_settings', {})
    # This is only non-None temporarily during the execution of some methods.
    self.configname = None
    # Used by _AdjustLibrary to match .a and .dylib entries in libraries.
    self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$')
  def _Settings(self):
    """Return the xcode_settings dict for the currently active configname."""
    assert self.configname
    return self.xcode_settings[self.configname]
  def _Test(self, test_key, cond_key, default):
    """True iff setting `test_key` (falling back to `default`) == `cond_key`."""
    return self._Settings().get(test_key, default) == cond_key
def _Appendf(self, lst, test_key, format_str, default=None):
if test_key in self._Settings():
lst.append(format_str % str(self._Settings()[test_key]))
elif default:
lst.append(format_str % str(default))
  def _WarnUnimplemented(self, test_key):
    """Print a warning when an unsupported setting is present in the config."""
    if test_key in self._Settings():
      print 'Warning: Ignoring not yet implemented key "%s".' % test_key
  def _IsBundle(self):
    """True if the target builds a mac bundle ('mac_bundle' set and nonzero)."""
    return int(self.spec.get('mac_bundle', 0)) != 0
  def GetFrameworkVersion(self):
    """Returns the framework version of the current target. Only valid for
    bundles."""
    assert self._IsBundle()
    # Defaults to 'A' (as in Foo.framework/Versions/A) when unset.
    return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A')
def GetWrapperExtension(self):
"""Returns the bundle extension (.app, .framework, .plugin, etc). Only
valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('loadable_module', 'shared_library'):
default_wrapper_extension = {
'loadable_module': 'bundle',
'shared_library': 'framework',
}[self.spec['type']]
wrapper_extension = self.GetPerTargetSetting(
'WRAPPER_EXTENSION', default=default_wrapper_extension)
return '.' + self.spec.get('product_extension', wrapper_extension)
elif self.spec['type'] == 'executable':
return '.app'
else:
assert False, "Don't know extension for '%s', target '%s'" % (
self.spec['type'], self.spec['target_name'])
  def GetProductName(self):
    """Returns PRODUCT_NAME."""
    # 'product_name' overrides; otherwise the target name is used as-is.
    return self.spec.get('product_name', self.spec['target_name'])
def GetFullProductName(self):
"""Returns FULL_PRODUCT_NAME."""
if self._IsBundle():
return self.GetWrapperName()
else:
return self._GetStandaloneBinaryPath()
  def GetWrapperName(self):
    """Returns the directory name of the bundle represented by this target.
    Only valid for bundles."""
    assert self._IsBundle()
    # E.g. 'Chromium' + '.app'.
    return self.GetProductName() + self.GetWrapperExtension()
def GetBundleContentsFolderPath(self):
"""Returns the qualified path to the bundle's contents folder. E.g.
Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] == 'shared_library':
return os.path.join(
self.GetWrapperName(), 'Versions', self.GetFrameworkVersion())
else:
# loadable_modules have a 'Contents' folder like executables.
return os.path.join(self.GetWrapperName(), 'Contents')
def GetBundleResourceFolder(self):
"""Returns the qualified path to the bundle's resource folder. E.g.
Chromium.app/Contents/Resources. Only valid for bundles."""
assert self._IsBundle()
return os.path.join(self.GetBundleContentsFolderPath(), 'Resources')
def GetBundlePlistPath(self):
"""Returns the qualified path to the bundle's plist file. E.g.
Chromium.app/Contents/Info.plist. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('executable', 'loadable_module'):
return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist')
else:
return os.path.join(self.GetBundleContentsFolderPath(),
'Resources', 'Info.plist')
  def GetProductType(self):
    """Returns the PRODUCT_TYPE of this target."""
    # Maps the gyp target type to an Xcode product type identifier;
    # bundled and non-bundled targets use different identifier families.
    if self._IsBundle():
      return {
        'executable': 'com.apple.product-type.application',
        'loadable_module': 'com.apple.product-type.bundle',
        'shared_library': 'com.apple.product-type.framework',
      }[self.spec['type']]
    else:
      return {
        'executable': 'com.apple.product-type.tool',
        'loadable_module': 'com.apple.product-type.library.dynamic',
        'shared_library': 'com.apple.product-type.library.dynamic',
        'static_library': 'com.apple.product-type.library.static',
      }[self.spec['type']]
  def GetMachOType(self):
    """Returns the MACH_O_TYPE of this target."""
    # Weird, but matches Xcode: a non-bundled executable has no MACH_O_TYPE.
    if not self._IsBundle() and self.spec['type'] == 'executable':
      return ''
    return {
      'executable': 'mh_execute',
      'static_library': 'staticlib',
      'shared_library': 'mh_dylib',
      'loadable_module': 'mh_bundle',
    }[self.spec['type']]
def _GetBundleBinaryPath(self):
"""Returns the name of the bundle binary of by this target.
E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('shared_library'):
path = self.GetBundleContentsFolderPath()
elif self.spec['type'] in ('executable', 'loadable_module'):
path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS')
return os.path.join(path, self.GetExecutableName())
def _GetStandaloneExecutableSuffix(self):
if 'product_extension' in self.spec:
return '.' + self.spec['product_extension']
return {
'executable': '',
'static_library': '.a',
'shared_library': '.dylib',
'loadable_module': '.so',
}[self.spec['type']]
def _GetStandaloneExecutablePrefix(self):
return self.spec.get('product_prefix', {
'executable': '',
'static_library': 'lib',
'shared_library': 'lib',
# Non-bundled loadable_modules are called foo.so for some reason
# (that is, .so and no prefix) with the xcode build -- match that.
'loadable_module': '',
}[self.spec['type']])
def _GetStandaloneBinaryPath(self):
"""Returns the name of the non-bundle binary represented by this target.
E.g. hello_world. Only valid for non-bundles."""
assert not self._IsBundle()
assert self.spec['type'] in (
'executable', 'shared_library', 'static_library', 'loadable_module'), (
'Unexpected type %s' % self.spec['type'])
target = self.spec['target_name']
if self.spec['type'] == 'static_library':
if target[:3] == 'lib':
target = target[3:]
elif self.spec['type'] in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = self._GetStandaloneExecutablePrefix()
target = self.spec.get('product_name', target)
target_ext = self._GetStandaloneExecutableSuffix()
return target_prefix + target + target_ext
def GetExecutableName(self):
"""Returns the executable name of the bundle represented by this target.
E.g. Chromium."""
if self._IsBundle():
return self.spec.get('product_name', self.spec['target_name'])
else:
return self._GetStandaloneBinaryPath()
def GetExecutablePath(self):
"""Returns the directory name of the bundle represented by this target. E.g.
Chromium.app/Contents/MacOS/Chromium."""
if self._IsBundle():
return self._GetBundleBinaryPath()
else:
return self._GetStandaloneBinaryPath()
  def _GetSdkVersionInfoItem(self, sdk, infoitem):
    """Run `xcodebuild -version -sdk <sdk> <infoitem>` and return its output.

    Raises GypError (after echoing the output to stderr) when xcodebuild
    exits nonzero.
    """
    job = subprocess.Popen(['xcodebuild', '-version', '-sdk', sdk, infoitem],
                           stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)
    out = job.communicate()[0]
    if job.returncode != 0:
      sys.stderr.write(out + '\n')
      raise GypError('Error %d running xcodebuild' % job.returncode)
    return out.rstrip('\n')
  def _SdkPath(self):
    """Return the filesystem path of the configured SDKROOT (cached)."""
    sdk_root = self.GetPerTargetSetting('SDKROOT', default='macosx')
    # xcodebuild invocations are slow, so results are cached per sdk name
    # at class level (shared across all XcodeSettings instances).
    if sdk_root not in XcodeSettings._sdk_path_cache:
      XcodeSettings._sdk_path_cache[sdk_root] = self._GetSdkVersionInfoItem(
          sdk_root, 'Path')
    return XcodeSettings._sdk_path_cache[sdk_root]
  def _AppendPlatformVersionMinFlags(self, lst):
    """Append -m*-version-min flags for the configured deployment targets."""
    self._Appendf(lst, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
    if 'IPHONEOS_DEPLOYMENT_TARGET' in self._Settings():
      # TODO: Implement this better?
      # Distinguish simulator vs device by the SDK directory name.
      sdk_path_basename = os.path.basename(self._SdkPath())
      if sdk_path_basename.lower().startswith('iphonesimulator'):
        self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
                      '-mios-simulator-version-min=%s')
      else:
        self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
                      '-miphoneos-version-min=%s')
  def GetCflags(self, configname):
    """Returns flags that need to be added to .c, .cc, .m, and .mm
    compilations."""
    # This functions (and the similar ones below) do not offer complete
    # emulation of all xcode_settings keys. They're implemented on demand.
    self.configname = configname
    cflags = []
    sdk_root = self._SdkPath()
    if 'SDKROOT' in self._Settings():
      cflags.append('-isysroot %s' % sdk_root)
    if self._Test('CLANG_WARN_CONSTANT_CONVERSION', 'YES', default='NO'):
      cflags.append('-Wconstant-conversion')
    if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'):
      cflags.append('-funsigned-char')
    if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'):
      cflags.append('-fasm-blocks')
    if 'GCC_DYNAMIC_NO_PIC' in self._Settings():
      if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES':
        cflags.append('-mdynamic-no-pic')
      else:
        pass
        # TODO: In this case, it depends on the target. xcode passes
        # mdynamic-no-pic by default for executable and possibly static lib
        # according to mento
    if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'):
      cflags.append('-mpascal-strings')
    self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s')
    if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'):
      dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf')
      if dbg_format == 'dwarf':
        cflags.append('-gdwarf-2')
      elif dbg_format == 'stabs':
        raise NotImplementedError('stabs debug format is not supported yet.')
      elif dbg_format == 'dwarf-with-dsym':
        # dSYM extraction itself is handled elsewhere; the compile flag
        # is the same as plain dwarf.
        cflags.append('-gdwarf-2')
      else:
        raise NotImplementedError('Unknown debug format %s' % dbg_format)
    if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'):
      cflags.append('-fvisibility=hidden')
    if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'):
      cflags.append('-Werror')
    if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'):
      cflags.append('-Wnewline-eof')
    self._AppendPlatformVersionMinFlags(cflags)
    # TODO:
    if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'):
      self._WarnUnimplemented('COPY_PHASE_STRIP')
    self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS')
    self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS')
    # TODO: This is exported correctly, but assigning to it is not supported.
    self._WarnUnimplemented('MACH_O_TYPE')
    self._WarnUnimplemented('PRODUCT_TYPE')
    archs = self._Settings().get('ARCHS', ['i386'])
    if len(archs) != 1:
      # TODO: Supporting fat binaries will be annoying.
      self._WarnUnimplemented('ARCHS')
      archs = ['i386']
    cflags.append('-arch ' + archs[0])
    # SSE extension flags only make sense on Intel architectures.
    if archs[0] in ('i386', 'x86_64'):
      if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'):
        cflags.append('-msse3')
      if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES',
                    default='NO'):
        cflags.append('-mssse3')  # Note 3rd 's'.
      if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'):
        cflags.append('-msse4.1')
      if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'):
        cflags.append('-msse4.2')
    cflags += self._Settings().get('WARNING_CFLAGS', [])
    # mac_framework_dirs may reference $(SDKROOT); expand it here.
    config = self.spec['configurations'][self.configname]
    framework_dirs = config.get('mac_framework_dirs', [])
    for directory in framework_dirs:
      cflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root))
    self.configname = None
    return cflags
def GetCflagsC(self, configname):
    """Build the extra compiler flags for C (.c) and Objective-C (.m) files."""
    self.configname = configname
    flags = []
    # Map GCC_C_LANGUAGE_STANDARD (e.g. 'c99') onto the matching -std= flag.
    self._Appendf(flags, 'GCC_C_LANGUAGE_STANDARD', '-std=%s')
    # Pass through any free-form flags the project specifies.
    flags.extend(self._Settings().get('OTHER_CFLAGS', []))
    self.configname = None
    return flags
def GetCflagsCC(self, configname):
    """Build the extra compiler flags for C++ (.cc) and Objective-C++ (.mm)
    files."""
    self.configname = configname
    flags = []

    # Xcode's 'c++0x'/'gnu++0x' spellings predate the ratified standard;
    # translate them to the names clang understands today.
    std = self._Settings().get('CLANG_CXX_LANGUAGE_STANDARD')
    renames = {'c++0x': 'c++11', 'gnu++0x': 'gnu++11'}
    if std:
        flags.append('-std=' + renames.get(std, std))

    self._Appendf(flags, 'CLANG_CXX_LIBRARY', '-stdlib=%s')

    # Each of these settings maps a YES/NO value onto the presence of a
    # single flag; keep them in a table so the order stays obvious.
    for setting, value, default, flag in (
        ('GCC_ENABLE_CPP_RTTI', 'NO', 'YES', '-fno-rtti'),
        ('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', 'YES', '-fno-exceptions'),
        ('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', 'NO',
         '-fvisibility-inlines-hidden'),
        ('GCC_THREADSAFE_STATICS', 'NO', 'YES', '-fno-threadsafe-statics'),
        ('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', 'YES',
         '-Wno-invalid-offsetof'),
    ):
        if self._Test(setting, value, default=default):
            flags.append(flag)

    # OTHER_CPLUSPLUSFLAGS defaults to $(inherited), which means
    # "use OTHER_CFLAGS"; expand all recognized spellings of that indirection.
    # TODO: More general variable expansion. Missing in many other places too.
    for raw in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']):
        if raw in ('$inherited', '$(inherited)', '${inherited}'):
            raw = '$OTHER_CFLAGS'
        if raw in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'):
            flags.extend(self._Settings().get('OTHER_CFLAGS', []))
        else:
            flags.append(raw)

    self.configname = None
    return flags
def _AddObjectiveCGarbageCollectionFlags(self, flags):
    """Append the -fobjc-gc* flag matching GCC_ENABLE_OBJC_GC, if any."""
    policy_to_flag = {
        'supported': '-fobjc-gc',
        'required': '-fobjc-gc-only',
    }
    policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported')
    # 'unsupported' (the default) adds nothing.
    if policy in policy_to_flag:
        flags.append(policy_to_flag[policy])
def GetCflagsObjC(self, configname):
    """Build the extra compiler flags for Objective-C (.m) files."""
    self.configname = configname
    objc_flags = []
    # Currently only the garbage-collection mode affects plain ObjC.
    self._AddObjectiveCGarbageCollectionFlags(objc_flags)
    self.configname = None
    return objc_flags
def GetCflagsObjCC(self, configname):
    """Build the extra compiler flags for Objective-C++ (.mm) files."""
    self.configname = configname
    objcc_flags = []
    self._AddObjectiveCGarbageCollectionFlags(objcc_flags)
    # GCC_OBJC_CALL_CXX_CDTORS=YES asks for C++ constructors/destructors of
    # C++ ivars to run during ObjC object construction/destruction.
    if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'):
        objcc_flags.append('-fobjc-call-cxx-cdtors')
    self.configname = None
    return objcc_flags
def GetInstallNameBase(self):
    """Return DYLIB_INSTALL_NAME_BASE for this target, or None if the setting
    does not apply to this target type."""
    # Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
    # I.e. proceed only when the target is a shared_library, or a
    # loadable_module that is NOT a bundle; everything else returns None.
    if (self.spec['type'] != 'shared_library' and
        (self.spec['type'] != 'loadable_module' or self._IsBundle())):
        return None
    # Bundled products conventionally install under /Library/Frameworks;
    # plain dylibs default to /usr/local/lib.
    install_base = self.GetPerTargetSetting(
        'DYLIB_INSTALL_NAME_BASE',
        default='/Library/Frameworks' if self._IsBundle() else '/usr/local/lib')
    return install_base
def _StandardizePath(self, path):
"""Do :standardizepath processing for path."""
# I'm not quite sure what :standardizepath does. Just call normpath(),
# but don't let @executable_path/../foo collapse to foo.
if '/' in path:
prefix, rest = '', path
if path.startswith('@'):
prefix, rest = path.split('/', 1)
rest = os.path.normpath(rest) # :standardizepath
path = os.path.join(prefix, rest)
return path
def GetInstallName(self):
    """Return LD_DYLIB_INSTALL_NAME for this target, or None when it does not
    apply (same target types as GetInstallNameBase)."""
    # Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
    if (self.spec['type'] != 'shared_library' and
        (self.spec['type'] != 'loadable_module' or self._IsBundle())):
        return None

    default_install_name = \
        '$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'
    install_name = self.GetPerTargetSetting(
        'LD_DYLIB_INSTALL_NAME', default=default_install_name)

    # Hardcode support for the variables used in chromium for now, to
    # unblock people using the make build.
    if '$' in install_name:
        # Only the two known spellings are supported; any other use of
        # build-setting variables in this field is rejected loudly.
        assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
            '$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
            'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
            'yet in target \'%s\' (got \'%s\')' %
            (self.spec['target_name'], install_name))

        install_name = install_name.replace(
            '$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
            self._StandardizePath(self.GetInstallNameBase()))
        if self._IsBundle():
            # These are only valid for bundles, hence the |if|.
            install_name = install_name.replace(
                '$(WRAPPER_NAME)', self.GetWrapperName())
            install_name = install_name.replace(
                '$(PRODUCT_NAME)', self.GetProductName())
        else:
            # The assert above guarantees non-bundles used the default name,
            # which contains neither of these variables.
            assert '$(WRAPPER_NAME)' not in install_name
            assert '$(PRODUCT_NAME)' not in install_name

        install_name = install_name.replace(
            '$(EXECUTABLE_PATH)', self.GetExecutablePath())
    return install_name
def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path):
"""Checks if ldflag contains a filename and if so remaps it from
gyp-directory-relative to build-directory-relative."""
# This list is expanded on demand.
# They get matched as:
# -exported_symbols_list file
# -Wl,exported_symbols_list file
# -Wl,exported_symbols_list,file
LINKER_FILE = '(\S+)'
WORD = '\S+'
linker_flags = [
['-exported_symbols_list', LINKER_FILE], # Needed for NaCl.
['-unexported_symbols_list', LINKER_FILE],
['-reexported_symbols_list', LINKER_FILE],
['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting.
]
for flag_pattern in linker_flags:
regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern))
m = regex.match(ldflag)
if m:
ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \
ldflag[m.end(1):]
# Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS,
# TODO(thakis): Update ffmpeg.gyp):
if ldflag.startswith('-L'):
ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):])
return ldflag
def GetLdflags(self, configname, product_dir, gyp_to_build_path):
    """Returns flags that need to be passed to the linker.

    Args:
        configname: The name of the configuration to get ld flags for.
        product_dir: The directory where products such as static and dynamic
            libraries are placed. This is added to the library search path.
        gyp_to_build_path: A function that converts paths relative to the
            current gyp file to paths relative to the build directory.
    """
    self.configname = configname
    ldflags = []

    # The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS
    # can contain entries that depend on this. Explicitly absolutify these.
    for ldflag in self._Settings().get('OTHER_LDFLAGS', []):
        ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path))

    if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'):
        ldflags.append('-Wl,-dead_strip')
    if self._Test('PREBINDING', 'YES', default='NO'):
        ldflags.append('-Wl,-prebind')

    self._Appendf(
        ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s')
    self._Appendf(
        ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s')
    self._AppendPlatformVersionMinFlags(ldflags)
    if 'SDKROOT' in self._Settings():
        ldflags.append('-isysroot ' + self._SdkPath())

    for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []):
        ldflags.append('-L' + gyp_to_build_path(library_path))

    # ORDER_FILE is gyp-relative; remap it like the other paths.
    if 'ORDER_FILE' in self._Settings():
        ldflags.append('-Wl,-order_file ' +
                       '-Wl,' + gyp_to_build_path(
                                    self._Settings()['ORDER_FILE']))

    archs = self._Settings().get('ARCHS', ['i386'])
    if len(archs) != 1:
        # TODO: Supporting fat binaries will be annoying.
        self._WarnUnimplemented('ARCHS')
        archs = ['i386']
    ldflags.append('-arch ' + archs[0])

    # Xcode adds the product directory by default.
    ldflags.append('-L' + product_dir)

    install_name = self.GetInstallName()
    if install_name:
        # Escape spaces so the flag survives later shell-style splitting.
        ldflags.append('-install_name ' + install_name.replace(' ', r'\ '))

    for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []):
        ldflags.append('-Wl,-rpath,' + rpath)

    # Framework search paths may reference $(SDKROOT); expand it here.
    config = self.spec['configurations'][self.configname]
    framework_dirs = config.get('mac_framework_dirs', [])
    for directory in framework_dirs:
        ldflags.append('-F' + directory.replace('$(SDKROOT)', self._SdkPath()))

    self.configname = None
    return ldflags
def GetLibtoolflags(self, configname):
    """Returns flags that need to be passed to the static linker.

    Args:
        configname: The name of the configuration to get ld flags for.
    """
    self.configname = configname
    # Static archives honor the same free-form OTHER_LDFLAGS list.
    libtool_flags = list(self._Settings().get('OTHER_LDFLAGS', []))
    # TODO(thakis): ARCHS?
    self.configname = None
    return libtool_flags
def GetPerTargetSettings(self):
    """Gets a dict of all the per-target settings: only keys whose values are
    the same across all configurations are kept."""
    first_pass = True
    result = {}
    for configname in sorted(self.xcode_settings.keys()):
        if first_pass:
            # Seed with a copy of the first configuration's settings.
            result = dict(self.xcode_settings[configname])
            first_pass = False
        else:
            # Drop any key whose value differs between configurations.
            # Fix: use .items() instead of the Python-2-only .iteritems()
            # so this runs under both Python 2 and Python 3.
            # NOTE(review): a key present in the first config but absent in a
            # later one is kept as-is; presumably intentional -- confirm.
            for key, value in self.xcode_settings[configname].items():
                if key not in result:
                    continue
                elif result[key] != value:
                    del result[key]
    return result
def GetPerTargetSetting(self, setting, default=None):
    """Tries to get xcode_settings.setting from spec. Assumes that the setting
    has the same value in all configurations and throws otherwise.

    Args:
        setting: The name of the xcode_settings key to read.
        default: Returned when no configuration defines the setting.
    """
    first_pass = True
    result = None
    for configname in sorted(self.xcode_settings.keys()):
        if first_pass:
            result = self.xcode_settings[configname].get(setting, None)
            first_pass = False
        else:
            # Fix: the assert message referenced an undefined name |spec|
            # (NameError whenever the assert fired); the target name lives on
            # self.spec, as everywhere else in this class.
            assert result == self.xcode_settings[configname].get(setting, None), (
                "Expected per-target setting for '%s', got per-config setting "
                "(target %s)" % (setting, self.spec['target_name']))
    if result is None:
        return default
    return result
def _GetStripPostbuilds(self, configname, output_binary, quiet):
    """Returns a list of shell commands that contain the shell commands
    necessary to strip this target's binary. These should be run as postbuilds
    before the actual postbuilds run."""
    self.configname = configname
    result = []
    # Strip only when deployment postprocessing AND stripping are both
    # explicitly enabled.
    if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and
        self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')):

        # The default STRIP_STYLE depends on the kind of target: bundles keep
        # global symbols, executables may drop everything, everything else
        # only sheds debugging symbols.
        default_strip_style = 'debugging'
        if self._IsBundle():
            default_strip_style = 'non-global'
        elif self.spec['type'] == 'executable':
            default_strip_style = 'all'

        strip_style = self._Settings().get('STRIP_STYLE', default_strip_style)
        # Map the Xcode STRIP_STYLE names onto strip(1) flags.
        strip_flags = {
            'all': '',
            'non-global': '-x',
            'debugging': '-S',
        }[strip_style]

        # Extra user-specified flags; variable references are normalized to
        # the ${FOO} form before being emitted into the shell command.
        explicit_strip_flags = self._Settings().get('STRIPFLAGS', '')
        if explicit_strip_flags:
            strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags)

        if not quiet:
            result.append('echo STRIP\\(%s\\)' % self.spec['target_name'])
        result.append('strip %s %s' % (strip_flags, output_binary))

    self.configname = None
    return result
def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet):
    """Returns a list of shell commands that contain the shell commands
    necessary to massage this target's debug information. These should be run
    as postbuilds before the actual postbuilds run."""
    self.configname = configname

    # For static libraries, no dSYMs are created.
    result = []
    # Only the 'dwarf-with-dsym' debug format requests a separate .dSYM
    # debug bundle, produced here via dsymutil.
    if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and
        self._Test(
            'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and
        self.spec['type'] != 'static_library'):
        if not quiet:
            result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name'])
        result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM'))

    self.configname = None
    return result
def GetTargetPostbuilds(self, configname, output, output_binary, quiet=False):
    """Assemble the implicit postbuild commands for this target, to run
    before the explicitly configured postbuilds.

    dSYM extraction must come first: stripping the binary would discard the
    debug information dsymutil needs.
    """
    dsym_cmds = self._GetDebugInfoPostbuilds(
        configname, output, output_binary, quiet)
    strip_cmds = self._GetStripPostbuilds(configname, output_binary, quiet)
    return dsym_cmds + strip_cmds
def _AdjustLibrary(self, library):
    """Translate one 'libraries' entry into a linker argument."""
    if library.endswith('.framework'):
        # 'path/Foo.framework' -> '-framework Foo'.
        adjusted = '-framework ' + os.path.splitext(
            os.path.basename(library))[0]
    else:
        # 'libfoo.dylib'-style names become '-lfoo'; anything else is
        # passed through untouched.
        match = self.library_re.match(library)
        adjusted = '-l' + match.group(1) if match else library
    return adjusted.replace('$(SDKROOT)', self._SdkPath())
def AdjustLibraries(self, libraries):
    """Transforms entries like 'Cocoa.framework' in libraries into entries
    like '-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc.
    """
    return [self._AdjustLibrary(library) for library in libraries]
class MacPrefixHeader(object):
    """A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature.

    This feature consists of several pieces:
    * If GCC_PREFIX_HEADER is present, all compilations in that project get an
      additional |-include path_to_prefix_header| cflag.
    * If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header
      is instead compiled, and all other compilations in the project get an
      additional |-include path_to_compiled_header| instead.
      + Compiled prefix headers have the extension gch. There is one gch file
        for every language used in the project (c, cc, m, mm), since gch
        files for different languages aren't compatible.
      + gch files themselves are built with the target's normal cflags, but
        they obviously don't get the |-include| flag. Instead, they need a -x
        flag that describes their language.
      + All o files in the target need to depend on the gch file, to make
        sure it's built before any o file is built.

    This class helps with some of these tasks, but it needs help from the
    build system for writing dependencies to the gch files, for writing build
    commands for the gch files, and for figuring out the location of the gch
    files.
    """

    def __init__(self, xcode_settings,
                 gyp_path_to_build_path, gyp_path_to_build_output):
        """If xcode_settings is None, all methods on this class are no-ops.

        Args:
            gyp_path_to_build_path: A function that takes a gyp-relative
                path, and returns a path relative to the build directory.
            gyp_path_to_build_output: A function that takes a gyp-relative
                path and a language code ('c', 'cc', 'm', or 'mm'), and that
                returns a path to where the output of precompiling that path
                for that language should be placed (without the trailing
                '.gch').
        """
        # This doesn't support per-configuration prefix headers. Good enough
        # for now.
        self.header = None
        self.compile_headers = False
        if xcode_settings:
            self.header = xcode_settings.GetPerTargetSetting(
                'GCC_PREFIX_HEADER')
            self.compile_headers = xcode_settings.GetPerTargetSetting(
                'GCC_PRECOMPILE_PREFIX_HEADER', default='NO') != 'NO'
        self.compiled_headers = {}
        if self.header:
            if self.compile_headers:
                # One precompiled header output per language.
                for lang in ('c', 'cc', 'm', 'mm'):
                    self.compiled_headers[lang] = gyp_path_to_build_output(
                        self.header, lang)
            self.header = gyp_path_to_build_path(self.header)

    def GetInclude(self, lang):
        """Gets the cflags to include the prefix header for language |lang|."""
        if self.compile_headers and lang in self.compiled_headers:
            return '-include %s' % self.compiled_headers[lang]
        if self.header:
            return '-include %s' % self.header
        return ''

    def _Gch(self, lang):
        """Returns the actual file name of the prefix header for |lang|."""
        assert self.compile_headers
        return self.compiled_headers[lang] + '.gch'

    def GetObjDependencies(self, sources, objs):
        """Given a list of source files and the corresponding object files,
        returns a list of (source, object, gch) tuples, where |gch| is the
        build-directory relative path to the gch file each object file
        depends on. |sources[i]| has to be the source file belonging to
        |objs[i]|."""
        if not self.header or not self.compile_headers:
            return []
        ext_to_lang = {
            '.c': 'c',
            '.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc',
            '.m': 'm',
            '.mm': 'mm',
        }
        deps = []
        for source, obj in zip(sources, objs):
            lang = ext_to_lang.get(os.path.splitext(source)[1])
            # Sources in other languages (e.g. assembly) get no gch.
            if lang:
                deps.append((source, obj, self._Gch(lang)))
        return deps

    def GetPchBuildCommands(self):
        """Returns [(path_to_gch, language_flag, language, header)].
        |path_to_gch| and |header| are relative to the build directory.
        """
        if not self.header or not self.compile_headers:
            return []
        commands = []
        for lang, lang_flag in (('c', '-x c-header'),
                                ('cc', '-x c++-header'),
                                ('m', '-x objective-c-header'),
                                ('mm', '-x objective-c++-header')):
            commands.append((self._Gch(lang), lang_flag, lang, self.header))
        return commands
def MergeGlobalXcodeSettingsToSpec(global_dict, spec):
    """Merges the global xcode_settings dictionary into each configuration of
    the target represented by spec. For keys that are both in the global and
    the local xcode_settings dict, the local key gets precedence.
    """
    # The xcode generator special-cases global xcode_settings and does
    # something that amounts to merging in the global xcode_settings into
    # each local xcode_settings dict.
    global_settings = global_dict.get('xcode_settings', {})
    for config in spec['configurations'].values():
        if 'xcode_settings' in config:
            # Global values first, then local ones so they win on collision.
            merged = dict(global_settings)
            merged.update(config['xcode_settings'])
            config['xcode_settings'] = merged
def IsMacBundle(flavor, spec):
    """Returns if |spec| should be treated as a bundle.

    Bundles are directories with a certain subdirectory structure, instead of
    just a single file. Bundle rules do not produce a binary but also package
    resources into that directory."""
    wants_bundle = int(spec.get('mac_bundle', 0)) != 0
    is_mac_bundle = flavor == 'mac' and wants_bundle
    if is_mac_bundle:
        # A bundle needs a real product to package.
        assert spec['type'] != 'none', (
            'mac_bundle targets cannot have type none (target "%s")' %
            spec['target_name'])
    return is_mac_bundle
def GetMacBundleResources(product_dir, xcode_settings, resources):
    """Yields (output, resource) pairs for every resource in |resources|.
    Only call this for mac bundle targets.

    Args:
        product_dir: Path to the directory containing the output bundle,
            relative to the build directory.
        xcode_settings: The XcodeSettings of the current target.
        resources: A list of bundle resources, relative to the build
            directory.
    """
    dest = os.path.join(product_dir,
                        xcode_settings.GetBundleResourceFolder())
    for res in resources:
        # The make generator doesn't support it, so forbid it everywhere
        # to keep the generators more interchangeable.
        assert ' ' not in res, (
            "Spaces in resource filenames not supported (%s)" % res)

        res_dir, res_file = os.path.split(res)
        # If the resource lives in a .lproj localization directory, that
        # directory is preserved in the destination path.
        lproj_dir = os.path.split(res_dir)[1]
        output = dest
        if lproj_dir.endswith('.lproj'):
            output = os.path.join(output, lproj_dir)
        output = os.path.join(output, res_file)

        # Compiled XIB files are referred to by .nib.
        if output.endswith('.xib'):
            output = output[:-3] + 'nib'

        yield output, res
def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path):
    """Returns (info_plist, dest_plist, defines, extra_env), where:
    * |info_plist| is the source plist path, relative to the
      build directory,
    * |dest_plist| is the destination plist path, relative to the
      build directory,
    * |defines| is a list of preprocessor defines (empty if the plist
      shouldn't be preprocessed),
    * |extra_env| is a dict of env variables that should be exported when
      invoking |mac_tool copy-info-plist|.

    Only call this for mac bundle targets.

    Args:
        product_dir: Path to the directory containing the output bundle,
            relative to the build directory.
        xcode_settings: The XcodeSettings of the current target.
        gyp_path_to_build_path: A function that converts paths relative to
            the current gyp file to paths relative to the build directory.
    """
    info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE')
    if not info_plist:
        return None, None, [], {}

    # The make generator doesn't support it, so forbid it everywhere
    # to keep the generators more interchangeable.
    assert ' ' not in info_plist, (
        "Spaces in Info.plist filenames not supported (%s)" % info_plist)

    info_plist = gyp_path_to_build_path(info_plist)

    # If explicitly set to preprocess the plist, invoke the C preprocessor
    # and specify any defines as -D flags.
    defines = []
    if xcode_settings.GetPerTargetSetting(
            'INFOPLIST_PREPROCESS', default='NO') == 'YES':
        defines = shlex.split(xcode_settings.GetPerTargetSetting(
            'INFOPLIST_PREPROCESSOR_DEFINITIONS', default=''))

    dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath())
    extra_env = xcode_settings.GetPerTargetSettings()
    return info_plist, dest_plist, defines, extra_env
def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
                 additional_settings=None):
    """Return the environment variables that Xcode would set. See
    http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153
    for a full list.

    Args:
        xcode_settings: An XcodeSettings object. If this is None, this
            function returns an empty dict.
        built_products_dir: Absolute path to the built products dir.
        srcroot: Absolute path to the source root.
        configuration: The build configuration name.
        additional_settings: An optional dict with more values to add to the
            result. NOTE: when given, it is mutated in place (list values
            flattened to space-joined strings, Xcode variables merged in with
            precedence, all references normalized to ${FOO}) and returned.
    """
    if not xcode_settings: return {}

    # This function is considered a friend of XcodeSettings, so let it reach
    # into its implementation details.
    spec = xcode_settings.spec

    # These are filled in on a as-needed basis.
    env = {
        'BUILT_PRODUCTS_DIR' : built_products_dir,
        'CONFIGURATION' : configuration,
        'PRODUCT_NAME' : xcode_settings.GetProductName(),
        # See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME
        'SRCROOT' : srcroot,
        'SOURCE_ROOT': '${SRCROOT}',
        # This is not true for static libraries, but currently the env is
        # only written for bundles:
        'TARGET_BUILD_DIR' : built_products_dir,
        'TEMP_DIR' : '${TMPDIR}',
    }
    # SDKROOT is always exported, but empty unless explicitly configured.
    if xcode_settings.GetPerTargetSetting('SDKROOT'):
        env['SDKROOT'] = xcode_settings._SdkPath()
    else:
        env['SDKROOT'] = ''

    # Binary-producing target types additionally get the executable/product
    # variables.
    if spec['type'] in (
        'executable', 'static_library', 'shared_library', 'loadable_module'):
        env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName()
        env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath()
        env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName()
        mach_o_type = xcode_settings.GetMachOType()
        if mach_o_type:
            env['MACH_O_TYPE'] = mach_o_type
        env['PRODUCT_TYPE'] = xcode_settings.GetProductType()
    # Bundle layout variables only exist for bundle targets.
    if xcode_settings._IsBundle():
        env['CONTENTS_FOLDER_PATH'] = \
            xcode_settings.GetBundleContentsFolderPath()
        env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \
            xcode_settings.GetBundleResourceFolder()
        env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath()
        env['WRAPPER_NAME'] = xcode_settings.GetWrapperName()

    # Install-name variables are only exported when they apply to the target
    # type (see GetInstallName/GetInstallNameBase).
    install_name = xcode_settings.GetInstallName()
    if install_name:
        env['LD_DYLIB_INSTALL_NAME'] = install_name
    install_name_base = xcode_settings.GetInstallNameBase()
    if install_name_base:
        env['DYLIB_INSTALL_NAME_BASE'] = install_name_base

    if not additional_settings:
        additional_settings = {}
    else:
        # Flatten lists to strings.
        for k in additional_settings:
            if not isinstance(additional_settings[k], str):
                additional_settings[k] = ' '.join(additional_settings[k])
    # Xcode-derived values win over caller-supplied ones on key collisions.
    additional_settings.update(env)

    # Normalize $FOO / $(FOO) spellings so later expansion only has to deal
    # with ${FOO}.
    for k in additional_settings:
        additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k])

    return additional_settings
def _NormalizeEnvVarReferences(str):
"""Takes a string containing variable references in the form ${FOO}, $(FOO),
or $FOO, and returns a string with all variable references in the form ${FOO}.
"""
# $FOO -> ${FOO}
str = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', str)
# $(FOO) -> ${FOO}
matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', str)
for match in matches:
to_replace, variable = match
assert '$(' not in match, '$($(FOO)) variables not supported: ' + match
str = str.replace(to_replace, '${' + variable + '}')
return str
def ExpandEnvVars(string, expansions):
    """Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the
    expansions list. If the variable expands to something that references
    another variable, this variable is expanded as well if it's in env --
    until no variables present in env are left."""
    # Walk the (topologically sorted) pairs back-to-front so that values
    # substituted early can themselves be expanded by later passes.
    for key, value in reversed(expansions):
        for pattern in ('${%s}' % key, '$(%s)' % key, '$%s' % key):
            string = string.replace(pattern, value)
    return string
def _TopologicallySortedEnvVarKeys(env):
    """Takes a dict |env| whose values are strings that can refer to other
    keys, for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of
    all keys of env such that key2 is after key1 in L if env[key2] refers to
    env[key1].

    Throws an Exception in case of dependency cycles.
    """
    # Since environment variables can refer to other variables, the
    # evaluation order is important. Below is the logic to compute the
    # dependency graph and sort it.
    # Only the normalized ${FOO} form is recognized here (see
    # _NormalizeEnvVarReferences).
    regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')

    def GetEdges(node):
        # Use a definition of edges such that user_of_variable -> used_varible.
        # This happens to be easier in this case, since a variable's
        # definition contains all variables it references in a single string.
        # We can then reverse the result of the topological sort at the end.
        # Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
        # References to keys not present in |env| are ignored.
        matches = set([v for v in regex.findall(env[node]) if v in env])
        for dependee in matches:
            assert '${' not in dependee, 'Nested variables not supported: ' + dependee
        return matches

    try:
        # Topologically sort, and then reverse, because we used an edge
        # definition that's inverted from the expected result of this
        # function (see comment above).
        order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
        order.reverse()
        return order
    # NOTE: Python 2 'except X, e' syntax -- this file targets Python 2.
    except gyp.common.CycleError, e:
        raise GypError(
            'Xcode environment variables are cyclically dependent: ' + str(e.nodes))
def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot,
                      configuration, additional_settings=None):
    """Return the Xcode environment as (key, value) pairs, ordered so that
    every variable appears after any variable it references."""
    env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot,
                       configuration, additional_settings)
    order = _TopologicallySortedEnvVarKeys(env)
    return [(key, env[key]) for key in order]
def GetSpecPostbuildCommands(spec, quiet=False):
    """Returns the list of postbuilds explicitly defined on |spec|, in a form
    executable by a shell."""
    commands = []
    for postbuild in spec.get('postbuilds', []):
        if not quiet:
            # Announce each postbuild by name before running it.
            commands.append('echo POSTBUILD\\(%s\\) %s' % (
                spec['target_name'], postbuild['postbuild_name']))
        commands.append(gyp.common.EncodePOSIXShellList(postbuild['action']))
    return commands
| {
"content_hash": "100eba7a4e323b29f3e6a1770b37db20",
"timestamp": "",
"source": "github",
"line_count": 1080,
"max_line_length": 191,
"avg_line_length": 39.56944444444444,
"alnum_prop": 0.658032058032058,
"repo_name": "cfsghost/stemmer",
"id": "ed5e27fa686dd256e4e047b9bd27adbaf71dbb3e",
"size": "42892",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "base/overwrite/usr/local/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/xcode_emulation.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "D",
"bytes": "24648"
},
{
"name": "JavaScript",
"bytes": "44762"
},
{
"name": "Shell",
"bytes": "3393"
}
],
"symlink_target": ""
} |
__author__ = '0xCCCCCCCC'

# NOTE(review): wildcard import -- socket, AF_INET and SOCK_DGRAM all come
# from here; an explicit import list would be clearer.
from socket import *

# Address of the UDP echo server this client talks to.
SERVER_NAME = '127.0.0.1'
SERVER_PORT = 9876
def main():
    """Run an interactive UDP echo client.

    Reads lines from stdin, sends each to (SERVER_NAME, SERVER_PORT), and
    prints the server's reply decoded as UTF-8. Typing 'exit' quits.
    """
    buffer_size = 2048
    client_socket = socket(AF_INET, SOCK_DGRAM)
    try:
        while True:
            message = input('Please input a sentence:')
            if message == 'exit':
                break
            client_socket.sendto(message.encode('utf-8'),
                                 (SERVER_NAME, SERVER_PORT))
            modified_msg, server_addr = client_socket.recvfrom(buffer_size)
            print('Received reply from server({0})'.format(server_addr))
            # Fix: recvfrom returns bytes; str(b'hi') prints the repr
            # "b'hi'". Decode the payload instead.
            print('-> ' + modified_msg.decode('utf-8'))
    finally:
        # Fix: release the socket even if input()/recvfrom raises.
        client_socket.close()


if __name__ == '__main__':
    main()
| {
"content_hash": "d7f3dfb736925f0d13f5ba1bb59c2af6",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 81,
"avg_line_length": 26.041666666666668,
"alnum_prop": 0.6016,
"repo_name": "kingsamchen/Eureka",
"id": "477b06ad88b01e59cef1181741801a91b8104770",
"size": "649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "explore-socket/socket-abc/UDPClient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AIDL",
"bytes": "320"
},
{
"name": "Assembly",
"bytes": "1523"
},
{
"name": "C",
"bytes": "115900"
},
{
"name": "C#",
"bytes": "21241"
},
{
"name": "C++",
"bytes": "1125565"
},
{
"name": "CMake",
"bytes": "645800"
},
{
"name": "Dockerfile",
"bytes": "717"
},
{
"name": "Go",
"bytes": "111089"
},
{
"name": "HTML",
"bytes": "3869"
},
{
"name": "Java",
"bytes": "101720"
},
{
"name": "Makefile",
"bytes": "110"
},
{
"name": "PowerShell",
"bytes": "9136"
},
{
"name": "Python",
"bytes": "210011"
},
{
"name": "Shell",
"bytes": "9338"
}
],
"symlink_target": ""
} |
from sahara.plugins.vanilla import confighints_helper as ch_helper
from sahara.plugins.vanilla.hadoop2 import edp_engine
from sahara.utils import edp
class EdpOozieEngine(edp_engine.EdpOozieEngine):
    """Oozie EDP engine for the vanilla Hadoop 2.6.0 plugin.

    Only overrides job-config discovery so it reads this version's bundled
    resource files.
    """

    @staticmethod
    def get_possible_job_config(job_type):
        """Return {'job_config': ...} for |job_type| from the v2.6.0 bundled
        default XMLs; defer to the generic hadoop2 implementation for other
        job types."""
        if edp.compare_job_type(job_type, edp.JOB_TYPE_HIVE):
            return {'job_config': ch_helper.get_possible_hive_config_from(
                'plugins/vanilla/v2_6_0/resources/hive-default.xml')}
        if edp.compare_job_type(job_type,
                                edp.JOB_TYPE_MAPREDUCE,
                                edp.JOB_TYPE_MAPREDUCE_STREAMING):
            return {'job_config': ch_helper.get_possible_mapreduce_config_from(
                'plugins/vanilla/v2_6_0/resources/mapred-default.xml')}
        if edp.compare_job_type(job_type, edp.JOB_TYPE_PIG):
            # NOTE(review): Pig configs are derived from mapred-default.xml
            # rather than a pig-specific resource -- presumably intentional
            # (Pig jobs run on MapReduce); confirm.
            return {'job_config': ch_helper.get_possible_pig_config_from(
                'plugins/vanilla/v2_6_0/resources/mapred-default.xml')}
        return edp_engine.EdpOozieEngine.get_possible_job_config(job_type)
| {
"content_hash": "5d9730d21f6e6efc119469bed1be25a9",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 79,
"avg_line_length": 54,
"alnum_prop": 0.6425925925925926,
"repo_name": "ekasitk/sahara",
"id": "5d7c208c2ed19c6fda1f0cc9ab272977c6dd1b0c",
"size": "1663",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "sahara/plugins/vanilla/v2_6_0/edp_engine.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "3609"
},
{
"name": "Mako",
"bytes": "19620"
},
{
"name": "PigLatin",
"bytes": "792"
},
{
"name": "Python",
"bytes": "3141724"
},
{
"name": "Shell",
"bytes": "52399"
}
],
"symlink_target": ""
} |
import sys
# [START storage_remove_file_owner]
from google.cloud import storage
def remove_blob_owner(bucket_name, blob_name, user_email):
    """Revoke read, write and owner access for *user_email* on one blob.

    bucket_name -- name of the bucket containing the blob
    blob_name   -- name of the object whose ACL is edited
    user_email  -- address of the user entity being removed
    """
    # bucket_name = "your-bucket-name"
    # blob_name = "your-object-name"
    # user_email = "name@example.com"

    client = storage.Client()
    target_blob = client.bucket(bucket_name).blob(blob_name)

    # You can also use `group`, `domain`, `all_authenticated` and `all` to
    # remove access for different types of entities.
    acl_entry = target_blob.acl.user(user_email)
    acl_entry.revoke_read()
    acl_entry.revoke_write()
    acl_entry.revoke_owner()
    target_blob.acl.save()

    print(
        f"Removed user {user_email} from blob {blob_name} in bucket {bucket_name}."
    )
# [END storage_remove_file_owner]
if __name__ == "__main__":
    remove_blob_owner(sys.argv[1], sys.argv[2], sys.argv[3])
| {
"content_hash": "3bfb10e663fcb73c4d55438c983e9535",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 83,
"avg_line_length": 29.485714285714284,
"alnum_prop": 0.6569767441860465,
"repo_name": "googleapis/python-storage",
"id": "315a747adbcc6771e297f51ec931e3277472208f",
"size": "1631",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/snippets/storage_remove_file_owner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1392987"
},
{
"name": "Shell",
"bytes": "32171"
}
],
"symlink_target": ""
} |
# Operational status values for networks.
NET_STATUS_ACTIVE = 'ACTIVE'
NET_STATUS_BUILD = 'BUILD'
NET_STATUS_DOWN = 'DOWN'
NET_STATUS_ERROR = 'ERROR'
# Operational status values for ports.
PORT_STATUS_ACTIVE = 'ACTIVE'
PORT_STATUS_BUILD = 'BUILD'
PORT_STATUS_DOWN = 'DOWN'
PORT_STATUS_ERROR = 'ERROR'
PORT_STATUS_NOTAPPLICABLE = 'N/A'
# Operational status values for floating IPs.
FLOATINGIP_STATUS_ACTIVE = 'ACTIVE'
FLOATINGIP_STATUS_DOWN = 'DOWN'
FLOATINGIP_STATUS_ERROR = 'ERROR'
# device_owner strings recorded on ports to mark which service owns them.
DEVICE_OWNER_ROUTER_HA_INTF = "network:router_ha_interface"
DEVICE_OWNER_ROUTER_INTF = "network:router_interface"
DEVICE_OWNER_ROUTER_GW = "network:router_gateway"
DEVICE_OWNER_FLOATINGIP = "network:floatingip"
DEVICE_OWNER_DHCP = "network:dhcp"
DEVICE_OWNER_DVR_INTERFACE = "network:router_interface_distributed"
DEVICE_OWNER_AGENT_GW = "network:floatingip_agent_gateway"
DEVICE_OWNER_ROUTER_SNAT = "network:router_centralized_snat"
DEVICE_OWNER_LOADBALANCER = "neutron:LOADBALANCER"
DEVICE_OWNER_LOADBALANCERV2 = "neutron:LOADBALANCERV2"
# Collection used to identify devices owned by router interfaces.
# DEVICE_OWNER_ROUTER_HA_INTF is a special case and so is not included.
ROUTER_INTERFACE_OWNERS = (DEVICE_OWNER_ROUTER_INTF,
                           DEVICE_OWNER_DVR_INTERFACE)
# L3 agent operating modes; L3_AGENT_MODE is the config option name.
L3_AGENT_MODE_DVR = 'dvr'
L3_AGENT_MODE_DVR_SNAT = 'dvr_snat'
L3_AGENT_MODE_LEGACY = 'legacy'
L3_AGENT_MODE = 'agent_mode'
DEVICE_ID_RESERVED_DHCP_PORT = "reserved_dhcp_port"
# Keys under which router-related collections are attached to payloads.
FLOATINGIP_KEY = '_floatingips'
INTERFACE_KEY = '_interfaces'
HA_INTERFACE_KEY = '_ha_interface'
HA_ROUTER_STATE_KEY = '_ha_state'
METERING_LABEL_KEY = '_metering_labels'
FLOATINGIP_AGENT_INTF_KEY = '_floatingip_agent_interfaces'
SNAT_ROUTER_INTF_KEY = '_snat_router_interfaces'
# Name templates for HA resources; %s is filled with the tenant id.
HA_NETWORK_NAME = 'HA network tenant %s'
HA_SUBNET_NAME = 'HA subnet tenant %s'
HA_PORT_NAME = 'HA port tenant %s'
MINIMUM_AGENTS_FOR_HA = 2
HA_ROUTER_STATE_ACTIVE = 'active'
HA_ROUTER_STATE_STANDBY = 'standby'
# IP version identifiers and address-family constants.
IPv4 = 'IPv4'
IPv6 = 'IPv6'
IP_VERSION_4 = 4
IP_VERSION_6 = 6
IPv4_BITS = 32
IPv6_BITS = 128
IPv4_ANY = '0.0.0.0/0'
IPv6_ANY = '::/0'
# UDP port 68 -- the DHCP client-side port.
DHCP_RESPONSE_PORT = 68
# Valid segmentation id ranges per network type.
MIN_VLAN_TAG = 1
MAX_VLAN_TAG = 4094
# For GRE Tunnel
MIN_GRE_ID = 1
MAX_GRE_ID = 2 ** 32 - 1
# For VXLAN Tunnel
MIN_VXLAN_VNI = 1
MAX_VXLAN_VNI = 2 ** 24 - 1
# All-zero (MAC, IP) wildcard entry.
FLOODING_ENTRY = ('00:00:00:00:00:00', '0.0.0.0')
# Agent type display strings as reported by the agents.
AGENT_TYPE_DHCP = 'DHCP agent'
AGENT_TYPE_OVS = 'Open vSwitch agent'
AGENT_TYPE_LINUXBRIDGE = 'Linux bridge agent'
AGENT_TYPE_HYPERV = 'HyperV agent'
AGENT_TYPE_NEC = 'NEC plugin agent'
AGENT_TYPE_OFA = 'OFA driver agent'
AGENT_TYPE_L3 = 'L3 agent'
AGENT_TYPE_LOADBALANCER = 'Loadbalancer agent'
AGENT_TYPE_MLNX = 'Mellanox plugin agent'
AGENT_TYPE_METERING = 'Metering agent'
AGENT_TYPE_METADATA = 'Metadata agent'
AGENT_TYPE_SDNVE = 'IBM SDN-VE agent'
AGENT_TYPE_NIC_SWITCH = 'NIC Switch agent'
AGENT_TYPE_VBOX = 'VBox agent'
L2_AGENT_TOPIC = 'N/A'
# Pagination / sorting markers used by the API layer.
PAGINATION_INFINITE = 'infinite'
SORT_DIRECTION_ASC = 'asc'
SORT_DIRECTION_DESC = 'desc'
# API extension aliases.
PORT_BINDING_EXT_ALIAS = 'binding'
L3_AGENT_SCHEDULER_EXT_ALIAS = 'l3_agent_scheduler'
DHCP_AGENT_SCHEDULER_EXT_ALIAS = 'dhcp_agent_scheduler'
LBAAS_AGENT_SCHEDULER_EXT_ALIAS = 'lbaas_agent_scheduler'
L3_DISTRIBUTED_EXT_ALIAS = 'dvr'
L3_HA_MODE_EXT_ALIAS = 'l3-ha'
SUBNET_ALLOCATION_EXT_ALIAS = 'subnet_allocation'
# Protocol names and numbers for Security Groups/Firewalls
PROTO_NAME_TCP = 'tcp'
PROTO_NAME_ICMP = 'icmp'
PROTO_NAME_ICMP_V6 = 'icmpv6'
PROTO_NAME_UDP = 'udp'
PROTO_NUM_TCP = 6
PROTO_NUM_ICMP = 1
PROTO_NUM_ICMP_V6 = 58
PROTO_NUM_UDP = 17
# List of ICMPv6 types that should be allowed by default:
# Multicast Listener Query (130),
# Multicast Listener Report (131),
# Multicast Listener Done (132),
# Neighbor Solicitation (135),
# Neighbor Advertisement (136)
ICMPV6_ALLOWED_TYPES = [130, 131, 132, 135, 136]
# ICMPv6 Router Advertisement message type.
ICMPV6_TYPE_RA = 134
# IPv6 address assignment modes for subnets.
DHCPV6_STATEFUL = 'dhcpv6-stateful'
DHCPV6_STATELESS = 'dhcpv6-stateless'
IPV6_SLAAC = 'slaac'
IPV6_MODES = [DHCPV6_STATEFUL, DHCPV6_STATELESS, IPV6_SLAAC]
# IPv6 link-local address prefix.
IPV6_LLA_PREFIX = 'fe80::/64'
# Human-readable ID to which default_ipv6_subnet_pool should be set to
# indicate that IPv6 Prefix Delegation should be used to allocate subnet CIDRs
IPV6_PD_POOL_ID = 'prefix_delegation'
# Linux interface max length
DEVICE_NAME_MAX_LEN = 15
# Device names start with "tap"
TAP_DEVICE_PREFIX = 'tap'
ATTRIBUTES_TO_UPDATE = 'attributes_to_update'
# Maximum value integer can take in MySQL and PostgreSQL
# In SQLite integer can be stored in 1, 2, 3, 4, 6, or 8 bytes,
# but here it will be limited by this value for consistency.
DB_INTEGER_MAX_VALUE = 2 ** 31 - 1
# TODO(amuller): Re-define the RPC namespaces once Oslo messaging supports
# Targets with multiple namespaces. Neutron will then implement callbacks
# for its RPC clients in order to support rolling upgrades.
# RPC Interface for agents to call DHCP API implemented on the plugin side
RPC_NAMESPACE_DHCP_PLUGIN = None
# RPC interface for the metadata service to get info from the plugin side
RPC_NAMESPACE_METADATA = None
# RPC interface for agent to plugin security group API
RPC_NAMESPACE_SECGROUP = None
# RPC interface for agent to plugin DVR api
RPC_NAMESPACE_DVR = None
# RPC interface for reporting state back to the plugin
RPC_NAMESPACE_STATE = None
# Default network MTU value when not configured
DEFAULT_NETWORK_MTU = 0
| {
"content_hash": "83a3805616cae9a32a44b7b7dba3c621",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 78,
"avg_line_length": 31.132530120481928,
"alnum_prop": 0.7380030959752322,
"repo_name": "alexandrucoman/vbox-neutron-agent",
"id": "67847feb022b7e04014a77cda120501483a39159",
"size": "5852",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/common/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "980"
},
{
"name": "Python",
"bytes": "7321102"
},
{
"name": "Shell",
"bytes": "12819"
}
],
"symlink_target": ""
} |
from muntjac.api import VerticalLayout, ListSelect
from muntjac.data.property import IValueChangeListener
class ListSelectMultipleExample(VerticalLayout, IValueChangeListener):
    """Sampler demo: a multi-select ListSelect of city names."""

    _cities = ['Berlin', 'Brussels', 'Helsinki', 'Madrid', 'Oslo',
            'Paris', 'Stockholm']

    def __init__(self):
        super(ListSelectMultipleExample, self).__init__()
        self.setSpacing(True)
        city_select = ListSelect('Please select some cities')
        for city in self._cities:
            city_select.addItem(city)
        city_select.setRows(7)
        city_select.setNullSelectionAllowed(True)
        city_select.setMultiSelect(True)
        # Immediate mode so value-change events fire on every selection.
        city_select.setImmediate(True)
        city_select.addListener(self, IValueChangeListener)
        self.addComponent(city_select)

    def valueChange(self, event):
        # Shows a notification when a selection is made.
        selection = list(event.getProperty().getValue())
        self.getWindow().showNotification('Selected cities: %s' % selection)
| {
"content_hash": "357a653a29bdce752d815baebb18544a",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 70,
"avg_line_length": 30.466666666666665,
"alnum_prop": 0.6487964989059081,
"repo_name": "rwl/muntjac",
"id": "75f82181760686c5f9ab65c6da4373be89fe044c",
"size": "915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "muntjac/demo/sampler/features/selects/ListSelectMultipleExample.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8602"
},
{
"name": "Java",
"bytes": "2243"
},
{
"name": "JavaScript",
"bytes": "32438"
},
{
"name": "Python",
"bytes": "3212361"
}
],
"symlink_target": ""
} |
"""this module contains exceptions used in the astng library
"""
__doctype__ = "restructuredtext en"
class ASTNGError(Exception):
    """Root of the astng exception hierarchy."""

class ASTNGBuildingException(ASTNGError):
    """Raised when an astng representation cannot be built."""

class ResolveError(ASTNGError):
    """Common base for astng resolution/inference failures."""

class NotFoundError(ResolveError):
    """Raised when a name cannot be resolved."""

class InferenceError(ResolveError):
    """Raised when a node cannot be inferred."""

class UnresolvableName(InferenceError):
    """Raised when a name cannot be resolved during inference."""

class NoDefault(ASTNGError):
    """Raised by a function's `default_value` method when an argument
    has no default value.
    """
| {
"content_hash": "178f3fc26b76f53d8e7edeea6e06b635",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 77,
"avg_line_length": 28.06896551724138,
"alnum_prop": 0.7297297297297297,
"repo_name": "tlksio/tlksio",
"id": "db33f8b9df618dfd6f14b136a71d5829574ff675",
"size": "1650",
"binary": false,
"copies": "27",
"ref": "refs/heads/develop",
"path": "env/lib/python3.4/site-packages/logilab/astng/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1668"
},
{
"name": "HTML",
"bytes": "65037"
},
{
"name": "JavaScript",
"bytes": "450"
},
{
"name": "Makefile",
"bytes": "1075"
},
{
"name": "Python",
"bytes": "42727"
}
],
"symlink_target": ""
} |
from castellan import options as castellan_opts
from keystoneauth1 import loading as ks_loading
from oslo_config import cfg
# Option group for the simple "fixed key" key manager.
key_manager_group = cfg.OptGroup(
    'key_manager',
    title='Key manager options')
key_manager_opts = [
    cfg.StrOpt(
        'fixed_key',
        deprecated_group='keymgr',
        help="""
Fixed key returned by key manager, specified in hex.
Possible values:
* Empty string or a key in hex value
"""),
]
# Barbican options kept only for backward compatibility; the equivalents
# now live in the Castellan library (see deprecated_reason below).
barbican_group = cfg.OptGroup(
    "barbican",
    title="Barbican options")
barbican_opts = [
    cfg.StrOpt("catalog_info",
               default="key-manager:barbican:public",
               deprecated_for_removal=True,
               deprecated_reason="This option have been moved to the "
                                 "Castellan library",
               help="""
Info to match when looking for barbican in the service
catalog. Format is: separated values of the form:
<service_type>:<service_name>:<endpoint_type>
"""),
    cfg.StrOpt("endpoint_template",
               deprecated_for_removal=True,
               deprecated_reason="This option have been moved to the "
                                 "Castellan library",
               help="""
Override service catalog lookup with template for
barbican endpoint e.g.
http://localhost:9311/v1/%(project_id)s
"""),
    cfg.StrOpt("os_region_name",
               deprecated_for_removal=True,
               deprecated_reason="This option have been moved to the "
                                 "Castellan library",
               help='Region name of this node'),
]
def register_opts(conf):
    """Register key manager and deprecated barbican options on *conf*."""
    # Let Castellan install its own defaults first.
    castellan_opts.set_defaults(conf)
    conf.register_group(key_manager_group)
    conf.register_opts(key_manager_opts, group=key_manager_group)
    # TODO(raj_singh): Todo: Code block below is deprecated and will be removed
    # post Newton
    conf.register_group(barbican_group)
    conf.register_opts(barbican_opts, group=barbican_group)
    ks_loading.register_session_conf_options(conf, barbican_group.name)
def list_opts():
    """Return ``{group_name: [options]}`` merged with Castellan's options.

    Castellan also defines groups named "barbican" and "key_manager".
    If the two lists were simply concatenated, oslo would drop one of
    the duplicate groups, so options of identically named groups are
    merged here instead.
    """
    merged = {barbican_group.name: barbican_opts,
              key_manager_group.name: key_manager_opts}
    for group_name, group_options in castellan_opts.list_opts():
        if group_name in merged:
            merged[group_name] = merged[group_name] + group_options
        else:
            merged[group_name] = group_options
    return merged
    # TODO(raj_singh): Post Newton delete code block from above and comment in
    # line below. Castellan already returned a list which can be returned
    # directly from list_opts()
    # return castellan_opts.list_opts()
| {
"content_hash": "bb2487fb88434f08c3d0e8087909e26f",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 79,
"avg_line_length": 35.670731707317074,
"alnum_prop": 0.6485470085470085,
"repo_name": "xuweiliang/Codelibrary",
"id": "9de5fa3c90b7cf6a3ee0ad8e8962487001fab537",
"size": "3561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/conf/key_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "134284"
},
{
"name": "HTML",
"bytes": "830844"
},
{
"name": "JavaScript",
"bytes": "2421484"
},
{
"name": "Makefile",
"bytes": "4934"
},
{
"name": "Python",
"bytes": "17185807"
},
{
"name": "Shell",
"bytes": "9144"
}
],
"symlink_target": ""
} |
import dragonfly
import dragonfly.pandahive
import bee
from bee import connect
import math, functools
from panda3d.core import NodePath
import dragonfly.scene.unbound
import dragonfly.std
import dragonfly.io
import dragonfly.canvas
import Spyder
# ## random matrix generator
from random import random
def random_matrix_generator():
    """Endlessly yield placement matrices with a random Z rotation and a
    random origin within a 15x15 square centered on (0, 0, 0)."""
    while True:
        axes = Spyder.AxisSystem()
        axes.rotateZ(360 * random())
        axes.origin = Spyder.Coordinate(15 * random() - 7.5, 15 * random() - 7.5, 0)
        yield dragonfly.scene.matrix(axes, "AxisSystem")
def id_generator():
    """Yield a fresh, unique entity name ("spawnedpanda1", "spawnedpanda2",
    ...) for every panda spawned.

    The counter must advance on each yield: the original version never
    incremented ``n``, so every spawned actor received the identical id
    "spawnedpanda0", which defeats the purpose of an id generator.
    """
    n = 0
    while 1:
        n += 1
        yield "spawnedpanda" + str(n)
def convert_to_panda(c):
    """Convert coordinate *c* into a panda model placed at that position,
    scaled down by a factor of 1000."""
    placement = Spyder.AxisSystem()
    placement *= 0.001
    placement.origin = c
    return Spyder.Model3D("models/panda-model", "egg", placement)
def convert_axis(axis):
    """Visualize *axis* as an RGB tripod: four corner coordinates plus a
    red X, green Y and blue Z cylinder, grouped on the axis system."""
    parts = [
        Spyder.Coordinate(0, 0, 0),
        Spyder.Coordinate(1, 0, 0),
        Spyder.Coordinate(0, 1, 0),
        Spyder.Coordinate(0, 0, 1),
    ]
    # X axis: shifted along X, rotated to lie along it, red material.
    x_axis = Spyder.AxisSystem()
    x_axis.origin += (0.5, 0, 0)
    x_axis.rotateY(-90)
    parts.append(Spyder.Cylinder(0.2, 1, axis=x_axis, material="red"))
    # Y axis: shifted along Y, rotated to lie along it, green material.
    y_axis = Spyder.AxisSystem()
    y_axis.origin += (0, 0.5, 0)
    y_axis.rotateX(90)
    parts.append(Spyder.Cylinder(0.2, 1, axis=y_axis, material="green"))
    # Z axis: cylinders are Z-aligned by default, blue material.
    z_axis = Spyder.AxisSystem()
    z_axis.origin += (0, 0, 0.5)
    parts.append(Spyder.Cylinder(0.2, 1, axis=z_axis, material="blue"))
    return Spyder.ObjectGroup3D(parts, axis)
from bee.spyderhive import SpyderMethod, SpyderConverter
class myspyderframe(dragonfly.pandahive.spyderframe):
    # Register Spyder type conversions used when the scene is built:
    # Coordinates become panda models, AxisSystems become RGB tripods.
    SpyderConverter("Coordinate", "Model3D", convert_to_panda)
    SpyderConverter("AxisSystem", "ObjectGroup3D", convert_axis)
class myscene(myspyderframe):
    # Hive parameters supplied by the enclosing hive at instantiation time.
    scene = bee.parameter("bee")
    canvas = bee.parameter("bee")
    mousearea = bee.parameter("bee")
    # Static environment model, scaled down and shifted into view.
    a = Spyder.AxisSystem()
    a *= 0.25
    a.origin += (-8, 42, 0)
    env = Spyder.Model3D("models/environment", "egg", a)
    # A single panda actor with a "walk" animation, named "mypanda".
    a = Spyder.AxisSystem()
    a *= 0.005
    mypanda = Spyder.Actor3D("models/panda-model", "egg", [("walk", "models/panda-walk4", "egg")], a,
                             entityname="mypanda")
    # Actor class used as the template for dynamically spawned pandas.
    a = Spyder.AxisSystem()
    a *= 0.005
    pandaclass = Spyder.ActorClass3D("models/panda-model", "egg", [("walk", "models/panda-walk4", "egg")], a,
                                     actorclassname="pandaclass")
    # Clickable 2D icon (used to trigger panda spawning).
    box = Spyder.Box2D(50, 470, 96, 96)
    icon = Spyder.Icon("pandaicon.png", "pandaicon", box, transparency=True)
    # Primary-color materials referenced by convert_axis, plus two
    # axis systems that will be rendered as tripods via the converter.
    m1 = Spyder.NewMaterial("red", (255, 0, 0))
    m2 = Spyder.NewMaterial("green", (0, 255, 0))
    m3 = Spyder.NewMaterial("blue", (0, 0, 255))
    m_ax = Spyder.AxisSystem(origin=(5, -3, 0))
    m_ax2 = Spyder.AxisSystem(origin=(-5, -3, 3))
    m_ax2.rotateZ(30)
    m_ax2.rotateX(-60)
    # Drop the temporaries so they do not leak into the class namespace.
    del a, box
class myhive(dragonfly.pandahive.pandahive):
    # Identifiers of the entities defined in the scene, exposed as hive
    # attributes so the workers below can reference them.
    pandaname = "mypanda"
    pandaname_ = bee.attribute("pandaname")
    pandaclassname = "pandaclass"
    pandaclassname_ = bee.attribute("pandaclassname")
    pandaicon = "pandaicon"
    pandaicon_ = bee.attribute("pandaicon")
    canvas = dragonfly.pandahive.pandacanvas()
    mousearea = dragonfly.canvas.mousearea()
    # Re-raise exceptions coming out of the event system.
    raiser = bee.raiser()
    connect("evexc", raiser)
    # W loops / S stops the "walk" animation of the primary panda.
    animation = dragonfly.scene.unbound.animation()
    pandaid = dragonfly.std.variable("id")(pandaname_)
    walk = dragonfly.std.variable("str")("walk")
    connect(pandaid, animation.actor)
    connect(walk, animation.animation_name)
    key_w = dragonfly.io.keyboardsensor_trigger("W")
    connect(key_w, animation.loop)
    key_s = dragonfly.io.keyboardsensor_trigger("S")
    connect(key_s, animation.stop)
    # Spawning pipeline: weave a fresh id together with a random placement
    # matrix, then push the pair through a transistor into the spawner.
    pandaspawn = dragonfly.scene.spawn_actor()
    v_panda = dragonfly.std.variable("id")(pandaclassname_)
    connect(v_panda, pandaspawn)
    panda_id = dragonfly.std.generator("id", id_generator)()
    random_matrix = dragonfly.std.generator(("object", "matrix"), random_matrix_generator)()
    w_spawn = dragonfly.std.weaver(("id", ("object", "matrix")))()
    connect(panda_id, w_spawn.inp1)
    connect(random_matrix, w_spawn.inp2)
    do_spawn = dragonfly.std.transistor(("id", ("object", "matrix")))()
    connect(w_spawn, do_spawn)
    connect(do_spawn, pandaspawn.spawn_matrix)
    # Spawning is triggered by the Z key or by clicking the panda icon.
    key_z = dragonfly.io.keyboardsensor_trigger("Z")
    connect(key_z, do_spawn)
    pandaicon_click = dragonfly.io.mouseareasensor(pandaicon_)
    connect(pandaicon_click, do_spawn)
    # Instantiate the scene, binding it to this hive's canvas/mouse area.
    myscene = myscene(
        scene="scene",
        canvas=canvas,
        mousearea=mousearea,
    )
# Build, place and start the hive instance.
main = myhive().getinstance()
main.build("main")
main.place()
main.close()
main.init()
from direct.task import Task
def spinCameraTask(camera, task):
    """Panda3D task: orbit *camera* around the origin at 30 degrees/second,
    3 units above the ground, always facing the center."""
    angleDegrees = task.time * 30.0
    angleRadians = angleDegrees * (math.pi / 180.0)
    camera.setPos(20 * math.sin(angleRadians), -20.0 * math.cos(angleRadians), 3)
    camera.setHpr(angleDegrees, 0, 0)
    return Task.cont
main.window.taskMgr.add(functools.partial(spinCameraTask, main.window.camera), "SpinCameraTask")
main.run()
| {
"content_hash": "91bb84a6fd1e8e13b8ac52b78c0bd020",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 109,
"avg_line_length": 27.83977900552486,
"alnum_prop": 0.6471522127406232,
"repo_name": "agoose77/hivesystem",
"id": "53ecb12ec7f2d3f163f6e7e78b535cb9eac6beac",
"size": "5039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manual/movingpanda/panda-8e.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "2491478"
},
{
"name": "Shell",
"bytes": "1164"
}
],
"symlink_target": ""
} |
'''
Test Monitors
@author: Michael Eddington
@version: $Id: test.py 780 2008-03-23 02:58:49Z meddingt $
'''
#
# Copyright (c) 2007 Michael Eddington
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Authors:
# Michael Eddington (mike@phed.org)
# $Id: test.py 780 2008-03-23 02:58:49Z meddingt $
import sys, random
from Peach.agent import Monitor
class Test(Monitor):
    # Minimal monitor used by the test suite; inherits all default
    # behaviour from Monitor unchanged.
    pass
class TestFault(Monitor):
    def DetectedFault(self):
        '''
        Check if a fault was detected.
        '''
        # Randomly (1 in 4) pick True, logging the choice -- but note the
        # result is only printed; the method always reports "no fault".
        ret = random.choice([False, False, False, True])
        if ret:
            print "TestFault: Returning True"
        else:
            print "TestFault: Returning False"
        #return ret
        return False
    def StopRun(self):
        # Randomly (1 in 4) ask for the run to stop, logging the decision.
        ret = random.choice([False, False, False, True])
        if ret:
            print "TestFault: StopRun: Returning True"
        else:
            print "TestFault: StopRun: Returning False"
        return ret
class TestStopOnFirst(Monitor):
    '''
    Will force a stop after 1 test case
    '''
    def StopRun(self):
        # Always request a stop, so only the first test case runs.
        return True
# end
| {
"content_hash": "1170280a0323188326a3aad8518b4d1e",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 79,
"avg_line_length": 26.43421052631579,
"alnum_prop": 0.7272274763563962,
"repo_name": "thecrackofdawn/Peach2.3",
"id": "574ea2768381a5c44e8ed188d49bdbf02f224ad5",
"size": "2010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Peach/Agent/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "29972"
},
{
"name": "C++",
"bytes": "21544"
},
{
"name": "CSS",
"bytes": "18213"
},
{
"name": "JavaScript",
"bytes": "10581"
},
{
"name": "Objective-C",
"bytes": "403"
},
{
"name": "Python",
"bytes": "25902756"
},
{
"name": "Shell",
"bytes": "898"
},
{
"name": "XSLT",
"bytes": "18658"
}
],
"symlink_target": ""
} |
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisCentralnicComGrComStatusRegistered(object):
    """Parser regression test: whois.centralnic.com response for the
    registered domain google.gr.com (fixture-driven)."""

    def setUp(self):
        response_path = "spec/fixtures/responses/whois.centralnic.com/gr.com/status_registered.txt"
        server = "whois.centralnic.com"
        raw_part = yawhois.record.Part(open(response_path, "r").read(), server)
        self.record = yawhois.record.Record(None, [raw_part])

    def test_status(self):
        eq_(self.record.status, ["ok"])

    def test_available(self):
        eq_(self.record.available, False)

    def test_domain(self):
        eq_(self.record.domain, "google.gr.com")

    def test_nameservers(self):
        nameservers = self.record.nameservers
        eq_(nameservers.__class__.__name__, 'list')
        eq_(len(nameservers), 2)
        expected_names = ["f1g1ns1.dnspod.net", "f1g1ns2.dnspod.net"]
        for nameserver, expected_name in zip(nameservers, expected_names):
            eq_(nameserver.__class__.__name__, 'Nameserver')
            eq_(nameserver.name, expected_name)

    def test_admin_contacts(self):
        admin_contacts = self.record.admin_contacts
        eq_(admin_contacts.__class__.__name__, 'list')
        eq_(len(admin_contacts), 0)

    def test_registered(self):
        eq_(self.record.registered, True)

    def test_created_on(self):
        created = self.record.created_on
        eq_(created.__class__.__name__, 'datetime')
        eq_(created, time_parse('2011-02-07 13:10:14 UTC'))

    def test_registrar(self):
        eq_(self.record.registrar, None)

    def test_registrant_contacts(self):
        contacts = self.record.registrant_contacts
        eq_(contacts.__class__.__name__, 'list')
        eq_(len(contacts), 1)
        registrant = contacts[0]
        eq_(registrant.__class__.__name__, 'Contact')
        eq_(registrant.type, yawhois.record.Contact.TYPE_REGISTRANT)
        eq_(registrant.id, "H1346485")
        # Only the handle is present in this response.
        for attribute in ('name', 'organization', 'address', 'city', 'zip',
                          'state', 'country', 'country_code', 'phone', 'fax',
                          'email'):
            eq_(getattr(registrant, attribute), None)

    def test_technical_contacts(self):
        contacts = self.record.technical_contacts
        eq_(contacts.__class__.__name__, 'list')
        eq_(len(contacts), 1)
        technical = contacts[0]
        eq_(technical.__class__.__name__, 'Contact')
        eq_(technical.type, yawhois.record.Contact.TYPE_TECHNICAL)
        eq_(technical.id, "H1346485")
        # Only the handle is present in this response.
        for attribute in ('name', 'organization', 'address', 'city', 'zip',
                          'state', 'country', 'country_code', 'phone', 'fax',
                          'email'):
            eq_(getattr(technical, attribute), None)

    def test_updated_on(self):
        updated = self.record.updated_on
        eq_(updated.__class__.__name__, 'datetime')
        eq_(updated, time_parse('2012-06-23 10:38:02 UTC'))

    def test_domain_id(self):
        eq_(self.record.domain_id, "CNIC-DO735168")

    def test_expires_on(self):
        expiry = self.record.expires_on
        eq_(expiry.__class__.__name__, 'datetime')
        eq_(expiry, time_parse('2015-02-07 23:59:59 UTC'))

    def test_disclaimer(self):
        eq_(self.record.disclaimer, "This whois service is provided by CentralNic Ltd and only contains information pertaining to Internet domain names we have registered for our customers. By using this service you are agreeing (1) not to use any information presented here for any purpose other than determining ownership of domain names, (2) not to store or reproduce this data in any way, (3) not to use any high-volume, automated, electronic processes to obtain data from this service. Abuse of this service is monitored and actions in contravention of these terms will result in being permanently blacklisted. All data is (c) CentralNic Ltd https://www.centralnic.com/")
| {
"content_hash": "3df0205f76bff8ebf1e5b8d6a9d4412c",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 676,
"avg_line_length": 52.52173913043478,
"alnum_prop": 0.6668046357615894,
"repo_name": "huyphan/pyyawhois",
"id": "ce49b0cd52cc0b8368abbb597e97ceda1dc7e055",
"size": "5108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/record/parser/test_response_whois_centralnic_com_gr_com_status_registered.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1859653"
}
],
"symlink_target": ""
} |
"""
Dumps info about switches when they first connect
"""
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.util import dpid_to_str
log = core.getLogger()
# Formatted switch descriptions we've logged
# (We remember them so that we only print them once)
_switches = set()
# .. unless always is True in which case we always print them
_always = False
def _format_entry (desc):
  """Render a switch's description stats as a boxed multi-line banner."""
  def show (value):
    # Show a placeholder for empty / missing description fields
    return str(value) if value else "<Empty>"
  dpid = dpid_to_str(desc.connection.dpid)
  body = desc.ofp.body
  rule = "=" * (len(dpid) + 12)
  lines = [
    rule,
    "New Switch: " + dpid,
    "-" * (len(dpid) + 12),
    "Hardware: " + show(body.hw_desc),
    "Software: " + show(body.sw_desc),
    "SerialNum: " + show(body.serial_num),
    "Desc: " + show(body.dp_desc),
    rule,
  ]
  return "\n".join(lines)
def _handle_ConnectionUp (event):
  # Ask every newly connected switch for its description stats; the
  # reply arrives as a SwitchDescReceived event.
  msg = of.ofp_stats_request(body=of.ofp_desc_stats_request())
  msg.type = 0 # For betta bug, can be removed
  event.connection.send(msg)
def _handle_SwitchDescReceived (event):
  """Log the formatted switch description, skipping exact duplicates
  unless the module was launched with always=True."""
  entry = _format_entry(event)
  if not _always and entry in _switches:
    # We've already logged an identical description.
    return
  _switches.add(entry)
  logger = core.getLogger("info." + dpid_to_str(event.connection.dpid))
  for line in entry.split("\n"):
    logger.info(line)
def launch (always = False):
  """POX component entry point.

  :param always: if True, log every switch description, even repeats.
  """
  global _always
  _always = always
  core.openflow.addListenerByName("ConnectionUp",
    _handle_ConnectionUp)
  core.openflow.addListenerByName("SwitchDescReceived",
    _handle_SwitchDescReceived)
| {
"content_hash": "697278d387b0b1cdcb2e4983b23b8c60",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 71,
"avg_line_length": 26.098360655737704,
"alnum_prop": 0.6664572864321608,
"repo_name": "damomeen/pox-datapath",
"id": "15cdb574ebe1c80c7c1da97f94153530850c5863",
"size": "2172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pox/misc/switch_info.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "15247"
},
{
"name": "JavaScript",
"bytes": "9135"
},
{
"name": "Python",
"bytes": "1037929"
},
{
"name": "Shell",
"bytes": "373"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the infopage app: creates the Info table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Info',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.TextField()),
                ('author', models.TextField()),
                ('content', models.TextField()),
                ('pub_date', models.DateTimeField()),
            ],
        ),
    ]
| {
"content_hash": "62c1937b06349bb5d35ec88e075d50ab",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 114,
"avg_line_length": 25.583333333333332,
"alnum_prop": 0.5342019543973942,
"repo_name": "t-mertz/slurmCompanion",
"id": "1997f95686faf3e3a30a99832c03e7245fcbdc05",
"size": "687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django-web/infopage/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5891"
},
{
"name": "HTML",
"bytes": "17411"
},
{
"name": "JavaScript",
"bytes": "3439"
},
{
"name": "PHP",
"bytes": "228"
},
{
"name": "Python",
"bytes": "84204"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.auth.models import User
class Theme(models.Model):
    """A named theme, sourced from a repository, that projects can use."""
    name = models.CharField(max_length=100)
    # URL-safe unique identifier; Project references Theme through this field.
    slug = models.SlugField(unique=True, max_length=100)
    author = models.CharField(max_length=100, default='')
    repoUrl = models.URLField(default=None, max_length=250)
    def __str__(self):
        return self.name
class Project(models.Model):
    """A user-owned project backed by a repository, optionally themed."""
    # Owner is referenced by username rather than primary key.
    owner = models.ForeignKey(User, on_delete=models.CASCADE, to_field='username')
    contributors = models.ManyToManyField(User, related_name='contributors')
    title = models.CharField(max_length=100)
    slug = models.CharField(max_length=100)
    description = models.CharField(null=True, max_length=250)
    repoUrl = models.URLField(default=None, max_length=250)
    isPrivate = models.BooleanField(default=False)
    createdAt = models.DateField(auto_now_add=True)
    updatedAt = models.DateField(auto_now=True)
    # Theme is linked by slug; kept (as NULL) if the theme is deleted.
    theme = models.ForeignKey(Theme, on_delete=models.SET_NULL, to_field='slug', default=None, null=True)
    def __str__(self):
        return self.title
| {
"content_hash": "22468050b7440980b21f36244e861843",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 103,
"avg_line_length": 37.107142857142854,
"alnum_prop": 0.7439846005774784,
"repo_name": "stlim0730/glide",
"id": "d156bdf3524ecb206de344417dbe470c828a143f",
"size": "1039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "workspace/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "58664"
},
{
"name": "HTML",
"bytes": "50215"
},
{
"name": "JavaScript",
"bytes": "343297"
},
{
"name": "Python",
"bytes": "69146"
},
{
"name": "Shell",
"bytes": "7793"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the Singing Mountain clan outcast creature template."""
    creature = Creature()
    creature.template = "object/mobile/shared_dressed_dathomir_sing_mt_clan_outcast.iff"
    creature.attribute_template_id = 9
    creature.stfName("theme_park_name", "singing_mt_clan_outcast")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return creature
"content_hash": "d791a9cb67d582786285a52ed8acf6e1",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 83,
"avg_line_length": 25.53846153846154,
"alnum_prop": 0.7078313253012049,
"repo_name": "obi-two/Rebelion",
"id": "b65109d48bcd8f44f62dcf8c5cfad93b95ff4b42",
"size": "477",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/mobile/shared_dressed_dathomir_sing_mt_clan_outcast.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
"""
Proxies for entities in other twinterpreters
"""
from cpy2py.kernel.exceptions import TwinterpeterUnavailable
from cpy2py.kernel import state
from cpy2py.proxy import tracker
class UnboundedMethodProxy(object):
    """
    Descriptor proxying an unbound method to its twinterpreter

    :param real_method: the unbound method object to be proxied
    :note: In Python3 terms, an unbound method is simply a function bound to a class.
    """
    def __init__(self, real_method):
        self.__wrapped__ = real_method
        # Mirror the common function metadata; attributes the wrapped
        # object lacks are simply skipped.
        for name in ('__doc__', '__defaults__', '__name__', '__module__'):
            try:
                value = getattr(real_method, name)
            except AttributeError:
                continue
            setattr(self, name, value)
        assert hasattr(self, '__name__'), "%s must be able to extract method __name__" % self.__class__.__name__
    def __get__(self, instance, owner):
        # Class access dispatches on the owning class, instance access on
        # the instance; each knows which kernel owns its native twin.
        if instance is not None:
            subject = instance
            kernel = subject.__kernel__
        else:
            subject = owner
            kernel = state.get_kernel(subject.__twin_id__)
        def _dispatch(*args, **kwargs):
            return kernel.dispatch_method_call(subject, self.__name__, *args, **kwargs)
        return _dispatch
class InstanceProxy(object):
    """
    Proxy for instances of classes living in another twinterpreter

    :see: Real object :py:class:`~cpy2py.proxy.proxy_object.TwinObject` for magic attributes.
    :warning: This class should never be instantiated or subclassed manually. It
              will be subclassed automatically by :py:class:`~.TwinMeta`.
    """
    # Class-level defaults; instances receive real values in __new__ /
    # via the metaclass.
    __twin_id__ = None  # to be set by metaclass
    __instance_id__ = None  # to be set on __new__
    __kernel__ = None  # to be set by metaclass
    __import_mod_name__ = (None, None)  # to be set by metaclass
    __is_twin_proxy__ = True  # recreated by metaclass
    def __new__(cls, *args, **kwargs):
        # object.__setattr__ is used throughout to bypass this class's own
        # __setattr__, which would forward the write to the remote kernel.
        self = object.__new__(cls)
        __kernel__ = state.get_kernel(self.__twin_id__)
        object.__setattr__(self, '__kernel__', __kernel__)
        try:
            # native instance exists, but no proxy yet
            __instance_id__ = kwargs.pop('__instance_id__')
        except KeyError:
            # native instance has not been created yet
            __instance_id__ = __kernel__.instantiate_class(
                self.__class__,  # only real class can be pickled
                *args, **kwargs
            )
            # NOTE(review): no increment here — presumably instantiate_class
            # already accounts for this proxy's reference; confirm in kernel.
            object.__setattr__(self, '__instance_id__', __instance_id__)
        else:
            object.__setattr__(self, '__instance_id__', __instance_id__)
            __kernel__.increment_instance_ref(self)
        # store for later use without requiring explicit lookup/converter calls
        tracker.__active_instances__[self.__twin_id__, self.__instance_id__] = self
        return self
    def __repr__(self):
        return '<%s.%s twin proxy object at %x>' % (self.__import_mod_name__[0], self.__import_mod_name__[1], id(self))
    def __getattr__(self, name):
        # Any attribute not found locally is fetched from the native twin.
        return self.__kernel__.get_attribute(self, name)
    def __setattr__(self, name, value):
        return self.__kernel__.set_attribute(self, name, value)
    def __delattr__(self, name):
        return self.__kernel__.del_attribute(self, name)
    def __del__(self):
        # Finalizer: release the native twin's reference if we ever held one.
        # hasattr is safe here — both names exist as class attributes, so
        # __getattr__ (and thus a kernel round-trip) is not triggered.
        if hasattr(self, '__instance_id__') and hasattr(self, '__twin_id__'):
            # decrement the twin reference count
            try:
                self.__kernel__.decrement_instance_ref(self)
            except (TwinterpeterUnavailable, AttributeError):
                # twin already dead, doesn't care for us anymore
                return
| {
"content_hash": "462bae6db0a209df18f1c921024265eb",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 119,
"avg_line_length": 37.36082474226804,
"alnum_prop": 0.5736754966887417,
"repo_name": "maxfischer2781/cpy2py",
"id": "803f391a71ee4332b457544fee0827ab417584d8",
"size": "4251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cpy2py/proxy/proxy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "7860"
},
{
"name": "Python",
"bytes": "230870"
}
],
"symlink_target": ""
} |
# Version of the application, kept as individual components plus derived
# aggregates. MAJOR.MINOR.RELEASE.SUBREL; the trailing '' in VERSION is a
# placeholder for a pre-release tag.
MAJOR_VERSION = 1
MINOR_VERSION = 0
RELEASE_VERSION = 0
SUBREL_VERSION = 0
VERSION = (MAJOR_VERSION, MINOR_VERSION, RELEASE_VERSION,
           SUBREL_VERSION, '')
# Derive the string from the components so the two can never drift apart
# (previously it was a hand-maintained duplicate literal).
VERSION_STRING = '.'.join(str(part) for part in VERSION[:4])
"content_hash": "67b10f20208feb1830e70c2d9b3887f0",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 57,
"avg_line_length": 24.625,
"alnum_prop": 0.6598984771573604,
"repo_name": "karadalex/PythonOrganizerAppProject",
"id": "6a5883ad0d613fd0ec6bf8a6ba02c448901f14cc",
"size": "197",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "__version__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40671"
}
],
"symlink_target": ""
} |
# Public API of the Capture package: re-export the two capturer classes so
# callers can write ``from Capture import StreamCapturer, FileCapturer``.
__all__ = ["StreamCapturer","FileCapturer"]
from .StreamCapturer import StreamCapturer
from .FileCapturer import FileCapturer
| {
"content_hash": "0837c2de19761f1ec09742a8aa7418bf",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 43,
"avg_line_length": 21.5,
"alnum_prop": 0.7829457364341085,
"repo_name": "detorto/mariobot",
"id": "d3ec1cc0238ed5d23e81adc27e4f561eee4aad0c",
"size": "129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Capture/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43257"
}
],
"symlink_target": ""
} |
"""Exceptions used in climatecontrol."""
class SettingsValidationError(ValueError):
"""Failed to validate settings."""
class SettingsLoadError(ValueError):
"""Settings file is neither path nor content."""
class ContentLoadError(SettingsLoadError):
"""Contents could not be loaded."""
class FileLoadError(SettingsLoadError):
"""Contents could not be loaded."""
class NoCompatibleLoaderFoundError(SettingsLoadError):
"""Settings could not be loaded do to format or file being incompatible."""
| {
"content_hash": "a37714ca6a07d06cd217138f02b91e26",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 79,
"avg_line_length": 24.80952380952381,
"alnum_prop": 0.7428023032629558,
"repo_name": "daviskirk/climatecontrol",
"id": "eb32e95d5d51e8bbec8d5f6010f82564110c9574",
"size": "521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "climatecontrol/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106922"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the ``SoloBook`` model with a
    # title and a one-to-one link (PROTECT on delete) to
    # ``modeladmintest.Author``. Do not edit the operations by hand.
    dependencies = [
        ("modeladmintest", "0007_friend"),
    ]
    operations = [
        migrations.CreateModel(
            name="SoloBook",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("title", models.CharField(max_length=255)),
                (
                    "author",
                    models.OneToOneField(
                        on_delete=django.db.models.deletion.PROTECT,
                        to="modeladmintest.Author",
                    ),
                ),
            ],
        ),
    ]
| {
"content_hash": "ed6538e98b3eaee24c18d1c4a8ca1601",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 68,
"avg_line_length": 27.264705882352942,
"alnum_prop": 0.39805825242718446,
"repo_name": "wagtail/wagtail",
"id": "f7b9689fa465d29f977b5fe9e08a860ca16e36f0",
"size": "977",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "wagtail/test/modeladmintest/migrations/0008_solobook.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2522"
},
{
"name": "Dockerfile",
"bytes": "2041"
},
{
"name": "HTML",
"bytes": "593672"
},
{
"name": "JavaScript",
"bytes": "624463"
},
{
"name": "Makefile",
"bytes": "1413"
},
{
"name": "Python",
"bytes": "6598232"
},
{
"name": "SCSS",
"bytes": "221911"
},
{
"name": "Shell",
"bytes": "6845"
},
{
"name": "TypeScript",
"bytes": "296087"
}
],
"symlink_target": ""
} |
class MongoHandler(object):
    # NOTE(review): Python 2 code (print statement). ``insert`` is declared
    # without ``self`` — when called on an instance, ``db`` receives the
    # instance and the remaining arguments shift; confirm intended usage.
    def insert(db,table,data):
        print "Insert in mongo db" | {
"content_hash": "9d296349be2ab15c46644d0388573a01",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 34,
"avg_line_length": 23.5,
"alnum_prop": 0.6702127659574468,
"repo_name": "labs-js/air-check",
"id": "365f5b9e41d9672a66203d2e310bdc9c855bf0a6",
"size": "94",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MongoHandler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "12314"
},
{
"name": "CSS",
"bytes": "19978"
},
{
"name": "HTML",
"bytes": "62525"
},
{
"name": "Java",
"bytes": "13838"
},
{
"name": "JavaScript",
"bytes": "33795"
},
{
"name": "Python",
"bytes": "970"
}
],
"symlink_target": ""
} |
import os
import sys
import collections
import unittest
# Locate the build_tools directory relative to this test file so that
# ``build_version`` can be imported without installing the SDK tools.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
CHROME_SRC = os.path.dirname(os.path.dirname(os.path.dirname(BUILD_TOOLS_DIR)))
import mock
sys.path.append(BUILD_TOOLS_DIR)
import build_version
# (returncode, output) pair describing one faked git subprocess result.
ProcInfo = collections.namedtuple('ProcInfo', ['returncode', 'output'])
class TestCase(unittest.TestCase):
  """Tests for build_version's git-derived version helpers.

  All git interaction goes through ``lastchange.RunGitCommand``, which is
  patched for every test; each fake process yields a canned commit message
  containing a Cr-Commit-Position footer.
  """
  def setUp(self):
    # Patch git for the duration of the test; started patches are stopped
    # globally in tearDown.
    self.run_git = mock.patch('lastchange.RunGitCommand').start()
  def tearDown(self):
    mock.patch.stopall()
  def mockGitCommand(self, *args):
    """Queue one fake git process per ProcInfo, returned in order."""
    side_effects = []
    for proc_info in args:
      mock_proc = mock.MagicMock()
      mock_proc.returncode = proc_info.returncode
      # NOTE(review): ``mock_proc.MagicMock()`` creates a child mock rather
      # than a fresh mock.MagicMock(); it works because communicate()'s
      # result is only indexed — confirm this was intentional.
      comm_result = mock_proc.MagicMock()
      comm_result.__getitem__.return_value = proc_info.output
      mock_proc.communicate.return_value = comm_result
      side_effects.append(mock_proc)
    self.run_git.side_effect = side_effects
  def mockDefaultGitCommand(self):
    # Single commit whose footer pins position refs/heads/master@{#292480}.
    output = """\
6a8b61d6be4656e682eba005a1dd7f129789129c
[NaCl SDK] Update build_sdk.py to display Cr-Commit-Position in README.
BUG=none
R=bradnelson@google.com, bradnelson@chromium.org
Review URL: https://codereview.chromium.org/495423010
Cr-Commit-Position: refs/heads/master@{#292480}"""
    self.mockGitCommand(ProcInfo(0, output))
  def mockDepthTwoGitCommand(self):
    # First commit has no position footer, forcing a walk to its parent.
    output0 = """\
ae4b444a0aa09a1fa73e59b180d7d957b9a36bf2
."""
    output1 = """\
6a8b61d6be4656e682eba005a1dd7f129789129c
[NaCl SDK] Update build_sdk.py to display Cr-Commit-Position in README.
BUG=none
R=bradnelson@google.com, bradnelson@chromium.org
Review URL: https://codereview.chromium.org/495423010
Cr-Commit-Position: refs/heads/master@{#292480}"""
    self.mockGitCommand(ProcInfo(0, output0), ProcInfo(0, output1))
  def assertGitShowCalled(self, depth=0):
    # The helpers are expected to read HEAD~<depth> via ``git show``.
    cmd = ['show', '-s', '--format=%H%n%B', 'HEAD~%d' % depth]
    self.run_git.assert_called_with(None, cmd)
  def testChromeVersion(self):
    self.mockDefaultGitCommand()
    result = build_version.ChromeVersion()
    self.assertGitShowCalled()
    self.assertEqual(result, 'trunk.292480')
  def testChromeRevision(self):
    self.mockDefaultGitCommand()
    result = build_version.ChromeRevision()
    self.assertGitShowCalled()
    self.assertEqual(result, '292480')
  def testChromeCommitPosition(self):
    self.mockDefaultGitCommand()
    result = build_version.ChromeCommitPosition()
    self.assertGitShowCalled()
    self.assertEqual(
        result,
        '6a8b61d6be4656e682eba005a1dd7f129789129c-refs/heads/master@{#292480}')
  def testChromeCommitPositionDepthTwo(self):
    # Footer missing on HEAD; the position must be found on HEAD~1.
    self.mockDepthTwoGitCommand()
    result = build_version.ChromeCommitPosition()
    self.assertEqual(
        result,
        '6a8b61d6be4656e682eba005a1dd7f129789129c-refs/heads/master@{#292480}')
if __name__ == '__main__':
  unittest.main()
| {
"content_hash": "12864edb755e695f4272fe15ca450057",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 79,
"avg_line_length": 28.92,
"alnum_prop": 0.7257952973720608,
"repo_name": "ric2b/Vivaldi-browser",
"id": "fdb9ec36ffd98d23df2460fb2fce240c6d5f02c7",
"size": "3082",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chromium/native_client_sdk/src/build_tools/tests/build_version_test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import pytest
from marshmallow import (
Schema,
fields,
pre_dump,
post_dump,
pre_load,
post_load,
validates,
validates_schema,
ValidationError,
EXCLUDE,
INCLUDE,
RAISE,
)
@pytest.mark.parametrize("partial_val", (True, False))
def test_decorated_processors(partial_val):
class ExampleSchema(Schema):
"""Includes different ways to invoke decorators and set up methods"""
TAG = "TAG"
value = fields.Integer(as_string=True)
# Implicit default raw, pre dump, static method.
@pre_dump
def increment_value(self, item, **kwargs):
assert "many" in kwargs
item["value"] += 1
return item
# Implicit default raw, post dump, class method.
@post_dump
def add_tag(self, item, **kwargs):
assert "many" in kwargs
item["value"] = self.TAG + item["value"]
return item
# Explicitly raw, post dump, instance method.
@post_dump(pass_many=True)
def add_envelope(self, data, many, **kwargs):
key = self.get_envelope_key(many)
return {key: data}
# Explicitly raw, pre load, instance method.
@pre_load(pass_many=True)
def remove_envelope(self, data, many, partial, **kwargs):
assert partial is partial_val
key = self.get_envelope_key(many)
return data[key]
@staticmethod
def get_envelope_key(many):
return "data" if many else "datum"
# Explicitly not raw, pre load, instance method.
@pre_load(pass_many=False)
def remove_tag(self, item, partial, **kwargs):
assert partial is partial_val
assert "many" in kwargs
item["value"] = item["value"][len(self.TAG) :]
return item
# Explicit default raw, post load, instance method.
@post_load()
def decrement_value(self, item, partial, **kwargs):
assert partial is partial_val
assert "many" in kwargs
item["value"] -= 1
return item
schema = ExampleSchema(partial=partial_val)
# Need to re-create these because the processors will modify in place.
make_item = lambda: {"value": 3}
make_items = lambda: [make_item(), {"value": 5}]
item_dumped = schema.dump(make_item())
assert item_dumped == {"datum": {"value": "TAG4"}}
item_loaded = schema.load(item_dumped)
assert item_loaded == make_item()
items_dumped = schema.dump(make_items(), many=True)
assert items_dumped == {"data": [{"value": "TAG4"}, {"value": "TAG6"}]}
items_loaded = schema.load(items_dumped, many=True)
assert items_loaded == make_items()
# Regression test for https://github.com/marshmallow-code/marshmallow/issues/347
@pytest.mark.parametrize("unknown", (EXCLUDE, INCLUDE, RAISE))
def test_decorated_processor_returning_none(unknown):
class PostSchema(Schema):
value = fields.Integer()
@post_load
def load_none(self, item, **kwargs):
return None
@post_dump
def dump_none(self, item, **kwargs):
return None
class PreSchema(Schema):
value = fields.Integer()
@pre_load
def load_none(self, item, **kwargs):
return None
@pre_dump
def dump_none(self, item, **kwargs):
return None
schema = PostSchema(unknown=unknown)
assert schema.dump({"value": 3}) is None
assert schema.load({"value": 3}) is None
schema = PreSchema(unknown=unknown)
assert schema.dump({"value": 3}) == {}
with pytest.raises(ValidationError) as excinfo:
schema.load({"value": 3})
assert excinfo.value.messages == {"_schema": ["Invalid input type."]}
class TestPassOriginal:
    """pass_original=True hands the hook the raw input alongside the
    processed data, for single items and for many=True lists."""
    def test_pass_original_single(self):
        class MySchema(Schema):
            foo = fields.Field()
            @post_load(pass_original=True)
            def post_load(self, data, original_data, **kwargs):
                ret = data.copy()
                ret["_post_load"] = original_data["sentinel"]
                return ret
            @post_dump(pass_original=True)
            def post_dump(self, data, obj, **kwargs):
                ret = data.copy()
                ret["_post_dump"] = obj["sentinel"]
                return ret
        # "sentinel" is not a schema field (EXCLUDE drops it), so it can
        # only reach the output via the original data.
        schema = MySchema(unknown=EXCLUDE)
        datum = {"foo": 42, "sentinel": 24}
        item_loaded = schema.load(datum)
        assert item_loaded["foo"] == 42
        assert item_loaded["_post_load"] == 24
        item_dumped = schema.dump(datum)
        assert item_dumped["foo"] == 42
        assert item_dumped["_post_dump"] == 24
    def test_pass_original_many(self):
        class MySchema(Schema):
            foo = fields.Field()
            @post_load(pass_many=True, pass_original=True)
            def post_load(self, data, original, many, **kwargs):
                if many:
                    ret = []
                    for item, orig_item in zip(data, original):
                        item["_post_load"] = orig_item["sentinel"]
                        ret.append(item)
                else:
                    ret = data.copy()
                    ret["_post_load"] = original["sentinel"]
                return ret
            @post_dump(pass_many=True, pass_original=True)
            def post_dump(self, data, original, many, **kwargs):
                if many:
                    ret = []
                    for item, orig_item in zip(data, original):
                        item["_post_dump"] = orig_item["sentinel"]
                        ret.append(item)
                else:
                    ret = data.copy()
                    ret["_post_dump"] = original["sentinel"]
                return ret
        schema = MySchema(unknown=EXCLUDE)
        data = [{"foo": 42, "sentinel": 24}, {"foo": 424, "sentinel": 242}]
        items_loaded = schema.load(data, many=True)
        assert items_loaded == [
            {"foo": 42, "_post_load": 24},
            {"foo": 424, "_post_load": 242},
        ]
        test_values = [e["_post_load"] for e in items_loaded]
        assert test_values == [24, 242]
        items_dumped = schema.dump(data, many=True)
        assert items_dumped == [
            {"foo": 42, "_post_dump": 24},
            {"foo": 424, "_post_dump": 242},
        ]
        # Also check load/dump of single item
        datum = {"foo": 42, "sentinel": 24}
        item_loaded = schema.load(datum, many=False)
        assert item_loaded == {"foo": 42, "_post_load": 24}
        item_dumped = schema.dump(datum, many=False)
        assert item_dumped == {"foo": 42, "_post_dump": 24}
def test_decorated_processor_inheritance():
    # Subclasses inherit decorated hooks, may override them, and can remove
    # one entirely by shadowing the name with ``None``.
    class ParentSchema(Schema):
        @post_dump
        def inherited(self, item, **kwargs):
            item["inherited"] = "inherited"
            return item
        @post_dump
        def overridden(self, item, **kwargs):
            item["overridden"] = "base"
            return item
        @post_dump
        def deleted(self, item, **kwargs):
            item["deleted"] = "retained"
            return item
    class ChildSchema(ParentSchema):
        @post_dump
        def overridden(self, item, **kwargs):
            item["overridden"] = "overridden"
            return item
        # Shadowing with None disables the inherited hook.
        deleted = None
    parent_dumped = ParentSchema().dump({})
    assert parent_dumped == {
        "inherited": "inherited",
        "overridden": "base",
        "deleted": "retained",
    }
    child_dumped = ChildSchema().dump({})
    assert child_dumped == {"inherited": "inherited", "overridden": "overridden"}
# https://github.com/marshmallow-code/marshmallow/issues/229#issuecomment-138949436
def test_pre_dump_is_invoked_before_implicit_field_generation():
    # A key added by @pre_dump is serialized when Meta.fields declares it,
    # proving the hook runs before implicit field generation.
    class Foo(Schema):
        field = fields.Integer()
        @pre_dump
        def hook(self, data, **kwargs):
            data["generated_field"] = 7
            return data
        class Meta:
            # Removing generated_field from here drops it from the output
            fields = ("field", "generated_field")
    assert Foo().dump({"field": 5}) == {"field": 5, "generated_field": 7}
class ValidatesSchema(Schema):
    """Shared fixture: a schema whose ``foo`` field only accepts 42."""
    foo = fields.Int()
    @validates("foo")
    def validate_foo(self, value):
        if value != 42:
            raise ValidationError("The answer to life the universe and everything.")
class TestValidatesDecorator:
    """Behavior of the ``@validates("<field>")`` field-validator decorator."""
    def test_validates(self):
        class VSchema(Schema):
            s = fields.String()
            @validates("s")
            def validate_string(self, data):
                raise ValidationError("nope")
        with pytest.raises(ValidationError) as excinfo:
            VSchema().load({"s": "bar"})
        assert excinfo.value.messages == {"s": ["nope"]}
    # Regression test for https://github.com/marshmallow-code/marshmallow/issues/350
    def test_validates_with_attribute(self):
        # The error must be keyed by the field name, not its ``attribute``.
        class S1(Schema):
            s = fields.String(attribute="string_name")
            @validates("s")
            def validate_string(self, data):
                raise ValidationError("nope")
        with pytest.raises(ValidationError) as excinfo:
            S1().load({"s": "foo"})
        assert excinfo.value.messages == {"s": ["nope"]}
        with pytest.raises(ValidationError):
            S1(many=True).load([{"s": "foo"}])
    def test_validates_decorator(self):
        schema = ValidatesSchema()
        errors = schema.validate({"foo": 41})
        assert "foo" in errors
        assert errors["foo"][0] == "The answer to life the universe and everything."
        errors = schema.validate({"foo": 42})
        assert errors == {}
        # many=True: errors are keyed by item index.
        errors = schema.validate([{"foo": 42}, {"foo": 43}], many=True)
        assert "foo" in errors[1]
        assert len(errors[1]["foo"]) == 1
        assert errors[1]["foo"][0] == "The answer to life the universe and everything."
        errors = schema.validate([{"foo": 42}, {"foo": 42}], many=True)
        assert errors == {}
        errors = schema.validate({})
        assert errors == {}
        # load() raises; valid_data holds whatever passed validation.
        with pytest.raises(ValidationError) as excinfo:
            schema.load({"foo": 41})
        errors = excinfo.value.messages
        result = excinfo.value.valid_data
        assert errors
        assert result == {}
        with pytest.raises(ValidationError) as excinfo:
            schema.load([{"foo": 42}, {"foo": 43}], many=True)
        errors = excinfo.value.messages
        result = excinfo.value.valid_data
        assert len(result) == 2
        assert result[0] == {"foo": 42}
        assert result[1] == {}
        assert 1 in errors
        assert "foo" in errors[1]
        assert errors[1]["foo"] == ["The answer to life the universe and everything."]
    def test_field_not_present(self):
        # Validating a nonexistent field is a programming error -> ValueError.
        class BadSchema(ValidatesSchema):
            @validates("bar")
            def validate_bar(self, value):
                raise ValidationError("Never raised.")
        schema = BadSchema()
        with pytest.raises(ValueError, match='"bar" field does not exist.'):
            schema.validate({"foo": 42})
    def test_precedence(self):
        # Field-level ``validate=`` failures short-circuit @validates hooks.
        class Schema2(ValidatesSchema):
            foo = fields.Int(validate=lambda n: n != 42)
            bar = fields.Int(validate=lambda n: n == 1)
            @validates("bar")
            def validate_bar(self, value):
                if value != 2:
                    raise ValidationError("Must be 2")
        schema = Schema2()
        errors = schema.validate({"foo": 42})
        assert "foo" in errors
        assert len(errors["foo"]) == 1
        assert "Invalid value." in errors["foo"][0]
        errors = schema.validate({"bar": 3})
        assert "bar" in errors
        assert len(errors["bar"]) == 1
        assert "Invalid value." in errors["bar"][0]
        errors = schema.validate({"bar": 1})
        assert "bar" in errors
        assert len(errors["bar"]) == 1
        assert errors["bar"][0] == "Must be 2"
    # Regression test for https://github.com/marshmallow-code/marshmallow/issues/748
    def test_validates_with_data_key(self):
        # Errors are reported under the field's data_key, not its name.
        class BadSchema(Schema):
            foo = fields.String(data_key="foo-name")
            @validates("foo")
            def validate_string(self, data):
                raise ValidationError("nope")
        schema = BadSchema()
        errors = schema.validate({"foo-name": "data"})
        assert "foo-name" in errors
        assert errors["foo-name"] == ["nope"]
        schema = BadSchema()
        errors = schema.validate(
            [{"foo-name": "data"}, {"foo-name": "data2"}], many=True
        )
        assert errors == {0: {"foo-name": ["nope"]}, 1: {"foo-name": ["nope"]}}
class TestValidatesSchemaDecorator:
    """Behavior of the ``@validates_schema`` decorator: nested/many error
    reporting, pass_many/pass_original plumbing, error merging, and
    skip_on_field_errors."""
    def test_validator_nested_many_invalid_data(self):
        class NestedSchema(Schema):
            foo = fields.Int(required=True)
        class MySchema(Schema):
            nested = fields.Nested(NestedSchema, required=True, many=True)
        schema = MySchema()
        # A non-mapping list element is rejected before validators run.
        errors = schema.validate({"nested": [1]})
        assert errors
        assert "nested" in errors
        assert 0 in errors["nested"]
        assert errors["nested"][0] == {"_schema": ["Invalid input type."]}
    def test_validator_nested_many_schema_error(self):
        # Schema-level error from a nested validator lands under _schema.
        class NestedSchema(Schema):
            foo = fields.Int(required=True)
            @validates_schema
            def validate_schema(self, data, **kwargs):
                raise ValidationError("This will never work.")
        class MySchema(Schema):
            nested = fields.Nested(NestedSchema, required=True, many=True)
        schema = MySchema()
        errors = schema.validate({"nested": [{"foo": 1}]})
        assert errors
        assert "nested" in errors
        assert 0 in errors["nested"]
        assert errors["nested"][0] == {"_schema": ["This will never work."]}
    def test_validator_nested_many_field_error(self):
        # Passing a field name to ValidationError keys the error by field.
        class NestedSchema(Schema):
            foo = fields.Int(required=True)
            @validates_schema
            def validate_schema(self, data, **kwargs):
                raise ValidationError("This will never work.", "foo")
        class MySchema(Schema):
            nested = fields.Nested(NestedSchema, required=True, many=True)
        schema = MySchema()
        errors = schema.validate({"nested": [{"foo": 1}]})
        assert errors
        assert "nested" in errors
        assert 0 in errors["nested"]
        assert errors["nested"][0] == {"foo": ["This will never work."]}
    @pytest.mark.parametrize("data", ([{"foo": 1, "bar": 2}],))
    @pytest.mark.parametrize(
        "pass_many,expected_data,expected_original_data",
        (
            [True, [{"foo": 1}], [{"foo": 1, "bar": 2}]],
            [False, {"foo": 1}, {"foo": 1, "bar": 2}],
        ),
    )
    def test_validator_nested_many_pass_original_and_pass_many(
        self, pass_many, data, expected_data, expected_original_data
    ):
        # pass_many controls whether the hook sees the whole list or each
        # item; pass_original supplies the pre-EXCLUDE input alongside.
        class NestedSchema(Schema):
            foo = fields.Int(required=True)
            @validates_schema(pass_many=pass_many, pass_original=True)
            def validate_schema(self, data, original_data, many, **kwargs):
                assert data == expected_data
                assert original_data == expected_original_data
                assert many is True
                raise ValidationError("Method called")
        class MySchema(Schema):
            nested = fields.Nested(
                NestedSchema, required=True, many=True, unknown=EXCLUDE
            )
        schema = MySchema()
        errors = schema.validate({"nested": data})
        error = errors["nested"] if pass_many else errors["nested"][0]
        assert error["_schema"][0] == "Method called"
    def test_decorated_validators(self):
        class MySchema(Schema):
            foo = fields.Int()
            bar = fields.Int()
            @validates_schema
            def validate_schema(self, data, **kwargs):
                if data["foo"] <= 3:
                    raise ValidationError("Must be greater than 3")
            @validates_schema(pass_many=True)
            def validate_raw(self, data, many, **kwargs):
                if many:
                    assert type(data) is list
                    if len(data) < 2:
                        raise ValidationError("Must provide at least 2 items")
            @validates_schema
            def validate_bar(self, data, **kwargs):
                if "bar" in data and data["bar"] < 0:
                    raise ValidationError("bar must not be negative", "bar")
        schema = MySchema()
        errors = schema.validate({"foo": 3})
        assert "_schema" in errors
        assert errors["_schema"][0] == "Must be greater than 3"
        errors = schema.validate([{"foo": 4}], many=True)
        assert "_schema" in errors
        assert len(errors["_schema"]) == 1
        assert errors["_schema"][0] == "Must provide at least 2 items"
        errors = schema.validate({"foo": 4, "bar": -1})
        assert "bar" in errors
        assert len(errors["bar"]) == 1
        assert errors["bar"][0] == "bar must not be negative"
    def test_multiple_validators(self):
        class MySchema(Schema):
            foo = fields.Int()
            bar = fields.Int()
            @validates_schema
            def validate_schema(self, data, **kwargs):
                if data["foo"] <= 3:
                    raise ValidationError("Must be greater than 3")
            @validates_schema
            def validate_bar(self, data, **kwargs):
                if "bar" in data and data["bar"] < 0:
                    raise ValidationError("bar must not be negative")
        schema = MySchema()
        errors = schema.validate({"foo": 3, "bar": -1})
        assert type(errors) is dict
        assert "_schema" in errors
        assert len(errors["_schema"]) == 2
        assert "Must be greater than 3" in errors["_schema"]
        assert "bar must not be negative" in errors["_schema"]
        errors = schema.validate([{"foo": 3, "bar": -1}, {"foo": 3}], many=True)
        assert type(errors) is dict
        assert "_schema" in errors[0]
        assert len(errors[0]["_schema"]) == 2
        assert "Must be greater than 3" in errors[0]["_schema"]
        assert "bar must not be negative" in errors[0]["_schema"]
        assert len(errors[1]["_schema"]) == 1
        # Fix: the second item's messages were being checked against
        # errors[0] (a copy-paste of the assertion above) instead of
        # errors[1], so the second item's content was never verified.
        assert "Must be greater than 3" in errors[1]["_schema"]
    def test_multiple_validators_merge_dict_errors(self):
        # Dict-shaped errors from several validators merge per key.
        class NestedSchema(Schema):
            foo = fields.Int()
            bar = fields.Int()
        class MySchema(Schema):
            nested = fields.Nested(NestedSchema)
            @validates_schema
            def validate_nested_foo(self, data, **kwargs):
                raise ValidationError({"nested": {"foo": ["Invalid foo"]}})
            @validates_schema
            def validate_nested_bar_1(self, data, **kwargs):
                raise ValidationError({"nested": {"bar": ["Invalid bar 1"]}})
            @validates_schema
            def validate_nested_bar_2(self, data, **kwargs):
                raise ValidationError({"nested": {"bar": ["Invalid bar 2"]}})
        with pytest.raises(ValidationError) as excinfo:
            MySchema().load({"nested": {"foo": 1, "bar": 2}})
        assert excinfo.value.messages == {
            "nested": {
                "foo": ["Invalid foo"],
                "bar": ["Invalid bar 1", "Invalid bar 2"],
            }
        }
    def test_passing_original_data(self):
        class MySchema(Schema):
            foo = fields.Int()
            bar = fields.Int()
            @validates_schema(pass_original=True)
            def validate_original(self, data, original_data, partial, **kwargs):
                # original_data holds the raw (pre-deserialization) input.
                if isinstance(original_data, dict) and isinstance(
                    original_data["foo"], str
                ):
                    raise ValidationError("foo cannot be a string")
            @validates_schema(pass_many=True, pass_original=True)
            def validate_original_bar(self, data, original_data, many, **kwargs):
                def check(datum):
                    if isinstance(datum, dict) and isinstance(datum["bar"], str):
                        raise ValidationError("bar cannot be a string")
                if many:
                    for each in original_data:
                        check(each)
                else:
                    check(original_data)
        schema = MySchema()
        errors = schema.validate({"foo": "4", "bar": 12})
        assert errors["_schema"] == ["foo cannot be a string"]
        errors = schema.validate({"foo": 4, "bar": "42"})
        assert errors["_schema"] == ["bar cannot be a string"]
        errors = schema.validate([{"foo": 4, "bar": "42"}], many=True)
        assert errors["_schema"] == ["bar cannot be a string"]
    def test_allow_reporting_field_errors_in_schema_validator(self):
        # A validator may raise a dict keyed by (possibly nested) fields.
        class NestedSchema(Schema):
            baz = fields.Int(required=True)
        class MySchema(Schema):
            foo = fields.Int(required=True)
            bar = fields.Nested(NestedSchema, required=True)
            bam = fields.Int(required=True)
            @validates_schema(skip_on_field_errors=True)
            def consistency_validation(self, data, **kwargs):
                errors = {}
                if data["bar"]["baz"] != data["foo"]:
                    errors["bar"] = {"baz": "Non-matching value"}
                if data["bam"] > data["foo"]:
                    errors["bam"] = "Value should be less than foo"
                if errors:
                    raise ValidationError(errors)
        schema = MySchema()
        errors = schema.validate({"foo": 2, "bar": {"baz": 5}, "bam": 6})
        assert errors["bar"]["baz"] == "Non-matching value"
        assert errors["bam"] == "Value should be less than foo"
    # https://github.com/marshmallow-code/marshmallow/issues/273
    def test_allow_arbitrary_field_names_in_error(self):
        class MySchema(Schema):
            @validates_schema
            def validator(self, data, **kwargs):
                raise ValidationError("Error message", "arbitrary_key")
        errors = MySchema().validate({})
        assert errors["arbitrary_key"] == ["Error message"]
    def test_skip_on_field_errors(self):
        class MySchema(Schema):
            foo = fields.Int(required=True, validate=lambda n: n == 3)
            bar = fields.Int(required=True)
            @validates_schema(skip_on_field_errors=True)
            def validate_schema(self, data, **kwargs):
                if data["foo"] != data["bar"]:
                    raise ValidationError("Foo and bar must be equal.")
            @validates_schema(skip_on_field_errors=True, pass_many=True)
            def validate_many(self, data, many, **kwargs):
                if many:
                    assert type(data) is list
                    if len(data) < 2:
                        raise ValidationError("Must provide at least 2 items")
        schema = MySchema()
        # check that schema errors still occur with no field errors
        errors = schema.validate({"foo": 3, "bar": 4})
        assert "_schema" in errors
        assert errors["_schema"][0] == "Foo and bar must be equal."
        errors = schema.validate([{"foo": 3, "bar": 3}], many=True)
        assert "_schema" in errors
        assert errors["_schema"][0] == "Must provide at least 2 items"
        # check that schema errors don't occur when field errors do
        errors = schema.validate({"foo": 3, "bar": "not an int"})
        assert "bar" in errors
        assert "_schema" not in errors
        errors = schema.validate({"foo": 2, "bar": 2})
        assert "foo" in errors
        assert "_schema" not in errors
        errors = schema.validate([{"foo": 3, "bar": "not an int"}], many=True)
        assert "bar" in errors[0]
        assert "_schema" not in errors
def test_decorator_error_handling():  # noqa: C901
    # Each hook fires for a specific "foo" value, covering the three ways a
    # processor can raise: a dict of messages, a message with a field name,
    # and a bare message (which lands under _schema).
    class ExampleSchema(Schema):
        foo = fields.Int()
        bar = fields.Int()
        @pre_load()
        def pre_load_error1(self, item, **kwargs):
            if item["foo"] != 0:
                return item
            errors = {"foo": ["preloadmsg1"], "bar": ["preloadmsg2", "preloadmsg3"]}
            raise ValidationError(errors)
        @pre_load()
        def pre_load_error2(self, item, **kwargs):
            if item["foo"] != 4:
                return item
            raise ValidationError("preloadmsg1", "foo")
        @pre_load()
        def pre_load_error3(self, item, **kwargs):
            if item["foo"] != 8:
                return item
            raise ValidationError("preloadmsg1")
        @post_load()
        def post_load_error1(self, item, **kwargs):
            if item["foo"] != 1:
                return item
            errors = {"foo": ["postloadmsg1"], "bar": ["postloadmsg2", "postloadmsg3"]}
            raise ValidationError(errors)
        @post_load()
        def post_load_error2(self, item, **kwargs):
            if item["foo"] != 5:
                return item
            raise ValidationError("postloadmsg1", "foo")
    # NOTE(review): make_item is defined but never used in this test.
    def make_item(foo, bar):
        data = schema.load({"foo": foo, "bar": bar})
        assert data is not None
        return data
    schema = ExampleSchema()
    # foo == 0: pre-load dict error.
    with pytest.raises(ValidationError) as excinfo:
        schema.load({"foo": 0, "bar": 1})
    errors = excinfo.value.messages
    assert "foo" in errors
    assert len(errors["foo"]) == 1
    assert errors["foo"][0] == "preloadmsg1"
    assert "bar" in errors
    assert len(errors["bar"]) == 2
    assert "preloadmsg2" in errors["bar"]
    assert "preloadmsg3" in errors["bar"]
    # foo == 1: post-load dict error.
    with pytest.raises(ValidationError) as excinfo:
        schema.load({"foo": 1, "bar": 1})
    errors = excinfo.value.messages
    assert "foo" in errors
    assert len(errors["foo"]) == 1
    assert errors["foo"][0] == "postloadmsg1"
    assert "bar" in errors
    assert len(errors["bar"]) == 2
    assert "postloadmsg2" in errors["bar"]
    assert "postloadmsg3" in errors["bar"]
    # foo == 4: pre-load error keyed to "foo".
    with pytest.raises(ValidationError) as excinfo:
        schema.load({"foo": 4, "bar": 1})
    errors = excinfo.value.messages
    assert len(errors) == 1
    assert "foo" in errors
    assert len(errors["foo"]) == 1
    assert errors["foo"][0] == "preloadmsg1"
    # foo == 5: post-load error keyed to "foo".
    with pytest.raises(ValidationError) as excinfo:
        schema.load({"foo": 5, "bar": 1})
    errors = excinfo.value.messages
    assert len(errors) == 1
    assert "foo" in errors
    assert len(errors["foo"]) == 1
    assert errors["foo"][0] == "postloadmsg1"
    # foo == 8: bare pre-load error ends up under _schema.
    with pytest.raises(ValidationError) as excinfo:
        schema.load({"foo": 8, "bar": 1})
    errors = excinfo.value.messages
    assert len(errors) == 1
    assert "_schema" in errors
    assert len(errors["_schema"]) == 1
    assert errors["_schema"][0] == "preloadmsg1"
@pytest.mark.parametrize("decorator", [pre_load, post_load])
def test_decorator_error_handling_with_load(decorator):
    """A ValidationError raised in a load hook surfaces via load(); dump() is unaffected."""

    class ExampleSchema(Schema):
        @decorator
        def always_fail(self, item, **kwargs):
            raise ValidationError({"foo": "error"})

    schema = ExampleSchema()
    with pytest.raises(ValidationError) as excinfo:
        schema.load({})
    assert excinfo.value.messages == {"foo": "error"}
    # Dumping must still succeed despite the load-time hook error.
    schema.dump(object())
@pytest.mark.parametrize("decorator", [pre_load, post_load])
def test_decorator_error_handling_with_load_dict_error(decorator):
    """A field-named ValidationError from a load hook is nested under that field."""

    class ExampleSchema(Schema):
        @decorator
        def always_fail(self, item, **kwargs):
            raise ValidationError({"foo": "error"}, "nested_field")

    schema = ExampleSchema()
    with pytest.raises(ValidationError) as excinfo:
        schema.load({})
    assert excinfo.value.messages == {"nested_field": {"foo": "error"}}
    # Dumping must still succeed despite the load-time hook error.
    schema.dump(object())
@pytest.mark.parametrize("decorator", [pre_dump, post_dump])
def test_decorator_error_handling_with_dump(decorator):
    """A ValidationError raised in a dump hook surfaces via dump(); load() is unaffected."""

    class ExampleSchema(Schema):
        @decorator
        def always_fail(self, item, **kwargs):
            raise ValidationError({"foo": "error"})

    schema = ExampleSchema()
    with pytest.raises(ValidationError) as excinfo:
        schema.dump(object())
    assert excinfo.value.messages == {"foo": "error"}
    # Loading must still succeed despite the dump-time hook error.
    schema.load({})
class Nested:
    """Fixture: plain holder for a single ``foo`` value."""

    def __init__(self, foo):
        self.foo = foo
class Example:
    """Fixture: plain wrapper around a list of ``Nested`` instances."""

    def __init__(self, nested):
        self.nested = nested
example = Example(nested=[Nested(x) for x in range(1)])
@pytest.mark.parametrize(
    "data,expected_data,expected_original_data",
    ([example, {"foo": 0}, example.nested[0]],),
)
def test_decorator_post_dump_with_nested_original_and_pass_many(
    data, expected_data, expected_original_data
):
    # Verifies that @post_dump hooks on a nested schema receive the original
    # (pre-serialization) objects, both per item (pass_many=False) and for
    # the whole collection (pass_many=True).
    class NestedSchema(Schema):
        foo = fields.Int(required=True)

        @post_dump(pass_many=False, pass_original=True)
        def check_pass_original_when_pass_many_false(
            self, data, original_data, **kwargs
        ):
            # Per-item hook: original_data is the single source object.
            assert data == expected_data
            assert original_data == expected_original_data
            return data

        @post_dump(pass_many=True, pass_original=True)
        def check_pass_original_when_pass_many_true(
            self, data, original_data, many, **kwargs
        ):
            # Collection hook: original objects arrive as a list.
            assert many is True
            assert data == [expected_data]
            assert original_data == [expected_original_data]
            return data

    class ExampleSchema(Schema):
        nested = fields.Nested(NestedSchema, required=True, many=True)

    schema = ExampleSchema()
    assert schema.dump(data) == {"nested": [{"foo": 0}]}
@pytest.mark.parametrize(
    "data,expected_data,expected_original_data",
    ([{"nested": [{"foo": 0}]}, {"foo": 0}, {"foo": 0}],),
)
def test_decorator_post_load_with_nested_original_and_pass_many(
    data, expected_data, expected_original_data
):
    # Load-path counterpart of the post_dump test above: @post_load hooks
    # on a nested schema must receive the original raw input data.
    class NestedSchema(Schema):
        foo = fields.Int(required=True)

        @post_load(pass_many=False, pass_original=True)
        def check_pass_original_when_pass_many_false(
            self, data, original_data, **kwargs
        ):
            # Per-item hook: original_data is the single raw input dict.
            assert data == expected_data
            assert original_data == expected_original_data
            return data

        @post_load(pass_many=True, pass_original=True)
        def check_pass_original_when_pass_many_true(
            self, data, original_data, many, **kwargs
        ):
            # Collection hook: raw inputs arrive as a list.
            assert many is True
            assert data == [expected_data]
            assert original_data == [expected_original_data]
            return data

    class ExampleSchema(Schema):
        nested = fields.Nested(NestedSchema, required=True, many=True)

    schema = ExampleSchema()
    assert schema.load(data) == data
| {
"content_hash": "96f075a6928b9df50a150962f5142cae",
"timestamp": "",
"source": "github",
"line_count": 896,
"max_line_length": 87,
"avg_line_length": 34.526785714285715,
"alnum_prop": 0.5608675976208948,
"repo_name": "mwstobo/marshmallow",
"id": "0d2ffc6baff0c7f6002ed23a44950a25672bb65a",
"size": "30937",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/test_decorators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "523097"
}
],
"symlink_target": ""
} |
"""Test desiutil.census.
"""
import unittest
from unittest.mock import call, patch, Mock
class TestCensus(unittest.TestCase):
    """Test desiutil.census.
    """

    @classmethod
    def setUpClass(cls):
        # Test data files live in the 't' directory next to this module.
        from os.path import dirname, join
        cls.data_dir = join(dirname(__file__), 't')

    @classmethod
    def tearDownClass(cls):
        pass

    def test_ScannedFile(self):
        """Test simple object storing file data.
        """
        from ..census import ScannedFile
        f = ScannedFile('foo.txt', 12345, 1973)
        self.assertEqual(f.filename, 'foo.txt')
        self.assertEqual(f.size, 12345)
        self.assertEqual(f.year, 1973)
        # A freshly constructed ScannedFile is not a link of any kind.
        self.assertFalse(f.islink)
        self.assertFalse(f.isexternal)
        self.assertIsNone(f.linkname)
        self.assertIsNone(f.linksize)
        self.assertIsNone(f.linkyear)

    def test_get_options(self):
        """Test command-line argument parsing.
        """
        from ..census import get_options
        options = get_options([])
        self.assertFalse(options.verbose)
        options = get_options(['--verbose'])
        self.assertTrue(options.verbose)
        options = get_options(['-c', 'foo.yaml'])
        self.assertEqual(options.config, 'foo.yaml')

    def test_walk_error(self):
        """Test error-handling function for os.walk().
        """
        from ..census import walk_error
        # A plain OSError is reported through log.error().
        with patch('desiutil.log.get_logger') as mock_get_logger:
            mock = Mock()
            mock_get_logger.return_value = mock
            try:
                raise OSError(2, 'File not found', 'foo.txt')
            except OSError as e:
                walk_error(e)
            calls = [call.error("[Errno 2] File not found: 'foo.txt'")]
            self.assertListEqual(mock.mock_calls, calls)
        # An OSError carrying a second filename (filename2) should include
        # both names in the logged message.
        with patch('desiutil.log.get_logger') as mock_get_logger:
            mock = Mock()
            mock_get_logger.return_value = mock
            try:
                raise OSError(2, 'File not found', 'foo.txt', None, 'bar.txt')
            except OSError as e:
                walk_error(e)
            calls = [call.error("[Errno 2] File not found: 'foo.txt' -> " +
                                "'bar.txt'")]
            self.assertListEqual(mock.mock_calls, calls)

    def test_year(self):
        """Test conversion of mtime to year.
        """
        from ..census import year
        from time import gmtime
        # 2016-10-05 UTC: reported as fiscal year 2017, calendar year 2016.
        mtime = 1475692367.0
        self.assertEqual(year(mtime), 2017)
        self.assertEqual(year(mtime, fy=False), 2016)

    def test_scan_file(self):
        """Test analysis of a single file.
        """
        from os import stat_result
        from os.path import join
        from ..census import scan_file
        mock_os = Mock()
        fd = join(self.data_dir, 'test.module')
        intlink = join(self.data_dir, 'test.module.link')
        extlink = '/foo/bar/t/test.module'
        # Canned stat result: size 973 bytes, mtime in calendar year 2016.
        s = stat_result((33188, 83865343, 16777220,
                         1, 501, 20, 973,
                         1491428112, 1446143268,
                         1462630505))
        #
        # Simulate a simple file.
        #
        calls = [call.debug("os.stat('{0}')".format(fd)),
                 call.warning("{0} does not have correct group id!".format(fd))]
        mock_log = Mock()
        with patch('desiutil.log.get_logger') as mock_get_logger:
            with patch.dict('sys.modules', {'os': mock_os,
                                            'os.path': mock_os.path}):
                mock_get_logger.return_value = mock_log
                mock_os.environ = dict()
                mock_os.stat.return_value = s
                mock_os.path.islink.return_value = False
                mock_os.path.join.return_value = fd
                f = scan_file(self.data_dir, 'test.module', 12345)
                self.assertListEqual(mock_log.mock_calls, calls)
                self.assertEqual(f.filename, fd)
                self.assertEqual(f.size, 973)
                self.assertEqual(f.year, 2016)
        #
        # Simulate an internal link.
        #
        calls = [call.debug("os.stat('{0}')".format(fd)),
                 call.warning("{0} does not have correct group id!".format(fd)),
                 call.debug("os.lstat('{0}')".format(fd)),
                 call.warning("{0} does not have correct group id!".format(fd)),
                 call.debug("Found internal link {0} -> {0}.link.".format(fd))]
        mock_log = Mock()
        with patch('desiutil.log.get_logger') as mock_get_logger:
            with patch.dict('sys.modules', {'os': mock_os,
                                            'os.path': mock_os.path}):
                mock_get_logger.return_value = mock_log
                mock_os.environ = dict()
                mock_os.stat.return_value = s
                mock_os.lstat.return_value = s
                # commonpath inside data_dir marks the link as internal.
                mock_os.path.commonpath.return_value = self.data_dir
                mock_os.path.islink.return_value = True
                mock_os.path.join.return_value = fd
                mock_os.path.realpath.return_value = intlink
                f = scan_file(self.data_dir, 'test.module', 12345)
                self.assertListEqual(mock_log.mock_calls, calls)
                self.assertEqual(f.filename, fd)
                self.assertEqual(f.size, 973)
                self.assertTrue(f.islink)
                self.assertFalse(f.isexternal)
                self.assertEqual(f.linkname, intlink)
        #
        # Simulate an external link.
        #
        calls = [call.debug("os.stat('{0}')".format(fd)),
                 call.warning("{0} does not have correct group id!".format(fd)),
                 call.debug("os.lstat('{0}')".format(fd)),
                 call.warning("{0} does not have correct group id!".format(fd)),
                 call.debug("Found external link {0} -> {1}.".format(fd, extlink))]
        mock_log = Mock()
        with patch('desiutil.log.get_logger') as mock_get_logger:
            with patch.dict('sys.modules', {'os': mock_os,
                                            'os.path': mock_os.path}):
                mock_get_logger.return_value = mock_log
                mock_os.environ = dict()
                mock_os.stat.return_value = s
                mock_os.lstat.return_value = s
                # commonpath of '/' means the target lies outside data_dir.
                mock_os.path.commonpath.return_value = '/'
                mock_os.path.islink.return_value = True
                mock_os.path.join.return_value = fd
                mock_os.path.realpath.return_value = extlink
                f = scan_file(self.data_dir, 'test.module', 12345)
                self.assertListEqual(mock_log.mock_calls, calls)
                self.assertEqual(f.filename, fd)
                self.assertEqual(f.size, 973)
                self.assertTrue(f.islink)
                self.assertTrue(f.isexternal)
                self.assertEqual(f.linkname, extlink)

    def test_in_path(self):
        """Test directory hierarchy checker.
        """
        from ..census import in_path
        self.assertTrue(in_path('/foo/bar/baz', '/foo/bar/baz/a/b/c/foo.txt'))
        self.assertTrue(in_path('/foo/bar/baz', '/foo/bar/baz/a'))
        # Sibling paths sharing only a name prefix are not contained.
        self.assertFalse(in_path('/foo/bar/baz', '/foo/bar/baz-x2'))
        self.assertFalse(in_path('/foo/bar/baz', '/foo/baz/bar'))

    def test_output_csv(self):
        """Test CSV writer.
        """
        from os import remove
        from os.path import join
        from collections import OrderedDict
        from ..census import output_csv
        csvfile = join(self.data_dir, 'test_output_csv.csv')
        d = OrderedDict()
        d['/foo/bar'] = {2000: {'number': 2, 'size': 20},
                         2001: {'number': 2, 'size': 20},
                         2002: {'number': 2, 'size': 20}}
        d['/foo/bar/baz'] = {2000: {'number': 1, 'size': 10},
                             2001: {'number': 1, 'size': 10},
                             2002: {'number': 1, 'size': 10}}
        dd = OrderedDict()
        dd['/a/b/c'] = {2001: {'number': 2, 'size': 50},
                        2002: {'number': 4, 'size': 100},
                        2003: {'number': 2, 'size': 50}}
        dd['/a/b/c/d'] = {2002: {'number': 2, 'size': 50}}
        output_data = output_csv([d, dd], csvfile)
        # Expected rows: per-directory, per-fiscal-year *cumulative* totals
        # derived from the per-year inputs above (e.g. 2,2,2 -> 2,4,6,6).
        datatext = """Directory,FY2000 Number,FY2001 Number,FY2002 Number,FY2003 Number,FY2000 Size,FY2001 Size,FY2002 Size,FY2003 Size
/foo/bar,2,4,6,6,20,40,60,60
/foo/bar/baz,1,2,3,3,10,20,30,30
/a/b/c,0,2,6,8,0,50,150,200
/a/b/c/d,0,0,2,2,0,0,50,50"""
        data = [row.split(',') for row in datatext.split('\n')]
        self.assertEqual(len(output_data), len(data))
        for k in range(len(data)):
            self.assertListEqual(output_data[k], data[k])
        remove(csvfile)
def test_suite():
    """Support running just this module's tests via::

        python setup.py test -m <modulename>
    """
    loader = unittest.defaultTestLoader
    return loader.loadTestsFromName(__name__)
| {
"content_hash": "3e70d3917cca30bb59849ef1458269df",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 135,
"avg_line_length": 41.586854460093896,
"alnum_prop": 0.539173628358546,
"repo_name": "desihub/desiutil",
"id": "6dc3d43ba539f824be0a018a579f21ee20c1d488",
"size": "8946",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "py/desiutil/test/test_census.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "424924"
},
{
"name": "Shell",
"bytes": "12276"
}
],
"symlink_target": ""
} |
import os
import sys
import re
import msvc_common
if __name__ == '__main__':
    # Expected argv: this script, working directory, log path, MSVC install
    # directory, target architecture ('x86'|'amd64'), linker response file.
    script, workingDir, logPath, installDir, arch, rspPath = sys.argv

    def shell_escape_path(path):
        # NOTE(review): defined but never used below -- candidate for removal.
        return path.replace(" ", "\\ ")

    def link():
        # Invoke the MSVC linker with the response file, redirecting all
        # output (stdout + stderr) into the log file.
        cmd = "link \"@%s\" > \"%s\" 2>&1 " % (rspPath, logPath)
        exitcode = os.system(cmd)
        with open(logPath, "rt") as logFile:
            logContents = logFile.read()
            # Echo the captured log to the console when it mentions
            # warnings or errors, so build output isn't silently hidden.
            if re.search("(warning)|(error)", logContents, re.MULTILINE):
                print("%s" % logContents)
        # Propagate a non-zero linker exit code to our caller.
        if exitcode:
            sys.exit(exitcode)

    if os.name != 'nt':
        raise Exception("msvc is only usable on Windows")
    if not (arch == "x86" or arch == "amd64"):
        raise Exception("invalid architecture: " + arch)
    os.chdir(workingDir)
    # Set up PATH/INCLUDE/LIB etc. for the chosen MSVC install and arch
    # before invoking link (helper from the project's msvc_common module).
    msvc_common.set_msvc_environment(installDir, arch)
    link()
    sys.exit(0)
| {
"content_hash": "5000beee9caa80e580a37301e78c55d7",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 69,
"avg_line_length": 23.945945945945947,
"alnum_prop": 0.5711060948081265,
"repo_name": "fifoforlifo/pynja",
"id": "83d19ce5dbadcb3da2fa3550dc5cec3664fd4236",
"size": "886",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/pynja/scripts/msvc-link-invoke.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1431"
},
{
"name": "C++",
"bytes": "2971018"
},
{
"name": "Java",
"bytes": "701"
},
{
"name": "Protocol Buffer",
"bytes": "353085"
},
{
"name": "Python",
"bytes": "190328"
},
{
"name": "Shell",
"bytes": "5634"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
import pygame #@UnresolvedImport
from vec2d import vec2d
import os
class Creep(pygame.sprite.Sprite):
    """Represents an enemy creep sprite.

    The creep moves across the screen, bounces off the edges, and can be
    damaged via ``decrease_health``; when health is exhausted it plays a
    short explosion animation and sound, then removes itself.
    """
    # Static: explosion sound shared by all creeps (loaded lazily once).
    explosion_sound = None

    def __init__(self, screen, img_filename, init_position,
                 init_direction, speed):
        """Create a new creep.

        @param screen: Surface the creep is drawn onto.
        @param img_filename: Image file for the creep.
        @param init_position: Starting position.
        @param init_direction: Starting direction.
        @param speed: Speed in pixels/ms.
        """
        pygame.sprite.Sprite.__init__(self)
        if Creep.explosion_sound is None:
            # Load the sound only ONCE, into the static class variable.
            Creep.explosion_sound = pygame.mixer.Sound(os.path.join('sound', 'bomb_explosion.wav'))
        self.explosion_sound = Creep.explosion_sound
        self.explosion_sound.set_volume(0.2)
        self.health = 5
        self.state = Creep.ALIVE
        self.screen = screen
        self.speed = speed
        self.explosion_image = pygame.image.load(os.path.join('images', 'boom.png')).convert_alpha()
        self.explosion_timer = 0
        # The original (un-rotated) image.
        self.base_image = pygame.image.load(img_filename).convert_alpha()
        # The image that gets rotated for display.
        self.image = self.base_image
        # A rect is needed for collision handling.
        self.rect = self.image.get_rect()
        # Starting position, as a vector.
        self.pos = vec2d(init_position)
        # Starting direction, as a normalized vector.
        self.direction = vec2d(init_direction).normalized()

    def is_alive(self):
        """Return True while the creep is still on screen (alive or exploding)."""
        return self.state in (Creep.ALIVE, Creep.EXPLODING)

    def update(self, time_passed):
        """Update the creep.

        @param time_passed: Time in ms since the previous update.
        """
        if self.state == Creep.ALIVE:
            # Point the creep in the right direction. rotate() takes a
            # surface and the angle to rotate it by. Rotation is
            # counter-clockwise, so negative angles rotate clockwise; we use
            # a negative angle because the xy-plane is inverted in pygame.
            self.image = pygame.transform.rotate(
                self.base_image, -self.direction.angle)
            # Compute the displacement: the (normalized) direction vector
            # multiplied by the distance, i.e. speed x time.
            displacement = vec2d(
                self.direction.x * self.speed * time_passed,
                self.direction.y * self.speed * time_passed)
            # Apply the new position.
            self.pos += displacement
            # Refresh the rect used for collisions.
            self.rect = self.image.get_rect()
            self.rect.x = self.pos.x
            self.rect.y = self.pos.y
            # Bounce off the walls.
            self.image_w, self.image_h = self.image.get_size()
            # Shrink the screen bounds by the creep's width and height so
            # its centered position bounces slightly before the actual
            # screen edge. Looks nicer.
            bounds_rect = self.screen.get_rect().inflate(
                -self.image_w, -self.image_h)
            # Past the left edge?
            if self.pos.x < bounds_rect.left:
                # Clamp the position to the edge...
                self.pos.x = bounds_rect.left
                # ...and flip the direction vector.
                self.direction.x *= -1
            elif self.pos.x > bounds_rect.right:
                self.pos.x = bounds_rect.right
                self.direction.x *= -1
            elif self.pos.y < bounds_rect.top:
                self.pos.y = bounds_rect.top
                self.direction.y *= -1
            elif self.pos.y > bounds_rect.bottom:
                self.pos.y = bounds_rect.bottom
                self.direction.y *= -1
        elif self.state == Creep.EXPLODING:
            self.explosion_timer += time_passed
            if self.explosion_timer > 100:
                self.explosion_sound.play()
                self.state = Creep.DEAD
                self.kill()
        elif self.state == Creep.DEAD:
            pass

    def draw(self):
        """Draw the creep onto the Surface given at construction time."""
        if self.state == Creep.ALIVE:
            # Center the creep image's position, because the image changes
            # size as it rotates.
            draw_pos = self.image.get_rect().move(
                # Offset x by half the image width...
                self.pos.x - self.image_w / 2,
                # ...and y by half the image height.
                self.pos.y - self.image_h / 2)
            # Blit the creep image onto the screen image, centered.
            self.screen.blit(self.image, draw_pos)
        elif self.state == Creep.EXPLODING:
            # Center the explosion's position.
            draw_pos = self.explosion_image.get_rect().move(
                # Offset x by half the size difference...
                self.rect.x - abs(((self.image.get_width() - self.explosion_image.get_width()) / 2)),
                # ...and y likewise.
                self.rect.y - abs(((self.image.get_height() - self.explosion_image.get_height()) / 2)))
            self.screen.blit(self.explosion_image, draw_pos)
        elif self.state == Creep.DEAD:
            pass

    def decrease_health(self, n):
        """Subtract ``n`` health points and explode when health runs out.

        Bug fix: the original compared ``== 0``, so a hit for more than the
        remaining health pushed health negative and the creep never exploded.
        """
        self.health -= n
        if self.health <= 0:
            self.explode()

    def explode(self):
        """Start the explosion animation; update() finishes the kill."""
        self.state = Creep.EXPLODING

    #----------- PRIVATE VARIABLES --------------------------------#
    # The states the creep can be in:
    # ALIVE: the creep moves around, alive and happy.
    # EXPLODING: a brief moment, just before it dies.
    # DEAD: dead and inactive.
    (ALIVE, EXPLODING, DEAD) = range(3)
| {
"content_hash": "f7f9e9c62bbdb21be68205f7e2b3e777",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 112,
"avg_line_length": 21.220394736842106,
"alnum_prop": 0.5307704231902031,
"repo_name": "saintdragon2/python-3-lecture-2015",
"id": "e83c0ccabe04feccfb8f4a702d2bad8b19ecc6ea",
"size": "6502",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "civil-final/2nd_presentation/6조/aa/creep.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5095"
},
{
"name": "C",
"bytes": "460322"
},
{
"name": "C++",
"bytes": "104809"
},
{
"name": "CSS",
"bytes": "18133"
},
{
"name": "HTML",
"bytes": "1403533"
},
{
"name": "Java",
"bytes": "11586"
},
{
"name": "JavaScript",
"bytes": "57050"
},
{
"name": "Makefile",
"bytes": "5563"
},
{
"name": "PHP",
"bytes": "10268"
},
{
"name": "Python",
"bytes": "4462240"
}
],
"symlink_target": ""
} |
import errno
import logging.handlers
import json
import os
import socket
import subprocess
import sys
import gevent
from datetime import datetime
from threading import Timer
from send_alert import send_alert
WORKING_DIR = os.path.dirname(os.path.realpath(__file__))
ALERT_FILE_HEADING = 'alert_'
class PingTimeout(Exception):
    """Raised when the ping subprocess exceeds its timeout and is killed."""
class PingError(Exception):
    """Raised when the ping command itself fails to run properly."""
class Ping(object):
    """Thin wrapper around the system ``/bin/ping`` binary.

    :param count: number of echo requests to send per check.
    :param threshold: round-trip time (ms) at or above which a reply is
        considered "slow" and the check fails.
    :param timeout: seconds to wait before killing the ping subprocess.
    :param ping_src_ip: optional source address passed via ``ping -I``;
        silently ignored when it is not a valid dotted-quad string.
    :param server: default target used when ``ping_server`` gets no ip.
    """

    def __init__(self, count, threshold, timeout, ping_src_ip=None,
                 server=None):
        self.server = server
        self.count = count
        self.time_threshold = threshold
        self.timeout = timeout
        try:
            # inet_aton raises on invalid or None input, so an unusable
            # source address degrades to "let ping choose the interface".
            socket.inet_aton(ping_src_ip)
            self.ping_src_ip = ping_src_ip
        except (socket.error, TypeError):
            self.ping_src_ip = None

    def ping_server(self, ip=None):
        """Ping ``ip`` (or ``self.server``) and return ping's raw stdout.

        Raises ``PingTimeout`` when the subprocess had to be killed and
        ``ValueError`` when no target address is available.
        """
        ping_ip = ip or self.server
        if not ping_ip:
            raise ValueError('IP was not defined')
        ping_cmd = ['/bin/ping', '-c', str(self.count)]
        if self.ping_src_ip:
            ping_cmd.extend(['-I', self.ping_src_ip])
        ping_cmd.append(str(ping_ip))
        log.debug("ping linux cmd: %s", ' '.join(ping_cmd))
        pping = subprocess.Popen(ping_cmd,
                                 stdout=subprocess.PIPE)
        # Watchdog: kill the subprocess if it outlives self.timeout seconds.
        watchdog = Timer(self.timeout, pping.kill)
        watchdog.start()
        out, _ = pping.communicate()
        if watchdog.is_alive():
            watchdog.cancel()
        else:
            # The timer already fired: ping was killed mid-run.
            raise PingTimeout
        return out

    def is_result_ok(self, ping_output):
        """Return True when the output shows no unreachable host, no packet
        loss, and every reply under the time threshold.

        Bug fix: the original returned on the *first* ``icmp_seq`` line, so
        a fast first reply hid slow or lost later replies and made the
        summary-line checks unreachable. All lines are now examined, and
        success additionally requires at least one good reply line.
        """
        got_good_reply = False
        for line in ping_output.split('\n'):
            log.debug(' line: %s' % line)
            # Any unreachable report fails the whole check immediately.
            if 'Destination Host Unreachable' in line:
                log.debug('Destination Host Unreachable')
                return False
            # Summary line: anything other than 0% loss is a failure.
            if '%s packets transmitted' % self.count in line \
                    and ', 0% packet loss' not in line:
                log.debug('Found lost package.')
                return False
            if 'icmp_seq=' in line:
                # Reply line layout (iputils): "... ttl=NN time=12.3 ms";
                # field 6 is "time=<ms>" -- TODO confirm for all variants.
                try:
                    respond_time = float(line.split()[6].split('=')[1])
                except (IndexError, ValueError):
                    return False
                if respond_time >= self.time_threshold:
                    log.debug('Found over threshold ping.')
                    return False
                got_good_reply = True
        return got_good_reply
class Watcher(object):
    """Watcher object to watch for CI IP pingable"""

    def __init__(self, json_config, ip_obj):
        # :param json_config: parsed configuration dict (read via get_config).
        # :param ip_obj: "ip;name" (or "name;ip") string identifying the host.
        self.json_config = json_config
        self.pinger = Ping(
            get_config(self.json_config, 'ping_package_number', 3),
            get_config(self.json_config, 'ping_slow_threshold', 500),
            get_config(self.json_config, 'ping_timeout', 30),
            get_config(self.json_config, 'ping_source_ip', None)
        )
        # Consecutive failures/successes needed to end the looping check.
        self.fail_threshold = get_config(self.json_config,
                                         'looping_check_fail_threshold',
                                         10)
        self.ok_threshold = get_config(self.json_config,
                                       'looping_check_ok_threshold',
                                       3)
        # Seconds to sleep between looping-check iterations.
        self.looping_interval = get_config(self.json_config,
                                           'looping_check_interval',
                                           30)
        self.ok_list = []
        self.fail_list = []
        self.ip_obj = {}
        (self.ip_obj['address'], self.ip_obj['name']) = self.get_ip_object(
            ip_obj)
        log.debug("IP_OBJ: {%s ; %s}" % (self.ip_obj['address'],
                                         self.ip_obj['name']))
        # An unparsable definition is fatal: alert and exit.
        if self.ip_obj['address'] is None or self.ip_obj['name'] is None:
            log.error('Wrong def: %s' % ip_obj)
            send_alert('Wrong def: %s' % ip_obj, log)
            exit(1)

    def get_ip_object(self, ip_obj):
        # Parse a semicolon-separated "ip;name" or "name;ip" definition.
        # Whichever part validates as a dotted-quad address becomes the ip;
        # returns (None, ...) pieces when parsing fails.
        ip = name = None
        spliter = ip_obj.split(';')
        try:
            socket.inet_aton(spliter[0])
            ip = spliter[0]
            try:
                name = spliter[1]
            except IndexError:
                pass
        except (socket.error, TypeError):
            # First part is not an address -- try the other order.
            try:
                socket.inet_aton(spliter[1])
                ip = spliter[1]
                name = spliter[0]
            except (socket.error, IndexError, TypeError):
                pass
        return ip, name

    def check(self, ip):
        """Ping to server and check result"""
        # Returns True only when the ping ran and its output looked healthy;
        # every failure mode (timeout, error, missing address) maps to False.
        try:
            out = self.pinger.ping_server(ip['address'])
            if not self.pinger.is_result_ok(out):
                return False
        except PingTimeout, _:
            log.warning('Ping result TIMEOUT')
            return False
        except PingError, _:
            log.warning('Ping command ERROR')
            return False
        except ValueError, e:
            log.warning('Ping result ValueError: %s' % e.message)
            return False
        return True

    # NOTE(review): cb_ok/cb_fail are @staticmethod yet take `self`; callers
    # pass the instance explicitly (see looping_check / first_check).
    @staticmethod
    def cb_ok(self, ip):
        # Looping check converged on "ok": record the host as healthy.
        log.info('[%20.20s] looping check total: OK.' % ip['name'])
        self.ok_list.append(ip)

    @staticmethod
    def cb_fail(self, ip):
        # Looping check converged on "fail": record the host as down.
        log.warning('[%20.20s] looping check total: FAIL.' % ip['name'])
        self.fail_list.append(ip)

    def fail_alert(self):
        # Announce failed hosts, unless alerts are muted for today, and drop
        # an alert_<ip> marker file per failed host so recovery is detectable.
        mess = 'Fail ping: %s' % \
               ', '.join([obj['name'] for obj in self.fail_list])
        log.error(mess)
        try:
            # stop_alert_file containing today's YYYYMMDD silences alerts.
            stop_alert_file = os.path.join(WORKING_DIR,
                                           'stop_alert_file')
            with open(stop_alert_file, 'r') as fp:
                first_line = fp.readline().rstrip('\n')
                if first_line:
                    try:
                        log.debug('Got from %s: %s' % (stop_alert_file,
                                                       first_line))
                        if first_line == datetime.now().strftime('%Y%m%d'):
                            log.debug('Marked to stop announce for today')
                            return
                    except Exception:
                        log.debug('Found %s but wrong content' % stop_alert_file)
                        pass
        except Exception:
            # Best-effort: no stop file simply means "alerts enabled".
            log.debug('Cannot open stop-alert-file')
            pass
        send_alert(mess, log)
        try:
            # Touch an alert_<ip> marker for each failed host.
            for ip in self.fail_list:
                file_path = os.path.join(WORKING_DIR, '%s%s' %
                                         (ALERT_FILE_HEADING, ip['address']))
                with open(file_path, 'a') as fp:
                    fp.close()
                log.debug('created %s' % file_path)
        except Exception:
            log.warning('fail_alert wrong in processing alert_<ip> file.')
            pass

    def ok_alert(self):
        # For hosts that recovered, send a "back to NORMAL" alert and remove
        # the alert_<ip> marker created by a previous fail_alert().
        try:
            for ip in self.ok_list:
                file_path = os.path.join(WORKING_DIR, '%s%s' %
                                         (ALERT_FILE_HEADING, ip['address']))
                if os.path.isfile(file_path):
                    mess = '%s back to NORMAL' % ip['name']
                    send_alert(mess, log)
                    log.debug(mess)
                    os.remove(file_path)
        except Exception:
            log.warning('ok_alert wrong in processing alert_<ip> file.')
            pass

    def looping_check(self, ip, cb_ok, cb_fail):
        """Do the looping check each 1 minute."""
        # Keep pinging until either ok_threshold consecutive passes or
        # fail_threshold cumulative failures, then invoke the callback.
        # Note a failure resets pass_count, but a pass does not reset
        # fail_count.
        fail_count = 0
        pass_count = 0
        log.info('[%20.20s] Looping check started...' % ip['name'])
        while True:
            gevent.sleep(self.looping_interval)
            res = self.check(ip)
            if res:
                pass_count += 1
                log.info('[%20.20s] ... passed (%s passed - %s failed)' %
                         (ip['name'], pass_count, fail_count))
            else:
                fail_count += 1
                pass_count = 0
                log.info('[%20.20s] ... failed (%s passed - %s failed)' %
                         (ip['name'], pass_count, fail_count))
            if pass_count == self.ok_threshold:
                cb_ok(self, ip)
                break
            if fail_count == self.fail_threshold:
                cb_fail(self, ip)
                break

    def first_check(self, ip):
        """Do the first check and issue a looping check if fail at the first"""
        if not self.check(ip):
            log.info('[%20.20s] First check failed.' % ip['name'])
            self.looping_check(ip, self.cb_ok, self.cb_fail)
        else:
            log.info('[%20.20s] First check ok.' % ip['name'])
            self.ok_list.append(ip)

    def parallel_check(self):
        # Run the check in a gevent greenlet, then alert on the outcome.
        # Returns 1 when the host ended up in the fail list, 0 otherwise.
        job_list = [gevent.spawn(self.first_check, self.ip_obj)]
        if job_list:
            gevent.joinall(job_list)
        if len(self.fail_list):
            self.fail_alert()
            return 1
        if len(self.ok_list):
            self.ok_alert()
        return 0
def get_config(json_config, key, default_value=None):
    """Return ``json_config[key]``, or ``default_value`` when unavailable.

    The fallback covers a missing key (KeyError) as well as a ``None`` or
    otherwise non-subscriptable ``json_config`` (TypeError, IndexError).
    The original caught bare ``Exception``, which could also hide unrelated
    programming errors; the except clause is now narrowed accordingly.
    """
    try:
        return json_config[key]
    except (KeyError, TypeError, IndexError):
        return default_value
if __name__ == "__main__":
config_file = os.path.join(os.path.dirname(__file__), 'ping_server.conf')
with open(config_file) as json_file:
json_data = None
try:
json_data = json.load(json_file)
except Exception:
send_alert('Load %s failed' % config_file)
exit(1)
log_file = get_config(json_data, 'log_file', 'ping_server.log')
if not log_file.startswith('/'):
log_file = os.path.join(WORKING_DIR, log_file)
log_level = {
'debug': logging.DEBUG,
'info': logging.INFO
}[get_config(json_data, 'level', 'info')]
logging.basicConfig()
log = logging.getLogger("ping_server")
try:
os.makedirs(os.path.dirname(log_file))
except OSError as e:
if e.errno != errno.EEXIST:
raise
handler = logging.handlers.RotatingFileHandler(log_file,
maxBytes=1048576,
backupCount=1)
handler.setFormatter(logging.Formatter(
"%(asctime)s [%(process)6s] %(levelname)-7s [%(name)s] "
"%(message)s"))
log.addHandler(handler)
logging.getLogger().setLevel(log_level)
watcher = Watcher(json_data, sys.argv[1])
ret = watcher.parallel_check()
exit(ret)
| {
"content_hash": "384edd26ec7fd30205a147e262fc3449",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 79,
"avg_line_length": 34.60912052117264,
"alnum_prop": 0.4896,
"repo_name": "tuaminx/ping_server",
"id": "9b2ca601a639592f2fdccd8e594fb0ca9b91d64f",
"size": "10625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ping_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11357"
},
{
"name": "Shell",
"bytes": "1163"
}
],
"symlink_target": ""
} |
import sys
from .event import ET
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class EventWriter(object):
    """Streams modular-input events and log messages back to Splunk.

    ``write_event`` emits ``Event`` objects wrapped in a single ``<stream>``
    element; ``log`` writes severity-prefixed lines that land in Splunk's
    internal logs.
    """
    # Severities that Splunk understands for log messages from modular inputs.
    # Do not change these
    DEBUG = "DEBUG"
    INFO = "INFO"
    WARN = "WARN"
    ERROR = "ERROR"
    FATAL = "FATAL"

    def __init__(self, output=sys.stdout, error=sys.stderr):
        """
        :param output: Stream for event XML; defaults to sys.stdout.
        :param error: Stream for log messages; defaults to sys.stderr.
        """
        self._out = output
        self._err = error
        # Tracks whether the opening <stream> tag has been emitted yet.
        self.header_written = False

    def write_event(self, event):
        """Serialize one ``Event`` object to the output stream.

        :param event: An ``Event`` object.
        """
        if not self.header_written:
            self._out.write("<stream>")
            self.header_written = True
        event.write_to(self._out)

    def log(self, severity, message):
        """Write a severity-tagged message to the error stream and flush.

        :param severity: ``string``, one of the class-constant severities.
        :param message: ``string``, the message to log.
        """
        line = "%s %s\n" % (severity, message)
        self._err.write(line)
        self._err.flush()

    def write_xml_document(self, document):
        """Serialize an ``ElementTree`` document to the output stream.

        :param document: An ``ElementTree`` object.
        """
        serialized = ET.tostring(document)
        self._out.write(serialized)
        self._out.flush()

    def close(self):
        """Emit the closing </stream> tag so the XML stays well formed."""
        self._out.write("</stream>")
"content_hash": "8bffd51cf9fff7baacaa76783f453b02",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 100,
"avg_line_length": 30.285714285714285,
"alnum_prop": 0.6113207547169811,
"repo_name": "domeger/SplunkTAforPuppetEnterprise",
"id": "418405fcebbe1973af6365931619ed657ffa1bf3",
"size": "2702",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "bin/splunktaforpuppetenterprise/splunklib/modularinput/event_writer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5936"
},
{
"name": "CSS",
"bytes": "4504"
},
{
"name": "HTML",
"bytes": "5156"
},
{
"name": "Python",
"bytes": "5659367"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from filer.models.imagemodels import Image
from cmsplugin_filer_tests_shared.base import (
BasePluginTestMixin, CmsPluginsFilerBaseTestCase,
)
class CmsPluginFilerVideoTestCase(BasePluginTestMixin,
                                  CmsPluginsFilerBaseTestCase):
    """Exercise FilerVideoPlugin through the shared plugin-test machinery."""
    plugin_to_test = 'FilerVideoPlugin'
    # Filer model used by the base mixin when creating the test fixture;
    # the video plugin's poster image is a filer Image.
    filer_class = Image

    def get_plugin_params(self):
        # Parameters the base test passes when instantiating the plugin:
        # a video URL plus the filer image created by the base class.
        return {
            'movie_url': 'https://vimeo.com/channels/952478/133154447',
            'image': self.get_filer_object(),
        }
| {
"content_hash": "811141cd17181be27f300606dddd0131",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 71,
"avg_line_length": 29.105263157894736,
"alnum_prop": 0.6672694394213382,
"repo_name": "creimers/cmsplugin-filer",
"id": "8575a5c962ff8b1bbcefda954003d16c717ff0d6",
"size": "577",
"binary": false,
"copies": "7",
"ref": "refs/heads/develop",
"path": "cmsplugin_filer_video/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "8326"
},
{
"name": "Python",
"bytes": "575695"
},
{
"name": "Shell",
"bytes": "189"
}
],
"symlink_target": ""
} |
from django.shortcuts import render, redirect, get_object_or_404
from bootcamp.feeds.views import feeds
from django.contrib.auth.models import User
from bootcamp.feeds.models import Feed
from bootcamp.feeds.views import FEEDS_NUM_PAGES
from django.core.paginator import Paginator
from django.contrib.auth.decorators import login_required
from bootcamp.ihealth.forms import ProfileForm, ChangePasswordForm
from django.contrib import messages
from django.conf import settings as django_settings
from PIL import Image
import os
import datetime
from utils import *
DB_NAME = "test"
def home(request):
    """Landing page: the feed for logged-in users, the cover page otherwise."""
    if not request.user.is_authenticated():
        return render(request, 'core/cover.html')
    return feeds(request)
@login_required
def appointments(request):
    """List every appointment, with its raw fields normalized for display."""
    user = request.user
    session = get_session(DB_NAME, "admin", "x")
    # TODO : Include session value in Django session using cookies and stop generating it in every request
    appointment_ids = get_appointments(DB_NAME, session)['result']
    details = get_appointments_details(DB_NAME, session, appointment_ids,
                                       {})['result']
    appointment_list = parse_as(str(json.dumps(details)), as_appointment)
    # Display labels for the single-letter urgency codes.
    urgency_labels = {
        'a': 'Normal',
        'b': 'Urgent',
        'c': 'Medical Emergency',
    }
    # Display labels for the appointment state machine.
    state_labels = {
        'free': 'Free',
        'confirmed': 'Confirmed',
        'done': 'Done',
        'user_cancelled': 'Cancelled by patient',
        'center_cancelled': 'Cancelled by Health Center',
        'no_show': 'No show',
    }
    for appointment in appointment_list:
        # Unknown/missing codes (including None) render as empty strings.
        appointment.urgency = urgency_labels.get(appointment.urgency, "")
        appointment.appointment_type = appointment.appointment_type.capitalize()
        if appointment.speciality is None:
            appointment.speciality = ""
        if appointment.institution is None:
            appointment.institution = ""
        appointment.state = state_labels.get(appointment.state, "")
        appointment.appointment_date = date_to_string(
            appointment.appointment_date)
    # TODO : Show 'Patient', 'Specialty', 'Health Prof' and 'Institution' by name (in place of their ids)
    return render(request, 'ihealth.html', {
        'appointments': appointment_list,
    })
@login_required
def patients(request):
    """Render the patients list page.

    Pulls patient ids and details from the backend, formats the date of
    birth for display and renders 'patients.html'.
    """
    user = request.user
    session = get_session(DB_NAME, "admin", "x")
    patient_ids = get_patients(DB_NAME, session)['result']
    patient_details = get_patients_details(
        DB_NAME, session, patient_ids, {})['result']
    patients_list = parse_as(str(json.dumps(patient_details)), as_party)
    for patient in patients_list:
        # Date of birth may be missing; show an empty string in that case.
        patient.dob = "" if patient.dob is None else date_to_string(patient.dob)
    return render(request, 'patients.html', {
        'patients': patients_list,
    })
@login_required
def prescriptions(request):
    """Render the prescriptions list page.

    Retrieves prescription ids and details from the backend, formats the
    prescription date for display and renders 'prescriptions.html'.
    """
    user = request.user
    session = get_session(DB_NAME, "admin", "x")
    prescription_ids = get_prescriptions(DB_NAME, session)['result']
    prescription_details = get_prescriptions_details(
        DB_NAME, session, prescription_ids, {})['result']
    prescriptions_list = parse_as(
        str(json.dumps(prescription_details)), as_prescription)
    for prescription in prescriptions_list:
        # Prescription date may be missing; show an empty string in that case.
        prescription.prescription_date = (
            "" if prescription.prescription_date is None
            else date_to_string(prescription.prescription_date))
    # TODO : Show 'Patient' and 'Prescribed by' name (in place of their ids)
    return render(request, 'prescriptions.html', {
        'prescriptions': prescriptions_list
    })
# @login_required
# def network(request):
# users = User.objects.filter(is_active=True).order_by('username')
# return render(request, 'core/network.html', {'users': users})
# @login_required
# def profile(request, username):
# page_user = get_object_or_404(User, username=username)
# all_feeds = Feed.get_feeds().filter(user=page_user)
# paginator = Paginator(all_feeds, FEEDS_NUM_PAGES)
# feeds = paginator.page(1)
# from_feed = -1
# if feeds:
# from_feed = feeds[0].id
# return render(request, 'core/profile.html', {
# 'page_user': page_user,
# 'feeds': feeds,
# 'from_feed': from_feed,
# 'page': 1
# }) | {
"content_hash": "0dd696c72de94f534c456d50fc2ef190",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 109,
"avg_line_length": 37.43571428571428,
"alnum_prop": 0.658271322266743,
"repo_name": "o5k/bootcamp-ihealth",
"id": "d237026fa5c2af47d4824d4584d41401ccf0e83a",
"size": "5241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bootcamp/ihealth/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13359"
},
{
"name": "JavaScript",
"bytes": "103755"
},
{
"name": "Python",
"bytes": "84850"
}
],
"symlink_target": ""
} |
from datetime import datetime
from person import Person
class Book(object):
    """A simple book record: title, author, creation time and description."""

    def __init__(self, title):
        self.title = title
        self.author = Person()
        self.description = 'No Description available'
        self.created_at = datetime.now()
| {
"content_hash": "068dcceea413aed7b4959079e1560c17",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 53,
"avg_line_length": 23.90909090909091,
"alnum_prop": 0.6463878326996197,
"repo_name": "ReneVolution/pycologne-deserializing-using-python",
"id": "a38e5ed8b60b7ef4dfc73c5ae92d451b5a6d42a9",
"size": "263",
"binary": false,
"copies": "1",
"ref": "refs/heads/gh-pages",
"path": "sample_code/book.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "194692"
},
{
"name": "HTML",
"bytes": "59258"
},
{
"name": "JavaScript",
"bytes": "258543"
},
{
"name": "Python",
"bytes": "4061"
}
],
"symlink_target": ""
} |
"""End to end tests for lib.flows.general.memory."""
from grr.endtoend_tests import base
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import rdfvalue
class TestGrepMemory(base.AutomatedTest):
    """Test ScanMemory."""
    platforms = ["Windows", "Darwin"]
    flow = "ScanMemory"
    # AFF4 path (relative to the client) where the flow writes its results.
    test_output_path = "analysis/grep/testing"
    # Scan up to 4 GiB for the literal "grr", stopping at the first hit and
    # keeping 10 bytes of context on each side.
    args = {"also_download": False,
            "grep": rdfvalue.BareGrepSpec(
                literal="grr", length=4 * 1024 * 1024 * 1024,
                mode=rdfvalue.GrepSpec.Mode.FIRST_HIT,
                bytes_before=10, bytes_after=10),
            "output": test_output_path}

    def CheckFlow(self):
        # The FIRST_HIT scan must have produced exactly one match reference.
        collection = aff4.FACTORY.Open(self.client_id.Add(self.test_output_path),
                                       token=self.token)
        self.assertIsInstance(collection, aff4.RDFValueCollection)
        self.assertEqual(len(list(collection)), 1)
        reference = collection[0]
        # 10 bytes before + len("grr") + 10 bytes after = 23 bytes of data.
        self.assertEqual(reference.length, 23)
        self.assertEqual(reference.data[10:10 + 3], "grr")
class AbstractTestAnalyzeClientMemory(base.ClientTestBase):
    """Test AnalyzeClientMemory (Rekall).

    We use the rekall caching profile server for these tests, since we may not
    have direct internet access. It may be necessary to manually populate the
    cache with lib.rekall_profile_server.GRRRekallProfileServer.GetMissingProfiles
    on the console to make these tests pass.
    """
    flow = "AnalyzeClientMemory"
    test_output_path = "analysis/memory"
    args = {"request": rdfvalue.RekallRequest(),
            "output": test_output_path}

    def setUpRequest(self):
        """Subclasses populate self.args["request"] with Rekall plugins here."""
        raise NotImplementedError("Implemented by subclasses")

    def setUp(self):
        self.setUpRequest()

        # Remember the configured profile server so tearDown can restore it.
        self.old_config = config_lib.CONFIG.Get("Rekall.profile_server")
        if "Test Context" in config_lib.CONFIG.context:
            # We're running in a test context, where the rekall repository server is
            # set to TestRekallRepositoryProfileServer, which won't actually work for
            # an end to end test. We change it temporarily to allow the test to pass.
            config_lib.CONFIG.Set("Rekall.profile_server", "GRRRekallProfileServer")

        # RDFValueCollections need to be deleted recursively.
        aff4.FACTORY.Delete(self.client_id.Add(self.test_output_path),
                            token=self.token)
        super(AbstractTestAnalyzeClientMemory, self).setUp()

    def tearDown(self):
        # RDFValueCollections need to be deleted recursively.
        aff4.FACTORY.Delete(self.client_id.Add(self.test_output_path),
                            token=self.token)
        # Restore the profile server saved in setUp.
        config_lib.CONFIG.Set("Rekall.profile_server", self.old_config)
        super(AbstractTestAnalyzeClientMemory, self).tearDown()

    def CheckFlow(self):
        # The flow must have produced a collection with at least one response.
        self.response = aff4.FACTORY.Open(self.client_id.Add(self.test_output_path),
                                          token=self.token)
        self.assertIsInstance(self.response, aff4.RDFValueCollection)
        self.assertTrue(len(self.response) >= 1)
class AbstractTestAnalyzeClientMemoryWindows(AbstractTestAnalyzeClientMemory,
                                             base.AutomatedTest):
    # Windows-only base for the automated memory analysis tests below.
    platforms = ["Windows"]
class TestAnalyzeClientMemoryWindowsPSList(
        AbstractTestAnalyzeClientMemoryWindows):
    # Runs the Rekall "pslist" plugin against the client.
    def setUpRequest(self):
        self.args["request"].plugins = [rdfvalue.PluginRequest(plugin="pslist")]
class TestAnalyzeClientMemoryWindowsModules(
        AbstractTestAnalyzeClientMemoryWindows):
    # Runs the Rekall "modules" plugin against the client.
    def setUpRequest(self):
        self.args["request"].plugins = [rdfvalue.PluginRequest(plugin="modules")]
class TestAnalyzeClientMemoryWindowsDLLList(
        AbstractTestAnalyzeClientMemoryWindows):
    """Run rekall DLL list and look for the GRR process."""

    def setUpRequest(self):
        # Restrict dlllist to processes matching this regex.
        self.binaryname = "svchost.exe"
        self.args["request"].plugins = [
            rdfvalue.PluginRequest(plugin="dlllist",
                                   args=dict(
                                       proc_regex=self.binaryname,
                                       method="PsActiveProcessHead"
                                   ))
        ]

    def CheckFlow(self):
        super(TestAnalyzeClientMemoryWindowsDLLList, self).CheckFlow()

        # Make sure the dlllist found our process by regex:
        response_str = "".join([unicode(x) for x in self.response])
        self.assertIn(self.binaryname, response_str)
class TestAnalyzeClientMemoryMac(AbstractTestAnalyzeClientMemory):
    """Runs Rekall on Macs.

    This test has been disabled for automated testing since OS X memory analysis
    isn't reliable with Yosemite yet.
    """
    platforms = ["Darwin"]

    def setUpRequest(self):
        self.args["request"].plugins = [
            rdfvalue.PluginRequest(plugin="pslist")]

    def CheckFlow(self):
        # The configured client binary name must appear in the pslist output.
        response = aff4.FACTORY.Open(self.client_id.Add(self.test_output_path),
                                     token=self.token)
        binary_name = config_lib.CONFIG.Get(
            "Client.binary_name", context=["Client context", "Platform:Darwin"])
        self.assertTrue(binary_name in str(response))
| {
"content_hash": "bf9468fa641e2422b32a83e1fa72e2f4",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 80,
"avg_line_length": 35.8768115942029,
"alnum_prop": 0.6822864067865078,
"repo_name": "ksmaheshkumar/grr",
"id": "98c5b5bfec6ac42db0d0ec220cf538aba75c6e77",
"size": "4973",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "endtoend_tests/memory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "14993"
},
{
"name": "C",
"bytes": "9062"
},
{
"name": "C++",
"bytes": "55149"
},
{
"name": "CSS",
"bytes": "12047"
},
{
"name": "Groff",
"bytes": "444"
},
{
"name": "HTML",
"bytes": "48624"
},
{
"name": "JavaScript",
"bytes": "230351"
},
{
"name": "Makefile",
"bytes": "5863"
},
{
"name": "Protocol Buffer",
"bytes": "181723"
},
{
"name": "Python",
"bytes": "4855590"
},
{
"name": "Ruby",
"bytes": "4931"
},
{
"name": "Shell",
"bytes": "45459"
}
],
"symlink_target": ""
} |
"""
Analysis Framework Presentation by Nathan
Motivation
- Interact with data
- Easy access to many files
- Structure s.t. no code has to be rewritten, i.e. well-designed generic
base classes
- Standardized plots
Main features we discussed:
- Plotting methods of the analysis objects can take axes as arguments and draw
their plots on
them and also give axes objects as output
-> can further manipulate these plots and easily plot several things on
top of each other
- Options dictionary instead of **kw: options are saved, and for readability
and consistency the
options should be extracted at one single place
-> easier to see available options
- Standard plotting functions in base class. To make a new plot, define plot
dictionaries which
contain the relevant parameters/arguments that are passed to the plotting
function.
- Axes objects are stored in self.axs:dict.
- run_analysis() has standard steps (e.g. extract_data, fitting, plotting),
which need to be
implemented in subclasses.
- Fitting is a bit hacky at the moment, but the framework for doing this in a
modular way (i.e.
using models defined in a module) is basically there
-> Maybe also implement this in a way that makes use of dictionaries for
passing arguments?
Idea for making this a nice suite: split into the following files
1st file: Base class
2nd file: Set of very generic analyses (spectroscopy, ...)
3rd file: Everyone's own specific stuff, including dirty hacks for work on
day-to-day basis.
"""
| {
"content_hash": "ab63a7bb488efef90e79c8f4d09b2ce2",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 78,
"avg_line_length": 42.26315789473684,
"alnum_prop": 0.7210460772104608,
"repo_name": "QudevETH/PycQED_py3",
"id": "05e6cf7c6c9581b4eb81911ea748dcc036f410c1",
"size": "1631",
"binary": false,
"copies": "2",
"ref": "refs/heads/qudev_master",
"path": "pycqed/analysis_v2/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5431925"
}
],
"symlink_target": ""
} |
from .binary import BinaryModelWithShareParams as BinaryModel
from beastling.util import xml
class PseudoDolloCovarionModel(BinaryModel):
    """Pseudo-Dollo covarion substitution model, emitted as BEAST XML.

    Builds the state parameters, substitution model, priors, operators and
    loggers for the pseudo-Dollo covarion model provided by the Babel
    package. Parameters may be shared across features (via
    BinaryModelWithShareParams) or created per feature.
    """
    # (display name, BEAST package) announced to the user for this model.
    package_notice = ("Pseudo-Dollo Covarion", "Babel")

    def __init__(self, model_config, global_config):
        BinaryModel.__init__(self, model_config, global_config)
        # XML id of the (possibly shared) substitution model; set lazily in
        # _add_substmodel so later features can reference it.
        self.subst_model_id = None

    def add_state(self, state):
        """Add switch-rate, origin and death parameters to the state node."""
        BinaryModel.add_state(self, state)
        for fname in self.parameter_identifiers():
            # One param for all features
            xml.parameter(
                state,
                text="0.5 0.5",
                id="%s:pdcovarion_s.s" % fname,
                lower="1.0E-4",
                name="stateNode",
                dimension="2",
                upper="Infinity")
            xml.parameter(
                state,
                text="10",
                id="%s:pdcovarion_origin.s" % fname,
                lower="1",
                name="stateNode",
                upper="Infinity")
            xml.parameter(
                state,
                text="1.0 0.1",
                id="%s:pdcovarion_death.s" % fname,
                lower="1.0E-4",
                name="stateNode",
                dimension="2",
                upper="1.0")

    def add_frequency_state(self, state):
        """Add the estimated visible-state frequency parameter (dim 3)."""
        for fname in self.parameter_identifiers():
            xml.parameter(
                state,
                text="0.94 0.05 0.01",
                id="%s:visiblefrequencies.s" % fname,
                name="stateNode",
                dimension="3",
                lower="0.0",
                upper="1.0")

    def get_userdatatype(self, feature, fname):
        """Return the 5-state user data type, creating it only once per file."""
        if not self.beastxml._covarion_userdatatype_created:
            self.beastxml._covarion_userdatatype_created = True
            return xml.userDataType(
                None,
                id="PseudoDolloCovarionDatatype",
                spec="beast.evolution.datatype.UserDataType",
                states="5",
                codelength="1",
                codeMap="""
                A = 0,
                1 = 1 4,
                B = 1,
                0 = 0 2 3 ,
                ? = 0 1 2 3 4,
                - = 0 1 2 3 4,
                C = 2,
                D = 3,
                E = 4
                """)
        else:
            # Subsequent features just reference the shared datatype by id.
            return xml.userDataType(None, idref="PseudoDolloCovarionDatatype")

    def add_substmodel(self, sitemodel, feature, fname):
        # If we're sharing one substmodel across all features and have already
        # created it, just reference it and that's it
        if self.share_params and self.subst_model_id:
            sitemodel.set("substModel", "@%s" % self.subst_model_id)
            return

        # Otherwise, create a substmodel
        if self.share_params:
            self._add_substmodel(sitemodel, None, None)
        else:
            self._add_substmodel(sitemodel, feature, fname)

    def _add_substmodel(self, sitemodel, feature, name):
        if self.share_params:
            # Shared case: use the model name and remember the id for reuse.
            name = self.name
            self.subst_model_id = "%s:pdcovarion.s" % name
        subst_model_id = "%s:pdcovarion.s" % name
        substmodel = xml.substModel(
            sitemodel,
            id=subst_model_id,
            spec="BirthDeathCovarion2",
            deathprob="@{:}:pdcovarion_death.s".format(name),
            originLength="@{:s}:pdcovarion_origin.s".format(name),
            switchRate="@{:}:pdcovarion_s.s".format(name))

        # Numerical instability is an issue with this model, so we give the
        # option of using a more robust method of computing eigenvectors.
        if self.use_robust_eigensystem:  # pragma: no cover
            raise ValueError(
                "Currently, Beast's pseudo-Dollo covarion model does not "
                "support robust eigensystems.")
            # NOTE(review): this line is unreachable — the raise above always
            # exits first.
            substmodel.set("eigenSystem", "beast.evolution.substitutionmodel.RobustEigenSystem")

        # The "vfrequencies" parameter here is the frequencies
        # of the *visible* states (present/absent) and should
        # be based on the data (if we are doing an empirical
        # analysis)
        if self.frequencies == "estimate":
            substmodel.set("vfrequencies","@%s:visiblefrequencies.s" % name)
        else:
            vfreq = xml.vfrequencies(
                substmodel,
                id="%s:visiblefrequencies.s" % name,
                dimension="3",
                spec="parameter.RealParameter")
            if self.frequencies == "empirical":  # pragma: no cover
                raise ValueError("Dollo model {:} cannot derive empirical "
                                 "frequencies from data".format(self.name))
            else:
                vfreq.text="0.94 0.05 0.01"

        # These are the frequencies of the *hidden* states
        # (fast / slow), and are just set to 50: 50. They could be estimated,
        # in principle, but this seems to lead to serious instability problems
        # so we don't expose that possibility to the user.
        # NOTE(review): this id contains a space ("%s: hidden...") unlike every
        # other id in this class — confirm this is intentional.
        xml.parameter(
            substmodel,
            text="0.5 0.5",
            id="%s: hiddenfrequencies.s" % name,
            dimension="2",
            name="hfrequencies",
            lower="0.0",
            upper="1.0")

    def add_prior(self, prior):
        """Attach the switch/origin/death priors for each parameter id."""
        BinaryModel.add_prior(self, prior)
        for fname in self.parameter_identifiers():
            self._add_prior(prior, fname)

    def _add_prior(self, prior, name):
        # Gamma prior on the covarion switch rate.
        switch_prior = xml.prior(
            prior,
            id="%s:pdcovarion_s_prior.s" % name,
            name="distribution",
            x="@%s:pdcovarion_s.s" % name)
        # NOTE(review): the id contains a space ("%s: Gamma.0") and both Gamma
        # parameters declare upper="0.0" with a positive lower bound — confirm
        # these against the intended BEAST XML.
        gamma = xml.Gamma(switch_prior, id="%s: Gamma.0" % name, name="distr")
        xml.parameter(
            gamma,
            text="0.05",
            id="%s:pdcovarion_switch_gamma_param1" % name,
            name="alpha",
            lower="0.0",
            upper="0.0")
        xml.parameter(
            gamma,
            text="10.0",
            id="%s:pdcovarion_switch_gamma_param2" % name,
            name="beta",
            lower="0.0",
            upper="0.0")
        # Uniform prior on the origin length.
        origin_prior = xml.prior(
            prior,
            id="%s:pdcovarion_origin_prior.s" % name,
            name="distribution",
            x="@%s:pdcovarion_origin.s" % name)
        xml.Uniform(
            origin_prior,
            id="%s:PDCovOriginUniform" % name,
            name="distr",
            upper="Infinity")
        # Exponential(mean=1) prior on the death probability.
        death_prior = xml.prior(
            prior,
            id="%s:pdcovarion_death_prior.s" % name,
            name="distribution",
            x="@{:}:pdcovarion_death.s".format(name))
        xml.Exponential(
            death_prior,
            id="%s:PDCovDeathExp" % name,
            name="distr",
            mean="1.0")

    def add_operators(self, run):
        """Attach scale operators for the three covarion parameters."""
        BinaryModel.add_operators(self, run)
        for fname in self.parameter_identifiers():
            self._add_operators(run, fname)

    def _add_operators(self, run, name):
        xml.operator(
            run,
            id="%s:pdcovarion_origin_scaler.s" % name,
            spec="ScaleOperator",
            parameter="@%s:pdcovarion_origin.s" % name,
            scaleFactor="0.75",
            weight="0.1")
        xml.operator(
            run,
            id="%s:pdcovarion_s_scaler.s" % name,
            spec="ScaleOperator",
            parameter="@%s:pdcovarion_s.s" % name,
            scaleFactor="0.75",
            weight="0.1")
        xml.operator(
            run,
            id="%s:pdcovarion_death_scaler.s" % name,
            spec="ScaleOperator",
            parameter="@%s:pdcovarion_death.s" % name,
            scaleFactor="0.75",
            weight="0.1")

    def add_frequency_operators(self, run):
        # DeltaExchange keeps the visible frequencies summing to a constant.
        for fname in self.parameter_identifiers():
            xml.operator(
                run,
                id="%s:pdcovarion_frequency_sampler.s" % fname,
                spec="DeltaExchangeOperator",
                parameter="@%s:visiblefrequencies.s" % fname,
                delta="0.01",
                weight="1.0")

    def add_param_logs(self, logger):
        """Log the covarion parameters (and priors, when fine logging is on)."""
        BinaryModel.add_param_logs(self, logger)
        for fname in self.parameter_identifiers():
            xml.log(logger, idref="%s:pdcovarion_s.s" % fname)
            xml.log(logger, idref="%s:pdcovarion_origin.s" % fname)
            xml.log(logger, idref="%s:pdcovarion_death.s" % fname)
            if self.config.admin.log_fine_probs:
                xml.log(logger, idref="%s:pdcovarion_s_prior.s" % fname)
                xml.log(logger, idref="%s:pdcovarion_origin_prior.s" % fname)
                xml.log(logger, idref="%s:pdcovarion_death_prior.s" % fname)

    def add_frequency_logs(self, logger):
        for fname in self.parameter_identifiers():
            xml.log(logger, idref="%s:visiblefrequencies.s" % fname)
| {
"content_hash": "bd238562deec9795122796d4da3cf7b4",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 96,
"avg_line_length": 37.57983193277311,
"alnum_prop": 0.5154293381037567,
"repo_name": "lmaurits/BEASTling",
"id": "ee45fc1a8f859a92118fa9bee16025b50d051db5",
"size": "8944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beastling/models/pseudodollocovarion.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "293847"
}
],
"symlink_target": ""
} |
import requests
from ..ontology_graph import IndraOntology
class VirtualOntology(IndraOntology):
    """A virtual ontology class which uses a remote REST service to perform
    all operations. It is particularly useful if the host machine has limited
    resources and keeping the ontology graph in memory is not desirable.

    Parameters
    ----------
    url : str
        The base URL of the ontology graph web service.
    ontology : Optional[str]
        The identifier of the ontology recognized by the web service.
        Default: bio
    """

    def __init__(self, url, ontology='bio'):
        super().__init__()
        self.url = url
        self.ontology = ontology

    def initialize(self):
        # Nothing to build locally; the remote service holds the graph.
        self._initialized = True

    def child_rel(self, ns, id, rel_types):
        # Delegate the traversal to the remote service and re-yield tuples.
        results = _send_request(self.url, 'child_rel',
                                ns=ns, id=id, rel_types=list(rel_types),
                                ontology=self.ontology)
        for entry in results:
            yield tuple(entry)

    def parent_rel(self, ns, id, rel_types):
        results = _send_request(self.url, 'parent_rel',
                                ns=ns, id=id, rel_types=list(rel_types),
                                ontology=self.ontology)
        for entry in results:
            yield tuple(entry)

    def get_node_property(self, ns, id, property):
        return _send_request(self.url, 'get_node_property',
                             ns=ns, id=id, property=property,
                             ontology=self.ontology)

    def get_id_from_name(self, ns, name):
        return _send_request(self.url, 'get_id_from_name',
                             ns=ns, name=name, ontology=self.ontology)
def _send_request(base_url, endpoint, **kwargs):
    """GET `base_url/endpoint` with kwargs as a JSON body; return decoded JSON."""
    full_url = '%s/%s' % (base_url, endpoint)
    response = requests.get(full_url, json=kwargs)
    return response.json()
| {
"content_hash": "99c8f62ba30b3166014c0cd3224d2b9a",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 77,
"avg_line_length": 35.68627450980392,
"alnum_prop": 0.5807692307692308,
"repo_name": "bgyori/indra",
"id": "c31a737a35a4c48b3cfec344999b7b95c0d054e0",
"size": "1820",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "indra/ontology/virtual/ontology.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "169"
},
{
"name": "Dockerfile",
"bytes": "1710"
},
{
"name": "HTML",
"bytes": "28917"
},
{
"name": "JavaScript",
"bytes": "13276"
},
{
"name": "Python",
"bytes": "3519860"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.views.generic import TemplateView
from django.contrib.auth.decorators import user_passes_test
from core.views import HomePageView, DashboardView
# Root URL routing table: most prefixes delegate to per-app urlconfs; the
# site root and dashboard are served by class-based views.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'cs490.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),

    url(r'^admin/', include(admin.site.urls)),
    url(r'^registration/', include('registration.urls')),
    url(r'^accounts/', include('accounts.urls')),
    url(r'^search/', include('haystack.urls')),
    url(r'^classes/', include('core.urls')),
    url(r'^qa/', include('qa.urls')),
    url(r'^$', HomePageView.as_view(), name='homepage'),
    url(r'^dashboard/$', DashboardView.as_view(), name='dashboard'),
)
| {
"content_hash": "c84d2515ae666579d8d51a237d08136d",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 68,
"avg_line_length": 37.95238095238095,
"alnum_prop": 0.6687578419071518,
"repo_name": "thebenwaters/openclickio",
"id": "1aa3e311d52d6925529d6fb4470ffe7b1ae95050",
"size": "797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cs490/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "161671"
},
{
"name": "HTML",
"bytes": "225844"
},
{
"name": "JavaScript",
"bytes": "74674"
},
{
"name": "Python",
"bytes": "43633"
},
{
"name": "Shell",
"bytes": "518"
},
{
"name": "TeX",
"bytes": "202658"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from copy import deepcopy
from uuid import uuid4
import six
from confluent_kafka import Producer
from django.conf import settings
from django.core import mail
from django.test.utils import override_settings
from exam import fixture
from freezegun import freeze_time
from sentry.incidents.action_handlers import (
EmailActionHandler,
generate_incident_trigger_email_context,
)
from sentry.incidents.logic import create_alert_rule_trigger, create_alert_rule_trigger_action
from sentry.incidents.models import (
AlertRuleTriggerAction,
Incident,
IncidentStatus,
IncidentType,
TriggerStatus,
)
from sentry.incidents.tasks import INCIDENTS_SNUBA_SUBSCRIPTION_TYPE
from sentry.snuba.query_subscription_consumer import QuerySubscriptionConsumer, subscriber_registry
from sentry.utils import json
from sentry.testutils import TestCase
@freeze_time()
class HandleSnubaQueryUpdateTest(TestCase):
    """End-to-end test: a snuba subscription update flowing through Kafka must
    trigger the incidents callback, create an incident and send the alert
    email."""

    def setUp(self):
        super(HandleSnubaQueryUpdateTest, self).setUp()
        # Point the settings at a throwaway Kafka topic for this test run.
        self.override_settings_cm = override_settings(
            KAFKA_TOPICS={self.topic: {"cluster": "default", "topic": self.topic}}
        )
        self.override_settings_cm.__enter__()
        # Snapshot the callback registry so tearDown can restore it.
        self.orig_registry = deepcopy(subscriber_registry)

    def tearDown(self):
        super(HandleSnubaQueryUpdateTest, self).tearDown()
        self.override_settings_cm.__exit__(None, None, None)
        # Undo the registry mutation performed in test().
        subscriber_registry.clear()
        subscriber_registry.update(self.orig_registry)

    @fixture
    def subscription(self):
        # The snuba query subscription created for the alert rule.
        return self.rule.snuba_query.subscriptions.get()

    @fixture
    def rule(self):
        # Alert rule with one trigger ("hi" at threshold 100) and an email
        # action targeting the test user.
        with self.tasks():
            rule = self.create_alert_rule(
                name="some rule",
                query="",
                aggregate="count()",
                time_window=1,
                threshold_period=1,
                resolve_threshold=10,
            )
            trigger = create_alert_rule_trigger(rule, "hi", 100)
            create_alert_rule_trigger_action(
                trigger,
                AlertRuleTriggerAction.Type.EMAIL,
                AlertRuleTriggerAction.TargetType.USER,
                six.text_type(self.user.id),
            )
            return rule

    @fixture
    def trigger(self):
        return self.rule.alertruletrigger_set.get()

    @fixture
    def action(self):
        return self.trigger.alertruletriggeraction_set.get()

    @fixture
    def producer(self):
        # Kafka producer wired to the same cluster the consumer will read from.
        cluster_name = settings.KAFKA_TOPICS[self.topic]["cluster"]
        conf = {
            "bootstrap.servers": settings.KAFKA_CLUSTERS[cluster_name]["common"][
                "bootstrap.servers"
            ],
            "session.timeout.ms": 6000,
        }
        return Producer(conf)

    @fixture
    def topic(self):
        # Unique per-test topic name to avoid cross-test interference.
        return uuid4().hex

    def test(self):
        # Full integration test to ensure that when a subscription receives an update
        # the `QuerySubscriptionConsumer` successfully retries the subscription and
        # calls the correct callback, which should result in an incident being created.
        callback = subscriber_registry[INCIDENTS_SNUBA_SUBSCRIPTION_TYPE]

        def exception_callback(*args, **kwargs):
            # We want to just error after the callback so that we can see the result of
            # processing. This means the offset won't be committed, but that's fine, we
            # can still check the results.
            callback(*args, **kwargs)
            raise KeyboardInterrupt()

        subscriber_registry[INCIDENTS_SNUBA_SUBSCRIPTION_TYPE] = exception_callback

        # Craft a subscription update whose value exceeds the trigger threshold.
        message = {
            "version": 1,
            "payload": {
                "subscription_id": self.subscription.subscription_id,
                "values": {"data": [{"some_col": self.trigger.alert_threshold + 1}]},
                "timestamp": "2020-01-01T01:23:45.1234",
            },
        }
        self.producer.produce(self.topic, json.dumps(message))
        self.producer.flush()

        def active_incident():
            # Open (non-closed) incidents created for our alert rule.
            return Incident.objects.filter(
                type=IncidentType.ALERT_TRIGGERED.value, alert_rule=self.rule
            ).exclude(status=IncidentStatus.CLOSED.value)

        consumer = QuerySubscriptionConsumer("hi", topic=self.topic)
        with self.feature(["organizations:incidents", "organizations:performance-view"]):
            # Consuming the message must flip "no active incident" -> "active".
            with self.assertChanges(
                lambda: active_incident().exists(), before=False, after=True
            ), self.tasks(), self.capture_on_commit_callbacks(execute=True):
                consumer.run()

        # Exactly one alert email, matching what the handler would build.
        assert len(mail.outbox) == 1
        handler = EmailActionHandler(self.action, active_incident().get(), self.project)
        message = handler.build_message(
            generate_incident_trigger_email_context(
                handler.project,
                handler.incident,
                handler.action.alert_rule_trigger,
                TriggerStatus.ACTIVE,
            ),
            TriggerStatus.ACTIVE,
            self.user.id,
        )
        out = mail.outbox[0]
        assert out.to == [self.user.email]
        assert out.subject == message.subject
        built_message = message.build(self.user.email)
        assert out.body == built_message.body
| {
"content_hash": "3220ee5bb655289151da43b3a0481525",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 99,
"avg_line_length": 35.43333333333333,
"alnum_prop": 0.6284101599247413,
"repo_name": "beeftornado/sentry",
"id": "f24a837e6ec32e428ae806d2473f00604c0b1c68",
"size": "5315",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/snuba/incidents/test_tasks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157195"
},
{
"name": "HTML",
"bytes": "197026"
},
{
"name": "JavaScript",
"bytes": "380379"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6473603"
}
],
"symlink_target": ""
} |
__author__ = 'dorota'
# -*- coding: utf-8 -*-
from model.group import Group
import random
def test_delete_some_group(app, db, check_ui):
    """Delete one randomly chosen group and verify DB (and optionally UI) state."""
    if not db.get_group_list():
        app.group.create(Group(name="test"))
    groups_before = db.get_group_list()
    victim = random.choice(groups_before)
    app.group.delete_group_by_id(victim.id)
    assert app.group.count() == len(groups_before) - 1
    groups_after = db.get_group_list()
    groups_before.remove(victim)
    assert groups_before == groups_after
    if check_ui:
        assert sorted(groups_after, key=Group.id_or_max) == sorted(
            app.group.get_group_list(), key=Group.id_or_max)
"content_hash": "217a796d00fdca9dd45d652a5af2a2ac",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 113,
"avg_line_length": 34.77777777777778,
"alnum_prop": 0.645367412140575,
"repo_name": "dorotan/pythontraining",
"id": "5200e45f965883eb152b57a9d1a1ab35d8ba1fa2",
"size": "626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_del_group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "1747"
},
{
"name": "Python",
"bytes": "44238"
},
{
"name": "RobotFramework",
"bytes": "1954"
}
],
"symlink_target": ""
} |
"""
The majority of this code is based/inspired or directly taken from
SublimeLinter plugin. Because of that, the SublimeLinter license file has
been added to this package.
This doesn't mean that anaconda is a fork of SublimeLinter in any
way or that anaconda is going to be updated with latest SublimeLinter
updates. Anaconda and SublimeLinter are two completely separated
projects but they did a great job with SublimeLinter so we are reusing
part of their plugin to lint our Python scripts.
The main difference between SublimeLinter linting and anaconda one is
that the former lints always for Python3.3 even if we are coding in
a Python 2 project. Anaconda lints for the configured Python environment
"""
import os
import re
import sys
sys.path.insert(0, os.path.dirname(__file__))
import _ast
import pep8
import pyflakes.checker as pyflakes
if sys.version_info < (2, 7):
    # `functools.cmp_to_key` does not exist before Python 2.7, so provide the
    # standard backport: wrap a cmp-style function in a class whose rich
    # comparison methods delegate to it.
    def cmp_to_key(mycmp):
        """Convert a cmp= function into a key= function
        """
        class K(object):
            __slots__ = ['obj']

            def __init__(self, obj, *args):
                self.obj = obj

            def __lt__(self, other):
                return mycmp(self.obj, other.obj) < 0

            def __gt__(self, other):
                return mycmp(self.obj, other.obj) > 0

            def __eq__(self, other):
                return mycmp(self.obj, other.obj) == 0

            def __le__(self, other):
                return mycmp(self.obj, other.obj) <= 0

            def __ge__(self, other):
                return mycmp(self.obj, other.obj) >= 0

            def __ne__(self, other):
                return mycmp(self.obj, other.obj) != 0

            def __hash__(self):
                # Keys produced by cmp_to_key are not hashable.
                raise TypeError('hash not implemented')

        return K
else:
    from functools import cmp_to_key
# Make pyflakes messages stringify as just the message template interpolated
# with its arguments (used when errors are converted to strings below).
pyflakes.messages.Message.__str__ = (
    lambda self: self.message % self.message_args
)
class LintError(object):
    """Base class for all lint problems reported by this module.

    Stores the location and severity of a problem plus a printf-style
    message template and its arguments. Any extra keyword arguments are
    attached to the instance as attributes (e.g. ``offset``/``text``).
    """

    def __init__(self, filename, loc, level, message, message_args, **kwargs):
        self.lineno = loc
        self.level = level
        self.message = message
        self.message_args = message_args
        for key, value in kwargs.items():
            setattr(self, key, value)

    def __str__(self):
        """Return the message template interpolated with its arguments."""
        return self.message % self.message_args
class Pep8Error(LintError):
    """Lint error class for PEP 8 errors.

    PEP 8 errors are reported at warning level ('W').
    """

    def __init__(self, filename, loc, offset, code, text):
        template = '[W] PEP 8 (%s): %s'
        super(Pep8Error, self).__init__(
            filename, loc, 'W', template, (code, text),
            offset=offset, text=text
        )
class Pep8Warning(LintError):
    """Lint error class for PEP 8 warnings.

    PEP 8 warnings are reported at violation level ('V').
    """

    def __init__(self, filename, loc, offset, code, text):
        template = '[V] PEP 8 (%s): %s'
        super(Pep8Warning, self).__init__(
            filename, loc, 'V', template, (code, text),
            offset=offset, text=text
        )
class PythonError(LintError):
    """A hard Python error (level 'E'), rendered with repr of the text."""

    def __init__(self, filename, loc, text):
        super(PythonError, self).__init__(
            filename, loc, 'E', '[E] %r', (text,), text=text
        )
class OffsetError(LintError):
    """An error (level 'E') carrying an explicit one-based column offset."""

    def __init__(self, filename, loc, text, offset):
        column = offset + 1
        super(OffsetError, self).__init__(
            filename, loc, 'E', '[E] %s', (text,),
            offset=column, text=text
        )
class Linter(object):
"""Linter class for Anaconda's Python linter
"""
    def __init__(self):
        # Linting is disabled until `enabled` is explicitly set to True.
        self.enabled = False
    def pyflakes_check(self, code, filename, ignore=None):
        """Check the code with pyflakes to find errors
        """

        class FakeLoc:
            # Minimal stand-in for a pyflakes location object (line 0).
            lineno = 0

        try:
            # Ensure the source ends in a newline, then compile to an AST only
            # (no execution) so pyflakes can walk it.
            code = code.encode('utf8') + b'\n'
            tree = compile(code, filename or '', 'exec', _ast.PyCF_ONLY_AST)
        except (SyntaxError, IndentationError):
            return self._handle_syntactic_error(code, filename)
        except ValueError as error:
            # NOTE(review): ValueError from compile() — presumably e.g. null
            # bytes in the source; confirm which cases reach this branch.
            return [PythonError(filename, FakeLoc(), error.args[0])]
        else:
            # the file is syntactically valid, check it now
            w = pyflakes.Checker(tree, filename, ignore)
            return w.messages
    def pep8_check(self, code, filename, rcfile, ignore, max_line_length):
        """Check the code with pep8 to find PEP 8 errors
        """
        messages = []
        _lines = code.split('\n')

        if _lines:
            class FakeCol:
                """Fake class to represent a col object for PyFlakes
                """

                def __init__(self, line_number):
                    self.lineno = line_number

            class SublimeLinterReport(pep8.BaseReport):
                """Helper class to report PEP 8 problems
                """

                def error(self, line_number, offset, text, check):
                    """Report an error, according to options
                    """
                    col = FakeCol(line_number)
                    # pep8 text is formatted "CODE message"; split it.
                    code = text[:4]
                    message = text[5:]

                    if self._ignore_code(code):
                        return
                    if code in self.counters:
                        self.counters[code] += 1
                    else:
                        self.counters[code] = 1
                        self.messages[code] = message

                    # Don't care about expected errors or warnings
                    if code in self.expected:
                        return

                    self.file_errors += 1
                    self.total_errors += 1

                    # 'E' codes become Pep8Error, everything else Pep8Warning.
                    pep8_error = code.startswith('E')
                    klass = Pep8Error if pep8_error else Pep8Warning
                    messages.append(klass(
                        filename, col, offset, code, message
                    ))

                    return code

            params = {'reporter': SublimeLinterReport}
            if not rcfile:
                # No rcfile given: combine caller ignores with pep8's defaults.
                _ignore = ignore + pep8.DEFAULT_IGNORE.split(',')
                params['ignore'] = _ignore
            else:
                params['config_file'] = os.path.expanduser(rcfile)

            options = pep8.StyleGuide(**params).options
            if not rcfile:
                options.max_line_length = max_line_length

            # Re-add newlines (split removed them) and drop a trailing blank
            # line so pep8 sees the buffer the way it would a file on disk.
            good_lines = [l + '\n' for l in _lines]
            good_lines[-1] = good_lines[-1].rstrip('\n')
            if not good_lines[-1]:
                good_lines = good_lines[:-1]

            pep8.Checker(filename, good_lines, options=options).check_all()

        return messages
def run_linter(self, settings, code, filename):
    """Lint *code* with pep8 and pyflakes and return parsed error dicts."""
    found = []
    if settings.get("pep8", True):
        found.extend(self.pep8_check(
            code,
            filename,
            settings.get('pep8_rcfile'),
            ignore=settings.get('pep8_ignore', []),
            max_line_length=settings.get(
                'pep8_max_line_length', pep8.MAX_LINE_LENGTH),
        ))
    # pyflakes is skipped when explicitly disabled or when pylint is in use.
    use_pyflakes = (not settings.get('pyflakes_disabled', False)
                    and not settings.get('use_pylint'))
    if use_pyflakes:
        found.extend(self.pyflakes_check(
            code, filename, settings.get('pyflakes_ignore', None)))
    return self.parse_errors(
        found, settings.get('pyflakes_explicit_ignore', []))
def sort_errors(self, errors):
    """Sort *errors* in-place by ascending line number.

    Bug fix: the previous implementation passed a boolean predicate
    through ``functools.cmp_to_key``.  A cmp function must return a
    negative, zero or positive number; a bool (0/1) can never signal
    "less than", so the list was not actually sorted.  A plain key
    function is both correct and faster.
    """
    errors.sort(key=lambda error: error.lineno)
def prepare_error_level(self, error):
    """Return the error's severity level, defaulting to 'W' when the
    error object does not define one."""
    return getattr(error, 'level', 'W')
def parse_errors(self, errors, explicit_ignore):
    """Parse errors returned from the PyFlakes and pep8 libraries

    Normalizes each problem object into a plain dict carrying level,
    line number, column offset, capitalized message and the raw error
    text; pyflakes entries additionally get a regex (and sometimes a
    ``linematch``) used by the editor to underline the offending token.
    Message classes named in *explicit_ignore* are dropped.
    """
    errors_list = []
    if errors is None:
        return errors_list
    # Bug fix: the previous cmp_to_key(lambda a, b: a.lineno < b.lineno)
    # was not a valid cmp function (a bool never returns a negative
    # number), so errors were not actually ordered by line.
    errors.sort(key=lambda error: error.lineno)
    for error in errors:
        # Reuse the shared helper so the default level stays consistent.
        error_level = self.prepare_error_level(error)
        message = error.message.capitalize()
        # pep8 reports the column as `offset`, pyflakes-style errors as `col`.
        offset = None
        if hasattr(error, 'offset'):
            offset = error.offset
        elif hasattr(error, 'col'):
            offset = error.col
        error_data = {
            'pep8': False,
            'level': error_level,
            'lineno': error.lineno,
            'offset': offset,
            'message': message,
            'raw_error': str(error)
        }
        # Regex literals below are raw strings: the \s / \w escapes are
        # regex syntax, not Python string escapes (the values are
        # unchanged; this only silences invalid-escape warnings).
        if isinstance(error, (Pep8Error, Pep8Warning, OffsetError)):
            error_data['pep8'] = True
            errors_list.append(error_data)
        elif (isinstance(
            error, (
                pyflakes.messages.RedefinedWhileUnused,
                pyflakes.messages.UndefinedName,
                pyflakes.messages.UndefinedExport,
                pyflakes.messages.UndefinedLocal,
                pyflakes.messages.Redefined,
                pyflakes.messages.UnusedVariable)) and
                error.__class__.__name__ not in explicit_ignore):
            regex = (
                r'((and|or|not|if|elif|while|in)\s+|[+\-*^%%<>=\(\{{])*\s'
                r'*(?P<underline>[\w\.]*{0}[\w]*)'.format(re.escape(
                    error.message_args[0]
                ))
            )
            error_data['len'] = len(error.message_args[0])
            error_data['regex'] = regex
            errors_list.append(error_data)
        elif isinstance(error, pyflakes.messages.ImportShadowedByLoopVar):
            # NOTE(review): the trailing [\w*] looks like it was meant
            # to be [\w]* -- kept as-is to preserve behavior.
            regex = r'for\s+(?P<underline>[\w]*{0}[\w*])'.format(
                re.escape(error.message_args[0])
            )
            error_data['regex'] = regex
            errors_list.append(error_data)
        elif (isinstance(
            error, (
                pyflakes.messages.UnusedImport,
                pyflakes.messages.ImportStarUsed)) and
                error.__class__.__name__ not in explicit_ignore):
            if isinstance(error, pyflakes.messages.ImportStarUsed):
                word = '*'
            else:
                word = error.message_args[0]
            linematch = r'(from\s+[\w_\.]+\s+)?import\s+(?P<match>[^#;]+)'
            r = r'(^|\s+|,\s*|as\s+)(?P<underline>[\w]*{0}[\w]*)'.format(
                re.escape(word)
            )
            error_data['regex'] = r
            error_data['linematch'] = linematch
            errors_list.append(error_data)
        elif (isinstance(error, pyflakes.messages.DuplicateArgument) and
                error.__class__.__name__ not in explicit_ignore):
            regex = r'def [\w_]+\(.*?(?P<underline>[\w]*{0}[\w]*)'.format(
                re.escape(error.message_args[0])
            )
            error_data['regex'] = regex
            errors_list.append(error_data)
        elif isinstance(error, pyflakes.messages.LateFutureImport):
            # Deliberately ignored: nothing useful to underline.
            pass
        elif isinstance(error, PythonError):
            print(error)
        else:
            print('Oops, we missed an error type!', type(error))
    return errors_list
def _handle_syntactic_error(self, code, filename):
    """Handle PythonError and OffsetError

    Must be called from inside an ``except`` block: the active
    SyntaxError/IndentationError is pulled from ``sys.exc_info()`` and
    wrapped in one of the plugin's error objects.
    """
    value = sys.exc_info()[1]
    msg = value.args[0]
    lineno, offset, text = value.lineno, value.offset, value.text
    if text is None:  # encoding problems
        if msg.startswith('duplicate argument'):
            # Extract the duplicated parameter name from a message of
            # the form "duplicate argument 'x' in function definition".
            arg = msg.split(
                'duplicate argument ', 1)[1].split(' ', 1)[0].strip('\'"')
            error = pyflakes.messages.DuplicateArgument(
                filename, lineno, arg
            )
        else:
            error = PythonError(filename, value, msg)
    else:
        line = text.splitlines()[-1]
        # Re-base the column offset onto the last physical line of the
        # offending (possibly multi-line) statement.
        if offset is not None:
            offset = offset - (len(text) - len(line))
        if offset is not None:
            error = OffsetError(filename, value, msg, offset)
        else:
            error = PythonError(filename, value, msg)
    error.lineno = lineno
    return [error]
| {
"content_hash": "261fdae18565f1ba684a3650533c70de",
"timestamp": "",
"source": "github",
"line_count": 388,
"max_line_length": 79,
"avg_line_length": 32.654639175257735,
"alnum_prop": 0.5184688239936859,
"repo_name": "prisis/sublime-text-packages",
"id": "f011d6e3aae43bb853ffcdf36660b92a33c67759",
"size": "12822",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Packages/Anaconda/anaconda_lib/linting/linter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "318"
},
{
"name": "Batchfile",
"bytes": "786"
},
{
"name": "C++",
"bytes": "56562"
},
{
"name": "CSS",
"bytes": "18339"
},
{
"name": "HTML",
"bytes": "1757"
},
{
"name": "JavaScript",
"bytes": "206342"
},
{
"name": "PHP",
"bytes": "2193174"
},
{
"name": "Pascal",
"bytes": "7460"
},
{
"name": "PowerShell",
"bytes": "397"
},
{
"name": "Python",
"bytes": "19331281"
},
{
"name": "Shell",
"bytes": "1903"
},
{
"name": "Smarty",
"bytes": "4883"
},
{
"name": "SourcePawn",
"bytes": "4479"
},
{
"name": "Tcl",
"bytes": "88877"
}
],
"symlink_target": ""
} |
import unittest
from datetime import time, timedelta
import pytest
from airflow import DAG, exceptions, settings
from airflow.exceptions import AirflowException, AirflowSensorTimeout
from airflow.models import DagBag, TaskInstance
from airflow.operators.bash import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.sensors.external_task_sensor import ExternalTaskMarker, ExternalTaskSensor
from airflow.sensors.time_sensor import TimeSensor
from airflow.utils.state import State
from airflow.utils.timezone import datetime
DEFAULT_DATE = datetime(2015, 1, 1)
TEST_DAG_ID = 'unit_test_dag'
TEST_TASK_ID = 'time_sensor_check'
DEV_NULL = '/dev/null'
class TestExternalTaskSensor(unittest.TestCase):
    """Tests for ExternalTaskSensor waiting on tasks/DAGs in other DAGs."""

    def setUp(self):
        # Bundled example DAGs are loaded so tests can reference them
        # (e.g. example_bash_operator below).
        self.dagbag = DagBag(
            dag_folder=DEV_NULL,
            include_examples=True
        )
        self.args = {
            'owner': 'airflow',
            'start_date': DEFAULT_DATE
        }
        self.dag = DAG(TEST_DAG_ID, default_args=self.args)

    def test_time_sensor(self):
        # Also used by other tests as a setup step: it leaves behind a
        # successful TaskInstance that ExternalTaskSensor can wait on.
        op = TimeSensor(
            task_id=TEST_TASK_ID,
            target_time=time(0),
            dag=self.dag
        )
        op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

    def test_external_task_sensor(self):
        self.test_time_sensor()
        op = ExternalTaskSensor(
            task_id='test_external_task_sensor_check',
            external_dag_id=TEST_DAG_ID,
            external_task_id=TEST_TASK_ID,
            dag=self.dag
        )
        op.run(
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE,
            ignore_ti_state=True
        )

    def test_external_dag_sensor(self):
        # external_task_id=None makes the sensor wait on the whole DAG run.
        other_dag = DAG(
            'other_dag',
            default_args=self.args,
            end_date=DEFAULT_DATE,
            schedule_interval='@once')
        other_dag.create_dagrun(
            run_id='test',
            start_date=DEFAULT_DATE,
            execution_date=DEFAULT_DATE,
            state=State.SUCCESS)
        op = ExternalTaskSensor(
            task_id='test_external_dag_sensor_check',
            external_dag_id='other_dag',
            external_task_id=None,
            dag=self.dag
        )
        op.run(
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE,
            ignore_ti_state=True
        )

    def test_templated_sensor(self):
        with self.dag:
            sensor = ExternalTaskSensor(
                task_id='templated_task',
                external_dag_id='dag_{{ ds }}',
                external_task_id='task_{{ ds }}'
            )
        # Rendering must expand the {{ ds }} macros in both fields.
        instance = TaskInstance(sensor, DEFAULT_DATE)
        instance.render_templates()
        self.assertEqual(sensor.external_dag_id,
                         "dag_{}".format(DEFAULT_DATE.date()))
        self.assertEqual(sensor.external_task_id,
                         "task_{}".format(DEFAULT_DATE.date()))

    def test_external_task_sensor_fn_multiple_execution_dates(self):
        # The bash task fails only for the execution date whose seconds
        # value is 1, so of the two per-second runs below exactly one fails.
        bash_command_code = """
{% set s=execution_date.time().second %}
echo "second is {{ s }}"
if [[ $(( {{ s }} % 60 )) == 1 ]]
    then
        exit 1
fi
exit 0
"""
        dag_external_id = TEST_DAG_ID + '_external'
        dag_external = DAG(
            dag_external_id,
            default_args=self.args,
            schedule_interval=timedelta(seconds=1))
        task_external_with_failure = BashOperator(
            task_id="task_external_with_failure",
            bash_command=bash_command_code,
            retries=0,
            dag=dag_external)
        task_external_without_failure = DummyOperator(
            task_id="task_external_without_failure",
            retries=0,
            dag=dag_external)
        task_external_without_failure.run(
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE + timedelta(seconds=1),
            ignore_ti_state=True)
        session = settings.Session()
        TI = TaskInstance
        try:
            task_external_with_failure.run(
                start_date=DEFAULT_DATE,
                end_date=DEFAULT_DATE + timedelta(seconds=1),
                ignore_ti_state=True)
            # The test_with_failure task is expected to fail
            # once per minute (the run on the first second of
            # each minute).
        except Exception as e:  # pylint: disable=broad-except
            # Swallow only the single expected failure of
            # task_external_with_failure; anything else is re-raised.
            failed_tis = session.query(TI).filter(
                TI.dag_id == dag_external_id,
                TI.state == State.FAILED,
                TI.execution_date == DEFAULT_DATE + timedelta(seconds=1)).all()
            if len(failed_tis) == 1 and \
                    failed_tis[0].task_id == 'task_external_with_failure':
                pass
            else:
                raise e
        dag_id = TEST_DAG_ID
        dag = DAG(
            dag_id,
            default_args=self.args,
            schedule_interval=timedelta(minutes=1))
        # execution_date_fn returns a *list* of dates; the sensor must
        # see all of them in an allowed state to succeed.
        task_without_failure = ExternalTaskSensor(
            task_id='task_without_failure',
            external_dag_id=dag_external_id,
            external_task_id='task_external_without_failure',
            execution_date_fn=lambda dt: [dt + timedelta(seconds=i)
                                          for i in range(2)],
            allowed_states=['success'],
            retries=0,
            timeout=1,
            poke_interval=1,
            dag=dag)
        task_with_failure = ExternalTaskSensor(
            task_id='task_with_failure',
            external_dag_id=dag_external_id,
            external_task_id='task_external_with_failure',
            execution_date_fn=lambda dt: [dt + timedelta(seconds=i)
                                          for i in range(2)],
            allowed_states=['success'],
            retries=0,
            timeout=1,
            poke_interval=1,
            dag=dag)
        task_without_failure.run(
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE,
            ignore_ti_state=True)
        # One of the watched execution dates failed, so the sensor
        # never succeeds and must time out.
        with self.assertRaises(AirflowSensorTimeout):
            task_with_failure.run(
                start_date=DEFAULT_DATE,
                end_date=DEFAULT_DATE,
                ignore_ti_state=True)

    def test_external_task_sensor_delta(self):
        self.test_time_sensor()
        op = ExternalTaskSensor(
            task_id='test_external_task_sensor_check_delta',
            external_dag_id=TEST_DAG_ID,
            external_task_id=TEST_TASK_ID,
            execution_delta=timedelta(0),
            allowed_states=['success'],
            dag=self.dag
        )
        op.run(
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE,
            ignore_ti_state=True
        )

    def test_external_task_sensor_fn(self):
        self.test_time_sensor()
        # check that the execution_fn works
        op1 = ExternalTaskSensor(
            task_id='test_external_task_sensor_check_delta',
            external_dag_id=TEST_DAG_ID,
            external_task_id=TEST_TASK_ID,
            execution_date_fn=lambda dt: dt + timedelta(0),
            allowed_states=['success'],
            dag=self.dag
        )
        op1.run(
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE,
            ignore_ti_state=True
        )
        # double check that the execution is being called by failing the test
        op2 = ExternalTaskSensor(
            task_id='test_external_task_sensor_check_delta',
            external_dag_id=TEST_DAG_ID,
            external_task_id=TEST_TASK_ID,
            execution_date_fn=lambda dt: dt + timedelta(days=1),
            allowed_states=['success'],
            timeout=1,
            poke_interval=1,
            dag=self.dag
        )
        with self.assertRaises(exceptions.AirflowSensorTimeout):
            op2.run(
                start_date=DEFAULT_DATE,
                end_date=DEFAULT_DATE,
                ignore_ti_state=True
            )

    def test_external_task_sensor_error_delta_and_fn(self):
        self.test_time_sensor()
        # Test that providing execution_delta and a function raises an error
        with self.assertRaises(ValueError):
            ExternalTaskSensor(
                task_id='test_external_task_sensor_check_delta',
                external_dag_id=TEST_DAG_ID,
                external_task_id=TEST_TASK_ID,
                execution_delta=timedelta(0),
                execution_date_fn=lambda dt: dt,
                allowed_states=['success'],
                dag=self.dag
            )

    def test_catch_invalid_allowed_states(self):
        # Invalid states must be rejected both with and without an
        # external_task_id.
        with self.assertRaises(ValueError):
            ExternalTaskSensor(
                task_id='test_external_task_sensor_check',
                external_dag_id=TEST_DAG_ID,
                external_task_id=TEST_TASK_ID,
                allowed_states=['invalid_state'],
                dag=self.dag
            )
        with self.assertRaises(ValueError):
            ExternalTaskSensor(
                task_id='test_external_task_sensor_check',
                external_dag_id=TEST_DAG_ID,
                external_task_id=None,
                allowed_states=['invalid_state'],
                dag=self.dag
            )

    def test_external_task_sensor_waits_for_task_check_existence(self):
        # check_existence=True: a missing external task is an error, not
        # an endless wait.
        op = ExternalTaskSensor(
            task_id='test_external_task_sensor_check',
            external_dag_id="example_bash_operator",
            external_task_id="non-existing-task",
            check_existence=True,
            dag=self.dag
        )
        with self.assertRaises(AirflowException):
            op.run(
                start_date=DEFAULT_DATE,
                end_date=DEFAULT_DATE,
                ignore_ti_state=True
            )

    def test_external_task_sensor_waits_for_dag_check_existence(self):
        # Same as above, but for a missing external DAG.
        op = ExternalTaskSensor(
            task_id='test_external_task_sensor_check',
            external_dag_id="non-existing-dag",
            external_task_id=None,
            check_existence=True,
            dag=self.dag
        )
        with self.assertRaises(AirflowException):
            op.run(
                start_date=DEFAULT_DATE,
                end_date=DEFAULT_DATE,
                ignore_ti_state=True
            )
@pytest.fixture
def dag_bag_ext():
    """
    Create a DagBag with DAGs looking like this. The dotted lines represent external dependencies
    set up using ExternalTaskMarker and ExternalTaskSensor.

    dag_0:   task_a_0 >> task_b_0
                             |
                             |
    dag_1:                   ---> task_a_1 >> task_b_1
                                                  |
                                                  |
    dag_2:                                        ---> task_a_2 >> task_b_2
                                                                       |
                                                                       |
    dag_3:                                                             ---> task_a_3 >> task_b_3
    """
    dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)

    dag_0 = DAG("dag_0", start_date=DEFAULT_DATE, schedule_interval=None)
    task_a_0 = DummyOperator(task_id="task_a_0", dag=dag_0)
    # Marker: clearing task_b_0 will also clear task_a_1 in dag_1, up to
    # recursion_depth levels of transitive markers.
    task_b_0 = ExternalTaskMarker(task_id="task_b_0",
                                  external_dag_id="dag_1",
                                  external_task_id="task_a_1",
                                  recursion_depth=3,
                                  dag=dag_0)
    task_a_0 >> task_b_0

    dag_1 = DAG("dag_1", start_date=DEFAULT_DATE, schedule_interval=None)
    task_a_1 = ExternalTaskSensor(task_id="task_a_1",
                                  external_dag_id=dag_0.dag_id,
                                  external_task_id=task_b_0.task_id,
                                  dag=dag_1)
    task_b_1 = ExternalTaskMarker(task_id="task_b_1",
                                  external_dag_id="dag_2",
                                  external_task_id="task_a_2",
                                  recursion_depth=2,
                                  dag=dag_1)
    task_a_1 >> task_b_1

    dag_2 = DAG("dag_2", start_date=DEFAULT_DATE, schedule_interval=None)
    task_a_2 = ExternalTaskSensor(task_id="task_a_2",
                                  external_dag_id=dag_1.dag_id,
                                  external_task_id=task_b_1.task_id,
                                  dag=dag_2)
    task_b_2 = ExternalTaskMarker(task_id="task_b_2",
                                  external_dag_id="dag_3",
                                  external_task_id="task_a_3",
                                  recursion_depth=1,
                                  dag=dag_2)
    task_a_2 >> task_b_2

    dag_3 = DAG("dag_3", start_date=DEFAULT_DATE, schedule_interval=None)
    task_a_3 = ExternalTaskSensor(task_id="task_a_3",
                                  external_dag_id=dag_2.dag_id,
                                  external_task_id=task_b_2.task_id,
                                  dag=dag_3)
    task_b_3 = DummyOperator(task_id="task_b_3", dag=dag_3)
    task_a_3 >> task_b_3

    for dag in [dag_0, dag_1, dag_2, dag_3]:
        dag_bag.bag_dag(dag, None, dag)

    return dag_bag
def run_tasks(dag_bag):
    """Execute every task of every DAG in *dag_bag*.

    Each task must finish in SUCCESS; the resulting TaskInstance
    objects are returned as a dict keyed by task_id.
    """
    instances = {}
    for current_dag in dag_bag.dags.values():
        for current_task in current_dag.tasks:
            instance = TaskInstance(task=current_task,
                                    execution_date=DEFAULT_DATE)
            instances[current_task.task_id] = instance
            instance.run()
            assert_ti_state_equal(instance, State.SUCCESS)
    return instances
def assert_ti_state_equal(task_instance, state):
    """Reload *task_instance* from the database and assert its state."""
    task_instance.refresh_from_db()
    actual_state = task_instance.state
    assert actual_state == state
def clear_tasks(dag_bag, dag, task):
    """Clear *task* and everything downstream of it within *dag*."""
    pattern = "^{}$".format(task.task_id)
    downstream = dag.sub_dag(task_regex=pattern, include_downstream=True)
    downstream.clear(start_date=DEFAULT_DATE,
                     end_date=DEFAULT_DATE,
                     dag_bag=dag_bag)
# pylint: disable=redefined-outer-name
def test_external_task_marker_transitive(dag_bag_ext):
    """Clearing a task in dag_0 must transitively clear the marked
    tasks in the downstream DAGs as well."""
    instances = run_tasks(dag_bag_ext)
    first_dag = dag_bag_ext.get_dag("dag_0")
    root_task = first_dag.get_task("task_a_0")
    clear_tasks(dag_bag_ext, first_dag, root_task)
    # Both the cleared task and the last task of the chain end up NONE.
    assert_ti_state_equal(instances["task_a_0"], State.NONE)
    assert_ti_state_equal(instances["task_b_3"], State.NONE)
def test_external_task_marker_exception(dag_bag_ext):
    """Clearing more marker levels than the first ExternalTaskMarker's
    recursion_depth allows must raise AirflowException."""
    run_tasks(dag_bag_ext)
    root_dag = dag_bag_ext.get_dag("dag_0")
    start_task = root_dag.get_task("task_a_0")
    marker_task = root_dag.get_task("task_b_0")
    # Lower the depth so the four-DAG chain exceeds it.
    marker_task.recursion_depth = 2
    with pytest.raises(AirflowException, match="Maximum recursion depth 2"):
        clear_tasks(dag_bag_ext, root_dag, start_task)
@pytest.fixture
def dag_bag_cyclic():
    """
    Create a DagBag with DAGs having cyclic dependenceis set up by ExternalTaskMarker and
    ExternalTaskSensor.

    dag_0:   task_a_0 >> task_b_0
                  ^          |
                  |          |
    dag_1:       |           ---> task_a_1 >> task_b_1
                  |                               |
                  ---------------------------------
    """
    dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)

    dag_0 = DAG("dag_0", start_date=DEFAULT_DATE, schedule_interval=None)
    task_a_0 = DummyOperator(task_id="task_a_0", dag=dag_0)
    task_b_0 = ExternalTaskMarker(task_id="task_b_0",
                                  external_dag_id="dag_1",
                                  external_task_id="task_a_1",
                                  recursion_depth=3,
                                  dag=dag_0)
    task_a_0 >> task_b_0

    dag_1 = DAG("dag_1", start_date=DEFAULT_DATE, schedule_interval=None)
    task_a_1 = ExternalTaskSensor(task_id="task_a_1",
                                  external_dag_id=dag_0.dag_id,
                                  external_task_id=task_b_0.task_id,
                                  dag=dag_1)
    # This marker points back at dag_0/task_a_0, closing the cycle.
    task_b_1 = ExternalTaskMarker(task_id="task_b_1",
                                  external_dag_id="dag_0",
                                  external_task_id="task_a_0",
                                  recursion_depth=2,
                                  dag=dag_1)
    task_a_1 >> task_b_1

    for dag in [dag_0, dag_1]:
        dag_bag.bag_dag(dag, None, dag)

    return dag_bag
def test_external_task_marker_cyclic(dag_bag_cyclic):
    """Clearing across DAGs with a dependency cycle must blow the
    recursion-depth limit and raise AirflowException."""
    run_tasks(dag_bag_cyclic)
    root_dag = dag_bag_cyclic.get_dag("dag_0")
    entry_task = root_dag.get_task("task_a_0")
    with pytest.raises(AirflowException, match="Maximum recursion depth 3"):
        clear_tasks(dag_bag_cyclic, root_dag, entry_task)
| {
"content_hash": "06fb62803f8fc70d42ea9dc609c36c16",
"timestamp": "",
"source": "github",
"line_count": 483,
"max_line_length": 97,
"avg_line_length": 35.86128364389234,
"alnum_prop": 0.5275099590092951,
"repo_name": "wileeam/airflow",
"id": "a575ee91a06aacb2cbb43fd6f1ea4757bec87d11",
"size": "18108",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/sensors/test_external_task_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17179"
},
{
"name": "HTML",
"bytes": "148281"
},
{
"name": "JavaScript",
"bytes": "25233"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "9763694"
},
{
"name": "Shell",
"bytes": "221331"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
import logging
from cinder.scheduler import filters
LOG = logging.getLogger(__name__)
class IgnoreAttemptedHostsFilter(filters.BaseHostFilter):
    """Filter out previously attempted hosts

    A host passes this filter if it has not already been attempted for
    scheduling. The scheduler needs to add previously attempted hosts
    to the 'retry' key of filter_properties in order for this to work
    correctly. For example::

        {
            'retry': {
                'hosts': ['host1', 'host2'],
                'num_attempts': 3,
            }
        }
    """

    def host_passes(self, host_state, filter_properties):
        """Skip nodes that have already been attempted."""
        attempted = filter_properties.get('retry')
        if not attempted:
            # Re-scheduling is disabled
            LOG.debug("Re-scheduling is disabled.")
            return True

        hosts = attempted.get('hosts', [])
        host = host_state.host

        passes = host not in hosts
        pass_msg = "passes" if passes else "fails"

        # Pass the mapping as a lazy logging argument instead of
        # eagerly interpolating with '%': the string is only formatted
        # when debug logging is actually enabled.
        LOG.debug("Host %(host)s %(pass_msg)s. Previously tried hosts: "
                  "%(hosts)s", {'host': host,
                                'pass_msg': pass_msg,
                                'hosts': hosts})
        return passes
| {
"content_hash": "468e76c552d5b77f6e53bf0111f2e316",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 73,
"avg_line_length": 30.785714285714285,
"alnum_prop": 0.5699922660479505,
"repo_name": "apporc/cinder",
"id": "db1071d820acfba4f451311fbf62fd277910fc61",
"size": "1934",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cinder/scheduler/filters/ignore_attempted_hosts_filter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13595277"
},
{
"name": "Shell",
"bytes": "8222"
}
],
"symlink_target": ""
} |
from ...maths import (
parametricEllipse, parametricSuperEllipse,
parametricHippopede, parametricCassini)
from .arc import ArclikePathElement
class _EllipticPathElement(ArclikePathElement):
    """
    Base PathElement for elliptic shapes.

    Subclasses set ``_PARAMETRIC_FUNCTION`` to a callable
    ``f(hradius, vradius, angle) -> point`` describing their curve.
    """

    def initialize(self, **config):
        super().initialize(**config)
        # Horizontal/vertical radii from the element configuration.
        self.hradius = config["hradius"]
        self.vradius = config["vradius"]
        # Pivot is the curve point at the starting angle; displacements
        # are measured relative to it.
        # NOTE(review): assumes ``initial_angle``, ``current_angle`` and
        # ``speed`` are provided by ArclikePathElement -- confirm.
        self.pivot = self._PARAMETRIC_FUNCTION(
            self.hradius, self.vradius, self.initial_angle)

    def updateDisplacement(self):
        """
        Update this PathElement's displacement.
        """
        self.displacement = self._PARAMETRIC_FUNCTION(
            self.hradius, self.vradius, self.current_angle) - self.pivot
        self.current_angle += self.speed
        self._transition()
        self._checkDone()
class EllipsePathElement(_EllipticPathElement):
    """
    A PathElement that represents motion in an ellipse.
    """
    # Parametric point on an axis-aligned ellipse.
    _PARAMETRIC_FUNCTION = parametricEllipse
class SuperEllipsePathElement(_EllipticPathElement):
    """
    A PathElement that represents motion in a super-ellipse.
    """

    def initialize(self, **config):
        # NOTE(review): this class defines no _PARAMETRIC_FUNCTION, yet
        # super().initialize() (the base class) references it -- confirm
        # that a default exists up the hierarchy.
        super().initialize(**config)
        # Super-ellipse shape exponent (2 gives a regular ellipse).
        self.exponent = config["exponent"]
        # NOTE(review): the base class computes its pivot from
        # ``initial_angle`` but this override uses ``current_angle`` --
        # confirm whether the difference is intentional.
        self.pivot = parametricSuperEllipse(
            self.hradius, self.vradius, self.exponent, self.current_angle)

    def updateDisplacement(self):
        """
        Update this PathElement's displacement.
        """
        self.displacement = parametricSuperEllipse(
            self.hradius, self.vradius, self.exponent, self.current_angle) - self.pivot
        self.current_angle += self.speed
        self._transition()
        self._checkDone()
class HippopedePathElement(_EllipticPathElement):
    """
    A PathElement that represents motion in a hippopede.
    """
    _PARAMETRIC_FUNCTION = parametricHippopede
class CassiniOvalPathElement(_EllipticPathElement):
    """
    A PathElement that represents motion in a Cassini oval.
    """
    _PARAMETRIC_FUNCTION = parametricCassini
"content_hash": "9e906865d5df15bc6f28c0f6332dab67",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 78,
"avg_line_length": 22.864197530864196,
"alnum_prop": 0.7435205183585313,
"repo_name": "FCDM/py-dml",
"id": "4a82b45caa087450766a5d8b01afad6a9edb9157",
"size": "1852",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dml/components/paths/ellipse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "77357"
}
],
"symlink_target": ""
} |
from m5.defines import buildEnv
from m5.params import *
from m5.proxy import *
from System import System
class MipsSystem(System):
    # gem5/m5 SimObject declaration for a simulated MIPS system; the
    # Param.* descriptors declare configuration parameters, not values.
    type = 'MipsSystem'
    console = Param.String("file that contains the console code")
    bare_iron = Param.Bool(False, "Using Bare Iron Mode?")
    hex_file_name = Param.String("test.hex","hex file that contains [address,data] pairs")
    system_type = Param.UInt64("Type of system we are emulating")
    system_rev = Param.UInt64("Revision of system we are emulating")
# These variants are only meaningful (and only declared) when m5 was
# built in full-system mode.
if buildEnv['FULL_SYSTEM']:
    class LinuxMipsSystem(MipsSystem):
        type = 'LinuxMipsSystem'
        system_type = 34
        system_rev = 1 << 10

    class BareIronMipsSystem(MipsSystem):
        # Runs raw code loaded from a hex image instead of an OS.
        type = 'BareIronMipsSystem'
        bare_iron = True
        system_type = 34
        system_rev = 1 << 10
        hex_file_name = Param.String('test.hex',"hex file that contains [address,data] pairs")
| {
"content_hash": "4718f32b29fb288452fe866bfe6abbb5",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 94,
"avg_line_length": 33.96296296296296,
"alnum_prop": 0.6706652126499455,
"repo_name": "liangwang/m5",
"id": "d271bd3872eab50cedcd155f8709da7693642e52",
"size": "2501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/arch/mips/MipsSystem.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "490228"
},
{
"name": "C++",
"bytes": "8617145"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "Python",
"bytes": "2567844"
},
{
"name": "Shell",
"bytes": "6722"
},
{
"name": "Visual Basic",
"bytes": "2884"
}
],
"symlink_target": ""
} |
import os
from flask import Flask
from flask_sslify import SSLify
from scytale.views import bp
from scytale import jinja, login, models
def boolify(s):
    """Parse the strings 'true'/'false' (any case) into a bool.

    Raises ValueError for anything else.
    """
    lowered = s.lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    raise ValueError("%s is not one of 'True' or 'False'" % s)
def auto_convert(s):
    """Best-effort conversion of a string to bool, int or float.

    The first converter that accepts *s* wins; otherwise the string is
    returned unchanged.
    """
    converters = (boolify, int, float)
    for convert in converters:
        try:
            return convert(s)
        except ValueError:
            continue
    return s
def create_app():
    """Application factory: build and configure the Flask app."""
    app = Flask(__name__)
    # Redirect all plain-HTTP requests to HTTPS.
    SSLify(app)
    app.config.from_object('scytale.config')
    # Environment variables override config keys of the same name, with
    # string values auto-converted to bool/int/float where possible.
    app.config.update({
        k: auto_convert(os.environ[k])
        for k in app.config
        if k in os.environ})
    jinja.init_app(app)
    login.init_app(app)
    models.init_app(app)
    app.register_blueprint(bp)
    return app
| {
"content_hash": "7cf1fc4858cf09d5ffa39d60c61644b7",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 62,
"avg_line_length": 19.547619047619047,
"alnum_prop": 0.5980511571254568,
"repo_name": "WilliamMayor/scytale.xyz",
"id": "ce05937649cc66b72d520069940225f099e8d620",
"size": "821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scytale/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17988"
},
{
"name": "Dockerfile",
"bytes": "103"
},
{
"name": "HTML",
"bytes": "50260"
},
{
"name": "JavaScript",
"bytes": "8771"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "82862"
},
{
"name": "Shell",
"bytes": "2802"
}
],
"symlink_target": ""
} |
"""
This package includes tools to predict and plot neighborhoods.
"""
import nbdpred
| {
"content_hash": "902078aca5527bf946dafe6bfbd4a563",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 62,
"avg_line_length": 14.666666666666666,
"alnum_prop": 0.75,
"repo_name": "gautsi/gentrySeattle",
"id": "6cfd8dbf693fa9893b40d3705d62340001ab24e9",
"size": "88",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nbdtools/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34552"
}
],
"symlink_target": ""
} |
import logging
import tornado.escape
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket
import os.path
import uuid
from tornado.options import define, options
define("port", default=8888, help="run on the given port", type=int)
class Application(tornado.web.Application):
    def __init__(self):
        # Two routes: the page itself and the websocket it talks to.
        handlers = [
            (r"/", MainHandler),
            (r"/websocket", WebSocketHandler),
        ]
        settings = dict(
            # Placeholder secret -- must be replaced before deployment.
            cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
            template_path=os.path.join(os.path.dirname(__file__), "templates"),
            static_path=os.path.join(os.path.dirname(__file__), "static"),
            xsrf_cookies=True,
            debug=True,
        )
        tornado.web.Application.__init__(self, handlers, **settings)
class MainHandler(tornado.web.RequestHandler):
    # Serves the single-page client.
    def get(self):
        self.render("index.html")
class WebSocketHandler(tornado.websocket.WebSocketHandler):
waiters = set()
cache = []
obj = None
def get_compression_options(self):
# Non-None enables compression with default options.
return {}
def open(self):
WebSocketHandler.waiters.add(self)
print "=======", WebSocketHandler.obj
if WebSocketHandler.obj:
self.write_message(self.obj)
logging.info(self.request.remote_ip)
def on_close(self):
WebSocketHandler.waiters.remove(self)
@classmethod
def update_cache(cls, obj, self):
cls.cache.append(obj)
@classmethod
def send_updates(cls, obj, self):
logging.info("sending message to %d waiters", len(cls.waiters))
for waiter in cls.waiters:
if waiter is not self:
try:
waiter.write_message(obj)
except:
logging.error("Error sending message", exc_info=True)
def on_message(self, message):
logging.info("got message %s %r" % (self.request.full_url(), message))
obj = tornado.escape.json_decode(message)
if "DOUYU_Code" in obj:
WebSocketHandler.obj = obj
WebSocketHandler.update_cache(obj, self)
WebSocketHandler.send_updates(obj, self)
def main():
    # Parse --port (and other tornado options), then serve forever.
    tornado.options.parse_command_line()
    app = Application()
    app.listen(options.port)
    tornado.ioloop.IOLoop.current().start()
# Standard script entry point.
if __name__ == "__main__":
    main()
| {
"content_hash": "b608946ae7be3ba10a61cfadf87b9ff7",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 79,
"avg_line_length": 27.829545454545453,
"alnum_prop": 0.6153532053899551,
"repo_name": "dragondjf/DouyuApp",
"id": "570dc1ba50ddfe9d8680cd005a462f1e8c93321f",
"size": "3047",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "154069"
},
{
"name": "HTML",
"bytes": "2531"
},
{
"name": "JavaScript",
"bytes": "458569"
},
{
"name": "Python",
"bytes": "8992"
}
],
"symlink_target": ""
} |
import math, sys
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
import maya.OpenMayaAnim as OpenMayaAnim
# Maya plugin registration metadata.
kPluginNodeName = "brownie"
kPluginNodeId = OpenMaya.MTypeId(0x531AB36)
kPluginAuthor = "Bazillou2011"
kPluginVersion = "0.37"

# Module-level Maya function sets / storage objects reused by all node
# instances.
# NOTE(review): sharing mutable function sets at module scope assumes
# compute() calls never run concurrently -- confirm.
nurbIntersect = OpenMaya.MNurbsIntersector()
surfMFn = OpenMaya.MFnNurbsSurface()
curveFn = OpenMaya.MFnNurbsCurve()
pointList = OpenMaya.MPointArray()
tempPnt = OpenMaya.MPoint()
outMeshFn = OpenMaya.MFnMesh()
meshDataFn = OpenMaya.MFnMeshData()
class brownie(OpenMayaMPx.MPxNode):
def __init__(self):
    # Standard MPxNode boilerplate; node state lives in the dependency
    # graph plugs, not on the instance.
    OpenMayaMPx.MPxNode.__init__(self)
def check_curve_surface_plugs(self, argList):
    """Return True when the first data object in *argList* holds data,
    i.e. its MObject is not null (something is connected)."""
    data_object = argList[0]
    return not data_object.isNull()
def getPatchInfos(self, surfMFn):
    """Collect topology data from a NURBS surface function set.

    Returns a 12-element list:
    [numCVsInU, numCVsInV, degreeU, degreeV, formInU, formInV,
     knotsU, knotsV, startU, endU, startV, endV]
    """
    uCv = surfMFn.numCVsInU()
    vCv = surfMFn.numCVsInV()
    formInU_val = surfMFn.formInU()
    formInV_val = surfMFn.formInV()
    degreeU_val = surfMFn.degreeU()
    degreeV_val = surfMFn.degreeV()
    path_knotsU = OpenMaya.MDoubleArray()
    path_knotsV = OpenMaya.MDoubleArray()
    surfMFn.getKnotsInU( path_knotsU )
    surfMFn.getKnotsInV( path_knotsV )
    # getKnotDomain() writes through double pointers, so each output
    # needs an MScriptUtil-wrapped double.
    startUParam = OpenMaya.MScriptUtil()
    startUParam.createFromDouble(0.0)
    startUPtr = startUParam.asDoublePtr()
    endUParam = OpenMaya.MScriptUtil()
    endUParam.createFromDouble(0.0)
    endUPtr = endUParam.asDoublePtr()
    startVParam = OpenMaya.MScriptUtil()
    startVParam.createFromDouble(0.0)
    startVPtr = startVParam.asDoublePtr()
    endVParam = OpenMaya.MScriptUtil()
    endVParam.createFromDouble(0.0)
    endVPtr = endVParam.asDoublePtr()
    surfMFn.getKnotDomain( startUPtr, endUPtr, startVPtr, endVPtr )
    # Read the computed values back out of the pointers.
    startU = OpenMaya.MScriptUtil(startUPtr).asDouble()
    endU = OpenMaya.MScriptUtil(endUPtr).asDouble()
    startV = OpenMaya.MScriptUtil(startVPtr).asDouble()
    endV = OpenMaya.MScriptUtil(endVPtr).asDouble()
    return [ uCv ,vCv ,degreeU_val ,degreeV_val,formInU_val ,formInV_val,path_knotsU ,path_knotsV,startU,endU , startV, endV ]
def compute(self, Plug, Data):
    """Dependency-graph entry point: dispatch on the requested plug."""
    if Plug == self.outLattice :
        InputData = Data.inputValue(self.input ).asNurbsSurface()
        # Bail out silently when no surface is connected to the input.
        InputShapeConnected = self.check_curve_surface_plugs([InputData])
        if InputShapeConnected == False:
            return
        else:
            self.compute_outLattice( Data,InputData)
    if Plug == self.outCage :
        self.compute_outCage( Data )
    if Plug == self.outMesh :
        self.compute_outMesh( Data)
    if Plug == self.outTube :
        self.compute_outTube( Data)
def compute_outTube(self, Data):
    """Sweep an 8-sided ring along the spline matrices and write the
    resulting tube mesh to the outTube plug."""
    splineMatrixData = self.composeSplineMatrix( Data)
    radius_Val = Data.inputValue( self.radius).asDouble()
    if splineMatrixData == None:
        return
    else:
        # NOTE(review): radius is re-read here (duplicate of the line
        # above) and resultPoint / indexTable / locSpace are unused.
        radius_Val = Data.inputValue( self.radius).asDouble()
        knotCount = 8
        # Profile point at 12 o'clock; rotated around X to build the ring.
        offsetPoint = OpenMaya.MPoint(0,radius_Val,0,1)
        offsetArray = OpenMaya.MPointArray(knotCount+1)
        u_len = splineMatrixData.length()
        indexTable = [[5,4,3],
                      [6,8,2],
                      [7,0,1]]
        shiftVal = OpenMaya.MEulerRotation(0,0,0, 0)
        shftRatio = 360.0/(knotCount*1.0)
        locSpace = OpenMaya.MSpace.kObject
        for j in range(knotCount):
            shiftVal.x = math.radians(j*shftRatio)
            offsetArray.set(offsetPoint*shiftVal.asMatrix(),j)
        newMeshObj = meshDataFn.create()
        resultPoint = OpenMaya.MPoint()
        VtxPos = OpenMaya.MPointArray(knotCount*u_len )
        # One quad per ring segment per span; every face has 4 vertices.
        polygonCounts = OpenMaya.MIntArray(knotCount*(u_len-1) ,4)
        polygonConnects = OpenMaya.MIntArray()
        # Next-vertex lookup around the ring (wraps 7 -> 0).
        indexTableB = [1,2,3,4,5,6,7,0 ]
        for i in range(u_len-1):
            for j in range(knotCount):
                vertID_A = j+(i*knotCount)
                vertID_B = j+((i+1)*knotCount)
                vertID_C = indexTableB[j]+((i+1)*knotCount)
                vertID_D = indexTableB[j] +(i*knotCount)
                polygonConnects.append(vertID_A)
                polygonConnects.append(vertID_D)
                polygonConnects.append(vertID_C)
                polygonConnects.append(vertID_B)
        # Optional per-vertex offsets supplied through the knotOffset plug.
        knotOffset_obj = Data.inputValue( self.knotOffset).data()
        knotChk = False
        knotOffsetList = None
        if knotOffset_obj.isNull() == False:
            knotOffsetFn = OpenMaya.MFnVectorArrayData(knotOffset_obj)
            knotOffsetList = knotOffsetFn.array()
            if knotOffsetList.length() > 0 :
                knotChk = True
        if knotChk == False:
            # Plain sweep: transform each ring point by its slice matrix.
            for i in range(u_len):
                for j in range(knotCount):
                    VtxPos.set(offsetArray[j] * splineMatrixData[i] ,j+(i*knotCount))
        else:
            # Offset sweep: each vertex gets its own displacement vector.
            tmpPnt = OpenMaya.MPoint()
            for i in range(u_len):
                for j in range(knotCount):
                    VtxPos.set((tmpPnt +knotOffsetList[j+(i*knotCount)] ) * splineMatrixData[i] ,j+(i*knotCount))
        outMeshFn.create (VtxPos.length(), polygonCounts.length(), VtxPos, polygonCounts, polygonConnects, newMeshObj)
        outHandle = Data.outputValue( self.outTube)
        outHandle.setMObject(newMeshObj)
        outHandle.setClean()
def composeSplineMatrix(self, Data):
    """Decode the splineMatrix vector-array input plug into an MMatrixArray.

    Layout of the vector array: element 0 carries the matrix count in its
    .x component, followed by four row vectors per matrix.  Returns None
    when the plug is empty or holds no matrices.
    """
    splineMatrix_obj = Data.inputValue(self.splineMatrix).data()
    if splineMatrix_obj.isNull():
        return None
    vecList = OpenMaya.MFnVectorArrayData(splineMatrix_obj).array()
    if vecList.length() / 4 < 1:
        return None
    matrixCount = int(vecList[0].x)
    matrices = OpenMaya.MMatrixArray(matrixCount)
    for idx in range(matrixCount):
        # Rows 0-2 are basis vectors (w=0); row 3 is translation (w=1).
        flat = []
        for row in range(4):
            rowVec = vecList[1 + idx * 4 + row]
            flat.extend([rowVec.x, rowVec.y, rowVec.z, 1.0 if row == 3 else 0.0])
        decoded = OpenMaya.MMatrix()
        OpenMaya.MScriptUtil().createMatrixFromList(flat, decoded)
        matrices.set(decoded, idx)
    return matrices
def compute_outMesh(self,Data):
    """Build a closed 8-sided tube mesh (with end caps) along the spline matrices.

    Like compute_outTube but adds one cap vertex at each end and 8 cap quads
    (4 per end).  Writes the mesh into the self.outMesh output plug; does
    nothing when no spline matrix data is available.

    NOTE(review): relies on module-level function sets `meshDataFn` and
    `outMeshFn` defined elsewhere in this file.
    """
    splineMatrixData = self.composeSplineMatrix( Data)
    radius_Val = Data.inputValue( self.radius).asDouble()
    if splineMatrixData == None:
        return
    else:
        # Duplicate of the read above; kept as-is (behavior-neutral).
        radius_Val = Data.inputValue( self.radius).asDouble()
        knotCount = 8  # vertices per ring
        # Profile point at `radius` on +Y; rotated about X to sweep one ring.
        offsetPoint = OpenMaya.MPoint(0,radius_Val,0,1)
        offsetArray = OpenMaya.MPointArray(knotCount+1)
        u_len = splineMatrixData.length()
        # NOTE(review): indexTable is unused in this method.
        indexTable = [[5,4,3],
                        [6,8,2],
                        [7,0,1]]
        shiftVal = OpenMaya.MEulerRotation(0,0,0, 0)
        shftRatio = 360.0/(knotCount*1.0)  # degrees between ring vertices
        sidenum = int(math.sqrt(knotCount+1))  # unused here; see compute_outCage
        locSpace = OpenMaya.MSpace.kObject
        for j in range(knotCount):
            shiftVal.x = math.radians(j*shftRatio)
            offsetArray.set(offsetPoint*shiftVal.asMatrix(),j)
        newMeshObj = meshDataFn.create()
        resultPoint = OpenMaya.MPoint()  # origin; transformed to cap centers
        # +2 vertices: one cap center per end.
        VtxPos = OpenMaya.MPointArray(knotCount*u_len+2)
        # +8 faces: four cap quads per end, all quads.
        polygonCounts = OpenMaya.MIntArray(knotCount*(u_len-1)+8,4)
        polygonConnects = OpenMaya.MIntArray()
        indexTableB = [1,2,3,4,5,6,7,0 ]  # next-vertex index around a ring
        # Side walls: one quad per ring edge per segment.
        for i in range(u_len-1):
            for j in range(knotCount):
                vertID_A = j+(i*knotCount)
                vertID_B = j+((i+1)*knotCount)
                vertID_C = indexTableB[j]+((i+1)*knotCount)
                vertID_D = indexTableB[j] +(i*knotCount)
                polygonConnects.append(vertID_A)
                polygonConnects.append(vertID_D)
                polygonConnects.append(vertID_C)
                polygonConnects.append(vertID_B)
        # Cap center vertices: origin transformed by the first/last matrix.
        capA = knotCount*u_len
        VtxPos.set(resultPoint*splineMatrixData[0] ,capA)
        capB = knotCount*u_len+1
        VtxPos.set(resultPoint*splineMatrixData[u_len-1] ,capB)
        for i in range(u_len):
            for j in range(knotCount):
                VtxPos.set(offsetArray[j] * splineMatrixData[i] ,j+(i*knotCount))
        # Start cap: four quads fanning around capA over ring 0.
        capList = [0,capA,2,1,7,6,capA,0,6,5,4,capA,capA,4,3,2 ]
        for j in range(len(capList)):
            polygonConnects.append(capList[j])
        # End cap: same fan pattern around capB over the last ring.
        lastRow = []
        for j in range(knotCount):
            lastRow.append(j+((u_len-1)*knotCount))
        capListB = [lastRow[0],lastRow[1],lastRow[2],capB,
                    lastRow[7],lastRow[0],capB,lastRow[6],
                    lastRow[6],capB,lastRow[4],lastRow[5],
                    capB,lastRow[2],lastRow[3],lastRow[4] ]
        for j in range(len(capListB)):
            polygonConnects.append(capListB[j] )
        outMeshFn.create (VtxPos.length(), polygonCounts.length(), VtxPos, polygonCounts, polygonConnects, newMeshObj)
        outHandle = Data.outputValue( self.outMesh)
        outHandle.setMObject(newMeshObj)
        outHandle.setClean()
def compute_outCage(self,Data):
    """Build a 3x3xN lattice cage wrapped around the spline-matrix path.

    The lattice has divX = number of spline matrices and divY = divZ = 3
    (sqrt(knotCount+1)).  Each lattice point is mapped to a ring vertex via
    indexTable, then transformed by the corresponding spline matrix.
    Writes the lattice into the self.outCage output plug.
    """
    splineMatrixData = self.composeSplineMatrix( Data)
    radius_Val = Data.inputValue( self.radius).asDouble()
    if splineMatrixData == None:
        return
    else:
        knotCount = 8  # ring vertices; slot 8 of offsetArray stays at origin (center)
        offsetPoint = OpenMaya.MPoint(0,radius_Val,0,1)
        offsetArray = OpenMaya.MPointArray(knotCount+1)
        # Maps lattice (j, k) grid cell -> ring vertex index; 8 is the center slot.
        indexTable = [[5,4,3],
                        [6,8,2],
                        [7,0,1]]
        shiftVal = OpenMaya.MEulerRotation(0,0,0, 0)
        shftRatio = 360.0/(knotCount*1.0)  # degrees between ring vertices
        sidenum = int(math.sqrt(knotCount+1))  # 3: lattice side length in Y/Z
        for j in range(knotCount):
            shiftVal.x = math.radians(j*shftRatio)
            offsetArray.set(offsetPoint*shiftVal.asMatrix(),j)
        outCage_Hndle = Data.outputValue(self.outCage )
        latDat = OpenMaya.MFnLatticeData ()
        latObj = latDat.create()
        lafFn = OpenMayaAnim.MFnLattice()
        divX = splineMatrixData.length()
        divY = sidenum
        divZ = sidenum
        lafFn.create( divX,divY,divZ,latObj)
        # Optional explicit per-knot offsets override the circular profile.
        knotOffset_obj = Data.inputValue( self.knotOffset).data()
        knotChk = False
        knotOffsetList = None
        if knotOffset_obj.isNull() == False:
            knotOffsetFn = OpenMaya.MFnVectorArrayData(knotOffset_obj)
            knotOffsetList = knotOffsetFn.array()
            if knotOffsetList.length() > 0 :
                knotChk = True
        if knotChk == False:
            # Default: position lattice points on the circular ring profile.
            for i in range(divX):
                for j in range(divY):
                    for k in range(divZ):
                        outPoint = lafFn.point( i, j, k )
                        outPointB = offsetArray[ indexTable[j][k] ] * splineMatrixData[i]
                        outPoint.x = outPointB.x
                        outPoint.y = outPointB.y
                        outPoint.z = outPointB.z
        else:
            # Offsets supplied: use them for ring vertices; index 8 (center)
            # falls back to offsetArray[8], which was never set and is thus
            # the origin -- NOTE(review): confirm this is intended.
            idx = 0
            outPointB = OpenMaya.MPoint()
            tmpPnt = OpenMaya.MPoint()
            for i in range(divX):
                for j in range(divY):
                    for k in range(divZ):
                        outPoint = lafFn.point( i, j, k )
                        idx = indexTable[j][k]
                        if idx != 8 :
                            outPointB = (tmpPnt +knotOffsetList[idx+(i*knotCount)]) * splineMatrixData[i]
                        else :
                            outPointB = offsetArray[ 8 ] * splineMatrixData[i]
                        outPoint.x = outPointB.x
                        outPoint.y = outPointB.y
                        outPoint.z = outPointB.z
        outCage_Hndle.setMObject(latObj)
        outCage_Hndle.setClean()
def compute_matrix_from_2_vectors_and_u_Point(self, trX_Mat, trY_Mat, u_Point):
    """Build a 4x4 transform from two basis vectors and a translation point.

    The Z axis is the normalized cross product of the X and Y vectors; the
    matrix is homogenized before being returned.
    """
    zAxis = (trX_Mat ^ trY_Mat).normal()
    cells = [
        trX_Mat.x, trX_Mat.y, trX_Mat.z, 0.0,
        trY_Mat.x, trY_Mat.y, trY_Mat.z, 0.0,
        zAxis.x, zAxis.y, zAxis.z, 0.0,
        u_Point.x, u_Point.y, u_Point.z, u_Point.w,
    ]
    outMatrix = OpenMaya.MMatrix()
    OpenMaya.MScriptUtil().createMatrixFromList(cells, outMatrix)
    return outMatrix.homogenize()
def compute_outLattice(self,Data,InputData):
    """Build a 2-layer lattice sandwiching a NURBS surface.

    Samples the surface on a (uCv*uDensity+1) x (vCv*vDensity+1) parameter
    grid and creates two lattice layers offset along the surface normal by
    widthA/widthB (scaled by 0.01) on either side.  Writes the lattice into
    the self.outLattice output plug.

    NOTE(review): relies on module-level objects `surfMFn`, `pointList` and
    `nurbIntersect` defined elsewhere in this file; `InputData` is assumed
    to be a NURBS-surface MObject -- confirm against callers.
    """
    widthA_Val = Data.inputValue( self.widthA).asDouble()
    widthB_Val = Data.inputValue( self.widthB).asDouble()
    uDensity_Val = Data.inputValue( self.uDensity).asShort()
    vDensity_Val = Data.inputValue( self.vDensity).asShort()
    surfMFn.setObject(InputData )
    surfData = self.getPatchInfos(surfMFn)
    surfMFn.getCVs (pointList,OpenMaya.MSpace.kObject)
    # Unpack patch info (CV counts, knots, parameter ranges).
    uCv = surfData[0]
    vCv = surfData[1]
    path_knotsU = surfData[6]
    path_knotsV = surfData[7]
    startU = surfData[8]
    endU = surfData[9]
    startV = surfData[10]
    endV = surfData[11]
    # Parameter step sizes for the sampling grid.
    uRatio = (endU-startU)/(uCv*uDensity_Val*1.0)
    vRatio = (endV-startV)/(vCv*vDensity_Val*1.0)
    uRange = []
    vRange = []
    for k in xrange(uCv*uDensity_Val+1):
        uRange.append(k*uRatio)
    for k in xrange(vCv*vDensity_Val+1):
        vRange.append(k*vRatio)
    widthData = [widthA_Val,widthB_Val]
    mat = OpenMaya.MMatrix()
    nurbIntersect.create(InputData, mat)
    ptON = OpenMaya.MPointOnNurbs()  # NOTE(review): unused in this method
    outLattice_Hndle = Data.outputValue(self.outLattice )
    latDat = OpenMaya.MFnLatticeData ()
    latObj = latDat.create()
    lafFn = OpenMayaAnim.MFnLattice()
    divX = len(uRange)
    divY = len(vRange)
    divZ = 2  # one layer below, one above the surface
    lafFn.create( divX,divY,divZ,latObj)
    resultPoint = OpenMaya.MPoint()
    # Signed normal offsets for the two layers (1% of each width value).
    vecList = [widthData[0]*-0.01,widthData[1]*0.01]
    for i in range(divX):
        for j in range(divY):
            surfMFn.getPointAtParam (uRange[i],vRange[j],resultPoint,OpenMaya.MSpace.kObject )
            outV = surfMFn.normal(uRange[i],vRange[j],OpenMaya.MSpace.kObject).normal()
            for k in range(divZ):
                outPoint = lafFn.point( i, j, k )
                outPointB = resultPoint+(outV*vecList[k])
                outPoint.x = outPointB.x
                outPoint.y = outPointB.y
                outPoint.z = outPointB.z
    outLattice_Hndle.setMObject(latObj)
    outLattice_Hndle.setClean()
def nodeCreator():
    """Factory for Maya: return a new brownie node wrapped as an MPxPtr."""
    node = brownie()
    return OpenMayaMPx.asMPxPtr(node)
def nodeInitializer():
    """Declare all attributes of the brownie node and their dependencies.

    Creates the input surface, width/density controls feeding outLattice,
    the splineMatrix/radius/knotOffset inputs feeding outCage, outMesh and
    outTube, and registers attributeAffects relationships for each output.
    """
    typed_Attr = OpenMaya.MFnTypedAttribute()
    mAttr = OpenMaya.MFnNumericAttribute()
    nAttr = OpenMaya.MFnNumericAttribute()  # NOTE(review): unused
    # --- input surface ---------------------------------------------------
    # NOTE(review): kNurbsSurface is read off MFnNurbsCurveData here; it
    # looks like OpenMaya.MFnData.kNurbsSurface was intended -- confirm the
    # constant resolves correctly before changing.
    brownie.input = typed_Attr.create( "input", "in", OpenMaya.MFnNurbsCurveData.kNurbsSurface )
    typed_Attr.setStorable(0)
    typed_Attr.setKeyable(0)
    typed_Attr.setHidden(True)
    brownie.addAttribute( brownie.input )
    # --- lattice width/density controls ----------------------------------
    brownie.widthA = mAttr.create("widthA","wA", OpenMaya.MFnNumericData.kDouble,1.0)
    mAttr.setStorable(1)
    mAttr.setKeyable(1)
    mAttr.setHidden(0)
    brownie.addAttribute( brownie.widthA)
    brownie.widthB = mAttr.create("widthB","wB", OpenMaya.MFnNumericData.kDouble,1.0)
    mAttr.setStorable(1)
    mAttr.setKeyable(1)
    mAttr.setHidden(0)
    brownie.addAttribute( brownie.widthB)
    brownie.uDensity = mAttr.create("uDensity","uD", OpenMaya.MFnNumericData.kShort,1)
    mAttr.setStorable(1)
    mAttr.setKeyable(1)
    mAttr.setHidden(0)
    mAttr.setMin(1)
    mAttr.setSoftMax(5)
    brownie.addAttribute( brownie.uDensity)
    brownie.vDensity = mAttr.create("vDensity","vD", OpenMaya.MFnNumericData.kShort,1)
    mAttr.setStorable(1)
    mAttr.setKeyable(1)
    mAttr.setHidden(0)
    mAttr.setMin(1)
    mAttr.setSoftMax(5)
    brownie.addAttribute( brownie.vDensity)
    # --- outLattice output and its dependencies --------------------------
    brownie.outLattice = typed_Attr.create("outLattice", "upLat", OpenMaya.MFnData.kLattice)
    typed_Attr.setHidden(1)
    brownie.addAttribute(brownie.outLattice)
    brownie.attributeAffects( brownie.input , brownie.outLattice )
    brownie.attributeAffects( brownie.widthA , brownie.outLattice )
    brownie.attributeAffects( brownie.widthB , brownie.outLattice )
    brownie.attributeAffects( brownie.uDensity , brownie.outLattice )
    brownie.attributeAffects( brownie.vDensity , brownie.outLattice )
    # --- spline-path inputs ----------------------------------------------
    # Each 4x4 matrix is decomposed into 4 vectors (see composeSplineMatrix).
    brownie.splineMatrix = typed_Attr.create( "splineMatrix", "sMat", OpenMaya.MFnData.kVectorArray)
    typed_Attr.setStorable(0)
    typed_Attr.setKeyable(0)
    typed_Attr.setHidden(1)
    brownie.addAttribute(brownie.splineMatrix)
    brownie.radius = mAttr.create("radius","rds", OpenMaya.MFnNumericData.kDouble,1.0)
    mAttr.setStorable(1)
    mAttr.setKeyable(1)
    mAttr.setHidden(0)
    brownie.addAttribute( brownie.radius)
    # --- outCage output ---------------------------------------------------
    brownie.outCage = typed_Attr.create("outCage", "oCg", OpenMaya.MFnData.kLattice)
    typed_Attr.setHidden(1)
    brownie.addAttribute(brownie.outCage)
    brownie.attributeAffects( brownie.splineMatrix , brownie.outCage )
    brownie.attributeAffects( brownie.radius , brownie.outCage )
    # --- outMesh output ---------------------------------------------------
    brownie.outMesh = typed_Attr.create( "outMesh", "oMsh", OpenMaya.MFnMeshData.kMesh)
    typed_Attr.setHidden(1)
    brownie.addAttribute( brownie.outMesh )
    brownie.attributeAffects( brownie.splineMatrix , brownie.outMesh )
    brownie.attributeAffects( brownie.radius , brownie.outMesh )
    # --- knotOffset input and outTube output ------------------------------
    brownie.knotOffset = typed_Attr.create( "knotOffset", "kOff", OpenMaya.MFnData.kVectorArray)
    typed_Attr.setHidden(1)
    brownie.addAttribute( brownie.knotOffset )
    brownie.outTube = typed_Attr.create( "outTube", "oTbe", OpenMaya.MFnMeshData.kMesh)
    typed_Attr.setHidden(1)
    brownie.addAttribute( brownie.outTube )
    brownie.attributeAffects( brownie.splineMatrix , brownie.outTube )
    brownie.attributeAffects( brownie.radius , brownie.outTube )
    brownie.attributeAffects( brownie.knotOffset , brownie.outTube )
    return
def initializePlugin(mobject):
    """Register the brownie node with Maya when the plug-in is loaded.

    Raises on registration failure after logging to stderr.
    """
    mplugin = OpenMayaMPx.MFnPlugin(mobject, kPluginAuthor, kPluginVersion, "Any")
    try:
        mplugin.registerNode( kPluginNodeName, kPluginNodeId, nodeCreator, nodeInitializer, OpenMayaMPx.MPxNode.kDependNode)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed before the re-raise.
        sys.stderr.write( "Failed to register node: %s" % kPluginNodeName )
        raise
def uninitializePlugin(mobject):
    """Deregister the brownie node when the plug-in is unloaded.

    Raises on deregistration failure after logging to stderr.
    """
    mplugin = OpenMayaMPx.MFnPlugin(mobject)
    try:
        mplugin.deregisterNode( kPluginNodeId )
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed before the re-raise.
        sys.stderr.write( "Failed to deregister node: %s" % kPluginNodeName )
        raise
| {
"content_hash": "72b5ca19afd24b38a962f81036682db0",
"timestamp": "",
"source": "github",
"line_count": 544,
"max_line_length": 141,
"avg_line_length": 40.46139705882353,
"alnum_prop": 0.5270092226613966,
"repo_name": "cedricB/circeCharacterWorksTools",
"id": "dec4937c8e0655fa23e0f87fd965d7da57b16453",
"size": "22011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maya/plug-ins/brownie.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "225927"
}
],
"symlink_target": ""
} |
"""
Provides various string-related commands.
"""
import supybot
import supybot.world as world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you're keeping the plugin in CVS or some similar system.
__version__ = "%%VERSION%%"
__author__ = supybot.authors.jemfinch
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}
import config
import plugin
reload(plugin) # In case we're being reloaded.
# Add more reloads here if you add third-party modules and want them to be
# reloaded when this plugin is reloaded. Don't forget to import them as well!
if world.testing:
import test
Class = plugin.Class
configure = config.configure
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| {
"content_hash": "c4bf3b8efd361d63e2c2015b9dd2a2e6",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 78,
"avg_line_length": 25.774193548387096,
"alnum_prop": 0.7471839799749687,
"repo_name": "kblin/supybot-gsoc",
"id": "8dc45d5c061595f3fb6287b2d9527a004035fd3e",
"size": "2388",
"binary": false,
"copies": "15",
"ref": "refs/heads/stable",
"path": "plugins/String/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2238011"
}
],
"symlink_target": ""
} |
"""module to run the harvester"""
import subprocess
class Theharvester():
    """Wrapper that shells out to the theHarvester OSINT tool.

    For each lookup value, runs theHarvester once per configured source and
    writes the accumulated output to a per-lookup report file.
    """
    def __init__(self):
        # Results accumulate across lookups (and across run() calls), so
        # later report files also contain earlier results.
        # NOTE(review): confirm that accumulation is intended.
        self.theharvester_result = []
        self.harvester_sources = ['google', 'linkedin']
    def run(self, args, lookup, report_directory):
        """Run theHarvester for every value in `lookup`.

        args: parsed CLI namespace (only `args.verbose` is read).
        lookup: iterable of domains/IPs to enumerate.
        report_directory: base directory containing one subdir per lookup.
        Returns the accumulated result list.
        """
        for i, l in enumerate(lookup):
            # `with` guarantees the report file is closed even on error
            # (the original implementation leaked the handle).
            with open(report_directory+l+'/'+l+'_theharvester.txt', 'w') as harvesterFile:
                for source in self.harvester_sources:
                    try:
                        print('[+] Running theHarvester -b {} -d {} '.format(source, l))
                        bash_command = subprocess.Popen(['theharvester', '-b', '{}'.format(source), '-d', '{}'.format(str(l)), '-l', '500', '-h'], stdout=subprocess.PIPE).communicate()[0].split('\r\n')
                    except Exception:
                        # Narrowed from a bare `except:`; still best-effort.
                        print('[-] Error running theHarvester. Make sure it is in your PATH and you are connected to the Internet')
                        self.theharvester_result.append('Error running theHarvester')
                        continue
                    # Append results for this source.
                    self.theharvester_result.append(bash_command)
                # Write everything gathered so far to this lookup's file.
                for r in self.theharvester_result:
                    harvesterFile.writelines(r)
            # Verbose: echo results to stdout as well.
            if args.verbose is True:
                for h in self.theharvester_result:
                    print(''.join(h))
        return self.theharvester_result
| {
"content_hash": "e6153e95018d7dec7ff8e061d62656a2",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 197,
"avg_line_length": 45.8,
"alnum_prop": 0.5664379288833438,
"repo_name": "bharshbarger/AutOSINT",
"id": "3783c679989c3fbe766c2c1768ef540511e6da33",
"size": "1625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/theharvester.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51372"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import markdown
from django.utils.encoding import smart_unicode, smart_str
from django import forms
from django.utils.safestring import mark_safe
from django.forms import widgets, Select
from django.forms.fields import DateField
from django.db.models import Q
from coreExtend.models import Account
from replica.pulse.models import Entry, Media, Draft, Topic, EntryType
from replica.pulse.widgets import DateTimeWidget, SplitSelectDateTimeWidget
# Options for bootstrap-datetimepicker style widgets.
# NOTE(review): not referenced anywhere in this module chunk -- presumably
# consumed by the imported widget classes; confirm before removing.
dateTimeOptions = {
    'format': 'dd/mm/yyyy HH:ii P',
    'autoclose': 'true',
    'showMeridian' : 'true'
}
class CheckboxSelectMultipleP(forms.CheckboxSelectMultiple):
    """CheckboxSelectMultiple rendered as Bootstrap grid columns.

    Strips the default <ul>/<li> wrappers from the rendered markup and
    replaces each item wrapper with a `col-md-2` div.
    """
    def render(self, *args, **kwargs):
        rendered = super(CheckboxSelectMultipleP, self).render(*args, **kwargs)
        substitutions = (
            (u'<ul>', u''),
            (u'</ul>', u''),
            (u'<li>', u'<div class="col-md-2">'),
            (u'</li>', u'</div>'),
        )
        for old, new in substitutions:
            rendered = rendered.replace(old, new)
        return mark_safe(rendered)
class EntryModelForm(forms.ModelForm):
    """Full editing form for an Entry.

    NOTE(review): __init__ reads `kwargs.get('instance').user`, so this
    form requires an `instance` kwarg; instantiating without one raises
    AttributeError -- confirm all callers pass an instance.
    """
    def __init__(self, *args, **kwargs):
        super(EntryModelForm, self).__init__(*args, **kwargs)
        # Scope the image and topic choices to objects the entry's owner
        # can use (own media; own topics or topics they are a member of).
        self.user = kwargs.get('instance').user
        self.fields['image'].queryset = Media.objects.filter(user=self.user)
        self.fields['topic'].queryset = Topic.objects.filter(
            Q(user=self.user) |
            Q(members=self.user)
        )
    class Meta:
        model = Entry
        # Server-managed/derived fields are excluded from the form.
        exclude = ('slug', 'deck_html', 'body_html', 'user', 'guid', 'content_format', )
        widgets = {
            'title': forms.Textarea(attrs={'class':'form-control ReplicaForm nano_title autosize', 'placeholder':'Title', 'rows':'1', 'value':''}),
            'deck': forms.Textarea(attrs={'class':'form-control ReplicaForm nano_deck autosize', 'placeholder':'Optional Summary', 'rows':'1'}),
            'pub_date': SplitSelectDateTimeWidget(minute_step=15, second_step=30),
            'url': forms.TextInput(attrs={'class':'form-control', 'placeholder':'Link to another url?',}),
            'body': forms.Textarea(attrs={'class':'form-control ReplicaForm markdown', 'placeholder':'Start typing', 'data-provide':'markdown'}),
            'is_active': forms.RadioSelect,
            'topic': forms.CheckboxSelectMultiple(),
            'post_type': forms.Select(attrs={'class':'form-control',}),
            'image': forms.Select(attrs={'class':'form-control',}),
        }
class QuickEntryModelForm(forms.ModelForm):
    """Minimal quick-post form for an Entry (title, body, url, date, active).

    `title` and `body` override the model defaults to make both optional.
    """
    title = forms.CharField(required=False, widget=forms.TextInput(attrs={'class': 'form-control ReplicaForm nano_title', 'placeholder':'Optional Title', 'value':''}))
    body = forms.CharField(required=False, widget=forms.Textarea(attrs={'class': 'form-control ReplicaForm', 'placeholder':'Just start typing...', 'rows':'1',}))
    class Meta:
        model = Entry
        # NOTE(review): 'Topic' is capitalized here while the model field
        # used elsewhere is 'topic' -- verify the intended field is excluded.
        exclude = ('slug', 'deck_html', 'body_html', 'user', 'guid', 'image','content_format', 'post_type', 'Topic', 'deck',)
        widgets = {
            'url': forms.TextInput(attrs={'class':'form-control', 'placeholder':'Link to another url?',}),
            'pub_date': forms.TextInput(attrs={'id': 'datetimepicker', 'class':'form-control', 'placeholder':'Publish Date', 'data-date-format': 'yyyy-mm-dd hh:ii' }),
            'is_active': forms.RadioSelect,
        }
class DraftModelForm(forms.ModelForm):
    """Editing form for a Draft; server-managed fields are excluded."""
    class Meta:
        model = Draft
        exclude = ('last_edit', 'slug', 'deck_html', 'body_html', 'user')
class TopicModelForm(forms.ModelForm):
    """Editing form for a Topic; member choices are limited to staff users."""
    def __init__(self, *args, **kwargs):
        super(TopicModelForm, self).__init__(*args, **kwargs)
        # Only staff accounts may be added as topic members.
        self.fields['members'].queryset = Account.objects.filter(is_staff=True)
    class Meta:
        model = Topic
        exclude = ['pub_date', 'slug', 'user', 'guid', 'thumbnail']
        #fields = ['title', 'description', 'is_public', 'image']
        widgets = {
            'title': forms.TextInput(attrs={'class':'form-control ReplicaForm nano_title', 'placeholder':'Topic name',}),
            'description': forms.Textarea(attrs={'class':'form-control ReplicaForm', 'placeholder':'Describe your topic', 'rows':'1'}),
            'is_public': forms.RadioSelect,
            'members': CheckboxSelectMultipleP(),
            'image': forms.ClearableFileInput
        }
class EntryTypeModelForm(forms.ModelForm):
    """Editing form for an EntryType; slug is optional (auto-generated if blank)."""
    title = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control ReplicaForm', 'placeholder':'Entry Type Name', 'value':''}))
    slug = forms.CharField(required=False, widget=forms.TextInput(attrs={'class': 'form-control ReplicaForm', 'placeholder':'Entry Type Name', 'value':''}))
    class Meta:
        model = EntryType
        exclude = ('user', 'guid',)
class MediaModelForm(forms.ModelForm):
    """Upload/editing form for a Media item; server-managed fields are excluded."""
    class Meta:
        model = Media
        exclude = ('pub_date', 'user', 'guid', 'thumbnail',)
        widgets = {
            'title': forms.TextInput(attrs={'class':'form-control ReplicaForm', 'placeholder':'Image Title',}),
            'description': forms.Textarea(attrs={'class':'form-control ReplicaForm', 'placeholder':'Brief media description', 'rows':'1'}),
            'url': forms.TextInput(attrs={'class':'form-control', 'placeholder':'Link to an image?',}),
            'image': forms.ClearableFileInput,
        }
| {
"content_hash": "f8a94cc9d540c7c69feeca565a2599e6",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 164,
"avg_line_length": 42.75,
"alnum_prop": 0.693609022556391,
"repo_name": "underlost/Replica",
"id": "faa8aa5fdafc337a0b2a8355f1f023ccbd46330d",
"size": "4788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "replica/pulse/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "478632"
},
{
"name": "HTML",
"bytes": "200438"
},
{
"name": "JavaScript",
"bytes": "777387"
},
{
"name": "Python",
"bytes": "104357"
},
{
"name": "Ruby",
"bytes": "108"
}
],
"symlink_target": ""
} |
import functools
import json
import logging
import os
import re
import shutil
import stat
import subprocess
import sys
import tempfile
from typing import (IO, Any, Callable, Iterable, List, MutableMapping, Text,
Tuple, Union, cast)
import shellescape
from . import docker
from .builder import Builder
from .docker_uid import docker_vm_uid
from .errors import WorkflowException
from .pathmapper import PathMapper
from .process import (UnsupportedRequirement, empty_subtree, get_feature,
stageFiles)
_logger = logging.getLogger("cwltool")
needs_shell_quoting_re = re.compile(r"""(^$|[\s|&;()<>\'"$@])""")
FORCE_SHELLED_POPEN = os.getenv("CWLTOOL_FORCE_SHELL_POPEN", "0") == "1"
SHELL_COMMAND_TEMPLATE = """#!/bin/bash
python "run_job.py" "job.json"
"""
PYTHON_RUN_SCRIPT = """
import json
import sys
import subprocess
with open(sys.argv[1], "r") as f:
popen_description = json.load(f)
commands = popen_description["commands"]
cwd = popen_description["cwd"]
env = popen_description["env"]
stdin_path = popen_description["stdin_path"]
stdout_path = popen_description["stdout_path"]
stderr_path = popen_description["stderr_path"]
if stdin_path is not None:
stdin = open(stdin_path, "rb")
else:
stdin = subprocess.PIPE
if stdout_path is not None:
stdout = open(stdout_path, "wb")
else:
stdout = sys.stderr
if stderr_path is not None:
stderr = open(stderr_path, "wb")
else:
stderr = sys.stderr
sp = subprocess.Popen(commands,
shell=False,
close_fds=True,
stdin=stdin,
stdout=stdout,
stderr=stderr,
env=env,
cwd=cwd)
if sp.stdin:
sp.stdin.close()
rcode = sp.wait()
if isinstance(stdin, file):
stdin.close()
if stdout is not sys.stderr:
stdout.close()
if stderr is not sys.stderr:
stderr.close()
sys.exit(rcode)
"""
def deref_links(outputs):  # type: (Any) -> None
    """Recursively replace symlinked File paths with their link targets, in place.

    Walks nested dicts/lists; for any dict whose "class" is "File" and whose
    "path" is a symlink, rewrites "path" to the link target.
    """
    if isinstance(outputs, dict):
        if outputs.get("class") == "File":
            mode = os.lstat(outputs["path"]).st_mode
            if stat.S_ISLNK(mode):
                outputs["path"] = os.readlink(outputs["path"])
        else:
            for value in outputs.values():
                deref_links(value)
    if isinstance(outputs, list):
        for item in outputs:
            deref_links(item)
def relink_initialworkdir(pathmapper, inplace_update=False):
    # type: (PathMapper, bool) -> None
    """Replace staged copies in the initial working directory with symlinks
    pointing back at their resolved source paths.

    Writable entries are only relinked when inplace_update is enabled.
    """
    for _, vol in pathmapper.items():
        if not vol.staged:
            continue
        relinkable = vol.type in ("File", "Directory") or (
            inplace_update and vol.type in ("WritableFile", "WritableDirectory"))
        if not relinkable:
            continue
        # Remove whatever currently occupies the target slot, then link.
        if os.path.islink(vol.target) or os.path.isfile(vol.target):
            os.remove(vol.target)
        elif os.path.isdir(vol.target):
            os.rmdir(vol.target)
        os.symlink(vol.resolved, vol.target)
class JobBase(object):
    """Shared state and execution machinery for one command-line job.

    Subclasses (local and Docker runners) populate the attributes declared
    in __init__, then call _setup() and _execute() from their run() methods.
    """
    def __init__(self):  # type: () -> None
        self.builder = None  # type: Builder
        self.joborder = None  # type: Dict[Text, Union[Dict[Text, Any], List, Text]]
        self.stdin = None  # type: Text
        self.stderr = None  # type: Text
        self.stdout = None  # type: Text
        self.successCodes = None  # type: Iterable[int]
        self.temporaryFailCodes = None  # type: Iterable[int]
        self.permanentFailCodes = None  # type: Iterable[int]
        self.requirements = None  # type: List[Dict[Text, Text]]
        self.hints = None  # type: Dict[Text,Text]
        self.name = None  # type: Text
        self.command_line = None  # type: List[Text]
        self.pathmapper = None  # type: PathMapper
        self.generatemapper = None  # type: PathMapper
        self.collect_outputs = None  # type: Union[Callable[[Any], Any], functools.partial[Any]]
        self.output_callback = None  # type: Callable[[Any, Any], Any]
        self.outdir = None  # type: Text
        self.tmpdir = None  # type: Text
        self.environment = None  # type: MutableMapping[Text, Text]
        self.generatefiles = None  # type: Dict[Text, Union[List[Dict[Text, Text]], Dict[Text, Text], Text]]
        self.stagedir = None  # type: Text
        self.inplace_update = None  # type: bool

    def _setup(self):  # type: () -> None
        """Create the output directory, validate that all mapped input files
        exist, and build the generatemapper for InitialWorkDir listings."""
        if not os.path.exists(self.outdir):
            os.makedirs(self.outdir)

        # Fail early if any staged input file is missing or not a regular file.
        for knownfile in self.pathmapper.files():
            p = self.pathmapper.mapper(knownfile)
            if p.type == "File" and not os.path.isfile(p[0]):
                raise WorkflowException(
                    u"Input file %s (at %s) not found or is not a regular "
                    "file." % (knownfile, self.pathmapper.mapper(knownfile)[0]))

        if self.generatefiles["listing"]:
            self.generatemapper = PathMapper(cast(List[Any], self.generatefiles["listing"]),
                                             self.outdir, self.outdir, separateDirs=False)
            _logger.debug(u"[job %s] initial work dir %s", self.name,
                          json.dumps({p: self.generatemapper.mapper(p) for p in self.generatemapper.files()}, indent=4))

    def _execute(self, runtime, env, rm_tmpdir=True, move_outputs="move"):
        # type: (List[Text], MutableMapping[Text, Text], bool, Text) -> None
        """Run the command line (prefixed by `runtime`, e.g. a docker
        invocation), classify the exit code, collect outputs, invoke the
        output callback, and clean up staging/temp directories."""

        scr, _ = get_feature(self, "ShellCommandRequirement")

        # With ShellCommandRequirement, arguments are passed through
        # unquoted; otherwise quote anything with shell metacharacters.
        shouldquote = None  # type: Callable[[Any], Any]
        if scr:
            shouldquote = lambda x: False
        else:
            shouldquote = needs_shell_quoting_re.search

        _logger.info(u"[job %s] %s$ %s%s%s%s",
                     self.name,
                     self.outdir,
                     " \\\n    ".join([shellescape.quote(Text(arg)) if shouldquote(Text(arg)) else Text(arg) for arg in
                                       (runtime + self.command_line)]),
                     u' < %s' % self.stdin if self.stdin else '',
                     u' > %s' % os.path.join(self.outdir, self.stdout) if self.stdout else '',
                     u' 2> %s' % os.path.join(self.outdir, self.stderr) if self.stderr else '')

        outputs = {}  # type: Dict[Text,Text]

        try:
            stdin_path = None
            if self.stdin:
                stdin_path = self.pathmapper.reversemap(self.stdin)[1]

            stderr_path = None
            if self.stderr:
                abserr = os.path.join(self.outdir, self.stderr)
                dnerr = os.path.dirname(abserr)
                if dnerr and not os.path.exists(dnerr):
                    os.makedirs(dnerr)
                stderr_path = abserr

            stdout_path = None
            if self.stdout:
                absout = os.path.join(self.outdir, self.stdout)
                dn = os.path.dirname(absout)
                if dn and not os.path.exists(dn):
                    os.makedirs(dn)
                stdout_path = absout

            build_job_script = self.builder.build_job_script  # type: Callable[[List[str]], Text]
            rcode = _job_popen(
                [Text(x).encode('utf-8') for x in runtime + self.command_line],
                stdin_path=stdin_path,
                stdout_path=stdout_path,
                stderr_path=stderr_path,
                env=env,
                cwd=self.outdir,
                build_job_script=build_job_script,
            )

            # Explicit success/fail code lists take precedence over the
            # default "0 means success" rule.
            if self.successCodes and rcode in self.successCodes:
                processStatus = "success"
            elif self.temporaryFailCodes and rcode in self.temporaryFailCodes:
                processStatus = "temporaryFail"
            elif self.permanentFailCodes and rcode in self.permanentFailCodes:
                processStatus = "permanentFail"
            elif rcode == 0:
                processStatus = "success"
            else:
                processStatus = "permanentFail"

            if self.generatefiles["listing"]:
                relink_initialworkdir(self.generatemapper, inplace_update=self.inplace_update)

            outputs = self.collect_outputs(self.outdir)

        except OSError as e:
            # errno 2 (ENOENT): the executable itself was not found.
            if e.errno == 2:
                if runtime:
                    _logger.error(u"'%s' not found", runtime[0])
                else:
                    _logger.error(u"'%s' not found", self.command_line[0])
            else:
                _logger.exception("Exception while running job")
            processStatus = "permanentFail"
        except WorkflowException as e:
            _logger.error(u"[job %s] Job error:\n%s" % (self.name, e))
            processStatus = "permanentFail"
        except Exception as e:
            _logger.exception("Exception while running job")
            processStatus = "permanentFail"

        if processStatus != "success":
            _logger.warn(u"[job %s] completed %s", self.name, processStatus)
        else:
            _logger.info(u"[job %s] completed %s", self.name, processStatus)

        if _logger.isEnabledFor(logging.DEBUG):
            _logger.debug(u"[job %s] %s", self.name, json.dumps(outputs, indent=4))

        self.output_callback(outputs, processStatus)

        if self.stagedir and os.path.exists(self.stagedir):
            _logger.debug(u"[job %s] Removing input staging directory %s", self.name, self.stagedir)
            shutil.rmtree(self.stagedir, True)

        if rm_tmpdir:
            _logger.debug(u"[job %s] Removing temporary directory %s", self.name, self.tmpdir)
            shutil.rmtree(self.tmpdir, True)
class CommandLineJob(JobBase):
    """Runs the job directly on the host (no container)."""

    def run(self, pull_image=True, rm_container=True,
            rm_tmpdir=True, move_outputs="move", **kwargs):
        # type: (bool, bool, bool, Text, **Any) -> Union[Tuple[Text, Dict[None, None]], None]
        """Stage inputs, assemble the environment, and execute the command.

        kwargs honored here: preserve_environment (iterable of variable
        names to copy from os.environ), preserve_entire_environment (copy
        everything).
        """
        self._setup()

        # NOTE(review): `env` aliases self.environment, so the HOME/TMPDIR
        # assignments below mutate the job's stored environment in place.
        env = self.environment
        if not os.path.exists(self.tmpdir):
            os.makedirs(self.tmpdir)
        vars_to_preserve = kwargs.get("preserve_environment")
        if kwargs.get("preserve_entire_environment"):
            vars_to_preserve = os.environ
        if vars_to_preserve is not None:
            for key, value in os.environ.items():
                if key in vars_to_preserve and key not in env:
                    env[key] = value
        env["HOME"] = self.outdir
        env["TMPDIR"] = self.tmpdir

        # Stage read-only inputs as symlinks; writable entries are handled
        # per the inplace_update setting.
        stageFiles(self.pathmapper, os.symlink, ignoreWritable=True)

        if self.generatemapper:
            stageFiles(self.generatemapper, os.symlink, ignoreWritable=self.inplace_update)
            relink_initialworkdir(self.generatemapper, inplace_update=self.inplace_update)

        # Empty runtime prefix: run the command line directly.
        self._execute([], env, rm_tmpdir=rm_tmpdir, move_outputs=move_outputs)
class DockerCommandLineJob(JobBase):
    """Runs the job inside a Docker container via `docker run`."""

    def add_volumes(self, pathmapper, runtime, stage_output):
        # type: (PathMapper, List[Text], bool) -> None
        """Translate staged path-mapper entries into docker --volume flags
        (or host-side copies/creations for writable entries).

        stage_output: when True, targets under the host output dir are
        rewritten to the corresponding in-container output path.
        """
        host_outdir = self.outdir
        container_outdir = self.builder.outdir
        for src, vol in pathmapper.items():
            if not vol.staged:
                continue
            if stage_output:
                containertgt = container_outdir + vol.target[len(host_outdir):]
            else:
                containertgt = vol.target
            if vol.type in ("File", "Directory"):
                # Paths starting with "_:" are synthesized (no host source).
                if not vol.resolved.startswith("_:"):
                    runtime.append(u"--volume=%s:%s:ro" % (vol.resolved, containertgt))
            elif vol.type == "WritableFile":
                if self.inplace_update:
                    runtime.append(u"--volume=%s:%s:rw" % (vol.resolved, containertgt))
                else:
                    # Copy so the container can modify it without touching
                    # the original.
                    shutil.copy(vol.resolved, vol.target)
            elif vol.type == "WritableDirectory":
                if vol.resolved.startswith("_:"):
                    os.makedirs(vol.target, 0o0755)
                else:
                    if self.inplace_update:
                        runtime.append(u"--volume=%s:%s:rw" % (vol.resolved, containertgt))
                    else:
                        shutil.copytree(vol.resolved, vol.target)
            elif vol.type == "CreateFile":
                # Literal file content: write it to the host out dir, then
                # mount it read-only at the target path.
                createtmp = os.path.join(host_outdir, os.path.basename(vol.target))
                with open(createtmp, "w") as f:
                    f.write(vol.resolved.encode("utf-8"))
                runtime.append(u"--volume=%s:%s:ro" % (createtmp, vol.target))

    def run(self, pull_image=True, rm_container=True,
            rm_tmpdir=True, move_outputs="move", **kwargs):
        # type: (bool, bool, bool, Text, **Any) -> Union[Tuple[Text, Dict[None, None]], None]
        """Resolve the container image, build the `docker run` command line,
        and execute the job inside it.

        kwargs honored here: use_container, default_container, custom_net,
        disable_net, no_match_user.
        """
        (docker_req, docker_is_req) = get_feature(self, "DockerRequirement")

        img_id = None
        env = None  # type: MutableMapping[Text, Text]
        try:
            env = cast(MutableMapping[Text, Text], os.environ)
            if docker_req and kwargs.get("use_container") is not False:
                img_id = docker.get_from_requirements(docker_req, True, pull_image)
            elif kwargs.get("default_container", None) is not None:
                img_id = kwargs.get("default_container")

            if docker_req and img_id is None and kwargs.get("use_container"):
                raise Exception("Docker image not available")
        except Exception as e:
            _logger.debug("Docker error", exc_info=True)
            # A hard requirement means failure; a hint means the user can
            # retry with --no-container.
            if docker_is_req:
                raise UnsupportedRequirement(
                    "Docker is required to run this tool: %s" % e)
            else:
                raise WorkflowException(
                    "Docker is not available for this tool, try --no-container"
                    " to disable Docker: %s" % e)

        self._setup()

        runtime = [u"docker", u"run", u"-i"]

        # Output and temp dirs are the only writable mounts by default.
        runtime.append(u"--volume=%s:%s:rw" % (os.path.realpath(self.outdir), self.builder.outdir))
        runtime.append(u"--volume=%s:%s:rw" % (os.path.realpath(self.tmpdir), "/tmp"))

        self.add_volumes(self.pathmapper, runtime, False)
        if self.generatemapper:
            self.add_volumes(self.generatemapper, runtime, True)

        runtime.append(u"--workdir=%s" % (self.builder.outdir))
        runtime.append(u"--read-only=true")

        if kwargs.get("custom_net", None) is not None:
            runtime.append(u"--net={0}".format(kwargs.get("custom_net")))
        elif kwargs.get("disable_net", None):
            runtime.append(u"--net=none")

        if self.stdout:
            runtime.append("--log-driver=none")

        # Run as the host user (or the Docker VM's mapped uid) so output
        # files are owned correctly, unless explicitly disabled.
        euid = docker_vm_uid() or os.geteuid()

        if kwargs.get("no_match_user", None) is False:
            runtime.append(u"--user=%s" % (euid))

        if rm_container:
            runtime.append(u"--rm")

        runtime.append(u"--env=TMPDIR=/tmp")

        # spec currently says "HOME must be set to the designated output
        # directory." but spec might change to designated temp directory.
        # runtime.append("--env=HOME=/tmp")
        runtime.append(u"--env=HOME=%s" % self.builder.outdir)

        for t, v in self.environment.items():
            runtime.append(u"--env=%s=%s" % (t, v))

        runtime.append(img_id)

        self._execute(runtime, env, rm_tmpdir=rm_tmpdir, move_outputs=move_outputs)
def _job_popen(
        commands,  # type: List[str]
        stdin_path,  # type: Text
        stdout_path,  # type: Text
        stderr_path,  # type: Text
        env,  # type: Union[MutableMapping[Text, Text], MutableMapping[str, str]]
        cwd,  # type: Text
        job_dir=None,  # type: Text
        build_job_script=None,  # type: Callable[[List[str]], Text]
        ):
    # type: (...) -> int
    """Run `commands` as a child process and return its exit code.

    Two strategies are used:

    * Direct: spawn the command with subprocess.Popen, wiring stdin/stdout/
      stderr to the given file paths (stdout/stderr fall back to sys.stderr,
      stdin to a pipe that is closed immediately).
    * Shelled: when `build_job_script` produced script text, or the module
      flag FORCE_SHELLED_POPEN is set, serialize the job description to
      ``job.json`` in `job_dir` and execute it through a generated bash
      wrapper plus PYTHON_RUN_SCRIPT; `job_dir` is removed afterwards.

    :param commands: argv list for the tool invocation.
    :param stdin_path: file to feed to stdin, or None for a closed pipe.
    :param stdout_path: file to capture stdout, or None (falls back to stderr).
    :param stderr_path: file to capture stderr, or None (falls back to stderr).
    :param env: environment mapping for the child process.
    :param cwd: working directory for the child process.
    :param job_dir: scratch directory for the shelled strategy (a temp dir is
        created when None).
    :param build_job_script: optional callable that renders a custom job
        script from the argv list.
    """
    job_script_contents = None  # type: Text
    if build_job_script:
        job_script_contents = build_job_script(commands)

    if not job_script_contents and not FORCE_SHELLED_POPEN:
        stdin = None  # type: Union[IO[Any], int]
        stderr = None  # type: IO[Any]
        stdout = None  # type: IO[Any]

        if stdin_path is not None:
            stdin = open(stdin_path, "rb")
        else:
            stdin = subprocess.PIPE

        if stdout_path is not None:
            stdout = open(stdout_path, "wb")
        else:
            stdout = sys.stderr

        if stderr_path is not None:
            stderr = open(stderr_path, "wb")
        else:
            stderr = sys.stderr

        sp = subprocess.Popen(commands,
                              shell=False,
                              close_fds=True,
                              stdin=stdin,
                              stdout=stdout,
                              stderr=stderr,
                              env=env,
                              cwd=cwd)

        if sp.stdin:
            sp.stdin.close()

        rcode = sp.wait()

        # BUG FIX: the original tested `isinstance(stdin, file)`, which relies
        # on the Python 2-only `file` builtin and raises NameError on Python 3.
        # `stdin` here is either an opened file object or subprocess.PIPE, so
        # an identity test against PIPE is equivalent and portable.
        if stdin is not subprocess.PIPE:
            stdin.close()

        if stdout is not sys.stderr:
            stdout.close()
        if stderr is not sys.stderr:
            stderr.close()

        return rcode
    else:
        if job_dir is None:
            job_dir = tempfile.mkdtemp(prefix="cwltooljob")

        if not job_script_contents:
            job_script_contents = SHELL_COMMAND_TEMPLATE

        # NOTE(review): on Python 2 this coerces environment keys to UTF-8
        # byte strings before JSON serialization; on Python 3 encoded (bytes)
        # keys would not be JSON-serializable -- confirm before porting.
        env_copy = {}
        for key in env:
            key = key.encode("utf-8")
            env_copy[key] = env[key]

        job_description = dict(
            commands=commands,
            cwd=cwd,
            env=env_copy,
            stdout_path=stdout_path,
            stderr_path=stderr_path,
            stdin_path=stdin_path,
        )
        with open(os.path.join(job_dir, "job.json"), "w") as f:
            json.dump(job_description, f)
        try:
            job_script = os.path.join(job_dir, "run_job.bash")
            with open(job_script, "w") as f:
                f.write(job_script_contents)
            job_run = os.path.join(job_dir, "run_job.py")
            with open(job_run, "w") as f:
                f.write(PYTHON_RUN_SCRIPT)
            sp = subprocess.Popen(
                ["bash", job_script.encode("utf-8")],
                shell=False,
                cwd=job_dir,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                stdin=subprocess.PIPE,
            )
            if sp.stdin:
                sp.stdin.close()

            rcode = sp.wait()

            return rcode
        finally:
            # Always remove the scratch directory, even if the run failed.
            shutil.rmtree(job_dir)
| {
"content_hash": "4878735c4deca7aaa4e482115e02cd1f",
"timestamp": "",
"source": "github",
"line_count": 496,
"max_line_length": 120,
"avg_line_length": 37.33467741935484,
"alnum_prop": 0.5476833351333837,
"repo_name": "SciDAP/cwltool",
"id": "60573cbad8e542f7aef5d36737b973310b3c5e67",
"size": "18518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cwltool/job.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "166"
},
{
"name": "JavaScript",
"bytes": "241420"
},
{
"name": "Makefile",
"bytes": "17870"
},
{
"name": "Python",
"bytes": "976631"
},
{
"name": "Shell",
"bytes": "9982"
},
{
"name": "Web Ontology Language",
"bytes": "15694896"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from hashlib import sha1
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.tasks.jar_import_products import JarImportProducts
from pants.base.build_environment import get_buildroot
from pants.base.fingerprint_strategy import DefaultFingerprintStrategy
from pants.build_graph.address import Address
from pants.fs.archive import ZIP
from pants.java.jar.jar_dependency import JarDependency
from pants.java.jar.jar_dependency_utils import M2Coordinate
from pants.task.task import Task
from pants.contrib.android.targets.android_binary import AndroidBinary
from pants.contrib.android.targets.android_library import AndroidLibrary
from pants.contrib.android.targets.android_resources import AndroidResources
class AndroidLibraryFingerprintStrategy(DefaultFingerprintStrategy):
  """Fingerprints AndroidLibrary targets by their imported jars.

  An AndroidLibrary must be re-unpacked whenever any of its imported jars
  changes, so the jars' cache keys are folded into the fingerprint alongside
  the target payload.
  """

  def compute_fingerprint(self, target):
    """Return a hex digest for AndroidLibrary targets, or None for others."""
    # TODO(mateor) Create a utility function to add a block of fingerprints to a hasher with caller
    # handing in list of items of the same type and a function to extract a fingerprint from each.
    if not isinstance(target, AndroidLibrary):
      return None
    digest = sha1()
    # Sort for a stable digest regardless of jar declaration order.
    for jar_key in sorted(jar.cache_key() for jar in target.imported_jars):
      digest.update(jar_key)
    digest.update(target.payload.fingerprint())
    return digest.hexdigest()
class UnpackLibraries(Task):
  """Unpack AndroidDependency artifacts, including .jar and .aar libraries.

  The UnpackLibraries task unpacks artifacts imported by AndroidLibraries, as .aar or .jar files,
  through a 'libraries' attribute. The .aar files may contain components which require creation
  of some synthetic targets, as well as a classes.jar. The classes.jar is packaged into a
  JarDependency target and sent to javac compilation. All jar files are then unpacked-
  android_binaries repack the class files of all the android_libraries in their transitive
  dependencies into a dex file.

  All archives are unpacked only once, regardless of differing include/exclude patterns or how many
  targets depend upon it. All targets that depend on a particular artifact will be passed the
  unpack_libraries product, which is a directory containing the entire source of the unpacked jars.
  These sources are filtered against the AndroidLibrary's include/exclude patterns during the
  creation of the dex file.
  """

  class MissingElementException(Exception):
    """Raised if an unpacked file or directory unexpectedly does not exist."""

  class UnexpectedArchiveType(Exception):
    """Raised if an archive has an extension that is not explicitly handled by this class."""

  @classmethod
  def prepare(cls, options, round_manager):
    super(UnpackLibraries, cls).prepare(options, round_manager)
    # Requires ivy-fetched artifacts keyed by coordinate.
    round_manager.require_data(JarImportProducts)

  @classmethod
  def product_types(cls):
    return ['unpacked_libraries']

  @staticmethod
  def is_binary(target):
    """Return True for AndroidBinary targets."""
    return isinstance(target, AndroidBinary)

  @staticmethod
  def is_library(target):
    """Return True for AndroidLibrary targets."""
    # TODO(mateor) add AndroidBinary support. If include/exclude patterns aren't needed, an
    # android_binary should be able to simply declare an android_dependency as a dep.
    return isinstance(target, AndroidLibrary)

  def __init__(self, *args, **kwargs):
    super(UnpackLibraries, self).__init__(*args, **kwargs)
    # Synthetic targets created from unpacked artifacts, keyed by coordinate
    # (aar libraries) or classes.jar path (jar libraries).
    self._created_targets = {}
    # Coordinates of archives already extracted during this run; guards the
    # "unpack only once" guarantee from the class docstring.
    self._unpacked_archives = set()

  def create_classes_jar_target(self, library, coordinate, jar_file):
    """Create a JarLibrary target containing the jar_file as a JarDependency.

    :param library: The new JarLibrary will be derived from this AndroidLibrary.
    :type library: :class:`pants.contrib.android.targets.android_library.AndroidLibrary`
    :param coordinate: Archive coordinate fetched by ivy, e.g. 'org.pantsbuild:example::1.0:aar'.
    :type coordinate: :class:`pants.java.jar.jar_dependency_utils.M2Coordinate`
    :param string jar_file: Full path of the classes.jar contained within unpacked aar files.
    :returns: A new jar library target.
    :rtype: :class:`pants.backend.jvm.targets.jar_library.JarLibrary`
    """
    # TODO(mateor) add another JarDependency for every jar under 'libs'.
    jar_url = 'file://{0}'.format(jar_file)
    jar_dep = JarDependency(org=library.id, name=coordinate.artifact_filename, rev=coordinate.rev,
                            url=jar_url)
    address = Address(os.path.relpath(self.workdir, get_buildroot()),
                      '{}-classes.jar'.format(coordinate.artifact_filename))
    new_target = self.context.add_new_target(address, JarLibrary, jars=[jar_dep],
                                             derived_from=library)
    return new_target

  def create_resource_target(self, library, coordinate, manifest, resource_dir):
    """Create an AndroidResources target.

    :param library: AndroidLibrary that the new AndroidResources target derives from.
    :type library: :class:`pants.contrib.android.targets.android_library.AndroidLibrary`
    :param coordinate: Archive coordinate fetched by ivy, e.g. 'org.pantsbuild:example::1.0:aar'.
    :type coordinate: :class:`pants.java.jar.jar_dependency_utils.M2Coordinate`
    :param string manifest: The path of 'AndroidManifest.xml'
    :param string resource_dir: Full path of the res directory contained within aar files.
    :return: A new android resources target.
    :rtype: :class:`pants.contrib.android.targets.android_resources.AndroidResources`
    """
    address = Address(os.path.relpath(self.workdir, get_buildroot()),
                      '{}-resources'.format(coordinate.artifact_filename))
    new_target = self.context.add_new_target(address, AndroidResources,
                                             manifest=manifest, resource_dir=resource_dir,
                                             derived_from=library)
    return new_target

  def create_android_library_target(self, binary, library, coordinate, unpacked_aar_location):
    """Create an AndroidLibrary target.

    The aar files are unpacked and the contents used to create a new AndroidLibrary target.

    :param AndroidBinary binary: AndroidBinary that depends on the AndroidLibrary being processed.
    :param AndroidLibrary library: AndroidLibrary that the new AndroidLibrary target derives from.
    :param coordinate: Archive coordinate fetched by ivy, e.g. 'org.pantsbuild:example::1.0:aar'.
    :type coordinate: :class:`pants.java.jar.jar_dependency_utils.M2Coordinate`
    :param string unpacked_aar_location: Full path of dir holding contents of an unpacked aar file.
    :return: A new android library target.
    :rtype: :class:`pants.contrib.android.targets.android_library.AndroidLibrary`
    """
    # The following three elements of an aar file have names mandated by the aar spec:
    #   http://tools.android.com/tech-docs/new-build-system/aar-format
    # They are said to be mandatory although in practice that assumption only holds for manifest.
    manifest = os.path.join(unpacked_aar_location, 'AndroidManifest.xml')
    jar_file = os.path.join(unpacked_aar_location, 'classes.jar')
    resource_dir = os.path.join(unpacked_aar_location, 'res')

    # Sanity check to make sure all .aar files we expect to be unpacked are actually unpacked.
    if not os.path.isfile(manifest):
      raise self.MissingElementException("An AndroidManifest.xml is expected in every unpacked "
                                         ".aar file but none was found in the {} archive "
                                         "for the {} target".format(coordinate, library))

    # Depending on the contents of the unpacked aar file, create the dependencies.
    deps = []
    if os.path.isdir(resource_dir) and os.listdir(resource_dir):
      new_resource_target = self.create_resource_target(library, coordinate, manifest, resource_dir)
      # The new library's resources must be compiled both by themselves and along with the
      # dependent library.
      deps.append(new_resource_target)
    if os.path.isfile(jar_file):
      if jar_file not in self._created_targets:
        # TODO(mateo): So the binary needs the classes on the classpath. I should probably bundle up
        # the include/exclude filtered classes and put them on the compile classpath, either as a
        # jar or as source.
        self._created_targets[jar_file] = self.create_classes_jar_target(library, coordinate, jar_file)
      binary.inject_dependency(self._created_targets[jar_file].address)

    address = Address(os.path.relpath(self.workdir, get_buildroot()),
                      '{}-android_library'.format(coordinate.artifact_filename))
    new_target = self.context.add_new_target(address, AndroidLibrary,
                                             manifest=manifest,
                                             include_patterns=library.payload.include_patterns,
                                             exclude_patterns=library.payload.exclude_patterns,
                                             dependencies=deps,
                                             derived_from=library)
    return new_target

  def _unpack_artifacts(self, jar_imports):
    # Unpack the aar and jar library artifacts. If the aar files have a jar in the contents,
    # unpack that jar as well.
    for coordinate, aar_or_jar in jar_imports:
      jar_outdir = self.unpacked_jar_location(coordinate)
      if 'jar' == coordinate.ext:
        jar_file = aar_or_jar
      elif 'aar' == coordinate.ext:
        unpacked_aar_destination = self.unpacked_aar_location(coordinate)
        jar_file = os.path.join(unpacked_aar_destination, 'classes.jar')

        # Unpack .aar files.
        if coordinate not in self._unpacked_archives:
          ZIP.extract(aar_or_jar, unpacked_aar_destination)
          # BUG FIX: record the key that the membership check above tests.
          # The original added the archive path (aar_or_jar) instead, so the
          # "unpacked only once" cache promised by the class docstring never
          # hit and every .aar was re-extracted for each invalidated target.
          self._unpacked_archives.add(coordinate)

        # Create an .aar/classes.jar signature for self._unpacked_archives.
        coordinate = M2Coordinate(org=coordinate.org,
                                  name=coordinate.name,
                                  rev=coordinate.rev,
                                  classifier=coordinate.classifier,
                                  ext='classes.jar')
      else:
        raise self.UnexpectedArchiveType('Android dependencies can be .aar or .jar archives '
                                         '(was: {} at {})'.format(coordinate, aar_or_jar))
      # Unpack the jar files.
      if coordinate not in self._unpacked_archives and os.path.isfile(jar_file):
        ZIP.extract(jar_file, jar_outdir)
        # BUG FIX (as above): store the checked key, not the archive path.
        self._unpacked_archives.add(coordinate)

  def _create_target(self, binary, library, coordinates):
    # Create a target for the components of an unpacked .aar file.
    for coordinate in coordinates:
      # The contents of the unpacked aar file must be made into an AndroidLibrary target.
      if 'aar' == coordinate.ext:
        if coordinate not in self._created_targets:
          unpacked_location = self.unpacked_aar_location(coordinate)
          if not os.path.isdir(unpacked_location):
            raise self.MissingElementException('{}: Expected to unpack {} at {} but did not!'
                                               .format(library.address.spec, coordinate, unpacked_location))
          # The binary is being threaded through because android binaries need the classes.jar on
          # their classpath in jvm_compile.
          new_target = self.create_android_library_target(binary,
                                                          library,
                                                          coordinate,
                                                          unpacked_location)
          self._created_targets[coordinate] = new_target
        library.inject_dependency(self._created_targets[coordinate].address)

      # The unpacked_libraries product is a dir containing the full unpacked source. The files
      # that match the include/exclude patterns are calculated during DxCompile.
      unpacked_products = self.context.products.get('unpacked_libraries')
      unpacked_products.add(library, get_buildroot()).append(self.unpacked_jar_location(coordinate))

  def execute(self):
    """Unpack invalidated artifacts, then synthesize targets for aar contents."""
    jar_import_products = self.context.products.get_data(JarImportProducts)
    library_targets = self.context.targets(predicate=self.is_library)

    with self.invalidated(library_targets,
                          fingerprint_strategy=AndroidLibraryFingerprintStrategy(),
                          invalidate_dependents=True) as invalidation_check:
      for vt in invalidation_check.invalid_vts:
        jar_imports = jar_import_products.imports(vt.target)
        if jar_imports:
          self._unpack_artifacts(jar_imports)

    # Create the new targets from the contents of unpacked aar files.
    binary_targets = self.context.targets(predicate=self.is_binary)
    for binary in binary_targets:
      library_dependencies = [x for x in binary.dependencies if isinstance(x, AndroidLibrary)]
      for library in library_dependencies:
        jar_imports = jar_import_products.imports(library)
        if jar_imports:
          self._create_target(binary, library, (jar_import.coordinate for jar_import in jar_imports))

  def unpacked_jar_location(self, coordinate):
    """Location for unpacked jar files, whether imported as-is or found inside an aar file."""
    return os.path.join(self.workdir, 'explode-jars', coordinate.artifact_filename)

  def unpacked_aar_location(self, coordinate):
    """Output location for unpacking .aar archives."""
    return os.path.join(self.workdir, coordinate.artifact_filename)
| {
"content_hash": "af30ed300f68566e5fbfeb401b9bb1f8",
"timestamp": "",
"source": "github",
"line_count": 257,
"max_line_length": 120,
"avg_line_length": 54.0739299610895,
"alnum_prop": 0.6869108440670648,
"repo_name": "fkorotkov/pants",
"id": "7873bba2fd3a76fb54c018cb1deb46195d3c1887",
"size": "14044",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contrib/android/src/python/pants/contrib/android/tasks/unpack_libraries.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "1805"
},
{
"name": "HTML",
"bytes": "79866"
},
{
"name": "Java",
"bytes": "481460"
},
{
"name": "JavaScript",
"bytes": "35417"
},
{
"name": "Python",
"bytes": "5931594"
},
{
"name": "Rust",
"bytes": "271643"
},
{
"name": "Scala",
"bytes": "76239"
},
{
"name": "Shell",
"bytes": "74734"
},
{
"name": "Thrift",
"bytes": "2795"
}
],
"symlink_target": ""
} |
from rest_framework import permissions
from api.base.utils import get_user_auth
from api.comments.serializers import CommentReport
from website.models import Node, Comment
class CanCommentOrPublic(permissions.BasePermission):
    """Reads need view access to the node; writes need comment permission."""

    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, (Node, Comment)), 'obj must be a Node or Comment, got {}'.format(obj)
        auth = get_user_auth(request)
        # Comments delegate to their parent node; nodes stand for themselves.
        node = obj.node if isinstance(obj, Comment) else obj
        if request.method not in permissions.SAFE_METHODS:
            return node.can_comment(auth)
        return node.is_public or node.can_view(auth)
class CommentDetailPermissions(permissions.BasePermission):
    """Reads need view access; edits are restricted to the comment's author."""

    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, Comment), 'obj must be a Comment, got {}'.format(obj)
        auth = get_user_auth(request)
        node = obj.node
        if request.method in permissions.SAFE_METHODS:
            return node.is_public or node.can_view(auth)
        # Only the author may modify, and only while still allowed to comment.
        return obj.user._id == auth.user._id and node.can_comment(auth)
class CommentReportsPermissions(permissions.BasePermission):
    """Permissions for comment reports.

    Only users who have permission to comment on the project can access the
    comment reports endpoint.
    """

    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, (Comment, CommentReport)), 'obj must be a Comment or Comment Report, got {}'.format(obj)
        auth = get_user_auth(request)
        if isinstance(obj, Comment):
            node = obj.node
        else:
            # CommentReports don't carry their node; resolve via the view.
            node = view.get_comment().node
        return node.can_comment(auth)
| {
"content_hash": "67afb97f802c8dd99b290cc775063a24",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 119,
"avg_line_length": 38,
"alnum_prop": 0.6638023630504833,
"repo_name": "hmoco/osf.io",
"id": "8d9ed751a8725ab95c938e80ab8625f00bd1c043",
"size": "1886",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "api/comments/permissions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "175175"
},
{
"name": "HTML",
"bytes": "193496"
},
{
"name": "JavaScript",
"bytes": "1690469"
},
{
"name": "Mako",
"bytes": "672179"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "7856328"
}
],
"symlink_target": ""
} |
import os
import sys
import zmq
import pprint
import logging
# Module-level logger: DEBUG-level messages, emitted to the console through
# a stream handler with a timestamped format.
logger = logging.getLogger("python_client")
logger.setLevel(logging.DEBUG)
_handler = logging.StreamHandler()
_handler.setLevel(logging.DEBUG)
_handler.setFormatter(logging.Formatter("%(asctime)s - %(name)s - %(message)s"))
logger.addHandler(_handler)
if __name__ == "__main__":
    # Subscribe to every message published on the configured ZeroMQ ports
    # and log each update as it arrives.
    logger.info("starting")

    PROTOCOL = "tcp"
    HOSTNAME = "127.0.0.1"
    PORTS = [5556, 5557]
    FILTER = ""  # empty prefix filter -> receive all messages

    connections = ["%s://%s:%s" % (PROTOCOL, HOSTNAME, port) for port in PORTS]
    logger.debug("Collecting updates from: %s" % (pprint.pformat(connections), ))

    context = zmq.Context()
    socket = context.socket(zmq.SUB)
    # BUG FIX: the original used map(socket.connect, connections), which only
    # performs the connects on Python 2 where map() is eager; on Python 3 the
    # lazy map object is discarded and no connection is ever made. An explicit
    # loop is equivalent on Python 2 and correct everywhere.
    for connection in connections:
        socket.connect(connection)
    # NOTE(review): on Python 3 / recent pyzmq, SUBSCRIBE expects bytes; the
    # str "" works on Python 2 -- confirm the target interpreter when porting.
    socket.setsockopt(zmq.SUBSCRIBE, FILTER)

    while 1:
        incoming = socket.recv()
        logger.debug("Update: '%s'" % (incoming, ))
| {
"content_hash": "a848652918b627f1eae69a6651685c49",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 81,
"avg_line_length": 26.235294117647058,
"alnum_prop": 0.6412556053811659,
"repo_name": "asimihsan/masspinger",
"id": "8d5caf1fec0038ff6c1c9ee20146e4889521d4f1",
"size": "1260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/python_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5161319"
},
{
"name": "C++",
"bytes": "2150895"
},
{
"name": "Java",
"bytes": "2899"
},
{
"name": "Perl",
"bytes": "37138"
},
{
"name": "Python",
"bytes": "9749"
},
{
"name": "Shell",
"bytes": "463105"
}
],
"symlink_target": ""
} |
"""
test_django-publica-templates
------------
Tests for `django-publica-templates` models module.
"""
import unittest
from templates import models
from django.template import loader, Context
import test_settings as settings
from templates.utils.cache import get_cache_key
from templates.utils.template import check_template_syntax
class TemplatesTestCase(unittest.TestCase):
def setUp(self):
self.old_template_loaders = settings.TEMPLATE_LOADERS
if 'templates.loader.Loader' not in settings.TEMPLATE_LOADERS:
loader.template_source_loaders = None
settings.TEMPLATE_LOADERS = (list(settings.TEMPLATE_LOADERS) +
['templates.loader.Loader'])
self.t1, _ = models.Template.objects.get_or_create(
name='base.html', content='base')
self.t2, _ = models.Template.objects.get_or_create(
name='sub.html', content='sub')
def tearDown(self):
loader.template_source_loaders = None
settings.TEMPLATE_LOADERS = self.old_template_loaders
def test_basiscs(self):
self.assertTrue("base" in self.t1.content)
def test_load_templates(self):
result = loader.get_template("templates/test.html").render(Context({}))
self.assertEqual(result, 'test')
def test_check_template_syntax(self):
bad_template, _ = models.Template.objects.get_or_create(
name='bad.html', content='{% if foo %}Bar')
good_template, _ = models.Template.objects.get_or_create(
name='good.html', content='{% if foo %}Bar{% endif %}')
self.assertFalse(check_template_syntax(bad_template)[0])
self.assertTrue(check_template_syntax(good_template)[0])
def test_get_cache_name(self):
self.assertEqual(get_cache_key('name with spaces'),
'templates::name-with-spaces')
class TemplateMixins(unittest.TestCase):
def setUp(self):
self.old_template_loaders = settings.TEMPLATE_LOADERS
if 'templates.loader.Loader' not in settings.TEMPLATE_LOADERS:
loader.template_source_loaders = None
settings.TEMPLATE_LOADERS = (list(settings.TEMPLATE_LOADERS) +
['templates.loader.Loader'])
self.t1, _ = models.Template.objects.get_or_create(
name='templates/test.html', content='detail')
self.t2, _ = models.Template.objects.get_or_create(
name='templates/test.html', content='preview')
self.temp = models.Templateable()
def tearDown(self):
loader.template_source_loaders = None
settings.TEMPLATE_LOADERS = self.old_template_loaders
def test_basics(self):
self.temp.template = self.t1
self.assertTrue("test" in self.temp.render())
def test_load_templates_render(self):
# Set the tempalte t1 as template name and then load seperately and
# see render works
self.temp.template = self.t1
result = loader.get_template("templates/test.html").render(Context({}))
self.assertEqual(result, self.temp.render())
def test_load_templates_render_preview(self):
# Set the tempalte t1 as template name and then load seperately and
# see render works
self.temp.preview_template = self.t2
result = loader.get_template("templates/test.html").render(Context({}))
self.assertEqual(result, self.temp.render_preview())
| {
"content_hash": "8392ee25f75e4dec4f09ca910a2494e1",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 79,
"avg_line_length": 38.175824175824175,
"alnum_prop": 0.6462291306850892,
"repo_name": "publica-io/django-publica-templates",
"id": "58e725b27647b09cc5bab59f66adf6de48dfc8f6",
"size": "3521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "22674"
},
{
"name": "Shell",
"bytes": "6467"
}
],
"symlink_target": ""
} |
from a10sdk.common.A10BaseClass import A10BaseClass
class Policy(A10BaseClass):
    """Manage WAF Policy files (supports CRUD operations via the AXAPI).

    REST endpoint::

        `https://<Hostname|Ip address>//axapi/v3/waf/policy`

    :param uuid: uuid of the object (string, 1-64 chars; modify not allowed).
    :param max_filesize: maximum WAF policy file size in KBytes
        (16-256, default 32).
    :param DeviceProxy: proxy for REST operations and session handling.
        Refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = []
        self.b_key = "policy"
        self.a10_url = "/axapi/v3/waf/policy"
        self.DeviceProxy = ""
        self.uuid = ""
        self.max_filesize = ""

        # Every keyword argument becomes an attribute, mirroring the AXAPI
        # payload fields.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
| {
"content_hash": "e9d258dc4e11432534b72a1abb7780f9",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 258,
"avg_line_length": 34.32432432432432,
"alnum_prop": 0.6307086614173228,
"repo_name": "a10networks/a10sdk-python",
"id": "4bfef0cdf3c0c9b33e1c83fc1ac8483e32d334b7",
"size": "1270",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "a10sdk/core/waf/waf_policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6956372"
}
],
"symlink_target": ""
} |
from Queue import Queue
from threading import Thread
from pyglet.window import mouse
import time
from pyglet.gl import *
import math
import numpy as np
import os
import random
import urllib2
import pyglet
from os.path import expanduser
from geoplotlib import utils
from geoplotlib.utils import BoundingBox, parse_raw_str
# Coordinate values stored per point in vertex buffers (x, y pairs).
VERT_PER_POINT = 2
# Target redraw rate for the pyglet clock (frames per second).
FPS = 30
# Pixel size of a square map tile.
TILE_SIZE = 256
# Zoom-level bounds for the map projection.
MIN_ZOOM = 2
MAX_ZOOM = 20
# Pan step for the WASD keys; presumably in tile units (mouse-drag deltas are
# also divided by TILE_SIZE before being fed to Projector.pan) -- TODO confirm.
KEYBOARD_PAN = 0.2
# Frames to wait after a pan/zoom before layers are re-invalidated.
TOTAL_INVALIDATE_DELAY = 50
# UI text appearance: RGBA color, font family, and font size expressed as a
# fraction of the screen width.
FONT_COLOR = (0,0,0,255)
FONT_NAME = 'Helvetica'
FONT_SCALING = 1./100
class UiManager:
    """Heads-up display: status/tooltip/info labels plus an optional colorbar.

    NOTE(review): relies on the module globals SCREEN_W/SCREEN_H, which are
    assigned in GeoplotlibApp.__init__ -- a UiManager must not be constructed
    before the application window.
    """

    class Colorbar():
        """Vertical colormap legend drawn at the right edge of the screen."""

        def __init__(self, cmap, vmax, colormap_scale, font_size, size=.5):
            # cmap: colormap object exposing `levels` and `get_boundaries()`.
            # vmax: largest value represented on the scale.
            # colormap_scale: 'log' switches tick labels to scientific notation.
            # size: bar height as a fraction of the screen height.
            self.cmap = cmap
            self.vmax = vmax
            self.colormap_scale = colormap_scale
            self.font_size = font_size
            self.size = size

        def draw(self, painter):
            """Draw the color bands and their tick labels using *painter*."""
            total_h = SCREEN_H*self.size
            step = total_h / self.cmap.levels
            bar_w = SCREEN_W/25

            # A single label object is reused and repositioned for every tick.
            lab = pyglet.text.Label('',
                                    color=FONT_COLOR,
                                    font_name=FONT_NAME,
                                    font_size=int(.8*self.font_size),
                                    x=SCREEN_W, y=SCREEN_H,
                                    anchor_x='right', anchor_y='center')

            edges, colors = self.cmap.get_boundaries(self.vmax, self.colormap_scale)

            # `levels` color bands but `levels`+1 edge labels.
            for i in range(self.cmap.levels+1):
                if i < self.cmap.levels:
                    # colors[i][:-1] drops the alpha component.
                    painter.set_color(colors[i][:-1])
                    painter.rect(SCREEN_W-2*bar_w/2, SCREEN_H-total_h*1.5+step*i,
                                 SCREEN_W-bar_w/2, SCREEN_H-total_h*1.5+step*(i+1))
                lab.x = SCREEN_W-2*bar_w/2*1.1
                lab.y = SCREEN_H-total_h*1.5+step*i
                if self.colormap_scale == 'log':
                    lab.text = '%.2E' % edges[i]
                else:
                    lab.text = '%d' % edges[i]
                lab.draw()

    def __init__(self):
        # Label font size scales with the window width.
        self.font_size = int(SCREEN_W*FONT_SCALING)
        self.padding = 2

        self.labels = {}

        # 'status': bottom-left message area (e.g. "rendering...").
        self.labels['status'] = pyglet.text.Label('',
                                       color=FONT_COLOR,
                                       font_name=FONT_NAME,
                                       font_size=self.font_size,
                                       x=20, y=10,
                                       anchor_x='left', anchor_y='bottom')

        # 'tooltip': follows the mouse; its position is updated in draw().
        self.labels['tooltip'] = pyglet.text.Label('',
                                       color=FONT_COLOR,
                                       font_name=FONT_NAME,
                                       font_size=self.font_size,
                                       x=SCREEN_W, y=SCREEN_H,
                                       anchor_x='left', anchor_y='bottom')

        # 'info': top-right information area.
        self.labels['info'] = pyglet.text.Label('',
                                       color=FONT_COLOR,
                                       font_name=FONT_NAME,
                                       font_size=self.font_size,
                                       x=SCREEN_W, y=SCREEN_H,
                                       anchor_x='right', anchor_y='top')

        # Optional colorbar legend; set via add_colorbar().
        self.colorbar = None

    def tooltip(self, text):
        """Set the tooltip text shown next to the mouse cursor."""
        self.labels['tooltip'].text = parse_raw_str(text)

    def status(self, text):
        """Set the bottom-left status text."""
        self.labels['status'].text = parse_raw_str(text)

    def info(self, text):
        """Set the top-right info text."""
        self.labels['info'].text = parse_raw_str(text)

    @staticmethod
    def get_label_bbox(label):
        """Return the (left, top, right, bottom) screen box of *label*.

        NOTE(review): only 'left'/'right' and 'bottom'/'top' anchors are
        handled; a 'center'-anchored label would leave left/top unbound
        (UnboundLocalError) -- confirm no such label is ever passed.
        """
        if label.anchor_x == 'left':
            left = label.x
        elif label.anchor_x == 'right':
            left = label.x - label.content_width

        if label.anchor_y == 'bottom':
            top = label.y
        elif label.anchor_y == 'top':
            top = label.y - label.content_height

        return left, top, left + label.content_width, top + label.content_height

    def draw_label_background(self, label, painter):
        """Draw a padded background rectangle behind a non-empty label."""
        if len(label.text) > 0:
            left, top, right, bottom = UiManager.get_label_bbox(label)
            painter.rect(left - self.padding, top - self.padding, right + self.padding, bottom + self.padding)

    def draw(self, mouse_x, mouse_y):
        """Render the colorbar, label backgrounds, then the labels themselves."""
        painter = BatchPainter()
        if self.colorbar:
            self.colorbar.draw(painter)
        painter.set_color([255,255,255])
        # Tooltip tracks the current mouse position.
        self.labels['tooltip'].x = mouse_x
        self.labels['tooltip'].y = mouse_y
        for l in self.labels.values():
            self.draw_label_background(l, painter)
        painter.batch_draw()
        for l in self.labels.values():
            l.draw()

    def clear(self):
        """Blank all label texts (called at the start of each frame)."""
        for l in self.labels.values():
            l.text = ''

    def add_colorbar(self, cmap, vmax, colormap_scale):
        """Attach a colorbar legend to be drawn every frame."""
        self.colorbar = UiManager.Colorbar(cmap, vmax, colormap_scale, self.font_size)
class GeoplotlibApp(pyglet.window.Window):
def __init__(self, geoplotlib_config):
super(GeoplotlibApp, self).__init__(geoplotlib_config.screen_w, geoplotlib_config.screen_h,
fullscreen=False, caption='geoplotlib')
global SCREEN_W, SCREEN_H
SCREEN_W = geoplotlib_config.screen_w
SCREEN_H = geoplotlib_config.screen_h
self.geoplotlib_config = geoplotlib_config
self.ticks = 0
self.ui_manager = UiManager()
self.proj = Projector()
self.map_layer = MapLayer(geoplotlib_config.tiles_provider, skipdl=False)
self.scroll_delay = 0
self.invalidate_delay = 0
self.drag_x = self.drag_y = 0
self.dragging = False
self.drag_start_timestamp = 0
self.mouse_x = self.mouse_y = 0
self.show_map = True
self.show_layers = True
self.show_coordinates = False
glEnable(GL_LINE_SMOOTH)
glEnable(GL_POLYGON_SMOOTH)
# glHint(GL_LINE_SMOOTH_HINT, GL_NICEST);
# glHint(GL_POLYGON_SMOOTH_HINT, GL_NICEST);
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
pyglet.clock.schedule_interval(self.on_update, 1. / FPS)
def on_draw(self):
self.clear()
# needed to avoid diagonal artifacts on the tiles
glDisable(GL_LINE_SMOOTH)
glDisable(GL_POLYGON_SMOOTH)
self.ui_manager.clear()
if self.show_map:
self.map_layer.draw(self.proj)
if self.geoplotlib_config.map_alpha < 255:
painter = BatchPainter()
painter.set_color([0,0,0, 255 - self.geoplotlib_config.map_alpha])
painter.rect(0,0,SCREEN_W, SCREEN_H)
painter.batch_draw()
if abs(self.drag_x) > 1e-3 or abs(self.drag_y) > 1e-3:
self.drag_x *= 0.93
self.drag_y *= 0.93
if self.dragging == False:
self.proj.pan(self.drag_x, self.drag_y)
if self.scroll_delay > 0:
self.scroll_delay -= 1
if self.invalidate_delay > 0:
self.invalidate_delay -= 1
if self.invalidate_delay == 1:
for l in self.geoplotlib_config.layers:
l.invalidate(self.proj)
if self.show_layers and self.invalidate_delay == 0:
if self.geoplotlib_config.smoothing:
glEnable(GL_LINE_SMOOTH)
glEnable(GL_POLYGON_SMOOTH)
glPushMatrix()
glTranslatef(-self.proj.xtile * TILE_SIZE, self.proj.ytile * TILE_SIZE, 0)
for l in self.geoplotlib_config.layers:
l.draw(self.proj,
self.mouse_x + self.proj.xtile * TILE_SIZE,
SCREEN_H - self.mouse_y - self.proj.ytile * TILE_SIZE,
self.ui_manager)
glPopMatrix()
#self.ui_manager.status('T: %.1f, FPS:%d' % (self.ticks / 1000., pyglet.clock.get_fps()))
if self.show_coordinates:
self.ui_manager.status('%.6f %.6f' % self.proj.screen_to_latlon(self.mouse_x, SCREEN_H - self.mouse_y))
if self.invalidate_delay == 2:
self.ui_manager.status('rendering...')
attribution = pyglet.text.Label(self.map_layer.attribution,
color=FONT_COLOR,
font_name=FONT_NAME,
font_size=int(.8*SCREEN_W*FONT_SCALING),
x=SCREEN_W-int(.2*SCREEN_W*FONT_SCALING),
y=int(1.2*SCREEN_W*FONT_SCALING),
anchor_x='right', anchor_y='top')
attribution.draw()
self.ui_manager.draw(self.mouse_x, SCREEN_H - self.mouse_y)
if self.geoplotlib_config.savefig is not None:
GeoplotlibApp.screenshot(self.geoplotlib_config.savefig + '.png')
pyglet.app.exit()
def on_mouse_motion(self, x, y, dx, dy):
self.mouse_x = x
self.mouse_y = SCREEN_H - y
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
if buttons & mouse.LEFT:
self.drag_start_timestamp = self.ticks
self.drag_x = -1. * dx / TILE_SIZE
self.drag_y = -1. * dy / TILE_SIZE
self.proj.pan(self.drag_x, self.drag_y)
if self.invalidate_delay > 0:
self.invalidate_delay = TOTAL_INVALIDATE_DELAY
def on_mouse_release(self, x, y, buttons, modifiers):
if buttons == mouse.LEFT:
self.dragging = False
if self.ticks - self.drag_start_timestamp > 200:
self.drag_x = self.drag_y = 0
def on_mouse_press(self, x, y, buttons, modifiers):
if buttons == mouse.LEFT:
if not self.dragging:
self.dragging = True
self.drag_start_timestamp = self.ticks
self.drag_x = self.drag_y = 0
if self.invalidate_delay > 0:
self.invalidate_delay = TOTAL_INVALIDATE_DELAY
def on_mouse_scroll(self, x, y, scroll_x, scroll_y):
    """Zoom at the cursor; a small cooldown throttles rapid wheel events."""
    if self.scroll_delay != 0 or scroll_y == 0:
        return
    if scroll_y < 0:
        self.proj.zoomin(self.mouse_x, self.mouse_y)
    else:
        self.proj.zoomout(self.mouse_x, self.mouse_y)
    # schedule a re-render and arm the scroll cooldown
    self.invalidate_delay = TOTAL_INVALIDATE_DELAY
    self.scroll_delay = 3
def on_key_release(self, symbol, modifiers):
    """Keyboard shortcuts: P screenshot, M/L map/layer toggles, I/O zoom,
    R force redraw, WASD pan, B print bbox, C coordinate display; anything
    else is forwarded to the layers."""
    key = pyglet.window.key
    # WASD panning offsets, in tile units
    pan_offsets = {key.A: (-KEYBOARD_PAN, 0),
                   key.D: (+KEYBOARD_PAN, 0),
                   key.W: (0, +KEYBOARD_PAN),
                   key.S: (0, -KEYBOARD_PAN)}
    if symbol == key.P:
        # dump a timestamped screenshot of the current frame
        fname = '%d.png' % (time.time() * 1000)
        GeoplotlibApp.screenshot(fname)
        print(fname + ' saved')
    elif symbol == key.M:
        self.show_map = not self.show_map
    elif symbol == key.L:
        self.show_layers = not self.show_layers
    elif symbol == key.I:
        self.proj.zoomin(SCREEN_W / 2, SCREEN_H / 2)
        self.invalidate_delay = TOTAL_INVALIDATE_DELAY
    elif symbol == key.O:
        self.proj.zoomout(SCREEN_W / 2, SCREEN_H / 2)
        self.invalidate_delay = TOTAL_INVALIDATE_DELAY
    elif symbol == key.R:
        # short delay acts as a "redraw now" request
        self.invalidate_delay = 3
    elif symbol in pan_offsets:
        self.proj.pan(*pan_offsets[symbol])
    elif symbol == key.B:
        print(self.proj.bbox())
    elif symbol == key.C:
        self.show_coordinates = not self.show_coordinates
    else:
        # unhandled keys go to the layers, which may request a re-render
        for layer in self.geoplotlib_config.layers:
            if layer.on_key_release(symbol, modifiers):
                layer.invalidate(self.proj)
@staticmethod
def screenshot(fname):
    """Save the current color buffer to *fname* as an image file."""
    # Bias alpha to 1 so the saved image is fully opaque (the framebuffer
    # alpha is not meaningful for a screenshot), then restore the bias.
    glPixelTransferf(gl.GL_ALPHA_BIAS, 1.0)
    image = pyglet.image.ColorBufferImage(0, 0, SCREEN_W, SCREEN_H)
    image.save(fname)
    glPixelTransferf(gl.GL_ALPHA_BIAS, 0.0)
def on_update(self, dt):
    """Advance the millisecond tick counter by the frame time *dt* (seconds)."""
    self.ticks = self.ticks + dt * 1000
def start(self):
    """Fit the projection to the configured area, invalidate every layer so
    it renders once, then enter the pyglet main loop (blocks)."""
    config = self.geoplotlib_config
    if config.bbox is not None:
        # an explicitly requested bounding box takes precedence
        self.proj.fit(config.bbox, force_zoom=config.requested_zoom)
    elif config.layers:
        # otherwise frame the union of every layer's bounding box
        layer_bboxes = [layer.bbox() for layer in config.layers]
        self.proj.fit(BoundingBox.from_bboxes(layer_bboxes),
                      force_zoom=config.requested_zoom)
    for layer in config.layers:
        layer.invalidate(self.proj)
    pyglet.app.run()
def _flatten_xy(x, y):
return np.vstack((x, y)).T.flatten()
class BatchPainter:
    """
    This class batches OpenGL calls. The usage pattern is to instantiate a BatchPainter,
    perform all the drawing and finally render using batch_draw
    """

    def __init__(self):
        # pyglet Batch accumulating every vertex list; rendered in a single
        # call by batch_draw()
        self._batch = pyglet.graphics.Batch()
        # current RGBA draw color, components in [0, 255] (default opaque blue)
        self._color = [0, 0, 255, 255]
        # sprites and labels must stay referenced, or they are garbage
        # collected and disappear from the batch before rendering
        self._sprites = []
        self._labels = []

    def set_color(self, color):
        """Set the current color from a name ('k'/'black', 'w'/'white',
        'r'/'red', 'g'/'green', 'b'/'blue') or an RGB/RGBA sequence with
        components in [0, 255]."""
        if color == 'k' or color == 'black':
            self._color = [0, 0, 0, 255]
        elif color == 'w' or color == 'white':
            self._color = [255, 255, 255, 255]
        elif color == 'r' or color == 'red':
            self._color = [255, 0, 0, 255]
        elif color == 'g' or color == 'green':
            self._color = [0, 255, 0, 255]
        elif color == 'b' or color == 'blue':
            self._color = [0, 0, 255, 255]
        elif len(color) == 4:
            for c in color:
                if c < 0 or c > 255:
                    raise Exception('color components must be between 0 and 255')
            self._color = color
        elif len(color) == 3:
            for c in color:
                if c < 0 or c > 255:
                    raise Exception('color components must be between 0 and 255')
            # append opaque alpha
            # NOTE(review): assumes color is a list — a tuple would raise
            # TypeError on tuple + list; confirm callers never pass tuples
            self._color = color + [255]
        else:
            raise Exception('invalid color format')

    def lines(self, x0, y0, x1, y1, colors=None, width=1.0):
        """Draw one segment per index, from (x0[i], y0[i]) to (x1[i], y1[i]).

        colors, if given, is a per-vertex RGBA sequence; otherwise the
        current color is used for every vertex.
        """
        glLineWidth(width)
        # interleave starts and ends: x = [x0[0], x1[0], x0[1], x1[1], ...],
        # then vertices = [x0[0], y0[0], x1[0], y1[0], ...] — one (x, y)
        # pair per segment endpoint, as GL_LINES expects
        x = _flatten_xy(x0, x1)
        y = _flatten_xy(y0, y1)
        vertices = _flatten_xy(x, y)
        if colors is None:
            colors = self._color * int(len(vertices) / VERT_PER_POINT)
        self._batch.add(int(len(vertices) / VERT_PER_POINT), GL_LINES, None,
                        ('v2f', vertices),
                        ('c4B', np.array(colors).flatten()))

    def linestrip(self, x, y, width=1.0, closed=False):
        """Draw a polyline through the points (x[i], y[i]); optionally close
        it back to the first point."""
        glLineWidth(width)
        vertices = _flatten_xy(x, y)
        # [0, 0, 1, 1, 2, 2, ...] trimmed at both ends pairs consecutive
        # points into GL_LINES segments: (0,1), (1,2), ...
        indices = [i // 2 for i in range(len(vertices))]
        indices = indices[1:-1]
        if closed:
            # add the segment joining the last point back to the first
            indices.append(indices[-1])
            indices.append(indices[0])
        self._batch.add_indexed(int(len(vertices) / VERT_PER_POINT), GL_LINES, None,
                                indices,
                                ('v2f', vertices),
                                ('c4B', self._color * int(len(vertices) / VERT_PER_POINT)))

    def poly(self, x, y, width=1.0):
        """Draw a filled polygon as a triangle fan anchored at the first
        point (correct for convex polygons)."""
        glLineWidth(width)
        vertices = _flatten_xy(x, y)
        indices = []
        # fan: (0, 1, 2), (0, 2, 3), ...
        for i in range(1, len(x) - 1):
            indices.append(0)
            indices.append(i)
            indices.append(i + 1)
        self._batch.add_indexed(int(len(vertices) / VERT_PER_POINT), GL_TRIANGLES, None,
                                indices,
                                ('v2f', vertices),
                                ('c4B', self._color * int(len(vertices) / VERT_PER_POINT)))

    def triangle(self, vertices):
        """Draw triangles from a flat [x0, y0, x1, y1, ...] vertex list."""
        self._batch.add(int(len(vertices) / VERT_PER_POINT), GL_TRIANGLES, None,
                        ('v2f', vertices),
                        ('c4B', self._color * int(len(vertices) / VERT_PER_POINT)))

    def circle(self, cx, cy, r, width=2.0):
        """Draw a circle outline of radius *r* centered at (cx, cy)."""
        glLineWidth(width)
        # segment count grows with the radius
        # NOTE(review): int(10*log(r)) is <= 0 for r <= 1, producing no
        # segments — confirm radii are always > 1 (pixels)
        precision = int(10 * math.log(r))
        vertices = []
        for alpha in np.linspace(0, 6.28, precision):
            vertices.append(cx + r * math.cos(alpha))
            vertices.append(cy + r * math.sin(alpha))
        indices = []
        for i in range(precision - 1):
            indices.append(i)
            indices.append(i + 1)
        # close the loop
        indices.append(precision - 1)
        indices.append(0)
        self._batch.add_indexed(int(len(vertices) / VERT_PER_POINT), GL_LINES, None,
                                indices,
                                ('v2f', vertices),
                                ('c4B', self._color * int(len(vertices) / VERT_PER_POINT)))

    def circle_filled(self, cx, cy, r):
        """Draw a filled circle of radius *r* centered at (cx, cy) as a
        triangle fan around the center vertex."""
        vertices = []
        # vertex 0 is the fan center
        vertices.append(cx)
        vertices.append(cy)
        # NOTE(review): same precision caveat as circle() for r <= 1
        precision = int(10 * math.log(r))
        for alpha in np.linspace(0, 6.28, precision):
            vertices.append(cx + r * math.cos(alpha))
            vertices.append(cy + r * math.sin(alpha))
        indices = []
        for i in range(1, precision):
            indices.append(0)
            indices.append(i)
            indices.append(i + 1)
        # final wedge closing the fan back to the first rim vertex
        indices.append(0)
        indices.append(precision)
        indices.append(1)
        self._batch.add_indexed(int(len(vertices) / VERT_PER_POINT), GL_TRIANGLES, None,
                                indices,
                                ('v2f', vertices),
                                ('c4B', self._color * int(len(vertices) / VERT_PER_POINT)))

    def points(self, x, y, point_size=10, rounded=False):
        """Draw points at (x[i], y[i]); rounded=True renders them smoothed."""
        glPointSize(point_size)
        if rounded:
            glEnable(GL_POINT_SMOOTH)
        else:
            glDisable(GL_POINT_SMOOTH)
        # interleave into [x0, y0, x1, y1, ...]
        vertices = np.vstack((x, y)).T.flatten()
        self._batch.add(int(len(vertices) / VERT_PER_POINT), GL_POINTS, None,
                        ('v2f', vertices),
                        ('c4B', self._color * int(len(vertices) / VERT_PER_POINT)))

    def rect(self, left, top, right, bottom):
        """Draw a filled axis-aligned rectangle (as two triangles)."""
        self.triangle([left, top, right, top, right, bottom, right, bottom, left, top, left, bottom])

    def batch_rects(self, rects_vertices, rects_colors):
        """Draw many filled rectangles in one vertex list.

        rects_vertices[i] is (left, top, right, bottom); rects_colors[i] is
        the RGBA color applied to that rectangle.
        """
        triangles = []
        colors = []
        for i in range(len(rects_vertices)):
            r = rects_vertices[i]
            c = rects_colors[i]
            left, top, right, bottom = r
            # two triangles per rectangle
            triangles.extend([left, top, right, top, right, bottom, right, bottom, left, top, left, bottom])
            # repeat the color for the rectangle's 6 vertices
            colors.extend(c * 6)
        self._batch.add(int(len(triangles) / VERT_PER_POINT), GL_TRIANGLES, None,
                        ('v2f', triangles),
                        ('c4B', colors))

    def sprites(self, image, x, y, scale=1.0):
        """Draw *image* at each (x[i], y[i]) position."""
        from pyglet.sprite import Sprite
        for i in range(len(x)):
            sprite = Sprite(image, batch=self._batch)
            sprite.x = x[i]
            sprite.y = y[i]
            sprite.scale = scale
            # keep a reference so the sprite survives until batch_draw()
            self._sprites.append(sprite)

    def labels(self, x, y, texts, font_name=FONT_NAME, font_size=14, anchor_x='left', anchor_y='top'):
        """Draw text labels at (x[i], y[i]).

        *texts* is either a single string used for every point or a sequence
        with one string per point.
        """
        for i in range(len(x)):
            lab = pyglet.text.Label(parse_raw_str(texts if type(texts) == str else texts[i]),
                                    batch=self._batch,
                                    color=self._color,
                                    font_name=font_name,
                                    font_size=font_size,
                                    x=x[i], y=y[i],
                                    anchor_x=anchor_x,
                                    anchor_y=anchor_y)
            # keep a reference so the label survives until batch_draw()
            self._labels.append(lab)

    def convexhull(self, x, y, fill=False, smooth=False):
        """Draw the convex hull of the given points, optionally filled and/or
        spline-smoothed. Requires scipy; degenerate inputs (e.g. collinear
        points) fall back to a plain linestrip."""
        try:
            from scipy.spatial import ConvexHull
            from scipy.spatial.qhull import QhullError
        except:
            raise Exception('ConvexHull requires scipy')

        if len(x) < 3:
            raise Exception('convexhull requires at least 3 points')

        points = np.vstack((x, y)).T
        try:
            hull = ConvexHull(points)
            # hull.vertices indexes the hull points in traversal order
            xhull = points[hull.vertices, 0]
            yhull = points[hull.vertices, 1]

            if smooth:
                xhull, yhull = self.__generate_spline(xhull, yhull, closed=True)

            if fill:
                self.poly(xhull, yhull)
            else:
                self.linestrip(xhull, yhull, 3, closed=True)
        except QhullError as qerr:
            # degenerate point set: just connect the points
            self.linestrip(x, y, 3, closed=False)

    def __generate_spline(self, x, y, closed=False, steps=20):
        """
        catmullrom spline
        http://www.mvps.org/directx/articles/catmull/
        """
        if closed:
            # wrap phantom control points around so the curve closes cleanly
            x = x.tolist()
            x.insert(0, x[-1])
            x.append(x[1])
            x.append(x[2])

            y = y.tolist()
            y.insert(0, y[-1])
            y.append(y[1])
            y.append(y[2])

        points = np.vstack((x, y)).T
        curve = []
        if not closed:
            curve.append(points[0])
        # each span between control points j and j+1 is sampled *steps* times
        for j in range(1, len(points) - 2):
            for s in range(steps):
                t = 1. * s / steps
                p0, p1, p2, p3 = points[j - 1], points[j], points[j + 1], points[j + 2]
                # Catmull-Rom basis evaluated at parameter t
                pnew = 0.5 * ((2 * p1) + (-p0 + p2) * t + (2 * p0 - 5 * p1 + 4 * p2 - p3) * t ** 2 + (-p0 + 3 * p1 - 3 * p2 + p3) * t ** 3)
                curve.append(pnew)
        if not closed:
            curve.append(points[-1])
        curve = np.array(curve)
        return curve[:, 0], curve[:, 1]

    def spline(self, x, y, width=3):
        """Draw a Catmull-Rom spline through the points (x[i], y[i])."""
        xcurve, ycurve = self.__generate_spline(x, y, closed=False)
        self.linestrip(xcurve, ycurve, width)

    def batch_draw(self):
        """Render everything accumulated so far in a single batched call."""
        self._batch.draw()
class Projector():
    """Web-Mercator projection state using the OSM slippy-map tile
    convention: the view is tracked as a fractional tile offset
    (xtile, ytile) at the current zoom level."""

    def __init__(self):
        # viewport size expressed in (possibly fractional) map tiles
        self.tiles_horizontally = 1. * SCREEN_W / TILE_SIZE
        self.tiles_vertically = 1. * SCREEN_H / TILE_SIZE
        # start by showing the whole world
        self.fit(BoundingBox.WORLD)

    def set_to(self, north, west, zoom):
        """Place the top-left view corner at (north, west) at *zoom*."""
        self.zoom = zoom
        self.xtile, self.ytile = self.deg2num(north, west, zoom)

    def fit(self, bbox, max_zoom=MAX_ZOOM, force_zoom=None):
        """
        Fits the projector to a BoundingBox

        :param bbox: BoundingBox
        :param max_zoom: max zoom allowed
        :param force_zoom: force this specific zoom value even if the whole bbox does not completely fit
        """
        BUFFER_FACTOR = 1.1
        if force_zoom is not None:
            self.zoom = force_zoom
        else:
            # pick the largest zoom at which the bbox, plus a 10% margin,
            # still fits inside the screen
            for zoom in range(max_zoom, MIN_ZOOM - 1, -1):
                self.zoom = zoom
                left, top = self.lonlat_to_screen([bbox.west], [bbox.north])
                right, bottom = self.lonlat_to_screen([bbox.east], [bbox.south])
                if (top - bottom < SCREEN_H * BUFFER_FACTOR) and (right - left < SCREEN_W * BUFFER_FACTOR):
                    break

        # center the view on the bbox midpoint
        west_tile, north_tile = self.deg2num(bbox.north, bbox.west, self.zoom)
        east_tile, south_tile = self.deg2num(bbox.south, bbox.east, self.zoom)
        self.xtile = west_tile - self.tiles_horizontally / 2. + (east_tile - west_tile) / 2
        self.ytile = north_tile - self.tiles_vertically / 2. + (south_tile - north_tile) / 2
        self.calculate_viewport_size()

    @staticmethod
    def deg2num(lat_deg, lon_deg, zoom):
        """Convert lon/lat (degrees) to fractional OSM tile coordinates."""
        lat_rad = math.radians(lat_deg)
        n = 2.0 ** zoom  # number of tiles per axis at this zoom
        xtile = (lon_deg + 180.0) / 360.0 * n
        ytile = (1.0 - math.log(math.tan(lat_rad) + (1 / math.cos(lat_rad))) / math.pi) / 2.0 * n
        return (xtile, ytile)

    @staticmethod
    def num2deg(xtile, ytile, zoom):
        """Inverse of deg2num: tile coordinates back to (lat, lon) degrees."""
        n = 2.0 ** zoom
        lon_deg = xtile / n * 360.0 - 180.0
        lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n)))
        lat_deg = math.degrees(lat_rad)
        return (lat_deg, lon_deg)

    def bbox(self):
        """Return the BoundingBox currently visible on screen."""
        north, west = self.num2deg(self.xtile, self.ytile, self.zoom)
        south, east = self.num2deg(self.xtile + self.tiles_horizontally, self.ytile + self.tiles_vertically, self.zoom)
        return BoundingBox(north=north, west=west, south=south, east=east)

    def pan(self, deltax, deltay):
        """Shift the view by (deltax, deltay) tiles; positive deltay pans the
        view up (ytile decreases)."""
        self.xtile += deltax
        self.ytile -= deltay

    def zoomin(self, mouse_x, mouse_y):
        """Zoom one level in, keeping the point under the mouse fixed."""
        mouse_lat, mouse_lon = self.screen_to_latlon(mouse_x, mouse_y)
        self.zoom = min(self.zoom + 1, MAX_ZOOM)
        # re-anchor the view so the mouse position maps to the same lon/lat
        self.xtile, self.ytile = self.deg2num(mouse_lat, mouse_lon, self.zoom)
        self.xtile -= 1. * mouse_x / TILE_SIZE
        self.ytile -= 1. * mouse_y / TILE_SIZE
        self.calculate_viewport_size()

    def zoomout(self, mouse_x, mouse_y):
        """Zoom one level out, keeping the point under the mouse fixed."""
        mouse_lat, mouse_lon = self.screen_to_latlon(mouse_x, mouse_y)
        self.zoom = max(self.zoom - 1, MIN_ZOOM)
        # re-anchor the view so the mouse position maps to the same lon/lat
        self.xtile, self.ytile = self.deg2num(mouse_lat, mouse_lon, self.zoom)
        self.xtile -= 1. * mouse_x / TILE_SIZE
        self.ytile -= 1. * mouse_y / TILE_SIZE
        self.calculate_viewport_size()

    def calculate_viewport_size(self):
        """Update viewport_w/viewport_h with the haversine distance between
        the view corners (units as returned by utils.haversine — presumably
        kilometers; confirm against utils)."""
        lat1, lon1 = Projector.num2deg(self.xtile, self.ytile, self.zoom)
        lat2, lon2 = Projector.num2deg(self.xtile + self.tiles_horizontally, self.ytile + self.tiles_vertically, self.zoom)
        self.viewport_w = utils.haversine(lat1=lat1, lon1=lon1, lat2=lat1, lon2=lon2)
        self.viewport_h = utils.haversine(lat1=lat1, lon1=lon1, lat2=lat2, lon2=lon1)

    def lonlat_to_screen(self, lon, lat):
        """
        Projects geodesic coordinates to screen

        :param lon: longitude
        :param lat: latitude
        :return: x,y screen coordinates

        Note: returns absolute map-pixel coordinates at the current zoom
        (not offset by the current view position), with y flipped so the
        origin is at the bottom.
        """
        if type(lon) == list:
            lon = np.array(lon)
        if type(lat) == list:
            lat = np.array(lat)
        lat_rad = np.radians(lat)
        n = 2.0 ** self.zoom
        xtile = (lon + 180.0) / 360.0 * n
        ytile = (1.0 - np.log(np.tan(lat_rad) + (1 / np.cos(lat_rad))) / math.pi) / 2.0 * n
        x = (xtile * TILE_SIZE).astype(int)
        y = (SCREEN_H - ytile * TILE_SIZE).astype(int)
        return x, y

    def screen_to_latlon(self, x, y):
        """
        Return the latitude and longitude corresponding to a screen point

        :param x: screen x
        :param y: screen y
        :return: latitude and longitude at x,y
        """
        xtile = 1. * x / TILE_SIZE + self.xtile
        ytile = 1. * y / TILE_SIZE + self.ytile
        return self.num2deg(xtile, ytile, self.zoom)
class SetQueue(Queue):
    """A Queue whose pending items are stored in a set: duplicate puts
    collapse into a single entry and retrieval order is arbitrary.

    Overrides the three storage hooks documented by queue.Queue
    (_init/_put/_get); locking and blocking semantics are inherited.
    """

    def _init(self, maxsize):
        # replace the default deque storage with a set
        self.queue = set()

    def _put(self, item):
        # set.add silently ignores items already pending
        self.queue.add(item)

    def _get(self):
        # set.pop returns an arbitrary pending item
        return self.queue.pop()
class TileDownloaderThread(Thread):
    """Daemon worker that endlessly pulls (url, download_path) jobs from a
    queue and writes the fetched tile image to disk.

    Fix: the HTTP response and the output file are now closed on error paths
    too (previously a failed read leaked the connection, and a failed write
    leaked the file handle).
    """

    def __init__(self, queue):
        """
        :param queue: queue yielding (url, download_path) tuples
        """
        Thread.__init__(self)
        self.queue = queue
        self.daemon = True  # do not keep the process alive on exit

    def run(self):
        while True:
            url, download_path = self.queue.get()
            assert download_path.endswith('.png')
            try:
                # NOTE(review): urllib2 is the Python 2 HTTP module — confirm
                # the module-level imports provide it (urllib.request on Py3)
                source = urllib2.urlopen(url)
                try:
                    content = source.read()
                finally:
                    source.close()
                with open(download_path, 'wb') as destination:
                    destination.write(content)
            except Exception as e:
                # a failed download is logged and skipped; the tile will be
                # re-requested on a later draw
                print(url, e)
# Attribution prefix prepended to every built-in provider's credit line.
_GEOPLOTLIB_ATTRIBUTION = u'made with geoplotlib | '

# Built-in basemap tile providers. Each entry maps a style name to:
#   'url': callable (zoom, xtile, ytile) -> tile URL; the random subdomain
#          choice spreads requests across the provider's mirror hosts
#   'attribution': credit line rendered on the map
_DEFAULT_TILE_PROVIDERS = {
    'watercolor': {'url': lambda zoom, xtile, ytile:
                   'http://%s.tile.stamen.com/watercolor/%d/%d/%d.png' % (random.choice(['a', 'b', 'c', 'd']), zoom, xtile, ytile),
                   'attribution': _GEOPLOTLIB_ATTRIBUTION + 'Map tiles by Stamen Design, under CC BY 3.0. Data by OpenStreetMap, under ODbL.'
                   },
    'toner': {'url': lambda zoom, xtile, ytile:
              "http://%s.tile.stamen.com/toner/%d/%d/%d.png" % (random.choice(['a', 'b', 'c', 'd']), zoom, xtile, ytile),
              'attribution': _GEOPLOTLIB_ATTRIBUTION + 'Map tiles by Stamen Design, under CC BY 3.0. Data by OpenStreetMap, under ODbL.'
              },
    'toner-lite': {'url': lambda zoom, xtile, ytile:
                   "http://%s.tile.stamen.com/toner-lite/%d/%d/%d.png" % (random.choice(['a', 'b', 'c', 'd']), zoom, xtile, ytile),
                   'attribution': _GEOPLOTLIB_ATTRIBUTION + 'Map tiles by Stamen Design, under CC BY 3.0. Data by OpenStreetMap, under ODbL.'
                   },
    'darkmatter': {'url': lambda zoom, xtile, ytile:
                   'http://%s.basemaps.cartocdn.com/dark_all/%d/%d/%d.png' % (random.choice(['a', 'b', 'c']), zoom, xtile, ytile),
                   'attribution': _GEOPLOTLIB_ATTRIBUTION + u'© OpenStreetMap contributors © CartoDB'
                   },
    'positron': {'url': lambda zoom, xtile, ytile:
                 'http://%s.basemaps.cartocdn.com/light_all/%d/%d/%d.png' % (random.choice(['a', 'b', 'c']), zoom, xtile, ytile),
                 'attribution': _GEOPLOTLIB_ATTRIBUTION + u'© OpenStreetMap contributors © CartoDB'
                 }
}
class MapLayer():
    """Background map layer: fetches, caches (memory + disk) and draws map
    tiles from a named built-in provider or a custom provider dict."""

    def __init__(self, tiles_provider, skipdl=False):
        if type(tiles_provider) == str:
            # named built-in provider
            if tiles_provider in _DEFAULT_TILE_PROVIDERS:
                self.tiles_dir = tiles_provider
                self.url_generator = _DEFAULT_TILE_PROVIDERS[tiles_provider]['url']
                self.attribution = _DEFAULT_TILE_PROVIDERS[tiles_provider]['attribution']
            else:
                raise Exception('unknown style ' + tiles_provider)
        else:
            # custom provider: dict with 'tiles_dir', 'url', 'attribution'
            self.tiles_dir = tiles_provider['tiles_dir']
            self.url_generator = tiles_provider['url']
            self.attribution = tiles_provider['attribution']

        # True skips downloading missing tiles (offline mode)
        self.skipdl = skipdl
        # in-memory cache: (zoom, xtile, ytile) -> pyglet Sprite
        self.tiles_cache = {}
        # pending downloads; the set-backed queue collapses duplicate requests
        self.download_queue = SetQueue()
        self.download_threads = [TileDownloaderThread(self.download_queue) for i in range(2)]
        for t in self.download_threads:
            t.start()

    def get_tile(self, zoom, xtile, ytile):
        """Return the Sprite for a tile, or None if the tile is out of range
        or not yet available. Missing tiles are queued for async download."""
        # tiles outside the 2**zoom x 2**zoom grid do not exist
        if xtile < 0 or ytile < 0 or xtile >= 2 ** zoom or ytile >= 2 ** zoom:
            return None

        tile_image = self.tiles_cache.get((zoom, xtile, ytile))
        if tile_image is not None:
            return tile_image

        url = self.url_generator(zoom, xtile, ytile)
        # on-disk cache layout: ~/geoplotlib_tiles/<style>/<zoom>/<x>/<y>.png
        dir_path = expanduser('~') + '/geoplotlib_tiles/%s/%d/%d/' % (self.tiles_dir, zoom, xtile)
        download_path = dir_path + '%d.png' % ytile

        if not os.path.exists(dir_path):
            os.makedirs(dir_path)

        if not os.path.isfile(download_path):
            if self.skipdl:
                return None
            else:
                # enqueue and return None for now; the tile appears on a
                # later frame once a worker thread has fetched it
                self.download_queue.put((url, download_path))
        else:
            try:
                tile_image = pyglet.image.load(download_path)
                tile_image.blit(2 * SCREEN_W, 2 * SCREEN_H, 0)  # blit offscreen to check if valid
                self.tiles_cache[(zoom, xtile, ytile)] = pyglet.sprite.Sprite(tile_image)
                return self.tiles_cache[(zoom, xtile, ytile)]
            except Exception as exc:
                # corrupt or partial file: delete it so it gets re-fetched
                print(exc)
                assert download_path.endswith('.png')
                os.unlink(download_path)
                return None

    def draw(self, proj):
        """Blit every tile intersecting the current viewport."""
        for x in range(int(proj.xtile), int(proj.xtile + proj.tiles_horizontally + 1)):
            for y in range(int(proj.ytile), int(proj.ytile + proj.tiles_vertically + 1)):
                tilesurf = self.get_tile(proj.zoom, x, y)
                if tilesurf is not None:
                    try:
                        # position relative to the view origin; y is flipped
                        # to pyglet's bottom-left convention
                        tilesurf.x = int((x - proj.xtile) * TILE_SIZE)
                        tilesurf.y = int(SCREEN_H - (y - proj.ytile + 1) * TILE_SIZE)
                        tilesurf.draw()
                    except Exception as e:
                        print('exception blitting', x, y, proj.zoom, e)
| {
"content_hash": "91a1e9ef6b00884109250a6c0f78ff71",
"timestamp": "",
"source": "github",
"line_count": 895,
"max_line_length": 142,
"avg_line_length": 35.67039106145251,
"alnum_prop": 0.5317462803445575,
"repo_name": "andrea-cuttone/geoplotlib",
"id": "a52769c2b7d998ca41514a6c9a26bdc090968ac0",
"size": "31977",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geoplotlib/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "94433"
}
],
"symlink_target": ""
} |
import logging
from sn_agent.job.job_descriptor import JobDescriptor
from sn_agent.service_adapter import ServiceAdapterABC
logger = logging.getLogger(__name__)
class EntityExtracter(ServiceAdapterABC):
    """Example service adapter that writes a canned entity list to each job
    item's output URL."""

    type_name = "EntityExtracter"

    def perform(self, job: JobDescriptor):
        """Process every item in *job*, writing the extracted entities to
        that item's 'output_url' file.

        Bug fix: the original indexed ``job[item_count]`` with a counter that
        was never incremented, so every iteration overwrote the first item's
        output file. Each item's own 'output_url' is now used.
        """
        for job_item in job:
            file_name = job_item['output_url']
            with open(file_name, 'w') as file:
                file.write("entity:\n")
                file.write(" pig\n")
                file.write(" farmer\n")
                file.write(" tractor\n")
                file.write(" cornfield\n")
| {
"content_hash": "2f637f956291d9bad6c79cc14b0084bb",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 54,
"avg_line_length": 31,
"alnum_prop": 0.5852534562211982,
"repo_name": "singnet/singnet",
"id": "f864b8bfd89653b31492adcb6493b9b1371bd7e8",
"size": "823",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agent/examples/multi_agent_adapter/entity_extracter/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "46708"
},
{
"name": "JavaScript",
"bytes": "1346099"
},
{
"name": "Makefile",
"bytes": "635"
},
{
"name": "Python",
"bytes": "171956"
},
{
"name": "Shell",
"bytes": "7305"
}
],
"symlink_target": ""
} |
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "s, t 2, s, t 5.1, s, q"
tags = "ShakyTiles3D"
import pyglet
import cocos
from cocos.director import director
from cocos.actions import *
from cocos.layer import *
class BackgroundLayer(cocos.layer.Layer):
    """Cocos layer that draws a static background image at the layer origin."""

    def __init__(self):
        super(BackgroundLayer, self).__init__()
        # loaded via pyglet's resource framework (search path set up by cocos)
        self.img = pyglet.resource.image('background_image.png')

    def draw(self):
        # reset the tint to opaque white so the image colors are unmodified
        glColor4ub(255, 255, 255, 255)
        glPushMatrix()
        self.transform()  # apply this layer's position/scale/rotation
        self.img.blit(0, 0)
        glPopMatrix()
def main():
    """Open a resizable window and run the ShakyTiles3D grid effect over a
    background image layer."""
    director.init(resizable=True)
    scene = cocos.scene.Scene()
    scene.add(BackgroundLayer(), z=0)
    # In real code a StopGrid() action should follow a sequence of grid
    # actions; omitted here so the test stays in the last grid-action render.
    scene.do(ShakyTiles3D(randrange=6, grid=(8, 16), duration=5))
    director.run(scene)
if __name__ == '__main__':
main()
| {
"content_hash": "43fb62cee13ef2412e292ec66364aa17",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 74,
"avg_line_length": 25.75,
"alnum_prop": 0.6446601941747573,
"repo_name": "eevee/cocos2d-mirror",
"id": "b6099a5d951542e95f83900a35bb5f6fe85d4674",
"size": "1103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_shakytiles3d.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1196228"
}
],
"symlink_target": ""
} |
"""
Functions to convert between NMR file formats
"""
import datetime
from warnings import warn
import numpy as np
from . import pipe
from . import varian
from . import bruker
from . import sparky
from . import rnmrtk
from . import fileiobase
class converter(object):
    """
    Object which allows conversion between NMR file formats, including low
    memory data objects.

    Conversion between NMR file formats with this class involves three steps.
    First a new converter object must be created. Then the converter must be
    loaded with data using a ``from_`` method. Finally, the dictionary and
    data representation of a NMR data in the desired format is extracted using
    a ``to_`` method. This can then be written to disk.

    Example conversion::

        vdic, vdata = ng.varian.read("varian_dir")
        C = ng.convert.converter()
        C.from_varian(vdic, vdata)
        pdic, pdata = C.to_pipe()
        ng.pipe.write("test.fid", pdic, pdata)

    Spectral parameters can be provided directly by passing a Universal
    dictionary to any of the ``from_`` methods. If not provided the spectral
    parameters are guessed from the file format's dictionary of parameters.

    """

    def __init__(self):
        """
        Create a converter object
        """
        pass

    # utility functions
    def __returndata(self):
        """
        Return data or emulated data after error checking
        """
        # Error checking
        if "_data" not in self.__dict__:
            raise IOError("converter not loaded with data")
        if "_udic" not in self.__dict__:
            raise IOError("converter not loaded with dictionary")
        if "_iproc" not in self.__dict__:
            raise IOError("converter not loaded with processing parameters")
        if "_oproc" not in self.__dict__:
            raise IOError("converter not loaded with processing parameters")
        if "_odtype" not in self.__dict__:
            raise IOError("converter not loaded with output dtype")

        # Warnings
        if self._data.dtype.kind != np.dtype(self._odtype).kind:
            warn("Incompatible dtypes, conversion not recommended")

        # Return data
        if isinstance(self._data, np.ndarray):  # in memory data
            return self.__procdata()
        else:  # return emulated data wrapping the low-memory object
            iproc = self._iproc
            oproc = self._oproc
            odtype = self._odtype
            order = self._data.order
            return udata_nd(self._data, iproc, oproc, odtype, order)

    def __procdata(self):
        """
        Process data as indicated by flags
        """
        # copy the data so the loaded array is never mutated
        data = np.copy(self._data)

        # processing for input type
        # sign alt. indirect dimension
        if data.ndim >= 2 and "alt_id_sign" in self._iproc:
            s = [slice(None, None, None)] * data.ndim
            for i in range(data.ndim - 1):
                # negate every other trace along indirect dimension i.
                # Index with tuple(s): indexing an ndarray with a *list* of
                # slices is invalid in modern NumPy.
                s[i] = slice(1, None, 2)
                data[tuple(s)] = -data[tuple(s)]
                s[i] = slice(None, None, None)

        if "realfactor" in self._iproc:
            data.real = data.real * self._iproc['realfactor']
        if "imagfactor" in self._iproc and np.iscomplexobj(data):
            data.imag = data.imag * self._iproc['imagfactor']

        # processing for output
        # sign alt. indirect dimension
        if data.ndim >= 2 and "alt_id_sign" in self._oproc:
            s = [slice(None, None, None)] * data.ndim
            for i in range(data.ndim - 1):
                s[i] = slice(1, None, 2)
                data[tuple(s)] = -data[tuple(s)]
                s[i] = slice(None, None, None)

        if "realfactor" in self._oproc:
            data.real = data.real * self._oproc['realfactor']
        if "imagfactor" in self._oproc and np.iscomplexobj(data):
            data.imag = data.imag * self._oproc['imagfactor']

        return data.astype(self._odtype)

    # IMPORTERS (from_*)
    def from_universal(self, dic, data):
        """
        Load converter with Universal data.

        Parameters
        ----------
        dic : dict
            Dictionary of universal parameters.
        data : array_like
            NMR data.

        """
        # set data
        self._data = data
        self._iproc = {}

        # set the dictionary
        self._udic = dic

    def from_varian(self, dic, data, udic=None):
        """
        Load converter with Agilent/Varian data.

        Parameters
        ----------
        dic : dict
            Dictionary of Agilent/Varian parameters.
        data : array_like
            NMR data.
        udic : dict, optional
            Universal dictionary, if not provided will be guesses from dic.

        """
        # set data
        self._data = data
        if udic is not None and udic[0]['encoding'].lower() == "tppi":
            self._iproc = {"imagfactor": -1.0}
        else:  # states, etc needs sign alt. of indirect dim.
            self._iproc = {"alt_id_sign": True, "imagfactor": -1.0}

        # set the universal dictionary
        if udic is not None:
            self._udic = udic
        else:
            self._udic = varian.guess_udic(dic, data)

    def from_rnmrtk(self, dic, data, udic=None, agilent_compatible=False):
        """
        Load converter with RNMRTK data.

        Parameters
        ----------
        dic : dict
            Dictionary of RNMRTK parameters.
        data : array_like
            NMR data.
        udic : dict, optional
            Universal dictionary, if not provided will be guesses from dic.
        agilent_compatible : bool, optional
            True when RNMRTK data is being compared to Agilent/Varian data.

        """
        # set data
        self._data = data

        # set input processing filters.
        if agilent_compatible:
            self._iproc = {"alt_id_sign": True, "imagfactor": -1.0}
        else:
            self._iproc = {}

        # set the universal dictionary
        if udic is not None:
            self._udic = udic
        else:
            self._udic = rnmrtk.guess_udic(dic, data)

    def from_pipe(self, dic, data, udic=None):
        """
        Load converter with NMRPipe data.

        Parameters
        ----------
        dic : dict
            Dictionary of NMRPipe parameters.
        data : array_like
            NMR data.
        udic : dict, optional
            Universal dictionary, if not provided will be guesses from dic.

        """
        # set data
        self._data = data
        self._iproc = {}

        # set the universal dictionary
        if udic is not None:
            self._udic = udic
        else:
            self._udic = pipe.guess_udic(dic, data)

    def from_sparky(self, dic, data, udic=None):
        """
        Load converter with Sparky data.

        Parameters
        ----------
        dic : dict
            Dictionary of Sparky parameters.
        data : array_like
            NMR data.
        udic : dict, optional
            Universal dictionary, if not provided will be guesses from dic.

        """
        # set data
        self._data = data
        self._iproc = {}

        # set the universal dictionary
        if udic is not None:
            self._udic = udic
        else:
            self._udic = sparky.guess_udic(dic, data)

    def from_bruker(self, dic, data, udic=None, remove_digital_filter=False):
        """
        Load converter with Bruker data.

        Parameters
        ----------
        dic : dict
            Dictionary of Bruker parameters.
        data : array_like
            NMR data.
        udic : dict, optional
            Universal dictionary, if not provided will be guesses from dic.
        remove_digital_filter : bool, optional
            True to remove the Bruker digital filter. Do not use this
            option with low memory data or when the `udic` parameter is
            specified. False leave the digital filter in place.

        """
        # set data, possibly with removal of filter delay
        if remove_digital_filter:
            self._data = bruker.remove_digital_filter(dic, data)
        else:
            self._data = data
        self._iproc = {}

        # set the universal dictionary
        if udic is not None:
            self._udic = udic
        else:
            self._udic = bruker.guess_udic(dic, self._data)

    # EXPORTERS (to_*)
    def to_universal(self):
        """
        Return Universal format data.

        Returns
        -------
        dic : dict
            Dictionary of Universal parameters.
        data : array_like
            NMR data in format as provided.

        """
        # create dictionary
        dic = dict(self._udic)

        # add processing flags for output
        self._oproc = {}
        self._odtype = self._data.dtype

        return dic, self.__returndata()

    def to_pipe(self, datetimeobj=None):
        """
        Return NMRPipe format data.

        Parameters
        ----------
        datetimeobj : datetime object, optional
            Datetime object to include in the NMRPipe parameters. The current
            date and time (evaluated at call time) is used by default.

        Returns
        -------
        dic : dict
            Dictionary of NMRPipe parameters.
        data : array_like
            NMR data in NMRPipe format.

        """
        if datetimeobj is None:
            # resolved per call; a datetime.now() default argument would be
            # frozen once at import time
            datetimeobj = datetime.datetime.now()

        # create dictionary
        dic = pipe.create_dic(self._udic, datetimeobj)

        # add processing flags for output
        self._oproc = {}
        if self._udic[self._udic["ndim"] - 1]["complex"]:
            self._odtype = "complex64"
        else:
            self._odtype = "float32"

        return dic, self.__returndata()

    def to_rnmrtk(self, agilent_compatible=False, dim_order=None):
        """
        Return RNMRTK format data.

        Parameters
        ----------
        agilent_compatible : bool, optional
            True when RNMRTK data is being compared to Agilent/Varian data.
        dim_order : list, optional
            List mapping axis numbers in the universal dictionary to the to the
            order in which they will appear in the RNMRTK dictionary. If None,
            the default, [0, 1, 2, ...] will be used.

        Returns
        -------
        dic : dict
            Dictionary of RNMRTK parameters.
        data : array_like
            NMR data in RNMRTK format.

        """
        # create dictionary
        dic = rnmrtk.create_dic(self._udic)

        # add processing flags for output
        if agilent_compatible:
            self._oproc = {"alt_id_sign": True, "imagfactor": -1.0}
        else:
            self._oproc = {}
        if self._udic[self._udic["ndim"] - 1]["complex"]:
            self._odtype = "complex64"
        else:
            self._odtype = "float32"

        return dic, self.__returndata()

    def to_varian(self):
        """
        Return Agilent/Varian format data.

        Returns
        -------
        dic : dict
            Dictionary of Agilent/Varian parameters.
        data : array_like
            NMR data in Agilent/Varian format.

        """
        # create dictionary
        dic = varian.create_dic(self._udic)

        # add processing flags for output
        self._oproc = {"alt_id_sign": True, "imagfactor": -1.0}
        self._odtype = "complex64"

        return dic, self.__returndata()

    def to_sparky(self, datetimeobj=None, user='user'):
        """
        Return Sparky format data.

        Parameters
        ----------
        datetimeobj : datetime object, optional
            Datetime object to include in the Sparky parameters. The current
            date and time (evaluated at call time) is used by default.
        user : str, optional
            Username to include in the Sparky parameters. 'user' is the
            default.

        Returns
        -------
        dic : dict
            Dictionary of Sparky parameters.
        data : array_like
            NMR data in Sparky format.

        """
        if datetimeobj is None:
            # resolved per call; a datetime.now() default argument would be
            # frozen once at import time
            datetimeobj = datetime.datetime.now()

        # create dictionary
        dic = sparky.create_dic(self._udic, datetimeobj, user)

        # add processing flags for output
        self._oproc = {}
        self._odtype = "float32"

        return dic, self.__returndata()

    def to_bruker(self):
        """
        Return Bruker format data.

        Returns
        -------
        dic : dict
            Dictionary of Bruker parameters.
        data : array_like
            NMR data in Bruker format.

        """
        # create dictionary
        dic = bruker.create_dic(self._udic)

        # add processing flags for output
        self._oproc = {}
        self._odtype = "complex128"

        return dic, self.__returndata()

    def to_csdm(self):
        """
        Return a csdm object containing data.

        Returns
        -------
        data : csdm object
            CSDM object containing parameters and data

        Raises
        ------
        ImportError
            If the optional csdmpy dependency is not installed. Other errors
            propagate unchanged (previously a bare except re-raised every
            failure as ImportError).

        """
        try:
            import csdmpy as cp
        except ImportError as err:
            raise ImportError("csdmpy must be installed to use this function. Please install by typing 'pip install csdmpy' in the terminal.") from err

        # one LinearDimension per non-singleton integer-keyed axis entry in
        # the universal dictionary
        dimensions = [
            cp.LinearDimension(
                count=value["size"],
                increment=f'{1 / value["sw"]} s',
                reciprocal={
                    "coordinates_offset": f'{value["car"]} Hz',
                    "origin_offset": f'{value["obs"]} MHz',
                },
                label=value["label"],
            )
            for key, value in list(self._udic.items())
            if isinstance(key, int) and value["size"] != 1
        ]

        # CSDM orders dimensions fastest-varying first, hence the reversal
        return cp.CSDM(
            dimensions=dimensions[::-1],
            dependent_variables=[cp.as_dependent_variable(self._data.copy())],
        )
class udata_nd(fileiobase.data_nd):
    """
    Wrap other fileiobase.data_nd derived objects with input/output conversion
    when slices are requested.

    * slicing operations return ndarray objects.
    * can iterate over with expected results.
    * transpose and swapaxes methods create a new objects with correct axes
      ordering.
    * has ndim, shape, and dtype attributes.

    Parameters
    ----------
    edata : fileiobase.data_nd derived object
        Data object to wrap.
    iproc : dict
        Dictionary of processing required by input format.
    oproc :
        Dictionary of processing required by output format.
    odtype : dtype
        Output dtype.
    order : tuple
        Axis ordering relative to input data.

    Notes
    -----
    The iproc and oproc dictionary can contains the following keys and values.

    =========== ========== ==========================================
    key         value      Description
    =========== ========== ==========================================
    alt_id_sign True/False True alternates signs along indirect dims.
    realfactor  float      Real channel scaling factor.
    imagfactor  float      Imaginary channel scaling factor.
    =========== ========== ==========================================

    """

    def __init__(self, edata, iproc, oproc, odtype, order=None):
        """
        create and set up
        """
        # set converter attributes
        self._iproc = iproc  # input processing dictionary
        self._oproc = oproc  # output processing dictionary
        self._odtype = odtype  # output dtype
        self.edata = edata  # file

        # required data_nd attributes
        self.order = order
        self.fshape = edata.fshape
        self.dtype = odtype
        self.__setdimandshape__()  # set ndim and shape attributes

    def __fcopy__(self, order):
        """
        Create a copy with the given axis ordering.
        """
        n = udata_nd(self.edata, self._iproc, self._oproc, self._odtype, order)
        return n

    def __fgetitem__(self, slices):
        """
        Return ndarray of selected values

        slices is a well formatted n-tuple of slices
        """
        data = self.edata.__fgetitem__(slices)

        # input processing
        if "alt_id_sign" in self._iproc:  # sign alt. indirect dimension
            # skip when both iproc and oproc request sign alternation: the
            # two negations would cancel out
            if "alt_id_sign" not in self._oproc:  # skip if in both
                fslice = slices[:-1]
                ffshape = self.fshape[:-1]
                nd_iter = fileiobase.ndtofrom_iter(ffshape, fslice)
                for out_index, in_index in nd_iter:
                    # negate the trace if there is an odd number of
                    # odd number indices in the slice
                    if np.mod(in_index, 2).sum() % 2 == 1:
                        data[out_index] = -data[out_index]

        if "realfactor" in self._iproc:
            data.real = data.real * self._iproc['realfactor']
        if "imagfactor" in self._iproc:
            # NOTE(review): unlike converter.__procdata there is no
            # np.iscomplexobj check here — presumably edata is always complex
            # when imagfactor is set; confirm
            data.imag = data.imag * self._iproc['imagfactor']

        # output processing
        if "alt_id_sign" in self._oproc:
            # mirror of the input-side skip: only apply if iproc did not
            if "alt_id_sign" not in self._iproc:
                fslice = slices[:-1]
                ffshape = self.fshape[:-1]
                nd_iter = fileiobase.ndtofrom_iter(ffshape, fslice)
                for out_index, in_index in nd_iter:
                    # negate the trace if there is an odd number of
                    # odd number indices in the slice
                    if np.mod(in_index, 2).sum() % 2 == 1:
                        data[out_index] = -data[out_index]

        if "realfactor" in self._oproc:
            data.real = data.real * self._oproc['realfactor']
        if "imagfactor" in self._oproc:
            data.imag = data.imag * self._oproc['imagfactor']

        return data.astype(self._odtype)
| {
"content_hash": "d53b877f0b8e5e6ea595e6f34625f10e",
"timestamp": "",
"source": "github",
"line_count": 584,
"max_line_length": 142,
"avg_line_length": 30.65582191780822,
"alnum_prop": 0.5362229793889293,
"repo_name": "jjhelmus/nmrglue",
"id": "9e8188fd17273f2ae272d3ef6579b77891ed9199",
"size": "17903",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nmrglue/fileio/convert.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "978743"
},
{
"name": "Shell",
"bytes": "35758"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import adminsortable.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Rebuild the ordering counters and sortable FKs of the portfolio app."""

    dependencies = [
        ('portfolio', '0014_auto_20170102_0508'),
    ]

    def _order_field():
        # Hidden, indexed position counter managed by django-adminsortable.
        return models.PositiveIntegerField(db_index=True, default=0,
                                           editable=False)

    def _sortable_fk(target):
        # Sortable FK with an explicit on_delete (CASCADE matches the old
        # implicit default).
        return adminsortable.fields.SortableForeignKey(
            on_delete=django.db.models.deletion.CASCADE, to=target)

    operations = [
        migrations.AlterField(model_name='image', name='order',
                              field=_order_field()),
        migrations.AlterField(model_name='image', name='project',
                              field=_sortable_fk('portfolio.Project')),
        migrations.AlterField(model_name='link', name='category',
                              field=_sortable_fk('portfolio.LinkCategory')),
        migrations.AlterField(model_name='link', name='order',
                              field=_order_field()),
        migrations.AlterField(model_name='link', name='project',
                              field=_sortable_fk('portfolio.Project')),
        migrations.AlterField(model_name='project', name='category',
                              field=_sortable_fk('portfolio.ProjectCategory')),
        migrations.AlterField(model_name='project', name='order',
                              field=_order_field()),
        migrations.AlterField(model_name='projectcategory', name='order',
                              field=_order_field()),
        migrations.AlterField(model_name='video', name='order',
                              field=_order_field()),
        migrations.AlterField(model_name='video', name='project',
                              field=_sortable_fk('portfolio.Project')),
    ]

    # keep the class namespace clean; the helpers are build-time only
    del _order_field, _sortable_fk
| {
"content_hash": "2894441ce2b68c7cd78d1d4c4ff13d60",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 135,
"avg_line_length": 37.06153846153846,
"alnum_prop": 0.6093814860938148,
"repo_name": "Arlefreak/ApiArlefreak",
"id": "4a19f2f37f5f3faee70274b14593e80475d15b02",
"size": "2481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "portfolio/migrations/0015_auto_20170519_1722.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "64132"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South schema migration for the cellcounter ``main`` app.

    Creates every model table plus the M2M join table, and carries the
    frozen ORM definitions South needs to run this migration standalone.
    """

    def forwards(self, orm):
        """Create all ``main`` tables and the SimilarLookingGroup M2M table."""
        # Adding model 'CellCountInstance'
        db.create_table('main_cellcountinstance', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('datetime_submitted', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('datetime_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('tissue_type', self.gf('django.db.models.fields.CharField')(max_length=25)),
            ('overall_comment', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('main', ['CellCountInstance'])

        # Adding model 'BoneMarrowBackground'
        db.create_table('main_bonemarrowbackground', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('cell_count_instance', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['main.CellCountInstance'], unique=True)),
            ('trail_cellularity', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('particle_cellularity', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('particulate', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('haemodilution', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('site', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('ease_of_aspiration', self.gf('django.db.models.fields.CharField')(max_length=50)),
        ))
        db.send_create_signal('main', ['BoneMarrowBackground'])

        # Adding model 'CellType'
        db.create_table('main_celltype', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('readable_name', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('machine_name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
            ('comment', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('main', ['CellType'])

        # Adding model 'CellCount'
        db.create_table('main_cellcount', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('cell_count_instance', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.CellCountInstance'])),
            ('cell', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.CellType'])),
            ('normal_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('abnormal_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('comment', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('main', ['CellCount'])

        # Adding model 'ErythropoiesisFindings'
        db.create_table('main_erythropoiesisfindings', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('cell_count_instance', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['main.CellCountInstance'], unique=True)),
            ('no_dysplasia', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('nuclear_asynchrony', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('multinucleated_forms', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('ragged_haemoglobinisation', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('megaloblastic_change', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('comment', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('main', ['ErythropoiesisFindings'])

        # Adding model 'GranulopoiesisFindings'
        db.create_table('main_granulopoiesisfindings', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('cell_count_instance', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['main.CellCountInstance'], unique=True)),
            ('no_dysplasia', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('hypogranular', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('pelger', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('nuclear_atypia', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('dohle_bodies', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('comment', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('main', ['GranulopoiesisFindings'])

        # Adding model 'MegakaryocyteFeatures'
        db.create_table('main_megakaryocytefeatures', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('cell_count_instance', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['main.CellCountInstance'], unique=True)),
            ('relative_count', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('no_dysplasia', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('hypolobulated', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('fragmented', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('micromegakaryocytes', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('comment', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('main', ['MegakaryocyteFeatures'])

        # Adding model 'IronStain'
        db.create_table('main_ironstain', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('cell_count_instance', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['main.CellCountInstance'], unique=True)),
            ('stain_performed', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('iron_content', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('ringed_sideroblasts', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
            ('comment', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('main', ['IronStain'])

        # Adding model 'CellImage'
        db.create_table('main_cellimage', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('description', self.gf('django.db.models.fields.TextField')()),
            ('file', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
            ('celltype', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.CellType'])),
        ))
        db.send_create_signal('main', ['CellImage'])

        # Adding model 'SimilarLookingGroup'
        db.create_table('main_similarlookinggroup', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ))
        db.send_create_signal('main', ['SimilarLookingGroup'])

        # Adding M2M table for field cell_image on 'SimilarLookingGroup'
        db.create_table('main_similarlookinggroup_cell_image', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('similarlookinggroup', models.ForeignKey(orm['main.similarlookinggroup'], null=False)),
            ('cellimage', models.ForeignKey(orm['main.cellimage'], null=False))
        ))
        db.create_unique('main_similarlookinggroup_cell_image', ['similarlookinggroup_id', 'cellimage_id'])

    def backwards(self, orm):
        """Drop every table created by :meth:`forwards` (reverse order not
        required by South here since no FK constraints are declared)."""
        # Deleting model 'CellCountInstance'
        db.delete_table('main_cellcountinstance')

        # Deleting model 'BoneMarrowBackground'
        db.delete_table('main_bonemarrowbackground')

        # Deleting model 'CellType'
        db.delete_table('main_celltype')

        # Deleting model 'CellCount'
        db.delete_table('main_cellcount')

        # Deleting model 'ErythropoiesisFindings'
        db.delete_table('main_erythropoiesisfindings')

        # Deleting model 'GranulopoiesisFindings'
        db.delete_table('main_granulopoiesisfindings')

        # Deleting model 'MegakaryocyteFeatures'
        db.delete_table('main_megakaryocytefeatures')

        # Deleting model 'IronStain'
        db.delete_table('main_ironstain')

        # Deleting model 'CellImage'
        db.delete_table('main_cellimage')

        # Deleting model 'SimilarLookingGroup'
        db.delete_table('main_similarlookinggroup')

        # Removing M2M table for field cell_image on 'SimilarLookingGroup'
        db.delete_table('main_similarlookinggroup_cell_image')

    # Frozen ORM snapshot used by South when replaying this migration.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'main.bonemarrowbackground': {
            'Meta': {'object_name': 'BoneMarrowBackground'},
            'cell_count_instance': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.CellCountInstance']", 'unique': 'True'}),
            'ease_of_aspiration': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'haemodilution': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'particle_cellularity': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'particulate': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'site': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'trail_cellularity': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'main.cellcount': {
            'Meta': {'object_name': 'CellCount'},
            'abnormal_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'cell': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.CellType']"}),
            'cell_count_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.CellCountInstance']"}),
            'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'normal_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'main.cellcountinstance': {
            'Meta': {'object_name': 'CellCountInstance'},
            'datetime_submitted': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'datetime_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'overall_comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'tissue_type': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'main.cellimage': {
            'Meta': {'object_name': 'CellImage'},
            'celltype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.CellType']"}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'main.celltype': {
            'Meta': {'object_name': 'CellType'},
            'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'machine_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
            'readable_name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'main.erythropoiesisfindings': {
            'Meta': {'object_name': 'ErythropoiesisFindings'},
            'cell_count_instance': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.CellCountInstance']", 'unique': 'True'}),
            'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'megaloblastic_change': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'multinucleated_forms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'no_dysplasia': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'nuclear_asynchrony': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ragged_haemoglobinisation': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'main.granulopoiesisfindings': {
            'Meta': {'object_name': 'GranulopoiesisFindings'},
            'cell_count_instance': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.CellCountInstance']", 'unique': 'True'}),
            'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'dohle_bodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'hypogranular': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'no_dysplasia': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'nuclear_atypia': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'pelger': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'main.ironstain': {
            'Meta': {'object_name': 'IronStain'},
            'cell_count_instance': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.CellCountInstance']", 'unique': 'True'}),
            'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'iron_content': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'ringed_sideroblasts': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'stain_performed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'main.megakaryocytefeatures': {
            'Meta': {'object_name': 'MegakaryocyteFeatures'},
            'cell_count_instance': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.CellCountInstance']", 'unique': 'True'}),
            'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'fragmented': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'hypolobulated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'micromegakaryocytes': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'no_dysplasia': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'relative_count': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'main.similarlookinggroup': {
            'Meta': {'object_name': 'SimilarLookingGroup'},
            'cell_image': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.CellImage']", 'symmetrical': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        }
    }

    complete_apps = ['main']
"content_hash": "1f85e1292d1d1516d60b1933e7f30c01",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 182,
"avg_line_length": 66.55102040816327,
"alnum_prop": 0.5951650822855975,
"repo_name": "haematologic/cellcountr",
"id": "0a9464569fed2d9b71b1d6a54ba7193d20a6a0b0",
"size": "19590",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cellcounter/main/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "22151"
},
{
"name": "Python",
"bytes": "107429"
}
],
"symlink_target": ""
} |
"""Unit test utilities for Google C++ Mocking Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
# Determines path to gtest_test_utils and imports it.
SCRIPT_DIR = os.path.dirname(__file__) or '.'

# isdir resolves symbolic links.
gtest_tests_util_dir = os.path.join(SCRIPT_DIR, '../googletest/test')
if os.path.isdir(gtest_tests_util_dir):
  GTEST_TESTS_UTIL_DIR = gtest_tests_util_dir
else:
  # Fallback layout: this script sits one directory deeper in the tree.
  GTEST_TESTS_UTIL_DIR = os.path.join(SCRIPT_DIR, '../../googletest/test')
# Make gtest's shared test utilities importable before importing them below.
sys.path.append(GTEST_TESTS_UTIL_DIR)

# pylint: disable=C6204
import gtest_test_utils
def GetSourceDir():
  """Returns the absolute path of the directory where the .py files are."""
  # Pure delegation so gmock scripts share gtest's single implementation.
  return gtest_test_utils.GetSourceDir()
def GetTestExecutablePath(executable_name):
  """Returns the absolute path of the test binary given its name.

  The function will print a message and abort the program if the resulting file
  doesn't exist.

  Args:
    executable_name: name of the test binary that the test script runs.

  Returns:
    The absolute path of the test binary.
  """

  # Pure delegation to gtest's implementation.
  return gtest_test_utils.GetTestExecutablePath(executable_name)
def GetExitStatus(exit_code):
  """Returns the argument to exit(), or -1 if exit() wasn't called.

  Args:
    exit_code: the result value of os.system(command).
  """

  if os.name == 'nt':
    # On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
    # the argument to exit() directly.
    return exit_code

  # On Unix, os.WEXITSTATUS() must be used to extract the exit status
  # from the result of os.system().
  if os.WIFEXITED(exit_code):
    return os.WEXITSTATUS(exit_code)
  return -1
# Suppresses the "Invalid const name" lint complaint
# pylint: disable-msg=C6409

# Exposes utilities from gtest_test_utils. Re-exported here so gmock test
# scripts can import everything from this one module.
Subprocess = gtest_test_utils.Subprocess
TestCase = gtest_test_utils.TestCase
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
PREMATURE_EXIT_FILE_ENV_VAR = gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR

# pylint: enable-msg=C6409
def Main():
  """Runs the unit test."""
  # Defer to gtest's runner so flag and environment handling stay consistent.
  gtest_test_utils.Main()
| {
"content_hash": "bcb05801918c5fc9600a55c200e2101d",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 79,
"avg_line_length": 26.358024691358025,
"alnum_prop": 0.7128805620608899,
"repo_name": "FALCONN-LIB/FALCONN",
"id": "b51300015a312d70a39a84486e43ba6a9118d9d1",
"size": "3664",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "external/googletest/googlemock/test/gmock_test_utils.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1897734"
},
{
"name": "C++",
"bytes": "432139"
},
{
"name": "Makefile",
"bytes": "14219"
},
{
"name": "Python",
"bytes": "65975"
},
{
"name": "Shell",
"bytes": "1121"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the shared Incom rayshield x1 shield-generator template."""
    component = Tangible()
    component.template = "object/tangible/ship/components/shield_generator/shared_shd_incom_rayshield_x1.iff"
    component.attribute_template_id = 8
    component.stfName("space/space_item","shd_incom_rayshield_x1_n")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return component
"content_hash": "9a05f1a456c3a82ef9cafae5f0086abf",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 103,
"avg_line_length": 27.23076923076923,
"alnum_prop": 0.7175141242937854,
"repo_name": "obi-two/Rebelion",
"id": "ee1ffd23f541f4e0de1130c0d97b66fe3f3723c3",
"size": "499",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/ship/components/shield_generator/shared_shd_incom_rayshield_x1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
import yaml
import os
import logging
import html2text
class HugoPrinter:
    """Write parsed WordPress-export data out as a Hugo site skeleton.

    Recognised keyword arguments: meta, categories, tags, posts, drafts,
    basedir.  Unknown keywords are ignored; missing ones default to None.
    The constructor creates ``<basedir>/hugo/content/post`` on disk.
    """

    # keyword arguments recognised by __init__
    _ACCEPT_ATTRIBUTES = ("meta", "categories", "tags", "posts", "drafts",
                          "basedir")

    def __init__(self, *args, **kwargs):
        # Default every recognised attribute to None, then overwrite with
        # whatever the caller supplied.
        for attr in self._ACCEPT_ATTRIBUTES:
            setattr(self, attr, kwargs.get(attr))

        if self.basedir:
            self.basedir = os.path.join(self.basedir, "hugo")
        else:
            self.basedir = os.path.join('.', "hugo")
        self.config_path = os.path.join(self.basedir, "config.yaml")
        self.post_dir = os.path.join(self.basedir, "content/post")

        self.__prepare_dir(self.basedir)
        self.__prepare_dir(self.post_dir)

    def __prepare_dir(self, dir_path):
        """Ensure *dir_path* exists and is a directory, creating it if needed.

        Raises ValueError when the path exists but is not a directory.
        """
        if not os.path.exists(dir_path):
            # logging.warn is a deprecated alias; use warning with lazy args.
            logging.warning("Directory %s not exists, creating it now...",
                            dir_path)
            os.makedirs(dir_path)
        elif not os.path.isdir(dir_path):
            logging.critical("%s is existing and not a dir", dir_path)
            raise ValueError("%s is existing and not a dir" % dir_path)

    def author(self):
        """Return the configured author name, or "" when unavailable.

        Also tolerates ``meta`` being None (its default), which previously
        escaped as a TypeError.
        """
        try:
            return self.meta["author"]["name"]
        except (KeyError, TypeError):
            return ""

    def gen_config(self):
        """Write the Hugo ``config.yaml`` derived from the site meta data."""
        if self.meta is None:
            return
        conf = {
            "baseurl": self.meta["baseurl"],
            "title": self.meta["title"],
            "languageCode": self.meta["language"],
            "params": {
                "Description": self.meta["description"],
                "Author": self.meta["author"]["name"],
                "AuthorEmail": self.meta["author"]["email"],
            },
        }
        with open(self.config_path, "w") as fp:
            fp.write(yaml.dump(conf, default_flow_style=False,
                               explicit_start=True, allow_unicode=True))

    def gen_posts(self, download_assets=False):
        """Write one markdown file per post into the Hugo post directory.

        ``download_assets`` is accepted for interface compatibility but is
        currently unused.
        """
        if self.posts is None:
            return
        for p in self.posts:
            meta_info = {
                "title": p["title"],
                "author": p["creator"],
                "categories": p["categories"],
                "tags": p["tags"],
                # WordPress "YYYY-MM-DD HH:MM:SS" -> ISO-ish "YYYY-MM-DDTHH:MM:SS"
                "date": 'T'.join(p["post_date"].split(' ')),
            }
            page_path = os.path.join(
                self.post_dir,
                "%s-%s.md" % (p["post_date"].split(" ")[0], p["post_name"]))

            # Convert each side of the Hugo summary divider separately so the
            # divider itself survives the HTML -> markdown conversion.
            more_tag = '<!--more-->'
            content = more_tag.join([self.__convert_to_markdown(data)
                                     for data in p["content"].split(more_tag)])

            with open(page_path, "w") as fp:
                fp.write(yaml.dump(meta_info, default_flow_style=False,
                                   explicit_start=True, allow_unicode=True))
                fp.write("---\n")
                fp.write(content)

    def __convert_to_markdown(self, content):
        """Convert HTML-looking content to markdown; pass plain text through."""
        if "<br" in content or '<p' in content:
            return html2text.html2text(content).strip()
        else:
            return content.strip()
| {
"content_hash": "1b34c51cddd0abcf684ac539cd4a8f5c",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 113,
"avg_line_length": 32.369565217391305,
"alnum_prop": 0.525520483546004,
"repo_name": "hzmangel/wp2hugo",
"id": "918ea548e4e30701d785269c63ef2bce7d6728dc",
"size": "2978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hugo_printer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7316"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyxl
"""
Richtext definition
"""
from openpyxl.compat import unicode
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Alias,
Typed,
Integer,
Set,
NoneSet,
Bool,
String,
Sequence,
)
from openpyxl.descriptors.nested import (
NestedBool,
NestedInteger,
NestedString,
NestedText,
)
from openpyxl.styles.fonts import Font
class PhoneticProperties(Serialisable):
    """Serialisable counterpart of the ``<phoneticPr>`` element."""

    tagname = "phoneticPr"

    fontId = Integer()
    # katakana/hiragana conversion mode; None leaves the attribute unset
    type = NoneSet(values=(['halfwidthKatakana', 'fullwidthKatakana',
                            'Hiragana', 'noConversion']))
    alignment = NoneSet(values=(['noControl', 'left', 'center', 'distributed']))

    def __init__(self,
                 fontId=None,
                 type=None,
                 alignment=None,
                ):
        self.fontId = fontId
        self.type = type
        self.alignment = alignment
class PhoneticText(Serialisable):
    """Serialisable counterpart of the ``<rPh>`` phonetic-run element."""

    tagname = "rPh"

    # sb/eb: integer bounds of the run — presumably start/end character
    # offsets into the base text (ECMA-376); confirm before relying on it.
    sb = Integer()
    eb = Integer()
    t = NestedText(expected_type=unicode)
    text = Alias('t')

    def __init__(self,
                 sb=None,
                 eb=None,
                 t=None,
                ):
        self.sb = sb
        self.eb = eb
        self.t = t
class InlineFont(Font):
    """
    Font for inline text because, yes what you need are different objects with the same elements but different constraints.
    """

    tagname = "RPrElt"

    # All descriptors are reused from Font except rFont, which is serialised
    # as a nested string element instead of Font's name attribute.
    rFont = NestedString(allow_none=True)
    charset = Font.charset
    family = Font.family
    b =Font.b
    i = Font.i
    strike = Font.strike
    outline = Font.outline
    shadow = Font.shadow
    condense = Font.condense
    extend = Font.extend
    color = Font.color
    sz = Font.sz
    u = Font.u
    vertAlign = Font.vertAlign
    scheme = Font.scheme

    # element order matters for serialisation
    __elements__ = ('rFont', 'charset', 'family', 'b', 'i', 'strike',
                    'outline', 'shadow', 'condense', 'extend', 'color', 'sz', 'u',
                    'vertAlign', 'scheme')

    def __init__(self,
                 rFont=None,
                 charset=None,
                 family=None,
                 b=None,
                 i=None,
                 strike=None,
                 outline=None,
                 shadow=None,
                 condense=None,
                 extend=None,
                 color=None,
                 sz=None,
                 u=None,
                 vertAlign=None,
                 scheme=None,
                ):
        self.rFont = rFont
        self.charset = charset
        self.family = family
        self.b = b
        self.i = i
        self.strike = strike
        self.outline = outline
        self.shadow = shadow
        self.condense = condense
        self.extend = extend
        self.color = color
        self.sz = sz
        self.u = u
        self.vertAlign = vertAlign
        self.scheme = scheme
class RichText(Serialisable):
    """A single formatted run: an optional inline font plus its text."""

    tagname = "RElt"

    rPr = Typed(expected_type=InlineFont, allow_none=True)
    font = Alias("rPr")
    t = NestedText(expected_type=unicode, allow_none=True)
    text = Alias("t")

    __elements__ = ('rPr', 't')

    def __init__(self,
                 rPr=None,
                 t=None,
                ):
        self.rPr = rPr
        self.t = t
class Text(Serialisable):
    """A ``<text>`` element: plain text, rich-text runs and phonetic data."""

    tagname = "text"

    t = NestedText(allow_none=True, expected_type=unicode)
    plain = Alias("t")

    r = Sequence(expected_type=RichText, allow_none=True)
    formatted = Alias("r")

    rPh = Sequence(expected_type=PhoneticText, allow_none=True)
    phonetic = Alias("rPh")

    phoneticPr = Typed(expected_type=PhoneticProperties, allow_none=True)
    PhoneticProperties = Alias("phoneticPr")

    __elements__ = ('t', 'r', 'rPh', 'phoneticPr')

    def __init__(self, t=None, r=(), rPh=(), phoneticPr=None):
        self.t = t
        self.r = r
        self.rPh = rPh
        self.phoneticPr = phoneticPr

    @property
    def content(self):
        """
        Text stripped of all formatting
        """
        fragments = [] if self.plain is None else [self.plain]
        fragments.extend(run.t for run in self.formatted if run.t is not None)
        return u"".join(fragments)
| {
"content_hash": "6ae1ac20c3b0d3cc9e568d33629f4e9e",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 123,
"avg_line_length": 23.946236559139784,
"alnum_prop": 0.5273911091154019,
"repo_name": "kawamon/hue",
"id": "b3abf99d18c10223f864783b886cf09600ed1547",
"size": "4454",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/openpyxl-2.6.4/openpyxl/cell/text.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds the LoginCode model used for passwordless (code-based) login.
    dependencies = [
        ('accounts', '0002_auto_20150828_1427'),
    ]
    operations = [
        migrations.CreateModel(
            name='LoginCode',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # One-time code; uniqueness is enforced at the DB level.
                ('code', models.CharField(unique=True, max_length=20, verbose_name='code')),
                ('email', models.EmailField(max_length=63, verbose_name='email address')),
                # Set once on insert; used to expire stale codes.
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
| {
"content_hash": "269de330c184fa50fd5e6c0747ae0be4",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 114,
"avg_line_length": 32.13636363636363,
"alnum_prop": 0.5841584158415841,
"repo_name": "slava-sh/messenger",
"id": "16ff429a81608ccebbf46f4229806d10e0fc515c",
"size": "731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/accounts/migrations/0003_logincode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3710"
},
{
"name": "HTML",
"bytes": "321"
},
{
"name": "JavaScript",
"bytes": "57843"
},
{
"name": "Makefile",
"bytes": "745"
},
{
"name": "Nginx",
"bytes": "1021"
},
{
"name": "Python",
"bytes": "32572"
},
{
"name": "Shell",
"bytes": "446"
}
],
"symlink_target": ""
} |
'''
Chosen plaintext attack
'''
# NOTE(review): this file is Python 2 (print statement below).
import sys
import os
#Adding directory to the path where Python searches for modules
cmd_folder = os.path.dirname('/home/arvind/Documents/Me/My_Projects/Git/Crypto/modules/')
sys.path.insert(0, cmd_folder)
#Importing common crypto module
import block
if __name__ == "__main__":
    # Known filler bytes prepended to the secret by the ECB oracle.
    plaintext= 'a' * 1000
    block.break_ecb(plaintext)
    # NOTE(review): 'str' shadows the builtin str for the rest of the block.
    # It accumulates the bytes of the secret recovered so far.
    str= ''
    answer= ''
    # Shrink the known prefix one byte at a time so each unknown secret byte
    # slides into the 144-byte comparison window (9 blocks of 16 bytes —
    # presumably AES block size; confirm against block.break_ecb).
    for count in range(143,0,-1):
        encrypted_all_ascii= {}
        encrypted_short= block.break_ecb(plaintext[0:count])
        # Build a dictionary: candidate last byte -> resulting ciphertext.
        for asc in range(0,256):
            t1= plaintext[0:count]+str+chr(asc)
            encrypted_all_ascii[chr(asc)]= block.break_ecb(t1)
        # The candidate whose ciphertext matches the oracle's output over
        # the first 144 bytes reveals the next secret byte.
        for key,value in encrypted_all_ascii.items():
            if value[0:144] == encrypted_short[0:144]:
                str += key
            else:
                continue
        # NOTE(review): ''.join(str) over a string is a no-op copy.
        answer= ''.join(str)
        print answer[:-1]
| {
"content_hash": "2e109872e7897e24ff56c65437f07cdb",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 89,
"avg_line_length": 27.96875,
"alnum_prop": 0.6067039106145251,
"repo_name": "arvinddoraiswamy/blahblah",
"id": "910581fc1d1f7d02fc540152b2258e9704acfaa8",
"size": "895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cryptopals/Set2/c12.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4477"
},
{
"name": "Python",
"bytes": "149921"
},
{
"name": "Ruby",
"bytes": "2455"
},
{
"name": "Shell",
"bytes": "20492"
}
],
"symlink_target": ""
} |
from .canque import Submission # NOQA
| {
"content_hash": "3825403f8e786f62a302b340f0624515",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 38,
"avg_line_length": 39,
"alnum_prop": 0.7692307692307693,
"repo_name": "jonathansick/canque",
"id": "cf29ca9844e7b40ec4d098e3fd77f226a30cc946",
"size": "39",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "canque/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1678"
}
],
"symlink_target": ""
} |
"""
Volume driver for NetApp NFS storage.
"""
import os
import uuid
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder import interface
from cinder.objects import fields
from cinder import utils
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
from cinder.volume.drivers.netapp.dataontap.utils import capabilities
from cinder.volume.drivers.netapp.dataontap.utils import data_motion
from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls
from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
@interface.volumedriver
@six.add_metaclass(utils.TraceWrapperWithABCMetaclass)
class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
                           data_motion.DataMotionMixin):
    """NetApp NFS driver for Data ONTAP (Cluster-mode)."""
    # Config options that must be set for this driver to start.
    REQUIRED_CMODE_FLAGS = ['netapp_vserver']
def __init__(self, *args, **kwargs):
super(NetAppCmodeNfsDriver, self).__init__(*args, **kwargs)
self.driver_name = 'NetApp_NFS_Cluster_direct'
self.driver_mode = 'cluster'
self.configuration.append_config_values(na_opts.netapp_cluster_opts)
self.failed_over_backend_name = kwargs.get('active_backend_id')
self.failed_over = self.failed_over_backend_name is not None
self.replication_enabled = (
True if self.get_replication_backend_names(
self.configuration) else False)
    def do_setup(self, context):
        """Do the customized set up on client for cluster mode."""
        super(NetAppCmodeNfsDriver, self).do_setup(context)
        na_utils.check_flags(self.REQUIRED_CMODE_FLAGS, self.configuration)
        # cDOT API client; if we started failed over, talk to the
        # secondary backend instead of the primary.
        self.zapi_client = dot_utils.get_client_for_backend(
            self.failed_over_backend_name or self.backend_name)
        self.vserver = self.zapi_client.vserver
        # Storage service catalog
        self.ssc_library = capabilities.CapabilitiesLibrary(
            'nfs', self.vserver, self.zapi_client, self.configuration)
        self.ssc_library.check_api_permissions()
        self.using_cluster_credentials = (
            self.ssc_library.cluster_user_supported())
        # Performance monitoring library
        self.perf_library = perf_cmode.PerformanceCmodeLibrary(
            self.zapi_client)
    def _update_zapi_client(self, backend_name):
        """Set cDOT API client for the specified config backend stanza name."""
        self.zapi_client = dot_utils.get_client_for_backend(backend_name)
        self.vserver = self.zapi_client.vserver
        # Point the SSC and performance libraries at the new backend so
        # stats and capabilities reflect the failover target.
        self.ssc_library._update_for_failover(self.zapi_client,
                                              self._get_flexvol_to_pool_map())
        ssc = self.ssc_library.get_ssc()
        self.perf_library._update_for_failover(self.zapi_client, ssc)
    @utils.trace_method
    def check_for_setup_error(self):
        """Check that the driver is working and can communicate."""
        # Register periodic tasks before the base class validation runs.
        self._add_looping_tasks()
        super(NetAppCmodeNfsDriver, self).check_for_setup_error()
    def _add_looping_tasks(self):
        """Add tasks that need to be executed at a fixed interval."""
        # Note(cknight): Run the update once in the current thread to prevent a
        # race with the first invocation of _update_volume_stats.
        self._update_ssc()
        # Add the task that updates the slow-changing storage service catalog
        self.loopingcalls.add_task(self._update_ssc,
                                   loopingcalls.ONE_HOUR,
                                   loopingcalls.ONE_HOUR)
        # Add the task that runs other housekeeping tasks, such as deletion
        # of previously soft-deleted storage artifacts.
        self.loopingcalls.add_task(
            self._handle_housekeeping_tasks,
            loopingcalls.TEN_MINUTES,
            0)
        super(NetAppCmodeNfsDriver, self)._add_looping_tasks()
    def _handle_ems_logging(self):
        """Log autosupport messages."""
        # One generic driver message plus one message listing the pools
        # (backing flexvols) this backend serves.
        base_ems_message = dot_utils.build_ems_log_message_0(
            self.driver_name, self.app_version)
        self.zapi_client.send_ems_log_message(base_ems_message)
        pool_ems_message = dot_utils.build_ems_log_message_1(
            self.driver_name, self.app_version, self.vserver,
            self._get_backing_flexvol_names(), [])
        self.zapi_client.send_ems_log_message(pool_ems_message)
    def _handle_housekeeping_tasks(self):
        """Handle various cleanup activities."""
        active_backend = self.failed_over_backend_name or self.backend_name
        # Add the task that harvests soft-deleted QoS policy groups.
        # (Requires cluster-scoped credentials.)
        if self.using_cluster_credentials:
            self.zapi_client.remove_unused_qos_policy_groups()
        LOG.debug("Current service state: Replication enabled: %("
                  "replication)s. Failed-Over: %(failed)s. Active Backend "
                  "ID: %(active)s",
                  {
                      'replication': self.replication_enabled,
                      'failed': self.failed_over,
                      'active': active_backend,
                  })
        # Create pool mirrors if whole-backend replication configured
        # (skipped while failed over, since the primary is unavailable).
        if self.replication_enabled and not self.failed_over:
            self.ensure_snapmirrors(
                self.configuration, self.backend_name,
                self.ssc_library.get_ssc_flexvol_names())
    def _do_qos_for_volume(self, volume, extra_specs, cleanup=True):
        """Provision a QoS policy group and bind it to the volume's file.

        On failure the exception is re-raised; if ``cleanup`` is True the
        partially-created volume is removed first.
        """
        try:
            qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
                volume, extra_specs)
            self.zapi_client.provision_qos_policy_group(qos_policy_group_info)
            self._set_qos_policy_group_on_volume(volume, qos_policy_group_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error("Setting QoS for %s failed", volume['id'])
                if cleanup:
                    LOG.debug("Cleaning volume %s", volume['id'])
                    self._cleanup_volume_on_failure(volume)
def _get_volume_model_update(self, volume):
"""Provide model updates for a volume being created."""
if self.replication_enabled:
return {'replication_status': fields.ReplicationStatus.ENABLED}
    def _set_qos_policy_group_on_volume(self, volume, qos_policy_group_info):
        """Assign a QoS policy group to the file backing the volume.

        A None info object or a provider-managed policy (no name) is a
        no-op.
        """
        if qos_policy_group_info is None:
            return
        qos_policy_group_name = na_utils.get_qos_policy_group_name_from_info(
            qos_policy_group_info)
        if qos_policy_group_name is None:
            return
        target_path = '%s' % (volume['name'])
        # The pool part of the host string is the NFS share '<ip>:<export>'.
        share = volume_utils.extract_host(volume['host'], level='pool')
        export_path = share.split(':')[1]
        flex_vol_name = self.zapi_client.get_vol_by_junc_vserver(self.vserver,
                                                                 export_path)
        self.zapi_client.file_assign_qos(flex_vol_name,
                                         qos_policy_group_name,
                                         target_path)
    def _clone_backing_file_for_volume(self, volume_name, clone_name,
                                       volume_id, share=None,
                                       is_snapshot=False,
                                       source_snapshot=None):
        """Clone backing file for Cinder volume."""
        # NOTE(review): the source_snapshot argument is accepted but never
        # forwarded to clone_file — confirm whether callers that pass it
        # (e.g. CG snapshot cloning) expect it to take effect.
        (vserver, exp_volume) = self._get_vserver_and_exp_vol(volume_id, share)
        self.zapi_client.clone_file(exp_volume, volume_name, clone_name,
                                    vserver, is_snapshot=is_snapshot)
    def _get_vserver_and_exp_vol(self, volume_id=None, share=None):
        """Gets the vserver and export volume for share."""
        (host_ip, export_path) = self._get_export_ip_path(volume_id, share)
        # Resolve the owning vserver from the first LIF matching the IP.
        ifs = self.zapi_client.get_if_info_by_ip(host_ip)
        vserver = ifs[0].get_child_content('vserver')
        exp_volume = self.zapi_client.get_vol_by_junc_vserver(vserver,
                                                              export_path)
        return vserver, exp_volume
    def _update_volume_stats(self):
        """Retrieve stats info from vserver."""
        LOG.debug('Updating volume stats')
        data = {}
        backend_name = self.configuration.safe_get('volume_backend_name')
        data['volume_backend_name'] = backend_name or self.driver_name
        data['vendor_name'] = 'NetApp'
        data['driver_version'] = self.VERSION
        data['storage_protocol'] = 'nfs'
        data['pools'] = self._get_pool_stats(
            filter_function=self.get_filter_function(),
            goodness_function=self.get_goodness_function())
        data['sparse_copy_volume'] = True
        # Used for service state report
        data['replication_enabled'] = self.replication_enabled
        # Kick off asynchronous image-cache cleanup alongside the report.
        self._spawn_clean_cache_job()
        self._stats = data
    def _get_pool_stats(self, filter_function=None, goodness_function=None):
        """Retrieve pool (Data ONTAP flexvol) stats.
        Pool statistics are assembled from static driver capabilities, the
        Storage Service Catalog of flexvol attributes, and real-time capacity
        and controller utilization metrics. The pool name is the NFS share
        path.
        """
        pools = []
        ssc = self.ssc_library.get_ssc()
        if not ssc:
            return pools
        # Utilization and performance metrics require cluster-scoped
        # credentials
        if self.using_cluster_credentials:
            # Get up-to-date node utilization metrics just once
            self.perf_library.update_performance_cache(ssc)
            # Get up-to-date aggregate capacities just once
            aggregates = self.ssc_library.get_ssc_aggregates()
            aggr_capacities = self.zapi_client.get_aggregate_capacities(
                aggregates)
        else:
            aggr_capacities = {}
        for ssc_vol_name, ssc_vol_info in ssc.items():
            pool = dict()
            # Add storage service catalog data
            pool.update(ssc_vol_info)
            # Add driver capabilities and config info
            pool['QoS_support'] = self.using_cluster_credentials
            pool['consistencygroup_support'] = True
            pool['consistent_group_snapshot_enabled'] = True
            pool['multiattach'] = False
            # Add up-to-date capacity info
            nfs_share = ssc_vol_info['pool_name']
            capacity = self._get_share_capacity_info(nfs_share)
            pool.update(capacity)
            # Dedupe savings are only visible with cluster credentials.
            if self.using_cluster_credentials:
                dedupe_used = self.zapi_client.get_flexvol_dedupe_used_percent(
                    ssc_vol_name)
            else:
                dedupe_used = 0.0
            pool['netapp_dedupe_used_percent'] = na_utils.round_down(
                dedupe_used)
            aggregate_name = ssc_vol_info.get('netapp_aggregate')
            aggr_capacity = aggr_capacities.get(aggregate_name, {})
            pool['netapp_aggregate_used_percent'] = aggr_capacity.get(
                'percent-used', 0)
            # Add utilization data
            utilization = self.perf_library.get_node_utilization_for_pool(
                ssc_vol_name)
            pool['utilization'] = na_utils.round_down(utilization)
            pool['filter_function'] = filter_function
            pool['goodness_function'] = goodness_function
            # Add replication capabilities/stats
            pool.update(
                self.get_replication_backend_stats(self.configuration))
            pools.append(pool)
        return pools
    def _update_ssc(self):
        """Refresh the storage service catalog with the latest set of pools."""
        # Shares must be mounted before they can be mapped to flexvols.
        self._ensure_shares_mounted()
        self.ssc_library.update_ssc(self._get_flexvol_to_pool_map())
    def _get_flexvol_to_pool_map(self):
        """Get the flexvols that back all mounted shares.
        The map is of the format suitable for seeding the storage service
        catalog: {<flexvol_name> : {'pool_name': <share_path>}}
        """
        pools = {}
        vserver_addresses = self.zapi_client.get_operational_lif_addresses()
        for share in self._mounted_shares:
            # Shares have the form '<host>:<junction_path>'.
            host = share.split(':')[0]
            junction_path = share.split(':')[1]
            address = na_utils.resolve_hostname(host)
            # Skip shares not served by this vserver's operational LIFs.
            if address not in vserver_addresses:
                LOG.warning('Address not found for NFS share %s.', share)
                continue
            try:
                flexvol = self.zapi_client.get_flexvol(
                    flexvol_path=junction_path)
                pools[flexvol['name']] = {'pool_name': share}
            except exception.VolumeBackendAPIException:
                # Best effort: a missing flexvol removes one pool, it must
                # not abort discovery of the remaining shares.
                LOG.exception('Flexvol not found for NFS share %s.', share)
        return pools
def _shortlist_del_eligible_files(self, share, old_files):
"""Prepares list of eligible files to be deleted from cache."""
file_list = []
(vserver, exp_volume) = self._get_vserver_and_exp_vol(
volume_id=None, share=share)
for old_file in old_files:
path = '/vol/%s/%s' % (exp_volume, old_file)
u_bytes = self.zapi_client.get_file_usage(path, vserver)
file_list.append((old_file, u_bytes))
LOG.debug('Shortlisted files eligible for deletion: %s', file_list)
return file_list
def _share_match_for_ip(self, ip, shares):
"""Returns the share that is served by ip.
Multiple shares can have same dir path but
can be served using different ips. It finds the
share which is served by ip on same nfs server.
"""
ip_vserver = self._get_vserver_for_ip(ip)
if ip_vserver and shares:
for share in shares:
ip_sh = share.split(':')[0]
sh_vserver = self._get_vserver_for_ip(ip_sh)
if sh_vserver == ip_vserver:
LOG.debug('Share match found for ip %s', ip)
return share
LOG.debug('No share match found for ip %s', ip)
return None
def _get_vserver_for_ip(self, ip):
"""Get vserver for the mentioned ip."""
try:
ifs = self.zapi_client.get_if_info_by_ip(ip)
vserver = ifs[0].get_child_content('vserver')
return vserver
except Exception:
return None
def _is_share_clone_compatible(self, volume, share):
"""Checks if share is compatible with volume to host its clone."""
flexvol_name = self._get_flexvol_name_for_share(share)
return self._is_share_vol_type_match(volume, share, flexvol_name)
def _is_share_vol_type_match(self, volume, share, flexvol_name):
"""Checks if share matches volume type."""
LOG.debug("Found volume %(vol)s for share %(share)s.",
{'vol': flexvol_name, 'share': share})
extra_specs = na_utils.get_volume_extra_specs(volume)
flexvol_names = self.ssc_library.get_matching_flexvols_for_extra_specs(
extra_specs)
return flexvol_name in flexvol_names
def _get_flexvol_name_for_share(self, nfs_share):
"""Queries the SSC for the flexvol containing an NFS share."""
ssc = self.ssc_library.get_ssc()
for ssc_vol_name, ssc_vol_info in ssc.items():
if nfs_share == ssc_vol_info.get('pool_name'):
return ssc_vol_name
return None
    @utils.trace_method
    def delete_volume(self, volume):
        """Deletes a logical volume."""
        self._delete_backing_file_for_volume(volume)
        try:
            qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
                volume)
            self.zapi_client.mark_qos_policy_group_for_deletion(
                qos_policy_group_info)
        except Exception:
            # Don't blow up here if something went wrong de-provisioning the
            # QoS policy for the volume.
            pass
    def _delete_backing_file_for_volume(self, volume):
        """Deletes file on nfs share that backs a cinder volume."""
        try:
            LOG.debug('Deleting backing file for volume %s.', volume['id'])
            # Fast path: delete via the ONTAP API.
            self._delete_file(volume['id'], volume['name'])
        except Exception:
            LOG.exception('Could not delete volume %s on backend, '
                          'falling back to exec of "rm" command.',
                          volume['id'])
            try:
                # Fallback: base class removes the file over the NFS mount.
                super(NetAppCmodeNfsDriver, self).delete_volume(volume)
            except Exception:
                # Deliberately swallowed: deletion failure is logged, not
                # propagated, so the volume can still be marked deleted.
                LOG.exception('Exec of "rm" command on backing file for '
                              '%s was unsuccessful.', volume['id'])
    def _delete_file(self, file_id, file_name):
        """Delete a backing file via the ONTAP file API."""
        (_vserver, flexvol) = self._get_export_ip_path(volume_id=file_id)
        # NOTE(review): 'flexvol' here is the export path returned by
        # _get_export_ip_path; the concatenation assumes it begins with
        # '/' — confirm against _get_export_ip_path.
        path_on_backend = '/vol' + flexvol + '/' + file_name
        LOG.debug('Attempting to delete file %(path)s for ID %(file_id)s on '
                  'backend.', {'path': path_on_backend, 'file_id': file_id})
        self.zapi_client.delete_file(path_on_backend)
    @utils.trace_method
    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        # Snapshots are plain files on the share, like volumes.
        self._delete_backing_file_for_snapshot(snapshot)
    def _delete_backing_file_for_snapshot(self, snapshot):
        """Deletes file on nfs share that backs a cinder volume."""
        try:
            LOG.debug('Deleting backing file for snapshot %s.', snapshot['id'])
            # Fast path: delete via the ONTAP API.
            self._delete_file(snapshot['volume_id'], snapshot['name'])
        except Exception:
            LOG.exception('Could not delete snapshot %s on backend, '
                          'falling back to exec of "rm" command.',
                          snapshot['id'])
            try:
                # delete_file_from_share
                super(NetAppCmodeNfsDriver, self).delete_snapshot(snapshot)
            except Exception:
                # Deliberately swallowed; failure is logged only.
                LOG.exception('Exec of "rm" command on backing file for'
                              ' %s was unsuccessful.', snapshot['id'])
def _get_ip_verify_on_cluster(self, host):
"""Verifies if host on same cluster and returns ip."""
ip = na_utils.resolve_hostname(host)
vserver = self._get_vserver_for_ip(ip)
if not vserver:
raise exception.NotFound(_("Unable to locate an SVM that is "
"managing the IP address '%s'") % ip)
return ip
    def _copy_from_cache(self, volume, image_id, cache_result):
        """Try copying image file_name from cached file_name.

        Returns True when the volume was populated from the cache, either
        by a same-share file clone or by the copy-offload tool.
        """
        copied = False
        cache_copy, found_local = self._find_image_location(cache_result,
                                                            volume)
        try:
            if found_local:
                # Cached copy lives on the destination share: cheap clone.
                LOG.debug("Trying copy from cache using cloning.")
                (nfs_share, file_name) = cache_copy
                self._clone_file_dst_exists(
                    nfs_share, file_name, volume['name'], dest_exists=True)
                LOG.debug("Copied image from cache to volume %s using "
                          "cloning.", volume['id'])
                copied = True
            elif (cache_copy and
                    self.configuration.netapp_copyoffload_tool_path):
                # Remote cached copy: use copy offload if the tool exists.
                LOG.debug("Trying copy from cache using copy offload.")
                self._copy_from_remote_cache(volume, image_id, cache_copy)
                copied = True
        except Exception:
            # Best effort: a cache miss just means the caller downloads the
            # image the slow way.
            LOG.exception('Error in workflow copy from cache.')
        return copied
def _find_image_location(self, cache_result, volume):
"""Finds the location of a cached image.
Returns image location local to the NFS share, that matches the
volume_id, if it exists. Otherwise returns the last entry in
cache_result or None if cache_result is empty.
"""
found_local_copy = False
cache_copy = None
provider_location = volume_utils.extract_host(volume['host'],
level='pool')
for res in cache_result:
(share, file_name) = res
if share == provider_location:
cache_copy = res
found_local_copy = True
break
else:
cache_copy = res
return cache_copy, found_local_copy
    def _copy_from_remote_cache(self, volume, image_id, cache_copy):
        """Copies the remote cached image to the provided volume.
        Executes the copy offload binary which copies the cached image to
        the destination path of the provided volume. Also registers the new
        copy of the image as a cached image.
        """
        (nfs_share, file_name) = cache_copy
        col_path = self.configuration.netapp_copyoffload_tool_path
        src_ip, src_path = self._get_source_ip_and_path(nfs_share, file_name)
        dest_ip, dest_path = self._get_destination_ip_and_path(volume)
        # Always run copy offload as regular user, it's sufficient
        # and rootwrap doesn't allow copy offload to run as root anyways.
        self._execute(col_path, src_ip, dest_ip, src_path, dest_path,
                      run_as_root=False, check_exit_code=0)
        self._register_image_in_cache(volume, image_id)
        LOG.debug("Copied image from cache to volume %s using copy offload.",
                  volume['id'])
def _get_source_ip_and_path(self, nfs_share, file_name):
src_ip = self._get_ip_verify_on_cluster(nfs_share.split(':')[0])
src_path = os.path.join(nfs_share.split(':')[1], file_name)
return src_ip, src_path
def _get_destination_ip_and_path(self, volume):
share = volume_utils.extract_host(volume['host'], level='pool')
share_ip_and_path = share.split(":")
dest_ip = self._get_ip_verify_on_cluster(share_ip_and_path[0])
dest_path = os.path.join(share_ip_and_path[1], volume['name'])
return dest_ip, dest_path
    def _clone_file_dst_exists(self, share, src_name, dst_name,
                               dest_exists=False):
        """Clone file even if dest exists."""
        (vserver, exp_volume) = self._get_vserver_and_exp_vol(share=share)
        self.zapi_client.clone_file(exp_volume, src_name, dst_name, vserver,
                                    dest_exists=dest_exists)
    def _copy_from_img_service(self, context, volume, image_service,
                               image_id):
        """Copies from the image service using copy offload.

        Returns True on success; raises NotFound when no image location is
        reachable from this cluster.
        """
        LOG.debug("Trying copy from image service using copy offload.")
        image_loc = image_service.get_location(context, image_id)
        locations = self._construct_image_nfs_url(image_loc)
        src_ip = None
        selected_loc = None
        cloned = False
        # this will match the first location that has a valid IP on cluster
        for location in locations:
            conn, dr = self._check_get_nfs_path_segs(location)
            if conn:
                try:
                    src_ip = self._get_ip_verify_on_cluster(conn.split(':')[0])
                    selected_loc = location
                    break
                except exception.NotFound:
                    pass
        if src_ip is None:
            raise exception.NotFound(_("Source host details not found."))
        (__, ___, img_file) = selected_loc.rpartition('/')
        src_path = os.path.join(dr, img_file)
        dst_ip, vol_path = self._get_destination_ip_and_path(volume)
        share_path = vol_path.rsplit("/", 1)[0]
        dst_share = dst_ip + ':' + share_path
        # tmp file is required to deal with img formats
        tmp_img_file = six.text_type(uuid.uuid4())
        col_path = self.configuration.netapp_copyoffload_tool_path
        img_info = image_service.show(context, image_id)
        self._check_share_can_hold_size(dst_share, img_info['size'])
        run_as_root = self._execute_as_root
        dst_dir = self._get_mount_point_for_share(dst_share)
        dst_img_local = os.path.join(dst_dir, tmp_img_file)
        try:
            dst_img_serv_path = os.path.join(
                share_path, tmp_img_file)
            # Always run copy offload as regular user, it's sufficient
            # and rootwrap doesn't allow copy offload to run as root
            # anyways.
            self._execute(col_path, src_ip, dst_ip, src_path,
                          dst_img_serv_path, run_as_root=False,
                          check_exit_code=0)
            # Wait for the copied file to become visible on the local mount.
            self._discover_file_till_timeout(dst_img_local, timeout=120)
            LOG.debug('Copied image %(img)s to tmp file %(tmp)s.',
                      {'img': image_id, 'tmp': tmp_img_file})
            dst_img_cache_local = os.path.join(dst_dir,
                                               'img-cache-%s' % image_id)
            if img_info['disk_format'] == 'raw':
                # Raw images need no conversion: clone straight into the
                # volume and keep the tmp file as the cache entry.
                LOG.debug('Image is raw %s.', image_id)
                self._clone_file_dst_exists(dst_share, tmp_img_file,
                                            volume['name'], dest_exists=True)
                self._move_nfs_file(dst_img_local, dst_img_cache_local)
                LOG.debug('Copied raw image %(img)s to volume %(vol)s.',
                          {'img': image_id, 'vol': volume['id']})
            else:
                LOG.debug('Image will be converted to raw %s.', image_id)
                img_conv = six.text_type(uuid.uuid4())
                dst_img_conv_local = os.path.join(dst_dir, img_conv)
                # Checking against image size which is approximate check
                self._check_share_can_hold_size(dst_share, img_info['size'])
                try:
                    image_utils.convert_image(dst_img_local,
                                              dst_img_conv_local, 'raw',
                                              run_as_root=run_as_root)
                    # Verify conversion really produced raw before cloning.
                    data = image_utils.qemu_img_info(dst_img_conv_local,
                                                     run_as_root=run_as_root)
                    if data.file_format != "raw":
                        raise exception.InvalidResults(
                            _("Converted to raw, but format is now %s.")
                            % data.file_format)
                    else:
                        self._clone_file_dst_exists(dst_share, img_conv,
                                                    volume['name'],
                                                    dest_exists=True)
                        self._move_nfs_file(dst_img_conv_local,
                                            dst_img_cache_local)
                        LOG.debug('Copied locally converted raw image'
                                  ' %(img)s to volume %(vol)s.',
                                  {'img': image_id, 'vol': volume['id']})
                finally:
                    # Clean up the converted tmp file on any outcome.
                    if os.path.exists(dst_img_conv_local):
                        self._delete_file_at_path(dst_img_conv_local)
            cloned = True
        finally:
            # Clean up the downloaded tmp file (no-op if it was moved into
            # the image cache above).
            if os.path.exists(dst_img_local):
                self._delete_file_at_path(dst_img_local)
        return cloned
    @utils.trace_method
    def unmanage(self, volume):
        """Removes the specified volume from Cinder management.
        Does not delete the underlying backend storage object. A log entry
        will be made to notify the Admin that the volume is no longer being
        managed.
        :param volume: Cinder volume to unmanage
        """
        try:
            qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
                volume)
            self.zapi_client.mark_qos_policy_group_for_deletion(
                qos_policy_group_info)
        except Exception:
            # Unmanage even if there was a problem deprovisioning the
            # associated qos policy group.
            pass
        super(NetAppCmodeNfsDriver, self).unmanage(volume)
    def failover_host(self, context, volumes, secondary_id=None, groups=None):
        """Failover a backend to a secondary replication target."""
        # NOTE(review): context and groups are intentionally unused; the
        # DataMotionMixin implementation only needs volumes/secondary_id.
        return self._failover_host(volumes, secondary_id=secondary_id)
    def _get_backing_flexvol_names(self):
        """Returns a list of backing flexvol names."""
        # NOTE(review): on Python 3 this is a dict view, not a list —
        # callers appear to only iterate it, so it is kept as-is.
        return self.ssc_library.get_ssc().keys()
def _get_flexvol_names_from_hosts(self, hosts):
"""Returns a set of flexvol names."""
flexvols = set()
ssc = self.ssc_library.get_ssc()
for host in hosts:
pool_name = volume_utils.extract_host(host, level='pool')
for flexvol_name, ssc_volume_data in ssc.items():
if ssc_volume_data['pool_name'] == pool_name:
flexvols.add(flexvol_name)
return flexvols
    @utils.trace_method
    def delete_group_snapshot(self, context, group_snapshot, snapshots):
        """Delete files backing each snapshot in the group snapshot.
        :return: An implicit update of snapshot models that the manager will
                 interpret and subsequently set the model state to deleted.
        """
        for snapshot in snapshots:
            self._delete_backing_file_for_snapshot(snapshot)
            LOG.debug("Snapshot %s deletion successful", snapshot['name'])
        # (None, None) tells the manager to apply its default model update.
        return None, None
    @utils.trace_method
    def create_group(self, context, group):
        """Driver entry point for creating a generic volume group.
        ONTAP does not maintain an actual group construct. As a result, no
        communication to the backend is necessary for generic volume group
        creation.
        :returns: Hard-coded model update for generic volume group model.
        """
        model_update = {'status': fields.GroupStatus.AVAILABLE}
        return model_update
    @utils.trace_method
    def delete_group(self, context, group, volumes):
        """Driver entry point for deleting a generic volume group.
        :returns: Updated group model and list of volume models for the volumes
                  that were deleted.
        """
        model_update = {'status': fields.GroupStatus.DELETED}
        volumes_model_update = []
        for volume in volumes:
            try:
                self._delete_file(volume['id'], volume['name'])
                # NOTE(review): literal 'deleted' vs the GroupStatus enum
                # below — presumably both resolve to the same string.
                volumes_model_update.append(
                    {'id': volume['id'], 'status': 'deleted'})
            except Exception:
                # Record the failure per volume and keep deleting the rest.
                volumes_model_update.append(
                    {'id': volume['id'],
                     'status': fields.GroupStatus.ERROR_DELETING})
                LOG.exception("Volume %(vol)s in the group could not be "
                              "deleted.", {'vol': volume})
        return model_update, volumes_model_update
    @utils.trace_method
    def update_group(self, context, group, add_volumes=None,
                     remove_volumes=None):
        """Driver entry point for updating a generic volume group.
        Since no actual group construct is ever created in ONTAP, it is not
        necessary to update any metadata on the backend. Since this is a NO-OP,
        there is guaranteed to be no change in any of the volumes' statuses.
        """
        # (group_update, add_update, remove_update) — all unchanged.
        return None, None, None
    @utils.trace_method
    def create_group_snapshot(self, context, group_snapshot, snapshots):
        """Creates a Cinder group snapshot object.
        The Cinder group snapshot object is created by making use of an ONTAP
        consistency group snapshot in order to provide write-order consistency
        for a set of flexvols snapshots. First, a list of the flexvols backing
        the given Cinder group must be gathered. An ONTAP group-snapshot of
        these flexvols will create a snapshot copy of all the Cinder volumes in
        the generic volume group. For each Cinder volume in the group, it is
        then necessary to clone its backing file from the ONTAP cg-snapshot.
        The naming convention used for the clones is what indicates the
        clone's role as a Cinder snapshot and its inclusion in a Cinder group.
        The ONTAP cg-snapshot of the flexvols is deleted after the cloning
        operation is completed.
        :returns: An implicit update for the group snapshot and snapshot models
                  that is then used by the manager to set the models to
                  available.
        """
        try:
            if volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
                # Write-order-consistent path via an ONTAP cg-snapshot.
                self._create_consistent_group_snapshot(group_snapshot,
                                                       snapshots)
            else:
                # Plain generic group: snapshot each volume independently.
                for snapshot in snapshots:
                    self._clone_backing_file_for_volume(
                        snapshot['volume_name'], snapshot['name'],
                        snapshot['volume_id'], is_snapshot=True)
        except Exception as ex:
            err_msg = (_("Create group snapshot failed (%s).") % ex)
            LOG.exception(err_msg, resource=group_snapshot)
            raise exception.NetAppDriverException(err_msg)
        return None, None
def _create_consistent_group_snapshot(self, group_snapshot, snapshots):
hosts = [snapshot['volume']['host'] for snapshot in snapshots]
flexvols = self._get_flexvol_names_from_hosts(hosts)
# Create snapshot for backing flexvol
self.zapi_client.create_cg_snapshot(flexvols, group_snapshot['id'])
# Start clone process for snapshot files
for snapshot in snapshots:
self._clone_backing_file_for_volume(
snapshot['volume']['name'], snapshot['name'],
snapshot['volume']['id'], source_snapshot=group_snapshot['id'])
# Delete backing flexvol snapshots
for flexvol_name in flexvols:
try:
self.zapi_client.wait_for_busy_snapshot(
flexvol_name, group_snapshot['id'])
self.zapi_client.delete_snapshot(
flexvol_name, group_snapshot['id'])
except exception.SnapshotIsBusy:
self.zapi_client.mark_snapshot_for_deletion(
flexvol_name, group_snapshot['id'])
@utils.trace_method
def create_group_from_src(self, context, group, volumes,
group_snapshot=None, sorted_snapshots=None,
source_group=None, sorted_source_vols=None):
"""Creates a group from a group snapshot or a group of cinder vols.
:returns: An implicit update for the volumes model that is
interpreted by the manager as a successful operation.
"""
LOG.debug("VOLUMES %s ", ', '.join([vol['id'] for vol in volumes]))
model_update = None
volumes_model_update = []
if group_snapshot:
vols = zip(volumes, sorted_snapshots)
for volume, snapshot in vols:
update = self.create_volume_from_snapshot(
volume, snapshot)
update['id'] = volume['id']
volumes_model_update.append(update)
elif source_group and sorted_source_vols:
hosts = [source_vol['host'] for source_vol in sorted_source_vols]
flexvols = self._get_flexvol_names_from_hosts(hosts)
# Create snapshot for backing flexvol
snapshot_name = 'snapshot-temp-' + source_group['id']
self.zapi_client.create_cg_snapshot(flexvols, snapshot_name)
# Start clone process for new volumes
vols = zip(volumes, sorted_source_vols)
for volume, source_vol in vols:
self._clone_backing_file_for_volume(
source_vol['name'], volume['name'],
source_vol['id'], source_snapshot=snapshot_name)
volume_model_update = (
self._get_volume_model_update(volume) or {})
volume_model_update.update({
'id': volume['id'],
'provider_location': source_vol['provider_location'],
})
volumes_model_update.append(volume_model_update)
# Delete backing flexvol snapshots
for flexvol_name in flexvols:
self.zapi_client.wait_for_busy_snapshot(
flexvol_name, snapshot_name)
self.zapi_client.delete_snapshot(flexvol_name, snapshot_name)
else:
LOG.error("Unexpected set of parameters received when "
"creating group from source.")
model_update = {'status': fields.GroupStatus.ERROR}
return model_update, volumes_model_update
| {
"content_hash": "355e155c6565c3061b22a9d58aea50a5",
"timestamp": "",
"source": "github",
"line_count": 860,
"max_line_length": 79,
"avg_line_length": 43.30232558139535,
"alnum_prop": 0.5797261009667025,
"repo_name": "j-griffith/cinder",
"id": "fd1d1c6a61a132350f8569a30fc286349c88e33c",
"size": "38215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/netapp/dataontap/nfs_cmode.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "621"
},
{
"name": "Python",
"bytes": "20155959"
},
{
"name": "Shell",
"bytes": "16354"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.