text stringlengths 4 1.02M | meta dict |
|---|---|
""" The instance type extra specs extension"""
from webob import exc
from nova import db
from nova import quota
from nova.api.openstack import extensions
from nova.api.openstack import faults
from nova.api.openstack import wsgi
class FlavorExtraSpecsController(object):
    """The flavor extra specs API controller for the OpenStack API.

    Manages the free-form key/value pairs ("extra specs") attached to an
    instance type (flavor): list, create/update, show and delete.
    """

    def _get_extra_specs(self, context, flavor_id):
        """Return all extra specs of *flavor_id* as {'extra_specs': {...}}."""
        extra_specs = db.api.instance_type_extra_specs_get(context, flavor_id)
        # Copy into a plain dict so the response body is serializable; dict()
        # works on both Python 2 and 3 (the old iteritems() loop was Py2-only).
        return dict(extra_specs=dict(extra_specs))

    def _check_body(self, body):
        """Raise HTTPBadRequest when the request carries no body."""
        # PEP 8: compare against None with 'is', not '=='.
        if body is None or body == "":
            expl = _('No Request Body')
            raise exc.HTTPBadRequest(explanation=expl)

    def index(self, req, flavor_id):
        """Return the list of extra specs for a given flavor."""
        context = req.environ['nova.context']
        return self._get_extra_specs(context, flavor_id)

    def create(self, req, flavor_id, body):
        """Create (or update) the extra specs given in the request body."""
        self._check_body(body)
        context = req.environ['nova.context']
        specs = body.get('extra_specs')
        try:
            db.api.instance_type_extra_specs_update_or_create(context,
                                                              flavor_id,
                                                              specs)
        except quota.QuotaError as error:
            self._handle_quota_error(error)
        return body

    def update(self, req, flavor_id, id, body):
        """Update the single extra spec *id*; body must be exactly {id: value}."""
        self._check_body(body)
        context = req.environ['nova.context']
        if id not in body:
            expl = _('Request body and URI mismatch')
            raise exc.HTTPBadRequest(explanation=expl)
        if len(body) > 1:
            expl = _('Request body contains too many items')
            raise exc.HTTPBadRequest(explanation=expl)
        try:
            db.api.instance_type_extra_specs_update_or_create(context,
                                                              flavor_id,
                                                              body)
        except quota.QuotaError as error:
            self._handle_quota_error(error)
        return body

    def show(self, req, flavor_id, id):
        """Return a single extra spec item, or a 404 fault for unknown keys."""
        context = req.environ['nova.context']
        specs = self._get_extra_specs(context, flavor_id)
        if id in specs['extra_specs']:
            return {id: specs['extra_specs'][id]}
        else:
            return faults.Fault(exc.HTTPNotFound())

    def delete(self, req, flavor_id, id):
        """Delete an existing extra spec."""
        context = req.environ['nova.context']
        db.api.instance_type_extra_specs_delete(context, flavor_id, id)

    def _handle_quota_error(self, error):
        """Reraise quota errors as api-specific http exceptions."""
        if error.code == "MetadataLimitExceeded":
            raise exc.HTTPBadRequest(explanation=error.message)
        raise error
class Flavorextraspecs(extensions.ExtensionDescriptor):
    """Extension descriptor registering the flavor extra specs resource."""

    def get_name(self):
        """Human-readable extension name."""
        return "FlavorExtraSpecs"

    def get_alias(self):
        """URL alias under which the extension is exposed."""
        return "os-flavor-extra-specs"

    def get_description(self):
        return "Instance type (flavor) extra specs"

    def get_namespace(self):
        return ("http://docs.openstack.org/ext/flavor_extra_specs"
                "/api/v1.1")

    def get_updated(self):
        return "2011-06-23T00:00:00+00:00"

    def get_resources(self):
        """Mount the controller as a sub-resource of /flavors/{id}."""
        controller = FlavorExtraSpecsController()
        extra_specs_resource = extensions.ResourceExtension(
            'os-extra_specs',
            controller,
            parent=dict(member_name='flavor', collection_name='flavors'))
        return [extra_specs_resource]
| {
"content_hash": "2af722eb1300e34e9fdc53247ac371cf",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 78,
"avg_line_length": 34.908256880733944,
"alnum_prop": 0.5802890932982917,
"repo_name": "xushiwei/nova",
"id": "2d897a1dae308f697942e4075b9ccde0dcc99ec1",
"size": "4499",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/api/openstack/contrib/flavorextraspecs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7412"
},
{
"name": "Python",
"bytes": "4170357"
},
{
"name": "Shell",
"bytes": "33002"
}
],
"symlink_target": ""
} |
"""The tests for Select device conditions."""
from __future__ import annotations
import pytest
import voluptuous_serialize
from homeassistant.components import automation
from homeassistant.components.device_automation import DeviceAutomationType
from homeassistant.components.select import DOMAIN
from homeassistant.components.select.device_condition import (
async_get_condition_capabilities,
)
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.helpers import (
config_validation as cv,
device_registry,
entity_registry,
)
from homeassistant.helpers.entity import EntityCategory
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass: HomeAssistant) -> device_registry.DeviceRegistry:
    """Return an empty, loaded, device registry."""
    registry = mock_device_registry(hass)
    return registry
@pytest.fixture
def entity_reg(hass: HomeAssistant) -> entity_registry.EntityRegistry:
    """Return an empty, loaded, entity registry."""
    registry = mock_registry(hass)
    return registry
@pytest.fixture
def calls(hass: HomeAssistant) -> list[ServiceCall]:
    """Track calls to a mock service."""
    tracked = async_mock_service(hass, "test", "automation")
    return tracked
async def test_get_conditions(
    hass: HomeAssistant,
    device_reg: device_registry.DeviceRegistry,
    entity_reg: entity_registry.EntityRegistry,
) -> None:
    """Test we get the expected conditions from a select."""
    # Register a config entry + device so the select entity can be tied
    # to a concrete device in the registry.
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
    # The select platform exposes exactly one device condition type.
    expected_conditions = [
        {
            "condition": "device",
            "domain": DOMAIN,
            "type": "selected_option",
            "device_id": device_entry.id,
            "entity_id": f"{DOMAIN}.test_5678",
            "metadata": {"secondary": False},
        }
    ]
    conditions = await async_get_device_automations(
        hass, DeviceAutomationType.CONDITION, device_entry.id
    )
    # Order-insensitive comparison of the two condition lists.
    assert_lists_same(conditions, expected_conditions)
@pytest.mark.parametrize(
    "hidden_by,entity_category",
    (
        (entity_registry.RegistryEntryHider.INTEGRATION, None),
        (entity_registry.RegistryEntryHider.USER, None),
        (None, EntityCategory.CONFIG),
        (None, EntityCategory.DIAGNOSTIC),
    ),
)
async def test_get_conditions_hidden_auxiliary(
    hass,
    device_reg,
    entity_reg,
    hidden_by,
    entity_category,
):
    """Test we get the expected conditions from a hidden or auxiliary entity."""
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    # Create the entity either hidden (by integration/user) or categorized
    # as config/diagnostic, depending on the parametrization.
    entity_reg.async_get_or_create(
        DOMAIN,
        "test",
        "5678",
        device_id=device_entry.id,
        entity_category=entity_category,
        hidden_by=hidden_by,
    )
    # Such entities are still offered, but flagged as secondary in metadata.
    expected_conditions = [
        {
            "condition": "device",
            "domain": DOMAIN,
            "type": condition,
            "device_id": device_entry.id,
            "entity_id": f"{DOMAIN}.test_5678",
            "metadata": {"secondary": True},
        }
        for condition in ["selected_option"]
    ]
    conditions = await async_get_device_automations(
        hass, DeviceAutomationType.CONDITION, device_entry.id
    )
    assert_lists_same(conditions, expected_conditions)
async def test_if_selected_option(
    hass: HomeAssistant, calls: list[ServiceCall]
) -> None:
    """Test for selected_option conditions."""
    # Two automations: each fires on its own event and passes only when the
    # select entity currently holds the matching option.
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {"platform": "event", "event_type": "test_event1"},
                    "condition": [
                        {
                            "condition": "device",
                            "domain": DOMAIN,
                            "device_id": "",
                            "entity_id": "select.entity",
                            "type": "selected_option",
                            "option": "option1",
                        }
                    ],
                    "action": {
                        "service": "test.automation",
                        "data": {
                            "result": "option1 - {{ trigger.platform }} - {{ trigger.event.event_type }}"
                        },
                    },
                },
                {
                    "trigger": {"platform": "event", "event_type": "test_event2"},
                    "condition": [
                        {
                            "condition": "device",
                            "domain": DOMAIN,
                            "device_id": "",
                            "entity_id": "select.entity",
                            "type": "selected_option",
                            "option": "option2",
                        }
                    ],
                    "action": {
                        "service": "test.automation",
                        "data": {
                            "result": "option2 - {{ trigger.platform }} - {{ trigger.event.event_type }}"
                        },
                    },
                },
            ]
        },
    )
    # Test with non existing entity: neither condition can pass.
    hass.bus.async_fire("test_event1")
    hass.bus.async_fire("test_event2")
    await hass.async_block_till_done()
    assert len(calls) == 0
    # With option1 selected, only the first automation's condition passes.
    hass.states.async_set(
        "select.entity", "option1", {"options": ["option1", "option2"]}
    )
    hass.bus.async_fire("test_event1")
    hass.bus.async_fire("test_event2")
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].data["result"] == "option1 - event - test_event1"
    # With option2 selected, only the second automation's condition passes.
    hass.states.async_set(
        "select.entity", "option2", {"options": ["option1", "option2"]}
    )
    hass.bus.async_fire("test_event1")
    hass.bus.async_fire("test_event2")
    await hass.async_block_till_done()
    assert len(calls) == 2
    assert calls[1].data["result"] == "option2 - event - test_event2"
async def test_get_condition_capabilities(hass: HomeAssistant) -> None:
    """Test we get the expected capabilities from a select condition."""
    config = {
        "platform": "device",
        "domain": DOMAIN,
        "type": "selected_option",
        "entity_id": "select.test",
        "option": "option1",
    }
    # Test when entity doesn't exists
    capabilities = await async_get_condition_capabilities(hass, config)
    assert capabilities
    assert "extra_fields" in capabilities
    # Without a backing state, the option selector has no choices yet.
    assert voluptuous_serialize.convert(
        capabilities["extra_fields"], custom_serializer=cv.custom_serializer
    ) == [
        {
            "name": "option",
            "required": True,
            "type": "select",
            "options": [],
        },
        {
            "name": "for",
            "optional": True,
            "type": "positive_time_period_dict",
        },
    ]
    # Mock an entity
    hass.states.async_set("select.test", "option1", {"options": ["option1", "option2"]})
    # Test if we get the right capabilities now
    # The selector should offer the entity's options as (value, label) pairs.
    capabilities = await async_get_condition_capabilities(hass, config)
    assert capabilities
    assert "extra_fields" in capabilities
    assert voluptuous_serialize.convert(
        capabilities["extra_fields"], custom_serializer=cv.custom_serializer
    ) == [
        {
            "name": "option",
            "required": True,
            "type": "select",
            "options": [("option1", "option1"), ("option2", "option2")],
        },
        {
            "name": "for",
            "optional": True,
            "type": "positive_time_period_dict",
        },
    ]
| {
"content_hash": "878f7d909bf0ce8acdd280593a806e56",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 105,
"avg_line_length": 32.62992125984252,
"alnum_prop": 0.556949806949807,
"repo_name": "nkgilley/home-assistant",
"id": "7c1dc443e5626cdb246bbc9a3f633cbd756d466c",
"size": "8288",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/select/test_device_condition.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "51597279"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
import config
import httplib
import urllib
import base64
import sys
import requests
# #### HTTP Headers + Encoding the URL
def vision(filepath):
    """Send the image at *filepath* to the Microsoft Cognitive Services
    Vision "analyze" endpoint.

    Returns the raw response body (JSON string) on success, or None when
    the request fails.
    """
    # Read the image once and close the handle promptly (the old code
    # leaked the open file object).
    with open(filepath, 'rb') as image_file:
        body = image_file.read()
    headers = {
        'Content-Type': 'application/octet-stream',
        'Ocp-Apim-Subscription-Key': config.api_key
    }
    params = urllib.urlencode({
        # Features to extract from the image; tweak as needed.
        'visualFeatures': 'Categories,Tags,Description',
        'language': 'en'
    })
    # #### Image URL and API Call
    # Alternative: send a URL payload instead of raw bytes, e.g.
    # body = "{'url': 'http://data.whicdn.com/images/21298747/thumb.jpg'}"
    try:
        conn = httplib.HTTPSConnection('westus.api.cognitive.microsoft.com')
        conn.request("POST", "/vision/v1.0/analyze?%s" % params, body, headers)
        response = conn.getresponse()
        data = response.read()
        print(data)
        conn.close()
        return data
    except Exception as e:
        # The old message printed e.message twice; most exceptions have no
        # .message attribute at all, so format the exception itself.
        print("Error calling vision API: {0}".format(e))
# For DEMO purposes
# vision('demo.jpg')
| {
"content_hash": "1c2be30ba53f15a019439f892658aaec",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 79,
"avg_line_length": 25.5,
"alnum_prop": 0.5994397759103641,
"repo_name": "therealAJ/torch",
"id": "68e22c170eb45eac8ce47d0e63d8e9a567483950",
"size": "1071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7353"
}
],
"symlink_target": ""
} |
# Sphinx build configuration for the Galah Interact documentation.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Galah Interact'
copyright = u'2013, Galah Group LLC and other contibuters as specified in CONTRIBUTERS'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import os.path
# Read the shared VERSION file (two directories above this conf.py) so the
# documentation always reports the same version string as the package.
version_file_path = \
    os.path.normpath(os.path.join(os.path.abspath(__file__), "../../VERSION"))
# Use a context manager so the file handle is closed promptly instead of
# being leaked until garbage collection.
with open(version_file_path) as version_file:
    version = release = version_file.read()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# -- Options for AutoDoc -------------------------------------------------------

# Use the first line of the docstring as the signature when autodoc cannot
# introspect one (e.g. for decorated callables).
autodoc_docstring_signature = True

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "nature"

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'GalahInteractdoc'

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'GalahInteract.tex', u'Galah Interact Documentation',
     u'Galah Group LLC and other contibuters as specified in CONTRIBUTERS', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'galahinteract', u'Galah Interact Documentation',
     [u'Galah Group LLC and other contibuters as specified in CONTRIBUTERS'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'GalahInteract', u'Galah Interact Documentation',
     u'Galah Group LLC and other contibuters as specified in CONTRIBUTERS', 'GalahInteract', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "f3444b39ea4decd6fc4c87023fdee27c",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 126,
"avg_line_length": 33.79831932773109,
"alnum_prop": 0.7033814022874192,
"repo_name": "ucrcsedept/galah-interact-python",
"id": "43bd33a4a2361c8e0442f0b6db9383976f8382ee",
"size": "8469",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "418"
},
{
"name": "Python",
"bytes": "107116"
},
{
"name": "Shell",
"bytes": "2596"
}
],
"symlink_target": ""
} |
from unittest import TestCase
import os
import pytest
import numpy as np
from pyspark.sql.types import ArrayType, DoubleType
from bigdl.orca import init_orca_context, stop_orca_context
from bigdl.orca.data import SparkXShards
from bigdl.orca.data.image.utils import chunks
from bigdl.orca.learn.utils import convert_predict_rdd_to_dataframe, _dataframe_to_xshards, \
convert_predict_xshards_to_dataframe, convert_predict_rdd_to_xshard, update_predict_xshards
resource_path = os.path.join(os.path.split(__file__)[0], "../resources")
class TestUtil(TestCase):
    """Tests for the RDD/XShards/DataFrame prediction-conversion helpers
    in bigdl.orca.learn.utils.
    """

    def setUp(self):
        """ setup any state tied to the execution of the given method in a
        class. setup_method is invoked for every test method of a class.
        """
        self.sc = init_orca_context(cores=4)

        # SQL UDF: convert a Spark ML vector column into a plain list.
        def to_array_(v):
            return v.toArray().tolist()

        # SQL UDF: flatten a column of vectors into one list of doubles.
        def flatten_(v):
            result = []
            for elem in v:
                result.extend(elem.toArray().tolist())
            return result

        from pyspark.sql import SparkSession
        spark = SparkSession(self.sc)
        spark.udf.register("to_array", to_array_, ArrayType(DoubleType()))
        spark.udf.register("flatten", flatten_, ArrayType(DoubleType()))

    def tearDown(self):
        """ teardown any state that was previously setup with a setup_method
        call.
        """
        stop_orca_context()

    def test_convert_predict_rdd_to_dataframe(self):
        # Predictions equal to the features must produce zero mismatches.
        rdd = self.sc.range(0, 100)
        df = rdd.map(lambda x: ([float(x)] * 50,
                                [int(np.random.randint(0, 2, size=()))])
                     ).toDF(["feature", "label"])
        pred_rdd = rdd.map(lambda x: np.array([float(x)] * 50))
        result_df = convert_predict_rdd_to_dataframe(df, pred_rdd)
        expr = "sum(cast(feature <> to_array(prediction) as int)) as error"
        assert result_df.selectExpr(expr).first()["error"] == 0

    def test_convert_predict_rdd_to_dataframe_multi_output(self):
        # Multi-output predictions are flattened before comparison.
        rdd = self.sc.range(0, 100)
        df = rdd.map(lambda x: ([float(x)] * 50,
                                [int(np.random.randint(0, 2, size=()))])
                     ).toDF(["feature", "label"])
        pred_rdd = rdd.map(lambda x: [np.array([float(x)] * 25), np.array([float(x)] * 25)])
        result_df = convert_predict_rdd_to_dataframe(df, pred_rdd)
        expr = "sum(cast(feature <> flatten(prediction) as int)) as error"
        assert result_df.selectExpr(expr).first()["error"] == 0

    def test_convert_predict_rdd_to_xshard(self):
        # 110 rows in chunks of 5 exercises uneven partition boundaries.
        rdd = self.sc.range(0, 110).map(lambda x: np.array([x]*50))
        shards = rdd.mapPartitions(lambda iter: chunks(iter, 5)).map(lambda x: {"x": np.stack(x)})
        shards = SparkXShards(shards)
        pred_rdd = self.sc.range(0, 110).map(lambda x: np.array([x]*50))
        result_shards = convert_predict_rdd_to_xshard(shards, pred_rdd)
        result = np.concatenate([shard["prediction"] for shard in result_shards.collect()])
        expected_result = np.concatenate([shard["x"] for shard in result_shards.collect()])
        assert np.array_equal(result, expected_result)

    def test_convert_predict_rdd_to_xshard_multi_output(self):
        # Per-row prediction is a list of two arrays; concatenate on axis 1
        # to rebuild the original 50-wide rows.
        rdd = self.sc.range(0, 110).map(lambda x: np.array([x]*50))
        shards = rdd.mapPartitions(lambda iter: chunks(iter, 5)).map(lambda x: {"x": np.stack(x)})
        shards = SparkXShards(shards)
        pred_rdd = self.sc.range(0, 110).map(lambda x: [np.array([x]*24), np.array([x]*26)])
        result_shards = convert_predict_rdd_to_xshard(shards, pred_rdd)
        result = np.concatenate([np.concatenate(shard["prediction"], axis=1)
                                 for shard in result_shards.collect()])
        expected_result = np.concatenate([shard["x"] for shard in result_shards.collect()])
        assert np.array_equal(result, expected_result)

    def test_update_predict_xshard(self):
        # Helper: build an XShards of stacked rows keyed by *key*.
        def get_xshards(key):
            rdd = self.sc.range(0, 110).map(lambda x: np.array([x] * 50))
            shards = rdd.mapPartitions(lambda iter: chunks(iter, 5)).map(
                lambda x: {key: np.stack(x)})
            shards = SparkXShards(shards)
            return shards
        data_shards = get_xshards("x")
        pred_shards = get_xshards("prediction")
        result_shards = update_predict_xshards(data_shards, pred_shards)
        result = np.concatenate([shard["prediction"] for shard in result_shards.collect()])
        expected_result = np.concatenate([shard["x"] for shard in result_shards.collect()])
        assert np.array_equal(result, expected_result)

    def test_update_predict_xshard_multi_output(self):
        # Helper: XShards of plain stacked rows.
        def get_data_xshards(key):
            rdd = self.sc.range(0, 110).map(lambda x: np.array([x] * 50))
            shards = rdd.mapPartitions(lambda iter: chunks(iter, 5)).map(
                lambda x: {key: np.stack(x)})
            shards = SparkXShards(shards)
            return shards

        # Helper: XShards whose values are split into two column blocks,
        # mimicking a multi-output model prediction.
        def get_pred_xshards(key):
            rdd = self.sc.range(0, 110).map(lambda x: np.array([x] * 50))
            shards = rdd.mapPartitions(lambda iter: chunks(iter, 5)).map(
                lambda x: {key: np.stack(x)}).map(lambda x: {key: [x[key][:, :24], x[key][:, 24:]]})
            shards = SparkXShards(shards)
            return shards
        data_shards = get_data_xshards("x")
        pred_shards = get_pred_xshards("prediction")
        result_shards = update_predict_xshards(data_shards, pred_shards)
        result = np.concatenate([np.concatenate(shard["prediction"], axis=1)
                                 for shard in result_shards.collect()])
        expected_result = np.concatenate([shard["x"] for shard in result_shards.collect()])
        assert np.array_equal(result, expected_result)

    def test_convert_predict_xshards_to_dataframe(self):
        rdd = self.sc.range(0, 100)
        df = rdd.map(lambda x: ([float(x)] * 50,
                                [int(np.random.randint(0, 2, size=()))])
                     ).toDF(["feature", "label"])
        # Echo the features back as the "prediction" so the round trip
        # through xshards must reproduce them exactly.
        pred_shards = _dataframe_to_xshards(df, feature_cols=["feature"]).transform_shard(
            lambda x: {"prediction": x["x"]})
        result_df = convert_predict_xshards_to_dataframe(df, pred_shards)
        expr = "sum(cast(feature <> to_array(prediction) as int)) as error"
        assert result_df.selectExpr(expr).first()["error"] == 0

    def test_convert_predict_xshards_to_dataframe_multi_output(self):
        rdd = self.sc.range(0, 100)
        df = rdd.map(lambda x: ([float(x)] * 50,
                                [int(np.random.randint(0, 2, size=()))])
                     ).toDF(["feature", "label"])
        # Split the echoed features into two blocks to model multi-output.
        pred_shards = _dataframe_to_xshards(df, feature_cols=["feature"]).transform_shard(
            lambda x: {"prediction": [x["x"][:, :25], x["x"][:, 25:]]})
        result_df = convert_predict_xshards_to_dataframe(df, pred_shards)
        expr = "sum(cast(feature <> flatten(prediction) as int)) as error"
        assert result_df.selectExpr(expr).first()["error"] == 0

    def test_array2dict(self):
        from bigdl.orca.learn.utils import arrays2dict
        record_num = 100
        shard_size = 30
        data = [(np.float32(np.random.randn(1, 50)), np.float32([np.random.randint(0, 2,)]))
                for i in range(record_num)]
        result = arrays2dict(data, feature_cols=["feature"], label_cols=["label"],
                             shard_size=shard_size)
        # All shards are full-sized except possibly the last one, which holds
        # the remainder (record_num % shard_size) records.
        for i, d in enumerate(result):
            if (record_num % shard_size == 0) or (i != record_num // shard_size):
                assert d['x'].shape[0] == shard_size
                assert d['y'].shape[0] == shard_size
            else:
                assert d['x'].shape[0] == record_num % shard_size
                assert d['y'].shape[0] == record_num % shard_size

    def test_array2dict_shard_size_none(self):
        from bigdl.orca.learn.utils import arrays2dict
        record_num = 100
        data = [(np.float32(np.random.randn(1, 50)), np.float32([np.random.randint(0, 2,)]))
                for i in range(record_num)]
        # shard_size=None means everything lands in a single shard.
        result = arrays2dict(data, feature_cols=["feature"], label_cols=["label"], shard_size=None)
        for i, d in enumerate(result):
            assert d['x'].shape[0] == record_num
            assert d['y'].shape[0] == record_num

    def test_dataframe_to_xshards(self):
        rdd = self.sc.range(0, 100)
        df = rdd.map(lambda x: ([float(x)] * 50,
                                [int(np.random.randint(0, 2, size=()))])
                     ).toDF(["feature", "label"])
        num_partitions = df.rdd.getNumPartitions()
        # test shard_size = None
        shards = _dataframe_to_xshards(df, feature_cols=["feature"], label_cols=["label"])
        num_shards = shards.rdd.count()
        assert num_shards == num_partitions
        from bigdl.orca import OrcaContext
        # With shard_size forced to 1, every row becomes its own shard.
        OrcaContext._shard_size = 1
        shards = _dataframe_to_xshards(df, feature_cols=["feature"], label_cols=["label"])
        num_shards = shards.rdd.count()
        assert num_shards == df.rdd.count()
        # Restore the global so later tests are unaffected.
        OrcaContext._shard_size = None
# Allow running this test module directly with `python test_utils.py`.
if __name__ == "__main__":
    pytest.main([__file__])
| {
"content_hash": "910251d99a5ce3e8aec8ac23f498f86e",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 100,
"avg_line_length": 45.875621890547265,
"alnum_prop": 0.5856197809348227,
"repo_name": "intel-analytics/BigDL",
"id": "8a215e9a568c14d8260472426f173b40fc07fbb0",
"size": "9809",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "python/orca/test/bigdl/orca/learn/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5342"
},
{
"name": "Dockerfile",
"bytes": "139304"
},
{
"name": "Java",
"bytes": "1321348"
},
{
"name": "Jupyter Notebook",
"bytes": "54112822"
},
{
"name": "Lua",
"bytes": "1904"
},
{
"name": "Makefile",
"bytes": "19253"
},
{
"name": "PowerShell",
"bytes": "1137"
},
{
"name": "PureBasic",
"bytes": "593"
},
{
"name": "Python",
"bytes": "8825782"
},
{
"name": "RobotFramework",
"bytes": "16117"
},
{
"name": "Scala",
"bytes": "13216148"
},
{
"name": "Shell",
"bytes": "848241"
}
],
"symlink_target": ""
} |
import asyncio
import LeagueStats
import psycopg2
from discord.ext.commands import Bot
import botinfo
# import mysql.connector
# import MySQLdb
import pymysql.cursors
# Discord bot instance; all commands below are invoked with a "!" prefix.
mid_bot = Bot(command_prefix="!")
try:
    # Connect to the MySQL database that stores Discord-user <-> summoner
    # links and fantasy predictions.
    cnx = pymysql.connect(host='127.0.0.1',
                          user='root',
                          password='password',
                          database=botinfo.dbname,
                          charset='utf8mb4',
                          cursorclass=pymysql.cursors.DictCursor)
except Exception:
    print("didn't connect")
# BUG FIX: cnx.cursor is a method and must be *called* to obtain a cursor;
# the old code assigned the bound method itself, so every subsequent
# cur.execute(...) raised AttributeError.
cur = cnx.cursor()
@mid_bot.event
@asyncio.coroutine
def on_ready():
    """Log once the client has finished logging in.

    BUG FIX: discord.py dispatches the event named 'on_ready'; the old
    handler was named 'on_read', which is not a known event, so it never
    fired.
    """
    print("Client logged in")
# Smoke-test command: echoes the current server id wrapped in a code block.
@mid_bot.command(pass_context=True)
@asyncio.coroutine
def test(ctx, *args):
    server_id = str(ctx.message.server.id)
    message = "```" + server_id
    print(message)
    message += "```"
    yield from mid_bot.say(message)
#Displays win-loss of the past 10 games
@mid_bot.command(pass_context=True)
@asyncio.coroutine
def last10(ctx, *args):
    """Show the win-loss record of a summoner's last 10 games.

    With one argument, look up that summoner name directly; with none,
    resolve the summoner tied to the calling Discord user in the database.
    """
    if len(args) == 1:  # a username has been given, look up that name
        yield from mid_bot.say((LeagueStats.last10Games(args[0])))
    elif len(args) == 0:  # no username has been given
        # SECURITY FIX: parameterized query instead of string concatenation,
        # which allowed SQL injection via the Discord display name.
        sql = ("select summoner from discordinfo "
               "where discordName=%s and serverID=%s;")
        print(sql)  # log it
        try:
            cur.execute(sql,
                        (str(ctx.message.author), str(ctx.message.server.id)))
        except Exception:
            print("failed to find username")
        try:
            username = cur.fetchall()  # use the database result for the lookup
            print(str(username[0][0]).rstrip())
            yield from mid_bot.say(LeagueStats.last10Games(str(username[0][0]).rstrip()))
        except Exception:
            print("failed to fetch username")
    else:  # error
        yield from mid_bot.say("Too many parameters")
#In progress
@mid_bot.command()
@asyncio.coroutine
def ranking(*args):
    # Placeholder command: will eventually show player rankings; for now it
    # only logs the received arguments.
    print(args)
#Insert user into database
@mid_bot.command(pass_context=True)
@asyncio.coroutine
def setup(ctx, *args):
    """Tie the calling Discord user to a League summoner name."""
    member = ctx.message.author
    print(member)  # log messages
    print(ctx.message)
    print(args)
    try:  # insert user into database
        # SECURITY FIX: parameterized query instead of string concatenation
        # (the old code was injectable through the summoner-name argument).
        sql = "INSERT INTO DiscordInfo VALUES (%s, %s, %s);"
        print(sql)
        print(cur.execute(sql, (str(member), args[0],
                                str(ctx.message.server.id))))
        try:
            cur.execute("select * from discordinfo;")
            rows = cur.fetchall()
            for row in rows:
                print(row)  # log user in database
        except Exception:  # error
            print("didnt select")
        yield from mid_bot.say("Tied @" + str(member) + " to " + args[0])  # success
    except Exception:  # error
        print("didn't insert")
@mid_bot.command(pass_context=True)
@asyncio.coroutine
def predict(ctx, *args):
    """Store a user's prediction of the top-10 team ranking."""
    print(args)
    print(len(args))
    username = ctx.message.author
    print(username)
    if len(args) == 10:
        # SECURITY FIX: parameterized query; the old string concatenation
        # was injectable through every team-name argument.
        sql = ("INSERT INTO ranking VALUES "
               "(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);")
        print(sql)
        try:
            # 1 username + 10 team names = 11 placeholders.
            print(cur.execute(sql, (str(username),) + args))
            try:
                cur.execute("select * from ranking;")
                rows = cur.fetchall()
                for row in rows:
                    print(" ", row)
                yield from mid_bot.say("Stored @" + str(username) + "'s prediction")
            except Exception:
                print("didnt select")
        except Exception:
            print("failed to insert")
    else:
        yield from mid_bot.say("Please list 10 teams")
#Displays a table into server of players fantasy score
@mid_bot.command()
@asyncio.coroutine
def fantasy():
    """Render every stored LCS prediction as an ASCII table and post it."""
    # Starts formatting
    result = "Fantasy Predictions \n\n ```Username | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | Score \n" \
             "------------------------+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----|\n"
    sql = "select * from ranking;"
    try:  # receive table
        cur.execute(sql)
    except Exception:  # narrowed from bare except
        print("didn't select")
    try:
        # format by going row by row
        rows = cur.fetchall()
        for i in range(len(rows)):
            for item in rows[i]:
                if len(item) > 3:
                    result += item.ljust(23) + " | "  # pad username
                else:
                    # (original had identical "== 3" and "else" branches; merged)
                    result += item + " | "  # delimiter
            if i < len(rows) - 1:
                result += "\n------------------------+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----|\n"
            else:  # last row
                result += "\n-------------------------------------------------------------------------------------\n"
    except Exception:  # narrowed from bare except
        print("didn't fetch")
    result += "```"  # finish formatting
    yield from mid_bot.say(result)  # output
#displays stats about players last game
@mid_bot.command(pass_context=True)
@asyncio.coroutine
def lastgame(ctx, *args):
    """Display stats about a player's last ranked game.

    With one argument, that summoner name is used; with no arguments the
    caller's summoner name is resolved from the database.
    """
    if len(args) == 1:  # username been given
        yield from mid_bot.say((LeagueStats.lastGame(args[0])))
    elif len(args) == 0:  # no username been given, use default
        # NOTE(review): string-built SQL is open to injection via the Discord
        # display name; switch to a parameterized cursor.execute for this driver.
        sql = "select summoner from discordinfo where discordName='" + str(
            ctx.message.author) + "' and serverID=" + str(ctx.message.server.id) + ";"  # construct sql query
        print(sql)
        try:
            cur.execute(sql)  # execute sql query
        except Exception:  # narrowed from bare except
            print("failed to find username")
        try:
            username = cur.fetchall()  # fetch
            print(str(username[0][0]).rstrip())
        except Exception:  # narrowed from bare except
            print("failed to fetch username")
        try:  # output; also covers `username` being unbound after a failed fetch
            yield from mid_bot.say(LeagueStats.lastGame(str(username[0][0]).rstrip()))
        except Exception:  # narrowed from bare except
            print("stats problem")
    else:  # error
        yield from mid_bot.say("Too many parameters")
#lists all commands
@mid_bot.command()
@asyncio.coroutine
def commands():
    # Posts the help text listing every chat command.
    # NOTE: the local name shadows the function name; harmless, as the body
    # never needs to reference the function itself.
    commands = """List of commands : \n
    !setup <League of Legends Summoner Name>
    \t - Ties your discord account to your League of Legends account \n
    !shitter
    \t - Outs the shitter of the sever \n
    !last10 <Summoner Name>
    \t - Win - Loss of the most recent 10 games of a League of Legends account \n
    !predict <team> <team> <team> <team> <team> <team> <team> <team> <team> <team>
    \t - Stores LCS prediction \n
    !lastgame <Summoner Name>
    \t - Displays details of last ranked game \n
    !fantasy
    \t - Displays all LCS predictions \n
    !commands
    \t - Lists all possible commands
    """
    yield from mid_bot.say(commands)
#inprogress
@mid_bot.command()
@asyncio.coroutine
def lcs():
    # Placeholder while LCS support is in progress: replies with a canned string.
    yield from mid_bot.say("123")
# Entry point: start the bot's event loop; the token lives in botinfo,
# keeping the credential out of this file.
mid_bot.run(botinfo.BOT_TOKEN)
| {
"content_hash": "cec804dc9304fda3fc5026e4718a872d",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 140,
"avg_line_length": 34.09090909090909,
"alnum_prop": 0.5287619047619048,
"repo_name": "MarkFranciscus/DevMIDbot",
"id": "6572dd6103ed6be4aa1a112190703ed92f405c58",
"size": "7875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PythonServer/MIDBot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1616632"
},
{
"name": "Shell",
"bytes": "118"
}
],
"symlink_target": ""
} |
from twisted.application.service import ServiceMaker

# Twisted plugin registration: exposes twisted.runner.procmontap under the
# `procmon` name so `twistd procmon ...` can launch the process monitor.
TwistedProcmon = ServiceMaker(
    "Twisted Process Monitor",
    "twisted.runner.procmontap",
    ("A process watchdog / supervisor"),
    "procmon")
| {
"content_hash": "fdb3d3f6e4f13f248643be30a8cee5b5",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 52,
"avg_line_length": 26.75,
"alnum_prop": 0.7009345794392523,
"repo_name": "timkrentz/SunTracker",
"id": "2cd0d96acd009fc5dc12788dda5a0d3db1c13cec",
"size": "288",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/plugins/twisted_runner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "185699"
},
{
"name": "Assembly",
"bytes": "38582"
},
{
"name": "Batchfile",
"bytes": "110"
},
{
"name": "C",
"bytes": "48362836"
},
{
"name": "C++",
"bytes": "70478135"
},
{
"name": "CMake",
"bytes": "1755036"
},
{
"name": "CSS",
"bytes": "147795"
},
{
"name": "Cuda",
"bytes": "30026"
},
{
"name": "D",
"bytes": "2152"
},
{
"name": "GAP",
"bytes": "14495"
},
{
"name": "GLSL",
"bytes": "190912"
},
{
"name": "Groff",
"bytes": "66799"
},
{
"name": "HTML",
"bytes": "295090"
},
{
"name": "Java",
"bytes": "203238"
},
{
"name": "JavaScript",
"bytes": "1146098"
},
{
"name": "Lex",
"bytes": "47145"
},
{
"name": "Makefile",
"bytes": "5461"
},
{
"name": "Objective-C",
"bytes": "74727"
},
{
"name": "Objective-C++",
"bytes": "265817"
},
{
"name": "Pascal",
"bytes": "3407"
},
{
"name": "Perl",
"bytes": "178176"
},
{
"name": "Prolog",
"bytes": "4556"
},
{
"name": "Python",
"bytes": "16497901"
},
{
"name": "Shell",
"bytes": "48835"
},
{
"name": "Smarty",
"bytes": "1368"
},
{
"name": "Tcl",
"bytes": "1955829"
},
{
"name": "Yacc",
"bytes": "180651"
}
],
"symlink_target": ""
} |
import sys
import os
class Mock(object):
    """Import stand-in for the macOS-only PyObjC modules.

    Swallows any constructor arguments, and resolves every attribute
    access to a fresh Mock, so autodoc can import the package on any
    platform without PyObjC installed.
    """

    def __init__(self, *ignored):
        pass

    @classmethod
    def __getattr__(cls, name):
        stand_in = cls()
        return stand_in
# Register mocks for the macOS-only PyObjC modules so `import rumps`
# succeeds on the (possibly Linux) docs build machine.
modules = ['Foundation', 'AppKit', 'PyObjCTools']
sys.modules.update((module, Mock()) for module in modules)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'rumps'
copyright = u'2014, Jared Suttles'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# NOTE(review): presumably mirrors the version declared in setup.py — keep
# the two in sync when releasing.
# The short X.Y version.
version = '0.2.0'
# The full version, including alpha/beta/rc tags.
release = '0.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'rumpsdoc'
# -- Options for LaTeX output ---------------------------------------------
# Sphinx-generated defaults: every LaTeX override is left commented out.
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
# One LaTeX build target: the whole toctree rendered as a "manual"-class PDF.
latex_documents = [
  ('index', 'rumps.tex', u'rumps Documentation',
   u'Jared Suttles', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# One man page built from the toctree root, placed in manual section 1.
man_pages = [
    ('index', 'rumps', u'rumps Documentation',
     [u'Jared Suttles'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# One Texinfo build target mirroring the LaTeX/man configuration above.
texinfo_documents = [
  ('index', 'rumps', u'rumps Documentation',
   u'Jared Suttles', 'rumps', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "9564f6c7a13441bed0a85bc407e1474f",
"timestamp": "",
"source": "github",
"line_count": 257,
"max_line_length": 79,
"avg_line_length": 31.035019455252918,
"alnum_prop": 0.7004764292878636,
"repo_name": "jaredks/rumps",
"id": "a4b8ac3b703ac59e6ba20ec02281726c7853d570",
"size": "8394",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "104"
},
{
"name": "Python",
"bytes": "82841"
}
],
"symlink_target": ""
} |
"""COCO-style evaluation metrics.
Implements the interface of COCO API and metric_fn in tf.TPUEstimator.
COCO API: github.com/cocodataset/cocoapi/
"""
import json
import os
import sys
from absl import logging
import numpy as np
import tensorflow as tf
from tensorflow_examples.lite.model_maker.third_party.efficientdet.keras import label_util
try:
# pylint: disable=g-import-not-at-top
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
# pylint: enable=g-import-not-at-top
except ImportError:
COCO = None
COCOeval = None
def block_print(log_level):
  """Disables print function when current logging level > log_level."""
  if tf.get_logger().getEffectiveLevel() > log_level:
    # NOTE(review): the devnull handle is never closed and the previous
    # sys.stdout is not saved here; callers must capture it first (see
    # evaluate(), which stashes sys.stdout before calling this).
    sys.stdout = open(os.devnull, 'w')
def enable_print(original_stdout):
  """Enables print function."""
  # Restores the stream the caller captured before block_print() ran.
  sys.stdout = original_stdout
class EvaluationMetric():
  """COCO evaluation metric class.
  This class cannot inherit from tf.keras.metrics.Metric due to numpy.
  """
  def __init__(self, filename=None, testdev_dir=None, label_map=None):
    """Constructs COCO evaluation class.
    The class provides the interface to metrics_fn in TPUEstimator. The
    _update_op() takes detections from each image and push them to
    self.detections. The _evaluate() loads a JSON file in COCO annotation format
    as the groundtruth and runs COCO evaluation.
    Args:
      filename: Ground truth JSON file name. If filename is None, use
        groundtruth data passed from the dataloader for evaluation. filename is
        ignored if testdev_dir is not None.
      testdev_dir: folder name for testdev data. If None, run eval without
        groundtruth, and filename will be ignored.
      label_map: a dict from id to class name. Used for per-class AP.
    """
    self.label_map = label_map
    self.filename = filename
    self.testdev_dir = testdev_dir
    self.metric_names = ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'ARmax1',
                         'ARmax10', 'ARmax100', 'ARs', 'ARm', 'ARl']
    self.reset_states()
  def reset_states(self):
    """Reset COCO API object."""
    self.detections = []
    # Groundtruth accumulated in COCO dataset format by update_state();
    # used only when no groundtruth JSON file was supplied.
    self.dataset = {
        'images': [],
        'annotations': [],
        'categories': []
    }
    self.image_id = 1
    self.annotation_id = 1
    self.category_ids = []
    self.metric_values = None
  def evaluate(self, log_level=tf.compat.v1.logging.INFO):
    """Evaluates with detections from all images with COCO API.
    Args:
      log_level: Logging level to print logs.
    Returns:
      coco_metric: float numpy array with shape [12] representing the
        coco-style evaluation metrics.
    Raises:
      ImportError: if the pip package `pycocotools` is not installed.
    """
    if COCO is None or COCOeval is None:
      message = ('You must install pycocotools (`pip install pycocotools`) '
                 '(see github repo at https://github.com/cocodataset/cocoapi) '
                 'for efficientdet/coco_metric to work.')
      raise ImportError(message)
    # Capture stdout before block_print() may redirect it to devnull.
    original_stdout = sys.stdout
    block_print(log_level)
    if self.filename:
      coco_gt = COCO(self.filename)
    else:
      # Build the groundtruth index from the dataset accumulated in
      # update_state().
      coco_gt = COCO()
      coco_gt.dataset = self.dataset
      coco_gt.createIndex()
    enable_print(original_stdout)
    if self.testdev_dir:
      # Run on test-dev dataset.
      box_result_list = []
      for det in self.detections:
        box_result_list.append({
            'image_id': int(det[0]),
            'category_id': int(det[6]),
            'bbox': np.around(
                det[1:5].astype(np.float64), decimals=2).tolist(),
            'score': float(np.around(det[5], decimals=3)),
        })
      # NOTE(review): json.encoder.FLOAT_REPR has no effect on Python 3's
      # json module — confirm whether the 3-decimal formatting still matters.
      json.encoder.FLOAT_REPR = lambda o: format(o, '.3f')
      # Must be in the format of 'detections_test-dev2017_xxx_results'.
      fname = 'detections_test-dev2017_test_results'
      output_path = os.path.join(self.testdev_dir, fname + '.json')
      logging.info('Writing output json file to: %s', output_path)
      with tf.io.gfile.GFile(output_path, 'w') as fid:
        json.dump(box_result_list, fid)
      return np.array([-1.], dtype=np.float32)
    else:
      # Run on validation dataset.
      block_print(log_level)
      detections = np.array(self.detections)
      # Detection row column 0 is the image id (see update_state docstring).
      image_ids = list(set(detections[:, 0]))
      coco_dt = coco_gt.loadRes(detections)
      coco_eval = COCOeval(coco_gt, coco_dt, iouType='bbox')
      coco_eval.params.imgIds = image_ids
      coco_eval.evaluate()
      coco_eval.accumulate()
      coco_eval.summarize()
      enable_print(original_stdout)
      coco_metrics = coco_eval.stats
      if self.label_map:
        # Get per_class AP, see pycocotools/cocoeval.py:334
        # TxRxKxAxM: iouThrs x recThrs x catIds x areaRng x maxDets
        # Use areaRng_id=0 ('all') and maxDets_id=-1 (200) in default
        precision = coco_eval.eval['precision'][:, :, :, 0, -1]
        # Ideally, label_map should match the eval set, but it is possible that
        # some classes has no data in the eval set.
        ap_perclass = [0] * max(precision.shape[-1], len(self.label_map))
        for c in range(precision.shape[-1]):  # iterate over all classes
          precision_c = precision[:, :, c]
          # Only consider values if > -1.
          precision_c = precision_c[precision_c > -1]
          ap_c = np.mean(precision_c) if precision_c.size else -1.
          ap_perclass[c] = ap_c
        coco_metrics = np.concatenate((coco_metrics, ap_perclass))
      # Return the concat normal and per-class AP.
      return np.array(coco_metrics, dtype=np.float32)
  def result(self, log_level=tf.compat.v1.logging.INFO):
    """Return the metric values (and compute it if needed)."""
    # Cached until reset_states() is called again.
    if self.metric_values is None:
      self.metric_values = self.evaluate(log_level)
    return self.metric_values
  def update_state(self, groundtruth_data, detections):
    """Update detection results and groundtruth data.
    Append detection results to self.detections to aggregate results from
    all validation set. The groundtruth_data is parsed and added into a
    dictionary with the same format as COCO dataset, which can be used for
    evaluation.
    Args:
      groundtruth_data: Groundtruth annotations in a tensor with each row
        representing [y1, x1, y2, x2, is_crowd, area, class].
      detections: Detection results in a tensor with each row representing
        [image_id, x, y, width, height, score, class].
    """
    for i, det in enumerate(detections):
      # Filter out detections with predicted class label = -1.
      indices = np.where(det[:, -1] > -1)[0]
      det = det[indices]
      if det.shape[0] == 0:
        continue
      # Append groundtruth annotations to create COCO dataset object.
      # Add images.
      image_id = det[0, 0]
      if image_id == -1:
        # No id supplied by the pipeline: assign a running one.
        image_id = self.image_id
      det[:, 0] = image_id
      self.detections.extend(det)
      if not self.filename and not self.testdev_dir:
        # process groudtruth data only if filename is empty and no test_dev.
        self.dataset['images'].append({
            'id': int(image_id),
        })
        # Add annotations.
        indices = np.where(groundtruth_data[i, :, -1] > -1)[0]
        for data in groundtruth_data[i, indices]:
          box = data[0:4]
          is_crowd = data[4]
          area = (box[3] - box[1]) * (box[2] - box[0])
          category_id = data[6]
          if category_id < 0:
            break
          # Convert [y1, x1, y2, x2] to COCO's [x, y, width, height].
          self.dataset['annotations'].append({
              'id': int(self.annotation_id),
              'image_id': int(image_id),
              'category_id': int(category_id),
              'bbox': [box[1], box[0], box[3] - box[1], box[2] - box[0]],
              'area': area,
              'iscrowd': int(is_crowd)
          })
          self.annotation_id += 1
          self.category_ids.append(category_id)
      self.image_id += 1
    if not self.filename:
      self.category_ids = list(set(self.category_ids))
      self.dataset['categories'] = [
          {'id': int(category_id)} for category_id in self.category_ids
      ]
  def estimator_metric_fn(self, detections, groundtruth_data):
    """Constructs the metric function for tf.TPUEstimator.
    For each metric, we return the evaluation op and an update op; the update op
    is shared across all metrics and simply appends the set of detections to the
    `self.detections` list. The metric op is invoked after all examples have
    been seen and computes the aggregate COCO metrics. Please find details API
    in: https://www.tensorflow.org/api_docs/python/tf/contrib/learn/MetricSpec
    Args:
      detections: Detection results in a tensor with each row representing
        [image_id, x, y, width, height, score, class]
      groundtruth_data: Groundtruth annotations in a tensor with each row
        representing [y1, x1, y2, x2, is_crowd, area, class].
    Returns:
      metrics_dict: A dictionary mapping from evaluation name to a tuple of
        operations (`metric_op`, `update_op`). `update_op` appends the
        detections for the metric to the `self.detections` list.
    """
    with tf.name_scope('coco_metric'):
      if self.testdev_dir:
        # test-dev has no groundtruth: expose a single placeholder AP.
        update_op = tf.numpy_function(self.update_state,
                                      [groundtruth_data, detections], [])
        metrics = tf.numpy_function(self.result, [], tf.float32)
        metrics_dict = {'AP': (metrics, update_op)}
        return metrics_dict
      else:
        update_op = tf.numpy_function(self.update_state,
                                      [groundtruth_data, detections], [])
        metrics = tf.numpy_function(self.result, [], tf.float32)
        metrics_dict = {}
        for i, name in enumerate(self.metric_names):
          metrics_dict[name] = (metrics[i], update_op)
        if self.label_map:
          # process per-class AP.
          label_map = label_util.get_label_map(self.label_map)
          for i, cid in enumerate(sorted(label_map.keys())):
            name = 'AP_/%s' % label_map[cid]
            metrics_dict[name] = (metrics[i + len(self.metric_names)],
                                  update_op)
        return metrics_dict
| {
"content_hash": "34e9eb286021d82743d879c51805b23a",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 90,
"avg_line_length": 38.29323308270677,
"alnum_prop": 0.6253681523659925,
"repo_name": "tensorflow/examples",
"id": "92d87dbece92a9c88eda6af1a5bff57a482064a0",
"size": "10868",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_examples/lite/model_maker/third_party/efficientdet/coco_metric.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "106227"
},
{
"name": "CMake",
"bytes": "1553"
},
{
"name": "CSS",
"bytes": "4746"
},
{
"name": "Dockerfile",
"bytes": "467"
},
{
"name": "HTML",
"bytes": "12491"
},
{
"name": "Java",
"bytes": "305092"
},
{
"name": "JavaScript",
"bytes": "24461"
},
{
"name": "Jupyter Notebook",
"bytes": "1733035"
},
{
"name": "Kotlin",
"bytes": "631463"
},
{
"name": "Objective-C",
"bytes": "14639"
},
{
"name": "Objective-C++",
"bytes": "14293"
},
{
"name": "Python",
"bytes": "1232357"
},
{
"name": "Ruby",
"bytes": "3744"
},
{
"name": "Shell",
"bytes": "41573"
},
{
"name": "Starlark",
"bytes": "17498"
},
{
"name": "Swift",
"bytes": "553535"
}
],
"symlink_target": ""
} |
from ccxt.pro.base.exchange import Exchange
import ccxt.async_support
from ccxt.pro.base.cache import ArrayCache
class luno(Exchange, ccxt.async_support.luno):
def describe(self):
return self.deep_extend(super(luno, self).describe(), {
'has': {
'ws': True,
'watchTicker': False,
'watchTickers': False,
'watchTrades': True,
'watchMyTrades': False,
'watchOrders': None, # is in beta
'watchOrderBook': True,
'watchOHLCV': False,
},
'urls': {
'api': {
'ws': 'wss://ws.luno.com/api/1',
},
},
'options': {
'sequenceNumbers': {},
},
'streaming': {
},
'exceptions': {
},
})
async def watch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
see https://www.luno.com/en/developers/api#tag/Streaming-API
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the luno api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.check_required_credentials()
await self.load_markets()
market = self.market(symbol)
symbol = market['symbol']
subscriptionHash = '/stream/' + market['id']
subscription = {'symbol': symbol}
url = self.urls['api']['ws'] + subscriptionHash
messageHash = 'trades:' + symbol
subscribe = {
'api_key_id': self.apiKey,
'api_key_secret': self.secret,
}
request = self.deep_extend(subscribe, params)
trades = await self.watch(url, messageHash, request, subscriptionHash, subscription)
if self.newUpdates:
limit = trades.getLimit(symbol, limit)
return self.filter_by_since_limit(trades, since, limit, 'timestamp', True)
def handle_trades(self, client, message, subscription):
#
# {
# sequence: '110980825',
# trade_updates: [],
# create_update: {
# order_id: 'BXHSYXAUMH8C2RW',
# type: 'ASK',
# price: '24081.09000000',
# volume: '0.07780000'
# },
# delete_update: null,
# status_update: null,
# timestamp: 1660598775360
# }
#
rawTrades = self.safe_value(message, 'trade_updates', [])
length = len(rawTrades)
if length == 0:
return
symbol = subscription['symbol']
market = self.market(symbol)
messageHash = 'trades:' + symbol
stored = self.safe_value(self.trades, symbol)
if stored is None:
limit = self.safe_integer(self.options, 'tradesLimit', 1000)
stored = ArrayCache(limit)
self.trades[symbol] = stored
for i in range(0, len(rawTrades)):
rawTrade = rawTrades[i]
trade = self.parse_trade(rawTrade, market)
stored.append(trade)
self.trades[symbol] = stored
client.resolve(self.trades[symbol], messageHash)
def parse_trade(self, trade, market):
#
# watchTrades(public)
#
# {
# "base": "69.00000000",
# "counter": "113.6499000000000000",
# "maker_order_id": "BXEEU4S2BWF5WRB",
# "taker_order_id": "BXKNCSF7JDHXY3H",
# "order_id": "BXEEU4S2BWF5WRB"
# }
#
return self.safe_trade({
'info': trade,
'id': None,
'timestamp': None,
'datetime': None,
'symbol': market['symbol'],
'order': None,
'type': None,
'side': None,
# takerOrMaker has no meaning for public trades
'takerOrMaker': None,
'price': None,
'amount': self.safe_string(trade, 'base'),
'cost': self.safe_string(trade, 'counter'),
'fee': None,
}, market)
async def watch_order_book(self, symbol, limit=None, params={}):
"""
watches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dictConstructor params: extra parameters specific to the luno api endpoint
:param str|None params['type']: accepts l2 or l3 for level 2 or level 3 order book
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
await self.check_required_credentials()
await self.load_markets()
market = self.market(symbol)
symbol = market['symbol']
subscriptionHash = '/stream/' + market['id']
subscription = {'symbol': symbol}
url = self.urls['api']['ws'] + subscriptionHash
messageHash = 'orderbook:' + symbol
subscribe = {
'api_key_id': self.apiKey,
'api_key_secret': self.secret,
}
request = self.deep_extend(subscribe, params)
orderbook = await self.watch(url, messageHash, request, subscriptionHash, subscription)
return orderbook.limit(limit)
def handle_order_book(self, client, message, subscription):
#
# {
# "sequence": "24352",
# "asks": [{
# "id": "BXMC2CJ7HNB88U4",
# "price": "1234.00",
# "volume": "0.93"
# }],
# "bids": [{
# "id": "BXMC2CJ7HNB88U5",
# "price": "1201.00",
# "volume": "1.22"
# }],
# "status": "ACTIVE",
# "timestamp": 1528884331021
# }
#
# update
# {
# sequence: '110980825',
# trade_updates: [],
# create_update: {
# order_id: 'BXHSYXAUMH8C2RW',
# type: 'ASK',
# price: '24081.09000000',
# volume: '0.07780000'
# },
# delete_update: null,
# status_update: null,
# timestamp: 1660598775360
# }
#
symbol = subscription['symbol']
messageHash = 'orderbook:' + symbol
timestamp = self.safe_string(message, 'timestamp')
storedOrderBook = self.safe_value(self.orderbooks, symbol)
if storedOrderBook is None:
storedOrderBook = self.indexed_order_book({})
self.orderbooks[symbol] = storedOrderBook
asks = self.safe_value(message, 'asks')
if asks is not None:
snapshot = self.parse_order_book(message, symbol, timestamp, 'bids', 'asks', 'price', 'volume', 'id')
storedOrderBook.reset(snapshot)
else:
self.handle_delta(storedOrderBook, message)
storedOrderBook['timestamp'] = timestamp
storedOrderBook['datetime'] = self.iso8601(timestamp)
nonce = self.safe_integer(message, 'sequence')
storedOrderBook['nonce'] = nonce
client.resolve(storedOrderBook, messageHash)
def parse_order_book(self, orderbook, symbol, timestamp=None, bidsKey='bids', asksKey='asks', priceKey=0, amountKey=1, thirdKey=None):
bids = self.parse_bids_asks(self.safe_value(orderbook, bidsKey, []), priceKey, amountKey, thirdKey)
asks = self.parse_bids_asks(self.safe_value(orderbook, asksKey, []), priceKey, amountKey, thirdKey)
return {
'symbol': symbol,
'bids': self.sort_by(bids, 0, True),
'asks': self.sort_by(asks, 0),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'nonce': None,
}
def parse_bids_asks(self, bidasks, priceKey=0, amountKey=1, thirdKey=None):
bidasks = self.to_array(bidasks)
result = []
for i in range(0, len(bidasks)):
result.append(self.parse_bid_ask(bidasks[i], priceKey, amountKey, thirdKey))
return result
def parse_bid_ask(self, bidask, priceKey=0, amountKey=1, thirdKey=None):
price = self.safe_number(bidask, priceKey)
amount = self.safe_number(bidask, amountKey)
result = [price, amount]
if thirdKey is not None:
thirdValue = self.safe_string(bidask, thirdKey)
result.append(thirdValue)
return result
def handle_delta(self, orderbook, message):
#
# create
# {
# sequence: '110980825',
# trade_updates: [],
# create_update: {
# order_id: 'BXHSYXAUMH8C2RW',
# type: 'ASK',
# price: '24081.09000000',
# volume: '0.07780000'
# },
# delete_update: null,
# status_update: null,
# timestamp: 1660598775360
# }
# del # {
# sequence: '110980825',
# trade_updates: [],
# create_update: null,
# delete_update: {
# "order_id": "BXMC2CJ7HNB88U4"
# },
# status_update: null,
# timestamp: 1660598775360
# }
# trade
# {
# sequence: '110980825',
# trade_updates: [
# {
# "base": "0.1",
# "counter": "5232.00",
# "maker_order_id": "BXMC2CJ7HNB88U4",
# "taker_order_id": "BXMC2CJ7HNB88U5"
# }
# ],
# create_update: null,
# delete_update: null,
# status_update: null,
# timestamp: 1660598775360
# }
#
createUpdate = self.safe_value(message, 'create_update')
asksOrderSide = orderbook['asks']
bidsOrderSide = orderbook['bids']
if createUpdate is not None:
array = self.parse_bid_ask(createUpdate, 'price', 'volume', 'order_id')
type = self.safe_string(createUpdate, 'type')
if type == 'ASK':
asksOrderSide.storeArray(array)
elif type == 'BID':
bidsOrderSide.storeArray(array)
deleteUpdate = self.safe_value(message, 'delete_update')
if deleteUpdate is not None:
orderId = self.safe_string(deleteUpdate, 'order_id')
asksOrderSide.storeArray(0, 0, orderId)
bidsOrderSide.storeArray(0, 0, orderId)
return message
def handle_message(self, client, message):
    """Dispatch one incoming websocket payload to every known handler
    (order book and trades), using the first active subscription."""
    if message == '':
        # Keep-alive / empty frame: nothing to do.
        return
    subscriptions = list(client.subscriptions.values())
    for handler in (self.handle_order_book, self.handle_trades):
        handler(client, message, subscriptions[0])
    return message
| {
"content_hash": "a5132ed7ca6c2fd8313cad2b09de294b",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 155,
"avg_line_length": 39.52173913043478,
"alnum_prop": 0.5144283659135144,
"repo_name": "ccxt/ccxt",
"id": "0fc89c5d97e1b8e07ff99da8fc1139eca711fa86",
"size": "11998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ccxt/pro/luno.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1724"
},
{
"name": "HTML",
"bytes": "246"
},
{
"name": "JavaScript",
"bytes": "11619228"
},
{
"name": "PHP",
"bytes": "10272973"
},
{
"name": "Python",
"bytes": "9037496"
},
{
"name": "Shell",
"bytes": "6887"
}
],
"symlink_target": ""
} |
import xml.sax
class PartXMLHandler(xml.sax.ContentHandler):
    """SAX handler for a mysqldump-style XML file.

    Prints each table's name when its <table_structure> element opens and
    prints every column definition (<field Field="..." Type="...">) found
    inside it.  <field> elements inside <table_data>/<row> are ignored.
    """

    def __init__(self):
        # Initialize the base handler (sets up the document locator slot).
        xml.sax.ContentHandler.__init__(self)
        self.currentData = ''     # name of the most recently opened element
        self.isStructure = False  # True while inside a <table_structure>
        self.row_name = ''        # current column name (Field attribute)
        self.type = ''            # current column type (Type attribute)

    def startElement(self, tag, attributes):
        self.currentData = tag
        if tag == 'table_structure':
            self.isStructure = True
            print('-------Table-------')
            print(attributes['name'])
            print('---')
        elif tag == 'table_data':
            self.isStructure = False
        elif tag == 'row':
            self.isStructure = False
        elif tag == 'field' and self.isStructure:
            self.row_name = attributes['Field']
            self.type = attributes['Type']

    def endElement(self, tag):
        # Bug fix: the original tested self.currentData (the most recently
        # *opened* tag) instead of the tag actually being closed, so
        # </table_structure> never reset isStructure and the final field of
        # a table could be printed twice.
        if tag == 'table_structure':
            self.isStructure = False
        elif tag == 'field' and self.isStructure:
            print(self.row_name + ' Type: ' + self.type)
if __name__ == '__main__':
    # Build a namespace-unaware SAX parser and stream new.xml through it.
    sax_parser = xml.sax.make_parser()
    sax_parser.setFeature(xml.sax.handler.feature_namespaces, 0)
    sax_parser.setContentHandler(PartXMLHandler())
    sax_parser.parse('new.xml')
"content_hash": "c199540d169e3109ef62b0f6906b9600",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 62,
"avg_line_length": 33.22222222222222,
"alnum_prop": 0.560200668896321,
"repo_name": "igemsoftware/HFUT-China_2015",
"id": "ae8d8832bffa39c87ccf0b4706e3e21f9ce05bda",
"size": "1196",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "anaXML.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "612404"
},
{
"name": "HTML",
"bytes": "43479"
},
{
"name": "JavaScript",
"bytes": "210114"
},
{
"name": "Python",
"bytes": "201615"
}
],
"symlink_target": ""
} |
from time import time
from traceback import format_exc
from urllib import unquote
from uuid import uuid4
from hashlib import sha1
import hmac
import base64
from eventlet import Timeout
from swift.common.swob import Response, Request
from swift.common.swob import HTTPBadRequest, HTTPForbidden, HTTPNotFound, \
HTTPUnauthorized
from swift.common.middleware.acl import clean_acl, parse_acl, referrer_allowed
from swift.common.utils import cache_from_env, get_logger, \
split_path, config_true_value
class TempAuth(object):
    """
    Test authentication and authorization system.

    Add to your pipeline in proxy-server.conf, such as::

        [pipeline:main]
        pipeline = catch_errors cache tempauth proxy-server

    Set account auto creation to true in proxy-server.conf::

        [app:proxy-server]
        account_autocreate = true

    And add a tempauth filter section, such as::

        [filter:tempauth]
        use = egg:swift#tempauth
        user_admin_admin = admin .admin .reseller_admin
        user_test_tester = testing .admin
        user_test2_tester2 = testing2 .admin
        user_test_tester3 = testing3
        # To allow accounts/users with underscores you can base64 encode them.
        # Here is the account "under_score" and username "a_b" (note the lack
        # of padding equal signs):
        user64_dW5kZXJfc2NvcmU_YV9i = testing4

    See the proxy-server.conf-sample for more information.

    :param app: The next WSGI app in the pipeline
    :param conf: The dict of configuration values
    """

    def __init__(self, app, conf):
        self.app = app
        self.conf = conf
        self.logger = get_logger(conf, log_route='tempauth')
        self.log_headers = config_true_value(conf.get('log_headers', 'f'))
        # Normalize the reseller prefix so a non-empty value always ends
        # with '_' (e.g. 'AUTH' -> 'AUTH_').
        self.reseller_prefix = conf.get('reseller_prefix', 'AUTH').strip()
        if self.reseller_prefix and self.reseller_prefix[-1] != '_':
            self.reseller_prefix += '_'
        self.logger.set_statsd_prefix('tempauth.%s' % (
            self.reseller_prefix if self.reseller_prefix else 'NONE',))
        # Normalize the auth prefix to a non-empty, '/'-wrapped path.
        self.auth_prefix = conf.get('auth_prefix', '/auth/')
        if not self.auth_prefix or not self.auth_prefix.strip('/'):
            self.logger.warning('Rewriting invalid auth prefix "%s" to '
                                '"/auth/" (Non-empty auth prefix path '
                                'is required)' % self.auth_prefix)
            self.auth_prefix = '/auth/'
        if self.auth_prefix[0] != '/':
            self.auth_prefix = '/' + self.auth_prefix
        if self.auth_prefix[-1] != '/':
            self.auth_prefix += '/'
        self.token_life = int(conf.get('token_life', 86400))
        self.allow_overrides = config_true_value(
            conf.get('allow_overrides', 't'))
        self.storage_url_scheme = conf.get('storage_url_scheme', 'default')
        # Build the in-memory user table from user_/user64_ conf entries:
        # each maps 'account:username' -> {'key', 'url', 'groups'}.
        self.users = {}
        for conf_key in conf:
            if conf_key.startswith('user_') or conf_key.startswith('user64_'):
                account, username = conf_key.split('_', 1)[1].split('_')
                if conf_key.startswith('user64_'):
                    # Because trailing equal signs would screw up config file
                    # parsing, we auto-pad with '=' chars.
                    # NOTE(review): '=' * (len % 4) only equals the required
                    # base64 pad ((4 - len % 4) % 4) when len % 4 is 0 or 2
                    # (as in the docstring example); a len % 4 == 3 name
                    # would be mis-padded -- confirm.
                    account += '=' * (len(account) % 4)
                    account = base64.b64decode(account)
                    username += '=' * (len(username) % 4)
                    username = base64.b64decode(username)
                values = conf[conf_key].split()
                if not values:
                    raise ValueError('%s has no key set' % conf_key)
                # First value is the user's key; an optional trailing URL
                # (or '$HOST' template) overrides the default storage URL;
                # anything in between is the user's group list.
                key = values.pop(0)
                if values and ('://' in values[-1] or '$HOST' in values[-1]):
                    url = values.pop()
                else:
                    url = '$HOST/v1/%s%s' % (self.reseller_prefix, account)
                self.users[account + ':' + username] = {
                    'key': key, 'url': url, 'groups': values}

    def __call__(self, env, start_response):
        """
        Accepts a standard WSGI application call, authenticating the request
        and installing callback hooks for authorization and ACL header
        validation. For an authenticated request, REMOTE_USER will be set to a
        comma separated list of the user's groups.

        With a non-empty reseller prefix, acts as the definitive auth service
        for just tokens and accounts that begin with that prefix, but will deny
        requests outside this prefix if no other auth middleware overrides it.

        With an empty reseller prefix, acts as the definitive auth service only
        for tokens that validate to a non-empty set of groups. For all other
        requests, acts as the fallback auth service when no other auth
        middleware overrides it.

        Alternatively, if the request matches the self.auth_prefix, the request
        will be routed through the internal auth request handler (self.handle).
        This is to handle granting tokens, etc.
        """
        if self.allow_overrides and env.get('swift.authorize_override', False):
            return self.app(env, start_response)
        if env.get('PATH_INFO', '').startswith(self.auth_prefix):
            return self.handle(env, start_response)
        s3 = env.get('HTTP_AUTHORIZATION')
        token = env.get('HTTP_X_AUTH_TOKEN', env.get('HTTP_X_STORAGE_TOKEN'))
        if s3 or (token and token.startswith(self.reseller_prefix)):
            # Note: Empty reseller_prefix will match all tokens.
            groups = self.get_groups(env, token)
            if groups:
                # First group doubles as the unique user identifier.
                user = groups and groups.split(',', 1)[0] or ''
                trans_id = env.get('swift.trans_id')
                self.logger.debug('User: %s uses token %s (trans_id %s)' %
                                  (user, 's3' if s3 else token, trans_id))
                env['REMOTE_USER'] = groups
                env['swift.authorize'] = self.authorize
                env['swift.clean_acl'] = clean_acl
                if '.reseller_admin' in groups:
                    env['reseller_request'] = True
            else:
                # Unauthorized token
                if self.reseller_prefix:
                    # Because I know I'm the definitive auth for this token, I
                    # can deny it outright.
                    self.logger.increment('unauthorized')
                    return HTTPUnauthorized()(env, start_response)
                # Because I'm not certain if I'm the definitive auth for empty
                # reseller_prefixed tokens, I won't overwrite swift.authorize.
                elif 'swift.authorize' not in env:
                    env['swift.authorize'] = self.denied_response
        else:
            if self.reseller_prefix:
                # With a non-empty reseller_prefix, I would like to be called
                # back for anonymous access to accounts I know I'm the
                # definitive auth for.
                try:
                    version, rest = split_path(env.get('PATH_INFO', ''),
                                               1, 2, True)
                except ValueError:
                    version, rest = None, None
                    self.logger.increment('errors')
                if rest and rest.startswith(self.reseller_prefix):
                    # Handle anonymous access to accounts I'm the definitive
                    # auth for.
                    env['swift.authorize'] = self.authorize
                    env['swift.clean_acl'] = clean_acl
                # Not my token, not my account, I can't authorize this request,
                # deny all is a good idea if not already set...
                elif 'swift.authorize' not in env:
                    env['swift.authorize'] = self.denied_response
            # Because I'm not certain if I'm the definitive auth for empty
            # reseller_prefixed accounts, I won't overwrite swift.authorize.
            elif 'swift.authorize' not in env:
                env['swift.authorize'] = self.authorize
                env['swift.clean_acl'] = clean_acl
        return self.app(env, start_response)

    def _get_user_groups(self, account, account_user, account_id):
        """
        Build the comma-separated group string for a configured user.

        :param account: example: test
        :param account_user: example: test:tester
        :param account_id: storage account id (last path segment of the
                           user's storage URL)
        """
        groups = [account, account_user]
        groups.extend(self.users[account_user]['groups'])
        if '.admin' in groups:
            # Admins get the account's storage id as a group in place of
            # the '.admin' marker itself.
            groups.remove('.admin')
            groups.append(account_id)
        groups = ','.join(groups)
        return groups

    def get_groups(self, env, token):
        """
        Get groups for the given token.

        :param env: The current WSGI environment dictionary.
        :param token: Token to validate and return a group string for.
        :returns: None if the token is invalid or a string containing a comma
                  separated list of groups the authenticated user is a member
                  of. The first group in the list is also considered a unique
                  identifier for that user.
        """
        groups = None
        memcache_client = cache_from_env(env)
        if not memcache_client:
            raise Exception('Memcache required')
        memcache_token_key = '%s/token/%s' % (self.reseller_prefix, token)
        cached_auth_data = memcache_client.get(memcache_token_key)
        if cached_auth_data:
            expires, groups = cached_auth_data
            if expires < time():
                # Cached token has expired; fall through as unauthenticated.
                groups = None
        if env.get('HTTP_AUTHORIZATION'):
            # S3-style request: the Authorization header carries
            # "<scheme> <account:user>:<signature>" and the token argument
            # holds the url-safe-base64 string-to-sign.
            account_user, sign = \
                env['HTTP_AUTHORIZATION'].split(' ')[1].rsplit(':', 1)
            if account_user not in self.users:
                return None
            account, user = account_user.split(':', 1)
            account_id = self.users[account_user]['url'].rsplit('/', 1)[-1]
            # Rewrite the request path to use the real storage account id.
            path = env['PATH_INFO']
            env['PATH_INFO'] = path.replace(account_user, account_id, 1)
            # Verify the HMAC-SHA1 signature with the user's key.
            msg = base64.urlsafe_b64decode(unquote(token))
            key = self.users[account_user]['key']
            s = base64.encodestring(hmac.new(key, msg, sha1).digest()).strip()
            if s != sign:
                return None
            groups = self._get_user_groups(account, account_user, account_id)
        return groups

    def authorize(self, req):
        """
        Returns None if the request is authorized to continue or a standard
        WSGI response callable if not.
        """
        try:
            version, account, container, obj = req.split_path(1, 4, True)
        except ValueError:
            self.logger.increment('errors')
            return HTTPNotFound(request=req)
        if not account or not account.startswith(self.reseller_prefix):
            self.logger.debug("Account name: %s doesn't start with "
                              "reseller_prefix: %s."
                              % (account, self.reseller_prefix))
            return self.denied_response(req)
        # REMOTE_USER is the comma-separated group list built by get_groups;
        # the second entry, when present, is the 'account:user' name.
        user_groups = (req.remote_user or '').split(',')
        account_user = user_groups[1] if len(user_groups) > 1 else None
        if '.reseller_admin' in user_groups and \
                account != self.reseller_prefix and \
                account[len(self.reseller_prefix)] != '.':
            req.environ['swift_owner'] = True
            self.logger.debug("User %s has reseller admin authorizing."
                              % account_user)
            return None
        if account in user_groups and \
                (req.method not in ('DELETE', 'PUT') or container):
            # If the user is admin for the account and is not trying to do an
            # account DELETE or PUT...
            req.environ['swift_owner'] = True
            self.logger.debug("User %s has admin authorizing."
                              % account_user)
            return None
        if (req.environ.get('swift_sync_key')
                and (req.environ['swift_sync_key'] ==
                     req.headers.get('x-container-sync-key', None))
                and 'x-timestamp' in req.headers):
            self.logger.debug("Allow request with container sync-key: %s."
                              % req.environ['swift_sync_key'])
            return None
        if req.method == 'OPTIONS':
            # allow OPTIONS requests to proceed as normal
            self.logger.debug("Allow OPTIONS request.")
            return None
        # Fall back to the container/account ACL: referrer checks first,
        # then group membership.
        referrers, groups = parse_acl(getattr(req, 'acl', None))
        if referrer_allowed(req.referer, referrers):
            if obj or '.rlistings' in groups:
                self.logger.debug("Allow authorizing %s via referer ACL."
                                  % req.referer)
                return None
        for user_group in user_groups:
            if user_group in groups:
                self.logger.debug("User %s allowed in ACL: %s authorizing."
                                  % (account_user, user_group))
                return None
        return self.denied_response(req)

    def denied_response(self, req):
        """
        Returns a standard WSGI response callable with the status of 403 or 401
        depending on whether the REMOTE_USER is set or not.
        """
        if req.remote_user:
            self.logger.increment('forbidden')
            return HTTPForbidden(request=req)
        else:
            self.logger.increment('unauthorized')
            return HTTPUnauthorized(request=req)

    def handle(self, env, start_response):
        """
        WSGI entry point for auth requests (ones that match the
        self.auth_prefix).
        Wraps env in swob.Request object and passes it down.

        :param env: WSGI environment dictionary
        :param start_response: WSGI callable
        """
        try:
            req = Request(env)
            if self.auth_prefix:
                req.path_info_pop()
            req.bytes_transferred = '-'
            req.client_disconnect = False
            # Treat a legacy X-Storage-Token header as X-Auth-Token.
            if 'x-storage-token' in req.headers and \
                    'x-auth-token' not in req.headers:
                req.headers['x-auth-token'] = req.headers['x-storage-token']
            return self.handle_request(req)(env, start_response)
        except (Exception, Timeout):
            print "EXCEPTION IN handle: %s: %s" % (format_exc(), env)
            self.logger.increment('errors')
            start_response('500 Server Error',
                           [('Content-Type', 'text/plain')])
            return ['Internal server error.\n']

    def handle_request(self, req):
        """
        Entry point for auth requests (ones that match the self.auth_prefix).
        Should return a WSGI-style callable (such as swob.Response).

        :param req: swob.Request object
        """
        req.start_time = time()
        handler = None
        try:
            version, account, user, _junk = req.split_path(1, 4, True)
        except ValueError:
            self.logger.increment('errors')
            return HTTPNotFound(request=req)
        # Only GET token requests are supported; anything else is a 400.
        if version in ('v1', 'v1.0', 'auth'):
            if req.method == 'GET':
                handler = self.handle_get_token
        if not handler:
            self.logger.increment('errors')
            req.response = HTTPBadRequest(request=req)
        else:
            req.response = handler(req)
        return req.response

    def handle_get_token(self, req):
        """
        Handles the various `request for token and service end point(s)` calls.
        There are various formats to support the various auth servers in the
        past. Examples::

            GET <auth-prefix>/v1/<act>/auth
                X-Auth-User: <act>:<usr>  or  X-Storage-User: <usr>
                X-Auth-Key: <key>         or  X-Storage-Pass: <key>
            GET <auth-prefix>/auth
                X-Auth-User: <act>:<usr>  or  X-Storage-User: <act>:<usr>
                X-Auth-Key: <key>         or  X-Storage-Pass: <key>
            GET <auth-prefix>/v1.0
                X-Auth-User: <act>:<usr>  or  X-Storage-User: <act>:<usr>
                X-Auth-Key: <key>         or  X-Storage-Pass: <key>

        On successful authentication, the response will have X-Auth-Token and
        X-Storage-Token set to the token to use with Swift and X-Storage-URL
        set to the URL to the default Swift cluster to use.

        :param req: The swob.Request to process.
        :returns: swob.Response, 2xx on success with data set as explained
                  above.
        """
        # Validate the request info
        try:
            pathsegs = split_path(req.path_info, 1, 3, True)
        except ValueError:
            self.logger.increment('errors')
            return HTTPNotFound(request=req)
        if pathsegs[0] == 'v1' and pathsegs[2] == 'auth':
            # <auth-prefix>/v1/<act>/auth form: account comes from the path.
            account = pathsegs[1]
            user = req.headers.get('x-storage-user')
            if not user:
                user = req.headers.get('x-auth-user')
                if not user or ':' not in user:
                    self.logger.increment('token_denied')
                    return HTTPUnauthorized(request=req)
                account2, user = user.split(':', 1)
                if account != account2:
                    self.logger.increment('token_denied')
                    return HTTPUnauthorized(request=req)
            key = req.headers.get('x-storage-pass')
            if not key:
                key = req.headers.get('x-auth-key')
        elif pathsegs[0] in ('auth', 'v1.0'):
            # <auth-prefix>/auth or /v1.0 form: account comes from the header.
            user = req.headers.get('x-auth-user')
            if not user:
                user = req.headers.get('x-storage-user')
            if not user or ':' not in user:
                self.logger.increment('token_denied')
                return HTTPUnauthorized(request=req)
            account, user = user.split(':', 1)
            key = req.headers.get('x-auth-key')
            if not key:
                key = req.headers.get('x-storage-pass')
        else:
            return HTTPBadRequest(request=req)
        if not all((account, user, key)):
            self.logger.increment('token_denied')
            return HTTPUnauthorized(request=req)
        # Authenticate user
        account_user = account + ':' + user
        if account_user not in self.users:
            self.logger.increment('token_denied')
            return HTTPUnauthorized(request=req)
        if self.users[account_user]['key'] != key:
            self.logger.increment('token_denied')
            return HTTPUnauthorized(request=req)
        account_id = self.users[account_user]['url'].rsplit('/', 1)[-1]
        # Get memcache client
        memcache_client = cache_from_env(req.environ)
        if not memcache_client:
            raise Exception('Memcache required')
        # See if a token already exists and hasn't expired
        token = None
        memcache_user_key = '%s/user/%s' % (self.reseller_prefix, account_user)
        candidate_token = memcache_client.get(memcache_user_key)
        if candidate_token:
            memcache_token_key = \
                '%s/token/%s' % (self.reseller_prefix, candidate_token)
            cached_auth_data = memcache_client.get(memcache_token_key)
            if cached_auth_data:
                expires, old_groups = cached_auth_data
                old_groups = old_groups.split(',')
                new_groups = self._get_user_groups(account, account_user,
                                                   account_id)
                # Only reuse the cached token if it is still valid AND the
                # user's group membership hasn't changed since it was issued.
                if expires > time() and \
                        set(old_groups) == set(new_groups.split(',')):
                    token = candidate_token
        # Create a new token if one didn't exist
        if not token:
            # Generate new token
            token = '%stk%s' % (self.reseller_prefix, uuid4().hex)
            expires = time() + self.token_life
            groups = self._get_user_groups(account, account_user, account_id)
            # Save token
            memcache_token_key = '%s/token/%s' % (self.reseller_prefix, token)
            memcache_client.set(memcache_token_key, (expires, groups),
                                time=float(expires - time()))
            # Record the token with the user info for future use.
            memcache_user_key = \
                '%s/user/%s' % (self.reseller_prefix, account_user)
            memcache_client.set(memcache_user_key, token,
                                time=float(expires - time()))
        resp = Response(request=req, headers={
            'x-auth-token': token, 'x-storage-token': token})
        url = self.users[account_user]['url'].replace('$HOST', resp.host_url)
        if self.storage_url_scheme != 'default':
            url = self.storage_url_scheme + ':' + url.split(':', 1)[1]
        resp.headers['x-storage-url'] = url
        return resp
def filter_factory(global_conf, **local_conf):
    """Returns a WSGI filter app for use with paste.deploy."""
    # Merge paste.deploy's global and local settings; local wins.
    settings = dict(global_conf)
    settings.update(local_conf)

    def auth_filter(app):
        return TempAuth(app, settings)
    return auth_filter
| {
"content_hash": "7158f8bfe888c80ad1b8e101cabb52a9",
"timestamp": "",
"source": "github",
"line_count": 481,
"max_line_length": 79,
"avg_line_length": 44.02910602910603,
"alnum_prop": 0.5571819813013504,
"repo_name": "citrix-openstack-build/swift",
"id": "c871a293af98c7fa8e157df27433a165b7777e03",
"size": "21763",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "swift/common/middleware/tempauth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15048"
},
{
"name": "Python",
"bytes": "3183466"
},
{
"name": "Shell",
"bytes": "685"
}
],
"symlink_target": ""
} |
"""Department - the department in the laboratory.
"""
from Products.Archetypes.public import *
from Products.Archetypes.references import HoldingReference
from Products.CMFCore.utils import getToolByName
from bika.lims.config import PROJECTNAME
from bika.lims.content.bikaschema import BikaSchema
from AccessControl import ClassSecurityInfo
import sys
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from zope.interface import implements
# Department schema: the shared BikaSchema plus a Manager reference and
# three computed fields that denormalize the selected manager's contact
# details (name/phone/email) for use on reports.
schema = BikaSchema.copy() + Schema((
    ReferenceField('Manager',
        vocabulary = 'getContacts',
        vocabulary_display_path_bound = sys.maxint,  # Python 2 only (sys.maxint)
        allowed_types = ('LabContact',),
        referenceClass = HoldingReference,
        relationship = 'DepartmentLabContact',
        widget = ReferenceWidget(
            checkbox_bound = 0,
            label=_("Manager"),
            description = _(
                "Select a manager from the available personnel configured under the "
                "'lab contacts' setup item. Departmental managers are referenced on "
                "analysis results reports containing analyses by their department."),
        ),
    ),
    # Read-only mirror of the manager's full name.
    ComputedField('ManagerName',
        expression = "context.getManager() and context.getManager().getFullname() or ''",
        widget = ComputedWidget(
            visible = False,
        ),
    ),
    # Read-only mirror of the manager's business phone.
    ComputedField('ManagerPhone',
        expression = "context.getManager() and context.getManager().getBusinessPhone() or ''",
        widget = ComputedWidget(
            visible = False,
        ),
    ),
    # Read-only mirror of the manager's email address.
    ComputedField('ManagerEmail',
        expression = "context.getManager() and context.getManager().getEmailAddress() or ''",
        widget = ComputedWidget(
            visible = False,
        ),
    ),
))
# Expose the inherited description field on the default schemata.
schema['description'].widget.visible = True
schema['description'].schemata = 'default'
class Department(BaseContent):
    """An Archetypes content type representing one laboratory department."""
    schema = schema
    security = ClassSecurityInfo()
    displayContentsTab = False
    _at_rename_after_creation = True

    def _renameAfterCreation(self, check_auto_id=False):
        # Delegate object id generation to the bika.lims id server.
        from bika.lims.idserver import renameAfterCreation
        renameAfterCreation(self)
# Register Department with Archetypes under the bika.lims project so it can
# be instantiated through the content-type factory machinery.
registerType(Department, PROJECTNAME)
| {
"content_hash": "5ca70b2108971c9d6ae1435923a8260b",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 94,
"avg_line_length": 35.67741935483871,
"alnum_prop": 0.6717902350813744,
"repo_name": "hocinebendou/bika.gsoc",
"id": "5013ed51750d399bde20d68f6bc35cf2c2b6bac8",
"size": "2212",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "bika/lims/content/department.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "403"
},
{
"name": "COBOL",
"bytes": "5987"
},
{
"name": "CSS",
"bytes": "29758"
},
{
"name": "JavaScript",
"bytes": "411425"
},
{
"name": "Python",
"bytes": "4330980"
},
{
"name": "RobotFramework",
"bytes": "239735"
},
{
"name": "Shell",
"bytes": "11201"
}
],
"symlink_target": ""
} |
"""Tools to parse and validate a MongoDB URI."""
from urllib import unquote_plus
from pymongo.common import validate
from pymongo.errors import (ConfigurationError,
InvalidURI,
UnsupportedOption)
SCHEME = 'mongodb://'  # required URI prefix for MongoDB connection strings
SCHEME_LEN = len(SCHEME)
DEFAULT_PORT = 27017  # MongoDB's default server port
def _partition(entity, sep):
"""Python2.4 doesn't have a partition method so we provide
our own that mimics str.partition from later releases.
Split the string at the first occurrence of sep, and return a
3-tuple containing the part before the separator, the separator
itself, and the part after the separator. If the separator is not
found, return a 3-tuple containing the string itself, followed
by two empty strings.
"""
parts = entity.split(sep, 1)
if len(parts) == 2:
return parts[0], sep, parts[1]
else:
return entity, '', ''
def _rpartition(entity, sep):
"""Python2.4 doesn't have an rpartition method so we provide
our own that mimics str.rpartition from later releases.
Split the string at the last occurrence of sep, and return a
3-tuple containing the part before the separator, the separator
itself, and the part after the separator. If the separator is not
found, return a 3-tuple containing two empty strings, followed
by the string itself.
"""
idx = entity.rfind(sep)
if idx == -1:
return '', '', entity
return entity[:idx], sep, entity[idx + 1:]
def parse_userinfo(userinfo):
    """Validates the format of user information in a MongoDB URI.
    Reserved characters like ':', '/', '+' and '@' must be escaped
    following RFC 2396.

    Returns a 2-tuple containing the unescaped username followed
    by the unescaped password.

    :Parameters:
        - `userinfo`: A string of the form <username>:<password>

    .. versionchanged:: 2.2
       Now uses `urllib.unquote_plus` so `+` characters must be escaped.
    """
    if '@' in userinfo or userinfo.count(':') > 1:
        raise InvalidURI("':' or '@' characters in a username or password "
                         "must be escaped according to RFC 2396.")
    user, _, passwd = _partition(userinfo, ":")
    if not user:
        # No password is expected with GSSAPI authentication, but a
        # username is always required.
        raise InvalidURI("The empty string is not valid username.")
    return unquote_plus(user), unquote_plus(passwd)
def parse_ipv6_literal_host(entity, default_port):
    """Validates an IPv6 literal host:port string.

    Returns a 2-tuple of IPv6 literal followed by port where
    port is default_port if it wasn't specified in entity.

    :Parameters:
        - `entity`: A string that represents an IPv6 literal enclosed
                    in braces (e.g. '[::1]' or '[::1]:27017').
        - `default_port`: The port number to use when one wasn't
                          specified in entity.
    """
    if ']' not in entity:
        raise ConfigurationError("an IPv6 address literal must be "
                                 "enclosed in '[' and ']' according "
                                 "to RFC 2732.")
    sep = entity.find(']:')
    if sep == -1:
        # No port component: strip the surrounding brackets.
        return entity[1:-1], default_port
    return entity[1:sep], entity[sep + 2:]
def parse_host(entity, default_port=DEFAULT_PORT):
    """Validates a host string

    Returns a 2-tuple of host followed by port where port is default_port
    if it wasn't specified in the string.

    :Parameters:
        - `entity`: A host or host:port string where host could be a
                    hostname or IP address.
        - `default_port`: The port number to use when one wasn't
                          specified in entity.
    """
    host, port = entity, default_port
    if entity[0] == '[':
        # Bracketed form means an IPv6 literal, possibly with a port.
        host, port = parse_ipv6_literal_host(entity, default_port)
    elif ':' in entity:
        if entity.count(':') > 1:
            raise ConfigurationError("Reserved characters such as ':' must be "
                                     "escaped according RFC 2396. An IPv6 "
                                     "address literal must be enclosed in '[' "
                                     "and ']' according to RFC 2732.")
        host, port = host.split(':', 1)
    if isinstance(port, basestring):
        if not port.isdigit():
            raise ConfigurationError("Port number must be an integer.")
        port = int(port)
    return host, port
def validate_options(opts):
    """Validates and normalizes options passed in a MongoDB URI.

    Returns a new dictionary of validated and normalized options.

    :Parameters:
        - `opts`: A dict of MongoDB URI options.
    """
    validated = {}
    for opt_name, opt_value in opts.iteritems():
        opt_name, opt_value = validate(opt_name, opt_value)
        # str() so a unicode URI still yields plain 'str' keys, making the
        # result safe to pass as **kwargs in any Python version.
        validated[str(opt_name)] = opt_value
    return validated
def _parse_options(opts, delim):
"""Helper method for split_options which creates the options dict.
Also handles the creation of a list of dicts for the URI tag_sets/
readpreferencetags portion."""
options = {}
for opt in opts.split(delim):
key, val = opt.split("=")
if key.lower() == 'readpreferencetags':
options.setdefault('readpreferencetags', []).append(val)
else:
options[key] = val
if 'readpreferencetags' in options:
new_tag_sets = []
for tag_set in options['readpreferencetags']:
tag_dict = {}
try:
for tag in tag_set.split(","):
tag_parts = tag.split(":")
tag_dict[tag_parts[0]] = tag_parts[1]
new_tag_sets.append(tag_dict)
except IndexError:
new_tag_sets.append({})
options['readpreferencetags'] = new_tag_sets
return options
def split_options(opts):
    """Takes the options portion of a MongoDB URI, validates each option
    and returns the options in a dictionary. The option names will be returned
    lowercase even if camelCase options are used.

    :Parameters:
        - `opt`: A string representing MongoDB URI options.
    """
    uses_amp = opts.find("&") >= 0
    uses_semi = opts.find(";") >= 0
    try:
        if uses_amp and uses_semi:
            raise InvalidURI("Can not mix '&' and ';' for option separators.")
        elif uses_amp:
            options = _parse_options(opts, "&")
        elif uses_semi:
            options = _parse_options(opts, ";")
        elif opts.find("=") != -1:
            # Single key=value pair, no separator at all.
            options = _parse_options(opts, None)
        else:
            raise ValueError
    except ValueError:
        raise InvalidURI("MongoDB URI options are key=value pairs.")
    return validate_options(options)
def split_hosts(hosts, default_port=DEFAULT_PORT):
    """Takes a string of the form host1[:port],host2[:port]... and
    splits it into (host, port) tuples. If [:port] isn't present the
    default_port is used.

    Returns a list of 2-tuples containing the host name (or IP) followed by
    port number.

    :Parameters:
        - `hosts`: A string of the form host1[:port],host2[:port],...
        - `default_port`: The port number to use when one wasn't specified
          for a host.
    """
    nodes = []
    for entity in hosts.split(','):
        if not entity:
            raise ConfigurationError("Empty host "
                                     "(or extra comma in host list).")
        # Unix socket entities don't have ports.
        if entity.endswith('.sock'):
            nodes.append(parse_host(entity, None))
        else:
            nodes.append(parse_host(entity, default_port))
    return nodes
def parse_uri(uri, default_port=DEFAULT_PORT):
    """Parse and validate a MongoDB URI.

    Returns a dict of the form::

        {
            'nodelist': <list of (host, port) tuples>,
            'username': <username> or None,
            'password': <password> or None,
            'database': <database name> or None,
            'collection': <collection name> or None,
            'options': <dict of MongoDB URI options>
        }

    :Parameters:
        - `uri`: The MongoDB URI to parse.
        - `default_port`: The port number to use when one wasn't specified
          for a host in the URI.
    """
    if not uri.startswith(SCHEME):
        raise InvalidURI("Invalid URI scheme: URI "
                         "must begin with '%s'" % (SCHEME,))
    scheme_free = uri[SCHEME_LEN:]
    if not scheme_free:
        raise InvalidURI("Must provide at least one hostname or IP.")
    nodes = None
    user = None
    passwd = None
    dbase = None
    collection = None
    options = {}
    # Check for unix domain sockets in the uri
    if '.sock' in scheme_free:
        host_part, _, path_part = _rpartition(scheme_free, '/')
        # A socket path itself contains '/', so the rightmost '/' may be
        # part of the socket path rather than the host/database separator.
        # Probe by recursively parsing just the candidate host part; if that
        # fails, treat the entire remainder as the host part.
        try:
            parse_uri('%s%s' % (SCHEME, host_part))
        except (ConfigurationError, InvalidURI):
            host_part = scheme_free
            path_part = ""
    else:
        host_part, _, path_part = _partition(scheme_free, '/')
    if not path_part and '?' in host_part:
        raise InvalidURI("A '/' is required between "
                         "the host list and any options.")
    if '@' in host_part:
        # Credentials precede the host list: user:pass@host1,host2,...
        userinfo, _, hosts = _rpartition(host_part, '@')
        user, passwd = parse_userinfo(userinfo)
    else:
        hosts = host_part
    nodes = split_hosts(hosts, default_port=default_port)
    if path_part:
        # path_part is "[database[.collection]][?options]".
        if path_part[0] == '?':
            opts = path_part[1:]
        else:
            dbase, _, opts = _partition(path_part, '?')
            if '.' in dbase:
                dbase, collection = dbase.split('.', 1)
        if opts:
            options = split_options(opts)
    return {
        'nodelist': nodes,
        'username': user,
        'password': passwd,
        'database': dbase,
        'collection': collection,
        'options': options
    }
if __name__ == '__main__':
    # Ad-hoc CLI: pretty-print the parse of the URI given as argv[1].
    # (Python 2 only: uses the 'except ..., e' syntax and print statement,
    # consistent with this module's Python 2.4 compatibility helpers.)
    import pprint
    import sys
    try:
        pprint.pprint(parse_uri(sys.argv[1]))
    except (InvalidURI, UnsupportedOption), e:
        print e
    sys.exit(0)
| {
"content_hash": "73c1906c62b230c2a0b01ab8402eda56",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 79,
"avg_line_length": 33.17628205128205,
"alnum_prop": 0.582069365278717,
"repo_name": "otherness-space/myProject002",
"id": "dc7359fab8964fe17042b36763f72b2c3984498a",
"size": "10933",
"binary": false,
"copies": "19",
"ref": "refs/heads/master",
"path": "my_project_002/lib/python2.7/site-packages/pymongo/uri_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "41612"
},
{
"name": "Groff",
"bytes": "83"
},
{
"name": "HTML",
"bytes": "84945"
},
{
"name": "JavaScript",
"bytes": "93321"
},
{
"name": "Python",
"bytes": "6458978"
},
{
"name": "Shell",
"bytes": "3787"
}
],
"symlink_target": ""
} |
def test_cached_property():
    """cached_property evaluates once and replays; @property re-evaluates."""
    from planar.util import cached_property

    class Sample(object):
        # Call counters observe how many times each getter body runs.
        plain_calls = 0
        memo_calls = 0

        @property
        def plain(self):
            """Nay"""
            self.plain_calls += 1
            return 'not cached'

        @cached_property
        def memo(self):
            """Yay"""
            self.memo_calls += 1
            return 'cached'

    obj = Sample()
    # Docstrings survive on both descriptors; no getter has run yet.
    assert obj.plain_calls == 0
    assert Sample.plain.__doc__ == 'Nay'
    assert obj.memo_calls == 0
    assert Sample.memo.__doc__ == 'Yay'
    first_plain = obj.plain
    assert obj.plain_calls == 1
    first_memo = obj.memo
    assert obj.memo_calls == 1
    # Second and third reads: the plain property recomputes each time,
    # the cached property never runs its body again.
    assert first_plain == obj.plain
    assert obj.plain_calls == 2
    assert first_memo == obj.memo
    assert obj.memo_calls == 1
    assert first_plain == obj.plain
    assert obj.plain_calls == 3
    assert first_memo == obj.memo
    assert obj.memo_calls == 1
# vim: ai ts=4 sts=4 et sw=4 tw=78
| {
"content_hash": "d82d5dae226f4c36a20eb167ef2daf7a",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 47,
"avg_line_length": 25.177777777777777,
"alnum_prop": 0.5922330097087378,
"repo_name": "wrightjb/bolt-planar",
"id": "48f6d1bbaf9fd61a42634dc21687b3a8a379e95e",
"size": "1134",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/test_util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "331040"
},
{
"name": "Shell",
"bytes": "3852"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make ``codeuserstatus.remaining_time`` optional (nullable)."""
    dependencies = [
        ('coding', '0009_auto_20170906_1228'),
    ]
    operations = [
        migrations.AlterField(
            model_name='codeuserstatus',
            name='remaining_time',
            # blank=True allows the field to be omitted in forms;
            # null=True stores missing values as SQL NULL.
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
| {
"content_hash": "53d14889d12f9430893d2b5a47744dd5",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 61,
"avg_line_length": 22.444444444444443,
"alnum_prop": 0.6089108910891089,
"repo_name": "garrykevin-ep/Skillet",
"id": "c5e4b2158b0fbd077a41b574dc0942c13928d633",
"size": "475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coding/migrations/0010_auto_20170906_1229.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "38877"
},
{
"name": "HTML",
"bytes": "25220"
},
{
"name": "JavaScript",
"bytes": "3365"
},
{
"name": "Python",
"bytes": "54707"
}
],
"symlink_target": ""
} |
"""
``numpy.linalg``
================
The NumPy linear algebra functions rely on BLAS and LAPACK to provide efficient
low level implementations of standard linear algebra algorithms. Those
libraries may be provided by NumPy itself using C versions of a subset of their
reference implementations but, when possible, highly optimized libraries that
take advantage of specialized processor functionality are preferred. Examples
of such libraries are OpenBLAS, MKL (TM), and ATLAS. Because those libraries
are multithreaded and processor dependent, environmental variables and external
packages such as threadpoolctl may be needed to control the number of threads
or specify the processor architecture.
- OpenBLAS: https://www.openblas.net/
- threadpoolctl: https://github.com/joblib/threadpoolctl
Please note that the most-used linear algebra functions in NumPy are present in
the main ``numpy`` namespace rather than in ``numpy.linalg``. They are:
``dot``, ``vdot``, ``inner``, ``outer``, ``matmul``, ``tensordot``, ``einsum``,
``einsum_path`` and ``kron``.
Functions present in numpy.linalg are listed below.
Matrix and vector products
--------------------------
multi_dot
matrix_power
Decompositions
--------------
cholesky
qr
svd
Matrix eigenvalues
------------------
eig
eigh
eigvals
eigvalsh
Norms and other numbers
-----------------------
norm
cond
det
matrix_rank
slogdet
Solving equations and inverting matrices
----------------------------------------
solve
tensorsolve
lstsq
inv
pinv
tensorinv
Exceptions
----------
LinAlgError
"""
from __future__ import division, absolute_import, print_function
# To get sub-modules
from .linalg import *
from numpy._pytesttester import PytestTester
# Expose a `numpy.linalg.test()` entry point for running this submodule's
# test suite, then delete the helper name so it does not leak into the
# public namespace.
test = PytestTester(__name__)
del PytestTester
| {
"content_hash": "39f4b2214ed5e4cf50e8538242e4e1bf",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 79,
"avg_line_length": 23.10126582278481,
"alnum_prop": 0.6915068493150685,
"repo_name": "jorisvandenbossche/numpy",
"id": "55560815d00535f5fa8203e4a68881038fb11690",
"size": "1825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numpy/linalg/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9068647"
},
{
"name": "C++",
"bytes": "189527"
},
{
"name": "Fortran",
"bytes": "10884"
},
{
"name": "JavaScript",
"bytes": "16928"
},
{
"name": "Makefile",
"bytes": "4290"
},
{
"name": "Python",
"bytes": "8307898"
},
{
"name": "Shell",
"bytes": "8482"
},
{
"name": "sed",
"bytes": "5741"
}
],
"symlink_target": ""
} |
"""Support for testing internet speed via Speedtest.net."""
from __future__ import annotations
from datetime import timedelta
import logging
import speedtest
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
CONF_MONITORED_CONDITIONS,
CONF_SCAN_INTERVAL,
EVENT_HOMEASSISTANT_STARTED,
)
from homeassistant.core import CoreState, HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
CONF_MANUAL,
CONF_SERVER_ID,
DEFAULT_SCAN_INTERVAL,
DEFAULT_SERVER,
DOMAIN,
PLATFORMS,
SENSOR_TYPES,
SPEED_TEST_SERVICE,
)
# Module-level logger named after this module.
_LOGGER = logging.getLogger(__name__)
# Schema for the legacy configuration.yaml setup path. The whole DOMAIN key
# is deprecated (setup now happens via config entries) but is still accepted
# so existing YAML can be imported.
CONFIG_SCHEMA = vol.Schema(
    vol.All(
        # Deprecated in Home Assistant 2021.6
        cv.deprecated(DOMAIN),
        {
            DOMAIN: vol.Schema(
                {
                    vol.Optional(CONF_SERVER_ID): cv.positive_int,
                    vol.Optional(
                        CONF_SCAN_INTERVAL,
                        default=timedelta(minutes=DEFAULT_SCAN_INTERVAL),
                    ): cv.positive_time_period,
                    vol.Optional(CONF_MANUAL, default=False): cv.boolean,
                    vol.Optional(
                        CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)
                    ): vol.All(cv.ensure_list, [vol.In(list(SENSOR_TYPES))]),
                }
            )
        },
    ),
    extra=vol.ALLOW_EXTRA,
)
def server_id_valid(server_id: str) -> bool:
    """Check if server_id is valid."""
    try:
        client = speedtest.Speedtest()
        client.get_servers([int(server_id)])
    except (speedtest.ConfigRetrievalError, speedtest.NoMatchedServers):
        # Either the speedtest config could not be fetched or no server
        # matched the requested id.
        return False
    else:
        return True
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Import integration from config."""
    # Nothing to do unless legacy YAML configuration is present.
    if DOMAIN not in config:
        return True
    hass.async_create_task(
        hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_IMPORT}, data=config[DOMAIN]
        )
    )
    return True
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
    """Set up the Speedtest.net component."""
    coordinator = SpeedTestDataCoordinator(hass, config_entry)
    await coordinator.async_setup()
    async def _enable_scheduled_speedtests(*_):
        """Activate the data update coordinator."""
        # Setting update_interval (re)arms the coordinator's periodic refresh.
        coordinator.update_interval = timedelta(
            minutes=config_entry.options.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
        )
        await coordinator.async_refresh()
    # Scheduled tests are skipped entirely when the user opted for manual mode.
    if not config_entry.options.get(CONF_MANUAL, False):
        if hass.state == CoreState.running:
            await _enable_scheduled_speedtests()
        else:
            # Running a speed test during startup can prevent
            # integrations from being able to setup because it
            # can saturate the network interface.
            hass.bus.async_listen_once(
                EVENT_HOMEASSISTANT_STARTED, _enable_scheduled_speedtests
            )
    hass.data[DOMAIN] = coordinator
    hass.config_entries.async_setup_platforms(config_entry, PLATFORMS)
    return True
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
    """Unload SpeedTest Entry from config_entry."""
    hass.services.async_remove(DOMAIN, SPEED_TEST_SERVICE)
    # Only drop the shared coordinator when every platform unloaded cleanly.
    if not await hass.config_entries.async_unload_platforms(config_entry, PLATFORMS):
        return False
    hass.data.pop(DOMAIN)
    return True
class SpeedTestDataCoordinator(DataUpdateCoordinator):
    """Get the latest data from speedtest.net."""
    def __init__(self, hass: HomeAssistant, config_entry: ConfigEntry) -> None:
        """Initialize the data object."""
        self.hass = hass
        self.config_entry: ConfigEntry = config_entry
        # Populated in async_setup(); None until then.
        self.api: speedtest.Speedtest | None = None
        # Display-name -> server-info mapping; seeded with the "auto" entry.
        self.servers: dict[str, dict] = {DEFAULT_SERVER: {}}
        super().__init__(
            self.hass,
            _LOGGER,
            name=DOMAIN,
            update_method=self.async_update,
        )
    def update_servers(self):
        """Update list of test servers."""
        # get_servers() returns a mapping of distance -> list of servers;
        # flatten it before sorting.
        test_servers = self.api.get_servers()
        test_servers_list = []
        for servers in test_servers.values():
            for server in servers:
                test_servers_list.append(server)
        if test_servers_list:
            for server in sorted(
                test_servers_list,
                key=lambda server: (
                    server["country"],
                    server["name"],
                    server["sponsor"],
                ),
            ):
                self.servers[
                    f"{server['country']} - {server['sponsor']} - {server['name']}"
                ] = server
    def update_data(self):
        """Get the latest data from speedtest.net."""
        # Blocking; runs in an executor via async_update().
        self.update_servers()
        self.api.closest.clear()
        if self.config_entry.options.get(CONF_SERVER_ID):
            # Pin the test to the user-selected server instead of the closest.
            server_id = self.config_entry.options.get(CONF_SERVER_ID)
            self.api.get_servers(servers=[server_id])
        best_server = self.api.get_best_server()
        _LOGGER.debug(
            "Executing speedtest.net speed test with server_id: %s",
            best_server["id"],
        )
        self.api.download()
        self.api.upload()
        return self.api.results.dict()
    async def async_update(self) -> dict[str, str]:
        """Update Speedtest data."""
        try:
            return await self.hass.async_add_executor_job(self.update_data)
        except speedtest.NoMatchedServers as err:
            raise UpdateFailed("Selected server is not found.") from err
        except speedtest.SpeedtestException as err:
            raise UpdateFailed(err) from err
    async def async_set_options(self):
        """Set options for entry."""
        # Migrate legacy data-only entries: move the settings from entry.data
        # into entry.options.
        if not self.config_entry.options:
            data = {**self.config_entry.data}
            options = {
                CONF_SCAN_INTERVAL: data.pop(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL),
                CONF_MANUAL: data.pop(CONF_MANUAL, False),
                CONF_SERVER_ID: str(data.pop(CONF_SERVER_ID, "")),
            }
            self.hass.config_entries.async_update_entry(
                self.config_entry, data=data, options=options
            )
    async def async_setup(self) -> None:
        """Set up SpeedTest."""
        try:
            self.api = await self.hass.async_add_executor_job(speedtest.Speedtest)
            await self.hass.async_add_executor_job(self.update_servers)
        except speedtest.SpeedtestException as err:
            # Retryable: Home Assistant will call setup again later.
            raise ConfigEntryNotReady from err
        async def request_update(call):
            """Request update."""
            await self.async_request_refresh()
        await self.async_set_options()
        self.hass.services.async_register(DOMAIN, SPEED_TEST_SERVICE, request_update)
        self.config_entry.async_on_unload(
            self.config_entry.add_update_listener(options_updated_listener)
        )
async def options_updated_listener(hass: HomeAssistant, entry: ConfigEntry) -> None:
    """Handle options update."""
    coordinator = hass.data[DOMAIN]
    if entry.options[CONF_MANUAL]:
        # Manual mode: turn off the scheduled refresh entirely.
        coordinator.update_interval = None
        return
    coordinator.update_interval = timedelta(minutes=entry.options[CONF_SCAN_INTERVAL])
    await coordinator.async_request_refresh()
| {
"content_hash": "404dc6acf93875a8cfda206da01e7a20",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 88,
"avg_line_length": 33.921052631578945,
"alnum_prop": 0.6110680113783294,
"repo_name": "sander76/home-assistant",
"id": "b049b3a2d2c37ac1e89b834c5480524725e06d53",
"size": "7734",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/speedtestdotnet/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "36548768"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
import abc
from neutron_lib.api.definitions import agent as apidef
from neutron_lib.api import extensions as api_extensions
from neutron_lib import exceptions
from neutron_lib.plugins import directory
from neutron.api import extensions
from neutron.api.v2 import base
class Agent(api_extensions.APIExtensionDescriptor):
    """Agent management extension."""
    api_definition = apidef
    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        attr_params = apidef.RESOURCE_ATTRIBUTE_MAP.get(apidef.COLLECTION_NAME)
        agent_controller = base.create_resource(
            apidef.COLLECTION_NAME,
            apidef.RESOURCE_NAME,
            directory.get_plugin(),
            attr_params,
        )
        return [
            extensions.ResourceExtension(apidef.COLLECTION_NAME, agent_controller)
        ]
class AgentPluginBase(object, metaclass=abc.ABCMeta):
    """REST API to operate the Agent.
    All of method must be in an admin context.
    """
    def create_agent(self, context, agent):
        """Create agent.
        This operation is not allow in REST API.
        @raise exceptions.BadRequest:
        """
        # Agents self-register when reporting state; manual creation is
        # always rejected.
        raise exceptions.BadRequest()
    @abc.abstractmethod
    def delete_agent(self, context, id):
        """Delete agent.
        Agents register themselves on reporting state.
        But if an agent does not report its status
        for a long time (for example, it is dead forever. ),
        admin can remove it. Agents must be disabled before
        being removed.
        """
        pass
    @abc.abstractmethod
    def update_agent(self, context, agent):
        """Disable or Enable the agent.
        Description also can be updated. Some agents cannot be disabled, such
        as plugins, services. An error code should be reported in this case.
        @raise exceptions.BadRequest:
        """
        pass
    @abc.abstractmethod
    def get_agents(self, context, filters=None, fields=None):
        # List agents matching the optional filters; restrict the returned
        # columns to `fields` when given.
        pass
    @abc.abstractmethod
    def get_agent(self, context, id, fields=None):
        # Fetch a single agent by id.
        pass
| {
"content_hash": "301cdde3c2196330d989ede72444b2f3",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 77,
"avg_line_length": 29.2027027027027,
"alnum_prop": 0.6316520129569644,
"repo_name": "openstack/neutron",
"id": "99f79a9ca3c7ee8316ec408d913d5b5415f5b4e1",
"size": "2752",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/extensions/agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "2773"
},
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "15932611"
},
{
"name": "Ruby",
"bytes": "1257"
},
{
"name": "Shell",
"bytes": "83270"
}
],
"symlink_target": ""
} |
import collections
import envy.logger as log
import envy.static as static
import os
import os.path
import shutil
import stat
# Lightweight record for a resolved envy configuration: `location` is the
# root `.envy` directory, `plugins` the list of loaded plugins.
config = collections.namedtuple('config',
    ['location', 'plugins'])
class ConfigurationException(Exception):
    """Raised when the on-disk envy directory layout is missing or invalid."""
    pass
# TODO: Create catchable exceptions for these failures
def ensure_envy_dir(path, autocreate):
    """Ensure `path` exists and is a directory, optionally creating it.

    Raises ConfigurationException when `path` is a regular file, or when it
    is missing and `autocreate` is false. Returns `path` on success.
    """
    if os.path.isfile(path):
        err = 'Directory [%s] is actually a file!' % path
        log.error(err)
        raise ConfigurationException(err)
    if not os.path.exists(path):
        if not autocreate:
            err = 'No envy directory exists at path [%s]. ' % path \
                + 'Try creating one first with "nv c"'
            log.error(err)
            raise ConfigurationException(err)
        log.warning('Directory [%s] does not exist, creating it now' % path)
        os.makedirs(path)
    return path
def delete_envy_dir(basedir):
    """Recursively remove the `.envy` tree rooted under `basedir`."""
    shutil.rmtree(os.path.join(basedir, '.envy'))
def find_envy_dir(basedir, autocreate):
    """Resolve `<basedir>/.envy` and validate its standard subdirectories."""
    root_dir = os.path.join(basedir, '.envy')
    ensure_envy_dir(root_dir, autocreate)
    for subdir in ('bin', 'macros', 'plugins'):
        ensure_envy_dir(os.path.join(root_dir, subdir), autocreate)
    return root_dir
def load_system_config():
    """Load the user-level envy configuration rooted in $HOME, creating it if absent."""
    log.debug('Loading system-level envy configuration')
    home_root = find_envy_dir(os.environ['HOME'], autocreate=True)
    return config(location=home_root, plugins=[])
def p(*parts):
    """Tiny alias for os.path.join."""
    return os.path.join(*parts)
def create_file(fn, contents):
    """Write `contents` to `fn` in text mode, truncating any existing file."""
    with open(fn, 'w') as out:
        out.write(contents)
def create_executable(fn, contents):
    """Write `contents` to `fn` and add the owner-execute permission bit."""
    create_file(fn, contents)
    mode = os.stat(fn).st_mode
    os.chmod(fn, mode | stat.S_IEXEC)
def load_config(basedir, autocreate):
    """Load the envy configuration under `basedir`, refreshing its scaffolding."""
    log.debug('Loading envy configuration')
    root_dir = find_envy_dir(basedir, autocreate=autocreate)
    # TODO: Don't recreate each time?
    # (path parts, contents, writer) — creation order matters only for logs,
    # but is preserved from the original anyway.
    scaffold = [
        ((root_dir, '.activator'), static.__ACTIVATOR, create_executable),
        ((root_dir, '.init'), static.__INIT, create_executable),
        ((root_dir, 'todos.md'), static.__TODOS_MD, create_file),
        ((root_dir, 'bin', 'todos'), static.__TODOS, create_executable),
        ((root_dir, 'bin', 'record'), static.__RECORDING, create_executable),
        ((root_dir, 'bin', 'recording_sub'), static.__RECORDING_SUB, create_executable),
    ]
    for parts, contents, writer in scaffold:
        writer(p(*parts), contents)
    return config(location=root_dir, plugins=[])
class Environment(object):
    """One envy-managed working environment rooted at `basedir`."""
    def __init__(self, basedir):
        self.basedir = basedir
        # The environment's name is simply the directory's base name.
        self.name = os.path.basename(self.basedir)
    # TODO: Break out into separate `create`, `check`, and `load` methods
    def init(self, autocreate=False):
        self.system_config = load_system_config()
        self.config = load_config(self.basedir, autocreate)
    def destroy(self):
        delete_envy_dir(self.basedir)
    @property
    def extra_path(self):
        # PATH-style (colon-joined) string of the env-local and system-level
        # bin directories. NOTE: the local `p` shadows the module-level
        # helper p() inside this comprehension.
        config_locations = [self.config.location, self.system_config.location]
        dirs = [os.path.join(p, 'bin') for p in config_locations]
        return ':'.join(dirs)
| {
"content_hash": "bd8eac508e5ba8e26888e1dfbd63ee63",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 82,
"avg_line_length": 28.045045045045047,
"alnum_prop": 0.653389013813042,
"repo_name": "jerluc/envy",
"id": "2835a42fe6a3c718a65e8913bede77faf5caed66",
"size": "3138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "envy/env.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9670"
}
],
"symlink_target": ""
} |
"""
This is part of HashBruteStation software
Docs EN: http://hack4sec.pro/wiki/index.php/Hash_Brute_Station_en
Docs RU: http://hack4sec.pro/wiki/index.php/Hash_Brute_Station
License: MIT
Copyright (c) Anton Kuzmin <http://anton-kuzmin.ru> (ru) <http://anton-kuzmin.pro> (en)
API for working with the database
"""
import time
import mysql.connector
from classes.Registry import Registry
class Database(object):
    """API wrapper around mysql-connector with deadlock-retry support.

    NOTE(review): written for Python 2 — several methods treat ``map()``
    results as lists (e.g. ``len(fields)`` in insert_mass); confirm before
    porting to Python 3.
    """
    _db = None
    # How many times a query is retried after a detected deadlock.
    _restart_by_deadlock_limit = None
    # Seconds to sleep between deadlock retries.
    _sleep_by_deadlock_restart = None
    _host = None
    _user = None
    _password = None
    _basename = None
    def __init__(self, host, user, password, basename):
        """Connect to `basename` on `host` with the given credentials."""
        self._host = host
        self._user = user
        self._password = password
        self._basename = basename
        self.connect()
        self._restart_by_deadlock_limit = int(Registry().get('config')['main']['restarts_by_deadlock_limit'])
        self._sleep_by_deadlock_restart = int(Registry().get('config')['main']['sleep_by_deadlock_restart'])
    def connect(self):
        """(Re)open the MySQL connection with autocommit enabled."""
        self._db = mysql.connector.connect(
            host=self._host,
            user=self._user,
            password=self._password,
            database=self._basename,
            #raise_on_warnings=True,
        )
        self._db.autocommit = True
    def q(self, sql, return_curs=False):
        """Run `sql`; return the open cursor only when `return_curs` is True.

        Reconnects once on a lost connection and retries up to
        `_restart_by_deadlock_limit` times on deadlock/lock-wait errors.
        When `return_curs` is False the cursor is closed and None is returned.
        """
        for i in range(1, self._restart_by_deadlock_limit + 1):
            try:
                curs = self._db.cursor(buffered=True)
                curs.execute(sql)
            except mysql.connector.errors.OperationalError as ex:
                if "MySQL Connection not available" in str(ex):
                    # Connection dropped: reconnect and rerun the same query.
                    self.connect()
                    Registry().get('logger').log("database", "Reconnect on '{0}'".format(sql))
                    return self.q(sql, return_curs)
                else:
                    raise ex
            except mysql.connector.errors.DatabaseError as e:
                if str(e).count("Lock wait timeout exceeded") or str(e).count("Deadlock found when trying to get lock"):
                    Registry().get('logger').log("database", "Deadlock '{0}', try {1} ".format(sql, i))
                    if i == self._restart_by_deadlock_limit:
                        # Last attempt: run without guarding so the real
                        # error propagates if it happens again.
                        curs = self._db.cursor()
                        curs.execute(sql)
                    else:
                        time.sleep(self._sleep_by_deadlock_restart)
                        continue
                else:
                    raise e
            break
        if return_curs:
            return curs
        else:
            curs.close()
    def fetch_all(self, sql):
        """ Fetch result of sql query as assoc dict """
        result = []
        curs = self.q(sql, True)
        cols = curs.column_names
        for row in curs:
            row_result = {}
            for field in cols:
                k = cols.index(field)
                row_result[cols[k]] = row[k]
            result.append(row_result)
        curs.close()
        return result
    def fetch_row(self, sql):
        """ Fetch result of sql query as one row (dict) or None """
        curs = self.q(sql, True)
        cols = curs.column_names
        row = curs.fetchone()
        # Drain the buffered result so the cursor can be closed cleanly.
        if curs._have_unread_result():
            curs.fetchall()
        curs.close()
        if row:
            result = {}
            for field in cols:
                k = cols.index(field)
                result[cols[k]] = row[k]
            return result
        else:
            return None
    def fetch_one(self, sql):
        """ Fetch first value of sql query from first row """
        curs = self.q(sql, True)
        row = curs.fetchone()
        if curs._have_unread_result():
            curs.fetchall()
        curs.close()
        if row:
            return row[0]
        else:
            return None
    def fetch_col(self, sql):
        """ Fetch first column of sql query as list """
        result = []
        curs = self.q(sql, True)
        for row in curs:
            result.append(row[0])
        curs.close()
        return result
    def fetch_pairs(self, sql):
        """ Fetch result of sql query as dict {first_col: second_col} """
        result = {}
        curs = self.q(sql, True)
        for row in curs:
            result[row[0]] = row[1]
        curs.close()
        return result
    def escape(self, _str):
        """ Escape special chars from str """
        return mysql.connector.conversion.MySQLConverter().escape(_str)
    def quote(self, _str):
        """ Escape special chars from str and put it into quotes (or NULL) """
        return "NULL" if _str is None else "'" + self.escape(str(_str)) + "'"
    def close(self):
        """ Close db connection """
        self._db.close()
    def insert(self, table_name, data, ignore=False):
        """
        Insert data into table
        :param table_name: target table
        :param data: dict with data {col_name: value}
        :param ignore: Its INSERT IGNORE request or no?
        :return: last inserted row id
        """
        fields = map((lambda s: "`" + str(s) + "`"), data.keys())
        values = map(self.quote, data.values())
        curs = self.q(
            "INSERT " + ("IGNORE" if ignore else "") + " INTO `{0}` ({1}) VALUES({2})".format(
                table_name, ", ".join(fields),
                ", ".join(values)
            ),
            True)
        last_row_id = curs.lastrowid
        curs.close()
        return last_row_id
    def insert_mass(self, table_name, data, ignore=False):
        """
        Insert data into table with many VALUES sections
        :param table_name: target table
        :param data: list of dicts with data {col_name: value}
        :param ignore: Its INSERT IGNORE request or no?
        :return:
        """
        fields = []
        to_insert = []
        for row in data:
            if not len(fields):
                # Column list is taken from the first row; all rows are
                # assumed to share the same keys.
                fields = map((lambda s: "`" + str(s) + "`"), row.keys())
            values = map(self.quote, row.values())
            to_insert.append("({0})".format(", ".join(values)))
            # Flush in batches of 50 rows to keep statements bounded.
            if len(to_insert)%50 == 0:
                self.q(
                    "INSERT " + ("IGNORE" if ignore else "") + " INTO `{0}` ({1}) VALUES {2}"
                    .format(table_name, ", ".join(fields), ", ".join(to_insert))
                )
                to_insert = []
        if len(to_insert):
            self.q(
                "INSERT " + ("IGNORE" if ignore else "") + " INTO `{0}` ({1}) VALUES {2}"
                .format(table_name, ", ".join(fields), ", ".join(to_insert))
            )
    def update_mass(self, table_name, field, data):
        """
        Mass update data in table (UPDATE ... CASE)
        :param table_name: Target table
        :param field: Field what will be change
        :param data: Dict with update data in format {case: value}
        :return:
        """
        sqlTplStart = "UPDATE `{0}` SET `{1}` = CASE \n".format(table_name, field)
        sqlTplEnd = "ELSE `{0}` \n END".format(field)
        sqls = []
        for case in data:
            sqls.append("WHEN {0} THEN {1} \n".format(case, self.quote(data[case])))
            # Flush in batches of 50 WHEN branches.
            if len(sqls)%50 == 0:
                self.q(sqlTplStart + "".join(sqls) + sqlTplEnd)
                sqls = []
        if len(sqls):
            self.q(sqlTplStart + "".join(sqls) + sqlTplEnd)
    def replace(self, table_name, data):
        """
        Replace data in table
        :param table_name: target table
        :param data: dict with data {col_name: value}
        :return: last inserted row id
        """
        fields = map((lambda s: "`" + str(s) + "`"), data.keys())
        values = map(self.quote, data.values())
        # BUGFIX: q() must be asked for the cursor (return_curs=True);
        # previously it returned None and `curs.lastrowid` raised
        # AttributeError on every call.
        curs = self.q(
            "REPLACE INTO `{0}` ({1}) VALUES({2})".format(
                table_name, ", ".join(fields), ", ".join(values)),
            True)
        last_row_id = curs.lastrowid
        curs.close()
        return last_row_id
    def update(self, table_name, data, where):
        """
        Update data in table
        :param table_name: Target table
        :param data: Dict with update data in format {col: value}
        :param where: Where condition (example: "id = 3")
        :return:
        """
        fields = []
        for fname in data:
            fields.append("`{0}` = '{1}'".format(fname, self.escape(data[fname])))
        self.q("UPDATE `{0}` SET {1} WHERE {2}".format(table_name, ", ".join(fields), where))
| {
"content_hash": "36a804e7188b6d48b1090882178998ed",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 120,
"avg_line_length": 33.63529411764706,
"alnum_prop": 0.5140492013524542,
"repo_name": "hack4sec/hbs-cli",
"id": "411adc5225f3d0198dc8733ad06b0f9afa142f34",
"size": "8601",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "classes/Database.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "186124"
}
],
"symlink_target": ""
} |
import unittest
from nose.plugins.attrib import attr
import os
from ncclient.manager import Manager, make_device_handler
from ncclient.transport import SSHSession
from jnpr.junos import Device
from jnpr.junos.utils.fs import FS
from jnpr.junos.exception import RpcError
from mock import patch, MagicMock, call
from lxml import etree
# Module authorship metadata.
__author__ = "Nitin Kumar, Rick Sherman"
__credits__ = "Jeremy Schulman"
@attr("unit")
class TestFS(unittest.TestCase):
    @patch("ncclient.manager.connect")
    def setUp(self, mock_connect):
        """Open a mocked Device (no real NETCONF session) and wrap it in FS."""
        # _mock_manager is defined elsewhere in this test module; it serves
        # canned RPC replies keyed by the request.
        mock_connect.side_effect = self._mock_manager
        self.dev = Device(
            host="1.1.1.1", user="rick", password="password123", gather_facts=False
        )
        self.dev.open()
        self.fs = FS(self.dev)
@patch("jnpr.junos.device.warnings")
def test_cat_wrong_path_return_none(self, mock_warnings):
path = "test/report"
self.assertEqual(self.fs.cat(path), None)
    def test_cat(self):
        """cat() returns the file contents and issues file_show(filename=...)."""
        self.fs._dev.rpc.file_show = MagicMock(side_effect=self._mock_manager)
        path = "test/cat.txt"
        self.assertTrue("testing cat functionality" in self.fs.cat(path))
        self.fs._dev.rpc.file_show.assert_called_with(filename="test/cat.txt")
    def test_cwd(self):
        """cwd() changes the CLI working directory and returns it."""
        self.fs._dev.rpc.set_cli_working_directory = MagicMock(
            side_effect=self._mock_manager
        )
        folder = "change/directory"
        self.assertEqual("change/directory", self.fs.cwd(folder))
        self.fs._dev.rpc.set_cli_working_directory.assert_called_with(
            directory="change/directory"
        )
    @patch("jnpr.junos.Device.execute")
    def test_pwd(self, mock_execute):
        """pwd() returns the working directory from the mocked RPC reply."""
        mock_execute.side_effect = MagicMock(side_effect=self._mock_manager)
        self.fs.pwd()
        self.assertEqual(self.fs.pwd(), "/cf/var/home/rick")
    @patch("jnpr.junos.device.warnings")
    def test_checksum_return_none(self, mock_warnings):
        """checksum() returns None for a path with no canned reply."""
        path = "test/report"
        self.assertEqual(self.fs.checksum(path), None)
def test_checksum_unknown_calc(self):
path = "test/report"
self.assertRaises(ValueError, self.fs.checksum, path=path, calc="abc")
    def test_checksum_return_rsp(self):
        """checksum(..., 'sha256') dispatches to the sha256 RPC and returns its value."""
        self.fs.dev.rpc.get_sha256_checksum_information = MagicMock(
            side_effect=self._mock_manager
        )
        path = "test/checksum"
        self.assertEqual(self.fs.checksum(path, "sha256"), "xxxx")
        self.fs.dev.rpc.get_sha256_checksum_information.assert_called_with(
            path="test/checksum"
        )
    def test_stat_calling___decode_file(self):
        """stat() on a file path returns the decoded file-metadata dict."""
        path = "test/stat/decode_file"
        self.fs.dev.rpc.file_list = MagicMock(side_effect=self._mock_manager)
        self.assertEqual(
            self.fs.stat(path),
            {
                "owner": "pqr",
                "path": "/var/abc.sh",
                "permissions": 755,
                "permissions_text": "-rwxr-xr-x",
                "size": 2,
                "ts_date": "Mar 13 06:54",
                "ts_epoc": "1394693680",
                "type": "file",
            },
        )
    def test_stat_calling___decode_dir(self):
        """stat() on a directory path returns the decoded directory summary."""
        path = "test/stat/decode_dir"
        self.fs.dev.rpc.file_list = MagicMock(side_effect=self._mock_manager)
        self.assertEqual(
            self.fs.stat(path),
            {"path": "/var", "type": "dir", "file_count": 1, "size": 2},
        )
    def test_stat_return_none(self):
        """stat() returns None when the file_list reply contains no match."""
        path = "test/abc"
        self.fs.dev.rpc.file_list = MagicMock()
        self.fs.dev.rpc.file_list.find.return_value = "output"
        self.assertEqual(self.fs.stat(path), None)
    def test_ls_calling___decode_file(self):
        """ls() on a file path returns the same metadata dict as stat()."""
        path = "test/stat/decode_file"
        self.fs.dev.rpc.file_list = MagicMock(side_effect=self._mock_manager)
        self.assertEqual(
            self.fs.ls(path),
            {
                "owner": "pqr",
                "path": "/var/abc.sh",
                "permissions": 755,
                "permissions_text": "-rwxr-xr-x",
                "size": 2,
                "ts_date": "Mar 13 06:54",
                "ts_epoc": "1394693680",
                "type": "file",
            },
        )
    def test_ls_calling___decode_dir(self):
        """ls() on a directory returns the summary plus a per-file `files` map."""
        path = "test/stat/decode_dir"
        self.fs.dev.rpc.file_list = MagicMock(side_effect=self._mock_manager)
        self.assertEqual(
            self.fs.ls(path),
            {
                "files": {
                    "abc": {
                        "permissions_text": "drwxr-xr-x",
                        "ts_date": "Feb 17 15:30",
                        "ts_epoc": "1392651039",
                        "owner": "root",
                        "path": "abc",
                        "size": 2,
                        "type": "dir",
                        "permissions": 555,
                    }
                },
                "path": "/var",
                "type": "dir",
                "file_count": 1,
                "size": 2,
            },
        )
    def test_ls_return_none(self):
        """ls() returns None when the file_list reply contains no match."""
        path = "test/abc"
        self.fs.dev.rpc.file_list = MagicMock()
        self.fs.dev.rpc.file_list.find.return_value = "output"
        self.assertEqual(self.fs.ls(path), None)
    @patch("jnpr.junos.utils.fs.FS._decode_file")
    def test_ls_link_path_false(self, mock_decode_file):
        """ls(followlink=False) still consults the decoded entry's `link` field."""
        mock_decode_file.get.return_value = False
        path = "test/stat/decode_file"
        self.fs.dev.rpc.file_list = MagicMock(side_effect=self._mock_manager)
        self.fs.ls(path, followlink=False)
        mock_decode_file.assert_has_calls([call().get("link")])
    def test_ls_brief_true(self):
        """ls(brief=True) returns file names as a list instead of full dicts."""
        path = "test/stat/decode_dir"
        self.fs.dev.rpc.file_list = MagicMock(side_effect=self._mock_manager)
        self.assertEqual(
            self.fs.ls(path, brief=True),
            {
                "files": ["abc"],
                "path": "/var",
                "type": "dir",
                "file_count": 1,
                "size": 2,
            },
        )
    def test_ls_calling___decode_dir_type_symbolic_link(self):
        """ls() decodes symlink entries with type 'link' and a `link` target."""
        path = "test/stat/decode_symbolic_link"
        self.fs.dev.rpc.file_list = MagicMock(side_effect=self._mock_manager)
        self.assertEqual(
            self.fs.ls(path),
            {
                "files": {
                    "abc": {
                        "permissions_text": "drwxr-xr-x",
                        "ts_date": "Feb 17 15:30",
                        "link": "symlink test",
                        "ts_epoc": "1392651039",
                        "owner": "root",
                        "path": "abc",
                        "size": 2,
                        "type": "link",
                        "permissions": 555,
                    }
                },
                "path": "/var",
                "type": "dir",
                "file_count": 1,
                "size": 2,
            },
        )
def test_rm_return_true(self):
self.fs.dev.rpc.file_delete = MagicMock(return_value=True)
path = "test/abc"
self.assertTrue(self.fs.rm(path))
self.fs.dev.rpc.file_delete.assert_called_once_with(path="test/abc")
def test_rm_return_false(self):
path = "test/abc"
self.fs.dev.rpc.file_delete = MagicMock(return_value=False)
self.assertFalse(self.fs.rm(path))
self.fs.dev.rpc.file_delete.assert_called_once_with(path="test/abc")
def test_copy_return_true(self):
self.fs.dev.rpc.file_copy = MagicMock()
initial = "test/abc"
final = "test/xyz"
self.assertTrue(self.fs.cp(initial, final))
self.fs.dev.rpc.file_copy.assert_called_once_with(
source="test/abc", destination="test/xyz"
)
def test_copy_return_false(self):
initial = "test/abc"
final = "test/xyz"
self.fs.dev.rpc.file_copy = MagicMock(side_effect=Exception)
self.assertFalse(self.fs.cp(initial, final))
self.fs.dev.rpc.file_copy.assert_called_once_with(
source="test/abc", destination="test/xyz"
)
def test_move_return_true(self):
self.fs.dev.rpc.file_rename = MagicMock(return_value=True)
initial = "test/abc"
final = "test/xyz"
self.assertTrue(self.fs.mv(initial, final))
self.fs.dev.rpc.file_rename.assert_called_once_with(
source="test/abc", destination="test/xyz"
)
def test_move_return_false(self):
initial = "test/abc"
final = "test/xyz"
self.fs.dev.rpc.file_rename = MagicMock(return_value=False)
self.assertFalse(self.fs.mv(initial, final))
self.fs.dev.rpc.file_rename.assert_called_once_with(
source="test/abc", destination="test/xyz"
)
def test_tgz_return_true(self):
src = "test/tgz.txt"
dst = "test/xyz"
self.fs.dev.rpc.file_archive = MagicMock(return_value=True)
self.assertTrue(self.fs.tgz(src, dst))
self.fs.dev.rpc.file_archive.assert_called_once_with(
source="test/tgz.txt", destination="test/xyz", compress=True
)
    @patch("jnpr.junos.Device.execute")
    def test_tgz_return_error(self, mock_execute):
        """tgz() surfaces the error text from a failed archive RPC."""
        mock_execute.side_effect = self._mock_manager
        src = "test/tgz.txt"
        dst = "test/xyz"
        self.assertTrue("testing tgz" in self.fs.tgz(src, dst))
    @patch("jnpr.junos.utils.fs.StartShell")
    def test_rmdir(self, mock_StartShell):
        """rmdir() runs `rmdir <path>` through a StartShell context manager."""
        path = "test/rmdir"
        print(self.fs.rmdir(path))
        calls = [
            call().__enter__(),
            call().__enter__().run("rmdir test/rmdir"),
            call().__exit__(None, None, None),
        ]
        mock_StartShell.assert_has_calls(calls)
    @patch("jnpr.junos.utils.fs.StartShell")
    def test_mkdir(self, mock_StartShell):
        """mkdir() runs `mkdir -p <path>` through a StartShell context manager."""
        path = "test/mkdir"
        print(self.fs.mkdir(path))
        calls = [
            call().__enter__(),
            call().__enter__().run("mkdir -p test/mkdir"),
            call().__exit__(None, None, None),
        ]
        mock_StartShell.assert_has_calls(calls)
    @patch("jnpr.junos.utils.fs.StartShell")
    def test_symlink(self, mock_StartShell):
        """symlink() runs `ln -sf <src> <dst>` through a StartShell context manager."""
        src = "test/tgz.txt"
        dst = "test/xyz"
        print(self.fs.symlink(src, dst))
        calls = [
            call().__enter__(),
            call().__enter__().run("ln -sf test/tgz.txt test/xyz"),
            call().__exit__(None, None, None),
        ]
        mock_StartShell.assert_has_calls(calls)
@patch("jnpr.junos.Device.execute")
def test_storage_usage(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertEqual(
self.fs.storage_usage(),
{
"/dev/abc": {
"avail_block": 234234,
"used_blocks": 2346455,
"used_pct": "1",
"mount": "/",
"total_blocks": 567431,
"avail": "2F",
"used": "481M",
"total": "4F",
}
},
)
@patch("jnpr.junos.Device.execute")
def test_storage_usage_linux(self, mock_execute):
mock_execute.side_effect = self._mock_manager_linux
self.assertEqual(
self.fs.storage_usage(),
{
"re0": {
"/dev/sda6": {
"avail": "916M",
"avail_block": 1874712,
"mount": "/data/config",
"total": "984M",
"total_blocks": 2015024,
"used": "1.4M",
"used_blocks": 2688,
"used_pct": "1",
}
}
},
)
@patch("jnpr.junos.Device.execute")
def test_directory_usage(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertEqual(
self.fs.directory_usage(path="/var/tmp", depth=1),
{
"/var/tmp": {"blocks": 456076, "bytes": 233510912, "size": "223M"},
"/var/tmp/gres-tp": {"blocks": 68, "bytes": 34816, "size": "34K"},
"/var/tmp/install": {"blocks": 4, "bytes": 2048, "size": "2.0K"},
"/var/tmp/pics": {"blocks": 4, "bytes": 2048, "size": "2.0K"},
"/var/tmp/rtsdb": {"blocks": 4, "bytes": 2048, "size": "2.0K"},
"/var/tmp/sec-download": {"blocks": 8, "bytes": 4096, "size": "4.0K"},
"/var/tmp/vi.recover": {"blocks": 4, "bytes": 2048, "size": "2.0K"},
},
)
    @patch("jnpr.junos.Device.execute")
    def test_directory_usage_error(self, mock_execute):
        # A reply whose <directory> carries no directory name should make
        # directory_usage() raise RpcError instead of returning data.
        mock_execute.return_value = etree.fromstring(
            """
        <directory-usage-information>
            <directory>
                <used-space used-blocks="456076">
                    223M
                </used-space>
            </directory>
        </directory-usage-information>"""
        )
        self.assertRaises(RpcError, self.fs.directory_usage, path="/var/tmp", depth=1)
@patch("jnpr.junos.Device.execute")
def test_directory_usage_no_directory(self, mock_execute):
mock_execute.side_effect = self._mock_manager_error1
self.assertRaises(RpcError, self.fs.directory_usage, path="/var/tmp", depth="1")
@patch("jnpr.junos.Device.execute")
def test_directory_usage_no_dir_name(self, mock_execute):
mock_execute.side_effect = self._mock_manager_error2
self.assertRaises(RpcError, self.fs.directory_usage, path="/var/tmp", depth="1")
@patch("jnpr.junos.Device.execute")
def test_storage_cleanup(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertEqual(
self.fs.storage_cleanup(),
{"/var/abc.txt": {"ts_date": "Apr 25 10:38", "size": 11}},
)
@patch("jnpr.junos.Device.execute")
def test_storage_cleanup_check(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertEqual(
self.fs.storage_cleanup_check(),
{"/var/abc.txt": {"ts_date": "Apr 25 10:38", "size": 11}},
)
def _read_file(self, fname):
from ncclient.xml_ import NCElement
fpath = os.path.join(os.path.dirname(__file__), "rpc-reply", fname)
foo = open(fpath).read()
if (
fname == "get-rpc-error.xml"
or fname == "get-index-error.xml"
or fname == "get-system-core-dumps.xml"
):
rpc_reply = NCElement(foo, self.dev._conn._device_handler.transform_reply())
elif (
fname == "show-configuration.xml"
or fname == "show-system-alarms.xml"
or fname == "set-cli-working-directory.xml"
):
rpc_reply = NCElement(
foo, self.dev._conn._device_handler.transform_reply()
)._NCElement__doc
else:
rpc_reply = NCElement(
foo, self.dev._conn._device_handler.transform_reply()
)._NCElement__doc[0]
return rpc_reply
    def _mock_manager(self, *args, **kwargs):
        """Device.execute stand-in: map each RPC to a canned rpc-reply fixture.

        Keyword-style calls carry file-system RPC arguments (path /
        directory / filename); positional calls carry the RPC element
        itself and are dispatched on its tag name.
        """
        if kwargs:
            # if 'path' in kwargs and 'detail' in kwargs:
            # return self._read_file('dir_list_detail.xml')
            if "path" in kwargs:
                if kwargs["path"] == "test/stat/decode_dir":
                    return self._read_file("file-list_dir.xml")
                elif kwargs["path"] == "test/stat/decode_file":
                    return self._read_file("file-list_file.xml")
                elif kwargs["path"] == "test/checksum":
                    return self._read_file("checksum.xml")
                elif kwargs["path"] == "test/stat/decode_symbolic_link":
                    return self._read_file("file-list_symlink.xml")
            if "directory" in kwargs:
                if kwargs["directory"] == "change/directory":
                    return self._read_file("set-cli-working-directory.xml")
            if "filename" in kwargs:
                if kwargs["filename"] == "test/cat.txt":
                    return self._read_file("file-show.xml")
            # Fallback: build a real ncclient Manager from device_params
            # (taken when the mocked Device is opened).
            # NOTE(review): if none of the branches above matched and
            # 'device_params' is absent, this raises KeyError -- confirm
            # that is the intended failure mode for unexpected RPCs.
            device_params = kwargs["device_params"]
            device_handler = make_device_handler(device_params)
            session = SSHSession(device_handler)
            return Manager(session, device_handler)
        elif args:
            # Positional call: dispatch on the RPC element's tag.
            if args[0].tag == "command":
                if args[0].text == "show cli directory":
                    return self._read_file("show-cli-directory.xml")
            elif args[0].tag == "get-system-storage":
                return self._read_file("get-system-storage.xml")
            elif args[0].tag == "get-directory-usage-information":
                return self._read_file("get-directory-usage-information.xml")
            elif args[0].tag == "request-system-storage-cleanup":
                return self._read_file("request-system-storage-cleanup.xml")
            elif args[0].tag == "file-archive":
                return self._read_file("file-archive.xml")
def _mock_manager_error1(self, *args, **kwargs):
if args:
if args[0].tag == "get-directory-usage-information":
return self._read_file("get-directory-usage-information_error1.xml")
def _mock_manager_error2(self, *args, **kwargs):
if args:
if args[0].tag == "get-directory-usage-information":
return self._read_file("get-directory-usage-information_error2.xml")
def _mock_manager_linux(self, *args, **kwargs):
if args:
if args[0].tag == "get-system-storage":
return self._read_file("get-storage-usage-linux.xml")
| {
"content_hash": "46fc46f2117e7f42a92b42eaeee80b55",
"timestamp": "",
"source": "github",
"line_count": 474,
"max_line_length": 88,
"avg_line_length": 37.734177215189874,
"alnum_prop": 0.5257184390025719,
"repo_name": "Juniper/py-junos-eznc",
"id": "475bc8ecba6b66e0932740c0060f1533533c3729",
"size": "17886",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/unit/utils/test_fs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "856"
},
{
"name": "Pascal",
"bytes": "408"
},
{
"name": "Puppet",
"bytes": "2263"
},
{
"name": "Python",
"bytes": "1101958"
},
{
"name": "Ruby",
"bytes": "134"
},
{
"name": "Shell",
"bytes": "1516"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the ``Mineral`` table.

    Every descriptive attribute is a CharField defaulting to the empty
    string, so rows can be saved with only partial data.
    """

    # First migration for this app: no dependency on earlier migrations.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Mineral',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='', max_length=255)),
                ('image_filename', models.CharField(default='', max_length=255)),
                ('image_caption', models.CharField(default='', max_length=255)),
                ('category', models.CharField(default='', max_length=255)),
                ('formula', models.CharField(default='', max_length=255)),
                ('strunz_classification', models.CharField(default='', max_length=255)),
                ('crystal_system', models.CharField(default='', max_length=255)),
                ('unit_cell', models.CharField(default='', max_length=255)),
                ('color', models.CharField(default='', max_length=255)),
                ('crystal_symmetry', models.CharField(default='', max_length=255)),
                ('cleavage', models.CharField(default='', max_length=255)),
                ('mohs_scale_hardness', models.CharField(default='', max_length=255)),
                ('luster', models.CharField(default='', max_length=255)),
                ('streak', models.CharField(default='', max_length=255)),
                ('diaphaneity', models.CharField(default='', max_length=255)),
                ('optical_properties', models.CharField(default='', max_length=255)),
                ('group', models.CharField(default='', max_length=255)),
            ],
        ),
    ]
| {
"content_hash": "63191f70d6b583f4ec6b783f74136ad0",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 114,
"avg_line_length": 47.54054054054054,
"alnum_prop": 0.5656623081296192,
"repo_name": "squadran2003/filtering-searching-mineral-catalogue",
"id": "a48f01777c2eedd37e8af2c9116ce6ec9ccbe6a3",
"size": "1830",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "filtering-searching-mineral-catalogue/minerals/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11548"
},
{
"name": "HTML",
"bytes": "17118"
},
{
"name": "Python",
"bytes": "23470"
}
],
"symlink_target": ""
} |
class QuadratureSettings:
    """Settings bundle describing how quadrature points are generated.

    The class-level values document the defaults; instances always receive
    explicit values through ``__init__``.
    """

    overall_quadrature_points: int = 1
    quadrature_method: str = "midpoint"
    distribute_quad_points_based_on_arc_length: bool = False
    function_name: str

    def __init__(
        self,
        overall_quadrature_points,
        quadrature_method,
        distribute_quad_points_based_on_arc_length,
        function_name,
    ):
        # Store each constructor argument verbatim on the instance.
        self.overall_quadrature_points = overall_quadrature_points
        self.quadrature_method = quadrature_method
        self.distribute_quad_points_based_on_arc_length = (
            distribute_quad_points_based_on_arc_length
        )
        self.function_name = function_name
| {
"content_hash": "ec8e94438905fc862bb4c61e9b5df4cb",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 128,
"avg_line_length": 52.81818181818182,
"alnum_prop": 0.7314974182444062,
"repo_name": "schreiberx/sweet",
"id": "4454ca2a9fbb0bb8035812edd8eeea1a04f7a797",
"size": "581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mule_local/python/mule_local/rexi/pcirexi/section/QuadratureSettings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "133036"
},
{
"name": "C++",
"bytes": "2947985"
},
{
"name": "Fortran",
"bytes": "109460"
},
{
"name": "GLSL",
"bytes": "27428"
},
{
"name": "Makefile",
"bytes": "312"
},
{
"name": "Python",
"bytes": "2503502"
},
{
"name": "Shell",
"bytes": "490940"
},
{
"name": "TeX",
"bytes": "3093"
}
],
"symlink_target": ""
} |
import logging
import json
import uuid
from quokka.core.models.subcontent import SubContentPurpose
from quokka.core.models.channel import Channel, ChannelType
from quokka.core.models.config import Config, Quokka
from quokka.core.models.content import License
from quokka.core.models.custom_values import CustomValue
from quokka.modules.accounts.models import User, Role
from quokka.modules.posts.models import Post
logger = logging.getLogger()
class Populate(object):
    """Seed the database from a JSON fixture file.

    Creates roles, users, configs, channel types, channels, purposes and
    posts through the quokka model layer.  Records already present are
    fetched instead of recreated, and everything created or seen is cached
    on the instance so later steps can reference earlier ones.

    Fixes applied: the fixture file handle is now closed via a context
    manager, and the five bare ``except:`` clauses were narrowed to
    ``except Exception:`` so SystemExit/KeyboardInterrupt propagate.
    """

    def __init__(self, db, *args, **kwargs):
        self.db = db
        self.args = args
        self.kwargs = kwargs
        # Caches keyed by name/slug/identifier so each record is created
        # (or looked up) at most once per run.
        self.roles = {}
        self.users = {}
        self.channels = {}
        self.channel_types = {}
        self.purposes = {}
        self.custom_values = {}
        self.load_fixtures()
        self.baseurl = self.kwargs.get('baseurl')
        self.app = self.kwargs.get('app')

    def __call__(self, *args, **kwargs):
        """Run the pipeline, inside a test request context when possible."""
        if self.baseurl and self.app:
            with self.app.test_request_context(base_url=self.baseurl):
                self.pipeline()
        else:
            self.pipeline()

    def pipeline(self):
        """Create every record type in dependency order."""
        self.load_existing_users()
        self.create_users()
        self.create_configs()
        self.create_channel_types()
        self.create_base_channels()
        self.create_channels()
        self.create_purposes()
        self.create_posts()

    def generate_random_password(self):
        """Return a random 32-character hexadecimal string."""
        return uuid.uuid4().hex

    def create_initial_superuser(self):
        """Create the default admin user with a random password.

        Returns ``(user_data, user_obj)``; the plain-text password is only
        recoverable from ``user_data``.
        """
        password = self.generate_random_password()
        user_data = {
            "name": "Quokka Admin",
            "email": "admin@quokkaproject.org",
            "gravatar_email": "rochacbruno+quokka@gmail.com",
            "password": password[:6],
            "roles": ["admin"],
            "bio": "Quokka Example Admin",
            "tagline": "Quokka is the best CMS!",
            "links": [
                {
                    "title": "facebook",
                    "link": "http://facebook.com/quokkaproject",
                    "icon": "facebook",
                    "css_class": "facebook",
                    "order": 0
                },
                {
                    "title": "github",
                    "link": "http://github.com/quokkaproject",
                    "icon": "github",
                    "css_class": "github",
                    "order": 0
                },
                {
                    "title": "twitter",
                    "link": "http://twitter.com/quokkaproject",
                    "icon": "twitter",
                    "css_class": "twitter",
                    "order": 0
                }
            ]
        }
        user_obj = self.create_user(user_data)
        return user_data, user_obj

    def load_fixtures(self):
        """Load the JSON fixture file into ``self.json_data``."""
        filepath = self.kwargs.get('filepath',
                                   './etc/fixtures/initial_data.json')
        # Context manager so the handle is always closed (previously leaked).
        with open(filepath) as _file:
            self.json_data = json.load(_file)

    def role(self, name):
        """Return the Role called ``name``, creating and caching it."""
        if name not in self.roles:
            role, created = Role.objects.get_or_create(name=name)
            self.roles[name] = role
            if created:
                logger.info("Created role: %s", name)
        return self.roles.get(name)

    def load_existing_users(self):
        """Pre-populate the user cache with every user already stored."""
        users = User.objects.all()
        for user in users:
            self.users[user.name] = user

    def create_user(self, data):
        """Create a user from fixture ``data`` unless already cached."""
        name = data.get('name')
        if name not in self.users:
            pwd = data.get("password")
            # Replace role names with Role objects before creating.
            data['roles'] = [self.role(role) for role in data.get('roles')]
            user = User.createuser(**data)
            self.users[name] = user
            logger.info("Created: User: mail:%s pwd:%s", user.email, pwd)
            return user
        else:
            logger.info("Exist: User: mail: %s", data.get('email'))

    def create_users(self, data=None):
        """Create every user in ``data`` (defaults to the fixture list)."""
        self.users_data = data or self.json_data.get('users')
        for user_data in self.users_data:
            self.create_user(user_data)

    @staticmethod
    def create_config(data):
        """Return the Config for this group, creating it when missing."""
        try:
            return Config.objects.get(group=data.get('group'))
        except Exception:
            return Config.objects.create(**data)

    def custom_value(self, **data):
        """Return a CustomValue built from ``data``, cached by name."""
        if data.get('name') in self.custom_values:
            return self.custom_values[data.get('name')]
        value = CustomValue(**data)
        self.custom_values[value.name] = value
        return value

    def create_configs(self):
        """Create every config group from the fixtures."""
        self.configs_data = self.json_data.get('configs')
        for config in self.configs_data:
            # Replace raw value dicts with CustomValue instances.
            config['values'] = [self.custom_value(**args)
                                for args in config.get('values')]
            self.create_config(config)

    def create_channel(self, data):
        """Create (or fetch by slug) a channel, then recurse into children."""
        if 'childs' in data:
            childs = data.pop('childs')
        else:
            childs = []
        data['created_by'] = data['last_updated_by'] = self.users.get('admin')
        # Map the channel-type identifier to the cached ChannelType object.
        _type = data.get('channel_type')
        data['channel_type'] = self.channel_types.get(_type)
        try:
            channel = Channel.objects.get(slug=data.get('slug'))
            created = False
        except Exception:
            channel, created = Channel.objects.get_or_create(**data)
        if created:
            logger.info("Created channel: %s", channel.title)
        else:
            logger.info("Channel get: %s", channel.title)
        for child in childs:
            child['parent'] = channel
            self.create_channel(child)
        if channel.slug not in self.channels:
            self.channels[channel.slug] = channel
        return channel

    def create_channel_type(self, data):
        """Create (or fetch) a channel type and cache it by identifier."""
        try:
            channel_type = ChannelType.objects.get(
                identifier=data.get('identifier'))
            created = False
        except Exception:
            channel_type, created = ChannelType.objects.get_or_create(
                **data
            )
        if created:
            logger.info("Created channel_type: %s", channel_type.title)
        else:
            logger.info("ChannelType get: %s", channel_type.title)
        if channel_type.identifier not in self.channel_types:
            self.channel_types[channel_type.identifier] = channel_type
        return channel_type

    def create_base_channels(self):
        """Create the fixture's base channels."""
        self.channel_data = self.json_data.get('base_channels')
        for data in self.channel_data:
            self.create_channel(data)

    def create_channels(self):
        """Create the fixture's regular channels."""
        self.channel_data = self.json_data.get('channels')
        for data in self.channel_data:
            self.create_channel(data)

    def create_channel_types(self):
        """Create every channel type from the fixtures."""
        self.channel_type_data = self.json_data.get('channel_types')
        for data in self.channel_type_data:
            self.create_channel_type(data)

    def create_purpose(self, data):
        """Create (or fetch) a SubContentPurpose and cache it."""
        if data.get('identifier') in self.purposes:
            return self.purposes[data.get('identifier')]
        purpose, created = SubContentPurpose.objects.get_or_create(
            title=data.get('title'),
            identifier=data.get('identifier'),
            module=data.get('module')
        )
        self.purposes[purpose.identifier] = purpose
        return purpose

    def create_purposes(self):
        """Create every purpose from the fixtures."""
        self.purpose_data = self.json_data.get('purposes')
        for purpose in self.purpose_data:
            self.create_purpose(purpose)

    def create_initial_post(self, user_data=None, user_obj=None):
        """Create the welcome post that exposes the admin credentials."""
        post_data = dict(
            title="Try Quokka CMS! write a post.",
            summary=(
                "Use default credentials to access "
                "/admin \r\n"
                "user: {user[email]} \r\n"
                "pass: {user[password]} \r\n"
            ).format(user=user_data),
            slug="try-quokka-cms",
            tags=["quokka"],
            body=(
                "## You can try Quokka ADMIN\r\n\r\n"
                "Create some posts\r\n\r\n"
                "> Use default credentials to access "
                "[/admin](/admin) \r\n\r\n"
                "- user: {user[email]}\r\n"
                "- password: {user[password]}\r\n"
                "> ATTENTION! Copy the credentials and delete this post"
            ).format(user=user_data),
            license=dict(
                title="Creative Commons",
                link="http://creativecommons.com",
                identifier="creative_commons_by_nc_nd"
            ),
            content_format="markdown"
        )
        post_data['channel'] = self.channels.get("home")
        post_data["created_by"] = user_obj or self.users.get('author')
        post = self.create_post(post_data)
        return post

    def create_post(self, data):
        """Create (or fetch by slug) a published post from ``data``."""
        if not data.get('created_by'):
            data['created_by'] = self.users.get('author')
        data['last_updated_by'] = data['created_by']
        data['published'] = True
        # Promote a raw license dict to a License embedded document.
        if 'license' in data and not isinstance(data['license'], License):
            data['license'] = License(**data['license'])
        try:
            post = Post.objects.get(slug=data.get('slug'))
            logger.info("Post get: %s", post.title)
        except Exception:
            post = Post.objects.create(**data)
            logger.info("Post created: %s", post.title)
        # post.created_by = self.users.get('admin')
        # post.save()
        return post

    def create_posts(self):
        """Create every fixture post, resolving channel references."""
        self.post_data = self.json_data.get('posts')
        for data in self.post_data:
            _channel = data.get('channel')
            data['channel'] = self.channels.get(_channel)
            related_channels = data.get('related_channels', [])
            data['related_channels'] = [
                self.channels.get(_related)
                for _related in related_channels
            ]
            try:
                self.create_post(data)
            except Exception:
                # Channels may be missing on a partial run; create them
                # and retry once.
                self.create_channels()
                self.create_post(data)

    def reset(self):
        """Delete everything this populator creates from the fixtures."""
        Post.objects(
            slug__in=[item['slug'] for item in self.json_data.get('posts')]
        ).delete()
        SubContentPurpose.objects(
            identifier__in=[
                item['identifier'] for item in self.json_data.get('purposes')
            ]
        ).delete()
        # Channels form a tree: delete grandchildren, then children, then
        # the channel itself.
        for channel in Channel.objects(
                slug__in=[
                    item['slug'] for item in self.json_data.get('channels')]):
            for subchannel in channel.get_children():
                for subsubchannel in subchannel.get_children():
                    subsubchannel.delete()
                subchannel.delete()
            channel.delete()
        User.objects(
            email__in=[item['email'] for item in self.json_data.get('users')]
        ).delete()
        if self.kwargs.get('first_install'):
            Quokka.objects.delete()
| {
"content_hash": "a299ef9b8e5171fad592b6b12c7e5873",
"timestamp": "",
"source": "github",
"line_count": 329,
"max_line_length": 78,
"avg_line_length": 33.26139817629179,
"alnum_prop": 0.5335831124920041,
"repo_name": "alexandre/quokka",
"id": "c52b6bf2e8ec65ea03422e8874c8584da5250d75",
"size": "10959",
"binary": false,
"copies": "2",
"ref": "refs/heads/development",
"path": "quokka/utils/populate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "104"
},
{
"name": "Makefile",
"bytes": "440"
},
{
"name": "Python",
"bytes": "219395"
},
{
"name": "Shell",
"bytes": "12305"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops.math_ops import sigmoid
from tensorflow.python.ops.math_ops import tanh
from tensorflow.python.ops.rnn_cell import RNNCell
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
import tensorflow as tf
class NewGRUCell(RNNCell):
    """Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).

    Variant of the standard GRU where the candidate state (and, when
    ``simple_gate=False``, the gates too) are computed with a 5-piece
    maxout network instead of a single linear map.
    """

    def __init__(self, num_units, input_size=None, activation=tanh, simple_gate=True):
        # simple_gate=True: gates from one linear layer; False: gates go
        # through the maxout network as well (see __call__).
        if input_size is not None:
            logging.warn("%s: The input_size parameter is deprecated.", self)
        self._num_units = num_units
        self._activation = activation
        self.simple_gate = simple_gate

    @property
    def state_size(self):
        return self._num_units

    @property
    def output_size(self):
        return self._num_units

    def __call__(self, inputs, state, scope=None):
        """Gated recurrent unit (GRU) with nunits cells."""
        with vs.variable_scope(scope or "gru_cell"):
            with vs.variable_scope("gates"):  # Reset gate and update gate.
                # We start with bias of 1.0 to not reset and not update.
                if self.simple_gate:
                    gate=_linear(
                        [inputs, state], 2 * self._num_units, True, 1.0, scope=scope)
                else:
                    gate=self.maxout(inputs, state, 5, 0, scope=scope, output_size=2 * self._num_units)
                # Split the joint gate tensor into reset (r) and update (u).
                r, u = array_ops.split(
                    gate,
                    num_or_size_splits=2,
                    axis=1)
                r, u = sigmoid(r), sigmoid(u)
            with vs.variable_scope("candidate"):
                # Candidate state from inputs and the reset-gated old state.
                c = self.maxout(inputs, r * state, 5, 0, scope=scope, output_size=self._num_units)
                # c = tf.nn.tanh(_linear([inputs, r * state],
                #                        self._num_units, True,
                #                        scope=scope))
            # New state is a convex combination of old state and candidate.
            new_h = u * state + (1 - u) * c
        return new_h, new_h

    def maxout(self, input1, input2, num_units, ini_value, output_size, scope=None):
        """Elementwise max over ``num_units`` parallel activation(linear(...))
        maps of the two inputs, each with its own variables."""
        shape = input1.get_shape().as_list()
        dim = shape[-1]  # NOTE(review): computed but never used.
        outputs = None
        for i in range(num_units):
            # Separate variable scope per piece => separate weights.
            with tf.variable_scope(str(i)):
                y = self._activation(_linear([input1, input2],
                                             output_size, True, ini_value,
                                             scope=scope))
                if outputs is None:
                    outputs = y
                else:
                    outputs = tf.maximum(outputs, y)
        c = outputs
        return c
def _linear(args, output_size, bias, bias_start=0.0, scope=None):
    """Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.

    Args:
      args: a 2D Tensor or a list of 2D, batch x n, Tensors.
      output_size: int, second dimension of W[i].
      bias: boolean, whether to add a bias term or not.
      bias_start: starting value to initialize the bias; 0 by default.
      scope: (optional) Variable scope to create parameters in.
        NOTE(review): this argument is overwritten below and therefore
        ignored; parameters always live in the caller's current scope.

    Returns:
      A 2D Tensor with shape [batch x output_size] equal to
      sum_i(args[i] * W[i]), where W[i]s are newly created matrices.

    Raises:
      ValueError: if some of the arguments has unspecified or wrong shape.
    """
    if args is None or (nest.is_sequence(args) and not args):
        raise ValueError("`args` must be specified")
    if not nest.is_sequence(args):
        args = [args]

    # Calculate the total size of arguments on dimension 1.
    total_arg_size = 0
    shapes = [a.get_shape() for a in args]
    for shape in shapes:
        if shape.ndims != 2:
            raise ValueError("linear is expecting 2D arguments: %s" % shapes)
        if shape[1].value is None:
            raise ValueError("linear expects shape[1] to be provided for shape %s, "
                             "but saw %s" % (shape, shape[1]))
        else:
            total_arg_size += shape[1].value

    # All args are assumed to share a dtype; use the first one's.
    dtype = [a.dtype for a in args][0]

    # Now the computation.
    # NOTE(review): the passed-in `scope` parameter is discarded here.
    scope = vs.get_variable_scope()
    with vs.variable_scope(scope) as outer_scope:
        weights = vs.get_variable(
            "weights", [total_arg_size, output_size], dtype=dtype)
        # A single argument needs no concat.
        if len(args) == 1:
            res = math_ops.matmul(args[0], weights)
        else:
            res = math_ops.matmul(array_ops.concat(args, 1), weights)
        if not bias:
            return res
        with vs.variable_scope(outer_scope) as inner_scope:
            # Disable any partitioner for the (small) bias variable.
            inner_scope.set_partitioner(None)
            biases = vs.get_variable(
                "biases", [output_size],
                dtype=dtype,
                initializer=init_ops.constant_initializer(bias_start, dtype=dtype))
        return nn_ops.bias_add(res, biases)
| {
"content_hash": "22257143ff36c44f44a293a25460f53f",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 93,
"avg_line_length": 35.46762589928058,
"alnum_prop": 0.6393509127789047,
"repo_name": "xiangdal/TrajectoryNet",
"id": "d487b020e7448e449cf205b35be555b9bb934118",
"size": "5020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "customized_rnncell.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "53131"
},
{
"name": "R",
"bytes": "6025"
}
],
"symlink_target": ""
} |
import socket
class udpserver():
    """UDP broadcast discovery server.

    Replies "ACK" on port 9001 to any "DISCOVER" datagram received on
    port 9000.

    NOTE(review): all the work happens in __init__, which blocks forever
    in the receive loop -- instantiating this class never returns.
    """

    def __init__(self):
        AMOUNT_BYTES = 1024
        BROADCAST_PORT_SEND = 9001  # Port the client will be listening on
        BROADCAST_PORT_RECV = 9000  # Port the client will send messages to
        BROADCAST_LISTEN = ''  # Interface to bind; with 127.255.255.255 it would only answer local calls
        bsock = socket.socket(socket.AF_INET,  # Internet Address Family IPv4
                              socket.SOCK_DGRAM)  # UDP Protocol
        # Allow quick rebinding and broadcast replies.
        bsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        bsock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        bsock.bind((BROADCAST_LISTEN, BROADCAST_PORT_RECV))
        while True :
            message , address = bsock.recvfrom(AMOUNT_BYTES)
            print("message '{0}' from : {1}".format(message, address))
            # Answer discovery probes on the sender's listen port.
            if message == b'DISCOVER':
                bsock.sendto(b"ACK", (address[0] ,BROADCAST_PORT_SEND))
| {
"content_hash": "90d4f16aff8962483a2573347b8d465c",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 130,
"avg_line_length": 45.25,
"alnum_prop": 0.6729281767955801,
"repo_name": "fabtrompet/bomberman",
"id": "4fd57b349cd1883d018843f8c003f97d1e6a1770",
"size": "936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "broadcast_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "34734"
},
{
"name": "Shell",
"bytes": "41"
}
],
"symlink_target": ""
} |
from nova import availability_zones
from nova import db
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova.openstack.common import log as logging
from nova import utils
LOG = logging.getLogger(__name__)
class Service(base.NovaPersistentObject, base.NovaObject):
    """Versioned object wrapping a row of the services table."""
    # Version 1.0: Initial version
    # Version 1.1: Added compute_node nested object
    # Version 1.2: String attributes updated to support unicode
    # Version 1.3: ComputeNode version 1.5
    # Version 1.4: Added use_slave to get_by_compute_host
    VERSION = '1.4'

    fields = {
        'id': fields.IntegerField(read_only=True),
        'host': fields.StringField(nullable=True),
        'binary': fields.StringField(nullable=True),
        'topic': fields.StringField(nullable=True),
        'report_count': fields.IntegerField(),
        'disabled': fields.BooleanField(),
        'disabled_reason': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'compute_node': fields.ObjectField('ComputeNode'),
    }

    def obj_make_compatible(self, primitive, target_version):
        """Downgrade the serialized form for clients older than 1.3 by
        backporting the nested compute_node primitive to version 1.4."""
        target_version = utils.convert_version_to_tuple(target_version)
        if target_version < (1, 3) and 'compute_node' in primitive:
            self.compute_node.obj_make_compatible(
                primitive['compute_node']['nova_object.data'], '1.4')
            primitive['compute_node']['nova_object.version'] = '1.4'

    @staticmethod
    def _do_compute_node(context, service, db_service):
        """Attach the service's compute node, ignoring malformed rows."""
        try:
            # NOTE(danms): The service.compute_node relationship returns
            # a list, which should only have one item in it. If it's empty
            # or otherwise malformed, ignore it.
            db_compute = db_service['compute_node'][0]
        except Exception:
            return
        service.compute_node = objects.ComputeNode._from_db_object(
            context, objects.ComputeNode(), db_compute)

    @staticmethod
    def _from_db_object(context, service, db_service):
        """Populate ``service`` from a DB row.

        ``availability_zone`` may legitimately be absent from the row and
        is then left unset on the object.
        """
        allow_missing = ('availability_zone',)
        for key in service.fields:
            if key in allow_missing and key not in db_service:
                continue
            if key == 'compute_node':
                service._do_compute_node(context, service, db_service)
            else:
                service[key] = db_service[key]
        service._context = context
        service.obj_reset_changes()
        return service

    def obj_load_attr(self, attrname):
        """Lazy-load ``compute_node`` -- the only lazy-loadable field."""
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())
        LOG.debug("Lazy-loading `%(attr)s' on %(name)s id %(id)s",
                  {'attr': attrname,
                   'name': self.obj_name(),
                   'id': self.id,
                   })
        if attrname != 'compute_node':
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='attribute %s not lazy-loadable' % attrname)
        self.compute_node = objects.ComputeNode.get_by_service_id(
            self._context, self.id)

    @base.remotable_classmethod
    def get_by_id(cls, context, service_id):
        """Fetch a service by primary key."""
        db_service = db.service_get(context, service_id)
        return cls._from_db_object(context, cls(), db_service)

    @base.remotable_classmethod
    def get_by_host_and_topic(cls, context, host, topic):
        """Fetch the service for a given host and topic."""
        db_service = db.service_get_by_host_and_topic(context, host, topic)
        return cls._from_db_object(context, cls(), db_service)

    @base.remotable_classmethod
    def get_by_compute_host(cls, context, host, use_slave=False):
        """Fetch the compute service for a host.

        NOTE(review): ``use_slave`` is accepted for RPC version 1.4
        compatibility but not forwarded to the db call here.
        """
        db_service = db.service_get_by_compute_host(context, host)
        return cls._from_db_object(context, cls(), db_service)

    @base.remotable_classmethod
    def get_by_args(cls, context, host, binary):
        """Fetch the service for a given host and binary name."""
        db_service = db.service_get_by_args(context, host, binary)
        return cls._from_db_object(context, cls(), db_service)

    @base.remotable
    def create(self, context):
        """Insert a new services row; fails if already created."""
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        db_service = db.service_create(context, updates)
        self._from_db_object(context, self, db_service)

    @base.remotable
    def save(self, context):
        """Persist changed fields; the id is never overwritten."""
        updates = self.obj_get_changes()
        updates.pop('id', None)
        db_service = db.service_update(context, self.id, updates)
        self._from_db_object(context, self, db_service)

    @base.remotable
    def destroy(self, context):
        """Delete this service's row."""
        db.service_destroy(context, self.id)
class ServiceList(base.ObjectListBase, base.NovaObject):
    """A versioned list of Service objects."""
    # Version 1.0: Initial version
    #              Service <= version 1.2
    # Version 1.1  Service version 1.3
    # Version 1.2: Service version 1.4
    VERSION = '1.2'

    fields = {
        'objects': fields.ListOfObjectsField('Service'),
    }
    # Maps each list version to the Service child version it carries, so
    # the serializer can backport children for old clients.
    child_versions = {
        '1.0': '1.2',
        # NOTE(danms): Service was at 1.2 before we added this
        '1.1': '1.3',
        '1.2': '1.4',
    }

    @base.remotable_classmethod
    def get_by_topic(cls, context, topic):
        """Return all services registered for ``topic``."""
        db_services = db.service_get_all_by_topic(context, topic)
        return base.obj_make_list(context, cls(context), objects.Service,
                                  db_services)

    @base.remotable_classmethod
    def get_by_host(cls, context, host):
        """Return all services running on ``host``."""
        db_services = db.service_get_all_by_host(context, host)
        return base.obj_make_list(context, cls(context), objects.Service,
                                  db_services)

    @base.remotable_classmethod
    def get_all(cls, context, disabled=None, set_zones=False):
        """Return all services, optionally filtered by disabled state and
        optionally annotated with availability zones."""
        db_services = db.service_get_all(context, disabled=disabled)
        if set_zones:
            db_services = availability_zones.set_availability_zones(
                context, db_services)
        return base.obj_make_list(context, cls(context), objects.Service,
                                  db_services)
| {
"content_hash": "d58db7c38e8ed773a6a8ecf275292598",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 75,
"avg_line_length": 39.13125,
"alnum_prop": 0.6078901134004153,
"repo_name": "vmthunder/nova",
"id": "693755ee5616642909b79d519f2040d8365a14e0",
"size": "6866",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "nova/objects/service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import os
import shutil
import sys
import numpy as np
import subprocess
from astropy.io import fits
import pyfits
# Global configuration dict, filled in by the exec'd parameter file below
# (keys like 'field_name', 'data_path', 'sextractor_file' are read later).
param = {}
# Python 2 only: run the config script named on the command line, which is
# expected to populate `param`.
execfile(sys.argv[1])
def make_folder_list(i):
    """Return night folders under `i` (names starting with '20') that
    contain the configured field, each with a trailing slash."""
    nights = []
    for entry in sorted(os.listdir(i)):
        if not entry.startswith('20'):
            continue
        if os.path.exists(i + entry + '/' + param['field_name']):
            nights.append(i + entry + '/')
    return nights
def make_cat_folder(o):
    """(Re)create directory `o`, wiping any previous contents."""
    already_there = os.path.exists(o)
    if already_there:
        # Start from a clean slate before recreating.
        shutil.rmtree(o)
    os.makedirs(o)
def make_image_list(i, fl):
    """Return the '.fits' entries of directory `i` (sorted) whose full
    path `i` + name appears in `fl`."""
    names = []
    for entry in sorted(os.listdir(i)):
        if entry.endswith('.fits') and i + entry in fl:
            names.append(entry)
    return names
def call_sextractor(i, im, sl):
    """Run SExtractor on `i`/cal/`im` and return the output catalog path.

    `sl` is the saturation level passed through to SExtractor.  Gain and
    pixel scale are chosen from the frame's MJD header with a cutover at
    MJD 57416 -- presumably an instrument/camera change; TODO confirm.
    """
    header = pyfits.getheader(i+'/cal/'+im)
    if int(header['MJD']) < 57416:
        GAIN = 12.5
        PIXEL_SCALE = 3.9
    else:
        GAIN = 0.34
        PIXEL_SCALE = 2.37
    # NOTE(review): strips the last 6 characters of the image name (e.g. a
    # one-char tag plus '.fits') -- assumes that naming scheme; confirm.
    cat_name = i+'/cat/'+im[:-6]+'.cat'
    cmd = 'sex {} -c {} -CATALOG_NAME {} -SATUR_LEVEL {} -GAIN {} -PIXEL_SCALE {}'.format(i+'/cal/'+im, param['sextractor_file'], cat_name, sl, GAIN, PIXEL_SCALE)
    # Extraction can be disabled from the parameter file; the expected
    # catalog path is returned either way.
    if param['disable_analysis_extraction'] == 0:
        subprocess.call(cmd, shell=True)
    return cat_name
def create_catalog_arrays(i, il, sl):
    """Run SExtractor on each image in `il` and return per-image lists of
    (ra, dec, mag) arrays, keeping only detections with flag == 0."""
    cat_list_ra = []
    cat_list_dec = []
    cat_list_mag = []
    for im in il:
        cat_name = call_sextractor(i, im, sl)
        # Catalog columns 0, 2, 3, 4 -> magnitude, x, y, flag.
        # NOTE(review): column meaning depends on the SExtractor output
        # configuration -- confirm against the .param file.
        mag, x, y, flag = np.loadtxt(cat_name, usecols=(0, 2, 3, 4), unpack=True)
        # SExtractor is unable to read the tan-sip wcs produced by Astrometry.net,
        # so convert pixel to sky coordinates with astropy instead.
        from astropy import wcs
        w = wcs.WCS(i+'/cal/'+im)
        ra, dec = w.all_pix2world(x, y, 1)
        # Keep only cleanly-extracted sources (flag == 0).
        cat_list_mag.append(mag[flag == 0])
        cat_list_ra.append(ra[flag == 0])
        cat_list_dec.append(dec[flag == 0])
    return cat_list_ra, cat_list_dec, cat_list_mag
def remove_cat_folder(i):
    """Delete directory `i` if it exists; silently do nothing otherwise."""
    if not os.path.exists(i):
        return
    shutil.rmtree(i)
def create_mjd_catalog(i, il):
    """Return the MJD header keyword of each image `i` + name in `il`."""
    mjd_values = []
    for name in il:
        mjd_values.append(fits.getheader(i + name)['MJD'])
    return mjd_values
def perform_extraction(i, frame_list, testing=1):
folder_list_aux = make_folder_list(i)
field_name = param['field_name']
sl = param['saturation_level_post_calibration']
cat_mjd = []
cat_ra = []
cat_dec = []
cat_mag = []
folder_list = []
frame_list_flat = [item for sublist in frame_list for item in sublist]
[folder_list.extend([f]) for f in folder_list_aux if f in [frame[:len(f)] for frame in frame_list_flat]]
nnights = len(folder_list)
for (n, f) in enumerate(folder_list):
sys.stdout.write('\r Computing night {} of {}'.format(n+1, nnights))
sys.stdout.flush()
if param['disable_analysis_extraction'] == 0:
make_cat_folder(f+'phot/'+field_name+'/cat')
il = make_image_list(f+'phot/'+field_name+'/cal/', frame_list_flat)
cat_list_mjd = create_mjd_catalog(f+'phot/'+field_name+'/cal/', il)
cat_mjd.append(cat_list_mjd)
cat_list_ra, cat_list_dec, cat_list_mag = create_catalog_arrays(f+'phot/'+field_name, il, sl)
cat_ra.append(cat_list_ra)
cat_dec.append(cat_list_dec)
cat_mag.append(cat_list_mag)
if testing == 0:
remove_cat_folder(f+'phot/'+field_name+'/cat')
print '\n'
return cat_ra, cat_dec, cat_mag, cat_mjd
if __name__ == '__main__':
    # Testing
    # NOTE(review): perform_extraction() requires a frame_list argument;
    # this call passes only the data path and would raise TypeError —
    # confirm whether this entry point is still in use.
    perform_extraction(param['data_path'])
    print 'DONE'
| {
"content_hash": "5602e514edbb3607a33014d2148550ca",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 162,
"avg_line_length": 30.771428571428572,
"alnum_prop": 0.5911482513153823,
"repo_name": "xparedesfortuny/Phot",
"id": "d41b8e5064885086972b4fe3e6b5a4a46d31ec6c",
"size": "3325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analysis/source_extraction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "118034"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the Category model and attach an optional category FK to Course."""

    dependencies = [
        ('courses', '0012_auto_20170730_1559'),
    ]

    operations = [
        # Lookup table of unique category names with audit timestamps.
        # 'abstract': False presumably carried over from an abstract base
        # model in the app (auto-emitted by makemigrations).
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=2048, unique=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Nullable so existing Course rows stay valid; SET_NULL keeps
        # courses alive when their category is deleted.
        migrations.AddField(
            model_name='course',
            name='category',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='courses.Category'),
        ),
    ]
| {
"content_hash": "8bda6ed8dc14c52e3df15974d7d74f7e",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 128,
"avg_line_length": 32.25806451612903,
"alnum_prop": 0.567,
"repo_name": "crowd-course/scholars",
"id": "0807a8157cdc8bb9fed3a173a7ecef6c6fcd6960",
"size": "1073",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scholars/courses/migrations/0013_auto_20170810_1216.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "418417"
},
{
"name": "HTML",
"bytes": "552468"
},
{
"name": "JavaScript",
"bytes": "531650"
},
{
"name": "Python",
"bytes": "222619"
}
],
"symlink_target": ""
} |
from osc_lib import utils
class Display(object):
    """Provide functions for display resource"""

    # Columns used when listing resources (multiple records).
    # Subclasses must override this field to name the columns shown in
    # list views. Example: ("Column A", "columnb", "column c")
    list_column_names = ()

    # Columns used when showing a single resource record.
    # NOTE(review): bound at class-definition time, so a subclass that only
    # overrides list_column_names does NOT change show_column_names — it
    # must override both. Confirm this is intended.
    show_column_names = list_column_names

    # Display-column-name -> resource-property-name mapping.
    column_2_property = {}

    def get_mapped_properties(self, column_names):
        """Map each display column name to its resource property name.

        Names present in ``column_2_property`` are translated; all others
        pass through unchanged.
        """
        return [self.column_2_property.get(name, name)
                for name in column_names]

    def get_display_data(self, column_names=(), formatter=None):
        """get data mapped to column names

        column names will be auto transferred (convert to lowercase and
        blank will be replaced with underscore) to find mapping attributes
        example: "Attr A" --> attr_a, "Attr" --> "attr".
        ** all column names after transferred should be a property of resource

        :param formatter: column formatter
        :param column_names: columns to be returned (default is an empty
            tuple — fixed: a mutable ``[]`` default is shared across calls)
        :return: data mapping to column_names
        :rtype: tuple
        """
        properties = self.get_mapped_properties(column_names)
        return utils.get_item_properties(self,
                                         properties,
                                         formatters=formatter)
| {
"content_hash": "da7560e928eda156ce9006972dbc5677",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 78,
"avg_line_length": 38.76086956521739,
"alnum_prop": 0.6298373527762199,
"repo_name": "Huawei/OpenStackClient_VBS",
"id": "8f6844bb26aa711644d052f3af0a75f107ba39c5",
"size": "2394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vbclient/common/display.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "82795"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata: a community-supported preview module.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
module: k8s_auth
short_description: Authenticate to Kubernetes clusters which require an explicit login step
version_added: "2.8"
author: KubeVirt Team (@kubevirt)
description:
- "This module handles authenticating to Kubernetes clusters requiring I(explicit) authentication procedures,
meaning ones where a client logs in (obtains an authentication token), performs API operations using said
token and then logs out (revokes the token). An example of a Kubernetes distribution requiring this module
is OpenShift."
- "On the other hand a popular configuration for username+password authentication is one utilizing HTTP Basic
Auth, which does not involve any additional login/logout steps (instead login credentials can be attached
to each and every API call performed) and as such is handled directly by the C(k8s) module (and other
resource–specific modules) by utilizing the C(host), C(username) and C(password) parameters. Please
consult your preferred module's documentation for more details."
options:
state:
description:
- If set to I(present) connect to the API server using the URL specified in C(host) and attempt to log in.
- If set to I(absent) attempt to log out by revoking the authentication token specified in C(api_key).
default: present
choices:
- present
- absent
host:
description:
- Provide a URL for accessing the API server.
required: true
username:
description:
- Provide a username for authenticating with the API server.
password:
description:
- Provide a password for authenticating with the API server.
ca_cert:
description:
- "Path to a CA certificate file used to verify connection to the API server. The full certificate chain
must be provided to avoid certificate validation errors."
aliases: [ ssl_ca_cert ]
validate_certs:
description:
- "Whether or not to verify the API server's SSL certificates."
type: bool
default: true
aliases: [ verify_ssl ]
api_key:
description:
- When C(state) is set to I(absent), this specifies the token to revoke.
requirements:
- python >= 2.7
- urllib3
- requests
- requests-oauthlib
'''
EXAMPLES = '''
- hosts: localhost
module_defaults:
group/k8s:
host: https://k8s.example.com/
ca_cert: ca.pem
tasks:
- block:
# It's good practice to store login credentials in a secure vault and not
# directly in playbooks.
- include_vars: k8s_passwords.yml
- name: Log in (obtain access token)
k8s_auth:
username: admin
password: "{{ k8s_admin_password }}"
register: k8s_auth_results
# Previous task provides the token/api_key, while all other parameters
# are taken from module_defaults
- name: Get a list of all pods from any namespace
k8s_facts:
api_key: "{{ k8s_auth_results.k8s_auth.api_key }}"
kind: Pod
register: pod_list
always:
- name: If login succeeded, try to log out (revoke access token)
when: k8s_auth_results.k8s_auth.api_key is defined
k8s_auth:
state: absent
api_key: "{{ k8s_auth_results.k8s_auth.api_key }}"
'''
# Returned value names need to match k8s modules parameter names, to make it
# easy to pass returned values of k8s_auth to other k8s modules.
# Discussion: https://github.com/ansible/ansible/pull/50807#discussion_r248827899
RETURN = '''
k8s_auth:
description: Kubernetes authentication facts.
returned: success
type: complex
contains:
api_key:
description: Authentication token.
returned: success
type: str
host:
description: URL for accessing the API server.
returned: success
type: str
ca_cert:
description: Path to a CA certificate file used to verify connection to the API server.
returned: success
type: str
validate_certs:
description: "Whether or not to verify the API server's SSL certificates."
returned: success
type: bool
username:
description: Username for authenticating with the API server.
returned: success
type: str
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib_parse import urlparse, parse_qs, urlencode
# 3rd party imports
# Each optional dependency is probed at import time; the corresponding
# HAS_* flag is checked in KubernetesAuthModule.__init__ so a missing
# package produces a clean module failure instead of a raw ImportError.
try:
    import requests
    HAS_REQUESTS = True
except ImportError:
    HAS_REQUESTS = False

try:
    from requests_oauthlib import OAuth2Session
    HAS_REQUESTS_OAUTH = True
except ImportError:
    HAS_REQUESTS_OAUTH = False

try:
    from urllib3.util import make_headers
    HAS_URLLIB3 = True
except ImportError:
    HAS_URLLIB3 = False
# Argument spec for AnsibleModule. 'state: present' performs a login
# (username/password enforced via required_if in __init__); 'state: absent'
# revokes the token given in api_key. Secrets are marked no_log.
K8S_AUTH_ARG_SPEC = {
    'state': {
        'default': 'present',
        'choices': ['present', 'absent'],
    },
    'host': {'required': True},
    'username': {},
    'password': {'no_log': True},
    'ca_cert': {'type': 'path', 'aliases': ['ssl_ca_cert']},
    'validate_certs': {
        'type': 'bool',
        'default': True,
        'aliases': ['verify_ssl']
    },
    'api_key': {'no_log': True},
}
class KubernetesAuthModule(AnsibleModule):
    """AnsibleModule performing explicit login/logout against an
    OpenShift-style OAuth server (see DOCUMENTATION above)."""

    def __init__(self):
        AnsibleModule.__init__(
            self,
            argument_spec=K8S_AUTH_ARG_SPEC,
            required_if=[
                ('state', 'present', ['username', 'password']),
                ('state', 'absent', ['api_key']),
            ]
        )

        # Fail early with actionable messages if optional deps are missing.
        if not HAS_REQUESTS:
            self.fail("This module requires the python 'requests' package. Try `pip install requests`.")

        if not HAS_REQUESTS_OAUTH:
            self.fail("This module requires the python 'requests-oauthlib' package. Try `pip install requests-oauthlib`.")

        if not HAS_URLLIB3:
            self.fail("This module requires the python 'urllib3' package. Try `pip install urllib3`.")

    def execute_module(self):
        """Run the login (state=present) or logout (state=absent) flow and
        exit via exit_json with the k8s_auth result dict."""
        state = self.params.get('state')
        verify_ssl = self.params.get('validate_certs')
        ssl_ca_cert = self.params.get('ca_cert')

        self.auth_username = self.params.get('username')
        self.auth_password = self.params.get('password')
        self.auth_api_key = self.params.get('api_key')
        self.con_host = self.params.get('host')

        # python-requests takes either a bool or a path to a ca file as the 'verify' param
        if verify_ssl and ssl_ca_cert:
            self.con_verify_ca = ssl_ca_cert    # path
        else:
            self.con_verify_ca = verify_ssl    # bool

        # Get needed info to access authorization APIs
        self.openshift_discover()

        if state == 'present':
            new_api_key = self.openshift_login()
            # Result keys mirror k8s module parameter names so they can be
            # passed straight into other k8s modules (see RETURN above).
            result = dict(
                host=self.con_host,
                validate_certs=verify_ssl,
                ca_cert=ssl_ca_cert,
                api_key=new_api_key,
                username=self.auth_username,
            )
        else:
            self.openshift_logout()
            result = dict()

        self.exit_json(changed=False, k8s_auth=result)

    def openshift_discover(self):
        """Fetch the OAuth metadata document and record the authorization
        and token endpoint URLs on self."""
        url = '{0}/.well-known/oauth-authorization-server'.format(self.con_host)
        ret = requests.get(url, verify=self.con_verify_ca)

        if ret.status_code != 200:
            self.fail_request("Couldn't find OpenShift's OAuth API", method='GET', url=url,
                              reason=ret.reason, status_code=ret.status_code)

        try:
            oauth_info = ret.json()

            self.openshift_auth_endpoint = oauth_info['authorization_endpoint']
            self.openshift_token_endpoint = oauth_info['token_endpoint']
        except Exception as e:
            self.fail_json(msg="Something went wrong discovering OpenShift OAuth details.",
                           exception=traceback.format_exc())

    def openshift_login(self):
        """Perform the OAuth authorization-code dance using HTTP Basic
        credentials and return the resulting access token string."""
        os_oauth = OAuth2Session(client_id='openshift-challenging-client')
        authorization_url, state = os_oauth.authorization_url(self.openshift_auth_endpoint,
                                                              state="1", code_challenge_method='S256')
        auth_headers = make_headers(basic_auth='{0}:{1}'.format(self.auth_username, self.auth_password))

        # Request authorization code using basic auth credentials
        ret = os_oauth.get(
            authorization_url,
            headers={'X-Csrf-Token': state, 'authorization': auth_headers.get('authorization')},
            verify=self.con_verify_ca,
            allow_redirects=False
        )

        if ret.status_code != 302:
            self.fail_request("Authorization failed.", method='GET', url=authorization_url,
                              reason=ret.reason, status_code=ret.status_code)

        # In here we have `code` and `state`, I think `code` is the important one
        qwargs = {}
        for k, v in parse_qs(urlparse(ret.headers['Location']).query).items():
            qwargs[k] = v[0]
        qwargs['grant_type'] = 'authorization_code'

        # Using authorization code given to us in the Location header of the previous request, request a token
        ret = os_oauth.post(
            self.openshift_token_endpoint,
            headers={
                'Accept': 'application/json',
                'Content-Type': 'application/x-www-form-urlencoded',
                # This is just base64 encoded 'openshift-challenging-client:'
                'Authorization': 'Basic b3BlbnNoaWZ0LWNoYWxsZW5naW5nLWNsaWVudDo='
            },
            data=urlencode(qwargs),
            verify=self.con_verify_ca
        )

        if ret.status_code != 200:
            self.fail_request("Failed to obtain an authorization token.", method='POST',
                              url=self.openshift_token_endpoint,
                              reason=ret.reason, status_code=ret.status_code)

        return ret.json()['access_token']

    def openshift_logout(self):
        """Revoke self.auth_api_key by deleting its oauthaccesstoken
        resource. Failures are deliberately ignored."""
        url = '{0}/apis/oauth.openshift.io/v1/oauthaccesstokens/{1}'.format(self.con_host, self.auth_api_key)
        headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'Authorization': 'Bearer {0}'.format(self.auth_api_key)
        }
        json = {
            "apiVersion": "oauth.openshift.io/v1",
            "kind": "DeleteOptions"
        }

        ret = requests.delete(url, headers=headers, json=json, verify=self.con_verify_ca)
        # Ignore errors, the token will time out eventually anyway

    def fail(self, msg=None):
        """Fail the module with a plain message."""
        self.fail_json(msg=msg)

    def fail_request(self, msg, **kwargs):
        """Fail the module, attaching request details as req_* fields."""
        req_info = {}
        for k, v in kwargs.items():
            req_info['req_' + k] = v
        self.fail_json(msg=msg, **req_info)
def main():
    """Instantiate the module and run it, converting any unhandled
    exception into a clean Ansible failure."""
    module = KubernetesAuthModule()
    try:
        module.execute_module()
    except Exception as err:
        module.fail_json(msg=str(err), exception=traceback.format_exc())
if __name__ == '__main__':
main()
| {
"content_hash": "21d6638ccaaae4a0fa7d168dc3de7f2a",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 122,
"avg_line_length": 33.93939393939394,
"alnum_prop": 0.6221428571428571,
"repo_name": "SergeyCherepanov/ansible",
"id": "b1ea7c23862649492b57dc584d99d78e538e9661",
"size": "11387",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/clustering/k8s/k8s_auth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings
# URL routes for the 'ti' app. Uses the string-view form of patterns(),
# which was deprecated in Django 1.8 and removed in 1.10 — fine for the
# Django version this project pins, but worth noting for upgrades.
# static() appends serving of STATIC_URL from STATIC_ROOT (dev use).
urlpatterns = patterns('ti.views',
url(r'^$', 'home', name='home'),
url(r'^login$', 'login_view', name='login'),
url(r'^logout$', 'logout_view', name='logout'),
url(r'^overview$', 'overview_view', name='overview'),
url(r'^base.js$', 'base_js', name='base_js'),
url(r'^page/(?P<page_id>\d+)/post/(?P<single_post_id>\d+)', 'page_info', name='page_info_single'),
url(r'^page/(?P<page_id>\d+)/search', 'page_info', name='page_search'),
url(r'^page/(?P<page_id>\d+)/clusters', 'page_cluster', name='page_clusters'),
url(r'^user/(?P<user_id>\d+)', 'user_info', name='user_info'),
# NOTE: order matters — this bare page/<id> route must stay below the
# more specific page/<id>/... routes above, or it would shadow them.
url(r'^page/(?P<page_id>\d+)', 'page_info', name='page_info'),
url(r'^json$', 'json_serve', name='json'),
url(r'^base.css$', 'base_css', name='base_css'),
url(r'^template$', 'template', name='template'),
) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| {
"content_hash": "fdad0ef7df26caa212b780ca6fb69c19",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 102,
"avg_line_length": 54.05263157894737,
"alnum_prop": 0.6163583252190847,
"repo_name": "FrankGrimm/text-insights",
"id": "047be8356f5fa714eedc2d3926e85a802a36f768",
"size": "1027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/ti/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "103299"
},
{
"name": "JavaScript",
"bytes": "1470677"
},
{
"name": "PHP",
"bytes": "36056"
},
{
"name": "Python",
"bytes": "130473"
},
{
"name": "Shell",
"bytes": "1368"
}
],
"symlink_target": ""
} |
from msrest.service_client import ServiceClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from .operations.servers_operations import ServersOperations
from .operations.firewall_rules_operations import FirewallRulesOperations
from .operations.virtual_network_rules_operations import VirtualNetworkRulesOperations
from .operations.databases_operations import DatabasesOperations
from .operations.configurations_operations import ConfigurationsOperations
from .operations.log_files_operations import LogFilesOperations
from .operations.performance_tiers_operations import PerformanceTiersOperations
from .operations.location_based_performance_tier_operations import LocationBasedPerformanceTierOperations
from .operations.check_name_availability_operations import CheckNameAvailabilityOperations
from .operations.operations import Operations
from . import models
class MySQLManagementClientConfiguration(AzureConfiguration):
    """Configuration for MySQLManagementClient.

    All constructor parameters are saved as instance attributes.

    :param credentials: Credentials needed for the client to connect to
        Azure (a msrestazure credentials object).
    :param subscription_id: The subscription ID that identifies an Azure
        subscription.
    :type subscription_id: str
    :param str base_url: Service URL; defaults to the public Azure
        management endpoint when not given.
    """

    def __init__(
            self, credentials, subscription_id, base_url=None):

        # Guard clauses: both identity parameters are mandatory.
        if credentials is None:
            raise ValueError("Parameter 'credentials' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")

        super(MySQLManagementClientConfiguration, self).__init__(
            base_url or 'https://management.azure.com')

        self.add_user_agent('mysqlmanagementclient/{}'.format(VERSION))
        self.add_user_agent('Azure-SDK-For-Python')

        self.credentials = credentials
        self.subscription_id = subscription_id
class MySQLManagementClient(object):
    """The Microsoft Azure management API provides create, read, update, and delete functionality for Azure MySQL resources including servers, databases, firewall rules, VNET rules, log files and configurations.

    :ivar config: Configuration for client.
    :vartype config: MySQLManagementClientConfiguration

    :ivar servers: Servers operations
    :vartype servers: azure.mgmt.rdbms.mysql.operations.ServersOperations
    :ivar firewall_rules: FirewallRules operations
    :vartype firewall_rules: azure.mgmt.rdbms.mysql.operations.FirewallRulesOperations
    :ivar virtual_network_rules: VirtualNetworkRules operations
    :vartype virtual_network_rules: azure.mgmt.rdbms.mysql.operations.VirtualNetworkRulesOperations
    :ivar databases: Databases operations
    :vartype databases: azure.mgmt.rdbms.mysql.operations.DatabasesOperations
    :ivar configurations: Configurations operations
    :vartype configurations: azure.mgmt.rdbms.mysql.operations.ConfigurationsOperations
    :ivar log_files: LogFiles operations
    :vartype log_files: azure.mgmt.rdbms.mysql.operations.LogFilesOperations
    :ivar performance_tiers: PerformanceTiers operations
    :vartype performance_tiers: azure.mgmt.rdbms.mysql.operations.PerformanceTiersOperations
    :ivar location_based_performance_tier: LocationBasedPerformanceTier operations
    :vartype location_based_performance_tier: azure.mgmt.rdbms.mysql.operations.LocationBasedPerformanceTierOperations
    :ivar check_name_availability: CheckNameAvailability operations
    :vartype check_name_availability: azure.mgmt.rdbms.mysql.operations.CheckNameAvailabilityOperations
    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.rdbms.mysql.operations.Operations

    :param credentials: Credentials needed for the client to connect to Azure.
    :type credentials: :mod:`A msrestazure Credentials
     object<msrestazure.azure_active_directory>`
    :param subscription_id: The subscription ID that identifies an Azure
     subscription.
    :type subscription_id: str
    :param str base_url: Service URL
    """

    def __init__(
            self, credentials, subscription_id, base_url=None):

        self.config = MySQLManagementClientConfiguration(credentials, subscription_id, base_url)
        self._client = ServiceClient(self.config.credentials, self.config)

        # Model registry for (de)serialization, built from every class the
        # generated models package exports.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self.api_version = '2017-04-30-preview'
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

        # One operations group per REST resource collection, all sharing
        # the same transport, config and (de)serializers.
        self.servers = ServersOperations(
            self._client, self.config, self._serialize, self._deserialize)
        self.firewall_rules = FirewallRulesOperations(
            self._client, self.config, self._serialize, self._deserialize)
        self.virtual_network_rules = VirtualNetworkRulesOperations(
            self._client, self.config, self._serialize, self._deserialize)
        self.databases = DatabasesOperations(
            self._client, self.config, self._serialize, self._deserialize)
        self.configurations = ConfigurationsOperations(
            self._client, self.config, self._serialize, self._deserialize)
        self.log_files = LogFilesOperations(
            self._client, self.config, self._serialize, self._deserialize)
        self.performance_tiers = PerformanceTiersOperations(
            self._client, self.config, self._serialize, self._deserialize)
        self.location_based_performance_tier = LocationBasedPerformanceTierOperations(
            self._client, self.config, self._serialize, self._deserialize)
        self.check_name_availability = CheckNameAvailabilityOperations(
            self._client, self.config, self._serialize, self._deserialize)
        self.operations = Operations(
            self._client, self.config, self._serialize, self._deserialize)
| {
"content_hash": "313f94bca84e0dfa43aa39810a5a2672",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 211,
"avg_line_length": 52.333333333333336,
"alnum_prop": 0.7532255430344602,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "011bb63cb449fed5c50ec699f98923312dc872e3",
"size": "6597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-rdbms/azure/mgmt/rdbms/mysql/my_sql_management_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
"""Tests for util module."""
import os
import stat
from absl.testing import absltest
import mock
from cauliflowervest.client import util
MOUNT_OUTPUT_NOMINAL = """
/dev/disk0s2 on / (hfs, local, journaled)
devfs on /dev (devfs, local, nobrowse)
/dev/disk0s4 on /Volumes/Untitled 2 (hfs, local, journaled)
map -hosts on /net (autofs, nosuid, automounted, nobrowse)
map auto_home on /home (autofs, automounted, nobrowse)
""".strip()
MOUNT_OUTPUT_OUT_OF_ORDER = """
devfs on /dev (devfs, local, nobrowse)
/dev/disk0s4 on /Volumes/Untitled 2 (hfs, local, journaled)
/dev/disk0s2 on / (hfs, local, journaled)
map -hosts on /net (autofs, nosuid, automounted, nobrowse)
map auto_home on /home (autofs, automounted, nobrowse)
""".strip()
MOUNT_OUTPUT_TRAILING_BLANK = """
devfs on /dev (devfs, local, nobrowse)
/dev/disk0s4 on /Volumes/Untitled 2 (hfs, local, journaled)
map -hosts on /net (autofs, nosuid, automounted, nobrowse)
map auto_home on /home (autofs, automounted, nobrowse)
""".lstrip()
class GetRootDiskTest(absltest.TestCase):
  """Test the GetRootDisk() function."""

  # assertEquals (used originally) is a deprecated unittest alias, removed
  # in Python 3.12 — replaced with assertEqual throughout.

  @mock.patch.object(util, 'Exec', return_value=(1, '', ''))
  def testEnumerationFailure(self, exec_mock):
    """A failing /sbin/mount invocation raises util.Error."""
    self.assertRaises(util.Error, util.GetRootDisk)
    exec_mock.assert_called_once_with('/sbin/mount')

  @mock.patch.object(util, 'Exec', return_value=(0, MOUNT_OUTPUT_NOMINAL, ''))
  def testOk(self, exec_mock):
    """The root device is extracted from nominal mount output."""
    self.assertEqual('/dev/disk0s2', util.GetRootDisk())
    exec_mock.assert_called_once_with('/sbin/mount')

  @mock.patch.object(
      util, 'Exec', return_value=(0, MOUNT_OUTPUT_OUT_OF_ORDER, ''))
  def testOutOfOrder(self, exec_mock):
    """The root device is found even when "/" is not the first line."""
    self.assertEqual('/dev/disk0s2', util.GetRootDisk())
    exec_mock.assert_called_once_with('/sbin/mount')

  @mock.patch.object(
      util, 'Exec', return_value=(0, MOUNT_OUTPUT_TRAILING_BLANK, ''))
  def testTrailingBlank(self, exec_mock):
    """Output lacking a "/" mount entry raises util.Error."""
    self.assertRaises(util.Error, util.GetRootDisk)
    exec_mock.assert_called_once_with('/sbin/mount')

  @mock.patch.object(util, 'Exec', side_effect=util.ExecError)
  def testException(self, exec_mock):
    """An ExecError from the mount call surfaces as util.Error."""
    self.assertRaises(util.Error, util.GetRootDisk)
    exec_mock.assert_called_once_with('/sbin/mount')
class SafeOpenTest(absltest.TestCase):
  """Test the SafeOpen() function."""

  # 0o700/0o600 below are the modern spelling of the original 0700/0600
  # octal literals (same values, valid since Python 2.6 and required by
  # Python 3).

  dir = '/var/root/Library/cauliflowervest'
  path = '/var/root/Library/cauliflowervest/access_token.dat'

  @mock.patch.object(os, 'makedirs', side_effect=OSError)
  def testDirExists(self, makedirs_mock):
    """An OSError from makedirs (directory already exists) is tolerated."""
    result = object()
    open_mock = mock.Mock()
    open_mock.return_value = result
    self.assertEqual(
        util.SafeOpen(self.path, 'r', open_=open_mock), result)
    open_mock.assert_called_with(self.path, 'r')
    makedirs_mock.assert_called_once_with(self.dir, 0o700)

  @mock.patch.object(os, 'mknod', side_effect=OSError)
  @mock.patch.object(os, 'makedirs')
  def testFileExists(self, makedirs_mock, mknod_mock):
    """An OSError from mknod (file already exists) is tolerated."""
    result = object()
    open_mock = mock.Mock()
    open_mock.return_value = result
    self.assertEqual(
        util.SafeOpen(self.path, 'r', open_=open_mock), result)
    open_mock.assert_called_with(self.path, 'r')
    makedirs_mock.assert_called_once_with(self.dir, 0o700)
    mknod_mock.assert_called_once_with(self.path, 0o600 | stat.S_IFREG)

  @mock.patch.object(os, 'mknod')
  @mock.patch.object(os, 'makedirs')
  def testOk(self, makedirs_mock, mknod_mock):
    """Nominal path: directory and file are created, then opened."""
    result = object()
    open_mock = mock.Mock(return_value=result)
    self.assertEqual(
        util.SafeOpen(self.path, 'r', open_=open_mock), result)
    open_mock.assert_called_with(self.path, 'r')
    makedirs_mock.assert_called_once_with(self.dir, 0o700)
    mknod_mock.assert_called_once_with(self.path, 0o600 | stat.S_IFREG)
class UtilModuleTest(absltest.TestCase):
  """Test module level functions in util."""

  # --- util.GetPlistFromExec ---

  @mock.patch.object(util.plistlib, 'readPlistFromString', return_value='plist')
  @mock.patch.object(util, 'Exec', return_value=(0, 'stdout', 'stderr'))
  def testGetPlistFromExec(self, exec_mock, read_plist_mock):
    self.assertEqual('plist', util.GetPlistFromExec('cmd', stdin='stdin'))
    exec_mock.assert_called_once_with('cmd', stdin='stdin')
    read_plist_mock.assert_called_once_with('stdout')

  @mock.patch.object(util, 'Exec', return_value=(1, 'stdout', 'stderr'))
  def testGetPlistFromExecNonZeroReturncode(self, exec_mock):
    self.assertRaises(util.ExecError, util.GetPlistFromExec, 'cmd')
    exec_mock.assert_called_once_with('cmd', stdin=None)

  @mock.patch.object(
      util.plistlib, 'readPlistFromString', side_effect=util.expat.ExpatError)
  @mock.patch.object(util, 'Exec', return_value=(0, 'stdout', 'stderr'))
  def testGetPlistFromExecPlistParseError(self, exec_mock, _):
    self.assertRaises(util.ExecError, util.GetPlistFromExec, 'cmd')
    exec_mock.assert_called_once_with('cmd', stdin=None)

  # --- util.JoinURL: slash-normalization variants ---

  def testJoinURL(self):
    base_url = 'http://example.com'
    part1 = 'foo'
    part2 = 'bar'
    out = util.JoinURL(base_url, part1, part2)
    self.assertEqual(out, 'http://example.com/foo/bar')

  def testJoinURLWithTrailingSlashOnBaseURL(self):
    base_url = 'http://example.com/'
    part1 = 'foo'
    part2 = 'bar'
    out = util.JoinURL(base_url, part1, part2)
    self.assertEqual(out, 'http://example.com/foo/bar')

  def testJoinURLWithLeadingSlashOnInnerURLPart(self):
    base_url = 'http://example.com'
    part1 = '/foo'
    part2 = 'bar'
    out = util.JoinURL(base_url, part1, part2)
    self.assertEqual(out, 'http://example.com/foo/bar')

  def testJoinURLWithLeadingAndTrailingSlashOnInnerURLPart(self):
    base_url = 'http://example.com'
    part1 = '/foo/'
    part2 = '/bar'
    out = util.JoinURL(base_url, part1, part2)
    self.assertEqual(out, 'http://example.com/foo/bar')

  def testJoinURLWithTrailingSlashOnInnerURLPart(self):
    base_url = 'http://example.com'
    part1 = 'foo/'
    part2 = 'bar'
    out = util.JoinURL(base_url, part1, part2)
    self.assertEqual(out, 'http://example.com/foo/bar')

  def testJoinURLWithTrailingSlashOnLastURLPart(self):
    # A trailing slash on the final part is preserved.
    base_url = 'http://example.com'
    part1 = 'foo'
    part2 = 'bar/'
    out = util.JoinURL(base_url, part1, part2)
    self.assertEqual(out, 'http://example.com/foo/bar/')

  # --- util.RetrieveEntropy: parses HIDIdleTime out of `ioreg -l` ---

  @mock.patch.object(util, 'Exec')
  def testRetrieveEntropy(self, exec_mock):
    rc = 0
    stdout = 'HIDIdleTime=100\nWhateverOtherCrap\n'
    stderr = ''
    expected_entropy = 'HIDIdleTime=100'
    exec_mock.return_value = (rc, stdout, stderr)
    self.assertEqual(expected_entropy, util.RetrieveEntropy())
    exec_mock.assert_called_once_with(['/usr/sbin/ioreg', '-l'])

  @mock.patch.object(util, 'Exec')
  def testRetrieveEntropyWhenNoOutputResult(self, exec_mock):
    rc = 0
    stdout = 'CrapThatWontMatchTheRegex\n'
    stderr = ''
    exec_mock.return_value = (rc, stdout, stderr)
    self.assertRaises(util.RetrieveEntropyError, util.RetrieveEntropy)
    exec_mock.assert_called_once_with(['/usr/sbin/ioreg', '-l'])

  @mock.patch.object(util, 'Exec')
  def testRetrieveEntropyWhenErrorIoRegOutput(self, exec_mock):
    rc = 0
    stdout = ''
    stderr = ''
    exec_mock.return_value = (rc, stdout, stderr)
    self.assertRaises(util.RetrieveEntropyError, util.RetrieveEntropy)
    exec_mock.assert_called_once_with(['/usr/sbin/ioreg', '-l'])

  @mock.patch.object(util, 'Exec')
  def testRetrieveEntropyWhenErrorIoRegRc(self, exec_mock):
    rc = 1
    stdout = ''
    stderr = ''
    exec_mock.return_value = (rc, stdout, stderr)
    self.assertRaises(util.RetrieveEntropyError, util.RetrieveEntropy)
    exec_mock.assert_called_once_with(['/usr/sbin/ioreg', '-l'])

  @mock.patch.object(util, 'Exec')
  def testRetrieveEntropyWhenErrorIoRegExec(self, exec_mock):
    rc = 1
    stdout = ''
    stderr = ''
    exec_mock.return_value = (rc, stdout, stderr)
    self.assertRaises(util.RetrieveEntropyError, util.RetrieveEntropy)
    exec_mock.assert_called_once_with(['/usr/sbin/ioreg', '-l'])

  # --- util.SupplyEntropy: writes entropy to /dev/random ---
  # NOTE: `file` in the spec= arguments is the Python 2 built-in file
  # type; this test suite targets Python 2.

  def testSupplyEntropy(self):
    entropy = 'entropy'
    file_mock = mock.Mock(spec=file)
    mock_open = mock.Mock(spec=open)
    mock_open.return_value = file_mock
    util.SupplyEntropy(entropy, open_=mock_open)
    mock_open.assert_called_once_with('/dev/random', 'w')
    file_mock.write.assert_called_once_with(entropy)

  def testSupplyEntropyWhenIOErrorOpen(self):
    entropy = 'entropy'
    mock_open = mock.Mock(spec=open)
    mock_open.side_effect = IOError
    self.assertRaises(
        util.SupplyEntropyError, util.SupplyEntropy, entropy, open_=mock_open)

  def testSupplyEntropyWhenIOErrorWrite(self):
    entropy = 'entropy'
    file_mock = mock.Mock(spec=file)
    file_mock.write.side_effect = IOError
    mock_open = mock.Mock(spec=open)
    mock_open.return_value = file_mock
    self.assertRaises(
        util.SupplyEntropyError, util.SupplyEntropy, entropy, open_=mock_open)

  def testSupplyEntropyWhenIOErrorClose(self):
    entropy = 'entropy'
    file_mock = mock.Mock(spec=file)
    file_mock.close.side_effect = IOError
    mock_open = mock.Mock(spec=open)
    mock_open.return_value = file_mock
    self.assertRaises(
        util.SupplyEntropyError, util.SupplyEntropy, entropy, open_=mock_open)

  def testSupplyEntropyWhenNoneSupplied(self):
    entropy = None
    self.assertRaises(util.SupplyEntropyError, util.SupplyEntropy, entropy)
if __name__ == '__main__':
absltest.main()
| {
"content_hash": "cc7f177ea69689de33455ae27b8bfc58",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 80,
"avg_line_length": 31.616161616161616,
"alnum_prop": 0.6921192758253462,
"repo_name": "maximermilov/cauliflowervest",
"id": "0e968078512da64c0f5a97d61fe1d27f0c04f508",
"size": "9987",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cauliflowervest/client/util_test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "552"
},
{
"name": "HTML",
"bytes": "30245"
},
{
"name": "JavaScript",
"bytes": "33420"
},
{
"name": "Python",
"bytes": "459426"
}
],
"symlink_target": ""
} |
import subprocess
import socket
import time
import inspect, os, sys
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
# Test helper: connect to the broker under test on port 1889 and publish a
# single message, so the SSL-bridge test has traffic to forward.
rc = 1
keepalive = 60
connect_packet = mosq_test.gen_connect("test-helper", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)
publish_packet = mosq_test.gen_publish("bridge/ssl/test", qos=0, payload="message")
disconnect_packet = mosq_test.gen_disconnect()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("localhost", 1889))
sock.send(connect_packet)
# Only publish once the broker has acknowledged the connection.
if mosq_test.expect_packet(sock, "connack", connack_packet):
    sock.send(publish_packet)
    sock.send(disconnect_packet)
sock.close()
exit(0)
| {
"content_hash": "30122e711ba4b1b97ffb386df89bd855",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 129,
"avg_line_length": 28.78125,
"alnum_prop": 0.743756786102063,
"repo_name": "zhkzyth/better-mosquitto",
"id": "7ec4b03f855f3a6044347c93d978c2fb5d9f25ae",
"size": "944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/broker/08-ssl-bridge-helper.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "723187"
},
{
"name": "C++",
"bytes": "34223"
},
{
"name": "JavaScript",
"bytes": "8597"
},
{
"name": "Perl",
"bytes": "3271"
},
{
"name": "Python",
"bytes": "265033"
},
{
"name": "Shell",
"bytes": "3991"
},
{
"name": "XSLT",
"bytes": "1151"
}
],
"symlink_target": ""
} |
import htmls
from django.test import TestCase
from django.test.client import RequestFactory
from unittest import mock
from django_cradmin.viewhelpers import formview
class TestDelete(TestCase):
    """Tests for :class:`django_cradmin.viewhelpers.formview.WithinRoleDeleteView`."""

    def setUp(self):
        self.factory = RequestFactory()

    def test_get(self):
        # Subclass with role/object lookups mocked out so no database
        # access is needed.
        class SimpleDeleteView(formview.WithinRoleDeleteView):
            def get_queryset_for_role(self):
                queryset = mock.MagicMock()
                return queryset

            def get_object(self, queryset=None):
                obj = mock.MagicMock()
                obj.__str__.return_value = 'Simple Test Item'
                obj._meta = mock.MagicMock()
                obj._meta.verbose_name = 'TestModel'
                return obj

        request = self.factory.get('/test')
        request.cradmin_app = mock.MagicMock()
        request.cradmin_instance = mock.MagicMock()
        response = SimpleDeleteView.as_view()(request, pk=10)
        response.render()
        selector = htmls.S(response.content)
        # GET renders a confirmation form that posts back to the same URL.
        self.assertEqual(selector.one('form')['action'], 'http://testserver/test')
        self.assertEqual(
            selector.one('h1.test-primary-h1').alltext_normalized,
            'Confirm delete')
        self.assertEqual(
            selector.one('.test-confirm-message').alltext_normalized,
            'Are you sure you want to delete "Simple Test Item"?')

    def test_post(self):
        obj = mock.MagicMock()

        class SimpleDeleteView(formview.WithinRoleDeleteView):
            def get_queryset_for_role(self):
                queryset = mock.MagicMock()
                return queryset

            def get_object(self, queryset=None):
                return obj

        request = self.factory.post('/test')
        request._messages = mock.MagicMock()
        request.cradmin_app = mock.MagicMock()
        request.cradmin_app.reverse_appindexurl.return_value = '/success'
        SimpleDeleteView.as_view()(request, pk=10)
        # POST must actually delete the looked-up object.
        obj.delete.assert_called_once_with()
| {
"content_hash": "dca8ba1249357f3f1bf46c451f71f35a",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 82,
"avg_line_length": 36,
"alnum_prop": 0.6111111111111112,
"repo_name": "appressoas/django_cradmin",
"id": "731ef32950350f572bfaed2844883b4bde7daa5d",
"size": "2016",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_cradmin/tests/test_viewhelpers/test_delete.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "192105"
},
{
"name": "JavaScript",
"bytes": "1951677"
},
{
"name": "Python",
"bytes": "771868"
},
{
"name": "SCSS",
"bytes": "679114"
}
],
"symlink_target": ""
} |
"""Tests for broadcast_util."""
from tensorflow_probability.python.internal import broadcast_util as bu
from tensorflow_probability.python.internal import test_util
class BroadcastUtilTest(test_util.TestCase):
  """Tests for broadcast_util.right_justified_unsorted_segment_sum."""

  @test_util.numpy_disable_test_missing_functionality('tf.unsorted_segment_sum')
  @test_util.jax_disable_test_missing_functionality('tf.unsorted_segment_sum')
  def test_right_justified_unsorted_segment_sum(self):
    # If the segment indices are range(num_segments), segment sum
    # should be the identity.  Here we check dimension alignment:
    # data.shape == [3, 2]; segment_ids.shape == [2]
    data = [[1, 2], [4, 8], [16, 32]]
    segment_ids = [0, 1]
    expected = [[1, 2], [4, 8], [16, 32]]
    self.assertAllEqual(
        expected,
        bu.right_justified_unsorted_segment_sum(
            data, segment_ids, num_segments=2))

    # Check the same but with nontrivial summation, and with a
    # too-large max_segments.  The latter determines the innermost
    # dimension of the result (unused segments stay zero).
    data = [[1, 2], [4, 8], [16, 32]]
    segment_ids = [1, 1]
    expected = [[0, 3, 0, 0], [0, 12, 0, 0], [0, 48, 0, 0]]
    self.assertAllEqual(
        expected,
        bu.right_justified_unsorted_segment_sum(
            data, segment_ids, num_segments=4))

    # If the segment_ids have the same shape as the data,
    # we expect a vector of size num_segments as output.
    data = [[1, 2], [4, 8], [16, 32]]
    segment_ids = [[1, 1], [0, 1], [0, 0]]
    expected = [52, 11, 0, 0]
    self.assertAllEqual(
        expected,
        bu.right_justified_unsorted_segment_sum(
            data, segment_ids, num_segments=4))

    # Same as the previous example but with a batch dimension for the data;
    # the segment ids broadcast against the trailing data dimensions.
    data = [[[1, 2], [4, 8], [16, 32]],
            [[64, 128], [256, 512], [1024, 2048]]]
    segment_ids = [[1, 1], [0, 1], [0, 0]]
    expected = [[52, 11, 0, 0], [64 * 52, 64 * 11, 0, 0]]
    self.assertAllEqual(
        expected,
        bu.right_justified_unsorted_segment_sum(
            data, segment_ids, num_segments=4))
# Allow running this test module directly.
if __name__ == '__main__':
  test_util.main()
| {
"content_hash": "421b30adbc9aa28236ad41665124abce",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 80,
"avg_line_length": 37.357142857142854,
"alnum_prop": 0.6080305927342257,
"repo_name": "tensorflow/probability",
"id": "175af5aee19fb5ae32ec42b5afe7b11ad03fc110",
"size": "2770",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow_probability/python/internal/broadcast_util_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "55552121"
},
{
"name": "Python",
"bytes": "17339674"
},
{
"name": "Shell",
"bytes": "24852"
},
{
"name": "Starlark",
"bytes": "663851"
}
],
"symlink_target": ""
} |
"""Implementation of sample attack."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from scipy.misc import imread
from scipy.misc import imsave
import tensorflow as tf
# Command-line flags shared by all sample attacks in the competition harness.
tf.flags.DEFINE_string(
    'master', '', 'The address of the TensorFlow master to use.')

tf.flags.DEFINE_string(
    'input_dir', '', 'Input directory with images.')

tf.flags.DEFINE_string(
    'output_dir', '', 'Output directory with images.')

tf.flags.DEFINE_float(
    'max_epsilon', 16.0, 'Maximum size of adversarial perturbation.')

tf.flags.DEFINE_integer(
    'image_width', 299, 'Width of each input images.')

tf.flags.DEFINE_integer(
    'image_height', 299, 'Height of each input images.')

tf.flags.DEFINE_integer(
    'batch_size', 16, 'How many images process at one time.')

FLAGS = tf.flags.FLAGS
def load_images(input_dir, batch_shape):
  """Read png images from input directory in batches.

  Args:
    input_dir: input directory
    batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3]

  Yields:
    filenames: list file names without path of each image
      Length of this list could be less than batch_size, in this case only
      first few images of the result are elements of the minibatch.
    images: array with all images from this batch
  """
  images = np.zeros(batch_shape)
  filenames = []
  idx = 0
  batch_size = batch_shape[0]
  for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):
    with tf.gfile.Open(filepath) as f:
      # Normalize pixel values into [0, 1].
      images[idx, :, :, :] = imread(f, mode='RGB').astype(np.float) / 255.0
    filenames.append(os.path.basename(filepath))
    idx += 1
    if idx == batch_size:
      yield filenames, images
      filenames = []
      images = np.zeros(batch_shape)
      idx = 0
  # Final partial batch: trailing slots keep their zero padding, so callers
  # must use len(filenames) to know how many images are real.
  if idx > 0:
    yield filenames, images
def save_images(images, filenames, output_dir):
  """Saves images to the output directory.

  Args:
    images: array with minibatch of images
    filenames: list of filenames without path
      If number of file names in this list less than number of images in
      the minibatch then only first len(filenames) images will be saved.
    output_dir: directory where to save images
  """
  # Iterating over filenames (not images) naturally skips the zero-padded
  # tail of a partial batch produced by load_images().
  for i, filename in enumerate(filenames):
    with tf.gfile.Open(os.path.join(output_dir, filename), 'w') as f:
      imsave(f, images[i, :, :, :], format='png')
def main(_):
  """No-op attack: write every input image back out unchanged."""
  batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
  for filenames, images in load_images(FLAGS.input_dir, batch_shape):
    save_images(images, filenames, FLAGS.output_dir)


if __name__ == '__main__':
  tf.app.run()
| {
"content_hash": "ec50c3c5a0ab59910bb5ff63a370733b",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 78,
"avg_line_length": 28.870967741935484,
"alnum_prop": 0.6800744878957169,
"repo_name": "cihangxie/cleverhans",
"id": "1ab1ddc10cdb8e10ad0448762043bdd448a38c29",
"size": "2685",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/nips17_adversarial_competition/sample_attacks/noop/attack_noop.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "320002"
}
],
"symlink_target": ""
} |
import unittest
import unreal_engine as ue
class TestPlugin(unittest.TestCase):
    """Exercise the unreal_engine plugin-lookup API."""

    def test_find(self):
        plugin = ue.find_plugin('UnrealEnginePython')
        self.assertEqual('UnrealEnginePython', plugin.get_name())

    def test_enabled(self):
        plugin = ue.find_plugin('UnrealEnginePython')
        self.assertTrue(plugin.is_enabled)
| {
"content_hash": "b088b46c87bdae6365563df8f3a7acf9",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 59,
"avg_line_length": 20.8125,
"alnum_prop": 0.7057057057057057,
"repo_name": "Orav/UnrealEnginePython",
"id": "9f3e5762397f0674536925d8fbd17de9e8ffb3f3",
"size": "333",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/test_plugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "16483"
},
{
"name": "C#",
"bytes": "9980"
},
{
"name": "C++",
"bytes": "633519"
},
{
"name": "Python",
"bytes": "14482"
}
],
"symlink_target": ""
} |
import cgi
import os
import datetime
import HTMLParser
import json
import logging
import re
import ushlex as shlex
import urllib
from bson.objectid import ObjectId
from django.conf import settings
from django.contrib.auth import authenticate, login as user_login
from django.core.urlresolvers import reverse, resolve, get_script_prefix
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.html import escape as html_escape
from django.utils.http import urlencode
from mongoengine.base import ValidationError
from operator import itemgetter
from crits.config.config import CRITsConfig
from crits.core.audit import AuditLog
from crits.core.bucket import Bucket
from crits.core.class_mapper import class_from_id, class_from_type, key_descriptor_from_obj_type
from crits.core.crits_mongoengine import Releasability, json_handler
from crits.core.crits_mongoengine import CritsSourceDocument
from crits.core.source_access import SourceAccess
from crits.core.data_tools import create_zip, format_file
from crits.core.mongo_tools import mongo_connector, get_file
from crits.core.sector import Sector
from crits.core.user import CRITsUser, EmbeddedSubscriptions
from crits.core.user import EmbeddedLoginAttempt
from crits.core.user_tools import user_sources, is_admin
from crits.core.user_tools import save_user_secret
from crits.core.user_tools import get_user_email_notification
from crits.actors.actor import Actor
from crits.backdoors.backdoor import Backdoor
from crits.campaigns.campaign import Campaign
from crits.certificates.certificate import Certificate
from crits.comments.comment import Comment
from crits.domains.domain import Domain
from crits.events.event import Event
from crits.exploits.exploit import Exploit
from crits.ips.ip import IP
from crits.notifications.handlers import get_user_notifications, generate_audit_notification
from crits.pcaps.pcap import PCAP
from crits.raw_data.raw_data import RawData
from crits.emails.email import Email
from crits.samples.sample import Sample
from crits.screenshots.screenshot import Screenshot
from crits.targets.target import Target
from crits.indicators.indicator import Indicator
from crits.core.totp import valid_totp
logger = logging.getLogger(__name__)
def description_update(type_, id_, description, analyst):
"""
Change the description of a top-level object.
:param type_: The CRITs type of the top-level object.
:type type_: str
:param id_: The ObjectId to search for.
:type id_: str
:param description: The description to use.
:type description: str
:param analyst: The user setting the description.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
klass = class_from_type(type_)
if not klass:
return {'success': False, 'message': 'Could not find object.'}
if hasattr(klass, 'source'):
sources = user_sources(analyst)
obj = klass.objects(id=id_, source__name__in=sources).first()
else:
obj = klass.objects(id=id_).first()
if not obj:
return {'success': False, 'message': 'Could not find object.'}
# Have to unescape the submitted data. Use unescape() to escape
# < and friends. Use urllib2.unquote() to escape %3C and friends.
h = HTMLParser.HTMLParser()
description = h.unescape(description)
try:
obj.description = description
obj.save(username=analyst)
return {'success': True, 'message': "Description set."}
except ValidationError, e:
return {'success': False, 'message': e}
def get_favorites(analyst):
    """
    Get all favorites for a user, rendered as an HTML table fragment.

    :param analyst: The username.
    :type analyst: str
    :returns: dict with keys "success" (boolean) and "results" (string)
    """
    user = CRITsUser.objects(username=analyst).first()
    if not user:
        return {'success': False, 'message': '<div id="favorites_results">Could not find user.</div>'}
    favorites = user.favorites.to_dict()
    if not favorites:
        return {'success': True, 'message': '<div id="favorites_results">You have no favorites.</div>'}
    # Which attribute to display for each favorited top-level object type.
    field_dict = {
        'Actor': 'name',
        'Backdoor': 'name',
        'Campaign': 'name',
        'Certificate': 'filename',
        'Comment': 'object_id',
        'Domain': 'domain',
        'Email': 'id',
        'Event': 'title',
        'Exploit': 'name',
        'Indicator': 'id',
        'IP': 'ip',
        'PCAP': 'filename',
        'RawData': 'title',
        'Sample': 'filename',
        'Screenshot': 'id',
        'Target': 'email_address'
    }
    results = '''
            <table>
            <tbody>
    '''
    for type_, attr in field_dict.iteritems():
        if type_ in favorites:
            ids = [ObjectId(s) for s in favorites[type_]]
            # only(attr) limits the query to the single displayed field.
            objs = class_from_type(type_).objects(id__in=ids).only(attr)
            for obj in objs:
                obj_attr = getattr(obj, attr)
                results += '<tr><td>%s</td><td><a href="%s">%s</a></td>' % (type_,
                           reverse('crits.core.views.details',
                                   args=(type_, str(obj.id))),
                           obj_attr)
                results += '<td><span class="ui-icon ui-icon-trash remove_favorite favorites_icon_active" '
                results += 'data-type="%s" data-id="%s"></span></td><td width="5px"></td></tr>' % (type_, str(obj.id))
    results += '</tbody></table>'
    return {'success': True, 'results': results}
def favorite_update(type_, id_, analyst):
"""
Toggle the favorite of a top-level object in a user profile on or off.
:param type_: The CRITs type of the top-level object.
:type type_: str
:param id_: The ObjectId to search for.
:type id_: str
:param analyst: The user toggling the favorite.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
user = CRITsUser.objects(username=analyst).first()
if not user:
return {'success': False, 'message': 'Could not find user.'}
if id_ in user.favorites[type_]:
user.favorites[type_].remove(id_)
else:
user.favorites[type_].append(id_)
try:
user.save()
except:
pass
return {'success': True}
def status_update(type_, id_, value="In Progress", analyst=None):
"""
Update the status of a top-level object.
:param type_: The CRITs type of the top-level object.
:type type_: str
:param id_: The ObjectId to search for.
:type id_: str
:param value: The status to set it to.
:type value: str
:param analyst: The user setting the status.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
obj = class_from_id(type_, id_)
if not obj:
return {'success': False, 'message': 'Could not find object.'}
try:
obj.set_status(value)
obj.save(username=analyst)
return {'success': True, 'value': value}
except ValidationError, e:
return {'success': False, 'message': e}
def get_data_for_item(item_type, item_id):
    """
    Get a minimal amount of data for the passed item.

    Used by the clipboard to provide selected item information.

    :param item_type: Item type (Domain, Indicator, etc...)
    :type item_type: str
    :param item_id: Item database ID (_id)
    :type item_id: str
    :returns: dict -- Contains the item data
    """

    # Fields worth surfacing in the clipboard, per item type.
    type_to_fields = {
        'Actor': ['name', ],
        'Backdoor': ['name', ],
        'Campaign': ['name', ],
        'Certificate': ['filename', ],
        'Domain': ['domain', ],
        'Email': ['from_address', 'date', ],
        'Event': ['title', 'event_type', ],
        'Exploit': ['name', 'cve', ],
        'Indicator': ['value', 'ind_type', ],
        'IP': ['ip', 'type', ],
        'PCAP': ['filename', ],
        'RawData': ['title', ],
        'Sample': ['filename', ],
        'Target': ['email_address', ],
    }

    response = {'OK': 0, 'Msg': ''}
    if not item_id or not item_type:
        response['Msg'] = "No item data provided"
        return response
    if item_type not in type_to_fields:
        response['Msg'] = "Invalid item type: %s" % item_type
        return response

    doc = class_from_id(item_type, item_id)
    if not doc:
        response['Msg'] = "Item not found"
        return response

    response['OK'] = 1
    response['data'] = {}
    for field in type_to_fields[item_type]:
        if field not in doc:
            continue
        value = doc[field]
        # Abbreviate long values: keep the first and last 15 characters.
        if len(value) > 30:
            value = value[:15] + '...' + value[-15:]
        response['data'][field.title()] = value
    return response
def add_releasability(type_, id_, name, user, **kwargs):
"""
Add releasability to a top-level object.
:param type_: The CRITs type of the top-level object.
:type type_: str
:param id_: The ObjectId to search for.
:type id_: str
:param name: The source to add releasability for.
:type name: str
:param user: The user adding the releasability.
:type user: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
obj = class_from_id(type_, id_)
if not obj:
return {'success': False,
'message': "Could not find object."}
try:
obj.add_releasability(name=name, analyst=user, instances=[])
obj.save(username=user)
obj.reload()
return {'success': True,
'obj': obj.to_dict()['releasability']}
except Exception, e:
return {'success': False,
'message': "Could not add releasability: %s" % e}
def add_releasability_instance(type_, _id, name, analyst):
    """
    Add releasability instance to a top-level object.

    :param type_: The CRITs type of the top-level object.
    :type type_: str
    :param _id: The ObjectId to search for.
    :type _id: str
    :param name: The source to add releasability instance for.
    :type name: str
    :param analyst: The user adding the releasability instance.
    :type analyst: str
    :returns: dict with keys "success" (boolean) and "message" (str)
    """
    obj = class_from_id(type_, _id)
    if not obj:
        return {'success': False,
                'message': "Could not find object."}
    try:
        # The instance is stamped with the current time; this timestamp is
        # also the key used later to remove the instance.
        date = datetime.datetime.now()
        ri = Releasability.ReleaseInstance(analyst=analyst, date=date)
        obj.add_releasability_instance(name=name, instance=ri)
        obj.save(username=analyst)
        # Reload so the returned releasability reflects the saved state.
        obj.reload()
        return {'success': True,
                'obj': obj.to_dict()['releasability']}
    except Exception, e:
        return {'success': False,
                'message': "Could not add releasability instance: %s" % e}
def remove_releasability_instance(type_, _id, name, date, analyst):
    """
    Remove releasability instance from a top-level object.

    :param type_: The CRITs type of the top-level object.
    :type type_: str
    :param _id: The ObjectId to search for.
    :type _id: str
    :param name: The source to remove releasability instance from.
    :type name: str
    :param date: The date of the instance being removed.
    :type date: datetime.datetime
    :param analyst: The user removing the releasability instance.
    :type analyst: str
    :returns: dict with keys "success" (boolean) and "message" (str)
    """
    obj = class_from_id(type_, _id)
    if not obj:
        return {'success': False,
                'message': "Could not find object."}
    try:
        # Instances are keyed by (source name, timestamp) -- see
        # add_releasability_instance.
        obj.remove_releasability_instance(name=name, date=date)
        obj.save(username=analyst)
        # Reload so the returned releasability reflects the saved state.
        obj.reload()
        return {'success': True,
                'obj': obj.to_dict()['releasability']}
    except Exception, e:
        return {'success': False,
                'message': "Could not remove releasability instance: %s" % e}
def remove_releasability(type_, _id, name, analyst):
"""
Remove releasability from a top-level object.
:param type_: The CRITs type of the top-level object.
:type type_: str
:param id_: The ObjectId to search for.
:type id_: str
:param name: The source to remove from releasability.
:type name: str
:param analyst: The user removing the releasability.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
obj = class_from_id(type_, _id)
if not obj:
return {'success': False,
'message': "Could not find object."}
try:
obj.remove_releasability(name=name)
obj.save(username=analyst)
obj.reload()
return {'success': True,
'obj': obj.to_dict()['releasability']}
except Exception, e:
return {'success': False,
'message': "Could not remove releasability: %s" % e}
def sanitize_releasability(releasability, user_sources):
    """
    Remove any releasability that is for sources a user does not have access
    to see.

    :param releasability: The releasability list for a top-level object.
    :type releasability: list
    :param user_sources: The sources a user has access to.
    :type user_sources: list
    :returns: list
    """
    # Entries are plain dicts for now; switch to attribute access once
    # releasability entries become classes.
    visible = []
    for entry in releasability:
        if entry['name'] in user_sources:
            visible.append(entry)
    return visible
def ui_themes():
    """
    Return a list of available UI themes (the entries of the jquery-themes
    media directory).

    :returns: list
    """
    theme_dir = os.path.join(settings.MEDIA_ROOT, 'css/jquery-themes')
    return os.listdir(theme_dir)
def does_source_exist(source, active=False):
    """
    Determine if a source exists.

    :param source: The name of the source to search for.
    :type source: str
    :param active: Whether the source also needs to be marked as active or not.
    :type active: boolean
    :returns: True, False
    """

    query = {'name': source}
    if active:
        query['active'] = 'on'
    # first() only fetches a single document; the previous len() forced the
    # whole result set to be loaded just to test existence.
    return SourceAccess.objects(__raw__=query).first() is not None
def add_new_source(source, analyst):
    """
    Add a new source to CRITs.

    :param source: The name of the new source.
    :type source: str
    :param analyst: The user adding the new source.
    :type analyst: str
    :returns: True, False
    """

    try:
        name = source.strip()
        # Refuse duplicates.
        if SourceAccess.objects(name=name).first():
            return False
        new_src = SourceAccess()
        new_src.name = name
        new_src.save(username=analyst)
    except ValidationError:
        return False
    return True
def merge_source_lists(left, right):
    """
    Merge source lists takes two source list objects and merges them
    together. Left can be None or an empty list, in which case the right
    list is returned. Otherwise the (mutated) left list is returned.

    Two source instances with the same source name and the same date are
    assumed to be the same instance and are not duplicated.

    :param left: Source list one.
    :type left: list
    :param right: Source list two.
    :type right: list
    :returns: list
    """

    if not left:
        return right

    # Index left-hand sources by name for O(1) lookup. Keeping this index
    # (and the per-source date sets) updated as we append fixes two bugs in
    # the previous version: a KeyError when `right` repeated a name that was
    # not originally in `left`, and duplicate instances when one right-hand
    # source listed the same date twice.
    left_by_name = {}
    for entry in left:
        left_by_name[entry['name']] = entry

    for src in right:
        existing = left_by_name.get(src['name'])
        if existing is None:
            left.append(src)
            left_by_name[src['name']] = src
        else:
            seen_dates = set(inst['date'] for inst in existing['instances'])
            for inst in src['instances']:
                if inst['date'] not in seen_dates:
                    existing['instances'].append(inst)
                    seen_dates.add(inst['date'])
    return left
def source_add_update(obj_type, obj_id, action, source, method='',
                      reference='', date=None, analyst=None):
    """
    Add or update a source for a top-level object.

    :param obj_type: The CRITs type of the top-level object.
    :type obj_type: str
    :param obj_id: The ObjectId to search for.
    :type obj_id: str
    :param action: Whether or not we are doing an "add" or "update".
    :type action: str
    :param source: The name of the source.
    :type source: str
    :param method: The method of data acquisition for the source.
    :type method: str
    :param reference: The reference to the data for the source.
    :type reference: str
    :param date: The date of the instance to add/update.
    :type date: datetime.datetime
    :param analyst: The user performing the add/update.
    :type analyst: str
    :returns: dict with keys:
              "success" (boolean),
              "message" (str),
              "object" (if successful)
              :class:`crits.core.crits_mongoengine.EmbeddedSource.SourceInstance`
    """
    obj = class_from_id(obj_type, obj_id)
    if not obj:
        return {'success': False,
                'message': 'Unable to find object in database.'}
    try:
        if action == "add":
            obj.add_source(source=source,
                           method=method,
                           reference=reference,
                           date=date,
                           analyst=analyst)
        else:
            # Any action other than "add" is treated as an edit of the
            # instance identified by (source, date).
            obj.edit_source(source=source,
                            method=method,
                            reference=reference,
                            date=date,
                            analyst=analyst)
        obj.save(username=analyst)
        obj.reload()
        # Strip sources the user cannot see before reporting back.
        obj.sanitize_sources(username=analyst)
        if not obj.source:
            return {'success': False,
                    'message': 'Object has no sources.'}
        # Locate the source (and, for edits, the instance) we just touched
        # so the caller can re-render it.
        for s in obj.source:
            if s.name == source:
                if action == "add":
                    return {'success': True,
                            'object': s,
                            'message': "Source addition successful!"}
                else:
                    for i in s.instances:
                        if i.date == date:
                            # NOTE(review): message says "addition" on the
                            # update path as well -- confirm intent before
                            # changing the wording.
                            return {'success': True,
                                    'object': s,
                                    'instance': i,
                                    'message': "Source addition successful!"}
                    # Matching source found but the edited instance was not;
                    # stop searching and fall through to the error return.
                    break
        return {'success': False,
                'message': ('Could not make source changes. '
                            'Refresh page and try again.')}
    except ValidationError, e:
        return {'success':False, 'message': e}
def source_remove(obj_type, obj_id, name, date, analyst=None):
"""
Remove a source instance from a top-level object.
:param obj_type: The CRITs type of the top-level object.
:type obj_type: str
:param obj_id: The ObjectId to search for.
:type obj_id: str
:param name: The name of the source.
:type name: str
:param date: The date of the instance to remove.
:type date: datetime.datetime
:param analyst: The user performing the removal.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
obj = class_from_id(obj_type, obj_id)
if not obj:
return {'success': False,
'message': 'Unable to find object in database.'}
try:
result = obj.remove_source(source=name,
date=date)
obj.save(username=analyst)
return result
except ValidationError, e:
return {'success':False, 'message': e}
def source_remove_all(obj_type, obj_id, name, analyst=None):
"""
Remove a source from a top-level object.
:param obj_type: The CRITs type of the top-level object.
:type obj_type: str
:param obj_id: The ObjectId to search for.
:type obj_id: str
:param name: The name of the source.
:type name: str
:param analyst: The user performing the removal.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
obj = class_from_id(obj_type, obj_id)
if not obj:
return {'success': False,
'message': 'Unable to find object in database.'}
try:
result = obj.remove_source(source=name,
remove_all=True)
obj.save(username=analyst)
return result
except ValidationError, e:
return {'success':False, 'message': e}
def get_sources(obj_type, obj_id, analyst):
    """
    Get a list of sources for a top-level object.

    :param obj_type: The CRITs type of the top-level object.
    :type obj_type: str
    :param obj_id: The ObjectId to search for.
    :type obj_id: str
    :param analyst: The user performing the search.
    :type analyst: str
    :returns: list if successful or dict with keys "success" (boolean) and
              "message" (str)
    """

    obj = class_from_id(obj_type, obj_id)
    if not obj:
        return {'success': False,
                'message': 'Unable to find object in database.'}
    # Filter out sources the requesting user is not allowed to see.
    obj.sanitize_sources(username=analyst)
    return obj.source
def get_source_names(active=False, limited=False, username=None):
    """
    Get a list of available sources in CRITs sorted alphabetically.

    :param active: Whether or not the sources returned should be active.
    :type active: boolean
    :param limited: If the sources should be limited to only those the user
                    has access to.
    :type limited: boolean
    :param username: The user requesting the source list.
    :type username: str
    :returns: list
    """

    query = {}
    if limited:
        # Restrict to the sources this user can see.
        query["name"] = {'$in': user_sources(username)}
    if active:
        query['active'] = 'on'
    return SourceAccess.objects(__raw__=query).order_by('+name')
def get_item_names(obj, active=None):
    """
    Get a list of item names for a specific item in CRITs.

    :param obj: The class representing the item to get names for.
    :type obj: class
    :param active: Return:
                   None: active and inactive items.
                   True: active items.
                   False: inactive items.
    :type active: boolean
    :returns: :class:`crits.core.crits_mongoengine.CritsQuerySet`
    """

    # Don't use this to get sources. `obj` is normally the class itself, so
    # the old isinstance() check could never trigger; compare the class (and
    # keep the instance check for safety).
    if obj is SourceAccess or isinstance(obj, SourceAccess):
        return []
    if active is None:
        # No filter: return active and inactive items alike.
        c = obj.objects().order_by('+name')
    else:
        if active:
            c = obj.objects(active='on').order_by('+name')
        else:
            c = obj.objects(active='off').order_by('+name')
    return c
def promote_bucket_list(bucket, confidence, name, related, description, analyst):
    """
    Promote a bucket to a Campaign. Every top-level object which is tagged
    with this specific bucket will get attributed to the provided campaign.

    :param bucket: The bucket to promote.
    :type bucket: str
    :param confidence: The Campaign confidence.
    :type confidence: str
    :param name: The Campaign name.
    :type name: str
    :param related: If we should extend this attribution to top-level objects
                    related to these top-level objects.
    :type related: boolean
    :param description: A description of this Campaign attribution.
    :type description: str
    :param analyst: The user promoting this bucket.
    :type analyst: str
    :returns: dict with keys "success" (boolean) and "message" (str)
    """
    # Imported here to avoid a circular import at module load time.
    from crits.campaigns.handlers import campaign_add
    bucket = Bucket.objects(name=bucket).first()
    if not bucket:
        return {'success': False, 'message': 'Unable to find bucket.'}
    # The Bucket schema holds one per-type counter field plus 'name'; walk
    # every countable TLO type ('Campaign' itself is excluded).
    for ctype in [k for k in Bucket._meta['schema_doc'].keys() if k != 'name' and k != 'Campaign']:
        # Don't bother if the count for this type is 0
        if getattr(bucket, ctype, 0) == 0:
            continue
        klass = class_from_type(ctype)
        if not klass:
            continue
        objs = klass.objects(bucket_list=bucket.name)
        for obj in objs:
            campaign_add(name, confidence, description, related, analyst, obj=obj)
    return {'success': True,
            'message': 'Bucket successfully promoted. <a href="%s">View campaign.</a>' % reverse('crits.campaigns.views.campaign_details', args=(name,))}
def alter_bucket_list(obj, buckets, val):
    """
    Given a list of buckets on this object, increment or decrement
    the bucket_list objects accordingly. This is used when adding
    or removing a bucket list to an item, and when deleting an item.

    :param obj: The top-level object instantiated class.
    :type obj: class which inherits from
               :class:`crits.core.crits_mongoengine.CritsBaseAttributes`.
    :param buckets: List of buckets.
    :type buckets: list
    :param val: The amount to change the count by.
    :type val: int
    """
    # This dictionary is used to set values on insert only.
    # I haven't found a way to get mongoengine to use the defaults
    # when doing update_one() on the queryset.
    from crits.core.bucket import Bucket
    soi = { k: 0 for k in Bucket._meta['schema_doc'].keys() if k != 'name' and k != obj._meta['crits_type'] }
    soi['schema_version'] = Bucket._meta['latest_schema_version']

    # We are using mongo_connector here because mongoengine does not have
    # support for a setOnInsert option. If mongoengine were to gain support
    # for this we should switch to using it instead of pymongo here.
    buckets_col = mongo_connector(settings.COL_BUCKET_LISTS)
    for name in buckets:
        # Upsert: increments this object's type counter, creating the bucket
        # document with zeroed counters if it does not exist yet.
        buckets_col.update({'name': name},
                           {'$inc': {obj._meta['crits_type']: val},
                            '$setOnInsert': soi},
                           upsert=True)

        # Find and remove this bucket if, and only if, all counts are zero.
        if val == -1:
            Bucket.objects(name=name,
                           Actor=0,
                           Backdoor=0,
                           Campaign=0,
                           Certificate=0,
                           Domain=0,
                           Email=0,
                           Event=0,
                           Exploit=0,
                           Indicator=0,
                           IP=0,
                           PCAP=0,
                           RawData=0,
                           Sample=0,
                           Target=0).delete()
def generate_bucket_csv(request):
    """
    Render the Bucket collection as a downloadable CSV file.

    Thin wrapper around :func:`csv_export` bound to the Bucket collection.

    :param request: The request for this CSV.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponse`
    """
    return csv_export(request, Bucket)
def generate_bucket_jtable(request, option):
    """
    Generate the jtable data for rendering in the bucket list template.

    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """
    if option == 'jtlist':
        # AJAX listing request: return the bucket rows as JSON.
        details_url = 'crits.core.views.bucket_list'
        details_key = 'name'
        response = jtable_ajax_list(Bucket,
                                    details_url,
                                    details_key,
                                    request,
                                    includes=['name',
                                              'Actor',
                                              'Backdoor',
                                              'Campaign',
                                              'Certificate',
                                              'Domain',
                                              'Email',
                                              'Event',
                                              'Exploit',
                                              'Indicator',
                                              'IP',
                                              'PCAP',
                                              'RawData',
                                              'Sample',
                                              'Target'])
        return HttpResponse(json.dumps(response, default=json_handler),
                            content_type='application/json')

    # Otherwise build the jtable definition itself and render the template.
    fields = ['name', 'Actor', 'Backdoor', 'Campaign', 'Certificate', 'Domain',
              'Email', 'Event', 'Exploit', 'Indicator', 'IP', 'PCAP', 'RawData',
              'Sample', 'Target', 'Promote']
    jtopts = {'title': 'Buckets',
              'fields': fields,
              'listurl': 'jtlist',
              'searchurl': reverse('crits.core.views.global_search_listing'),
              'default_sort': 'name ASC',
              'no_sort': ['Promote'],
              'details_link': ''}
    jtable = build_jtable(jtopts, request)
    for ctype in fields:
        # Pick the URL each column's cells should link to.
        if ctype == 'id':
            continue
        elif ctype == 'name':
            url = reverse('crits.core.views.global_search_listing') + '?search_type=bucket_list&search=Search&force_full=1'
        elif ctype == 'Promote':
            url = reverse('crits.core.views.bucket_promote')
        else:
            lower = ctype.lower()
            if lower != "rawdata":
                url = reverse('crits.%ss.views.%ss_listing' % (lower, lower))
            else:
                # RawData's module name does not follow the simple
                # lowercase-pluralized pattern.
                lower = "raw_data"
                url = reverse('crits.%s.views.%s_listing' % (lower, lower))
        for field in jtable['fields']:
            if field['fieldname'].startswith("'" + ctype):
                if ctype == 'name':
                    field['display'] = """ function (data) {
                return '<a href="%s&q='+encodeURIComponent(data.record.name)+'">' + data.record.name + '</a>';
            }
            """ % url
                elif ctype == 'Promote':
                    # This is really ugly. I don't know of a better way to
                    # use the campaign addition form and also submit name of
                    # the bucket. So the form is POSTed but the URL also
                    # has a bucket parameter that is for the name of the
                    # bucket to operate on.
                    field['display'] = """ function (data) {
                return '<div class="icon-container"><span class="add_button" data-intro="Add a campaign" data-position="right"><a href="#" action="%s?name='+encodeURIComponent(data.record.name)+'" class="ui-icon ui-icon-plusthick dialogClick" dialog="campaign-add" persona="promote" title="Promote to campaign"></a></span></div>'
            }
            """ % url
                else:
                    field['display'] = """ function (data) {
                return '<a href="%s?bucket_list='+encodeURIComponent(data.record.name)+'">'+data.record.%s+'</a>';
            }
            """ % (url, ctype)
    return render_to_response('bucket_lists.html',
                              {'jtable': jtable,
                               'jtid': 'bucket_lists'},
                              RequestContext(request))
def modify_bucket_list(itype, oid, tags, analyst):
    """
    Replace the bucket list on a top-level object.

    The object's buckets are overwritten (append=False) with ``tags`` and
    the object is saved; a validation failure on save is silently ignored.

    :param itype: The CRITs type of the top-level object to modify.
    :type itype: str
    :param oid: The ObjectId to search for.
    :type oid: str
    :param tags: The list of buckets.
    :type tags: list
    :param analyst: The user making the modifications.
    :type analyst: str
    """
    tlo = class_from_id(itype, oid)
    if not tlo:
        return
    tlo.add_bucket_list(tags, analyst, append=False)
    try:
        tlo.save(username=analyst)
    except ValidationError:
        # Best effort: a validation failure leaves the object unsaved.
        pass
def download_object_handler(total_limit, depth_limit, rel_limit, rst_fmt,
                            bin_fmt, object_types, objs, sources,
                            make_zip=True):
    """
    Given a list of tuples, collect the objects for each given the total
    number of objects to return for each, the depth to traverse for each
    and the maximum number of relationships to consider before ignoring.

    NOTE: This function can collect more than total_limit number of objects
    because total_limit applies only to each call to collect_objects() and
    not to the total number of things collected.

    :param total_limit: The max number of objects to return.
    :type total_limit: int
    :param depth_limit: The level of relationships to recurse into.
    :type depth_limit: int
    :param rel_limit: The limit on how many relationships a top-level object
                      should have before we ignore its relationships.
    :type rel_limit: int
    :param rst_fmt: The format the results should be in ("zip", "json",
                    "json_no_bin").
    :type rst_fmt: str
    :param bin_fmt: How binary file contents should be encoded ("raw",
                    "zlib", "base64"); ignored when no file data is needed.
    :type bin_fmt: str
    :param object_types: The types of top-level objects to include.
    :type object_types: list
    :param objs: A list of types (<obj_type>, <obj_id>) that we should use as
                 our basis to collect for downloading.
    :type objs: list
    :param sources: A list of sources to limit results against.
    :type sources: list
    :param make_zip: Unused; zipping is driven by ``rst_fmt`` instead.
    :type make_zip: boolean
    :returns: A dict with the keys:
              "success" (boolean),
              "filename" (str),
              "data" (str),
              "mimetype" (str)
    """
    result = {'success': False}
    json_docs = []
    to_zip = []
    # Binary content is only needed unless the caller asked for
    # metadata-only JSON.
    need_filedata = rst_fmt != 'json_no_bin'
    if not need_filedata:
        bin_fmt = None
    # If bin_fmt is not zlib or base64, force it to base64.
    if rst_fmt == 'json' and bin_fmt not in ['zlib', 'base64']:
        bin_fmt = 'base64'
    for (obj_type, obj_id) in objs:
        # get related objects
        new_objects = collect_objects(obj_type, obj_id, depth_limit,
                                      total_limit, rel_limit, object_types,
                                      sources, need_filedata=need_filedata)
        # if result format calls for binary data to be zipped, loop over
        # collected objects and convert binary data to bin_fmt specified, then
        # add to the list of data to zip up
        for (oid, (otype, obj)) in new_objects.items():
            if ((otype == PCAP._meta['crits_type'] or
                 otype == Sample._meta['crits_type'] or
                 otype == Certificate._meta['crits_type']) and
                rst_fmt == 'zip'):
                if obj.filedata: # if data is available
                    if bin_fmt == 'raw':
                        to_zip.append((obj.filename, obj.filedata.read()))
                    else:
                        (data, ext) = format_file(obj.filedata.read(),
                                                  bin_fmt)
                        to_zip.append((obj.filename + ext, data))
                    # Rewind so subsequent readers see the full stream.
                    obj.filedata.seek(0)
            else:
                try:
                    json_docs.append(obj.to_json())
                except:
                    pass
    zip_count = len(to_zip)
    if zip_count <= 0:
        # Nothing binary to pack: return the JSON documents directly.
        result['success'] = True
        result['data'] = json_docs
        result['filename'] = "crits.json"
        result['mimetype'] = 'text/json'
    else:
        zip_data = to_zip
        for doc in json_docs:
            # NOTE(review): doc comes from obj.to_json() and looks like a JSON
            # string here, so doc['id'] would raise TypeError — confirm the
            # actual return type of to_json() before relying on this branch.
            inner_filename = "%s.xml" % doc['id']
            zip_data.append((inner_filename, doc))
        result['success'] = True
        result['data'] = create_zip(zip_data, True)
        result['filename'] = "CRITS_%s.zip" % datetime.datetime.today().strftime("%Y-%m-%d")
        result['mimetype'] = 'application/zip'
    return result
def collect_objects(obj_type, obj_id, depth_limit, total_limit, rel_limit,
                    object_types, sources, need_filedata=True, depth=0):
    """
    Collects an object from the database, along with its related objects, to
    the specified depth, or until the total limit is reached. This is a
    breadth first traversal because I think it's better to get objects as
    close to the initial one as possible, rather than traversing to the
    bottom of a tree first.

    If depth_limit is 0, relationships are not examined.

    If an object has too many relationships (configurable system wide)
    then it is ignored and that branch of the relationship tree is not
    taken.

    The returned object types will be only those in object_types. If
    a sample is found without a valid filedata attribute it will be
    collected only if need_filedata is False.

    Objects are returned as a dictionary with the following key/value
    mapping:

        _id: (obj_type, crits_obj)

    Sources should be a list of the names of the sources the user has
    permission to access.

    :param obj_type: The CRITs top-level object type to work with.
    :type obj_type: str
    :param obj_id: The ObjectId to search for.
    :type obj_id: str
    :param depth_limit: The level of relationships to recurse into.
    :type depth_limit: int
    :param total_limit: The max number of objects to return.
    :type total_limit: int
    :param rel_limit: The limit on how many relationships a top-level object
                      should have before we ignore its relationships.
    :type rel_limit: int
    :param object_types: The types of top-level objects to include.
    :type object_types: list
    :param sources: A list of sources to limit results against.
    :type sources: list
    :param need_filedata: Include data from GridFS if applicable.
    :type need_filedata: boolean
    :param depth: Depth tracker. Default is 0 to start at no relationships and
                  work our way down.
    :returns: A dict with ObjectIds as keys, and values of tuples
              (<object_type>, <object>).
    """
    # Results accumulator; shared with the inner closure below.
    objects = {}
    # This dictionary is used to keep track of nodes that have been
    # seen already. This ensures that we do not circle back on the graph.
    seen_objects = {}

    def inner_collect(obj_type, obj, sources, depth, depth_limit, total_limit,
                      object_types, need_filedata):
        # Mutates the enclosing `objects`/`seen_objects`; its return value
        # is ignored by callers.
        # Don't keep going if the total number of objects is reached.
        if len(objects) >= total_limit:
            return objects
        # Be cognizant of the need to collect samples with no backing binary
        # if the user asked for no binaries (need_filedata is False).
        #
        # If the object has a filedata attribute we need to collect it
        # if need_filedata is true and the filedata attribute is valid.
        # If the object does not have a valid filedata attribute and
        # need_filedata is False, then collect it (metadata only).
        #
        # If the object is not one we want to collect we will still traverse
        # down that path of the graph, but will not collect the object.
        if obj_type in object_types:
            if hasattr(obj, 'filedata'):
                if obj.filedata and need_filedata:
                    objects[obj.id] = (obj_type, obj)
                elif not need_filedata:
                    objects[obj.id] = (obj_type, obj)
            else:
                objects[obj.id] = (obj_type, obj)
        seen_objects[obj.id] = True

        # If not recursing (depth_limit == 0), return.
        # If at depth limit, return.
        if depth_limit == 0 or depth >= depth_limit:
            return objects

        new_objs = []
        for r in obj.relationships:
            # Don't touch objects we have already seen.
            if r.object_id in seen_objects:
                continue
            seen_objects[r.object_id] = True
            new_class = class_from_type(r.rel_type)
            if not new_class:
                continue
            new_obj = new_class.objects(id=str(r.object_id),
                                        source__name__in=sources).first()
            if not new_obj:
                continue
            # Don't go down this branch if there are too many relationships.
            # This most often happens when a common resource is extracted
            # from many samples.
            if len(new_obj.relationships) > rel_limit:
                continue
            # Save the objects so we can recurse into them later.
            new_objs.append((r.rel_type, new_obj))
            # Try to collect the new object, but don't handle relationships.
            # Do this by setting depth_limit to 0.
            inner_collect(r.rel_type, new_obj, sources, depth, 0, total_limit,
                          object_types, need_filedata)
        # Each of the new objects become a new starting point for traverse.
        depth += 1
        for (new_type, new_obj) in new_objs:
            inner_collect(new_type, new_obj, sources, depth, depth_limit,
                          total_limit, object_types, need_filedata)
    # END OF INNER COLLECT

    klass = class_from_type(obj_type)
    if not klass:
        return objects
    obj = klass.objects(id=str(obj_id), source__name__in=sources).first()
    if not obj:
        return objects
    inner_collect(obj_type, obj, sources, 0, depth_limit, total_limit,
                  object_types, need_filedata)
    return objects
def modify_source_access(analyst, data):
    """
    Create or update a CRITs user account and its profile fields.

    If no user matches ``data['username']`` a new account is created first.
    Profile fields (name, email, role, sources, organization, TOTP settings)
    are then overwritten from ``data``; a non-empty password is validated
    against the configured complexity policy before being set.

    :param analyst: The user performing the update (recorded on save).
    :type analyst: str
    :param data: The user profile fields to change and their values. Assumes
                 keys first_name, last_name, email, role, sources,
                 organization, totp, secret and subscriptions are present —
                 missing keys raise KeyError.
    :type data: dict
    :returns: dict with keys "success" (boolean) and "message" (str) if failed.
    """
    user = CRITsUser.objects(username=data['username']).first()
    if not user:
        user = CRITsUser.create_user(
                            data.get('username', ''),
                            data.get('password', ''),
                            data.get('email') )
        if not user:
            return {'success': False,
                    'message': 'Missing user information username/password/email'}
    user.first_name = data['first_name']
    user.last_name = data['last_name']
    user.email = data['email']
    user.role = data['role']
    user.sources = data['sources']
    user.organization = data['organization']
    user.totp = data['totp']
    user.secret = data['secret']
    # Only attempt a password change when one was actually supplied.
    if len(data.get('password', '')) > 1:
        if user.set_password(data['password']) == False:
            config = CRITsConfig.objects().first()
            pc = config.password_complexity_desc
            return {'success': False,
                    'message': 'Password does not meet complexity policy: %s' % pc}
    # An empty subscriptions value clears all of the user's subscriptions.
    if data['subscriptions'] == '':
        user.subscriptions = EmbeddedSubscriptions()
    try:
        user.save(username=analyst)
        return {'success': True}
    except ValidationError, e:
        return {'success': False,
                'message': format_error(e)}
def datetime_parser(d):
    """
    Convert the value of any top-level "date" key in *d* into a
    :class:`datetime.datetime`, in place.

    :param d: A dictionary to iterate over.
    :type d: dict
    :returns: dict -- The same dictionary, with "date" parsed.
    """
    if "date" in d:
        raw_value = d["date"]
        d["date"] = datetime.datetime.strptime(raw_value,
                                               settings.PY_DATETIME_FORMAT)
    return d
def format_error(e):
    """
    Render an exception as "<ClassName>: <message>".

    :param e: An exception.
    :type e: Exception
    :returns: str
    """
    return "%s: %s" % (e.__class__.__name__, unicode(e))
def toggle_item_state(type_, oid, analyst):
    """
    Flip an item between active ('on') and inactive ('off') and save it.

    :param type_: The CRITs type for this item.
    :type type_: str
    :param oid: The ObjectId to search for.
    :type oid: str
    :param analyst: The user toggling this item.
    :type analyst: str
    :returns: dict with key "success" (boolean)
    """
    item = class_from_id(type_, oid)
    if not item:
        return {'success': False}
    item.active = 'off' if item.active == 'on' else 'on'
    try:
        item.save(username=analyst)
    except ValidationError:
        return {'success': False}
    return {'success': True}
def get_item_state(type_, name):
    """
    Get the state of an item.

    :param type_: The CRITs type for this item.
    :type type_: str
    :param name: The name of the item.
    :type name: str
    :returns: True if active, False if inactive or not found.
    """
    item = class_from_type(type_).objects(__raw__={'name': name}).first()
    if not item:
        return False
    return item.active == 'on'
def remove_quotes(val):
    """
    Strip one layer of surrounding quote characters from a string.

    The leading and trailing quotes need not match: any combination of
    single and double quotes is stripped, one character from each end.

    :param val: The string to remove quotes from.
    :type val: str
    :returns: str
    """
    quote_chars = ('"', "'",)
    if val.startswith(quote_chars) and val.endswith(quote_chars):
        return val[1:-1]
    return val
def generate_regex(val):
    """
    Build a case-insensitive PyMongo ``$regex`` clause from *val* after
    stripping any surrounding quotes.

    :param val: The string to use for a regex.
    :type val: str
    :returns: dict with key '$regex' if successful, 'error' if failed.
    """
    try:
        compiled = re.compile('%s' % remove_quotes(val), re.I)
    except Exception as e:
        return {'error': 'Invalid Regular Expression: %s\n\n\t%s' % (val,
                                                                     str(e))}
    return {'$regex': compiled}
def parse_search_term(term, force_full=False):
    """
    Parse a search term to break it into search operators that we can use to
    enhance the search results.

    Recognized operators (written as ``op:value`` tokens) are:

    - ``regex``: use value directly as a regular expression.
    - ``full``: match value literally (no regex conversion).
    - ``type``: restrict the search to one TLO type.
    - ``field``: restrict the search to one field.

    Any other tokens are accumulated into a single term which becomes either
    a literal string (when full matching is forced) or a case-insensitive
    regex query.

    :param term: Search term
    :type term: str
    :param force_full: Treat the term literally: skip URL-unquoting and do
                       not convert it into a regex.
    :type force_full: boolean
    :returns: dict with optional keys "query", "type" and "field"; "query"
              is a string, a regex dict or an error dict.
    """
    # decode the term so we aren't dealing with weird encoded characters
    if force_full == False:
        term = urllib.unquote(term)
    search = {}
    # setup lexer, parse our term, and define operators
    try:
        sh = shlex.shlex(term.strip())
        # Allow common punctuation inside a single token so URLs, hashes,
        # and paths are not split apart by the lexer.
        sh.wordchars += '!@#$%^&*()-_=+[]{}|\:;<,>.?/~`'
        sh.commenters = ''
        parsed = list(iter(sh.get_token, ''))
    except Exception as e:
        search['query'] = {'error': str(e)}
        return search
    operators = ['regex', 'full', 'type', 'field']
    # for each parsed term, check to see if we have an operator and a value
    regex_term = ""
    if len(parsed) > 0:
        for p in parsed:
            s = p.split(':')
            if len(s) >= 2:
                so = s[0]
                # Rejoin the remainder so values containing ':' survive.
                st = ':'.join(s[1:])
                if so in operators:
                    # can make this more flexible for regex?
                    if so == 'regex':
                        search['query'] = generate_regex(st)
                    elif so == 'full':
                        regex_term += "%s " % (st,)
                        force_full = True
                    elif so == 'type':
                        search['type'] = st.title()
                    elif so == 'field':
                        search['field'] = remove_quotes(st.lower())
                else:
                    # Not a recognized operator: keep the token verbatim.
                    regex_term += "%s:%s " % (so, st)
            else:
                regex_term += "%s " % p
    if regex_term:
        if force_full:
            search['query'] = remove_quotes(regex_term.strip())
        else:
            search['query'] = generate_regex(regex_term.strip())
    return search
def gen_global_query(obj,user,term,search_type="global",force_full=False):
    """
    Generate a search query. Also calls :func:`check_query` for validation.

    :param obj: CRITs Document Object
    :type obj: :class:`crits.core.crits_mongoengine.CritsDocument`
    :param user: CRITs user
    :type user: str
    :param term: Search term
    :type term: str
    :param search_type: Search type
    :type search_type: str
    :param force_full: Treat the term literally instead of as a regex.
    :type force_full: boolean
    :returns: dict -- The validated query dictionary. On failure a dict with
              keys "success" (False/None), "ignore" (boolean) and "error"
              (str) is returned instead.
    """
    type_ = obj._meta['crits_type']
    search_list = []
    query = {}
    # Some terms, regardless of the query, will want to be full search terms and
    # not regex terms.
    force_full_terms = ['analysis_result', 'ssdeephash']
    force = False
    # Exclude searches for 'source' or 'releasability'
    # This is required because the check_query function doesn't handle
    # regex searches for these two fields
    if 'source' in search_type or 'releasability' in search_type:
        return query
    if search_type in force_full_terms or force_full != False:
        force = True
    parsed_search = parse_search_term(term, force_full=force)
    if 'query' not in parsed_search:
        return {'success': False,
                'ignore': False,
                'error': 'No query to search'}
    if 'error' in parsed_search['query']:
        return {'success': False,
                'ignore': False,
                'error': parsed_search['query']['error']}
    search_query = parsed_search['query']
    if 'type' in parsed_search:
        t = class_from_type(parsed_search['type'])
        if t:
            type_ = parsed_search['type']
            # A "type:" operator that names a different collection means
            # this collection should be skipped entirely by the caller.
            if obj._meta['crits_type'] != type_:
                return {'success': False,
                        'ignore': True,
                        'error': 'This type is being ignored.'}
    if 'field' in parsed_search:
        query = {parsed_search['field']: parsed_search['query']}
    defaultquery = check_query({search_type: search_query},user,obj)
    # Canned per-field queries for the Sample collection.
    sample_queries = {
        'size' : {'size': search_query},
        'md5hash': {'md5': search_query},
        'sha1hash': {'sha1': search_query},
        'ssdeephash': {'ssdeep': search_query},
        'sha256hash': {'sha256': search_query},
        # slow in larger collections
        'filename': {'$or': [
            {'filename': search_query},
            {'filenames': search_query},
        ]},
        'campaign': {'campaign.name': search_query},
        # slightly slow in larger collections
        'object_value': {'objects.value': search_query},
        'bucket_list': {'bucket_list': search_query},
        'sectors': {'sectors': search_query},
        'source': {'source.name': search_query},
    }
    # if a specific field is being defined to search against, return early
    if 'field' in parsed_search:
        if 'filedata' in query:
            # Never regex-search raw binary content.
            query = {'filedata': None}
        return query
    elif search_type == "bucket_list":
        query = {'bucket_list': search_query}
    elif search_type == "sectors":
        query = {'sectors': search_query}
    elif search_type == "actor_identifier":
        query = {'identifiers.identifier_id': search_query}
    # object_ comes from the core/views.py search function.
    # It joins search_type with otype
    elif search_type.startswith("object_"):
        if search_type == "object_value":
            query = {"objects.value": search_query}
        else:
            otypes = search_type.split("_")[1].split(" - ")
            if len(otypes) == 1:
                query = {"objects": {"$elemMatch": {"name": otypes[0],
                                                    "value": search_query}}}
            else:
                query = {"objects": {"$elemMatch": {"name": otypes[1],
                                                    "type": otypes[0],
                                                    "value": search_query}}}
    elif search_type == "byobject":
        query = {'comment': search_query}
    elif search_type == "global":
        # Global search: OR together the fields most relevant to each type.
        if type_ == "Sample":
            search_list.append(sample_queries["object_value"])
            search_list.append(sample_queries["filename"])
            if len(term) == 32:
                # Term is MD5-length; include the md5 field too.
                search_list.append(sample_queries["md5hash"])
        elif type_ == "AnalysisResult":
            search_list = [
                {'results.result': search_query},
            ]
        elif type_ == "Actor":
            search_list = [
                {'name': search_query},
                {'objects.value': search_query},
            ]
        elif type_ == "Certificate":
            search_list = [
                {'md5': search_query},
                {'objects.value': search_query},
            ]
        elif type_ == "PCAP":
            search_list = [
                {'md5': search_query},
                {'objects.value': search_query},
            ]
        elif type_ == "RawData":
            search_list = [
                {'md5': search_query},
                {'data': search_query},
                {'objects.value': search_query},
            ]
        elif type_ == "Indicator":
            search_list = [
                {'value': search_query},
                {'objects.value': search_query}
            ]
        elif type_ == "Domain":
            search_list = [
                {'domain': search_query},
                {'objects.value': search_query}
            ]
        elif type_ == "Email":
            search_list = [
                {'from': search_query},
                {'subject': search_query},
                {'raw_body': search_query},
                {'raw_headers': search_query},
                {'objects.value': search_query},
                {'x_originating_ip': search_query},
                {'originating_ip': search_query}
            ]
        elif type_ == "Event":
            search_list = [
                {'description': search_query},
                {'title': search_query},
                {'objects.value': search_query}
            ]
        elif type_ == "IP":
            search_list = [
                {'ip': search_query},
                {'objects.value': search_query}
            ]
        elif type_ == "Comment":
            search_list = [
                {'comment': search_query},
            ]
        elif type_ == "Campaign":
            search_list = [
                {'name': search_query},
                {'aliases': search_query},
            ]
        elif type_ == "Screenshot":
            search_list = [
                {'description': search_query},
                {'tags': search_query},
            ]
        elif type_ == "Target":
            search_list = [
                {'email_address': search_query},
                {'firstname': search_query},
                {'lastname': search_query},
            ]
        else:
            search_list = [{'name': search_query}]
        # Fields common to every type are always included.
        search_list.append({'source.instances.reference':search_query})
        search_list.append({'bucket_list': search_query})
        search_list.append({'sectors': search_query})
        query = {'$or': search_list}
    else:
        # Type-specific search_types; anything unrecognized falls back to
        # the check_query-validated default query.
        if type_ == "Domain":
            query = {'domain': search_query}
        elif type_ == "Email":
            if search_type == "ip":
                query = {'$or': [{'originating_ip': search_query},
                                 {'x_originating_ip': search_query}]}
            elif search_type == "reference":
                query = {'source.instances.reference': search_query}
            else:
                query = defaultquery
        elif type_ == "RawData":
            if search_type == "data":
                query = {'data': search_query}
            elif search_type == "data_type":
                query = {'data_type': search_query}
            elif search_type == "title":
                query = {'title': search_query}
            elif search_type == "tool":
                query = {'tool.name': search_query}
            else:
                query = defaultquery
        elif type_ == "Event":
            if search_type == "campaign":
                query = {'campaign.name': search_query}
            elif search_type == "source":
                query = {'source.name': search_query}
            else:
                query = defaultquery
        elif type_ == "Indicator":
            if search_type == "campaign":
                query = {'campaign.name': search_query}
            elif search_type == "ticket_number":
                query = {'tickets.ticket_number': search_query}
            elif search_type == "source":
                query = {'source.name': search_query}
            elif search_type == "confidence":
                query = {'confidence.rating': search_query}
            elif search_type == "impact":
                query = {'impact.rating': search_query}
            else:
                query = defaultquery
        elif type_ == "IP":
            query = {'ip': search_query}
        elif type_ == "Sample":
            if search_type not in sample_queries:
                return {'success': None,
                        'ignore': False,
                        'error': 'Search type not in sample queries.'}
            query = sample_queries[search_type]
            if 'size' in query:
                # Size must compare numerically, not as a regex/string.
                try:
                    query = {'size': int(query['size'])}
                except ValueError:
                    return {'success': None,
                            'ignore': False,
                            'error': 'Size must be an integer.'}
        else:
            query = defaultquery
    return query
def check_query(qparams, user, obj):
    """
    Remove and/or filter queries which may cause issues.

    Two passes are made:

    1. Keys are kept only when their base field (the part before the first
       '.') maps — after reversing the document's db-field mapping — to an
       attribute that exists on ``obj``. Keys containing Mongo's special
       '$' character are dropped outright.
    2. "source"/"releasability" queries are restricted to the exact keys
       "source"/"source.name" (resp. "releasability"/"releasability.name")
       and to values within the user's permitted sources.

    :param qparams: MongoDB query
    :type qparams: dict
    :param user: CRITs user
    :type user: str
    :param obj: CRITs Document Object
    :type obj: :class:`crits.core.crits_mongoengine.CritsDocument`
    :returns: dict -- The validated query dictionary
    """
    # Reverse the db-field mapping once; it is invariant across keys.
    # (Previously rebuilt on every loop iteration.)
    invmap = dict((v, k) for k, v in obj._db_field_map.iteritems())
    # Iterate over the supplied query keys and make sure they start
    # with a valid field from the document
    goodkeys = {}
    for key, val in qparams.items():
        # Skip anything with Mongo's special $
        if '$' in key:
            continue
        # Grab the base field for doing the key checks.
        # ValueError: no '.' in the key; AttributeError: key is not a string.
        try:
            indx = key.index('.')
            field = key[:indx]
        except (ValueError, AttributeError):
            field = key
        # Check for mapping, reverse because we're going the other way
        if field in invmap:
            field = invmap[field]
        # Only allow query keys that exist in the object
        if hasattr(obj, field):
            goodkeys[key] = val
    # Filter out invalid queries regarding source/releasability
    sourcefilt = user_sources(user)
    newquery = goodkeys.copy()
    for key in goodkeys:
        # Sources
        if "source" in key:
            if key != "source.name" and key != "source":
                del newquery[key]
            elif goodkeys[key] not in sourcefilt:
                del newquery[key]
        # Releasability
        if "releasability" in key:
            if key != "releasability.name" and key != "releasability":
                del newquery[key]
            elif goodkeys[key] not in sourcefilt:
                del newquery[key]
    return newquery
def data_query(col_obj, user, limit=25, skip=0, sort=[], query={},
               projection=[], count=False):
    """
    Basic query function

    :param col_obj: MongoEngine collection object (Required)
    :type col_obj: :class:`crits.core.crits_mongoengine.CritsDocument`
    :param user: CRITs user (Required)
    :type user: str
    :param limit: Limit on returned rows
    :type limit: int `(25)`
    :param skip: Number of rows to skip
    :type skip: int `(0)`
    :param sort: Fields to sort by (Prepend field name with '-' to reverse sort)
    :type sort: list
    :param query: MongoDB query
    :type query: dict
    :param projection: Projection filter to apply to query
    :type projection: list
    :param count: Return only the result count (no documents).
    :type count: boolean
    :returns: dict -- Keys are result, data, count, msg, crits_type. 'data'
              contains a :class:`crits.core.crits_mongoengine.CritsQuerySet` object.
    """
    # NOTE(review): sort/query/projection use mutable default arguments.
    # Safe today because none of them is mutated in place, but fragile.
    results = {'result':'ERROR'}
    results['data'] = []
    results['count'] = 0
    results['msg'] = ""
    results['crits_type'] = col_obj._meta['crits_type']
    sourcefilt = user_sources(user)
    # Accept comma-separated strings as well as lists for sort/projection.
    if isinstance(sort,basestring):
        sort = sort.split(',')
    if isinstance(projection,basestring):
        projection = projection.split(',')
    docs = None
    try:
        if not issubclass(col_obj,CritsSourceDocument):
            results['count'] = col_obj.objects(__raw__=query).count()
            if count:
                results['result'] = "OK"
                return results
            if col_obj._meta['crits_type'] == 'User':
                # Never expose credential material on User documents.
                docs = col_obj.objects(__raw__=query).exclude('password',
                                                              'password_reset',
                                                              'api_keys').\
                        order_by(*sort).skip(skip).\
                        limit(limit).only(*projection)
            else:
                docs = col_obj.objects(__raw__=query).order_by(*sort).\
                        skip(skip).limit(limit).only(*projection)
        # Else, all other objects that have sources associated with them
        # need to be filtered appropriately
        else:
            results['count'] = col_obj.objects(source__name__in=sourcefilt,
                                               __raw__=query).count()
            if count:
                results['result'] = "OK"
                return results
            docs = col_obj.objects(source__name__in=sourcefilt,__raw__=query).\
                    order_by(*sort).skip(skip).limit(limit).\
                    only(*projection)
        # Strip source instances the requesting user cannot see.
        for doc in docs:
            if hasattr(doc, "sanitize_sources"):
                doc.sanitize_sources(username="%s" % user, sources=sourcefilt)
    except Exception, e:
        results['msg'] = "ERROR: %s. Sort performed on: %s" % (e,
                                                               ', '.join(sort))
        return results
    results['data'] = docs
    results['result'] = "OK"
    return results
def csv_query(col_obj, user, fields=[], limit=10000, skip=0, sort=[], query={}):
    """
    Run a query and render the results as CSV with *fields* as the column
    headers.

    :param col_obj: MongoEngine collection object (Required)
    :type col_obj: :class:`crits.core.crits_mongoengine.CritsDocument`
    :param user: CRITs user (Required)
    :type user: str
    :param fields: Fields to return in the CSV
    :type fields: list
    :param limit: Limit on returned rows
    :type limit: int
    :param skip: Number of rows to skip
    :type skip: int
    :param sort: Fields to sort by (Prepend field name with '-' to reverse sort)
    :type sort: list
    :param query: MongoDB query
    :type query: dict
    :returns: str -- CSV data on success, otherwise the error message.
    """
    outcome = data_query(col_obj, user=user, limit=limit, skip=skip,
                         sort=sort, query=query, projection=fields)
    if outcome['result'] != "OK":
        return outcome['msg']
    return outcome['data'].to_csv(fields)
def parse_query_request(request, col_obj):
    """
    Extract query modifiers (field projection, sort, limit, skip) from a
    request's GET parameters, validating any requested fields against the
    collection object.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param col_obj: MongoEngine collection object whose attributes are used
                    to validate the requested fields. (Required)
    :type col_obj: :class:`crits.core.crits_mongoengine.CritsDocument`
    :returns: dict -- Keys are fields, sort, limit, skip
    """
    resp = {}
    resp['fields'] = request.GET.get('fields',[])
    if resp['fields']:
        try:
            resp['fields'] = resp['fields'].split(',')
        except:
            return render_to_response("error.html",
                                      {"error": "Invalid fields specified"},
                                      RequestContext(request))
        # Reverse the db-field mapping once; it is the same for every field.
        # (Previously rebuilt on every loop iteration.)
        invmap = dict((v, k) for k, v in col_obj._db_field_map.iteritems())
        goodfields = []
        for field in resp['fields']:
            # Skip anything with Mongo's special $
            if '$' in field:
                continue
            # Split into the base field (used for validation) and any
            # dotted remainder (re-appended untouched below).
            try:
                indx = field.index('.')
                base = field[:indx]
                extra = field[indx:]
            except ValueError:
                # No '.' in the field name.
                base = field
                extra = ""
            # Check for mapping, reverse because we're going the other way
            if base in invmap:
                base = invmap[base]
            # Only allow query keys that exist in the object
            if hasattr(col_obj, base):
                goodfields.append(base + extra)
        resp['fields'] = goodfields
    resp['sort'] = request.GET.get('sort',[])
    resp['limit'] = int(request.GET.get('limit',10000))
    resp['skip'] = int(request.GET.get('skip',0))
    return resp
def csv_export(request, col_obj, query={}):
    """
    Prompt the user to download a CSV file containing the results from
    :func:`csv_query`.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param col_obj: MongoEngine collection object (Required)
    :type col_obj: :class:`crits.core.crits_mongoengine.CritsDocument`
    :param query: MongoDB query
    :type query: dict
    :returns: :class:`django.http.HttpResponse` -- CSV download response
    """
    opts = parse_query_request(request, col_obj)
    if not query:
        # No explicit query supplied; derive one from the request itself.
        parsed = get_query(col_obj, request)
        if parsed['Result'] == "ERROR":
            return render_to_response("error.html",
                                      {"error": parsed['Message']},
                                      RequestContext(request))
        query = parsed['query']
    csv_data = csv_query(col_obj,
                         request.user.username,
                         fields=opts['fields'],
                         sort=opts['sort'],
                         query=query,
                         limit=opts['limit'],
                         skip=opts['skip'])
    if not isinstance(csv_data, basestring):
        # csv_query returned an error payload rather than CSV text.
        return render_to_response("error.html",
                                  {"error": csv_data},
                                  RequestContext(request))
    response = HttpResponse(csv_data, content_type="text/csv")
    response['Content-Disposition'] = \
        "attachment;filename=crits-%s-export.csv" % col_obj._meta['crits_type']
    return response
def get_query(col_obj,request):
    """
    Pull out a query from a request object

    :param col_obj: MongoEngine collection object (Required)
    :type col_obj: :class:`crits.core.crits_mongoengine.CritsDocument`
    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :returns: dict -- The MongoDB query
    """
    # Friendly GET parameter names -> actual document field paths.
    keymaps = {
        "actor_identifier": "identifiers.identifier_id",
        "campaign": "campaign.name",
        "source": "source.name",
        "confidence": "confidence.rating",
        "impact": "impact.rating",
        "object_value":"objects.value",
        "analysis_result":"results.result",
    }
    term = ""
    query = {}
    response = {}
    # HTML-escape all GET parameters before echoing them back in URLs.
    params_escaped = {}
    for k,v in request.GET.items():
        params_escaped[k] = html_escape(v)
    urlparams = "?%s" % urlencode(params_escaped)
    if "q" in request.GET:
        # A free-text search term ("q") drives the global query generator.
        force_full = request.GET.get('force_full', False)
        term = request.GET.get('q')
        search_type = request.GET.get('search_type',None)
        if not search_type:
            response['Result'] = "ERROR"
            response['Message'] = "No search_type defined"
            return response
        otype = request.GET.get('otype', None)
        if otype:
            # Sub-object searches carry the object type in "otype".
            search_type = search_type + "_" + otype
        term = HTMLParser.HTMLParser().unescape(term)
        qdict = gen_global_query(col_obj,
                                 request.user.username,
                                 term,
                                 search_type,
                                 force_full=force_full
                                 )
        if not qdict.get('success', True):
            if qdict.get('ignore', False):
                response['Result'] = "IGNORE"
            else:
                response['Result'] = "ERROR"
            response['Message'] = qdict.get('error', 'Unable to process query')
            return response
        query.update(qdict)
        term = request.GET['q']
    # NOTE(review): request.REQUEST (merged GET+POST) is deprecated in
    # modern Django — confirm the target Django version still provides it.
    qparams = request.REQUEST.copy()
    qparams = check_query(qparams,request.user.username,col_obj)
    for key,value in qparams.items():
        if key in keymaps:
            key = keymaps[key]
        # This one is not a straight rename like the others. If
        # searching for x_originating_ip also search for originating_ip,
        # and vice versa. This means we have to logically or the query
        # where the others do not.
        if key in ['x_originating_ip', 'originating_ip']:
            query["$or"] = [
                {"x_originating_ip": value},
                {"originating_ip": value}
            ]
        elif key in ['size', 'length']:
            # Numeric fields must compare as integers, not strings.
            try:
                query[key] = int(value)
            except ValueError:
                results = {}
                results['Result'] = "ERROR"
                results['Message'] = "'size' requires integer, not %s" % value
                return results
        else:
            query[key] = value
            term = term + " " + value
    results = {}
    results['Result'] = "OK"
    results['query'] = query
    results['term'] = term
    results['urlparams'] = urlparams
    return results
def jtable_ajax_list(col_obj,url,urlfieldparam,request,excludes=None,includes=None,query=None):
    """
    Handles jTable listing POST requests
    :param col_obj: MongoEngine collection object (Required)
    :type col_obj: :class:`crits.core.crits_mongoengine.CritsDocument`
    :param url: Base URL for objects. Ex ``crits.domains.views.domain_detail``
    :type url: str
    :param urlfieldparam: Field to use for the item detail's URL key. Passed
                          as arg with ``url`` to :func:`django.core.urlresolvers.reverse`
    :type urlfieldparam: str
    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param excludes: Fields to exclude
    :type excludes: list
    :param query: MongoDB query
    :type query: dict
    :param includes: Fields to include
    :type includes: list
    :returns: dict -- jTable-shaped response ("Result", "Records",
              "TotalRecordCount", ...)
    """
    # Fix: the old signature used mutable default arguments ([], [], {})
    # which are evaluated once and shared across calls. None sentinels keep
    # the call-site behavior identical without that pitfall.
    if excludes is None:
        excludes = []
    if includes is None:
        includes = []
    if query is None:
        query = {}
    response = {"Result": "ERROR"}
    users_sources = user_sources(request.user.username)
    if request.is_ajax():
        pageSize = request.user.get_preference('ui','table_page_size',25)
        # Thought these were POSTs...GET works though
        skip = int(request.GET.get("jtStartIndex", "0"))
        if "jtLimit" in request.GET:
            pageSize = int(request.GET['jtLimit'])
        else:
            pageSize = int(request.GET.get("jtPageSize", pageSize))
        # Set the sort order. jTable sends "field ASC[,field DESC,...]".
        sort = request.GET.get("jtSorting", urlfieldparam+" ASC")
        keys = sort.split(',')
        multisort = []
        # Friendly column names -> dotted mongo field paths
        keymaps = {
            "actor_identifier": "identifiers.identifier_id",
            "campaign": "campaign.name",
            "source": "source.name",
            "confidence": "confidence.rating",
            "impact": "impact.rating",
            "object_value": "objects.value",
            "analysis_result": "results.result",
        }
        for key in keys:
            (keyname, keyorder) = key.split()
            if keyname in keymaps:
                keyname = keymaps[keyname]
            if keyorder == "DESC":
                # MongoEngine uses a "-" prefix for descending sort
                keyname = "-%s" % keyname
            multisort.append(keyname)
        # Build the query
        term = ""
        if not query:
            resp = get_query(col_obj, request)
            if resp['Result'] in ["ERROR", "IGNORE"]:
                return resp
            query = resp['query']
            term = resp['term']
        response = data_query(col_obj, user=request.user.username, limit=pageSize,
                              skip=skip, sort=multisort, query=query,
                              projection=includes)
        if response['result'] == "ERROR":
            return {'Result': "ERROR", 'Message': response['msg']}
        response['crits_type'] = col_obj._meta['crits_type']
        # Escape term for rendering in the UI.
        response['term'] = cgi.escape(term)
        response['data'] = response['data'].to_dict(excludes, includes)
        # Convert data_query keys to the names jTable expects
        response['Records'] = response.pop('data')
        response['TotalRecordCount'] = response.pop('count')
        response['Result'] = response.pop('result')
        # Flatten/normalize each record for display in the table
        for doc in response['Records']:
            for key, value in doc.items():
                # all dates should look the same
                if isinstance(value, datetime.datetime):
                    doc[key] = datetime.datetime.strftime(value,
                                                          "%Y-%m-%d %H:%M:%S")
                if key == "password_reset":
                    # never expose reset codes to the UI
                    doc['password_reset'] = None
                if key == "campaign":
                    camps = []
                    for campdict in value:
                        camps.append(campdict['name'])
                    doc[key] = "|||".join(camps)
                elif key == "source":
                    # only show sources the requesting user can access
                    srcs = []
                    for srcdict in doc[key]:
                        if srcdict['name'] in users_sources:
                            srcs.append(srcdict['name'])
                    doc[key] = "|||".join(srcs)
                elif key == "tags":
                    tags = []
                    for tag in doc[key]:
                        tags.append(tag)
                    doc[key] = "|||".join(tags)
                elif key == "is_active":
                    if value:
                        doc[key] = "True"
                    else:
                        doc[key] = "False"
                elif key == "datatype":
                    doc[key] = value.keys()[0]
                elif key == "results":
                    doc[key] = len(doc[key])
                elif isinstance(value, list):
                    # join homogeneous string lists; leave mixed lists alone
                    if value:
                        for item in value:
                            if not isinstance(item, basestring):
                                break
                        else:
                            doc[key] = ",".join(value)
                    else:
                        doc[key] = ""
                doc[key] = html_escape(doc[key])
            if col_obj._meta['crits_type'] == "Comment":
                # Comments link to the details page of the commented object
                mapper = {
                    "Actor": 'crits.actors.views.actor_detail',
                    "Campaign": 'crits.campaigns.views.campaign_details',
                    "Certificate": 'crits.certificates.views.certificate_details',
                    "Domain": 'crits.domains.views.domain_detail',
                    "Email": 'crits.emails.views.email_detail',
                    "Event": 'crits.events.views.view_event',
                    "Indicator": 'crits.indicators.views.indicator',
                    "IP": 'crits.ips.views.ip_detail',
                    "PCAP": 'crits.pcaps.views.pcap_details',
                    "RawData": 'crits.raw_data.views.raw_data_details',
                    "Sample": 'crits.samples.views.detail',
                }
                doc['url'] = reverse(mapper[doc['obj_type']],
                                     args=(doc['url_key'],))
            elif col_obj._meta['crits_type'] == "AuditLog":
                # deleted objects have no details page to link to
                if doc.get('method', 'delete()') != 'delete()':
                    doc['url'] = details_from_id(doc['type'],
                                                 doc.get('target_id', None))
            elif not url:
                doc['url'] = None
            else:
                doc['url'] = reverse(url, args=(unicode(doc[urlfieldparam]),))
    return response
def jtable_ajax_delete(obj,request):
    """
    Delete a document specified in the jTable POST.
    :param obj: MongoEngine collection object (Required)
    :type obj: :class:`crits.core.crits_mongoengine.CritsDocument`
    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :returns: bool -- True if item was deleted
    """
    # Deletion is an admin-only operation.
    if not is_admin(request.user.username):
        return False
    # A non-empty document id must accompany the request.
    doc_id = request.POST.get("id")
    if not doc_id:
        return False
    # The id must resolve to an existing document.
    document = obj.objects(id=doc_id).first()
    if not document:
        return False
    # Sever relationships first when the type supports them.
    if "delete_all_relationships" in dir(document):
        document.delete_all_relationships()
    # Samples/PCAPs carry file data that must be removed as well.
    if "filedata" in dir(document):
        document.filedata.delete()
    document.delete(username=request.user.username)
    return True
def build_jtable(jtopts, request):
    """
    Build a dictionary containing proper jTable options.
    :param jtopts: Python dictionary containing jTable options.
    :type jtopts: dict.
    :param request: Current Django request
    :type request: :class:`django.http.HttpRequest`
    :returns: dict -- Contains the jTable configuration used by the template.
    :raises KeyError: If a required jtopts key is missing.
    **jtopts supports the following keys**
    **Required**
    *title*
        Contains the jTable title.
    *listurl*
        URL for the Django view that returns the data in JSON.
    *searchurl*
        URL to use when filtering data, usually the base URL for the view,
        without any options.
    *fields*
        Python list containing the fields to show for a document. The
        first item will be linked to the details view.
    **Optional**
    *default_sort*
        Defines the field and order to sort by.
        Ex. "field <ASC|DESC>"
        Default: FirstField ASC
    *deleteurl*
        URL for Django view to delete an item
    *no_sort*
        Python list containing which fields to disable sorting
    *hidden_fields*
        Python list containing which fields to hide. This list is a
        subset of 'fields'
    *linked_fields*
        Python list containing which fields should allow filtering.
    *paging*
        Allow paging on this jTable.
        Default: true
    *pageSize*
        Number of rows per page
        Default: User Preference (defaults to 25)
    *sorting*
        Allow sorting by column on this jTable
        Default: true
    *multiSorting*
        Allow sorting by multiple columns on this jTable
        Default: true
    *details_link*
        Define the field that should link to the details
        Default: First field
        If specified as '__disabled__', then no linking will occur
        If specified as 'details', an icon is used for the link
    """
    # Check for required values.
    # (A dead "return None" used to follow this raise; it was unreachable
    # and has been removed.)
    if not all(required in jtopts for required in ['listurl','searchurl','fields','title']):
        raise KeyError("Missing required key for jtopts in build_jtable")
    # jTable requires a key for the field
    # Mongo provides _id as a unique identifier, so we will require that
    if "id" not in jtopts['fields']:
        jtopts['fields'].append("id")
    # If we push the _id field on, we will also hide it by default
    if 'hidden_fields' in jtopts:
        jtopts['hidden_fields'].append("id")
    else:
        jtopts['hidden_fields'] = ["id",]
    pageSize = request.user.get_preference('ui','table_page_size',25)
    # Default jTable options
    default_options = {
        "paging" : "true",
        "pageSize": pageSize,
        "sorting": "true",
        "multiSorting": "true",
    }
    # Default widths for certain columns in the jTable
    colwidths = {
        "details": "'2%'",
        'recip': "'2%'",
        "comment":"'15%'",
        "date":"'8%'",
        "isodate":"'8%'",
        "id":"'4%'",
        "favorite":"'4%'",
        "size":"'4%'",
        "added":"'8%'",
        "created":"'8%'",
        "modified":"'8%'",
        "subject":"'17%'",
        "value":"'18%'",
        "type":"'10%'",
        "filetype":"'15%'",
        "status":"'5%'",
        "source":"'7%'",
        "campaign":"'7%'",
    }
    # Mappings for the column titles
    titlemaps = {
        "Isodate": "Date",
        "Created": "Added",
        "Ip": "IP",
        "Id": "Store ID",
    }
    jtable = {}
    # This allows overriding of default options if they are specified in jtopts
    for defopt,defval in default_options.items():
        if defopt in jtopts:
            jtable[defopt] = jtopts[defopt]
        else:
            jtable[defopt] = defval
    # Custom options
    if 'title' in jtopts:
        jtable['title'] = jtopts['title']
    else:
        jtable['title'] = ""
    # Fix: 'default_sort' is documented as optional ("FirstField ASC" by
    # default) but was previously accessed unconditionally, raising
    # KeyError when callers omitted it.
    jtable['defaultSorting'] = jtopts.get('default_sort',
                                          "%s ASC" % jtopts['fields'][0])
    # Define jTable actions
    jtable['actions'] = {}
    # List action
    # If we have get parameters, append them
    if request.GET:
        jtable['actions']['listAction'] = jtopts['listurl'] + "?"+request.GET.urlencode(safe='@')
    else:
        jtable['actions']['listAction'] = jtopts['listurl']
    # Delete action
    # If user is admin and deleteurl is set, provide a delete action in jTable
    if ( is_admin(request.user.username) and
         'deleteurl' in jtopts and jtopts['deleteurl'] ):
        jtable['actions']['deleteAction'] = jtopts['deleteurl']
    # We don't have any views available for these actions
    #jtable['actions']['createAction'] = reverse()
    #jtable['actions']['updateAction'] = reverse()
    # Generate the fields
    jtable['fields'] = []
    # Hoisted out of the field loop below: both the campaign base URL and
    # the linked_fields defaulting are loop-invariant and were previously
    # recomputed for every field.
    campbase = reverse('crits.campaigns.views.campaign_details',args=('__CAMPAIGN__',))
    # If linked_fields is not specified lets link source and campaign
    # if they exist as fields in the jTable
    if 'linked_fields' not in jtopts:
        jtopts['linked_fields'] = []
        if 'source' in jtopts['fields']:
            jtopts['linked_fields'].append("source")
        if 'campaign' in jtopts['fields']:
            jtopts['linked_fields'].append("campaign")
    for field in jtopts['fields']:
        fdict = {}
        # Create the column title here
        title = field.replace("_"," ").title().strip()
        if title in titlemaps:
            title = titlemaps[title]
        # Some options require quotes, so we use "'%s'" to quote them
        fdict['title'] = "'%s'" % title
        fdict['fieldname'] = "'%s'" % field
        if field in colwidths:
            fdict['width'] = colwidths[field]
        # Every jTable needs a key. All our items in Mongo have a unique _id
        # identifier, so by default we always include that here as the key
        if field == "id":
            fdict['key'] = "true"
            fdict['display'] = """function (data) { return '<div class="icon-container"><span id="'+data.record.id+'" class="id_copy ui-icon ui-icon-copy"></span></div>';}"""
        if field == "favorite":
            fdict['display'] = """function (data) { return '<div class="icon-container"><span id="'+data.record.id+'" class="favorites_icon_jtable ui-icon ui-icon-star"></span></div>';}"""
        if field == "thumb":
            fdict['display'] = """function (data) { return '<img src="%s'+data.record.id+'/thumb/" />';}""" % reverse('crits.screenshots.views.render_screenshot')
        if field == "description" and jtable['title'] == "Screenshots":
            fdict['display'] = """function (data) { return '<span class="edit_underline edit_ss_description" data-id="'+data.record.id+'">'+data.record.description+'</span>';}"""
        if 'no_sort' in jtopts and field in jtopts['no_sort']:
            fdict['sorting'] = "false"
        if 'hidden_fields' in jtopts and field in jtopts['hidden_fields']:
            # hide the row but allow the user to show it
            fdict['visibility'] = '"hidden"'
        # This creates links for certain jTable columns
        # It will link anything listed in 'linked_fields'
        if field in jtopts['linked_fields']:
            fdict['display'] = """function (data) {
                return link_jtable_column(data, '%s', '%s', '%s');
            } """ % (field, jtopts['searchurl'], campbase)
        jtable['fields'].append(fdict)
    if 'details_link' in jtopts:
        if jtopts['details_link'] == "__disabled__":
            return jtable
        else:
            if jtopts['details_link'] not in jtopts['fields']:
                return jtable
            # Link the field in details_link
            linkfield = "'%s'" % jtopts["details_link"]
            for i,field in enumerate(jtable['fields']):
                if field['fieldname'] != linkfield:
                    continue
                if field['fieldname'] == "'details'":
                    # Fix: the inner "return '';" quotes used to be
                    # swallowed by Python's implicit string-literal
                    # concatenation, emitting "return ;" in the javascript.
                    jtable['fields'][i]['display'] = ('function (data) {'
                        'if (!data.record.url) { return \'\';}; '
                        'return \'<a href="\'+data.record.url+\'" target="_parent">'
                        '<div class="icon-container"><span class="ui-icon ui-icon-document" '
                        'title="View Details"></span></div></a>\';}')
                else:
                    jtable['fields'][i]['display'] = "function (data) {return '<a href=\"'+data.record.url+'\">'+data.record."+jtopts['fields'][i]+"+'</a>';}"
    else:
        # Provide default behavior: link the first field to the details view
        if jtable['fields'][0]['fieldname'] == "'details'":
            jtable['fields'][0]['display'] = 'function (data) {return \'<a href="\'+data.record.url+\'"><div class="icon-container"><span class="ui-icon ui-icon-document" title="View Details"></span></div></a>\';}'
        else:
            jtable['fields'][0]['display'] = "function (data) {return '<a href=\"'+data.record.url+'\">'+data.record."+jtopts['fields'][0]+"+'</a>';}"
    return jtable
def generate_items_jtable(request, itype, option):
    """
    Generate a jtable list for the Item provided.
    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :param itype: The CRITs item we want to list.
    :type itype: str
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    :raises ValueError: If ``itype`` is not a supported item type.
    """
    obj_type = class_from_type(itype)
    # Map each supported item type to its listing fields and the javascript
    # handler wired to the toolbar "Add" button.
    item_map = {
        'ActorThreatIdentifier': (
            ['name', 'active', 'id'],
            "function () {window.parent.$('#actor_identifier_type_add').click();}"),
        'Campaign': (
            ['name', 'description', 'active', 'id'],
            "function () {window.parent.$('#new-campaign').click();}"),
        'IndicatorAction': (
            ['name', 'active', 'id'],
            "function () {window.parent.$('#indicator_action_add').click();}"),
        'RawDataType': (
            ['name', 'active', 'id'],
            "function () {window.parent.$('#raw_data_type_add').click();}"),
        'SourceAccess': (
            ['name', 'active', 'id'],
            "function () {window.parent.$('#source_create').click();}"),
        'UserRole': (
            ['name', 'active', 'id'],
            "function () {window.parent.$('#user_role').click();}"),
    }
    try:
        fields, click = item_map[itype]
    except KeyError:
        # Fix: an unsupported itype previously fell through the if/elif
        # chain and died later with an opaque NameError on 'fields'.
        raise ValueError("Unsupported item type: %s" % itype)
    if option == 'jtlist':
        details_url = None
        details_url_key = 'name'
        response = jtable_ajax_list(obj_type, details_url, details_url_key,
                                    request, includes=fields)
        return HttpResponse(json.dumps(response, default=json_handler),
                            content_type="application/json")
    jtopts = {
        'title': "%ss" % itype,
        'default_sort': 'name ASC',
        'listurl': reverse('crits.core.views.items_listing',
                           args=(itype, 'jtlist',)),
        'deleteurl': None,
        'searchurl': None,
        'fields': fields,
        'hidden_fields': ['id'],
        'linked_fields': [],
        'details_link': '',
    }
    jtable = build_jtable(jtopts, request)
    jtable['toolbar'] = [
        {
            'tooltip': "'Add %s'" % itype,
            'text': "'Add %s'" % itype,
            'click': click,
        },
    ]
    # Make the "active" column a toggle link
    for field in jtable['fields']:
        if field['fieldname'].startswith("'active"):
            field['display'] = """ function (data) {
                return '<a id="is_active_' + data.record.id + '" href="#" onclick=\\'javascript:toggleItemActive("%s","'+data.record.id+'");\\'>' + data.record.active + '</a>';
            }
            """ % itype
    if option == "inline":
        return render_to_response("jtable.html",
                                  {'jtable': jtable,
                                   'jtid': '%ss_listing' % itype.lower(),
                                   'button': '%ss_tab' % itype.lower()},
                                  RequestContext(request))
    else:
        return render_to_response("item_editor.html",
                                  {'jtable': jtable,
                                   'jtid': 'items_listing'},
                                  RequestContext(request))
def generate_users_jtable(request, option):
    """
    Generate a jtable list for Users.
    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """
    obj_type = CRITsUser
    # Single source of truth for the listed columns. This list used to be
    # duplicated between the jtlist branch and the jtopts definition below,
    # inviting drift.
    fields = ['username', 'first_name', 'last_name', 'email',
              'last_login', 'organization', 'role', 'is_active',
              'id']
    if option == 'jtlist':
        details_url = None
        details_url_key = 'username'
        # never ship login_attempts to the browser
        excludes = ['login_attempts']
        response = jtable_ajax_list(obj_type, details_url, details_url_key,
                                    request, includes=fields,
                                    excludes=excludes)
        return HttpResponse(json.dumps(response, default=json_handler),
                            content_type="application/json")
    jtopts = {
        'title': "Users",
        'default_sort': 'username ASC',
        'listurl': reverse('crits.core.views.users_listing', args=('jtlist',)),
        'deleteurl': None,
        'searchurl': None,
        'fields': fields,
        'hidden_fields': ['id'],
        'linked_fields': []
    }
    jtable = build_jtable(jtopts, request)
    jtable['toolbar'] = [
        {
            'tooltip': "'Add User'",
            'text': "'Add User'",
            'click': "function () {editUser('');}",
        },
    ]
    # Turn the username column into an edit link and the is_active column
    # into an enable/disable toggle.
    for field in jtable['fields']:
        if field['fieldname'].startswith("'username"):
            field['display'] = """ function (data) {
                return '<a class="user_edit" href="#" onclick=\\'javascript:editUser("'+data.record.username+'");\\'>' + data.record.username + '</a>';
            }
            """
        if field['fieldname'].startswith("'is_active"):
            field['display'] = """ function (data) {
                return '<a id="is_active_' + data.record.username + '" href="#" onclick=\\'javascript:toggleUserActive("'+data.record.username+'");\\'>' + data.record.is_active + '</a>';
            }
            """
    if option == "inline":
        return render_to_response("jtable.html",
                                  {'jtable': jtable,
                                   'jtid': 'users_listing'},
                                  RequestContext(request))
    else:
        return render_to_response("user_editor.html",
                                  {'jtable': jtable,
                                   'jtid': 'users_listing'},
                                  RequestContext(request))
def generate_dashboard(request):
    """
    Generate the Dashboard.
    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponse`
    """
    # Imported locally to avoid a circular import at module load time.
    from crits.dashboards.handlers import get_dashboard
    context = get_dashboard(request.user)
    return render_to_response('dashboard.html',
                              context,
                              RequestContext(request))
def dns_timeline(query, analyst, sources):
    """
    Query for domains, format that data for timeline view, and return them.
    :param query: The query to use to find the Domains.
    :type query: dict
    :param analyst: The user requesting the timeline.
    :type analyst: str
    :param sources: List of user's sources.
    :type sources: list
    :returns: list of dictionaries.
    """
    domains = Domain.objects(__raw__=query)
    # IPs that conventionally mark a domain as parked/offline; resolving to
    # one of these closes the current "online" timeline event.
    offline = ['255.255.255.254', '127.0.0.1', '127.0.0.2', '0.0.0.0']
    event_id = 0
    events = []
    for d in domains:
        d.sanitize_sources(username=analyst,
                           sources=sources)
        domain = d.domain
        # state machine: "off" = no open event, "on" = event in progress
        state = "off"
        # walk the domain's IP relationships in chronological order
        ip_list = [r for r in d.relationships if r.rel_type == 'IP']
        ip_list = sorted(ip_list, key=itemgetter('relationship_date'), reverse=False)
        description = ""
        e = {}
        for ipl in ip_list:
            # only resolve IPs visible to this user's sources
            ip = IP.objects(ip=ipl.object_id,
                            source__name__in=sources).first()
            # undated relationships can't be placed on a timeline
            if ipl['relationship_date'] is None:
                continue
            e['id'] = event_id
            e['date_display'] = "hour"
            e['importance'] = 20
            e['icon'] = "halfcircle_blue.png"
            event_id += 1
            if ip and ip.ip in offline:
                # offline IP: close the open event, if any
                if state == "on":
                    e['enddate'] = datetime.datetime.strftime(ipl['relationship_date'],
                                                              settings.PY_DATETIME_FORMAT)
                    e['description'] = description
                    state = "off"
                    events.append(e)
                    description = ""
                    e = {}
                elif state == "off":
                    pass
            elif ip:
                # online IP: either extend the open event or start a new one
                if state == "on":
                    description += "<br /><b><a style=\"display: inline;\" href=\"%s\">%s</a>:</b> %s" % (reverse('crits.ips.views.ip_detail', args=[ip.ip]), ip.ip, ipl['relationship_date'])
                elif state == "off":
                    e['startdate'] = datetime.datetime.strftime(ipl['relationship_date'],
                                                                settings.PY_DATETIME_FORMAT)
                    e['title'] = domain
                    description += "<br /><b><a style=\"display: inline;\" href=\"%s\">%s</a>:</b> %s" % (reverse('crits.ips.views.ip_detail', args=[ip.ip]), ip.ip, ipl['relationship_date'])
                    state = "on"
        # NOTE(review): an event still "on" when the IP list is exhausted is
        # never appended -- confirm this (open-ended events dropped) is
        # intentional.
    return events
def email_timeline(query, analyst, sources):
    """
    Query for emails, format that data for timeline view, and return them.
    :param query: The query to use to find the Emails.
    :type query: dict
    :param analyst: The user requesting the timeline.
    :type analyst: str
    :param sources: List of user's sources.
    :type sources: list
    :returns: list of dictionaries.
    """
    emails = Email.objects(__raw__=query)
    events = []
    event_id = 0
    for email in emails:
        email.sanitize_sources(username=analyst,
                               sources=sources)
        email = email.to_dict()
        # skip emails whose sources were entirely sanitized away
        if "source" in email and email["source"][0] is not None:
            e = {}
            e['title'] = ""
            e['id'] = event_id
            e['date_display'] = "hour"
            e['importance'] = 20
            e['icon'] = "halfcircle_blue.png"
            event_id += 1
            if "from" in email:
                if email["from"]:
                    e['title'] += email["from"]
            if "campaign" in email:
                # Best effort: the campaign list may be empty or oddly
                # shaped. Fix: narrowed the former bare "except:" so only
                # the expected lookup failures are swallowed.
                try:
                    if "name" in email["campaign"][0]:
                        e['title'] += " (%s)" % email["campaign"][0]["name"]
                except (IndexError, KeyError, TypeError):
                    pass
            if "source" in email:
                if "name" in email["source"][0]:
                    e['title'] += " (%s)" % email["source"][0]["name"]
            description = ""
            # Fix: a stray "sources = []" here clobbered the *sources*
            # parameter, so sanitize_sources() received an empty source
            # filter for every email after the first. Removed.
            if "from" in email:
                description += "<br /><b>%s</b>: <a style=\"display: inline;\" href=\"%s\">%s</a>" % \
                    (email["from"],
                     reverse('crits.emails.views.email_detail', args=[email['_id']]),
                     email["from"])
            if "isodate" in email:
                e['startdate'] = "%s" % email["isodate"]
            else:
                # fall back to the first source instance's date
                if "source" in email:
                    e['startdate'] = "%s" % email["source"][0]['instances'][0]["date"]
            if "source" in email:
                description += "<br /><hr><b>Source:</b>"
                for source in email["source"]:
                    if "name" in source and "instances" in source:
                        description += "<br /><b>%s</b>: %s" % (source["name"],
                                                                source['instances'][0]["date"])
            e['description'] = description
            events.append(e)
    return events
def indicator_timeline(query, analyst, sources):
    """
    Query for indicators, format that data for timeline view, and return them.
    :param query: The query to use to find the Indicators.
    :type query: dict
    :param analyst: The user requesting the timeline.
    :type analyst: str
    :param sources: List of user's sources.
    :type sources: list
    :returns: list of dictionaries.
    """
    events = []
    for event_id, indicator in enumerate(Indicator.objects(__raw__=query)):
        indicator.sanitize_sources(username=analyst,
                                   sources=sources)
        ind = indicator.to_dict()
        detail_url = reverse('crits.indicators.views.indicator',
                             args=[ind['_id']])
        entry = {
            'title': ind['value'],
            'id': event_id,
            'date_display': "hour",
            'importance': 20,
            'icon': "halfcircle_blue.png",
            # NOTE(review): "%Z" on a naive datetime renders empty, leaving
            # a trailing "." -- possibly "%f" was intended; kept as-is.
            'startdate': ind['created'].strftime("%Y-%m-%d %H:%M:%S.%Z"),
        }
        entry['description'] = (
            "<br /><b>Value</b>: <a style=\"display: inline;\" href=\"%s\">%s</a>" % (detail_url, ind['value']) +
            "<br /><b>Type</b>: %s" % ind['type'] +
            "<br /><b>Created</b>: %s" % ind['created'])
        events.append(entry)
    return events
def generate_user_profile(username, request):
    """
    Generate the user profile page.
    :param username: The user profile to generate.
    :type username: str
    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponse`
    """
    user_source_access = user_sources(username)
    user_source_access.sort()
    # cap each "recent activity" list at this many entries
    limit = 5
    user_info = CRITsUser.objects(username=username).first()
    if not user_info:
        return {"status": "ERROR", "message": "User not found"}
    # recent indicators worked on
    query = {'$or': [{'actions.analyst': "%s" % username},
                     {'activity.analyst': "%s" % username},
                     {'objects.analyst': "%s" % username}]}
    indicator_list = (Indicator.objects(__raw__=query)
                      .only('value',
                            'ind_type',
                            'created',
                            'campaign',
                            'source',
                            'status')
                      .order_by('-created')
                      .limit(limit)
                      .sanitize_sources(username))
    # recent emails worked on
    query = {'campaign.analyst': "%s" % username}
    email_list = (Email.objects(__raw__=query)
                  .order_by('-date')
                  .limit(limit)
                  .sanitize_sources(username))
    # samples: recent md5s come from the audit log, then are resolved to
    # Sample documents below
    sample_md5s = (AuditLog.objects(user=username,
                                    target_type="Sample")
                   .order_by('-date')
                   .limit(limit))
    md5s = []
    for sample in sample_md5s:
        # audit log "value" begins with the md5; strip any trailing text
        md5s.append(sample.value.split(" ")[0])
    filter_data = ('md5', 'source', 'filename', 'mimetype',
                   'size', 'campaign')
    sample_list = (Sample.objects(md5__in=md5s)
                   .only(*filter_data)
                   .sanitize_sources(username))
    subscriptions = user_info.subscriptions
    subscription_count = 0
    # collect subscription information: for each subscribed type, resolve
    # the stored ids to display data and carry the subscription date over.
    if 'Sample' in subscriptions:
        subscription_count += len(subscriptions['Sample'])
        final_samples = []
        ids = [ObjectId(s['_id']) for s in subscriptions['Sample']]
        samples = Sample.objects(id__in=ids).only('md5', 'filename')
        # m maps position in the subscription list -> _id, so we can look
        # up each resolved document's subscription date
        m = map(itemgetter('_id'), subscriptions['Sample'])
        for sample in samples:
            s = sample.to_dict()
            s['md5'] = sample['md5']
            s['id'] = sample.id
            s['date'] = subscriptions['Sample'][m.index(sample.id)]['date']
            final_samples.append(s)
        subscriptions['Sample'] = final_samples
    if 'PCAP' in subscriptions:
        subscription_count += len(subscriptions['PCAP'])
        final_pcaps = []
        ids = [ObjectId(p['_id']) for p in subscriptions['PCAP']]
        pcaps = PCAP.objects(id__in=ids).only('md5', 'filename')
        m = map(itemgetter('_id'), subscriptions['PCAP'])
        for pcap in pcaps:
            p = pcap.to_dict()
            p['id'] = pcap.id
            p['date'] = subscriptions['PCAP'][m.index(pcap.id)]['date']
            final_pcaps.append(p)
        subscriptions['PCAP'] = final_pcaps
    if 'Email' in subscriptions:
        subscription_count += len(subscriptions['Email'])
        final_emails = []
        ids = [ObjectId(e['_id']) for e in subscriptions['Email']]
        emails = Email.objects(id__in=ids).only('from_address',
                                                'sender',
                                                'subject')
        m = map(itemgetter('_id'), subscriptions['Email'])
        for email in emails:
            e = email.to_dict()
            e['id'] = email.id
            e['date'] = subscriptions['Email'][m.index(email.id)]['date']
            final_emails.append(e)
        subscriptions['Email'] = final_emails
    if 'Indicator' in subscriptions:
        subscription_count += len(subscriptions['Indicator'])
        final_indicators = []
        ids = [ObjectId(i['_id']) for i in subscriptions['Indicator']]
        indicators = Indicator.objects(id__in=ids).only('value', 'ind_type')
        m = map(itemgetter('_id'), subscriptions['Indicator'])
        for indicator in indicators:
            i = indicator.to_dict()
            i['id'] = indicator.id
            i['date'] = subscriptions['Indicator'][m.index(indicator.id)]['date']
            final_indicators.append(i)
        subscriptions['Indicator'] = final_indicators
    if 'Event' in subscriptions:
        subscription_count += len(subscriptions['Event'])
        final_events = []
        ids = [ObjectId(v['_id']) for v in subscriptions['Event']]
        events = Event.objects(id__in=ids).only('title', 'description')
        m = map(itemgetter('_id'), subscriptions['Event'])
        for event in events:
            e = event.to_dict()
            e['id'] = event.id
            e['date'] = subscriptions['Event'][m.index(event.id)]['date']
            final_events.append(e)
        subscriptions['Event'] = final_events
    if 'Domain' in subscriptions:
        subscription_count += len(subscriptions['Domain'])
        final_domains = []
        ids = [ObjectId(d['_id']) for d in subscriptions['Domain']]
        domains = Domain.objects(id__in=ids).only('domain')
        m = map(itemgetter('_id'), subscriptions['Domain'])
        for domain in domains:
            d = domain.to_dict()
            d['id'] = domain.id
            d['date'] = subscriptions['Domain'][m.index(domain.id)]['date']
            final_domains.append(d)
        subscriptions['Domain'] = final_domains
    if 'IP' in subscriptions:
        subscription_count += len(subscriptions['IP'])
        final_ips = []
        ids = [ObjectId(a['_id']) for a in subscriptions['IP']]
        ips = IP.objects(id__in=ids).only('ip')
        m = map(itemgetter('_id'), subscriptions['IP'])
        for ip in ips:
            i = ip.to_dict()
            i['id'] = ip.id
            i['date'] = subscriptions['IP'][m.index(ip.id)]['date']
            final_ips.append(i)
        subscriptions['IP'] = final_ips
    if 'Campaign' in subscriptions:
        subscription_count += len(subscriptions['Campaign'])
        final_campaigns = []
        ids = [ObjectId(c['_id']) for c in subscriptions['Campaign']]
        campaigns = Campaign.objects(id__in=ids).only('name')
        m = map(itemgetter('_id'), subscriptions['Campaign'])
        for campaign in campaigns:
            c = campaign.to_dict()
            c['id'] = campaign.id
            c['date'] = subscriptions['Campaign'][m.index(campaign.id)]['date']
            final_campaigns.append(c)
        subscriptions['Campaign'] = final_campaigns
    # Collect favorite information: per-type counts plus a listing URL
    favorites = user_info.favorites.to_dict()
    collected_favorites = {}
    total_favorites = 0
    for type_ in favorites.keys():
        ids = [ObjectId(f) for f in favorites[type_]]
        if ids:
            count = class_from_type(type_).objects(id__in=ids).count()
        else:
            count = 0
        total_favorites += count
        url = reverse('crits.core.views.favorites_list', args=(type_, 'inline'))
        collected_favorites[type_] = {
            'count': count,
            'url': url
        }
    #XXX: this can be removed after jtable
    notifications = get_user_notifications(username)
    # template context for the profile page
    result = {'username': username,
              'user_info': user_info,
              'user_sources': user_source_access,
              'indicators': indicator_list,
              'emails': email_list,
              'favorites': collected_favorites,
              'total_favorites': total_favorites,
              'notifications': notifications,
              'samples': sample_list,
              'subscriptions': subscriptions,
              'subscription_count': subscription_count,
              'ui_themes': ui_themes(),
              'rt_url': settings.RT_URL}
    result['preferences'] = generate_user_preference(request)
    return result
def generate_favorites_jtable(request, type_, option):
    """
    Generate favorites jtable.
    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :param type_: The type of CRITs object.
    :type type_: str
    :returns: :class:`django.http.HttpResponse`
    """
    klass = class_from_type(type_)
    jt_opts = klass._meta['jtable_opts']
    if option == "jtlist":
        # Restrict the generic lister to only this user's favorited ids.
        user = CRITsUser.objects(username=request.user.username).only('favorites').first()
        favorites = user.favorites.to_dict()
        favorite_ids = [ObjectId(fid) for fid in favorites[type_]]
        content = jtable_ajax_list(klass,
                                   jt_opts['details_url'],
                                   jt_opts['details_url_key'],
                                   request,
                                   includes=jt_opts['fields'],
                                   query={'_id': {'$in': favorite_ids}})
        return HttpResponse(json.dumps(content,
                                       default=json_handler),
                            content_type="application/json")
    jtopts = {
        'title': type_ + 's',
        'default_sort': jt_opts['default_sort'],
        'listurl': reverse('crits.core.views.favorites_list', args=(type_, 'jtlist')),
        'searchurl': reverse(jt_opts['searchurl']),
        'fields': jt_opts['jtopts_fields'],
        'hidden_fields': jt_opts['hidden_fields'],
        'linked_fields': jt_opts['linked_fields'],
        'details_link': jt_opts['details_link'],
        'no_sort': jt_opts['no_sort']
    }
    jtable = build_jtable(jtopts, request)
    # Inline rendering embeds the table in the profile page; otherwise use
    # the type's own listing template.
    context = {'jtable': jtable, 'jtid': '%s_listing' % type_}
    if option == "inline":
        context['button'] = '%ss_tab' % type_
        template = "jtable.html"
    else:
        template = "%s_listing.html" % type_
    return render_to_response(template, context, RequestContext(request))
def generate_user_preference(request,section=None,key=None,name=None):
    """
    Generate user preferences.
    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :param section: The section of the preferences to return.
    :type section: str
    :param key: The specific preference field within the section
                to be retrieved.
    :type key: str
    :param name: Used to differentiate between different preferences under
                 the same "section" and "key". Otherwise the first "section"
                 that matches will be returned. For example there may be two
                 different "notify" sections and also two different "toggle"
                 keys, but only the "key" matching the "name" value will be
                 returned.
    :type name: str
    :returns: list of section dicts, or a single section dict when
              ``section`` (and optionally ``key``/``name``) match.
    """
    # Returned as an array to maintain the order
    # could also have a key/value and a ordered array
    from crits.core.forms import PrefUIForm, NavMenuForm, ToastNotificationConfigForm
    toast_notifications_title = "Toast Notifications"
    config = CRITsConfig.objects().first()
    if not config.enable_toasts:
        toast_notifications_title += " (currently globally disabled by an admin)"
    preferences = [
        {'section': 'notify',
         'title': 'Notifications',
         'toggle': 'email',
         'enabled': get_user_email_notification(request.user.username),
         'name': 'Email Notifications'
         },
        {'section': 'toast_notifications',
         'title': toast_notifications_title,
         'form': ToastNotificationConfigForm(request),
         'formclass': ToastNotificationConfigForm,
         },
        {'section': 'ui',
         'title': 'UI Settings',
         'form': PrefUIForm(request),
         'formclass': PrefUIForm,
         'reload': True },
        {'section': 'nav',
         'form': NavMenuForm(request),
         'formclass': NavMenuForm,
         'name': 'Navigation Menu',
         'title': 'Navigation Menu',
         'reload': True },
    ]
    # Only return the requested section as hash
    if section:
        for pref in preferences:
            if key:
                # Fix: use .get() -- not every section defines every key
                # (e.g. 'toast_notifications' has no 'name'), and direct
                # indexing raised KeyError while scanning for a match.
                if pref['section'] == section and pref.get(key) == name:
                    return pref
            else:
                if pref['section'] == section:
                    return pref
    return preferences
def reset_user_password(username=None, action=None, email=None,
                        submitted_rcode=None, new_p=None, new_p_c=None,
                        analyst=None):
    """
    Handle the process of resetting a user's password.

    :param username: The user resetting their password.
    :type username: str
    :param action: What action we need to take:
                   - send_email: sends email to user with reset code
                   - submit_reset_code: validate the reset code
                   - submit_passwords: reset the password
    :type action: str
    :param email: The user's email address.
    :type email: str
    :param submitted_rcode: The reset code submitted by the user.
    :type submitted_rcode: str
    :param new_p: The new password provided by the user.
    :type new_p: str
    :param new_p_c: The new password confirmation provided by the user.
    :type new_p_c: str
    :param analyst: The user submitting these changes.
    :type analyst: str
    :returns: :class:`django.http.HttpResponse` with a JSON body
    """
    # Reject unsupported actions before touching the database.
    if action not in ('send_email', 'submit_reset_code', 'submit_passwords'):
        response = {'success': False, 'message': 'Invalid action'}
        return HttpResponse(json.dumps(response, default=json_handler),
                            content_type="application/json")
    # Both username AND email must match the same account.
    user = CRITsUser.objects(username=username, email=email).first()
    if not user:
        # make it seem like this worked even if it didn't to prevent people
        # from brute forcing usernames and email addresses.
        response = {'success': True, 'message': 'Instructions sent to %s' % email}
        return HttpResponse(json.dumps(response, default=json_handler),
                            content_type="application/json")
    if action == 'send_email':
        # Generate/store a fresh reset code and mail it to the user.
        rcode = user.set_reset_code(analyst)
        crits_config = CRITsConfig.objects().first()
        # crits_email_end_tag controls whether the configured subject tag
        # goes at the end (True) or the beginning (False) of the subject.
        if crits_config.crits_email_end_tag:
            subject = "CRITs Password Reset" + crits_config.crits_email_subject_tag
        else:
            subject = crits_config.crits_email_subject_tag + "CRITs Password Reset"
        body = """You are receiving this email because someone has requested a
password reset for your account. If it was not you, please log
into CRITs immediately which will remove the reset code from your
account. If it was you, here is your reset code:\n\n
"""
        body += "%s\n\n" % rcode
        body += """You have five minutes to reset your password before this
reset code expires.\n\nThank you!
"""
        user.email_user(subject, body)
        response = {'success': True, 'message': 'Instructions sent to %s' % email}
        return HttpResponse(json.dumps(response, default=json_handler),
                            content_type="application/json")
    if action == 'submit_reset_code':
        # validate_reset_code() returns a result dict; serialize it directly.
        return HttpResponse(json.dumps(user.validate_reset_code(submitted_rcode,
                                                                analyst),
                                       default=json_handler),
                            content_type="application/json")
    if action == 'submit_passwords':
        # reset_password() returns a result dict; serialize it directly.
        return HttpResponse(json.dumps(user.reset_password(submitted_rcode,
                                                           new_p, new_p_c,
                                                           analyst),
                                       default=json_handler),
                            content_type="application/json")
def login_user(username, password, next_url=None, user_agent=None,
               remote_addr=None, accept_language=None, request=None,
               totp_pass=None):
    """
    Handle the process of authenticating a user.

    :param username: The user authenticating to the system.
    :type username: str
    :param password: The password provided by the user.
    :type password: str
    :param next_url: The URL to redirect to after successful login.
    :type next_url: str
    :param user_agent: The user-agent of the request.
    :type user_agent: str
    :param remote_addr: The remote-address of the request.
    :type remote_addr: str
    :param accept_language: The accept-language of the request.
    :type accept_language: str
    :param request: The request.
    :type request: :class:`django.http.HttpRequest`
    :param totp_pass: The TOTP password provided by the user.
    :type totp_pass: str
    :returns: dict with keys:
              "success" (boolean),
              "type" (str) - Type of failure,
              "message" (str)
    """
    # Deliberately vague message so callers cannot distinguish an unknown
    # user from a wrong password.
    error = 'Unknown user or bad password.'
    response = {}
    crits_config = CRITsConfig.objects().first()
    if not crits_config:
        # Without a config we cannot determine TOTP policy; fail closed.
        response['success'] = False
        response['type'] = "login_failed"
        response['message'] = error
        return response
    # Web (request present) and CLI logins can carry different TOTP policies.
    if request:
        totp = crits_config.totp_web
    else:
        totp = crits_config.totp_cli
    # Do the username and password authentication
    # TOTP is passed here so that authenticate() can check if
    # the threshold has been exceeded.
    user = authenticate(username=username,
                        password=password,
                        user_agent=user_agent,
                        remote_addr=remote_addr,
                        accept_language=accept_language,
                        totp_enabled=totp)
    if user:
        if totp == 'Required' or (totp == 'Optional' and user.totp):
            # Remote user auth'd but has not seen TOTP screen yet
            if crits_config.remote_user and not totp_pass:
                response['success'] = False
                response['type'] = "totp_required"
                response['message'] = "TOTP required"
                return response
            # Record this TOTP attempt in the user's login history.
            e = EmbeddedLoginAttempt(user_agent=user_agent,
                                     remote_addr=remote_addr,
                                     accept_language=accept_language)
            secret = user.secret
            if not secret and not totp_pass:
                # No TOTP secret yet and no PIN supplied: ask for a PIN so a
                # secret can be generated on the next attempt.
                response['success'] = False
                response['type'] = "no_secret"
                response['message'] = ("You have no TOTP secret. Please enter "
                                       "a new PIN in the TOTP field.")
                return response
            elif not secret and totp_pass:
                # First-time TOTP setup: derive and store a secret from the PIN.
                response['success'] = False
                response['type'] = "secret_generated"
                res = save_user_secret(username, totp_pass, "crits", (200,200))
                if res['success']:
                    user.reload()
                    secret = res['secret']
                    if not request:
                        # CLI client: hand back the raw secret.
                        response['secret'] = secret
                        return response
                    message = "Setup your authenticator using: '%s'" % secret
                    message += "<br />Then authenticate again with your PIN + token."
                    if res['qr_img']:
                        message += '<br /><img src="data:image/png;base64,'
                        message += '%s" />' % res['qr_img']
                    response['message'] = message
                else:
                    response['message'] = "Secret Generation Failed"
                return response
            elif not valid_totp(username, totp_pass, secret):
                # Bad token: log the failed attempt and bump the counter.
                e.success = False
                user.login_attempts.append(e)
                user.invalid_login_attempts += 1
                user.save()
                response['success'] = False
                response['type'] = "login_failed"
                response['message'] = error
                return response
            # TOTP passed: record the successful attempt.
            e.success = True
            user.login_attempts.append(e)
            user.save()
        if user.is_active:
            # Successful login: clear failure counters and stale reset codes.
            user.invalid_login_attempts = 0
            user.password_reset.reset_code = ""
            user.save()
            if crits_config and request:
                # session_timeout is configured in hours; set_expiry() takes seconds.
                request.session.set_expiry(crits_config.session_timeout * 60 * 60)
            elif request:
                request.session.set_expiry(settings.SESSION_TIMEOUT)
            if request:
                user_login(request, user)
            response['type'] = "login_successful"
            # Redirect to next or default dashboard
            if next_url is not None and next_url != '' and next_url != 'None':
                try:
                    # test that we can go from URL to view to URL
                    # to validate the URL is something we know about.
                    # We use get_script_prefix() here to tell us what
                    # the script prefix is configured in Apache.
                    # We strip it out so resolve can work properly, and then
                    # redirect to the full url.
                    prefix = get_script_prefix()
                    tmp_url = next_url
                    if next_url.startswith(prefix):
                        tmp_url = tmp_url.replace(prefix, '/', 1)
                    res = resolve(tmp_url)
                    url_name = res.url_name
                    args = res.args
                    kwargs = res.kwargs
                    redir = reverse(url_name, args=args, kwargs=kwargs)
                    del redir
                    response['success'] = True
                    response['message'] = next_url
                except:
                    # Any resolve/reverse failure is treated as an attempted
                    # open-redirect and reported as such.
                    response['success'] = False
                    response['message'] = 'ALERT - attempted open URL redirect attack to %s. Please report this to your system administrator.' % next_url
                return response
            response['success'] = True
            if 'message' not in response:
                response['message'] = reverse('crits.dashboards.views.dashboard')
            return response
        else:
            logger.info("Attempted login to a disabled account detected: %s" %
                        user.username)
    # Fall-through: unknown user, bad password, or disabled account.
    response['success'] = False
    response['type'] = "login_failed"
    response['message'] = error
    return response
def generate_global_search(request):
    """
    Generate global search results.

    :param request: The request.
    :type request: :class:`django.http.HttpRequest`
    :returns: dict with keys:
              "url_params" (str),
              "term" (str) - the search term,
              "results" (list),
              "Result" (str of "OK" or "ERROR")
              or, for a raw ObjectId search hit, a dict with "url" and "key".
    """
    # Perform rapid search for ObjectID strings
    searchtext = request.GET['q']
    if ObjectId.is_valid(searchtext):
        # The term is a raw ObjectId: probe each TLO type in order and link
        # straight to the first matching document's detail view.
        for obj_type, url, key in [
            ['Actor', 'crits.actors.views.actor_detail', 'id'],
            ['Backdoor', 'crits.backdoors.views.backdoor_detail', 'id'],
            ['Campaign', 'crits.campaigns.views.campaign_details', 'name'],
            ['Certificate', 'crits.certificates.views.certificate_details', 'md5'],
            ['Domain', 'crits.domains.views.domain_detail', 'domain'],
            ['Email', 'crits.emails.views.email_detail', 'id'],
            ['Event', 'crits.events.views.view_event', 'id'],
            ['Exploit', 'crits.exploits.views.exploit_detail', 'id'],
            ['Indicator', 'crits.indicators.views.indicator', 'id'],
            ['IP', 'crits.ips.views.ip_detail', 'ip'],
            ['PCAP', 'crits.pcaps.views.pcap_details', 'md5'],
            ['RawData', 'crits.raw_data.views.raw_data_details', 'id'],
            ['Sample', 'crits.samples.views.detail', 'md5'],
            ['Target', 'crits.targets.views.target_info', 'email_address']]:
            obj = class_from_id(obj_type, searchtext)
            if obj:
                return {'url': url, 'key': obj[key]}
    # Importing here to prevent a circular import with Services and runscript.
    from crits.services.analysis_result import AnalysisResult
    results = []
    for col_obj,url in [
        [Actor, "crits.actors.views.actors_listing"],
        [AnalysisResult, "crits.services.views.analysis_results_listing"],
        [Backdoor, "crits.backdoors.views.backdoors_listing"],
        [Campaign, "crits.campaigns.views.campaigns_listing"],
        [Certificate, "crits.certificates.views.certificates_listing"],
        [Comment, "crits.comments.views.comments_listing"],
        [Domain, "crits.domains.views.domains_listing"],
        [Email, "crits.emails.views.emails_listing"],
        [Event, "crits.events.views.events_listing"],
        [Exploit, "crits.exploits.views.exploits_listing"],
        [Indicator,"crits.indicators.views.indicators_listing"],
        [IP, "crits.ips.views.ips_listing"],
        [PCAP, "crits.pcaps.views.pcaps_listing"],
        [RawData, "crits.raw_data.views.raw_data_listing"],
        [Sample, "crits.samples.views.samples_listing"],
        [Screenshot, "crits.screenshots.views.screenshots_listing"],
        [Target, "crits.targets.views.targets_listing"]]:
        ctype = col_obj._meta['crits_type']
        # Build (but don't execute) the per-collection query for this term.
        resp = get_query(col_obj, request)
        if resp['Result'] == "ERROR":
            return resp
        elif resp['Result'] == "IGNORE":
            results.append({'count': 0,
                            'url': url,
                            'name': ctype})
        else:
            formatted_query = resp['query']
            term = resp['term']
            urlparams = resp['urlparams']
            # count=True: only totals are needed here, not documents.
            resp = data_query(col_obj, request.user.username, query=formatted_query, count=True)
            results.append({'count': resp['count'],
                            'url': url,
                            'name': ctype})
    # NOTE(review): `urlparams` and `term` are only bound if at least one
    # collection was not IGNOREd; if every collection is ignored this raises
    # NameError — confirm that cannot happen for a real search.
    return {'url_params': urlparams,
            'term': term,
            'results': results,
            'Result': "OK"}
def download_grid_file(request, dtype, sample_md5):
    """
    Download a file from GridFS. The file will get zipped up.

    This should go away and get roped into our other download feature.

    :param request: The request.
    :type request: :class:`django.http.HttpRequest`
    :param dtype: 'pcap', 'object', or 'cert'.
    :type dtype: str
    :param sample_md5: The MD5 of the file to download.
    :type sample_md5: str
    :returns: :class:`django.http.HttpResponse`
    """
    def _zip_download(filename, collection):
        # Zip up a single (filename, content) pair from the given GridFS
        # collection and return it as an attachment response.
        data = [(filename, get_file(sample_md5, collection))]
        zip_data = create_zip(data, False)
        response = HttpResponse(zip_data, mimetype='application/octet-stream')
        response['Content-Disposition'] = 'attachment; filename=%s' % filename + ".zip"
        return response

    def _not_found():
        # Common "no such file" error page.
        return render_to_response('error.html',
                                  {'data': request,
                                   'error': "File not found."},
                                  RequestContext(request))

    if dtype == 'object':
        grid = mongo_connector("%s.files" % settings.COL_OBJECTS)
        obj = grid.find_one({'md5': sample_md5})
        if obj is None:
            # Not in the objects collection; fall through to the pcap lookup.
            dtype = 'pcap'
        else:
            return _zip_download(obj['filename'], "objects")
    if dtype == 'pcap':
        pcaps = mongo_connector(settings.COL_PCAPS)
        pcap = pcaps.find_one({"md5": sample_md5})
        if not pcap:
            return _not_found()
        return _zip_download(pcap['filename'], "pcaps")
    if dtype == 'cert':
        certificates = mongo_connector(settings.COL_CERTIFICATES)
        cert = certificates.find_one({"md5": sample_md5})
        if not cert:
            return _not_found()
        return _zip_download(cert['filename'], "certificates")
    # Previously an unrecognized dtype fell off the end and returned None,
    # which Django rejects; return an explicit error page instead.
    return render_to_response('error.html',
                              {'data': request,
                               'error': "Invalid file type."},
                              RequestContext(request))
def generate_counts_jtable(request, option):
    """
    Generate the jtable data for counts.

    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """
    # Only the 'jtlist' action is supported for counts.
    if option != "jtlist":
        return render_to_response('error.html',
                                  {'data': request,
                                   'error': "Invalid request"},
                                  RequestContext(request))
    collection = mongo_connector(settings.COL_COUNTS)
    counts_doc = collection.find_one({'name': 'counts'})
    records = []
    if counts_doc:
        # One jtable row per count, sorted by type name; id/url are unused
        # placeholders expected by the jtable widget.
        records = [{'type': count_name,
                    'count': total,
                    'id': 0,
                    'url': ""}
                   for count_name, total in sorted(counts_doc['counts'].items())]
    payload = {'Result': "OK", 'Records': records}
    return HttpResponse(json.dumps(payload, default=json_handler),
                        content_type="application/json")
def generate_audit_jtable(request, option):
    """
    Generate the jtable data for audit log entries.

    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """
    obj_type = AuditLog
    type_ = "audit"
    if option == "jtlist":
        # Sets display url
        details_url = 'crits.core.views.details'
        details_url_key = "target_id"
        response = jtable_ajax_list(obj_type,
                                    details_url,
                                    details_url_key,
                                    request)
        return HttpResponse(json.dumps(response,
                                       default=json_handler),
                            content_type="application/json")
    # Not a data request: build the jtable configuration for rendering.
    jtopts = {
        'title': "Audit Log Entries",
        'default_sort': "date DESC",
        'listurl': reverse('crits.core.views.%s_listing' % type_,
                           args=('jtlist',)),
        'deleteurl': '',
        'searchurl': reverse('crits.core.views.%s_listing' % type_),
        'fields': ["details",
                   "user",
                   "type",
                   "method",
                   "value",
                   "date",
                   "id"],
        'hidden_fields': ["id"],
        'linked_fields': [],
        'details_link': 'details',
        'no_sort': ['details', ],
    }
    jtable = build_jtable(jtopts, request)
    # The audit log has no toolbar actions (no add/upload buttons).
    jtable['toolbar'] = []
    if option == "inline":
        # Render just the table fragment for embedding in a tab.
        return render_to_response("jtable.html",
                                  {'jtable': jtable,
                                   'jtid': '%s_listing' % type_,
                                   'button': '%ss_tab' % type_},
                                  RequestContext(request))
    else:
        # Full standalone listing page.
        return render_to_response("%s_listing.html" % type_,
                                  {'jtable': jtable,
                                   'jtid': '%s_listing' % type_},
                                  RequestContext(request))
def details_from_id(type_, id_):
    """
    Determine the details URL based on type and ID and redirect there.

    :param type_: The CRITs type to search for.
    :type type_: str
    :param id_: The ObjectId to search for.
    :type id_: str
    :returns: str URL, or None if the type is unknown or the object
              (or its key attribute) cannot be found.
    """
    # Per-type details view.
    type_map = {'Actor': 'crits.actors.views.actor_detail',
                'Backdoor': 'crits.backdoors.views.backdoor_detail',
                'Campaign': 'crits.campaigns.views.campaign_details',
                'Certificate': 'crits.certificates.views.certificate_details',
                'Domain': 'crits.domains.views.domain_detail',
                'Email': 'crits.emails.views.email_detail',
                'Event': 'crits.events.views.view_event',
                'Exploit': 'crits.exploits.views.exploit_detail',
                'Indicator': 'crits.indicators.views.indicator',
                'IP': 'crits.ips.views.ip_detail',
                'PCAP': 'crits.pcaps.views.pcap_details',
                'RawData': 'crits.raw_data.views.raw_data_details',
                'Sample': 'crits.samples.views.detail',
                'Screenshot': 'crits.screenshots.views.render_screenshot',
                'Target': 'crits.targets.views.target_info',
                }
    # Types whose details view takes an attribute of the document rather
    # than its ObjectId; replaces the old duplicated elif chain.
    key_attr_map = {'Campaign': 'name',
                    'Certificate': 'md5',
                    'Domain': 'domain',
                    'IP': 'ip',
                    'PCAP': 'md5',
                    'Sample': 'md5',
                    'Target': 'email_address',
                    }
    if type_ not in type_map or not id_:
        return None
    attr = key_attr_map.get(type_)
    if attr is None:
        # Details view keys off the ObjectId itself.
        arg = id_
    else:
        # Look up the document and pull the keying attribute from it.
        obj = class_from_id(type_, id_)
        arg = getattr(obj, attr) if obj else None
    if not arg:
        return None
    return reverse(type_map[type_], args=(arg,))
def audit_entry(self, username, type_, new_doc=False):
    """
    Generate an audit entry.

    :param self: The object.
    :type self: class which inherits from
                :class:`crits.core.crits_mongoengine.CritsBaseAttributes`
    :param username: The user performing the action.
    :type username: str
    :param type_: The type of action being performed ("save", "delete").
    :type type_: str
    :param new_doc: If this is a new document being added to the database.
    :type new_doc: boolean
    """
    if username is None:
        # If no username, skip the audit log
        return
    my_type = self._meta['crits_type']
    # don't audit audits
    if my_type in ("AuditLog", "Service"):
        return
    # Keep only the top-level field name of each change; drop bookkeeping
    # entries ("modified"/"save"/"delete").
    changed_fields = [f.split('.')[0] for f in self._get_changed_fields() if f not in ("modified",
                                                                                      "save",
                                                                                      "delete")]
    # Remove any duplicate fields
    changed_fields = list(set(changed_fields))
    if new_doc and not changed_fields:
        what_changed = "new document"
    else:
        what_changed = ', '.join(changed_fields)
    # The key descriptor names the TLO's primary attribute (md5, domain, ...).
    key_descriptor = key_descriptor_from_obj_type(my_type)
    if key_descriptor is not None:
        value = getattr(self, key_descriptor, '')
    else:
        value = ""
    if type_ == "save":
        a = AuditLog()
        a.user = username
        a.target_type = my_type
        a.target_id = self.id
        # NOTE(review): the "save" entry records what changed; the key
        # `value` computed above is only used by the "delete" branch.
        a.value = what_changed
        a.method = "save()"
        try:
            a.save()
        except ValidationError:
            # Best effort: a malformed audit entry must not block the save.
            pass
    elif type_ == "delete":
        a = AuditLog()
        a.user = username
        a.target_type = my_type
        a.target_id = self.id
        a.value = value
        a.method = "delete()"
        try:
            a.save()
        except ValidationError:
            pass
    # Generate audit notification
    generate_audit_notification(username, type_, self, changed_fields, what_changed, new_doc)
def ticket_add(type_, id_, ticket):
"""
Add a ticket to a top-level object.
:param type_: The CRITs type of the top-level object.
:type type_: str
:param id_: The ObjectId to search for.
:type id_: str
:param ticket: The ticket to add.
:type ticket: dict with keys "analyst", "date", and "ticket_number".
:returns: dict with keys:
"success" (boolean),
"object" (str) if successful,
"message" (str) if failed.
"""
obj = class_from_id(type_, id_)
if not obj:
return {'success': False, 'message': 'Could not find object.'}
try:
obj.add_ticket(ticket['ticket_number'],
ticket['analyst'],
ticket['date'])
obj.save(username=ticket['analyst'])
return {'success': True, 'object': ticket}
except ValidationError, e:
return {'success': False, 'message': e}
def ticket_update(type_, id_, ticket):
"""
Update a ticket for a top-level object.
:param type_: The CRITs type of the top-level object.
:type type_: str
:param id_: The ObjectId to search for.
:type id_: str
:type ticket: dict with keys "analyst", "date", and "ticket_number".
:type ticket: str
:returns: dict with keys:
"success" (boolean),
"object" (str) if successful,
"message" (str) if failed.
"""
obj = class_from_id(type_, id_)
if not obj:
return {'success': False, 'message': 'Could not find object.'}
try:
obj.edit_ticket(ticket['analyst'],
ticket['ticket_number'],
ticket['date'])
obj.save(username=ticket['analyst'])
return {'success': True, 'object': ticket}
except ValidationError, e:
return {'success': False, 'message': e}
def ticket_remove(type_, id_, date, analyst):
"""
Remove a ticket from a top-level object.
:param type_: The CRITs type of the top-level object.
:type type_: str
:param id_: The ObjectId to search for.
:type id_: str
:param date: The date of the ticket to remove.
:type date: datetime.datetime.
:param analyst: The user removing the ticket.
:type analyst: str
:returns: dict with keys:
"success" (boolean),
"message" (str) if failed.
"""
obj = class_from_id(type_, id_)
if not obj:
return {'success': False, 'message': 'Could not find object.'}
try:
obj.delete_ticket(date)
obj.save(username=analyst)
return {'success': True}
except ValidationError, e:
return {'success': False, 'message': e}
def unflatten(dictionary):
    """
    Unflatten a dictionary whose keys use dotted paths into nested dicts.

    For example ``{'a.b': 1}`` becomes ``{'a': {'b': 1}}``.

    :param dictionary: The dictionary to unflatten.
    :type dictionary: dict
    :returns: dict
    """
    result = dict()
    # .items() behaves the same on Python 2 and 3; the old .iteritems()
    # call was Python-2-only.
    for key, value in dictionary.items():
        parts = key.split(".")
        node = result
        # Walk/create the intermediate dicts for every path component
        # except the last, which holds the value itself.
        for part in parts[:-1]:
            if part not in node:
                node[part] = dict()
            node = node[part]
        node[parts[-1]] = value
    return result
def alter_sector_list(obj, sectors, val):
    """
    Given a list of sectors on this object, increment or decrement
    the sectors objects accordingly. This is used when adding
    or removing a sector list to an item, and when deleting an item.

    :param obj: The top-level object instantiated class.
    :type obj: class which inherits from
               :class:`crits.core.crits_mongoengine.CritsBaseAttributes`.
    :param sectors: List of sectors.
    :type sectors: list
    :param val: The amount to change the count by.
    :type val: int
    """
    # This dictionary is used to set values on insert only.
    # I haven't found a way to get mongoengine to use the defaults
    # when doing update_one() on the queryset.
    soi = { k: 0 for k in Sector._meta['schema_doc'].keys() if k != 'name' and k != obj._meta['crits_type'] }
    soi['schema_version'] = Sector._meta['latest_schema_version']
    # We are using mongo_connector here because mongoengine does not have
    # support for a setOnInsert option. If mongoengine were to gain support
    # for this we should switch to using it instead of pymongo here.
    sectors_col = mongo_connector(settings.COL_SECTOR_LISTS)
    for name in sectors:
        sectors_col.update({'name': name},
                           {'$inc': {obj._meta['crits_type']: val},
                            '$setOnInsert': soi},
                           upsert=True)
        # Find and remove this sector if, and only if, all counts are zero.
        # Backdoor and Exploit were previously missing from this query, so
        # a sector whose only nonzero counts were Backdoor/Exploit could be
        # deleted while still in use.
        if val == -1:
            Sector.objects(name=name,
                           Actor=0,
                           Backdoor=0,
                           Campaign=0,
                           Certificate=0,
                           Domain=0,
                           Email=0,
                           Event=0,
                           Exploit=0,
                           Indicator=0,
                           IP=0,
                           PCAP=0,
                           RawData=0,
                           Sample=0,
                           Target=0).delete()
def generate_sector_csv(request):
    """
    Generate CSV output for the Sector list.

    :param request: The request for this CSV.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponse`
    """
    # Delegate to the generic CSV exporter for the Sector collection.
    return csv_export(request, Sector)
def generate_sector_jtable(request, option):
    """
    Generate the jtable data for rendering in the sector list template.

    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """
    if option == 'jtlist':
        details_url = 'crits.core.views.sector_list'
        details_key = 'name'
        # One column per TLO type carrying that type's count for the sector.
        response = jtable_ajax_list(Sector,
                                    details_url,
                                    details_key,
                                    request,
                                    includes=['name',
                                              'Actor',
                                              'Backdoor',
                                              'Campaign',
                                              'Certificate',
                                              'Domain',
                                              'Email',
                                              'Event',
                                              'Exploit',
                                              'Indicator',
                                              'IP',
                                              'PCAP',
                                              'RawData',
                                              'Sample',
                                              'Target'])
        return HttpResponse(json.dumps(response, default=json_handler),
                            content_type='application/json')
    fields = ['name', 'Actor', 'Backdoor', 'Campaign', 'Certificate', 'Domain',
              'Email', 'Event', 'Exploit', 'Indicator', 'IP', 'PCAP', 'RawData',
              'Sample', 'Target']
    jtopts = {'title': 'Sectors',
              'fields': fields,
              'listurl': 'jtlist',
              'searchurl': reverse('crits.core.views.global_search_listing'),
              'default_sort': 'name ASC',
              'no_sort': [],
              'details_link': ''}
    jtable = build_jtable(jtopts, request)
    # Attach a per-column display function so each count links to the
    # matching listing filtered by this sector.
    for ctype in fields:
        if ctype == 'id':
            continue
        elif ctype == 'name':
            url = reverse('crits.core.views.global_search_listing') + '?search_type=sectors&search=Search&force_full=1'
        else:
            lower = ctype.lower()
            # RawData's app path uses an underscore, unlike the other types.
            if lower != "rawdata":
                url = reverse('crits.%ss.views.%ss_listing' % (lower, lower))
            else:
                lower = "raw_data"
                url = reverse('crits.%s.views.%s_listing' % (lower, lower))
        for field in jtable['fields']:
            # Field names are quoted strings, hence the leading "'".
            if field['fieldname'].startswith("'" + ctype):
                if ctype == 'name':
                    field['display'] = """ function (data) {
return '<a href="%s&q='+encodeURIComponent(data.record.name)+'">' + data.record.name + '</a>';
}
""" % url
                else:
                    field['display'] = """ function (data) {
return '<a href="%s?sectors='+encodeURIComponent(data.record.name)+'">'+data.record.%s+'</a>';
}
""" % (url, ctype)
    return render_to_response('sector_lists.html',
                              {'jtable': jtable,
                               'jtid': 'sector_lists'},
                              RequestContext(request))
def modify_sector_list(itype, oid, sectors, analyst):
    """
    Modify the sector list for a top-level object.

    :param itype: The CRITs type of the top-level object to modify.
    :type itype: str
    :param oid: The ObjectId to search for.
    :type oid: str
    :param sectors: The list of sectors.
    :type sectors: list
    :param analyst: The user making the modifications.
    """
    tlo = class_from_id(itype, oid)
    if not tlo:
        return
    # append=False replaces the object's existing sector list outright.
    tlo.add_sector_list(sectors, analyst, append=False)
    try:
        tlo.save(username=analyst)
    except ValidationError:
        # Best-effort save; validation failures are silently ignored here.
        pass
def get_bucket_autocomplete(term):
    """
    Get existing buckets to autocomplete.

    :param term: The current term to look for autocomplete options.
    :type term: str
    :returns: :class:`django.http.HttpResponse` whose body is a JSON list
              of matching bucket names.
    """
    # Case-insensitive prefix match against existing bucket names.
    results = Bucket.objects(name__istartswith=term)
    buckets = [b.name for b in results]
    return HttpResponse(json.dumps(buckets, default=json_handler),
                        content_type='application/json')
| {
"content_hash": "3198ecb244926c449c79ac092596659d",
"timestamp": "",
"source": "github",
"line_count": 3814,
"max_line_length": 325,
"avg_line_length": 38.39407446250655,
"alnum_prop": 0.5378085840133848,
"repo_name": "Lambdanaut/crits",
"id": "b7f3d5809519370acbc4314aaf1383f8f29d4e10",
"size": "146435",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "crits/core/handlers.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "8694"
},
{
"name": "CSS",
"bytes": "390510"
},
{
"name": "HTML",
"bytes": "456214"
},
{
"name": "JavaScript",
"bytes": "3476952"
},
{
"name": "Python",
"bytes": "1870511"
},
{
"name": "Shell",
"bytes": "18052"
}
],
"symlink_target": ""
} |
"""So much practical programming involves string manipulation, which
Python readily accomodates. Still, there are dozens of basic and
common capabilities missing from the standard library, several of them
provided by ``strutils``.
"""
from __future__ import print_function
import re
import sys
import uuid
import zlib
import string
import unicodedata
import collections
try:
unicode, str, bytes, basestring = unicode, str, str, basestring
from HTMLParser import HTMLParser
import htmlentitydefs
except NameError: # basestring not defined in Python 3
unicode, str, bytes, basestring = str, bytes, bytes, (str, bytes)
unichr = chr
from html.parser import HTMLParser
from html import entities as htmlentitydefs
# Public API of this module.
__all__ = ['camel2under', 'under2camel', 'slugify', 'split_punct_ws',
           'unit_len', 'ordinalize', 'cardinalize', 'pluralize', 'singularize',
           'asciify', 'is_ascii', 'is_uuid', 'html2text', 'strip_ansi',
           'bytes2human', 'find_hashtags', 'a10n', 'gunzip_bytes',
           'iter_splitlines', 'indent', 'escape_shell_args',
           'args2cmd', 'args2sh', 'parse_int_list', 'format_int_list']


_punct_ws_str = string.punctuation + string.whitespace
# Matches one-or-more punctuation/whitespace characters; used by
# split_punct_ws() to break text into word runs.
_punct_re = re.compile('[' + _punct_ws_str + ']+')
# Matches CamelCase word boundaries: an uppercase letter preceded by a
# lowercase letter/digit, or (not at the start) followed by a lowercase letter.
_camel2under_re = re.compile('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))')
def camel2under(camel_string):
    """Convert a CamelCase string to snake_case. Useful for turning a
    class name into a function name.

    >>> camel2under('BasicParseTest')
    'basic_parse_test'
    """
    # Insert an underscore at each detected word boundary, then lowercase.
    underscored = _camel2under_re.sub(r'_\1', camel_string)
    return underscored.lower()
def under2camel(under_string):
    """Convert an underscore-separated string to CamelCase. Useful for
    turning a function name into a class name.

    >>> under2camel('complex_tokenizer')
    'ComplexTokenizer'
    """
    # Empty segments (from consecutive underscores) keep a literal '_',
    # since ''.capitalize() is falsy.
    pieces = [segment.capitalize() or '_'
              for segment in under_string.split('_')]
    return ''.join(pieces)
def slugify(text, delim='_', lower=True, ascii=False):
    """
    A basic function that turns text full of scary characters
    (i.e., punctuation and whitespace), into a relatively safe
    lowercased string separated only by the delimiter specified
    by *delim*, which defaults to ``_``.

    The *ascii* convenience flag will :func:`asciify` the slug if
    you require ascii-only slugs.

    >>> slugify('First post! Hi!!!!~1    ')
    'first_post_hi_1'

    >>> slugify("Kurt Gödel's pretty cool.", ascii=True) == \
        b'kurt_goedel_s_pretty_cool'
    True
    """
    words = split_punct_ws(text)
    slug = delim.join(words)
    # asciify first, then lowercase, matching the documented examples.
    if ascii:
        slug = asciify(slug)
    if lower:
        slug = slug.lower()
    return slug
def split_punct_ws(text):
    """While :meth:`str.split` will split on whitespace,
    :func:`split_punct_ws` will split on punctuation and
    whitespace. This is used internally by :func:`slugify`, above.

    >>> split_punct_ws('First post! Hi!!!!~1    ')
    ['First', 'post', 'Hi', '1']
    """
    pieces = _punct_re.split(text)
    # Drop the empty strings produced by leading/trailing separators.
    return [piece for piece in pieces if piece]
def unit_len(sized_iterable, unit_noun='item'):  # TODO: len_units()/unitize()?
    """Returns a plain-English description of an iterable's
    :func:`len()`, conditionally pluralized with :func:`cardinalize`,
    detailed below.

    >>> print(unit_len(range(10), 'number'))
    10 numbers
    >>> print(unit_len('aeiou', 'vowel'))
    5 vowels
    >>> print(unit_len([], 'worry'))
    No worries
    """
    total = len(sized_iterable)
    noun = cardinalize(unit_noun, total)
    # Zero reads better as "No <plural>" than "0 <plural>".
    if not total:
        return u'No %s' % (noun,)
    return u'%s %s' % (total, noun)
# Maps a number's final digit to its ordinal suffix; everything else
# (including teens, handled separately in ordinalize) gets 'th'.
_ORDINAL_MAP = {'1': 'st',
                '2': 'nd',
                '3': 'rd'}  # 'th' is the default
def ordinalize(number, ext_only=False):
    """Turns *number* into its ordinal form, i.e., 1st, 2nd,
    3rd, 4th, etc. If the last character isn't a digit, it returns the
    string value unchanged.

    Args:
        number (int or str): Number to be ordinalized.
        ext_only (bool): Whether to return only the suffix. Default ``False``.

    >>> print(ordinalize(1))
    1st
    >>> print(ordinalize(3694839230))
    3694839230th
    >>> print(ordinalize('hi'))
    hi
    >>> print(ordinalize(1515))
    1515th
    """
    numstr = unicode(number)
    ext = ''
    if numstr and numstr[-1] in string.digits:
        # Teens (11, 12, 13 — and e.g. 1511) always take 'th'; a length
        # check replaces the original IndexError-based probe for a
        # second-to-last character.
        if len(numstr) > 1 and numstr[-2] == '1':
            ext = 'th'
        else:
            ext = _ORDINAL_MAP.get(numstr[-1], 'th')
    if ext_only:
        return ext
    return numstr + ext
def cardinalize(unit_noun, count):
    """Conditionally pluralizes a singular word *unit_noun* if
    *count* is not one, preserving case when possible.

    >>> vowels = 'aeiou'
    >>> print(len(vowels), cardinalize('vowel', len(vowels)))
    5 vowels
    >>> print(3, cardinalize('Wish', 3))
    3 Wishes
    """
    # Exactly one keeps the singular; everything else (including 0) pluralizes.
    return unit_noun if count == 1 else pluralize(unit_noun)
def singularize(word):
    """Semi-intelligently converts an English plural *word* to its
    singular form, preserving case pattern.

    >>> singularize('records')
    'record'
    >>> singularize('FEET')
    'FOOT'
    """
    orig_word = word
    word = word.strip().lower()
    # Empty input, or a word that is already a known irregular singular,
    # passes through untouched.
    if not word or word in _IRR_S2P:
        return orig_word
    singular = _IRR_P2S.get(word)
    if singular is None:
        if not word.endswith('s'):
            # No trailing 's': heuristically already singular.
            return orig_word
        if len(word) == 2:
            singular = word[:-1]  # or just return word?
        elif word.endswith('ies') and word[-5:-4] not in 'aeiou':
            # e.g. 'enemies' -> 'enemy', but not 'movies' -> 'movy'.
            singular = word[:-3] + 'y'
        elif word.endswith('es'):
            singular = word[:-2]
        else:
            singular = word[:-1]
    return _match_case(orig_word, singular)
def pluralize(word):
    """Semi-intelligently converts an English *word* from singular form to
    plural, preserving case pattern.

    >>> pluralize('friend')
    'friends'
    >>> pluralize('enemy')
    'enemies'
    >>> pluralize('Sheep')
    'Sheep'
    """
    orig_word = word
    word = word.strip().lower()
    # Empty input, or a word that is already a known irregular plural,
    # passes through untouched.
    if not word or word in _IRR_P2S:
        return orig_word
    plural = _IRR_S2P.get(word)
    if plural is None:
        if word.endswith('y') and word[-2:-1] not in 'aeiou':
            # e.g. 'enemy' -> 'enemies', but not 'day' -> 'daies'.
            plural = word[:-1] + 'ies'
        elif word[-1] == 's' or word.endswith(('ch', 'sh')):
            plural = word if word.endswith('es') else word + 'es'
        else:
            plural = word + 's'
    return _match_case(orig_word, plural)
def _match_case(master, disciple):
    # Mirror *master*'s case pattern (all-lower, all-upper, or
    # capitalized) onto *disciple*. Mixed-case or whitespace-only
    # masters leave *disciple* untouched.
    if not master.strip():
        return disciple
    if master == master.lower():
        return disciple.lower()
    if master == master.upper():
        return disciple.upper()
    if master == master.capitalize():
        return disciple.capitalize()
    return disciple
# Singular to plural map of irregular pluralizations
_IRR_S2P = {'addendum': 'addenda', 'alga': 'algae', 'alumna': 'alumnae',
            'alumnus': 'alumni', 'analysis': 'analyses', 'antenna': 'antennae',
            'appendix': 'appendices', 'axis': 'axes', 'bacillus': 'bacilli',
            'bacterium': 'bacteria', 'basis': 'bases', 'beau': 'beaux',
            'bison': 'bison', 'bureau': 'bureaus', 'cactus': 'cacti',
            'calf': 'calves', 'child': 'children', 'corps': 'corps',
            'corpus': 'corpora', 'crisis': 'crises', 'criterion': 'criteria',
            'curriculum': 'curricula', 'datum': 'data', 'deer': 'deer',
            'diagnosis': 'diagnoses', 'die': 'dice', 'dwarf': 'dwarves',
            'echo': 'echoes', 'elf': 'elves', 'ellipsis': 'ellipses',
            'embargo': 'embargoes', 'emphasis': 'emphases', 'erratum': 'errata',
            'fireman': 'firemen', 'fish': 'fish', 'focus': 'foci',
            # bug fix: 'formula' used to appear twice ('formulae' then
            # 'formulas'); the first entry was dead. Kept the effective one.
            'foot': 'feet', 'formula': 'formulas',
            'fungus': 'fungi', 'genus': 'genera', 'goose': 'geese',
            'half': 'halves', 'hero': 'heroes', 'hippopotamus': 'hippopotami',
            'hoof': 'hooves', 'hypothesis': 'hypotheses', 'index': 'indices',
            'knife': 'knives', 'leaf': 'leaves', 'life': 'lives',
            'loaf': 'loaves', 'louse': 'lice', 'man': 'men',
            'matrix': 'matrices', 'means': 'means', 'medium': 'media',
            # bug fix: 'milennia' -> 'millennia' (misspelled plural)
            'memorandum': 'memoranda', 'millennium': 'millennia',
            'moose': 'moose',
            'mosquito': 'mosquitoes', 'mouse': 'mice', 'nebula': 'nebulae',
            'neurosis': 'neuroses', 'nucleus': 'nuclei', 'oasis': 'oases',
            'octopus': 'octopi', 'offspring': 'offspring', 'ovum': 'ova',
            'ox': 'oxen', 'paralysis': 'paralyses', 'parenthesis': 'parentheses',
            'person': 'people', 'phenomenon': 'phenomena', 'potato': 'potatoes',
            'radius': 'radii', 'scarf': 'scarves', 'scissors': 'scissors',
            'self': 'selves', 'series': 'series', 'sheep': 'sheep',
            'shelf': 'shelves', 'species': 'species', 'stimulus': 'stimuli',
            'stratum': 'strata', 'syllabus': 'syllabi', 'symposium': 'symposia',
            'synopsis': 'synopses', 'synthesis': 'syntheses', 'tableau': 'tableaux',
            'that': 'those', 'thesis': 'theses', 'thief': 'thieves',
            'this': 'these', 'tomato': 'tomatoes', 'tooth': 'teeth',
            'torpedo': 'torpedoes', 'vertebra': 'vertebrae', 'veto': 'vetoes',
            'vita': 'vitae', 'watch': 'watches', 'wife': 'wives',
            'wolf': 'wolves', 'woman': 'women'}


# Reverse index of the above
_IRR_P2S = dict([(v, k) for k, v in _IRR_S2P.items()])
HASHTAG_RE = re.compile(r"(?:^|\s)[##]{1}(\w+)", re.UNICODE)


def find_hashtags(string):
    """Return every hashtag found in *string*, with the hashmark
    stripped. Both the ASCII '#' and the full-width hashmark used in
    Asian-language text are recognized. A tag must follow the start of
    the string or whitespace, so URL anchors are not false positives.

    >>> find_hashtags('#atag http://asite/#ananchor')
    ['atag']

    ``find_hashtags`` also works with unicode hashtags.
    """
    # the following works, doctest just struggles with it
    # >>> find_hashtags(u"can't get enough of that dignity chicken #肯德基 woo")
    # [u'\u80af\u5fb7\u57fa']
    tags = HASHTAG_RE.findall(string)
    return tags
def a10n(string):
    """That thing where "internationalization" becomes "i18n", what's it
    called? Abbreviation? Oh wait, no: ``a10n``. (It's actually a form
    of `numeronym`_.)

    >>> a10n('abbreviation')
    'a10n'
    >>> a10n('internationalization')
    'i18n'
    >>> a10n('')
    ''

    .. _numeronym: http://en.wikipedia.org/wiki/Numeronym
    """
    if len(string) < 3:
        # one- and two-character strings have no interior to abbreviate
        return string
    interior_count = len(string) - 2
    return string[0] + str(interior_count) + string[-1]
class StringBuffer(object):
    """
    This is meant to be a better file-like string buffer.
    Faster than StringIO, better encoding handling than cStringIO.
    This one is for unicode text strings. Look for ByteBuffer if you
    want to handle byte strings.
    (NOTE: not quite done yet)
    """
    def __init__(self, default_encoding=None, errors='strict'):
        # Chunks are accumulated in a deque and only joined in
        # getvalue(), avoiding quadratic string concatenation.
        self.data = collections.deque()
        self.default_encoding = default_encoding or 'utf-8'
        self.errors = errors
    def write(self, s):
        # Accept unicode text directly; byte strings are decoded using
        # the buffer's default encoding/error policy. (Python 2 idiom:
        # the `unicode` name does not exist on Python 3.)
        if not isinstance(s, unicode):
            enc = self.default_encoding
            errs = self.errors
            try:
                s = s.decode(enc, errs)
            except AttributeError:
                # no .decode() method -> not a string-like argument at all
                raise ValueError('write() expected a unicode or byte string')
        self.data.append(s)
    def truncate(self):
        # Drop all buffered data. NOTE(review): this also rebinds
        # self.write directly to deque.append, which bypasses the
        # decode/validation logic above for all subsequent writes --
        # presumably a fast path, but confirm it is intentional.
        self.data = collections.deque()
        self.write = self.data.append
    def getvalue(self):
        # Join all buffered chunks into one unicode string.
        return unicode().join(self.data)
ANSI_ESCAPE_BEGIN = '\x1b['  # CSI prefix: starts an ANSI escape sequence
# Final characters that terminate the escape sequences this scanner knows
ANSI_TERMINATORS = ('H', 'f', 'A', 'B', 'C', 'D', 'R', 's', 'u', 'J',
                    'K', 'h', 'l', 'p', 'm')
def strip_ansi(text):
    """Strips ANSI escape codes from *text*. Useful for the occasional
    time when a log or redirected output accidentally captures console
    color codes and the like.
    >>> strip_ansi('\x1b[0m\x1b[1;36mart\x1b[46;34m\xdc')
    'art'
    The test above is an excerpt from ANSI art on
    `sixteencolors.net`_. This function does not interpret or render
    ANSI art, but you can do so with `ansi2img`_ or `escapes.js`_.
    .. _sixteencolors.net: http://sixteencolors.net
    .. _ansi2img: http://www.bedroomlan.org/projects/ansi2img
    .. _escapes.js: https://github.com/atdt/escapes.js
    """
    # TODO: move to cliutils.py
    # Scanner state: `keep` is True while in plain text, False while
    # skipping over an escape sequence; `nansi` collects kept spans.
    nansi, keep, i, text_len = [], True, 0, len(text)
    while i < text_len:
        if not keep and text[i] in ANSI_TERMINATORS:
            # end of an escape sequence; resume keeping text
            keep = True
        elif keep:
            keep_end_i = text.find(ANSI_ESCAPE_BEGIN, i)
            if keep_end_i < 0:
                # NOTE(review): no further escapes -- the loop exits here
                # WITHOUT collecting text[i:], so plain text after the
                # final escape sequence is dropped (consistent with the
                # doctest, where the trailing '\xdc' disappears).
                # Confirm this is the intended behavior.
                break
            else:
                nansi.append(text[i:keep_end_i])
                i, keep = keep_end_i, False
        i += 1
    if not nansi:
        # no escape sequences were found; return input unchanged
        return text
    return type(text)().join(nansi)  # attempted unicode + str support
def asciify(text, ignore=False):
    """Converts a unicode or bytestring, *text*, into a bytestring of
    plain ascii characters, performing basic deaccenting for Latin
    accents along the way.

    This is a **utility**, primarily meant for slugification; whenever
    possible, make your application work **with** unicode, not against
    it.

    Args:
        text (str or unicode): The string to be asciified.
        ignore (bool): Configures final encoding to ignore remaining
            unasciified unicode instead of replacing it.

    >>> asciify('Beyoncé') == b'Beyonce'
    True
    """
    try:
        try:
            # fast path: already pure ascii
            return text.encode('ascii')
        except UnicodeDecodeError:
            # this usually means a non-unicode (byte) string was passed in
            text = text.decode('utf-8')
            return text.encode('ascii')
    except UnicodeEncodeError:
        # non-ascii content: deaccent, decompose, then force to ascii
        errors = 'ignore' if ignore else 'replace'
        normalized = unicodedata.normalize('NFKD', text.translate(DEACCENT_MAP))
        return normalized.encode('ascii', errors)
def is_ascii(text):
    """Check whether a unicode or bytestring, *text*, contains only
    ascii characters. Raises :exc:`ValueError` for non-text arguments.

    Args:
        text (str or unicode): The string to be checked.

    >>> is_ascii('Beyoncé')
    False
    >>> is_ascii('Beyonce')
    True
    """
    # Text is probed by round-tripping through the 'ascii' codec.
    if isinstance(text, unicode):
        try:
            text.encode('ascii')
        except UnicodeEncodeError:
            return False
        return True
    if isinstance(text, bytes):
        try:
            text.decode('ascii')
        except UnicodeDecodeError:
            return False
        return True
    raise ValueError('expected text or bytes, not %r' % type(text))
class DeaccenterDict(dict):
    "A small caching dictionary for deaccenting."
    def __missing__(self, key):
        # *key* is a codepoint ordinal, as supplied by unicode.translate().
        ch = self.get(key)
        if ch is not None:
            return ch
        try:
            # Canonical decomposition string, e.g. u'é' -> '0065 0301'
            # (base character followed by combining mark, hex ordinals).
            de = unicodedata.decomposition(unichr(key))
            p1, _, p2 = de.rpartition(' ')
            if int(p2, 16) == 0x308:
                # 0x308 is COMBINING DIAERESIS. NOTE(review): this caches
                # self.get(key) -- i.e. None for unmapped umlauts --
                # presumably deferring them to the explicit two-letter
                # entries in the base map ('ae', 'oe', 'ue'); confirm.
                ch = self.get(key)
            else:
                # otherwise strip the combining mark: map to the base char
                ch = int(p1, 16)
        except (IndexError, ValueError):
            # no usable decomposition: map the codepoint to itself
            ch = self.get(key, key)
        # memoize so repeated translations are a plain dict hit
        self[key] = ch
        return ch
    try:
        from collections import defaultdict
    except ImportError:
        # no defaultdict means that __missing__ isn't supported in
        # this version of python, so we define __getitem__
        def __getitem__(self, key):
            try:
                return super(DeaccenterDict, self).__getitem__(key)
            except KeyError:
                return self.__missing__(key)
    else:
        del defaultdict
# http://chmullig.com/2009/12/python-unicode-ascii-ifier/
# For something more complete, investigate the unidecode
# or isounidecode packages, which are capable of performing
# crude transliteration.
# Maps Unicode codepoint ordinals to ASCII replacement strings, for use
# with unicode.translate() (wrapped by DeaccenterDict above for caching).
_BASE_DEACCENT_MAP = {
    0xc6: u"AE", # Æ LATIN CAPITAL LETTER AE
    0xd0: u"D", # Ð LATIN CAPITAL LETTER ETH
    0xd8: u"OE", # Ø LATIN CAPITAL LETTER O WITH STROKE
    0xde: u"Th", # Þ LATIN CAPITAL LETTER THORN
    0xc4: u'Ae', # Ä LATIN CAPITAL LETTER A WITH DIAERESIS
    0xd6: u'Oe', # Ö LATIN CAPITAL LETTER O WITH DIAERESIS
    0xdc: u'Ue', # Ü LATIN CAPITAL LETTER U WITH DIAERESIS
    0xc0: u"A", # À LATIN CAPITAL LETTER A WITH GRAVE
    0xc1: u"A", # Á LATIN CAPITAL LETTER A WITH ACUTE
    0xc3: u"A", # Ã LATIN CAPITAL LETTER A WITH TILDE
    0xc7: u"C", # Ç LATIN CAPITAL LETTER C WITH CEDILLA
    0xc8: u"E", # È LATIN CAPITAL LETTER E WITH GRAVE
    0xc9: u"E", # É LATIN CAPITAL LETTER E WITH ACUTE
    0xca: u"E", # Ê LATIN CAPITAL LETTER E WITH CIRCUMFLEX
    0xcc: u"I", # Ì LATIN CAPITAL LETTER I WITH GRAVE
    0xcd: u"I", # Í LATIN CAPITAL LETTER I WITH ACUTE
    0xd2: u"O", # Ò LATIN CAPITAL LETTER O WITH GRAVE
    0xd3: u"O", # Ó LATIN CAPITAL LETTER O WITH ACUTE
    0xd5: u"O", # Õ LATIN CAPITAL LETTER O WITH TILDE
    0xd9: u"U", # Ù LATIN CAPITAL LETTER U WITH GRAVE
    0xda: u"U", # Ú LATIN CAPITAL LETTER U WITH ACUTE
    0xdf: u"ss", # ß LATIN SMALL LETTER SHARP S
    0xe6: u"ae", # æ LATIN SMALL LETTER AE
    0xf0: u"d", # ð LATIN SMALL LETTER ETH
    0xf8: u"oe", # ø LATIN SMALL LETTER O WITH STROKE
    0xfe: u"th", # þ LATIN SMALL LETTER THORN,
    0xe4: u'ae', # ä LATIN SMALL LETTER A WITH DIAERESIS
    0xf6: u'oe', # ö LATIN SMALL LETTER O WITH DIAERESIS
    0xfc: u'ue', # ü LATIN SMALL LETTER U WITH DIAERESIS
    0xe0: u"a", # à LATIN SMALL LETTER A WITH GRAVE
    0xe1: u"a", # á LATIN SMALL LETTER A WITH ACUTE
    0xe3: u"a", # ã LATIN SMALL LETTER A WITH TILDE
    0xe7: u"c", # ç LATIN SMALL LETTER C WITH CEDILLA
    0xe8: u"e", # è LATIN SMALL LETTER E WITH GRAVE
    0xe9: u"e", # é LATIN SMALL LETTER E WITH ACUTE
    0xea: u"e", # ê LATIN SMALL LETTER E WITH CIRCUMFLEX
    0xec: u"i", # ì LATIN SMALL LETTER I WITH GRAVE
    0xed: u"i", # í LATIN SMALL LETTER I WITH ACUTE
    0xf2: u"o", # ò LATIN SMALL LETTER O WITH GRAVE
    0xf3: u"o", # ó LATIN SMALL LETTER O WITH ACUTE
    0xf5: u"o", # õ LATIN SMALL LETTER O WITH TILDE
    0xf9: u"u", # ù LATIN SMALL LETTER U WITH GRAVE
    0xfa: u"u", # ú LATIN SMALL LETTER U WITH ACUTE
    0x2018: u"'", # ‘ LEFT SINGLE QUOTATION MARK
    0x2019: u"'", # ’ RIGHT SINGLE QUOTATION MARK
    0x201c: u'"', # “ LEFT DOUBLE QUOTATION MARK
    0x201d: u'"', # ” RIGHT DOUBLE QUOTATION MARK
    }
DEACCENT_MAP = DeaccenterDict(_BASE_DEACCENT_MAP)
_SIZE_SYMBOLS = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
_SIZE_BOUNDS = [(1024 ** i, sym) for i, sym in enumerate(_SIZE_SYMBOLS)]
_SIZE_RANGES = list(zip(_SIZE_BOUNDS, _SIZE_BOUNDS[1:]))
def bytes2human(nbytes, ndigits=0):
    """Format an integer byte count *nbytes* as a short human-readable
    string. *ndigits* controls the number of digits shown after the
    decimal point (default ``0``).

    >>> bytes2human(128991)
    '126K'
    >>> bytes2human(100001221)
    '95M'
    >>> bytes2human(0, 2)
    '0.00B'
    """
    magnitude = abs(nbytes)
    # Pick the first bracket whose upper bound contains the value;
    # anything past the largest bracket just reuses it.
    chosen = _SIZE_RANGES[-1]
    for candidate in _SIZE_RANGES:
        if magnitude <= candidate[1][0]:
            chosen = candidate
            break
    (size, symbol), _next = chosen
    scaled = float(nbytes) / size
    return '{0:.{1}f}{2}'.format(scaled, ndigits, symbol)
class HTMLTextExtractor(HTMLParser):
    # Collects the text content of an HTML document while discarding
    # markup; used by html2text() below.
    def __init__(self):
        # NOTE(review): resets parser state directly instead of calling
        # HTMLParser.__init__; `strict`/`convert_charrefs` are set for
        # cross-version HTMLParser compatibility -- confirm against the
        # targeted Python versions.
        self.reset()
        self.strict = False
        self.convert_charrefs = True
        self.result = []
    def handle_data(self, d):
        # plain text between tags
        self.result.append(d)
    def handle_charref(self, number):
        # numeric character reference, e.g. &#xA9; or &#169;
        if number[0] == u'x' or number[0] == u'X':
            codepoint = int(number[1:], 16)
        else:
            codepoint = int(number)
        self.result.append(unichr(codepoint))
    def handle_entityref(self, name):
        # named entity, e.g. &amp;; unknown names are kept verbatim
        try:
            codepoint = htmlentitydefs.name2codepoint[name]
        except KeyError:
            self.result.append(u'&' + name + u';')
        else:
            self.result.append(unichr(codepoint))
    def get_text(self):
        # all collected fragments joined into the extracted text
        return u''.join(self.result)
def html2text(html):
    """Strip tags from HTML text, returning markup-free text. Entities
    like " " are replaced on a best-effort basis.

    >>> r = html2text(u'<a href="#">Test &<em>(\u0394ημώ)</em></a>')
    >>> r == u'Test &(\u0394\u03b7\u03bc\u03ce)'
    True
    """
    # based on answers to http://stackoverflow.com/questions/753052/
    extractor = HTMLTextExtractor()
    extractor.feed(html)
    return extractor.get_text()
_EMPTY_GZIP_BYTES = b'\x1f\x8b\x08\x089\xf3\xb9U\x00\x03empty\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00'
_NON_EMPTY_GZIP_BYTES = b'\x1f\x8b\x08\x08\xbc\xf7\xb9U\x00\x03not_empty\x00K\xaa,I-N\xcc\xc8\xafT\xe4\x02\x00\xf3nb\xbf\x0b\x00\x00\x00'
def gunzip_bytes(bytestring):
    """Decompress gzip data held directly in *bytestring*, without any
    file or file-like object. Simpler and typically faster than going
    through StringIO plus the :mod:`gzip` module.

    >>> gunzip_bytes(_EMPTY_GZIP_BYTES) == b''
    True
    >>> gunzip_bytes(_NON_EMPTY_GZIP_BYTES).rstrip() == b'bytesahoy!'
    True
    """
    # wbits of 16 + MAX_WBITS tells zlib to expect a gzip header/trailer
    gzip_wbits = 16 + zlib.MAX_WBITS
    return zlib.decompress(bytestring, gzip_wbits)
# Recognized line terminators: CRLF, LF, VT, FF, CR, NEL (U+0085),
# LINE SEPARATOR (U+2028) and PARAGRAPH SEPARATOR (U+2029).
# Bug fix: this must be a NON-raw literal so '\u2028'/'\u2029' become the
# actual characters. In the previous raw pattern, re parsed r'\x2028' as
# '\x20' (a space) followed by the literal digits '28', so the text
# " 28" was wrongly treated as a line ending.
_line_ending_re = re.compile(u'(\r\n|\n|\x0b|\f|\r|\x85|\u2028|\u2029)',
                             re.UNICODE)
def iter_splitlines(text):
    r"""Like :meth:`str.splitlines`, but returns an iterator of lines
    instead of a list. Also similar to :meth:`file.next`, as that also
    lazily reads and yields lines from a file.

    This function works with a variety of line endings, but as always,
    be careful when mixing line endings within a file.

    >>> list(iter_splitlines('\nhi\nbye\n'))
    ['', 'hi', 'bye', '']
    >>> list(iter_splitlines('\r\nhi\rbye\r\n'))
    ['', 'hi', 'bye', '']
    >>> list(iter_splitlines(''))
    []
    """
    prev_end, len_text = 0, len(text)
    for match in _line_ending_re.finditer(text):
        start, end = match.start(1), match.end(1)
        if prev_end <= start:
            yield text[prev_end:start]
        if end == len_text:
            # text ends with a terminator -> trailing empty line
            yield ''
        prev_end = end
    # unterminated final line, if any
    tail = text[prev_end:]
    if tail:
        yield tail
    return
def indent(text, margin, newline='\n', key=bool):
    """The missing counterpart to the built-in :func:`textwrap.dedent`.

    Args:
        text (str): The text to indent.
        margin (str): The string to prepend to each line.
        newline (str): The newline used to rejoin the lines (default: ``\\n``)
        key (callable): Called on each line to determine whether to
            indent it. Default: :class:`bool`, to ensure that empty lines do
            not get whitespace added.
    """
    # Only lines accepted by `key` receive the margin, so by default
    # blank lines pick up no trailing whitespace.
    out = []
    for line in iter_splitlines(text):
        out.append(margin + line if key(line) else line)
    return newline.join(out)
def is_uuid(obj, version=4):
    """Check the argument is either a valid UUID object or string.

    Args:
        obj (object): The test target. Strings and UUID objects supported.
        version (int): The target UUID version, set to 0 to skip version check.

    >>> is_uuid('e682ccca-5a4c-4ef2-9711-73f9ad1e15ea')
    True
    >>> is_uuid('0221f0d9-d4b9-11e5-a478-10ddb1c2feb9')
    False
    >>> is_uuid('0221f0d9-d4b9-11e5-a478-10ddb1c2feb9', version=1)
    True
    """
    if not isinstance(obj, uuid.UUID):
        # coerce strings (or anything uuid.UUID accepts); unparseable
        # values are simply not UUIDs
        try:
            obj = uuid.UUID(obj)
        except (TypeError, ValueError, AttributeError):
            return False
    # version=0 (falsy) disables the version comparison entirely
    return not (version and obj.version != int(version))
def escape_shell_args(args, sep=' ', style=None):
    """Returns an escaped version of each string in *args*, according to
    *style*.

    Args:
        args (list): A list of arguments to escape and join together
        sep (str): The separator used to join the escaped arguments.
        style (str): The style of escaping to use. Can be one of
          ``cmd`` or ``sh``, geared toward Windows and Linux/BSD/etc.,
          respectively. If *style* is ``None``, then it is picked
          according to the system platform.

    See :func:`args2cmd` and :func:`args2sh` for details and example
    output for each style.
    """
    # default to the convention of the platform we are running on
    if not style:
        style = 'cmd' if sys.platform == 'win32' else 'sh'
    if style == 'sh':
        return args2sh(args, sep=sep)
    if style == 'cmd':
        return args2cmd(args, sep=sep)
    raise ValueError("style expected one of 'cmd' or 'sh', not %r" % style)
_find_sh_unsafe = re.compile(r'[^a-zA-Z0-9_@%+=:,./-]').search
def args2sh(args, sep=' '):
    """Return a shell-escaped string version of *args*, joined by
    *sep*, based on the rules of sh, bash, and other shells in the
    Linux/BSD/MacOS ecosystem.

    >>> print(args2sh(['aa', '[bb]', "cc'cc", 'dd"dd']))
    aa '[bb]' 'cc'"'"'cc' 'dd"dd'

    As you can see, arguments with no special characters are not
    escaped, arguments with special characters are quoted with single
    quotes, and single quotes themselves are quoted with double
    quotes. Double quotes are handled like any other special
    character.

    Based on code from the :mod:`pipes`/:mod:`shlex` modules. Also
    note that :mod:`shlex` and :mod:`argparse` have functions to split
    and parse strings escaped in this manner.
    """
    ret_list = []
    for arg in args:
        if not arg:
            # an empty argument still needs explicit quoting
            ret_list.append("''")
            continue
        if _find_sh_unsafe(arg) is None:
            ret_list.append(arg)
            continue
        # use single quotes, and put single quotes into double quotes
        # the string $'b is then quoted as '$'"'"'b'
        ret_list.append("'" + arg.replace("'", "'\"'\"'") + "'")
    # bug fix: the join separator used to be hard-coded to ' ',
    # silently ignoring the *sep* parameter
    return sep.join(ret_list)
def args2cmd(args, sep=' '):
    r"""Return a shell-escaped string version of *args*, joined by
    *sep*, using the same rules as the Microsoft C runtime.

    >>> print(args2cmd(['aa', '[bb]', "cc'cc", 'dd"dd']))
    aa [bb] cc'cc dd\"dd

    As you can see, escaping is through backslashing and not quoting,
    and double quotes are the only special character. See the comment
    in the code for more details. Based on internal code from the
    :mod:`subprocess` module.
    """
    # technique description from subprocess below
    """
    1) Arguments are delimited by white space, which is either a
       space or a tab.
    2) A string surrounded by double quotation marks is
       interpreted as a single argument, regardless of white space
       contained within.  A quoted string can be embedded in an
       argument.
    3) A double quotation mark preceded by a backslash is
       interpreted as a literal double quotation mark.
    4) Backslashes are interpreted literally, unless they
       immediately precede a double quotation mark.
    5) If backslashes immediately precede a double quotation mark,
       every pair of backslashes is interpreted as a literal
       backslash.  If the number of backslashes is odd, the last
       backslash escapes the next double quotation mark as
       described in rule 3.
    See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
    or search http://msdn.microsoft.com for
    "Parsing C++ Command-Line Arguments"
    """
    result = []
    needquote = False
    for arg in args:
        # pending run of backslashes; doubled only before a double quote
        bs_buf = []

        # Add the separator before every argument but the first.
        # bug fix: this used to append a hard-coded ' ', silently
        # ignoring the *sep* parameter.
        if result:
            result.append(sep)

        # whitespace (or emptiness) forces the argument to be quoted
        needquote = (" " in arg) or ("\t" in arg) or not arg
        if needquote:
            result.append('"')

        for c in arg:
            if c == '\\':
                # Don't know if we need to double yet.
                bs_buf.append(c)
            elif c == '"':
                # Double backslashes.
                result.append('\\' * len(bs_buf)*2)
                bs_buf = []
                result.append('\\"')
            else:
                # Normal char
                if bs_buf:
                    result.extend(bs_buf)
                    bs_buf = []
                result.append(c)

        # Add remaining backslashes, if any.
        if bs_buf:
            result.extend(bs_buf)

        if needquote:
            # trailing backslashes are doubled before the closing quote
            result.extend(bs_buf)
            result.append('"')

    return ''.join(result)
def parse_int_list(range_string, delim=',', range_delim='-'):
    """Returns a sorted list of positive integers based on
    *range_string*. Reverse of :func:`format_int_list`.

    Args:
        range_string (str): String of comma separated positive
            integers or ranges (e.g. '1,2,4-6,8'). Typical of a custom
            page range string used in printer dialogs.
        delim (char): Defaults to ','. Separates integers and
            contiguous ranges of integers.
        range_delim (char): Defaults to '-'. Indicates a contiguous
            range of integers.

    >>> parse_int_list('1,3,5-8,10-11,15')
    [1, 3, 5, 6, 7, 8, 10, 11, 15]
    """
    nums = []
    for chunk in range_string.strip().split(delim):
        if not chunk:
            # skip empty pieces, e.g. from doubled or trailing delimiters
            continue
        if range_delim in chunk:
            # contiguous range, inclusive of both endpoints
            bounds = [int(part) for part in chunk.split(range_delim)]
            nums.extend(range(min(bounds), max(bounds) + 1))
        else:
            # lone integer
            nums.append(int(chunk))
    return sorted(nums)
def format_int_list(int_list, delim=',', range_delim='-', delim_space=False):
    """Returns a sorted range string from a list of positive integers
    (*int_list*). Contiguous ranges of integers are collapsed to min
    and max values. Reverse of :func:`parse_int_list`.

    Args:
        int_list (list): List of positive integers to be converted
           into a range string (e.g. [1,2,4,5,6,8]).
        delim (char): Defaults to ','. Separates integers and
           contiguous ranges of integers.
        range_delim (char): Defaults to '-'. Indicates a contiguous
           range of integers.
        delim_space (bool): Defaults to ``False``. If ``True``, adds a
           space after all *delim* characters.

    >>> format_int_list([1,3,5,6,7,8,10,11,15])
    '1,3,5-8,10-11,15'
    """
    # Duplicates contribute nothing to a range string, so de-duplicate
    # and sort up front, then scan for maximal contiguous runs.
    vals = sorted(set(int_list))
    chunks = []
    idx, total = 0, len(vals)
    while idx < total:
        run_start = idx
        # extend the run while the next value is exactly one greater
        while idx + 1 < total and vals[idx + 1] == vals[idx] + 1:
            idx += 1
        if idx == run_start:
            # isolated value
            chunks.append('{0:d}'.format(vals[run_start]))
        else:
            # collapsed run: "<first><range_delim><last>"
            chunks.append('{0:d}{1}{2:d}'.format(vals[run_start],
                                                 range_delim,
                                                 vals[idx]))
        idx += 1
    joiner = delim + ' ' if delim_space else delim
    return joiner.join(chunks)
| {
"content_hash": "902fc822c6a0ccb3f5215aeebc9f8470",
"timestamp": "",
"source": "github",
"line_count": 973,
"max_line_length": 137,
"avg_line_length": 33.789311408016445,
"alnum_prop": 0.5867019496912735,
"repo_name": "markrwilliams/boltons",
"id": "96ae4f9f33477be8239746b7ae81d813f8d61244",
"size": "32963",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "boltons/strutils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "353491"
}
],
"symlink_target": ""
} |
"""Helper functions for model."""
import lingvo.compat as tf
from lingvo.core import py_utils
def ComputeSplits(batch_size, num_splits):
  """Creates a tensor of size num_splits of number of values per split.

  Assigns each split floor(batch_size/num_splits) and round-robins
  the remainder (if any) to each split.

  Example::

    batch_size: [5]
    num_splits: 3
    returns: [2, 2, 1]

  Args:
    batch_size: tensor of rank 0, size of tensor to be split
    num_splits: number of splits to split tensor into

  Returns:
    tensor of length num_splits containing sizes of each split
  """
  # Base allocation: every split gets floor(batch_size / num_splits).
  base = tf.tile(
      tf.div([batch_size], num_splits),
      tf.constant(
          [num_splits], dtype=tf.int32))
  # The first (batch_size % num_splits) splits each absorb one extra item;
  # the rest are padded with zeros so the addend matches `base` in length.
  extras = tf.tile(tf.constant([1]), tf.math.floormod([batch_size], num_splits))
  padding = tf.tile(tf.constant([0]),
                    tf.subtract(tf.shape(base), tf.shape(extras)))
  ret = tf.add(base, tf.concat([extras, padding], 0))
  # for some reason TF erases shape information if num_splits is 1
  if num_splits == 1:
    ret.set_shape([1])
  return ret
def SplitTensors(xs, num_splits):
  """Splits tensors in `xs` evenly into num_splits along the 1st dimenion.

  Args:
    xs: A tuple of tensors. Each tensor's 1st dimension is the same size.
    num_splits: A python integer.

  Returns:
    A tuple of lists of tensors, num elements in the tuple = len(xs).

    i-th element in each list corresponds to i-th split of each tensor in xs
    along the first dimension of each tensor.
  """
  # Every tensor must share the same leading (batch) dimension, and the
  # batch must be at least num_splits so no split comes out empty.
  leading_dims = tf.stack([tf.shape(x)[0] for x in xs])
  leading_dims = py_utils.with_dependencies([
      py_utils.assert_equal(
          leading_dims,
          tf.shape(xs[0])[0],
          message='first dim of tensors in xs must match'),
      py_utils.assert_greater_equal(
          tf.shape(xs[0])[0],
          num_splits,
          message='first dim of tensors in xs must be greater than num_splits')
  ], leading_dims)
  splits = ComputeSplits(tf.shape(xs[0])[0], num_splits)
  # thread the assertions above into the split computation
  splits = py_utils.with_dependencies([leading_dims], splits)
  return [tf.split(axis=0, num_or_size_splits=splits, value=x) for x in xs]
def SplitDictOfTensors(t_dict, num_splits):
  """Splits tensors in `t_dict` evenly into `num_splits` along the 1st dimenion.

  Args:
    t_dict: A dictionary of tensors. Each tensor's 1st dimension is the same
      size.
    num_splits: A python integer.

  Returns:
    A list of dictionaries of tensors, num elements in the list = num_splits

    i-th dictionary in the list corresponds to i-th split of each tensor
    along the first dimension of each tensor for each key in the original dict.
  """
  # Sort by key so the tuple handed to SplitTensors has a stable order.
  sorted_items = sorted(t_dict.items())
  names = [k for k, _ in sorted_items]
  per_key_splits = SplitTensors(tuple(v for _, v in sorted_items), num_splits)
  assert all(len(lst) == len(per_key_splits[0]) for lst in per_key_splits)
  # Transpose: produce one dict per split, keyed like the input dict.
  return [{name: per_key_splits[i][s] for i, name in enumerate(names)}
          for s in range(num_splits)]
| {
"content_hash": "4286947dc2dd22cc3153fe01e2167561",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 80,
"avg_line_length": 30,
"alnum_prop": 0.6595015576323987,
"repo_name": "tensorflow/lingvo",
"id": "25fd346029139f129f8eccb74222276dcc32b57d",
"size": "3899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lingvo/core/input_generator_helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5163"
},
{
"name": "C++",
"bytes": "556344"
},
{
"name": "Dockerfile",
"bytes": "8484"
},
{
"name": "Jupyter Notebook",
"bytes": "36721"
},
{
"name": "Python",
"bytes": "9574124"
},
{
"name": "Shell",
"bytes": "50408"
},
{
"name": "Starlark",
"bytes": "182688"
},
{
"name": "TeX",
"bytes": "37275"
}
],
"symlink_target": ""
} |
from re import compile
from math import pi, log, tan, ceil
import json
from shapely.wkb import loads
from shapely.geometry import asShape
from ... import getTile
from ...Core import KnownUnknown
from .ops import transform
# Matches a bare float literal token (optional sign and exponent).
float_pat = compile(r'^-?\d+\.\d+(e-?\d+)?$')
# Same, but preceded by '[' or ',' -- the shape json.JSONEncoder emits in
# its token stream on some Python versions (see encode() and merge()).
charfloat_pat = compile(r'^[\[,\,]-?\d+\.\d+(e-?\d+)?$')
# floating point lat/lon precision for each zoom level, good to ~1/4 pixel.
# NB: '+' binds tighter than '<<', so this is 1 << (zoom + 10), i.e.
# 256-pixel tiles times 4 for the quarter-pixel accuracy.
precisions = [int(ceil(log(1<<zoom + 8+2) / log(10)) - 2) for zoom in range(23)]
def get_tiles(names, config, coord):
    ''' Retrieve a list of named GeoJSON layer tiles from a TileStache config.

        Check integrity and compatibility of each, looking at known layers,
        correct JSON mime-types and "FeatureCollection" in the type attributes.
    '''
    # Every requested name must be a configured layer.
    missing = set(names) - set(config.layers.keys())

    if missing:
        raise KnownUnknown("%s.get_tiles didn't recognize %s when trying to load %s." % (__name__, ', '.join(missing), ', '.join(names)))

    layers = [config.layers[name] for name in names]
    mimes, bodies = zip(*[getTile(layer, coord, 'json') for layer in layers])

    # All sub-layers must respond with a JSON mime-type...
    for mime, name in zip(mimes, names):
        if not mime.endswith('/json'):
            raise KnownUnknown('%s.get_tiles encountered a non-JSON mime-type in %s sub-layer: "%s"' % (__name__, name, mime))

    geojsons = [json.loads(body.decode('utf8')) for body in bodies]

    # ...and every payload must be a FeatureCollection.
    for topo, name in zip(geojsons, names):
        if topo['type'] != 'FeatureCollection':
            raise KnownUnknown('%s.get_tiles encountered a non-FeatureCollection type in %s sub-layer: "%s"' % (__name__, name, topo['type']))

    return geojsons
def mercator(xy):
    ''' Project an unprojected (lon, lat) tuple to spherical mercator
        meters, using the standard 6378137m earth radius.
    '''
    lon, lat = xy
    # degrees -> radians
    rad_x = pi * lon / 180
    rad_y = pi * lat / 180
    # Mercator y: ln(tan(pi/4 + phi/2))
    merc_y = log(tan(0.25 * pi + 0.5 * rad_y))
    return 6378137 * rad_x, 6378137 * merc_y
def decode(file):
    ''' Decode a GeoJSON file into a list of (WKB, property dict) features.

        Result can be passed directly to mapnik.PythonDatasource.wkb_features().
    '''
    collection = json.load(file)
    decoded = []

    for feature in collection['features']:
        # only plain Features with concrete geometries are supported
        if feature['type'] != 'Feature':
            continue
        if feature['geometry']['type'] == 'GeometryCollection':
            continue

        props = feature['properties']
        # project lon/lat coordinates to spherical mercator for mapnik
        shape = transform(asShape(feature['geometry']), mercator)
        decoded.append((shape.wkb, props))

    return decoded
def encode(file, features, zoom, is_clipped):
    ''' Encode a list of (WKB, property dict) features into a GeoJSON stream.

        Also accept three-element tuples as features: (WKB, property dict, id).

        Geometries in the features list are assumed to be unprojected lon, lats.
        Floating point precision in the output is truncated per zoom level.
    '''
    try:
        # Assume three-element features
        feature_dicts = [dict(type='Feature', properties=props, geometry=loads(wkb).__geo_interface__, id=fid) for (wkb, props, fid) in features]

    except ValueError:
        # Fall back to two-element features
        feature_dicts = [dict(type='Feature', properties=props, geometry=loads(wkb).__geo_interface__) for (wkb, props) in features]

    if is_clipped:
        for feature_dict in feature_dicts:
            feature_dict.update(dict(clipped=True))

    collection = dict(type='FeatureCollection', features=feature_dicts)

    flt_fmt = '%%.%df' % precisions[zoom]

    # Truncate float literals in the encoder's token stream as we go.
    for token in json.JSONEncoder(separators=(',', ':')).iterencode(collection):
        if charfloat_pat.match(token):
            # in python 2.7, we see a character followed by a float literal
            token = token[0] + flt_fmt % float(token[1:])

        elif float_pat.match(token):
            # in python 2.6, we see a simple float literal
            token = flt_fmt % float(token)

        file.write(token.encode('utf8'))
def merge(file, names, config, coord):
    ''' Retrieve a list of GeoJSON tile responses and merge them into one.

        get_tiles() retrieves data and performs basic integrity checks.
    '''
    merged = dict(zip(names, get_tiles(names, config, coord)))

    flt_fmt = '%%.%df' % precisions[coord.zoom]

    # Truncate float literals in the encoder's token stream as we go.
    for token in json.JSONEncoder(separators=(',', ':')).iterencode(merged):
        if charfloat_pat.match(token):
            # in python 2.7, we see a character followed by a float literal
            token = token[0] + flt_fmt % float(token[1:])

        elif float_pat.match(token):
            # in python 2.6, we see a simple float literal
            token = flt_fmt % float(token)

        file.write(token.encode('utf8'))
| {
"content_hash": "b7297ec26a39eb93ce7f1be29daa23e1",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 144,
"avg_line_length": 37.50381679389313,
"alnum_prop": 0.6130673722776308,
"repo_name": "jbants/TileStache",
"id": "6bbde69fdcf87b49d6f08b4a83d4527770cf9543",
"size": "4913",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "TileStache/Goodies/VecTiles/geojson.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "527"
},
{
"name": "HTML",
"bytes": "70312"
},
{
"name": "Makefile",
"bytes": "2774"
},
{
"name": "Python",
"bytes": "557029"
},
{
"name": "Shell",
"bytes": "2414"
}
],
"symlink_target": ""
} |
from dogbot.cqsdk.utils import reply
import getopt
from config import config
import shlex
from dogbot.models import *
def alias_command(bot, message):
    """#alias [-h] [-d 原命令] [命令]
    -h : 打印本帮助
    -d 命令 : 要操作的命令
    命令 : 命令的别名
    例:
    #alias -d #conne 圆爹 (然后就可以使用 圆爹 狗蛋 的语法了)
    #alias (输出所有的alias)
    #alias 圆爹 (输出"圆爹"对应的原命令)
    """
    # NOTE(review): the docstring above is sent verbatim to chat users via
    # reply(), so it must remain exactly as-is (Chinese usage text).
    # Ignore messages that are not a well-formed "<trigger>alias ..." command.
    try:
        cmd, *args = shlex.split(message.text)
    except ValueError:
        return False
    if not cmd[0] in config['trigger']:
        return False
    if not cmd[1:] == 'alias':
        return False
    try:
        options, args = getopt.gnu_getopt(args, 'hd:')
    except getopt.GetoptError:
        # malformed options: show usage
        reply(bot, message, alias_command.__doc__)
        return True
    origin = None
    for o, a in options:
        if o == '-d':
            origin = a
        elif o == '-h':
            # help requested
            reply(bot, message, alias_command.__doc__)
            return True
    # no positional argument: list every registered alias
    if not args:
        # list all alias -> origin pairs
        alias = Alias.objects()
        aliases = []
        for a in alias:
            aliases.append('{} => {}'.format(a.alias, a.origin))
        msg = '{}'.format('\n'.join(aliases))
        reply(bot, message, msg)
        return True
    command = args[0]
    alias = Alias.objects(alias=command)
    # no -d origin given: look up and print the alias's original command
    if not origin:
        if not alias:
            reply(bot, message, '没找到别名{}'.format(command))
            return True
        alias = alias[0]
        msg = '{} => {}'.format(alias.alias, alias.origin)
        reply(bot, message, msg)
        return True
    # both origin and alias given, but the alias already exists: report it
    if alias:
        reply(bot, message, '{}已经存在'.format(command))
        return True
    alias = Alias(origin=origin, alias=command)
    alias.save()
    reply(bot, message, '{} => {}'.format(alias.alias, alias.origin))
    return True
def unalias_command(bot, message):
    """#unalias [-h] 命令
    -h : 打印本帮助
    命令 : 要删除的别名
    """
    # Tokenise the raw message; bail out on unbalanced quotes.
    try:
        cmd, *args = shlex.split(message.text)
    except ValueError:
        return False
    # Only react to "<trigger>unalias".
    if not cmd[0] in config['trigger']:
        return False
    if not cmd[1:] == 'unalias':
        return False
    try:
        options, args = getopt.gnu_getopt(args, 'hd:')
    except getopt.GetoptError:
        # 格式不对 -> show usage
        reply(bot, message, unalias_command.__doc__)
        return True
    for o, a in options:
        if o == '-h':
            # 帮助
            reply(bot, message, unalias_command.__doc__)
            return True
    # 一定要有命令
    if not args:
        reply(bot, message, unalias_command.__doc__)
        return True
    command = args[0]
    # BUG FIX: fetch the document itself (not the whole QuerySet) so the
    # `.alias`/`.origin` reads below work, and stop when nothing matched --
    # the original fell through after the "not found" reply and then raised
    # AttributeError accessing `.alias` on an empty QuerySet.
    alias = Alias.objects(alias=command).first()
    if not alias:
        reply(bot, message, '没找到别名{}'.format(command))
        return True
    reply(bot, message, '删掉了别名 {} => {}'.format(alias.alias, alias.origin))
    alias.delete()
    return True
def alias_parser(bot, message):
    """处理别名命令"""
    # The first token of the message is the candidate alias.
    try:
        cmd, *args = shlex.split(message.text)
    except ValueError:
        cmd = message.text
    found = Alias.objects(alias=cmd).first()
    if not found:
        return False
    # Substitute only the first occurrence so the arguments stay intact.
    rewritten = message.text.replace(cmd, found.origin, 1)
    return message._replace(text=rewritten)
def help(bot, message):
    """#help
    """
    # Tokenise the raw message; bail out on unbalanced quotes.
    try:
        cmd, *args = shlex.split(message.text)
    except ValueError:
        return False
    # Only react to "<trigger>help".
    if cmd[0] not in config['trigger'] or cmd[1:] != 'help':
        return False
    msg = ['ケルベロス ver3.0.5']
    # Collect the usage line (first docstring line) of every command listener.
    usage_lines = []
    for listener in bot.listeners:
        doc = listener.handler.__doc__
        if doc and doc[0] in config['trigger']:
            usage_lines.append(doc.split('\n')[0])
    # 给命令排个序
    msg.extend(sorted(usage_lines))
    msg.append('-----')
    msg.extend('{} => {}'.format(a.alias, a.origin) for a in Alias.objects)
    msg.append('-----')
    msg.append(config.get('extra_comments'))
    msg.append('前导触发器可以是: {}'.format(config['trigger']))
    reply(bot, message, '\n'.join(msg))
    return True
| {
"content_hash": "bbf6d79d340483d4626108abf2e15508",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 89,
"avg_line_length": 24.580838323353294,
"alnum_prop": 0.5490864799025579,
"repo_name": "moondropx/dogbot",
"id": "647f0d8913645497dfb6e03d4db93e2b785d158a",
"size": "4455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dogbot/bot/listeners/helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "25461"
},
{
"name": "Python",
"bytes": "82746"
}
],
"symlink_target": ""
} |
import torch
from torch.utils.data import DataLoader, TensorDataset
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from torch.optim.lr_scheduler import StepLR
import os
import os.path as osp
from tqdm import tqdm
import argparse
import time
import numpy as np
import random
from rdkit import Chem
from rdkit.Chem import AllChem
### importing OGB-LSC
from ogb.lsc import PCQM4MDataset, PCQM4MEvaluator
# Module-level training loss shared by train(): L1 (mean absolute error),
# matching the MAE metric computed by the evaluator.
reg_criterion = torch.nn.L1Loss()
def train(model, device, loader, optimizer):
    """Run one optimization epoch and return the mean per-batch L1 loss."""
    model.train()
    total_loss = 0
    for step, (x, y) in enumerate(tqdm(loader, desc="Iteration")):
        x = x.to(device).to(torch.float32)
        y = y.to(device)
        pred = model(x).view(-1,)
        optimizer.zero_grad()
        loss = reg_criterion(pred, y)
        loss.backward()
        optimizer.step()
        total_loss += loss.detach().cpu().item()
    # Average over the number of batches seen.
    return total_loss / (step + 1)
def eval(model, device, loader, evaluator):
    """Evaluate `model` on `loader` and return the evaluator's MAE."""
    model.eval()
    true_chunks, pred_chunks = [], []
    for step, (x, y) in enumerate(tqdm(loader, desc="Iteration")):
        x = x.to(device).to(torch.float32)
        y = y.to(device)
        with torch.no_grad():
            pred = model(x).view(-1,)
        true_chunks.append(y.view(pred.shape).detach().cpu())
        pred_chunks.append(pred.detach().cpu())
    input_dict = {"y_true": torch.cat(true_chunks, dim=0),
                  "y_pred": torch.cat(pred_chunks, dim=0)}
    return evaluator.eval(input_dict)["mae"]
def test(model, device, loader):
    """Return predictions for every batch in `loader` as one 1-D CPU tensor."""
    model.eval()
    pred_chunks = []
    for step, (x, y) in enumerate(tqdm(loader, desc="Iteration")):
        x = x.to(device).to(torch.float32)
        with torch.no_grad():
            out = model(x).view(-1,)
        pred_chunks.append(out.detach().cpu())
    return torch.cat(pred_chunks, dim=0)
class MLP(torch.nn.Module):
    """Feed-forward regressor over 2048-bit fingerprint vectors.

    Architecture: Linear(2048 -> emb_dim), then (num_mlp_layers - 1)
    hidden Linear(emb_dim -> emb_dim) blocks -- each followed by
    BatchNorm, ReLU and Dropout -- and a final Linear(emb_dim -> 1) head.
    """

    def __init__(self, num_mlp_layers=5, emb_dim=300, drop_ratio=0):
        super(MLP, self).__init__()
        self.num_mlp_layers = num_mlp_layers
        self.emb_dim = emb_dim
        self.drop_ratio = drop_ratio

        def block(in_dim, out_dim):
            # One hidden block: affine -> batch-norm -> ReLU -> dropout.
            return [
                torch.nn.Linear(in_dim, out_dim),
                torch.nn.BatchNorm1d(out_dim),
                torch.nn.ReLU(),
                torch.nn.Dropout(p=self.drop_ratio),
            ]

        layers = block(2048, self.emb_dim)
        for _ in range(self.num_mlp_layers - 1):
            layers += block(self.emb_dim, self.emb_dim)
        # Regression head; the raw output is clamped at inference time
        # (see forward) rather than passed through an activation here.
        layers.append(torch.nn.Linear(self.emb_dim, 1))
        self.mlp = torch.nn.Sequential(*layers)

    def forward(self, x):
        out = self.mlp(x)
        if self.training:
            return out
        # At inference time the prediction is clamped into [0, 50] so it
        # stays non-negative and bounded.
        return torch.clamp(out, min=0, max=50)
def main_mlp():
    """Train and evaluate the Morgan-fingerprint MLP baseline on PCQM4M.

    Parses CLI options, builds (or loads) the cached fingerprint tensors,
    trains with L1 loss and a StepLR schedule, tracks the best validation
    MAE, and optionally writes checkpoints and a test-dev submission file.
    """
    # Training settings
    parser = argparse.ArgumentParser(description='GNN baselines on ogbgmol* data with Pytorch Geometrics')
    parser.add_argument('--device', type=int, default=0,
                        help='which gpu to use if any (default: 0)')
    parser.add_argument('--num_mlp_layers', type=int, default=6,
                        help='number of mlp layers (default: 6)')
    parser.add_argument('--drop_ratio', type=float, default=0.2,
                        help='dropout ratio (default: 0.2)')
    parser.add_argument('--batch_size', type=int, default=256,
                        help='input batch size for training (default: 256)')
    parser.add_argument('--emb_dim', type=int, default=1600,
                        help='embedding dimensionality (default: 1600)')
    parser.add_argument('--train_subset', action='store_true')
    parser.add_argument('--epochs', type=int, default=100,
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--num_workers', type=int, default=0,
                        help='number of workers (default: 0)')
    parser.add_argument('--radius', type=int, default=2,
                        help='radius (default: 2)')
    parser.add_argument('--log_dir', type=str, default="",
                        help='tensorboard log directory')
    parser.add_argument('--checkpoint_dir', type=str, default='', help='directory to save checkpoint')
    parser.add_argument('--save_test_dir', type=str, default='', help='directory to save test submission file')
    args = parser.parse_args()
    print(args)

    # Fix all RNG seeds for reproducibility.
    np.random.seed(42)
    torch.manual_seed(42)
    torch.cuda.manual_seed(42)
    random.seed(42)

    device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")

    dataset = PCQM4MDataset(root='dataset/', only_smiles=True)

    # Fingerprints are computed once and cached on disk (see preprocess_fp).
    fp_processed_file = preprocess_fp(dataset, args.radius)
    data_dict = torch.load(fp_processed_file)
    X, Y = data_dict['X'], data_dict['Y']

    split_idx = dataset.get_idx_split()

    ### automatic evaluator. takes dataset name as input
    evaluator = PCQM4MEvaluator()

    if args.train_subset:
        print('train subset')
        subset_ratio = 0.1
        subset_idx = torch.randperm(len(split_idx["train"]))[:int(subset_ratio*len(split_idx["train"]))]
        train_dataset = TensorDataset(X[split_idx['train'][subset_idx]], Y[split_idx['train'][subset_idx]])
    else:
        train_dataset = TensorDataset(X[split_idx['train']], Y[split_idx['train']])

    valid_dataset = TensorDataset(X[split_idx['valid']], Y[split_idx['valid']])
    # BUG FIX: inputs and targets were indexed with different splits
    # ('test-dev' for X but 'test' for Y), misaligning the (x, y) pairs.
    # Use 'test-dev' for both, matching the submission mode used below.
    test_dataset = TensorDataset(X[split_idx['test-dev']], Y[split_idx['test-dev']])

    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
    valid_loader = DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)

    if args.save_test_dir != '':
        test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)

    if args.checkpoint_dir != '':
        os.makedirs(args.checkpoint_dir, exist_ok=True)

    model = MLP(num_mlp_layers=args.num_mlp_layers, emb_dim=args.emb_dim, drop_ratio=args.drop_ratio).to(device)

    num_params = sum(p.numel() for p in model.parameters())
    print(f'#Params: {num_params}')

    optimizer = optim.Adam(model.parameters(), lr=0.001)

    if args.log_dir != '':
        writer = SummaryWriter(log_dir=args.log_dir)

    best_valid_mae = 1000

    if args.train_subset:
        # Longer schedule when training on the 10% subset.
        scheduler = StepLR(optimizer, step_size=300, gamma=0.25)
        args.epochs = 1000
    else:
        scheduler = StepLR(optimizer, step_size=30, gamma=0.25)

    for epoch in range(1, args.epochs + 1):
        print("=====Epoch {}".format(epoch))
        print('Training...')
        train_mae = train(model, device, train_loader, optimizer)

        print('Evaluating...')
        valid_mae = eval(model, device, valid_loader, evaluator)

        print({'Train': train_mae, 'Validation': valid_mae})

        if args.log_dir != '':
            writer.add_scalar('valid/mae', valid_mae, epoch)
            writer.add_scalar('train/mae', train_mae, epoch)

        if valid_mae < best_valid_mae:
            best_valid_mae = valid_mae
            if args.checkpoint_dir != '':
                print('Saving checkpoint...')
                checkpoint = {'epoch': epoch, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'scheduler_state_dict': scheduler.state_dict(), 'best_val_mae': best_valid_mae, 'num_params': num_params}
                torch.save(checkpoint, osp.join(args.checkpoint_dir, 'checkpoint.pt'))

            if args.save_test_dir != '':
                print('Predicting on test data...')
                y_pred = test(model, device, test_loader)
                print('Saving test submission file...')
                evaluator.save_test_submission({'y_pred': y_pred}, args.save_test_dir, mode='test-dev')

        scheduler.step()
        print(f'Best validation MAE so far: {best_valid_mae}')

    if args.log_dir != '':
        writer.close()
def preprocess_fp(dataset, radius):
    """Compute (and cache) a Morgan fingerprint tensor for every SMILES.

    Returns the path of a torch file holding {'X': int8 fingerprint
    matrix, 'Y': target tensor}; the file is built only on first call.
    """
    fp_processed_dir = osp.join(dataset.folder, 'fp_processed')
    fp_processed_file = osp.join(fp_processed_dir, f'data_radius{radius}.pt')
    print(fp_processed_file)

    if not osp.exists(fp_processed_file):
        ### automatic dataloading and splitting
        os.makedirs(fp_processed_dir, exist_ok=True)
        x_list, y_list = [], []
        for i in tqdm(range(len(dataset))):
            smiles, y = dataset[i]
            mol = Chem.MolFromSmiles(smiles)
            fp_bits = list(AllChem.GetMorganFingerprintAsBitVect(mol, radius))
            y_list.append(y)
            x_list.append(torch.tensor(fp_bits, dtype=torch.int8))
        X = torch.stack(x_list)
        Y = torch.tensor(y_list)
        print(X)
        print(Y)
        print(X.shape)
        print(Y.shape)
        torch.save({'X': X, 'Y': Y}, fp_processed_file)

    return fp_processed_file
# Script entry point: train the fingerprint MLP baseline.
if __name__ == "__main__":
    main_mlp()
| {
"content_hash": "70b1d948bcd35c18cfd18a09d642d518",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 239,
"avg_line_length": 34.962825278810406,
"alnum_prop": 0.5996810207336523,
"repo_name": "snap-stanford/ogb",
"id": "430ab498cd5d1f12f12f7707a8e8484ef6b0025e",
"size": "9405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/lsc/pcqm4m/main_mlpfp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "271886"
}
],
"symlink_target": ""
} |
from django.core import checks
from django.db.backends.base.validation import BaseDatabaseValidation
from django.utils.version import get_docs_version
class DatabaseValidation(BaseDatabaseValidation):
    """MySQL/MariaDB-specific system checks."""

    def check(self, **kwargs):
        # Run the generic backend checks, then append the MySQL-only ones.
        issues = super().check(**kwargs)
        issues.extend(self._check_sql_mode(**kwargs))
        return issues

    def _check_sql_mode(self, **kwargs):
        # Warn unless one of the strict SQL modes is active.
        strict_modes = {'STRICT_TRANS_TABLES', 'STRICT_ALL_TABLES'}
        if self.connection.sql_mode & strict_modes:
            return []
        return [checks.Warning(
            "%s Strict Mode is not set for database connection '%s'"
            % (self.connection.display_name, self.connection.alias),
            hint=(
                "%s's Strict Mode fixes many data integrity problems in "
                "%s, such as data truncation upon insertion, by "
                "escalating warnings into errors. It is strongly "
                "recommended you activate it. See: "
                "https://docs.djangoproject.com/en/%s/ref/databases/#mysql-sql-mode"
                % (
                    self.connection.display_name,
                    self.connection.display_name,
                    get_docs_version(),
                ),
            ),
            id='mysql.W002',
        )]

    def check_field_type(self, field, field_type):
        """
        MySQL has the following field length restriction:
        No character (varchar) fields can have a length exceeding 255
        characters if they have a unique index on them.
        MySQL doesn't support a database index on some data types.
        """
        errors = []
        # Unique varchar columns longer than 255 chars may be rejected.
        is_long_unique_varchar = (
            field_type.startswith('varchar') and field.unique and
            (field.max_length is None or int(field.max_length) > 255)
        )
        if is_long_unique_varchar:
            errors.append(
                checks.Warning(
                    '%s may not allow unique CharFields to have a max_length '
                    '> 255.' % self.connection.display_name,
                    obj=field,
                    hint=(
                        'See: https://docs.djangoproject.com/en/%s/ref/'
                        'databases/#mysql-character-fields' % get_docs_version()
                    ),
                    id='mysql.W003',
                )
            )
        # Some column types cannot carry a database index at all.
        if field.db_index and field_type.lower() in self.connection._limited_data_types:
            errors.append(
                checks.Warning(
                    '%s does not support a database index on %s columns.'
                    % (self.connection.display_name, field_type),
                    hint=(
                        "An index won't be created. Silence this warning if "
                        "you don't care about it."
                    ),
                    obj=field,
                    id='fields.W162',
                )
            )
        return errors
| {
"content_hash": "4ab58dfa3812bf2f5c72dc9e89c190f0",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 89,
"avg_line_length": 42.31884057971015,
"alnum_prop": 0.5099315068493151,
"repo_name": "ar4s/django",
"id": "41e600a856e16a25d01aa0584801275a62236510",
"size": "2920",
"binary": false,
"copies": "23",
"ref": "refs/heads/master",
"path": "django/db/backends/mysql/validation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import unittest
import os
from tempfile import mkstemp
class OutputTestCase(unittest.TestCase):
    """Base TestCase providing per-format assertion helpers for Czechtile.

    Each helper verifies that the transformation result is a unicode object
    (Python 2) and that it equals ``txt`` wrapped in that format's fixed
    document envelope (XHTML / DocBook 4), or equals ``txt`` directly for
    envelope-less formats (BBCode, MediaWiki).
    """
    def assertXhtml(self, txt, out):
        # result from CZT must be unicode string
        self.assertEquals(unicode, type(out))
        # for backward compatibility, tests assertEqals could be strings
        if isinstance(txt, str):
            # NOTE(review): the decoded value is discarded -- presumably this
            # only validates that txt is well-formed UTF-8 (raises otherwise).
            txt.decode('utf-8')
        return self.assertEquals(u''.join([u'''<?xml version="1.0" encoding="UTF-8"?>\n<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="cs" lang="cs"><body class="article">''', txt, u'</body></html>']), out)
    def assertDocbook4(self, txt, out):
        # result from CZT must be unicode string
        self.assertEquals(unicode, type(out))
        # for backward compatibility, tests assertEqals could be strings
        if isinstance(txt, str):
            txt.decode('utf-8')
        # Expect txt wrapped in the DocBook 4.4 article envelope.
        return self.assertEquals(u''.join([u'''<?xml version="1.0" encoding="UTF-8"?>\n<!DOCTYPE article PUBLIC "-//OASIS//DTD DocBook XML V4.4//EN" "http://www.oasis-open.org/docbook/xml/4.4/docbookx.dtd"><article>''', txt, '</article>']), out)
    def assertBbcode(self, txt, out):
        # result from CZT must be unicode string
        self.assertEquals(unicode, type(out))
        # for backward compatibility, tests assertEqals could be strings
        if isinstance(txt, str):
            txt.decode('utf-8')
        # BBCode output has no envelope: result must equal the input text.
        return self.assertEquals(txt, out)
    def assertMediawiki(self, txt, out):
        # result from CZT must be unicode string
        self.assertEquals(unicode, type(out))
        # for backward compatibility, tests assertEqals could be strings
        if isinstance(txt, str):
            txt.decode('utf-8')
        # MediaWiki output has no envelope: result must equal the input text.
        return self.assertEquals(txt, out)
# slightly modified, taken from PyArticle
def getPersistentTmpfile(suffix='.czt', prefix='czechtile_', object=False):
    """Create a persistent (not auto-deleted) temporary file.

    Returns the file name by default; when ``object`` is true, returns an
    open ``'w+b'`` file object instead. The caller is responsible for
    removing the file when done.

    Note: the parameter name ``object`` shadows the builtin, but is kept
    unchanged for backward compatibility with existing callers.
    """
    fd, fn = mkstemp(suffix=suffix, prefix=prefix)
    # mkstemp hands back an open OS-level descriptor; close it right away so
    # only the on-disk file remains.
    f = os.fdopen(fd, 'w')
    f.close()
    # Fixed anti-idiom: was `if object == True:` -- truthiness test suffices.
    if object:
        return open(fn, 'w+b')
    return fn
| {
"content_hash": "1cbc782ee65c9f095faff46367626ba1",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 322,
"avg_line_length": 44.520833333333336,
"alnum_prop": 0.644361254094525,
"repo_name": "andrejtokarcik/python-czechtile",
"id": "4c0cc8f4245d65511510ae4e11b0191aa2f6b342",
"size": "2981",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "czechtile/test/module_test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "122297"
}
],
"symlink_target": ""
} |
from symbol_helper import SymbolHelper
import copy
import inspect
class TreeWalker:
    """Walks cppcheck ASTs to infer and propagate physical units.

    Decorates tokens with unit dictionaries (looked up via SymbolHelper,
    e.g. from ROS message types) and propagates them across operators,
    assignments, function calls and scope history.
    """
    # NOTE(review): this class attribute appears unused within this chunk;
    # kept as-is for compatibility.
    name = None
    def __init__(self, function_dictionary):
        """Initialize walker state over `function_dictionary` (function info map)."""
        self.my_symbol_helper = SymbolHelper(function_dictionary)
        # Alias so callers can reach the helper under either attribute name.
        self.symbol_helper = self.my_symbol_helper
        # Debug switches (general / scope tracing / very chatty tracing).
        self.debug = False
        self.debug_scope = False
        self.debug_verbose = False
        self.source_file = ''
        self.source_file_lines = []
        self.errors = []
        self.return_value = None
        # Scratch list used by recursive collectors (e.g. find_first_variable).
        self.return_value_list = []
        self.current_file = ''
        self.current_function_dict = None
        self.should_check_unitless_during_multiplication = False
        # Set when any token in the current tree received units.
        self.found_units_in_this_tree = False
        # Set whenever a token's unit list actually changed (fixed-point flag).
        self.was_some_unit_changed = False
        self.cps_unit_checker = None  # DEPENDENCY INJECTION. SEE MARTIN FOWLER
        # Taint flags: propagation derived from unknown variables / constants.
        self.is_unit_propagation_based_on_unknown_variable = False
        self.is_unit_propagation_based_on_constants = False
        self.is_assignment_statement = False
        self.current_AST_start_line_number = None  # PROTECTS FROM MULTI-LINE STATEMENTS
        self.current_AST_end_line_number = None  # PROTECTS FROM MULTI-LINE STATEMENTS
        pass
def generic_recurse_and_apply_function(self, token, function_to_apply):
''' GENERIC RECURSION PATTERN - LEFT RIGHT TOKEN
input: token CPPCHECK token to recurse upon
function_to_apply: the function to apply to the token
after recursing.
returns: None Side effect determined by function_to_apply
'''
if not token:
return
# INITIALIZE
left_token = right_token = None
# LEFT
if token.astOperand1:
left_token = token.astOperand1
self.generic_recurse_and_apply_function(left_token,
function_to_apply)
# RIGHT
if token.astOperand2:
right_token = token.astOperand2
self.generic_recurse_and_apply_function(right_token,
function_to_apply)
function_to_apply(token, left_token, right_token)
def find_assignment_tokens_recursive_target(self,
token,
left_token,
right_token):
''' FIND IF THIS AST IS AN ASSIGNMENT STATEMENT
input: token AN AST TOKEN
left_token (ignored)
right_token (ignored)
returns: None (side effect: modifies class is_assignment_statement
'''
if self.debug_verbose:
self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
if token.isAssignmentOp:
self.is_assignment_statement = True
    def find_min_max_line_numbers(self, token, left_token, right_token):
        ''' FIND THE MIN AND MAX LINE NUMBERS FOR THIS AST,
            PROTECT FROM MULTI-LINE STATEMENTS
            input:  token       AN AST TOKEN
                    left_token  (ignored)
                    right_token (ignored)
            returns: None (side effect: modifies class min and max
                line number range
            '''
        if self.debug_verbose:
            self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
        # NOTE(review): token.linenr looks like it may be a string (see the
        # int(...) conversions in apply_units_from_scope); if so, these
        # comparisons are lexicographic ("9" > "10") -- confirm.
        if not self.current_AST_start_line_number or \
                token.linenr < self.current_AST_start_line_number:
            self.current_AST_start_line_number = token.linenr
        if not self.current_AST_end_line_number or \
                token.linenr > self.current_AST_end_line_number:
            self.current_AST_end_line_number = token.linenr
def reset_min_max_line_numbers(self):
''' INITIALIZES THE AST LINE NUMBERS BACK TO NONE.
SHOULD BE CALLED BEFORE EVERY AST IS EVALUATED
input: None
output: None. side effect is setting class variables
for min max to None
'''
self.current_AST_start_line_number = None
self.current_AST_end_line_number = None
def apply_ROS_units(self, token, left_token, right_token):
''' DECORATE LEAF WITH ROS MESSAGE TYPE UNITS
input: token AN AST TOKEN
left_token (ignored)
right_token (ignored)
returns: None (side effect: adds units to ROS variable leafs)
'''
if self.debug_verbose:
self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
if token.variable:
# LOOKUP VARIABLE TYPE IN SYMBOL DICTIONARY FOR ROS MESSAGES
units_as_dict = self.my_symbol_helper.find_units_for_variable(token)
# ADD THIS DICTIONARY
if units_as_dict and (units_as_dict not in token.units):
self.was_some_unit_changed = True
# CHECK SYMBOL HELPER FOR WEAK INFERENCE IN THE CASE OF
# JOINT_STATES AND getXYZ()
if self.my_symbol_helper.is_weak_inference:
self.is_unit_propagation_based_on_unknown_variable = True
token.is_unit_propagation_based_on_unknown_variable = True
if self.debug_verbose:
s = "tw. units changed in %s" % inspect.stack()[0][3]
print s
token.units.append(units_as_dict)
self.found_units_in_this_tree = True
    def apply_units_from_scope(self, token, left_token, right_token):
        '''Pull previously recorded units onto this token.

        For a variable token: look up the most recent unit assignment of the
        same compound variable in its scope's var_ordered_dict (skipping
        assignments made by the current AST) and copy those units over,
        together with their constant/unknown-variable taint flags.
        For a function token: copy the function's return units.
        '''
        if self.debug_verbose:
            self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
        if token.variable:
            compound_var_name = self.my_symbol_helper.\
                find_compound_variable_name_for_variable_token(token)
            # PATH TO THE SCOPE WHERE THIS VAR DEFINED
            var_scope = token.variable.nameToken.scope
            # Function arguments are tracked in the current function's scope.
            if token.variable.isArgument:
                if self.current_function_dict:
                    var_scope = self.current_function_dict['scopeObject']
            if self.debug and self.debug_scope:
                # NOTE(review): this debug message is built but never printed.
                s = "%s attempting to apply previous units for %s "
                s += "to scope dict %s"
                s = s % (token.linenr,
                         compound_var_name,
                         var_scope.var_ordered_dict)
                pass
            if compound_var_name in var_scope.var_ordered_dict:
                # GET MOST RECENT ASSIGNMENT IN SCOPE
                # SORT BY LINE NUMBER - WE WANT LAST (MOST RECENT)
                # ASSIGNMENT todo: flow sensitive?
                # IF len(dict)==1, ASSUME SORT IS FAST
                s = sorted(var_scope.var_ordered_dict[compound_var_name])
                last_line_number = s[-1]
                # REJECT UNITS FOUND IN SCOPE IF THEY CAME FROM THIS AST
                # todo: potential BUG when previous assignment was from a different file but the same line number
                if last_line_number in range(int(self.current_AST_start_line_number), int(self.current_AST_end_line_number)+1):
                    return
                most_recent_units = var_scope.var_ordered_dict[compound_var_name][last_line_number]['units']
                for u in most_recent_units:
                    if u in token.units:
                        continue
                    token.units.append(u)
                    self.was_some_unit_changed = True
                    self.found_units_in_this_tree = True
                    # IF PREVIOUS ASSIGNMENT WAS BASED ON CONSTANTS OR UNKNOWN VARIABLES, PAY ATTENTION TO IT HERE
                    if var_scope.var_ordered_dict[compound_var_name][last_line_number]['is_unit_propagation_based_on_constants']:
                        self.is_unit_propagation_based_on_constants = True
                        token.is_unit_propagation_based_on_constants = True
                    if var_scope.var_ordered_dict[compound_var_name][last_line_number]['is_unit_propagation_based_on_unknown_variable']:
                        self.is_unit_propagation_based_on_unknown_variable = True
                        token.is_unit_propagation_based_on_unknown_variable = True
                    if self.debug_verbose:
                        s = "tw. units changed in %s" \
                            % inspect.stack()[0][3]
                        # NOTE(review): s is already fully formatted, so the
                        # extra '% inspect.stack()[0][3]' below would raise
                        # TypeError when debug_verbose is enabled -- confirm
                        # and fix ('print s' alone looks intended).
                        print s % inspect.stack()[0][3]
                        s = "len most recent %d"
                        print s % len(most_recent_units)
                        s = "len most recent %d"
                        print s % len(most_recent_units)
                        s = "FOUND UNITS for %s on line %s from line "
                        s += "%s. %s"
                        print s % (compound_var_name,
                                   token.linenr,
                                   last_line_number,
                                   most_recent_units)
                        print "FOUND UNITS for %s on line %s from line %s. %s" % (compound_var_name, token.linenr, last_line_number, most_recent_units)
                    if self.debug and self.debug_scope:
                        print "FOUND UNITS for %s on line %s from line %s. %s" % (compound_var_name, token.linenr, last_line_number, most_recent_units)
                # for/else: runs when the loop completes without break.
                else:
                    pass  # DO NOTHING BECAUSE CURRENT == MOST RECENT UNITS
        if token.function:
            # Function call token: adopt the callee's return units and taint.
            function_return_units = token.function.return_units
            for u in function_return_units:
                if u not in token.units:
                    self.was_some_unit_changed = True
                    self.found_units_in_this_tree = True
                    if self.debug_verbose:
                        s = "tw. units changed in %s" % inspect.stack()[0][3]
                        print s
                    token.units.append(u)
                    self.is_unit_propagation_based_on_constants = \
                        token.function.\
                        is_unit_propagation_based_on_constants
                    token.is_unit_propagation_based_on_constants = \
                        token.function.\
                        is_unit_propagation_based_on_constants
                    self.is_unit_propagation_based_on_unknown_variable = \
                        token.function.\
                        is_unit_propagation_based_on_unknown_variable
                    token.is_unit_propagation_based_on_unknown_variable = \
                        token.function.\
                        is_unit_propagation_based_on_unknown_variable
def apply_units_known_symbols(self, token, left_token, right_token):
''' INCLUDES M_PI
'''
if self.debug_verbose:
self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
if token.str == 'M_PI':
if token.str in self.my_symbol_helper.ros_unit_dictionary:
a_dict = self.my_symbol_helper.ros_unit_dictionary\
[token.str][token.str]
if a_dict not in token.units:
self.was_some_unit_changed = True
self.found_units_in_this_tree = True
if self.debug_verbose:
s = "tw. units changed in %s" % inspect.stack()[0][3]
print s
if self.cps_unit_checker:
if self.cps_unit_checker.SHOULD_FIND_ALL_UNITS:
self.cps_unit_checker.\
add_class_and_units_to_all_list\
(token.str, a_dict)
token.units.append(a_dict)
def apply_units_inverse_trig(self, token, left_token, right_token):
if self.debug_verbose:
self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
if token.str in ['atan2', 'acos', 'asin', 'atan']:
if token.str in self.my_symbol_helper.ros_unit_dictionary:
a_dict = self.my_symbol_helper.ros_unit_dictionary[
token.str][token.str]
if a_dict not in token.units:
self.was_some_unit_changed = True
self.found_units_in_this_tree = True
if self.debug_verbose:
s = "tw. units changed in %s" % inspect.stack()[0][3]
print s
if self.cps_unit_checker:
if self.cps_unit_checker.SHOULD_FIND_ALL_UNITS:
self.cps_unit_checker.\
add_class_and_units_to_all_list\
(token.str, a_dict)
token.units.append(a_dict)
def apply_units_toSec(self, token, left_token, right_token):
if self.debug_verbose:
self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
if token.str == 'toSec':
if token.str in self.my_symbol_helper.ros_unit_dictionary:
toSec_units_dict = self.\
my_symbol_helper.ros_unit_dictionary[
token.str][token.str]
if toSec_units_dict not in token.units:
self.was_some_unit_changed = True
self.found_units_in_this_tree = True
if self.debug_verbose:
s = "tw. units changed in %s" % inspect.stack()[0][3]
print s
if self.cps_unit_checker:
if self.cps_unit_checker.SHOULD_FIND_ALL_UNITS:
self.cps_unit_checker.\
add_class_and_units_to_all_listi\
(token.str, toSec_units_dict)
token.units.append(toSec_units_dict)
    def apply_units_getXYZ(self, token, left_token, right_token):
        '''Attach units to getX/getY/getZ call tokens.

        Infers the type of the object the getter was invoked on -- either
        through a chained getOrigin()/getRotation() call or directly from
        the receiver variable -- and looks that type up in the ROS unit
        dictionary.
        '''
        if self.debug_verbose:
            self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
        if token.str not in ['getX', 'getY', 'getZ']:
            return
        # DECLARE VARIABLE HERE FOR SCOPING
        units_for_getXYZ = []
        # TRY TO FIGURE OUT TYPE THIS WAS CALLED ON
        # ASSUME PARENT TOKEN IS '.'
        # OPTION 1 - LOOK FOR getOrigin and getRotation
        if token.astParent.astOperand1 and \
                token.astParent.astOperand1.str == '(':
            paren_token = token.astParent.astOperand1  # CONVENIENCE
            if paren_token.astOperand1 and paren_token.astOperand1.str == '.':
                dot_token = paren_token.astOperand1
                if dot_token.astOperand1 and dot_token.astOperand1.variable:
                    class_name = self.my_symbol_helper.\
                        find_variable_type(dot_token.astOperand1.variable)
                    class_name_sanitized = self.my_symbol_helper.\
                        sanitize_class_name(class_name)
                    if class_name_sanitized in self.my_symbol_helper.\
                            ros_unit_dictionary:
                        # getOrigin vs getRotation yield different unit dicts.
                        if dot_token.astOperand2 and \
                                dot_token.astOperand2.str == 'getOrigin':
                            units_for_getXYZ = self.my_symbol_helper.\
                                ros_unit_dictionary[
                                    class_name_sanitized]['getOrigin']
                        elif dot_token.astOperand2 and \
                                dot_token.astOperand2.str == 'getRotation':
                            units_for_getXYZ = self.\
                                my_symbol_helper.ros_unit_dictionary[
                                    class_name_sanitized]['getRotation']
                else:
                    s = 'Failed token: %s %s'
                    print s % (dot_token.astOperand1.linenr,
                               dot_token.astOperand1.str)
        elif token.astParent.astOperand1:
            # GET TYPE SO WE CAN INFER CORRECT UNITS BASED
            # ON KNOWLEDGE OF ROS UNIT ASSUMPTIONS
            if token.astParent.astOperand1.variable:
                class_name = self.my_symbol_helper.\
                    find_variable_type(token.astParent.
                                      astOperand1.variable)
                class_name_sanitized = self.my_symbol_helper.\
                    sanitize_class_name(class_name)
                # FIND APPROPRIATE UNITS
                if class_name_sanitized in \
                        self.my_symbol_helper.ros_unit_dictionary:
                    units_for_getXYZ = self.my_symbol_helper.\
                        ros_unit_dictionary[
                            class_name_sanitized][token.str]
                else:
                    pass
        else:
            # NO AST PARENT OPERAND1 - SHOULD BE IMPOSSIBLE
            # assert False
            pass
        if units_for_getXYZ and (units_for_getXYZ not in token.units):
            self.was_some_unit_changed = True
            self.found_units_in_this_tree = True
            if self.debug_verbose:
                s = "tw. units changed in %s" % inspect.stack()[0][3]
                print s
            if self.cps_unit_checker:
                if self.cps_unit_checker.SHOULD_FIND_ALL_UNITS:
                    self.cps_unit_checker.\
                        add_class_and_units_to_all_list(
                            token.str,
                            units_for_getXYZ)
            token.units.append(units_for_getXYZ)
    def apply_units_quatToRPY(self, token, left_token, right_token):
        '''Record the quatToRPY units on the call's roll/pitch/yaw output
        arguments, writing them into the enclosing scope's var_ordered_dict
        so later reads of those variables pick the units up.
        '''
        if self.debug_verbose:
            self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
        if token.str not in ['quatToRPY']:
            return
        if token.str not in self.my_symbol_helper.ros_unit_dictionary:
            return
        rpy_tokens_list = []
        # CASE 1: plain call -- argument commas hang off the parent token.
        if token.astParent.astOperand2.str == ',':
            try:
                # THIS MAKES A LOT OF ASSUMPTIONS ABOUT THE AST STRUCTURE
                roll_token = token.astParent.astOperand2.\
                    astOperand1.astOperand1.astOperand2
                pitch_token = token.astParent.astOperand2.\
                    astOperand1.astOperand2
                yaw_token = token.astParent.astOperand2.astOperand2
                # ADD THESE TOKENS TO A LIST
                rpy_tokens_list = [roll_token, pitch_token, yaw_token]
            # NOTE(review): bare except silently tolerates any AST shape
            # mismatch (deliberate best-effort extraction).
            except:
                if self.debug:
                    print "quatToRPY 1st Case exception"
        # CASE 2: namespace-qualified call (ns::quatToRPY) -- one level up.
        elif token.astParent.str == "::":
            try:
                roll_token = token.astParent.astParent.astOperand2.\
                    astOperand1.astOperand1.astOperand2
                pitch_token = token.astParent.astParent.astOperand2.\
                    astOperand1.astOperand2
                yaw_token = token.astParent.astParent.astOperand2.astOperand2
                # ADD THESE TOKENS TO A LIST
                rpy_tokens_list = [roll_token, pitch_token, yaw_token]
            except:
                if self.debug:
                    print "quatToRPY 2nd Case exception"
        if not rpy_tokens_list:
            if self.debug:
                print "quatToRPY no rpy_tokens_list tokens"
            return
        # GET UNITS FROM SYMBOL HELPER
        quatToRPY_units_dict = self.my_symbol_helper.\
            ros_unit_dictionary[token.str][token.str]
        for rpy_token in rpy_tokens_list:
            rpy_compound_name = self.recurse_and_collect_string(rpy_token)
            # ADD UNITS TO VARIABLE IN SCOPE
            self.return_value_list = []
            self.generic_recurse_and_apply_function(rpy_token,
                                                    self.find_first_variable)
            if len(self.return_value_list) != 1:
                if self.debug:
                    s = "quatToRPY exception: multiple variables "
                    s += "returned in find_first_variable"
                    print s
                return  # todo: track count of this error
            rpy_var = self.return_value_list[0]
            rpy_scope = rpy_var.nameToken.scope
            linenr = int(rpy_token.linenr)
            if rpy_compound_name in rpy_scope.var_ordered_dict:
                # ALREADY HAS ASSIGNMENT, ADD TO IT
                rpy_scope.var_ordered_dict[rpy_compound_name][linenr]\
                    = {'units': [quatToRPY_units_dict],
                       'token': token,  # THIS IS A LIST
                       'is_unit_propagation_based_on_constants': False,
                       'is_unit_propagation_based_on_unknown_variable':
                       False}
            else:
                # First recorded assignment for this compound name.
                rpy_scope.var_ordered_dict[rpy_compound_name]\
                    = {linenr:
                       {'units': [quatToRPY_units_dict],
                        'token': token,
                        'is_unit_propagation_based_on_constants': False,
                        'is_unit_propagation_based_on_unknown_variable':
                        False}}
def propagate_units_across_dot_connectors(self,
token,
left_token,
right_token):
if self.debug_verbose:
self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
self.propagate_units_across_connectors(token,
left_token,
right_token,
'.')
# DOT CONNECTOR AFTER PAREN WiTH NOTHING BEFORE
if token and token.str == '.' and \
token.astParent and \
token.astParent.str == '(' and not \
token.astParent.astOperand2:
self.propagate_units_across_connectors(token.astParent,
token,
None,
'(')
def propagate_units_across_parenthesis(self,
token,
left_token,
right_token):
if self.debug_verbose:
self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
if token.str == '(':
if left_token.function:
# IF LEFT SIDE IS A FUNCTION, PROPATE ITS RETURN UNITS
for u in left_token.units:
if u not in token.units:
self.was_some_unit_changed = True
if self.debug_verbose:
s = "tw. units changed in %s"
s = s % inspect.stack()[0][3]
print s
token.units.append(u)
elif left_token.isName:
return
elif not right_token:
self.propagate_units_across_connectors(token,
left_token,
right_token,
'(')
else:
pass
def propagate_units_across_square_brackets(self,
token,
left_token,
right_token):
if self.debug_verbose:
self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
self.propagate_units_across_connectors(token,
left_token,
right_token,
'[')
def propagate_units_across_assignment(self,
token,
left_token,
right_token):
if self.debug_verbose:
self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
# PREVENT PROPAGATION ACROSS MULTIPLE INITIALIZATIONS, LIKE
# x = y = z = 0
if token.str == "=":
if left_token and left_token.str == "=":
return
if right_token and right_token.str == "=":
return
self.propagate_units_across_connectors(token,
left_token,
right_token,
'=')
def propagate_units_across_return(self, token, left_token, right_token):
if self.debug_verbose:
self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
self.propagate_units_across_connectors(token,
left_token,
right_token,
'return')
    def propagate_units_into_function_args(self,
                                           token,
                                           left_token,
                                           right_token):
        ''' Stub: no propagation into function arguments happens here.
            Only the debug-verbose hook runs; argument units are instead
            collected by
            collect_function_parameters_units_and_decorate_function_objects.
            '''
        if self.debug_verbose:
            self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
    def propagate_units_sqrt(self, token, left_token, right_token):
        ''' Propagate units through sqrt(): halve every unit exponent.
            Finds the token that should receive the result units (the
            parent of the '::' chain for std::sqrt, or the call's '('
            otherwise), unions the units found on the receiver's
            children, divides each exponent by two, and appends any new
            unit dicts to the receiver.
            input:   token, left_token, right_token  cppcheck AST tokens
            returns: None.  Side effect: unit_receiver.units may grow
            '''
        if self.debug_verbose:
            self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
        if token.str == 'sqrt':
            unit_receiver = None
            # CASE: NAMESPACE, e.g. a '::' qualified call
            if token.astParent.str == '::':
                if token.astParent.astParent:
                    unit_receiver = token.astParent.astParent
            # CASE: NAMESPACE STD (already resolved, plain call)
            elif token.astParent.str == '(':
                if token.astParent.astOperand2:
                    unit_receiver = token.astParent
            if not unit_receiver:
                # todo: handle other call shapes
                return
            left_units = right_units = None
            if unit_receiver.astOperand1:
                left_units = unit_receiver.astOperand1.units
            if unit_receiver.astOperand2:
                right_units = unit_receiver.astOperand2.units
            # GET UNITS FROM INSIDE PARENS
            new_units = self.merge_units_by_set_union(left_units,
                                                      right_units)
            # DIVIDE UNITS BY TWO (sqrt halves each exponent)
            for u in new_units:
                for k, v in u.iteritems():
                    u[k] = v / 2.
            # ATTEMPT TO PROPAGATE UNITS ACROSS '('
            for u in new_units:
                if u not in unit_receiver.units:
                    # PROPAGATE CHILD TOKEN STATUS TO TOKEN
                    self.apply_propagation_status_to_token(token,
                                                           left_token,
                                                           right_token)
                    self.was_some_unit_changed = True
                    if self.debug_verbose:
                        s = "tw. units changed in %s" % inspect.stack()[0][3]
                        print s
                    unit_receiver.units.append(u)
    def propagate_units_inverse_trig(self, token, left_token, right_token):
        ''' Propagate units through inverse trig (atan2/acos/asin/atan).
            Copies units already attached to the call-name token onto
            the receiving token (the '::' chain's parent for qualified
            calls, or the call's '(' otherwise).
            input:   token, left_token, right_token  cppcheck AST tokens
            returns: None.  Side effect: unit_receiver.units may grow
            '''
        if self.debug_verbose:
            self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
        if token.str in ['atan2', 'acos', 'asin', 'atan']:
            unit_receiver = None
            # CASE: NAMESPACE
            if token.astParent.str == '::':
                # ADD UNITS TO THE PARENT OF '::'
                unit_receiver = token.astParent.astParent
            # CASE: NAMESPACE STD
            elif token.astParent.str == '(':
                unit_receiver = token.astParent
            if not unit_receiver:
                s = 'unexpected ast value when processing inverse trig '
                s += 'linenr=%d lines=%d file=%s source_file=%s'
                s = s % (int(token.linenr),
                         len(self.source_file_lines),
                         token.file,
                         self.source_file)
                print s
                return
            for u in token.units:
                if u not in unit_receiver.units:
                    unit_receiver.units.append(u)
                    # PROPAGATE CHILD TOKEN STATUS TO TOKEN
                    self.apply_propagation_status_to_token(token,
                                                           left_token,
                                                           right_token)
                    self.was_some_unit_changed = True
                    if self.debug_verbose:
                        s = "tw. units changed in %s" % inspect.stack()[0][3]
                        print s
    def propagate_units_getXYZ(self, token, left_token, right_token):
        ''' Propagate units through getX/getY/getZ accessor calls.
            Copies units attached to the call-name token onto the call's
            '(' token, marking the inference as based on an unknown
            variable because the accessor's unit is ambiguous.
            input:   token, left_token, right_token  cppcheck AST tokens
            returns: None.  Side effect: unit_receiver.units may grow and
            uncertainty flags may be set
            '''
        if self.debug_verbose:
            self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
        if token.str in ['getX', 'getY', 'getZ']:
            unit_receiver = None
            if token.astParent.str == '(':
                unit_receiver = token.astParent
            if not unit_receiver:
                if self.debug:
                    s = 'unexpected ast value when processing getXYX linenr=%d'
                    s += ' lines=%d file=%s source_file=%s'
                    s = s % (int(token.linenr),
                             len(self.source_file_lines),
                             token.file,
                             self.source_file)
                    print s
                return
            for u in token.units:
                if u not in unit_receiver.units:
                    unit_receiver.units.append(u)
                    self.was_some_unit_changed = True
                    # THIS IS TRUE BECAUSE getX is a weaker inference
                    # - usually meters but might be m/s
                    self.is_unit_propagation_based_on_unknown_variable = True
                    token.is_unit_propagation_based_on_unknown_variable = True
                    if self.debug_verbose:
                        s = "tw. units changed in %s" % inspect.stack()[0][3]
                        print s
    def propagate_units_ternary(self, token, left_token, right_token):
        ''' TERNARY OPERATOR x = 1 > 0 ? true_part : false_part
            Units of both branches are merged across the ':' token and
            the result is then copied onto the '?' token.
            input:   token ('?'), left_token (condition branch),
                     right_token (the ':' token)
            returns: None.  Side effect: token.units may be extended
        '''
        if self.debug_verbose:
            self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
        if token.str == '?':
            # MALFORMED TERNARY: A ':' CHILD IS REQUIRED
            if not right_token or not right_token.str == ':':
                return
            self.propagate_units_across_connectors(right_token,
                                                   right_token.astOperand1,
                                                   right_token.astOperand2,
                                                   ':')
            if right_token.units:
                for u in right_token.units:
                    if u not in token.units:
                        # PROPAGATE CHILD TOKEN STATUS TO TOKEN
                        self.apply_propagation_status_to_token(token,
                                                               left_token,
                                                               right_token)
                        self.was_some_unit_changed = True
                        if self.debug_verbose:
                            s = "tw. units changed in %s"
                            s = s % inspect.stack()[0][3]
                            print s
                        token.units.append(u)
    def propagate_units_pow(self, token, left_token, right_token):
        ''' Propagate units through pow(base, exponent) calls.
            When the exponent is a numeric literal, every unit exponent
            of the base is multiplied by it and the result is attached
            to the receiving token ('::' chain parent or the call '(').
            input:   token, left_token, right_token  cppcheck AST tokens
            returns: None.  Side effect: unit_receiver.units may grow
            '''
        if self.debug_verbose:
            self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
        if token.str == 'pow':
            comma_token = None
            unit_receiver = None
            # CASE: NAMESPACE-QUALIFIED CALL
            if token.astParent.str == '::':
                if token.astParent.astParent:
                    unit_receiver = token.astParent.astParent
                    comma_token = None
                    if unit_receiver.astOperand2 and \
                            unit_receiver.astOperand2.str == ',':
                        comma_token = unit_receiver.astOperand2
            # CASE: NAMESPACE STD
            elif token.astParent.str == '(':
                if token.astParent.astOperand2:
                    comma_token = token.astParent.astOperand2
                unit_receiver = token.astParent
            if not comma_token or not unit_receiver:
                # NOTE(review): this diagnostic is built but never
                # printed or logged before returning - confirm intent
                s = 'unexpected ast value when processing pow linenr=%d '
                s += 'lines=%d file=%s source_file=%s'
                s = s % (int(token.linenr),
                         len(self.source_file_lines),
                         token.file,
                         self.source_file)
                return
            # EXPONENT MUST BE A NUMERIC LITERAL
            if comma_token.astOperand2 and comma_token.astOperand2.isNumber:
                # STRIP A FLOAT SUFFIX SUCH AS '2.f' BEFORE PARSING
                s = comma_token.astOperand2.str.replace('f', '')
                power_exponent = float(s)
                if comma_token.astOperand1.units:
                    # APPLY POWER TO UNITS
                    new_units = []
                    # FIRST, DEEPCOPY
                    new_units = copy.deepcopy(comma_token.astOperand1.units)
                    # NEXT, APPLY EXPONENT
                    for unit_dict in new_units:
                        for k, v in unit_dict.iteritems():
                            unit_dict[k] = power_exponent * v
                    for u in new_units:
                        if u not in unit_receiver.units:
                            # PROPAGATE CHILD TOKEN STATUS TO TOKEN
                            self.apply_propagation_status_to_token(token,
                                                                   left_token,
                                                                   right_token)
                            self.was_some_unit_changed = True
                            if self.debug_verbose:
                                s = "tw. units changed in %s"
                                s = s % inspect.stack()[0][3]
                                print s
                            unit_receiver.units.append(u)
def propagate_units_math_min_max(self, token, left_token, right_token):
if self.debug_verbose:
self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
if token.str in ['min', 'max']:
unit_receiver = None
comma_token = None
# CASE: NAMESPACE
if token.astParent.str == '::':
if token.astParent.astParent:
unit_receiver = token.astParent.astParent
if unit_receiver.astOperand2 and \
unit_receiver.astOperand2.str == ',':
comma_token = unit_receiver.astOperand2
# CASE: NAMESPACE STD
elif token.astParent.str == '(':
if token.astParent.astOperand2:
if token.astParent.astOperand2.str == ',':
comma_token = token.astParent.astOperand2
unit_receiver = token.astParent
if not unit_receiver or not comma_token:
s = 'unexpected ast value when processing abs/fabs '
s = 'linenr=%d lines=%d file=%s source_file=%s'
s = s % (int(token.linenr),
len(self.source_file_lines),
token.file,
self.source_file)
return
left = right = None
if comma_token.astOperand1:
left = comma_token.astOperand1
if comma_token.astOperand2:
right = comma_token.astOperand2
# SIDE EFFECT OF NEXT LINE SHOULD ADD UNITS TO '('
self.propagate_units_across_connectors(comma_token,
left,
right,
',')
# ATTEMPT TO PROPATE UNITS ACROSS '('
for u in comma_token.units:
if u not in unit_receiver.units:
# PROPAGATE CHILD TOKEN STATUS TO TOKEN
self.apply_propagation_status_to_token(token,
left_token,
right_token)
self.was_some_unit_changed = True
if self.debug_verbose:
s = "tw. units changed in %s" % inspect.stack()[0][3]
print s
unit_receiver.units.append(u)
    def propagate_units_math_abs_fabs_floor_ceil(self,
                                                 token,
                                                 left_token,
                                                 right_token):
        ''' Propagate units through abs/fabs/floor/ceil calls.
            Units from inside the call's parentheses are merged and
            attached to the receiving token ('::' chain parent for
            qualified calls, or the call's '(' otherwise).
            input:   token, left_token, right_token  cppcheck AST tokens
            returns: None.  Side effect: unit_receiver.units may grow
            '''
        if self.debug_verbose:
            self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
        if token.str in ['abs', 'fabs', 'floor', 'ceil']:
            paren_token = None
            unit_receiver = None
            # CASE: NAMESPACE
            if token.astParent.str == '::':
                if token.astParent.astParent:
                    unit_receiver = token.astParent.astParent
                    paren_token = None
                    if unit_receiver.astOperand2 and \
                            unit_receiver.astOperand2.str == '(':
                        paren_token = unit_receiver.astOperand2
            # CASE: NAMESPACE STD
            elif token.astParent.str == '(':
                if token.astParent.astOperand2:
                    paren_token = token.astParent.astOperand2
                unit_receiver = token.astParent
            if not paren_token or not unit_receiver:
                # NOTE(review): this diagnostic is built but never
                # printed or logged before returning - confirm intent
                s = 'unexpected ast value when processing abs/fabs '
                s += 'linenr=%d lines=%d file=%s source_file=%s'
                s = s % (int(token.linenr),
                         len(self.source_file_lines),
                         token.file,
                         self.source_file)
                return
            left = right = None
            if paren_token.astOperand1:
                left = paren_token.astOperand1
            if paren_token.astOperand2:
                right = paren_token.astOperand2
            # SIDE EFFECT OF NEXT LINE SHOULD ADD UNITS TO '('
            self.propagate_units_across_connectors(paren_token,
                                                   left,
                                                   right,
                                                   '(')
            # ATTEMPT TO PROPAGATE UNITS ACROSS '('
            for u in paren_token.units:
                if u not in unit_receiver.units:
                    # PROPAGATE CHILD TOKEN STATUS TO TOKEN
                    self.apply_propagation_status_to_token(token,
                                                           left_token,
                                                           right_token)
                    self.was_some_unit_changed = True
                    if self.debug_verbose:
                        s = "tw. units changed in %s" % inspect.stack()[0][3]
                        print s
                    unit_receiver.units.append(u)
    def propagate_units_across_connectors(self,
                                          token,
                                          left_token,
                                          right_token,
                                          connector):
        ''' DECORATE TREE WITH UNITS MERGED FROM BOTH CHILDREN
            input:   token       an AST token expected to match connector
                     left_token  left child of token, or None
                     right_token right child of token, or None
                     connector   token string to match, e.g. '=', '.'
            returns: None.  Side effect on token is to modify units
            '''
        if self.debug_verbose:
            self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
        left_units = right_units = []  # INIT
        if left_token:
            left_units = left_token.units
        if right_token:
            right_units = right_token.units
        # APPLY SET UNION
        if token.str == connector:
            new_units = self.merge_units_by_set_union(left_units,
                                                      right_units)
            # UNIFY TOKENS FROM CHILDREN WITH CURRENT TOKENS
            if new_units != token.units:
                token.units = new_units
                self.was_some_unit_changed = True
                # PROPAGATE CHILD TOKEN STATUS TO TOKEN
                self.apply_propagation_status_to_token(token,
                                                       left_token,
                                                       right_token)
                if self.debug_verbose:
                    s = "tw. units changed in %s" % inspect.stack()[0][3]
                    print s
def error_check_multiple_units(self, token, left_token, right_token):
if self.debug_verbose:
self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
if len(token.units) > 1:
# MULTIPLE HYPOTHESIS
if token.str == '=':
error_string = "%s : %s MULTIPLE UNITS BY ASSIGNMENT: %s"
error_string = error_string % (token.file,
token.linenr,
token.units)
else:
error_string = "%s : %s %s has MULTIPLE UNITS: %s"
error_string = error_string % (token.file,
token.linenr,
token.str,
token.units)
self.handle_error(token, error_string)
def error_check_logical_operators(self, token, left_token, right_token):
if self.debug_verbose:
self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
# NOT << and >> since theses are used in PRINT OUTS
if token.isOp and token.str in ['&&', '||', '!']:
error_units = ''
if left_token and left_token.units:
error_units += str(left_token.units)
if right_token and right_token.units:
error_units += str(right_token.units)
if error_units:
error_string = "%s : %s LOGICAL OP %s used : %s"
error_string = error_string % (token.file,
token.linenr,
token.str,
error_units)
self.handle_error(token, error_string)
def error_check_bitwise_operators(self, token, left_token, right_token):
if self.debug_verbose:
self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
# NOT << and >> since theses are used in PRINT OUTS
if token.isOp and token.str in ['&', '|', '^', '~', '~=', '&=', '|=']:
if left_token or right_token:
error_units = ''
if left_token and left_token.units:
error_units += str(left_token.units)
if right_token and right_token.units:
error_units += str(right_token.units)
error_string = "%s : %s BITWISE OP %s used : %s"
error_string = error_string % (token.file,
token.linenr,
token.str,
error_units)
self.handle_error(token, error_string)
def error_check_modulo(self, token, left_token, right_token):
if self.debug_verbose:
self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
# NOT << and >> since theses are used in PRINT OUTS
if token.isOp and token.str in ['%', '%=']:
if left_token or right_token:
error_units = ''
if left_token and left_token.units:
error_units += str(left_token.units)
if right_token and right_token.units:
error_units += str(right_token.units)
error_string = "%s : %s MODULO OP %s used : %s"
error_string = error_string % (token.file,
token.linenr,
token.str,
error_units)
self.handle_error(token, error_string)
def error_check_unitless_operators(self, token, left_token, right_token):
''' LOOK FOR STRONG UNITLESS TOKENS
input: token cppcheck token object
left_token, right_token
returns: nothing, with possible side effect of adding errors
'''
if self.debug_verbose:
self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
if token.units:
for token_dict in token.units:
for value in token_dict.values():
# MUST HAVE BECOME UNITLESS THROUGH AN OPERATIONS
if value == 0:
error_units = str(token.units)
error_string = "%s : %s TOKEN IS UNITLESS %s"
error_string = error_string % (token.file,
token.linenr,
error_units)
self.handle_error(token, error_string)
def error_check_comparison_operators(self, token, left_token, right_token):
''' COMPARISON OPERATORS - MUST BE THE SAME ON BOTH SIDES
input: token cppcheck token object
left_token, right_token
returns: nothing, with possible side effect of adding errors
'''
if self.debug_verbose:
self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
if token.isComparisonOp:
if left_token.units and right_token.units: # BOTH HAVE UNITS
if left_token.units != right_token.units:
error_units = str(left_token.units)
error_units += str(right_token.units)
error_string = "%s : %s COMPARISON AGAINST "
error_string += "DIFFERENT UNITS %s"
error_string = error_string % (token.file,
token.linenr,
error_units)
self.handle_error(token, error_string)
def handle_error(self, token, error_string):
if self.debug_verbose:
self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
line = ''
if self.source_file_lines:
if str(token.file).endswith('cpp'):
if int(token.linenr) > len(self.source_file_lines):
s = "token line number greater than size of source file?"
s += "linenr=%d lines=%d file=%s error=%s, source_file=%s"
s = s % (int(token.linenr),
len(self.source_file_lines),
token.file,
error_string,
self.source_file)
raise ValueError(s)
line = self.source_file_lines[int(token.linenr)-1]
error_dict = {'token': token,
'error_msg': error_string,
'line': line,
'file': self.current_file}
self.errors.append(error_dict)
def merge_units_by_set_union(self, left_units, right_units):
''' input: {left, right}_units - lists of unit dictionaries.
result: set union of inputs
'''
if self.debug_verbose:
self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
new_units = [] # INIT
if left_units and right_units:
# UNITS ARE SAME - PASS THEM UP
if left_units == right_units:
# COPY EITHER ONE BECAUSE SAME
new_units = copy.deepcopy(right_units)
# BOTH RIGHT AND LEFT SIDE OF AST HAS UNITS -- GET THE 'SET'
new_units = copy.deepcopy(left_units)
for r in right_units:
if r not in new_units:
new_units.append(copy.deepcopy(r))
# ONLY HAVE UNITS FROM ONE SIDE OF AST TREE
else:
if left_units:
new_units = copy.deepcopy(left_units)
elif right_units:
new_units = copy.deepcopy(right_units)
return new_units
    def propagate_units_across_operators(self, token, left_token, right_token):
        ''' PROPAGATE UNITS ACROSS ARITHMETIC OPERATORS
            +/- take the set union of both sides; * and / combine unit
            exponents pairwise.  When only one side has units the units
            are propagated anyway (the other side is assumed scalar) and
            the inference is marked as weakened.
            input:   token, left_token, right_token  cppcheck AST tokens
            returns: None.  Side effect: token.units and the weakening
            flags may change
            '''
        if self.debug_verbose:
            self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
        # NOTE(review): '*' and '*=' appear twice in this list -
        # harmless but redundant
        if token.str not in ['+',
                             '-',
                             '+=',
                             '-=',
                             '*',
                             '/',
                             '*=',
                             '/=',
                             '*',
                             '*=']:
            return
        left_units = right_units = []  # INIT
        # LEFT BRANCH
        if left_token:
            left_units = left_token.units
        # RIGHT BRANCH
        if right_token:
            right_units = right_token.units
        # PROPAGATE EVEN IF WE DON'T KNOW -- ASSUME SCALAR
        if not (left_units and right_units):
            new_units = self.merge_units_by_set_union(left_units, right_units)
            # CHECK FOR DIVISION AND EMPTY LEFT BRANCH
            if token.str in ['/', '/='] and not left_units:
                # FLIP SIGNS ON NEW UNITS (scalar / units inverts them)
                for u in new_units:
                    for k, v in u.iteritems():
                        u[k] = -1 * v
            # WEAKEN INFERENCE IF WE'RE MULTIPLYING OR
            # DIVIDING ON CONSTANTS OR UNKNOWN VARIABLES
            if token.str in ['*', '/', '*=', '/=']:
                if new_units in \
                        self.my_symbol_helper.dimensionless_units_as_lists:
                    new_units = []
                # DETECT OPERATIONS ON CONSTANTS, like x = y * 1024.23
                if left_token and not left_units:
                    if left_token.isNumber:  # TRUE FOR EITHER INT OR FLOAT
                        self.is_unit_propagation_based_on_constants = True
                        token.is_unit_propagation_based_on_constants = True
                if right_token and not right_units:
                    if right_token.isNumber:  # TRUE FOR EITHER INT OR FLOAT
                        self.is_unit_propagation_based_on_constants = True
                        token.is_unit_propagation_based_on_constants = True
                # DETECT OPERATIONS ON UNKNOWN VARIABLES
                if left_units or right_units:  # AT LEAST ONE
                    if left_token and not left_units:
                        self.is_unit_propagation_based_on_unknown_variable \
                            = True
                        token.is_unit_propagation_based_on_unknown_variable \
                            = True
                    if right_token and not right_units:
                        self.is_unit_propagation_based_on_unknown_variable \
                            = True
                        token.is_unit_propagation_based_on_unknown_variable \
                            = True
            if new_units != token.units:
                token.units = new_units
                # PROPAGATE UNCERTAINTY BASED ON CONSTANTS OR
                # UNKNOWN VARIABLES
                self.apply_propagation_status_to_token(token,
                                                       left_token,
                                                       right_token)
                self.was_some_unit_changed = True
                if self.debug_verbose:
                    s = "tw. units changed in %s" % inspect.stack()[0][3]
                    print s
            return
        new_units = []
        if token.str in ['+', '-', '+=', '-=', '*', '/', '*=', '/='] \
                and token.isOp:
            # ADDITION / SUBTRACTION - ATTEMPT TO MERGE
            if token.str in ['+', '-', '+=', '-=']:
                # ADDITION - APPLY SET UNION
                new_units = self.merge_units_by_set_union(left_units,
                                                          right_units)
            # MULTIPLICATION / DIVISION - COMBINE EXPONENTS PAIRWISE
            elif token.str in ['*', '*=', '/', '/=']:
                all_unit_dicts_from_multiplication = []
                for unit_dict_left in left_units:
                    for unit_dict_right in right_units:
                        if self.debug:
                            s = "info: calling "
                            s += "apply_multiplication_to_unit_dict: "
                            s += "%s:%s %s %d %d"
                            print s % (token.file,
                                       str(token.linenr),
                                       token.str,
                                       len(left_units),
                                       len(right_units))
                            # NOTE(review): the next three statements
                            # rebuild the debug string and discard it -
                            # dead code
                            s = "info: calling "
                            s += "apply_multiplication_to_unit_dict: "
                            s += "%s:%s %s %d %d"
                        result_units = self.apply_multiplication_to_unit_dicts(
                            unit_dict_left,
                            unit_dict_right,
                            token.str)
                        if result_units:
                            all_unit_dicts_from_multiplication.append(
                                result_units)
                for u in all_unit_dicts_from_multiplication:
                    if u not in new_units:
                        # PROPAGATE UNCERTAINTY BASED ON
                        # CONSTANTS OR UNKNOWN VARIABLES
                        self.apply_propagation_status_to_token(token,
                                                               left_token,
                                                               right_token)
                        new_units.append(u)
                # CHECK FOR UNITLESS TOKENS AS THE RESULT OF OPERATIONS
                if self.should_check_unitless_during_multiplication:
                    for unit_dict in new_units:
                        for k, v in unit_dict.iteritems():
                            if v == 0:
                                # UNITLESS
                                error_string = "%s : %s UNITLESS %s" % (
                                    token.file,
                                    token.linenr,
                                    new_units)
                                self.handle_error(token, error_string)
        # UNIFY TOKENS FROM CHILDREN WITH CURRENT TOKENS
        if new_units != token.units:
            are_all_units_zero = True  # ASSUME TRUE
            are_any_units_zero = False
            for i, u in enumerate(new_units):
                zero_test_list = [v == 0 for v in u.values()]
                are_all_units_zero = all(zero_test_list)
                are_any_units_zero = any(zero_test_list)
                # WHEN WE HAVE SOME ZERO BUT NOT ALL, FILTER THE ZERO UNITS
                if are_any_units_zero and not are_all_units_zero:
                    new_units[i] = {k: v for k, v in u.iteritems() if v != 0}
        if new_units != token.units:
            # PROPAGATE UNCERTAINTY BASED ON CONSTANTS OR UNKNOWN VARIABLES
            self.apply_propagation_status_to_token(token,
                                                   left_token,
                                                   right_token)
            token.units = new_units
            self.was_some_unit_changed = True
            if self.debug_verbose:
                s = "tw. units changed in %s" % inspect.stack()[0][3]
                print s
def apply_propagation_status_to_token(self,
token,
left_token,
right_token):
''' APPLIES PROPAGATION WEAKENING FROM CHILD TOKENS TO PARENT TOKEN
input: token, left_token, right_token all tokens
returns: none. side effect can change variables on 'token'
'''
if left_token:
if left_token.is_unit_propagation_based_on_constants:
token.is_unit_propagation_based_on_constants = True
if left_token.is_unit_propagation_based_on_unknown_variable:
token.is_unit_propagation_based_on_unknown_variable = True
if right_token:
if right_token.is_unit_propagation_based_on_constants:
token.is_unit_propagation_based_on_constants = True
if right_token.is_unit_propagation_based_on_unknown_variable:
token.is_unit_propagation_based_on_unknown_variable = True
def apply_multiplication_to_unit_dicts(self,
unit_dict_left,
unit_dict_right,
op):
''' APPLIES MULTIPLICATION AND DIVISION TO UNIT DICTIONARIES
(BY ADDING EXPONENTS)
input: unit_dict_left dictionary of units.
ex: {'m':1, 's':-1} is meters per second
unit_dict_right same
op string representing mult or div operators
returns: new dict with resulting units ex: {'m':2, 's':-2}
'''
if self.debug_verbose:
self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
return_dict = {}
# SPECIAL HANDLING FOR RADIANS AND QUATERNIONS
if unit_dict_left in self.my_symbol_helper.dimensionless_units \
and unit_dict_right \
in self.my_symbol_helper.dimensionless_units:
# SPECIAL CASE BOTH ARE RADIANS. CLOSED UNDER MULTIPLICATION
return copy.deepcopy(unit_dict_left)
if unit_dict_left not in self.my_symbol_helper.dimensionless_units \
and unit_dict_right \
not in self.my_symbol_helper.dimensionless_units:
return_dict = copy.deepcopy(unit_dict_left)
if unit_dict_left in self.my_symbol_helper.dimensionless_units:
# DON'T PROPAGATE RADIANS
unit_dict_left = {}
return_dict = copy.deepcopy(unit_dict_right)
if unit_dict_right in self.my_symbol_helper.dimensionless_units:
# DON'T PROPAGATE RADIANS
unit_dict_right = {}
return_dict = copy.deepcopy(unit_dict_left)
return_dict = copy.deepcopy(unit_dict_left)
for unit in unit_dict_right:
if unit in return_dict:
# MULT WHEN BOTH DICTS HAVE SAME UNIT
if op in ['*', '*=']:
# ADD OF EXPONENT IS MULT
return_dict[unit] += unit_dict_right[unit]
# DIV WHEN BOTH DICTS HAVE SAME UNIT
elif op in ['/', '/=']:
# SUBTRACTION OF EXPONENT IS DIV
return_dict[unit] -= unit_dict_right[unit]
else:
# ONLY ONE SIDE HAS UNIT
if op in ['*', '*=']:
# COPY - THIS IS NOT A REFERNCE
return_dict[unit] = unit_dict_right[unit]
elif op in ['/', '/=']:
return_dict[unit] = -1 * unit_dict_right[unit] # DIVSION
# FILTER OUT ZEROs - UNITLESS
return_dict = {k: v for k, v in return_dict.items() if v != 0}
return return_dict
    def collect_function_parameters_units_and_decorate_function_objects(
            self, token, left_token, right_token):
        ''' COLLECT AVAILABLE UNITS ON FUNCTION PARAMETERS FROM
            AST AND ADD THEM TO FUNCTION OBJECT
            input:   token  cppcheck token at a call site
                     (token.function is the callee's function object)
            returns: None.  Side effect: appends one record per argument
                     to token.function.arg_units
        '''
        if token.function:
            function_args_units = []
            if token.astParent.astOperand2:
                if token.astParent.astOperand2.str == ',':
                    # MULTIPLE ARGS
                    # EXPECT A LIST OF LISTS
                    function_args_units = \
                        self.recurse_on_function_args(
                            token.astParent.astOperand2)
                else:
                    # ONLY ONE ARG
                    if token.astParent.astOperand2.units:
                        # LIST OF LIST
                        function_args_units = \
                            [token.astParent.astOperand2.units]
            if function_args_units:
                # ARITY MISMATCH: BAIL OUT RATHER THAN MIS-ALIGN ARGS
                if len(function_args_units) != len(token.function.arg_units):
                    if self.debug:
                        print token.file
                        s = 'line %s '
                        s += 'len(function_args_units)'
                        s += '== %d len(token.function.arg_units) = %d'
                        print s % (token.linenr,
                                   len(function_args_units),
                                   len(token.function.arg_units))
                    return
                for i, u in enumerate(function_args_units):
                    new_dict = {'linenr': int(token.linenr),
                                'units': u,
                                'functionId': token.function.Id,
                                'token': token}
                    if new_dict not in token.function.arg_units[i]:
                        token.function.arg_units[i].append(new_dict)
def recurse_on_function_args(self, comma_token):
''' RECURSIVELY COLLECT UNITS FOR FUNCTION ARGUMENTS
input: ccpcheck token object str=','
output: list of units in arg order
'''
my_return_list = []
if comma_token.astOperand1:
if comma_token.astOperand1.str == ',':
left_branch_units_list = \
self.recurse_on_function_args(comma_token.astOperand1)
if left_branch_units_list and \
isinstance(left_branch_units_list[0], list):
for u in left_branch_units_list:
my_return_list.append(u)
else:
my_return_list.append(left_branch_units_list)
else:
my_return_list.append(comma_token.astOperand1.units)
if comma_token.astOperand2:
my_return_list.append(comma_token.astOperand2.units)
return my_return_list
    def debug_walk(self, token):
        ''' RUDIMENTARY TERMINAL PRINT HOTWASH DEBUG
            input:   token  root cppcheck AST token to dump
            returns: None.  Prints an indented AST dump to stdout
            '''
        if self.debug_verbose:
            self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
        if token.isRoot:
            print '- '*42 + ' ROOT'
            if self.source_file_lines:
                print "lines yes",
                # ONLY ECHO THE SOURCE LINE WHEN IT BELONGS TO THIS FILE
                if self.source_file in token.file:
                    print 'source in token'
                    print self.source_file_lines[int(token.linenr)-1],
                else:
                    print 'self.source_file not in token.file'
        self.recursive_debug_walk(token, 0)
    def recursive_debug_walk(self, token, indent):
        ''' Print one AST node, then recurse into both children.
            input:   token   cppcheck AST token (None terminates)
                     indent  current tree depth, rendered as dashes
            returns: None.  Prints to stdout
            '''
        if self.debug_verbose:
            self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
        if not token:
            return
        print '%s %s %s ' % (token.linenr,
                             "-" * indent,
                             token.str),
        if token.variable:
            print self.my_symbol_helper.find_variable_type(token.variable),
            print self.my_symbol_helper.find_compound_variable_name_for_variable_token(token),
        if token.units:
            print token.units,
        if token.function:
            print token.function.argument,
            print 'ret: ' + str(token.function.return_units),
        # C  = inference touched a constant; UV = an unknown variable
        if token.is_unit_propagation_based_on_constants:
            print ' C ',
        if token.is_unit_propagation_based_on_unknown_variable:
            print ' UV ',
        print
        self.recursive_debug_walk(token.astOperand1, indent + 1)  # LEFT
        self.recursive_debug_walk(token.astOperand2, indent + 1)  # RIGHT
        if token.link:
            print '%s %s %s' % (token.link.linenr,
                                "+" * indent,
                                token.link.str)
def find_first_variable(self, token, left_token, right_token):
if self.debug_verbose:
self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
''' ASSUME THIS WILL BE CALLED ON LHS OF ASSIGNMENT.
SHOULD ONLY BE ONE VARIABLE
'''
if token.variable:
self.return_value_list.append(token.variable)
def recurse_and_collect_string(self, AST_token):
if self.debug_verbose:
self.cps_unit_checker.handle_debug_verbose(inspect.stack()[0][3])
""" SIMPLE RECURSION FOR LEFT-HAND SIDE OF ASSIGNMENT STATMENTS
"""
# PROTECT FROM NULL
if not AST_token:
return ''
my_return_string = ''
# LEFT RECURSE
if AST_token.astOperand1:
my_return_string += \
self.recurse_and_collect_string(AST_token.astOperand1)
# SELF
my_return_string += AST_token.str
# RIGHT RECURSE
if AST_token.astOperand2:
my_return_string += \
self.recurse_and_collect_string(AST_token.astOperand2)
return my_return_string
| {
"content_hash": "ff7c513606f0bb64cd99ced0ec171337",
"timestamp": "",
"source": "github",
"line_count": 1363,
"max_line_length": 151,
"avg_line_length": 48.9002201027146,
"alnum_prop": 0.4757167934464599,
"repo_name": "unl-nimbus-lab/phriky-units",
"id": "0793c7f41baa7ca58d09e1abe61c8533f38805b8",
"size": "66651",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "phriky_units/tree_walker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "270"
},
{
"name": "C++",
"bytes": "90711"
},
{
"name": "Makefile",
"bytes": "2315"
},
{
"name": "Python",
"bytes": "391049"
},
{
"name": "Shell",
"bytes": "29687"
}
],
"symlink_target": ""
} |
import argparse
from rpi_doorman_gammu import DoormanGammu
def setup_args():
    """Define and parse the rpi-doorman-gammu command line interface."""
    description = ('RPi.Doorman Gammu is using adafruit magnetic contact '
                   'switch (door sensor), will permanently sense for door '
                   'status and its changes. Notify via cell phone call on '
                   'sensor state change.')
    parser = argparse.ArgumentParser(prog='rpi-doorman-gammu',
                                     description=description)
    parser.add_argument('-n', '--number', type=str,
                        help='Cellphone number to call on sensor state change.')
    parser.add_argument('-c', action='store_true',
                        help='Current door status will be shown.')
    parser.add_argument('--notify', type=str,
                        help='Jabber ID to notify via Ludolph webhook with '
                             'sensor state change (POST).')
    parser.add_argument('-p', '--pin', type=int,
                        help='Pin number, for GPIO magnetic contact switch '
                             '(door sensor).')
    parser.add_argument('-l', '--log', type=str,
                        help='Log path, where logs will be stored.')
    parser.add_argument('-d', '--debug', action='store_true',
                        help='Debug mode, wheather store debug info in log.')
    return parser.parse_args()
def main():
    """Entry point: show the door state once (-c) or sense continuously."""
    args = setup_args()
    doorman = DoormanGammu(args)
    if getattr(args, 'c', False):
        # ONE-SHOT QUERY: PRINT CURRENT DOOR STATE AND EXIT
        print(doorman.get_door_state())
    else:
        # DEFAULT: BLOCK AND MONITOR THE SENSOR
        doorman.sense()
if __name__ == "__main__":
    # execute only if run as a script (not when imported as a module)
    main()
| {
"content_hash": "52e5520efeafd8023f19dec6adf6047a",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 207,
"avg_line_length": 40.806451612903224,
"alnum_prop": 0.658498023715415,
"repo_name": "ricco386/rpi-doorman-gammu",
"id": "52b4eb447b8484cbb4f7e56098477c19cb038e9e",
"size": "1455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rpi_doorman_gammu/main.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5192"
}
],
"symlink_target": ""
} |
"""Functions to visualize matrices of data."""
import itertools
import colorsys
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import pandas as pd
from scipy.spatial import distance
from scipy.cluster import hierarchy
from .axisgrid import Grid
from .palettes import cubehelix_palette
from .utils import despine, axis_ticklabels_overlap
from .external.six.moves import range
def _index_to_label(index):
"""Convert a pandas index or multiindex to an axis label."""
if isinstance(index, pd.MultiIndex):
return "-".join(map(str, index.names))
else:
return index.name
def _index_to_ticklabels(index):
"""Convert a pandas index or multiindex into ticklabels."""
if isinstance(index, pd.MultiIndex):
return ["-".join(map(str, i)) for i in index.values]
else:
return index.values
def _convert_colors(colors):
    """Convert either a list of colors or nested lists of colors to RGB.

    The flat interpretation is tried first; a ValueError while
    converting signals a nested list-of-lists of colors.
    """
    to_rgb = mpl.colors.colorConverter.to_rgb
    try:
        to_rgb(colors[0])
        # If this works, there is only one level of colors
        return [to_rgb(c) for c in colors]
    except ValueError:
        # If we get here, we have nested lists
        return [[to_rgb(c) for c in level] for level in colors]
class _HeatMapper(object):
"""Draw a heatmap plot of a matrix with nice labels and colormaps."""
    def __init__(self, data, vmin, vmax, cmap, center, robust, annot, fmt,
                 annot_kws, cbar, cbar_kws,
                 xticklabels=True, yticklabels=True, mask=None):
        """Initialize the plotting object.

        Normalizes the input into both a DataFrame (for labels) and an
        ndarray (for matplotlib), resolves tick/axis labels, and
        computes default colormap parameters.
        """
        # We always want to have a DataFrame with semantic information
        # and an ndarray to pass to matplotlib
        if isinstance(data, pd.DataFrame):
            plot_data = data.values
        else:
            plot_data = np.asarray(data)
            data = pd.DataFrame(plot_data)
        # Reverse the rows so the plot looks like the matrix
        # NOTE(review): DataFrame.ix is deprecated/removed in modern
        # pandas - confirm the supported pandas version
        plot_data = plot_data[::-1]
        data = data.ix[::-1]
        if mask is not None:
            try:
                mask = mask.ix[::-1]
            except AttributeError:
                # mask is a plain array rather than a DataFrame
                mask = mask[::-1]
            plot_data = np.ma.masked_where(mask, plot_data)
        # Get good names for the rows and columns
        if isinstance(xticklabels, bool) and xticklabels:
            self.xticklabels = _index_to_ticklabels(data.columns)
        elif isinstance(xticklabels, bool) and not xticklabels:
            self.xticklabels = ['' for _ in range(data.shape[1])]
        else:
            self.xticklabels = xticklabels
        xlabel = _index_to_label(data.columns)
        if isinstance(yticklabels, bool) and yticklabels:
            self.yticklabels = _index_to_ticklabels(data.index)
        elif isinstance(yticklabels, bool) and not yticklabels:
            self.yticklabels = ['' for _ in range(data.shape[0])]
        else:
            # custom labels are reversed to track the row flip above
            self.yticklabels = yticklabels[::-1]
        ylabel = _index_to_label(data.index)
        # Get good names for the axis labels
        self.xlabel = xlabel if xlabel is not None else ""
        self.ylabel = ylabel if ylabel is not None else ""
        # Determine good default values for the colormapping
        self._determine_cmap_params(plot_data, vmin, vmax,
                                    cmap, center, robust)
        # Save other attributes to the object
        self.data = data
        self.plot_data = plot_data
        self.annot = annot
        self.fmt = fmt
        self.annot_kws = {} if annot_kws is None else annot_kws
        self.cbar = cbar
        self.cbar_kws = {} if cbar_kws is None else cbar_kws
def _determine_cmap_params(self, plot_data, vmin, vmax,
cmap, center, robust):
"""Use some heuristics to set good defaults for colorbar and range."""
calc_data = plot_data.data[~np.isnan(plot_data.data)]
if vmin is None:
vmin = np.percentile(calc_data, 2) if robust else calc_data.min()
if vmax is None:
vmax = np.percentile(calc_data, 98) if robust else calc_data.max()
# Simple heuristics for whether these data should have a divergent map
divergent = ((vmin < 0) and (vmax > 0)) or center is not None
# Now set center to 0 so math below makes sense
if center is None:
center = 0
# A divergent map should be symmetric around the center value
if divergent:
vlim = max(abs(vmin - center), abs(vmax - center))
vmin, vmax = -vlim, vlim
self.divergent = divergent
# Now add in the centering value and set the limits
vmin += center
vmax += center
self.vmin = vmin
self.vmax = vmax
# Choose default colormaps if not provided
if cmap is None:
if divergent:
self.cmap = "RdBu_r"
else:
self.cmap = cubehelix_palette(light=.95, as_cmap=True)
else:
self.cmap = cmap
def _annotate_heatmap(self, ax, mesh):
"""Add textual labels with the value in each cell."""
xpos, ypos = np.meshgrid(ax.get_xticks(), ax.get_yticks())
for x, y, val, color in zip(xpos.flat, ypos.flat,
mesh.get_array(), mesh.get_facecolors()):
_, l, _ = colorsys.rgb_to_hls(*color[:3])
text_color = ".15" if l > .5 else "w"
val = ("{:" + self.fmt + "}").format(val)
ax.text(x, y, val, color=text_color,
ha="center", va="center", **self.annot_kws)
def plot(self, ax, cax, kws):
"""Draw the heatmap on the provided Axes."""
# Remove all the Axes spines
despine(ax=ax, left=True, bottom=True)
# Draw the heatmap
mesh = ax.pcolormesh(self.plot_data, vmin=self.vmin, vmax=self.vmax,
cmap=self.cmap, **kws)
# Set the axis limits
ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))
# Add row and column labels
nx, ny = self.data.T.shape
ax.set(xticks=np.arange(nx) + .5, yticks=np.arange(ny) + .5)
xtl = ax.set_xticklabels(self.xticklabels)
ytl = ax.set_yticklabels(self.yticklabels, rotation="vertical")
# Possibly rotate them if they overlap
plt.draw()
if axis_ticklabels_overlap(xtl):
plt.setp(xtl, rotation="vertical")
if axis_ticklabels_overlap(ytl):
plt.setp(ytl, rotation="horizontal")
# Add the axis labels
ax.set(xlabel=self.xlabel, ylabel=self.ylabel)
# Annotate the cells with the formatted values
if self.annot:
self._annotate_heatmap(ax, mesh)
# Possibly add a colorbar
if self.cbar:
ticker = mpl.ticker.MaxNLocator(6)
cb = ax.figure.colorbar(mesh, cax, ax,
ticks=ticker, **self.cbar_kws)
cb.outline.set_linewidth(0)
def heatmap(data, vmin=None, vmax=None, cmap=None, center=None, robust=False,
            annot=False, fmt=".2g", annot_kws=None,
            linewidths=.5, linecolor="white",
            cbar=True, cbar_kws=None, cbar_ax=None,
            square=False, ax=None, xticklabels=True, yticklabels=True,
            mask=None,
            **kwargs):
    """Plot rectangular data as a color-encoded matrix.
    This function tries to infer a good colormap to use from the data, but
    this is not guaranteed to work, so take care to make sure the kind of
    colormap (sequential or diverging) and its limits are appropriate.
    This is an Axes-level function and will draw the heatmap into the
    currently-active Axes if none is provided to the ``ax`` argument. Part of
    this Axes space will be taken and used to plot a colormap, unless ``cbar``
    is False or a separate Axes is provided to ``cbar_ax``.
    Parameters
    ----------
    data : rectangular dataset
        2D dataset that can be coerced into an ndarray. If a Pandas DataFrame
        is provided, the index/column information will be used to label the
        columns and rows.
    vmin, vmax : floats, optional
        Values to anchor the colormap, otherwise they are inferred from the
        data and other keyword arguments. When a diverging dataset is inferred,
        one of these values may be ignored.
    cmap : matplotlib colormap name or object, optional
        The mapping from data values to color space. If not provided, this
        will be either a cubehelix map (if the function infers a sequential
        dataset) or ``RdBu_r`` (if the function infers a diverging dataset).
    center : float, optional
        The value at which to center the colormap. Passing this value implies
        use of a diverging colormap.
    robust : bool, optional
        If True and ``vmin`` or ``vmax`` are absent, the colormap range is
        computed with robust quantiles instead of the extreme values.
    annot : bool, optional
        If True, write the data value in each cell.
    fmt : string, optional
        String formatting code to use when ``annot`` is True.
    annot_kws : dict of key, value mappings, optional
        Keyword arguments for ``ax.text`` when ``annot`` is True.
    linewidths : float, optional
        Width of the lines that divide each cell.
    linecolor : color, optional
        Color of the lines that divide each cell.
    cbar : boolean, optional
        Whether to draw a colorbar.
    cbar_kws : dict of key, value mappings, optional
        Keyword arguments for `fig.colorbar`.
    cbar_ax : matplotlib Axes, optional
        Axes in which to draw the colorbar, otherwise take space from the
        main Axes.
    square : boolean, optional
        If True, set the Axes aspect to "equal" so each cell will be
        square-shaped.
    ax : matplotlib Axes, optional
        Axes in which to draw the plot, otherwise use the currently-active
        Axes.
    xticklabels : list-like or bool, optional
        If True, plot the column names of the dataframe. If False, don't plot
        the column names. If list-like, plot these alternate labels as the
        xticklabels
    yticklabels : list-like or bool, optional
        If True, plot the row names of the dataframe. If False, don't plot
        the row names. If list-like, plot these alternate labels as the
        yticklabels
    mask : boolean numpy.array, optional
        A boolean array indicating where to mask the data so it is not
        plotted on the heatmap. Only used for visualizing, not for calculating.
    kwargs : other keyword arguments
        All other keyword arguments are passed to ``ax.pcolormesh``.
    Returns
    -------
    ax : matplotlib Axes
        Axes object with the heatmap.
    """
    # Initialize the plotter object
    plotter = _HeatMapper(data, vmin, vmax, cmap, center, robust, annot, fmt,
                          annot_kws, cbar, cbar_kws, xticklabels, yticklabels,
                          mask)
    # Add the pcolormesh kwargs here
    kwargs["linewidths"] = linewidths
    kwargs["edgecolor"] = linecolor
    # Draw the plot and return the Axes
    if ax is None:
        ax = plt.gca()
    if square:
        # Equal aspect makes every cell a square
        ax.set_aspect("equal")
    plotter.plot(ax, cbar_ax, kwargs)
    return ax
class _DendrogramPlotter(object):
"""Object for drawing tree of similarities between data rows/columns"""
def __init__(self, data, linkage, metric, method, axis, label, rotate):
"""Plot a dendrogram of the relationships between the columns of data
Parameters
----------
data : pandas.DataFrame
Rectangular data
"""
self.axis = axis
if self.axis == 1:
data = data.T
if isinstance(data, pd.DataFrame):
array = data.values
else:
array = np.asarray(data)
data = pd.DataFrame(array)
self.array = array
self.data = data
self.shape = self.data.shape
self.metric = metric
self.method = method
self.axis = axis
self.label = label
self.rotate = rotate
if linkage is None:
self.linkage = self.calculated_linkage
else:
self.linkage = linkage
self.dendrogram = self.calculate_dendrogram()
# Dendrogram ends are always at multiples of 5, who knows why
ticks = 10 * np.arange(self.data.shape[0]) + 5
if self.label:
ticklabels = _index_to_ticklabels(self.data.index)
ticklabels = [ticklabels[i] for i in self.reordered_ind]
if self.rotate:
self.xticks = []
self.yticks = ticks
self.xticklabels = []
self.yticklabels = ticklabels
self.ylabel = _index_to_label(self.data.index)
self.xlabel = ''
else:
self.xticks = ticks
self.yticks = []
self.xticklabels = ticklabels
self.yticklabels = []
self.ylabel = ''
self.xlabel = _index_to_label(self.data.index)
else:
self.xticks, self.yticks = [], []
self.yticklabels, self.xticklabels = [], []
self.xlabel, self.ylabel = '', ''
if self.rotate:
self.X = self.dendrogram['dcoord']
self.Y = self.dendrogram['icoord']
else:
self.X = self.dendrogram['icoord']
self.Y = self.dendrogram['dcoord']
def _calculate_linkage_scipy(self):
if np.product(self.shape) >= 10000:
UserWarning('This will be slow... (gentle suggestion: '
'"pip install fastcluster")')
pairwise_dists = distance.squareform(
distance.pdist(self.array, metric=self.metric))
linkage = hierarchy.linkage(pairwise_dists, method=self.method)
del pairwise_dists
return linkage
def _calculate_linkage_fastcluster(self):
import fastcluster
# Fastcluster has a memory-saving vectorized version, but only
# with certain linkage methods, and mostly with euclidean metric
vector_methods = ('single', 'centroid', 'median', 'ward')
euclidean_methods = ('centroid', 'median', 'ward')
euclidean = self.metric == 'euclidean' and self.method in \
euclidean_methods
if euclidean or self.method == 'single':
return fastcluster.linkage_vector(self.array,
method=self.method,
metric=self.metric)
else:
pairwise_dists = distance.pdist(self.array, metric=self.metric)
linkage = fastcluster.linkage(pairwise_dists, method=self.method)
del pairwise_dists
return linkage
@property
def calculated_linkage(self):
try:
return self._calculate_linkage_fastcluster()
except ImportError:
return self._calculate_linkage_scipy()
def calculate_dendrogram(self):
"""Calculates a dendrogram based on the linkage matrix
Made a separate function, not a property because don't want to
recalculate the dendrogram every time it is accessed.
Returns
-------
dendrogram : dict
Dendrogram dictionary as returned by scipy.cluster.hierarchy
.dendrogram. The important key-value pairing is
"reordered_ind" which indicates the re-ordering of the matrix
"""
return hierarchy.dendrogram(self.linkage, no_plot=True,
color_list=['k'], color_threshold=-np.inf)
@property
def reordered_ind(self):
"""Indices of the matrix, reordered by the dendrogram"""
return self.dendrogram['leaves']
def plot(self, ax):
"""Plots a dendrogram of the similarities between data on the axes
Parameters
----------
ax : matplotlib.axes.Axes
Axes object upon which the dendrogram is plotted
"""
for x, y in zip(self.X, self.Y):
ax.plot(x, y, color='k', linewidth=.5)
if self.rotate and self.axis == 0:
ax.invert_xaxis()
ax.yaxis.set_ticks_position('right')
ymax = min(map(min, self.Y)) + max(map(max, self.Y))
ax.set_ylim(0, ymax)
ax.invert_yaxis()
else:
xmax = min(map(min, self.X)) + max(map(max, self.X))
ax.set_xlim(0, xmax)
despine(ax=ax, bottom=True, left=True)
ax.set(xticks=self.xticks, yticks=self.yticks,
xlabel=self.xlabel, ylabel=self.ylabel)
xtl = ax.set_xticklabels(self.xticklabels)
ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')
# Force a draw of the plot to avoid matplotlib window error
plt.draw()
if len(ytl) > 0 and axis_ticklabels_overlap(ytl):
plt.setp(ytl, rotation="horizontal")
if len(xtl) > 0 and axis_ticklabels_overlap(xtl):
plt.setp(xtl, rotation="vertical")
return self
def dendrogram(data, linkage=None, axis=1, label=True, metric='euclidean',
               method='average', rotate=False, ax=None):
    """Draw a tree diagram of relationships within a matrix

    Parameters
    ----------
    data : pandas.DataFrame
        Rectangular data
    linkage : numpy.array, optional
        Precomputed linkage matrix; computed from ``data`` when None
    axis : int, optional
        Which axis to use to calculate linkage. 0 is rows, 1 is columns.
    label : bool, optional
        If True, label the dendrogram at leaves with column or row names
    metric : str, optional
        Distance metric. Anything valid for scipy.spatial.distance.pdist
    method : str, optional
        Linkage method to use. Anything valid for
        scipy.cluster.hierarchy.linkage
    rotate : bool, optional
        When plotting the matrix, whether to rotate it 90 degrees
        counter-clockwise, so the leaves face right
    ax : matplotlib axis, optional
        Axis to plot on, otherwise uses current axis

    Returns
    -------
    dendrogramplotter : _DendrogramPlotter
        A Dendrogram plotter object; its ``reordered_ind`` attribute
        gives the leaf ordering produced by the clustering.
    """
    tree = _DendrogramPlotter(data, linkage=linkage, metric=metric,
                              method=method, axis=axis, label=label,
                              rotate=rotate)
    # Fall back to the currently active axes when none is supplied
    target_ax = plt.gca() if ax is None else ax
    return tree.plot(ax=target_ax)
class ClusterGrid(Grid):
    """Grid of axes organizing a clustered heatmap with marginal dendrograms,
    optional row/column color annotations, and a colorbar."""
    def __init__(self, data, pivot_kws=None, z_score=None, standard_scale=None,
                 figsize=None, row_colors=None, col_colors=None):
        """Grid object for organizing clustered heatmap input on to axes"""
        # Coerce to a DataFrame so index/columns can label the plot
        if isinstance(data, pd.DataFrame):
            self.data = data
        else:
            self.data = pd.DataFrame(data)
        self.data2d = self.format_data(self.data, pivot_kws, z_score,
                                       standard_scale)
        if figsize is None:
            width, height = 10, 10
            figsize = (width, height)
        self.fig = plt.figure(figsize=figsize)
        if row_colors is not None:
            row_colors = _convert_colors(row_colors)
        self.row_colors = row_colors
        if col_colors is not None:
            col_colors = _convert_colors(col_colors)
        self.col_colors = col_colors
        # Layout ratios: dendrogram strip, colorbar, optional color strip,
        # then the heatmap itself
        width_ratios = self.dim_ratios(self.row_colors,
                                       figsize=figsize,
                                       axis=1)
        height_ratios = self.dim_ratios(self.col_colors,
                                        figsize=figsize,
                                        axis=0)
        nrows = 3 if self.col_colors is None else 4
        ncols = 3 if self.row_colors is None else 4
        self.gs = gridspec.GridSpec(nrows, ncols, wspace=0.01, hspace=0.01,
                                    width_ratios=width_ratios,
                                    height_ratios=height_ratios)
        # BUG FIX: the ``axisbg`` keyword was removed in matplotlib 2.0;
        # ``facecolor`` is the supported equivalent.
        self.ax_row_dendrogram = self.fig.add_subplot(self.gs[nrows - 1, 0:2],
                                                      facecolor="white")
        self.ax_col_dendrogram = self.fig.add_subplot(self.gs[0:2, ncols - 1],
                                                      facecolor="white")
        self.ax_row_colors = None
        self.ax_col_colors = None
        if self.row_colors is not None:
            self.ax_row_colors = self.fig.add_subplot(
                self.gs[nrows - 1, ncols - 2])
        if self.col_colors is not None:
            self.ax_col_colors = self.fig.add_subplot(
                self.gs[nrows - 2, ncols - 1])
        self.ax_heatmap = self.fig.add_subplot(self.gs[nrows - 1, ncols - 1])
        # colorbar for scale to left corner
        self.cax = self.fig.add_subplot(self.gs[0, 0])
        self.dendrogram_row = None
        self.dendrogram_col = None
    def format_data(self, data, pivot_kws, z_score=None,
                    standard_scale=None):
        """Extract variables from data or use directly."""
        # Either the data is already in 2d matrix format, or need to do a pivot
        if pivot_kws is not None:
            data2d = data.pivot(**pivot_kws)
        else:
            data2d = data
        # The two normalizations are mutually exclusive
        if z_score is not None and standard_scale is not None:
            raise ValueError(
                'Cannot perform both z-scoring and standard-scaling on data')
        if z_score is not None:
            data2d = self.z_score(data2d, z_score)
        if standard_scale is not None:
            data2d = self.standard_scale(data2d, standard_scale)
        return data2d
    @staticmethod
    def z_score(data2d, axis=1):
        """Standardize the mean and variance of the data axis

        Parameters
        ----------
        data2d : pandas.DataFrame
            Data to normalize
        axis : int
            Which axis to normalize across. If 0, normalize across rows, if 1,
            normalize across columns.

        Returns
        -------
        normalized : pandas.DataFrame
            Normalized data with a mean of 0 and variance of 1 across the
            specified axis.
        """
        if axis == 1:
            z_scored = data2d
        else:
            z_scored = data2d.T
        # BUG FIX: a z-score divides by the standard deviation, not the
        # variance; dividing by .var() does not yield unit variance as
        # documented above.
        z_scored = (z_scored - z_scored.mean()) / z_scored.std()
        if axis == 1:
            return z_scored
        else:
            return z_scored.T
    @staticmethod
    def standard_scale(data2d, axis=1):
        """Divide the data by the difference between the max and min

        Parameters
        ----------
        data2d : pandas.DataFrame
            Data to normalize
        axis : int
            Which axis to normalize across. If 0, normalize across rows, if 1,
            normalize across columns.

        Returns
        -------
        standardized : pandas.DataFrame
            Data rescaled so each row/column spans the range 0 to 1.

        >>> import numpy as np
        >>> d = np.arange(5, 8, 0.5)
        >>> ClusterGrid.standard_scale(d)
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        """
        # Normalize these values to range from 0 to 1
        if axis == 1:
            standardized = data2d
        else:
            standardized = data2d.T
        subtract = standardized.min()
        standardized = (standardized - subtract) / (
            standardized.max() - standardized.min())
        if axis == 1:
            return standardized
        else:
            return standardized.T
    def dim_ratios(self, side_colors, axis, figsize, side_colors_ratio=0.05):
        """Get the proportions of the figure taken up by each axes
        """
        figdim = figsize[axis]
        # Get resizing proportion of this figure for the dendrogram and
        # colorbar, so only the heatmap gets bigger but the dendrogram stays
        # the same size.
        dendrogram = min(2. / figdim, .2)
        # add the colorbar
        colorbar_width = .8 * dendrogram
        colorbar_height = .2 * dendrogram
        if axis == 0:
            ratios = [colorbar_width, colorbar_height]
        else:
            ratios = [colorbar_height, colorbar_width]
        if side_colors is not None:
            # Add room for the colors
            ratios += [side_colors_ratio]
        # Add the ratio for the heatmap itself
        ratios += [.8]
        return ratios
    @staticmethod
    def color_list_to_matrix_and_cmap(colors, ind, axis=0):
        """Turns a list of colors into a numpy matrix and matplotlib colormap
        These arguments can now be plotted using heatmap(matrix, cmap)
        and the provided colors will be plotted.
        Parameters
        ----------
        colors : list of matplotlib colors
            Colors to label the rows or columns of a dataframe.
        ind : list of ints
            Ordering of the rows or columns, to reorder the original colors
            by the clustered dendrogram order
        axis : int
            Which axis this is labeling
        Returns
        -------
        matrix : numpy.array
            A numpy array of integer values, where each corresponds to a color
            from the originally provided list of colors
        cmap : matplotlib.colors.ListedColormap
        """
        # check for nested lists/color palettes.
        # Will fail if matplotlib color is list not tuple
        if any(issubclass(type(x), list) for x in colors):
            all_colors = set(itertools.chain(*colors))
            n = len(colors)
            m = len(colors[0])
        else:
            all_colors = set(colors)
            n = 1
            m = len(colors)
            colors = [colors]
        # NOTE(review): iterating a set gives an arbitrary color order, but
        # the same set object is used for both the value mapping and the
        # colormap below, so the rendering is self-consistent within a run.
        color_to_value = dict((col, i) for i, col in enumerate(all_colors))
        matrix = np.array([color_to_value[c]
                           for color in colors for c in color])
        shape = (n, m)
        matrix = matrix.reshape(shape)
        # Reorder to match the dendrogram leaf order
        matrix = matrix[:, ind]
        if axis == 0:
            # row-side:
            matrix = matrix.T
        cmap = mpl.colors.ListedColormap(all_colors)
        return matrix, cmap
    def savefig(self, *args, **kwargs):
        """Save the figure, defaulting to a tight bounding box so the
        dendrograms are not clipped."""
        if 'bbox_inches' not in kwargs:
            kwargs['bbox_inches'] = 'tight'
        self.fig.savefig(*args, **kwargs)
    def plot_dendrograms(self, row_cluster, col_cluster, metric, method,
                         row_linkage, col_linkage):
        """Draw (or blank out) the row and column dendrograms."""
        # Plot the row dendrogram
        if row_cluster:
            self.dendrogram_row = dendrogram(
                self.data2d, metric=metric, method=method, label=False, axis=0,
                ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage)
        else:
            self.ax_row_dendrogram.set_xticks([])
            self.ax_row_dendrogram.set_yticks([])
        # Plot the column dendrogram
        if col_cluster:
            self.dendrogram_col = dendrogram(
                self.data2d, metric=metric, method=method, label=False,
                axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage)
        else:
            self.ax_col_dendrogram.set_xticks([])
            self.ax_col_dendrogram.set_yticks([])
        despine(ax=self.ax_row_dendrogram, bottom=True, left=True)
        despine(ax=self.ax_col_dendrogram, bottom=True, left=True)
    def plot_colors(self, xind, yind, **kws):
        """Plots color labels between the dendrogram and the heatmap
        Parameters
        ----------
        xind, yind : list of ints
            Column / row ordering from the dendrograms
        kws : dict
            Keyword arguments intended for ``heatmap``; colormap- and
            tick-related entries are stripped before plotting the strips
        """
        # Remove any custom colormap and centering
        kws = kws.copy()
        kws.pop('cmap', None)
        kws.pop('center', None)
        kws.pop('vmin', None)
        kws.pop('vmax', None)
        kws.pop('xticklabels', None)
        kws.pop('yticklabels', None)
        if self.row_colors is not None:
            matrix, cmap = self.color_list_to_matrix_and_cmap(
                self.row_colors, yind, axis=0)
            heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,
                    xticklabels=False, yticklabels=False,
                    **kws)
        else:
            # NOTE(review): self.ax_row_colors is None in this branch and is
            # passed positionally as despine's first argument -- confirm the
            # intended behavior against seaborn's despine signature.
            despine(self.ax_row_colors, left=True, bottom=True)
        if self.col_colors is not None:
            matrix, cmap = self.color_list_to_matrix_and_cmap(
                self.col_colors, xind, axis=1)
            heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,
                    xticklabels=False, yticklabels=False,
                    **kws)
        else:
            # NOTE(review): same concern as above for self.ax_col_colors.
            despine(self.ax_col_colors, left=True, bottom=True)
    def plot_matrix(self, colorbar_kws, mask, xind, yind, **kws):
        """Draw the reordered heatmap into the main axes."""
        # Reorder the data to match the dendrogram leaf order
        self.data2d = self.data2d.iloc[yind, xind]
        heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.cax,
                cbar_kws=colorbar_kws, mask=mask, **kws)
        # Row labels read better on the right, next to the heatmap
        self.ax_heatmap.yaxis.set_ticks_position('right')
        self.ax_heatmap.yaxis.set_label_position('right')
    def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,
             row_linkage, col_linkage, mask, **kws):
        """Cluster, then draw dendrograms, color strips, and the heatmap."""
        colorbar_kws = {} if colorbar_kws is None else colorbar_kws
        self.plot_dendrograms(row_cluster, col_cluster, metric, method,
                              row_linkage=row_linkage, col_linkage=col_linkage)
        # When clustering is disabled the dendrogram attribute stays None,
        # so fall back to the identity ordering
        try:
            xind = self.dendrogram_col.reordered_ind
        except AttributeError:
            xind = np.arange(self.data2d.shape[1])
        try:
            yind = self.dendrogram_row.reordered_ind
        except AttributeError:
            yind = np.arange(self.data2d.shape[0])
        self.plot_colors(xind, yind, **kws)
        self.plot_matrix(colorbar_kws, mask, xind, yind, **kws)
        return self
def clustermap(data, pivot_kws=None, method='average', metric='euclidean',
               z_score=None, standard_scale=None, figsize=None, cbar_kws=None,
               row_cluster=True, col_cluster=True,
               row_linkage=None, col_linkage=None,
               row_colors=None, col_colors=None, mask=None, **kwargs):
    """Plot a hierarchically clustered heatmap of a pandas DataFrame

    Parameters
    ----------
    data : pandas.DataFrame
        Rectangular data for clustering. Cannot contain NAs.
    pivot_kws : dict, optional
        If `data` is a tidy dataframe, keyword arguments for ``pivot`` used
        to build a rectangular dataframe.
    method : str, optional
        Linkage method for calculating clusters; anything valid for
        scipy.cluster.hierarchy.linkage.
    metric : str, optional
        Distance metric for the data; anything valid for
        scipy.spatial.distance.pdist.
    z_score : int or None, optional
        Either 0 (rows) or 1 (columns): z-score that dimension, giving each
        row (column) a mean of 0 and variance of 1.
    standard_scale : int or None, optional
        Either 0 (rows) or 1 (columns): rescale that dimension by subtracting
        its minimum and dividing by its range.
    figsize : tuple of two ints, optional
        Size of the figure to create.
    cbar_kws : dict, optional
        Passed to ``cbar_kws`` in ``heatmap``, e.g. to label the colorbar.
    {row,col}_cluster : bool, optional
        If True, cluster the rows / columns.
    {row,col}_linkage : numpy.array, optional
        Precomputed linkage matrices (scipy.cluster.hierarchy.linkage format).
    {row,col}_colors : list-like, optional
        Colors to annotate rows / columns; nested lists give multiple
        annotation levels.
    mask : boolean numpy.array, optional
        A boolean array indicating where to mask the data so it is not
        plotted on the heatmap. Only used for visualizing, not for calculating.
    kwargs : other keyword arguments
        All other keyword arguments are passed to ``sns.heatmap``.

    Returns
    -------
    clustergrid : ClusterGrid
        A ClusterGrid instance. Use its ``savefig`` method to save the figure
        without clipping the dendrograms; the reordered indices are available
        as ``clustergrid.dendrogram_row.reordered_ind`` and
        ``clustergrid.dendrogram_col.reordered_ind``.
    """
    grid = ClusterGrid(data, pivot_kws=pivot_kws, figsize=figsize,
                       z_score=z_score, standard_scale=standard_scale,
                       row_colors=row_colors, col_colors=col_colors)
    # ClusterGrid.plot returns the grid itself
    grid.plot(metric=metric, method=method, colorbar_kws=cbar_kws,
              row_cluster=row_cluster, col_cluster=col_cluster,
              row_linkage=row_linkage, col_linkage=col_linkage,
              mask=mask, **kwargs)
    return grid
| {
"content_hash": "60d8a29b78af7034aa906c22eaa196b0",
"timestamp": "",
"source": "github",
"line_count": 900,
"max_line_length": 96,
"avg_line_length": 37.766666666666666,
"alnum_prop": 0.5892321270962048,
"repo_name": "aashish24/seaborn",
"id": "8d999401f41d7ab21b18b84a600bdd76a70b2292",
"size": "33990",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "seaborn/matrix.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "261"
},
{
"name": "JavaScript",
"bytes": "2654"
},
{
"name": "Makefile",
"bytes": "7495"
},
{
"name": "Python",
"bytes": "640394"
}
],
"symlink_target": ""
} |
"""
Middleware that displays everything that is printed inline in
application pages.
Anything printed during the request will get captured and included on
the page. It will usually be included as a floating element in the
top right hand corner of the page. If you want to override this
you can include a tag in your template where it will be placed::
<pre id="paste-debug-prints"></pre>
You might want to include ``style="white-space: normal"``, as all the
whitespace will be quoted, and this allows the text to wrap if
necessary.
"""
from cStringIO import StringIO
import re
import cgi
from paste.util import threadedprint
from paste import wsgilib
from paste import response
_threadedprint_installed = False
__all__ = ['PrintDebugMiddleware']
class TeeFile(object):
    """File-like object that fans each ``write`` out to several files."""
    def __init__(self, files):
        self.files = files
    def write(self, v):
        if isinstance(v, unicode):
            # WSGI is picky in this case
            v = str(v)
        for sink in self.files:
            sink.write(v)
class PrintDebugMiddleware(object):
    """
    This middleware captures all the printed statements, and inlines
    them in HTML pages, so that you can see all the (debug-intended)
    print statements in the page itself.
    There are two keys added to the environment to control this:
    ``environ['paste.printdebug_listeners']`` is a list of functions
    that will be called everytime something is printed.
    ``environ['paste.remove_printdebug']`` is a function that, if
    called, will disable printing of output for that request.
    """
    # Floating <pre> box used when the page has no explicit
    # <pre id="paste-debug-prints"> placeholder; %s receives the log text.
    log_template = (
        '<pre style="width: 40%%; border: 2px solid #000; white-space: normal; '
        'background-color: #ffd; color: #000; float: right;">'
        '<b style="border-bottom: 1px solid #000">Log messages</b><br>'
        '%s</pre>')
    def __init__(self, app, global_conf=None, force_content_type=False,
                 print_wsgi_errors=True):
        # @@: global_conf should be handled separately and only for
        # the entry point
        self.app = app
        # When true, inject the log even into non-HTML responses.
        self.force_content_type = force_content_type
        if isinstance(print_wsgi_errors, basestring):
            # Config-file values arrive as strings; coerce to bool.
            from paste.deploy.converters import asbool
            print_wsgi_errors = asbool(print_wsgi_errors)
        # When true, captured prints are also mirrored to wsgi.errors.
        self.print_wsgi_errors = print_wsgi_errors
    def __call__(self, environ, start_response):
        global _threadedprint_installed
        if environ.get('paste.testing'):
            # In a testing environment this interception isn't
            # useful:
            return self.app(environ, start_response)
        if not _threadedprint_installed:
            # @@: Not strictly threadsafe
            _threadedprint_installed = True
            threadedprint.install(leave_stdout=True)
        # Closure flag: the app can call environ['paste.remove_printdebug']
        # to opt out of injection for this request.
        removed = []
        def remove_printdebug():
            removed.append(None)
        environ['paste.remove_printdebug'] = remove_printdebug
        # Everything printed during the request accumulates here.
        logged = StringIO()
        listeners = [logged]
        environ['paste.printdebug_listeners'] = listeners
        if self.print_wsgi_errors:
            listeners.append(environ['wsgi.errors'])
        # Tee stdout for this thread to all listeners while the app runs.
        replacement_stdout = TeeFile(listeners)
        threadedprint.register(replacement_stdout)
        try:
            status, headers, body = wsgilib.intercept_output(
                environ, self.app)
            if status is None:
                # Some error occurred
                status = '500 Server Error'
                headers = [('Content-type', 'text/html')]
                start_response(status, headers)
                if not body:
                    body = 'An error occurred'
            content_type = response.header_value(headers, 'content-type')
            if (removed or
                    (not self.force_content_type and
                     (not content_type
                      or not content_type.startswith('text/html')))):
                # Injection disabled or not an HTML page: pass through.
                if replacement_stdout == logged:
                    # Then the prints will be lost, unless...
                    # NOTE(review): replacement_stdout is a TeeFile and
                    # logged a StringIO, so this equality never holds;
                    # presumably it meant "logged is the only listener" --
                    # confirm before relying on this fallback.
                    environ['wsgi.errors'].write(logged.getvalue())
                start_response(status, headers)
                return [body]
            # Body length changes once the log is injected, so the original
            # Content-Length header must be dropped.
            response.remove_header(headers, 'content-length')
            body = self.add_log(body, logged.getvalue())
            start_response(status, headers)
            return [body]
        finally:
            # Always detach this thread's tee, even on error.
            threadedprint.deregister()
    # Injection anchors: an explicit placeholder <pre>, else the opening
    # <body> tag.
    _body_re = re.compile(r'<body[^>]*>', re.I)
    _explicit_re = re.compile(r'<pre\s*[^>]*id="paste-debug-prints".*?>',
                              re.I+re.S)
    def add_log(self, html, log):
        # Returns ``html`` with the escaped ``log`` text injected after the
        # placeholder <pre> (if present) or the opening <body> tag.
        if not log:
            return html
        text = cgi.escape(log)
        text = text.replace('\n', '<br>')
        # NOTE(review): this replace is a no-op as written; upstream Paste
        # replaces double spaces with ' &nbsp; ' here -- the entity looks
        # lost to extraction damage. Confirm against the original source.
        text = text.replace(' ', ' ')
        match = self._explicit_re.search(html)
        if not match:
            # No placeholder: wrap in the floating box and target <body>.
            text = self.log_template % text
            match = self._body_re.search(html)
        if not match:
            # No <body> either: prepend the log to the document.
            return text + html
        else:
            return html[:match.end()] + text + html[match.end():]
| {
"content_hash": "6a49446b26e72744e45f63405eca2a10",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 80,
"avg_line_length": 36.608695652173914,
"alnum_prop": 0.5983768804433888,
"repo_name": "santisiri/popego",
"id": "c971895aff156758d53c3ff7b528ac0dd53022d4",
"size": "5221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "envs/ALPHA-POPEGO/lib/python2.5/site-packages/Paste-1.4.2-py2.5.egg/paste/debug/prints.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1246"
},
{
"name": "C",
"bytes": "504141"
},
{
"name": "C++",
"bytes": "26125"
},
{
"name": "CSS",
"bytes": "342653"
},
{
"name": "FORTRAN",
"bytes": "4872"
},
{
"name": "GAP",
"bytes": "13267"
},
{
"name": "Genshi",
"bytes": "407"
},
{
"name": "Groff",
"bytes": "17116"
},
{
"name": "HTML",
"bytes": "383181"
},
{
"name": "JavaScript",
"bytes": "1090769"
},
{
"name": "Makefile",
"bytes": "2441"
},
{
"name": "Mako",
"bytes": "376944"
},
{
"name": "Python",
"bytes": "20895618"
},
{
"name": "Ruby",
"bytes": "3380"
},
{
"name": "Shell",
"bytes": "23581"
},
{
"name": "Smarty",
"bytes": "522"
},
{
"name": "TeX",
"bytes": "35712"
}
],
"symlink_target": ""
} |
import paho.mqtt.client as mqtt
class mqttHandler:
    """Send MQTT 'push' messages to your LEDScreen """
    # Connects to an MQTT broker, subscribes to ``channel``, and forwards
    # each incoming payload to a callback supplied via ``listen``.
    def __init__(self,server="test.mosquitto.org",channel="ledbot/"):
        # Default broker is the public mosquitto test server.
        self.channel = channel
        self.client= mqtt.Client()
        self.client.on_connect = self.on_connect
        self.client.on_message = self.send_new_message
        # Standard unencrypted MQTT port 1883, 60 s keepalive.
        self.client.connect(server, 1883, 60)
    def on_connect(self,client, userdata, flags, rc):
        # Subscribing here means the subscription is restored on reconnect.
        print("Connected to MQTT server with result code "+str(rc))
        self.client.subscribe(self.channel)
    def send_response(self, response, msg):
        """ Send the response to a user who sent a message to us. """
        # NOTE(review): deliberately a no-op? No reply path is implemented
        # for the MQTT transport -- confirm against sibling handlers.
    def listen(self,callback):
        # Blocking loop: remember the callback and pump the MQTT client.
        self.callback = callback
        while True:
            self.client.loop()
    def send_new_message(self,client, userdata, message):
        # paho-mqtt on_message handler for the subscribed channel.
        if message.payload is not None:
            print ("trying to send",message.payload)
            # Wrap the raw payload in the message dict the display expects.
            msg = {
                'text': [message.payload],
                'type':'text',
                'color':(0,0,120),
                'background':(0,0,0)
            }
            # NOTE(review): the trailing "| {" below appears to be dataset
            # extraction residue fused onto this line, not program text.
            self.callback(msg, self) | {
"content_hash": "1b0d9ea2c15e2e4ad0f2888ef9f5c583",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 66,
"avg_line_length": 29.636363636363637,
"alnum_prop": 0.6901840490797546,
"repo_name": "marqsm/LED-bot",
"id": "866505d062b3e38c63b1e5ccca8bdf1f860edf4d",
"size": "978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LEDBot/mqttHandler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13804"
},
{
"name": "HTML",
"bytes": "2118"
},
{
"name": "JavaScript",
"bytes": "1019"
},
{
"name": "Python",
"bytes": "47465"
},
{
"name": "Shell",
"bytes": "1722"
}
],
"symlink_target": ""
} |
from copengl import *
| {
"content_hash": "9e0ec8b2888189ba1aabf5cb15e24a91",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 21,
"avg_line_length": 11.5,
"alnum_prop": 0.7391304347826086,
"repo_name": "fdkz/libcopengl",
"id": "ac4c8de14cf86a09ffe9f58c61c021e6e4f281b3",
"size": "23",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "44586"
},
{
"name": "C++",
"bytes": "165"
},
{
"name": "Python",
"bytes": "49219"
}
],
"symlink_target": ""
} |
import httplib
import logging as log
session_map = {}
HASH_HEADER = 'Floodlight-Verify-Path'
def request(url, prefix="/api/v1/data/controller/", method='GET',
            data='', hashPath=None, host="127.0.0.1:8080", cookie=None):
    """Issue one HTTPS request to the controller REST API.

    Returns a tuple (status, reason, body, hash-header-value).  Errors
    (>= 300) get the request logged too; exceptions are logged and
    re-raised.  Fix: the connection is now always closed — the original
    leaked one socket per call.
    """
    headers = {'Content-type': 'application/json'}
    if cookie:
        headers['Cookie'] = 'session_cookie=%s' % cookie
    if hashPath:
        headers[HASH_HEADER] = hashPath
    connection = httplib.HTTPSConnection(host)
    try:
        connection.request(method, prefix + url, data, headers)
        response = connection.getresponse()
        ret = (response.status, response.reason, response.read(),
               response.getheader(HASH_HEADER))
        # Only dump the request detail on failure to keep the log quiet.
        if response.status >= 300:
            log.info('Controller REQUEST: %s %s:body=%r' %
                     (method, host + prefix + url, data))
        log.info('Controller RESPONSE: status=%d reason=%r, data=%r,'
                 'hash=%r' % ret)
        return ret
    except Exception as e:
        log.error("Controller REQUEST exception: %s" % e)
        raise
    finally:
        # Resource-leak fix: release the socket whether we succeed or raise.
        connection.close()
def get(cookie, url, server, port, hashPath=None):
    """GET *url* from the controller at server:port."""
    return request(url, hashPath=hashPath,
                   host="%s:%d" % (server, port), cookie=cookie)
def post(cookie, url, server, port, data, hashPath=None):
    """POST *data* to *url* on the controller at server:port."""
    return request(url, method='POST', hashPath=hashPath,
                   host="%s:%d" % (server, port), data=data, cookie=cookie)
def patch(cookie, url, server, port, data, hashPath=None):
    """PATCH *url* on the controller at server:port with *data*."""
    return request(url, method='PATCH', hashPath=hashPath,
                   host="%s:%d" % (server, port), data=data, cookie=cookie)
def put(cookie, url, server, port, data, hashPath=None):
    """PUT *data* to *url* on the controller at server:port."""
    return request(url, method='PUT', hashPath=hashPath,
                   host="%s:%d" % (server, port), data=data, cookie=cookie)
def delete(cookie, url, server, port, hashPath=None):
    """DELETE *url* on the controller at server:port."""
    return request(url, method='DELETE', hashPath=hashPath,
                   host="%s:%d" % (server, port), cookie=cookie)
| {
"content_hash": "ce88f17ff57433afc1db1f5634aa0ef7",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 79,
"avg_line_length": 33.29032258064516,
"alnum_prop": 0.6056201550387597,
"repo_name": "xinwu/horizon",
"id": "8b040f3f5d3f5acbafda7f4d4a05e7fc59c5bd03",
"size": "2064",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/connections/reachability_tests/rest_lib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "173137"
},
{
"name": "HTML",
"bytes": "518040"
},
{
"name": "JavaScript",
"bytes": "1879111"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "4900312"
},
{
"name": "Shell",
"bytes": "19593"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sys, os
import cgi
import xml.etree.ElementTree as ET
#
#
def log(text):
    """Write a LOG-prefixed line for *text* to stderr (stdout is the report)."""
    sys.stderr.write("LOG:  " + str(text) + "\n")
#
# construct data stores -- populated as each XmlStore file is parsed
typesPrimitive = []   # class == primitive
typesEnumerated = []  # no descriptor, choice count > 0
typesRestricted = []  # no descriptor, choice count == 0
typesDescribed = []   # contains descriptor
typesAll = {}         # table[typename] = typenode for clean types
#
# indices computed while generating page
typeNameIndex = []
typeIndex = {}        # key='name', value = [list of types]
fieldNameIndex = []
fieldIndex = {}
enumNameIndex = []    # names of enum values (not types)
enumIndex = {}
grandNameIndex = []
grandIndex = {}
xrefNameIndex = []
xrefIndex = {}
#
# provided types indexed by name of provided type, value is list of provider types
providedtypenames = []
provided = {}         # {'name' : [type, type]} with provides=name
#
# definition objects are constants
definitionsAll = []
class Stats():
    """Mutable counters tallied while the report is generated."""

    # Single source of truth for every counter attribute; the order below
    # is also the order they are dumped by log().
    _COUNTER_NAMES = (
        "nConstants", "nPrimitiveEncodings", "nEnumeratedTypes",
        "nRestrictedTypes", "nDescribedTypes", "nProvidedTypes",
        "nIndexedTypes", "nIndexedFields", "nIndexedEnumerations",
        "nIndexedGrand", "nIndexedXrefs",
    )

    def __init__(self):
        for counter in self._COUNTER_NAMES:
            setattr(self, counter, 0)

    def log(self):
        """Dump every counter to stderr via the module-level log()."""
        for counter in self._COUNTER_NAMES:
            log("STAT: %s = %s" % (counter, getattr(self, counter)))

    def statCheck(self, name, expectedValue):
        """Warn via log() when counter *name* differs from *expectedValue*."""
        currentValue = getattr(self, name)
        if currentValue == expectedValue:
            return
        log("WARNING stat %s expected %s but is actaully %s" % (name, expectedValue, currentValue))
stats = Stats()
class XmlStore():
    """Load one AMQP spec XML file and categorize its contents.

    Side effects on construction: populates the module-level stores
    (typesAll, typesPrimitive, typesEnumerated, typesRestricted,
    typesDescribed, provided, providedtypenames, definitionsAll).
    """
    def __init__(self, filename):
        self.filename = filename
        # Resolve relative to this script so the cwd doesn't matter.
        self.tree = ET.parse(os.path.join(os.path.dirname(__file__), filename))
        self.root = self.tree.getroot() # root=Element 'amqp'
        self.trimNamespace(self.root)
        self.rootName = self.root.get("name")
        self.sections = self.root.findall("section")
        self.types = []
        self.definitions = []
        self.pictures = []
        for section in self.sections:
            ltypes = section.findall("type")
            for type in ltypes:
                # decorate and categorize each type; .text is repurposed to
                # carry the "file:section" label shown throughout the report
                typesAll[type.get("name")] = type
                type.text = self.rootName + ":" + section.get("name")
                if type.get("class") == "primitive":
                    typesPrimitive.append(type)
                else:
                    descr = type.find("descriptor")
                    if descr is None:
                        choices = type.find("choice")
                        if choices is None:
                            typesRestricted.append(type)
                        else:
                            typesEnumerated.append(type)
                    else:
                        typesDescribed.append(type)
                # Index every provided-type name back to its providers.
                provides = type.get("provides")
                if provides is not None and not provides == "":
                    providelist = provides.replace(' ','').split(',')
                    for p in providelist:
                        if not p in provided:
                            providedtypenames.append(p)
                            provided[p] = []
                        provided[p].append(type)
            self.types += section.findall("type")
            ldefs = section.findall("definition")
            for definition in ldefs:
                #log("definition %s" % definition.get("name"))
                definition.text = self.rootName + ":" + section.get("name")
                definitionsAll.append(definition)
            self.definitions += section.findall("definition")
            sTitle = section.get("title")
            if sTitle is None:
                sTitle = ""
            # Collect ASCII-art pictures, captioned "File : section : doc".
            docs = section.findall("doc")
            for doc in docs:
                dTitle = doc.get("title")
                if dTitle is None:
                    dTitle = ""
                pics = doc.findall("picture")
                for pic in pics:
                    pTitle = pic.get("title")
                    #print ("%s : %s : %s : %s" % (self.rootName, sTitle, dTitle, pTitle))
                    pic.caption = (self.rootName.capitalize() + " : " + sTitle + " : " + dTitle).strip()
                    self.pictures.append(pic)

    def trimNamespace(self, node):
        ''' Strip out the "{amqp namespace}" ahead of each tag'''
        pos = node.tag.find("}")
        if pos > 0:
            node.tag = node.tag[pos+1:]
        for child in node:
            self.trimNamespace(child)

    def showPics(self):
        """Print the collapsible HTML section holding this file's diagrams."""
        nodeName = self.rootName.capitalize() + "Diag"
        # Bug fix: the opening <a> tag was malformed ("<a name=\"...\"</a>"),
        # missing the closing '>' of the opening tag.
        print("<a name=\"%sDiagrams\"></a><br>" % self.rootName.capitalize())
        print("<a href=\"javascript:toggle_node('%s')\"> %s </a>%s%s<br>" %
              ((nodeName), lozenge(), nbsp(), self.rootName.capitalize() + " Diagrams"))
        print("<div style=\"display:none; width=100%%; margin-bottom:2px; margin-left:10px\" id=\"%s\">" %
              (nodeName))
        for i in range(len(self.pictures)):
            pic = self.pictures[i]
            print("<a href=\"javascript:toggle_node('%s')\"> %s </a>%s<strong>%s</strong><br>" %
                  ((nodeName + str(i)), lozenge(), nbsp(), pic.caption))
            print("<div style=\"display:none; width=100%%; margin-bottom:2px; margin-left:10px\" id=\"%s\">" %
                  (nodeName + str(i)))
            # NOTE(review): cgi.escape was removed in Python 3.8+;
            # html.escape(..., quote=False) is the drop-in replacement.
            print("<pre>%s</pre><br>" % cgi.escape(pic.text))
            print("</div>")
        print("</div>")
        print("<br>")
# Parse every spec file at import time; each constructor also populates the
# module-level stores declared above.
xmlTypes = XmlStore("types.xml")
xmlTransport = XmlStore("transport.xml")
xmlMessaging = XmlStore("messaging.xml")
xmlTransactions = XmlStore("transactions.xml")
xmlSecurity = XmlStore("security.xml")
xmlStoreList = [xmlTypes, xmlTransport, xmlMessaging, xmlTransactions, xmlSecurity]
#
# Utilities
#
#
class ExitStatus(Exception):
    """Raised when a command wants the script to exit with a non-0 status."""

    def __init__(self, status):
        # Stash the requested exit code for the top-level handler.
        self.status = status
def nbsp():
    """Return the HTML non-breaking-space entity."""
    return "&nbsp;"
def lozenge():
    """Return the lozenge character used as the expand/collapse marker."""
    return "◊"
def double_lozenge():
    """Return two lozenge markers side by side."""
    return 2 * lozenge()
def extract_descr_type_code(code):
    """Shorten a descriptor code like '0x00000000:0x00000010' to '0x10'.

    Keeps everything after the fixed 19-character '0x00000000:0x000000'
    prefix and re-attaches the '0x'.
    """
    return "0x%s" % code[19:]
def noNoneString(str):
    """Return *str* unchanged, or "" when it is falsy (None/empty)."""
    # Parameter name kept for compatibility, though it shadows the builtin.
    return str if str else ""
def noNoneTypeRef(str):
    """Return an HTML link to the TYPE_ anchor for *str*, or "" if falsy."""
    if not str:
        return ""
    return "<a href=\"#TYPE_%s\">%s</a>" % (str, str)
def noNoneProvideRef(str):
    """Return space-separated PROVIDEDTYPE_ anchors for a comma list, or ""."""
    if not str:
        return ""
    names = str.replace(' ','').split(',')
    # Each anchor keeps its trailing space, matching the legacy output.
    return "".join("<a href=\"#PROVIDEDTYPE_%s\">%s</a> " % (e, e) for e in names)
def addToIndex(name, section):
    """Record that type *name* appears in *section* for the Type Index."""
    if name in typeNameIndex:
        typeIndex[name].append(section)
    else:
        typeNameIndex.append(name)
        typeIndex[name] = [section]
def addToFieldIndex(name, parentsection, parenttype):
    """Record a field occurrence (section, owning type) for the Field Index."""
    entry = [parentsection, parenttype]
    if name in fieldNameIndex:
        fieldIndex[name].append(entry)
    else:
        fieldNameIndex.append(name)
        fieldIndex[name] = [entry]
def addToEnumIndex(name, parentsection, parenttype):
    """Record an enum value (section, owning type) for the Enumeration Index."""
    entry = [parentsection, parenttype]
    if name in enumNameIndex:
        enumIndex[name].append(entry)
    else:
        enumNameIndex.append(name)
        enumIndex[name] = [entry]
def addToGrandIndex(name, decoratedname, category, psect, ptype):
    """Record one entry (decorated name, category, section, parent) in the Grand Index."""
    entry = [decoratedname, category, psect, ptype]
    if name in grandNameIndex:
        grandIndex[name].append(entry)
    else:
        grandNameIndex.append(name)
        grandIndex[name] = [entry]
def addToXrefIndex(name, decReferrerName, category, referrerSection):
    """Record that *name* is referenced by *decReferrerName* for the Xref Index."""
    entry = [decReferrerName, category, referrerSection]
    if name in xrefNameIndex:
        xrefIndex[name].append(entry)
    else:
        xrefNameIndex.append(name)
        xrefIndex[name] = [entry]
#
# Open html page header
def print_fixed_leading():
    """Open the HTML page: <head>, title, dojo loader, license, and the
    show/hide JS helpers. print_start_body() finishes the <script> block
    and opens <body>."""
    # start up the web stuff
    print ("<html>")
    print ("<head>")
    print ("<title>AMQP 1.0 - Interactive Protocol Spec</title>")
    print ('''<script src="http://ajax.googleapis.com/ajax/libs/dojo/1.4/dojo/dojo.xd.js" type="text/javascript"></script>
<!-- <script src="http://ajax.googleapis.com/ajax/libs/dojo/1.4/dojo/dojo.xd.js" type="text/javascript"></script> -->
<!--
-
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements. See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership. The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied. See the License for the
- specific language governing permissions and limitations
- under the License.
-
-->
<script type="text/javascript">
function node_is_visible(node)
{
if(dojo.isString(node))
node = dojo.byId(node);
if(!node)
return false;
return node.style.display == "block";
}
function set_node(node, str)
{
if(dojo.isString(node))
node = dojo.byId(node);
if(!node) return;
node.style.display = str;
}
function toggle_node(node)
{
if(dojo.isString(node))
node = dojo.byId(node);
if(!node) return;
set_node(node, (node_is_visible(node)) ? 'none' : 'block');
}
function hide_node(node)
{
set_node(node, 'none');
}
function show_node(node)
{
set_node(node, 'block');
}
function go_back()
{
window.history.back();
}
''')
#
#
def print_start_body():
    """Emit show_all_tables()/hide_all_tables(), close <head>, open <body>.

    Must run after the compute_* passes have populated typesDescribed and
    typesEnumerated, since each described/enumerated type owns a detail div.
    """
    print("function show_all_tables()")
    print("{")
    print(" show_node('Constants');")
    print(" show_node('PrimTypeName');")
    print(" show_node('PrimTypeCode');")
    print(" show_node('DescrTypes');")
    print(" show_node('EnumTypes');")
    print(" show_node('RestrTypes');")
    print(" show_node('ProvTypes');")
    print(" show_node('TypesDiag');")
    print(" show_node('TransportDiag');")
    print(" show_node('MessagingDiag');")
    print(" show_node('TransactionsDiag');")
    print(" show_node('SecurityDiag');")
    print(" show_node('TypIndex');")
    print(" show_node('FldIndex');")
    print(" show_node('EnuIndex');")
    print(" show_node('GndIndex');")
    print(" show_node('XrefIndex');")
    for type in typesDescribed:
        print(" show_node('DT%s')" % type.get("name"))
    for type in typesEnumerated:
        print(" show_node('ET%s')" % type.get("name"))
    print("}")
    print("")
    print("function hide_all_tables()")
    print("{")
    print(" hide_node('Constants');")
    print(" hide_node('PrimTypeName');")
    print(" hide_node('PrimTypeCode');")
    print(" hide_node('DescrTypes');")
    print(" hide_node('EnumTypes');")
    print(" hide_node('RestrTypes');")
    print(" hide_node('ProvTypes');")
    print(" hide_node('TypesDiag');")
    print(" hide_node('TransportDiag');")
    print(" hide_node('MessagingDiag');")
    print(" hide_node('TransactionsDiag');")
    print(" hide_node('SecurityDiag');")
    print(" hide_node('TypIndex');")
    print(" hide_node('FldIndex');")
    print(" hide_node('EnuIndex');")
    print(" hide_node('GndIndex');")
    print(" hide_node('XrefIndex');")
    # Bug fix: these two loops emitted show_node(), so "collapse all"
    # expanded every described/enumerated detail div instead of hiding it.
    for type in typesDescribed:
        print(" hide_node('DT%s')" % type.get("name"))
    for type in typesEnumerated:
        print(" hide_node('ET%s')" % type.get("name"))
    print("}")
    print ("</script>")
    print ("</head>")
    print ("<body>")
    print ("<style>")
    print (" * { font-family: sans-serif; }")
    print ("</style>")
    print ("<style>")
    print ("table, th, td {")
    print (" border: 1px solid black;")
    print (" border-collapse: collapse;")
    print ("}")
    print ("th, td {")
    print (" padding: 4px;")
    print ("}")
    print ("</style>")
    print("<style>")
    print("pre {")
    print(" font-family:monospace,monospace;")
    print(" font-size:1em;")
    print("}")
    print("</style>")
#
#
def print_toc():
    """Emit the table of contents plus the expand/collapse-all controls."""
    # Table of Contents
    print("<a href=\"#Constants\">Constants</a><br>")
    print("<a href=\"#Types\">Types</a><br>")
    print("%s%s<a href=\"#PrimitiveTypes\">Primitive Types</a><br>" % (nbsp(), nbsp()))
    print("%s%s<a href=\"#EnumeratedTypes\">Enumerated Types</a><br>" % (nbsp(), nbsp()))
    print("%s%s<a href=\"#RestrictedTypes\">Restricted Types</a><br>" % (nbsp(), nbsp()))
    print("%s%s<a href=\"#DescribedTypes\">Described Types</a><br>" % (nbsp(), nbsp()))
    print("%s%s<a href=\"#ProvidedTypes\">Provided Types</a><br>" % (nbsp(), nbsp()))
    print("<a href=\"#Diagrams\">Diagrams</a><br>")
    print("<a href=\"#Indices\">Indices</a><br>")
    print("%s%s<a href=\"#TypeIndex\">Types</a><br>" % (nbsp(), nbsp()))
    print("%s%s<a href=\"#FieldIndex\">Fields</a><br>" % (nbsp(), nbsp()))
    print("%s%s<a href=\"#EnumerationIndex\">Enumerations</a><br>" % (nbsp(), nbsp()))
    print("%s%s<a href=\"#GrandIndex\">Grand Index</a><br>" % (nbsp(), nbsp()))
    print("%s%s<a href=\"#XrefIndex3\">Type Cross Reference</a><br>" % (nbsp(), nbsp()))
    print("<hr>")
    print("<strong>NOTE: Tables must be expanded or internal hyperlinks don't work.</strong><br>")
    print("<a href=\"javascript:show_all_tables()\"> %s </a>%sTable view: expand all.<br>" % (lozenge(), nbsp()))
    print("<a href=\"javascript:hide_all_tables()\"> %s </a>%sTable view: collapse all." % (lozenge(), nbsp()))
    print("<hr>")
def print_constants():
    """Emit the collapsible Constants table from definitionsAll.

    Side effects: registers every constant in the type index (addToIndex)
    and bumps stats.nConstants.
    """
    # print types sorted by class name
    print("<a name=\"Constants\"></a>")
    print("<h2>Constants</h2>")
    print("<a href=\"javascript:toggle_node('%s')\"> %s </a>%sConstants<br>" % ("Constants", lozenge(), nbsp()))
    print("<div width=\"100%%\" style=\"display:block\" margin-bottom:\"2px\" id=\"Constants\">")
    print("<table>")
    print("<tr>")
    print(" <th>Section</th>")
    print(" <th>Name</th>")
    print(" <th>Value</th>")
    print(" <th>Label</th>")
    print("</tr>")
    for definition in definitionsAll:
        print("<tr>")
        # definition.text holds the "file:section" label set by XmlStore.
        print(" <td>%s</td>" % definition.text)
        print(" <td><a name=\"TYPE_%s\"></a><strong>%s</strong></td>" % (definition.get("name"),definition.get("name")))
        print(" <td>%s</td>" % definition.get("value"))
        print(" <td>%s</td>" % definition.get("label"))
        print("</tr>")
        addToIndex(definition.get("name"), definition.text) # Constants
        stats.nConstants += 1
    print("</table>")
    print("</div>")
    print("<br>")
#
#
# Primitive-encoding lookup tables, filled by compute_primitive_types().
encoding_typenames = []   # "type" or "type:encoding" names, sorted
encoding_codes = []       # encoding code strings, sorted
encoding_typemap = {}     # name -> encoding Element
encoding_codemap = {}     # code -> encoding Element
encoding_sectionmap = {}  # name -> "file:section" label of the owning type
def compute_primitive_types():
    """Populate the encoding_* tables from typesPrimitive.

    Each encoding is keyed "typename" or "typename:encodingname"; the
    encoding Element's .text is repurposed to carry that display name.
    Raises ValueError on a duplicate encoding name.
    """
    # create sorted lists for display
    for type in typesPrimitive:
        for enc in type.findall("encoding"):
            typename = type.get("name")
            if enc.get("name") is not None:
                typename += ":" + enc.get("name")
            typecode = enc.get("code")
            enc.text = typename
            if not typename in encoding_typenames:
                encoding_typenames.append(typename)
                encoding_codes.append(typecode)
                encoding_typemap[typename] = enc
                encoding_codemap[typecode] = enc
                encoding_sectionmap[typename] = type.text
            else:
                raise ValueError("duplicate encoding type name: '%s'" % typename)
    encoding_typenames.sort()
    # NOTE: codes are sorted as strings, which works because they share the
    # same "0x.." width.
    encoding_codes.sort()
def print_primitive_types():
    """Emit the Primitive Types tables: one sorted by name, one by code.

    Side effects: registers each type and encoding via addToIndex and bumps
    stats.nPrimitiveEncodings.
    """
    # print types sorted by class name
    print("<a name=\"Types\"></a>")
    print("<h2>Types</h2>")
    print("<a name=\"PrimitiveTypes\"></a>")
    print("<h3>Primitive Types</h3>")
    print("<a href=\"javascript:toggle_node('%s')\"> %s </a>%sby Name<br>" % ("PrimTypeName", lozenge(), nbsp()))
    print("<div width=\"100%%\" style=\"display:block\" margin-bottom:\"2px\" id=\"PrimTypeName\">")
    print("<table>")
    print("<tr>")
    print(" <th>Section</th>")
    print(" <th>Name</th>")
    print(" <th>Code</th>")
    print(" <th>Category</th>")
    print(" <th>Width</th>")
    print(" <th>Label</th>")
    print("</tr>")
    for type in typesPrimitive:
        # One parent row per type, then one row per encoding beneath it.
        print("<tr>")
        print(" <td>%s</td>" % type.text)
        print(" <td><a name=\"TYPE_%s\"></a><strong>%s</strong></td>" % (type.get("name"), type.get("name")))
        print(" <td></td>")
        print(" <td></td>")
        print(" <td></td>")
        print(" <td>%s</td>" % type.get("label"))
        print("</tr>")
        addToIndex(type.get("name"), type.text) # Primitive category
        for enc in type.findall("encoding"):
            print("<tr>")
            print(" <td></td>")
            print(" <td><a name=\"TYPE_%s\"></a><strong>%s</strong></td>" % (enc.text, enc.text))
            print(" <td>%s</td>" % enc.get("code"))
            print(" <td>%s</td>" % enc.get("category"))
            print(" <td>%s</td>" % enc.get("width"))
            print(" <td>%s</td>" % enc.get("label"))
            print("</tr>")
            addToIndex(enc.text, "types:encodings") # Primitive type
            stats.nPrimitiveEncodings += 1
    # Phony primitive type "*"
    print("<tr>")
    print(" <td>spec:wildcard</td>")
    print(" <td><a name=\"TYPE_*\"><strong>*</strong></a></td>")
    print(" <td></td>")
    print(" <td></td>")
    print(" <td></td>")
    print(" <td>A value of any type is permitted.</td>")
    print("</tr>")
    print("</table>")
    print("</div>")
    print("<br>")
    # print types sorted by class code
    print("<a href=\"javascript:toggle_node('%s')\"> %s </a>%sby Code<br>" % ("PrimTypeCode", lozenge(), nbsp()))
    print("<div width=\"100%%\" style=\"display:block\" margin-bottom:\"2px\" id=\"PrimTypeCode\">")
    print("<table>")
    print("<tr>")
    print(" <th>Section</th>")
    print(" <th>Name</th>")
    print(" <th>Code</th>")
    print(" <th>Category</th>")
    print(" <th>Width</th>")
    print(" <th>Label</th>")
    print("</tr>")
    for code in encoding_codes:
        enc = encoding_codemap[code]
        print("<tr>")
        print(" <td>%s</td>" % "types:encodings")
        print(" <td><strong>%s</strong></td>" % enc.text)
        print(" <td>%s</td>" % enc.get("code"))
        print(" <td>%s</td>" % enc.get("category"))
        print(" <td>%s</td>" % enc.get("width"))
        print(" <td>%s</td>" % enc.get("label"))
        print("</tr>")
    print("</table>")
    print("</div>")
    print("<br>")
#
#
# Described-type lookup tables, filled by compute_described_types().
descr_longnames = []  # "transport:performatives open"
descr_codes = []      # "0x10"
descr_codemap = {}    # map[longname] = "0x10"
descr_mapcode = {}    # map[code] = longname
descr_typemap = {}    # map[longname] = type node
descr_fieldmap = {}   # map[longname] = [list-of-field-nodes]
descr_fieldindex = [] # list of (fieldname, field's_parent_type_node)
# TODO: get the provides info
def compute_described_types():
    """Populate the descr_* tables from typesDescribed.

    Longnames are "file:section typename"; codes are shortened via
    extract_descr_type_code().
    """
    for type in typesDescribed:
        descriptor = type.find("descriptor")
        descr_name = descriptor.get("name")
        descr_code = extract_descr_type_code(descriptor.get("code"))
        fields = type.findall("field")
        longname = type.text + " " + type.get("name")
        descr_longnames.append(longname)
        descr_codes.append(descr_code)
        descr_codemap[longname] = descr_code
        descr_mapcode[descr_code] = longname
        descr_typemap[longname] = type
        # NOTE: findall() returns a list (never None), so this branch always
        # executes; a fieldless type maps to an empty list.
        if fields is not None:
            descr_fieldmap[longname] = fields
            for field in fields:
                descr_fieldindex.append( (field.get("name"), type) )
    descr_codes.sort()
#
#
def print_described_types():
    """Emit the Described Types summary table plus a detail table per type.

    Side effects: feeds the type and field indices (addToIndex,
    addToFieldIndex) and bumps stats.nDescribedTypes.
    """
    print("<a name=\"DescribedTypes\"></a>")
    print("<h3>Described Types</h3>")
    print("<a href=\"javascript:toggle_node('%s')\"> %s </a>%sDescribed Types<br>" % ("DescrTypes", lozenge(), nbsp()))
    print("<div width=\"100%%\" style=\"display:block\" margin-bottom:\"2px\" id=\"DescrTypes\">")
    print("<table>")
    print("<tr>")
    print(" <th>Section</th>")
    print(" <th>Name</th>")
    print(" <th>Code</th>")
    print(" <th>Type</th>")
    print(" <th>Provides</th>")
    print(" <th>Label</th>")
    print("</tr>")
    # Summary table, in code order.
    for code in descr_codes:
        name = descr_mapcode[code]
        descr_key = name.split()
        section = descr_key[0]
        descr_typename = descr_key[1]
        type = descr_typemap[name]
        print("<tr id=\"TYPE_%s\">" % descr_typename)
        print(" <td>%s</td>" % section)
        print(" <td><a href=\"#details_%s\"><strong>%s</strong></a></td>" % (descr_typename, descr_typename))
        print(" <td>%s</td>" % code)
        print(" <td><a href=\"#TYPE_%s\">%s</a></td>" % (type.get("source"), type.get("source")))
        print(" <td>%s</td>" % noNoneProvideRef(type.get("provides")))
        print(" <td>%s</td>" % noNoneString(type.get("label")))
        print("</tr>")
        addToIndex(descr_typename, section) # Described
        stats.nDescribedTypes += 1
    print("</table>")
    print("<br>")
    # One collapsible field table per described type.
    for code in descr_codes:
        name = descr_mapcode[code]
        descr_key = name.split()
        section = descr_key[0]
        descr_typename = descr_key[1]
        type = descr_typemap[name]
        print("<a name=\"details_%s\"></a>" % descr_typename)
        print("%s%s<a href=\"javascript:toggle_node('%s')\"> %s </a>%s %s<strong><a href=\"#TYPE_%s\">%s</a></strong><br>" % \
            (nbsp(), nbsp(), "DT"+descr_typename, lozenge(), nbsp(), "Described type: " + section + " - ", descr_typename, descr_typename))
        print("<div width=\"100%%\" style=\"display:block\" margin-bottom:\"2px\" id=\"%s\">" % ("DT"+descr_typename))
        print("<table>")
        print("<tr>")
        print(" <th>Tag</th>")
        print(" <th>Name</th>")
        print(" <th>Type</th>")
        print(" <th>Requires</th>")
        print(" <th>Default</th>")
        print(" <th>Mandatory</th>")
        print(" <th>Multiple</th>")
        print(" <th>Label</th>")
        print("</tr>")
        for child in type:
            childtag = ""
            childtype = ""
            printthis = True
            # Only 'field' and 'descriptor' children become rows.
            if child.tag == "field":
                childtype = child.get("type")
                childlabel = noNoneString(child.get("label"))
                childname ="<a id=\"FIELD_%s_%s\">%s</a>" % (descr_typename, child.get("name"), child.tag)
                childtag = " <td>%s</td>" % (childname)
                addToFieldIndex(child.get("name"), section, descr_typename)
            elif child.tag == "descriptor":
                childlabel = noNoneString(type.get("label"))
                childtag = " <td>%s</td>" % child.tag
            else:
                printthis = False
            if printthis:
                print("<tr>")
                print("%s" % childtag)
                print(" <td><strong>%s</strong></td>" % child.get("name"))
                print(" <td><a href=\"#TYPE_%s\">%s</a></td>" % (childtype, childtype))
                print(" <td>%s</td>" % noNoneProvideRef(child.get("requires")))
                print(" <td>%s</td>" % noNoneString(child.get("default")))
                print(" <td>%s</td>" % noNoneString(child.get("mandatory")))
                print(" <td>%s</td>" % noNoneString(child.get("multiple")))
                print(" <td>%s</td>" % childlabel)
                print("</tr>")
        print("</table>")
        print("<br>")
        print("</div>") # End one described type
    print("</div>") # End described type details
    print("<br>")
#
#
# Enumerated-type lookup tables, filled by compute_enumerated_types().
enum_longnames = []   # "messaging:message-format terminus-durability"
enum_typemap = {}     # map[longname] = type node
enum_choicemap = {}   # map[longname] = [list-of-choice-fields]
enum_choiceindex = {} # map[choicename] = choice's parent type node (a dict, not a list)
def compute_enumerated_types():
    """Populate the enum_* tables from typesEnumerated and feed the
    enumeration index (addToEnumIndex)."""
    #log("typesEnumerated: %s" % typesEnumerated)
    for type in typesEnumerated:
        #log("processing enum %s" % type.get("name"))
        longname = type.text + " " + type.get("name")
        enum_longnames.append(longname)
        enum_typemap[longname] = type
        # if choices is not None:
        # enum_choicemap[longname] = choices
        # for choice in choices:
        # log("processing enum choice %s" % choice.get("name"))
        # enum_choiceindex.append( (choice.get("name"), type) )
        choices = []
        for child in type:
            if child.tag == "choice":
                # NOTE(review): "choices += child" extends the list with the
                # choice element's *sub-elements* (Element is iterable), not
                # the choice element itself; append(child) may have been
                # intended. enum_choicemap is not read anywhere in this file,
                # so the behavior is preserved as-is — confirm before changing.
                choices += child
                enum_choiceindex[child.get("name")] = type
                addToEnumIndex(child.get("name"), type.text, type.get("name"))
        enum_choicemap[longname] = choices
    enum_longnames.sort()
def print_enumerated_types():
    """Emit the Enumerated Types summary table plus a detail table per enum.

    Side effects: feeds the type index (addToIndex) and bumps
    stats.nEnumeratedTypes.
    """
    print("<a name=\"EnumeratedTypes\"></a>")
    print("<h3>Enumerated Types</h3>")
    print("<a href=\"javascript:toggle_node('%s')\"> %s </a>%sEnumerated Types<br>" % ("EnumTypes", lozenge(), nbsp()))
    print("<div width=\"100%%\" style=\"display:block\" margin-bottom:\"2px\" id=\"EnumTypes\">")
    print("<table>")
    print("<tr>")
    print(" <th>Section</th>")
    print(" <th>Name</th>")
    print(" <th>Type</th>")
    print(" <th>Label</th>")
    print(" <th>Provides</th>")
    print("</tr>")
    for lname in enum_longnames:
        type = enum_typemap[lname]
        print("<tr id=\"TYPE_%s\">" % type.get("name"))
        print(" <td>%s</td>" % type.text)
        print(" <td><a href=\"#details_%s\"><strong>%s</strong></a></td>" % (type.get("name"), type.get("name")))
        print(" <td><a href=\"#TYPE_%s\">%s</a></td>" % (type.get("source"), type.get("source")))
        print(" <td>%s</td>" % noNoneString(type.get("label")))
        print(" <td>%s</td>" % noNoneProvideRef(type.get("provides")))
        print("</tr>")
        addToIndex(type.get("name"), type.text) # Enum
        stats.nEnumeratedTypes += 1
    print("</table>")
    print("<br>")
    # One collapsible value table per enumerated type.
    for lname in enum_longnames:
        type = enum_typemap[lname]
        enum_key = lname.split()
        section = enum_key[0]
        enum_typename = enum_key[1]
        print("<a name=\"details_%s\"></a>" % (enum_typename))
        print("%s%s<a href=\"javascript:toggle_node('%s')\"> %s </a>%s %s<strong><a href=\"#TYPE_%s\">%s</a></strong><br>" % \
            (nbsp(), nbsp(), "ET"+enum_typename, lozenge(), nbsp(), "Enumerated type: " + section + " - ", enum_typename, enum_typename))
        print("<div width=\"100%%\" style=\"display:block\" margin-bottom:\"2px\" id=\"%s\">" % ("ET"+enum_typename))
        print("<table>")
        print("<tr>")
        print(" <th>Name</th>")
        print(" <th>Type/Value</th>")
        print(" <th>Label</th>")
        print(" <th>Provides</th>")
        print("</tr>")
        print("<tr>")
        print(" <td><strong>%s</strong></td>" % (type.get("name")))
        print(" <td><a href=\"#TYPE_%s\">%s</a></td>" % (type.get("source"), type.get("source")))
        print(" <td>%s</td>" % noNoneString(type.get("label")))
        print(" <td>%s</td>" % noNoneProvideRef(type.get("provides")))
        print("</tr>")
        for child in type.findall("choice"):
            print("<tr>")
            print(" <td><strong>%s</strong></td>" % child.get("name"))
            print(" <td>%s</td>" % child.get("value"))
            print("</tr>")
        print("</table>")
        print("<br>")
        print("</div>")
    print("</div>") # End enumerated type details
    print("<br>")
#
#
def print_restricted_types():
    """Emit the Restricted Types table.

    Side effects: feeds the type index (addToIndex) and bumps
    stats.nRestrictedTypes.
    """
    print("<a name=\"RestrictedTypes\"></a>")
    print("<h3>Restricted Types</h3>")
    print("<a href=\"javascript:toggle_node('%s')\"> %s </a>%sRestricted Types<br>" % ("RestrTypes", lozenge(), nbsp()))
    print("<div width=\"100%%\" style=\"display:block\" margin-bottom:\"2px\" id=\"RestrTypes\">")
    print("<table>")
    print("<tr>")
    print(" <th>Section</th>")
    print(" <th>Name</th>")
    print(" <th>Type</th>")
    print(" <th>Label</th>")
    print(" <th>Provides</th>")
    print("</tr>")
    for type in typesRestricted:
        print("<tr>")
        print(" <td>%s</td>" % type.text)
        print(" <td><strong><a name=\"TYPE_%s\">%s</a></strong></td>" % (type.get("name"), type.get("name")))
        print(" <td><a href=\"#TYPE_%s\">%s</a></td>" % (type.get("source"),type.get("source")))
        print(" <td>%s</td>" % noNoneString(type.get("label")))
        print(" <td>%s</td>" % noNoneProvideRef(type.get("provides")))
        print("</tr>")
        addToIndex(type.get("name"), type.text) # Restricted
        stats.nRestrictedTypes += 1
    print("</table>")
    print("</div>")
    print("<br>")
#
#
def print_provided_types():
    """Emit the Provided Types table (provided name -> provider types).

    Side effects: sorts providedtypenames in place, feeds the type index
    and bumps stats.nProvidedTypes.
    """
    providedtypenames.sort()
    print("<a name=\"ProvidedTypes\"></a>")
    print("<h3>Provided Types</h3>")
    print("<a href=\"javascript:toggle_node('%s')\"> %s </a>%sProvided Types<br>" % ("ProvTypes", lozenge(), nbsp()))
    print("<div width=\"100%%\" style=\"display:block\" margin-bottom:\"2px\" id=\"ProvTypes\">")
    print("<table>")
    print("<tr>")
    print(" <th>Provided Type</th>")
    print(" <th>Provider</th>")
    print(" <th>Provider Section</th>")
    print("</tr>")
    for ptype in providedtypenames:
        # Only the first row for a provided type carries the anchor id.
        anchor = " id=\"PROVIDEDTYPE_%s\"" % ptype
        types = provided[ptype]
        addToIndex(ptype, "PROVIDED")
        stats.nProvidedTypes += 1
        for type in types:
            print("<tr%s>" % anchor)
            anchor = ""
            print(" <td>%s</td>" % ptype)
            print(" <td>%s</td>" % noNoneTypeRef(type.get("name")))
            print(" <td>%s</td>" % type.text)
            print("</tr>")
    print("</table>")
    print("</div>")
    print("<br>")
#
#
def print_asciiart():
    """Emit the Diagrams section: every XML store dumps its ASCII-art pictures."""
    print("<a name=\"Diagrams\"></a>")
    print("<h2>Diagrams</h2>")
    print("These diagrams may not make sense when taken out of the context of the ")
    print("<a href=\"http://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-overview-v1.0-os.html\">")
    print("AMQP 1.0 Specification</a>. Please refer to the spec to get the complete narrative.<br>")
    for store in xmlStoreList:
        store.showPics()
#
#
def print_type_index():
    """Emit the Type Index table and feed the Grand Index.

    Side effects: sorts typeNameIndex in place, calls addToGrandIndex per
    row, bumps stats.nIndexedTypes.
    """
    typeNameIndex.sort()
    print("<a name=\"Indices\"></a>")
    print("<h2>Indices</h2>")
    print("<a name=\"TypeIndex\"></a>")
    print("<h3>Type Index</h3>")
    print("<a href=\"javascript:toggle_node('%s')\"> %s </a>%sType Index<br>" % ("TypIndex", lozenge(), nbsp()))
    print("<div width=\"100%%\" style=\"display:block\" margin-bottom:\"2px\" id=\"TypIndex\">")
    print("<table>")
    print("<tr>")
    print(" <th>Type Name</th>")
    print(" <th>Section</th>")
    print("</tr>")
    for idx in typeNameIndex:
        sections = typeIndex[idx]
        for section in sections:
            print("<tr>")
            # Provided types link to their PROVIDEDTYPE_ anchors instead.
            if section == "PROVIDED":
                name = noNoneProvideRef(idx)
            else:
                name = noNoneTypeRef(idx)
            print(" <td>%s</td>" % name)
            print(" <td>%s</td>" % section)
            print("</tr>")
            addToGrandIndex(idx, name, "type", section, " ")
            stats.nIndexedTypes += 1
    print("</table>")
    print("</div>")
    print("<br>")
#
#
def print_field_index():
    """Emit the Field Index table and feed the Grand Index.

    Side effects: sorts fieldNameIndex in place, calls addToGrandIndex per
    row, bumps stats.nIndexedFields.
    """
    fieldNameIndex.sort()
    print("<a name=\"FieldIndex\"></a>")
    print("<h3>Field Index</h3>")
    print("<a href=\"javascript:toggle_node('%s')\"> %s </a>%sField Index<br>" % ("FldIndex", lozenge(), nbsp()))
    print("<div width=\"100%%\" style=\"display:block\" margin-bottom:\"2px\" id=\"FldIndex\">")
    print("<table>")
    print("<tr>")
    print(" <th>Field Name</th>")
    print(" <th>Parent Type</th>")
    print(" <th>Section</th>")
    print("</tr>")
    for idx in fieldNameIndex:
        parents = fieldIndex[idx]
        for parent in parents:
            # parent is [section, owning-type-name], see addToFieldIndex.
            psect = parent[0]
            ptype = parent[1]
            print("<tr>")
            name = "<a href=\"#FIELD_%s_%s\">%s</a>" % (ptype, idx, idx)
            print(" <td>%s</td>" % name)
            print(" <td>%s</td>" % ptype)
            print(" <td>%s</td>" % psect)
            print("</tr>")
            addToGrandIndex(idx, name, "field", psect, ptype)
            stats.nIndexedFields += 1
    print("</table>")
    print("</div>")
    print("<br>")
#
#
def print_enumeration_index():
    """Emit the Enumeration Index table and feed the Grand Index.

    Side effects: sorts enumNameIndex in place, calls addToGrandIndex per
    row, bumps stats.nIndexedEnumerations.
    """
    enumNameIndex.sort()
    print("<a name=\"EnumerationIndex\"></a>")
    print("<h3>Enumeration Index</h3>")
    print("<a href=\"javascript:toggle_node('%s')\"> %s </a>%sEnumeration Index<br>" % ("EnuIndex", lozenge(), nbsp()))
    print("<div width=\"100%%\" style=\"display:block\" margin-bottom:\"2px\" id=\"EnuIndex\">")
    print("<table>")
    print("<tr>")
    print(" <th>Enum Value</th>")
    print(" <th>Enumeration</th>")
    print(" <th>Section</th>")
    print("</tr>")
    for idx in enumNameIndex:
        parents = enumIndex[idx]
        for parent in parents:
            # parent is [section, owning-enum-type-name], see addToEnumIndex.
            psect = parent[0]
            ptype = parent[1]
            enum = "<a href=\"#TYPE_%s\">%s</a>" % (ptype, ptype)
            print("<tr>")
            print(" <td>%s</td>" % idx)
            print(" <td>%s</td>" % enum)
            print(" <td>%s</td>" % psect)
            print("</tr>")
            addToGrandIndex(idx, idx, "enum value", psect, enum)
            stats.nIndexedEnumerations += 1
    print("</table>")
    print("</div>")
    print("<br>")
#
#
def print_grand_index():
    """Emit the Grand Index table from entries accumulated by the other
    print_* index functions (via addToGrandIndex).

    Side effects: sorts grandNameIndex in place, bumps stats.nIndexedGrand.
    """
    grandNameIndex.sort()
    print("<a name=\"GrandIndex\"></a>")
    print("<h3>Grand Index</h3>")
    print("<a href=\"javascript:toggle_node('%s')\"> %s </a>%sGrand Index<br>" % ("GndIndex", lozenge(), nbsp()))
    print("<div width=\"100%%\" style=\"display:block\" margin-bottom:\"2px\" id=\"GndIndex\">")
    print("<table>")
    print("<tr>")
    print(" <th>Name</th>")
    print(" <th>Category</th>")
    print(" <th>Parent</th>")
    print(" <th>Section</th>")
    print("</tr>")
    for idx in grandNameIndex:
        parents = grandIndex[idx]
        for parent in parents:
            # parent is [decoratedname, category, psect, ptype].
            print("<tr>")
            print(" <td>%s</td>" % parent[0])
            print(" <td>%s</td>" % parent[1])
            print(" <td>%s</td>" % parent[2])
            print(" <td>%s</td>" % parent[3])
            print("</tr>")
            stats.nIndexedGrand += 1
    print("</table>")
    print("</div>")
    print("<br>")
#
#
def print_xref_index():
    """Build and emit the 'Cross Reference Index' HTML table.

    First constructs xrefNameIndex/xrefIndex from the already-computed
    type tables, collecting every enum, restricted type, described
    type, described field, and provided type under the name of the
    type it is defined in terms of.  Then prints one table row per
    (referenced type, referrer) pair.
    """
    # Create xref name index from type index.
    # "*" is the wildcard source; provided types are keyed as "name,PROVIDED"
    # so they do not collide with a plain type of the same name.
    xrefNameIndex.append("*")
    for idx in typeNameIndex:
        sections = typeIndex[idx]
        for section in sections:
            name = idx
            if section == "PROVIDED":
                name += ",PROVIDED"
            if name not in xrefNameIndex:
                xrefNameIndex.append(name)
            else:
                # primitive type names get reused as encoding names...
                pass
    xrefNameIndex.sort()
    for name in xrefNameIndex:
        xrefIndex[name] = [] # list of types defined in terms of type 'name'
    # Enum types
    for lname in enum_longnames:
        type = enum_typemap[lname]
        decname = noNoneTypeRef(type.get("name"))
        source = type.get("source")
        category = "enum"
        refSection = type.text
        xrefIndex[source].append( [decname, category, refSection])
    # Restricted types
    for type in typesRestricted:
        decname = noNoneTypeRef(type.get("name"))
        source = type.get("source")
        category = "restricted"
        refSection = type.text
        xrefIndex[source].append( [decname, category, refSection])
    # Described types (descr_mapcode values are "section typename" strings)
    for code in descr_codes:
        name = descr_mapcode[code]
        descr_key = name.split()
        section = descr_key[0]
        descr_typename = descr_key[1]
        type = descr_typemap[name]
        decname = noNoneTypeRef(descr_typename)
        source = type.get("source")
        category = "described"
        refSection = section
        xrefIndex[source].append( [decname, category, refSection])
    # Described fields: each <field> child is cross-referenced under its
    # declared type, linking back to the field anchor.
    for code in descr_codes:
        name = descr_mapcode[code]
        descr_key = name.split()
        section = descr_key[0]
        descr_typename = descr_key[1]
        type = descr_typemap[name]
        for child in type:
            if child.tag == "field":
                decname = "<a href=\"#FIELD_%s_%s\">%s</a>" % (descr_typename, child.get("name"), child.get("name"))
                source = child.get("type")
                category = "field"
                refSection = "%s - %s" % (section, descr_typename)
                xrefIndex[source].append( [decname, category, refSection])
    # Provided types, filed under their "name,PROVIDED" key.
    for ptype in providedtypenames:
        types = provided[ptype]
        for type in types:
            decname = noNoneTypeRef(type.get("name"))
            source = "%s,%s" % (ptype, "PROVIDED")
            category = "provided"
            refSection = ""
            xrefIndex[source].append( [decname, category, refSection])
    print("<a name=\"XrefIndex3\"></a>")
    print("<h3>Cross Reference Index</h3>")
    print("<a href=\"javascript:toggle_node('%s')\"> %s </a>%sType Cross Reference<br>" % ("XrefIndex", lozenge(), nbsp()))
    print("<div width=\"100%%\" style=\"display:block\" margin-bottom:\"2px\" id=\"XrefIndex\">")
    print("<table>")
    print("<tr>")
    print(" <th>Referenced Type</th>")
    print(" <th>Referrer</th>")
    print(" <th>Section</th>")
    print(" <th>Type</th>")
    print("</tr>")
    for idx in xrefNameIndex:
        # Names containing ':' are skipped — presumably qualified names
        # that cannot be looked up in typesAll; verify against the input.
        if ":" not in idx:
            try:
                idxlist = idx.split(',')
                typetext = ""
                typename = ""
                if len(idxlist) == 1:
                    if idx == "*":
                        typetext = "spec:wildcard"
                        typename = "*"
                    else:
                        type = typesAll[idx]
                        typetext = type.text
                        typename = idxlist[0]
                else:
                    # "name,PROVIDED" entries link to the provided-type anchor.
                    typetext = "provided"
                    typename = "<a href=\"#PROVIDEDTYPE_%s\"> %s </a>" % (idxlist[0], idxlist[0])
                refs = xrefIndex[idx]
                if len(refs) == 0:
                    # Emit one placeholder row so unreferenced types still appear.
                    print("<tr>")
                    print(" <td>%s:<strong>%s</strong></td>" % (typetext, typename))
                    print(" <td>%s</td>" % nbsp())
                    print(" <td>%s</td>" % nbsp())
                    print(" <td>%s</td>" % nbsp())
                    print("</tr>")
                for ref in refs:
                    print("<tr>")
                    print(" <td>%s:<strong>%s</strong></td>" % (typetext, typename))
                    print(" <td>%s</td>" % ref[0])
                    print(" <td>%s</td>" % ref[2])
                    print(" <td>%s</td>" % ref[1])
                    print("</tr>")
                    stats.nIndexedXrefs += 1
            except:
                # Deliberate broad catch: some index names (constants) cannot
                # be resolved as types, so the lookups above may raise.
                #log("Can't resolve as type: %s" % idx) # constants can't be resolved
                pass
    print("</table>")
    print("</div>")
    print("<br>")
#
#
def print_end_body():
    """Close the HTML body and document tags."""
    for closing_tag in ("</body>", "</html>"):
        print(closing_tag)
#
#
def main_except(argv):
    """Generate the complete interactive AMQP 1.0 reference page on stdout.

    Builds the lookup tables first, prints every page section in order,
    and finally runs statCheck on the section counters — presumably a
    regression guard against the expected totals for the spec input
    (TODO confirm statCheck semantics).

    :param argv: command-line arguments (currently unused here)
    """
    # Compute tables and stuff that may be needed by show/hide functions
    compute_primitive_types()
    compute_described_types()
    compute_enumerated_types()
    # Print the web page
    print_fixed_leading()
    print_start_body()
    print("<h1>AMQP 1.0 - Interactive Protocol Type Reference</h1>")
    print_toc()
    print_constants()
    print_primitive_types()
    print_enumerated_types()
    print_restricted_types()
    print_described_types()
    print_provided_types()
    print_asciiart()
    print_type_index()
    print_field_index()
    print_enumeration_index()
    print_grand_index()
    print_xref_index()
    print_end_body()
    # Counter checks: each value is the count accumulated while printing
    # the corresponding section above.
    stats.statCheck("nConstants", 13)
    stats.statCheck("nPrimitiveEncodings", 39)
    stats.statCheck("nEnumeratedTypes", 13)
    stats.statCheck("nRestrictedTypes", 19)
    stats.statCheck("nDescribedTypes", 40)
    stats.statCheck("nProvidedTypes", 14)
    stats.statCheck("nIndexedTypes", 162)
    stats.statCheck("nIndexedFields", 125)
    stats.statCheck("nIndexedEnumerations", 54)
    stats.statCheck("nIndexedGrand", 341)
    stats.statCheck("nIndexedXrefs", 252)
#
#
def main(argv):
    """Run main_except(), mapping outcomes to a process exit status.

    :param argv: command-line arguments forwarded to main_except
    :returns: 0 on success, the embedded status for a deliberate
        ExitStatus, or 1 (after printing the error) for any other
        exception.
    """
    try:
        main_except(argv)
        return 0
    # The original "except ExitStatus, e" comma form is Python-2-only
    # syntax (a SyntaxError on Python 3); "as" works on 2.6+ and 3.
    except ExitStatus as e:
        return e.status
    except Exception as e:
        print("%s: %s" % (type(e).__name__, e))
        return 1
# Script entry point: exit status propagates the result of main().
if __name__ == "__main__":
    sys.exit(main(sys.argv))
| {
"content_hash": "bc5daa93b1381580073db02536ceebc5",
"timestamp": "",
"source": "github",
"line_count": 1150,
"max_line_length": 141,
"avg_line_length": 36.100869565217394,
"alnum_prop": 0.5418874650737066,
"repo_name": "ChugR/qpid-proton-tools",
"id": "e2fa02be99dbedd924309d5621440d29210be977",
"size": "42927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amqp-spec-webpage/webpage.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "31336"
},
{
"name": "C",
"bytes": "3132"
},
{
"name": "C#",
"bytes": "4109"
},
{
"name": "C++",
"bytes": "11144"
},
{
"name": "CMake",
"bytes": "63"
},
{
"name": "HTML",
"bytes": "263684"
},
{
"name": "PowerShell",
"bytes": "22139"
},
{
"name": "Python",
"bytes": "73322"
},
{
"name": "Shell",
"bytes": "4599"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class ExtendedSignalsConfig(AppConfig):
    """Django application configuration for the ``extended_signals`` app."""
    name = 'extended_signals'
| {
"content_hash": "a28fcc953cabd93b9ad6aea8a2fe3c1b",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 39,
"avg_line_length": 21.2,
"alnum_prop": 0.7830188679245284,
"repo_name": "biljettshop/django_extended_signals",
"id": "6420bcb3ccf34989b5e962fb81afe196b6122b3a",
"size": "106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "extended_signals/apps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2283"
}
],
"symlink_target": ""
} |
import multiprocessing
import os
import sys
from distutils.dir_util import mkpath
from py4j.java_gateway import JavaObject
from pyspark.rdd import RDD
from bigdl.util.common import DOUBLEMAX
from bigdl.util.common import JTensor
from bigdl.util.common import JavaValue
from bigdl.util.common import callBigDlFunc
from bigdl.util.common import callJavaFunc
from bigdl.util.common import get_node_and_core_number
from bigdl.util.common import init_engine
from bigdl.util.common import to_list
from bigdl.dataset.dataset import *
# Python 3 removed the `long` and `unicode` built-ins; alias them to
# their Python 3 equivalents so the code below runs on either version.
if sys.version >= '3':
    long = int
    unicode = str
class Top1Accuracy(JavaValue):
    """
    Validation metric: the fraction of samples whose highest-probability
    output index equals the target.

    >>> top1 = Top1Accuracy()
    creating: createTop1Accuracy
    """
    def __init__(self, bigdl_type="float"):
        super(Top1Accuracy, self).__init__(None, bigdl_type)
class TreeNNAccuracy(JavaValue):
    """
    Validation metric backed by BigDL's TreeNNAccuracy method; counts
    how often the output's max-probability index equals the target.

    >>> top1 = TreeNNAccuracy()
    creating: createTreeNNAccuracy
    """
    def __init__(self, bigdl_type="float"):
        super(TreeNNAccuracy, self).__init__(None, bigdl_type)
class Top5Accuracy(JavaValue):
    """
    Validation metric backed by BigDL's Top5Accuracy method.

    >>> top5 = Top5Accuracy()
    creating: createTop5Accuracy
    """
    def __init__(self, bigdl_type="float"):
        super(Top5Accuracy, self).__init__(None, bigdl_type)
class Loss(JavaValue):
    """
    Validation method that evaluates the criterion loss of the model
    output with respect to the target.

    >>> from bigdl.nn.criterion import ClassNLLCriterion
    >>> loss = Loss()
    creating: createClassNLLCriterion
    creating: createLoss
    >>> loss = Loss(ClassNLLCriterion())
    creating: createClassNLLCriterion
    creating: createLoss
    """
    def __init__(self, cri=None, bigdl_type="float"):
        from bigdl.nn.criterion import ClassNLLCriterion
        # Default to negative log-likelihood when no criterion is given.
        if cri is None:
            cri = ClassNLLCriterion()
        super(Loss, self).__init__(None, bigdl_type, cri)
class HitRatio(JavaValue):
    """
    Hit Ratio (HR) metric for recommendation applications: measures
    whether the test item is present on the top-k list.

    >>> hr10 = HitRatio(k = 10)
    creating: createHitRatio
    """
    def __init__(self, k=10, neg_num=100, bigdl_type="float"):
        """
        Create the hit-ratio validation method.

        :param k: size of the top-k list to check
        :param neg_num: number of negative items
        """
        super(HitRatio, self).__init__(None, bigdl_type, k, neg_num)
class NDCG(JavaValue):
    """
    Normalized Discounted Cumulative Gain (NDCG): accounts for the
    position of the hit by assigning higher scores to hits at top ranks.

    >>> ndcg = NDCG(k = 10)
    creating: createNDCG
    """
    def __init__(self, k=10, neg_num=100, bigdl_type="float"):
        """
        Create the NDCG validation method.

        :param k: top k
        :param neg_num: number of negative items
        """
        super(NDCG, self).__init__(None, bigdl_type, k, neg_num)
class MAE(JavaValue):
    """
    Validation metric: mean absolute error of the output with respect
    to the target.

    >>> mae = MAE()
    creating: createMAE
    """
    def __init__(self, bigdl_type="float"):
        super(MAE, self).__init__(None, bigdl_type)
class MaxIteration(JavaValue):
    """
    Trigger that fires once training has run for ``max`` iterations.
    Usually used as the end_trigger when creating an Optimizer.

    >>> maxIteration = MaxIteration(20)
    creating: createMaxIteration
    """
    def __init__(self, max, bigdl_type="float"):
        """
        Create a MaxIteration trigger.

        :param max: iteration count at which the trigger fires
        """
        super(MaxIteration, self).__init__(None, bigdl_type, max)
class MaxEpoch(JavaValue):
    """
    Trigger that fires once training has run for ``max_epoch`` epochs.
    Usually used as the end_trigger when creating an Optimizer.

    >>> maxEpoch = MaxEpoch(2)
    creating: createMaxEpoch
    """
    def __init__(self, max_epoch, bigdl_type="float"):
        """
        Create a MaxEpoch trigger.

        :param max_epoch: epoch count at which the trigger fires
        """
        super(MaxEpoch, self).__init__(None, bigdl_type, max_epoch)
class EveryEpoch(JavaValue):
    """
    Trigger that fires each time an epoch finishes.  Usable as the
    trigger of set_validation and set_checkpoint on an Optimizer, and
    of TrainSummary.set_summary_trigger.

    >>> everyEpoch = EveryEpoch()
    creating: createEveryEpoch
    """
    def __init__(self, bigdl_type="float"):
        """Create an EveryEpoch trigger."""
        super(EveryEpoch, self).__init__(None, bigdl_type)
class SeveralIteration(JavaValue):
    """
    Trigger that fires every ``interval`` iterations.  Usable as the
    trigger of set_validation and set_checkpoint on an Optimizer, and
    of TrainSummary.set_summary_trigger.

    >>> serveralIteration = SeveralIteration(2)
    creating: createSeveralIteration
    """
    def __init__(self, interval, bigdl_type="float"):
        """
        Create a SeveralIteration trigger.

        :param interval: fire the trigger every ``interval`` iterations
        """
        super(SeveralIteration, self).__init__(None, bigdl_type, interval)
class MaxScore(JavaValue):
    """
    Trigger that fires when the validation score exceeds ``max``.

    >>> maxScore = MaxScore(0.4)
    creating: createMaxScore
    """
    def __init__(self, max, bigdl_type="float"):
        """
        Create a MaxScore trigger.

        :param max: score threshold
        """
        super(MaxScore, self).__init__(None, bigdl_type, max)
class MinLoss(JavaValue):
    """
    Trigger that fires when the training loss drops below ``min``.

    >>> minLoss = MinLoss(0.1)
    creating: createMinLoss
    """
    def __init__(self, min, bigdl_type="float"):
        """
        Create a MinLoss trigger.

        :param min: loss threshold
        """
        super(MinLoss, self).__init__(None, bigdl_type, min)
class Poly(JavaValue):
    """
    Polynomial learning-rate decay: the effective rate is
    base_lr * (1 - iter/max_iteration) ^ power, reaching zero at
    max_iteration.

    :param power: exponent of the decay term
    :param max_iteration: iteration at which the rate reaches zero

    >>> poly = Poly(0.5, 2)
    creating: createPoly
    """
    def __init__(self, power, max_iteration, bigdl_type="float"):
        super(Poly, self).__init__(None, bigdl_type, power, max_iteration)
class Exponential(JavaValue):
    """
    Exponential learning-rate schedule, which rescales the rate by
    lr_{n + 1} = lr * decayRate `^` (iter / decayStep).

    :param decay_step: the interval for lr decay
    :param decay_rate: decay rate
    :param stair_case: if True, iter / decayStep is an integer division
        and the decayed learning rate follows a staircase function

    >>> exponential = Exponential(100, 0.1)
    creating: createExponential
    """
    def __init__(self, decay_step, decay_rate, stair_case=False, bigdl_type="float"):
        super(Exponential, self).__init__(None, bigdl_type, decay_step,
                                          decay_rate, stair_case)
class Step(JavaValue):
    """
    Step learning-rate decay: the effective rate is
    base_lr * gamma ^ (floor(iter / step_size)).

    :param step_size: iterations between decays
    :param gamma: decay factor

    >>> step = Step(2, 0.3)
    creating: createStep
    """
    def __init__(self, step_size, gamma, bigdl_type="float"):
        super(Step, self).__init__(None, bigdl_type, step_size, gamma)
class Default(JavaValue):
    """
    The default learning-rate schedule; takes no extra parameters.

    >>> step = Default()
    creating: createDefault
    """
    def __init__(self, bigdl_type="float"):
        super(Default, self).__init__(None, bigdl_type)
class Plateau(JavaValue):
    """
    Learning-rate schedule for when a metric has stopped improving.
    Models often benefit from reducing the rate by a factor of 2-10
    once learning stagnates: a quantity is monitored, and if no
    improvement is seen for ``patience`` epochs the rate is reduced.

    :param monitor: quantity to be monitored; can be Loss or score
    :param factor: multiplier applied on reduction (new_lr = lr * factor)
    :param patience: epochs with no improvement before reducing the rate
    :param mode: one of {min, max}; "min" reduces when the monitored
        quantity stops decreasing, "max" when it stops increasing
    :param epsilon: threshold for measuring the new optimum, to only
        focus on significant changes
    :param cooldown: epochs to wait before resuming normal operation
        after the rate has been reduced
    :param min_lr: lower bound on the learning rate

    >>> plateau = Plateau("score")
    creating: createPlateau
    """
    def __init__(self, monitor, factor=0.1, patience=10, mode="min",
                 epsilon=1e-4, cooldown=0, min_lr=0.0, bigdl_type="float"):
        super(Plateau, self).__init__(None, bigdl_type, monitor, factor,
                                      patience, mode, epsilon, cooldown,
                                      min_lr)
class Warmup(JavaValue):
    """
    Gradual learning-rate increase: the effective rate is
    base_lr + delta * iteration.

    :param delta: amount added after each iteration

    >>> warmup = Warmup(0.05)
    creating: createWarmup
    """
    def __init__(self, delta, bigdl_type="float"):
        super(Warmup, self).__init__(None, bigdl_type, delta)
class SequentialSchedule(JavaValue):
    """
    Stacks several learning-rate schedules, each running for a given
    number of iterations.

    :param iteration_per_epoch: iteration count per epoch

    >>> sequentialSchedule = SequentialSchedule(5)
    creating: createSequentialSchedule
    >>> poly = Poly(0.5, 2)
    creating: createPoly
    >>> test = sequentialSchedule.add(poly, 5)
    """
    def __init__(self, iteration_per_epoch, bigdl_type="float"):
        super(SequentialSchedule, self).__init__(None, bigdl_type,
                                                 iteration_per_epoch)

    def add(self, scheduler, max_iteration, bigdl_type="float"):
        """
        Append a learning-rate schedule to the contained ``schedules``.

        :param scheduler: learning-rate schedule to append
        :param max_iteration: iterations this schedule will run for
        """
        return callBigDlFunc(bigdl_type, "addScheduler", self.value,
                             scheduler, max_iteration)
class OptimMethod(JavaValue):
    """Base wrapper for BigDL optimization methods.

    Either adopts an existing JVM object or asks the BigDL backend to
    construct one from the given arguments.
    """
    def __init__(self, jvalue, bigdl_type, *args):
        if jvalue:
            # Adopt a pre-built JVM optim method.
            assert type(jvalue) == JavaObject
            self.value = jvalue
        else:
            self.value = callBigDlFunc(
                bigdl_type, JavaValue.jvm_class_constructor(self), *args)
        self.bigdl_type = bigdl_type

    @staticmethod
    def load(path, bigdl_type="float"):
        """
        Load an optim method from a file.

        :param path: file path
        """
        return callBigDlFunc(bigdl_type, "loadOptimMethod", path)

    def save(self, path, overWrite):
        """
        Save this optim method to a file.

        :param path: destination path
        :param overWrite: whether to overwrite an existing file
        """
        return callBigDlFunc(self.bigdl_type, "saveOptimMethod",
                             self.value, path, overWrite)
class SGD(OptimMethod):
    """
    A plain implementation of SGD.

    :param learningrate: learning rate
    :param learningrate_decay: learning rate decay
    :param weightdecay: weight decay
    :param momentum: momentum
    :param dampening: dampening for momentum
    :param nesterov: enables Nesterov momentum
    :param leaningrate_schedule: learning-rate schedule (parameter name
        is misspelled but kept for API compatibility)
    :param learningrates: 1D tensor of individual learning rates
    :param weightdecays: 1D tensor of individual weight decays

    >>> sgd = SGD()
    creating: createDefault
    creating: createSGD
    """
    def __init__(self,
                 learningrate=1e-3,
                 learningrate_decay=0.0,
                 weightdecay=0.0,
                 momentum=0.0,
                 dampening=DOUBLEMAX,
                 nesterov=False,
                 leaningrate_schedule=None,
                 learningrates=None,
                 weightdecays=None,
                 bigdl_type="float"):
        # Fall back to the Default schedule when none is supplied.
        schedule = leaningrate_schedule if leaningrate_schedule else Default()
        super(SGD, self).__init__(
            None, bigdl_type, learningrate, learningrate_decay, weightdecay,
            momentum, dampening, nesterov, schedule,
            JTensor.from_ndarray(learningrates),
            JTensor.from_ndarray(weightdecays))
class Adagrad(OptimMethod):
    """
    An implementation of Adagrad; see the original paper:
    http://jmlr.org/papers/volume12/duchi11a/duchi11a.pdf

    :param learningrate: learning rate
    :param learningrate_decay: learning rate decay
    :param weightdecay: weight decay

    >>> adagrad = Adagrad()
    creating: createAdagrad
    """
    def __init__(self, learningrate=1e-3, learningrate_decay=0.0,
                 weightdecay=0.0, bigdl_type="float"):
        super(Adagrad, self).__init__(None, bigdl_type, learningrate,
                                      learningrate_decay, weightdecay)
class LBFGS(OptimMethod):
    """
    L-BFGS relying on a user-provided line search (state.lineSearch).
    Without one, a simple fixed step based on the learning rate is
    used; fixed steps cost far less than line searches and can be
    useful for stochastic problems.  The learning rate applies even
    when a line search is provided — useful for large-scale stochastic
    problems where opfunc is a noisy approximation of f(x), as it
    allows reducing confidence in the step size.

    :param max_iter: maximum number of iterations allowed
    :param max_eval: maximum number of function evaluations
    :param tolfun: termination tolerance on the first-order optimality
    :param tolx: termination tolerance on progress in terms of
        function/parameter changes
    :param ncorrection:
    :param learningrate:
    :param verbose:
    :param linesearch: a line search function (must be None)
    :param linesearch_options: options when no line search is provided,
        in which case a fixed step size is used (must be None)

    >>> lbfgs = LBFGS()
    creating: createLBFGS
    """
    def __init__(self,
                 max_iter=20,
                 max_eval=DOUBLEMAX,
                 tolfun=1e-5,
                 tolx=1e-9,
                 ncorrection=100,
                 learningrate=1.0,
                 verbose=False,
                 linesearch=None,
                 linesearch_options=None,
                 bigdl_type="float"):
        # Custom line searches are not supported through the Python API.
        if linesearch or linesearch_options:
            raise ValueError('linesearch and linesearch_options must be None in LBFGS')
        super(LBFGS, self).__init__(
            None, bigdl_type, max_iter, max_eval, tolfun, tolx,
            ncorrection, learningrate, verbose, linesearch,
            linesearch_options)
class Adadelta(OptimMethod):
    """
    Adadelta implementation for SGD: http://arxiv.org/abs/1212.5701

    :param decayrate: interpolation parameter rho
    :param epsilon: for numerical stability

    >>> adagrad = Adadelta()
    creating: createAdadelta
    """
    def __init__(self, decayrate=0.9, epsilon=1e-10, bigdl_type="float"):
        super(Adadelta, self).__init__(None, bigdl_type, decayrate, epsilon)
class Adam(OptimMethod):
    """
    An implementation of Adam: http://arxiv.org/pdf/1412.6980.pdf

    :param learningrate: learning rate
    :param learningrate_decay: learning rate decay
    :param beta1: first moment coefficient
    :param beta2: second moment coefficient
    :param epsilon: for numerical stability

    >>> adam = Adam()
    creating: createAdam
    """
    def __init__(self,
                 learningrate=1e-3,
                 learningrate_decay=0.0,
                 beta1=0.9,
                 beta2=0.999,
                 epsilon=1e-8,
                 bigdl_type="float"):
        super(Adam, self).__init__(None, bigdl_type, learningrate,
                                   learningrate_decay, beta1, beta2, epsilon)
class ParallelAdam(OptimMethod):
    """
    An implementation of Adam: http://arxiv.org/pdf/1412.6980.pdf

    :param learningrate: learning rate
    :param learningrate_decay: learning rate decay
    :param beta1: first moment coefficient
    :param beta2: second moment coefficient
    :param epsilon: for numerical stability
    :param parallel_num: parallelism; -1 means the engine's core count

    >>> init_engine()
    >>> pAdam = ParallelAdam()
    creating: createParallelAdam
    """
    def __init__(self,
                 learningrate=1e-3,
                 learningrate_decay=0.0,
                 beta1=0.9,
                 beta2=0.999,
                 epsilon=1e-8,
                 parallel_num=-1,
                 bigdl_type="float"):
        # -1 resolves to the configured core count (requires init_engine()).
        if parallel_num == -1:
            parallel_num = get_node_and_core_number()[1]
        super(ParallelAdam, self).__init__(
            None, bigdl_type, learningrate, learningrate_decay,
            beta1, beta2, epsilon, parallel_num)
class Ftrl(OptimMethod):
    """
    An implementation of Ftrl:
    https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf.
    Supports L1 penalty, L2 penalty and shrinkage-type L2 penalty.

    :param learningrate: learning rate
    :param learningrate_power: double, must be less than or equal to
        zero; default is -0.5
    :param initial_accumulator_value: double, the starting value for
        accumulators; requires zero or positive values
    :param l1_regularization_strength: double, must be greater than or
        equal to zero; default is zero
    :param l2_regularization_strength: double, must be greater than or
        equal to zero; default is zero
    :param l2_shrinkage_regularization_strength: double, must be
        greater than or equal to zero; default is zero.  This differs
        from l2_regularization_strength above: L2 above is a
        stabilization penalty, whereas this one is a magnitude penalty.

    >>> ftrl = Ftrl()
    creating: createFtrl
    >>> ftrl2 = Ftrl(1e-2, -0.1, 0.2, 0.3, 0.4, 0.5)
    creating: createFtrl
    """
    def __init__(self,
                 learningrate=1e-3,
                 learningrate_power=-0.5,
                 initial_accumulator_value=0.1,
                 l1_regularization_strength=0.0,
                 l2_regularization_strength=0.0,
                 l2_shrinkage_regularization_strength=0.0,
                 bigdl_type="float"):
        super(Ftrl, self).__init__(
            None, bigdl_type, learningrate, learningrate_power,
            initial_accumulator_value, l1_regularization_strength,
            l2_regularization_strength,
            l2_shrinkage_regularization_strength)
class Adamax(OptimMethod):
    """
    An implementation of Adamax: http://arxiv.org/pdf/1412.6980.pdf

    :param learningrate: learning rate
    :param beta1: first moment coefficient
    :param beta2: second moment coefficient
    :param epsilon: for numerical stability

    >>> adagrad = Adamax()
    creating: createAdamax
    """
    def __init__(self, learningrate=0.002, beta1=0.9, beta2=0.999,
                 epsilon=1e-38, bigdl_type="float"):
        super(Adamax, self).__init__(None, bigdl_type, learningrate,
                                     beta1, beta2, epsilon)
class RMSprop(OptimMethod):
    """
    An implementation of RMSprop.

    :param learningrate: learning rate
    :param learningrate_decay: learning rate decay
    :param decayrate: decay rate, also called rho
    :param epsilon: for numerical stability

    >>> adagrad = RMSprop()
    creating: createRMSprop
    """
    def __init__(self, learningrate=1e-2, learningrate_decay=0.0,
                 decayrate=0.99, epsilon=1e-8, bigdl_type="float"):
        super(RMSprop, self).__init__(None, bigdl_type, learningrate,
                                      learningrate_decay, decayrate, epsilon)
class MultiStep(JavaValue):
    """
    Similar to Step, but allows non-uniform steps defined by step_sizes.

    :param step_sizes: the series of step sizes used for lr decay
    :param gamma: coefficient of decay

    >>> step = MultiStep([2, 5], 0.3)
    creating: createMultiStep
    """
    def __init__(self, step_sizes, gamma, bigdl_type="float"):
        super(MultiStep, self).__init__(None, bigdl_type, step_sizes, gamma)
class BaseOptimizer(JavaValue):
    """Shared Python-side API for local and distributed optimizers.

    Wraps a JVM optimizer handle (self.value) and forwards every
    configuration call to the BigDL backend via callBigDlFunc.
    """
    def set_model(self, model):
        """
        Set model.

        :param model: new model
        """
        self.value.setModel(model.value)

    def set_criterion(self, criterion):
        """
        Set a new criterion, for optimizer reuse.

        :param criterion: new criterion
        :return:
        """
        callBigDlFunc(self.bigdl_type, "setCriterion", self.value,
                      criterion)

    def set_checkpoint(self, checkpoint_trigger,
                       checkpoint_path, isOverWrite=True):
        """
        Configure checkpoint settings.

        :param checkpoint_trigger: the interval to write snapshots
        :param checkpoint_path: the path to write snapshots into
        :param isOverWrite: whether to overwrite existing snapshots in path.default is True
        """
        # Create the snapshot directory before handing it to the JVM side.
        if not os.path.exists(checkpoint_path):
            mkpath(checkpoint_path)
        callBigDlFunc(self.bigdl_type, "setCheckPoint", self.value,
                      checkpoint_trigger, checkpoint_path, isOverWrite)

    def set_gradclip_const(self, min_value, max_value):
        """
        Configure constant gradient-clipping settings.

        :param min_value: the minimum value to clip by
        :param max_value: the maxmimum value to clip by
        """
        callBigDlFunc(self.bigdl_type, "setConstantClip", self.value, min_value, max_value)

    def set_gradclip_l2norm(self, clip_norm):
        """
        Configure L2-norm gradient-clipping settings.

        :param clip_norm: gradient L2-Norm threshold
        """
        callBigDlFunc(self.bigdl_type, "setL2NormClip", self.value, clip_norm)

    def disable_gradclip(self):
        """
        Disable gradient clipping.
        """
        callBigDlFunc(self.bigdl_type, "disableClip", self.value)

    # return a module
    def optimize(self):
        """
        Run the optimization; returns the trained model wrapped as a Layer.
        """
        jmodel = callJavaFunc(self.value.optimize)
        # Imported here to avoid a circular import at module load time
        # — presumably; verify against bigdl.nn.layer's imports.
        from bigdl.nn.layer import Layer
        return Layer.of(jmodel)

    def set_train_summary(self, summary):
        """
        Set train summary. A TrainSummary object contains information
        necessary for the optimizer to know how often the logs are recorded,
        where to store the logs and how to retrieve them, etc. For details,
        refer to the docs of TrainSummary.

        :param summary: a TrainSummary object
        :return: self, to allow call chaining
        """
        callBigDlFunc(self.bigdl_type, "setTrainSummary", self.value,
                      summary)
        return self

    def set_val_summary(self, summary):
        """
        Set validation summary. A ValidationSummary object contains information
        necessary for the optimizer to know how often the logs are recorded,
        where to store the logs and how to retrieve them, etc. For details,
        refer to the docs of ValidationSummary.

        :param summary: a ValidationSummary object
        :return: self, to allow call chaining
        """
        callBigDlFunc(self.bigdl_type, "setValSummary", self.value,
                      summary)
        return self

    def prepare_input(self):
        """
        Load input. Notebook users can call this method to separate data
        loading from optimizer creation time.
        """
        print("Loading input ...")
        self.value.prepareInput()

    def set_end_when(self, end_when):
        """
        Set when to stop, passed in a [[Trigger]].

        :return: self, to allow call chaining
        """
        self.value.setEndWhen(end_when.value)
        return self
class Optimizer(BaseOptimizer):
    """Front-end optimizer.  Prefer the ``create`` static method, which
    picks a distributed or local optimizer based on the input type."""

    # NOTE: This is a deprecated method, you should use `create` method instead.
    def __init__(self,
                 model,
                 training_rdd,
                 criterion,
                 end_trigger,
                 batch_size,
                 optim_method=None,
                 bigdl_type="float"):
        """
        Create a distributed optimizer.

        :param model: the neural net model
        :param training_rdd: the training dataset
        :param criterion: the loss function
        :param optim_method: the algorithm to use for optimization,
           e.g. SGD, Adagrad, etc. If optim_method is None, the default algorithm is SGD.
        :param end_trigger: when to end the optimization
        :param batch_size: training batch size
        """
        # Delegate to DistriOptimizer and mirror its JVM handle/type so
        # this deprecated wrapper behaves like the object it wraps.
        self.pvalue = DistriOptimizer(model,
                                      training_rdd,
                                      criterion,
                                      end_trigger,
                                      batch_size,
                                      optim_method,
                                      bigdl_type)
        self.value = self.pvalue.value
        self.bigdl_type = self.pvalue.bigdl_type

    @staticmethod
    def create(model,
               training_set,
               criterion,
               end_trigger=None,
               batch_size=32,
               optim_method=None,
               cores=None,
               bigdl_type="float"):
        """
        Create an optimizer.

        Depend on the input type, the returning optimizer can be a local optimizer \
        or a distributed optimizer.

        :param model: the neural net model
        :param training_set: (features, label) for local mode. RDD[Sample] for distributed mode.
        :param criterion: the loss function
        :param optim_method: the algorithm to use for optimization,
           e.g. SGD, Adagrad, etc. If optim_method is None, the default algorithm is SGD.
        :param end_trigger: when to end the optimization. default value is MaxEpoch(1)
        :param batch_size: training batch size
        :param cores: This is for local optimizer only and use total physical cores as the default value
        """
        if not end_trigger:
            end_trigger = MaxEpoch(1)
        if not optim_method:
            optim_method = SGD()
        # RDD / DataSet input -> distributed; (X, y) tuple -> local.
        if isinstance(training_set, RDD) or isinstance(training_set, DataSet):
            return DistriOptimizer(model=model,
                                   training_rdd=training_set,
                                   criterion=criterion,
                                   end_trigger=end_trigger,
                                   batch_size=batch_size,
                                   optim_method=optim_method,
                                   bigdl_type=bigdl_type)
        elif isinstance(training_set, tuple) and len(training_set) == 2:
            x, y = training_set
            return LocalOptimizer(X=x,
                                  Y=y,
                                  model=model,
                                  criterion=criterion,
                                  end_trigger=end_trigger,
                                  batch_size=batch_size,
                                  optim_method=optim_method,
                                  cores=cores,
                                  bigdl_type="float")
        else:
            raise Exception("Not supported training set: %s" % type(training_set))

    def set_validation(self, batch_size, val_rdd, trigger, val_method=None):
        """
        Configure validation settings.

        :param batch_size: validation batch size
        :param val_rdd: validation dataset
        :param trigger: validation interval
        :param val_method: the ValidationMethod to use,e.g. "Top1Accuracy", "Top5Accuracy", "Loss"
        """
        if val_method is None:
            val_method = [Top1Accuracy()]
        func_name = "setValidation"
        # DataSet input goes through a dedicated backend entry point.
        if isinstance(val_rdd, DataSet):
            func_name = "setValidationFromDataSet"
        callBigDlFunc(self.bigdl_type, func_name, self.value, batch_size,
                      trigger, val_rdd, to_list(val_method))

    def set_traindata(self, training_rdd, batch_size):
        """
        Set a new training dataset, for optimizer reuse.

        :param training_rdd: the training dataset
        :param batch_size: training batch size
        :return:
        """
        callBigDlFunc(self.bigdl_type, "setTrainData", self.value,
                      training_rdd, batch_size)
class DistriOptimizer(Optimizer):
    def __init__(self,
                 model,
                 training_rdd,
                 criterion,
                 end_trigger,
                 batch_size,
                 optim_method=None,
                 bigdl_type="float"):
        """
        Create a distributed optimizer.

        :param model: the neural net model
        :param training_rdd: the training dataset, an RDD[Sample] or a DataSet
        :param criterion: the loss function
        :param optim_method: the algorithm to use for optimization,
           e.g. SGD, Adagrad, etc. If optim_method is None, the default algorithm is SGD.
           May also be a JVM optim method or a dict of per-module OptimMethods.
        :param end_trigger: when to end the optimization
        :param batch_size: training batch size
        :raises Exception: if training_rdd is neither an RDD nor a DataSet
        """
        # Normalize optim_method into a {module_name: OptimMethod} dict.
        if not optim_method:
            optim_methods = {model.name(): SGD()}
        elif isinstance(optim_method, OptimMethod):
            optim_methods = {model.name(): optim_method}
        elif isinstance(optim_method, JavaObject):
            optim_methods = {model.name(): OptimMethod(optim_method, bigdl_type)}
        else:
            optim_methods = optim_method
        if isinstance(training_rdd, RDD):
            JavaValue.__init__(self, None, bigdl_type, model.value,
                               training_rdd, criterion,
                               optim_methods, end_trigger, batch_size)
        elif isinstance(training_rdd, DataSet):
            self.bigdl_type = bigdl_type
            self.value = callBigDlFunc(self.bigdl_type, "createDistriOptimizerFromDataSet",
                                       model.value, training_rdd, criterion,
                                       optim_methods, end_trigger, batch_size)
        else:
            # Previously an unsupported dataset type silently left the
            # optimizer half-constructed (self.value never set), causing a
            # confusing AttributeError later.  Fail fast instead, matching
            # the message style of Optimizer.create.
            raise Exception("Not supported training set: %s" % type(training_rdd))
class LocalOptimizer(BaseOptimizer):
    """
    Create an optimizer.

    :param model: the neural net model
    :param X: the training features which is an ndarray or list of ndarray
    :param Y: the training label which is an ndarray
    :param criterion: the loss function
    :param optim_method: the algorithm to use for optimization,
       e.g. SGD, Adagrad, etc. If optim_method is None, the default algorithm is SGD.
    :param end_trigger: when to end the optimization
    :param batch_size: training batch size
    :param cores: by default is the total physical cores.
    """
    def __init__(self,
                 X,
                 Y,
                 model,
                 criterion,
                 end_trigger,
                 batch_size,
                 optim_method=None,
                 cores=None,
                 bigdl_type="float"):
        # Normalize optim_method into a {module_name: OptimMethod} dict,
        # mirroring DistriOptimizer's handling.
        if not optim_method:
            optim_methods = {model.name(): SGD()}
        elif isinstance(optim_method, OptimMethod):
            optim_methods = {model.name(): optim_method}
        elif isinstance(optim_method, JavaObject):
            optim_methods = {model.name(): OptimMethod(optim_method, bigdl_type)}
        else:
            optim_methods = optim_method
        if cores is None:
            cores = multiprocessing.cpu_count()
        JavaValue.__init__(self, None, bigdl_type,
                           [JTensor.from_ndarray(X) for X in to_list(X)],
                           JTensor.from_ndarray(Y),
                           model.value,
                           criterion,
                           optim_methods, end_trigger, batch_size, cores)

    def set_validation(self, batch_size, X_val, Y_val, trigger, val_method=None):
        """
        Configure validation settings.

        :param batch_size: validation batch size
        :param X_val: features of validation dataset
        :param Y_val: label of validation dataset
        :param trigger: validation interval
        :param val_method: the ValidationMethod to use,e.g. "Top1Accuracy", "Top5Accuracy", "Loss"
        """
        if val_method is None:
            val_method = [Top1Accuracy()]
        callBigDlFunc(self.bigdl_type, "setValidation", self.value, batch_size,
                      trigger, [JTensor.from_ndarray(X) for X in to_list(X_val)],
                      JTensor.from_ndarray(Y_val), to_list(val_method))
class TrainSummary(JavaValue):
    """
    A logging facility which allows user to trace how indicators (e.g.
    learning rate, training loss, throughput, etc.) change with iterations/time
    in an optimization process. TrainSummary is for training indicators only
    (check ValidationSummary for validation indicators). It contains necessary
    information for the optimizer to know where to store the logs, how to
    retrieve the logs, and so on. - The logs are written in tensorflow-compatible
    format so that they can be visualized directly using tensorboard. Also the
    logs can be retrieved as ndarrays and visualized using python libraries
    such as matplotlib (in notebook, etc.).

    Use optimizer.setTrainSummary to enable train logger.
    """
    # Note: the original base list had a stray trailing ", " -- cleaned up
    # for consistency with ValidationSummary.
    def __init__(self, log_dir, app_name, bigdl_type="float"):
        """
        Create a TrainSummary. Logs will be saved to log_dir/app_name/train.

        :param log_dir: the root dir to store the logs
        :param app_name: the application name
        """
        JavaValue.__init__(self, None, bigdl_type, log_dir, app_name)

    def read_scalar(self, tag):
        """
        Retrieve train logs by type. Return an array of records in the format
        (step,value,wallClockTime). - "Step" is the iteration count by default.

        :param tag: the type of the logs. Supported tags are: "LearningRate",
               "Loss", "Throughput"
        """
        return callBigDlFunc(self.bigdl_type, "summaryReadScalar", self.value,
                             tag)

    def set_summary_trigger(self, name, trigger):
        """
        Set the interval of recording for each indicator.

        :param name: tag name. Supported tag names are "LearningRate", "Loss",
               "Throughput", "Parameters". "Parameters" is an umbrella tag that
               includes weight, bias, gradWeight, gradBias, and some running
               status (eg. runningMean and runningVar in BatchNormalization).
               If you didn't set any triggers, we will by default record Loss
               and Throughput in each iteration, while *NOT* recording
               LearningRate and Parameters, as recording parameters may
               introduce substantial overhead when the model is very big, and
               LearningRate is not a public attribute for all OptimMethod.
        :param trigger: trigger
        """
        return callBigDlFunc(self.bigdl_type, "summarySetTrigger", self.value,
                             name, trigger)
class ValidationSummary(JavaValue):
    """
    A logging facility which allows user to trace how indicators (e.g.
    validation loss, top1 accuray, top5 accuracy etc.) change with
    iterations/time in an optimization process. ValidationSummary is for
    validation indicators only (check TrainSummary for train indicators).
    It contains necessary information for the optimizer to know where to
    store the logs, how to retrieve the logs, and so on. - The logs are
    written in tensorflow-compatible format so that they can be visualized
    directly using tensorboard. Also the logs can be retrieved as ndarrays
    and visualized using python libraries such as matplotlib
    (in notebook, etc.).

    Use optimizer.setValidationSummary to enable validation logger.
    """

    def __init__(self, log_dir, app_name, bigdl_type="float"):
        """
        Create a ValidationSummary. Logs will be saved to
        log_dir/app_name/train. By default, all ValidationMethod set into
        optimizer will be recorded and the recording interval is the same
        as trigger of ValidationMethod in the optimizer.

        :param log_dir: the root dir to store the logs
        :param app_name: the application name
        """
        JavaValue.__init__(self, None, bigdl_type, log_dir, app_name)

    def read_scalar(self, tag):
        """
        Retrieve validation logs by type. Return an array of records in the
        format (step,value,wallClockTime). - "Step" is the iteration count
        by default.

        :param tag: the type of the logs. The tag should match the name of
               the ValidationMethod set into the optimizer,
               e.g. "Top1AccuracyLoss", "Top1Accuracy" or "Top5Accuracy".
        """
        result = callBigDlFunc(self.bigdl_type, "summaryReadScalar",
                               self.value, tag)
        return result
class L1L2Regularizer(JavaValue):
    """
    Apply both L1 and L2 regularization.

    :param l1: l1 regularization rate
    :param l2: l2 regularization rate
    """
    def __init__(self, l1, l2, bigdl_type="float"):
        JavaValue.__init__(self, None, bigdl_type, l1, l2)
class ActivityRegularization(JavaValue):
    """
    Apply both L1 and L2 regularization

    NOTE(review): this docstring was copied verbatim from L1L2Regularizer;
    judging by the class name this variant presumably penalizes a layer's
    activity (output) rather than its weights -- confirm against the
    JVM-side ActivityRegularization binding.

    :param l1 l1 regularization rate
    :param l2 l2 regularization rate
    """
    def __init__(self, l1, l2, bigdl_type="float"):
        JavaValue.__init__(self, None, bigdl_type, l1, l2)
class L1Regularizer(JavaValue):
    """
    Apply L1 regularization only.

    :param l1: l1 regularization rate
    """
    def __init__(self, l1, bigdl_type="float"):
        JavaValue.__init__(self, None, bigdl_type, l1)
class L2Regularizer(JavaValue):
    """
    Apply L2 regularization only.

    :param l2: l2 regularization rate
    """
    def __init__(self, l2, bigdl_type="float"):
        JavaValue.__init__(self, None, bigdl_type, l2)
def _test():
    """Run this module's doctests against a local 4-core SparkContext."""
    import doctest
    from pyspark import SparkContext
    from bigdl.optim import optimizer
    from bigdl.util.common import init_engine
    from bigdl.util.common import create_spark_conf
    globs = optimizer.__dict__.copy()
    sc = SparkContext(master="local[4]", appName="test optimizer",
                      conf=create_spark_conf())
    init_engine()
    globs['sc'] = sc
    failures, _tests = doctest.testmod(globs=globs,
                                       optionflags=doctest.ELLIPSIS)
    if failures:
        exit(-1)
if __name__ == "__main__":
    # Run the doctest suite when executed directly (requires local Spark).
    _test()
| {
"content_hash": "089797f273bcd10c44d57b1239bdd416",
"timestamp": "",
"source": "github",
"line_count": 1121,
"max_line_length": 554,
"avg_line_length": 35.504906333630686,
"alnum_prop": 0.6135021733122283,
"repo_name": "yiheng/BigDL",
"id": "c67279270a5c9789921c032fb0c62f26b515bc11",
"size": "40389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyspark/bigdl/optim/optimizer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "5177"
},
{
"name": "Java",
"bytes": "6829"
},
{
"name": "Lua",
"bytes": "1904"
},
{
"name": "Python",
"bytes": "1010500"
},
{
"name": "RobotFramework",
"bytes": "30098"
},
{
"name": "Scala",
"bytes": "8194601"
},
{
"name": "Shell",
"bytes": "55677"
}
],
"symlink_target": ""
} |
import json
import urllib
from tempest.common import rest_client
from tempest import config
CONF = config.CONF
class RegionClientJSON(rest_client.RestClient):
    """JSON REST client for the Keystone v3 /regions resource."""

    def __init__(self, auth_provider):
        super(RegionClientJSON, self).__init__(auth_provider)
        self.service = CONF.identity.catalog_type
        self.endpoint_url = 'adminURL'
        self.api_version = "v3"

    def create_region(self, description, **kwargs):
        """Create region."""
        req_body = {'description': description}
        parent = kwargs.get('parent_region_id')
        if parent:
            req_body['parent_region_id'] = parent
        req_body = json.dumps({'region': req_body})
        # A caller-chosen id means PUT (create-with-id); otherwise POST.
        region_id = kwargs.get('unique_region_id')
        if region_id:
            resp, body = self.put('regions/%s' % region_id, req_body)
        else:
            resp, body = self.post('regions', req_body)
        body = json.loads(body)
        return resp, body['region']

    def update_region(self, region_id, **kwargs):
        """Updates a region."""
        post_body = {}
        for field in ('description', 'parent_region_id'):
            if field in kwargs:
                post_body[field] = kwargs.get(field)
        post_body = json.dumps({'region': post_body})
        resp, body = self.patch('regions/%s' % region_id, post_body)
        body = json.loads(body)
        return resp, body['region']

    def get_region(self, region_id):
        """Get region."""
        resp, body = self.get('regions/%s' % region_id)
        body = json.loads(body)
        return resp, body['region']

    def list_regions(self, params=None):
        """List regions."""
        url = 'regions'
        if params:
            url += '?%s' % urllib.urlencode(params)
        resp, body = self.get(url)
        body = json.loads(body)
        return resp, body['regions']

    def delete_region(self, region_id):
        """Delete region."""
        resp, body = self.delete('regions/%s' % region_id)
        return resp, body
| {
"content_hash": "1cf63725fe0bf9c6189f5c5f9df9ccba",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 74,
"avg_line_length": 32.93846153846154,
"alnum_prop": 0.5693601120971509,
"repo_name": "cloudbase/lis-tempest",
"id": "c0787656c057838eeeb457f0e3242f0d6d79f62a",
"size": "2797",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tempest/services/identity/v3/json/region_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3377111"
},
{
"name": "Shell",
"bytes": "8663"
}
],
"symlink_target": ""
} |
import os
import time
import json
import re
import urllib2
import threading
from multiprocessing import Process
from crike_django.settings import MEDIA_ROOT
import urllib
# --- Python 2/3 compatibility shims -----------------------------------------
try:
    # Python 2: make input() behave like raw_input() (return the raw string
    # instead of eval'ing it). On Python 3 raw_input is gone -> NameError.
    input = raw_input
except NameError:
    pass
try:
    import urllib.request
    #import urllib.parse
except ImportError:
    # Python 2: emulate the urllib.request / urllib.parse module layout on
    # top of urllib2 / urlparse so the rest of the module uses one spelling.
    import urllib
    urllib.request = __import__('urllib2')
    urllib.parse = __import__('urlparse')
# Short aliases used by the download helpers below.
urlopen = urllib.request.urlopen
request = urllib.request.Request
def get_content_from_url(url):
    """Fetch *url* and return its body decoded as UTF-8 (errors ignored).

    Retries up to five times; returns '' if every attempt fails.
    """
    content = ''
    for _attempt in range(5):
        try:
            content = urlopen(url).read().decode('utf-8', 'ignore')
            break
        except Exception as e:
            print(e)
    return content
class download_from_bing_thread(threading.Thread):
    """Worker thread that scrapes Bing image search for one query and saves
    JPEGs under <path>/<query>, up to the module-level `pics_per_word` quota.

    Cooperates with the module globals `threadstop` (stop signal, compared
    against the query) and `pics_per_word`.
    """
    def __init__(self, query, path):
        threading.Thread.__init__(self)
        self.query = query
        self.path = path
    def run(self):
        # BASE_URL = 'http://image.baidu.com/i?tn=baiduimage&ie=utf-8&word=' + self.query
        BASE_URL = 'http://cn.bing.com/images/search?q='+self.query
        content = get_content_from_url(BASE_URL)
        # Each match is the raw metadata blob of one result image.
        imginfos = re.findall('imgurl:"([^<>]*?) kB', content, re.M | re.S)
        BASE_PATH = os.path.join(self.path, self.query)
        if not os.path.exists(BASE_PATH):
            os.makedirs(BASE_PATH)
        for imginfo in imginfos:
            # Stop early when signalled or the per-word quota is reached.
            if threadstop == self.query or len(os.listdir(BASE_PATH)) >= pics_per_word:
                break
            # Extract the size (in kB) and skip images larger than 500 kB.
            sizes = re.findall('t2=\"[^<>/"]* ([^<>/" ]*?)$', imginfo, re.M | re.S)
            # NOTE(review): eval() on text scraped from an untrusted page is
            # a code-execution risk; float(sizes[0]) would be safer.
            if len(sizes) == 0 or eval(sizes[0]) > 500:
                continue
            urls = re.findall('^(http[^&]+\.[j|J][p|P][g|G])"', imginfo, re.M | re.S)
            if len(urls) == 0:
                continue
            imgurl = urls[0]
            # Name the file after the current count of valid images.
            title = get_dir_len(BASE_PATH)
            fname = os.path.join(BASE_PATH, '%s.jpg') % title
            try:
                urllib.urlretrieve(imgurl, fname)
                if not is_file_valid(fname):
                    os.remove(fname)
            except IOError, e:
                # Throw away some gifs...blegh.
                print 'could not save %s' % imgurl
                continue
            # Be nice to web host and they'll be nice back :)
            time.sleep(1.5)
class download_from_google_thread(threading.Thread):
    """Worker thread that queries Google's (long-deprecated) ajax image
    search API for one query and saves JPEGs under <path>/<query>.

    NOTE(review): this path appears broken as written, and its callers keep
    it disabled:
      * get_content_from_url() returns a decoded string, so the
        `type(content) == unicode or type(content) == str` test below is
        always true and `continue` fires without incrementing `start`,
        turning the while loop into an infinite loop;
      * `json.loads(content.text)` would raise AttributeError on a plain
        string (`.text` belongs to a requests-style response object).
    Confirm intent before re-enabling.
    """
    def __init__(self, query, path):
        threading.Thread.__init__(self)
        self.query = query
        self.path = path
    def run(self):
        BASE_URL = 'https://ajax.googleapis.com/ajax/services/search/images?'\
          'v=1.0&q=' + self.query + '&start=%d'
        # BASE_URL = 'http://image.baidu.com/i?tn=baiduimage&ie=utf-8&word=' + query
        BASE_PATH = os.path.join(self.path, self.query)
        if not os.path.exists(BASE_PATH):
            os.makedirs(BASE_PATH)
        start = 0 # Google's start query string parameter for pagination.
        while start < 16: # Google will only return a max of 56 results.
            if len(os.listdir(BASE_PATH)) >= pics_per_word:
                break
            content = get_content_from_url(BASE_URL % start)
            if type(content) == unicode or type(content) == str:
                continue
            for image_info in json.loads(content.text)['responseData']['results']:
                urllist = re.findall('http.+\.[j|J][p|P][g|G]', image_info['unescapedUrl'])
                if len(urllist) == 0:
                    continue
                url = urllist[0]
                # Remove file-system path characters from name.
                # title = image_info['titleNoFormatting'].replace('/', '').replace('\\', '').replace(' ','_').replace('|','')
                title = get_dir_len(BASE_PATH)
                fname = os.path.join(BASE_PATH, '%s.jpg') % title
                try:
                    urllib.urlretrieve(url, fname)
                    if not is_file_valid(fname):
                        os.remove(fname)
                except IOError, e:
                    # Throw away some gifs...blegh.
                    print 'could not save %s' % url
                    continue
            print start
            start += 4 # 4 images per page.
            # Be nice to Google and they'll be nice back :)
            time.sleep(2)
def is_file_valid(file):
try:
if type(file) == unicode or type(file) == str:
if os.path.getsize(file) < 10000:
return False
file = open(file,"rb")
first_char = file.read(1) #get the first character
if not first_char:
print "file is empty" #first character is the empty string..
return False
else:
file.seek(0)
return True
except Exception as e:
print(e)
return False
def get_dir_len(path):
    """Count the valid image files directly inside *path*.

    Returns 0 when *path* does not exist.
    """
    if not os.path.exists(path):
        return 0
    total = 0
    for entry in os.listdir(path):
        if is_file_valid(path + '/' + entry):
            total += 1
    return total
def is_path_full(path):
    # True once the directory already holds at least `pics_per_word`
    # valid images (module-level quota).
    return get_dir_len(path) >= pics_per_word
def download_controller(wordname, engine):
lastlen = get_dir_len(os.path.join(IMG_PATH, wordname))
count = 0
thread = engine(wordname, IMG_PATH)
thread.daemon = True
thread.start()
time.sleep(1)
while thread.is_alive():
print str(thread)+ ' ' + wordname + ' ' + str(count)
time.sleep(10)
currentlen = get_dir_len(os.path.join(IMG_PATH, wordname))
if currentlen == lastlen:
count += 1
if count == 10:
threadstop = wordname
break
else:
count = 0
lastlen = currentlen
return lastlen
def get_word_from_queue(words):
    """Pop and return the next word whose image directory is not yet full.

    Words whose directories are already full are discarded as they are
    encountered. Returns None when the queue is exhausted. Access to the
    shared list is serialized via the module-global `words_lock`.
    """
    words_lock.acquire()
    try:
        # Bug fix: the original removed items from `words` while iterating
        # the same list, which skips every element that follows a removal.
        wordname = None
        while words:
            word = words.pop(0)
            if not is_path_full(os.path.join(IMG_PATH, word)):
                wordname = word
                break
        return wordname
    finally:
        # Release even if is_path_full raises, so other workers don't hang.
        words_lock.release()
class download_thread(threading.Thread):
    """Worker that drains the shared word queue, downloading images per word."""

    def __init__(self, words):
        threading.Thread.__init__(self)
        self.words = words

    def run(self):
        word = get_word_from_queue(self.words)
        while word:
            print('Start downloading "%s"' % word)
            lastlen = download_controller(word, download_from_bing_thread)
            # Google fallback currently disabled:
            # if lastlen < pics_per_word:
            #     print('Try another engine')
            #     download_controller(word, download_from_google_thread)
            word = get_word_from_queue(self.words)
def download_images_single(word):
    # Download images for a single word via Bing; the Google fallback is
    # currently disabled (see commented call).
    download_controller(word, download_from_bing_thread)
    if not is_path_full(os.path.join(IMG_PATH, word)):
        print('Try another engine')
        #download_controller(word, download_from_google_thread)
def download_images(words):
    """Spawn four daemon worker threads that share the *words* queue.

    The threads are daemons and are not joined here, so this returns
    immediately; callers that need completion must watch the queue.
    (Refactored from four copy-pasted thread blocks into a loop; output
    is identical.)
    """
    for i in range(1, 5):
        worker = download_thread(words)
        worker.daemon = True
        worker.start()
        print('Image Thread %d started!' % i)
def main():
    """For images downloading to filesystem, then manually filter them.

    Reads the word list from the module-global `filename` (set in the
    __main__ block) and runs two joined (non-daemon) workers, so this
    blocks until the queue drains.
    """
    if use_proxy == True:
        # NOTE(review): install_proxy is not defined in this module;
        # reachable only if use_proxy is flipped to True -- confirm it is
        # provided elsewhere before enabling.
        install_proxy()
    # Bug fix: the word-list file handle was opened and never closed.
    with open(filename) as word_file:
        words = word_file.read().split()
    thread1 = download_thread(words)
    thread1.start()
    print('Thread 1 started!')
    thread2 = download_thread(words)
    thread2.start()
    print('Thread 2 started!')
    thread1.join()
    print('Thread 1 Done!')
    thread2.join()
    print('Thread 2 Done!')
def get_file():
filename = input("Input a file name: ")
if not os.path.exists(filename):
print "%s doesn't exists" % filename
filename = get_file()
elif not os.path.getsize(filename):
print "%s is empty" % filename
filename = get_file()
else:
pass
return filename
# Destination root for downloaded images (under Django's MEDIA_ROOT).
IMG_PATH = MEDIA_ROOT + '/images'
# Optional HTTP proxy settings; only consulted when use_proxy is True.
http_proxy = "http://localhost:8086"
use_proxy = False
http_proxys = {'http':http_proxy}
# How many images to collect before a word's directory counts as "full".
pics_per_word = 3
# Serializes access to the shared word queue across worker threads.
words_lock = threading.Lock()
url_lock = threading.Lock()
# Stop sentinel: set to a word name to ask that word's worker to stop early.
threadstop = ''
if __name__ == '__main__':
    # Standalone mode: prompt for a word-list file, then time the batch run.
    filename = get_file()
    start = time.time()
    main()
    print("Elapsed Time:", (time.time() - start))
| {
"content_hash": "86d848909df1282f67ec12ec5f098d39",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 125,
"avg_line_length": 29.273333333333333,
"alnum_prop": 0.5537462992484627,
"repo_name": "crike/crike",
"id": "384d640d6bf199e11c633db0b0d95f953a9446f2",
"size": "8797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/crike_django/crike_django/image_download.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "85643"
},
{
"name": "HTML",
"bytes": "193611"
},
{
"name": "JavaScript",
"bytes": "202263"
},
{
"name": "Nginx",
"bytes": "2112"
},
{
"name": "Python",
"bytes": "135837"
},
{
"name": "Shell",
"bytes": "6022"
}
],
"symlink_target": ""
} |
"""
Helper routines for reading & writing MRS data in various formats.
Except for the map of internal data types to numpy type strings (which
doesn't require an import of numpy), this code is deliberately ignorant of
numpy. It returns native Python types that are easy to convert into
numpy types.
"""
# Python modules
from __future__ import division
import re
import struct
import os.path
import xdrlib
import exceptions
class FileNotFoundError(exceptions.Exception):
    """ Specific to VASF. Raised when this module can't find a matching data
    file for a params file or vice versa.

    NOTE(review): this name shadows the Python 3 builtin of the same name;
    harmless under Python 2 (which this module targets), but worth renaming
    in any port.
    """
    pass
class UnreadableDataError(exceptions.Exception):
    """ Raised when this module can't make sense of data (raw or XDR) due
    to an unexpected format, buffer underrun (less data than expected),
    buffer overrun (more data than expected), etc.

    Raised by decode_xdr() below.
    """
    pass
class IncompleteMetadataError(exceptions.Exception):
    """ Raised when the metadata associated with a dataset doesn't contain
    required information like the data's type or format.
    """
    pass
# Regexes for sniffing VASF file types (used by sniff_file_type() below).
# The former looks for a word or two inside [SQUARE BRACKETS] (an INI-style
# section header) and the latter looks for <vasf> or <?xml.
_vasf_parameters_regex = re.compile(r"[[](\w+\s*\w*)[]]")
_vaxml_regex = re.compile(r"<vasf>|<\?xml")
class MrsFileTypes(object):
    """Enumeration of the file formats recognized by sniff_file_type()."""
    # These constants are arbitrary and may change.
    # However bool(NONE) is guaranteed to be False
    NONE = 0
    VASF_DATA = 1
    VASF_PARAMETERS = 2
    VAXML = 3
    DICOM_SIEMENS = 4
class DataTypes(object):
""" Internal representation of data type. INTEGER is for 16 bit values,
LONG for 32 bit values, FLOAT for 32 bit floats, DOUBLE for 64 bit,
COMPLEX for two 32 bit floats, COMPLEX_DOUBLE for two 64 bit floats.
"""
# These constants are arbitrary and may change except that bool(NONE) is
# always guaranteed to be False.
NONE = 0
BYTE = 1
INTEGER = 2
LONG = 3
FLOAT = 4
DOUBLE = 5
COMPLEX = 6
COMPLEX_DOUBLE = 7
ALL = (BYTE, INTEGER, LONG, FLOAT, DOUBLE, COMPLEX, COMPLEX_DOUBLE)
# Sizes of types, in bytes
_TYPE_SIZES = {
NONE : 0,
BYTE : 1,
INTEGER : 2,
LONG : 4,
FLOAT : 4,
DOUBLE : 8,
COMPLEX : 8,
COMPLEX_DOUBLE : 16
}
# Maps external type strings to internal values. External strings
# are the many variations one can find in VASF format. They also
# include the numpy type strings. Older code tended to strip the spaces
# out of values read from the INI file, so all the type names have to
# appear in spaceless (e.g. "doublefloat") as well as "spaced" form.
_EXTERNAL_TO_INTERNAL = {
# These are VASF strings
'float' : FLOAT,
'double' : DOUBLE,
'doublefloat' : DOUBLE,
'double float' : DOUBLE,
'shortinteger' : INTEGER,
'short integer' : INTEGER,
'integer' : INTEGER,
'unsignedinteger' : INTEGER,
'unsigned integer' : INTEGER,
'integer16bit' : INTEGER,
'integer 16bit' : INTEGER,
'integer 16 bit' : INTEGER,
'integer' : INTEGER,
'long' : LONG,
'unsignedlong' : LONG,
'unsigned long' : LONG,
'complexinteger8bit' : COMPLEX,
'complex integer8bit' : COMPLEX,
'complex integer 8bit' : COMPLEX,
'complex integer 8 bit' : COMPLEX,
'complexinteger16bit' : COMPLEX,
'complex integer16bit' : COMPLEX,
'complex integer 16bit' : COMPLEX,
'complex integer 16 bit' : COMPLEX,
'complexfloat' : COMPLEX,
'complex float' : COMPLEX,
'complex' : COMPLEX,
'complexdouble' : COMPLEX_DOUBLE,
'complex double' : COMPLEX_DOUBLE,
'byte' : BYTE,
# These are numpy types
"character" : BYTE,
"int16" : INTEGER,
"int32" : LONG,
"float32" : FLOAT,
"float64" : DOUBLE,
"complex64" : COMPLEX,
"complex128" : COMPLEX_DOUBLE,
}
_INTERNAL_TO_NUMPY = {
# Maps internal types to numpy type strings
# Valid numpy type names are in numpy.sctypeDict.keys()
BYTE : "byte",
INTEGER : "int16",
LONG : "int32",
FLOAT : "float32",
DOUBLE : "float64",
COMPLEX : "complex64",
COMPLEX_DOUBLE : "complex128",
}
@staticmethod
def is_complex(the_type):
return the_type in (DataTypes.COMPLEX, DataTypes.COMPLEX_DOUBLE)
@staticmethod
def any_type_to_internal(the_type):
if the_type in DataTypes.ALL:
pass
# This is already an internal type
else:
if the_type in DataTypes._EXTERNAL_TO_INTERNAL:
the_type = DataTypes._EXTERNAL_TO_INTERNAL[the_type]
else:
raise ValueError, 'Unknown type "%s"' % the_type
return the_type
@staticmethod
def any_type_to_numpy(the_type):
the_type = DataTypes.any_type_to_internal(the_type)
return DataTypes._INTERNAL_TO_NUMPY[the_type]
def sniff_file_type(f):
    """ Guesses if a file is a DICOM file, VASF params file, a VASF data file,
    or a VAXML file and returns one of the MrsFileTypes.XXX constants.

    The param f can be a filename (string) or a file object.

    Since VASF data files are just binary glop, they're indistinguishable
    from most other file types and therefore MrsFileTypes.VASF_DATA is
    the default "wild guess" return type.

    NOTE(review): the file is opened in "rb" mode, so comparing the read
    buffer against str literals below relies on Python 2 bytes/str
    equivalence -- a Python 3 port must compare against bytes.
    """
    file_type = MrsFileTypes.NONE
    close_on_exit = False
    # If f isn't already a file, turn it into one.
    if not hasattr(f, "read"):
        f = open(f, "rb")
        close_on_exit = True
    # Read the first 1k
    s = f.read(1024)
    # Look for telltale strings
    # Per the DICOM specs, a DICOM file starts with 128 reserved bytes
    # followed by "DICM".
    # ref: DICOM spec, Part 10: Media Storage and File Format for Media
    # Interchange, 7.1 DICOM FILE META INFORMATION
    if s[128:132] == "DICM":
        file_type = MrsFileTypes.DICOM_SIEMENS
    elif _vasf_parameters_regex.search(s):
        file_type = MrsFileTypes.VASF_PARAMETERS
    elif _vaxml_regex.search(s):
        file_type = MrsFileTypes.VAXML
    else:
        file_type = MrsFileTypes.VASF_DATA
    # Only close handles we opened ourselves; caller-supplied objects are
    # left open.
    if close_on_exit:
        f.close()
    return file_type
def decode_xdr(data, data_type, element_count):
    """ Given a string of data in XDR format and a data type, returns
    an iterable (tuple or list) of Python objects representing the decoded
    data. data_type must be one of the DataTypes.XXX constants defined in
    this module.

    element_count is the number of elements expected in the data.

    Raises UnreadableDataError on underrun/overrun or malformed XDR;
    raises ValueError for an unrecognized data_type.
    """
    p = xdrlib.Unpacker(data)

    is_complex = (data_type in (DataTypes.COMPLEX, DataTypes.COMPLEX_DOUBLE))

    # Pick the per-element unpack function. Note that 16-bit INTEGER shares
    # unpack_int with LONG: XDR has no 16-bit wire type, so both travel as
    # 32-bit ints.
    if data_type in (DataTypes.COMPLEX, DataTypes.FLOAT):
        unpack_function = p.unpack_float
    elif data_type in (DataTypes.COMPLEX_DOUBLE, DataTypes.DOUBLE):
        unpack_function = p.unpack_double
    elif data_type == DataTypes.LONG:
        unpack_function = p.unpack_int
    elif data_type == DataTypes.INTEGER:
        unpack_function = p.unpack_int
    elif data_type == DataTypes.BYTE:
        unpack_function = p.unpack_byte
    else:
        raise ValueError, "Unknown data type '%s'" % data_type

    if is_complex:
        # XDR doesn't explicitly support complex numbers, so they're written
        # as pairs of floats (or doubles).
        element_count *= 2

    try:
        data = p.unpack_farray(element_count, unpack_function)
    except (xdrlib.Error, xdrlib.ConversionError), instance:
        raise UnreadableDataError, instance.msg

    # Calling p.done() here will raise an xdrlib.Error if unextracted
    # data remains (i.e. the code above is buggy or element_count is wrong)
    try:
        p.done()
    except xdrlib.Error:
        raise UnreadableDataError, "More data in file than expected (XDR overrun)"

    if is_complex:
        # Fold the flat (real, imag, real, imag, ...) list into complex().
        data = collapse_complexes(data)

    return data
def collapse_complexes(data):
    """Given a list or other iterable that's a series of (real, imaginary)
    pairs, returns a list of complex numbers. For instance, this list --
       [a, b, c, d, e, f]
    this function returns --
       [complex(a, b), complex(c, d), complex(e, f)]

    The returned list is a new list; the original is unchanged.
    """
    collapsed = []
    index = 0
    # Walk the flat sequence two entries at a time; an odd-length input
    # raises IndexError, same as the original slicing-by-stride version.
    while index < len(data):
        collapsed.append(complex(data[index], data[index + 1]))
        index += 2
    return collapsed
def expand_complexes(data):
    """Expands a list or other iterable of complex numbers into a list of
    (real, imaginary) pairs. For instance, given this list of complex
    numbers --
       [za, zb, zc]
    this function returns --
       [za.real, za.imag, zb.real, zb.imag, zc.real, zc.imag]

    The returned list is a new list; the original is unchanged.
    """
    expanded = []
    for value in data:
        expanded.append(value.real)
        expanded.append(value.imag)
    return expanded
def convert_vasf_to_xml(source_filename, target_filename, pack_data=True):
    """ Converts a VASF file pair to VAXML format.

    source_filename can be a VASF parameters or data filename.

    :param target_filename: path of the VAXML file to write
    :param pack_data: forwarded to util_vaxml_file.write()
    """
    # imports are done on-the-fly here because util_vasf_file and
    # util_vaxml_file import this file and we need to avoid circular imports.
    import util_vasf_file
    import util_vaxml_file

    # Resolve whichever half of the pair we were given into both filenames.
    source_parameters_filename, source_data_filename = \
                            util_vasf_file.get_filename_pair(source_filename)

    parameters, data = util_vasf_file.read(source_parameters_filename,
                                           source_data_filename)

    # Extract the data type
    util_vaxml_file.write(parameters, parameters["data_type"], data,
                          target_filename, pack_data)
def _test_collapse_expand_complexes():
    """Round-trip random data through collapse/expand and verify equality,
    for plain lists and for numpy arrays."""
    import random
    import numpy

    random.seed()

    size = random.randint(0, 1000)
    pairs = [ ]
    flat = [ ]
    # Generate a bunch of random floats
    for _ in range(size):
        re_part = random.randint(-1000, 1000) + random.random()
        im_part = random.randint(-1000, 1000) + random.random()
        flat.append(re_part)
        flat.append(im_part)
        pairs.append(complex(re_part, im_part))

    assert(collapse_complexes(flat) == pairs)
    assert(expand_complexes(pairs) == flat)

    # Ensure the functions work with numpy arrays
    flat = numpy.array(flat)
    pairs = numpy.array(pairs)
    assert((numpy.array(collapse_complexes(flat)) == pairs).all())
    assert((numpy.array(expand_complexes(pairs)) == flat).all())
if __name__ == '__main__':
    # Self-test when executed directly (requires numpy).
    _test_collapse_expand_complexes()
| {
"content_hash": "25dc9a79ba82b41bac344ee58a269731",
"timestamp": "",
"source": "github",
"line_count": 354,
"max_line_length": 82,
"avg_line_length": 33.740112994350284,
"alnum_prop": 0.5869055592766242,
"repo_name": "beOn/hcpre",
"id": "5afa384689fe5382ea61dbc21bd14586067c67da",
"size": "11944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hcpre/duke_siemens/util_mrs_file.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "145966"
}
],
"symlink_target": ""
} |
import json
from util import filelist
import re
if __name__ == "__main__":
    # Smoke-test the MS API result files: split each first line on '|' and
    # check the remainder parses as JSON.
    datalist = filelist.findfiles("./dataset/exp/msapi", ".txt")
    for path in datalist:
        # Bug fixes: use a context manager so the handle is closed even if
        # json.loads raises (the original leaked it in that case), and stop
        # reusing the name `data` for both the filename and the parsed JSON.
        with open(path, 'r') as f:
            line = f.readline()
        segment = re.split('[|]', line)
        print(len(segment), segment[0])
        data = json.loads(line)
        # data = json.loads(str)
        # roll = float(data['faceAttributes']['headPose']['roll'])
        # pitch = float(data['faceAttributes']['headPose']['pitch'])
        # yaw = float(data['faceAttributes']['headPose']['yaw'])
        # print(roll, pitch, yaw)
"content_hash": "024ea5f7ebb535b5d6116aed6895b702",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 64,
"avg_line_length": 28.714285714285715,
"alnum_prop": 0.5754560530679934,
"repo_name": "elecun/pyface",
"id": "155a0ca60ee3156de98f25eb73d20ccd96985f99",
"size": "626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "msapi_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10171"
}
],
"symlink_target": ""
} |
import mock
from oslo_config import cfg
from webob import exc
from neutron.common import constants
from neutron.db import api as db_api
from neutron.db import external_net_db
from neutron.db import l3_db
from neutron.db import l3_gwmode_db
from neutron.db import models_v2
from neutron.extensions import l3
from neutron.extensions import l3_ext_gw_mode
from neutron.openstack.common import uuidutils
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit.extensions import test_l3
from neutron.tests.unit import testlib_api
_uuid = uuidutils.generate_uuid

# Fixed identifiers for the fixture ports built in TestL3GwModeMixin.setUp,
# so assertions can reference them directly.
FAKE_GW_PORT_ID = _uuid()
FAKE_GW_PORT_MAC = 'aa:bb:cc:dd:ee:ff'
FAKE_FIP_EXT_PORT_ID = _uuid()
FAKE_FIP_EXT_PORT_MAC = '11:22:33:44:55:66'
FAKE_FIP_INT_PORT_ID = _uuid()
FAKE_FIP_INT_PORT_MAC = 'aa:aa:aa:aa:aa:aa'
FAKE_ROUTER_PORT_ID = _uuid()
FAKE_ROUTER_PORT_MAC = 'bb:bb:bb:bb:bb:bb'
class TestExtensionManager(object):
    """Extension manager that grafts the ext-gw-mode attributes onto L3."""

    def get_resources(self):
        # Simulate extension of the L3 attribute map: merge the ext-gw-mode
        # attributes into each matching L3 resource dict in place.
        extended = l3_ext_gw_mode.EXTENDED_ATTRIBUTES_2_0
        for resource, attrs in l3.RESOURCE_ATTRIBUTE_MAP.items():
            attrs.update(extended.get(resource, {}))
        return l3.L3.get_resources()

    def get_actions(self):
        return []

    def get_request_extensions(self):
        return []
# A simple class for making a concrete class out of the mixin
# for the case of a plugin that integrates l3 routing.
class TestDbIntPlugin(test_l3.TestL3NatIntPlugin,
                      l3_gwmode_db.L3_NAT_db_mixin):
    # Advertise ext-gw-mode so the extension under test is loadable.
    supported_extension_aliases = ["external-net", "router", "ext-gw-mode"]
# A simple class for making a concrete class out of the mixin
# for the case of a l3 router service plugin
class TestDbSepPlugin(test_l3.TestL3NatServicePlugin,
                      l3_gwmode_db.L3_NAT_db_mixin):
    # Advertise ext-gw-mode so the extension under test is loadable.
    supported_extension_aliases = ["router", "ext-gw-mode"]
class TestL3GwModeMixin(testlib_api.SqlTestCase):
    def setUp(self):
        """Build an in-memory fixture topology: an external network with a
        router (plus gateway port), an internal network/subnet with a router
        interface port, a compute port, and an unassociated floating IP.
        """
        super(TestL3GwModeMixin, self).setUp()
        plugin = __name__ + '.' + TestDbIntPlugin.__name__
        self.setup_coreplugin(plugin)
        self.target_object = TestDbIntPlugin()
        # Patch the context
        ctx_patcher = mock.patch('neutron.context', autospec=True)
        mock_context = ctx_patcher.start()
        self.context = mock_context.get_admin_context()
        # This ensure also calls to elevated work in unit tests
        self.context.elevated.return_value = self.context
        self.context.session = db_api.get_session()
        # Create sample data for tests
        self.ext_net_id = _uuid()
        self.int_net_id = _uuid()
        self.int_sub_id = _uuid()
        self.tenant_id = 'the_tenant'
        # External network, flagged external via the ExternalNetwork row.
        self.network = models_v2.Network(
            id=self.ext_net_id,
            tenant_id=self.tenant_id,
            admin_state_up=True,
            status=constants.NET_STATUS_ACTIVE)
        self.net_ext = external_net_db.ExternalNetwork(
            network_id=self.ext_net_id)
        self.context.session.add(self.network)
        # The following is to avoid complains from sqlite on
        # foreign key violations
        self.context.session.flush()
        self.context.session.add(self.net_ext)
        # Router with SNAT enabled and its gateway port on the external net.
        self.router = l3_db.Router(
            id=_uuid(),
            name=None,
            tenant_id=self.tenant_id,
            admin_state_up=True,
            status=constants.NET_STATUS_ACTIVE,
            enable_snat=True,
            gw_port_id=None)
        self.context.session.add(self.router)
        self.context.session.flush()
        self.router_gw_port = models_v2.Port(
            id=FAKE_GW_PORT_ID,
            tenant_id=self.tenant_id,
            device_id=self.router.id,
            device_owner=l3_db.DEVICE_OWNER_ROUTER_GW,
            admin_state_up=True,
            status=constants.PORT_STATUS_ACTIVE,
            mac_address=FAKE_GW_PORT_MAC,
            network_id=self.ext_net_id)
        self.router.gw_port_id = self.router_gw_port.id
        self.context.session.add(self.router)
        self.context.session.add(self.router_gw_port)
        self.context.session.flush()
        # External-side port backing the floating IP.
        self.fip_ext_port = models_v2.Port(
            id=FAKE_FIP_EXT_PORT_ID,
            tenant_id=self.tenant_id,
            admin_state_up=True,
            device_id=self.router.id,
            device_owner=l3_db.DEVICE_OWNER_FLOATINGIP,
            status=constants.PORT_STATUS_ACTIVE,
            mac_address=FAKE_FIP_EXT_PORT_MAC,
            network_id=self.ext_net_id)
        self.context.session.add(self.fip_ext_port)
        self.context.session.flush()
        # Internal network/subnet plus the router's interface port on it.
        self.int_net = models_v2.Network(
            id=self.int_net_id,
            tenant_id=self.tenant_id,
            admin_state_up=True,
            status=constants.NET_STATUS_ACTIVE)
        self.int_sub = models_v2.Subnet(
            id=self.int_sub_id,
            tenant_id=self.tenant_id,
            ip_version=4,
            cidr='3.3.3.0/24',
            gateway_ip='3.3.3.1',
            network_id=self.int_net_id)
        self.router_port = models_v2.Port(
            id=FAKE_ROUTER_PORT_ID,
            tenant_id=self.tenant_id,
            admin_state_up=True,
            device_id=self.router.id,
            device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
            status=constants.PORT_STATUS_ACTIVE,
            mac_address=FAKE_ROUTER_PORT_MAC,
            network_id=self.int_net_id)
        self.router_port_ip_info = models_v2.IPAllocation(
            port_id=self.router_port.id,
            network_id=self.int_net.id,
            subnet_id=self.int_sub_id,
            ip_address='3.3.3.1')
        self.context.session.add(self.int_net)
        self.context.session.add(self.int_sub)
        self.context.session.add(self.router_port)
        self.context.session.add(self.router_port_ip_info)
        self.context.session.flush()
        # Compute port that floating-IP association tests will target.
        self.fip_int_port = models_v2.Port(
            id=FAKE_FIP_INT_PORT_ID,
            tenant_id=self.tenant_id,
            admin_state_up=True,
            device_id='something',
            device_owner='compute:nova',
            status=constants.PORT_STATUS_ACTIVE,
            mac_address=FAKE_FIP_INT_PORT_MAC,
            network_id=self.int_net_id)
        self.fip_int_ip_info = models_v2.IPAllocation(
            port_id=self.fip_int_port.id,
            network_id=self.int_net.id,
            subnet_id=self.int_sub_id,
            ip_address='3.3.3.3')
        # Floating IP starts unassociated (no fixed port/router).
        self.fip = l3_db.FloatingIP(
            id=_uuid(),
            floating_ip_address='1.1.1.2',
            floating_network_id=self.ext_net_id,
            floating_port_id=FAKE_FIP_EXT_PORT_ID,
            fixed_port_id=None,
            fixed_ip_address=None,
            router_id=None)
        self.context.session.add(self.fip_int_port)
        self.context.session.add(self.fip_int_ip_info)
        self.context.session.add(self.fip)
        self.context.session.flush()
        self.fip_request = {'port_id': FAKE_FIP_INT_PORT_ID,
                            'tenant_id': self.tenant_id}
def _get_gwports_dict(self, gw_ports):
return dict((gw_port['id'], gw_port)
for gw_port in gw_ports)
def _reset_ext_gw(self):
# Reset external gateway
self.router.gw_port_id = None
self.context.session.add(self.router)
self.context.session.flush()
def _test_update_router_gw(self, gw_info, expected_enable_snat):
self.target_object._update_router_gw_info(
self.context, self.router.id, gw_info)
router = self.target_object._get_router(
self.context, self.router.id)
try:
self.assertEqual(FAKE_GW_PORT_ID,
router.gw_port.id)
self.assertEqual(FAKE_GW_PORT_MAC,
router.gw_port.mac_address)
except AttributeError:
self.assertIsNone(router.gw_port)
self.assertEqual(expected_enable_snat, router.enable_snat)
def test_update_router_gw_with_gw_info_none(self):
self._test_update_router_gw(None, True)
def test_update_router_gw_with_network_only(self):
info = {'network_id': self.ext_net_id}
self._test_update_router_gw(info, True)
def test_update_router_gw_with_snat_disabled(self):
info = {'network_id': self.ext_net_id,
'enable_snat': False}
self._test_update_router_gw(info, False)
def test_make_router_dict_no_ext_gw(self):
self._reset_ext_gw()
router_dict = self.target_object._make_router_dict(self.router)
self.assertIsNone(router_dict[l3.EXTERNAL_GW_INFO])
def test_make_router_dict_with_ext_gw(self):
router_dict = self.target_object._make_router_dict(self.router)
self.assertEqual({'network_id': self.ext_net_id,
'enable_snat': True,
'external_fixed_ips': []},
router_dict[l3.EXTERNAL_GW_INFO])
def test_make_router_dict_with_ext_gw_snat_disabled(self):
self.router.enable_snat = False
router_dict = self.target_object._make_router_dict(self.router)
self.assertEqual({'network_id': self.ext_net_id,
'enable_snat': False,
'external_fixed_ips': []},
router_dict[l3.EXTERNAL_GW_INFO])
def test_build_routers_list_no_ext_gw(self):
self._reset_ext_gw()
router_dict = self.target_object._make_router_dict(self.router)
routers = self.target_object._build_routers_list(self.context,
[router_dict],
[])
self.assertEqual(1, len(routers))
router = routers[0]
self.assertIsNone(router.get('gw_port'))
self.assertIsNone(router.get('enable_snat'))
def test_build_routers_list_with_ext_gw(self):
router_dict = self.target_object._make_router_dict(self.router)
routers = self.target_object._build_routers_list(
self.context, [router_dict],
self._get_gwports_dict([self.router.gw_port]))
self.assertEqual(1, len(routers))
router = routers[0]
self.assertIsNotNone(router.get('gw_port'))
self.assertEqual(FAKE_GW_PORT_ID, router['gw_port']['id'])
self.assertTrue(router.get('enable_snat'))
def test_build_routers_list_with_ext_gw_snat_disabled(self):
self.router.enable_snat = False
router_dict = self.target_object._make_router_dict(self.router)
routers = self.target_object._build_routers_list(
self.context, [router_dict],
self._get_gwports_dict([self.router.gw_port]))
self.assertEqual(1, len(routers))
router = routers[0]
self.assertIsNotNone(router.get('gw_port'))
self.assertEqual(FAKE_GW_PORT_ID, router['gw_port']['id'])
self.assertFalse(router.get('enable_snat'))
def test_build_routers_list_with_gw_port_mismatch(self):
router_dict = self.target_object._make_router_dict(self.router)
routers = self.target_object._build_routers_list(
self.context, [router_dict], {})
self.assertEqual(1, len(routers))
router = routers[0]
self.assertIsNone(router.get('gw_port'))
self.assertIsNone(router.get('enable_snat'))
class ExtGwModeIntTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
                           test_l3.L3NatTestCaseMixin):
    """API-level tests for the ext-gw-mode extension with the L3 support
    integrated into the core plugin (TestDbIntPlugin)."""

    def setUp(self, plugin=None, svc_plugins=None, ext_mgr=None):
        # Store l3 resource attribute map as it will be updated
        self._l3_attribute_map_bk = {}
        for item in l3.RESOURCE_ATTRIBUTE_MAP:
            self._l3_attribute_map_bk[item] = (
                l3.RESOURCE_ATTRIBUTE_MAP[item].copy())
        plugin = plugin or (
            'neutron.tests.unit.extensions.test_l3_ext_gw_mode.'
            'TestDbIntPlugin')
        # for these tests we need to enable overlapping ips
        cfg.CONF.set_default('allow_overlapping_ips', True)
        ext_mgr = ext_mgr or TestExtensionManager()
        super(ExtGwModeIntTestCase, self).setUp(plugin=plugin,
                                                ext_mgr=ext_mgr,
                                                service_plugins=svc_plugins)
        self.addCleanup(self.restore_l3_attribute_map)

    def restore_l3_attribute_map(self):
        # Undo the in-place changes the extension made to the shared map.
        l3.RESOURCE_ATTRIBUTE_MAP = self._l3_attribute_map_bk

    def tearDown(self):
        super(ExtGwModeIntTestCase, self).tearDown()

    def _set_router_external_gateway(self, router_id, network_id,
                                     snat_enabled=None,
                                     expected_code=exc.HTTPOk.code,
                                     neutron_context=None):
        """PUT the router's external_gateway_info through the API."""
        ext_gw_info = {'network_id': network_id}
        # Need to set enable_snat also if snat_enabled == False
        if snat_enabled is not None:
            ext_gw_info['enable_snat'] = snat_enabled
        return self._update('routers', router_id,
                            {'router': {'external_gateway_info':
                                        ext_gw_info}},
                            expected_code=expected_code,
                            neutron_context=neutron_context)

    def test_router_create_show_no_ext_gwinfo(self):
        """A router created without gateway info shows it as None."""
        name = 'router1'
        tenant_id = _uuid()
        expected_value = [('name', name), ('tenant_id', tenant_id),
                          ('admin_state_up', True), ('status', 'ACTIVE'),
                          ('external_gateway_info', None)]
        with self.router(name=name, admin_state_up=True,
                         tenant_id=tenant_id) as router:
            res = self._show('routers', router['router']['id'])
            for k, v in expected_value:
                self.assertEqual(res['router'][k], v)

    def _test_router_create_show_ext_gwinfo(self, snat_input_value,
                                            snat_expected_value):
        """Create a router with gateway info and verify what GET returns."""
        name = 'router1'
        tenant_id = _uuid()
        with self.subnet() as s:
            ext_net_id = s['subnet']['network_id']
            self._set_net_external(ext_net_id)
            input_value = {'network_id': ext_net_id}
            # Only True/False are forwarded; None exercises the default.
            if snat_input_value in (True, False):
                input_value['enable_snat'] = snat_input_value
            expected_value = [('name', name), ('tenant_id', tenant_id),
                              ('admin_state_up', True), ('status', 'ACTIVE'),
                              ('external_gateway_info',
                               {'network_id': ext_net_id,
                                'enable_snat': snat_expected_value,
                                'external_fixed_ips': [{
                                    'ip_address': mock.ANY,
                                    'subnet_id': s['subnet']['id']}]})]
            with self.router(
                name=name, admin_state_up=True, tenant_id=tenant_id,
                external_gateway_info=input_value) as router:
                res = self._show('routers', router['router']['id'])
                for k, v in expected_value:
                    self.assertEqual(res['router'][k], v)

    def test_router_create_show_ext_gwinfo_default(self):
        self._test_router_create_show_ext_gwinfo(None, True)

    def test_router_create_show_ext_gwinfo_with_snat_enabled(self):
        self._test_router_create_show_ext_gwinfo(True, True)

    def test_router_create_show_ext_gwinfo_with_snat_disabled(self):
        self._test_router_create_show_ext_gwinfo(False, False)

    def _test_router_update_ext_gwinfo(self, snat_input_value,
                                       snat_expected_value=False,
                                       expected_http_code=exc.HTTPOk.code):
        """Update a router's gateway info; verify snat value or error code."""
        with self.router() as r:
            with self.subnet() as s:
                try:
                    ext_net_id = s['subnet']['network_id']
                    self._set_net_external(ext_net_id)
                    self._set_router_external_gateway(
                        r['router']['id'], ext_net_id,
                        snat_enabled=snat_input_value,
                        expected_code=expected_http_code)
                    # On an expected failure there is nothing else to check.
                    if expected_http_code != exc.HTTPOk.code:
                        return
                    body = self._show('routers', r['router']['id'])
                    res_gw_info = body['router']['external_gateway_info']
                    self.assertEqual(res_gw_info['network_id'], ext_net_id)
                    self.assertEqual(res_gw_info['enable_snat'],
                                     snat_expected_value)
                finally:
                    # Always detach so the subnet/router fixtures can clean up.
                    self._remove_external_gateway_from_router(
                        r['router']['id'], ext_net_id)

    def test_router_update_ext_gwinfo_default(self):
        self._test_router_update_ext_gwinfo(None, True)

    def test_router_update_ext_gwinfo_with_snat_enabled(self):
        self._test_router_update_ext_gwinfo(True, True)

    def test_router_update_ext_gwinfo_with_snat_disabled(self):
        self._test_router_update_ext_gwinfo(False, False)

    def test_router_update_ext_gwinfo_with_invalid_snat_setting(self):
        self._test_router_update_ext_gwinfo(
            'xxx', None, expected_http_code=exc.HTTPBadRequest.code)
class ExtGwModeSepTestCase(ExtGwModeIntTestCase):
    """Same tests as ExtGwModeIntTestCase, but with L3 provided by a
    separate service plugin (TestDbSepPlugin) instead of the core plugin."""

    def setUp(self, plugin=None):
        # Store l3 resource attribute map as it will be updated
        self._l3_attribute_map_bk = {}
        for item in l3.RESOURCE_ATTRIBUTE_MAP:
            self._l3_attribute_map_bk[item] = (
                l3.RESOURCE_ATTRIBUTE_MAP[item].copy())
        plugin = plugin or (
            'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin')
        # the L3 service plugin
        l3_plugin = ('neutron.tests.unit.extensions.test_l3_ext_gw_mode.'
                     'TestDbSepPlugin')
        svc_plugins = {'l3_plugin_name': l3_plugin}
        # for these tests we need to enable overlapping ips
        cfg.CONF.set_default('allow_overlapping_ips', True)
        super(ExtGwModeSepTestCase, self).setUp(plugin=plugin,
                                                svc_plugins=svc_plugins)
        self.addCleanup(self.restore_l3_attribute_map)
| {
"content_hash": "5ccd9c9e0895ae0b56ab41d686f4955a",
"timestamp": "",
"source": "github",
"line_count": 427,
"max_line_length": 77,
"avg_line_length": 42.91569086651054,
"alnum_prop": 0.5753888130968622,
"repo_name": "Stavitsky/neutron",
"id": "291c88fcb66309f40724362c670f240a4c2c1085",
"size": "18955",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/extensions/test_l3_ext_gw_mode.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "980"
},
{
"name": "Python",
"bytes": "7134099"
},
{
"name": "Shell",
"bytes": "12319"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sys
# This is not required if you've installed pycparser into
# your site-packages/ with setup.py
sys.path.extend(['.', '..'])
from pycparser import c_parser, c_ast, parse_file
# A visitor with some state information (the funcname it's
# looking for)
#
class FuncCallVisitor(c_ast.NodeVisitor):
    """AST visitor that prints the location of every call to *funcname*.

    Parameters
    ----------
    funcname : str
        Name of the function whose call sites should be reported.
    """
    def __init__(self, funcname):
        self.funcname = funcname

    def visit_FuncCall(self, node):
        if node.name.name == self.funcname:
            print('%s called at %s' % (self.funcname, node.name.coord))
        # BUG FIX: defining visit_FuncCall suppresses the generic child
        # traversal, so calls nested inside this call's arguments
        # (e.g. foo(bar(x))) were silently skipped.  Visit the argument
        # list explicitly to find them.
        if node.args:
            self.visit(node.args)
def show_func_calls(filename, funcname):
    """Parse *filename* (preprocessing with cpp) and report calls to *funcname*."""
    tree = parse_file(filename, use_cpp=True)
    visitor = FuncCallVisitor(funcname)
    visitor.visit(tree)
if __name__ == "__main__":
    # Fall back to the bundled sample file/function when no args are given.
    if len(sys.argv) > 2:
        filename, func = sys.argv[1], sys.argv[2]
    else:
        filename, func = 'c_files/hash.c', 'malloc'
    show_func_calls(filename, func)
| {
"content_hash": "23899e896c475b5c4eb7650b51bf6524",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 71,
"avg_line_length": 24.945945945945947,
"alnum_prop": 0.6359696641386782,
"repo_name": "songjiguo/C3-IDL-Compiler",
"id": "b0f33ba98c0ced72b16b0d747ec103b28ab59d5b",
"size": "1224",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/func_calls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "143942"
},
{
"name": "C++",
"bytes": "15"
},
{
"name": "Makefile",
"bytes": "10958"
},
{
"name": "Python",
"bytes": "688035"
}
],
"symlink_target": ""
} |
import math
from dragon.vm.tensorflow.framework import dtypes
from dragon.vm.tensorflow.ops import random_ops
__all__ = ['xavier_initializer',
'xavier_initializer_conv2d',
'variance_scaling_initializer']
def xavier_initializer(uniform=True, seed=None, dtype=dtypes.float32):
    """Xavier/Glorot initializer: variance scaling with FAN_AVG, factor 1.0."""
    return variance_scaling_initializer(
        factor=1.0, mode='FAN_AVG', uniform=uniform, seed=seed, dtype=dtype)


# Convolution weights use the same initialization scheme as dense layers.
xavier_initializer_conv2d = xavier_initializer
def variance_scaling_initializer(factor=2.0, mode='FAN_IN', uniform=False,
                                 seed=None, dtype=dtypes.float32):
    """Return an initializer that scales variance by the tensor's fan counts.

    Parameters
    ----------
    factor : float
        Multiplier applied to the variance.
    mode : str
        'FAN_IN', 'FAN_OUT' or 'FAN_AVG' -- which fan count normalizes
        the variance.
    uniform : bool
        Sample from a uniform distribution instead of a truncated normal.
    seed : int or None
        Seed forwarded to the random op.
    dtype : dtype
        Floating-point data type of the generated tensors.

    Raises
    ------
    TypeError
        If *dtype* is not floating point or *mode* is not one of the
        three supported values.
    """
    if not dtype.is_floating:
        raise TypeError('Cannot create initializer for non-floating point type.')
    if mode not in ['FAN_IN', 'FAN_OUT', 'FAN_AVG']:
        # BUG FIX: the mode was previously passed as a second TypeError
        # argument instead of being %-interpolated into the message (and
        # 'Unknown' was misspelled), so the error never showed the value.
        raise TypeError('Unknown mode %s [FAN_IN, FAN_OUT, FAN_AVG]' % mode)

    def _initializer(shape, dtype=dtype, partition_info=None):
        """Initializer function."""
        if not dtype.is_floating:
            raise TypeError('Cannot create initializer for non-floating point type.')
        # Estimating fan_in and fan_out is not possible to do perfectly, but we try.
        # This is the right thing for matrix multiply and convolutions.
        if shape:
            fan_in = float(shape[-2]) if len(shape) > 1 else float(shape[-1])
            fan_out = float(shape[-1])
        else:
            fan_in = 1.0
            fan_out = 1.0
        # Leading dims (e.g. conv spatial dims) multiply both fan counts.
        for dim in shape[:-2]:
            fan_in *= float(dim)
            fan_out *= float(dim)
        if mode == 'FAN_IN':
            # Count only number of input connections.
            n = fan_in
        elif mode == 'FAN_OUT':
            # Count only number of output connections.
            n = fan_out
        elif mode == 'FAN_AVG':
            # Average number of inputs and output connections.
            n = (fan_in + fan_out) / 2.0
        if uniform:
            # To get stddev = math.sqrt(factor / n) need to adjust for uniform.
            limit = math.sqrt(3.0 * factor / n)
            return random_ops.random_uniform(shape, -limit, limit,
                                             dtype, seed=seed)
        else:
            # To get stddev = math.sqrt(factor / n) need to adjust for truncated.
            trunc_stddev = math.sqrt(1.3 * factor / n)
            return random_ops.truncated_normal(shape, 0.0, trunc_stddev, dtype,
                                               seed=seed)

    return _initializer
| {
"content_hash": "da12f5c286056de0b87a55944a3fa59f",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 85,
"avg_line_length": 41.032786885245905,
"alnum_prop": 0.5705153815421494,
"repo_name": "neopenx/Dragon",
"id": "f2f673c491092f7d9cadc0331f5ac889ad2b56ba",
"size": "2696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Dragon/python/dragon/vm/tensorflow/contrib/layers/initializers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7082"
},
{
"name": "C++",
"bytes": "1024612"
},
{
"name": "CMake",
"bytes": "7849"
},
{
"name": "Cuda",
"bytes": "246400"
},
{
"name": "Makefile",
"bytes": "7409"
},
{
"name": "Python",
"bytes": "552459"
}
],
"symlink_target": ""
} |
import sys
import os
# Make the cocos package one directory up importable when run from source.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
# Metadata consumed by the cocos test harness.  The testinfo string looks
# like a screenshot/timing script ("s" = snapshot, "t N" = advance to time
# N, "q" = quit) -- presumably; confirm against the harness docs.
testinfo = "s, t 1, s, t 2, s, t 3, s, t 4.1, s, t 4.2, s, q"
tags = "FlipX"
import pyglet
import cocos
from cocos.director import director
from cocos.actions import *
from cocos.layer import *
class BackgroundLayer(cocos.layer.Layer):
    """Layer that draws a static background image at the scene origin."""

    def __init__(self):
        super(BackgroundLayer, self).__init__()
        # Loaded through pyglet's resource system (searches resource paths).
        self.img = pyglet.resource.image('background_image.png')

    def draw( self ):
        # Opaque white so the texture is drawn untinted.
        glColor4ub(255, 255, 255, 255)
        glPushMatrix()
        # Apply this layer's position/rotation/scale transform.
        self.transform()
        self.img.blit(0,0)
        glPopMatrix()
def main():
    """Show a background scene running the FlipX grid transition."""
    director.init(resizable=True)
    scene = cocos.scene.Scene()
    scene.add(BackgroundLayer(), z=0)
    # In real code after a sequence of grid actions the StopGrid() action
    # should be called. Omitted here to stay in the last grid action render.
    scene.do(FlipX(duration=4))
    director.run(scene)


if __name__ == '__main__':
    main()
| {
"content_hash": "7f21bbc7a75d2bcb93d63c1ba8b6ba7e",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 74,
"avg_line_length": 27.657894736842106,
"alnum_prop": 0.6117982873453853,
"repo_name": "shadowmint/nwidget",
"id": "e67905d5bc2c8af2f43a185e4efca56ea6c0a4aa",
"size": "1125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/cocos2d-0.5.5/test/test_flip_x.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11298"
},
{
"name": "JavaScript",
"bytes": "17394"
},
{
"name": "PHP",
"bytes": "2190"
},
{
"name": "Python",
"bytes": "9815941"
},
{
"name": "Shell",
"bytes": "10521"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import subprocess
import os
from sys import stdout, exit
from time import sleep
import json
# Custom modules
from modules import string_ops as c
from modules import cfg, argparser
def update_cache_file(outfile, input):
    """Serialize *input* as JSON into the tab-completion cache directory."""
    path = "{}/{}".format(cfg.tabCacheDir, outfile)
    c.debug("Updating cache file {}".format(path))
    with open(path, 'w') as f:
        json.dump(input, f)
def get_all_osvariants():
    """Return ['auto'] plus every os-variant short-id from osinfo-query.

    Prints an install hint and re-raises if osinfo-query cannot be run
    (e.g. the libosinfo rpm is missing).
    """
    cmd = ['osinfo-query', 'os', '-f', 'short-id']
    c.debug("Executing: {}".format(" ".join(cmd)))
    try:
        out = subprocess.check_output(cmd)
    except Exception:
        # BUG FIX: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer intercepted; the original error is still
        # re-raised below after printing the hint.
        print(c.RED("Error executing osinfo-query; install libosinfo rpm\n"))
        raise
    allVariants = ['auto']
    # Skip the two header lines ("Short ID" title + separator).
    for line in out.splitlines()[2:]:
        allVariants.append(line.strip())
    return allVariants
def refresh_cache():
    """Populate cfg's osvariant/template choices and write them to disk."""
    if not cfg.osvariantChoices:
        cfg.osvariantChoices = get_all_osvariants()
        subprocess.call(['mkdir', '-p', cfg.tabCacheDir])
        update_cache_file('osvariants', cfg.osvariantChoices)
    if not cfg.templateList:
        cfg.templateList = cfg.get_virt_builder_list('json')
        cfg.templateChoices.extend(
            template['os-version'] for template in cfg.templateList)
        update_cache_file('templates', cfg.templateChoices)
def build_initial_cache():
    """Create the tab-completion cache on first run; no-op if it exists."""
    if not os.path.isdir(cfg.tabCacheDir):
        c.debug("Building initial cache")
        refresh_cache()
def main():
    """Top-level driver: validate, prompt, build the image, install the VM,
    then optionally attach a serial console.

    Project modules are imported lazily inside this function, presumably to
    keep startup fast for the tab-completion path -- confirm.
    """
    # On very first run, we need to get osinfo-query db & virt-builder template list
    # If tabCacheDir already exists, to speed execution when tab-completing, this does nothing
    build_initial_cache()
    # Parse cmdline arguments (If tab-completing, execution stops before returning)
    # options namespace saved to cfg.opts
    argparser.parse()
    # Get possible os-variants and virt-builder --list output
    if not cfg.osvariantChoices:
        refresh_cache()
    # Test for all needed system commands, appropriate permissions
    from modules import sysvalidator
    sysvalidator.check_system_config()
    # Prompt user for any missing (required) input
    from modules import finalprompter
    finalprompter.prompt_final_checks()
    # Launch virt-builder
    from modules import builder
    builder.build()
    # Quit if requested
    if cfg.opts.build_image_only:
        exit()
    # Write image to blockdevice if requested
    if cfg.opts.primary_blockdev:
        from modules import blockdevimager
        blockdevimager.write_and_cleanup_image()
    # Launch virt-install
    from modules import installer
    installer.install()
    # Optionally launch serial connection
    if cfg.opts.autoconsole and stdout.isatty():
        # Brief pause before attaching when logging is verbose
        # (loglevel < 20 -- presumably DEBUG; confirm against argparser).
        if cfg.opts.loglevel < 20:
            sleep(5.0)
        subprocess.call(['virsh', 'console', cfg.opts.vmname])
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C: remove any partially-built image file before exiting.
        print("\nReceived KeyboardInterrupt. Exiting.")
        cfg.cleanup_imagefile()
        exit()
| {
"content_hash": "638ab7786db432908740c0f22e3ee6d0",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 94,
"avg_line_length": 33.79775280898876,
"alnum_prop": 0.676529255319149,
"repo_name": "ryran/upvm",
"id": "fabd842f7a7cc525ffdbf91e717346c86c7ea3bb",
"size": "3893",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "upvm.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "55762"
},
{
"name": "Shell",
"bytes": "7398"
}
],
"symlink_target": ""
} |
"""Word completion for IPython.
This module is a fork of the rlcompleter module in the Python standard
library. The original enhancements made to rlcompleter have been sent
upstream and were accepted as of Python 2.3, but we need a lot more
functionality specific to IPython, so this module will continue to live as an
IPython-specific utility.
Original rlcompleter documentation:
This requires the latest extension to the readline module.  The completer
completes keywords, built-ins and globals in __main__; when completing
NAME.NAME..., it evaluates (!) the expression up to the last dot and
completes its attributes.
It's very cool to do "import string" type "string.", hit the
completion key (twice), and see the list of names defined by the
string module!
Tip: to use the tab key as the completion key, call
readline.parse_and_bind("tab: complete")
Notes:
- Exceptions raised by the completer function are *ignored* (and
generally cause the completion to fail). This is a feature -- since
readline sets the tty device in raw (or cbreak) mode, printing a
traceback wouldn't work well without some complicated hoopla to save,
reset and restore the tty state.
- The evaluation of the NAME.NAME... form may cause arbitrary
application defined code to be executed if an object with a
``__getattr__`` hook is found. Since it is the responsibility of the
application (or the user) to enable this feature, I consider this an
acceptable risk. More complicated expressions (e.g. function calls or
indexing operations) are *not* evaluated.
- GNU readline is also used by the built-in functions input() and
raw_input(), and thus these also benefit/suffer from the completer
features. Clearly an interactive application can benefit by
specifying its own completer function and using raw_input() for all
its input.
- When the original stdin is not a tty device, GNU readline is never
used, and this module (and the readline module) are silently inactive.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
#
# Some of this code originated from rlcompleter in the Python standard library
# Copyright (C) 2001 Python Software Foundation, www.python.org
import __main__
import glob
import inspect
import itertools
import keyword
import os
import re
import sys
import unicodedata
import string
from traitlets.config.configurable import Configurable
from IPython.core.error import TryNext
from IPython.core.inputsplitter import ESC_MAGIC
from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
from IPython.utils import generics
from IPython.utils import io
from IPython.utils.decorators import undoc
from IPython.utils.dir2 import dir2
from IPython.utils.process import arg_split
from IPython.utils.py3compat import builtin_mod, string_types, PY3
from traitlets import CBool, Enum
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# Public API
__all__ = ['Completer','IPCompleter']
# Characters that get backslash-escaped in filename completions
# (see protect_filename).  On Windows only spaces are protected.
if sys.platform == 'win32':
    PROTECTABLES = ' '
else:
    PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
#-----------------------------------------------------------------------------
# Main functions and classes
#-----------------------------------------------------------------------------
def has_open_quotes(s):
    """Return the open quote character of *s*, or False.

    A quote of either type is considered open when the string contains an
    odd number of that character.  Double quotes are checked first, so in
    mixed/nested cases '"' takes precedence.
    """
    for quote in ('"', "'"):
        if s.count(quote) % 2:
            return quote
    return False
def protect_filename(s):
    """Backslash-escape every character of *s* found in PROTECTABLES."""
    escaped = []
    for ch in s:
        escaped.append('\\' + ch if ch in PROTECTABLES else ch)
    return ''.join(escaped)
def expand_user(path):
    """Expand a leading '~' in *path*, reporting what was expanded.

    Like :func:`os.path.expanduser`, but also returns enough information to
    map completions computed on the expanded path back to the original
    '~'-style form.

    Returns
    -------
    newpath : str
        Result of ~ expansion in the input path.
    tilde_expand : bool
        Whether any expansion was performed or not.
    tilde_val : str
        The value that ~ was replaced with.
    """
    if not path.startswith('~'):
        return path, False, ''
    newpath = os.path.expanduser(path)
    # Only the leading '~...' component changes length under expansion, so
    # stripping the unchanged tail of the input leaves the expanded prefix.
    rest = len(path) - 1
    tilde_val = newpath[:-rest] if rest else newpath
    return newpath, True, tilde_val
def compress_user(path, tilde_expand, tilde_val):
    """Does the opposite of expand_user, with its outputs.

    Re-inserts '~' for *tilde_val* when expansion had been performed.
    """
    return path.replace(tilde_val, '~') if tilde_expand else path
def penalize_magics_key(word):
    """key for sorting that penalizes magic commands in the ordering

    A leading '%' or '%%' is rotated to the end of the word (e.g.
    '%matplotlib' -> 'matplotlib%', '%%timeit' -> 'timeit%%'), so magics
    sort after the plain names they shadow and cell magics after line
    magics of the same name.  Words containing any other '%' are not magic
    commands and are returned unchanged.
    """
    for prefix in ('%%', '%'):
        if word.startswith(prefix) and '%' not in word[len(prefix):]:
            return word[len(prefix):] + prefix
    return word
@undoc
class Bunch(object): pass  # Minimal dynamic attribute container used internally.
# Characters treated as word delimiters when splitting a line for completion.
DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
# Reduced delimiter set used when greedy completion is enabled, so dotted
# and indexed expressions stay intact.
GREEDY_DELIMS = ' =\r\n'
class CompletionSplitter(object):
    """Split an input line the way readline would for completion.

    Given a line of text and a cursor position, this object returns the
    'word' under the cursor after splitting the line on a configurable set
    of delimiter characters, giving every frontend readline-like behavior.
    The delimiters can be changed through the ``delims`` property, which
    rebuilds the internal regular expression automatically.
    """

    # Default delimiter characters, sensible for IPython's typical usage.
    _delims = DELIMS

    # Source expression for the splitting regex; kept for debuggability.
    _delim_expr = None

    # Compiled regular expression that performs the actual split.
    _delim_re = None

    def __init__(self, delims=None):
        if delims is None:
            delims = CompletionSplitter._delims
        self.delims = delims

    @property
    def delims(self):
        """Return the string of delimiter characters."""
        return self._delims

    @delims.setter
    def delims(self, delims):
        """Set the delimiters for line splitting."""
        escaped = ''.join('\\' + ch for ch in delims)
        expr = '[' + escaped + ']'
        self._delim_re = re.compile(expr)
        self._delims = delims
        self._delim_expr = expr

    def split_line(self, line, cursor_pos=None):
        """Return the last delimiter-separated token before the cursor."""
        text = line if cursor_pos is None else line[:cursor_pos]
        return self._delim_re.split(text)[-1]
class Completer(Configurable):
    """Basic name/attribute completer over one or two namespaces."""

    greedy = CBool(False, config=True,
        help="""Activate greedy completion
        This will enable completion on elements of lists, results of function calls, etc.,
        but can be unsafe because the code is actually evaluated on TAB.
        """
    )

    def __init__(self, namespace=None, global_namespace=None, **kwargs):
        """Create a new completer for the command line.

        Completer(namespace=ns,global_namespace=ns2) -> completer instance.

        If unspecified, the default namespace where completions are performed
        is __main__ (technically, __main__.__dict__). Namespaces should be
        given as dictionaries.

        An optional second namespace can be given.  This allows the completer
        to handle cases where both the local and global scopes need to be
        distinguished.

        Completer instances should be used as the completion mechanism of
        readline via the set_completer() call:

        readline.set_completer(Completer(my_namespace).complete)
        """
        # Don't bind to namespace quite yet, but flag whether the user wants a
        # specific namespace or to use __main__.__dict__. This will allow us
        # to bind to __main__.__dict__ at completion time, not now.
        if namespace is None:
            self.use_main_ns = 1
        else:
            self.use_main_ns = 0
            self.namespace = namespace

        # The global namespace, if given, can be bound directly
        if global_namespace is None:
            self.global_namespace = {}
        else:
            self.global_namespace = global_namespace

        super(Completer, self).__init__(**kwargs)

    def complete(self, text, state):
        """Return the next possible completion for 'text'.

        This is called successively with state == 0, 1, 2, ... until it
        returns None.  The completion should begin with 'text'.
        """
        if self.use_main_ns:
            self.namespace = __main__.__dict__

        # The match list is (re)computed only on the first call per text.
        if state == 0:
            if "." in text:
                self.matches = self.attr_matches(text)
            else:
                self.matches = self.global_matches(text)
        try:
            return self.matches[state]
        except IndexError:
            return None

    def global_matches(self, text):
        """Compute matches when text is a simple name.

        Return a list of all keywords, built-in functions and names currently
        defined in self.namespace or self.global_namespace that match.
        """
        matches = []
        match_append = matches.append
        n = len(text)
        for lst in [keyword.kwlist,
                    builtin_mod.__dict__.keys(),
                    self.namespace.keys(),
                    self.global_namespace.keys()]:
            for word in lst:
                if word[:n] == text and word != "__builtins__":
                    match_append(word)
        return matches

    def attr_matches(self, text):
        """Compute matches when text contains a dot.

        Assuming the text is of the form NAME.NAME....[NAME], and is
        evaluatable in self.namespace or self.global_namespace, it will be
        evaluated and its attributes (as revealed by dir()) are used as
        possible completions.  (For class instances, class members are
        also considered.)

        WARNING: this can still invoke arbitrary C code, if an object
        with a __getattr__ hook is evaluated.
        """
        # Another option, seems to work great. Catches things like ''.<tab>
        m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)

        if m:
            expr, attr = m.group(1, 3)
        elif self.greedy:
            # NOTE(review): self.line_buffer is not set by this base class;
            # it is expected to be provided by a subclass (IPCompleter).
            m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
            if not m2:
                return []
            expr, attr = m2.group(1, 2)
        else:
            return []

        # BUG FIX: the two handlers below were bare `except:` clauses, which
        # also swallowed KeyboardInterrupt/SystemExit during completion.
        try:
            obj = eval(expr, self.namespace)
        except Exception:
            try:
                obj = eval(expr, self.global_namespace)
            except Exception:
                return []

        # NOTE(review): limit_to__all__ is defined on the IPCompleter
        # subclass, not here -- using Completer directly with greedy attr
        # completion relies on that attribute existing.
        if self.limit_to__all__ and hasattr(obj, '__all__'):
            words = get__all__entries(obj)
        else:
            words = dir2(obj)

        try:
            # Give extension hooks a chance to customize the word list.
            words = generics.complete_object(obj, words)
        except TryNext:
            pass
        except Exception:
            # Silence errors from completion function
            pass
        # Build match list to return
        n = len(attr)
        res = ["%s.%s" % (expr, w) for w in words if w[:n] == attr]
        return res
def get__all__entries(obj):
    """Return the string entries of ``obj.__all__``, or [] if unavailable."""
    try:
        words = getattr(obj, '__all__')
    except Exception:
        # BUG FIX: narrowed from a bare `except:` so a misbehaving
        # __getattr__ cannot swallow KeyboardInterrupt/SystemExit.
        return []
    return [w for w in words if isinstance(w, string_types)]
def match_dict_keys(keys, prefix, delims):
    """Used by dict_key_matches, matching the prefix to a list of keys

    Returns a tuple ``(quote, token_start, matched)``: the quote character
    to reuse, the offset in *prefix* where the completed token starts, and
    the list of candidate completions.
    """
    # No prefix at all: offer the repr() of every string/bytes key.
    if not prefix:
        return None, 0, [repr(k) for k in keys
                      if isinstance(k, (string_types, bytes))]
    quote_match = re.search('["\']', prefix)
    # NOTE(review): assumes the prefix contains an opening quote; if it
    # does not, quote_match is None and the next line raises
    # AttributeError -- confirm callers only pass quoted prefixes.
    quote = quote_match.group()
    try:
        # Close the quote and evaluate to get the literal string typed so far.
        prefix_str = eval(prefix + quote, {})
    except Exception:
        return None, 0, []

    # Locate the token being completed: everything after the last delimiter.
    pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
    token_match = re.search(pattern, prefix, re.UNICODE)
    token_start = token_match.start()
    token_prefix = token_match.group()

    # TODO: support bytes in Py3k
    matched = []
    for key in keys:
        try:
            if not key.startswith(prefix_str):
                continue
        except (AttributeError, TypeError, UnicodeError):
            # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
            continue

        # reformat remainder of key to begin with prefix
        rem = key[len(prefix_str):]
        # force repr wrapped in '
        rem_repr = repr(rem + '"')
        if rem_repr.startswith('u') and prefix[0] not in 'uU':
            # Found key is unicode, but prefix is Py2 string.
            # Therefore attempt to interpret key as string.
            try:
                rem_repr = repr(rem.encode('ascii') + '"')
            except UnicodeEncodeError:
                continue

        # Strip the repr's own quoting; the trailing '"' added above
        # guarantees repr used single quotes, dropped here along with it.
        rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
        if quote == '"':
            # The entered prefix is quoted with ",
            # but the match is quoted with '.
            # A contained " hence needs escaping for comparison:
            rem_repr = rem_repr.replace('"', '\\"')

        # then reinsert prefix from start of token
        matched.append('%s%s' % (token_prefix, rem_repr))
    return quote, token_start, matched
def _safe_isinstance(obj, module, class_name):
"""Checks if obj is an instance of module.class_name if loaded
"""
return (module in sys.modules and
isinstance(obj, getattr(__import__(module), class_name)))
def back_unicode_name_matches(text):
    u"""Match unicode characters back to unicode name

    This does ☃ -> \\snowman

    Note that snowman is not a valid python3 combining character but will be expanded.
    Though it will not recombine back to the snowman character by the completion machinery.

    This will not either back-complete standard sequences like \\n, \\b ...

    Used on Python 3 only.
    """
    if len(text)<2:
        return u'', ()
    maybe_slash = text[-2]
    if maybe_slash != '\\':
        return u'', ()
    char = text[-1]
    # no expand on quote for completion in strings.
    # nor backcomplete standard ascii keys
    if char in string.ascii_letters or char in ['"',"'"]:
        return u'', ()
    try :
        unic = unicodedata.name(char)
        return '\\'+char,['\\'+unic]
    except (KeyError, ValueError):
        # BUG FIX: unicodedata.name() raises ValueError (not KeyError) for
        # characters without a name, which previously escaped this handler
        # and crashed completion.
        pass
    return u'', ()
def back_latex_name_matches(text):
    u"""Match a unicode character back to its latex escape.

    Given text ending in a backslash plus one character, return the
    fragment and a one-element list with the latex command for that
    character (e.g. the radical sign -> ``\\sqrt``), looked up in the
    module-level ``reverse_latex_symbol`` table.

    ASCII letters and quote characters are never back-completed.

    Used on Python 3 only.
    """
    no_match = (u'', ())
    if len(text) < 2 or text[-2] != '\\':
        return no_match
    char = text[-1]
    # no expand on quote for completion in strings,
    # nor backcomplete standard ascii keys
    if char in string.ascii_letters or char in ('"', "'"):
        return no_match
    try:
        # the returned fragment keeps the backslash of the typed text
        return u'\\' + char, [reverse_latex_symbol[char]]
    except KeyError:
        return no_match
class IPCompleter(Completer):
    """Extension of the completer class with IPython-specific features"""

    def _greedy_changed(self, name, old, new):
        """update the splitter and readline delims when greedy is changed"""
        # Greedy completion evaluates wider expressions, so the token
        # splitter (and readline, when active) needs matching delimiters.
        if new:
            self.splitter.delims = GREEDY_DELIMS
        else:
            self.splitter.delims = DELIMS

        if self.readline:
            self.readline.set_completer_delims(self.splitter.delims)

    # Configurable traits (traitlets); `help` strings are user-facing.
    merge_completions = CBool(True, config=True,
        help="""Whether to merge completion results into a single list

        If False, only the completion results from the first non-empty
        completer will be returned.
        """
    )
    omit__names = Enum((0,1,2), default_value=2, config=True,
        help="""Instruct the completer to omit private method names

        Specifically, when completing on ``object.<tab>``.

        When 2 [default]: all names that start with '_' will be excluded.

        When 1: all 'magic' names (``__foo__``) will be excluded.

        When 0: nothing will be excluded.
        """
    )
    limit_to__all__ = CBool(default_value=False, config=True,
        help="""Instruct the completer to use __all__ for the completion

        Specifically, when completing on ``object.<tab>``.

        When True: only those names in obj.__all__ will be included.

        When False [default]: the __all__ attribute is ignored
        """
    )
    def __init__(self, shell=None, namespace=None, global_namespace=None,
                 use_readline=True, config=None, **kwargs):
        """IPCompleter() -> completer

        Return a completer object suitable for use by the readline library
        via readline.set_completer().

        Inputs:

        - shell: a pointer to the ipython shell itself.  This is needed
          because this completer knows about magic functions, and those can
          only be accessed via the ipython instance.

        - namespace: an optional dict where completions are performed.

        - global_namespace: secondary optional dict for completions, to
          handle cases (such as IPython embedded inside functions) where
          both Python scopes are visible.

        use_readline : bool, optional
          If true, use the readline library.  This completer can still function
          without readline, though in that case callers must provide some extra
          information on each call about the current line."""

        self.magic_escape = ESC_MAGIC
        self.splitter = CompletionSplitter()

        # Readline configuration, only used by the rlcompleter method.
        if use_readline:
            # We store the right version of readline so that later code
            # (IPython's wrapper rather than whatever `import readline` finds)
            import IPython.utils.rlineimpl as readline
            self.readline = readline
        else:
            self.readline = None

        # _greedy_changed() depends on splitter and readline being defined:
        Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
                           config=config, **kwargs)

        # List where completion matches will be stored
        self.matches = []
        self.shell = shell
        # Regexp to split filenames with spaces in them
        self.space_name_re = re.compile(r'([^\\] )')
        # Hold a local ref. to glob.glob for speed
        self.glob = glob.glob

        # Determine if we are running on 'dumb' terminals, like (X)Emacs
        # buffers, to avoid completion problems.
        term = os.environ.get('TERM','xterm')
        self.dumb_terminal = term in ['dumb','emacs']

        # Special handling of backslashes needed in win32 platforms
        if sys.platform == "win32":
            self.clean_glob = self._clean_glob_win32
        else:
            self.clean_glob = self._clean_glob

        #regexp to parse docstring for function signature
        self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
        self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
        #use this if positional argument name is also needed
        #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')

        # All active matcher routines for completion; complete() runs them
        # in this order (or stops at the first non-empty result when
        # merge_completions is off).
        self.matchers = [self.python_matches,
                         self.file_matches,
                         self.magic_matches,
                         self.python_func_kw_matches,
                         self.dict_key_matches,
                         ]
def all_completions(self, text):
"""
Wrapper around the complete method for the benefit of emacs
and pydb.
"""
return self.complete(text)[1]
def _clean_glob(self,text):
return self.glob("%s*" % text)
def _clean_glob_win32(self,text):
return [f.replace("\\","/")
for f in self.glob("%s*" % text)]
    def file_matches(self, text):
        """Match filenames, expanding ~USER type strings.

        Most of the seemingly convoluted logic in this completer is an
        attempt to handle filenames with spaces in them.  And yet it's not
        quite perfect, because Python's readline doesn't expose all of the
        GNU readline details needed for this to be done correctly.

        For a filename with a space in it, the printed completions will be
        only the parts after what's already been typed (instead of the
        full completions, as is normally done).  I don't think with the
        current (as of Python 2.3) Python readline it's possible to do
        better."""

        #io.rprint('Completer->file_matches: <%r>' % text) # dbg

        # chars that require escaping with backslash - i.e. chars
        # that readline treats incorrectly as delimiters, but we
        # don't want to treat as delimiters in filename matching
        # when escaped with backslash
        if text.startswith('!'):
            # shell-escape prefix: glob without it, re-add it to results
            text = text[1:]
            text_prefix = '!'
        else:
            text_prefix = ''

        text_until_cursor = self.text_until_cursor
        # track strings with open quotes
        open_quotes = has_open_quotes(text_until_cursor)

        if '(' in text_until_cursor or '[' in text_until_cursor:
            # inside a call/indexing expression: complete only on `text`
            lsplit = text
        else:
            try:
                # arg_split ~ shlex.split, but with unicode bugs fixed by us
                lsplit = arg_split(text_until_cursor)[-1]
            except ValueError:
                # typically an unmatched ", or backslash without escaped char.
                if open_quotes:
                    lsplit = text_until_cursor.split(open_quotes)[-1]
                else:
                    return []
            except IndexError:
                # tab pressed on empty line
                lsplit = ""

        if not open_quotes and lsplit != protect_filename(lsplit):
            # if protectables are found, do matching on the whole escaped name
            has_protectables = True
            text0,text = text,lsplit
        else:
            has_protectables = False
            text = os.path.expanduser(text)

        if text == "":
            return [text_prefix + protect_filename(f) for f in self.glob("*")]

        # Compute the matches from the filesystem
        m0 = self.clean_glob(text.replace('\\',''))

        if has_protectables:
            # If we had protectables, we need to revert our changes to the
            # beginning of filename so that we don't double-write the part
            # of the filename we have so far
            len_lsplit = len(lsplit)
            matches = [text_prefix + text0 +
                       protect_filename(f[len_lsplit:]) for f in m0]
        else:
            if open_quotes:
                # if we have a string with an open quote, we don't need to
                # protect the names at all (and we _shouldn't_, as it
                # would cause bugs when the filesystem call is made).
                matches = m0
            else:
                matches = [text_prefix +
                           protect_filename(f) for f in m0]

        #io.rprint('mm', matches) # dbg

        # Mark directories in input list by appending '/' to their names.
        matches = [x+'/' if os.path.isdir(x) else x for x in matches]
        return matches
def magic_matches(self, text):
"""Match magics"""
#print 'Completer->magic_matches:',text,'lb',self.text_until_cursor # dbg
# Get all shell magics now rather than statically, so magics loaded at
# runtime show up too.
lsm = self.shell.magics_manager.lsmagic()
line_magics = lsm['line']
cell_magics = lsm['cell']
pre = self.magic_escape
pre2 = pre+pre
# Completion logic:
# - user gives %%: only do cell magics
# - user gives %: do both line and cell magics
# - no prefix: do both
# In other words, line magics are skipped if the user gives %% explicitly
bare_text = text.lstrip(pre)
comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
if not text.startswith(pre2):
comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
return comp
    def python_matches(self,text):
        """Match attributes or global python names"""
        #io.rprint('Completer->python_matches, txt=%r' % text) # dbg
        if "." in text:
            try:
                matches = self.attr_matches(text)
                if text.endswith('.') and self.omit__names:
                    if self.omit__names == 1:
                        # true if txt is _not_ a __ name, false otherwise:
                        no__name = (lambda txt:
                                    re.match(r'.*\.__.*?__',txt) is None)
                    else:
                        # true if txt is _not_ a _ name, false otherwise:
                        no__name = (lambda txt:
                                    re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
                    # NOTE(review): on Python 3 this leaves `matches` as a
                    # lazy filter object; callers only iterate it, so that
                    # appears harmless here -- confirm no caller indexes it.
                    matches = filter(no__name, matches)
            except NameError:
                # catches <undefined attributes>.<tab>
                matches = []
        else:
            matches = self.global_matches(text)
        return matches
def _default_arguments_from_docstring(self, doc):
"""Parse the first line of docstring for call signature.
Docstring should be of the form 'min(iterable[, key=func])\n'.
It can also parse cython docstring of the form
'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
"""
if doc is None:
return []
#care only the firstline
line = doc.lstrip().splitlines()[0]
#p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
#'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
sig = self.docstring_sig_re.search(line)
if sig is None:
return []
# iterable[, key=func]' -> ['iterable[' ,' key=func]']
sig = sig.groups()[0].split(',')
ret = []
for s in sig:
#re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
ret += self.docstring_kwd_re.findall(s)
return ret
    def _default_arguments(self, obj):
        """Return the list of default arguments of obj if it is callable,
        or empty list otherwise."""
        call_obj = obj
        ret = []
        if inspect.isbuiltin(obj):
            # builtins expose no Python-level signature; rely on the
            # docstring parse below
            pass
        elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
            if inspect.isclass(obj):
                #for cython embededsignature=True the constructor docstring
                #belongs to the object itself not __init__
                ret += self._default_arguments_from_docstring(
                    getattr(obj, '__doc__', ''))
                # for classes, check for __init__,__new__
                call_obj = (getattr(obj, '__init__', None) or
                            getattr(obj, '__new__', None))
            # for all others, check if they are __call__able
            elif hasattr(obj, '__call__'):
                call_obj = obj.__call__

        ret += self._default_arguments_from_docstring(
            getattr(call_obj, '__doc__', ''))

        # Only parameters that may be passed by keyword are completable
        # as `name=`; *args / **kwargs / positional-only are excluded.
        if PY3:
            _keeps = (inspect.Parameter.KEYWORD_ONLY,
                      inspect.Parameter.POSITIONAL_OR_KEYWORD)
            signature = inspect.signature
        else:
            # Python 2: use IPython's backport of inspect.signature
            import IPython.utils.signatures
            _keeps = (IPython.utils.signatures.Parameter.KEYWORD_ONLY,
                      IPython.utils.signatures.Parameter.POSITIONAL_OR_KEYWORD)
            signature = IPython.utils.signatures.signature

        try:
            sig = signature(call_obj)
            ret.extend(k for k, v in sig.parameters.items() if
                       v.kind in _keeps)
        except ValueError:
            # signature() raises ValueError for objects it cannot introspect
            pass

        # de-duplicate: docstring parsing and introspection can overlap
        return list(set(ret))
    def python_func_kw_matches(self,text):
        """Match named parameters (kwargs) of the last open function"""

        if "." in text: # a parameter cannot be dotted
            return []
        # tokenizer regex, built lazily and cached on the instance
        try: regexp = self.__funcParamsRegex
        except AttributeError:
            regexp = self.__funcParamsRegex = re.compile(r'''
                '.*?(?<!\\)' |    # single quoted strings or
                ".*?(?<!\\)" |    # double quoted strings or
                \w+          |    # identifier
                \S                # other characters
                ''', re.VERBOSE | re.DOTALL)
        # 1. find the nearest identifier that comes before an unclosed
        # parenthesis before the cursor
        # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
        tokens = regexp.findall(self.text_until_cursor)
        tokens.reverse()
        iterTokens = iter(tokens); openPar = 0

        # Scan right-to-left; openPar goes positive at the first '(' that
        # was never closed before the cursor.
        for token in iterTokens:
            if token == ')':
                openPar -= 1
            elif token == '(':
                openPar += 1
                if openPar > 0:
                    # found the last unclosed parenthesis
                    break
        else:
            # no unclosed parenthesis: not inside a call
            return []
        # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
        ids = []
        isId = re.compile(r'\w+$').match

        while True:
            try:
                ids.append(next(iterTokens))
                if not isId(ids[-1]):
                    ids.pop(); break
                if not next(iterTokens) == '.':
                    break
            except StopIteration:
                break
        # lookup the candidate callable matches either using global_matches
        # or attr_matches for dotted names
        if len(ids) == 1:
            callableMatches = self.global_matches(ids[0])
        else:
            callableMatches = self.attr_matches('.'.join(ids[::-1]))
        argMatches = []
        for callableMatch in callableMatches:
            try:
                # evaluate the matched name against the user namespace to
                # get the actual object whose signature we inspect
                namedArgs = self._default_arguments(eval(callableMatch,
                                                         self.namespace))
            except:
                # anything can go wrong evaluating user objects; skip them
                continue

            for namedArg in namedArgs:
                if namedArg.startswith(text):
                    argMatches.append("%s=" %namedArg)
        return argMatches
    def dict_key_matches(self, text):
        "Match string keys in a dictionary, after e.g. 'foo[' "
        def get_keys(obj):
            # Only allow completion for known in-memory dict-like types
            if isinstance(obj, dict) or\
               _safe_isinstance(obj, 'pandas', 'DataFrame'):
                try:
                    return list(obj.keys())
                except Exception:
                    # user-defined keys() may raise; treat as no keys
                    return []
            elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
                 _safe_isinstance(obj, 'numpy', 'void'):
                # structured-array / record field names
                return obj.dtype.names or []
            return []

        # regexes (one per greedy setting) built lazily, cached on instance
        try:
            regexps = self.__dict_key_regexps
        except AttributeError:
            dict_key_re_fmt = r'''(?x)
            (  # match dict-referring expression wrt greedy setting
                %s
            )
            \[   # open bracket
            \s*  # and optional whitespace
            ([uUbB]?  # string prefix (r not handled)
                (?:   # unclosed string
                    '(?:[^']|(?<!\\)\\')*
                |
                    "(?:[^"]|(?<!\\)\\")*
                )
            )?
            $
            '''
            regexps = self.__dict_key_regexps = {
                False: re.compile(dict_key_re_fmt % '''
                                  # identifiers separated by .
                                  (?!\d)\w+
                                  (?:\.(?!\d)\w+)*
                                  '''),
                True: re.compile(dict_key_re_fmt % '''
                                 .+
                                 ''')
            }

        match = regexps[self.greedy].search(self.text_until_cursor)
        if match is None:
            return []

        expr, prefix = match.groups()
        # evaluate the expression before '[' in the user namespaces
        try:
            obj = eval(expr, self.namespace)
        except Exception:
            try:
                obj = eval(expr, self.global_namespace)
            except Exception:
                return []

        keys = get_keys(obj)
        if not keys:
            return keys
        closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
        if not matches:
            return matches

        # get the cursor position of
        # - the text being completed
        # - the start of the key text
        # - the start of the completion
        text_start = len(self.text_until_cursor) - len(text)
        if prefix:
            key_start = match.start(2)
            completion_start = key_start + token_offset
        else:
            key_start = completion_start = match.end()

        # grab the leading prefix, to make sure all completions start with `text`
        if text_start > key_start:
            leading = ''
        else:
            leading = text[text_start:completion_start]

        # the index of the `[` character
        bracket_idx = match.end(1)

        # append closing quote and bracket as appropriate
        # this is *not* appropriate if the opening quote or bracket is outside
        # the text given to this method
        suf = ''
        continuation = self.line_buffer[len(self.text_until_cursor):]
        if key_start > text_start and closing_quote:
            # quotes were opened inside text, maybe close them
            if continuation.startswith(closing_quote):
                continuation = continuation[len(closing_quote):]
            else:
                suf += closing_quote
        if bracket_idx > text_start:
            # brackets were opened inside text, maybe close them
            if not continuation.startswith(']'):
                suf += ']'

        return [leading + k + suf for k in matches]
def unicode_name_matches(self, text):
u"""Match Latex-like syntax for unicode characters base
on the name of the character.
This does \\GREEK SMALL LETTER ETA -> η
Works only on valid python 3 identifier, or on combining characters that
will combine to form a valid identifier.
Used on Python 3 only.
"""
slashpos = text.rfind('\\')
if slashpos > -1:
s = text[slashpos+1:]
try :
unic = unicodedata.lookup(s)
# allow combining chars
if ('a'+unic).isidentifier():
return '\\'+s,[unic]
except KeyError as e:
pass
return u'', []
def latex_matches(self, text):
u"""Match Latex syntax for unicode characters.
This does both \\alp -> \\alpha and \\alpha -> α
Used on Python 3 only.
"""
slashpos = text.rfind('\\')
if slashpos > -1:
s = text[slashpos:]
if s in latex_symbols:
# Try to complete a full latex symbol to unicode
# \\alpha -> α
return s, [latex_symbols[s]]
else:
# If a user has partially typed a latex symbol, give them
# a full list of options \al -> [\aleph, \alpha]
matches = [k for k in latex_symbols if k.startswith(s)]
return s, matches
return u'', []
    def dispatch_custom_completer(self, text):
        """Run any user-registered custom completers for *text*.

        Returns the first non-empty list of matches produced by a custom
        completer (case-sensitive matches preferred), or None when no
        custom completer applies.
        """
        #io.rprint("Custom! '%s' %s" % (text, self.custom_completers)) # dbg
        line = self.line_buffer
        if not line.strip():
            return None

        # Create a little structure to pass all the relevant information about
        # the current completion to any custom completer.
        event = Bunch()
        event.line = line
        event.symbol = text
        cmd = line.split(None,1)[0]
        event.command = cmd
        event.text_until_cursor = self.text_until_cursor

        #print "\ncustom:{%s]\n" % event # dbg

        # for foo etc, try also to find completer for %foo
        if not cmd.startswith(self.magic_escape):
            try_magic = self.custom_completers.s_matches(
                self.magic_escape + cmd)
        else:
            try_magic = []

        for c in itertools.chain(self.custom_completers.s_matches(cmd),
                 try_magic,
                 self.custom_completers.flat_matches(self.text_until_cursor)):
            #print "try",c # dbg
            try:
                res = c(event)
                if res:
                    # first, try case sensitive match
                    withcase = [r for r in res if r.startswith(text)]
                    if withcase:
                        return withcase
                    # if none, then case insensitive ones are ok too
                    text_low = text.lower()
                    return [r for r in res if r.lower().startswith(text_low)]
            except TryNext:
                # completer declined; fall through to the next one
                pass

        return None
    def complete(self, text=None, line_buffer=None, cursor_pos=None):
        """Find completions for the given text and line context.

        Note that both the text and the line_buffer are optional, but at least
        one of them must be given.

        Parameters
        ----------
          text : string, optional
            Text to perform the completion on.  If not given, the line buffer
            is split using the instance's CompletionSplitter object.

          line_buffer : string, optional
            If not given, the completer attempts to obtain the current line
            buffer via readline.  This keyword allows clients which are
            requesting for text completions in non-readline contexts to inform
            the completer of the entire text.

          cursor_pos : int, optional
            Index of the cursor in the full line buffer.  Should be provided by
            remote frontends where kernel has no access to frontend state.

        Returns
        -------
        text : str
          Text that was actually used in the completion.

        matches : list
          A list of completion matches.
        """
        # io.rprint('\nCOMP1 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg

        # if the cursor position isn't given, the only sane assumption we can
        # make is that it's at the end of the line (the common case)
        # NOTE(review): if BOTH text and line_buffer are None this raises
        # TypeError on len(None) -- the docstring requires one of them.
        if cursor_pos is None:
            cursor_pos = len(line_buffer) if text is None else len(text)

        if PY3:
            # latex and unicode-name completions take priority and
            # short-circuit the regular matchers when they apply
            base_text = text if not line_buffer else line_buffer[:cursor_pos]
            latex_text, latex_matches = self.latex_matches(base_text)
            if latex_matches:
                return latex_text, latex_matches
            name_text = ''
            name_matches = []
            for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
                name_text, name_matches = meth(base_text)
                if name_text:
                    return name_text, name_matches

        # if text is either None or an empty string, rely on the line buffer
        if not text:
            text = self.splitter.split_line(line_buffer, cursor_pos)

        # If no line buffer is given, assume the input text is all there was
        if line_buffer is None:
            line_buffer = text

        self.line_buffer = line_buffer
        self.text_until_cursor = self.line_buffer[:cursor_pos]
        # io.rprint('COMP2 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg

        # Start with a clean slate of completions
        self.matches[:] = []
        custom_res = self.dispatch_custom_completer(text)
        if custom_res is not None:
            # did custom completers produce something?
            self.matches = custom_res
        else:
            # Extend the list of completions with the results of each
            # matcher, so we return results to the user from all
            # namespaces.
            if self.merge_completions:
                self.matches = []
                for matcher in self.matchers:
                    try:
                        self.matches.extend(matcher(text))
                    except:
                        # Show the ugly traceback if the matcher causes an
                        # exception, but do NOT crash the kernel!
                        sys.excepthook(*sys.exc_info())
            else:
                # non-merging mode: first matcher with results wins
                for matcher in self.matchers:
                    self.matches = matcher(text)
                    if self.matches:
                        break
        # FIXME: we should extend our api to return a dict with completions for
        # different types of objects.  The rlcomplete() method could then
        # simply collapse the dict into a list for readline, but we'd have
        # richer completion semantics in other evironments.

        # use penalize_magics_key to put magics after variables with same name
        self.matches = sorted(set(self.matches), key=penalize_magics_key)

        #io.rprint('COMP TEXT, MATCHES: %r, %r' % (text, self.matches)) # dbg
        return text, self.matches
    def rlcomplete(self, text, state):
        """Return the state-th possible completion for 'text'.

        This is called successively with state == 0, 1, 2, ... until it
        returns None.  The completion should begin with 'text'.

        Parameters
        ----------
          text : string
            Text to perform the completion on.

          state : int
            Counter used by readline.
        """
        # The match list is computed once (state == 0) and then indexed
        # on subsequent calls.
        if state==0:

            self.line_buffer = line_buffer = self.readline.get_line_buffer()
            cursor_pos = self.readline.get_endidx()

            #io.rprint("\nRLCOMPLETE: %r %r %r" %
            #         (text, line_buffer, cursor_pos) ) # dbg

            # if there is only a tab on a line with only whitespace, instead of
            # the mostly useless 'do you want to see all million completions'
            # message, just do the right thing and give the user his tab!
            # Incidentally, this enables pasting of tabbed text from an editor
            # (as long as autoindent is off).

            # It should be noted that at least pyreadline still shows file
            # completions - is there a way around it?

            # don't apply this on 'dumb' terminals, such as emacs buffers, so
            # we don't interfere with their own tab-completion mechanism.
            if not (self.dumb_terminal or line_buffer.strip()):
                self.readline.insert_text('\t')
                sys.stdout.flush()
                return None

            # Note: debugging exceptions that may occur in completion is very
            # tricky, because readline unconditionally silences them.  So if
            # during development you suspect a bug in the completion code, turn
            # this flag on temporarily by uncommenting the second form (don't
            # flip the value in the first line, as the '# dbg' marker can be
            # automatically detected and is used elsewhere).
            DEBUG = False
            #DEBUG = True # dbg
            if DEBUG:
                try:
                    self.complete(text, line_buffer, cursor_pos)
                except:
                    import traceback; traceback.print_exc()
            else:
                # The normal production version is here
                # This method computes the self.matches array
                self.complete(text, line_buffer, cursor_pos)

        try:
            return self.matches[state]
        except IndexError:
            return None
| {
"content_hash": "df1685b54ef35e0e5fb3cf7a2b329b0a",
"timestamp": "",
"source": "github",
"line_count": 1275,
"max_line_length": 104,
"avg_line_length": 35.98980392156863,
"alnum_prop": 0.5633621722928063,
"repo_name": "fzheng/codejam",
"id": "4c793b43c17f70ee7f865db6c6232945a1e62094",
"size": "45910",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/IPython/core/completer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "26929"
},
{
"name": "CSS",
"bytes": "70961"
},
{
"name": "HTML",
"bytes": "80615"
},
{
"name": "Java",
"bytes": "376384"
},
{
"name": "JavaScript",
"bytes": "5201764"
},
{
"name": "Jupyter Notebook",
"bytes": "13408"
},
{
"name": "Makefile",
"bytes": "2379"
},
{
"name": "Python",
"bytes": "16542061"
},
{
"name": "Smarty",
"bytes": "22430"
},
{
"name": "TeX",
"bytes": "85477"
}
],
"symlink_target": ""
} |
import mosquitto
import os
import sys
import time
import redis
import datetime
import textwrap
import json
# globals
# NOTE(review): `global` statements at module scope are no-ops; the names
# below only become module globals through the assignments later in the file.
global debug
global mqttc
global run
global r_server
# mqtt broker connection settings
broker = "winter.ceit.uq.edu.au"
port = 1883
# port of the registration web server advertised to users on the screen
serverPort = 55671
# connect
def connect():
global mqttc
global r_server
try:
mqttc.reinitialise()
mqttc.connect(broker, port, 20, True)
mqttc.publish("gumballlog", "RFID Parser Re-Connected")
mqttc.subscribe("gumballrfid", 0)
mqttc.on_message = on_message
mqttc.on_disconnect = on_disconnect
# connect to Redis
r_server = redis.Redis(host='winter.ceit.uq.edu.au', port=6379, db=10, password=None, socket_timeout=None, connection_pool=None, charset='utf-8', errors='strict', decode_responses=False, unix_socket_path=None)
except:
print "Parser Problems, retrying"
time.sleep(5)
# callback
def on_message(mosq, obj, msg):
global mqqtc
global debug
global r_server
jsonString = json.loads(msg.payload)
sourceId = jsonString["id"]
hashkey = jsonString["value"]
my_str = ""
# get the current number of unregisterd cards
current_number = int(r_server.get("Number"));
# check if card has been registed
if(r_server.hget(hashkey, "RFID") == None):
# check if they have not registed the card.
current_unregisterd_dict = r_server.hgetall("Unregistered")
for key, value in current_unregisterd_dict.iteritems():
if value == hashkey:
print "comparing: " + value + " to: " + hashkey
my_str = "Unregistered! Go to\nwinter.ceit.uq.edu.au\n:" + str(serverPort) + "/index.html\nTo Register\nYour Key is: " + key + "\n"
print my_str
# publish her
mqttc.publish("gumballscreen", my_str)
return
# This is the first time the card has been scanned
# Enter it into the Unregisterd database
r_server.hset("Unregistered", current_number , str(hashkey))
my_str += "Unregistered Go to\nwinter.ceit.uq.edu.au\n:" + str(serverPort) + "/index.html\nTo Register\n"
my_str += "Your Key is: " + str(current_number) + "\n"
print my_str
# publish here
mqttc.publish("gumballscreen", my_str)
# boost the number by one, so no overwrite
r_server.set("Number", (current_number+1))
# The Card is Registerd
else:
# update the time stamp
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
r_server.hset(str(hashkey), "Python Time", ts)
r_server.hset(str(hashkey), "Human Time", str(st))
# is it a registered card.
my_str += "Card: " + str(hashkey)[0:10] + "\nRegistered!\n" + "Hello: " + str(r_server.hget(str(hashkey), "First"))
my_str += "\nSigned In:\n" + str(r_server.hget(str(hashkey), "Human Time"))
# if user has permission for lollies, dispense
if(int(r_server.hget(str(hashkey), "Lollies")) == 1):
# profit?
mqttc.publish("ait", str(hashkey))
else:
my_str += "\nNo lollies for you!"
my_str += "\n"
# publish here
mqttc.publish("gumballscreen", my_str)
def on_disconnect(mosq, obj, rc):
    # Broker dropped the connection: reconnect straight away, then pause
    # so a flapping broker is not hammered in a tight loop.
    connect()
    time.sleep(10)
# start
debug = 0
run = 1
# generate client name and connect to mqtt
# (PID-based so multiple copies get distinct MQTT client ids)
mypid = os.getpid()
client_uniq = "pubclient_"+str(mypid)
# connect mqtt
mqttc = mosquitto.Mosquitto(client_uniq)
connect()
print """\n############ HELLO ############
# ~~~~~~~ NOW RUNNING ~~~~~~~~ #
#################################\n"""
#remain connected and publish
while True:
    #try:
    # loop() returns non-zero when the network connection is lost
    if(mqttc.loop() != 0):
        print "Re-Connecting"
        connect()
    #except Exception, e:
    #    print("error")
    #    connect()
    time.sleep(1)
"content_hash": "adad220e57cd62f65d36c5c61bf99c4a",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 211,
"avg_line_length": 24.305555555555557,
"alnum_prop": 0.6631428571428571,
"repo_name": "DavidWrigley/Gumball",
"id": "df463f7827166fb5a05ae68aebcf813c236ec8e4",
"size": "3518",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Mirror USB/shell/Python side/mqqt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AppleScript",
"bytes": "6571"
},
{
"name": "Arduino",
"bytes": "11508"
},
{
"name": "C",
"bytes": "250456"
},
{
"name": "C++",
"bytes": "91334"
},
{
"name": "CSS",
"bytes": "18638"
},
{
"name": "D",
"bytes": "9297"
},
{
"name": "JavaScript",
"bytes": "233334"
},
{
"name": "Objective-C",
"bytes": "4120"
},
{
"name": "PHP",
"bytes": "358"
},
{
"name": "Python",
"bytes": "41184"
},
{
"name": "Shell",
"bytes": "22822"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.synapse import SynapseManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-synapse
# USAGE
python kusto_pool_data_connections_create_or_update.py
Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Create or update an Event Hub data connection on a Synapse Kusto pool.

    Credentials come from the environment (AZURE_CLIENT_ID, AZURE_TENANT_ID,
    AZURE_CLIENT_SECRET) via DefaultAzureCredential; the long-running
    operation is awaited and the resulting resource is printed.
    """
    credential = DefaultAzureCredential()
    client = SynapseManagementClient(
        credential=credential,
        subscription_id="12345678-1234-1234-1234-123456789098",
    )

    # Payload describing the Event Hub backed data connection.
    connection_spec = {
        "kind": "EventHub",
        "location": "westus",
        "properties": {
            "consumerGroup": "testConsumerGroup1",
            "eventHubResourceId": "/subscriptions/12345678-1234-1234-1234-123456789098/resourceGroups/kustorptest/providers/Microsoft.EventHub/namespaces/eventhubTestns1/eventhubs/eventhubTest1",
        },
    }

    poller = client.kusto_pool_data_connections.begin_create_or_update(
        resource_group_name="kustorptest",
        workspace_name="synapseWorkspaceName",
        kusto_pool_name="kustoclusterrptest4",
        database_name="KustoDatabase8",
        data_connection_name="DataConnections8",
        parameters=connection_spec,
    )
    response = poller.result()
    print(response)
# x-ms-original-file: specification/synapse/resource-manager/Microsoft.Synapse/preview/2021-06-01-preview/examples/KustoPoolDataConnectionsCreateOrUpdate.json
if __name__ == "__main__":
    # run only when executed as a script, not on import
    main()
| {
"content_hash": "31d0e23a913b9bd9c39736a21322e25a",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 199,
"avg_line_length": 39.15909090909091,
"alnum_prop": 0.7069065583284968,
"repo_name": "Azure/azure-sdk-for-python",
"id": "2ddef8a2cf50b1f0adaac8774e530e491217e74a",
"size": "2191",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/synapse/azure-mgmt-synapse/generated_samples/kusto_pool_data_connections_create_or_update.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import socket, threading, string
# when True, printd() echoes diagnostic messages to stdout
debug = True
# module state: listening socket and run flag for the accept loop
_connector = None
_running = True
# listen address/port and connection limits
_host = '0.0.0.0'
_port = 2222
_maxClient = 99      # backlog passed to listen()
_recvBuffer = 1024   # bytes read per recv() call
def printd(aString):
if debug:
print aString
class talkToClient(threading.Thread):
    """Echo-handler thread for one client connection.

    NOTE(review): currently dead code -- the accept loop below serves
    clients inline and the only instantiation is commented out there.
    """
    def __init__(self, clientSock, addr):
        # connected socket and peer address for this client
        self.clientSock = clientSock
        self.addr = addr
        threading.Thread.__init__(self)
    def run(self):
        # echo every payload back until the peer disconnects or says "close"
        while True:
            recvData = self.clientSock.recv(_recvBuffer)
            if not recvData:
                # peer closed the connection; this send likely fails on a
                # dead socket -- TODO confirm intent
                self.clientSock.send('bye')
                break
            printd('Client ' + str(self.addr) + ' say "' + str(recvData) + '"')
            self.clientSock.send(recvData)
            if recvData == "close":
                break
        self.clientSock.close()
# open the listening socket and serve clients until _running is cleared
# (nothing in this file ever clears it, so effectively forever)
_connector = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
_connector.bind((str(_host), int(_port)))
_connector.listen(int(_maxClient))
while _running:
    printd('Running on ' + _host + ':' + str(_port) + '.')
    clientSock, addr = _connector.accept()
    printd('Conect on : ' + str (addr))
    #talkToClient (channel, details).start ()
    # NOTE(review): clients are served inline, one at a time; the
    # talkToClient thread class above is unused.
    while True:
        recvData = clientSock.recv(_recvBuffer)
        if not recvData:
            # peer closed the connection
            clientSock.send('bye')
            break
        printd('Client ' + str(addr) + ' say "' + str(recvData) + '"')
        clientSock.send(recvData)
        if recvData == "data":
            break
    clientSock.close()
_connector.close()
| {
"content_hash": "4be9f3aad8b49f10887b4c35773a84b1",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 79,
"avg_line_length": 27.61111111111111,
"alnum_prop": 0.5694164989939637,
"repo_name": "khoteevnd/stepic",
"id": "385884105f1a3452238aa7435d1980ed62ed08bb",
"size": "1516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3665"
}
],
"symlink_target": ""
} |
from gpu_tests.gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
class GpuProcessExpectations(GpuTestExpectations):
  """Suppressions for the GPU-process tests.

  See the GpuTestExpectations class for documentation of Fail/Skip.
  """

  def SetExpectations(self):
    # Accelerated 2D canvas is not available on Linux due to driver
    # instability.
    self.Fail('GpuProcess.canvas2d', ['linux'], bug=254724)
    self.Fail('GpuProcess.video', ['linux'], bug=257109)

    # Android: all of these track the same bug.
    android_failures = [
        'GpuProcess.no_gpu_process',
        'GpuProcess.identify_active_gpu1',
        'GpuProcess.identify_active_gpu2',
        'GpuProcess.identify_active_gpu3',
        'GpuProcess.identify_active_gpu4',
        'GpuProcess.readback_webgl_gpu_process',
        'GpuProcess.driver_bug_workarounds_upon_gl_renderer',
    ]
    for test_name in android_failures:
      self.Fail(test_name, ['android'], bug=611930)

    # Nexus 5X
    # Skip (not Fail) because an expected failure would still run the test.
    self.Skip('GpuProcess.skip_gpu_process',
              ['android', ('qualcomm', 'Adreno (TM) 418')], bug=610951)

    # Nexus 9
    # Likewise skipped outright rather than expected to fail.
    self.Skip('GpuProcess.skip_gpu_process',
              ['android', 'nvidia'], bug=610023)
| {
"content_hash": "fab3d1ba8b02de80775e1a442f0594cf",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 79,
"avg_line_length": 38.648648648648646,
"alnum_prop": 0.6496503496503496,
"repo_name": "axinging/chromium-crosswalk",
"id": "97375eb47c3ced35058892e09fef558a436237f5",
"size": "1593",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "content/test/gpu/gpu_tests/gpu_process_expectations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "8242"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "23945"
},
{
"name": "C",
"bytes": "4103204"
},
{
"name": "C++",
"bytes": "225022948"
},
{
"name": "CSS",
"bytes": "949808"
},
{
"name": "Dart",
"bytes": "74976"
},
{
"name": "Go",
"bytes": "18155"
},
{
"name": "HTML",
"bytes": "28206993"
},
{
"name": "Java",
"bytes": "7651204"
},
{
"name": "JavaScript",
"bytes": "18831169"
},
{
"name": "Makefile",
"bytes": "96270"
},
{
"name": "Objective-C",
"bytes": "1228122"
},
{
"name": "Objective-C++",
"bytes": "7563676"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "418221"
},
{
"name": "Python",
"bytes": "7855597"
},
{
"name": "Shell",
"bytes": "472586"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18335"
}
],
"symlink_target": ""
} |
from django.forms import Form
from django.forms import CharField
from django.forms import DecimalField
from django.utils.translation import ugettext as _
from django.forms import ValidationError
from apps.asset.models import ColdStorage
from apps.asset import control
class ColdStorageAdd(Form):
    """Form for registering a cold-storage address for a given asset."""

    address = CharField(label=_("ADDRESS"))

    def __init__(self, *args, **kwargs):
        # The asset is supplied by the view, not by the form data.
        self.asset = kwargs.pop("asset")
        super(ColdStorageAdd, self).__init__(*args, **kwargs)

    def clean_address(self):
        """Reject any address the asset manager deems invalid."""
        manager = control.get_manager(self.asset)
        candidate = self.cleaned_data["address"]
        if not manager.validate(candidate):
            raise ValidationError(_("ERROR_INVALID_ADDRESS"))
        return candidate
class ColdStorageSend(Form):
    """Form for sending an amount from the hot wallet into cold storage."""

    amount = DecimalField(label=_("AMOUNT"))

    def __init__(self, *args, **kwargs):
        # The asset is supplied by the view, not by the form data.
        self.asset = kwargs.pop("asset")
        am = control.get_manager(self.asset)
        super(ColdStorageSend, self).__init__(*args, **kwargs)
        self.fields["amount"].initial = 0.0
        self.fields["amount"].decimal_places = am.decimal_places

    def clean_amount(self):
        """Validate the amount against the hot-wallet balance and require a
        non-imported cold-storage wallet to receive the funds."""
        am = control.get_manager(self.asset)
        amount = am.quantize(self.cleaned_data["amount"])
        # check max amount
        if amount > am.get_wallet_balance():
            raise ValidationError(_("INSUFFICIENT_HOT_FUNDS"))
        # Cold storage wallet must exist. A list comprehension replaces
        # filter(); on Python 3 filter() returns a lazy iterator without a
        # length, so len(filter(...)) would raise TypeError.
        coldstorages = ColdStorage.objects.filter(asset=self.asset)
        coldstorages = [cs for cs in coldstorages if cs.imported == False]
        if len(coldstorages) == 0:
            raise ValidationError(_("ERROR_NO_COLD_STORAGE"))
        return amount
class ColdStorageImport(Form):
    """Form for importing a cold-storage wallet via its private key."""
    private_key = CharField(label=_("PRIVATE_KEY"))
| {
"content_hash": "31f67cd459ba9cc54afeee30a974ddb2",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 72,
"avg_line_length": 30.072727272727274,
"alnum_prop": 0.7007255139056832,
"repo_name": "F483/bitcoin-bounties.com",
"id": "6898536e050df1a57ee7811d7dc602544c721891",
"size": "1780",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "apps/asset/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1494966"
},
{
"name": "JavaScript",
"bytes": "279366"
},
{
"name": "Python",
"bytes": "146017"
},
{
"name": "Ruby",
"bytes": "4418"
},
{
"name": "Shell",
"bytes": "4329"
}
],
"symlink_target": ""
} |
import json
import os
import re
import sys
import dcos.util as util
import six
from dcos import mesos
from dcos.util import create_schema
import pytest
from ..fixtures.node import slave_fixture
from .common import assert_command, assert_lines, exec_command, ssh_output
def test_help():
    """`dcos node --help` prints the canned help text fixture."""
    with open('tests/data/help/node.txt') as content:
        expected = six.b(content.read())
    assert_command(['dcos', 'node', '--help'], stdout=expected)
def test_info():
    """`dcos node --info` prints the one-line command summary."""
    expected = b"Administer and manage DC/OS cluster nodes\n"
    assert_command(['dcos', 'node', '--info'], stdout=expected)
def test_node():
    """Every node from `dcos node --json` validates against the slave schema."""
    returncode, stdout, stderr = exec_command(['dcos', 'node', '--json'])

    assert returncode == 0
    assert stderr == b''

    schema = _get_schema(slave_fixture())
    for node in json.loads(stdout.decode('utf-8')):
        assert not util.validate_json(node, schema)
def test_node_table():
    """Plain `dcos node` renders a table with at least one data row."""
    returncode, stdout, stderr = exec_command(['dcos', 'node'])

    assert returncode == 0
    assert stderr == b''
    rows = stdout.decode('utf-8').split('\n')
    assert len(rows) > 2
def test_node_log_empty():
    """`dcos node log` without a target node fails with a usage error."""
    expected = b"You must choose one of --leader or --mesos-id.\n"
    assert_command(['dcos', 'node', 'log'], returncode=1, stderr=expected)
def test_node_log_leader():
    """`dcos node log --leader` prints the default 10 lines."""
    cmd = ['dcos', 'node', 'log', '--leader']
    assert_lines(cmd, 10)
def test_node_log_slave():
    """`dcos node log --mesos-id=<id>` prints the default 10 lines."""
    slave_id = _node()[0]['id']
    cmd = ['dcos', 'node', 'log', '--mesos-id={}'.format(slave_id)]
    assert_lines(cmd, 10)
def test_node_log_missing_slave():
    """A bogus --mesos-id produces a clear error and no stdout."""
    returncode, stdout, stderr = exec_command(
        ['dcos', 'node', 'log', '--mesos-id=bogus'])

    assert (returncode, stdout) == (1, b'')
    assert stderr == b'No slave found with ID "bogus".\n'
def test_node_log_leader_slave():
    """Leader + agent logs come back as two '===> ... <===' sections."""
    slave_id = _node()[0]['id']
    returncode, stdout, stderr = exec_command(
        ['dcos', 'node', 'log', '--leader', '--mesos-id={}'.format(slave_id)])

    assert returncode == 0
    assert stderr == b''

    lines = stdout.decode('utf-8').split('\n')
    assert len(lines) == 23
    # Section headers sit at the start of each 11-line block.
    for header in (lines[0], lines[11]):
        assert re.match('===>.*<===', header)
def test_node_log_lines():
    """--lines limits output to the requested number of lines."""
    cmd = ['dcos', 'node', 'log', '--leader', '--lines=4']
    assert_lines(cmd, 4)
def test_node_log_invalid_lines():
    """A non-numeric --lines value is rejected with a parse error."""
    assert_command(['dcos', 'node', 'log', '--leader', '--lines=bogus'],
                   returncode=1,
                   stdout=b'',
                   stderr=b'Error parsing string as int\n')
@pytest.mark.skipif(sys.platform == 'win32',
                    reason='No pseudo terminal on windows')
def test_node_ssh_leader():
    """SSH to the leading master succeeds."""
    _node_ssh(['--leader'])
@pytest.mark.skipif(sys.platform == 'win32',
                    reason='No pseudo terminal on windows')
def test_node_ssh_slave():
    """SSH to an agent by mesos ID through the master proxy succeeds."""
    slaves = mesos.DCOSClient().get_state_summary()['slaves']
    _node_ssh(['--mesos-id={}'.format(slaves[0]['id']), '--master-proxy'])
@pytest.mark.skipif(sys.platform == 'win32',
                    reason='No pseudo terminal on windows')
def test_node_ssh_option():
    """A bad --option value surfaces ssh's own warning on stderr."""
    out, err, _ = _node_ssh_output(
        ['--leader', '--option', 'Protocol=0'])

    assert out == b''
    assert b'ignoring bad proto spec' in err
@pytest.mark.skipif(sys.platform == 'win32',
                    reason='No pseudo terminal on windows')
def test_node_ssh_config_file():
    """A custom --config-file is honored (its bad option warns on stderr)."""
    out, err, _ = _node_ssh_output(
        ['--leader', '--config-file', 'tests/data/node/ssh_config'])

    assert out == b''
    assert b'ignoring bad proto spec' in err
@pytest.mark.skipif(sys.platform == 'win32',
                    reason='No pseudo terminal on windows')
def test_node_ssh_user():
    """Connecting as an unknown user fails with a permission error."""
    out, err, _ = _node_ssh_output(
        ['--master-proxy', '--leader', '--user=bogus', '--option',
         'BatchMode=yes'])

    assert out == b''
    assert b'Permission denied' in err
def test_node_ssh_master_proxy_no_agent():
    """Without ssh-agent, --master-proxy fails with an explanatory message."""
    env = os.environ.copy()
    env.pop('SSH_AUTH_SOCK', None)

    expected = (b"There is no SSH_AUTH_SOCK env variable, which likely means "
                b"you aren't running `ssh-agent`. `dcos node ssh "
                b"--master-proxy` depends on `ssh-agent` to safely use your "
                b"private key to hop between nodes in your cluster. Please "
                b"run `ssh-agent`, then add your private key with `ssh-add`.\n")
    assert_command(['dcos', 'node', 'ssh', '--master-proxy', '--leader'],
                   returncode=1,
                   stderr=expected,
                   env=env)
@pytest.mark.skipif(sys.platform == 'win32',
                    reason='No pseudo terminal on windows')
def test_node_ssh_master_proxy():
    """SSH to the leader through the master proxy succeeds."""
    _node_ssh(['--leader', '--master-proxy'])
def test_master_arg_deprecation_notice():
    """--master is rejected with a deprecation message for log and ssh."""
    expected = b"--master has been deprecated. Please use --leader.\n"
    for subcommand in ('log', 'ssh'):
        assert_command(['dcos', 'node', subcommand, '--master'],
                       stderr=expected,
                       returncode=1)
def test_slave_arg_deprecation_notice():
    """--slave is rejected with a deprecation message for log and ssh."""
    expected = b"--slave has been deprecated. Please use --mesos-id.\n"
    for subcommand in ('log', 'ssh'):
        assert_command(['dcos', 'node', subcommand, '--slave=bogus'],
                       stderr=expected,
                       returncode=1)
@pytest.mark.skipif(sys.platform == 'win32',
                    reason='No pseudo terminal on windows')
def test_node_ssh_with_command():
    """Running a command over ssh on the leader echoes the leader's IP."""
    expected_ip = mesos.DCOSClient().get_state_summary()['hostname']
    _node_ssh(['--leader', '--master-proxy', '/opt/mesosphere/bin/detect_ip'],
              0, expected_ip)
@pytest.mark.skipif(sys.platform == 'win32',
                    reason='No pseudo terminal on windows')
def test_node_ssh_slave_with_command():
    """Running a command over ssh on an agent echoes that agent's IP."""
    slave = mesos.DCOSClient().get_state_summary()['slaves'][0]
    args = ['--mesos-id={}'.format(slave['id']), '--master-proxy',
            '/opt/mesosphere/bin/detect_ip']
    _node_ssh(args, 0, slave['hostname'])
def _node_ssh_output(args):
    """Run `dcos node ssh` inside a fresh ssh-agent.

    Returns whatever ssh_output yields for the composed command.
    """
    key_path = os.environ['CLI_TEST_SSH_KEY_PATH']
    template = ('ssh-agent /bin/bash -c "ssh-add {} 2> /dev/null && ' +
                'dcos node ssh --option StrictHostKeyChecking=no {}"')
    return ssh_output(template.format(key_path, ' '.join(args)))
def _node_ssh(args, expected_returncode=None, expected_stdout=None):
    """Invoke `dcos node ssh` with *args* and assert on its outcome.

    Appends --master-proxy when the CLI_TEST_MASTER_PROXY env var is set.
    """
    if os.environ.get('CLI_TEST_MASTER_PROXY') and \
            '--master-proxy' not in args:
        args.append('--master-proxy')
    stdout, stderr, returncode = _node_ssh_output(args)
    # NOTE(review): `is` compares identity, not equality; this works for
    # None and CPython's small-int cache but would not for arbitrary ints.
    # Confirm whether `==` was intended before changing it.
    assert returncode is expected_returncode
    if expected_stdout is not None:
        assert stdout.decode('utf-8').startswith(expected_stdout)
    assert b"Running `" in stderr
def _get_schema(slave):
    """Build a validation schema from a slave fixture, relaxing the fields
    that legitimately vary between clusters."""
    schema = create_schema(slave, True)
    # These top-level fields may be absent entirely.
    for optional in ('reregistered_time', 'reserved_resources',
                     'unreserved_resources', 'version'):
        schema['required'].remove(optional)
    # Sub-documents whose own fields should not be required.
    schema['properties']['reserved_resources']['required'] = []
    schema['properties']['unreserved_resources']['required'] = []
    for resources in ('used_resources', 'offered_resources'):
        schema['properties'][resources]['required'].remove('ports')
    return schema
def _node():
    """Return the parsed JSON node list from `dcos node --json`."""
    rc, out, err = exec_command(['dcos', 'node', '--json'])

    assert rc == 0
    assert err == b''
    return json.loads(out.decode('utf-8'))
| {
"content_hash": "53b1c50f63f6668b322ce2012bd580de",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 79,
"avg_line_length": 31.74688796680498,
"alnum_prop": 0.5974382433668801,
"repo_name": "sis-tools/dcos-cli",
"id": "9061c14ad44d2ffe8706cbd9f513bc1508d262d5",
"size": "7651",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cli/tests/integrations/test_node.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "33616"
},
{
"name": "Makefile",
"bytes": "373"
},
{
"name": "PowerShell",
"bytes": "8661"
},
{
"name": "Python",
"bytes": "488095"
},
{
"name": "Shell",
"bytes": "26652"
}
],
"symlink_target": ""
} |
"""
A collection of commands used in this workflow
"""
import json, sys, time, uuid, os, pickle
from datetime import datetime
from numbers import Number
import click
from botocore.exceptions import ClientError
#---------------------CONFIG-----------------------------
# Override these next 2 functions to change the title/subtitle in the instance list
#<instance> is a boto3 Instance object <http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#instance>
def instance_title(instance):
    """Given an instance, returns the title of the list item as a string"""
    raw_tags = getattr(instance, 'tags', None)
    tags = {t['Key']: t['Value'] for t in raw_tags} if raw_tags else {}
    name = tags.get('Name', '')
    asg_name = tags.get('aws:autoscaling:groupName', '')
    return ' '.join([name, asg_name])
def instance_subtitle(instance):
    """Given an instance, returns the subtitle of the list item as a string"""
    # Prefer the private address; fall back to public, then empty.
    ip = (getattr(instance, 'private_ip_address', None)
          or getattr(instance, 'public_ip_address', None)
          or '')
    return ' '.join([instance.id, instance.instance_type, ip,
                     instance.state['Name']])
# Instance attribute copied into an Alfred result's default "arg".
DEFAULT_OUTPUT_FIELD = 'private_ip_address'
# All on-disk caches live under this directory (created by cli()).
CACHE_DIR = 'caches'
CREDS_CACHE_FILE = os.path.join(CACHE_DIR, "creds.cache")
CREDS_CACHE_EXPIRATION_WINDOW = 2 #seconds
CONFIG_CACHE_FILE = os.path.join(CACHE_DIR, "boto-config.cache")
INSTANCES_CACHE_EXT = "aws-instances.cache"
INSTANCES_CACHE_MAX_AGE = 40 #seconds
@click.group()
def cli():
    """Main group for other subcommands"""
    # Runs before every subcommand: ensure the cache directory exists.
    if not os.path.exists(CACHE_DIR):
        os.mkdir(CACHE_DIR)
#--------------------------------------------------------
@cli.command()
def get_profiles():
    """Print a alfred-formatted list of available boto profiles"""
    items = [
        {
            "uid": name,
            "title": name,
            "arg": name,
            "autocomplete": name,
        }
        for name in get_boto_config().keys()
    ]
    click.echo(json.dumps({"items": items}))
#--------------------------------------------------------
@cli.command()
@click.argument('profile')
def check_profile(profile):
    """
    If no MFA is necessary for <profile>, exits with status 2
    If an MFA is neccessary for <profile> but the cached temporary credentials are expired, exit with status 1
    If an MFA is required for <profile> and the cached temporary credentials are still valid, exit with status 0
    """
    config = get_boto_config()[profile]
    if 'role_arn' not in config:
        sys.exit(2) #No MFA necessary, go straight to search

    cached = get_creds_cache(profile)
    now = time.time()
    if cached is None or cached['expires'] - CREDS_CACHE_EXPIRATION_WINDOW <= now:
        sys.exit(1) #Creds are expired, prompt user for MFA
    sys.exit(0) #Creds are still valid, move along
#--------------------------------------------------------
@cli.command()
@click.argument('profile')
@click.argument('token')
def prompt_for_mfa(profile, token):
    """
    Prompt a user for their MFA token, retrieve temporary credentials,
    store them in the cache, then pass them to the next stage
    """
    if len(token) < 6:
        click.echo(json.dumps({'items': [{'title': '...', 'valid': False}]}))
    elif len(token) > 6:
        click.echo(json.dumps({'items': [{'title': 'Token too long!', 'valid': False}]}))
    else:
        try:
            temp_creds = get_temp_creds(profile, token)
        except ClientError:
            # STS rejected the assume-role call (wrong/expired token).
            click.echo(json.dumps({'items': [{'title': 'Invalid token!', 'valid': False}]}))
        except Exception:
            # Was a bare `except:`, which would also swallow SystemExit and
            # KeyboardInterrupt; Exception keeps the best-effort error
            # reporting without hiding interpreter-level exits.
            click.echo(json.dumps({'items': [{'title': 'Unexpected error!', 'valid': False}]}))
        else:
            update_creds_cache(profile, temp_creds)
            click.echo(json.dumps({
                "items": [{
                    "title": "Continue",
                    # If "arg" is not set, the option will not be selectable
                    "arg": "PLACEHOLDER",
                }]
            }))
def get_temp_creds(profile, token):
    """Use STS to retrieve temporary credentials for <profile>"""
    from boto3 import Session #Late import because importing boto3 is slow
    config = get_boto_config()[profile]
    sts = Session(profile_name=config['source_profile']).client('sts')
    session_name = 'alfed-aws-{}@{}'.format(str(uuid.uuid4())[:8], profile)
    creds = sts.assume_role(
        RoleArn = config['role_arn'],
        RoleSessionName = session_name,
        DurationSeconds = 3600,
        SerialNumber = config['mfa_serial'],
        TokenCode = token,
    )['Credentials']
    # Python's datetime lib is dumb and doesn't know how to turn
    # timezone-aware datetimes into epoch timestamps. Since the datetime boto
    # returns and the datetime returned by datetime.utcfromtimestamp() are
    # both in UTC, stripping tzinfo here is safe.
    expires = (creds['Expiration'].replace(tzinfo=None)
               - datetime.utcfromtimestamp(0)).total_seconds()
    return {
        'access_key': creds['AccessKeyId'],
        'secret_key': creds['SecretAccessKey'],
        'session_token': creds['SessionToken'],
        'expires': expires,
    }
def update_creds_cache(profile, dct):
    """Update the creds cache with <dct> as its new value"""
    if os.path.exists(CREDS_CACHE_FILE):
        # Merge into the existing cache so other profiles are preserved.
        with open(CREDS_CACHE_FILE, 'r') as f:
            new_creds = json.load(f)
        new_creds[profile] = dct
    else:
        new_creds = {profile: dct}
    with open(CREDS_CACHE_FILE, 'w') as f:
        json.dump(new_creds, f)
#--------------------------------------------------------
@cli.command()
@click.option('--profile')
@click.argument('query')
def search_for_instances(profile, query):
    """
    Print an alfred-formatted list of instances in the AWS account given by <profile> that match <query>
    """
    temp_creds = get_creds_cache(profile)
    query = query.split()
    result = {"items": []}
    instances = get_instances(profile, temp_creds)
    for instance in instances:
        title = instance_title(instance)
        subtitle = instance_subtitle(instance)
        text = title + subtitle
        # Every whitespace-separated query word must appear somewhere in the
        # title+subtitle text (AND semantics).
        # NOTE(review): matching here is case-sensitive, unlike
        # filter_output_fields below -- confirm that's intended.
        match = 0
        for q in query:
            if q in text:
                match += 1
        if match == len(query):
            entry = {
                'uid': instance.id,
                'title': title or '', #Protect against potential None (unserializable)
                'subtitle': subtitle or '',
                'mods': {
                    'shift': {
                        #Pass the selected result as a string to the next node, which filters it
                        'arg': json.dumps(extract_output_fields(instance)),
                        'subtitle': "More options",
                        'valid': True
                    }
                }
            }
            # If the instance doesn't have a private IP address, the only valid action is "More options"
            arg = ({'arg': getattr(instance, DEFAULT_OUTPUT_FIELD)}
                   if hasattr(instance, DEFAULT_OUTPUT_FIELD)
                   else {'valid': False})
            entry.update(arg)
            result['items'].append(entry)
    click.echo(json.dumps(result))
def get_instances(profile, temp_creds):
    """Get a list of all instances in the account given by <profile> from AWS.

    Results are cached on disk (per profile) for INSTANCES_CACHE_MAX_AGE
    seconds. <temp_creds> may be None when the profile needs no STS role.
    """
    cache_file = os.path.join(CACHE_DIR, profile + '-' + INSTANCES_CACHE_EXT)
    if temp_creds is not None:
        cred_kwargs = {
            'aws_access_key_id': temp_creds['access_key'],
            'aws_secret_access_key': temp_creds['secret_key'],
            'aws_session_token': temp_creds['session_token'],
        }
    else:
        cred_kwargs = {}
    cache_stale = (not os.path.isfile(cache_file) or
                   os.stat(cache_file).st_mtime + INSTANCES_CACHE_MAX_AGE < time.time())
    if cache_stale:
        from boto3 import Session #Late import because importing boto3 is slow
        ec2 = Session(profile_name=profile, **cred_kwargs).resource('ec2')
        # list(...) so the result survives pickling and repeated iteration on
        # Python 3, where map() returns a one-shot iterator.
        instances = list(map(SerializableInstance, ec2.instances.all()))
        # Pickle requires binary-mode file handles ('wb'/'rb'); text mode
        # only happened to work on POSIX Python 2.
        with open(cache_file, 'wb') as f:
            pickle.dump(instances, f, pickle.HIGHEST_PROTOCOL)
        return instances
    else:
        with open(cache_file, 'rb') as f:
            return pickle.load(f)
def extract_output_fields(instance):
    """Build an Alfred item list exposing each populated instance field."""
    output_fields = (
        ('id', 'Instance ID'),
        ('image_id', 'AMI ID'),
        ('instance_type', 'Type'),
        ('private_dns_name', 'Private Hostname'),
        ('private_ip_address', 'Private IP Address'),
        ('public_dns_name', 'Public Hostname'),
        ('public_ip_address', 'Public IP Address'),
        ('subnet_id', 'Subnet ID'),
        ('vpc_id', 'VPC ID'),
    )
    items = []
    for prop, desc in output_fields:
        value = getattr(instance, prop, None)
        # Skip fields that are absent or empty on this instance.
        if value:
            items.append({
                'uid': prop,
                'title': value,
                'subtitle': desc,
                'arg': value,
            })
    return {'items': items}
#--------------------------------------------------------
@cli.command()
@click.argument('spec')
@click.argument('query')
def filter_output_fields(spec, query):
    """Filters on both title and subtitle, unlike default Alfred filtering, which filters only on title"""
    parsed = json.loads(spec)
    # Title and subtitle are checked separately (concatenating them could
    # create false matches across the boundary).
    matches = []
    for item in parsed['items']:
        if (query in item.get('title', '').lower()
                or query in item.get('subtitle', '').lower()):
            matches.append(item)
    click.echo(json.dumps({"items": matches}))
#--------------- Shared helper functions-----------------
def get_creds_cache(profile):
    """Return the creds cache for a particular profile"""
    if not os.path.exists(CREDS_CACHE_FILE):
        return None
    with open(CREDS_CACHE_FILE, 'r') as f:
        return json.load(f)[profile]
def get_boto_config():
    """Return full boto config. Caches responses for performance."""
    # Config files whose modification can invalidate the cache.
    cf_files = filter(os.path.exists, map(os.path.expanduser,
        ['~/.aws/config', '~/.aws/credentials', '/etc/boto.cfg', '~/.boto']))
    # Environment variables boto reads; a change to any of them also
    # invalidates the cache (their values are recorded alongside it).
    env_vars = ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY', 'AWS_SESSION_TOKEN',
        'AWS_DEFAULT_REGION', 'AWS_PROFILE', 'AWS_CONFIG_FILE', 'AWS_SHARED_CREDENTIALS_FILE',
        'AWS_CA_BUNDLE', 'AWS_METADATA_SERVICE_TIMEOUT', 'AWS_METADATA_SERVICE_NUM_ATTEMPTS',
        'AWS_DATA_PATH']
    if os.path.exists(CONFIG_CACHE_FILE):
        with open(CONFIG_CACHE_FILE) as f:
            cache = json.load(f)
        # Stale if any config file is newer than the cache file, or any
        # relevant env var differs from the snapshot taken at write time.
        cache_invalid = (
            any(os.stat(cf).st_mtime > os.stat(CONFIG_CACHE_FILE).st_mtime for cf in cf_files) or
            any(os.environ.get(cv) != cache['env'].get(cv) for cv in env_vars)
        )
    else:
        cache_invalid = True
    if cache_invalid:
        from boto3 import Session #late import because importing boto3 is slow
        config = Session()._session.full_config['profiles']
        with open(CONFIG_CACHE_FILE, 'w') as f:
            json.dump({'config': config, 'env': {cv: os.environ.get(cv) for cv in env_vars}}, f)
        return config
    else:
        return cache['config']
class SerializableInstance(object):
    """A wrapper for Boto3 Instance resources that is pickleable.

    Copies every attribute of *instance* whose value is built purely from
    numbers, strings, dicts and lists; everything else (methods, resource
    handles, ...) is dropped.
    """

    def __init__(self, instance):
        for prop in dir(instance):
            value = getattr(instance, prop)
            if self._is_serializable(value):
                setattr(self, prop, value)

    def _is_serializable(self, val):
        if isinstance(val, Number):
            return True
        if isinstance(val, str):
            # NOTE(review): this filters on the *value* text, presumably to
            # drop dunder-ish artifacts -- confirm it wasn't meant to test
            # the attribute name instead.
            return not val.startswith('__')
        if isinstance(val, dict):
            return all(self._is_serializable(v) for v in val.values())
        if isinstance(val, list):
            return all(self._is_serializable(i) for i in val)
        return False
# Dispatch to the click command group when run as a script.
if __name__ == '__main__':
    cli()
| {
"content_hash": "4eaf6131bd1c17f6a28bc09144c8f6fe",
"timestamp": "",
"source": "github",
"line_count": 356,
"max_line_length": 115,
"avg_line_length": 31.764044943820224,
"alnum_prop": 0.6156703218960028,
"repo_name": "maxrothman/aws-alfred-workflow",
"id": "44e4e0ed30a60ab0e33e0ad4f529c8ab273a79ce",
"size": "11308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aws.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "52779"
},
{
"name": "JavaScript",
"bytes": "15800"
},
{
"name": "Python",
"bytes": "5150800"
},
{
"name": "Shell",
"bytes": "3372"
},
{
"name": "TeX",
"bytes": "1527"
}
],
"symlink_target": ""
} |
import pytest
from time import sleep
@pytest.fixture(autouse=True)
def delay():
    # put a small delay in before every test,
    # otherwise tkinter can get upset by the constant
    # creating and destroying of apps
    sleep(0.1)
| {
"content_hash": "3c3c7b9e5215b3dff790287df4882ecf",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 54,
"avg_line_length": 26.22222222222222,
"alnum_prop": 0.711864406779661,
"repo_name": "lawsie/guizero",
"id": "2ebe23cb5b0ddf637abe3112bd002597644fa06d",
"size": "236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/conftest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8697"
},
{
"name": "HTML",
"bytes": "14526"
},
{
"name": "JavaScript",
"bytes": "5837"
},
{
"name": "Python",
"bytes": "230438"
}
],
"symlink_target": ""
} |
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import DataBoxEdgeManagementClientConfiguration
from .operations import Operations
from .operations import DevicesOperations
from .operations import AlertsOperations
from .operations import BandwidthSchedulesOperations
from .operations import JobsOperations
from .operations import NodesOperations
from .operations import OperationsStatusOperations
from .operations import OrdersOperations
from .operations import RolesOperations
from .operations import SharesOperations
from .operations import StorageAccountCredentialsOperations
from .operations import TriggersOperations
from .operations import UsersOperations
from .. import models
class DataBoxEdgeManagementClient(object):
    """The DataBoxEdge Client.

    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.databoxedge.v2019_07_01.aio.operations.Operations
    :ivar devices: DevicesOperations operations
    :vartype devices: azure.mgmt.databoxedge.v2019_07_01.aio.operations.DevicesOperations
    :ivar alerts: AlertsOperations operations
    :vartype alerts: azure.mgmt.databoxedge.v2019_07_01.aio.operations.AlertsOperations
    :ivar bandwidth_schedules: BandwidthSchedulesOperations operations
    :vartype bandwidth_schedules: azure.mgmt.databoxedge.v2019_07_01.aio.operations.BandwidthSchedulesOperations
    :ivar jobs: JobsOperations operations
    :vartype jobs: azure.mgmt.databoxedge.v2019_07_01.aio.operations.JobsOperations
    :ivar nodes: NodesOperations operations
    :vartype nodes: azure.mgmt.databoxedge.v2019_07_01.aio.operations.NodesOperations
    :ivar operations_status: OperationsStatusOperations operations
    :vartype operations_status: azure.mgmt.databoxedge.v2019_07_01.aio.operations.OperationsStatusOperations
    :ivar orders: OrdersOperations operations
    :vartype orders: azure.mgmt.databoxedge.v2019_07_01.aio.operations.OrdersOperations
    :ivar roles: RolesOperations operations
    :vartype roles: azure.mgmt.databoxedge.v2019_07_01.aio.operations.RolesOperations
    :ivar shares: SharesOperations operations
    :vartype shares: azure.mgmt.databoxedge.v2019_07_01.aio.operations.SharesOperations
    :ivar storage_account_credentials: StorageAccountCredentialsOperations operations
    :vartype storage_account_credentials: azure.mgmt.databoxedge.v2019_07_01.aio.operations.StorageAccountCredentialsOperations
    :ivar triggers: TriggersOperations operations
    :vartype triggers: azure.mgmt.databoxedge.v2019_07_01.aio.operations.TriggersOperations
    :ivar users: UsersOperations operations
    :vartype users: azure.mgmt.databoxedge.v2019_07_01.aio.operations.UsersOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: The subscription ID.
    :type subscription_id: str
    :param str base_url: Service URL
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        base_url = base_url or 'https://management.azure.com'
        self._config = DataBoxEdgeManagementClientConfiguration(credential, subscription_id, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)

        # Every operation group is wired to the same pipeline plumbing.
        operation_args = (self._client, self._config, self._serialize, self._deserialize)
        self.operations = Operations(*operation_args)
        self.devices = DevicesOperations(*operation_args)
        self.alerts = AlertsOperations(*operation_args)
        self.bandwidth_schedules = BandwidthSchedulesOperations(*operation_args)
        self.jobs = JobsOperations(*operation_args)
        self.nodes = NodesOperations(*operation_args)
        self.operations_status = OperationsStatusOperations(*operation_args)
        self.orders = OrdersOperations(*operation_args)
        self.roles = RolesOperations(*operation_args)
        self.shares = SharesOperations(*operation_args)
        self.storage_account_credentials = StorageAccountCredentialsOperations(*operation_args)
        self.triggers = TriggersOperations(*operation_args)
        self.users = UsersOperations(*operation_args)

    async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
        """Runs the network request through the client's chained policies.

        :param http_request: The network request you want to make. Required.
        :type http_request: ~azure.core.pipeline.transport.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to True.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
        """
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
        stream = kwargs.pop("stream", True)
        pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
        return pipeline_response.http_response

    async def close(self) -> None:
        await self._client.close()

    async def __aenter__(self) -> "DataBoxEdgeManagementClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
| {
"content_hash": "a248a9998292c8e1d3f41ba5823ee117",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 129,
"avg_line_length": 52.582089552238806,
"alnum_prop": 0.7320465512347432,
"repo_name": "Azure/azure-sdk-for-python",
"id": "646214481c402c936d829f8da349008a241c3b09",
"size": "7514",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/databoxedge/azure-mgmt-databoxedge/azure/mgmt/databoxedge/v2019_07_01/aio/_data_box_edge_management_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from django.contrib.contenttypes.models import ContentType
from rest_framework import serializers
from .models import Action
class ContentTypeSerializer(serializers.ModelSerializer):
    """Serializer for Django's ContentType model (all model fields)."""
    class Meta:
        model = ContentType
class StrRelatedField(serializers.Field):
    """Field that renders the related object via its str() representation."""
    def to_representation(self, value):
        return str(value)
class ActionSerializer(serializers.ModelSerializer):
    """Serializer for Action: nested content types plus str()-rendered
    actor/target. Fields not declared here (timestamp, *_link, *_object_id,
    verb) come from the Action model itself."""
    actor_content_type = ContentTypeSerializer(read_only=True)
    actor = StrRelatedField(read_only=True)
    target_content_type = ContentTypeSerializer(read_only=True)
    target = StrRelatedField(read_only=True)
    class Meta:
        model = Action
        fields = (
            'timestamp', 'target_link', 'target', 'target_object_id',
            'target_content_type', 'verb', 'actor', 'actor_content_type',
            'actor_object_id', 'actor_link',
        )
| {
"content_hash": "69d35b274878c1bfc360016caa2415a7",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 73,
"avg_line_length": 31,
"alnum_prop": 0.6947004608294931,
"repo_name": "thelabnyc/django-social-timeline",
"id": "bcfa331b3c184c5aa58afec66714c049c1131088",
"size": "868",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "social_timeline/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16189"
}
],
"symlink_target": ""
} |
from collections import defaultdict
from http import HTTPStatus
from typing import Callable
from typing import Tuple
from intezer_sdk import consts
from intezer_sdk.analysis import FileAnalysis
from intezer_sdk.analysis import UrlAnalysis
from intezer_sdk.api import IntezerApi
from intezer_sdk.endpoint_analysis import EndpointAnalysis
from intezer_sdk.errors import AnalysisIsAlreadyRunning
from intezer_sdk.errors import AnalysisIsStillRunning
from intezer_sdk.errors import FamilyNotFoundError
from intezer_sdk.errors import HashDoesNotExistError
from intezer_sdk.errors import InvalidApiKey
from intezer_sdk.errors import ServerError
from intezer_sdk.family import Family
from intezer_sdk.sub_analysis import SubAnalysis
from requests import HTTPError
from CommonServerPython import *
''' CONSTS '''
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
# Relative endpoint used by test-module to ping the Intezer service.
IS_AVAILABLE_URL = 'is-available'
# Identifies XSOAR as the submitter on analyses sent to Intezer.
REQUESTER = 'xsoar'
# Maps Intezer verdicts to DBot scores (3=malicious, 2=suspicious, 1=good);
# verdicts missing from this map score 0 (unknown).
dbot_score_by_verdict = {
    'malicious': 3,
    'suspicious': 2,
    'trusted': 1,
    'neutral': 1,
    'no_threats': 1
}
''' HELPER FUNCTIONS '''
def _get_missing_file_result(file_hash: str) -> CommandResults:
    """Build a score-0 DBot entry for a hash unknown to the Intezer genome DB."""
    unknown_hash_entry = dict(Vendor='Intezer', Type='hash', Indicator=file_hash, Score=0)
    return CommandResults(
        readable_output=f'The Hash {file_hash} was not found on Intezer genome database',
        outputs={outputPaths['dbotscore']: unknown_hash_entry},
    )
def _get_missing_url_result(url: str, ex: ServerError = None) -> CommandResults:
    """Build a score-0 DBot entry for a URL Intezer could not analyze."""
    unknown_url_entry = dict(Vendor='Intezer', Type='Url', Indicator=url, Score=0)
    return CommandResults(
        readable_output=f'The Url {url} was not found on Intezer. Error {ex}',
        outputs={outputPaths['dbotscore']: unknown_url_entry},
    )
def _get_missing_analysis_result(analysis_id: str, sub_analysis_id: str = None) -> CommandResults:
    """Readable-only result for an analysis (or sub-analysis) that was not found."""
    if sub_analysis_id:
        output = f'Could not find the analysis \'{analysis_id}\' or the sub analysis \'{sub_analysis_id}\''
    else:
        output = f'The Analysis {analysis_id} was not found on Intezer Analyze'
    return CommandResults(readable_output=output)
def _get_missing_endpoint_analysis_result(analysis_id: str) -> CommandResults:
    """Readable-only result for an endpoint analysis that was not found."""
    return CommandResults(
        readable_output=f'Could not find the endpoint analysis \'{analysis_id}\''
    )
def _get_missing_family_result(family_id: str) -> CommandResults:
    """Readable-only result for a family id unknown to Intezer Analyze."""
    message = f'The Family {family_id} was not found on Intezer Analyze'
    return CommandResults(readable_output=message)
def _get_analysis_running_result(analysis_type: str,
                                 analysis_id: str = None,
                                 response: requests.Response = None) -> CommandResults:
    """Context entry marking an analysis as still in progress.

    When a conflict `response` is supplied, the running analysis id is
    recovered from its `result_url` field.
    """
    if response:
        analysis_id = response.json()['result_url'].split('/')[2]
    in_progress_entry = dict(ID=analysis_id, Status='InProgress', Type=analysis_type)
    return CommandResults(
        outputs_prefix='Intezer.Analysis',
        outputs_key_field='ID',
        readable_output='Analysis is still in progress',
        outputs=in_progress_entry,
    )
''' COMMANDS '''
def check_is_available(intezer_api: IntezerApi, args: dict) -> str:
    """Implement test-module: verify the Intezer service is reachable.

    Returns 'ok' on success, otherwise a human-readable error string (XSOAR
    displays whatever test-module returns).
    """
    try:
        response = intezer_api.get_url_result(f'/{IS_AVAILABLE_URL}')
        return 'ok' if response else 'Empty response from intezer service'
    except InvalidApiKey as error:
        return f'Invalid API key received.\n{error}'
    except HTTPError as error:
        return f'Error occurred when reaching Intezer Analyze. Please check Analyze Base URL. \n{error}'
    except (requests.ConnectionError, ConnectionError) as error:
        # BUG FIX: requests' ConnectionError does not subclass the builtin
        # ConnectionError, so real network failures previously escaped this
        # handler and surfaced as a generic command error. Catch both.
        return f'Error connecting to Analyze Base url.\n{error}'
def analyze_by_hash_command(intezer_api: IntezerApi, args: Dict[str, str]) -> CommandResults:
    """Submit a file hash for analysis and return the created-analysis context.

    Raises ValueError when no hash is supplied; missing hashes and
    already-running analyses are reported as readable results.
    """
    file_hash = args.get('file_hash')
    if not file_hash:
        raise ValueError('Missing file hash')
    analysis = FileAnalysis(file_hash=file_hash, api=intezer_api)
    try:
        analysis.send(requester=REQUESTER)
    except HashDoesNotExistError:
        return _get_missing_file_result(file_hash)
    except AnalysisIsAlreadyRunning as error:
        return _get_analysis_running_result(analysis_type='File', response=error.response)
    new_analysis_id = analysis.analysis_id
    return CommandResults(
        outputs_prefix='Intezer.Analysis',
        outputs_key_field='ID',
        outputs={'ID': new_analysis_id, 'Status': 'Created', 'Type': 'File'},
        readable_output='Analysis created successfully: {}'.format(new_analysis_id)
    )
def analyze_url_command(intezer_api: IntezerApi, args: Dict[str, str]) -> CommandResults:
    """Submit a URL for analysis and return the created-analysis context.

    Raises ValueError when no url is supplied; server errors and
    already-running analyses are reported as readable results.
    """
    url = args.get('url')
    if not url:
        raise ValueError('Missing url')
    url_analysis = UrlAnalysis(url=url, api=intezer_api)
    try:
        url_analysis.send(requester=REQUESTER)
    except AnalysisIsAlreadyRunning as error:
        return _get_analysis_running_result('Url', response=error.response)
    except ServerError as ex:
        return _get_missing_url_result(url, ex)
    new_analysis_id = url_analysis.analysis_id
    return CommandResults(
        outputs_prefix='Intezer.Analysis',
        outputs_key_field='ID',
        outputs={'ID': new_analysis_id, 'Status': 'Created', 'Type': 'Url'},
        readable_output='Analysis created successfully: {}'.format(new_analysis_id)
    )
def get_latest_result_command(intezer_api: IntezerApi, args: Dict[str, str]) -> CommandResults:
    """Fetch the most recent existing analysis for a hash and render it."""
    file_hash = args.get('file_hash')
    if not file_hash:
        raise ValueError('Missing file hash')
    latest = FileAnalysis.from_latest_hash_analysis(file_hash=file_hash, api=intezer_api, requester=REQUESTER)
    if not latest:
        return _get_missing_file_result(file_hash)
    metadata = latest.get_root_analysis().metadata
    return enrich_dbot_and_display_file_analysis_results(latest.result(), metadata)
def analyze_by_uploaded_file_command(intezer_api: IntezerApi, args: dict) -> CommandResults:
    """Upload a war-room file entry to Intezer and return the created-analysis context."""
    entry_id = args.get('file_entry_id')
    file_data = demisto.getFilePath(entry_id)
    try:
        upload = FileAnalysis(file_path=file_data['path'], file_name=file_data['name'], api=intezer_api)
        upload.send(requester=REQUESTER)
    except AnalysisIsAlreadyRunning as error:
        return _get_analysis_running_result('File', response=error.response)
    return CommandResults(
        outputs_prefix='Intezer.Analysis',
        outputs_key_field='ID',
        outputs={'ID': upload.analysis_id, 'Status': 'Created', 'Type': 'File'},
        readable_output='Analysis created successfully: {}'.format(upload.analysis_id)
    )
def check_analysis_status_and_get_results_command(intezer_api: IntezerApi, args: dict) -> List[CommandResults]:
    """Poll one or more analyses and render results per analysis type.

    Supports File (default), Url and Endpoint analyses. Each id yields one
    CommandResults: an enriched report when finished, an in-progress entry on
    409/AnalysisIsStillRunning, or a not-found entry on 404/missing.
    """
    analysis_type = args.get('analysis_type', 'File')
    analysis_ids = argToList(args.get('analysis_id'))
    indicator_name = args.get('indicator_name')
    command_results = []
    file_metadata = {}
    for analysis_id in analysis_ids:
        try:
            if analysis_type == 'Endpoint':
                analysis = EndpointAnalysis.from_analysis_id(analysis_id, intezer_api)
                if not analysis:
                    command_results.append(_get_missing_endpoint_analysis_result(analysis_id))
                    continue
                analysis_result = analysis.result()
            elif analysis_type == 'Url':
                analysis = UrlAnalysis.from_analysis_id(analysis_id, api=intezer_api)
                if not analysis:
                    command_results.append(_get_missing_url_result(analysis_id))
                    continue
                else:
                    analysis_result = analysis.result()
            else:
                # File analyses also need the root metadata (hashes) for DBot enrichment.
                analysis = FileAnalysis.from_analysis_id(analysis_id, api=intezer_api)
                if not analysis:
                    command_results.append(_get_missing_analysis_result(analysis_id))
                    continue
                else:
                    analysis_result = analysis.result()
                    file_metadata = analysis.get_root_analysis().metadata
            if analysis_result and analysis_type == 'Endpoint':
                command_results.append(
                    enrich_dbot_and_display_endpoint_analysis_results(analysis_result, indicator_name))
            elif analysis_result and analysis_type == 'Url':
                command_results.append(
                    enrich_dbot_and_display_url_analysis_results(analysis_result, intezer_api))
            elif analysis_result:
                command_results.append(enrich_dbot_and_display_file_analysis_results(analysis_result, file_metadata))
        except HTTPError as http_error:
            # 409 = still running, 404 = unknown id; anything else propagates.
            if http_error.response.status_code == HTTPStatus.CONFLICT:
                command_results.append(_get_analysis_running_result(analysis_type, analysis_id=analysis_id))
            elif http_error.response.status_code == HTTPStatus.NOT_FOUND:
                command_results.append(_get_missing_analysis_result(analysis_id))
            else:
                raise http_error
        except AnalysisIsStillRunning:
            command_results.append(_get_analysis_running_result(analysis_type, analysis_id=analysis_id))
    return command_results
def get_analysis_sub_analyses_command(intezer_api: IntezerApi, args: dict) -> CommandResults:
    """List the sub-analysis ids of a composed file analysis."""
    analysis_id = args.get('analysis_id')
    try:
        composed_analysis = FileAnalysis.from_analysis_id(analysis_id, api=intezer_api)
    except AnalysisIsStillRunning:
        return _get_analysis_running_result('File', analysis_id=str(analysis_id))
    if not composed_analysis:
        return _get_missing_analysis_result(analysis_id=str(analysis_id))
    sub_ids = [sub.analysis_id for sub in composed_analysis.get_sub_analyses()]
    return CommandResults(
        outputs_prefix='Intezer.Analysis',
        outputs_key_field='ID',
        readable_output=tableToMarkdown('Sub Analyses', sub_ids, headers=['Analysis IDs']),
        outputs={'ID': composed_analysis.analysis_id, 'SubAnalysesIDs': sub_ids},
        raw_response=sub_ids
    )
def get_analysis_code_reuse_command(intezer_api: IntezerApi, args: dict) -> CommandResults:
    """Fetch code-reuse data for an analysis (or one of its sub-analyses).

    sub_analysis_id defaults to 'root'; the context path differs between root
    and non-root so the entry lands under the right parent analysis.
    """
    analysis_id = args.get('analysis_id')
    sub_analysis_id = args.get('sub_analysis_id', 'root')
    try:
        sub_analysis: SubAnalysis = SubAnalysis.from_analysis_id(sub_analysis_id, analysis_id, api=intezer_api)
        sub_analysis_code_reuse = sub_analysis.code_reuse
    except HTTPError as error:
        # 404 = unknown analysis, 409 = still running; other codes propagate.
        if error.response.status_code == HTTPStatus.NOT_FOUND:
            return _get_missing_analysis_result(analysis_id=str(analysis_id))
        elif error.response.status_code == HTTPStatus.CONFLICT:
            return _get_analysis_running_result('File', analysis_id=str(analysis_id))
        raise
    if not sub_analysis_code_reuse:
        return CommandResults(
            readable_output='No code reuse for this analysis'
        )
    # Families are rendered as their own tables, so split them out of the dict.
    families = sub_analysis_code_reuse.pop('families') if 'families' in sub_analysis_code_reuse else None
    readable_output = tableToMarkdown('Code Reuse', sub_analysis_code_reuse)
    if families:
        readable_output += '\nFamilies:\n'
        readable_output += '\n'.join(tableToMarkdown(family['family_name'], family) for family in families)
    is_root = sub_analysis_id == 'root'
    if is_root:
        context_json = {
            'Intezer.Analysis(obj.ID == val.ID)': {
                'ID': analysis_id,
                'CodeReuse': sub_analysis_code_reuse,
                'CodeReuseFamilies': families
            }
        }
    else:
        context_json = {
            'Intezer.Analysis(obj.RootAnalysis == val.ID).SubAnalyses(obj.ID == val.ID)': {
                'ID': sub_analysis_id,
                'RootAnalysis': analysis_id,
                'CodeReuse': sub_analysis_code_reuse,
                'CodeReuseFamilies': families
            }
        }
    return CommandResults(
        readable_output=readable_output,
        outputs=context_json,
        raw_response=sub_analysis.code_reuse
    )
def get_analysis_metadata_command(intezer_api: IntezerApi, args: dict) -> CommandResults:
    """Fetch metadata for an analysis (or one of its sub-analyses).

    Builds a lightweight SubAnalysis handle directly (sha256/source left
    empty — only the ids are needed to fetch metadata).
    """
    analysis_id = args.get('analysis_id')
    sub_analysis_id = args.get('sub_analysis_id', 'root')
    try:
        sub_analysis: SubAnalysis = SubAnalysis(analysis_id=sub_analysis_id,
                                                composed_analysis_id=analysis_id,
                                                sha256='',
                                                source='',
                                                extraction_info=None,
                                                api=intezer_api)
        sub_analysis_metadata = sub_analysis.metadata
    except HTTPError as error:
        # 404 = unknown analysis, 409 = still running; other codes propagate.
        if error.response.status_code == HTTPStatus.NOT_FOUND:
            return _get_missing_analysis_result(analysis_id=str(analysis_id))
        elif error.response.status_code == HTTPStatus.CONFLICT:
            return _get_analysis_running_result('File', analysis_id=str(analysis_id))
        raise
    metadata_table = tableToMarkdown('Analysis Metadata', sub_analysis_metadata)
    is_root = sub_analysis_id == 'root'
    # Root metadata attaches to the analysis itself; sub-analysis metadata
    # nests under its parent analysis in the context.
    if is_root:
        context_json = {
            'Intezer.Analysis(obj.ID == val.ID)': {
                'ID': analysis_id,
                'Metadata': sub_analysis_metadata
            }
        }
    else:
        context_json = {
            'Intezer.Analysis(obj.RootAnalysis == val.ID).SubAnalyses(obj.ID == val.ID)': {
                'ID': sub_analysis_id,
                'RootAnalysis': analysis_id,
                'Metadata': sub_analysis_metadata
            }
        }
    return CommandResults(
        readable_output=metadata_table,
        outputs=context_json,
        raw_response=sub_analysis_metadata
    )
def get_analysis_iocs_command(intezer_api: IntezerApi, args: dict) -> CommandResults:
    """Fetch network/file IOCs collected by a finished file analysis."""
    analysis_id = args.get('analysis_id')
    try:
        analysis = FileAnalysis.from_analysis_id(analysis_id, api=intezer_api)
    except HTTPError as error:
        # 409 = still running; other codes propagate.
        if error.response.status_code == HTTPStatus.CONFLICT:
            return _get_analysis_running_result('File', analysis_id=str(analysis_id))
        raise
    if not analysis:
        return _get_missing_analysis_result(analysis_id=str(analysis_id))
    iocs = analysis.iocs
    readable_output = ''
    if iocs:
        # Render whichever IOC sections are present.
        if network_iocs := iocs.get('network'):
            readable_output += tableToMarkdown('Network IOCs', network_iocs)
        if files_iocs := iocs.get('files'):
            readable_output += tableToMarkdown('Files IOCs', files_iocs)
    else:
        readable_output = 'No IOCs found'
    context_json = {
        'Intezer.Analysis(obj.ID == val.ID)': {
            'ID': analysis_id,
            'IOCs': iocs
        }
    }
    return CommandResults(
        readable_output=readable_output,
        outputs=context_json,
        raw_response=iocs
    )
def get_family_info_command(intezer_api: IntezerApi, args: dict) -> CommandResults:
    """Look up an Intezer family by id and return its name and type."""
    family_id = args.get('family_id')
    family = Family(family_id, api=intezer_api)
    try:
        family.fetch_info()
    except FamilyNotFoundError:
        return _get_missing_family_result(str(family_id))
    family_context = {'ID': family_id, 'Name': family.name, 'Type': family.type}
    return CommandResults(
        readable_output=tableToMarkdown('Family Info', family_context),
        outputs_prefix='Intezer.Family',
        outputs=family_context
    )
# region Enrich DBot
def enrich_dbot_and_display_file_analysis_results(intezer_result: dict, file_metadata: dict) -> CommandResults:
    """Render a finished file analysis as DBot/File context plus markdown.

    NOTE: mutates `intezer_result` in place (adds sha1/md5 from the root
    metadata) before rendering.
    """
    verdict = intezer_result.get('verdict')
    sha256 = intezer_result.get('sha256')
    analysis_id = intezer_result.get('analysis_id')
    md5 = file_metadata.get('md5')
    sha1 = file_metadata.get('sha1')
    dbot_entry, file = _get_dbot_score_and_file_entries(intezer_result, file_metadata)
    if verdict == 'malicious':
        file['Malicious'] = {'Vendor': 'Intezer'}
    intezer_result['sha1'] = sha1
    intezer_result['md5'] = md5
    presentable_result = _file_analysis_presentable_code(intezer_result, sha256, verdict)
    return CommandResults(
        readable_output=presentable_result,
        raw_response=intezer_result,
        outputs={
            outputPaths['dbotscore']: dbot_entry,
            outputPaths['file']: file,
            # Marks the polled analysis as finished in the Intezer context.
            'Intezer.Analysis(val.ID && val.ID == obj.ID)': {'ID': analysis_id, 'Status': 'Done'}
        }
    )
def _get_dbot_score_and_file_entries(file_analysis_result: dict, file_metadata: dict) -> Tuple[List[dict], dict]:
    """Build DBot score entries (sha256/sha1/md5) plus a File context entry.

    The score comes from the analysis verdict; unmapped verdicts score 0.
    """
    verdict: str = file_analysis_result.get('verdict', '')
    score = dbot_score_by_verdict.get(verdict, 0)
    sha256 = file_metadata.get('sha256')
    sha1 = file_metadata.get('sha1')
    md5 = file_metadata.get('md5')
    dbot_entries = [
        {'Vendor': 'Intezer', 'Type': 'file', 'Indicator': indicator, 'Score': score}
        for indicator in (sha256, sha1, md5)
    ]
    file_entry = {'SHA256': sha256, 'MD5': md5, 'SHA1': sha1,
                  'Metadata': file_analysis_result, 'ExistsInIntezer': True}
    return dbot_entries, file_entry
def _file_analysis_presentable_code(intezer_result: dict, sha256: str = None, verdict: str = None):
    """Render a markdown report for a file analysis result.

    sha256/verdict default to the values embedded in the result dict.
    """
    if not sha256:
        sha256 = intezer_result['sha256']
    if not verdict:
        verdict = intezer_result['verdict']
    report_lines = [
        '## Intezer File analysis result\n',
        f' SHA256: {sha256}\n',
        f' Verdict: **{verdict}** ({intezer_result["sub_verdict"]})\n',
    ]
    if 'family_name' in intezer_result:
        report_lines.append(f'Family: **{intezer_result["family_name"]}**\n')
    report_lines.append(f'[Analysis Link]({intezer_result["analysis_url"]})\n')
    report_lines.append(tableToMarkdown('Analysis Report', intezer_result, url_keys=['analysis_url']))
    return ''.join(report_lines)
def get_indicator_text(classification: str, indicators: dict) -> str:
    """One line like 'Malicious: a, b' for a classification, or '' if absent."""
    values = indicators.get(classification)
    if values is None:
        return ''
    return '{}: {}'.format(classification.capitalize(), ', '.join(values))
def enrich_dbot_and_display_url_analysis_results(intezer_result, intezer_api):
    """Render a finished URL analysis as DBot/URL context plus markdown.

    NOTE: heavily mutates `intezer_result` in place: the summary is merged in,
    redirect chain and indicators are flattened to strings, and a downloaded
    file (if any) is replaced by its full analysis result.
    """
    summary = intezer_result.pop('summary')
    _refine_gene_counts(summary)
    intezer_result.update(summary)
    verdict = summary['verdict_type']
    submitted_url = intezer_result['submitted_url']
    scanned_url = intezer_result['scanned_url']
    analysis_id = intezer_result['analysis_id']
    dbot = [{
        'Vendor': 'Intezer',
        'Type': 'Url',
        'Indicator': submitted_url,
        'Score': dbot_score_by_verdict.get(verdict, 0)
    }]
    # Score the final URL too when the scan was redirected elsewhere.
    if scanned_url != submitted_url:
        dbot.append({
            'Vendor': 'Intezer',
            'Type': 'Url',
            'Indicator': scanned_url,
            'Score': dbot_score_by_verdict.get(verdict, 0)
        })
    url = {'URL': submitted_url, 'Data': submitted_url, 'Metadata': intezer_result, 'ExistsInIntezer': True}
    if verdict == 'malicious':
        url['Malicious'] = {'Vendor': 'Intezer'}
    if 'redirect_chain' in intezer_result:
        # Flatten the chain to a single 'status: url → status: url' string.
        redirect_chain = ' → '.join(f'{node["response_status"]}: {node["url"]}'
                                    for node in intezer_result['redirect_chain'])
        intezer_result['redirect_chain'] = redirect_chain
    if 'indicators' in intezer_result:
        # Group indicator texts by classification, then render in fixed order.
        indicators: Dict[str, List[str]] = defaultdict(list)
        for indicator in intezer_result['indicators']:
            indicators[indicator['classification']].append(indicator['text'])
        indicators_text = [
            get_indicator_text('malicious', indicators),
            get_indicator_text('suspicious', indicators),
            get_indicator_text('informative', indicators),
        ]
        intezer_result['indicators'] = '\n'.join(indicator_text for indicator_text in indicators_text if indicator_text)
    presentable_result = '## Intezer Url analysis result\n'
    presentable_result += f' Url: {submitted_url}\n'
    presentable_result += f' Verdict: **{verdict}** ({summary["verdict_name"]})\n'
    presentable_result += f'[Analysis Link]({intezer_result["analysis_url"]})\n'
    downloaded_file_presentable_result = ''
    file_entry: dict = {}
    if 'downloaded_file' in intezer_result:
        # The URL scan downloaded a file: pull its full file analysis and
        # fold its DBot/File entries into this result.
        downloaded_file = intezer_result.pop('downloaded_file')
        presentable_result += f'Downloaded file SHA256: {downloaded_file["sha256"]}\n'
        presentable_result += f'Downloaded file Verdict: **{downloaded_file["analysis_summary"]["verdict_type"]}**\n'
        downloaded_file_analysis = FileAnalysis.from_analysis_id(downloaded_file['analysis_id'], intezer_api)
        download_file_result = downloaded_file_analysis.result()
        intezer_result['downloaded_file'] = download_file_result
        metadata = downloaded_file_analysis.get_root_analysis().metadata
        file_dbot_entry, file_entry = _get_dbot_score_and_file_entries(download_file_result, metadata)
        sha1 = metadata.get('sha1')
        md5 = metadata.get('md5')
        download_file_result['sha1'] = sha1
        download_file_result['md5'] = md5
        downloaded_file_presentable_result = _file_analysis_presentable_code(download_file_result)
        dbot.extend(file_dbot_entry)
        file_entry = {outputPaths['file']: file_entry}
    md = tableToMarkdown('Analysis Report', intezer_result, url_keys=['analysis_url'])
    presentable_result += md + downloaded_file_presentable_result
    return CommandResults(
        readable_output=presentable_result,
        raw_response=intezer_result,
        outputs={
            outputPaths['dbotscore']: dbot,
            outputPaths['url']: url,
            **file_entry,
            'Intezer.Analysis(val.ID && val.ID == obj.ID)': {'ID': analysis_id, 'Status': 'Done'}
        }
    )
def _refine_gene_counts(summary: dict):
summary.pop('main_connection_gene_count', None)
summary.pop('main_connection_gene_percentage', None)
summary.pop('main_connection', None)
summary.pop('main_connection_family_id', None)
summary.pop('main_connection_software_type', None)
summary.pop('main_connection_classification', None)
def enrich_dbot_and_display_endpoint_analysis_results(intezer_result, indicator_name=None) -> CommandResults:
    """Render a finished endpoint analysis as DBot/Endpoint context plus markdown.

    `indicator_name`, when given, overrides the scanned computer name as the
    DBot hostname indicator.
    """
    verdict = intezer_result['verdict']
    computer_name = intezer_result['computer_name']
    analysis_id = intezer_result['analysis_id']
    dbot = {
        'Vendor': 'Intezer',
        'Type': 'hostname',
        'Indicator': indicator_name if indicator_name else computer_name,
        'Score': dbot_score_by_verdict.get(verdict, 0)
    }
    endpoint = {'Metadata': intezer_result}
    presentable_result = '## Intezer Endpoint analysis result\n'
    presentable_result += f'Host Name: {computer_name}\n'
    presentable_result += f' Verdict: **{verdict}**\n'
    if intezer_result.get('families') is not None:
        presentable_result += f'Families: **{intezer_result["families"]}**\n'
    presentable_result += f' Scan Time: {intezer_result["scan_start_time"]}\n'
    presentable_result += f'[Analysis Link]({intezer_result["analysis_url"]})\n'
    return CommandResults(
        readable_output=presentable_result,
        raw_response=intezer_result,
        outputs={
            outputPaths['dbotscore']: dbot,
            'Endpoint': endpoint,
            # Marks the polled analysis as finished in the Intezer context.
            'Intezer.Analysis(val.ID && val.ID == obj.ID)': {'ID': analysis_id, 'Status': 'Done'}
        }
    )
# endregion
''' EXECUTION CODE '''
def main():
    """Entry point: build the Intezer API client and dispatch the XSOAR command."""
    command = None
    try:
        handle_proxy()
        intezer_api_key = demisto.getParam('APIKey')
        intezer_base_url_param = demisto.getParam('AnalyzeBaseURL')
        use_ssl = not demisto.params().get('insecure', False)
        # Fall back to the SDK's default base URL when none is configured.
        analyze_base_url = intezer_base_url_param or consts.BASE_URL
        intezer_api = IntezerApi(consts.API_VERSION,
                                 intezer_api_key,
                                 analyze_base_url,
                                 use_ssl,
                                 user_agent=get_pack_version())
        # Dispatch table: XSOAR command name -> handler(intezer_api, args).
        command_handlers: Dict[str, Callable[[IntezerApi, dict], Union[List[CommandResults], CommandResults, str]]] = {
            'test-module': check_is_available,
            'intezer-analyze-by-hash': analyze_by_hash_command,
            'intezer-analyze-by-file': analyze_by_uploaded_file_command,
            'intezer-analyze-url': analyze_url_command,
            'intezer-get-latest-report': get_latest_result_command,
            'intezer-get-analysis-result': check_analysis_status_and_get_results_command,
            'intezer-get-sub-analyses': get_analysis_sub_analyses_command,
            'intezer-get-analysis-code-reuse': get_analysis_code_reuse_command,
            'intezer-get-analysis-metadata': get_analysis_metadata_command,
            'intezer-get-analysis-iocs': get_analysis_iocs_command,
            'intezer-get-family-info': get_family_info_command
        }
        command = demisto.command()
        command_handler = command_handlers[command]
        command_results = command_handler(intezer_api, demisto.args())
        return_results(command_results)
    except Exception as e:
        # Any failure (including unknown commands) surfaces as an XSOAR error entry.
        return_error(f'Failed to execute {command} command. Error: {str(e)}')
# python2 uses __builtin__ python3 uses builtins
# XSOAR executes integration code via exec(), so __name__ is the builtins
# module name rather than "__main__".
if __name__ == "__builtin__" or __name__ == "builtins":
    main()
| {
"content_hash": "f7610b2c2372b5e47a04868c8948da3c",
"timestamp": "",
"source": "github",
"line_count": 738,
"max_line_length": 120,
"avg_line_length": 35.8739837398374,
"alnum_prop": 0.6216052880075543,
"repo_name": "demisto/content",
"id": "97585759de0e6067252dd620ef9bbce95d7f7b1a",
"size": "26477",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/Intezer/Integrations/IntezerV2/IntezerV2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47881712"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
} |
'''Fetch a bunch of URLs and store them permanently on-disk.
Uses requests-cache and a sqlite database. Does rate-throttling.
Usage:
./url_fetcher.py path-to-list-of.urls.txt
'''
import fileinput
import os
import requests
import requests_cache
import time
import sys
class NotInCacheError(Exception):
    """Raised when a URL is requested from the cache but was never fetched."""
    pass
class Fetcher(object):
    """Fetch URLs through an on-disk requests-cache, throttling real fetches.

    Responses served from the cache return immediately; only requests that
    actually hit the network count against the rate limit.
    """
    def __init__(self, throttle_secs=1.0):
        self._session = requests_cache.CachedSession('.url_fetcher_cache')
        self._throttle_secs = throttle_secs
        self._last_fetch = 0.0  # wall-clock time of the last network fetch

    def fetch_url(self, url, force_refetch=False):
        """Return the body of url, fetching (and caching) it if necessary.

        Raises requests.HTTPError for non-2xx responses.
        """
        if force_refetch:
            self.remove_url_from_cache(url)
        req = self._make_request(url)
        response = self._session.send(req)
        end_t = time.time()
        response.raise_for_status()  # checks for status == 200 OK
        if not response.from_cache:
            elapsed = end_t - self._last_fetch
            if elapsed < self._throttle_secs:
                # BUG FIX: sleep for the *remaining* quota, not the elapsed
                # time -- the old code slept less the faster we re-fetched,
                # which is backwards for a throttle.
                wait_s = self._throttle_secs - elapsed
                sys.stderr.write('Waiting %s secs...\n' % wait_s)
                time.sleep(wait_s)
            # Only network fetches reset the throttle clock; cache hits are free.
            self._last_fetch = end_t
        return response.content

    def is_url_in_cache(self, url):
        """True if the cache already holds a response for url."""
        return self._cache().has_url(url)

    def fetch_url_from_cache(self, url):
        """Return the cached body of url; raise NotInCacheError if absent."""
        req = self._make_request(url)
        cache_key = self._cache().create_key(req)
        response, _ = self._cache().get_response_and_time(cache_key)
        if not response:
            raise NotInCacheError()
        return response.content

    def remove_url_from_cache(self, url):
        """Forget any cached response for url."""
        self._cache().delete_url(url)

    def _cache(self):
        return self._session.cache

    def _make_request(self, url):
        # By constructing the request outside the Session, we avoid attaching
        # unwanted cookie headers to subsequent requests, which might break the
        # cache.
        return requests.Request('GET', url).prepare()
# Python 2 script body: reads URLs (optionally "filename<TAB>url") from the
# files named on the command line or stdin, fetching each through the cache.
if __name__ == '__main__':
    f = Fetcher()
    for i, line in enumerate(fileinput.input()):
        line = line.strip()
        if '\t' in line:
            # "filename<TAB>url": save the body to filename.
            filename, url = line.split('\t')
        else:
            filename = None
            url = line
        print '%5d Fetching %s' % (i + 1, url)
        content = f.fetch_url(url)
        if filename:
            open(filename, 'wb').write(content)
| {
"content_hash": "b1a8e8a4425dd47e126fd8a2055390ae",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 86,
"avg_line_length": 28.938271604938272,
"alnum_prop": 0.5998293515358362,
"repo_name": "nypl-spacetime/oldnyc",
"id": "bcfd38e95727a33b345bbcfa66373180d4220c42",
"size": "2366",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ocr/url_fetcher.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "86183"
},
{
"name": "CSS",
"bytes": "11081"
},
{
"name": "HTML",
"bytes": "33644"
},
{
"name": "JavaScript",
"bytes": "140699"
},
{
"name": "Python",
"bytes": "299190"
},
{
"name": "Shell",
"bytes": "3016"
}
],
"symlink_target": ""
} |
from django.core.cache import cache
from django.http import HttpRequest
from django.test import TestCase
from django.test.utils import override_settings
# Contentious (local app) imports
from contentious.compat import get_request_context
from .api import BasicEditAPI
class APITest(TestCase):
    """ Tests for the BasicEditAPI. """
    urls = 'contentious.tests.urls'
    @override_settings(TEMPLATE_CONTEXT_PROCESSORS=["django.core.context_processors.request",])
    def test_save_and_get_data(self):
        """Round-trip content data through the API and verify it persists
        across a fresh request and a cleared cache."""
        api = BasicEditAPI()
        request = HttpRequest()
        request.path = '/test_view/'
        context = get_request_context(request)
        #First test that trying to get content for something that hasn't been saved returns {}
        result = api.get_content_data('some_key', context)
        self.assertEqual(result, {})
        #Now save some data
        data = {'content': 'pineapple', 'href': 'http://www.google.com/'}
        api.save_content_data('some_key', data, context)
        #Now calling get_content_data should return that data
        result = api.get_content_data('some_key', context)
        self.assertIsSubDict(data, result)
        #And now even if we have a new request object (so that there's no on-request caching)
        #and even if memcache is cleared, we should still get the same result
        cache.clear()
        request = HttpRequest()
        request.path = '/test_view/'
        context = get_request_context(request)
        result = api.get_content_data('some_key', context)
        self.assertIsSubDict(data, result)
    def assertIsSubDict(self, subdict, superdict):
        """Assert every key/value pair of subdict appears in superdict."""
        for k, v in subdict.items():
            self.assertTrue(k in superdict)
            self.assertEqual(superdict[k], v)
| {
"content_hash": "04d25701aef9f964f52f77e452d2196a",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 95,
"avg_line_length": 39.5,
"alnum_prop": 0.667433831990794,
"repo_name": "potatolondon/contentious",
"id": "63124f72a9c5657435fbea260a6c6737721764ea",
"size": "1749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contentious/contrib/basicedit/tests.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "1810"
},
{
"name": "JavaScript",
"bytes": "25343"
},
{
"name": "Python",
"bytes": "52474"
}
],
"symlink_target": ""
} |
import random
import math
import numpy as np
import networkx as nx
import cPickle as pkl
from scipy.spatial.distance import cosine
from experiment_util import weighted_choice
from dag_util import get_roots
from interactions import InteractionsUtil as IU
from experiment_util import get_number_and_percentage, \
experiment_signature
def random_topic(n_topics, topic_noise=0.0001, taboo_topics=frozenset()):
    """Draw a near-one-hot topic distribution over `n_topics` topics.

    Picks a main topic uniformly from the topics NOT in `taboo_topics`, puts
    all Dirichlet mass there plus uniform noise in [0, topic_noise), and
    samples a distribution from the resulting Dirichlet.

    Returns (distribution, main_topic_index).

    Raises ValueError when every topic is taboo (the previous
    rejection-sampling loop spun forever in that case). The default is now an
    immutable frozenset instead of a shared mutable set().
    """
    taboo = set(taboo_topics)
    candidates = [t for t in range(n_topics) if t not in taboo]
    if not candidates:
        raise ValueError('all {} topics are taboo'.format(n_topics))
    main_topic = np.random.choice(candidates)
    dirich_alpha = np.zeros(n_topics)
    dirich_alpha[main_topic] = 1
    dirich_alpha += np.random.uniform(0, topic_noise, n_topics)
    dirich_alpha /= dirich_alpha.sum()
    return np.random.dirichlet(dirich_alpha), main_topic
def random_topic_distribution(n_topics):
raw_vect = np.random.random(n_topics)
return raw_vect / raw_vect.sum()
def gen_event_with_known_tree_structure(event_size, participants,
                                        start_time, end_time,
                                        event_topic_param,
                                        topic_noise,
                                        alpha, tau,
                                        forward_proba,
                                        reply_proba,
                                        create_new_proba):
    """Generate a synthetic interaction event as a directed message tree.

    Each of `event_size` messages attaches to an existing message with
    probability weighted by out-degree (preferential attachment, scaled by
    `alpha`) and recency (`tau` decay), and is typed as forward/reply/new
    with the given probabilities. Topics are the event topic plus uniform
    noise, renormalized.

    NOTE(review): Python 2 / networkx 1.x code (`xrange`, generator `.next()`,
    `tree.node[...]` attribute dicts, `nodes_iter`) — keep that environment.
    """
    n_participants = len(participants)
    time_step = (end_time - start_time) / float(event_size)
    if time_step < 1:
        # raise ValueError('time_step should be larger than 1 ({}-{}) / {}'.format(
        #     end_time,
        #     start_time,
        #     event_size
        # ))
        print("timestemp < 1")
    tree = nx.DiGraph()
    for i in xrange(event_size):
        time = start_time + time_step * (i+1)
        if tree.number_of_nodes() == 0:
            # First message: pick a random sender/recipient pair.
            rand_inds = np.random.permutation(n_participants)
            sender_id = participants[rand_inds[0]]
            recipient_id = participants[rand_inds[1]]
        else:
            # sample a node to connect
            # weighted by degree and recency
            nodes = tree.nodes()
            out_degree = np.asarray([tree.out_degree(n)
                                     for n in nodes])
            recency = np.asarray([time - tree.node[n]['timestamp']
                                  for n in nodes])
            weights = alpha * out_degree + np.power(tau, recency)
            parent = weighted_choice(zip(nodes, weights))[0]
            # randomly choose the type of connection
            # e.g, forward, reply, create_new
            c_type = weighted_choice(
                [('f', forward_proba),
                 ('r', reply_proba),
                 ('c', create_new_proba)]
            )[0]
            tree.add_edge(parent, i, c_type=c_type)
            if c_type == 'r':
                # Reply: swap the parent's sender/recipient.
                sender_id = tree.node[parent]['recipient_id']
                recipient_id = tree.node[parent]['sender_id']
            elif c_type == 'f':
                # Forward: parent's recipient sends on to a third participant.
                parent_sender_id = tree.node[parent]['sender_id']
                sender_id = tree.node[parent]['recipient_id']
                # print(np.random.permutation(n_participants))
                try:
                    recipient_id = (participants[r_id]
                                    for r_id in np.random.permutation(n_participants)
                                    if participants[r_id] != sender_id and
                                    participants[r_id] != parent_sender_id).next()
                except StopIteration:
                    # No third participant exists; recipient_id keeps its
                    # previous value (debug output only).
                    print('participants', participants)
                    print('sender_id, parent_sender_id', sender_id, parent_sender_id)
            else:
                # Create-new: same sender starts a new branch to someone else.
                sender_id = tree.node[parent]['sender_id']
                recipient_id = (participants[r_id]
                                for r_id in np.random.permutation(n_participants)
                                if participants[r_id] != sender_id).next()
        tree.add_node(i)
        # randomly adding white noise
        topics = event_topic_param + np.random.uniform(
            0, topic_noise,
            len(event_topic_param)
        )
        topics /= topics.sum()
        # networkx 1.x: replaces the whole attribute dict for node i.
        tree.node[i] = {
            'message_id': i,
            'sender_id': sender_id,
            'recipient_id': recipient_id,
            'timestamp': time,
            'topics': topics
        }
    # change int to string
    for n in tree.nodes_iter():
        tree.node[n]['sender_id'] = 'u-{}'.format(tree.node[n]['sender_id'])
        tree.node[n]['recipient_ids'] = ['u-{}'.format(
            tree.node[n]['recipient_id'])
        ]
        del tree.node[n]['recipient_id']
    return tree
def random_events(n_events, event_size_mu, event_size_sigma,
                  n_total_participants, participant_mu, participant_sigma,
                  min_time, max_time, event_duration_mu, event_duration_sigma,
                  n_topics, topic_scaling_factor, topic_noise,
                  alpha, tau,
                  forward_proba,
                  reply_proba,
                  create_new_proba,
                  taboo_topics=set(),
                  accumulate_taboo=False):
    """Generate `n_events` random interaction trees ("events").

    Each event gets a fresh topic (avoiding `taboo_topics`), a
    normally-distributed size and participant count, and a random time
    span inside [min_time, max_time].  Tree construction is delegated to
    `gen_event_with_known_tree_structure`.

    Returns (events, taboo_topics): the list of event trees and the
    (possibly grown) set of topic ids used so far.

    Raises RuntimeError when a generated event's meta graph is invalid
    (more than one root, or fewer meta-graph nodes than event nodes).
    """
    events = []
    # the mutable default is never mutated directly: it is copied here,
    # so the shared-default pitfall does not apply
    taboo_topics = set(taboo_topics)
    for i in xrange(n_events):
        # randomly select a topic and add some noise to it
        event_topic_param, topic_id = random_topic(
            n_topics,
            topic_noise,
            taboo_topics
        )
        if accumulate_taboo:
            taboo_topics.add(topic_id)
        print('event_topic_param:', event_topic_param)
        # resample until we draw a strictly positive event size
        event_size = 0
        while event_size <= 0:
            event_size = int(round(
                np.random.normal(event_size_mu, event_size_sigma)
            ))
        assert event_size > 0
        # resample until we have at least 3 participants
        n_participants = 0
        while n_participants <= 2:
            n_participants = int(round(
                np.random.normal(participant_mu, participant_sigma)
            ))
        assert n_participants > 2
        participants = np.random.permutation(
            n_total_participants
        )[:n_participants]
        print('participants:', participants)
        # event timespan, clipped to max_time
        start_time = np.random.uniform(min_time, max_time - event_duration_mu)
        end_time = start_time + np.random.normal(event_duration_mu,
                                                 event_duration_sigma)
        if end_time > max_time:
            end_time = max_time
        event = gen_event_with_known_tree_structure(
            event_size, participants, start_time, end_time,
            event_topic_param,
            topic_noise,
            alpha, tau,
            forward_proba,
            reply_proba,
            create_new_proba
        )
        # sanity-check the generated tree against its meta graph
        g = IU.get_meta_graph(
            [event.node[n] for n in event.nodes_iter()],
            decompose_interactions=False,
            remove_singleton=True,
            given_topics=True,
            convert_time=False)
        n_interactions_in_mg = g.number_of_nodes()
        if n_interactions_in_mg == len(event):
            roots = [n
                     for n, d in g.in_degree(g.nodes_iter()).items()
                     if d == 0]
            if len(roots) > 1:
                print(roots)
                for r in roots:
                    print(event[r])
                print("WARNING: roots number {}".format(len(roots)))
                # was a bare `raise`, which is invalid outside an except
                # block; raise an explicit, informative exception instead
                raise RuntimeError(
                    'event has {} roots, expected 1'.format(len(roots)))
        else:
            print(
                'invalid meta graph. {} < {}'.format(
                    n_interactions_in_mg,
                    len(event)
                ))
            # was a bare `raise` as well
            raise RuntimeError(
                'invalid meta graph. {} < {}'.format(
                    n_interactions_in_mg, len(event)))
        events.append(event)
    return events, taboo_topics
def random_noisy_interactions(n_noisy_interactions,
                              min_time, max_time,
                              n_total_participants,
                              n_topics, topic_noise,
                              taboo_topics=set()):
    """Generate `n_noisy_interactions` isolated random interactions.

    Each interaction gets a random topic distribution, a random
    sender/recipient pair and a uniformly random timestamp.
    """
    taboo_topics = set(taboo_topics)
    interactions = []
    for _ in xrange(n_noisy_interactions):
        topic = random_topic_distribution(n_topics)
        # draw two distinct participants at random
        pair = np.random.permutation(n_total_participants)[:2]
        sender, recipient = ('u-{}'.format(pair[0]),
                             'u-{}'.format(pair[1]))
        interactions.append({
            'sender_id': sender,
            'recipient_ids': [recipient],
            'timestamp': np.random.uniform(min_time, max_time),
            'topics': topic
        })
    return interactions
def get_gen_cand_tree_params(e):
    """Derive candidate-tree generation parameters from event tree `e`.

    Returns a dict with the total edge cost `U`, the root nodes and the
    event's time span (rounded up) as the pre-pruning window.
    """
    timestamps = [e.node[n]['timestamp'] for n in e.nodes_iter()]
    span = np.max(timestamps) - np.min(timestamps)
    total_cost = np.sum(e[s][t]['c'] for s, t in e.edges_iter())
    return {
        'U': total_cost,
        'roots': get_roots(e),
        'preprune_secs': math.ceil(span)
    }
def make_artificial_data(
        # for main events
        n_events,
        event_size_mu, event_size_sigma,
        participant_mu, participant_sigma,
        # for minor events
        n_minor_events,
        minor_event_size_mu, minor_event_size_sigma,
        minor_event_participant_mu, minor_event_participant_sigma,
        # shared
        n_total_participants,
        min_time, max_time, event_duration_mu, event_duration_sigma,
        n_topics, topic_scaling_factor, topic_noise,
        n_noisy_interactions, n_noisy_interactions_fraction,
        alpha, tau,
        forward_proba,
        reply_proba,
        create_new_proba,
        dist_func):
    """Generate a full synthetic dataset.

    Produces main events, minor events and isolated noisy interactions,
    merges all interactions with globally unique ``message_id``s, and
    derives candidate-tree generation parameters for the main events.

    Returns (relabeled_events, all_interactions, gen_cand_trees_params).
    """
    # main events; their topic ids become taboo for the minor events
    events, taboo_topics = random_events(
        n_events, event_size_mu, event_size_sigma,
        n_total_participants, participant_mu, participant_sigma,
        min_time, max_time, event_duration_mu, event_duration_sigma,
        n_topics, topic_scaling_factor, topic_noise,
        alpha, tau,
        forward_proba,
        reply_proba,
        create_new_proba,
        accumulate_taboo=True
    )
    # minor events avoid the main events' topics via the taboo set
    minor_events, _ = random_events(
        n_minor_events, minor_event_size_mu, minor_event_size_sigma,
        n_total_participants, minor_event_participant_mu,
        minor_event_participant_sigma,
        min_time, max_time, event_duration_mu, event_duration_sigma,
        n_topics, topic_scaling_factor, topic_noise,
        alpha, tau,
        forward_proba,
        reply_proba,
        create_new_proba,
        taboo_topics=taboo_topics,
        accumulate_taboo=False
    )
    # resolve the absolute number of noisy interactions from the
    # count/fraction pair; the first argument counts the interactions in
    # the main events
    (n_noisy_interactions, _) = get_number_and_percentage(
        sum([1 for e in events for _ in e]),
        n_noisy_interactions, n_noisy_interactions_fraction
    )
    noisy_interactions = random_noisy_interactions(
        n_noisy_interactions,
        min_time, max_time,
        n_total_participants,
        n_topics, topic_noise,
        taboo_topics
    )
    event_interactions = [e.node[n] for e in events
                          for n in e.nodes_iter()]
    minor_event_interactions = [e.node[n] for e in minor_events
                                for n in e.nodes_iter()]
    all_interactions = (event_interactions + minor_event_interactions
                        + noisy_interactions)
    # add interaction id; these dicts are the graphs' own node-attribute
    # dicts, so the events see the new message_id too
    for i, intr in enumerate(all_interactions):
        intr['message_id'] = i
        intr['topics'] = intr['topics'].tolist()
    # relabel the nodes
    relabeled_events = []
    for e in events:
        mapping = {n: e.node[n]['message_id'] for n in e.nodes_iter()}
        relabeled_events.append(nx.relabel_nodes(e, mapping))
    for e in events:
        # NOTE(review): the rebinding of `e` is discarded each iteration;
        # this only works if assign_edge_weights mutates the graph in
        # place -- presumably it does (networkx graphs are mutable); confirm.
        e = IU.assign_edge_weights(e, dist_func)
    gen_cand_trees_params = [get_gen_cand_tree_params(e)
                             for e in events]
    return relabeled_events, all_interactions, gen_cand_trees_params
def main():
    """CLI entry point: generate synthetic interaction data.

    Writes three files into --output_dir: the relabeled event trees
    (gpickle), all interactions (json) and the candidate-tree
    generation parameters (pickle).
    """
    import ujson as json
    import argparse
    from pprint import pprint
    parser = argparse.ArgumentParser('Make synthetic interaction data')
    parser.add_argument('--n_events', type=int, default=10)
    parser.add_argument('--event_size_mu', type=int, default=40)
    parser.add_argument('--event_size_sigma', type=int, default=5)
    parser.add_argument('--participant_mu', type=int, default=5)
    parser.add_argument('--participant_sigma', type=float, default=3)
    parser.add_argument('--n_minor_events', type=int, default=0)
    parser.add_argument('--minor_event_size_mu', type=int, default=10)
    parser.add_argument('--minor_event_size_sigma', type=int, default=1)
    parser.add_argument('--minor_event_participant_mu', type=int, default=4)
    parser.add_argument('--minor_event_participant_sigma', type=float, default=0.1)
    parser.add_argument('--n_total_participants', type=int, default=50)
    parser.add_argument('--min_time', type=int, default=10)
    parser.add_argument('--max_time', type=int, default=1100)
    parser.add_argument('--event_duration_mu', type=int, default=100)
    parser.add_argument('--event_duration_sigma', type=int, default=1)
    parser.add_argument('--n_topics', type=int, default=10)
    parser.add_argument('--topic_scaling_factor', type=float, default=0.5)
    parser.add_argument('--topic_noise', type=float, default=0.1)
    parser.add_argument('--n_noisy_interactions', type=int, default=None)
    parser.add_argument('--n_noisy_interactions_fraction',
                        type=float, default=0.1)
    parser.add_argument('--output_dir', type=str, default='data/synthetic')
    parser.add_argument('--alpha',
                        type=float, default=1.0)
    parser.add_argument('--tau',
                        type=float, default=0.8)
    parser.add_argument('--forward_proba',
                        type=float, default=0.3)
    parser.add_argument('--reply_proba',
                        type=float, default=0.5)
    parser.add_argument('--create_new_proba',
                        type=float, default=0.2)
    parser.add_argument('--result_suffix',
                        default='')
    parser.add_argument('--random_seed',
                        type=int,
                        default=None)
    args = parser.parse_args()
    # seed both RNGs up-front so a given --random_seed reproduces the data
    random.seed(args.random_seed)
    np.random.seed(args.random_seed)
    pprint(vars(args))
    result_suffix = args.result_suffix
    output_dir = args.output_dir
    # vars(args) aliases the namespace dict; strip the non-model
    # arguments before forwarding the rest to make_artificial_data
    args_dict = vars(args)
    del args_dict['output_dir']
    del args_dict['result_suffix']
    del args_dict['random_seed']
    events, interactions, gen_cand_tree_params = make_artificial_data(
        dist_func=cosine,
        **args_dict
    )
    sig = experiment_signature(
        n_noisy_interactions_fraction=args.n_noisy_interactions_fraction,
        event_size=args.event_size_mu,
    )
    nx.write_gpickle(events,
                     '{}/events--{}{}.pkl'.format(output_dir, sig,
                                                  result_suffix)
                     )
    # use context managers so the output files are flushed and closed
    # even if serialization fails (the originals leaked the handles)
    with open('{}/interactions--{}{}.json'.format(output_dir, sig,
                                                  result_suffix),
              'w') as json_file:
        json.dump(interactions, json_file)
    # pickle streams are binary data: open in 'wb', not 'w'
    with open('{}/gen_cand_tree_params--{}{}.pkl'.format(output_dir, sig,
                                                         result_suffix),
              'wb') as pkl_file:
        pkl.dump(gen_cand_tree_params, pkl_file)
# Script entry point.
if __name__ == '__main__':
    main()
| {
"content_hash": "84e093882bbc8cacd8d2f950d25a0b95",
"timestamp": "",
"source": "github",
"line_count": 433,
"max_line_length": 85,
"avg_line_length": 35.86143187066975,
"alnum_prop": 0.5425038639876353,
"repo_name": "xiaohan2012/lst",
"id": "d0e6a503ebc79c0609b76e20ca8865f307106980",
"size": "15553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "artificial_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "23944"
},
{
"name": "JavaScript",
"bytes": "12920"
},
{
"name": "Makefile",
"bytes": "480"
},
{
"name": "Python",
"bytes": "326635"
},
{
"name": "Shell",
"bytes": "27798"
}
],
"symlink_target": ""
} |
"""API for Home Connect bound to HASS OAuth."""
from asyncio import run_coroutine_threadsafe
import logging
import homeconnect
from homeconnect.api import HomeConnectError
from homeassistant import config_entries, core
from homeassistant.const import DEVICE_CLASS_TIMESTAMP, TIME_SECONDS, UNIT_PERCENTAGE
from homeassistant.helpers import config_entry_oauth2_flow
from homeassistant.helpers.dispatcher import dispatcher_send
from .const import (
BSH_ACTIVE_PROGRAM,
BSH_POWER_OFF,
BSH_POWER_STANDBY,
SIGNAL_UPDATE_ENTITIES,
)
_LOGGER = logging.getLogger(__name__)
class ConfigEntryAuth(homeconnect.HomeConnectAPI):
    """Provide Home Connect authentication tied to an OAuth2 based config entry."""

    def __init__(
        self,
        hass: core.HomeAssistant,
        config_entry: config_entries.ConfigEntry,
        implementation: config_entry_oauth2_flow.AbstractOAuth2Implementation,
    ):
        """Initialize Home Connect Auth."""
        self.hass = hass
        self.config_entry = config_entry
        self.session = config_entry_oauth2_flow.OAuth2Session(
            hass, config_entry, implementation
        )
        super().__init__(self.session.token)
        self.devices = []

    def refresh_tokens(self) -> dict:
        """Refresh and return new Home Connect tokens using Home Assistant OAuth2 session."""
        run_coroutine_threadsafe(
            self.session.async_ensure_token_valid(), self.hass.loop
        ).result()
        return self.session.token

    def get_devices(self):
        """Get a dictionary of devices."""
        # Map appliance type to its device class.  Built inside the method
        # (not at class scope) because the device classes are defined
        # later in this module.  Replaces the previous if/elif chain.
        type_to_class = {
            "Dryer": Dryer,
            "Washer": Washer,
            "Dishwasher": Dishwasher,
            "FridgeFreezer": FridgeFreezer,
            "Oven": Oven,
            "CoffeeMaker": CoffeeMaker,
            "Hood": Hood,
            "Hob": Hob,
        }
        devices = []
        for app in self.get_appliances():
            device_class = type_to_class.get(app.type)
            if device_class is None:
                _LOGGER.warning("Appliance type %s not implemented", app.type)
                continue
            device = device_class(self.hass, app)
            devices.append({"device": device, "entities": device.get_entity_info()})
        self.devices = devices
        return devices
class HomeConnectDevice:
    """Generic Home Connect device."""

    # for some devices, this is instead BSH_POWER_STANDBY
    # see https://developer.home-connect.com/docs/settings/power_state
    power_off_state = BSH_POWER_OFF

    def __init__(self, hass, appliance):
        """Initialize the device class."""
        self.hass = hass
        # appliance client object returned by HomeConnectAPI.get_appliances()
        self.appliance = appliance

    def initialize(self):
        """Fetch the info needed to initialize the device."""
        # Each fetch is best-effort: an offline appliance raises, but we
        # still want to finish initialization and subscribe to events.
        try:
            self.appliance.get_status()
        except (HomeConnectError, ValueError):
            _LOGGER.debug("Unable to fetch appliance status. Probably offline")
        try:
            self.appliance.get_settings()
        except (HomeConnectError, ValueError):
            _LOGGER.debug("Unable to fetch settings. Probably offline")
        try:
            program_active = self.appliance.get_programs_active()
        except (HomeConnectError, ValueError):
            _LOGGER.debug("Unable to fetch active programs. Probably offline")
            program_active = None
        if program_active and "key" in program_active:
            # mirror the active program into the status dict so entities
            # can read it like any other status value
            self.appliance.status[BSH_ACTIVE_PROGRAM] = {"value": program_active["key"]}
        self.appliance.listen_events(callback=self.event_callback)

    def event_callback(self, appliance):
        """Handle event."""
        _LOGGER.debug("Update triggered on %s", appliance.name)
        _LOGGER.debug(self.appliance.status)
        # notify this appliance's entities (matched by haId) to refresh
        dispatcher_send(self.hass, SIGNAL_UPDATE_ENTITIES, appliance.haId)
class DeviceWithPrograms(HomeConnectDevice):
    """Device with programs."""

    PROGRAMS = []

    def get_programs_available(self):
        """Get the available programs."""
        return self.PROGRAMS

    def get_program_switches(self):
        """Get a dictionary with info about program switches.

        There will be one switch for each program.
        """
        return [
            {"device": self, "program_name": program["name"]}
            for program in self.get_programs_available()
        ]

    def get_program_sensors(self):
        """Get a dictionary with info about program sensors.

        There will be one of the four types of sensors for each
        device.
        """
        sensor_specs = {
            "Remaining Program Time": (None, None, DEVICE_CLASS_TIMESTAMP, 1),
            "Duration": (TIME_SECONDS, "mdi:update", None, 1),
            "Program Progress": (UNIT_PERCENTAGE, "mdi:progress-clock", None, 1),
        }
        sensors = []
        for name, (unit, icon, device_class, sign) in sensor_specs.items():
            sensors.append(
                {
                    "device": self,
                    "desc": name,
                    "unit": unit,
                    "key": "BSH.Common.Option.{}".format(name.replace(" ", "")),
                    "icon": icon,
                    "device_class": device_class,
                    "sign": sign,
                }
            )
        return sensors
class DeviceWithDoor(HomeConnectDevice):
    """Device that has a door sensor."""

    def get_door_entity(self):
        """Get a dictionary with info about the door binary sensor."""
        return dict(device=self, desc="Door", device_class="door")
class Dryer(DeviceWithDoor, DeviceWithPrograms):
    """Dryer class."""

    PROGRAMS = [
        {"name": "LaundryCare.Dryer.Program.Cotton"},
        {"name": "LaundryCare.Dryer.Program.Synthetic"},
        {"name": "LaundryCare.Dryer.Program.Mix"},
        {"name": "LaundryCare.Dryer.Program.Blankets"},
        {"name": "LaundryCare.Dryer.Program.BusinessShirts"},
        {"name": "LaundryCare.Dryer.Program.DownFeathers"},
        {"name": "LaundryCare.Dryer.Program.Hygiene"},
        {"name": "LaundryCare.Dryer.Program.Jeans"},
        {"name": "LaundryCare.Dryer.Program.Outdoor"},
        {"name": "LaundryCare.Dryer.Program.SyntheticRefresh"},
        {"name": "LaundryCare.Dryer.Program.Towels"},
        {"name": "LaundryCare.Dryer.Program.Delicates"},
        {"name": "LaundryCare.Dryer.Program.Super40"},
        {"name": "LaundryCare.Dryer.Program.Shirts15"},
        {"name": "LaundryCare.Dryer.Program.Pillow"},
        {"name": "LaundryCare.Dryer.Program.AntiShrink"},
    ]

    def get_entity_info(self):
        """Get a dictionary with infos about the associated entities."""
        return {
            "binary_sensor": [self.get_door_entity()],
            "switch": self.get_program_switches(),
            "sensor": self.get_program_sensors(),
        }
class Dishwasher(DeviceWithDoor, DeviceWithPrograms):
    """Dishwasher class."""

    PROGRAMS = [
        {"name": "Dishcare.Dishwasher.Program.Auto1"},
        {"name": "Dishcare.Dishwasher.Program.Auto2"},
        {"name": "Dishcare.Dishwasher.Program.Auto3"},
        {"name": "Dishcare.Dishwasher.Program.Eco50"},
        {"name": "Dishcare.Dishwasher.Program.Quick45"},
        {"name": "Dishcare.Dishwasher.Program.Intensiv70"},
        {"name": "Dishcare.Dishwasher.Program.Normal65"},
        {"name": "Dishcare.Dishwasher.Program.Glas40"},
        {"name": "Dishcare.Dishwasher.Program.GlassCare"},
        {"name": "Dishcare.Dishwasher.Program.NightWash"},
        {"name": "Dishcare.Dishwasher.Program.Quick65"},
        {"name": "Dishcare.Dishwasher.Program.Normal45"},
        {"name": "Dishcare.Dishwasher.Program.Intensiv45"},
        {"name": "Dishcare.Dishwasher.Program.AutoHalfLoad"},
        {"name": "Dishcare.Dishwasher.Program.IntensivPower"},
        {"name": "Dishcare.Dishwasher.Program.MagicDaily"},
        {"name": "Dishcare.Dishwasher.Program.Super60"},
        {"name": "Dishcare.Dishwasher.Program.Kurz60"},
        {"name": "Dishcare.Dishwasher.Program.ExpressSparkle65"},
        {"name": "Dishcare.Dishwasher.Program.MachineCare"},
        {"name": "Dishcare.Dishwasher.Program.SteamFresh"},
        {"name": "Dishcare.Dishwasher.Program.MaximumCleaning"},
    ]

    def get_entity_info(self):
        """Get a dictionary with infos about the associated entities."""
        return {
            "binary_sensor": [self.get_door_entity()],
            "switch": self.get_program_switches(),
            "sensor": self.get_program_sensors(),
        }
class Oven(DeviceWithDoor, DeviceWithPrograms):
    """Oven class."""

    PROGRAMS = [
        {"name": "Cooking.Oven.Program.HeatingMode.PreHeating"},
        {"name": "Cooking.Oven.Program.HeatingMode.HotAir"},
        {"name": "Cooking.Oven.Program.HeatingMode.TopBottomHeating"},
        {"name": "Cooking.Oven.Program.HeatingMode.PizzaSetting"},
        {"name": "Cooking.Oven.Program.Microwave.600Watt"},
    ]

    # ovens power "off" into standby rather than fully off
    power_off_state = BSH_POWER_STANDBY

    def get_entity_info(self):
        """Get a dictionary with infos about the associated entities."""
        return {
            "binary_sensor": [self.get_door_entity()],
            "switch": self.get_program_switches(),
            "sensor": self.get_program_sensors(),
        }
class Washer(DeviceWithDoor, DeviceWithPrograms):
    """Washer class."""

    PROGRAMS = [
        {"name": "LaundryCare.Washer.Program.Cotton"},
        {"name": "LaundryCare.Washer.Program.Cotton.CottonEco"},
        {"name": "LaundryCare.Washer.Program.EasyCare"},
        {"name": "LaundryCare.Washer.Program.Mix"},
        {"name": "LaundryCare.Washer.Program.DelicatesSilk"},
        {"name": "LaundryCare.Washer.Program.Wool"},
        {"name": "LaundryCare.Washer.Program.Sensitive"},
        {"name": "LaundryCare.Washer.Program.Auto30"},
        {"name": "LaundryCare.Washer.Program.Auto40"},
        {"name": "LaundryCare.Washer.Program.Auto60"},
        {"name": "LaundryCare.Washer.Program.Chiffon"},
        {"name": "LaundryCare.Washer.Program.Curtains"},
        {"name": "LaundryCare.Washer.Program.DarkWash"},
        {"name": "LaundryCare.Washer.Program.Dessous"},
        {"name": "LaundryCare.Washer.Program.Monsoon"},
        {"name": "LaundryCare.Washer.Program.Outdoor"},
        {"name": "LaundryCare.Washer.Program.PlushToy"},
        {"name": "LaundryCare.Washer.Program.ShirtsBlouses"},
        {"name": "LaundryCare.Washer.Program.SportFitness"},
        {"name": "LaundryCare.Washer.Program.Towels"},
        {"name": "LaundryCare.Washer.Program.WaterProof"},
    ]

    def get_entity_info(self):
        """Get a dictionary with infos about the associated entities."""
        return {
            "binary_sensor": [self.get_door_entity()],
            "switch": self.get_program_switches(),
            "sensor": self.get_program_sensors(),
        }
class CoffeeMaker(DeviceWithPrograms):
    """Coffee maker class."""

    PROGRAMS = [
        {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.Espresso"},
        {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.EspressoMacchiato"},
        {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.Coffee"},
        {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.Cappuccino"},
        {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.LatteMacchiato"},
        {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.CaffeLatte"},
        {"name": "ConsumerProducts.CoffeeMaker.Program.CoffeeWorld.Americano"},
        {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.EspressoDoppio"},
        {"name": "ConsumerProducts.CoffeeMaker.Program.CoffeeWorld.FlatWhite"},
        {"name": "ConsumerProducts.CoffeeMaker.Program.CoffeeWorld.Galao"},
        {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.MilkFroth"},
        {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.WarmMilk"},
        {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.Ristretto"},
        {"name": "ConsumerProducts.CoffeeMaker.Program.CoffeeWorld.Cortado"},
    ]

    # coffee makers power "off" into standby rather than fully off
    power_off_state = BSH_POWER_STANDBY

    def get_entity_info(self):
        """Get a dictionary with infos about the associated entities."""
        return {
            "switch": self.get_program_switches(),
            "sensor": self.get_program_sensors(),
        }
class Hood(DeviceWithPrograms):
    """Hood class."""

    PROGRAMS = [
        {"name": "Cooking.Common.Program.Hood.Automatic"},
        {"name": "Cooking.Common.Program.Hood.Venting"},
        {"name": "Cooking.Common.Program.Hood.DelayedShutOff"},
    ]

    def get_entity_info(self):
        """Get a dictionary with infos about the associated entities."""
        return {
            "switch": self.get_program_switches(),
            "sensor": self.get_program_sensors(),
        }
class FridgeFreezer(DeviceWithDoor):
    """Fridge/Freezer class."""

    def get_entity_info(self):
        """Get a dictionary with infos about the associated entities."""
        return {"binary_sensor": [self.get_door_entity()]}
class Hob(DeviceWithPrograms):
    """Hob class."""

    PROGRAMS = [{"name": "Cooking.Hob.Program.PowerLevelMode"}]

    def get_entity_info(self):
        """Get a dictionary with infos about the associated entities."""
        return {
            "switch": self.get_program_switches(),
            "sensor": self.get_program_sensors(),
        }
| {
"content_hash": "a1bb103aded04dd66e0bb95d75a70291",
"timestamp": "",
"source": "github",
"line_count": 372,
"max_line_length": 93,
"avg_line_length": 38.53763440860215,
"alnum_prop": 0.6213727678571429,
"repo_name": "titilambert/home-assistant",
"id": "f768e28be9238f5d99c97fbf3bd6569dedf67214",
"size": "14336",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/home_connect/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "25849092"
},
{
"name": "Shell",
"bytes": "4410"
}
],
"symlink_target": ""
} |
import multiprocessing.connection as connection
import sys
import os
class Client:
    """Send the invoking process' CLI arguments to the local control server."""

    def message(args):
        """Connect to the server on localhost:6000, send a
        {pid, args} message and print the reply.

        NOTE(review): the first parameter acts as ``self`` when called on
        an instance but is never used -- kept as-is for compatibility.
        """
        address = ('localhost', 6000)
        # the connection is a context manager: it is closed even if
        # send/recv raises (the original leaked it on error)
        with connection.Client(address, authkey='s'.encode(encoding='UTF-8')) as conn:
            msg = {}
            # NOTE(review): getppid() sends the *parent* pid -- presumably
            # the shell that invoked this script; confirm this is intended.
            msg['pid'] = os.getppid()
            msg['args'] = sys.argv[1:]
            conn.send(msg)
            resp = conn.recv()
            print(resp)
| {
"content_hash": "acdeb9c4e4683803c7e52fbcf01a747e",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 79,
"avg_line_length": 26.8,
"alnum_prop": 0.5771144278606966,
"repo_name": "Gazolik/shell_player",
"id": "d4b8d6d868003dc4ab688f974cf2f02b1ca1b451",
"size": "422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shell_player/client/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "331453"
},
{
"name": "Shell",
"bytes": "274"
}
],
"symlink_target": ""
} |
__all__ = [
'Body',
'Header',
'Message',
'Property',
'WsId'
]
#
# Package definition for com.raytheon.uf.common.message
#
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 09/16/10 dgilling Initial Creation.
# 08/19/14 2926 bclement added Message files
#
#
from .Body import Body
from .Header import Header
from .Message import Message
from .Property import Property
from .WsId import WsId
| {
"content_hash": "86c4855df52b89d84ca1975b0ed80d08",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 76,
"avg_line_length": 23.555555555555557,
"alnum_prop": 0.46226415094339623,
"repo_name": "mjames-upc/python-awips",
"id": "e4ad2b5d6ee59c3013545e46e879751f9cff8488",
"size": "643",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dynamicserialize/dstypes/com/raytheon/uf/common/message/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "27192"
},
{
"name": "Python",
"bytes": "714011"
}
],
"symlink_target": ""
} |
import re
from unittest import TestCase
from ptime import Format, FormatError
class TestFormat(TestCase):
    """Unit tests for the Format class."""

    def test_qualifiers(self):
        expected = re.compile(r'^$', re.IGNORECASE | re.UNICODE)
        self.assertEqual(Format('').regexp, expected)

    def test_escaping(self):
        pattern, _attrs = Format('').parse_template('%%%%')
        self.assertEqual(pattern, '%%')

    def test_invalid_template(self):
        with self.assertRaises(FormatError):
            Format('%~')

    def test_basic_format(self):
        match = re.match(Format('%Y-%m-%d').regexp, '2013-09-10')
        self.assertIsNotNone(match)
class TestStandardFormats(TestCase):
    """Smoke tests: each standard format's regexp matches a sample string."""

    def _assert_matches(self, fmt, sample):
        # shared helper: the format's regexp must match the sample
        self.assertIsNotNone(re.match(fmt.regexp, sample))

    def test_iso8601(self):
        self._assert_matches(Format.iso8601(), '2013-09-11T18:44:25+03:00')

    def test_rfc822(self):
        self._assert_matches(Format.rfc822(),
                             'Wed, 11 Sep 2013 15:53:02 -0000')

    def test_rfc3339(self):
        fmt = Format.rfc3339()
        self._assert_matches(fmt, '2013-09-11T19:13:02+03:00')
        self._assert_matches(fmt, '2013-09-11T19:13:02.654321+03:00')

    def test_rfc850(self):
        self._assert_matches(Format.rfc850(),
                             'Wednesday, 11-Sep-13 19:15:03 EEST')

    def test_mysql(self):
        self._assert_matches(Format.mysql(), '2013-09-11 19:17:39')
| {
"content_hash": "43a9de4b256cabc12337711344cff08e",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 75,
"avg_line_length": 31.5625,
"alnum_prop": 0.6257425742574257,
"repo_name": "lun/ptime",
"id": "05261d9c414ff8dbff334fa7c04fc4afbbbfaf08",
"size": "1515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/format.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18017"
}
],
"symlink_target": ""
} |
import getopt
import re
from lib.builds.build_query import BuildQuery
import lib.logger
import ConfigParser
import os
import collections
#class to parse the inputs either from command line or from an ini file
#command line supports a subset of
# configuration
# which tests
# ideally should accept a regular expression
class TestInputSingleton():
    # Module-wide holder for the parsed TestInput -- presumably set once
    # by the test runner and read by the test cases; confirm against callers.
    input = None
class TestInput(object):
    """Parsed test configuration: servers, clusters and test parameters."""

    def __init__(self):
        self.servers = []
        self.moxis = []
        self.clusters = {}
        self.membase_settings = None
        self.test_params = {}
        # servers: each server can have u, p, port, directory

    def param(self, name, default_value):
        """Return the parsed value of test parameter `name`, or
        `default_value` when it is absent (the default is NOT parsed)."""
        try:
            raw = self.test_params[name]
        except KeyError:
            return default_value
        return TestInput._parse_param(raw)

    @staticmethod
    def _parse_param(value):
        """Coerce a raw parameter string to int, float or bool when
        possible; otherwise return it unchanged."""
        for cast in (int, float):
            try:
                return cast(value)
            except ValueError:
                pass
        lowered = value.lower()
        if lowered == "false":
            return False
        if lowered == "true":
            return True
        return value
class TestInputServer(object):
    """Connection info for a single server under test."""

    def __init__(self):
        self.ip = ''
        self.ssh_username = ''
        self.ssh_password = ''
        self.ssh_key = ''
        self.rest_username = ''
        self.rest_password = ''
        self.port = ''
        self.cli_path = ''
        self.data_path = ''

    def __str__(self):
        return 'ip:{0} port:{1} ssh_username:{2}'.format(
            self.ip, self.port, self.ssh_username)

    # both representations are intentionally identical
    __repr__ = __str__
class TestInputMembaseSetting(object):
    """REST credentials shared by the membase/couchbase nodes."""

    def __init__(self):
        self.rest_username, self.rest_password = '', ''
class TestInputBuild(object):
    """Version and download URL of the build under test."""

    def __init__(self):
        self.version, self.url = '', ''
# we parse this and then pass it on to all the test cases
class TestInputParser():
    @staticmethod
    def get_test_input(argv):
        """Build a TestInput from command-line arguments.

        Understands -h (help), -i <ini file> and -p <key=value,...>.
        With -i the ini file provides servers/clusters; otherwise
        everything is read from the command line.  The -p pairs become
        `input.test_params`.
        """
        #if file is given use parse_from_file
        #if its from command line
        (opts, args) = getopt.getopt(argv[1:], 'ht:c:v:s:i:p:l:', [])
        #first let's loop over and find out if user has asked for help
        #and collect -i / -p as we go
        params = {}
        has_ini = False
        ini_file = ''
        for option, argument in opts:
            if option == '-h':
                print 'usage'
                return
            if option == '-i':
                has_ini = True
                ini_file = argument
            if option == '-p':
                # takes in a string of the form "p1=v1,v2,p2=v3,p3=v4,v5,v6"
                # converts to a dictionary of the form {"p1":"v1,v2","p2":"v3","p3":"v4,v5,v6"}
                argument_split = [a.strip() for a in re.split("[,]?([^,=]+)=", argument)[1:]]
                pairs = dict(zip(argument_split[::2], argument_split[1::2]))
                for pair in pairs.iteritems():
                    if pair[0] == "vbuckets":
                        # takes in a string of the form "1-100,140,150-160"
                        # converts to an array with all those values inclusive
                        vbuckets = set()
                        for v in pair[1].split(","):
                            r = v.split("-")
                            vbuckets.update(range(int(r[0]), int(r[-1]) + 1))
                        params[pair[0]] = sorted(vbuckets)
                    else:
                        argument_list = [a.strip() for a in pair[1].split(",")]
                        if len(argument_list) > 1:
                            # if the parameter had multiple entries seperated by comma
                            # then store as a list
                            # ex. {'vbuckets':[1,2,3,4,100]}
                            params[pair[0]] = argument_list
                        else:
                            # if parameter only had one entry then store as a string
                            # ex. {'product':'cb'}
                            params[pair[0]] = argument_list[0]
        if has_ini:
            input = TestInputParser.parse_from_file(ini_file)
            #now let's get the test specific parameters
        else:
            input = TestInputParser.parse_from_command_line(argv)
        input.test_params = params
        # derive num_clients/num_nodes from the parsed topology unless
        # they were given explicitly
        if "num_clients" not in input.test_params.keys() and input.clients:  # do not override the command line value
            input.test_params["num_clients"] = len(input.clients)
        if "num_nodes" not in input.test_params.keys() and input.servers:
            input.test_params["num_nodes"] = len(input.servers)
        return input
    @staticmethod
    def parse_from_file(file):
        """Build a TestInput from an ini file.

        Recognized sections: [servers], [moxis], [clients], [membase],
        [global], [dashboard] and any section whose name starts with
        "cluster" (collected into input.clusters by order of appearance).
        """
        servers = []
        ips = []
        # NOTE(review): `input` and `file` shadow builtins; kept as-is.
        input = TestInput()
        config = ConfigParser.ConfigParser()
        config.read(file)
        sections = config.sections()
        global_properties = {}
        count = 0
        start = 0
        end = 0
        cluster_ips = []
        clusters = {}
        moxis = []
        moxi_ips = []
        client_ips = []
        input.dashboard = []
        for section in sections:
            # does this section name start with "cluster"?
            result = re.search('^cluster', section)
            if section == 'servers':
                ips = TestInputParser.get_server_ips(config, section)
            elif section == 'moxis':
                moxi_ips = TestInputParser.get_server_ips(config, section)
            elif section == 'clients':
                client_ips = TestInputParser.get_server_ips(config, section)
            elif section == 'membase':
                input.membase_settings = TestInputParser.get_membase_settings(config, section)
            elif section == 'global':
                #get global stuff and override for those unset
                for option in config.options(section):
                    global_properties[option] = config.get(section, option)
            elif section == 'dashboard':
                input.dashboard = TestInputParser.get_server_ips(config, section)
            elif result is not None:
                cluster_list = TestInputParser.get_server_ips(config, section)
                cluster_ips.extend(cluster_list)
                clusters[count] = len(cluster_list)
                count += 1
        # Setup 'cluster#' tag as dict
        # input.clusters -> {0: [ip:10.1.6.210 ssh_username:root, ip:10.1.6.211 ssh_username:root]}
        for cluster_ip in cluster_ips:
            servers.append(TestInputParser.get_server(cluster_ip, config))
        servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)
        # slice the flat server list back into per-cluster lists using
        # the per-cluster counts recorded above (order is preserved)
        for key, value in clusters.items():
            end += value
            input.clusters[key] = servers[start:end]
            start += value
        # Setting up 'servers' tag
        servers = []
        for ip in ips:
            servers.append(TestInputParser.get_server(ip, config))
        input.servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)
        # Setting up 'moxis' tag
        moxis = []
        for moxi_ip in moxi_ips:
            moxis.append(TestInputParser.get_server(moxi_ip, config))
        input.moxis = TestInputParser.get_server_options(moxis, input.membase_settings, global_properties)
        # Setting up 'clients' tag
        input.clients = client_ips
        return input
@staticmethod
def get_server_options(servers, membase_settings, global_properties):
for server in servers:
if server.ssh_username == '' and 'username' in global_properties:
server.ssh_username = global_properties['username']
if server.ssh_password == '' and 'password' in global_properties:
server.ssh_password = global_properties['password']
if server.ssh_key == '' and 'ssh_key' in global_properties:
server.ssh_key = os.path.expanduser(global_properties['ssh_key'])
if not server.port and 'port' in global_properties:
server.port = global_properties['port']
if server.cli_path == '' and 'cli' in global_properties:
server.cli_path = global_properties['cli']
if server.rest_username == '' and membase_settings.rest_username != '':
server.rest_username = membase_settings.rest_username
if server.rest_password == '' and membase_settings.rest_password != '':
server.rest_password = membase_settings.rest_password
if server.data_path == '' and 'data_path' in global_properties:
server.data_path = global_properties['data_path']
return servers
@staticmethod
def get_server_ips(config, section):
ips = []
options = config.options(section)
for option in options:
ips.append(config.get(section, option))
return ips
@staticmethod
def get_server(ip, config):
    """Build a TestInputServer for *ip*, applying any per-host overrides
    found in the config section named after the ip."""
    server = TestInputServer()
    server.ip = ip
    # Map config option names onto TestInputServer attributes.
    attr_for_option = {
        'username': 'ssh_username',
        'password': 'ssh_password',
        'cli': 'cli_path',
        'ssh_key': 'ssh_key',
        'port': 'port',
        'ip': 'ip',
    }
    for section in config.sections():
        if section != ip:
            continue
        for option in config.options(section):
            if option in attr_for_option:
                setattr(server, attr_for_option[option],
                        config.get(section, option))
        # Only one section can match this ip; stop scanning.
        break
    return server
@staticmethod
def get_membase_build(config, section):
    """Return a TestInputBuild for *section*.

    NOTE(review): the 'version' and 'url' options are recognized but
    deliberately ignored here -- the returned build is left empty.
    """
    membase_build = TestInputBuild()
    for option in config.options(section):
        if option in ('version', 'url'):
            pass
    return membase_build
@staticmethod
def get_membase_settings(config, section):
    """Read the cluster REST credentials out of *section*."""
    settings = TestInputMembaseSetting()
    for option in config.options(section):
        if option == 'rest_username':
            settings.rest_username = config.get(section, option)
        elif option == 'rest_password':
            settings.rest_password = config.get(section, option)
    return settings
@staticmethod
def parse_from_command_line(argv):
    """Build a TestInput from raw command line arguments.

    Recognized options:
      -h                       : print usage and return None
      -s <list>                : comma separated ip[:port:user:pass:cli] servers
      -u <url> / -v <version>  : build url / build version
      -k <file>                : ssh key applied to servers without one
      -b <path>                : cli path applied to servers without one
      --username / --password  : ssh credentials applied where unset
      -t / -c / -i / -p / -o   : parsed elsewhere (test runner / jenkins)
    """
    input = TestInput()
    try:
        servers = []
        membase_setting = None
        # BUG FIX: the option string previously was 'h:t:c:i:p:' with no
        # long options, so getopt raised GetoptError for -s/-u/-v/-k/-b and
        # --username/--password, making every branch below unreachable.
        # Also '-h' is a bare help flag and must not require an argument.
        (opts, args) = getopt.getopt(argv[1:], 'hs:u:v:k:b:t:c:i:p:',
                                     ['username=', 'password='])
        # First, find out whether the user asked for help.
        need_help = False
        for option, argument in opts:
            if option == "-h":
                print('usage...')
                need_help = True
                break
        if need_help:
            return
        # Populate the server list and the build (url / version).
        for option, argument in opts:
            if option == "-s":
                servers = TestInputParser.handle_command_line_s(argument)
            elif option == "-u" or option == "-v":
                input_build = TestInputParser.handle_command_line_u_or_v(
                    option, argument)
        # Override ssh credentials / key / cli path where still unset.
        for option, argument in opts:
            if option == "-k":
                for server in servers:
                    if server.ssh_key == '':
                        server.ssh_key = argument
            elif option == "--username":
                for server in servers:
                    if server.ssh_username == '':
                        server.ssh_username = argument
            elif option == "--password":
                for server in servers:
                    if server.ssh_password == '':
                        server.ssh_password = argument
            elif option == "-b":
                for server in servers:
                    if server.cli_path == '':
                        server.cli_path = argument
        # Finally, apply hard defaults for anything still unset.
        for server in servers:
            if server.ssh_username == '':
                server.ssh_username = 'root'
            if server.ssh_password == '':
                server.ssh_password = 'northscale!23'
            if server.cli_path == '':
                server.cli_path = '/opt/membase/bin/'
            if not server.port:
                server.port = 8091
        input.servers = servers
        input.membase_settings = membase_setting
        return input
    except Exception:
        log = logger.Logger.get_logger()
        log.error("unable to parse input arguments")
        raise
@staticmethod
def handle_command_line_u_or_v(option, argument):
    """Resolve a build either from an explicit url (-u) or by looking a
    version (-v) up among all known builds."""
    input_build = TestInputBuild()
    if option == "-u":
        # NOTE(review): url validation/version extraction not implemented.
        pass
    elif option == "-v":
        for build in BuildQuery().get_all_builds():
            if build.product_version == argument:
                input_build.url = build.url
                input_build.version = argument
                break
    return input_build
# Returns a list of TestInputServer objects parsed from the -s argument.
@staticmethod
def handle_command_line_s(argument):
    """Parse a comma separated list of ip:port:username:password:clipath
    entries into TestInputServer objects.

    Entries without a ':' produce a default (empty) server object.
    """
    servers = []
    for entry in argument.split(","):
        server = TestInputServer()
        if ":" in entry:
            parts = entry.split(":")
            server.ip = parts[0]
            server.port = parts[1]
            server.ssh_username = parts[2]
            server.ssh_password = parts[3]
            server.cli_path = parts[4]
        servers.append(server)
    return servers
| {
"content_hash": "af64c24188fb69760d693699a4777025",
"timestamp": "",
"source": "github",
"line_count": 417,
"max_line_length": 118,
"avg_line_length": 37.752997601918466,
"alnum_prop": 0.5146414279362256,
"repo_name": "couchbaselabs/litmus",
"id": "9b69d64146f2f4d48ed407f411fe5165e07661b1",
"size": "15743",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "TestInput.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "103452"
},
{
"name": "CSS",
"bytes": "15530"
},
{
"name": "JavaScript",
"bytes": "713886"
},
{
"name": "Python",
"bytes": "1336652"
}
],
"symlink_target": ""
} |
import redis
import os
import dice
# import some_api_lib
# import ...
# Example of your code beginning
# Config vars
# Bot token comes from the environment; raises KeyError if TELEGRAM_TOKEN is unset.
token = os.environ['TELEGRAM_TOKEN']
# Your bot code below
# NOTE(review): redis is imported but not used in this snippet -- confirm it
# is needed elsewhere before removing.
bot = dice.initialize(token)
# some_api = some_api_lib.connect(some_api_token)
# ...
| {
"content_hash": "a5eb02c9e744b622ca20fe9fa168d851",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 49,
"avg_line_length": 19.533333333333335,
"alnum_prop": 0.6416382252559727,
"repo_name": "DanAmador/telegram-dice-bot",
"id": "64f78f87e9639e0954c32d1b32e067079141a3cb",
"size": "317",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3309"
}
],
"symlink_target": ""
} |
# Backwards-compatibility shim: re-export the driver from its new location
# and emit a deprecation warning when this old module path is imported.
from libcloud.utils import deprecated_warning
from libcloud.compute.drivers.rimuhosting import *
deprecated_warning(__name__)
| {
"content_hash": "65e6b5a81c7c16cd7139fc3c6902607a",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 50,
"avg_line_length": 31.75,
"alnum_prop": 0.8188976377952756,
"repo_name": "cloudkick/libcloud",
"id": "f55ed823ffbbdfe858229f2a134ba9fd944a53ee",
"size": "909",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "libcloud/drivers/rimuhosting.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "574113"
},
{
"name": "Shell",
"bytes": "5637"
}
],
"symlink_target": ""
} |
from sqlalchemy import *
from migrate import *
from sqlalchemy.dialects.sqlite import \
BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL, FLOAT, \
INTEGER, NUMERIC, SMALLINT, TEXT, TIME, TIMESTAMP, VARCHAR
meta = MetaData()
# Minimal table definition: only the columns this migration needs to know
# about; the added columns are created/dropped against this Table object.
projects = Table('projects', meta,
    Column('id', INTEGER, primary_key=True),
    Column('description', TEXT),
)
def upgrade(migrate_engine):
    """Add the 'estimate' and 'units' text columns to projects, backfilling
    their defaults ('median' / 'days') into existing rows."""
    meta.bind = migrate_engine
    for name, default in (('estimate', 'median'), ('units', 'days')):
        col = Column(name, TEXT, default=default)
        col.create(projects, populate_default=True)
        # The created column must be registered on the table metadata.
        assert col is getattr(projects.c, name)
def downgrade(migrate_engine):
    """Drop the columns added by upgrade()."""
    meta.bind = migrate_engine
    for name in ('estimate', 'units'):
        getattr(projects.c, name).drop()
| {
"content_hash": "e6bb2240dadc4b8977b3edcfed9ceaaa",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 70,
"avg_line_length": 29.714285714285715,
"alnum_prop": 0.6730769230769231,
"repo_name": "macterra/galton",
"id": "82876f24f7b4717b921c2ffdf11e588c309bdd12",
"size": "832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbrepo/versions/002_Add_estimate_type.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1886"
},
{
"name": "JavaScript",
"bytes": "12340"
},
{
"name": "Python",
"bytes": "30387"
},
{
"name": "Shell",
"bytes": "30"
}
],
"symlink_target": ""
} |
""" A bunch of util functions to build Seq2Seq models with Caffe2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
from future.utils import viewitems
import caffe2.proto.caffe2_pb2 as caffe2_pb2
from caffe2.python import attention, core, rnn_cell, brew
# Reserved vocabulary entries. The ids must stay aligned with gen_vocab(),
# which inserts the tokens in exactly this order (0..3).
PAD_ID = 0
PAD = '<PAD>'
GO_ID = 1
GO = '<GO>'
EOS_ID = 2
EOS = '<EOS>'
UNK_ID = 3
UNK = '<UNK>'
def gen_vocab(corpus, unk_threshold):
    """Build a token -> id mapping from a whitespace tokenized corpus file.

    The four special tokens take ids 0..3; every corpus token occurring more
    than ``unk_threshold`` times is assigned the next free id.
    """
    vocab = collections.defaultdict(lambda: len(vocab))
    # Touch the special tokens first so they receive ids 0..3.
    for special in (PAD, GO, EOS, UNK):
        vocab[special]
    freqs = collections.defaultdict(lambda: 0)
    with open(corpus) as f:
        for sentence in f:
            for token in sentence.strip().split():
                freqs[token] += 1
    for token, freq in freqs.items():
        if freq > unk_threshold:
            vocab[token]
    return vocab
def get_numberized_sentence(sentence, vocab):
    """Map each whitespace token of *sentence* to its id in *vocab*,
    falling back to the UNK id for out-of-vocabulary tokens."""
    return [
        vocab[tok] if tok in vocab else vocab[UNK]
        for tok in sentence.strip().split()
    ]
def rnn_unidirectional_layer(
    model,
    inputs,
    input_lengths,
    input_size,
    num_units,
    dropout_keep_prob,
    forward_only,
    return_sequence_output,
    return_final_state,
    scope=None,
):
    """ Unidirectional LSTM encoder.

    Builds one LSTM layer (optionally wrapped in dropout) and unrolls it
    over ``inputs``. ``return_sequence_output`` / ``return_final_state``
    control which recurrent-net outputs participate in the backward pass.

    Returns (outputs, final_hidden_state, final_cell_state) blobs.
    """
    with core.NameScope(scope):
        # Zero vectors used as the initial LSTM state (shape [num_units]).
        initial_cell_state = model.param_init_net.ConstantFill(
            [],
            'initial_cell_state',
            shape=[num_units],
            value=0.0,
        )
        initial_hidden_state = model.param_init_net.ConstantFill(
            [],
            'initial_hidden_state',
            shape=[num_units],
            value=0.0,
        )
    cell = rnn_cell.LSTMCell(
        input_size=input_size,
        hidden_size=num_units,
        forget_bias=0.0,
        memory_optimization=False,
        name=(scope + '/' if scope else '') + 'lstm',
        forward_only=forward_only,
    )
    dropout_ratio = (
        None if dropout_keep_prob is None else (1.0 - dropout_keep_prob)
    )
    if dropout_ratio is not None:
        cell = rnn_cell.DropoutCell(
            internal_cell=cell,
            dropout_ratio=dropout_ratio,
            name=(scope + '/' if scope else '') + 'dropout',
            forward_only=forward_only,
            is_test=False,
        )
    # Indices into the recurrent op's output list: 0 is the per-step output
    # sequence; 1 and 3 are the positions of the final hidden/cell states
    # (matching the tuple unpacked from apply_over_sequence below).
    outputs_with_grads = []
    if return_sequence_output:
        outputs_with_grads.append(0)
    if return_final_state:
        outputs_with_grads.extend([1, 3])
    outputs, (_, final_hidden_state, _, final_cell_state) = (
        cell.apply_over_sequence(
            model=model,
            inputs=inputs,
            seq_lengths=input_lengths,
            initial_states=(initial_hidden_state, initial_cell_state),
            outputs_with_grads=outputs_with_grads,
        )
    )
    return outputs, final_hidden_state, final_cell_state
def rnn_bidirectional_layer(
    model,
    inputs,
    input_lengths,
    input_size,
    num_units,
    dropout_keep_prob,
    forward_only,
    return_sequence_output,
    return_final_state,
    scope=None,
):
    """Bidirectional LSTM encoder layer.

    Runs one LSTM over ``inputs`` and another over the time-reversed
    inputs, then concatenates outputs and final states along axis 2, so
    every returned dimension is 2 * num_units.
    """
    outputs_fw, final_hidden_fw, final_cell_fw = rnn_unidirectional_layer(
        model,
        inputs,
        input_lengths,
        input_size,
        num_units,
        dropout_keep_prob,
        forward_only,
        return_sequence_output,
        return_final_state,
        scope=(scope + '/' if scope else '') + 'fw',
    )
    # Reverse each sequence (respecting its true length) for the backward pass.
    with core.NameScope(scope):
        reversed_inputs = model.net.ReversePackedSegs(
            [inputs, input_lengths],
            ['reversed_inputs'],
        )
    outputs_bw, final_hidden_bw, final_cell_bw = rnn_unidirectional_layer(
        model,
        reversed_inputs,
        input_lengths,
        input_size,
        num_units,
        dropout_keep_prob,
        forward_only,
        return_sequence_output,
        return_final_state,
        scope=(scope + '/' if scope else '') + 'bw',
    )
    # Undo the reversal so backward outputs align step-for-step with forward.
    with core.NameScope(scope):
        outputs_bw = model.net.ReversePackedSegs(
            [outputs_bw, input_lengths],
            ['outputs_bw'],
        )
    # Concatenate forward and backward results
    if return_sequence_output:
        with core.NameScope(scope):
            outputs, _ = model.net.Concat(
                [outputs_fw, outputs_bw],
                ['outputs', 'outputs_dim'],
                axis=2,
            )
    else:
        outputs = None
    if return_final_state:
        with core.NameScope(scope):
            final_hidden_state, _ = model.net.Concat(
                [final_hidden_fw, final_hidden_bw],
                ['final_hidden_state', 'final_hidden_state_dim'],
                axis=2,
            )
            final_cell_state, _ = model.net.Concat(
                [final_cell_fw, final_cell_bw],
                ['final_cell_state', 'final_cell_state_dim'],
                axis=2,
            )
    else:
        final_hidden_state = None
        final_cell_state = None
    return outputs, final_hidden_state, final_cell_state
def build_embeddings(
    model,
    vocab_size,
    embedding_size,
    name,
    freeze_embeddings,
):
    """Create a [vocab_size, embedding_size] embedding table initialized
    from a Gaussian (std 0.1); registered as a trainable parameter unless
    ``freeze_embeddings`` is set."""
    embeddings = model.param_init_net.GaussianFill(
        [],
        name,
        shape=[vocab_size, embedding_size],
        std=0.1,
    )
    if freeze_embeddings:
        # Frozen tables are excluded from model.params, so no gradients flow.
        return embeddings
    model.params.append(embeddings)
    return embeddings
def get_layer_scope(scope, layer_type, i):
    """Return the canonical name scope for layer *i* of *layer_type*,
    e.g. 'encoder/layer0' or, with a scope, 'seq2seq/encoder/layer0'."""
    if scope:
        base = scope + '/' + layer_type
    else:
        base = layer_type
    return base + '/layer' + str(i)
def build_embedding_encoder(
    model,
    encoder_params,
    num_decoder_layers,
    inputs,
    input_lengths,
    vocab_size,
    embeddings,
    embedding_size,
    use_attention,
    num_gpus=0,
    forward_only=False,
    scope=None,
):
    """Embed source token ids and run them through the stacked encoder.

    ``encoder_params['encoder_layer_configs']`` lists one dict per layer
    ('num_units', optional 'dropout_keep_prob'). If
    ``encoder_params['use_bidirectional_encoder']`` is set, the first layer
    is bidirectional (doubling its output width).

    Returns (encoder_outputs, weighted_encoder_outputs,
    final_encoder_hidden_states, final_encoder_cell_states,
    encoder_units_per_layer).
    """
    with core.NameScope(scope or ''):
        if num_gpus == 0:
            embedded_encoder_inputs = model.net.Gather(
                [embeddings, inputs],
                ['embedded_encoder_inputs'],
            )
        else:
            # The embedding table stays on the CPU: gather there, then copy
            # the embedded batch over to the GPU.
            with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
                embedded_encoder_inputs_cpu = model.net.Gather(
                    [embeddings, inputs],
                    ['embedded_encoder_inputs_cpu'],
                )
            embedded_encoder_inputs = model.CopyCPUToGPU(
                embedded_encoder_inputs_cpu,
                'embedded_encoder_inputs',
            )
    layer_inputs = embedded_encoder_inputs
    layer_input_size = embedding_size
    encoder_units_per_layer = []
    final_encoder_hidden_states = []
    final_encoder_cell_states = []
    num_encoder_layers = len(encoder_params['encoder_layer_configs'])
    use_bidirectional_encoder = encoder_params.get(
        'use_bidirectional_encoder',
        False,
    )
    for i, layer_config in enumerate(encoder_params['encoder_layer_configs']):
        if use_bidirectional_encoder and i == 0:
            # Only the bottom layer is bidirectional; its output is twice as wide.
            layer_func = rnn_bidirectional_layer
            output_dims = 2 * layer_config['num_units']
        else:
            layer_func = rnn_unidirectional_layer
            output_dims = layer_config['num_units']
        encoder_units_per_layer.append(output_dims)
        is_final_layer = (i == num_encoder_layers - 1)
        dropout_keep_prob = layer_config.get(
            'dropout_keep_prob',
            None,
        )
        # Only the topmost num_decoder_layers encoder layers hand their final
        # states down to initialize the decoder.
        return_final_state = i >= (num_encoder_layers - num_decoder_layers)
        (
            layer_outputs,
            final_layer_hidden_state,
            final_layer_cell_state,
        ) = layer_func(
            model=model,
            inputs=layer_inputs,
            input_lengths=input_lengths,
            input_size=layer_input_size,
            num_units=layer_config['num_units'],
            dropout_keep_prob=dropout_keep_prob,
            forward_only=forward_only,
            # Intermediate layers feed the next layer; the final layer's
            # sequence output is only needed when attention consumes it.
            return_sequence_output=(not is_final_layer) or use_attention,
            return_final_state=return_final_state,
            scope=get_layer_scope(scope, 'encoder', i),
        )
        if not is_final_layer:
            layer_inputs = layer_outputs
            layer_input_size = output_dims
        final_encoder_hidden_states.append(final_layer_hidden_state)
        final_encoder_cell_states.append(final_layer_cell_state)
    encoder_outputs = layer_outputs
    # No pre-weighted encoder outputs are computed here; attention computes
    # its own weighting downstream.
    weighted_encoder_outputs = None
    return (
        encoder_outputs,
        weighted_encoder_outputs,
        final_encoder_hidden_states,
        final_encoder_cell_states,
        encoder_units_per_layer,
    )
class LSTMWithAttentionDecoder(object):
    """Multi-layer LSTM decoder, optionally wrapped in an AttentionCell
    that attends over the encoder outputs."""

    def scope(self, name):
        # Prefix blob names with this decoder's name scope, if one was given.
        return self.name + '/' + name if self.name is not None else name

    def _get_attention_type(self, attention_type_as_string):
        # Map the config string onto the attention.AttentionType enum.
        if attention_type_as_string == 'regular':
            return attention.AttentionType.Regular
        elif attention_type_as_string == 'recurrent':
            return attention.AttentionType.Recurrent
        else:
            assert False, 'Unknown type ' + attention_type_as_string

    def __init__(
        self,
        encoder_outputs,
        encoder_output_dim,
        encoder_lengths,
        vocab_size,
        attention_type,
        embedding_size,
        decoder_num_units,
        decoder_cells,
        residual_output_layers=None,
        name=None,
        weighted_encoder_outputs=None,
    ):
        """attention_type is one of 'none', 'regular' or 'recurrent'."""
        self.name = name
        self.num_layers = len(decoder_cells)
        if attention_type == 'none':
            self.cell = rnn_cell.MultiRNNCell(
                decoder_cells,
                name=self.scope('decoder'),
                residual_output_layers=residual_output_layers,
            )
            self.use_attention = False
            self.decoder_output_dim = decoder_num_units
            self.output_indices = self.cell.output_indices
        else:
            decoder_cell = rnn_cell.MultiRNNCell(
                decoder_cells,
                name=self.scope('decoder'),
                residual_output_layers=residual_output_layers,
            )
            self.cell = rnn_cell.AttentionCell(
                encoder_output_dim=encoder_output_dim,
                encoder_outputs=encoder_outputs,
                encoder_lengths=encoder_lengths,
                decoder_cell=decoder_cell,
                decoder_state_dim=decoder_num_units,
                name=self.scope('attention_decoder'),
                attention_type=self._get_attention_type(attention_type),
                weighted_encoder_outputs=weighted_encoder_outputs,
                attention_memory_optimization=True,
            )
            self.use_attention = True
            # With attention, the output is the LSTM output concatenated
            # with the attention context over the encoder outputs.
            self.decoder_output_dim = decoder_num_units + encoder_output_dim
            self.output_indices = decoder_cell.output_indices
            # The attention context state sits after the wrapped decoder's
            # 2 * num_layers hidden/cell states.
            self.output_indices.append(2 * self.num_layers)

    def get_state_names(self):
        return self.cell.get_state_names()

    def get_outputs_with_grads(self):
        # sequence (all) output locations are at twice their state index
        return [2 * i for i in self.output_indices]

    def get_output_dim(self):
        return self.decoder_output_dim

    def get_attention_weights(self):
        assert self.use_attention
        # [batch_size, encoder_length, 1]
        return self.cell.get_attention_weights()

    def apply(
        self,
        model,
        input_t,
        seq_lengths,
        states,
        timestep,
    ):
        """Run a single decoding step (step-by-step decoding)."""
        return self.cell.apply(
            model=model,
            input_t=input_t,
            seq_lengths=seq_lengths,
            states=states,
            timestep=timestep,
        )

    def apply_over_sequence(
        self,
        model,
        inputs,
        seq_lengths,
        initial_states,
    ):
        """Unroll the decoder over a whole (embedded) target sequence."""
        return self.cell.apply_over_sequence(
            model=model,
            inputs=inputs,
            seq_lengths=seq_lengths,
            initial_states=initial_states,
            outputs_with_grads=self.get_outputs_with_grads(),
        )
def _build_initial_rnn_decoder_state(
    model,
    blob_name,
    final_encoder_states,
    encoder_layer_index,
    encoder_units_per_layer,
    decoder_num_units,
):
    """Produce one initial decoder state blob (hidden or cell).

    Falls back to a zero parameter when the encoder provided no final state
    for this layer; inserts an fc projection when encoder and decoder widths
    differ; otherwise reuses the encoder state blob directly.
    """
    if (
        final_encoder_states and
        len(final_encoder_states) > encoder_layer_index
    ):
        final_encoder_state = final_encoder_states[encoder_layer_index]
    else:
        final_encoder_state = None
    if final_encoder_state is None:
        initial_state = model.param_init_net.ConstantFill(
            [],
            blob_name,
            shape=[decoder_num_units],
            value=0.0,
        )
        # Learnable zero-initialized state (registered as a parameter).
        model.params.append(initial_state)
    elif decoder_num_units != encoder_units_per_layer[encoder_layer_index]:
        # Bridge differing encoder/decoder widths with a learned projection.
        initial_state = brew.fc(
            model,
            final_encoder_state,
            blob_name,
            encoder_units_per_layer[encoder_layer_index],
            decoder_num_units,
            axis=2,
        )
    else:
        initial_state = final_encoder_state
    return initial_state


def build_initial_rnn_decoder_states(
    model,
    encoder_units_per_layer,
    decoder_units_per_layer,
    final_encoder_hidden_states,
    final_encoder_cell_states,
    use_attention,
):
    """Build the flat list of initial states for the decoder stack.

    Emits (hidden, cell) per decoder layer, plus a zero initial attention
    context when attention is used. When the encoder is deeper than the
    decoder, its topmost layers are the ones mapped onto decoder layers.
    (Refactored: the previously duplicated hidden/cell logic now shares
    _build_initial_rnn_decoder_state.)
    """
    num_encoder_layers = len(encoder_units_per_layer)
    num_decoder_layers = len(decoder_units_per_layer)
    if num_encoder_layers > num_decoder_layers:
        offset = num_encoder_layers - num_decoder_layers
    else:
        offset = 0
    initial_states = []
    for i, decoder_num_units in enumerate(decoder_units_per_layer):
        initial_states.append(_build_initial_rnn_decoder_state(
            model=model,
            blob_name='decoder_initial_hidden_state_{}'.format(i),
            final_encoder_states=final_encoder_hidden_states,
            encoder_layer_index=i + offset,
            encoder_units_per_layer=encoder_units_per_layer,
            decoder_num_units=decoder_num_units,
        ))
        initial_states.append(_build_initial_rnn_decoder_state(
            model=model,
            blob_name='decoder_initial_cell_state_{}'.format(i),
            final_encoder_states=final_encoder_cell_states,
            encoder_layer_index=i + offset,
            encoder_units_per_layer=encoder_units_per_layer,
            decoder_num_units=decoder_num_units,
        ))
    if use_attention:
        # Zero initial attention context, sized to the top encoder layer.
        initial_attention_weighted_encoder_context = (
            model.param_init_net.ConstantFill(
                [],
                'initial_attention_weighted_encoder_context',
                shape=[encoder_units_per_layer[-1]],
                value=0.0,
            )
        )
        model.params.append(initial_attention_weighted_encoder_context)
        initial_states.append(initial_attention_weighted_encoder_context)
    return initial_states
def build_embedding_decoder(
    model,
    decoder_layer_configs,
    inputs,
    input_lengths,
    encoder_lengths,
    encoder_outputs,
    weighted_encoder_outputs,
    final_encoder_hidden_states,
    final_encoder_cell_states,
    encoder_units_per_layer,
    vocab_size,
    embeddings,
    embedding_size,
    attention_type,
    forward_only,
    num_gpus=0,
    scope=None,
):
    """Embed target token ids and unroll the (attention) decoder over them.

    ``decoder_layer_configs`` lists one dict per decoder layer ('num_units',
    optional 'dropout_keep_prob'). Returns (decoder_outputs,
    decoder_output_dim) with outputs flattened to
    [sum of sequence lengths, decoder_output_dim] for the softmax.
    """
    with core.NameScope(scope or ''):
        if num_gpus == 0:
            embedded_decoder_inputs = model.net.Gather(
                [embeddings, inputs],
                ['embedded_decoder_inputs'],
            )
        else:
            # The embedding table stays on the CPU: gather there, then copy
            # the embedded batch over to the GPU.
            with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
                embedded_decoder_inputs_cpu = model.net.Gather(
                    [embeddings, inputs],
                    ['embedded_decoder_inputs_cpu'],
                )
            embedded_decoder_inputs = model.CopyCPUToGPU(
                embedded_decoder_inputs_cpu,
                'embedded_decoder_inputs',
            )
    decoder_cells = []
    decoder_units_per_layer = []
    for i, layer_config in enumerate(decoder_layer_configs):
        num_units = layer_config['num_units']
        decoder_units_per_layer.append(num_units)
        if i == 0:
            input_size = embedding_size
        else:
            input_size = decoder_cells[-1].get_output_dim()
        cell = rnn_cell.LSTMCell(
            forward_only=forward_only,
            input_size=input_size,
            hidden_size=num_units,
            forget_bias=0.0,
            memory_optimization=False,
        )
        dropout_keep_prob = layer_config.get('dropout_keep_prob', None)
        if dropout_keep_prob is not None:
            # BUG FIX: layer_config is a dict, so the original attribute
            # access `layer_config.dropout_keep_prob` raised AttributeError
            # whenever dropout was configured; use the value fetched above.
            dropout_ratio = 1.0 - dropout_keep_prob
            cell = rnn_cell.DropoutCell(
                internal_cell=cell,
                dropout_ratio=dropout_ratio,
                forward_only=forward_only,
                is_test=False,
                name=get_layer_scope(scope, 'decoder_dropout', i),
            )
        decoder_cells.append(cell)
    states = build_initial_rnn_decoder_states(
        model=model,
        encoder_units_per_layer=encoder_units_per_layer,
        decoder_units_per_layer=decoder_units_per_layer,
        final_encoder_hidden_states=final_encoder_hidden_states,
        final_encoder_cell_states=final_encoder_cell_states,
        use_attention=(attention_type != 'none'),
    )
    attention_decoder = LSTMWithAttentionDecoder(
        encoder_outputs=encoder_outputs,
        encoder_output_dim=encoder_units_per_layer[-1],
        encoder_lengths=encoder_lengths,
        vocab_size=vocab_size,
        attention_type=attention_type,
        embedding_size=embedding_size,
        decoder_num_units=decoder_units_per_layer[-1],
        decoder_cells=decoder_cells,
        weighted_encoder_outputs=weighted_encoder_outputs,
        name=scope,
    )
    decoder_outputs, _ = attention_decoder.apply_over_sequence(
        model=model,
        inputs=embedded_decoder_inputs,
        seq_lengths=input_lengths,
        initial_states=states,
    )
    # we do softmax over the whole sequence
    # (max_length in the batch * batch_size) x decoder embedding size
    # -1 because we don't know max_length yet
    decoder_outputs_flattened, _ = model.net.Reshape(
        [decoder_outputs],
        [
            'decoder_outputs_flattened',
            'decoder_outputs_and_contexts_combination_old_shape',
        ],
        shape=[-1, attention_decoder.get_output_dim()],
    )
    decoder_outputs = decoder_outputs_flattened
    decoder_output_dim = attention_decoder.get_output_dim()
    return (decoder_outputs, decoder_output_dim)
def output_projection(
    model,
    decoder_outputs,
    decoder_output_size,
    target_vocab_size,
    decoder_softmax_size,
):
    """Project decoder outputs to vocabulary logits, optionally via a
    smaller intermediate fc layer (``decoder_softmax_size``)."""
    if decoder_softmax_size is not None:
        # Reduce dimensionality before the (potentially huge) vocab projection.
        decoder_outputs = brew.fc(
            model,
            decoder_outputs,
            'decoder_outputs_scaled',
            dim_in=decoder_output_size,
            dim_out=decoder_softmax_size,
        )
        decoder_output_size = decoder_softmax_size
    output_projection_w = model.param_init_net.XavierFill(
        [],
        'output_projection_w',
        shape=[target_vocab_size, decoder_output_size],
    )
    output_projection_b = model.param_init_net.XavierFill(
        [],
        'output_projection_b',
        shape=[target_vocab_size],
    )
    model.params.extend([output_projection_w, output_projection_b])
    return model.net.FC(
        [decoder_outputs, output_projection_w, output_projection_b],
        ['output_logits'],
    )
| {
"content_hash": "834d73b158546ca3894ee7438dfe9721",
"timestamp": "",
"source": "github",
"line_count": 670,
"max_line_length": 80,
"avg_line_length": 30.31044776119403,
"alnum_prop": 0.5745519007287768,
"repo_name": "xzturn/caffe2",
"id": "d0702880c1ec7efbaa007f5ed3fe95dda2d25c3a",
"size": "20378",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "caffe2/python/models/seq2seq/seq2seq_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3296"
},
{
"name": "C",
"bytes": "678918"
},
{
"name": "C++",
"bytes": "5480393"
},
{
"name": "CMake",
"bytes": "323261"
},
{
"name": "CSS",
"bytes": "2196"
},
{
"name": "Cuda",
"bytes": "2013333"
},
{
"name": "HTML",
"bytes": "5203"
},
{
"name": "Makefile",
"bytes": "15290"
},
{
"name": "Metal",
"bytes": "41257"
},
{
"name": "Objective-C",
"bytes": "4053"
},
{
"name": "Objective-C++",
"bytes": "249566"
},
{
"name": "Python",
"bytes": "3658352"
},
{
"name": "Shell",
"bytes": "65206"
}
],
"symlink_target": ""
} |
import serial
import time
import datetime
import sys
import paddle.v2 as paddle
from PIL import Image
import numpy as np
import cv2
from vgg import vgg_bn_drop
def multilayer_perceptron(img):
    """Two ReLU hidden layers (128, 64) followed by a 4-way softmax."""
    layer = paddle.layer.fc(input=img, size=128, act=paddle.activation.Relu())
    layer = paddle.layer.fc(input=layer, size=64, act=paddle.activation.Relu())
    return paddle.layer.fc(input=layer, size=4, act=paddle.activation.Softmax())
camera_port = 0
# Number of warm-up frames to grab (and discard) before real captures.
ramp_frames = 1
camera = cv2.VideoCapture(camera_port)
# Property ids 3/4 are OpenCV's CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT.
camera.set(3,320)
camera.set(4,240)
def get_image():
    """Grab a single frame from the global camera.

    The read() success flag is discarded; the returned frame may therefore
    be invalid if the capture failed.
    """
    retval, im = camera.read()
    return im
# Serial link to the microcontroller; the short timeout keeps the poll loop
# responsive (readline returns quickly when no data arrived).
ser = serial.Serial('/dev/cu.usbserial', 9600, timeout=0.001)
words = '1'
# Warm up the camera by grabbing and discarding the first frame(s).
for i in range(ramp_frames):
    print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'))
    temp = get_image()
# Input dimensionality: 3 channels x 320 x 240 pixels, flattened.
datadim = 3 * 320 * 240
classdim = 4
# PaddlePaddle init
paddle.init(use_gpu=False, trainer_count=1)
image = paddle.layer.data(
    name="image", type=paddle.data_type.dense_vector(datadim))
net = multilayer_perceptron(image)
out = paddle.layer.fc(
    input=net, size=classdim, act=paddle.activation.Softmax())
lbl = paddle.layer.data(
    name="label", type=paddle.data_type.integer_value(classdim))
cost = paddle.layer.classification_cost(input=out, label=lbl)
# Main loop: when the device sends '6', capture a frame, classify it, and
# send the predicted class index back over serial.
while(1):
    line = ser.readline().decode()
    #print line
    #print(line)
    if line == '6':
        print("Receive "+line)
        # for i in range(ramp_frames):
        #print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'))
        #temp = get_image()
        #print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'))
        #print("Taking image...")
        camera_capture = get_image()
        #print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'))
        #print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'))
        #cv2.imwrite(file, camera_capture)
        # Preprocess: HWC uint8 -> flattened CHW float in [0, 1].
        im = np.array(camera_capture).astype(np.float32)
        im = im.transpose((2, 0, 1)) # CHW
        im = im.flatten()
        im = im / 255.0
        test_data = []
        test_data.append((im,))
        # NOTE(review): parameters are re-read from disk on every trigger;
        # consider loading them once before the loop. Also, the tar file is
        # opened in text mode ('r') -- binary mode ('rb') is likely
        # intended; confirm against paddle.parameters.Parameters.from_tar.
        with open('params_pass_199.tar', 'r') as f:
            parameters = paddle.parameters.Parameters.from_tar(f)
        probs = paddle.infer(
            output_layer=out, parameters=parameters, input=test_data)
        lab = np.argsort(-probs) # probs and lab are the results of one batch data
        #print "Label of image/dog.png is: %d" % lab[0][0]
        # lab[0][0] is the index of the highest-probability class.
        ser.write(str(lab[0][0]))
        print(str(lab[0][0]))
        #if words=="1":
        # words="2"
        #elif words=="2":
        # words="3"
        #elif words=="3":
        # words="4"
        #elif words=="4":
        # words="5"
        #elif words=="5":
        # words="1"
ser.close()
del(camera)
| {
"content_hash": "1d3a7e866c06103cd8fce65fc3c4e363",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 83,
"avg_line_length": 25.8,
"alnum_prop": 0.5979563072586328,
"repo_name": "sorting4peach/sorting4peach",
"id": "c00b76497fe8c02bc2cfd77ff0f96380a312864e",
"size": "2838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "application/four_grade_classfication.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "6376"
},
{
"name": "C",
"bytes": "19225"
},
{
"name": "HTML",
"bytes": "1394"
},
{
"name": "Python",
"bytes": "10535"
}
],
"symlink_target": ""
} |
"""
celery.contrib.rdb
==================
Remote debugger for Celery tasks running in multiprocessing pool workers.
Inspired by http://snippets.dzone.com/posts/show/7248
**Usage**
.. code-block:: python
from celery.contrib import rdb
from celery import task
@task()
def add(x, y):
result = x + y
rdb.set_trace()
return result
**Environment Variables**
.. envvar:: CELERY_RDB_HOST
Hostname to bind to. Default is '127.0.0.1', which means the socket
will only be accessible from the local host.
.. envvar:: CELERY_RDB_PORT
Base port to bind to. Default is 6899.
The debugger will try to find an available port starting from the
base port. The selected port will be logged by the worker.
"""
from __future__ import absolute_import
from __future__ import with_statement
import errno
import os
import socket
import sys
from pdb import Pdb
from billiard import current_process
from celery.platforms import ignore_errno
default_port = 6899

# Bind host/base-port, overridable via the environment (see module docstring).
CELERY_RDB_HOST = os.environ.get('CELERY_RDB_HOST') or '127.0.0.1'
CELERY_RDB_PORT = int(os.environ.get('CELERY_RDB_PORT') or default_port)

#: Holds the currently active debugger.
_current = [None]

# sys._getframe fetched via getattr (it is a CPython implementation detail).
_frame = getattr(sys, '_getframe')
class Rdb(Pdb):
    """Pdb subclass that serves the debug session over a TCP socket, so a
    process without a usable tty (e.g. a pool worker) can be debugged
    remotely, typically via telnet."""

    me = 'Remote Debugger'
    _prev_outs = None
    _sock = None

    def __init__(self, host=CELERY_RDB_HOST, port=CELERY_RDB_PORT,
            port_search_limit=100, port_skew=+0, out=sys.stdout):
        self.active = True
        self.out = out
        # Keep the real stdin/stdout so _close_session() can restore them.
        self._prev_handles = sys.stdin, sys.stdout
        self._sock, this_port = self.get_avail_port(
            host, port, port_search_limit, port_skew,
        )
        self._sock.listen(1)
        me = '%s:%s' % (self.me, this_port)
        context = self.context = {'me': me, 'host': host, 'port': this_port}
        self.say('%(me)s: Please telnet %(host)s %(port)s.'
                 ' Type `exit` in session to continue.' % context)
        self.say('%(me)s: Waiting for client...' % context)
        # Blocks until the remote debugging client connects.
        self._client, address = self._sock.accept()
        context['remote_addr'] = ':'.join(map(str, address))
        self.say('%(me)s: In session with %(remote_addr)s' % context)
        # Route this process' stdin/stdout through the client socket so the
        # inherited Pdb machinery talks to the remote side.
        self._handle = sys.stdin = sys.stdout = self._client.makefile('rw')
        Pdb.__init__(self, completekey='tab',
                     stdin=self._handle, stdout=self._handle)

    def get_avail_port(self, host, port, search_limit=100, skew=+0):
        """Bind a new socket to the first free port at or above
        ``port + skew``; the skew defaults to the pool worker index parsed
        from the current process name so concurrent workers pick distinct
        ports. Returns (socket, port)."""
        try:
            _, skew = current_process().name.split('-')
            skew = int(skew)
        except ValueError:
            # Process name has no numeric suffix; keep the given skew.
            pass
        this_port = None
        for i in xrange(search_limit):
            _sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            this_port = port + skew + i
            try:
                _sock.bind((host, this_port))
            except socket.error, exc:
                if exc.errno in [errno.EADDRINUSE, errno.EINVAL]:
                    # Port taken (or invalid); try the next one.
                    continue
                raise
            else:
                return _sock, this_port
        else:
            raise Exception(
                '%s: Could not find available port. Please set using '
                'environment variable CELERY_RDB_PORT' % (self.me, ))

    def say(self, m):
        # Status messages go to the real output stream, not the session.
        self.out.write(m + '\n')

    def _close_session(self):
        """Restore stdin/stdout and tear down the client connection."""
        self.stdin, self.stdout = sys.stdin, sys.stdout = self._prev_handles
        self._handle.close()
        self._client.close()
        self._sock.close()
        self.active = False
        self.say('%(me)s: Session %(remote_addr)s ended.' % self.context)

    def do_continue(self, arg):
        # End the remote session before letting execution continue.
        self._close_session()
        self.set_continue()
        return 1
    do_c = do_cont = do_continue

    def do_quit(self, arg):
        # End the remote session before quitting the debugger.
        self._close_session()
        self.set_quit()
        return 1
    do_q = do_exit = do_quit

    def set_trace(self, frame=None):
        if frame is None:
            frame = _frame().f_back
        # The client may disconnect abruptly; ignore the resulting reset.
        with ignore_errno(errno.ECONNRESET):
            Pdb.set_trace(self, frame)

    def set_quit(self):
        # this raises a BdbQuit exception that we are unable to catch.
        sys.settrace(None)
def debugger():
    """Return the active Rdb instance, creating a new one if none exists
    or the previous session has ended."""
    rdb = _current[0]
    if rdb is not None and rdb.active:
        return rdb
    _current[0] = Rdb()
    return _current[0]
def set_trace(frame=None):
    """Set breakpoint at current location, or a specified frame."""
    # Default to the caller's frame, mirroring pdb.set_trace().
    target = frame if frame is not None else _frame().f_back
    return debugger().set_trace(target)
| {
"content_hash": "ef53603f523d6e2a4b1eb2222e2c5ede",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 76,
"avg_line_length": 28.56875,
"alnum_prop": 0.586304966090571,
"repo_name": "mozilla/firefox-flicks",
"id": "43cc6a42191e9d41b3f4b51b2170a9f780965950",
"size": "4595",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vendor-local/lib/python/celery/contrib/rdb.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "68358"
},
{
"name": "HTML",
"bytes": "337116"
},
{
"name": "JavaScript",
"bytes": "44816"
},
{
"name": "Puppet",
"bytes": "6653"
},
{
"name": "Python",
"bytes": "4166155"
},
{
"name": "Shell",
"bytes": "2409"
}
],
"symlink_target": ""
} |
LISTENING_ADDRESS = '127.0.0.1'
# Web server port; if this port is in use, another random port will be selected.
LISTENING_PORT = 8000
# Url path to access OmniDB; default is empty.
CUSTOM_PATH = ''
# Number of seconds between each password prompt request. Default: 30 minutes.
PWD_TIMEOUT_TOTAL = 1800
# Security parameters
# IS_SSL = True requires the SSL_CERTIFICATE_FILE and SSL_KEY_FILE parameters.
# Enabling SSL is highly recommended to protect information in transit.
IS_SSL = False
SSL_CERTIFICATE_FILE = '/path/to/cert_file'
SSL_KEY_FILE = '/path/to/key_file'
#SESSION_COOKIE_SECURE = True
#CSRF_COOKIE_SECURE = True
# Trusted origins; use this parameter if OmniDB is configured with SSL and is accessed from another domain.
CSRF_TRUSTED_ORIGINS = []
# Max number of threads that can be used by each advanced object search request.
THREAD_POOL_MAX_WORKERS = 2
# List of domains that OmniDB can serve. '*' serves all domains.
ALLOWED_HOSTS = ['*']
# Session cookie name
SESSION_COOKIE_NAME = 'omnidb_sessionid'
# CSRF cookie name
CSRF_COOKIE_NAME = 'omnidb_csrftoken'
### PostgreSQL Database Backend ################################################
#DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'NAME': 'dbname',
# 'USER': 'postgres',
# 'PASSWORD': '',
# 'HOST': '127.0.0.1',
# 'PORT': '5432',
# }
#}
### LDAP Authentication ########################################################
#import ldap
#import django_auth_ldap.config
#from django_auth_ldap.config import LDAPSearch
#AUTH_LDAP_SERVER_URI = 'SERVER'
#AUTH_LDAP_BIND_DN = "uid=example,dc=example,dc=com"
#AUTH_LDAP_BIND_PASSWORD = "password"
#AUTH_LDAP_USER_SEARCH = django_auth_ldap.config.LDAPSearch(
# "uid=example,dc=example,dc=com", ldap.SCOPE_SUBTREE, "uid=%(user)s"
# )
#AUTHENTICATION_BACKENDS = [
# 'django_auth_ldap.backend.LDAPBackend',
# 'django.contrib.auth.backends.ModelBackend'
#]
| {
"content_hash": "7f94cabbfce72d3de7c64f32cbcfcef7",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 110,
"avg_line_length": 30.723076923076924,
"alnum_prop": 0.6464697045568353,
"repo_name": "OmniDB/OmniDB",
"id": "3db475beca588d976382d07b2b9b89e5e6cb7285",
"size": "2092",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OmniDB/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "19630"
},
{
"name": "C++",
"bytes": "302"
},
{
"name": "CSS",
"bytes": "304604"
},
{
"name": "Dockerfile",
"bytes": "13652"
},
{
"name": "HTML",
"bytes": "95804"
},
{
"name": "JavaScript",
"bytes": "20832908"
},
{
"name": "Makefile",
"bytes": "264"
},
{
"name": "PLpgSQL",
"bytes": "6153"
},
{
"name": "Python",
"bytes": "2766750"
},
{
"name": "Ruby",
"bytes": "25824"
},
{
"name": "SQLPL",
"bytes": "88625"
},
{
"name": "Shell",
"bytes": "59204"
},
{
"name": "TSQL",
"bytes": "88280"
}
],
"symlink_target": ""
} |
import os
import re
import sys
# Running totals used by check() to compute the average pylint score.
total = 0.0
count = 0
# Root directory to scan; overwritten from sys.argv when run as a script.
BASE_DIRECTORY = os.getcwd()
# Report verbosity: 'f' = echo full report, 'e' = errors/warnings only.
EXTENDED = ""
# pylint output format, 'text' or 'html'.
TYPE = "text"
# Directories and files that are never checked.
excluded_directories = ["develop_scripts", "tests", "docs", "demo"]
excluded_files = ["__init__.py", "setup.py", "custom_assertions.py", "conftest.py"]
# Raw lines from pylint's html output that parse_html() rewrites.
expected_html_tags = ["<table>", "<html>"]
# NOTE(review): expected_parsable_lines appears unused in this module.
expected_parsable_lines = ["<h2>Global evaluation</h2>"]
def parse_html(line):
    """Map a recognized raw HTML tag in *line* to its report replacement.

    Returns None when *line* contains neither expected tag.
    """
    replacements = {
        "<html>": "",
        "<table>": '<table class="table" style="width:auto;">',
    }
    for tag, replacement in replacements.items():
        if tag in line:
            return replacement
def parse_line(line):
    """Rewrite pylint's global-evaluation heading for a per-file report.

    Returns None when *line* is not the global-evaluation heading.
    """
    heading = "<h2>File evaluation</h2>"
    return heading if "Global evaluation" in line else None
def check(module):
    """Run pylint on *module* (a ``.py`` path), echo its report and add the
    score to the module-level ``total``/``count`` accumulators.

    Non-Python files are ignored. Output depends on the globals EXTENDED
    ('f' = full report, 'e' = C:/W:/E: lines only) and TYPE ('html' wraps
    each report in a collapsible Bootstrap panel).
    """
    global total, count, BASE_DIRECTORY
    if module[-3:] != ".py":
        return
    pout = os.popen('pylint {} --output-format={}'.format(module, TYPE), 'r')
    module = module.replace("../", "")
    if TYPE == "html":
        print('<button data-toggle="collapse" data-target="#{1}" class="btn btn-default" style="width: 500px;">'
              '{0}</button> <div id="{1}" class="collapse">'
              .format(module, module.replace(".", "-").replace("\\", "-")))
    else:
        # BUG FIX: the original built this message and silently discarded it.
        print("Checking : {0}".format(module))
    for line in pout:
        if line.strip() in expected_html_tags:
            print(parse_html(line))
        elif EXTENDED == "f":
            print(line)
        elif EXTENDED == "e" and line[0:2] in ["C:", "W:", "E:"]:
            print(line)
        elif "Your code has been rated at" in line:
            print(line)
        if "Your code has been rated at" in line:
            # BUG FIX: keep the sign inside the capture group (negative
            # scores were recorded as positive) and escape the decimal point.
            score = re.findall(r"(-?\d+\.\d+)", line)[0]
            total += float(score)
            count += 1
    pout.close()  # BUG FIX: release the pipe to the pylint subprocess
    print("-" * 50 + "\n")
    if TYPE == "html":
        print("</div></br>")
def print_header():
    """Emit the opening <html><head> section with jQuery/Bootstrap assets."""
    assets = (
        '<script src="http://code.jquery.com/jquery-latest.min.js" type="text/javascript"></script>'
        '<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css" '
        'integrity="sha384-BVYiiSIFeK1dGmJRAkycuHAHRg32OmUcww7on3RYdg4Va+PmSTsz/K68vbdEjh4u" '
        'crossorigin="anonymous">'
        '<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"'
        ' integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" '
        'crossorigin="anonymous"></script>'
    )
    print("<html><head>")
    print(assets)
    print("</head>")
if __name__ == "__main__":
    # usage: code_check.py BASE_DIRECTORY EXTENDED TYPE [output_file]
    BASE_DIRECTORY = sys.argv[1]
    EXTENDED = sys.argv[2]
    TYPE = sys.argv[3]
    if len(sys.argv) > 4:
        # Redirect all report output to the given file.
        sys.stdout = open(sys.argv[4], 'w+')
    if TYPE == "html":
        print_header()
    for root, dirs, files in os.walk(BASE_DIRECTORY):
        # Prune excluded directories in place so os.walk skips them.
        for ignore in excluded_directories:
            if ignore in dirs:
                dirs.remove(ignore)
        for name in files:
            if name in excluded_files:
                continue
            check(os.path.join(root, name))
    brk = "</br>" if TYPE == "html" else "\n"
    print(brk + "%d modules found" % count)
    # BUG FIX: guard against ZeroDivisionError when no module was scored.
    if count:
        print(brk + "AVERAGE SCORE = %.02f" % (total / count))
    else:
        print(brk + "AVERAGE SCORE = N/A (no modules scored)")
| {
"content_hash": "cc9c9a8946b30d9da1fd82452dd3b56d",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 116,
"avg_line_length": 33.57446808510638,
"alnum_prop": 0.5506970849176173,
"repo_name": "DrimTim32/py_proj_lights",
"id": "1a61fd8ac1eb88230e2dadc2cbfd2982ee575fe3",
"size": "3156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "develop_scripts/code_check/code_check.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1143"
},
{
"name": "PowerShell",
"bytes": "3085"
},
{
"name": "Python",
"bytes": "126124"
}
],
"symlink_target": ""
} |
"""Utilities for reading LSA-SAF product files.
This module contains some functions for reading the data products produced by
the Land Surface Analysis Satellite Applications Facility (http://landsaf.meteo.pt).
The data products are distributed in HDF5 format and therefore the module requires
the PyTables package. Numpy is also used.
"""
import os
from bz2 import BZ2File
from datetime import datetime
import numpy as np
import tables as h5
from numpy import ma
def parse_file_name(file_name):
    """Parse an LSA-SAF file name to get the slot time.

    A datetime object containing the slot time (in UTC) is returned.
    """
    # File names end with a _YYYYMMDDhhmm timestamp, optionally followed by
    # an extension, e.g. HDF5_LSASAF_MSG_DSLF_SAfr_200702211000.h5 or
    # S-LSA_-HDF5_LSASAF_MSG_DSLF_SAfr_200707260830
    indx = file_name.rfind('_') + 1
    year = int(file_name[indx:indx + 4])
    month = int(file_name[indx + 4:indx + 6])
    day = int(file_name[indx + 6:indx + 8])
    hour = int(file_name[indx + 8:indx + 10])
    # FIX: renamed 'min' -> 'minute'; the original shadowed the builtin min()
    minute = int(file_name[indx + 10:indx + 12])
    return datetime(year, month, day, hour, minute)
def _read_raw(file_name, data_node_name, quality_node_name):
    """Return the raw data and quality control flags.

    This function returns the data as stored in the HDF5 data and q_flag
    arrays. The scaling factors are applied. Use this function if you need
    to do your own (non-standard) masking of the LSA-SAF data. Numpy arrays
    are returned with the same shape as the HDF5 data arrays. The returned
    data array has type float32 and the flags array has the same type as
    the data in the HDF5 file.
    """
    h5file = h5.openFile(file_name)
    try:
        node = h5file.getNode(data_node_name)
        data = np.asarray(node.read(), np.float32)
        # Undo the integer packing applied by the product generator.
        if node._v_attrs.SCALING_FACTOR != 1:
            data /= node._v_attrs.SCALING_FACTOR
        flags = h5file.getNode(quality_node_name).read()
    finally:
        # BUG FIX: close the HDF5 handle even when a node lookup/read fails;
        # the original leaked the open file on any exception.
        h5file.close()
    return data, flags
def read_lst(file_name):
    """Get a masked array containing the LST values.

    Sea, space and severely contaminated pixels are masked out. The masked
    array returned by this function contains the LST in degrees Centigrade.
    ``.bz2`` inputs are transparently decompressed to a temporary file,
    because _read_raw() requires an uncompressed HDF5 file.
    """
    if file_name[-3:] == 'bz2':
        # Function-scope import keeps the module's import block unchanged.
        import tempfile
        # BUG FIX: use a unique temporary file instead of the fixed name
        # 'temp.h5', which collided when two processes ran concurrently in
        # the same directory; also guarantee cleanup if reading fails.
        fd, temp_fname = tempfile.mkstemp(suffix='.h5')
        try:
            bz2_file = BZ2File(file_name)
            try:
                fp = os.fdopen(fd, 'wb')
                try:
                    fp.write(bz2_file.read())
                finally:
                    fp.close()
            finally:
                bz2_file.close()
            data, flags = _read_raw(temp_fname, '/LST', '/Q_FLAGS')
        finally:
            os.remove(temp_fname)
    else:
        data, flags = _read_raw(file_name, '/LST', '/Q_FLAGS')
    # mask based on the quality flags
    data = ma.masked_where(flags == 0, data)# sea pixel
    data = ma.masked_where(flags == 4, data)# corrupted pixel
    data = ma.masked_where(flags == 12, data)# CMa - pixel non processed
    data = ma.masked_where(flags == 44, data)# CMa - pixel contaminated by clouds
    data = ma.masked_where(flags == 60, data)# CMa - Cloud filled
    data = ma.masked_where(flags == 76, data)# CMa - contaminated by snow/ice
    data = ma.masked_where(flags == 92, data)# CMa - Undefined
    data = ma.masked_where(flags == 28, data)# Emissivity Information Missing
    data = ma.masked_where(flags == 156, data)# Viewing Angle Out of Range (EM Poor)
    data = ma.masked_where(flags == 284, data)# Viewing Angle Out of Range (EM Nominal)
    data = ma.masked_where(flags == 412, data)# Viewing Angle Out of Range (EM Excellent)
    data = ma.masked_where(flags == 668, data)# cwv information missing
    data = ma.masked_where(flags == 796, data)# cwv information missing
    data = ma.masked_where(flags == 924, data)# cwv information missing
##    data = ma.masked_where(flags == 5790, data)# Below Nominal (+ EM below nominal)
##    data = ma.masked_where(flags == 5918, data)# Below Nominal (+ EM nominal)
##    data = ma.masked_where(flags == 6046, data)# Below Nominal (+ EM above nominal)
##    data = ma.masked_where(flags == 10014, data)# Nominal (EM nominal)
##    data = ma.masked_where(flags == 10142, data)# Nominal (EM above nominal)
##    data = ma.masked_where(flags == 14238, data)# Above Nominal (EM above nominal)
    return data
def read_dslf(file_name):
    """Get a masked array containing the DSLF values.

    Sea, space and severely contaminated pixels are masked out. The masked
    array returned by this function contains the DSLF in W/m^2.
    """
    data, flags = _read_raw(file_name, '/DSLF', '/Q_FLAGS')
    # Quality-flag values that are always masked out, with the meaning each
    # value carries in the DSLF product flag field.
    always_masked = (
        0,    # sea or space pixel
        4,    # T2m missing
        12,   # CMa - pixel non processed
        92,   # CMa - Undefined
        156,  # TPW information missing
        44,   # CTTH_EFFECTIVE missing (CMa - pixel contaminated by clouds)
        60,   # CTTH_EFFECTIVE missing (CMa - Cloud filled)
        76,   # CTTH_EFFECTIVE missing (CMa - contaminated by snow/ice)
        812,  # Td2m missing (CMa - pixel contaminated by clouds)
        828,  # Td2m missing (CMa - Cloud filled)
        844,  # Td2m missing (CMa - contaminated by snow/ice)
    )
    for flag_value in always_masked:
        data = ma.masked_where(flags == flag_value, data)
    # "Below/Above Nominal" confidence values (11422, 19614, 27806, 13102,
    # 21294, 29486, 13118, 21310, 29502, 13134, 21326, 29518) are
    # deliberately left unmasked, as in the original implementation.
    return data
def _get_bit_value(n, p):
"""
get the bitvalue of denary (base 10) number n at the equivalent binary
position p (binary count starts at position 0 from the right)
"""
return (n >> p) & 1
def _test_bit_values(n, p):
"""
Test the bitvalue of denary (base 10) number n at the equivalent binary
position p (binary count starts at position 0 from the right)
"""
_get_bit_value(n, p)
return (n >> p) & 1
def read_dssf(file_name):
    """Get a masked array containing the DSSF values.

    Sea, space and severely contaminated pixels are masked out. The mask is
    defined according to the bitfield specified in SAF_LAND_MF_PUM_DSSF_1.4.pdf.
    The masked array returned by this function contains the DSSF in W/m^2.
    """
    data, flags = _read_raw(file_name, '/DSSF', '/DSSF_Q_Flag')
    # mask based on the quality flags [THIS IS STILL INCOMPLETE!!!]
    always_masked = (
        0,  # ocean pixel [Always masked]
        2,  # space pixel [Always masked]
    )
    for flag_value in always_masked:
        data = ma.masked_where(flags == flag_value, data)
    return data
| {
"content_hash": "d16aaba08f4cd9903fd45f49e612d410",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 106,
"avg_line_length": 44.89655172413793,
"alnum_prop": 0.6504096262160778,
"repo_name": "sahg/SAHGutils",
"id": "c40d2e5960fd81f8be91a83cad040abce409be5d",
"size": "7812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sahgutils/io/landsaf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "218499"
}
],
"symlink_target": ""
} |
{
'!langcode!': 'en-us',
'!langname!': 'English (US)',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'"User Exception" debug mode. ': '"User Exception" debug mode. ',
'%s': '%s',
'%s %%{row} deleted': '%s %%{row} deleted',
'%s %%{row} updated': '%s %%{row} updated',
'%s selected': '%s selected',
'%s students registered': '%s students registered',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'(requires internet access, experimental)': '(requires internet access, experimental)',
'(something like "it-it")': '(something like "it-it")',
'(version %s)': '(version %s)',
'?': '?',
'Abort': 'Abort',
'About': 'About',
'About application': 'About application',
'Accept Terms': 'Accept Terms',
'Add breakpoint': 'Add breakpoint',
'Additional code for your application': 'Additional code for your application',
'Admin design page': 'Admin design page',
'admin disabled because no admin password': 'admin disabled because no admin password',
'admin disabled because not supported on google app engine': 'admin disabled because not supported on google app engine',
'admin disabled because too many invalid login attempts': 'admin disabled because too many invalid login attempts',
'admin disabled because unable to access password file': 'admin disabled because unable to access password file',
'Admin is disabled because insecure channel': 'Admin is disabled because insecure channel',
'Admin language': 'Admin language',
'Admin versioning page': 'Admin versioning page',
'administrative interface': 'administrative interface',
'Administrator Password:': 'Administrator Password:',
'and rename it:': 'and rename it:',
'App does not exist or you are not authorized': 'App does not exist or you are not authorized',
'appadmin': 'appadmin',
'appadmin is disabled because insecure channel': 'appadmin is disabled because insecure channel',
'Application': 'Application',
'application "%s" uninstalled': 'application "%s" uninstalled',
'Application cannot be generated in demo mode': 'Application cannot be generated in demo mode',
'application compiled': 'application compiled',
'Application exists already': 'Application exists already',
'application is compiled and cannot be designed': 'application is compiled and cannot be designed',
'Application name:': 'Application name:',
'Application updated via git pull': 'Application updated via git pull',
'are not used': 'are not used',
'are not used yet': 'are not used yet',
'Are you sure you want to delete file "%s"?': 'Are you sure you want to delete file "%s"?',
'Are you sure you want to delete plugin "%s"?': 'Are you sure you want to delete plugin "%s"?',
'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',
'Are you sure you want to uninstall application "%s"?': 'Are you sure you want to uninstall application "%s"?',
'Are you sure?': 'Are you sure?',
'arguments': 'arguments',
'at char %s': 'at char %s',
'at line %s': 'at line %s',
'ATTENTION:': 'ATTENTION:',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': 'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.',
'ATTENTION: you cannot edit the running application!': 'ATTENTION: you cannot edit the running application!',
'Autocomplete Python Code': 'Autocomplete Python Code',
'Available Databases and Tables': 'Available Databases and Tables',
'back': 'back',
'Back to the plugins list': 'Back to the plugins list',
'Back to wizard': 'Back to wizard',
'Basics': 'Basics',
'Begin': 'Begin',
'breakpoint': 'breakpoint',
'Breakpoints': 'Breakpoints',
'breakpoints': 'breakpoints',
'Bulk Register': 'Bulk Register',
'Bulk Student Registration': 'Bulk Student Registration',
'Cache': 'Cache',
'cache': 'cache',
'Cache Cleared': 'Cache Cleared',
'Cache Keys': 'Cache Keys',
'cache, errors and sessions cleaned': 'cache, errors and sessions cleaned',
'can be a git repo': 'can be a git repo',
'Cancel': 'Cancel',
'Cannot be empty': 'Cannot be empty',
'Cannot compile: there are errors in your app:': 'Cannot compile: there are errors in your app:',
'cannot create file': 'cannot create file',
'cannot upload file "%(filename)s"': 'cannot upload file "%(filename)s"',
'Change Admin Password': 'Change Admin Password',
'Change admin password': 'Change admin password',
'change editor settings': 'change editor settings',
'Changelog': 'Changelog',
'check all': 'check all',
'Check for upgrades': 'Check for upgrades',
'Check to delete': 'Check to delete',
'Checking for upgrades...': 'Checking for upgrades...',
'Clean': 'Clean',
'Clear': 'Clear',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Click row to expand traceback': 'Click row to expand traceback',
'Click row to view a ticket': 'Click row to view a ticket',
'code': 'code',
'Code listing': 'Code listing',
'collapse/expand all': 'collapse/expand all',
'Command': 'Command',
'Comment:': 'Comment:',
'Commit': 'Commit',
'Commit form': 'Commit form',
'Committed files': 'Committed files',
'Compile': 'Compile',
'Compile (all or nothing)': 'Compile (all or nothing)',
'Compile (skip failed views)': 'Compile (skip failed views)',
'compiled application removed': 'compiled application removed',
'Condition': 'Condition',
'continue': 'continue',
'Controllers': 'Controllers',
'controllers': 'controllers',
'Count': 'Count',
'Create': 'Create',
'create file with filename:': 'create file with filename:',
'Create/Upload': 'Create/Upload',
'created by': 'created by',
'Created by:': 'Created by:',
'Created On': 'Created On',
'Created on:': 'Created on:',
'crontab': 'crontab',
'Current request': 'Current request',
'Current response': 'Current response',
'Current session': 'Current session',
'currently running': 'currently running',
'currently saved or': 'currently saved or',
'data uploaded': 'data uploaded',
'Database': 'Database',
'Database %s select': 'Database %s select',
'Database administration': 'Database administration',
'database administration': 'database administration',
'Database Administration (appadmin)': 'Database Administration (appadmin)',
'Date and Time': 'Date and Time',
'db': 'db',
'Debug': 'Debug',
'defines tables': 'defines tables',
'Delete': 'Delete',
'delete': 'delete',
'delete all checked': 'delete all checked',
'delete plugin': 'delete plugin',
'Delete this file (you will be asked to confirm deletion)': 'Delete this file (you will be asked to confirm deletion)',
'Delete:': 'Delete:',
'deleted after first hit': 'deleted after first hit',
'Demo': 'Demo',
'Deploy': 'Deploy',
'Deploy on Google App Engine': 'Deploy on Google App Engine',
'Deploy to OpenShift': 'Deploy to OpenShift',
'Deploy to pythonanywhere': 'Deploy to pythonanywhere',
'Deploy to PythonAnywhere': 'Deploy to PythonAnywhere',
'Deployment form': 'Deployment form',
'Deployment Interface': 'Deployment Interface',
'Description:': 'Description:',
'design': 'design',
'Detailed traceback description': 'Detailed traceback description',
'details': 'details',
'direction: ltr': 'direction: ltr',
'directory not found': 'directory not found',
'Disable': 'Disable',
'Disabled': 'Disabled',
'disabled in demo mode': 'disabled in demo mode',
'disabled in GAE mode': 'disabled in GAE mode',
'disabled in multi user mode': 'disabled in multi user mode',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Display line numbers': 'Display line numbers',
'DO NOT use the "Pack compiled" feature.': 'DO NOT use the "Pack compiled" feature.',
'docs': 'docs',
'Docs': 'Docs',
'done!': 'done!',
'Downgrade': 'Downgrade',
'Download .w2p': 'Download .w2p',
'Download as .exe': 'Download as .exe',
'download layouts': 'download layouts',
'Download layouts from repository': 'Download layouts from repository',
'download plugins': 'download plugins',
'Download plugins from repository': 'Download plugins from repository',
'Edit': 'Edit',
'edit all': 'edit all',
'Edit application': 'Edit application',
'edit controller:': 'edit controller:',
'Edit current record': 'Edit current record',
'edit views:': 'edit views:',
'Editing %s': 'Editing %s',
'Editing Language file': 'Editing Language file',
'Editing Plural Forms File': 'Editing Plural Forms File',
'Editor': 'Editor',
'Email Address': 'Email Address',
'Enable': 'Enable',
'Enable Close-Tag': 'Enable Close-Tag',
'Enable Code Folding': 'Enable Code Folding',
'Error': 'Error',
'Error logs for "%(app)s"': 'Error logs for "%(app)s"',
'Error snapshot': 'Error snapshot',
'Error ticket': 'Error ticket',
'Errors': 'Errors',
'Exception %(extype)s: %(exvalue)s': 'Exception %(extype)s: %(exvalue)s',
'Exception %s': 'Exception %s',
'Exception instance attributes': 'Exception instance attributes',
'Exit Fullscreen': 'Exit Fullscreen',
'Expand Abbreviation (html files only)': 'Expand Abbreviation (html files only)',
'export as csv file': 'export as csv file',
'Exports:': 'Exports:',
'exposes': 'exposes',
'exposes:': 'exposes:',
'extends': 'extends',
'failed to compile file because:': 'failed to compile file because:',
'failed to reload module because:': 'failed to reload module because:',
'File': 'File',
'file "%(filename)s" created': 'file "%(filename)s" created',
'file "%(filename)s" deleted': 'file "%(filename)s" deleted',
'file "%(filename)s" uploaded': 'file "%(filename)s" uploaded',
'file "%s" of %s restored': 'file "%s" of %s restored',
'file changed on disk': 'file changed on disk',
'file does not exist': 'file does not exist',
'file not found': 'file not found',
'file saved on %(time)s': 'file saved on %(time)s',
'file saved on %s': 'file saved on %s',
'filename': 'filename',
'Filename': 'Filename',
'Files added': 'Files added',
'filter': 'filter',
'Find Next': 'Find Next',
'Find Previous': 'Find Previous',
'Form has errors': 'Form has errors',
'Frames': 'Frames',
'Functions with no doctests will result in [passed] tests.': 'Functions with no doctests will result in [passed] tests.',
'GAE Email': 'GAE Email',
'GAE Output': 'GAE Output',
'GAE Password': 'GAE Password',
'Generate': 'Generate',
'Git Pull': 'Git Pull',
'Git Push': 'Git Push',
'Globals##debug': 'Globals##debug',
'go!': 'go!',
'Google App Engine Deployment Interface': 'Google App Engine Deployment Interface',
'Google Application Id': 'Google Application Id',
'Goto': 'Goto',
'Graph Model': 'Graph Model',
'graph model': 'graph model',
'Help': 'Help',
'here': 'here',
'Hide/Show Translated strings': 'Hide/Show Translated strings',
'Highlight current line': 'Highlight current line',
'Hits': 'Hits',
'Home': 'Home',
'honored only if the expression evaluates to true': 'honored only if the expression evaluates to true',
'If start the downgrade, be patient, it may take a while to rollback': 'If start the downgrade, be patient, it may take a while to rollback',
'If start the upgrade, be patient, it may take a while to download': 'If start the upgrade, be patient, it may take a while to download',
'If the report above contains a ticket number it indicates a failure in executing the controller, before any attempt to execute the doctests. This is usually due to an indentation error or an error outside function code.\n\t\tA green title indicates that all tests (if defined) passed. In this case test results are not shown.': 'If the report above contains a ticket number it indicates a failure in executing the controller, before any attempt to execute the doctests. This is usually due to an indentation error or an error outside function code.\n\t\tA green title indicates that all tests (if defined) passed. In this case test results are not shown.',
'If the report above contains a ticket number it indicates a failure in executing the controller, before any attempt to execute the doctests. This is usually due to an indentation error or an error outside function code.\nA green title indicates that all tests (if defined) passed. In this case test results are not shown.': 'If the report above contains a ticket number it indicates a failure in executing the controller, before any attempt to execute the doctests. This is usually due to an indentation error or an error outside function code.\nA green title indicates that all tests (if defined) passed. In this case test results are not shown.',
'if your application uses a database other than sqlite you will then have to configure its DAL in pythonanywhere.': 'if your application uses a database other than sqlite you will then have to configure its DAL in pythonanywhere.',
'import': 'import',
'Import/Export': 'Import/Export',
'In development, use the default Rocket webserver that is currently supported by this debugger.': 'In development, use the default Rocket webserver that is currently supported by this debugger.',
'includes': 'includes',
'Indent with tabs': 'Indent with tabs',
'inspect attributes': 'inspect attributes',
'Install': 'Install',
'Installation of %(plugin)s for %(app)s': 'Installation of %(plugin)s for %(app)s',
'Installed applications': 'Installed applications',
'Interaction at %s line %s': 'Interaction at %s line %s',
'Interactive console': 'Interactive console',
'internal error': 'internal error',
'internal error: %s': 'internal error: %s',
'Internal State': 'Internal State',
'Invalid action': 'Invalid action',
'Invalid application name': 'Invalid application name',
'invalid circular reference': 'invalid circular reference',
'Invalid git repository specified.': 'Invalid git repository specified.',
'invalid password': 'invalid password',
'invalid password.': 'invalid password.',
'Invalid Query': 'Invalid Query',
'invalid request': 'invalid request',
'Invalid request': 'Invalid request',
'invalid table names (auth_* tables already defined)': 'invalid table names (auth_* tables already defined)',
'invalid ticket': 'invalid ticket',
'Key': 'Key',
'Keyboard shortcuts': 'Keyboard shortcuts',
'kill process': 'kill process',
'language file "%(filename)s" created/updated': 'language file "%(filename)s" created/updated',
'Language files (static strings) updated': 'Language files (static strings) updated',
'languages': 'languages',
'Languages': 'Languages',
'Last Revision': 'Last Revision',
'Last saved on:': 'Last saved on:',
'License for': 'License for',
'License:': 'License:',
'Line Nr': 'Line Nr',
'Line number': 'Line number',
'lists by exception': 'lists by exception',
'lists by ticket': 'lists by ticket',
'Loading...': 'Loading...',
'Local Apps': 'Local Apps',
'locals': 'locals',
'Locals##debug': 'Locals##debug',
'Login': 'Login',
'Login successful': 'Login successful',
'Login to the Administrative Interface': 'Login to the Administrative Interface',
'Login/Register': 'Login/Register',
'Logout': 'Logout',
'lost password': 'lost password',
'Main Menu': 'Main Menu',
'Manage': 'Manage',
'Manage %(action)s': 'Manage %(action)s',
'Manage Access Control': 'Manage Access Control',
'Manage Admin Users/Students': 'Manage Admin Users/Students',
'Manage Cache': 'Manage Cache',
'Manage Students': 'Manage Students',
'Memberships': 'Memberships',
'merge': 'merge',
'Models': 'Models',
'models': 'models',
'Modified On': 'Modified On',
'Modules': 'Modules',
'modules': 'modules',
'Multi User Mode': 'Multi User Mode',
'new application "%s" created': 'new application "%s" created',
'new application "%s" imported': 'new application "%s" imported',
'New Application Wizard': 'New Application Wizard',
'New application wizard': 'New application wizard',
'new plugin installed': 'new plugin installed',
'New plugin installed: %s': 'New plugin installed: %s',
'New Record': 'New Record',
'new record inserted': 'new record inserted',
'New simple application': 'New simple application',
'next': 'next',
'next %s rows': 'next %s rows',
'NO': 'NO',
'no changes': 'no changes',
'No databases in this application': 'No databases in this application',
'No Interaction yet': 'No Interaction yet',
'no match': 'no match',
'no package selected': 'no package selected',
'no permission to uninstall "%s"': 'no permission to uninstall "%s"',
'Node:': 'Node:',
'Not Authorized': 'Not Authorized',
'Not supported': 'Not supported',
'Note: If you receive an error with github status code of 128, ensure the system and account you are deploying from has a cooresponding ssh key configured in the openshift account.': 'Note: If you receive an error with github status code of 128, ensure the system and account you are deploying from has a cooresponding ssh key configured in the openshift account.',
"On production, you'll have to configure your webserver to use one process and multiple threads to use this debugger.": "On production, you'll have to configure your webserver to use one process and multiple threads to use this debugger.",
'Open new app in new window': 'Open new app in new window',
'OpenShift Deployment Interface': 'OpenShift Deployment Interface',
'OpenShift Output': 'OpenShift Output',
'or alternatively': 'or alternatively',
'Or Get from URL:': 'Or Get from URL:',
'or import from csv file': 'or import from csv file',
'Original/Translation': 'Original/Translation',
'Overview': 'Overview',
'Overwrite installed app': 'Overwrite installed app',
'Pack all': 'Pack all',
'Pack compiled': 'Pack compiled',
'Pack custom': 'Pack custom',
'pack plugin': 'pack plugin',
'password changed': 'password changed',
'Past revisions': 'Past revisions',
'Path to appcfg.py': 'Path to appcfg.py',
'Path to local openshift repo root.': 'Path to local openshift repo root.',
'Peeking at file': 'Peeking at file',
'Permission': 'Permission',
'Permissions': 'Permissions',
'Please': 'Please',
'Please wait, giving pythonanywhere a moment...': 'Please wait, giving pythonanywhere a moment...',
'plugin "%(plugin)s" deleted': 'plugin "%(plugin)s" deleted',
'Plugin "%s" in application': 'Plugin "%s" in application',
'plugin not specified': 'plugin not specified',
'Plugin page': 'Plugin page',
'plugins': 'plugins',
'Plugins': 'Plugins',
'Plural Form #%s': 'Plural Form #%s',
'Plural-Forms:': 'Plural-Forms:',
'Powered by': 'Powered by',
'Preferences saved correctly': 'Preferences saved correctly',
'Preferences saved on session only': 'Preferences saved on session only',
'previous %s rows': 'previous %s rows',
'Private files': 'Private files',
'private files': 'private files',
'Project Progress': 'Project Progress',
'Pull': 'Pull',
'Pull failed, certain files could not be checked out. Check logs for details.': 'Pull failed, certain files could not be checked out. Check logs for details.',
'Pull is not possible because you have unmerged files. Fix them up in the work tree, and then try again.': 'Pull is not possible because you have unmerged files. Fix them up in the work tree, and then try again.',
'Push': 'Push',
'Push failed, there are unmerged entries in the cache. Resolve merge issues manually and try again.': 'Push failed, there are unmerged entries in the cache. Resolve merge issues manually and try again.',
'pygraphviz library not found': 'pygraphviz library not found',
'PythonAnywhere Apps': 'PythonAnywhere Apps',
'PythonAnywhere Password': 'PythonAnywhere Password',
'Query:': 'Query:',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Rapid Search': 'Rapid Search',
'Record': 'Record',
'record does not exist': 'record does not exist',
'Record id': 'Record id',
'refresh': 'refresh',
'register': 'register',
'Reload routes': 'Reload routes',
'Remove compiled': 'Remove compiled',
'Removed Breakpoint on %s at line %s': 'Removed Breakpoint on %s at line %s',
'Replace': 'Replace',
'Replace All': 'Replace All',
'Repository (%s)': 'Repository (%s)',
'request': 'request',
'requires distutils, but not installed': 'requires distutils, but not installed',
'requires python-git, but not installed': 'requires python-git, but not installed',
'Resolve Conflict file': 'Resolve Conflict file',
'response': 'response',
'restart': 'restart',
'restore': 'restore',
'return': 'return',
'Revert': 'Revert',
'revert': 'revert',
'reverted to revision %s': 'reverted to revision %s',
'Revision %s': 'Revision %s',
'Revision:': 'Revision:',
'Role': 'Role',
'Roles': 'Roles',
'Rows in Table': 'Rows in Table',
'Rows selected': 'Rows selected',
'rules are not defined': 'rules are not defined',
'Run tests': 'Run tests',
'Run tests in this file': 'Run tests in this file',
"Run tests in this file (to run all files, you may also use the button labelled 'test')": "Run tests in this file (to run all files, you may also use the button labelled 'test')",
'Running on %s': 'Running on %s',
'Save': 'Save',
'Save file:': 'Save file:',
'Save file: %s': 'Save file: %s',
'Save model as...': 'Save model as...',
'Save via Ajax': 'Save via Ajax',
'Saved file hash:': 'Saved file hash:',
'Screenshot %s': 'Screenshot %s',
'Search': 'Search',
'Select Files to Package': 'Select Files to Package',
'session': 'session',
'session expired': 'session expired',
'Session saved correctly': 'Session saved correctly',
'Session saved on session only': 'Session saved on session only',
'Set Breakpoint on %s at line %s: %s': 'Set Breakpoint on %s at line %s: %s',
'shell': 'shell',
'Showing %s to %s of %s %s found': 'Showing %s to %s of %s %s found',
'Singular Form': 'Singular Form',
'Site': 'Site',
'Size of cache:': 'Size of cache:',
'skip to generate': 'skip to generate',
'some files could not be removed': 'some files could not be removed',
'Something went wrong please wait a few minutes before retrying': 'Something went wrong please wait a few minutes before retrying',
'Sorry, could not find mercurial installed': 'Sorry, could not find mercurial installed',
'source : db': 'source : db',
'source : filesystem': 'source : filesystem',
'Start a new app': 'Start a new app',
'Start searching': 'Start searching',
'Start wizard': 'Start wizard',
'state': 'state',
'static': 'static',
'Static': 'Static',
'Static files': 'Static files',
'Statistics': 'Statistics',
'Step': 'Step',
'step': 'step',
'stop': 'stop',
'Submit': 'Submit',
'submit': 'submit',
'successful': 'successful',
'switch to : db': 'switch to : db',
'switch to : filesystem': 'switch to : filesystem',
'Tab width (# characters)': 'Tab width (# characters)',
'Table': 'Table',
'Temporary': 'Temporary',
'test': 'test',
'Testing application': 'Testing application',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.',
'The app exists, was created by wizard, continue to overwrite!': 'The app exists, was created by wizard, continue to overwrite!',
'The app exists, was NOT created by wizard, continue to overwrite!': 'The app exists, was NOT created by wizard, continue to overwrite!',
'The application logic, each URL path is mapped in one exposed function in the controller': 'The application logic, each URL path is mapped in one exposed function in the controller',
'The data representation, define database tables and sets': 'The data representation, define database tables and sets',
'The presentations layer, views are also known as templates': 'The presentations layer, views are also known as templates',
'Theme': 'Theme',
'There are no controllers': 'There are no controllers',
'There are no models': 'There are no models',
'There are no modules': 'There are no modules',
'There are no plugins': 'There are no plugins',
'There are no private files': 'There are no private files',
'There are no static files': 'There are no static files',
'There are no translators': 'There are no translators',
'There are no translators, only default language is supported': 'There are no translators, only default language is supported',
'There are no views': 'There are no views',
'These files are not served, they are only available from within your app': 'These files are not served, they are only available from within your app',
'These files are served without processing, your images go here': 'These files are served without processing, your images go here',
"This debugger may not work properly if you don't have a threaded webserver or you're using multiple daemon processes.": "This debugger may not work properly if you don't have a threaded webserver or you're using multiple daemon processes.",
'This is an experimental feature and it needs more testing. If you decide to downgrade you do it at your own risk': 'This is an experimental feature and it needs more testing. If you decide to downgrade you do it at your own risk',
'This is an experimental feature and it needs more testing. If you decide to upgrade you do it at your own risk': 'This is an experimental feature and it needs more testing. If you decide to upgrade you do it at your own risk',
"This page can commit your changes to an openshift app repo and push them to your cloud instance. This assumes that you've already created the application instance using the web2py skeleton and have that repo somewhere on a filesystem that this web2py instance can access. This functionality requires GitPython installed and on the python path of the runtime that web2py is operating in.": "This page can commit your changes to an openshift app repo and push them to your cloud instance. This assumes that you've already created the application instance using the web2py skeleton and have that repo somewhere on a filesystem that this web2py instance can access. This functionality requires GitPython installed and on the python path of the runtime that web2py is operating in.",
'This page can upload your application to the Google App Engine computing cloud. Mind that you must first create indexes locally and this is done by installing the Google appserver and running the app locally with it once, or there will be errors when selecting records. Attention: deployment may take long time, depending on the network speed. Attention: it will overwrite your app.yaml. DO NOT SUBMIT TWICE.': 'This page can upload your application to the Google App Engine computing cloud. Mind that you must first create indexes locally and this is done by installing the Google appserver and running the app locally with it once, or there will be errors when selecting records. Attention: deployment may take long time, depending on the network speed. Attention: it will overwrite your app.yaml. DO NOT SUBMIT TWICE.',
'this page to see if a breakpoint was hit and debug interaction is required.': 'this page to see if a breakpoint was hit and debug interaction is required.',
'This will pull changes from the remote repo for application "%s"?': 'This will pull changes from the remote repo for application "%s"?',
'This will push changes to the remote repo for application "%s".': 'This will push changes to the remote repo for application "%s".',
'Ticket': 'Ticket',
'Ticket ID': 'Ticket ID',
'Ticket Missing': 'Ticket Missing',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'to previous version.': 'to previous version.',
'To create a plugin, name a file/folder plugin_[name]': 'To create a plugin, name a file/folder plugin_[name]',
'To emulate a breakpoint programatically, write:': 'To emulate a breakpoint programatically, write:',
'to use the debugger!': 'to use the debugger!',
'toggle breakpoint': 'toggle breakpoint',
'Toggle comment': 'Toggle comment',
'Toggle Fullscreen': 'Toggle Fullscreen',
'Traceback': 'Traceback',
'Translation strings for the application': 'Translation strings for the application',
'try something like': 'try something like',
'Try the mobile interface': 'Try the mobile interface',
'try view': 'try view',
'Type PDB debugger command in here and hit Return (Enter) to execute it.': 'Type PDB debugger command in here and hit Return (Enter) to execute it.',
'Type some Python code in here and hit Return (Enter) to execute it.': 'Type some Python code in here and hit Return (Enter) to execute it.',
'Unable to check for upgrades': 'Unable to check for upgrades',
'unable to create application "%s"': 'unable to create application "%s"',
'unable to delete file "%(filename)s"': 'unable to delete file "%(filename)s"',
'unable to delete file plugin "%(plugin)s"': 'unable to delete file plugin "%(plugin)s"',
'Unable to determine the line number!': 'Unable to determine the line number!',
'Unable to download app because:': 'Unable to download app because:',
'unable to download layout': 'unable to download layout',
'unable to download plugin: %s': 'unable to download plugin: %s',
'Unable to download the list of plugins': 'Unable to download the list of plugins',
'unable to install plugin "%s"': 'unable to install plugin "%s"',
'unable to parse csv file': 'unable to parse csv file',
'unable to uninstall "%s"': 'unable to uninstall "%s"',
'unable to upgrade because "%s"': 'unable to upgrade because "%s"',
'uncheck all': 'uncheck all',
'Uninstall': 'Uninstall',
'Unsupported webserver working mode: %s': 'Unsupported webserver working mode: %s',
'update': 'update',
'update all languages': 'update all languages',
'Update:': 'Update:',
'Upgrade': 'Upgrade',
'upgrade now to %s': 'upgrade now to %s',
'upload': 'upload',
'Upload': 'Upload',
'Upload a package:': 'Upload a package:',
'Upload and install packed application': 'Upload and install packed application',
'upload file:': 'upload file:',
'upload plugin file:': 'upload plugin file:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.',
'User': 'User',
'Username': 'Username',
'Users': 'Users',
'Using the shell may lock the database to other users of this app.': 'Using the shell may lock the database to other users of this app.',
'variables': 'variables',
'Version': 'Version',
'Versioning': 'Versioning',
'Views': 'Views',
'views': 'views',
'Warning!': 'Warning!',
'WARNING:': 'WARNING:',
'WARNING: The following views could not be compiled:': 'WARNING: The following views could not be compiled:',
'Web Framework': 'Web Framework',
'web2py Admin Password': 'web2py Admin Password',
'web2py apps to deploy': 'web2py apps to deploy',
'web2py Debugger': 'web2py Debugger',
'web2py downgrade': 'web2py downgrade',
'web2py is up to date': 'web2py is up to date',
'web2py online debugger': 'web2py online debugger',
'web2py upgrade': 'web2py upgrade',
'web2py upgraded; please restart it': 'web2py upgraded; please restart it',
'Working...': 'Working...',
'WSGI reference name': 'WSGI reference name',
'YES': 'YES',
'Yes': 'Yes',
'You can also set and remove breakpoint in the edit window, using the Toggle Breakpoint button': 'You can also set and remove breakpoint in the edit window, using the Toggle Breakpoint button',
'You can inspect variables using the console below': 'You can inspect variables using the console below',
'You have one more login attempt before you are locked out': 'You have one more login attempt before you are locked out',
'You need to set up and reach a': 'You need to set up and reach a',
'You only need these if you have already registered': 'You only need these if you have already registered',
'Your application will be blocked until you click an action button (next, step, continue, etc.)': 'Your application will be blocked until you click an action button (next, step, continue, etc.)',
}
| {
"content_hash": "c5e6f3f93d9be63891441cd10c452ac1",
"timestamp": "",
"source": "github",
"line_count": 573,
"max_line_length": 823,
"avg_line_length": 55.132635253054104,
"alnum_prop": 0.722072742236713,
"repo_name": "xiang12835/python_web",
"id": "0d5dcf5c5fc24c8dede82f10c1245113f5c20674",
"size": "31615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py2_web2py/web2py/applications/admin/languages/en.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3341"
},
{
"name": "Python",
"bytes": "17420"
}
],
"symlink_target": ""
} |
"""Base class for api services."""
import base64
import contextlib
import datetime
import logging
import pprint
import six
from six.moves import http_client
from six.moves import urllib
from apitools.base.protorpclite import message_types
from apitools.base.protorpclite import messages
from apitools.base.py import encoding
from apitools.base.py import exceptions
from apitools.base.py import http_wrapper
from apitools.base.py import util
# Public API of this module; everything else is an implementation detail.
__all__ = [
    'ApiMethodInfo',
    'ApiUploadInfo',
    'BaseApiClient',
    'BaseApiService',
    'NormalizeApiEndpoint',
]
# TODO(craigcitro): Remove this once we quiet the spurious logging in
# oauth2client (or drop oauth2client).
logging.getLogger('oauth2client.util').setLevel(logging.ERROR)
# GET requests whose url exceeds this many characters are converted to
# POSTs with an x-http-method-override header (see __FinalizeRequest).
_MAX_URL_LENGTH = 2048
class ApiUploadInfo(messages.Message):

    """Media upload information for a method.

    Fields:
      accept: (repeated) MIME Media Ranges for acceptable media uploads
          to this method.
      max_size: (integer) Maximum size of a media upload, such as 3MB
          or 1TB (converted to an integer).
      resumable_path: Path to use for resumable uploads.
      resumable_multipart: (boolean) Whether or not the resumable endpoint
          supports multipart uploads.
      simple_path: Path to use for simple uploads.
      simple_multipart: (boolean) Whether or not the simple endpoint
          supports multipart uploads.
    """
    # Field numbers are wire identifiers used by protorpclite; never
    # renumber or reuse them.
    accept = messages.StringField(1, repeated=True)
    max_size = messages.IntegerField(2)
    resumable_path = messages.StringField(3)
    resumable_multipart = messages.BooleanField(4)
    simple_path = messages.StringField(5)
    simple_multipart = messages.BooleanField(6)
class ApiMethodInfo(messages.Message):

    """Configuration info for an API method.

    All fields are strings unless noted otherwise.

    Fields:
      relative_path: Relative path for this method.
      flat_path: Expanded version (if any) of relative_path.
      method_id: ID for this method.
      http_method: HTTP verb to use for this method.
      path_params: (repeated) path parameters for this method.
      query_params: (repeated) query parameters for this method.
      ordered_params: (repeated) ordered list of parameters for
          this method.
      description: description of this method.
      request_type_name: name of the request type.
      response_type_name: name of the response type.
      request_field: if not null, the field to pass as the body
          of this POST request. may also be the REQUEST_IS_BODY
          value below to indicate the whole message is the body.
      upload_config: (ApiUploadInfo) Information about the upload
          configuration supported by this method.
      supports_download: (boolean) If True, this method supports
          downloading the request via the `alt=media` query
          parameter.
    """
    # Field numbers are wire identifiers used by protorpclite; never
    # renumber or reuse them.
    relative_path = messages.StringField(1)
    flat_path = messages.StringField(2)
    method_id = messages.StringField(3)
    http_method = messages.StringField(4)
    path_params = messages.StringField(5, repeated=True)
    query_params = messages.StringField(6, repeated=True)
    ordered_params = messages.StringField(7, repeated=True)
    description = messages.StringField(8)
    request_type_name = messages.StringField(9)
    response_type_name = messages.StringField(10)
    # Empty string (the default) means "this method has no request body".
    request_field = messages.StringField(11, default='')
    upload_config = messages.MessageField(ApiUploadInfo, 12)
    supports_download = messages.BooleanField(13, default=False)
REQUEST_IS_BODY = '<request>'
def _LoadClass(name, messages_module):
if name.startswith('message_types.'):
_, _, classname = name.partition('.')
return getattr(message_types, classname)
elif '.' not in name:
return getattr(messages_module, name)
else:
raise exceptions.GeneratedClientError('Unknown class %s' % name)
def _RequireClassAttrs(obj, attrs):
for attr in attrs:
attr_name = attr.upper()
if not hasattr(obj, '%s' % attr_name) or not getattr(obj, attr_name):
msg = 'No %s specified for object of class %s.' % (
attr_name, type(obj).__name__)
raise exceptions.GeneratedClientError(msg)
def NormalizeApiEndpoint(api_endpoint):
    """Return *api_endpoint* guaranteed to end with a single trailing '/'."""
    return api_endpoint if api_endpoint.endswith('/') else api_endpoint + '/'
def _urljoin(base, url): # pylint: disable=invalid-name
"""Custom urljoin replacement supporting : before / in url."""
# In general, it's unsafe to simply join base and url. However, for
# the case of discovery documents, we know:
# * base will never contain params, query, or fragment
# * url will never contain a scheme or net_loc.
# In general, this means we can safely join on /; we just need to
# ensure we end up with precisely one / joining base and url. The
# exception here is the case of media uploads, where url will be an
# absolute url.
if url.startswith('http://') or url.startswith('https://'):
return urllib.parse.urljoin(base, url)
new_base = base if base.endswith('/') else base + '/'
new_url = url[1:] if url.startswith('/') else url
return new_base + new_url
class _UrlBuilder(object):
"""Convenient container for url data."""
def __init__(self, base_url, relative_path=None, query_params=None):
components = urllib.parse.urlsplit(_urljoin(
base_url, relative_path or ''))
if components.fragment:
raise exceptions.ConfigurationValueError(
'Unexpected url fragment: %s' % components.fragment)
self.query_params = urllib.parse.parse_qs(components.query or '')
if query_params is not None:
self.query_params.update(query_params)
self.__scheme = components.scheme
self.__netloc = components.netloc
self.relative_path = components.path or ''
@classmethod
def FromUrl(cls, url):
urlparts = urllib.parse.urlsplit(url)
query_params = urllib.parse.parse_qs(urlparts.query)
base_url = urllib.parse.urlunsplit((
urlparts.scheme, urlparts.netloc, '', None, None))
relative_path = urlparts.path or ''
return cls(
base_url, relative_path=relative_path, query_params=query_params)
@property
def base_url(self):
return urllib.parse.urlunsplit(
(self.__scheme, self.__netloc, '', '', ''))
@base_url.setter
def base_url(self, value):
components = urllib.parse.urlsplit(value)
if components.path or components.query or components.fragment:
raise exceptions.ConfigurationValueError(
'Invalid base url: %s' % value)
self.__scheme = components.scheme
self.__netloc = components.netloc
@property
def query(self):
# TODO(craigcitro): In the case that some of the query params are
# non-ASCII, we may silently fail to encode correctly. We should
# figure out who is responsible for owning the object -> str
# conversion.
return urllib.parse.urlencode(self.query_params, True)
@property
def url(self):
if '{' in self.relative_path or '}' in self.relative_path:
raise exceptions.ConfigurationValueError(
'Cannot create url with relative path %s' % self.relative_path)
return urllib.parse.urlunsplit((
self.__scheme, self.__netloc, self.relative_path, self.query, ''))
def _SkipGetCredentials():
    """Hook for skipping credentials. For internal use."""
    # Default implementation never skips; BaseApiClient.__init__ consults
    # this before attempting credential discovery.
    return False
class BaseApiClient(object):

    """Base class for client libraries.

    Generated clients subclass this and override the class attributes
    below (MESSAGES_MODULE, _PACKAGE, _SCOPES, ...). Private state uses
    double-underscore names, so subclasses see them mangled.
    """
    # Module containing the generated protorpc message classes.
    MESSAGES_MODULE = None

    # Per-client OAuth/identity defaults; generated subclasses fill these in.
    _API_KEY = ''
    _CLIENT_ID = ''
    _CLIENT_SECRET = ''
    _PACKAGE = ''
    _SCOPES = []
    _USER_AGENT = ''

    def __init__(self, url, credentials=None, get_credentials=True, http=None,
                 model=None, log_request=False, log_response=False,
                 num_retries=5, max_retry_wait=60, credentials_args=None,
                 default_global_params=None, additional_http_headers=None,
                 check_response_func=None, retry_func=None,
                 response_encoding=None):
        """Create a client bound to *url*.

        Args:
          url: base endpoint for the API; normalized to end with '/'.
          credentials: optional pre-built credentials object.
          get_credentials: if True (and no credentials given), attempt
              credential discovery via credentials_lib.
          http: optional http object; defaults to http_wrapper.GetHttp().
          model: deprecated; ignored.
          log_request / log_response: enable request/response logging.
          num_retries: retry count for requests (validated >= 0).
          max_retry_wait: max seconds between retries (validated > 0).
          credentials_args: extra kwargs for credential discovery.
          default_global_params: optional StandardQueryParameters defaults.
          additional_http_headers: headers added to every request.
          check_response_func / retry_func: hooks for response checking
              and retry behavior.
          response_encoding: if set, response bytes are decoded with it.
        """
        _RequireClassAttrs(self, ('_package', '_scopes', 'messages_module'))
        if default_global_params is not None:
            util.Typecheck(default_global_params, self.params_type)
        self.__default_global_params = default_global_params
        self.log_request = log_request
        self.log_response = log_response
        self.__num_retries = 5
        self.__max_retry_wait = 60
        # We let the @property machinery below do our validation.
        self.num_retries = num_retries
        self.max_retry_wait = max_retry_wait
        self._credentials = credentials
        get_credentials = get_credentials and not _SkipGetCredentials()
        if get_credentials and not credentials:
            credentials_args = credentials_args or {}
            self._SetCredentials(**credentials_args)
        self._url = NormalizeApiEndpoint(url)
        self._http = http or http_wrapper.GetHttp()
        # Note that "no credentials" is totally possible.
        if self._credentials is not None:
            self._http = self._credentials.authorize(self._http)
        # TODO(craigcitro): Remove this field when we switch to proto2.
        self.__include_fields = None
        self.additional_http_headers = additional_http_headers or {}
        self.check_response_func = check_response_func
        self.retry_func = retry_func
        self.response_encoding = response_encoding
        # TODO(craigcitro): Finish deprecating these fields.
        _ = model
        self.__response_type_model = 'proto'

    def _SetCredentials(self, **kwds):
        """Fetch credentials, and set them for this client.

        Note that we can't simply return credentials, since creating them
        may involve side-effecting self.

        Args:
          **kwds: Additional keyword arguments are passed on to GetCredentials.

        Returns:
          None. Sets self._credentials.
        """
        args = {
            'api_key': self._API_KEY,
            'client': self,
            'client_id': self._CLIENT_ID,
            'client_secret': self._CLIENT_SECRET,
            'package_name': self._PACKAGE,
            'scopes': self._SCOPES,
            'user_agent': self._USER_AGENT,
        }
        args.update(kwds)
        # credentials_lib can be expensive to import so do it only if needed.
        from apitools.base.py import credentials_lib
        # TODO(craigcitro): It's a bit dangerous to pass this
        # still-half-initialized self into this method, but we might need
        # to set attributes on it associated with our credentials.
        # Consider another way around this (maybe a callback?) and whether
        # or not it's worth it.
        self._credentials = credentials_lib.GetCredentials(**args)

    @classmethod
    def ClientInfo(cls):
        """Return this client's OAuth registration info as a dict."""
        return {
            'client_id': cls._CLIENT_ID,
            'client_secret': cls._CLIENT_SECRET,
            'scope': ' '.join(sorted(util.NormalizeScopes(cls._SCOPES))),
            'user_agent': cls._USER_AGENT,
        }

    @property
    def base_model_class(self):
        # Deprecated model support; always None here.
        return None

    @property
    def http(self):
        """The (possibly credential-wrapped) http object used for requests."""
        return self._http

    @property
    def url(self):
        """Normalized base endpoint url (always ends with '/')."""
        return self._url

    @classmethod
    def GetScopes(cls):
        """Return the OAuth scopes this client requests."""
        return cls._SCOPES

    @property
    def params_type(self):
        """The StandardQueryParameters message class for this API."""
        return _LoadClass('StandardQueryParameters', self.MESSAGES_MODULE)

    @property
    def user_agent(self):
        return self._USER_AGENT

    @property
    def _default_global_params(self):
        # Lazily instantiate an empty params message the first time it is
        # needed, so clients constructed without defaults still work.
        if self.__default_global_params is None:
            # pylint: disable=not-callable
            self.__default_global_params = self.params_type()
        return self.__default_global_params

    def AddGlobalParam(self, name, value):
        """Set a default global (query) parameter for all requests."""
        params = self._default_global_params
        setattr(params, name, value)

    @property
    def global_params(self):
        # Return a copy so callers cannot mutate the stored defaults.
        return encoding.CopyProtoMessage(self._default_global_params)

    @contextlib.contextmanager
    def IncludeFields(self, include_fields):
        """Context manager: force these fields into serialized requests."""
        self.__include_fields = include_fields
        yield
        self.__include_fields = None

    @property
    def response_type_model(self):
        """'proto' (default) or 'json' (raw content passthrough)."""
        return self.__response_type_model

    @contextlib.contextmanager
    def JsonResponseModel(self):
        """In this context, return raw JSON instead of proto."""
        old_model = self.response_type_model
        self.__response_type_model = 'json'
        yield
        self.__response_type_model = old_model

    @property
    def num_retries(self):
        return self.__num_retries

    @num_retries.setter
    def num_retries(self, value):
        # Validated here so __init__ can route assignment through the
        # property ("let the @property machinery do our validation").
        util.Typecheck(value, six.integer_types)
        if value < 0:
            raise exceptions.InvalidDataError(
                'Cannot have negative value for num_retries')
        self.__num_retries = value

    @property
    def max_retry_wait(self):
        return self.__max_retry_wait

    @max_retry_wait.setter
    def max_retry_wait(self, value):
        util.Typecheck(value, six.integer_types)
        if value <= 0:
            # NOTE(review): "postiive" typo is preserved -- this is a
            # runtime error string callers may match against.
            raise exceptions.InvalidDataError(
                'max_retry_wait must be a postiive integer')
        self.__max_retry_wait = value

    @contextlib.contextmanager
    def WithRetries(self, num_retries):
        """Context manager: temporarily override the retry count."""
        old_num_retries = self.num_retries
        self.num_retries = num_retries
        yield
        self.num_retries = old_num_retries

    def ProcessRequest(self, method_config, request):
        """Hook for pre-processing of requests."""
        if self.log_request:
            logging.info(
                'Calling method %s with %s: %s', method_config.method_id,
                method_config.request_type_name, request)
        return request

    def ProcessHttpRequest(self, http_request):
        """Hook for pre-processing of http requests."""
        http_request.headers.update(self.additional_http_headers)
        if self.log_request:
            logging.info('Making http %s to %s',
                         http_request.http_method, http_request.url)
            logging.info('Headers: %s', pprint.pformat(http_request.headers))
            if http_request.body:
                # TODO(craigcitro): Make this safe to print in the case of
                # non-printable body characters.
                logging.info('Body:\n%s',
                             http_request.loggable_body or http_request.body)
            else:
                logging.info('Body: (none)')
        return http_request

    def ProcessResponse(self, method_config, response):
        """Hook for post-processing of responses; default just logs."""
        if self.log_response:
            logging.info('Response of type %s: %s',
                         method_config.response_type_name, response)
        return response

    # TODO(craigcitro): Decide where these two functions should live.
    def SerializeMessage(self, message):
        """Serialize *message* to JSON, honoring any IncludeFields context."""
        return encoding.MessageToJson(
            message, include_fields=self.__include_fields)

    def DeserializeMessage(self, response_type, data):
        """Deserialize the given data as method_config.response_type."""
        try:
            message = encoding.JsonToMessage(response_type, data)
        except (exceptions.InvalidDataFromServerError,
                messages.ValidationError, ValueError) as e:
            # Re-raise with context: which payload and which type failed.
            raise exceptions.InvalidDataFromServerError(
                'Error decoding response "%s" as type %s: %s' % (
                    data, response_type.__name__, e))
        return message

    def FinalizeTransferUrl(self, url):
        """Modify the url for a given transfer, based on auth and version."""
        url_builder = _UrlBuilder.FromUrl(url)
        # Propagate the API key (if configured) onto upload/download urls.
        if self.global_params.key:
            url_builder.query_params['key'] = self.global_params.key
        return url_builder.url
class BaseApiService(object):
"""Base class for generated API services."""
    def __init__(self, client):
        """Bind this service to *client*.

        Args:
          client: the API client used to issue this service's requests.
        """
        self.__client = client
        # Per-method caches, keyed by method name; filled lazily.
        self._method_configs = {}
        self._upload_configs = {}
    @property
    def _client(self):
        # Same object as the public `client` property below.
        return self.__client
    @property
    def client(self):
        """The client this service was constructed with."""
        return self.__client
def GetMethodConfig(self, method):
"""Returns service cached method config for given method."""
method_config = self._method_configs.get(method)
if method_config:
return method_config
func = getattr(self, method, None)
if func is None:
raise KeyError(method)
method_config = getattr(func, 'method_config', None)
if method_config is None:
raise KeyError(method)
self._method_configs[method] = config = method_config()
return config
@classmethod
def GetMethodsList(cls):
return [f.__name__ for f in six.itervalues(cls.__dict__)
if getattr(f, 'method_config', None)]
    def GetUploadConfig(self, method):
        """Return the cached upload config for *method*, or None if absent."""
        return self._upload_configs.get(method)
def GetRequestType(self, method):
method_config = self.GetMethodConfig(method)
return getattr(self.client.MESSAGES_MODULE,
method_config.request_type_name)
def GetResponseType(self, method):
method_config = self.GetMethodConfig(method)
return getattr(self.client.MESSAGES_MODULE,
method_config.response_type_name)
    def __CombineGlobalParams(self, global_params, default_params):
        """Combine the given params with the defaults.

        Per-call values win; fields the caller left unassigned fall back
        to *default_params*. Empty collections count as "unassigned".
        """
        util.Typecheck(global_params, (type(None), self.__client.params_type))
        result = self.__client.params_type()
        # Substitute an empty message when no per-call params were given.
        global_params = global_params or self.__client.params_type()
        for field in result.all_fields():
            value = global_params.get_assigned_value(field.name)
            if value is None:
                value = default_params.get_assigned_value(field.name)
            # Skip unset values (None / empty list / empty tuple) so they
            # never appear as explicitly-assigned fields on the result.
            if value not in (None, [], ()):
                setattr(result, field.name, value)
        return result
def __EncodePrettyPrint(self, query_info):
# The prettyPrint flag needs custom encoding: it should be encoded
# as 0 if False, and ignored otherwise (True is the default).
if not query_info.pop('prettyPrint', True):
query_info['prettyPrint'] = 0
# The One Platform equivalent of prettyPrint is pp, which also needs
# custom encoding.
if not query_info.pop('pp', True):
query_info['pp'] = 0
return query_info
def __FinalUrlValue(self, value, field):
"""Encode value for the URL, using field to skip encoding for bytes."""
if isinstance(field, messages.BytesField) and value is not None:
return base64.urlsafe_b64encode(value)
elif isinstance(value, six.text_type):
return value.encode('utf8')
elif isinstance(value, six.binary_type):
return value.decode('utf8')
elif isinstance(value, datetime.datetime):
return value.isoformat()
return value
    def __ConstructQueryParams(self, query_params, request, global_params):
        """Construct a dictionary of query parameters for this request.

        Global (standard) parameters are merged first, then the request's
        own query params; None values are dropped, prettyPrint/pp get
        their special encoding, and names are mapped back to wire format.
        """
        # First, handle the global params.
        global_params = self.__CombineGlobalParams(
            global_params, self.__client.global_params)
        global_param_names = util.MapParamNames(
            [x.name for x in self.__client.params_type.all_fields()],
            self.__client.params_type)
        global_params_type = type(global_params)
        query_info = dict(
            (param,
             self.__FinalUrlValue(getattr(global_params, param),
                                  getattr(global_params_type, param)))
            for param in global_param_names)
        # Next, add the query params.
        query_param_names = util.MapParamNames(query_params, type(request))
        request_type = type(request)
        query_info.update(
            (param,
             self.__FinalUrlValue(getattr(request, param, None),
                                  getattr(request_type, param)))
            for param in query_param_names)
        # Unset params (None) are omitted from the query string entirely.
        query_info = dict((k, v) for k, v in query_info.items()
                          if v is not None)
        query_info = self.__EncodePrettyPrint(query_info)
        query_info = util.MapRequestParams(query_info, type(request))
        return query_info
def __ConstructRelativePath(self, method_config, request,
relative_path=None):
"""Determine the relative path for request."""
python_param_names = util.MapParamNames(
method_config.path_params, type(request))
params = dict([(param, getattr(request, param, None))
for param in python_param_names])
params = util.MapRequestParams(params, type(request))
return util.ExpandRelativePath(method_config, params,
relative_path=relative_path)
def __FinalizeRequest(self, http_request, url_builder):
"""Make any final general adjustments to the request."""
if (http_request.http_method == 'GET' and
len(http_request.url) > _MAX_URL_LENGTH):
http_request.http_method = 'POST'
http_request.headers['x-http-method-override'] = 'GET'
http_request.headers[
'content-type'] = 'application/x-www-form-urlencoded'
http_request.body = url_builder.query
url_builder.query_params = {}
http_request.url = url_builder.url
    def __ProcessHttpResponse(self, method_config, http_response, request):
        """Process the given http response.

        Non-2xx statuses raise HttpError; 204 is normalized to an empty
        JSON object; the body is then decoded (if an encoding is
        configured) and deserialized unless the client is in raw-JSON
        response mode.
        """
        if http_response.status_code not in (http_client.OK,
                                             http_client.CREATED,
                                             http_client.NO_CONTENT):
            raise exceptions.HttpError.FromResponse(
                http_response, method_config=method_config, request=request)
        if http_response.status_code == http_client.NO_CONTENT:
            # TODO(craigcitro): Find out why _replace doesn't seem to work
            # here.
            http_response = http_wrapper.Response(
                info=http_response.info, content='{}',
                request_url=http_response.request_url)
        content = http_response.content
        # Decode raw bytes when the client was configured with an encoding.
        if self._client.response_encoding and isinstance(content, bytes):
            content = content.decode(self._client.response_encoding)
        if self.__client.response_type_model == 'json':
            # Raw-JSON mode (see BaseApiClient.JsonResponseModel): skip
            # message deserialization entirely.
            return content
        response_type = _LoadClass(method_config.response_type_name,
                                   self.__client.MESSAGES_MODULE)
        return self.__client.DeserializeMessage(response_type, content)
def __SetBaseHeaders(self, http_request, client):
    """Fill in the basic headers on http_request."""
    # TODO(craigcitro): Make the default a little better here, and
    # include the apitools version.
    headers = http_request.headers
    headers['user-agent'] = client.user_agent or 'apitools-client/1.0'
    headers['accept'] = 'application/json'
    headers['accept-encoding'] = 'gzip, deflate'
def __SetBody(self, http_request, method_config, request, upload):
    """Fill in the body on http_request.

    No-op when the method takes no request body. Otherwise the body is
    either the whole request message (REQUEST_IS_BODY) or a single
    message-typed field of it, serialized by the client as JSON.
    """
    if not method_config.request_field:
        return
    request_type = _LoadClass(
        method_config.request_type_name, self.__client.MESSAGES_MODULE)
    if method_config.request_field == REQUEST_IS_BODY:
        # The entire request message is the HTTP body.
        body_value = request
        body_type = request_type
    else:
        # Only one field of the request message is the HTTP body; check
        # that the named field really is a message field before using it.
        body_value = getattr(request, method_config.request_field)
        body_field = request_type.field_by_name(
            method_config.request_field)
        util.Typecheck(body_field, messages.MessageField)
        body_type = body_field.type
    # If there was no body provided, we use an empty message of the
    # appropriate type.
    body_value = body_value or body_type()
    if upload and not body_value:
        # We're going to fill in the body later.
        return
    util.Typecheck(body_value, body_type)
    http_request.headers['content-type'] = 'application/json'
    http_request.body = self.__client.SerializeMessage(body_value)
def PrepareHttpRequest(self, method_config, request, global_params=None,
                       upload=None, upload_config=None, download=None):
    """Prepares an HTTP request to be sent.

    Type-checks and pre-processes the request message, fills in
    headers, body, query parameters and the relative path, lets any
    upload/download transfer objects adjust the request, and finally
    hands the assembled request to the client for post-processing.

    Args:
      method_config: method description for the call being made.
      request: request message instance matching the method's
          configured request type.
      global_params: optional message of client-wide parameters merged
          into the query string.
      upload / upload_config: optional upload transfer and its config;
          allowed to rewrite the request (e.g. the path) before send.
      download: optional download transfer, likewise allowed to
          configure the request.

    Returns:
      The http_wrapper.Request produced by the client's
      ProcessHttpRequest hook.
    """
    request_type = _LoadClass(
        method_config.request_type_name, self.__client.MESSAGES_MODULE)
    util.Typecheck(request, request_type)
    request = self.__client.ProcessRequest(method_config, request)
    http_request = http_wrapper.Request(
        http_method=method_config.http_method)
    self.__SetBaseHeaders(http_request, self.__client)
    self.__SetBody(http_request, method_config, request, upload)
    url_builder = _UrlBuilder(
        self.__client.url, relative_path=method_config.relative_path)
    url_builder.query_params = self.__ConstructQueryParams(
        method_config.query_params, request, global_params)
    # It's important that upload and download go before we fill in the
    # relative path, so that they can replace it.
    if upload is not None:
        upload.ConfigureRequest(upload_config, http_request, url_builder)
    if download is not None:
        download.ConfigureRequest(http_request, url_builder)
    url_builder.relative_path = self.__ConstructRelativePath(
        method_config, request, relative_path=url_builder.relative_path)
    self.__FinalizeRequest(http_request, url_builder)
    return self.__client.ProcessHttpRequest(http_request)
def _RunMethod(self, method_config, request, global_params=None,
               upload=None, upload_config=None, download=None):
    """Call this method with request.

    Prepares the HTTP request, dispatches it (directly, or via an
    upload/download transfer object), and converts the response to the
    expected message type. Download calls return None — the transfer
    object takes over the response stream.
    """
    if upload is not None and download is not None:
        # TODO(craigcitro): This just involves refactoring the logic
        # below into callbacks that we can pass around; in particular,
        # the order should be that the upload gets the initial request,
        # and then passes its reply to a download if one exists, and
        # then that goes to ProcessResponse and is returned.
        raise exceptions.NotYetImplementedError(
            'Cannot yet use both upload and download at once')
    http_request = self.PrepareHttpRequest(
        method_config, request, global_params, upload, upload_config,
        download)
    # TODO(craigcitro): Make num_retries customizable on Transfer
    # objects, and pass in self.__client.num_retries when initializing
    # an upload or download.
    if download is not None:
        # NOTE(review): uses the ``self.client`` accessor here rather
        # than the mangled ``self.__client`` used elsewhere — presumably
        # a property exposing the same object; confirm.
        download.InitializeDownload(http_request, client=self.client)
        # Deliberate early exit: the download object owns the response.
        return
    http_response = None
    if upload is not None:
        http_response = upload.InitializeUpload(
            http_request, client=self.client)
    if http_response is None:
        # Either a plain call, or the upload did not perform the
        # request itself — send it here, honoring the client's retry
        # configuration and optional response/retry hooks.
        http = self.__client.http
        if upload and upload.bytes_http:
            http = upload.bytes_http
        opts = {
            'retries': self.__client.num_retries,
            'max_retry_wait': self.__client.max_retry_wait,
        }
        if self.__client.check_response_func:
            opts['check_response_func'] = self.__client.check_response_func
        if self.__client.retry_func:
            opts['retry_func'] = self.__client.retry_func
        http_response = http_wrapper.MakeRequest(
            http, http_request, **opts)
    return self.ProcessHttpResponse(method_config, http_response, request)
def ProcessHttpResponse(self, method_config, http_response, request=None):
    """Convert an HTTP response to the expected message type.

    Decodes/deserializes the raw response via the private helper, then
    runs the result through the client's ProcessResponse hook so
    subclasses can post-process it.
    """
    return self.__client.ProcessResponse(
        method_config,
        self.__ProcessHttpResponse(method_config, http_response, request))
| {
"content_hash": "267b9ec3bbcba44a444a01abc3fb1734",
"timestamp": "",
"source": "github",
"line_count": 721,
"max_line_length": 79,
"avg_line_length": 39.535367545076284,
"alnum_prop": 0.6257849500087704,
"repo_name": "kevinli7/apitools",
"id": "1d490c3100f2faedcad6b989246604a0f0c24e8a",
"size": "29105",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "apitools/base/py/base_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "814906"
}
],
"symlink_target": ""
} |
from EXOSIMS.SurveySimulation.tieredScheduler_SLSQP import tieredScheduler_SLSQP
import astropy.units as u
import astropy.constants as const
import numpy as np
import time
import copy
class tieredScheduler_DD_SLSQP(tieredScheduler_SLSQP):
"""tieredScheduler_DD_SLSQP - tieredScheduler Dual Detection SLSQP
This class implements a version of the tieredScheduler that performs dual-band
detections and uses the SLSQP scheduler as a base for inheritance.
"""
def __init__(self, **specs):
    """Delegate all construction to tieredScheduler_SLSQP.

    Args:
        **specs: Input specification passed through unchanged to the
            parent scheduler; this subclass adds no new options.
    """
    tieredScheduler_SLSQP.__init__(self, **specs)
def run_sim(self):
    """Performs the survey simulation.

    Main mission loop: repeatedly asks next_target for a coronagraph
    target (sInd) and an occulter target (occ_sInd), performs a
    (possibly dual-band) detection when sInd differs from occ_sInd, or
    a starshade characterization when they coincide, and accumulates
    one DRM dict per observation in self.DRM. The loop's ``else``
    clause runs when mission time is exhausted.

    Returns:
        mission_end (string):
            Message printed at the end of a survey simulation.
    """
    OS = self.OpticalSystem
    TL = self.TargetList
    SU = self.SimulatedUniverse
    Obs = self.Observatory
    TK = self.TimeKeeping
    Comp = self.Completeness

    # TODO: start using this self.currentSep
    # set occulter separation if haveOcculter
    self.currentSep = Obs.occulterSep

    # Choose observing modes selected for detection (default marked with a flag),
    det_modes = list(
        filter(lambda mode: "imag" in mode["inst"]["name"], OS.observingModes)
    )
    base_det_mode = list(
        filter(lambda mode: mode["detectionMode"], OS.observingModes)
    )[0]
    # and for characterization (default is first spectro/IFS mode)
    spectroModes = list(
        filter(lambda mode: "spec" in mode["inst"]["name"], OS.observingModes)
    )
    if np.any(spectroModes):
        char_mode = spectroModes[0]
    # if no spectro mode, default char mode is first observing mode
    else:
        char_mode = OS.observingModes[0]

    # Begin Survey, and loop until mission is finished
    self.logger.info("OB{}: survey beginning.".format(TK.OBnumber + 1))
    self.vprint("OB{}: survey beginning.".format(TK.OBnumber + 1))
    t0 = time.time()
    sInd = None
    occ_sInd = None
    cnt = 0

    while not TK.mission_is_over(OS, Obs, det_modes[0]):
        # Acquire the NEXT TARGET star index and create DRM
        # prev_occ_sInd = occ_sInd
        old_sInd = sInd  # used to save sInd if returned sInd is None
        waitTime = None
        DRM, sInd, occ_sInd, t_det, sd, occ_sInds, det_mode = self.next_target(
            sInd, occ_sInd, det_modes, char_mode
        )

        # Full wall-clock cost of the detection, including overheads.
        if det_mode is not None:
            true_t_det = (
                t_det * det_mode["timeMultiplier"]
                + Obs.settlingTime
                + det_mode["syst"]["ohTime"]
            )
        else:
            true_t_det = t_det

        if sInd != occ_sInd and sInd is not None:
            assert t_det != 0, "Integration time can't be 0."

        # If the observation would run past the starshade's arrival,
        # switch to the occulter target (unless it was just characterized).
        if (
            sInd is not None
            and (TK.currentTimeAbs.copy() + true_t_det) >= self.occ_arrives
            and occ_sInd != self.last_chard
        ):
            sInd = occ_sInd
        if sInd == occ_sInd:
            self.ready_to_update = True

        time2arrive = self.occ_arrives - TK.currentTimeAbs.copy()

        if sInd is not None:
            cnt += 1

            # clean up revisit list when one occurs to prevent repeats
            if np.any(self.starRevisit) and np.any(
                np.where(self.starRevisit[:, 0] == float(sInd))
            ):
                s_revs = np.where(self.starRevisit[:, 0] == float(sInd))[0]
                dt_max = 1.0 * u.week
                t_revs = np.where(
                    self.starRevisit[:, 1] * u.day - TK.currentTimeNorm.copy()
                    < dt_max
                )[0]
                self.starRevisit = np.delete(
                    self.starRevisit, np.intersect1d(s_revs, t_revs), 0
                )

            # get the index of the selected target for the extended list
            if (
                TK.currentTimeNorm.copy() > TK.missionLife
                and self.starExtended.shape[0] == 0
            ):
                for i in range(len(self.DRM)):
                    if np.any([x == 1 for x in self.DRM[i]["plan_detected"]]):
                        self.starExtended = np.hstack(
                            (self.starExtended, self.DRM[i]["star_ind"])
                        )
                        self.starExtended = np.unique(self.starExtended)

            # Beginning of observation, start to populate DRM
            DRM["OB_nb"] = TK.OBnumber + 1
            DRM["ObsNum"] = cnt
            DRM["star_ind"] = sInd
            pInds = np.where(SU.plan2star == sInd)[0]
            DRM["plan_inds"] = pInds.astype(int).tolist()

            if sInd == occ_sInd:
                # wait until expected arrival time is observed
                if time2arrive > 0 * u.d:
                    TK.advanceToAbsTime(self.occ_arrives)
                    if time2arrive > 1 * u.d:
                        # credit the long wait to general astrophysics
                        self.GAtime = self.GAtime + time2arrive.to("day")

            TK.obsStart = TK.currentTimeNorm.copy().to("day")

            self.logger.info(
                "Observation #%s, target #%s/%s with %s planet(s), mission time: %s"
                % (cnt, sInd + 1, TL.nStars, len(pInds), TK.obsStart.round(2))
            )
            self.vprint(
                "Observation #%s, target #%s/%s with %s planet(s), mission time: %s"
                % (cnt, sInd + 1, TL.nStars, len(pInds), TK.obsStart.round(2))
            )

            DRM["arrival_time"] = TK.currentTimeNorm.copy().to("day")

            if sInd != occ_sInd:
                # --- coronagraph detection branch ---
                self.starVisits[sInd] += 1
                # PERFORM DETECTION and populate revisit list attribute.
                # First store fEZ, dMag, WA
                if np.any(pInds):
                    DRM["det_fEZ"] = SU.fEZ[pInds].to("1/arcsec2").value.tolist()
                    DRM["det_dMag"] = SU.dMag[pInds].tolist()
                    DRM["det_WA"] = SU.WA[pInds].to("mas").value.tolist()
                (
                    detected,
                    det_fZ,
                    det_systemParams,
                    det_SNR,
                    FA,
                ) = self.observation_detection(sInd, t_det, det_mode)

                if np.any(detected):
                    self.sInd_detcounts[sInd] += 1
                    self.sInd_dettimes[sInd] = (
                        self.sInd_dettimes.get(sInd) or []
                    ) + [TK.currentTimeNorm.copy().to("day")]
                    self.vprint(" Det. results are: %s" % (detected))

                # update GAtime
                self.GAtime = (
                    self.GAtime + t_det.to("day") * self.GA_simult_det_fraction
                )

                # populate the DRM with detection results
                DRM["det_time"] = t_det.to("day")
                DRM["det_status"] = detected
                DRM["det_SNR"] = det_SNR
                DRM["det_fZ"] = det_fZ.to("1/arcsec2")
                DRM["det_params"] = det_systemParams
                DRM["FA_det_status"] = int(FA)

                det_comp = Comp.comp_per_intTime(
                    t_det,
                    TL,
                    sInd,
                    det_fZ,
                    self.ZodiacalLight.fEZ0,
                    TL.int_WA[sInd],
                    det_mode,
                )[0]
                DRM["det_comp"] = det_comp
                DRM["det_mode"] = dict(det_mode)
                # drop the bulky instrument/system sub-dicts from the record
                del DRM["det_mode"]["inst"], DRM["det_mode"]["syst"]

            elif sInd == occ_sInd:
                # --- starshade characterization branch ---
                self.last_chard = occ_sInd
                self.occ_starVisits[occ_sInd] += 1
                # PERFORM CHARACTERIZATION and populate spectra list attribute.
                occ_pInds = np.where(SU.plan2star == occ_sInd)[0]
                sInd = occ_sInd

                # record slew bookkeeping and spend slew propellant mass
                DRM["slew_time"] = self.occ_slewTime.to("day").value
                DRM["slew_angle"] = self.occ_sd.to("deg").value
                slew_mass_used = (
                    self.occ_slewTime * Obs.defburnPortion * Obs.flowRate
                )
                DRM["slew_dV"] = (
                    (self.occ_slewTime * self.ao * Obs.defburnPortion)
                    .to("m/s")
                    .value
                )
                DRM["slew_mass_used"] = slew_mass_used.to("kg")
                Obs.scMass = Obs.scMass - slew_mass_used
                DRM["scMass"] = Obs.scMass.to("kg")
                if Obs.twotanks:
                    Obs.slewMass = Obs.slewMass - slew_mass_used
                    DRM["slewMass"] = Obs.slewMass.to("kg")

                self.logger.info(" Starshade and telescope aligned at target star")
                self.vprint(" Starshade and telescope aligned at target star")

                # PERFORM CHARACTERIZATION and populate spectra list attribute
                (
                    characterized,
                    char_fZ,
                    char_systemParams,
                    char_SNR,
                    char_intTime,
                ) = self.observation_characterization(sInd, char_mode)
                if np.any(characterized):
                    self.vprint(" Char. results are: %s" % (characterized))
                else:
                    # make sure we don't accidentally double characterize
                    TK.advanceToAbsTime(TK.currentTimeAbs.copy() + 0.01 * u.d)
                assert char_intTime != 0, "Integration time can't be 0."
                if np.any(occ_pInds):
                    DRM["char_fEZ"] = (
                        SU.fEZ[occ_pInds].to("1/arcsec2").value.tolist()
                    )
                    DRM["char_dMag"] = SU.dMag[occ_pInds].tolist()
                    DRM["char_WA"] = SU.WA[occ_pInds].to("mas").value.tolist()
                DRM["char_mode"] = dict(char_mode)
                del DRM["char_mode"]["inst"], DRM["char_mode"]["syst"]

                # update the occulter wet mass
                if OS.haveOcculter and char_intTime is not None:
                    DRM = self.update_occulter_mass(DRM, sInd, char_intTime, "char")
                    char_comp = Comp.comp_per_intTime(
                        char_intTime,
                        TL,
                        occ_sInd,
                        char_fZ,
                        self.ZodiacalLight.fEZ0,
                        TL.int_WA[occ_sInd],
                        char_mode,
                    )[0]
                    DRM["char_comp"] = char_comp
                # NOTE(review): FA is hard-set False here, so the
                # FA_* conditionals below always take the else path —
                # confirm whether false-alarm handling was intended.
                FA = False
                # populate the DRM with characterization results
                DRM["char_time"] = (
                    char_intTime.to("day") if char_intTime else 0.0 * u.day
                )
                # DRM['char_counts'] = self.sInd_charcounts[sInd]
                DRM["char_status"] = characterized[:-1] if FA else characterized
                DRM["char_SNR"] = char_SNR[:-1] if FA else char_SNR
                DRM["char_fZ"] = char_fZ.to("1/arcsec2")
                DRM["char_params"] = char_systemParams
                # populate the DRM with FA results
                DRM["FA_det_status"] = int(FA)
                DRM["FA_char_status"] = characterized[-1] if FA else 0
                DRM["FA_char_SNR"] = char_SNR[-1] if FA else 0.0
                DRM["FA_char_fEZ"] = (
                    self.lastDetected[sInd, 1][-1] / u.arcsec**2
                    if FA
                    else 0.0 / u.arcsec**2
                )
                DRM["FA_char_dMag"] = self.lastDetected[sInd, 2][-1] if FA else 0.0
                DRM["FA_char_WA"] = (
                    self.lastDetected[sInd, 3][-1] * u.arcsec
                    if FA
                    else 0.0 * u.arcsec
                )

                # add star back into the revisit list
                if np.any(characterized):
                    char = np.where(characterized)[0]
                    pInds = np.where(SU.plan2star == sInd)[0]
                    smin = np.min(SU.s[pInds[char]])
                    pInd_smin = pInds[np.argmin(SU.s[pInds[char]])]

                    # half-period revisit time from Kepler's third law
                    Ms = TL.MsTrue[sInd]
                    sp = smin
                    Mp = SU.Mp[pInd_smin]
                    mu = const.G * (Mp + Ms)
                    T = 2.0 * np.pi * np.sqrt(sp**3 / mu)
                    t_rev = TK.currentTimeNorm.copy() + T / 2.0  # noqa: F841

            self.goal_GAtime = self.GA_percentage * TK.currentTimeNorm.copy().to(
                "day"
            )
            goal_GAdiff = self.goal_GAtime - self.GAtime

            # allocate extra time to GA if we are falling behind
            if (
                goal_GAdiff > 1 * u.d
                and TK.currentTimeAbs.copy() < self.occ_arrives
            ):
                GA_diff = min(
                    self.occ_arrives - TK.currentTimeAbs.copy(), goal_GAdiff
                )
                self.vprint(
                    "Allocating time %s to general astrophysics" % (GA_diff)
                )
                self.GAtime = self.GAtime + GA_diff
                TK.advanceToAbsTime(TK.currentTimeAbs.copy() + GA_diff)
            # allocate time if there is no target for the starshade
            elif (
                goal_GAdiff > 1 * u.d
                and (self.occ_arrives - TK.currentTimeAbs.copy()) < -5 * u.d
                and not np.any(occ_sInds)
            ):
                self.vprint(
                    (
                        "No Available Occulter Targets: Allocating time {} "
                        "to general astrophysics"
                    ).format(goal_GAdiff)
                )
                self.GAtime = self.GAtime + goal_GAdiff
                TK.advanceToAbsTime(TK.currentTimeAbs.copy() + goal_GAdiff)

            DRM["exoplanetObsTime"] = TK.exoplanetObsTime.copy()

            # Append result values to self.DRM
            self.DRM.append(DRM)

            # Calculate observation end time
            TK.obsEnd = TK.currentTimeNorm.copy().to("day")

            # With prototype TimeKeeping, if no OB duration was specified, advance
            # to the next OB with timestep equivalent to time spent on one target
            if np.isinf(TK.OBduration) and (TK.missionPortion < 1):
                self.arbitrary_time_advancement(
                    TK.currentTimeNorm.to("day").copy() - DRM["arrival_time"]
                )

        else:  # sInd == None
            sInd = old_sInd  # Retain the last observed star
            if (
                TK.currentTimeNorm.copy() >= TK.OBendTimes[TK.OBnumber]
            ):  # currentTime is at end of OB
                # Conditional Advance To Start of Next OB
                if not TK.mission_is_over(
                    OS, Obs, det_mode
                ):  # as long as the mission is not over
                    TK.advancetToStartOfNextOB()  # Advance To Start of Next OB
            elif waitTime is not None:
                # CASE 1: Advance specific wait time
                # NOTE(review): waitTime is always None in this method
                # (set above, never reassigned) — confirm this branch is
                # reachable.
                _ = TK.advanceToAbsTime(TK.currentTimeAbs.copy() + waitTime)
                self.vprint("waitTime is not None")
            else:
                startTimes = (
                    TK.currentTimeAbs.copy() + np.zeros(TL.nStars) * u.d
                )  # Start Times of Observations
                observableTimes = Obs.calculate_observableTimes(
                    TL,
                    np.arange(TL.nStars),
                    startTimes,
                    self.koMaps,
                    self.koTimes,
                    base_det_mode,
                )[0]
                # CASE 2 If There are no observable targets
                # for the rest of the mission
                if (
                    observableTimes[
                        (
                            TK.missionFinishAbs.copy().value * u.d
                            > observableTimes.value * u.d
                        )
                        * (
                            observableTimes.value * u.d
                            >= TK.currentTimeAbs.copy().value * u.d
                        )
                    ].shape[0]
                ) == 0:
                    self.vprint(
                        (
                            "No Observable Targets for Remainder of mission at "
                            "currentTimeNorm = {}"
                        ).format(TK.currentTimeNorm)
                    )
                    # Manually advancing time to mission end
                    TK.currentTimeNorm = TK.missionLife
                    TK.currentTimeAbs = TK.missionFinishAbs
                else:
                    # CASE 3 nominal wait time if at least 1 target
                    # is still in list and observable
                    # TODO: ADD ADVANCE TO WHEN FZMIN OCURS
                    inds1 = np.arange(TL.nStars)[
                        observableTimes.value * u.d
                        > TK.currentTimeAbs.copy().value * u.d
                    ]
                    # apply intTime filter
                    inds2 = np.intersect1d(self.intTimeFilterInds, inds1)
                    # apply revisit Filter #NOTE this means stars you
                    # added to the revisit list
                    inds3 = self.revisitFilter(
                        inds2, TK.currentTimeNorm.copy() + self.dt_max.to(u.d)
                    )
                    self.vprint(
                        "Filtering %d stars from advanceToAbsTime"
                        % (TL.nStars - len(inds3))
                    )
                    oTnowToEnd = observableTimes[inds3]
                    # there is at least one observableTime between now
                    # and the end of the mission
                    if not oTnowToEnd.value.shape[0] == 0:
                        # advance to that observable time
                        tAbs = np.min(oTnowToEnd)
                    else:
                        # advance to end of mission
                        tAbs = TK.missionStart + TK.missionLife
                    tmpcurrentTimeNorm = TK.currentTimeNorm.copy()
                    # Advance Time to this time OR start of next
                    # OB following this time
                    _ = TK.advanceToAbsTime(tAbs)
                    self.vprint(
                        (
                            "No Observable Targets a currentTimeNorm = {:.2f} "
                            "Advanced To currentTimeNorm = {:.2f}"
                        ).format(
                            tmpcurrentTimeNorm.to("day"),
                            TK.currentTimeNorm.to("day"),
                        )
                    )
    else:
        # while-else: reached only when the loop condition fails (mission
        # time exhausted), never via break.
        dtsim = (time.time() - t0) * u.s
        mission_end = (
            "Mission complete: no more time available.\n"
            + "Simulation duration: %s.\n" % dtsim.astype("int")
            + "Results stored in SurveySimulation.DRM (Design Reference Mission)."
        )
        self.logger.info(mission_end)
        self.vprint(mission_end)

        return mission_end
def next_target(self, old_sInd, old_occ_sInd, det_modes, char_mode):
    """Finds index of next target star and calculates its integration time.

    This method chooses the next target star index based on which
    stars are available, their integration time, and maximum completeness.
    Returns None if no target could be found.

    Args:
        old_sInd (integer):
            Index of the previous target star for the telescope
        old_occ_sInd (integer):
            Index of the previous target star for the occulter
        det_modes (dict array):
            Selected observing mode for detection
        char_mode (dict):
            Selected observing mode for characterization

    Returns:
        tuple:
            DRM (dicts):
                Contains the results of survey simulation
            sInd (integer):
                Index of next target star. Defaults to None.
            occ_sInd (integer):
                Index of next occulter target star. Defaults to None.
            t_det (astropy Quantity):
                Selected star integration time for detection in units of day.
                Defaults to None.
            sd (astropy Quantity array):
                Angular separations between the previous occulter target
                and each star (None when no target was found).
            occ_sInds (integer array):
                Surviving candidate occulter target indices.
            det_mode (dict):
                Detection mode actually selected; a deep copy of
                det_modes[0], possibly combined with det_modes[1] for
                dual-band detection.
    """
    OS = self.OpticalSystem
    ZL = self.ZodiacalLight
    TL = self.TargetList
    Obs = self.Observatory
    TK = self.TimeKeeping
    SU = self.SimulatedUniverse

    # Create DRM
    DRM = {}

    # selecting appropriate koMap
    occ_koMap = self.koMaps[char_mode["syst"]["name"]]
    koMap = self.koMaps[det_modes[0]["syst"]["name"]]

    # In case of an occulter, initialize slew time factor
    # (add transit time and reduce starshade mass)
    assert OS.haveOcculter
    self.ao = Obs.thrust / Obs.scMass

    # Star indices that correspond with the given HIPs numbers for the occulter
    # XXX ToDo: print out HIPs that don't show up in TL
    HIP_sInds = np.where(np.in1d(TL.Name, self.occHIPs))[0]
    if TL.earths_only:
        HIP_sInds = np.union1d(HIP_sInds, self.promoted_stars).astype(int)

    sInd = None

    # Now, start to look for available targets
    while not TK.mission_is_over(OS, Obs, det_modes[0]):
        # allocate settling time + overhead time
        tmpCurrentTimeAbs = TK.currentTimeAbs.copy()
        occ_tmpCurrentTimeAbs = TK.currentTimeAbs.copy()

        # 0 initialize arrays
        slewTimes = np.zeros(TL.nStars) * u.d
        # fZs = np.zeros(TL.nStars) / u.arcsec**2
        # dV = np.zeros(TL.nStars) * u.m / u.s
        intTimes = np.zeros(TL.nStars) * u.d
        occ_intTimes = np.zeros(TL.nStars) * u.d
        # tovisit = np.zeros(TL.nStars, dtype=bool)
        occ_tovisit = np.zeros(TL.nStars, dtype=bool)
        sInds = np.arange(TL.nStars)

        # 1 Find spacecraft orbital START positions and filter out unavailable
        # targets. If occulter, each target has its own START position.
        sd = Obs.star_angularSep(TL, old_occ_sInd, sInds, tmpCurrentTimeAbs)
        obsTimes = Obs.calculate_observableTimes(
            TL, sInds, tmpCurrentTimeAbs, self.koMaps, self.koTimes, char_mode
        )
        slewTimes = Obs.calculate_slewTimes(
            TL, old_occ_sInd, sInds, sd, obsTimes, tmpCurrentTimeAbs
        )

        # 2.1 filter out totTimes > integration cutoff
        if len(sInds) > 0:
            occ_sInds = np.intersect1d(self.occ_intTimeFilterInds, sInds)
        if len(sInds) > 0:
            sInds = np.intersect1d(self.intTimeFilterInds, sInds)

        # Starttimes based off of slewtime
        occ_startTimes = occ_tmpCurrentTimeAbs.copy() + slewTimes
        startTimes = tmpCurrentTimeAbs.copy() + np.zeros(TL.nStars) * u.d

        # 2.5 Filter stars not observable at startTimes
        # NOTE(review): the bare excepts below swallow any failure (not
        # just the empty-candidate IndexError they document) — confirm.
        try:
            tmpIndsbool = list()
            for i in np.arange(len(occ_sInds)):
                koTimeInd = np.where(
                    np.round(occ_startTimes[occ_sInds[i]].value)
                    - self.koTimes.value
                    == 0
                )[0][
                    0
                ]  # find indice where koTime is endTime[0]
                tmpIndsbool.append(
                    occ_koMap[occ_sInds[i]][koTimeInd].astype(bool)
                )  # Is star observable at time ind
            sInds_occ_ko = occ_sInds[tmpIndsbool]
            occ_sInds = sInds_occ_ko[np.where(np.in1d(sInds_occ_ko, HIP_sInds))[0]]
            del tmpIndsbool
        except:  # noqa: E722 If there are no target stars to observe
            sInds_occ_ko = np.asarray([], dtype=int)
            occ_sInds = np.asarray([], dtype=int)

        try:
            tmpIndsbool = list()
            for i in np.arange(len(sInds)):
                koTimeInd = np.where(
                    np.round(startTimes[sInds[i]].value) - self.koTimes.value == 0
                )[0][
                    0
                ]  # find indice where koTime is endTime[0]
                tmpIndsbool.append(
                    koMap[sInds[i]][koTimeInd].astype(bool)
                )  # Is star observable at time ind
            sInds = sInds[tmpIndsbool]
            del tmpIndsbool
        except:  # noqa: E722 If there are no target stars to observe
            sInds = np.asarray([], dtype=int)

        # 2.9 Occulter target promotion step
        occ_sInds = self.promote_coro_targets(occ_sInds, sInds_occ_ko)

        # 3 Filter out all previously (more-)visited targets, unless in
        # revisit list, with time within some dt of start (+- 1 week)
        if len(sInds.tolist()) > 0:
            sInds = self.revisitFilter(sInds, TK.currentTimeNorm.copy())

        # revisit list, with time after start
        if np.any(occ_sInds):
            occ_tovisit[occ_sInds] = (
                self.occ_starVisits[occ_sInds]
                == self.occ_starVisits[occ_sInds].min()
            )
            if self.occ_starRevisit.size != 0:
                dt_max = 1.0 * u.week  # noqa: F841
                dt_rev = (
                    TK.currentTimeNorm.copy() - self.occ_starRevisit[:, 1] * u.day
                )
                ind_rev = [
                    int(x)
                    for x in self.occ_starRevisit[dt_rev > 0, 0]
                    if x in occ_sInds
                ]
                occ_tovisit[ind_rev] = True
            occ_sInds = np.where(occ_tovisit)[0]

        # 4 calculate integration times for ALL preselected targets,
        # and filter out totTimes > integration cutoff
        (
            maxIntTimeOBendTime,
            maxIntTimeExoplanetObsTime,
            maxIntTimeMissionLife,
        ) = TK.get_ObsDetectionMaxIntTime(Obs, det_modes[0])
        maxIntTime = min(
            maxIntTimeOBendTime,
            maxIntTimeExoplanetObsTime,
            maxIntTimeMissionLife,
            OS.intCutoff,
        )  # Maximum intTime allowed

        (
            maxIntTimeOBendTime,
            maxIntTimeExoplanetObsTime,
            maxIntTimeMissionLife,
        ) = TK.get_ObsDetectionMaxIntTime(Obs, char_mode)
        occ_maxIntTime = min(
            maxIntTimeOBendTime,
            maxIntTimeExoplanetObsTime,
            maxIntTimeMissionLife,
            OS.intCutoff,
        )  # Maximum intTime allowed

        if len(occ_sInds) > 0:
            if self.int_inflection:
                fEZ = ZL.fEZ0
                WA = TL.int_WA
                occ_intTimes[occ_sInds] = self.calc_int_inflection(
                    occ_sInds,
                    fEZ,
                    occ_startTimes,
                    WA[occ_sInds],
                    char_mode,
                    ischar=True,
                )
                totTimes = occ_intTimes * char_mode["timeMultiplier"]
                occ_endTimes = occ_startTimes + totTimes
            else:
                # characterization_start = occ_startTimes
                occ_intTimes[occ_sInds] = self.calc_targ_intTime(
                    occ_sInds, occ_startTimes[occ_sInds], char_mode
                ) * (1 + self.charMargin)

                # Adjust integration time for stars with known earths around them
                for occ_star in occ_sInds:
                    if occ_star in self.promoted_stars:
                        occ_earths = np.intersect1d(
                            np.where(SU.plan2star == occ_star)[0], self.known_earths
                        ).astype(int)
                        if np.any(occ_earths):
                            fZ = ZL.fZ(
                                Obs,
                                TL,
                                occ_star,
                                occ_startTimes[occ_star],
                                char_mode,
                            )
                            fEZ = (
                                SU.fEZ[occ_earths].to("1/arcsec2").value
                                / u.arcsec**2
                            )
                            dMag = SU.dMag[occ_earths]
                            WA = SU.WA[occ_earths]
                            earthlike_inttimes = OS.calc_intTime(
                                TL, occ_star, fZ, fEZ, dMag, WA, char_mode
                            ) * (1 + self.charMargin)
                            earthlike_inttime = earthlike_inttimes[
                                (earthlike_inttimes < occ_maxIntTime)
                            ]
                            if len(earthlike_inttime) > 0:
                                occ_intTimes[occ_star] = np.max(earthlike_inttime)
                            else:
                                occ_intTimes[occ_star] = np.max(earthlike_inttimes)
                occ_endTimes = (
                    occ_startTimes
                    + (occ_intTimes * char_mode["timeMultiplier"])
                    + Obs.settlingTime
                    + char_mode["syst"]["ohTime"]
                )

            occ_sInds = occ_sInds[
                (occ_intTimes[occ_sInds] <= occ_maxIntTime)
            ]  # Filters targets exceeding maximum intTime
            occ_sInds = occ_sInds[
                (occ_intTimes[occ_sInds] > 0.0 * u.d)
            ]  # Filters with an inttime of 0

            if occ_maxIntTime.value <= 0:
                occ_sInds = np.asarray([], dtype=int)

        if len(sInds.tolist()) > 0:
            intTimes[sInds] = self.calc_targ_intTime(
                sInds, startTimes[sInds], det_modes[0]
            )
            sInds = sInds[
                np.where(
                    (intTimes[sInds] <= maxIntTime) & (intTimes[sInds] > 0.0 * u.d)
                )
            ]  # Filters targets exceeding end of OB
            endTimes = startTimes + intTimes

            if maxIntTime.value <= 0:
                sInds = np.asarray([], dtype=int)

        # 5.2 find spacecraft orbital END positions (for each candidate target),
        # and filter out unavailable targets
        if len(occ_sInds.tolist()) > 0 and Obs.checkKeepoutEnd:
            try:
                tmpIndsbool = list()
                for i in np.arange(len(occ_sInds)):
                    koTimeInd = np.where(
                        np.round(occ_endTimes[occ_sInds[i]].value)
                        - self.koTimes.value
                        == 0
                    )[0][
                        0
                    ]  # find indice where koTime is endTime[0]
                    tmpIndsbool.append(
                        occ_koMap[occ_sInds[i]][koTimeInd].astype(bool)
                    )  # Is star observable at time ind
                occ_sInds = occ_sInds[tmpIndsbool]
                del tmpIndsbool
            except:  # noqa: E722
                occ_sInds = np.asarray([], dtype=int)

        if len(sInds.tolist()) > 0 and Obs.checkKeepoutEnd:
            try:
                tmpIndsbool = list()
                for i in np.arange(len(sInds)):
                    koTimeInd = np.where(
                        np.round(endTimes[sInds[i]].value) - self.koTimes.value == 0
                    )[0][
                        0
                    ]  # find indice where koTime is endTime[0]
                    tmpIndsbool.append(
                        koMap[sInds[i]][koTimeInd].astype(bool)
                    )  # Is star observable at time ind
                sInds = sInds[tmpIndsbool]
                del tmpIndsbool
            except:  # noqa: E722
                sInds = np.asarray([], dtype=int)

        # 5.3 Filter off current occulter target star from detection list
        if old_occ_sInd is not None:
            sInds = sInds[np.where(sInds != old_occ_sInd)[0]]
            occ_sInds = occ_sInds[np.where(occ_sInds != old_occ_sInd)[0]]

        # 6.1 Filter off any stars visited by the occulter 3 or more times
        if np.any(occ_sInds):
            occ_sInds = occ_sInds[
                np.where(self.occ_starVisits[occ_sInds] < self.occ_max_visits)[0]
            ]

        # 6.2 Filter off coronograph stars with > 3 visits and no detections
        no_dets = np.logical_and(
            (self.starVisits[sInds] > self.n_det_remove),
            (self.sInd_detcounts[sInds] == 0),
        )
        sInds = sInds[np.where(np.invert(no_dets))[0]]

        max_dets = np.where(self.sInd_detcounts[sInds] < self.max_successful_dets)[
            0
        ]
        sInds = sInds[max_dets]

        # 7 Filter off cornograph stars with too-long inttimes
        available_time = None
        if self.occ_arrives > TK.currentTimeAbs:
            available_time = self.occ_arrives - TK.currentTimeAbs.copy()
            if np.any(sInds[intTimes[sInds] < available_time]):
                sInds = sInds[intTimes[sInds] < available_time]

        # 8 remove occ targets on ignore_stars list
        occ_sInds = np.setdiff1d(
            occ_sInds, np.intersect1d(occ_sInds, self.ignore_stars)
        )

        t_det = 0 * u.d
        det_mode = copy.deepcopy(det_modes[0])
        occ_sInd = old_occ_sInd

        # 8 Choose best target from remaining
        # if the starshade has arrived at its destination,
        # or it is the first observation
        if np.any(occ_sInds):
            if old_occ_sInd is None or (
                (TK.currentTimeAbs.copy() + t_det) >= self.occ_arrives
                and self.ready_to_update
            ):
                occ_sInd = self.choose_next_occulter_target(
                    old_occ_sInd, occ_sInds, occ_intTimes
                )
                if old_occ_sInd is None:
                    self.occ_arrives = TK.currentTimeAbs.copy()
                else:
                    self.occ_arrives = occ_startTimes[occ_sInd]
                    self.occ_slewTime = slewTimes[occ_sInd]
                    self.occ_sd = sd[occ_sInd]
                # if not np.any(sInds):
                # sInd = occ_sInd
                self.ready_to_update = False
                # self.occ_starVisits[occ_sInd] += 1
        elif not np.any(sInds):
            # no candidates at all: wait a day and re-run the filters
            TK.advanceToAbsTime(TK.currentTimeAbs.copy() + 1 * u.d)
            continue

        if occ_sInd is not None:
            sInds = sInds[np.where(sInds != occ_sInd)[0]]

        if np.any(sInds):
            # choose sInd of next target
            sInd = self.choose_next_telescope_target(
                old_sInd, sInds, intTimes[sInds]
            )

            # Perform dual band detections if necessary
            if (
                TL.int_WA[sInd] > det_modes[1]["IWA"]
                and TL.int_WA[sInd] < det_modes[1]["OWA"]
            ):
                # combine the two imaging bands into one effective mode
                det_mode["BW"] = det_mode["BW"] + det_modes[1]["BW"]
                det_mode["inst"]["sread"] = (
                    det_mode["inst"]["sread"] + det_modes[1]["inst"]["sread"]
                )
                det_mode["inst"]["idark"] = (
                    det_mode["inst"]["idark"] + det_modes[1]["inst"]["idark"]
                )
                det_mode["inst"]["CIC"] = (
                    det_mode["inst"]["CIC"] + det_modes[1]["inst"]["CIC"]
                )
                det_mode["syst"]["optics"] = np.mean(
                    (det_mode["syst"]["optics"], det_modes[1]["syst"]["optics"])
                )
                det_mode["instName"] = "combined"

            t_det = self.calc_targ_intTime(
                np.array([sInd]), np.array([startTimes[sInd]]), det_mode
            )[0]

            # cap the integration time by the OB/mission limit and by the
            # time remaining before the starshade arrives
            if t_det > maxIntTime and maxIntTime > 0 * u.d:
                t_det = maxIntTime
            if available_time is not None and available_time > 0 * u.d:
                if t_det > available_time:
                    t_det = available_time.copy().value * u.d
        else:
            sInd = None

        # if no observable target, call the TimeKeeping.wait() method
        if not np.any(sInds) and not np.any(occ_sInds):
            self.vprint(
                "No Observable Targets at currentTimeNorm= "
                + str(TK.currentTimeNorm.copy())
            )
            return DRM, None, None, None, None, None, None

        # a target was selected: leave the search loop (skips while-else)
        break
    else:
        # while-else: loop condition failed before a break, i.e. the
        # mission ended while searching.
        self.logger.info("Mission complete: no more time available")
        self.vprint("Mission complete: no more time available")
        return DRM, None, None, None, None, None, None

    if TK.mission_is_over(OS, Obs, det_mode):
        self.logger.info("Mission complete: no more time available")
        self.vprint("Mission complete: no more time available")
        return DRM, None, None, None, None, None, None

    # NOTE(review): occ_earths is computed here but neither used nor
    # returned — confirm whether this was meant to be recorded.
    occ_earths = np.intersect1d(
        np.where(SU.plan2star == occ_sInd)[0], self.known_earths
    ).astype(int)

    return DRM, sInd, occ_sInd, t_det, sd, occ_sInds, det_mode
| {
"content_hash": "fa39be91f7c6d127fba59e38708c6778",
"timestamp": "",
"source": "github",
"line_count": 889,
"max_line_length": 88,
"avg_line_length": 44.503937007874015,
"alnum_prop": 0.4523809523809524,
"repo_name": "dsavransky/EXOSIMS",
"id": "02d53b1382caf85e2e1835e1df32524d69c3448e",
"size": "39564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "EXOSIMS/SurveySimulation/tieredScheduler_DD_SLSQP.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8087"
},
{
"name": "Cython",
"bytes": "2459"
},
{
"name": "Python",
"bytes": "2936469"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.conf.urls import patterns, include, url
from django.core.urlresolvers import reverse_lazy
# URL routes for the authentication app.
# NOTE(review): patterns() with dotted-string view references is the
# pre-Django-1.10 URLconf style (both removed in 1.10), so this file
# targets an older Django release.
urlpatterns = patterns(
    '',
    # Login handled by the app's own view.
    url(r'^login/$', 'authenticateapp.views.log_in', name='log_in'),
    # Logout via Django's built-in view, then redirect back to login;
    # reverse_lazy defers resolution until the URLconf is fully loaded.
    url(
        r'^logout/$',
        'django.contrib.auth.views.logout',
        {'next_page': reverse_lazy('log_in')},
        name='logout'),
    # New-user registration.
    url(r'^register/$',
        'authenticateapp.views.register',
        name='register'
    ),
)
| {
"content_hash": "9ccbc4ba075c1fa0bd2a48a552d54884",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 68,
"avg_line_length": 27.22222222222222,
"alnum_prop": 0.6163265306122448,
"repo_name": "panioglovadim/first",
"id": "5178554b305a98a2bfc45805ab063f5e374ac1f7",
"size": "490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/authenticateapp/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7789"
},
{
"name": "Python",
"bytes": "8878"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.