text stringlengths 4 1.02M | meta dict |
|---|---|
"""Tests for image_searcher_dataloader."""
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_examples.lite.model_maker.core.data_util import image_searcher_dataloader
from tensorflow_examples.lite.model_maker.core.data_util import metadata_loader
from tensorflow_examples.lite.model_maker.core import test_util
class ImageSearcherDataloaderTest(parameterized.TestCase, tf.test.TestCase):
    """Unit tests for the image searcher DataLoader."""

    def setUp(self):
        super().setUp()
        self.tflite_path = test_util.get_test_data_path(
            "mobilenet_v2_035_96_embedder_with_metadata.tflite")
        self.image_dir1 = test_util.get_test_data_path("food")
        self.image_dir2 = test_util.get_test_data_path("animals")

    @parameterized.parameters(
        (False, 1.335398),
        (True, 0.0494329),
    )
    def test_from_folder(self, l2_normalize, expected_value):
        loader = image_searcher_dataloader.DataLoader.create(
            self.tflite_path, l2_normalize=l2_normalize)
        loader.load_from_folder(self.image_dir1)
        loader.load_from_folder(self.image_dir2)
        self.assertLen(loader, 3)
        self.assertEqual(loader.dataset.shape, (3, 1280))
        self.assertAlmostEqual(loader.dataset[0][0], expected_value, places=6)
        # The order of file may be different.
        self.assertEqual(set(loader.metadata),
                         set(["burger", "sparrow", "cats_and_dogs"]))

    def test_from_folder_binary_metadata(self):
        image_dir = test_util.get_test_data_path("images_with_binary_metadata")
        loader = image_searcher_dataloader.DataLoader.create(
            self.tflite_path,
            metadata_type=metadata_loader.MetadataType.FROM_DAT_FILE)
        # Loads from binary metadata.
        loader.load_from_folder(image_dir, mode="rb")
        self.assertLen(loader, 2)
        self.assertEqual(loader.dataset.shape, (2, 1280))
        # The order of file may be different.
        self.assertEqual(set(loader.metadata), set([b"\x11\x33", b"\x00\x44"]))
# Standard TensorFlow test entry point.
if __name__ == "__main__":
    tf.test.main()
| {
"content_hash": "a9158b216b725c966dcba8131ea1ce3e",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 89,
"avg_line_length": 38.82692307692308,
"alnum_prop": 0.7097573055968301,
"repo_name": "tensorflow/examples",
"id": "05adc43f95222cce0f6c2243ea970df15a6ba706",
"size": "2627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_examples/lite/model_maker/core/data_util/image_searcher_dataloader_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "106227"
},
{
"name": "CMake",
"bytes": "1553"
},
{
"name": "CSS",
"bytes": "4746"
},
{
"name": "Dockerfile",
"bytes": "467"
},
{
"name": "HTML",
"bytes": "12491"
},
{
"name": "Java",
"bytes": "305092"
},
{
"name": "JavaScript",
"bytes": "24461"
},
{
"name": "Jupyter Notebook",
"bytes": "1733035"
},
{
"name": "Kotlin",
"bytes": "631463"
},
{
"name": "Objective-C",
"bytes": "14639"
},
{
"name": "Objective-C++",
"bytes": "14293"
},
{
"name": "Python",
"bytes": "1232357"
},
{
"name": "Ruby",
"bytes": "3744"
},
{
"name": "Shell",
"bytes": "41573"
},
{
"name": "Starlark",
"bytes": "17498"
},
{
"name": "Swift",
"bytes": "553535"
}
],
"symlink_target": ""
} |
import datetime
# A UTC class, see:
# http://docs.python.org/2/library/datetime.html#tzinfo-objects
class UTC(datetime.tzinfo):
    """Concrete tzinfo for Coordinated Universal Time: zero offset, no DST."""

    # Shared zero offset (timedelta is immutable, so sharing is safe).
    _ZERO = datetime.timedelta(0)

    def utcoffset(self, dt):
        return self._ZERO

    def tzname(self, dt):
        return "UTC"

    def dst(self, dt):
        return self._ZERO

    def __repr__(self):
        return "<UTC>"

    def __str__(self):
        return "UTC"

    def __unicode__(self):
        # Python 2 compatibility hook.
        return "UTC"
def json_to_datetime(date):
    """Parse a JSON datetime string into a timezone-aware datetime.

    :param date: The datetime representation.
    :type date: :obj:`string`

    :returns: A timezone aware datetime object with a UTC tzinfo.
    :rtype: :class:`datetime.datetime`
    """
    naive = datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ")
    return naive.replace(tzinfo=UTC())
| {
"content_hash": "fdb36693eb3f7b6a5a47ee621245cfe5",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 77,
"avg_line_length": 21.95,
"alnum_prop": 0.6116173120728929,
"repo_name": "voxpupuli/pypuppetdb",
"id": "6828105856183f162f1b97742f50943526989f28",
"size": "878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pypuppetdb/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "194970"
}
],
"symlink_target": ""
} |
"""
Note: Legacy module which is not actually used any more,
but which is potentially useful for future reference.
Offers theano-implementations of the distance functions used,
for calculating the pairwise distances between two vectors.
"""
import theano
import theano.tensor as T
def get_manhattan_fn():
    """Compile a theano function for the Manhattan (L1) distance.

    The returned callable takes two float vectors plus an index matrix and
    sums absolute element-wise differences over the selected entries.
    """
    a = T.fvector('a')
    b = T.fvector('b')
    idxs = T.ivector('idxs')
    diff = a[[idxs]] - b[[idxs]]
    distance = T.sum(T.abs_(diff), axis=1)
    return theano.function([a, b, idxs], distance, allow_input_downcast=True)
def get_euclidean_fn():
    """Compile a theano function for the Euclidean (L2) distance.

    The returned callable takes two float vectors plus an index matrix and
    computes the square root of the summed squared differences.
    """
    a = T.fvector('a')
    b = T.fvector('b')
    idxs = T.ivector('idxs')
    squared = T.sqr(a[[idxs]] - b[[idxs]])
    distance = T.sqrt(T.sum(squared, axis=1))
    return theano.function([a, b, idxs], distance, allow_input_downcast=True)
def get_minmax_fn():
    """Compile a theano function for the min/max distance.

    Computes 1 - sum(min(a, b)) / (sum(max(a, b)) + eps) over the selected
    entries; the small epsilon guards against a zero denominator.
    """
    a = T.fvector('a')
    b = T.fvector('b')
    idxs = T.ivector('idxs')
    comb = T.stack([a[[idxs]], b[[idxs]]], axis=1)
    numerator = T.sum(T.min(comb, axis=1))
    denominator = T.sum(T.max(comb, axis=1)) + 1e-6
    distance = 1.0 - (numerator / denominator)
    return theano.function(inputs=[a, b, idxs], outputs=distance, allow_input_downcast=True)
| {
"content_hash": "69bbf5c6f4b26dce100b72cc497c20ff",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 92,
"avg_line_length": 33.27272727272727,
"alnum_prop": 0.6402550091074681,
"repo_name": "mikekestemont/ruzicka",
"id": "0e3c41f84d1737fc8f59a64d9f0ac161687c6a45",
"size": "1145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/ruzicka/tensor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "33992"
},
{
"name": "Python",
"bytes": "219727"
}
],
"symlink_target": ""
} |
__author__ = 'matthewpang'
import serial
import time
import pickle
import struct

# Open the stepper controller's serial port at 230400 baud, then give the
# device a few seconds to reset after the port opens.
serStepper = serial.Serial('/dev/cu.usbmodem14231', 230400)
time.sleep(3)
def output_encoder(value):
    """Encode a 16-bit unsigned integer into the framed wire format.

    The value is packed little endian and wrapped in the defined format
    [0xAA,LSByte,MSByte,OxFF].
    """
    payload = struct.pack('<H', value)
    return bytearray(b'\xaa' + payload + b'\xff')
def output_decoder(array):
    """Decode a framed byte array back into a 16-bit unsigned integer.

    Expects the little-endian format [0xAA, LSByte, MSByte, 0xFF] produced
    by output_encoder().

    :param array: Byte sequence (bytes/bytearray) to decode.
    :returns: The decoded 16-bit unsigned int, or None when the packet
        length or the start/end frame bytes are wrong.
    """
    if len(array) != 4:
        # Wrong packet length.
        return None
    if array[0] != 0xAA or array[3] != 0xFF:
        # Corrupt start/end frame markers.
        return None
    return array[2] << 8 | array[1]
def serial_send(value):
    """Encode a 16-bit unsigned int and write it to the stepper serial port.

    :param value: 16-bit unsigned integer to send.
    :returns: The framed bytearray that was written.
    """
    frame = output_encoder(value)
    serStepper.write(frame)
    print(str(value))  # echo the sent value for debugging
    return frame
def serial_receive():
    """Wait up to 5 seconds for a response, decode it and return the value.

    :returns: The decoded 16-bit unsigned int, or None if the bytes read
        after the wait do not form a valid 4-byte packet.
    """
    timeout = time.time() + 5
    # Poll until at least 4 bytes are buffered, or the 5 s deadline passes.
    while (serStepper.in_waiting <= 3) and (time.time() < timeout):
        time.sleep(0.0001)
    else:
        # NOTE: the loop body never breaks, so this `else` always runs once
        # the wait ends (either data is ready or the timeout elapsed).
        serial_read = (serStepper.read(serStepper.in_waiting))
        val = output_decoder(serial_read)
        print(str(val))  # echo the received value for debugging
        return val
def arrival_wait():
    """Wait (up to 10 minutes) for the arrival confirmation message 0xFF00.

    Clears the serial input/output buffers afterwards for cleanliness.
    """
    timeout = time.time() + 600
    # Keep polling serial_receive() until the confirmation word arrives
    # or the 10-minute deadline passes.
    while (serial_receive() != 0xFF00) and (time.time() <= timeout):
        time.sleep(0.0001)
    serStepper.reset_input_buffer()
    serStepper.reset_output_buffer()
def go(pos=0):
    """Send a position command serially and wait for arrival confirmation.

    Prints the elapsed time between sending the instruction and receiving
    the arrival response.

    :param pos: 16-bit position value to send (default 0).
    """
    sent = time.time()
    serial_send(pos)
    arrival_wait()
    received = time.time()
    # Round-trip time from send to arrival confirmation.
    print(str(received - sent))


# Immediately command a move to position 0 when the script runs.
go(0x0000)
"content_hash": "ee1926088c46a29334d40318ec4b9681",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 135,
"avg_line_length": 28.481927710843372,
"alnum_prop": 0.6484771573604061,
"repo_name": "matthewpang/stretch_tester",
"id": "77dbb43a96512aa0ad014d7b216a5f23d031e456",
"size": "2364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/Serial Testing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "2891"
},
{
"name": "Jupyter Notebook",
"bytes": "4578"
},
{
"name": "Python",
"bytes": "11105"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ColumnValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Integer validator for the ``parcats.domain.column`` plotly property."""

    def __init__(self, plotly_name="column", parent_name="parcats.domain", **kwargs):
        # Pull overridable defaults out of kwargs before delegating.
        edit_type = kwargs.pop("edit_type", "calc")
        minimum = kwargs.pop("min", 0)
        super(ColumnValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
| {
"content_hash": "5afde383189210092fbefe769e1d96d2",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 85,
"avg_line_length": 36.666666666666664,
"alnum_prop": 0.6068181818181818,
"repo_name": "plotly/plotly.py",
"id": "0f0090ca352af2d77a9b03f25e6eb151bd275c23",
"size": "440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/parcats/domain/_column.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
"""Fake clock for unit test injection."""
__author__ = 'Matthew.H.Frantz@gmail.com (Matt Frantz)'
import datetime
class FakeNow(object):
    """Fake clock for unit test injection as datetime.datetime.now.

    Attributes:
      now: Next value to return (datetime.datetime)
      increment: Amount to increment each call (datetime.timedelta)
    """

    DEFAULT_NOW = datetime.datetime(1970, 1, 1, 0, 0, 0)
    DEFAULT_INCREMENT = datetime.timedelta(seconds=1)

    def __init__(self, initial_value=DEFAULT_NOW, increment=DEFAULT_INCREMENT):
        """Initializes a FakeNow object.

        Args:
          initial_value: First value to return (datetime.datetime)
          increment: Amount to increment each call (datetime.timedelta)
        """
        self.now = initial_value
        self.increment = increment

    def __call__(self):
        """Returns the current clock value and advances the fake clock.

        Returns:
          datetime.datetime

        Postconditions:
          Next call will return current value plus increment.
        """
        current, self.now = self.now, self.now + self.increment
        return current
| {
"content_hash": "db9419ddf49d7644dfa7f5a765ae6b59",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 77,
"avg_line_length": 25.775,
"alnum_prop": 0.6828322017458778,
"repo_name": "mhfrantz/alertfeed",
"id": "4cb4b71cb20ce7e1e53b5aea3cee48f1f3c72c0d",
"size": "1630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fake_clock.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1523"
},
{
"name": "HTML",
"bytes": "16653"
},
{
"name": "JavaScript",
"bytes": "15566"
},
{
"name": "Python",
"bytes": "280386"
},
{
"name": "Shell",
"bytes": "173"
}
],
"symlink_target": ""
} |
from fabric import api as fab
from ..base import Debian, Ubuntu
from ..deployment import _
from ..project import LogRotate
from ..project.cron import CronTab, BCronTab
from ..database import Maria52Database
from .base import Provider
class NetangelsShared(Provider):
    """NetAngels shared-hosting provider: Debian, per-instance home dir, bcron."""

    # Flag marking this provider as shared hosting.
    shared = True

    def __init__(self):
        super(NetangelsShared, self).__init__()
        fab.env.os = Debian()
        # Each deployed instance lives in its own directory under the
        # user's home.
        fab.env.remote_dir = _("/home/%(user)s/%(instance_name)s/")
        fab.env.cron = BCronTab()
# Concrete NetAngels shared-hosting tariff plans; they differ only by name
# and inherit their configuration unchanged from NetangelsShared.
class Lite(NetangelsShared):
    pass


class Standard(NetangelsShared):
    pass


class Professional(NetangelsShared):
    pass
class NetangelsVDS(Provider):
    """NetAngels virtual dedicated server: Debian, MariaDB, cron, logrotate."""

    def __init__(self):
        super(NetangelsVDS, self).__init__()
        fab.env.os = Debian()
        fab.env.remote_dir = _("/home/%(user)s/")
        fab.env.db = Maria52Database()
        fab.env.cron = CronTab()
        fab.env.logrotate = LogRotate()
class VDS512(NetangelsVDS):
    pass


# Alternative name for the VDS512 plan.
VDS2 = VDS512
class NetangelsCloudVDS(Provider):
    """NetAngels cloud VDS: Ubuntu-based, with cron and logrotate configured."""

    def __init__(self):
        super(NetangelsCloudVDS, self).__init__()
        fab.env.os = Ubuntu()
        fab.env.remote_dir = _("/home/%(user)s/")
        fab.env.cron = CronTab()
        fab.env.logrotate = LogRotate()
        # TODO
        #self.can_sudo = False
class Medium(NetangelsCloudVDS):
    # 4 Core, 2gb RAM
    pass
| {
"content_hash": "4b4f70e26381858add2eb6a84aaa8d49",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 67,
"avg_line_length": 18.616438356164384,
"alnum_prop": 0.6195732155997057,
"repo_name": "suvit/speedydeploy",
"id": "30f609fc16f62a76cf95051f15f8385dfd39cf47",
"size": "1359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "speedydeploy/providers/netangels.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "1110"
},
{
"name": "Nginx",
"bytes": "1900"
},
{
"name": "Python",
"bytes": "90835"
},
{
"name": "Shell",
"bytes": "10296"
}
],
"symlink_target": ""
} |
"""
Created by: Lee Bergstrand (2018)
Description: The genome property tree class.
"""
import json
import csv
import pandas as pd
from pygenprop.genome_property import GenomeProperty
class GenomePropertiesTree(object):
    """
    This class contains a representation of a set of nested genome properties. Internally, the instantiated
    object contains a rooted DAG of genome properties connected from root to leaf (parent to child). A dictionary is
    also included which points to each tree node for fast lookups by genome property identifier.
    """

    def __init__(self, *genome_properties):
        """
        When the object is created create a dictionary and connect the nodes to each other to form the polytree.

        :param genome_properties: One or more genome property objects.
        """
        self.genome_properties_dictionary = {}
        for new_property in genome_properties:
            new_property.tree = self
            self.genome_properties_dictionary[new_property.id] = new_property
        self.build_genome_property_connections()

    @property
    def root(self):
        """
        Gets the top level genome properties object in a genome properties tree.

        :return: The root genome property of the genome properties tree.
        """
        genome_property = next(iter(self.genome_properties_dictionary.values()))
        # Walk up the DAG (always via the first parent) until a property
        # with no parents is reached.
        while genome_property.parents:
            genome_property = genome_property.parents[0]
        return genome_property

    @property
    def leafs(self):
        """
        Yields the leaf nodes of the rooted DAG.

        :return: A generator over all genome property objects with no children.
        """
        for genome_property in self:
            if not genome_property.children:
                yield genome_property

    def build_genome_property_connections(self):
        """
        Build connections between parent-child genome properties in the dictionary. This creates the rooted DAG.
        """
        for genome_property in self:
            for identifier in genome_property.child_genome_property_identifiers:
                child_genome_property = self[identifier]
                # Identifiers absent from the dictionary look up as None
                # and are silently skipped.
                if child_genome_property:
                    genome_property.children.append(child_genome_property)
                    child_genome_property.parents.append(genome_property)

    def to_json(self, nodes_and_links=False):
        """
        Converts the object to a JSON representation.

        :param nodes_and_links: If True, returns the JSON in node and link format.
        :return: A JSON formatted string representing the genome property tree.
        """
        if nodes_and_links:
            nodes = self.create_graph_nodes_json(as_list=True)
            links = self.create_graph_links_json(as_list=True)
            return json.dumps({'nodes': nodes, 'links': links})
        return self.create_nested_json()

    def create_nested_json(self, current_property=None, as_dict=False):
        """
        Converts the object to a nested JSON representation.

        :param current_property: The current root genome property (for recursion)
        :param as_dict: If True, return a dictionary for incorporation into other JSON objects.
        :return: A JSON formatted string or dictionary representing the object.
        """
        root_genome_property = current_property if current_property else self.root
        root_json = root_genome_property.to_json(as_dict=True)
        # Recurse depth-first into each child.
        root_json['children'] = [self.create_nested_json(child, as_dict=True)
                                 for child in root_genome_property.children]
        return root_json if as_dict else json.dumps(root_json)

    def create_graph_nodes_json(self, as_list=False):
        """
        Creates a JSON representation of a genome property dictionary.

        :param as_list: Return as a list instead of a JSON formatted string.
        :return: A JSON formatted string of a list of each properties JSON representation.
        """
        nodes = [genome_property.to_json(as_dict=True) for genome_property in self]
        return nodes if as_list else json.dumps(nodes)

    def create_graph_links_json(self, as_list=False):
        """
        Creates a JSON representation of a genome property links.

        :param as_list: Return as a list instead of a JSON formatted string.
        :return: A JSON formatted string of a list of each properties JSON representation.
        """
        links = []
        for genome_property in self:
            for parent in genome_property.parents:
                links.append({'parent': parent.id, 'child': genome_property.id})
        return links if as_list else json.dumps(links)

    @property
    def genome_property_identifiers(self):
        """
        The identifiers all genome properties in the database.

        :return: A set of all genome property identifiers.
        """
        return set(genome_property.id for genome_property in self)

    @property
    def consortium_identifiers(self):
        """
        All InterPro consortium signature identifiers (PFAM, TIGRFAM, etc.) used by the genome properties database.

        :return: A set of all unique consortium identifiers used in genome properties.
        """
        return self.get_evidence_identifiers(consortium=True)

    @property
    def consortium_identifiers_dataframe(self):
        """
        All InterPro consortium signature identifiers (PFAM, TIGRFAM, etc.) used by the genome properties database.

        :return: A pandas dataframe indexed by (property identifier, step number).
        """
        consortium_mapping = []
        for genome_property in self:
            for step in genome_property.steps:
                for identifier in step.consortium_identifiers:
                    consortium_mapping.append((genome_property.id, step.number, identifier))
        consortium_dataframe = pd.DataFrame(data=consortium_mapping, columns=['Property_Identifier', 'Step_Number',
                                                                              'Signature_Accession'])
        consortium_dataframe.set_index(['Property_Identifier', 'Step_Number'], inplace=True)
        return consortium_dataframe

    @property
    def interpro_identifiers(self):
        """
        All global InterPro identifiers (IPRXXXX, etc.) used by the genome properties database.

        :return: A set of all unique InterPro identifiers used in genome properties.
        """
        return self.get_evidence_identifiers()

    def get_evidence_identifiers(self, consortium=False):
        """
        Gets evidence identifiers from all genome properties in the database.

        :param consortium: If true, list the consortium signature identifiers (PFAM, TIGRFAM)
        :return: A set of all unique evidence identifiers used in genome properties.
        """
        global_identifiers = []
        for genome_property in self:
            for step in genome_property.steps:
                if consortium:
                    global_identifiers.extend(step.consortium_identifiers)
                else:
                    global_identifiers.extend(step.interpro_identifiers)
        return set(global_identifiers)

    def create_metabolism_database_mapping_file(self, file_handle):
        """
        Writes a mapping file which maps each genome property to KEGG and MetaCyc.

        :param file_handle: A python file handle object.
        """
        mapping_data = []
        for genome_property in self:
            for database in genome_property.databases:
                if database.database_name in ['MetaCyc', 'KEGG']:
                    row = (genome_property.id,
                           genome_property.name,
                           database.database_name,
                           database.record_title,
                           database.record_ids[0])
                    # Strip commas so the CSV fields stay unambiguous.
                    sanitized_row = [data_point.replace(',', '') for data_point in row]
                    mapping_data.append(sanitized_row)
        csv.writer(file_handle).writerows(mapping_data)

    def __getitem__(self, item) -> 'GenomeProperty':
        return self.genome_properties_dictionary.get(item)

    def __len__(self):
        return len(self.genome_properties_dictionary)

    def __iter__(self):
        # Yields GenomeProperty objects in dictionary insertion order.
        for genome_property in self.genome_properties_dictionary.values():
            yield genome_property

    def __contains__(self, item):
        return item in self.genome_properties_dictionary

    def __repr__(self):
        return '\n'.join(str(genome_property) for genome_property in self)

    def __str__(self):
        """
        Returns a human readable summary for all properties in a genome properties dictionary.
        (Fixed: previously printed the summary and returned None, which made
        str(tree) raise TypeError.)
        """
        summary_lines = []
        for genome_property in self:
            parent_ids = [parent.id for parent in genome_property.parents]
            child_ids = [child.id for child in genome_property.children]
            if not parent_ids:
                parent_ids = "[ No Parent Genome Properties ]"
            if not child_ids:
                child_ids = "[ No Child Properties ]"
            summary_lines.append(
                "\n" + genome_property.id + " (" + genome_property.name + ")" + " Type: [" +
                genome_property.type + "]" + " Parents: " + str(parent_ids) + " Children: " + str(child_ids))
            summary_lines.append(
                '=====================================================================================================')
            for step in genome_property.steps:
                summary_lines.append(str(step) + "\n")
        return '\n'.join(summary_lines)
| {
"content_hash": "7617c6e853124ac27bf28b771cb3b6b9",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 120,
"avg_line_length": 36.33098591549296,
"alnum_prop": 0.6008916456677651,
"repo_name": "LeeBergstrand/pygenprop",
"id": "c06de9dad80051107803fa106ee54b0a1e7884f9",
"size": "10341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pygenprop/tree.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "36614"
}
],
"symlink_target": ""
} |
import base64
import json
import os
import shutil
import urllib2
from oslo_log import log as logging
from oslo_config import cfg
from bork_api.clients import chef_client, puppet_client, murano_client
# Module-level logger and global oslo.config handle.
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class LocalStorage:
    """Filesystem-backed storage for downloaded cookbooks/modules/blueprints."""

    def __init__(self, path=None):
        # Fall back to the configured storage path when none is given.
        path = path or CONF.clients_storage.local_path
        self.path = os.path.abspath(path)

    def list_users(self):
        """
        :return: list of all users in the storage path (as full paths)
        """
        users = []
        for us in os.listdir(self.path):
            us = os.path.join(self.path, us)
            users.append(us)
        return users

    def list_cookbooks(self, user):
        """
        :return: list of all cookbooks in the storage path
        """
        valid = []
        for cb in os.listdir(os.path.join(self.path, user)):
            # NOTE(review): this joins self.path, not the user's directory —
            # presumably it should be os.path.join(self.path, user, cb); confirm
            # against the on-disk layout before changing.
            cb = os.path.join(self.path, cb)
            system = self.find_system(cb)
            if system:
                valid.append(cb)
        return valid

    def list_recipes(self, cb):
        """
        Lists available recipes in given cookbook

        :param cb: cookbook path
        :return: list of recipes
        """
        valid = []
        system = self.find_system(cb)
        if system == "chef":
            valid = chef_client.list_recipes(cb)
        elif system == "puppet":
            valid = puppet_client.list_classes(cb)
        elif system == "murano":
            # Fixed: this was `valid == murano_client.list_specs(cb)` — a
            # comparison whose result was discarded, so murano blueprints
            # always returned an empty list.
            valid = murano_client.list_specs(cb)
        return valid

    @staticmethod
    def find_system(cb_path):
        """
        Discovers cookbook format from a given path

        :param cb_path: path to the cookbook
        :return: system type ("puppet", "chef", "murano") or None
        """
        system = None
        if puppet_client.check_puppet_module(cb_path):
            system = "puppet"
        elif chef_client.check_chef_cookbook(cb_path):
            system = "chef"
        elif murano_client.check_murano_blueprint(cb_path):
            system = "murano"
        return system

    def reset(self):
        # Wipe the whole local storage directory.
        if os.path.exists(self.path):
            shutil.rmtree(self.path)

    def download(self, url):
        """
        Download cookbook from a given url

        :param url: repository to download from
        :return: (name, local path) tuple, or (None, None) on error
        """
        # expected path: r"https://api.github.com/repos/user/project/contents/subpath1/subpath2"
        BASE_API_URL = r"https://api.github.com/repos/"
        repo_path = [x for x in url.split("/") if len(x) > 0]
        repo_user = repo_path[2]
        repo_project = repo_path[3]
        # Skip the "tree/<branch>" part of a GitHub web URL to get the subpath.
        repo_subpath = "/".join(repo_path[repo_path.index("tree") + 2:])
        url = BASE_API_URL + "/".join((repo_user, repo_project, "contents", repo_subpath))
        local_path = os.path.join(self.path, url.split("/")[-1])
        LOG.info("Downloading from %s to %s" % (url, local_path))
        # Start from a clean target directory.
        if os.path.exists(local_path):
            shutil.rmtree(local_path)
        resp = None, None

        def write_file(item, dir_name):
            # Fetch one file entry from the GitHub contents API and write
            # its base64-decoded payload to disk.
            name = item['name']
            try:
                res = urllib2.urlopen(item['url']).read()
                coded_string = json.loads(res)['content']
                with open(os.path.join(dir_name, name), 'w') as f:
                    f.write(base64.b64decode(coded_string))
            except urllib2.HTTPError as e:
                LOG.error("File error %s" % e)

        def write_files(url, dir_name):
            # Recursively mirror a GitHub directory listing to dir_name.
            # Returns True on error, False on success.
            if not os.path.exists(dir_name):
                os.makedirs(dir_name)
            try:
                data = urllib2.urlopen(url).read()
            except urllib2.HTTPError as e:
                LOG.error("Directory error %s" % e)
                return True
            git_dir = json.loads(data)
            for item in git_dir:
                if item['type'] == 'file':
                    write_file(item, dir_name)
                elif item['type'] == 'dir':
                    write_files(item['url'], dir_name=os.path.join(dir_name, item['name']))
            return False

        error = write_files(url, local_path)
        if not error:
            resp = url.split("/")[-1], local_path
        return resp
"content_hash": "0aeda735df0c9dde1b2f10a7f47cf06d",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 96,
"avg_line_length": 32.207692307692305,
"alnum_prop": 0.5419154525913542,
"repo_name": "Fiware/ops.Validator",
"id": "5142ce668ca06a227a9b4e84233f6ad945e1dcd9",
"size": "4766",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "validator_api/bork_api/clients/storage_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "281"
},
{
"name": "CSS",
"bytes": "7529"
},
{
"name": "Dockerfile",
"bytes": "12322"
},
{
"name": "HTML",
"bytes": "3863"
},
{
"name": "JavaScript",
"bytes": "345113"
},
{
"name": "Python",
"bytes": "291979"
},
{
"name": "Ruby",
"bytes": "10189"
},
{
"name": "Shell",
"bytes": "5963"
}
],
"symlink_target": ""
} |
import unittest
from komoo import create_app
from settings import Testing
class KomooInstanceTest(unittest.TestCase):
    """Smoke test that the application factory produces a runnable app."""

    def test_instantiate_app(self):
        application = create_app(Testing)
        self.assertTrue(application)
        self.assertTrue(hasattr(application, 'run'))
        client = application.test_client()
        self.assertTrue(client)


if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "30b3f052985b470507567045970e1089",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 50,
"avg_line_length": 24.875,
"alnum_prop": 0.6633165829145728,
"repo_name": "it3s/komoo",
"id": "25f6d0c78f3b14a74ac2a153af6a4e32312e0a63",
"size": "398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/komoo_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "626"
},
{
"name": "JavaScript",
"bytes": "54247"
},
{
"name": "Python",
"bytes": "31009"
},
{
"name": "Shell",
"bytes": "240"
}
],
"symlink_target": ""
} |
""" Module for building specdb files related to FRB spectra
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import os
from IPython import embed
import h5py
import glob
from pkg_resources import resource_filename
from astropy.coordinates import SkyCoord
from astropy.coordinates import match_coordinates_sky
from astropy import units
from astropy.table import Table, vstack
from specdb import defs
from specdb.build import privatedb as pbuild
from specdb.build import utils as spbu
from frb.surveys import sdss
# Globals
# Instruments whose spectra files are recognized and ingested.
all_instruments = ['SDSS', 'FORS2', 'MUSE', 'KCWI', 'MagE', 'GMOS-S',
                   'LRISb', 'LRISr', 'DEIMOS', 'XSHOOTER', 'Goodman',
                   'GMOS-N']
# Root of the FRB Galaxy DB spectra tree (taken from the FRB_GDB env var).
db_path = os.getenv('FRB_GDB')
# Literature references whose data are treated as public
# (presumably publishable in released specDB files — confirm).
public_refs = ['Prochaska2019', 'Bannister2019', 'Bhandari2019',
               'Heintz2020', 'Simha2020', 'Tendulkar2017',
               'rajwade2022']
def grab_files(all_files, refs_list, instrument):
    """
    Select the files (and their references) matching a given instrument.

    A file matches when the instrument name appears in its basename.

    Args:
        all_files (list):
            Complete list of files
        refs_list (list):
            List of references of these files
        instrument (str):
            Instrument name to parse on

    Returns:
        list, list: List of files and their references matching the input instrument
    """
    file_subset = []
    ref_subset = []
    for full_path, ref in zip(all_files, refs_list):
        if instrument in os.path.basename(full_path):
            file_subset.append(full_path)
            ref_subset.append(ref)
    return file_subset, ref_subset
def load_z_tables(path):
    """
    Load up a redshift table from the Galaxy_DB

    Redshift tables are those whose filenames begin with 'z'.

    Args:
        path (str):
            Path to the folder holding one or more redshift tables.

    Returns:
        astropy.table.Table: Redshift table with RA, DEC, ZEM, ..
    """
    z_files = glob.glob(os.path.join(path, 'z*'))
    z_tbl = Table()
    for z_file in z_files:
        # Load the fixed-width ASCII table.
        # (A dead `if False: pass / else:` wrapper around this read was removed.)
        itbl = Table.read(z_file, format='ascii.fixed_width')
        # Ensure RA/DEC columns exist; derive them from JCOORD if needed.
        if 'RA' not in itbl.keys() and 'JCOORD' in itbl.keys():
            coords = SkyCoord(itbl['JCOORD'], unit=(units.hourangle, units.deg))
            itbl['RA'] = coords.ra.value
            itbl['DEC'] = coords.dec.value
        # Append
        z_tbl = vstack([z_tbl, itbl])
    return z_tbl
def sdss_redshifts():
    """
    Enter each SDSS folder of the Galaxy DB and build a redshift table
    (z_SDSS.ascii) from the spectra present, matching against the SDSS
    photometric/spectroscopic catalog.

    WARNING: this routine is unfinished — note the embed() marker below
    and the review notes on the matching loop.

    Returns:

    """
    embed(header='THIS NEEDS HELP')
    #
    all_folders = glob.glob(db_path+'/SDSS/*')
    for folder in all_folders:
        Jnames = []
        # Grab the list of spectra files
        spec_files = glob.glob(os.path.join(folder, 'J*_spec.fits'))
        # Generate the name list
        Jnames += [os.path.basename(ifile).split('_')[0] for ifile in spec_files]
        # Coords
        coords = SkyCoord(Jnames, unit=(units.hourangle, units.deg))  # from DES
        # Setup
        done = np.zeros_like(coords.ra.value, dtype=bool)
        zs = np.zeros_like(coords.ra.value)
        # Loop me
        while np.any(~done):
            # Grab the first not done
            i0 = np.where(~done)[0][0]
            coord = coords[i0]
            # Grab the SDSS data
            sdssSurvey = sdss.SDSS_Survey(coord, 10*units.arcmin)
            #
            sub_coords = coords[~done]
            sep = coord.separation(sub_coords)
            # NOTE(review): doidx indexes into sub_coords (the not-done subset),
            # but is later used to index the full-length arrays (zs, coords) —
            # presumably it should be mapped back via np.where(~done)[0][doidx];
            # confirm before relying on this output.
            doidx = np.where(sep < 10*units.arcmin)[0]
            dothem = coords[doidx]
            # Now match
            catalog = sdssSurvey.get_catalog()
            sdss_coords = SkyCoord(ra=catalog['ra'], dec=catalog['dec'], unit='deg')
            idx, d2d, d3d = match_coordinates_sky(dothem, sdss_coords, nthneighbor=1)
            # Fill
            zs[doidx] = catalog['z_spec'][idx]
            # NOTE(review): np.where() on a SkyCoord object looks wrong —
            # `dothem` is a coordinate array, not a boolean mask; likely
            # intended done[doidx] = True (after the index remap above).
            done[np.where(dothem)] = True
        # Write the catalog
        tbl = Table()
        tbl['RA'] = coords.ra.value
        tbl['DEC'] = coords.dec.value
        tbl['ZEM'] = zs
        tbl['ZEM_SOURCE'] = 'SDSS'
        tbl['ZQ'] = 4
        tbl.write(os.path.join(folder, 'z_SDSS.ascii'), overwrite=True, format='ascii.fixed_width')
def generate_by_refs(input_refs, outfile, version):
    """
    Build a specDB file according to the input references

    Stacks the redshift tables and collects the J*_spec.fits spectra found
    in each reference folder under db_path, then ingests them instrument by
    instrument -- each with its own header-parsing options -- into a single
    HDF5 specDB file.

    Args:
        input_refs (list):
            List of references from which to build the specDB
        outfile (str):
            Output filename
        version (str):
            Version number
    """
    # Not elegant but it works
    all_folders = glob.glob(db_path+'/*/*')
    all_refs = [os.path.basename(ifolder) for ifolder in all_folders]
    # z_tbl -- accumulates the redshift tables of every requested reference
    allz_tbl = Table()
    # Loop in input refs
    all_spec_files = []
    refs_list = []
    for ref in input_refs:
        # Raises ValueError if a requested reference folder does not exist
        idx = all_refs.index(ref)
        # Redshift tables
        z_tbl = load_z_tables(all_folders[idx])
        allz_tbl = vstack([allz_tbl, z_tbl])
        # Grab the list of spectra
        specs = glob.glob(os.path.join(all_folders[idx], 'J*_spec.fits'))
        if len(specs) == 0:
            continue
        # Save, keeping a parallel list of the owning reference per file
        all_spec_files += specs
        refs_list += [ref]*len(specs)
    # Get it started
    # HDF5 file
    hdf = h5py.File(outfile, 'w')
    # Defs
    zpri = defs.z_priority()
    # Main DB Table
    id_key = 'FRB_ID'
    maindb, tkeys = spbu.start_maindb(id_key)
    tkeys += ['ZQ']
    gdict = {}
    # Loop on Instruments
    #pair_groups = ['MUSE']
    pair_groups = []
    badf = None
    for instr in all_instruments:
        print("Working on {}".format(instr))
        fits_files, irefs = grab_files(all_spec_files, refs_list, instr)
        if len(fits_files) == 0:
            continue
        # Option dicts
        mwargs = {}
        mwargs['toler'] = 1.0 * units.arcsec  # Matching tolerance (original comment was truncated: "Require an")
        skipz = False
        swargs = {}
        # Meta -- reset per-instrument options (NOTE: 'maxpix' and 'scale'
        # are NOT reset here; see the LRISb branch below)
        parse_head, mdict, fname = None, None, True
        if instr == 'SDSS':
            mdict = dict(DISPERSER='BOTH', R=2000., TELESCOPE='SDSS 2.5-M', INSTR='SDSS')
            parse_head = {'DATE-OBS': 'MJD'}
            maxpix = 4000
            scale = 1e-17
        elif instr == 'FORS2':
            mdict = dict(TELESCOPE='VLT', INSTR='FORS2')
            parse_head = {'DATE-OBS': 'MJD', 'DISPERSER': 'DISPNAME', 'R': True}
            maxpix = 2050
            scale = 1e-17
        elif instr == 'MUSE':
            mdict = dict(TELESCOPE='VLT', R=2000.)
            parse_head = {'DATE-OBS': 'MJD-OBS', 'DISPERSER': 'DISPNAME', 'INSTR': 'INSTRUME'}
            maxpix = 4000
            scale = 1e-20
        elif instr == 'KCWI':
            mdict = dict(TELESCOPE='Keck-2')
            parse_head = {'DATE-OBS': 'MJD', 'DISPERSER': 'DISPNAME', 'INSTR': 'INSTRUME', 'R': True}
            maxpix = 4000
            scale = 1e-17
        elif instr == 'MagE':
            parse_head = {'R': True, 'DATE-OBS': 'MJD-OBS', 'TELESCOPE': 'TELESCOP',
                          'INSTR': 'INSTRUME', 'DISPERSER': 'DISPNAME'}
            maxpix = 18000
            scale = 1e-17
        elif instr == 'GMOS-S':
            mdict = dict(TELESCOPE='Gemini-S', INSTR='GMOS-S')
            parse_head = {'R': True, 'DATE-OBS': 'MJD-OBS', 'DISPERSER': 'DISPNAME'}
            maxpix = 3500
            scale = 1e-17
        elif instr == 'LRISb':
            mdict = dict(TELESCOPE='Keck-1')
            parse_head = {'DATE-OBS': 'MJD', 'DISPERSER': 'DISPNAME', 'INSTR': 'INSTRUME'}
            maxpix = 2050 # 2x binning
            # NOTE(review): 'scale' is not assigned in this branch, so the
            # value from the previously processed instrument is reused (or a
            # NameError occurs if LRISb is first) -- confirm this is intended.
        elif instr == 'GMOS-N':
            mdict = dict(TELESCOPE='Gemini-N', INSTR='GMOS-N')
            parse_head = {'R': True, 'DATE-OBS': 'MJD-OBS',
                          'DISPERSER': 'DISPNAME'}
            maxpix = 3500
            scale = 1e-17
        elif instr == 'LRISr':
            mdict = dict(TELESCOPE='Keck-1')
            parse_head = {'DATE-OBS': 'MJD', 'DISPERSER': 'DISPNAME', 'INSTR': 'INSTRUME'}
            maxpix = 2050
            scale = 1e-17
        elif instr == 'DEIMOS':
            mdict = dict(TELESCOPE='Keck-2')
            parse_head = {'DATE-OBS': 'MJD', 'DISPERSER': 'DISPNAME',
                          'INSTR': 'INSTRUME'}
            maxpix = 9000
            scale = 1e-17
        elif instr == 'Goodman':
            mdict = dict(TELESCOPE='SOAR', INSTR='Goodman')
            parse_head = {'DATE-OBS': 'MJD', 'DISPERSER': 'DISPNAME',
                          'R': True}
            maxpix = 2048
            scale = 1e-17
        elif instr == 'XSHOOTER':
            mdict = dict(TELESCOPE='VLT')
            parse_head = {'DATE-OBS': 'MJD', 'DISPERSER': 'DISPNAME', 'INSTR': 'INSTRUME'}
            maxpix = 33000
            scale = 1e-17
        else:
            # Unknown instrument: drop into an interactive debugger
            embed(header='172')
        # Meta
        full_meta = pbuild.mk_meta(fits_files, allz_tbl, mdict=mdict, fname=fname,
                                   verbose=True, parse_head=parse_head, skip_badz=skipz,
                                   stype='GAL',
                                   chkz=True, **mwargs)
        full_meta['Ref'] = irefs
        # Survey flag
        flag_g = spbu.add_to_group_dict(instr, gdict, skip_for_debug=True)
        # IDs
        #if 'MUSE' in instr:
        #    embed(header='278 of build specdb')
        maindb = spbu.add_ids(maindb, full_meta, flag_g, tkeys, id_key,
                              first=(flag_g==1),
                              mtch_toler=1.*units.arcsec,
                              close_pairs=(instr in pair_groups))
        # Ingest --
        pbuild.ingest_spectra(hdf, instr, full_meta, max_npix=maxpix, verbose=False,
                              badf=badf, grab_conti=False, scale=scale, **swargs)
    # Write
    spbu.write_hdf(hdf, str('FRB'), maindb, zpri, gdict, version, Publisher=str('JXP'))
    print("Wrote {:s} DB file".format(outfile))
    print("You probably need to move it into SPECDB")
def main(inflg='all'):
    """Run the requested build steps.

    Args:
        inflg (str or int): Either the string 'all' to run every step, or
            an integer bit-flag selecting individual steps
            (bit 0 builds the public specDB file).
    """
    if inflg == 'all':
        # All 25 step bits set.  Replaces the original round-trip through
        # np.sum(np.array([2**ii for ii in range(25)])), which built a numpy
        # array just to compute the constant 2**25 - 1.
        flg = (1 << 25) - 1
    else:
        flg = int(inflg)
    # Public
    if flg & (2**0):
        generate_by_refs(public_refs,
                         'FRB_specDB_Public.hdf5', 'v0.5')
# Command line execution
if __name__ == '__main__':
    pass  # NOTE(review): entry point is a no-op; presumably main() should be invoked here -- confirm
| {
"content_hash": "80afa67e2aca629cece0435a634f9388",
"timestamp": "",
"source": "github",
"line_count": 324,
"max_line_length": 101,
"avg_line_length": 32.54012345679013,
"alnum_prop": 0.5332448069809352,
"repo_name": "FRBs/FRB",
"id": "3a546709750d43a25e8ce397dc44c93f2a9d558d",
"size": "10543",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "frb/builds/build_specdb.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2903347"
},
{
"name": "Python",
"bytes": "793709"
}
],
"symlink_target": ""
} |
from scrapy.loader import ItemLoader
from scrapy.loader.processors import TakeFirst, MapCompose
from scrapy.spiders import SitemapSpider
from w3lib.html import remove_tags
class MySpider(SitemapSpider):
    """Sitemap-driven spider that loads product pages into ``MyItem`` objects."""

    name = 'my_spider'
    sitemap_urls = ['https://www.example.com/sitemap.xml']
    sitemap_rules = [
        ('/something/', 'scrape_product'),
    ]

    def scrape_product(self, response):
        """Populate a ``MyItem`` from *response* via CSS selectors."""
        loader = ItemLoader(item=MyItem(), response=response)
        # Strip HTML tags from every extracted value, then keep only the
        # first surviving value per field.
        loader.default_input_processor = MapCompose(remove_tags)
        loader.default_output_processor = TakeFirst()
        loader.add_css('my_field', 'selector')
        return loader.load_item()
"content_hash": "1518a848d9d4f9377fc635b70035636f",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 69,
"avg_line_length": 32.714285714285715,
"alnum_prop": 0.6986899563318777,
"repo_name": "zseta/scrapy-templates",
"id": "44425930f4c4cec5b46b8e543f8e7a8be6e8f4e8",
"size": "1044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "templates/sitemap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17318"
}
],
"symlink_target": ""
} |
__author__ = 'tbeltramelli'
from AHomography import *
from Filtering import *
from UInteractive import *
class TextureMapper(AHomography):
    """Maps a texture image onto video frames using homographies.

    Three modes are supported: manual point selection, automatic mapping
    onto a detected chessboard, and "realistic" placement via an overview
    map image.
    """
    # Latest rendered output frame.
    _result = None
    # Texture image to be mapped onto the scene.
    _texture = None
    # Overview/map image used by the "realistic" mode.
    _map = None
    # Cached (x, y) position of the texture in the map image.
    _texture_position = None

    def __init__(self, homography_output_path):
        # Path presumably used by the AHomography parent when persisting
        # homographies -- TODO confirm against the parent class.
        self._homography_output_path = homography_output_path

    def map(self, video_path, texture_path, is_automatic):
        """Play the video and map the texture onto every frame.

        is_automatic selects chessboard detection; otherwise the user picks
        the homography points by mouse on the first frame.
        """
        self._texture = UMedia.get_image(texture_path)
        self._homography = None
        if is_automatic:
            UMedia.load_media(video_path, self.process_with_chessboard)
        else:
            UMedia.load_media(video_path, self.process_with_homography)

    def process_with_homography(self, img):
        """Per-frame callback: map the texture using a mouse-selected homography."""
        # Ask the user only once; reuse the homography for subsequent frames.
        if self._homography is None:
            self._homography = self.get_homography_part_from_mouse(self._texture, img)
        self._result = self.get_mapped_texture(self._texture, img, self._homography)
        UMedia.show(self._result)

    def process_with_chessboard(self, img, pattern_size=(9, 6), to_draw=False):
        """Per-frame callback: detect a chessboard and map the texture onto it.

        pattern_size is the count of inner corners per row/column; to_draw
        overlays the detected corners for debugging.
        """
        # Halve the frame resolution before detection.
        self._result = cv2.pyrDown(img)
        # Indices of the four outer corners of the detected grid.
        pos = [0, pattern_size[0] - 1, pattern_size[0] * (pattern_size[1] - 1), (pattern_size[0] * pattern_size[1]) - 1]
        corners = []
        img = Filtering.get_gray_scale_image(self._result)
        is_found, coordinates = cv2.findChessboardCorners(img, pattern_size)
        if is_found:
            # Termination criteria tuple for sub-pixel corner refinement.
            term = cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1
            cv2.cornerSubPix(img, coordinates, (5, 5), (-1, -1), term)
            if to_draw:
                cv2.drawChessboardCorners(self._result, pattern_size, coordinates, is_found)
            for p in pos:
                corner = int(coordinates[p, 0, 0]), int(coordinates[p, 0, 1])
                if to_draw:
                    cv2.circle(self._result, corner, 10, (255, 0, 0))
                corners.append(corner)
            # Recomputed every frame so the texture follows the board.
            self._homography = self.get_homography_from_coordinates(self._texture, corners)
            self._result = self.get_mapped_texture(self._texture, self._result, self._homography)
        UMedia.show(self._result)

    def map_realistically(self, video_path, map_path, texture_path):
        """Play the video and place the texture using an overview map image."""
        self._map = UMedia.get_image(map_path)
        self._texture = UMedia.get_image(texture_path)
        self._homography = None
        UMedia.load_media(video_path, self.process_realistically)

    def process_realistically(self, img):
        """Per-frame callback: project the map-selected texture position into the frame."""
        self._result = img
        if self._homography is None:
            self.define_map_homography([img, self._map])
        # Invert each frame: map -> frame direction.  NOTE(review): inverting
        # every frame assumes define_map_homography refreshes self._homography
        # per frame -- confirm against AHomography.
        self._homography = np.linalg.inv(self._homography)
        x, y = self.get_texture_position(self._map)
        x, y = self.get_2d_transform_from_homography(x, y, self._homography)
        self.apply_texture(0.5, x, y, self._homography)
        UMedia.show(self._result)

    def apply_texture(self, scale, x, y, homography):
        """Warp the texture, scaled by *scale*, centered at (x, y), into the result frame."""
        height, width, layers = self._texture.shape
        w = width * scale
        h = height * scale
        # Transform the four corners of the scaled texture rectangle.
        p1 = self.get_2d_transform_from_homography(x - (w/2), y - (h/2), homography)
        p2 = self.get_2d_transform_from_homography(x + (w/2), y - (h/2), homography)
        p3 = self.get_2d_transform_from_homography(x - (w/2), y + (h/2), homography)
        p4 = self.get_2d_transform_from_homography(x + (w/2), y + (h/2), homography)
        corners = [p1, p2, p3, p4]
        h = self.get_homography_from_coordinates(self._texture, corners)
        self._result = self.get_mapped_texture(self._texture, self._result, h)

    def get_texture_position(self, img):
        """Return the (x, y) texture anchor in *img*, asking the user once and caching it."""
        if self._texture_position is None:
            self._texture_position = UInteractive.select_points_in_images([img], 1)[0][0]
        return self._texture_position
| {
"content_hash": "4c46175e4cd9499eab3d8666703021a6",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 120,
"avg_line_length": 36.666666666666664,
"alnum_prop": 0.613903743315508,
"repo_name": "tonybeltramelli/Graphics-And-Vision",
"id": "26d641926a7fffb197b10341ca54e2d12c7e1738",
"size": "3740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Projective-Geometry/tony/com.tonybeltramelli.homography/TextureMapper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "343757"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ShowlakesValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the ``layout.geo.showlakes`` plotly attribute."""

    def __init__(self, plotly_name="showlakes", parent_name="layout.geo", **kwargs):
        # Pull the overridable defaults out of kwargs before delegating so
        # callers may still supply their own edit_type/role.
        edit_type = kwargs.pop("edit_type", "plot")
        role = kwargs.pop("role", "info")
        super(ShowlakesValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
| {
"content_hash": "a33fbb91f94ea9c288ca43905ced3cab",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 84,
"avg_line_length": 37.583333333333336,
"alnum_prop": 0.614190687361419,
"repo_name": "plotly/python-api",
"id": "5a242c6dcac0e1a066e75824938141ce73da94bf",
"size": "451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/geo/_showlakes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from flask import Flask

# Minimal stub service exposing fake scale-up/scale-down hooks
# (presumably a test double for a load-monitoring tool -- confirm).
app = Flask(__name__)

@app.route('/scale_up/1', methods=['POST'])
def scale_up():
    """Acknowledge a POST scale-up trigger with a fixed string."""
    return "Scaling Up"

@app.route('/scale_down/1', methods=['POST'])
def scale_down():
    """Acknowledge a POST scale-down trigger with a fixed string."""
    return "Scaling Down"

if __name__ == '__main__':
    # Debug server on a non-default port so it can run beside another app.
    app.run(debug=True, port=5001)
| {
"content_hash": "8a5a6a1d144255e9ef2bb4650f1696ca",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 45,
"avg_line_length": 20.428571428571427,
"alnum_prop": 0.6118881118881119,
"repo_name": "stevelle/loadmonitor",
"id": "c0bbe9c4d57a65ea3154ce3fd81be4c2734082dd",
"size": "286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/heater.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3557"
},
{
"name": "Shell",
"bytes": "696"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.db import models
def validate_not_spaces(value):
    """Field validator: reject values that are empty or only whitespace.

    Args:
        value (str): The submitted field value.

    Raises:
        ValidationError: If *value* contains nothing but whitespace.
            (Fix: ValidationError was referenced without being imported,
            which raised NameError instead of a validation error.)
    """
    if not value.strip():
        raise ValidationError(u"You must provide more than just whitespace.")
class Admins(models.Model):
    """Back-office administrator account (legacy inspectdb-style model)."""
    id = models.IntegerField(primary_key=True)
    username = models.CharField(max_length=150, blank=True)
    pass_field = models.CharField(max_length=150, db_column='pass', blank=True) # Field renamed because it was a Python reserved word.
    userlevel = models.IntegerField(null=True, blank=True)
    permissions = models.TextField(blank=True)
    ip_restricted = models.CharField(max_length=150, blank=True)
    lastlogin = models.DateTimeField(null=True, blank=True)
    lastchange = models.DateTimeField(null=True, blank=True)
    email = models.CharField(max_length=765, blank=True)
    usersession = models.CharField(max_length=96, blank=True)
    userip = models.CharField(max_length=150, blank=True)
    terms_accepted = models.DateTimeField(null=True, blank=True)
    name = models.CharField(max_length=150, blank=True)
    resetpasskey = models.CharField(max_length=150, blank=True)
    resetpassexp = models.DateTimeField(null=True, blank=True)
    class Meta:
        # NOTE(review): empty app_label is unusual -- confirm it is intended
        # across all models in this module.
        app_label = ''
class customers(models.Model):
    """Storefront customer record with billing and default shipping address.

    The lowercase class name is kept as-is for backward compatibility with
    existing references.
    """
    contactid = models.IntegerField(primary_key=True)
    billing_firstname = models.CharField(max_length=150, blank=True)
    billing_lastname = models.CharField(max_length=150, blank=True)
    billing_address = models.CharField(max_length=765, blank=True)
    billing_address2 = models.CharField(max_length=150, blank=True)
    billing_city = models.CharField(max_length=300, blank=True)
    billing_state = models.CharField(max_length=300, blank=True)
    billing_zip = models.CharField(max_length=60, blank=True)
    billing_country = models.CharField(max_length=300, blank=True)
    billing_company = models.CharField(max_length=765, blank=True)
    billing_phone = models.CharField(max_length=150, blank=True)
    email = models.CharField(max_length=300, blank=True)
    shipping_firstname = models.CharField(max_length=150, blank=True)
    shipping_lastname = models.CharField(max_length=150, blank=True)
    shipping_address = models.CharField(max_length=765, blank=True)
    shipping_address2 = models.CharField(max_length=150, blank=True)
    shipping_city = models.CharField(max_length=300, blank=True)
    shipping_state = models.CharField(max_length=300, blank=True)
    shipping_zip = models.CharField(max_length=60, blank=True)
    shipping_country = models.CharField(max_length=300, blank=True)
    shipping_company = models.CharField(max_length=765, blank=True)
    shipping_phone = models.CharField(max_length=150, blank=True)
    comments = models.CharField(max_length=765, blank=True)
    lastlogindate = models.DateTimeField(null=True, blank=True)
    website = models.CharField(max_length=600, blank=True)
    pass_field = models.CharField(max_length=150, db_column='pass', blank=True)
    discount = models.FloatField(null=True, blank=True)
    custother1 = models.CharField(max_length=300, blank=True)
    accountno = models.CharField(max_length=150, blank=True)
    maillist = models.IntegerField(null=True, blank=True)
    type = models.IntegerField(null=True, blank=True)
    userid = models.CharField(max_length=150, blank=True)
    last_update = models.DateTimeField(null=True, blank=True)
    custenabled = models.IntegerField(null=True, blank=True)
    additional_field1 = models.CharField(max_length=750, blank=True)
    additional_field2 = models.CharField(max_length=750, blank=True)
    additional_field3 = models.CharField(max_length=450, blank=True)
    additional_field4 = models.CharField(max_length=450, blank=True)
    alt_contactid = models.CharField(max_length=150, blank=True)
    class Meta:
        app_label = ''
        # Newest customers first by default.
        ordering = ["-contactid"]
class Category(models.Model):
    """Product catalog category, including display/layout options and SEO text."""
    id = models.IntegerField(primary_key=True)
    category_name = models.CharField(max_length=450, blank=True)
    category_description = models.TextField(blank=True)
    category_main = models.IntegerField()
    category_parent = models.IntegerField(null=True, blank=True)
    category_header = models.TextField(blank=True)
    category_footer = models.TextField(blank=True)
    category_title = models.TextField(blank=True)
    category_meta = models.TextField(blank=True)
    sorting = models.IntegerField(null=True, blank=True)
    numtolist = models.IntegerField(null=True, blank=True)
    displaytype = models.IntegerField(null=True, blank=True)
    columnum = models.IntegerField(null=True, blank=True)
    iconimage = models.CharField(max_length=300, blank=True)
    special_numtolist = models.IntegerField(null=True, blank=True)
    special_displaytype = models.IntegerField(null=True, blank=True)
    special_columnum = models.IntegerField(null=True, blank=True)
    category_columnum = models.IntegerField(null=True, blank=True)
    category_displaytype = models.IntegerField(null=True, blank=True)
    related_displaytype = models.IntegerField(null=True, blank=True)
    related_columnum = models.IntegerField(null=True, blank=True)
    listing_displaytype = models.IntegerField(null=True, blank=True)
    hide = models.IntegerField(null=True, blank=True)
    category_defaultsorting = models.IntegerField(null=True, blank=True)
    userid = models.CharField(max_length=150, blank=True)
    last_update = models.DateTimeField(null=True, blank=True)
    itemicon = models.IntegerField(null=True, blank=True)
    redirectto = models.CharField(max_length=450, blank=True)
    accessgroup = models.CharField(max_length=750, blank=True)
    link = models.TextField(blank=True)
    link_target = models.CharField(max_length=150, blank=True)
    upsellitems_displaytype = models.IntegerField(null=True, blank=True)
    upsellitems_columnum = models.IntegerField(null=True, blank=True)
    filename = models.CharField(max_length=765, blank=True)
    isfilter = models.IntegerField(null=True, db_column='isFilter', blank=True) # Field name made lowercase.
    keywords = models.TextField(blank=True)
    class Meta:
        app_label = ''
class Crm(models.Model):
    """Customer-support ticket, optionally linked to a customer, order and product."""
    id = models.IntegerField(primary_key=True)
    custid = models.IntegerField(null=True, blank=True)
    orderid = models.IntegerField(null=True, blank=True)
    productid = models.IntegerField(null=True, blank=True)
    custemail = models.CharField(max_length=150, blank=True)
    subject = models.CharField(max_length=450, blank=True)
    datentime = models.DateTimeField(null=True, blank=True)
    assignedto = models.IntegerField(null=True, blank=True)
    status = models.IntegerField(null=True, blank=True)
    customer = models.CharField(max_length=150, blank=True)
    lastactiondatentime = models.DateTimeField(null=True, blank=True)
    messagekey = models.CharField(max_length=150, blank=True)
    departmentid = models.IntegerField(null=True, blank=True)
    phone = models.CharField(max_length=150, blank=True)
    class Meta:
        db_table = u'crm'
        app_label = ''
        # Newest tickets first by default.
        ordering = ["-id"]
class CrmDepartment(models.Model):
    """Support department a CRM ticket can be routed to."""
    id = models.IntegerField(primary_key=True)
    department = models.CharField(max_length=150, blank=True)
    visible = models.IntegerField(null=True, blank=True)
    class Meta:
        db_table = u'crm_department'
        app_label = ''
        ordering = ["id"]
class CrmMessages(models.Model):
    """Single message within a CRM ticket thread (crmid references Crm.id)."""
    id = models.IntegerField(primary_key=True)
    crmid = models.IntegerField(null=True, blank=True)
    datentime = models.DateTimeField(null=True, blank=True)
    message = models.TextField(blank=True)
    sender = models.IntegerField(null=True, blank=True)
    sendername = models.CharField(max_length=150, blank=True)
    senderemail = models.CharField(max_length=765, blank=True)
    class Meta:
        db_table = u'crm_messages'
        app_label = ''
class CrmStatus(models.Model):
    """Lookup table mapping a CRM status id to its display text."""
    id = models.IntegerField(primary_key=True)
    statusid = models.IntegerField(null=True, blank=True)
    statustext = models.CharField(max_length=30, blank=True)
    class Meta:
        db_table = u'crm_status'
        app_label = ''
class CustomersAddressbook(models.Model):
    """Additional shipping address saved by a customer (contactid references customers)."""
    id = models.IntegerField(primary_key=True)
    contactid = models.IntegerField(null=True, blank=True)
    address_alias = models.CharField(max_length=150, blank=True)
    shipping_firstname = models.CharField(max_length=150, blank=True)
    shipping_lastname = models.CharField(max_length=150, blank=True)
    shipping_address = models.CharField(max_length=765, blank=True)
    shipping_address2 = models.CharField(max_length=150, blank=True)
    shipping_city = models.CharField(max_length=300, blank=True)
    shipping_state = models.CharField(max_length=300, blank=True)
    shipping_zip = models.CharField(max_length=60, blank=True)
    shipping_country = models.CharField(max_length=300, blank=True)
    shipping_company = models.CharField(max_length=765, blank=True)
    shipping_phone = models.CharField(max_length=150, blank=True)
    date_added = models.DateTimeField(null=True, blank=True)
    addres_type = models.IntegerField(null=True, blank=True)
    userid = models.CharField(max_length=150, blank=True)
    last_update = models.DateTimeField(null=True, blank=True)
    class Meta:
        db_table = u'customers_addressbook'
        app_label = ''
class CustomerRewards(models.Model):
    """Reward-points ledger entry for a customer, tied to an order or gift certificate."""
    id = models.IntegerField(primary_key=True)
    contactid = models.IntegerField(null=True, blank=True)
    orderid = models.IntegerField(null=True, blank=True)
    points = models.IntegerField(null=True, blank=True)
    reference = models.CharField(max_length=150, blank=True)
    datentime = models.DateTimeField(null=True, blank=True)
    giftcertid = models.IntegerField(null=True, blank=True)
    class Meta:
        db_table = u'customer_rewards'
        app_label = ''
class Emails(models.Model):
    """Outgoing e-mail template (subject/body variants keyed by type, order status and section)."""
    # Fix: primary_key=True added.  An explicit 'id' field without a primary
    # key clashes with Django's auto-generated 'id' AutoField; the sibling
    # models in this module all declare id as the primary key.
    id = models.IntegerField(primary_key=True)
    etype = models.CharField(max_length=150, blank=True)
    name = models.CharField(max_length=150, blank=True)
    subject = models.TextField(blank=True)
    body = models.TextField(blank=True)
    to = models.CharField(max_length=150, blank=True)
    order_status = models.IntegerField(null=True, blank=True)
    body_html = models.TextField(blank=True)
    from_email = models.CharField(max_length=765, blank=True)
    reply_email = models.CharField(max_length=765, blank=True)
    bcc_email = models.CharField(max_length=765, blank=True)
    section = models.CharField(max_length=150, blank=True)
    class Meta:
        db_table = u'emails'
        app_label = ''
class Extrapages(models.Model):
    """CMS extra page (static or database-driven), with layout and visibility options."""
    # Fix: primary_key=True added.  An explicit 'id' field without a primary
    # key clashes with Django's auto-generated 'id' AutoField; the sibling
    # models in this module all declare id as the primary key.
    id = models.IntegerField(primary_key=True)
    htmlpage = models.CharField(max_length=750, blank=True)
    title = models.TextField(blank=True)
    meta = models.TextField(blank=True)
    content = models.TextField(blank=True)
    sorting = models.IntegerField(null=True, blank=True)
    hide = models.IntegerField(null=True, blank=True)
    link = models.TextField(blank=True)
    link_target = models.CharField(max_length=150, blank=True)
    page_parent = models.IntegerField(null=True, blank=True)
    isdatabase = models.IntegerField(null=True, blank=True)
    recordsperpage = models.IntegerField(null=True, blank=True)
    page_displaytype = models.IntegerField(null=True, blank=True)
    showindex = models.IntegerField(null=True, blank=True)
    showrss = models.IntegerField(null=True, blank=True)
    feed_sorting = models.IntegerField(null=True, blank=True)
    userid = models.CharField(max_length=150, blank=True)
    last_update = models.DateTimeField(null=True, blank=True)
    accessgroup = models.IntegerField(null=True, blank=True)
    redirectto = models.CharField(max_length=450, blank=True)
    filename = models.CharField(max_length=765, blank=True)
    hide_left = models.IntegerField(null=True, blank=True)
    hide_right = models.IntegerField(null=True, blank=True)
    frame_displaytype = models.IntegerField(null=True, blank=True)
    keywords = models.TextField(blank=True)
    class Meta:
        db_table = u'extrapages'
        app_label = ''
class GiftCertificates(models.Model):
    """Gift certificate issued from an order, tracking its balance and recipient."""
    id = models.IntegerField(primary_key=True)
    orderid = models.IntegerField(null=True, blank=True)
    certificate_name = models.CharField(max_length=450, blank=True)
    certificate_amount = models.FloatField(null=True, blank=True)
    certificate_expenses = models.FloatField(null=True, blank=True)
    certificate_balance = models.FloatField(null=True, blank=True)
    certificate_to = models.CharField(max_length=150, blank=True)
    certificate_message = models.TextField(blank=True)
    certificate_date = models.DateTimeField(null=True, blank=True)
    class Meta:
        db_table = u'gift_certificates'
        app_label = ''
        # Most recently issued certificates first.
        ordering = ["-certificate_date"]
class Html(models.Model):
    """Site-wide HTML fragments (header/footer/meta) keyed by page."""
    # Fix: primary_key=True added.  An explicit 'id' field without a primary
    # key clashes with Django's auto-generated 'id' AutoField; the sibling
    # models in this module all declare id as the primary key.
    id = models.IntegerField(primary_key=True)
    htmlpage = models.CharField(max_length=150, blank=True)
    title = models.TextField(blank=True)
    meta = models.TextField(blank=True)
    header = models.TextField(blank=True)
    footer = models.TextField(blank=True)
    sorting = models.IntegerField(null=True, blank=True)
    userid = models.CharField(max_length=150, blank=True)
    last_update = models.DateTimeField(null=True, blank=True)
    hide_left = models.IntegerField(null=True, blank=True)
    hide_right = models.IntegerField(null=True, blank=True)
    keywords = models.TextField(blank=True)
    class Meta:
        db_table = u'html'
        app_label = ''
class Manufacturer(models.Model):
    """Product manufacturer/brand with logo and website."""
    id = models.IntegerField(primary_key=True)
    manufacturer = models.CharField(max_length=150, blank=True)
    logo = models.CharField(max_length=150, blank=True)
    sorting = models.IntegerField(null=True, blank=True)
    header = models.TextField(blank=True)
    website = models.CharField(max_length=150, blank=True)
    userid = models.CharField(max_length=150, blank=True)
    last_update = models.DateTimeField(null=True, blank=True)
    class Meta:
        db_table = u'manufacturer'
        app_label = ''
class Oitems(models.Model):
    """Single line item of an order (orderid references Orders, catalogid the product)."""
    orderitemid = models.IntegerField(primary_key=True)
    orderid = models.IntegerField(null=True, blank=True)
    catalogid = models.IntegerField(null=True, blank=True)
    itemid = models.CharField(max_length=450, blank=True)
    itemname = models.TextField(blank=True)
    numitems = models.FloatField(null=True, blank=True)
    unitprice = models.DecimalField(null=True, max_digits=21, decimal_places=4, blank=True)
    options = models.TextField(blank=True)
    optionprice = models.FloatField(null=True, blank=True)
    weight = models.FloatField(null=True, blank=True)
    additional_field1 = models.CharField(max_length=150, blank=True)
    additional_field2 = models.CharField(max_length=150, blank=True)
    additional_field3 = models.CharField(max_length=150, blank=True)
    shipment_id = models.IntegerField(null=True, blank=True)
    catoptions = models.CharField(max_length=765, blank=True)
    catalogidoptions = models.CharField(max_length=765, blank=True)
    warehouseid = models.IntegerField(null=True, db_column='warehouseID', blank=True) # Field name made lowercase.
    unitcost = models.FloatField(null=True, blank=True)
    unitstock = models.IntegerField(null=True, blank=True)
    date_added = models.DateTimeField(null=True, blank=True)
    page_added = models.CharField(max_length=765, blank=True)
    itemdescription = models.CharField(max_length=450, blank=True)
    reminder = models.IntegerField(null=True, blank=True)
    recurrent = models.IntegerField(null=True, blank=True)
    wsh_id = models.IntegerField(null=True, blank=True)
    wsi_id = models.IntegerField(null=True, blank=True)
    depends_on_item = models.IntegerField(null=True, blank=True)
    recurring_order_frequency = models.IntegerField(null=True, blank=True)
    item_type = models.IntegerField(null=True, blank=True)
    class Meta:
        db_table = u'oitems'
        app_label = ''
class OnlinePayments(models.Model):
    """Configured online payment gateway, including credentials and availability rules."""
    id = models.IntegerField(primary_key=True)
    payment_id = models.IntegerField(null=True, blank=True)
    payment_description = models.TextField(blank=True)
    payment_confirmation_message = models.TextField(blank=True)
    payment_hide = models.IntegerField(null=True, blank=True)
    payment_sort = models.IntegerField(null=True, blank=True)
    payment_login = models.CharField(max_length=150, blank=True)
    payment_password = models.CharField(max_length=765, blank=True)
    payment_url = models.CharField(max_length=150, blank=True)
    authorizeonly = models.IntegerField(null=True, blank=True)
    lowerol = models.IntegerField(null=True, db_column='LowerOL', blank=True) # Field name made lowercase.
    upperol = models.IntegerField(null=True, db_column='UpperOL', blank=True) # Field name made lowercase.
    enabled = models.IntegerField(null=True, db_column='Enabled', blank=True) # Field name made lowercase.
    usage = models.IntegerField(null=True, blank=True)
    paymentcountry = models.CharField(max_length=150, db_column='PaymentCountry', blank=True) # Field name made lowercase.
    paymentstate = models.CharField(max_length=150, db_column='PaymentState', blank=True) # Field name made lowercase.
    discountgroup = models.IntegerField(null=True, blank=True)
    payment_description_message = models.TextField(blank=True)
    payment_status = models.IntegerField(null=True, blank=True)
    payment_signature = models.CharField(max_length=765, blank=True)
    testmode = models.IntegerField(null=True, blank=True)
    class Meta:
        db_table = u'online_payments'
        app_label = ''
class OrderDiscounts(models.Model):
    """Discount, coupon or gift-certificate application recorded against an order."""
    id = models.IntegerField(primary_key=True)
    orderid = models.IntegerField(null=True, blank=True)
    discount_id = models.IntegerField(null=True, blank=True)
    discount_amount = models.DecimalField(null=True, max_digits=21, decimal_places=4, blank=True)
    discount_freeship = models.IntegerField(null=True, blank=True)
    discount_freeprod = models.CharField(max_length=150, blank=True)
    coupon = models.CharField(max_length=150, blank=True)
    freeprod = models.IntegerField(null=True, blank=True)
    giftcert = models.IntegerField(null=True, blank=True)
    applied = models.DateTimeField(null=True, blank=True)
    promo_amount = models.FloatField(null=True, blank=True)
    promo_qty = models.IntegerField(null=True, blank=True)
    class Meta:
        db_table = u'order_discounts'
        app_label = ''
class OrderQuestions(models.Model):
    """Customer's answer to a checkout question for a given order."""
    id = models.IntegerField(primary_key=True)
    questionid = models.IntegerField(null=True, blank=True)
    orderid = models.IntegerField(null=True, blank=True)
    answer = models.TextField(blank=True)
    class Meta:
        db_table = u'order_questions'
        app_label = ''
class Orders(models.Model):
    """Order header: billing/shipping details, payment data, totals and status.

    NOTE(review): card-number/verification fields appear to hold payment data
    directly -- confirm PCI handling before changing anything here.
    """
    orderid = models.IntegerField(primary_key=True)
    ocustomerid = models.IntegerField(null=True, blank=True)
    odate = models.DateTimeField(null=True, blank=True)
    orderamount = models.FloatField(null=True, blank=True)
    ofirstname = models.CharField(max_length=300, blank=True)
    olastname = models.CharField(max_length=300, blank=True)
    oemail = models.CharField(max_length=300, blank=True)
    oaddress = models.CharField(max_length=600, blank=True)
    oaddress2 = models.CharField(max_length=150, blank=True)
    ocity = models.CharField(max_length=300, blank=True)
    ozip = models.CharField(max_length=45, blank=True)
    ostate = models.CharField(max_length=300, blank=True)
    ocountry = models.CharField(max_length=150, blank=True)
    ophone = models.CharField(max_length=90, blank=True)
    ofax = models.CharField(max_length=90, blank=True)
    ocompany = models.CharField(max_length=765, blank=True)
    ocardtype = models.CharField(max_length=150, blank=True)
    ocardno = models.CharField(max_length=765, blank=True)
    ocardname = models.CharField(max_length=600, blank=True)
    ocardexpiresmonth = models.CharField(max_length=30, blank=True)
    ocardexpiresyear = models.CharField(max_length=30, blank=True)
    ocardissuenum = models.CharField(max_length=30, blank=True)
    ocardstartmonth = models.CharField(max_length=30, blank=True)
    ocardstartyear = models.CharField(max_length=30, blank=True)
    ocardaddress = models.CharField(max_length=450, blank=True)
    ocardverification = models.CharField(max_length=150, blank=True)
    oprocessed = models.IntegerField()
    ocomment = models.TextField(blank=True)
    otax = models.FloatField(null=True, blank=True)
    otax2 = models.FloatField(null=True, blank=True)
    otax3 = models.FloatField(null=True, blank=True)
    ointernalcomment = models.TextField(blank=True)
    oexternalcomment = models.CharField(max_length=150, blank=True)
    oshippeddate = models.CharField(max_length=150, blank=True)
    oshipmethod = models.CharField(max_length=450, blank=True)
    oshipcost = models.FloatField(null=True, blank=True)
    oshipfirstname = models.CharField(max_length=300, blank=True)
    oshiplastname = models.CharField(max_length=150, blank=True)
    oshipcompany = models.CharField(max_length=600, blank=True)
    oshipemail = models.CharField(max_length=300, blank=True)
    oshipaddress = models.CharField(max_length=765, blank=True)
    oshipaddress2 = models.CharField(max_length=150, blank=True)
    oshipcity = models.CharField(max_length=150, blank=True)
    oshipzip = models.CharField(max_length=60, blank=True)
    oshipstate = models.CharField(max_length=150, blank=True)
    oshipcountry = models.CharField(max_length=150, blank=True)
    oshipphone = models.CharField(max_length=150, blank=True)
    opaymethod = models.IntegerField(null=True, blank=True)
    opaymethodinfo = models.TextField(blank=True)
    status = models.IntegerField(null=True, blank=True)
    other2 = models.CharField(max_length=150, blank=True)
    otime = models.DateTimeField(null=True, blank=True)
    oauthorization = models.CharField(max_length=765, blank=True)
    oerrors = models.CharField(max_length=765, blank=True)
    odiscount = models.FloatField(null=True, blank=True)
    ostatus = models.CharField(max_length=765, blank=True)
    ohandling = models.FloatField(null=True, blank=True)
    coupon = models.CharField(max_length=300, blank=True)
    coupondiscount = models.DecimalField(null=True, max_digits=21, decimal_places=4, blank=True)
    coupondiscountdual = models.FloatField(null=True, blank=True)
    giftcertificate = models.CharField(max_length=300, blank=True)
    giftamountused = models.DecimalField(null=True, max_digits=21, decimal_places=4, blank=True)
    giftamountuseddual = models.FloatField(null=True, blank=True)
    trackingcode = models.CharField(max_length=300, blank=True)
    invoicenum_prefix = models.CharField(max_length=150, blank=True)
    invoicenum = models.IntegerField(null=True, blank=True)
    order_status = models.IntegerField(null=True, blank=True)
    referer = models.TextField(blank=True)
    salesperson = models.CharField(max_length=150, blank=True)
    ip = models.CharField(max_length=48, blank=True)
    date_started = models.DateTimeField(null=True, blank=True)
    userid = models.CharField(max_length=150, blank=True)
    last_update = models.DateTimeField(null=True, blank=True)
    last_auto_email = models.IntegerField(null=True, blank=True)
    oweight = models.FloatField(null=True, blank=True)
    oboxes = models.IntegerField(null=True, blank=True)
    orderkey = models.CharField(max_length=48, blank=True)
    ostep = models.CharField(max_length=150, blank=True)
    shipmethodid = models.IntegerField(null=True, blank=True)
    insured = models.IntegerField(null=True, blank=True)
    alt_orderid = models.CharField(max_length=150, blank=True)
    affiliate_id = models.IntegerField(null=True, blank=True)
    affiliate_commission = models.DecimalField(null=True, max_digits=7, decimal_places=2, blank=True)
    affiliate_approved = models.IntegerField(null=True, blank=True)
    affiliate_approvedreason = models.CharField(max_length=150, blank=True)
    shipping_id = models.IntegerField(null=True, blank=True)
    buysafe = models.IntegerField(null=True, blank=True)
    checktype = models.CharField(max_length=24, blank=True)
    checkacctype = models.CharField(max_length=24, blank=True)
    checkrouting = models.CharField(max_length=150, blank=True)
    checkaccount = models.CharField(max_length=150, blank=True)
    oshipaddresstype = models.IntegerField(null=True, blank=True)
    isrecurrent = models.IntegerField(null=True, blank=True)
    recurrent_frequency = models.IntegerField(null=True, blank=True)
    parent_orderid = models.IntegerField(null=True, blank=True)
    last_order = models.DateTimeField(null=True, blank=True)
    next_order = models.DateTimeField(null=True, blank=True)
    customer_pmntprofileid = models.CharField(max_length=150, blank=True)
    class Meta:
        db_table = u'orders'
        app_label = ''
        # Newest orders first by default.
        ordering = ["-odate"]
class OrdersShipments(models.Model):
    """ORM mapping for the legacy `orders_shipments` table (one shipment row per order)."""
    id = models.IntegerField(primary_key=True)
    orderid = models.IntegerField(null=True, blank=True)  # NOTE(review): plain int, no FK declared; presumably references Orders — verify
    address_id = models.IntegerField(null=True, blank=True)
    oshippeddate = models.CharField(max_length=150, blank=True)  # stored as text, not a DateField
    oshipmethod = models.CharField(max_length=450, blank=True)
    oshipmethodid = models.IntegerField(null=True, blank=True)
    oshipcost = models.FloatField(null=True, blank=True)
    trackingcode = models.CharField(max_length=300, blank=True)
    oshipalias = models.CharField(max_length=300, blank=True)
    oshipfirstname = models.CharField(max_length=300, blank=True)
    oshiplastname = models.CharField(max_length=150, blank=True)
    oshipcompany = models.CharField(max_length=600, blank=True)
    oshipemail = models.CharField(max_length=300, blank=True)
    oshipaddress = models.CharField(max_length=765, blank=True)
    oshipaddress2 = models.CharField(max_length=150, blank=True)
    oshipcity = models.CharField(max_length=150, blank=True)
    oshipzip = models.CharField(max_length=60, blank=True)
    oshipstate = models.CharField(max_length=150, blank=True)
    oshipcountry = models.CharField(max_length=150, blank=True)
    oshipphone = models.CharField(max_length=150, blank=True)
    order_status = models.IntegerField(null=True, blank=True)
    distributor_id = models.IntegerField(null=True, blank=True)
    userid = models.CharField(max_length=150, blank=True)
    last_update = models.DateTimeField(null=True, blank=True)
    oweight = models.FloatField(null=True, blank=True)
    oboxes = models.IntegerField(null=True, blank=True)
    otax = models.FloatField(null=True, blank=True)
    ointernalcomment = models.TextField(blank=True)
    shipping_id = models.IntegerField(null=True, blank=True)
    class Meta:
        db_table = u'orders_shipments'
        app_label = ''
class OrderStatus(models.Model):
    """ORM mapping for the `order_status` lookup table (status definitions for orders)."""
    # primary_key=True added: Django only allows a field named `id` when it is
    # the primary key (system check fields.E004); without it the model fails checks.
    id = models.IntegerField(primary_key=True)
    statusid = models.IntegerField(null=True, db_column='StatusID', blank=True) # Field name made lowercase.
    statusdefinition = models.CharField(max_length=150, db_column='StatusDefinition', blank=True) # Field name made lowercase.
    statustext = models.CharField(max_length=150, db_column='StatusText', blank=True) # Field name made lowercase.
    visible = models.IntegerField(null=True, db_column='Visible', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'order_status'
        app_label = ''
        ordering = ["statusid"]
class PaymentMethods(models.Model):
    """ORM mapping for the `payment_methods` table (available payment gateways)."""
    # primary_key=True added: Django only allows a field named `id` when it is
    # the primary key (system check fields.E004); without it the model fails checks.
    id = models.IntegerField(primary_key=True)
    gateway_id = models.IntegerField(null=True, db_column='gateway_ID', blank=True) # Field name made lowercase.
    payment_gateway = models.CharField(max_length=150, blank=True)
    ccgateway = models.IntegerField(null=True, db_column='CCGateway', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'payment_methods'
        app_label = ''
class ProductAccessories(models.Model):
    """ORM mapping for `product_accessories`: links a product to an accessory product."""
    id = models.IntegerField(primary_key=True)
    catalogid = models.IntegerField(null=True, blank=True)  # product being accessorized
    accessory_id = models.IntegerField(null=True, blank=True)  # the accessory product
    sorting = models.IntegerField(null=True, blank=True)
    class Meta:
        db_table = u'product_accessories'
        app_label = ''
class ProductArticle(models.Model):
    """ORM mapping for `product_article`: editorial articles/reviews attached to a product."""
    id = models.IntegerField(db_index=True,primary_key=True)
    catalogid = models.IntegerField(db_index=True,null=True, blank=True)
    article_title = models.CharField(db_index=True,max_length=150, blank=True)
    short_review = models.CharField(db_index=True,max_length=450, blank=True)
    long_review = models.TextField(db_index=True,blank=True)
    rating = models.IntegerField(db_index=True,null=True, blank=True)
    review_date = models.DateTimeField(db_index=True,null=True, blank=True)
    approved = models.IntegerField(db_index=True,null=True, blank=True)  # int flag, not BooleanField
    userid = models.IntegerField(db_index=True,null=True, blank=True)
    userip = models.CharField(max_length=150, blank=True)
    class Meta:
        db_table = u'product_article'
        app_label = ''
class ProductBoxes(models.Model):
    """ORM mapping for `product_boxes`: shipping-box dimensions/weight for a product."""
    id = models.IntegerField(primary_key=True)
    catalogid = models.IntegerField(null=True, blank=True)
    weight = models.FloatField(null=True, blank=True)
    height = models.IntegerField(null=True, blank=True)
    width = models.IntegerField(null=True, blank=True)
    depth = models.IntegerField(null=True, blank=True)
    class Meta:
        db_table = u'product_boxes'
        app_label = ''
class ProductCategory(models.Model):
    """ORM mapping for `product_category`: many-to-many join of products and categories."""
    # primary_key=True added: Django only allows a field named `id` when it is
    # the primary key (system check fields.E004); without it the model fails checks.
    id = models.IntegerField(primary_key=True)
    catalogid = models.IntegerField(db_index=True,null=True, blank=True)
    categoryid = models.IntegerField(db_index=True,null=True, blank=True)
    ismain = models.CharField(db_index=True,max_length=150, blank=True)  # stored as text, not a boolean
    sorting = models.IntegerField(db_index=True,null=True, blank=True)
    class Meta:
        db_table = u'product_category'
        app_label = ''
class ProductEmailfriend(models.Model):
    """ORM mapping for `product_emailfriend`: 'email this product to a friend' log."""
    id = models.IntegerField(primary_key=True)
    catalogid = models.IntegerField(null=True, blank=True)
    user_name = models.CharField(max_length=150, blank=True)
    user_email = models.CharField(max_length=150, blank=True)
    friend_name = models.CharField(max_length=150, blank=True)
    friend_email = models.CharField(max_length=450, blank=True)
    message = models.TextField(blank=True)
    record_date = models.DateTimeField(null=True, blank=True)
    userid = models.IntegerField(null=True, blank=True)
    userip = models.CharField(max_length=150, blank=True)
    class Meta:
        db_table = u'product_emailfriend'
        app_label = ''
class ProductReview(models.Model):
    """ORM mapping for `product_review`: customer-submitted product reviews."""
    id = models.IntegerField(db_index=True,primary_key=True)
    catalogid = models.IntegerField(db_index=True,null=True, blank=True)
    user_name = models.CharField(db_index=True,max_length=150, blank=True)
    user_email = models.CharField(db_index=True,max_length=150, blank=True)
    user_city = models.CharField(db_index=True,max_length=150, blank=True)
    short_review = models.CharField(db_index=True,max_length=450, blank=True)
    long_review = models.TextField(db_index=True,blank=True)
    rating = models.IntegerField(db_index=True,null=True, blank=True)
    review_date = models.DateTimeField(db_index=True,null=True, blank=True)
    approved = models.IntegerField(db_index=True,null=True, blank=True)  # int flag, not BooleanField
    userid = models.IntegerField(db_index=True,null=True, blank=True)
    userip = models.CharField(max_length=150, blank=True)
    class Meta:
        db_table = u'product_review'
        app_label = ''
class ProductShipping(models.Model):
    """ORM mapping for `product_shipping`: links a product to a carrier shipping method."""
    id = models.IntegerField(primary_key=True)
    catalogid = models.IntegerField(null=True, blank=True)
    shipcarriermethodid = models.IntegerField(null=True, blank=True)
    class Meta:
        db_table = u'product_shipping'
        app_label = ''
class ProductWaitinglist(models.Model):
    """ORM mapping for `product_waitinglist`: back-in-stock notification requests."""
    id = models.IntegerField(primary_key=True)
    catalogid = models.IntegerField(null=True, blank=True)
    user_name = models.CharField(max_length=150, blank=True)
    user_email = models.CharField(max_length=150, blank=True)
    user_phone = models.CharField(max_length=150, blank=True)
    record_date = models.DateTimeField(null=True, blank=True)
    userid = models.IntegerField(null=True, blank=True)
    userip = models.CharField(max_length=150, blank=True)
    message = models.TextField(blank=True)
    current_stock = models.IntegerField(null=True, blank=True)  # stock level when the request was made
    last_contact = models.DateTimeField(null=True, blank=True)
    class Meta:
        db_table = u'product_waitinglist'
        app_label = ''
class Products(models.Model):
    """ORM mapping for the `products` catalog table.

    Note the unusual key layout: `catalogid` is the primary key, while the
    `id` column holds the merchant-visible SKU string.
    """
    catalogid = models.IntegerField(db_index=True, primary_key=True)
    id = models.CharField(db_index=True, max_length=150, blank=True)  # SKU string, NOT the pk
    name = models.CharField(db_index=True, max_length=765, blank=True)
    categoriesaaa = models.CharField(db_index=True, max_length=300, blank=True)
    mfgid = models.CharField(db_index=True, max_length=150, blank=True)
    manufacturer = models.IntegerField(null=True, blank=True)
    distributor = models.IntegerField(null=True, blank=True)
    cost = models.DecimalField(null=True, max_digits=21, decimal_places=4, blank=True)
    price = models.DecimalField(null=True, max_digits=21, decimal_places=4, blank=True)
    price2 = models.DecimalField(null=True, max_digits=21, decimal_places=4, blank=True)
    price3 = models.DecimalField(null=True, max_digits=21, decimal_places=4, blank=True)
    saleprice = models.DecimalField(null=True, max_digits=21, decimal_places=4, blank=True)
    onsale = models.IntegerField(null=True, blank=True)
    stock = models.FloatField(null=True, blank=True)
    stock_alert = models.IntegerField(null=True, blank=True)
    display_stock = models.CharField(max_length=150, blank=True)
    weight = models.FloatField(null=True, blank=True)
    minimumorder = models.FloatField(null=True, blank=True)
    maximumorder = models.FloatField(null=True, blank=True)
    date_created = models.DateTimeField(null=True, blank=True)
    description = models.TextField(blank=True)
    extended_description = models.TextField(blank=True)
    keywords = models.TextField(blank=True)
    sorting = models.IntegerField(null=True, blank=True)
    thumbnail = models.CharField(max_length=765, blank=True)
    image1 = models.CharField(max_length=765, blank=True)
    image2 = models.CharField(max_length=765, blank=True)
    image3 = models.CharField(max_length=765, blank=True)
    image4 = models.CharField(max_length=765, blank=True)
    realmedia = models.CharField(max_length=765, blank=True)
    related = models.CharField(max_length=150, blank=True)
    shipcost = models.DecimalField(null=True, max_digits=21, decimal_places=4, blank=True)
    imagecaption1 = models.TextField(blank=True)
    imagecaption2 = models.TextField(blank=True)
    imagecaption3 = models.TextField(blank=True)
    imagecaption4 = models.TextField(blank=True)
    title = models.CharField(max_length=450, blank=True)
    metatags = models.TextField(blank=True)
    displaytext = models.CharField(max_length=150, blank=True)
    # eproduct_* columns describe downloadable (electronic) products.
    eproduct_password = models.CharField(max_length=45, blank=True)
    eproduct_random = models.IntegerField(null=True, blank=True)
    eproduct_expire = models.IntegerField(null=True, blank=True)
    eproduct_path = models.TextField(blank=True)
    eproduct_serial = models.IntegerField(null=True, blank=True)
    eproduct_instructions = models.TextField(blank=True)
    homespecial = models.IntegerField(null=True, blank=True)
    categoryspecial = models.IntegerField(null=True, blank=True)
    hide = models.IntegerField(null=True, blank=True)
    free_shipping = models.IntegerField(null=True, blank=True)
    nontax = models.IntegerField(null=True, blank=True)
    notforsale = models.IntegerField(null=True, blank=True)
    giftcertificate = models.IntegerField(null=True, blank=True)
    userid = models.CharField(max_length=150, blank=True)
    last_update = models.DateTimeField(null=True, blank=True)
    extra_field_1 = models.CharField(max_length=450, blank=True)
    extra_field_2 = models.CharField(max_length=450, blank=True)
    extra_field_3 = models.CharField(max_length=450, blank=True)
    extra_field_4 = models.CharField(max_length=450, blank=True)
    extra_field_5 = models.CharField(max_length=450, blank=True)
    extra_field_6 = models.TextField(blank=True)
    extra_field_7 = models.TextField(blank=True)
    extra_field_8 = models.TextField(blank=True)
    extra_field_9 = models.TextField(blank=True)
    extra_field_10 = models.TextField(blank=True)
    usecatoptions = models.IntegerField(null=True, blank=True)
    qtyoptions = models.CharField(max_length=750, blank=True)
    # price_1..price_10 / hide_1..hide_10 are quantity- or group-tier price slots.
    price_1 = models.DecimalField(null=True, max_digits=21, decimal_places=4, blank=True)
    price_2 = models.DecimalField(null=True, max_digits=21, decimal_places=4, blank=True)
    price_3 = models.DecimalField(null=True, max_digits=21, decimal_places=4, blank=True)
    price_4 = models.DecimalField(null=True, max_digits=21, decimal_places=4, blank=True)
    price_5 = models.DecimalField(null=True, max_digits=21, decimal_places=4, blank=True)
    price_6 = models.DecimalField(null=True, max_digits=21, decimal_places=4, blank=True)
    price_7 = models.DecimalField(null=True, max_digits=21, decimal_places=4, blank=True)
    price_8 = models.DecimalField(null=True, max_digits=21, decimal_places=4, blank=True)
    price_9 = models.DecimalField(null=True, max_digits=21, decimal_places=4, blank=True)
    price_10 = models.DecimalField(null=True, max_digits=21, decimal_places=4, blank=True)
    hide_1 = models.IntegerField(null=True, blank=True)
    hide_2 = models.IntegerField(null=True, blank=True)
    hide_3 = models.IntegerField(null=True, blank=True)
    hide_4 = models.IntegerField(null=True, blank=True)
    hide_5 = models.IntegerField(null=True, blank=True)
    hide_6 = models.IntegerField(null=True, blank=True)
    hide_7 = models.IntegerField(null=True, blank=True)
    hide_8 = models.IntegerField(null=True, blank=True)
    hide_9 = models.IntegerField(null=True, blank=True)
    hide_10 = models.IntegerField(null=True, blank=True)
    minorderpkg = models.IntegerField(null=True, blank=True)
    listing_displaytype = models.IntegerField(null=True, blank=True)
    show_out_stock = models.IntegerField(null=True, blank=True)
    pricing_groupopt = models.IntegerField(null=True, blank=True)
    qtydiscount_opt = models.IntegerField(null=True, blank=True)
    loginlevel = models.IntegerField(null=True, blank=True)
    redirectto = models.CharField(max_length=450, blank=True)
    accessgroup = models.CharField(max_length=750, blank=True)
    self_ship = models.IntegerField(null=True, blank=True)
    tax_code = models.CharField(max_length=9, blank=True)
    eproduct_reuseserial = models.FloatField(null=True, blank=True)
    nonsearchable = models.IntegerField(null=True, blank=True)
    instock_message = models.CharField(max_length=450, blank=True)
    outofstock_message = models.CharField(max_length=450, blank=True)
    backorder_message = models.CharField(max_length=450, blank=True)
    height = models.IntegerField(null=True, blank=True)
    width = models.IntegerField(null=True, blank=True)
    depth = models.IntegerField(null=True, blank=True)
    reward_points = models.IntegerField(null=True, blank=True)
    reward_disable = models.IntegerField(null=True, blank=True)
    reward_redeem = models.IntegerField(null=True, blank=True)
    filename = models.CharField(max_length=765, blank=True)
    rma_maxperiod = models.IntegerField(null=True, blank=True)
    recurring_order = models.IntegerField(null=True, blank=True)
    fractional_qty = models.IntegerField(null=True, blank=True)
    reminders_enabled = models.IntegerField(null=True, blank=True)
    reminders_frequency = models.IntegerField(null=True, blank=True)
    review_average = models.FloatField(null=True, blank=True)
    review_count = models.IntegerField(null=True, blank=True)
    class Meta:
        db_table = u'products'
        app_label = ''
class Promotions(models.Model):
    """ORM mapping for the `promotions` table (coupons/discount rules)."""
    id = models.IntegerField(primary_key=True)
    promotion_name = models.CharField(max_length=450, blank=True)
    promotion_description = models.TextField(blank=True)
    promotion_start = models.DateTimeField(null=True, blank=True)
    promotion_end = models.DateTimeField(null=True, blank=True)
    promotion_enabled = models.IntegerField(null=True, blank=True)  # int flag, not BooleanField
    by_amount = models.IntegerField(null=True, blank=True)
    chk_byamount = models.IntegerField(null=True, blank=True)
    by_quantity = models.IntegerField(null=True, blank=True)
    chk_byquantity = models.IntegerField(null=True, blank=True)
    by_product = models.IntegerField(null=True, blank=True)
    by_category = models.TextField(blank=True)
    promotion_amount = models.FloatField(null=True, blank=True)
    promotion_percentage = models.IntegerField(null=True, blank=True)
    promotion_peritem = models.IntegerField(null=True, blank=True)
    promotion_category = models.TextField(blank=True)
    promotion_product = models.CharField(max_length=450, blank=True)
    promotion_freeshipping = models.IntegerField(null=True, blank=True)
    promotion_freeproduct = models.CharField(max_length=450, blank=True)
    # prod{1..4}_id/qty: up to four "buy X of product Y" trigger slots.
    prod1_id = models.IntegerField(null=True, blank=True)
    prod1_qty = models.IntegerField(null=True, blank=True)
    prod2_id = models.IntegerField(null=True, blank=True)
    prod2_qty = models.IntegerField(null=True, blank=True)
    prod3_id = models.IntegerField(null=True, blank=True)
    prod3_qty = models.IntegerField(null=True, blank=True)
    prod4_id = models.IntegerField(null=True, blank=True)
    prod4_qty = models.IntegerField(null=True, blank=True)
    coupon = models.CharField(max_length=150, blank=True)
    promotion_uses = models.IntegerField(null=True, blank=True)
    promotion_maxuses = models.IntegerField(null=True, blank=True)
    promotion_usespercust = models.IntegerField(null=True, blank=True)
    coupon_group = models.IntegerField(null=True, blank=True)
    userid = models.CharField(max_length=150, blank=True)
    last_update = models.DateTimeField(null=True, blank=True)
    by_amount2 = models.DecimalField(null=True, max_digits=21, decimal_places=4, blank=True)
    by_quantity2 = models.FloatField(null=True, blank=True)
    promotion_country = models.CharField(max_length=150, blank=True)
    promotion_state = models.CharField(max_length=150, blank=True)
    nonstackable = models.IntegerField(null=True, blank=True)
    rules_retailprice = models.IntegerField(null=True, blank=True)
    class Meta:
        db_table = u'promotions'
        app_label = ''
        ordering = ["-id"]
class Rma(models.Model):
    """ORM mapping for the `rma` table (return merchandise authorization records)."""
    idrma = models.IntegerField(primary_key=True, db_column='idRma') # Field name made lowercase.
    rmadate = models.DateTimeField(null=True, db_column='RmaDate', blank=True) # Field name made lowercase.
    orderid = models.IntegerField(null=True, blank=True)
    idrmareason = models.IntegerField(null=True, db_column='idRmaReason', blank=True) # Field name made lowercase.
    qty_received = models.IntegerField(null=True, blank=True)
    qty_restock = models.IntegerField(null=True, blank=True)
    idrmamethod = models.IntegerField(null=True, db_column='idRmaMethod', blank=True) # Field name made lowercase.
    idrmastatus = models.IntegerField(null=True, db_column='idRmaStatus', blank=True) # Field name made lowercase.
    idrmaaction = models.IntegerField(db_column='idRmaAction') # Field name made lowercase.
    comments = models.TextField(db_column='Comments', blank=True) # Field name made lowercase.
    intcomments = models.TextField(db_column='intComments', blank=True) # Field name made lowercase.
    staffcomments = models.TextField(db_column='staffComments', blank=True) # Field name made lowercase.
    # Up to ten customer-uploaded attachment filenames.
    filename = models.CharField(max_length=150, blank=True)
    filename2 = models.CharField(max_length=150, blank=True)
    filename3 = models.CharField(max_length=150, blank=True)
    filename4 = models.CharField(max_length=150, blank=True)
    filename5 = models.CharField(max_length=150, blank=True)
    filename6 = models.CharField(max_length=150, blank=True)
    filename7 = models.CharField(max_length=150, blank=True)
    filename8 = models.CharField(max_length=150, blank=True)
    filename9 = models.CharField(max_length=150, blank=True)
    filename10 = models.CharField(max_length=150, blank=True)
    class Meta:
        db_table = u'rma'
        app_label = ''
        ordering = ["-rmadate"]
class RmaOitem(models.Model):
    """ORM mapping for `rma_oitem`: per-order-item quantities of an RMA."""
    # NOTE(review): idrma is declared as pk but looks like a non-unique FK to Rma;
    # the real table key is presumably (idRma, orderitemid) — confirm against schema.
    idrma = models.IntegerField(primary_key=True, db_column='idRma', blank=True) # Field name made lowercase.
    orderitemid = models.IntegerField(null=True, blank=True)
    qty_return = models.IntegerField(null=True, blank=True)
    qty_received = models.IntegerField(null=True, blank=True)
    qty_restock = models.IntegerField(null=True, blank=True)
    class Meta:
        db_table = u'rma_oitem'
        app_label = ''
class Rmastatus(models.Model):
    """ORM mapping for the `rmastatus` lookup table."""
    idrmastatus = models.IntegerField(primary_key=True, db_column='IdRmaStatus') # Field name made lowercase.
    rmastatus = models.CharField(max_length=150, db_column='RmaStatus', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'rmastatus'
        app_label = ''
class Rmamethod(models.Model):
    """ORM mapping for the `rmamethod` lookup table."""
    idrmamethod = models.IntegerField(primary_key=True, db_column='IdRmaMethod') # Field name made lowercase.
    rmamethod = models.CharField(max_length=765, db_column='RmaMethod', blank=True) # Field name made lowercase.
    visible = models.IntegerField(null=True, db_column='Visible', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'rmamethod'
        app_label = ''
class Rmareason(models.Model):
    """ORM mapping for the `rmareason` lookup table."""
    idrmareason = models.IntegerField(primary_key=True, db_column='IdRmaReason') # Field name made lowercase.
    rmareason = models.CharField(max_length=765, db_column='RmaReason', blank=True) # Field name made lowercase.
    visible = models.IntegerField(null=True, db_column='Visible', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'rmareason'
        app_label = ''
class RmaMessages(models.Model):
    """ORM mapping for `rma_messages`: conversation thread attached to an RMA."""
    id = models.IntegerField(primary_key=True)
    rmaid = models.IntegerField(null=True, blank=True)
    datentime = models.DateTimeField(null=True, blank=True)
    message = models.TextField(blank=True)
    sender = models.IntegerField(null=True, blank=True)
    sendername = models.CharField(max_length=150, blank=True)
    senderemail = models.CharField(max_length=765, blank=True)
    class Meta:
        db_table = u'rma_messages'
        app_label = ''
class ShippingCategory(models.Model):
    """ORM mapping for `shipping_category`: a shipping-rate category and its surcharges."""
    id = models.BigIntegerField(primary_key=True)
    category_name = models.CharField(max_length=765, blank=True)
    status = models.CharField(max_length=75, blank=True)
    fuel_charge = models.FloatField(null=True, blank=True)
    use_fedex_shippingrates = models.IntegerField(null=True, blank=True)  # int flag, not BooleanField
    priority_shipping = models.FloatField(null=True, blank=True)
    saturday_delivery = models.FloatField(null=True, blank=True)
    alaska_delivery = models.FloatField(null=True, blank=True)
    is_free_shipping = models.IntegerField(null=True, blank=True)
    flatrate_shipping_charge = models.FloatField(null=True, blank=True)
    createdate = models.DateTimeField(null=True, blank=True)
    class Meta:
        db_table = u'shipping_category'
        app_label = ''
        ordering = ["id"]
class ShippingCharges(models.Model):
    """ORM mapping for `shipping_charges`: rate rows per category and order-total band."""
    id = models.BigIntegerField(primary_key=True)
    shipping_category_id = models.BigIntegerField(null=True, blank=True)
    order_total_min = models.FloatField(null=True, blank=True)  # band is [min, max] of order total
    order_total_max = models.FloatField(null=True, blank=True)
    shipping_charge = models.FloatField(null=True, blank=True)
    shipping_state = models.CharField(max_length=6, blank=True)
    excluded_zip_codes = models.CharField(max_length=1536, blank=True)  # NOTE(review): appears to be a delimited list — confirm format
    createdate = models.DateTimeField(null=True, blank=True)
    class Meta:
        db_table = u'shipping_charges'
        app_label = ''
class ProductShippingCategories(models.Model):
    """ORM mapping for `product_shipping_categories`: join of product categories and shipping categories."""
    id = models.BigIntegerField(primary_key=True)
    product_category_id = models.BigIntegerField(null=True, blank=True)
    shipping_category_id = models.BigIntegerField(null=True, blank=True)
    createdate = models.DateTimeField(null=True, blank=True)
    class Meta:
        db_table = u'product_shipping_categories'
        app_label = ''
class Shippingtuple(models.Model):
    """ORM mapping for `shippingtuple`: typed name/value pairs attached to an order's shipping.

    Exactly one of the *value columns is meaningful per row, selected by `name`.
    """
    id = models.BigIntegerField(primary_key=True, db_column='ID') # Field name made lowercase.
    orderid = models.BigIntegerField(null=True, db_column='OrderID', blank=True) # Field name made lowercase.
    shippingcategoryid = models.BigIntegerField(null=True, db_column='ShippingCategoryID', blank=True) # Field name made lowercase.
    name = models.CharField(max_length=765, db_column='Name', blank=True) # Field name made lowercase.
    stringvalue = models.CharField(max_length=765, db_column='StringValue', blank=True) # Field name made lowercase.
    intvalue = models.BigIntegerField(null=True, db_column='IntValue', blank=True) # Field name made lowercase.
    floatvalue = models.FloatField(null=True, db_column='FloatValue', blank=True) # Field name made lowercase.
    boolvalue = models.IntegerField(null=True, db_column='BoolValue', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'shippingtuple'
        app_label = ''
class ShippingCountries(models.Model):
    """ORM mapping for `shipping_countries`: countries enabled for shipping/billing."""
    id = models.IntegerField(primary_key=True)
    name = models.CharField(max_length=150, blank=True)
    name_short = models.CharField(max_length=150, db_column='name-short', blank=True) # Field renamed to remove dashes. Field name made lowercase.
    enabled = models.IntegerField(null=True, blank=True)  # int flag, not BooleanField
    enabled_billing = models.IntegerField(null=True, blank=True)
    class Meta:
        db_table = u'shipping_countries'
        app_label = ''
class ShippingStates(models.Model):
    """ORM mapping for `shipping_states`: states/provinces enabled for shipping."""
    id = models.IntegerField(primary_key=True)
    country = models.CharField(max_length=150, blank=True)
    name = models.CharField(max_length=150, blank=True)
    name_short = models.CharField(max_length=150, db_column='name-short', blank=True) # Field renamed to remove dashes. Field name made lowercase.
    enabled = models.IntegerField(null=True, blank=True)  # int flag, not BooleanField
    class Meta:
        db_table = u'shipping_states'
        app_label = ''
class SiteBanners(models.Model):
    """ORM mapping for `site_banners`: promotional banners shown on the storefront."""
    id = models.IntegerField(primary_key=True)
    banner_name = models.CharField(max_length=250, blank=True)
    banner_type = models.CharField(max_length=100, blank=True)
    banner_image = models.CharField(max_length=250, blank=True)
    banner_link = models.TextField(blank=True)
    banner_content = models.TextField(blank=True)
    banner_target = models.CharField(max_length=33, blank=True)  # e.g. an HTML link target
    banner_status = models.IntegerField(null=True, blank=True)
    datentime = models.CharField(max_length=150, blank=True)  # stored as text, not DateTimeField
    class Meta:
        db_table = u'site_banners'
        app_label = ''
class StoreSettings2(models.Model):
    """ORM mapping for `store_settings2`: key/value store configuration."""
    # primary_key=True added: Django only allows a field named `id` when it is
    # the primary key (system check fields.E004); without it the model fails checks.
    id = models.IntegerField(primary_key=True)
    varname = models.CharField(max_length=150, blank=True)
    varvalue = models.TextField(blank=True)
    class Meta:
        db_table = u'store_settings2'
        app_label = ''
class SwfCustomerCreditsLog(models.Model):
    """ORM mapping for `swf_customer_credits_log`: applied-credit audit log."""
    # primary_key=True added: Django only allows a field named `id` when it is
    # the primary key (system check fields.E004); without it the model fails checks.
    id = models.IntegerField(primary_key=True)
    customers_email_address = models.CharField(max_length=765)
    customers_credit = models.FloatField(null=True, blank=True)
    customers_promocode = models.CharField(max_length=765, blank=True)
    customers_credit_type = models.CharField(max_length=765, blank=True)
    customers_credit_applied = models.DateTimeField(null=True, blank=True)
    class Meta:
        db_table = u'swf_customer_credits_log'
        app_label = ''
class SwfCustomerCreditsTracking(models.Model):
    """ORM mapping for `swf_customer_credits_tracking`: current customer credit balances."""
    # primary_key=True added: Django only allows a field named `id` when it is
    # the primary key (system check fields.E004); without it the model fails checks.
    id = models.IntegerField(primary_key=True)
    customers_email_address = models.CharField(max_length=765)
    customers_credit = models.DecimalField(null=True, max_digits=21, decimal_places=4, blank=True)
    customers_promocode = models.CharField(max_length=765, blank=True)
    customers_credit_type = models.CharField(max_length=765, blank=True)
    class Meta:
        db_table = u'swf_customer_credits_tracking'
        app_label = ''
class SwfProductSortTemp(models.Model):
    """ORM mapping for `swf_product_sort_temp`: scratch table for product re-sorting."""
    id = models.IntegerField(primary_key=True)
    product_category_id = models.IntegerField()
    product_category_sorting = models.IntegerField()
    class Meta:
        db_table = u'swf_product_sort_temp'
        app_label = ''
class Tax(models.Model):
    """ORM mapping for the `tax` table: tax rates per country/state/zip range."""
    id = models.IntegerField(primary_key=True)
    tax_country = models.CharField(max_length=150, blank=True)
    tax_state = models.CharField(max_length=150, blank=True)
    tax_value1 = models.CharField(max_length=9, blank=True)  # NOTE(review): value1 is text while value2/3 are floats — confirm intent
    tax_value2 = models.FloatField(null=True, blank=True)
    tax_value3 = models.FloatField(null=True, blank=True)
    tax_shipping = models.IntegerField(null=True, blank=True)
    tax_discount = models.IntegerField(null=True, blank=True)
    tax_code = models.CharField(max_length=9, blank=True)
    tax_zip_low = models.IntegerField(null=True, blank=True)
    tax_zip_high = models.IntegerField(null=True, blank=True)
    tax2includeprev = models.IntegerField(null=True, blank=True)  # compound-tax flags
    tax3includeprev = models.IntegerField(null=True, blank=True)
    class Meta:
        db_table = u'tax'
        app_label = ''
class Template(models.Model):
    """ORM mapping for the `template` table (active storefront template/stylesheet)."""
    # NOTE(review): no primary_key declared, so Django adds an auto `id` column;
    # confirm the legacy `template` table actually has an `id` column.
    template = models.CharField(max_length=150, blank=True)
    stylesheet = models.CharField(max_length=150, blank=True)
    class Meta:
        db_table = u'template'
        app_label = ''
class Transactions(models.Model):
    """ORM mapping for the `transactions` table (payment-gateway transaction log)."""
    id = models.IntegerField(primary_key=True)
    datetime = models.DateTimeField(null=True, blank=True)  # NOTE: shadows the stdlib name `datetime` as an attribute
    orderid = models.IntegerField(null=True, blank=True)
    ttype = models.CharField(max_length=30, blank=True)  # transaction type code
    transactionid = models.CharField(max_length=150, blank=True)  # gateway-assigned id
    cvv2 = models.CharField(max_length=765, blank=True)
    avs = models.CharField(max_length=765, blank=True)
    responsetext = models.TextField(blank=True)
    approvalcode = models.CharField(max_length=150, blank=True)
    responsecode = models.CharField(max_length=150, blank=True)
    amount = models.DecimalField(null=True, max_digits=21, decimal_places=4, blank=True)
    gwid = models.IntegerField(null=True, blank=True)
    captured = models.IntegerField(null=True, blank=True)  # int flag, not BooleanField
    paymenttype = models.CharField(max_length=765, blank=True)
    reference = models.CharField(max_length=765, blank=True)
    class Meta:
        db_table = u'transactions'
        app_label = ''
class WshWishlist(models.Model):
    """ORM mapping for `wsh_wishlist`: a customer wishlist / gift registry."""
    wsh_id = models.IntegerField(primary_key=True, db_column='WSH_Id') # Field name made lowercase.
    customerid = models.IntegerField()
    wsh_name = models.CharField(max_length=150, db_column='WSH_Name', blank=True) # Field name made lowercase.
    wsh_created = models.DateTimeField(db_column='WSH_Created') # Field name made lowercase.
    wsh_lastmod = models.DateTimeField(null=True, db_column='WSH_LastMod', blank=True) # Field name made lowercase.
    wsh_expiration = models.DateTimeField(null=True, db_column='WSH_Expiration', blank=True) # Field name made lowercase.
    wsh_eventdate = models.DateTimeField(null=True, db_column='WSH_EventDate', blank=True) # Field name made lowercase.
    wsh_password = models.CharField(max_length=30, db_column='WSH_Password', blank=True) # Field name made lowercase.
    wsh_message = models.TextField(db_column='WSH_Message', blank=True) # Field name made lowercase.
    wsh_shipmyaddress = models.IntegerField(null=True, db_column='WSH_ShipMyAddress', blank=True) # Field name made lowercase.
    customeraddressid = models.IntegerField(null=True, db_column='CustomerAddressId', blank=True) # Field name made lowercase.
    wsh_giftregistry = models.IntegerField(null=True, db_column='WSH_GiftRegistry', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'wsh_wishlist'
        app_label = ''
class WsiWishlistitems(models.Model):
    """ORM mapping for `wsi_wishlistitems`: individual items on a wishlist."""
    wsi_id = models.IntegerField(primary_key=True, db_column='WSI_Id') # Field name made lowercase.
    wsh_id = models.IntegerField(null=True, blank=True)  # NOTE(review): presumably references WshWishlist.wsh_id — verify
    catalogid = models.IntegerField(null=True, blank=True)
    itemid = models.CharField(max_length=450, blank=True)
    itemname = models.TextField(blank=True)
    numitems = models.IntegerField(null=True, blank=True)
    unitprice = models.DecimalField(null=True, max_digits=21, decimal_places=4, blank=True)
    options = models.TextField(blank=True)
    optionprice = models.FloatField(null=True, blank=True)
    weight = models.FloatField(null=True, blank=True)
    additional_field1 = models.CharField(max_length=150, blank=True)
    additional_field2 = models.CharField(max_length=150, blank=True)
    additional_field3 = models.CharField(max_length=150, blank=True)
    shipment_id = models.IntegerField(null=True, blank=True)
    catoptions = models.CharField(max_length=765, blank=True)
    catalogidoptions = models.CharField(max_length=765, blank=True)
    warehouseid = models.IntegerField(null=True, db_column='warehouseID', blank=True) # Field name made lowercase.
    unitcost = models.FloatField(null=True, blank=True)
    unitstock = models.IntegerField(null=True, blank=True)
    date_added = models.DateTimeField(null=True, blank=True)
    page_added = models.CharField(max_length=765, blank=True)
    itemdescription = models.CharField(max_length=450, blank=True)
    reminder = models.IntegerField(null=True, blank=True)
    recurrent = models.IntegerField(null=True, blank=True)
    offlinesold = models.IntegerField(null=True, blank=True)
    class Meta:
        db_table = u'wsi_wishlistitems'
        app_label = ''
class Fedexlocations(models.Model):
    """ORM mapping for `fedexlocations`: FedEx pickup/drop-off location addresses."""
    id = models.BigIntegerField(primary_key=True, db_column='ID') # Field name made lowercase.
    locationcode = models.CharField(max_length=30, db_column='LocationCode', blank=True) # Field name made lowercase.
    address = models.CharField(max_length=765, db_column='Address', blank=True) # Field name made lowercase.
    city = models.CharField(max_length=300, db_column='City', blank=True) # Field name made lowercase.
    state = models.CharField(max_length=6, db_column='State', blank=True) # Field name made lowercase.
    zip = models.CharField(max_length=30, db_column='Zip', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'fedexlocations'
        app_label = ''
| {
"content_hash": "f1e73286d93b63beb0d7c2dbeba787e3",
"timestamp": "",
"source": "github",
"line_count": 1099,
"max_line_length": 146,
"avg_line_length": 53.484986351228386,
"alnum_prop": 0.7246512419190201,
"repo_name": "hughsons/saltwaterfish",
"id": "38cdbcc8f818b2ab16cce46dded3b8d1bc1599bd",
"size": "58780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "520809"
},
{
"name": "JavaScript",
"bytes": "226028"
},
{
"name": "Python",
"bytes": "1392538"
},
{
"name": "Shell",
"bytes": "847"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import os
import sys  # fix: sys is used below (sys.path, and sys.stdin/stdout in start()) but was never imported
import time
import argparse
import traceback
from threading import Thread
from .i18n import i18n
from .core import xeHentai
from .const import *
from .const import __version__
from .util import logger
from . import config as default_config
# Prefer a user-supplied config.py found at FILEPATH (exported by .const via the
# star import) over the packaged defaults; restore sys.path afterwards.
sys.path.insert(1, FILEPATH)
try:
    import config
except ImportError:
    # No user config present; fall back to the bundled defaults.
    config = default_config
sys.path.pop(1)
def start():
    """Entry point: parse options, then run in foreground or as a daemon."""
    opt = parse_opt()
    xeH = xeHentai()
    if opt.daemon:
        if opt.interactive:
            # daemon mode has no controlling terminal; interactive prompts
            # would block, so they are ignored with a warning
            xeH.logger.warning(i18n.XEH_OPT_IGNORING_I)
        if os.name == "posix":
            pid = os.fork()
            if pid == 0:
                # child process: detach from the terminal and run the loop
                sys.stdin.close()
                sys.stdout = open("/dev/null", "w")
                sys.stderr = open("/dev/null", "w")
                return main(xeH, opt)
        elif os.name == "nt":
            # Windows has no fork(); daemonizing is unsupported
            return xeH.logger.error(i18n.XEH_PLATFORM_NO_DAEMON % os.name)
        else:
            return xeH.logger.error(i18n.XEH_PLATFORM_NO_DAEMON % os.name)
        # parent process: report the daemon's pid and return
        xeH.logger.info(i18n.XEH_DAEMON_START % pid)
    else:
        main(xeH, opt)
def main(xeH, opt):
    """Configure the downloader, queue tasks and poll until exit.

    Runs the task loop in a worker thread and watches xeH._exit; when URLs
    were given on the command line, exits automatically once they finish.
    """
    xeH.update_config(vars(opt))
    log = xeH.logger
    log.info(i18n.XEH_STARTED % xeH.verstr)
    if opt.cookie:
        xeH.set_cookie(opt.cookie)
    if opt.username and opt.key and not xeH.has_login:
        xeH.login_exhentai(opt.username, opt.key)
    if opt.interactive and not opt.daemon:
        try:
            # merge interactively-entered options over the CLI ones
            r = interactive(xeH)
            opt.__dict__.update(r)
            xeH.update_config(r)
        except (KeyboardInterrupt, SystemExit):
            log.info(i18n.XEH_CLEANUP)
            xeH._cleanup()
            return
    try:
        if opt.urls:
            for u in opt.urls:
                xeH.add_task(u.strip())
        # Thread(target = lambda:(time.sleep(0.618), setattr(xeH, "_exit", XEH_STATE_SOFT_EXIT))).start()
        Thread(target = xeH._task_loop, name = "main" ).start()
        # poll once per second until the downloader reaches the clean state
        while xeH._exit < XEH_STATE_CLEAN:
            # if specify urls, finished this task and exit xeHentai
            if opt.urls and not [k for k, v in xeH._all_tasks.items() if TASK_STATE_WAITING <= v.state < TASK_STATE_FINISHED]:
                xeH._exit = XEH_STATE_SOFT_EXIT
            time.sleep(1)
    except KeyboardInterrupt:
        log.info(i18n.XEH_CLEANUP)
        xeH._term_threads()
    except Exception as ex:
        log.error(i18n.XEH_CRITICAL_ERROR % traceback.format_exc())
        xeH._term_threads()
    else:
        # NOTE(review): SystemExit raised here propagates past the try below
        sys.exit(0) # this is mandatory for single task auto exit
    try:
        # we should call cleanup ourself because we break out of task_loop
        xeH._cleanup()
    except KeyboardInterrupt:
        pass
    # this is mandatory for ctrl+c kill
    os._exit(0)
''' -ro --redirect-norm 是否应用在线代理到已解析到的非原图,默认不启用
-f --force 即使超出配额也下载,默认为否
-j --no-jp-name 是否不使用日语命名,默认为否'''
def parse_opt():
    """Build the argument parser from bundled + user config defaults.

    Returns the parsed argparse Namespace; attribute names mirror the
    config keys consumed by xeHentai.update_config().
    """
    # user config (if any) overrides the bundled defaults
    _def = {k:v for k,v in default_config.__dict__.items() if not k.startswith("_")}
    _def.update({k:v for k,v in config.__dict__.items() if not k.startswith("_")})
    if not PY3K:
        # py2: paths from config are byte strings; decode them once here
        for k in ('dir', 'log_path'):
            _def[k] = _def[k].decode('utf-8')

    def _bool(v):
        # Fix for `type=bool`: argparse would pass the raw string to bool(),
        # so any non-empty value (even "False") parsed as True. Accept the
        # usual textual spellings of false instead.
        if isinstance(v, bool):
            return v
        return str(v).strip().lower() not in ('', '0', 'false', 'no', 'n', 'off')

    parser = argparse.ArgumentParser(description = i18n.XEH_OPT_DESC, epilog = i18n.XEH_OPT_EPILOG, add_help = False)
    # the followings are handled in cli
    parser.add_argument('-u', '--username', help = i18n.XEH_OPT_u)
    parser.add_argument('-k', '--key', help = i18n.XEH_OPT_k)
    parser.add_argument('-c', '--cookie', help = i18n.XEH_OPT_c)
    parser.add_argument('-i', '--interactive', action = 'store_true', default = False,
        help = i18n.XEH_OPT_i)
    # the followings are passed to xeHentai
    parser.add_argument('urls', metavar = 'url', type = str, nargs = '*',
        help = i18n.XEH_OPT_URLS)
    parser.add_argument('-o', '--download-ori',
        action = 'store_true', default = _def['download_ori'],
        help = i18n.XEH_OPT_o)
    parser.add_argument('-t', '--thread', type = int, metavar = 'N',
        default = _def['download_thread_cnt'], dest = 'download_thread_cnt',
        help = i18n.XEH_OPT_t)
    # parser.add_argument('-f', '--fast-scan', action = 'store_true', default = _def.fast_scan,
    #     help = i18n.XEH_OPT_f)
    parser.add_argument('-d', '--dir', default = os.path.abspath(_def['dir']),
        help = i18n.XEH_OPT_d)
    parser.add_argument('--daemon', action = 'store_true', default = _def['daemon'],
        help = i18n.XEH_OPT_daemon)
    parser.add_argument('-l', '--logpath', metavar = '/path/to/eh.log',
        default = os.path.abspath(_def['log_path']), help = i18n.XEH_OPT_l)
    parser.add_argument('-p', '--proxy', action = 'append', default = _def['proxy'],
        help = i18n.XEH_OPT_p)
    parser.add_argument('--proxy-image', action = 'store_true', default = _def['proxy_image'],
        help = i18n.XEH_OPT_proxy_image)
    parser.add_argument('-v', '--verbose', action = 'count', default = _def['log_verbose'],
        help = i18n.XEH_OPT_v)
    parser.add_argument('--rpc-interface', metavar = "ADDR", default = _def['rpc_interface'],
        help = i18n.XEH_OPT_rpc_interface)
    parser.add_argument('--rpc-port', type = int, metavar = "PORT", default = _def['rpc_port'],
        help = i18n.XEH_OPT_rpc_port)
    parser.add_argument('--rpc-secret', metavar = "...", default = _def['rpc_secret'],
        help = i18n.XEH_OPT_rpc_secret)
    # boolean-valued options: use the tolerant parser instead of bool()
    parser.add_argument('-r', '--rename-ori', type = _bool, metavar = "BOOL", default = _def['rename_ori'],
        help = i18n.XEH_OPT_r)
    parser.add_argument('-a', '--archive', type = _bool, metavar = "BOOL", default = _def['make_archive'],
        dest = 'make_archive', help = i18n.XEH_OPT_a)
    parser.add_argument('-h','--help', action = 'help', help = i18n.XEH_OPT_h)
    parser.add_argument('--version', action = 'version', version = '%s v%.3f' % (SCRIPT_NAME, __version__),
        help = i18n.XEH_OPT_version)
    args = parser.parse_args()
    return args
def interactive(xeH):
    """Prompt on the console for login, URLs and download options.

    Returns a dict of config overrides to merge into the CLI options.
    """
    def _readline(x, default = ""):
        # Show prompt *x* (interpolating *default* into it when given) and
        # return the entered text, falling back to the default when empty.
        if default:
            x = x % default
        _ = input(logger.safestr(x)) if PY3K else raw_input(logger.safestr(x))
        _ = _ or default
        if PY3K:
            # py3 input() already returns unicode; calling .decode() on it
            # raised AttributeError before this fix
            return _
        # py2 raw_input() returns bytes; decode using the terminal locale
        return _.decode(locale.getdefaultlocale()[1] or 'utf-8')
    if not xeH.has_login and _readline(i18n.PS_LOGIN) == 'y':
        uname = pwd = ""
        while not uname:
            uname = _readline(i18n.PS_USERNAME)
        while not pwd:
            pwd = _readline(i18n.PS_PASSWD)
        xeH.login_exhentai(uname, pwd)
    url = proxy = ""
    while not url:
        url = _readline(i18n.PS_URL)
    url = url.split(",")
    download_ori = _readline(i18n.PS_DOWNLOAD_ORI, 'y' if xeH.cfg['download_ori'] else 'n') == 'y'
    proxy = _readline(i18n.PS_PROXY).strip()
    proxy = [proxy] if proxy else xeH.cfg['proxy']
    __def_dir = os.path.abspath(xeH.cfg['dir'])
    # if not PY3K:
    #     __def_dir = __def_dir.decode(sys.getfilesystemencoding())
    _dir = _readline(i18n.PS_DOWNLOAD_DIR % __def_dir) or xeH.cfg['dir']
    rename_ori = _readline(i18n.PS_RENAME_ORI, 'y' if xeH.cfg['rename_ori'] else 'n') == 'y'
    make_archive = _readline(i18n.PS_MAKE_ARCHIVE, 'y' if xeH.cfg['make_archive'] else 'n') == 'y'
    return {'urls': url, 'proxy': proxy, 'download_ori': download_ori, 'dir': _dir, 'rename_ori':rename_ori,
            'make_archive': make_archive, 'save_tasks': False}
| {
"content_hash": "628aefe865c86de54c5d7171d885df8d",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 126,
"avg_line_length": 44.456647398843934,
"alnum_prop": 0.5670263944870628,
"repo_name": "ChristophorusX/ehentai-downloader",
"id": "b3c8092d42305c01855738fb0cec231d36ddf37b",
"size": "7890",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "xeHentai/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "188057"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Orders VirtualChassis records by their master device, fixes the
    # "virtual chassis" plural, and prevents deleting a Device while it is
    # the master of a virtual chassis (PROTECT).

    dependencies = [
        ('dcim', '0054_site_status_timezone_description'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='virtualchassis',
            options={'ordering': ['master'], 'verbose_name_plural': 'virtual chassis'},
        ),
        migrations.AlterField(
            model_name='virtualchassis',
            name='master',
            field=models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, related_name='vc_master_for', to='dcim.Device'),
        ),
    ]
| {
"content_hash": "36ef0218a8d466f4f92f432ddd6f7a9b",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 132,
"avg_line_length": 30.61904761904762,
"alnum_prop": 0.6205287713841369,
"repo_name": "lampwins/netbox",
"id": "ab23f403f7a75df43f301ae44a395f9def13af99",
"size": "716",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "netbox/dcim/migrations/0055_virtualchassis_ordering.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "189339"
},
{
"name": "HTML",
"bytes": "570800"
},
{
"name": "JavaScript",
"bytes": "326125"
},
{
"name": "Python",
"bytes": "1815169"
},
{
"name": "Shell",
"bytes": "2786"
}
],
"symlink_target": ""
} |
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
__version__ = "0.1.0"
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.6.3"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.intersphinx",
    "sphinx.ext.coverage",
    "sphinx.ext.napoleon",
    "sphinx.ext.todo",
    "sphinx.ext.viewcode",
]

# autodoc/autosummary flags
# Include both the class docstring and __init__'s docstring in class docs.
autoclass_content = "both"
# NOTE(review): autodoc_default_flags is deprecated in newer Sphinx in
# favor of autodoc_default_options — confirm against the pinned version.
autodoc_default_flags = ["members"]
autosummary_generate = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# Allow markdown includes (so releases.md can include CHANGELOG.md)
# http://www.sphinx-doc.org/en/master/markdown.html
source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"google-cloud-trace"
copyright = u"2017, Google"
author = u"Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for Python",
"github_user": "googleapis",
"github_repo": "google-cloud-python",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-trace-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"google-cloud-trace.tex",
u"google-cloud-trace Documentation",
author,
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, "google-cloud-trace", u"google-cloud-trace Documentation", [author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# One Texinfo document: (start file, target name, title, author,
# dir menu entry, description, category).
texinfo_documents = [
    (
        master_doc,
        "google-cloud-trace",
        u"google-cloud-trace Documentation",
        author,
        "google-cloud-trace",
        # The original contained the unexpanded generator placeholder
        # "{metadata.shortName}", which leaked verbatim into the output.
        "GAPIC library for the Cloud Trace v2 service",
        "APIs",
    ),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("http://python.readthedocs.org/en/latest/", None),
"gax": ("https://gax-python.readthedocs.org/en/latest/", None),
"google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
"google-gax": ("https://gax-python.readthedocs.io/en/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest", None),
"grpc": ("https://grpc.io/grpc/python/", None),
"requests": ("https://requests.kennethreitz.org/en/master/", None),
"fastavro": ("https://fastavro.readthedocs.io/en/stable/", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
}
# Napoleon settings (Google/NumPy-style docstring parsing)
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
| {
"content_hash": "affbf8b55cde2b34fe4b19f546c844a9",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 88,
"avg_line_length": 33.27906976744186,
"alnum_prop": 0.6906009783368274,
"repo_name": "tswast/google-cloud-python",
"id": "7e5c9d1b4f87cc5334f7b54ffd17e48cf0a442b3",
"size": "11818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trace/docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "33785371"
},
{
"name": "Shell",
"bytes": "9148"
}
],
"symlink_target": ""
} |
import re
def lexicology_get_all_synsets(input_dict):
    """Look up WordNet synsets for every entry in ``input_dict['words']``.

    Returns ``{'synsets': {word: [synset, ...]}}`` when the requested
    corpus is WordNet; for any other corpus the function returns None
    (same as the original implicit fall-through).
    """
    if input_dict['corpus'].lower() != 'wordnet':
        return None
    from nltk.corpus import wordnet as wn
    per_word = {w: wn.synsets(w) for w in input_dict['words']}
    return {'synsets': per_word}
def lexicology_get_word_synsets(input_dict):
    """Return ``{'synsets': [...]}`` for ``input_dict['word']`` via WordNet.

    Non-WordNet corpora yield None, matching the original fall-through.
    """
    corpus_name = input_dict['corpus'].lower()
    if corpus_name == 'wordnet':
        from nltk.corpus import wordnet as wn
        return {'synsets': wn.synsets(input_dict['word'])}
    return None
def read_string_in_slovene(input_dict):
    """Placeholder widget: accepts the input dict and produces no outputs."""
    return dict()
def lexicology_mesh_filter(input_dict):
    """Widget stub; the real filtering happens in the *_finished* callback.

    The returned placeholder string is kept byte-for-byte from the original.
    """
    return {'output_file': 'svoboden kot pticek na veji'}
def lexicology_mesh_filter_finished(postdata, input_dict, output_dict):
    # Build the term list for the MeSH categories the user selected in the
    # widget UI. When 'ngrams' is set, each multi-word term is expanded
    # into all of its contiguous word n-grams (n >= 2, or the single word
    # itself for one-word terms); otherwise terms are added verbatim.
    # NOTE(review): Python 2 syntax (print statements, xrange); the prints
    # look like debug leftovers — consider removing.
    import json
    from os.path import normpath, join, dirname
    ngrams = input_dict.get('ngrams')
    selected_categories=postdata.get('selected[]')
    # category -> [term, ...] mapping shipped alongside this module
    terms_per_category=json.load(open(normpath(join(dirname(__file__),'data/mesh_terms_per_category.json'))))
    terms=set()
    for category in selected_categories:
        for term in terms_per_category[category]:
            if ngrams:
                single_words=term.split(" ")
                print "SW:",single_words
                for n in range(2 if len(single_words)>1 else 1,len(single_words)+1):
                    grams = set([" ".join(single_words[i:i+n]) for i in xrange(len(single_words)-n+1)])
                    terms |= grams
                    print grams
            else:
                terms.add(term)
    return {'term_list':list(terms)}
| {
"content_hash": "70695edfa925743d9ba6ce78ff86b6df",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 109,
"avg_line_length": 29.826923076923077,
"alnum_prop": 0.6054158607350096,
"repo_name": "xflows/textflows",
"id": "57a6954e4ec789b21255949915d80d63e491e742",
"size": "1551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "workflows/lexicology/library.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "192012"
},
{
"name": "CSS",
"bytes": "76342"
},
{
"name": "HTML",
"bytes": "363446"
},
{
"name": "JavaScript",
"bytes": "794623"
},
{
"name": "Makefile",
"bytes": "385"
},
{
"name": "Prolog",
"bytes": "146760"
},
{
"name": "Python",
"bytes": "30267970"
},
{
"name": "Roff",
"bytes": "58306446"
},
{
"name": "Shell",
"bytes": "97"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class TexttemplateValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the `texttemplate` property of scattergl traces."""

    def __init__(self, plotly_name="texttemplate", parent_name="scattergl", **kwargs):
        # Callers may override these; otherwise fall back to schema defaults.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "calc")
        super(TexttemplateValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            **kwargs,
        )
| {
"content_hash": "ef853ed2e11476f589b38efae0748cb6",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 86,
"avg_line_length": 38.75,
"alnum_prop": 0.6258064516129033,
"repo_name": "plotly/plotly.py",
"id": "8b17b18e00ad9b6a2dd05bf1da0c01b802954afc",
"size": "465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scattergl/_texttemplate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
from flask import jsonify, request, session
from sqlalchemy.orm import subqueryload, undefer
from werkzeug.exceptions import BadRequest
from indico.core.db import db
from indico.core.db.sqlalchemy.colors import ColorTuple
from indico.core.db.sqlalchemy.protection import ProtectionMode, render_acl
from indico.core.permissions import get_principal_permissions, update_permissions
from indico.modules.events import EventLogRealm
from indico.modules.events.contributions import contribution_settings
from indico.modules.events.contributions.models.contributions import Contribution
from indico.modules.events.management.controllers.base import RHContributionPersonListMixin
from indico.modules.events.sessions.controllers.management import (RHManageSessionBase, RHManageSessionsActionsBase,
RHManageSessionsBase)
from indico.modules.events.sessions.forms import (MeetingSessionBlockForm, SessionForm, SessionProtectionForm,
SessionTypeForm)
from indico.modules.events.sessions.models.blocks import SessionBlock
from indico.modules.events.sessions.models.sessions import Session
from indico.modules.events.sessions.models.types import SessionType
from indico.modules.events.sessions.operations import (create_session, delete_session, update_session,
update_session_block)
from indico.modules.events.sessions.util import (generate_pdf_from_sessions, generate_spreadsheet_from_sessions,
render_session_type_row)
from indico.modules.events.sessions.views import WPManageSessions
from indico.modules.events.util import get_random_color, track_location_changes
from indico.modules.logs import LogKind
from indico.util.spreadsheets import send_csv, send_xlsx
from indico.web.flask.templating import get_template_module
from indico.web.flask.util import send_file
from indico.web.forms.base import FormDefaults
from indico.web.forms.colors import get_colors
from indico.web.forms.fields.principals import serialize_principal
from indico.web.forms.util import get_form_field_names
from indico.web.util import jsonify_data, jsonify_form, jsonify_template
def _get_session_list_args(event):
    # Template arguments for the session list: the event's sessions (with
    # attachment/contribution counts eagerly loaded to avoid N+1 queries),
    # the default color palette and the available session types.
    sessions = (Session.query.with_parent(event)
                .options(undefer('attachment_count'),
                         subqueryload('blocks').undefer('contribution_count'))
                .order_by(db.func.lower(Session.title))
                .all())
    types = [{'id': t.id, 'title': t.name} for t in event.session_types]
    return {'sessions': sessions, 'default_colors': get_colors(), 'types': types}
def _render_session_list(event):
    """Render the session-list partial for *event* as an HTML fragment."""
    template = get_template_module('events/sessions/management/_session_list.html')
    args = _get_session_list_args(event)
    return template.render_session_list(event, **args)
class RHSessionsList(RHManageSessionsBase):
    """Display the list of all sessions within the event."""

    def _process(self):
        raw_selected = request.args.get('selected')
        selected_entry = int(raw_selected) if raw_selected else None
        return WPManageSessions.render_template('management/session_list.html', self.event,
                                                selected_entry=selected_entry,
                                                **_get_session_list_args(self.event))
class RHCreateSession(RHManageSessionsBase):
    """Create a session in the event."""

    def _get_response(self, new_session):
        # Return the refreshed session data plus the new session's id so
        # the client can select/highlight it.
        sessions = [{'id': s.id, 'title': s.title, 'colors': s.colors} for s in self.event.sessions]
        return jsonify_data(sessions=sessions, new_session_id=new_session.id,
                            html=_render_session_list(self.event))

    def _process(self):
        # New sessions inherit the event's location by default.
        inherited_location = self.event.location_data
        inherited_location['inheriting'] = True
        default_duration = contribution_settings.get(self.event, 'default_duration')
        form = SessionForm(obj=FormDefaults(colors=get_random_color(self.event), location_data=inherited_location,
                                            default_contribution_duration=default_duration),
                           event=self.event)
        if form.validate_on_submit():
            new_session = create_session(self.event, form.data)
            return self._get_response(new_session)
        return jsonify_form(form)
class RHModifySession(RHManageSessionBase):
    """Edit an existing session."""

    def render_form(self, form):
        return jsonify_form(form)

    def _process(self):
        form = SessionForm(obj=self.session, event=self.event)
        if not form.validate_on_submit():
            return self.render_form(form)
        with track_location_changes():
            update_session(self.session, form.data)
        return jsonify_data(html=_render_session_list(self.event))
class RHDeleteSessions(RHManageSessionsActionsBase):
    """Delete multiple sessions at once."""

    def _process(self):
        for session_ in self.sessions:
            delete_session(session_)
        return jsonify_data(html=_render_session_list(self.event))
class RHManageSessionsExportBase(RHManageSessionsActionsBase):
    # Exports are read-only, so they remain available on locked events.
    ALLOW_LOCKED = True
class RHExportSessionsCSV(RHManageSessionsExportBase):
    """Export the list of sessions as a CSV file."""

    def _process(self):
        header_row, data_rows = generate_spreadsheet_from_sessions(self.sessions)
        return send_csv('sessions.csv', header_row, data_rows)
class RHExportSessionsExcel(RHManageSessionsExportBase):
    """Export the list of sessions as an XLSX workbook."""

    def _process(self):
        header_row, data_rows = generate_spreadsheet_from_sessions(self.sessions)
        return send_xlsx('sessions.xlsx', header_row, data_rows)
class RHExportSessionsPDF(RHManageSessionsExportBase):
    """Export the list of sessions as a PDF document."""

    def _process(self):
        document = generate_pdf_from_sessions(self.sessions)
        return send_file('sessions.pdf', document, 'application/pdf')
class RHSessionREST(RHManageSessionBase):
    """Perform update or removal of a session."""

    def _process_DELETE(self):
        delete_session(self.session)
        return jsonify_data(html=_render_session_list(self.event))

    def _process_PATCH(self):
        # Only `colors` and `type_id` may be patched; anything else is a 400.
        data = request.json
        updates = {}
        if set(data) - {'colors', 'type_id'}:
            raise BadRequest
        if 'colors' in data:
            colors = ColorTuple(**data['colors'])
            # restrict to the predefined palette
            if colors not in get_colors():
                raise BadRequest
            updates['colors'] = colors
        if 'type_id' in data:
            updates.update(self._get_session_type_updates(data['type_id']))
        update_session(self.session, updates)
        return jsonify()

    def _get_session_type_updates(self, type_id):
        # Translate a raw type_id (None clears the type) into update_session
        # kwargs, validating that the id belongs to this event.
        updates = {}
        if type_id is None:
            updates['type'] = None
        else:
            type_ = SessionType.query.with_parent(self.event).filter_by(id=type_id).first()
            if not type_:
                raise BadRequest('Invalid type id')
            if not self.session.type or type_id != self.session.type.id:
                updates['type'] = type_
        return updates
class RHSessionPersonList(RHContributionPersonListMixin, RHManageSessionsActionsBase):
    """List of persons in the session's contributions."""

    template = 'events/sessions/management/session_person_list.html'
    # Read-only view; keep it available on locked events.
    ALLOW_LOCKED = True

    @property
    def _membership_filter(self):
        # Restrict the mixin's contribution query to the selected sessions.
        session_ids = {s.id for s in self.sessions}
        return Contribution.session_id.in_(session_ids)
class RHSessionProtection(RHManageSessionBase):
    """Manage session protection."""

    def _process(self):
        form = SessionProtectionForm(obj=FormDefaults(**self._get_defaults()), session=self.session,
                                     prefix='session-protection-')
        if form.validate_on_submit():
            update_permissions(self.session, form)
            update_session(self.session, {'protection_mode': form.protection_mode.data})
            return jsonify_data(flash=False, html=_render_session_list(self.event))
        return jsonify_template('events/management/protection_dialog.html', form=form)

    def _get_defaults(self):
        # Serialize the current ACL into (principal, permissions) pairs for
        # the form; entries that carry no permissions are dropped.
        permissions = [[serialize_principal(p.principal), list(get_principal_permissions(p, Session))]
                       for p in self.session.acl_entries]
        permissions = [item for item in permissions if item[1]]
        return {'permissions': permissions, 'protection_mode': self.session.protection_mode}
class RHSessionACL(RHManageSessionBase):
    """Display the ACL of the session."""

    def _process(self):
        # Read-only rendering of the session's access control list.
        return render_acl(self.session)
class RHSessionACLMessage(RHManageSessionBase):
    """Render the ACL-inheritance message for a given protection mode."""

    def _process(self):
        protection_mode = ProtectionMode[request.args['mode']]
        return jsonify_template('forms/protection_field_acl_message.html', object=self.session,
                                mode=protection_mode, endpoint='sessions.acl')
class RHManageSessionBlock(RHManageSessionBase):
    """Manage a block of a session."""

    normalize_url_spec = {
        'locators': {
            lambda self: self.session_block
        }
    }

    def _process_args(self):
        RHManageSessionBase._process_args(self)
        self.session_block = SessionBlock.get_or_404(request.view_args['block_id'])

    def _process(self):
        form = MeetingSessionBlockForm(obj=FormDefaults(**self._get_form_defaults()), event=self.event,
                                       session_block=self.session_block)
        if form.validate_on_submit():
            # Form fields are prefixed with 'session_' / 'block_'; strip the
            # prefix (8 resp. 6 chars) to get the model attribute names.
            session_data = {k[8:]: v for k, v in form.data.items() if k in form.session_fields}
            block_data = {k[6:]: v for k, v in form.data.items() if k in form.block_fields}
            update_session(self.session, session_data)
            with track_location_changes():
                update_session_block(self.session_block, block_data)
            return jsonify_data(flash=False)
        # Invalid form: prevent the request handler from committing changes.
        self.commit = False
        return jsonify_template('events/forms/session_block_form.html', form=form, block=self.session_block)

    def _get_form_defaults(self):
        # Build the prefixed form defaults from session/block attributes
        # (inverse of the prefix-stripping done in _process).
        fields = get_form_field_names(MeetingSessionBlockForm)
        defaults = {}
        defaults.update((name, getattr(self.session, name[8:])) for name in fields if name.startswith('session_'))
        defaults.update((name, getattr(self.session_block, name[6:])) for name in fields if name.startswith('block_'))
        return defaults
class RHSessionBlocks(RHManageSessionBase):
    # Render the list of blocks belonging to a session.
    def _process(self):
        return jsonify_template('events/sessions/management/session_blocks.html', sess=self.session)
class RHManageSessionTypes(RHManageSessionsBase):
    """Dialog to manage the session types of an event."""

    def _process(self):
        event_types = self.event.session_types
        return jsonify_template('events/sessions/management/types_dialog.html',
                                event=self.event, types=event_types)
class RHManageSessionTypeBase(RHManageSessionsBase):
    """Base handler for managing a single session type of an event."""

    normalize_url_spec = {
        'locators': {
            lambda self: self.session_type
        }
    }

    def _process_args(self):
        # Resolve the event first, then the requested type (404 if absent).
        RHManageSessionsBase._process_args(self)
        type_id = request.view_args['session_type_id']
        self.session_type = SessionType.get_or_404(type_id)
class RHEditSessionType(RHManageSessionTypeBase):
    """Dialog to edit a SessionType."""

    def _process(self):
        form = SessionTypeForm(event=self.event, obj=self.session_type)
        if not form.validate_on_submit():
            return jsonify_form(form)
        # Remember the name before populate_obj(), so the log entry refers
        # to the original name even when the submission renamed the type.
        old_name = self.session_type.name
        form.populate_obj(self.session_type)
        db.session.flush()
        self.event.log(EventLogRealm.management, LogKind.change, 'Sessions',
                       f'Updated type: {old_name}', session.user)
        return jsonify_data(html_row=render_session_type_row(self.session_type),
                            flash=False)
class RHCreateSessionType(RHManageSessionsBase):
    """Dialog to add a SessionType."""

    def _process(self):
        form = SessionTypeForm(event=self.event)
        if not form.validate_on_submit():
            return jsonify_form(form)
        new_type = SessionType()
        form.populate_obj(new_type)
        self.event.session_types.append(new_type)
        # Flush so the new type gets its id before being serialized below.
        db.session.flush()
        self.event.log(EventLogRealm.positive if False else EventLogRealm.management,
                       LogKind.positive, 'Sessions',
                       f'Added type: {new_type.name}', session.user)
        all_types = [{'id': t.id, 'title': t.name}
                     for t in self.event.session_types]
        return jsonify_data(types=all_types, new_type_id=new_type.id,
                            html_row=render_session_type_row(new_type))
class RHDeleteSessionType(RHManageSessionTypeBase):
    """Dialog to delete a SessionType."""

    def _process(self):
        # Capture the name BEFORE deleting: after delete()+flush() the ORM
        # instance is in the "deleted" state and attribute access may trigger
        # a refresh that fails (the row is gone). RHEditSessionType follows
        # the same capture-first pattern.
        name = self.session_type.name
        db.session.delete(self.session_type)
        db.session.flush()
        self.event.log(EventLogRealm.management, LogKind.negative, 'Sessions',
                       f'Deleted type: {name}', session.user)
        return jsonify_data(flash=False)
| {
"content_hash": "710203158f83ac98fa05b3818f8c685f",
"timestamp": "",
"source": "github",
"line_count": 320,
"max_line_length": 118,
"avg_line_length": 41.59375,
"alnum_prop": 0.6588279489105935,
"repo_name": "indico/indico",
"id": "e2716bf5f8c2273dc16751b4bf09cb6a06844146",
"size": "13524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indico/modules/events/sessions/controllers/management/sessions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "33289"
},
{
"name": "HTML",
"bytes": "1420471"
},
{
"name": "JavaScript",
"bytes": "2362355"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "5550085"
},
{
"name": "SCSS",
"bytes": "486043"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23435"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
from ansible.utils.vars import merge_hash
class ActionModule(ActionBase):
    """Default action: run the task's module on the target host as-is."""

    def run(self, tmp=None, task_vars=None):
        if task_vars is None:
            task_vars = dict()
        results = super(ActionModule, self).run(tmp, task_vars)

        # Drop the recorded module arguments: modules may hide values
        # because of no_log.
        del results['invocation']['module_args']

        module_result = self._execute_module(tmp=tmp, task_vars=task_vars)
        results = merge_hash(results, module_result)

        # Strip special fields that only the executor engine may set. This
        # happens only in the 'normal' action; other action plugins may set
        # them deliberately. Modules must not decide that running them fires
        # notify handlers -- that is for the playbook to decide.
        for internal_field in ('_ansible_notify',):
            results.pop(internal_field, None)

        return results
| {
"content_hash": "89762b50a4d24d24455cbfd6e1665bc6",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 89,
"avg_line_length": 38.07142857142857,
"alnum_prop": 0.650093808630394,
"repo_name": "abtreece/ansible",
"id": "932ad8309c30fa192390620125f9d36c037d7d58",
"size": "1774",
"binary": false,
"copies": "63",
"ref": "refs/heads/stable-2.2",
"path": "lib/ansible/plugins/action/normal.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from utils import colors
# Segments colors.
# Each prompt segment defines a background (_BG) and foreground (_FG) color,
# drawn from the shared palette in utils.colors. Commented-out values after
# some entries are alternative palette choices kept for reference.

# user@host segment
USERATHOST_BG = colors.PASTEL_BLUE #colors.SMERALD
USERATHOST_FG = colors.WHITE
# SSH indicator segment
SSH_BG = colors.LIGHT_ORANGE
SSH_FG = colors.WHITE
# current-directory segment
CURRENTDIR_BG = colors.EXTRA_DARK_GREY
CURRENTDIR_FG = colors.LIGHT_GREY
# read-only marker segment
READONLY_BG = colors.LIGHT_GREY
READONLY_FG = colors.RED
# last-command exit-code segment
EXITCODE_BG = colors.RED
EXITCODE_FG = colors.WHITE
# padding uses the raw value 0 rather than a palette entry
PADDING_BG = 0 #colors.EXTRA_DARK_GREY
# git status segments: untracked / not staged / all staged / clean
GIT_UNTRACKED_FILES_BG = colors.PINKISH_RED
GIT_UNTRACKED_FILES_FG = colors.NEARLY_WHITE_GREY
GIT_CHANGES_NOT_STAGED_BG = colors.PINKISH_RED
GIT_CHANGES_NOT_STAGED_FG = colors.NEARLY_WHITE_GREY
GIT_ALL_CHANGES_STAGED_BG = colors.LIGHT_ORANGE
GIT_ALL_CHANGES_STAGED_FG = colors.DARKER_GREY
GIT_CLEAN_BG = colors.PISTACHIO
GIT_CLEAN_FG = colors.DARKER_GREY
# Python virtualenv segment
VENV_BG = colors.PASTEL_BLUE #colors.SMERALD
VENV_FG = colors.EXTRA_LIGHT_GREY
# background-jobs segment
JOBS_BG = colors.DARK_PURPLE
JOBS_FG = colors.WHITE
# clock segment
TIME_BG = colors.DARKER_GREY
TIME_FG = colors.MID_DARK_GREY
| {
"content_hash": "dcff0fc49f44ea97f65d0048f634c682",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 52,
"avg_line_length": 25.07894736842105,
"alnum_prop": 0.7691500524658972,
"repo_name": "egoddard/promptastic",
"id": "76dd858101f7d4a4e18ffb826cfc9a73da690ed1",
"size": "953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "themes/custom.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "34976"
}
],
"symlink_target": ""
} |
"""Visitor restricting traversal to only the public tensorflow API."""
import re
import six
from tensorflow.python.util import tf_inspect
class PublicAPIVisitor(object):
  """Visitor to use with `traverse` that only visits the public TF API."""

  def __init__(self, visitor):
    """Constructor.

    `visitor` should be a callable suitable as a visitor for `traverse`. It
    is invoked only for members of the public TensorFlow API.

    Args:
      visitor: A visitor to call for the public API.
    """
    self._visitor = visitor
    self._root_name = 'tf'

    # Names suppressed entirely, keyed by the dotted path of their parent.
    self._private_map = {
        'tf': ['compiler', 'core', 'python'],
        # Some implementations have this internal module that we shouldn't
        # expose.
        'tf.flags': ['cpp_flags'],
    }

    # Names that are visited but never descended into, keyed by parent path.
    # Usually system modules exposed through platforms for compatibility.
    self._do_not_descend_map = {
        'tf': [
            'examples',
            'flags',  # Don't add flags
            # TODO(drpng): This can be removed once sealed off.
            'platform',
            # TODO(drpng): This can be removed once sealed.
            'pywrap_tensorflow',
            # TODO(drpng): This can be removed once sealed.
            'user_ops',
            'tools',
            'tensorboard',
        ],
        ## Everything below here is legitimate.
        # It'll stay, but it's not officially part of the API.
        'tf.app': ['flags'],
        # Imported for compatibility between py2/3.
        'tf.test': ['mock'],
    }

  @property
  def private_map(self):
    """A map from parents to symbols that should not be included at all.

    May be edited, but not once traversal has begun.

    Returns:
      The map marking symbols to not include.
    """
    return self._private_map

  @property
  def do_not_descend_map(self):
    """A map from parents to symbols that should not be descended into.

    May be edited, but not once traversal has begun.

    Returns:
      The map marking symbols to not explore.
    """
    return self._do_not_descend_map

  def set_root_name(self, root_name):
    """Override the default root name of 'tf'."""
    self._root_name = root_name

  def _is_private(self, path, name, obj=None):
    """Return whether a name is private."""
    # TODO(wicke): Find out what names to exclude.
    del obj  # Unused.
    if path in self._private_map and name in self._private_map[path]:
      return True
    name_str = six.ensure_str(name)
    # Single-underscore names are private; dunder names are not, except for
    # a few interpreter internals listed below.
    if name_str.startswith('_') and not re.match('__.*__$', name_str):
      return True
    return name in ['__base__', '__class__', '__next_in_mro__']

  def _do_not_descend(self, path, name):
    """Safely queries if a specific fully qualified name should be excluded."""
    return name in self._do_not_descend_map.get(path, ())

  def __call__(self, path, parent, children):
    """Visitor interface, see `traverse` for details."""
    # Avoid long waits in cases of pretty unambiguous failure.
    if tf_inspect.ismodule(parent) and len(
        six.ensure_str(path).split('.')) > 10:
      raise RuntimeError('Modules nested too deep:\n%s.%s\n\nThis is likely a '
                         'problem with an accidental public import.' %
                         (self._root_name, path))

    # Includes self._root_name
    if path:
      full_path = '.'.join([self._root_name, path])
    else:
      full_path = self._root_name

    # Prune private members in place before the visitor sees the children.
    for child_name, child in list(children):
      if self._is_private(full_path, child_name, child):
        children.remove((child_name, child))

    self._visitor(path, parent, children)

    # After visiting, prune the visible members we must not descend into.
    for child_name, child in list(children):
      if self._do_not_descend(full_path, child_name):
        children.remove((child_name, child))
| {
"content_hash": "9b73cd9d354ec071067f11664c6ef338",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 80,
"avg_line_length": 32.801587301587304,
"alnum_prop": 0.6102105008468425,
"repo_name": "Intel-Corporation/tensorflow",
"id": "2aa54816b56f9ca14088e344992568c0b12b4ae4",
"size": "4850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/tools/common/public_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "183416"
},
{
"name": "C++",
"bytes": "24549804"
},
{
"name": "CMake",
"bytes": "160888"
},
{
"name": "Go",
"bytes": "849081"
},
{
"name": "HTML",
"bytes": "681293"
},
{
"name": "Java",
"bytes": "307123"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64142"
},
{
"name": "Protocol Buffer",
"bytes": "218430"
},
{
"name": "Python",
"bytes": "21875003"
},
{
"name": "Shell",
"bytes": "337846"
},
{
"name": "TypeScript",
"bytes": "849555"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
# Packaging metadata for photomanip ("average like, all the photos").
setup(name='photomanip',
      version='0.2.0',
      description='average like, all the photos',
      author='Andrew Catellier',
      author_email='andrew@thisisreal.net',
      packages=find_packages(),
      include_package_data=True,
      # Ship any bundled .jpg files found inside the packages.
      package_data={'': ['*.jpg']}
      )
| {
"content_hash": "6755504f6a9f47d468c60e9eb540b42e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 49,
"avg_line_length": 29.818181818181817,
"alnum_prop": 0.6310975609756098,
"repo_name": "whlteXbread/photoManip",
"id": "a700a5c3a64e085d39ca5bce2e41d336d57e2022",
"size": "328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59817"
}
],
"symlink_target": ""
} |
from mock import Mock
from mock import mock_open
from mock import patch
from contextlib import nested
from nailgun.test.base import BaseIntegrationTest
from nailgun.utils import reverse
class TestVersionHandler(BaseIntegrationTest):
    """Checks the /version API endpoint output."""

    @patch('nailgun.api.v1.handlers.version.settings.VERSION', {
        "release": "0.1b",
        "feature_groups": [],
    })
    def test_version_handler(self):
        # Stub out filesystem access: glob() returns one fake yaml file and
        # open() yields canned data.
        glob_mock = Mock(return_value=["test.yaml"])
        open_mock = mock_open(read_data='test_data')
        with patch('nailgun.utils.glob.glob', glob_mock):
            with patch('__builtin__.open', open_mock, create=True):
                resp = self.app.get(
                    reverse('VersionHandler'),
                    headers=self.default_headers
                )
                self.assertEqual(200, resp.status_code)
                self.assertEqual(
                    resp.json_body,
                    {
                        "release": "0.1b",
                        "feature_groups": [],
                        "auth_required": True,
                    }
                )
| {
"content_hash": "e349f5b7bf3372ea7f779e09fdac809a",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 64,
"avg_line_length": 26.70731707317073,
"alnum_prop": 0.5123287671232877,
"repo_name": "eayunstack/fuel-web",
"id": "6583250d5ee85b21ca9309a25c6cc8fea1d5ce51",
"size": "1730",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nailgun/nailgun/test/unit/test_version_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "97543"
},
{
"name": "HTML",
"bytes": "2844"
},
{
"name": "JavaScript",
"bytes": "815534"
},
{
"name": "Mako",
"bytes": "1943"
},
{
"name": "Python",
"bytes": "3710735"
},
{
"name": "Ruby",
"bytes": "13649"
},
{
"name": "Shell",
"bytes": "22527"
}
],
"symlink_target": ""
} |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Anscombe'] , ['LinearTrend'] , ['Seasonal_DayOfWeek'] , ['NoAR'] ); | {
"content_hash": "c77abec1925a4eb828abda7b4c868ab2",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 90,
"avg_line_length": 40.75,
"alnum_prop": 0.7177914110429447,
"repo_name": "antoinecarme/pyaf",
"id": "fe3e8346e3fb4ef3d25667c0448da8ef0a61bf7e",
"size": "163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Anscombe/model_control_one_enabled_Anscombe_LinearTrend_Seasonal_DayOfWeek_NoAR.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str, bytes, dict, int
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
from pattern.en import parse, Text
# The easiest way to analyze the output of the parser is to create a Text.
# A Text is a "parse tree" of linked Python objects.
# A Text is essentially a list of Sentence objects.
# Each Sentence is a list of Word objects.
# Each Word can be part of a Chunk object, accessible with Word.chunk.
s = "I eat pizza with a silver fork."
s = parse(s)  # tag the sentence (part-of-speech tags, chunks, ...)
s = Text(s)   # wrap the tagged string in a navigable parse tree
# You can also use the parsetree() function,
# which is the equivalent of Text(parse()).
print(s[0].words) # A list of all the words in the first sentence.
print(s[0].chunks) # A list of all the chunks in the first sentence.
print(s[0].chunks[-1].words)
print("")
# Walk every word of every sentence and show its tag, chunk and
# prepositional noun phrase (pnp), if any.
for sentence in s:
    for word in sentence:
        print(word.string,
              word.type,
              word.chunk,
              word.pnp)
# A Text can be exported as an XML-string (among other).
print("")
print(s.xml)
| {
"content_hash": "f9ba08b2892fdae290a2249385da00ba",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 74,
"avg_line_length": 29.157894736842106,
"alnum_prop": 0.6759927797833934,
"repo_name": "clips/pattern",
"id": "2450601c79672ec3f80e5282d5122741d401c7c7",
"size": "1108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/03-en/04-tree.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "606"
},
{
"name": "JavaScript",
"bytes": "229360"
},
{
"name": "Python",
"bytes": "1562007"
}
],
"symlink_target": ""
} |
import unittest
import paddle
from test_collective_api_base import TestDistBase
paddle.enable_static()
class TestCollectiveAllreduceAPI(TestDistBase):
    """Exercise the collective allreduce API on each available backend."""

    def _setup_config(self):
        # No extra configuration needed for these cases.
        pass

    def test_allreduce_nccl(self):
        if paddle.fluid.core.is_compiled_with_cuda():
            self.check_with_place(
                "collective_allreduce_api.py", "allreduce", "nccl"
            )

    def test_allreduce_bkcl(self):
        if paddle.fluid.core.is_compiled_with_xpu():
            self.check_with_place(
                "collective_allreduce_api.py", "allreduce", "bkcl"
            )

    def test_allreduce_gloo(self):
        self.check_with_place(
            "collective_allreduce_api.py", "allreduce", "gloo", "2"
        )

    def test_allreduce_nccl_dygraph(self):
        dtypes = [
            "float16", "float32", "float64", "int32",
            "int64", "int8", "uint8", "bool",
        ]
        # bfloat16 needs a sufficiently recent NCCL.
        if self._nccl_version >= 2100:
            dtypes.append("bfloat16")
        for current_dtype in dtypes:
            self.check_with_place(
                "collective_allreduce_api_dygraph.py",
                "allreduce",
                "nccl",
                static_mode="0",
                dtype=current_dtype,
            )

    def test_allreduce_gloo_dygraph(self):
        dtypes = [
            "float16", "float32", "float64", "int32",
            "int64", "int8", "uint8", "bool", "bfloat16",
        ]
        for current_dtype in dtypes:
            self.check_with_place(
                "collective_allreduce_api_dygraph.py",
                "allreduce",
                "gloo",
                "2",
                static_mode="0",
                dtype=current_dtype,
            )
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "e1dacd51a6be4d8a9ebdb4568a52fb28",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 67,
"avg_line_length": 25.657894736842106,
"alnum_prop": 0.4723076923076923,
"repo_name": "luotao1/Paddle",
"id": "6b2b9c0ade8f401cadd40af0f299bfff8fc34f09",
"size": "2563",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/collective/test_collective_allreduce_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36771446"
},
{
"name": "CMake",
"bytes": "903079"
},
{
"name": "Cuda",
"bytes": "5200715"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36248258"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553175"
}
],
"symlink_target": ""
} |
"""
WSGI config for bhs_sales project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
# Select the Django settings module and the django-configurations class.
# setdefault() means values already present in the environment win.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bhs_sales.settings")
os.environ.setdefault("DJANGO_CONFIGURATION", "Dev")
from configurations.wsgi import get_wsgi_application
# Module-level WSGI callable picked up by the application server.
application = get_wsgi_application()
| {
"content_hash": "c343c1940acbd7f3e6199fabbde0b705",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 78,
"avg_line_length": 29.933333333333334,
"alnum_prop": 0.7750556792873051,
"repo_name": "Gnewt/bhs_sales",
"id": "a5da1e093eaae2d68aef70aa44cb14fcd74ec0ef",
"size": "449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bhs_sales/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2510"
},
{
"name": "Python",
"bytes": "16901"
},
{
"name": "Shell",
"bytes": "1513"
}
],
"symlink_target": ""
} |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import re
import xml.dom.minidom
import time
import zipfile
# Verbose progress output while parsing input files.
DEBUG=False
# Passed to parseOOData(); when True the inputs appear to be treated as raw
# source spectra without dark/reference data -- confirm against parseOOData.
SourceSpectra=False
#SourceSpectra=True
def findNode(node, path):
    """Descend through *path* (a list of tag names) below *node*.

    Follows the first matching element for each tag except the last, and
    returns the list of elements matching the final tag.

    The original implementation called ``path.pop()``, mutating the caller's
    list as a side effect; this version leaves *path* untouched.
    """
    last = path[-1]
    o = node
    for tag in path[:-1]:
        o = o.getElementsByTagName(tag)[0]
    return o.getElementsByTagName(last)
def getParents(node):
    """Return the (sourceSpectra, certificates) children of node's first child.

    Either entry is None when the corresponding element is absent.
    """
    source = None
    processed = None
    for child in node.firstChild.childNodes:
        tag = child.nodeName
        if tag == 'sourceSpectra':
            source = child
        elif tag == 'certificates':
            processed = child
    return (source, processed)
def getText(node):
    """Return the node's text as UTF-8 bytes with commas blanked, or None.

    None is returned when the first child is missing or not a text node.
    """
    child = node.firstChild
    if child is None or child.nodeType != xml.dom.Node.TEXT_NODE:
        return None
    return child.data.replace(',', ' ').encode('utf8')
def getWavelengths(node):
    """Return the wavelength axis (channelWavelengths/double) as floats."""
    doubles = findNode(node, ['channelWavelengths', 'double'])
    return [float(getText(d)) for d in doubles]
def getDarkSpectrum(node):
    """Return the dark spectrum pixel values as floats."""
    pixels = findNode(node, ['darkSpectrum', 'pixelValues', 'double'])
    return [float(getText(p)) for p in pixels]
def getRefSpectrum(node):
    """Return the reference spectrum pixel values as floats."""
    pixels = findNode(node, ['referenceSpectrum', 'pixelValues', 'double'])
    return [float(getText(p)) for p in pixels]
def getSourceSpectrum(node):
    """Return the raw source spectrum (pixelValues/double) as floats."""
    pixels = findNode(node, ['pixelValues', 'double'])
    return [float(getText(p)) for p in pixels]
def getProcessedSpectrum(node):
    """Return the processed spectrum as floats.

    Scans the 'processedPixels' elements and stops at the first one that
    actually carries pixel data (more than 10 child nodes); if none
    qualifies, the last element scanned is used.
    """
    candidates = node.getElementsByTagName('processedPixels')
    for o in candidates:
        if len(o.childNodes) > 10:
            break
    pixel_nodes = o.getElementsByTagName('double')
    return [float(getText(p)) for p in pixel_nodes]
def getTimestamp(node):
    """Return the acquisition time (milliTime text) of the source spectrum."""
    stamps = findNode(node, ['sourceSpectra', 'acquisitionTime', 'milliTime'])
    return getText(stamps[0])
def getMetadata(node):
    """Collect acquisition metadata from the first 'sourceSpectra' element."""
    spectra = node.getElementsByTagName('sourceSpectra')[0]

    def text_of(tag):
        # Text content of the first child element with the given tag.
        return getText(spectra.getElementsByTagName(tag)[0])

    meta = {}
    meta['numberOfPixels'] = int(text_of('spectrometerNumberOfPixels'))
    meta['maxIntensity'] = float(text_of('spectrometerMaximumIntensity'))
    meta['Firmware'] = text_of('spectrometerFirmwareVersion')
    meta['Serialnumber'] = text_of('spectrometerSerialNumber')
    meta['nDarkPixels'] = int(text_of('spectrometerNumberOfDarkPixels'))
    meta['user'] = text_of('userName')
    meta['timestamp'] = getTimestamp(node)
    # The timestamp is in milliseconds since the epoch.
    meta['date'] = time.ctime(float(meta['timestamp']) / 1000.0)
    meta['integrationTime'] = float(text_of('integrationTime'))
    meta['boxcarWidth'] = int(text_of('boxcarWidth'))
    meta['saturated'] = text_of('saturated')
    meta['scansToAverage'] = int(text_of('scansToAverage'))
    return meta
def parseOOData(dom, source=False):
    """Extract metadata and spectra from a parsed OceanOptics document.

    Args:
        dom: parsed XML document (see parseFile()).
        source: when True, treat the file as a source-only capture and leave
            the dark/white references empty.

    The original code rebound the *source* parameter to the node returned by
    getParents(), so the flag was silently ignored and - because an Element
    is always truthy - the dark/reference spectra were never extracted. The
    node now has its own name and the flag is honored.
    """
    metadata = getMetadata(dom)
    (source_node, processed) = getParents(dom)
    metadata['lam'] = getWavelengths(source_node)
    if source:
        metadata['blackref'] = ()
        metadata['whiteref'] = ()
    else:
        metadata['blackref'] = getDarkSpectrum(processed)
        metadata['whiteref'] = getRefSpectrum(processed)
    metadata['Xsource'] = getSourceSpectrum(source_node)
    metadata['Xprocessed'] = getProcessedSpectrum(processed)
    return metadata
def writeSpectra(fid, X, descr):
    """Write one spectrum row to *fid*: descr then each value, tab-separated.

    Every field (including the last value) is followed by a tab; the row
    ends with a newline.
    """
    fields = ['%s' % descr] + ['%f' % value for value in X]
    fid.write('\t'.join(fields))
    fid.write('\t\n')
def writeDataVector(filename, v, title='lambda'):
    """Write *v* to *filename*, one '%f' value per line, after a title line.

    Uses a context manager so the file is closed even if writing fails
    (the sibling writeStrVector already follows this pattern).
    """
    with open(filename, 'w') as fid:
        fid.write(title)
        fid.write('\n')
        for value in v:
            fid.write('%f\n' % value)
def writeStrVector(filename, v, title='lambda'):
    """Write the entries of *v* to *filename*, one per line, after a title line."""
    lines = [title] + ['%s' % entry for entry in v]
    with open(filename, 'w') as fid:
        fid.write('\n'.join(lines))
        fid.write('\n')
def writeMeta(filename, meta):
    """Write a fixed subset of *meta* to *filename*, one aligned line per key.

    Each line has the key right-justified in 20 columns followed by ': value'.
    """
    names = [
        'Firmware',
        'Serialnumber',
        'user',
        'timestamp',
        'date',
        'numberOfPixels',
        'maxIntensity',
        'integrationTime',
        'boxcarWidth',
        'scansToAverage',
        'saturated',
    ]
    lines = ['%20s: %s\n' % (key, meta[key]) for key in names]
    with open(filename, 'w') as fid:
        fid.writelines(lines)
def parseFile(filename):
    """Parse an OceanOptics archive and return the XML DOM.

    Reads the first archive member whose name starts with 'ps_' (falling
    back to the last member listed when none matches), strips characters
    known to break the XML parser, and parses the remainder.
    """
    bad_chars = [chr(x) for x in (0xa0, 0x89, 0x80, 0xe2, 0xb0, 0x88, 0x9e)]
    archive = zipfile.ZipFile(filename, 'r')
    for name in archive.namelist():
        if name.startswith('ps_'):
            break
    raw = archive.read(name)
    archive.close()
    cleaned = re.sub('[' + ''.join(bad_chars) + ']', '', raw.decode('latin1'))
    return xml.dom.minidom.parseString(cleaned)
def findLCB(args):
"""Finds the longest common beginning"""
minlength=min([len(x) for x in args])
for i in range(0,minlength):
for s in args:
if args[0][i]!=s[i]:
return i
return i
def sort(args):
    """Sort names numerically by the integer suffix after their common prefix.

    E.g. ['img2', 'img10', 'img1'] -> ['img1', 'img2', 'img10'].

    Bug fix: the original passed a two-argument cmp-style lambda
    (``lambda x, y: cmp(int(x), int(y))``) as ``key=``; ``key`` functions
    take a single argument, so every call raised TypeError (and ``cmp``
    does not exist on Python 3). ``key=int`` yields the intended ordering.
    """
    i = findLCB(args)
    suffixes = [x[i:] for x in args]
    suffixes.sort(key=int)
    return ['%s%s' % (args[0][0:i], x) for x in suffixes]
def getSampleNums(args):
    """Return the integer suffix of each name after the common prefix."""
    prefix_len = findLCB(args)
    return [int(name[prefix_len:]) for name in args]
if __name__=='__main__':
    import sys
    args=sys.argv
    progname=args.pop(0)
    lam=[]
    data=[]
    meta={}
    times=[]
    # Optional '-f BASENAME' pair selects the output file name prefix.
    # NOTE(review): crashes with IndexError when called with no arguments.
    if args[0]=='-f':
        opt=args.pop(0)
        outfilenamebase=args.pop(0)
    else:
        outfilenamebase='spectra'
    # Two CSV outputs: processed spectra and raw source spectra.
    fid=open('%s.csv' % outfilenamebase, 'w')
    fids=open('%s-source.csv' % outfilenamebase, 'w')
    first=True
    N=len(args)
    # Basename of each input path without extension (e.g. 'dir/s1.xml' -> 's1').
    basenames=[filename.split('.')[0].split('/').pop() for filename in args]
    print(basenames)
    samplenums=getSampleNums(basenames)
    # One (sample number, basename, path) record per input file.
    records=[]
    for i in range(N):
        records.append((samplenums[i], basenames[i], args[i]))
    #records.sort(key=lambda x,y: cmp(int(x[0]), int(y[0])))
    for i in range(N):
        samplenum, basename, filename = records[i]
        dom=parseFile(filename)
        if DEBUG:
            print("Parsing ", basename)
        info=parseOOData(dom, SourceSpectra)
        # The first file also contributes the wavelength header row.
        if first:
            writeSpectra(fid, info['lam'], 'Samplenum')
            writeSpectra(fids, info['lam'], 'Samplenum')
            first=False
        writeSpectra(fids, info['Xsource'], samplenum)
        writeSpectra(fid, info['Xprocessed'], samplenum)
        times.append('%s, %s' % (info['timestamp'], info['date']))
    fid.close()
    fids.close()
    # Metadata and wavelengths come from the LAST file processed.
    writeMeta('%s-metadata.txt' % outfilenamebase, info)
    writeDataVector('%s-lambda.csv' % outfilenamebase, info['lam'], 'lambda')
    writeStrVector('%s-times.csv' % outfilenamebase, times, 'timestamp, time')
| {
"content_hash": "26758ea172d26756c431971de64835d3",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 99,
"avg_line_length": 29.43859649122807,
"alnum_prop": 0.6232121573301549,
"repo_name": "belfhi/FP-Spektren",
"id": "9ff68596bb471eac517aadc2583f0b5b1ddac31c",
"size": "6714",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ooextract.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6714"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
# Site identity.
AUTHOR = u'Garagem'
SITENAME = u'Garagem Hacker 2.0'
SITESUBTITLE = u'Hackerspace'
SITEURL = 'http://localhost:8000'  # local development URL
# Content and theme locations.
PATH = 'content'
THEME = 'themes/bricks'
STATIC_PATHS = ['images', 'extra']
# Publish extra/CNAME at the site root (custom-domain file).
EXTRA_PATH_METADATA = {'extra/CNAME': {'path': 'CNAME'}}
ARTICLE_PATHS = ['blog']
ARTICLE_SAVE_AS = '{slug}.html'
ARTICLE_URL = '{slug}'
# Localization: Brazilian Portuguese, Sao Paulo time.
TIMEZONE = 'America/Sao_Paulo'
DEFAULT_LANG = u'pt'
LOCALE = 'pt_BR'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Uncomment following line if you want document-relative URLs when developing
# RELATIVE_URLS = True
# Pagination
DEFAULT_PAGINATION = 4
PAGINATION_PATTERNS = (
    (1, '{base_name}/', '{base_name}/index.html'),
    (2, '{base_name}/page/{number}/', '{base_name}/page/{number}/index.html'),
)
# Clean (extension-less) URL layout; each URL maps to an on-disk index.html.
PAGE_URL = '{slug}/'
PAGE_SAVE_AS = '{slug}/index.html'
TAG_URL = 'tag/{slug}/'
TAG_SAVE_AS = 'tag/{slug}/index.html'
TAGS_URL = 'tags/'
TAGS_SAVE_AS = 'tags/index.html'
AUTHOR_URL = 'author/{slug}/'
AUTHOR_SAVE_AS = 'author/{slug}/index.html'
AUTHORS_URL = 'authors/'
AUTHORS_SAVE_AS = 'authors/index.html'
CATEGORY_URL = 'category/{slug}/'
CATEGORY_SAVE_AS = 'category/{slug}/index.html'
CATEGORYS_URL = 'categories/'
CATEGORYS_SAVE_AS = 'categories/index.html'
# Third-party integrations.
DISQUS_SITENAME = "garagemhacker"
GOOGLE_ANALYTICS = "UA-899823-17"
| {
"content_hash": "f781ea11be552f9e98a9b82f98ee990b",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 78,
"avg_line_length": 24.71186440677966,
"alnum_prop": 0.691358024691358,
"repo_name": "GaragemHacker/garagemhacker.github.io",
"id": "9cfa11e44cea6523a29948f7b46dfd0368b8f4d0",
"size": "1506",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "pelicanconf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6960"
},
{
"name": "HTML",
"bytes": "21389"
},
{
"name": "Makefile",
"bytes": "3347"
},
{
"name": "Python",
"bytes": "1940"
}
],
"symlink_target": ""
} |
__author__ = 'yolosec'
class Error(Exception):
    """Generic EB client error; base class for this module's exceptions."""
class InvalidResponse(Error):
    """Invalid server response"""
class InvalidStatus(Error):
    """Invalid status in the server response."""
class RequestFailed(Error):
    """API request failed"""
class FbError(Error):
    """Error carrying an optional underlying error object.

    Attributes:
        error: the wrapped error or message (may be None).
    """

    def __init__(self, error=None):
        # Forward the message to Exception so str(exc) and exc.args carry it.
        # When no error is given, keep str(exc) == '' as before.
        if error is None:
            super(FbError, self).__init__()
        else:
            super(FbError, self).__init__(error)
        self.error = error
| {
"content_hash": "a168b1b2f9507cbfd6ca95eba1f6eb71",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 35,
"avg_line_length": 15.695652173913043,
"alnum_prop": 0.6426592797783933,
"repo_name": "yolosec/zeman-parser",
"id": "4ff68ce05fbb4d36577ae15ff61ad62cb17bb29c",
"size": "361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zemanfeed/errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "64955"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import mdtraj
from mdtraj.geometry.dihedral import _get_indices_phi, \
_get_indices_psi, compute_dihedrals, _atom_sequence, CHI1_ATOMS
import numpy as np
import warnings
from itertools import combinations as _combinations, count
from itertools import product as _product
from pyemma.util.types import is_iterable_of_int as _is_iterable_of_int
import functools
from six import PY3
from pyemma.util.types import is_iterable_of_int as _is_iterable_of_int
from pyemma.util.log import getLogger
#from pyemma.util.annotators import deprecated
from six.moves import map
from six.moves import range
from six.moves import zip
__author__ = 'Frank Noe, Martin Scherer'
__all__ = ['MDFeaturizer',
]
def _get_indices_chi1(traj):
    """Locate the atom quadruplets of chi1 side-chain dihedrals in *traj*.

    Returns:
        (id_sort, indices): the residue-sorting permutation and an (n, 4)
        integer array of atom indices. NOTE(review): when no chi1 atoms are
        found, only the empty (0, 4) array is returned (no tuple) -- confirm
        whether callers handle both shapes.
    """
    rids, indices = list(zip(*(_atom_sequence(traj, atoms) for atoms in CHI1_ATOMS)))
    id_sort = np.argsort(np.concatenate(rids))
    if not any(x.size for x in indices):
        # np.int was removed in NumPy 1.24; the builtin int is equivalent here.
        return np.empty(shape=(0, 4), dtype=int)
    # np.vstack no longer accepts a bare generator; pass a list instead.
    indices = np.vstack([x for x in indices if x.size])[id_sort]
    return id_sort, indices
# this is needed for get_indices functions, since they expect a Trajectory,
# not a Topology
class fake_traj(object):
    """Minimal Trajectory stand-in exposing only the .top attribute.

    The mdtraj get-indices helpers expect a Trajectory but only read its
    topology, so wrapping a Topology in this shim is sufficient.
    """

    def __init__(self, top):
        self.top = top
def _describe_atom(topology, index):
"""
Returns a string describing the given atom
:param topology:
:param index:
:return:
"""
#assert isinstance(index, int)
at = topology.atom(index)
return "%s %i %s %i" % (at.residue.name, at.residue.index, at.name, at.index)
def _catch_unhashable(x):
if hasattr(x, '__getitem__'):
res = list(x)
for i, value in enumerate(x):
if isinstance(value, np.ndarray):
res[i] = _hash_numpy_array(value)
else:
res[i] = value
return tuple(res)
elif isinstance(x, np.ndarray):
return _hash_numpy_array(x)
return x
# Hash an ndarray by combining its shape, strides and buffer contents.
# Two implementations are needed: on py3 the raw buffer is copied out with
# tobytes() and hashed; on py2 the array's buffer is hashed directly, which
# requires the array to be temporarily read-only.
if PY3:
    def _hash_numpy_array(x):
        hash_value = hash(x.shape)
        hash_value ^= hash(x.strides)
        hash_value ^= hash(x.data.tobytes())
        return hash_value
else:
    def _hash_numpy_array(x):
        # Clear the writeable flag while hashing, restoring it afterwards
        # even if hashing raises.
        writeable = x.flags.writeable
        try:
            x.flags.writeable = False
            hash_value = hash(x.shape)
            hash_value ^= hash(x.strides)
            hash_value ^= hash(x.data)
        finally:
            x.flags.writeable = writeable
        return hash_value
def hash_top(top):
    """Hash a topology object.

    On py2 the object's own hash is used; on py3 a combined hash over atom
    count, atoms, residues and bonds serves as a temporary workaround.
    """
    if not PY3:
        return hash(top)
    combined = hash(top.n_atoms)
    combined ^= hash(tuple(top.atoms))
    combined ^= hash(tuple(top.residues))
    combined ^= hash(tuple(top.bonds))
    return combined
def _parse_pairwise_input(indices1, indices2, MDlogger, fname=''):
    r"""For input of pairwise type (distances, inverse distances, contacts) checks the
    type of input the user gave and reformats it so that :py:func:`DistanceFeature`,
    :py:func:`InverseDistanceFeature`, and ContactFeature can work.

    In case the input isn't already a list of distances, this function will:
        - sort the indices1 array
        - check for duplicates within the indices1 array
        - sort the indices2 array
        - check for duplicates within the indices2 array
        - check for duplicates between the indices1 and indices2 array
        - if indices2 is None, produce a list of pairs of indices in indices1, or
        - if indices2 is not None, produce a list of pairs of (i,j) where i comes from indices1, and j from indices2

    Returns an (n, 2) array of index pairs, or indices1 unchanged when it is
    not a flat iterable of ints (i.e. it is already a pair list).
    """
    if _is_iterable_of_int(indices1):
        MDlogger.warning('The 1D arrays input for %s have been sorted, and '
                         'index duplicates have been eliminated.\n'
                         'Check the output of describe() to see the actual order of the features' % fname)
        # Eliminate duplicates and sort
        indices1 = np.unique(indices1)
        # Intra-group distances: all unordered pairs within indices1
        if indices2 is None:
            atom_pairs = np.array(list(_combinations(indices1, 2)))
        # Inter-group distances: cross product of indices1 and indices2
        elif _is_iterable_of_int(indices2):
            # Eliminate duplicates and sort
            indices2 = np.unique(indices2)
            # Eliminate duplicates between indices1 and indices2
            uniqs = np.in1d(indices2, indices1, invert=True)
            indices2 = indices2[uniqs]
            atom_pairs = np.asarray(list(_product(indices1, indices2)))
        # NOTE(review): if indices2 is neither None nor a flat int iterable,
        # atom_pairs is never assigned and the return raises
        # UnboundLocalError -- confirm whether that input is possible.
    else:
        # Input is assumed to already be a list/array of index pairs.
        atom_pairs = indices1
    return atom_pairs
def _parse_groupwise_input(group_definitions, group_pairs, MDlogger, mname=''):
    r"""For input of group type (add_group_mindist), prepare the array of pairs of indices
    and groups so that :py:func:`MinDistanceFeature` can work

    This function will:
        - check the input types
        - sort the 1D arrays of each entry of group_definitions
        - check for duplicates within each group_definition
        - produce the list of pairs for all needed distances
        - produce a list that maps each entry in the pairlist to a given group of distances

    Returns
    -------
    new_groups : list of sorted, de-duplicated 1D index arrays
    new_pairs : ndarray((m, 2)) of group-index pairs, each row sorted
    distance_idxs : ndarray((d, 2)) of every atom pair that must be computed
    group_identifiers : ndarray((m, 2)) of [start, stop) slices into
        ``distance_idxs``, one per group pair
    """
    assert isinstance(group_definitions, list), "group_definitions has to be of type list, not %s"%type(group_definitions)
    # Handle the special case of just one group
    if len(group_definitions) == 1:
        group_pairs = np.array([0,0], ndmin=2)
    # Sort the elements within each group
    new_groups = []
    for igroup in group_definitions:
        assert np.ndim(igroup) == 1, "The elements of the groups definition have to be of dim 1, not %u"%np.ndim(igroup)
        new_groups.append(np.unique(igroup))
    # Check for group duplicates
    for ii, igroup in enumerate(new_groups[:-1]):
        for jj, jgroup in enumerate(new_groups[ii+1:]):
            if len(igroup) == len(jgroup):
                assert not np.allclose(igroup, jgroup), "Some group definitions appear to be duplicated, e.g %u and %u"%(ii,ii+jj+1)
    # Create and/or check the pair-list
    if group_pairs == 'all':
        new_pairs = np.array(list(_combinations(np.arange(len(group_definitions)), 2)))
    else:
        assert isinstance(group_pairs, np.ndarray)
        assert group_pairs.shape[1] == 2
        # Group indices are zero-based, so the largest legal index is
        # len(new_groups) - 1. (The previous "<=" let the out-of-range
        # index len(new_groups) slip through and fail later with an
        # IndexError.)
        assert group_pairs.max() < len(new_groups), "Cannot ask for group nr. %u if group_definitions only " \
                                                    "contains %u groups"%(group_pairs.max(), len(new_groups))
        assert group_pairs.min() >= 0, "Group pairs contains negative group indices"
        new_pairs = np.zeros_like(group_pairs, dtype='int')
        for ii, ipair in enumerate(group_pairs):
            if ipair[0] == ipair[1]:
                MDlogger.warning("%s will compute the mindist of group %u with itself. Is this wanted? "%(mname, ipair[0]))
            new_pairs[ii,:] = np.sort(ipair)
    # Create the large list of distances that will be computed, and an array containing group identifiers
    # of the distances that actually characterize a pair of groups
    group_distance_indexes = []
    group_distance_identifiers = np.zeros_like(new_pairs)
    b = 0
    for ii, pair in enumerate(new_pairs):
        if pair[0] != pair[1]:
            group_distance_indexes.append(list(_product(new_groups[pair[0]],
                                                        new_groups[pair[1]])))
        else:
            # intra-group minimum: all unordered pairs within the group
            group_distance_indexes.append(list(_combinations(new_groups[pair[0]], 2)))
        group_distance_identifiers[ii,:] = [b, b+len(group_distance_indexes[ii])]
        b += len(group_distance_indexes[ii])
    return new_groups, new_pairs, np.vstack(group_distance_indexes), group_distance_identifiers
class CustomFeature(object):
    """Base class for user-defined features.

    To implement a new feature either derive from this class and compute the
    quantity of interest in :py:meth:`map` (returning an ndarray), or simply
    pass a plain function to the constructor -- no subclass needed.

    Parameters
    ----------
    func : function
        invoked with the mdtraj.Trajectory as first argument, followed by the
        given args and kwargs, on every call to :py:meth:`map`.
    args : optional positional arguments forwarded to func
    kwargs : optional named arguments forwarded to func; the special key
        ``dim`` declares the output dimension and is *not* forwarded.

    Examples
    --------
    A feature transforming all coordinates by :math:`1 / x^2` for a 58-atom
    topology (output dimension 3 * 58 = 174):

    >>> my_feature = CustomFeature(lambda x: (1.0 / x.xyz**2).reshape(-1, 174), dim=174)
    """

    def __init__(self, func=None, *args, **kwargs):
        self._func = func
        self._args = args
        # 'dim' is popped from the very dict stored here, so it is never
        # forwarded to func at map() time.
        self._kwargs = kwargs
        self.dimension = kwargs.pop('dim', 0)

    def describe(self):
        arg_repr = str(self._args) + str(self._kwargs)
        return ["CustomFeature calling %s with args %s" % (str(self._func), arg_repr)]

    def map(self, traj):
        result = self._func(traj, *self._args, **self._kwargs)
        if not isinstance(result, np.ndarray):
            raise ValueError("your function should return a NumPy array!")
        return result

    def __hash__(self):
        # numpy arrays among the arguments are made hashable via their buffers
        hashable_args = [_catch_unhashable(a) for a in self._args]
        hashable_kwargs = [_catch_unhashable(kv) for kv in sorted(self._kwargs.items())]
        return hash(self._func) ^ hash(tuple(hashable_args + hashable_kwargs))

    def __eq__(self, other):
        return self.__hash__() == other.__hash__()
class SelectionFeature(object):
    """Cartesian coordinates of a selection of atoms (possibly all atoms).

    Per frame, the coordinates are flattened as [x1, y1, z1, x2, y2, z2, ...]
    """
    # TODO: Needs an orientation option

    def __init__(self, top, indexes):
        self.top = top
        self.indexes = np.array(indexes)
        self.prefix_label = "ATOM:"

    def describe(self):
        labels = []
        for idx in self.indexes:
            atom_label = _describe_atom(self.top, idx)
            for axis in ("x", "y", "z"):
                labels.append("%s%s %s" % (self.prefix_label, atom_label, axis))
        return labels

    @property
    def dimension(self):
        # three coordinates per selected atom
        return 3 * self.indexes.shape[0]

    def map(self, traj):
        n_frames = traj.xyz.shape[0]
        return np.reshape(traj.xyz[:, self.indexes, :], (n_frames, self.dimension))

    def __hash__(self):
        return hash(self.prefix_label) ^ hash_top(self.top) ^ _hash_numpy_array(self.indexes)

    def __eq__(self, other):
        return self.__hash__() == other.__hash__()
class DistanceFeature(object):
    """Pairwise atom-atom distances for a fixed list of atom pairs."""

    def __init__(self, top, distance_indexes, periodic=True):
        self.top = top
        self.distance_indexes = np.array(distance_indexes)
        self.prefix_label = "DIST:"
        # honor the minimum-image convention if requested
        self.periodic = periodic

    def describe(self):
        return ["%s %s - %s" % (self.prefix_label,
                                _describe_atom(self.top, first),
                                _describe_atom(self.top, second))
                for first, second in self.distance_indexes]

    @property
    def dimension(self):
        # one output column per atom pair
        return self.distance_indexes.shape[0]

    def map(self, traj):
        return mdtraj.compute_distances(traj, self.distance_indexes, periodic=self.periodic)

    def __hash__(self):
        return _hash_numpy_array(self.distance_indexes) ^ hash_top(self.top) ^ hash(self.prefix_label)

    def __eq__(self, other):
        return self.__hash__() == other.__hash__()
class InverseDistanceFeature(DistanceFeature):
    """Reciprocal (1/r) pairwise atom-atom distances."""

    def __init__(self, top, distance_indexes, periodic=True):
        # reuse the DistanceFeature machinery; only label and mapping differ
        DistanceFeature.__init__(self, top, distance_indexes, periodic=periodic)
        self.prefix_label = "INVDIST:"

    def map(self, traj):
        distances = mdtraj.compute_distances(traj, self.distance_indexes, periodic=self.periodic)
        return 1.0 / distances

    # no dedicated __hash__ needed: the prefix label (part of the parent's
    # hash) already distinguishes this from a plain DistanceFeature
class ResidueMinDistanceFeature(DistanceFeature):
    """Minimum residue-residue distances via :py:func:`mdtraj.compute_contacts`.

    Parameters
    ----------
    top : mdtraj Topology
    contacts : 'all' or ndarray((n, 2)) of residue-index pairs
    scheme : str
        atom sub-group considered per residue ('ca', 'closest', 'closest-heavy')
    ignore_nonprotein : bool
        skip residues that are not of protein type
    threshold : float or None
        if given, binarize: 1.0 below the threshold, 0.0 above
    """

    def __init__(self, top, contacts, scheme, ignore_nonprotein, threshold):
        self.top = top
        self.contacts = contacts
        self.scheme = scheme
        # Stored so that map() queries mdtraj with exactly the same options
        # used to determine the output dimension below. (Previously this flag
        # was dropped here, so map() silently fell back to mdtraj's default
        # and could disagree with self._dimension when ignore_nonprotein=False.)
        self.ignore_nonprotein = ignore_nonprotein
        self.threshold = threshold
        self.prefix_label = "RES_DIST (%s)"%scheme
        # mdtraj.compute_contacts might ignore part of the user input (if it is contradictory) and
        # produce a warning. It is more robust to let it run once on a dummy trajectory to
        # see what the actual size of the output is:
        dummy_traj = mdtraj.Trajectory(np.zeros((top.n_atoms, 3)), top)
        dummy_dist, dummy_pairs = mdtraj.compute_contacts(dummy_traj, contacts=contacts,
                                                          scheme=scheme,
                                                          ignore_nonprotein=ignore_nonprotein)
        self._dimension = dummy_dist.shape[1]
        self.distance_indexes = dummy_pairs

    def describe(self):
        labels = ["%s %s - %s" % (self.prefix_label,
                                  self.top.residue(pair[0]),
                                  self.top.residue(pair[1]))
                  for pair in self.distance_indexes]
        return labels

    def map(self, traj):
        # We let mdtraj compute the contacts with the input scheme, passing
        # the same ignore_nonprotein option used in __init__.
        D = mdtraj.compute_contacts(traj, contacts=self.contacts, scheme=self.scheme,
                                    ignore_nonprotein=self.ignore_nonprotein)[0]
        res = np.zeros_like(D)
        # Do we want binary?
        if self.threshold is not None:
            I = np.argwhere(D <= self.threshold)
            res[I[:, 0], I[:, 1]] = 1.0
        else:
            res = D
        return res
class GroupMinDistanceFeature(DistanceFeature):
    """Minimum distance between predefined groups of atoms.

    ``distance_list`` holds every atom pair that has to be computed, and
    ``group_identifiers`` holds, for each group pair, the [start, stop)
    slice into ``distance_list`` over which the minimum is taken
    (both produced by :py:func:`_parse_groupwise_input`).
    """

    def __init__(self, top, group_pairs, distance_list, group_identifiers, threshold):
        self.top = top
        self.group_identifiers = group_identifiers
        self.distance_list = distance_list
        self.prefix_label = "GROUP_MINDIST"
        self.threshold = threshold
        # stored under this name so the inherited dimension property works
        self.distance_indexes = group_pairs

    def describe(self):
        return ["%s %s - %s" % (self.prefix_label, pair[0], pair[1])
                for pair in self.distance_indexes]

    def map(self, traj):
        # all needed atom-atom distances in a single mdtraj call
        all_distances = mdtraj.compute_distances(traj, self.distance_list)
        # reduce each group-pair slice to its minimum, per frame
        minima = np.zeros((traj.n_frames, self.dimension))
        for col, (start, stop) in enumerate(self.group_identifiers):
            minima[:, col] = all_distances[:, start:stop].min(1)
        if self.threshold is None:
            return minima
        # binarize: 1.0 where the minimum distance is below the threshold
        binary = np.zeros_like(minima)
        hits = np.argwhere(minima <= self.threshold)
        binary[hits[:, 0], hits[:, 1]] = 1.0
        return binary
class ContactFeature(DistanceFeature):
    """Binary contacts: 1.0 where the atom-atom distance is below
    ``threshold``, 0.0 otherwise.

    Parameters
    ----------
    top : mdtraj Topology
    distance_indexes : ndarray((n, 2), dtype=int) of atom pairs
    threshold : float, default=5.0
        cutoff below which a pair counts as a contact (in the units of the
        trajectory coordinates).
    periodic : bool, default=True
        use the minimum-image convention when computing distances.
    """

    def __init__(self, top, distance_indexes, threshold=5.0, periodic=True):
        # forward periodic so the parent state is set consistently
        # (it was previously dropped here and re-assigned afterwards)
        DistanceFeature.__init__(self, top, distance_indexes, periodic=periodic)
        self.prefix_label = "CONTACT:"
        self.threshold = threshold

    def map(self, traj):
        dists = mdtraj.compute_distances(
            traj, self.distance_indexes, periodic=self.periodic)
        res = np.zeros(
            (len(traj), self.distance_indexes.shape[0]), dtype=np.float32)
        I = np.argwhere(dists <= self.threshold)
        res[I[:, 0], I[:, 1]] = 1.0
        return res

    def __hash__(self):
        hash_value = DistanceFeature.__hash__(self)
        hash_value ^= hash(self.threshold)
        # include periodic so that features differing only in periodicity
        # are not treated as duplicates by the featurizer
        hash_value ^= hash(self.periodic)
        return hash_value
class AngleFeature(object):
    """Angles between atom triplets.

    Parameters
    ----------
    top : mdtraj Topology
    angle_indexes : ndarray((n, 3), dtype=int) of atom triplets
    deg : bool, report angles in degrees instead of radians
    cossin : bool, report (cos, sin) per angle instead of the raw angle
    """

    def __init__(self, top, angle_indexes, deg=False, cossin=False):
        self.top = top
        self.angle_indexes = np.array(angle_indexes)
        self.deg = deg
        self.cossin = cossin

    def describe(self):
        if self.cossin:
            sin_cos = ("ANGLE: COS(%s - %s - %s)",
                       "ANGLE: SIN(%s - %s - %s)")
            labels = [s % (_describe_atom(self.top, triple[0]),
                           _describe_atom(self.top, triple[1]),
                           _describe_atom(self.top, triple[2]))
                      for triple in self.angle_indexes
                      for s in sin_cos]
        else:
            labels = ["ANGLE: %s - %s - %s " %
                      (_describe_atom(self.top, triple[0]),
                       _describe_atom(self.top, triple[1]),
                       _describe_atom(self.top, triple[2]))
                      for triple in self.angle_indexes]
        return labels

    @property
    def dimension(self):
        dim = self.angle_indexes.shape[0]
        if self.cossin:
            dim *= 2
        return dim

    def map(self, traj):
        rad = mdtraj.compute_angles(traj, self.angle_indexes)
        if self.cossin:
            rad = np.dstack((np.cos(rad), np.sin(rad)))
            # keep a (n_frames, 2 * n_angles) layout, consistent with
            # self.dimension and with DihedralFeature. (The previous code
            # flattened everything into a single 1D array, breaking the
            # per-frame output shape.)
            rad = rad.reshape(rad.shape[0], rad.shape[1] * rad.shape[2])
        if self.deg:
            return np.rad2deg(rad)
        else:
            return rad

    def __hash__(self):
        hash_value = _hash_numpy_array(self.angle_indexes)
        hash_value ^= hash_top(self.top)
        hash_value ^= hash(self.deg)
        # cossin changes the output, so it must enter the hash as well
        # (DihedralFeature already does this)
        hash_value ^= hash(self.cossin)
        return hash_value

    def __eq__(self, other):
        return self.__hash__() == other.__hash__()
class DihedralFeature(object):
    """Dihedral angles defined by atom quadruplets.

    Parameters
    ----------
    top : mdtraj Topology
    dih_indexes : ndarray((n, 4), dtype=int) of atom quadruplets
    deg : bool, report angles in degrees instead of radians
    cossin : bool, report (cos, sin) per dihedral instead of the raw angle
    """

    def __init__(self, top, dih_indexes, deg=False, cossin=False):
        self.top = top
        self.dih_indexes = np.array(dih_indexes)
        self.deg = deg
        self.cossin = cossin
        # one column per dihedral, or two (cos, sin) when cossin is set
        self._dim = self.dih_indexes.shape[0] * (2 if self.cossin else 1)

    def describe(self):
        atoms = lambda quad: tuple(_describe_atom(self.top, q) for q in quad)
        if self.cossin:
            templates = ("DIH: COS(%s - %s - %s - %s)",
                         "DIH: SIN(%s - %s - %s - %s)")
            return [t % atoms(quad)
                    for quad in self.dih_indexes
                    for t in templates]
        return ["DIH: %s - %s - %s - %s " % atoms(quad)
                for quad in self.dih_indexes]

    @property
    def dimension(self):
        return self._dim

    def map(self, traj):
        res = mdtraj.compute_dihedrals(traj, self.dih_indexes)
        if self.cossin:
            res = np.dstack((np.cos(res), np.sin(res)))
            res = res.reshape(res.shape[0], res.shape[1] * res.shape[2])
        # convert to degrees on request
        if self.deg:
            res = np.rad2deg(res)
        return res

    def __hash__(self):
        return (_hash_numpy_array(self.dih_indexes) ^ hash_top(self.top)
                ^ hash(self.deg) ^ hash(self.cossin))

    def __eq__(self, other):
        return hash(self) == hash(other)
class BackboneTorsionFeature(DihedralFeature):
    """Backbone phi/psi dihedral angles, optionally restricted to the atoms
    matched by ``selstr``. The data columns alternate
    (phi_1, psi_1, ..., phi_n, psi_n)."""

    def __init__(self, topology, selstr=None, deg=False, cossin=False):
        # Determine the phi/psi atom quadruplets on a dummy trajectory built
        # from the topology, then optionally filter by the selection string
        # (matching on the second atom of each torsion).
        ft = fake_traj(topology)
        _, indices = _get_indices_phi(ft)
        if not selstr:
            self._phi_inds = indices
        else:
            self._phi_inds = indices[np.in1d(indices[:, 1],
                                             topology.select(selstr), assume_unique=True)]
        _, indices = _get_indices_psi(ft)
        if not selstr:
            self._psi_inds = indices
        else:
            self._psi_inds = indices[np.in1d(indices[:, 1],
                                             topology.select(selstr), assume_unique=True)]
        # alternate phi, psi pairs (phi_1, psi_1, ..., phi_n, psi_n)
        # NOTE(review): zip() truncates to the shorter of the two index lists,
        # so if the phi and psi counts differ the surplus torsions are
        # silently dropped -- confirm this is intended.
        dih_indexes = np.array(list(phi_psi for phi_psi in
                                    zip(self._phi_inds, self._psi_inds))).reshape(-1, 4)
        super(BackboneTorsionFeature, self).__init__(topology, dih_indexes,
                                                     deg=deg, cossin=cossin)

    def describe(self):
        top = self.top
        # label: "<chain index> <residue name> <residue sequence number> "
        getlbl = lambda at: "%i %s %i " % (
            at.residue.chain.index, at.residue.name, at.residue.resSeq)
        if self.cossin:
            sin_cos = ("COS(PHI %s)", "SIN(PHI %s)")
            labels_phi = [s % getlbl(top.atom(ires[1])) for ires in self._phi_inds
                          for s in sin_cos]
            sin_cos = ("COS(PSI %s)", "SIN(PSI %s)")
            labels_psi = [s % getlbl(top.atom(ires[1])) for ires in self._psi_inds
                          for s in sin_cos]
        else:
            labels_phi = [
                "PHI %s" % getlbl(top.atom(ires[1])) for ires in self._phi_inds]
            labels_psi = [
                "PSI %s" % getlbl(top.atom(ires[1])) for ires in self._psi_inds]
        # NOTE(review): labels are grouped (all PHI, then all PSI) while the
        # data columns alternate phi/psi -- verify downstream consumers rely
        # on the intended ordering.
        return labels_phi + labels_psi
class Chi1TorsionFeature(DihedralFeature):
    """Chi1 side-chain dihedral angles, optionally restricted via ``selstr``.

    Parameters
    ----------
    topology : mdtraj Topology
    selstr : str, optional
        mdtraj selection string; if non-empty, only chi1 torsions whose
        second atom is in the selection are kept.
    deg : bool, report angles in degrees instead of radians
    cossin : bool, report (cos, sin) pairs instead of raw angles
    """

    def __init__(self, topology, selstr=None, deg=False, cossin=False):
        # determine the chi1 quadruplets on a dummy trajectory, then filter
        ft = fake_traj(topology)
        _, indices = _get_indices_chi1(ft)
        if not selstr:
            dih_indexes = indices
        else:
            dih_indexes = indices[np.in1d(indices[:, 1],
                                          topology.select(selstr),
                                          assume_unique=True)]
        super(Chi1TorsionFeature, self).__init__(topology, dih_indexes,
                                                 deg=deg, cossin=cossin)

    def describe(self):
        top = self.top
        getlbl = lambda at: "%i %s %i " \
            % (at.residue.chain.index, at.residue.name, at.residue.resSeq)
        if self.cossin:
            cossin = ("COS(CHI1 %s)", "SIN(CHI1 %s)")
            labels_chi1 = [s % getlbl(top.atom(ires[1]))
                           for ires in self.dih_indexes
                           for s in cossin]
        else:
            # "CHI1 " with a separating space, consistent with the cossin
            # labels above (previously the label was glued to the residue id,
            # producing e.g. "CHI10 ALA 1 ").
            labels_chi1 = ["CHI1 %s" % getlbl(top.atom(ires[1]))
                           for ires in self.dih_indexes]
        return labels_chi1
class MinRmsdFeature(object):
    """Minimal RMSD of every frame with respect to a reference structure.

    Parameters
    ----------
    ref : filename or mdtraj.Trajectory
        reference structure; for filenames only frame ``ref_frame`` is
        loaded (using ``topology``).
    ref_frame : int, default=0
        frame of the file given in ``ref``; no effect for trajectory input.
    atom_indices : array_like, optional
        atoms used for the alignment and the rmsd computation (all if None).
    topology : optional topology used when loading ``ref`` from a file.
    precentered : bool, default=False
        assert to mdtraj that the target conformations are already centered
        at the origin, which speeds up the computation.
    """

    def __init__(self, ref, ref_frame=0, atom_indices=None, topology=None, precentered=False):
        assert isinstance(
            ref_frame, int), "ref_frame has to be of type integer, and not %s" % type(ref_frame)
        # Will be needing the hashed input parameter
        self.__hashed_input__ = hash(ref)
        # Types of inputs
        # 1. Filename+top
        if isinstance(ref, str):
            # Store the filename
            self.name = ref[:]
            ref = mdtraj.load_frame(ref, ref_frame, top=topology)
            # mdtraj is pretty good handling exceptions, we're not checking for
            # types or anything here
        # 2. md.Trajectory object
        elif isinstance(ref, mdtraj.Trajectory):
            self.name = ref.__repr__()[:]
        else:
            raise TypeError("input reference has to be either a filename or "
                            "a mdtraj.Trajectory object, and not of %s" % type(ref))
        self.ref = ref
        self.ref_frame = ref_frame
        self.atom_indices = atom_indices
        self.precentered = precentered

    def describe(self):
        label = "minrmsd to frame %u of %s" % (self.ref_frame, self.name)
        if self.precentered:
            label += ', precentered=True'
        if self.atom_indices is not None:
            label += ', subset of atoms '
        return [label]

    @property
    def dimension(self):
        # a single rmsd value per frame
        return 1

    def map(self, traj):
        # Forward precentered so mdtraj can actually skip the centering step
        # (it was stored, described and hashed but never passed on before).
        return np.array(mdtraj.rmsd(traj, self.ref, atom_indices=self.atom_indices,
                                    precentered=self.precentered), ndmin=2).T

    def __hash__(self):
        hash_value = hash(self.__hashed_input__)
        # TODO: identical md.Trajectory objects have different hashes need a
        # way to differentiate them here
        hash_value ^= hash(self.ref_frame)
        if self.atom_indices is None:
            hash_value ^= _hash_numpy_array(np.arange(self.ref.n_atoms))
        else:
            hash_value ^= _hash_numpy_array(np.array(self.atom_indices))
        hash_value ^= hash(self.precentered)
        return hash_value

    def __eq__(self, other):
        return self.__hash__() == other.__hash__()
class MDFeaturizer(object):
_ids = count(0)
def __init__(self, topfile):
    """extracts features from MD trajectories.

    Parameters
    ----------
    topfile : str
        a path to a topology file (pdb etc.)
    """
    self.topologyfile = topfile
    # load the topology once; all features added later share this object
    self.topology = (mdtraj.load(topfile)).topology
    # feature objects; duplicates (per feature __hash__/__eq__) are rejected
    self.active_features = []
    self._dim = 0
    self._create_logger()
def _create_logger(self):
    # Give each featurizer instance its own logger, numbered by a class-level
    # counter, e.g. "<package>.MDFeaturizer[3]".
    count = next(self._ids)
    i = self.__module__.rfind(".")
    j = self.__module__.find(".") + 1
    # strip the first and last component of the dotted module path
    package = self.__module__[j:i]
    name = "%s.%s[%i]" % (package, self.__class__.__name__, count)
    self._name = name
    self._logger = getLogger(name)
def __add_feature(self, f):
    """Append feature *f* to the active list, skipping empty or duplicate
    features (with a log message in either case)."""
    # perform sanity checks
    if f.dimension == 0:
        self._logger.error("given an empty feature (eg. due to an empty/"
                           "ineffective selection). Skipping it."
                           " Feature desc: %s" % f.describe())
        return
    # membership test relies on the features' hash-based __eq__
    if f not in self.active_features:
        self.active_features.append(f)
    else:
        self._logger.warning("tried to re-add the same feature %s"
                             % f.__class__.__name__)
def describe(self):
"""
Returns a list of strings, one for each feature selected,
with human-readable descriptions of the features.
Returns
-------
labels : list of str
An ordered list of strings, one for each feature selected,
with human-readable descriptions of the features.
"""
all_labels = []
for f in self.active_features:
all_labels += f.describe()
return all_labels
def select(self, selstring):
"""
Returns the indexes of atoms matching the given selection
Parameters
----------
selstring : str
Selection string. See mdtraj documentation for details:
http://mdtraj.org/latest/atom_selection.html
Returns
-------
indexes : ndarray((n), dtype=int)
array with selected atom indexes
"""
return self.topology.select(selstring)
def select_Ca(self):
"""
Returns the indexes of all Ca-atoms
Returns
-------
indexes : ndarray((n), dtype=int)
array with selected atom indexes
"""
return self.topology.select("name CA")
def select_Backbone(self):
"""
Returns the indexes of backbone C, CA and N atoms
Returns
-------
indexes : ndarray((n), dtype=int)
array with selected atom indexes
"""
return self.topology.select("backbone and (name C or name CA or name N)")
def select_Heavy(self):
"""
Returns the indexes of all heavy atoms (Mass >= 2)
Returns
-------
indexes : ndarray((n), dtype=int)
array with selected atom indexes
"""
return self.topology.select("mass >= 2")
@staticmethod
def pairs(sel, excluded_neighbors=0):
"""
Creates all pairs between indexes. Will except closest neighbors up to :py:obj:`excluded_neighbors`
The self-pair (i,i) is always excluded
Parameters
----------
sel : ndarray((n), dtype=int)
array with selected atom indexes
excluded_neighbors: int, default = 0
number of neighbors that will be excluded when creating the pairs
Returns
-------
sel : ndarray((m,2), dtype=int)
m x 2 array with all pair indexes between different atoms that are at least :obj:`excluded_neighbors`
indexes apart, i.e. if i is the index of an atom, the pairs [i,i-2], [i,i-1], [i,i], [i,i+1], [i,i+2], will
not be in :py:obj:`sel` (n=excluded_neighbors) if :py:obj:`excluded_neighbors` = 2.
Moreover, the list is non-redundant,i.e. if [i,j] is in sel, then [j,i] is not.
"""
assert isinstance(excluded_neighbors,int)
p = []
for i in range(len(sel)):
for j in range(i + 1, len(sel)):
# get ordered pair
I = sel[i]
J = sel[j]
if (I > J):
I = sel[j]
J = sel[i]
# exclude 1 and 2 neighbors
if (J > I + excluded_neighbors):
p.append([I, J])
return np.array(p)
def _check_indices(self, pair_inds, pair_n=2):
"""ensure pairs are valid (shapes, all atom indices available?, etc.)
"""
pair_inds = np.array(pair_inds).astype(dtype=np.int, casting='safe')
if pair_inds.ndim != 2:
raise ValueError("pair indices has to be a matrix.")
if pair_inds.shape[1] != pair_n:
raise ValueError("pair indices shape has to be (x, %i)." % pair_n)
if pair_inds.max() > self.topology.n_atoms:
raise ValueError("index out of bounds: %i."
" Maximum atom index available: %i"
% (pair_inds.max(), self.topology.n_atoms))
return pair_inds
def add_all(self):
    """
    Adds the cartesian coordinates of every atom to the feature list.

    The coordinates are flattened per frame: [x1, y1, z1, x2, y2, z2, ...]
    """
    # TODO: add possibility to align to a reference structure
    all_atoms = list(range(self.topology.n_atoms))
    self.add_selection(all_atoms)
def add_selection(self, indexes):
    """
    Adds the cartesian coordinates of the selected atoms to the feature list.

    The coordinates of a selection [1, 2, ...] are flattened per frame as
    [x1, y1, z1, x2, y2, z2, ...]

    Parameters
    ----------
    indexes : ndarray((n), dtype=int)
        array with selected atom indexes
    """
    # TODO: add possibility to align to a reference structure
    self.__add_feature(SelectionFeature(self.topology, indexes))
def add_distances(self, indices, periodic=True, indices2=None):
    r"""
    Adds distances between atom pairs to the feature list.

    Parameters
    ----------
    indices : either an ndarray((n, 2), dtype=int) of atom pairs, or an
        iterable of integers naming the atoms whose mutual distances
        shall be computed.
    periodic : bool
        use the minimum-image convention.
    indices2 : iterable of integers (either list or ndarray(n, dtype=int)), optional
        only used when :py:obj:`indices` is an iterable of integers; then
        only the distances between the atoms in :py:obj:`indices` and
        :py:obj:`indices2` are computed.

    .. note::
        Iterable-of-integer inputs are sorted numerically and made unique
        before the pair list is built. Check the output of
        :py:func:`describe()` to see what features exactly have been added.
    """
    pair_list = _parse_pairwise_input(
        indices, indices2, self._logger, fname='add_distances()')
    pair_list = self._check_indices(pair_list)
    self.__add_feature(DistanceFeature(self.topology, pair_list, periodic=periodic))
def add_distances_ca(self, periodic=True):
    """
    Adds the distances between all pairs of alpha carbons to the feature list.
    """
    ca_pairs = self.pairs(self.select_Ca())
    self.add_distances(ca_pairs, periodic=periodic)
def add_inverse_distances(self, indices, periodic=True, indices2=None):
    """
    Adds the inverse distances between atoms to the feature list.

    Parameters
    ----------
    indices : either an ndarray((n, 2), dtype=int) of atom pairs, or an
        iterable of integers naming the atoms whose mutual inverse
        distances shall be computed.
    periodic : bool
        use the minimum-image convention.
    indices2 : iterable of integers (either list or ndarray(n, dtype=int)), optional
        only used when :py:obj:`indices` is an iterable of integers; then
        only the inverse distances between the atoms in :py:obj:`indices`
        and :py:obj:`indices2` are computed.

    .. note::
        Iterable-of-integer inputs are sorted numerically and made unique
        before the pair list is built. Check the output of
        :py:func:`describe()` to see what features exactly have been added.
    """
    atom_pairs = _parse_pairwise_input(
        indices, indices2, self._logger, fname='add_inverse_distances()')
    atom_pairs = self._check_indices(atom_pairs)
    # honor the caller's periodic flag (it was previously hard-coded to True,
    # silently ignoring periodic=False)
    f = InverseDistanceFeature(self.topology, atom_pairs, periodic=periodic)
    self.__add_feature(f)
def add_contacts(self, indices, indices2=None, threshold=5.0, periodic=True):
    r"""
    Adds binary contact features for the given atom pairs to the feature list.

    Parameters
    ----------
    indices : either an ndarray((n, 2), dtype=int) of atom pairs, or an
        iterable of integers naming the atoms between which the contacts
        shall be computed.
    indices2 : iterable of integers (either list or ndarray(n, dtype=int)), optional
        only used when :py:obj:`indices` is an iterable of integers; then
        only the contacts between the atoms in :py:obj:`indices` and
        :py:obj:`indices2` are computed.
    threshold : float, optional, default = 5.0
        distances below this threshold yield a feature value of 1.0,
        distances above yield 0.0. The default assumes Angstrom units --
        make sure it matches the units of your coordinates.

    .. note::
        Iterable-of-integer inputs are sorted numerically and made unique
        before the pair list is built. Check the output of
        :py:func:`describe()` to see what features exactly have been added.
    """
    pair_list = _parse_pairwise_input(
        indices, indices2, self._logger, fname='add_contacts()')
    pair_list = self._check_indices(pair_list)
    self.__add_feature(ContactFeature(self.topology, pair_list, threshold, periodic))
def add_residue_mindist(self,
                        residue_pairs='all',
                        scheme='closest-heavy',
                        ignore_nonprotein=True,
                        threshold=None):
    r"""
    Adds the minimum distance between residues to the feature list.

    Parameters
    ----------
    residue_pairs : 'all' or ndarray((n, 2), dtype=int)
        'all' computes distances between all residue pairs excluding first
        and second neighbors; otherwise the given residue-index pairs.
    scheme : 'ca', 'closest' or 'closest-heavy', default is closest-heavy
        which sub-group of atoms per residue enters the distance computation.
    ignore_nonprotein : boolean, default True
        ignore residues that are not of protein type (e.g. water molecules,
        post-translational modifications etc).
    threshold : float, optional, default is None
        distances below this threshold (in nm) yield 1.0, above yield 0.0;
        if None, the numerical distance is returned.

    .. note::
        Using scheme 'closest' or 'closest-heavy' with residue_pairs='all'
        computes nearly all interatomic distances per frame before
        extracting the closest pairs, which can be very time consuming.
        Prefer a subset of residues via :py:obj:`residue_pairs`.
    """
    if scheme != 'ca' and residue_pairs == 'all':
        self._logger.warning("Using all residue pairs with schemes like closest or closest-heavy is "
                             "very time consuming. Consider reducing the residue pairs")
    feature = ResidueMinDistanceFeature(self.topology, residue_pairs, scheme,
                                        ignore_nonprotein, threshold)
    self.__add_feature(feature)
def add_group_mindist(self,
                      group_definitions,
                      group_pairs='all',
                      threshold=None,
                      ):
    r"""
    Adds the minimum distance between groups of atoms to the feature list.
    If the groups of atoms are identical to residues, use
    :py:obj:`add_residue_mindist <pyemma.coordinates.data.featurizer.MDFeaturizer.add_residue_mindist>`.

    Parameters
    ----------
    group_definitions : list of 1D-arrays/iterables of atom indices, one
        per group. If there is only one group definition, the minimum
        distance within that group (excluding the self-distance) is
        computed and :py:obj:`group_pairs` is ignored.
    group_pairs : 'all' or ndarray((n, 2), dtype=int)
        'all' computes minimum distances between all pairs of groups;
        otherwise the given group-index pairs.
    threshold : float, optional, default is None
        distances below this threshold (in nm) yield 1.0, above yield 0.0;
        if None, the numerical distance is returned.
    """
    # Some thorough input checking and reformatting
    __, group_pairs, distance_list, group_identifiers = _parse_groupwise_input(
        group_definitions, group_pairs, self._logger, 'add_group_mindist')
    distance_list = self._check_indices(distance_list)
    feature = GroupMinDistanceFeature(self.topology, group_pairs, distance_list,
                                      group_identifiers, threshold)
    self.__add_feature(feature)
def add_angles(self, indexes, deg=False, cossin=False):
    """
    Adds angles defined by atom triplets to the feature list.

    Parameters
    ----------
    indexes : np.ndarray, shape=(num_pairs, 3), dtype=int
        an array with triplets of atom indices
    deg : bool, optional, default = False
        report angles in degrees instead of radians.
    cossin : bool, optional, default = False
        report each angle as a (sin(x), cos(x)) pair; useful when computing
        means in feature space (e.g. TICA/PCA, clustering).
    """
    checked = self._check_indices(indexes, pair_n=3)
    self.__add_feature(AngleFeature(self.topology, checked, deg=deg, cossin=cossin))
def add_dihedrals(self, indexes, deg=False, cossin=False):
    """
    Adds dihedrals defined by atom quadruplets to the feature list.

    Parameters
    ----------
    indexes : np.ndarray, shape=(num_pairs, 4), dtype=int
        an array with quadruplets of atom indices
    deg : bool, optional, default = False
        report angles in degrees instead of radians.
    cossin : bool, optional, default = False
        report each angle as a (sin(x), cos(x)) pair; useful when computing
        means in feature space (e.g. TICA/PCA, clustering).
    """
    checked = self._check_indices(indexes, pair_n=4)
    self.__add_feature(DihedralFeature(self.topology, checked, deg=deg, cossin=cossin))
def add_backbone_torsions(self, selstr=None, deg=False, cossin=False):
    """
    Adds all backbone phi/psi angles, or the subset matched by
    :obj:`selstr`, to the feature list.

    Parameters
    ----------
    selstr : str, optional, default = ""
        mdtraj atom-selection string restricting the torsions; by default
        all phi/psi angles found in the topology are used.
    deg : bool, optional, default = False
        report angles in degrees instead of radians.
    cossin : bool, optional, default = False
        report each angle as a (sin(x), cos(x)) pair; useful when computing
        means in feature space (e.g. TICA/PCA, clustering).
    """
    feature = BackboneTorsionFeature(
        self.topology, selstr=selstr, deg=deg, cossin=cossin)
    self.__add_feature(feature)
def add_chi1_torsions(self, selstr="", deg=False, cossin=False):
    """
    Adds all chi1 angles, or the subset matched by :obj:`selstr`, to the
    feature list.

    Parameters
    ----------
    selstr : str, optional, default = ""
        mdtraj atom-selection string restricting the torsions; by default
        all chi1 angles found in the topology are used.
    deg : bool, optional, default = False
        report angles in degrees instead of radians.
    cossin : bool, optional, default = False
        report each angle as a (sin(x), cos(x)) pair; useful when computing
        means in feature space (e.g. TICA/PCA, clustering).
    """
    feature = Chi1TorsionFeature(
        self.topology, selstr=selstr, deg=deg, cossin=cossin)
    self.__add_feature(feature)
def add_custom_feature(self, feature):
"""
Adds a custom feature to the feature list.
Parameters
----------
feature : object
an object with interface like CustomFeature (map, describe methods)
"""
if feature.dimension <= 0:
raise ValueError("Dimension has to be positive. "
"Please override dimension attribute in feature!")
if not hasattr(feature, 'map'):
raise ValueError("no map method in given feature")
else:
if not callable(getattr(feature, 'map')):
raise ValueError("map exists but is not a method")
self.__add_feature(feature)
def add_minrmsd_to_ref(self, ref, ref_frame=0, atom_indices=None, precentered=False):
r"""
Adds the minimum root-mean-square-deviation (minrmsd) with respect to a reference structure to the feature list.
Parameters
----------
ref:
Reference structure for computing the minrmsd. Can be of two types:
1. :py:obj:`mdtraj.Trajectory` object
2. filename for mdtraj to load. In this case, only the :py:obj:`ref_frame` of that file will be used.
ref_frame: integer, default=0
Reference frame of the filename specified in :py:obj:`ref`.
This parameter has no effect if :py:obj:`ref` is not a filename.
atom_indices: array_like, default=None
Atoms that will be used for:
1. aligning the target and reference geometries.
2. computing rmsd after the alignment.
If left to None, all atoms of :py:obj:`ref` will be used.
precentered: bool, default=False
Use this boolean at your own risk to let mdtraj know that the target conformations are already
centered at the origin, i.e., their (uniformly weighted) center of mass lies at the origin.
This will speed up the computation of the rmsd.
"""
f = MinRmsdFeature(ref, ref_frame=ref_frame, atom_indices=atom_indices, topology=self.topology,
precentered=precentered)
self.__add_feature(f)
def add_custom_func(self, func, dim, *args, **kwargs):
""" adds a user defined function to extract features
Parameters
----------
func : function
a user-defined function, which accepts mdtraj.Trajectory object as
first parameter and as many optional and named arguments as desired.
Has to return a numpy.ndarray
dim : int
output dimension of :py:obj:`function`
args : any number of positional arguments
these have to be in the same order as :py:obj:`func` is expecting them
kwargs : dictionary
named arguments passed to func
"""
f = CustomFeature(func, dim=dim, *args, **kwargs)
self.add_custom_feature(f)
def dimension(self):
""" current dimension due to selected features
Returns
-------
dim : int
total dimension due to all selection features
"""
dim = sum(f.dimension for f in self.active_features)
return dim
def map(self, traj):
"""
Maps an mdtraj Trajectory object to the selected output features
Parameters
----------
traj : mdtraj Trajectory
Trajectory object used as an input
Returns
-------
out : ndarray((T, n), dtype=float32)
Output features: For each of T time steps in the given trajectory,
a vector with all n output features selected.
"""
# if there are no features selected, return given trajectory
if len(self.active_features) == 0:
warnings.warn("You have no features selected."
" Returning plain coordinates.")
s = traj.xyz.shape
new_shape = (s[0], s[1] * s[2])
return traj.xyz.reshape(new_shape)
# handle empty chunks (which might occur due to time lagged access
if traj.xyz.shape[0] == 0:
return np.empty((0, self.dimension()))
# TODO: define preprocessing step (RMSD etc.)
# otherwise build feature vector.
feature_vec = []
# TODO: consider parallel evaluation computation here, this effort is
# only worth it, if computation time dominates memory transfers
for f in self.active_features:
# perform sanity checks for custom feature input
if isinstance(f, CustomFeature):
# NOTE: casting=safe raises in numpy>=1.9
vec = f.map(traj).astype(np.float32, casting='safe')
if vec.shape[0] == 0:
vec = np.empty((0, f.dimension))
if not isinstance(vec, np.ndarray):
raise ValueError('Your custom feature %s did not return'
' a numpy.ndarray!' % str(f.describe()))
if not vec.ndim == 2:
raise ValueError('Your custom feature %s did not return'
' a 2d array. Shape was %s'
% (str(f.describe()),
str(vec.shape)))
if not vec.shape[0] == traj.xyz.shape[0]:
raise ValueError('Your custom feature %s did not return'
' as many frames as it received!'
'Input was %i, output was %i'
% (str(f.describe()),
traj.xyz.shape[0],
vec.shape[0]))
else:
vec = f.map(traj).astype(np.float32)
feature_vec.append(vec)
if len(feature_vec) > 1:
res = np.hstack(feature_vec)
else:
res = feature_vec[0]
return res
| {
"content_hash": "9b844eaaa92fefd9ad03757bd6c09df5",
"timestamp": "",
"source": "github",
"line_count": 1342,
"max_line_length": 149,
"avg_line_length": 37.89269746646796,
"alnum_prop": 0.5737040824353025,
"repo_name": "trendelkampschroer/PyEMMA",
"id": "37cb5df9cfa45b104be34c527884879ba70c1d3b",
"size": "52290",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "pyemma/coordinates/data/featurizer.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "44510"
},
{
"name": "Python",
"bytes": "1172743"
}
],
"symlink_target": ""
} |
import unittest
import marked
class MarkedTests(unittest.TestCase):
pass
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "4c552d15255c82ff9d4cf71cd8b21b61",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 37,
"avg_line_length": 11.727272727272727,
"alnum_prop": 0.6511627906976745,
"repo_name": "1stvamp/marked",
"id": "4c932ca3a2b4699577156e7c6b04baeb922bb3b0",
"size": "129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "marked_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4620"
}
],
"symlink_target": ""
} |
"""
Created on Mon Dec 07 22:08:13 2015
@author: derricw
Demonstrates use of the Sentech DLL using ctypes.
1) Loads the header file and dll.
2) Checks for available cameras.
3) Gets a camera handle.
4) Gets image properties from the camera.
5) Sets up a buffer for the image.
6) Continuously grabs images from the camera.
6) Displays the images in an opencv window until user hits ESC
"""
import traceback
import ctypes
from ctypes import *
malloc = ctypes.cdll.msvcrt.malloc #windows
free = ctypes.cdll.msvcrt.free
import numpy as np
import cv2
from pysentech import SentechDLL
# load the dll
sdk_folder = r"C:\Users\derricw\Downloads\StandardSDK(v3.08)\StandardSDK(v3.08)"
dll = SentechDLL(sdk_folder)
#dll = SentechDLL() # or this, if you have set SENTECHPATH env variable
print("DLL loaded!")
# check for cameras
cameras_available = dll.StCam_CameraCount(None)
print("Cameras found: {}".format(cameras_available))
if cameras_available < 1:
raise Exception("No cameras found.")
# Open a camera
camera = dll.StCam_Open(0)
handle_id = camera.contents.value
if handle_id > 0:
print("Camera open! Handle: {}".format(handle_id))
else:
raise Exception("Failed to initialize camera!")
try:
# Get image shape
cwidth = c_ulong()
cheight = c_ulong()
creserved = c_ulong()
cscanmode = c_ushort()
coffsetx = c_ulong()
coffsety = c_ulong()
dll.StCam_GetImageSize(camera, byref(creserved), byref(cscanmode),
byref(coffsetx), byref(coffsety), byref(cwidth),
byref(cheight))
width, height = cwidth.value, cheight.value
print("Camera image shape: {}x{}".format(width, height))
# Set pixel format so that the rest of this example works properly
pixformat = dll.STCAM_PIXEL_FORMAT_08_MONO_OR_RAW
ret = dll.StCam_SetPreviewPixelFormat(camera, pixformat)
if not ret:
print("Failed to set pixel format!")
# Get pixel format
cpixformat = c_ulong()
dll.StCam_GetPreviewPixelFormat(camera, byref(cpixformat))
pixformat = cpixformat.value
print("Camera pixel format: {}".format(pixformat))
# Get bits per pixel
cbpp = c_ulong()
dll.StCam_GetTransferBitsPerPixel(camera, byref(cbpp))
bpp = cbpp.value
print("Camera bits per pixel: {}".format(bpp))
# Get bytes per image
cbpi = c_ulong()
dll.StCam_GetRawDataSize(camera, byref(cbpi))
bpi = cbpi.value
print("Camera bytes per image: {}".format(bpi))
# Allocate memory for image
imgdata = cast(create_string_buffer(bpi), POINTER(c_byte))
# Set up display window
cv2.namedWindow("pysentech")
# Transfer images from camera until user hits ESC
cbytesxferred = c_ulong()
cframeno = c_ulong()
cmillisecs = c_ulong(1000)
while True:
ret = dll.StCam_TakeRawSnapShot(camera, imgdata, bpi,
byref(cbytesxferred), byref(cframeno),
cmillisecs)
if not ret:
print("Failed to transfer image from camera.")
# Make image array
array = (c_ubyte * int(height*bpi) *
int(width*bpi)).from_address(addressof(imgdata.contents))
# Convert image array to numpy so we can display it easily
npimg = np.ndarray(buffer=array, dtype=np.uint8, shape=(height, width))
# Show in display window
cv2.imshow("pysentech", npimg)
k = cv2.waitKey(1)
if k == 27:
# ESC to quit
break
cv2.destroyAllWindows()
# Free buffer
del imgdata
except Exception:
traceback.print_exc()
# Close the camera
dll.StCam_Close(camera) | {
"content_hash": "982e0a7a30833d12e449e9c019cb065f",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 80,
"avg_line_length": 28.59090909090909,
"alnum_prop": 0.6375198728139905,
"repo_name": "derricw/pysentech",
"id": "619472b6fd352abb9dd294526aeb6825b1ef28e0",
"size": "3798",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysentech/examples/low_level_cv2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42988"
}
],
"symlink_target": ""
} |
import random
import time
import sys
#variables
name = input("What's your name? ")
counter = 0
answer = ""
userscore = 0
begin = input("Are you ready? ")
time.sleep(1)
if begin == "yes":
time.sleep(1)
print("Welcome to the Maths Quiz",name,"!")
time.sleep(1)
while counter<10:
number1 = random.randint(0,20)
number2 = random.randint(0,15)
operators = random.randint(1,3)
if operators == 1:
print("What is",number1,"+",number2,)
ans = number1 + number2
counter = counter+1
elif operators == 2:
print("What is",number1,"*",number2,)
ans = number1 * number2
counter = counter+1
else:
print("What is",number1,"-",number2,)
ans = number1 - number2
counter = counter+1
useranswer = int(input())
if useranswer == ans:
print("Correct!")
userscore = userscore + 1
else:
print("Wrong!")
if userscore<5:
time.sleep(1)
print("Better luck next time",name,"you scored",userscore,"/ 10!")
else:
time.sleep(1)
print("Congratulations",name,"you scored",userscore,"/ 10!")
else:
print("James isn't worth oxygen!")
sys.exit()
time.sleep(4)
| {
"content_hash": "7a92e8746f93f0d4d525deb86a85c11f",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 74,
"avg_line_length": 20.733333333333334,
"alnum_prop": 0.5731511254019293,
"repo_name": "AlexEaton1105/computerScience",
"id": "8d09e6822743a61c22c6105486d7fd5d62812fe8",
"size": "1244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jamesQuiz.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10651"
}
],
"symlink_target": ""
} |
import argparse
import socket
import sys
import string
import random
import time
import numpy
def pong_server(host, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
sock.bind((host, port))
except socket.error as err:
print("Socket bind failed.", err)
sys.exit(1)
sock.listen(0)
print("Listening on {0}:{1}...".format(host, port))
try:
while True:
conn, addr = sock.accept()
print("Incoming connection from {0}:{1}".format(*addr))
file_wrapper = conn.makefile("r")
try:
while True:
line = file_wrapper.readline()
if not line:
break
cmd = line.split()
if len(cmd) >= 2 and cmd[0] == "ping":
pong_size = 0
try:
pong_size = max(0, int(cmd[1]))
except ValueError:
pass
conn.send("pong ")
while pong_size > 0:
chunk_size = min(pong_size, 4096)
data = 'Y' * chunk_size
conn.send(data)
pong_size -= chunk_size
conn.send("\n")
else:
print("Invalid command received.")
break
except socket.error:
pass
finally:
file_wrapper.close()
conn.close()
print("Connection closed.")
except KeyboardInterrupt:
print("Interrupted. Shutting down.")
sock.close()
def ping(host, port, ping_size = 10, pong_size = 500000, seconds = 10):
roundtrips = []
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((host, port))
file_wrapper = sock.makefile("r")
start_time = time.time()
try:
r = 1
now = 0
print("seq\troundtrip-time\telapsed-time")
while now < start_time + seconds:
t0 = time.time()
sock.send("ping {0} ".format(pong_size))
data_left = ping_size
while data_left > 0:
chunk_size = min(data_left, 4096)
data = 'X' * chunk_size
sock.send(data)
data_left -= chunk_size
sock.send("\n")
line = file_wrapper.readline()
now = time.time()
dt = now - t0
roundtrips.append(dt)
print("{0}\t{1}\t{2}".format(r, dt, now - start_time))
r += 1
finally:
file_wrapper.close()
except KeyboardInterrupt:
print("Interrupted. Shutting down.")
except:
print("Unexpected exception: {0}".format(sys.exc_info()))
finally:
sock.close()
print("Connection closed.")
print("Roundtrips: {count}".format(count=len(roundtrips)))
print("Average: {average}".format(average=numpy.mean(roundtrips)))
print("Min: {min}".format(min=numpy.min(roundtrips)))
print("Max: {max}".format(max=numpy.max(roundtrips)))
print("Stddev: {stddev}".format(stddev=numpy.std(roundtrips)))
if __name__ == "__main__":
argparser = argparse.ArgumentParser()
argparser.add_argument("mode", type=str, choices=['ping', 'pong'], help="program mode")
argparser.add_argument("host", type=str, help="host ip address (in pong mode 'ALL' indicates bind to all addresses)")
argparser.add_argument("port", type=int, help="tcp port")
argparser.add_argument("--ping-size", type=int, default=10000, help="ping payload size (only in ping mode)")
argparser.add_argument("--pong-size", type=int, default=500, help="pong payload size (only in ping mode)")
argparser.add_argument("--seconds", type=int, default=30, help="duration in seconds (only in ping mode)")
args = argparser.parse_args()
if args.mode == 'pong':
host = '0.0.0.0' if args.host == 'ALL' else args.host
pong_server(host, args.port)
else:
try:
host = socket.gethostbyname(args.host)
except:
print("Could not resolve host '{0}'.".format(args.host))
exit(1)
ping(host, args.port, ping_size=args.ping_size, pong_size=args.pong_size, seconds=args.seconds)
| {
"content_hash": "204aa8e5ebfd162f5f16aa75664d3d44",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 118,
"avg_line_length": 28.984126984126984,
"alnum_prop": 0.6486856516976999,
"repo_name": "tbknl/pingpongpy",
"id": "a3a6a93734a21f917dc0513aae9c5f420ac4ed5d",
"size": "3675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pingpong.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3675"
}
],
"symlink_target": ""
} |
import os
from syncplay import constants
from syncplay.players.mpv import MpvPlayer
from syncplay.utils import playerPathExists
class MpvnetPlayer(MpvPlayer):
@staticmethod
def run(client, playerPath, filePath, args):
constants.MPV_NEW_VERSION = True
constants.MPV_OSC_VISIBILITY_CHANGE_VERSION = True
return MpvnetPlayer(client, MpvnetPlayer.getExpandedPath(playerPath), filePath, args)
@staticmethod
def getDefaultPlayerPathsList():
l = []
for path in constants.MPVNET_PATHS:
p = MpvnetPlayer.getExpandedPath(path)
if p:
l.append(p)
return l
@staticmethod
def isValidPlayerPath(path):
if "mpvnet" in path and MpvnetPlayer.getExpandedPath(path):
return True
return False
@staticmethod
def getExpandedPath(playerPath):
if not playerPathExists(playerPath):
if playerPathExists(playerPath + "mpvnet.exe"):
playerPath += "mpvnet.exe"
return playerPath
elif playerPathExists(playerPath + "\\mpvnet.exe"):
playerPath += "\\mpvnet.exe"
return playerPath
if os.access(playerPath, os.X_OK):
return playerPath
for path in os.environ['PATH'].split(':'):
path = os.path.join(os.path.realpath(path), playerPath)
if os.access(path, os.X_OK):
return path
@staticmethod
def getIconPath(path):
return constants.MPVNET_ICONPATH
| {
"content_hash": "fdb66793c17d8e92368cfed7232f4eb7",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 97,
"avg_line_length": 30.03846153846154,
"alnum_prop": 0.617157490396927,
"repo_name": "Syncplay/syncplay",
"id": "ef28d18b687326010ed56356652c15bda0e9dc39",
"size": "1562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "syncplay/players/mpvnet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "53024"
},
{
"name": "Makefile",
"bytes": "3662"
},
{
"name": "Python",
"bytes": "1073792"
},
{
"name": "Shell",
"bytes": "9820"
}
],
"symlink_target": ""
} |
"""Support for Met.no weather service."""
import logging
import voluptuous as vol
from homeassistant.components.weather import PLATFORM_SCHEMA, WeatherEntity
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
CONF_ELEVATION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
LENGTH_METERS,
LENGTH_MILES,
PRESSURE_HPA,
PRESSURE_INHG,
TEMP_CELSIUS,
)
from homeassistant.helpers import config_validation as cv
from homeassistant.util.distance import convert as convert_distance
from homeassistant.util.pressure import convert as convert_pressure
from .const import CONF_TRACK_HOME, DOMAIN
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = (
"Weather forecast from met.no, delivered by the Norwegian "
"Meteorological Institute."
)
DEFAULT_NAME = "Met.no"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Inclusive(
CONF_LATITUDE, "coordinates", "Latitude and longitude must exist together"
): cv.latitude,
vol.Inclusive(
CONF_LONGITUDE, "coordinates", "Latitude and longitude must exist together"
): cv.longitude,
vol.Optional(CONF_ELEVATION): int,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Met.no weather platform."""
_LOGGER.warning("Loading Met.no via platform config is deprecated")
# Add defaults.
config = {CONF_ELEVATION: hass.config.elevation, **config}
if config.get(CONF_LATITUDE) is None:
config[CONF_TRACK_HOME] = True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=config
)
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add a weather entity from a config_entry."""
coordinator = hass.data[DOMAIN][config_entry.entry_id]
async_add_entities(
[
MetWeather(
coordinator, config_entry.data, hass.config.units.is_metric, False
),
MetWeather(
coordinator, config_entry.data, hass.config.units.is_metric, True
),
]
)
class MetWeather(WeatherEntity):
"""Implementation of a Met.no weather condition."""
def __init__(self, coordinator, config, is_metric, hourly):
"""Initialise the platform with a data instance and site."""
self._config = config
self._coordinator = coordinator
self._is_metric = is_metric
self._hourly = hourly
self._name_appendix = "-hourly" if hourly else ""
async def async_added_to_hass(self):
"""Start fetching data."""
self.async_on_remove(
self._coordinator.async_add_listener(self.async_write_ha_state)
)
async def async_update(self):
"""Only used by the generic entity update service."""
await self._coordinator.async_request_refresh()
@property
def track_home(self):
"""Return if we are tracking home."""
return self._config.get(CONF_TRACK_HOME, False)
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def unique_id(self):
"""Return unique ID."""
if self.track_home:
return f"home{self._name_appendix}"
return f"{self._config[CONF_LATITUDE]}-{self._config[CONF_LONGITUDE]}{self._name_appendix}"
@property
def name(self):
"""Return the name of the sensor."""
name = self._config.get(CONF_NAME)
if name is not None:
return f"{name}{self._name_appendix}"
if self.track_home:
return f"{self.hass.config.location_name}{self._name_appendix}"
return f"{DEFAULT_NAME}{self._name_appendix}"
@property
def condition(self):
"""Return the current condition."""
return self._coordinator.data.current_weather_data.get("condition")
@property
def temperature(self):
"""Return the temperature."""
return self._coordinator.data.current_weather_data.get("temperature")
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def pressure(self):
"""Return the pressure."""
pressure_hpa = self._coordinator.data.current_weather_data.get("pressure")
if self._is_metric or pressure_hpa is None:
return pressure_hpa
return round(convert_pressure(pressure_hpa, PRESSURE_HPA, PRESSURE_INHG), 2)
@property
def humidity(self):
"""Return the humidity."""
return self._coordinator.data.current_weather_data.get("humidity")
@property
def wind_speed(self):
"""Return the wind speed."""
speed_m_s = self._coordinator.data.current_weather_data.get("wind_speed")
if self._is_metric or speed_m_s is None:
return speed_m_s
speed_mi_s = convert_distance(speed_m_s, LENGTH_METERS, LENGTH_MILES)
speed_mi_h = speed_mi_s / 3600.0
return int(round(speed_mi_h))
@property
def wind_bearing(self):
"""Return the wind direction."""
return self._coordinator.data.current_weather_data.get("wind_bearing")
@property
def attribution(self):
"""Return the attribution."""
return ATTRIBUTION
@property
def forecast(self):
"""Return the forecast array."""
if self._hourly:
return self._coordinator.data.hourly_forecast
return self._coordinator.data.daily_forecast
| {
"content_hash": "46066e048a8fe6e01e201c140620e712",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 99,
"avg_line_length": 30.41711229946524,
"alnum_prop": 0.6366033755274262,
"repo_name": "titilambert/home-assistant",
"id": "e2827367757a3f66fa7f5cf7037ee821a16b7c15",
"size": "5688",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/met/weather.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "25849092"
},
{
"name": "Shell",
"bytes": "4410"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import inspect
import requests
from datetime import datetime, timedelta
import iso8601
import base64
import httplib
import json
import os
import ast
from pprint import pprint
from urlparse import urlparse
import copy
from cloudmesh.config.cm_config import cm_config
from cloudmesh.config.cm_config import cm_config_server
from cloudmesh.config.cm_config import cm_config_flavor
from cloudmesh.iaas.ComputeBaseType import ComputeBaseType
from cloudmesh_base.logger import LOGGER
# import novaclient
# from novaclient.openstack.common import strutils
def lineno():
"""Returns the current line number in our program."""
return inspect.currentframe().f_back.f_lineno
log = LOGGER(__file__)
def donotchange(fn):
return fn
class openstack(ComputeBaseType):
# : the type of the cloud. It is "openstack"
type = "openstack" # global var
# : a dict with the images
images = {} # global var
# : a dict with the flavors
flavors = {} # global var
# : a dict with the servers
servers = {} # global var
# : a dict with the users
# users = {} # global var
# : a dict containing the credentionls read with cm_config
# credential = None # global var
user_credential = None # global var
admin_credential = None
with_admin_credential = None
user_token = None
admin_token = None
# : a unique label for the clous
label = None # global var
# cm_type = "openstack"
# name = "undefined"
# : This is the cloud, should be internal though with _
cloud = None # internal var for the cloud client in openstack
keystone = None
# : The user id
user_id = None # internal var
# _nova = nova
def _load_admin_credential(self):
isproduction = cm_config_server().get('cloudmesh.server.production')
if isproduction:
if self.admin_credential is None:
if 'keystone' in cm_config_server().get('cloudmesh.server'):
self.idp_clouds = cm_config_server().get(
"cloudmesh.server.keystone").keys()
self.with_admin_credential = self.label in self.idp_clouds
if self.with_admin_credential:
try:
self.admin_credential = cm_config_server().get(
"cloudmesh.server.keystone.{0}".format(self.label))
except:
log.error(str(
lineno()) + " No admin credential found! Please check your cloudmesh_server.yaml file.")
else:
self.admin_credential = None
log.info(
str(lineno()) + ": The cloud {0} has no admin credential".format(self.label))
return self.admin_credential
else:
return None
#
# initialize
#
# possibly make connext seperate
def __init__(self,
label,
credential=None,
admin_credential=None,
service_url_type='publicURL'):
"""
initializes the openstack cloud from a file
located at cloudmesh.yaml.
However if a credential dict is used it is used instead
"""
self.clear()
self.label = label
user_credential = credential # HACK to avoid changes in older code
self.user_credential = user_credential
self.admin_credential = admin_credential
self.service_url_type = service_url_type
if user_credential is None:
try:
self.compute_config = cm_config()
self.user_credential = self.compute_config.credential(label)
except:
log.error(str(
lineno()) + ": No user credentail found! Please check your cloudmesh.yaml file.")
# sys.exit(1)
self._load_admin_credential()
self.connect()
def clear(self):
"""
clears the data of this openstack instance, a new connection
including reading the credentials and a refresh needs to be
called to obtain again data.
"""
# Todo: we may just use the name of the class instead as the type
self._clear()
self.user_token = None
self.admin_token = None
self.user_credentials = None
self.admin_credentials = None
self.type = "openstack"
def connect(self):
"""
creates tokens for a connection
"""
log.info(str(lineno()) + ": Loading User Credentials")
if self.user_credential is None:
log.error(
str(lineno()) + ": error connecting to openstack compute, credential is None")
elif not self.user_token:
self.user_token = self.get_token(self.user_credentials)
# check if keystone is defined, and if failed print log msg
#
log.info(str(lineno()) + ": Loading Admin Credentials")
if (self.admin_credential is None) and (self.with_admin_credential):
log.error(
str(lineno()) + ":error connecting to openstack compute, credential is None")
else:
try:
if self.with_admin_credential and (not self.admin_token):
self.admin_token = self.get_token(self.admin_credential)
except:
log.error(str(lineno()) + ": error connecting to openstack "
+ "keystone, credential or server name is invalid")
def DEBUG(self, msg, line_number=None):
if line_number is None:
line_number = ""
if msg == "credential":
debug_dict = dict(self.user_credential)
debug_dict['OS_PASSWORD'] = "********"
log.debug(
"{1} - GET CRED {0}".format(debug_dict, str(line_number)))
else:
log.debug("{0} - {1}".format(str(line_number), str(msg)))
def auth(self):
# DEBUG
try:
_args = locals()
if 'self' in _args:
del (_args['self'])
log.debug("[{0}()] called with [{1}]".format(sys._getframe().f_code.co_name,
str(_args)))
log.debug("user_token:{0}".format(str(self.user_token)))
except:
pass
return 'access' in self.user_token
def get_token(self, credential=None):
# DEBUG
try:
import sys
_args = locals()
if 'self' in _args:
del (_args['self'])
log.debug("[{0}()] called with [{1}]".format(sys._getframe().f_code.co_name,
str(_args)))
except:
pass
if credential is None:
credential = self.user_credential
self.DEBUG("credential", lineno())
param = None
if 'OS_TENANT_NAME' in credential:
param = {"auth": {"passwordCredentials": {
"username": credential['OS_USERNAME'],
"password": credential['OS_PASSWORD'],
},
"tenantName": credential['OS_TENANT_NAME']
}
}
elif 'OS_TENANT_ID' in credential:
param = {"auth": {"passwordCredentials": {
"username": credential['OS_USERNAME'],
"password": credential['OS_PASSWORD'],
},
"tenantId": credential['OS_TENANT_ID']
}
}
url = "{0}/tokens".format(credential['OS_AUTH_URL'])
log.debug(str(lineno()) + ": URL {0}".format(url))
headers = {'content-type': 'application/json'}
verify = self._get_cacert(credential)
print_param = copy.deepcopy(param)
print_param["auth"]["passwordCredentials"]["password"] = "********"
log.debug(str(lineno()) + ":PARAM {0}".format(json.dumps(print_param)))
log.debug(str(lineno()) + ":HEADER {0}".format(headers))
log.debug(str(lineno()) + ":VERIFY {0}".format(verify))
r = requests.post(url,
data=json.dumps(param),
headers=headers,
verify=verify)
# pprint (r.json())
try:
sanitized_r = copy.deepcopy(r.json())
if 'access' in sanitized_r:
if 'token' in sanitized_r['access']:
if 'id' in sanitized_r['access']['token']:
sanitized_r['access']['token']['id'] = '******'
log.debug("{0}".format(str(sanitized_r)))
except:
pass
return r.json()
#
# FIND USER ID
#
def find_user_id(self, force=False):
"""
this method returns the user id and stores it for later use.
"""
config = cm_config()
if not force:
try:
self.user_id = self.user_credential['OS_USER_ID']
return self.user_id
except:
self.user_id = None
log.error("OS_USER_ID not set")
self.user_token = self.get_token()
self.user_id = self.user_token['access']['user']['id']
return self.user_id
# not working yet
# user role is disalowed to execute this by policy setting
# admin role gives uninformative error
def get_server_usage(self, serverid):
apiurl = "servers/%s/diagnostics" % serverid
return self._get(msg=apiurl, kind='admin', urltype='adminURL')
def _get_service(self, type="compute", kind="user"):
token = self.user_token
# print token
if kind == "admin":
token = self.admin_token
for service in token['access']['serviceCatalog']:
if service['type'] == type:
break
return service
def _get_compute_service(self, token=None):
return self._get_service("compute")
def _get_cacert(self, credential=None):
if credential is None:
credential = self.user_credential
verify = False
if 'OS_CACERT' in credential:
if credential['OS_CACERT'] is not None and \
credential['OS_CACERT'] != "None" and \
os.path.isfile(credential['OS_CACERT']):
verify = credential['OS_CACERT']
return verify
def _post(self, posturl, params=None, credential=None):
# print posturl
# print self.config
if credential is None:
credential = self.user_credential
conf = self._get_service_endpoint("compute")
headers = {'content-type': 'application/json',
'X-Auth-Token': '%s' % conf['token']}
# print headers
# print self._get_cacert(credential)
r = requests.post(posturl, headers=headers,
data=json.dumps(params),
verify=self._get_cacert(credential))
ret = {"msg": "success"}
if r.text:
try:
ret = r.json()
except:
pass
return ret
def _put(self, posturl, credential=None, params=None):
# print self.config
if credential is None:
credential = self.user_credential
conf = self._get_service_endpoint("compute")
headers = {'content-type': 'application/json',
'X-Auth-Token': '%s' % conf['token']}
# print headers
r = requests.put(posturl, headers=headers,
data=json.dumps(params),
verify=self._get_cacert(credential))
ret = {"msg": "success"}
if r.text:
try:
ret = r.json()
except:
pass
return ret
#
def ks_get_extensions(self):
pass
# conf = self._get_service_endpoint("identity")
def keypair_list(self):
apiurl = "os-keypairs"
return self._get(msg=apiurl, urltype=self.service_url_type)
def keypair_add(self, keyname, keycontent):
log.debug(str(lineno()) + ":adding a keypair in cm_compute...")
# keysnow = self.keypair_list()
url = self._get_service_endpoint("compute")[self.service_url_type]
posturl = "%s/os-keypairs" % url
params = {"keypair": {"name": "%s" % keyname,
"public_key": "%s" % keycontent
}
}
# print params
return self._post(posturl, params)
def keypair_remove(self, keyname):
log.debug(str(lineno()) + ":removing a keypair in cm_compute...")
conf = self._get_service_endpoint("compute")
url = conf[self.service_url_type]
url = "%s/os-keypairs/%s" % (url, keyname)
headers = {'content-type': 'application/json',
'X-Auth-Token': '%s' % conf['token']}
r = requests.delete(url, headers=headers, verify=self._get_cacert())
ret = {"msg": "success"}
if r.text:
try:
ret = r.json()
except:
pass
return ret
def vm_create(self, name,
              flavor_name,
              image_id,
              security_groups=None,
              key_name=None,
              meta=None,
              userdata=None):
    """
    Start a vm via a compute REST api call.

    :param name: display name of the new server
    :param flavor_name: flavor reference, sent as ``flavorRef``
    :param image_id: image reference, sent as ``imageRef``
    :param security_groups: list of security group names; when empty or
        None the server joins the "default" group
    :param key_name: optional keypair name to inject
    :param meta: optional metadata dict.  Bug fix: the default used to be
        a shared mutable ``{}``; ``None`` now stands in for it, which is
        backward compatible for all callers.
    :param userdata: accepted but currently ignored (see TODO below)
    :return: decoded JSON response from the POST /servers call
    """
    #
    # TODO: add logic for getting the default image, flavor and key
    # from the profile information when they are not supplied
    #
    url = self._get_service_endpoint("compute")[self.service_url_type]
    posturl = "%s/servers" % url
    # build the security-groups fragment, defaulting to "default"
    secgroups = []
    if security_groups:
        for secgroup in security_groups:
            secgroups.append({"name": secgroup})
    else:
        secgroups = [{"name": "default"}]
    params = {
        "server": {
            "name": "%s" % name,
            "imageRef": "%s" % image_id,
            "flavorRef": "%s" % flavor_name,
            # max_count/min_count (instance multiplicity) intentionally
            # left at the server-side default of one instance
            "security_groups": secgroups,
            "metadata": meta if meta is not None else {},
        }
    }
    if key_name:
        params["server"]["key_name"] = key_name
    if userdata:
        #
        # TODO: strutils not defined, so user_data is never sent
        #
        # safe_userdata = strutils.safe_encode(userdata)
        # params["server"]["user_data"] = base64.b64encode(safe_userdata)
        safe_userdata = None
    log.debug(str(lineno()) + ":POST PARAMS {0}".format(params))
    return self._post(posturl, params)
def vm_delete(self, id):
    """
    Delete the server with the given *id* via the compute REST API.

    :return: ``{"msg": "success"}`` (DELETE has no body on success) or
        the decoded JSON error document
    """
    conf = self._get_service_endpoint("compute")
    url = "%s/servers/%s" % (conf[self.service_url_type], id)
    headers = {'content-type': 'application/json',
               'X-Auth-Token': '%s' % conf['token']}
    # no return body from an http delete via the rest api
    r = requests.delete(url, headers=headers, verify=self._get_cacert())
    ret = {"msg": "success"}
    if r.text:
        try:
            ret = r.json()
        except ValueError:
            # narrowed from a bare except: only swallow JSON-decode failures
            pass
    return ret
def stack_create(self, name, template_url, parameters, timeout_mins=60):
    """
    Create a stack by OpenStack Heat Orchestration.

    :param name: stack name
    :param template_url: URL of the heat template
    :param parameters: template parameters, either a dict or a string
        containing a python literal
    :param timeout_mins: stack creation timeout in minutes
    ref: http://developer.openstack.org/api-ref-orchestration-v1.html
    """
    url = self._get_service_endpoint("orchestration")[self.service_url_type]
    posturl = "%s/stacks" % url
    try:
        param = ast.literal_eval(parameters)
    except (ValueError, SyntaxError):
        # bug fix: literal_eval raises SyntaxError for malformed input,
        # which the original ValueError-only handler let escape; in
        # either case pass the parameters through untouched
        param = parameters
    params = {
        "stack_name": "%s" % name,
        "template_url": "%s" % template_url,
        "parameters": param,
        "timeout_mins": "%s" % timeout_mins
    }
    log.debug(str(lineno()) + ":POST PARAMS {0}".format(params))
    return self._post(posturl, params)
def stack_delete(self, stack_name):
    """
    Delete the named heat stack.

    The stack id is resolved first because the DELETE endpoint needs
    both the name and the id.

    :return: ``{"msg": "failed"}`` when the stack is unknown, otherwise
        the decoded response (or ``{"msg": "success"}`` on empty body)
    ref: http://developer.openstack.org/api-ref-orchestration-v1.html
    """
    conf = self._get_service_endpoint("orchestration")
    url = conf[self.service_url_type]
    headers = {'content-type': 'application/json',
               'X-Auth-Token': '%s' % conf['token']}
    # look the stack up to learn its id
    msg = "stacks/%s" % stack_name
    service = "orchestration"
    r1 = self._get(msg, service=service,
                   urltype=self.service_url_type)
    try:
        stack_id = r1['stack']['id']
    except KeyError:
        log.warning("stack does not exist ({0})".format(stack_name))
        ret = {"msg": "failed"}
        return ret
    url = "%s/stacks/%s/%s" % (url, stack_name, stack_id)
    # no return body from an http delete via the rest api
    r = requests.delete(url, headers=headers, verify=self._get_cacert())
    ret = {"msg": "success"}
    if r.text:
        try:
            ret = r.json()
        except ValueError:
            # narrowed from a bare except: only swallow JSON-decode failures
            pass
    return ret
# possibly for future use in network management via Neuron
# currently is not being used
def get_network_id(self):
    """Query the Neutron API for router / external-gateway information.

    NOTE(review): the computed ``ret`` (the floating ip) is never
    returned -- the raw response ``r`` is.  This looks unfinished
    (the surrounding comments say the method is not currently used);
    confirm the intended return value before relying on it.
    """
    ret = {"msg": "failed"}
    r = self._get('v2.0/routers', service='network', urltype=self.service_url_type)
    if "floating_ip" in r:
        ret = r["floating_ip"]["ip"]
    return r
def get_public_ip(self):
    """
    Allocate a floating ip from the pool via the rest api call.

    Tries the default pool first; on deployments where that fails with
    "Floating ip pool not found." (observed since Juno), the first pool
    reported by os-floating-ip-pools is used instead.

    :return: the allocated ip string, or ``{"msg": "failed"}``
    """
    url = self._get_service_endpoint("compute")[self.service_url_type]
    posturl = "%s/os-floating-ips" % url
    ret = {"msg": "failed"}
    # Default to the default pool, possibly 'nova'
    # Before the juno deployment, this always worked
    r = self._post(posturl)
    # Since Juno deployment, the pool name was changed
    if 'itemNotFound' in r:
        if 'message' in r['itemNotFound'] and r['itemNotFound']['message'] == 'Floating ip pool not found.':
            # get floating ip pool name first
            r = self._get('os-floating-ip-pools')
            if 'floating_ip_pools' in r:
                # use the first pool
                pool = r['floating_ip_pools'][0]['name']
                params = {'pool': pool}
                # reissue the request with returned pool name
                r = self._post(posturl, params)
    if "floating_ip" in r:
        ret = r["floating_ip"]["ip"]
    #
    # currently not being used
    # Nureon related operations
    # else:
    #     gatewayinfo = self.get_network_id()
    #     url = self._get_service_endpoint("network")[self.service_url_type]
    #     posturl = '%s/v2.0/floatingips' % url
    #     tenant_id = self.user_token['access']['token']['tenant']['id']
    #     params = {"floatingip":{"floating_network_id":<UUID from gatewayinfo>}}
    #     r = self._post(posturl)
    #     #r = self._get('/v2.0/floatingips',service='network')
    #     print (r)
    return ret
def assign_public_ip(self, serverid, ip):
    """Attach the floating ip *ip* to the server *serverid*."""
    base = self._get_service_endpoint("compute")[self.service_url_type]
    action_url = "%s/servers/%s/action" % (base, serverid)
    body = {"addFloatingIp": {
        "address": "%s" % ip
    }
    }
    log.debug("POST PARAMS {0}".format(body))
    return self._post(action_url, body)
def delete_public_ip(self, idofip):
    """
    Release an allocated floating ip that is not attached to a server.

    :param idofip: the id of the floating ip allocation
    :return: ``{"msg": "success"}`` or the decoded JSON error document
    """
    conf = self._get_service_endpoint("compute")
    url = "%s/os-floating-ips/%s" % (conf[self.service_url_type], idofip)
    headers = {'content-type': 'application/json',
               'X-Auth-Token': '%s' % conf['token']}
    r = requests.delete(url, headers=headers, verify=self._get_cacert())
    ret = {"msg": "success"}
    if r.text:
        try:
            ret = r.json()
        except ValueError:
            # narrowed from a bare except: only swallow JSON-decode failures
            pass
    return ret
def list_allocated_ips(self):
    """Return the list of floating ips allocated to the current account."""
    conf = self._get_service_endpoint("compute")
    listing_url = "%s/os-floating-ips" % conf[self.service_url_type]
    headers = {'content-type': 'application/json',
               'X-Auth-Token': '%s' % conf['token']}
    response = requests.get(listing_url, headers=headers,
                            verify=self._get_cacert())
    return response.json()["floating_ips"]
def release_unused_public_ips(self):
    """Delete every allocated floating ip not bound to an instance.

    Bug fix: the original used dict.iteritems(), which does not exist
    on Python 3; plain iteration over the allocation records is
    equivalent and portable.

    :return: True (always)
    """
    for ip in self.list_allocated_ips():
        # an allocation with no instance_id is allocated but unused
        if ip['instance_id'] is None:
            self.delete_public_ip(ip['id'])
    return True
def _get(self, msg, kind="user", service="compute", urltype="publicURL", payload=None, json=True):
    """Issue an authenticated GET against a catalog service endpoint.

    :param msg: path appended to the endpoint URL
    :param kind: "user" or "admin" -- selects the credential/token pair
    :param service: service catalog entry, e.g. "compute", "identity"
    :param urltype: endpoint flavor, e.g. "publicURL" or "adminURL"
    :param payload: optional dict of query parameters
    :param json: when True (default) return the decoded JSON body,
        otherwise the raw response object
    """
    credential = self.user_credential
    token = self.user_token
    # bug fix: the original used `kind is "admin"`, which compares object
    # identity and only matched through CPython string interning accident
    if kind == "admin":
        credential = self.admin_credential
        token = self.admin_token
    conf = self._get_service_endpoint(service)
    url = conf[urltype]
    url = "{0}/{1}".format(url, msg)
    log.debug(str(lineno()) + ": AUTH URL {0}".format(url))
    headers = {'X-Auth-Token': token['access']['token']['id']}
    r = requests.get(
        url, headers=headers, verify=self._get_cacert(credential), params=payload)
    log.debug(str(lineno()) + ": Response {0}".format(r))
    if json:
        return r.json()
    else:
        return r
# http
def _get_service_endpoint(self, type=None):
    """Return endpoint info for a catalog service as a small dict.

    Keys: 'publicURL' (region-specific when OS_REGION is set in the
    credential), 'internalURL' (only when the cloud exposes one),
    'adminURL' (or None), and the current user 'token'.

    :param type: service catalog entry; defaults to "compute"
    """
    if type is None:
        type = "compute"
    compute_service = self._get_service(type)
    # pprint(compute_service)
    credential = self.user_credential
    # print credential
    conf = {}
    # NOTE(review): duplicate assignment -- credential was already set
    # to self.user_credential a few lines above
    credential = self.user_credential
    conf['publicURL'] = str(compute_service['endpoints'][0]['publicURL'])
    # some cloud does not have this, e.g. HP cloud
    if 'internalURL' in compute_service['endpoints'][0]:
        conf['internalURL'] = str(compute_service['endpoints'][0]['internalURL'])
    # prefer the endpoint matching the configured region, if any
    if 'OS_REGION' in credential:
        for endpoint in compute_service['endpoints']:
            if endpoint['region'] == credential['OS_REGION']:
                conf['publicURL'] = endpoint['publicURL']
                break
    conf['adminURL'] = None
    if 'adminURL' in compute_service['endpoints'][0]:
        conf['adminURL'] = str(compute_service['endpoints'][0]['adminURL'])
    conf['token'] = str(self.user_token['access']['token']['id'])
    return conf
# new
def _now(self):
return datetime.now().strftime('%Y-%m-%dT%H-%M-%SZ')
# new
def _list_to_dict(self, list, id, type, time_stamp):
d = {}
# cm_type_version = self.compute_config.get('cloudmesh.clouds.{0}.cm_type_version'.format(self.label))
# log.debug ("CM TYPE VERSION {0}".format(cm_type_version))
for element in list:
element['cm_type'] = type
element['cm_cloud'] = self.label
element['cm_cloud_type'] = self.type
# element['cm_cloud_version'] = cm_type_version
element['cm_refresh'] = time_stamp
d[str(element[id])] = dict(element)
return d
# new
def get_extensions(self):
    """List the compute API extensions, indexed by extension name.

    Returns {} when the deployment does not expose /extensions (404).
    Bug fix: the request path was misspelled "extensons", so the call
    could never reach the real endpoint.
    """
    time_stamp = self._now()
    result = self._get("extensions", urltype=self.service_url_type, json=False)
    if result.status_code == 404:
        log.error("extensions not available")
        return {}
    else:
        list = result.json()
        return self._list_to_dict(list, 'name', "extensions", time_stamp)
def get_limits(self):
    '''Gets absolute and rate limit information, including information on
    currently used absolute limits.

    Cleanup: removed an unused local time stamp the original computed
    and never read.
    '''
    msg = "limits"
    return self._get(msg, urltype=self.service_url_type)['limits']
def get_absolute_limits(self, view="original"):
    '''Gets absolute limit information.

    Args:
        view (str) : two types of output available
            * original - the raw 'absolute' dict of integer values
            * fraction - "used / max" strings per resource category
    '''
    limits = self.get_limits()
    if view != "fraction":
        return limits['absolute']
    absolute = limits['absolute']
    # (used-key, max-key) pairs per reported category
    pairs = {"Cores": ('totalCoresUsed', 'maxTotalCores'),
             "Instances": ('totalInstancesUsed', 'maxTotalInstances'),
             "RAM": ('totalRAMUsed', 'maxTotalRAMSize'),
             "SecurityGroups": ('totalSecurityGroupsUsed', 'maxSecurityGroups'),
             "FloatingIps": ('totalFloatingIpsUsed', 'maxTotalFloatingIps')}
    fractions = {}
    for label, (used_key, max_key) in pairs.items():
        fractions[label] = "%s / %s" % (absolute[used_key], absolute[max_key])
    return fractions
# new
def get_servers(self):
    """Fetch all servers (detail view) and cache them on ``self.servers``."""
    stamp = self._now()
    raw = self._get("servers/detail", urltype=self.service_url_type)['servers']
    self.servers = self._list_to_dict(raw, 'id', "server", stamp)
    #
    # hack for the hp cloud west: normalize ids to strings
    #
    for record in self.servers.values():
        record['id'] = str(record['id'])
    return self.servers
# new
def get_flavors(self):
    """Fetch all flavors (detail view), keyed by name, cached on
    ``self.flavors``."""
    stamp = self._now()
    raw = self._get("flavors/detail", urltype=self.service_url_type)['flavors']
    self.flavors = self._list_to_dict(raw, 'name', "flavor", stamp)
    #
    # hack for the hp cloud west: normalize ids to strings
    #
    for record in self.flavors.values():
        record['id'] = str(record['id'])
    return self.flavors
def flavorid(self, name):
    """Return the key in ``self.flavors`` whose record is named *name*,
    or None when no flavor matches."""
    for key in self.flavors:
        if self.flavors[key]['name'] == name:
            return key

def flavor(self, id_or_name):
    """Look up a flavor record by its key or by its name.

    Bug fix: the original only returned a record when *id_or_name* was
    NOT already a key, silently returning None for valid keys.
    """
    if id_or_name in self.flavors:
        return self.flavors[id_or_name]
    key = self.flavorid(id_or_name)
    return self.flavors[key]
# new
def get_images(self):
    '''Fetch all images (detail view) and cache them on ``self.images``.'''
    stamp = self._now()
    raw = self._get("images/detail", urltype=self.service_url_type)['images']
    self.images = self._list_to_dict(raw, 'id', "image", stamp)
    return self.images
def get_security_groups(self):
    '''Fetch the tenant's security groups, cached on
    ``self.security_groups``.'''
    stamp = self._now()
    raw = self.list_security_groups()['security_groups']
    self.security_groups = self._list_to_dict(raw, 'id', 'security_group',
                                              stamp)
    return self.security_groups
def get_stacks(self):
    '''Fetch the active heat stacks, cached on ``self.stacks``.'''
    stamp = self._now()
    raw = self._get("stacks", service="orchestration",
                    urltype=self.service_url_type)['stacks']
    self.stacks = self._list_to_dict(raw, 'id', 'stacks', stamp)
    return self.stacks
def get_usage(self):
    '''Report usage statistics on compute and storage resources for the
    current tenant over the last 24 hours, cached on ``self.usage``.'''
    # NOTE(review): time_stamp is unused; kept for symmetry with siblings
    time_stamp = self._now()
    tenant_id = self.user_token['access']['token']['tenant']['id']
    msg = "os-simple-tenant-usage/{0}".format(tenant_id)
    # datetime objects end up as query parameters; presumably requests
    # stringifies them in a form the server accepts -- TODO confirm
    param = {"start": datetime.now() - timedelta(hours=24),
             "end": datetime.now()}
    _dict = self._get(msg, urltype=self.service_url_type,
                      payload=param)['tenant_usage']
    log.debug(_dict)
    self.usage = _dict
    return _dict
def get_quota(self):
    ''' View quotas for a tenant (project). Administrators only, depending
    on policy settings.

    :return: the 'quota_set' dict from os-quota-sets for the current
        tenant
    '''
    # NOTE(review): time_stamp is unused; kept for symmetry with siblings
    time_stamp = self._now()
    tenant_id = self.user_token['access']['token']['tenant']['id']
    msg = "os-quota-sets/{0}".format(tenant_id)
    _dict = self._get(msg, urltype=self.service_url_type)['quota_set']
    log.debug(_dict)
    return _dict
# new
"""
def get_tenants(self, credential=None):
time_stamp = self._now()
#get the tenants dict for the vm with the given id
if credential is None:
p = cm_profile()
name = self.label
credential = p.server.get("cloudmesh.server.keystone")[name]
msg = "tenants"
list = self._get(msg, kind="admin")['tenants']
return self._list_to_dict(list, 'id', "tenants", time_stamp)
# new
def get_users(self, credential=None):
time_stamp = self._now()
#get the tenants dict for the vm with the given id
if credential is None:
p = cm_profile()
name = self.label
idp_clouds = p.server.get("cloudmesh.server.keystone").keys()
if name in idp_clouds:
credential = p.server.get("cloudmesh.server.keystone")[name]
else:
log.error("The cloud {0} does not have keyston access".format(name))
return dict({})
cloud = openstack(name, credential=credential)
msg = "users"
list = cloud._get(msg, kind="admin", service="identity", urltype='adminURL')['users']
return self._list_to_dict(list, 'id', "users", time_stamp)
"""
def get_meta(self, id):
    """Return the metadata document for the server with the given *id*."""
    # NOTE(review): the leading "/" yields a double slash once _get joins
    # it with the endpoint URL; most servers tolerate it -- confirm intent
    return self._get("/servers/%s/metadata" % id, urltype=self.service_url_type)
def set_meta(self, id, metadata, replace=False):
    """Set the metadata for the vm with the given *id*.

    Issues a raw httplib request (PUT when *replace* is True, otherwise
    POST) against /servers/<id>/metadata and returns the decoded JSON
    response.  The print statements look like leftover debugging output.

    NOTE(review): the JSON body is built with str(metadata).replace,
    which breaks for values containing quotes -- json.dumps would be
    safer; confirm before changing behavior.
    """
    conf = self._get_service_endpoint()
    conf['serverid'] = id
    # choose the HTTP verb: PUT replaces the whole metadata set
    if replace:
        conf['set'] = "PUT"
    else:
        conf['set'] = "POST"
    apiurlt = urlparse(conf[self.service_url_type])
    url2 = apiurlt[1]
    params2 = '{"metadata":' + str(metadata).replace("'", '"') + '}'
    headers2 = {"X-Auth-Token": conf[
        'token'], "Accept": "application/json", "Content-type": "application/json"}
    print("%%%%%%%%%%%%%%%%%%")
    pprint(conf)
    print("%%%%%%%%%%%%%%%%%%")
    print("PARAMS", params2)
    print("HEADERS", headers2)
    print("API2", apiurlt[2])
    print("API1", apiurlt[1])
    print("ACTIVITY", conf['set'])
    print("ID", conf['serverid'])
    print("####################")
    conn2 = httplib.HTTPConnection(url2)
    conn2.request(conf['set'], "%s/servers/%s/metadata" %
                  (apiurlt[2], conf['serverid']), params2, headers2)
    response2 = conn2.getresponse()
    data2 = response2.read()
    dd2 = json.loads(data2)
    conn2.close()
    return dd2
#
# refresh
#
# identity management moved to its dedicated class
"""
def _get_users_dict(self):
result = self.get_users()
return result
def _get_tenants_dict(self):
result = self.get_tenants()
return result
"""
def _get_images_dict(self):
result = self.get_images()
return result
def _get_flavors_dict(self):
try:
result = self.get_flavors_from_yaml()
except:
result = None
if not result:
return self.get_flavors()
self.flavors = result
return self.flavors
def get_flavors_from_yaml(self):
obj = cm_config_flavor()
flavors = obj.get('cloudmesh.flavor')
return flavors.get(self.label)
def _get_servers_dict(self):
result = self.get_servers()
return result
def _get_security_groups_dict(self):
result = self.get_security_groups()
return result
def _get_stacks_dict(self):
result = self.get_stacks()
return result
def _get_usage_dict(self):
result = self.get_usage()
return result
def limits(self):
    """Flatten the tenant's per-rate limit entries into a single list."""
    collected = []
    info = self.get_limits()
    for rate in info['rate']:
        limit_set = rate['limit']
        print(limit_set)
        collected.extend(limit_set)
    print(collected)
    return collected
# return the security groups for the current authenticated tenant, in dict
# format
def list_security_groups(self):
    """Return the raw security-group listing for the current tenant."""
    return self._get("os-security-groups", urltype=self.service_url_type)
# return the security group id given a name, if it's defined in the current tenant
# The id is used to identify a group when adding more rules to it
def find_security_groupid_by_name(self, name):
    """Return the id of the security group called *name* in the current
    tenant, or None when no group matches.  The id is what the rule-add
    API expects as parent group."""
    for secgroup in self.list_security_groups()["security_groups"]:
        if secgroup["name"] == name:
            return secgroup["id"]
    return None
# creating a security group, and optionally add rules to it
# for the current TENANT that it authenticated as
# This implementation is based on the rest api
def create_security_group(self, secgroup, rules=[]):
    """Create a security group for the current tenant via the REST API,
    optionally adding rules to it.

    :param secgroup: object with .name, .description and .rules
    :param rules: extra rule objects added after the group's own rules
        (NOTE(review): mutable default -- harmless here since it is only
        iterated, but worth changing to None eventually)
    :return: the id of the newly created group, or None on failure
    """
    url = self._get_service_endpoint("compute")[self.service_url_type]
    posturl = "%s/os-security-groups" % url
    params = {"security_group":
              {
                  "name": secgroup.name,
                  "description": secgroup.description
              }
              }
    # log.debug ("POST PARAMS {0}".format(params))
    ret = self._post(posturl, params)
    groupid = None
    # upon successful, it returns a dict keyed by 'security_group',
    # otherwide may have failed due to some reason
    if "security_group" in ret:
        groupid = ret["security_group"]["id"]
        # if the security group object has rules included, add them first
        if len(secgroup.rules) > 0:
            self.add_security_group_rules(groupid, secgroup.rules)
    # only trying to add the additional rules if the empty group has been
    # created successfully
    if not groupid:
        log.error(
            "Failed to create security group. Error message: '%s'" % ret)
    else:
        self.add_security_group_rules(groupid, rules)
    # return the groupid of the newly created group, or None if failed
    return groupid
# add rules to an existing security group
def add_security_group_rules(self, groupid, rules):
    """Add *rules* to the existing security group *groupid*.

    Rules are posted one at a time.  A "rule already exists" response is
    logged as a warning and processing continues; any other failure is
    logged as an error and aborts the remaining rules.

    :param groupid: parent security group id
    :param rules: iterable of rule objects with ip_protocol, from_port,
        to_port and cidr attributes
    :return: the response of the last POST issued, or None for an empty
        rule list
    """
    url = self._get_service_endpoint("compute")[self.service_url_type]
    posturl = "%s/os-security-group-rules" % url
    ret = None
    for rule in rules:
        params = {"security_group_rule":
                  {
                      "ip_protocol": rule.ip_protocol,
                      "from_port": rule.from_port,
                      "to_port": rule.to_port,
                      "cidr": rule.cidr,
                      "parent_group_id": groupid
                  }
                  }
        # log.debug ("POST PARAMS {0}".format(params))
        ret = self._post(posturl, params)
        if "security_group_rule" not in ret:
            if 'badRequest' in ret and ret['badRequest']['message'].startswith('This rule already exists'):
                log.warning("The rule already exists")
            else:
                log.error(
                    "Failed to create security group rule(s). Error message: '%s'" % ret)
                break
    return ret
#
# security Groups of VMS
#
# GVL: review
# how does this look for azure and euca? Should there be a general framework for this in the BaseCloud class
# based on that analysis?
#
# comments of wht these things do and how they work are missing
#
'''
def createSecurityGroup(self, default_security_group, description="no-description"):
"""
comment is missing
"""
protocol = ""
ipaddress = ""
max_port = ""
min_port = ""
default_security_group_id = self.cloud.security_groups.create(
default_security_group, description)
default_security_group_id = default_security_group_id.id
config_security = cm_config()
yamlFile = config_security.get()
ruleNames = yamlFile['security'][
'security_groups'][default_security_group]
for ruleName in ruleNames:
rules = yamlFile['security']['rules'][ruleName]
for key, value in rules.iteritems():
if 'protocol' in key:
protocol = value
elif 'max_port' in key:
max_port = value
elif 'min_port' in key:
min_port = value
else:
ip_address = value
self.cloud.security_group_rules.create(
default_security_group_id, protocol, min_port,
max_port, ip_address)
return default_security_group
# GVL: review
# how does this look for azure and euca? Should there be a general framework for this in the BaseCloud class
# based on that analysis?
#
# comments of wht these things do and how they work are missing
def checkSecurityGroups(self):
"""
TODO: comment is missing
"""
config_security = cm_config()
names = {}
securityGroups = self.cloud.security_groups.list()
for securityGroup in securityGroups:
names[securityGroup.name] = securityGroup.id
yamlFile = config_security.get()
if yamlFile.has_key('security'):
default_security_group = yamlFile['security']['default']
else:
return None
# default_security_group_id=names[default_security_group]
if default_security_group in names:
return default_security_group
else:
return self.createSecurityGroup(default_security_group)
# GVL: review
# how does this look for azure and euca? Should there be a general framework for this in the BaseCloud class
# based on that analysis?
#
# comments of wht these things do and how they work are missing
#
def get_public_ip(self):
"""
TODO: comment is missing
"""
return self.cloud.floating_ips.create()
# GVL: review
# how does this look for azure and euca? Should there be a general framework for this in the BaseCloud class
# based on that analysis?
#
# comments of wht these things do and how they work are missing
#
def assign_public_ip(self, serverid, ip):
"""
comment is missing
"""
self.cloud.servers.add_floating_ip(serverid, ip)
#
# set vm meta
#
def vm_set_meta(self, vm_id, metadata):
"""an experimental class to set the metadata"""
print metadata
is_set = 0
# serverid = self.servers[id]['manager']
while not is_set:
try:
print "set ", vm_id, "to set", metadata
result = self.cloud.servers.set_meta(vm_id, metadata)
# body = {'metadata': metadata}
# print body
# result = self.cloud.servers._create("/servers/%s/metadata" %
# vm_id, body, "metadata")
print result
is_set = 1
except Exception, e:
print "ERROR", e
time.sleep(2)
print result
#
# create a vm
#
def vm_create(self,
name=None,
flavor_name=None,
image_id=None,
security_groups=None,
key_name=None,
meta=None):
"""
create a vm with the given parameters
"""
if not key_name is None:
if not self.check_key_pairs(key_name):
config = cm_config()
dict_t = config.get()
key = dict_t['keys']['keylist'][key_name]
if not 'ssh-rsa' in key and not 'ssh-dss' in key:
key = open(key, "r").read()
self.upload_key_pair(key, key_name)
config = cm_config()
if flavor_name is None:
flavor_name = config.default(self.label)['flavor']
if image_id is None:
image_id = config.default(self.label)['image']
# print "CREATE>>>>>>>>>>>>>>>>"
# print image_id
# print flavor_name
vm_flavor = self.cloud.images.find(name=flavor_name)
vm_image = self.cloud.images.find(id=image_id)
if key_name is None:
vm = self.cloud.servers.create(name,
flavor=vm_flavor,
image=vm_image,
security_groups=security_groups,
meta=meta
)
else:
# bug would passing None just work?
vm = self.cloud.servers.create(name,
flavor=vm_flavor,
image=vm_image,
key_name=key_name,
security_groups=security_groups,
meta=meta
)
delay = vm.user_id # trick to hopefully get all fields
data = vm.__dict__
del data['manager']
# data['cm_name'] = name
# data['cm_flavor'] = flavor_name
# data['cm_image'] = image_id
# return {str(data['id']): data}
# should probably just be
return data
#
# delete vm(s)
#
def vm_delete(self, id):
"""
delete a single vm and returns the id
"""
vm = self.cloud.servers.delete(id)
# return just the id or None if its deleted
return vm
@donotchange
def vms_delete(self, ids):
"""
delete many vms by id. ids is an array
"""
for id in ids:
print "Deleting %s" % self.servers[id]['name']
vm = self.vm_delete(id)
return ids
#
# list user images
#
'''
@donotchange
def vms_user(self, refresh=False):
    """
    Return the cached servers owned by the current user.

    NOTE(review): the locals user_id and time_stamp are computed but
    never used; the comparison relies on self.user_id instead -- confirm
    find_user_id() has the side effect of setting it.
    """
    user_id = self.find_user_id()
    time_stamp = datetime.now().strftime('%Y-%m-%dT%H-%M-%SZ')
    if refresh:
        self.refresh("servers")
    result = {}
    for (key, vm) in self.servers.items():
        if vm['user_id'] == self.user_id:
            result[key] = vm
    return result
#
# list project vms
#
def vms_project(self, refresh=False):
    """
    Return all cached vms of this project (needed for the openstack
    essex deployment on fg).

    NOTE(review): refresh=True refreshes "images" although the method
    reads self.servers -- looks like a copy/paste slip; confirm.
    """
    user_id = self.find_user_id()
    time_stamp = datetime.now().strftime('%Y-%m-%dT%H-%M-%SZ')
    if refresh:
        self.refresh("images")
    result = {}
    for (key, vm) in self.servers.items():
        result[key] = vm
    return result
#
# delete images from a user
#
@donotchange
def vms_delete_user(self):
    """
    Find the current user's vms and delete them.
    """
    user_id = self.find_user_id()
    vms = self.find('user_id', user_id)
    self.vms_delete(vms)
    return
#
# find
#
@donotchange
def find(self, key, value=None):
    """Return the ids of cached servers whose *key* equals *value*;
    a user_id search with no value defaults to the current user."""
    ids = []
    if key == 'user_id' and value is None:
        value = self.user_id
    for (id, vm) in self.servers.items():
        if vm[str(key)] == value:
            ids.append(str(vm['id']))
    return ids
#
# rename
#
'''
def rename(self, old, new, id=None):
"""rename the vm with the given name old to new. If more than
one exist with the same name only the first one will be
renamed. consider moving the code to the baseclass."""
all = self.find('name', old)
print all
if len(all) > 0:
id = all[0]
vm = self.cloud.servers.update(id, new)
return
# TODO: BUG WHY ARE TGERE TWO REINDEX FUNCTIONS?
@donotchange
def reindex(self, prefixold, prefix, index_format):
all = self.find('user_id')
counter = 1
for id in all:
old = self.servers[id]['name']
new = prefix + index_format % counter
print "Rename %s -> %s, %s" % (old, new, self.servers[id]['key_name'])
if old != new:
vm = self.cloud.servers.update(id, new)
counter += 1
'''
#
# TODO
#
"""
refresh just a specific VM
delete all images that follow a regualr expression in name
look into sort of images, images, vms
"""
#
# EXTRA
#
# will be moved into one class
@donotchange
def table_col_to_dict(self, body):
    """Convert rows of (key, value) pairs into a plain dict."""
    return dict((row[0], row[1]) for row in body)
@donotchange
def table_matrix(self, text, format=None):
    """converts a given pretty table to a list of rows or a
    dict. The format can be specified with 'dict' to return a
    dict. otherwise it returns an array"""
    lines = text.splitlines()
    # first line holds the |-separated column headers
    headline = lines[0].split("|")
    headline = headline[1:-1]  # drop the empty edges of the | borders
    for i in range(0, len(headline)):
        headline[i] = str(headline[i]).strip()
    lines = lines[1:]
    body = []
    for l in lines:
        line = l.split("|")
        line = line[1:-1]  # drop the empty edges of the | borders
        entry = {}
        for i in range(0, len(line)):
            line[i] = str(line[i]).strip()
            if format == "dict":
                # key cells by their column header
                key = headline[i]
                entry[key] = line[i]
        if format == "dict":
            body.append(entry)
        else:
            body.append(line)
    if format == 'dict':
        return body
    else:
        return (headline, body)
#
# CLI call of ussage
#
# will be moved into utils
@donotchange
def parse_isotime(self, timestr):
    """Parse time from ISO 8601 format, raising ValueError on bad input."""
    try:
        return iso8601.parse_date(timestr)
    except iso8601.ParseError as e:
        # NOTE(review): e.message is Python-2 only; under Python 3 this
        # re-raise path would itself fail -- confirm target interpreter
        raise ValueError(e.message)
    except TypeError as e:
        raise ValueError(e.message)
def usage(self, tenant_id=None, serverid=None, start=None, end=None, format='dict'):
    """Return usage information of the tenant via os-simple-tenant-usage.

    :param tenant_id: tenant to report on; when omitted it is taken from
        the last path segment of the compute endpoint URL
    :param serverid: when given, return only that instance's usage record
    :param start: window start (datetime); defaults to end - 30 days
    :param end: window end (datetime); defaults to now
    :param format: accepted but unused -- NOTE(review): confirm whether
        the commented-out table formatting below was meant to use it
    :return: a per-instance record when *serverid* is given (or None if
        not found), otherwise a tenant summary dict
    """
    DEFAULT_STAT_DURATION = 30
    if not tenant_id:
        # derive the tenant id from the endpoint URL's last path segment
        url = self._get_service_endpoint("compute")[self.service_url_type]
        urlsplit = url.split("/")
        tenant_id = urlsplit[len(urlsplit) - 1]
    # print 70 * "-"
    # print self.cloud.certs.__dict__.get()
    # print 70 * "-"
    # tenantid = "member" # not sure how to get that
    if not end:
        end = datetime.now()
        # end = self._now()
    if not start:
        start = end - timedelta(days=DEFAULT_STAT_DURATION)
    # start = start.strftime('%Y-%m-%dT%H-%M-%SZ')
    # iso_start = self.parse_isotime(start)
    # iso_end = self.parse_isotime(end)
    # print ">>>>>", iso_start, iso_end
    # info = self.cloud.usage.get(tenantid, iso_start, iso_end)
    # print info.__dict__
    # sys.exit()
    # (start, rest) = start.split("T") # ignore time for now
    # (end, rest) = end.split("T") # ignore time for now
    apiurl = "os-simple-tenant-usage/%s" % tenant_id
    payload = {'start': start, 'end': end}
    result = self._get(apiurl, payload=payload, urltype=self.service_url_type)['tenant_usage']
    instances = result['server_usages']
    numInstances = len(instances)
    ramhours = result['total_memory_mb_usage']
    cpuhours = result['total_hours']
    vcpuhours = result['total_vcpus_usage']
    diskhours = result['total_local_gb_usage']
    # if serverid provided, only return the server specific data
    ret = None
    if serverid:
        for instance in instances:
            if instance["instance_id"] == serverid:
                ret = instance
                break
    # else return tenant usage info
    else:
        ret = {'tenant_id': tenant_id,
               'start': start.strftime('%Y-%m-%dT%H-%M-%SZ'),
               'end': end.strftime('%Y-%m-%dT%H-%M-%SZ'),
               'instances': numInstances,
               'cpuHours': cpuhours,
               'vcpuHours': vcpuhours,
               'ramMBHours': ramhours,
               'diskGBHours': diskhours}
    return ret
# (headline, matrix) = self.table_matrix(result)
# headline.append("Start")
# headline.append("End")
# matrix[0].append(start)
# matrix[0].append(end)
# if format == 'dict':
# result = {}
# for i in range(0, len(headline)):
# result[headline[i]] = matrix[0][i]
# return result
# else:
# return (headline, matrix[0])
#
# CLI call of absolute-limits
#
# def limits(self):
# conf = get_conf()
# return _get(conf, "%s/limits")
'''
def check_key_pairs(self, key_name):
"""simple check to see if a keyname is in the keypair list"""
allKeys = self.cloud.keypairs.list()
for key in allKeys:
if key.name in key_name:
return True
return False
#
# Upload Key Pair
#
def upload_key_pair(self, publickey, name):
""" Uploads key pair """
try:
self.cloud.keypairs.create(name, publickey)
except Exception, e:
return 1, e
return (0, 'Key added successfully')
#
# Delete Key Pair
#
def delete_key(self, name):
""" delets key pair """
try:
self.cloud.keypairs.delete(name)
except Exception, e:
return (1, e)
return (0, 'Key deleted successfully')
#
# List Security Group
#
def sec_grp_list(self):
""" lists all security groups """
try:
return self.cloud.security_groups.list()
except Exception, e:
print e
states = [
"ACTIVE",
"ERROR",
"BUILDING",
"PAUSED",
"SUSPENDED",
"STOPPED",
"DELETED",
"RESCUED",
"RESIZED",
"SOFT_DELETED"
]
def display(self, states, userid):
""" simple or on states and check if userid. If userid is None
all users will be marked. A new variable cm_display is
introduced manageing if a VM should be printed or not"""
for (id, vm) in self.servers.items():
vm['cm_display'] = vm['status'] in states
if userid is not None:
vm['cm_display'] = vm['cm_display'] and (
vm['user_id'] == userid)
'''
def display_regex(self, state_check, userid):
    """Set each cached server's 'cm_display' flag by evaluating
    *state_check*.

    The expression is evaluated with the loop variable ``vm`` bound to
    the server record, so it must reference ``vm``, e.g.
    ``"vm['status'] in ['ACTIVE']"``.  When *userid* is given, only that
    user's vms stay displayed.
    """
    print(state_check)
    for (id, vm) in self.servers.items():
        # NOTE: eval of a caller-supplied expression -- trusted input only
        flag = eval(state_check)
        if userid is not None:
            flag = flag and (vm['user_id'] == userid)
        vm['cm_display'] = flag
#
# MAIN FOR TESTING
#
if __name__ == "__main__":
    # ad-hoc manual smoke tests; everything below is commented out or an
    # inert string literal and nothing runs when the module is executed
    """
    cloud = openstack("india-openstack")
    name ="%s-%04d" % (cloud.credential["OS_USERNAME"], 1)
    out = cloud.vm_create(name, "m1.tiny", "6d2bca76-8fff-4d57-9f29-50378539b4fa")
    pprint(out)
    """
    # cloud = openstack("india")
    # flavors = cloud.get_flavors()
    # for flavor in flavors:
    #     print(flavor)
    # keys = cloud.list_key_pairs()
    # for key in keys:
    #     print key.name
    """
    print cloud.find_user_id()
    """
    """
    for i in range (1,3):
        name ="%s-%04d" % (cloud.credential["OS_USERNAME"], i)
        out = cloud.vm_create(name, "m1.tiny", "6d2bca76-8fff-4d57-9f29-50378539b4fa")
        <pprint(out)
    """
    """
    print cloud.find('name', name)
    """
    # cloud.rename("gvonlasz-0001","gregor")
| {
"content_hash": "a3f0a7c2f6b7b87f02058d9cd7d2b544",
"timestamp": "",
"source": "github",
"line_count": 1703,
"max_line_length": 120,
"avg_line_length": 32.456253669994126,
"alnum_prop": 0.5238000470392415,
"repo_name": "rajpushkar83/cloudmesh",
"id": "323d93da7c327a9f44a3277b2869a43baa86b73c",
"size": "55386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudmesh/iaas/openstack/cm_compute.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "15982"
},
{
"name": "CSS",
"bytes": "390396"
},
{
"name": "HTML",
"bytes": "4158355"
},
{
"name": "Java",
"bytes": "369"
},
{
"name": "JavaScript",
"bytes": "2803977"
},
{
"name": "Makefile",
"bytes": "7572"
},
{
"name": "PHP",
"bytes": "183557"
},
{
"name": "Python",
"bytes": "1736957"
},
{
"name": "Ruby",
"bytes": "10670"
},
{
"name": "Shell",
"bytes": "32263"
}
],
"symlink_target": ""
} |
"""Implementation of compile_html for HTML source files."""
import os
import shutil
import codecs
from nikola.plugin_categories import PageCompiler
from nikola.utils import makedirs
class CompileHtml(PageCompiler):
    """Pass-through compiler: HTML sources are already HTML."""
    name = "html"

    def compile_html(self, source, dest, is_two_file=True):
        """Copy `source` to `dest` verbatim; no compilation is needed."""
        makedirs(os.path.dirname(dest))
        shutil.copyfile(source, dest)
        return True

    def create_post(self, path, onefile=False, **kw):
        """Create a new post skeleton at `path`.

        When `onefile` is true, the post metadata (defaults merged with
        `kw`) is embedded at the top of the file inside an HTML comment.
        """
        metadata = dict(self.default_metadata)
        metadata.update(kw)
        makedirs(os.path.dirname(path))
        with codecs.open(path, "wb+", "utf8") as fd:
            if onefile:
                fd.write('<!-- \n')
                for key, value in metadata.items():
                    fd.write('.. {0}: {1}\n'.format(key, value))
                fd.write('-->\n\n')
            fd.write("\n<p>Write your post here.</p>")
| {
"content_hash": "ccf5242cd645801434be58efa2d2cff6",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 59,
"avg_line_length": 29.125,
"alnum_prop": 0.5804721030042919,
"repo_name": "Proteus-tech/nikola",
"id": "a309960cfba0ec92a8cfd23e251d6c7a6adbe159",
"size": "2074",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nikola/plugins/compile/html.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "28"
},
{
"name": "CSS",
"bytes": "9393"
},
{
"name": "Erlang",
"bytes": "30"
},
{
"name": "HTML",
"bytes": "3394"
},
{
"name": "JavaScript",
"bytes": "27629"
},
{
"name": "Python",
"bytes": "589460"
},
{
"name": "Shell",
"bytes": "635"
}
],
"symlink_target": ""
} |
"""Some simple financial calculations
patterned after spreadsheet computations.
There is some complexity in each function
so that the functions behave like ufuncs with
broadcasting and being able to be called with scalars
or arrays (or other sequences).
Functions support the :class:`decimal.Decimal` type unless
otherwise stated.
"""
import warnings
from decimal import Decimal
import functools
import numpy as np
from numpy.core import overrides
# Shared deprecation message (NEP 32): the financial functions moved to
# the separate numpy-financial package and leave NumPy in 1.20.
_depmsg = ("numpy.{name} is deprecated and will be removed from NumPy 1.20. "
           "Use numpy_financial.{name} instead "
           "(https://pypi.org/project/numpy-financial/).")
# __array_function__ dispatch decorator, pre-bound so these functions are
# reported under the `numpy` module namespace.
array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')
__all__ = ['fv', 'pmt', 'nper', 'ipmt', 'ppmt', 'pv', 'rate',
           'irr', 'npv', 'mirr']
# Accepted spellings for the `when` argument, normalized to
# 0 (payment due at period end) or 1 (payment due at period begin).
_when_to_num = {'end':0, 'begin':1,
                'e':0, 'b':1,
                0:0, 1:1,
                'beginning':1,
                'start':1,
                'finish':0}
def _convert_when(when):
    """Normalize `when` to 0 ('end') or 1 ('begin').

    Accepts the spellings in `_when_to_num`, an ndarray (passed through
    unchanged, since that means a sibling function such as `ppmt` already
    converted it), or a sequence of spellings (converted element-wise).
    """
    if isinstance(when, np.ndarray):
        return when
    try:
        return _when_to_num[when]
    except (KeyError, TypeError):
        # Unknown or unhashable key: treat `when` as a sequence.
        return [_when_to_num[item] for item in when]
def _fv_dispatcher(rate, nper, pmt, pv, when=None):
    """Dispatcher for `fv`: emit the NEP 32 warning, expose dispatch args."""
    message = _depmsg.format(name='fv')
    warnings.warn(message, DeprecationWarning, stacklevel=3)
    return rate, nper, pmt, pv
@array_function_dispatch(_fv_dispatcher)
def fv(rate, nper, pmt, pv, when='end'):
    """
    Compute the future value.

    .. deprecated:: 1.18
        `fv` is deprecated; for details, see NEP 32 [1]_.
        Use the corresponding function in the numpy-financial library,
        https://pypi.org/project/numpy-financial.

    Parameters
    ----------
    rate : scalar or array_like of shape(M, )
        Rate of interest as decimal (not per cent) per period
    nper : scalar or array_like of shape(M, )
        Number of compounding periods
    pmt : scalar or array_like of shape(M, )
        Payment (fixed), due at the beginning (`when` = {'begin', 1})
        or the end (`when` = {'end', 0}) of each period
    pv : scalar or array_like of shape(M, )
        Present value
    when : {{'begin', 1}, {'end', 0}}, {string, int}, optional
        When payments are due ('begin' (1) or 'end' (0)).
        Defaults to {'end', 0}.

    Returns
    -------
    out : ndarray
        Value at the end of the `nper` periods. Scalar if all inputs are
        scalar; array_like inputs must share a shape and give per-element
        results.

    Notes
    -----
    The future value is the ``fv`` solving::

        fv + pv*(1+rate)**nper +
        pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) == 0

    or, when ``rate == 0``::

        fv + pv + pmt * nper == 0

    References
    ----------
    .. [1] NumPy Enhancement Proposal (NEP) 32,
           https://numpy.org/neps/nep-0032-remove-financial-functions.html
    """
    when = _convert_when(when)
    rate, nper, pmt, pv, when = map(np.asarray, [rate, nper, pmt, pv, when])
    growth = (1 + rate)**nper
    # Annuity factor; reduces to `nper` in the zero-rate limit.
    annuity = np.where(rate == 0, nper,
                       (1 + rate*when)*(growth - 1)/rate)
    return -(pv*growth + pmt*annuity)
def _pmt_dispatcher(rate, nper, pv, fv=None, when=None):
    """Dispatcher for `pmt`: emit the NEP 32 warning, expose dispatch args."""
    message = _depmsg.format(name='pmt')
    warnings.warn(message, DeprecationWarning, stacklevel=3)
    return rate, nper, pv, fv
@array_function_dispatch(_pmt_dispatcher)
def pmt(rate, nper, pv, fv=0, when='end'):
    """
    Compute the payment against loan principal plus interest.

    .. deprecated:: 1.18
        `pmt` is deprecated; for details, see NEP 32 [1]_.
        Use the corresponding function in the numpy-financial library,
        https://pypi.org/project/numpy-financial.

    Parameters
    ----------
    rate : array_like
        Rate of interest (per period)
    nper : array_like
        Number of compounding periods
    pv : array_like
        Present value (e.g., an amount borrowed)
    fv : array_like, optional
        Future value (default = 0)
    when : {{'begin', 1}, {'end', 0}}, {string, int}
        When payments are due ('begin' (1) or 'end' (0))

    Returns
    -------
    out : ndarray
        The (fixed) periodic payment. Scalar if all inputs are scalar;
        array_like inputs must share a shape and give per-element results.

    Notes
    -----
    The payment is the ``pmt`` solving::

        fv + pv*(1 + rate)**nper +
        pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) == 0

    or, when ``rate == 0``::

        fv + pv + pmt * nper == 0

    Beyond mortgages, this also gives e.g. the periodic deposit needed to
    reach a target balance from an initial deposit at a fixed rate.

    References
    ----------
    .. [1] NumPy Enhancement Proposal (NEP) 32,
           https://numpy.org/neps/nep-0032-remove-financial-functions.html
    """
    when = _convert_when(when)
    # np.array (a copy), matching the original implementation.
    rate, nper, pv, fv, when = map(np.array, [rate, nper, pv, fv, when])
    growth = (1 + rate)**nper
    zero_rate = (rate == 0)
    # Substitute 1 where rate == 0 so the division below is defined;
    # those positions are replaced by `nper` in the np.where anyway.
    safe_rate = np.where(zero_rate, 1, rate)
    annuity = np.where(zero_rate, nper,
                       (1 + safe_rate*when)*(growth - 1)/safe_rate)
    return -(fv + pv*growth) / annuity
def _nper_dispatcher(rate, pmt, pv, fv=None, when=None):
    """Dispatcher for `nper`: emit the NEP 32 warning, expose dispatch args."""
    message = _depmsg.format(name='nper')
    warnings.warn(message, DeprecationWarning, stacklevel=3)
    return rate, pmt, pv, fv
@array_function_dispatch(_nper_dispatcher)
def nper(rate, pmt, pv, fv=0, when='end'):
    """
    Compute the number of periodic payments.

    .. deprecated:: 1.18
        `nper` is deprecated; for details, see NEP 32 [1]_.
        Use the corresponding function in the numpy-financial library,
        https://pypi.org/project/numpy-financial.

    :class:`decimal.Decimal` type is not supported.

    Parameters
    ----------
    rate : array_like
        Rate of interest (per period)
    pmt : array_like
        Payment
    pv : array_like
        Present value
    fv : array_like, optional
        Future value
    when : {{'begin', 1}, {'end', 0}}, {string, int}, optional
        When payments are due ('begin' (1) or 'end' (0))

    Notes
    -----
    The number of periods ``nper`` is computed by solving the equation::

        fv + pv*(1+rate)**nper + pmt*(1+rate*when)/rate*((1+rate)**nper-1) = 0

    but if ``rate = 0`` then::

        fv + pv + pmt*nper = 0

    References
    ----------
    .. [1] NumPy Enhancement Proposal (NEP) 32,
           https://numpy.org/neps/nep-0032-remove-financial-functions.html

    Examples
    --------
    >>> print(np.round(np.nper(0.07/12, -150, 8000), 5))
    64.07335
    """
    when = _convert_when(when)
    (rate, pmt, pv, fv, when) = map(np.asarray, [rate, pmt, pv, fv, when])
    use_zero_rate = False
    # A zero scalar rate makes the division blow up; detect it via the
    # raised FloatingPointError rather than a comparison, so array rates
    # with only some zero entries still take the np.where path below.
    with np.errstate(divide="raise"):
        try:
            z = pmt*(1+rate*when)/rate
        except FloatingPointError:
            use_zero_rate = True
    if use_zero_rate:
        # Zero-rate closed form: fv + pv + pmt*nper == 0.
        # BUG FIX: previously returned (-fv + pv) / pmt, which has the
        # wrong sign on `fv` and disagreed with the rate == 0 branch of
        # the np.where below; numpy-financial ships the corrected form.
        return -(fv + pv) / pmt
    else:
        # A: linear solution for zero-rate elements;
        # B: logarithmic solution for the general case.
        A = -(fv + pv)/(pmt+0)
        B = np.log((-fv+z) / (pv+z))/np.log(1+rate)
        return np.where(rate == 0, A, B)
def _ipmt_dispatcher(rate, per, nper, pv, fv=None, when=None):
    """Dispatcher for `ipmt`: emit the NEP 32 warning, expose dispatch args."""
    message = _depmsg.format(name='ipmt')
    warnings.warn(message, DeprecationWarning, stacklevel=3)
    return rate, per, nper, pv, fv
@array_function_dispatch(_ipmt_dispatcher)
def ipmt(rate, per, nper, pv, fv=0, when='end'):
    """
    Compute the interest portion of a payment.

    .. deprecated:: 1.18
        `ipmt` is deprecated; for details, see NEP 32 [1]_.
        Use the corresponding function in the numpy-financial library,
        https://pypi.org/project/numpy-financial.

    Parameters
    ----------
    rate : scalar or array_like of shape(M, )
        Rate of interest as decimal (not per cent) per period
    per : scalar or array_like of shape(M, )
        Payment period for which to compute the interest amount; the
        interest paid changes over the life of the loan.
    nper : scalar or array_like of shape(M, )
        Number of compounding periods
    pv : scalar or array_like of shape(M, )
        Present value
    fv : scalar or array_like of shape(M, ), optional
        Future value
    when : {{'begin', 1}, {'end', 0}}, {string, int}, optional
        When payments are due ('begin' (1) or 'end' (0)).
        Defaults to {'end', 0}.

    Returns
    -------
    out : ndarray
        Interest portion of payment. Scalar if all inputs are scalar;
        array_like inputs must share a shape and give per-element results.

    See Also
    --------
    ppmt, pmt, pv

    Notes
    -----
    The total payment is payment against principal plus interest:
    ``pmt = ppmt + ipmt``.

    References
    ----------
    .. [1] NumPy Enhancement Proposal (NEP) 32,
           https://numpy.org/neps/nep-0032-remove-financial-functions.html
    """
    when = _convert_when(when)
    rate, per, nper, pv, fv, when = np.broadcast_arrays(rate, per, nper,
                                                        pv, fv, when)
    total_payment = pmt(rate, nper, pv, fv, when)
    # Interest due for period `per` is the remaining balance at the start
    # of that period times the per-period rate.
    interest = _rbl(rate, per, total_payment, pv, when)*rate
    try:
        # Payments at period start accrue one less period of interest...
        interest = np.where(when == 1, interest/(1 + rate), interest)
        # ...and the very first begin-of-period payment carries none.
        interest = np.where(np.logical_and(when == 1, per == 1),
                            0, interest)
    except IndexError:
        pass
    return interest
def _rbl(rate, per, pmt, pv, when):
    """Remaining balance on loan just before period `per`.

    This is simply `fv` evaluated at ``per - 1`` periods; it exists under
    a separate name so `ipmt` can keep using `fv` as a keyword argument
    without shadowing the module-level `fv` function.
    """
    return fv(rate, (per - 1), pmt, pv, when)
def _ppmt_dispatcher(rate, per, nper, pv, fv=None, when=None):
    """Dispatcher for `ppmt`: emit the NEP 32 warning, expose dispatch args."""
    message = _depmsg.format(name='ppmt')
    warnings.warn(message, DeprecationWarning, stacklevel=3)
    return rate, per, nper, pv, fv
@array_function_dispatch(_ppmt_dispatcher)
def ppmt(rate, per, nper, pv, fv=0, when='end'):
    """
    Compute the payment against loan principal.

    .. deprecated:: 1.18
        `ppmt` is deprecated; for details, see NEP 32 [1]_.
        Use the corresponding function in the numpy-financial library,
        https://pypi.org/project/numpy-financial.

    Parameters
    ----------
    rate : array_like
        Rate of interest (per period)
    per : array_like, int
        Period of interest; the amount paid against the loan changes.
    nper : array_like
        Number of compounding periods
    pv : array_like
        Present value
    fv : array_like, optional
        Future value
    when : {{'begin', 1}, {'end', 0}}, {string, int}
        When payments are due ('begin' (1) or 'end' (0))

    See Also
    --------
    pmt, pv, ipmt

    References
    ----------
    .. [1] NumPy Enhancement Proposal (NEP) 32,
           https://numpy.org/neps/nep-0032-remove-financial-functions.html
    """
    # Principal portion = total payment minus its interest portion.
    return (pmt(rate, nper, pv, fv, when)
            - ipmt(rate, per, nper, pv, fv, when))
def _pv_dispatcher(rate, nper, pmt, fv=None, when=None):
    """Dispatcher for `pv`: emit the NEP 32 warning, expose dispatch args."""
    warnings.warn(_depmsg.format(name='pv'),
                  DeprecationWarning, stacklevel=3)
    # BUG FIX: previously returned (rate, nper, nper, pv, fv), which
    # duplicated `nper` and referenced the module-level `pv` *function*
    # (no `pv` parameter exists here) instead of this dispatcher's own
    # arguments. Now mirrors the sibling dispatchers by returning the
    # array-like arguments that participate in __array_function__
    # dispatch.
    return (rate, nper, pmt, fv)
@array_function_dispatch(_pv_dispatcher)
def pv(rate, nper, pmt, fv=0, when='end'):
    """
    Compute the present value.

    .. deprecated:: 1.18
        `pv` is deprecated; for details, see NEP 32 [1]_.
        Use the corresponding function in the numpy-financial library,
        https://pypi.org/project/numpy-financial.

    Parameters
    ----------
    rate : array_like
        Rate of interest (per period)
    nper : array_like
        Number of compounding periods
    pmt : array_like
        Payment (fixed), due at the beginning (`when` = {'begin', 1})
        or the end (`when` = {'end', 0}) of each period
    fv : array_like, optional
        Future value
    when : {{'begin', 1}, {'end', 0}}, {string, int}, optional
        When payments are due ('begin' (1) or 'end' (0))

    Returns
    -------
    out : ndarray, float
        Present value of a series of payments or investments. By
        convention a negative result is cash flow out (money not
        available today).

    Notes
    -----
    The present value is the ``pv`` solving::

        fv + pv*(1 + rate)**nper +
        pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) = 0

    or, when ``rate = 0``::

        fv + pv + pmt * nper = 0

    References
    ----------
    .. [1] NumPy Enhancement Proposal (NEP) 32,
           https://numpy.org/neps/nep-0032-remove-financial-functions.html
    """
    when = _convert_when(when)
    rate, nper, pmt, fv, when = map(np.asarray, [rate, nper, pmt, fv, when])
    growth = (1 + rate)**nper
    # Annuity factor; reduces to `nper` in the zero-rate limit.
    annuity = np.where(rate == 0, nper,
                       (1 + rate*when)*(growth - 1)/rate)
    return -(fv + pmt*annuity) / growth
# Computed with Sage
# (y + (r + 1)^n*x + p*((r + 1)^n - 1)*(r*w + 1)/r)/(n*(r + 1)^(n - 1)*x -
# p*((r + 1)^n - 1)*(r*w + 1)/r^2 + n*p*(r + 1)^(n - 1)*(r*w + 1)/r +
# p*((r + 1)^n - 1)*w/r)
def _g_div_gp(r, n, p, x, y, w):
    """Evaluate g(r)/g'(r), the Newton step used by `rate`.

    Here ``g(r) = y + x*(1+r)**n + p*(1+r*w)*((1+r)**n - 1)/r`` and the
    derivative was computed symbolically (see the Sage-derived formula in
    the comment above this function).
    """
    t1 = (r + 1)**n
    t2 = (r + 1)**(n - 1)
    g = y + t1*x + p*(t1 - 1)*(r*w + 1)/r
    gp = (n*t2*x
          - p*(t1 - 1)*(r*w + 1)/(r**2)
          + n*p*t2*(r*w + 1)/r
          + p*(t1 - 1)*w/r)
    return g / gp
def _rate_dispatcher(nper, pmt, pv, fv, when=None, guess=None, tol=None,
                     maxiter=None):
    """Dispatcher for `rate`: emit the NEP 32 warning, expose dispatch args."""
    message = _depmsg.format(name='rate')
    warnings.warn(message, DeprecationWarning, stacklevel=3)
    return nper, pmt, pv, fv
# Use Newton's iteration until the change is less than 1e-6
# for all values or a maximum of 100 iterations is reached.
# Newton's rule is
# r_{n+1} = r_{n} - g(r_n)/g'(r_n)
# where
# g(r) is the formula
# g'(r) is the derivative with respect to r.
@array_function_dispatch(_rate_dispatcher)
def rate(nper, pmt, pv, fv, when='end', guess=None, tol=None, maxiter=100):
    """
    Compute the rate of interest per period.

    .. deprecated:: 1.18
        `rate` is deprecated; for details, see NEP 32 [1]_.
        Use the corresponding function in the numpy-financial library,
        https://pypi.org/project/numpy-financial.

    Parameters
    ----------
    nper : array_like
        Number of compounding periods
    pmt : array_like
        Payment
    pv : array_like
        Present value
    fv : array_like
        Future value
    when : {{'begin', 1}, {'end', 0}}, {string, int}, optional
        When payments are due ('begin' (1) or 'end' (0))
    guess : Number, optional
        Starting guess for solving the rate of interest, default 0.1
    tol : Number, optional
        Required tolerance for the solution, default 1e-6
    maxiter : int, optional
        Maximum iterations in finding the solution

    Notes
    -----
    The rate is found by iteratively (Newton's method) solving the
    non-linear equation::

        fv + pv*(1+rate)**nper + pmt*(1+rate*when)/rate * ((1+rate)**nper - 1) = 0

    NaN (broadcast to the iterate's shape) is returned when the iteration
    does not converge within `maxiter` steps.

    References
    ----------
    .. [1] NumPy Enhancement Proposal (NEP) 32,
           https://numpy.org/neps/nep-0032-remove-financial-functions.html
    """
    when = _convert_when(when)
    # Defaults track the type of `pmt` so Decimal inputs keep Decimal
    # arithmetic throughout the iteration.
    numeric = Decimal if isinstance(pmt, Decimal) else float
    if guess is None:
        guess = numeric('0.1')
    if tol is None:
        tol = numeric('1e-6')
    nper, pmt, pv, fv, when = map(np.asarray, [nper, pmt, pv, fv, when])
    current = guess
    converged = False
    # Newton's rule: r_{k+1} = r_k - g(r_k)/g'(r_k).
    for _ in range(maxiter):
        nxt = current - _g_div_gp(current, nper, pmt, pv, fv, when)
        converged = np.all(abs(nxt - current) < tol)
        current = nxt
        if converged:
            break
    if converged:
        return current
    # No convergence: NaN's with the same shape as the iterate.
    return np.nan + current
def _irr_dispatcher(values):
    """Dispatcher for `irr`: emit the NEP 32 warning, expose dispatch args."""
    message = _depmsg.format(name='irr')
    warnings.warn(message, DeprecationWarning, stacklevel=3)
    return (values,)
@array_function_dispatch(_irr_dispatcher)
def irr(values):
    """
    Return the Internal Rate of Return (IRR).

    .. deprecated:: 1.18
        `irr` is deprecated; for details, see NEP 32 [1]_.
        Use the corresponding function in the numpy-financial library,
        https://pypi.org/project/numpy-financial.

    The IRR is the "average" periodically compounded rate of return that
    gives a net present value of 0.0: for `values` = [v_0, v_1, ... v_M]
    it solves ``sum(v_t / (1+irr)**t) == 0`` [2]_.

    :class:`decimal.Decimal` type is not supported.

    Parameters
    ----------
    values : array_like, shape(N,)
        Input cash flows per time period. By convention, net "deposits"
        are negative and net "withdrawals" are positive; the first
        element (the initial investment) is typically negative.

    Returns
    -------
    out : float
        Internal Rate of Return for periodic input values, or nan when
        no real, positive root exists.

    References
    ----------
    .. [1] NumPy Enhancement Proposal (NEP) 32,
           https://numpy.org/neps/nep-0032-remove-financial-functions.html
    .. [2] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed.,
       Addison-Wesley, 2003, pg. 348.

    Examples
    --------
    >>> round(np.irr([-100, 39, 59, 55, 20]), 5)
    0.28095
    """
    # The `np.roots` call is why Decimal is unsupported here; Decimal
    # support would have to land in np.roots / the linalg eigensolvers.
    roots = np.roots(values[::-1])
    real_positive = (roots.imag == 0) & (roots.real > 0)
    if not real_positive.any():
        return np.nan
    # NPV(rate) = 0 can have several solutions; keep the one closest
    # to zero.
    candidates = 1/roots[real_positive].real - 1
    return candidates.item(np.argmin(np.abs(candidates)))
def _npv_dispatcher(rate, values):
    """Dispatcher for `npv`: emit the NEP 32 warning, expose dispatch args."""
    message = _depmsg.format(name='npv')
    warnings.warn(message, DeprecationWarning, stacklevel=3)
    return (values,)
@array_function_dispatch(_npv_dispatcher)
def npv(rate, values):
    """
    Returns the NPV (Net Present Value) of a cash flow series.

    .. deprecated:: 1.18
        `npv` is deprecated; for details, see NEP 32 [1]_.
        Use the corresponding function in the numpy-financial library,
        https://pypi.org/project/numpy-financial.

    Parameters
    ----------
    rate : scalar
        The discount rate.
    values : array_like, shape(M, )
        The time series of cash flows, one per period at the interval
        `rate` refers to. Deposits are negative, withdrawals positive;
        `values[0]` (typically the initial investment) occurs now.

    Returns
    -------
    out : float
        The NPV of `values` at the discount `rate`:
        ``sum(values[t] / (1+rate)**t for t = 0..M-1)`` [2]_.

    Warnings
    --------
    The series is assumed to start in the present (t = 0). For
    end-of-period cash flows, zero out ``values[0]`` and add it to the
    net present value of the remaining flows.

    References
    ----------
    .. [1] NumPy Enhancement Proposal (NEP) 32,
           https://numpy.org/neps/nep-0032-remove-financial-functions.html
    .. [2] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed.,
       Addison-Wesley, 2003, pg. 346.

    Examples
    --------
    >>> rate, cashflows = 0.08, [-40_000, 5_000, 8_000, 12_000, 30_000]
    >>> np.npv(rate, cashflows).round(5)
    3065.22267
    """
    cashflows = np.asarray(values)
    periods = np.arange(0, len(cashflows))
    # Discount each cash flow back to t = 0 and sum.
    return (cashflows / (1+rate)**periods).sum(axis=0)
def _mirr_dispatcher(values, finance_rate, reinvest_rate):
    """Dispatcher for `mirr`: emit the NEP 32 warning, expose dispatch args."""
    message = _depmsg.format(name='mirr')
    warnings.warn(message, DeprecationWarning, stacklevel=3)
    return (values,)
@array_function_dispatch(_mirr_dispatcher)
def mirr(values, finance_rate, reinvest_rate):
    """
    Modified internal rate of return.

    .. deprecated:: 1.18
        `mirr` is deprecated; for details, see NEP 32 [1]_.
        Use the corresponding function in the numpy-financial library,
        https://pypi.org/project/numpy-financial.

    Parameters
    ----------
    values : array_like
        Cash flows; must contain at least one positive and one negative
        value, otherwise nan is returned. The first value is considered
        a sunk cost at time zero.
    finance_rate : scalar
        Interest rate paid on the cash flows
    reinvest_rate : scalar
        Interest rate received on the cash flows upon reinvestment

    Returns
    -------
    out : float
        Modified internal rate of return

    References
    ----------
    .. [1] NumPy Enhancement Proposal (NEP) 32,
           https://numpy.org/neps/nep-0032-remove-financial-functions.html
    """
    values = np.asarray(values)
    n = values.size
    # Keep 1/(n - 1) below in Decimal arithmetic when the rates are
    # Decimal; a float exponent would raise TypeError against Decimal
    # operands.
    if isinstance(finance_rate, Decimal):
        n = Decimal(n)
    inflows = values > 0
    outflows = values < 0
    if not (inflows.any() and outflows.any()):
        # MIRR needs at least one cash flow of each sign.
        return np.nan
    numer = np.abs(npv(reinvest_rate, values*inflows))
    denom = np.abs(npv(finance_rate, values*outflows))
    return (numer/denom)**(1/(n - 1))*(1 + reinvest_rate) - 1
| {
"content_hash": "939b13040659e695d11bb7c30a600def",
"timestamp": "",
"source": "github",
"line_count": 967,
"max_line_length": 82,
"avg_line_length": 32.59979317476732,
"alnum_prop": 0.6065220149727192,
"repo_name": "WarrenWeckesser/numpy",
"id": "b055bb1ec7899ffaaabe5ad032ba9571fae5a30e",
"size": "31524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numpy/lib/financial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9059444"
},
{
"name": "C++",
"bytes": "174989"
},
{
"name": "Fortran",
"bytes": "10884"
},
{
"name": "JavaScript",
"bytes": "16928"
},
{
"name": "Makefile",
"bytes": "4290"
},
{
"name": "Python",
"bytes": "8313055"
},
{
"name": "Shell",
"bytes": "9612"
},
{
"name": "sed",
"bytes": "5741"
}
],
"symlink_target": ""
} |
import os
from shopify_settings import *
# Absolute path of the directory containing this settings module; used
# below to anchor the sqlite database and static file locations.
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
# Pull in App Engine base settings when djangoappengine is importable;
# otherwise fall back to plain (local) Django settings.
try:
    from djangoappengine.settings_base import *
    USING_APP_ENGINE = True
except ImportError:
    USING_APP_ENGINE = False
# NOTE(review): DEBUG must be disabled in production deployments.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Local development database: a sqlite3 file next to this settings module.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', '
        'NAME': os.path.join(SITE_ROOT, 'db-development.sqlite3'),
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with
        'PORT': '', # Set to empty string for default. Not used with sq
    }
}
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
    os.path.join(SITE_ROOT, 'static'),
)
# Make this unique, and don't share it with anybody.
# NOTE(review): a committed SECRET_KEY is exposed; production should load
# it from the environment or a secrets store instead.
SECRET_KEY = '#w%yp9_5wnupojr=4o0mwap#!)y=q9ovu=o#xnytga7u5^bf27'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
'shopify_app.context_processors.current_shop',
)
if not USING_APP_ENGINE:
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.static',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'shopify_app.middleware.LoginProtection',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
os.path.join(SITE_ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'home',
'shopify_app',
)
if USING_APP_ENGINE:
INSTALLED_APPS += (
'djangoappengine',
'djangotoolbox',
)
else:
INSTALLED_APPS += (
'django.contrib.sites',
'django.contrib.staticfiles',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| {
"content_hash": "7db302db19cc4506292be088296327e7",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 96,
"avg_line_length": 31.062937062937063,
"alnum_prop": 0.6602881584871679,
"repo_name": "daniyalzade/burgaz",
"id": "b208730b8e590b90d8e30b44e142cb6a2f2e28b8",
"size": "4442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5128"
},
{
"name": "HTML",
"bytes": "14941"
},
{
"name": "Python",
"bytes": "11457"
},
{
"name": "Shell",
"bytes": "1264"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import operator
import time
import logging
from root_folder_info import RootFolderInfo
logger = logging.getLogger(__name__)
class Sync(object):
    """Copies folders and files from a source store to a destination store.

    ``src`` and ``dest`` are storage adapters exposing ``list_folders()``,
    ``list_files(folder)`` and ``copy_file(file_info, folder_name, dest)``.
    ``config`` supplies at least the ``dry_run`` and ``root_files`` flags.
    """

    def __init__(self, config, src, dest):
        self._config = config
        self._src = src
        self._dest = dest
        self._copy_count = 0
        self._skip_count = 0

    def run(self):
        """Synchronise every source folder into the destination and report."""
        if self._config.dry_run:
            logger.info("dry run enabled, no files will be copied")
        logger.info("building folder list...")
        start = time.time()
        src_folders = self._src.list_folders()
        # Case-insensitive lookup of destination folders by name.
        dest_folders = {folder.name.lower(): folder
                        for folder in self._dest.list_folders()}
        for src_folder in src_folders:
            dest_folder = dest_folders.get(src_folder.name.lower())
            print(src_folder.name + os.sep)
            if dest_folder:
                self._merge_folders(src_folder, dest_folder)
            else:
                self._copy_folder(src_folder)
        # Merge files that live at the root (outside any folder) if requested.
        if self._config.root_files:
            self._merge_folders(RootFolderInfo(), RootFolderInfo())
        self._print_summary(time.time() - start,
                            self._copy_count, self._skip_count)

    def _copy_folder(self, folder):
        # Destination folder does not exist: copy every file unconditionally.
        for src_file in self._src.list_files(folder):
            path = os.path.join(folder.name, src_file.name)
            self._copy_count += 1
            self._copy_file(folder, src_file, path)

    def _merge_folders(self, src_folder, dest_folder):
        # Destination folder exists: copy only the files it is missing,
        # comparing names case-insensitively.
        src_files = self._src.list_files(src_folder)
        dest_names = [f.name.lower()
                      for f in self._dest.list_files(dest_folder)]
        for src_file in src_files:
            path = os.path.join(src_folder.name, src_file.name)
            lower_filename = src_file.name.lower()
            file_exists = lower_filename in dest_names
            # Fix for flickr converting .jpeg to .jpg: treat both as equal.
            if lower_filename.endswith(".jpeg"):
                file_exists = (file_exists or
                               "{}.jpg".format(lower_filename[:-5]) in dest_names)
            if not file_exists:
                self._copy_count += 1
                self._copy_file(src_folder, src_file, path)
            else:
                self._skip_count += 1
                logger.debug("{}...skipped, file exists".format(path))

    def _copy_file(self, folder, file_info, path):
        # Note: the parameter was renamed from `file` to avoid shadowing the
        # (Python 2) builtin. ``folder`` may be falsy for root files, in
        # which case None is passed as the folder name.
        print(path)
        if not self._config.dry_run:
            self._src.copy_file(file_info, folder and folder.name, self._dest)
            logger.debug("{}...copied".format(path))

    def _print_summary(self, elapsed, files_copied, files_skipped):
        skipped_msg = (", skipped {} files(s) that already exist"
                       .format(files_skipped) if files_skipped > 0 else '')
        logger.info("\ntransferred {} file(s){} in {} sec".format(
            files_copied, skipped_msg, round(elapsed, 2)))
| {
"content_hash": "58257438b9954ede3641da304e61c127",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 115,
"avg_line_length": 39.4054054054054,
"alnum_prop": 0.5888203017832647,
"repo_name": "phdesign/flickr-rsync",
"id": "6dd8b3ec3dbdd7fbe54bd702038efc11516f3140",
"size": "2916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flickr_rsync/sync.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "81823"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import paddle.fluid as fluid
class ArcMarginLoss():
    """ArcFace (additive angular margin) loss on PaddlePaddle fluid.

    Adds an angular margin ``m`` to the target-class angle on the unit
    hypersphere and scales all cosine logits by ``s`` before a softmax
    cross-entropy. See "ArcFace: Additive Angular Margin Loss for Deep
    Face Recognition" (Deng et al.).
    """

    def __init__(self, class_dim, margin=0.15, scale=80.0, easy_margin=False):
        # class_dim: number of output classes.
        # margin: angular margin m (radians) added to the target angle.
        # scale: feature scale s multiplied into the final logits.
        # easy_margin: if True, apply the margin only where cos(theta) > 0.
        self.class_dim = class_dim
        self.margin = margin
        self.scale = scale
        self.easy_margin = easy_margin

    def loss(self, input, label):
        """Return ``(cross_entropy_loss, softmax_probabilities)``.

        ``input`` are the embeddings, ``label`` the integer class ids.
        """
        out = self.arc_margin_product(input, label, self.class_dim, self.margin, self.scale, self.easy_margin)
        #loss = fluid.layers.softmax_with_cross_entropy(logits=out, label=label)
        out = fluid.layers.softmax(input=out)
        loss = fluid.layers.cross_entropy(input=out, label=label)
        return loss, out

    def arc_margin_product(self, input, label, out_dim, m, s, easy_margin=False):
        """Build the margin-adjusted, scaled cosine logits graph."""
        #input = fluid.layers.l2_normalize(input, axis=1)
        # Row-wise L2-normalisation of the embeddings so the matmul below
        # yields cosine similarities.
        input_norm = fluid.layers.sqrt(fluid.layers.reduce_sum(fluid.layers.square(input), dim=1))
        input = fluid.layers.elementwise_div(input, input_norm, axis=0)
        weight = fluid.layers.create_parameter(
            shape=[out_dim, input.shape[1]],
            dtype='float32',
            name='weight_norm',
            attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Xavier()))
        #weight = fluid.layers.l2_normalize(weight, axis=1)
        # Normalise the class-weight rows as well; cosine = x_hat . w_hat.
        weight_norm = fluid.layers.sqrt(fluid.layers.reduce_sum(fluid.layers.square(weight), dim=1))
        weight = fluid.layers.elementwise_div(weight, weight_norm, axis=0)
        weight = fluid.layers.transpose(weight, perm = [1, 0])
        cosine = fluid.layers.mul(input, weight)
        # Epsilon guards sqrt against tiny negative values from rounding.
        sine = fluid.layers.sqrt(1.0 - fluid.layers.square(cosine) + 1e-6)
        # cos(theta + m) = cos(theta)cos(m) - sin(theta)sin(m)
        cos_m = math.cos(m)
        sin_m = math.sin(m)
        phi = cosine * cos_m - sine * sin_m
        # Thresholds used to keep cos(theta + m) monotonic (standard ArcFace
        # "hard example" correction).
        th = math.cos(math.pi - m)
        mm = math.sin(math.pi - m) * m
        if easy_margin:
            phi = self.paddle_where_more_than(cosine, 0, phi, cosine)
        else:
            phi = self.paddle_where_more_than(cosine, th, phi, cosine-mm)
        # Use the margin-shifted logit for the target class only.
        one_hot = fluid.layers.one_hot(input=label, depth=out_dim)
        output = fluid.layers.elementwise_mul(one_hot, phi) + fluid.layers.elementwise_mul((1.0 - one_hot), cosine)
        output = output * s
        return output

    def paddle_where_more_than(self, target, limit, x, y):
        """Elementwise select: x where target > limit, else y."""
        mask = fluid.layers.cast(x=(target>limit), dtype='float32')
        output = fluid.layers.elementwise_mul(mask, x) + fluid.layers.elementwise_mul((1.0 - mask), y)
        return output
| {
"content_hash": "49eaa8f80288ed9b36aea5e3b9c12f5d",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 115,
"avg_line_length": 44.66101694915254,
"alnum_prop": 0.6166982922201139,
"repo_name": "kuke/models",
"id": "b166f7bb2b7aa2d1e318bcab358b7307b779b380",
"size": "2635",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "fluid/PaddleCV/metric_learning/losses/arcmarginloss.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "15149"
},
{
"name": "Perl",
"bytes": "2072"
},
{
"name": "Python",
"bytes": "2905007"
},
{
"name": "Shell",
"bytes": "2506531"
}
],
"symlink_target": ""
} |
have_multiproc = False
try:
from multiprocessing import Array as array, Process as Thread
from uuid import uuid1 as get_ident
Thread.isAlive = Thread.is_alive
have_multiproc = True
except ImportError:
from threading import Thread
from thread import get_ident
from array import array
from hashlib import md5
import time, random, sys, os
from random import randint, gauss
from optparse import OptionParser
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.transport import THttpClient
from thrift.protocol import TBinaryProtocol
try:
from cassandra import Cassandra
from cassandra.ttypes import *
except ImportError:
# add cassandra directory to sys.path
L = os.path.abspath(__file__).split(os.path.sep)[:-3]
root = os.path.sep.join(L)
_ipath = os.path.join(root, 'interface', 'thrift', 'gen-py')
sys.path.append(os.path.join(_ipath, 'cassandra'))
import Cassandra
from ttypes import *
except ImportError:
print "Cassandra thrift bindings not found, please run 'ant gen-thrift-py'"
sys.exit(2)
try:
from thrift.protocol import fastbinary
except ImportError:
print "WARNING: thrift binary extension not found, benchmark will not be accurate!"
# Command-line interface for the stress tool. Note: this file targets
# Python 2 (print statements, xrange, old-style except clauses below).
parser = OptionParser()
parser.add_option('-n', '--num-keys', type="int", dest="numkeys",
                  help="Number of keys", default=1000**2)
parser.add_option('-t', '--threads', type="int", dest="threads",
                  help="Number of threads/procs to use", default=50)
parser.add_option('-c', '--columns', type="int", dest="columns",
                  help="Number of columns per key", default=5)
parser.add_option('-d', '--nodes', type="string", dest="nodes",
                  help="Host nodes (comma separated)", default="localhost")
parser.add_option('-s', '--stdev', type="float", dest="stdev", default=0.1,
                  help="standard deviation factor")
parser.add_option('-r', '--random', action="store_true", dest="random",
                  help="use random key generator (stdev will have no effect)")
parser.add_option('-f', '--file', type="string", dest="file",
                  help="write output to file")
parser.add_option('-p', '--port', type="int", default=9160, dest="port",
                  help="thrift port")
parser.add_option('-m', '--framed', action="store_true", dest="framed",
                  help="use framed transport")
parser.add_option('-o', '--operation', type="choice", dest="operation",
                  default="insert", choices=('insert', 'read', 'rangeslice'),
                  help="operation to perform")
parser.add_option('-u', '--supercolumns', type="int", dest="supers", default=1,
                  help="number of super columns per key")
parser.add_option('-y', '--family-type', type="choice", dest="cftype",
                  choices=('regular','super'), default='regular',
                  help="column family type")
parser.add_option('-k', '--keep-going', action="store_true", dest="ignore",
                  help="ignore errors inserting or reading")
parser.add_option('-i', '--progress-interval', type="int", default=10,
                  dest="interval", help="progress report interval (seconds)")
parser.add_option('-g', '--get-range-slice-count', type="int", default=1000,
                  dest="rangecount",
                  help="amount of keys to get_range_slice per call")
(options, args) = parser.parse_args()

# Derived module-level configuration shared by all workers.
total_keys = options.numkeys
n_threads = options.threads
# Python 2 integer division: each worker gets an equal slice of the key space
# (a remainder of up to n_threads - 1 keys is dropped).
keys_per_thread = total_keys / n_threads
columns_per_key = options.columns
supers_per_key = options.supers
# this allows client to round robin requests directly for
# simple request load-balancing
nodes = options.nodes.split(',')
# a generator that generates all keys according to a bell curve centered
# around the middle of the keys generated (0..total_keys). Remember that
# about 68% of keys will be within stdev away from the mean and
# about 95% within 2*stdev.
stdev = total_keys * options.stdev
mean = total_keys / 2
def key_generator_gauss():
    """Return one zero-padded key sampled from a Gaussian distribution.

    Despite the historical name this is not a Python generator: each call
    produces a single key string. Samples are redrawn until one lands
    inside [0, total_keys).
    """
    pattern = '%0' + str(len(str(total_keys))) + 'd'
    sample = gauss(mean, stdev)
    while not (0 <= sample < total_keys):
        sample = gauss(mean, stdev)
    return pattern % int(sample)
# a generator that will generate all keys w/ equal probability. this is the
# worst case for caching.
def key_generator_random():
    """Return one zero-padded key drawn uniformly from [0, total_keys)."""
    pattern = '%%0%dd' % len(str(total_keys))
    return pattern % randint(0, total_keys - 1)
# Default to the Gaussian key distribution; --random switches to the uniform
# generator (worst case for server-side caching).
key_generator = key_generator_gauss
if options.random:
    key_generator = key_generator_random
def get_client(host='127.0.0.1', port=9160, framed=False):
    """Build a Thrift client for one Cassandra node.

    The transport is framed or buffered depending on ``framed``; the caller
    is responsible for opening ``client.transport``.
    """
    sock = TSocket.TSocket(host, port)
    wrapper = (TTransport.TFramedTransport if framed
               else TTransport.TBufferedTransport)
    transport = wrapper(sock)
    protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
    client = Cassandra.Client(protocol)
    # Keep a handle on the transport so callers can open/close it.
    client.transport = transport
    return client
class Operation(Thread):
    """Base worker: owns a key sub-range, shared counters and a client.

    ``counts`` and ``latencies`` are shared arrays (multiprocessing.Array
    when available) with one slot per worker; ``i`` is this worker's index
    into both.
    """

    def __init__(self, i, counts, latencies):
        Thread.__init__(self)
        # generator of the keys to be used
        self.range = xrange(keys_per_thread * i, keys_per_thread * (i + 1))
        # we can't use a local counter, since that won't be visible to the parent
        # under multiprocessing. instead, the parent passes a "counts" array
        # and an index that is our assigned counter.
        self.idx = i
        self.counts = counts
        # similarly, a shared array for latency totals
        self.latencies = latencies
        # random host for pseudo-load-balancing
        [hostname] = random.sample(nodes, 1)
        # open client
        self.cclient = get_client(hostname, options.port, options.framed)
        self.cclient.transport.open()
class Inserter(Operation):
    """Worker that batch-inserts its assigned key range.

    Every key gets the same column payload (an md5 hex digest of the
    thread/process id), so inserts are cheap to build. Latency and count
    are accumulated per operation in the shared arrays, even when an error
    is ignored via --keep-going.
    """

    def run(self):
        data = md5(str(get_ident())).hexdigest()
        columns = [Column('C' + str(j), data, 0) for j in xrange(columns_per_key)]
        fmt = '%0' + str(len(str(total_keys))) + 'd'
        if 'super' == options.cftype:
            supers = [SuperColumn('S' + str(j), columns) for j in xrange(supers_per_key)]
        for i in self.range:
            key = fmt % i
            if 'super' == options.cftype:
                cfmap= {'Super1': [ColumnOrSuperColumn(super_column=s) for s in supers]}
            else:
                cfmap = {'Standard1': [ColumnOrSuperColumn(column=c) for c in columns]}
            start = time.time()
            try:
                self.cclient.batch_insert('Keyspace1', key, cfmap, ConsistencyLevel.ONE)
            except KeyboardInterrupt:
                raise
            except Exception, e:
                # --keep-going: report and continue; otherwise re-raise.
                if options.ignore:
                    print e
                else:
                    raise
            self.latencies[self.idx] += time.time() - start
            self.counts[self.idx] += 1
class Reader(Operation):
    """Worker that reads keys drawn from the configured key distribution.

    Performs keys_per_thread get_slice calls; keys come from the global
    key_generator (Gaussian or uniform), not from this worker's own range.
    """

    def run(self):
        # Slice predicate: up to columns_per_key columns, unbounded names.
        p = SlicePredicate(slice_range=SliceRange('', '', False, columns_per_key))
        if 'super' == options.cftype:
            for i in xrange(keys_per_thread):
                key = key_generator()
                for j in xrange(supers_per_key):
                    parent = ColumnParent('Super1', 'S' + str(j))
                    start = time.time()
                    try:
                        r = self.cclient.get_slice('Keyspace1', key, parent, p, ConsistencyLevel.ONE)
                        # An empty slice means the key was never inserted.
                        if not r: raise RuntimeError("Key %s not found" % key)
                    except KeyboardInterrupt:
                        raise
                    except Exception, e:
                        if options.ignore:
                            print e
                        else:
                            raise
                    self.latencies[self.idx] += time.time() - start
                    self.counts[self.idx] += 1
        else:
            parent = ColumnParent('Standard1')
            for i in xrange(keys_per_thread):
                key = key_generator()
                start = time.time()
                try:
                    r = self.cclient.get_slice('Keyspace1', key, parent, p, ConsistencyLevel.ONE)
                    if not r: raise RuntimeError("Key %s not found" % key)
                except KeyboardInterrupt:
                    raise
                except Exception, e:
                    if options.ignore:
                        print e
                    else:
                        raise
                self.latencies[self.idx] += time.time() - start
                self.counts[self.idx] += 1
class RangeSlicer(Operation):
def run(self):
begin = self.range[0]
end = self.range[-1]
current = begin
last = current + options.rangecount
fmt = '%0' + str(len(str(total_keys))) + 'd'
p = SlicePredicate(slice_range=SliceRange('', '', False, columns_per_key))
if 'super' == options.cftype:
while current < end:
start = fmt % current
finish = fmt % last
res = []
for j in xrange(supers_per_key):
parent = ColumnParent('Super1', 'S' + str(j))
begin = time.time()
try:
res = self.cclient.get_range_slice('Keyspace1', parent, p, start,finish, options.rangecount, ConsistencyLevel.ONE)
if not res: raise RuntimeError("Key %s not found" % key)
except KeyboardInterrupt:
raise
except Exception, e:
if options.ignore:
print e
else:
raise
self.latencies[self.idx] += time.time() - begin
self.counts[self.idx] += 1
current += len(r) + 1
last += len(r)
else:
parent = ColumnParent('Standard1')
while current < end:
start = fmt % current
finish = fmt % last
begin = time.time()
try:
r = self.cclient.get_range_slice('Keyspace1', parent, p, start, finish, options.rangecount, ConsistencyLevel.ONE)
if not r: raise RuntimeError("Range not found:", start, finish)
except KeyboardInterrupt:
raise
except Exception, e:
if options.ignore:
print e
else:
raise
current += len(r) + 1
last += len(r)
self.latencies[self.idx] += time.time() - begin
self.counts[self.idx] += 1
class OperationFactory:
    """Maps an operation name to the worker class that implements it."""

    @staticmethod
    def create(type, i, counts, latencies):
        """Build worker ``i`` for ``type`` ('read', 'insert' or 'rangeslice').

        ``counts`` and ``latencies`` are the shared per-worker arrays.
        Raises RuntimeError for an unknown operation name.
        """
        if type == 'read':
            return Reader(i, counts, latencies)
        elif type == 'insert':
            return Inserter(i, counts, latencies)
        elif type == 'rangeslice':
            return RangeSlicer(i, counts, latencies)
        else:
            # Call-style raise works on both Python 2 and 3, unlike the
            # old "raise RuntimeError, msg" statement form used before.
            raise RuntimeError('Unsupported op!')
class Stress(object):
    """Drives the worker pool and reports progress as CSV rows.

    The counters live in shared arrays so they remain visible to this
    parent even when workers run as separate processes.
    """

    # One slot per worker: operation counts ('i' = int) and cumulative
    # latency seconds ('d' = double).
    counts = array('i', [0] * n_threads)
    latencies = array('d', [0] * n_threads)

    def create_threads(self,type):
        """Start n_threads workers of the given operation type."""
        threads = []
        for i in xrange(n_threads):
            th = OperationFactory.create(type, i, self.counts, self.latencies)
            threads.append(th)
            th.start()
        return threads

    def run_test(self,filename,threads):
        """Poll the shared counters every interval and emit progress rows.

        Writes to ``filename`` when given, otherwise to stdout. Returns
        once every worker has exited.
        """
        start_t = time.time()
        if filename:
            outf = open(filename,'w')
        else:
            outf = sys.stdout
        outf.write('total,interval_op_rate,avg_latency,elapsed_time\n')
        total = old_total = latency = old_latency = 0
        while True:
            time.sleep(options.interval)
            old_total, old_latency = total, latency
            total = sum(self.counts[th.idx] for th in threads)
            latency = sum(self.latencies[th.idx] for th in threads)
            delta = total - old_total
            delta_latency = latency - old_latency
            # Average latency over the interval; 'NAN' when no ops completed.
            delta_formatted = (delta_latency / delta) if delta > 0 else 'NAN'
            elapsed_t = int(time.time() - start_t)
            outf.write('%d,%d,%s,%d\n'
                       % (total, delta / options.interval, delta_formatted, elapsed_t))
            if not [th for th in threads if th.isAlive()]:
                break

    def insert(self):
        threads = self.create_threads('insert')
        self.run_test(options.file,threads);

    def read(self):
        threads = self.create_threads('read')
        self.run_test(options.file,threads);

    def rangeslice(self):
        threads = self.create_threads('rangeslice')
        self.run_test(options.file,threads);
# Entry point: look up the requested benchmark method on the Stress
# instance ('insert', 'read' or 'rangeslice' -- constrained by optparse
# choices above) and run it.
stresser = Stress()
benchmark = getattr(stresser, options.operation, None)
if not have_multiproc:
    print """WARNING: multiprocessing not present, threading will be used.
Benchmark may not be accurate!"""
benchmark()
| {
"content_hash": "6043c1ebe3f407fd6acd1b798f3f169e",
"timestamp": "",
"source": "github",
"line_count": 325,
"max_line_length": 138,
"avg_line_length": 40.18769230769231,
"alnum_prop": 0.5644284511140035,
"repo_name": "ucare-uchicago/cass-fate-system",
"id": "fc6c2a5840b6da33feaf728a7ece1cde77c32933",
"size": "13994",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/py_stress/stress.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AspectJ",
"bytes": "90833"
},
{
"name": "Batchfile",
"bytes": "10766"
},
{
"name": "CSS",
"bytes": "6740"
},
{
"name": "GAP",
"bytes": "4837"
},
{
"name": "HTML",
"bytes": "6052054"
},
{
"name": "Java",
"bytes": "2869774"
},
{
"name": "Makefile",
"bytes": "262"
},
{
"name": "PigLatin",
"bytes": "333"
},
{
"name": "Python",
"bytes": "316071"
},
{
"name": "Shell",
"bytes": "51520"
},
{
"name": "Thrift",
"bytes": "21513"
}
],
"symlink_target": ""
} |
"""Tests."""
import pytest
from pyramid.interfaces import IRoutesMapper
@pytest.mark.parametrize('config_fixture, route_count', (
    ('simplerouting_config', 3),
    ('moduled_config', 6),
    ('two_moduled_config', 9),
    ('prefixed_config', 9)
))
def test_read_count(request, config_fixture, route_count):
    """Check each configurator fixture registers the expected route count."""
    config = request.getfixturevalue(config_fixture)
    # actually needed to be able to use getUtility
    config.commit()
    mapper = config.registry.getUtility(IRoutesMapper)
    routes = mapper.get_routes()
    assert routes
    assert len(routes) == route_count
@pytest.mark.parametrize('config_fixture, route_number, name, pattern', (
    ('simplerouting_config', 0, 'index', '/'),
    ('moduled_config', 0, 'module_index', 'module/'),
    ('moduled_config', 3, 'index', '/'),
    ('two_moduled_config', 0, 'module_index', 'module/'),
    ('two_moduled_config', 3, 'second_index', 'second/'),
    ('two_moduled_config', 4, 'second_secret', 'second/secret'),
    ('two_moduled_config', 6, 'index', '/'),
    ('prefixed_config', 0, 'module_index', '{var}/'),
    ('prefixed_config', 1, 'module_secret', '{var}/secret'),
    ('prefixed_config', 3, 'second_index', '{var}/subpath/'),
    ('prefixed_config', 4, 'second_secret', '{var}/subpath/secret'),
))
def test_routename(request, config_fixture, route_number, name, pattern):
    """Check that route ordering, names and patterns match expectations."""
    config = request.getfixturevalue(config_fixture)
    # actually needed to be able to use getUtility
    config.commit()
    mapper = config.registry.getUtility(IRoutesMapper)
    routes = mapper.get_routes()
    assert routes[route_number].name == name
    assert routes[route_number].pattern == pattern
def test_by_hand_only(clean_config):
    """Run routes_from_package directly, without config.include."""
    config = clean_config
    config.commit()
    # pylint:disable=import-outside-toplevel
    from tzf.pyramid_routing import routes_from_package
    routes_from_package(config, 'tests.routes_definitions.routing')
    config.commit()
def test_includeme_and_by_hand(simplerouting_config):
    """config.include in the app, then routes_from_package by hand."""
    config = simplerouting_config
    config.commit()
    # pylint:disable=import-outside-toplevel
    from tzf.pyramid_routing import routes_from_package
    routes_from_package(config, 'tests.routes_definitions.routing_moduled')
    config.commit()
def test_includeme_and_by_hand_with_includeme_ab(simplerouting_config):
    """config.include in the app, plus two includeme's from other 'plugins'."""
    config = simplerouting_config
    config.commit()
    config.include('tzf.pyramid_routing')
    config.include('tests.routes_definitions.module_to_include_1')
    config.include('tests.routes_definitions.module_to_include_2')
    config.commit()
def test_includeme_and_by_hand_with_includeme(simplerouting_config):
    """config.include in the app, and an includeme in another 'plugin'."""
    config = simplerouting_config
    config.commit()

    def includeme_test(config):
        """Configure routes in test."""
        # pylint:disable=import-outside-toplevel
        from tzf.pyramid_routing import routes_from_package
        routes_from_package(config, 'tests.routes_definitions.routing_moduled')

    config.include(includeme_test)
    config.commit()
    config.include('tzf.pyramid_routing')
    config.commit()
| {
"content_hash": "ca0ff97c76d4e3d2523c74c352e488b1",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 79,
"avg_line_length": 33.067961165048544,
"alnum_prop": 0.6814445096887844,
"repo_name": "fizyk/pyramid_routing",
"id": "1ead720941a08d2e755fc46374c7edb6aa9c3ade",
"size": "3406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_routing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12361"
}
],
"symlink_target": ""
} |
"""
Abiquo Utilities Module for the Abiquo Driver.
Common utilities needed by the :class:`AbiquoNodeDriver`.
"""
import base64
from libcloud.common.base import ConnectionUserAndKey, PollingConnection
from libcloud.common.base import XmlResponse
from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import b
from libcloud.compute.base import NodeState
def get_href(element, rel):
    """
    Search a RESTLink element in the :class:`AbiquoResponse`.

    Abiquo entities are self-describing: each one carries ``link`` children
    that point at related resources. This helper scans those links and, for
    the first one whose ``rel`` attribute matches, returns the path portion
    of its ``href`` with the leading ``/api`` prefix stripped (keeping the
    slash that follows it). ``None`` is returned when no link matches.

    For instance, a rack entity linking to
    ``http://host/api/admin/datacenters/1`` with ``rel='datacenter'``
    yields ``'/admin/datacenters/1'``.

    :type element: :class:`xml.etree.ElementTree`
    :param element: Xml Entity returned by Abiquo API (required)

    :type rel: ``str``
    :param rel: relation link name

    :rtype: ``str``
    :return: the 'href' path according to the 'rel' input parameter
    """
    needle = '/api/'
    for candidate in element.findall('link'):
        if candidate.attrib['rel'] != rel:
            continue
        # hrefs look like 'http://host:80/api/admin/enterprises'; we only
        # care about the path starting at the slash that ends '/api'.
        path = urlparse.urlparse(candidate.attrib['href']).path
        cut = path.find(needle) + len(needle) - 1
        return path[cut:]
class AbiquoResponse(XmlResponse):
    """
    Abiquo XML Response.

    Wraps the response in XML bodies or extracts the error data in
    case of error.
    """

    # Map between abiquo state and Libcloud State
    NODE_STATE_MAP = {
        'NOT_ALLOCATED': NodeState.TERMINATED,
        'ALLOCATED': NodeState.PENDING,
        'CONFIGURED': NodeState.PENDING,
        'ON': NodeState.RUNNING,
        'PAUSED': NodeState.PENDING,
        'OFF': NodeState.PENDING,
        'LOCKED': NodeState.PENDING,
        'UNKNOWN': NodeState.UNKNOWN
    }

    def parse_error(self):
        """
        Parse the error messages.

        Response body can easily be handled by this class' parent
        :class:`XmlResponse`, but there are use cases in which the Abiquo
        API does not respond with XML but with HTML. So we need to
        handle these special cases.

        Raises :class:`InvalidCredsError` on 401, :class:`ForbiddenError`
        on 403, and a generic :class:`LibcloudError` carrying the first
        server-reported message otherwise.
        """
        if self.status == httplib.UNAUTHORIZED:
            raise InvalidCredsError(driver=self.connection.driver)
        elif self.status == httplib.FORBIDDEN:
            raise ForbiddenError(self.connection.driver)
        else:
            errors = self.parse_body().findall('error')
            # Most of the exceptions only have one error
            raise LibcloudError(errors[0].findtext('message'))

    def success(self):
        """
        Determine if the request was successful.

        Note: only 200 (OK), 201 (Created), 202 (Accepted) and
        204 (No Content) count as success here -- not every 2xx code.

        :rtype: ``bool``
        :return: successful request or not.
        """
        return self.status in [httplib.OK, httplib.CREATED, httplib.NO_CONTENT,
                               httplib.ACCEPTED]

    def async_success(self):
        """
        Determine if an asynchronous request was successful.

        An async request retrieves a task object that can be successfully
        fetched (self.status == OK) while the asynchronous task itself (the
        body of the HTTP response) may still have finished with an error.
        So this method checks that the HTTP status is a success AND that
        the task finished successfully.

        :rtype: ``bool``
        :return: successful asynchronous request or not
        """
        if self.success():
            # So we have a 'task' object in the body
            task = self.parse_body()
            return task.findtext('state') == 'FINISHED_SUCCESSFULLY'
        else:
            return False
class AbiquoConnection(ConnectionUserAndKey, PollingConnection):
    """
    A Connection to the Abiquo API.

    Combines :class:`ConnectionUserAndKey` (HTTP Basic credentials) with
    :class:`PollingConnection` so asynchronous Abiquo tasks can be polled
    until they reach a terminal state.
    """

    responseCls = AbiquoResponse

    def __init__(self, user_id, key, secure=True, host=None, port=None,
                 url=None, timeout=None):
        super(AbiquoConnection, self).__init__(user_id=user_id, key=key,
                                               secure=secure,
                                               host=host, port=port,
                                               url=url, timeout=timeout)
        # Cache for data shared across multiple requests.
        self.cache = {}

    def add_default_headers(self, headers):
        """
        Inject the HTTP Basic 'Authorization' header into every request.

        :type headers: ``dict``
        :param headers: Default input headers

        :rtype: ``dict``
        :return: Default input headers with the 'Authorization' header set
        """
        token = base64.b64encode(b('%s:%s' % (self.user_id, self.key)))
        headers['Authorization'] = 'Basic ' + token.decode('utf-8')
        return headers

    def get_poll_request_kwargs(self, response, context, request_kwargs):
        """
        Manage polling request arguments.

        Abiquo answers asynchronous calls with an 'acceptedrequest'
        XmlElement whose 'status' link points at the task to poll::

            <acceptedrequest>
                <link href="http://uri/to/task" rel="status"/>
                <message>You can follow the progress in the link</message>
            </acceptedrequest>

        This retargets the request at that URI while keeping all other
        request arguments and headers intact.

        :type response: :class:`xml.etree.ElementTree`
        :keyword response: Object returned by poll request.

        :type request_kwargs: ``dict``
        :keyword request_kwargs: Default request arguments and headers

        :rtype: ``dict``
        :return: Modified keyword arguments
        """
        request_kwargs['action'] = get_href(response.object, 'status')
        request_kwargs['method'] = 'GET'
        return request_kwargs

    def has_completed(self, response):
        """
        Decide if the asynchronous job has ended.

        :type response: :class:`xml.etree.ElementTree`
        :param response: Response object returned by poll request

        :rtype: ``bool``
        :return: Whether the job has reached a terminal state
        """
        final_states = ['FINISHED_SUCCESSFULLY', 'ABORTED',
                        'FINISHED_UNSUCCESSFULLY']
        return response.object.findtext('state') in final_states
class ForbiddenError(LibcloudError):
    """
    Exception used when credentials are ok but the user has no permission.
    """

    def __init__(self, driver):
        message = 'User has not permission to perform this task.'
        # Bug fix: the original called super(LibcloudError, self).__init__,
        # which skipped LibcloudError.__init__ entirely (it resolved to the
        # next class in the MRO after LibcloudError), so the error value and
        # driver were never stored the way LibcloudError expects.
        super(ForbiddenError, self).__init__(message, driver)
| {
"content_hash": "039ae202e5017ee63599491440192ffa",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 79,
"avg_line_length": 35.80971659919028,
"alnum_prop": 0.6114188807235726,
"repo_name": "ferewuz/libcloud",
"id": "041a9ffea3186c1254f4f5236cda9733a7ca1016",
"size": "9626",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "libcloud/common/abiquo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from bulbs.model import Node, Relationship
from bulbs.property import String, Integer, DateTime, List
from bulbs.utils import current_datetime
#print "ID: " , identifier
#print "Name: " , name
#print "Alternate Names: ", alt_names
#print "Subjects: ", subjects
#print "Nationalities: ", nationalities
#print "Languages: ", languages
#print "Places: ", places
#print "Occupations: ", occupations
#print "Associated with: ", associated
#print "Corresponded with: ", corresponded
#print "Same as: ", sameas
class Agent(Node):
    """Graph node for a SNAC agent record (the commented-out fields above
    mirror the values parsed out of the EAC source)."""

    element_type = "agent"

    # Agent kind; required and indexed for queries.
    snac_type = String(nullable=False, indexed=True)
    # SNAC record identifier; required and indexed for lookups.
    identifier = String(nullable=False, indexed=True)
    # Preferred name form; required.
    name = String(nullable=False)
    # Alternate name forms.
    altNames = List()
    # Existence dates kept as free-form strings — format not enforced here.
    startDate = String()
    endDate = String()
    occupations = List()
    subjects = List()
    languages = List()
    nationalities = List()
    places = List()
    # Identifiers of equivalent records elsewhere ("same as" links).
    sameAs = List()
class Place(Node):
    """Graph node for a geographic place, optionally geocoded."""

    element_type = "place"

    identifier = String(nullable=False, indexed=True)
    name = String(nullable=False)
    # Coordinates stored as strings and may be absent — TODO confirm format.
    latitude = String()
    longitude = String()
class Subject(Node):
    """Graph node for a subject heading associated with agents."""

    element_type = "subject"

    name = String(nullable=False)
class Document(Node):
    """Graph node for a document that references agents."""

    element_type = "document"

    # Unlike Agent/Place, this identifier is not indexed — TODO confirm
    # whether that is intentional.
    identifier = String(nullable=False)
    name = String(nullable=False)
class Occupation(Node):
    """Graph node for an occupation held by an agent."""

    element_type = "occupation"

    name = String(nullable=False)
class AssociatedWith(Relationship):
    """Edge: one agent is associated with another."""

    label = "associatedWith"
class CorrespondedWith(Relationship):
    """Edge: one agent corresponded with another."""

    label = "correspondedWith"
class ReferencedIn(Relationship):
    """Edge: an agent is referenced in a document."""

    label = "referencedIn"
class Location(Relationship):
    """Edge: an agent is associated with a place.

    NOTE(review): the class is named Location but the edge label is
    "associatedPlace" — confirm the mismatch is intentional.
    """

    label = "associatedPlace"
| {
"content_hash": "948612ab1e22e33540b246f6e2366a88",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 58,
"avg_line_length": 27.688524590163933,
"alnum_prop": 0.6791000592066312,
"repo_name": "deternitydx/SNAC-EAC-Parser",
"id": "5a58d9e62bc959edadaebe4f3e97697c9711ce1c",
"size": "1734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "relationships.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "31159"
},
{
"name": "Shell",
"bytes": "111"
}
],
"symlink_target": ""
} |
"""Helper module for dealing with datasets loaded from TFDS."""
import copy
import enum
from typing import List, Dict, Optional, Text, Any, Tuple, Callable
import attr
import cv2
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
def tfds_load_dataset(dataset_name, *args, **kwargs):
  """Helper function used to bridge internal google, and the external world."""
  # Normalize data_dir to an explicit keyword (defaulting to None) and
  # always request a download.
  kwargs["data_dir"] = kwargs.pop("data_dir", None)
  return tfds.load(dataset_name, *args, download=True, **kwargs)
class Split(enum.Enum):
  """Enum representing different splits of data for cross validation.

  Two validation sets are needed for meta-learning optimizers.
  """
  TRAIN = "TRAIN"
  # First of the two validation splits used by meta-learning.
  VALID_INNER = "VALID_INNER"
  # Second, held-out validation split.
  VALID_OUTER = "VALID_OUTER"
  TEST = "TEST"
def split_dataset(
    dataset,
    num_per_split,
    num_splits = 3,
):
  """Helper to split a dataset for cross validaton.

  The first `num_splits - 1` returned datasets each contain `num_per_split`
  examples; the final one holds whatever examples remain.

  This is often used to carve extra validation sets off a training set:
  e.g. train_old --> [valid_inner, valid_outer, train]

  Args:
    dataset: name of tfds dataset
    num_per_split: number of examples to have for each of the split off dataset.
    num_splits: number of splits to create.

  Returns:
    A list of the requested datasets.
  """
  # Fixed-size leading splits: split i covers examples
  # [i * num_per_split, (i + 1) * num_per_split).
  fixed_splits = [
      dataset.skip(i * num_per_split).take(num_per_split)
      for i in range(num_splits - 1)
  ]
  # Everything left over goes into the last split.
  remainder = dataset.skip(num_per_split * (num_splits - 1))
  return fixed_splits + [remainder]
def _add_onehot_label_to_dict(d,
                              num_label):
  """Returns a new dictionary with a label_onehot key."""
  # Shallow copy so the caller's dict is not mutated.
  result = copy.copy(d)
  result["label_onehot"] = tf.one_hot(result["label"], num_label)
  return result
def _process_image_in_dict(d):
  """Returns a new dict with a uint8 image converted to 0-1 scaled image."""
  result = copy.copy(d)  # don't mutate the caller's dict
  image = result["image"]
  if image.dtype != tf.uint8:
    raise ValueError("Only supports uint8 images")
  # Rescale [0, 255] uint8 -> [0, 1] float32.
  result["image"] = tf.cast(image, tf.float32) / 255.
  return result
@attr.s
class Datasets(object):
  """Container bundling the four cross-validation splits of a dataset."""
  # NOTE(review): attr.ib(Any) passes typing.Any as the *default value*
  # (attr.ib's first positional argument), not as a type annotation — so
  # every field is optional and defaults to the typing.Any sentinel.
  # Presumably attr.ib(type=Any) was intended; confirm before changing.
  train = attr.ib(Any)
  valid_inner = attr.ib(Any)
  valid_outer = attr.ib(Any)
  test = attr.ib(Any)
def get_image_datasets(
    dataset_name,
    batch_size,
    num_per_valid = 3000,
    num_train = None,
    cache_dataset = True,
    shuffle_buffer = None,
    data_dir = None,
    augmentation_fn = None,
):
  """Get an image `Datasets` instance that is ready to train with.

  This includes caching for speed, repeating, shuffling, preprocessing, and
  batching for each of the 4 splits.

  Args:
    dataset_name: Name of tfds dataset.
    batch_size: Batch size to use.
    num_per_valid: Number of validation images.
    num_train: Number of training examples to use. If None, use all.
    cache_dataset: Optionally cache the dataset for speed.
    shuffle_buffer: Size of shuffle buffer. If none, use the full train set
      size.
    data_dir: Location of tfds data_dir.
    augmentation_fn: Function to apply before batching for augmentation.

  Returns:
    `Datasets` ready to train with.
  """
  # TODO(lmetz) pin all versions of datasets so they are consistent in time.
  splits, info = tfds_load_dataset(
      dataset_name, with_info=True, data_dir=data_dir)
  num_classes = info.features["label"].num_classes
  # Some datasets have different splits defined. For meta-learning we need 4
  # splits. The following takes the splits that are defined, and tries to use
  # them when possible. For missing splits, examples are taken off of the train
  # dataset.
  if set(splits.keys()) == set(["train", "validation", "test"]):
    # Only valid_inner must be carved off the train split.
    train = splits["train"]
    test = splits["test"]
    valid_outer = splits["validation"]
    # pylint: disable=unbalanced-tuple-unpacking
    valid_inner, train = split_dataset(
        train, num_per_split=num_per_valid, num_splits=2)
    num_test = info.splits["test"].num_examples
    # NOTE(review): total_num_train still counts the num_per_valid examples
    # carved off for valid_inner, and num_valid reflects only the
    # "validation" split's size. Both values only size shuffle buffers
    # below, so the impact is benign — confirm intent before tightening.
    total_num_train = info.splits["train"].num_examples
    num_valid = info.splits["validation"].num_examples
  elif (set(splits.keys()) == set(["train", "test"]) or
        set(splits.keys()) == set(["train", "validation"])):
    # One heldout split exists: carve both validation sets off of train and
    # use the heldout split (test or validation, whichever exists) as test.
    train = splits["train"]
    # pylint: disable=unbalanced-tuple-unpacking
    valid_inner, valid_outer, train = split_dataset(
        train, num_per_split=num_per_valid, num_splits=3)
    if "test" in info.splits:
      heldout_split = info.splits["test"]
    else:
      heldout_split = info.splits["validation"]
    num_test = heldout_split.num_examples
    test = splits["test"] if "test" in splits else splits["validation"]
    total_num_train = info.splits["train"].num_examples - num_per_valid * 2
    num_valid = num_per_valid
  elif set(splits.keys()) == set(["train"]):
    # Only train exists: carve all three other splits off of it.
    train = splits["train"]
    # pylint: disable=unbalanced-tuple-unpacking
    valid_inner, valid_outer, test, train = split_dataset(
        train, num_per_split=num_per_valid, num_splits=4)
    total_num_train = info.splits["train"].num_examples - num_per_valid * 3
    num_test = num_per_valid
    num_valid = num_per_valid
  else:
    raise ValueError("Unsure how to manage the following splits: %s" %
                     str(list(splits.keys())))

  if num_train:
    train = train.take(num_train)
  else:
    num_train = total_num_train

  datasets = Datasets(
      train=train, valid_inner=valid_inner, valid_outer=valid_outer, test=test)

  # Cache (optional) before the infinite repeat, then shuffle each split
  # with a buffer sized to the split (unless an explicit buffer was given).
  if cache_dataset:
    datasets = tf.nest.map_structure(lambda ds: ds.cache(), datasets)
  datasets = tf.nest.map_structure(lambda ds: ds.repeat(), datasets)

  train_shuffle = shuffle_buffer if shuffle_buffer else num_train
  valid_shuffle = shuffle_buffer if shuffle_buffer else num_valid
  test_shuffle = shuffle_buffer if shuffle_buffer else num_test

  datasets = Datasets(
      train=datasets.train.shuffle(train_shuffle),
      valid_inner=datasets.valid_inner.shuffle(valid_shuffle),
      valid_outer=datasets.valid_outer.shuffle(valid_shuffle),
      test=datasets.test.shuffle(test_shuffle))

  def pre_process(example):
    # Add a one-hot label and rescale the uint8 image to [0, 1] floats.
    example = _add_onehot_label_to_dict(example, num_classes)
    return _process_image_in_dict(example)

  datasets = tf.nest.map_structure(lambda ds: ds.map(pre_process), datasets)

  # Augmentation (if any) runs after preprocessing, before batching.
  if augmentation_fn:
    datasets = tf.nest.map_structure(lambda ds: ds.map(augmentation_fn),
                                     datasets)

  return tf.nest.map_structure(
      lambda ds: ds.batch(batch_size, drop_remainder=True), datasets)
def _random_slice(example,
                  length):
  """Extract a random slice or pad to make all sequences a fixed length.

  For example -- if one passes in [1,2,3,4] with length=2, this would return
  one of the following: [1,2], [2,3], [3,4].
  If the input is [1, 2] with length=4, this would return [1, 2, 0, 0].

  Args:
    example: Dictionary containing a single example with the "text" key. This
      "text" key should be a vector with an integer type.
    length: Length of the slice.

  Returns:
    An example containing only a fixed slice of text.
  """
  input_length = tf.shape(example["text"])[0]
  max_idx = input_length - length
  # Pick a random slice start. NOTE(review): random_uniform's maxval is
  # exclusive, so start_idx is drawn from [0, max_idx) and the final
  # possible slice (starting exactly at max_idx) is never selected — the
  # docstring's [3,4] case above would not occur. Confirm if intended.
  # pylint: disable=g-long-lambda
  start_idx = tf.cond(
      tf.greater(max_idx, 0), lambda: tf.random_uniform(
          [], tf.to_int32(0), tf.cast(max_idx, tf.int32), dtype=tf.int32),
      lambda: 0)
  # pylint: enable=g-long-lambda
  # Right-pad with zeros when the input is shorter than `length`.
  to_pad = tf.maximum(length - input_length, 0)
  pad_input = tf.pad(example["text"], [[0, to_pad]])
  # copy to prevent a mutation of inputs.
  example = copy.copy(example)
  example["text"] = pad_input[start_idx:start_idx + length]
  example["text"].set_shape([length])
  # "mask" is 1 over real tokens and 0 over the zero padding.
  pad_mask = tf.pad(tf.ones([input_length]), [[0, to_pad]])
  example["mask"] = pad_mask[start_idx:start_idx + length]
  example["mask"].set_shape([length])
  return example
def random_slice_text_data(
    dataset_name,
    batch_size,
    num_train = None,
    patch_length = 128,
    num_per_valid = 3000,
    cache_dataset = False,
    shuffle_buffer = None,
):
  """Gets a text dataset ready to train on.

  This splits the dataset into 4 cross validation splits, takes a random slice
  to make all entries the same length, and batches the examples.

  Args:
    dataset_name: tensorflow_dataset's dataset name.
    batch_size: batch size.
    num_train: number of training examples. If None use all examples.
    patch_length: length of patch to extract.
    num_per_valid: number of images for each validation set.
    cache_dataset: Cache the dataset or not.
    shuffle_buffer: Shuffle buffer size. If None, use dataset size.

  Returns:
    Datasets object containing tf.Dataset.
  """
  train, info = tfds_load_dataset(
      dataset_name, split="train", with_info=True, shuffle_files=True)
  total_num_train = info.splits["train"].num_examples
  # NOTE(review): assumes the dataset always defines a "test" split — the
  # image variant above handles missing test splits; confirm all callers
  # pass datasets with one.
  num_test = info.splits["test"].num_examples

  # Carve both validation splits off of train (default num_splits=3).
  # pylint: disable=unbalanced-tuple-unpacking
  valid_inner, valid_outer, train = split_dataset(
      train, num_per_split=num_per_valid)
  # pylint: enable=unbalanced-tuple-unpacking
  if num_train:
    train = train.take(num_train)

  test = tfds_load_dataset(dataset_name, split="test", shuffle_files=True)

  datasets = Datasets(
      train=train, valid_inner=valid_inner, valid_outer=valid_outer, test=test)

  # Cache (optional) before the infinite repeat, then shuffle each split.
  if cache_dataset:
    datasets = tf.nest.map_structure(lambda ds: ds.cache(), datasets)
  datasets = tf.nest.map_structure(lambda ds: ds.repeat(), datasets)

  train_shuffle = shuffle_buffer if shuffle_buffer else total_num_train - num_per_valid * 2
  valid_shuffle = shuffle_buffer if shuffle_buffer else num_per_valid
  test_shuffle = shuffle_buffer if shuffle_buffer else num_test

  datasets = Datasets(
      train=datasets.train.shuffle(train_shuffle),
      valid_inner=datasets.valid_inner.shuffle(valid_shuffle),
      valid_outer=datasets.valid_outer.shuffle(valid_shuffle),
      test=datasets.test.shuffle(test_shuffle))

  def pre_process(example):
    """Preprocess example by adding onehot label, and taking a random slice."""
    # Labels are optional for text datasets (e.g. language modeling).
    if "label" in info.features:
      num_classes = info.features["label"].num_classes
      example = _add_onehot_label_to_dict(example, num_classes)
    return _random_slice(example, patch_length)

  datasets = tf.nest.map_structure(lambda ds: ds.map(pre_process), datasets)

  return tf.nest.map_structure(
      lambda ds: ds.batch(batch_size, drop_remainder=True), datasets)
class ResizedDataset(tfds.core.GeneratorBasedBuilder):
  """Base class for a resized image tensorflow dataset."""

  def __init__(self, parent_builder,
               size, *args, **kwargs):
    """Initialize the resized image dataset builder.

    Args:
      parent_builder: The builder to build the resized image dataset from.
      size: size to resize each example to.
      *args: args passed super class.
      **kwargs: kwargs passed super class.
    """
    # Make sure the source data exists before this builder is constructed.
    parent_builder.download_and_prepare()
    self._builder = parent_builder
    self._size = size
    super(ResizedDataset, self).__init__(*args, **kwargs)

  def _info(self):
    # Mirror the parent dataset's info, swapping in the resized image shape.
    info = self._builder.info
    description = "\n This dataset has been resized to %dx%d!" % (self._size[0],
                                                                  self._size[1])
    new_feature_dict = {k: v for k, v in info.features.items()}
    new_feature_dict["image"] = tfds.features.Image(
        shape=list(self._size) + [3])
    return tfds.core.DatasetInfo(
        builder=self,
        description=info.description + description,
        homepage=info.homepage,
        features=tfds.features.FeaturesDict(new_feature_dict),
        supervised_keys=info.supervised_keys,
        citation=info.citation)

  def _split_generators(self, dl_manager):
    # Reproduce every split the parent dataset defines.
    return [
        tfds.core.SplitGenerator(
            name=split, gen_kwargs=dict(split=split))
        for split in self._builder.info.splits.keys()
    ]

  def _generate_examples(self, split):
    # Stream the parent split, resizing each example; the enumeration
    # index serves as the example key.
    for exi, ex in enumerate(
        tfds.as_numpy(self._builder.as_dataset(split=split))):
      ex = self._process_example(ex)
      yield exi, ex

  def _process_example(self, example):
    # As of now, this simply converts the image to the passed in size.
    # TODO(lmetz) It might also make sense to resize then crop out the center.
    example["image"] = cv2.resize(
        example["image"], dsize=self._size, interpolation=cv2.INTER_CUBIC)
    return example
class Food101_32x32(ResizedDataset):  # pylint: disable=invalid-name
  """The Food101 dataset resized to be 32x32."""

  VERSION = "1.0.0"

  def __init__(self, *args, **kwargs):
    # Resize the pinned-version food101 builder down to 32x32.
    super(Food101_32x32, self).__init__(
        *args,
        parent_builder=tfds.builder("food101", version="1.0.0"),
        size=(32, 32),
        **kwargs)
class Food101_64x64(ResizedDataset):  # pylint: disable=invalid-name
  """The Food101 dataset resized to be 64x64."""

  VERSION = "1.0.0"

  def __init__(self, *args, **kwargs):
    # Resize the pinned-version food101 builder down to 64x64.
    super(Food101_64x64, self).__init__(
        *args,
        parent_builder=tfds.builder("food101", version="1.0.0"),
        size=(64, 64),
        **kwargs)
class Coil100_32x32(ResizedDataset):  # pylint: disable=invalid-name
  """The coil100 dataset resized to be 32x32."""

  VERSION = "1.0.0"

  def __init__(self, *args, **kwargs):
    # Resize the pinned-version coil100 builder down to 32x32.
    super(Coil100_32x32, self).__init__(
        *args,
        parent_builder=tfds.builder("coil100", version="1.0.0"),
        size=(32, 32),
        **kwargs)
class ColorectalHistology_32x32(ResizedDataset):  # pylint: disable=invalid-name
  """The colorectal_histology dataset resized to be 32x32."""

  VERSION = "1.0.0"

  def __init__(self, *args, **kwargs):
    # Resize the colorectal_histology builder (any 2.x version) to 32x32.
    super(ColorectalHistology_32x32, self).__init__(
        *args,
        parent_builder=tfds.builder("colorectal_histology", version="2.*.*"),
        size=(32, 32),
        **kwargs)
class DeepWeeds_32x32(ResizedDataset):  # pylint: disable=invalid-name
  """The deep_weeds dataset resized to be 32x32."""

  VERSION = "1.0.0"

  def __init__(self, *args, **kwargs):
    # Resize the pinned-version deep_weeds builder down to 32x32.
    super(DeepWeeds_32x32, self).__init__(
        *args,
        parent_builder=tfds.builder("deep_weeds", version="1.0.0"),
        size=(32, 32),
        **kwargs)
class Sun397_32x32(ResizedDataset):  # pylint: disable=invalid-name
  """The sun397/tfds dataset resized to be 32x32."""

  VERSION = "1.0.0"

  def __init__(self, *args, **kwargs):
    # Resize the pinned-version sun397/tfds builder down to 32x32.
    super(Sun397_32x32, self).__init__(
        *args,
        parent_builder=tfds.builder("sun397/tfds", version="4.0.0"),
        size=(32, 32),
        **kwargs)
class TokenizedConfig(tfds.core.BuilderConfig):
  """BuilderConfig for tokenized text datasets."""

  def __init__(self, version=None, text_encoder_config=None, **kwargs):
    """BuilderConfig for tokenized text datasets.

    Args:
      version (string): version as string.
      text_encoder_config: `tfds.deprecated.text.TextEncoderConfig`, configuration
        for the `tfds.deprecated.text.TextEncoder` used for the `"text"` feature.
      **kwargs: keyword arguments forwarded to super.
    """
    super(TokenizedConfig, self).__init__(
        version=tfds.core.Version(version), **kwargs)
    # Fall back to a default TextEncoderConfig when none is supplied.
    self.text_encoder_config = (
        text_encoder_config or tfds.deprecated.text.TextEncoderConfig())
# This is an arbitrarily chosen subset of languages.
# (2019-03-01 Wikipedia dumps: Chinese, Russian, Japanese, Upper Sorbian
# and English.)
WIKIPEDIA_PREFIX = [
    "20190301.zh", "20190301.ru", "20190301.ja", "20190301.hsb", "20190301.en"
]
def _get_builder_configs(base_configs):
  """Get the builder configs for tokenized datasets."""
  configs = []
  for prefix in base_configs:
    # Each base dataset yields one byte-level and one 8k-subword config.
    byte_config = TokenizedConfig(
        name="%s_bytes" % prefix,
        version="0.0.1",
        description=("Uses byte-level text encoding with "
                     "`tfds.deprecated.text.ByteTextEncoder`"),
        text_encoder_config=tfds.deprecated.text.TextEncoderConfig(
            encoder=tfds.deprecated.text.ByteTextEncoder()),
    )
    subword_config = TokenizedConfig(
        name="%s_subwords8k" % prefix,
        version="0.0.1",
        description=("Uses `tfds.deprecated.text.SubwordTextEncoder` with 8k "
                     "vocab size"),
        text_encoder_config=tfds.deprecated.text.TextEncoderConfig(
            encoder_cls=tfds.deprecated.text.SubwordTextEncoder,
            vocab_size=8192),
    )
    configs.extend((byte_config, subword_config))
  return configs
class TokenizedWikipedia(tfds.core.GeneratorBasedBuilder):
  """Builder which tokenizes the tfds wikipedia datasets.

  This dataset returns 1 paragraph (split via new line) per example
  extracted from the articles. We additionally filter examples to have more than
  5 bytes. Encoding is either bytes, or subwords. The vocab is constructed out
  of the first 200k examples. While this is likely not perfect this should be
  sufficient for meta-learning optimizers.

  Additionally, we make a train and test split by hashing the article seed.

  Finally, for computational reasons we only use 1 millon articles. For the size
  of the models we are training here this should be plenty.
  """
  BUILDER_CONFIGS = _get_builder_configs(WIKIPEDIA_PREFIX)

  def __init__(self, config=None, **kwargs):
    """Initialize the resized image dataset builder.

    Args:
      config: str Config string specified to build dataset with.
      **kwargs: kwargs passed super class.
    """
    # extract the base dataset. Config names look like
    # "20190301.en_bytes" — everything before the underscore is the
    # underlying wikipedia config.
    base, _ = config.split("_")
    self._builder = tfds.builder("wikipedia/%s" % base)
    super(TokenizedWikipedia, self).__init__(config=config, **kwargs)

    # Fraction of articles hashed into the train split.
    self._perc_train = 0.7
    self._max_num_articles = 1000000
    # Number of examples used to build the tokenizer.
    self._examples_for_tokenizer = 200000

  def _info(self):
    info = self._builder.info
    description = "\n This dataset has been tokenized!"
    return tfds.core.DatasetInfo(
        builder=self,
        description=info.description + description,
        features=tfds.features.FeaturesDict({
            "title":
                tfds.features.Text(),
            "text":
                tfds.features.Text(
                    encoder_config=self.builder_config.text_encoder_config),
        }),
        # Language-model style: the text is both input and target.
        supervised_keys=("text", "text"),
        homepage=info.homepage,
        citation=info.citation)

  def _split_generators(self, dl_manager):
    # Build the vocab (if subword) before declaring splits.
    self.info.features["text"].maybe_build_from_corpus(self._vocab_text_gen())
    return [
        tfds.core.SplitGenerator(
            name=split, gen_kwargs=dict(split=split))
        for split in ["train", "test"]
    ]

  def _split_article(self, ex):
    # One example per newline-separated paragraph, skipping near-empty ones.
    for i, split in enumerate(ex["text"].split("\n")):
      if len(split.strip()) > 5:
        yield i, {"title": ex["title"], "text": split}

  def _generate_examples(self, split):
    hasher = tfds.core.hashing.Hasher("token_wikipedia_salt")
    # NOTE(review): the `>` comparison stops after article max+1 rather
    # than exactly max articles — off by one; confirm if intended.
    for exi, example in enumerate(
        tfds.as_numpy(self._builder.as_dataset(split="train"))):
      if exi > self._max_num_articles:
        return

      # To make a train test split we first hash the key and convert it to a
      # floating point value between 0-1. Depending on this value we either
      # yield the example or not depending on the split.
      p = hasher.hash_key(exi) % 100000 / 100000.
      if split == "train" and p < self._perc_train:
        for i, sub_example in self._split_article(example):
          key = (exi, i)
          yield key, sub_example
      elif split == "test" and p >= self._perc_train:
        for i, sub_example in self._split_article(example):
          key = (exi, i)
          yield key, sub_example

  def _vocab_text_gen(self):
    for i, (_, ex) in enumerate(self._generate_examples("train")):
      # Only yield a subset of the data used for tokenization for
      # performance reasons.
      if self._examples_for_tokenizer > i:
        yield ex["text"]
      else:
        return
# Arbitrary subset of datasets (amazon_us_reviews product categories).
AMAZON_PRODUCTS = ["Books_v1_02", "Camera_v1_00", "Home_v1_00", "Video_v1_00"]
class TokenizedAmazonReviews(tfds.core.GeneratorBasedBuilder):
  """Builder which tokenizes the tfds amazon reviews datasets.

  For compute reasons we only tokenize with 200000 examples.

  We make a train and test split by hashing the example index.
  """
  BUILDER_CONFIGS = _get_builder_configs(AMAZON_PRODUCTS)

  def __init__(self, config=None, **kwargs):
    """Initialize the resized image dataset builder.

    Args:
      config: str Config string specified to build dataset with.
      **kwargs: kwargs passed super class.
    """
    # extract the base dataset. Config names look like
    # "Books_v1_02_bytes" — drop only the final "_<encoding>" part since
    # the product name itself contains underscores.
    base = "_".join(config.split("_")[0:-1])
    self._builder = tfds.builder("amazon_us_reviews/%s" % base)
    super(TokenizedAmazonReviews, self).__init__(config=config, **kwargs)

    self._perc_train = 0.7
    self._examples_for_tokenizer = 200000

  def _info(self):
    info = self._builder.info
    description = "\n This dataset has been tokenized!"
    return tfds.core.DatasetInfo(
        builder=self,
        description=info.description + description,
        features=tfds.features.FeaturesDict({
            # 1-5 stars are the labels.
            "label":
                tfds.features.ClassLabel(num_classes=5),
            "text":
                tfds.features.Text(
                    encoder_config=self.builder_config.text_encoder_config),
        }),
        supervised_keys=("text", "label"),
        homepage=info.homepage,
        citation=info.citation)

  def _split_generators(self, dl_manager):
    # Build the vocab (if subword) before declaring splits.
    self.info.features["text"].maybe_build_from_corpus(self._vocab_text_gen())
    return [
        tfds.core.SplitGenerator(
            name=split, gen_kwargs=dict(split=split))
        for split in ["train", "test"]
    ]

  def _generate_examples(self, split):
    # NOTE(review): the salt is copy-pasted from TokenizedWikipedia
    # ("token_wikipedia_salt") — harmless but misleading. Also, the test
    # branch uses `p > self._perc_train` while TokenizedWikipedia uses
    # `>=`, so examples hashing exactly to 0.7 land in *neither* split
    # here. Confirm before changing (it alters dataset membership).
    hasher = tfds.core.hashing.Hasher("token_wikipedia_salt")
    for exi, example in enumerate(
        tfds.as_numpy(self._builder.as_dataset(split="train"))):
      p = hasher.hash_key(exi) % 1000 / 1000.
      example = {
          "text": example["data"]["review_body"],
          # subtract one to zero index.
          "label": example["data"]["star_rating"] - 1
      }
      if split == "train" and p < self._perc_train:
        yield exi, example
      elif split == "test" and p > self._perc_train:
        yield exi, example

  def _vocab_text_gen(self):
    # Feed only a bounded prefix of train examples to the tokenizer.
    for i, (_, ex) in enumerate(self._generate_examples("train")):
      if self._examples_for_tokenizer > i:
        yield ex["text"]
      else:
        return
def _single_associative_retrieval(batch_size=128, num_pairs=5, num_tokens=10):
  """See associative_retrieval."""

  def _onehot_pack(inp, out, loss_mask):
    # Vocab has num_tokens symbols plus the <empty> and <query> markers.
    inp_seq, outputs, loss_mask = (tf.one_hot(inp, num_tokens + 2),
                                   tf.one_hot(out, num_tokens + 2), loss_mask)
    return {"input": inp_seq, "output": outputs, "loss_mask": loss_mask}

  def _py_make_example():
    """Iterator that makes single examples in python."""
    while True:
      # Keys are unique (replace=False); values may repeat.
      keys = np.random.choice(num_tokens, size=num_pairs, replace=False)
      values = np.random.choice(num_tokens, size=num_pairs, replace=True)
      empty_token_idx = num_tokens
      query_token_idx = num_tokens + 1
      input_seq = []
      output_seq = []
      # Present all key/value pairs; target is <empty> throughout.
      for k, v in zip(keys, values):
        input_seq.extend([k, v])
        output_seq.extend([empty_token_idx, empty_token_idx])
      # Query marker, then a randomly chosen key; only the final target
      # (the associated value) is trained on (see loss_mask).
      input_seq.append(query_token_idx)
      output_seq.append(empty_token_idx)
      query_key = np.random.randint(0, num_pairs)
      input_seq.append(keys[query_key])
      output_seq.append(values[query_key])
      loss_mask = np.zeros(2 * num_pairs + 2, dtype=np.float32)
      loss_mask[-1] = 1.
      input_seq = np.asarray(input_seq, dtype=np.int32)
      output_seq = np.asarray(output_seq, dtype=np.int32)
      yield input_seq, output_seq, loss_mask

  # per pair, there is a key and a value. Extra 2 account for query indicator
  # and query key.
  seq_len = 2 * num_pairs + 2
  dataset = tf.data.Dataset.from_generator(_py_make_example,
                                           (tf.int32, tf.int32, tf.float32),
                                           ([seq_len], [seq_len], [seq_len]))
  dataset = dataset.map(_onehot_pack)
  return dataset.batch(batch_size, drop_remainder=True)
def associative_sequence(batch_size=128, num_pairs=5, num_tokens=10):
  """Associative Retrieval datasets.

  The inputs consist of pairs of key and value sequentially followed by an
  indicator token and then a retrieval token.

  Output consists of the value associated with the retrieval key in the final
  step of the sequence, preceded by empty tokens.

  The problem can be perfectly solved, as in the 'key' tokens will be unique.
  There can be duplicate values, however, for different keys.

  Example (using characters instead of the onehot representations):
  input:     A1B2C3D4?A
  output:    _________1
  loss_mask: 0000000001

  The outputs are represented using a one-hot encoding.

  The problem is based off of the one used in
  https://arxiv.org/pdf/1610.06258.pdf.

  Args:
    batch_size: int
    num_pairs: int, number of pairs to put into memory.
    num_tokens: int, number of possible tokens to choose from.

  Returns:
    datasets: Datasets object with each split containing the same data
      generating process.
  """
  # All four splits draw from the same synthetic generator.
  def build():
    return _single_associative_retrieval(batch_size, num_pairs, num_tokens)

  return Datasets(
      train=build(), valid_inner=build(), valid_outer=build(), test=build())
def _single_copy_sequence(batch_size=128,
                          sequence_length=5,
                          num_separator=1,
                          num_tokens=10):
  """See copy_sequence for docs."""

  def _build_batch(_):
    """Construct a batch.

    Args:
      _: tf.Tensor Needed to construct a tf.data.Dataset that iteratively calls
        this function. This is a dummy value that never changes.

    Returns:
      batch: SequencePrediction, containing a batch of sequences.
    """
    # Token ids: [0, num_tokens) are data tokens, num_tokens is the
    # separator, num_tokens + 1 is the <emit> marker.
    inp = tf.random_uniform([batch_size, sequence_length],
                            0,
                            num_tokens,
                            dtype=tf.int32)
    sep = tf.ones([batch_size, num_separator], dtype=tf.int32) * num_tokens
    emit = tf.ones([batch_size, sequence_length], dtype=tf.int32) * (
        num_tokens + 1)
    # Input: data tokens, separator(s), then <emit> markers while the model
    # must reproduce the data.
    inp_seq_pre_onehot = tf.concat([inp, sep, emit], axis=1)
    inp_seq = tf.one_hot(inp_seq_pre_onehot, num_tokens + 2)
    # Loss applies only on the emission phase.
    loss_mask = tf.concat([
        tf.zeros([batch_size, sequence_length + num_separator]),
        tf.ones([batch_size, sequence_length])
    ],
                          axis=1)
    # Target: zeros during presentation, then the copied input tokens.
    outputs_pre_onehot = tf.concat(
        [tf.zeros_like(inp), tf.zeros_like(sep), inp], axis=1)
    outputs = tf.one_hot(outputs_pre_onehot, num_tokens + 2)

    return {"input": inp_seq, "output": outputs, "loss_mask": loss_mask}

  # Single-element dataset repeated forever; each map call builds a fresh
  # random batch.
  return tf.data.Dataset.from_tensor_slices([0]).repeat().map(_build_batch)
def copy_sequence(batch_size=128,
                  sequence_length=5,
                  num_separator=1,
                  num_tokens=10):
  """A simple input copy to output task.

  Input consists of `seq_len` tokens drawn from a vocab size of `num_tokens`
  followed by `n_sep` separation tokens, followed by 3 empty tokens.
  The output consists of `seq_len + n_sep` empty tokens followed by the same
  input tokens from the input. All token outputs are onehot.

  A sample input output pair for seq_len=3, num_tokens=3, n_sep=1

  input::     <tokenA><tokenB><tokenC><sep> <empty> <empty> <empty>
  output::    <empty> <empty> <empty> <empty><tokenA><tokenB><tokenC>
  loss_mask:: 0.      0.      0.      0.    1.      1.      1.

  Args:
    batch_size: int
    sequence_length: int, length of sequence to copy
    num_separator: int, number of empty tokens separating input from output
    num_tokens: int, number of tokens to build input from

  Returns:
    dataset: tf.Data.Dataset
  """
  # Build four independent instances of the same synthetic generator, one
  # per split.
  pipelines = [
      _single_copy_sequence(batch_size, sequence_length, num_separator,
                            num_tokens) for _ in range(4)
  ]
  return Datasets(
      train=pipelines[0],
      valid_inner=pipelines[1],
      valid_outer=pipelines[2],
      test=pipelines[3])
| {
"content_hash": "a23070bb37e7cf66a44d1176119cd8fa",
"timestamp": "",
"source": "github",
"line_count": 829,
"max_line_length": 91,
"avg_line_length": 34.225572979493364,
"alnum_prop": 0.6513939308497515,
"repo_name": "google-research/google-research",
"id": "44329155534d449740fc0e9965a85e2671e8a2bb",
"size": "28981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "task_set/datasets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._access_review_instance_my_decisions_operations import (
build_get_by_id_request,
build_list_request,
build_patch_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AccessReviewInstanceMyDecisionsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.authorization.v2021_12_01_preview.aio.AuthorizationManagementClient`'s
:attr:`access_review_instance_my_decisions` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
    # Auto-generated client plumbing: accept the shared client, config,
    # serializer and deserializer either positionally (in that order) or
    # as keyword arguments.
    input_args = list(args)
    self._client = input_args.pop(0) if input_args else kwargs.pop("client")
    self._config = input_args.pop(0) if input_args else kwargs.pop("config")
    self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
    self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(
    self, schedule_definition_id: str, id: str, filter: Optional[str] = None, **kwargs: Any
) -> AsyncIterable["_models.AccessReviewDecision"]:
    """Get my access review instance decisions.

    :param schedule_definition_id: The id of the access review schedule definition. Required.
    :type schedule_definition_id: str
    :param id: The id of the access review instance. Required.
    :type id: str
    :param filter: The filter to apply on the operation. Other than standard filters, one custom
     filter option is supported : 'assignedToMeToReview()'. When one specified
     $filter=assignedToMeToReview(), only items that are assigned to the calling user to review are
     returned. Default value is None.
    :type filter: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either AccessReviewDecision or the result of
     cls(response)
    :rtype:
     ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.authorization.v2021_12_01_preview.models.AccessReviewDecision]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-12-01-preview"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.AccessReviewDecisionListResult]

    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    def prepare_request(next_link=None):
        # First page uses the templated URL; subsequent pages follow the
        # service-provided nextLink verbatim.
        if not next_link:
            request = build_list_request(
                schedule_definition_id=schedule_definition_id,
                id=id,
                filter=filter,
                api_version=api_version,
                template_url=self.list.metadata["url"],
                headers=_headers,
                params=_params,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore
        else:
            request = HttpRequest("GET", next_link)
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and hand back (next_link, items) for paging.
        deserialized = self._deserialize("AccessReviewDecisionListResult", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response

        # Anything other than 200 is surfaced as an ARM-formatted error.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorDefinition, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(get_next, extract_data)

list.metadata = {"url": "/providers/Microsoft.Authorization/accessReviewScheduleDefinitions/{scheduleDefinitionId}/instances/{id}/decisions"}  # type: ignore
    @distributed_trace_async
    async def get_by_id(
        self, schedule_definition_id: str, id: str, decision_id: str, **kwargs: Any
    ) -> _models.AccessReviewDecision:
        """Get my single access review instance decision.
        :param schedule_definition_id: The id of the access review schedule definition. Required.
        :type schedule_definition_id: str
        :param id: The id of the access review instance. Required.
        :type id: str
        :param decision_id: The id of the decision record. Required.
        :type decision_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AccessReviewDecision or the result of cls(response)
        :rtype: ~azure.mgmt.authorization.v2021_12_01_preview.models.AccessReviewDecision
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Status codes mapped straight to exceptions; extendable via `error_map`.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-12-01-preview")) # type: str
        cls = kwargs.pop("cls", None) # type: ClsType[_models.AccessReviewDecision]
        request = build_get_by_id_request(
            schedule_definition_id=schedule_definition_id,
            id=id,
            decision_id=decision_id,
            api_version=api_version,
            template_url=self.get_by_id.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url) # type: ignore
        pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorDefinition, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize("AccessReviewDecision", pipeline_response)
        if cls:
            # Caller-supplied `cls` may transform the raw response.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_by_id.metadata = {"url": "/providers/Microsoft.Authorization/accessReviewScheduleDefinitions/{scheduleDefinitionId}/instances/{id}/decisions/{decisionId}"} # type: ignore
    @overload
    async def patch(
        self,
        schedule_definition_id: str,
        id: str,
        decision_id: str,
        properties: _models.AccessReviewDecisionProperties,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.AccessReviewDecision:
        """Record a decision.
        :param schedule_definition_id: The id of the access review schedule definition. Required.
        :type schedule_definition_id: str
        :param id: The id of the access review instance. Required.
        :type id: str
        :param decision_id: The id of the decision record. Required.
        :type decision_id: str
        :param properties: Access review decision properties to patch. Required.
        :type properties:
         ~azure.mgmt.authorization.v2021_12_01_preview.models.AccessReviewDecisionProperties
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AccessReviewDecision or the result of cls(response)
        :rtype: ~azure.mgmt.authorization.v2021_12_01_preview.models.AccessReviewDecision
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Typing-only overload (model-typed body); the runtime implementation
        # is the @distributed_trace_async `patch` defined after the overloads.
    @overload
    async def patch(
        self,
        schedule_definition_id: str,
        id: str,
        decision_id: str,
        properties: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.AccessReviewDecision:
        """Record a decision.
        :param schedule_definition_id: The id of the access review schedule definition. Required.
        :type schedule_definition_id: str
        :param id: The id of the access review instance. Required.
        :type id: str
        :param decision_id: The id of the decision record. Required.
        :type decision_id: str
        :param properties: Access review decision properties to patch. Required.
        :type properties: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AccessReviewDecision or the result of cls(response)
        :rtype: ~azure.mgmt.authorization.v2021_12_01_preview.models.AccessReviewDecision
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Typing-only overload (stream/IO body); the runtime implementation
        # is the @distributed_trace_async `patch` defined after the overloads.
    @distributed_trace_async
    async def patch(
        self,
        schedule_definition_id: str,
        id: str,
        decision_id: str,
        properties: Union[_models.AccessReviewDecisionProperties, IO],
        **kwargs: Any
    ) -> _models.AccessReviewDecision:
        """Record a decision.
        :param schedule_definition_id: The id of the access review schedule definition. Required.
        :type schedule_definition_id: str
        :param id: The id of the access review instance. Required.
        :type id: str
        :param decision_id: The id of the decision record. Required.
        :type decision_id: str
        :param properties: Access review decision properties to patch. Is either a model type or a IO
         type. Required.
        :type properties:
         ~azure.mgmt.authorization.v2021_12_01_preview.models.AccessReviewDecisionProperties or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AccessReviewDecision or the result of cls(response)
        :rtype: ~azure.mgmt.authorization.v2021_12_01_preview.models.AccessReviewDecision
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Status codes mapped straight to exceptions; extendable via `error_map`.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-12-01-preview")) # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
        cls = kwargs.pop("cls", None) # type: ClsType[_models.AccessReviewDecision]
        content_type = content_type or "application/json"
        _json = None
        _content = None
        # IO/bytes bodies are sent as-is; model instances are serialized to JSON.
        if isinstance(properties, (IO, bytes)):
            _content = properties
        else:
            _json = self._serialize.body(properties, "AccessReviewDecisionProperties")
        request = build_patch_request(
            schedule_definition_id=schedule_definition_id,
            id=id,
            decision_id=decision_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.patch.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url) # type: ignore
        pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorDefinition, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize("AccessReviewDecision", pipeline_response)
        if cls:
            # Caller-supplied `cls` may transform the raw response.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    patch.metadata = {"url": "/providers/Microsoft.Authorization/accessReviewScheduleDefinitions/{scheduleDefinitionId}/instances/{id}/decisions/{decisionId}"} # type: ignore
| {
"content_hash": "4f26306d1b46c17d14782e66985620b8",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 179,
"avg_line_length": 44.151162790697676,
"alnum_prop": 0.6497893073479062,
"repo_name": "Azure/azure-sdk-for-python",
"id": "17980d374525eab50d8da3ac6884a1232fffd5ee",
"size": "15688",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/authorization/azure-mgmt-authorization/azure/mgmt/authorization/v2021_12_01_preview/aio/operations/_access_review_instance_my_decisions_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""
Tests for django test runner
"""
import unittest
from unittest import mock
from admin_scripts.tests import AdminScriptTestCase
from django import db
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from django.core.management.base import SystemCheckError
from django.test import (
SimpleTestCase, TransactionTestCase, skipUnlessDBFeature,
)
from django.test.runner import DiscoverRunner
from django.test.testcases import connections_support_transactions
from django.test.utils import (
captured_stderr, dependency_ordered, get_unique_databases_and_mirrors,
)
from django.utils.deprecation import RemovedInDjango50Warning
from .models import B, Person, Through
class DependencyOrderingTests(unittest.TestCase):
    """Tests for django.test.utils.dependency_ordered()."""
    def test_simple_dependencies(self):
        # raw entries are (signature, (db_name, [aliases])); the dependencies
        # mapping is keyed by alias.
        raw = [
            ('s1', ('s1_db', ['alpha'])),
            ('s2', ('s2_db', ['bravo'])),
            ('s3', ('s3_db', ['charlie'])),
        ]
        dependencies = {
            'alpha': ['charlie'],
            'bravo': ['charlie'],
        }
        ordered = dependency_ordered(raw, dependencies=dependencies)
        ordered_sigs = [sig for sig, value in ordered]
        self.assertIn('s1', ordered_sigs)
        self.assertIn('s2', ordered_sigs)
        self.assertIn('s3', ordered_sigs)
        # s3 (charlie) must precede both of its dependents.
        self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s1'))
        self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s2'))
    def test_chained_dependencies(self):
        raw = [
            ('s1', ('s1_db', ['alpha'])),
            ('s2', ('s2_db', ['bravo'])),
            ('s3', ('s3_db', ['charlie'])),
        ]
        # alpha -> bravo -> charlie: the transitive ordering must hold too.
        dependencies = {
            'alpha': ['bravo'],
            'bravo': ['charlie'],
        }
        ordered = dependency_ordered(raw, dependencies=dependencies)
        ordered_sigs = [sig for sig, value in ordered]
        self.assertIn('s1', ordered_sigs)
        self.assertIn('s2', ordered_sigs)
        self.assertIn('s3', ordered_sigs)
        # Explicit dependencies
        self.assertLess(ordered_sigs.index('s2'), ordered_sigs.index('s1'))
        self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s2'))
        # Implied dependencies
        self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s1'))
    def test_multiple_dependencies(self):
        raw = [
            ('s1', ('s1_db', ['alpha'])),
            ('s2', ('s2_db', ['bravo'])),
            ('s3', ('s3_db', ['charlie'])),
            ('s4', ('s4_db', ['delta'])),
        ]
        # Diamond: alpha depends on bravo and delta, both depend on charlie.
        dependencies = {
            'alpha': ['bravo', 'delta'],
            'bravo': ['charlie'],
            'delta': ['charlie'],
        }
        ordered = dependency_ordered(raw, dependencies=dependencies)
        ordered_sigs = [sig for sig, aliases in ordered]
        self.assertIn('s1', ordered_sigs)
        self.assertIn('s2', ordered_sigs)
        self.assertIn('s3', ordered_sigs)
        self.assertIn('s4', ordered_sigs)
        # Explicit dependencies
        self.assertLess(ordered_sigs.index('s2'), ordered_sigs.index('s1'))
        self.assertLess(ordered_sigs.index('s4'), ordered_sigs.index('s1'))
        self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s2'))
        self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s4'))
        # Implicit dependencies
        self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s1'))
    def test_circular_dependencies(self):
        raw = [
            ('s1', ('s1_db', ['alpha'])),
            ('s2', ('s2_db', ['bravo'])),
        ]
        # A cycle cannot be ordered and must be rejected.
        dependencies = {
            'bravo': ['alpha'],
            'alpha': ['bravo'],
        }
        with self.assertRaises(ImproperlyConfigured):
            dependency_ordered(raw, dependencies=dependencies)
    def test_own_alias_dependency(self):
        # An entry must not depend on another alias of itself.
        raw = [
            ('s1', ('s1_db', ['alpha', 'bravo']))
        ]
        dependencies = {
            'alpha': ['bravo']
        }
        with self.assertRaises(ImproperlyConfigured):
            dependency_ordered(raw, dependencies=dependencies)
        # reordering aliases shouldn't matter
        raw = [
            ('s1', ('s1_db', ['bravo', 'alpha']))
        ]
        with self.assertRaises(ImproperlyConfigured):
            dependency_ordered(raw, dependencies=dependencies)
class MockTestRunner:
    """Minimal runner stand-in accepted by `manage.py test --testrunner`."""
    def __init__(self, *args, **kwargs):
        pass
# Attached as a mock.Mock (not a plain method) so tests can assert on the
# arguments the test command forwards to the runner; returns no failures.
MockTestRunner.run_tests = mock.Mock(return_value=[])
class ManageCommandTests(unittest.TestCase):
    """Tests for the test management command's --testrunner handling."""
    def test_custom_test_runner(self):
        # The dotted-path runner is resolved and receives the test labels.
        call_command('test', 'sites',
                     testrunner='test_runner.tests.MockTestRunner')
        MockTestRunner.run_tests.assert_called_with(('sites',))
    def test_bad_test_runner(self):
        # A nonexistent runner attribute surfaces as AttributeError.
        with self.assertRaises(AttributeError):
            call_command('test', 'sites', testrunner='test_runner.NonexistentRunner')
    def test_time_recorded(self):
        # --timing prints a total-run duration line to stderr.
        with captured_stderr() as stderr:
            call_command('test', '--timing', 'sites', testrunner='test_runner.tests.MockTestRunner')
        self.assertIn('Total run took', stderr.getvalue())
class CustomTestRunnerOptionsSettingsTests(AdminScriptTestCase):
    """
    Custom runners can add command line arguments. The runner is specified
    through a settings file.
    """
    def setUp(self):
        super().setUp()
        # The value is written verbatim into the generated settings.py,
        # hence the embedded escaped quotes.
        settings = {
            'TEST_RUNNER': '\'test_runner.runner.CustomOptionsTestRunner\'',
        }
        self.write_settings('settings.py', sdict=settings)
    def test_default_options(self):
        args = ['test', '--settings=test_project.settings']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        # The runner echoes option_a:option_b:option_c.
        self.assertOutput(out, '1:2:3')
    def test_default_and_given_options(self):
        args = ['test', '--settings=test_project.settings', '--option_b=foo']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertOutput(out, '1:foo:3')
    def test_option_name_and_value_separated(self):
        # '--option_b foo' (space-separated) must parse like '--option_b=foo'.
        args = ['test', '--settings=test_project.settings', '--option_b', 'foo']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertOutput(out, '1:foo:3')
    def test_all_options_given(self):
        args = ['test', '--settings=test_project.settings', '--option_a=bar',
                '--option_b=foo', '--option_c=31337']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertOutput(out, 'bar:foo:31337')
class CustomTestRunnerOptionsCmdlineTests(AdminScriptTestCase):
    """
    Custom runners can add command line arguments when the runner is specified
    using --testrunner.
    """
    def setUp(self):
        super().setUp()
        self.write_settings('settings.py')
    def test_testrunner_option(self):
        # Space-separated form: --testrunner <path>.
        args = [
            'test', '--testrunner', 'test_runner.runner.CustomOptionsTestRunner',
            '--option_a=bar', '--option_b=foo', '--option_c=31337'
        ]
        out, err = self.run_django_admin(args, 'test_project.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, 'bar:foo:31337')
    def test_testrunner_equals(self):
        # Equals form: --testrunner=<path> must behave identically.
        args = [
            'test', '--testrunner=test_runner.runner.CustomOptionsTestRunner',
            '--option_a=bar', '--option_b=foo', '--option_c=31337'
        ]
        out, err = self.run_django_admin(args, 'test_project.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, 'bar:foo:31337')
    def test_no_testrunner(self):
        # A bare --testrunner with no value prints usage, not a traceback.
        args = ['test', '--testrunner']
        out, err = self.run_django_admin(args, 'test_project.settings')
        self.assertIn('usage', err)
        self.assertNotIn('Traceback', err)
        self.assertNoOutput(out)
class Ticket17477RegressionTests(AdminScriptTestCase):
    """Regression tests for ticket #17477."""

    def setUp(self):
        super().setUp()
        self.write_settings('settings.py')

    def test_ticket_17477(self):
        """'manage.py help test' works after r16352."""
        out, err = self.run_manage(['help', 'test'])
        self.assertNoOutput(err)
class SQLiteInMemoryTestDbs(TransactionTestCase):
    """Transaction support detection with SQLite ':memory:' databases."""
    available_apps = ['test_runner']
    databases = {'default', 'other'}
    @unittest.skipUnless(all(db.connections[conn].vendor == 'sqlite' for conn in db.connections),
                         "This is an sqlite-specific issue")
    def test_transaction_support(self):
        # Assert connections mocking is appropriately applied by preventing
        # any attempts at calling create_test_db on the global connection
        # objects.
        for connection in db.connections.all():
            create_test_db = mock.patch.object(
                connection.creation,
                'create_test_db',
                side_effect=AssertionError("Global connection object shouldn't be manipulated.")
            )
            create_test_db.start()
            self.addCleanup(create_test_db.stop)
        # Exercise both spellings of an in-memory database name.
        for option_key, option_value in (
                ('NAME', ':memory:'), ('TEST', {'NAME': ':memory:'})):
            tested_connections = db.ConnectionHandler({
                'default': {
                    'ENGINE': 'django.db.backends.sqlite3',
                    option_key: option_value,
                },
                'other': {
                    'ENGINE': 'django.db.backends.sqlite3',
                    option_key: option_value,
                },
            })
            with mock.patch('django.test.utils.connections', new=tested_connections):
                other = tested_connections['other']
                DiscoverRunner(verbosity=0).setup_databases()
                msg = (
                    "DATABASES setting '%s' option set to sqlite3's ':memory:' value "
                    "shouldn't interfere with transaction support detection." % option_key
                )
                # Transaction support is properly initialized for the 'other' DB.
                self.assertTrue(other.features.supports_transactions, msg)
                # And all the DBs report that they support transactions.
                self.assertTrue(connections_support_transactions(), msg)
class DummyBackendTest(unittest.TestCase):
    def test_setup_databases(self):
        """setup_databases() doesn't fail with dummy database backend."""
        empty_handler = db.ConnectionHandler({})
        with mock.patch('django.test.utils.connections', new=empty_handler):
            runner = DiscoverRunner(verbosity=0)
            old_config = runner.setup_databases()
            runner.teardown_databases(old_config)
class AliasedDefaultTestSetupTest(unittest.TestCase):
    def test_setup_aliased_default_database(self):
        """
        setup_databases() doesn't fail when 'default' is aliased
        """
        handler = db.ConnectionHandler({
            'default': {'NAME': 'dummy'},
            'aliased': {'NAME': 'dummy'},
        })
        with mock.patch('django.test.utils.connections', new=handler):
            runner = DiscoverRunner(verbosity=0)
            old_config = runner.setup_databases()
            runner.teardown_databases(old_config)
class SetupDatabasesTests(SimpleTestCase):
    """Tests for DiscoverRunner.setup_databases()/teardown_databases()."""
    def setUp(self):
        self.runner_instance = DiscoverRunner(verbosity=0)
    def test_setup_aliased_databases(self):
        # Two aliases pointing at the same physical database: the test DB is
        # created/destroyed only once.
        tested_connections = db.ConnectionHandler({
            'default': {
                'ENGINE': 'django.db.backends.dummy',
                'NAME': 'dbname',
            },
            'other': {
                'ENGINE': 'django.db.backends.dummy',
                'NAME': 'dbname',
            }
        })
        with mock.patch('django.db.backends.dummy.base.DatabaseWrapper.creation_class') as mocked_db_creation:
            with mock.patch('django.test.utils.connections', new=tested_connections):
                old_config = self.runner_instance.setup_databases()
                self.runner_instance.teardown_databases(old_config)
        mocked_db_creation.return_value.destroy_test_db.assert_called_once_with('dbname', 0, False)
    def test_setup_test_database_aliases(self):
        """
        The default database must be the first because data migrations
        use the default alias by default.
        """
        tested_connections = db.ConnectionHandler({
            'other': {
                'ENGINE': 'django.db.backends.dummy',
                'NAME': 'dbname',
            },
            'default': {
                'ENGINE': 'django.db.backends.dummy',
                'NAME': 'dbname',
            }
        })
        with mock.patch('django.test.utils.connections', new=tested_connections):
            test_databases, _ = get_unique_databases_and_mirrors()
            # 'default' is listed before 'other' despite declaration order.
            self.assertEqual(
                test_databases,
                {
                    ('', '', 'django.db.backends.dummy', 'test_dbname'): (
                        'dbname',
                        ['default', 'other'],
                    ),
                },
            )
    def test_destroy_test_db_restores_db_name(self):
        tested_connections = db.ConnectionHandler({
            'default': {
                'ENGINE': settings.DATABASES[db.DEFAULT_DB_ALIAS]["ENGINE"],
                'NAME': 'xxx_test_database',
            },
        })
        # Using the real current name as old_name to not mess with the test suite.
        old_name = settings.DATABASES[db.DEFAULT_DB_ALIAS]["NAME"]
        with mock.patch('django.db.connections', new=tested_connections):
            tested_connections['default'].creation.destroy_test_db(old_name, verbosity=0, keepdb=True)
            self.assertEqual(tested_connections['default'].settings_dict["NAME"], old_name)
    def test_serialization(self):
        # By default, test DB creation is asked to serialize DB contents.
        tested_connections = db.ConnectionHandler({
            'default': {
                'ENGINE': 'django.db.backends.dummy',
            },
        })
        with mock.patch('django.db.backends.dummy.base.DatabaseWrapper.creation_class') as mocked_db_creation:
            with mock.patch('django.test.utils.connections', new=tested_connections):
                self.runner_instance.setup_databases()
        mocked_db_creation.return_value.create_test_db.assert_called_once_with(
            verbosity=0, autoclobber=False, serialize=True, keepdb=False
        )
    def test_serialized_off(self):
        # TEST['SERIALIZE'] = False still works but raises the deprecation
        # warning and disables serialization.
        tested_connections = db.ConnectionHandler({
            'default': {
                'ENGINE': 'django.db.backends.dummy',
                'TEST': {'SERIALIZE': False},
            },
        })
        msg = (
            'The SERIALIZE test database setting is deprecated as it can be '
            'inferred from the TestCase/TransactionTestCase.databases that '
            'enable the serialized_rollback feature.'
        )
        with mock.patch('django.db.backends.dummy.base.DatabaseWrapper.creation_class') as mocked_db_creation:
            with mock.patch('django.test.utils.connections', new=tested_connections):
                with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
                    self.runner_instance.setup_databases()
        mocked_db_creation.return_value.create_test_db.assert_called_once_with(
            verbosity=0, autoclobber=False, serialize=False, keepdb=False
        )
@skipUnlessDBFeature('supports_sequence_reset')
class AutoIncrementResetTest(TransactionTestCase):
    """
    Creating the same models in different test methods receive the same PK
    values since the sequences are reset before each test method.
    """
    available_apps = ['test_runner']
    reset_sequences = True
    def _test(self):
        # Regular model
        p = Person.objects.create(first_name='Jack', last_name='Smith')
        self.assertEqual(p.pk, 1)
        # Auto-created many-to-many through model
        p.friends.add(Person.objects.create(first_name='Jacky', last_name='Smith'))
        self.assertEqual(p.friends.through.objects.first().pk, 1)
        # Many-to-many through model
        b = B.objects.create()
        t = Through.objects.create(person=p, b=b)
        self.assertEqual(t.pk, 1)
    def test_autoincrement_reset1(self):
        self._test()
    def test_autoincrement_reset2(self):
        # Identical assertions run a second time: PKs start at 1 again only
        # if the sequences were reset between test methods.
        self._test()
class EmptyDefaultDatabaseTest(unittest.TestCase):
    def test_empty_default_database(self):
        """
        An empty default database in settings does not raise an ImproperlyConfigured
        error when running a unit test that does not use a database.
        """
        handler = db.ConnectionHandler({'default': {}})
        with mock.patch('django.db.connections', new=handler):
            default_connection = handler[db.utils.DEFAULT_DB_ALIAS]
            # An empty settings dict falls back to the dummy backend.
            self.assertEqual(default_connection.settings_dict['ENGINE'], 'django.db.backends.dummy')
            connections_support_transactions()
class RunTestsExceptionHandlingTests(unittest.TestCase):
    """Teardown behavior of DiscoverRunner.run_tests() when steps raise."""
    def test_run_checks_raises(self):
        """
        Teardown functions are run when run_checks() raises SystemCheckError.
        """
        with mock.patch('django.test.runner.DiscoverRunner.setup_test_environment'), \
                mock.patch('django.test.runner.DiscoverRunner.setup_databases'), \
                mock.patch('django.test.runner.DiscoverRunner.build_suite'), \
                mock.patch('django.test.runner.DiscoverRunner.run_checks', side_effect=SystemCheckError), \
                mock.patch('django.test.runner.DiscoverRunner.teardown_databases') as teardown_databases, \
                mock.patch('django.test.runner.DiscoverRunner.teardown_test_environment') as teardown_test_environment:
            runner = DiscoverRunner(verbosity=0, interactive=False)
            with self.assertRaises(SystemCheckError):
                runner.run_tests(['test_runner_apps.sample.tests_sample.TestDjangoTestCase'])
            # Both teardown steps still ran despite the check failure.
            self.assertTrue(teardown_databases.called)
            self.assertTrue(teardown_test_environment.called)
    def test_run_checks_raises_and_teardown_raises(self):
        """
        SystemCheckError is surfaced when run_checks() raises SystemCheckError
        and teardown databases() raises ValueError.
        """
        with mock.patch('django.test.runner.DiscoverRunner.setup_test_environment'), \
                mock.patch('django.test.runner.DiscoverRunner.setup_databases'), \
                mock.patch('django.test.runner.DiscoverRunner.build_suite'), \
                mock.patch('django.test.runner.DiscoverRunner.run_checks', side_effect=SystemCheckError), \
                mock.patch('django.test.runner.DiscoverRunner.teardown_databases', side_effect=ValueError) \
                as teardown_databases, \
                mock.patch('django.test.runner.DiscoverRunner.teardown_test_environment') as teardown_test_environment:
            runner = DiscoverRunner(verbosity=0, interactive=False)
            with self.assertRaises(SystemCheckError):
                runner.run_tests(['test_runner_apps.sample.tests_sample.TestDjangoTestCase'])
            # teardown_databases raised, so the environment teardown never ran.
            self.assertTrue(teardown_databases.called)
            self.assertFalse(teardown_test_environment.called)
    def test_run_checks_passes_and_teardown_raises(self):
        """
        Exceptions on teardown are surfaced if no exceptions happen during
        run_checks().
        """
        with mock.patch('django.test.runner.DiscoverRunner.setup_test_environment'), \
                mock.patch('django.test.runner.DiscoverRunner.setup_databases'), \
                mock.patch('django.test.runner.DiscoverRunner.build_suite'), \
                mock.patch('django.test.runner.DiscoverRunner.run_checks'), \
                mock.patch('django.test.runner.DiscoverRunner.teardown_databases', side_effect=ValueError) \
                as teardown_databases, \
                mock.patch('django.test.runner.DiscoverRunner.teardown_test_environment') as teardown_test_environment:
            runner = DiscoverRunner(verbosity=0, interactive=False)
            with self.assertRaises(ValueError):
                # Suppress the output when running TestDjangoTestCase.
                with mock.patch('sys.stderr'):
                    runner.run_tests(['test_runner_apps.sample.tests_sample.TestDjangoTestCase'])
            self.assertTrue(teardown_databases.called)
            self.assertFalse(teardown_test_environment.called)
| {
"content_hash": "999b4943b4e1a72f1e6f238d50a4d117",
"timestamp": "",
"source": "github",
"line_count": 514,
"max_line_length": 119,
"avg_line_length": 40.219844357976655,
"alnum_prop": 0.6060562085812412,
"repo_name": "adamchainz/django",
"id": "5dc31265812581ada39c9f0565a0b0a401ca90db",
"size": "20673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_runner/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "84602"
},
{
"name": "HTML",
"bytes": "223742"
},
{
"name": "JavaScript",
"bytes": "138208"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "14372826"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "142"
}
],
"symlink_target": ""
} |
"""
Module for common mock definitions and helpers that are useful across various fixtures and unit tests.
"""
import gzip
import io
from botocore.exceptions import ClientError
import simplejson as json
def create_aws_provisioned_throughput_exceeded_exception():
    """Return a ClientError carrying a ProvisionedThroughputExceededException message."""
    error_response = {'Error': {'Code': 102, 'Message': 'ProvisionedThroughputExceededException'}}
    return ClientError(error_response, operation_name="DoSomething")
def create_aws_client_exception():
    """Return a generic, non-specific ClientError for failure-path tests."""
    error_response = {'Error': {'Code': 102, 'Message': 'JustSomeRandomException'}}
    return ClientError(error_response, operation_name="DoSomething")
def create_aws_permissions_exception():
    """Return a ClientError whose message signals an AccessDenied failure."""
    error_response = {'Error': {'Code': 102, 'Message': 'AccessDenied: Let Erik try it'}}
    return ClientError(error_response, operation_name="DoSomething")
def create_aws_client_throttled_exception():
    """Return a ClientError whose message signals request-rate throttling."""
    error_response = {'Error': {'Code': 102, 'Message': 'RequestLimitExceeded: spam spam spam spam spaaaaaam'}}
    return ClientError(error_response, operation_name="DoSomething")
def build_sns_event(message_id='message-abc123', message=None, message_attributes=None,
                    event_subscription_arn='arn:aaa:bbb:ccc:ddd', timestamp='1970-01-01T00:00:00.000Z',
                    subject='TestInvoke', topic_arn='arn:eee:fff:ggg:hhh'):
    """
    Construct a properly formatted SNS message for testing.

    Args:
        message_id: Value for the 'MessageId' field.
        message: Value for the 'Message' field, or None.
        message_attributes: Value for the 'MessageAttributes' field, or None.
        event_subscription_arn: Accepted for call-site compatibility.
            NOTE(review): currently unused — it is never placed in the
            returned dict. Confirm whether an 'EventSubscriptionArn' key
            was intended.
        timestamp: Timestamp string for the 'Timestamp' field.
        subject: Value for the 'Subject' field.
        topic_arn: Value for the 'TopicArn' field.

    Returns:
        The event as a dict
    """
    event_dict = {
        'SignatureVersion': '1',
        'Timestamp': timestamp,
        'Signature': 'EXAMPLE',
        'SigningCertUrl': 'EXAMPLE',
        'MessageId': message_id,
        'Message': message,
        'MessageAttributes': message_attributes,
        'Type': 'Notification',
        'UnsubscribeUrl': 'EXAMPLE',
        'TopicArn': topic_arn,
        'Subject': subject
    }
    return event_dict
def create_cloudtrail_archive():
    """
    Create a gzipped archive of cloudtrail events.

    The archive holds a single JSON document with a 'Records' list containing
    one canned DescribeTrails API-call event.

    Returns:
        A set of bytes containing the archive data
    """
    records = {
        'Records': [
            {
                "eventID": "e794ee1c-e792-4e69-8e2f-f05492a82d63",
                "awsRegion": "us-west-1",
                "eventName": "DescribeTrails",
                "eventTime": "2017-03-15T14:07:38Z",
                "eventType": "AwsApiCall",
                "requestID": "bee7e449-0988-11e7-8deb-1ddd802bb01e",
                "userAgent": "Boto3/1.4.3 Python/2.7.6 Linux/3.13.0-100-generic Botocore/1.4.91+dd.0",
                "eventSource": "cloudtrail.amazonaws.com",
                "eventVersion": "1.05",
                "recipientAccountId": "222222222222",
                "userIdentity": {
                    "arn": "arn:aws:sts::111111111111:assumed-role/DnoAggiSatIWdeaotanrtRole/DnoAggiSatIWdeaotanrt",
                    "type": "AssumedRole",
                    "accountId": "111111111111",
                    "accessKeyId": "AAAAAAAAAAAAAAAAAAAA",
                    "principalId": "AAAAAAAAAAAAAAAAAAAAA:DnoAggiSatIWdeaotanrt",
                    "sessionContext": {
                        "attributes": {
                            "creationDate": "2017-03-15T14:07:37Z",
                            "mfaAuthenticated": "false"
                        },
                        "sessionIssuer": {
                            "arn": "arn:aws:iam::111111111111:role/DnoAggiSatIWdeaotanrtRole",
                            "type": "Role",
                            "userName": "DnoAggiSatIWdeaotanrtRole",
                            "accountId": "111111111111",
                            "principalId": "AAAAAAAAAAAAAAAAAAAAA"
                        }
                    }
                }
            }
        ]
    }
    json_bytes = json.dumps(records).encode('utf-8')
    # gzip.compress replaces the manual BytesIO + GzipFile dance; the
    # decompressed payload is identical.
    return gzip.compress(json_bytes)
| {
"content_hash": "ff48ffef0e80ee3c5d12a059e1a64194",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 116,
"avg_line_length": 34.87826086956522,
"alnum_prop": 0.5507354774370481,
"repo_name": "Cloudzero/cloudzero-reactor-aws",
"id": "a438402a5cf88a69b9ee8ea55c12ba1b9487baa4",
"size": "4210",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "test/unit/mock_helpers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "5349"
},
{
"name": "Python",
"bytes": "803664"
}
],
"symlink_target": ""
} |
from .user import *
from .admin import *
from .error import *
from .feedback import *
from .letsencrypt import *
from .profile import *
from .test import *
from .welcome import *
from .post import *
from .resource import *
from .recommender import *
from .search import *
from .static_pages import *
from .guide import *
| {
"content_hash": "ad93a5a33ee080323f3a8529ad420c7e",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 27,
"avg_line_length": 21.466666666666665,
"alnum_prop": 0.7329192546583851,
"repo_name": "daspots/dasapp",
"id": "48257e20d82257a636b0ace347e022d7388c6cec",
"size": "339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main/control/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1113637"
},
{
"name": "CoffeeScript",
"bytes": "24343"
},
{
"name": "HTML",
"bytes": "140644"
},
{
"name": "JavaScript",
"bytes": "2074640"
},
{
"name": "Python",
"bytes": "5102305"
},
{
"name": "Shell",
"bytes": "1082"
}
],
"symlink_target": ""
} |
import datetime
import json
import re
import shlex
import subprocess
from google.api_core.exceptions import NotFound
import google.auth
from google.cloud import bigquery
from google.cloud import storage
from google.cloud.retail import ProductDetail, PurgeUserEventsRequest, \
UserEvent, UserEventServiceClient, WriteUserEventRequest
from google.cloud.retail_v2 import Product
from google.protobuf.timestamp_pb2 import Timestamp
project_id = google.auth.default()[1]
default_catalog = f"projects/{project_id}/locations/global/catalogs/default_catalog"
# get user event
def get_user_event(visitor_id):
    """Build and print a "detail-page-view" UserEvent for *visitor_id*.

    The event references a single hard-coded product (id ``test_id``) and is
    timestamped with the current wall-clock time.
    """
    event_time = Timestamp()
    event_time.seconds = int(datetime.datetime.now().timestamp())

    viewed_product = Product()
    viewed_product.id = 'test_id'
    detail = ProductDetail()
    detail.product = viewed_product

    event = UserEvent()
    event.visitor_id = visitor_id
    event.event_type = "detail-page-view"
    event.event_time = event_time
    event.product_details = [detail]

    print(event)
    return event
# write user event
def write_user_event(visitor_id):
    """Write one user event for *visitor_id* to the default catalog and return it."""
    request = WriteUserEventRequest()
    request.parent = default_catalog
    request.user_event = get_user_event(visitor_id)

    written_event = UserEventServiceClient().write_user_event(request)
    print("---the user event is written---")
    print(written_event)
    return written_event
# purge user event
def purge_user_event(visitor_id):
    """Kick off an asynchronous purge of every event recorded for *visitor_id*."""
    request = PurgeUserEventsRequest()
    request.parent = default_catalog
    request.filter = f'visitorId="{visitor_id}"'
    # force=True actually deletes the events instead of doing a dry run.
    request.force = True

    operation = UserEventServiceClient().purge_user_events(request)
    print("---the purge operation was started:----")
    print(operation.operation.name)
def get_project_id():
    """Return the active gcloud project id.

    Runs ``gcloud config get-value project --format json`` and decodes the
    output as JSON (a quoted string). This replaces the previous approach of
    regex-scanning ``str(bytes)``, which depended on the repr of a bytes
    object and broke on project ids without surrounding quotes.

    Raises:
        subprocess.CalledProcessError: if the gcloud command fails.
        json.JSONDecodeError: if the command output is not valid JSON.
    """
    get_project_command = "gcloud config get-value project --format json"
    output = subprocess.check_output(shlex.split(get_project_command))
    return json.loads(output.decode("utf-8"))
def create_bucket(bucket_name: str):
    """Create a new bucket in Cloud Storage"""
    print("Creating new bucket:" + bucket_name)
    # Skip creation when a bucket of the same name is already owned.
    if bucket_name in list_buckets():
        print(f"Bucket {bucket_name} already exists")
        return None
    client = storage.Client()
    bucket = client.bucket(bucket_name)
    bucket.storage_class = "STANDARD"
    new_bucket = client.create_bucket(bucket, location="us")
    print(
        f"Created bucket {new_bucket.name} in {new_bucket.location} with storage class {new_bucket.storage_class}")
    return new_bucket
def delete_bucket(bucket_name: str):
    """Delete a bucket from Cloud Storage"""
    client = storage.Client()
    print("Deleting bucket:" + bucket_name)
    if bucket_name not in list_buckets():
        print(f"Bucket {bucket_name} is not found")
        return
    # A bucket must be empty before it can be deleted.
    for blob in client.list_blobs(bucket_name):
        blob.delete()
    bucket = client.get_bucket(bucket_name)
    bucket.delete()
    print(f"Bucket {bucket.name} is deleted")
def list_buckets():
    """Lists all buckets"""
    client = storage.Client()
    # Collect just the names; callers only ever test membership.
    return [bucket.name for bucket in client.list_buckets()]
def upload_blob(bucket_name, source_file_name):
    """Uploads a file to the bucket.

    The object name is the part of ``source_file_name`` after the first
    ``resources/`` segment. Previously a path without that segment crashed
    with ``AttributeError`` (``re.search`` returning ``None``); such paths now
    fall back to the file's basename.

    Args:
        bucket_name: destination Cloud Storage bucket.
        source_file_name: local path of the file to upload.
    """
    print(f"Uploading data from {source_file_name} to the bucket {bucket_name}")
    storage_client = storage.Client()
    bucket = storage_client.bucket(bucket_name)
    match = re.search(r'resources/(.*?)$', source_file_name)
    object_name = match.group(1) if match else os.path.basename(source_file_name)
    blob = bucket.blob(object_name)
    blob.upload_from_filename(source_file_name)
    print(
        f"File {source_file_name} uploaded to {object_name}."
    )
def create_bq_dataset(dataset_name):
    """Create a BigQuery dataset"""
    full_dataset_id = f"{project_id}.{dataset_name}"
    client = bigquery.Client()
    print(f"Creating dataset {full_dataset_id}")
    try:
        client.get_dataset(full_dataset_id)
    except NotFound:
        # Construct a Dataset object to send to the API.
        dataset = bigquery.Dataset(full_dataset_id)
        dataset.location = "US"
        client.create_dataset(dataset)
        print("dataset is created")
    else:
        print(f"dataset {full_dataset_id} already exists")
def create_bq_table(dataset, table_name, schema_file_path):
    """Create a BigQuery table"""
    full_table_id = f"{project_id}.{dataset}.{table_name}"
    client = bigquery.Client()
    print(f"Check if BQ table {full_table_id} exists")
    try:
        client.get_table(full_table_id)
        print(f"table {full_table_id} exists and will be deleted")
        delete_bq_table(dataset, table_name)
    except NotFound:
        print(f"table {full_table_id} does not exist")
    # Read the JSON schema and construct a Table object to send to the API.
    with open(schema_file_path, "rb") as schema_file:
        table_schema = json.load(schema_file)
    client.create_table(bigquery.Table(full_table_id, schema=table_schema))
    print(f"table {full_table_id} is created")
def delete_bq_table(dataset, table_name):
    """Delete a BigQuery table; a missing table is silently ignored."""
    full_table_id = f"{project_id}.{dataset}.{table_name}"
    bigquery.Client().delete_table(full_table_id, not_found_ok=True)
    print(f"Table '{full_table_id}' is deleted.")
def upload_data_to_bq_table(dataset, table_name, source, schema_file_path):
    """Upload data to the table from specified source file"""
    full_table_id = f"{project_id}.{dataset}.{table_name}"
    client = bigquery.Client()
    print(f"Uploading data from {source} to the table {full_table_id}")
    with open(schema_file_path, "rb") as schema_file:
        table_schema = json.load(schema_file)
    load_config = bigquery.LoadJobConfig(
        source_format=bigquery.SourceFormat.NEWLINE_DELIMITED_JSON,
        schema=table_schema)
    with open(source, "rb") as data_file:
        load_job = client.load_table_from_file(data_file, full_table_id,
                                               job_config=load_config)
    load_job.result()  # Waits for the job to complete.
    print("data was uploaded")
| {
"content_hash": "960728adc681711a67c2016fa4ac82c6",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 119,
"avg_line_length": 34.909574468085104,
"alnum_prop": 0.6756056681395703,
"repo_name": "googleapis/python-retail",
"id": "1e92e3081c1b18213e98dfd64f570301c5310ccc",
"size": "7162",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/interactive-tutorials/events/setup_events/setup_cleanup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "7420556"
},
{
"name": "Shell",
"bytes": "30660"
}
],
"symlink_target": ""
} |
import unittest
from mock import MagicMock
from tests.tools.client.fixtures import topic_metadata, offset_fetch
from kafka.tools.client import Client
from kafka.tools.exceptions import GroupError
from kafka.tools.models.group import Group
from kafka.tools.models.topic import TopicOffsets
from kafka.tools.protocol.requests.offset_fetch_v1 import OffsetFetchV1Request
class InterfaceOffsetsTests(unittest.TestCase):
    """Unit tests for the offset-related methods of the kafka-tools Client.

    All broker communication is replaced by MagicMocks; the cluster layout
    comes from the ``topic_metadata()`` fixture (topic1 with two partitions on
    brokers 1 and 101).
    """

    def setUp(self):
        # Dummy client for testing - we're not going to connect that bootstrap broker
        self.client = Client()
        self.client._connected = True
        # Get the broker and topic from a metadata update
        self.client._update_from_metadata(topic_metadata())
        self.group = Group('testgroup')
        self.offset_fetch = offset_fetch()

    def test_get_offsets_for_topics(self):
        """One list_offsets payload per broker, covering only that broker's partitions."""
        self.client._maybe_update_metadata_for_topics = MagicMock()
        self.client._send_list_offsets_to_brokers = MagicMock()
        self.client._send_list_offsets_to_brokers.return_value = {'topic1': 'responseobj'}
        val = self.client.get_offsets_for_topics(['topic1'])
        self.client._maybe_update_metadata_for_topics.assert_called_once_with(['topic1'])
        self.client._send_list_offsets_to_brokers.assert_called_once()
        values = self.client._send_list_offsets_to_brokers.call_args[0][0]
        # Two brokers in the fixture metadata, each leading one partition.
        assert len(values) == 2
        for broker_id in (1, 101):
            assert values[broker_id]['replica_id'] == -1
            assert len(values[broker_id]['topics']) == 1
            assert values[broker_id]['topics'][0]['topic'] == 'topic1'
            assert len(values[broker_id]['topics'][0]['partitions']) == 1
            assert values[broker_id]['topics'][0]['partitions'][0]['timestamp'] == Client.OFFSET_LATEST
        assert values[1]['topics'][0]['partitions'][0]['partition'] == 0
        assert values[101]['topics'][0]['partitions'][0]['partition'] == 1
        assert val == {'topic1': 'responseobj'}

    def test_get_offsets_for_topic(self):
        """The single-topic helper delegates to get_offsets_for_topics."""
        self.client.get_offsets_for_topics = MagicMock()
        self.client.get_offsets_for_topics.return_value = {'topic1': 'thingitreturns'}
        val = self.client.get_offsets_for_topic('topic1')
        self.client.get_offsets_for_topics.assert_called_once_with(['topic1'], Client.OFFSET_LATEST)
        assert val == 'thingitreturns'

    def test_get_offsets_for_topic_bad_timestamp(self):
        """A non-integer timestamp argument is rejected with TypeError."""
        self.assertRaises(TypeError, self.client.get_offsets_for_topics, ['topic1'], timestamp='notanint')

    def test_get_topics_for_group_string(self):
        """A bare topic name is wrapped into a single-element list."""
        val = self.client._get_topics_for_group(self.group, 'testtopic')
        assert val == ['testtopic']

    def test_get_topics_for_group_list(self):
        """A list of topic names is passed through unchanged."""
        val = self.client._get_topics_for_group(self.group, ['topica', 'topicb'])
        assert val == ['topica', 'topicb']

    def test_get_topics_for_group_empty(self):
        """An explicit empty topic list raises GroupError."""
        self.assertRaises(GroupError, self.client._get_topics_for_group, self.group, [])

    def test_get_topics_for_group_subscribed(self):
        """With no topics given, the group's own subscriptions are used."""
        self.group.subscribed_topics = MagicMock()
        self.group.subscribed_topics.return_value = ['topica']
        val = self.client._get_topics_for_group(self.group, None)
        self.group.subscribed_topics.assert_called_once_with()
        assert val == ['topica']

    def test_get_offsets_for_group(self):
        """get_offsets_for_group sends an OffsetFetchV1Request and parses the reply."""
        self.client.get_group = MagicMock()
        self.client.get_group.return_value = self.group
        self.client._get_topics_for_group = MagicMock()
        self.client._get_topics_for_group.return_value = ['topic1']
        self.client._maybe_update_metadata_for_topics = MagicMock()
        self.client._send_group_aware_request = MagicMock()
        self.client._send_group_aware_request.return_value = self.offset_fetch
        val = self.client.get_offsets_for_group('testgroup')
        self.client.get_group.assert_called_once_with('testgroup')
        self.client._get_topics_for_group.assert_called_once_with(self.group, None)
        self.client._maybe_update_metadata_for_topics.assert_called_once_with(['topic1'])
        self.client._send_group_aware_request.assert_called_once()
        assert self.client._send_group_aware_request.call_args[0][0] == 'testgroup'
        req = self.client._send_group_aware_request.call_args[0][1]
        # The request must name the group and both partitions of topic1.
        assert isinstance(req, OffsetFetchV1Request)
        assert req['group_id'] == 'testgroup'
        assert len(req['topics']) == 1
        assert req['topics'][0]['topic'] == 'topic1'
        assert len(req['topics'][0]['partitions']) == 2
        assert req['topics'][0]['partitions'][0] == 0
        assert req['topics'][0]['partitions'][1] == 1
        # Offsets come from the offset_fetch() fixture.
        assert 'topic1' in val
        assert isinstance(val['topic1'], TopicOffsets)
        assert val['topic1'].partitions == [4829, 8904]

    def test_set_offsets_for_group_bad_offsets(self):
        """A non-list offsets argument raises TypeError."""
        self.assertRaises(TypeError, self.client.set_offsets_for_group, 'testgroup', 'notalist')

    def test_set_offsets_for_group_offsets_none(self):
        """None is not an acceptable offsets argument."""
        self.assertRaises(TypeError, self.client.set_offsets_for_group, 'testgroup', None)

    def test_set_offsets_for_group_not_empty(self):
        """Committing offsets for a non-empty (Stable) group raises GroupError."""
        self.group.state = 'Stable'
        self.client.get_group = MagicMock()
        self.client.get_group.return_value = self.group
        self.assertRaises(GroupError, self.client.set_offsets_for_group, 'testgroup', [])
        self.client.get_group.assert_called_once_with('testgroup')

    def test_set_offsets_for_group(self):
        """The happy path: the commit request is sent and its response parsed."""
        self.client.get_group = MagicMock()
        self.client.get_group.return_value = self.group
        self.client._send_set_offset_request = MagicMock()
        self.client._send_set_offset_request.return_value = 'sendresponse'
        self.client._parse_set_offset_response = MagicMock()
        self.client._parse_set_offset_response.return_value = {'topic1': [0, 0]}
        offsets = TopicOffsets(self.client.cluster.topics['topic1'])
        offsets.partitions[0] = 2342
        offsets.partitions[1] = 8793
        val = self.client.set_offsets_for_group('testgroup', [offsets])
        assert val == {'topic1': [0, 0]}
        self.client.get_group.assert_called_once_with('testgroup')
        self.client._send_set_offset_request.assert_called_once_with('testgroup', [offsets])
        self.client._parse_set_offset_response.assert_called_once_with('sendresponse')
| {
"content_hash": "215a123f6c37302cf8bb66e9488f65ff",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 106,
"avg_line_length": 46.75182481751825,
"alnum_prop": 0.6619828259172521,
"repo_name": "toddpalino/kafka-tools",
"id": "14c909036ffcbce54825103b523857ea7017ed22",
"size": "6405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/tools/client/test_interface_offsets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "707729"
}
],
"symlink_target": ""
} |
from model.user import User

# Fixture data for contact tests: a single-user data set.
testdata = [
    User(firstname="Sherlock", lastname="Holmes",
         address="221b, Baker Street, London, UK", email="Sherlock@museum.com",
         email2="Holmes@museum.com", email3="HolmesWatson@museum.com", homephone="321-32-13",
         mobilephone="+441712223355", workphone="123 12 31", additionalphone="+(44)1715553322")
]

# A two-user data set; the first entry mirrors ``testdata`` (constructed
# independently so the lists do not share User instances).
testdata_2 = [
    User(firstname="Sherlock", lastname="Holmes",
         address="221b, Baker Street, London, UK", email="Sherlock@museum.com",
         email2="Holmes@museum.com", email3="HolmesWatson@museum.com", homephone="321-32-13",
         mobilephone="+441712223355", workphone="123 12 31", additionalphone="+(44)1715553322"),
    User(firstname="John H.", lastname="Watson",
         address="221b, Baker Street, London, UK", email="JohnH.Watson@Museum.com",
         email2="Dr.Watson@Museum.com", email3="HolmesWatson@museum.com", homephone="321-32-13",
         mobilephone="+441712223355", workphone="123 12 31", additionalphone="+(44)1715553322")
]
| {
"content_hash": "06e3ecae65058ee231838f9ff64d43db",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 96,
"avg_line_length": 46.90909090909091,
"alnum_prop": 0.6715116279069767,
"repo_name": "VanillaPupa/python_training",
"id": "d5e201b31d8f8a7e809cfd789bd76aa9a094d959",
"size": "1032",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/users.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "35146"
}
],
"symlink_target": ""
} |
import os
import json
from front_base.host_manager import HostManagerBase
class HostManager(HostManagerBase):
    """Host manager backed by a JSON config file.

    ``info`` maps an ip address to a dict that (at minimum) carries a "sni"
    entry, as read by :meth:`get_sni_host`.
    """

    def __init__(self, config_fn):
        # Path of the JSON file used to persist the mapping.
        self.config_fn = config_fn
        self.info = {}
        self.load()

    def load(self):
        """Load the mapping from disk; keep it empty if the file is absent or broken."""
        if not os.path.isfile(self.config_fn):
            return
        try:
            with open(self.config_fn, "r") as fd:
                self.info = json.load(fd)
        except (OSError, ValueError):
            # Narrowed from a bare ``except``: only unreadable files (OSError)
            # and invalid JSON (ValueError/JSONDecodeError) are best-effort
            # ignored; anything else should surface.
            pass

    def save(self):
        """Persist the current mapping as pretty-printed JSON."""
        with open(self.config_fn, "w") as fd:
            json.dump(self.info, fd, indent=2)

    def set_host(self, info):
        """Replace the whole mapping and write it to disk immediately."""
        self.info = info
        self.save()

    def get_sni_host(self, ip):
        """Return ``(sni, host)`` for *ip*; both empty strings when unknown.

        The host element is always "" in this implementation.
        """
        if ip not in self.info:
            return "", ""
        return self.info[ip]["sni"], ""

    def reset(self):
        """Drop the in-memory mapping; the file on disk is left untouched."""
        self.info = {}
"content_hash": "cc52a2352d63ed03d3349363507ff5a5",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 51,
"avg_line_length": 21.526315789473685,
"alnum_prop": 0.5268948655256723,
"repo_name": "zlsun/XX-Net",
"id": "dec3beac2909517cbfb7dda847c91bc7ebee4d8c",
"size": "818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/default/x_tunnel/local/tls_relay_front/host_manager.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3884"
},
{
"name": "C",
"bytes": "53301"
},
{
"name": "CSS",
"bytes": "86883"
},
{
"name": "HTML",
"bytes": "188948"
},
{
"name": "JavaScript",
"bytes": "6274"
},
{
"name": "Python",
"bytes": "15347559"
},
{
"name": "Shell",
"bytes": "7812"
},
{
"name": "Visual Basic",
"bytes": "1700"
}
],
"symlink_target": ""
} |
class binaryTree(object):
    """Node of a binary tree with child and parent links.

    Reconstructed: the original source was truncated mid-statement
    (``self.parent =``) and ``setLeftBranch`` erroneously assigned the node to
    both branches.
    """

    def __init__(self, value):
        self.value = value
        self.leftBranch = None
        self.rightBranch = None
        self.parent = None

    def setLeftBranch(self, node):
        """Attach *node* as the left child and point its parent back at self."""
        self.leftBranch = node
        node.parent = self

    def setRightBranch(self, node):
        """Attach *node* as the right child and point its parent back at self."""
        self.rightBranch = node
        node.parent = self
"content_hash": "0bdddbc20687a6f3a9cb64a3e2a6195c",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 34,
"avg_line_length": 25.333333333333332,
"alnum_prop": 0.569078947368421,
"repo_name": "xpgeng/exercises_of_machine_learning",
"id": "12660c41ee7897052e68b6520f71526646c990d4",
"size": "330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Decision_tree/temp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "256949"
},
{
"name": "Python",
"bytes": "13845"
}
],
"symlink_target": ""
} |
import os
from loguru import logger
# logger = logging.getLogger()
import sys
import os.path
import numpy as np
path_to_script = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(path_to_script, "./extern/sPickle"))
# import numpy as np
from io3d.misc import suggest_filename
from io3d.misc import obj_from_file
from io3d.misc import obj_to_file
# def suggest_filename(file_path, exists=None):
# """
# Try if exist path and append number to its end.
# For debug you can set as input if file exists or not.
# """
# import os.path
# import re
# if not isinstance(exists, bool):
# exists = os.path.exists(file_path)
#
# if exists:
# file_path, file_extension = os.path.splitext(file_path)
# # print file_path
# m = re.search(r"\d+$", file_path)
# if m is None:
# # cislo = 2
# new_cislo_str = "2"
# else:
# cislostr = (m.group())
# cislo = int(cislostr) + 1
# file_path = file_path[:-len(cislostr)]
# new_cislo_str = str(cislo)
#
# file_path = file_path + new_cislo_str + file_extension # .zfill(2)
# # trorcha rekurze
# file_path = suggest_filename(file_path)
#
# return file_path
#
#
# def obj_from_file(filename='annotation.yaml', filetype='yaml'):
# ''' Read object from file '''
#
# if filetype == 'auto':
# _, ext = os.path.splitext(filename)
# filetype = ext[1:]
#
# if filetype == 'yaml':
# import yaml
# f = open(filename, 'rb')
# obj = yaml.load(f)
# f.close()
# elif filetype in ('pickle', 'pkl', 'pklz', 'picklezip'):
# fcontent = read_pkl_and_pklz(filename)
# # import pickle
# import cPickle as pickle
# # import sPickle as pickle
# obj = pickle.loads(fcontent)
# else:
# logger.error('Unknown filetype ' + filetype)
# return obj
#
#
# def read_pkl_and_pklz(filename):
# """
# Try read zipped or not zipped pickle file
# """
# fcontent = None
# try:
# import gzip
# f = gzip.open(filename, 'rb')
# fcontent = f.read()
# f.close()
# except IOError as e:
# # if the problem is in not gzip file
# logger.warning("Input gzip exception: " + str(e))
# f = open(filename, 'rb')
# fcontent = f.read()
# f.close()
# except Exception as e:
# # other problem
# import traceback
# logger.error("Input gzip exception: " + str(e))
# logger.error(traceback.format_exc())
#
# return fcontent
#
# from io3d.misc import obj_to_file
# def obj_to_file(obj, filename='annotation.yaml', filetype='yaml'):
# '''Writes annotation in file.
#
# Filetypes:
# yaml
# pkl, pickle
# pklz, picklezip
# '''
# # import json
# # with open(filename, mode='w') as f:
# # json.dump(annotation,f)
#
# # write to yaml
# d = os.path.dirname(os.path.abspath(filename))
# if not os.path.exists(d):
# os.makedirs(d)
#
# if filetype == 'auto':
# _, ext = os.path.splitext(filename)
# filetype = ext[1:]
#
# if filetype == 'yaml':
# f = open(filename, 'wb')
# import yaml
# yaml.dump(obj, f)
# f.close
# elif filetype in ('pickle', 'pkl'):
# f = open(filename, 'wb')
# import cPickle as pickle
# pickle.dump(obj, f)
# f.close
# elif filetype in ('streamingpicklezip', 'spklz'):
# # this is not working :-(
# import gzip
# import sPickle as pickle
# f = gzip.open(filename, 'wb', compresslevel=1)
# # f = open(filename, 'wb')
# pickle.s_dump(obj, f)
# f.close
# elif filetype in ('picklezip', 'pklz'):
# import gzip
# import cPickle as pickle
# f = gzip.open(filename, 'wb', compresslevel=1)
# # f = open(filename, 'wb')
# pickle.dump(obj, f)
# f.close
# elif filetype in('mat'):
#
# import scipy.io as sio
# sio.savemat(filename, obj)
# else:
# logger.error('Unknown filetype ' + filetype)
from io3d.misc import resize_to_shape
# def resize_to_shape(data, shape, zoom=None, mode='nearest', order=0):
# """
# Function resize input data to specific shape.
#
# :param data: input 3d array-like data
# :param shape: shape of output data
# :param zoom: zoom is used for back compatibility
# :mode: default is 'nearest'
# """
# # @TODO remove old code in except part
#
# try:
# # rint 'pred vyjimkou'
# # aise Exception ('test without skimage')
# # rint 'za vyjimkou'
# import skimage
# import skimage.transform
# # Now we need reshape seeds and segmentation to original size
#
# segm_orig_scale = skimage.transform.resize(
# data, shape, order=0,
# preserve_range=True
# )
#
# segmentation = segm_orig_scale
# logger.debug('resize to orig with skimage')
# except:
# import scipy
# import scipy.ndimage
# dtype = data.dtype
# if zoom is None:
# zoom = shape / np.asarray(data.shape).astype(np.double)
#
# segm_orig_scale = scipy.ndimage.zoom(
# data,
# 1.0 / zoom,
# mode=mode,
# order=order
# ).astype(dtype)
# logger.debug('resize to orig with scipy.ndimage')
#
# # @TODO odstranit hack pro oříznutí na stejnou velikost
# # v podstatě je to vyřešeno, ale nechalo by se to dělat elegantněji v zoom
# # tam je bohužel patrně bug
# # rint 'd3d ', self.data3d.shape
# # rint 's orig scale shape ', segm_orig_scale.shape
# shp = [
# np.min([segm_orig_scale.shape[0], shape[0]]),
# np.min([segm_orig_scale.shape[1], shape[1]]),
# np.min([segm_orig_scale.shape[2], shape[2]]),
# ]
# # elf.data3d = self.data3d[0:shp[0], 0:shp[1], 0:shp[2]]
# # mport ipdb; ipdb.set_trace() # BREAKPOINT
#
# segmentation = np.zeros(shape, dtype=dtype)
# segmentation[
# 0:shp[0],
# 0:shp[1],
# 0:shp[2]] = segm_orig_scale[0:shp[0], 0:shp[1], 0:shp[2]]
#
# del segm_orig_scale
# return segmentation
| {
"content_hash": "c031120fdfc44320c5d1aa1575ff9833",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 77,
"avg_line_length": 30.369668246445496,
"alnum_prop": 0.5482209737827716,
"repo_name": "mjirik/lisa",
"id": "5dd35f02cf19bdc7bfdf5b1d6a7a8a4cf6a01fc2",
"size": "6475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lisa/misc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4321"
},
{
"name": "CMake",
"bytes": "3851"
},
{
"name": "Inno Setup",
"bytes": "6869"
},
{
"name": "Jupyter Notebook",
"bytes": "5822150"
},
{
"name": "Python",
"bytes": "882527"
},
{
"name": "Shell",
"bytes": "16064"
}
],
"symlink_target": ""
} |
"""Tornado request argument parsing module.
Example: ::
import tornado.web
from marshmallow import fields
from webargs.tornadoparser import use_args
class HelloHandler(tornado.web.RequestHandler):
@use_args({'name': fields.Str(missing='World')})
def get(self, args):
response = {'message': 'Hello {}'.format(args['name'])}
self.write(response)
"""
import tornado.web
import tornado.concurrent
from tornado.escape import _unicode
from webargs import core
from webargs.multidictproxy import MultiDictProxy
class HTTPError(tornado.web.HTTPError):
    """`tornado.web.HTTPError` that stores validation errors."""

    def __init__(self, *args, **kwargs):
        # Pull the webargs-specific keyword arguments out before delegating
        # the rest to tornado's HTTPError constructor.
        messages = kwargs.pop("messages", {})
        headers = kwargs.pop("headers", None)
        self.messages = messages
        self.headers = headers
        super().__init__(*args, **kwargs)
def is_json_request(req):
    """Return True when the request declares a JSON Content-Type."""
    mimetype = req.headers.get("Content-Type")
    if mimetype is None:
        return False
    return core.is_json(mimetype)
class WebArgsTornadoMultiDictProxy(MultiDictProxy):
    """
    Override class for Tornado multidicts, handles argument decoding
    requirements.
    """

    def __getitem__(self, key):
        # Tornado stores argument values as lists of bytes; mirror
        # RequestHandler.decode_argument by decoding to unicode here.
        try:
            value = self.data.get(key, core.missing)
            if value is core.missing:
                return core.missing
            if key in self.multiple_keys:
                # Multi-valued field: decode each element, leaving
                # non-string values untouched.
                return [
                    _unicode(v) if isinstance(v, (str, bytes)) else v for v in value
                ]
            # Single-valued field stored as a list/tuple: unwrap the first
            # element before decoding.
            if value and isinstance(value, (list, tuple)):
                value = value[0]
            if isinstance(value, (str, bytes)):
                return _unicode(value)
            return value
        # based on tornado.web.RequestHandler.decode_argument
        except UnicodeDecodeError as exc:
            raise HTTPError(400, f"Invalid unicode in {key}: {value[:40]!r}") from exc
class WebArgsTornadoCookiesMultiDictProxy(MultiDictProxy):
    """
    And a special override for cookies because they come back as objects with a
    `value` attribute we need to extract.
    Also, does not use the `_unicode` decoding step
    """

    def __getitem__(self, key):
        morsel = self.data.get(key, core.missing)
        if morsel is core.missing:
            return core.missing
        # Unwrap the Morsel-like object; multi-valued keys are returned as a
        # single-element list.
        return [morsel.value] if key in self.multiple_keys else morsel.value
class TornadoParser(core.Parser):
    """Tornado request argument parser."""

    def _raw_load_json(self, req):
        """Return the raw JSON body for the core parser's load_json.

        Yields ``missing`` for non-JSON mimetypes (even if the body would
        parse as JSON) and for streaming requests, whose body is a Future.
        """
        if not is_json_request(req):
            return core.missing
        body = req.body
        # On streaming requests the body is a concurrent.Future; trying to
        # parse it would raise a TypeError, so treat it as absent instead.
        if isinstance(body, tornado.concurrent.Future):
            return core.missing
        return core.parse_json(body)

    def load_querystring(self, req, schema):
        """Return query params from the request as a MultiDictProxy."""
        return self._makeproxy(
            req.query_arguments, schema, cls=WebArgsTornadoMultiDictProxy
        )

    def load_form(self, req, schema):
        """Return form values from the request as a MultiDictProxy."""
        return self._makeproxy(
            req.body_arguments, schema, cls=WebArgsTornadoMultiDictProxy
        )

    def load_headers(self, req, schema):
        """Return headers from the request as a MultiDictProxy."""
        return self._makeproxy(req.headers, schema, cls=WebArgsTornadoMultiDictProxy)

    def load_cookies(self, req, schema):
        """Return cookies from the request as a MultiDictProxy."""
        # Cookies need the specialized proxy that unwraps their .value
        # attribute instead of running unicode decoding.
        return self._makeproxy(
            req.cookies, schema, cls=WebArgsTornadoCookiesMultiDictProxy
        )

    def load_files(self, req, schema):
        """Return files from the request as a MultiDictProxy."""
        return self._makeproxy(req.files, schema, cls=WebArgsTornadoMultiDictProxy)

    def handle_error(self, error, req, schema, *, error_status_code, error_headers):
        """Raise a `tornado.web.HTTPError` (400-level) carrying the
        validation messages when parsing fails.
        """
        status_code = error_status_code or self.DEFAULT_VALIDATION_STATUS
        reason = "Unprocessable Entity" if status_code == 422 else None
        raise HTTPError(
            status_code,
            log_message=str(error.messages),
            reason=reason,
            messages=error.messages,
            headers=error_headers,
        )

    def _handle_invalid_json_error(self, error, req, *args, **kwargs):
        """Turn an unparseable JSON body into a 400 response."""
        raise HTTPError(
            400,
            log_message="Invalid JSON body.",
            reason="Bad Request",
            messages={"json": ["Invalid JSON body."]},
        )

    def get_request_from_view_args(self, view, args, kwargs):
        # Tornado handler methods receive the handler itself as the first
        # positional argument; the request hangs off it.
        return args[0].request
# Module-level parser singleton and the view decorators it exposes.
parser = TornadoParser()
use_args = parser.use_args
use_kwargs = parser.use_kwargs
| {
"content_hash": "547d0d2b86f695de33c48ef623ca5278",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 86,
"avg_line_length": 33.56687898089172,
"alnum_prop": 0.6280834914611005,
"repo_name": "sloria/webargs",
"id": "e9d9c4b30703713eb79562d44f35ef58f954334b",
"size": "5270",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/webargs/tornadoparser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "182724"
}
],
"symlink_target": ""
} |
from __future__ import print_function, unicode_literals

# Package metadata.
__version__ = "0.10.0"
__author__ = "Jan Hajic jr."

# CropObject class names whose objects together make up the staves.
STAFF_CROPOBJECT_CLASSES = ['staff_line', 'staff_space', 'staff']
'''It is useful for other tools (esp. visualization) to know
that objects from these classes form the staves.'''
"content_hash": "2aea6ff72f70c3d5d63025fca58804ca",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 65,
"avg_line_length": 32.111111111111114,
"alnum_prop": 0.698961937716263,
"repo_name": "hajicj/muscima",
"id": "2a7845a483c7b89be89729c62b6ec802d3bd3005",
"size": "289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "muscima/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "515599"
},
{
"name": "Shell",
"bytes": "1087"
}
],
"symlink_target": ""
} |
import os
import logging
from gemini import GeminiQuery
from puzzle.models import (Case, Individual)
from puzzle.plugins import BaseCaseMixin
logger = logging.getLogger(__name__)
class CaseMixin(BaseCaseMixin):
    """Class to store methods that deal with Cases in gemini plugin"""

    def _add_individual(self, ind_obj):
        """Add a individual to the adapter

        Args:
            ind_obj (puzzle.models.Individual)
        """
        logger.debug("Adding individual {0} to plugin".format(ind_obj.ind_id))
        self.individual_objs.append(ind_obj)

    def add_case(self, case_obj):
        """Add a case obj with individuals to adapter

        Args:
            case_obj (puzzle.models.Case)
        """
        for ind_obj in case_obj.individuals:
            self._add_individual(ind_obj)
        logger.debug("Adding case {0} to plugin".format(case_obj.case_id))
        self.case_objs.append(case_obj)

    def cases(self, pattern=None):
        """Return all cases.

        Args:
            pattern: Allways None in gemini adapter

        Returns:
            case_objs (list): The loaded Case objects
        """
        return self.case_objs

    def case(self, case_id=None):
        """Return a Case object

        If no case_id is given return the first case, or None when the
        adapter holds no cases.

        Args:
            case_id (str): A case id

        Returns:
            case (Case): A Case object or None
        """
        cases = self.cases()
        if case_id:
            for case in cases:
                if case.case_id == case_id:
                    return case
        else:
            if cases:
                return cases[0]
        return None

    def individual(self, ind_id=None):
        """Return a individual object

        Args:
            ind_id (str): A individual id

        Returns:
            individual (puzzle.models.Individual): or None when not found
        """
        # Bug fix: ``self.individuals`` is a generator *method*; the original
        # iterated the bound method object itself, which raises TypeError.
        # Calling it yields the iterator of all individuals.
        for ind_obj in self.individuals():
            if ind_obj.ind_id == ind_id:
                return ind_obj
        return None

    def individuals(self, *ind_ids):
        """Return information about individuals

        Args:
            ind_ids (list(str)): List of individual ids

        Yields:
            individuals (Iterable): Iterable with Individuals
        """
        if ind_ids:
            for ind_id in ind_ids:
                for ind in self.individual_objs:
                    if ind.ind_id == ind_id:
                        yield ind
        else:
            for ind in self.individual_objs:
                yield ind
| {
"content_hash": "f8b4fb21541ec8b16ed1e1f0367473b8",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 78,
"avg_line_length": 26.94,
"alnum_prop": 0.5077951002227171,
"repo_name": "robinandeer/puzzle",
"id": "0d4ac1d14a8b9f4ad534e75b7314a02ecb987b3f",
"size": "2694",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "puzzle/plugins/gemini/mixins/case.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "251"
},
{
"name": "HTML",
"bytes": "55258"
},
{
"name": "JavaScript",
"bytes": "1100"
},
{
"name": "Python",
"bytes": "233511"
}
],
"symlink_target": ""
} |
import random
def name_to_number(name):
    """Map a RPSLS gesture name to its number (0-4).

    Prints an error message and returns None for an unknown name.
    Uses a dict lookup instead of an if/elif chain, and the function form of
    print so the module runs under both Python 2 and Python 3.
    """
    numbers = {"rock": 0, "Spock": 1, "paper": 2, "lizard": 3, "scissors": 4}
    if name in numbers:
        return numbers[name]
    print("Invalid name argument")
def number_to_name(number):
    """Map a RPSLS number (0-4) back to its gesture name.

    Prints an error message and returns None when the number is out of range.
    Uses a dict lookup and the function form of print so the module runs
    under both Python 2 and Python 3.
    """
    names = {0: "rock", 1: "Spock", 2: "paper", 3: "lizard", 4: "scissors"}
    if number in names:
        return names[number]
    print("Number arg out of range (0-4) inclusive.")
def rpsls(player_choice):
    """Play one round of Rock-paper-scissors-lizard-Spock vs. the computer.

    Prints the player's choice, a random computer choice, and the winner.
    Winner rule: mod = (player - computer) % 5; 3-4 means the computer
    wins, 1-2 means the player wins, 0 is a tie.

    Args:
        player_choice (str): one of the five valid choice names.
    """
    # Fix: Python-2-only `print` statements replaced with the
    # parenthesized form, which prints identically on Python 2 and 3
    # for a single argument.
    print("")
    print("Player chooses " + player_choice)
    player_number = name_to_number(player_choice)
    comp_number = random.randrange(0, 5)
    comp_choice = number_to_name(comp_number)
    print("Computer chooses " + comp_choice)
    mod = (player_number - comp_number) % 5
    # test for winner
    if mod >= 3:
        print("Computer wins!")
    elif mod >= 1:
        print("Player wins!")
    else:
        print("Player and computer tie!")
# test
# One smoke-test round per valid choice — same calls, in the same order,
# as the original hand-written list.
for _choice in ("rock", "Spock", "paper", "lizard", "scissors"):
    rpsls(_choice)
| {
"content_hash": "b5ac44004b626e883b92bcf5c1f631f9",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 56,
"avg_line_length": 20.779661016949152,
"alnum_prop": 0.5562805872756933,
"repo_name": "tblong/CodeSkulptor",
"id": "9159f9245ff11a10dee1afafb1b493fdd9d1ba94",
"size": "1526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "projects/P1_W1/Rock-paper-scissors-lizard-Spock.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44265"
}
],
"symlink_target": ""
} |
import WikiwhoRelationships
from copy import deepcopy
from sys import argv,exit
import getopt
import numpy as np
import math
import random
import cgi
from structures.Word import Word
import operator
#from django.utils import simplejson
# Shared per-revision token store: getAuthorshipDataFromRevision() fills it
# (keyed by revision id) and the context-building code in getGraph() reads it.
context = {}
# NOTE(review): context_obj appears unused in this file — confirm before removing.
context_obj = {}
# Fixed seed so repeated runs produce the same pseudo-random choices.
random.seed(3)
def getGraph(filename):
"""Analyse one article dump and build per-revision interaction metrics.

Runs WikiwhoRelationships.analyseArticle() on *filename* and, for every
revision in article order, computes windowed (window = 50) editor
interaction metrics: disagreement (deletion/revert) edge counts and
weights, sender/receiver ratios, reciprocity, bipolarity (ratio of the
extreme eigenvalues of the window adjacency matrix A), an authorship
gini coefficient, and HTML "context" snippets for deletions and undos.

Returns:
    list: one stats dict per revision (``statsData``); vandalism
    revisions get an all-zero placeholder so indexes stay aligned.

NOTE(review): the indentation of this file was lost in the dump; the
code below is kept token-for-token and only comments were added.
"""
# Compute distribution information
(revisions, order, relations) = WikiwhoRelationships.analyseArticle(filename)
# Graph structures.
edges_rev = {}
nodes = {}
# Metrics structures.
ordered_editors = []
ordered_not_vandalism = []
all_antagonized_editors = []
all_supported_editors = []
statsData = []
contributors_rev = {}
# Sliding-window size (number of most recent revisions considered).
window = 50
rev_counter = -1
#C = {}
# Main pass: one iteration per revision, in article order.
for (revision, vandalism) in order:
rev_counter = rev_counter + 1
if (vandalism):
#print "vandalism", revision
# Vandalism revisions get a zeroed stats placeholder and are skipped.
statsData.append({'revision': revision,
'distinct_editors': 0,
'deletion_edges_contributors_w': 0,
'deletion_outgoing_ratio': 0,
'deletion_incoming_ratio': 0,
'deletion_reciprocity': 0,
'deletion_weight_avg': 0,
'bipolarity' : 0,
'deletion_weight' : 0,
'adjacency_matrix' : ([], [], {}, 0),
'reciprocity_matrix' : {},
'weight_matrix' : {},
'context' : {},
'antagonistic_focus_avg' : {},
'negative_actions_weighted': 0,
'wikigini': 0,
})
continue
relation = relations[revision]
# Authorship distribution.
authorship = getAuthorshipDataFromRevision(revisions[revision], order, rev_counter)
totalWordCount = len(authorship)
authDistSum = sumAuthDist(authorship)
#print "authDistSum", authDistSum
# List of editors in each order.
ordered_editors.append(relation.author)
ordered_not_vandalism.append(revision)
if (relation.author in contributors_rev.keys()):
contributors_rev[relation.author].append(revision)
else:
contributors_rev.update({relation.author : [revision]})
# Update the nodes.
if relation.author in nodes.keys():
nodes[relation.author].append(revisions[revision].id)
else:
nodes.update({relation.author : [revisions[revision].id]})
# Update the edges.
# Edges: (edge_type, editor_source, rev_source, editor_target, rev_target, weight)
edges_rev.update({revision : []})
for elem in relation.deleted.keys():
edges_rev[revision].append(("deletion", relation.author, revision, revisions[elem].contributor_name, elem, relation.deleted[elem]))
for elem in relation.reintroduced.keys():
edges_rev[revision].append(("reintroduction", relation.author, revision, revisions[elem].contributor_name, elem, relation.reintroduced[elem]))
for elem in relation.redeleted.keys():
edges_rev[revision].append(("redeletion", relation.author, revision, revisions[elem].contributor_name, elem, relation.redeleted[elem]))
for elem in relation.revert.keys():
edges_rev[revision].append(("revert", relation.author, revision, revisions[elem].contributor_name, elem, relation.revert[elem]))
# Calculate metrics.
distinct_editors = 0
deletion_edges_contributors_w = 0
deletion_sender_ratio = 0
deletion_receivers_ratio = 0
deletion_reciprocal_edges = []
deletion_reciprocity = 0
deletion_edges_total = 0
deletion_weight_avg = 0
deletion_weight = 0
bipolarity = 0
#negative_actions_weighted = 0
# Compute wikigini: V1
# Gini over the per-author word counts: authors sorted by contribution,
# rank-weighted sum normalized by total words.
i = 1
res = 0
sortedAuthDistSum = sorted(authDistSum.iteritems(), key=operator.itemgetter(1))
for tup in sortedAuthDistSum:
res = res + (i * tup[1])
i = i + 1
wikiGini = ((2.0 * res)/ (len(sortedAuthDistSum) * totalWordCount)) - ((len(sortedAuthDistSum) + 1.0 ) / len(sortedAuthDistSum))
A = []
editors_window = []
R = {}
C = {}
C2 = {}
C3 = {}
W = {}
X = {}
antagonistic_focus_avg = {}
#CC = {}
# window1 = effective window size: everything seen so far, capped at `window`.
window1 = len(ordered_editors)
if (len(ordered_editors) >= window):
window1 = window
if True:
deletion_sender_ratio = set([])
deletion_receivers_ratio = set([])
editors_window = list(set(ordered_editors[len(ordered_editors)-window1:]))
A = np.zeros((len(editors_window), len(editors_window)))
R = {}
C = {}
C2 = {}
C3 = {}
W = np.zeros((len(editors_window), len(editors_window)))
X = {}
target_revs = {}
# Scan every deletion/revert edge of the revisions inside the window and
# accumulate the edge metrics plus the matrices A (adjacency), W (weights),
# R (per-pair weights) and X (per-revision per-target weights).
for past_rev in ordered_not_vandalism[len(ordered_not_vandalism)-window1:]:
#n =
for edge_pos in range(len(edges_rev[past_rev])-1, 0-1, -1):
edge = edges_rev[past_rev][edge_pos]
(edge_type, source, _, target, rev_target, weight) = edge
if edge_type == "deletion" or edge_type == "revert":
#print "edge", past_rev, rev_target, edge_type, source, target, weight
# Checking if the target editor belongs to the window.
if (target in editors_window):
# Counts the total number of edges in the window.
deletion_edges_total = deletion_edges_total + 1
# For metric 2: ratio of number of edges e only between editors in w.
deletion_edges_contributors_w = deletion_edges_contributors_w + 1
# For metric 3: ratio of n that sent nodes at least once in w.
deletion_sender_ratio.add(source)
# For metric 6: avg. weight of the edges e in w
deletion_weight_avg = deletion_weight_avg + weight
# For metric: total negative actions (weight).
deletion_weight = deletion_weight + weight
# Update adjacency matrix
s = editors_window.index(source)
t = editors_window.index(target)
A[s][t] = A[s][t] + math.log10(1 + weight) + 1
A[t][s] = A[t][s] + math.log10(1 + weight) + 1
W[s][t] = W[s][t] + weight
#W[t][s] = W[t][s] + weight
if (s,t) in R.keys():
R[(s,t)] = R[(s,t)] + weight # + 1
else:
R.update({(s,t) : weight})#1})
if (past_rev in X.keys()):
if (target in X[past_rev].keys()):
X[past_rev][target] = X[past_rev][target] + weight
else:
X[past_rev].update({target : weight})
else:
X.update({past_rev : {target : weight}})
# For metric 4: ratio of n that received edges at least once in w.
# Checking if the target revision belongs to the window.
if (rev_target in ordered_not_vandalism[len(ordered_not_vandalism)-window1:]):
deletion_receivers_ratio.add(target)
# for metric 5: ratio of e that was reciprocal
# Don't discriminate edges that target revisions outside the window.
if ((target, source) in deletion_reciprocal_edges):
deletion_reciprocal_edges.remove((target, source))
deletion_reciprocity = deletion_reciprocity + 1
else:
deletion_reciprocal_edges.append((source, target))
# Construction of context in window.
# Iterate over each revision in the window.
# C2 collects "undo of deletion" contexts, C3 "undo of re-introduction",
# C  "deletion" contexts; all keyed by editor-index pair (s, t).
for past_rev in ordered_not_vandalism[len(ordered_not_vandalism)-window1:]:
pos = ordered_not_vandalism.index(past_rev)
prev_revision = ordered_not_vandalism[pos-1]
# DETECT: UNDO of delete
# Iterate over all the words of the current processed revision.
for w in range(0, len(context[past_rev])):
word = context[past_rev][w]
for elem in word.deleted:
# If it is not "self-action".
#and prev_revision not in word.used
#if (past_rev == 15):
# print word.value, revisions[past_rev].contributor_name, revisions[elem].contributor_name
# print "elem < past_rev", elem < past_rev
# print "revisions[past_rev].contributor_name != revisions[elem].contributor_name", revisions[past_rev].contributor_name != revisions[elem].contributor_name
# print "revisions[elem].contributor_name in editors_window", revisions[elem].contributor_name in editors_window
# print "....."
if (elem < past_rev and elem in revisions.keys() and revisions[past_rev].contributor_name != revisions[elem].contributor_name) and (revisions[elem].contributor_name in editors_window) and (prev_revision not in word.used):
s = editors_window.index(revisions[past_rev].contributor_name)
t = editors_window.index(revisions[elem].contributor_name)
# Add the context for the edge (s,t).
if (s,t) not in C2.keys():
C2.update({(s,t) : {'target': elem}})
target_revs.update({(s,t) : []})
# Add the information about the context.
if (past_rev,elem) not in C2[(s,t)]:
comment = "Comment: (Empty)"
if revisions[(past_rev)].comment != None:
comment = "Comment: <i>" + revisions[past_rev].comment.encode("utf-8") + "</i>"
#C2[(s,t)].update({(past_rev,elem) : ["<a target=_blank href=http://en.wikipedia.org/w/index.php?&diff=" + str(past_rev) + "> [Undo of Deletion] " + revisions[past_rev].contributor_name + "->" + revisions[elem].contributor_name + ". Revision: " + str(past_rev) +". " + comment + "</a>" ]})
C2[(s,t)].update({(past_rev,elem) : ["<a target=_blank href=http://en.wikipedia.org/w/index.php?&diff=" + str(past_rev) + "> [Undo of Deletion] " + revisions[past_rev].contributor_name + "(" + str(past_rev) + ")->" + revisions[elem].contributor_name + "(" + str(elem) +"). " + comment + "</a>" ]})
if (True):
# If the word is not in the context: add it.
if word not in C2[(s,t)][(past_rev,elem)]:
#target_revs[(s,t)].append(word.revision)
# Add new line if this sentence is a new one in the context.
try:
if context[(past_rev)][w-1] not in C2[(s,t)][(past_rev,elem)]:
C2[(s,t)][(past_rev,elem)].append("<br />")
except:
pass
# Add 4 tokens of pre-context.
for i in range(4,0,-1):
try:
if w-i >= 0 and context[(past_rev)][w-i] not in C2[(s,t)][(past_rev,elem)]:
C2[(s,t)][(past_rev,elem)].append(context[(past_rev)][w-i])
#C[(t,s)][past_rev].append(context[prev_revision][w-i])
#if past_rev == 82285999:
# print "w-i", context[prev_revision][w-i], context[prev_revision][w-i].value
except:
pass
# Append the word.
C2[(s,t)][(past_rev,elem)].append(word)
# Add 4 tokens of post-context.
for i in range(1,5):
try:
if context[(past_rev)][w+i] not in C2[(s,t)][(past_rev,elem)]:
C2[(s,t)][(past_rev,elem)].append(context[(past_rev)][w+i])
except:
pass
# If the word is already in context: add post-context.
else:
# Add 4 tokens of post-context.
for i in range(1,5):
try:
if context[(past_rev)][w+i] not in C2[(s,t)][(past_rev)]:
C2[(s,t)][(past_rev,elem)].append(context[(past_rev)][w+i])
except:
pass
# DETECT: UNDO of re-introduction.
# Iterate over all the words of the immediate previous revision.
for w in range(0, len(context[prev_revision])):
word = context[prev_revision][w]
if past_rev not in word.deleted:
continue
for elem in word.freq:
# If it is not "self-delete".
if elem < past_rev and revisions[past_rev].contributor_name != revisions[elem].contributor_name and revisions[elem].contributor_name in editors_window:
s = editors_window.index(revisions[past_rev].contributor_name)
t = editors_window.index(revisions[elem].contributor_name)
# Add the context for the edge (s,t).
if (s,t) not in C3.keys():
C3.update({(s,t) : {'target': elem}})
#target_revs.update({(s,t) : []})
# Add the information about the context.
if (past_rev,elem) not in C3[(s,t)]:
comment = "Comment: (Empty)"
if revisions[past_rev].comment != None:
comment = "Comment: <i>" + revisions[past_rev].comment.encode("utf-8") + "</i>"
#C3[(s,t)].update({(past_rev,elem) : ["<a target=_blank href=http://en.wikipedia.org/w/index.php?&diff=" + str(past_rev) + "> [Undo of Re-introduction] " + revisions[past_rev].contributor_name + "->" + revisions[elem].contributor_name + ". Revision: " + str(past_rev) +". " + comment + "</a>" ]})
C3[(s,t)].update({(past_rev,elem) : ["<a target=_blank href=http://en.wikipedia.org/w/index.php?&diff=" + str(past_rev) + "> [Undo of Re-introduction] " + revisions[past_rev].contributor_name + "(" + str(past_rev) + ")"+ "->" + revisions[elem].contributor_name + "(" + str(elem) + ")" +". " + comment + "</a>" ]})
if (True):
# If the word is not in the context: add it.
if word not in C3[(s,t)][(past_rev,elem)]:
#target_revs[(s,t)].append(word.revision)
# Add new line if this sentence is a new one in the context.
try:
if context[(prev_revision)][w-1] not in C3[(s,t)][(past_rev,elem)]:
C3[(s,t)][(past_rev,elem)].append("<br />")
except:
pass
# Add 4 tokens of pre-context.
for i in range(4,0,-1):
try:
if w-i >= 0 and context[(prev_revision)][w-i] not in C3[(s,t)][(past_rev,elem)]:
C3[(s,t)][(past_rev,elem)].append(context[prev_revision][w-i])
#C[(t,s)][past_rev].append(context[prev_revision][w-i])
#if past_rev == 82285999:
# print "w-i", context[prev_revision][w-i], context[prev_revision][w-i].value
except:
pass
# Append the word.
C3[(s,t)][(past_rev,elem)].append(word)
# Add 4 tokens of post-context.
for i in range(1,5):
try:
if context[prev_revision][w+i] not in C3[(s,t)][(past_rev,elem)]:
C3[(s,t)][(past_rev,elem)].append(context[prev_revision][w+i])
except:
pass
# If the word is already in context: add post-context.
else:
# Add 4 tokens of post-context.
for i in range(1,5):
try:
if context[prev_revision][w+i] not in C3[(s,t)][(past_rev,elem)]:
C3[(s,t)][(past_rev,elem)].append(context[prev_revision][w+i])
except:
pass
# DETECT: DELETION
# Iterate over all the words of the immediate previous revision.
for w in range(0, len(context[prev_revision])):
word = context[prev_revision][w]
# If the word will be deleted in the window: detect deletion edge (s,t)
if (past_rev in word.deleted and word.author_name in editors_window):
s = editors_window.index(revisions[past_rev].contributor_name)
t = editors_window.index(word.author_name)
# If it is not "self-delete".
if revisions[past_rev].contributor_name != word.author_name:
# Add the context for the edge (s,t).
if (s,t) not in C.keys():
C.update({(s,t) : {'target': word.author_name}})
#target_revs.update({(s,t) : []})
# Add the information about the context.
if past_rev not in C[(s,t)]:
comment = "Comment: (Empty)"
if revisions[past_rev].comment != None:
comment = "Comment: <i>" + revisions[past_rev].comment.encode("utf-8") + "</i>"
#C[(s,t)].update({past_rev : ["<a target=_blank href=http://en.wikipedia.org/w/index.php?&diff=" + str(past_rev) + "> [Deletion] " + revisions[past_rev].contributor_name + "->" + word.author_name + ". Revision: " + str(past_rev) +". " + comment + "</a>" ]})
C[(s,t)].update({past_rev : ["<a target=_blank href=http://en.wikipedia.org/w/index.php?&diff=" + str(past_rev) + "> [Deletion] " + revisions[past_rev].contributor_name + "(" + str(past_rev) + ")->" + word.author_name +"(" + str(prev_revision) + "). " + comment + "</a>" ]})
if (True):
# If the word is not in the context: add it.
if word not in C[(s,t)][past_rev]:
#target_revs[(s,t)].append(word.revision)
# Add new line if this sentence is a new one in the context.
try:
if context[prev_revision][w-1] not in C[(s,t)][past_rev]:
C[(s,t)][past_rev].append("<br />")
except:
pass
# Add 4 tokens of pre-context.
for i in range(4,0,-1):
try:
if w-i >= 0 and context[prev_revision][w-i] not in C[(s,t)][past_rev]:
C[(s,t)][past_rev].append(context[prev_revision][w-i])
#C[(t,s)][past_rev].append(context[prev_revision][w-i])
#if past_rev == 82285999:
# print "w-i", context[prev_revision][w-i], context[prev_revision][w-i].value
except:
pass
# Append the word.
C[(s,t)][past_rev].append(word)
# Add 4 tokens of post-context.
for i in range(1,5):
try:
if context[prev_revision][w+i] not in C[(s,t)][past_rev]:
C[(s,t)][past_rev].append(context[prev_revision][w+i])
except:
pass
# If the word is already in context: add post-context.
else:
# Add 4 tokens of post-context.
for i in range(1,5):
try:
if context[prev_revision][w+i] not in C[(s,t)][past_rev]:
C[(s,t)][past_rev].append(context[prev_revision][w+i])
except:
pass
# Render collected contexts into HTML strings and attach the
# "disagreement focus" (share of a revision's negative weight aimed at
# this target), accumulating antagonistic_focus_avg per editor pair.
# Print the context for DELETE.
for (s,t) in C.keys():
target = C[(s,t)]['target']
del C[(s,t)]['target']
for r in C[(s,t)].keys():
info = ""
if (r in X.keys() and target in X[r].keys()):
info = "<b>Disagreement focus:</b> " + str(X[r][target] / float(sum(X[r].values()))) + "<br /><br />"
if ((s,t) in antagonistic_focus_avg.keys()):
antagonistic_focus_avg[(s,t)].append(X[r][target] / float(sum(X[r].values())))
else:
antagonistic_focus_avg.update({(s,t): [X[r][target] / float(sum(X[r].values()))]})
C[(s,t)].update({r : [info + printDeleteContext(C[(s,t)][r], r, target)]})
# Print the context for UNDO of delete.
for (s,t) in C2.keys():
#target = C2[(s,t)]['target']
del C2[(s,t)]['target']
for (r,r2) in C2[(s,t)].keys():
target = r2
info = ""
if (r in X.keys() and revisions[target].contributor_name in X[r].keys()):
# #print "X keys", X.keys(), "X[r] keys", r, target, X[r].keys(), X[r].values()
info = "<b>Disagreement focus:</b> " + str(X[r][revisions[target].contributor_name] / float(sum(X[r].values()))) + "<br /><br />"
if ((s,t) in antagonistic_focus_avg.keys()):
antagonistic_focus_avg[(s,t)].append(X[r][revisions[target].contributor_name] / float(sum(X[r].values())))
else:
antagonistic_focus_avg.update({(s,t): [X[r][revisions[target].contributor_name] / float(sum(X[r].values()))]})
#if past_rev == 9:
# print "X", X, "info", info, "r", r, "target", target
if (s,t) in C.keys():
if (r in C[(s,t)].keys()):
C[(s,t)][r].append(printUndoOfDeletionContext(C2[(s,t)][(r,r2)], r, target))
else:
C[(s,t)].update({r : [info + printUndoOfDeletionContext(C2[(s,t)][(r,r2)], r, target)]})
else:
C.update({(s,t) : {r : [info + printUndoOfDeletionContext(C2[(s,t)][(r,r2)], r, target)]}})
# Print the context for UNDO of re-introduction.
for (s,t) in C3.keys():
#target = C3[(s,t)]['target']
del C3[(s,t)]['target']
for (r,r2) in C3[(s,t)].keys():
target = r2
info = ""
if (r in X.keys() and revisions[target].contributor_name in X[r].keys()):
# #print "X keys", X.keys(), "X[r] keys", r, target, X[r].keys(), X[r].values()
info = "<b>Disagreement focus:</b> " + str(X[r][revisions[target].contributor_name] / float(sum(X[r].values()))) + "<br /><br />"
if ((s,t) in antagonistic_focus_avg.keys()):
antagonistic_focus_avg[(s,t)].append(X[r][revisions[target].contributor_name] / float(sum(X[r].values())))
else:
antagonistic_focus_avg.update({(s,t): [X[r][revisions[target].contributor_name] / float(sum(X[r].values()))]})
#if past_rev == 9:
# print "X", X, "info", info, "r", r, "target", target
if (s,t) in C.keys():
if (r in C[(s,t)].keys()):
C[(s,t)][r].append(printUndoOfReintroductionContext(C3[(s,t)][(r,r2)], r, target))
else:
C[(s,t)].update({r : [info + printUndoOfReintroductionContext(C3[(s,t)][(r,r2)], r, target)]})
else:
C.update({(s,t) : {r : [info + printUndoOfReintroductionContext(C3[(s,t)][(r,r2)], r, target)]}})
# Normalize the accumulated counters into the window metrics 1-6.
# 1: Number of distinct editors n that edited in window1.
distinct_editors = len(set(ordered_editors[len(ordered_editors)-window1:]))
# 2: Ratio of # of edges e only between editors in w
deletion_edges_contributors_w = deletion_edges_contributors_w / float(distinct_editors)
# 3: Ratio of n that sent edges at least once in w
deletion_sender_ratio = len(deletion_sender_ratio) / float(distinct_editors)
# 4: Ratio of n that received edges at least once in w
deletion_receivers_ratio = len(deletion_receivers_ratio) / float(distinct_editors)
if (deletion_edges_total != 0):
# 5: Ratio of e that was reciprocal
deletion_reciprocity = deletion_reciprocity / float((deletion_edges_total / 2.0))
# 6: Average weight of the edges e in w
deletion_weight_avg = deletion_weight_avg / float(deletion_edges_total)
else:
deletion_reciprocity = 0
deletion_weight_avg = 0
#print "A before", A
# Update the reciprocity on the weights of the adjacency matrix.
# NOTE(review): the reciprocity adjustment is commented out, so this loop
# currently rewrites A with its own values (no-op) — confirm intent.
for (s_index, t_index) in R.keys():
#print "s_index", s_index, "t_index", t_index, R
#if ((t_index, s_index) in R.keys()):
# reciprocity = min(R[(s_index, t_index)], R[(t_index, s_index)])
#else:
# reciprocity = 0
A[s_index][t_index] = A[s_index][t_index] #+ (2*reciprocity)
A[t_index][s_index] = A[t_index][s_index] #+ (2*reciprocity)
# Bipolarity: -lambda_min / lambda_max over the eigenvalues of A.
eigenvalues, _ = np.linalg.eig(A)
lambda_max = max(eigenvalues)
lambda_min = min(eigenvalues)
bipolarity = 0
if (lambda_max != 0):
bipolarity = -lambda_min / lambda_max
bipolarity = bipolarity.real
#print "bipolarity", bipolarity, "lambda_min", lambda_min, "lambda_max", lambda_max
#print "bipolarity", bipolarity
#print "A after", A
# antagonized_editors: Revert actions + delete actions in revision (distinct editors)
antagonized_editors = set([])
for elem in relation.revert.keys():
antagonized_editors.add(revisions[elem].contributor_id)
for elem in relation.deleted.keys():
antagonized_editors.add(revisions[elem].contributor_id)
all_antagonized_editors.append(len(antagonized_editors))
antagonized_editors_avg_w1 = 0
if (len(all_antagonized_editors) >= window1):
antagonized_editors_avg_w1 = sum(all_antagonized_editors[len(all_antagonized_editors)-window1:]) / float(window1)
# supported_editors: reintroductions + redeletes (distinct editors)
supported_editors = set([])
for elem in relation.reintroduced.keys():
supported_editors.add(revisions[elem].contributor_id)
for elem in relation.redeleted.keys():
supported_editors.add(revisions[elem].contributor_id)
all_supported_editors.append(len(supported_editors))
supported_editors_avg_w1 = 0
if (len(all_supported_editors) >= window1):
supported_editors_avg_w1 = sum(all_supported_editors[len(all_supported_editors)-window1:]) / float(window1)
# Collect this revision's metrics into the result list.
statsData.append({'revision': revision,
'author': revisions[revision].contributor_name,
'distinct_editors': distinct_editors,
'deletion_edges_contributors_w': deletion_edges_contributors_w,
'deletion_sender_ratio': deletion_sender_ratio,
'deletion_receiver_ratio': deletion_receivers_ratio,
'deletion_reciprocity': deletion_reciprocity,
'deletion_weight_avg': deletion_weight_avg,
'deletion_weight': deletion_weight,
'antagonized_editors_avg_w1': antagonized_editors_avg_w1,
'supported_editors_avg_w1': supported_editors_avg_w1,
'bipolarity' : bipolarity,
'adjacency_matrix' : (A, editors_window, authDistSum, totalWordCount),
'reciprocity_matrix' : R,
'weight_matrix' : W,
'context' : C,
'antagonistic_focus_avg' : antagonistic_focus_avg,
'wikigini' : wikiGini})
#for r in context.keys():
# print "-----"
# print r
# for w in context[r]:
# print w.value
return statsData
def sumAuthDist(authors):
    """Count how many tokens each author contributed.

    Args:
        authors (list): one author name per token, as returned by
            getAuthorshipDataFromRevision().

    Returns:
        dict: author name -> number of tokens attributed to that author.
    """
    wordCount = {}
    for author in authors:
        # Fix: the original tested `author in wordCount.keys()`, which on
        # Python 2 builds a list and scans it (O(n) per token, quadratic
        # overall); dict.get is a single O(1) lookup.
        wordCount[author] = wordCount.get(author, 0) + 1
    return wordCount
def getAuthorshipDataFromRevision(revision, order, rev):
    """Collect the per-token author names of one revision.

    Side effect: records every Word object of the revision into the
    module-global ``context`` map under the revision id taken from
    ``order[rev]``, for later use by the context-building code.

    Args:
        revision: a WikiWho revision object (ordered_paragraphs / paragraphs).
        order: list of (rev_id, vandalism) tuples for the whole article.
        rev (int): index of this revision inside ``order``.

    Returns:
        list: author name for every word of the revision, in text order.
    """
    #print "Printing authorship for revision: ", revision.wikipedia_id, rev
    #text = []
    authors = []
    (rev_id, _) = order[rev]
    #global context_obj
    global context
    context.update({rev_id: []})
    for hash_paragraph in revision.ordered_paragraphs:
        # deepcopy so the pop() calls below do not mutate the revision object.
        p_copy = deepcopy(revision.paragraphs[hash_paragraph])
        paragraph = p_copy.pop(0)
        for hash_sentence in paragraph.ordered_sentences:
            sentence = paragraph.sentences[hash_sentence].pop(0)
            for i in range(0, len(sentence.words)):
                word = sentence.words[i]
                #word in sentence.words:
                #text.append(word.value)
                authors.append(word.author_name)
                context[rev_id].append(word)
                #context[rev_id].append(word)
    return authors
def printDeleteContext(cc, rev, target_revs):
    """Render a deletion context list as HTML.

    Word tokens authored by *target_revs* that were deleted in *rev* are
    highlighted in red; all other tokens are emitted as plain escaped
    text, and non-Word entries (markup strings) pass through unescaped.
    """
    pieces = []
    for token in cc:
        if not isinstance(token, Word):
            pieces.append(" " + token)
        elif token.author_name == target_revs and rev in token.deleted:
            pieces.append(" <b><span class='text-danger'>" + cgi.escape(token.value) + "</span></b> ")
        else:
            pieces.append(" " + cgi.escape(token.value))
    return "".join(pieces).replace('"', '&quot;')
def printUndoOfDeletionContext(cc, rev, target_revs):
    """Render an undo-of-deletion context list as HTML.

    Word tokens whose deletion history contains *target_revs* (i.e. the
    restored words) are highlighted in green; everything else is emitted
    as plain escaped text, with non-Word entries passed through as-is.
    """
    pieces = []
    for token in cc:
        if not isinstance(token, Word):
            pieces.append(" " + token)
        elif target_revs in token.deleted:
            pieces.append(" <b><span class='text-success'>" + cgi.escape(token.value) + "</span></b> ")
        else:
            pieces.append(" " + cgi.escape(token.value))
    return "".join(pieces).replace('"', '&quot;')
def printUndoOfReintroductionContext(cc, rev, target_revs):
    """Render an undo-of-reintroduction context list as HTML.

    Word tokens that were reintroduced in *target_revs* and deleted again
    in *rev* are highlighted in red; everything else is emitted as plain
    escaped text, with non-Word entries passed through as-is.
    """
    pieces = []
    for token in cc:
        if not isinstance(token, Word):
            pieces.append(" " + token)
        elif target_revs in token.freq and rev in token.deleted:
            pieces.append(" <b><span class='text-danger'>" + cgi.escape(token.value) + "</span></b> ")
        else:
            pieces.append(" " + cgi.escape(token.value))
    return "".join(pieces).replace('"', '&quot;')
def printContext(source, target):
    """Render the stored context for a (source, target) pair as HTML.

    NOTE(review): this reads ``context[(source, target)]``, but elsewhere in
    this file ``context`` is filled keyed by revision id (see
    getAuthorshipDataFromRevision) — this looks like legacy/dead code;
    confirm before relying on it.
    """
    global context
    #print context
    full_str = ""
    #for (source, target) in context.keys():
    if True:
        #print
        sentences = context[(source, target)]
        mystr = ""
        for s in sentences:
            for w in s:
                # Bold the words of *target* that *source* deleted; the
                # "$<id>" suffix tags each token with its object identity.
                if (w.revision == target and source in w.deleted):
                    mystr = mystr + " <b>" + w.value + "$"+ str(id(w)) + "</b> "
                else:
                    mystr = mystr + " " + w.value + "$"+ str(id(w))
            #print mystr
            # One "<br />"-separated chunk per sentence.
            full_str = full_str + "<br />" + mystr
            mystr= ""
    full_str = full_str.replace('"', '&quot;')
    return full_str
def printForD3(v, e, etype):
    """Print the interaction graph as a D3 force-layout JSON literal.

    Args:
        v: unused; kept for interface compatibility with existing callers.
        e: iterable of ((source, target), edge_type, weight) tuples.
        etype (str): only edges of this type are emitted.

    Side effects:
        Prints ``graph={"nodes": [...], "links": [...]}`` to stdout.
    """
    # Fix: Python-2-only `print` statement replaced with the py2/py3
    # compatible call form. The original also pre-computed max_weight in a
    # first pass but never used it ("value" is the raw weight), so that
    # dead pass was dropped.
    node_index = {}
    node_json = []
    link_json = []
    for edge in e:
        ((source, target), edge_type, weight) = edge
        if edge_type != etype:
            continue
        # Assign node indexes on first sight, in edge order.
        if source not in node_index:
            node_index[source] = len(node_json)
            node_json.append("{\"name\": \"" + source + "\", \"group\":1}")
        if target not in node_index:
            node_index[target] = len(node_json)
            node_json.append("{\"name\": \"" + target + "\", \"group\":1}")
        link_json.append("{\"source\":" + str(node_index[source]) + ", \"target\":" + str(node_index[target]) + ", \"value\":" + str(weight) + "}")
    print("graph={" + "\"nodes\": [" + ','.join(node_json) + "], \"links\": [" + ','.join(link_json) + "]}")
def printTimeForD3(v, e, etype, graph):
    """Print one D3 graph literal per revision key of *graph*.

    Args:
        v, e: unused; kept for interface compatibility with existing callers.
        etype (str): only edges of this type are emitted.
        graph (dict): revision -> {'links': [(edge_type, source, target,
            weight), ...]}.

    Side effects:
        Prints ``graph={`` followed by ``<i>: {...},`` per revision and a
        closing ``}``.
    """
    # Fix: Python-2-only `print` statements replaced with the py2/py3
    # compatible call form; output is byte-identical.
    node_index = {}
    print("graph={")
    count = 0
    for rev in graph.keys():
        node_json = []
        link_json = []
        for edge in graph[rev]['links']:
            (edge_type, source, target, weight) = edge
            if edge_type != etype:
                continue
            # NOTE(review): node_index survives across revisions while
            # node_json is reset, so a node first seen in an earlier
            # revision is never re-emitted and its stored index no longer
            # matches node_json — looks like a latent bug; behavior kept
            # as-is to preserve the original output.
            if source not in node_index:
                node_index[source] = len(node_json)
                node_json.append("{\"name\": \"" + source + "\", \"group\":1}")
            if target not in node_index:
                node_index[target] = len(node_json)
                node_json.append("{\"name\": \"" + target + "\", \"group\":1}")
            link_json.append("{\"source\":" + str(node_index[source]) + ", \"target\":" + str(node_index[target]) + ", \"value\":" + str(weight) + "}")
        print(str(count) + ": {" + "\"nodes\": [" + ','.join(node_json) + "], \"links\": [" + ','.join(link_json) + "]},")
        count = count + 1
    print("}")
def printSnapshotsForD3(v, e, etype, graph):
    """Print cumulative D3 graph snapshots, one per revision key of *graph*.

    Unlike printTimeForD3, the node and link arrays accumulate across
    revisions, so each printed snapshot contains everything seen so far.

    Args:
        v, e: unused; kept for interface compatibility with existing callers.
        etype (str): only edges of this type are emitted.
        graph (dict): revision -> {'links': [(edge_type, source, target,
            weight), ...]}.
    """
    # Fix: Python-2-only `print` statements replaced with the py2/py3
    # compatible call form; output is byte-identical.
    node_index = {}
    node_json = []
    link_json = []
    print("graph={")
    count = 0
    for rev in graph.keys():
        for edge in graph[rev]['links']:
            (edge_type, source, target, weight) = edge
            if edge_type != etype:
                continue
            if source not in node_index:
                node_index[source] = len(node_json)
                node_json.append("{\"name\": \"" + source + "\", \"group\":1}")
            if target not in node_index:
                node_index[target] = len(node_json)
                node_json.append("{\"name\": \"" + target + "\", \"group\":1}")
            link_json.append("{\"source\":" + str(node_index[source]) + ", \"target\":" + str(node_index[target]) + ", \"value\":" + str(weight) + "}")
        print(str(count) + ": {" + "\"nodes\": [" + ','.join(node_json) + "], \"links\": [" + ','.join(link_json) + "]},")
        count = count + 1
    print("}")
def printForNeo4J(g, v, e):
    """Print a single Neo4J Cypher CREATE statement for the article graph.

    Args:
        g (unicode): article title; becomes one ARTICLE node (spaces in the
            node identifier are replaced with underscores).
        v (dict): editor name -> revision list; each editor becomes an
            EDITOR node plus an EDITED_BY edge from the article.
        e: iterable of ((source, target), edge_type, weight) tuples; each
            becomes a typed, weighted editor-to-editor relationship.

    Side effects:
        Prints ``CREATE ...`` with one pattern per line to stdout.
    """
    # Fix: Python-2-only `print` statement replaced with the py2/py3
    # compatible call form. The .encode("utf-8") calls are kept as-is:
    # under Python 2 they produce byte strings as before (under Python 3
    # they would yield bytes — this function is py2-era code).
    article_tpl = "(%article_id%:ARTICLE {title: '%article%'})"
    vertex_tpl = "(%editor%:EDITOR {name: '%editor%'})"
    edge_tpl = "(%source%)-[:%edge_type% {weight: %weight%}]->(%target%)"
    edge_article_tpl = "(%source%)-[:EDITED_BY {revisions:%revisions%}]->(%target%)"
    statements = []
    # Article node.
    article = article_tpl.replace("%article%", g.encode("utf-8"))
    article = article.replace("%article_id%", g.encode("utf-8").replace(" ", "_"))
    statements.append(article)
    # One node per editor, plus an EDITED_BY edge from the article.
    for editor in v.keys():
        statements.append(vertex_tpl.replace("%editor%", editor.encode("utf-8")))
        link = edge_article_tpl.replace("%source%", g.encode("utf-8").replace(" ", "_"))
        link = link.replace("%target%", editor.encode("utf-8"))
        link = link.replace("%revisions%", str(v[editor]))
        statements.append(link)
    # Typed, weighted editor-to-editor edges.
    for edge in e:
        ((source, target), edge_type, weight) = edge
        stmt = edge_tpl.replace("%source%", source.encode("utf-8"))
        stmt = stmt.replace("%edge_type%", edge_type.upper())
        stmt = stmt.replace("%weight%", str(weight))
        stmt = stmt.replace("%target%", target.encode("utf-8"))
        statements.append(stmt)
    print("CREATE " + ",\n".join(statements))
def printStats(stats, reciprocity):
    """Serialize the per-revision metric curves for the frontend chart.

    Builds one flat list of {Revision, Value, "Wikipedia Revision", Metric}
    points covering four metrics — normalized disagreement actions,
    reciprocity, bipolarity and authorship gini — and returns it embedded
    in the JavaScript snippet ``curve = [...];``.

    Args:
        stats (list): dicts as produced by getGraph(); needs the keys
            'deletion_weight', 'bipolarity', 'wikigini' and 'revision'.
        reciprocity (list): one reciprocity value per entry of *stats*.

    Returns:
        str: ``curve = [...];`` ready to be written to a page.
    """
    # Robustness fixes: the original raised ValueError on empty input
    # (max() of an empty sequence) and ZeroDivisionError when a metric was
    # all zeros; unused leftover accumulators were also removed.
    if not stats or not reciprocity:
        return "curve = [];"
    max_negative = max(s['deletion_weight'] for s in stats) or 1
    max_reciprocity = max(reciprocity) or 1
    deletion_weight = []
    weighted_reciprocity = []
    bipolarity = []
    wikigini = []
    for count, (elem, rec) in enumerate(zip(stats, reciprocity)):
        rev = elem["revision"]
        weighted_reciprocity.append({"Revision": count, "Value": rec / float(max_reciprocity), "Wikipedia Revision": rev, "Metric": "Reciprocity"})
        bipolarity.append({"Revision": count, "Value": elem['bipolarity'], "Wikipedia Revision": rev, "Metric": "Bipolarity"})
        wikigini.append({"Revision": count, "Value": elem['wikigini'], "Wikipedia Revision": rev, "Metric": "Authorship Gini"})
        deletion_weight.append({"Revision": count, "Value": elem['deletion_weight'] / float(max_negative), "Wikipedia Revision": rev, "Metric": "Number of Disagreement Actions (Normalized)"})
    # Same concatenation order as before: disagreement actions first,
    # then reciprocity, bipolarity and gini.
    finalStats = deletion_weight + weighted_reciprocity + bipolarity + wikigini
    return "curve = " + str(finalStats) + ";"
def printGraphD3(stats):
    """Print (to stdout) a JavaScript 'graph' object for D3 rendering.

    For every revision in 'stats', a {nodes, links, revision} record is
    emitted.  Editor-node layout comes from the eigenvectors of the two
    most negative eigenvalues of the adjacency matrix; author-only nodes
    are placed randomly.  Returns one mean weighted-reciprocity score per
    revision (0 when a revision has no reciprocal edges).

    NOTE(review): reads 'colors.txt' from the current working directory
    and writes JavaScript (not strict JSON) to stdout -- Python 2 code.
    """
    window = 50
    # Layout constants (pixel space of the target SVG canvas).
    X_CENTRE = 500
    Y_CENTRE = 200  # 300
    X_UNIT = 700
    Y_UNIT = 1000  # 1000
    AREA = 1050
    ZERO = 100
    AUTHOR_Y_AREA = 800
    # Fixed seed so that node colors are stable across runs.
    colors_file = open("colors.txt", "r")
    colors_svg = colors_file.readlines()
    random.seed(2)
    random.shuffle(colors_svg)
    #print "colors", colors_svg
    nodes_dict = {}
    print "graph={"
    count = 0
    reciprocal_scores = []
    for elem in stats:
        (A, nodes, authDist, totalWords) = elem['adjacency_matrix']
        R = elem['reciprocity_matrix']
        W = elem['weight_matrix']
        C = elem['context']
        antagonistic_focus_avg = elem['antagonistic_focus_avg']
        v_str = []
        e_str = []
        individual_reciprocal_scores = []
        if (len(nodes) > 0):
            # Compute eigenvalues and eigenvectors.
            eigenvalues, eigenvectors = np.linalg.eig(A)
            # Compute the two minimal (negative) eigenvalues.
            copy_eigenvalues = eigenvalues.copy()
            lambda_min = min(eigenvalues)
            copy_eigenvalues[np.argmin(eigenvalues)] = float("inf")
            lambda_min_prime = min(copy_eigenvalues)
            # Compute eigenvectors associated to the minimal eigenvalues.
            x = eigenvectors[:, np.argmin(eigenvalues)]
            y = eigenvectors[:, np.argmin(copy_eigenvalues)]
            # Scale the secondary axis by the ratio of the eigenvalues.
            if ((lambda_min < 0) and (lambda_min_prime < 0)):
                y = y * (lambda_min_prime/lambda_min)
            # Update nodes: editors.
            i = 0
            for node in nodes:
                stroke = "#fff"
                # Get the coordinates from the eigenvectors.
                x_i = x[i].real
                y_i = y[i].real
                # Transpolate the coordinates.
                x_i = min(X_CENTRE + (x_i * X_UNIT), X_CENTRE * 2)
                y_i = min(Y_CENTRE + (y_i * Y_UNIT), Y_CENTRE * 2)
                # Avoid nodes going out of the screen.
                x_i = max(x_i, ZERO)
                y_i = max(y_i, ZERO)
                # Assign a color to the node.
                if node not in nodes_dict.keys():
                    nodes_dict.update({node : len(nodes_dict) +1 })
                group = colors_svg[nodes_dict[node] % len(colors_svg)].rstrip()
                # Compute radio of nodes w.r.t. authorship.
                if (node in authDist.keys()):
                    author_words = authDist[node]
                else:
                    author_words = 0
                v_ratio = author_words / float(totalWords)
                # Mark the author node with black border.
                if (node == elem["author"]):
                    stroke = "#000"
                # Add the node to the JSON structure.
                if (len(nodes)==1):
                    # A lone editor is pinned to the canvas centre.
                    x_i = X_CENTRE
                    y_i = Y_CENTRE
                    v_str.append("{\"name\": \"" + node + "\", \"prop\": \"" + str(author_words) + "\", \"group\": \"" + str(group) + "\", \"x\": " + str(x_i) + ", \"y\":" + str(y_i) + ", \"value\": \"" + str(v_ratio) + "\", \"stroke\": \"" + str(stroke) + "\", \"fixed\": true}")
                #elif (x[i].real == 0 and y[i].real == 0):
                elif (x_i == X_CENTRE and y_i == Y_CENTRE and sum(A[i])==0):
                    # Disconnected editor sitting on the origin: scatter
                    # it horizontally so it does not overlap the centre.
                    v_str.append("{\"name\": \"" + node + "\", \"prop\": \"" + str(author_words) + "\", \"group\": \"" + str(group) + "\", \"x\": " + str(random.randint(100, AREA)) + ", \"y\":" + str(640) + ", \"value\": \"" + str(v_ratio) + "\", \"stroke\": \"" + str(stroke) + "\", \"fixed\": true}")
                else:
                    v_str.append("{\"name\": \"" + node + "\", \"prop\": \"" + str(author_words) + "\", \"group\": \"" + str(group) + "\", \"x\": " + str(x_i) + ", \"y\":" + str(y_i) + ", \"value\": \"" + str(v_ratio) + "\", \"stroke\": \"" + str(stroke) + "\", \"fixed\": true}")
                i = i + 1
            # Update nodes: authors.
            stroke = "#fff"
            for node in authDist.keys():
                # Select authors that are not editors.
                if node not in nodes:
                    # Set the coordinates.
                    x_i = random.randint(100, AREA)
                    y_i = random.randint(100, AREA)
                    # Assign a color to the node.
                    if node not in nodes_dict.keys():
                        nodes_dict.update({node : len(nodes_dict) +1 })
                    group = colors_svg[nodes_dict[node] % len(colors_svg)].rstrip()
                    # Compute radio of nodes w.r.t. authorship.
                    author_words = authDist[node]
                    v_ratio = author_words / float(totalWords)
                    if (count>=window):
                        # Late revisions: park pure authors on a fixed row.
                        v_str.append("{\"name\": \"" + node + "\", \"prop\": \"" + str(author_words) + "\", \"group\": \"" + str(group) + "\", \"x\": " + str(x_i) + ", \"y\":" + str(AUTHOR_Y_AREA) + ", \"value\": \"" + str(v_ratio) + "\", \"stroke\": \"" + str(stroke) + "\", \"fixed\": true}")
                    else:
                        v_str.append("{\"name\": \"" + node + "\", \"prop\": \"" + str(author_words) + "\", \"group\": \"" + str(group) + "\", \"x\": " + str(x_i) + ", \"y\":" + str(y_i) + ", \"value\": \"" + str(v_ratio) + "\", \"stroke\": \"" + str(stroke) + "\", \"fixed\": true}")
            # Update links.
            i = 0
            j = 0
            for i in range(0, len(A)):
                for j in range(i, len(A)):
                    if (A[i][j] > 0):
                        weight = A[i][j]
                        #weight = math.log(W[i][j] + W[j][i])
                        # Check if reciprocity.
                        if ((i, j) in R.keys()) and ((j, i) in R.keys()):
                            #reciprocity = min(R[(i, j)], R[(j, i)])
                            # OLD (naive) version of reciprocity.
                            #reciprocity_percentage = ((2.0*reciprocity)/float(W[i][j] + W[j][i]))
                            #
                            reciprocity_percentage = (min(W[i][j], W[j][i]) / float(max(W[i][j], W[j][i]))) * 0.5
                            aux = (sum(antagonistic_focus_avg[(i,j)]) + sum(antagonistic_focus_avg[(j,i)])) / float(len(antagonistic_focus_avg[(i,j)]) + len(antagonistic_focus_avg[(j,i)])) * 0.5
                            #if count == 105:
                            #    print "reciprocity_percentage", reciprocity_percentage, "aux", aux
                            reciprocity_percentage = reciprocity_percentage + aux
                            individual_reciprocal_scores.append((W[i][j]+W[j][i])*reciprocity_percentage)
                            #reciprocal_scores.append({reciprocity_percentage)
                            contextstats = contextStats(W, C, i, j, reciprocity_percentage)
                            e_str.append("{\"source\":" + str(i) + ", \"target\":" + str(j) + ", \"value\": \""+ str(weight) + "\", \"stroke\": \"" + str("#B51404") + "\", \"opacity\": \"" + str(reciprocity_percentage+0.10) + "\", \"context\": \"" + contextstats + contextToStr(C,i,j, nodes[i], nodes[j]) + "\"}")
                        else:
                            contextstats = contextStats(W, C, i, j, 0)
                            e_str.append("{\"source\":" + str(i) + ", \"target\":" + str(j) + ", \"value\": \""+ str(weight) + "\", \"stroke\": \"" + str("#999") + "\", \"opacity\": \"" + str("0.55") + "\", \"context\": \"" + contextstats+ contextToStr(C,i,j,nodes[i], nodes[j]) + "\"}")
                        #e_str.append("{\"source\": \"" + source + "\", \"target\": \"" + target + "\", \"value\":"+ str(weight) + "}")
        print "" + str(count) + "" + " : {" + "\"nodes\": [" + ','.join(v_str) + "], \"links\": [" + ','.join(e_str) + "]" + ", \"revision\": \"" + str (elem['revision'])+ "\"},"
        count = count + 1
        if (len(individual_reciprocal_scores) > 0):
            reciprocal_scores.append(np.mean(individual_reciprocal_scores))
        else:
            reciprocal_scores.append(0)
    print "};"
    colors_file.close()
    return reciprocal_scores
def contextToStr(C, i, j, name_i, name_j):
    """Render the actions exchanged between editors i and j as HTML.

    C maps ordered editor pairs to {revision_id: [action_text, ...]}.
    Builds a two-column table: one header row naming each direction that
    is present, then one row per revision with that revision's actions
    joined by double line breaks.
    """
    pieces = ["<table class='table'>"]
    forward = (i, j) in C.keys()    # actions from i to j
    backward = (j, i) in C.keys()   # actions from j to i

    # Collect the revision ids of both directions, oldest first.
    revs = []
    if forward:
        revs.extend(C[(i, j)])
    if backward:
        revs.extend(C[(j, i)])
    revs.sort()

    # Header row: label whichever direction(s) exist.
    if forward and backward:
        pieces.append("<tr>")
        pieces.append("<td width=50%>Actions from <b>" + name_i + "</b> to <b>" + name_j + "</b></td>")
        pieces.append("<td width=50%>Actions from <b>" + name_j + "</b> to <b>" + name_i + "</b></td>")
        pieces.append("</tr>")
    elif forward:
        pieces.append("<tr>")
        pieces.append("<td width=50%>Actions from <b>" + name_i + "</b> to <b>" + name_j + "</b></td>")
        pieces.append("<td width=50%></td>")
        pieces.append("</tr>")
    else:
        pieces.append("<tr>")
        pieces.append("<td width=50%>Actions from <b>" + name_j + "</b> to <b>" + name_i + "</b></td>")
        pieces.append("<td width=50%></td>")
        pieces.append("</tr>")

    # One row per revision; actions within a revision are joined by
    # double HTML line breaks.
    for rev_id in revs:
        pieces.append("<tr>")
        if forward and backward:
            if rev_id in C[(i, j)].keys():
                pieces.append("<td>" + "<br /><br />".join(C[(i, j)][rev_id]) + "</td><td></td>")
            if rev_id in C[(j, i)].keys():
                pieces.append("<td></td><td>" + "<br /><br />".join(C[(j, i)][rev_id]) + "</td>")
        elif forward:
            pieces.append("<td>" + "<br /><br />".join(C[(i, j)][rev_id]) + "</td><td></td>")
        else:
            pieces.append("<td>" + "<br /><br />".join(C[(j, i)][rev_id]) + "</td><td></td>")
        pieces.append("</tr>")

    pieces.append("</table>")
    return "".join(pieces)
def contextStats(W, C, i, j, reciprocity_percentage):
    """Build an HTML summary of the interaction between editors i and j.

    W -- matrix of pairwise (disagreement) action counts
    C -- dict mapping ordered editor pairs to {revision_id: [action, ...]}
    reciprocity_percentage -- precomputed reciprocity score to report

    Returns three <b>-labelled HTML lines: total disagreement actions in
    both directions, the reciprocity score, and the number of revisions
    (either direction) in which actions took place.
    """
    w = int(W[i][j]) + int(W[j][i])
    neg = "<b>Number of disagreement actions:</b> " + str(w) + "<br />"
    rec = "<b>Reciprocity score:</b> " + str(reciprocity_percentage) + "<br />"
    # Count revisions with actions in either direction.
    r = 0
    if (i, j) in C.keys():
        r = r + len(C[(i, j)])
    if (j, i) in C.keys():
        r = r + len(C[(j, i)])
    revs = "<b>Number of revisions where actions took place:</b> " + str(r) + "<br /><br />"
    # Fix: removed the unreachable `pass` and a stale commented-out
    # return that followed this return statement.
    return neg + rec + revs
def main(my_argv):
    """Parse command-line options; return (inputfile, gtype, edge, graph).

    With three or fewer argv entries only -i/--ifile is accepted;
    otherwise -i, -t (graph type), -e (edge) and -g (article name) are
    parsed.  Prints usage and exits on a getopt error or on -h.
    """
    inputfile = ''
    graph = None
    gtype = None
    edge = None
    if (len(my_argv) <= 3):
        try:
            opts, _ = getopt.getopt(my_argv,"i:",["ifile=",])
        except getopt.GetoptError:
            print 'Usage: wikigraph.py -i <inputfile> [-g <graph>]'
            exit(2)
    else:
        try:
            opts, _ = getopt.getopt(my_argv,"i:t:e:g:",["ifile=", "type=", "edge=", "graph="])
        except getopt.GetoptError:
            print 'Usage: wikigraph.py -i <inputfile> -t <type_of_graph> -e <type_of_edge> [-g <graph>]'
            exit(2)
    for opt, arg in opts:
        if opt in ('-h', "--help"):
            print "wikigraph"
            print
            print 'Usage: wikigraph.py -i <inputfile> [-g <graph>]'
            print "-i --ifile File to analyze"
            print "-t --type Type of graph (d3, neo4j)"
            print "-e --edge Edge to visualize"
            print "-g --name of the article. If not specified, the parameter i is taken."
            print "-h --help This help."
            exit()
        elif opt in ("-i", "--ifile"):
            inputfile = arg
        elif opt in ("-g", "--graph"):
            graph = arg
        elif opt in ("-e", "--edge"):
            edge = arg
        elif opt in ("-t", "--type"):
            gtype = arg
    return (inputfile,gtype, edge, graph)
if __name__ == '__main__':
    # Parse the CLI, build the interaction graph, then emit it in the
    # requested output format (only "d3" is currently implemented).
    (file_name, gtype, edge, graph_name) = main(argv[1:])
    #print "Calculating authorship for:", file_name
    #time1 = time()
    #v, e, graph = getTimeGraph(file_name)
    statsData = getGraph(file_name)
    if (gtype == "neo4j"):
        # Neo4J export is currently disabled.
        #printForNeo4J(graph_name, v,e)
        pass
    elif (gtype == "d3"):
        # Emit the per-revision graph data, then the summary curves.
        reciprocity = printGraphD3(statsData)
        print printStats(statsData, reciprocity)
        #printForD3(v, e, edge)
        #printTimeForD3(v, e, edge, graph)
        #printSnapshotsForD3(v, e, edge, graph)
        #pass
    else:
        print "Type of graph not supported"
    #print e
    #pprint printStats(stats)
| {
"content_hash": "b1e7da855b32393973c6699be26eef17",
"timestamp": "",
"source": "github",
"line_count": 1334,
"max_line_length": 346,
"avg_line_length": 47.07646176911544,
"alnum_prop": 0.4312579617834395,
"repo_name": "wikiwho/whovis",
"id": "403a5dd2ea6445a5d65ec6ed530dfe7028192447",
"size": "62820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wikigraph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "175050"
}
],
"symlink_target": ""
} |
"""
Bit of pie in progress.
"""
from gi.repository import Gtk as gtk
from lib import graphics
from contrib.euclid import Vector2
import math
class Sector(graphics.Sprite):
    """One wedge of the pie menu: an annular sector between two radii.

    Angles are in radians; the sector spans from start_angle to
    end_angle and is drawn in its own local coordinate system (the
    sprite's `rotation` positions it around the menu centre).
    """
    def __init__(self, inner_radius, outer_radius, start_angle = 0, end_angle = 0):
        graphics.Sprite.__init__(self, interactive = True)
        self.inner_radius = inner_radius
        self.outer_radius = outer_radius
        self.start_angle = start_angle
        self.end_angle = end_angle
        self.fill = None        # set on hover by the owning Menu
        self.stroke = "#aaa"
        self.connect("on-render", self.on_render)

    def on_render(self, sprite):
        # Draw the annular sector: inner arc, radial edge, outer arc.
        angle = self.start_angle - self.end_angle
        self.graphics.arc(0, 0, self.inner_radius, angle, 0)
        if abs(angle) >= math.pi * 2:
            # Full circle: a connecting edge would be degenerate.
            self.graphics.move_to(self.outer_radius, 0)
        else:
            self.graphics.line_to(self.outer_radius, 0)
        self.graphics.arc_negative(0, 0, self.outer_radius, 0, angle)

        if self.fill:
            self.graphics.close_path()

        # just for fun
        self.graphics.move_to(150, -15)
        self.graphics.rectangle(150,-15,10,10)

        self.graphics.fill_stroke(self.fill, self.stroke)
class Menu(graphics.Sprite):
    """A draggable pie menu: a central hub plus a ring of Sector items.

    Clicking any sector adds another item; all sectors are re-laid out
    so they always share the full circle equally.
    """
    def __init__(self, x, y):
        graphics.Sprite.__init__(self, x, y, interactive=True, draggable=True)
        # Central hub circle.
        self.graphics.arc(0, 0, 10, 0, math.pi * 2)
        self.graphics.fill("#aaa")
        self.menu = []
        for i in range(20):
            self.add_item()

    def on_mouse_over(self, sprite):
        # Highlight the hovered sector.
        sprite.fill = "#ddd"

    def on_mouse_out(self, sprite):
        sprite.fill = ""

    def on_click(self, sprite, event):
        self.add_item()

    def add_item(self):
        """Append a new sector and re-distribute all sectors evenly."""
        item = Sector(25, 50, math.pi / 2, 0)
        item.connect("on-mouse-over", self.on_mouse_over)
        item.connect("on-mouse-out", self.on_mouse_out)
        item.connect("on-click", self.on_click)
        self.menu.append(item)
        self.add_child(item)

        # Re-layout: each sector gets an equal slice; radii grow with
        # the number of items so the ring stays readable.
        current_angle = 0
        angle = math.pi * 2 / len(self.menu)
        for i, item in enumerate(self.menu):
            item.start_angle = current_angle
            item.rotation = item.start_angle
            item.end_angle = current_angle + angle  #- angle * 0.1
            item.inner_radius = 25 + len(self.menu) / 2.0  #+ i * 2
            item.outer_radius = 50 + len(self.menu) * 2  #+ i * 2
            current_angle += angle
class Scene(graphics.Scene):
    """Canvas that hosts one Menu and slowly spins it every frame."""
    def __init__(self):
        graphics.Scene.__init__(self)
        self.max_width = 50
        self.menu = Menu(200, 200)
        self.add_child(self.menu)
        self.connect("on-enter-frame", self.on_enter_frame)
        self.framerate = 30

    def on_enter_frame(self, scene, context):
        # turn the menu a bit and queue redraw
        self.menu.rotation += 0.004
        self.redraw()
class BasicWindow:
    """Top-level GTK window wrapping the demo Scene."""
    def __init__(self):
        window = gtk.Window()
        window.set_size_request(400, 400)
        # Quit the GTK main loop when the window is closed.
        window.connect("delete_event", lambda *args: gtk.main_quit())
        self.scene = Scene()
        window.add(self.scene)
        window.show_all()
if __name__ == "__main__":
    # Build the window and hand control to the GTK main loop.
    example = BasicWindow()
    import signal
    signal.signal(signal.SIGINT, signal.SIG_DFL)  # gtk3 screws up ctrl+c
    gtk.main()
| {
"content_hash": "f85d320dfef43ca0ff10defe20b904d9",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 83,
"avg_line_length": 28.68695652173913,
"alnum_prop": 0.5813882994846923,
"repo_name": "projecthamster/experiments",
"id": "baa82ea0d2bf7eceefc7ce00a66c579a2ebf3b67",
"size": "3402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pie_menu.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "734313"
}
],
"symlink_target": ""
} |
from datetime import datetime as Datetime, timezone as Timezone
from warnings import filterwarnings
import pytest
import pg8000
from pg8000.converters import INET_ARRAY, INTEGER
# Tests relating to the basic operation of the database driver, driven by the
# pg8000 custom interface.
@pytest.fixture
def db_table(request, con):
    """Connection fixture with a temporary table t1(f1 int pk, f2 bigint, f3 varchar).

    Sets paramstyle to "format" and registers a finalizer that drops t1
    (ignoring the error if it was already dropped by the test).
    """
    filterwarnings("ignore", "DB-API extension cursor.next()")
    filterwarnings("ignore", "DB-API extension cursor.__iter__()")
    con.paramstyle = "format"
    with con.cursor() as cursor:
        cursor.execute(
            "CREATE TEMPORARY TABLE t1 (f1 int primary key, "
            "f2 bigint not null, f3 varchar(50) null) "
        )

    def fin():
        try:
            with con.cursor() as cursor:
                cursor.execute("drop table t1")
        except pg8000.ProgrammingError:
            pass

    request.addfinalizer(fin)
    return con
def test_database_error(cursor):
    """Inserting into a missing table raises ProgrammingError."""
    with pytest.raises(pg8000.ProgrammingError):
        cursor.execute("INSERT INTO t99 VALUES (1, 2, 3)")
def test_parallel_queries(db_table):
    """Two cursors on one connection can interleave open result sets."""
    with db_table.cursor() as cursor:
        cursor.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (1, 1, None))
        cursor.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (2, 10, None))
        cursor.execute(
            "INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (3, 100, None)
        )
        cursor.execute(
            "INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (4, 1000, None)
        )
        cursor.execute(
            "INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (5, 10000, None)
        )
    with db_table.cursor() as c1, db_table.cursor() as c2:
        c1.execute("SELECT f1, f2, f3 FROM t1")
        for row in c1:
            f1, f2, f3 = row
            # Re-query on the second cursor while the first is still open.
            c2.execute("SELECT f1, f2, f3 FROM t1 WHERE f1 > %s", (f1,))
            for row in c2:
                f1, f2, f3 = row
def test_parallel_open_portals(con):
    """Two portals open on the same query each see the full result set."""
    with con.cursor() as c1, con.cursor() as c2:
        c1count, c2count = 0, 0
        q = "select * from generate_series(1, %s)"
        params = (100,)
        c1.execute(q, params)
        c2.execute(q, params)
        for c2row in c2:
            c2count += 1
        for c1row in c1:
            c1count += 1

    assert c1count == c2count
# Run a query on a table, alter the structure of the table, then run the
# original query again.
def test_alter(db_table):
    """Re-running a query works after the table structure is altered."""
    with db_table.cursor() as cursor:
        cursor.execute("select * from t1")
        cursor.execute("alter table t1 drop column f3")
        cursor.execute("select * from t1")
# Run a query on a table, drop then re-create the table, then run the
# original query again.
def test_create(db_table):
    """Re-running a query works after the table is dropped and re-created."""
    with db_table.cursor() as cursor:
        cursor.execute("select * from t1")
        cursor.execute("drop table t1")
        cursor.execute("create temporary table t1 (f1 int primary key)")
        cursor.execute("select * from t1")
def test_insert_returning(db_table):
    """INSERT ... RETURNING yields ids and a correct rowcount."""
    with db_table.cursor() as cursor:
        cursor.execute("CREATE TABLE t2 (id serial, data text)")

        # Test INSERT ... RETURNING with one row...
        cursor.execute("INSERT INTO t2 (data) VALUES (%s) RETURNING id", ("test1",))
        row_id = cursor.fetchone()[0]
        cursor.execute("SELECT data FROM t2 WHERE id = %s", (row_id,))
        assert "test1" == cursor.fetchone()[0]
        assert cursor.rowcount == 1

        # Test with multiple rows...
        cursor.execute(
            "INSERT INTO t2 (data) VALUES (%s), (%s), (%s) " "RETURNING id",
            ("test2", "test3", "test4"),
        )
        assert cursor.rowcount == 3
        ids = tuple([x[0] for x in cursor])
        assert len(ids) == 3
def test_row_count(db_table):
    """rowcount is stable before, during, and after fetching results."""
    with db_table.cursor() as cursor:
        expected_count = 57
        cursor.executemany(
            "INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)",
            tuple((i, i, None) for i in range(expected_count)),
        )

        # Check rowcount after executemany
        assert expected_count == cursor.rowcount

        cursor.execute("SELECT * FROM t1")

        # Check row_count without doing any reading first...
        assert expected_count == cursor.rowcount

        # Check rowcount after reading some rows, make sure it still
        # works...
        for i in range(expected_count // 2):
            cursor.fetchone()
        assert expected_count == cursor.rowcount

    with db_table.cursor() as cursor:
        # Restart the cursor, read a few rows, and then check rowcount
        # again...
        cursor.execute("SELECT * FROM t1")
        for i in range(expected_count // 3):
            cursor.fetchone()
        assert expected_count == cursor.rowcount

        # Should be -1 for a command with no results
        cursor.execute("DROP TABLE t1")
        assert -1 == cursor.rowcount
def test_row_count_update(db_table):
    """rowcount reflects the number of rows matched by an UPDATE."""
    with db_table.cursor() as cursor:
        cursor.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (1, 1, None))
        cursor.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (2, 10, None))
        cursor.execute(
            "INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (3, 100, None)
        )
        cursor.execute(
            "INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (4, 1000, None)
        )
        cursor.execute(
            "INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (5, 10000, None)
        )
        cursor.execute("UPDATE t1 SET f3 = %s WHERE f2 > 101", ("Hello!",))
        assert cursor.rowcount == 2
def test_int_oid(cursor):
    """Regression test: an int parameter compared against an oid column."""
    # https://bugs.launchpad.net/pg8000/+bug/230796
    cursor.execute("SELECT typname FROM pg_type WHERE oid = %s", (100,))
def test_unicode_query(cursor):
    """Non-ASCII (Cyrillic) identifiers are accepted in DDL."""
    cursor.execute(
        "CREATE TEMPORARY TABLE \u043c\u0435\u0441\u0442\u043e "
        "(\u0438\u043c\u044f VARCHAR(50), "
        "\u0430\u0434\u0440\u0435\u0441 VARCHAR(250))"
    )
def test_executemany(db_table):
    """executemany handles mixed NULLs and tz-aware/naive datetimes."""
    with db_table.cursor() as cursor:
        cursor.executemany(
            "INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)",
            ((1, 1, "Avast ye!"), (2, 1, None)),
        )

        cursor.executemany(
            "SELECT CAST(%s AS TIMESTAMP)",
            ((Datetime(2014, 5, 7, tzinfo=Timezone.utc),), (Datetime(2014, 5, 7),)),
        )
def test_executemany_setinputsizes(cursor):
    """Make sure that setinputsizes works for all the parameter sets"""
    cursor.execute(
        "CREATE TEMPORARY TABLE t1 (f1 int primary key, f2 inet[] not null) "
    )
    cursor.setinputsizes(INTEGER, INET_ARRAY)
    cursor.executemany(
        "INSERT INTO t1 (f1, f2) VALUES (%s, %s)", ((1, ["1.1.1.1"]), (2, ["0.0.0.0"]))
    )
def test_executemany_no_param_sets(cursor):
    """executemany with an empty sequence of parameter sets is a no-op."""
    cursor.executemany("INSERT INTO t1 (f1, f2) VALUES (%s, %s)", [])
    assert cursor.rowcount == -1
# Check that autocommit stays off
# We keep track of whether we're in a transaction or not by using the
# READY_FOR_QUERY message.
def test_transactions(db_table):
    """Autocommit stays off: a rollback undoes the preceding insert."""
    with db_table.cursor() as cursor:
        cursor.execute("commit")
        cursor.execute(
            "INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (1, 1, "Zombie")
        )
        cursor.execute("rollback")
        cursor.execute("select * from t1")

        assert cursor.rowcount == 0
def test_in(cursor):
    """An array parameter works with the any() operator."""
    cursor.execute("SELECT typname FROM pg_type WHERE oid = any(%s)", ([16, 23],))
    ret = cursor.fetchall()
    assert ret[0][0] == "bool"
def test_no_previous_tpc(con):
    """A two-phase-commit cycle works without any prior TPC state."""
    con.tpc_begin("Stacey")
    with con.cursor() as cursor:
        cursor.execute("SELECT * FROM pg_type")
    con.tpc_commit()
# Check that tpc_recover() doesn't start a transaction
def test_tpc_recover(con):
    """tpc_recover() must not start a transaction."""
    con.tpc_recover()
    with con.cursor() as cursor:
        con.autocommit = True

        # If tpc_recover() has started a transaction, this will fail
        cursor.execute("VACUUM")
def test_tpc_prepare(con):
    """A prepared two-phase transaction can be rolled back by xid."""
    xid = "Stacey"
    con.tpc_begin(xid)
    con.tpc_prepare()
    con.tpc_rollback(xid)
def test_empty_query(cursor):
    """No exception raised"""
    cursor.execute("")
# rolling back when not in a transaction doesn't generate a warning
def test_rollback_no_transaction(con):
    """rollback() outside a transaction produces no server notice."""
    # Remove any existing notices
    con.notices.clear()

    # First, verify that a raw rollback does produce a notice
    con.execute_unnamed("rollback")

    assert 1 == len(con.notices)

    # 25P01 is the code for no_active_sql_transaction. It has
    # a message and severity name, but those might be
    # localized/depend on the server version.
    assert con.notices.pop().get(b"C") == b"25P01"

    # Now going through the rollback method doesn't produce
    # any notices because it knows we're not in a transaction.
    con.rollback()

    assert 0 == len(con.notices)
def test_context_manager_class(con):
    """Cursor implements the context-manager protocol directly."""
    assert "__enter__" in pg8000.legacy.Cursor.__dict__
    assert "__exit__" in pg8000.legacy.Cursor.__dict__

    with con.cursor() as cursor:
        cursor.execute("select 1")
def test_close_prepared_statement(con):
    """Closing a prepared statement removes it from the server."""
    ps = con.prepare("select 1")
    ps.run()
    res = con.run("select count(*) from pg_prepared_statements")
    assert res[0][0] == 1  # Should have one prepared statement

    ps.close()

    res = con.run("select count(*) from pg_prepared_statements")
    assert res[0][0] == 0  # Should have no prepared statements
def test_setinputsizes(con):
    """setinputsizes with an oid value does not break NULL parameters."""
    cursor = con.cursor()
    cursor.setinputsizes(20)
    cursor.execute("select %s", (None,))
    retval = cursor.fetchall()
    assert retval[0][0] is None
def test_setinputsizes_class(con):
    """setinputsizes with a Python type does not break NULL parameters."""
    cursor = con.cursor()
    cursor.setinputsizes(bytes)
    cursor.execute("select %s", (None,))
    retval = cursor.fetchall()
    assert retval[0][0] is None
def test_unexecuted_cursor_rowcount(con):
    """A cursor that has not executed anything reports rowcount -1."""
    cursor = con.cursor()
    assert cursor.rowcount == -1
def test_unexecuted_cursor_description(con):
    """A cursor that has not executed anything has no description."""
    cursor = con.cursor()
    assert cursor.description is None
def test_not_parsed_if_no_params(mocker, cursor):
    """Statements without parameters skip paramstyle conversion."""
    mock_convert_paramstyle = mocker.patch("pg8000.legacy.convert_paramstyle")
    cursor.execute("ROLLBACK")
    mock_convert_paramstyle.assert_not_called()
| {
"content_hash": "2dfd85faf1dca96d2f1659b0eb9b729e",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 88,
"avg_line_length": 30.3,
"alnum_prop": 0.6026985051446321,
"repo_name": "tlocke/pg8000",
"id": "e76f93339b364f88414a5332477c58b582cdfd46",
"size": "10302",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/legacy/test_query.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "331262"
}
],
"symlink_target": ""
} |
"""
Coalescent methods
A note about population size. In this code all population sizes N or n are
uncorrected. If you need to compute a coalescent for a diploid species
you must multiply N by 2 before passing it to any of these functions.
"""
#=============================================================================
# imports
from __future__ import division
# python imports
from itertools import chain, izip
from math import exp, log, sqrt
import random
# rasmus imports
from rasmus import treelib, stats, util, linked_list
try:
from rasmus.symbolic import assign_vars
from rasmus.symbolic import derivate
from rasmus.symbolic import simplify
except ImportError:
# only experimental functions need symbolic
pass
# compbio imports
from . import birthdeath
# import root finder
try:
from scipy.optimize import brentq
brentq
except ImportError:
def brentq(f, a, b, disp=False):
return stats.bisect_root(f, a, b)
#=============================================================================
# single coalescent PDFs, CDFs, and sampling functions
def prob_coal(t, k, n):
    """
    Probability density that the first coalescence among 'k' lineages in
    a population of size 'n' occurs at generation 't'.

    The waiting time is exponential with rate C(k, 2) / n.
    """
    pairs = k * (k - 1) / 2   # k choose 2
    rate = pairs / n
    return rate * exp(-rate * t)
def sample_coal(k, n):
    """
    Draw a random waiting time until the next coalescence of 'k'
    lineages in a population of size 'n'.

    Exponentially distributed with rate C(k, 2) / n.
    """
    rate = (k * (k - 1) / 2) / n
    return random.expovariate(rate)
def sample_coal_times(k, n):
    """
    Returns a sampling of (k-1) coalescences for 'k' lineages in a
    population of size 'n'.

    Each returned value is the cumulative age of that coalescence
    (measured from the present), not the waiting time between events.
    """
    times = [0]
    # Walk down from k lineages to 2, accumulating waiting times.
    for j in xrange(k, 1, -1):
        times.append(times[-1] + sample_coal(j, n))
    return times[1:]
def prob_coal_counts(a, b, t, n):
    """
    The probability of going from 'a' lineages to 'b' lineages in time 't'
    with population size 'n'
    """
    if b <= 0:
        return 0.0

    # First term (k == b) of the sum, with its combinatorial constant C.
    C = stats.prod((b+y)*(a-y)/(a+y) for y in xrange(b))
    s = exp(-b*(b-1)*t/2.0/n) * C
    # Remaining terms update C incrementally rather than recomputing the
    # full product for each k (compare prob_coal_counts_slow).
    for k in xrange(b+1, a+1):
        k1 = k - 1
        C = (b+k1)*(a-k1)/(a+k1)/(b-k) * C
        s += exp(-k*k1*t/2.0/n) * (2*k-1) / (k1+b) * C
    return s / stats.factorial(b)
def prob_coal_counts_slow(a, b, t, n):
    """
    The probability of going from 'a' lineages to 'b' lineages in time 't'
    with population size 'n'

    Implemented more directly, but slower. Good for testing against.
    """
    s = 0.0
    # Direct evaluation of the alternating sum, one full term per k.
    for k in xrange(b, a+1):
        i = exp(-k*(k-1)*t/2.0/n) * \
            (2*k-1)*(-1)**(k-b) / stats.factorial(b) / \
            stats.factorial(k-b) / (k+b-1) * \
            stats.prod((b+y)*(a-y)/(a+y) for y in xrange(k))
        s += i
    return s
def prob_coal_cond_counts(x, a, b, t, n):
    """
    Returns the probability density of a coalescent happening at time 'x'
    between 'a' lineages conditioned on there being 'b' lineages at time
    't'. The population size is 'n'.
    """
    # Negated coalescence rate for a lineages: -C(a,2)/n.
    lama = -a*(a-1)/2.0/n
    C = stats.prod((b+y)*(a-1-y)/(a-1+y) for y in xrange(b))
    s = exp(-b*(b-1)/2.0/n*(t-x) + lama*x) * C
    for k in xrange(b+1, a):
        k1 = k - 1
        lam = -k*k1/2.0/n
        C = (b+k1)*(a-1-k1)/(a-1+k1)/(b-k) * C
        s += exp(lam*t + (lama-lam)*x) * (2*k-1) / (k1+b) * C
    # Normalize by the unconditional a -> b transition probability.
    return s / stats.factorial(b) * (-lama) / prob_coal_counts(a, b, t, n)
def prob_coal_cond_counts_simple(x, a, b, t, n):
    """
    Returns the probability density of a coalescent happening at time 'x'
    between 'a' lineages conditioned on there being 'b' lineages at time
    't'. The population size is 'n'.

    Direct Bayes-style formulation; slower but simpler than
    prob_coal_cond_counts and useful for testing against.
    """
    return (prob_coal_counts(a-1, b, t-x, n) * prob_coal(x, a, n) /
            prob_coal_counts(a, b, t, n))
def cdf_coal_cond_counts(x, a, b, t, n):
    """
    Returns the probability a coalescent happening *before* time 'x'
    between 'a' lineages conditioned on there being 'b' lineages at time
    't'. The population size is 'n'.
    """
    # Negated coalescence rate for a lineages: -C(a,2)/n.
    lama = -a*(a-1)/2.0/n
    C = stats.prod((b+y)*(a-1-y)/(a-1+y) for y in xrange(b))
    c = -b*(b-1)/2.0/n
    # Each term is the closed-form integral over [0, x] of the matching
    # density term in prob_coal_cond_counts.
    s = exp(c*t) * (exp((lama-c)*x)-1.0) / (lama-c) * C
    for k in xrange(b+1, a):
        k1 = k - 1
        lam = -k*k1/2.0/n
        C = (b+k1)*(a-1-k1)/(a-1+k1)/(b-k) * C
        s += (exp(lam*t) * (exp((lama-lam)*x) - 1.0) / (lama - lam)
              * (2*k-1) / (k1+b) * C)
    return s / stats.factorial(b) * (-lama) / prob_coal_counts(a, b, t, n)
def sample_coal_cond_counts(a, b, t, n):
    """
    Samples the next coalescent between 'a' lineages in a population size of
    'n', conditioned on there being 'b' lineages at time 't'.

    Uses inverse-transform sampling: solves CDF(x) = p for a uniform p
    with a root finder over [0, t].
    """
    # this code solves this equation for t
    #   cdf(t) - p = 0
    # where p ~ U(0, 1)

    p = random.random()

    # compute constants
    lama = -a*(a-1)/2.0/n
    C0 = stats.prod((b+y)*(a-1-y)/(a-1+y) for y in xrange(b))
    c = -b*(b-1)/2.0/n
    d = 1.0/stats.factorial(b) * (-lama) / prob_coal_counts(a, b, t, n)

    # CDF(t) - p
    def f(x):
        if x <= 0:
            # Extend linearly below 0 so the root finder can bracket.
            return x - p
        if x >= t:
            # Extend linearly above t for the same reason.
            return 1.0 - p + (x - t)

        C = C0
        s = exp(c*t) * (exp((lama-c)*x)-1.0) / (lama-c) * C
        for k in xrange(b+1, a):
            k1 = k - 1
            lam = -k*k1/2.0/n
            C = (b+k1)*(a-1-k1)/(a-1+k1)/(b-k) * C
            s += (exp(lam*t) * (exp((lama-lam)*x) - 1.0) / (lama - lam)
                  * (2*k-1) / (k1+b) * C)
        return s * d - p

    return brentq(f, 0.0, t, disp=False)
def prob_mrca(t, k, n):
    """
    Probability density function of the age 't' of the most recent
    common ancestor (MRCA) of 'k' lineages in a population size 'n'
    """
    s = 0.0
    # Mixture of exponentials, one per ancestral lineage count i+1.
    for i in xrange(1, k):
        lam = (i+1) * i / 2.0 / n
        s += lam * exp(- lam * t) * mrca_const(i, 1, k-1)
    return s
def cdf_mrca(t, k, n):
    """
    Cumulative probability that the most recent common ancestor (MRCA)
    of 'k' lineages in a population of size 'n' is younger than age 't'.

    A single lineage is trivially its own MRCA, so k == 1 returns 1.0.
    """
    if k == 1:
        return 1.0

    s = 0.0
    # Fix: use 'range' instead of the Python-2-only 'xrange'; iteration
    # behavior is identical and the function now also runs on Python 3.
    for i in range(1, k+1):
        # Coalescence rate while i lineages remain: C(i, 2) / n.
        lam = i * (i-1) / (2.0 * n)
        p = 1.0
        for y in range(1, i):
            p *= (y-k) / (k+y)
        s += exp(-lam * t) * (2*i - 1) * p
    return s
def mrca_const(i, a, b):
    """A constant used in calculating MRCA.

    Product over j in [a, b] (skipping j == i) of C(j+1,2) /
    (C(j+1,2) - C(i+1,2)) -- the partial-fraction coefficient for the
    i-th exponential term of the MRCA distribution.
    """
    # i+1 choose 2
    y = (i+1) * i / 2.0
    prod = 1.0

    # Fix: use 'range' instead of the Python-2-only 'xrange'; iteration
    # behavior is identical and the function now also runs on Python 3.
    for j in range(a, b+1):
        if j == i:
            continue
        # j+1 choose 2
        x = (j+1) * j / 2.0
        prod *= x / (x - y)
    return prod
def prob_bounded_coal(t, k, n, T):
    """
    Probability density function of seeing a coalescence at 't' from
    'k' lineages in a population of size 'n', conditioned on the MRCA
    occurring before the bounding time 'T'.

    Returns 0.0 for times beyond the bound.
    """
    if t > T:
        return 0.0

    # Fix: the original computed `prob_coal(t, k, n)` for k == 2 and
    # discarded the result (a no-op).  The general expression below
    # already reduces to the truncated exponential when k == 2, since
    # cdf_mrca(T-t, 1, n) == 1, so the dead statement was removed
    # without changing behavior.
    return (prob_coal(t, k, n) * cdf_mrca(T-t, k-1, n) /
            cdf_mrca(T, k, n))
def cdf_bounded_coal(t, k, n, T):
    """
    Cumulative density function of seeing a coalescence at 't' from
    'k' lineages in a population of size 'n' with bounding time 'T'
    """
    i = k - 1
    # Coalescence rate with k lineages: C(k, 2) / n.
    lam_i = (i+1)*i/2.0 / n
    C = [mrca_const(j, 1, i-1) for j in xrange(1, i)]
    #A = lam_i / n / cdf_mrca(T, k, n)
    B = sum(C) / lam_i
    # Precomputed coefficients of the exponential terms.
    F = [C[j-1] * exp(-(j+1)*j/2.0/n * T) / ((j+1)*j/2.0/n - lam_i)
         for j in xrange(1, i)]

    return (lam_i / cdf_mrca(T, k, n) *
            (B * (1-exp(-lam_i * t))
             - sum(F[j-1] * (exp(((j+1)*j/2.0/n - lam_i)*t)-1)
                   for j in xrange(1, i))))
def sample_bounded_coal(k, n, T):
    """
    Sample a coalescent time 't' for 'k' lineages and population 'n'
    on the condition that the MRCA is before 'T'

    Uses inverse-transform sampling on cdf_bounded_coal via a root
    finder over [0, T]; k == 2 has a closed form.
    """
    # special case
    if k == 2:
        return sample_bounded_coal2(n, T)

    # this code solves this equation for t
    #   cdf(t) - p = 0
    # where p ~ U(0, 1)

    i = k - 1
    p = random.random()

    # compute constants
    lam_i = (i+1)*i/2.0 / n
    C = [mrca_const(j, 1, i-1) for j in xrange(1, i)]
    A = lam_i / cdf_mrca(T, k, n)
    B = sum(C) / lam_i
    F = [C[j-1] * exp(-(j+1)*j/2.0/n * T) / ((j+1)*j/2.0/n - lam_i)
         for j in xrange(1, i)]

    # CDF(t) - p
    def f(t):
        if t <= 0:
            # Extend linearly below 0 so the root finder can bracket.
            return t - p
        if t >= T:
            # Extend linearly above T for the same reason.
            return 1.0 - p + (t - T)
        return ((A * (B * (1-exp(-lam_i * t))
                - sum(F[j-1] * (exp(((j+1)*j/2.0/n - lam_i)*t)-1)
                      for j in xrange(1, i)))) - p)

    return brentq(f, 0.0, T, disp=False)
def sample_bounded_coal2(n, T):
    """
    Sample a coalescence time for exactly two lineages in a population
    of size 'n', conditioned on the MRCA occurring before 'T'.

    Inverse-CDF draw from an exponential distribution truncated at T:
    a uniform value on [exp(-T/n), 1] maps back onto [0, T].
    """
    rate = 1 / n
    lower = exp(-rate * T)
    u = random.uniform(lower, 1.0)
    return -log(u) / rate
def sample_bounded_coal_reject(k, n, T):
    """
    Sample a coalescent time 't' for 'k' lineages and population 'n'
    on the condition that the MRCA is before 'T'

    Uses rejection sampling. It works but is very inefficient.
    """
    i = k - 1
    consts = [mrca_const(j, 1, i-1) for j in xrange(1, i)]
    x = sum(consts)

    while True:
        # Propose an unconditioned coalescence time within the bound.
        while True:
            t = sample_coal(k, n)
            if t < T:
                break

        if i == 1:
            return t

        # Accept with probability proportional to the chance that the
        # remaining lineages still reach their MRCA before T.
        y = sum(mrca_const(j, 1, i-1) * exp(-((j+1) * j / 2.0 / n) * (T - t))
                for j in xrange(1, i))
        r = 1 - y / x

        if random.random() < r:
            return t
def count_lineages_per_branch(tree, recon, stree):
    """
    Returns the count of gene lineages present at each node in the species
    tree 'stree' given a gene tree 'tree' and reconciliation 'recon'

    Each species node maps to a two-element list: index 0 is the number
    of gene lineages entering the branch (bottom), index 1 is the count
    after the coalescences on that branch (i.e. bottom minus the number
    of gene-tree internal nodes reconciled to it).
    """
    # init lineage counts
    lineages = {}
    for snode in stree:
        lineages[snode] = [0, 0]

    for node in tree.postorder():
        snode = recon[node]
        if node.is_leaf():
            lineages[snode][0] += 1  # leaf lineage
        else:
            lineages[snode][1] -= 1  # coal

    for snode in stree.postorder():
        if not snode.is_leaf():
            # Lineages entering an internal branch are those surviving
            # the children's branches.
            lineages[snode][0] = sum(lineages[x][1] for x in snode.children)
            lineages[snode][1] += lineages[snode][0]

    return lineages
def get_topology_stats(tree, recon, stree):
    """
    Compute bookkeeping terms used by the topology probability
    calculations.

    Returns a pair (nodes_per_species, descend_nodes):
    nodes_per_species counts the branching gene nodes mapped to each
    species node; descend_nodes counts, per internal gene node, itself
    plus descendants that reconcile to the same species node.
    """
    # how many branching gene nodes map to each species node
    per_species = dict.fromkeys(stree, 0)
    # per-gene-node count of same-species descendants (self included)
    same_sp_descendants = {}
    for gnode in tree.postorder():
        snode = recon[gnode]
        if len(gnode.children) > 1:
            per_species[snode] += 1
        if not gnode.is_leaf():
            total = 1
            for child in gnode.children:
                if recon[child] == snode:
                    total += same_sp_descendants.get(child, 0)
            same_sp_descendants[gnode] = total
    return per_species, same_sp_descendants
def prob_multicoal_recon_topology(tree, recon, stree, n,
                                  lineages=None, top_stats=None):
    """
    Returns the log probability of a reconciled gene tree ('tree', 'recon')
    from the coalescent model given a species tree 'stree' and
    population sizes 'n'

    lineages  -- precomputed result of count_lineages_per_branch (optional)
    top_stats -- precomputed result of get_topology_stats (optional)
    """
    popsizes = init_popsizes(stree, n)
    if lineages is None:
        lineages = count_lineages_per_branch(tree, recon, stree)
    if top_stats is None:
        top_stats = get_topology_stats(tree, recon, stree)

    # iterate through species tree branches
    lnp = 0.0  # log probability
    for snode in stree.postorder():
        if snode.parent:
            # non root branch
            a, b = lineages[snode]
            try:
                # log[ P(a -> b lineages over branch length)
                #      * (orderings of coal events) / (labeled histories) ]
                p = (util.safelog(prob_coal_counts(a, b, snode.dist,
                                                   popsizes[snode.name]))
                     + stats.logfactorial(top_stats[0].get(snode, 0))
                     - log(num_labeled_histories(a, b)))
            except Exception:
                # was a bare 'except:', which would also trap
                # KeyboardInterrupt/SystemExit; dump inputs and re-raise
                print (a, b, snode.name, snode.dist, popsizes[snode.name],
                       prob_coal_counts(a, b, snode.dist,
                                        popsizes[snode.name]),
                       )
                raise
            lnp += p
        else:
            # root branch: all 'a' entering lineages eventually coalesce
            a = lineages[snode][0]
            lnp += (stats.logfactorial(top_stats[0].get(snode, 0)) -
                    log(num_labeled_histories(a, 1)))

    # correct for equivalent labelings of gene subtrees that map to the
    # same species node
    for node, cnt in top_stats[1].iteritems():
        lnp -= log(cnt)

    return lnp
def cdf_mrca_bounded_multicoal(gene_counts, T, stree, n,
                               sroot=None, sleaves=None, stimes=None,
                               tree=None, recon=None):
    """
    What is the log probability that multispecies coalescent in species
    tree 'stree' with population sizes 'n' and extant gene counts 'gene_counts'
    will have a MRCA that occurs in branch 'sroot' before time 'T'.

    As a convenience, you can pass None for gene_counts and give a reconciled
    gene tree instead ('tree', 'recon').
    """
    # determine active part of species tree
    if sroot is None:
        sroot = stree.root
    if sleaves is None:
        sleaves = set(sroot.leaves())
    if len(sleaves) == 0:
        # empty subtree: log probability 0.0 (probability 1)
        return 0.0

    # init gene counts
    if gene_counts is None:
        if tree is None:
            # default: one gene per species leaf
            gene_counts = dict.fromkeys([x.name for x in sleaves], 1)
        else:
            # tally genes per species from the reconciled gene tree
            gene_counts = dict.fromkeys([x.name for x in sleaves], 0)
            for leaf in tree.leaves():
                gene_counts[recon[leaf].name] += 1

    popsizes = init_popsizes(stree, n)

    # get time to MRCA above sroot
    if stimes is None:
        stimes = treelib.get_tree_timestamps(stree, sroot, sleaves)

    # use dynamic programming to calc prob of lineage counts
    prob_counts = calc_prob_counts_table(gene_counts, T, stree, popsizes,
                                         sroot, sleaves, stimes)

    # probability that exactly one lineage remains at the top of sroot
    return util.safelog(prob_counts[sroot][1][1])
def calc_prob_counts_table(gene_counts, T, stree, popsizes,
                           sroot, sleaves, stimes):
    """
    Dynamic program over the species tree computing, for every species
    node, the distribution of gene lineage counts at the bottom (start)
    and top (end) of its branch.

    Returns dict: prob_counts[node] = [start, end], where start[k] and
    end[k] give the probability of exactly k lineages.
    """
    # use dynamic programming to calc prob of lineage counts
    # format: prob_counts[node] = [a, b]
    prob_counts = {}

    def walk(node):
        # returns M, the maximum possible lineage count in this branch
        if node in sleaves:
            # leaf case: count is known exactly
            M = gene_counts[node.name]

            # populate starting lineage counts
            start = [0.0] * (M+1)
            start[M] = 1.0

        elif len(node.children) == 2:
            # internal node case with 2 children
            c1 = node.children[0]
            c2 = node.children[1]
            M1 = walk(c1)
            M2 = walk(c2)
            M = M1 + M2  # max lineage counts in this snode
            end1 = prob_counts[c1][1]
            end2 = prob_counts[c2][1]

            # populate starting lineage counts:
            # convolution of the two children's ending distributions
            start = [0.0, 0.0]
            for k in xrange(2, M+1):
                start.append(sum(end1[i] * end2[k-i]
                                 for i in xrange(1, k)
                                 if i <= M1 and k-i <= M2))

        elif len(node.children) == 1:
            # single child case
            c1 = node.children[0]
            M1 = walk(c1)
            M = M1  # max lineage counts in this snode
            end1 = prob_counts[c1][1]

            # populate starting lineage counts with child's ending counts
            start = [0.0]
            for k in xrange(1, M+1):
                start.append(end1[k])

        else:
            # unhandled case
            raise Exception("not implemented")

        # populate ending lineage counts
        n = popsizes[node.name]
        ptime = stimes[node.parent] if node.parent else T
        if ptime is None:
            # unbounded end time, i.e. complete coalescence
            end = [0.0, 1.0] + [0.0] * (M-1)
        else:
            # fixed end time
            t = ptime - stimes[node]

            end = [0.0]
            for k in xrange(1, M+1):
                end.append(
                    sum(prob_coal_counts(i, k, t, n) * start[i]
                        for i in xrange(k, M+1)))

        prob_counts[node] = [start, end]
        # sanity check: the start distribution must be (nearly) normalized
        assert abs(sum(start) - 1.0) < .001, (start, node.children)

        return M
    walk(sroot)

    return prob_counts
def prob_coal_bmc(t, u, utime, ucount, gene_counts, T, stree, n,
                  sroot=None, sleaves=None, stimes=None,
                  tree=None, recon=None):
    """
    The PDF of the waiting time 't' for the next coalescent event in species
    branch 'u' within a bounded multispecies coalescent (BMC) process.

    u      -- species branch in which the next coalescence occurs
    utime  -- time of the previous coalescent event in 'u'
    ucount -- number of lineages present in 'u' at time 'utime'
    """
    # NOTE: not implemented efficiently

    if sroot is None:
        sroot = stree.root

    # find relevent leaves of stree (u should be treated as a leaf)
    if sleaves is None:
        sleaves = set()

        def walk(node):
            if node.is_leaf() or node == u:
                sleaves.add(node)
            else:
                for child in node.children:
                    walk(child)
        walk(sroot)

    # find timestamps of stree nodes
    if stimes is None:
        # modify timestamp of u to be that of the previous coal (utime)
        stimes = {u: utime}
        stimes = treelib.get_tree_timestamps(stree, sroot, sleaves, stimes)

    # init gene counts
    if gene_counts is None:
        if tree is None:
            gene_counts = dict.fromkeys([x.name for x in sleaves], 1)
        else:
            gene_counts = dict.fromkeys([x.name for x in sleaves], 0)
            for leaf in tree.leaves():
                gene_counts[recon[leaf].name] += 1

    # modify gene counts for species u
    gene_counts[u.name] = ucount
    popsizes = init_popsizes(stree, n)

    # NOTE: gene_counts and stimes are mutated in place between the three
    # calls below; the statement order is significant.
    p = cdf_mrca_bounded_multicoal(gene_counts, T, stree, popsizes,
                                   sroot=sroot, sleaves=sleaves,
                                   stimes=stimes, tree=tree, recon=recon)

    # p2: same bound after one coalescence in u at time utime + t
    gene_counts[u.name] = ucount - 1
    stimes[u] = utime + t
    p2 = cdf_mrca_bounded_multicoal(gene_counts, T, stree, popsizes,
                                    sroot=sroot, sleaves=sleaves,
                                    stimes=stimes, tree=tree, recon=recon)

    # p3: NOTE(review): appears to model all ucount lineages escaping into
    # u's parent branch (no coalescence in u) -- confirm
    gene_counts[u.parent.name] = ucount
    stimes[u] = stimes[u.parent]
    p3 = cdf_mrca_bounded_multicoal(gene_counts, T, stree, popsizes,
                                    sroot=sroot, sleaves=sleaves,
                                    stimes=stimes, tree=tree, recon=recon)

    # p4: log density of a coalescence among ucount lineages at waiting time t
    p4 = log(prob_coal(t, ucount, popsizes[u.name]))
    # p5: log P(no coalescence among ucount lineages over the rest of u)
    p5 = log(prob_coal_counts(ucount, ucount,
                              stimes[u.parent] - utime, popsizes[u.name]))

    return (p2 + p4) - stats.logsub(p, p3 + p5)
def prob_no_coal_bmc(u, utime, ucount, gene_counts, T, stree, n,
                     sroot=None, sleaves=None, stimes=None,
                     tree=None, recon=None):
    """
    Returns the log probability of no coalescent occurring in branch u
    of the species tree during a bounded multispecies coalescent (BMC).

    u      -- species branch of interest
    utime  -- time of the previous coalescent event in 'u'
    ucount -- number of lineages present in 'u' at time 'utime'
    """
    if sroot is None:
        sroot = stree.root

    # find relevent leaves of stree (u should be treated as a leaf)
    if sleaves is None:
        sleaves = set()

        def walk(node):
            if node.is_leaf() or node == u:
                sleaves.add(node)
            else:
                for child in node.children:
                    walk(child)
        walk(sroot)

    # find timestamps of stree nodes
    if stimes is None:
        # modify timestamp of u to be that of the previous coal (utime)
        stimes = {u: utime}
        stimes = treelib.get_tree_timestamps(stree, sroot, sleaves, stimes)

    # init gene counts
    if gene_counts is None:
        if tree is None:
            gene_counts = dict.fromkeys([x.name for x in sleaves], 1)
        else:
            gene_counts = dict.fromkeys([x.name for x in sleaves], 0)
            for leaf in tree.leaves():
                gene_counts[recon[leaf].name] += 1

    # modify gene counts for species u
    gene_counts[u.name] = ucount
    popsizes = init_popsizes(stree, n)

    # NOTE: gene_counts and stimes are mutated in place between the two
    # calls below; the statement order is significant.
    p = cdf_mrca_bounded_multicoal(gene_counts, T, stree, popsizes,
                                   sroot=sroot, sleaves=sleaves, stimes=stimes,
                                   tree=tree, recon=recon)

    # same bound with all ucount lineages passed up into u's parent branch
    gene_counts[u.parent.name] = ucount
    stimes[u] = stimes[u.parent]
    p2 = cdf_mrca_bounded_multicoal(gene_counts, T, stree, popsizes,
                                    sroot=sroot, sleaves=sleaves,
                                    stimes=stimes, tree=tree, recon=recon)

    # log P(no coalescence among ucount lineages for the rest of branch u)
    p3 = log(prob_coal_counts(ucount, ucount,
                              stimes[u.parent] - utime, popsizes[u.name]))

    return p2 - p + p3
def num_labeled_histories(nleaves, nroots):
    """
    Number of labeled coalescent histories that reduce 'nleaves'
    lineages down to 'nroots' lineages: the product of (i choose 2)
    for i = nroots+1 .. nleaves.
    """
    total = 1.0
    for count in range(nroots + 1, nleaves + 1):
        total *= count * (count - 1) / 2.0
    return total
def log_num_labeled_histories(nleaves, nroots):
    """
    Log of num_labeled_histories: the sum of log(i*(i-1)/2) for
    i = nroots+1 .. nleaves.
    """
    total = 0.0
    for count in range(nroots + 1, nleaves + 1):
        total += log(count * (count - 1) / 2.0)
    return total
def prob_bounded_multicoal_recon_topology(tree, recon, stree, n, T,
                                          root=None, leaves=None,
                                          lineages=None, top_stats=None,
                                          stimes=None):
    """
    Returns the log probability of a reconciled gene tree ('tree', 'recon')
    from the coalescent model given a species tree 'stree',
    population sizes 'n', and stopping time 'T'
    """
    # fill in any statistics the caller did not precompute
    popsizes = init_popsizes(stree, n)
    if lineages is None:
        lineages = count_lineages_per_branch(tree, recon, stree)
    if top_stats is None:
        top_stats = get_topology_stats(tree, recon, stree)
    if stimes is None:
        stimes = treelib.get_tree_timestamps(stree)

    # unbounded multicoal topology probability
    log_p_topology = prob_multicoal_recon_topology(
        tree, recon, stree, popsizes,
        lineages=lineages, top_stats=top_stats)

    # condition on complete coalescence before the deadline T
    k_root = lineages[stree.root][0]
    remaining = T - stimes[stree.root]
    log_p_mrca = log(cdf_mrca(remaining, k_root,
                              popsizes[recon[tree.root].name]))
    log_norm = cdf_mrca_bounded_multicoal(None, T, stree, popsizes,
                                          tree=tree, recon=recon,
                                          stimes=stimes)
    return log_p_topology + log_p_mrca - log_norm
#=============================================================================
# sampling coalescent trees
#
# - normal kingman coalescent
# - censored coalescent
# - bounded coalescent (conditioned on completion before a fixed time)
#
def sample_coal_tree(k, n):
    """
    Simulate a standard Kingman coalescent tree for 'k' leaves drawn
    from a population of size 'n'.
    """
    # accumulate the absolute time of each coalescent event
    event_times = [0]
    for lineages in range(k, 1, -1):
        event_times.append(event_times[-1] + sample_coal(lineages, n))
    return make_tree_from_times(event_times)[0]
def sample_bounded_coal_tree(k, n, T, capped=False):
    """
    Simulate a coalescent tree for 'k' leaves from a population of size
    'n', conditioned on complete coalescence before time 'T'.

    capped -- if True, add an artificial root to the tree.  Used
              primarily by other methods.
    """
    event_times = [0]
    for lineages in range(k, 1, -1):
        # each waiting time is bounded by the time remaining until T
        remaining = T - event_times[-1]
        event_times.append(event_times[-1] +
                           sample_bounded_coal(lineages, n, remaining))
    return make_tree_from_times(event_times, t=T, capped=capped)[0]
def sample_bounded_coal_tree_reject(k, n, T, capped=False):
    """
    Simulate a coalescent tree for 'k' leaves from a population of size
    'n', conditioned on complete coalescence before time 'T'.

    Uses rejection sampling: whole sets of coalescent times are redrawn
    until the tree finishes before 'T'.  This works but is very
    inefficient; use sample_bounded_coal_tree instead.
    """
    # redraw the complete set of event times until the MRCA lands before T
    while True:
        event_times = [0]
        for lineages in range(k, 1, -1):
            event_times.append(event_times[-1] + sample_coal(lineages, n))
        if event_times[-1] < T:
            break
    return make_tree_from_times(event_times, t=T, capped=capped)[0]
def sample_censored_coal_tree(k, n, t, capped=False):
    """
    Simulate a coalescent tree for 'k' leaves from a population of size
    'n', censored at a maximum time 't'.

    Returns the tuple (tree, lineages) where 'lineages' is the set of
    lineages that had not coalesced by time 't'.

    capped -- if True, remaining lineages are added as children of an
              artificial tree root.
    """
    event_times = [0]
    for lineages in range(k, 1, -1):
        next_time = event_times[-1] + sample_coal(lineages, n)
        if next_time > t:
            # event falls past the censoring time; stop simulating
            break
        event_times.append(next_time)
    return make_tree_from_times(event_times, k, t, capped=capped)
def sample_coal_cond_counts_tree(a, b, t, n, capped=False):
    """
    Simulate a coalescent tree for 'a' leaves from a population of size
    'n', conditioned on exactly 'b' lineages remaining at time 't'.

    Returns the tuple (tree, lineages) where 'lineages' is the set of
    lineages that have not yet coalesced.

    capped -- if True, remaining lineages are added as children of an
              artificial tree root.
    """
    event_times = [0]
    for lineages in range(a, b, -1):
        remaining = t - event_times[-1]
        event_times.append(
            event_times[-1] +
            sample_coal_cond_counts(lineages, b, remaining, n))
    return make_tree_from_times(event_times, a, t, capped=capped)
def init_popsizes(stree, n):
    """
    Build a per-node population size dict for species tree 'stree'.

    n -- either a single population size (int or float) applied to every
         branch, or a dict mapping species node name -> population size
         (returned as-is).

    Raises Exception if 'n' is any other type.
    """
    if isinstance(n, (int, float)):
        # uniform population size across all branches
        return dict.fromkeys(stree.nodes.keys(), n)
    elif isinstance(n, dict):
        return n
    else:
        # BUG FIX: the old message said "int or dict" even though float
        # is accepted as well
        raise Exception("n must be an int, float, or dict.")
# TODO: right now this assumes that there are at least 1 or more genes
# in each extant species
def sample_multicoal_tree(stree, n, leaf_counts=None,
                          namefunc=None, sroot=None, sleaves=None):
    """
    Returns a gene tree from a multi-species coalescence process

    stree       -- species tree
    n           -- population size (int or dict)
                   If n is a dict it must map from species name to
                   population size.
    leaf_counts -- dict of species names to a starting gene count.
                   Default is 1 gene per extant species.
    namefunc    -- a function that generates new gene names given a species
                   name.
    sroot       -- root of the species subtree to simulate
    sleaves     -- leaves of the species subtree to simulate

    Returns (tree, recon) where 'recon' maps each gene node to the
    species node it evolved in.
    """
    if sleaves is None:
        sleaves = set(stree.leaves())
    if sroot is None:
        sroot = stree.root

    # initialize vector for how many genes per extant species
    if leaf_counts is None:
        leaf_counts = dict((l, 1) for l in stree.leaf_names())

    # initialize function for generating new gene names
    if namefunc is None:
        spcounts = dict((l, 1) for l in stree.leaf_names())

        def namefunc(sp):
            name = sp + "_" + str(spcounts[sp])
            spcounts[sp] += 1
            return name

    # initialize population sizes
    popsizes = init_popsizes(stree, n)

    # init gene counts
    counts = dict((n.name, 0) for n in stree)
    counts.update(leaf_counts)

    # init reconciliation
    recon = {}

    # subtrees
    subtrees = {}

    # queue that visits a species node only after all children are done
    queue = MultiPushQueue(sleaves)

    # loop through species tree
    for snode in queue:
        # simulate population for one branch
        k = counts[snode.name]

        if snode != sroot:
            # non basal branch: censored coalescent; surviving lineages
            # continue into the parent branch
            queue.push(snode.parent, len(snode.parent.children))
            subtree, lineages = sample_censored_coal_tree(
                k, popsizes[snode.name], snode.dist, capped=True)
            counts[snode.parent.name] += len(lineages)
        else:
            # basal branch: coalesce fully down to a single root
            subtree = sample_coal_tree(k, popsizes[snode.name])
            lineages = [subtree.root]
        subtrees[snode] = (subtree, lineages)
        for node in subtree:
            recon[node] = snode

    # stitch subtrees together
    tree = treelib.Tree()

    # add all nodes to total tree
    # (each subtree's root is removed; the basal root is re-added below)
    for subtree, lineages in subtrees.values():
        tree.merge_names(subtree)
        tree.remove(subtree.root)
        del recon[subtree.root]

    for snode in subtrees:
        if snode not in sleaves:
            subtree, lineages = subtrees[snode]

            # get lineages from child subtrees
            lineages2 = chain(*[subtrees[child][1]
                                for child in snode.children])

            # ensure leaves are randomly attached
            leaves = subtree.leaves()
            random.shuffle(leaves)

            # stitch leaves of the subtree to children subtree lineages
            for leaf, lineage in izip(leaves, lineages2):
                tree.add_child(leaf, lineage)

    # set root
    tree.root = subtrees[sroot][0].root
    tree.add(tree.root)
    recon[tree.root] = sroot

    # name leaves
    for node in tree:
        if recon[node].is_leaf():
            tree.rename(node.name, namefunc(recon[node].name))

    return tree, recon
def sample_bounded_multicoal_tree(stree, n, T, leaf_counts=None, namefunc=None,
                                  sroot=None, sleaves=None, stimes=None,
                                  gene_counts=None):
    """
    Returns a gene tree from a bounded multi-species coalescence process

    stree       -- species tree
    n           -- population size (int or dict)
                   If n is a dict it must map from species name to
                   population size.
    T           -- deadline for complete coalescence
    leaf_counts -- dict of species names to a starting gene count.
                   Default is 1 gene per extant species.
    namefunc    -- a function that generates new gene names given a species
                   name.
    sleaves     -- you can specify a subtree of the stree by giving the a
                   list 'sleaves' of leaf nodes of the stree
    sroot       -- you can specify a subtree of the stree by giving the
                   subtree root node 'sroot'
    stimes      -- optional precomputed species tree timestamps
    gene_counts -- optional dict of species node name -> gene count
    """
    # initialize vector for how many genes per extant species
    if sleaves is None:
        sleaves = set(stree.leaves())
    if sroot is None:
        sroot = stree.root
    if leaf_counts is None:
        leaf_counts = dict((l.name, 1) for l in sleaves)
    # NOTE(review): leaf_counts is not referenced below; gene_counts
    # drives the simulation -- confirm intent

    # initialize function for generating new gene names
    if namefunc is None:
        spcounts = dict((l.name, 1) for l in sleaves)

        def namefunc(sp):
            name = sp + "_" + str(spcounts[sp])
            spcounts[sp] += 1
            return name

    # initialize population sizes
    popsizes = init_popsizes(stree, n)

    # init gene counts
    if gene_counts is None:
        gene_counts = dict.fromkeys([x.name for x in sleaves], 1)

    # init species tree timestamps
    if stimes is None:
        stimes = treelib.get_tree_timestamps(stree)

    # calc table: lineage-count distributions for every species branch
    prob_counts = calc_prob_counts_table(gene_counts, T, stree, popsizes,
                                         sroot, sleaves, stimes)

    # init lineage counts
    # lineages[snode] = [bottom count, top count]; the root must finish
    # with exactly one lineage, leaves start with their gene counts
    lineages = {sroot: [None, 1]}
    for node in sleaves:
        lineages[node] = [gene_counts[node.name], None]

    # sample lineage counts
    sample_lineage_counts(sroot, sleaves,
                          popsizes, stimes, T, lineages, prob_counts)

    # sample coal times
    tree, recon = coal_cond_lineage_counts(lineages, sroot, sleaves,
                                           popsizes, stimes, T, namefunc)

    return tree, recon
def sample_lineage_counts(node, leaves,
                          popsizes, stimes, T, lineages, prob_counts):
    """
    Sample lineage counts conditioned on counts at root and leaves
    of species tree

    Fills in 'lineages' (dict: snode -> [bottom_count, top_count]) for
    all species nodes top-down from 'node', drawing from the per-branch
    distributions in 'prob_counts' and using rejection sampling to stay
    consistent with this branch's known top count.
    """
    a, b = lineages[node]
    if node not in leaves:
        if len(node.children) == 2:
            # two child case

            c1 = node.children[0]
            c2 = node.children[1]
            probs1 = prob_counts[c1][1]
            probs2 = prob_counts[c2][1]

            if b is None:
                # special case where no ending count 'b' is conditioned
                k1 = stats.sample(probs1)
                k2 = stats.sample(probs2)
            else:
                # condition on ending count 'b'
                if node.parent:
                    t = stimes[node.parent] - stimes[node]
                else:
                    t = T - stimes[node]
                n = popsizes[node.name]

                # rejection sample the children's top counts until they
                # can plausibly coalesce down to 'b' within this branch
                reject = 0
                while True:
                    k1 = stats.sample(probs1)
                    k2 = stats.sample(probs2)
                    if random.random() < prob_coal_counts(k1 + k2, b, t, n):
                        # accept
                        break
                    reject += 1

            # set lineage counts
            lineages[node][0] = k1 + k2
            if c1 not in lineages:
                lineages[c1] = [None, k1]
            else:
                lineages[c1][1] = k1
            if c2 not in lineages:
                lineages[c2] = [None, k2]
            else:
                lineages[c2][1] = k2

            # recurse
            sample_lineage_counts(c1, leaves,
                                  popsizes, stimes, T, lineages, prob_counts)
            sample_lineage_counts(c2, leaves,
                                  popsizes, stimes, T, lineages, prob_counts)

        elif len(node.children) == 1:
            # single child case

            c1 = node.children[0]
            probs1 = prob_counts[c1][1]

            if b is None:
                # special case where no ending count 'b' is conditioned
                k1 = stats.sample(probs1)
            else:
                # condition on ending count 'b'
                if node.parent:
                    t = stimes[node.parent] - stimes[node]
                else:
                    t = T - stimes[node]
                n = popsizes[node.name]

                reject = 0
                while True:
                    k1 = stats.sample(probs1)
                    if random.random() < prob_coal_counts(k1, b, t, n):
                        # accept
                        break
                    reject += 1

            # set lineage counts
            lineages[node][0] = k1
            if c1 not in lineages:
                lineages[c1] = [None, k1]
            else:
                lineages[c1][1] = k1

            # recurse
            sample_lineage_counts(c1, leaves,
                                  popsizes, stimes, T, lineages, prob_counts)

        else:
            # unhandled case
            raise NotImplementedError
def coal_cond_lineage_counts(lineages, sroot, sleaves, popsizes, stimes, T,
                             namefunc):
    """Sample coalescent times conditioned on lineage counts

    lineages -- dict: snode -> [bottom_count, top_count], as filled in
                by sample_lineage_counts
    Returns (tree, recon).
    """
    # init reconciliation and subtree dicts
    recon = {}
    subtrees = {}
    caps = set()

    # sample coalescent times
    queue = MultiPushQueue(sleaves)

    # loop through species tree
    for snode in queue:
        # simulate population for one branch
        a, b = lineages[snode]

        if snode != sroot:
            t = stimes[snode.parent] - stimes[snode]
            queue.push(snode.parent, len(snode.parent.children))
        else:
            t = T - stimes[snode] if T is not None else None

        if t is None:
            # unbounded root branch: coalesce fully to a single lineage
            subtree = sample_coal_tree(a, popsizes[snode.name])
            tops = [subtree.root]
        else:
            # bounded branch: condition on 'b' lineages surviving
            subtree, tops = sample_coal_cond_counts_tree(
                a, b, t, popsizes[snode.name], capped=True)
            # remember the artificial cap root so it can be stripped later
            caps.add(subtree.root)

        subtrees[snode] = (subtree, tops)
        for node in subtree:
            recon[node] = snode

    tree = join_subtrees(subtrees, recon, caps, sroot)

    # set name leaves
    for leaf in tree.leaves():
        tree.rename(leaf.name, namefunc(recon[leaf].name))

    return tree, recon
def join_subtrees(subtrees, recon, caps, sroot):
    """Join several subtrees together into one subtree

    subtrees -- dict: snode -> (subtree, top_lineages)
    recon    -- reconciliation dict; cap-node entries are deleted in place
    caps     -- set of artificial cap roots to strip
    sroot    -- species node whose subtree supplies the final root
    """
    # stitch subtrees together
    tree = treelib.Tree()

    # add all nodes to total tree
    for snode, (subtree, tops) in subtrees.iteritems():
        tree.merge_names(subtree)

    # remove cap nodes
    for node in caps:
        # remove cap node
        tree.remove(node)
        del recon[node]

    for snode in subtrees:
        subtree, tops = subtrees[snode]

        # get lineages from child subtrees
        lineages2 = list(chain(*[subtrees[child][1]
                                 for child in snode.children]))

        if len(lineages2) == 0:
            # nothing to connect
            continue

        # ensure leaves are randomly attached
        leaves = subtree.leaves()
        random.shuffle(leaves)

        # stitch leaves of the subtree to children subtree lineages
        for leaf, lineage in izip(leaves, lineages2):
            tree.add_child(leaf, lineage)

    # set root
    tree.root = subtrees[sroot][0].root
    if tree.root in caps and len(tree.root.children) == 1:
        # drop a leftover cap acting as a redundant unary root
        tree.root = tree.root.children[0]

    return tree
def sample_bounded_multicoal_tree_reject(stree, n, T, leaf_counts=None,
                                         namefunc=None, sleaves=None,
                                         sroot=None):
    """
    Returns a gene tree from a bounded multi-species coalescence process

    Rejection-sampling variant: whole gene trees are simulated and
    discarded until one completes coalescence before 'T'.

    stree       -- species tree
    n           -- population size (int or dict)
                   If n is a dict it must map from species name to
                   population size.
    T           -- deadline for complete coalescence
    leaf_counts -- dict of species names to a starting gene count.
                   Default is 1 gene per extant species.
    namefunc    -- a function that generates new gene names given a species
                   name.
    sleaves     -- you can specify a subtree of the stree by giving the a
                   list 'sleaves' of leaf nodes of the stree
    sroot       -- you can specify a subtree of the stree by giving the
                   subtree root node 'sroot'
    """
    # initialize vector for how many genes per extant species
    if sleaves is None:
        sleaves = set(stree.leaves())
    if sroot is None:
        sroot = stree.root
    if leaf_counts is None:
        leaf_counts = dict((l.name, 1) for l in sleaves)

    # initialize function for generating new gene names
    if namefunc is None:
        spcounts = dict((l.name, 1) for l in sleaves)

        def namefunc(sp):
            name = sp + "_" + str(spcounts[sp])
            spcounts[sp] += 1
            return name

    # initialize population sizes
    popsizes = init_popsizes(stree, n)

    reject = 0
    while True:
        queue = MultiPushQueue(sleaves)

        # init gene counts
        counts = dict((n.name, 0) for n in stree)
        counts.update(leaf_counts)

        # init reconciliation
        recon = {}

        # subtrees
        subtrees = {}

        # loop through species tree
        for snode in queue:
            # simulate population for one branch
            k = counts[snode.name]

            if snode != sroot:
                # non basal branch
                subtree, lineages = sample_censored_coal_tree(
                    k, popsizes[snode.name], snode.dist, capped=True)
                queue.push(snode.parent, len(snode.parent.children))
            else:
                # basal branch
                subtree = sample_coal_tree(k, popsizes[snode.name])
                # NOTE(review): bare node here vs. [subtree.root] in
                # sample_multicoal_tree; only len() is taken below and
                # never for sroot, so this is not len()'d -- confirm
                lineages = subtree.root
            subtrees[snode] = (subtree, lineages)
            if snode != sroot:
                counts[snode.parent.name] += len(lineages)
            for node in subtree:
                recon[node] = snode

        # stitch subtrees together
        tree = treelib.Tree()

        # add all nodes to total tree
        for subtree, lineages in subtrees.values():
            tree.merge_names(subtree)
            tree.remove(subtree.root)
            del recon[subtree.root]

        for snode in subtrees:
            if not snode.is_leaf():
                subtree, lineages = subtrees[snode]

                # get lineages from child subtrees
                lineages2 = chain(*[subtrees[child][1]
                                    for child in snode.children])

                # ensure leaves are randomly attached
                leaves = subtree.leaves()
                random.shuffle(leaves)

                # stitch leaves of the subtree to children subtree lineages
                for leaf, lineage in izip(leaves, lineages2):
                    tree.add_child(leaf, lineage)

        # set root
        tree.root = subtrees[sroot][0].root
        tree.add(tree.root)
        recon[tree.root] = sroot

        # reject tree if basal branch goes past deadline
        times = treelib.get_tree_timestamps(tree)
        if times[tree.root] < T:
            break
        else:
            reject += 1

    # name leaves
    for leaf in tree.leaves():
        tree.rename(leaf.name, namefunc(recon[leaf].name))

    return tree, recon
def make_tree_from_times(times, k=None, t=None, leaves=None, capped=False):
    """
    Returns a Tree from a list of divergence times.

    The topology is chosen by randomly choosing pairs of leaves.

    times  -- ascending absolute times; times[0] is the leaf time and
              each later entry is one coalescent event
    k      -- number of starting lineages (default: inferred)
    t      -- final (censoring) time used for the root branch length
    leaves -- optional explicit leaf names
    capped -- if True, attach all remaining lineages to an artificial root

    Returns (tree, remaining_lineages).
    """
    # initialize k
    if k is None:
        if leaves is not None:
            k = len(leaves)
        else:
            k = len(times)

    tree = treelib.Tree()

    # initialize k children
    if leaves is None:
        children = set(treelib.TreeNode(tree.new_name()) for i in xrange(k))
    else:
        children = set(treelib.TreeNode(name) for name in leaves)
    for child in children:
        tree.add(child)
        child.data["time"] = 0.0

    # perform random merges
    for i in xrange(1, len(times)):
        # make new parent and merge children
        parent = treelib.TreeNode(tree.new_name())
        parent.data["time"] = times[i]
        # NOTE(review): random.sample on a set is an error in
        # Python >= 3.11; fine under Python 2
        a, b = random.sample(children, 2)
        tree.add_child(parent, a)
        tree.add_child(parent, b)

        # adjust children set
        children.remove(a)
        children.remove(b)
        children.add(parent)

    # set branch lengths
    for node in tree:
        if not node.parent:
            if t is not None:
                node.dist = t - node.data["time"]
            else:
                node.dist = 0.0
        else:
            node.dist = node.parent.data["time"] - node.data["time"]

    # for convenience cap the tree for easy drawing/manipulation
    if capped:
        tree.make_root()
        for node in children:
            tree.add_child(tree.root, node)
    else:
        # set root
        if len(children) == 1:
            tree.root = list(children)[0]

    # return tree and remaining lineages
    return tree, children
#=============================================================================
# popsize inference
def mle_popsize_coal_times(k, times):
    """
    Maximum likelihood estimate of the population size from the sorted
    coalescent event times 'times' of a tree with 'k' leaves.
    """
    weighted_total = 0
    lineages = k
    prev_time = 0
    for event_time in times:
        # each inter-event interval is weighted by lineages*(lineages-1)
        weighted_total += lineages * (lineages - 1) * (event_time - prev_time)
        lineages -= 1
        prev_time = event_time
    return weighted_total / float(2 * k - 2)
def mle_popsize_many_coal_times(k, times):
    """
    Maximum likelihood estimate of the population size pooled over
    several trees, each with 'k' leaves.  'times' is a list of per-tree
    sorted coalescent time lists.
    """
    ntrees = len(times)
    weighted_total = 0
    for tree_times in times:
        lineages = k
        prev_time = 0
        for event_time in tree_times:
            weighted_total += (lineages * (lineages - 1) *
                               (event_time - prev_time))
            lineages -= 1
            prev_time = event_time
    return weighted_total / float(2 * ntrees * (k - 1))
def mle_popsize_tree(tree):
    """
    Maximum likelihood estimate of the population size from a single
    coalescent tree.
    """
    timestamps = treelib.get_tree_timestamps(tree)
    # coalescent events are the binary internal nodes, taken in time order
    coal_times = sorted(timestamps[node] for node in tree.postorder()
                        if len(node.children) == 2)
    return mle_popsize_coal_times(len(tree.leaves()), coal_times)
#=============================================================================
# helper data structures
class MultiPushQueue (object):
    """
    A queue that requires multiple pushes before item is queued

    An item is appended only once it has been pushed 'needed' times;
    used to visit a species node only after all of its children have
    been processed.
    """

    def __init__(self, lst):
        # items in 'lst' are queued immediately
        self._lst = linked_list.LinkedList(lst)
        # number of pushes seen so far for each pending item
        self._count = {}

    def __iter__(self):
        return self

    def push(self, item, needed):
        count = self._count.setdefault(item, 0)

        # must be queued 'needed' times
        if count + 1 == needed:
            self._lst.append(item)
        else:
            self._count[item] += 1

    def next(self):
        # Python 2 iterator protocol
        if len(self._lst) == 0:
            raise StopIteration
        else:
            return self._lst.pop_front()
#=============================================================================
# allele frequency
def sample_allele_freq(p, n):
    """
    Sample a new allele frequency after drift, given starting allele
    frequency 'p' and population size 'n'.

    The returned frequency is always clamped to [0.0, 1.0].
    """
    # absorbing boundaries
    if p <= 0.0:
        return 0.0
    if p >= 1.0:
        return 1.0

    if p < 0.05:
        # rare allele: Poisson approximation.
        # BUG FIX: clamp at 1.0 (a frequency), not n (a count); the old
        # min(..., n) never clamped and could return values above 1.
        return min(float(stats.poissonvariate(p*n))/n, 1.0)
    if p > 0.95:
        # common allele: Poisson approximation on the complement.
        # BUG FIX: same clamp; the old code could return negatives here.
        return 1.0 - min(float(stats.poissonvariate((1-p)*n))/n, 1.0)

    # intermediate frequency: normal approximation
    mu = p * n
    sigma = sqrt(n * p*(1 - p))
    p1 = random.normalvariate(mu, sigma) / n

    if p1 < 0:
        return 0.0
    if p1 > 1:
        return 1.0
    return p1
def freq_CDF(p, N, t, T, k=50):
    """
    Evaluate the Kimura-derived CDF of allele frequency.

    p -- initial frequency of the allele in the population
    N -- population size
    t -- elapsed time
    T -- upper limit of the CDF (integrate from 0 to T)
    k -- number of terms kept from the (nominally infinite) series
    """
    leg_r = legendre(1.0 - 2*p)
    leg_T = legendre(1.0 - 2*T)
    return freq_CDF_legs_ends(leg_r, leg_T, N, t, k=k)
def freq_CDF_legs_noends(leg_r, leg_T, N, t, k=50):
    """
    Evaluate the Kimura-derived CDF from two Legendre polynomial
    evaluators, excluding the point masses at frequencies 0 and 1
    (a partial CDF).

    leg_r -- legendre lambda associated with r
    leg_T -- legendre lambda associated with T (T', really)
    N     -- population size
    t     -- elapsed time
    k     -- number of series terms retained
    """
    decay = float(t) / 4.0 / N
    total = 0.0
    for i in range(1, k+1):
        term = 0.5 * (leg_r(i-1) - leg_r(i+1))
        term *= exp(-i * (i+1) * decay)
        term *= 1 - leg_T(i)
        total += term
    return total
def freq_CDF_legs_ends(leg_r, leg_T, N, t, k=50):
    """
    Evaluate the Kimura-derived CDF from two Legendre polynomial
    evaluators, including the point masses at frequencies 0 and 1
    (the full CDF).

    leg_r -- legendre lambda associated with r
    leg_T -- legendre lambda associated with T (T', really)
    N     -- population size
    t     -- elapsed time
    k     -- number of series terms retained
    """
    # leg_r(True) currently returns p, so this is the extinction mass
    total = prob_fix(1.0 - leg_r(True), N, t)
    decay = float(t) / 4.0 / N
    for i in range(1, k+1):
        term = 0.5 * (leg_r(i-1) - leg_r(i+1))
        term *= exp(-i * (i+1) * decay)
        term *= 1 - leg_T(i)
        total += term
    # add the fixation mass when the upper limit T reaches 1
    if leg_T(True) < 1.0:
        return total
    return total + prob_fix(leg_r(True), N, t)
def freq_prob_range(p, N, t, T1, T2, k=50):
    """
    Probability that the allele frequency lies between 'T1' and 'T2'
    after time 't', starting from frequency 'p' in a population of
    size 'N'.

    Uses the partial (noends) CDF because the point masses at 0 and 1
    may be determined using other methods.
    """
    leg_r = legendre(1.0 - 2*p)
    upper = freq_CDF_legs_noends(leg_r, legendre(1.0 - 2*T2), N, t, k=k)
    lower = freq_CDF_legs_noends(leg_r, legendre(1.0 - 2*T1), N, t, k=k)
    return upper - lower
def sample_freq_CDF(p, N, t):
    """
    Sample a new allele frequency after time 't' by inverting the
    Kimura-derived CDF, starting from frequency 'p' in a population of
    size 'N'.

    N.B.: The current version fails sometimes (on some N, t pairs),
    presumably due to errors in freq_CDF_leg.  These need to be fixed.
    """
    # special cases
    if p == 0.0:
        return 0.0
    elif p == 1.0:
        return 1.0
    elif t == 0.0:
        return p

    y = random.random()
    leg_r = legendre(1.0 - 2*p)
    extinction = prob_fix(1.0 - p, N, t)  # probability of allele extinction

    if y < extinction:
        return 0.0  # sample an extinction event
    elif y > 1.0 - prob_fix_leg(leg_r, N, t):
        return 1.0  # sample a fixation event
    else:
        def f(T):
            # trims extinction probability, assures brentq works
            return (freq_CDF_legs_noends(leg_r, legendre(1.0 - 2*T), N, t)
                    - y + extinction)

        try:
            return brentq(f, 0.0, 1.0, disp=False)
        except Exception:
            # was a bare 'except:', which would also trap
            # KeyboardInterrupt; log the inputs and re-raise
            print((p, N, t))
            raise
# new function for determining Legendre polynomial evaluations
def legendre(r):
    """
    Return a lambda that evaluates Legendre polynomials P_n(r) via the
    recursive formula (43) from
    http://mathworld.wolfram.com/LegendrePolynomial.html.

    Since r is fixed, results for different n are cached, so repeated
    calls are cheap; n as high as one million runs in a fraction of a
    second.  Calling the lambda with a bool is a utility shortcut:
    True returns (1-r)/2 and False returns r.
    """
    def evaluate(i, d):
        if type(i) == bool:
            # utility function; may need to be removed
            if i:
                return (1.0 - d[1]) / 2.0
            return d[1]
        assert type(i) == int and i >= 0  # if i is not type bool
        top = d['max']
        if i <= top:
            return d[i]
        x = d[1]
        # extend the cache using the three-term recurrence
        for n in range(top + 1, i + 1):
            d[n] = 1.0 * ((2*n - 1) * x * d[n-1] - (n-1) * d[n-2]) / n
        d['max'] = i
        return d[i]

    cache = {0: 1.0, 1: r, 'max': 1}
    assert -1.0 <= r and r <= 1.0  # ensure r in reasonable range
    return lambda n: evaluate(n, cache)
def gegenbauer(i, r):
    """Gegenbauer polynomial evaluated via the hypergeometric function."""
    coeff = i * (i + 1) / 2.0
    return coeff * hypergeo(i + 2, 1 - i, 2, (1 - r) / 2.0)
# this should be the fastest gegenbauer method now (21 July 2010)
def gegenbauer2(i, r):
    """Gegenbauer polynomial computed from a Legendre recurrence."""
    leg = legendre(r)
    scale = (i * (i + 1)) / float((2*i + 1) * (1 - r*r))
    return scale * (leg(i - 1) - leg(i + 1))
def gegenbauer3(n, a, z):
    """Gegenbauer polynomial C_n^(a)(z) from its explicit finite sum."""
    total = 0
    for k in range(int(n/2) + 1):
        numer = (-1)**k * stats.gamma(n - k + a)
        denom = (stats.gamma(a) * stats.factorial(k) *
                 stats.factorial(n - 2*k))
        total += numer / denom * ((2*z) ** (n - 2*k))
    return total
# TODO: determine proper k and esp values
def prob_fix(p, n, t, k=50, esp=0.000001):
    """Probability of fixation"""
    leg = legendre(1 - 2*p)
    prob = p
    for i in range(1, k+1):
        term = (0.5 * (-1)**i * (leg(i-1) - leg(i+1)) *
                exp(-t * i * (i+1) / (4 * n)))
        # truncate the series once terms become negligible
        if term != 0.0 and abs(term) < esp:
            return prob + term
        prob += term
    return prob
# added 02 August 2010
# saves information to leg_r
def prob_fix_leg(leg_r, n, t, k=50, esp=0.000001):
    """Probability of fixation, reusing a cached legendre lambda."""
    leg = leg_r
    prob = leg(True)  # gets p
    for i in range(1, k+1):
        term = (0.5 * (-1)**i * (leg(i-1) - leg(i+1)) *
                exp(-t * i * (i+1) / (4 * n)))
        # truncate the series once terms become negligible
        if term != 0.0 and abs(term) < esp:
            return prob + term
        prob += term
    return prob
def hypergeo(a, b, c, z, k=100):
    """Hypergeometric function 2F1(a, b; c; z) via a truncated series."""
    # track log-magnitudes and signs of the running partial products
    terms = [0.0]
    signs = [1.0]
    for i in range(1, k+1):
        ratio = float((i+a-1) * (i+b-1) * z) / (i+c-1) / i
        signs.append(util.sign(ratio) * signs[-1])
        if ratio == 0.0:
            break
        terms.append(log(abs(ratio)) + terms[i-1])
    return sum(s * exp(v) for s, v in zip(signs, terms))
def loghypergeo(a, b, c, z, k=100):
    """
    Hypergeometric function 2F1(a, b; c; z).

    Performs computation in log-space; returns (sign, log_value).
    """
    terms = [0.0]
    signs = [1.0]
    for i in range(1, k+1):
        ratio = float((i+a-1) * (i+b-1) * z) / (i+c-1) / i
        signs.append(util.sign(ratio) * signs[-1])
        if ratio == 0.0:
            break
        terms.append(log(abs(ratio)) + terms[i-1])

    # fold the signed log-terms together with log-space addition
    sgn = 1
    tot = -util.INF
    for s, v in zip(signs, terms):
        sgn, tot = stats.logadd_sign(sgn, tot, s, v)
    return sgn, tot
def hypergeo_mult(i, z1, z2, k=100):
    """Product of the pair of 2F1(1-i, i+2; 2; z) factors used by freq_pdf4."""
    return hypergeo(1 - i, i + 2, 2, z1, k) * hypergeo(1 - i, i + 2, 2, z2, k)
def freq_pdf(x, p, n, t, k=8):
    """Probability density of allele frequency x after time t.

    Kimura-style spectral series for the density of an allele currently
    at frequency x, given initial frequency p, population size n, and
    elapsed time t, truncated after k terms.  Each term is assembled in
    log-space (loghypergeo + stats.logadd_sign) with an explicit sign,
    then the accumulated log-magnitude is mapped back with exp().
    """
    # Symmetry f(x; p) == f(1-x; 1-p) keeps the evaluation at x <= 0.5.
    if x > 0.5:
        return freq_pdf(1.0-x, 1.0-p, n, t, k)
    q = 1.0 - p
    prob = -util.INF   # log-magnitude accumulator; log(0) == -inf
    sgn = 1            # sign of the accumulated sum
    t4n = t / (4*n)
    for i in xrange(1, k+1):
        # Linear-space equivalent of the term being accumulated:
        #term = (p * q * i * (i+1) * (2*i+1) *
        #        hypergeo(1-i,i+2,2,p) * hypergeo(1-i,i+2,2,x) *
        #        exp(-t * i * (i+1) / (4*n)))
        lcoff = log(p * q * i * (i+1) * (2*i+1))
        s1, h1 = loghypergeo(1-i, i+2, 2, p, i+2)
        s2, h2 = loghypergeo(1-i, i+2, 2, x, i+2)
        sgn2 = s1 * s2
        term = (lcoff + h1 + h2 - (i * (i+1) * t4n))
        sgn, prob = stats.logadd_sign(sgn, prob, sgn2, term)
    return sgn * exp(prob)
#=============================================================================
if __name__ == "__main__":
    # Ad-hoc benchmark/plotting harness for the functions above; the
    # plotting sections are disabled (if 0) and kept for reference.
    from rasmus.common import plotfunc
    #========================
    # hypergeo speed: time 100 evaluations of each implementation
    a, b, c, z, k = 30, 20, 12, .3, 40
    util.tic("hypergeo")
    for i in range(100):
        hypergeo(a, b, c, z, k)
    util.toc()
    util.tic("loghypergeo")
    for i in range(100):
        loghypergeo(a, b, c, z, k)
    util.toc()
    # disabled: plot freq_pdf at several times t for p0 = .5
    if 0:
        p0 = .5
        k = 30
        p = plotfunc(lambda x: freq_pdf(x, p0, 1000, 100, k=k),
                     .01, .99, .01, style="lines")
        p.plotfunc(lambda x: freq_pdf(x, p0, 1000, 200, k=k),
                   .01, .99, .01, style="lines")
        p.plotfunc(lambda x: freq_pdf(x, p0, 1000, 500, k=k),
                   .01, .99, .01, style="lines")
        p.plotfunc(lambda x: freq_pdf(x, p0, 1000, 1000, k=k),
                   .01, .99, .01, style="lines")
        p.plotfunc(lambda x: freq_pdf(x, p0, 1000, 2000, k=k),
                   .01, .99, .01, style="lines")
        p.plotfunc(lambda x: freq_pdf(x, p0, 1000, 3000, k=k),
                   .01, .99, .01, style="lines")
        p.enableOutput(True)
        p.replot()
        #p.plotfunc(lambda x: normalPdf(x, (.5, .1135)),
        #           .01, .99, .01, style="lines")
    # disabled: same plots for an asymmetric starting frequency p0 = .1
    if 0:
        p0 = .1
        p = plotfunc(lambda x: freq_pdf(x, p0, 1000, 100, k=25),
                     .01, .99, .01, style="lines")
        p.plotfunc(lambda x: freq_pdf(x, p0, 1000, 200, k=25),
                   .01, .99, .01, style="lines")
        p.plotfunc(lambda x: freq_pdf(x, p0, 1000, 500, k=25),
                   .01, .99, .01, style="lines")
        p.plotfunc(lambda x: freq_pdf(x, p0, 1000, 1000, k=25),
                   .01, .99, .01, style="lines")
        p.plotfunc(lambda x: freq_pdf(x, p0, 1000, 2000, k=25),
                   .01, .99, .01, style="lines")
        p.plotfunc(lambda x: freq_pdf(x, p0, 1000, 3000, k=25),
                   .01, .99, .01, style="lines")
        p.enableOutput(True)
        p.replot()
        #p.plotfunc(lambda x: freq_pdf3(x, .5, 1000, 1000/10, k=40),
        #           .01, .99, .01, style="lines")
    # disabled: single short-time curve
    if 0:
        p0 = .5
        k = 30
        p = plotfunc(lambda x: freq_pdf(x, p0, 1000, 30, k=k),
                     .01, .99, .01, style="lines")
        p.enableOutput(True)
        p.replot()
#=============================================================================
# old versions
# Legendre polynomial
# this function should be depreciated
def legendre_poly(n):
    """ \frac{1}{2^n n!} d^n/dx^n [(x^2 - 1)^n] """
    # Rodrigues' formula for the n'th Legendre polynomial, built as a
    # symbolic expression tree ('mult'/'power'/'add'/'scalar'/'var'
    # tuples) and reduced with this module's simplify()/derivate()
    # helpers.  Deprecated: superseded by the cached legendre() method.
    return simplify(('mult', ('scalar', 1.0 / (2 ** n * stats.factorial(n))),
                     derivate(('power', ('add', ('power', ('var', 'x'),
                                                          ('scalar', 2)),
                                                ('scalar', -1)),
                                        ('scalar', n)),
                              'x', n)))
# this function should be depreciated
def legendre_old(n, r):
    """Evaluate the n'th Legendre polynomial at r via legendre_poly()."""
    evaluated = simplify(assign_vars(legendre_poly(n), {'x': r}))
    # after substitution the expression must reduce to a plain scalar
    assert evaluated[0] == 'scalar'
    return evaluated[1]
### TODO: distribution problems arise from probability masses at 0 and 1
def freq_CDF_leg_old(leg, N, t, T, k=50):
    """
    Evaluates the CDF derived from Kimura.
    N.B.: Appears to fail sometimes; this needs to be fixed

    leg is a Legendre (lambda) for evaluating the CDF
    N is the population size
    t is time (units?)
    T is the upper limit of the CDF (int from 0 to T)
    k is approximation for the upper limit in the (supposed to be) infinite sum
    """
    # Tail-recursive evaluation of the inner polynomial sum for term i:
    # j is the summation index, s the running sum, c the previous
    # coefficient (each coefficient is built from the one before it).
    def innersum(i, T, j=0, s=0.0, c=1.0):
        if T == 0.0:
            return 1.0
        if j > i:
            return s
        newc = 1.0 if j == 0 else c * (-T) * (i+j) * (i-j+1) / j / j
        return innersum(i, T, j+1, s+newc, newc)
    # if p == 0.0:  # none have the allele
    #     return 1.0  # all weight is at 0, so CDF is equal to 1
    # if p == 1.0:  # all have the allele
    #     return 1.0 if T == 1.0 else 0.0
    # Outer spectral sum, truncated at k terms; each term combines a
    # Legendre difference, an exponential decay in t/N, and innersum.
    s = 0.0
    for i in xrange(1, k+1):
        newterm = leg(i-1) - leg(i+1)
        newterm *= exp(- i * (i+1) / 4.0 * t / N)
        newterm *= .5 - .5 * innersum(i, T)
        s += newterm
    return s
def hypergeo_old(a, b, c, z, k=100):
    """Hypergeometric function 2F1(a, b; c; z), truncated power series.

    Each term is built from the previous one via the ratio
    (i+a-1)(i+b-1) z / ((i+c-1) i); the first k+1 terms are summed in
    linear space.  Unlike hypergeo(), this can overflow for large
    parameters, which is why it was superseded.

    Fix: use range() instead of the Python-2-only xrange() -- identical
    iteration semantics in Python 2, and it keeps this pure helper
    usable under Python 3.
    """
    terms = [1.0]
    for i in range(1, k+1):
        terms.append(float((i+a-1)*(i+b-1)*z)/(i+c-1)/i * terms[i-1])
    return sum(terms)
# this is depreciated; replaced by an equivalent (but faster) gegenbauer method
def gegenbauer2_old(i, r):
    """Gegenbauer term built from freshly evaluated Legendre polynomials."""
    scale = (i * (i + 1)) / float((2 * i + 1) * (1 - r * r))
    return scale * (legendre_old(i - 1, r) - legendre_old(i + 1, r))
def freq_pdf_old(x, p, n, t, k=8):
    """Old allele-frequency PDF: log-space coefficients, linear 2F1."""
    # NOTE(review): the symmetry branch recurses into freq_pdf2(), not
    # into freq_pdf_old() itself -- looks like a typo, but this legacy
    # version is kept only for reference, so it is left as-is.
    if x > 0.5:
        return freq_pdf2(1.0-x, 1.0-p, n, t, k)
    q = 1.0 - p
    prob = -util.INF   # log-magnitude accumulator; log(0) == -inf
    sgn = 1            # sign of the accumulated sum
    t4n = t / (4*n)
    for i in xrange(1, k+1):
        # Linear-space form of the term being accumulated:
        #term = (p * q * i * (i+1) * (2*i+1) *
        #        hypergeo(1-i,i+2,2,p) * hypergeo(1-i,i+2,2,x) *
        #        exp(-t * i * (i+1) / (4*n)))
        lcoff = log(p * q * i * (i+1) * (2*i+1))
        h1 = hypergeo(1-i, i+2, 2, p, i+2)
        h2 = hypergeo(1-i, i+2, 2, x, i+2)
        sgn2 = util.sign(h1) * util.sign(h2)
        # a zero factor contributes nothing; skip to avoid log(0)
        if sgn2 != 0:
            term = (lcoff + log(abs(h1)) + log(abs(h2)) +
                    (- i * (i+1) * t4n))
            sgn, prob = stats.logadd_sign(sgn, prob, sgn2, term)
    return sgn * exp(prob)
def freq_pdf2(x, p, n, t, k=8):
    """Allele-frequency PDF via a Gegenbauer series (debug variant).

    Linear-space spectral sum; prints each term as it is accumulated,
    so it is only suitable for inspection/debugging.

    Fix: "print term" (a Python 2 print *statement*) is a SyntaxError
    under Python 3 and prevented the whole module from even importing
    there; the single-argument call form print(term) behaves identically
    in Python 2.  range() likewise replaces the Python-2-only xrange().
    """
    r = 1 - 2*p
    z = 1 - 2*x
    prob = 0.0
    for i in range(1, k+1):
        # NOTE(review): the (i - r*r) factor is unusual -- Kimura-style
        # series typically carry (1 - r*r); this legacy variant is kept
        # as written, but confirm before reusing it.
        term = ((2*i + 1) * (i - r*r) / float(i * (i+1)) *
                gegenbauer(i, r) * gegenbauer(i, z) *
                exp(-t * i * (i+1) / (4*n)))
        print(term)
        prob += term
    return prob
def freq_pdf3(x, p, n, t, k=8):
    """Allele-frequency PDF via linear-space hypergeometric series."""
    q = 1.0 - p
    total = 0.0
    for i in xrange(1, k+1):
        coeff = p * q * i * (i+1) * (2*i+1)
        total += (coeff *
                  hypergeo(1-i, i+2, 2, p, 40) * hypergeo(1-i, i+2, 2, x, 40) *
                  exp(-t * i * (i+1) / (4*n)))
    return total
def freq_pdf4(x, p, n, t, k=8):
    """Allele-frequency PDF using the combined hypergeo_mult() helper."""
    q = 1.0 - p
    total = 0.0
    for i in xrange(1, k+1):
        coeff = p * q * i * (i+1) * (2*i+1)
        total += (coeff *
                  hypergeo_mult(i, p, x, 100) *
                  exp(-t * i * (i+1) / (4*n)))
    return total
def cdf_mrca2(t, k, n):
    """
    Cumulative probability density of the age 't' of the most recent common
    ancestor (MRCA) of 'k' lineages in a population size 'n'
    """
    # A single lineage is its own MRCA at any age.
    if k == 1:
        return 1.0
    # Mixture of exponentials: each pair-count level i coalesces at rate
    # lam = C(i+1, 2)/n, weighted by mrca_const (defined elsewhere in
    # this module).
    s = 0.0
    for i in xrange(1, k):
        lam = (i+1) * i / 2.0 / n
        s += (1 - exp(- lam * t)) * mrca_const(i, 1, k-1)
    return s
def prob_multicoal_recon_topology_old(tree, recon, stree, n,
                                      root=None, leaves=None,
                                      lineages=None, top_stats=None):
    """
    Returns the log probability of a reconciled gene tree ('tree', 'recon')
    from the coalescent model given a species tree 'stree' and
    population sizes 'n'

    This definately has a bug, that the current code fixes.
    Kept only for reference; use the non-_old replacement instead.
    """
    popsizes = init_popsizes(stree, n)
    if lineages is None:
        lineages = count_lineages_per_branch(tree, recon, stree)
    if top_stats is None:
        top_stats = get_topology_stats(tree, recon, stree)
    # iterate through species tree branches
    lnp = 0.0  # log probability
    for snode in stree.postorder():
        if snode.parent:
            # non root branch: probability of b of a lineages surviving,
            # divided by the number of labeled histories
            a, b = lineages[snode]
            lnp += log(prob_coal_counts(a, b, snode.dist,
                                        popsizes[snode.name]))
            lnp -= log(num_labeled_histories(a, b))
        else:
            a = lineages[snode][0]
            lnp -= log(num_labeled_histories(a, 1))
    # correct for topologies H(T)
    # find connected subtrees that are in the same species branch
    subtrees = []
    subtree_root = {}
    for node in tree.preorder():
        if node.parent and recon[node] == recon[node.parent]:
            subtree_root[node] = subtree_root[node.parent]
        else:
            subtrees.append(node)
            subtree_root[node] = node
    # find leaves through recursion
    def walk(node, subtree, leaves):
        if node.is_leaf():
            leaves.append(node)
        elif (subtree_root[node.children[0]] != subtree and
              subtree_root[node.children[1]] != subtree):
            leaves.append(node)
        else:
            for child in node.children:
                walk(child, subtree, leaves)
    # apply correction for each subtree
    for subtree in subtrees:
        leaves = []
        for child in subtree.children:
            # NOTE(review): walk(subtree, ...) is called once per child
            # instead of walk(child, ...), duplicating the traversal --
            # presumably the bug acknowledged in the docstring.
            walk(subtree, subtree, leaves)
        if len(leaves) > 2:
            lnp += log(birthdeath.num_topology_histories(subtree, leaves))
    return lnp
def calc_prob_counts_table_old(gene_counts, T, stree, popsizes,
                               sroot, sleaves, stimes):
    """Old dynamic program for lineage-count probabilities per snode.

    Builds prob_counts[node][k] = probability that k lineages enter the
    branch above 'node', walking the species tree bottom-up from the
    leaf counts in gene_counts.  Kept for reference.
    """
    # use dynamic programming to calc prob of lineage counts
    prob_counts = {}
    def walk(node):
        if node in sleaves:
            # leaf case: the count is known exactly (probability mass 1)
            M = gene_counts[node.name]
            prob_counts[node] = [0.0] * (M+1)
            prob_counts[node][M] = 1.0
            return M
        else:
            # internal node case
            assert len(node.children) == 2
            c1 = node.children[0]
            c2 = node.children[1]
            ptime = stimes[node]
            t1 = ptime - stimes[c1]  # c1.dist
            t2 = ptime - stimes[c2]  # c2.dist
            M1 = walk(c1)
            M2 = walk(c2)
            M = M1 + M2  # max lineage counts in this snode
            n1 = popsizes[c1.name]
            n2 = popsizes[c2.name]
            # convolve the two children: k lineages split as m from c1
            # and k-m from c2, each weighted by coalescent transition
            # probabilities over the child branch lengths
            prob_counts[node] = [0, 0]
            for k in xrange(2, M+1):
                prob_counts[node].append(sum(
                    sum(prob_coal_counts(i, m, t1, n1) *
                        prob_counts[c1][i]
                        for i in xrange(m, M1+1)) *
                    sum(prob_coal_counts(i, k-m, t2, n2) *
                        prob_counts[c2][i]
                        for i in xrange(k-m, M2+1))
                    for m in xrange(1, k)))
            # sanity check: the distribution must (nearly) sum to 1
            assert abs(sum(prob_counts[node]) - 1.0) < .001
            return M
    walk(sroot)
    return prob_counts
def count_lineages_per_branch_old(tree, recon, stree, rev_recon=None):
    """
    Returns the count of gene lineages present at each node in the species
    tree 'tree' given a gene tree 'tree' and reconciliation 'recon'

    Maps each snode to [lineages entering branch, lineages leaving it].
    Legacy version: it also recomputes the result with the current
    count_lineages_per_branch() and asserts agreement.
    """
    # init reverse reconciliation
    if rev_recon is None:
        rev_recon = get_rev_recon(tree, recon, stree)
    # init lineage counts
    lineages = {}
    for snode in stree:
        if snode.is_leaf():
            lineages[snode] = [len([x for x in rev_recon[snode]
                                    if x.is_leaf()]), 0]
        else:
            lineages[snode] = [0, 0]
    # iterate through species tree branches
    for snode in stree.postorder():
        if snode.parent:
            # non root branch
            a = lineages[snode][0]
            # subtract number of coals in branch (internal gene nodes
            # mapped to this snode each merge two lineages into one)
            b = a - len([x for x in rev_recon.get(snode, [])
                         if len(x.children) > 1])
            lineages[snode][1] = b
            lineages[snode.parent][0] += b
        else:
            lineages[snode][1] = 1
    # cross-check against the current (non-_old) implementation
    lineages2 = count_lineages_per_branch(tree, recon, stree)
    assert lineages == lineages2
    return lineages
def get_rev_recon(tree, recon, stree):
    """
    Returns a reverse reconciliation

    A reverse reconciliation is a mapping from nodes in the species tree to
    lists of nodes in the gene tree.

    Raises Exception if 'recon' mentions a node that is not in 'tree'.
    """
    rev_recon = {}
    nodes = set(tree.postorder())
    # .items() instead of the Python-2-only .iteritems(): iteration
    # behavior is identical in Python 2 and keeps the helper usable
    # under Python 3.
    for node, snode in recon.items():
        if node not in nodes:
            raise Exception("node '%s' not in tree" % node.name)
        rev_recon.setdefault(snode, []).append(node)
    return rev_recon
def get_topology_stats_old(tree, recon, stree, rev_recon=None):
    """
    The function computes terms necessary for many topology calculations

    Returns (nodes_per_species, descend_nodes):
      nodes_per_species -- internal gene nodes reconciled to each snode
      descend_nodes     -- per internal gene node, 1 + count of child
                           subtrees reconciled to the same snode
    """
    nodes_per_species = {}  # How many gene nodes per species
    descend_nodes = {}  # How many descendent nodes recon to the same species
    # NOTE(review): this immediately overwrites the {} above -- the
    # first assignment is redundant but harmless.
    nodes_per_species = dict.fromkeys(stree, 0)
    # init reverse reconciliation
    if rev_recon is None:
        rev_recon = get_rev_recon(tree, recon, stree)
    # iterate through species tree (iteritems is Python-2-only)
    for snode, nodes in rev_recon.iteritems():
        nodes_per_species[snode] = len([x for x in nodes
                                        if len(x.children) > 1])
    # iterate through tree
    for node in tree.postorder():
        if not node.is_leaf() and len(node.children) > 1:
            descend_nodes[node] = 1 + sum(descend_nodes.get(child, 0)
                                          for child in node.children
                                          if recon[child] == recon[node])
    return nodes_per_species, descend_nodes
# this is depreciated; replaced by prob_fix method using new legendre method
def prob_fix_old(p, n, t, k=8, esp=0.001):
    """Probability of fixation (legacy, uncached Legendre evaluation)."""
    r = 1 - 2*p
    prob = p
    for i in xrange(1, k+1):
        decay = exp(-t * i * (i+1) / (4 * n))
        term = .5 * (-1)**i * (legendre_old(i-1, r) - legendre_old(i+1, r)) * decay
        # once a nonzero term falls below esp, add it and stop
        if term != 0.0 and abs(term) < esp:
            return prob + term
        prob += term
    return prob
| {
"content_hash": "67dac195b57ca8252f8834c465bcd7c8",
"timestamp": "",
"source": "github",
"line_count": 2198,
"max_line_length": 79,
"avg_line_length": 30.297088262056416,
"alnum_prop": 0.527758172780923,
"repo_name": "mdrasmus/argweaver",
"id": "2b6ab957a9a581e84ffa2de647995581cdb6b4b9",
"size": "66593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "argweaver/deps/compbio/coal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "33565"
},
{
"name": "C++",
"bytes": "1600820"
},
{
"name": "CSS",
"bytes": "1085"
},
{
"name": "HTML",
"bytes": "2742"
},
{
"name": "JavaScript",
"bytes": "101979"
},
{
"name": "Makefile",
"bytes": "4330"
},
{
"name": "Python",
"bytes": "1105013"
},
{
"name": "Shell",
"bytes": "4816"
}
],
"symlink_target": ""
} |
import logging
from sqlalchemy.inspection import inspect
from aquilon.worker.locks import NoLockKey, PlenaryKey
from aquilon.worker.templates import (Plenary, StructurePlenary,
add_location_info)
from aquilon.worker.templates.panutils import pan_assign, pan_include
from aquilon.aqdb.model import NetworkDevice
from aquilon.utils import nlist_key_re
LOGGER = logging.getLogger(__name__)
class PlenaryNetworkDeviceInfo(StructurePlenary):
    """Structure plenary describing one NetworkDevice's hardware."""

    @classmethod
    def template_name(cls, dbmachine):
        """Return the template path: network_device/<hub>/<building>/<label>."""
        loc = dbmachine.location
        return "network_device/%s/%s/%s" % (loc.hub.fullname.lower(),
                                            loc.building, dbmachine.label)

    def get_key(self, exclusive=True):
        """Return the lock key guarding writes of this plenary."""
        # A deleted device needs no lock at all.
        if inspect(self.dbobj).deleted:
            return NoLockKey(logger=self.logger)
        else:
            # TODO: this should become a CompileKey if we start generating
            # profiles for switches (see also templates/cluster.py)
            return PlenaryKey(network_device=self.dbobj, logger=self.logger,
                              exclusive=exclusive)

    def body(self, lines):
        """Append the pan statements for this device to 'lines'."""
        pan_assign(lines, "nodename", self.dbobj.label)
        if self.dbobj.serial_no:
            pan_assign(lines, "serialnumber", self.dbobj.serial_no)
        lines.append("")
        pan_assign(lines, "model_type", self.dbobj.model.model_type)
        pan_include(lines, "hardware/network_device/%s/%s" %
                    (self.dbobj.model.vendor.name, self.dbobj.model.name))
        # Console-server connectivity, one entry per client port.
        for port in self.dbobj.consoles:
            console_port = self.dbobj.consoles[port]
            pan_assign(lines, "hardware/console/port/%s/server_name" % console_port.client_port,
                       console_port.console_server)
            pan_assign(lines, "hardware/console/port/%s/server_port" % console_port.client_port,
                       console_port.port_number)
        lines.append("")
        add_location_info(lines, self.dbobj.location)
        lines.append("")
        # Collect per-interface info (type, optional MAC) keyed by name.
        interfaces = {}
        for interface in self.dbobj.interfaces:
            ifinfo = {}
            ifinfo["type"] = interface.interface_type
            if interface.mac:
                ifinfo["hwaddr"] = interface.mac
            interfaces[interface.name] = ifinfo
        for name in sorted(interfaces):
            # This is ugly. We can't blindly escape, because that would affect
            # e.g. VLAN interfaces. Calling unescape() for a non-escaped VLAN
            # interface name is safe though, so we can hopefully get rid of this
            # once the templates are changed to call unescape().
            if nlist_key_re.match(name):
                pan_assign(lines, "cards/nic/%s" % name,
                           interfaces[name])
            else:
                pan_assign(lines, "cards/nic/{%s}" % name,
                           interfaces[name])
        lines.append("")
Plenary.handlers[NetworkDevice] = PlenaryNetworkDeviceInfo
| {
"content_hash": "d8f329f9d3b87a85666c1c26d388e226",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 96,
"avg_line_length": 41.24657534246575,
"alnum_prop": 0.6054466954500166,
"repo_name": "quattor/aquilon",
"id": "f46b99ad61e150b66aaccd3047c289df59f2343a",
"size": "3730",
"binary": false,
"copies": "1",
"ref": "refs/heads/upstream",
"path": "lib/aquilon/worker/templates/network_device.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "1823"
},
{
"name": "Makefile",
"bytes": "5732"
},
{
"name": "Mako",
"bytes": "4178"
},
{
"name": "PLSQL",
"bytes": "102109"
},
{
"name": "PLpgSQL",
"bytes": "8091"
},
{
"name": "Pan",
"bytes": "1058"
},
{
"name": "Perl",
"bytes": "6057"
},
{
"name": "Python",
"bytes": "5884984"
},
{
"name": "SQLPL",
"bytes": "869"
},
{
"name": "Shell",
"bytes": "33547"
},
{
"name": "Smarty",
"bytes": "4603"
}
],
"symlink_target": ""
} |
"""
Functions with single return
Nodes
-----
Func : Function definition
Contains: Declares, Returns, Params, Block
Property: name
Returns : Function return variables
Contains: Var, ...
Params : Function parameter variables
Get : Function call
Example: "y(4)"
Contains: Gets
Property: name
Var : Function call hidden as variable
Example "y"
Contains: nothing
"""
import matlab2cpp
from .function import type_string
from .variables import Get
from .assign import Assign
def Var(node):
    """
    Function call as variable

    Writing a function name as if it were a variable is equivalent to
    calling the function without arguments, so translation is delegated
    to Get().

    property: name (of variable)
    Examples:
    >>> print(matlab2cpp.qscript("function y=f(); y=1; end; function g(); f"))
    int f()
    {
      int y ;
      y = 1 ;
      return y ;
    }
    <BLANKLINE>
    void g()
    {
      f() ;
    }
    """
    # push the job over to Get
    return Get(node)
# Translation rule for single-return "Returns" nodes: a pair of empty
# strings, so (per the note below) the return value is emitted verbatim
# with no surrounding text.
Returns = "", ""
"""single return value are used verbatim"""
def Params(node):
    """
    Parameters in functions with one return

    Adds type prefix.

    Contains: Var*

    Examples:
    >>> code = "function y=f(a,b,c,d,e); y=1"
    >>> from matlab2cpp.tree import Builder
    >>> builder = Builder()
    >>> builder.load("unamed", code)
    >>> builder[0].ftypes = {"f":{"a": "int", "b":"double", "c":"cx_mat",
    ...                           "d":"func_lambda", "e":"struct", "y":"int"}}
    >>> print(matlab2cpp.qscript(builder))
    int f(int a, double b, cx_mat c, std::function d, _E e)
    {
      int y ;
      y = 1 ;
      return y ;
    }
    """
    # With the -ref/-reference flag, array-valued parameters (dim > 0)
    # are passed as const references; scalars stay pass-by-value.
    if node.project.builder.reference:
        pieces = []
        for child in node:
            ctype = type_string(child)
            if child.dim > 0:
                pieces.append("const " + ctype + "& " + child.name)
            else:
                pieces.append(ctype + " " + child.name)
        return ", ".join(pieces)
    return ", ".join(type_string(child) + " " + child.name for child in node)
def Declares(node):
    """
    Declarations in the beginning of function

    Contains: Var*

    Examples:
    >>> print(matlab2cpp.qscript("function d=f(); a=1; b.c='2'; d.e(1)=[4,5]"))
    _D f()
    {
      _B b ;
      _D d ;
      int a ;
      a = 1 ;
      b.c = "2" ;
      sword _d [] = {4, 5} ;
      d.e[0] = irowvec(_d, 2, false) ;
      return d ;
    }
    """
    if not node:
        return ""
    # NOTE(review): 'returns' is assigned but never used below.
    returns = node.parent[1]
    declares = {}  # {"int" : ["a", "b"]} -> int a, b ;
    structs = {}  # {"_A" : "a"} -> _A a;
    # fill declares and structs
    for child in node[:]:
        type = type_string(child)
        if type not in declares:
            declares[type] = []
        declares[type].append(child)
        if child.type == "structs":
            structs[child.name] = child
    # create output: one line per datatype, variables sorted by name
    out = ""
    keys = sorted(declares.keys())
    for key in keys:
        val = sorted(declares[key], key=lambda x: x.name)
        # datatype
        out += "\n" + key + " "
        # all variables with that type
        for v in val:
            out += str(v)
            if v.name in structs:
                # struct arrays carry their size from the program's
                # struct table (node.program[3])
                structs_ = node.program[3]
                struct = structs_[structs_.names.index(v.name)]
                size = struct[struct.names.index("_size")].value
                out += "[%s]" % size
            out += ", "
        out = out[:-2] + " ;"
    return out[1:]
def Func(node):
    """
    Function declaration

    Contains: Declares Returns Params Block
    Property: name (of function)

    Examples:
    >>> print(matlab2cpp.qscript("function y=f(); y=1"))
    int f()
    {
      int y ;
      y = 1 ;
      return y ;
    }
    """
    # type information is in params and declare, not return
    retname = node[1][0].name
    if retname in node[0].names:
        retval = node[0][node[0].names.index(retname)]
    if retname in node[2].names:
        # Bugfix: index Params (node[2]) with an index computed from its
        # OWN name list.  The old code used node[1].names.index(retname),
        # i.e. an index from Returns, which can select the wrong
        # parameter when the return variable is also a parameter.
        retval = node[2][node[2].names.index(retname)]
    rettype = type_string(retval)
    # empty code_block function with return statement
    if len(node[-1]) == 0:
        return rettype + """ %(name)s(%(2)s)
{
  return %(1)s
}"""
    # function ends with a return statement
    if node[-1][-1] and node[-1][-1][-1].cls == "Return":
        return rettype + """ %(name)s(%(2)s)
{
  %(0)s
  %(3)s
}"""
    return rettype + """ %(name)s(%(2)s)
{
  %(0)s
  %(3)s
  return %(1)s ;
}"""
def Main(node):
    """
    Main function

    Contains: Declares Returns Params Block
    Property: name (of function)

    Examples:
    >>> print(matlab2cpp.qcpp("4"))
    #include <armadillo>
    using namespace arma ;
    <BLANKLINE>
    int main(int argc, char** argv)
    {
      4 ;
      return 0 ;
    }
    """
    template_with_declares = """int main(int argc, char** argv)
{
  %(0)s
  %(3)s
  return 0 ;
}"""
    template_plain = """int main(int argc, char** argv)
{
  %(3)s
  return 0 ;
}"""
    # emit the declarations slot only when the Declares child is non-empty
    return template_with_declares if node[0] else template_plain
| {
"content_hash": "926ae64f93e78a5ead93e1787c1a8f50",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 97,
"avg_line_length": 21.135802469135804,
"alnum_prop": 0.5077881619937694,
"repo_name": "jonathf/matlab2cpp",
"id": "ae0c503023a5ade469d2ce072e8bcf212cb8199a",
"size": "5136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/matlab2cpp/rules/_func_return.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "3618"
},
{
"name": "Mathematica",
"bytes": "43"
},
{
"name": "Matlab",
"bytes": "1560"
},
{
"name": "Python",
"bytes": "558665"
}
],
"symlink_target": ""
} |
"""Developer helper tool for merging CLs from ToT to branches.
This simple program takes changes from gerrit/gerrit-int and creates new
changes for them on the desired branch using your gerrit/ssh credentials. To
specify a change on gerrit-int, you must prefix the change with a *.
Note that this script is best used from within an existing checkout of
Chromium OS that already has the changes you want merged to the branch in it
i.e. if you want to push changes to crosutils.git, you must have src/scripts
checked out. If this isn't true e.g. you are running this script from a
minilayout or trying to upload an internal change from a non internal checkout,
you must specify some extra options: use the --nomirror option and use -e to
specify your email address. This tool will then checkout the git repo fresh
using the credentials for the -e/email you specified and upload the change. Note
you can always use this method but it's slower than the "mirrored" method and
requires more typing :(.
Examples:
cros_merge_to_branch 32027 32030 32031 release-R22.2723.B
This will create changes for 32027, 32030 and 32031 on the R22 branch. To look
up the name of a branch, go into a git sub-dir and type 'git branch -a' and the
find the branch you want to merge to. If you want to upload internal changes
from gerrit-int, you must prefix the gerrit change number with a * e.g.
cros_merge_to_branch *26108 release-R22.2723.B
For more information on how to do this yourself you can go here:
http://dev.chromium.org/chromium-os/how-tos-and-troubleshooting/working-on-a-br\
anch
"""
from __future__ import print_function
import errno
import os
import re
import shutil
import sys
import tempfile
from chromite.cbuildbot import constants
from chromite.cbuildbot import repository
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import gerrit
from chromite.lib import git
from chromite.lib import patch as cros_patch
_USAGE = """
cros_merge_to_branch [*]change_number1 [[*]change_number2 ...] branch\n\n%s\
""" % __doc__
def _GetParser():
  """Returns the parser to use for this module."""
  parser = commandline.OptionParser(usage=_USAGE)
  parser.add_option('-d', '--draft', default=False, action='store_true',
                    help='upload a draft to Gerrit rather than a change')
  parser.add_option('-n', '--dry-run', default=False, action='store_true',
                    dest='dryrun',
                    help='apply changes locally but do not upload them')
  parser.add_option('-e', '--email',
                    help='if specified, use this email instead of '
                    'the email you would upload changes as; must be set w/'
                    '--nomirror')
  # Note the inverted flags below: options.mirror and options.wipe
  # default to True; the command-line switches turn them OFF.
  parser.add_option('--nomirror', default=True, dest='mirror',
                    action='store_false', help='checkout git repo directly; '
                    'requires --email')
  parser.add_option('--nowipe', default=True, dest='wipe', action='store_false',
                    help='do not wipe the work directory after finishing')
  return parser
def _UploadChangeToBranch(work_dir, patch, branch, draft, dryrun):
  """Creates a new change from GerritPatch |patch| to |branch| from |work_dir|.

  Args:
    patch: Instance of GerritPatch to upload.
    branch: Branch to upload to.
    work_dir: Local directory where repository is checked out in.
    draft: If True, upload to refs/draft/|branch| rather than refs/for/|branch|.
    dryrun: Don't actually upload a change but go through all the steps up to
      and including git push --dry-run.

  Returns:
    A list of all the gerrit URLs found.
  """
  upload_type = 'drafts' if draft else 'for'
  # Download & setup the patch if need be.
  patch.Fetch(work_dir)
  # Apply the actual change.
  patch.CherryPick(work_dir, inflight=True, leave_dirty=True)
  # Get the new sha1 after apply.
  # NOTE(review): this value is dead -- it is recomputed below after the
  # commit message is amended, before it is ever read.
  new_sha1 = git.GetGitRepoRevision(work_dir)
  reviewers = set()
  # Filter out tags that are added by gerrit and chromite.
  filter_re = re.compile(
      r'((Commit|Trybot)-Ready|Commit-Queue|(Reviewed|Submitted|Tested)-by): ')
  # Rewrite the commit message all the time.  Latest gerrit doesn't seem
  # to like it when you use the same ChangeId on different branches.
  msg = []
  for line in patch.commit_message.splitlines():
    if line.startswith('Reviewed-on: '):
      line = 'Previous-' + line
    elif filter_re.match(line):
      # If the tag is malformed, or the person lacks a name,
      # then that's just too bad -- throw it away.
      ele = re.split(r'[<>@]+', line)
      if len(ele) == 4:
        reviewers.add('@'.join(ele[-3:-1]))
      continue
    msg.append(line)
  msg += ['(cherry picked from commit %s)' % patch.sha1]
  git.RunGit(work_dir, ['commit', '--amend', '-F', '-'],
             input='\n'.join(msg).encode('utf8'))
  # Get the new sha1 after rewriting the commit message.
  new_sha1 = git.GetGitRepoRevision(work_dir)
  # Create and use a LocalPatch to Upload the change to Gerrit.
  local_patch = cros_patch.LocalPatch(
      work_dir, patch.project_url, constants.PATCH_BRANCH,
      patch.tracking_branch, patch.remote, new_sha1)
  # First try with the harvested reviewers; if gerrit rejects any of
  # them as unknown users, retry once with no reviewers.  (The loop
  # variable deliberately shadows the 'reviewers' set above.)
  for reviewers in (reviewers, ()):
    try:
      return local_patch.Upload(
          patch.project_url, 'refs/%s/%s' % (upload_type, branch),
          carbon_copy=False, dryrun=dryrun, reviewers=reviewers)
    except cros_build_lib.RunCommandError as e:
      if (e.result.returncode == 128 and
          re.search(r'fatal: user ".*?" not found', e.result.error)):
        logging.warning('Some reviewers were not found (%s); '
                        'dropping them & retrying upload', ' '.join(reviewers))
        continue
      raise
def _SetupWorkDirectoryForPatch(work_dir, patch, branch, manifest, email):
  """Set up local dir for uploading changes to the given patch's project.

  Clones the patch's repository into |work_dir| (using |manifest| as a
  local git reference when available), sets the committer email, and
  creates the local push branch.

  Returns:
    The resolved remote branch name (may differ from |branch| when the
    name only matched after MatchSingleBranchName resolution).
  """
  logging.notice('Setting up dir %s for uploading changes to %s', work_dir,
                 patch.project_url)
  # Clone the git repo from reference if we have a pointer to a
  # ManifestCheckout object.
  reference = None
  if manifest:
    # Get the path to the first checkout associated with this change. Since
    # all of the checkouts share git objects, it doesn't matter which checkout
    # we pick.
    path = manifest.FindCheckouts(patch.project, only_patchable=True)[0]['path']
    reference = os.path.join(constants.SOURCE_ROOT, path)
    if not os.path.isdir(reference):
      logging.error('Unable to locate git checkout: %s', reference)
      logging.error('Did you mean to use --nomirror?')
      # This will do an "raise OSError" with the right values.
      os.open(reference, os.O_DIRECTORY)
    # Use the email if email wasn't specified.
    if not email:
      email = git.GetProjectUserEmail(reference)
  repository.CloneGitRepo(work_dir, patch.project_url, reference=reference)
  # Set the git committer.
  git.RunGit(work_dir, ['config', '--replace-all', 'user.email', email])
  mbranch = git.MatchSingleBranchName(
      work_dir, branch, namespace='refs/remotes/origin/')
  if branch != mbranch:
    logging.notice('Auto resolved branch name "%s" to "%s"', branch, mbranch)
    branch = mbranch
  # Finally, create a local branch for uploading changes to the given remote
  # branch.
  git.CreatePushBranch(
      constants.PATCH_BRANCH, work_dir, sync=False,
      remote_push_branch=git.RemoteRef('ignore', 'origin/%s' % branch))
  return branch
def _ManifestContainsAllPatches(manifest, patches):
"""Returns true if the given manifest contains all the patches.
Args:
manifest: an instance of git.Manifest
patches: a collection of GerritPatch objects.
"""
for patch in patches:
if not manifest.FindCheckouts(patch.project):
logging.error('Your manifest does not have the repository %s for '
'change %s. Please re-run with --nomirror and '
'--email set', patch.project, patch.gerrit_number)
return False
return True
def main(argv):
  """Entry point: merge each listed gerrit change onto the target branch.

  argv holds the gerrit change numbers followed by the branch name (see
  _USAGE).  Returns 0 on success, 1 on any handled failure.
  """
  parser = _GetParser()
  options, args = parser.parse_args(argv)
  if len(args) < 2:
    parser.error('Not enough arguments specified')
  changes = args[0:-1]
  try:
    patches = gerrit.GetGerritPatchInfo(changes)
  except ValueError as e:
    logging.error('Invalid patch: %s', e)
    cros_build_lib.Die('Did you swap the branch/gerrit number?')
  branch = args[-1]
  # Suppress all logging info output unless we're running debug.
  if not options.debug:
    logging.getLogger().setLevel(logging.NOTICE)
  # Get a pointer to your repo checkout to look up the local project paths for
  # both email addresses and for using your checkout as a git mirror.
  manifest = None
  if options.mirror:
    try:
      manifest = git.ManifestCheckout.Cached(constants.SOURCE_ROOT)
    except OSError as e:
      if e.errno == errno.ENOENT:
        logging.error('Unable to locate ChromiumOS checkout: %s',
                      constants.SOURCE_ROOT)
        logging.error('Did you mean to use --nomirror?')
        return 1
      raise
    if not _ManifestContainsAllPatches(manifest, patches):
      return 1
  else:
    if not options.email:
      chromium_email = '%s@chromium.org' % os.environ['USER']
      logging.notice('--nomirror set without email, using %s', chromium_email)
      options.email = chromium_email
  index = 0
  work_dir = None
  root_work_dir = tempfile.mkdtemp(prefix='cros_merge_to_branch')
  try:
    for index, (change, patch) in enumerate(zip(changes, patches)):
      # We only clone the project and set the committer the first time.
      work_dir = os.path.join(root_work_dir, patch.project)
      if not os.path.isdir(work_dir):
        branch = _SetupWorkDirectoryForPatch(work_dir, patch, branch, manifest,
                                             options.email)
      # Now that we have the project checked out, let's apply our change and
      # create a new change on Gerrit.
      logging.notice('Uploading change %s to branch %s', change, branch)
      urls = _UploadChangeToBranch(work_dir, patch, branch, options.draft,
                                   options.dryrun)
      logging.notice('Successfully uploaded %s to %s', change, branch)
      for url in urls:
        if url.endswith('\x1b[K'):
          # Git will often times emit these escape sequences.
          url = url[0:-3]
        logging.notice(' URL: %s', url)
  except (cros_build_lib.RunCommandError, cros_patch.ApplyPatchException,
          git.AmbiguousBranchName, OSError) as e:
    # Tell the user how far we got: everything before 'index' uploaded.
    good_changes = changes[:index]
    bad_changes = changes[index:]
    logging.warning('############## SOME CHANGES FAILED TO UPLOAD ############')
    if good_changes:
      logging.notice(
          'Successfully uploaded change(s) %s', ' '.join(good_changes))
    # Printing out the error here so that we can see exactly what failed. This
    # is especially useful to debug without using --debug.
    logging.error('Upload failed with %s', str(e).strip())
    if not options.wipe:
      logging.error('Not wiping the directory. You can inspect the failed '
                    'change at %s; After fixing the change (if trivial) you can'
                    ' try to upload the change by running:\n'
                    'git commit -a -c CHERRY_PICK_HEAD\n'
                    'git push %s HEAD:refs/for/%s', work_dir, patch.project_url,
                    branch)
    else:
      logging.error('--nowipe not set thus deleting the work directory. If you '
                    'wish to debug this, re-run the script with change(s) '
                    '%s and --nowipe by running:\n %s %s %s --nowipe',
                    ' '.join(bad_changes), sys.argv[0], ' '.join(bad_changes),
                    branch)
    # Suppress the stack trace if we're not debugging.
    if options.debug:
      raise
    else:
      return 1
  finally:
    # The scratch tree is removed unless the user asked for --nowipe.
    if options.wipe:
      shutil.rmtree(root_work_dir)
  if options.dryrun:
    logging.notice('Success! To actually upload changes, re-run without '
                   '--dry-run.')
  else:
    logging.notice('Successfully uploaded all changes requested.')
  return 0
| {
"content_hash": "1d2dbe416a52972d71fe7576574bde6a",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 80,
"avg_line_length": 39.022364217252395,
"alnum_prop": 0.6643196332077943,
"repo_name": "guorendong/iridium-browser-ubuntu",
"id": "7b2be2b14fe764e5360e799cc3f646041f007544",
"size": "12384",
"binary": false,
"copies": "1",
"ref": "refs/heads/ubuntu/precise",
"path": "third_party/chromite/scripts/cros_merge_to_branch.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "8402"
},
{
"name": "Assembly",
"bytes": "256197"
},
{
"name": "Batchfile",
"bytes": "34966"
},
{
"name": "C",
"bytes": "15445429"
},
{
"name": "C++",
"bytes": "276628399"
},
{
"name": "CMake",
"bytes": "27829"
},
{
"name": "CSS",
"bytes": "867238"
},
{
"name": "Emacs Lisp",
"bytes": "3348"
},
{
"name": "Go",
"bytes": "13628"
},
{
"name": "Groff",
"bytes": "7777"
},
{
"name": "HTML",
"bytes": "20250399"
},
{
"name": "Java",
"bytes": "9950308"
},
{
"name": "JavaScript",
"bytes": "13873772"
},
{
"name": "LLVM",
"bytes": "1169"
},
{
"name": "Logos",
"bytes": "6893"
},
{
"name": "Lua",
"bytes": "16189"
},
{
"name": "Makefile",
"bytes": "179129"
},
{
"name": "Objective-C",
"bytes": "1871766"
},
{
"name": "Objective-C++",
"bytes": "9674498"
},
{
"name": "PHP",
"bytes": "42038"
},
{
"name": "PLpgSQL",
"bytes": "163248"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "474121"
},
{
"name": "Python",
"bytes": "11646662"
},
{
"name": "Ragel in Ruby Host",
"bytes": "104923"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "1151673"
},
{
"name": "Standard ML",
"bytes": "5034"
},
{
"name": "VimL",
"bytes": "4075"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
} |
import logging
from core.emulator.coreemu import CoreEmu
from core.emulator.data import IpPrefixes, NodeOptions
from core.emulator.enumerations import EventTypes
from core.nodes.base import CoreNode
from core.nodes.lxd import LxcNode
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    emu = CoreEmu()
    session = emu.create_session()
    session.set_state(EventTypes.CONFIGURATION_STATE)
    try:
        ip_prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16")
        lxc_options = NodeOptions(image="ubuntu")
        # first node runs inside an LXD container
        lxd_node = session.add_node(LxcNode, options=lxc_options)
        lxd_iface = ip_prefixes.create_iface(lxd_node)
        # second node is a standard CORE node
        core_node = session.add_node(CoreNode)
        core_iface = ip_prefixes.create_iface(core_node)
        # wire the two nodes together
        session.add_link(lxd_node.id, core_node.id, lxd_iface, core_iface)
        # bring the emulation up
        session.instantiate()
    finally:
        input("continue to shutdown")
        emu.shutdown()
| {
"content_hash": "4dccea58f6ffefbf1bfdd40252bc9e3a",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 78,
"avg_line_length": 30.41176470588235,
"alnum_prop": 0.6789168278529981,
"repo_name": "pexip/os-core-network",
"id": "b41520d8929bd5926428e94b280143f7304f1e7d",
"size": "1034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "daemon/examples/lxd/lxd2core.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "92828"
},
{
"name": "Dockerfile",
"bytes": "2855"
},
{
"name": "HTML",
"bytes": "329"
},
{
"name": "M4",
"bytes": "8355"
},
{
"name": "Makefile",
"bytes": "12003"
},
{
"name": "Python",
"bytes": "1664476"
},
{
"name": "Shell",
"bytes": "31246"
},
{
"name": "Tcl",
"bytes": "923461"
}
],
"symlink_target": ""
} |
"""
System-level utilities and helper functions.
"""
import math
import re
import sys
import unicodedata
import six
from cinder.openstack.common.gettextutils import _
# Exponent applied to the unit-system base for each size prefix,
# e.g. 'Ki' -> base**1, 'M'/'Mi' -> base**2 (used by string_to_bytes).
UNIT_PREFIX_EXPONENT = {
    'k': 1,
    'K': 1,
    'Ki': 1,
    'M': 2,
    'Mi': 2,
    'G': 3,
    'Gi': 3,
    'T': 4,
    'Ti': 4,
}
# Per unit system: (base, parser regex). Regex groups are
# (magnitude, optional prefix, b|bit|B). SI deliberately excludes 'K'.
UNIT_SYSTEM_INFO = {
    'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')),
    'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')),
}
# Case-insensitive truthy/falsy spellings recognized by bool_from_string.
TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')
# to_slug helpers: strip non word/space/hyphen chars, then collapse
# runs of hyphens/whitespace into a single '-'.
SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]")
SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")
# NOTE(flaper87): The following 3 globals are used by `mask_password`
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
_SANITIZE_PATTERNS = []
# Each template covers one serialization shape: key=value, XML element,
# JSON/dict entry (plain and u'' prefixed), CLI-style argument lists.
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
                    r'(<%(key)s>).*?(</%(key)s>)',
                    r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
                    r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])',
                    r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?[\'"])'
                    '.*?([\'"])',
                    r'(%(key)s\s*--?[A-z]+\s*)\S+(\s*)']
# Pre-compile every (key, template) combination once at import time so
# mask_password only does substitutions at call time.
for key in _SANITIZE_KEYS:
    for pattern in _FORMAT_PATTERNS:
        reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
        _SANITIZE_PATTERNS.append(reg_ex)
def int_from_bool_as_string(subject):
    """Interpret a string as a boolean and return either 1 or 0.

    Any string value in:

        ('True', 'true', 'On', 'on', '1')

    is interpreted as a boolean True.

    Useful for JSON-decoded stuff and config file parsing

    :param subject: value to interpret (coerced to text by
                    bool_from_string if necessary)
    :returns: 1 if *subject* parses as true, otherwise 0
    """
    # int(bool) replaces the dated and error-prone ``x and 1 or 0``
    # construct; bool_from_string always returns a real bool here.
    return int(bool_from_string(subject))
def bool_from_string(subject, strict=False, default=False):
    """Interpret a string as a boolean.

    Matching is case-insensitive: 't', 'true', 'on', 'y', 'yes' and '1'
    yield True; 'f', 'false', 'off', 'n', 'no' and '0' yield False.

    :param subject: value to interpret; non-strings are coerced to text.
    :param strict: when True, raise ValueError on unrecognized input
                   instead of falling back to *default*.
    :param default: value returned for unrecognized input when not strict.
    :returns: the parsed boolean.
    :raises ValueError: in strict mode, for any value outside the
                        recognized true/false spellings.
    """
    if not isinstance(subject, six.string_types):
        subject = six.text_type(subject)

    normalized = subject.strip().lower()
    if normalized in TRUE_STRINGS:
        return True
    if normalized in FALSE_STRINGS:
        return False
    if not strict:
        return default

    # Strict mode: report the offending value and every accepted spelling.
    acceptable = ', '.join(
        "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
    msg = _("Unrecognized value '%(val)s', acceptable values are:"
            " %(acceptable)s") % {'val': subject,
                                  'acceptable': acceptable}
    raise ValueError(msg)
def safe_decode(text, incoming=None, errors='strict'):
    """Decode *text* to unicode if it is not unicode already.

    :param text: str/bytes value to decode.
    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a unicode `incoming` encoded
                representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, (six.string_types, six.binary_type)):
        raise TypeError("%s can't be decoded" % type(text))

    # Already unicode -- nothing to do.
    if isinstance(text, six.text_type):
        return text

    encoding = incoming or (sys.stdin.encoding or
                            sys.getdefaultencoding())
    try:
        return text.decode(encoding, errors)
    except UnicodeDecodeError:
        # The interpreter's default/stdin encoding could not decode the
        # bytes (common when LANG is unset and ASCII/ANSI is assumed).
        # Fall back to UTF-8, which is an ASCII superset.
        return text.decode('utf-8', errors)
def safe_encode(text, incoming=None,
                encoding='utf-8', errors='strict'):
    """Encode *text* as bytes in *encoding*.

    If incoming is not specified, text is expected to be encoded with
    current python's default encoding. (`sys.getdefaultencoding`)

    :param text: str/bytes value to encode.
    :param incoming: Text's current encoding
    :param encoding: Expected encoding for text (Default UTF-8)
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a bytestring `encoding` encoded
                representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, (six.string_types, six.binary_type)):
        raise TypeError("%s can't be encoded" % type(text))

    source = incoming or (sys.stdin.encoding or
                          sys.getdefaultencoding())

    if isinstance(text, six.text_type):
        return text.encode(encoding, errors)
    if text and encoding != source:
        # Bytes in a different encoding: decode first, then re-encode.
        return safe_decode(text, source, errors).encode(encoding, errors)
    # Empty bytes, or already in the requested encoding.
    return text
def string_to_bytes(text, unit_system='IEC', return_int=False):
    """Convert a size string like '12.5GB' into a number of bytes.

    The units supported for IEC ::

        Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it)
        KB, KiB, MB, MiB, GB, GiB, TB, TiB

    The units supported for SI ::

        kb(it), Mb(it), Gb(it), Tb(it)
        kB, MB, GB, TB

    Note that the SI unit system does not support capital letter 'K'

    :param text: String input for bytes size conversion.
    :param unit_system: Unit system for byte size conversion ('IEC'/'SI').
    :param return_int: If True, round up and return an integer.
    :returns: Numerical representation of text in bytes.
    :raises ValueError: If text has an invalid value.
    """
    if unit_system not in UNIT_SYSTEM_INFO:
        msg = _('Invalid unit system: "%s"') % unit_system
        raise ValueError(msg)
    base, reg_ex = UNIT_SYSTEM_INFO[unit_system]

    match = reg_ex.match(text)
    if not match:
        msg = _('Invalid string format: %s') % text
        raise ValueError(msg)

    magnitude = float(match.group(1))
    unit_prefix = match.group(2)
    # 'b'/'bit' means the magnitude is in bits, not bytes.
    if match.group(3) in ['b', 'bit']:
        magnitude /= 8

    if unit_prefix:
        result = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix])
    else:
        result = magnitude
    return int(math.ceil(result)) if return_int else result
def to_slug(value, incoming=None, errors="strict"):
    """Normalize a string into a URL-friendly slug.

    Convert to lowercase, remove non-word characters, and convert spaces
    to hyphens.  Inspired by Django's `slugify` filter.

    :param value: Text to slugify
    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: slugified unicode representation of `value`
    :raises TypeError: If text is not an instance of str
    """
    value = safe_decode(value, incoming, errors)
    # ASCII-fold accented characters; the ascii/"ignore" encode-decode
    # pair cannot fail, so no error handling is needed here.
    folded = unicodedata.normalize("NFKD", value)
    ascii_value = folded.encode("ascii", "ignore").decode("ascii")
    stripped = SLUGIFY_STRIP_RE.sub("", ascii_value).strip().lower()
    return SLUGIFY_HYPHENATE_RE.sub("-", stripped)
def mask_password(message, secret="***"):
    """Replace password values in *message* with *secret*.

    :param message: The string which includes security information.
    :param secret: value with which to replace passwords.
    :returns: The unicode value of message with the password fields masked.

    For example:

    >>> mask_password("'adminPass' : 'aaaaa'")
    "'adminPass' : '***'"
    >>> mask_password("'admin_pass' : 'aaaaa'")
    "'admin_pass' : '***'"
    >>> mask_password('"password" : "aaaaa"')
    '"password" : "***"'
    >>> mask_password("'original_password' : 'aaaaa'")
    "'original_password' : '***'"
    >>> mask_password("u'original_password' : u'aaaaa'")
    "u'original_password' : u'***'"
    """
    message = six.text_type(message)

    # Fast path: if no sensitive key occurs anywhere in the message
    # there is nothing to mask.
    if not any(key in message for key in _SANITIZE_KEYS):
        return message

    # Keep the surrounding delimiters (groups 1 and 2), replace the value.
    replacement = r'\g<1>' + secret + r'\g<2>'
    for pattern in _SANITIZE_PATTERNS:
        message = re.sub(pattern, replacement, message)
    return message
| {
"content_hash": "6bd682ddee44836270c68c9315be6d58",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 79,
"avg_line_length": 34.23571428571429,
"alnum_prop": 0.6089088253703318,
"repo_name": "github-borat/cinder",
"id": "b39dc1c39473dca28ead50bcdd42c6027173529e",
"size": "10223",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/openstack/common/strutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6575951"
},
{
"name": "Shell",
"bytes": "8998"
}
],
"symlink_target": ""
} |
from traits.api import \
HasTraits, Int, Directory, Str, List, \
on_trait_change, Dict, Property, cached_property
from traitsui.api import \
View, Item
import numpy as np
from matresdev.db.simdb.simdb import simdb
import os.path
import re
import platform
import time
# Select a timer function per platform (time.clock historically had
# better resolution than time.time on Windows).
# NOTE(review): time.clock was removed in Python 3.8 -- time.perf_counter
# is the modern replacement; confirm before upgrading the interpreter.
# Also note: on platforms other than Linux/Windows, sysclock is undefined.
if platform.system() == 'Linux':
    sysclock = time.time
elif platform.system() == 'Windows':
    sysclock = time.clock
class AramisInfo(HasTraits):
    '''Basic information of Aramis database obtained from directory name and
    from files placed in the data directory.
    '''
    data_dir = Directory(auto_set=False, enter_set=True, params_changed=True)
    '''Directory containing experiment data files exported by Aramis Software.
    '''
    npy_dir = Property(Directory, depends_on='data_dir')
    '''Directory containing data files converted to .npy.
    '''
    @cached_property
    def _get_npy_dir(self):
        return os.path.join(self.data_dir, 'npy/')
    aramis_stage_list = Property(List(Int), depends_on='data_dir')
    '''List of stage numbers obtained from filenames of Aramis files
    '''
    @cached_property
    def _get_aramis_stage_list(self):
        # Prefer converted .npy files when present, otherwise fall back
        # to the raw .txt exports in data_dir.
        try:
            if os.path.exists(self.npy_dir):
                file_list = [v for v in os.listdir(self.npy_dir)
                             if os.path.splitext(v)[1] == ".npy"]
            else:
                file_list = [v for v in os.listdir(self.data_dir)
                             if os.path.splitext(v)[1] == ".txt"]
            aramis_stage_list = []
            # Stage number is encoded in the displacement file names,
            # e.g. 'displ-Stage-0-42...' -> 42.
            pat = r'displ.*-(?P<step>\d+).*'
            for f in file_list:
                m = re.match(pat, f)
                if m:
                    aramis_stage_list.append(int(m.groupdict()['step']))
            aramis_stage_list.sort()
            return aramis_stage_list
        except Exception:
            # Was a bare 'except:', which also swallowed SystemExit and
            # KeyboardInterrupt.  Keep the best-effort fallback (e.g. when
            # data_dir is not set yet or unreadable) but only for real errors.
            return []
    number_of_steps = Property(Int, depends_on='data_dir')
    '''Number of steps - can differ from max value in aramis_stage_list
    '''
    @cached_property
    def _get_number_of_steps(self):
        return len(self.aramis_stage_list)
    step_list = Property(List(Int), depends_on='data_dir')
    '''List of step numbers
    '''
    @cached_property
    def _get_step_list(self):
        return np.arange(self.number_of_steps, dtype=int).tolist()
    specimen_name = Property(Str, depends_on='data_dir')
    '''Specimen name obtained from the folder name
    '''
    @cached_property
    def _get_specimen_name(self):
        return os.path.split(self.data_dir)[-1]
    undeformed_coords_filename = Str('undeformed_coords-Stage-0-0')
    '''Name of the file containing coordinates in initial state
    '''
    displacements_basename = Str('displ-Stage-0-')
    '''Name of the file containing displacement of facets
    '''
    facet_params_dict = Property(Dict, depends_on='data_dir')
    '''Dictionary containing facet parameters obtained by decompilation of
    the specimen_name.
    '''
    @cached_property
    def _get_facet_params_dict(self):
        # Specimen name encodes facet size/step per axis,
        # e.g. '...Xf15s13-Yf15s13...'.
        # NOTE(review): if the directory name does not match this pattern,
        # m is None and the groupdict() call raises AttributeError --
        # confirm whether a clearer error is wanted here.
        pat = r'.*Xf(?P<fsz_x>\d+)s(?P<fst_x>\d+)-Yf(?P<fsz_y>\d+)s(?P<fst_y>\d+).*'
        m = re.match(pat, self.specimen_name)
        return m.groupdict()
    n_px_facet_size_x = Property(Int, depends_on='data_dir')
    '''Size of a facet in x-direction [pixel]
    '''
    @cached_property
    def _get_n_px_facet_size_x(self):
        return int(self.facet_params_dict['fsz_x'])
    n_px_facet_step_x = Property(Int, depends_on='data_dir')
    '''Distance between the mid-points of two facets in x-direction [pixel]
    '''
    @cached_property
    def _get_n_px_facet_step_x(self):
        return int(self.facet_params_dict['fst_x'])
    n_px_facet_size_y = Property(Int, depends_on='data_dir')
    '''Size of a facet in y-direction [pixel]
    '''
    @cached_property
    def _get_n_px_facet_size_y(self):
        return int(self.facet_params_dict['fsz_y'])
    n_px_facet_step_y = Property(Int, depends_on='data_dir')
    '''Distance between the mid-points of two facets in y-direction [pixel]
    '''
    @cached_property
    def _get_n_px_facet_step_y(self):
        return int(self.facet_params_dict['fst_y'])
    view = View(
        Item('specimen_name', style='readonly'),
        id='aramisCDT.info',
        resizable=True
    )
| {
"content_hash": "6e9de9d831a5f4a90a8c82160229e416",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 102,
"avg_line_length": 32.95890410958904,
"alnum_prop": 0.5978802992518704,
"repo_name": "simvisage/aramis_cdt",
"id": "63f4cfbab2c243c0e24bd83f803ce2b11c04df1f",
"size": "5369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aramis_cdt/aramis_info.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "166087"
}
],
"symlink_target": ""
} |
from scrapy.item import Item, Field
class FisRaces(Item):
    """Scrapy item for one scraped FIS race listing: where/when it runs,
    its category/genre/discipline, free-form info text and the parsed
    results table."""
    location = Field()
    id = Field()
    link = Field()
    date = Field()
    category = Field()
    genre = Field()
    info = Field()
    discipline = Field()
    table = Field()
class FisRanking(Item):
    """Scrapy item for a scraped FIS ranking page, with separate payloads
    for the men's and women's standings."""
    id = Field()
    link = Field()
    info = Field()
    men = Field()
    women = Field()
| {
"content_hash": "d67f85e189431069b8add1e7ae1aa3c1",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 35,
"avg_line_length": 17.333333333333332,
"alnum_prop": 0.5576923076923077,
"repo_name": "Tooskich/python_core",
"id": "43f20a25ec3592428863c62cb123935113413f54",
"size": "491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rankings/fis/fis/items.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "75827"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import pytest
from prompt_toolkit.completion import Completion
from prompt_toolkit.document import Document
@pytest.fixture
def completer():
    """PGCompleter with smart completion disabled (naive mode)."""
    from pgcli.pgcompleter import PGCompleter
    return PGCompleter(smart_completion=False)
@pytest.fixture
def complete_event():
    """Stand-in CompleteEvent; the naive completer never inspects it."""
    from mock import Mock
    event = Mock()
    return event
def test_empty_string_completion(completer, complete_event):
    """An empty document offers every known completion."""
    document = Document(text='', cursor_position=0)
    completions = set(completer.get_completions(document, complete_event))
    assert completions == set(map(Completion, completer.all_completions))
def test_select_keyword_completion(completer, complete_event):
    """'SEL' completes to the SELECT keyword."""
    text = 'SEL'
    document = Document(text=text, cursor_position=len(text))
    completions = set(completer.get_completions(document, complete_event))
    assert completions == set([Completion(text='SELECT', start_position=-3)])
def test_function_name_completion(completer, complete_event):
    """'MA' matches every completion starting with those letters."""
    text = 'SELECT MA'
    document = Document(text=text, cursor_position=len(text))
    completions = set(completer.get_completions(document, complete_event))
    expected = set([
        Completion(text='MATERIALIZED VIEW', start_position=-2),
        Completion(text='MAX', start_position=-2),
        Completion(text='MAXEXTENTS', start_position=-2)])
    assert completions == expected
def test_column_name_completion(completer, complete_event):
    """With the cursor between SELECT and FROM, the naive completer
    still offers everything it knows."""
    document = Document(text='SELECT FROM users',
                        cursor_position=len('SELECT '))
    completions = set(completer.get_completions(document, complete_event))
    assert completions == set(map(Completion, completer.all_completions))
def test_paths_completion(completer, complete_event):
    """After the psql include command (backslash-i) path completions appear."""
    # Use a raw string: '\i' is an invalid escape sequence, and relying on
    # Python to keep the backslash literally has been a DeprecationWarning
    # since 3.6 (and a SyntaxWarning in newer interpreters).
    text = r'\i '
    position = len(text)
    result = set(completer.get_completions(
        Document(text=text, cursor_position=position),
        complete_event,
        smart_completion=True))
    assert result > set([Completion(text="setup.py", start_position=0)])
def test_alter_well_known_keywords_completion(completer, complete_event):
    """Smart completion after 'ALTER ' includes its well-known targets
    and excludes keywords that cannot follow ALTER."""
    text = 'ALTER '
    completions = set(completer.get_completions(
        Document(text=text, cursor_position=len(text)),
        complete_event,
        smart_completion=True))
    required = set([
        Completion(text="DATABASE", display_meta='keyword'),
        Completion(text="TABLE", display_meta='keyword'),
        Completion(text="SYSTEM", display_meta='keyword'),
    ])
    assert completions > required
    assert Completion(text="CREATE", display_meta="keyword") not in completions
| {
"content_hash": "354654c0ebd568771c63ad3f06574322",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 74,
"avg_line_length": 32.65,
"alnum_prop": 0.6860643185298622,
"repo_name": "koljonen/pgcli",
"id": "87fc4378d9f4523b95ef7e60f6d4e9498b068380",
"size": "2612",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_naive_completion.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Gherkin",
"bytes": "3624"
},
{
"name": "Python",
"bytes": "339593"
},
{
"name": "Shell",
"bytes": "173"
}
],
"symlink_target": ""
} |
from kafka_utils.kafka_check.commands.replica_unavailability import _prepare_output
# Fixture data: (topic, partition) pairs reported as unavailable.
UNAVAILABLE_REPLICAS = [
    ('Topic0', 0),
    ('Topic0', 1),
    ('Topic13', 13),
]
# Fixture data: broker ids that could not be reached.
UNAVAILABLE_BROKERS = [123456, 987456]
def test_prepare_output_ok_no_verbose():
    """No unavailable replicas, verbose off: message plus raw count only."""
    actual = _prepare_output([], [], False, -1)
    assert actual == {
        'message': "All replicas available for communication.",
        'raw': {
            'replica_unavailability_count': 0,
        }
    }
def test_prepare_output_ok_verbose():
    """No unavailable replicas, verbose on: raw payload also lists partitions."""
    actual = _prepare_output([], [], True, -1)
    assert actual == {
        'message': "All replicas available for communication.",
        'raw': {
            'replica_unavailability_count': 0,
            'partitions': [],
        }
    }
def test_prepare_output_critical_verbose():
    """Unavailable replicas, verbose on, no head limit: all partitions listed."""
    actual = _prepare_output(UNAVAILABLE_REPLICAS, UNAVAILABLE_BROKERS, True, -1)
    assert actual == {
        'message': "3 replicas unavailable for communication. Unavailable Brokers: 123456, 987456",
        'verbose': "Partitions:\nTopic0:0\nTopic0:1\nTopic13:13",
        'raw': {
            'replica_unavailability_count': 3,
            'partitions': [
                {'topic': 'Topic0', 'partition': 0},
                {'topic': 'Topic0', 'partition': 1},
                {'topic': 'Topic13', 'partition': 13},
            ],
        }
    }
def test_prepare_output_critical_verbose_with_head():
    """A head limit of 2 truncates both verbose text and raw partitions."""
    actual = _prepare_output(UNAVAILABLE_REPLICAS, UNAVAILABLE_BROKERS, True, 2)
    assert actual == {
        'message': "3 replicas unavailable for communication. Unavailable Brokers: 123456, 987456",
        'verbose': "Top 2 partitions:\nTopic0:0\nTopic0:1",
        'raw': {
            'replica_unavailability_count': 3,
            'partitions': [
                {'topic': 'Topic0', 'partition': 0},
                {'topic': 'Topic0', 'partition': 1},
            ],
        }
    }
| {
"content_hash": "d4ada806599abb7f1edb46711988f2b0",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 99,
"avg_line_length": 28.774647887323944,
"alnum_prop": 0.5315712187958884,
"repo_name": "Yelp/kafka-utils",
"id": "d0b29349b1f6da6f9842f3f57a991044976da459",
"size": "2616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/kafka_check/test_replica_unavailability.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2401"
},
{
"name": "Gherkin",
"bytes": "9025"
},
{
"name": "Makefile",
"bytes": "783"
},
{
"name": "Python",
"bytes": "887830"
},
{
"name": "Shell",
"bytes": "773"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import *
# Route table for the pathfinder reports; the second argument of each
# entry names a view resolved against the 'pathfinder.views' prefix.
# NOTE(review): only the first pattern is a raw string anchored with '$';
# the rest are unanchored plain strings -- confirm whether they should
# match the first entry's style.
urlpatterns = patterns('pathfinder.views',
    url(r'select/$', 'selector'),
    url('select/ward', 'ward_selector'),
    url('select/provider', 'provider_selector'),
    url('select/hbc', 'hbc_selector'),
    url('hbc/', 'home_based_care'),
    url('ward/', 'ward_summary'),
    url('provider/', 'provider_summary'),
)
| {
"content_hash": "0683fb3688e13c58ae52663f5238a85c",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 48,
"avg_line_length": 30.166666666666668,
"alnum_prop": 0.6298342541436464,
"repo_name": "SEL-Columbia/commcare-hq",
"id": "d2352d244f646d232b5dae3a977e19a0c1485820",
"size": "362",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "custom/_legacy/pathfinder/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "768322"
},
{
"name": "JavaScript",
"bytes": "2647080"
},
{
"name": "Python",
"bytes": "7806659"
},
{
"name": "Shell",
"bytes": "28569"
}
],
"symlink_target": ""
} |
"""
https://leetcode.com/problems/daily-temperatures/
https://leetcode.com/submissions/detail/137437366/
"""
class Solution:
    def dailyTemperatures(self, temperatures):
        """For each day, distance to the next strictly warmer day (0 if none).

        Left-to-right monotonic stack: indices waiting for a warmer day.

        :type temperatures: List[int]
        :rtype: List[int]
        """
        answer = [0] * len(temperatures)
        pending = []  # indices of days still waiting, temps non-increasing
        for day, temp in enumerate(temperatures):
            # Resolve every earlier day that this one is warmer than.
            while pending and temperatures[pending[-1]] < temp:
                earlier = pending.pop()
                answer[earlier] = day - earlier
            pending.append(day)
        return answer
import unittest
class Test(unittest.TestCase):
    """Regression test for Solution.dailyTemperatures."""

    def test(self):
        expected = [1, 1, 4, 2, 1, 1, 0, 0]
        actual = Solution().dailyTemperatures(
            [73, 74, 75, 71, 69, 72, 76, 73])
        self.assertEqual(actual, expected)
# Allow running this file directly: python daily_temperatures.py
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "cb102a0ee67e0833542432685575be04",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 80,
"avg_line_length": 25,
"alnum_prop": 0.5548717948717948,
"repo_name": "vivaxy/algorithms",
"id": "a1d41bf484a61bd94d6d22491cbb138914721f06",
"size": "983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/problems/daily_temperatures.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "130225"
},
{
"name": "Python",
"bytes": "272982"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
} |
from setuptools import setup
if __name__ == '__main__':
    # The long description is taken verbatim from the README.
    with open("README.md", 'r') as readme_file:
        long_desc = readme_file.read()

    setup(
        name="orangeslices",
        version="0.0.1",
        description="Statusline generator for lemonbar",
        long_description=long_desc,
        url="http://github.com/tryone144/orangeslices",
        author="Bernd Busse",
        license="MIT",
        packages=["orangeslices", "orangeslices.slices"],
        zip_safe=False,
    )
| {
"content_hash": "c8a3ba396c059154c680bee699b6ae88",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 59,
"avg_line_length": 29.75,
"alnum_prop": 0.5714285714285714,
"repo_name": "tryone144/orangeslices",
"id": "d4a841455f9dc7baa29fd6848f9159e1aa4bdce9",
"size": "567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "298"
},
{
"name": "Python",
"bytes": "31330"
}
],
"symlink_target": ""
} |
from django import template
from django.utils.safestring import mark_safe
from tagembed.providers.tedx import TedxEmbed
from tagembed.providers.youtube import YoutubeEmbed
register = template.Library()
@register.filter
def parse_tagembed(text):
    """Template filter: run *text* through the TEDx and YouTube embed
    parsers (in that order) and mark the result safe for HTML output."""
    parsed = text
    for embedder in (TedxEmbed(), YoutubeEmbed()):
        parsed = embedder.parse(parsed)
    return mark_safe(parsed)
| {
"content_hash": "c5950c68ccf7274b20f60fcf576f3efd",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 51,
"avg_line_length": 24.352941176470587,
"alnum_prop": 0.7608695652173914,
"repo_name": "ninjaotoko/django-tagembed",
"id": "aada2b8333194a436678025284d6d5fd1cf15845",
"size": "438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tagembed/templatetags/tagembed_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "6574"
}
],
"symlink_target": ""
} |
"""
api/Zapi.py
@author: Josh Williams
Date Added: Tue Feb 21 15:15:15 CST 2006
This API takes care of all ZAPI direct DB access (api keys, account verification, etc)
"""
## STD LIBS
import datetime, md5, xmlrpclib
from pprint import pformat
## OUR LIBS
from AZTKAPI import AZTKAPI
import errors, validation, utils
from decorators import stack, zapi
## 3RD PARTY LIBS
from twisted.internet.defer import Deferred, DeferredList, maybeDeferred
from twisted.web.xmlrpc import Fault, XMLRPC
from twisted.python.failure import Failure
from twisted.internet import reactor
class ZAPI(AZTKAPI, XMLRPC):
	"""
	API for zoto's API key management system.
	"""
	# Front-end availability flags read by the AZTK framework; the image
	# server does not load this API.
	enable_node = True
	enable_web = True
	enable_zapi = True
	enable_image_server = False

	def _start(self):
		# No startup work required; hook kept for the AZTKAPI lifecycle.
		pass
	start = _start
	@stack
	def create_key(self, owner, app_name, email, url):
		"""
		Creates an API key and stores it in the global DB.

		Keys are MD5 hexdigests of the request fields plus a timestamp;
		generation loops (via the deferred chain below) until a key not
		already present in the api_keys table is found, then inserts it.

		@param owner: Name of the person/entity requesting the key.
		@type owner: String

		@param app_name: Name of the application that will be using the API key.
		@type app_name: String

		@param email: Contact address of the entity requesting the key.
		@type email: String

		@param url: Web address of the person/entity requesting the key.
		@type url: String

		@return: New API key
		@rtype: String
		"""
		try:
			validation.required(owner, 'owner')
			validation.required(app_name, 'app_name')
			validation.email(email)
		except errors.ValidationError, ex:
			self.log.warning("Validation failure: %s" % str(ex))
			raise errors.APIError, str(ex)
		@stack
		def act(count, api_key):
			# act() is called with the duplicate count for api_key; a
			# non-zero count means "generate a fresh candidate and re-check"
			# (the bootstrap call below passes count=1 to force the first
			# generation), zero means api_key is unique and can be stored.
			@stack
			def handle_count_result(result):
				return result['count']
			if count:
				## self.log.debug("api_key => [%s] is a duplicate" % api_key)
				# New candidate: hash request fields + wall-clock time with
				# microseconds so retries in the same second still differ.
				t = datetime.datetime.now()
				api_key = md5.md5("%s%s%s%s%s%d" % (owner, app_name, email, url, t.strftime("%Y%m%d%H%M%S"), t.microsecond)).hexdigest()
				self.log.warning("Checking api key: %s" % api_key)
				d2 = self.app.db.query(
					"""
					SELECT
						count(*) AS count
					FROM
						api_keys
					WHERE
						api_key = %s
					""", (api_key, ), single_row=True)
				d2.addCallback(handle_count_result)
				d2.addCallback(act, api_key)
				return d2
			else:
				## self.log.debug("api_key => [%s] is NOT a duplicate" % api_key)
				# Unique key: persist it and resolve the chain with the key.
				d2 = self.app.db.runOperation(
					"""
					INSERT INTO api_keys (
						api_key,
						owner,
						app_name,
						email,
						url
					) VALUES (
						%(api_key)s,
						%(owner)s,
						%(app_name)s,
						%(email)s,
						%(url)s
					)
					""", {'api_key': api_key, 'owner': owner, 'app_name': app_name, 'email': email, 'url': url})
				d2.addCallback(lambda _: api_key)
				return d2
		# Kick off the loop: count=1 and an empty key force generation.
		d = Deferred()
		d.addCallback(act, "")
		d.callback(1)
		return d
	@zapi("Creates and stores a Zoto API key.",
		[ ('owner', "Application owner name", str),
		  ('app_name', "Name of the application", str),
		  ('email', "Email address of the application owner", str),
		  ('url', "Website address where the application will be used", str)])
	def xmlrpc_create_key(self, info, owner, app_name, email, url):
		# Thin XML-RPC wrapper; 'info' carries the call context injected
		# by the @zapi decorator and is not used here.
		return self.create_key(owner, app_name, email, url)
	def xmlrpc_authenticate(self, void, api_key, username, password):
		"""
		Authenticates a username/password for ZAPI access.

		@param api_key: The key to get information for.
		@type api_key: String

		@param usename: username
		@type: string

		@param password: password
		@type: string

		@return: authentication info
		@rtype: dictionary
		"""
		# NOTE(review): 'key_info' and 'check_info' below are dead code --
		# neither is ever attached to the deferred chain, so this method
		# currently only validates the API key and never actually checks
		# the username/password.  Confirm whether check_info was meant to
		# be added as a callback on d before removing it.
		key_info = False
		@stack
		def check_info(result):
			if not result:
				return None
			return self.app.api.users.check_authentication(username, password, None)
		d = self.get_key_info(api_key)
		# Failures are converted to a (-1, message) tuple for the caller.
		d.addErrback(lambda _: (-1, _.getErrorMessage()))
		return d
	@stack
	def get_key_info(self, api_key, include_key=False):
		"""
		Gets information about the supplied API key.

		@param api_key: The key to get information for.
		@type api_key: String

		@param include_key: Unused; kept for interface compatibility.
		@type include_key: Boolean

		@return: api key info (single row, or nothing when the key is unknown)
		@rtype: dictionary

		@raise errors.APIError: when api_key validation fails.
		"""
		try:
			validation.required(api_key, 'api_key')
		except errors.ValidationError, ex:
			self.log.warning("Validation failure: %s" % str(ex))
			raise errors.APIError, str(ex)
		# Returns the DB layer's deferred; single_row collapses the result
		# to one record.
		return self.app.db.query(
			"""
			select
				api_key,
				owner,
				app_name,
				email,
				url,
				created
			from
				api_keys
			where
				api_key = %s
			""", (api_key, ), single_row=True)
@stack
def get_key_list(self, email=""):
"""
Gets the list of current API keys on file.
@param email: (optional) Limits the results to keys registered to this address.
@type email: String
@return: key list
@rtype: dictionary
"""
if email:
where_clause = " where email = '%s'" % email
else:
where_clause = ""
return self.app.db.query(
"""
select
api_key,
owner,
app_name,
email,
url,
created
from
api_keys
%s
""" % where_clause)
	@stack
	def delete_key(self, api_key):
		"""
		Deletes the specified API key from the database.

		@param api_key: API key to be deleted.
		@type api_key: String

		@return: nothing
		@rtype: nothing

		@raise errors.APIError: when api_key validation failed
		"""
		try:
			validation.required(api_key, 'api_key')
		except errors.ValidationError, ex:
			self.log.warning("Validation failure: %s" % str(ex))
			raise errors.APIError, str(ex)
		# Returns the DB layer's deferred so callers can chain on
		# completion; deleting an unknown key is a silent no-op.
		return self.app.db.query(
			"""
			delete from
				api_keys
			where
				api_key = %s
			""", (api_key, ))
def zapi_perms(self, api_key, auth, needs_auth=False, target_username=None, target_media_id=None):
"""
Determines, based on authentication and target username, what permissions (if any)
are available to the caller.
@param api_key: API key making the call.
@type api_key: String
@param auth: Some form of authentication (either ZAPI token or user/pswd hash combo)
@type auth: String or tuple(user/pswd hash)
@param needs_auth: (optional) Whether or not the function being called REQUIRES
authentication
@type needs_auth: Boolean
@param target_username: (optional) Specific user trying to be acted upon.
@type target_username: String
@param target_media_id: (optional) Specific media_id trying to be acted upon.
@type target_media_id: String
@return: Dictionary of auth/permissions info.
@rtype: Dictionary (Deferred)
"""
self.log.debug("zapi_perms():")
self.log.debug("api_key: %s" % api_key)
self.log.debug("needs_auth: %s" % needs_auth)
self.log.debug("target_username: %s" % target_username)
self.log.debug("target_media_id: %s" % target_media_id)
# Get the info for the auth key (to make sure its valid)
d = self.get_key_info(api_key)
def act(valid):
if not valid:
raise Fault, (5066, "Invalid API key supplied")
info = {
"api_key": api_key,
"perms": 'public',
"authenticated": False,
"is_contact": False,
"userid": None,
"target_userid": 0,
"target_image_id": 0
}
if not auth:
raise Fault, (5073, "For anonymous access, use username:anonymous/password:anonymous")
# If the function requires auth, and none is present, bail
if needs_auth and not auth:
raise Fault, (5066, "This method requires an authorization token")
# test auth info
if auth:
if not isinstance(auth, dict):
# no idea what they sent
raise Fault, (5067, "Not sure what you're trying to do here!")
if not auth.has_key('username'):
raise Fault, (5070, "Username is required")
if not auth.has_key('password') and not auth.has_key('token') and not auth.has_key('pswd_hash'):
raise Fault, (5071, "Must supply either a password or an auth token")
username = auth['username']
info['username'] = username
if username != "anonymous":
d_user = self.app.api.users.get_user_id(username)
d_user.addCallback(get_auth, auth, info)
return d_user
else:
if needs_auth:
raise Fault, (5066, "This method requires authorization")
else:
return resolve_args(info)
def get_auth(result, auth_ind, info):
if result[0] != 0:
raise Fault, (6002, "Internal ZAPI Error: %s" % result[1])
info['userid'] = result[1]
if auth_ind.has_key('password'):
password = auth['password']
d2 = self.app.api.users.check_authentication(info['userid'], password, 0)
elif auth.has_key('pswd_hash'):
self.log.warning("checking a password hash: %s" % auth['pswd_hash'])
pswd_hash = auth['pswd_hash']
d2 = self.app.api.users.check_pswd_hash(info['userid'], pswd_hash)
else:
## we have an auth token
hash = auth['token']
d2 = self.app.api.users.check_authentication(info['userid'], None, 0, hash)
d2.addCallback(check_auth, info)
return d2
def resolve_args(info):
d3 = Deferred()
if target_username:
d3.addCallback(get_target_user_id)
if target_username and info['userid']:
d3.addCallback(get_contact)
if target_media_id:
d3.addCallback(get_target_image_id)
d3.callback(info)
return d3
def check_auth(result, info):
is_authd = False
if result[0] != 0:
raise Fault, (5070, "Auth creds (token or user/pass) are invalid")
auth_ind = result[1]
is_authd = True
info['authenticated'] = is_authd
return resolve_args(info)
def get_contact(info):
if isinstance(target_username, (list, tuple)):
return info ## Can't check multiple targets
d8 = self.app.api.contacts.get_is_contact(info['target_userid'], info['userid'])
d8.addCallback(check_contact, info)
return d8
def check_contact(results, info):
if results[0] == 0:
info['is_contact'] = results[1]
return info
else:
raise Fault(6001, "Internal ZAPI error: %s" % results[1])
def get_target_user_id(info):
multi_ids = []
def get_id(id_list, id, info):
d9 = self.app.api.users.get_user_id(id)
d9.addCallback(handle_multi_result, id_list)
return d9
def handle_multi_result(result, id_list):
if result[0] == 0:
id_list.append(result[1])
return id_list
else:
raise Fault(6006, "Internal ZAPI error: %s" % result[1])
if target_username and target_username != "*ALL*":
if isinstance(target_username, (tuple, list)):
d5 = Deferred()
for id in target_username:
d5.addCallback(get_id, id, info)
d5.addCallback(lambda _: (0, _))
d5.callback(multi_ids)
else:
d5 = self.app.api.users.get_user_id(target_username)
d5.addCallback(check_target_user_id, info)
return d5
else:
return info
def check_target_user_id(results, info):
if results[0] == 0:
info['target_userid'] = results[1]
return info
else:
raise Fault(6003, "Internal ZAPI error: %s" % results[1])
def get_target_image_id(info):
multi_ids = []
image_user = 0
if info['target_userid']:
image_user = target_username
elif info['userid']:
image_user = info['username']
else:
image_user = ''
def get_id(id_list, id, info):
d7 = self.app.api.images.get_image_id(image_user, id)
d7.addCallback(handle_multi_result, id_list)
return d7
def handle_multi_result(result, id_list):
if result[0] == 0:
id_list.append(result[1])
return id_list
else:
raise Fault(6005, "Internal ZAPI error: %s" % result[1])
if target_media_id and image_user:
if isinstance(target_media_id, (tuple, list)):
d6 = Deferred()
for id in target_media_id:
d6.addCallback(get_id, id, info)
d6.addCallback(lambda _: (0, _))
d6.callback(multi_ids)
else:
d6 = self.app.api.images.get_image_id(image_user, target_media_id)
d6.addCallback(check_target_image_id, info)
return d6
else:
return info
def check_target_image_id(results, info):
if results[0] == 0:
info['target_image_id'] = results[1]
return info
else:
raise Fault(6004, "Internal ZAPI error: %s" % results[1])
def handle_fail(fail):
self.log.warning(fail.type)
self.log.warning(fail.getErrorMessage())
if fail.type != xmlrpclib.Fault:
raise Fault (6000, "Internal ZAPI error: %s" % fail)
else:
raise fail
d.addCallback(act)
d.addErrback(handle_fail)
return d
| {
"content_hash": "ed6e0cfe26b562062a1048a1306ecdbd",
"timestamp": "",
"source": "github",
"line_count": 473,
"max_line_length": 124,
"avg_line_length": 25.858350951374206,
"alnum_prop": 0.6471261548524242,
"repo_name": "kordless/zoto-server",
"id": "e3d0e82ce70bd9cda46274bc9e26b5bca428955a",
"size": "12231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aztk/api/ZAPI.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "1489011"
},
{
"name": "PHP",
"bytes": "15394"
},
{
"name": "Python",
"bytes": "905967"
},
{
"name": "Shell",
"bytes": "1052"
}
],
"symlink_target": ""
} |
import json
import requests
from globalvars import GlobalVars
import threading
import websocket
from collections import Iterable
import sys
import traceback
import time
import os
import datahandling
import parsing
import apigetpost
import spamhandling
class Metasmoke:
    """Client-side bridge to metasmoke (Python 2 code).

    Provides a websocket listener that reacts to commands pushed from
    metasmoke, plus HTTP POST helpers that report detections, feedback,
    deletions and liveness back to the metasmoke server configured in
    ``GlobalVars``.
    """
    @classmethod
    def init_websocket(self):
        # Connect to metasmoke's ActionCable websocket, subscribe to the
        # SmokeDetectorChannel, then process incoming messages forever.
        # Only returns if the initial connection itself fails.
        try:
            GlobalVars.metasmoke_ws = websocket.create_connection(GlobalVars.metasmoke_ws_host, origin=GlobalVars.metasmoke_host)
            GlobalVars.metasmoke_ws.send(json.dumps({"command": "subscribe", "identifier": "{\"channel\":\"SmokeDetectorChannel\",\"key\":\"" + GlobalVars.metasmoke_key + "\"}"}))
            while True:
                a = GlobalVars.metasmoke_ws.recv()
                print(a)
                try:
                    data = json.loads(a)
                    if "message" in data:
                        message = data['message']
                        if isinstance(message, Iterable):
                            if "message" in message:
                                # Plain text: relay it to the Charcoal HQ chat room.
                                GlobalVars.charcoal_hq.send_message(message['message'])
                            elif "blacklist" in message:
                                # Blacklist a user on request from metasmoke.
                                datahandling.add_blacklisted_user((message['blacklist']['uid'], message['blacklist']['site']), "metasmoke", message['blacklist']['post'])
                            elif "naa" in message:
                                # "Not an answer" feedback: ignore that post from now on.
                                post_site_id = parsing.fetch_post_id_and_site_from_url(message["naa"]["post_link"])
                                datahandling.add_ignored_post(post_site_id[0:2])
                            elif "fp" in message:
                                # False-positive feedback.
                                post_site_id = parsing.fetch_post_id_and_site_from_url(message["fp"]["post_link"])
                                datahandling.add_false_positive(post_site_id[0:2])
                            elif "report" in message:
                                # A user manually reported a post from the metasmoke UI.
                                post_data = apigetpost.api_get_post(message["report"]["post_link"])
                                if post_data is None or post_data is False:
                                    continue
                                # Skip posts already reported, unless previously marked fp.
                                if datahandling.has_already_been_posted(post_data.site, post_data.post_id, post_data.title) and not datahandling.is_false_positive((post_data.post_id, post_data.site)):
                                    continue
                                user = parsing.get_user_from_url(post_data.owner_url)
                                if user is not None:
                                    datahandling.add_blacklisted_user(user, "metasmoke", post_data.post_url)
                                why = u"Post manually reported by user *{}* from metasmoke.\n".format(message["report"]["user"])
                                spamhandling.handle_spam(title=post_data.title,
                                                         body=post_data.body,
                                                         poster=post_data.owner_name,
                                                         site=post_data.site,
                                                         post_url=post_data.post_url,
                                                         poster_url=post_data.owner_url,
                                                         post_id=post_data.post_id,
                                                         reasons=["Manually reported " + post_data.post_type],
                                                         is_answer=post_data.post_type == "answer",
                                                         why=why,
                                                         owner_rep=post_data.owner_rep,
                                                         post_score=post_data.score,
                                                         up_vote_count=post_data.up_vote_count,
                                                         down_vote_count=post_data.down_vote_count,
                                                         question_id=post_data.question_id)
                            elif "commit_status" in message:
                                # CI result for a commit; possibly self-restart to pull it.
                                c = message["commit_status"]
                                sha = c["commit_sha"][:7]
                                # Only act if the commit differs from the one currently running.
                                if c["commit_sha"] != os.popen('git log --pretty=format:"%H" -n 1').read():
                                    if c["status"] == "success":
                                        if "autopull" in c["commit_message"]:
                                            GlobalVars.charcoal_hq.send_message("[CI]({ci_link}) on {commit_sha} succeeded. Message contains 'autopull', pulling...".format(ci_link=c["ci_url"], commit_sha=sha))
                                            time.sleep(2)
                                            # Exit code 3 signals the wrapper script to pull and restart.
                                            os._exit(3)
                                        else:
                                            GlobalVars.charcoal_hq.send_message("[CI]({ci_link}) on {commit_sha} succeeded.".format(ci_link=c["ci_url"], commit_sha=sha))
                                    elif c["status"] == "failure":
                                        GlobalVars.charcoal_hq.send_message("[CI]({ci_link}) on {commit_sha} failed.".format(ci_link=c["ci_url"], commit_sha=sha))
                except Exception, e:
                    # Any failure while handling a message: reconnect (keyless
                    # subscription this time) and keep listening.
                    GlobalVars.metasmoke_ws = websocket.create_connection(GlobalVars.metasmoke_ws_host, origin=GlobalVars.metasmoke_host)
                    GlobalVars.metasmoke_ws.send(json.dumps({"command": "subscribe", "identifier": "{\"channel\":\"SmokeDetectorChannel\"}"}))
                    print e
                    try:
                        exc_info = sys.exc_info()
                        traceback.print_exception(*exc_info)
                    except:
                        print "meh"
        except:
            print "Couldn't bind to MS websocket"
    @classmethod
    def send_stats_on_post(self, title, link, reasons, body, username, user_link, why, owner_rep, post_score, up_vote_count, down_vote_count):
        # POST a detected-spam report to metasmoke's /posts.json endpoint.
        # Silently does nothing if no metasmoke host is configured.
        if GlobalVars.metasmoke_host is None:
            print "Metasmoke location not defined, not reporting"
            return
        metasmoke_key = GlobalVars.metasmoke_key
        try:
            if len(why) > 1024:
                # Keep the head and tail of an over-long "why" explanation.
                why = why[:512] + '...' + why[-512:]
            post = {'title': title, 'link': link, 'reasons': reasons, 'body': body, 'username': username, 'user_link': user_link, 'why': why, 'user_reputation': owner_rep, 'score': post_score, 'upvote_count': up_vote_count, 'downvote_count': down_vote_count}
            post = dict((k, v) for k, v in post.iteritems() if v)  # Remove None values (if they somehow manage to get through)
            payload = {'post': post, 'key': metasmoke_key}
            headers = {'Content-type': 'application/json'}
            requests.post(GlobalVars.metasmoke_host + "/posts.json", data=json.dumps(payload), headers=headers)
        except Exception as e:
            print e
    @classmethod
    def send_feedback_for_post(self, post_link, feedback_type, user_name, user_id, chat_host):
        # Forward chat feedback (tp/fp/naa...) about a post to metasmoke.
        if GlobalVars.metasmoke_host is None:
            print "Metasmoke location not defined; not reporting"
            return
        metasmoke_key = GlobalVars.metasmoke_key
        try:
            payload = {
                'feedback': {
                    'user_name': user_name,
                    'chat_user_id': user_id,
                    'chat_host': chat_host,
                    'feedback_type': feedback_type,
                    'post_link': post_link
                },
                'key': metasmoke_key
            }
            headers = {'Content-type': 'application/json'}
            requests.post(GlobalVars.metasmoke_host + "/feedbacks.json", data=json.dumps(payload), headers=headers)
        except Exception as e:
            print e
    @classmethod
    def send_deletion_stats_for_post(self, post_link, is_deleted):
        # Record whether a reported post ended up deleted on its site.
        if GlobalVars.metasmoke_host is None:
            print "Metasmoke location not defined; not reporting deletion stats"
            return
        metasmoke_key = GlobalVars.metasmoke_key
        try:
            payload = {
                'deletion_log': {
                    'is_deleted': is_deleted,
                    'post_link': post_link
                },
                'key': metasmoke_key
            }
            headers = {'Content-type': 'application/json'}
            requests.post(GlobalVars.metasmoke_host + "/deletion_logs.json", data=json.dumps(payload), headers=headers)
        except Exception as e:
            print e
    @classmethod
    def send_status_ping(self):
        # Liveness heartbeat: re-arms itself on a 60-second timer, then
        # posts this instance's location to metasmoke.
        if GlobalVars.metasmoke_host is None:
            print "Metasmoke location not defined; not sending status ping"
            return
        threading.Timer(60, Metasmoke.send_status_ping).start()
        metasmoke_key = GlobalVars.metasmoke_key
        try:
            payload = {
                'location': GlobalVars.location,
                'key': metasmoke_key
            }
            headers = {'content-type': 'application/json'}
            requests.post(GlobalVars.metasmoke_host + "/status-update.json", data=json.dumps(payload), headers=headers)
        except Exception as e:
            print e
    @classmethod
    def update_code_privileged_users_list(self):
        # Refresh the per-room lists of chat users allowed to run
        # code-privileged commands, as published by metasmoke.
        # NOTE(review): no try/except here, unlike the other senders — a
        # network failure propagates to the caller; confirm that is intended.
        payload = {'key': GlobalVars.metasmoke_key}
        headers = {'Content-type': 'application/json'}
        response = requests.get(GlobalVars.metasmoke_host + "/api/users/code_privileged", data=json.dumps(payload), headers=headers).json()['items']
        GlobalVars.code_privileged_users = {
            GlobalVars.charcoal_room_id: response["stackexchange_chat_ids"],
            GlobalVars.meta_tavern_room_id: response["meta_stackexchange_chat_ids"],
            GlobalVars.socvr_room_id: response["stackoverflow_chat_ids"]
        }
| {
"content_hash": "59f001d10aed827361f0fca061179c43",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 258,
"avg_line_length": 51.98421052631579,
"alnum_prop": 0.4982282069454288,
"repo_name": "ArtOfCode-/SmokeDetector",
"id": "df467b4de22d071fbf113e0165f9224cd121f58e",
"size": "9877",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metasmoke.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "211451"
},
{
"name": "Shell",
"bytes": "1310"
}
],
"symlink_target": ""
} |
""" custom """
from examples.working_init import *
from threatconnect.Config.FilterOperator import FilterOperator, FilterSetOperator
""" Toggle the Boolean to enable specific examples """
enable_example1 = False
enable_example2 = False
enable_example3 = False
enable_example4 = False
enable_example5 = False
owners = ['Example Community']
# shared method to display results from examples below
def show_data(result_obj):
    """Pretty-print every signature in *result_obj* to stdout.

    For each result: core fields, downloaded signature contents, request
    URIs, matched filters, attributes, security label, tags, associations
    (groups/indicators/victims) and alternate output formats, followed by
    the API report stats and any failed requests (read from the
    module-level `tc` instance).
    """
    for obj in result_obj:
        #
        # get signature
        #
        print('\n{0!s:_^80}'.format(obj.name))
        print('{0!s:<20}{1!s:<50}'.format('ID', obj.id))
        print('{0!s:<20}{1!s:<50}'.format('Owner Name', obj.owner_name))
        print('{0!s:<20}{1!s:<50}'.format('Date Added', obj.date_added))
        print('{0!s:<20}{1!s:<50}'.format('Web Link', obj.weblink))
        print('{0!s:<20}{1!s:<50}'.format('File Text', obj.file_text))
        #
        # signature contents (requires an extra API call via download())
        #
        obj.download()
        if obj.contents is not None:
            print('\n{0!s:-^40}'.format(' Signature Contents '))
            print(obj.contents)
        #
        # api_uris
        #
        if len(obj.request_uris) > 0:
            print('\n{0!s:-^40}'.format(' Request URIs '))
            for request_uri in obj.request_uris:
                print('{0!s:<20}{1!s:<50}'.format(' URI', request_uri))
        #
        # matched filters
        #
        if len(obj.matched_filters) > 0:
            print('\n{0!s:-^40}'.format(' API Matched Filters '))
            for api_filter in obj.matched_filters:
                print('{0!s:<20}{1!s:<50}'.format(' Filter', api_filter))
        #
        # resource attributes (lazily loaded)
        #
        obj.load_attributes()
        if len(obj.attributes) > 0:
            print('\n{0!s:-^40}'.format(' Attributes '))
            for attr_obj in obj.attributes:
                print('{0!s:<20}{1!s:<50}'.format(' Type', attr_obj.type))
                print('{0!s:<20}{1!s:<50}'.format(' Value', attr_obj.value))
                print('{0!s:<20}{1!s:<50}'.format(' Date Added', attr_obj.date_added))
                print('{0!s:<20}{1!s:<50}'.format(' Last Modified', attr_obj.last_modified))
                print('{0!s:<20}{1!s:<50}'.format(' Displayed', attr_obj.displayed))
        #
        # resource security label (lazily loaded)
        #
        obj.load_security_label()
        if obj.security_label is not None:
            print('\n{0!s:-^40}'.format(' Security Label '))
            print('{0!s:<20}{1!s:<50}'.format(' Name', obj.security_label.name))
            print('{0!s:<20}{1!s:<50}'.format(' Description', obj.security_label.description))
            print('{0!s:<20}{1!s:<50}'.format(' Date Added', obj.security_label.date_added))
        #
        # resource tags (lazily loaded)
        #
        obj.load_tags()
        if len(obj.tags) > 0:
            print('\n{0!s:-^40}'.format(' Tags '))
            for tag_obj in obj.tags:
                print('{0!s:<20}{1!s:<50}'.format(' Name', tag_obj.name))
                print('{0!s:<20}{1!s:<50}'.format(' Web Link', tag_obj.weblink))
        #
        # resource associations (groups) - header printed only once
        #
        g_header = True
        for g_associations in obj.group_associations:
            if g_header:
                print('\n{0!s:-^40}'.format(' Group Associations '))
                g_header = False
            print('{0!s:<20}{1!s:<50}'.format(' ID', g_associations.id))
            print('{0!s:<20}{1!s:<50}'.format(' Name', g_associations.name))
            if hasattr(g_associations, 'type'):
                print('{0!s:<20}{1!s:<50}'.format(' Type', g_associations.type))
            print('{0!s:<20}{1!s:<50}'.format(' Owner Name', g_associations.owner_name))
            print('{0!s:<20}{1!s:<50}'.format(' Date Added', g_associations.date_added))
            print('{0!s:<20}{1!s:<50}'.format(' Web Link', g_associations.weblink))
        #
        # resource associations (indicators) - header printed only once
        #
        i_header = True
        for i_associations in obj.indicator_associations:
            if i_header:
                print('\n{0!s:-^40}'.format(' Indicator Associations '))
                i_header = False
            print('{0!s:<20}{1!s:<50}'.format(' ID', i_associations.id))
            print('{0!s:<20}{1!s:<50}'.format(' Indicator', i_associations.indicator))
            print('{0!s:<20}{1!s:<50}'.format(' Type', i_associations.type))
            print('{0!s:<20}{1!s:<50}'.format(' Description', i_associations.description))
            print('{0!s:<20}{1!s:<50}'.format(' Owner', i_associations.owner_name))
            print('{0!s:<20}{1!s:<50}'.format(' Rating', i_associations.rating))
            print('{0!s:<20}{1!s:<50}'.format(' Confidence', i_associations.confidence))
            print('{0!s:<20}{1!s:<50}'.format(' Date Added', i_associations.date_added))
            print('{0!s:<20}{1!s:<50}'.format(' Last Modified', i_associations.last_modified))
            print('{0!s:<20}{1!s:<50}'.format(' Web Link', i_associations.weblink))
        #
        # resource associations (victims) - header printed only once
        #
        v_header = True
        for v_associations in obj.victim_associations:
            if v_header:
                print('\n{0!s:-^40}'.format(' Victim Associations '))
                v_header = False
            print('{0!s:<20}{1!s:<50}'.format(' ID', v_associations.id))
            print('{0!s:<20}{1!s:<50}'.format(' Name', v_associations.name))
            print('{0!s:<20}{1!s:<50}'.format(' Description', v_associations.description))
            print('{0!s:<20}{1!s:<50}'.format(' Owner', v_associations.owner_name))
            print('{0!s:<20}{1!s:<50}'.format(' Nationality', v_associations.nationality))
            print('{0!s:<20}{1!s:<50}'.format(' Org', v_associations.org))
            print('{0!s:<20}{1!s:<50}'.format(' Sub Org', v_associations.suborg))
            print('{0!s:<20}{1!s:<50}'.format(' Work Location', v_associations.work_location))
            print('{0!s:<20}{1!s:<50}\n'.format(' Web Link', v_associations.weblink))
        #
        # alternate output modes
        #
        print('\n{0!s:-^40}'.format(' CSV Format '))
        print('{0!s}'.format(obj.csv_header))
        print('{0!s}\n'.format(obj.csv))
        print('\n{0!s:-^40}'.format(' JSON Format '))
        print('{0!s}\n'.format(obj.json))
        print('\n{0!s:-^40}'.format(' Key/Value Format '))
        print('{0!s}\n'.format(obj.keyval))
    #
    # print report (aggregate API request statistics)
    #
    print(tc.report.stats)
    #
    # displayed failed api request
    #
    for fail in tc.report.failures:
        print(fail)
def main():
    """Run whichever retrieval examples are enabled via the module-level
    `enable_exampleN` flags, printing each result set with show_data().

    Uses the shared `tc` ThreatConnect instance imported from
    examples.working_init.
    """
    # set threat connect log (tcl) level
    tc.set_tcl_file('log/tc.log', 'debug')
    tc.set_tcl_console_level('critical')
    if enable_example1:
        """ This is a basic example that pull all signatures for the default org. """
        # optionally set max results
        tc.set_api_result_limit(500)
        # signatures object
        signatures = tc.signatures()
        # retrieve resource
        try:
            signatures.retrieve()
        except RuntimeError as e:
            print('Error: {0!s}'.format(e))
            sys.exit(1)
        # show indicator data
        show_data(signatures)
    if enable_example2:
        """ This example adds a filter for a particular owner (owners is a list of owners). """
        # optionally set max results
        tc.set_api_result_limit(500)
        # signatures object
        signatures = tc.signatures()
        # get filter
        try:
            filter1 = signatures.add_filter()
            filter1.add_owner(owners)
        except AttributeError as e:
            print('Error: {0!s}'.format(e))
            sys.exit(1)
        # retrieve resource
        try:
            signatures.retrieve()
        except RuntimeError as e:
            print('Error: {0!s}'.format(e))
            sys.exit(1)
        # show indicator data
        show_data(signatures)
    if enable_example3:
        """ This example adds a filter to pull an signatures by id. """
        # optionally set max results
        tc.set_api_result_limit(500)
        # signatures object
        signatures = tc.signatures()
        # get filter
        try:
            filter1 = signatures.add_filter()
            filter1.add_owner(owners)
            filter1.add_id(43)
        except AttributeError as e:
            print('Error: {0!s}'.format(e))
            sys.exit(1)
        # retrieve resource
        try:
            signatures.retrieve()
        except RuntimeError as e:
            print('Error: {0!s}'.format(e))
            sys.exit(1)
        # show indicator data
        show_data(signatures)
    if enable_example4:
        """ This example adds a filter with multiple sub filters. This request
            will return any signatures that matches any filters. """
        # optionally set max results
        tc.set_api_result_limit(500)
        # signatures object
        signatures = tc.signatures()
        # get filter (sub-filters are OR'd together by default)
        try:
            filter1 = signatures.add_filter()
            filter1.add_owner(owners)
            filter1.add_adversary_id(5)
            filter1.add_document_id(19)
            filter1.add_email_id(17)
            filter1.add_incident_id(34)
            filter1.add_incident_id(708996)
            filter1.add_indicator('10.20.30.40')
            filter1.add_security_label('TLP Green')
            filter1.add_tag('EXAMPLE')
            filter1.add_threat_id(38)
            filter1.add_victim_id(1)
        except AttributeError as e:
            print('Error: {0!s}'.format(e))
            sys.exit(1)
        # retrieve resource
        try:
            signatures.retrieve()
        except RuntimeError as e:
            print('Error: {0!s}'.format(e))
            sys.exit(1)
        # show indicator data
        show_data(signatures)
    if enable_example5:
        """ This example adds multiple filters to limit the result set. This request
            will return only signatures that match all filters. """
        # optionally set max results
        tc.set_api_result_limit(500)
        # signatures object
        signatures = tc.signatures()
        # get filter
        try:
            filter1 = signatures.add_filter()
            filter1.add_owner(owners)
            # filter1.add_tag('EXAMPLE')
            filter1.add_pf_date_added('2015-04-02T00:31:43Z', FilterOperator.GE)
        except AttributeError as e:
            print('Error: {0!s}'.format(e))
            sys.exit(1)
        # second filter combined with AND: both filter sets must match
        try:
            filter2 = signatures.add_filter()
            filter2.add_filter_operator(FilterSetOperator.AND)
            filter2.add_owner(owners)
            # filter2.add_security_label('TLP Green')
            filter2.add_pf_file_type('YARA', FilterOperator.EQ)
        except AttributeError as e:
            print('Error: {0!s}'.format(e))
            sys.exit(1)
        # retrieve resource
        try:
            signatures.retrieve()
        except RuntimeError as e:
            print('Error: {0!s}'.format(e))
            sys.exit(1)
        # show indicator data
        show_data(signatures)
# Script entry point: run whichever examples are enabled above.
if __name__ == "__main__":
    main()
| {
"content_hash": "48247142920b7ff93055a826a8a70708",
"timestamp": "",
"source": "github",
"line_count": 323,
"max_line_length": 95,
"avg_line_length": 34.8390092879257,
"alnum_prop": 0.5335466097929441,
"repo_name": "percipient/threatconnect-python",
"id": "167a7b5c0026c0ec9cc61e77b14b1979af71b650",
"size": "11278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/retrieve/signatures_retrieve.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "381902"
}
],
"symlink_target": ""
} |
"""
WSGI config for OpenSource project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
# Point Django at the project settings before the WSGI application is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "OpenSource.settings")
from django.core.wsgi import get_wsgi_application
# Module-level WSGI callable picked up by application servers
# (gunicorn, uWSGI, mod_wsgi, ...).
application = get_wsgi_application()
| {
"content_hash": "87b272ddda4e9f3dc9ca3f11d2a112bc",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 28.214285714285715,
"alnum_prop": 0.7772151898734178,
"repo_name": "CubicDolphin/LicenseTracker",
"id": "fa303fa812a76f09904e04b7afddf185475a0dc0",
"size": "395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/OpenSource/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60401"
}
],
"symlink_target": ""
} |
from __future__ import division
from nltk.tokenize import RegexpTokenizer
import curses
from curses.ascii import isdigit
import nltk
from nltk.corpus import cmudict
import math
d = cmudict.dict()
def countSyllables(word):
    """Return a syllable count for each CMU-dictionary pronunciation of
    `word` (one entry per pronunciation).

    A syllable is counted for every phoneme whose final character is a
    stress digit. Raises KeyError if `word` is not in the dictionary `d`.
    """
    counts = []
    for pronunciation in d[word.lower()]:
        stressed_phonemes = [phoneme for phoneme in pronunciation if isdigit(phoneme[-1])]
        counts.append(len(stressed_phonemes))
    return counts
def FleschReadingEaseIndex(text):
words = nltk.wordpunct_tokenize(text)
sentences = nltk.sent_tokenize(text)
sentence_count = len(sentences)
word_count = 0
syllable_count = 0
for w in words :
try :
syllable_count = syllable_count + max(countSyllables(w))
word_count += 1
except :
pass
index = 206.835 - 1.015 * (word_count/sentence_count) - 84.6 * (syllable_count/word_count)
status = ""
if index >= 90 :
status = "very easy"
elif index >= 80 :
status = "easy"
elif index >= 70 :
status = "fairly easy"
elif index >= 60 :
status = "standard"
elif index >= 50 :
status = "fairly difficult"
elif index >= 30 :
status = "difficult"
else :
status = "very confusing"
print "Flesch Reading Ease index : ", index , " Status : ", status
def GunningFogIndex(text):
words = nltk.wordpunct_tokenize(text)
sentences = nltk.sent_tokenize(text)
sentence_count = len(sentences)
word_count = 0
complex_word_count = 0
for w in words :
try :
if max(countSyllables(w)) >= 3:
complex_word_count += 1
word_count += 1
except :
pass
index = 0.4 * ((word_count/sentence_count) + 100 * (complex_word_count/word_count))
print "Gunning Fog index : ", index
def ColemanLiauIndex(text):
tokenizer = RegexpTokenizer(r'\w+')
words = tokenizer.tokenize(text)
sentences = nltk.sent_tokenize(text)
sentence_count = len(sentences)
word_count = -1
character_count = 0
for w in words :
character_count += len(w)
word_count += 1
index = (5.88 * (character_count/word_count)) - (29.5 * (sentence_count/word_count)) - 15.8
print "Coleman Liau index : ", index
def AutomatedReadabilityIndex(text):
tokenizer = RegexpTokenizer(r'\w+')
words = tokenizer.tokenize(text)
sentences = nltk.sent_tokenize(text)
sentence_count = len(sentences)
word_count = 0
character_count = 0
for w in words :
character_count += len(w)
word_count += 1
index = (4.71 * (character_count/word_count)) + (0.5 * (word_count/sentence_count)) - 21.43
print "Automated Readability index : ", index
def SmogIndex(text):
words = nltk.wordpunct_tokenize(text)
sentences = nltk.sent_tokenize(text)
sentence_count = len(sentences)
polysyllable_count = 0
for w in words :
try :
if max(countSyllables(w)) >= 3:
polysyllable_count += 1
except :
pass
index = math.sqrt( polysyllable_count * ( 30/sentence_count)) + 3.1291
print "Smog index : ", index
def PowerSumnerKearl(text):
words = nltk.wordpunct_tokenize(text)
sentences = nltk.sent_tokenize(text)
sentence_count = len(sentences)
syllable_count = 0
for w in words :
try :
syllable_count += max(countSyllables(w))
except :
pass
index = 0.0778 * (word_count/sentence_count) + 100 * 0.0455 * (syllable_count/word_count) - 2.2029
print "PowerSumnerKearl : ", index
if __name__ == "__main__":
    # Interactive driver: read a passage and print each readability index.
    # NOTE(review): PowerSumnerKearl() is not invoked here — confirm
    # whether that omission is intentional.
    paragraph = raw_input("Enter text here : ")
    FleschReadingEaseIndex(paragraph)
    GunningFogIndex(paragraph)
    ColemanLiauIndex(paragraph)
    AutomatedReadabilityIndex(paragraph)
    SmogIndex(paragraph)
| {
"content_hash": "864a79efbdbadacd8b2aed7a5dfef59a",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 102,
"avg_line_length": 27.57718120805369,
"alnum_prop": 0.5560963738135799,
"repo_name": "sujithvm/nlp-modules",
"id": "a115d21afc16fdec2e339350a50c810136b69359",
"size": "4109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "readability index/readability_index.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9055"
}
],
"symlink_target": ""
} |
"""A language for manipulating forms using labels."""
import ufl
import functools
import operator
from firedrake import Constant
# ---------------------------------------------------------------------------- #
# Core routines for filtering terms
# ---------------------------------------------------------------------------- #
# Standard maps/filters for LabelledForm.label_map. Written as named
# functions rather than assigned lambdas (PEP 8 / E741-adjacent guidance
# discourages `name = lambda ...`); callers are unaffected.
def identity(t):
    """Map a term to itself (keep the term unchanged)."""
    return t


def drop(t):
    """Map a term to None, which label_map then filters out."""
    return None


def all_terms(t):
    """Term filter that accepts every term."""
    return True
class Term(object):
    """A Term object contains a form and its labels."""

    __slots__ = ["form", "labels"]

    def __init__(self, form, label_dict=None):
        """
        Args:
            form (:class:`ufl.Form`): the form for this term.
            label_dict (dict, optional): dictionary of key-value pairs
                corresponding to current form labels. Defaults to None.
        """
        self.form = form
        self.labels = label_dict or {}

    def get(self, label, default=None):
        """
        Returns the value of a label.

        Args:
            label (:class:`Label`): the label to return the value of.
            default (optional): value to return if this term does not have
                the label. Defaults to None.

        Returns:
            The value of the label, or `default` if the label is absent.
        """
        # Bug fix: `default` was previously accepted but silently ignored
        # (the call was `self.labels.get(label.label)`), so passing a
        # default had no effect. It is now forwarded to dict.get.
        return self.labels.get(label.label, default)

    def has_label(self, *labels, return_tuple=False):
        """
        Whether the term has the specified labels attached to it.

        Args:
            *labels (:class:`Label`): a label or series of labels. A tuple is
                automatically returned if multiple labels are provided as
                arguments.
            return_tuple (bool, optional): if True, forces a tuple to be
                returned even if only one label is provided as an argument.
                Defaults to False.

        Returns:
            bool or tuple: Booleans corresponding to whether the term has the
                specified labels.
        """
        if len(labels) == 1 and not return_tuple:
            return labels[0].label in self.labels
        else:
            # Recurse per label so each single check shares the same logic.
            return tuple(self.has_label(lab) for lab in labels)

    def __add__(self, other):
        """
        Adds a term or labelled form to this term.

        Args:
            other (:class:`Term` or :class:`LabelledForm`): the term or labelled
                form to add to this term.

        Returns:
            :class:`LabelledForm`: a labelled form containing the terms.
        """
        if other is None:
            return self
        elif isinstance(other, Term):
            return LabelledForm(self, other)
        elif isinstance(other, LabelledForm):
            return LabelledForm(self, *other.terms)
        else:
            # Let Python try the reflected operation on `other`.
            return NotImplemented

    __radd__ = __add__

    def __sub__(self, other):
        """
        Subtracts a term or labelled form from this term.

        Args:
            other (:class:`Term` or :class:`LabelledForm`): the term or labelled
                form to subtract from this term.

        Returns:
            :class:`LabelledForm`: a labelled form containing the terms.
        """
        # Implemented as self + (-1 * other).
        other = other * Constant(-1.0)
        return self + other

    def __mul__(self, other):
        """
        Multiplies this term by another quantity.

        Args:
            other (float, :class:`Constant` or :class:`ufl.algebra.Product`):
                the quantity to multiply this term by. If it is a float or int
                then it is converted to a :class:`Constant` before the
                multiplication.

        Returns:
            :class:`Term`: the product of the term with the quantity.
        """
        if type(other) in (float, int):
            other = Constant(other)
        elif type(other) not in [Constant, ufl.algebra.Product]:
            return NotImplemented
        # Labels are carried over unchanged to the scaled term.
        return Term(other*self.form, self.labels)

    __rmul__ = __mul__

    def __truediv__(self, other):
        """
        Divides this term by another quantity.

        Args:
            other (float, :class:`Constant` or :class:`ufl.algebra.Product`):
                the quantity to divide this term by. If it is a float or int
                then it is converted to a :class:`Constant` before the
                division.

        Returns:
            :class:`Term`: the quotient of the term divided by the quantity.
        """
        if type(other) in (float, int, Constant, ufl.algebra.Product):
            # Division is multiplication by the reciprocal.
            other = Constant(1.0 / other)
            return self * other
        else:
            return NotImplemented
# Sentinel term used as the reduce() initialiser in LabelledForm.label_map;
# it is stripped from the result before the new LabelledForm is returned.
NullTerm = Term(None)
class LabelledForm(object):
"""
A form, broken down into terms that pair individual forms with labels.
The `LabelledForm` object holds a list of terms, which pair :class:`Form`
objects with :class:`Label`s. The `label_map` routine allows the terms to be
manipulated or selected based on particular filters.
"""
__slots__ = ["terms"]
def __init__(self, *terms):
"""
Args:
*terms (:class:`Term`): terms to combine to make the `LabelledForm`.
Raises:
TypeError: _description_
"""
if len(terms) == 1 and isinstance(terms[0], LabelledForm):
self.terms = terms[0].terms
else:
if any([type(term) is not Term for term in list(terms)]):
raise TypeError('Can only pass terms or a LabelledForm to LabelledForm')
self.terms = list(terms)
def __add__(self, other):
"""
Adds a form, term or labelled form to this labelled form.
Args:
other (:class:`ufl.Form`, :class:`Term` or :class:`LabelledForm`):
the form, term or labelled form to add to this labelled form.
Returns:
:class:`LabelledForm`: a labelled form containing the terms.
"""
if isinstance(other, ufl.Form):
return LabelledForm(*self, Term(other))
elif type(other) is Term:
return LabelledForm(*self, other)
elif type(other) is LabelledForm:
return LabelledForm(*self, *other)
elif other is None:
return self
else:
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
"""
Subtracts a form, term or labelled form from this labelled form.
Args:
other (:class:`ufl.Form`, :class:`Term` or :class:`LabelledForm`):
the form, term or labelled form to subtract from this labelled
form.
Returns:
:class:`LabelledForm`: a labelled form containing the terms.
"""
if type(other) is Term:
return LabelledForm(*self, Constant(-1.)*other)
elif type(other) is LabelledForm:
return LabelledForm(*self, *[Constant(-1.)*t for t in other])
elif type(other) is ufl.algebra.Product:
return LabelledForm(*self, Term(Constant(-1.)*other))
elif other is None:
return self
else:
return NotImplemented
def __mul__(self, other):
"""
Multiplies this labelled form by another quantity.
Args:
other (float, :class:`Constant` or :class:`ufl.algebra.Product`):
the quantity to multiply this labelled form by. If it is a float
or int then it is converted to a :class:`Constant` before the
multiplication. All terms in the form are multiplied.
Returns:
:class:`LabelledForm`: the product of all terms with the quantity.
"""
if type(other) in (float, int):
other = Constant(other)
# UFL can cancel constants to a Zero type which needs treating separately
elif type(other) is ufl.constantvalue.Zero:
other = Constant(0.0)
elif type(other) not in [Constant, ufl.algebra.Product]:
return NotImplemented
return self.label_map(all_terms, lambda t: Term(other*t.form, t.labels))
def __truediv__(self, other):
"""
Divides this labelled form by another quantity.
Args:
other (float, :class:`Constant` or :class:`ufl.algebra.Product`):
the quantity to divide this labelled form by. If it is a float
or int then it is converted to a :class:`Constant` before the
division. All terms in the form are divided.
Returns:
:class:`LabelledForm`: the quotient of all terms with the quantity.
"""
if type(other) in (float, int, Constant, ufl.algebra.Product):
other = Constant(1.0 / other)
return self * other
else:
return NotImplemented
__rmul__ = __mul__
def __iter__(self):
    """Iterates over the terms in the labelled form."""
    yield from self.terms
def __len__(self):
    """Returns the number of terms in the labelled form."""
    return len(self.terms)
def label_map(self, term_filter, map_if_true=identity,
              map_if_false=identity):
    """
    Maps selected terms in the labelled form, returning a new labelled form.

    Terms for which a map returns None are dropped from the result.

    Args:
        term_filter (func): a function to filter the labelled form's terms.
        map_if_true (func, optional): how to map the terms for which the
            term_filter returns True. Defaults to identity.
        map_if_false (func, optional): how to map the terms for which the
            term_filter returns False. Defaults to identity.

    Returns:
        :class:`LabelledForm`: a new labelled form with the terms mapped.
    """
    # Map every term through the appropriate function, discard any that
    # mapped to None, and sum what is left into a single labelled form.
    new_labelled_form = LabelledForm(
        functools.reduce(operator.add,
                         filter(lambda t: t is not None,
                                (map_if_true(t) if term_filter(t) else
                                 map_if_false(t) for t in self.terms)),
                         # TODO: Not clear what the initialiser should be!
                         # No initialiser means label_map can't work if everything is false
                         # None is a problem as cannot be added to Term
                         # NullTerm works but will need dropping ...
                         NullTerm))
    # Drop the NullTerm used to seed the reduction above
    new_labelled_form.terms = list(filter(lambda t: t is not NullTerm,
                                          new_labelled_form.terms))
    return new_labelled_form
@property
def form(self):
    """
    Provides the whole form from the labelled form.

    Raises:
        TypeError: if the labelled form has no terms.

    Returns:
        :class:`ufl.Form`: the whole form corresponding to all the terms.
    """
    # An empty labelled form has no form to return.
    if not self.terms:
        raise TypeError('The labelled form cannot return a form as it has no terms')
    return functools.reduce(operator.add, (t.form for t in self.terms))
class Label(object):
    """Object for tagging forms, allowing them to be manipulated."""

    # __slots__ avoids a per-instance __dict__; "value" is set lazily in
    # __call__ (it records the value most recently applied by this label).
    __slots__ = ["label", "default_value", "value", "validator"]

    def __init__(self, label, *, value=True, validator=None):
        """
        Args:
            label (str): the name of the label.
            value (..., optional): the value for the label to take. Can be any
                type (subject to the validator). Defaults to True.
            validator (func, optional): function to check the validity of any
                value later passed to the label. Defaults to None.
        """
        self.label = label
        self.default_value = value
        self.validator = validator

    def __call__(self, target, value=None):
        """
        Applies the label to a form or term.

        Args:
            target (:class:`ufl.Form`, :class:`Term` or :class:`LabelledForm`):
                the form, term or labelled form to be labelled.
            value (..., optional): the value to attach to this label. Defaults
                to None, in which case the label's default value is used.

        Raises:
            ValueError: if the `target` is not a :class:`ufl.Form`,
                :class:`Term` or :class:`LabelledForm`.

        Returns:
            :class:`Term` or :class:`LabelledForm`: a :class:`Term` is returned
                if the target is a :class:`Term`, otherwise a
                :class:`LabelledForm` is returned.
        """
        # If a value is provided, it must pass this label's validator;
        # otherwise fall back on the default value.
        # NOTE: asserts are stripped under `python -O`, so this validation is
        # only active in non-optimised runs.
        if value is not None:
            assert self.validator
            assert self.validator(value)
            self.value = value
        else:
            self.value = self.default_value

        if isinstance(target, LabelledForm):
            # Label each term individually, forwarding the original `value`
            # argument so every term goes through the same validation path.
            return LabelledForm(*(self(t, value) for t in target.terms))
        elif isinstance(target, ufl.Form):
            return LabelledForm(Term(target, {self.label: self.value}))
        elif isinstance(target, Term):
            new_labels = target.labels.copy()
            new_labels.update({self.label: self.value})
            return Term(target.form, new_labels)
        else:
            raise ValueError("Unable to label %s" % target)

    def remove(self, target):
        """
        Removes a label from a term or labelled form.

        This removes any :class:`Label` with this `label` from
        `target`. If called on an :class:`LabelledForm`, it acts termwise.
        Terms that do not carry this label are returned unchanged.

        Args:
            target (:class:`Term` or :class:`LabelledForm`): term or labelled
                form to have this label removed from.

        Raises:
            ValueError: if the `target` is not a :class:`Term` or a
                :class:`LabelledForm`.
        """
        if isinstance(target, LabelledForm):
            return LabelledForm(*(self.remove(t) for t in target.terms))
        elif isinstance(target, Term):
            try:
                d = target.labels.copy()
                d.pop(self.label)
                return Term(target.form, d)
            except KeyError:
                # The term did not carry this label: return it untouched.
                return target
        else:
            raise ValueError("Unable to unlabel %s" % target)

    def update_value(self, target, new):
        """
        Updates the label of a term or labelled form.

        This updates the value of any :class:`Label` with this `label` from
        `target`. If called on an :class:`LabelledForm`, it acts termwise.
        A term that did not previously carry this label gains it.

        Args:
            target (:class:`Term` or :class:`LabelledForm`): term or labelled
                form to have this label updated.
            new (...): the new value for this label to take.

        Raises:
            ValueError: if the `target` is not a :class:`Term` or a
                :class:`LabelledForm`.
        """
        if isinstance(target, LabelledForm):
            return LabelledForm(*(self.update_value(t, new) for t in target.terms))
        elif isinstance(target, Term):
            # Unlike `remove`, plain dict assignment cannot raise KeyError,
            # so the try/except previously here was dead code.
            d = target.labels.copy()
            d[self.label] = new
            return Term(target.form, d)
        else:
            raise ValueError("Unable to relabel %s" % target)
| {
"content_hash": "d02247c80a0b07bf53dde36d9b0e02a4",
"timestamp": "",
"source": "github",
"line_count": 430,
"max_line_length": 95,
"avg_line_length": 35.38139534883721,
"alnum_prop": 0.5542263704482713,
"repo_name": "firedrakeproject/gusto",
"id": "1117171804a14b3649dc63b1c3bd5376ad8f8184",
"size": "15214",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "gusto/fml/form_manipulation_labelling.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "765"
},
{
"name": "Python",
"bytes": "567084"
}
],
"symlink_target": ""
} |
""" Testing ``isestimable`` in regression module
"""
from __future__ import absolute_import
import numpy as np
from ..regression import isestimable
from numpy.testing import (assert_almost_equal,
assert_array_equal)
from nose.tools import (assert_true, assert_false, assert_raises,
assert_equal, assert_not_equal)
def test_estimable():
    """Exercise ``isestimable`` on full-rank, rank-deficient and
    column-duplicated design matrices.

    Uses plain ``assert`` statements and ``numpy.testing.assert_raises``
    rather than the unmaintained ``nose.tools`` helpers, which fail on
    modern Python versions.
    """
    # Local import keeps the module-level import block untouched.
    from numpy.testing import assert_raises
    rng = np.random.RandomState(20120713)
    N, P = (40, 10)
    X = rng.normal(size=(N, P))
    C = rng.normal(size=(1, P))
    # A full-rank design makes every contrast estimable.
    assert isestimable(C, X)
    assert isestimable(np.eye(P), X)
    for row in np.eye(P):
        assert isestimable(row, X)
    # Two identical columns: only their sum is estimable.
    X = np.ones((40, 2))
    assert isestimable([1, 1], X)
    assert not isestimable([1, 0], X)
    assert not isestimable([0, 1], X)
    assert not isestimable(np.eye(2), X)
    # Duplicated column blocks: contrasts on a single copy are not estimable.
    halfX = rng.normal(size=(N, 5))
    X = np.hstack([halfX, halfX])
    assert not isestimable(np.hstack([np.eye(5), np.zeros((5, 5))]), X)
    assert not isestimable(np.hstack([np.zeros((5, 5)), np.eye(5)]), X)
    assert isestimable(np.hstack([np.eye(5), np.eye(5)]), X)
    # Test array-like for design
    XL = X.tolist()
    assert isestimable(np.hstack([np.eye(5), np.eye(5)]), XL)
    # Test ValueError for incorrect number of columns
    X = rng.normal(size=(N, 5))
    for n in range(1, 4):
        assert_raises(ValueError, isestimable, np.ones((n,)), X)
    assert_raises(ValueError, isestimable, np.eye(4), X)
| {
"content_hash": "d7879f63528116da67f6815091185031",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 74,
"avg_line_length": 35.642857142857146,
"alnum_prop": 0.6205744822979292,
"repo_name": "alexis-roche/nipy",
"id": "815a530ab0707e5584b6f9b4e9352db194d40328",
"size": "1497",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nipy/algorithms/statistics/models/tests/test_estimable.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1592552"
},
{
"name": "C++",
"bytes": "6037"
},
{
"name": "Makefile",
"bytes": "3630"
},
{
"name": "Matlab",
"bytes": "5508"
},
{
"name": "Python",
"bytes": "2891734"
}
],
"symlink_target": ""
} |
from .functions import call, popen, check_output, log
# Absolute paths to the CUPS command-line tools invoked below.
LPSTAT = '/usr/bin/lpstat'
LPADMIN = '/usr/sbin/lpadmin'
def reset():
    """Reset this printing system.

    Currently a no-op placeholder.
    """
    pass
def delete_printer(printer):
    """Remove the print queue named `printer` via ``lpadmin -x``.

    Returns whatever :func:`call` returns for the lpadmin invocation.
    """
    log('Deleting printer %s' % printer)
    return call(LPADMIN, '-x', printer)
def delete_printers():
    """Delete all print queues on the system.

    Parses ``lpstat -p`` output lines of the form
    ``printer <name> ...`` and removes each queue found.
    """
    for line in check_output(LPSTAT, '-p').split("\n"):
        parts = line.split(' ')
        # Skip the empty trailing line (split("\n") always yields one) and any
        # status/continuation lines that are not printer entries; previously
        # the empty line raised IndexError on parts[1].
        if len(parts) < 2 or parts[0] != 'printer':
            continue
        delete_printer(parts[1])
def add_printer(printer, options=None, delete=True):
    """Add a printer queue.

    A printer is a tuple (name, PPD file name, LPD address).

    Args:
        printer: tuple of (human-readable name, PPD file name, LPD address).
        options: optional dict of extra ``-o key=value`` lpadmin options.
        delete: when True, remove any existing queue of the same name first.

    Returns whatever :func:`popen` returns for the lpadmin invocation.
    """
    # Avoid the mutable-default-argument pitfall of `options={}`.
    if options is None:
        options = {}
    name = printer[0]
    # lpadmin installs the queue under the space-stripped name, so any
    # pre-existing queue must be deleted under that same identifier.
    # (Previously this passed printer[1], the PPD file name, which could
    # never match an installed queue.)
    queue = name.replace(' ', '-')
    if delete:
        delete_printer(queue)
    ppd = '/Library/Printers/PPDs/Contents/Resources/%s' % printer[1]
    # Install the printer
    cmd = ['/usr/sbin/lpadmin',
           '-p', queue,
           '-L', name[0:2],  # location: first two chars of the name — presumably a site code; confirm
           '-D', name,
           '-v', 'lpd://%s' % printer[2],
           '-P', ppd,
           '-E',
           '-o', 'printer-is-shared=false',
           '-o', 'printer-error-policy=abort-job']
    for key, val in options.items():
        cmd.append('-o')
        cmd.append(str(key) + '=' + str(val))
    return popen(cmd)
| {
"content_hash": "c927185cfaf1528f5887d4780ad43783",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 69,
"avg_line_length": 24.76,
"alnum_prop": 0.5533117932148627,
"repo_name": "filipp/MacHammer",
"id": "55fd31dd1b4cdc983a67352a8a5925b38ffce267",
"size": "1263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "machammer/printers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "56478"
},
{
"name": "Python",
"bytes": "18703"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# google-cloud-recommendations-ai documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex  # NOTE(review): appears unused in this file — confirm before removing.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))

# For plugins that can not read conf.py.
# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
sys.path.insert(0, os.path.abspath("."))

# Placeholder; used below to derive `release` and `version`.
__version__ = ""

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.5.5"

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.intersphinx",
    "sphinx.ext.coverage",
    "sphinx.ext.doctest",
    "sphinx.ext.napoleon",
    "sphinx.ext.todo",
    "sphinx.ext.viewcode",
    "recommonmark",
]

# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_options = {"members": True}
autosummary_generate = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The root toctree document.
root_doc = "index"

# General information about the project.
project = "google-cloud-recommendations-ai"
copyright = "2019, Google"
author = "Google APIs"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): Sphinx >= 5 warns on `language = None` — confirm before changing.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
    "_build",
    "**/.nox/**/*",
    "samples/AUTHORING_GUIDE.md",
    "samples/CONTRIBUTING.md",
    "samples/snippets/README.rst",
]

# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    "description": "Google Cloud Client Libraries for google-cloud-recommendations-ai",
    "github_user": "googleapis",
    "github_repo": "python-recommendations-ai",
    "github_banner": True,
    "font_family": "'Roboto', Georgia, sans",
    "head_font_family": "'Roboto', Georgia, serif",
    "code_font_family": "'Roboto Mono', 'Consolas', monospace",
}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-recommendations-ai-doc"

# -- Options for warnings ------------------------------------------------------

suppress_warnings = [
    # Temporarily suppress this to avoid "more than one target found for
    # cross-reference" warning, which are intractable for us to avoid while in
    # a mono-repo.
    # See https://github.com/sphinx-doc/sphinx/blob
    # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
    "ref.python"
]

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
    # Latex figure (float) alignment
    #'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        root_doc,
        "google-cloud-recommendations-ai.tex",
        "google-cloud-recommendations-ai Documentation",
        author,
        "manual",
    )
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# If true, show page references after internal links.
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (
        root_doc,
        "google-cloud-recommendations-ai",
        "google-cloud-recommendations-ai Documentation",
        [author],
        1,
    )
]

# If true, show URL addresses after external links.
# man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        root_doc,
        "google-cloud-recommendations-ai",
        "google-cloud-recommendations-ai Documentation",
        author,
        "google-cloud-recommendations-ai",
        "google-cloud-recommendations-ai Library",
        "APIs",
    )
]

# Documents to append as an appendix to all manuals.
# texinfo_appendices = []

# If false, no module index is generated.
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    "python": ("https://python.readthedocs.org/en/latest/", None),
    "google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
    "google.api_core": (
        "https://googleapis.dev/python/google-api-core/latest/",
        None,
    ),
    "grpc": ("https://grpc.github.io/grpc/python/", None),
    "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
    "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}

# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
| {
"content_hash": "6bde73db2e14d1fc9cc957e2a049c9a2",
"timestamp": "",
"source": "github",
"line_count": 384,
"max_line_length": 87,
"avg_line_length": 32.6640625,
"alnum_prop": 0.6935342422068086,
"repo_name": "googleapis/python-recommendations-ai",
"id": "77cfe26e30740afef9129db0ca5a678b85a7f412",
"size": "12543",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1316965"
},
{
"name": "Shell",
"bytes": "30696"
}
],
"symlink_target": ""
} |
'''
Module to handle generating test files.
'''
from __future__ import absolute_import, division, print_function
import shutil
import sys
from os.path import dirname, join, isdir, exists
def create_files(dir_path, m):
    """
    Create the test files for pkg in the directory given.  The resulting
    test files are configuration (i.e. platform, architecture, Python and
    numpy version, CE/Pro) independent.
    Return False, if the package has no tests (for any configuration), and
    True if it has.
    """
    has_files = False
    for fn in m.get_value('test/files', []):
        has_files = True
        src = join(m.path, fn)
        # Directories are copied recursively; plain files go straight in.
        if isdir(src):
            shutil.copytree(src, join(dir_path, fn))
        else:
            shutil.copy(src, dir_path)
    return has_files
def create_shell_files(dir_path, m):
    """Write the shell test script (run_test.bat/.sh) into dir_path.

    Copies the recipe's own script if present, then appends any
    test/commands entries. Returns True if any shell tests were produced.
    """
    name = 'run_test.bat' if sys.platform == 'win32' else 'run_test.sh'
    has_tests = False
    if exists(join(m.path, name)):
        shutil.copy(join(m.path, name), dir_path)
        has_tests = True
    # Opening in append mode creates the script if it was not copied above.
    with open(join(dir_path, name), 'a') as f:
        f.write('\n\n')
        for cmd in m.get_value('test/commands', []):
            f.write(cmd)
            f.write('\n')
            has_tests = True
    return has_tests
def create_py_files(dir_path, m):
    """Write run_test.py for the package into dir_path.

    The generated script imports everything listed under test/imports and
    inlines the recipe's own run_test.py if one exists. Returns True if
    any Python tests were produced.
    """
    has_tests = False
    out_path = join(dir_path, 'run_test.py')
    with open(out_path, 'w') as out:
        out.write("# tests for %s (this is a generated file)\n" % m.dist())
        # The shared test header lives next to this module.
        with open(join(dirname(__file__), 'header_test.py')) as header:
            out.write(header.read() + '\n')
        out.write("print('===== testing package: %s =====')\n" % m.dist())
        for name in m.get_value('test/imports', []):
            out.write('print("import: %r")\n' % name)
            out.write('import %s\n' % name)
            out.write('\n')
            has_tests = True
        try:
            with open(join(m.path, 'run_test.py')) as user_test:
                out.write("# --- run_test.py (begin) ---\n")
                out.write(user_test.read())
                out.write("# --- run_test.py (end) ---\n")
            has_tests = True
        except IOError:
            out.write("# no run_test.py exists for this package\n")
        out.write("\nprint('===== %s OK =====')\n" % m.dist())
    return has_tests
def create_pl_files(dir_path, m):
    """Write run_test.pl for the package into dir_path.

    The generated Perl script `use`s everything listed under test/imports,
    checks each module's VERSION against the package version, and inlines
    the recipe's own run_test.pl if one exists. Returns True if any Perl
    tests were generated.
    """
    has_tests = False
    with open(join(dir_path, 'run_test.pl'), 'w') as fo:
        print(r'# tests for %s (this is a generated file)' % m.dist(), file=fo)
        print(r'print("===== testing package: %s =====\n");' % m.dist(),
              file=fo)
        # Trailing zeros are stripped so e.g. "1.10" and "1.1" compare equal
        # on both sides of the version check below.
        print(r'my $expected_version = "%s";' % m.version().rstrip('0'),
              file=fo)
        for name in m.get_value('test/imports'):
            print(r'print("import: %s\n");' % name, file=fo)
            print('use %s;\n' % name, file=fo)
            # Don't try to print version for complex imports
            if ' ' not in name:
                print(("if (defined {0}->VERSION) {{\n" +
                       "\tmy $given_version = {0}->VERSION;\n" +
                       "\t$given_version =~ s/0+$//;\n" +
                       "\tdie('Expected version ' . $expected_version . ' but" +
                       " found ' . $given_version) unless ($expected_version " +
                       "eq $given_version);\n" +
                       "\tprint('\tusing version ' . {0}->VERSION . '\n');\n" +
                       "\n}}").format(name), file=fo)
            has_tests = True
        try:
            with open(join(m.path, 'run_test.pl')) as fi:
                print("# --- run_test.pl (begin) ---", file=fo)
                fo.write(fi.read())
                print("# --- run_test.pl (end) ---", file=fo)
            has_tests = True
        except IOError:
            fo.write("# no run_test.pl exists for this package\n")
        print('\nprint("===== %s OK =====\\n");' % m.dist(), file=fo)
    return has_tests
| {
"content_hash": "a7421fbca465f608a0dcbf9656877750",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 80,
"avg_line_length": 35.5045045045045,
"alnum_prop": 0.5041867546308043,
"repo_name": "tacaswell/conda-build",
"id": "f63cf222c2bd4ae387858ef943c249306cc97ddc",
"size": "3941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conda_build/create_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
def extractAutomtlWordpressCom(item):
    '''
    Parser for 'automtl.wordpress.com'
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])

    # Require a volume or chapter number and skip preview posts.
    if not (vol or chp) or "preview" in item['title'].lower():
        return None

    # (tag to look for, release name, translation type) — checked in order.
    known_tags = (
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    )
    for tagname, name, tl_type in known_tags:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
| {
"content_hash": "98767825836de4f753a4b44045ae4b58",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 26.333333333333332,
"alnum_prop": 0.6311030741410488,
"repo_name": "fake-name/ReadableWebProxy",
"id": "6805c4e3e3ec8a812239bdb2b7f3854a66d8bf34",
"size": "554",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractAutomtlWordpressCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
# Flag to indicate if this migration is too risky
# to run online and needs to be coordinated for offline
is_dangerous = False
def forwards(self, orm):
    """Relax SentryApp.proxy_user and SentryApp.application to nullable
    one-to-one columns that are set to NULL when the referenced row is
    deleted."""
    # Changing field 'SentryApp.proxy_user'
    db.alter_column(
        'sentry_sentryapp',
        'proxy_user_id',
        self.gf('django.db.models.fields.related.OneToOneField')(
            unique=True,
            null=True,
            on_delete=models.SET_NULL,
            to=orm['sentry.User']))

    # Changing field 'SentryApp.application'
    db.alter_column(
        'sentry_sentryapp',
        'application_id',
        self.gf('django.db.models.fields.related.OneToOneField')(
            unique=True,
            null=True,
            on_delete=models.SET_NULL,
            to=orm['sentry.ApiApplication']))
def backwards(self, orm):
pass
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apiapplication': {
'Meta': {'object_name': 'ApiApplication'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'client_id': ('django.db.models.fields.CharField', [], {'default': "'6fec732746ca4b90a6370668e43e4256ee670ed0d6dc4f8b911f3a9cfd57d0bb'", 'unique': 'True', 'max_length': '64'}),
'client_secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'75299420b5e44ee68bcbdb9f278597f4e224e0bc2750473fbff1b9a953ffaa44'"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Stunning Skylark'", 'max_length': '64', 'blank': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'privacy_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'redirect_uris': ('django.db.models.fields.TextField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'terms_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.apiauthorization': {
'Meta': {'unique_together': "(('user', 'application'),)", 'object_name': 'ApiAuthorization'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apigrant': {
'Meta': {'object_name': 'ApiGrant'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']"}),
'code': ('django.db.models.fields.CharField', [], {'default': "'9c52174bf4c64723a1b7a5a598d5d840'", 'max_length': '64', 'db_index': 'True'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 10, 12, 0, 0)', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'redirect_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'object_name': 'ApiToken'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 11, 11, 0, 0)', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'refresh_token': ('django.db.models.fields.CharField', [], {'default': "'0157108f16ca45729df58193180fada6ca6b3e55e4f24a5293ea2c72cde6ef05'", 'max_length': '64', 'unique': 'True', 'null': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'e5db5b8df3274e0688ca93e3af8a26daa16415133bda47c6b8d55fd3837e9f6b'", 'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.assistantactivity': {
'Meta': {'unique_together': "(('user', 'guide_id'),)", 'object_name': 'AssistantActivity', 'db_table': "'sentry_assistant_activity'"},
'dismissed_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'guide_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'useful': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'viewed_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authenticator': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'"},
'config': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 10, 19, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.commit': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'Commit', 'index_together': "(('repository_id', 'date_added'),)"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {'unique_together': "(('organization_id', 'email'), ('organization_id', 'external_id'))", 'object_name': 'CommitAuthor'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '164', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.commitfilechange': {
'Meta': {'unique_together': "(('commit', 'filename'),)", 'object_name': 'CommitFileChange'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'sentry.counter': {
'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.deletedorganization': {
'Meta': {'object_name': 'DeletedOrganization'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deletedproject': {
'Meta': {'object_name': 'DeletedProject'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deletedteam': {
'Meta': {'object_name': 'DeletedTeam'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deploy': {
'Meta': {'object_name': 'Deploy'},
'date_finished': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'notified': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.discoversavedquery': {
'Meta': {'object_name': 'DiscoverSavedQuery'},
'created_by': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.DiscoverSavedQueryProject']", 'symmetrical': 'False'}),
'query': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'})
},
'sentry.discoversavedqueryproject': {
'Meta': {'unique_together': "(('project', 'discover_saved_query'),)", 'object_name': 'DiscoverSavedQueryProject'},
'discover_saved_query': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DiscoverSavedQuery']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.distribution': {
'Meta': {'unique_together': "(('release', 'name'),)", 'object_name': 'Distribution'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.dsymapp': {
'Meta': {'unique_together': "(('project', 'platform', 'app_id'),)", 'object_name': 'DSymApp'},
'app_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'sync_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'})
},
'sentry.email': {
'Meta': {'object_name': 'Email'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('sentry.db.models.fields.citext.CIEmailField', [], {'unique': 'True', 'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.environment': {
'Meta': {'unique_together': "(('organization_id', 'name'),)", 'object_name': 'Environment'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.EnvironmentProject']", 'symmetrical': 'False'})
},
'sentry.environmentproject': {
'Meta': {'unique_together': "(('project', 'environment'),)", 'object_name': 'EnvironmentProject'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_hidden': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventattachment': {
'Meta': {'unique_together': "(('project_id', 'event_id', 'file'),)", 'object_name': 'EventAttachment', 'index_together': "(('project_id', 'date_added'),)"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventprocessingissue': {
'Meta': {'unique_together': "(('raw_event', 'processing_issue'),)", 'object_name': 'EventProcessingIssue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'processing_issue': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProcessingIssue']"}),
'raw_event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.RawEvent']"})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('group_id', 'key_id', 'value_id'),)"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project_id', 'ident'), ('project_id', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project_id', 'email'), ('project_id', 'username'), ('project_id', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.externalissue': {
'Meta': {'unique_together': "(('organization_id', 'integration_id', 'key'),)", 'object_name': 'ExternalIssue'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'metadata': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'title': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'sentry.featureadoption': {
'Meta': {'unique_together': "(('organization', 'feature_id'),)", 'object_name': 'FeatureAdoption'},
'applicable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'feature_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'db_index': 'True'}),
'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.fileblobowner': {
'Meta': {'unique_together': "(('blob', 'organization'),)", 'object_name': 'FileBlobOwner'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'null': 'True', 'to': "orm['sentry.Team']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupcommitresolution': {
'Meta': {'unique_together': "(('group_id', 'commit_id'),)", 'object_name': 'GroupCommitResolution'},
'commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.groupenvironment': {
'Meta': {'unique_together': "[('group_id', 'environment_id')]", 'object_name': 'GroupEnvironment', 'index_together': "[('environment_id', 'first_release_id')]"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'first_release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'group_tombstone_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'state': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.grouphashtombstone': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHashTombstone'},
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.grouplink': {
'Meta': {'unique_together': "(('group_id', 'linked_type', 'linked_id'),)", 'object_name': 'GroupLink'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'linked_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'linked_type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'relationship': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '2'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'object_name': 'GroupRedirect'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.grouprelease': {
'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease'},
'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupshare': {
'Meta': {'object_name': 'GroupShare'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'25973ef0f4a647b6b99226a8052112f3'", 'unique': 'True', 'max_length': '32'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'state': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'user_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.groupsubscription': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Project']"}),
'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project_id', 'group_id', 'key'),)", 'object_name': 'GroupTagKey'},
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('group_id', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project_id', 'key', 'value', 'last_seen'),)"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.grouptombstone': {
'Meta': {'object_name': 'GroupTombstone'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'unique': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.identity': {
'Meta': {'unique_together': "(('idp', 'external_id'), ('idp', 'user'))", 'object_name': 'Identity'},
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'idp': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.IdentityProvider']"}),
'scopes': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.identityprovider': {
'Meta': {'unique_together': "(('type', 'external_id'),)", 'object_name': 'IdentityProvider'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.integration': {
'Meta': {'unique_together': "(('provider', 'external_id'),)", 'object_name': 'Integration'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'metadata': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationIntegration']", 'to': "orm['sentry.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectIntegration']", 'to': "orm['sentry.Project']"}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'})
},
'sentry.integrationexternalproject': {
'Meta': {'unique_together': "(('organization_integration_id', 'external_id'),)", 'object_name': 'IntegrationExternalProject'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'organization_integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'resolved_status': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'unresolved_status': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.latestrelease': {
'Meta': {'unique_together': "(('repository_id', 'environment_id'),)", 'object_name': 'LatestRelease'},
'commit_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'deploy_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationavatar': {
'Meta': {'object_name': 'OrganizationAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.Organization']"})
},
'sentry.organizationintegration': {
'Meta': {'unique_together': "(('organization', 'integration'),)", 'object_name': 'OrganizationIntegration'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'default_auth_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.processingissue': {
'Meta': {'unique_together': "(('project', 'checksum', 'type'),)", 'object_name': 'ProcessingIssue'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'sentry.project': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectTeam']", 'to': "orm['sentry.Team']"})
},
'sentry.projectavatar': {
'Meta': {'object_name': 'ProjectAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.Project']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectdebugfile': {
'Meta': {'unique_together': "(('project', 'debug_id'),)", 'object_name': 'ProjectDebugFile', 'db_table': "'sentry_projectdsymfile'"},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'debug_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_column': "'uuid'"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.projectintegration': {
'Meta': {'unique_together': "(('project', 'integration'),)", 'object_name': 'ProjectIntegration'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'rate_limit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'rate_limit_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.projectownership': {
'Meta': {'object_name': 'ProjectOwnership'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'fallthrough': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'raw': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'schema': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.projectredirect': {
'Meta': {'unique_together': "(('organization', 'redirect_slug'),)", 'object_name': 'ProjectRedirect'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'redirect_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'sentry.projectsymcachefile': {
'Meta': {'unique_together': "(('project', 'dsym_file'),)", 'object_name': 'ProjectSymCacheFile'},
'cache_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'dsym_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDebugFile']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.projectteam': {
'Meta': {'unique_together': "(('project', 'team'),)", 'object_name': 'ProjectTeam'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.pullrequest': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'PullRequest', 'db_table': "'sentry_pull_request'", 'index_together': "(('repository_id', 'date_added'), ('organization_id', 'merge_commit_sha'))"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'merge_commit_sha': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'title': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'sentry.pullrequestcommit': {
'Meta': {'unique_together': "(('pull_request', 'commit'),)", 'object_name': 'PullRequestCommit', 'db_table': "'sentry_pullrequest_commit'"},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'pull_request': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.PullRequest']"})
},
'sentry.rawevent': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'RawEvent'},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.relay': {
'Meta': {'object_name': 'Relay'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_internal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'relay_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
'sentry.release': {
'Meta': {'unique_together': "(('organization', 'version'),)", 'object_name': 'Release'},
'authors': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'commit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'last_deploy_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'releases'", 'symmetrical': 'False', 'through': "orm['sentry.ReleaseProject']", 'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'total_deploys': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
'sentry.releasecommit': {
'Meta': {'unique_together': "(('release', 'commit'), ('release', 'order'))", 'object_name': 'ReleaseCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseenvironment': {
'Meta': {'unique_together': "(('organization_id', 'release_id', 'environment_id'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'dist': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Distribution']", 'null': 'True'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseheadcommit': {
'Meta': {'unique_together': "(('repository_id', 'release'),)", 'object_name': 'ReleaseHeadCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.releaseproject': {
'Meta': {'unique_together': "(('project', 'release'),)", 'object_name': 'ReleaseProject', 'db_table': "'sentry_release_project'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseprojectenvironment': {
'Meta': {'unique_together': "(('project', 'release', 'environment'),)", 'object_name': 'ReleaseProjectEnvironment'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_deploy_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'new_issues_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.repository': {
'Meta': {'unique_together': "(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))", 'object_name': 'Repository'},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.reprocessingreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'ReprocessingReport'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.scheduleddeletion': {
'Meta': {'unique_together': "(('app_label', 'model_name', 'object_id'),)", 'object_name': 'ScheduledDeletion'},
'aborted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 11, 11, 0, 0)'}),
'guid': ('django.db.models.fields.CharField', [], {'default': "'51564db1f18149d7a8c6c525f238a1c6'", 'unique': 'True', 'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'in_progress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'object_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.scheduledjob': {
'Meta': {'object_name': 'ScheduledJob'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'payload': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'})
},
'sentry.sentryapp': {
'Meta': {'object_name': 'SentryApp'},
'application': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'sentry_app'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.ApiApplication']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'owned_sentry_apps'", 'to': "orm['sentry.User']"}),
'proxy_user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'sentry_app'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.User']"}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'31d3f1a0-d09f-4b72-a7f9-5141d288d3e0'", 'max_length': '64'}),
'webhook_url': ('django.db.models.fields.TextField', [], {})
},
'sentry.sentryappinstallation': {
'Meta': {'object_name': 'SentryAppInstallation'},
'api_grant': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'sentry_app_installation'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.ApiGrant']"}),
'authorization': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'sentry_app_installation'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.ApiAuthorization']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_app_installations'", 'to': "orm['sentry.Organization']"}),
'sentry_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'installations'", 'to': "orm['sentry.SentryApp']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'cdb79ccc-ccbb-4ad3-92cc-fe2c17dd6aeb'", 'max_length': '64'})
},
'sentry.servicehook': {
'Meta': {'object_name': 'ServiceHook'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'events': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'d20dfeb2c668444ca6ff40a2add65b8a00bccdd65e4a4158b2f8934b5a6e23eb'"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '512'}),
'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project_id', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project_id', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'", 'index_together': "(('project_id', 'key', 'last_seen'),)"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.teamavatar': {
'Meta': {'object_name': 'TeamAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.Team']"})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_sentry_app': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'session_nonce': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useremail': {
'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail'},
'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'emails'", 'to': "orm['sentry.User']"}),
'validation_hash': ('django.db.models.fields.CharField', [], {'default': "u'nmpb4j0iu3np1J7rcVwPd3jWeiNBWMhk'", 'max_length': '32'})
},
'sentry.userip': {
'Meta': {'unique_together': "(('user', 'ip_address'),)", 'object_name': 'UserIP'},
'country_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'region_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'), ('user', 'organization', 'key'))", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.userpermission': {
'Meta': {'unique_together': "(('user', 'permission'),)", 'object_name': 'UserPermission'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'permission': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']", 'null': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'event_user_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.versiondsymfile': {
'Meta': {'unique_together': "(('dsym_file', 'version', 'build'),)", 'object_name': 'VersionDSymFile'},
'build': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dsym_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymApp']"}),
'dsym_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDebugFile']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '32'})
}
}
complete_apps = ['sentry']
| {
"content_hash": "797205df46c135fd283d79392a3f52da",
"timestamp": "",
"source": "github",
"line_count": 1251,
"max_line_length": 236,
"avg_line_length": 91.9832134292566,
"alnum_prop": 0.5799549843140323,
"repo_name": "mvaled/sentry",
"id": "36a55a79a64eb4d1447d9b5e6cdeec8402b3ac49",
"size": "115095",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/south_migrations/0438_auto__add_index_sentryapp_status__chg_field_sentryapp_proxy_user__chg_.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
} |
"""Nova base exception handling.
Includes decorator for re-raising Nova-type exceptions.
SHOULD include dedicated exception logging.
"""
from oslo_log import log as logging
import webob.exc
from webob import util as woutil
import nova.conf
from nova.i18n import _, _LE
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class ConvertedException(webob.exc.WSGIHTTPException):
    def __init__(self, code, title="", explanation=""):
        """Build a WSGI HTTP exception for a numeric status code.

        :param code: numeric HTTP status code for the response
        :param title: reason phrase for the status line; derived from
            ``code`` when empty
        :param explanation: free-form body text for the response
        """
        self.code = code
        # RFC 2616 mandates a Status-Line of the form
        # '<version> SP <numeric code> SP <reason phrase>'
        # (http://www.faqs.org/rfcs/rfc2616.html), so both the numeric
        # code and its textual phrase must be non-empty.
        if not title:
            try:
                title = woutil.status_reasons[self.code]
            except KeyError:
                LOG.error(_LE("Improper or unknown HTTP status code used: "
                              "%d"), code)
                title = woutil.status_generic_reasons[self.code // 100]
        self.title = title
        self.explanation = explanation
        super(ConvertedException, self).__init__()
class NovaException(Exception):
    """Base Nova Exception
    To correctly use this class, inherit from it and define
    a 'msg_fmt' property. That msg_fmt will get printf'd
    with the keyword arguments provided to the constructor.
    """
    # Default message; also the fallback when msg_fmt % kwargs fails.
    msg_fmt = _("An unknown exception occurred.")
    # Default HTTP status code associated with this exception class.
    code = 500
    # Extra HTTP headers to attach when rendering an API error response.
    headers = {}
    # Whether the formatted message is safe to expose to end users.
    safe = False

    def __init__(self, message=None, **kwargs):
        """Store kwargs and build the exception message.

        :param message: explicit message; when falsy, ``msg_fmt`` is
            %-interpolated with the keyword arguments instead
        :param kwargs: values for the ``msg_fmt`` placeholders, kept on
            ``self.kwargs`` for later inspection by handlers
        """
        self.kwargs = kwargs
        # Expose the class-level status code through kwargs for handlers
        # that only see the kwargs dict, without clobbering an explicit
        # 'code' the caller already supplied.
        if 'code' not in self.kwargs:
            try:
                self.kwargs['code'] = self.code
            except AttributeError:
                pass
        if not message:
            try:
                message = self.msg_fmt % kwargs
            except Exception:
                # NOTE(melwitt): This is done in a separate method so it can be
                # monkey-patched during testing to make it a hard failure.
                self._log_exception()
                # Fall back to the raw, uninterpolated format string so the
                # exception still carries a human-readable message.
                message = self.msg_fmt
        self.message = message
        super(NovaException, self).__init__(message)

    def _log_exception(self):
        """Log a failed msg_fmt interpolation together with the kwargs."""
        # kwargs doesn't match a variable in the message
        # log the issue and the kwargs
        LOG.exception(_LE('Exception in string format operation'))
        for name, value in self.kwargs.items():
            LOG.error("%s: %s" % (name, value)) # noqa

    def format_message(self):
        """Return the fully formatted exception message."""
        # NOTE(mrodden): use the first argument to the python Exception object
        # which should be our full NovaException message, (see __init__)
        return self.args[0]
class EncryptionFailure(NovaException):
msg_fmt = _("Failed to encrypt text: %(reason)s")
class DecryptionFailure(NovaException):
msg_fmt = _("Failed to decrypt text: %(reason)s")
class RevokeCertFailure(NovaException):
msg_fmt = _("Failed to revoke certificate for %(project_id)s")
class VirtualInterfaceCreateException(NovaException):
msg_fmt = _("Virtual Interface creation failed")
class VirtualInterfaceMacAddressException(NovaException):
msg_fmt = _("Creation of virtual interface with "
"unique mac address failed")
class VirtualInterfacePlugException(NovaException):
msg_fmt = _("Virtual interface plugin failed")
class VirtualInterfaceUnplugException(NovaException):
msg_fmt = _("Failed to unplug virtual interface: %(reason)s")
class GlanceConnectionFailed(NovaException):
msg_fmt = _("Connection to glance host %(server)s failed: "
"%(reason)s")
class CinderConnectionFailed(NovaException):
msg_fmt = _("Connection to cinder host failed: %(reason)s")
class UnsupportedCinderAPIVersion(NovaException):
msg_fmt = _('Nova does not support Cinder API version %(version)s')
class Forbidden(NovaException):
    """Base class for authorization failures; maps to HTTP 403."""
    msg_fmt = _("Forbidden")
    code = 403
class AdminRequired(Forbidden):
msg_fmt = _("User does not have admin privileges")
class PolicyNotAuthorized(Forbidden):
msg_fmt = _("Policy doesn't allow %(action)s to be performed.")
class VolumeLimitExceeded(Forbidden):
msg_fmt = _("Volume resource quota exceeded")
class ImageNotActive(NovaException):
# NOTE(jruzicka): IncorrectState is used for volumes only in EC2,
# but it still seems like the most appropriate option.
msg_fmt = _("Image %(image_id)s is not active.")
class ImageNotAuthorized(NovaException):
msg_fmt = _("Not authorized for image %(image_id)s.")
class Invalid(NovaException):
    """Base class for malformed-request errors; maps to HTTP 400."""
    msg_fmt = _("Bad Request - Invalid Parameters")
    code = 400
class InvalidBDM(Invalid):
msg_fmt = _("Block Device Mapping is Invalid.")
class InvalidBDMSnapshot(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"failed to get snapshot %(id)s.")
class InvalidBDMVolume(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"failed to get volume %(id)s.")
class InvalidBDMImage(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"failed to get image %(id)s.")
class InvalidBDMBootSequence(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"Boot sequence for the instance "
"and image/block device mapping "
"combination is not valid.")
class InvalidBDMLocalsLimit(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"You specified more local devices than the "
"limit allows")
class InvalidBDMEphemeralSize(InvalidBDM):
msg_fmt = _("Ephemeral disks requested are larger than "
"the instance type allows. If no size is given "
"in one block device mapping, flavor ephemeral "
"size will be used.")
class InvalidBDMSwapSize(InvalidBDM):
msg_fmt = _("Swap drive requested is larger than instance type allows.")
class InvalidBDMFormat(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"%(details)s")
class InvalidBDMForLegacy(InvalidBDM):
msg_fmt = _("Block Device Mapping cannot "
"be converted to legacy format. ")
class InvalidBDMVolumeNotBootable(InvalidBDM):
msg_fmt = _("Block Device %(id)s is not bootable.")
class InvalidAttribute(Invalid):
msg_fmt = _("Attribute not supported: %(attr)s")
class ValidationError(Invalid):
msg_fmt = "%(detail)s"
class VolumeAttachFailed(Invalid):
msg_fmt = _("Volume %(volume_id)s could not be attached. "
"Reason: %(reason)s")
class VolumeUnattached(Invalid):
msg_fmt = _("Volume %(volume_id)s is not attached to anything")
class VolumeNotCreated(NovaException):
msg_fmt = _("Volume %(volume_id)s did not finish being created"
" even after we waited %(seconds)s seconds or %(attempts)s"
" attempts. And its status is %(volume_status)s.")
class VolumeEncryptionNotSupported(Invalid):
msg_fmt = _("Volume encryption is not supported for %(volume_type)s "
"volume %(volume_id)s")
class InvalidKeypair(Invalid):
msg_fmt = _("Keypair data is invalid: %(reason)s")
class InvalidRequest(Invalid):
msg_fmt = _("The request is invalid.")
class InvalidInput(Invalid):
msg_fmt = _("Invalid input received: %(reason)s")
class InvalidVolume(Invalid):
msg_fmt = _("Invalid volume: %(reason)s")
class InvalidVolumeAccessMode(Invalid):
msg_fmt = _("Invalid volume access mode: %(access_mode)s")
class InvalidMetadata(Invalid):
msg_fmt = _("Invalid metadata: %(reason)s")
class InvalidMetadataSize(Invalid):
msg_fmt = _("Invalid metadata size: %(reason)s")
class InvalidPortRange(Invalid):
msg_fmt = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s")
class InvalidIpProtocol(Invalid):
msg_fmt = _("Invalid IP protocol %(protocol)s.")
class InvalidContentType(Invalid):
msg_fmt = _("Invalid content type %(content_type)s.")
class InvalidAPIVersionString(Invalid):
msg_fmt = _("API Version String %(version)s is of invalid format. Must "
"be of format MajorNum.MinorNum.")
class VersionNotFoundForAPIMethod(Invalid):
msg_fmt = _("API version %(version)s is not supported on this method.")
class InvalidGlobalAPIVersion(Invalid):
msg_fmt = _("Version %(req_ver)s is not supported by the API. Minimum "
"is %(min_ver)s and maximum is %(max_ver)s.")
class ApiVersionsIntersect(Invalid):
msg_fmt = _("Version of %(name)s %(min_ver)s %(max_ver)s intersects "
"with another versions.")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
msg_fmt = "%(err)s"
class InvalidAggregateAction(Invalid):
msg_fmt = _("Unacceptable parameters.")
code = 400
class InvalidAggregateActionAdd(InvalidAggregateAction):
msg_fmt = _("Cannot add host to aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidAggregateActionDelete(InvalidAggregateAction):
msg_fmt = _("Cannot remove host from aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidAggregateActionUpdate(InvalidAggregateAction):
msg_fmt = _("Cannot update aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidAggregateActionUpdateMeta(InvalidAggregateAction):
msg_fmt = _("Cannot update metadata of aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidSortKey(Invalid):
msg_fmt = _("Sort key supplied was not valid.")
class InvalidStrTime(Invalid):
msg_fmt = _("Invalid datetime string: %(reason)s")
class InvalidNUMANodesNumber(Invalid):
msg_fmt = _("The property 'numa_nodes' cannot be '%(nodes)s'. "
"It must be a number greater than 0")
class InvalidName(Invalid):
msg_fmt = _("An invalid 'name' value was provided. "
"The name must be: %(reason)s")
class InstanceInvalidState(Invalid):
msg_fmt = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot "
"%(method)s while the instance is in this state.")
class InstanceNotRunning(Invalid):
msg_fmt = _("Instance %(instance_id)s is not running.")
class InstanceNotInRescueMode(Invalid):
msg_fmt = _("Instance %(instance_id)s is not in rescue mode")
class InstanceNotRescuable(Invalid):
msg_fmt = _("Instance %(instance_id)s cannot be rescued: %(reason)s")
class InstanceNotReady(Invalid):
msg_fmt = _("Instance %(instance_id)s is not ready")
class InstanceSuspendFailure(Invalid):
msg_fmt = _("Failed to suspend instance: %(reason)s")
class InstanceResumeFailure(Invalid):
msg_fmt = _("Failed to resume instance: %(reason)s")
class InstancePowerOnFailure(Invalid):
msg_fmt = _("Failed to power on instance: %(reason)s")
class InstancePowerOffFailure(Invalid):
msg_fmt = _("Failed to power off instance: %(reason)s")
class InstanceRebootFailure(Invalid):
msg_fmt = _("Failed to reboot instance: %(reason)s")
class InstanceTerminationFailure(Invalid):
msg_fmt = _("Failed to terminate instance: %(reason)s")
class InstanceDeployFailure(Invalid):
msg_fmt = _("Failed to deploy instance: %(reason)s")
class MultiplePortsNotApplicable(Invalid):
msg_fmt = _("Failed to launch instances: %(reason)s")
class InvalidFixedIpAndMaxCountRequest(Invalid):
msg_fmt = _("Failed to launch instances: %(reason)s")
class ServiceUnavailable(Invalid):
msg_fmt = _("Service is unavailable at this time.")
class ServiceNotUnique(Invalid):
msg_fmt = _("More than one possible service found.")
class ComputeResourcesUnavailable(ServiceUnavailable):
msg_fmt = _("Insufficient compute resources: %(reason)s.")
class HypervisorUnavailable(NovaException):
msg_fmt = _("Connection to the hypervisor is broken on host: %(host)s")
class ComputeServiceUnavailable(ServiceUnavailable):
msg_fmt = _("Compute service of %(host)s is unavailable at this time.")
class ComputeServiceInUse(NovaException):
msg_fmt = _("Compute service of %(host)s is still in use.")
class UnableToMigrateToSelf(Invalid):
msg_fmt = _("Unable to migrate instance (%(instance_id)s) "
"to current host (%(host)s).")
class InvalidHypervisorType(Invalid):
    """Raised when an unsupported hypervisor type is supplied."""
    # Fix: removed the stray "of" from the user-facing message
    # ("type of is invalid" -> "type is invalid").
    msg_fmt = _("The supplied hypervisor type is invalid.")
class HypervisorTooOld(Invalid):
msg_fmt = _("This compute node's hypervisor is older than the minimum "
"supported version: %(version)s.")
class DestinationHypervisorTooOld(Invalid):
msg_fmt = _("The instance requires a newer hypervisor version than "
"has been provided.")
class ServiceTooOld(Invalid):
msg_fmt = _("This service is older (v%(thisver)i) than the minimum "
"(v%(minver)i) version of the rest of the deployment. "
"Unable to continue.")
class DestinationDiskExists(Invalid):
msg_fmt = _("The supplied disk path (%(path)s) already exists, "
"it is expected not to exist.")
class InvalidDevicePath(Invalid):
msg_fmt = _("The supplied device path (%(path)s) is invalid.")
class DevicePathInUse(Invalid):
msg_fmt = _("The supplied device path (%(path)s) is in use.")
code = 409
class DeviceIsBusy(Invalid):
msg_fmt = _("The supplied device (%(device)s) is busy.")
class InvalidCPUInfo(Invalid):
msg_fmt = _("Unacceptable CPU info: %(reason)s")
class InvalidIpAddressError(Invalid):
msg_fmt = _("%(address)s is not a valid IP v4/6 address.")
class InvalidVLANTag(Invalid):
msg_fmt = _("VLAN tag is not appropriate for the port group "
"%(bridge)s. Expected VLAN tag is %(tag)s, "
"but the one associated with the port group is %(pgroup)s.")
class InvalidVLANPortGroup(Invalid):
msg_fmt = _("vSwitch which contains the port group %(bridge)s is "
"not associated with the desired physical adapter. "
"Expected vSwitch is %(expected)s, but the one associated "
"is %(actual)s.")
class InvalidDiskFormat(Invalid):
msg_fmt = _("Disk format %(disk_format)s is not acceptable")
class InvalidDiskInfo(Invalid):
msg_fmt = _("Disk info file is invalid: %(reason)s")
class DiskInfoReadWriteFail(Invalid):
msg_fmt = _("Failed to read or write disk info file: %(reason)s")
class ImageUnacceptable(Invalid):
msg_fmt = _("Image %(image_id)s is unacceptable: %(reason)s")
class ImageBadRequest(Invalid):
msg_fmt = _("Request of image %(image_id)s got BadRequest response: "
"%(response)s")
class InstanceUnacceptable(Invalid):
msg_fmt = _("Instance %(instance_id)s is unacceptable: %(reason)s")
class InvalidEc2Id(Invalid):
msg_fmt = _("Ec2 id %(ec2_id)s is unacceptable.")
class InvalidUUID(Invalid):
msg_fmt = _("Expected a uuid but received %(uuid)s.")
class InvalidID(Invalid):
msg_fmt = _("Invalid ID received %(id)s.")
class ConstraintNotMet(NovaException):
msg_fmt = _("Constraint not met.")
code = 412
class NotFound(NovaException):
    """Base class for missing-resource errors; maps to HTTP 404."""
    msg_fmt = _("Resource could not be found.")
    code = 404
class AgentBuildNotFound(NotFound):
msg_fmt = _("No agent-build associated with id %(id)s.")
class AgentBuildExists(NovaException):
msg_fmt = _("Agent-build with hypervisor %(hypervisor)s os %(os)s "
"architecture %(architecture)s exists.")
class VolumeAttachmentNotFound(NotFound):
msg_fmt = _("Volume attachment %(attachment_id)s could not be found.")
class VolumeNotFound(NotFound):
msg_fmt = _("Volume %(volume_id)s could not be found.")
class UndefinedRootBDM(NovaException):
msg_fmt = _("Undefined Block Device Mapping root: BlockDeviceMappingList "
"contains Block Device Mappings from multiple instances.")
class BDMNotFound(NotFound):
msg_fmt = _("No Block Device Mapping with id %(id)s.")
class VolumeBDMNotFound(NotFound):
msg_fmt = _("No volume Block Device Mapping with id %(volume_id)s.")
class VolumeBDMIsMultiAttach(Invalid):
msg_fmt = _("Block Device Mapping %(volume_id)s is a multi-attach volume"
" and is not valid for this operation.")
class VolumeBDMPathNotFound(VolumeBDMNotFound):
msg_fmt = _("No volume Block Device Mapping at path: %(path)s")
class DeviceDetachFailed(NovaException):
msg_fmt = _("Device detach failed for %(device)s: %(reason)s")
class DeviceNotFound(NotFound):
msg_fmt = _("Device '%(device)s' not found.")
class SnapshotNotFound(NotFound):
msg_fmt = _("Snapshot %(snapshot_id)s could not be found.")
class DiskNotFound(NotFound):
msg_fmt = _("No disk at %(location)s")
class VolumeDriverNotFound(NotFound):
msg_fmt = _("Could not find a handler for %(driver_type)s volume.")
class InvalidImageRef(Invalid):
msg_fmt = _("Invalid image href %(image_href)s.")
class AutoDiskConfigDisabledByImage(Invalid):
msg_fmt = _("Requested image %(image)s "
"has automatic disk resize disabled.")
class ImageNotFound(NotFound):
msg_fmt = _("Image %(image_id)s could not be found.")
class PreserveEphemeralNotSupported(Invalid):
msg_fmt = _("The current driver does not support "
"preserving ephemeral partitions.")
class ProjectNotFound(NotFound):
msg_fmt = _("Project %(project_id)s could not be found.")
class StorageRepositoryNotFound(NotFound):
msg_fmt = _("Cannot find SR to read/write VDI.")
class InstanceMappingNotFound(NotFound):
msg_fmt = _("Instance %(uuid)s has no mapping to a cell.")
class NetworkDhcpReleaseFailed(NovaException):
msg_fmt = _("Failed to release IP %(address)s with MAC %(mac_address)s")
class NetworkInUse(NovaException):
msg_fmt = _("Network %(network_id)s is still in use.")
class NetworkSetHostFailed(NovaException):
msg_fmt = _("Network set host failed for network %(network_id)s.")
class NetworkNotCreated(Invalid):
msg_fmt = _("%(req)s is required to create a network.")
class LabelTooLong(Invalid):
msg_fmt = _("Maximum allowed length for 'label' is 255.")
class InvalidIntValue(Invalid):
msg_fmt = _("%(key)s must be an integer.")
class InvalidCidr(Invalid):
msg_fmt = _("%(cidr)s is not a valid IP network.")
class InvalidAddress(Invalid):
msg_fmt = _("%(address)s is not a valid IP address.")
class AddressOutOfRange(Invalid):
msg_fmt = _("%(address)s is not within %(cidr)s.")
class DuplicateVlan(NovaException):
msg_fmt = _("Detected existing vlan with id %(vlan)d")
code = 409
class CidrConflict(NovaException):
msg_fmt = _('Requested cidr (%(cidr)s) conflicts '
'with existing cidr (%(other)s)')
code = 409
class NetworkHasProject(NetworkInUse):
msg_fmt = _('Network must be disassociated from project '
'%(project_id)s before it can be deleted.')
class NetworkNotFound(NotFound):
msg_fmt = _("Network %(network_id)s could not be found.")
class PortNotFound(NotFound):
msg_fmt = _("Port id %(port_id)s could not be found.")
class NetworkNotFoundForBridge(NetworkNotFound):
msg_fmt = _("Network could not be found for bridge %(bridge)s")
class NetworkNotFoundForUUID(NetworkNotFound):
msg_fmt = _("Network could not be found for uuid %(uuid)s")
class NetworkNotFoundForCidr(NetworkNotFound):
msg_fmt = _("Network could not be found with cidr %(cidr)s.")
class NetworkNotFoundForInstance(NetworkNotFound):
msg_fmt = _("Network could not be found for instance %(instance_id)s.")
class NoNetworksFound(NotFound):
msg_fmt = _("No networks defined.")
class NoMoreNetworks(NovaException):
msg_fmt = _("No more available networks.")
class NetworkNotFoundForProject(NetworkNotFound):
msg_fmt = _("Either network uuid %(network_uuid)s is not present or "
"is not assigned to the project %(project_id)s.")
class NetworkAmbiguous(Invalid):
msg_fmt = _("More than one possible network found. Specify "
"network ID(s) to select which one(s) to connect to.")
class UnableToAutoAllocateNetwork(Invalid):
msg_fmt = _('Unable to automatically allocate a network for project '
'%(project_id)s')
class NetworkRequiresSubnet(Invalid):
msg_fmt = _("Network %(network_uuid)s requires a subnet in order to boot"
" instances on.")
class ExternalNetworkAttachForbidden(Forbidden):
msg_fmt = _("It is not allowed to create an interface on "
"external network %(network_uuid)s")
class NetworkMissingPhysicalNetwork(NovaException):
msg_fmt = _("Physical network is missing for network %(network_uuid)s")
class VifDetailsMissingVhostuserSockPath(Invalid):
msg_fmt = _("vhostuser_sock_path not present in vif_details"
" for vif %(vif_id)s")
class VifDetailsMissingMacvtapParameters(Invalid):
msg_fmt = _("Parameters %(missing_params)s not present in"
" vif_details for vif %(vif_id)s. Check your Neutron"
" configuration to validate that the macvtap parameters are"
" correct.")
class OvsConfigurationFailure(NovaException):
msg_fmt = _("OVS configuration failed with: %(inner_exception)s.")
class DatastoreNotFound(NotFound):
msg_fmt = _("Could not find the datastore reference(s) which the VM uses.")
class PortInUse(Invalid):
msg_fmt = _("Port %(port_id)s is still in use.")
class PortRequiresFixedIP(Invalid):
msg_fmt = _("Port %(port_id)s requires a FixedIP in order to be used.")
class PortNotUsable(Invalid):
msg_fmt = _("Port %(port_id)s not usable for instance %(instance)s.")
class PortNotUsableDNS(Invalid):
msg_fmt = _("Port %(port_id)s not usable for instance %(instance)s. "
"Value %(value)s assigned to dns_name attribute does not "
"match instance's hostname %(hostname)s")
class PortNotFree(Invalid):
msg_fmt = _("No free port available for instance %(instance)s.")
class PortBindingFailed(Invalid):
msg_fmt = _("Binding failed for port %(port_id)s, please check neutron "
"logs for more information.")
class PortUpdateFailed(Invalid):
msg_fmt = _("Port update failed for port %(port_id)s: %(reason)s")
class FixedIpExists(NovaException):
msg_fmt = _("Fixed IP %(address)s already exists.")
class FixedIpNotFound(NotFound):
msg_fmt = _("No fixed IP associated with id %(id)s.")
class FixedIpNotFoundForAddress(FixedIpNotFound):
msg_fmt = _("Fixed IP not found for address %(address)s.")
class FixedIpNotFoundForInstance(FixedIpNotFound):
msg_fmt = _("Instance %(instance_uuid)s has zero fixed IPs.")
class FixedIpNotFoundForNetworkHost(FixedIpNotFound):
msg_fmt = _("Network host %(host)s has zero fixed IPs "
"in network %(network_id)s.")
class FixedIpNotFoundForSpecificInstance(FixedIpNotFound):
msg_fmt = _("Instance %(instance_uuid)s doesn't have fixed IP '%(ip)s'.")
class FixedIpNotFoundForNetwork(FixedIpNotFound):
msg_fmt = _("Fixed IP address (%(address)s) does not exist in "
"network (%(network_uuid)s).")
class FixedIpAssociateFailed(NovaException):
msg_fmt = _("Fixed IP associate failed for network: %(net)s.")
class FixedIpAlreadyInUse(NovaException):
msg_fmt = _("Fixed IP address %(address)s is already in use on instance "
"%(instance_uuid)s.")
class FixedIpAssociatedWithMultipleInstances(NovaException):
msg_fmt = _("More than one instance is associated with fixed IP address "
"'%(address)s'.")
class FixedIpInvalid(Invalid):
msg_fmt = _("Fixed IP address %(address)s is invalid.")
class FixedIpInvalidOnHost(Invalid):
msg_fmt = _("The fixed IP associated with port %(port_id)s is not "
"compatible with the host.")
class NoMoreFixedIps(NovaException):
msg_fmt = _("No fixed IP addresses available for network: %(net)s")
class NoFixedIpsDefined(NotFound):
msg_fmt = _("Zero fixed IPs could be found.")
class FloatingIpExists(NovaException):
msg_fmt = _("Floating IP %(address)s already exists.")
class FloatingIpNotFound(NotFound):
msg_fmt = _("Floating IP not found for ID %(id)s.")
class FloatingIpDNSExists(Invalid):
msg_fmt = _("The DNS entry %(name)s already exists in domain %(domain)s.")
class FloatingIpNotFoundForAddress(FloatingIpNotFound):
msg_fmt = _("Floating IP not found for address %(address)s.")
class FloatingIpNotFoundForHost(FloatingIpNotFound):
msg_fmt = _("Floating IP not found for host %(host)s.")
class FloatingIpMultipleFoundForAddress(NovaException):
msg_fmt = _("Multiple floating IPs are found for address %(address)s.")
class FloatingIpPoolNotFound(NotFound):
msg_fmt = _("Floating IP pool not found.")
safe = True
class NoMoreFloatingIps(FloatingIpNotFound):
msg_fmt = _("Zero floating IPs available.")
safe = True
class FloatingIpAssociated(NovaException):
msg_fmt = _("Floating IP %(address)s is associated.")
class FloatingIpNotAssociated(NovaException):
msg_fmt = _("Floating IP %(address)s is not associated.")
class NoFloatingIpsDefined(NotFound):
msg_fmt = _("Zero floating IPs exist.")
class NoFloatingIpInterface(NotFound):
msg_fmt = _("Interface %(interface)s not found.")
class FloatingIpAllocateFailed(NovaException):
msg_fmt = _("Floating IP allocate failed.")
class FloatingIpAssociateFailed(NovaException):
msg_fmt = _("Floating IP %(address)s association has failed.")
class FloatingIpBadRequest(Invalid):
msg_fmt = _("The floating IP request failed with a BadRequest")
class CannotDisassociateAutoAssignedFloatingIP(NovaException):
msg_fmt = _("Cannot disassociate auto assigned floating IP")
class KeypairNotFound(NotFound):
msg_fmt = _("Keypair %(name)s not found for user %(user_id)s")
class ServiceNotFound(NotFound):
msg_fmt = _("Service %(service_id)s could not be found.")
class ServiceBinaryExists(NovaException):
msg_fmt = _("Service with host %(host)s binary %(binary)s exists.")
class ServiceTopicExists(NovaException):
msg_fmt = _("Service with host %(host)s topic %(topic)s exists.")
class HostNotFound(NotFound):
msg_fmt = _("Host %(host)s could not be found.")
class ComputeHostNotFound(HostNotFound):
msg_fmt = _("Compute host %(host)s could not be found.")
class HostBinaryNotFound(NotFound):
msg_fmt = _("Could not find binary %(binary)s on host %(host)s.")
class InvalidReservationExpiration(Invalid):
msg_fmt = _("Invalid reservation expiration %(expire)s.")
class InvalidQuotaValue(Invalid):
msg_fmt = _("Change would make usage less than 0 for the following "
"resources: %(unders)s")
class InvalidQuotaMethodUsage(Invalid):
msg_fmt = _("Wrong quota method %(method)s used on resource %(res)s")
class QuotaNotFound(NotFound):
msg_fmt = _("Quota could not be found")
class QuotaExists(NovaException):
msg_fmt = _("Quota exists for project %(project_id)s, "
"resource %(resource)s")
class QuotaResourceUnknown(QuotaNotFound):
msg_fmt = _("Unknown quota resources %(unknown)s.")
class ProjectUserQuotaNotFound(QuotaNotFound):
msg_fmt = _("Quota for user %(user_id)s in project %(project_id)s "
"could not be found.")
class ProjectQuotaNotFound(QuotaNotFound):
msg_fmt = _("Quota for project %(project_id)s could not be found.")
class QuotaClassNotFound(QuotaNotFound):
msg_fmt = _("Quota class %(class_name)s could not be found.")
class QuotaUsageNotFound(QuotaNotFound):
msg_fmt = _("Quota usage for project %(project_id)s could not be found.")
class QuotaUsageRefreshNotAllowed(Invalid):
msg_fmt = _("Quota usage refresh of resource %(resource)s for project "
"%(project_id)s, user %(user_id)s, is not allowed. "
"The allowed resources are %(syncable)s.")
class ReservationNotFound(QuotaNotFound):
msg_fmt = _("Quota reservation %(uuid)s could not be found.")
class OverQuota(NovaException):
msg_fmt = _("Quota exceeded for resources: %(overs)s")
class SecurityGroupNotFound(NotFound):
msg_fmt = _("Security group %(security_group_id)s not found.")
class SecurityGroupNotFoundForProject(SecurityGroupNotFound):
msg_fmt = _("Security group %(security_group_id)s not found "
"for project %(project_id)s.")
class SecurityGroupNotFoundForRule(SecurityGroupNotFound):
msg_fmt = _("Security group with rule %(rule_id)s not found.")
class SecurityGroupExists(Invalid):
msg_fmt = _("Security group %(security_group_name)s already exists "
"for project %(project_id)s.")
class SecurityGroupExistsForInstance(Invalid):
msg_fmt = _("Security group %(security_group_id)s is already associated"
" with the instance %(instance_id)s")
class SecurityGroupNotExistsForInstance(Invalid):
msg_fmt = _("Security group %(security_group_id)s is not associated with"
" the instance %(instance_id)s")
class SecurityGroupDefaultRuleNotFound(Invalid):
    """Raised when a security group default rule lookup fails.

    Expects a ``rule_id`` keyword argument for message interpolation.
    """
    # Fix: the placeholder was written as "(%rule_id)s", which is not a
    # valid %-format mapping key, so "msg_fmt % kwargs" always raised and
    # NovaException fell back to the raw, unformatted string. The correct
    # mapping-key syntax is "%(rule_id)s".
    msg_fmt = _("Security group default rule (%(rule_id)s) not found.")
class SecurityGroupCannotBeApplied(Invalid):
msg_fmt = _("Network requires port_security_enabled and subnet associated"
" in order to apply security groups.")
class NoUniqueMatch(NovaException):
msg_fmt = _("No Unique Match Found.")
code = 409
class NoActiveMigrationForInstance(NotFound):
msg_fmt = _("Active live migration for instance %(instance_id)s not found")
class MigrationNotFound(NotFound):
msg_fmt = _("Migration %(migration_id)s could not be found.")
class MigrationNotFoundByStatus(MigrationNotFound):
msg_fmt = _("Migration not found for instance %(instance_id)s "
"with status %(status)s.")
class MigrationNotFoundForInstance(MigrationNotFound):
msg_fmt = _("Migration %(migration_id)s not found for instance "
"%(instance_id)s")
class InvalidMigrationState(Invalid):
msg_fmt = _("Migration %(migration_id)s state of instance "
"%(instance_uuid)s is %(state)s. Cannot %(method)s while the "
"migration is in this state.")
class ConsoleLogOutputException(NovaException):
msg_fmt = _("Console log output could not be retrieved for instance "
"%(instance_id)s. Reason: %(reason)s")
class ConsolePoolExists(NovaException):
msg_fmt = _("Console pool with host %(host)s, console_type "
"%(console_type)s and compute_host %(compute_host)s "
"already exists.")
class ConsolePoolNotFoundForHostType(NotFound):
msg_fmt = _("Console pool of type %(console_type)s "
"for compute host %(compute_host)s "
"on proxy host %(host)s not found.")
class ConsoleNotFound(NotFound):
msg_fmt = _("Console %(console_id)s could not be found.")
class ConsoleNotFoundForInstance(ConsoleNotFound):
msg_fmt = _("Console for instance %(instance_uuid)s could not be found.")
class ConsoleNotAvailable(NotFound):
msg_fmt = _("Guest does not have a console available.")
class ConsoleNotFoundInPoolForInstance(ConsoleNotFound):
msg_fmt = _("Console for instance %(instance_uuid)s "
"in pool %(pool_id)s could not be found.")
class ConsoleTypeInvalid(Invalid):
msg_fmt = _("Invalid console type %(console_type)s")
class ConsoleTypeUnavailable(Invalid):
msg_fmt = _("Unavailable console type %(console_type)s.")
class ConsolePortRangeExhausted(NovaException):
msg_fmt = _("The console port range %(min_port)d-%(max_port)d is "
"exhausted.")
class FlavorNotFound(NotFound):
msg_fmt = _("Flavor %(flavor_id)s could not be found.")
class FlavorNotFoundByName(FlavorNotFound):
msg_fmt = _("Flavor with name %(flavor_name)s could not be found.")
class FlavorAccessNotFound(NotFound):
msg_fmt = _("Flavor access not found for %(flavor_id)s / "
"%(project_id)s combination.")
class FlavorExtraSpecUpdateCreateFailed(NovaException):
msg_fmt = _("Flavor %(id)s extra spec cannot be updated or created "
"after %(retries)d retries.")
class CellNotFound(NotFound):
msg_fmt = _("Cell %(cell_name)s doesn't exist.")
class CellExists(NovaException):
msg_fmt = _("Cell with name %(name)s already exists.")
class CellRoutingInconsistency(NovaException):
msg_fmt = _("Inconsistency in cell routing: %(reason)s")
class CellServiceAPIMethodNotFound(NotFound):
msg_fmt = _("Service API method not found: %(detail)s")
class CellTimeout(NotFound):
msg_fmt = _("Timeout waiting for response from cell")
class CellMaxHopCountReached(NovaException):
msg_fmt = _("Cell message has reached maximum hop count: %(hop_count)s")
class NoCellsAvailable(NovaException):
msg_fmt = _("No cells available matching scheduling criteria.")
class CellsUpdateUnsupported(NovaException):
msg_fmt = _("Cannot update cells configuration file.")
class InstanceUnknownCell(NotFound):
msg_fmt = _("Cell is not known for instance %(instance_uuid)s")
class SchedulerHostFilterNotFound(NotFound):
msg_fmt = _("Scheduler Host Filter %(filter_name)s could not be found.")
class FlavorExtraSpecsNotFound(NotFound):
msg_fmt = _("Flavor %(flavor_id)s has no extra specs with "
"key %(extra_specs_key)s.")
class ComputeHostMetricNotFound(NotFound):
msg_fmt = _("Metric %(name)s could not be found on the compute "
"host node %(host)s.%(node)s.")
class FileNotFound(NotFound):
msg_fmt = _("File %(file_path)s could not be found.")
class SwitchNotFoundForNetworkAdapter(NotFound):
msg_fmt = _("Virtual switch associated with the "
"network adapter %(adapter)s not found.")
class NetworkAdapterNotFound(NotFound):
msg_fmt = _("Network adapter %(adapter)s could not be found.")
class ClassNotFound(NotFound):
msg_fmt = _("Class %(class_name)s could not be found: %(exception)s")
class InstanceTagNotFound(NotFound):
msg_fmt = _("Instance %(instance_id)s has no tag '%(tag)s'")
class KeyPairExists(NovaException):
msg_fmt = _("Key pair '%(key_name)s' already exists.")
class InstanceExists(NovaException):
msg_fmt = _("Instance %(name)s already exists.")
class FlavorExists(NovaException):
msg_fmt = _("Flavor with name %(name)s already exists.")
class FlavorIdExists(NovaException):
msg_fmt = _("Flavor with ID %(flavor_id)s already exists.")
class FlavorAccessExists(NovaException):
msg_fmt = _("Flavor access already exists for flavor %(flavor_id)s "
"and project %(project_id)s combination.")
class InvalidSharedStorage(NovaException):
msg_fmt = _("%(path)s is not on shared storage: %(reason)s")
class InvalidLocalStorage(NovaException):
msg_fmt = _("%(path)s is not on local storage: %(reason)s")
class StorageError(NovaException):
msg_fmt = _("Storage error: %(reason)s")
class MigrationError(NovaException):
msg_fmt = _("Migration error: %(reason)s")
class MigrationPreCheckError(MigrationError):
msg_fmt = _("Migration pre-check error: %(reason)s")
class MigrationPreCheckClientException(MigrationError):
msg_fmt = _("Client exception during Migration Pre check: %(reason)s")
class MigrationSchedulerRPCError(MigrationError):
msg_fmt = _("Migration select destinations error: %(reason)s")
class RPCPinnedToOldVersion(NovaException):
msg_fmt = _("RPC is pinned to old version")
class MalformedRequestBody(NovaException):
msg_fmt = _("Malformed message body: %(reason)s")
# NOTE(johannes): NotFound should only be used when a 404 error is
# appropriate to be returned
class ConfigNotFound(NovaException):
msg_fmt = _("Could not find config at %(path)s")
class PasteAppNotFound(NovaException):
msg_fmt = _("Could not load paste app '%(name)s' from %(path)s")
class CannotResizeToSameFlavor(NovaException):
msg_fmt = _("When resizing, instances must change flavor!")
class ResizeError(NovaException):
msg_fmt = _("Resize error: %(reason)s")
class CannotResizeDisk(NovaException):
msg_fmt = _("Server disk was unable to be resized because: %(reason)s")
class FlavorMemoryTooSmall(NovaException):
msg_fmt = _("Flavor's memory is too small for requested image.")
class FlavorDiskTooSmall(NovaException):
msg_fmt = _("The created instance's disk would be too small.")
class FlavorDiskSmallerThanImage(FlavorDiskTooSmall):
msg_fmt = _("Flavor's disk is too small for requested image. Flavor disk "
"is %(flavor_size)i bytes, image is %(image_size)i bytes.")
class FlavorDiskSmallerThanMinDisk(FlavorDiskTooSmall):
msg_fmt = _("Flavor's disk is smaller than the minimum size specified in "
"image metadata. Flavor disk is %(flavor_size)i bytes, "
"minimum size is %(image_min_disk)i bytes.")
class VolumeSmallerThanMinDisk(FlavorDiskTooSmall):
msg_fmt = _("Volume is smaller than the minimum size specified in image "
"metadata. Volume size is %(volume_size)i bytes, minimum "
"size is %(image_min_disk)i bytes.")
class InsufficientFreeMemory(NovaException):
msg_fmt = _("Insufficient free memory on compute node to start %(uuid)s.")
class NoValidHost(NovaException):
msg_fmt = _("No valid host was found. %(reason)s")
class MaxRetriesExceeded(NoValidHost):
msg_fmt = _("Exceeded maximum number of retries. %(reason)s")
class QuotaError(NovaException):
    """Base class for "quota exceeded" errors.

    Subclasses override ``msg_fmt`` with a message naming the specific
    exhausted resource; this default embeds the HTTP status code.
    """
    msg_fmt = _("Quota exceeded: code=%(code)s")
    # NOTE(cyeoh): 413 should only be used for the ec2 API
    # The error status code for out of quota for the nova api should be
    # 403 Forbidden.
    code = 413
    # NOTE(review): 'safe' presumably marks the formatted message as safe
    # to expose to API users — confirm against NovaException's handling.
    safe = True
class TooManyInstances(QuotaError):
msg_fmt = _("Quota exceeded for %(overs)s: Requested %(req)s,"
" but already used %(used)s of %(allowed)s %(overs)s")
class FloatingIpLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of floating IPs exceeded")
class FixedIpLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of fixed IPs exceeded")
class MetadataLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of metadata items exceeds %(allowed)d")
class OnsetFileLimitExceeded(QuotaError):
msg_fmt = _("Personality file limit exceeded")
class OnsetFilePathLimitExceeded(OnsetFileLimitExceeded):
msg_fmt = _("Personality file path too long")
class OnsetFileContentLimitExceeded(OnsetFileLimitExceeded):
msg_fmt = _("Personality file content too long")
class KeypairLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of key pairs exceeded")
class SecurityGroupLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of security groups or rules exceeded")
class PortLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of ports exceeded")
class AggregateError(NovaException):
msg_fmt = _("Aggregate %(aggregate_id)s: action '%(action)s' "
"caused an error: %(reason)s.")
class AggregateNotFound(NotFound):
msg_fmt = _("Aggregate %(aggregate_id)s could not be found.")
class AggregateNameExists(NovaException):
msg_fmt = _("Aggregate %(aggregate_name)s already exists.")
class AggregateHostNotFound(NotFound):
msg_fmt = _("Aggregate %(aggregate_id)s has no host %(host)s.")
class AggregateMetadataNotFound(NotFound):
msg_fmt = _("Aggregate %(aggregate_id)s has no metadata with "
"key %(metadata_key)s.")
class AggregateHostExists(NovaException):
msg_fmt = _("Aggregate %(aggregate_id)s already has host %(host)s.")
class InstancePasswordSetFailed(NovaException):
msg_fmt = _("Failed to set admin password on %(instance)s "
"because %(reason)s")
safe = True
class InstanceNotFound(NotFound):
msg_fmt = _("Instance %(instance_id)s could not be found.")
class InstanceInfoCacheNotFound(NotFound):
msg_fmt = _("Info cache for instance %(instance_uuid)s could not be "
"found.")
class MarkerNotFound(NotFound):
msg_fmt = _("Marker %(marker)s could not be found.")
class CouldNotFetchImage(NovaException):
msg_fmt = _("Could not fetch image %(image_id)s")
class CouldNotUploadImage(NovaException):
msg_fmt = _("Could not upload image %(image_id)s")
class TaskAlreadyRunning(NovaException):
msg_fmt = _("Task %(task_name)s is already running on host %(host)s")
class TaskNotRunning(NovaException):
msg_fmt = _("Task %(task_name)s is not running on host %(host)s")
class InstanceIsLocked(InstanceInvalidState):
msg_fmt = _("Instance %(instance_uuid)s is locked")
class ConfigDriveInvalidValue(Invalid):
msg_fmt = _("Invalid value for Config Drive option: %(option)s")
class ConfigDriveUnsupportedFormat(Invalid):
msg_fmt = _("Config drive format '%(format)s' is not supported.")
class ConfigDriveMountFailed(NovaException):
msg_fmt = _("Could not mount vfat config drive. %(operation)s failed. "
"Error: %(error)s")
class ConfigDriveUnknownFormat(NovaException):
msg_fmt = _("Unknown config drive format %(format)s. Select one of "
"iso9660 or vfat.")
class ConfigDriveNotFound(NotFound):
msg_fmt = _("Instance %(instance_uuid)s requires config drive, but it "
"does not exist.")
class InterfaceAttachFailed(Invalid):
msg_fmt = _("Failed to attach network adapter device to "
"%(instance_uuid)s")
class InterfaceAttachFailedNoNetwork(InterfaceAttachFailed):
msg_fmt = _("No specific network was requested and none are available "
"for project '%(project_id)s'.")
class InterfaceDetachFailed(Invalid):
msg_fmt = _("Failed to detach network adapter device from "
"%(instance_uuid)s")
class InstanceUserDataTooLarge(NovaException):
msg_fmt = _("User data too large. User data must be no larger than "
"%(maxsize)s bytes once base64 encoded. Your data is "
"%(length)d bytes")
class InstanceUserDataMalformed(NovaException):
msg_fmt = _("User data needs to be valid base 64.")
class InstanceUpdateConflict(NovaException):
msg_fmt = _("Conflict updating instance %(instance_uuid)s. "
"Expected: %(expected)s. Actual: %(actual)s")
class UnknownInstanceUpdateConflict(InstanceUpdateConflict):
msg_fmt = _("Conflict updating instance %(instance_uuid)s, but we were "
"unable to determine the cause")
class UnexpectedTaskStateError(InstanceUpdateConflict):
    """Instance update conflict where the mismatch is the task_state.

    Inherits its message ("Expected: ... Actual: ...") from
    InstanceUpdateConflict; presumably raised when an update's expected
    task_state does not match the stored one — confirm against callers.
    """
    pass
class UnexpectedDeletingTaskStateError(UnexpectedTaskStateError):
    """Task-state conflict variant — presumably distinguishes the case
    where the conflicting state relates to deletion, letting callers
    catch it separately; confirm semantics against callers.
    """
    pass
class InstanceActionNotFound(NovaException):
msg_fmt = _("Action for request_id %(request_id)s on instance"
" %(instance_uuid)s not found")
class InstanceActionEventNotFound(NovaException):
msg_fmt = _("Event %(event)s not found for action id %(action_id)s")
class CryptoCAFileNotFound(FileNotFound):
msg_fmt = _("The CA file for %(project)s could not be found")
class CryptoCRLFileNotFound(FileNotFound):
msg_fmt = _("The CRL file for %(project)s could not be found")
class InstanceRecreateNotSupported(Invalid):
msg_fmt = _('Instance recreate is not supported.')
class DBNotAllowed(NovaException):
msg_fmt = _('%(binary)s attempted direct database access which is '
'not allowed by policy')
class UnsupportedVirtType(Invalid):
msg_fmt = _("Virtualization type '%(virt)s' is not supported by "
"this compute driver")
class UnsupportedHardware(Invalid):
msg_fmt = _("Requested hardware '%(model)s' is not supported by "
"the '%(virt)s' virt driver")
class Base64Exception(NovaException):
msg_fmt = _("Invalid Base 64 data for file %(path)s")
class BuildAbortException(NovaException):
msg_fmt = _("Build of instance %(instance_uuid)s aborted: %(reason)s")
class RescheduledException(NovaException):
msg_fmt = _("Build of instance %(instance_uuid)s was re-scheduled: "
"%(reason)s")
class ShadowTableExists(NovaException):
msg_fmt = _("Shadow table with name %(name)s already exists.")
class InstanceFaultRollback(NovaException):
    """Wraps the exception that caused an instance rollback.

    The triggering exception stays reachable via ``inner_exception`` so
    callers can inspect or re-raise the root cause.
    """

    def __init__(self, inner_exception=None):
        message = _("Instance rollback performed due to: %s")
        # Keep a reference to the root cause; the rendered message embeds
        # its string form (or "None" when no cause was supplied).
        self.inner_exception = inner_exception
        super(InstanceFaultRollback, self).__init__(message % inner_exception)
class OrphanedObjectError(NovaException):
msg_fmt = _('Cannot call %(method)s on orphaned %(objtype)s object')
class ObjectActionError(NovaException):
msg_fmt = _('Object action %(action)s failed because: %(reason)s')
class AgentError(NovaException):
msg_fmt = _('Error during following call to agent: %(method)s')
class AgentTimeout(AgentError):
msg_fmt = _('Unable to contact guest agent. '
'The following call timed out: %(method)s')
class AgentNotImplemented(AgentError):
msg_fmt = _('Agent does not support the call: %(method)s')
class InstanceGroupNotFound(NotFound):
msg_fmt = _("Instance group %(group_uuid)s could not be found.")
class InstanceGroupIdExists(NovaException):
msg_fmt = _("Instance group %(group_uuid)s already exists.")
class InstanceGroupMemberNotFound(NotFound):
msg_fmt = _("Instance group %(group_uuid)s has no member with "
"id %(instance_id)s.")
class InstanceGroupSaveException(NovaException):
msg_fmt = _("%(field)s should not be part of the updates.")
class ImageDownloadModuleError(NovaException):
msg_fmt = _("There was an error with the download module %(module)s. "
"%(reason)s")
class ImageDownloadModuleMetaDataError(ImageDownloadModuleError):
msg_fmt = _("The metadata for this location will not work with this "
"module %(module)s. %(reason)s.")
class ImageDownloadModuleNotImplementedError(ImageDownloadModuleError):
msg_fmt = _("The method %(method_name)s is not implemented.")
class ImageDownloadModuleConfigurationError(ImageDownloadModuleError):
msg_fmt = _("The module %(module)s is misconfigured: %(reason)s.")
class ResourceMonitorError(NovaException):
msg_fmt = _("Error when creating resource monitor: %(monitor)s")
class PciDeviceWrongAddressFormat(NovaException):
msg_fmt = _("The PCI address %(address)s has an incorrect format.")
class PciDeviceInvalidDeviceName(NovaException):
msg_fmt = _("Invalid PCI Whitelist: "
"The PCI whitelist can specify devname or address,"
" but not both")
class PciDeviceNotFoundById(NotFound):
msg_fmt = _("PCI device %(id)s not found")
class PciDeviceNotFound(NotFound):
msg_fmt = _("PCI Device %(node_id)s:%(address)s not found.")
class PciDeviceInvalidStatus(Invalid):
msg_fmt = _(
"PCI device %(compute_node_id)s:%(address)s is %(status)s "
"instead of %(hopestatus)s")
class PciDeviceVFInvalidStatus(Invalid):
msg_fmt = _(
"Not all Virtual Functions of PF %(compute_node_id)s:%(address)s "
"are free.")
class PciDevicePFInvalidStatus(Invalid):
msg_fmt = _(
"Physical Function %(compute_node_id)s:%(address)s, related to VF"
" %(compute_node_id)s:%(vf_address)s is %(status)s "
"instead of %(hopestatus)s")
class PciDeviceInvalidOwner(Invalid):
msg_fmt = _(
"PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s "
"instead of %(hopeowner)s")
class PciDeviceRequestFailed(NovaException):
msg_fmt = _(
"PCI device request %(requests)s failed")
class PciDevicePoolEmpty(NovaException):
msg_fmt = _(
"Attempt to consume PCI device %(compute_node_id)s:%(address)s "
"from empty pool")
class PciInvalidAlias(Invalid):
msg_fmt = _("Invalid PCI alias definition: %(reason)s")
class PciRequestAliasNotDefined(NovaException):
msg_fmt = _("PCI alias %(alias)s is not defined")
class PciConfigInvalidWhitelist(Invalid):
msg_fmt = _("Invalid PCI devices Whitelist config %(reason)s")
# Cannot be templated, msg needs to be constructed when raised.
class InternalError(NovaException):
    """Generic hypervisor errors.
    Consider subclassing this to provide more specific exceptions.
    """
    # Deliberately not wrapped in _(): per the note above the class, the
    # full message is built by the caller at raise time, so there is no
    # fixed template to translate here.
    msg_fmt = "%(err)s"
class PciDevicePrepareFailed(NovaException):
msg_fmt = _("Failed to prepare PCI device %(id)s for instance "
"%(instance_uuid)s: %(reason)s")
class PciDeviceDetachFailed(NovaException):
msg_fmt = _("Failed to detach PCI device %(dev)s: %(reason)s")
class PciDeviceUnsupportedHypervisor(NovaException):
msg_fmt = _("%(type)s hypervisor does not support PCI devices")
class KeyManagerError(NovaException):
msg_fmt = _("Key manager error: %(reason)s")
class VolumesNotRemoved(Invalid):
msg_fmt = _("Failed to remove volume(s): (%(reason)s)")
class VolumeRebaseFailed(NovaException):
msg_fmt = _("Volume rebase failed: %(reason)s")
class InvalidVideoMode(Invalid):
msg_fmt = _("Provided video model (%(model)s) is not supported.")
class RngDeviceNotExist(Invalid):
msg_fmt = _("The provided RNG device path: (%(path)s) is not "
"present on the host.")
class RequestedVRamTooHigh(NovaException):
msg_fmt = _("The requested amount of video memory %(req_vram)d is higher "
"than the maximum allowed by flavor %(max_vram)d.")
class InvalidWatchdogAction(Invalid):
msg_fmt = _("Provided watchdog action (%(action)s) is not supported.")
class LiveMigrationWithOldNovaNotSupported(NovaException):
msg_fmt = _("Live migration with API v2.25 requires all the Mitaka "
"upgrade to be complete before it is available.")
class LiveMigrationURINotAvailable(NovaException):
msg_fmt = _('No live migration URI configured and no default available '
'for "%(virt_type)s" hypervisor virtualization type.')
class UnshelveException(NovaException):
msg_fmt = _("Error during unshelve instance %(instance_id)s: %(reason)s")
class ImageVCPULimitsRangeExceeded(Invalid):
msg_fmt = _("Image vCPU limits %(sockets)d:%(cores)d:%(threads)d "
"exceeds permitted %(maxsockets)d:%(maxcores)d:%(maxthreads)d")
class ImageVCPUTopologyRangeExceeded(Invalid):
msg_fmt = _("Image vCPU topology %(sockets)d:%(cores)d:%(threads)d "
"exceeds permitted %(maxsockets)d:%(maxcores)d:%(maxthreads)d")
class ImageVCPULimitsRangeImpossible(Invalid):
msg_fmt = _("Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d "
"are impossible to satisfy for vcpus count %(vcpus)d")
class InvalidArchitectureName(Invalid):
msg_fmt = _("Architecture name '%(arch)s' is not recognised")
class ImageNUMATopologyIncomplete(Invalid):
msg_fmt = _("CPU and memory allocation must be provided for all "
"NUMA nodes")
class ImageNUMATopologyForbidden(Forbidden):
msg_fmt = _("Image property '%(name)s' is not permitted to override "
"NUMA configuration set against the flavor")
class ImageNUMATopologyAsymmetric(Invalid):
msg_fmt = _("Asymmetric NUMA topologies require explicit assignment "
"of CPUs and memory to nodes in image or flavor")
class ImageNUMATopologyCPUOutOfRange(Invalid):
msg_fmt = _("CPU number %(cpunum)d is larger than max %(cpumax)d")
class ImageNUMATopologyCPUDuplicates(Invalid):
msg_fmt = _("CPU number %(cpunum)d is assigned to two nodes")
class ImageNUMATopologyCPUsUnassigned(Invalid):
msg_fmt = _("CPU number %(cpuset)s is not assigned to any node")
class ImageNUMATopologyMemoryOutOfRange(Invalid):
msg_fmt = _("%(memsize)d MB of memory assigned, but expected "
"%(memtotal)d MB")
class InvalidHostname(Invalid):
msg_fmt = _("Invalid characters in hostname '%(hostname)s'")
class NumaTopologyNotFound(NotFound):
msg_fmt = _("Instance %(instance_uuid)s does not specify a NUMA topology")
class MigrationContextNotFound(NotFound):
msg_fmt = _("Instance %(instance_uuid)s does not specify a migration "
"context.")
class SocketPortRangeExhaustedException(NovaException):
msg_fmt = _("Not able to acquire a free port for %(host)s")
class SocketPortInUseException(NovaException):
msg_fmt = _("Not able to bind %(host)s:%(port)d, %(error)s")
class ImageSerialPortNumberInvalid(Invalid):
msg_fmt = _("Number of serial ports '%(num_ports)s' specified in "
"'%(property)s' isn't valid.")
class ImageSerialPortNumberExceedFlavorValue(Invalid):
msg_fmt = _("Forbidden to exceed flavor value of number of serial "
"ports passed in image meta.")
class SerialPortNumberLimitExceeded(Invalid):
msg_fmt = _("Maximum number of serial port exceeds %(allowed)d "
"for %(virt_type)s")
class InvalidImageConfigDrive(Invalid):
msg_fmt = _("Image's config drive option '%(config_drive)s' is invalid")
class InvalidHypervisorVirtType(Invalid):
msg_fmt = _("Hypervisor virtualization type '%(hv_type)s' is not "
"recognised")
class InvalidVirtualMachineMode(Invalid):
msg_fmt = _("Virtual machine mode '%(vmmode)s' is not recognised")
class InvalidToken(Invalid):
msg_fmt = _("The token '%(token)s' is invalid or has expired")
class InvalidConnectionInfo(Invalid):
msg_fmt = _("Invalid Connection Info")
class InstanceQuiesceNotSupported(Invalid):
msg_fmt = _('Quiescing is not supported in instance %(instance_id)s')
class InstanceAgentNotEnabled(Invalid):
msg_fmt = _('Guest agent is not enabled for the instance')
safe = True
class QemuGuestAgentNotEnabled(InstanceAgentNotEnabled):
msg_fmt = _('QEMU guest agent is not enabled')
class SetAdminPasswdNotSupported(Invalid):
msg_fmt = _('Set admin password is not supported')
safe = True
class MemoryPageSizeInvalid(Invalid):
msg_fmt = _("Invalid memory page size '%(pagesize)s'")
class MemoryPageSizeForbidden(Invalid):
msg_fmt = _("Page size %(pagesize)s forbidden against '%(against)s'")
class MemoryPageSizeNotSupported(Invalid):
msg_fmt = _("Page size %(pagesize)s is not supported by the host.")
class CPUPinningNotSupported(Invalid):
msg_fmt = _("CPU pinning is not supported by the host: "
"%(reason)s")
class CPUPinningInvalid(Invalid):
msg_fmt = _("CPU set to pin %(requested)s must be a subset of "
"free CPU set %(free)s")
class CPUUnpinningInvalid(Invalid):
msg_fmt = _("CPU set to unpin %(requested)s must be a subset of "
"pinned CPU set %(pinned)s")
class CPUPinningUnknown(Invalid):
msg_fmt = _("CPU set to pin %(requested)s must be a subset of "
"known CPU set %(cpuset)s")
class CPUUnpinningUnknown(Invalid):
msg_fmt = _("CPU set to unpin %(requested)s must be a subset of "
"known CPU set %(cpuset)s")
class ImageCPUPinningForbidden(Forbidden):
msg_fmt = _("Image property 'hw_cpu_policy' is not permitted to override "
"CPU pinning policy set against the flavor")
class ImageCPUThreadPolicyForbidden(Forbidden):
msg_fmt = _("Image property 'hw_cpu_thread_policy' is not permitted to "
"override CPU thread pinning policy set against the flavor")
class UnsupportedPolicyException(Invalid):
msg_fmt = _("ServerGroup policy is not supported: %(reason)s")
class CellMappingNotFound(NotFound):
msg_fmt = _("Cell %(uuid)s has no mapping.")
class NUMATopologyUnsupported(Invalid):
msg_fmt = _("Host does not support guests with NUMA topology set")
class MemoryPagesUnsupported(Invalid):
msg_fmt = _("Host does not support guests with custom memory page sizes")
class InvalidImageFormat(Invalid):
msg_fmt = _("Invalid image format '%(format)s'")
class UnsupportedImageModel(Invalid):
msg_fmt = _("Image model '%(image)s' is not supported")
class HostMappingNotFound(Invalid):
msg_fmt = _("Host '%(name)s' is not mapped to any cell")
class RealtimeConfigurationInvalid(Invalid):
msg_fmt = _("Cannot set realtime policy in a non dedicated "
"cpu pinning policy")
class CPUThreadPolicyConfigurationInvalid(Invalid):
msg_fmt = _("Cannot set cpu thread pinning policy in a non dedicated "
"cpu pinning policy")
class RequestSpecNotFound(NotFound):
msg_fmt = _("RequestSpec not found for instance %(instance_uuid)s")
class UEFINotSupported(Invalid):
msg_fmt = _("UEFI is not supported")
class TriggerCrashDumpNotSupported(Invalid):
msg_fmt = _("Triggering crash dump is not supported")
class UnsupportedHostCPUControlPolicy(Invalid):
msg_fmt = _("Requested CPU control policy not supported by host")
class LibguestfsCannotReadKernel(Invalid):
msg_fmt = _("Libguestfs does not have permission to read host kernel.")
class MaxDBRetriesExceeded(NovaException):
msg_fmt = _("Max retries of DB transaction exceeded attempting to "
"perform %(action)s.")
class RealtimePolicyNotSupported(Invalid):
msg_fmt = _("Realtime policy not supported by hypervisor")
class RealtimeMaskNotFoundOrInvalid(Invalid):
msg_fmt = _("Realtime policy needs vCPU(s) mask configured with at least "
"1 RT vCPU and 1 ordinary vCPU. See hw:cpu_realtime_mask "
"or hw_cpu_realtime_mask")
class OsInfoNotFound(NotFound):
msg_fmt = _("No configuration information found for operating system "
"%(os_name)s")
class BuildRequestNotFound(NotFound):
msg_fmt = _("BuildRequest not found for instance %(uuid)s")
class AttachInterfaceNotSupported(Invalid):
msg_fmt = _("Attaching interfaces is not supported for "
"instance %(instance_uuid)s.")
class InvalidReservedMemoryPagesOption(Invalid):
msg_fmt = _("The format of the option 'reserved_huge_pages' is invalid. "
"(found '%(conf)s') Please refer to the nova "
"config-reference.")
class ConcurrentUpdateDetected(NovaException):
msg_fmt = _("Another thread concurrently updated the data. "
"Please retry your update")
class ResourceClassNotFound(NotFound):
msg_fmt = _("No such resource class %(resource_class)s.")
class ResourceProviderInUse(NovaException):
msg_fmt = _("Resource provider has allocations.")
class InventoryWithResourceClassNotFound(NotFound):
msg_fmt = _("No inventory of class %(resource_class)s found.")
class InvalidResourceClass(Invalid):
msg_fmt = _("Resource class '%(resource_class)s' invalid.")
class ResourceClassExists(NovaException):
msg_fmt = _("Resource class %(resource_class)s already exists.")
class ResourceClassInUse(Invalid):
msg_fmt = _("Cannot delete resource class %(resource_class)s. "
"Class is in use in inventory.")
class ResourceClassCannotDeleteStandard(Invalid):
msg_fmt = _("Cannot delete standard resource class %(resource_class)s.")
class ResourceClassCannotUpdateStandard(Invalid):
msg_fmt = _("Cannot update standard resource class %(resource_class)s.")
class InvalidInventory(Invalid):
msg_fmt = _("Inventory for '%(resource_class)s' on "
"resource provider '%(resource_provider)s' invalid.")
class InventoryInUse(InvalidInventory):
msg_fmt = _("Inventory for '%(resource_classes)s' on "
"resource provider '%(resource_provider)s' in use.")
class InvalidInventoryCapacity(InvalidInventory):
msg_fmt = _("Invalid inventory for '%(resource_class)s' on "
"resource provider '%(resource_provider)s'. "
"The reserved value is greater than or equal to total.")
class InvalidAllocationCapacityExceeded(InvalidInventory):
msg_fmt = _("Unable to create allocation for '%(resource_class)s' on "
"resource provider '%(resource_provider)s'. The requested "
"amount would exceed the capacity.")
class InvalidAllocationConstraintsViolated(InvalidInventory):
msg_fmt = _("Unable to create allocation for '%(resource_class)s' on "
"resource provider '%(resource_provider)s'. The requested "
"amount would violate inventory constraints.")
class UnsupportedPointerModelRequested(Invalid):
msg_fmt = _("Pointer model '%(model)s' requested is not supported by "
"host.")
class NotSupportedWithOption(Invalid):
msg_fmt = _("%(operation)s is not supported in conjunction with the "
"current %(option)s setting. Please refer to the nova "
"config-reference.")
class Unauthorized(NovaException):
msg_fmt = _("Not authorized.")
code = 401
class NeutronAdminCredentialConfigurationInvalid(Invalid):
msg_fmt = _("Networking client is experiencing an unauthorized exception.")
class PlacementNotConfigured(NovaException):
msg_fmt = _("This compute is not configured to talk to the placement "
"service. Configure the [placement] section of nova.conf "
"and restart the service.")
class InvalidEmulatorThreadsPolicy(Invalid):
msg_fmt = _("CPU emulator threads option requested is invalid, "
"given: '%(requested)s', available: '%(available)s'.")
class BadRequirementEmulatorThreadsPolicy(Invalid):
msg_fmt = _("An isolated CPU emulator threads option requires a dedicated "
"CPU policy option.")
class PowerVMAPIFailed(NovaException):
msg_fmt = _("PowerVM API failed to complete for instance=%(inst_name)s. "
"%(reason)s")
class TraitNotFound(NotFound):
msg_fmt = _("No such trait %(name)s.")
class TraitExists(NovaException):
msg_fmt = _("The Trait %(name)s already exists")
class TraitCannotDeleteStandard(Invalid):
msg_fmt = _("Cannot delete standard trait %(name)s.")
class TraitInUse(Invalid):
msg_fmt = _("The trait %(name)s is in use by a resource provider.")
| {
"content_hash": "91f9b2aaa9d99d4a6980d4869df7d555",
"timestamp": "",
"source": "github",
"line_count": 2155,
"max_line_length": 79,
"avg_line_length": 29.022737819025522,
"alnum_prop": 0.6765956766436428,
"repo_name": "rajalokan/nova",
"id": "42c50ea8e28fdf79880a81a38a02f2f8c12f14a8",
"size": "63276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/exception.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "601"
},
{
"name": "PHP",
"bytes": "4503"
},
{
"name": "Python",
"bytes": "19100322"
},
{
"name": "Shell",
"bytes": "26793"
},
{
"name": "Smarty",
"bytes": "299237"
}
],
"symlink_target": ""
} |
from src.emo_cls import EmoClassifier
if __name__ == '__main__':
    # Build the classifier; the flags ask it to reuse a cached/pickled
    # model when one exists and to dump the trained model for next time.
    e = EmoClassifier(is_use_emoticons=True,
                      is_dump_cls=True,
                      is_load_cached_cls=True,
                      verbose=True)

    # Show the strongest features learned by each n-gram classifier.
    e.terms_cls.show_most_informative_features(10)
    e.bigrams_cls.show_most_informative_features(10)
    e.trigrams_cls.show_most_informative_features(10)

    # Polish sample sentences: positive, negative, gibberish and
    # emoticon-heavy input.
    example_sents = (u'Było super, to były moje najfajniejsze wakacje!',
                     u'To chyba najzabawniejszy kabaret jaki oglądałem',
                     u'Wszystkiego najlepszego i wesołych świąt',
                     u'Niestety, mieliśmy dużego pecha i przegraliśmy',
                     u'Zachorowałem i leżę w łóżku',
                     u'To bardzo smutna wiadomość, nie mogę tego zrozumieć',
                     u'Zxcjhgoiu ooijasddnakjz zczxnzbxcz qdqdqqfefw sdsdfsdfsdf',
                     u'Przystojniaczek! :-) :D :(')

    for sent in example_sents:
        # Fix: the original used Python 2 "print a, b" statements, which
        # are a SyntaxError on Python 3.  Single-argument print(...) calls
        # parse identically on both versions (parenthesized statement on
        # py2, function call on py3) and produce the same output.
        print(u'Sentence: %s' % sent)
        res = e.classify(sent)  # -> (label, confidence) tuple
        print('Classified as: %s (%.2f)\n' % res)
| {
"content_hash": "2e6ca26e236a1ef0095d4492ac54f08d",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 84,
"avg_line_length": 43.56,
"alnum_prop": 0.5748393021120294,
"repo_name": "wojtekwalczak/EmoClassifier",
"id": "984f6787876369ca86c6e4efc959134a76335c0f",
"size": "1154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run_emo_cls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24067"
}
],
"symlink_target": ""
} |
import pytest
@pytest.fixture
def stepwise_testdir(testdir):
    """Return a testdir pre-populated with a small suite for --stepwise runs.

    Failures are toggled at run time via the custom --fail / --fail-last
    options declared in the generated conftest, rather than by rewriting
    the test files between pytest invocations.
    """
    # Rather than having to modify our testfile between tests, we introduce
    # a flag for whether or not the second test should fail.
    testdir.makeconftest(
        """
def pytest_addoption(parser):
    group = parser.getgroup('general')
    group.addoption('--fail', action='store_true', dest='fail')
    group.addoption('--fail-last', action='store_true', dest='fail_last')
"""
    )
    # Create a simple test suite.
    # test_a: alternating pass/fail pattern driven by the flags above.
    testdir.makepyfile(
        test_a="""
def test_success_before_fail():
    assert 1
def test_fail_on_flag(request):
    assert not request.config.getvalue('fail')
def test_success_after_fail():
    assert 1
def test_fail_last_on_flag(request):
    assert not request.config.getvalue('fail_last')
def test_success_after_last_fail():
    assert 1
"""
    )
    # test_b: always green; lets tests check that later files still run.
    testdir.makepyfile(
        test_b="""
def test_success():
    assert 1
"""
    )
    # customize cache directory so we don't use the tox's cache directory, which makes tests in this module flaky
    testdir.makeini(
        """
[pytest]
cache_dir = .cache
"""
    )
    return testdir
@pytest.fixture
def error_testdir(testdir):
    # A suite whose first test errors at setup time (unknown fixture).
    source = """
def test_error(nonexisting_fixture):
    assert 1

def test_success_after_fail():
    assert 1
"""
    testdir.makepyfile(test_a=source)
    return testdir
@pytest.fixture
def broken_testdir(testdir):
    # One properly collectable file plus one file that fails collection.
    sources = {
        "working_testfile": "def test_proper(): assert 1",
        "broken_testfile": "foobar",
    }
    testdir.makepyfile(**sources)
    return testdir
def test_run_without_stepwise(stepwise_testdir):
    # Without --stepwise the run must continue past the failing test.
    result = stepwise_testdir.runpytest("-v", "--strict-markers", "--fail")
    for pattern in (
        "*test_success_before_fail PASSED*",
        "*test_fail_on_flag FAILED*",
        "*test_success_after_fail PASSED*",
    ):
        result.stdout.fnmatch_lines([pattern])
def test_fail_and_continue_with_stepwise(stepwise_testdir):
    # First run: the second test fails, so stepwise stops the session there.
    result = stepwise_testdir.runpytest(
        "-v", "--strict-markers", "--stepwise", "--fail"
    )
    assert not result.stderr.str()

    output = result.stdout.str()
    assert "test_success_before_fail PASSED" in output
    assert "test_fail_on_flag FAILED" in output
    assert "test_success_after_fail" not in output

    # Second run without --fail ("fixing" the failure): the session resumes
    # from the previously failing test and continues to the end.
    result = stepwise_testdir.runpytest("-v", "--strict-markers", "--stepwise")
    assert not result.stderr.str()

    output = result.stdout.str()
    assert "test_success_before_fail" not in output
    assert "test_fail_on_flag PASSED" in output
    assert "test_success_after_fail PASSED" in output
def test_run_with_skip_option(stepwise_testdir):
    result = stepwise_testdir.runpytest(
        "-v",
        "--strict-markers",
        "--stepwise",
        "--stepwise-skip",
        "--fail",
        "--fail-last",
    )
    assert not result.stderr.str()

    output = result.stdout.str()
    # With --stepwise-skip the first failure is ignored, the run continues,
    # and only the second failure stops the session.
    expectations = (
        ("test_fail_on_flag FAILED", True),
        ("test_success_after_fail PASSED", True),
        ("test_fail_last_on_flag FAILED", True),
        ("test_success_after_last_fail", False),
    )
    for fragment, should_appear in expectations:
        assert (fragment in output) is should_appear
def test_fail_on_errors(error_testdir):
    # Errors (not only failures) must also stop a stepwise run.
    result = error_testdir.runpytest("-v", "--strict-markers", "--stepwise")
    assert not result.stderr.str()

    output = result.stdout.str()
    assert "test_error ERROR" in output
    assert "test_success_after_fail" not in output
def test_change_testfile(stepwise_testdir):
    # Fail inside test_a.py so stepwise records that test as the resume point.
    result = stepwise_testdir.runpytest(
        "-v", "--strict-markers", "--stepwise", "--fail", "test_a.py"
    )
    assert not result.stderr.str()
    assert "test_fail_on_flag FAILED" in result.stdout.str()

    # Running a different file must start from its beginning, since the
    # recorded resume-point test does not exist in test_b.py.
    result = stepwise_testdir.runpytest(
        "-v", "--strict-markers", "--stepwise", "test_b.py"
    )
    assert not result.stderr.str()
    assert "test_success PASSED" in result.stdout.str()
@pytest.mark.parametrize("broken_first", [True, False])
def test_stop_on_collection_errors(broken_testdir, broken_first):
    """Stepwise must stop when a collection error occurs.

    Both orderings are exercised because broken-file-first vs broken-file-last
    previously surfaced a bug (#5444).
    """
    files = ["working_testfile.py", "broken_testfile.py"]
    if broken_first:
        files = files[::-1]

    result = broken_testdir.runpytest("-v", "--strict-markers", "--stepwise", *files)
    result.stdout.fnmatch_lines("*error during collection*")
def test_xfail_handling(testdir, monkeypatch):
    """A non-strict xfail is ignored by stepwise, while a failing strict
    xfail interrupts the session (#5547).
    """
    monkeypatch.setattr("sys.dont_write_bytecode", True)

    contents = """
import pytest
def test_a(): pass

@pytest.mark.xfail(strict={strict})
def test_b(): assert {assert_value}

def test_c(): pass
def test_d(): pass
"""
    # Each scenario: (assert_value, strict, expected stdout fnmatch lines).
    scenarios = (
        # non-strict xfail: ignored, session runs to completion
        ("0", "False", [
            "*::test_a PASSED *",
            "*::test_b XFAIL *",
            "*::test_c PASSED *",
            "*::test_d PASSED *",
            "* 3 passed, 1 xfailed in *",
        ]),
        # strict xfail that unexpectedly passes: counts as a failure and
        # interrupts the session
        ("1", "True", [
            "*::test_a PASSED *",
            "*::test_b FAILED *",
            "* Interrupted*",
            "* 1 failed, 1 passed in *",
        ]),
        # strict xfail that fails as expected: session resumes past it
        ("0", "True", [
            "*::test_b XFAIL *",
            "*::test_c PASSED *",
            "*::test_d PASSED *",
            "* 2 passed, 1 deselected, 1 xfailed in *",
        ]),
    )
    for assert_value, strict, expected in scenarios:
        testdir.makepyfile(contents.format(assert_value=assert_value, strict=strict))
        result = testdir.runpytest("--sw", "-v")
        result.stdout.fnmatch_lines(expected)
| {
"content_hash": "7a3b3f1dc153333d8157418cc67df6d6",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 113,
"avg_line_length": 28.63013698630137,
"alnum_prop": 0.6271132376395534,
"repo_name": "alfredodeza/pytest",
"id": "3bc77857d9761845586f7f346f883c494895342d",
"size": "6270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing/test_stepwise.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "837013"
}
],
"symlink_target": ""
} |
import tests.periodicities.period_test as per

# Exercise the periodicity model builder for the 'BH' (business-hour)
# frequency with the parameter tuple (120, 'BH', 200).
per.buildModel((120, 'BH', 200))
| {
"content_hash": "781defc091b8ca1c6449774d8086caf1",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 45,
"avg_line_length": 21,
"alnum_prop": 0.7142857142857143,
"repo_name": "antoinecarme/pyaf",
"id": "417b4a0031bb23e0d7716b5523f3abef28388170",
"size": "84",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/periodicities/Business_Hour/Cycle_Business_Hour_200_BH_120.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
"""
This Python module implements a spatio-temporal filter method
for correcting interferograms for atmospheric phase screen (APS)
signals.
"""
# pylint: disable=invalid-name, too-many-locals, too-many-arguments
import os
from copy import deepcopy
from collections import OrderedDict
from typing import List
import numpy as np
from numpy import isnan
from scipy.fftpack import fft2, ifft2, fftshift, ifftshift
from scipy.interpolate import griddata
import pyrate.constants as C
from pyrate.core.logger import pyratelogger as log
from pyrate.core import shared, ifgconstants as ifc, mpiops
from pyrate.core.covariance import cvd_from_phase, RDist
from pyrate.core.algorithm import get_epochs
from pyrate.core.shared import Ifg, Tile, EpochList, nan_and_mm_convert
from pyrate.core.timeseries import time_series
from pyrate.merge import assemble_tiles
from pyrate.configuration import MultiplePaths, Configuration
def spatio_temporal_filter(params: dict) -> None:
    """
    Applies a spatio-temporal filter to remove the atmospheric phase screen
    (APS) and saves the corrected interferograms. Firstly the incremental
    time series is computed using the SVD method, before a cascade of temporal
    then spatial Gaussian filters is applied. The resulting APS corrections are
    saved to disc before being subtracted from each interferogram.

    :param params: Dictionary of PyRate configuration parameters.
    :return: None; corrected phase data is written back to the
        interferogram files on disc.
    """
    # Skip entirely unless APS estimation was requested in the configuration.
    if params[C.APSEST]:
        log.info('Doing APS spatio-temporal filtering')
    else:
        log.info('APS spatio-temporal filtering not required')
        return
    tiles = params[C.TILES]
    preread_ifgs = params[C.PREREAD_IFGS]
    ifg_paths = [ifg_path.tmp_sampled_path for ifg_path in params[C.INTERFEROGRAM_FILES]]

    # perform some checks on existing ifgs: if the correction has already
    # been applied to every ifg there is nothing left to do
    log.debug('Checking APS correction status')
    if mpiops.run_once(shared.check_correction_status, ifg_paths, ifc.PYRATE_APS_ERROR):
        log.debug('Finished APS correction')
        return  # return if True condition returned

    # Reuse corrections saved by a previous run if they all exist on disc,
    # avoiding the expensive SVD/filtering recomputation.
    aps_paths = [MultiplePaths.aps_error_path(i, params) for i in ifg_paths]
    if all(a.exists() for a in aps_paths):
        log.warning('Reusing APS errors from previous run')
        _apply_aps_correction(ifg_paths, aps_paths, params)
        return

    # obtain the incremental time series using SVD
    tsincr = _calc_svd_time_series(ifg_paths, params, preread_ifgs, tiles)
    mpiops.comm.barrier()

    # get lists of epochs and ifgs
    ifgs = list(OrderedDict(sorted(preread_ifgs.items())).values())
    epochlist = mpiops.run_once(get_epochs, ifgs)[0]

    # first perform temporal high pass filter
    ts_hp = temporal_high_pass_filter(tsincr, epochlist, params)

    # second perform spatial low pass filter to obtain APS correction in ts domain
    ifg = Ifg(ifg_paths[0])  # just grab any for parameters in slpfilter
    ifg.open()
    ts_aps = spatial_low_pass_filter(ts_hp, ifg, params)
    ifg.close()

    # construct APS corrections for each ifg
    _make_aps_corrections(ts_aps, ifgs, params)

    # apply correction to ifgs and save ifgs to disc.
    _apply_aps_correction(ifg_paths, aps_paths, params)

    # update/save the phase_data in the tiled numpy files
    shared.save_numpy_phase(ifg_paths, params)
def _calc_svd_time_series(ifg_paths: List[str], params: dict, preread_ifgs: dict,
                          tiles: List[Tile]) -> np.ndarray:
    """
    Helper function to obtain the incremental time series for the
    spatio-temporal filter using the SVD method.

    :param ifg_paths: Paths of the (tmp sampled) interferogram files.
    :param params: Dictionary of PyRate configuration parameters.
    :param preread_ifgs: Dictionary of pre-read interferogram metadata.
    :param tiles: List of processing tiles covering the full scene.
    :return: 3D array of incremental time series, one 2D image per epoch.
    """
    # Is there other existing functions that can perform this same job?
    log.info('Calculating incremental time series via SVD method for APS '
             'correction')
    # copy params temporarily so the configured method is not clobbered
    new_params = deepcopy(params)
    new_params[C.TIME_SERIES_METHOD] = 2  # use SVD method

    process_tiles = mpiops.array_split(tiles)

    nvels = None
    for t in process_tiles:
        log.debug(f'Calculating time series for tile {t.index} during APS '
                  f'correction')
        ifgp = [shared.IfgPart(p, t, preread_ifgs, params) for p in ifg_paths]
        mst_tile = np.load(Configuration.mst_path(params, t.index))
        # time_series returns a tuple; [0] is the incremental series array
        tsincr = time_series(ifgp, new_params, vcmt=None, mst=mst_tile)[0]
        # persist per-tile results so they can be reassembled later
        np.save(file=os.path.join(params[C.TMPDIR],
                                  f'tsincr_aps_{t.index}.npy'), arr=tsincr)
        nvels = tsincr.shape[2]

    # broadcast the epoch count so every rank knows it, including ranks
    # that were assigned no tiles
    nvels = mpiops.comm.bcast(nvels, root=0)
    mpiops.comm.barrier()
    # need to assemble tsincr from all processes
    tsincr_g = _assemble_tsincr(ifg_paths, params, preread_ifgs, tiles, nvels)
    log.debug('Finished calculating time series for spatio-temporal filter')
    return tsincr_g
def _assemble_tsincr(ifg_paths: List[str], params: dict, preread_ifgs: dict,
                     tiles: List[Tile], nvels: np.float32) -> np.ndarray:
    """Reassemble the tiled incremental time series into full-size images.

    Each MPI rank reconstructs a subset of the epoch images; the partial
    results are then gathered across ranks and stacked into a single 3D
    array ordered by epoch index.
    """
    # full-scene image shape, taken from any one interferogram's metadata
    shape = preread_ifgs[ifg_paths[0]].shape
    local_images = {
        idx: assemble_tiles(shape, params[C.TMPDIR], tiles,
                            out_type='tsincr_aps', index=idx)
        for idx in mpiops.array_split(range(nvels))
    }
    gathered = shared.join_dicts(mpiops.comm.allgather(local_images))
    # sort by epoch index before stacking along the third axis
    return np.dstack([image for _, image in sorted(gathered.items())])
def _make_aps_corrections(ts_aps: np.ndarray, ifgs: List[Ifg], params: dict) -> None:
    """
    Convert the filtered APS time series into per-interferogram phase
    corrections and save each correction to disc as a numpy array.

    :param ts_aps: Incremental APS time series array.
    :param ifgs: List of Ifg class objects.
    :param params: Dictionary of PyRate configuration parameters.
    """
    log.debug('Reconstructing interferometric observations from time series')
    # epoch indices: first half are each ifg's first image, second half the second
    _, n = mpiops.run_once(get_epochs, ifgs)
    index_first, index_second = n[:len(ifgs)], n[len(ifgs):]

    for num, ifg in mpiops.array_split(list(enumerate(ifgs))):
        i = int(num)
        # sum the APS increments spanning this ifg's first to second epoch
        ifg_aps = np.sum(ts_aps[:, :, index_first[i]: index_second[i]], axis=2)
        aps_error_on_disc = MultiplePaths.aps_error_path(ifg.tmp_path, params)
        np.save(file=aps_error_on_disc, arr=ifg_aps)  # save APS as numpy array

    mpiops.comm.barrier()
def _apply_aps_correction(ifg_paths: List[str], aps_paths: List[str], params: dict) -> None:
    """
    Read the saved APS corrections, subtract them from the corresponding
    interferogram phase data, and write the corrected phase back to disc.
    """
    path_pairs = list(zip(ifg_paths, aps_paths))
    for ifg_path, aps_path in mpiops.array_split(path_pairs):
        # correction was previously saved as a numpy array
        aps_corr = np.load(aps_path)
        ifg = Ifg(ifg_path)
        ifg.open(readonly=False)
        # convert NaNs and convert phase units to mm
        nan_and_mm_convert(ifg, params)
        # subtract the correction wherever the phase is valid (non-NaN)
        valid = ~np.isnan(ifg.phase_data)
        ifg.phase_data[valid] -= aps_corr[valid]
        # tag the dataset so later runs know the APS error was removed
        ifg.dataset.SetMetadataItem(ifc.PYRATE_APS_ERROR, ifc.APS_REMOVED)
        ifg.write_modified_phase()
        ifg.close()
def spatial_low_pass_filter(ts_hp: np.ndarray, ifg: Ifg, params: dict) -> np.ndarray:
    """
    Filter time series data spatially using a Gaussian low-pass
    filter defined by a cut-off distance. If the cut-off distance is
    defined as zero in the parameters dictionary then it is calculated for
    each time step using the pyrate.covariance.cvd_from_phase method.

    :param ts_hp: Array of temporal high-pass time series data, shape (ifg.shape, n_epochs)
    :param ifg: pyrate.core.shared.Ifg Class object.
    :param params: Dictionary of PyRate configuration parameters.
    :return: ts_lp: Low-pass filtered time series data of shape (ifg.shape, n_epochs).
    """
    log.info('Applying spatial low-pass filter')
    nvels = ts_hp.shape[2]
    cutoff = params[C.SLPF_CUTOFF]
    if cutoff == 0:
        # cutoff will be estimated per image inside _slpfilter; the radial
        # distance object is only needed for that estimation
        r_dist = RDist(ifg)()
    else:
        r_dist = None
        # Only log the fixed cutoff in this branch: previously this message
        # was emitted unconditionally and misleadingly reported a cutoff of
        # 0.000 km when per-image estimation was configured. The per-image
        # value is logged inside _slpfilter instead.
        log.info(f'Gaussian spatial filter cutoff is {cutoff:.3f} km for all '
                 f'{nvels} time-series images')

    # each MPI rank filters a subset of the epoch images
    process_ts_lp = {
        i: _slpfilter(ts_hp[:, :, i], ifg, r_dist, params)
        for i in mpiops.array_split(range(nvels))
    }
    ts_lp_d = shared.join_dicts(mpiops.comm.allgather(process_ts_lp))
    # stack the gathered images in epoch order
    ts_lp = np.dstack([v[1] for v in sorted(ts_lp_d.items())])
    log.debug('Finished applying spatial low pass filter')
    return ts_lp
def _interpolate_nans_2d(arr: np.ndarray, method: str) -> None:
    """
    In-place array interpolation and NaN-fill using scipy.interpolate.griddata.

    :param arr: 2D ndarray to be interpolated; modified in place.
    :param method: Interpolation method; one of 'nearest', 'linear', 'cubic'.
    """
    log.debug(f'Interpolating array with "{method}" method')
    # compute the NaN mask once instead of re-evaluating np.isnan(arr)
    # six times over the same (potentially large) array
    nan_mask = np.isnan(arr)
    known = ~nan_mask
    r, c = np.indices(arr.shape)
    arr[nan_mask] = griddata(
        (r[known], c[known]),        # points we know
        arr[known],                  # values we know
        (r[nan_mask], c[nan_mask]),  # points to interpolate
        method=method, fill_value=0)
def _slpfilter(phase: np.ndarray, ifg: Ifg, r_dist: float, params: dict) -> np.ndarray:
    """Filter a single phase image with the Gaussian spatial low-pass filter.

    When the configured cutoff is zero, a cutoff is first estimated from the
    image's own spatial covariance.
    """
    cutoff = params[C.SLPF_CUTOFF]
    nanfill = params[C.SLPF_NANFILL]
    fillmethod = params[C.SLPF_NANFILL_METHOD]

    # nothing to filter in an all-NaN image
    if np.isnan(phase).all():
        return phase

    if cutoff == 0:
        # estimate the cutoff as the reciprocal of the covariance decay
        # parameter alpha fitted to this image's phase
        _, alpha = cvd_from_phase(phase, ifg, r_dist, calc_alpha=True)
        cutoff = 1.0 / alpha
        log.info(f'Gaussian spatial filter cutoff is {cutoff:.3f} km')

    return gaussian_spatial_filter(phase, cutoff, ifg.x_size, ifg.y_size,
                                   nanfill, fillmethod)
def gaussian_spatial_filter(image: np.ndarray, cutoff: float, x_size: float,
                            y_size: float, nanfill: bool = True,
                            fillmethod: str = 'nearest') -> np.ndarray:
    """
    Function to apply a Gaussian spatial low-pass filter to a 2D image with
    unequal pixel resolution in x and y dimensions. Performs filtering in the
    Fourier domain. Any NaNs in the image are interpolated prior to Fourier
    transformation, with NaNs being replaced in to the filtered output image.

    :param image: 2D image to be filtered
    :param cutoff: filter cutoff in kilometres
    :param x_size: pixel size in x dimension, in metres
    :param y_size: pixel size in y dimension, in metres
    :param nanfill: interpolate image to fill NaNs
    :param fillmethod: interpolation method ('nearest', 'cubic', or 'linear')
    :return: filt: Gaussian low-pass filtered 2D image
    """
    # create NaN mask of image so the NaNs can be re-inserted after filtering
    mask = np.isnan(image)
    # in-place interpolation to fill NaNs; 'nearest' will also fill values
    # outside the convex hull of the known points
    if nanfill:
        _interpolate_nans_2d(image, fillmethod)

    rows, cols = image.shape
    # Pad the image to a square power-of-two array, at least 4096 wide.
    # A fixed pad of 4096 previously raised an error for images larger than
    # 4096 pixels; the pad now grows to the next power of two >= the image
    # size. Results for images <= 4096 pixels are unchanged.
    # NOTE(review): sigma below scales with np.std(dist), which grows with
    # the pad size — confirm the filter response for very large images.
    pad = max(4096, 1 << int(np.ceil(np.log2(max(rows, cols)))))
    im = np.pad(image, ((0, pad - rows), (0, pad - cols)), 'constant')

    # fast fourier transform of the input image, zero frequency centred
    imf = fftshift(fft2(im))

    # calculate centre coords of the padded image
    cx = np.floor(pad / 2)
    cy = np.floor(pad / 2)

    # calculate physical distance of each spectral sample from the centre
    [xx, yy] = np.meshgrid(range(pad), range(pad))
    xx = (xx - cx) * x_size  # these are in meters as x_size in metres
    yy = (yy - cy) * y_size
    dist = np.sqrt(xx ** 2 + yy ** 2) / ifc.METRE_PER_KM  # change m to km

    # Estimate sigma value for Gaussian kernel function in spectral domain
    # by converting cutoff distance to wavenumber and applying a scaling
    # factor based on the kernel window size.
    sigma = np.std(dist) * (1 / cutoff)

    # calculate kernel weights and apply the Gaussian smoothing kernel
    wgt = _kernel(dist, sigma)
    outf = imf * wgt

    # inverse Fourier transform back to the spatial domain
    out = np.real(ifft2(ifftshift(outf)))
    filt = out[:rows, :cols]  # grab non-padded part
    filt[mask] = np.nan  # re-insert nans in output image
    return filt
# TODO: use tiles here and distribute amongst processes
def temporal_high_pass_filter(tsincr: np.ndarray, epochlist: EpochList,
                              params: dict) -> np.ndarray:
    """
    Isolate high-frequency components of time series data by subtracting
    low-pass components obtained using a Gaussian filter defined by a
    cut-off time period (in days).

    :param tsincr: Array of incremental time series data of shape (ifg.shape, n_epochs).
    :param epochlist: A pyrate.core.shared.EpochList Class instance.
    :param params: Dictionary of PyRate configuration parameters.
    :return: ts_hp: Filtered high frequency time series data; shape (ifg.shape, nepochs).
    :raises ValueError: if the configured cutoff is not an integer >= 1 day.
    """
    log.info('Applying temporal high-pass filter')
    threshold = params[C.TLPF_PTHR]
    cutoff_day = params[C.TLPF_CUTOFF]
    # Check the type before comparing: previously `cutoff_day < 1` was
    # evaluated first, so a non-numeric config value raised a TypeError
    # instead of the intended ValueError.
    if not isinstance(cutoff_day, int) or cutoff_day < 1:
        raise ValueError(f'tlpf_cutoff must be an integer greater than or '
                         f'equal to 1 day. Value provided = {cutoff_day}')

    # convert cutoff in days to years
    cutoff_yr = cutoff_day / ifc.DAYS_PER_YEAR
    log.info(f'Gaussian temporal filter cutoff is {cutoff_day} days '
             f'({cutoff_yr:.4f} years)')

    intv = np.diff(epochlist.spans)  # time interval for the neighboring epochs
    span = epochlist.spans[: tsincr.shape[2]] + intv / 2  # accumulated time
    rows, cols = tsincr.shape[:2]

    tsfilt_row = {}
    process_rows = mpiops.array_split(list(range(rows)))

    for r in process_rows:
        # initialise this row's output to NaN
        tsfilt_row[r] = np.full(tsincr.shape[1:], np.nan, dtype=np.float32)
        for j in range(cols):
            # result of the Gaussian filter is the low-frequency component
            # of the pixel's time series
            tsfilt_row[r][j, :] = gaussian_temporal_filter(tsincr[r, j, :],
                                                           cutoff_yr, span, threshold)

    tsfilt_combined = shared.join_dicts(mpiops.comm.allgather(tsfilt_row))
    # Sort by row index before stacking: dict ordering after the MPI gather
    # is only incidentally sorted; sibling functions sort explicitly.
    tsfilt = np.array([v for _, v in sorted(tsfilt_combined.items())])
    log.debug("Finished applying temporal high-pass filter")

    # Return the high-pass time series by subtracting low-pass result from input
    return tsincr - tsfilt
def gaussian_temporal_filter(tsincr: np.ndarray, cutoff: float, span: np.ndarray,
                             thr: int) -> np.ndarray:
    """
    Apply a Gaussian temporal low-pass filter to a 1D time-series vector
    for one pixel with irregular temporal sampling.

    :param tsincr: 1D time-series vector to be filtered.
    :param cutoff: filter cutoff in years.
    :param span: 1D vector of cumulative time spans, in years.
    :param thr: minimum number of non-NaN values required in tsincr.
    :return: ts_lp: Low-pass filtered time series vector; NaN at positions
        where the input is NaN, or everywhere if fewer than `thr` valid
        values exist.
    """
    # initialise to NaN directly (previously np.empty(...) * np.nan, which
    # allocated and then multiplied needlessly)
    ts_lp = np.full(tsincr.shape, np.nan, dtype=np.float32)
    sel = np.nonzero(~np.isnan(tsincr))[0]  # indices of valid observations
    if len(sel) < thr:
        # not enough valid epochs for a reliable filtered estimate
        return ts_lp
    for idx in sel:
        # time offsets of all valid epochs relative to the current epoch
        yr = span[sel] - span[idx]
        # Gaussian smoothing weights, normalised to unit sum; kernel
        # expression inlined here to avoid a per-epoch function call in
        # this innermost loop (it runs once per pixel per epoch)
        wgt = np.exp(-0.5 * (yr / cutoff) ** 2)
        wgt /= np.sum(wgt)
        ts_lp[idx] = np.sum(tsincr[sel] * wgt)
    return ts_lp
def _kernel(x: np.ndarray, sigma: float) -> np.ndarray:
"""
Gaussian low-pass filter kernel
"""
return np.exp(-0.5 * (x / sigma) ** 2)
| {
"content_hash": "966e2c597435fdc183f25f7653b366e5",
"timestamp": "",
"source": "github",
"line_count": 381,
"max_line_length": 94,
"avg_line_length": 41.6246719160105,
"alnum_prop": 0.6638501797086828,
"repo_name": "GeoscienceAustralia/PyRate",
"id": "9308fab03fe91a2013b16169fc40807d2a6f0af3",
"size": "16528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyrate/core/aps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3589"
},
{
"name": "NASL",
"bytes": "13536"
},
{
"name": "Python",
"bytes": "877157"
},
{
"name": "Rascal",
"bytes": "17551"
},
{
"name": "Shell",
"bytes": "7631"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.