# Dataset extraction artifact — original header columns: code | repo_name | path | language | license | size
import logging
from django.conf import settings
from django.template.loader import render_to_string
try:
import weasyprint as wp
PDF_EXPORT_AVAILABLE = True
except ImportError:
PDF_EXPORT_AVAILABLE = False
from froide.helper.text_utils import remove_closing_inclusive
logger = logging.getLogger(__name__)
class PDFGenerator(object):
    """Base class for rendering a Django template to a PDF document.

    Subclasses must provide a ``template_name`` attribute and may
    override ``get_context_data`` to add template context.
    """
    def __init__(self, obj):
        # Object the PDF is generated for; exposed to the template context.
        self.obj = obj

    def get_pdf_bytes(self):
        """Return the rendered PDF as bytes, or b'' when weasyprint is missing."""
        if not PDF_EXPORT_AVAILABLE:
            return b''
        return self.make_doc()

    def get_html_string(self):
        """Render this object's template and return the HTML markup."""
        context = self.get_context_data(self.obj)
        return render_to_string(self.template_name, context)

    def make_doc(self):
        """Convert the rendered HTML into PDF bytes via weasyprint."""
        document = wp.HTML(string=self.get_html_string())
        return document.write_pdf()

    def get_context_data(self, obj):
        """Build the base template context."""
        return {
            'object': obj,
            'SITE_NAME': settings.SITE_NAME,
        }
class FoiRequestPDFGenerator(PDFGenerator):
    """PDF generator that renders the foirequest PDF template."""
    template_name = 'foirequest/pdf/foirequest.html'
class LetterPDFGenerator(PDFGenerator):
    """PDF generator that renders a single message as a printable letter."""
    template_name = 'foirequest/pdf/message_letter.html'

    def get_publicbody(self):
        """Return the public body of the message's request (may be None)."""
        return self.obj.request.public_body

    def get_recipient_address(self):
        """Build the postal address block: public body name plus its address lines."""
        public_body = self.get_publicbody()
        if public_body is None:
            return ''
        lines = [public_body.name]
        lines.extend(public_body.address.splitlines())
        return '\n'.join(lines)

    def get_context_data(self, obj):
        """Extend the base context with the recipient address and letter text."""
        context = super().get_context_data(obj)
        context['recipient_address'] = self.get_recipient_address()
        context['text'] = self.get_letter_text(obj)
        return context

    def get_letter_text(self, message):
        """Plain text of the message up to the sender's email, closing stripped."""
        text, _, _ = message.plaintext.partition(message.sender_email)
        return remove_closing_inclusive(text)
# --- end of file: stefanw/froide — froide/foirequest/pdf_generator.py (Python, MIT, 1,902 bytes) ---
from ase.io import read
from ase.calculators.emt import EMT
from ase.neb import NEB
from ase.optimize import BFGS
# read the last structures (of 5 images used in NEB)
images = read('neb.traj@-5:')
# Attach an EMT calculator to the interior images only; the two end
# images are the fixed endpoints of the band and need no forces.
for i in range(1, len(images) - 1):
    images[i].set_calculator(EMT())
neb = NEB(images)
# Relax the band with BFGS, logging every step to neb_restart.traj
# so the run can be restarted again later from that trajectory.
qn = BFGS(neb, trajectory='neb_restart.traj')
qn.run(fmax=0.005)
# --- end of file: misdoro/python-ase — doc/tutorials/neb/diffusion4.py (Python, GPL-2.0, 355 bytes) ---
from afqueue.common.encoding_utilities import cast_string, cast_list_of_strings
from afqueue.common.exception_formatter import ExceptionFormatter #@UnresolvedImport
from afqueue.data_objects.exchange_wrapper import ExchangeWrapper #@UnresolvedImport
from afqueue.data_objects.data_queue_wrapper import DataQueueWrapper #@UnresolvedImport
from afqueue.common import client_queue_types #@UnresolvedImport
import bson, redis #@UnresolvedImport
class RedisConnectionFailureException(Exception):
    """Raised when a query against a remote PQM's shared memory fails."""
    pass
class RedisConnectionUnknownException(Exception):
    """Raised when no connection is tracked for the requested PQM ID string."""
    pass
class SharedMemoryManager(object):
def __init__(self, qm_connection_string):
# Connect to shared memory.
host, port = qm_connection_string.split(":")
self._qm_redis_client = redis.StrictRedis(host=host, port=int(port), db=0)
# Create the empty PQM redis client dictionary.
self._pqm_redis_client_dict = dict()
self._pqm_redis_connection_information_dict = dict()
def add_pqm_connection(self, pqm_id_string, pqm_connection_string):
"""
Creates a new connection to the shared memory storage of the given PQM using the PQM connection string.
Tracks the connection by the PQM ID string for use; use the query_remote_shared_memory to access.
"""
# Create and add the new connection.
host, port = pqm_connection_string.split(":")
self._pqm_redis_client_dict[pqm_id_string] = redis.StrictRedis(host=host, port=int(port), db=0)
self._pqm_redis_connection_information_dict[pqm_id_string] = pqm_connection_string
def add_qm_to_pecking_order_list(self, qm_id_string):
"""
Adds the given QM ID string to the end of the pecking order list.
"""
# Ensure the QM ID string doesn't already appear in the list.
if qm_id_string not in self.get_pecking_order_list():
self._add_qm_to_pecking_order_list(qm_id_string)
def attach_queues_to_exchanges(self, client_queue_list):
"""
Attaches each queue in the given client queue list to the exchange designated in that queue.
Note: Manipulating object containers in memory should all be done from one thread due to the nature of the shared memory.
"""
try:
# Attach each queue to its declared exchange and update storage.
for client_queue in client_queue_list:
# Get the exchange, add the queue to the routing key, and update it.
exchange_wrapper = self._get_exchange_wrapper(client_queue.exchange_name)
exchange_wrapper.attach_queue(client_queue.routing_key, client_queue.name)
self._set_exchange_wrapper(exchange_wrapper)
# Add the queue.
self._add_queue(client_queue)
except:
raise ExceptionFormatter.get_full_exception()
def clear_connection_reply_data(self):
"""
Removes all connection reply data from shared memory.
"""
self._clear_connection_reply_data()
@staticmethod
def convert_queue_lock_owner_dict_to_network_format(shared_memory_queue_lock_owner_dict):
"""
We convert the queue lock owner dictionary to a network format for more efficient network transmission.
Supply the current owner dictionary in shared memory to this method to get the network form of the dictionary.
"""
# Convert the owner dictionary from being keyed off queue name to being keyed off owner.
queue_lock_owner_dict = dict()
for queue_name, qm_id_string in list(shared_memory_queue_lock_owner_dict.items()):
if qm_id_string != "":
queue_lock_owner_dict.setdefault(qm_id_string, list()).append(queue_name)
return queue_lock_owner_dict
@staticmethod
def convert_queue_lock_owner_dict_from_network_format(network_queue_lock_owner_dict):
"""
We convert the queue lock owner dictionary to a network format for more efficient network transmission.
Supply the a the network format of the owner dictionary to this method to get the shared memory form of the dictionary.
"""
# Convert the owner dictionary from being keyed off owner to being keyed off queue name.
queue_lock_owner_dict = dict()
for qm_id_string, queue_name_list in list(network_queue_lock_owner_dict.items()):
for queue_name in queue_name_list:
queue_lock_owner_dict[queue_name] = qm_id_string
return queue_lock_owner_dict
@staticmethod
def create_queue_lock_string(qm_id_string, process_id_string):
"""
Creates a queue lock string from the given parameters.
"""
return "{0}.{1}".format(qm_id_string, process_id_string)
def delete_value(self, key):
"""
Deletes a previously stored value.
"""
self._delete_value(key)
def get_and_delete_value(self, key):
"""
Returns and deletes a previously stored value.
"""
self._delete_value(key)
return self._get_value(key)
def get_connected_pqm_id_string_list(self):
"""
Returns a list of the PQM ID strings of all remotely connected shared memory storage.
"""
return list(self._pqm_redis_client_dict.keys())
def get_connection_reply_data(self, thread_name, qm_id_string):
"""
Returns the connection reply data for the given thread / QM ID string from shared memory.
"""
return self._get_connection_reply_data(thread_name, qm_id_string)
def get_current_master_id_string(self):
"""
Returns the QM ID String, denoted by the first item in the pecking order list, in the given shared memory object.
"""
current_pecking_order_list = self._get_pecking_order_list()
if current_pecking_order_list == None or len(current_pecking_order_list) == 0:
return None
return current_pecking_order_list[0]
def get_exchange_wrapper_name_list(self):
"""
Returns a list of all exchanges currently in shared memory.
"""
# Forward.
return self._get_exchange_wrapper_name_list()
def get_exchange_wrapper(self, exchange_name):
"""
Returns the exchange with the given name in shared memory.
Returns None if the exchange does not exist.
"""
# Forward.
return self._get_exchange_wrapper(exchange_name)
def get_exchange_wrapper_dict(self):
"""
Returns a dictionary of all exchanges currently in shared memory.
"""
# Forward.
return self._get_exchange_wrapper_dict()
def get_locked_queue_names(self):
"""
Returns the list of queues which have current locks in shared memory.
"""
# Return the keys of the current queue lock owner dictionary.
queue_lock_owner_dict = self.get_queue_lock_owner_dict()
return list(queue_lock_owner_dict.keys())
def get_memory_usage(self):
"""
Returns the total memory, in bytes, currently in use by shared memory.
"""
info = self._get_info()
if info == None:
return None
return info["used_memory"]
def get_network_memory_statistics_dict(self):
"""
Returns the current network statistics dictionary.
Returns an empty dictionary if no entry exists.
"""
return self._get_network_memory_statistics_dict()
def get_ordered_queue_owners_dict(self):
"""
Gets the current ordered queue owners dictionary from shared memory.
"""
# Forward.
ordered_queue_owners_dict = self._get_ordered_queue_owners_dict()
if ordered_queue_owners_dict == None:
return dict()
return ordered_queue_owners_dict
def get_pecking_order_list(self):
"""
Returns the current pecking order list (QM ID strings) in shared memory.
"""
# Forward.
return self._get_pecking_order_list()
def get_queue_lock_data(self, queue_name):
"""
Gets the current queue lock owner data of the given queue in shared memory.
Returns two items: the owner ID string and the owner's process ID.
"""
queue_lock_owner_string = self._get_queue_lock_owner_string(queue_name)
if queue_lock_owner_string == None:
queue_lock_owner_string = "."
return self.split_queue_lock_string(queue_lock_owner_string)
def get_queue_locks_owned_sets(self, test_queue_lock_owner_dict):
"""
Expects the queue lock owner dictionary to be keyed off of queue names, valued with QM owner ID string.
Tests each entry in the given dictionary against the data in shared memory.
Returns two sets: A set of all queue names which have locks matching those given; a set of the queue names which do not have locks matching those given.
"""
try:
queue_lock_owner_dict = self.get_queue_lock_owner_dict()
owned_set = set()
not_owned_set = set()
for queue_name, owner_id_string in list(test_queue_lock_owner_dict.items()):
if queue_lock_owner_dict.get(queue_name, "") != owner_id_string:
not_owned_set.add(queue_name)
else:
owned_set.add(queue_name)
return owned_set, not_owned_set
except:
raise ExceptionFormatter.get_full_exception()
def get_queue_lock_owner_dict(self):
"""
Returns the current queue lock owner dictionary in shared memory.
"""
return self._get_queue_lock_owner_dict()
def get_queue_name_list(self):
"""
Returns a list of all queue currently in shared memory.
"""
# Forward.
return list(self._get_queue_name_set())
def get_queue_wrapper(self, queue_name):
"""
Returns the queue with the given name in shared memory.
Returns None if the queue does not exist.
"""
# Get the full queue wrapper dictionary.
data_queue_wrapper_dict = self.get_queue_wrapper_dict()
# Return the queue wrapper requested.
return data_queue_wrapper_dict.get(queue_name, None)
def get_queue_wrapper_dict(self):
"""
Returns a dictionary of all queue wrappers.
Note this call builds the queue wrappers; they aren't stored in shared memory like exchange wrappers.
"""
# Fill out the current queue names.
queue_name_list = self.get_queue_name_list()
queue_type_dict = self._get_queue_type_dict()
data_queue_wrapper_dict = dict()
for queue_name in queue_name_list:
data_queue_wrapper_dict[queue_name] = DataQueueWrapper(queue_name, queue_type_dict.get(queue_name, client_queue_types.DISTRIBUTED))
# Forward.
return data_queue_wrapper_dict
def get_queue_size(self, queue_name):
"""
Returns the size of the given queue name.
Returns -1 if the queue does not exist.
"""
if queue_name not in self.get_queue_name_list():
return -1
return self._get_queue_size(queue_name)
def get_queue_sizes(self, queue_name_list = None):
"""
Returns the size of each queue currently tracked.
Returns a dictionary with keys being the queue name and values being the queue size (int).
Will have no entry for any queues which do not exist.
"""
if queue_name_list == None:
queue_name_list = self.get_queue_name_list()
return dict(list(zip(queue_name_list, self._get_queue_sizes(queue_name_list))))
def get_thread_name_dict(self):
"""
Returns the current thread name dictionary stored in shared memory.
"""
return self._get_thread_name_dict()
def get_value(self, key):
"""
Returns a previously stored value.
"""
return self._get_value(key)
def is_master(self, qm_id_string):
"""
Returns True/False based on whether or not the given QM ID String matches the current master in the given shared memory object.
"""
return self.get_current_master_id_string() == qm_id_string
def ping(self):
"""
Pings the shared memory.
Returns True if successful.
Note: Intended to be used in a remote query.
"""
return self._ping()
def pop_queue_data(self, queue_name, requested_count):
"""
Attempts to pop the requested count of data from the given queue name's queue.
Returns two items:
the data in a list
the count of data remaining in the queue
"""
try:
# Forward.
data_list, queue_size = self._pop_queue(queue_name, requested_count)
# Return the data and the remaining queue size.
return data_list, queue_size
except:
raise ExceptionFormatter.get_full_exception()
def purge_queue(self, queue_name):
"""
Purges the given queue of all its data in shared memory.
"""
self._purge_queue(queue_name)
def push_queue_data_list(self, queue_name, data_list):
"""
Attempts to push the data in the given data list into the queue of the given name in shared memory.
Returns the current size of the queue after the push (integer).
"""
return self._push_queue_data_list(queue_name, data_list)
    def query_remote_shared_memory(self, pqm_id_string, query_method, *arguments):
        """
        Directs the shared memory object to query the remote shared memory of the given PQM.
        Supply the method to run against the remote shared memory and the arguments required to normally make that call.
        Note you will need the full method signature.  For example:
            If you call with shared_memory_manager.query_remote_shared_memory, supply shared_memory_manager.<method_name>.
        Note: If a connection information can't be found, a RedisConnectionUnknownException will be raised.  Be sure to check for this and handle as desired!
        Note: If a connection can't be made to remote shared memory, a RedisConnectionFailureException will be raised.  Be sure to check for this and handle as desired!
        """
        # Get the remote client; raise an unknown exception if we have no data mapped for the PQM ID String.
        remote_redis_client = self._pqm_redis_client_dict.get(pqm_id_string, None)
        if remote_redis_client == None:
            raise RedisConnectionUnknownException(pqm_id_string)
        # Temporarily overwrite our client and run the command.
        # NOTE(review): query_method is expected to be a bound method of *this*
        # manager; swapping _qm_redis_client redirects it to the remote store.
        # This swap is not thread-safe — presumably only one thread queries at
        # a time; confirm before calling from multiple threads.
        qm_redis_client = self._qm_redis_client
        self._qm_redis_client = remote_redis_client
        # Ensure we don't permanently set our main redis client to the remote redis client by trapping
        try:
            # Run the query, reset our redis client, and return the result.
            result = query_method(*arguments)
            self._qm_redis_client = qm_redis_client
            return result
        except:
            # If we hit an exception, reset our redis client, and raise a connection failure exception.
            self._qm_redis_client = qm_redis_client
            #del(self._pqm_redis_client_dict[pqm_id_string])
            raise RedisConnectionFailureException(ExceptionFormatter.get_message())
def remove_connection_reply_data(self, thread_name, qm_id_string):
"""
Removes the connection reply data for the given thread / QM ID string from shared memory.
"""
self._remove_connection_reply_data(thread_name, qm_id_string)
def remove_pqm_connection(self, pqm_id_string):
"""
Removes the remote connection to the given PQMs shared memory storage.
"""
if pqm_id_string in list(self._pqm_redis_client_dict.keys()):
del(self._pqm_redis_client_dict[pqm_id_string])
if pqm_id_string in list(self._pqm_redis_connection_information_dict.keys()):
del(self._pqm_redis_connection_information_dict[pqm_id_string])
def remove_qm_from_pecking_order_list(self, qm_id_string):
"""
Removes the given QM ID string from the pecking order list.
"""
# Forward.
self._remove_qm_from_pecking_order_list(qm_id_string)
def remove_queue_names_from_memory(self, queue_name_list):
"""
Will remove all queues which match the names in the given queue name list.
Will remove all queue names in the given queue name list from all exchanges.
Note: Manipulating object containers in memory should all be done from one thread due to the nature of the shared memory.
"""
# Get our current state in memory.
current_exchange_wrapper_dict = self.get_exchange_wrapper_dict()
# Remove the queue names from each exchange.
for queue_name in queue_name_list:
for exchange_wrapper in list(current_exchange_wrapper_dict.values()):
if exchange_wrapper.is_queue_attached(queue_name):
exchange_wrapper.detach_queue(queue_name)
self._set_exchange_wrapper(exchange_wrapper)
# Remove the queue names from our queue sets.
for queue_name in queue_name_list:
self._remove_queue(queue_name)
@staticmethod
def split_queue_lock_string(queue_lock_owner_string):
"""
Splits the given queue lock string into its separate components.
"""
return queue_lock_owner_string.split(".")
def set_connection_reply_data(self, thread_name, qm_id_string, data):
"""
Sets the connection reply data for the given thread / QM ID string in shared memory.
"""
# Forward.
self._set_connection_reply_data(thread_name, qm_id_string, data)
def set_network_memory_statistics_dict(self, network_memory_statistics_dict):
"""
Sets the current network statistics dictionary.
Pass a None value to delete the current stored dictionary.
"""
# Forward.
self._set_network_memory_statistics_dict(network_memory_statistics_dict)
def set_ordered_queue_owners_dict(self, ordered_queue_owners_dict):
"""
Sets the current ordered queue owners dictionary in the given shared memory object to the given dictionary.
"""
# Forward.
self._set_ordered_queue_owners_dict(ordered_queue_owners_dict)
def set_queue_lock_dict(self, queue_lock_owner_dict):
"""
Sets the current queue lock owner dictionary in the given shared memory object to the given queue lock owner dictionary.
"""
# Forward.
self._set_queue_lock_dict(queue_lock_owner_dict)
def set_queue_size_snapshot_dict(self, queue_size_snapshot_dict):
"""
Sets the current snapshot dictionary of queue sizes in shared memory.
Note: these are queue size snapshots taken at regular intervals; true queue sizes should be obtained by the get_queue_size method.
"""
# Forward.
self._set_queue_size_snapshot_dict(queue_size_snapshot_dict)
def set_queue_lock_owner(self, queue_name, qm_id_string):
"""
Sets the current queue lock owner for the given queue name in the given shared memory object.
If an empty string ("") is given for the QM ID String, the current owner data for the given queue name will be deleted.
"""
try:
# If the ID string is blank, just remove the key.
if qm_id_string == "":
self._remove_queue_lock_owner(queue_name)
else:
self._set_queue_lock_owner(queue_name, qm_id_string)
except:
raise ExceptionFormatter.get_full_exception()
def set_pecking_order_list(self, qm_id_string_list):
"""
Sets the current pecking order list (QM ID strings).
"""
# Forward.
self._set_pecking_order_list(qm_id_string_list)
def set_thread_name_dict(self, thread_name_dict):
"""
Sets the current thread name dictionary.
"""
# Forward.
self._set_thread_name_dict(thread_name_dict)
def store_value(self, key, data, expire_seconds):
"""
Stores the given value.
Include an expire time
"""
self._store_value(key, data, expire_seconds)
def update_exchange_wrappers(self, exchange_wrapper_list):
"""
Updates shared memory with the given exchange wrappers.
For each wrapper given:
If the wrapper already exists with the same name and type, no action is taken.
If the wrapper exists with the given name but has a different type, overwrites.
If the wrapper does not exist, writes.
Note: Manipulating object containers in memory should all be done from one thread due to the nature of the shared memory.
"""
# Go through all exchanges.
for exchange_wrapper in exchange_wrapper_list:
current_exchange_wrapper = self._get_exchange_wrapper(exchange_wrapper.name)
if current_exchange_wrapper == None:
self._set_exchange_wrapper(exchange_wrapper)
elif current_exchange_wrapper.name != exchange_wrapper.name or current_exchange_wrapper.type != exchange_wrapper.type:
self._set_exchange_wrapper(exchange_wrapper)
def update_from_master_setup_data(self, exchange_wrapper_list, queue_wrapper_list):
"""
Updates shared memory from the given setup data.
"""
# Completely replace the exchange wrappers.
for exchange_wrapper_name in self.get_exchange_wrapper_name_list():
self._remove_exchange(exchange_wrapper_name)
self.update_exchange_wrappers(exchange_wrapper_list)
# Determine which queues should actually be deleted and delete them.
current_queue_name_set = set(self.get_queue_name_list())
given_queue_name_set = set([qw.name for qw in queue_wrapper_list])
remove_queue_name_set = current_queue_name_set - given_queue_name_set
self.remove_queue_names_from_memory(list(remove_queue_name_set))
# Go through all the queue wrappers given and add them.
# Note that we remove old tracking of the queue but not the queue data itself.
# This ensures we don't damage existing data during this process yet get the correct settings for the queue.
for queue_wrapper in queue_wrapper_list:
# Remove.
self._remove_queue(queue_wrapper.name, False)
# Add.
self._add_queue(queue_wrapper)
def which_client_queues_exist_and_are_attached(self, client_queue_list):
"""
Compares the settings in the client queue objects in the given list against what currently exists in shared memory.
Returns two lists of client queue objects:
1) Objects whose settings have been validated in shared memory.
2) Objects whose settings have been invalidated in shared memory.
"""
try:
# Get the current memory state.
current_exchange_dict = self.get_exchange_wrapper_dict()
current_queue_name_list = self.get_queue_name_list()
# Test each client queue given.
client_queue_validated_list = list()
client_queue_invalidated_list = list()
for client_queue in client_queue_list:
exchange = current_exchange_dict.get(client_queue.name, None)
# If the exchange doesn't exist for any client queue in the given list, we have invalidated the test.
if exchange == None:
client_queue_invalidated_list.append(client_queue)
# If the exchange doens't have the queue name attached to the routing key for any client queue in the given list, we have invalidated the test.
elif exchange.is_queue_attached_to_routing_key(client_queue.routing_key, client_queue.name) == False:
client_queue_invalidated_list.append(client_queue)
# If the queue name doesn't exist for any client queue in the given list, we have invalidated the test.
elif client_queue.name not in current_queue_name_list:
client_queue_invalidated_list.append(client_queue)
# If this client queue didn't fail, mark it as validated.
else:
client_queue_validated_list.append(client_queue)
# We have validation if none of the given client queues have failed.
return client_queue_validated_list, client_queue_invalidated_list
except:
raise ExceptionFormatter.get_full_exception()
def which_exchange_wrapper_names_exist(self, exchange_wrapper_name_set):
"""
Compares the exchange names in the given set against the exchange names in shared memory.
Returns two sets:
1) The exchange names which exist
2) The exchange names which do not exist.
"""
try:
does_not_exist_set = exchange_wrapper_name_set - set(self.get_exchange_wrapper_name_list())
return exchange_wrapper_name_set - does_not_exist_set, does_not_exist_set
except:
raise ExceptionFormatter.get_full_exception()
def which_queue_names_are_attached(self, queue_name_set):
"""
Compares the queue names in the given set against the queue names in all exchanges in shared memory.
Returns two sets:
1) The queue names which are attached to any exchange in shared memory.
2) The queue names which are not attached to any exchange in shared memory.
"""
# Create the return list.
attached_queue_name_set = set()
# Get our current state in memory.
current_exchange_wrapper_dict = self.get_exchange_wrapper_dict()
# Remove the queue names from each exchange.
for queue_name in queue_name_set:
for exchange_wrapper in list(current_exchange_wrapper_dict.values()):
if exchange_wrapper.is_queue_attached(queue_name):
attached_queue_name_set.add(queue_name)
# Return out.
return attached_queue_name_set, queue_name_set - attached_queue_name_set
def which_queue_names_exist_or_are_attached(self, queue_name_set):
"""
Compares the queue names in the given list against all queue names which currently exist in shared memory, either as queues or in exchanges' attached queue lists.
Returns two sets of client queue objects:
1) Queue names which exist as queues and/or attached to any exchanges in shared memory.
2) Queue names which do not exist as queues and are not attached to any exchanges in shared memory.
"""
# Of the given queue names, get which ones exist as queues in shared memory.
current_queue_names_set = self._get_queue_name_set().intersection(queue_name_set)
# Testing which queue names exist in exchanges is a more expensive operation.
# We won't need to test queues which we already found as queues; remove them from the set.
exchange_test_queue_name_set = queue_name_set - current_queue_names_set
# Extend the list with the queue names which are attached to exchanges.
attached_queue_name_set, _ = self.which_queue_names_are_attached(exchange_test_queue_name_set)
current_queue_names_set = current_queue_names_set.union(attached_queue_name_set)
# Return the two results.
return current_queue_names_set, queue_name_set - current_queue_names_set
def _add_queue(self, queue_object):
self._qm_redis_client.sadd("data_queue_set", queue_object.name)
self._qm_redis_client.hset("data_queue_types", queue_object.name, queue_object.type)
def _add_qm_to_pecking_order_list(self, qm_id_string):
self._qm_redis_client.rpush("pecking_order", qm_id_string)
def _clear_connection_reply_data(self):
self._qm_redis_client.delete("reply_data")
def _delete_value(self, key):
self._qm_redis_client.delete("sv:{0}".format(key))
def _get_connection_reply_data(self, thread_name, qm_id_string):
return self._qm_redis_client.hget("reply_data", self._get_connection_reply_data_key(thread_name, qm_id_string))
def _get_connection_reply_data_key(self, thread_name, qm_id_string):
return "{0}.{1}".format(thread_name, qm_id_string)
def _get_exchange_wrapper(self, exchange_wrapper_name):
exchange_wrapper_name = cast_string(exchange_wrapper_name)
dumped_exchange_wrapper = self._qm_redis_client.get("ew:" + exchange_wrapper_name)
if dumped_exchange_wrapper == None:
return None
return ExchangeWrapper.load(dumped_exchange_wrapper)
def _get_exchange_wrapper_dict(self):
try:
# Get the list of keys.
exchange_wrapper_key_list = self._get_exchange_wrapper_key_list()
# Return an empty dictionary if we have no keys.
if len(exchange_wrapper_key_list) == 0:
return dict()
# If there are items in the dictionary, get the dumped exchange wrapper list.
dumped_exchange_wrapper_list = self._qm_redis_client.mget(exchange_wrapper_key_list)
# Form the exchange wrapper dictionary from the full keys and dumped values.
exchange_wrapper_dict = dict()
for _ in range(len(exchange_wrapper_key_list)):
# Take the last element off both lists - they will be in order due to the nature of lists.
full_key = exchange_wrapper_key_list.pop()
dumped_value = dumped_exchange_wrapper_list.pop()
# If the dumped string value is valid, load the exchange wrapper from the dumped value and set it as the real exchange wrapper name.
if dumped_value != None:
exchange_wrapper_dict[full_key[3:]] = ExchangeWrapper.load(dumped_value)
# Return our exchange wrapper dictionary.
return exchange_wrapper_dict
"""
# Ensure there are only valid fields in the dumped exchange wrapper list.
while None in dumped_exchange_wrapper_list:
dumped_exchange_wrapper_list.remove(None)
# Pull the dumped exchange wrappers from the keys, load, and store in a list.
exchange_wrapper_list = [ExchangeWrapper.load(dumped) for dumped in dumped_exchange_wrapper_list]
# Get the names from the keys, create a map of names to wrappers, and return the map.
exchange_wrapper_name_list = [key[3:] for key in exchange_wrapper_key_list]
return dict(zip(exchange_wrapper_name_list, exchange_wrapper_list))
"""
except:
raise ExceptionFormatter.get_full_exception()
def _get_exchange_wrapper_key_list(self):
keys_list = self._qm_redis_client.keys("ew:*")
return cast_list_of_strings(keys_list)
def _get_exchange_wrapper_name_list(self):
return [key[3:] for key in self._get_exchange_wrapper_key_list()]
def _get_info(self):
return self._qm_redis_client.info()
def _get_network_memory_statistics_dict(self):
raw_network_memory_statistics_dict = self._qm_redis_client.get("net_stats")
if raw_network_memory_statistics_dict == None:
return dict()
return bson.loads(raw_network_memory_statistics_dict)
def _get_ordered_queue_owners_dict(self):
#return self._qm_redis_client.hgetall("oq_owners")
dumped_ordered_queue_owners_dict = self._qm_redis_client.get("oq_owners")
if dumped_ordered_queue_owners_dict == None:
return None
return bson.loads(dumped_ordered_queue_owners_dict)
def _get_queue_lock_owner_string(self, queue_name):
queue_lock_owner_string = self._qm_redis_client.hget("queue_locks", queue_name)
return cast_string(queue_lock_owner_string)
def _get_queue_lock_owner_dict(self):
queue_lock_owner_dict = self._qm_redis_client.hgetall("queue_locks")
return dict([(k.decode(), v.decode()) for k, v in queue_lock_owner_dict.items()])
def _get_queue_name_set(self):
queue_name_set = self._qm_redis_client.smembers("data_queue_set")
return cast_list_of_strings(queue_name_set)
def _get_queue_size(self, queue_name):
return self._qm_redis_client.llen(queue_name)
def _get_queue_sizes(self, queue_name_list):
# Utilize a pipeline object to handle the call in one transaction.
pipeline = self._qm_redis_client.pipeline()
for queue_name in queue_name_list:
pipeline.llen(queue_name)
return pipeline.execute()
def _get_queue_type_dict(self):
get_queue_type_dict = self._qm_redis_client.hgetall("data_queue_types")
return dict([(k.decode(), v.decode()) for k, v in get_queue_type_dict.items()])
def _get_pecking_order_list(self):
pecking_order_list = self._qm_redis_client.lrange("pecking_order", 0, -1)
return cast_list_of_strings(pecking_order_list)
def _get_thread_name_dict(self):
return bson.loads(self._qm_redis_client.get("thread_names"))
def _get_value(self, key):
value = self._qm_redis_client.get("sv:{0}".format(key))
return cast_string(value)
def _ping(self):
return self._qm_redis_client.echo("") == ""
    def _pop_queue(self, queue_name, element_count = 1):
        # Pop up to element_count items from the head of the redis list.
        # Returns (data_list, remaining_queue_size).
        try:
            # If we are below our threshold for using a bulk pop, do singular pops.
            if element_count < 3:
                # Form the return list and pop our element count.
                data_list = list()
                for _ in range(element_count):
                    # Add data if valid; break if not.
                    data = self._qm_redis_client.lpop(queue_name)
                    if data == None:
                        break
                    data_list.append(data)
                # Get the end queue size.
                # If nothing came back the queue is (or just became) empty, so
                # skip the extra LLEN round trip.
                if len(data_list) == 0:
                    queue_size = 0
                else:
                    queue_size = self._qm_redis_client.llen(queue_name)
            # Bulk pop if we are allowed to.
            else:
                # LRANGE reads the head slice, LTRIM drops it, LLEN reports what
                # remains — all in one pipelined transaction.
                p = self._qm_redis_client.pipeline()
                p.lrange(queue_name, 0, element_count - 1)
                p.ltrim(queue_name, element_count, -1)
                p.llen(queue_name)
                data_list, _, queue_size = p.execute()
            # Return result.
            return data_list, queue_size
        except:
            raise ExceptionFormatter.get_full_exception()
def _purge_queue(self, queue_name):
queue_size = self._get_queue_size(queue_name)
if queue_size > 0:
self._qm_redis_client.ltrim(queue_name, queue_size, -1)
def _push_queue_data_list(self, queue_name, data_list):
return self._qm_redis_client.rpush(queue_name, *data_list)
def _remove_connection_reply_data(self, thread_name, qm_id_string):
self._qm_redis_client.hdel("reply_data", self._get_connection_reply_data_key(thread_name, qm_id_string))
def _remove_exchange(self, exchange_wrapper_name):
exchange_wrapper_name = cast_string(exchange_wrapper_name)
self._qm_redis_client.delete("ew:" + exchange_wrapper_name)
    def _remove_qm_from_pecking_order_list(self, qm_id_string):
        # LREM with count=1 removes only the first occurrence of this id.
        self._qm_redis_client.lrem("pecking_order", 1, qm_id_string)
def _remove_queue(self, queue_name, remove_data = True):
self._qm_redis_client.srem("data_queue_set", queue_name)
self._qm_redis_client.hdel("data_queue_types", queue_name)
if remove_data == True:
self._qm_redis_client.delete(queue_name)
    def _remove_queue_lock_owner(self, queue_name):
        # Release the queue's lock by deleting its entry in the lock hash.
        self._qm_redis_client.hdel("queue_locks", queue_name)
    def _set_connection_reply_data(self, thread_name, qm_id_string, data):
        # Store the reply payload keyed by (thread, QM) in the reply hash.
        self._qm_redis_client.hset("reply_data", self._get_connection_reply_data_key(thread_name, qm_id_string), data)
    def _set_exchange_wrapper(self, exchange_wrapper):
        # Persist the wrapper's serialized form under its "ew:" key.
        self._qm_redis_client.set("ew:" + cast_string(exchange_wrapper.name), exchange_wrapper.dump())
def _set_network_memory_statistics_dict(self, network_memory_statistics_dict):
if network_memory_statistics_dict == None:
self._qm_redis_client.delete("net_stats")
else:
self._qm_redis_client.set("net_stats",bson.dumps(network_memory_statistics_dict))
def _set_pecking_order_list(self, qm_id_string_list):
self._qm_redis_client.delete("pecking_order")
if len(qm_id_string_list) > 0:
self._qm_redis_client.rpush("pecking_order", *qm_id_string_list)
def _set_queue_lock_dict(self, queue_lock_owner_dict):
self._qm_redis_client.delete("queue_locks")
if len(queue_lock_owner_dict) > 0:
self._qm_redis_client.hmset("queue_locks", queue_lock_owner_dict)
def _set_ordered_queue_owners_dict(self, ordered_queue_owners_dict):
self._qm_redis_client.delete("oq_owners")
if len(ordered_queue_owners_dict) > 0:
self._qm_redis_client.set("oq_owners", bson.dumps(ordered_queue_owners_dict))
#self._qm_redis_client.hmset("oq_owners", ordered_queue_owners_dict)
    def _set_queue_lock_owner(self, queue_name, owner_string):
        # Record owner_string as the current lock holder for this queue.
        self._qm_redis_client.hset("queue_locks", queue_name, owner_string)
    def _set_queue_size_snapshot_dict(self, queue_size_snapshot_dict):
        # Store the queue-size snapshot as one BSON blob for atomic swap.
        self._qm_redis_client.set("queue_size_snapshot", bson.dumps(queue_size_snapshot_dict))
    def _set_thread_name_dict(self, thread_name_dict):
        # Store the thread-name map as one BSON blob under "thread_names".
        self._qm_redis_client.set("thread_names", bson.dumps(thread_name_dict))
def _store_value(self, key, data, expire_seconds):
if expire_seconds == None:
self._qm_redis_client.set("sv:{0}".format(key), data)
else:
self._qm_redis_client.setex("sv:{0}".format(key), data, expire_seconds)
| appfirst/distributed_queue_manager | afqueue/source/shared_memory_manager.py | Python | mit | 41,372 |
from unittest import mock
import datetime
from django.test import TestCase
from django.utils import timezone
from django.core.management import call_command
from model_mommy import mommy
from matches.models import Bracket, Round, Match, MatchNotification
from players.models import Player
class SendCurrentMatchupsTestCase(TestCase):
    @mock.patch('matches.models.Match.notify_players')
    def test_sendcurrentmatchups(self, mock_notify_players):
        """
        Test that we call notify_players once per match in the round that
        is currently in progress (round_2), and never for matches in a
        past (round_1) or future (round_3) round.
        """
        bracket = mommy.make(Bracket)
        # Round that ended yesterday -- its match must NOT be notified.
        round_1 = mommy.make(Round, bracket=bracket,
                             start_datetime=timezone.now()-datetime.timedelta(days=8),
                             end_datetime=timezone.now()-datetime.timedelta(days=1))
        # Round currently in progress -- its two matches should be notified.
        round_2 = mommy.make(Round, bracket=bracket,
                             start_datetime=timezone.now()-datetime.timedelta(days=1),
                             end_datetime=timezone.now()+datetime.timedelta(days=6))
        # Round starting in the future -- its match must NOT be notified.
        round_3 = mommy.make(Round, bracket=bracket,
                             start_datetime=timezone.now()+datetime.timedelta(days=6),
                             end_datetime=timezone.now()+datetime.timedelta(days=13))
        # The Match objects themselves are never referenced again, so the
        # unused "match_N = ..." bindings were dropped; only the database
        # rows created by mommy.make matter to the command under test.
        mommy.make(Match, player_1_init=mommy.make(Player),
                   player_2_init=mommy.make(Player),
                   round=round_1)
        mommy.make(Match, player_1_init=mommy.make(Player),
                   player_2_init=mommy.make(Player),
                   round=round_2)
        mommy.make(Match, player_1_init=mommy.make(Player),
                   player_2_init=mommy.make(Player),
                   round=round_2)
        mommy.make(Match, player_1_init=mommy.make(Player),
                   player_2_init=mommy.make(Player),
                   round=round_3)
        call_command('sendcurrentmatchups')
        self.assertEqual(mock_notify_players.call_count, 2)
| kevinharvey/django-tourney | tourney/matches/tests/test_commands.py | Python | gpl-3.0 | 2,074 |
#-*- coding: utf-8 -*-
try:
from PIL import Image
from PIL import ExifTags
except ImportError:
try:
import Image
import ExifTags
except ImportError:
raise ImportError("The Python Imaging Library was not found.")
def get_exif(im):
    """Return the image's EXIF data as a {tag_name: value} dict.

    Tag ids without a known name are kept as their numeric id.  Returns
    an empty dict when the image has no EXIF data or reading it fails.
    """
    try:
        exif_raw = im._getexif() or {}
    except Exception:
        # Narrowed from a bare except: some images lack _getexif or carry
        # corrupt EXIF; treat any failure as "no EXIF" (best effort).
        return {}
    return {ExifTags.TAGS.get(tag, tag): value
            for tag, value in exif_raw.items()}
def get_exif_for_file(file):
    """Open *file* with PIL and return its decoded EXIF dict."""
    im = Image.open(file, 'r')
    return get_exif(im)
def get_subject_location(exif_data):
    """Return the EXIF SubjectLocation as an (x, y) tuple of ints.

    Returns None when the tag is missing or malformed.
    """
    try:
        # Only the first two components (x, y) are used; extra entries
        # (width/height variants) are ignored.
        r = (int(exif_data['SubjectLocation'][0]), int(exif_data['SubjectLocation'][1]),)
    except (KeyError, IndexError, TypeError, ValueError):
        # Narrowed from a bare except: tag absent, too short, or not numeric.
        r = None
    return r
| stefanfoulis/django-filer-travis-testing | filer/utils/pil_exif.py | Python | bsd-3-clause | 759 |
from a10sdk.common.A10BaseClass import A10BaseClass
class XmlSchema(A10BaseClass):
    """XML-Schema File resource (CRUD) for the A10 aXAPI v3.

    Inherits request plumbing from `common/A10BaseClass`; this is the
    "PARENT" class for this module.

    :param uuid: read-only object uuid (string, 1-64 chars, optional).
    :param period: export period in seconds (60-31536000, optional).
    :param remote_file: profile name for the remote URL (optional).
    :param use_mgmt_port: flag; use the management port as source (optional).
    :param xml_schema: XML-Schema file name (string, 1-63 chars, REQUIRED).
    :param DeviceProxy: proxy for REST operations and session handling;
        see `common/device_proxy.py`.

    URL for this object::

        `https://<Hostname|Ip address>//axapi/v3/export-periodic/xml-schema/{xml_schema}`.
    """
    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        # "xml_schema" is the only mandatory field (it names the file).
        self.required = [ "xml_schema"]
        self.b_key = "xml-schema"
        self.a10_url="/axapi/v3/export-periodic/xml-schema/{xml_schema}"
        self.DeviceProxy = ""
        self.uuid = ""
        self.period = ""
        self.remote_file = ""
        self.use_mgmt_port = ""
        self.xml_schema = ""
        # Apply any caller-supplied attribute overrides verbatim.
        for keys, value in kwargs.items():
            setattr(self,keys, value)
| amwelch/a10sdk-python | a10sdk/core/export/export_periodic_xml_schema.py | Python | apache-2.0 | 1,752 |
"""Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils._testing import assert_almost_equal
from sklearn.metrics.cluster._bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
    # Row/column indicator vectors over a 4-element axis.
    a1 = np.array([True, True, False, False])
    a2 = np.array([True, True, True, True])
    a3 = np.array([False, True, True, False])
    a4 = np.array([False, False, True, True])
    # Identical biclusters score 1; disjoint ones score 0.
    assert _jaccard(a1, a1, a1, a1) == 1
    assert _jaccard(a1, a1, a2, a2) == 0.25
    assert _jaccard(a1, a1, a3, a3) == 1.0 / 7
    assert _jaccard(a1, a1, a4, a4) == 0
def test_consensus_score():
    # Two complementary biclusterings; b is a with the rows swapped.
    a = [[True, True, False, False],
         [False, False, True, True]]
    b = a[::-1]
    # Consensus is invariant to bicluster ordering within each set...
    assert consensus_score((a, a), (a, a)) == 1
    assert consensus_score((a, a), (b, b)) == 1
    assert consensus_score((a, b), (a, b)) == 1
    assert consensus_score((a, b), (b, a)) == 1
    # ...but mismatched row/column pairings share no elements.
    assert consensus_score((a, a), (b, a)) == 0
    assert consensus_score((a, a), (a, b)) == 0
    assert consensus_score((b, b), (a, b)) == 0
    assert consensus_score((b, b), (b, a)) == 0
def test_consensus_score_issue2445():
    ''' Different number of biclusters in A and B'''
    a_rows = np.array([[True, True, False, False],
                       [False, False, True, True],
                       [False, False, False, True]])
    a_cols = np.array([[True, True, False, False],
                       [False, False, True, True],
                       [False, False, False, True]])
    # B keeps only the first and third of A's three biclusters.
    idx = [0, 2]
    s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
    # B contains 2 of the 3 biclusters in A, so score should be 2/3
    assert_almost_equal(s, 2.0/3.0)
| glemaitre/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | Python | bsd-3-clause | 1,698 |
#!/usr/bin/env python
# encoding: utf-8
import os
import os.path as osp
import platform
import time
import commands
import xml.etree.ElementTree as ET
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
OS_SYSTEM = platform.system()
IS_WINDOWS = OS_SYSTEM == "Windows"
if not IS_WINDOWS:
from pwd import getpwuid
class Command(object):
    """Thin wrapper around a shell command string."""
    def __init__(self, cmd):
        self.__cmd = cmd

    @property
    def cmdline(self):
        """The raw command-line string this object will execute."""
        return self.__cmd

    def run(self):
        """Execute the command and return ``(exit_status, output)``.

        Returns ``(None, None)`` when execution itself fails, preserving
        the original best-effort contract.
        """
        try:
            status, stdout = commands.getstatusoutput(self.cmdline)
            return status, stdout
        except Exception:
            # Narrowed from a bare except; callers treat (None, None)
            # as "no output".
            return None, None
class LogParser(object):
    """Base class for VCS/file log readers.

    Subclasses implement yield_log(), producing (rev, author, date, msg)
    tuples; get_log_by_date() groups those tuples by date.
    """
    def __init__(self, usr=None, pwd=None, who=None, path=None, sdate=None, edate=None):
        self.usr = usr
        self.pwd = pwd
        self.who = who
        self.path = path
        self.sdate = sdate
        self.edate = edate

    def getcmd(self):
        # Subclasses that shell out override this; the base has no command.
        pass

    def yield_log(self):
        raise NotImplementedError

    def get_log_by_date(self):
        """Group log entries by date.

        Returns an OrderedDict mapping each date (in first-seen order)
        to a list of (rev, author, msg) tuples, e.g.::

            {'date1': [(rev, author, msg), ...],
             'date2': [(rev, author, msg), ...]}
        """
        grouped = OrderedDict()
        for rev, author, date, msg in self.yield_log():
            grouped.setdefault(date, []).append((rev, author, msg))
        return grouped
class SVNLogParser(LogParser):
    """Log parser that shells out to ``svn log --xml``."""
    def __init__(self, usr=None, pwd=None, who=None, path=None, sdate=None, edate=None):
        super(SVNLogParser, self).__init__(usr=usr, pwd=pwd,
                who=who, path=path, sdate=sdate, edate=edate)
    def getcmd(self):
        # Build the svn command; each option is appended only when set.
        # NOTE(review): the password ends up visible on the process
        # command line -- confirm that is acceptable for this tool.
        cmd = u'svn log'
        if self.path:
            cmd += u' {0}'.format(self.path)
        if self.usr:
            cmd += u' --username {0}'.format(self.usr)
        if self.pwd:
            cmd += u' --password {0}'.format(self.pwd)
        if self.sdate and self.edate:
            cmd += u' -r {%s}:{%s}'%(self.sdate, self.edate)
        cmd += u' --xml'
        cmd += u' --search {0}'.format(self.who or self.usr)
        return Command(cmd)
    def yield_log(self):
        # Parse the XML output into (rev, author, date, msg) tuples.
        _, stdout = self.getcmd().run()
        if not stdout:
            return
        root = ET.fromstring(stdout)
        for log in root.findall('logentry'):
            rev = log.get('revision')
            author = log.find('author').text
            # The date is ISO 8601; keep only the YYYY-MM-DD prefix.
            date = log.find('date').text[:10]
            msg = log.find('msg').text
            yield rev, author, date, msg
class GITLogParser(LogParser):
    """Log parser that shells out to ``git log``."""
    def __init__(self, usr=None, pwd=None, who=None, path=None, sdate=None, edate=None):
        super(GITLogParser, self).__init__(usr=usr, pwd=pwd,
                who=who, path=path, sdate=sdate, edate=edate)
    def getcmd(self):
        """Build the ``git log`` command for the configured filters."""
        cmd = u'git log'
        if self.path:
            cmd = cmd + u' {0}'.format(self.path)
        if self.sdate:
            cmd = cmd + u' --after="{0}"'.format(self.sdate)
        if self.edate:
            cmd = cmd + u' --before="{0}"'.format(self.edate)
        # %T,%cd, %s -> tree hash, short date, subject.
        # NOTE(review): %T is the *tree* hash; if a commit id was intended,
        # %H should be used -- confirm.
        cmd += u' --date="short" --pretty=format:"%T,%cd, %s"'
        cmd += u' --committer {0}'.format(self.who or self.usr)
        return Command(cmd)
    def yield_log(self):
        """Yield (rev, author, date, msg) tuples from ``git log`` output."""
        cmd = self.getcmd()
        _, stdout = cmd.run()
        if not stdout:
            return
        for line in stdout.split('\n'):
            # Bug fix: split at most twice -- commit subjects may contain
            # commas, which previously raised "too many values to unpack".
            rev, date, msg = line.split(',', 2)
            yield rev, self.who or self.usr, date, msg
class TodoLogParser(LogParser):
    """
    GTD: my `vim-airline-todo` plugin

    Treats each file under ``self.path`` as one TODO item, using the
    file's mtime as the log date and its owner as the author.
    """
    def __init__(self, usr=None, pwd=None, who=None, path=None, sdate=None, edate=None):
        super(TodoLogParser, self).__init__(usr=usr, pwd=pwd,
                who=who, path=path, sdate=sdate, edate=edate)
    @staticmethod
    def get_file_info(filename):
        """Return (rev, author, date, msg) metadata for a single file.

        Returns four Nones when the path is missing or not a file.
        """
        if not filename or not osp.isfile(filename):
            return None, None, None, None
        mtime = osp.getmtime(filename)
        ltime = time.localtime(mtime)
        date = time.strftime('%Y-%m-%d', ltime)
        msg = osp.basename(filename)
        if not IS_WINDOWS:
            # The file's owner stands in for the commit author.
            author = getpwuid(os.stat(filename).st_uid).pw_name
        else:
            author = None
        rev = 0
        return rev, author, date, msg
    def yield_filenames(self, ignore_hide_file=True, ignore_prefix=(), ignore_suffix=()):
        """Walk self.path, yielding paths that pass the name filters."""
        path = osp.expanduser(self.path)
        path = osp.abspath(path)
        if not path or not osp.isdir(path):
            return
        for root, dirs, fnames in os.walk(path):
            for f in fnames:
                if ignore_hide_file and f.startswith('.'):
                    continue
                if f.endswith(ignore_suffix):
                    continue
                if f.startswith(ignore_prefix):
                    continue
                yield osp.join(root, f)
    def yield_log(self, ignore_hide_file=True, ignore_prefix=(), ignore_suffix=()):
        """Yield (rev, author, date, msg) for files dated within range."""
        tasks = self.yield_filenames(ignore_hide_file, ignore_prefix, ignore_suffix)
        for t in tasks:
            rev, author, date, msg = self.get_file_info(t)
            if self.sdate <= date <= self.edate:
                # Bug fix: yield in the (rev, author, date, msg) order that
                # LogParser.get_log_by_date() unpacks (previously yielded
                # (date, msg, author, rev), grouping entries by message);
                # also fixes the 'unkown' typo.
                yield rev, self.who or self.usr or author or 'unknown', date, msg
| Zuckonit/weekr | weekr/core/logparser.py | Python | gpl-2.0 | 5,443 |
from __future__ import division
import pyaudio
import wave
import sys
import scipy
import numpy as np
import struct
#from scikits.audiolab import flacread
from numpy.fft import rfft, irfft
from numpy import argmax, sqrt, mean, diff, log
import matplotlib
from scipy.signal import blackmanharris, fftconvolve
from time import time
from parabolic import parabolic
#for frequency estimation using harmonic product spectrum
from pylab import subplot, plot, log, copy, show
chunk = 2**8
def freq_from_fft(sig, fs):
    """Estimate frequency from peak of FFT
    """
    # Compute Fourier transform of windowed signal.
    # NOTE(review): assumes len(sig) == chunk (the module-level frame
    # size) so the Blackman-Harris window lines up -- confirm callers.
    windowed = sig * blackmanharris(chunk)
    fftData = rfft(windowed)
    # Find the peak and interpolate to get a more accurate peak
    i = argmax(abs(fftData)) # Just use this for less-accurate, naive version
    if i != len(fftData)-1:
        # Parabolic interpolation on log-magnitude around the peak bin.
        true_i = parabolic(log(abs(fftData)), i)[0]
        # Convert to equivalent frequency
        return fs * true_i / len(windowed)
    else:
        # Peak at the last bin: no right neighbor to interpolate with.
        return fs * i / len(windowed)
def freq_from_fft_quadratic(sig, rate):
    """Estimate frequency from the FFT power-spectrum peak, refined by
    quadratic (parabolic) interpolation around the maximum bin."""
    fftData = np.abs(np.fft.rfft(sig))**2
    # Bug fix: locate the MAXIMUM of the power spectrum -- the original
    # called argmin, contradicting its own "find the maximum" comment.
    # Bin 0 (the DC component) is skipped.
    fftMax = fftData[1:].argmax() + 1
    # Refine with a parabola through the log-power of the three bins
    # around the peak, unless the peak is the last bin.
    if fftMax != len(fftData)-1:
        y0,y1,y2 = np.log(fftData[fftMax-1:fftMax+2:])
        x1 = ((y2 - y0) * .5) / (2 * y1 - y2 - y0)
        # Convert the fractional bin index to Hz (chunk = frame size).
        frequency = (fftMax+x1)*rate/chunk
    else:
        frequency = fftMax*rate/chunk
    return frequency
"""
def freq_from_autocorr(sig, fs):
# Calculate autocorrelation (same thing as convolution, but with
# one input reversed in time), and throw away the negative lags
corr = fftconvolve(sig, sig[::-1], mode='full')
corr = corr[len(corr)/2:]
# Find the first low point
d = diff(corr)
start = find(d > 0)#[0]
# Find the next peak after the low point (other than 0 lag). This bit is
# not reliable for long signals, due to the desired peak occurring between
# samples, and other peaks appearing higher.
# Should use a weighting function to de-emphasize the peaks at longer lags.
peak = argmax(corr[start:]) + start
px, py = parabolic(corr, peak)
return fs / px
"""
"""
def freq_from_autocorr2(signal, fs):
"""
"""Estimate frequency using autocorrelation
Pros: Best method for finding the true fundamental of any repeating wave,
even with strong harmonics or completely missing fundamental
Cons: Not as accurate, doesn't work for inharmonic things like musical
instruments, this implementation has trouble with finding the true peak
"""
"""
# Calculate autocorrelation (same thing as convolution, but with one input
# reversed in time), and throw away the negative lags
signal -= mean(signal) # Remove DC offset
corr = fftconvolve(signal, signal[::-1], mode='full')
corr = corr[len(corr)/2:]
# Find the first low point
d = diff(corr)
start = matplotlib.mlab.find(d > 0)[0]
# Find the next peak after the low point (other than 0 lag). This bit is
# not reliable for long signals, due to the desired peak occurring between
# samples, and other peaks appearing higher.
i_peak = argmax(corr[start:]) + start
i_interp = parabolic(corr, i_peak)[0]
return fs / i_interp
"""
def capture_audio(filename)
wf = wave.open(sys.argv[1], 'rb')
(channels,sample_width,rate,frames,comptype,compname) = wf.getparams()
p = pyaudio.PyAudio()
stream = p.open(format = p.get_format_from_width(sample_width),
channels = channels,
rate = rate,
output = True)
data = wf.readframes(chunk)
while len(data) == chunk*sample_width:
indata = np.array(wave.struct.unpack("%dh"%(len(data)/sample_width),data))
frequency = freq_from_fft(indata, rate)
if frequency < 1000 and frequency > 500:
print "frequency: %f Hz" % (frequency)
stream.write(data)
data = wf.readframes(chunk)
#stream may not divide evenly into chunks
if data:
stream.write(data)
wf.close()
stream.stop_stream()
stream.close()
p.terminate()
def main():
#open wav file
wf = wave.open(sys.argv[1], 'rb')
(channels,sample_width,rate,frames,comptype,compname) = wf.getparams()
FPS = 25.0
trimby = 10
divby = 100
#print("channels: %d sample width: %d rate: %d frames: %d chunk: %d" %(channels, sample_width, rate, frames, chunk))
# instantiate PyAudio
p = pyaudio.PyAudio()
stream = p.open(format = p.get_format_from_width(sample_width),
channels = channels,
rate = rate,
output = True)
data = wf.readframes(chunk)
freq_sum = 0.0
freq_count = 0
freq_max = 0.0
freq_min = 999999999999
while len(data) == chunk*sample_width:
# unpack data
indata = np.array(wave.struct.unpack("%dh"%(len(data)/sample_width),data))
#remainder of calculations
frequency = freq_from_fft(indata, rate)
if frequency < 1000 and frequency > 500:
print "frequency: %f Hz" % (frequency)
if frequency < 1000 and frequency >= 0:
#print "frequency: %f Hz" % (frequency)
freq_sum += frequency
freq_count += 1
if frequency < freq_min:
freq_min = frequency
if frequency > freq_max:
freq_max = frequency
# write data out to the audio stream after first round of calculations
stream.write(data)
# read some more data
data = wf.readframes(chunk)
avg_freq = freq_sum/freq_count
print("Average frequency for this clip: %f" %(avg_freq))
print("Min frequency for this clip: %f" %(freq_min))
print("Max frequency for this clip: %f" %(freq_max))
if data:
stream.write(data)
wf.close()
stream.stop_stream()
stream.close()
p.terminate()
# Script entry point.
if __name__ == '__main__':
    main()
| LucidBlue/mykeepon-storyteller | src/audio_capture_test.py | Python | bsd-3-clause | 5,709 |
import six
import json
from userjs.userjs_settings import JSON_HANDLERS
def _json_handlers(obj):
"""Extra handlers that JSON aren't able to parse.
The only built-in conversion is for datetime. User configured handlers
are tried for other types. If they all fail, raise TypeError.
"""
if hasattr(obj, 'isoformat'):
return obj.isoformat()
elif JSON_HANDLERS:
for handler in JSON_HANDLERS:
try:
return handler(obj)
except TypeError:
pass
raise TypeError('%s is not JSON serializable' % repr(obj))
def jsondumps(obj):
    """Creates a JSON string that can handle datetime objects.

    Uses compact separators; types the encoder cannot serialize fall
    back to _json_handlers.
    """
    return json.dumps(obj, separators=(',', ':'), default=_json_handlers)
def get_field_value(obj, field_name):
    """Resolve a Django-style ``a__b__c`` lookup path against an object.

    Follows each ``__``-separated attribute in turn; returns None as soon
    as any step is missing (instead of raising).  If the final value is
    callable it is called and the result returned, mirroring Django's
    query/template semantics.
    """
    value = obj
    for field in field_name.split('__'):
        if not hasattr(value, field):
            return None
        value = getattr(value, field)
    # six.callable was only needed for Python 3.0/3.1; the builtin is
    # available on every interpreter this code targets.
    if callable(value):
        return value()
    return value
| tweekmonster/django-userjs | userjs/utils.py | Python | bsd-3-clause | 1,325 |
'''
@date Sep 10, 2010
@author Matthew Todd
This file is part of Test Parser
by Matthew A. Todd
Test Parser is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Test Parser is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Test Parser. If not, see <http://www.gnu.org/licenses/>
'''
#from .IRunner import IRunner
from TestParser.Common.Constants import CONSTANTS
from .IRunner import InvalidRunnerException
import sys
class InvalidFileException(InvalidRunnerException):
    '''
    Raised when the file is invalid (missing or unreadable).
    '''
    def __init__(self, file):
        # The parent exception wants a runner; there is none for a file.
        super().__init__(None)
        self.file = file
    def __str__(self):
        return "Could not open file '%s'" % self.file
    def __repr__(self):
        return str(self)
class FileRunner():
    '''
    Not an actual runner in the normal sense. Reads in a data file (possibly
    stdin) and passes the data on.

    We use @property runner for two reasons:
        1) so that the interface matches (our file is set/get/del with the
            name runner)
        2) so that we can set runnerName

    @date Sep 10, 2010
    '''
    def __init__(self):
        self._file = None
        self.runnerName = None

    @property
    def runner(self):
        """The filename/path this runner reads from ('-' means stdin)."""
        return self._file

    @runner.setter
    def runner(self, gRunner):                      #@DuplicatedSignature
        '''
        @param gRunner filename/path to the file, or '-' for stdin
        '''
        self.runnerName = gRunner
        self._file = gRunner

    @runner.deleter
    def runner(self):                               #@DuplicatedSignature
        del self._file

    def run(self):
        """Return the file's (or stdin's) entire contents as a string.

        Raises InvalidFileException when the file cannot be read.
        """
        try:
            if self._file == "-":
                return sys.stdin.read()
            with open(self._file, 'r') as f:
                return f.read()
        except Exception:
            # Narrowed from a bare except (which also swallowed
            # KeyboardInterrupt/SystemExit); log, then translate into the
            # runner-level exception type.
            CONSTANTS.logger.error("Failed to read file %s" % self._file)
            raise InvalidFileException(self._file)

    def runAll(self):
        return self.run()

    def runPrevious(self):
        '''
        Log b/c runPrevious with Files can lead to unexpected behavior.
        '''
        CONSTANTS.logger.info("Running previous FileRunner, file = %s" % self._file)
        return self.run()
| matcatc/Test_Parser | src/TestParser/Model/FileRunner.py | Python | gpl-3.0 | 2,639 |
# -*- coding: UTF-8 -*-
#! python3 # noqa E265
"""
Isogeo API v1 - Enums for Resource entity accepted kinds
See: http://help.isogeo.com/api/complete/index.html#definition-application
"""
# #############################################################################
# ########## Libraries #############
# ##################################
# standard library
from enum import Enum, auto
# #############################################################################
# ########## Classes ###############
# ##################################
class ApplicationTypes(Enum):
    """Closed list of accepted Application (metadata subresource) kinds
    in the Isogeo API.

    Members are numbered from 1 (equivalent to ``auto()``):

    :Example:

        >>> ApplicationTypes.group.value
        1
        >>> "group" in ApplicationTypes.__members__   # case sensitive
        True
        >>> "User" in ApplicationTypes.__members__
        False

    See: https://docs.python.org/3/library/enum.html
    """

    group = 1
    user = 2
# ##############################################################################
# ##### Stand alone program ########
# ##################################
if __name__ == "__main__":
"""standalone execution."""
print("{0:<30} {1:>20}".format("Enum", "Value"))
for md_kind in ApplicationTypes:
print("{0:<30} {1:>20}".format(md_kind, md_kind.value))
print(len(ApplicationTypes))
print("group" in ApplicationTypes.__members__)
print("User" in ApplicationTypes.__members__)
print("confidential" in ApplicationTypes.__members__)
| isogeo/isogeo-plugin-qgis | modules/isogeo_pysdk/enums/application_types.py | Python | gpl-3.0 | 2,044 |
import subprocess
import os
import pyblish
# Locate the helper script shipped next to this module.
path = os.path.join(os.path.dirname(__file__), 'pyblish_util.py')
# Walk up from the installed pyblish package to the application root,
# then down into its bin/ directory for the bundled interpreter.
# NOTE(review): this assumes a fixed directory depth between the pyblish
# package and the application's python.bat -- confirm per deployment.
executable = os.path.dirname(os.path.dirname(pyblish.__file__))
executable = os.path.dirname(os.path.dirname(os.path.dirname(executable)))
executable = os.path.dirname(executable)
executable = os.path.join(executable, 'bin', 'python.bat')
args = [executable, path]
# Fire-and-forget: run the utility in a separate interpreter.
subprocess.Popen(args)
| mkolar/pyblish-ftrack | pyblish_ftrack/ftrack_event_plugin_path/environment_wrapper.py | Python | lgpl-3.0 | 402 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Q2(c): Recurrent neural nets for NER
"""
from __future__ import absolute_import
from __future__ import division
import argparse
import logging
import sys
import tensorflow as tf
import numpy as np
logger = logging.getLogger("hw3.q2.1")
logger.setLevel(logging.DEBUG)
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
class RNNCell(tf.nn.rnn_cell.RNNCell):
    """Wrapper around our RNN cell implementation that allows us to play
    nicely with TensorFlow.
    """
    def __init__(self, input_size, state_size):
        self.input_size = input_size
        self._state_size = state_size
    @property
    def state_size(self):
        return self._state_size
    @property
    def output_size(self):
        # For a vanilla RNN the output is the hidden state itself.
        return self._state_size
    def __call__(self, inputs, state, scope=None):
        """Update the state: h_t = sigmoid(x_t W_x + h_{t-1} W_h + b).

        Args:
            inputs: input batch x_t, shape [None, self.input_size].
            state: previous state h_{t-1}, shape [None, self.state_size].
            scope: name of the variable scope holding the cell parameters.
        Returns:
            a pair (output, new_state); identical tensors for this cell.
        """
        scope = scope or type(self).__name__
        # It's always a good idea to scope variables in functions lest they
        # be defined elsewhere!
        with tf.variable_scope(scope):
            ### YOUR CODE HERE (~6-10 lines)
            # W_x: (input_size, state_size) input-to-hidden weights;
            # W_h: (state_size, state_size) hidden-to-hidden weights;
            # both Xavier-initialized.  b: (state_size,) bias, zeros.
            W_x = tf.get_variable("W_x",shape=(self.input_size, self._state_size),initializer=tf.contrib.layers.xavier_initializer())
            W_h = tf.get_variable("W_h",shape=(self._state_size, self._state_size),initializer=tf.contrib.layers.xavier_initializer())
            b = tf.get_variable("b",shape=(self._state_size),initializer=tf.zeros_initializer())
            new_state = tf.sigmoid(
                tf.matmul(inputs,W_x) + tf.matmul(state,W_h) + b
            )
            ### END YOUR CODE ###
        # For an RNN , the output and state are the same (N.B. this
        # isn't true for an LSTM, though we aren't using one of those in
        # our assignment)
        output = new_state
        return output, new_state
def test_rnn_cell():
    """Check the RNN cell against hand-computed outputs with identity
    weight matrices and a ones bias."""
    with tf.Graph().as_default():
        with tf.variable_scope("test_rnn_cell"):
            x_placeholder = tf.placeholder(tf.float32, shape=(None,3))
            h_placeholder = tf.placeholder(tf.float32, shape=(None,2))
            # Pre-create the cell's variables with known values, then let
            # the cell pick them up via reuse.
            with tf.variable_scope("rnn"):
                tf.get_variable("W_x", initializer=np.array(np.eye(3,2), dtype=np.float32))
                tf.get_variable("W_h", initializer=np.array(np.eye(2,2), dtype=np.float32))
                tf.get_variable("b",  initializer=np.array(np.ones(2), dtype=np.float32))
            tf.get_variable_scope().reuse_variables()
            cell = RNNCell(3, 2)
            y_var, ht_var = cell(x_placeholder, h_placeholder, scope="rnn")
            init = tf.global_variables_initializer()
            with tf.Session() as session:
                session.run(init)
                x = np.array([
                    [0.4, 0.5, 0.6],
                    [0.3, -0.2, -0.1]], dtype=np.float32)
                h = np.array([
                    [0.2, 0.5],
                    [-0.3, -0.3]], dtype=np.float32)
                # Expected values computed by hand from the equations above.
                y = np.array([
                    [0.832, 0.881],
                    [0.731, 0.622]], dtype=np.float32)
                ht = y
                y_, ht_ = session.run([y_var, ht_var], feed_dict={x_placeholder: x, h_placeholder: h})
                print("y_ = " + str(y_))
                print("ht_ = " + str(ht_))
                assert np.allclose(y_, ht_), "output and state should be equal."
                assert np.allclose(ht, ht_, atol=1e-2), "new state vector does not seem to be correct."
def do_test(_):
    # CLI entry point: run the RNN-cell unit check and report success.
    logger.info("Testing rnn_cell")
    test_rnn_cell()
    logger.info("Passed!")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Tests the RNN cell implemented as part of Q2 of Homework 3')
subparsers = parser.add_subparsers()
command_parser = subparsers.add_parser('test', help='')
command_parser.set_defaults(func=do_test)
ARGS = parser.parse_args()
if ARGS.func is None:
parser.print_help()
sys.exit(1)
else:
ARGS.func(ARGS)
| kabrapratik28/Stanford_courses | cs224n/assignment3/q2_rnn_cell.py | Python | apache-2.0 | 5,031 |
import copy

import unittest2
from faker import Faker
from nose.plugins.attrib import attr

from . import helper
from hapi.contacts import ContactsClient
fake = Faker()
class ContactsClientTestCase(unittest2.TestCase):
    """ Unit tests for the HubSpot Contacts API Python client.

    This file contains some unittest tests for the Contacts API.

    Questions, comments: http://developers.hubspot.com/docs/methods/contacts/contacts-overview
    """
    # Shared request fixture (class-level, built once with random fake
    # data).  Tests must deep-copy it before mutating nested entries.
    test_contact_json = {
        "properties": [
            {
                "property": "email",
                "value": fake.email()
            },
            {
                "property": "firstname",
                "value": fake.first_name()
            },
            {
                "property": "lastname",
                "value": fake.last_name()
            },
            {
                "property": "website",
                "value": fake.url()
            },
            {
                "property": "company",
                "value": fake.company()
            },
            {
                "property": "phone",
                "value": fake.phone_number()
            },
            {
                "property": "address",
                "value": fake.street_address()
            },
            {
                "property": "city",
                "value": fake.city()
            },
            {
                "property": "state",
                "value": fake.state()
            },
            {
                "property": "zip",
                "value": fake.zipcode()
            }
        ]
    }

    def setUp(self):
        self.client = ContactsClient(**helper.get_options())
        self.contacts = []

    def tearDown(self):
        """ Cleans up the created objects. """
        # Plain loop instead of a list comprehension used for side effects.
        for contact in self.contacts:
            self.client.delete_a_contact(contact)

    @attr('api')
    def test_create_or_update_a_contact(self):
        """ Test the create or update a contact endpoint is valid. """
        email = self.test_contact_json['properties'][0]['value']
        response = self.client.create_or_update_a_contact(email, data=self.test_contact_json)
        self.assertTrue(len(response) > 0)
        self.contacts.append(response['vid'])

    @attr('api')
    def test_get_contact_by_email(self):
        """ Test that the get contact by email address endoint is valid. """
        email = self.test_contact_json['properties'][0]['value']
        contact = self.client.create_or_update_a_contact(email, data=self.test_contact_json)['vid']
        response = self.client.get_contact_by_email(email)
        self.assertTrue(len(response) > 0)
        self.contacts.append(contact)

    @attr('api')
    def test_update_a_contact(self):
        """ Test that the update contact endpoint is valid and that changes persist. """
        email = self.test_contact_json['properties'][0]['value']
        contact = self.client.create_or_update_a_contact(email, data=self.test_contact_json)['vid']
        # Bug fix: dict.copy() is shallow, so mutating the nested property
        # dict also mutated the shared class-level fixture; deep-copy so
        # the fixture stays pristine for the other tests.
        new_contact_json = copy.deepcopy(self.test_contact_json)
        new_contact_json['properties'][4]['value'] = new_contact_json['properties'][4]['value'] + ' UPDATED'
        # Bug fix: send the *modified* payload (the original sent the
        # unmodified fixture and only passed via the shallow-copy alias).
        response = self.client.update_a_contact(contact, data=new_contact_json)
        contact_response = self.client.get_contact_by_email(email)
        self.assertEqual(contact_response['properties']['company']['value'], new_contact_json['properties'][4]['value'])
        self.contacts.append(contact)

    @attr('api')
    def test_delete_a_contact(self):
        """ Test that the delete contact endpoint is valid. """
        email = self.test_contact_json['properties'][0]['value']
        contact = self.client.create_or_update_a_contact(email, data=self.test_contact_json)['vid']
        response = self.client.delete_a_contact(contact)
        self.assertTrue(len(response) > 0)
| CBitLabs/hapipy | hapi/test/test_contacts.py | Python | apache-2.0 | 3,881 |
from django.contrib.sitemaps import Sitemap
from django.conf import settings
from django.db import models
class BaseContent(models.Model):
    """Abstract base model for wiki-rendered content (see Page)."""
    title = models.CharField(max_length=200)
    content = models.TextField(blank=True,
        help_text='Syntax examples (reStructuredText):<br /><br />'
            '<code><span style="font-weight: bold">**bold**</span>'
            ' <em>*italicts*</em> '
            '``inline_code()`` '
            '`long link <http://url.com>`_ and `short link`_<br />'
            '.. _short link: http://url.com'
            ' <em>(put this at the bottom)</em>'
            '<br /><br />'
            '* bulleted list<br />'
            '#. numbered list<br /><br />'
            '.. image:: http://example.com/image.png<br /><br />'
            '.. sourcecode:: python<br /><br />'
            '    def f(x):<br />'
            '        pass</code>'
            '<br /><br />'
            'See the <a href="'
            'http://docutils.sourceforge.net/docs/user/rst/quickref.html'
            '" target="_blank">quick reference</a> for more details.')
    # This stores the generated HTML code from our wiki syntax
    pre_rendered_content = models.TextField(blank=True, editable=False)
    last_update = models.DateTimeField(auto_now=True)
    @property
    def rendered_content(self):
        # The HTML was produced by our own renderer in save(), so mark it
        # safe for direct template output.
        from django.utils.safestring import mark_safe
        return mark_safe(self.pre_rendered_content)
    class Meta:
        abstract = True
    def save(self, *args, **kwargs):
        # Pre-generate HTML code from our markup for faster access, later
        from .markup import html_body
        self.pre_rendered_content = html_body(self.content)
        super(BaseContent, self).save(*args, **kwargs)
class Page(BaseContent):
    """A CMS page addressed by an explicit URL path."""
    url = models.CharField('URL', max_length=200)
    show_share_buttons = models.BooleanField(default=True,
        help_text='Show buttons for sharing this page on Twitter, Facebook, etc.')

    def __unicode__(self):
        return u"%s -- %s" % (self.url, self.title)

    def get_absolute_url(self):
        """Return the page URL normalized to start with '/' (and, when
        settings.APPEND_SLASH is enabled, to end with '/').

        Bug fix: the original mutated ``self.url`` as a side effect of a
        read accessor; normalization is now done on a local copy so a
        simple lookup can never silently alter model state.
        """
        url = self.url
        if not url.endswith('/') and settings.APPEND_SLASH:
            url = "%s/" % url
        if not url.startswith('/'):
            url = "/" + url
        return url
class Block(models.Model):
    """A named chunk of raw content that templates can include by name."""
    name = models.CharField(max_length=200)
    content = models.TextField(blank=True)

    def __unicode__(self):
        return self.name
class PagesSitemap(Sitemap):
    """Django sitemap source covering the CMS pages."""
    changefreq = "daily"

    def items(self):
        # Cap at 2000 pages to keep the sitemap response bounded.
        return Page.objects.all()[:2000]

    def lastmod(self, obj):
        # last_update is auto_now, so it tracks the latest save.
        return obj.last_update
| umitproject/tease-o-matic | minicms/models.py | Python | bsd-3-clause | 2,811 |
#!/usr/bin/env python
#
# Tate - lightweight Mazacoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import socket
import time
import sys
import os
import threading
import traceback
import json
import Queue
import util
from network import Network
from util import print_error, print_stderr, parse_json
from simple_config import SimpleConfig
DAEMON_PORT=8001
def do_start_daemon(config):
    """Spawn this module as a detached daemon process, redirecting its
    stdout/stderr to daemon.log inside the config directory."""
    import subprocess
    logfile = open(os.path.join(config.path, 'daemon.log'),'w')
    # NOTE(review): the parent's logfile handle is never closed after the
    # child inherits the descriptor -- confirm whether the leak matters here.
    p = subprocess.Popen(["python",__file__], stderr=logfile, stdout=logfile, close_fds=True)
    print_stderr("starting daemon (PID %d)"%p.pid)
def get_daemon(config, start_daemon=True):
    """Connect to the daemon's TCP port, optionally spawning it first.

    Returns a connected socket on success, or False when the daemon is
    not running and start_daemon is False.  After spawning, retries every
    0.1s until the new daemon starts listening.
    """
    import socket  # NOTE(review): shadows the module-level import; redundant but harmless
    daemon_port = config.get('daemon_port', DAEMON_PORT)
    daemon_started = False
    while True:
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect(('', daemon_port))
            if not daemon_started:
                # Only announce when we did not have to start it ourselves.
                print_stderr("Connected to daemon on port %d"%daemon_port)
            return s
        except socket.error:
            if not start_daemon:
                return False
            elif not daemon_started:
                do_start_daemon(config)
                daemon_started = True
            else:
                # Busy-wait until the freshly spawned daemon is listening.
                time.sleep(0.1)
class ClientThread(threading.Thread):
    """Serves one GUI client connected to the daemon.

    A helper reader thread pulls JSON requests off the client socket and
    forwards them to the NetworkServer; this thread's run() loop drains
    the per-client response queue back over the socket.
    """
    def __init__(self, server, s):
        threading.Thread.__init__(self)
        self.server = server
        self.daemon = True
        self.client_pipe = util.SocketPipe(s)
        # Responses destined for this client (filled by NetworkServer).
        self.response_queue = Queue.Queue()
        self.server.add_client(self)

    def reading_thread(self):
        """Forward client requests to the server until the pipe closes."""
        while self.running:
            try:
                request = self.client_pipe.get()
            except util.timeout:
                continue
            if request is None:
                # Pipe closed by the client: stop both loops.
                self.running = False
                break
            if request.get('method') == 'daemon.stop':
                # Control request handled locally, not forwarded to the network.
                self.server.stop()
                continue
            self.server.send_request(self, request)

    def run(self):
        self.running = True
        threading.Thread(target=self.reading_thread).start()
        while self.running:
            try:
                response = self.response_queue.get(timeout=0.1)
            except Queue.Empty:
                continue
            try:
                self.client_pipe.send(response)
            except socket.error:
                # Client went away; shut this handler down.
                self.running = False
                break
        self.server.remove_client(self)
class NetworkServer(threading.Thread):
    """Owns the single Network instance and multiplexes it among clients.

    Request ids are rewritten to server-unique ids so that responses
    coming back from the network can be routed to the originating
    ClientThread; responses without an id are notifications and are
    broadcast to every connected client.
    """
    def __init__(self, config):
        threading.Thread.__init__(self)
        self.daemon = True
        self.debug = False
        self.config = config
        self.network = Network(config)
        # network sends responses on that queue
        self.network_queue = Queue.Queue()
        self.running = False
        self.lock = threading.RLock()
        # each GUI is a client of the daemon
        self.clients = []
        # Monotonic counter used to rewrite client request ids (send_request).
        self.request_id = 0
        # Maps rewritten id -> (original client request id, ClientThread).
        self.requests = {}

    def is_running(self):
        with self.lock:
            return self.running

    def stop(self):
        with self.lock:
            self.running = False

    def start(self):
        self.running = True
        threading.Thread.start(self)

    def add_client(self, client):
        # Replay the current network status so the new client has a
        # consistent view before any live notifications arrive.
        for key in ['status','banner','updated','servers','interfaces']:
            value = self.network.get_status_value(key)
            client.response_queue.put({'method':'network.status', 'params':[key, value]})
        with self.lock:
            self.clients.append(client)
        print_error("new client:", len(self.clients))

    def remove_client(self, client):
        with self.lock:
            self.clients.remove(client)
        print_error("client quit:", len(self.clients))

    def send_request(self, client, request):
        """Rewrite the request id to a server-unique one and forward it
        to the network thread."""
        with self.lock:
            self.request_id += 1
            self.requests[self.request_id] = (request['id'], client)
            request['id'] = self.request_id
        if self.debug:
            print_error("-->", request)
        self.network.requests_queue.put(request)

    def run(self):
        self.network.start(self.network_queue)
        while self.is_running():
            try:
                response = self.network_queue.get(timeout=0.1)
            except Queue.Empty:
                continue
            if self.debug:
                print_error("<--", response)
            response_id = response.get('id')
            if response_id:
                # Route the response back to the client that asked,
                # restoring that client's original request id.
                with self.lock:
                    client_id, client = self.requests.pop(response_id)
                response['id'] = client_id
                client.response_queue.put(response)
            else:
                # notification
                for client in self.clients:
                    client.response_queue.put(response)
        self.network.stop()
        print_error("server exiting")
def daemon_loop(server):
    """Accept client connections until stopped or idle too long.

    The daemon shuts itself down once it has had no clients for
    daemon_timeout seconds (default five minutes).
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    daemon_port = server.config.get('daemon_port', DAEMON_PORT)
    daemon_timeout = server.config.get('daemon_timeout', 5*60)
    s.bind(('', daemon_port))
    s.listen(5)
    # Short accept timeout so the idle check below runs regularly.
    s.settimeout(1)
    t = time.time()
    while server.running:
        try:
            connection, address = s.accept()
        except socket.timeout:
            if not server.clients:
                if time.time() - t > daemon_timeout:
                    print_error("Daemon timeout")
                    break
            else:
                # Clients are connected: keep pushing the deadline forward.
                t = time.time()
            continue
        t = time.time()
        client = ClientThread(server, connection)
        client.start()
    server.stop()
    # sleep so that other threads can terminate cleanly
    time.sleep(0.5)
    print_error("Daemon exiting")
if __name__ == '__main__':
    # Foreground entry point: run the network server and accept loop
    # directly (Python 2 script -- note the print statement below).
    import simple_config, util
    config = simple_config.SimpleConfig()
    util.set_verbosity(True)
    server = NetworkServer(config)
    server.start()
    try:
        daemon_loop(server)
    except KeyboardInterrupt:
        print "Ctrl C - Stopping daemon"
        server.stop()
        sys.exit(1)
| mazaclub/tate | lib/daemon.py | Python | gpl-3.0 | 6,841 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
QGIS Web Processing Service Plugin
-------------------------------------------------------------------
Date : 09 November 2009
Copyright : (C) 2009 by Dr. Horst Duester
email : horst dot duester at kappasys dot ch
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import str
from qgis.PyQt.QtCore import *
from qgis.PyQt import QtXml
from qgis.PyQt.QtWidgets import QApplication, QMessageBox
from qgis.PyQt.QtSql import *
from qgis.core import QgsVectorFileWriter, QgsDataSourceUri
import os, sys, string, tempfile, base64
from ..apicompat.sipv2.compat import pystring
import html
# Execute example:
#
#<?xml version="1.0" encoding="utf-8" standalone="yes"?>
#<wps:Execute service="WPS" version="1.0.0"
#xmlns:wps="http://www.opengis.net/wps/1.0.0"
#xmlns:ows="http://www.opengis.net/ows/1.1"
#xmlns:xlink="http://www.w3.org/1999/xlink"
#xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
#xsi:schemaLocation="http://www.opengis.net/wps/1.0.0 http://schemas.opengis.net/wps/1.0.0/wpsExecute_request.xsd">
#
# <ows:Identifier>returner</ows:Identifier>
# <wps:DataInputs>
# <wps:Input>
# <ows:Identifier>data</ows:Identifier>
# <ows:Title>data</ows:Title>
# <wps:Data>
# <wps:ComplexData mimeType="text/xml" schema="">
# <ogr:FeatureCollection xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
# xsi:schemaLocation="/"
# xmlns:ogr="http://ogr.maptools.org/"
# xmlns:gml="http://www.opengis.net/gml">
# <gml:boundedBy>
# <gml:Box>
# <gml:coord>
# <gml:X>9.5</gml:X>
# <gml:Y>47.0666667</gml:Y>
# </gml:coord>
# <gml:coord>
# <gml:X>9.6083946</gml:X>
# <gml:Y>47.2397558</gml:Y>
# </gml:coord>
# </gml:Box>
# </gml:boundedBy>
# <gml:featureMember>
# <ogr:qt_temp fid="qt_temp.0">
# <ogr:geometryProperty>
# <gml:Point>
# <gml:coordinates>
# 9.5450182,47.178495</gml:coordinates>
# </gml:Point>
# </ogr:geometryProperty>
# <ogr:osm_id>32011241</ogr:osm_id>
# <ogr:name>Oberplanken</ogr:name>
# <ogr:type>suburb</ogr:type>
# <ogr:population>0</ogr:population>
# </ogr:qt_temp>
# </gml:featureMember>
# <!-- ... -->
# </ogr:FeatureCollection>
# </wps:ComplexData>
# </wps:Data>
# </wps:Input>
# <wps:Input>
# <ows:Identifier>text</ows:Identifier>
# <ows:Title>text</ows:Title>
# <wps:Data>
# <wps:LiteralData>25</wps:LiteralData>
# </wps:Data>
# </wps:Input>
# </wps:DataInputs>
# <wps:ResponseForm>
# <wps:ResponseDocument lineage="false"
# storeExecuteResponse="false" status="false">
# <wps:Output>
# <ows:Identifier>text</ows:Identifier>
# </wps:Output>
# <wps:Output asReference="true" mimeType="text/xml">
# <ows:Identifier>output2</ows:Identifier>
# </wps:Output>
# <wps:Output asReference="true" mimeType="text/xml">
# <ows:Identifier>output1</ows:Identifier>
# </wps:Output>
# </wps:ResponseDocument>
# </wps:ResponseForm>
#</wps:Execute>
def createTmpBase64(rLayer):
    """Return the base64 encoding of *rLayer*'s on-disk source file.

    Only file-based providers ('ogr' and 'gdal') are supported; for any
    other provider an error dialog is shown and '' is returned.

    Fixes over the original: the temporary file is no longer leaked on
    the unsupported-provider path (it is only created when needed and is
    always removed), the source is opened in binary mode as required by
    base64.encode() under Python 3, and the large blocks of dead
    commented-out code were removed.
    """
    if rLayer.dataProvider().name() not in ('ogr', 'gdal'):
        QMessageBox.critical(None, QApplication.translate("QgsWps",'Error'), QApplication.translate("QgsWps",'Datatype %s of layer %s is not supported!' % (rLayer.dataProvider().name(), rLayer.name())))
        return ''
    # Encode the layer's source file into a throw-away temporary file,
    # then read the encoded text back and clean up.
    tmpFile = tempfile.NamedTemporaryFile(prefix="base64", delete=False)
    tmpFileName = tmpFile.name
    try:
        # base64.encode() requires binary-mode file objects.
        with open(rLayer.source(), 'rb') as infile:
            base64.encode(infile, tmpFile)
        tmpFile.close()
        with open(tmpFileName, 'r') as outfile:
            return outfile.read()
    finally:
        if not tmpFile.closed:
            tmpFile.close()
        os.remove(tmpFileName)
def createTmpGML(vLayer, processSelection="False", supportedGML="GML2"):
    """Export *vLayer* to a temporary GML file and return its contents.

    processSelection: when truthy and features are selected, only the
    selection is exported.  supportedGML selects the GML flavour; an
    empty string means the server supports no GML at all, in which case
    0 is returned.  On success returns the GML body as a string with the
    leading XML declaration line stripped; on writer failure an error
    dialog is shown (and None is implicitly returned).
    """
    if supportedGML == "": # Neither GML, GML2 or GML3 are supported!
        return 0
    # Use QTemporaryFile only to obtain a unique path; the actual file
    # is written by QgsVectorFileWriter below.
    myQTempFile = QTemporaryFile()
    myQTempFile.open()
    tmpFile = myQTempFile.fileName()+".gml"
    myQTempFile.close()
    if vLayer.dataProvider().name() == "postgres":
        # Postgres layers must use the database's encoding, not the
        # provider's reported one.
        encoding = getDBEncoding(vLayer.dataProvider())
    else:
        encoding = vLayer.dataProvider().encoding()
    processSelected = False
    if processSelection and vLayer.selectedFeatureCount() > 0:
        processSelected = True
    # FORMAT=GML3 only works with OGR >= 1.8.0, otherwise GML2 is always returned
    if supportedGML == "GML3":
        dso = ["FORMAT=GML3"]
    else: # "GML" or "GML2"
        dso = []
    lco = []
    # error = QgsVectorFileWriter.writeAsVectorFormat(vLayer, tmpFile, encoding, vLayer.dataProvider().crs(), "GML", processSelected, "", dso, lco)
    error, msg = QgsVectorFileWriter.writeAsVectorFormat(vLayer, tmpFile, encoding, vLayer.dataProvider().crs(), "GML", processSelected, dso, lco)
    if error != QgsVectorFileWriter.NoError:
        QMessageBox.information(None, 'Error', 'Process stopped with errors\n'+msg)
    else:
        myFile = QFile(tmpFile)
        if (not myFile.open(QIODevice.ReadOnly | QIODevice.Text)):
            QMessageBox.information(None, '', QApplication.translate("QgsWps","File open problem"))
            pass
        myGML = QTextStream(myFile)
        myGML.setCodec(encoding)
        gmlString = ""
        # Overread the first Line of GML Result
        dummy = myGML.readLine()
        gmlString += myGML.readAll()
        myFile.close()
        # Remove both the .gml and the companion .xsd the writer produced.
        myFilePath = QFileInfo(myFile).dir().path()
        myFileInfo = myFilePath+'/'+QFileInfo(myFile).completeBaseName()
        QFile(myFileInfo+'.xsd').remove()
        QFile(myFileInfo+'.gml').remove()
        return pystring(gmlString).strip()
def getDBEncoding(layerProvider):
    """Return the server-side encoding name of the PostgreSQL database
    backing *layerProvider*, queried over a temporary Qt SQL connection.

    NOTE(review): the named connection "WPSClient" is added but never
    removed with removeDatabase(); repeated calls reuse/overwrite it.
    """
    dbConnection = QgsDataSourceUri(layerProvider.dataSourceUri())
    db = QSqlDatabase.addDatabase("QPSQL","WPSClient")
    db.setHostName(dbConnection.host())
    db.setDatabaseName(dbConnection.database())
    db.setUserName(dbConnection.username())
    db.setPassword(dbConnection.password())
    db.setPort(int(dbConnection.port()))
    db.open()
    # Look the encoding up in the system catalog for this database.
    query = "select pg_encoding_to_char(encoding) as encoding "
    query += "from pg_catalog.pg_database "
    query += "where datname = '"+dbConnection.database()+"' "
    result = QSqlQuery(query,db)
    result.first()
    encoding = pystring(result.value(0))
    db.close()
    return encoding
def htmlescape(text):
    """Escape '&', '<', '>' and both quote characters so *text* can be
    embedded safely in the XML request."""
    return html.escape(text, quote=True)
class ExecutionRequest(QObject):
    """
    Compose request XML for WPS execution

    The XML document is accumulated incrementally in ``self.request`` by
    calling the add* methods in protocol order: header, data inputs,
    response form, end.  All caller-supplied values are passed through
    htmlescape() before being embedded.
    """
    def __init__(self, process):
        QObject.__init__(self)
        self.process = process
        self.request = ""

    def addExecuteRequestHeader(self):
        """Start the wps:Execute document with namespaces and identifier."""
        identifier = htmlescape(self.process.processIdentifier)
        version = htmlescape(self.process.getServiceVersion())
        self.request = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n"
        self.request += "<wps:Execute service=\"WPS\" version=\""+ version + "\"" + \
                        " xmlns:wps=\"http://www.opengis.net/wps/1.0.0\"" + \
                        " xmlns:ows=\"http://www.opengis.net/ows/1.1\"" +\
                        " xmlns:xlink=\"http://www.w3.org/1999/xlink\"" +\
                        " xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\""\
                        " xsi:schemaLocation=\"http://www.opengis.net/wps/1.0.0" +\
                        " http://schemas.opengis.net/wps/1.0.0/wpsExecute_request.xsd\">"
        self.request += "<ows:Identifier>"+identifier+"</ows:Identifier>\n"

    def addExecuteRequestEnd(self):
        """Close the wps:Execute element."""
        self.request += "</wps:Execute>"

    def addDataInputsStart(self):
        self.request += "<wps:DataInputs>\n"

    def addDataInputsEnd(self):
        self.request += "</wps:DataInputs>\n"

    def addExecuteRequestInputStart(self, identifier, includeData=True):
        """Open a wps:Input element; includeData also opens wps:Data
        (reference inputs skip it)."""
        identifier = htmlescape(identifier)
        self.request += "<wps:Input>\n"
        self.request += "<ows:Identifier>"+identifier+"</ows:Identifier>\n"
        self.request += "<ows:Title>"+identifier+"</ows:Title>\n"
        if includeData: self.request += "<wps:Data>\n"

    def addExecuteRequestInputEnd(self, includeData=True):
        """Close wps:Input (and wps:Data when it was opened)."""
        if includeData: self.request += "</wps:Data>\n"
        self.request += "</wps:Input>\n"

    def addReferenceInput(self, identifier, mimeType, schema, encoding, ref):
        """Add an input passed by reference (xlink:href) instead of inline."""
        # text/plain inputs ########################################################
        # Handle 'as reference' playlist
        mimeType = htmlescape(mimeType)
        schema = "schema=\"%s\"" % htmlescape(schema) if schema else ""
        encoding = "encoding=\"%s\"" % htmlescape(encoding) if encoding else ""
        ref = htmlescape(ref)
        self.addExecuteRequestInputStart(identifier, False)
        self.request += "<wps:Reference mimeType=\"%s\" %s %s xlink:href=\"%s\" />\n" % (mimeType, schema, encoding, ref)
        self.addExecuteRequestInputEnd(False)

    def addPlainTextInput(self, identifier, text):
        """Add an inline text input as escaped ComplexData."""
        # text/plain inputs ########################################################
        # It's not a playlist
        self.addExecuteRequestInputStart(identifier)
        self.request += "<wps:ComplexData>" + htmlescape(text) + "</wps:ComplexData>\n"
        self.addExecuteRequestInputEnd()

    def addGeometryInput(self, identifier, mimeType, schema, encoding, gmldata, useSelected):
        """Add a single vector input as inline GML wrapped in CDATA."""
        # Single raster and vector inputs ##########################################
        mimeType = htmlescape(mimeType)
        schema = htmlescape(schema)
        encoding = 'encoding="%s"' % htmlescape(encoding) if encoding else ''
        #if self.tools.isMimeTypeVector(mimeType) != None and encoding != "base64":
        self.addExecuteRequestInputStart(identifier)
        self.request += "<wps:ComplexData mimeType=\"%s\" schema=\"%s\" %s>" % (mimeType, schema, encoding)
        self.request += "<![CDATA["
        self.request += gmldata.replace("> <","><")
        # Rewrite the temp-file schemaLocation emitted by OGR to point at
        # the declared schema.
        self.request = self.request.replace("xsi:schemaLocation=\"http://ogr.maptools.org/ qt_temp.xsd\"",
                                            "xsi:schemaLocation=\"" + schema.rsplit('/',1)[0] + "/ " + schema + "\"")
        self.request += "]]>"
        self.request += "</wps:ComplexData>\n"
        # self.request += "<wps:ComplexData mimeType=\"%s\" schema=\"%s\" %s>" % (mimeType, schema, encoding)
        # self.request += gmldata.replace("> <","><")
        #
        # self.request = self.request.replace("xsi:schemaLocation=\"http://ogr.maptools.org/ qt_temp.xsd\"",
        #                                     "xsi:schemaLocation=\"" + schema.rsplit('/',1)[0] + "/ " + schema + "\"")
        #
        # self.request += "</wps:ComplexData>\n"
        self.addExecuteRequestInputEnd()

    def addGeometryBase64Input(self, identifier, mimeType, data):
        """Add a single raster/vector layer input base64-encoded."""
        # Single raster and vector inputs ##########################################
        mimeType = htmlescape(mimeType)
        #elif self.tools.isMimeTypeVector(mimeType) != None or self.tools.isMimeTypeRaster(mimeType) != None:
        self.addExecuteRequestInputStart(identifier)
        self.request += "<wps:ComplexData mimeType=\"" + mimeType + "\" encoding=\"base64\">\n"
        self.request += createTmpBase64(data)
        self.request += "</wps:ComplexData>\n"
        self.addExecuteRequestInputEnd()

    def addFileBase64Input(self, identifier, mimeType, filename):
        """Add an arbitrary file input base64-encoded."""
        mimeType = htmlescape(mimeType)
        self.addExecuteRequestInputStart(identifier)
        self.request += "<wps:ComplexData mimeType=\"" + mimeType + "\" encoding=\"base64\">\n"
        file = open(filename, 'r')
        self.request += base64.encodestring(file.read())
        file.close()
        self.request += "</wps:ComplexData>\n"
        self.addExecuteRequestInputEnd()

    def addMultipleGeometryInput(self, identifier, mimeType, schema, encoding, gmldata, useSelected):
        """Add one of several vector inputs as inline GML (no CDATA)."""
        # Multiple raster and vector inputs ########################################
        mimeType = htmlescape(mimeType)
        schema = htmlescape(schema)
        encoding = 'encoding="%s"' % htmlescape(encoding) if encoding else ''
        #if self.tools.isMimeTypeVector(mimeType) != None and mimeType == "text/xml":
        self.addExecuteRequestInputStart(identifier)
        self.request += "<wps:ComplexData mimeType=\"%s\" schema=\"%s\" %s>" % (mimeType, schema, encoding)
        self.request += gmldata.replace("> <", "><")
        self.request += "</wps:ComplexData>\n"
        self.addExecuteRequestInputEnd()

    def addMultipleGeometryBase64Input(self, identifier, mimeType, data):
        """Add one of several raster/vector layer inputs base64-encoded."""
        # Multiple raster and vector inputs ########################################
        #elif self.tools.isMimeTypeVector(mimeType) != None or self.tools.isMimeTypeRaster(mimeType) != None:
        mimeType = htmlescape(mimeType)
        self.addExecuteRequestInputStart(identifier)
        self.request += "<wps:ComplexData mimeType=\"" + mimeType + "\" encoding=\"base64\">\n"
        self.request += createTmpBase64(data)
        self.request += "</wps:ComplexData>\n"
        self.addExecuteRequestInputEnd()

    def addLiteralDataInput(self, identifier, text):
        """Add a literal (scalar) input value."""
        self.addExecuteRequestInputStart(identifier)
        self.request += "<wps:LiteralData>"+htmlescape(str(text))+"</wps:LiteralData>\n"
        self.addExecuteRequestInputEnd()

    def addBoundingBoxInput(self, identifier, bboxArray):
        """Add a 2D bounding box input; bboxArray is [minx, miny, maxx, maxy]
        as strings."""
        self.addExecuteRequestInputStart(identifier)
        self.request += '<wps:BoundingBoxData ows:dimensions="2">'
        self.request += '<ows:LowerCorner>'+bboxArray[0]+' '+bboxArray[1]+'</ows:LowerCorner>'
        self.request += '<ows:UpperCorner>'+bboxArray[2]+' '+bboxArray[3]+'</ows:UpperCorner>'
        self.request += "</wps:BoundingBoxData>\n"
        self.addExecuteRequestInputEnd()

    def addResponseFormStart(self):
        self.request += "<wps:ResponseForm>\n"
        # The server should store the result. No lineage should be returned or status
        self.request += "<wps:ResponseDocument lineage=\"false\" storeExecuteResponse=\"false\" status=\"false\">\n"

    def addResponseFormEnd(self):
        self.request += "</wps:ResponseDocument>\n"
        self.request += "</wps:ResponseForm>\n"

    def addLiteralDataOutput(self, identifier):
        """Request a literal output to be returned inline."""
        # Attach ALL literal outputs #############################################
        self.request += "<wps:Output>\n"
        self.request += "<ows:Identifier>"+identifier+"</ows:Identifier>\n"
        self.request += "</wps:Output>\n"

    def addReferenceOutput(self, identifier, mimeType, schema, encoding):
        """Request a complex output to be returned as a reference (URL)."""
        mimeType = htmlescape(mimeType)
        schema = "schema=\"%s\"" % htmlescape(schema) if schema else ""
        encoding = "encoding=\"%s\"" % htmlescape(encoding) if encoding else ""
        self.request += "<wps:Output asReference=\"true\" mimeType=\"%s\" %s %s >" % (mimeType, schema, encoding)
        # Playlists can be sent as reference or as complex data
        # For the latter, comment out next lines
        #self.request += "<wps:Output asReference=\"" + \
        #  ("false" if "playlist" in mimeType.lower() else "true") + \
        #  "\" mimeType=\"" + mimeType + \
        #  (("\" schema=\"" + schema) if schema != "" else "") + "\">"
        self.request += "<ows:Identifier>" + htmlescape(identifier) + "</ows:Identifier>\n"
        self.request += "</wps:Output>\n"
| sourcepole/qgis-wps-client | wpslib/executionrequest.py | Python | gpl-2.0 | 17,172 |
#!/usr/bin/env python
# From
# http://twistedmatrix.com/documents/current/web/howto/using-twistedweb.html
#
# And modified to accept a maximum size following the example at:
# http://stackoverflow.com/questions/6491932/need-help-writing-a-twisted-proxy
# JBC. March 2012.
"""\
Be a nice http proxy server. To use with scrapy spiders.
"""
import sys, logging
from optparse import OptionParser
from urlparse import urlparse
from twisted.web import proxy, http
from twisted.internet import reactor
# So a proxy.Proxy really has the meat deep inside, in ProxyClient:
#
# Proxy.requestFactory = ProxyRequest
# ProxyRequest.protocols = {'http': ProxyClientFactory}
# ProxyClientFactory.protocol = ProxyClient
# ProxyClient has rawDataReceived (from HTTPClient) and handleResponseEnd
#
# All this is in twisted/web/proxy.py
#
# Our main interest is the rawDataReceived() function it inherits from
# HTTPClient, at twisted/web/http.py
class MyProxyClient(proxy.ProxyClient):
    """ProxyClient that buffers the upstream response body and cuts it
    off once it exceeds a per-URL-extension size limit."""
    # Per-extension caps in bytes; anything else falls back to 500000.
    MAX_BYTES = {'html': 10000000,
                 'xml': 10000000}
    # would be nice if configurable, but wtf

    def __init__(self, *args, **kwargs):
        proxy.ProxyClient.__init__(self, *args, **kwargs)
        self.buffer = ''
        # self.rest holds the request URI (set by ProxyClient); its path
        # extension picks the applicable size cap.
        self.extension = urlparse(self.rest).path.split('.')[-1]

    def rawDataReceived(self, data):
        """Accumulate body data, aborting the response at the cap."""
        self.buffer += data
        max_bytes = self.MAX_BYTES.get(self.extension, 500000)
        if len(self.buffer) > max_bytes:
            logging.warning('Total data (%d) exceeded maximum size (%d)' % \
                            (len(self.buffer), max_bytes))
            self.handleResponseEnd()

    def handleResponseEnd(self):
        """Flush the buffered (possibly truncated) body to the requester."""
        if not self._finished:
            # NOTE(review): the header value passed here is an int;
            # Twisted's setRawHeaders expects strings -- confirm this
            # behaves as intended.
            self.father.responseHeaders.setRawHeaders('content-length',
                                                      [len(self.buffer)])
            self.father.write(self.buffer)
        proxy.ProxyClient.handleResponseEnd(self)
class ProxyFactory(http.HTTPFactory):
    """HTTP factory whose nested protocol chain substitutes MyProxyClient
    so oversized upstream responses get truncated (see module comments
    for the Proxy -> ProxyRequest -> ProxyClientFactory -> ProxyClient
    chain this overrides)."""
    class MyProxy(http.HTTPChannel):
        class ProxyRequest(proxy.ProxyRequest):
            class MyProxyClientFactory(proxy.ProxyClientFactory):
                protocol = MyProxyClient
            protocols = {'http': MyProxyClientFactory}

            def process(self):
                try:
                    proxy.ProxyRequest.process(self)
                except KeyError:
                    # e.g. a malformed request the base class cannot parse.
                    logging.warning('Error processing request - skipped')
                # TODO: trap other errors, like using Request.notifyFinish
                # This may come handy:
                # http://twistedmatrix.com/documents/current/web/howto/web-in-60/interrupted.html
        requestFactory = ProxyRequest

    def buildProtocol(self, addr):
        logging.info('New connection from: %s' % addr)
        return self.MyProxy()
def main():
    """Parse command line options, configure logging and run the proxy
    until the Twisted reactor is stopped."""
    parser = OptionParser(usage='%prog [--port <num>] [--verbose]',
                          description=__doc__)
    parser.add_option('-p', '--port', type='int', default=8080,
                      help='listening port')
    parser.add_option('-l', '--logfile', default='-',
                      help='name of logging file ("-" for stderr)')
    parser.add_option('-v', '--verbose', action='store_true')
    opts, rest = parser.parse_args()
    kwargs = {'format': '%(asctime)s %(levelname)s %(message)s',
              'level': logging.DEBUG if opts.verbose else logging.WARN}
    if opts.logfile != '-':
        # Only pass filename when logging to a file; '-' keeps stderr.
        kwargs['filename'] = opts.logfile
    logging.basicConfig(**kwargs)
    reactor.listenTCP(opts.port, ProxyFactory())
    reactor.run()


if __name__ == '__main__':
    main()
| torrents-com/content | scrapy/torrents/remote/http_proxy.py | Python | agpl-3.0 | 3,618 |
"Thread-safe in-memory cache backend."
import time
from contextlib import contextmanager
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.utils.synch import RWLock
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
# Global in-memory store of cache data. Keyed by name, to provide
# multiple named local memory caches.
_caches = {}
_expire_info = {}
_locks = {}
@contextmanager
def dummy():
    """No-op context manager, used in place of a lock when the caller
    already holds it."""
    yield
class LocMemCache(BaseCache):
    """Thread-safe in-process cache backend.

    All instances with the same name share the module-level _caches /
    _expire_info / _locks stores.  Values are stored pickled so cached
    objects are insulated from later mutation by callers.  A reader/
    writer lock guards every access.
    """
    def __init__(self, name, params):
        BaseCache.__init__(self, params)
        self._cache = _caches.setdefault(name, {})
        self._expire_info = _expire_info.setdefault(name, {})
        self._lock = _locks.setdefault(name, RWLock())

    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """Store *value* only when the key is absent or expired; return
        True iff the value was stored."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
        with self._lock.writer():
            if self._has_expired(key):
                self._set(key, pickled, timeout)
                return True
            return False

    def get(self, key, default=None, version=None, acquire_lock=True):
        """Return the cached value or *default*.

        acquire_lock=False is used by incr(), which already holds the
        writer lock; taking the reader lock again would deadlock.
        """
        key = self.make_key(key, version=version)
        self.validate_key(key)
        pickled = None
        with (self._lock.reader() if acquire_lock else dummy()):
            if not self._has_expired(key):
                pickled = self._cache[key]
        if pickled is not None:
            try:
                return pickle.loads(pickled)
            except pickle.PickleError:
                return default
        # Key missing or expired: purge it under the writer lock.
        with (self._lock.writer() if acquire_lock else dummy()):
            try:
                del self._cache[key]
                del self._expire_info[key]
            except KeyError:
                pass
            return default

    def _set(self, key, value, timeout=DEFAULT_TIMEOUT):
        # Caller must hold the writer lock.
        if len(self._cache) >= self._max_entries:
            self._cull()
        self._cache[key] = value
        self._expire_info[key] = self.get_backend_timeout(timeout)

    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        key = self.make_key(key, version=version)
        self.validate_key(key)
        pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
        with self._lock.writer():
            self._set(key, pickled, timeout)

    def incr(self, key, delta=1, version=None):
        """Atomically add *delta* to an existing value; raises ValueError
        when the key is missing or expired."""
        with self._lock.writer():
            value = self.get(key, version=version, acquire_lock=False)
            if value is None:
                raise ValueError("Key '%s' not found" % key)
            new_value = value + delta
            key = self.make_key(key, version=version)
            pickled = pickle.dumps(new_value, pickle.HIGHEST_PROTOCOL)
            self._cache[key] = pickled
        return new_value

    def has_key(self, key, version=None):
        key = self.make_key(key, version=version)
        self.validate_key(key)
        with self._lock.reader():
            if not self._has_expired(key):
                return True
        # Expired: remove the stale entry before reporting absence.
        with self._lock.writer():
            try:
                del self._cache[key]
                del self._expire_info[key]
            except KeyError:
                pass
            return False

    def _has_expired(self, key):
        # A stored expiry of None means "never expires"; a missing key
        # maps to -1, which is always in the past.
        exp = self._expire_info.get(key, -1)
        if exp is None or exp > time.time():
            return False
        return True

    def _cull(self):
        # Drop every _cull_frequency-th key (or everything when 0).
        if self._cull_frequency == 0:
            self.clear()
        else:
            doomed = [k for (i, k) in enumerate(self._cache) if i % self._cull_frequency == 0]
            for k in doomed:
                self._delete(k)

    def _delete(self, key):
        try:
            del self._cache[key]
        except KeyError:
            pass
        try:
            del self._expire_info[key]
        except KeyError:
            pass

    def delete(self, key, version=None):
        key = self.make_key(key, version=version)
        self.validate_key(key)
        with self._lock.writer():
            self._delete(key)

    def clear(self):
        self._cache.clear()
        self._expire_info.clear()
| BitWriters/Zenith_project | zango/lib/python3.5/site-packages/django/core/cache/backends/locmem.py | Python | mit | 4,287 |
# Copyright 2014 Massimo Santini, Raffaella Migliaccio
#
# This file is part of MarkovDrummer.
#
# MarkovDrummer is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MarkovDrummer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MarkovDrummer. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import # to allow for import from midi (the external library)
from pprint import PrettyPrinter; pp = PrettyPrinter( indent = 4 ).pprint
from math import ceil
import midi
from ..midi.utils import track2eventdict, eventdict2track, is_noteon
from ..midi.constants import pitch2part
def events2beat( events ):
    """Collapse *events* into the sorted tuple of distinct note-on pitches."""
    pitches = set()
    for event in events:
        if is_noteon( event ):
            pitches.add( event.data[ 0 ] )
    return tuple( sorted( pitches ) )
def beat2events( beat ):
    """Build a channel-10 (index 9), velocity-120 note-on event for every
    pitch in *beat*."""
    events = []
    for pitch in beat:
        events.append( midi.NoteOnEvent( tick = 0, channel = 9, data = [ pitch, 120 ] ) )
    return events
def beats2track( beats, tick, tick_off = None ):
    """Convert a beat sequence into a midi.Track, one beat every *tick* ticks.

    tick_off is the note-off offset handed to eventdict2track; it defaults
    to a quarter of the beat spacing.  Empty beats produce no events.

    Fix: use integer (floor) division for the default tick_off -- the
    original ``tick / 4`` yields a float under Python 3 true division
    (identical result for ints under Python 2).
    """
    if tick_off is None: tick_off = tick // 4
    eventdict = dict()
    for n, beat in enumerate( beats ):
        events_at_tick = beat2events( beat )
        if events_at_tick: eventdict[ n * tick ] = events_at_tick
    tmp = eventdict2track( eventdict, tick_off )
    return midi.Track( tmp )
def track2beats( track, tick_per_quantum ):
    """Inverse of beats2track: quantize *track* into a list of beats.

    Each beat is a tuple of note-on pitches; silent quanta between sounding
    beats are padded with empty tuples.

    Fix: use integer (floor) division for the beat index -- the original
    ``tick / tick_per_quantum`` yields a float under Python 3 true
    division, breaking the index arithmetic (identical result for ints
    under Python 2).
    """
    t2ed = track2eventdict( track )
    beats = []
    last_beat = 0
    for tick, events in sorted( t2ed.items() ):
        beat = tick // tick_per_quantum
        # Pad the gap since the previous sounding beat with silent beats.
        delta = beat - last_beat - 1
        if delta: beats.extend( [ tuple() ] * delta )
        events = events2beat( events )
        if events: beats.append( events )
        last_beat = beat
    return beats
def writetables( tables, path ):
    """Write *tables* (a list of HTML <table> fragment strings) into a
    minimal standalone HTML page at *path*.

    In the template below, doubled braces ({{ }}) are literal CSS braces;
    the single {} near the bottom is the str.format slot that receives
    the joined tables.
    """
    TABLE_HTML = """
<!DOCTYPE html>
<head>
<meta charset="utf-8">
<title>Explain</title>
<style>

table {{
	margin: 1em;
}}

table, td, th {{
	border: 1pt solid black;
	border-collapse: collapse;
}}

td {{
	min-width: 1em;
	max-width: 1em;
	width: 1em;
}}

.nob {{
	border: none;
}}

.on {{
	background-color: green;
}}

th {{
	white-space: nowrap;
	text-align: left;
}}

</style>
</head>
<body>
{}
</body>
</html>
"""
    with open( path, 'w' ) as f: f.write( TABLE_HTML.format( '\n'.join( tables ) ) )
def beats2pitches( beats ):
    """Return every pitch appearing anywhere in *beats*, sorted from
    highest to lowest."""
    return sorted( { pitch for beat in beats for pitch in beat }, reverse = True )
def beats2table( beats ):
    """Render *beats* as an HTML table: one row per pitch (highest first,
    labelled via pitch2part), one column per beat, green 'X' cells where
    the pitch sounds."""
    table = [ '<table>' ]
    if len( beats ) < 10:
        # Few enough beats to number the columns 01..NN.
        table.append( ''.join(
            [ '<th> ' ] + [ '<th>{:02}'.format( n ) for n in range( 1, len( beats ) + 1 ) ]
        ) )
    else:
        # Too many columns for numbering; emit an empty header row.
        table.append( ''.join( [ '<th> ' ] * ( len( beats ) + 1 ) ) )
    for pitch in beats2pitches( beats ):
        table.append( ''.join(
            [ '<tr><th>' + pitch2part( pitch ) ]
            + [ '<td class=on>X' if pitch in beat else '<td> ' for beat in beats ]
        ) )
    table.append( '</table>' )
    return '\n'.join( table )
def model2tables( model ):
    """Render a Markov model (dict mapping an ngram of beats to the list
    of possible next beats) as one HTML table per entry.

    A shared, sorted pitch list is computed over the whole model first so
    every table has the same rows and they line up visually.
    """
    def _t( ngram, nexts, pitches ):
        # Render one ngram/nexts pair as a single table.
        table = [ '<table>' ]
        table.append( ''.join(
            [ '<th> <th colspan={}>ngram'.format( len( ngram ) ) ] + [ '<th> ' ] * len( nexts )
        ) )
        for pitch in pitches:
            table.append( ''.join(
                [ '<tr><th>' + pitch2part( pitch ) ]
                + [ '<td class=on>X' if pitch in beat else '<td> ' for beat in ngram ]
                + [ '<td class=on>X' if pitch in beat else '<td> ' for beat in nexts ]
            ) )
        table.append( '</table>' )
        return '\n'.join( table )
    # Collect the union of pitches over ngrams and their continuations.
    pitches = set()
    for ngram, nexts in model.items():
        pitches.update( beats2pitches( ngram + tuple( nexts ) ) )
    pitches = sorted( pitches, reverse = True )
    tables = []
    for ngram, nexts in model.items():
        tables.append( _t( ngram, nexts, pitches ) )
    return tables
| mapio/markovdrummer | markovdrummer/midi/symbolic.py | Python | gpl-3.0 | 4,031 |
import fileinput
import argparse
from astexport import __version__, __prog_name__
from astexport.parse import parse
from astexport.export import export_json
def create_parser():
    """Build the command line argument parser for astexport."""
    description = "Python source code in, JSON AST out. (v{})".format(__version__)
    parser = argparse.ArgumentParser(prog=__prog_name__, description=description)
    parser.add_argument(
        "-i", "--input",
        default="-",
        help="file to read from or '-' to use standard input (default)"
    )
    parser.add_argument(
        "-p", "--pretty",
        action="store_true",
        help="print indented JSON"
    )
    parser.add_argument(
        "-v", "--version",
        action="store_true",
        help="print version and exit"
    )
    return parser
def main():
    """Read source from stdin, parse and export the AST as JSON"""
    args = create_parser().parse_args()
    if args.version:
        print("{} version {}".format(__prog_name__, __version__))
        return
    # fileinput treats "-" (the default) as standard input.
    source = "".join(fileinput.input(args.input))
    print(export_json(parse(source), args.pretty))
| fpoli/python-astexport | astexport/cli.py | Python | mit | 1,148 |
from flask import jsonify, request

from app import app, db
from app.decorators import json_content
from app.models import Community, User
from app.resources import ProtectedResource
from app.util import is_not_valid_entity_name
class CommunityResource(ProtectedResource):
    """REST resource for listing and creating communities."""

    def get(self):
        """Return every community, serialized, under the 'communities' key."""
        communities = Community.query.all()
        return {'communities': [c.serialize for c in communities]}

    @json_content
    def post(self):
        """Create a new community owned by the first user in the database.

        Responds 400 when the name is missing, invalid, or already taken.
        (The original code returned 200 with an error payload for the last
        two cases, and referenced `request`/`User` without importing them,
        which raised NameError at runtime.)
        """
        if 'name' not in request.json:
            return {'message': "missed required name field"}, 400
        community_name = request.json['name']
        if is_not_valid_entity_name(community_name):
            return {'message': "invalid name"}, 400
        if Community.query.filter_by(name=community_name).count() != 0:
            return {'message': "name already used"}, 400
        # NOTE(review): ownership is hard-wired to the first User row;
        # presumably this should be the authenticated user — confirm.
        db.session.add(Community(name=community_name, owner=User.query.first()))
        db.session.commit()
        return ""
| dpfg/kicker-scorer-api | app/resources/communities.py | Python | mit | 959 |
from __future__ import absolute_import, unicode_literals
from dash.orgs.models import Org
from dateutil.relativedelta import relativedelta
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from optparse import make_option
from temba.utils import format_iso8601
from tracpro.polls.models import Poll, Response
class Command(BaseCommand):
    """Fetch historical flow runs for an org and save them as poll Responses.

    Python 2 / legacy-Django management command (optparse-style options,
    ``except ValueError, e`` syntax). Usage: ``fetchruns <org_id> --days N``.
    """
    args = "org_id [options]"
    option_list = BaseCommand.option_list + (
        make_option('--minutes',
                    action='store',
                    type='int',
                    dest='minutes',
                    default=0,
                    help='Number of previous minutes to fetch'),
        make_option('--hours',
                    action='store',
                    type='int',
                    dest='hours',
                    default=0,
                    help='Number of previous hours to fetch'),
        make_option('--days',
                    action='store',
                    type='int',
                    dest='days',
                    default=0,
                    help='Number of previous days to fetch'),)

    help = 'Fetches old responses for the currently active polls'

    def handle(self, *args, **options):
        org_id = int(args[0]) if args else None
        if not org_id:
            # NOTE(review): message typo — "Most" should read "Must".
            raise CommandError("Most provide valid org id")

        try:
            org = Org.objects.get(pk=org_id)
        except Org.DoesNotExist:
            raise CommandError("No such org with id %d" % org_id)

        # At least one time window flag is required; they are additive.
        minutes, hours, days = options['minutes'], options['hours'], options['days']
        if not (minutes or hours or days):
            raise CommandError("Must provide at least one of --minutes --hours or --days")

        since = timezone.now() - relativedelta(minutes=minutes, hours=hours, days=days)

        self.stdout.write('Fetching responses for org %s since %s...' % (org.name, since.strftime('%b %d, %Y %H:%M')))

        client = org.get_temba_client()
        # Map flow UUID -> Poll so each fetched run can be matched to its poll.
        polls_by_flow_uuids = {p.flow_uuid: p for p in Poll.get_all(org)}

        runs = client.get_runs(flows=polls_by_flow_uuids.keys(), after=since)

        self.stdout.write("Fetched %d runs for org %s" % (len(runs), org.id))

        created = 0
        updated = 0
        for run in runs:
            poll = polls_by_flow_uuids[run.flow]
            try:
                response = Response.from_run(org, run, poll=poll)
            except ValueError, e:
                # Bad run data: log and keep processing the remaining runs.
                self.stderr.write("Unable to save run #%d due to error: %s" % (run.id, e.message))
                continue
            # Response.from_run presumably flags freshly created rows via
            # `is_new`; anything else counts as an update — TODO confirm.
            if getattr(response, 'is_new', False):
                created += 1
            else:
                updated += 1

        self.stdout.write("Created %d new responses and updated %d existing responses" % (created, updated))
| ewheeler/tracpro | tracpro/polls/management/commands/fetchruns.py | Python | bsd-3-clause | 2,846 |
# -*- coding: utf-8 -*-
#
# django-azurite documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 6 22:02:52 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os

# Make the package importable so its version can be read below.
sys.path.insert(0, os.path.abspath('..'))

from azurite import __version__

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_themes'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'django-azurite'
copyright = u'2013, Drew Tempelmeyer'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
# Provided by the bundled Flask theme under _themes.
pygments_style = 'flask_theme_support.FlaskyStyle'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme_path = ['_themes']
html_theme = 'flask'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    'index_logo': None,
    # 'github_fork': 'drewtempelmeyer/django-azurite',
}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'django-azuritedoc'

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'django-azurite.tex', u'django-azurite Documentation',
     u'Drew Tempelmeyer', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'django-azurite', u'django-azurite Documentation',
     [u'Drew Tempelmeyer'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'django-azurite', u'django-azurite Documentation',
     u'Drew Tempelmeyer', 'django-azurite', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
@frappe.whitelist()
def get_items(price_list, sales_or_purchase, item=None, item_group=None):
    """Return items with their price-list rate for the POS item selector.

    Args:
        price_list: name of the Price List whose rates are joined in.
        sales_or_purchase: "Sales" restricts to sales items, anything else
            to purchase items.
        item: optional substring matched against item code + name.
        item_group: optional Item Group filter ("All Item Groups" = no filter).
    """
    condition = ""
    args = {"price_list": price_list}

    if sales_or_purchase == "Sales":
        condition = "i.is_sales_item='Yes'"
    else:
        condition = "i.is_purchase_item='Yes'"

    if item_group and item_group != "All Item Groups":
        # Bind the value as a query parameter. The previous
        # item_group.replace("'", "\'") escape was a no-op ("\'" == "'"
        # in Python) and left the query open to SQL injection.
        condition += " and i.item_group=%(item_group)s"
        args["item_group"] = item_group

    if item:
        condition += " and CONCAT(i.name, i.item_name) like %(name)s"
        args["name"] = "%%%s%%" % item

    return frappe.db.sql("""select i.name, i.item_name, i.image,
        item_det.price_list_rate, item_det.currency
        from `tabItem` i LEFT JOIN
            (select item_code, price_list_rate, currency from
                `tabItem Price`    where price_list=%s) item_det
        ON
            item_det.item_code=i.name
        where
            %s""" % ('%(price_list)s', condition), args, as_dict=1)
@frappe.whitelist()
def get_item_code(barcode_serial_no):
    """Resolve a scanned value to an item, trying Serial No first, then barcode.

    Returns a (rows, input_via) pair where input_via is "serial_no" or
    "barcode"; raises a frappe validation error when neither matches.
    """
    serial_match = frappe.db.sql("""select name, item_code from `tabSerial No` where
        name=%s""", (barcode_serial_no), as_dict=1)
    if serial_match:
        return serial_match, "serial_no"

    barcode_match = frappe.db.sql("""select name from `tabItem` where barcode=%s""",
        (barcode_serial_no), as_dict=1)
    if barcode_match:
        return barcode_match, "barcode"

    frappe.throw(frappe._("Invalid Barcode or Serial No"))
@frappe.whitelist()
def get_mode_of_payment():
    """Return the list of all Mode of Payment records (for the POS payment UI)."""
    return frappe.get_list("Mode of Payment")
| suyashphadtare/vestasi-erp-1 | erpnext/erpnext/accounts/doctype/sales_invoice/pos.py | Python | agpl-3.0 | 1,595 |
import os
from flask import Flask
from flask.ext.mongoengine import MongoEngine
from flask.ext.login import LoginManager
# Default image upload location on the production host.
UPLOAD_FOLDER = '/srv/cars/cars/data/images'

# On Travis CI the images live inside the build checkout instead.
if 'TRAVIS' in os.environ:
    UPLOAD_FOLDER = '{0}/{1}'.format(os.environ['TRAVIS_BUILD_DIR'],
                                     'cars/data/images')

app = Flask(__name__)
#app.config['DEBUG'] = True
app.config['MONGODB_SETTINGS'] = {'DB': 'cars'}
# NOTE(review): hard-coded secret key — should come from the environment.
app.config['SECRET_KEY'] = 'super_secret_key'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER

db = MongoEngine(app)
# Enable MongoDB text search (required for pre-2.6 servers where it was off
# by default).
db.connection.admin.command('setParameter', textSearchEnabled=True)

lm = LoginManager(app)
lm.init_app(app)
lm.login_view = 'login'

# Imported at the bottom to avoid a circular import: controllers needs `app`.
from . import controllers

__version__ = '0.1'
| wiliamsouza/cars | cars/__init__.py | Python | apache-2.0 | 715 |
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coremltools.converters.mil.mil import types
from coremltools.converters.mil.mil.types import builtin_to_string
from coremltools.converters.mil.mil.types.symbolic import any_symbolic
class Var(object):
    """
    Var represents the outputs of an Operation. Most Vars are derived from an
    Operation (including const), and all Vars must have `sym_type`.

    Example Usage:

    from coremltools.converters.mil.mil import (
        Builder as mb,
        Function,
        types
    )

    func_inputs = {"a": mb.placeholder(shape=(1,2)),
                   "b": mb.placeholder(shape=(1,2)) }
    with Function(func_inputs) as ssa_func:
        a, b = ssa_func.inputs["a"], ssa_func.inputs["b"]
        res = mb.add(x=a, y=b) # res is Var
        assert types.is_tensor(res.sym_type)
        assert res.rank == 2
        assert res.dtype == types.float # since a, b are by default float

        # value is not available at compile time in this case. If
        # materializable, res.val would be a numpy / primitive value
        assert res.val is None

    Comment: Except InternalVar and Vars created in while_loop and by
    placeholder, all Var should only be constructed by Operation to represent
    outputs.

    Comment: Var hides the details of sym_type vs sym_val vs materialized
    value, which was represented by 2 objects prior to refactoring.


    # Properties:

    name: (str)
        name in MIL proto NamedValueType. Name is assigned by the parent
        Operation.

    sym_type [_sym_type]: (builtin type class)
        All Var must have a (possibly symbolic) type, usually derived from
        type inference of upstream ops or from default values in _Input.

    sym_val [_sym_val]: (builtin type instance)
        Possibly symbolic value.

    val [_sym_val]: (np.ndarray or python primitive scalar)
        Numpy (scalar / tensor) value. `val` is not None iff `sym_val` is
        not None and does not contain symbols.  Read-only.

    op [_op]: (Operation)
        The Operation this Var is derived from. May not be None except
        for InternalVar. Read-only.

    op_output_idx: (int)
        Idx of the output from Operation corresponding to _Input.  May be
        None.

    child_ops [_child_ops]: list[Operation]
        Ops that take this Var as an input.
    """

    __slots__ = [
        "name",
        "_sym_type",
        "_sym_val",
        "_op",
        "op_output_idx",
        "_child_ops",
        "consuming_blocks",
    ]

    def __init__(self, name, sym_type, sym_val=None, op=None, op_output_idx=None):
        """
        sym_type (builtin type)
        sym_val (builtin value)
        op (Operation)
        op_output_idx (int)
        """
        self.name = name
        self._sym_type = sym_type
        self._sym_val = sym_val
        self._op = op
        self.op_output_idx = op_output_idx
        # An op can appear twice if it consumes a var twice (e.g.,
        # add(%1, %1), while_loop(loop_vars=(%1, %1)).
        self._child_ops = list()

        # A variable may not be consumed by any op (i.e. len(self._child_ops)
        # == 0) but is still used as block output. A var can be output of
        # multiple blocks (e.g., both current block and nested blocks)
        self.consuming_blocks = list()

    @property
    def sym_type(self):
        return self._sym_type

    @property
    def shape(self):
        """Tensor shape as a tuple; non-tensors report an empty shape."""
        if types.is_tensor(self._sym_type):
            return self._sym_type.get_shape()
        return tuple()

    @property
    def rank(self):
        return len(self.shape)

    @property
    def dtype(self):
        """Primitive dtype for tensors; the symbolic type itself otherwise."""
        if types.is_tensor(self._sym_type):
            return self._sym_type.get_primitive()
        return self._sym_type

    @property
    def sym_val(self):
        """Possibly-symbolic value, or None when no value is known."""
        if self._sym_val is None:
            return None
        return self._sym_val.val

    @property
    def val(self):
        """Fully materialized value; None when unknown or still symbolic."""
        if self._sym_val is None or any_symbolic(self._sym_val.val):
            return None
        return self._sym_val.val

    @property
    def op(self):
        return self._op

    @property
    def child_ops(self):
        return self._child_ops

    def add_child_op(self, new_op):
        """Record that `new_op` consumes this Var as an input."""
        self._child_ops.append(new_op)

    def remove_child_op(self, target_op, no_check=False):
        """Forget that `target_op` consumes this Var.

        With no_check=True a missing target_op is silently ignored;
        otherwise it raises ValueError.
        """
        if target_op not in self._child_ops:
            if no_check:
                return  # no-op
            msg = "Op {} does not takes Var {} as input"
            raise ValueError(msg.format(target_op.name, self.name))
        self._child_ops.remove(target_op)

    def shape_str(self):
        """Render shape + dtype, annotated '*' (concrete val) or '^' (symbolic val)."""
        annotation = ""
        if self.val is not None:
            annotation = "*"
        elif self.sym_val is not None:
            annotation = "^"
        shape_str = str(self.shape)[:-1]  # trim the ")"
        if self.rank > 1:
            shape_str += ", "
        if types.builtin_to_string(self.dtype) is None:
            shape_str += ")" + annotation
        else:
            shape_str += types.builtin_to_string(self.dtype) + ")" + annotation
        return shape_str

    def type_str(self):
        """Coarse category of the type: Tensor, List or Scalar."""
        is_tensor = types.is_tensor(self.sym_type)
        is_list = types.is_list(self.sym_type)
        if is_tensor:
            type_string = "(Tensor)"
        elif is_list:
            type_string = "(List)"
        else:
            type_string = "(Scalar)"
        return type_string

    def set_name(self, name):
        self.name = name

    def is_tensor_or_scalar_of(self, dtype: str):
        """True when this Var is a tensor or scalar of the named dtype."""
        return (types.is_tensor(self.sym_type) or types.is_scalar(self.sym_type)) and builtin_to_string(self.dtype) == dtype

    def __str__(self):
        return "%" + self.name + ": " + self.shape_str() + self.type_str()
class ListVar(Var):
    """A Var holding a (possibly growable) list of tensors of one element type."""

    __slots__ = ["_elem_type", "init_length", "dynamic_length"]

    def __init__(
        self, name, elem_type=None, init_length=None, dynamic_length=True, sym_val=None, **kwargs
    ):
        """
        elem_type (builtin.tensor)

        init_length (int): initial length

        dynamic_length (bool): True to allow list to grow. False uses
        init_length as the fixed size (init_length is runtime length).

        sym_val: value of the list, if available
        """
        super(ListVar, self).__init__(
            name=name,
            sym_type=types.list(elem_type, init_length, dynamic_length),
            sym_val=sym_val,
            **kwargs
        )
        self._elem_type = elem_type
        self.init_length = init_length
        self.dynamic_length = dynamic_length

    # Lists have no tensor shape/rank/dtype of their own: fail loudly rather
    # than inherit Var's tensor-oriented behavior.
    @property
    def shape(self):
        raise ValueError("shape not applicable to ListVar '{}'.".format(self.name))

    @property
    def rank(self):
        raise ValueError("rank not applicable to ListVar '{}'".format(self.name))

    @property
    def dtype(self):
        raise ValueError("dtype not applicable to ListVar '{}'".format(self.name))

    @property
    def elem_type(self):
        return self._elem_type

    @property
    def elem_shape(self):
        """Shape of the element tensor, or None when the element type is unknown."""
        if self._elem_type == types.unknown:
            return None
        return self._elem_type.get_shape()

    def shape_str(self):
        """Render as List[length, element-description]; '?' = dynamic length."""
        length = "?"
        if not self.dynamic_length:
            length = str(self.init_length)
        if self._elem_type == types.unknown:
            return "List[{}, unknown]".format(length)
        if self._elem_type == types.str:
            return "List[{}, str]".format(length)
        elif self._elem_type == types.int64:
            return "List[{}, int]".format(length)
        else:
            elem_shape = self._elem_type.get_shape()
            elem_dtype = self._elem_type.get_primitive()
            shape_str = str(elem_shape)[:-1]  # trim the ")"
            if len(elem_shape) > 1:
                shape_str += ", "
            shape_str += types.builtin_to_string(elem_dtype) + ")"
            return "List[{}, {}]".format(length, shape_str)
class InternalVar(Var):
    """
    Internal Var (with '__' prefix and won't appear in SSA) will ALWAYS have
    `sym_val == builtin.unknown`. InternalVar are constructed by builder only.

    Comment: Internal Var can be used to represent diverse types such as enum
    type `DataType.FLOAT32`.
    """

    def __init__(self, val, name=None):
        # Wrap the raw value in the sentinel `unknown` builtin type.
        wrapped_val = types.unknown(val)
        super(InternalVar, self).__init__(
            name=name, sym_type=types.unknown, sym_val=wrapped_val
        )
| apple/coremltools | coremltools/converters/mil/mil/var.py | Python | bsd-3-clause | 8,592 |
# encoding=utf8
# The python elasticsearch binding
"""The python elasticsearch binding
"""
| lipixun/pyelastic | elastic/search/__init__.py | Python | gpl-2.0 | 93 |
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates the display name of the custom targeting values
belonging to a custom targeting key.
To determine which custom targeting keys exist, run
get_all_custom_targeting_keys_and_values.py."""
# Import appropriate modules from the client library.
from googleads import dfp
CUSTOM_TARGETING_KEY_ID = 'INSERT_CUSTOM_TARGETING_KEY_ID_HERE'
def main(client, key_id):
  """Appends ' (Deprecated)' to the display name of every custom targeting
  value under the given key, paging through results (Python 2 / DFP API)."""
  # Initialize appropriate service.
  custom_targeting_service = client.GetService(
      'CustomTargetingService', version='v201508')

  # Bind the key id into the PQL filter statement.
  values = [{
      'key': 'keyId',
      'value': {
          'xsi_type': 'NumberValue',
          'value': key_id
      }
  }]
  query = 'WHERE customTargetingKeyId = :keyId'
  statement = dfp.FilterStatement(query, values)

  while True:
    # Get custom targeting values by statement.
    response = custom_targeting_service.getCustomTargetingValuesByStatement(
        statement.ToStatement())

    # Update each local custom targeting value object by changing its name.
    if 'results' in response:
      updated_values = []
      for value in response['results']:
        # Fall back to the raw name when no display name is set; note the
        # ' (Deprecated)' suffix is appended to EVERY value, each pass.
        if not value['displayName']:
          value['displayName'] = value['name']
        value['displayName'] += ' (Deprecated)'
        updated_values.append(value)
      values = custom_targeting_service.updateCustomTargetingValues(
          updated_values)

      # Display results.
      for value in values:
        print ('Custom targeting value with id \'%s\', name \'%s\', and display'
               ' name \'%s\' was updated.'
               % (value['id'], value['name'], value['displayName']))
      statement.offset += dfp.SUGGESTED_PAGE_LIMIT
    else:
      break

  if response['totalResultSetSize'] == 0:
    print 'No custom targeting values were updated.'


if __name__ == '__main__':
  # Initialize client object.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client, CUSTOM_TARGETING_KEY_ID)
| richardfergie/googleads-python-lib | examples/dfp/v201508/custom_targeting_service/update_custom_targeting_values.py | Python | apache-2.0 | 2,539 |
__author__ = "Yinchong Yang"
__copyright__ = "Siemens AG, 2018"
__licencse__ = "MIT"
__version__ = "0.1"
"""
MIT License
Copyright (c) 2018 Siemens AG
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
We first sample MNIST digits to form sequences of random lengths.
The sequence is labeled as one if it contains a zero, and is labeled zero otherwise.
This simulates a high dimensional sequence classification task, such as predicting therapy decision
and survival of patients based on their historical clinical event information.
We train plain LSTM and Tensor-Train LSTM for this task.
After the training, we apply Layer-wise Relevance Propagation to identify the digit(s) that
have influenced the classification.
Apparently, we would expect the LRP algorithm would assign high relevance value to the zero(s)
in the sequence.
These experiments turn out to be successful, which demonstrates that
i) the LSTM and TT-LSTM can indeed learn the mapping from a zero to the sequence class, and that
ii) both LSTMs have no problem in storing the zero pattern over a period of time, because the
classifier is deployed only at the last hidden state, and that
iii) the implementation of the LRP algorithm, complex as it is, is also correct, in that
the zeros are assigned high relevance scores.
Especially the experiments with the plain LSTM serve as simulation study supporting our submission of
“Yinchong Yang, Volker Tresp, Marius Wunderle, Peter A. Fasching,
Explaining Therapy Predictions with Layer-wise Relevance Propagation in Neural Networks, at IEEE ICHI 2018”.
The original LRP for LSTM from the repository:
https://github.com/ArrasL/LRP_for_LSTM
which we modified and adjusted for keras models.
Feel free to experiment with the hyper parameters and suggest other sequence classification tasks.
Have fun ;)
"""
import pickle
import sys
import numpy as np
from numpy import newaxis as na
import keras
from keras.layers.recurrent import Recurrent
from keras import backend as K
from keras.engine import InputSpec
from keras import activations
from keras import initializers
from keras import regularizers
from keras import constraints
from keras.engine.topology import Layer
from TTLayer import *
from TTRNN import TT_LSTM
def make_seq(n, x, y, maxlen=32, seed=123):
    """Sample `n` left-padded sequences of digits from (x, y).

    Each sequence has a random length in [2, maxlen) and is right-aligned
    (zero-padded on the left). A sequence is labeled 1 iff it contains at
    least one digit of class 0. (An alternative parity labeling — odd
    number of zeros — was dead code in the original and has been removed.)

    Generalized: the feature dimension is taken from x.shape[1] instead of
    the hard-coded 28**2, so any flattened feature matrix works.

    Args:
        n: number of sequences to generate.
        x: (num_samples, num_features) feature matrix.
        y: (num_samples,) integer class labels.
        maxlen: maximum sequence length (exclusive upper bound is maxlen).
        seed: numpy RNG seed for reproducibility.

    Returns:
        [seqs, labels, digits_label, ids] where
        seqs: (n, maxlen, num_features) float array,
        labels: (n,) 0/1 labels,
        digits_label: (n, maxlen) int32 per-step class, -1 on padding,
        ids: (n, maxlen) int64 row indices into x, -1 on padding.
    """
    np.random.seed(seed)
    num_features = x.shape[1]
    lens = np.random.choice(range(2, maxlen), n)

    seqs = np.zeros((n, maxlen, num_features))
    labels = np.zeros(n)
    digits_label = np.zeros((n, maxlen), dtype='int32') - 1
    ids = np.zeros((n, maxlen), dtype='int64') - 1

    for i in range(n):
        digits_inds = np.random.choice(range(x.shape[0]), lens[i])
        ids[i, -lens[i]:] = digits_inds
        seqs[i, -lens[i]:, :] = x[digits_inds]
        digits_label[i, -lens[i]:] = y[digits_inds]
        class_inds = y[digits_inds]
        # label = 1 iff the sequence contains at least one digit "0"
        labels[i] = (0 in class_inds)

    return [seqs, labels, digits_label, ids]
# From: https://github.com/ArrasL/LRP_for_LSTM
# From: https://github.com/ArrasL/LRP_for_LSTM
def lrp_linear(hin, w, b, hout, Rout, bias_nb_units, eps, bias_factor, debug=False):
    """
    LRP (epsilon rule) through a linear layer with input dim D, output dim M.

    Args:
    - hin:            forward-pass input, shape (D,)
    - w:              connection weights, shape (D, M)
    - b:              biases, shape (M,)
    - hout:           forward-pass output, shape (M,) (may differ from
                      np.dot(w.T, hin) + b when several layers feed in)
    - Rout:           relevance at the layer output, shape (M,)
    - bias_nb_units:  number of lower-layer units receiving the
                      bias/stabilizer share
    - eps:            stabilizer (small positive number)
    - bias_factor:    1.0 for global relevance conservation, 0.0 to drop
                      the bias share

    Returns:
    - Rin: relevance at the layer input, shape (D,)
    """
    out_sign = np.where(hout[na, :] >= 0, 1.0, -1.0)        # (1, M)
    stabilizer = eps * out_sign
    # Per-connection contribution plus the redistributed bias/stabilizer share.
    contrib = w * hin[:, na]                                 # (D, M)
    shared = (bias_factor * b[na, :] + stabilizer) / bias_nb_units
    ratio = (contrib + shared) / (hout[na, :] + stabilizer)  # (D, M)
    Rin = (ratio * Rout[na, :]).sum(axis=1)                  # (D,)

    # Local layer conservation holds when bias_factor==1.0 and
    # bias_nb_units==D; global conservation when bias_factor==1.0.
    if debug:
        print("local diff: ", Rout.sum() - Rin.sum())
    return Rin
def sigmoid(x):
    """Logistic function evaluated in extended precision.

    Uses np.longdouble, the portable alias of float128: np.float128 does
    not exist on every platform (e.g. Windows builds), so the original
    x.astype('float128') raised AttributeError there. np.asarray also
    accepts plain scalars, whereas .astype required an ndarray.
    """
    x = np.asarray(x, dtype=np.longdouble)
    return 1. / (1. + np.exp(-x))
# Modified from https://github.com/ArrasL/LRP_for_LSTM
# Modified from https://github.com/ArrasL/LRP_for_LSTM
def lstm_lrp(l, d, train_data = True):
    """Layer-wise Relevance Propagation for one sample through the LSTM.

    Args:
        l: sample index into the train (X_tr) or test (X_te) set.
        d: LSTM hidden size.
        train_data: pick training arrays when True, test arrays otherwise.

    Returns:
        Rx: (MAXLEN, 28**2) relevance per time step and input pixel.

    NOTE(review): depends on module-level globals created later in the
    script (X_tr/X_te, Y_tr/Y_te, Z_tr/Z_te, Ws, Us, b, Dense_w, Dense_b,
    eps, MAXLEN), and hard-codes the 28**2 input size.
    """
    if train_data:
        x_l = X_tr[l]
        y_l = Y_tr[l]
        z_l = Z_tr[l]
        # d_l = d_tr[l]
    else:
        x_l = X_te[l]
        y_l = Y_te[l]
        z_l = Z_te[l]
        # d_l = d_te[l]

    # calculate the FF pass in LSTM for every time step
    pre_gates = np.zeros((MAXLEN, d*4))
    gates = np.zeros((MAXLEN, d * 4))
    h = np.zeros((MAXLEN, d))
    c = np.zeros((MAXLEN, d))

    for t in range(MAXLEN):
        z = np.dot(x_l[t], Ws)
        if t > 0:
            z += np.dot(h[t-1], Us)
        z += b
        pre_gates[t] = z
        # Keras kernel layout: [input, forget, candidate, output] gate slices.
        z0 = z[0:d]
        z1 = z[d:2*d]
        z2 = z[2*d:3*d]
        z3 = z[3 * d::]
        i = sigmoid(z0)
        f = sigmoid(z1)
        c[t] = f * c[t-1] + i * np.tanh(z2)
        o = sigmoid(z3)
        h[t] = o * np.tanh(c[t])
        gates[t] = np.concatenate([i, f, np.tanh(z2), o])
    # check: z_l[12] / h[-1][12]

    Rh = np.zeros((MAXLEN, d))
    Rc = np.zeros((MAXLEN, d))
    Rg = np.zeros((MAXLEN, d))
    Rx = np.zeros((MAXLEN, 28**2))

    bias_factor = 0

    # Seed the backward pass: relevance of the prediction through the
    # final Dense classifier into the last hidden state.
    Rh[MAXLEN-1] = lrp_linear(hin=z_l,
                              w=Dense_w,
                              b=np.array(Dense_b),
                              hout=np.dot(z_l, Dense_w)+Dense_b,
                              Rout=np.array([y_l]),
                              bias_nb_units=len(z_l),
                              eps=eps,
                              bias_factor=bias_factor)

    for t in reversed(range(MAXLEN)):
        # t = MAXLEN-1
        # print t
        Rc[t] += Rh[t]
        # Rc[t] = Rh[t]
        if t > 0:
            # Relevance flowing to the previous cell state through the
            # forget gate (identity "weights", no bias).
            Rc[t-1] = lrp_linear(gates[t, d: 2 * d] * c[t - 1],  # gates[t , 2 *d: 3 *d ] *c[ t -1],
                                 np.identity(d),
                                 np.zeros((d)),
                                 c[t],
                                 Rc[t],
                                 2*d,
                                 eps,
                                 bias_factor,
                                 debug=False)

        # Relevance onto the candidate (input gate * tanh candidate).
        Rg[t] = lrp_linear(gates[t, 0:d] * gates[t, 2*d:3*d],  # h_input: i + g
                           np.identity(d),  # W
                           np.zeros((d)),  # b
                           c[t],  # h_output
                           Rc[t],  # R_output
                           2 * d,
                           eps,
                           bias_factor,
                           debug=False)

        # foo = np.dot(x_l[t], Ws[:,2*d:3*d]) + np.dot(h[t-1], Us[:, 2*d:3*d]) + b[2*d:3*d]

        # Candidate relevance back to this step's input pixels ...
        Rx[t] = lrp_linear(x_l[t],
                           Ws[:,2*d:3*d],
                           b[2*d:3*d],
                           pre_gates[t, 2*d:3*d],
                           Rg[t],
                           d + 28 ** 2,
                           eps,
                           bias_factor,
                           debug=False)

        if t > 0:
            # ... and to the previous hidden state (recurrent weights).
            Rh[t-1] = lrp_linear(h[t-1],
                                 Us[:,2*d:3*d],
                                 b[2*d:3*d],
                                 pre_gates[t, 2 * d:3 * d],
                                 Rg[t],
                                 d + 28**2,
                                 eps,
                                 bias_factor,
                                 debug=False)
            # hin, w, b, hout, Rout, bias_nb_units, eps, bias_factor, debug=False

    # Rx[np.where(d_l==-1.)[0]] *= 0
    return Rx
from keras.datasets import mnist
from keras.utils import to_categorical
from keras.models import Model, Input
from keras.layers import Dense, GRU, LSTM, Dropout, Masking
from keras.optimizers import *
from keras.regularizers import l2
from sklearn.metrics import *
# Script configurations ###################################################################
seed=111111
use_TT = True # whether use Tensor-Train or plain RNNs
# Prepare the data ########################################################################
# Load the MNIST data and build sequences:
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Flatten each 28x28 image into a 784-dim vector.
x_train = x_train.reshape(x_train.shape[0], -1)
x_test = x_test.reshape(x_test.shape[0], -1)
MAXLEN = 32 # max length of the sequences
# make_seq is defined earlier in this file; it assembles padded digit
# sequences and returns (inputs, labels, per-step digit ids, indices).
X_tr, Y_tr, d_tr, idx_tr = make_seq(n=10000, x=x_train, y=y_train, maxlen=MAXLEN, seed=seed)
X_te, Y_te, d_te, idx_te = make_seq(n=1000, x=x_test, y=y_test, maxlen=MAXLEN, seed=seed+1)
# Define the model ######################################################################
if use_TT:
    # TT settings
    tt_input_shape = [7, 7, 16]
    tt_output_shape = [4, 4, 4]
    tt_ranks = [1, 4, 4, 1]
rnn_size = 64
X = Input(shape=X_tr.shape[1::])
X_mask = Masking(mask_value=0.0, input_shape=X_tr.shape[1::])(X)
if use_TT:
    # TT_LSTM is the tensor-train factorized LSTM provided elsewhere in this project.
    Z = TT_LSTM(tt_input_shape=tt_input_shape, tt_output_shape=tt_output_shape, tt_ranks=tt_ranks,
                return_sequences=False, recurrent_dropout=.5)(X_mask)
    Out = Dense(units=1, activation='sigmoid', kernel_regularizer=l2(1e-2))(Z)
else:
    Z = LSTM(units=rnn_size, return_sequences=False, recurrent_dropout=.5)(X_mask) # dropout=.5,
    Out = Dense(units=1, activation='sigmoid', kernel_regularizer=l2(1e-2))(Z)
rnn_model = Model(X, Out)
rnn_model.compile(optimizer=Adam(1e-3), loss='binary_crossentropy',
                  metrics=['accuracy'])
# Train the model and save the results ######################################################
rnn_model.fit(X_tr, Y_tr, epochs=50, batch_size=32, validation_split=.2, verbose=2)
Y_hat = rnn_model.predict(X_tr, verbose=2).reshape(-1)
train_acc = (np.round(Y_hat) == Y_tr).mean()
Y_pred = rnn_model.predict(X_te, verbose=2).reshape(-1)
# NOTE(review): the next line discards its result (duplicate of the line below).
(np.round(Y_pred) == Y_te).mean()
pred_acc = (np.round(Y_pred) == Y_te).mean()
# Collect all hidden layers ################################################################
if use_TT:
    # Reconstruct the fully connected input-to-hidden weights:
    from keras.initializers import constant
    _tt_output_shape = np.copy(tt_output_shape)
    _tt_output_shape[0] *= 4
    fc_w = rnn_model.get_weights()[0]
    fc_layer = TT_Layer(tt_input_shape=tt_input_shape, tt_output_shape=_tt_output_shape, tt_ranks=tt_ranks,
                        kernel_initializer=constant(value=fc_w), use_bias=False)
    fc_input = Input(shape=(X_tr.shape[2],))
    fc_output = fc_layer(fc_input)
    fc_model = Model(fc_input, fc_output)
    fc_model.compile('sgd', 'mse')
    # Multiplying by the identity materializes the dense equivalent of the TT layer.
    fc_recon_mat = fc_model.predict(np.identity(X_tr.shape[2]))
    # Reconstruct the entire LSTM:
    fc_Z = LSTM(units=np.prod(tt_output_shape), return_sequences=False, dropout=.5, recurrent_dropout=.5,
                weights=[fc_recon_mat, rnn_model.get_weights()[2], rnn_model.get_weights()[1]])(X_mask)
else:
    fc_Z = LSTM(units=rnn_size, return_sequences=False, dropout=.5, recurrent_dropout=.5,
                weights=rnn_model.get_weights()[0:3])(X_mask)
fc_Out = Dense(units=1, activation='sigmoid', kernel_regularizer=l2(1e-3),
               weights=rnn_model.get_weights()[3::])(fc_Z)
fc_rnn_model = Model(X, fc_Out)
fc_rnn_model.compile(optimizer=Adam(1e-3), loss='binary_crossentropy',
                     metrics=['accuracy'])
fc_rnn_model.evaluate(X_te, Y_te, verbose=2)
# Calculate the LRP: #########################################################################
fc_Z_model = Model(X, fc_Z)
fc_Z_model.compile('sgd', 'mse')
Y_hat_fc = fc_rnn_model.predict(X_tr)
Y_pred_fc = fc_rnn_model.predict(X_te)
# Unpack LSTM weights: input kernels Ws, recurrent kernels Us, biases b,
# plus the final Dense layer parameters, used by lstm_lrp() above.
Ws = fc_rnn_model.get_weights()[0]
Us = fc_rnn_model.get_weights()[1]
b = fc_rnn_model.get_weights()[2]
Dense_w = fc_rnn_model.get_weights()[3]
Dense_b = fc_rnn_model.get_weights()[4]
Z_tr = fc_Z_model.predict(X_tr)
Z_te = fc_Z_model.predict(X_te)
eps = 1e-4
is_number_flag = np.where(d_te != -1)
# All relevance scores of the test sequences
lrp_te = np.vstack([lstm_lrp(i, rnn_size, False).sum(1) for i in range(X_te.shape[0])])
lrp_auroc = roc_auc_score((d_te == 0).astype('int')[is_number_flag].reshape(-1),
                          lrp_te[is_number_flag].reshape(-1))
lrp_auprc = average_precision_score((d_te == 0).astype('int')[is_number_flag].reshape(-1),
                                    lrp_te[is_number_flag].reshape(-1))
# The reported results:
print pred_acc
print lrp_auroc
print lrp_auprc
| Tuyki/TT_RNN | MNISTSeq.py | Python | mit | 14,227 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import datetime
import logging
import threading
from google.appengine.api import apiproxy_stub_map, background_thread, runtime
from google.appengine.ext import db, deferred
import app
# In-memory accumulation of gtest results, shared across requests.
_stats = {}  # key_name -> summary dict (includes a 'results' list of raw results)
_queue = collections.deque()  # key_names awaiting flush, in insertion order
_lock = threading.Lock()  # guards _stats, _queue and _processing
_processing = False  # True while flush_summaries_to_datastore() is running
QUEUE_THRESHOLD = 1000  # schedule a background flush above this many pending keys
def flush_summaries_to_datastore():
  """Flush queued per-test summaries from memory into the datastore.

  Runs for at most ~15 seconds per invocation.  A queue entry is removed
  from the in-memory state only after its transaction has committed, so a
  failed transaction leaves the data intact for a later retry.
  """
  global _processing
  # Claim the flusher role BEFORE entering the try/finally.  In the original
  # code this check lived inside the try block, so the early return below
  # still executed the finally clause and cleared _processing even though
  # ANOTHER invocation was mid-flush, permitting concurrent flushes.
  with _lock:
    if _processing:
      return
    _processing = True
  try:
    start_time = datetime.datetime.now()
    while datetime.datetime.now() - start_time < datetime.timedelta(seconds=15):
      with _lock:
        if not _queue:
          return
        # This should be non-destructive in case db transaction fails.
        key_name = _queue[0]
        stats_value = _stats[key_name]
      # Make sure the summary entity exists before the transactional update.
      app.GTestSummary.get_or_insert(
          key_name=key_name,
          weekly_timestamp=stats_value['weekly_timestamp'],
          buildbot_root=stats_value['buildbot_root'],
          builder=stats_value['builder'],
          step_name=stats_value['step_name'],
          fullname=stats_value['fullname'])
      def tx_summary():
        # Re-read the summary inside the transaction and fold in all queued
        # results for this key.
        summary = app.GTestSummary.get_by_key_name(key_name)
        for result in stats_value['results']:
          # Older entities may have unset (None) aggregate fields; normalize.
          if not summary.max_run_time_ms:
            summary.max_run_time_ms = 0.0
          if not summary.run_time_ms:
            summary.run_time_ms = 0.0
          if not summary.result_count:
            summary.result_count = 0
          if not summary.crash_or_hang_count:
            summary.crash_or_hang_count = 0
          if not summary.failure_count:
            summary.failure_count = 0
          summary.max_run_time_ms = max(summary.max_run_time_ms,
                                        float(result['run_time_ms']))
          # Incremental running mean of the run time.
          summary.run_time_ms = (
              (summary.run_time_ms * summary.result_count) +
              float(result['run_time_ms'])) / (summary.result_count + 1)
          summary.result_count += 1
          if summary.result_count >= 10:
            summary.enough_samples = True
          if result['is_crash_or_hang']:
            summary.crash_or_hang_count += 1
          if not result['is_successful']:
            summary.failure_count += 1
          summary.crash_or_hang_rate = \
              float(summary.crash_or_hang_count) / summary.result_count
          summary.failure_rate = \
              float(summary.failure_count) / summary.result_count
        summary.put()
      db.run_in_transaction(tx_summary)
      # Now that transaction has succeeded, update in-memory state.
      _queue.remove(key_name)
      del _stats[key_name]
  finally:
    with _lock:
      _processing = False
def process_gtest_results(buildbot_root,
                          builder,
                          step_name,
                          time_finished,
                          results):
  """Accumulate a batch of gtest results into the in-memory summary state.

  Each result is bucketed per (buildbot_root, builder, step, test, week);
  once enough data is pending and no flush is running, a background flush
  is scheduled.
  """
  # _processing is global but we're not modifying it.
  # global _processing
  with _lock:
    logging.debug('stats before: %d' % len(_stats.keys()))
    # Every result in this batch shares the same week bucket (the Monday of
    # the week time_finished falls in), so compute it once up front.
    week_start = (time_finished.date() -
                  datetime.timedelta(days=time_finished.weekday()))
    for fullname, result in results.items():
      key_name = '%s-%s-%s-%s-%s' % (buildbot_root,
                                     builder,
                                     step_name,
                                     fullname,
                                     week_start)
      entry = _stats.get(key_name)
      if entry is None:
        entry = {
          'buildbot_root': buildbot_root,
          'builder': builder,
          'step_name': step_name,
          'fullname': fullname,
          'weekly_timestamp': week_start,
          'results': [],
        }
        _stats[key_name] = entry
        _queue.append(key_name)
      entry['results'].append(result)
    if not _processing and len(_stats) > QUEUE_THRESHOLD:
      background_thread.start_new_background_thread(
          flush_summaries_to_datastore, [])
| nicko96/Chrome-Infra | appengine/chromium_build_logs/gtest_summaries.py | Python | bsd-3-clause | 4,193 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio Demosite.
# Copyright (C) 2012, 2013 CERN.
#
# Invenio Demosite is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio Demosite is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from invenio.ext.sqlalchemy import db
from invenio.ext.login import current_user, login_user, logout_user
from invenio.testsuite import make_flask_test_suite, run_test_suite, \
FlaskSQLAlchemyTest, InvenioFixture
from fixture import SQLAlchemyFixture
from invenio.webaccount_fixtures import UserData, UsergroupData, \
UserUsergroupData
def fixture_builder():
    """Build a SQLAlchemyFixture wiring the account models to the test DB.

    Imports the models lazily so the module can be loaded before the
    application registry is ready.
    """
    from invenio_accounts.models import User, Usergroup, UserUsergroup
    model_env = {
        'UserData': User,
        'UsergroupData': Usergroup,
        'UserUsergroupData': UserUsergroup,
    }
    return SQLAlchemyFixture(env=model_env,
                             engine=db.metadata.bind,
                             session=db.session)
fixture = InvenioFixture(fixture_builder)
class WebAccountTest(FlaskSQLAlchemyTest):
    """Login/logout and password-change behaviour of the accounts module."""
    # NOTE: fixture.with_data injects the fixture data as the FIRST argument,
    # which is why these methods use the unusual (data, self) parameter order.
    @fixture.with_data(UserData)
    def test_low_level_login(data, self):
        """Exercise login_user/logout_user directly with fixture user ids."""
        users = data.UserData
        assert current_user.is_guest
        login_user(users.admin.id)
        assert current_user.get_id() == users.admin.id
        logout_user()
        assert current_user.get_id() != users.admin.id
        assert current_user.is_guest
        login_user(users.romeo.id)
        assert not current_user.is_guest
        assert current_user.get_id() == users.romeo.id
        # Logging in while already logged in switches the session user.
        login_user(users.admin.id)
        assert current_user.get_id() == users.admin.id
        logout_user()
    @fixture.with_data(UserData)
    def test_login(data, self):
        """Exercise the login form with valid and invalid credentials."""
        users = data.UserData
        # Valid credentials.
        for name, u in users:
            response = self.login(u.nickname, u.password)
            assert u.nickname in response.data
            self.logout()
        # Valid credentials using email.
        for name, u in users:
            response = self.login(u.email, u.password)
            assert u.nickname in response.data
            self.logout()
        # Empty form should not work.
        response = self.login('', '')
        assert 'logout' not in response.data
        # Not existing user.
        response = self.login('NOT EXISTS', '')
        assert 'logout' not in response.data
        # Existing password with not existing user name.
        response = self.login('NOT EXISTS', users.romeo.password)
        assert 'logout' not in response.data
        # Invalid password for admin.
        response = self.login(users.admin.nickname, 'FAIL')
        assert 'logout' not in response.data
    @fixture.with_data(UserData)
    def test_change_password(data, self):
        """Changing the password invalidates the old one and accepts the new."""
        from invenio_accounts.models import User
        NEW_PASSWORD = 'admin'
        users = data.UserData
        response = self.login(users.admin.nickname, users.admin.password)
        assert users.admin.nickname in response.data
        self.logout()
        # Change the password directly through the model layer.
        admin = User.query.filter(User.id == users.admin.id).one()
        admin.password = NEW_PASSWORD
        db.session.merge(admin)
        db.session.commit()
        new_passwd = db.session.query(User.password).filter(User.id == users.admin.id).one()
        assert users.admin.password != new_passwd
        # Invalid password for admin.
        response = self.login(users.admin.nickname, users.admin.password)
        assert 'logout' not in response.data
        # Valid credentials.
        response = self.login(users.admin.nickname, NEW_PASSWORD)
        assert users.admin.nickname in response.data
        self.logout()
class UserGroupTest(FlaskSQLAlchemyTest):
    """Consistency of the many-to-many User <-> Usergroup relationship."""
    @fixture.with_data(UserData, UsergroupData, UserUsergroupData)
    def test_group_relation_consistency(data, self):
        """Membership counts must agree from both sides of the relation."""
        from invenio_accounts.models import User, Usergroup
        # Number of fixture membership rows vs. memberships reachable from
        # users and from groups -- all three must match.
        orig_len = len(dict(data.UserUsergroupData))
        user_len = sum(len(u.usergroups) for u in User.query.all())
        ugrp_len = sum(len(g.users) for g in Usergroup.query.all())
        assert orig_len == user_len
        assert user_len == ugrp_len
# Aggregate the test cases for Invenio's test runner; allow direct execution.
TEST_SUITE = make_flask_test_suite(WebAccountTest, UserGroupTest)
if __name__ == "__main__":
    run_test_suite(TEST_SUITE)
| mvesper/invenio-demosite | invenio_demosite/testsuite/flask/test_accounts.py | Python | gpl-2.0 | 4,810 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib2;
import re;
import string;
import sys;
from BeautifulSoup import BeautifulSoup
# Map English month abbreviations to zero-padded month-number strings.
# Fix: the original dict contained a stray '' literal after 'Dec': '12'
# that was silently concatenated onto '12' by implicit string joining
# (missing comma); it has been removed.
month_num = {
    'Jan' : '01',
    'Feb' : '02',
    'Mar' : '03',
    'Apr' : '04',
    'May' : '05',
    'Jun' : '06',
    'Jul' : '07',
    'Aug' : '08',
    'Sep' : '09',
    'Oct' : '10',
    'Nov' : '11',
    'Dec' : '12',
}
def process_date(raw_date):
    """Convert a Yahoo-style date like 'Mar 5, 2014' to ISO '2014-03-05'.

    Uses the module-level month_num table to translate the month
    abbreviation; the day is zero-padded to two digits.
    """
    parts = raw_date.split(' ')
    month_str = month_num[parts[0]]
    # The day arrives with a trailing comma, e.g. '5,' -- strip it and pad.
    day_str = parts[1].split(',')[0].zfill(2)
    year_str = parts[2]
    return year_str + '-' + month_str + '-' + day_str
def process_num(raw_num):
    """Strip thousands separators from a number string: '1,234' -> '1234'.

    Replaces the original manual split/concatenate loop with the
    equivalent (and idiomatic) str.replace.
    """
    return raw_num.replace(',', '')
# Fetch the KOSPI historical-prices page from Yahoo Finance.
str_url = "http://finance.yahoo.com/q/hp?s=%5EKS11+Historical+Prices";
req=urllib2.Request(str_url);
resp=urllib2.urlopen(req);
respHtml=resp.read();
HtmlEncoding = "UTF-8";
soup = BeautifulSoup(respHtml, fromEncoding=HtmlEncoding);
# Walk into the data table; the chain of .contents indices below depends on
# the exact page markup and will break if Yahoo changes the layout.
tag_top = soup.find('table', {"class":"yfnc_datamodoutline1"});
tag_body = tag_top.contents[0].contents[0].contents[0];
# Row 1 holds the most recent trading day: date, open/high/low/close, volume.
str_date = process_date(tag_body.contents[1].contents[0].contents[0]);
open_price = process_num(tag_body.contents[1].contents[1].contents[0]);
high_price = process_num(tag_body.contents[1].contents[2].contents[0]);
low_price = process_num(tag_body.contents[1].contents[3].contents[0]);
close_price = process_num(tag_body.contents[1].contents[4].contents[0]);
volume = process_num(tag_body.contents[1].contents[5].contents[0]);
# Zero volume presumably means a non-trading day -- skip writing in that case.
if volume != "0":
    # argv[1]: daily CSV (overwritten), argv[2]: history CSV (appended).
    daily_file = sys.argv[1];
    history_file = sys.argv[2];
    daily_fp = open(daily_file, 'w');
    history_fp = open(history_file, 'a');
    title_str = "Date,Open Price,High Price,Low Price,Close Price,Volume(KRW)\n";
    daily_fp.write(title_str);
    day_market_data = str_date+","+open_price+","+high_price+","+low_price+","+close_price+","+volume+'\n';
    daily_fp.write(day_market_data);
    history_fp.write(day_market_data);
    daily_fp.close();
    history_fp.close();
from django.test import TestCase
from django.contrib.gis import geos
from linz2osm.convert.processing.poly_winding import PolyWindingCW, PolyWindingCCW
class TestPolyWinding(TestCase):
    """Tests for the CW/CCW polygon ring-winding processors."""
    def test_ring_clockwise(self):
        """wind_ring() must orient outer/inner rings per the processor's rule."""
        cw = [(0,0), (10,10), (20,0), (0,0)]
        ccw = cw[:]
        ccw.reverse()
        p = PolyWindingCCW()
        self.assertEqual(True, p.ring_is_clockwise(cw))
        self.assertEqual(False, p.ring_is_clockwise(ccw))
        # CW processor: outer rings clockwise, inner rings anticlockwise.
        p = PolyWindingCW()
        self.assertEqual(True, p.ring_is_clockwise(p.wind_ring(cw, is_outer=True)))
        self.assertEqual(True, p.ring_is_clockwise(p.wind_ring(ccw, is_outer=True)))
        self.assertEqual(False, p.ring_is_clockwise(p.wind_ring(cw, is_outer=False)))
        self.assertEqual(False, p.ring_is_clockwise(p.wind_ring(ccw, is_outer=False)))
        # CCW processor: the mirror image of the above.
        p = PolyWindingCCW()
        self.assertEqual(False, p.ring_is_clockwise(p.wind_ring(cw, is_outer=True)))
        self.assertEqual(False, p.ring_is_clockwise(p.wind_ring(ccw, is_outer=True)))
        self.assertEqual(True, p.ring_is_clockwise(p.wind_ring(cw, is_outer=False)))
        self.assertEqual(True, p.ring_is_clockwise(p.wind_ring(ccw, is_outer=False)))
    def test_polygon_orientation_simple(self):
        """Single-ring polygons are rewound to the processor's orientation."""
        geoms = {
            "cw" : [(0,0), (10,10), (20,0), (0,0)],
            "ccw" : [(0,0), (20,0), (10,10), (0,0)],
        }
        for dir in ('cw', 'ccw'):
            for name, ring in geoms.items():
                print "layer=%s, data=%s" % (dir, name)
                p = PolyWindingCCW() if (dir == 'ccw') else PolyWindingCW()
                g_out = p.handle(geos.Polygon(ring))
                coords = g_out[0].tuple
                if dir == 'cw':
                    self.assert_(p.ring_is_clockwise(coords), "Outer-ring coords are anticlockwise (expecting cw)!")
                else:
                    self.assert_(not p.ring_is_clockwise(coords), "Outer-ring coords are clockwise (expecting ccw)!")
    def test_polygon_orientation_multipolygon(self):
        """Outer and inner rings get opposite windings, whatever the input."""
        geoms = {
            "cw+ccw": ([(0,0), (10,10), (20,0), (0,0)], [(8,2), (12,2), (10,4), (8,2)]),
            "cw+cw": ([(0,0), (10,10), (20,0), (0,0)], [(8,2), (10,4), (12,2), (8,2)]),
            "ccw+cw": ([(0,0), (20,0), (10,10), (0,0)], [(8,2), (10,4), (12,2), (8,2)]),
            "ccw+ccw": ([(0,0), (20,0), (10,10), (0,0)], [(8,2), (12,2), (10,4), (8,2)]),
        }
        for dir in ('cw', 'ccw'):
            for name, rings in geoms.items():
                print "layer=%s, data=%s" % (dir, name)
                p = PolyWindingCCW() if (dir == 'ccw') else PolyWindingCW()
                g_out = p.handle(geos.Polygon(*rings))
                for i,mn in enumerate(g_out):
                    coords = g_out[i].tuple
                    # Ring 0 is the outer shell; all others are holes.
                    if i == 0:
                        if dir == 'cw':
                            self.assert_(p.ring_is_clockwise(coords), "Outer-ring coords are anticlockwise!")
                        else:
                            self.assert_(not p.ring_is_clockwise(coords), "Outer-ring coords are clockwise!")
                    else:
                        if dir == 'cw':
                            self.assert_(not p.ring_is_clockwise(coords), "Inner-ring coords are clockwise!")
                        else:
                            self.assert_(p.ring_is_clockwise(coords), "Inner-ring coords are anticlockwise!")
    def test_polygon_orientation_concurrent(self):
        """Duplicate (concurrent) points must not confuse the winding test."""
        geoms = {
            "cw" : [(0,0), (10,10), (10,10), (20,0), (0,0)],
            "ccw" : [(0,0), (20,0), (10,10), (10,10), (0,0)],
        }
        for dir in ('cw', 'ccw'):
            for name, ring in geoms.items():
                print "layer=%s, data=%s" % (dir, name)
                p = PolyWindingCCW() if (dir == 'ccw') else PolyWindingCW()
                g_out = p.handle(geos.Polygon(ring))
                coords = g_out[0].tuple
                if dir == 'cw':
                    self.assert_(p.ring_is_clockwise(coords), "Outer-ring with concurrent points coords are anticlockwise (expecting cw)!")
                else:
                    self.assert_(not p.ring_is_clockwise(coords), "Outer-ring with concurrent points coords are clockwise (expecting ccw)!")
| opennewzealand/linz2osm | linz2osm/convert/processing/tests/test_poly_winding.py | Python | gpl-3.0 | 4,382 |
# -*- coding: utf-8 -*-
import unittest
from openerp.tests import common
class test_single_transaction_case(common.SingleTransactionCase):
    """
    Check the whole-class transaction behavior of SingleTransactionCase.

    All tests share a single transaction, so records created in one test
    are visible to later tests; the methods rely on their execution order.
    """
    def test_00(self):
        """Create a partner."""
        cr, uid = self.cr, self.uid
        self.registry('res.partner').create(cr, uid, {'name': 'test_per_class_teardown_partner'})
        ids = self.registry('res.partner').search(cr, uid, [('name', '=', 'test_per_class_teardown_partner')])
        self.assertEqual(1, len(ids), "Test partner not found.")
    def test_01(self):
        """Find the created partner."""
        cr, uid = self.cr, self.uid
        ids = self.registry('res.partner').search(cr, uid, [('name', '=', 'test_per_class_teardown_partner')])
        self.assertEqual(1, len(ids), "Test partner not found.")
    def test_20a(self):
        """ Create a partner with a XML ID """
        cr, uid = self.cr, self.uid
        res_partner = self.registry('res.partner')
        ir_model_data = self.registry('ir.model.data')
        pid, _ = res_partner.name_create(cr, uid, 'Mr Blue')
        ir_model_data.create(cr, uid, {'name': 'test_partner_blue',
                                       'module': 'base',
                                       'model': 'res.partner',
                                       'res_id': pid})
    def test_20b(self):
        """ Resolve xml id with ref() and browse_ref() """
        cr, uid = self.cr, self.uid
        res_partner = self.registry('res.partner')
        xid = 'base.test_partner_blue'
        p_ref = self.ref(xid)
        self.assertTrue(p_ref, "ref() should resolve xid to database ID")
        partner = res_partner.browse(cr, uid, p_ref)
        p_browse_ref = self.browse_ref(xid)
        self.assertEqual(partner, p_browse_ref, "browse_ref() should resolve xid to browse records")
class test_transaction_case(common.TransactionCase):
    """
    Check the per-method transaction behavior of TransactionCase.

    Each test runs in its own transaction, so records created in one test
    must not be visible in the next.
    """
    def test_00(self):
        """Create a partner."""
        cr, uid = self.cr, self.uid
        ids = self.registry('res.partner').search(cr, uid, [('name', '=', 'test_per_class_teardown_partner')])
        self.assertEqual(0, len(ids), "Test partner found.")
        self.registry('res.partner').create(cr, uid, {'name': 'test_per_class_teardown_partner'})
        ids = self.registry('res.partner').search(cr, uid, [('name', '=', 'test_per_class_teardown_partner')])
        self.assertEqual(1, len(ids), "Test partner not found.")
    def test_01(self):
        """Don't find the created partner."""
        cr, uid = self.cr, self.uid
        ids = self.registry('res.partner').search(cr, uid, [('name', '=', 'test_per_class_teardown_partner')])
        self.assertEqual(0, len(ids), "Test partner found.")
    def test_20a(self):
        """ Create a partner with a XML ID then resolve xml id with ref() and browse_ref() """
        cr, uid = self.cr, self.uid
        res_partner = self.registry('res.partner')
        ir_model_data = self.registry('ir.model.data')
        pid, _ = res_partner.name_create(cr, uid, 'Mr Yellow')
        ir_model_data.create(cr, uid, {'name': 'test_partner_yellow',
                                       'module': 'base',
                                       'model': 'res.partner',
                                       'res_id': pid})
        xid = 'base.test_partner_yellow'
        p_ref = self.ref(xid)
        # Fix: use assertEqual -- assertEquals is a deprecated alias, and
        # every other assertion in this file uses assertEqual.
        self.assertEqual(p_ref, pid, "ref() should resolve xid to database ID")
        partner = res_partner.browse(cr, uid, pid)
        p_browse_ref = self.browse_ref(xid)
        self.assertEqual(partner, p_browse_ref, "browse_ref() should resolve xid to browse records")
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| vileopratama/vitech | src/openerp/addons/base/tests/test_basecase.py | Python | mit | 3,826 |
# coding:utf-8
import json
import os
import sys
import urllib
import urllib.parse  # explicit: `import urllib` alone does not load the submodule

import requests
# Google Speech API key comes from the environment; HTTP requests time out
# after TIMEOUT seconds.
apikey = os.environ.get("GOOGLE_API_KEY")
TIMEOUT = 30
def _first_transcript(result_objs):
    """Return the first transcript from a decoded API "result" list, or None.

    Scans the result objects in order and returns the first transcript of
    the first object that has any entries in its "alternative" list.
    """
    for result_obj in result_objs:
        for alternative in result_obj["alternative"]:
            return alternative["transcript"]
    return None


def stt_google_wav(filename):
    """Transcribe a 16 kHz L16 audio file via the Google Speech API v2.

    Returns the recognized text (lang=ja-JP) or '' when the API yields no
    transcript.
    """
    q = {"output": "json", "lang": "ja-JP", "key": apikey}
    url = "http://www.google.com/speech-api/v2/recognize?%s" % (urllib.parse.urlencode(q))
    headers = {"Content-Type": "audio/l16; rate=16000"}
    # Fix: read the audio through a context manager so the file handle is
    # closed (the original left it open).
    with open(filename, "rb") as audio_file:
        data = audio_file.read()
    response = requests.post(
        url,
        headers=headers,
        data=data,
        timeout=TIMEOUT
    )
    # The API returns several newline-separated JSON objects (typically an
    # empty {"result": []} first).  Mirroring the original logic, the last
    # unit that yields a transcript wins.
    res = ""
    for unit in response.text.split(os.linesep):
        if not unit:
            continue
        transcript = _first_transcript(json.loads(unit)["result"])
        if transcript is not None:
            res = transcript
    return res
if __name__ == '__main__':
    # CLI usage: python stt.py <wav-file>
    print(stt_google_wav(sys.argv[1]))
| shiraco/techcircle_pepper_handson_b | google_stt/stt.py | Python | mit | 1,167 |
# -*- coding: utf-8 -*-
"""
The same code as word2vec.py, but different input data and only two models instead four.
Competition: HomeDepot Search Relevance
Author: Kostia Omelianchuk
Team: Turing test
"""
from config_IgorKostia import *
import gensim
import logging
import numpy as np
from sklearn.ensemble import RandomForestRegressor, BaggingRegressor, GradientBoostingRegressor
from sklearn.ensemble import AdaBoostRegressor
from nltk.stem.snowball import SnowballStemmer, PorterStemmer
import nltk
from time import time
import re
import os
import math as m
import pandas as pd
from gensim import models
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
# Load the preprocessed train/test data, then left-join product
# descriptions, attribute bullets and parsed attributes by product_uid.
df_all=pd.read_csv(PROCESSINGTEXT_DIR+"/df_train_and_test_processed_wo_google.csv", encoding="ISO-8859-1")
df_all1=pd.read_csv(PROCESSINGTEXT_DIR+"/df_product_descriptions_processed_wo_google.csv", encoding="ISO-8859-1")
df_all2 = pd.merge(df_all, df_all1, how="left", on="product_uid")
df_all = df_all2
df_all1=pd.read_csv(PROCESSINGTEXT_DIR+"/df_attribute_bullets_processed_wo_google.csv", encoding="ISO-8859-1")
df_all2 = pd.merge(df_all, df_all1, how="left", on="product_uid")
df_all = df_all2
df_attr = pd.read_csv(PROCESSINGTEXT_DIR+'/df_attributes_kostia.csv', encoding="ISO-8859-1")
df_all = pd.merge(df_all, df_attr, how='left', on='product_uid')
def replace_nan(s):
    """Return '' when s is NaN/None, otherwise return s unchanged.

    Imports pandas locally on purpose: later in this script the module-level
    name `pd` is rebound to a DataFrame column, which would break a
    reference to the global alias.  Also drops the original's redundant
    `== True` comparison.
    """
    import pandas
    if pandas.isnull(s):
        return ""
    return s
# Print the column names, then normalize NaNs to '' in every text column.
p = df_all.keys()
for i in range(len(p)):
    print p[i]
df_all['search_term_stemmed'] = df_all['search_term_stemmed'].map(lambda x:replace_nan(x))
df_all['product_title_stemmed'] = df_all['product_title_stemmed'].map(lambda x:replace_nan(x))
df_all['product_description_stemmed'] = df_all['product_description_stemmed'].map(lambda x:replace_nan(x))
df_all['brand_parsed'] = df_all['brand_parsed'].map(lambda x:replace_nan(x))
df_all['material_parsed'] = df_all['material_parsed'].map(lambda x:replace_nan(x))
df_all['attribute_bullets_stemmed'] = df_all['attribute_bullets_stemmed'].map(lambda x:replace_nan(x))
# NOTE(review): 'value' is cleaned twice (here and below) -- redundant but harmless.
df_all['value'] = df_all['value'].map(lambda x:replace_nan(x))
df_all['search_term'] = df_all['search_term'].map(lambda x:replace_nan(x))
df_all['product_title'] = df_all['product_title'].map(lambda x:replace_nan(x))
df_all['product_description'] = df_all['product_description'].map(lambda x:replace_nan(x))
df_all['brand'] = df_all['brand'].map(lambda x:replace_nan(x))
df_all['material'] = df_all['material'].map(lambda x:replace_nan(x))
df_all['attribute_bullets'] = df_all['attribute_bullets'].map(lambda x:replace_nan(x))
df_all['value'] = df_all['value'].map(lambda x:replace_nan(x))
# Short aliases for the stemmed/parsed text columns.
st = df_all["search_term_stemmed"]
pt = df_all["product_title_stemmed"]
# WARNING: this rebinds `pd`, shadowing the pandas alias for the rest of
# the script (replace_nan is only called before this point).
pd = df_all["product_description_stemmed"]
br = df_all["brand_parsed"]
mr = df_all["material_parsed"]
ab = df_all["attribute_bullets_stemmed"]
at = df_all["value"]
##st + pt +pd vocab
#t = list()
#for i in range(len(st)):
#    p = st[i].split()
#    t.append(p)
#
#for i in range(len(pt)):
#    p = pt[i].split()
#    t.append(p)
#
#for i in range(len(pd)):
#    p = pd[i].split()
#    t.append(p)
#
##for i in range(len(br)):
##    p = br[i].split()
##    t.append(p)
##
##for i in range(len(mr)):
##    p = mr[i].split()
##    t.append(p)
#
#for i in range(len(ab)):
#    p = ab[i].split()
#    t.append(p)
#
#for i in range(len(at)):
#    p = at[i].split()
#    t.append(p)
print "first vocab"
#st conc pt conc pd vocab
# t1: one "sentence" per row, concatenating all stemmed/parsed text fields.
t1 = list()
for i in range(len(st)):
    p = st[i].split()+pt[i].split()+pd[i].split()+br[i].split()+mr[i].split()+ab[i].split()+at[i].split()
    t1.append(p)
print "second vocab"
#st + pt +pd +br + mr vocab w/o pars
# Same aliases as above but for the raw (non-stemmed) text columns.
st1 = df_all["search_term"]
pt1 = df_all["product_title"]
pd1 = df_all["product_description"]
br1 = df_all["brand"]
mr1 = df_all["material"]
ab1 = df_all["attribute_bullets"]
at1 = df_all["value"]
#t2 = list()
#for i in range(len(st)):
#    p = st1[i].split()
#    t2.append(p)
#
#for i in range(len(pt)):
#    p = pt1[i].split()
#    t2.append(p)
#
#for i in range(len(pd)):
#    p = pd1[i].split()
#    t2.append(p)
#
##for i in range(len(br)):
##    p = br1[i].split()
##    t2.append(p)
##
##for i in range(len(mr)):
##    p = mr1[i].split()
##    t2.append(p)
#
#for i in range(len(ab1)):
#    p = ab1[i].split()
#    t2.append(p)
#
#for i in range(len(at1)):
#    p = at1[i].split()
#    t2.append(p)
#
#print "third vocab"
#st conc pt conc pd conc br conc mr vocab w/o pars
# t3: one "sentence" per row over the raw (unstemmed) text fields.
t3 = list()
for i in range(len(st)):
    p = st1[i].split()+pt1[i].split()+pd1[i].split()+br1[i].split()+mr1[i].split()+ab1[i].split()+at1[i].split()
    t3.append(p)
print "fourth vocab"
# Train two skip-gram word2vec models: model1 on stemmed text (t1) and
# model3 on raw text (t3).  The commented variants were dropped.
#model0 = gensim.models.Word2Vec(t, sg=1, window=10, sample=1e-5, negative=5, size=300)
model1 = gensim.models.Word2Vec(t1, sg=1, window=10, sample=1e-5, negative=5, size=300)
print "model prepared"
#model2 = gensim.models.Word2Vec(t2, sg=1, window=10, sample=1e-5, negative=5, size=300)
model3 = gensim.models.Word2Vec(t3, sg=1, window=10, sample=1e-5, negative=5, size=300)
print "model prepared"
#model4 = gensim.models.Word2Vec(t, sg=0, hs=1, window=10, size=300)
#model5 = gensim.models.Word2Vec(t1, sg=0, hs=1,window=10, size=300)
#model6 = gensim.models.Word2Vec(t2, sg=0, hs=1, window=10, size=300)
#model7 = gensim.models.Word2Vec(t3, sg=0, hs=1,window=10, size=300)
#model_list=[model0,model1,model2,model3] #,model4 ,model5,model6,model7]
model_list=[model1,model3]
# For each model, compute n_similarity between the (in-vocabulary words of)
# the search term and various product-text fields; six feature lists per model.
n_sim=list()
for model in model_list:
    print "model features calculation"
    # search term vs. product title (stemmed)
    n_sim_pt=list()
    for i in range(len(st)):
        w1=st[i].split()
        w2=pt[i].split()
        d1=[]
        d2=[]
        for j in range(len(w1)):
            if w1[j] in model.vocab:
                d1.append(w1[j])
        for j in range(len(w2)):
            if w2[j] in model.vocab:
                d2.append(w2[j])
        if d1==[] or d2==[]:
            n_sim_pt.append(0)
        else:
            n_sim_pt.append(model.n_similarity(d1,d2))
    n_sim.append(n_sim_pt)
    # search term vs. product description (stemmed)
    n_sim_pd=list()
    for i in range(len(st)):
        w1=st[i].split()
        w2=pd[i].split()
        d1=[]
        d2=[]
        for j in range(len(w1)):
            if w1[j] in model.vocab:
                d1.append(w1[j])
        for j in range(len(w2)):
            if w2[j] in model.vocab:
                d2.append(w2[j])
        if d1==[] or d2==[]:
            n_sim_pd.append(0)
        else:
            n_sim_pd.append(model.n_similarity(d1,d2))
    n_sim.append(n_sim_pd)
    # search term vs. attribute values
    n_sim_at=list()
    for i in range(len(st)):
        w1=st[i].split()
        w2=at[i].split()
        d1=[]
        d2=[]
        for j in range(len(w1)):
            if w1[j] in model.vocab:
                d1.append(w1[j])
        for j in range(len(w2)):
            if w2[j] in model.vocab:
                d2.append(w2[j])
        if d1==[] or d2==[]:
            n_sim_at.append(0)
        else:
            n_sim_at.append(model.n_similarity(d1,d2))
    n_sim.append(n_sim_at)
    # search term vs. all stemmed product text combined
    n_sim_all=list()
    for i in range(len(st)):
        w1=st[i].split()
        w2=pt[i].split()+pd[i].split()+br[i].split()+mr[i].split()+ab[i].split()+at[i].split()
        d1=[]
        d2=[]
        for j in range(len(w1)):
            if w1[j] in model.vocab:
                d1.append(w1[j])
        for j in range(len(w2)):
            if w2[j] in model.vocab:
                d2.append(w2[j])
        if d1==[] or d2==[]:
            n_sim_all.append(0)
        else:
            n_sim_all.append(model.n_similarity(d1,d2))
    n_sim.append(n_sim_all)
    # raw search term vs. all raw product text combined
    n_sim_all1=list()
    for i in range(len(st)):
        w1=st1[i].split()
        w2=pt1[i].split()+pd1[i].split()+br1[i].split()+mr1[i].split()+ab1[i].split()+at1[i].split()
        d1=[]
        d2=[]
        for j in range(len(w1)):
            if w1[j] in model.vocab:
                d1.append(w1[j])
        for j in range(len(w2)):
            if w2[j] in model.vocab:
                d2.append(w2[j])
        if d1==[] or d2==[]:
            n_sim_all1.append(0)
        else:
            n_sim_all1.append(model.n_similarity(d1,d2))
    n_sim.append(n_sim_all1)
    # product title vs. product description (stemmed)
    n_sim_ptpd=list()
    for i in range(len(st)):
        w1=pt[i].split()
        w2=pd[i].split()
        d1=[]
        d2=[]
        for j in range(len(w1)):
            if w1[j] in model.vocab:
                d1.append(w1[j])
        for j in range(len(w2)):
            if w2[j] in model.vocab:
                d2.append(w2[j])
        if d1==[] or d2==[]:
            n_sim_ptpd.append(0)
        else:
            n_sim_ptpd.append(model.n_similarity(d1,d2))
    n_sim.append(n_sim_ptpd)
    print "model features done"
# Name the 12 feature columns (6 per model) and write them out with the id.
st_names=["id"]
#for j in range(len(n_sim)):
name_list=list([6,7,8,9,10,11,18,19,20,21,22,23])
for j in range(len(n_sim)):
    df_all["word2vec_"+str(name_list[j])]=n_sim[j]
    st_names.append("word2vec_"+str(name_list[j]))
b=df_all[st_names]
b.to_csv(FEATURES_DIR+"/df_word2vec_wo_google_dict.csv", index=False)
| ChenglongChen/Kaggle_HomeDepot | Code/Igor&Kostia/word2vec_without_google_dict.py | Python | mit | 9,073 |
"""Test interact and interactive."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
from collections import OrderedDict
import nose.tools as nt
import IPython.testing.tools as tt
from IPython.kernel.comm import Comm
from IPython.html import widgets
from IPython.html.widgets import interact, interactive, Widget, interaction
from IPython.utils.py3compat import annotate
#-----------------------------------------------------------------------------
# Utility stuff
#-----------------------------------------------------------------------------
class DummyComm(Comm):
    """A Comm replacement whose channel operations are all no-ops,
    so widgets can be exercised without a running kernel."""
    comm_id = 'a-b-c-d'
    def open(self, *args, **kwargs):
        pass
    def send(self, *args, **kwargs):
        pass
    def close(self, *args, **kwargs):
        pass
_widget_attrs = {}  # original Widget attributes saved by setup(), restored by teardown()
displayed = []  # objects captured by record_display()
undefined = object()  # sentinel: attribute did not exist before patching
def setup():
    """Patch Widget so widgets work without a kernel: give them a dummy
    comm and make actual display attempts fail loudly."""
    # Remember the original attribute (or the `undefined` sentinel) so
    # teardown() can restore the exact prior state.
    _widget_attrs['_comm_default'] = getattr(
        Widget, '_comm_default', undefined)
    Widget._comm_default = lambda self: DummyComm()
    _widget_attrs['_ipython_display_'] = Widget._ipython_display_
    def raise_not_implemented(*args, **kwargs):
        raise NotImplementedError()
    Widget._ipython_display_ = raise_not_implemented
def teardown():
    """Undo the Widget monkey-patching performed by setup()."""
    for name, saved in _widget_attrs.items():
        if saved is undefined:
            # The attribute did not exist before setup(); remove it again.
            delattr(Widget, name)
        else:
            setattr(Widget, name, saved)
def f(**kwargs):
    """No-op function used as the interact/interactive target."""
    pass
def clear_display():
    """Reset the module-level record of displayed objects."""
    global displayed
    displayed = []
def record_display(*args):
    """Append all displayed objects to the module-level `displayed` list."""
    displayed.extend(args)
#-----------------------------------------------------------------------------
# Actual tests
#-----------------------------------------------------------------------------
def check_widget(w, **d):
    """Assert that widget `w` matches the expected spec `d`.

    The special key 'cls' checks the widget's class; every other key is
    compared against the widget attribute of the same name.
    """
    for attr, expected in d.items():
        if attr == 'cls':
            nt.assert_is(w.__class__, expected)
            continue
        actual = getattr(w, attr)
        message = "%s.%s = %r != %r" % (
            w.__class__.__name__, attr, actual, expected)
        nt.assert_equal(actual, expected, message)
def check_widgets(container, **to_check):
    """Check selected children of `container`, keyed by their description."""
    # Index the children by description so specs can address them by name.
    by_description = {}
    for child in container.children:
        by_description[child.description] = child
    for key, spec in to_check.items():
        nt.assert_in(key, by_description)
        check_widget(by_description[key], **spec)
def test_single_value_string():
    """A bare string abbreviation produces a Text widget."""
    a = u'hello'
    c = interactive(f, a=a)
    w = c.children[0]
    check_widget(w,
        cls=widgets.Text,
        description='a',
        value=a,
    )
def test_single_value_bool():
    """A boolean abbreviation produces a Checkbox."""
    for a in (True, False):
        c = interactive(f, a=a)
        w = c.children[0]
        check_widget(w,
            cls=widgets.Checkbox,
            description='a',
            value=a,
        )
def test_single_value_dict():
for d in [
dict(a=5),
dict(a=5, b='b', c=dict),
]:
c = interactive(f, d=d)
w = c.children[0]
check_widget(w,
cls=widgets.Dropdown,
description='d',
values=d,
value=next(iter(d.values())),
)
def test_single_value_float():
for a in (2.25, 1.0, -3.5):
c = interactive(f, a=a)
w = c.children[0]
check_widget(w,
cls=widgets.FloatSlider,
description='a',
value=a,
min=-a if a > 0 else 3 * a,
max=3 * a if a > 0 else -a,
step=0.1,
readout=True,
)
def test_single_value_int():
for a in (1, 5, -3):
c = interactive(f, a=a)
nt.assert_equal(len(c.children), 1)
w = c.children[0]
check_widget(w,
cls=widgets.IntSlider,
description='a',
value=a,
min=-a if a > 0 else 3 * a,
max=3 * a if a > 0 else -a,
step=1,
readout=True,
)
# (min, max) int tuples/lists become an IntSlider; empty or inverted
# ranges must raise ValueError.
def test_list_tuple_2_int():
    with nt.assert_raises(ValueError):
        c = interactive(f, tup=(1, 1))
    with nt.assert_raises(ValueError):
        c = interactive(f, tup=(1, -1))
    for min, max in [(0, 1), (1, 10), (1, 2), (-5, 5), (-20, -19)]:
        c = interactive(f, tup=(min, max), lis=[min, max])
        nt.assert_equal(len(c.children), 2)
        d = dict(
            cls=widgets.IntSlider,
            min=min,
            max=max,
            step=1,
            readout=True,
        )
        check_widgets(c, tup=d, lis=d)
# (min, max, step) int triples: step must be positive.
def test_list_tuple_3_int():
    with nt.assert_raises(ValueError):
        c = interactive(f, tup=(1, 2, 0))
    with nt.assert_raises(ValueError):
        c = interactive(f, tup=(1, 2, -1))
    for min, max, step in [(0, 2, 1), (1, 10, 2), (1, 100, 2), (-5, 5, 4), (-100, -20, 4)]:
        c = interactive(f, tup=(min, max, step), lis=[min, max, step])
        nt.assert_equal(len(c.children), 2)
        d = dict(
            cls=widgets.IntSlider,
            min=min,
            max=max,
            step=step,
            readout=True,
        )
        check_widgets(c, tup=d, lis=d)
# Float pairs give a FloatSlider with the default 0.1 step.
def test_list_tuple_2_float():
    with nt.assert_raises(ValueError):
        c = interactive(f, tup=(1.0, 1.0))
    with nt.assert_raises(ValueError):
        c = interactive(f, tup=(0.5, -0.5))
    for min, max in [(0.5, 1.5), (1.1, 10.2), (1, 2.2), (-5., 5), (-20, -19.)]:
        c = interactive(f, tup=(min, max), lis=[min, max])
        nt.assert_equal(len(c.children), 2)
        d = dict(
            cls=widgets.FloatSlider,
            min=min,
            max=max,
            step=.1,
            readout=True,
        )
        check_widgets(c, tup=d, lis=d)
# Float triples: step must be a positive number.
def test_list_tuple_3_float():
    with nt.assert_raises(ValueError):
        c = interactive(f, tup=(1, 2, 0.0))
    with nt.assert_raises(ValueError):
        c = interactive(f, tup=(-1, -2, 1.))
    with nt.assert_raises(ValueError):
        c = interactive(f, tup=(1, 2., -1.))
    for min, max, step in [(0., 2, 1), (1, 10., 2), (1, 100, 2.), (-5., 5., 4), (-100, -20., 4.)]:
        c = interactive(f, tup=(min, max, step), lis=[min, max, step])
        nt.assert_equal(len(c.children), 2)
        d = dict(
            cls=widgets.FloatSlider,
            min=min,
            max=max,
            step=step,
            readout=True,
        )
        check_widgets(c, tup=d, lis=d)
def test_list_tuple_str():
    """A sequence of strings becomes a Dropdown with the first one selected."""
    options = ['hello', 'there', 'guy']
    expected_values = OrderedDict([(v, v) for v in options])
    container = interactive(f, tup=tuple(options), lis=list(options))
    nt.assert_equal(len(container.children), 2)
    expectation = dict(
        cls=widgets.Dropdown,
        value=options[0],
        values=expected_values,
    )
    check_widgets(container, tup=expectation, lis=expectation)
def test_list_tuple_invalid():
    """Tuples that are empty, mixed-type, or non-numeric must be rejected."""
    bad_specs = [
        (),
        (5, 'hi'),
        ('hi', 5),
        ({},),
        (None,),
    ]
    for bad in bad_specs:
        with nt.assert_raises(ValueError):
            print(bad)  # because there is no custom message in assert_raises
            interactive(f, tup=bad)
# @annotate values override inferred widgets for annotated parameters;
# plain defaults still drive widget inference for the rest.
def test_defaults():
    @annotate(n=10)
    def f(n, f=4.5, g=1):
        pass
    c = interactive(f)
    check_widgets(c,
        n=dict(
            cls=widgets.IntSlider,
            value=10,
        ),
        f=dict(
            cls=widgets.FloatSlider,
            value=4.5,
        ),
        g=dict(
            cls=widgets.IntSlider,
            value=1,
        ),
    )
# Annotations that are abbreviations (ranges, dicts, lists) pick the
# widget type, while the function default selects the initial value.
def test_default_values():
    @annotate(n=10, f=(0, 10.), g=5, h={'a': 1, 'b': 2}, j=['hi', 'there'])
    def f(n, f=4.5, g=1, h=2, j='there'):
        pass
    c = interactive(f)
    check_widgets(c,
        n=dict(
            cls=widgets.IntSlider,
            value=10,
        ),
        f=dict(
            cls=widgets.FloatSlider,
            value=4.5,
        ),
        g=dict(
            cls=widgets.IntSlider,
            value=5,
        ),
        h=dict(
            cls=widgets.Dropdown,
            values={'a': 1, 'b': 2},
            value=2
        ),
        j=dict(
            cls=widgets.Dropdown,
            values={'hi': 'hi', 'there': 'there'},
            value='there'
        ),
    )
# Defaults that fall outside the annotated choices/range are replaced by
# a value the widget can actually represent.
def test_default_out_of_bounds():
    @annotate(f=(0, 10.), h={'a': 1}, j=['hi', 'there'])
    def f(f='hi', h=5, j='other'):
        pass
    c = interactive(f)
    check_widgets(c,
        f=dict(
            cls=widgets.FloatSlider,
            value=5.,
        ),
        h=dict(
            cls=widgets.Dropdown,
            values={'a': 1},
            value=1,
        ),
        j=dict(
            cls=widgets.Dropdown,
            values={'hi': 'hi', 'there': 'there'},
            value='hi',
        ),
    )
# An annotation may be a concrete widget instance, used verbatim.
def test_annotations():
    @annotate(n=10, f=widgets.FloatText())
    def f(n, f):
        pass
    c = interactive(f)
    check_widgets(c,
        n=dict(
            cls=widgets.IntSlider,
            value=10,
        ),
        f=dict(
            cls=widgets.FloatText,
        ),
    )
# Keyword args to interactive() beat annotations, which beat defaults.
def test_priority():
    @annotate(annotate='annotate', kwarg='annotate')
    def f(kwarg='default', annotate='default', default='default'):
        pass
    c = interactive(f, kwarg='kwarg')
    check_widgets(c,
        kwarg=dict(
            cls=widgets.Text,
            value='kwarg',
        ),
        annotate=dict(
            cls=widgets.Text,
            value='annotate',
        ),
    )
@nt.with_setup(clear_display)
def test_decorator_kwarg():
    """@interact(a=5) should display one container holding an IntSlider."""
    with tt.monkeypatch(interaction, 'display', record_display):
        @interact(a=5)
        def foo(a):
            pass
    nt.assert_equal(len(displayed), 1)
    slider = displayed[0].children[0]
    check_widget(
        slider,
        cls=widgets.IntSlider,
        value=5,
    )
# @interact used bare (no call) should still build widgets from defaults.
@nt.with_setup(clear_display)
def test_decorator_no_call():
    with tt.monkeypatch(interaction, 'display', record_display):
        @interact
        def foo(a='default'):
            pass
    nt.assert_equal(len(displayed), 1)
    w = displayed[0].children[0]
    check_widget(w,
        cls=widgets.Text,
        value='default',
    )
# interact(fn) called as a plain function behaves like the decorator.
@nt.with_setup(clear_display)
def test_call_interact():
    def foo(a='default'):
        pass
    with tt.monkeypatch(interaction, 'display', record_display):
        ifoo = interact(foo)
    nt.assert_equal(len(displayed), 1)
    w = displayed[0].children[0]
    check_widget(w,
        cls=widgets.Text,
        value='default',
    )
# interact(fn, a=10): explicit kwargs override the function defaults.
@nt.with_setup(clear_display)
def test_call_interact_kwargs():
    def foo(a='default'):
        pass
    with tt.monkeypatch(interaction, 'display', record_display):
        ifoo = interact(foo, a=10)
    nt.assert_equal(len(displayed), 1)
    w = displayed[0].children[0]
    check_widget(w,
        cls=widgets.IntSlider,
        value=10,
    )
@nt.with_setup(clear_display)
def test_call_decorated_on_trait_change():
    """test calling @interact decorated functions"""
    d = {}
    with tt.monkeypatch(interaction, 'display', record_display):
        @interact
        def foo(a='default'):
            d['a'] = a
            return a
    nt.assert_equal(len(displayed), 1)
    w = displayed[0].children[0]
    check_widget(w,
        cls=widgets.Text,
        value='default',
    )
    # test calling the function directly
    a = foo('hello')
    nt.assert_equal(a, 'hello')
    nt.assert_equal(d['a'], 'hello')
    # test that setting trait values calls the function
    w.value = 'called'
    nt.assert_equal(d['a'], 'called')
@nt.with_setup(clear_display)
def test_call_decorated_kwargs_on_trait_change():
    """test calling @interact(foo=bar) decorated functions"""
    d = {}
    with tt.monkeypatch(interaction, 'display', record_display):
        @interact(a='kwarg')
        def foo(a='default'):
            d['a'] = a
            return a
    nt.assert_equal(len(displayed), 1)
    w = displayed[0].children[0]
    check_widget(w,
        cls=widgets.Text,
        value='kwarg',
    )
    # test calling the function directly
    a = foo('hello')
    nt.assert_equal(a, 'hello')
    nt.assert_equal(d['a'], 'hello')
    # test that setting trait values calls the function
    w.value = 'called'
    nt.assert_equal(d['a'], 'called')
def test_fixed():
    """Arguments wrapped in widgets.fixed() must not get a widget of their own."""
    container = interactive(f, a=widgets.fixed(5), b='text')
    # Only the 'b' argument should have produced a child widget.
    nt.assert_equal(len(container.children), 1)
    only_child = container.children[0]
    check_widget(
        only_child,
        cls=widgets.Text,
        value='text',
        description='b',
    )
# With no explicit widget, the parameter name becomes the description.
def test_default_description():
    c = interactive(f, b='text')
    w = c.children[0]
    check_widget(w,
        cls=widgets.Text,
        value='text',
        description='b',
    )
# An explicitly provided widget keeps its own description.
def test_custom_description():
    c = interactive(f, b=widgets.Text(value='text', description='foo'))
    w = c.children[0]
    check_widget(w,
        cls=widgets.Text,
        value='text',
        description='foo',
    )
def test_interact_manual_button():
    """__manual=True adds a run Button as the first child."""
    container = interactive(f, __manual=True)
    first_child = container.children[0]
    check_widget(first_child, cls=widgets.Button)
def test_interact_manual_nocall():
    """__manual=True must not invoke the function until the button is pressed.

    Changing a widget value should therefore leave the call counter at 0.
    """
    # Use a one-element list as a mutable cell: the original
    # ``callcount += 1`` inside the closure rebinds a *local* name without
    # a nonlocal declaration, so if the callback ever actually fired the
    # test would die with UnboundLocalError instead of failing cleanly.
    callcount = [0]
    def calltest(testarg):
        callcount[0] += 1
    c = interactive(calltest, testarg=5, __manual=True)
    c.children[0].value = 10
    nt.assert_equal(callcount[0], 0)
# Exercises IntRangeSlider invariants: values are sorted and clipped to
# [min, max], min/max changes re-clip the value, and inconsistent
# constructor arguments or bound updates raise ValueError.  NOTE: the
# assertions are order-dependent — each step mutates the same widget.
def test_int_range_logic():
    irsw = widgets.IntRangeSlider
    w = irsw(value=(2, 4), min=0, max=6)
    check_widget(w, cls=irsw, value=(2, 4), min=0, max=6)
    w.value = (4, 2)
    check_widget(w, cls=irsw, value=(2, 4), min=0, max=6)
    w.value = (-1, 7)
    check_widget(w, cls=irsw, value=(0, 6), min=0, max=6)
    w.min = 3
    check_widget(w, cls=irsw, value=(3, 6), min=3, max=6)
    w.max = 3
    check_widget(w, cls=irsw, value=(3, 3), min=3, max=3)
    w.min = 0
    w.max = 6
    w.lower = 2
    w.upper = 4
    check_widget(w, cls=irsw, value=(2, 4), min=0, max=6)
    w.value = (0, 1)  # lower non-overlapping range
    check_widget(w, cls=irsw, value=(0, 1), min=0, max=6)
    w.value = (5, 6)  # upper non-overlapping range
    check_widget(w, cls=irsw, value=(5, 6), min=0, max=6)
    w.value = (-1, 4)  # semi out-of-range
    check_widget(w, cls=irsw, value=(0, 4), min=0, max=6)
    w.lower = 2
    check_widget(w, cls=irsw, value=(2, 4), min=0, max=6)
    w.value = (-2, -1)  # wholly out of range
    check_widget(w, cls=irsw, value=(0, 0), min=0, max=6)
    w.value = (7, 8)
    check_widget(w, cls=irsw, value=(6, 6), min=0, max=6)
    # bound updates that would invalidate the current state must raise
    with nt.assert_raises(ValueError):
        w.min = 7
    with nt.assert_raises(ValueError):
        w.max = -1
    with nt.assert_raises(ValueError):
        w.lower = 5
    with nt.assert_raises(ValueError):
        w.upper = 1
    w = irsw(min=2, max=3)
    check_widget(w, min=2, max=3)
    # default value is the middle half of the range
    w = irsw(min=100, max=200)
    check_widget(w, lower=125, upper=175, value=(125, 175))
    with nt.assert_raises(ValueError):
        irsw(value=(2, 4), lower=3)
    with nt.assert_raises(ValueError):
        irsw(value=(2, 4), upper=3)
    with nt.assert_raises(ValueError):
        irsw(value=(2, 4), lower=3, upper=3)
    with nt.assert_raises(ValueError):
        irsw(min=2, max=1)
    with nt.assert_raises(ValueError):
        irsw(lower=5)
    with nt.assert_raises(ValueError):
        irsw(upper=5)
# Float twin of test_int_range_logic: same clipping/sorting invariants
# for FloatRangeSlider.  Order-dependent — each step mutates the widget.
def test_float_range_logic():
    frsw = widgets.FloatRangeSlider
    w = frsw(value=(.2, .4), min=0., max=.6)
    check_widget(w, cls=frsw, value=(.2, .4), min=0., max=.6)
    w.value = (.4, .2)
    check_widget(w, cls=frsw, value=(.2, .4), min=0., max=.6)
    w.value = (-.1, .7)
    check_widget(w, cls=frsw, value=(0., .6), min=0., max=.6)
    w.min = .3
    check_widget(w, cls=frsw, value=(.3, .6), min=.3, max=.6)
    w.max = .3
    check_widget(w, cls=frsw, value=(.3, .3), min=.3, max=.3)
    w.min = 0.
    w.max = .6
    w.lower = .2
    w.upper = .4
    check_widget(w, cls=frsw, value=(.2, .4), min=0., max=.6)
    w.value = (0., .1)  # lower non-overlapping range
    check_widget(w, cls=frsw, value=(0., .1), min=0., max=.6)
    w.value = (.5, .6)  # upper non-overlapping range
    check_widget(w, cls=frsw, value=(.5, .6), min=0., max=.6)
    w.value = (-.1, .4)  # semi out-of-range
    check_widget(w, cls=frsw, value=(0., .4), min=0., max=.6)
    w.lower = .2
    check_widget(w, cls=frsw, value=(.2, .4), min=0., max=.6)
    w.value = (-.2, -.1)  # wholly out of range
    check_widget(w, cls=frsw, value=(0., 0.), min=0., max=.6)
    w.value = (.7, .8)
    check_widget(w, cls=frsw, value=(.6, .6), min=.0, max=.6)
    # bound updates that would invalidate the current state must raise
    with nt.assert_raises(ValueError):
        w.min = .7
    with nt.assert_raises(ValueError):
        w.max = -.1
    with nt.assert_raises(ValueError):
        w.lower = .5
    with nt.assert_raises(ValueError):
        w.upper = .1
    w = frsw(min=2, max=3)
    check_widget(w, min=2, max=3)
    # default value is the middle half of the range
    w = frsw(min=1., max=2.)
    check_widget(w, lower=1.25, upper=1.75, value=(1.25, 1.75))
    with nt.assert_raises(ValueError):
        frsw(value=(2, 4), lower=3)
    with nt.assert_raises(ValueError):
        frsw(value=(2, 4), upper=3)
    with nt.assert_raises(ValueError):
        frsw(value=(2, 4), lower=3, upper=3)
    with nt.assert_raises(ValueError):
        frsw(min=.2, max=.1)
    with nt.assert_raises(ValueError):
        frsw(lower=5)
    with nt.assert_raises(ValueError):
        frsw(upper=5)
| mattvonrocketstein/smash | smashlib/ipy3x/html/widgets/tests/test_interaction.py | Python | mit | 18,598 |
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
import unittest
from iptest import IronPythonTestCase, is_cli, is_mono, is_netcoreapp, is_posix, run_test, skipUnlessIronPython
from types import FunctionType
# NOTE: `global` at module scope is a no-op; kept for the scoping tests below.
global init
# defined globally because unqualified exec isn't allowed in
# a nested function.
# Python 2 only: a bare `exec` statement together with a free-variable
# read, used to exercise CPython/IronPython scoping rules.
def unqualified_exec():
    print x
    exec ""
# Clone function `f` under a new name, reusing its code, globals,
# defaults and closure (Python 2 `func_*` attributes).
def copyfunc(f, name):
    return FunctionType(f.func_code, f.func_globals, name, f.func_defaults, f.func_closure)
# Rebuild `f` with a different globals dict (and a new name).
def substitute_globals(f, name, globals):
    return FunctionType(f.func_code, globals, name, f.func_defaults, f.func_closure)
global_variable = 13
def create_fn_with_closure():
    """Return a closure over a local variable; calling it yields 13."""
    captured = 13
    def reader():
        return captured
    return reader
def x(a, b, c):
    """Return c when a < b; otherwise a + b when c < 5; otherwise 8."""
    fallback = 8
    if a < b:
        return c
    if c < 5:
        return a + b
    return fallback
class C1:
    """Methods taking 0..7 positional arguments; each returns its arity."""
    def f0(self):
        return 0
    def f1(self, a):
        return 1
    def f2(self, a, b):
        return 2
    def f3(self, a, b, c):
        return 3
    def f4(self, a, b, c, d):
        return 4
    def f5(self, a, b, c, d, e):
        return 5
    def f6(self, a, b, c, d, e, f):
        return 6
    def f7(self, a, b, c, d, e, f, g):
        return 7
class FunctionTest(IronPythonTestCase):
    def test_basics(self):
        # Branch coverage of the module-level x() helper.
        self.assertTrue(x(1,2,10) == 10)
        self.assertTrue(x(2,1,4) == 3)
        self.assertTrue(x(1,1,10) == 8)
        # Functions accept arbitrary attributes.
        def f():
            pass
        f.a = 10
        self.assertTrue(f.a == 10)
        self.assertEqual(f.__module__, __name__)
        # ...including attributes set from inside the function itself.
        def g():
            g.a = 20
        g()
        self.assertTrue(g.a == 20)
        def foo(): pass
        self.assertEqual(foo.func_code.co_filename.lower().endswith('test_function.py'), True)
        self.assertEqual(foo.func_code.co_firstlineno, 76)  # if you added lines to the top of this file you need to update this number.
    def test_inherit_function(self):
        def foo(): pass
        # Cannot inherit from a function
        def CreateSubType(t):
            class SubType(t): pass
            return SubType
        self.assertRaisesRegexp(TypeError, ".*\n?.* is not an acceptable base type", CreateSubType, type(foo))
    def test_varargs(self):
        # *args should round-trip unchanged through a forwarding call.
        def a(*args): return args
        def b(*args): return a(*args)
        self.assertEqual(b(1,2,3), (1,2,3))
    def test_default_values(self):
        # xwd mirrors the module-level x() but with defaults for every arg.
        def xwd(a=0,b=1,c=3):
            z = 8
            if a < b:
                return c
            elif c < 5 :
                return a + b
            else:
                return z
        self.assertEqual(x,x)
        self.assertEqual(xwd(), 3)
        self.assertRaises(TypeError, (lambda:x()))
        self.assertEqual(xwd(2), 3)
        self.assertRaises(TypeError, (lambda:x(1)))
        self.assertEqual(xwd(0,5), 3)
        self.assertRaises(TypeError, (lambda:x(0,5)))
        self.assertEqual( (x == "not-a-Function3"), False)
    def test_missin_params(self):
        # Calling with too few args raises TypeError unless defaults fill in.
        def y(a,b,c,d):
            return a+b+c+d
        def ywd(a=0, b=1, c=2, d=3):
            return a+b+c+d
        self.assertEqual(y, y)
        self.assertEqual(ywd(), 6)
        self.assertRaises(TypeError, y)
        self.assertEqual(ywd(4), 10)
        self.assertRaises(TypeError, y, 4)
        self.assertEqual(ywd(4,5), 14)
        self.assertRaises(TypeError, y, 4, 5)
        self.assertEqual(ywd(4,5,6), 18)
        self.assertRaises(TypeError, y, 4,5,6)
        self.assertEqual( (y == "not-a-Function4"), False)
    def test__doc__(self):
        # A bare leading string literal becomes the function's docstring.
        def foo(): "hello world"
        self.assertEqual(foo.__doc__, 'hello world')
    def test_coverage(self):
        # function5
        # Sweep every arity against functions whose defaults cover
        # progressively fewer trailing parameters.
        def f1(a=1, b=2, c=3, d=4, e=5): return a * b * c * d * e
        def f2(a, b=2, c=3, d=4, e=5): return a * b * c * d * e
        def f3(a, b, c=3, d=4, e=5): return a * b * c * d * e
        def f4(a, b, c, d=4, e=5): return a * b * c * d * e
        def f5(a, b, c, d, e=5): return a * b * c * d * e
        def f6(a, b, c, d, e): return a * b * c * d * e
        for f in (f1, f2, f3, f4, f5, f6):
            self.assertRaises(TypeError, f, 1, 1, 1, 1, 1, 1) # 6 args
            self.assertEqual(f(10,11,12,13,14), 10 * 11 * 12 * 13 * 14)     # 5 args
        for f in (f1, f2, f3, f4, f5):
            self.assertEqual(f(10,11,12,13), 10 * 11 * 12 * 13 * 5)         # 4 args
        for f in (f6,):
            self.assertRaises(TypeError, f, 1, 1, 1, 1)
        for f in (f1, f2, f3, f4):
            self.assertEqual(f(10,11,12), 10 * 11 * 12 * 4 * 5)             # 3 args
        for f in (f5, f6):
            self.assertRaises(TypeError, f, 1, 1, 1)
        for f in (f1, f2, f3):
            self.assertEqual(f(10,11), 10 * 11 * 3 * 4 * 5)                 # 2 args
        for f in (f4, f5, f6):
            self.assertRaises(TypeError, f, 1, 1)
        for f in (f1, f2):
            self.assertEqual(f(10), 10 * 2 * 3 * 4 * 5)                     # 1 args
        for f in (f3, f4, f5, f6):
            self.assertRaises(TypeError, f, 1)
        for f in (f1,):
            self.assertEqual(f(), 1 * 2 * 3 * 4 * 5)                        # no args
        for f in (f2, f3, f4, f5, f6):
            self.assertRaises(TypeError, f)
    def test_class_method(self):
        # method
        # Generates bound/unbound calls for every arity via exec (Py2).
        class C2: pass
        c1, c2 = C1(), C2()
        line = ""
        for i in range(8):
            args = ",".join(['1'] * i)
            line += "self.assertEqual(c1.f%d(%s), %d)\n" % (i, args, i)
            line += "self.assertEqual(C1.f%d(c1,%s), %d)\n" % (i, args, i)
            #line += "try: C1.f%d(%s) \nexcept TypeError: pass \nelse: raise AssertionError\n" % (i, args)
            #line += "try: C1.f%d(c2, %s) \nexcept TypeError: pass \nelse: raise AssertionError\n" % (i, args)
        #print line
        exec line
    def test_set_attr_instance_method(self):
        # Attributes cannot be set on a bound method, only on im_func.
        def SetAttrOfInstanceMethod():
            C1.f0.attr = 1
        self.assertRaises(AttributeError, SetAttrOfInstanceMethod)
        C1.f0.im_func.attr = 1
        self.assertEqual(C1.f0.attr, 1)
        self.assertEqual(dir(C1.f0).__contains__("attr"), True)
        self.assertEqual(C1.f0.__module__, __name__)
    def test_kwargs(self):
        # Mixing positional, *args unpacking and **kwargs in calls.
        def f(x=0, y=10, z=20, *args, **kws):
            return (x, y, z), args, kws
        self.assertTrue(f(10, l=20) == ((10, 10, 20), (), {'l': 20}))
        self.assertTrue(f(1, *(2,), **{'z':20}) == ((1, 2, 20), (), {}))
        self.assertTrue(f(*[1,2,3]) == ((1, 2, 3), (), {}))
        def a(*args, **kws): return args, kws
        def b(*args, **kws):
            return a(*args, **kws)
        self.assertTrue(b(1,2,3, x=10, y=20) == ((1, 2, 3), {'y': 20, 'x': 10}))
        def b(*args, **kws):
            return a(**kws)
        self.assertTrue(b(1,2,3, x=10, y=20) == ((), {'y': 20, 'x': 10}))
        # **-unpacking a non-mapping must raise TypeError.
        try:
            b(**[])
            self.assertTrue(False)
        except TypeError:
            pass
        # *-unpacking works with lists, generators and range objects.
        def f(x, *args):
            return (x, args)
        self.assertEqual(f(1, *[2]), (1, (2,)))
        self.assertEqual(f(7, *(i for i in range(3))), (7, (0, 1, 2,)))
        self.assertEqual(f(9, *range(11, 13)), (9, (11, 12)))
def test_sorted_kwargs(self):
"""verify we can call sorted w/ keyword args"""
import operator
inventory = [('apple', 3), ('banana', 2), ('pear', 5), ('orange', 1)]
getcount = operator.itemgetter(1)
sorted_inventory = sorted(inventory, key=getcount)
    def test_kwargs2(self):
        """verify proper handling of keyword args for python functions"""
        def kwfunc(a,b,c): pass
        # duplicate value for 'b' (positional + keyword) must raise
        try:
            kwfunc(10, 20, b=30)
            self.assertTrue(False)
        except TypeError:
            pass
        try:
            kwfunc(10, None, b=30)
            self.assertTrue(False)
        except TypeError:
            pass
        # too many positionals plus a duplicate keyword
        try:
            kwfunc(10, None, 40, b=30)
            self.assertTrue(False)
        except TypeError:
            pass
        if is_cli:
            # IronPython: **-unpacking of .NET dictionary/hashtable types
            import System
            htlist = [System.Collections.Generic.Dictionary[System.Object, System.Object](), System.Collections.Hashtable()]
            for ht in htlist:
                def foo(**kwargs):
                    return kwargs['key']
                ht['key'] = 'xyz'
                self.assertEqual(foo(**ht), 'xyz')
        # keyword before *-unpacked positional
        def foo(a,b):
            return a-b
        self.assertEqual(foo(b=1, *(2,)), 1)
        # kw-args passed to init through method instance
        s = self
        class foo:
            def __init__(self, group=None, target=None):
                s.assertEqual(group, None)
                s.assertEqual(target,'baz')
        a = foo(target='baz')
        foo.__init__(a, target='baz')
    @skipUnlessIronPython()
    def test_params_method_no_params(self):
        """call a params method w/ no params"""
        import clr
        import System
        self.assertEqual('abc\ndef'.Split()[0], 'abc')
        self.assertEqual('abc\ndef'.Split()[1], 'def')
        x = 'a bc   def'.Split()
        self.assertEqual(x[0], 'a')
        self.assertEqual(x[1], 'bc')
        self.assertEqual(x[2], '')
        self.assertEqual(x[3], '')
        self.assertEqual(x[4], 'def')
        # calling Double.ToString(...) should work - Double is
        # an OpsExtensibleType and doesn't define __str__ on this
        # overload
        self.assertEqual(System.Double.ToString(1.0, 'f', System.Globalization.CultureInfo.InvariantCulture), '1.00')
    def test_incorrect_number_of_args(self):
        """Incorrect number of arguments"""
        # exact-arity function
        def f(a): pass
        self.assertRaisesMessage(TypeError, "f() takes exactly 1 argument (0 given)", f)
        self.assertRaisesMessage(TypeError, "f() takes exactly 1 argument (3 given)", f, 1, 2, 3)
        self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", f, dummy=2)
        self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", f, dummy=2)
        #self.assertRaises calls f(*args), which generates a different AST than f(1,2,3)
        self.assertRaisesMessage(TypeError, "f() takes exactly 1 argument (0 given)", lambda:f())
        self.assertRaisesMessage(TypeError, "f() takes exactly 1 argument (3 given)", lambda:f(1, 2, 3))
        self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", lambda:f(dummy=2))
        self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", lambda:f(1, dummy=2))
        # many-parameter function
        def f(a,b,c,d,e,f,g,h,i,j): pass
        self.assertRaisesMessage(TypeError, "f() takes exactly 10 arguments (0 given)", f)
        self.assertRaisesMessage(TypeError, "f() takes exactly 10 arguments (3 given)", f, 1, 2, 3)
        self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", f, dummy=2)
        self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", f, dummy=2)
        self.assertRaisesMessage(TypeError, "f() takes exactly 10 arguments (0 given)", lambda:f())
        self.assertRaisesMessage(TypeError, "f() takes exactly 10 arguments (3 given)", lambda:f(1, 2, 3))
        self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", lambda:f(dummy=2))
        self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", lambda:f(1, dummy=2))
        # default argument changes "exactly" into "at least"/"at most"
        def f(a, b=2): pass
        self.assertRaisesMessage(TypeError, "f() takes at least 1 argument (0 given)", f)
        self.assertRaisesMessage(TypeError, "f() takes at most 2 arguments (3 given)", f, 1, 2, 3)
        if is_cli: #CPython bug 9326
            self.assertRaisesMessage(TypeError, "f() takes at least 1 non-keyword argument (0 given)", f, b=2)
        else:
            self.assertRaisesMessage(TypeError, "f() takes at least 1 argument (1 given)", f, b=2)
        self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", f, dummy=3)
        self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", f, b=2, dummy=3)
        self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", f, 1, dummy=3)
        self.assertRaisesMessage(TypeError, "f() takes at least 1 argument (0 given)", lambda:f())
        self.assertRaisesMessage(TypeError, "f() takes at most 2 arguments (3 given)", lambda:f(1, 2, 3))
        if is_cli: #CPython bug 9326
            self.assertRaisesMessage(TypeError, "f() takes at least 1 non-keyword argument (0 given)", lambda:f(b=2))
        else:
            self.assertRaisesMessage(TypeError, "f() takes at least 1 argument (1 given)", lambda:f(b=2))
        self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", lambda:f(dummy=3))
        self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", lambda:f(b=2, dummy=3))
        self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", lambda:f(1, dummy=3))
        # *args swallows any number of extra positionals
        def f(a, *argList): pass
        self.assertRaisesMessage(TypeError, "f() takes at least 1 argument (0 given)", f)
        self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", f, dummy=2)
        self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", f, 1, dummy=2)
        self.assertRaisesMessage(TypeError, "f() takes at least 1 argument (0 given)", lambda:f())
        self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", lambda:f(dummy=2))
        self.assertRaisesMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", lambda:f(1, dummy=2))
        # **kwargs swallows unexpected keywords but not extra positionals
        def f(a, **keywordDict): pass
        self.assertRaisesMessage(TypeError, "f() takes exactly 1 argument (0 given)", f)
        self.assertRaisesMessage(TypeError, "f() takes exactly 1 argument (3 given)", f, 1, 2, 3)
        if is_cli: #CPython bug 9326
            self.assertRaisesMessage(TypeError, "f() takes exactly 1 non-keyword argument (0 given)", f, dummy=2)
            self.assertRaisesMessage(TypeError, "f() takes exactly 1 non-keyword argument (0 given)", f, dummy=2, dummy2=3)
        else:
            self.assertRaisesMessage(TypeError, "f() takes exactly 1 argument (0 given)", f, dummy=2)
            self.assertRaisesMessage(TypeError, "f() takes exactly 1 argument (0 given)", f, dummy=2, dummy2=3)
        self.assertRaisesMessage(TypeError, "f() takes exactly 1 argument (0 given)", lambda:f())
        self.assertRaisesMessage(TypeError, "f() takes exactly 1 argument (3 given)", lambda:f(1, 2, 3))
        if is_cli: #CPython bug 9326
            self.assertRaisesMessage(TypeError, "f() takes exactly 1 non-keyword argument (0 given)", lambda:f(dummy=2))
            self.assertRaisesMessage(TypeError, "f() takes exactly 1 non-keyword argument (0 given)", lambda:f(dummy=2, dummy2=3))
        else:
            self.assertRaisesMessage(TypeError, "f() takes exactly 1 argument (0 given)", lambda:f(dummy=2))
            self.assertRaisesMessage(TypeError, "f() takes exactly 1 argument (0 given)", lambda:f(dummy=2, dummy2=3))
        # builtin functions word their messages slightly differently
        self.assertRaisesMessages(TypeError, "abs() takes exactly 1 argument (0 given)",
                                  "abs() takes exactly one argument (0 given)", abs)
        self.assertRaisesMessages(TypeError, "abs() takes exactly 1 argument (3 given)",
                                  "abs() takes exactly one argument (3 given)", abs, 1, 2, 3)
        self.assertRaisesMessages(TypeError, "abs() got an unexpected keyword argument 'dummy'",
                                  "abs() takes no keyword arguments", abs, dummy=2)
        self.assertRaisesMessages(TypeError, "abs() takes exactly 1 argument (2 given)",
                                  "abs() takes no keyword arguments", abs, 1, dummy=2)
        self.assertRaisesMessages(TypeError, "abs() takes exactly 1 argument (0 given)",
                                  "abs() takes exactly one argument (0 given)", lambda:abs())
        self.assertRaisesMessages(TypeError, "abs() takes exactly 1 argument (3 given)",
                                  "abs() takes exactly one argument (3 given)", lambda:abs(1, 2, 3))
        self.assertRaisesMessages(TypeError, "abs() got an unexpected keyword argument 'dummy'",
                                  "abs() takes no keyword arguments", lambda:abs(dummy=2))
        self.assertRaisesMessages(TypeError, "abs() takes exactly 1 argument (2 given)",
                                  "abs() takes no keyword arguments", lambda:abs(1, dummy=2))
        # list([m]) has one default argument (built-in type)
        #self.assertRaisesMessage(TypeError, "list() takes at most 1 argument (2 given)", list, 1, 2)
        #self.assertRaisesMessage(TypeError, "'dummy' is an invalid keyword argument for this function", apply, list, [], dict({"dummy":2}))
        #======== BUG 697 ===========
        #self.assertRaisesMessage(TypeError, "'dummy' is an invalid keyword argument for this function", apply, list, [1], dict({"dummy":2}))
        # complex([x,y]) has two default argument (OpsReflectedType type)
        #self.assertRaisesMessage(TypeError, "complex() takes at most 2 arguments (3 given)", complex, 1, 2, 3)
        #self.assertRaisesMessage(TypeError, "'dummy' is an invalid keyword argument for this function", apply, complex, [], dict({"dummy":2}))
        #self.assertRaisesMessage(TypeError, "'dummy' is an invalid keyword argument for this function", apply, complex, [1], dict({"dummy":2}))
        # bool([x]) has one default argument (OpsReflectedType and valuetype type)
        #self.assertRaisesMessage(TypeError, "bool() takes at most 1 argument (2 given)", bool, 1, 2)
        #self.assertRaisesMessage(TypeError, "'dummy' is an invalid keyword argument for this function", apply, bool, [], dict({"dummy":2}))
        #self.assertRaisesMessage(TypeError, "'dummy' is an invalid keyword argument for this function", apply, bool, [1], dict({"dummy":2}))
        # user-defined classes reject constructor arguments they don't take
        class UserClass(object): pass
        self.assertRaisesMessage(TypeError, "object.__new__() takes no parameters", UserClass, 1)
        self.assertRaisesMessage(TypeError, "object.__new__() takes no parameters", apply, UserClass, [], dict({"dummy":2}))
        class OldStyleClass: pass
        self.assertRaisesMessage(TypeError, "this constructor takes no arguments", OldStyleClass, 1)
        self.assertRaisesMessage(TypeError, "this constructor takes no arguments", apply, OldStyleClass, [], dict({"dummy":2}))
    @skipUnlessIronPython()
    def test_runtime_type_checking(self):
        """accepts / returns runtype type checking tests"""
        import clr
        # accepts(object): anything goes
        @clr.accepts(object)
        def foo(x):
            return x
        self.assertEqual(foo('abc'), 'abc')
        self.assertEqual(foo(2), 2)
        self.assertEqual(foo(2L), 2L)
        self.assertEqual(foo(2.0), 2.0)
        self.assertEqual(foo(True), True)
        # accepts(str): non-strings are rejected with AssertionError
        @clr.accepts(str)
        def foo(x):
            return x
        self.assertEqual(foo('abc'), 'abc')
        self.assertRaises(AssertionError, foo, 2)
        self.assertRaises(AssertionError, foo, 2L)
        self.assertRaises(AssertionError, foo, 2.0)
        self.assertRaises(AssertionError, foo, True)
        # multiple parameter types checked positionally
        @clr.accepts(str, bool)
        def foo(x, y):
            return x, y
        self.assertEqual(foo('abc', True), ('abc', True))
        self.assertRaises(AssertionError, foo, ('abc',2))
        self.assertRaises(AssertionError, foo, ('abc',2L))
        self.assertRaises(AssertionError, foo, ('abc',2.0))
        # clr.Self() stands in for the instance parameter on methods
        class bar:
            @clr.accepts(clr.Self(), str)
            def foo(self, x):
                return x
        a = bar()
        self.assertEqual(a.foo('xyz'), 'xyz')
        self.assertRaises(AssertionError, a.foo, 2)
        self.assertRaises(AssertionError, a.foo, 2L)
        self.assertRaises(AssertionError, a.foo, 2.0)
        self.assertRaises(AssertionError, a.foo, True)
        # returns(str): the return value is checked, not the argument
        @clr.returns(str)
        def foo(x):
            return x
        self.assertEqual(foo('abc'), 'abc')
        self.assertRaises(AssertionError, foo, 2)
        self.assertRaises(AssertionError, foo, 2L)
        self.assertRaises(AssertionError, foo, 2.0)
        self.assertRaises(AssertionError, foo, True)
        # accepts + returns stacked on one function
        @clr.accepts(bool)
        @clr.returns(str)
        def foo(x):
            if x: return str(x)
            else: return 0
        self.assertEqual(foo(True), 'True')
        self.assertRaises(AssertionError, foo, 2)
        self.assertRaises(AssertionError, foo, 2)
        self.assertRaises(AssertionError, foo, False)
        # returns(None): only None is an acceptable return value
        @clr.returns(None)
        def foo(): pass
        self.assertEqual(foo(), None)
    def test_error_message(self):
        # Error messages for wrong arg counts must name the callable.
        try:
            buffer()
        except TypeError, e:
            # make sure we get the right type name when calling w/ wrong # of args
            self.assertEqual(str(e)[:8], 'buffer()')
        #try:
        #    list(1,2,3)
        #except TypeError, e:
            # make sure we get the right type name when calling w/ wrong # of args
        #    self.assertEqual(str(e)[:6], 'list()')
        # oldinstance
        # Unbound-method calls with missing/wrong-typed self must raise.
        class foo:
            def bar(self): pass
            def bar1(self, xyz): pass
        class foo2: pass
        class foo3(object): pass
        self.assertRaises(TypeError, foo.bar)
        self.assertRaises(TypeError, foo.bar1, None, None)
        self.assertRaises(TypeError, foo.bar1, None, 'abc')
        self.assertRaises(TypeError, foo.bar1, 'xyz', 'abc')
        self.assertRaises(TypeError, foo.bar, foo2())
        self.assertRaises(TypeError, foo.bar, foo3())
        # usertype
        class foo(object):
            def bar(self): pass
            def bar1(self, xyz): pass
        self.assertRaises(TypeError, foo.bar)
        self.assertRaises(TypeError, foo.bar1, None, None)
        self.assertRaises(TypeError, foo.bar1, None, 'abc')
        self.assertRaises(TypeError, foo.bar1, 'xyz', 'abc')
        self.assertRaises(TypeError, foo.bar, foo2())
        self.assertRaises(TypeError, foo.bar, foo3())
    def test_caller_context(self):
        # access a method w/ caller context w/ an args parameter.
        def foo(*args):
            return hasattr(*args)
        self.assertEqual(foo('', 'index'), True)
    @skipUnlessIronPython()
    def test_dispatch_to_ReflectOptimized(self):
        """dispatch to a ReflectOptimized method"""
        # Spawns a fresh IronPython console and checks keyword dispatch to
        # a .NET method with default parameters.
        from iptest.console_util import IronPythonInstance
        from System import Environment
        from sys import executable
        wkdir = self.test_dir
        if "-X:LightweightScopes" in Environment.GetCommandLineArgs():
            ipi = IronPythonInstance(executable, wkdir, "-X:LightweightScopes", "-X:BasicConsole")
        else:
            ipi = IronPythonInstance(executable, wkdir, "-X:BasicConsole")
        if (ipi.Start()):
            try:
                result = ipi.ExecuteLine("from iptest.ipunittest import load_ironpython_test")
                result = ipi.ExecuteLine("load_ironpython_test()")
                result = ipi.ExecuteLine("from IronPythonTest import DefaultParams")
                response = ipi.ExecuteLine("DefaultParams.FuncWithDefaults(1100, z=82)")
                self.assertEqual(response, '1184')
            finally:
                ipi.End()
    def test_zip(self):
        # zip(*...) transposes; repeating the same pair gives two 10-tuples.
        p = ((1, 2),)
        self.assertEqual(zip(*(p * 10)), [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1), (2, 2, 2, 2, 2, 2, 2, 2, 2, 2)])
        self.assertEqual(zip(*(p * 10)), [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1), (2, 2, 2, 2, 2, 2, 2, 2, 2, 2)])
    def test_super(self):
        """Exercise super(): unbound, type/type, type/instance, and the
        classic name-mangled descriptor idiom."""
        class A(object): pass
        class B(A): pass
        #unbound super: __self__/__self_class__ stay None
        for x in [super(B), super(B,None)]:
            self.assertEqual(x.__thisclass__, B)
            self.assertEqual(x.__self__, None)
            self.assertEqual(x.__self_class__, None)
        # super w/ both types
        x = super(B,B)
        self.assertEqual(x.__thisclass__,B)
        self.assertEqual(x.__self_class__, B)
        self.assertEqual(x.__self__, B)
        # super w/ type and instance
        b = B()
        x = super(B, b)
        self.assertEqual(x.__thisclass__,B)
        self.assertEqual(x.__self_class__, B)
        self.assertEqual(x.__self__, b)
        # super w/ mixed types
        x = super(A,B)
        self.assertEqual(x.__thisclass__,A)
        self.assertEqual(x.__self_class__, B)
        self.assertEqual(x.__self__, B)
        # invalid super cases: second arg must be an instance or subclass of
        # the first
        try:
            x = super(B, 'abc')
            self.assertUnreachable()
        except TypeError:
            pass
        try:
            super(B,A)
            self.assertUnreachable()
        except TypeError:
            pass
        class A(object):
            def __init__(self, name):
                self.__name__ = name
            def meth(self):
                return self.__name__
            classmeth = classmethod(meth)
        class B(A): pass
        b = B('derived')
        self.assertEqual(super(B,b).__thisclass__.__name__, 'B')
        self.assertEqual(super(B,b).__self__.__name__, 'derived')
        self.assertEqual(super(B,b).__self_class__.__name__, 'B')
        # classmeth receives the class, whose __name__ is 'B'
        self.assertEqual(super(B,b).classmeth(), 'B')
        # descriptor super: store an unbound super object under the mangled
        # attribute name so self.__super binds on access
        class A(object):
            def meth(self): return 'A'
        class B(A):
            def meth(self):
                return 'B' + self.__super.meth()
        B._B__super = super(B)
        b = B()
        self.assertEqual(b.meth(), 'BA')
    def test_class_method_calls(self):
        """class method should get correct meta class."""
        # A classmethod bound on a plain new-style class reports im_class as
        # 'type'; with a custom __metaclass__ it must report that metaclass.
        class D(object):
            @classmethod
            def classmeth(cls): pass
        self.assertEqual(D.classmeth.im_class, type)
        class MetaType(type): pass
        class D(object):
            __metaclass__ = MetaType
            @classmethod
            def classmeth(cls): pass
        self.assertEqual(D.classmeth.im_class, MetaType)
    def test_cases(self):
        # Table-driven check that __new__/__init__ of builtin subtypes see the
        # expected values and that instantiation yields the subclass (except
        # where 'match' is False, e.g. subclassing 'type').
        def runTest(testCase):
            class foo(testCase.subtype):
                def __new__(cls, param):
                    ret = testCase.subtype.__new__(cls, param)
                    self.assertTrue(ret == testCase.newEq)
                    self.assertTrue((ret != testCase.newEq) != True)
                    return ret
                # NOTE(review): this 'self' parameter shadows the enclosing
                # test's 'self'; self.assertTrue here resolves against the foo
                # instance — verify this is intended.
                def __init__(self, param):
                    testCase.subtype.__init__(self, param)
                    self.assertTrue(self == testCase.initEq)
                    self.assertTrue((self != testCase.initEq) != True)
            a = foo(testCase.param)
            self.assertTrue((type(a) == foo) == testCase.match)
        class TestCase(object):
            __slots__ = ['subtype', 'newEq', 'initEq', 'match', 'param']
            def __init__(self, subtype, newEq, initEq, match, param):
                self.match = match
                self.subtype = subtype
                self.newEq = newEq
                self.initEq = initEq
                self.param = param
        cases = [TestCase(int, 2, 2, True, 2),
                TestCase(list, [], [2,3,4], True, (2,3,4)),
                TestCase(deque, deque(), deque((2,3,4)), True, (2,3,4)),
                TestCase(set, set(), set((2,3,4)), True, (2,3,4)),
                TestCase(frozenset, frozenset((2,3,4)), frozenset((2,3,4)), True, (2,3,4)),
                TestCase(tuple, (2,3,4), (2,3,4), True, (2,3,4)),
                TestCase(str, 'abc', 'abc', True, 'abc'),
                TestCase(float, 2.3, 2.3, True, 2.3),
                TestCase(type, type(object), type(object), False, object),
                TestCase(long, 10000000000L, 10000000000L, True, 10000000000L),
                #TestCase(complex, complex(2.0, 0), complex(2.0, 0), True, 2.0),        # complex is currently a struct w/ no extensibel, we fail here
                # TestCase(file, 'abc', True),    # ???
            ]
        for case in cases:
            runTest(case)
    @unittest.skipIf(is_posix or is_netcoreapp, 'missing System.Windows.Forms support')
    @skipUnlessIronPython()
    def test_call_base_init(self):
        """verify we can call the base init directly"""
        import clr
        clr.AddReferenceByPartialName('System.Windows.Forms')
        from System.Windows.Forms import Form
        class MyForm(Form):
            def __init__(self, title):
                # Explicit unbound call to the .NET base __init__
                Form.__init__(self)
                self.Text = title
        a = MyForm('abc')
        self.assertEqual(a.Text, 'abc')
        #TestCase(bool, True, True),  # not an acceptable base type
    def test_func_flags(self):
        # co_flags bit 4 (CO_VARARGS) marks *args, bit 8 (CO_VARKEYWORDS)
        # marks **kwargs; co_argcount counts only the named positionals.
        # NOTE(review): the co_argcount half duplicates test_argcount below.
        def foo0(): pass
        def foo1(*args): pass
        def foo2(**args): pass
        def foo3(*args, **kwargs): pass
        def foo4(a): pass
        def foo5(a, *args): pass
        def foo6(a, **args): pass
        def foo7(a, *args, **kwargs): pass
        def foo8(a,b,c,d,e,f): pass
        def foo9(a,b): pass
        self.assertEqual(foo0.func_code.co_flags & 12, 0)
        self.assertEqual(foo1.func_code.co_flags & 12, 4)
        self.assertEqual(foo2.func_code.co_flags & 12, 8)
        self.assertEqual(foo3.func_code.co_flags & 12, 12)
        self.assertEqual(foo4.func_code.co_flags & 12, 0)
        self.assertEqual(foo5.func_code.co_flags & 12, 4)
        self.assertEqual(foo6.func_code.co_flags & 12, 8)
        self.assertEqual(foo7.func_code.co_flags & 12, 12)
        self.assertEqual(foo8.func_code.co_flags & 12, 0)
        self.assertEqual(foo9.func_code.co_flags & 12, 0)
        self.assertEqual(foo0.func_code.co_argcount, 0)
        self.assertEqual(foo1.func_code.co_argcount, 0)
        self.assertEqual(foo2.func_code.co_argcount, 0)
        self.assertEqual(foo3.func_code.co_argcount, 0)
        self.assertEqual(foo4.func_code.co_argcount, 1)
        self.assertEqual(foo5.func_code.co_argcount, 1)
        self.assertEqual(foo6.func_code.co_argcount, 1)
        self.assertEqual(foo7.func_code.co_argcount, 1)
        self.assertEqual(foo8.func_code.co_argcount, 6)
        self.assertEqual(foo9.func_code.co_argcount, 2)
    def test_big_calls(self):
        # check various function call sizes and boundaries
        # Sizes straddle powers of two to hit call-site/dispatch boundaries.
        sizes = [3, 4, 5, 7, 8, 9, 13, 15, 16, 17, 23, 24, 25, 31, 32, 33, 47, 48, 49, 63, 64, 65, 127, 128, 129, 254, 255, 256, 257, 258, 511, 512, 513]
        # mono has a limitation of < 1023
        if not is_mono:
            sizes.extend([1023, 1024, 1025, 2047, 2048, 2049])
        for size in sizes:
            # w/o defaults
            exec 'def f(' + ','.join(['a' + str(i) for i in range(size)]) + '): return ' + ','.join(['a' + str(i) for i in range(size)])
            # w/ defaults
            exec 'def g(' + ','.join(['a' + str(i) + '=' + str(i) for i in range(size)]) + '): return ' + ','.join(['a' + str(i) for i in range(size)])
            if size <= 255 or is_cli:
                # CPython allows function definitions > 255, but not calls w/ > 255 params.
                exec 'a = f(' + ', '.join([str(x) for x in xrange(size)]) + ')'
                self.assertEqual(a, tuple(xrange(size)))
                exec 'a = g()'
                self.assertEqual(a, tuple(xrange(size)))
            exec 'a = g(' + ', '.join([str(x) for x in xrange(size)]) + ')'
            self.assertEqual(a, tuple(xrange(size)))
            exec 'a = f(*(' + ', '.join([str(x) for x in xrange(size)]) + '))'
            self.assertEqual(a, tuple(xrange(size)))
def test_compile(self):
x = compile("print 2/3", "<string>", "exec", 8192)
self.assertTrue((x.co_flags & 8192) == 8192)
x = compile("2/3", "<string>", "eval", 8192)
self.assertEqual(eval(x), 2.0 / 3.0)
names = [ "", ".", "1", "\n", " ", "@", "%^",
"a", "A", "Abc", "aBC", "filename.py",
"longlonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglong",
"""
stuff
more stuff
last stuff
"""
]
for name in names:
self.assertEqual(compile("print 2/3", name, "exec", 8192).co_filename,
name)
    def test_filename(self):
        # The filename passed to compile() is exposed as co_filename.
        c = compile("x = 2", "test", "exec")
        self.assertEqual(c.co_filename, 'test')
    def test_name(self):
        # __name__ and func_name are aliases; both writes must show up in repr.
        def f(): pass
        f.__name__ = 'g'
        self.assertEqual(f.__name__, 'g')
        self.assertTrue(repr(f).startswith('<function g'))
        f.func_name = 'x'
        self.assertEqual(f.__name__, 'x')
        self.assertTrue(repr(f).startswith('<function x'))
    def test_argcount(self):
        # co_argcount counts only named positional parameters, not *args
        # or **kwargs.
        def foo0(): pass
        def foo1(*args): pass
        def foo2(**args): pass
        def foo3(*args, **kwargs): pass
        def foo4(a): pass
        def foo5(a, *args): pass
        def foo6(a, **args): pass
        def foo7(a, *args, **kwargs): pass
        def foo8(a,b,c,d,e,f): pass
        def foo9(a,b): pass
        self.assertEqual(foo0.func_code.co_argcount, 0)
        self.assertEqual(foo1.func_code.co_argcount, 0)
        self.assertEqual(foo2.func_code.co_argcount, 0)
        self.assertEqual(foo3.func_code.co_argcount, 0)
        self.assertEqual(foo4.func_code.co_argcount, 1)
        self.assertEqual(foo5.func_code.co_argcount, 1)
        self.assertEqual(foo6.func_code.co_argcount, 1)
        self.assertEqual(foo7.func_code.co_argcount, 1)
        self.assertEqual(foo8.func_code.co_argcount, 6)
        self.assertEqual(foo9.func_code.co_argcount, 2)
    def test_defaults(self):
        # func_defaults reflects declared defaults and is unchanged by calls;
        # the final case deliberately mutates a mutable default ([42]) to show
        # the shared-default-object behavior is observable via func_defaults.
        defaults = [None, object, int, [], 3.14, [3.14], (None,), "a string"]
        for default in defaults:
            def helperFunc(): pass
            self.assertEqual(helperFunc.func_defaults, None)
            self.assertEqual(helperFunc.func_defaults, None)
            def helperFunc1(a): pass
            self.assertEqual(helperFunc1.func_defaults, None)
            self.assertEqual(helperFunc1.func_defaults, None)
            def helperFunc2(a=default): pass
            self.assertEqual(helperFunc2.func_defaults, (default,))
            helperFunc2(a=7)
            self.assertEqual(helperFunc2.func_defaults, (default,))
            def helperFunc3(a, b=default, c=[42]): c.append(b)
            self.assertEqual(helperFunc3.func_defaults, (default, [42]))
            helperFunc3("stuff")
            self.assertEqual(helperFunc3.func_defaults, (default, [42, default]))
    def test_splat_defaults(self):
        # Splatting fewer args than parameters still lets defaults fill in.
        def g(a, b, x=None):
            return a, b, x
        def f(x, *args):
            return g(x, *args)
        self.assertEqual(f(1, *(2,)), (1,2,None))
    def test_argument_eval_order(self):
        """Check order of evaluation of function arguments"""
        x = [1]
        def noop(a, b, c):
            pass
        # Arguments must be evaluated strictly left-to-right.
        noop(x.append(2), x.append(3), x.append(4))
        self.assertEqual(x, [1,2,3,4])
    def test_method_attr_access(self):
        # instancemethod's constructor is reachable via type(); attribute
        # access on the method falls through to the underlying class.
        class foo(object):
            def f(self): pass
            abc = 3
        method = type(foo.f)
        self.assertEqual(method(foo, 'abc').abc, 3)
    #TODO: @skip("interpreted")  # we don't have FuncEnv's in interpret modes so this always returns None
    def test_function_closure_negative(self):
        # func_closure must reject assignment of any value.
        def f(): pass
        for assignment_val in [None, 1, "a string"]:
            try:
                f.func_closure = assignment_val
                self.assertUnreachable("func_closure is a read-only attribute of functions")
            except TypeError, e:
                pass
    def test_paramless_function_call_error(self):
        # A zero-parameter function must reject both positional and keyword
        # splats carrying values.
        def f(): pass
        try:
            f(*(1, ))
            self.assertUnreachable()
        except TypeError: pass
        try:
            f(**{'abc':'def'})
            self.assertUnreachable()
        except TypeError: pass
    def test_function_closure(self):
        """func_closure contains one cell per free variable actually
        referenced; unused enclosing locals do not get cells, cells are not
        recreated per access, and equal-valued cells compare equal."""
        def f(): pass
        self.assertEqual(f.func_closure, None)
        def f():
            def g(): pass
            return g
        self.assertEqual(f().func_closure, None)
        def f():
            x = 4
            def g(): return x
            return g
        self.assertEqual(sorted([x.cell_contents for x in f().func_closure]), [4])
        def f():
            x = 4
            def g():
                y = 5
                def h(): return x,y
                return h
            return g()
        self.assertEqual(sorted([x.cell_contents for x in f().func_closure]), [4, 5])
        # don't use z
        def f():
            x = 4
            def g():
                y = 5
                z = 7
                def h(): return x,y
                return h
            return g()
        self.assertEqual(sorted([x.cell_contents for x in f().func_closure]), [4, 5])
        def f():
            x = 4
            def g():
                y = 5
                z = 7
                def h(): return x,y,z
                return h
            return g()
        self.assertEqual(sorted([x.cell_contents for x in f().func_closure]), [4, 5, 7])
        def f():
            x = 4
            a = 9
            def g():
                y = 5
                z = 7
                def h(): return x,y
                return h
            return g()
        self.assertEqual(sorted([x.cell_contents for x in f().func_closure]), [4, 5])
        # closure cells are not recreated
        callRes = f()
        a = sorted([id(x) for x in callRes.func_closure])
        b = sorted([id(x) for x in callRes.func_closure])
        self.assertEqual(a, b)
        def f():
            x = 4
            a = 9
            def g():
                y = 5
                z = 7
                def h(): return x,y,a,z
                return h
            return g()
        self.assertEqual(sorted([x.cell_contents for x in f().func_closure]), [4, 5, 7, 9])
        # cells are unhashable
        self.assertRaises(TypeError, hash, f().func_closure[0])
        def f():
            x = 5
            def g():
                return x
            return g
        def h():
            x = 5
            def g():
                return x
            return g
        def j():
            x = 6
            def g():
                return x
            return g
        # cell equality follows the contained value
        self.assertEqual(f().func_closure[0], h().func_closure[0])
        self.assertTrue(f().func_closure[0] != j().func_closure[0])
        # <cell at 45: int object at 44>
        self.assertTrue(repr(f().func_closure[0]).startswith('<cell at '))
        self.assertTrue(repr(f().func_closure[0]).find(': int object at ') != -1)
    def test_func_code(self):
        # func_code must reject non-code assignments.
        def foo(): pass
        def assign(): foo.func_code = None
        self.assertRaises(TypeError, assign)
def def_func_doc(self):
foo.func_doc = 'abc'
self.assertEqual(foo.__doc__, 'abc')
foo.__doc__ = 'def'
self.assertEqual(foo.func_doc, 'def')
foo.func_doc = None
self.assertEqual(foo.__doc__, None)
self.assertEqual(foo.func_doc, None)
    def test_func_defaults(self):
        # func_defaults is writable: assigning a longer tuple right-aligns the
        # defaults against the parameters; None/del clears them entirely.
        def f(a, b): return (a, b)
        f.func_defaults = (1,2)
        self.assertEqual(f(), (1,2))
        # Extra leading defaults are ignored; the last two apply to (a, b).
        f.func_defaults = (1,2,3,4)
        self.assertEqual(f(), (3,4))
        f.func_defaults = None
        self.assertRaises(TypeError, f)
        f.func_defaults = (1,2)
        self.assertEqual(f.func_defaults, (1,2))
        del f.func_defaults
        self.assertEqual(f.func_defaults, None)
        # Deleting when already None is a no-op, not an error.
        del f.func_defaults
        self.assertEqual(f.func_defaults, None)
        def func_with_many_args(one, two, three, four, five, six, seven, eight, nine, ten, eleven=None, twelve=None, thirteen=None, fourteen=None, fifteen=None, sixteen=None, seventeen=None, eighteen=None, nineteen=None):
            print 'hello'
        func_with_many_args(None, None, None, None, None, None, None, None, None, None)
    def test_func_dict(self):
        # func_dict is the function's attribute dict: replaceable with any
        # dict (even keys that aren't identifiers), but not deletable.
        def f(): pass
        f.abc = 123
        self.assertEqual(f.func_dict, {'abc': 123})
        f.func_dict = {'def': 'def'}
        self.assertEqual(hasattr(f, 'def'), True)
        self.assertEqual(getattr(f, 'def'), 'def')
        f.func_dict = {}
        self.assertEqual(hasattr(f, 'abc'), False)
        self.assertEqual(hasattr(f, 'def'), False)
        self.assertRaises(TypeError, lambda : delattr(f, 'func_dict'))
        self.assertRaises(TypeError, lambda : delattr(f, '__dict__'))
    def test_method(self):
        # instancemethod construction via type(), plus equality/hash of bound
        # methods delegating to the wrapped function and instance.
        class C:
            def method(self): pass
        method = type(C.method)(id, None, 'abc')
        self.assertEqual(method.im_class, 'abc')
        class myobj:
            def __init__(self, val):
                self.val = val
                self.called = []
            def __hash__(self):
                self.called.append('hash')
                return hash(self.val)
            def __eq__(self, other):
                self.called.append('eq')
                return self.val == other.val
            def __call__(*args): pass
        func1, func2 = myobj(2), myobj(2)
        inst1, inst2 = myobj(3), myobj(3)
        method = type(C().method)
        m1 = method(func1, inst1)
        m2 = method(func2, inst2)
        # Method equality/hash must consult both the function and the instance.
        self.assertEqual(m1, m2)
        self.assertTrue('eq' in func1.called)
        self.assertTrue('eq' in inst1.called)
        hash(m1)
        self.assertTrue('hash' in func1.called)
        self.assertTrue('hash' in inst1.called)
    def test_function_type(self):
        # All user-defined functions share one type regardless of signature
        # or decoration.
        # NOTE(review): a second method named test_function_type is defined
        # later in this class and shadows this one, so this body never runs.
        def f1(): pass
        def f2(a): pass
        def f3(a, b, c): pass
        def f4(*a, **b): pass
        def decorator(f): return f
        @decorator
        def f5(a): pass
        for x in [ f2, f3, f4, f5]:
            self.assertEqual(type(f1), type(x))
    def test_name_mangled_params(self):
        # Double-underscore parameter names are NOT mangled at function scope
        # (mangling applies only inside class bodies).
        def f1(__a): pass
        def f2(__a): return __a
        def f3(a, __a): return __a
        def f4(_a, __a): return _a + __a
        f1("12")
        self.assertEqual(f2("hello"), "hello")
        self.assertEqual(f3("a","b"), "b")
        self.assertEqual(f4("a","b"), "ab")
    def test_splat_none(self):
        # Splatting None must raise TypeError with a precise message.
        def f(*args): pass
        def g(**kwargs): pass
        def h(*args, **kwargs): pass
        #CodePlex 20250
        self.assertRaisesMessage(TypeError, "f() argument after * must be a sequence, not NoneType",
                                 lambda : f(*None))
        self.assertRaisesMessage(TypeError, "g() argument after ** must be a mapping, not NoneType",
                                 lambda : g(**None))
        self.assertRaisesMessage(TypeError, "h() argument after ** must be a mapping, not NoneType",
                                 lambda : h(*None, **None))
    def test_exec_funccode(self):
        # can't exec a func code w/ parameters
        def f(a, b, c): print a, b, c
        self.assertRaises(TypeError, lambda : eval(f.func_code))
        # can exec *args/**args
        def f(*args): pass
        exec f.func_code in {}, {}
        def f(*args, **kwargs): pass
        exec f.func_code in {}, {}
        # can't exec function which closes over vars
        def f():
            x = 2
            def g():
                print x
            return g.func_code
        self.assertRaises(TypeError, lambda : eval(f()))
    def test_exec_funccode_filename(self):
        # A function compiled via exec-in-module-dict reports '<string>' as
        # its co_filename, not the module's __file__.
        import sys
        mod = type(sys)('fake_mod_name')
        mod.__file__ = 'some file'
        exec "def x(): pass" in mod.__dict__
        self.assertEqual(mod.x.func_code.co_filename, '<string>')
    def test_func_code_variables(self):
        """Verify co_varnames / co_names / co_freevars / co_cellvars for
        locals, globals, tuple parameters, and nested closures."""
        def CompareCodeVars(code, varnames, names, freevars, cellvars):
            self.assertEqual(code.co_varnames, varnames)
            self.assertEqual(code.co_names, names)
            self.assertEqual(code.co_freevars, freevars)
            self.assertEqual(code.co_cellvars, cellvars)
        # simple local
        def f():
            a = 2
        CompareCodeVars(f.func_code, ('a', ), (), (), ())
        # closed over var
        def f():
            a = 2
            def g():
                print a
            return g
        CompareCodeVars(f.func_code, ('g', ), (), (), ('a', ))
        CompareCodeVars(f().func_code, (), (), ('a', ), ())
        # tuple parameters (py2-only syntax) get implicit '.0', '.1' slots
        def f((a, b)): pass
        CompareCodeVars(f.func_code, ('.0', 'a', 'b'), (), (), ())
        def f((a, b), (c, d)): pass
        CompareCodeVars(f.func_code, ('.0', '.1', 'a', 'b', 'c', 'd'), (), (), ())
        # explicitly marked global
        def f():
            global a
            a = 2
        CompareCodeVars(f.func_code, (), ('a', ), (), ())
        # implicit global
        def f():
            print some_global
        CompareCodeVars(f.func_code, (), ('some_global', ), (), ())
        # global that's been "closed over"
        def f():
            global a
            a = 2
            def g():
                print a
            return g
        CompareCodeVars(f.func_code, ('g', ), ('a', ), (), ())
        CompareCodeVars(f().func_code, (), ('a', ), (), ())
        # multi-depth closure
        def f():
            a = 2
            def g():
                x = a
                def h():
                    y = a
                return h
            return g
        CompareCodeVars(f.func_code, ('g', ), (), (), ('a', ))
        CompareCodeVars(f().func_code, ('x', 'h'), (), ('a', ), ())
        CompareCodeVars(f()().func_code, ('y', ), (), ('a', ), ())
        # multi-depth closure 2
        def f():
            a = 2
            def g():
                def h():
                    y = a
                return h
            return g
        CompareCodeVars(f.func_code, ('g', ), (), (), ('a', ))
        CompareCodeVars(f().func_code, ('h', ), (), ('a', ), ())
        CompareCodeVars(f()().func_code, ('y', ), (), ('a', ), ())
        # closed over parameter
        def f(a):
            def g():
                return a
            return g
        CompareCodeVars(f.func_code, ('a', 'g'), (), (), ('a', ))
        CompareCodeVars(f(42).func_code, (), (), ('a', ), ())
        # 'unqualified_exec' is presumably a module-level helper defined
        # elsewhere in this file — TODO confirm.
        self.assertEqual(unqualified_exec.func_code.co_names, ('x', ))
    def test_delattr(self):
        # A deleted function attribute must raise AttributeError on access.
        def f(): pass
        f.abc = 42
        del f.abc
        def g(): f.abc
        self.assertRaises(AttributeError, g)
    def test_cp35180(self):
        # Regression test: replacing foo.func_code must affect foo only, not
        # a previously made copy ('copyfunc' is a helper defined elsewhere in
        # this file — presumably copies the function object).
        def foo():
            return 13
        def bar():
            return 42
        dpf = copyfunc(foo, "dpf")
        self.assertEqual(dpf(), 13)
        foo.func_code = bar.func_code
        self.assertEqual(foo(), 42)
        self.assertEqual(dpf(), 13)
        self.assertEqual(foo.__module__, '__main__')
        self.assertEqual(dpf.__module__, '__main__')
    def test_cp34932(self):
        # Regression test: substitute_globals (helper defined elsewhere in
        # this file) rebinds a function to an alternate globals dict; reads
        # and writes through the rebound function must not leak into the real
        # module globals (module-level global_variable is presumably 13).
        def get_global_variable():
            return global_variable
        def set_global_variable(v):
            global global_variable
            global_variable = v
        alt_globals = {'global_variable' : 66 }
        get_global_variable_x = substitute_globals(get_global_variable, "get_global_variable_x", alt_globals)
        set_global_variable_x = substitute_globals(set_global_variable, "set_global_variable_x", alt_globals)
        self.assertEqual(get_global_variable(), 13)
        self.assertEqual(get_global_variable_x(), 66)
        self.assertEqual(get_global_variable(), 13)
        set_global_variable_x(7)
        self.assertEqual(get_global_variable_x(), 7)
        self.assertEqual(get_global_variable(), 13)
        # A synthetic globals dict has no __name__, hence __module__ is None.
        self.assertEqual(get_global_variable_x.__module__, None)
        self.assertEqual(set_global_variable_x.__module__, None)
        get_global_variable_y = substitute_globals(get_global_variable, "get_global_variable_x", globals())
        self.assertEqual(get_global_variable_y(), 13)
        self.assertEqual(get_global_variable_y.__module__, '__main__')
    def test_issue1351(self):
        # Bound-method equality: identical self short-circuits without calling
        # __eq__; distinct instances must go through instance __eq__ and honor
        # its result.
        class X(object):
            def __init__(self, res):
                self.called = []
                self.res = res
            def __eq__(self, other):
                self.called.append('eq')
                return self.res
            def foo(self):
                pass
        a = X(True)
        b = X(False)
        self.assertEqual(a.foo, a.foo)
        self.assertNotIn('eq', a.called)
        self.assertEqual(a.foo, b.foo)
        self.assertIn('eq', a.called)
        self.assertEqual(b.foo, b.foo)
        self.assertNotIn('eq', b.called)
        self.assertNotEqual(b.foo, a.foo)
        self.assertIn('eq', b.called)
def test_function_type(self):
fn_with_closure = create_fn_with_closure()
def fn_no_closure():
pass
self.assertRaises(NotImplementedError, copyfunc, fn_with_closure, "new_fn_name")
self.assertRaises(NotImplementedError, FunctionType, fn_with_closure.func_code,
fn_with_closure.func_globals, "name", fn_with_closure.func_defaults)
self.assertRaises(NotImplementedError, FunctionType, fn_with_closure.func_code,
fn_with_closure.func_globals, "name", fn_with_closure.func_defaults,
fn_with_closure.func_closure)
self.assertRaises(NotImplementedError, FunctionType, fn_no_closure.func_code,
fn_no_closure.func_globals, "name", fn_no_closure.func_defaults,
fn_with_closure.func_closure)
# Standard iptest entry point: run this module's test class when executed.
run_test(__name__)
| slozier/ironpython2 | Tests/test_function.py | Python | apache-2.0 | 51,122 |
import glob
import os.path
import platform
def find_datafiles():
    """Locate the platform-specific bootstrapper scripts bundled with this package.

    Returns a data-files list of the form [('', [absolute paths...])]:
    on Windows the bootstrappers are ``*.exe`` files, on every other
    platform ``*.sh`` scripts.
    """
    pattern = '*.exe' if platform.system() == 'Windows' else '*.sh'
    # __path__[0] is this package's directory; the bootstrappers live in a
    # subdirectory next to this module.
    search_glob = os.path.abspath(os.path.join(__path__[0], 'bootstrappers', pattern))
    return [('', glob.glob(search_glob))]
| manuelcortez/socializer | src/update/__init__.py | Python | gpl-2.0 | 304 |
from django.conf import settings
from django.core.urlresolvers import reverse_lazy
from django.views.generic.edit import FormView
from django.utils.importlib import import_module
# import contact form class based on value in settings.py
# CONTACT_FORM_CLASS is a dotted path; split it into module path + class name,
# import the module, and pull the class object off it.
# NOTE(review): django.utils.importlib is deprecated (removed in Django 1.9);
# the stdlib importlib should be used instead — confirm target Django version.
full_class = getattr(settings, 'CONTACT_FORM_CLASS', 'quix.django.contact.forms.ContactForm')
module_name = '.'.join(full_class.split('.')[0:-1])
module = import_module(module_name)
class_instance = getattr(module, full_class.split('.')[-1])
class ContactView(FormView):
    """Render and process the contact form configured in settings."""
    # Template and form class are overridable via settings, with defaults.
    template_name = getattr(settings, 'CONTACT_FORM_TEMPLATE', 'contact/form.html')
    form_class = class_instance
    success_url = reverse_lazy("contact-success")
    def form_valid(self, form):
        # Deliver the message, then redirect to success_url as usual.
        form.send_email()
        return super(ContactView, self).form_valid(form)
| Quixotix/quix.django.contact | quix/django/contact/views.py | Python | bsd-3-clause | 792 |
# -*- coding: utf-8 -*-
'''
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
import urllib
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import source_utils
from resources.lib.modules import dom_parser
class source:
    """Covenant scraper for movie2k.ac (German-language hoster links)."""
    def __init__(self):
        self.priority = 1
        self.language = ['de']
        self.domains = ['movie2k.ac']
        self.base_link = 'http://www.movie2k.ac'
        self.search_link = '/search/%s'
    def movie(self, imdb, title, localtitle, aliases, year):
        # Resolve the site-relative movie page URL, trying the localized
        # title first and falling back to the original title.
        try:
            url = self.__search([localtitle] + source_utils.aliases_to_array(aliases))
            if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases))
            return url
        except:
            # NOTE(review): bare except silently drops all errors — this is
            # the prevailing style of these scraper modules.
            return
    def sources(self, url, hostDict, hostprDict):
        # Scrape the German-dubbed links table and emit one source dict per
        # recognized hoster row.
        sources = []
        try:
            if not url:
                return sources
            query = urlparse.urljoin(self.base_link, url)
            r = client.request(query)
            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'tab-plot_german'})
            r = dom_parser.parse_dom(r, 'tbody')
            r = dom_parser.parse_dom(r, 'tr')
            # i[1] is presumably the matched element's inner HTML (dom_parser
            # match tuple) — TODO confirm against dom_parser's return type.
            for i in r:
                if re.search('(?<=">)(\n.*?)(?=<\/a>)', i[1]).group().strip():
                    hoster = re.search('(?<=">)(\n.*?)(?=<\/a>)', i[1]).group().strip()
                    link = re.search('(?<=href=\")(.*?)(?=\")', i[1]).group()
                    rel = re.search('(?<=oddCell qualityCell">)(\n.*?)(?=<\/td>)', i[1]).group().strip()
                    quality, info = source_utils.get_release_quality(rel)
                    if not quality:
                        quality = 'SD'
                    valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                    if not valid: continue
                    sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'url': link, 'direct': False, 'debridonly': False})
            return sources
        except:
            return sources
    def resolve(self, url):
        # Links are already direct hoster URLs; nothing to resolve.
        return url
    def __search(self, titles):
        # Search the site and return the relative URL of the first result
        # whose cleaned title matches one of the candidate titles.
        try:
            # The query is percent-encoded twice on purpose? — TODO confirm
            # the site expects double encoding.
            query = self.search_link % (urllib.quote_plus(urllib.quote_plus(cleantitle.query(titles[0]))))
            query = urlparse.urljoin(self.base_link, query)
            t = [cleantitle.get(i) for i in set(titles) if i]
            r = client.request(query)
            r = dom_parser.parse_dom(r, 'ul', attrs={'class': 'coverBox'})
            r = dom_parser.parse_dom(r, 'li')
            r = dom_parser.parse_dom(r, 'span', attrs={'class': 'name'})
            r = dom_parser.parse_dom(r, 'a')
            title = r[0][1]
            title = cleantitle.get(title)
            if title in t:
                return source_utils.strip_domain(r[0][0]['href'])
            else:
                return
        except:
            return
| TheWardoctor/Wardoctors-repo | script.module.uncoded/lib/resources/lib/sources/de/movie2k-ac.py | Python | apache-2.0 | 3,611 |
# -*- coding: utf-8 -*-
#
# PySPED - Python libraries to deal with Brazil's SPED Project
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira at tauga.com.br>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# PySPED - Bibliotecas Python para o
# SPED - Sistema Público de Escrituração Digital
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira arroba tauga.com.br>
#
# Este programa é um software livre: você pode redistribuir e/ou modificar
# este programa sob os termos da licença GNU Affero General Public License,
# publicada pela Free Software Foundation, em sua versão 3 ou, de acordo
# com sua opção, qualquer versão posterior.
#
# Este programa é distribuido na esperança de que venha a ser útil,
# porém SEM QUAISQUER GARANTIAS, nem mesmo a garantia implícita de
# COMERCIABILIDADE ou ADEQUAÇÃO A UMA FINALIDADE ESPECÍFICA. Veja a
# GNU Affero General Public License para mais detalhes.
#
# Você deve ter recebido uma cópia da GNU Affero General Public License
# juntamente com este programa. Caso esse não seja o caso, acesse:
# <http://www.gnu.org/licenses/>
#
from __future__ import division, print_function, unicode_literals
from pysped.xml_sped import *
from pysped.nfe.leiaute import ESQUEMA_ATUAL_VERSAO_3 as ESQUEMA_ATUAL
from pysped.nfe.leiaute import nfe_200
import os
# Directory containing this module; used to locate bundled resources.
DIRNAME = os.path.dirname(__file__)
class Exporta(XMLNFe):
    """Export information group (ZA) of the NF-e 3.10 layout.

    Optional group: it is serialized only when UFSaidaPais or xLocExporta
    has a value.
    """
    def __init__(self):
        super(Exporta, self).__init__()
        self.UFSaidaPais  = TagCaracter(nome='UFSaidaPais' , codigo='ZA02', tamanho=[2,  2], raiz='//NFe/infNFe/exporta', obrigatorio=False)
        self.xLocExporta  = TagCaracter(nome='xLocExporta' , codigo='ZA03', tamanho=[1, 60], raiz='//NFe/infNFe/exporta', obrigatorio=False)
        self.xLocDespacho = TagCaracter(nome='xLocDespacho', codigo='ZA04', tamanho=[1, 60], raiz='//NFe/infNFe/exporta', obrigatorio=False)
    def get_xml(self):
        # Skip the whole group when the key fields are empty.
        if not (self.UFSaidaPais.valor or self.xLocExporta.valor):
            return ''
        xml = XMLNFe.get_xml(self)
        xml += '<exporta>'
        xml += self.UFSaidaPais.xml
        xml += self.xLocExporta.xml
        xml += self.xLocDespacho.xml
        xml += '</exporta>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.UFSaidaPais.xml  = arquivo
            self.xLocExporta.xml  = arquivo
            self.xLocDespacho.xml = arquivo
    xml = property(get_xml, set_xml)
    def get_txt(self):
        # Flat-text (TXT) serialization of the same group, record 'ZA'.
        if not (self.UFSaidaPais.valor or self.xLocExporta.valor):
            return ''
        txt = 'ZA|'
        txt += self.UFSaidaPais.txt + '|'
        txt += self.xLocExporta.txt + '|'
        txt += self.xLocDespacho.txt + '|'
        txt += '\n'
        return txt
    txt = property(get_txt)
class InfNFe(nfe_200.InfNFe):
    """NF-e 3.10 infNFe: inherits the 2.00 layout, replacing the export group."""
    def __init__(self):
        super(InfNFe, self).__init__()
        self.exporta = Exporta()
class Deduc(nfe_200.Deduc):
    # Unchanged from the 2.00 layout.
    def __init__(self):
        super(Deduc, self).__init__()
class ForDia(nfe_200.ForDia):
    # Unchanged from the 2.00 layout.
    def __init__(self):
        super(ForDia, self).__init__()
class Cana(nfe_200.Cana):
    # Unchanged from the 2.00 layout.
    def __init__(self):
        super(Cana, self).__init__()
class IPIDevol(XMLNFe):
    """IPI portion of the returned-goods tax group (NF-e 3.10)."""
    def __init__(self):
        super(IPIDevol, self).__init__()
        self.vIPIDevol = TagDecimal(nome='vIPIDevol', codigo='I50', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/impostoDevol/IPI')
    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        # Emit the <IPI> wrapper only when there is a value to report.
        if self.vIPIDevol.valor:
            xml += '<IPI>'
            xml += self.vIPIDevol.xml
            xml += '</IPI>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.vIPIDevol.xml = arquivo
    xml = property(get_xml, set_xml)
class ImpostoDevol(XMLNFe):
    """Returned-goods tax group (impostoDevol) of the NF-e 3.10 layout.

    Carries pDevol (percentage of merchandise returned) plus the nested
    IPI devolution value.
    """
    def __init__(self):
        super(ImpostoDevol, self).__init__()
        self.pDevol = TagDecimal(nome='pDevol', codigo='I50', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz='//det/impostoDevol')
        self.IPI = IPIDevol()
    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        # The whole group is optional; emit it only when pDevol is filled in.
        if self.pDevol.valor:
            xml += '<impostoDevol>'
            xml += self.pDevol.xml
            xml += self.IPI.xml
            xml += '</impostoDevol>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.pDevol.xml = arquivo
            # Bug fix: the nested IPI group is written by get_xml() but was
            # never read back here, so vIPIDevol was silently lost whenever a
            # document was parsed (asymmetric with IPIDevol.set_xml).
            self.IPI.xml = arquivo
    xml = property(get_xml, set_xml)
class ISSQN(nfe_200.ISSQN):
    """ISSQN (municipal service tax) group, extended for NF-e layout 3.10."""
    def __init__(self):
        super(ISSQN, self).__init__()
        self.vAliq     = TagDecimal(nome='vAliq'    , codigo='U03', tamanho=[1,  5, 1], decimais=[0, 4, 4], raiz='//det/imposto/ISSQN')
        self.cListServ = TagCaracter(nome='cListServ', codigo='U06', tamanho=[5,  5],                       raiz='//det/imposto/ISSQN')
        #
        # Fields new in layout version 3.10
        #
        self.vDeducao     = TagDecimal(nome='vDeducao'    , codigo='U07', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/imposto/ISSQN', obrigatorio=False)
        self.vOutro       = TagDecimal(nome='vOutro'      , codigo='U08', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/imposto/ISSQN', obrigatorio=False)
        self.vDescIncond  = TagDecimal(nome='vDescIncond' , codigo='U09', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/imposto/ISSQN', obrigatorio=False)
        self.vDescCond    = TagDecimal(nome='vDescCond'   , codigo='U10', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/imposto/ISSQN', obrigatorio=False)
        self.vISSRet      = TagDecimal(nome='vISSRet'     , codigo='U11', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/imposto/ISSQN', obrigatorio=False)
        self.indISS       = TagCaracter(nome='indISS'     , codigo='U12', tamanho=[1,  2],                        raiz='//det/imposto/ISSQN')
        self.cServico     = TagCaracter(nome='cServico'   , codigo='U13', tamanho=[1, 20],                        raiz='//det/imposto/ISSQN', obrigatorio=False)
        self.cMun         = TagInteiro(nome='cMun'        , codigo='U14', tamanho=[7,  7, 7],                     raiz='//det/imposto/ISSQN', obrigatorio=False)
        self.cPais        = TagInteiro(nome='cPais'       , codigo='U15', tamanho=[4,  4, 4],                     raiz='//det/imposto/ISSQN', obrigatorio=False)
        self.nProcesso    = TagCaracter(nome='nProcesso'  , codigo='U16', tamanho=[1, 30],                        raiz='//det/imposto/ISSQN', obrigatorio=False)
        self.indIncentivo = TagCaracter(nome='indIncentivo', codigo='U17', tamanho=[1, 1],                        raiz='//det/imposto/ISSQN', valor='2')
    def get_xml(self):
        # In 3.10 the group is keyed on indISS: no indicator, no group.
        if not (self.indISS.valor):
            return ''
        xml = XMLNFe.get_xml(self)
        xml += '<ISSQN>'
        xml += self.vBC.xml
        xml += self.vAliq.xml
        xml += self.vISSQN.xml
        xml += self.cMunFG.xml
        xml += self.cListServ.xml
        xml += self.vDeducao.xml
        xml += self.vOutro.xml
        xml += self.vDescIncond.xml
        xml += self.vDescCond.xml
        xml += self.vISSRet.xml
        xml += self.indISS.xml
        xml += self.cServico.xml
        xml += self.cMun.xml
        xml += self.cPais.xml
        xml += self.nProcesso.xml
        xml += self.indIncentivo.xml
        xml += '</ISSQN>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.vBC.xml = arquivo
            self.vAliq.xml = arquivo
            self.vISSQN.xml = arquivo
            self.cMunFG.xml = arquivo
            self.cListServ.xml = arquivo
            self.vDeducao.xml = arquivo
            self.vOutro.xml = arquivo
            self.vDescIncond.xml = arquivo
            self.vDescCond.xml = arquivo
            self.vISSRet.xml = arquivo
            self.indISS.xml = arquivo
            self.cServico.xml = arquivo
            self.cMun.xml = arquivo
            self.cPais.xml = arquivo
            self.nProcesso.xml = arquivo
            self.indIncentivo.xml = arquivo
    xml = property(get_xml, set_xml)
class COFINSST(nfe_200.COFINSST):
    # Unchanged from the 2.00 layout.
    def __init__(self):
        super(COFINSST, self).__init__()
class TagCSTCOFINS(nfe_200.TagCSTCOFINS):
    # Unchanged from the 2.00 layout.
    def __init__(self, *args, **kwargs):
        super(TagCSTCOFINS, self).__init__(*args, **kwargs)
class COFINS(nfe_200.COFINS):
    """COFINS tax group; in 3.10 the rate tag is pCOFINS (percentage)."""
    def __init__(self):
        super(COFINS, self).__init__()
        self.pCOFINS = TagDecimal(nome='pCOFINS' , codigo='S08', tamanho=[1,  5, 1], decimais=[0, 4, 4], raiz='')
    def get_xml(self):
        #
        # Choose which tags to emit based on the CST (tax situation code)
        #
        xml = XMLNFe.get_xml(self)
        xml += '<COFINS>'
        xml += '<' + self.nome_tag + '>'
        xml += self.CST.xml
        if self.CST.valor in ('01', '02'):
            # Taxed on a value basis: base + rate + amount.
            xml += self.vBC.xml
            xml += self.pCOFINS.xml
            xml += self.vCOFINS.xml
        elif self.CST.valor == '03':
            # Taxed on a quantity basis.
            xml += self.qBCProd.xml
            xml += self.vAliqProd.xml
            xml += self.vCOFINS.xml
        elif self.CST.valor in ('04', '06', '07', '08', '09'):
            # Exempt/not taxed: CST only.
            pass
        else:
            # Other CSTs: emit whichever basis was actually filled in.
            if self.qBCProd.valor or self.vAliqProd.valor:
                xml += self.qBCProd.xml
                xml += self.vAliqProd.xml
            else:
                xml += self.vBC.xml
                xml += self.pCOFINS.xml
            xml += self.vCOFINS.xml
        xml += '</' + self.nome_tag + '></COFINS>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            #
            # To read COFINS correctly we must first discover which tax
            # situation group it belongs to
            #
            if self._le_noh('//det/imposto/COFINS/COFINSAliq') is not None:
                self.CST.valor = '01'
            elif self._le_noh('//det/imposto/COFINS/COFINSQtde') is not None:
                self.CST.valor = '03'
            elif self._le_noh('//det/imposto/COFINS/COFINSNT') is not None:
                self.CST.valor = '04'
            else:
                self.CST.valor = '99'
            #
            # Now the values can be read safely...
            #
            self.CST.xml = arquivo
            self.vBC.xml = arquivo
            self.pCOFINS.xml = arquivo
            self.vCOFINS.xml = arquivo
            self.qBCProd.xml = arquivo
            self.vAliqProd.xml = arquivo
    xml = property(get_xml, set_xml)
class PISST(nfe_200.PISST):
    # Unchanged from the 2.00 layout.
    def __init__(self):
        super(PISST, self).__init__()
class TagCSTPIS(nfe_200.TagCSTPIS):
    # Unchanged from the 2.00 layout.
    def __init__(self, *args, **kwargs):
        super(TagCSTPIS, self).__init__(*args, **kwargs)
class PIS(nfe_200.PIS):
    """PIS group of an NF-e item, layout 3.10.

    Only redefines pPIS so the rate accepts up to four decimal places;
    the sub-group emitted (<PISAliq>, <PISQtde>, <PISNT> or <PISOutr>,
    via ``self.nome_tag``) is driven by the CST code.
    """
    def __init__(self):
        super(PIS, self).__init__()
        # Layout 3.10 widens the PIS rate to 4 decimal places.
        self.pPIS = TagDecimal(nome='pPIS' , codigo='Q08', tamanho=[1, 5, 1], decimais=[0, 4, 4], raiz='')
    def get_xml(self):
        #
        # Choose which tags to emit based on the tax situation code (CST)
        #
        xml = XMLNFe.get_xml(self)
        xml += '<PIS>'
        xml += '<' + self.nome_tag + '>'
        xml += self.CST.xml
        if self.CST.valor in ('01', '02'):
            # Taxed on value: base, rate and amount
            xml += self.vBC.xml
            xml += self.pPIS.xml
            xml += self.vPIS.xml
        elif self.CST.valor == '03':
            # Taxed on quantity: quantity, per-unit rate and amount
            xml += self.qBCProd.xml
            xml += self.vAliqProd.xml
            xml += self.vPIS.xml
        elif self.CST.valor in ('04', '06', '07', '08', '09'):
            # Exempt / not taxed: only the CST itself is serialized
            pass
        else:
            # "Other" situations: quantity-based when the quantity fields
            # are filled in, value-based otherwise
            if self.qBCProd.valor or self.vAliqProd.valor:
                xml += self.qBCProd.xml
                xml += self.vAliqProd.xml
            else:
                xml += self.vBC.xml
                xml += self.pPIS.xml
            xml += self.vPIS.xml
        xml += '</' + self.nome_tag + '></PIS>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            #
            # To read the PIS correctly we must first find out which
            # tax situation sub-group it was serialized in
            #
            if self._le_noh('//det/imposto/PIS/PISAliq') is not None:
                self.CST.valor = '01'
            elif self._le_noh('//det/imposto/PIS/PISQtde') is not None:
                self.CST.valor = '03'
            elif self._le_noh('//det/imposto/PIS/PISNT') is not None:
                self.CST.valor = '04'
            else:
                self.CST.valor = '99'
            #
            # Now the values can be read safely...
            #
            self.CST.xml = arquivo
            self.vBC.xml = arquivo
            self.pPIS.xml = arquivo
            self.vPIS.xml = arquivo
            self.qBCProd.xml = arquivo
            self.vAliqProd.xml = arquivo
    xml = property(get_xml, set_xml)
class II(nfe_200.II):
    """Import duty (II) group; behaviour unchanged from layout 2.00."""
    def __init__(self):
        # Pure passthrough: layout 3.10 re-exports the 2.00 implementation.
        nfe_200.II.__init__(self)
class TagCSTIPI(nfe_200.TagCSTIPI):
    """CST tag for the IPI group; behaviour inherited from layout 2.00."""
    def __init__(self, *args, **kwargs):
        # Forward every argument untouched to the 2.00 base class.
        nfe_200.TagCSTIPI.__init__(self, *args, **kwargs)
class IPI(nfe_200.IPI):
    """IPI group of an NF-e item, layout 3.10.

    Only redefines pIPI so the rate accepts up to four decimal places;
    the taxed sub-group (<IPITrib> vs <IPINT>, via ``self.nome_tag``)
    is driven by the CST code.
    """
    def __init__(self):
        super(IPI, self).__init__()
        # Layout 3.10 widens the IPI rate to 4 decimal places.
        self.pIPI = TagDecimal(nome='pIPI', codigo='O13', tamanho=[1, 5, 1], decimais=[0, 4, 4], raiz='')
    def get_xml(self):
        #
        # Choose which tags to emit based on the tax situation code (CST)
        #
        xml = XMLNFe.get_xml(self)
        xml += '<IPI>'
        xml += self.clEnq.xml
        xml += self.CNPJProd.xml
        xml += self.cSelo.xml
        xml += self.qSelo.xml
        xml += self.cEnq.xml
        xml += '<' + self.nome_tag + '>'
        xml += self.CST.xml
        if self.CST.valor in ('00', '49', '50', '99'):
            # Taxed situations: either quantity-based (unit qty/value) or
            # value-based (base, rate), plus the tax amount
            if self.qUnid.valor or self.vUnid.valor:
                xml += self.qUnid.xml
                xml += self.vUnid.xml
            else:
                xml += self.vBC.xml
                xml += self.pIPI.xml
            xml += self.vIPI.xml
        xml += '</' + self.nome_tag + '></IPI>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            #
            # To read the IPI correctly we must first find out which
            # tax situation sub-group it was serialized in
            #
            if self._le_noh('//det/imposto/IPI/IPINT') is not None:
                # Placeholder for the non-taxed group; the actual CST
                # value is read from the file just below
                self.CST.valor = '01'
            else:
                self.CST.valor = '00'
            #
            # Now the values can be read safely...
            #
            self.CST.xml = arquivo
            self.clEnq.xml = arquivo
            self.CNPJProd.xml = arquivo
            self.cSelo.xml = arquivo
            self.qSelo.xml = arquivo
            self.cEnq.xml = arquivo
            self.vBC.xml = arquivo
            self.qUnid.xml = arquivo
            self.vUnid.xml = arquivo
            self.pIPI.xml = arquivo
            self.vIPI.xml = arquivo
    xml = property(get_xml, set_xml)
class TagCSOSN(nfe_200.TagCSOSN):
    """CSOSN tag (Simples Nacional tax situation); inherited from layout 2.00."""
    def __init__(self, *args, **kwargs):
        # Forward every argument untouched to the 2.00 base class.
        nfe_200.TagCSOSN.__init__(self, *args, **kwargs)
class TagCSTICMS(nfe_200.TagCSTICMS):
    """CST tag for the ICMS group, layout 3.10.

    Setting ``valor`` also resets the new 3.10 exemption/deferral tags
    (vICMSDeson, vICMSOp, pDif, vICMSDif) on the owning ICMS group, so a
    CST change never leaks stale values from the previous situation.
    """
    def __init__(self, *args, **kwargs):
        super(TagCSTICMS, self).__init__(*args, **kwargs)
        self.nome = 'CST'
        self.codigo = 'N12'
        self.tamanho = [2, 2]
        self.raiz = ''
        # Back-reference to the ICMS group that owns this tag; assigned
        # by ICMS.__init__ after construction.
        self.grupo_icms = None
    def set_valor(self, novo_valor):
        super(TagCSTICMS, self).set_valor(novo_valor)
        if not self.grupo_icms:
            return None
        #
        # Mark all the new tags as optional
        #
        self.grupo_icms.vICMSDeson.obrigatorio = False
        self.grupo_icms.vICMSOp.obrigatorio = False
        self.grupo_icms.pDif.obrigatorio = False
        self.grupo_icms.vICMSDif.obrigatorio = False
        #
        # For safety, zero the values of the ICMS-group tags whenever
        # the tax situation code is redefined
        #
        self.grupo_icms.vICMSDeson.valor = '0.00'
        self.grupo_icms.vICMSOp.valor = '0.00'
        self.grupo_icms.pDif.valor = '0.00'
        self.grupo_icms.vICMSDif.valor = '0.00'
        #
        # Re-root every tag of the ICMS group under the current sub-group
        #
        self.grupo_icms.vICMSDeson.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.vICMSOp.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.pDif.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.vICMSDif.raiz = self.grupo_icms.raiz_tag
    def get_valor(self):
        return self._valor_string
    valor = property(get_valor, set_valor)
class ICMS(nfe_200.ICMS):
    """ICMS group of an NF-e item, layout 3.10.

    Extends the 2.00 class with 4-decimal rates, the new exemption
    fields (vICMSDeson, vICMSOp, pDif, vICMSDif) and the 3.10 CST/CSOSN
    serialization rules.  The sub-group emitted (``self.nome_tag``) is
    chosen from either CST (regular regime) or CSOSN (Simples Nacional),
    depending on ``self.regime_tributario``.
    """
    def __init__(self):
        super(ICMS, self).__init__()
        # Rates widened to 4 decimal places in layout 3.10
        self.pRedBC = TagDecimal(nome='pRedBC' , codigo='N14', tamanho=[1, 5, 1], decimais=[0, 4, 4], raiz='')
        self.pICMS = TagDecimal(nome='pICMS' , codigo='N16', tamanho=[1, 5, 1], decimais=[0, 4, 4], raiz='')
        self.pMVAST = TagDecimal(nome='pMVAST' , codigo='N19', tamanho=[1, 5, 1], decimais=[0, 4, 4], raiz='')
        self.pRedBCST = TagDecimal(nome='pRedBCST', codigo='N20', tamanho=[1, 5, 1], decimais=[0, 4, 4], raiz='')
        self.pICMSST = TagDecimal(nome='pICMSST' , codigo='N22', tamanho=[1, 5, 1], decimais=[0, 4, 4], raiz='')
        self.pCredSN = TagDecimal(nome='pCredSN' , codigo='N29', tamanho=[1, 15, 1], decimais=[0, 4, 4], raiz='')
        #
        # New fields for the exempted ("desonerado") / deferred ICMS
        #
        self.vICMSDeson = TagDecimal(nome='vICMSDeson', codigo='N27a', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='')
        self.vICMSOp = TagDecimal(nome='vICMSOp', codigo='P16a', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='', obrigatorio=False)
        self.pDif = TagDecimal(nome='pDif', codigo='P16b', tamanho=[1, 7, 1], decimais=[0, 2, 4], raiz='', obrigatorio=False)
        self.vICMSDif = TagDecimal(nome='vICMSDif', codigo='P16b', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='', obrigatorio=False)
        #
        # Tax situation for the Simples Nacional regime (CSOSN)
        #
        self.CSOSN = TagCSOSN()
        self.CSOSN.grupo_icms = self
        self.CSOSN.valor = '400'
        #
        # Traditional (regular regime) tax situation (CST)
        #
        self.CST = TagCSTICMS()
        self.CST.grupo_icms = self
        self.CST.valor = '41'
    def get_xml(self):
        #
        # Choose which tags to emit based on the tax situation code
        #
        xml = XMLNFe.get_xml(self)
        xml += '<ICMS><' + self.nome_tag + '>'
        xml += self.orig.xml
        #
        # Regular regime (not Simples Nacional)
        #
        if self.regime_tributario != 1:
            xml += self.CST.xml
            if self.CST.valor == '00':
                xml += self.modBC.xml
                xml += self.vBC.xml
                xml += self.pICMS.xml
                xml += self.vICMS.xml
            elif self.CST.valor == '10':
                # Plain CST 10 vs. the ICMSPart (interstate sharing) variant
                if not self.partilha:
                    xml += self.modBC.xml
                    xml += self.vBC.xml
                    #xml += self.pRedBC.xml
                    xml += self.pICMS.xml
                    xml += self.vICMS.xml
                    xml += self.modBCST.xml
                    # Only when the ST base is the value-added margin (MVA)
                    if self.modBCST.valor == 4:
                        xml += self.pMVAST.xml
                    xml += self.pRedBCST.xml
                    xml += self.vBCST.xml
                    xml += self.pICMSST.xml
                    xml += self.vICMSST.xml
                else:
                    xml += self.modBC.xml
                    xml += self.vBC.xml
                    xml += self.pRedBC.xml
                    xml += self.pICMS.xml
                    xml += self.vICMS.xml
                    xml += self.modBCST.xml
                    # Only when the ST base is the value-added margin (MVA)
                    if self.modBCST.valor == 4:
                        xml += self.pMVAST.xml
                    xml += self.pRedBCST.xml
                    xml += self.vBCST.xml
                    xml += self.pICMSST.xml
                    xml += self.vICMSST.xml
                    xml += self.pBCOp.xml
                    xml += self.UFST.xml
            elif self.CST.valor == '20':
                xml += self.modBC.xml
                xml += self.pRedBC.xml
                xml += self.vBC.xml
                xml += self.pICMS.xml
                xml += self.vICMS.xml
                xml += self.vICMSDeson.xml
                xml += self.motDesICMS.xml
            elif self.CST.valor == '30':
                xml += self.modBCST.xml
                # Only when the ST base is the value-added margin (MVA)
                if self.modBCST.valor == 4:
                    xml += self.pMVAST.xml
                xml += self.pRedBCST.xml
                xml += self.vBCST.xml
                xml += self.pICMSST.xml
                xml += self.vICMSST.xml
                xml += self.vICMSDeson.xml
                xml += self.motDesICMS.xml
            elif self.CST.valor in ('40', '41', '50'):
                xml += self.vICMSDeson.xml
                xml += self.motDesICMS.xml
            elif self.CST.valor == '51':
                xml += self.modBC.xml
                xml += self.pRedBC.xml
                xml += self.vBC.xml
                xml += self.pICMS.xml
                xml += self.vICMSOp.xml
                xml += self.pDif.xml
                xml += self.vICMSDif.xml
                xml += self.vICMS.xml
            elif self.CST.valor == '60':
                xml += self.vBCSTRet.xml
                xml += self.vICMSSTRet.xml
            elif self.CST.valor == '70':
                xml += self.modBC.xml
                xml += self.vBC.xml
                xml += self.pRedBC.xml
                xml += self.pICMS.xml
                xml += self.vICMS.xml
                xml += self.modBCST.xml
                # Only when the ST base is the value-added margin (MVA)
                if self.modBCST.valor == 4:
                    xml += self.pMVAST.xml
                xml += self.pRedBCST.xml
                xml += self.vBCST.xml
                xml += self.pICMSST.xml
                xml += self.vICMSST.xml
                xml += self.vICMSDeson.xml
                xml += self.motDesICMS.xml
            elif self.CST.valor == '90':
                xml += self.modBC.xml
                xml += self.vBC.xml
                xml += self.pRedBC.xml
                xml += self.pICMS.xml
                xml += self.vICMS.xml
                xml += self.modBCST.xml
                # Only when the ST base is the value-added margin (MVA)
                if self.modBCST.valor == 4:
                    xml += self.pMVAST.xml
                xml += self.pRedBCST.xml
                xml += self.vBCST.xml
                xml += self.pICMSST.xml
                xml += self.vICMSST.xml
                if self.partilha:
                    xml += self.pBCOp.xml
                    xml += self.UFST.xml
        #
        # Simples Nacional regime: serialize by CSOSN instead of CST
        #
        else:
            xml += self.CSOSN.xml
            if self.CSOSN.valor == '101':
                xml += self.pCredSN.xml
                xml += self.vCredICMSSN.xml
            elif self.CSOSN.valor in ('102', '103', '300', '400'):
                pass
            elif self.CSOSN.valor == '201':
                xml += self.modBCST.xml
                # Only when the ST base is the value-added margin (MVA)
                if self.modBCST.valor == 4:
                    xml += self.pMVAST.xml
                xml += self.pRedBCST.xml
                xml += self.vBCST.xml
                xml += self.pICMSST.xml
                xml += self.vICMSST.xml
                xml += self.pCredSN.xml
                xml += self.vCredICMSSN.xml
            elif self.CSOSN.valor in ('202', '203'):
                xml += self.modBCST.xml
                # Only when the ST base is the value-added margin (MVA)
                if self.modBCST.valor == 4:
                    xml += self.pMVAST.xml
                xml += self.pRedBCST.xml
                xml += self.vBCST.xml
                xml += self.pICMSST.xml
                xml += self.vICMSST.xml
            elif self.CSOSN.valor == '500':
                xml += self.vBCSTRet.xml
                xml += self.vICMSSTRet.xml
            elif self.CSOSN.valor == '900':
                xml += self.modBC.xml
                xml += self.vBC.xml
                xml += self.pRedBC.xml
                xml += self.pICMS.xml
                xml += self.vICMS.xml
                xml += self.modBCST.xml
                # Only when the ST base is the value-added margin (MVA)
                if self.modBCST.valor == 4:
                    xml += self.pMVAST.xml
                xml += self.pRedBCST.xml
                xml += self.vBCST.xml
                xml += self.pICMSST.xml
                xml += self.vICMSST.xml
                xml += self.pCredSN.xml
                xml += self.vCredICMSSN.xml
        xml += '</' + self.nome_tag + '></ICMS>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            #
            # To read the ICMS correctly we must first find out which
            # tax situation sub-group it was serialized in
            #
            self.partilha = False
            self.repasse = False
            if self._le_noh('//det/imposto/ICMS/ICMS00') is not None:
                self.regime_tributario = 3
                self.CST.valor = '00'
            elif self._le_noh('//det/imposto/ICMS/ICMS10') is not None:
                self.regime_tributario = 3
                self.CST.valor = '10'
            elif self._le_noh('//det/imposto/ICMS/ICMS20') is not None:
                self.regime_tributario = 3
                self.CST.valor = '20'
            elif self._le_noh('//det/imposto/ICMS/ICMS30') is not None:
                self.regime_tributario = 3
                self.CST.valor = '30'
            elif self._le_noh('//det/imposto/ICMS/ICMS40') is not None:
                # ICMS40 covers CSTs 40/41/50; the real CST is read below
                self.regime_tributario = 3
                self.CST.valor = '40'
            elif self._le_noh('//det/imposto/ICMS/ICMS51') is not None:
                self.regime_tributario = 3
                self.CST.valor = '51'
            elif self._le_noh('//det/imposto/ICMS/ICMS60') is not None:
                self.regime_tributario = 3
                self.CST.valor = '60'
            elif self._le_noh('//det/imposto/ICMS/ICMS70') is not None:
                self.regime_tributario = 3
                self.CST.valor = '70'
            elif self._le_noh('//det/imposto/ICMS/ICMS90') is not None:
                self.regime_tributario = 3
                self.CST.valor = '90'
            elif self._le_noh('//det/imposto/ICMS/ICMSPart') is not None:
                self.regime_tributario = 3
                self.partilha = True
                self.CST.valor = '10'
            elif self._le_noh('//det/imposto/ICMS/ICMSST') is not None:
                self.regime_tributario = 3
                self.repasse = True
                self.CST.valor = '41'
            elif self._le_noh('//det/imposto/ICMS/ICMSSN101') is not None:
                self.regime_tributario = 1
                self.CSOSN.valor = '101'
            elif self._le_noh('//det/imposto/ICMS/ICMSSN102') is not None:
                self.regime_tributario = 1
                self.CSOSN.valor = '102'
            elif self._le_noh('//det/imposto/ICMS/ICMSSN201') is not None:
                self.regime_tributario = 1
                self.CSOSN.valor = '201'
            elif self._le_noh('//det/imposto/ICMS/ICMSSN202') is not None:
                self.regime_tributario = 1
                self.CSOSN.valor = '202'
            elif self._le_noh('//det/imposto/ICMS/ICMSSN500') is not None:
                self.regime_tributario = 1
                self.CSOSN.valor = '500'
            elif self._le_noh('//det/imposto/ICMS/ICMSSN900') is not None:
                self.regime_tributario = 1
                self.CSOSN.valor = '900'
            #
            # Now the values can be read safely...
            #
            self.orig.xml = arquivo
            if self.regime_tributario == 1:
                self.CSOSN.xml = arquivo
            else:
                self.CST.xml = arquivo
            self.modBC.xml = arquivo
            self.vBC.xml = arquivo
            self.pRedBC.xml = arquivo
            self.pICMS.xml = arquivo
            self.vICMS.xml = arquivo
            self.modBCST.xml = arquivo
            self.pMVAST.xml = arquivo
            self.pRedBCST.xml = arquivo
            self.vBCST.xml = arquivo
            self.pICMSST.xml = arquivo
            self.vICMSST.xml = arquivo
            self.vBCSTRet.xml = arquivo
            self.vICMSSTRet.xml = arquivo
            self.vICMSDeson.xml = arquivo
            self.vICMSOp.xml = arquivo
            self.pDif.xml = arquivo
            self.vICMSDif.xml = arquivo
            if self.regime_tributario == 1:
                self.pCredSN.xml = arquivo
                self.vCredICMSSN.xml = arquivo
            else:
                self.UFST.xml = arquivo
                self.pBCOp.xml = arquivo
                self.motDesICMS.xml = arquivo
                self.vBCSTDest.xml = arquivo
                self.vICMSSTDest.xml = arquivo
    xml = property(get_xml, set_xml)
class ICMSUFDest(XMLNFe):
    """Interstate ICMS sharing group (<ICMSUFDest>) for consumer sales
    to another state (DIFAL), introduced with layout 3.10.

    The whole group is treated as optional: it is only serialized when
    the interstate rate (pICMSInter) has been filled in.
    """
    def __init__(self):
        super(ICMSUFDest, self).__init__()
        self.vBCUFDest = TagDecimal(nome='vBCUFDest', codigo='AI01', tamanho=[1, 13, 1], decimais=[2, 4, 2], raiz='//det/imposto/ICMSUFDest')
        self.pFCPUFDest = TagDecimal(nome='pFCPUFDest', codigo='AI02', tamanho=[1, 3, 1], decimais=[2, 4, 2], raiz='//det/imposto/ICMSUFDest')
        self.pICMSUFDest = TagDecimal(nome='pICMSUFDest', codigo='AI03', tamanho=[1, 3, 1], decimais=[2, 4, 2], raiz='//det/imposto/ICMSUFDest')
        self.pICMSInter= TagDecimal(nome='pICMSInter', codigo='AI04', tamanho=[1, 3, 1], decimais=[2, 4, 2], raiz='//det/imposto/ICMSUFDest')
        self.pICMSInterPart = TagDecimal(nome='pICMSInterPart', codigo='AI05', tamanho=[1, 3, 1], decimais=[2, 4, 2], raiz='//det/imposto/ICMSUFDest')
        self.vFCPUFDest = TagDecimal(nome='vFCPUFDest', codigo='AI06', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='//det/imposto/ICMSUFDest')
        self.vICMSUFDest = TagDecimal(nome='vICMSUFDest', codigo='AI07', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='//det/imposto/ICMSUFDest')
        self.vICMSUFRemet = TagDecimal(nome='vICMSUFRemet', codigo='AI08', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='//det/imposto/ICMSUFDest')
    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        # pICMSInter acts as the "group is present" sentinel
        if not self.pICMSInter.valor:
            return ''
        xml += '<ICMSUFDest>'
        xml += self.vBCUFDest.xml
        xml += self.pFCPUFDest.xml
        xml += self.pICMSUFDest.xml
        xml += self.pICMSInter.xml
        xml += self.pICMSInterPart.xml
        xml += self.vFCPUFDest.xml
        xml += self.vICMSUFDest.xml
        xml += self.vICMSUFRemet.xml
        xml += '</ICMSUFDest>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.vBCUFDest.xml = arquivo
            self.pFCPUFDest.xml = arquivo
            self.pICMSUFDest.xml = arquivo
            self.pICMSInter.xml = arquivo
            self.pICMSInterPart.xml = arquivo
            self.vFCPUFDest.xml = arquivo
            self.vICMSUFDest.xml = arquivo
            self.vICMSUFRemet.xml = arquivo
    xml = property(get_xml, set_xml)
class Imposto(nfe_200.Imposto):
    """Tax container (<imposto>) of an NF-e item, layout 3.10.

    Replaces the 2.00 sub-groups with their 3.10 counterparts and adds
    the new ICMSUFDest (interstate sharing) group.
    """
    def __init__(self):
        super(Imposto, self).__init__()
        self.ICMS = ICMS()
        self.IPI = IPI()
        self.PIS = PIS()
        self.COFINS = COFINS()
        self.ISSQN = ISSQN()
        self.ICMSUFDest = ICMSUFDest()
    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += '<imposto>'
        xml += self.vTotTrib.xml
        # Send ICMS, IPI and II only when the item is not a service;
        # a filled-in ISSQN rate marks the item as a service
        if not self.ISSQN.vAliq.valor:
            xml += self.ICMS.xml
            xml += self.IPI.xml
            xml += self.II.xml
        else:
            xml += self.ISSQN.xml
        xml += self.PIS.xml
        xml += self.PISST.xml
        xml += self.COFINS.xml
        xml += self.COFINSST.xml
        xml += self.ICMSUFDest.xml
        xml += '</imposto>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            # All sub-groups are fed the same source; each one only picks
            # up the nodes under its own root
            self.vTotTrib.xml = arquivo
            self.ICMS.xml = arquivo
            self.IPI.xml = arquivo
            self.II.xml = arquivo
            self.PIS.xml = arquivo
            self.PISST.xml = arquivo
            self.COFINS.xml = arquivo
            self.COFINSST.xml = arquivo
            self.ISSQN.xml = arquivo
            self.ICMSUFDest.xml = arquivo
    xml = property(get_xml, set_xml)
class CIDE(nfe_200.CIDE):
    """Fuel CIDE group; behaviour unchanged from layout 2.00."""
    def __init__(self):
        # Pure passthrough: layout 3.10 re-exports the 2.00 implementation.
        nfe_200.CIDE.__init__(self)
class Comb(nfe_200.Comb):
    """Fuel detail group (<comb>), layout 3.10.

    Adds the natural-gas mix percentage (pMixGN) to the 2.00 layout.
    """
    def __init__(self):
        super(Comb, self).__init__()
        # New in 3.10: percentage of natural gas in the mix (optional)
        self.pMixGN = TagDecimal(nome='pMixGN', codigo='LA03', tamanho=[1, 2, 1], decimais=[0, 4, 4], raiz='//det/prod/comb', obrigatorio=False)
    def get_xml(self):
        # The group is optional: the ANP product code works as the
        # "group is present" sentinel
        if not self.cProdANP.valor:
            return ''
        xml = XMLNFe.get_xml(self)
        xml += '<comb>'
        xml += self.cProdANP.xml
        xml += self.pMixGN.xml
        xml += self.CODIF.xml
        xml += self.qTemp.xml
        xml += self.CIDE.xml
        xml += '</comb>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.cProdANP.xml = arquivo
            self.pMixGN.xml = arquivo
            self.CODIF.xml = arquivo
            self.qTemp.xml = arquivo
            self.CIDE.xml = arquivo
    xml = property(get_xml, set_xml)
class Arma(nfe_200.Arma):
    """Firearm detail group; behaviour unchanged from layout 2.00."""
    def __init__(self):
        # Pure passthrough: layout 3.10 re-exports the 2.00 implementation.
        nfe_200.Arma.__init__(self)
class Med(nfe_200.Med):
    """Medicine detail group; behaviour unchanged from layout 2.00."""
    def __init__(self):
        # Pure passthrough: layout 3.10 re-exports the 2.00 implementation.
        nfe_200.Med.__init__(self)
class VeicProd(nfe_200.VeicProd):
    """New-vehicle detail group; behaviour unchanged from layout 2.00."""
    def __init__(self):
        # Pure passthrough: layout 3.10 re-exports the 2.00 implementation.
        nfe_200.VeicProd.__init__(self)
class ExportInd(XMLNFe):
    """Indirect-export detail (<exportInd>) of an NF-e item, layout 3.10."""
    def __init__(self):
        super(ExportInd, self).__init__()
        self.nRE = TagInteiro(nome='nRE', codigo='I53', tamanho=[1, 12], raiz='//detExport/exportInd', obrigatorio=False)
        self.chNFe = TagCaracter(nome='chNFe', codigo='I54', tamanho=[44, 44], raiz='//detExport/exportInd', obrigatorio=False)
        self.qExport = TagDecimal(nome='qExport', codigo='I55', tamanho=[1, 12, 1], decimais=[0, 2, 4], raiz='//detExport/exportInd', obrigatorio=False)
    def get_xml(self):
        resultado = XMLNFe.get_xml(self)
        # Optional group: only emitted when at least one field is set.
        if self.nRE.valor or self.chNFe.valor or self.qExport.valor:
            resultado += ''.join([
                '<exportInd>',
                self.nRE.xml,
                self.chNFe.xml,
                self.qExport.xml,
                '</exportInd>',
            ])
        return resultado
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            for campo in (self.nRE, self.chNFe, self.qExport):
                campo.xml = arquivo
    xml = property(get_xml, set_xml)
class DetExport(XMLNFe):
    """Export detail group (<detExport>) of an NF-e item, layout 3.10.

    Holds the drawback number (nDraw) and the nested indirect-export
    information (exportInd).
    """
    def __init__(self):
        super(DetExport, self).__init__()
        self.nDraw = TagInteiro(nome='nDraw', codigo='I50', tamanho=[1, 11], raiz='//detExport', obrigatorio=False)
        self.exportInd = ExportInd()
    def get_xml(self):
        resultado = XMLNFe.get_xml(self)
        conteudo_filho = self.exportInd.xml
        # Optional group: skipped when neither field produces output.
        if self.nDraw.valor or conteudo_filho:
            resultado += '<detExport>' + self.nDraw.xml + conteudo_filho + '</detExport>'
        return resultado
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.nDraw.xml = arquivo
            self.exportInd.xml = arquivo
    xml = property(get_xml, set_xml)
class Adi(nfe_200.Adi):
    """Import-declaration addition group; behaviour unchanged from layout 2.00."""
    def __init__(self):
        # Pure passthrough: layout 3.10 re-exports the 2.00 implementation.
        nfe_200.Adi.__init__(self)
class DI(nfe_200.DI):
    """Import declaration group (<DI>), layout 3.10.

    Adds the 3.10 fields for transport route, AFRMM value and
    intermediation data to the 2.00 layout.
    """
    def __init__(self):
        super(DI, self).__init__()
        self.tpViaTransp = TagCaracter(nome='tpViaTransp', codigo='I23a', tamanho=[1, 1], raiz='//DI')
        self.vAFRMM = TagDecimal(nome='vAFRMM' , codigo='I23b', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//DI', obrigatorio=False)
        self.tpIntermedio = TagCaracter(nome='tpIntermedio', codigo='I23c', tamanho=[1, 1], raiz='//DI')
        self.CNPJ = TagCaracter(nome='CNPJ' , codigo='I23d', tamanho=[14, 14], raiz='//DI', obrigatorio=False)
        self.UFTerceiro = TagCaracter(nome='UFTerceiro', codigo='I23e', tamanho=[2, 2], raiz='//DI', obrigatorio=False)
    def get_xml(self):
        # BUG FIX: the original tested the Tag object itself
        # ("if not self.nDI"), not its value; every sibling class tests
        # ".valor" (e.g. Comb.get_xml), so an empty DI was still emitted.
        if not self.nDI.valor:
            return ''
        xml = XMLNFe.get_xml(self)
        xml += '<DI>'
        xml += self.nDI.xml
        xml += self.dDI.xml
        xml += self.xLocDesemb.xml
        xml += self.UFDesemb.xml
        xml += self.dDesemb.xml
        xml += self.tpViaTransp.xml
        xml += self.vAFRMM.xml
        xml += self.tpIntermedio.xml
        xml += self.CNPJ.xml
        xml += self.UFTerceiro.xml
        xml += self.cExportador.xml
        for a in self.adi:
            xml += a.xml
        xml += '</DI>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.nDI.xml = arquivo
            self.dDI.xml = arquivo
            self.xLocDesemb.xml = arquivo
            self.UFDesemb.xml = arquivo
            self.dDesemb.xml = arquivo
            self.tpViaTransp.xml = arquivo
            self.vAFRMM.xml = arquivo
            self.tpIntermedio.xml = arquivo
            self.CNPJ.xml = arquivo
            self.UFTerceiro.xml = arquivo
            self.cExportador.xml = arquivo
            #
            # Technique for reading repeated tags: the tag classes (and
            # their children) must be re-rooted (raiz property) so each
            # occurrence is parsed against its own node
            #
            adis = self._le_nohs('//DI/adi')
            self.adi = []
            if adis is not None:
                self.adi = [Adi() for a in adis]
                for i in range(len(adis)):
                    self.adi[i].xml = adis[i]
    xml = property(get_xml, set_xml)
class Prod(nfe_200.Prod):
    """Product group (<prod>) of an NF-e item, layout 3.10.

    Adds the NVE, CEST, detExport, vehicle/fuel sub-groups and nRECOPI
    fields to the 2.00 layout.
    """
    def __init__(self):
        super(Prod, self).__init__()
        #self.NCM = TagCaracter(nome='NCM' , codigo='I05' , tamanho=[2, 8] , raiz='//det/prod')
        #self.qCom = TagDecimal(nome='qCom' , codigo='I10' , tamanho=[1, 15, 1], decimais=[0, 4, 4], raiz='//det/prod')
        #self.vUnCom = TagDecimal(nome='vUnCom' , codigo='I10a', tamanho=[1, 21, 1], decimais=[0, 10, 4], raiz='//det/prod')
        #self.qTrib = TagDecimal(nome='qTrib' , codigo='I14' , tamanho=[1, 15, 1], decimais=[0, 4, 4], raiz='//det/prod')
        #self.vUnTrib = TagDecimal(nome='vUnTrib' , codigo='I14a', tamanho=[1, 21, 1], decimais=[0, 10, 4], raiz='//det/prod')
        #self.vOutro = TagDecimal(nome='vOutro' , codigo='I17a', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/prod', obrigatorio=False)
        #self.indTot = TagInteiro(nome='indTot' , codigo='I17b', tamanho=[1, 1, 1], raiz='//det/prod', valor=1)
        #self.xPed = TagCaracter(nome='xPed' , codigo='I30' , tamanho=[1, 15], raiz='//det/prod', obrigatorio=False)
        #self.nItemPed = TagCaracter(nome='nItemPed', codigo='I31' , tamanho=[1, 6], raiz='//det/prod', obrigatorio=False)
        #self.nFCI = TagCaracter(nome='nFCI' , codigo='I70' , tamanho=[36, 36, 36], raiz='//det/prod', obrigatorio=False)
        # New fields of layout 3.10
        self.NVE = TagCaracter(nome='NVE', codigo='I05', tamanho=[0, 8], raiz='//det/prod', obrigatorio=False)
        self.CEST = TagCaracter(nome='CEST', codigo='I05c', tamanho=[0, 7], raiz='//det/prod', obrigatorio=False)
        self.detExport = DetExport()
        self.veicProd = VeicProd()
        self.comb = Comb()
        self.nRECOPI = TagCaracter(nome='nRECOPI', codigo='LB01', tamanho=[20, 20, 20], raiz='//det/prod', obrigatorio=False)
    def get_xml(self):
        # Field order follows the 3.10 XML schema and must not change
        xml = XMLNFe.get_xml(self)
        xml += '<prod>'
        xml += self.cProd.xml
        xml += self.cEAN.xml
        xml += self.xProd.xml
        xml += self.NCM.xml
        xml += self.NVE.xml
        xml += self.CEST.xml
        xml += self.EXTIPI.xml
        #xml += self.genero.xml
        xml += self.CFOP.xml
        xml += self.uCom.xml
        xml += self.qCom.xml
        xml += self.vUnCom.xml
        xml += self.vProd.xml
        xml += self.cEANTrib.xml
        xml += self.uTrib.xml
        xml += self.qTrib.xml
        xml += self.vUnTrib.xml
        xml += self.vFrete.xml
        xml += self.vSeg.xml
        xml += self.vDesc.xml
        xml += self.vOutro.xml
        xml += self.indTot.xml
        for d in self.DI:
            xml += d.xml
        xml += self.detExport.xml
        xml += self.xPed.xml
        xml += self.nItemPed.xml
        xml += self.nFCI.xml
        xml += self.veicProd.xml
        for m in self.med:
            xml += m.xml
        for a in self.arma:
            xml += a.xml
        xml += self.comb.xml
        xml += self.nRECOPI.xml
        xml += '</prod>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.cProd.xml = arquivo
            self.cEAN.xml = arquivo
            self.xProd.xml = arquivo
            self.NCM.xml = arquivo
            self.NVE.xml = arquivo
            self.CEST.xml = arquivo
            self.EXTIPI.xml = arquivo
            #self.genero.xml = arquivo
            self.CFOP.xml = arquivo
            self.uCom.xml = arquivo
            self.qCom.xml = arquivo
            self.vUnCom.xml = arquivo
            self.vProd.xml = arquivo
            self.cEANTrib.xml = arquivo
            self.uTrib.xml = arquivo
            self.qTrib.xml = arquivo
            self.vUnTrib.xml = arquivo
            self.vFrete.xml = arquivo
            self.vSeg.xml = arquivo
            self.vDesc.xml = arquivo
            self.vOutro.xml = arquivo
            self.indTot.xml = arquivo
            #
            # Technique for reading repeated tags: the tag classes (and
            # their children) must be re-rooted (raiz property) so each
            # occurrence is parsed against its own node
            #
            self.DI = self.le_grupo('//det/prod/DI', DI)
            self.detExport.xml = arquivo
            self.xPed.xml = arquivo
            self.nItemPed.xml = arquivo
            self.nFCI.xml = arquivo
            self.veicProd.xml = arquivo
            #
            # Same technique for the med and arma repeated groups
            #
            self.med = self.le_grupo('//det/prod/med', Med)
            self.arma = self.le_grupo('//det/prod/arma', Arma)
            self.comb.xml = arquivo
            self.nRECOPI.xml = arquivo
    xml = property(get_xml, set_xml)
class Det(nfe_200.Det):
    """Item detail group (<det>) of an NF-e, layout 3.10.

    Aggregates the 3.10 product, tax and returned-tax (impostoDevol)
    sub-groups for a single invoice item.
    """
    def __init__(self):
        super(Det, self).__init__()
        self.prod = Prod()
        self.imposto = Imposto()
        self.impostoDevol = ImpostoDevol()
    def get_xml(self):
        # NOTE(review): only the closing </det> is emitted here; the
        # opening <det nItem="..."> presumably comes from nItem.xml in
        # the nfe_200 base — confirm there before changing this method.
        xml = XMLNFe.get_xml(self)
        xml += self.nItem.xml
        xml += self.prod.xml
        xml += self.imposto.xml
        xml += self.impostoDevol.xml
        xml += self.infAdProd.xml
        xml += '</det>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.nItem.xml = arquivo
            self.prod.xml = arquivo
            self.imposto.xml = arquivo
            self.impostoDevol.xml = arquivo
            self.infAdProd.xml = arquivo
    xml = property(get_xml, set_xml)
class Compra(nfe_200.Compra):
    """Purchase information group; behaviour unchanged from layout 2.00."""
    def __init__(self):
        # Pure passthrough: layout 3.10 re-exports the 2.00 implementation.
        nfe_200.Compra.__init__(self)
class ProcRef(nfe_200.ProcRef):
    """Referenced legal process group; behaviour unchanged from layout 2.00."""
    def __init__(self):
        # Pure passthrough: layout 3.10 re-exports the 2.00 implementation.
        nfe_200.ProcRef.__init__(self)
class ObsFisco(nfe_200.ObsFisco):
    """Tax-authority note group; behaviour unchanged from layout 2.00."""
    def __init__(self):
        # Pure passthrough: layout 3.10 re-exports the 2.00 implementation.
        nfe_200.ObsFisco.__init__(self)
class ObsCont(nfe_200.ObsCont):
    """Taxpayer note group; behaviour unchanged from layout 2.00."""
    def __init__(self):
        # Pure passthrough: layout 3.10 re-exports the 2.00 implementation.
        nfe_200.ObsCont.__init__(self)
class InfAdic(nfe_200.InfAdic):
    """Additional information group; behaviour unchanged from layout 2.00."""
    def __init__(self):
        # Pure passthrough: layout 3.10 re-exports the 2.00 implementation.
        nfe_200.InfAdic.__init__(self)
class Card(XMLNFe):
    """Card payment detail (<card>) inside the <pag> group, layout 3.10."""
    def __init__(self):
        super(Card, self).__init__()
        self.CNPJ = TagCaracter(nome='CNPJ' , codigo='XA05', tamanho=[14, 14], raiz='//pag/card')
        self.tBand = TagCaracter(nome='tBand', codigo='YA01', tamanho=[ 2, 2], raiz='//pag/card')
        self.cAut = TagCaracter(nome='cAut' , codigo='YA01', tamanho=[20, 20], raiz='//pag/card')
    def get_xml(self):
        # Optional group: skip it entirely when no card field is set.
        if not (self.CNPJ.valor or self.tBand.valor or self.cAut.valor):
            return ''
        partes = [
            XMLNFe.get_xml(self),
            '<card>',
            self.CNPJ.xml,
            self.tBand.xml,
            self.cAut.xml,
            '</card>',
        ]
        return ''.join(partes)
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            for campo in (self.CNPJ, self.tBand, self.cAut):
                campo.xml = arquivo
    xml = property(get_xml, set_xml)
class Pag(XMLNFe):
    """Payment information group (<pag>), introduced with layout 3.10.

    Holds the payment type (tPag), the amount (vPag) and, for card
    payments, the nested <card> sub-group.
    """
    def __init__(self):
        super(Pag, self).__init__()
        self.tPag = TagCaracter(nome='tPag', codigo='YA01', tamanho=[2, 2, 2], raiz='//pag')
        self.vPag = TagDecimal(nome='vPag' , codigo='YA02', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//pag')
        self.card = Card()
    def get_xml(self):
        # Optional group: skip it entirely when nothing was filled in.
        if not (self.tPag.valor or self.vPag.valor or self.card.xml):
            return ''
        xml = XMLNFe.get_xml(self)
        xml += '<pag>'
        xml += self.tPag.xml
        xml += self.vPag.xml
        xml += self.card.xml
        xml += '</pag>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.tPag.xml = arquivo
            self.vPag.xml = arquivo
            # BUG FIX: the original assigned "self.cad.xml", an attribute
            # that does not exist (the field is "self.card"), which raised
            # AttributeError whenever a <pag> group was parsed.
            self.card.xml = arquivo
    xml = property(get_xml, set_xml)
class Dup(nfe_200.Dup):
    """Installment (duplicata) group; behaviour unchanged from layout 2.00."""
    def __init__(self):
        # Pure passthrough: layout 3.10 re-exports the 2.00 implementation.
        nfe_200.Dup.__init__(self)
class Fat(nfe_200.Fat):
    """Invoice (fatura) group; behaviour unchanged from layout 2.00."""
    def __init__(self):
        # Pure passthrough: layout 3.10 re-exports the 2.00 implementation.
        nfe_200.Fat.__init__(self)
class Cobr(nfe_200.Cobr):
    """Billing group; behaviour unchanged from layout 2.00."""
    def __init__(self):
        # Pure passthrough: layout 3.10 re-exports the 2.00 implementation.
        nfe_200.Cobr.__init__(self)
class Lacres(nfe_200.Lacres):
    """Seal (lacre) group; behaviour unchanged from layout 2.00."""
    def __init__(self):
        # Pure passthrough: layout 3.10 re-exports the 2.00 implementation.
        nfe_200.Lacres.__init__(self)
class Vol(nfe_200.Vol):
    """Transported volume group; behaviour unchanged from layout 2.00."""
    def __init__(self, xml=None):
        # *xml* is accepted only for signature compatibility and, as in
        # the original, it is NOT forwarded to the base class.
        nfe_200.Vol.__init__(self)
class Reboque(nfe_200.Reboque):
    """Trailer group; behaviour unchanged from layout 2.00."""
    def __init__(self):
        # Pure passthrough: layout 3.10 re-exports the 2.00 implementation.
        nfe_200.Reboque.__init__(self)
class VeicTransp(nfe_200.VeicTransp):
    """Transport vehicle group; behaviour unchanged from layout 2.00."""
    def __init__(self):
        # Pure passthrough: layout 3.10 re-exports the 2.00 implementation.
        nfe_200.VeicTransp.__init__(self)
class RetTransp(nfe_200.RetTransp):
    """Freight ICMS withholding group (<retTransp>), layout 3.10.

    Overrides the 2.00 definition of pICMSRet so the withheld-ICMS rate
    accepts up to four decimal places.
    """
    def __init__(self):
        super(RetTransp, self).__init__()
        # BUG FIX: the tag was declared with nome='vICMSRet', which made
        # this field serialize and parse as a duplicate <vICMSRet>
        # element instead of <pICMSRet>.
        self.pICMSRet = TagDecimal(nome='pICMSRet', codigo='X14', tamanho=[1, 15, 1], decimais=[0, 4, 4], raiz='//NFe/infNFe/transp/retTransp')
    def get_xml(self):
        # Optional group: skip it entirely when no field is filled in.
        if not (self.vServ.valor or self.vBCRet.valor or self.pICMSRet.valor or self.vICMSRet.valor or self.CFOP.valor or self.cMunFG.valor):
            return ''
        xml = XMLNFe.get_xml(self)
        xml += '<retTransp>'
        xml += self.vServ.xml
        xml += self.vBCRet.xml
        xml += self.pICMSRet.xml
        xml += self.vICMSRet.xml
        xml += self.CFOP.xml
        xml += self.cMunFG.xml
        xml += '</retTransp>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.vServ.xml = arquivo
            self.vBCRet.xml = arquivo
            self.pICMSRet.xml = arquivo
            self.vICMSRet.xml = arquivo
            self.CFOP.xml = arquivo
            self.cMunFG.xml = arquivo
    xml = property(get_xml, set_xml)
class Transporta(nfe_200.Transporta):
    """Carrier group; behaviour unchanged from layout 2.00."""
    def __init__(self):
        # Pure passthrough: layout 3.10 re-exports the 2.00 implementation.
        nfe_200.Transporta.__init__(self)
class Transp(nfe_200.Transp):
    """Transport group (<transp>) of an NF-e, layout 3.10.

    Swaps in the 3.10 RetTransp class; the repeated reboque/vol groups
    are read with the multi-tag technique from the base module.
    """
    def __init__(self):
        super(Transp, self).__init__()
        self.retTransp = RetTransp()
    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += '<transp>'
        xml += self.modFrete.xml
        xml += self.transporta.xml
        xml += self.retTransp.xml
        xml += self.veicTransp.xml
        for r in self.reboque:
            xml += r.xml
        for v in self.vol:
            xml += v.xml
        xml += '</transp>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.modFrete.xml = arquivo
            self.transporta.xml = arquivo
            self.retTransp.xml = arquivo
            self.veicTransp.xml = arquivo
            #
            # Technique for reading repeated tags: the tag classes (and
            # their children) must be re-rooted (raiz property) so each
            # occurrence is parsed against its own node
            #
            self.reboque = self.le_grupo('//NFe/infNFe/transp/reboque', Reboque)
            self.vol = self.le_grupo('//NFe/infNFe/transp/vol', Vol)
    xml = property(get_xml, set_xml)
class RetTrib(nfe_200.RetTrib):
    """Withheld-taxes total group; behaviour unchanged from layout 2.00."""
    def __init__(self):
        # Pure passthrough: layout 3.10 re-exports the 2.00 implementation.
        nfe_200.RetTrib.__init__(self)
class ISSQNTot(nfe_200.ISSQNTot):
    """ISSQN totals group (<ISSQNtot>), layout 3.10.

    Adds the 3.10 fields (competence date, deductions, discounts,
    withheld ISS and taxation regime) to the 2.00 layout.  The whole
    group is optional and only serialized when any field is filled in.
    """
    def __init__(self):
        super(ISSQNTot, self).__init__()
        self.dCompet = TagData(nome='dCompet' , codigo='W22a' , raiz='//NFe/infNFe/total/ISSQNtot')
        self.vDeducao = TagDecimal(nome='vDeducao' , codigo='W22b', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/ISSQNtot', obrigatorio=False)
        self.vOutro = TagDecimal(nome='vOutro' , codigo='W22c', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/ISSQNtot', obrigatorio=False)
        self.vDescIncond = TagDecimal(nome='vDescIncond', codigo='W22d', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/ISSQNtot', obrigatorio=False)
        self.vDescCond = TagDecimal(nome='vDescCond' , codigo='W22e', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/ISSQNtot', obrigatorio=False)
        self.vISSRet = TagDecimal(nome='vISSRet' , codigo='W22f', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//NFe/infNFe/total/ISSQNtot', obrigatorio=False)
        self.cRegTrib = TagCaracter(nome='cRegTrib' , codigo='W22g', tamanho=[0, 1, 1] , raiz='//NFe/infNFe/total/ISSQNtot', obrigatorio=False)
    def get_xml(self):
        # Optional group: skip it entirely when no field is filled in
        if not (self.vServ.valor or self.vBC.valor or self.vISS.valor or self.vPIS.valor or self.vCOFINS.valor or
                self.dCompet.valor or self.vDeducao.valor or self.vOutro.valor or self.vDescIncond.valor or self.vDescCond.valor
                or self.vISSRet.valor or self.cRegTrib.valor):
            return ''
        xml = XMLNFe.get_xml(self)
        xml += '<ISSQNtot>'
        xml += self.vServ.xml
        xml += self.vBC.xml
        xml += self.vISS.xml
        xml += self.vPIS.xml
        xml += self.vCOFINS.xml
        xml += self.dCompet.xml
        xml += self.vDeducao.xml
        xml += self.vOutro.xml
        xml += self.vDescIncond.xml
        xml += self.vDescCond.xml
        xml += self.vISSRet.xml
        xml += self.cRegTrib.xml
        xml += '</ISSQNtot>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.vServ.xml = arquivo
            self.vBC.xml = arquivo
            self.vISS.xml = arquivo
            self.vPIS.xml = arquivo
            self.vCOFINS.xml = arquivo
            self.dCompet.xml = arquivo
            self.vDeducao.xml = arquivo
            self.vOutro.xml = arquivo
            self.vDescIncond.xml = arquivo
            self.vDescCond.xml = arquivo
            self.vISSRet.xml = arquivo
            self.cRegTrib.xml = arquivo
    xml = property(get_xml, set_xml)
class ICMSTot(nfe_200.ICMSTot):
    """ICMS totals group (<ICMSTot>), layout 3.10.

    Adds the exempted-ICMS total (vICMSDeson) and the interstate
    sharing totals (vFCPUFDest, vICMSUFDest, vICMSUFRemet) to the 2.00
    layout; field order follows the 3.10 XML schema.
    """
    def __init__(self):
        super(ICMSTot, self).__init__()
        self.vICMSDeson = TagDecimal(nome='vICMSDeson', codigo='W04a', tamanho=[1, 15, 1], decimais=[1, 2, 2], raiz='//NFe/infNFe/total/ICMSTot')
        self.vFCPUFDest = TagDecimal(nome='vFCPUFDest', codigo='W04b', tamanho=[1, 15, 1], decimais=[1, 2, 2], raiz='//NFe/infNFe/total/ICMSTot')
        self.vICMSUFDest = TagDecimal(nome='vICMSUFDest', codigo='W04c', tamanho=[1, 15, 1], decimais=[1, 2, 2], raiz='//NFe/infNFe/total/ICMSTot')
        self.vICMSUFRemet = TagDecimal(nome='vICMSUFRemet', codigo='W04d', tamanho=[1, 15, 1], decimais=[1, 2, 2], raiz='//NFe/infNFe/total/ICMSTot')
    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += '<ICMSTot>'
        xml += self.vBC.xml
        xml += self.vICMS.xml
        xml += self.vICMSDeson.xml
        xml += self.vFCPUFDest.xml
        xml += self.vICMSUFDest.xml
        xml += self.vICMSUFRemet.xml
        xml += self.vBCST.xml
        xml += self.vST.xml
        xml += self.vProd.xml
        xml += self.vFrete.xml
        xml += self.vSeg.xml
        xml += self.vDesc.xml
        xml += self.vII.xml
        xml += self.vIPI.xml
        xml += self.vPIS.xml
        xml += self.vCOFINS.xml
        xml += self.vOutro.xml
        xml += self.vNF.xml
        xml += self.vTotTrib.xml
        xml += '</ICMSTot>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.vBC.xml = arquivo
            self.vICMS.xml = arquivo
            self.vICMSDeson.xml = arquivo
            self.vFCPUFDest.xml = arquivo
            self.vICMSUFDest.xml = arquivo
            self.vICMSUFRemet.xml = arquivo
            self.vBCST.xml = arquivo
            self.vST.xml = arquivo
            self.vProd.xml = arquivo
            self.vFrete.xml = arquivo
            self.vSeg.xml = arquivo
            self.vDesc.xml = arquivo
            self.vII.xml = arquivo
            self.vIPI.xml = arquivo
            self.vPIS.xml = arquivo
            self.vCOFINS.xml = arquivo
            self.vOutro.xml = arquivo
            self.vNF.xml = arquivo
            self.vTotTrib.xml = arquivo
    xml = property(get_xml, set_xml)
class Total(nfe_200.Total):
    '''
    "total" group of the NF-e, layout 3.10; swaps in the 3.10 versions
    of the ICMS and ISSQN totals groups.
    '''
    def __init__(self):
        super(Total, self).__init__()
        self.ICMSTot = ICMSTot()
        self.ISSQNTot = ISSQNTot()
        #self.retTrib = RetTrib()
class AutXML(XMLNFe):
    '''
    "autXML" group (layout 3.10): a party authorized to download the
    NF-e XML, identified by either a CNPJ or a CPF.
    '''
    def __init__(self):
        super(AutXML, self).__init__()
        self.CNPJ = TagCaracter(nome='CNPJ', codigo='GA02', tamanho=[14, 14], raiz='/', obrigatorio=False)
        self.CPF = TagCaracter(nome='CPF', codigo='GA03', tamanho=[11, 11], raiz='/', obrigatorio=False)

    def get_xml(self):
        # Emit the group only when at least one identifier is filled in;
        # the CNPJ wins when both are present.
        xml = XMLNFe.get_xml(self)
        if not (self.CNPJ.valor or self.CPF.valor):
            return xml
        ident = self.CNPJ.xml if self.CNPJ.valor else self.CPF.xml
        return xml + '<autXML>' + ident + '</autXML>'

    def set_xml(self, arquivo):
        # Load both identifier tags; the absent one simply stays empty.
        if self._le_xml(arquivo):
            self.CNPJ.xml = arquivo
            self.CPF.xml = arquivo

    xml = property(get_xml, set_xml)
class Entrega(nfe_200.Entrega):
    # Delivery-address group: version-specific subclass hook; adds
    # nothing beyond the layout 2.00 implementation.
    def __init__(self):
        super(Entrega, self).__init__()
class Retirada(nfe_200.Retirada):
    # Pickup-address group: version-specific subclass hook; adds
    # nothing beyond the layout 2.00 implementation.
    def __init__(self):
        super(Retirada, self).__init__()
class EnderDest(nfe_200.EnderDest):
    # Recipient-address group: version-specific subclass hook; adds
    # nothing beyond the layout 2.00 implementation.
    def __init__(self):
        super(EnderDest, self).__init__()
class Dest(nfe_200.Dest):
    '''
    "dest" (recipient) group of the NF-e, layout 3.10.

    New in 3.10: idEstrangeiro (foreign buyer id), indIEDest (state tax
    registration indicator) and per-model behavior — for model 65 (NFC-e)
    the whole group may be omitted when no identifier is present.
    '''
    def __init__(self):
        super(Dest, self).__init__()
        self.modelo = '55'
        self.enderDest = EnderDest()
        self.idEstrangeiro = TagCaracter(nome='idEstrangeiro' , codigo='E03a', tamanho=[0 , 20] , raiz='//NFe/infNFe/dest', obrigatorio=False)
        self.indIEDest = TagCaracter(nome='indIEDest', codigo='E16a', tamanho=[1 , 1], raiz='//NFe/infNFe/dest', obrigatorio=True)
        self.IE = TagCaracter(nome='IE' , codigo='E17', tamanho=[ 2, 14] , raiz='//NFe/infNFe/dest', obrigatorio=False)
        self.IM = TagCaracter(nome='IM', codigo='E18a', tamanho=[ 1, 15] , raiz='//NFe/infNFe/dest', obrigatorio=False)
    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        # NFC-e (model 65) without any buyer identification: omit the group.
        if self.modelo == '65' and (not self.CNPJ.valor) and (not self.CPF.valor) and (not self.idEstrangeiro.valor):
            return xml
        xml += '<dest>'
        #
        # Force the CNPJ tag when the invoice is issued against the
        # homologation (test) environment
        #
        if self.CNPJ.valor == '99999999000191':
            xml += self.CNPJ.xml
        elif self.CPF.valor:
            xml += self.CPF.xml
        elif self.CNPJ.valor:
            xml += self.CNPJ.xml
        elif self.idEstrangeiro.valor:
            xml += self.idEstrangeiro.xml
        if self.xNome.valor:
            xml += self.xNome.xml
        xml += self.enderDest.xml
        xml += self.indIEDest.xml
        # IE is skipped for foreign buyers flagged as exempt (indIEDest == '2').
        if (not self.idEstrangeiro.valor) or (self.indIEDest.valor != '2' and self.IE.valor):
            xml += self.IE.xml
        xml += self.ISUF.xml
        xml += self.IM.xml
        xml += self.email.xml
        xml += '</dest>'
        return xml
    def set_xml(self, arquivo):
        # Load every child tag of the group from the given XML document.
        if self._le_xml(arquivo):
            self.CNPJ.xml = arquivo
            self.CPF.xml = arquivo
            self.idEstrangeiro.xml = arquivo
            self.xNome.xml = arquivo
            self.enderDest.xml = arquivo
            self.indIEDest.xml = arquivo
            self.IE.xml = arquivo
            self.ISUF.xml = arquivo
            self.IM.xml = arquivo
            self.email.xml = arquivo
    xml = property(get_xml, set_xml)
    def get_txt(self):
        # Legacy flat-text (TXT) export of the recipient group.
        txt = 'E|'
        txt += self.xNome.txt + '|'
        txt += self.IE.txt + '|'
        txt += self.ISUF.txt + '|'
        txt += self.email.txt + '|'
        txt += '\n'
        if self.CPF.valor:
            txt += 'E03|' + self.CPF.txt + '|\n'
        else:
            txt += 'E02|' + self.CNPJ.txt + '|\n'
        txt += self.enderDest.txt
        return txt
    txt = property(get_txt)
class Avulsa(nfe_200.Avulsa):
    # "avulsa" group: version-specific subclass hook; adds nothing
    # beyond the layout 2.00 implementation.
    def __init__(self):
        super(Avulsa, self).__init__()
class EnderEmit(nfe_200.EnderEmit):
    # Issuer-address group: version-specific subclass hook; adds
    # nothing beyond the layout 2.00 implementation.
    def __init__(self):
        super(EnderEmit, self).__init__()
class Emit(nfe_200.Emit):
    # "emit" (issuer) group: same as layout 2.00 but wired to the
    # 3.10 address class.
    def __init__(self):
        super(Emit, self).__init__()
        self.enderEmit = EnderEmit()
class RefECF(nfe_200.RefECF):
    # Referenced ECF coupon group: version-specific subclass hook; adds
    # nothing beyond the layout 2.00 implementation.
    def __init__(self):
        super(RefECF, self).__init__()
class RefNFP(nfe_200.RefNFP):
    # Referenced producer invoice group: version-specific subclass hook.
    def __init__(self):
        super(RefNFP, self).__init__()
class RefNF(nfe_200.RefNF):
    # Referenced model 1/1A invoice group: version-specific subclass hook.
    def __init__(self):
        super(RefNF, self).__init__()
class NFRef(nfe_200.NFRef):
    # "NFref" wrapper group for referenced documents: version-specific
    # subclass hook.
    def __init__(self):
        super(NFRef, self).__init__()
class Ide(nfe_200.Ide):
    '''
    "ide" (identification) group of the NF-e, layout 3.10.

    Extends the 2.00 layout: the plain date tags are superseded by UTC
    date/time tags (dhEmi, dhSaiEnt, dhCont) and the 3.10-only fields
    idDest, indFinal and indPres are added.  The legacy 2.00 tags
    (dEmi, dSaiEnt, hSaiEnt) are kept in sync for backward compatibility.
    '''
    def __init__(self):
        super(Ide, self).__init__()
        self.dhEmi = TagDataHoraUTC(nome='dhEmi' , codigo='B09' , raiz='//NFe/infNFe/ide')
        self.dhSaiEnt = TagDataHoraUTC(nome='dhSaiEnt', codigo='B10' , raiz='//NFe/infNFe/ide', obrigatorio=False)
        self.dhCont = TagDataHoraUTC(nome='dhCont' , codigo='B28', raiz='//NFe/infNFe/ide', obrigatorio=False)
        self.idDest = TagCaracter(nome='idDest' , codigo='B11a', tamanho=[ 1, 1, 1], raiz='//NFe/infNFe/ide', valor='1')
        self.indFinal = TagCaracter(nome='indFinal' , codigo='B25a', tamanho=[ 1, 1, 1], raiz='//NFe/infNFe/ide', valor='0')
        self.indPres = TagCaracter(nome='indPres' , codigo='B25b', tamanho=[ 1, 1, 1], raiz='//NFe/infNFe/ide', valor='9')
    def get_xml(self):
        # The tag order below is mandated by the official 3.10 schema;
        # do not reorder.
        xml = XMLNFe.get_xml(self)
        xml += '<ide>'
        xml += self.cUF.xml
        xml += self.cNF.xml
        xml += self.natOp.xml
        xml += self.indPag.xml
        xml += self.mod.xml
        xml += self.serie.xml
        xml += self.nNF.xml
        xml += self.dhEmi.xml
        # Mirror the 3.10 date/time values into the legacy 2.00 tags so
        # code written against the old layout keeps working.
        self.dEmi.valor = self.dhEmi.valor
        xml += self.dhSaiEnt.xml
        self.dSaiEnt.valor = self.dhSaiEnt.valor
        # BUGFIX: this line used to read "self.hSaiEnt.valor =
        # self.hSaiEnt.valor" — a no-op self-assignment; the legacy time
        # tag must mirror dhSaiEnt just like dSaiEnt above.
        self.hSaiEnt.valor = self.dhSaiEnt.valor
        xml += self.tpNF.xml
        xml += self.idDest.xml
        xml += self.cMunFG.xml
        xml += self.tpImp.xml
        xml += self.tpEmis.xml
        xml += self.cDV.xml
        xml += self.tpAmb.xml
        xml += self.finNFe.xml
        xml += self.indFinal.xml
        xml += self.indPres.xml
        xml += self.procEmi.xml
        xml += self.verProc.xml
        xml += self.dhCont.xml
        xml += self.xJust.xml
        for nr in self.NFref:
            xml += nr.xml
        xml += '</ide>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.cUF.xml = arquivo
            self.cNF.xml = arquivo
            self.natOp.xml = arquivo
            self.indPag.xml = arquivo
            self.mod.xml = arquivo
            self.serie.xml = arquivo
            self.nNF.xml = arquivo
            self.dEmi.xml = arquivo
            self.dhEmi.xml = arquivo
            self.dSaiEnt.xml = arquivo
            self.dhSaiEnt.xml = arquivo
            self.hSaiEnt.xml = arquivo
            self.tpNF.xml = arquivo
            self.idDest.xml = arquivo
            self.cMunFG.xml = arquivo
            # Mirror the 3.10 date/time values into the legacy 2.00 tags.
            self.dEmi.valor = self.dhEmi.valor
            self.dSaiEnt.valor = self.dhSaiEnt.valor
            # BUGFIX: was a no-op self-assignment (hSaiEnt = hSaiEnt);
            # mirror dhSaiEnt here as well.
            self.hSaiEnt.valor = self.dhSaiEnt.valor
            #
            # Technique for reading repeated tags:
            # the classes of those tags (and their children) must be
            # "re-rooted" (raiz property) to be read correctly
            #
            self.NFref = self.le_grupo('//NFe/infNFe/ide/NFref', NFRef)
            self.tpImp.xml = arquivo
            self.tpEmis.xml = arquivo
            self.cDV.xml = arquivo
            self.tpAmb.xml = arquivo
            self.finNFe.xml = arquivo
            self.indFinal.xml = arquivo
            self.indPres.xml = arquivo
            self.procEmi.xml = arquivo
            self.verProc.xml = arquivo
            self.dhCont.xml = arquivo
            self.xJust.xml = arquivo
    xml = property(get_xml, set_xml)
class InfNFe(nfe_200.InfNFe):
    '''
    "infNFe" root group of the NF-e, layout 3.10 (versao='3.10').

    Aggregates every sub-group of the document and serializes them in
    the schema-mandated order.  Supports both model 55 (NF-e) and
    model 65 (NFC-e); the "pag" group is emitted only for model 65.
    '''
    def __init__(self):
        super(InfNFe, self).__init__()
        self.versao = TagDecimal(nome='infNFe' , codigo='A01', propriedade='versao', raiz='//NFe', namespace=NAMESPACE_NFE, valor='3.10')
        self.ide = Ide()
        self.emit = Emit()
        self.avulsa = Avulsa()
        self.dest = Dest()
        # The recipient group needs the model to decide whether it may
        # be omitted (NFC-e).
        self.dest.modelo = self.ide.mod.valor
        self.retirada = Retirada()
        self.entrega = Entrega()
        self.autXML = []
        self.det = []
        self.total = Total()
        self.transp = Transp()
        self.cobr = Cobr()
        self.pag = []
        self.infAdic = InfAdic()
        self.exporta = Exporta()
        self.compra = Compra()
        self.cana = Cana()
    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += '<infNFe versao="' + unicode(self.versao.valor) + '" Id="' + self.Id.valor + '">'
        xml += self.ide.xml
        xml += self.emit.xml
        xml += self.avulsa.xml
        xml += self.dest.xml
        xml += self.retirada.xml
        xml += self.entrega.xml
        for a in self.autXML:
            xml += a.xml
        for d in self.det:
            # The ICMS serialization depends on the issuer's tax regime.
            d.imposto.ICMS.regime_tributario = self.emit.CRT.valor
            xml += d.xml
        xml += self.total.xml
        xml += self.transp.xml
        xml += self.cobr.xml
        # The payment group exists only for NFC-e (model 65).
        if self.ide.mod.valor == '65':
            for p in self.pag:
                xml += p.xml
        xml += self.infAdic.xml
        xml += self.exporta.xml
        xml += self.compra.xml
        xml += self.cana.xml
        xml += '</infNFe>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.versao.xml = arquivo
            self.Id.xml = arquivo
            self.ide.xml = arquivo
            self.emit.xml = arquivo
            self.avulsa.xml = arquivo
            self.dest.xml = arquivo
            self.dest.modelo = self.ide.mod.valor
            self.retirada.xml = arquivo
            self.entrega.xml = arquivo
            #
            # Technique for reading repeated tags:
            # the classes of those tags (and their children) must be
            # "re-rooted" (raiz property) to be read correctly
            #
            self.autXML = self.le_grupo('//NFe/infNFe/autXML', AutXML)
            self.det = self.le_grupo('//NFe/infNFe/det', Det)
            self.total.xml = arquivo
            self.transp.xml = arquivo
            self.cobr.xml = arquivo
            self.pag = self.le_grupo('//NFe/infNFe/pag', Pag)
            self.infAdic.xml = arquivo
            self.exporta.xml = arquivo
            self.compra.xml = arquivo
            self.cana.xml = arquivo
    xml = property(get_xml, set_xml)
    def get_txt(self):
        # Legacy flat-text (TXT) export of the whole infNFe group.
        txt = 'A|'
        txt += self.versao.txt + '|'
        txt += self.Id.txt + '|'
        txt += '\n'
        txt += self.ide.txt
        txt += self.emit.txt
        txt += self.avulsa.txt
        txt += self.dest.txt
        txt += self.retirada.txt
        txt += self.entrega.txt
        for d in self.det:
            txt += d.txt
        txt += self.total.txt
        txt += self.transp.txt
        txt += self.cobr.txt
        txt += self.infAdic.txt
        txt += self.exporta.txt
        txt += self.compra.txt
        #txt += self.cana.txt
        return txt
    txt = property(get_txt)
class NFe(nfe_200.NFe):
    def __init__(self):
        # Root document class for layout 3.10: wires in the 3.10 infNFe
        # group and points schema validation at the 3.10 XSD.
        super(NFe, self).__init__()
        self.infNFe = InfNFe()
        self.Signature = Signature()
        self.caminho_esquema = os.path.join(DIRNAME, 'schema/', ESQUEMA_ATUAL + '/')
        self.arquivo_esquema = 'nfe_v3.10.xsd'
    def monta_chave(self):
        '''
        Build the 44-digit NF-e access key from the identification data
        and store it on self.chave.  Layout: UF (2) + YYMM (4) +
        CNPJ (14) + model (2) + series (3) + number (9) +
        emission type (1) + random code (8) + check digit (1).
        '''
        chave = unicode(self.infNFe.ide.cUF.valor).strip().rjust(2, '0')
        chave += unicode(self.infNFe.ide.dhEmi.valor.strftime('%y%m')).strip().rjust(4, '0')
        chave += unicode(self.infNFe.emit.CNPJ.valor).strip().rjust(14, '0')
        # NOTE(review): the model is hard-coded to '55' here, but layout
        # 3.10 also supports model 65 (NFC-e) — confirm whether
        # self.infNFe.ide.mod.valor should be used instead.
        chave += '55'
        chave += unicode(self.infNFe.ide.serie.valor).strip().rjust(3, '0')
        chave += unicode(self.infNFe.ide.nNF.valor).strip().rjust(9, '0')
        #
        # Now also includes the emission type
        #
        chave += unicode(self.infNFe.ide.tpEmis.valor).strip().rjust(1, '0')
        chave += unicode(self.infNFe.ide.cNF.valor).strip().rjust(8, '0')
        chave += unicode(self.infNFe.ide.cDV.valor).strip().rjust(1, '0')
        self.chave = chave
    def monta_dados_contingencia_fsda(self):
        '''
        Build the FS-DA contingency barcode data string and store it on
        self.dados_contingencia_fsda: UF (2) + emission type (1) +
        CNPJ (14) + total in cents (14) + ICMS flag (1) + ICMS-ST
        flag (1) + emission day (2) + check digit (1).
        '''
        dados = unicode(self.infNFe.ide.cUF.valor).zfill(2)
        dados += unicode(self.infNFe.ide.tpEmis.valor).zfill(1)
        dados += unicode(self.infNFe.emit.CNPJ.valor).zfill(14)
        dados += unicode(int(self.infNFe.total.ICMSTot.vNF.valor * 100)).zfill(14)
        #
        # Is there own ICMS?
        #
        if self.infNFe.total.ICMSTot.vICMS.valor:
            dados += '1'
        else:
            dados += '2'
        #
        # Is there ICMS ST?
        #
        if self.infNFe.total.ICMSTot.vST.valor:
            dados += '1'
        else:
            dados += '2'
        dados += self.infNFe.ide.dhEmi.valor.strftime('%d').zfill(2)
        digito = self._calcula_dv(dados)
        dados += unicode(digito)
        self.dados_contingencia_fsda = dados
def crt_desconto(self):
return (
self.infNFe.total.ICMSTot.vDesc.valor +
self.infNFe.total.ICMSTot.vICMSDeson.valor
) | kmee/PySPED | pysped/nfe/leiaute/nfe_310.py | Python | lgpl-2.1 | 68,964 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
import account
| 3dfxsoftware/cbss-addons | l10n_ve_commerce/__init__.py | Python | gpl-2.0 | 1,073 |
# -*- coding: utf-8 -*-
"""
djadmin2's permission handling. The permission classes have the same API as
the permission handling classes of the django-rest-framework. That way, we can
reuse them in the admin's REST API.
The permission checks take place in callables that follow the following
interface:
* They get passed in the current ``request``, an instance of the currently
active ``view`` and optionally the object that should be used for
object-level permission checking.
* Return ``True`` if the permission shall be granted, ``False`` otherwise.
The permission classes are then just fancy wrappers of these basic checks of
which it can hold multiple.
"""
from __future__ import division, absolute_import, unicode_literals
import logging
import re
from django.contrib.auth import get_permission_codename
from django.db.utils import DEFAULT_DB_ALIAS
from django.apps import apps
from django.core.exceptions import ValidationError
from django.db import router
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible, force_text
logger = logging.getLogger('djadmin2')
def is_authenticated(request, view, obj=None):
    '''
    Permission check: grant access only when the requesting user is
    authenticated.
    '''
    user = request.user
    return user.is_authenticated()
def is_staff(request, view, obj=None):
    '''
    Permission check: grant access only when the requesting user is a
    staff member.
    '''
    user = request.user
    return user.is_staff
def is_superuser(request, view, obj=None):
    '''
    Permission check: grant access only when the requesting user is a
    superuser.
    '''
    user = request.user
    return user.is_superuser
def model_permission(permission):
    '''
    Permission-check factory: returns a callable that verifies the user
    holds the given model permission.  ``{app_label}`` and
    ``{model_name}`` placeholders in *permission* are filled from the
    model the current view operates on (model name lowercased).

    Example:

    .. code-block:: python

        check_add_perm = model_permission('{app_label}.add_{model_name}')

        class ModelAddPermission(permissions.BasePermission):
            permissions = [check_add_perm]
    '''
    def has_permission(request, view, obj=None):
        queryset = getattr(view, 'queryset', None)
        model_class = getattr(view, 'model', None)
        if model_class is None and queryset is not None:
            model_class = queryset.model
        assert model_class, (
            'Cannot apply model permissions on a view that does not '
            'have a `.model` or `.queryset` property.')
        try:
            model_name = model_class._meta.model_name  # Django >= 1.8
        except AttributeError:
            model_name = model_class._meta.module_name  # older Django
        perm_name = permission.format(
            app_label=model_class._meta.app_label,
            model_name=model_name)
        return request.user.has_perm(perm_name, obj)
    return has_permission
class BasePermission(object):
    '''
    Common base for permission classes.  The interface is kept
    compatible with django-rest-framework permission backends so the
    same classes can serve the admin's REST API.
    '''
    permissions = []
    permissions_for_method = {}

    def get_permission_checks(self, request, view):
        # Combine the always-on checks with the ones registered for the
        # current HTTP method.
        checks = list(self.permissions)
        checks.extend(self.permissions_for_method.get(request.method, ()))
        return checks

    # needs to stay compatible with django-rest-framework
    def has_permission(self, request, view, obj=None):
        if not request.user:
            return False
        return all(
            check(request, view, obj)
            for check in self.get_permission_checks(request, view))

    # needs to stay compatible with django-rest-framework
    def has_object_permission(self, request, view, obj):
        return self.has_permission(request, view, obj)
class IsStaffPermission(BasePermission):
    '''
    It ensures that the user is authenticated and is a staff member.
    '''
    permissions = (
        is_authenticated,
        is_staff)
class IsSuperuserPermission(BasePermission):
    '''
    It ensures that the user is authenticated and is a superuser. However it
    does not check if the user is a staff member.
    '''
    permissions = (
        is_authenticated,
        is_superuser)
# TODO: needs documentation
# TODO: needs integration into the REST API
class ModelPermission(BasePermission):
    '''
    Checks if the necessary model permissions are set for the accessed object.
    '''
    # Map methods into required permission codes.
    # Override this if you need to also provide 'view' permissions,
    # or if you want to provide custom permission checks.
    permissions_for_method = {
        'GET': (),
        'OPTIONS': (),
        'HEAD': (),
        'POST': (model_permission('{app_label}.add_{model_name}'),),
        'PUT': (model_permission('{app_label}.change_{model_name}'),),
        'PATCH': (model_permission('{app_label}.change_{model_name}'),),
        'DELETE': (model_permission('{app_label}.delete_{model_name}'),),
    }
class ModelViewPermission(BasePermission):
    '''
    Checks if the user has the ``<app>.view_<model>`` permission.
    '''
    permissions = (model_permission('{app_label}.view_{model_name}'),)
class ModelAddPermission(BasePermission):
    '''
    Checks if the user has the ``<app>.add_<model>`` permission.
    '''
    permissions = (model_permission('{app_label}.add_{model_name}'),)
class ModelChangePermission(BasePermission):
    '''
    Checks if the user has the ``<app>.change_<model>`` permission.
    '''
    permissions = (model_permission('{app_label}.change_{model_name}'),)
class ModelDeletePermission(BasePermission):
    '''
    Checks if the user has the ``<app>.delete_<model>`` permission.
    '''
    permissions = (model_permission('{app_label}.delete_{model_name}'),)
@python_2_unicode_compatible
class TemplatePermissionChecker(object):
    '''
    Can be used in the template like:

    .. code-block:: html+django

        {{ permissions.has_view_permission }}
        {{ permissions.has_add_permission }}
        {{ permissions.has_change_permission }}
        {{ permissions.has_delete_permission }}
        {{ permissions.blog_post.has_view_permission }}
        {{ permissions.blog_comment.has_add_permission }}

    So in general:

    .. code-block:: html+django

        {{ permissions.has_<view_name>_permission }}
        {{ permissions.<object admin name>.has_<view name>_permission }}

    And using object-level permissions:

    .. code-block:: html+django

        {% load admin2_tags %}
        {{ permissions.has_delete_permission|for_object:object }}
        {% with permissions|for_object:object as object_permissions %}
            {{ object_permissions.has_delete_permission }}
        {% endwith %}

    And dynamically checking the permissions on a different admin:

    .. code-block:: html+django

        {% load admin2_tags %}
        {% for admin in list_of_model_admins %}
            {% with permissions|for_admin:admin as permissions %}
                {{ permissions.has_delete_permission }}
            {% endwith %}
        {% endfor %}

    If you don't know the permission you want to check at compile time (e.g.
    you cannot put ``has_add_permission`` in the template because the exact
    permission name might be passed into the context dynamically) you can bind
    the view name with the ``for_view`` filter:

    .. code-block:: html+django

        {% load admin2_tags %}
        {% with "add" as view_name %}
            {% if permissions|for_view:view_name %}
                <a href="...">{{ view_name|capfirst }} model</a>
            {% endif %}
        {% endwith %}

    The attribute access of ``has_<view name>_permission`` will check for the
    permissions of the view on the currently bound model admin not with the
    name ``<view name>``, but with the name that the ``view_name_mapping``
    returns for it. That step is needed since ``add`` is not the real
    attribute name in which the ``ModelAddFormView`` on the model admin lives.
    In the future we might get rid of that and this will also make it possible
    to check for any view assigned to the admin, like
    ``{{ permissions.auth_user.has_change_password_permission }}``. But this
    needs an interface being implemented like suggested in:
    https://github.com/twoscoops/django-admin2/issues/142
    '''
    # Raw string: a plain string literal here would rely on the invalid
    # escape sequence '\w', which raises DeprecationWarning since
    # Python 3.6 (SyntaxWarning since 3.12).
    _has_named_permission_regex = re.compile(r'^has_(?P<name>\w+)_permission$')

    view_name_mapping = {
        'view': 'detail_view',
        'add': 'create_view',
        'change': 'update_view',
        'delete': 'delete_view',
    }

    def __init__(self, request, model_admin, view=None, obj=None):
        self._request = request
        self._model_admin = model_admin
        self._view = view
        self._obj = obj

    def clone(self):
        '''Return an independent copy with the same bindings.'''
        return self.__class__(
            request=self._request,
            model_admin=self._model_admin,
            view=self._view,
            obj=self._obj)

    def bind_admin(self, admin):
        '''
        Return a clone of the permission wrapper with a new model_admin bind
        to it.  *admin* may be an admin instance or its registered name.
        '''
        if isinstance(admin, six.string_types):
            try:
                admin = self._model_admin.admin.get_admin_by_name(admin)
            except ValueError:
                return ''
        new_permissions = self.clone()
        # The previously bound view belongs to the old admin; drop it.
        new_permissions._view = None
        new_permissions._model_admin = admin
        return new_permissions

    def bind_view(self, view):
        '''
        Return a clone of the permission wrapper with a new view bind to it.
        '''
        if isinstance(view, six.string_types):
            if view not in self.view_name_mapping:
                return ''
            view_name = self.view_name_mapping[view]
            view = getattr(self._model_admin, view_name).view
        # we don't support binding view classes yet, only the name of views
        # are processed. We have the problem with view classes that we cannot
        # tell which model admin it was attached to.
        else:
            return ''
        # if view is a class and not instantiated yet, do it!
        if isinstance(view, type):
            view = view(
                request=self._request,
                **self._model_admin.get_default_view_kwargs())
        new_permissions = self.clone()
        new_permissions._view = view
        return new_permissions

    def bind_object(self, obj):
        '''
        Return a clone of the permission wrapper with a new object bind
        to it for object-level permissions.
        '''
        new_permissions = self.clone()
        new_permissions._obj = obj
        return new_permissions

    #########################################
    # interface exposed to the template users

    def __getitem__(self, key):
        match = self._has_named_permission_regex.match(key)
        if match:
            # the key was a has_*_permission, so bind the corresponding view
            view_name = match.groupdict()['name']
            return self.bind_view(view_name)
        # the name might be a named object admin. So get that one and bind it
        # to the permission checking
        try:
            admin_site = self._model_admin.admin
            model_admin = admin_site.get_admin_by_name(key)
        except ValueError:
            # Include the key so template debugging shows what was missing.
            raise KeyError(key)
        return self.bind_admin(model_admin)

    def __nonzero__(self):
        # if no view is bound we will return false, since we don't know which
        # permission to check we stay safe in disallowing the access
        return self._cast_bool()

    def __bool__(self):
        return self._cast_bool()

    def _cast_bool(self):
        if self._view is None:
            return False
        if self._obj is None:
            return self._view.has_permission()
        else:
            return self._view.has_permission(self._obj)

    def __str__(self):
        if self._view is None:
            return ''
        return force_text(bool(self))
def create_view_permissions(app_config, verbosity=2, interactive=True, using=DEFAULT_DB_ALIAS, **kwargs):  # noqa
    """
    Create 'view' permissions for all models.

    ``django.contrib.auth`` only creates add, change and delete permissions.
    Since we want to support read-only views, we need to add our own
    permission.

    Copied from ``https://github.com/django/django/blob/1.9.6/django/contrib/auth/management/__init__.py#L60``.
    """
    # Nothing to do for apps without a models module or before the auth
    # app (and its Permission model) is available.
    if not app_config.models_module:
        return
    try:
        Permission = apps.get_model('auth', 'Permission')
    except LookupError:
        return
    if not router.allow_migrate_model(using, Permission):
        return
    from django.contrib.contenttypes.models import ContentType
    # This will hold the permissions we're looking for as
    # (content_type, (codename, name))
    searched_perms = list()
    # The codenames and ctypes that should exist.
    ctypes = set()
    for klass in app_config.get_models():
        # Force looking up the content types in the current database
        # before creating foreign keys to them.
        ctype = ContentType.objects.db_manager(using).get_for_model(klass)
        ctypes.add(ctype)
        perm = (get_permission_codename('view', klass._meta), 'Can view %s' % (klass._meta.verbose_name_raw))
        searched_perms.append((ctype, perm))
    # Find all the Permissions that have a content_type for a model we're
    # looking for. We don't need to check for codenames since we already have
    # a list of the ones we're going to create.
    all_perms = set(Permission.objects.using(using).filter(
        content_type__in=ctypes,
    ).values_list(
        "content_type", "codename"
    ))
    perms = [
        Permission(codename=codename, name=name, content_type=ct)
        for ct, (codename, name) in searched_perms
        if (ct.pk, codename) not in all_perms
    ]
    # Validate the permissions before bulk_creation to avoid cryptic
    # database error when the verbose_name is longer than 50 characters
    permission_name_max_length = Permission._meta.get_field('name').max_length
    verbose_name_max_length = permission_name_max_length - 11  # len('Can change ') prefix
    for perm in perms:
        if len(perm.name) > permission_name_max_length:
            raise ValidationError(
                "The verbose_name of %s.%s is longer than %s characters" % (
                    perm.content_type.app_label,
                    perm.content_type.model,
                    verbose_name_max_length,
                )
            )
    Permission.objects.using(using).bulk_create(perms)
    if verbosity >= 2:
        for perm in perms:
            print("Adding permission '%s'" % perm)
| andrewsmedina/django-admin2 | djadmin2/permissions.py | Python | bsd-3-clause | 15,152 |
"""
Cobra RMI Framework
Cobra is a remote method invocation interface that is very "pythony". It is
MUCH like its inspiration pyro, but slimmer and safer for things like threading
and object de-registration. Essentially, cobra allows you to call methods from
and get/set attributes on objects that exist on a remote system.
"""
# Copyright (C) 2011 Invisigoth - See LICENSE file for details
import os
import json
import time
import types
import queue
import pickle
import socket
import struct
import logging
import traceback
import urllib.parse
from threading import currentThread, Thread, RLock, Timer, Lock
from socketserver import ThreadingTCPServer, BaseRequestHandler
try:
import msgpack
dumpargs = {}
loadargs = {'use_list': 0}
if msgpack.version >= (0, 4, 1):
dumpargs['use_bin_type'] = 1
if msgpack.version < (1, 0, 0):
loadargs['encoding'] = 'utf-8'
else:
loadargs['strict_map_key'] = False
except ImportError:
msgpack = None
logger = logging.getLogger(__name__)
daemon = None  # Module-global CobraDaemon singleton (set elsewhere)
version = "Cobra2"
COBRA_PORT=5656  # Default plaintext listener port
COBRASSL_PORT=5653  # Default SSL listener port
cobra_retrymax = None # Optional *global* retry max count
socket_builders = {} # Registered socket builders
# Message Types
COBRA_HELLO = 0
COBRA_CALL = 1
COBRA_GETATTR = 2
COBRA_SETATTR = 3
COBRA_ERROR = 4
COBRA_GOODBYE = 5
COBRA_AUTH = 6
COBRA_NEWOBJ = 7 # Used to return object references
# Session flag bits: select the wire serializer (default is pickle).
SFLAG_MSGPACK = 0x0001
SFLAG_JSON = 0x0002
class CobraException(Exception):
    """Base for Cobra exceptions"""
    pass
class CobraClosedException(CobraException):
    """Raised when a connection is unexpectedly closed."""
    pass
class CobraRetryException(CobraException):
    """Raised when the retrymax (if present) for a proxy object is exceeded."""
    pass
class CobraPickleException(CobraException):
    """Raised when pickling fails."""
    pass
class CobraAuthException(CobraException):
    '''Raised when specified auth data is rejected'''
    pass
class CobraPermDenied(CobraException):
    '''Raised when a call/setattr/getattr is not allowed'''
class CobraErrorException(Exception):
    '''
    Raised when we receive a COBRA_ERROR message and the current options
    dont support serializing exception objects.
    '''
def connectSocket(host, port, timeout=None):
    """
    Convenience wrapper: create a TCP socket, apply the optional
    timeout, connect it to (host, port) and return it.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if timeout is not None:
        sock.settimeout(timeout)
    sock.connect((host, port))
    return sock
def getCallerInfo():
    """
    May be used from *inside* a method being called by a remote caller;
    returns the (host, port) tuple for the other side of the connection
    ( or None outside of a cobra request )... use wisely ;)
    """
    thread = currentThread()
    return getattr(thread, "_cobra_caller_info", None)
def getLocalInfo():
    """
    Returns the local (host, port) combination being used in the socket
    servicing the current request ( or None outside of a request ).
    """
    thread = currentThread()
    return getattr(thread, "_cobra_local_info", None)
def getUserInfo():
    '''
    Get the cobra authenticated username of the current user
    ( or None if no user was authenticated )
    '''
    thread = currentThread()
    return getattr(thread, "_cobra_authuser", None)
def setCallerInfo(callerinfo):
    """
    Stash the caller (host, port) on the current thread.  An explicit
    setter is used because of python's method-call name munging for
    thread attributes ;)
    """
    thread = currentThread()
    thread._cobra_caller_info = callerinfo
def setUserInfo(authuser):
    '''Stash the cobra authenticated username on the current thread.'''
    thread = currentThread()
    thread._cobra_authuser = authuser
def setLocalInfo(localinfo):
    '''Stash the local (host, port) on the current thread.'''
    thread = currentThread()
    thread._cobra_local_info = localinfo
def nocobra(f):
    '''Decorator: flag *f* with __no_cobra__ = True and return it unchanged.'''
    setattr(f, '__no_cobra__', True)
    return f
def newobj(f):
    '''Decorator: flag *f* with _cobra_newobj = True and return it unchanged.'''
    setattr(f, '_cobra_newobj', True)
    return f
def newobjwith(f):
    '''
    Decorator: flag *f* with both _cobra_newobj and _cobra_newobjwith
    set to True and return it unchanged.
    '''
    setattr(f, '_cobra_newobj', True)
    setattr(f, '_cobra_newobjwith', True)
    return f
class CobraMethod:
    '''
    Callable bound to a CobraProxy that forwards invocations to the
    remote object as COBRA_CALL messages.
    '''
    def __init__(self, proxy, methname):
        self.proxy = proxy
        self.methname = methname
        self.__name__ = methname
    def __call__(self, *args, **kwargs):
        name = self.proxy._cobra_name
        logger.debug("Calling: %s, %s, %s, %s", name, self.methname, repr(args)[:20], repr(kwargs)[:20])
        # Reserved kwarg: _cobra_async=True makes the call return an
        # async transaction handle instead of blocking for the reply.
        casync = kwargs.pop('_cobra_async', None)
        if casync:
            csock = self.proxy._cobra_getsock()
            return csock.cobraAsyncTransaction(COBRA_CALL, name, (self.methname, args, kwargs))
        with self.proxy._cobra_getsock() as csock:
            mtype, name, data = csock.cobraTransaction(COBRA_CALL, name, (self.methname, args, kwargs))
            if mtype == COBRA_CALL:
                return data
            # Server returned a reference to a newly shared object:
            # wrap it in a fresh proxy.
            if mtype == COBRA_NEWOBJ:
                uri = swapCobraObject(self.proxy._cobra_uri, data)
                return CobraProxy(uri)
            # Any other reply carries an exception payload.
            raise data
def pickledumps(o):
    '''Serialize *o* with pickle using the highest protocol available.'''
    proto = pickle.HIGHEST_PROTOCOL
    return pickle.dumps(o, protocol=proto)
def jsonloads(b):
    '''Deserialize a JSON document (used when SFLAG_JSON is negotiated).'''
    return json.loads(b)
def jsondumps(b):
    '''Serialize to JSON (parameter name kept for API compatibility).'''
    return json.dumps(b)
class CobraSocket:
    '''
    Wraps a connected socket with the framed cobra wire protocol.

    Each message is a 12 byte little-endian header
    (message type, object-name length, payload length) followed by the
    UTF-8 object name and the serialized payload.  The serializer
    defaults to pickle and may be switched to msgpack or JSON via the
    SFLAG_* bits in *sflags*.
    '''
    def __init__(self, socket, sflags=0):
        self.sflags = sflags
        self.socket = socket
        self.dumps = pickledumps
        self.loads = pickle.loads
        if sflags & SFLAG_MSGPACK:
            if not msgpack:
                raise Exception('Missing "msgpack" python module ( http://visi.kenshoto.com/viki/Msgpack )')
            def msgpackloads(b):
                return msgpack.loads(b, **loadargs)
            def msgpackdumps(b):
                return msgpack.dumps(b, **dumpargs)
            self.dumps = msgpackdumps
            self.loads = msgpackloads
        if sflags & SFLAG_JSON:
            self.dumps = jsondumps
            self.loads = jsonloads
    def __del__(self):
        # Best-effort close when the wrapper is garbage collected.
        self.socket.close()
    def getSockName(self):
        return self.socket.getsockname()
    def getPeerName(self):
        return self.socket.getpeername()
    def sendMessage(self, mtype, objname, data):
        """
        Serialize and transmit one cobra message.  (Retransmission and
        reconnection on failure are handled by the client-side
        cobraTransaction wrappers, not here.)
        """
        # NOTE: for errors while using msgpack/json, we must send only the str
        if mtype == COBRA_ERROR and self.sflags & (SFLAG_MSGPACK | SFLAG_JSON):
            data = str(data)
        try:
            buf = self.dumps(data)
        except Exception as e:
            raise CobraPickleException("The arguments/attributes must be serializable: %s" % e)
        obj = objname.encode('utf-8')
        self.sendExact(struct.pack("<III", mtype, len(obj), len(buf)) + obj + buf)
    def recvMessage(self):
        """
        Returns tuple of mtype, objname, and data

        This method is *NOT* responsible for re-connection, because there
        is no context on the server side for what to send on re-connect.
        Client side uses of the CobraSocket object should use cobraTransaction
        to ensure re-transmission of the request on reception errors.
        """
        hdr = self.recvExact(12)
        mtype, nsize, dsize = struct.unpack("<III", hdr)
        name = self.recvExact(nsize).decode('utf-8')
        data = self.loads(self.recvExact(dsize))
        # NOTE: for errors while using msgpack/json, the payload is the
        # bare string; re-wrap it so callers can raise it.
        if mtype == COBRA_ERROR and self.sflags & (SFLAG_MSGPACK | SFLAG_JSON):
            data = CobraErrorException(data)
        return (mtype, name, data)
    def recvExact(self, size):
        '''
        Read exactly *size* bytes from the socket, raising
        CobraClosedException if the peer closes first.
        '''
        # Accumulate into a bytearray: repeated bytes += is quadratic.
        buf = bytearray()
        s = self.socket
        while len(buf) != size:
            x = s.recv(size - len(buf))
            if len(x) == 0:
                raise CobraClosedException("Socket closed in recvExact...")
            buf += x
        return bytes(buf)
    def sendExact(self, buf):
        self.socket.sendall(buf)
class SocketBuilder:
    '''
    Callable factory that produces connected (optionally SSL wrapped)
    client sockets for a fixed host/port, honoring the configured
    timeout and SSL options on every new connection.
    '''
    def __init__(self, host, port, timeout=None):
        self.host = host
        self.port = port
        self.timeout = timeout
        self.retrymax = None
        self.ssl = False
        self.sslca = None
        self.sslcrt = None
        self.sslkey = None
    def setTimeout(self, timeout):
        '''
        Set the timeout for newly created sockets.
        '''
        self.timeout = timeout
    def setSslEnabled(self, status):
        '''Enable or disable SSL wrapping of newly created sockets.'''
        self.ssl = status
    def setSslCa(self, crtfile):
        '''
        Set the SSL Certificate Authority for this socket builder.
        ( This enables checking the server's presented cert )
        '''
        self.ssl = True
        self.sslca = crtfile
    def setSslClientCert(self, crtfile, keyfile):
        '''
        Set the cert/key used by this client to negotiate SSL.
        '''
        self.ssl = True
        self.sslcrt = crtfile
        self.sslkey = keyfile
    def __call__(self):
        '''Build, optionally SSL-wrap, and connect a new socket.'''
        # (Removed unused local copies of host/port/timeout.)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if self.timeout is not None:
            sock.settimeout(self.timeout)
        if self.ssl:
            import ssl
            sslkwargs = {}
            if self.sslca:
                # A CA bundle implies the server cert must verify.
                sslkwargs['ca_certs'] = self.sslca
                sslkwargs['cert_reqs'] = ssl.CERT_REQUIRED
            if self.sslcrt and self.sslkey:
                sslkwargs['keyfile'] = self.sslkey
                sslkwargs['certfile'] = self.sslcrt
            # NOTE(review): ssl.wrap_socket is deprecated since 3.7 and
            # removed in Python 3.12; a migration to
            # ssl.SSLContext.wrap_socket is needed for modern
            # interpreters — confirm the desired verification defaults
            # before changing behavior.
            sock = ssl.wrap_socket(sock, **sslkwargs)
        sock.connect((self.host, self.port))
        return sock
class CobraAsyncTrans:
    """
    A fire-and-forget cobra request: transmitted on construction,
    with the reply collected later via wait().
    """
    def __init__(self, csock, mtype, objname, data):
        self.data = data
        self.csock = csock
        self.mtype = mtype
        self.objname = objname
        # Issue the call..
        self.asyncCobraTransaction()
    def asyncCobraTransaction(self):
        """
        This is an API for clients to use. It will retransmit
        a sendMessage() automagically on recpt of an exception
        in recvMessage()
        """
        while True:
            try:
                self.csock.sendMessage(self.mtype, self.objname, self.data)
                return
            except CobraAuthException as e:
                raise
            except (socket.error,CobraClosedException):
                self.csock.reConnect()
    def wait(self):
        """Block until the reply arrives; return it or raise the remote error."""
        try:
            while True:
                try:
                    mtype,name,data = self.csock.recvMessage()
                    if mtype == COBRA_CALL:
                        return data
                    raise data
                except CobraAuthException as e:
                    raise
                except (socket.error, CobraClosedException) as e:
                    # force a reconnect
                    self.csock.reConnect()
                    self.asyncCobraTransaction()
        finally:
            # Hand the socket back to its pool (if pooled) exactly once.
            if self.csock.pool:
                self.csock.pool.put(self.csock)
                self.csock = None
class CobraClientSocket(CobraSocket):
    """
    Client-side cobra socket: wraps a socket constructor so the
    connection can be transparently rebuilt (and re-authenticated)
    after network failures.
    """
    def __init__(self, sockctor, retrymax=cobra_retrymax, sflags=0, authinfo=None, pool=None):
        CobraSocket.__init__(self, sockctor(), sflags=sflags)
        self.sockctor = sockctor
        self.retries = 0
        self.trashed = False
        self.retrymax = retrymax
        self.authinfo = authinfo
        self.pool = pool
    def __enter__(self):
        return self
    def __exit__(self, extype, value, tb):
        # Pooled sockets are returned to the pool on with-block exit.
        if self.pool:
            self.pool.put(self)
    def reConnect(self):
        """
        Handle the event where we need to reconnect
        """
        while self.retrymax is None or self.retries < self.retrymax:
            logger.info("COBRA: Reconnection Attempt\n")
            try:
                self.socket = self.sockctor()
                # A bit messy but... a fix for now...
                # If we have authinfo lets authenticate
                authinfo = self.authinfo
                if authinfo is not None:
                    self.sendMessage(COBRA_AUTH, '', authinfo)
                    mtype,rver,data = self.recvMessage()
                    if mtype != COBRA_AUTH:
                        raise CobraAuthException('Authentication Failed!')
                self.retries = 0
                return
            except CobraAuthException as e:
                raise
            except Exception as e:
                logger.warning("reConnect hit exception: %s" % str(e))
                # NOTE(review): max() means every retry sleeps at least 10s
                # and the delay grows without bound; a capped exponential
                # backoff would normally use min() -- confirm intent.
                time.sleep( max(2 ** self.retries, 10) )
                self.retries += 1
        # Retries exhausted: mark this socket unusable.
        self.trashed = True
        raise CobraRetryException()
    def cobraAsyncTransaction(self, mtype, objname, data):
        # Fire an asynchronous request; the reply is collected via .wait().
        return CobraAsyncTrans(self, mtype, objname, data)
    def cobraTransaction(self, mtype, objname, data):
        """
        This is an API for clients to use. It will retransmit
        a sendMessage() automagically on recpt of an exception
        in recvMessage()
        """
        while True:
            try:
                self.sendMessage(mtype, objname, data)
                return self.recvMessage()
            except CobraAuthException:
                raise
            except CobraClosedException:
                self.reConnect()
            except socket.error:
                self.reConnect()
class CobraDaemon(ThreadingTCPServer):
    """
    Threaded TCP server which exposes shared python objects to
    CobraProxy clients.
    """
    def __init__(self, host="", port=COBRA_PORT, sslcrt=None, sslkey=None, sslca=None, msgpack=False, json=False):
        '''
        Construct a cobra daemon object.

        Parameters:
        host - Optional hostname/ip to bind the service to (default: inaddr_any)
        port - The port to bind (Default: COBRA_PORT)
        msgpack - Use msgpack serialization

        # SSL Options
        sslcrt / sslkey - Specify sslcrt and sslkey to enable SSL server side
        sslca - Specify an SSL CA key to use validating client certs
        '''
        self.thr = None
        self.run = True
        self.shared = {}        # name -> shared object
        self.dowith = {}        # name -> bool (enter/exit on share/unshare)
        self.host = host
        self.port = port
        self.reflock = RLock()
        self.refcnts = {}       # name -> proxy refcount (None = not refcounted)
        self.authmod = None
        self.sflags = 0
        if msgpack and json:
            raise Exception('CobraDaemon can not use both msgpack *and* json!')
        if msgpack:
            requireMsgpack()
            self.sflags |= SFLAG_MSGPACK
        if json:
            self.sflags |= SFLAG_JSON
        # SSL Options
        self.sslca = sslca
        self.sslcrt = sslcrt
        self.sslkey = sslkey
        self.cansetattr = True
        self.cangetattr = True
        if sslcrt and not os.path.isfile(sslcrt):
            raise Exception('CobraDaemon: sslcrt param must be a file!')
        if sslkey and not os.path.isfile(sslkey):
            raise Exception('CobraDaemon: sslkey param must be a file!')
        if sslca and not os.path.isfile(sslca):
            raise Exception('CobraDaemon: sslca param must be a file!')
        self.allow_reuse_address = True
        ThreadingTCPServer.__init__(self, (host, port), CobraRequestHandler)
        # Port 0 asks the OS for an ephemeral port; record what we got.
        if port == 0:
            self.port = self.socket.getsockname()[1]
        self.daemon_threads = True
        self.recvtimeout = None
    def logCallerError(self, oname, args, msg=""):
        # Hook point: subclasses may override to record handler exceptions.
        pass
    def setGetAttrEnabled(self, status):
        # Allow/deny remote getattr access to shared objects.
        self.cangetattr = status
    def setSetAttrEnabled(self, status):
        # Allow/deny remote setattr access to shared objects.
        self.cansetattr = status
    def setSslCa(self, crtfile):
        '''
        Set the SSL Certificate Authority by this server.
        ( to validate client certs )
        '''
        self.sslca = crtfile
    def setSslServerCert(self, crtfile, keyfile):
        '''
        Set the cert/key used by this server to negotiate SSL.
        '''
        self.sslcrt = crtfile
        self.sslkey = keyfile
    def fireThread(self):
        # Serve in a background daemon thread.
        self.thr = Thread(target=self.serve_forever)
        self.thr.setDaemon(True)
        self.thr.start()
    def stopServer(self):
        # Stop serving, close the listener and join the background thread.
        self.run = False
        self.shutdown()
        self.server_close()
        self.thr.join()
    def serve_forever(self):
        try:
            ThreadingTCPServer.serve_forever(self)
        except Exception as e:
            # Swallow the shutdown race only when we were asked to stop.
            if not self.run:
                return
            raise
    def setAuthModule(self, authmod):
        '''
        Enable an authentication module for this server
        ( all connections *must* be authenticated through the authmod )

        NOTE: See cobra.auth.* for various auth module implementations

        Example:
            import cobra.auth.shadow as c_a_shadow
            authmod = c_a_shadow.ShadowFileAuth('passwdfile.txt')
            cdaemon = CobraDaemon()
            cdaemon.setAuthModule()
        '''
        self.authmod = authmod
    def getSharedObject(self, name):
        # Return the object shared under *name*, or None.
        return self.shared.get(name, None)
    def getSharedObjects(self):
        '''
        Return a list of (name, obj) for the currently shared objects.

        Example:
            for name,obj in daemon.getSharedObjects():
                print('%s: %r' % (name,obj))
        '''
        return self.shared.items()
    def getSharedName(self, obj):
        '''
        If this object is shared already, get the name...
        '''
        for name, sobj in self.shared.items():
            if sobj == obj:
                return name
        return None
    def getRandomName(self):
        # 32 hex chars from 16 random bytes.
        return ''.join(['%.2x' % x for x in os.urandom(16)])
    def shareObject(self, obj, name=None, doref=False, dowith=False):
        """
        Share an object in this cobra server. By specifying
        doref=True you will let CobraProxy objects decide that
        the object is done and should be un-shared. Also, if
        name is None a random name is chosen. Use dowith=True
        to cause sharing/unsharing to enter/exit (requires doref=True).

        Returns: name (or the newly generated random one)
        """
        refcnt = None
        if dowith and not doref:
            raise Exception('dowith *requires* doref!')
        if doref:
            refcnt = 0
        if dowith:
            obj.__enter__()
        if name is None:
            name = self.getRandomName()
        self.shared[name] = obj
        self.dowith[name] = dowith
        self.refcnts[name] = refcnt
        return name
    def getObjectRefCount(self, name):
        # None means the object is not refcounted (shared without doref).
        return self.refcnts.get(name)
    def decrefObject(self, name, ok=True):
        """
        Decref this object and if it reaches 0, unshare it.
        """
        logger.debug('Decrementing: %s', name)
        self.reflock.acquire()
        try:
            refcnt = self.refcnts.get(name, None)
            if refcnt is not None:
                refcnt -= 1
                self.refcnts[name] = refcnt
                if refcnt == 0:
                    self.unshareObject(name,ok=ok)
        finally:
            self.reflock.release()
    def increfObject(self, name):
        # Bump the proxy refcount for a doref-shared object.
        logger.debug('Incrementing: %s', name)
        self.reflock.acquire()
        try:
            refcnt = self.refcnts.get(name, None)
            if refcnt is not None:
                refcnt += 1
                self.refcnts[name] = refcnt
        finally:
            self.reflock.release()
    def unshareObject(self, name, ok=True):
        """Drop *name* from the shared set; returns the object (or None)."""
        logger.debug('Unsharing %s', name)
        self.refcnts.pop(name, None)
        obj = self.shared.pop(name, None)
        # If we are using a with block, notify it
        if self.dowith.pop(name, False):
            args = (None,None,None)
            if not ok:
                args = (Exception, Exception('with boom'), None)
            obj.__exit__(*args)
        return obj
class CobraRequestHandler(BaseRequestHandler):
    """ThreadingTCPServer glue: hands each connection to a CobraConnectionHandler."""
    def handle(self):
        c = CobraConnectionHandler(self.server, self.request)
        c.handleClient()
class CobraConnectionHandler:
    """
    Per-connection server loop: receives cobra messages and dispatches
    them (hello/call/getattr/setattr/goodbye) against shared objects.
    """
    def __init__(self, daemon, socket):
        self.daemon = daemon
        self.socket = socket
        # Indexed by the message-type constant (COBRA_HELLO, COBRA_CALL, ...).
        self.handlers = (
            self.handleHello,
            self.handleCall,
            self.handleGetAttr,
            self.handleSetAttr,
            self.handleError,
            self.handleGoodbye,
            self.handleError,
        )
    def handleClient(self):
        """Run the receive/dispatch loop until the peer disconnects."""
        peer = self.socket.getpeername()
        me = self.socket.getsockname()
        logger.info("Got a connection from: %s" % str(peer))
        sock = self.socket
        if self.daemon.sslkey:
            import ssl
            sslca = self.daemon.sslca
            keyfile = self.daemon.sslkey
            certfile = self.daemon.sslcrt
            sslreq = ssl.CERT_NONE
            # If they specify a CA key, require valid client certs
            if sslca:
                sslreq=ssl.CERT_REQUIRED
            sock = ssl.wrap_socket(sock,
                keyfile=keyfile, certfile=certfile,
                ca_certs=sslca, cert_reqs=sslreq,
                server_side=True)
        if self.daemon.recvtimeout:
            sock.settimeout( self.daemon.recvtimeout )
        authuser = None
        csock = CobraSocket(sock, sflags=self.daemon.sflags)
        # Stash per-thread caller context for use by shared objects.
        setCallerInfo(peer)
        setLocalInfo(me)
        # If we have an authmod, they must send an auth message first
        if self.daemon.authmod:
            mtype,name,data = csock.recvMessage()
            if mtype != COBRA_AUTH:
                csock.sendMessage(COBRA_ERROR, '', CobraAuthException('Authentication Required!'))
                return
            authuser = self.daemon.authmod.authCobraUser( data )
            if not authuser:
                csock.sendMessage(COBRA_ERROR, '', CobraAuthException('Authentication Failed!'))
                return
            csock.sendMessage(COBRA_AUTH, '', authuser)
            setUserInfo( authuser )
        while True:
            try:
                mtype,name,data = csock.recvMessage()
            except CobraClosedException:
                break
            except socket.error:
                logger.warning("Cobra socket error in handleClient")
                break
            # If they re-auth ( app layer ) later, lets handle it...
            if mtype == COBRA_AUTH and self.daemon.authmod:
                authuser = self.daemon.authmod.authCobraUser(data)
                if not authuser:
                    csock.sendMessage(COBRA_ERROR,'',CobraAuthException('Authentication Failed!'))
                    continue
                setUserInfo(authuser)
                csock.sendMessage(COBRA_AUTH, '', authuser)
                continue
            if self.daemon.authmod and not self.daemon.authmod.checkUserAccess( authuser, name ):
                csock.sendMessage(COBRA_ERROR, name, Exception('Access Denied For User: %s' % authuser))
                continue
            obj = self.daemon.getSharedObject(name)
            logger.debug("MSG FOR: %s:%s", str(name), type(obj))
            if obj is None:
                try:
                    csock.sendMessage(COBRA_ERROR, name, Exception("Unknown object requested: %s" % name))
                except CobraClosedException:
                    pass
                logger.warning("Got request for unknown object: %s" % name)
                continue
            try:
                handler = self.handlers[mtype]
            except:
                try:
                    csock.sendMessage(COBRA_ERROR, name, Exception("Invalid Message Type"))
                except CobraClosedException:
                    pass
                logger.warning("Got Invalid Message Type: %d for %s" % (mtype, data))
                continue
            try:
                handler(csock, name, obj, data)
            except Exception as e:
                logger.warning("cobra handler hit exception: %s" % str(e))
                try:
                    csock.sendMessage(COBRA_ERROR, name, e)
                except TypeError as typee:
                    # Probably about pickling...
                    csock.sendMessage(COBRA_ERROR, name, Exception(str(e)))
                except CobraClosedException:
                    pass
    def handleError(self, csock, oname, obj, data):
        # Placeholder slots in self.handlers for unused message types.
        raise NotImplementedError("How did we hit handleError?")
    def handleHello(self, csock, oname, obj, data):
        """
        Hello messages are used to get the initial cache of
        method names for the newly connected object.
        """
        logger.debug("Hello")
        self.daemon.increfObject(oname)
        ret = {}
        for name in dir(obj):
            attr = getattr(obj, name, None)
            if isinstance(attr, (types.MethodType, types.BuiltinMethodType, types.FunctionType, CobraMethod)):
                ret[name] = True
        try:
            csock.sendMessage(COBRA_HELLO, version, ret)
        except CobraClosedException:
            pass
    def handleCall(self, csock, oname, obj, data):
        """Invoke a method on the shared object and send back the result."""
        logger.debug("Calling %s", str(data))
        methodname, args, kwargs = data
        meth = getattr(obj, methodname)
        if getattr(meth,'__no_cobra__',False):
            raise CobraPermDenied('%s is tagged nocall!' % methodname)
        try:
            ret = meth(*args, **kwargs)
            # Methods tagged _cobra_newobj share their return value and
            # reply with the new object's name instead of the value itself.
            if getattr(meth,'_cobra_newobj',None):
                dowith = getattr(meth,'_cobra_newobjwith',False)
                objname = self.daemon.shareObject(ret, doref=True, dowith=dowith)
                csock.sendMessage(COBRA_NEWOBJ, "", objname)
                return
            csock.sendMessage(COBRA_CALL, "", ret)
        except CobraClosedException:
            pass
        except Exception as e:
            self.daemon.logCallerError(oname, data, msg=traceback.format_exc())
            raise
    def handleGetAttr(self, csock, oname, obj, name):
        logger.debug("Getting Attribute: %s", str(name))
        if not self.daemon.cangetattr:
            raise CobraPermDenied('getattr disallowed!')
        try:
            csock.sendMessage(COBRA_GETATTR, "", getattr(obj, name))
        except CobraClosedException:
            pass
    def handleSetAttr(self, csock, oname, obj, data):
        logger.debug("Setting Attribute: %s", str(data))
        if not self.daemon.cansetattr:
            raise CobraPermDenied('setattr disallowed!')
        name,value = data
        setattr(obj, name, value)
        try:
            csock.sendMessage(COBRA_SETATTR, "", "")
        except CobraClosedException:
            pass
    def handleGoodbye(self, csock, oname, obj, data):
        # data carries the client's "ok" flag for with-block teardown.
        logger.debug("Goodbye")
        self.daemon.decrefObject(oname,ok=data)
        try:
            csock.sendMessage(COBRA_GOODBYE, "", "")
        except CobraClosedException:
            pass
def isCobraUri(uri):
    """Return True when *uri* parses as a cobra:// or cobrassl:// URI."""
    try:
        scheme = urllib.parse.urlparse(uri).scheme
    except Exception:
        # Anything urlparse chokes on is, by definition, not a cobra URI.
        return False
    return scheme in ("cobra", "cobrassl")
def chopCobraUri(uri):
    """Split a cobra URI into (scheme, host, port, objname, urlparams).

    urlparams maps lower-cased query options to their string values
    (bare flags such as "?msgpack" map to the integer 1).  A missing
    port falls back to COBRA_PORT.
    """
    purl = urllib.parse.urlparse(uri)
    scheme = purl.scheme
    host = purl.hostname
    name = purl.path.strip('/')
    port = purl.port
    if not port:
        port = COBRA_PORT
    # Do we have any URL options?
    urlparams = {}
    # Guard on the query string: splitting '' yields [''] and would
    # otherwise register a bogus {'': 1} option.
    if purl.query:
        for urlopt in purl.query.split('&'):
            urlval = 1
            if urlopt.find('=') != -1:
                urlopt, urlval = urlopt.split('=', 1)
            urlopt = urlopt.lower()
            urlparams[urlopt] = urlval
    return scheme, host, port, name, urlparams
class CobraProxy:
    '''
    A proxy object for remote objects shared with Cobra

    A few optional keyword arguments are handled by all cobra protocols:

    retrymax - Max transparent reconnect attempts
    timeout - Socket timeout for a cobra socket
    authinfo - A dict, probably like {'user':'username','passwd':'mypass'}
               ( but it can be auth module specific )
    msgpack - Use msgpack serialization
    sockpool - Fixed sized pool of cobra sockets (not socket per thread)

    Also, the following protocol options may be passed through the URI:
    msgpack=1
    authinfo=<base64( json( <authinfo dict> ))>
    '''
    def __init__(self, URI, retrymax=None, timeout=None, **kwargs):
        scheme, host, port, name, urlparams = chopCobraUri( URI )
        logger.debug("Spinning up CobraProxy on %s:%s with object: %s", host, port, repr(name))
        self._cobra_uri = URI
        self._cobra_scheme = scheme
        self._cobra_host = host
        self._cobra_port = port
        self._cobra_slookup = (host,port)
        self._cobra_name = name
        # URI query parameters take precedence over keyword arguments.
        self._cobra_retrymax = urlparams.get('retrymax', retrymax)
        self._cobra_timeout = urlparams.get('timeout', timeout)
        self._cobra_kwargs = kwargs
        self._cobra_gothello = False
        self._cobra_sflags = 0
        self._cobra_spoolcnt = int(urlparams.get('sockpool', 0))
        self._cobra_sockpool = None
        if self._cobra_timeout is not None:
            self._cobra_timeout = int(self._cobra_timeout)
        if self._cobra_retrymax is not None:
            self._cobra_retrymax = int(self._cobra_retrymax)
        if urlparams.get('msgpack'):
            requireMsgpack()
            self._cobra_sflags |= SFLAG_MSGPACK
        if urlparams.get('json'):
            self._cobra_sflags |= SFLAG_JSON
        urlauth = urlparams.get('authinfo')
        if urlauth:
            # NOTE(review): str.decode('base64') is Python 2 only -- on
            # Python 3 this raises AttributeError; base64.b64decode would
            # be needed here.  Confirm which interpreter this targets.
            authinfo = json.loads(urlauth.decode('base64'))
            self._cobra_kwargs['authinfo'] = authinfo
        # If they asked for msgpack
        if kwargs.get('msgpack'):
            requireMsgpack()
            self._cobra_sflags |= SFLAG_MSGPACK
        if kwargs.get('json'):
            self._cobra_sflags |= SFLAG_JSON
        if self._cobra_spoolcnt:
            self._cobra_sockpool = queue.Queue()
            # timeout reqeuired for pool usage
            if not self._cobra_timeout:
                self._cobra_timeout = 60
            # retry max required on pooling
            if not self._cobra_retrymax:
                self._cobra_retrymax = 3
            [self._cobra_sockpool.put(self._cobra_newsock()) for i in range(self._cobra_spoolcnt)]
        # If we got passed as user/passwd in our kwargs
        with self._cobra_getsock() as csock:
            mtype,rver,data = csock.cobraTransaction(COBRA_HELLO, name, "")
            if mtype == COBRA_ERROR:
                csock.trashed = True
                if self._cobra_sflags & (SFLAG_MSGPACK|SFLAG_JSON):
                    data = Exception(data)
                raise data
            if rver != version:
                csock.trashed = True
                raise Exception("Server Version Not Supported: %s" % rver)
            if mtype != COBRA_HELLO:
                csock.trashed = True
                raise Exception("Invalid Cobra Hello Response")
            self._cobra_gothello = True
            # Map of remote method names as reported by the server's hello.
            self._cobra_methods = data
    def cobraAuthenticate(self, authinfo):
        '''
        Re-authenticate to the server ( and store auth info for reconnect ).
        '''
        with self._cobra_getsock() as csock:
            mtype,rver,data = csock.cobraTransaction(COBRA_AUTH, '', authinfo)
            if mtype == COBRA_AUTH:
                self._cobra_kwargs['authinfo'] = authinfo
                return True
        return False
    def _cobra_getsock(self, thr=None):
        # Pooled mode: borrow from the shared queue; otherwise keep one
        # cached socket per thread, keyed by (host, port).
        if self._cobra_spoolcnt:
            sock = self._cobra_sockpool.get()
        else:
            if not thr: # if thread isn't specified, use the current thread
                thr = currentThread()
            tsocks = getattr(thr, 'cobrasocks', None)
            if tsocks is None:
                tsocks = {}
                thr.cobrasocks = tsocks
            sock = tsocks.get(self._cobra_slookup)
        if not sock or sock.trashed:
            # Lets build a new socket... shall we?
            sock = self._cobra_newsock()
            # If we have authinfo lets authenticate
            authinfo = self._cobra_kwargs.get('authinfo')
            if authinfo is not None:
                mtype,rver,data = sock.cobraTransaction(COBRA_AUTH, '', authinfo)
                if mtype != COBRA_AUTH:
                    raise CobraAuthException('Authentication Failed!')
            if not self._cobra_spoolcnt:
                tsocks[self._cobra_slookup] = sock
        return sock
    def _cobra_newsock(self):
        """
        This is only used by *clients*
        """
        host = self._cobra_host
        port = self._cobra_port
        timeout = self._cobra_timeout
        retrymax = self._cobra_retrymax
        builder = getSocketBuilder(host,port)
        if builder is None:
            builder = SocketBuilder(host,port)
            builder.setTimeout(timeout) # Might be None...
            if self._cobra_scheme == 'cobrassl':
                builder.setSslEnabled(True)
            addSocketBuilder(host, port, builder)
        authinfo = self._cobra_kwargs.get('authinfo')
        return CobraClientSocket(builder, retrymax=retrymax, sflags=self._cobra_sflags, authinfo=authinfo, pool=self._cobra_sockpool)
    def __dir__(self):
        '''
        return a list of proxied method names
        '''
        return self._cobra_methods.keys()
    def __getstate__(self):
        return self.__dict__
    def __setstate__(self, sdict):
        self.__dict__.update(sdict)
    def __hash__(self):
        return hash(self._cobra_uri)
    def __nonzero__(self):
        # NOTE(review): __nonzero__ is the Python 2 truthiness hook;
        # Python 3 uses __bool__ (objects default to truthy there anyway).
        return True
    def __repr__(self):
        return str(self)
    def __str__(self):
        return "<CobraProxy %s>" % self._cobra_uri
    def __eq__(self, obj):
        ouri = getattr(obj, '_cobra_uri', None)
        return self._cobra_uri == ouri
    def __ne__(self, obj):
        if self == obj:
            return False
        return True
    def __setattr__(self, name, value):
        logger.debug('Setattr: %s:%s', name, repr(value)[:20])
        # Names in the _cobra_ namespace are local proxy state; everything
        # else is forwarded to the remote object.
        if name.startswith('_cobra_'):
            self.__dict__[name] = value
            return
        with self._cobra_getsock() as csock:
            mtype,name,data = csock.cobraTransaction(COBRA_SETATTR, self._cobra_name, (name, value))
            if mtype == COBRA_ERROR:
                raise data
            elif mtype == COBRA_SETATTR:
                return
            else:
                raise Exception("Invalid Cobra Response")
    def __getattr__(self, name):
        logger.debug('Getattr: %s', name)
        if name == "__getinitargs__":
            raise AttributeError()
        # Handle methods
        if self._cobra_methods.get(name, False):
            return CobraMethod(self, name)
        with self._cobra_getsock() as csock:
            mtype, name, data = csock.cobraTransaction(COBRA_GETATTR, self._cobra_name, name)
            if mtype == COBRA_ERROR:
                raise data
            return data
    # For use with ref counted proxies
    def __enter__(self):
        return self
    def __exit__(self, extype, value, tb):
        with self._cobra_getsock() as csock:
            ok = True
            if extype is not None: # Tell the server we broke...
                ok = False
            csock.cobraTransaction(COBRA_GOODBYE, self._cobra_name, ok)
def addSocketBuilder( host, port, builder ):
    '''
    Register a global socket builder which should be used
    when constructing sockets to the given host/port.
    '''
    # Subsequent connections to (host, port) will use this builder.
    socket_builders[ (host,port) ] = builder
def getSocketBuilder(host, port):
    '''
    Retrieve the registered socket builder for the given host/port.
    Returns None when no builder has been registered.
    '''
    return socket_builders.get((host,port))
def initSocketBuilder(host, port):
    '''
    Retrieve or initialize a socket builder for the host/port.
    '''
    key = (host, port)
    builder = socket_builders.get(key)
    if builder is None:
        # First request for this endpoint: create and register a builder.
        builder = SocketBuilder(host, port)
        socket_builders[key] = builder
    return builder
def startCobraServer(host="", port=COBRA_PORT):
    """Start (or return) the module-level CobraDaemon serving in a background thread."""
    global daemon
    if daemon is None:
        daemon = CobraDaemon(host,port)
        daemon.fireThread()
    return daemon
def runCobraServer(host='', port=COBRA_PORT):
    """Serve cobra requests in the calling thread (blocks forever).

    NOTE(review): the local ``daemon`` here shadows, and does not set,
    the module-level global used by shareObject() -- confirm intent.
    """
    daemon = CobraDaemon(host,port)
    daemon.serve_forever()
def shareObject(obj, name=None, doref=False):
    """
    If shareObject is called before startCobraServer
    or startCobraSslServer, it will call startCobraServer

    Returns the name the object was shared under.
    """
    global daemon
    if daemon is None:
        startCobraServer()
    return daemon.shareObject(obj, name, doref=doref)
def unshareObject(name):
    """Unshare *name* from the module-level daemon; returns the object (or None)."""
    return daemon.unshareObject(name)
def swapCobraObject(uri, newname):
    '''
    Parse out the object name from a given cobra
    URI and return a newly constructed URI for
    the shared object <newname> on the same server.
    '''
    scheme, host, port, name, urlparams = chopCobraUri(uri)
    # Preserve any query options from the original URI.
    query = ''
    if urlparams:
        query = '?' + '&'.join('%s=%s' % (key, val) for key, val in urlparams.items())
    return '%s://%s:%d/%s%s' % (scheme, host, port, newname, query)
def requireMsgpack():
    """Raise a helpful error when the optional msgpack module is unavailable."""
    try:
        import msgpack
    except ImportError:
        raise Exception('Missing "msgpack" python module ( http://visi.kenshoto.com/viki/Msgpack )')
| bat-serjo/vivisect | cobra/__init__.py | Python | apache-2.0 | 37,044 |
"""Custom exceptions which used in Mimesis."""
from typing import Any, Optional, Union
from mimesis.enums import Locale
class LocaleError(ValueError):
    """Raised when a locale isn't supported."""

    def __init__(self, locale: Union[Locale, str]) -> None:
        """Remember the offending locale for the error message.

        :param locale: Locale.
        """
        self.locale = locale

    def __str__(self) -> str:
        return "Invalid locale «{}»".format(self.locale)
class SchemaError(ValueError):
    """Raised when schema is unsupported."""

    def __str__(self) -> str:
        # Fixed grammar of the user-facing message ("should a" -> "should be a").
        return "Schema should be a callable object."
class NonEnumerableError(TypeError):
    """Raised when object is not instance of Enum."""

    message = "You should use one item of: «{}» of the object mimesis.enums.{}"

    def __init__(self, enum_obj: Any) -> None:
        """Initialize attributes for informative output.

        :param enum_obj: Enum object.
        """
        # Always store the enum object: the original only assigned
        # ``self.name`` for truthy enums, making __str__ crash with an
        # AttributeError for falsy/empty ones.
        self.name = enum_obj
        if enum_obj:
            self.items = ", ".join(map(str, enum_obj))
        else:
            self.items = ""

    def __str__(self) -> str:
        # Fall back to the raw object when it has no __name__ (e.g. None).
        name = getattr(self.name, "__name__", self.name)
        return self.message.format(self.items, name)
class FieldError(ValueError):
    """Raised when a requested provider field is unknown or missing."""

    def __init__(self, name: Optional[str] = None) -> None:
        """Initialize attributes for more informative output.

        :param name: Name of the field.
        """
        self.name = name
        self.message = "A field «{}» is not supported."
        self.message_none = "Field cannot be None."

    def __str__(self) -> str:
        return (
            self.message_none
            if self.name is None
            else self.message.format(self.name)
        )
| lk-geimfari/elizabeth | mimesis/exceptions.py | Python | mit | 1,703 |
from nltk.corpus import stopwords
import sys
# Place values for the four hex digits of a 16-bit code point.
HEX_1 = 16**3
HEX_2 = 16**2
HEX_3 = 16
HEX_4 = 1

# Digit value (0-15) -> hex character.
digit_to_hex = {i: k for i, k in enumerate('0123456789abcdef')}


def get_hex(an_int):
    """Return the backslash-u escape string (4 hex digits) for an_int (0..0xffff)."""
    out = []
    for place in (HEX_1, HEX_2, HEX_3, HEX_4):
        # Floor division keeps this correct on both Python 2 and 3:
        # plain ``/`` yields a float on Python 3 and breaks the dict lookup.
        out.append(digit_to_hex[an_int // place])
        an_int = an_int % place
    return str("\\u" + ''.join(out))
def reencode_word(word):
    """Return *word* with every non-ASCII character replaced by its backslash-u escape."""
    encoded = [chr(c) if c < 128 else get_hex(c) for c in map(ord, word)]
    return str(''.join(encoded))
if __name__ == '__main__':
    # Python 2 script: emits C++ source for a stopword predicate built
    # from NLTK's stopword corpus.
    #   impl   -> .cpp with the stopword set and is<Lang>Stopword()
    #   header -> matching declaration header
    if len(sys.argv) < 3 or sys.argv[2] not in ('header', 'impl'):
        raise ValueError('usage: <script> $language header|impl')
    language = sys.argv[1]
    func_name = 'is%sStopword' % language.title()
    if sys.argv[2] == 'impl':
        # Escape non-ASCII stopwords as \uXXXX so the C++ literals stay ASCII.
        words = map(reencode_word, stopwords.words(language))
        print '#include <string>'
        print '#include <unordered_set>'
        print '#include "stopwords/%s_stopwords.h"' % language
        print ''
        print 'using namespace std;'
        print ''
        print 'namespace relevanced {'
        print 'namespace stopwords {'
        print ''
        print 'const unordered_set<string> stopwordSet {'
        # One quoted word per line; no trailing comma on the last entry.
        for i in xrange(len(words)):
            word = words[i]
            line = '    "%s"' % word
            if i < (len(words) - 1):
                line += ','
            print line
        print '};'
        print ''
        print 'bool %s(const string &word) {' % func_name
        print '    return stopwordSet.count(word) > 0;'
        print '}'
        print ''
        print '} // stopwords'
        print '} // relevanced'
        print ''
    elif sys.argv[2] == 'header':
        print '#include <string>'
        print ''
        print 'namespace relevanced {'
        print 'namespace stopwords {'
        print ''
        print 'bool %s(const std::string &word);' % func_name
        print ''
        print '} // stopwords'
        print '} // relevanced'
        print ''
| scivey/relevanced | scripts/dump_nltk_stopwords.py | Python | mit | 2,058 |
import hashlib
import requests
import re
import time
import random

# Account secret; sent as 'secretURL' with real-money spins.
secret_addr = '7a126c6c89988807e84f887a3cee48c84f789c6910f80f547dc250ec5db23a5e'
spin_url = 'http://dogespin.l8.lv/ajax-spin.php'
root_url = 'http://dogespin.l8.lv/'
# Extracts the provably-fair spin hash embedded in the page's javascript.
hash_re = re.compile(r'spinHash=\'([0-9abcdef]+)\'')
colors = ['black','red']
def get_next_hash(url=root_url):
    """Scrape the next provably-fair spin hash from the roulette page.

    Returns None when no hash appears in the response body.
    """
    r = requests.get(url)
    hash = hash_re.findall(r.text)
    return hash[0] if hash else None
def spin(color, bet, hash=get_next_hash, balance=0, spin_url=spin_url, secret=secret_addr):
    """Submit one roulette spin to the server.

    :param color: 'black' or 'red'
    :param bet: integer bet amount (must be non-zero)
    :param hash: the spin hash string, or a callable returning one
        (defaults to get_next_hash)
    :param balance: when non-zero, switch to play-money mode with this balance

    Returns the parsed response fields as a list on success, or the
    tuple (response_text, response) on failure.
    """
    if callable(hash):
        hash = hash()
    if color not in colors:
        raise ValueError("invalid color '%s'" % color)
    bet = int(bet)
    if not bet:
        # Fixed: the original never filled the %s placeholder.
        raise ValueError("invalid bet '%s'" % bet)
    data = {
        'mode': 'realmoney',
        'spinHash': hash,
        'secretURL': secret,
        'bet': 'cell-%s:%s;' % (color, bet)
    }
    if balance:
        # Play-money mode: the client supplies the running balance.
        data['balance'] = balance
        data['mode'] = 'playmoney'
        del data['secretURL']
    response = requests.post(spin_url, data)
    if response and not response.text == "ERROR":
        # Server replies with colon-separated fields; reorder for callers.
        response_values = response.text.split(':')
        (result, balance, bet_value, payout,
            change, game_hash, hash_data, next_spin_hash) = response_values[0:8]
        return [result, balance, bet_value, change, next_spin_hash, payout, hash_data, game_hash]
    else:
        return response.text, response
def rsleep(rs):
    """Sleep between spins.

    :param rs: seconds to sleep, a non-empty list of candidate durations
        (one picked at random), or a falsy value for a random 1-10s nap.
    """
    # (Removed unused local ``rnd = random.random()``.)
    if rs and not isinstance(rs, list):
        secs = rs
    elif isinstance(rs, list) and len(rs) > 0:
        secs = random.sample(rs, 1)[0]
    else:
        secs = random.randint(1, 10)
    # Parenthesized single-argument print behaves identically on Python 2 and 3.
    print("sleeping %ss" % secs)
    time.sleep(secs)
def get_color(col):
    """Return 'black' or 'red': random when col == 'rand', else colors[col]."""
    return colors[random.randint(0,1) if col == 'rand' else col]
def start_spinner(secret, starting_bet=1, max_bet=64, play=0, qt_loss=100, qt_win=1000, rs=0, col='rand'):
    """
    Start spinner

    Martingale loop: bet on a color, double the bet after a loss (up to
    max_bet), reset after a win, and stop once total winnings reach
    qt_win or losses reach qt_loss.  KeyboardInterrupt doubles as the
    internal stop signal.
    """
    next_color = get_color(col)
    next_bet = starting_bet
    next_hash = get_next_hash()
    rsleep(rs)
    total_payout = 0
    roll = 0
    print " "
    try:
        while True:
            try:
                roll += 1
                print "Roll #%s" % roll
                print "Now betting %s DOGE on %s. [spinHash: %s]" % (next_bet, next_color, next_hash)
                result = spin(next_color, next_bet, next_hash, play, secret=secret)
                if result[0]=="ERROR":
                    return result
            except requests.HTTPError:
                print "HTTP error"
            else:
                rolled = result[0]
                balance = int(result[1])
                bet = int(result[2])
                change = int(result[3])
                hash_data = result[6]
                total_payout += change
                if play:
                    # Play-money mode: carry the returned balance forward.
                    play = balance
                # Provably-fair check: the advertised hash must equal the
                # sha256 of the revealed spin data.
                if not next_hash == hashlib.sha256(hash_data).hexdigest():
                    print "HASH MISMATCH: %s" % [result]
                    raise KeyboardInterrupt
                next_hash = result[4]
                print "rolled %s" % rolled
                print "Change: %s DOGE" % change
                print "Balance: %s DOGE" % balance
                print "Totals: %s DOGE" % total_payout
                print "----------------------"
                next_color = get_color(col)
                if change < 0 and bet <= max_bet:
                    next_bet = bet*2
                    print "\nDoubling\n"
                else:
                    print "\nResetting bet\n"
                    next_bet = starting_bet
                if qt_loss and total_payout <= qt_loss * -1:
                    print "Significant loss"
                    raise KeyboardInterrupt
                if qt_win and total_payout >= qt_win:
                    print "Significant win"
                    raise KeyboardInterrupt
            finally:
                rsleep(rs)
    except KeyboardInterrupt:
        print "Stopping..."
        print "%s: %s DOGE" % ("Totals:", total_payout)
| powhex/dogespin | dogespin.py | Python | mit | 4,074 |
#
# This file is part of do-mpc
#
# do-mpc: An environment for the easy, modular and efficient implementation of
# robust nonlinear model predictive control
#
# Copyright (c) 2014-2019 Sergio Lucia, Alexandru Tatulea-Codrean
# TU Dortmund. All rights reserved
#
# do-mpc is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version.
#
# do-mpc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with do-mpc. If not, see <http://www.gnu.org/licenses/>.
import do_mpc.tools
import do_mpc.model
import do_mpc.controller
import do_mpc.estimator
import do_mpc.optimizer
import do_mpc.simulator
import do_mpc.graphics
import do_mpc.sampling
| do-mpc/do-mpc | do_mpc/__init__.py | Python | lgpl-3.0 | 1,132 |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import logging
import emission.core.wrapper.wrapperbase as ecwb
import enum as enum
class BatteryStatus(enum.Enum):
    """Charging state codes stored with each battery reading."""
    UNKNOWN = 0
    DISCHARGING = 1
    CHARGING = 2
    FULL = 3
    NOT_CHARGING = 4 # This is an android-only state - unsure how often we will encounter it
class Battery(ecwb.WrapperBase):
    """Wrapper for a phone battery reading (level, status, android extras)."""
    props = {"battery_level_pct": ecwb.WrapperBase.Access.RO, # percentage of the battery left. value between 0 and 100
             "battery_status": ecwb.WrapperBase.Access.RO, # Current status - charging, discharging or full
             "android_health": ecwb.WrapperBase.Access.RO, # android-only battery health indicator
             "android_plugged": ecwb.WrapperBase.Access.RO, # source that it is plugged into
             "android_technology": ecwb.WrapperBase.Access.RO, # technology used to make the battery
             "android_temperature": ecwb.WrapperBase.Access.RO, # android-only: current temperature
             "android_voltage": ecwb.WrapperBase.Access.RO, # android-only: current voltage
             "ts": ecwb.WrapperBase.Access.RO,
             "local_dt": ecwb.WrapperBase.Access.RO,
             "fmt_time": ecwb.WrapperBase.Access.RO
            }
    enums = {"battery_status": BatteryStatus}
    geojson = []
    nullable = []
    local_dates = ['local_dt']

    def _populateDependencies(self):
        # No dependent wrapper properties to compute for battery entries.
        pass
| shankari/e-mission-server | emission/core/wrapper/battery.py | Python | bsd-3-clause | 1,602 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for preprocessing Felix* examples."""
from typing import Optional, Union
from felix import bert_example
from felix import example_builder_for_felix_insert
from felix import insertion_converter
from felix import pointing_converter
from felix import utils
def initialize_builder(
    use_pointing, use_open_vocab, label_map_file,
    max_seq_length, max_predictions_per_seq, vocab_file,
    do_lower_case,
    special_glue_string_for_sources,
    max_mask,
    insert_after_token,
):
  """Returns a builder for tagging and insertion BERT examples.

  Args:
    use_pointing: Whether the model reorders tokens with a pointer network.
    use_open_vocab: Whether insertions use an open vocabulary (Felix /
      FelixInsert) instead of a fixed phrase vocabulary (LaserTagger).
    label_map_file: Path to the tag label map.
    max_seq_length: Maximum sequence length of the tagging model.
    max_predictions_per_seq: Maximum number of insertion predictions.
    vocab_file: Path to the BERT vocabulary file.
    do_lower_case: Whether the vocabulary/input is lower-cased.
    special_glue_string_for_sources: String used to glue multiple sources.
    max_mask: Maximum number of masks (FelixInsert only).
    insert_after_token: Whether insertions go after the token (FelixInsert).

  Returns:
    A BertExampleBuilder or FelixInsertExampleBuilder.

  Raises:
    ValueError: If the (use_pointing, use_open_vocab) combination is not
      supported by the Felix codebase.
  """
  is_felix_insert = (not use_pointing and use_open_vocab)
  # FelixInsert reads the label map with integer keys, the other variants
  # with string keys.
  label_map = utils.read_label_map(
      label_map_file, use_str_keys=(not is_felix_insert))
  if use_pointing:
    if use_open_vocab:
      converter_insertion = insertion_converter.InsertionConverter(
          max_seq_length=max_seq_length,
          max_predictions_per_seq=max_predictions_per_seq,
          label_map=label_map,
          vocab_file=vocab_file)
      converter_tagging = pointing_converter.PointingConverter({},
                                                               do_lower_case)
      builder = bert_example.BertExampleBuilder(
          label_map=label_map,
          vocab_file=vocab_file,
          max_seq_length=max_seq_length,
          converter=converter_tagging,
          do_lower_case=do_lower_case,
          use_open_vocab=use_open_vocab,
          converter_insertion=converter_insertion,
          special_glue_string_for_sources=special_glue_string_for_sources)
    else:
      # Previously this combination fell through every branch and crashed
      # with an UnboundLocalError on `builder`; fail with a clear message.
      raise ValueError('Pointing with a closed vocabulary is not supported '
                       'by the Felix codebase, set `FLAGS.use_open_vocab=True`')
  else:  # Pointer disabled.
    if use_open_vocab:
      builder = example_builder_for_felix_insert.FelixInsertExampleBuilder(
          label_map,
          vocab_file,
          do_lower_case,
          max_seq_length,
          max_predictions_per_seq,
          max_mask,
          insert_after_token,
          special_glue_string_for_sources)
    else:
      raise ValueError('LaserTagger model cannot be trained with the Felix '
                       'codebase yet, set `FLAGS.use_open_vocab=True`')
  return builder
| google-research/google-research | felix/preprocess.py | Python | apache-2.0 | 2,661 |
import os, re, shutil
# Flatten essays/<chapter>[_def]/<file>.markdown into essays/<chapter>.markdown
# by issuing `git mv` commands.  (An earlier shutil.copy-based variant was
# kept commented out in the original; this version simply omits it.)
for (dirpath, _, filenames) in os.walk("essays",):
    for filename in filenames:
        if not filename.endswith(".markdown"):
            continue
        source_path = os.path.join(dirpath, filename)
        # The chapter name is the containing directory, minus a "_def" suffix.
        _, chapter = os.path.split(dirpath)
        chapter = re.sub(r"_def$", "", chapter)
        target_path = os.path.join("essays", chapter + ".markdown")
        command = 'git mv "{0}" "{1}"'.format(source_path, target_path)
        print(command)
        os.system(command)
| DigitalPublishingToolkit/Society-of-the-Query-Reader | scripts/gather_essays.py | Python | gpl-3.0 | 470 |
from django.db import transaction
from evesde.models.locations import Station
from evesde.eveapi import get_api_connection
def import_conquerable_stations():
    """Import all conquerable stations and outposts from the EVE API"""
    api = get_api_connection()
    stations = Station.objects.all()
    objs = []
    # First pass: build/refresh Station objects in memory without touching
    # the database, so all writes can happen in one transaction below.
    for station in api.eve.ConquerableStationList().outposts:
        print "Importing %s" % station.stationName
        try:
            obj = stations.get(pk=station.stationID)
        except Station.DoesNotExist:
            obj = Station(pk=station.stationID)
        obj.name = station.stationName
        obj.system_id = station.solarSystemID
        # NOTE(review): coordinates are zeroed here — presumably the
        # ConquerableStationList call does not provide them; confirm.
        obj.x = 0
        obj.y = 0
        obj.z = 0
        objs.append(obj)
    # Second pass: persist everything atomically.
    with transaction.atomic():
        for obj in objs:
            obj.save()
"""Numeric integration of data coming from a source sensor over time."""
from decimal import Decimal, DecimalException
import logging
import voluptuous as vol
from homeassistant.components.sensor import (
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_POWER,
PLATFORM_SCHEMA,
STATE_CLASS_TOTAL,
SensorEntity,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_UNIT_OF_MEASUREMENT,
CONF_METHOD,
CONF_NAME,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TIME_DAYS,
TIME_HOURS,
TIME_MINUTES,
TIME_SECONDS,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.restore_state import RestoreEntity
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
# Attribute exposing the source entity id on the integration sensor.
ATTR_SOURCE_ID = "source"
# Configuration keys accepted by this platform.
CONF_SOURCE_SENSOR = "source"
CONF_ROUND_DIGITS = "round"
CONF_UNIT_PREFIX = "unit_prefix"
CONF_UNIT_TIME = "unit_time"
CONF_UNIT_OF_MEASUREMENT = "unit"
# Supported Riemann-sum approximation methods.
TRAPEZOIDAL_METHOD = "trapezoidal"
LEFT_METHOD = "left"
RIGHT_METHOD = "right"
INTEGRATION_METHOD = [TRAPEZOIDAL_METHOD, LEFT_METHOD, RIGHT_METHOD]
# SI Metric prefixes
UNIT_PREFIXES = {None: 1, "k": 10 ** 3, "M": 10 ** 6, "G": 10 ** 9, "T": 10 ** 12}
# SI Time prefixes
UNIT_TIME = {
    TIME_SECONDS: 1,
    TIME_MINUTES: 60,
    TIME_HOURS: 60 * 60,
    TIME_DAYS: 24 * 60 * 60,
}
ICON = "mdi:chart-histogram"
DEFAULT_ROUND = 3
# Platform schema; the bare "unit" override is deprecated in favour of the
# unit derived from the source sensor plus prefix/time options.
PLATFORM_SCHEMA = vol.All(
    cv.deprecated(CONF_UNIT_OF_MEASUREMENT),
    PLATFORM_SCHEMA.extend(
        {
            vol.Optional(CONF_NAME): cv.string,
            vol.Required(CONF_SOURCE_SENSOR): cv.entity_id,
            vol.Optional(CONF_ROUND_DIGITS, default=DEFAULT_ROUND): vol.Coerce(int),
            vol.Optional(CONF_UNIT_PREFIX, default=None): vol.In(UNIT_PREFIXES),
            vol.Optional(CONF_UNIT_TIME, default=TIME_HOURS): vol.In(UNIT_TIME),
            vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
            vol.Optional(CONF_METHOD, default=TRAPEZOIDAL_METHOD): vol.In(
                INTEGRATION_METHOD
            ),
        }
    ),
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the integration sensor."""
    # Pull the validated options out of the platform config first so the
    # constructor call stays readable.
    source_entity = config[CONF_SOURCE_SENSOR]
    sensor_name = config.get(CONF_NAME)
    integral = IntegrationSensor(
        source_entity,
        sensor_name,
        config[CONF_ROUND_DIGITS],
        config[CONF_UNIT_PREFIX],
        config[CONF_UNIT_TIME],
        config.get(CONF_UNIT_OF_MEASUREMENT),
        config[CONF_METHOD],
    )
    async_add_entities([integral])
class IntegrationSensor(RestoreEntity, SensorEntity):
    """Representation of an integration sensor.

    Accumulates the Riemann integral of a source sensor over time, e.g.
    turning a power (W) sensor into an energy (Wh) sensor.
    """
    def __init__(
        self,
        source_entity,
        name,
        round_digits,
        unit_prefix,
        unit_time,
        unit_of_measurement,
        integration_method,
    ):
        """Initialize the integration sensor."""
        self._sensor_source_id = source_entity
        self._round_digits = round_digits
        # Running integral; kept as Decimal once the first slice is added.
        self._state = None
        self._method = integration_method
        self._name = name if name is not None else f"{source_entity} integral"
        # Template used to derive our unit from the source's unit, e.g. "kWh".
        self._unit_template = (
            f"{'' if unit_prefix is None else unit_prefix}{{}}{unit_time}"
        )
        self._unit_of_measurement = unit_of_measurement
        self._unit_prefix = UNIT_PREFIXES[unit_prefix]
        self._unit_time = UNIT_TIME[unit_time]
        self._attr_state_class = STATE_CLASS_TOTAL
    async def async_added_to_hass(self):
        """Handle entity which will be added."""
        await super().async_added_to_hass()
        # Restore the previously accumulated integral after a restart.
        if state := await self.async_get_last_state():
            try:
                self._state = Decimal(state.state)
            except (DecimalException, ValueError) as err:
                _LOGGER.warning("Could not restore last state: %s", err)
            else:
                self._attr_device_class = state.attributes.get(ATTR_DEVICE_CLASS)
                if self._unit_of_measurement is None:
                    self._unit_of_measurement = state.attributes.get(
                        ATTR_UNIT_OF_MEASUREMENT
                    )
        @callback
        def calc_integration(event):
            """Handle the sensor state changes."""
            old_state = event.data.get("old_state")
            new_state = event.data.get("new_state")
            # Derive our unit from the source's unit the first time we see it.
            if self._unit_of_measurement is None:
                unit = new_state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
                self._unit_of_measurement = self._unit_template.format(
                    "" if unit is None else unit
                )
            # Integrating a power sensor yields an energy sensor.
            if (
                self.device_class is None
                and new_state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_POWER
            ):
                self._attr_device_class = DEVICE_CLASS_ENERGY
            # Skip slices where either endpoint is missing or unusable.
            if (
                old_state is None
                or new_state is None
                or old_state.state in (STATE_UNKNOWN, STATE_UNAVAILABLE)
                or new_state.state in (STATE_UNKNOWN, STATE_UNAVAILABLE)
            ):
                return
            try:
                # integration as the Riemann integral of previous measures.
                area = 0
                elapsed_time = (
                    new_state.last_updated - old_state.last_updated
                ).total_seconds()
                if self._method == TRAPEZOIDAL_METHOD:
                    # Trapezoidal rule: mean of both samples times dt.
                    area = (
                        (Decimal(new_state.state) + Decimal(old_state.state))
                        * Decimal(elapsed_time)
                        / 2
                    )
                elif self._method == LEFT_METHOD:
                    area = Decimal(old_state.state) * Decimal(elapsed_time)
                elif self._method == RIGHT_METHOD:
                    area = Decimal(new_state.state) * Decimal(elapsed_time)
                # Rescale from base units into the configured prefix/time unit.
                integral = area / (self._unit_prefix * self._unit_time)
                assert isinstance(integral, Decimal)
            except ValueError as err:
                _LOGGER.warning("While calculating integration: %s", err)
            except DecimalException as err:
                _LOGGER.warning(
                    "Invalid state (%s > %s): %s", old_state.state, new_state.state, err
                )
            except AssertionError as err:
                _LOGGER.error("Could not calculate integral: %s", err)
            else:
                if isinstance(self._state, Decimal):
                    self._state += integral
                else:
                    self._state = integral
                self.async_write_ha_state()
        async_track_state_change_event(
            self.hass, [self._sensor_source_id], calc_integration
        )
    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name
    @property
    def native_value(self):
        """Return the state of the sensor."""
        if isinstance(self._state, Decimal):
            return round(self._state, self._round_digits)
        return self._state
    @property
    def native_unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._unit_of_measurement
    @property
    def should_poll(self):
        """No polling needed."""
        return False
    @property
    def extra_state_attributes(self):
        """Return the state attributes of the sensor."""
        return {ATTR_SOURCE_ID: self._sensor_source_id}
    @property
    def icon(self):
        """Return the icon to use in the frontend."""
        return ICON
| jawilson/home-assistant | homeassistant/components/integration/sensor.py | Python | apache-2.0 | 7,671 |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This module uses code from TLSLlite
# TLSLite Author: Trevor Perrin)
import binascii
from x509 import ASN1_Node
def a2b_base64(s):
    """Base64-decode *s* and return a mutable bytearray.

    Any decoding problem is reported as SyntaxError, matching the error
    convention of the PEM helpers in this module.
    """
    try:
        decoded = binascii.a2b_base64(s)
    except Exception as e:
        raise SyntaxError("base64 error: %s" % e)
    return bytearray(decoded)
def b2a_base64(b):
    """Base64-encode *b*; the result keeps binascii's trailing newline."""
    encoded = binascii.b2a_base64(b)
    return encoded
def dePem(s, name):
    """Decode a PEM string into a bytearray of its payload.
    The input must contain an appropriate PEM prefix and postfix
    based on the input name string, e.g. for name="CERTIFICATE":
    -----BEGIN CERTIFICATE-----
    MIIBXDCCAUSgAwIBAgIBADANBgkqhkiG9w0BAQUFADAPMQ0wCwYDVQQDEwRUQUNL
    ...
    KoZIhvcNAQEFBQADAwA5kw==
    -----END CERTIFICATE-----
    The first such PEM block in the input will be found, and its
    payload will be base64 decoded and returned.

    Raises SyntaxError if either marker is missing or the payload is
    not valid base64.
    """
    prefix  = "-----BEGIN %s-----" % name
    postfix = "-----END %s-----" % name
    start = s.find(prefix)
    if start == -1:
        raise SyntaxError("Missing PEM prefix")
    end = s.find(postfix, start+len(prefix))
    if end == -1:
        raise SyntaxError("Missing PEM postfix")
    # Reuse the prefix computed above instead of rebuilding the literal
    # (the original rebuilt "-----BEGIN %s-----" % name inline here).
    s = s[start+len(prefix) : end]
    retBytes = a2b_base64(s) # May raise SyntaxError
    return retBytes
def dePemList(s, name):
    """Decode every PEM block named *name* in *s* into a list of bytearrays.

    Arbitrary text may surround the blocks; blocks are decoded in the order
    they appear.  Returns an empty list when no block is found, and raises
    SyntaxError when a BEGIN marker has no matching END marker.
    """
    prefix = "-----BEGIN %s-----" % name
    postfix = "-----END %s-----" % name
    payloads = []
    remaining = s
    while True:
        begin = remaining.find(prefix)
        if begin == -1:
            return payloads
        finish = remaining.find(postfix, begin + len(prefix))
        if finish == -1:
            raise SyntaxError("Missing PEM postfix")
        body = remaining[begin + len(prefix) : finish]
        payloads.append(a2b_base64(body))  # May raise SyntaxError
        remaining = remaining[finish + len(postfix):]
def pem(b, name):
    """Encode a payload bytearray into a PEM string.
    The input will be base64 encoded, then wrapped in a PEM prefix/postfix
    based on the name string, e.g. for name="CERTIFICATE":
    -----BEGIN CERTIFICATE-----
    MIIBXDCCAUSgAwIBAgIBADANBgkqhkiG9w0BAQUFADAPMQ0wCwYDVQQDEwRUQUNL
    ...
    KoZIhvcNAQEFBQADAwA5kw==
    -----END CERTIFICATE-----
    """
    # NOTE(review): this function mixes the b2a_base64 result with str
    # concatenation, which only works under Python 2 (where both are str);
    # under Python 3 b2a_base64 returns bytes and this would raise TypeError.
    s1 = b2a_base64(b)[:-1] # remove terminating \n
    s2 = ""
    # Re-wrap the base64 payload at the conventional 64 characters per line.
    while s1:
        s2 += s1[:64] + "\n"
        s1 = s1[64:]
    s = ("-----BEGIN %s-----\n" % name) + s2 + \
        ("-----END %s-----\n" % name)
    return s
def pemSniff(inStr, name):
    """Return True if *inStr* contains a "-----BEGIN <name>-----" marker."""
    marker = "-----BEGIN %s-----" % name
    return marker in inStr
def parse_private_key(s):
    """Parse a string containing a PEM-encoded private key.

    Dispatches on the PEM marker: "PRIVATE KEY" is treated as PKCS#8 and
    "RSA PRIVATE KEY" as the SSLeay/PKCS#1 layout.  Raises SyntaxError for
    anything else.
    """
    if pemSniff(s, "PRIVATE KEY"):
        der = dePem(s, "PRIVATE KEY")
        return _parsePKCS8(der)
    if pemSniff(s, "RSA PRIVATE KEY"):
        der = dePem(s, "RSA PRIVATE KEY")
        return _parseSSLeay(der)
    raise SyntaxError("Not a PEM private key file")
def _parsePKCS8(bytes):
    # Parse a DER-encoded PKCS#8 PrivateKeyInfo (already base64-decoded)
    # and return the RSA key components of the embedded private key.
    # NOTE: the parameter shadows the builtin `bytes`; kept as-is.
    s = ASN1_Node(str(bytes))
    root = s.root()
    # PrivateKeyInfo ::= SEQUENCE { version, algorithm, privateKey }
    version_node = s.first_child(root)
    # NOTE(review): bytestr_to_int and decode_OID are not in this module's
    # visible import block -- presumably they come from x509; confirm.
    version = bytestr_to_int(s.get_value_of_type(version_node, 'INTEGER'))
    if version != 0:
        raise SyntaxError("Unrecognized PKCS8 version")
    rsaOID_node = s.next_node(version_node)
    ii = s.first_child(rsaOID_node)
    rsaOID = decode_OID(s.get_value_of_type(ii, 'OBJECT IDENTIFIER'))
    # 1.2.840.113549.1.1.1 is the rsaEncryption OID; only RSA is supported.
    if rsaOID != '1.2.840.113549.1.1.1':
        raise SyntaxError("Unrecognized AlgorithmIdentifier")
    privkey_node = s.next_node(rsaOID_node)
    # The privateKey OCTET STRING wraps a bare ASN.1 RSAPrivateKey.
    value = s.get_value_of_type(privkey_node, 'OCTET STRING')
    return _parseASN1PrivateKey(value)
def _parseSSLeay(bytes):
    # SSLeay/PKCS#1 keys are a bare ASN.1 RSAPrivateKey structure, with no
    # PKCS#8 wrapper.  str(bytes) relies on Python 2 bytearray->str semantics.
    return _parseASN1PrivateKey(ASN1_Node(str(bytes)))
def bytesToNumber(s):
    """Interpret the byte string *s* as a big-endian unsigned integer."""
    hexdigits = binascii.hexlify(s)
    return int(hexdigits, 16)
def _parseASN1PrivateKey(s):
    # Walk an ASN.1 RSAPrivateKey SEQUENCE:
    #   version, n, e, d, p, q, d mod (p-1), d mod (q-1), qInv
    root = s.root()
    version_node = s.first_child(root)
    version = bytestr_to_int(s.get_value_of_type(version_node, 'INTEGER'))
    if version != 0:
        raise SyntaxError("Unrecognized RSAPrivateKey version")
    n = s.next_node(version_node)
    e = s.next_node(n)
    d = s.next_node(e)
    p = s.next_node(d)
    q = s.next_node(p)
    dP = s.next_node(q)
    dQ = s.next_node(dP)
    qInv = s.next_node(dQ)
    # NOTE(review): under Python 2 `map` returns a list; under Python 3 this
    # would be a lazy iterator -- callers appear to expect a sequence.
    return map(lambda x: bytesToNumber(s.get_value_of_type(x, 'INTEGER')), [n, e, d, p, q, dP, dQ, qInv])
| cryptapus/electrum-uno | lib/pem.py | Python | mit | 6,584 |
from pymongo import MongoClient
from helpers import db_url, db_database, parse_csv, to_int, to_float
# Connect using the settings shared via the helpers module.
mongo = MongoClient(db_url)
db = mongo[db_database]
print("Updating school information from 2015 data...")
# Upsert one document per open school from the KS5 attainment CSV.
for school in parse_csv("../data/2015/ks5_attainment.csv"):
    # All schools are RECTYPE=1. Other RECTYPEs are used for local averages.
    # Closed schools are ICLOSE=1. We skip them too.
    if (school["RECTYPE"] != "1") or (school["ICLOSE"] == "1"):
        continue
    db.schools.update(
        { "_id": school["URN"] },
        { "$set": {
            "lea": to_int(school["LEA"]),
            "name": school["SCHNAME"],
            "address": [school["ADDRESS1"], school["ADDRESS2"], school["ADDRESS3"]],
            "town": school["TOWN"],
            "postcode": school["PCODE"],
            "phone": school["TELNUM"],
            "type": school["NFTYPE"],
            "religious": school["RELDENOM"],
            "admissions": school["ADMPOL"],
            "gender": school["GENDER1618"].capitalize(),
            "ages": school["AGERANGE"],
            # Student counts for the 2015 performance year.
            "performance.2015.students": {
                "16-18": to_int(school["TPUP1618"], True),
                "ks5": to_int(school["TALLPUPA"]),
                "academic": to_int(school["TALLPUP_ACADA"]),
                "vocational": to_int(school["TALLPUP_VQA"]),
                "a-level": to_int(school["TALLPUP_ALEVA"])
            },
            # Average point scores (per student / per entry) for A-levels.
            "performance.2015.aps.a-level": {
                "student": to_float(school["APSFTE_ALEVA"], True),
                "entry": to_float(school["TALLPPE_ALEVA"], True)
            }
          }
        },
        upsert=True
    )
    # "NEW" in the 16-18 pupil column marks schools new in this dataset.
    if (school["TPUP1618"] == "NEW"):
        db.schools.update({"_id": school["URN"]}, {"$set": {"new": True}})
mongo.close()
print("\nDone.")
| danielgavrilov/schools | db/insert_schools.py | Python | mit | 1,894 |
class OrderedDict(dict):
    """
    A dictionary that keeps its keys in the order in which they're inserted.
    Copied from Django's SortedDict with some modifications.

    The insertion order is tracked in the ``keyOrder`` list, which every
    mutating method keeps in sync with the underlying dict.
    """
    def __new__(cls, *args, **kwargs):
        instance = super(OrderedDict, cls).__new__(cls, *args, **kwargs)
        instance.keyOrder = []
        return instance
    def __init__(self, data=None):
        if data is None:
            data = {}
        super(OrderedDict, self).__init__(data)
        if isinstance(data, dict):
            # Plain dicts carry no reliable order; take whatever keys() gives.
            self.keyOrder = data.keys()
        else:
            # A sequence of (key, value) pairs preserves first-seen order.
            self.keyOrder = []
            for key, value in data:
                if key not in self.keyOrder:
                    self.keyOrder.append(key)
    def __deepcopy__(self, memo):
        from copy import deepcopy
        return self.__class__([(key, deepcopy(value, memo))
                               for key, value in self.iteritems()])
    def __setitem__(self, key, value):
        super(OrderedDict, self).__setitem__(key, value)
        if key not in self.keyOrder:
            self.keyOrder.append(key)
    def __delitem__(self, key):
        super(OrderedDict, self).__delitem__(key)
        self.keyOrder.remove(key)
    def __iter__(self):
        for k in self.keyOrder:
            yield k
    def pop(self, k, *args):
        result = super(OrderedDict, self).pop(k, *args)
        try:
            self.keyOrder.remove(k)
        except ValueError:
            # Key wasn't in the dictionary in the first place. No problem.
            pass
        return result
    def popitem(self):
        result = super(OrderedDict, self).popitem()
        self.keyOrder.remove(result[0])
        return result
    def items(self):
        return zip(self.keyOrder, self.values())
    def iteritems(self):
        for key in self.keyOrder:
            yield key, super(OrderedDict, self).__getitem__(key)
    def keys(self):
        # Return a copy so callers can't corrupt the internal order list.
        return self.keyOrder[:]
    def iterkeys(self):
        return iter(self.keyOrder)
    def values(self):
        return [super(OrderedDict, self).__getitem__(k) for k in self.keyOrder]
    def itervalues(self):
        for key in self.keyOrder:
            yield super(OrderedDict, self).__getitem__(key)
    def update(self, dict_):
        for k, v in dict_.items():
            self.__setitem__(k, v)
    def setdefault(self, key, default):
        if key not in self.keyOrder:
            self.keyOrder.append(key)
        return super(OrderedDict, self).setdefault(key, default)
    def value_for_index(self, index):
        """Return the value of the item at the given zero-based index."""
        return self[self.keyOrder[index]]
    def insert(self, index, key, value):
        """Insert the key, value pair before the item with the given index."""
        if key in self.keyOrder:
            n = self.keyOrder.index(key)
            del self.keyOrder[n]
            # Removing the key shifted everything after it left by one.
            if n < index:
                index -= 1
        self.keyOrder.insert(index, key)
        super(OrderedDict, self).__setitem__(key, value)
    def copy(self):
        """Return a copy of this object."""
        # This way of initializing the copy means it works for subclasses, too.
        obj = self.__class__(self)
        obj.keyOrder = self.keyOrder[:]
        return obj
    def __repr__(self):
        """
        Replace the normal dict.__repr__ with a version that returns the keys
        in their sorted order.
        """
        return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])
    def clear(self):
        super(OrderedDict, self).clear()
        self.keyOrder = []
    def index(self, key):
        """ Return the index of a given key. """
        return self.keyOrder.index(key)
    def index_for_location(self, location):
        """Return the insertion index for a location key, or None for the end.

        Locations are '_begin', '_end', '<key' (before key) or '>key'
        (after key).
        """
        if location == '_begin':
            i = 0
        elif location == '_end':
            i = None
        elif location.startswith('<') or location.startswith('>'):
            i = self.index(location[1:])
            if location.startswith('>'):
                if i >= len(self):
                    # last item
                    i = None
                else:
                    i += 1
        else:
            raise ValueError('Not a valid location: "%s". Location key '
                             'must start with a ">" or "<".' % location)
        return i
    def add(self, key, value, location):
        """ Insert by key location. """
        i = self.index_for_location(location)
        if i is not None:
            self.insert(i, key, value)
        else:
            self.__setitem__(key, value)
    def link(self, key, location):
        """ Change location of an existing item. """
        n = self.keyOrder.index(key)
        del self.keyOrder[n]
        i = self.index_for_location(location)
        try:
            if i is not None:
                self.keyOrder.insert(i, key)
            else:
                self.keyOrder.append(key)
        except Exception:
            # Restore the key at its old position to prevent data loss, then
            # propagate the original error.  (This previously referenced an
            # undefined name `Error`, so any failure here surfaced as a
            # NameError and the restore never ran.)
            self.keyOrder.insert(n, key)
            raise
| sorenh/cc | vendor/tornado/website/markdown/odict.py | Python | apache-2.0 | 5,157 |
# -*- coding: UTF-8 -*-
from django.core.management.base import BaseCommand, CommandError
from p3 import models
from assopy import utils
import time
class Command(BaseCommand):
    """
    Backfill the country field of P3 profiles by geocoding their free-text
    location: either for a single user (email passed as first argument) or
    for every profile that has a location but no country yet.
    """
    def handle(self, *args, **options):
        try:
            email = args[0]
        except IndexError:
            # No email given: process every profile with a location set but
            # no country recorded yet.
            qs = models.P3Profile.objects\
                .filter(country='')\
                .exclude(profile__location='')\
                .select_related('profile__user')
        else:
            # Restrict to the single profile matching the given email.
            qs = models.P3Profile.objects\
                .filter(profile__user__email=email)\
                .select_related('profile__user')
        for p in qs:
            c = utils.geocode_country(p.profile.location)
            print p.profile.user.email, '-', p.profile.location, '->', c
            p.country = c
            p.save()
            # Throttle to roughly one geocoding request per second.
            time.sleep(1)
| pythonitalia/pycon_site | p3/management/commands/update_attendee_country.py | Python | bsd-2-clause | 873 |
foo = 0  # module-level binding; the only name this one-line stub module defines
from django.core import exceptions
from olympia.amo.fields import HttpHttpsOnlyURLField
from olympia.amo.tests import TestCase
class HttpHttpsOnlyURLFieldTestCase(TestCase):
    # Exercises HttpHttpsOnlyURLField.clean(): only http/https URLs may pass,
    # and scheme-less input is normalised to http.
    def setUp(self):
        super(HttpHttpsOnlyURLFieldTestCase, self).setUp()
        self.field = HttpHttpsOnlyURLField()
    def test_invalid_scheme_validation_error(self):
        # javascript: URLs must be rejected outright.
        with self.assertRaises(exceptions.ValidationError):
            self.field.clean(u'javascript://foo.com/')
    def test_invalid_ftp_scheme_validation_error(self):
        with self.assertRaises(exceptions.ValidationError):
            self.field.clean(u'ftp://foo.com/')
    def test_invalid_ftps_scheme_validation_error(self):
        with self.assertRaises(exceptions.ValidationError):
            self.field.clean(u'ftps://foo.com/')
    def test_no_scheme_assumes_http(self):
        # Bare host names get an http:// scheme prepended.
        assert self.field.clean(u'foo.com') == 'http://foo.com'
    def test_http_scheme(self):
        assert self.field.clean(u'http://foo.com/') == u'http://foo.com/'
    def test_https_scheme(self):
        assert self.field.clean(u'https://foo.com/') == u'https://foo.com/'
    def test_catches_invalid_url(self):
        # https://github.com/mozilla/addons-server/issues/1452
        # Malformed host ("[" in the domain) must not crash the validator.
        with self.assertRaises(exceptions.ValidationError):
            assert self.field.clean(u'https://test.[com')
| harikishen/addons-server | src/olympia/amo/tests/test_fields.py | Python | bsd-3-clause | 1,350 |
from treeherder.config.settings import *
# Test-only overrides on top of the production settings imported above.
DATABASES["default"]["TEST"] = {"NAME": "test_treeherder"}
TREEHERDER_TEST_PROJECT = "%s_jobs" % DATABASES["default"]["TEST"]["NAME"]
# this makes celery calls synchronous, useful for unit testing
CELERY_ALWAYS_EAGER = True
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
# Reconfigure pulse to operate on default vhost of rabbitmq
PULSE_URI = BROKER_URL
PULSE_EXCHANGE_NAMESPACE = 'test'
# Set a fake api key for testing bug filing
BZ_API_KEY = "12345helloworld"
BZ_API_URL = "https://thisisnotbugzilla.org"
# ELASTIC SEARCH
# Prefix indices used in tests to avoid clobbering data
ELASTIC_SEARCH.update({
    "index_prefix": "test",
})
AUTOCLASSIFY_JOBS = True
| akhileshpillai/treeherder | tests/settings.py | Python | mpl-2.0 | 703 |
#!/usr/bin/env python
"""
Python script to convert a list of active and passive residues into
ambiguous interaction restraints for HADDOCK
"""
def active_passive_to_ambig(active1, passive1, active2, passive2, segid1='A', segid2='B'):
    """Print Ambiguous Interaction Restraints (AIRs) in CNS assign syntax.

    Each active residue of one molecule is restrained to the combined
    active + passive selection of the other molecule, in both directions.

    Parameters
    ----------
    active1 : list
        List of active residue numbers of the first segid
    passive1 : list
        List of passive residue numbers of the first segid
    active2 : list
        List of active residue numbers of the second segid
    passive2 : list
        List of passive residue numbers of the second segid
    segid1 : string
        Segid to use for the first model
    segid2 : string
        Segid to use for the second model
    """
    all1 = active1 + passive1
    all2 = active2 + passive2

    # Restraints from each active residue of molecule 1 towards molecule 2.
    for resi1 in active1:
        print('assign (resi {:d} and segid {:s})'.format(resi1, segid1))
        print('(')
        c = 0
        for resi2 in all2:
            print('       (resi {:d} and segid {:s})'.format(resi2, segid2))
            c += 1
            if c != len(all2):
                print('        or')
        print(') 2.0 2.0 0.0\n')

    # Symmetric restraints from each active residue of molecule 2.
    # (Previously this loop printed '(\n' and ' or\n', leaving stray blank
    # lines that the first loop did not produce; both loops now match.)
    for resi2 in active2:
        print('assign (resi {:d} and segid {:s})'.format(resi2, segid2))
        print('(')
        c = 0
        for resi1 in all1:
            print('       (resi {:d} and segid {:s})'.format(resi1, segid1))
            c += 1
            if c != len(all1):
                print('        or')
        print(') 2.0 2.0 0.0\n')
def main():
    """Command-line entry point: read two active/passive files, print AIRs."""
    import sys

    if len(sys.argv) != 3:
        print('\nUsage:\n python active-passive_to_ambig.py <active-passive-file1> <active-passive-file2>\n\n' +
              'where <active-passive-file> is a file consisting of two space-delimited lines with\n' +
              'the first line active residues numbers and the second line passive residue numbers\n')
        sys.exit()

    def read_residues(path):
        # Each file holds two whitespace-delimited lines:
        # active residue numbers, then passive residue numbers.
        return [[int(token) for token in line.split()] for line in open(path)]

    active1, passive1 = read_residues(sys.argv[1])
    active2, passive2 = read_residues(sys.argv[2])
    active_passive_to_ambig(active1, passive1, active2, passive2)


if __name__ == '__main__':
    main()
| haddocking/haddock-tools | active-passive-to-ambig.py | Python | apache-2.0 | 2,352 |
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import numpy as np
import sys
from pyscf.nao.m_c2r import c2r_c
from pyscf.nao.m_ao_matelem import ao_matelem_c
from pyscf.nao.m_pack2den import ij2pack_u
from scipy.linalg import eigh
from timeit import default_timer as timer
#
#
#
class local_vertex_c(ao_matelem_c):
  ''' Constructor of the local product functions and the product vertex coefficients. '''
  def __init__(self, ao_log):
    ao_matelem_c.__init__(self, ao_log.rr, ao_log.pp)
    self.init_one_set(ao_log) # @classmethod ???
    # Integration weights on the logarithmic momentum grid (Coulomb metric).
    self.dkappa_pp = 4*np.pi*np.log( self.kk[1]/self.kk[0])*self.kk
    self.c2r_c = c2r_c(2*self.jmx) # local vertex c2r[:,:] coefficients
  #
  def get_local_vertex(self, sp):
    from pyscf.nao.m_thrj import thrj
    """
    Constructor of vertex for a given specie
    Args:
      sp : specie number
    Result:
      Dictionary with the product functions, vertex coefficients and eigenvalues
      in each angular-momentum "sektor"
      dominant products functions: j2xff
      dominant vertex coefficients (angular part of): j2xww
      eigenvalues of Coulomb metric : j2eva
    """
    # NOTE(review): the thrj import above, t1 and the commented timing code
    # below appear unused -- presumably leftovers; confirm before removing.
    assert(sp>-1)
    mu2s = self.ao1.sp_mu2s[sp]
    mu2j = self.ao1.sp_mu2j[sp]
    info = self.ao1.sp2info[sp]
    mu2ff = self.ao1.psi_log[sp]
    no = self.ao1.sp2norbs[sp]
    nmu = self.ao1.sp2nmult[sp]
    jmx_sp = np.amax(mu2j)
    # Count the radial-function products contributing to each angular
    # momentum j of the product basis (selection rule |j1-j2| <= j <= j1+j2).
    j2nf=np.zeros((2*jmx_sp+1), dtype=int) # count number of radial functions products per angular momentum
    for mu1,j1,s1,f1 in info:
      for mu2,j2,s2,f2 in info:
        if mu2<mu1: continue
        for j in range(abs(j1-j2),j1+j2+1,2):
          j2nf[j] = j2nf[j] + 1
    # For each j, record which multiplett pair (mu1,mu2) and which angular
    # momenta (j1,j2) generated each product.
    j_p2mus = [ [p for p in range(j2nf[j]) ] for j in range(2*jmx_sp+1)]
    j_p2js = [ [p for p in range(j2nf[j]) ] for j in range(2*jmx_sp+1)]
    j2p = np.zeros((2*jmx_sp+1), dtype=int)
    for mu1,j1,s1,f1 in info:
      for mu2,j2,s2,f2 in info:
        if mu2<mu1: continue
        for j in range(abs(j1-j2),j1+j2+1,2):
          j_p2mus[j][j2p[j]] = [mu1,mu2]
          j_p2js[j][j2p[j]] = [j1,j2]
          j2p[j]+=1
    # Radial parts of the original products, packed upper-triangle style.
    pack2ff = np.zeros((nmu*(nmu+1)//2,self.nr)) # storage for original products
    for mu2 in range(nmu):
      for mu1 in range(mu2+1): pack2ff[ij2pack_u(mu1,mu2),:] = mu2ff[mu1,:]*mu2ff[mu2,:]
    j2xff = [] # Storage for dominant product's functions (list of numpy arrays: x*f(r)*f(r))
    j2xww = [] # Storage for dominant product's vertex (angular part of: x*wigner*wigner)
    j2eva = [] # Storage for eigenvalues in each angular momentum "sector"
    t1 = 0
    tstart = timer()
    for j,dim in enumerate(j2nf): # Metrik ist dim * dim in diesem Sektor
      # Spherical Bessel transform of every product in this j-sektor.
      lev2ff = np.zeros((dim,self.nr))
      for lev in range(dim): lev2ff[lev,:] = self.sbt(pack2ff[ ij2pack_u( *j_p2mus[j][lev] ),:], j, 1)
      # Coulomb metric between products, evaluated in momentum space.
      metric = np.zeros((dim,dim))
      for lev_1 in range(dim):
        for lev_2 in range(lev_1+1):
          metric[lev_2,lev_1]=metric[lev_1,lev_2]=(lev2ff[lev_1,:]*lev2ff[lev_2,:]*self.dkappa_pp).sum() # Coulomb Metrik enthaelt Faktor 1/p**2
      # Diagonalize the metric; eigenvectors define the dominant products.
      eva,x=eigh(metric)
      j2eva.append(eva)
      xff = np.zeros((dim,self.nr)) #!!!! Jetzt dominante Orbitale bilden
      for domi in range(dim):
        for n in range(dim):
          xff[domi,:] = xff[domi,:] + x[n,domi]*pack2ff[ij2pack_u(*j_p2mus[j][n]),:]
      j2xff.append(xff)
      kinematical_vertex = np.zeros((dim, 2*j+1, no, no)) # Build expansion coefficients V^ab_mu defined by f^a(r) f^b(r) = V^ab_mu F^mu(r)
      for num,[[mu1,mu2], [j1,j2]] in enumerate(zip(j_p2mus[j],j_p2js[j])):
        if j<abs(j1-j2) or j>j1+j2 : continue
        for m1,o1 in zip(range(-j1,j1+1), range(mu2s[mu1],mu2s[mu1+1])):
          for m2,o2 in zip(range(-j2,j2+1), range(mu2s[mu2],mu2s[mu2+1])):
            m=m1+m2
            if abs(m)>j: continue
            # Gaunt coefficient with Condon-Shortley phase (-1)**m.
            i3y=self.get_gaunt(j1,m1,j2,m2)*(-1.0)**m
            kinematical_vertex[num,j+m,o2,o1] = kinematical_vertex[num,j+m,o1,o2] = i3y[j-abs(j1-j2)]
      # Rotate the dominant-product vertex from complex to real harmonics.
      xww = np.zeros((dim, 2*j+1, no, no))
      for domi in range(dim):
        xww0 = np.einsum('n,nmab->mab', x[:,domi], kinematical_vertex[:,:,:,:])
        xww[domi,:,:,:] = self.c2r_c.c2r_moo(j, xww0, info)
      j2xww.append(xww)
    #tfinish = timer()
    #print(tfinish-tstart, t1)
    return {"j2xww": j2xww, "j2xff": j2xff, "j2eva": j2eva}
| gkc1000/pyscf | pyscf/nao/m_local_vertex.py | Python | apache-2.0 | 4,993 |
#python
import k3d
k3d.check_node_environment(context, "MeshSourceScript")

# One-time setup: containers for the mesh points and their selection state.
points = context.output.create_points()
point_selection = context.output.create_point_selection()

# One-time setup: a cubic-curve primitive on the output mesh.
curves = k3d.cubic_curve.create(context.output)

# A single constant width shared by every curve ...
constantwidth = curves.constant_attributes().create("constantwidth", "k3d::double_t")
# ... and one color per curve.
Cs = curves.curve_attributes().create("Cs", "k3d::color")

curves.periodic().append(False)
curves.material().append(None)
constantwidth.append(0.5)

# The S-shaped template every curve is built from (7 control points).
template = [(0, 0, 5), (-5, 0, 5), (-5, 0, 0), (0, 0, 0), (5, 0, 0), (5, 0, -5), (0, 0, -5)]

# Lay out five copies of the template, each shifted 5 units along X and
# shaded with an increasing amount of blue removed (yellow ramp).
for copy_index in range(5):
    first_point = len(points)
    curves.curve_first_points().append(len(curves.curve_points()))
    curves.curve_point_counts().append(7)
    curves.curve_selections().append(0.0)
    for offset in range(7):
        curves.curve_points().append(first_point + offset)
    for x, y, z in template:
        points.append(k3d.point3(x + (copy_index * 5), y, z))
        point_selection.append(0.0)
    Cs.append(k3d.color(1, 1, copy_index * 0.2))
| barche/k3d | share/k3d/scripts/MeshSourceScript/cubic_curves.py | Python | gpl-2.0 | 1,499 |
# This file is protected via CODEOWNERS
# Single source of truth for the package version (PEP 440 version string).
__version__ = "1.26.2"
| prrvchr/GContactOOo | uno/lib/python/urllib3/_version.py | Python | gpl-3.0 | 63 |
from Parser import Parser
from urllib import quote_plus
from HTMLParser import HTMLParser
import xml.etree.ElementTree as ET
import os
import re
class PONSParser(Parser):
    """Scrapes translations from en.pons.com.

    Python 2 code: urllib.quote_plus, HTMLParser and dict.iteritems are all
    Python 2 APIs.
    """
    def __init__(self):
        # Maps language code -> human readable name.
        self.langKeys = {}
        # Maps source language code -> {target code: PONS language-pair code}.
        self.sourceTargetPairs = {}
        # CSS classes of <span> elements carrying grammar metadata; these are
        # stripped from the page when grammar infos are not wanted.
        self.exceptSpans = "^genus$|^style$|^case$|^rhetoric$|^region$|^number$|^topic$|^category$|^perf$|^target$|^info$"
        self.parseLangXML()
    def createUrl(self, searchTerm, sourceLang, targetLang):
        """Build the PONS dictionary query URL for a term and language pair."""
        search = quote_plus(str(searchTerm))
        src = quote_plus(sourceLang)
        combo = quote_plus(self.sourceTargetPairs[sourceLang][targetLang])
        return "http://en.pons.com/translate?q=" + search + "&l=" + combo + "&in=" + src + "&lf=" + src
    def getTranslation(self, searchTerm, sourceLang, targetLang, loadGrammarInfos):
        """Return [source, target] text pairs scraped from the result page,
        or None when the page could not be fetched."""
        doc = self.getSoup(searchTerm, sourceLang, targetLang)
        if doc is None:
            return
        translations = []
        hp = HTMLParser()
        if not loadGrammarInfos:
            # Drop grammar annotation spans before extracting the plain text.
            [s.extract() for s in doc.findAll("span", {"class" : re.compile(self.exceptSpans)})]
        sources = doc.findAll("div", {"class" : re.compile("^source$")})
        targets = doc.findAll("div", {"class" : re.compile("^target( rtl)?$")})
        for i in range(len(sources)):
            # Collapse each result <div> to unescaped plain text.
            source = hp.unescape("".join(sources[i].findAll(text=True)).strip())
            target = hp.unescape("".join(targets[i].findAll(text=True)).strip())
            translations.append([source, target])
        return translations
    def getSourceLanguages(self):
        """Return all known source languages as {code: name}."""
        return self.langKeys
    def getTargetLanguages(self, sourceLanguage):
        """Return the target languages available for *sourceLanguage* as {code: name}."""
        targetLangs = {}
        for code, codePair in self.sourceTargetPairs[sourceLanguage].iteritems():
            targetLangs[code] = self.langKeys[code]
        return targetLangs
    def parseLangXML(self):
        """Load language names, codes and pair codes from pons_lang_codes.xml
        shipped next to this module."""
        tree = ET.parse(os.path.join(os.path.dirname(__file__), "pons_lang_codes.xml"))
        root = tree.getroot()
        for language in root:
            lang = ""
            code = ""
            targets = {}
            for languageAttribute in language:
                if languageAttribute.tag == "name":
                    lang = languageAttribute.text
                elif languageAttribute.tag == "code":
                    code = languageAttribute.text
                elif languageAttribute.tag == "targets":
                    targets = self.getTargets(languageAttribute)
            self.langKeys[code] = lang
            self.sourceTargetPairs[code] = targets
    def getTargets(self, targets):
        """Parse one <targets> element into {target language code: pair code}."""
        targetsArr = {}
        for target in targets:
            langCode = ""
            combCode = ""
            for targetAttr in target:
                if targetAttr.tag == "lang":
                    langCode = targetAttr.text
                elif targetAttr.tag == "code":
                    combCode = targetAttr.text
            targetsArr[langCode] = combCode
        return targetsArr
    def getLangCode(self, language):
        """Reverse lookup: return the code for a language name (None if absent)."""
        for code, name in self.langKeys.iteritems():
            if name == language:
                return code
| jannewulf/Anki-Translator | TranslatorAddon/Parser/PONSParser.py | Python | gpl-3.0 | 3,147 |
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
import sys, os, time, socket, traceback
from functools import partial
from PyQt4.Qt import (QCoreApplication, QIcon, QObject, QTimer,
QPixmap, QSplashScreen, QApplication)
from calibre import prints, plugins, force_unicode
from calibre.constants import (iswindows, __appname__, isosx, DEBUG, islinux,
filesystem_encoding, get_portable_base)
from calibre.utils.ipc import gui_socket_address, RC
from calibre.gui2 import (ORG_NAME, APP_UID, initialize_file_icon_provider,
Application, choose_dir, error_dialog, question_dialog, gprefs)
from calibre.gui2.main_window import option_parser as _option_parser
from calibre.utils.config import prefs, dynamic
from calibre.library.database2 import LibraryDatabase2
from calibre.library.sqlite import sqlite, DatabaseException
if iswindows:
    # Native Windows helper extension (used below for special folder lookup).
    winutil = plugins['winutil'][0]
class AbortInit(Exception):
    """Signals that GUI initialization must be aborted (caught in main())."""
def option_parser():
    """Build the command line option parser for the main calibre GUI."""
    parser = _option_parser('''\
%prog [opts] [path_to_ebook]
Launch the main calibre Graphical User Interface and optionally add the ebook at
path_to_ebook to the database.
''')
    parser.add_option('--with-library', default=None, action='store',
                      help=_('Use the library located at the specified path.'))
    parser.add_option('--start-in-tray', default=False, action='store_true',
                      help=_('Start minimized to system tray.'))
    parser.add_option('-v', '--verbose', default=0, action='count',
                      help=_('Log debugging information to console'))
    parser.add_option('--no-update-check', default=False, action='store_true',
            help=_('Do not check for updates'))
    parser.add_option('--ignore-plugins', default=False, action='store_true',
            help=_('Ignore custom plugins, useful if you installed a plugin'
                ' that is preventing calibre from starting'))
    parser.add_option('-s', '--shutdown-running-calibre', default=False,
            action='store_true',
            help=_('Cause a running calibre instance, if any, to be'
                ' shutdown. Note that if there are running jobs, they '
                'will be silently aborted, so use with care.'))
    return parser
def find_portable_library():
    """Locate (or create) the library used by a Calibre Portable install.

    Does nothing when not running from a portable base. Otherwise picks an
    existing library directory under the portable base (preferring one that
    matches the currently configured library name), stores it in prefs and
    creates the directory if needed. Raises AbortInit when the resulting
    path is too long.
    """
    base = get_portable_base()
    if base is None: return
    import glob
    # Any directory containing a metadata.db under the base is a candidate.
    candidates = [os.path.basename(os.path.dirname(x)) for x in glob.glob(
        os.path.join(base, u'*%smetadata.db'%os.sep))]
    if not candidates:
        candidates = [u'Calibre Library']
    lp = prefs['library_path']
    if not lp:
        lib = os.path.join(base, candidates[0])
    else:
        # Prefer the candidate whose name matches the configured library.
        lib = None
        q = os.path.basename(lp)
        for c in candidates:
            if c.lower() == q.lower():
                lib = os.path.join(base, c)
                break
        if lib is None:
            lib = os.path.join(base, candidates[0])
    # NOTE(review): the limit checked here is 74 characters but the message
    # below says 59 — confirm which is correct (message text left unchanged
    # as it is a translated string).
    if len(lib) > 74:
        error_dialog(None, _('Path too long'),
                _("Path to Calibre Portable (%s) "
                    'too long. Must be less than 59 characters.')%base, show=True)
        raise AbortInit()
    prefs.set('library_path', lib)
    if not os.path.exists(lib):
        os.mkdir(lib)
def init_qt(args):
    """Parse the command line and create the Qt Application object.

    Returns (app, opts, args, actions) where actions are the global
    application menubar actions. May raise AbortInit (via
    find_portable_library).
    """
    from calibre.gui2.ui import Main
    parser = option_parser()
    opts, args = parser.parse_args(args)
    find_portable_library()
    if opts.with_library is not None:
        # An explicit --with-library overrides the configured library path.
        if not os.path.exists(opts.with_library):
            os.makedirs(opts.with_library)
        if os.path.isdir(opts.with_library):
            prefs.set('library_path', os.path.abspath(opts.with_library))
            prints('Using library at', prefs['library_path'])
    QCoreApplication.setOrganizationName(ORG_NAME)
    QCoreApplication.setApplicationName(APP_UID)
    override = 'calibre-gui' if islinux else None
    app = Application(args, override_program_name=override)
    actions = tuple(Main.create_application_menubar())
    app.setWindowIcon(QIcon(I('lt.png')))
    return app, opts, args, actions
def get_default_library_path():
    """Return a default library directory under the user's home.

    Falls back to the home directory itself when the library directory
    cannot be created.
    """
    name = _('Calibre Library')
    if iswindows:
        name = 'Calibre Library'
    if isinstance(name, unicode):
        # Filesystem APIs want bytes here (Python 2); fall back to the
        # untranslated name when the encoding fails.
        try:
            name = name.encode(filesystem_encoding)
        except:
            name = 'Calibre Library'
    path = os.path.expanduser('~'+os.sep+name)
    if not os.path.exists(path):
        try:
            os.makedirs(path)
        except:
            path = os.path.expanduser('~')
    return path
def get_library_path(parent=None):
    """Return the path to the calibre library, asking the user if needed.

    Creates the directory when missing; on failure the user is prompted to
    choose another location. May return None/'' if the user cancels the
    chooser after a creation failure.
    """
    library_path = prefs['library_path']
    if library_path is None: # Need to migrate to new database layout
        base = os.path.expanduser('~')
        if iswindows:
            base = winutil.special_folder_path(winutil.CSIDL_PERSONAL)
            if not base or not os.path.exists(base):
                from PyQt4.Qt import QDir
                base = unicode(QDir.homePath()).replace('/', os.sep)
        candidate = choose_dir(None, 'choose calibre library',
                _('Choose a location for your calibre e-book library'),
                default_dir=base)
        if not candidate:
            candidate = os.path.join(base, 'Calibre Library')
        library_path = os.path.abspath(candidate)
    if not os.path.exists(library_path):
        try:
            os.makedirs(library_path)
        except:
            # Could not create the directory: report and let the user pick
            # a different location.
            error_dialog(parent, _('Failed to create library'),
                    _('Failed to create calibre library at: %r.')%library_path,
                    det_msg=traceback.format_exc(), show=True)
            library_path = choose_dir(parent, 'choose calibre library',
                    _('Choose a location for your new calibre e-book library'),
                    default_dir=get_default_library_path())
    return library_path
def repair_library(library_path):
    """Attempt to repair the library database at *library_path*.

    Returns whatever repair_library_at reports (truthy on success).
    """
    from calibre.gui2.dialogs.restore_library import repair_library_at
    result = repair_library_at(library_path)
    return result
class GuiRunner(QObject):
    '''Make sure an event loop is running before starting the main work of
    initialization'''
    def __init__(self, opts, args, actions, listener, app, gui_debug=None):
        self.startup_time = time.time()
        self.opts, self.args, self.listener, self.app = opts, args, listener, app
        self.gui_debug = gui_debug
        self.actions = actions
        self.main = None
        QObject.__init__(self)
        self.splash_screen = None
        # Defer the real initialization until the Qt event loop is running.
        self.timer = QTimer.singleShot(1, self.initialize)
        if DEBUG:
            prints('Starting up...')
    def start_gui(self, db):
        """Create and show the main window, using the opened database *db*."""
        from calibre.gui2.ui import Main
        main = Main(self.opts, gui_debug=self.gui_debug)
        if self.splash_screen is not None:
            self.splash_screen.showMessage(_('Initializing user interface...'))
        with gprefs: # Only write gui.json after initialization is complete
            main.initialize(self.library_path, db, self.listener, self.actions)
        if self.splash_screen is not None:
            self.splash_screen.finish(main)
        if DEBUG:
            prints('Started up in %.2f seconds'%(time.time() -
                self.startup_time), 'with', len(db.data), 'books')
        add_filesystem_book = partial(main.iactions['Add Books'].add_filesystem_book, allow_device=False)
        sys.excepthook = main.unhandled_exception
        # Add any book files passed on the command line.
        if len(self.args) > 1:
            files = [os.path.abspath(p) for p in self.args[1:] if not
                    os.path.isdir(p)]
            if len(files) < len(sys.argv[1:]):
                prints('Ignoring directories passed as command line arguments')
            if files:
                add_filesystem_book(files)
        self.app.file_event_hook = add_filesystem_book
        self.main = main
    def initialization_failed(self):
        # Unrecoverable failure: tear down Qt and exit.
        print 'Catastrophic failure initializing GUI, bailing out...'
        QCoreApplication.exit(1)
        raise SystemExit(1)
    def initialize_db_stage2(self, db, tb):
        """Second stage of DB init: *db* is None if opening/repair failed,
        *tb* carries the repair traceback if the repair itself failed."""
        if db is None and tb is not None:
            # DB Repair failed
            error_dialog(self.splash_screen, _('Repairing failed'),
                    _('The database repair failed. Starting with '
                        'a new empty library.'),
                    det_msg=tb, show=True)
        if db is None:
            # Ask the user for a fresh library location.
            candidate = choose_dir(self.splash_screen, 'choose calibre library',
                _('Choose a location for your new calibre e-book library'),
                default_dir=get_default_library_path())
            if not candidate:
                self.initialization_failed()
            try:
                self.library_path = candidate
                db = LibraryDatabase2(candidate)
            except:
                error_dialog(self.splash_screen, _('Bad database location'),
                        _('Bad database location %r. calibre will now quit.'
                            )%self.library_path,
                        det_msg=traceback.format_exc(), show=True)
                self.initialization_failed()
        self.start_gui(db)
    def initialize_db(self):
        """Open the library database, offering an automatic repair when it
        appears to be corrupted."""
        db = None
        try:
            db = LibraryDatabase2(self.library_path)
        except (sqlite.Error, DatabaseException):
            repair = question_dialog(self.splash_screen, _('Corrupted database'),
                    _('The library database at %s appears to be corrupted. Do '
                    'you want calibre to try and rebuild it automatically? '
                    'The rebuild may not be completely successful. '
                    'If you say No, a new empty calibre library will be created.')
                    % force_unicode(self.library_path, filesystem_encoding),
                    det_msg=traceback.format_exc()
                    )
            if repair:
                if repair_library(self.library_path):
                    db = LibraryDatabase2(self.library_path)
        except:
            error_dialog(self.splash_screen, _('Bad database location'),
                    _('Bad database location %r. Will start with '
                    ' a new, empty calibre library')%self.library_path,
                    det_msg=traceback.format_exc(), show=True)
        self.initialize_db_stage2(db, None)
    def show_splash_screen(self):
        """Display the splash screen while books are loading."""
        self.splash_pixmap = QPixmap()
        self.splash_pixmap.load(I('library.png'))
        self.splash_screen = QSplashScreen(self.splash_pixmap)
        self.splash_screen.showMessage(_('Starting %s: Loading books...') %
                __appname__)
        self.splash_screen.show()
        QApplication.instance().processEvents()
    def initialize(self, *args):
        """Entry point, invoked via QTimer once the event loop is running."""
        if gprefs['show_splash_screen']:
            self.show_splash_screen()
        self.library_path = get_library_path(parent=self.splash_screen)
        if not self.library_path:
            self.initialization_failed()
        self.initialize_db()
def run_in_debug_mode(logpath=None):
e = sys.executable if getattr(sys, 'frozen', False) else sys.argv[0]
import tempfile, subprocess
fd, logpath = tempfile.mkstemp('.txt')
os.close(fd)
if hasattr(sys, 'frameworks_dir'):
base = os.path.dirname(sys.frameworks_dir)
if 'console.app' not in base:
base = os.path.join(base, 'console.app', 'Contents')
exe = os.path.basename(e)
exe = os.path.join(base, 'MacOS', exe+'-debug')
else:
base, ext = os.path.splitext(e)
exe = base + '-debug' + ext
print 'Starting debug executable:', exe
creationflags = 0
if iswindows:
import win32process
creationflags = win32process.CREATE_NO_WINDOW
subprocess.Popen([exe, '--gui-debug', logpath], stdout=open(logpath, 'w'),
stderr=subprocess.STDOUT, stdin=open(os.devnull, 'r'),
creationflags=creationflags)
def run_gui(opts, args, actions, listener, app, gui_debug=None):
    """Run the main GUI event loop; returns the application exit code.

    Handles the first-run welcome wizard, restart-after-quit and the
    display of collected debug output after shutdown.
    """
    initialize_file_icon_provider()
    if not dynamic.get('welcome_wizard_was_run', False):
        from calibre.gui2.wizard import wizard
        wizard().exec_()
        dynamic.set('welcome_wizard_was_run', True)
    runner = GuiRunner(opts, args, actions, listener, app, gui_debug=gui_debug)
    ret = app.exec_()
    if getattr(runner.main, 'run_wizard_b4_shutdown', False):
        from calibre.gui2.wizard import wizard
        wizard().exec_()
    if getattr(runner.main, 'restart_after_quit', False):
        e = sys.executable if getattr(sys, 'frozen', False) else sys.argv[0]
        if getattr(runner.main, 'debug_on_restart', False):
            run_in_debug_mode()
        else:
            import subprocess
            print 'Restarting with:', e, sys.argv
            if hasattr(sys, 'frameworks_dir'):
                # OS X bundle: re-open the .app after a short delay.
                app = os.path.dirname(os.path.dirname(sys.frameworks_dir))
                subprocess.Popen('sleep 3s; open '+app, shell=True)
            else:
                subprocess.Popen([e] + sys.argv[1:])
    else:
        if iswindows:
            try:
                runner.main.system_tray_icon.hide()
            except:
                pass
    if getattr(runner.main, 'gui_debug', None) is not None:
        # Show the collected debug log in a separate process.
        e = sys.executable if getattr(sys, 'frozen', False) else sys.argv[0]
        import subprocess
        creationflags = 0
        if iswindows:
            import win32process
            creationflags = win32process.CREATE_NO_WINDOW
        subprocess.Popen([e, '--show-gui-debug', runner.main.gui_debug],
            creationflags=creationflags, stdout=open(os.devnull, 'w'),
            stderr=subprocess.PIPE, stdin=open(os.devnull, 'r'))
    return ret
def cant_start(msg=_('If you are sure it is not running')+', ',
        what=None):
    """Show an 'already running' error dialog and exit with status 1.

    *msg* is the lead-in sentence; *what* is the suggested remedy (a
    platform-appropriate default is used when None).
    """
    base = '<p>%s</p><p>%s %s'
    where = __appname__ + ' '+_('may be running in the system tray, in the')+' '
    if isosx:
        where += _('upper right region of the screen.')
    else:
        where += _('lower right region of the screen.')
    if what is None:
        if iswindows:
            what = _('try rebooting your computer.')
        else:
            # On Unix the single-instance marker is a socket file.
            what = _('try deleting the file')+': '+ gui_socket_address()
    info = base%(where, msg, what)
    error_dialog(None, _('Cannot Start ')+__appname__,
        '<p>'+(_('%s is already running.')%__appname__)+'</p>'+info, show=True)
    raise SystemExit(1)
def build_pipe(print_error=True):
    """Connect to a running calibre instance's IPC channel.

    Returns the connected RC thread. If no connection is established
    within 3 seconds, shows the 'cannot start' dialog and exits.
    """
    t = RC(print_error=print_error)
    t.start()
    t.join(3.0)
    if t.is_alive():
        # Fix: iswindows is a boolean flag, not a callable; calling it
        # raised TypeError before the error dialog could be shown.
        if iswindows:
            cant_start()
        else:
            f = os.path.expanduser('~/.calibre_calibre GUI.lock')
            cant_start(what=_('try deleting the file')+': '+f)
        raise SystemExit(1)
    return t
def shutdown_other(rc=None):
    """Ask a running calibre instance to shut down and wait for it.

    *rc* is an already-connected pipe (see build_pipe); when None a
    connection is attempted first. Exits with status 1 if the instance
    does not shut down within ~5 seconds.
    """
    if rc is None:
        rc = build_pipe(print_error=False)
    if rc.conn is None:
        prints(_('No running calibre found'))
        return # No running instance found
    from calibre.utils.lock import singleinstance
    rc.conn.send('shutdown:')
    prints(_('Shutdown command sent, waiting for shutdown...'))
    # Poll the single-instance lock; it becomes acquirable once the other
    # instance has released it.
    for i in xrange(50):
        if singleinstance('calibre GUI'):
            return
        time.sleep(0.1)
    prints(_('Failed to shutdown running calibre instance'))
    raise SystemExit(1)
def communicate(opts, args):
    """Forward the command line to an already-running calibre, then exit."""
    pipe = build_pipe()
    if opts.shutdown_running_calibre:
        shutdown_other(pipe)
    else:
        # Ship any ebook path as an absolute path, since the running
        # instance may have a different working directory.
        if len(args) > 1:
            args[1] = os.path.abspath(args[1])
        pipe.conn.send('launched:'+repr(args))
    pipe.conn.close()
    raise SystemExit(0)
def main(args=sys.argv):
    """Entry point: start the GUI, or hand off to a running instance."""
    gui_debug = None
    if args[0] == '__CALIBRE_GUI_DEBUG__':
        # Re-launched by run_in_debug_mode(); args[1] is the log path.
        gui_debug = args[1]
        args = ['calibre']
    try:
        app, opts, args, actions = init_qt(args)
    except AbortInit:
        return 1
    from calibre.utils.lock import singleinstance
    from multiprocessing.connection import Listener
    si = singleinstance('calibre GUI')
    if si and opts.shutdown_running_calibre:
        # We hold the lock, so there is nothing to shut down.
        return 0
    if si:
        # We are the only instance; bind the IPC socket and run the GUI.
        try:
            listener = Listener(address=gui_socket_address())
        except socket.error:
            if iswindows:
                cant_start()
            # Stale socket file left over from a crash; remove and retry.
            if os.path.exists(gui_socket_address()):
                os.remove(gui_socket_address())
            try:
                listener = Listener(address=gui_socket_address())
            except socket.error:
                cant_start()
            else:
                return run_gui(opts, args, actions, listener, app,
                        gui_debug=gui_debug)
        else:
            return run_gui(opts, args, actions, listener, app,
                    gui_debug=gui_debug)
    # Lock not acquired: decide whether another instance really exists.
    otherinstance = False
    try:
        listener = Listener(address=gui_socket_address())
    except socket.error: # Good, si is correct (on UNIX)
        otherinstance = True
    else:
        # On windows only singleinstance can be trusted
        otherinstance = True if iswindows else False
    if not otherinstance and not opts.shutdown_running_calibre:
        return run_gui(opts, args, actions, listener, app, gui_debug=gui_debug)
    communicate(opts, args)
    return 0
if __name__ == '__main__':
    try:
        sys.exit(main())
    except Exception as err:
        # On Windows there may be no console, so show crashes in a dialog.
        if not iswindows: raise
        tb = traceback.format_exc()
        from PyQt4.QtGui import QErrorMessage
        logfile = os.path.join(os.path.expanduser('~'), 'calibre.log')
        # Fix: log was previously left undefined when the logfile did not
        # exist, causing a NameError inside this crash handler.
        log = ''
        if os.path.exists(logfile):
            log = open(logfile).read().decode('utf-8', 'ignore')
        d = QErrorMessage()
        d.showMessage(('<b>Error:</b>%s<br><b>Traceback:</b><br>'
            '%s<b>Log:</b><br>%s')%(unicode(err),
                unicode(tb).replace('\n', '<br>'),
                log.replace('\n', '<br>')))
| yeyanchao/calibre | src/calibre/gui2/main.py | Python | gpl-3.0 | 17,746 |
#
# Copyright (C) 2009, 2010 UNINETT AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License version 2 as published by the Free
# Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details. You should have received a copy of the GNU General Public
# License along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
# pylint: disable=R0903, R0913
"""Graph representation and manipulation."""
import logging
from math import sqrt
from nav.web.geomap.utils import (map_dict, nansafe_max, identity, first,
group, avg, filter_dict, subdict,
map_dict_lazy)
_logger = logging.getLogger('nav.web.geomap.graph')
# Specifications of how to combine the properties when combining nodes
# and edges:
# Aggregator spec for nodes representing 'places' (sets of rooms); see
# aggregate_properties() for the (function, property) / function formats.
AGGREGATE_PROPERTIES_PLACE = {
    'load': (nansafe_max, 'load'),
    'num_rooms': len,
    'num_netboxes': (sum, 'num_netboxes'),
    'rooms': identity
    }
# Aggregator spec for nodes representing rooms (sets of netboxes).
AGGREGATE_PROPERTIES_ROOM = {
    'id': (first, 'roomid'),
    'descr': (first, 'room_descr'),
    'load': (nansafe_max, 'load'),
    'num_netboxes': len,
    'netboxes': identity
    }
# Aggregator spec for combined edges. The lambda refers to combine_ids,
# which is defined later in this module (resolved at call time).
AGGREGATE_PROPERTIES_EDGE = {
    'id': lambda edges: 'ce[%s]' % combine_ids(edges, lambda e: e['id']),
    'num_edges': len,
    'capacity': (sum, 'capacity'),
    'load_in': (sum, 'load_in'),
    'load_out': (sum, 'load_out'),
    'subedges': identity
    }
def build_graph(db_results):
    """Make a Graph object based on the dictionaries resulting from get_data.

    *db_results* is a (netboxes, connections) pair; connections whose
    endpoints did not both produce nodes are skipped.
    """
    netboxes, connections = db_results
    graph = Graph()
    # One node per netbox:
    for netbox in netboxes:
        node = Node(netbox['netboxid'], netbox['lon'], netbox['lat'], netbox)
        graph.add_node(node)
    # One edge per connection with both endpoints present in the graph:
    for connection in connections.values():
        forward = connection['forward']
        reverse = connection['reverse']
        if (forward['local_netboxid'] not in graph.nodes or
                reverse['local_netboxid'] not in graph.nodes):
            continue
        graph.add_edge(Edge(forward['id'], reverse['id'],
                            graph.nodes[forward['local_netboxid']],
                            graph.nodes[reverse['local_netboxid']],
                            forward, reverse))
    return graph
def simplify(graph, bounds, viewport_size, limit):
    """Remove and combine edges and nodes in a graph.
    Objects outside the interesting area (given by bounds) are
    removed, and those that are inside are combined so that they are
    not too close together (based on viewport_size and limit).
    Arguments:
    graph -- the Graph object to simplify. It is destructively
    modified.
    bounds -- a dictionary with keys (minLon, maxLon, minLat, maxLat)
    describing the bounds of the interesting region.
    viewport_size -- a dictionary with keys (width, height), the width
    and height of the user's viewport for the map in pixels.
    limit -- the minimum distance (in pixels) there may be between two
    points without them being collapsed to one.
    """
    # Order matters: filter first, then netboxes->rooms, rooms->places,
    # and finally merge parallel edges between the combined nodes.
    area_filter(graph, bounds)
    create_rooms(graph)
    create_places(graph, bounds, viewport_size, limit)
    combine_edges(graph, AGGREGATE_PROPERTIES_EDGE)
def area_filter(graph, bounds):
    """Restrict *graph* to the geographical area given by *bounds*.

    Edges are kept when at least one endpoint lies inside the bounds;
    nodes are kept when they are an endpoint of a kept edge, even if
    they themselves lie outside the bounds.

    Arguments:
    graph -- the Graph object to filter; destructively modified.
    bounds -- dict with keys (minLon, maxLon, minLat, maxLat).
    """
    def inside(node):
        """Return True when node lies within bounds."""
        return (bounds['minLon'] <= node.lon <= bounds['maxLon'] and
                bounds['minLat'] <= node.lat <= bounds['maxLat'])
    kept_nodes = filter_dict(inside, graph.nodes)
    def touches_kept(edge):
        """Return True when edge has an endpoint among the kept nodes."""
        return edge.source.id in kept_nodes or edge.target.id in kept_nodes
    kept_edges = filter_dict(touches_kept, graph.edges)
    # Also retain outside nodes that are endpoints of kept edges:
    endpoint_ids = (set(kept_nodes.keys())
                    | set([e.source.id for e in kept_edges.values()])
                    | set([e.target.id for e in kept_edges.values()]))
    graph.nodes = subdict(graph.nodes, endpoint_ids)
    graph.edges = kept_edges
def create_rooms(graph):
    """Convert a graph of netboxes to a graph of rooms.

    Collapses the netbox nodes so that there is one node per room; each
    room node gets a 'netboxes' property listing the original nodes (see
    AGGREGATE_PROPERTIES_ROOM).

    Arguments:
    graph -- a Graph object; destructively modified.
    """
    by_room = group(lambda node: node.properties['roomid'],
                    graph.nodes.values())
    collapse_nodes(graph, by_room, AGGREGATE_PROPERTIES_ROOM)
def create_places(graph, bounds, viewport_size, limit):
    """Convert a graph of rooms to a graph of 'places'.
    A 'place' is a set of one or more rooms. The position of a place
    is the average of the positions of its rooms. The places are
    created such that no two places are closer than limit to each
    other. Each place node has a property 'rooms' (available as
    placenode.properties['rooms']) which is a list of the room nodes
    it is based on.
    Arguments:
    graph -- a Graph object. It is destructively modified.
    bounds -- a dictionary with keys (minLon, maxLon, minLat, maxLat)
    describing the bounds of the interesting region.
    viewport_size -- a dictionary with keys (width, height), the width
    and height of the user's viewport for the map in pixels.
    limit -- the minimum distance (in pixels) there may be between two
    points without them being collapsed to one.
    """
    # TODO:
    #
    # -- This may give division by zero with bogus input (should check
    # for zeros -- what should we do then?)
    #
    # -- Should take into account that longitudes wrap around. Is
    # there any way to detect whether we have a map wider than the
    # earth, or do we need an extra parameter?
    width = bounds['maxLon'] - bounds['minLon']
    height = bounds['maxLat'] - bounds['minLat']
    # Degrees-to-pixels scale factors for the current viewport:
    lon_scale = float(viewport_size['width']) / width
    lat_scale = float(viewport_size['height']) / height
    def square(var):
        """Square a number"""
        return var * var
    def distance(node1, node2):
        """Pixel distance between two nodes at the current scale"""
        return sqrt(square((node1.lon - node2.lon) * lon_scale) +
                    square((node1.lat - node2.lat) * lat_scale))
    # Greedy clustering: assign each node to the first place within
    # `limit` pixels, updating that place's centroid; otherwise start a
    # new place at the node's position.
    places = []
    for node in graph.nodes.values():
        for place in places:
            if distance(node, place['position']) < limit:
                place['rooms'].append(node)
                place['position'].lon = avg([n.lon for n in place['rooms']])
                place['position'].lat = avg([n.lat for n in place['rooms']])
                break
        else:
            places.append({'position': Node(None, node.lon, node.lat, None),
                           'rooms': [node]})
    collapse_nodes(graph,
                   [place['rooms'] for place in places],
                   AGGREGATE_PROPERTIES_PLACE)
def collapse_nodes(graph, node_sets, property_aggregators):
    """Collapse sets of nodes to single nodes.
    Replaces each set of nodes in node_sets by a single (new) node and
    redirects the edges correspondingly. Edges which would end up
    having both endpoints in the same node are removed.
    Each new node is positioned at the average of the positions of the
    node set it represents.
    Properties from the original nodes may be combined to form
    aggregate values in the new node. The property_aggregators
    argument determines how (and whether) this is done. Some useful
    aggregator functions are sum and avg (for numbers) and lambda lst:
    ', '.join(map(str, lst)).
    Arguments:
    graph -- a Graph object. It is destructively modified.
    node_sets -- a list of lists of nodes in graph. Each node should
    occur in exactly one of the lists.
    property_aggregators -- describes how to create aggregate
    properties. Dictionary with names of properties as keys and
    aggregator functions as corresponding values. Each aggregator
    function should take a single argument, a list.
    """
    if property_aggregators is None:
        property_aggregators = {}
    graph.nodes = {}
    # Maps each original node id to the new node that replaces it:
    nodehash = {}
    for node_set in node_sets:
        properties = aggregate_properties(
            [x.properties for x in node_set],
            property_aggregators)
        new_node = Node('cn[%s]' % combine_ids(node_set),
                        avg([n.lon for n in node_set]),
                        avg([n.lat for n in node_set]),
                        properties)
        for node in node_set:
            nodehash[node.id] = new_node
        graph.add_node(new_node)
    # Now nodehash maps original node ids to new node objects. Use it
    # to redirect the edges to the new nodes:
    for edge in graph.edges.values():
        edge.source = nodehash[edge.source.id]
        edge.target = nodehash[edge.target.id]
    graph.edges = filter_dict(lambda e: e.source != e.target, graph.edges)
def combine_ids(objects, get_id=lambda o: o.id):
    """Combine the IDs of a list (or set) of objects into one string.

    Used when generating IDs for collapsed objects. The IDs are sorted,
    so the result depends only on the set of objects, not their order.
    """
    sorted_ids = sorted(str(get_id(obj)) for obj in objects)
    return ';'.join(sorted_ids)
def aggregate_properties(objects, aggregators):
    """Combine the properties of a list of objects into a lazy dictionary.

    Arguments:
    objects -- a list of Node or Edge objects (or property dicts).
    aggregators -- dictionary specifying how to combine the properties.
    For each item a property with the same key is created. A plain
    function value is called on the list of objects; a (function, prop)
    pair calls the function on the list of each object's value for
    property prop.
    """
    def run_aggregator(aggregator):
        """Evaluate one aggregator spec against the objects."""
        if isinstance(aggregator, tuple):
            fun, prop = aggregator
            values = [obj[prop] for obj in objects]
            return fun(values)
        return aggregator(objects)
    return map_dict_lazy(run_aggregator, aggregators)
def combine_edges(graph, property_aggregators):
    """Combine edges with the same endpoints.

    Replaces the edges in graph with new edge objects, where any set
    of edges between the same two nodes is replaced by a single edge
    (see create_edge for the aggregated properties).

    Arguments:
    graph -- a Graph object. It is destructively modified.
    property_aggregators -- aggregator spec passed to create_edge.
    """
    if property_aggregators is None:
        property_aggregators = {}
    edges_by_node = dict([(node_id, set()) for node_id in graph.nodes])
    for edge in graph.edges.values():
        edges_by_node[edge.source.id].add(edge)
        edges_by_node[edge.target.id].add(edge)
    edge_sets = {}
    for edge in graph.edges.values():
        if edge.id in edge_sets:
            continue
        # All edges sharing both endpoints with this edge:
        eset = list(edges_by_node[edge.source.id] &
                    edges_by_node[edge.target.id])
        # Fix: key by edge id, so the membership test above actually
        # recognizes already-processed edges. Previously the Edge objects
        # themselves were used as keys, so the test never matched and each
        # parallel edge redundantly rebuilt and re-aggregated its set.
        for member in eset:
            edge_sets[member.id] = eset
    edge_sets = map_dict(equalize_edge_orientation, edge_sets)
    edges = [create_edge(x, property_aggregators) for x in edge_sets.values()]
    # Duplicate combined edges (one per member of a set) share an id and
    # collapse to a single entry here, as before.
    graph.edges = dict([(e.id, e) for e in edges])
def create_edge(eset, property_aggregators):
    """Create one combined Edge from a list of parallel edges.

    The combined edge's forward/reverse ids are derived from the member
    edges' ids, and its per-direction data is aggregated according to
    property_aggregators (see aggregate_properties).
    """
    return Edge('ce[%s]' % combine_ids(eset),
                'ce[%s]' % combine_ids(eset, lambda e: e.reverse_id),
                eset[0].source,
                eset[0].target,
                aggregate_properties([x.source_data for x in eset],
                                     property_aggregators),
                aggregate_properties([x.target_data for x in eset],
                                     property_aggregators))
def equalize_edge_orientation(edges):
    """Make all edges have the same direction.

    The first edge's orientation is used as the reference; any edge
    pointing the other way is replaced by its reverse (see reverse_edge).

    Arguments:
    edges -- list of edges between the same pair of nodes.
    """
    reference = edges[0]
    oriented = []
    for edge in edges:
        if edge.source == reference.source:
            oriented.append(edge)
        else:
            oriented.append(reverse_edge(edge))
    return oriented
def reverse_edge(edge):
    """Reverse the direction of an edge.
    Returns a new Edge object; the argument is not modified.
    """
    # Swap forward/reverse ids, endpoints and per-direction data together.
    return Edge(edge.reverse_id, edge.id,
                edge.target, edge.source,
                edge.target_data, edge.source_data)
class Node:
    """A node in a geographical graph: an id, a position and properties."""
    def __init__(self, node_id, lon, lat, properties):
        # Identifier, longitude/latitude position and a property mapping.
        self.id, self.lon, self.lat = node_id, lon, lat
        self.properties = properties
class Edge:
    """An edge in a geographical graph, with ids and data per direction."""
    def __init__(self, edge_id, reverse_id, source, target, source_data,
                 target_data):
        # Forward and reverse identifiers, the endpoint nodes, and the
        # per-direction data dictionaries.
        self.id, self.reverse_id = edge_id, reverse_id
        self.source, self.target = source, target
        self.source_data, self.target_data = source_data, target_data
class Graph:
    """A graph of geographical positions, with nodes and edges by id."""
    def __init__(self):
        # Both mappings are keyed by the object's own id attribute.
        self.nodes = {}
        self.edges = {}
    def add_node(self, node):
        """Register *node* under its id."""
        self.nodes[node.id] = node
    def add_edge(self, edge):
        """Register *edge* under its id."""
        self.edges[edge.id] = edge
| alexanderfefelov/nav | python/nav/web/geomap/graph.py | Python | gpl-2.0 | 15,044 |
import MOCs
from utils import validate
from TestCase.MVSTestCase import *
class TestDIDSend(MVSTestCaseBase):
    """Tests for sending ETP to DIDs/addresses (didsend_etp[_from])."""
    @classmethod
    def setUpClass(cls):
        #check if the did are created.
        ec, message = mvs_rpc.list_dids()
        if ec != 0:
            return
        exist_symbols = [i["symbol"] for i in message['dids']]
        assert (Alice.did_symbol in exist_symbols)
        assert (Bob.did_symbol in exist_symbols)
    def test_0_didsend_etp(self):
        """Send ETP by DID symbol and by plain address."""
        #send to did
        tx_hash = Alice.didsend_etp(Bob.did_symbol, 12345)
        Alice.mining()
        validate.validate_tx(self, tx_hash, Alice, Bob, 12345, 10**4)
        #send to address
        tx_hash = Alice.didsend_etp(Zac.mainaddress(), 54321)
        Alice.mining()
        validate.validate_tx(self, tx_hash, Alice, Zac, 54321, 10 ** 4)
    def test_1_didsend_etp_from(self):
        """Send ETP for every from/to combination of DID and address."""
        # did -> did
        tx_hash = Alice.didsend_etp_from(Alice.did_symbol, Bob.did_symbol, 12345)
        Alice.mining()
        validate.validate_tx(self, tx_hash, Alice, Bob, 12345, 10 ** 4)
        # did -> addr
        tx_hash = Alice.didsend_etp_from(Alice.did_symbol, Zac.mainaddress(), 54321)
        Alice.mining()
        validate.validate_tx(self, tx_hash, Alice, Zac, 54321, 10 ** 4)
        # addr -> did
        tx_hash = Alice.didsend_etp_from(Alice.mainaddress(), Bob.did_symbol, 56789)
        Alice.mining()
        validate.validate_tx(self, tx_hash, Alice, Bob, 56789, 10 ** 4)
        # addr -> addr
        tx_hash = Alice.didsend_etp_from(Alice.mainaddress(), Bob.mainaddress(), 98765)
        Alice.mining()
        validate.validate_tx(self, tx_hash, Alice, Bob, 98765, 10 ** 4)
class TestDIDSendMore(MVSTestCaseBase):
    """Tests for sending ETP to multiple receivers in one tx (didsendmore)."""
    def test_0_didsend_more(self):
        """Send to a mix of addresses and DIDs with various change targets."""
        receivers = {
            Bob.mainaddress(): 100000,
            Cindy.did_symbol: 100001,
            Dale.mainaddress(): 100002,
            Eric.did_symbol: 100003,
        }
        specific_fee = 12421
        # change is an explicit address, with an explicit fee
        ec, message = mvs_rpc.didsendmore(Alice.name, Alice.password, receivers, Alice.addresslist[1], specific_fee)
        self.assertEqual(ec, 0, message)
        Alice.mining()
        # change is did
        ec, message = mvs_rpc.didsendmore(Alice.name, Alice.password, receivers, Frank.did_symbol)
        self.assertEqual(ec, 0, message)
        Alice.mining()
        # change is None
        ec, message = mvs_rpc.didsendmore(Alice.name, Alice.password, receivers)
        self.assertEqual(ec, 0, message)
        Alice.mining()
    def test_1_didsend_more(self):
        """Send where one receiver is a freshly registered DID."""
        did_symbol = 'Zac@'+common.get_random_str()
        Alice.send_etp(Zac.mainaddress(), 10**8)
        Alice.mining()
        Zac.register_did(symbol=did_symbol)
        Alice.mining()
        receivers = {
            Zac.mainaddress(): 100000,
            did_symbol: 200000,
            Cindy.did_symbol: 100001,
            Dale.mainaddress(): 100002,
            Eric.did_symbol: 100003,
        }
        ec, message = mvs_rpc.didsendmore(Alice.name, Alice.password, receivers, Alice.did_symbol)
        self.assertEqual(ec, 0, message)
        Alice.mining()
        # Zac receives via both his address and his new DID: 100000 + 200000.
        self.assertEqual(300000,Zac.get_balance(),"sendmore failed")
class TestDIDSendAsset(MVSTestCaseBase):
    """Tests for sending assets to DIDs (didsend_asset[_from])."""
    @classmethod
    def setUpClass(cls):
        # check if the did are created.
        ec, message = mvs_rpc.list_dids()
        if ec != 0:
            return
        exist_symbols = [i["symbol"] for i in message['dids']]
        assert (Alice.did_symbol in exist_symbols)
        assert (Bob.did_symbol in exist_symbols)
    def get_asset_amount(self, role, asset_symbol):
        """Return role's holding of asset_symbol, scaled by its decimals."""
        addressassets = role.get_addressasset(role.mainaddress())
        addressasset = list(filter(lambda a: a.symbol == asset_symbol, addressassets))
        if len(addressasset) == 1:
            previous_quantity = addressasset[0].quantity
            previous_decimal = addressasset[0].decimal_number
            return previous_quantity * (10 ** previous_decimal)
        elif len(addressasset) == 0:
            return 0
        # More than one entry per symbol is unexpected; fail with the data.
        # (Was self.assertEqual(0, 1, addressasset) -- an always-failing
        # comparison; self.fail is the idiomatic unconditional failure.)
        self.fail(addressasset)
    def test_2_didsend_asset(self):
        """Send 1 unit of a fresh asset to Bob's DID and check balances."""
        domain_symbol, asset_symbol = Alice.create_random_asset()
        Alice.mining()
        # send to did
        pA = self.get_asset_amount(Alice, asset_symbol)
        pB = self.get_asset_amount(Bob, asset_symbol)
        tx_hash = Alice.didsend_asset(Bob.did_symbol, 1, asset_symbol)
        Alice.mining()
        cA = self.get_asset_amount(Alice, asset_symbol)
        cB = self.get_asset_amount(Bob, asset_symbol)
        self.assertEqual(pA, cA + 1)
        self.assertEqual(pB, cB - 1)
    def test_3_didsend_asset_from(self):
        """Send 1 unit from Alice's DID to Bob's DID and check balances."""
        domain_symbol, asset_symbol = Alice.create_random_asset()
        Alice.mining()
        # send to did
        pA = self.get_asset_amount(Alice, asset_symbol)
        pB = self.get_asset_amount(Bob, asset_symbol)
        tx_hash = Alice.didsend_asset_from(Alice.did_symbol, Bob.did_symbol, 1, asset_symbol)
        Alice.mining()
        cA = self.get_asset_amount(Alice, asset_symbol)
        cB = self.get_asset_amount(Bob, asset_symbol)
        self.assertEqual(pA, cA + 1)
        self.assertEqual(pB, cB - 1)
class Testdidcommon(MVSTestCaseBase):
    """DID registration and address-change behaviour."""
    def test_1_registerdid(self):
        # DID symbols may contain these special characters.
        special_symbol=['@','.','-','_']
        optional = {}
        for i in range(len(special_symbol)):
            optional[Zac.addresslist[i]] = 10**8
        # Fund one Zac address per special character under test.
        mvs_rpc.sendmore(Alice.name, Alice.password, optional)
        Alice.mining()
        for i ,symbol in enumerate(special_symbol):
            did_symbol = '%s%stest%d%s'%(Zac.did_symbol,symbol,i,common.get_random_str())
            ec, message = Zac.register_did(Zac.addresslist[i], did_symbol)
            self.assertEqual(ec, 0, message)
            Alice.mining()
            self.assertEqual(Zac.get_didaddress(did_symbol), Zac.addresslist[i], 'Failed when registerdid with:'+symbol)
    def test_2_didchangeaddress(self):
        did_symbol = 'Zac@'+common.get_random_str()
        Alice.send_etp(Zac.mainaddress(), 10**8)
        Alice.mining()
        ec, message = Zac.register_did(symbol=did_symbol)
        self.assertEqual(ec, 0, message)
        Alice.mining()
        self.assertEqual(Zac.get_didaddress(did_symbol), Zac.mainaddress(), 'Failed when registerdid with:'+did_symbol)
        # Move the DID to a secondary address ...
        Alice.send_etp(Zac.addresslist[1], 10**4)
        Alice.mining()
        ec, message = mvs_rpc.change_did(Zac.name, Zac.password, Zac.addresslist[1], did_symbol)
        self.assertEqual(ec, 0, message)
        Alice.mining()
        self.assertEqual(Zac.get_didaddress(did_symbol), Zac.addresslist[1], 'Failed when registerdid with:'+did_symbol)
        # ... and back to the main address again.
        Alice.send_etp(Zac.mainaddress(), 10**4)
        Alice.mining()
        ec, message = mvs_rpc.change_did(Zac.name, Zac.password, Zac.mainaddress(), did_symbol)
        self.assertEqual(ec, 0, message)
        Alice.mining()
        self.assertEqual(Zac.get_didaddress(did_symbol), Zac.mainaddress(), 'Failed when registerdid with:'+did_symbol)
class TestdidUTXOcommon(MVSTestCaseBase):
    """Exercise repeated DID-related spends before mining.

    Each DID operation is immediately followed by another didsend in the
    same (unmined) window, checking that spending from a DID does not
    lock or corrupt its UTXO.
    """
    def test_didsend_twice(self):
        Alice.send_etp(Zac.mainaddress(), 10**10)
        Alice.mining()
        ##registerdid
        did_symbol = 'Zac@'+common.get_random_str()
        ec, message = Zac.register_did(symbol=did_symbol)
        self.assertEqual(ec, 0, message)
        ec, message = mvs_rpc.didsend(Zac.name,Zac.password, Alice.did_symbol,10000)
        self.assertEqual(ec, 0, message)
        Alice.mining()
        #didsendfrom
        ec, message = mvs_rpc.didsend_from(Zac.name,Zac.password, did_symbol, Alice.mainaddress(),10000)
        self.assertEqual(ec, 0, message)
        ec, message = mvs_rpc.didsend_from(Zac.name,Zac.password, did_symbol, Alice.mainaddress(),10000)
        self.assertEqual(ec, 0, message)
        Alice.mining()
        #didsend
        ec, message = mvs_rpc.didsend(Zac.name,Zac.password, Alice.did_symbol,10000)
        self.assertEqual(ec, 0, message)
        ec, message = mvs_rpc.didsend(Zac.name,Zac.password, Alice.did_symbol,10000)
        self.assertEqual(ec, 0, message)
        Alice.mining()
        #didsendmore
        receivers = {
            Bob.mainaddress(): 100000,
            Cindy.did_symbol: 100001,
            Dale.mainaddress(): 100002,
            Eric.did_symbol: 100003,
        }
        ec, message = mvs_rpc.didsendmore(Zac.name, Zac.password, receivers, did_symbol, 10000)
        self.assertEqual(ec, 0, message)
        ec, message = mvs_rpc.didsendmore(Zac.name, Zac.password, receivers, did_symbol, 10000)
        self.assertEqual(ec, 0, message)
        Alice.mining()
        #create asset
        domain_symbol, asset_symbol = Zac.create_random_asset(did_symbol=did_symbol)
        ec, message = mvs_rpc.didsend(Zac.name,Zac.password, Alice.did_symbol,10000)
        self.assertEqual(ec, 0, message)
        Alice.mining()
        #sendasset
        ec, message = mvs_rpc.didsend_asset(Zac.name,Zac.password, Alice.did_symbol,asset_symbol, 100)
        self.assertEqual(ec, 0, message)
        ec, message = mvs_rpc.didsend(Zac.name,Zac.password, Alice.did_symbol,10000)
        self.assertEqual(ec, 0, message)
        Alice.mining()
        #sendassetfrom
        ec, message = mvs_rpc.didsend_asset_from(Zac.name,Zac.password, did_symbol,Alice.did_symbol,asset_symbol, 100)
        self.assertEqual(ec, 0, message)
        ec, message = mvs_rpc.didsend(Zac.name,Zac.password, Alice.did_symbol,10000)
        self.assertEqual(ec, 0, message)
        Alice.mining()
        #register mit
        mit_symbol = ("MIT." + common.get_random_str()).upper()
        content = "MIT of Zac: " + mit_symbol
        ec, message = mvs_rpc.register_mit(Zac.name, Zac.password, did_symbol, mit_symbol, content)
        self.assertEqual(ec, code.success, message)
        ec, message = mvs_rpc.didsend(Zac.name,Zac.password, Alice.did_symbol,10000)
        self.assertEqual(ec, 0, message)
        Alice.mining()
        # transfer mit
        ec, message = Zac.transfer_mit(Bob.did_symbol, mit_symbol)
        self.assertEqual(ec, code.success, message)
        ec, message = mvs_rpc.didsend(Zac.name,Zac.password, Alice.did_symbol,10000)
        self.assertEqual(ec, 0, message)
        Alice.mining()
        #issue cert
        cert_symbol = Zac.issue_naming_cert(domain_symbol,did_symbol)
        ec, message = mvs_rpc.didsend(Zac.name,Zac.password, Alice.did_symbol,10000)
        self.assertEqual(ec, 0, message)
        Alice.mining()
        #transfer cert
        ec, message = mvs_rpc.transfer_cert(Zac.name, Zac.password, Alice.did_symbol, cert_symbol,
                                            'naming',
                                            fee=None)
        self.assertEqual(ec, 0, message)
        ec, message = mvs_rpc.didsend(Zac.name,Zac.password, Alice.did_symbol,10000)
        self.assertEqual(ec, 0, message)
        Alice.mining()
| mvs-live/metaverse | test/test-rpc-v3/TestCase/Identity/test_did.py | Python | agpl-3.0 | 10,939 |
# NOTE: This file is a deliberate lint fixture for the pylint bear tests.
# The issues below (unused import, missing `self`) are intentional and
# must NOT be "fixed" -- the tests expect pylint to report them.
import sys
import platform # Unused import
print(sys.path)
class Something:
    # Intentionally missing the `self` parameter (pylint: no-self-argument).
    def method_without_self():
        pass
    def something(self):
        pass
| seblat/coala-bears | tests/python/test_files/pylint_test.py | Python | agpl-3.0 | 167 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import geoposition.fields
class Migration(migrations.Migration):
    """Replace Reply's threading fields with a one-to-one Inquiry link
    and a geoposition field.
    """
    dependencies = [
        ('beacon', '0004_auto_20150730_2114'),
    ]
    operations = [
        # Drop the old threading/code columns.
        migrations.RemoveField(
            model_name='reply',
            name='child',
        ),
        migrations.RemoveField(
            model_name='reply',
            name='code',
        ),
        migrations.RemoveField(
            model_name='reply',
            name='parent',
        ),
        # default=1 backfills existing rows; preserve_default=False keeps
        # the model itself free of that default afterwards.
        migrations.AddField(
            model_name='reply',
            name='inquiry',
            field=models.OneToOneField(default=1, to='beacon.Inquiry'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='reply',
            name='position',
            field=geoposition.fields.GeopositionField(max_length=42, default=1),
            preserve_default=False,
        ),
    ]
| SorenOlegnowicz/tracker | tracker/beacon/migrations/0005_auto_20150731_1659.py | Python | agpl-3.0 | 1,003 |
#!/usr/bin/env python
#
# Copyright 2011-2014 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
from datetime import datetime
import splunklib.client as client
try:
import utils
except ImportError:
raise Exception("Add the SDK repository to your PYTHONPATH to run the examples "
"(e.g., export PYTHONPATH=~/splunk-sdk-python.")
__all__ = [
    "AnalyticsTracker",
]
# Splunk index and sourcetype that analytics events are written to.
ANALYTICS_INDEX_NAME = "sample_analytics"
ANALYTICS_SOURCETYPE = "sample_analytics"
# Reserved event field names.
APPLICATION_KEY = "application"
EVENT_KEY = "event"
DISTINCT_KEY = "distinct_id"
# Marker placed between events so Splunk can break them into records.
EVENT_TERMINATOR = "\\r\\n-----end-event-----\\r\\n"
# Prefix applied to user-supplied properties so they cannot clash with
# the reserved field names above.
PROPERTY_PREFIX = "analytics_prop__"
class AnalyticsTracker:
    """Tracks analytics events by writing them to a Splunk index.

    Events are submitted as single lines of key="value" pairs; custom
    properties are namespaced with PROPERTY_PREFIX to avoid clashing
    with the reserved keys (application/event/distinct_id).
    """
    def __init__(self, application_name, splunk_info, index = ANALYTICS_INDEX_NAME):
        """Connect to Splunk and ensure the index and sourcetype exist.

        application_name -- logical name stamped on every tracked event
        splunk_info -- keyword arguments for splunklib.client.connect
        index -- Splunk index the events are written to
        """
        self.application_name = application_name
        self.splunk = client.connect(**splunk_info)
        self.index = index
        # Create the index on first use; later runs find it existing.
        if self.index not in self.splunk.indexes:
            self.splunk.indexes.create(self.index)
        assert self.index in self.splunk.indexes
        # Configure line breaking so each event (terminated by
        # EVENT_TERMINATOR) is indexed as a single record.
        if ANALYTICS_SOURCETYPE not in self.splunk.confs['props']:
            self.splunk.confs["props"].create(ANALYTICS_SOURCETYPE)
            stanza = self.splunk.confs["props"][ANALYTICS_SOURCETYPE]
            stanza.submit({
                "LINE_BREAKER": "(%s)" % EVENT_TERMINATOR,
                "CHARSET": "UTF-8",
                "SHOULD_LINEMERGE": "false"
            })
        assert ANALYTICS_SOURCETYPE in self.splunk.confs['props']
    @staticmethod
    def encode(props):
        """Encode a flat dict of properties as ' key="value" ' pairs.

        Nested dicts and lists are rejected.  Double quotes inside
        string values are converted to single quotes (crude escaping).
        """
        encoded = " "
        # BUG FIX: was props.iteritems(), which is Python-2-only;
        # items() works on both Python 2 and 3.
        for k, v in props.items():
            # We disallow dictionaries - it doesn't quite make sense.
            assert not isinstance(v, dict)
            # We do not allow lists
            assert not isinstance(v, list)
            # This is a hack to escape quotes
            if isinstance(v, str):
                v = v.replace('"', "'")
            encoded += ('%s%s="%s" ' % (PROPERTY_PREFIX, k, v))
        return encoded
    def track(self, event_name, time = None, distinct_id = None, **props):
        """Submit one event to the analytics index.

        time defaults to now (ISO format).  distinct_id optionally
        identifies the actor.  Remaining keyword args become custom
        properties and must not collide with the reserved keys.
        """
        if time is None:
            time = datetime.now().isoformat()
        event = '%s %s="%s" %s="%s" ' % (
            time,
            APPLICATION_KEY, self.application_name,
            EVENT_KEY, event_name)
        # Reserved keys must not be shadowed by custom properties.
        assert APPLICATION_KEY not in props
        assert EVENT_KEY not in props
        if distinct_id is not None:
            event += ('%s="%s" ' % (DISTINCT_KEY, distinct_id))
            assert DISTINCT_KEY not in props
        event += AnalyticsTracker.encode(props)
        self.splunk.indexes[self.index].submit(event, sourcetype=ANALYTICS_SOURCETYPE)
def main():
    """Parse .splunkrc-style options and construct a demo tracker."""
    usage = ""
    argv = sys.argv[1:]
    splunk_opts = utils.parse(argv, {}, ".splunkrc", usage=usage)
    tracker = AnalyticsTracker("cli_app", splunk_opts.kwargs)
    #tracker.track("test_event", "abc123", foo="bar", bar="foo")
if __name__ == "__main__":
    main()
| kkirsche/splunk-sdk-python | examples/analytics/input.py | Python | apache-2.0 | 3,592 |
import urllib2, urllib, urllister
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
while True:
print "Welcome"
print "Press Enter When Download Finishes and Q to quit"
raw_i=raw_input("Song Name and Artist: ")
x = urllib.quote_plus(raw_i)
site1 = urllib2.urlopen('http://www.youtube.com/results?search_query=%s'%x)
y = site1.read()
parser = urllister.URLLister()
parser.feed(y)
parser.close()
for url in parser.urls:
if "watch?v=" in url:
v = url
break
vid = ("http://www.youtube.com%s"%v)
driver = webdriver.Chrome()
driver.get("http://www.youtube-mp3.org/")
elem = driver.find_element_by_id("youtube-url")
elem.clear()
elem.send_keys(vid)
elem.send_keys(Keys.RETURN)
time.sleep(1)
download = driver.page_source
parser = urllister.URLLister()
parser.feed(download)
parser.close
for url in parser.urls:
if "/get?video_id" in url:
down = url
download_url = ("http://www.youtube-mp3.org%s"%down)
driver.get(download_url)
x = raw_input("")
driver.quit()
if x == 'q':
quit()
else:
pass
| ActiveState/code | recipes/Python/578530_Music_Downloader/recipe-578530.py | Python | mit | 1,243 |
# This file is licensed seperately of the rest of the codebase. This is due to
# BioPython's failure to merge https://github.com/biopython/biopython/pull/544
# in a timely fashion. Please use this file however you see fit!
#
#
# Copyright (c) 2015-2017 Center for Phage Technology. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY CENTER FOR PHAGE TECHNOLOGY "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL CENTER FOR PHAGE TECHNOLOGY OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from Bio import SeqIO
import tempfile
import sys
def parse_xmfa(xmfa):
    """Simple XMFA parser until https://github.com/biopython/biopython/pull/544

    Yields one LCB (locally collinear block) at a time; each LCB is a
    list of dicts with keys: rid, id, start, end, strand, file, seq,
    comment.
    """
    current_lcb = []
    current_seq = {}
    for line in xmfa.readlines():
        if line.startswith("#"):
            continue
        if line.strip() == "=":
            # "=" terminates an LCB; flush the in-progress sequence first.
            if "id" in current_seq:
                current_lcb.append(current_seq)
                current_seq = {}
            yield current_lcb
            current_lcb = []
        else:
            line = line.strip()
            if line.startswith(">"):
                # New sequence header; flush any previous sequence.
                if "id" in current_seq:
                    current_lcb.append(current_seq)
                    current_seq = {}
                data = line.strip().split()
                # 0 1 2 3 4 5
                # > 1:5986-6406 + CbK.fa # CbK_gp011
                id, loc = data[1].split(":")
                start, end = loc.split("-")
                current_seq = {
                    "rid": "_".join(data[1:]),
                    "id": id,
                    "start": int(start),
                    "end": int(end),
                    "strand": 1 if data[2] == "+" else -1,
                    "file": data[3],
                    "seq": "",
                    "comment": "",
                }
                if len(data) > 5:
                    current_seq["comment"] = " ".join(data[5:])
            else:
                # Sequence data lines accumulate onto the current record.
                current_seq["seq"] += line.strip()
# Template for one XMFA sequence header line (see to_xmfa / parse_xmfa).
HEADER_TPL = "> {id}:{start}-{end} {strand} {file} # {comment}\n"
def split_by_n(seq, n):
    """Yield successive chunks of *seq*, each at most *n* units long."""
    for offset in range(0, len(seq), n):
        yield seq[offset:offset + n]
def to_xmfa(lcbs, handle=sys.stdout):
    """Serialise parsed LCBs (as produced by parse_xmfa) back to XMFA."""
    handle.write("#FormatVersion Mauve1\n")
    for lcb in lcbs:
        for aln in lcb:
            handle.write(
                HEADER_TPL.format(
                    id=aln["id"],
                    start=aln["start"],
                    end=aln["end"],
                    strand="+" if aln["strand"] > 0 else "-",
                    file=aln["file"],
                    comment=aln["comment"],
                )
            )
            # Sequence data is wrapped at 80 columns.
            for line in split_by_n(aln["seq"], 80):
                handle.write(line + "\n")
        handle.write("=\n")
def percent_identity(a, b):
    """Calculate % identity, ignoring gaps in the host sequence *a*."""
    match = 0
    mismatch = 0
    for char_a, char_b in zip(a, b):
        if char_a == "-":
            # Gap in the host sequence: position is not counted at all.
            continue
        if char_a == char_b:
            match += 1
        else:
            mismatch += 1
    compared = match + mismatch
    if compared == 0:
        return 0.0
    return 100 * float(match) / compared
def id_tn_dict(sequences, tmpfile=False):
    """Figure out sequence IDs.

    sequences -- one FASTA path or a list of FASTA paths
    tmpfile -- when True, map each ID to a NamedTemporaryFile instead of
               the {record_id, len} info dict (caller cleans the files up)

    Returns a dict keyed by the 1-based sequence index as a string
    ("1", "2", ...), counting across all input files.
    """
    label_convert = {}
    if not isinstance(sequences, list):
        sequences = [sequences]
    i = 0
    for sequence_file in sequences:
        for record in SeqIO.parse(sequence_file, "fasta"):
            i += 1
            key = str(i)
            label_convert[key] = {"record_id": record.id, "len": len(record.seq)}
            if tmpfile:
                # delete=False: the caller owns (and must remove) the file.
                label_convert[key] = tempfile.NamedTemporaryFile(delete=False)
    # NOTE: a dead local tracking the first record id was removed; it was
    # assigned but never read or returned.
    return label_convert
| TAMU-CPT/galaxy-tools | tools/comparative/xmfa.py | Python | gpl-3.0 | 5,025 |
#TODO: Consider
# - Saving unwritten events locally when we cannot write to server
# - Download events to a local file
import re
from icalendar import Calendar, Event
from httplib import BadStatusLine
from events import *
import myProduct
WebDAVAvailable = False
importerr = ""
try:
from urlparse import urlparse, urlunsplit
except:
importerr = "missing urlparse, therefore, no CalDAV support!"
#WebDAVAvailable = False
else:
try:
import dav
except:
importerr = "can't import 'dav' directory in iCalendarEvent Control's directory; no CalDAV support!"
#WebDAVAvailable = False
else:
WebDAVAvailable = True
# Number of pollForChanges() calls between real server polls; the original
# note says this is ~3 minutes total (the caller's polling interval is not
# visible here -- TODO confirm).
POLL_CYCLES = 18
# Eliminate all CREATED tags. Some apps (*ahem* Google Calendar) set this
# to a weird, illegal date (00001231T000000Z).
# We have to catch it before we try to create a Calendar from it.
re_CREATED = re.compile("^CREATED.*", re.MULTILINE)
#
# CalDAV handler
#
class CalDAVbackend:
    """
    Calendar backend backed by a remote CalDAV collection.

    Instantiating raises an ImportWarning if the WebDAV support modules
    could not be imported.  read() raises a LookupError if it can't
    fetch the resource, otherwise returns an icalendar Calendar object.
    """
    def __init__(self):
        # Refuse to construct when the optional WebDAV stack is missing.
        if not WebDAVAvailable:
            raise ImportWarning(importerr)
        self.__resource = None
        self.__ctag = ""
        self.__color = ""
    def registerChangeCallback(self, func, uri):
        """Register func to be invoked when the remote calendar changes."""
        self.__changeCallback = func
        self.__pollCntr = 0
    def pollForChanges(self):
        """Re-check the server's ctag every POLL_CYCLES calls and fire the
        change callback when it differs.  Always returns True so the
        caller keeps scheduling this poller."""
        self.__pollCntr += 1
        if self.__pollCntr >= POLL_CYCLES:
            if self.__resource:
                try:
                    self.__resource.update()
                except dav.davresource.DAVError, (status, reason, davresource):
                    print "Polling calendar for changes failed (%s: %s)" % (status, reason)
                    print "Will retry next chance we get, don't worry"
                    self.__pollCntr -= 1
                    return True
                except: # BadStatusLine:
                    # Poll for changes again as soon as we can
                    self.__pollCntr -= 1
                    return True
                # The ctag changes whenever any event in the collection does.
                ctag = self.__resource.get_resource_property('getctag', 'http://calendarserver.org/ns/')
                if ctag and (self.__ctag != ctag):
                    self.__ctag = ctag
                    self.__changeCallback(None, FILE_CHANGED)
            self.__pollCntr = 0
        # This function will be called again forever until it's removed
        return True
    def refresh(self, uri):
        """
        Refreshes the resource if it exists.
        Returns True on success, False on failure.
        """
        # Update the resource if it exists
        if self.__resource:
            print "trying to update CalDAV calendar"
            try:
                self.__resource.update()
            except:
                # Move on to the other condition
                print "update failed, will try to fetch"
                self.__resource = None
        # Don't make this an 'else', we may have to handle the above exception
        if not self.__resource:
            if not self.__open(uri, ''):
                print "failed to refresh by re-fetching"
                return False
        # Check to see if the update failed...
        if self.__resource._result == None:
            print "Resource failed to update"
            return False
        return True
    def read(self, uri):
        """
        Returns an icalendar Calendar object no matter what.
        (If the calendar's empty, it should be an empty
        calendar...)
        """
        # Try opening it
        if not self.__open(uri, ''):
            # Do nothing but stop processing
            print "failed to open remote resource for reading"
            raise LookupError
        # Iterate through calendars fetched from the remote server
        # It seems CalDAV (or at least Google Calendar) adds one
        # calendar "child object" per event
        #TODO: will this raise an error?
        cal_string = ""
        try:
            for rcal in self.__resource.get_child_objects():
                cal_string += '\n' + rcal.get().read()
        except dav.davresource.DAVError, (status, reason, davresource):
            print "Reading child elements of calendar failed (%s: %s)" % (status, reason)
            raise
        # Eliminate all instances of the CREATED tag
        cal_string = re_CREATED.sub("", cal_string)
        try:
            tmp_cals = Calendar.from_string(cal_string, multiple=True)
        # If it fails to open, don't update 'events'
        except:
            print "that's odd...icalendar couldn't read fetched calendars..."
            print cal_string
            raise
        else:
            # We only really want VEVENT information from each calendar
            cal = Calendar()
            for c in tmp_cals:
                for subcomponent in c.walk('VEVENT'):
                    cal.add_component(subcomponent)
        # If there's a calendar color, get that too
        self.__color = self.__resource.get_resource_property('calendar-color', 'http://apple.com/ns/ical/')
        return cal
    def __open(self, uri, mode):
        """
        Returns True if successfully opened the CalDAV collection,
        False otherwise.
        """
        print "fetching CalDAV calendar"
        self.__resource = None
        # First, parse the URL
        scheme, server, path, params, query, fragment = urlparse(uri)
        # Get username and pass from 'server'
        try:
            i = server.rindex('@')
        except:
            # Flag this condition as "no auth"
            user = None
            pw = None
            i = -1
        else:
            # getting user name and pw string didn't raise an exception
            try:
                user_end = server[:i].index(':')
            except:
                user = server[:i]
                pw = ""
            else:
                # trying to differentiate between user name and
                # pw didn't raise an exception
                user = server[:user_end]
                pw = server[user_end+1:i]
            finally:
                server = server[i+1:]
        if scheme == "https":
            conn = dav.DAVSConnection(server)
        elif scheme == "http":
            conn = dav.DAVConnection(server)
        else:
            # No connection obtained
            print "Can't connect; unknown scheme (%s)" % scheme
            return False
        if user:
            conn.set_auth(user, pw)
        # Build the URL w/o user and pass
        new_url = urlunsplit((scheme, server, path, params, query))
        self.__resource = None
        try:
            # Error here when no connection available; it can't be
            # assumed that because it didn't throw an exception it's ok
            self.__resource = dav.DAVCollection(new_url, conn)
        except dav.davresource.DAVNoCollectionError:
            print "URL is not a WebDAV \"collection\" (%s)" % new_url;
            return False
        except TypeError:
            print "Bad username (%s) or password" % user
            return False
        # Initialize ctag
        self.__ctag = self.__resource.get_resource_property('getctag', 'http://calendarserver.org/ns/')
        return True
    def close(self):
        """
        Closes the connection to the CalDAV server
        """
        self.__resource = None
    def new(self, cal, uri, event):
        """
        Expects an icalendar Event
        """
        if not self.__resource:
            if not self.__open(uri, 'w'):
                print "could not open \"%s\", event not created" % uri
                return
        else:
            if not self.refresh(uri):
                print "could not refresh \"%s\", event not created" % uri
                return
        new_cal = Calendar()
        new_cal['prodid'] = myProduct.prodid
        new_cal['version'] = myProduct.version
        new_cal.add_component(event)
        # Create a new file; the name will be the pseudorandom UID
        try:
            self.__resource.create_file(event['uid'] + ".ics", new_cal.as_string())
        except dav.davresource.DAVCreationFailedError, (status, reason, url):
            print "File creation failed (%s, %s, \"%s\")" % (status, reason, url)
            # TODO: Make this more robust
            # If this fails, raise the error. What else can I do?
            #self.__resource.create_file(event['uid'] + "-2.ics", new_cal.as_string())
        else:
            print "Remote file created"
    # Note: This assumes only one event is contained in each resource!
    def delete(self, cal, uri, uid):
        """Remove the remote child resource whose VEVENT has the given uid."""
        if not self.__resource:
            if not self.__open(uri, 'w'):
                print "could not open \"%s\", event not deleted" % uri
                return False
        else:
            if not self.refresh(uri):
                print "could not refresh \"%s\", event not deleted" % uri
                return False
        for child in self.__resource.get_child_objects():
            c = Calendar.from_string( child.get().read() )
            for ev in c.walk('VEVENT'):
                print "UID for calendar with event %s is %s" % (ev['summary'], ev.decoded('uid', "-1"))
                if ev.decoded('uid', -1) == uid:
                    # We need to delete by the file's name
                    #self.__resource.delete(str(uid) + ".ics")
                    path = child.url.path
                    if path[-1] == '/':
                        path = path[:-1]
                    self.__resource.delete(path.split('/')[-1])
                    # Trigger a re-read ASAP
                    self.__pollCntr = POLL_CYCLES
                    break
        # Maybe it succeeded...
        # At least the resource doesn't need to be re-opened
        return True
| sergiomb2/gdesklets | Controls/iCalendarEvent/CalDAVbackend.py | Python | gpl-2.0 | 10,287 |
from flask import render_template
def index():
    """Render the landing page template."""
    return render_template("index.html")
| Cydrobolt/spectre | spectre/core.py | Python | apache-2.0 | 89 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import random
import re
import time
import zlib
import six
from six import moves
from tempest_lib.common.utils import data_utils
from tempest.api.object_storage import base
from tempest.common import custom_matchers
from tempest import config
from tempest import test
from tempest_lib import decorators
CONF = config.CONF
class ObjectTest(base.BaseObjectTest):
    @classmethod
    def resource_setup(cls):
        """Create one shared container used by all tests in this class."""
        super(ObjectTest, cls).resource_setup()
        cls.container_name = data_utils.rand_name(name='TestContainer')
        cls.container_client.create_container(cls.container_name)
        cls.containers = [cls.container_name]
    @classmethod
    def resource_cleanup(cls):
        """Delete the shared container (and its objects) after the class runs."""
        cls.delete_containers(cls.containers)
        super(ObjectTest, cls).resource_cleanup()
def _create_object(self, metadata=None):
# setup object
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.arbitrary_string()
self.object_client.create_object(self.container_name,
object_name, data, metadata=metadata)
return object_name, data
def _upload_segments(self):
# create object
object_name = data_utils.rand_name(name='LObject')
data = data_utils.arbitrary_string()
segments = 10
data_segments = [data + str(i) for i in six.moves.xrange(segments)]
# uploading segments
for i in six.moves.xrange(segments):
resp, _ = self.object_client.create_object_segments(
self.container_name, object_name, i, data_segments[i])
return object_name, data_segments
    def _copy_object_2d(self, src_object_name, metadata=None):
        """Server-side copy to a new random name; return (dst name, response)."""
        dst_object_name = data_utils.rand_name(name='TestObject')
        resp, _ = self.object_client.copy_object_2d_way(self.container_name,
                                                        src_object_name,
                                                        dst_object_name,
                                                        metadata=metadata)
        return dst_object_name, resp
def _check_copied_obj(self, dst_object_name, src_body,
in_meta=None, not_in_meta=None):
resp, dest_body = self.object_client.get_object(self.container_name,
dst_object_name)
self.assertEqual(src_body, dest_body)
if in_meta:
for meta_key in in_meta:
self.assertIn('x-object-meta-' + meta_key, resp)
if not_in_meta:
for meta_key in not_in_meta:
self.assertNotIn('x-object-meta-' + meta_key, resp)
    @test.attr(type='smoke')
    @test.idempotent_id('5b4ce26f-3545-46c9-a2ba-5754358a4c62')
    def test_create_object(self):
        """Basic object PUT: create, verify headers, read payload back."""
        # create object
        object_name = data_utils.rand_name(name='TestObject')
        data = data_utils.arbitrary_string()
        resp, _ = self.object_client.create_object(self.container_name,
                                                   object_name, data)
        # create another object
        # NOTE(review): the first create's response is never asserted on;
        # this looks like leftover duplication -- confirm before cleanup.
        object_name = data_utils.rand_name(name='TestObject')
        data = data_utils.arbitrary_string()
        resp, _ = self.object_client.create_object(self.container_name,
                                                   object_name, data)
        self.assertHeaders(resp, 'Object', 'PUT')
        # check uploaded content
        _, body = self.object_client.get_object(self.container_name,
                                                object_name)
        self.assertEqual(data, body)
    @test.idempotent_id('5daebb1d-f0d5-4dc9-b541-69672eff00b0')
    def test_create_object_with_content_disposition(self):
        """PUT with Content-Disposition; header must round-trip on GET."""
        # create object with content_disposition
        object_name = data_utils.rand_name(name='TestObject')
        data = data_utils.arbitrary_string()
        metadata = {}
        metadata['content-disposition'] = 'inline'
        resp, _ = self.object_client.create_object(
            self.container_name,
            object_name,
            data,
            metadata=metadata)
        self.assertHeaders(resp, 'Object', 'PUT')
        resp, body = self.object_client.get_object(
            self.container_name,
            object_name,
            metadata=None)
        self.assertIn('content-disposition', resp)
        self.assertEqual(resp['content-disposition'], 'inline')
        self.assertEqual(body, data)
    @test.idempotent_id('605f8317-f945-4bee-ae91-013f1da8f0a0')
    def test_create_object_with_content_encoding(self):
        """PUT zlib-deflated data; GET with accept-encoding returns the original."""
        # create object with content_encoding
        object_name = data_utils.rand_name(name='TestObject')
        # put compressed string
        data_before = 'x' * 2000
        data = zlib.compress(data_before)
        metadata = {}
        metadata['content-encoding'] = 'deflate'
        resp, _ = self.object_client.create_object(
            self.container_name,
            object_name,
            data,
            metadata=metadata)
        self.assertHeaders(resp, 'Object', 'PUT')
        # download compressed object
        metadata = {}
        metadata['accept-encoding'] = 'deflate'
        resp, body = self.object_client.get_object(
            self.container_name,
            object_name,
            metadata=metadata)
        self.assertEqual(body, data_before)
    @test.idempotent_id('73820093-0503-40b1-a478-edf0e69c7d1f')
    def test_create_object_with_etag(self):
        """PUT with a client-computed Etag header; server must accept it."""
        # create object with etag
        object_name = data_utils.rand_name(name='TestObject')
        data = data_utils.arbitrary_string()
        md5 = hashlib.md5(data).hexdigest()
        metadata = {'Etag': md5}
        resp, _ = self.object_client.create_object(
            self.container_name,
            object_name,
            data,
            metadata=metadata)
        self.assertHeaders(resp, 'Object', 'PUT')
        # check uploaded content
        _, body = self.object_client.get_object(self.container_name,
                                                object_name)
        self.assertEqual(data, body)
    @test.idempotent_id('84dafe57-9666-4f6d-84c8-0814d37923b8')
    def test_create_object_with_expect_continue(self):
        """PUT using the Expect: 100-continue handshake."""
        # create object with expect_continue
        object_name = data_utils.rand_name(name='TestObject')
        data = data_utils.arbitrary_string()
        metadata = {'Expect': '100-continue'}
        resp = self.object_client.create_object_continue(
            self.container_name,
            object_name,
            data,
            metadata=metadata)
        # Server must answer 100 before the body is sent.
        self.assertIn('status', resp)
        self.assertEqual(resp['status'], '100')
        self.object_client.create_object_continue(
            self.container_name,
            object_name,
            data,
            metadata=None)
        # check uploaded content
        _, body = self.object_client.get_object(self.container_name,
                                                object_name)
        self.assertEqual(data, body)
    @decorators.skip_because(bug="1417492")
    @test.attr(type='gate')
    @test.idempotent_id('4f84422a-e2f2-4403-b601-726a4220b54e')
    def test_create_object_with_transfer_encoding(self):
        """Chunked-transfer PUT (currently skipped; see bug 1417492)."""
        # create object with transfer_encoding
        object_name = data_utils.rand_name(name='TestObject')
        data = data_utils.arbitrary_string(1024)
        status, _, resp_headers = self.object_client.put_object_with_chunk(
            container=self.container_name,
            name=object_name,
            contents=moves.cStringIO(data),
            chunk_size=512)
        self.assertHeaders(resp_headers, 'Object', 'PUT')
        # check uploaded content
        _, body = self.object_client.get_object(self.container_name,
                                                object_name)
        self.assertEqual(data, body)
    @test.idempotent_id('0f3d62a6-47e3-4554-b0e5-1a5dc372d501')
    def test_create_object_with_x_fresh_metadata(self):
        """Server-side copy via X-Copy-From with X-Fresh-Metadata set."""
        # create object with x_fresh_metadata
        object_name_base = data_utils.rand_name(name='TestObject')
        data = data_utils.arbitrary_string()
        metadata_1 = {'X-Object-Meta-test-meta': 'Meta'}
        self.object_client.create_object(self.container_name,
                                         object_name_base,
                                         data,
                                         metadata=metadata_1)
        object_name = data_utils.rand_name(name='TestObject')
        metadata_2 = {'X-Copy-From': '%s/%s' % (self.container_name,
                                                object_name_base),
                      'X-Fresh-Metadata': 'true'}
        resp, _ = self.object_client.create_object(
            self.container_name,
            object_name,
            '',
            metadata=metadata_2)
        self.assertHeaders(resp, 'Object', 'PUT')
        resp, body = self.object_client.get_object(self.container_name,
                                                   object_name)
        #Bug = 1417489
        #self.assertNotIn('x-object-meta-test-meta', resp)
        self.assertEqual(data, body)
    @test.idempotent_id('1c7ed3e4-2099-406b-b843-5301d4811baf')
    def test_create_object_with_x_object_meta(self):
        """PUT with custom metadata; it must round-trip on GET."""
        # create object with object_meta
        object_name = data_utils.rand_name(name='TestObject')
        data = data_utils.arbitrary_string()
        metadata = {'X-Object-Meta-test-meta': 'Meta'}
        resp, _ = self.object_client.create_object(
            self.container_name,
            object_name,
            data,
            metadata=metadata)
        self.assertHeaders(resp, 'Object', 'PUT')
        resp, body = self.object_client.get_object(self.container_name,
                                                   object_name)
        self.assertIn('x-object-meta-test-meta', resp)
        self.assertEqual(resp['x-object-meta-test-meta'], 'Meta')
        self.assertEqual(data, body)
    @test.idempotent_id('e4183917-33db-4153-85cc-4dacbb938865')
    def test_create_object_with_x_object_metakey(self):
        """PUT with an empty-valued metadata key; key must survive with '' value."""
        # create object with the blank value of metadata
        object_name = data_utils.rand_name(name='TestObject')
        data = data_utils.arbitrary_string()
        metadata = {'X-Object-Meta-test-meta': ''}
        resp, _ = self.object_client.create_object(
            self.container_name,
            object_name,
            data,
            metadata=metadata)
        self.assertHeaders(resp, 'Object', 'PUT')
        resp, body = self.object_client.get_object(self.container_name,
                                                   object_name)
        self.assertIn('x-object-meta-test-meta', resp)
        self.assertEqual(resp['x-object-meta-test-meta'], '')
        self.assertEqual(data, body)
@test.idempotent_id('ce798afc-b278-45de-a5ce-2ea124b98b99')
def test_create_object_with_x_remove_object_meta(self):
# create object with x_remove_object_meta
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.arbitrary_string()
metadata_add = {'X-Object-Meta-test-meta': 'Meta'}
self.object_client.create_object(self.container_name,
object_name,
data,
metadata=metadata_add)
metadata_remove = {'X-Remove-Object-Meta-test-meta': 'Meta'}
resp, _ = self.object_client.create_object(
self.container_name,
object_name,
data,
metadata=metadata_remove)
self.assertHeaders(resp, 'Object', 'PUT')
resp, body = self.object_client.get_object(self.container_name,
object_name)
self.assertNotIn('x-object-meta-test-meta', resp)
self.assertEqual(data, body)
@test.idempotent_id('ad21e342-7916-4f9e-ab62-a1f885f2aaf9')
def test_create_object_with_x_remove_object_metakey(self):
# create object with the blank value of remove metadata
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.arbitrary_string()
metadata_add = {'X-Object-Meta-test-meta': 'Meta'}
self.object_client.create_object(self.container_name,
object_name,
data,
metadata=metadata_add)
metadata_remove = {'X-Remove-Object-Meta-test-meta': ''}
resp, _ = self.object_client.create_object(
self.container_name,
object_name,
data,
metadata=metadata_remove)
self.assertHeaders(resp, 'Object', 'PUT')
resp, body = self.object_client.get_object(self.container_name,
object_name)
self.assertNotIn('x-object-meta-test-meta', resp)
self.assertEqual(data, body)
@test.idempotent_id('17738d45-03bd-4d45-9e0b-7b2f58f98687')
def test_delete_object(self):
    """Create an object, delete it, and check the DELETE response headers."""
    name = data_utils.rand_name(name='TestObject')
    payload = data_utils.arbitrary_string()
    self.object_client.create_object(self.container_name, name, payload)
    resp, _ = self.object_client.delete_object(self.container_name, name)
    self.assertHeaders(resp, 'Object', 'DELETE')
@test.attr(type='smoke')
@test.idempotent_id('7a94c25d-66e6-434c-9c38-97d4e2c29945')
def test_update_object_metadata(self):
# update object metadata
object_name, data = self._create_object()
metadata = {'X-Object-Meta-test-meta': 'Meta'}
resp, _ = self.object_client.update_object_metadata(
self.container_name,
object_name,
metadata,
metadata_prefix='')
self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
self.container_name,
object_name)
self.assertIn('x-object-meta-test-meta', resp)
self.assertEqual(resp['x-object-meta-test-meta'], 'Meta')
@test.idempotent_id('48650ed0-c189-4e1e-ad6b-1d4770c6e134')
def test_update_object_metadata_with_remove_metadata(self):
# update object metadata with remove metadata
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.arbitrary_string()
create_metadata = {'X-Object-Meta-test-meta1': 'Meta1'}
self.object_client.create_object(self.container_name,
object_name,
data,
metadata=create_metadata)
update_metadata = {'X-Remove-Object-Meta-test-meta1': 'Meta1'}
resp, _ = self.object_client.update_object_metadata(
self.container_name,
object_name,
update_metadata,
metadata_prefix='')
self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
self.container_name,
object_name)
self.assertNotIn('x-object-meta-test-meta1', resp)
@test.idempotent_id('f726174b-2ded-4708-bff7-729d12ce1f84')
def test_update_object_metadata_with_create_and_remove_metadata(self):
# creation and deletion of metadata with one request
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.arbitrary_string()
create_metadata = {'X-Object-Meta-test-meta1': 'Meta1'}
self.object_client.create_object(self.container_name,
object_name,
data,
metadata=create_metadata)
update_metadata = {'X-Object-Meta-test-meta2': 'Meta2',
'X-Remove-Object-Meta-test-meta1': 'Meta1'}
resp, _ = self.object_client.update_object_metadata(
self.container_name,
object_name,
update_metadata,
metadata_prefix='')
self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
self.container_name,
object_name)
self.assertNotIn('x-object-meta-test-meta1', resp)
self.assertIn('x-object-meta-test-meta2', resp)
self.assertEqual(resp['x-object-meta-test-meta2'], 'Meta2')
@test.idempotent_id('08854588-6449-4bb7-8cca-f2e1040f5e6f')
def test_update_object_metadata_with_x_object_manifest(self):
# update object metadata with x_object_manifest
# uploading segments
object_name, data_segments = self._upload_segments()
# creating a manifest file
data_empty = ''
object_prefix = '%s/%s' % (self.container_name, object_name)
metadata = {'X-Object-Manifest': object_prefix}
resp, _ = self.object_client.create_object(self.container_name,
object_name,
data_empty,
metadata=metadata)
self.assertHeaders(resp, 'Object', 'PUT')
#Bug = 1417462
#update_metadata = {'X-Object-Manifest': object_prefix}
#resp, _ = self.object_client.update_object_metadata(
# self.container_name,
# object_name,
# update_metadata,
# metadata_prefix='')
#self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
self.container_name,
object_name)
self.assertIn('x-object-manifest', resp)
self.assertNotEqual(len(resp['x-object-manifest']), 0)
@test.idempotent_id('0dbbe89c-6811-4d84-a2df-eca2bdd40c0e')
def test_update_object_metadata_with_x_object_metakey(self):
    # update object metadata with a blank value of metadata: the key must
    # still be stored and returned with an empty string value
    object_name, data = self._create_object()
    update_metadata = {'X-Object-Meta-test-meta': ''}
    resp, _ = self.object_client.update_object_metadata(
        self.container_name,
        object_name,
        update_metadata,
        metadata_prefix='')
    self.assertHeaders(resp, 'Object', 'POST')
    # HEAD the object: the blank-valued key must be present, empty
    resp, _ = self.object_client.list_object_metadata(
        self.container_name,
        object_name)
    self.assertIn('x-object-meta-test-meta', resp)
    self.assertEqual(resp['x-object-meta-test-meta'], '')
@test.idempotent_id('9a88dca4-b684-425b-806f-306cd0e57e42')
def test_update_object_metadata_with_x_remove_object_metakey(self):
# update object metadata with a blank value of remove metadata
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.arbitrary_string()
create_metadata = {'X-Object-Meta-test-meta': 'Meta'}
self.object_client.create_object(self.container_name,
object_name,
data,
metadata=create_metadata)
update_metadata = {'X-Remove-Object-Meta-test-meta': ''}
resp, _ = self.object_client.update_object_metadata(
self.container_name,
object_name,
update_metadata,
metadata_prefix='')
self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
self.container_name,
object_name)
self.assertNotIn('x-object-meta-test-meta', resp)
@test.attr(type='smoke')
@test.idempotent_id('9a447cf6-de06-48de-8226-a8c6ed31caf2')
def test_list_object_metadata(self):
# get object metadata
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.arbitrary_string()
metadata = {'X-Object-Meta-test-meta': 'Meta'}
self.object_client.create_object(self.container_name,
object_name,
data,
metadata=metadata)
resp, _ = self.object_client.list_object_metadata(
self.container_name,
object_name)
self.assertHeaders(resp, 'Object', 'HEAD')
self.assertIn('x-object-meta-test-meta', resp)
self.assertEqual(resp['x-object-meta-test-meta'], 'Meta')
@test.idempotent_id('170fb90e-f5c3-4b1f-ae1b-a18810821172')
def test_list_no_object_metadata(self):
# get empty list of object metadata
object_name, data = self._create_object()
resp, _ = self.object_client.list_object_metadata(
self.container_name,
object_name)
self.assertHeaders(resp, 'Object', 'HEAD')
self.assertNotIn('x-object-meta-', str(resp))
@test.idempotent_id('23a3674c-d6de-46c3-86af-ff92bfc8a3da')
def test_list_object_metadata_with_x_object_manifest(self):
# get object metadata with x_object_manifest
# uploading segments
object_name, data_segments = self._upload_segments()
# creating a manifest file
object_prefix = '%s/%s' % (self.container_name, object_name)
metadata = {'X-Object-Manifest': object_prefix}
data_empty = ''
resp, _ = self.object_client.create_object(
self.container_name,
object_name,
data_empty,
metadata=metadata)
resp, _ = self.object_client.list_object_metadata(
self.container_name,
object_name)
# Check only the existence of common headers with custom matcher
self.assertThat(resp, custom_matchers.ExistsAllResponseHeaders(
'Object', 'HEAD'))
self.assertIn('x-object-manifest', resp)
# Etag value of a large object is enclosed in double-quotations.
# This is a special case, therefore the formats of response headers
# are checked without a custom matcher.
#Bug = 1417462
#self.assertTrue(resp['etag'].startswith('\"'))
#self.assertTrue(resp['etag'].endswith('\"'))
#self.assertTrue(resp['etag'].strip('\"').isalnum())
self.assertTrue(resp['etag'].isalnum())
#self.assertTrue(re.match("^\d+\.?\d*\Z", resp['x-timestamp']))
self.assertNotEqual(len(resp['content-type']), 0)
#self.assertTrue(re.match("^tx[0-9a-f]*-[0-9a-f]*$",
# resp['x-trans-id']))
self.assertNotEqual(len(resp['date']), 0)
self.assertEqual(resp['accept-ranges'], 'bytes')
self.assertEqual(resp['x-object-manifest'],
'%s/%s' % (self.container_name, object_name))
@test.attr(type='smoke')
@test.idempotent_id('02610ba7-86b7-4272-9ed8-aa8d417cb3cd')
def test_get_object(self):
# retrieve object's data (in response body)
# create object
object_name, data = self._create_object()
# get object
resp, body = self.object_client.get_object(self.container_name,
object_name)
self.assertHeaders(resp, 'Object', 'GET')
self.assertEqual(body, data)
@test.idempotent_id('005f9bf6-e06d-41ec-968e-96c78e0b1d82')
def test_get_object_with_metadata(self):
# get object with metadata
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.arbitrary_string()
metadata = {'X-Object-Meta-test-meta': 'Meta'}
self.object_client.create_object(self.container_name,
object_name,
data,
metadata=metadata)
resp, body = self.object_client.get_object(
self.container_name,
object_name,
metadata=None)
self.assertHeaders(resp, 'Object', 'GET')
self.assertIn('x-object-meta-test-meta', resp)
self.assertEqual(resp['x-object-meta-test-meta'], 'Meta')
self.assertEqual(body, data)
@test.idempotent_id('05a1890e-7db9-4a6c-90a8-ce998a2bddfa')
def test_get_object_with_range(self):
    # get object with a Range header and verify only that slice is returned
    object_name = data_utils.rand_name(name='TestObject')
    data = data_utils.arbitrary_string(100)
    self.object_client.create_object(self.container_name,
                                     object_name,
                                     data,
                                     metadata=None)
    # Pick a random 3-byte window. HTTP Range bounds are inclusive, so
    # 'bytes=a-b' returns b - a + 1 bytes, i.e. data[a:b + 1].
    rand_num = random.randint(3, len(data) - 1)
    metadata = {'Range': 'bytes=%s-%s' % (rand_num - 3, rand_num - 1)}
    resp, body = self.object_client.get_object(
        self.container_name,
        object_name,
        metadata=metadata)
    self.assertHeaders(resp, 'Object', 'GET')
    # expected slice matches the inclusive byte range requested above
    self.assertEqual(body, data[rand_num - 3: rand_num])
@test.idempotent_id('11b4515b-7ba7-4ca8-8838-357ded86fc10')
def test_get_object_with_x_object_manifest(self):
# get object with x_object_manifest
# uploading segments
object_name, data_segments = self._upload_segments()
# creating a manifest file
object_prefix = '%s/%s' % (self.container_name, object_name)
metadata = {'X-Object-Manifest': object_prefix}
data_empty = ''
resp, body = self.object_client.create_object(
self.container_name,
object_name,
data_empty,
metadata=metadata)
resp, body = self.object_client.get_object(
self.container_name,
object_name,
metadata=None)
# Check only the existence of common headers with custom matcher
self.assertThat(resp, custom_matchers.ExistsAllResponseHeaders(
'Object', 'GET'))
self.assertIn('x-object-manifest', resp)
# Etag value of a large object is enclosed in double-quotations.
# This is a special case, therefore the formats of response headers
# are checked without a custom matcher.
#Bug = 1417462
#self.assertTrue(resp['etag'].startswith('\"'))
#self.assertTrue(resp['etag'].endswith('\"'))
self.assertTrue(resp['etag'].isalnum())
#self.assertTrue(re.match("^\d+\.?\d*\Z", resp['x-timestamp']))
self.assertNotEqual(len(resp['content-type']), 0)
#self.assertTrue(re.match("^tx[0-9a-f]*-[0-9a-f]*$",
# resp['x-trans-id']))
self.assertNotEqual(len(resp['date']), 0)
self.assertEqual(resp['accept-ranges'], 'bytes')
self.assertEqual(resp['x-object-manifest'],
'%s/%s' % (self.container_name, object_name))
self.assertEqual(''.join(data_segments), body)
@test.idempotent_id('c05b4013-e4de-47af-be84-e598062b16fc')
def test_get_object_with_if_match(self):
# get object with if_match
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.arbitrary_string(10)
create_md5 = hashlib.md5(data).hexdigest()
create_metadata = {'Etag': create_md5}
self.object_client.create_object(self.container_name,
object_name,
data,
metadata=create_metadata)
list_metadata = {'If-Match': create_md5}
resp, body = self.object_client.get_object(
self.container_name,
object_name,
metadata=list_metadata)
self.assertHeaders(resp, 'Object', 'GET')
self.assertEqual(body, data)
@test.idempotent_id('be133639-e5d2-4313-9b1f-2d59fc054a16')
def test_get_object_with_if_modified_since(self):
# get object with if_modified_since
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.arbitrary_string()
time_now = time.time()
self.object_client.create_object(self.container_name,
object_name,
data,
metadata=None)
http_date = time.ctime(time_now - 86400)
list_metadata = {'If-Modified-Since': http_date}
resp, body = self.object_client.get_object(
self.container_name,
object_name,
metadata=list_metadata)
self.assertHeaders(resp, 'Object', 'GET')
self.assertEqual(body, data)
@test.idempotent_id('641500d5-1612-4042-a04d-01fc4528bc30')
def test_get_object_with_if_none_match(self):
# get object with if_none_match
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.arbitrary_string(10)
create_md5 = hashlib.md5(data).hexdigest()
create_metadata = {'Etag': create_md5}
self.object_client.create_object(self.container_name,
object_name,
data,
metadata=create_metadata)
list_data = data_utils.arbitrary_string(15)
list_md5 = hashlib.md5(list_data).hexdigest()
list_metadata = {'If-None-Match': list_md5}
resp, body = self.object_client.get_object(
self.container_name,
object_name,
metadata=list_metadata)
self.assertHeaders(resp, 'Object', 'GET')
self.assertEqual(body, data)
@test.idempotent_id('0aa1201c-10aa-467a-bee7-63cbdd463152')
def test_get_object_with_if_unmodified_since(self):
# get object with if_unmodified_since
object_name, data = self._create_object()
time_now = time.time()
http_date = time.ctime(time_now + 86400)
list_metadata = {'If-Unmodified-Since': http_date}
resp, body = self.object_client.get_object(
self.container_name,
object_name,
metadata=list_metadata)
self.assertHeaders(resp, 'Object', 'GET')
self.assertEqual(body, data)
@test.idempotent_id('94587078-475f-48f9-a40f-389c246e31cd')
def test_get_object_with_x_newest(self):
# get object with x_newest
object_name, data = self._create_object()
list_metadata = {'X-Newest': 'true'}
resp, body = self.object_client.get_object(
self.container_name,
object_name,
metadata=list_metadata)
self.assertHeaders(resp, 'Object', 'GET')
self.assertEqual(body, data)
@test.idempotent_id('1a9ab572-1b66-4981-8c21-416e2a5e6011')
def test_copy_object_in_same_container(self):
# create source object
src_object_name = data_utils.rand_name(name='SrcObject')
src_data = data_utils.arbitrary_string(size=len(src_object_name) * 2,
base_text=src_object_name)
resp, _ = self.object_client.create_object(self.container_name,
src_object_name,
src_data)
# create destination object
dst_object_name = data_utils.rand_name(name='DstObject')
dst_data = data_utils.arbitrary_string(size=len(dst_object_name) * 3,
base_text=dst_object_name)
resp, _ = self.object_client.create_object(self.container_name,
dst_object_name,
dst_data)
# copy source object to destination
resp, _ = self.object_client.copy_object_in_same_container(
self.container_name, src_object_name, dst_object_name)
self.assertHeaders(resp, 'Object', 'PUT')
# check data
resp, body = self.object_client.get_object(self.container_name,
dst_object_name)
self.assertEqual(body, src_data)
@test.idempotent_id('2248abba-415d-410b-9c30-22dff9cd6e67')
def test_copy_object_to_itself(self):
# change the content type of an existing object
# create object
object_name, data = self._create_object()
# get the old content type
resp_tmp, _ = self.object_client.list_object_metadata(
self.container_name, object_name)
# change the content type of the object
metadata = {'content-type': 'text/plain; charset=UTF-8'}
self.assertNotEqual(resp_tmp['content-type'], metadata['content-type'])
resp, _ = self.object_client.copy_object_in_same_container(
self.container_name, object_name, object_name, metadata)
self.assertHeaders(resp, 'Object', 'PUT')
#Bug = 1417458
# check the content type
#resp, _ = self.object_client.list_object_metadata(self.container_name,
# object_name)
#self.assertEqual(resp['content-type'], metadata['content-type'])
@test.idempotent_id('06f90388-2d0e-40aa-934c-e9a8833e958a')
def test_copy_object_2d_way(self):
# create source object
src_object_name = data_utils.rand_name(name='SrcObject')
src_data = data_utils.arbitrary_string(size=len(src_object_name) * 2,
base_text=src_object_name)
resp, _ = self.object_client.create_object(self.container_name,
src_object_name, src_data)
# create destination object
dst_object_name = data_utils.rand_name(name='DstObject')
dst_data = data_utils.arbitrary_string(size=len(dst_object_name) * 3,
base_text=dst_object_name)
resp, _ = self.object_client.create_object(self.container_name,
dst_object_name, dst_data)
# copy source object to destination
resp, _ = self.object_client.copy_object_2d_way(self.container_name,
src_object_name,
dst_object_name)
self.assertHeaders(resp, 'Object', 'COPY')
#Bug 1417469
#self.assertEqual(
# resp['x-copied-from'],
# self.container_name + "/" + src_object_name)
# check data
self._check_copied_obj(dst_object_name, src_data)
@test.idempotent_id('aa467252-44f3-472a-b5ae-5b57c3c9c147')
def test_copy_object_across_containers(self):
    # create a container to use as a source container
    src_container_name = data_utils.rand_name(name='TestSourceContainer')
    self.container_client.create_container(src_container_name)
    self.containers.append(src_container_name)
    # create a container to use as a destination container
    dst_container_name = data_utils.rand_name(
        name='TestDestinationContainer')
    self.container_client.create_container(dst_container_name)
    self.containers.append(dst_container_name)
    # create object in source container
    object_name = data_utils.rand_name(name='Object')
    data = data_utils.arbitrary_string(size=len(object_name) * 2,
                                       base_text=object_name)
    resp, _ = self.object_client.create_object(src_container_name,
                                               object_name, data)
    # set object metadata (a random key/value pair, checked after the copy)
    meta_key = data_utils.rand_name(name='test')
    meta_value = data_utils.rand_name(name='MetaValue')
    orig_metadata = {meta_key: meta_value}
    resp, _ = self.object_client.update_object_metadata(src_container_name,
                                                        object_name,
                                                        orig_metadata)
    self.assertHeaders(resp, 'Object', 'POST')
    # copy object from source container to destination container
    resp, _ = self.object_client.copy_object_across_containers(
        src_container_name, object_name, dst_container_name,
        object_name)
    self.assertHeaders(resp, 'Object', 'PUT')
    # check if object is present in destination container, with the same
    # payload and with the metadata carried over from the source object
    resp, body = self.object_client.get_object(dst_container_name,
                                               object_name)
    self.assertEqual(body, data)
    actual_meta_key = 'x-object-meta-' + meta_key
    self.assertIn(actual_meta_key, resp)
    self.assertEqual(resp[actual_meta_key], meta_value)
@test.idempotent_id('5a9e2cc6-85b6-46fc-916d-0cbb7a88e5fd')
def test_copy_object_with_x_fresh_metadata(self):
# create source object
metadata = {'x-object-meta-src': 'src_value'}
src_object_name, data = self._create_object(metadata)
# copy source object with x_fresh_metadata header
metadata = {'X-Fresh-Metadata': 'true'}
dst_object_name, resp = self._copy_object_2d(src_object_name,
metadata)
self.assertHeaders(resp, 'Object', 'COPY')
self.assertNotIn('x-object-meta-src', resp)
#Bug = 1417469
#self.assertEqual(resp['x-copied-from'],
# self.container_name + "/" + src_object_name)
# check that destination object does NOT have any object-meta
#Bug = 1417489
#self._check_copied_obj(dst_object_name, data, not_in_meta=["src"])
self._check_copied_obj(dst_object_name, data, in_meta=["src"])
@test.idempotent_id('a28a8b99-e701-4d7e-9d84-3b66f121460b')
def test_copy_object_with_x_object_metakey(self):
# create source object
metadata = {'x-object-meta-src': 'src_value'}
src_obj_name, data = self._create_object(metadata)
# copy source object to destination with x-object-meta-key
metadata = {'x-object-meta-test': ''}
dst_obj_name, resp = self._copy_object_2d(src_obj_name, metadata)
self.assertHeaders(resp, 'Object', 'COPY')
#Bug = 1417469
#expected = {'x-object-meta-test': '',
# 'x-object-meta-src': 'src_value',
# 'x-copied-from': self.container_name + "/" + src_obj_name}
#for key, value in six.iteritems(expected):
# self.assertIn(key, resp)
# self.assertEqual(value, resp[key])
# check destination object
#Bug = 1417466
#self._check_copied_obj(dst_obj_name, data, in_meta=["test", "src"])
self._check_copied_obj(dst_obj_name, data, in_meta=["src"])
@test.idempotent_id('edabedca-24c3-4322-9b70-d6d9f942a074')
def test_copy_object_with_x_object_meta(self):
# create source object
metadata = {'x-object-meta-src': 'src_value'}
src_obj_name, data = self._create_object(metadata)
# copy source object to destination with object metadata
metadata = {'x-object-meta-test': 'value'}
dst_obj_name, resp = self._copy_object_2d(src_obj_name, metadata)
self.assertHeaders(resp, 'Object', 'COPY')
#Bug = 1417469
#expected = {'x-object-meta-test': 'value',
# 'x-object-meta-src': 'src_value',
# 'x-copied-from': self.container_name + "/" + src_obj_name}
#for key, value in six.iteritems(expected):
# self.assertIn(key, resp)
# self.assertEqual(value, resp[key])
# check destination object
#Bug = 1417466
#self._check_copied_obj(dst_obj_name, data, in_meta=["test", "src"])
self._check_copied_obj(dst_obj_name, data, in_meta=["src"])
@test.idempotent_id('e3e6a64a-9f50-4955-b987-6ce6767c97fb')
def test_object_upload_in_segments(self):
# create object
object_name = data_utils.rand_name(name='LObject')
data = data_utils.arbitrary_string()
segments = 10
data_segments = [data + str(i) for i in six.moves.xrange(segments)]
# uploading segments
for i in six.moves.xrange(segments):
resp, _ = self.object_client.create_object_segments(
self.container_name, object_name, i, data_segments[i])
# creating a manifest file
metadata = {'X-Object-Manifest': '%s/%s/'
% (self.container_name, object_name)}
resp, _ = self.object_client.create_object(self.container_name,
object_name, metadata=metadata, data='')
self.assertHeaders(resp, 'Object', 'PUT')
#Bug = 1417462
#resp, _ = self.object_client.update_object_metadata(
# self.container_name, object_name, metadata, metadata_prefix='')
#self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
self.container_name, object_name)
# Etag value of a large object is enclosed in double-quotations.
# After etag quotes are checked they are removed and the response is
# checked if all common headers are present and well formatted
#self.assertTrue(resp['etag'].startswith('\"'))
#self.assertTrue(resp['etag'].endswith('\"'))
resp['etag'] = resp['etag'].strip('"')
self.assertHeaders(resp, 'Object', 'HEAD')
self.assertIn('x-object-manifest', resp)
self.assertEqual(resp['x-object-manifest'],
'%s/%s/' % (self.container_name, object_name))
# downloading the object
resp, body = self.object_client.get_object(
self.container_name, object_name)
self.assertEqual(''.join(data_segments), body)
@test.idempotent_id('50d01f12-526f-4360-9ac2-75dd508d7b68')
def test_get_object_if_different(self):
# http://en.wikipedia.org/wiki/HTTP_ETag
# Make a conditional request for an object using the If-None-Match
# header, it should get downloaded only if the local file is different,
# otherwise the response code should be 304 Not Modified
object_name, data = self._create_object()
# local copy is identical, no download
md5 = hashlib.md5(data).hexdigest()
headers = {'If-None-Match': md5}
url = "%s/%s" % (self.container_name, object_name)
resp, _ = self.object_client.get(url, headers=headers)
self.assertEqual(resp['status'], '304')
# When the file is not downloaded from Swift server, response does
# not contain 'X-Timestamp' header. This is the special case, therefore
# the existence of response headers is checked without custom matcher.
#Bug = 1417481
#self.assertIn('content-type', resp)
#self.assertIn('x-trans-id', resp)
self.assertIn('date', resp)
#self.assertIn('accept-ranges', resp)
# Check only the format of common headers with custom matcher
self.assertThat(resp, custom_matchers.AreAllWellFormatted())
# local copy is different, download
local_data = "something different"
md5 = hashlib.md5(local_data).hexdigest()
headers = {'If-None-Match': md5}
resp, body = self.object_client.get(url, headers=headers)
self.assertHeaders(resp, 'Object', 'GET')
class PublicObjectTest(base.BaseObjectTest):
    """Tests for anonymous / cross-tenant access to publicly readable
    containers (X-Container-Read ACLs)."""

    # two operator-role credential sets: the owner and an alternate user
    credentials = [['operator', CONF.object_storage.operator_role],
                   ['operator_alt', CONF.object_storage.operator_role]]

    @classmethod
    def setup_credentials(cls):
        super(PublicObjectTest, cls).setup_credentials()
        cls.os = cls.os_roles_operator
        cls.os_alt = cls.os_roles_operator_alt

    @classmethod
    def setup_clients(cls):
        super(PublicObjectTest, cls).setup_clients()
        # identity client of the alternate user, used to fetch its token
        cls.identity_client_alt = cls.os_alt.identity_client

    def setUp(self):
        # each test gets a fresh, randomly named container
        super(PublicObjectTest, self).setUp()
        self.container_name = data_utils.rand_name(name='TestContainer')
        self.container_client.create_container(self.container_name)

    def tearDown(self):
        self.delete_containers([self.container_name])
        super(PublicObjectTest, self).tearDown()

    @test.idempotent_id('07c9cf95-c0d4-4b49-b9c8-0ef2c9b27193')
    def test_access_public_container_object_without_using_creds(self):
        # make container public-readable and access an object in it
        # anonymously, without using credentials

        # update container metadata to make it publicly readable
        cont_headers = {'X-Container-Read': '.r:*,.rlistings'}
        resp_meta, body = self.container_client.update_container_metadata(
            self.container_name, metadata=cont_headers, metadata_prefix='')
        self.assertHeaders(resp_meta, 'Container', 'POST')
        # create object
        object_name = data_utils.rand_name(name='Object')
        data = data_utils.arbitrary_string(size=len(object_name),
                                           base_text=object_name)
        resp, _ = self.object_client.create_object(self.container_name,
                                                   object_name, data)
        self.assertHeaders(resp, 'Object', 'PUT')
        # list container metadata to confirm the read ACL was stored
        resp_meta, _ = self.container_client.list_container_metadata(
            self.container_name)
        self.assertHeaders(resp_meta, 'Container', 'HEAD')
        self.assertIn('x-container-read', resp_meta)
        #Bug = 1417498
        #self.assertEqual(resp_meta['x-container-read'], '.r:*,.rlistings')
        self.assertEqual(resp_meta['x-container-read'], '.r:*')
        # trying to get object with empty headers as it is public readable:
        # clearing the auth data makes the request anonymous
        self.object_client.auth_provider.set_alt_auth_data(
            request_part='headers',
            auth_data=None
        )
        resp, body = self.object_client.get_object(
            self.container_name, object_name)
        self.assertHeaders(resp, 'Object', 'GET')
        self.assertEqual(body, data)

    @test.idempotent_id('54e2a2fe-42dc-491b-8270-8e4217dd4cdc')
    def test_access_public_object_with_another_user_creds(self):
        # make container public-readable and access an object in it using
        # another user's credentials
        cont_headers = {'X-Container-Read': '.r:*,.rlistings'}
        resp_meta, body = self.container_client.update_container_metadata(
            self.container_name, metadata=cont_headers,
            metadata_prefix='')
        self.assertHeaders(resp_meta, 'Container', 'POST')
        # create object
        object_name = data_utils.rand_name(name='Object')
        data = data_utils.arbitrary_string(size=len(object_name) * 1,
                                           base_text=object_name)
        resp, _ = self.object_client.create_object(self.container_name,
                                                   object_name, data)
        self.assertHeaders(resp, 'Object', 'PUT')
        # list container metadata to confirm the read ACL was stored
        resp, _ = self.container_client.list_container_metadata(
            self.container_name)
        #ceph does not return container header in response of HEAD request.
        #self.assertHeaders(resp, 'Container', 'HEAD')
        self.assertIn('x-container-read', resp)
        #Bug = 1417498
        #self.assertEqual(resp['x-container-read'], '.r:*,.rlistings')
        self.assertEqual(resp['x-container-read'], '.r:*')
        # get auth token of alternative user and attach it to the requests
        alt_auth_data = self.identity_client_alt.auth_provider.auth_data
        self.object_client.auth_provider.set_alt_auth_data(
            request_part='headers',
            auth_data=alt_auth_data
        )
        # access object using alternate user creds
        resp, body = self.object_client.get_object(
            self.container_name, object_name)
        self.assertHeaders(resp, 'Object', 'GET')
        self.assertEqual(body, data)
| roopali8/tempest | tempest/api/object_storage/test_object_services.py | Python | apache-2.0 | 48,905 |
"""
Follow up for "Unique Paths":
Now consider if some obstacles are added to the grids. How many unique paths
would there be?
An obstacle and empty space is marked as 1 and 0 respectively in the grid.
For example,
There is one obstacle in the middle of a 3x3 grid as illustrated below.
[
[0,0,0],
[0,1,0],
[0,0,0]
]
The total number of unique paths is 2.
Note: m and n will be at most 100.
"""
class Solution:
    """Count the unique right/down paths through a grid with obstacles.

    Cells containing 1 are obstacles; cells containing 0 are free. The
    original top-down memoized recursion is kept as ``unique_paths`` for
    backward compatibility, but the public entry point now uses bottom-up
    dynamic programming: O(n*m) time, O(m) extra space, and no recursion
    (so no Python call overhead or recursion-depth concerns).
    """

    # @param obstacleGrid, a list of lists of integers
    # @return an integer
    def uniquePathsWithObstacles(self, obstacleGrid):
        """Return the number of obstacle-avoiding paths from the top-left
        to the bottom-right cell, moving only right or down."""
        # Guard against empty input (the recursive version would raise).
        if not obstacleGrid or not obstacleGrid[0]:
            return 0
        m = len(obstacleGrid[0])
        # paths[x] == number of ways to reach column x of the current row.
        # Seeding paths[0] = 1 makes the first free cell of row 0 reachable.
        paths = [0] * m
        paths[0] = 1
        for row in obstacleGrid:
            for x in range(m):
                if row[x] == 1:
                    # An obstacle cell is unreachable and blocks the column.
                    paths[x] = 0
                elif x > 0:
                    # Reachable from the left (paths[x - 1]) and from above
                    # (the previous value of paths[x]).
                    paths[x] += paths[x - 1]
        return paths[m - 1]

    def unique_paths(self, grid, x, y, t):
        """Memoized recursive count of paths reaching cell (x, y).

        ``t`` is the memo table (-1 means "not computed yet"). Kept
        unchanged for callers that use this helper directly.
        """
        if x == 0 and y == 0:
            t[y][x] = 1 if grid[y][x] == 0 else 0
            return t[y][x]
        elif grid[y][x] == 1:
            t[y][x] = 0
            return t[y][x]
        elif t[y][x] != -1:
            return t[y][x]
        elif x > 0 and y == 0:
            t[y][x] = self.unique_paths(grid, x - 1, y, t)
            return t[y][x]
        elif y > 0 and x == 0:
            t[y][x] = self.unique_paths(grid, x, y - 1, t)
            return t[y][x]
        else:
            a = self.unique_paths(grid, x - 1, y, t)
            b = self.unique_paths(grid, x, y - 1, t)
            t[y][x] = a + b
            return t[y][x]
| shichao-an/leetcode-python | unique_paths_ii/solution2.py | Python | bsd-2-clause | 1,435 |
# -*- coding: utf-8 -*-
"""
.. module:: bitalino
:synopsis: BITalino API
*Created on Fri Jun 20 2014*
"""
import math
import platform
import re
import struct
import time
import numpy
import serial
def find():
    """
    :returns: list of (tuples) with name and MAC address of each device found

    Searches for bluetooth devices nearby. Only supported on Windows and
    GNU/Linux; on any other platform an exception is raised.
    """
    if platform.system() not in ('Windows', 'Linux'):
        raise Exception(ExceptionCode.INVALID_PLATFORM)
    # Imported lazily so the module loads on platforms without pybluez.
    import bluetooth
    return bluetooth.discover_devices(lookup_names=True)
class ExceptionCode:
    """Human-readable error messages raised by the BITalino API."""

    # connection / environment problems
    INVALID_ADDRESS = "The specified address is invalid."
    INVALID_PLATFORM = "This platform does not support bluetooth connection."
    CONTACTING_DEVICE = "The computer lost communication with the device."
    # device state problems
    DEVICE_NOT_IDLE = "The device is not idle."
    DEVICE_NOT_IN_ACQUISITION = "The device is not in acquisition mode."
    # argument problems
    INVALID_PARAMETER = "Invalid parameter."
class BITalino(object):
"""
:param macAddress: MAC address or serial port for the bluetooth device
:type macAddress: str
:raises Exception: invalid MAC address or serial port
Connects to the bluetooth device with the MAC address or serial port provided.
Possible values for parameter *macAddress*:
* MAC address: e.g. ``00:0a:95:9d:68:16``
* Serial port - device name: depending on the operating system. e.g. ``COM3`` on Windows; ``/dev/tty.bitalino-DevB`` on Mac OS X; ``/dev/ttyUSB0`` on GNU/Linux.
"""
def __init__(self, macAddress):
    """
    :param macAddress: MAC address or serial port for the bluetooth device
    :type macAddress: str
    :raises Exception: invalid MAC address or serial port

    Connects to the device. A full MAC address (e.g. ``00:0a:95:9d:68:16``)
    selects a direct RFCOMM bluetooth connection (Windows/Linux only);
    a serial-port name (``COM3`` on Windows, ``/dev/...`` elsewhere)
    selects a virtual serial connection at 115200 baud.
    """
    # Un-Pythonic trailing semicolons and the throwaway match variable of
    # the original were removed; the matching behavior is unchanged.
    mac_regex = re.compile('^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$')
    if mac_regex.match(macAddress):
        if platform.system() == 'Windows' or platform.system() == 'Linux':
            # Imported lazily so the module loads without pybluez installed.
            import bluetooth
            self.socket = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
            self.socket.connect((macAddress, 1))
            self.serial = False
        else:
            raise Exception(ExceptionCode.INVALID_PLATFORM)
    elif (macAddress[0:3] == 'COM' and platform.system() == 'Windows') or (
            macAddress[0:5] == '/dev/' and platform.system() != 'Windows'):
        # 115200 baud matches the BITalino firmware's fixed rate.
        self.socket = serial.Serial(macAddress, 115200)
        self.serial = True
    else:
        raise Exception(ExceptionCode.INVALID_ADDRESS)
    self.started = False
    self.macAddress = macAddress
def start(self, SamplingRate=1000, analogChannels=[0, 1, 2, 3, 4, 5]):
"""
:param SamplingRate: sampling frequency (Hz)
:type SamplingRate: int
:param analogChannels: channels to be acquired
:type analogChannels: array, tuple or list of int
:raises Exception: device already in acquisition (not IDLE)
:raises Exception: sampling rate not valid
:raises Exception: list of analog channels not valid
Sets the sampling rate and starts acquisition in the analog channels set.
Setting the sampling rate and starting the acquisition implies the use of the method :meth:`send`.
Possible values for parameter *SamplingRate*:
* 1
* 10
* 100
* 1000
Possible values, types, configurations and examples for parameter *analogChannels*:
=============== ====================================
Values 0, 1, 2, 3, 4, 5
Types list ``[]``, tuple ``()``, array ``[[]]``
Configurations Any number of channels, identified by their value
Examples ``[0, 3, 4]``, ``(1, 2, 3, 5)``
=============== ====================================
.. note:: To obtain the samples, use the method :meth:`read`.
"""
if (self.started == False):
if int(SamplingRate) not in [1, 10, 100, 1000]:
raise Exception(ExceptionCode.INVALID_PARAMETER)
if int(SamplingRate) == 1000:
commandSRate = 3
elif int(SamplingRate) == 100:
commandSRate = 2
elif int(SamplingRate) == 10:
commandSRate = 1
elif int(SamplingRate) == 1:
commandSRate = 0
if isinstance(analogChannels, list):
analogChannels = analogChannels
elif isinstance(analogChannels, tuple):
analogChannels = list(analogChannels)
elif isinstance(analogChannels, numpy.ndarray):
analogChannels = analogChannels.astype('int').tolist()
else:
raise Exception(ExceptionCode.INVALID_PARAMETER)
analogChannels = list(set(analogChannels))
if len(analogChannels) == 0 or len(analogChannels) > 6 or any(
[item not in range(6) or type(item) != int for item in analogChannels]):
raise Exception(ExceptionCode.INVALID_PARAMETER)
self.send((commandSRate << 6) | 0x03)
commandStart = 1
for i in analogChannels:
commandStart = commandStart | 1 << (2 + i)
self.send((commandSRate << 6) | 0x03)
self.send(commandStart)
self.started = True
self.analogChannels = analogChannels
else:
raise Exception(ExceptionCode.DEVICE_NOT_IDLE)
def stop(self):
"""
:raises Exception: device not in acquisition (IDLE)
Stops the acquisition. Stoping the acquisition implies the use of the method :meth:`send`.
"""
if (self.started):
self.send(0)
self.started = False
self.version()
else:
raise Exception(ExceptionCode.DEVICE_NOT_IN_ACQUISITION)
def close(self):
"""
Closes the bluetooth or serial port socket.
"""
self.socket.close()
def send(self, data):
"""
Sends a command to the BITalino device.
"""
time.sleep(0.1)
if self.serial:
self.socket.write(chr(data))
else:
self.socket.send(chr(data))
def battery(self, value=0):
"""
:param value: threshold value
:type value: int
:raises Exception: device in acquisition (not IDLE)
:raises Exception: threshold value is invalid
Sets the battery threshold for the BITalino device. Setting the battery threshold implies the use of the method :meth:`send`.
Possible values for parameter *value*:
=============== ======= =====================
Range *value* Corresponding threshold (Volts)
=============== ======= =====================
Minimum *value* 0 3.4 Volts
Maximum *value* 63 3.8 Volts
=============== ======= =====================
"""
if (self.started == False):
if 0 <= int(value) <= 63:
commandBattery = int(value) << 2
self.send(commandBattery)
else:
raise Exception(ExceptionCode.INVALID_PARAMETER)
else:
raise Exception(ExceptionCode.DEVICE_NOT_IDLE)
def trigger(self, digitalArray=[0, 0, 0, 0]):
"""
:param digitalArray: array which acts on digital outputs according to the value: 0 or 1
:type digitalArray: array, tuple or list of int
:raises Exception: list of digital channel output is not valid
:raises Exception: device not in acquisition (IDLE)
Acts on digital output channels of the BITalino device. Triggering these digital outputs implies the use of the method :meth:`send`.
Each position of the array *digitalArray* corresponds to a digital output, in ascending order. Possible values, types, configurations and examples for parameter *digitalArray*:
=============== ====================================
Values 0 or 1
Types list ``[]``, tuple ``()``, array ``[[]]``
Configurations 4 values, one for each digital channel output
Examples ``[1, 0, 1, 0]``: Digital 0 and 2 will be set to 1 while Digital 1 and 3 will be set to 0
=============== ====================================
"""
if (self.started):
if isinstance(digitalArray, list):
digitalArray = digitalArray
elif isinstance(digitalArray, tuple):
digitalArray = list(digitalArray)
elif isinstance(digitalArray, numpy.ndarray):
digitalArray = digitalArray.astype('int').tolist()
else:
raise Exception(ExceptionCode.INVALID_PARAMETER)
pValues = [0, 1]
if len(digitalArray) != 4 or any([item not in pValues or type(item) != int for item in digitalArray]):
raise Exception(ExceptionCode.INVALID_PARAMETER)
data = 3
for i, j in enumerate(digitalArray):
data = data | j << (2 + i)
self.send(data)
else:
raise Exception(ExceptionCode.DEVICE_NOT_IN_ACQUISITION)
def read(self, nSamples=100):
"""
:param nSamples: number of samples to acquire
:type nSamples: int
:returns: array with the acquired data
:raises Exception: device not in acquisition (in IDLE)
:raises Exception: lost communication with the device
Acquires `nSamples` from BITalino. Reading samples from BITalino implies the use of the method :meth:`receive`.
Requiring a low number of samples (e.g. ``nSamples = 1``) may be computationally expensive; it is recommended to acquire batches of samples (e.g. ``nSamples = 100``).
The data acquired is organized in a matrix whose lines correspond to samples and the columns are as follows:
* Sequence Number
* 4 Digital Channels (always present)
* 1-6 Analog Channels (as defined in the :meth:`start` method)
Example matrix for ``analogChannels = [0, 1, 3]`` used in :meth:`start` method:
================== ========= ========= ========= ========= ======== ======== ========
Sequence Number* Digital 0 Digital 1 Digital 2 Digital 3 Analog 0 Analog 1 Analog 3
================== ========= ========= ========= ========= ======== ======== ========
0
1
(...)
15
0
1
(...)
================== ========= ========= ========= ========= ======== ======== ========
.. note:: *The sequence number overflows at 15
"""
if (self.started):
nChannels = len(self.analogChannels)
if nChannels <= 4:
number_bytes = int(math.ceil((12. + 10. * nChannels) / 8.))
else:
number_bytes = int(math.ceil((52. + 6. * (nChannels - 4)) / 8.))
dataAcquired = numpy.zeros((nSamples, 5 + nChannels))
for sample in range(nSamples):
Data = self.receive(number_bytes)
decodedData = list(struct.unpack(number_bytes * "B ", Data))
crc = decodedData[-1] & 0x0F
decodedData[-1] = decodedData[-1] & 0xF0
x = 0
for i in range(number_bytes):
for bit in range(7, -1, -1):
x = x << 1
if (x & 0x10):
x = x ^ 0x03
x = x ^ ((decodedData[i] >> bit) & 0x01)
if (crc == x & 0x0F):
dataAcquired[sample, 0] = decodedData[-1] >> 4
dataAcquired[sample, 1] = decodedData[-2] >> 7 & 0x01
dataAcquired[sample, 2] = decodedData[-2] >> 6 & 0x01
dataAcquired[sample, 3] = decodedData[-2] >> 5 & 0x01
dataAcquired[sample, 4] = decodedData[-2] >> 4 & 0x01
if nChannels > 0:
dataAcquired[sample, 5] = ((decodedData[-2] & 0x0F) << 6) | (decodedData[-3] >> 2)
if nChannels > 1:
dataAcquired[sample, 6] = ((decodedData[-3] & 0x03) << 8) | decodedData[-4]
if nChannels > 2:
dataAcquired[sample, 7] = (decodedData[-5] << 2) | (decodedData[-6] >> 6)
if nChannels > 3:
dataAcquired[sample, 8] = ((decodedData[-6] & 0x3F) << 4) | (decodedData[-7] >> 4)
if nChannels > 4:
dataAcquired[sample, 9] = ((decodedData[-7] & 0x0F) << 2) | (decodedData[-8] >> 6)
if nChannels > 5:
dataAcquired[sample, 10] = decodedData[-8] & 0x3F
else:
raise Exception(ExceptionCode.CONTACTING_DEVICE)
return dataAcquired
else:
raise Exception(ExceptionCode.DEVICE_NOT_IN_ACQUISITION)
def version(self):
"""
:returns: str with the version of BITalino
:raises Exception: device in acquisition (not IDLE)
Retrieves the BITalino version. Retrieving the version implies the use of the methods :meth:`send` and :meth:`receive`.
"""
if (self.started == False):
self.send(7)
version_str = ''
while True:
version_str += self.receive(1)
if version_str[-1] == '\n' and 'BITalino' in version_str:
break
return version_str[version_str.index("BITalino"):-1]
else:
raise Exception(ExceptionCode.DEVICE_NOT_IDLE)
def receive(self, nbytes):
"""
:param nbytes: number of bytes to retrieve
:type nbytes: int
:return: string packed binary data
Retrieves `nbytes` from the BITalino device and returns it as a string pack with length of `nbytes`.
"""
reader = self.socket.read if self.serial else self.socket.recv
data = ''
while len(data) < nbytes:
data += reader(1)
return data
if __name__ == '__main__':
    # Example usage: connect to a BITalino, configure it, acquire one batch
    # of samples, toggle the digital outputs and shut down cleanly.
    macAddress = '/dev/tty.bitalino-DevB'  # "98:D3:31:B1:84:2C"
    batteryThreshold = 30
    acqChannels = [0, 3]
    samplingRate = 1000
    nSamples = 10
    digitalOutput = [0, 0, 1, 1]
    # Connect to BITalino
    device = BITalino(macAddress)
    # Set battery threshold
    device.battery(batteryThreshold)
    # Read BITalino version.
    # print() with a single argument is valid on both Python 2 and 3
    # (the original `print x` statements were Python-2-only syntax).
    print(device.version())
    # Start Acquisition
    device.start(samplingRate, acqChannels)
    # Read samples
    print(device.read(nSamples))
    # Turn BITalino led on
    device.trigger(digitalOutput)
    # Stop acquisition
    device.stop()
    # Close connection
    device.close()
| chipimix/thesis | bitalino.py | Python | gpl-3.0 | 15,005 |
from __future__ import print_function
import os
import sys
import argparse
import pysam
def convert_sams2bams(sams):
    """
    Convert each sam file in a comma separated list to bam via samtools.

    :param sams: comma separated string of .sam file paths
    :return: comma separated string of the corresponding .bam file paths
    """
    separator = ","
    bams = list()
    for sam in sams.split(separator):
        # Swap only the file extension. The original used
        # sam.replace(".sam", ".bam"), which also corrupts any ".sam"
        # occurring inside the name (e.g. "my.sample.sam" -> "my.bample.bam").
        root, _ = os.path.splitext(sam)
        bam = root + ".bam"
        bams.append(bam)
        os.system("samtools view -Sbh {} > {}".format(sam, bam))
    return separator.join(bams)
def index_bams(bams):
    """
    Ensure every bam in a comma separated list has a samtools index.

    Bams that already have a .bai or .csi index are skipped; the rest are
    indexed with ``samtools index``. (The original code used ``return``
    instead of moving on to the next bam, so once one indexed bam was
    found every subsequent bam was silently left unindexed.)

    :param bams: comma separated string of .bam file paths
    """
    for bam in bams.split(","):
        index_files = [bam + '.bai', bam + '.csi']
        existing = [f for f in index_files if os.path.isfile(f)]
        if existing:  # already indexed: skip this bam, keep processing
            print("'{}' is indexed already: '{}'".format(bam, existing[0]))
            continue
        print("Indexing '{}'.".format(bam))
        os.system("samtools index " + bam)
def prepare_bams(options):
    """
    Convert any sam inputs to bam files (storing the results back on
    options.b1 / options.b2) and make sure every resulting bam is indexed.
    """
    # First pass: sam -> bam conversion for each sample group.
    for sam_attr, bam_attr in (('s1', 'b1'), ('s2', 'b2')):
        sam_value = getattr(options, sam_attr)
        if sam_value:
            setattr(options, bam_attr, convert_sams2bams(sam_value))
    # Second pass: index whichever bam lists are present.
    for bam_attr in ('b1', 'b2'):
        bam_value = getattr(options, bam_attr)
        if bam_value:
            index_bams(bam_value)
def file_check(string, expected_ext):
    """
    Validate that every comma separated file name exists and carries the
    expected extension (compared case-insensitively).

    :param string: comma separated file names
    :param expected_ext: extension such as ".bam" (with leading dot)
    :return: an error message string, or None when every name passes
    """
    expected_lower = expected_ext.lower()
    for path in string.split(','):
        if not os.path.isfile(path):
            return '{} is not a file'.format(path)
        ext = os.path.splitext(path)[1]
        if ext.lower() != expected_lower:
            return ('{} has extension {} but expected {}'
                    ' (ignoring case)'.format(path, ext, expected_ext))
    return None
def checkout(parser, options):
    """
    Validate the parsed command line arguments, reporting any problem via
    ``parser.error``.

    Sam and bam inputs are mutually exclusive alternatives; plotting needs
    either a coordinate (with gff3) or an events file together with its
    event type. Labels --l1/--l2 are required only for the sample groups
    actually used, otherwise defaults are filled in.

    :return: None
    """
    nothing_given = (options.s1 is None and options.b1 is None
                     and options.s2 is None and options.b2 is None)
    if nothing_given:
        parser.error("Not enough arguments! Please provide at least one of"
                     " --s1, --b1, --s2, --b2")
    any_sam = options.s1 is not None or options.s2 is not None
    any_bam = options.b1 is not None or options.b2 is not None
    if any_sam and any_bam:
        parser.error("Specify either sam files or bam files not both")
    if (options.events_file is None or options.event_type is None) and options.coordinate is None:
        parser.error("Not enough arguments! Please provide "
                     "1) coordinates with gff3 files. or "
                     "2) events files together with events type.")
    # Check each provided file list, remembering which sample groups are used.
    used = {1: False, 2: False}
    file_flags = (('s1', options.s1, '.sam', 'sam', 1),
                  ('s2', options.s2, '.sam', 'sam', 2),
                  ('b1', options.b1, '.bam', 'bam', 1),
                  ('b2', options.b2, '.bam', 'bam', 2))
    for flag, value, ext, kind, sample_no in file_flags:
        if value is None:
            continue
        used[sample_no] = True
        error = file_check(value, ext)
        if error:
            parser.error("Error checking {} files given as --{}: {}".format(
                kind, flag, error))
    # Labels: mandatory when the matching sample group was supplied.
    label_specs = (('l1', used[1], '--s1 or --b1', 'DefaultLabel1'),
                   ('l2', used[2], '--s2 or --b2', 'DefaultLabel2'))
    for label_attr, was_used, flag_pair, default in label_specs:
        if getattr(options, label_attr) is None:
            if was_used:
                parser.error('Must provide --{} if using {}'.format(
                    label_attr, flag_pair))
            else:
                setattr(options, label_attr, default)
    if options.events_file:
        error = file_check(options.events_file, ".txt")
        if error:
            parser.error("Error checking rMATS output given as -e: {}".format(
                error))
def conf_setting_file(options, gene_no_str=None, gene_symbol=None, events_name_level=None, id_str=None):
    """
    Write a sashimi_plot settings file describing the bam inputs, plot
    options, colors and per-sample labels.

    ``gene_no_str`` is None for the coordinate workflow (settings written
    under ``options.sashimi_path``); the events-file workflow passes the
    ``<gene>_<event number>`` suffix of the per-event index directory,
    along with ``gene_symbol``/``events_name_level``/``id_str`` used to
    build inclusion-level sample labels. (The original docstring stated
    this mapping the other way around.)
    """
    if gene_no_str is not None:
        setting_file = open(os.path.join(options.out_dir, "Sashimi_index_" + gene_no_str,
                                         "sashimi_plot_settings.txt"), 'w')
    else:
        setting_file = open(os.path.join(options.sashimi_path, "sashimi_plot_settings.txt"), 'w')
    setting_file.write("[data]\n")
    sam_dir = ""  # since bam path is accessible, we don't need to provide a prefix particularly
    setting_file.write("bam_prefix = " + sam_dir + "\n")
    setting_file.write("miso_prefix = " + sam_dir + "\n")
    # setting string for replicates
    bam_files_arr1 = []
    bam_files_arr2 = []
    sample_1 = list()
    sample_2 = list()
    # sam files have already been converted into bam files and stored in options.b1&b2
    if options.b1:
        sample_1 = options.b1.split(',')
    if options.b2:
        sample_2 = options.b2.split(',')
    for s in sample_1:  # sample1
        bam_files_arr1.append('\"' + s + '\"')
    for s in sample_2:  # sample2
        bam_files_arr2.append('\"' + s + '\"')
    setting_bam_str = ','.join(bam_files_arr1 + bam_files_arr2)
    len_sample1 = len(sample_1)
    len_sample2 = len(sample_2)
    setting_file.write("bam_files = [{0}]\n".format(setting_bam_str))
    setting_file.write("miso_files = [{0}]\n".format(setting_bam_str))
    setting_file.write("[plotting]\n")
    # use a dict to store the configuration
    setting = {}
    # Taller figure when sample 1 has many replicates.
    if len_sample1 < 5:
        setting['fig_height'] = 7
    else:
        setting["fig_height"] = 14
    setting["fig_width"] = 8
    setting["exon_scale"] = str(options.exon_s)
    setting["intron_scale"] = str(options.intron_s)
    setting["logged"] = False
    setting["font_size"] = options.font_size
    setting["bar_posteriors"] = False
    setting["nyticks"] = 4
    # Coordinate mode gets more x ticks than the per-event mode.
    if gene_no_str is None:
        setting["nxticks"] = 11
    else:
        setting["nxticks"] = 6
    setting["show_ylabel"] = True
    setting["show_xlabel"] = True
    setting["plot_title"] = "\"gene symbol\""
    setting["plot_label"] = "plot_label"
    setting["show_posteriors"] = False
    setting["number_junctions"] = not options.hide_number
    setting["resolution"] = ".5"
    setting["reverse_minus"] = True
    setting["min_counts"] = max(options.min_counts, 0)  # negative values clamped to 0
    setting["text_background"] = options.text_background
    if options.group_info is None:
        setting["group_info"] = False
    else:
        setting["group_info"] = True
    # NOTE: dict iteration order determines the line order in the file;
    # arbitrary on Python < 3.7, insertion order on newer interpreters.
    for item in setting:
        setting_file.write("{0} = {1}\n".format(item, setting[item]))
    # setting color
    setting_color_str = ""
    if options.color is None:
        # Defaults: red for sample 1 replicates, orange for sample 2.
        colors_arr1 = ['\"#CC0011\"'] * len_sample1
        colors_arr2 = ['\"#FF8800\"'] * len_sample2
        setting_color_str = ','.join(colors_arr1 + colors_arr2)
    else:
        colors_arr = ["\"{0}\"".format(c) for c in options.color.split(',')]
        setting_color_str = ','.join(colors_arr)
    setting_file.write("colors = [{0}]\n".format(setting_color_str))
    # setting label
    sample_labels_arr1 = []
    sample_labels_arr2 = []
    if gene_no_str is None:  # the case with coordinate
        for rr in range(0, len_sample1):
            sample_labels_arr1.append('\"{0}-{1}\"'.format(options.l1, str(rr + 1)))
        for rr in range(0, len_sample2):
            sample_labels_arr2.append('\"{0}-{1}\"'.format(options.l2, str(rr + 1)))
    else:  # the case with events file
        # events_name_level maps gene symbol -> "<IncLevel1>_<IncLevel2>",
        # each side being a comma separated per-replicate list.
        inc_level_str = events_name_level.get(gene_symbol)
        items = inc_level_str.split('_')
        inc_level1 = items[0]
        inc_level2 = items[1]
        inc_items1 = inc_level1.split(',')
        inc_items2 = inc_level2.split(',')
        warning_flag = False
        for rr in range(0, len_sample1):
            try:
                inc_1 = "{0:.2f}".format(float(inc_items1[rr]))
            except Exception:
                # 'NA' (or missing) inclusion level: label it as nan and warn.
                inc_1 = "{0:.2f}".format(float('nan'))
                warning_flag = True
            sample_labels_arr1.append('\"' + gene_symbol + ' ' + options.l1 + '-' + str(rr + 1) + ' IncLevel: '
                                      + inc_1 + '\"')
        for rr in range(0, len_sample2):
            try:
                inc_2 = "{0:.2f}".format(float(inc_items2[rr]))
            except Exception:
                inc_2 = "{0:.2f}".format(float('nan'))
                warning_flag = True
            sample_labels_arr2.append('\"' + gene_symbol + ' ' + options.l2 + '-' + str(rr + 1) + ' IncLevel: '
                                      + inc_2 + '\"')
        if warning_flag:
            print("Warning: The inclusion levels of Event '{}' contains"
                  " 'NA' value, which could lead to unexpected output."
                  .format(id_str), file=sys.stderr)
    setting_label_str = ','.join(sample_labels_arr1 + sample_labels_arr2)
    setting_file.write("sample_labels = [{0}]\n".format(setting_label_str))
    setting_file.close()
def parse_gff3_record(record):
    """
    Parse a single tab separated GFF3 record line.

    :param record: one GFF3 line with the 9 standard columns
    :return: dict mapping the standard GFF3 column names
             (seqid, source, type, start, end, score, strand, phase,
             attributes) to their string values
    :raises IndexError: if the record has fewer than 9 columns
    """
    fields = ('seqid', 'source', 'type', 'start', 'end',
              'score', 'strand', 'phase', 'attributes')
    eles = record.split('\t')
    # Index explicitly (rather than zip) so a short record still raises,
    # matching the original strict column expectations.
    return {name: eles[i] for i, name in enumerate(fields)}
def get_python_executable():
    """
    Resolve the interpreter used to launch the MISO helper scripts.

    Prefers the absolute path of the running interpreter; falls back to
    the bare name "python" (with a warning on stderr) when it cannot be
    determined.
    """
    python_executable = sys.executable
    if python_executable:
        return python_executable
    # Fallback
    print('Absolute path for current Python interpreter not found.'
          ' Using "python" without a full path to run scripts',
          file=sys.stderr)
    return 'python'
def plot_c(options, id_str):
    """
    Render a sashimi plot for the coordinate based workflow.

    Indexes the temporary gff3 under ``options.sashimi_path`` with MISO's
    index_gff.py, then invokes sashimi_plot.py on the event *id_str*,
    forwarding --group-info when provided.
    """
    base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    path_index_gff = os.path.join(base_dir, 'MISO/misopy/index_gff.py')
    path_sashimi_plot = os.path.join(base_dir, 'MISO/misopy/sashimi_plot/sashimi_plot.py')
    python_executable = get_python_executable()
    # Index the temporary gff3 in place.
    tmp_str = os.path.join(options.sashimi_path, "tmp.gff3")
    os.system("{} {} --index {} {}".format(python_executable, path_index_gff,
                                           tmp_str, options.sashimi_path))
    # Build the sashimi_plot invocation once, appending --group-info if set.
    setting_str = os.path.join(options.sashimi_path, "sashimi_plot_settings.txt")
    output_path = os.path.join(options.out_dir, "Sashimi_plot")
    command = "{} {} --plot-event \"{}\" {} {} --output-dir {}".format(
        python_executable, path_sashimi_plot, id_str,
        options.sashimi_path, setting_str, output_path)
    if options.group_info is not None:
        command += " --group-info {}".format(options.group_info)
    os.system(command)
    return
def plot_e(options, id_str, gene_symbol, events_no):
    """
    Render a sashimi plot for one rMATS event from the events-file workflow.

    Indexes the per-event gff3 under ``Sashimi_index_<gene>_<no>``, runs
    sashimi_plot.py on *id_str*, then renames the produced pdf to
    ``<no>_<gene>_<id>.pdf``.
    """
    base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    path_index_gff = os.path.join(base_dir, 'MISO/misopy/index_gff.py')
    path_sashimi_plot = os.path.join(base_dir, 'MISO/misopy/sashimi_plot/sashimi_plot.py')
    python_executable = get_python_executable()
    # Index the per-event gff3.
    out_index = os.path.join(options.out_dir,
                             "Sashimi_index_" + gene_symbol + '_' + str(events_no))
    tmp_str = os.path.join(out_index, "tmp.gff3")
    os.system("{} {} --index {} {}".format(python_executable, path_index_gff,
                                           tmp_str, out_index))
    # Build the plotting command; echo the base form (without any
    # --group-info suffix) to the log exactly as before.
    setting_str = os.path.join(out_index, "sashimi_plot_settings.txt")
    output_path = os.path.join(options.out_dir, "Sashimi_plot")
    base_command = "{} {} --plot-event \"{}\" {} {} --output-dir {}".format(
        python_executable, path_sashimi_plot, id_str, out_index,
        setting_str, output_path)
    print(base_command)
    if options.group_info is not None:
        os.system(base_command + " --group-info {}".format(options.group_info))
    else:
        os.system(base_command)
    # Prefix the generated pdf with the event number and gene symbol.
    old_file = os.path.join(options.out_dir, "Sashimi_plot", id_str + '.pdf')
    new_file = os.path.join(options.out_dir, "Sashimi_plot",
                            str(events_no) + '_' + gene_symbol + '_' + id_str + '.pdf')
    os.system("mv {0} {1}".format(old_file, new_file))
    return
def plot_with_coordinate(options):
    """
    Plot a sashimi diagram for an explicit genomic region.

    ``options.coordinate`` is expected as
    ``chr:strand:start:end:/path/to/file.gff3``. Matching mRNA/transcript
    and exon records on the requested strand are copied (transcripts
    clipped to the window and re-parented under a synthetic gene record)
    into ``tmp.gff3`` under ``options.sashimi_path``, which is then
    rendered via :func:`plot_c`.

    File handles are managed with ``with`` blocks; the original leaked an
    unused handle to SE.event.list.txt and left fo/w1 open on exceptions.
    """
    try:
        tmp_str = options.coordinate.split(':')
        in_chr = tmp_str[0]
        in_strand = tmp_str[1]
        in_coor_s = tmp_str[2]
        in_coor_e = int(tmp_str[3]) + 1  # make the end bound inclusive
        id_str = in_chr + "_" + in_coor_s + "_" + str(in_coor_e) + "_" + in_strand  # chr2_10101175_10104171_+
        gff3_file = tmp_str[4]
        # Touch the (empty) event list file expected downstream; the
        # original opened it and never wrote to or closed it.
        with open(os.path.join(options.sashimi_path, "SE.event.list.txt"), 'w'):
            pass
        with open(gff3_file, 'r') as fo, \
                open(os.path.join(options.sashimi_path, "tmp.gff3"), 'w') as w1:
            # Synthetic gene record spanning the requested window.
            w1.write("%s\tensGene\tgene\t%s\t%s\t.\t%s\t.\tID=%s;Name=%s\n" %
                     (in_chr, in_coor_s, in_coor_e, in_strand, id_str, id_str))
            for line in fo:
                if line.startswith('#'):
                    continue
                items = line.split("\t")
                item_chr = items[0]
                if in_chr != item_chr:
                    continue
                item_type = items[2]
                is_mrna_or_transcript = item_type in ["mRNA", "transcript"]
                if is_mrna_or_transcript or item_type == "exon":
                    coor_s = items[3]
                    coor_e = items[4]
                    strand = items[6]
                    annot_str = items[8].strip()
                    # Keep records on the requested strand: exons entirely
                    # inside the window, transcripts merely overlapping it.
                    if (in_strand == strand
                            and ((item_type == 'exon'
                                  and int(in_coor_s) <= int(coor_s)
                                  and int(coor_e) <= int(in_coor_e))
                                 or (is_mrna_or_transcript
                                     and int(coor_s) < int(in_coor_e)
                                     and int(coor_e) > int(in_coor_s)))):
                        if is_mrna_or_transcript:
                            # Clip the transcript to the window and re-parent
                            # it under the synthetic gene record.
                            if int(coor_s) < int(in_coor_s):
                                coor_s = in_coor_s
                            if int(coor_e) > int(in_coor_e):
                                coor_e = in_coor_e
                            annot_str = annot_str.replace('Parent', 'Note')
                            w1.write("%s\tensGene\t%s\t%s\t%s\t.\t%s\t.\tParent=%s;%s\n" %
                                     (item_chr, item_type, coor_s, coor_e, strand, id_str, annot_str))
                        if item_type == "exon":
                            w1.write("%s\tensGene\t%s\t%s\t%s\t.\t%s\t.\t%s\n" %
                                     (item_chr, item_type, coor_s, coor_e, strand, annot_str))
        try:
            conf_setting_file(options)
        except Exception as e:
            print(e)
            print("There is an exception in preparing coordinate setting file")
            raise
        plot_c(options, id_str)
    except Exception as e:
        print(e)
        print("There is an exception in plot_with_coordinate")
        raise
    return
class EventCoor(object):
    """
    Holds the exon coordinates and inclusion levels parsed from one line of
    an rMATS events file; the column layout depends on the event type.
    """
    def __init__(self, event_type, items):
        # Start columns get +1 (presumably converting rMATS 0-based starts
        # to 1-based gff3 coordinates -- confirm against the rMATS output
        # format). MXE rows carry an extra exon pair, shifting the
        # inclusion-level columns from 20/21 to 22/23.
        if event_type == "MXE":
            self.e1st_s = str(int(items[5]) + 1)   # 1st alternative exon
            self.e1st_e = items[6]
            self.e2st_s = str(int(items[7]) + 1)   # 2nd alternative exon
            self.e2st_e = items[8]
            self.up_s = str(int(items[9]) + 1)     # upstream exon
            self.up_e = items[10]
            self.dn_s = str(int(items[11]) + 1)    # downstream exon
            self.dn_e = items[12]
            self.inc_level1 = items[22] # IncLevel1
            self.inc_level2 = items[23] # IncLevel2
        elif event_type == "SE" or event_type == "RI":
            self.se_s = str(int(items[5]) + 1)     # skipped/retained exon
            self.se_e = items[6]
            self.up_s = str(int(items[7]) + 1)     # upstream exon
            self.up_e = items[8]
            self.dn_s = str(int(items[9]) + 1)     # downstream exon
            self.dn_e = items[10]
            self.inc_level1 = items[20] # IncLevel1
            self.inc_level2 = items[21] # IncLevel2
        else: # A3SS or A5SS
            self.lo_s = str(int(items[5]) + 1) # long
            self.lo_e = items[6]
            self.sh_s = str(int(items[7]) + 1) # short
            self.sh_e = items[8]
            self.fl_s = str(int(items[9]) + 1) # flanking
            self.fl_e = items[10]
            self.inc_level1 = items[20] # IncLevel1
            self.inc_level2 = items[21] # IncLevel2
        # Filled in by the generate_in_* methods below.
        self.name_str = ''
        self.id_str = ''
    def generate_in_positive_order(self, seq_chr, gene_symbol, strand, event_type):
        # Build the '@'-joined MISO event id with exons listed 5'->3'
        # for a plus-strand event, then prefix the gene symbol for name_str.
        if event_type == 'MXE':
            self.id_str = (seq_chr + "_" + self.up_s + "_" + self.up_e + "_" + strand + "@" +
                           seq_chr + "_" + self.e1st_s + "_" + self.e1st_e + "_" + strand + "@" +
                           seq_chr + "_" + self.e2st_s + "_" + self.e2st_e + "_" + strand + "@" +
                           seq_chr + "_" + self.dn_s + "_" + self.dn_e + "_" + strand)
        elif event_type == 'A5SS':
            self.id_str = (seq_chr + "_" + self.sh_s + "_" + self.sh_e + "_" + strand + "@" +
                           seq_chr + "_" + self.lo_s + "_" + self.lo_e + "_" + strand + "@" +
                           seq_chr + "_" + self.fl_s + "_" + self.fl_e + "_" + strand)
        elif event_type == 'A3SS':
            self.id_str = (seq_chr + "_" + self.fl_s + "_" + self.fl_e + "_" + strand + "@" +
                           seq_chr + "_" + self.lo_s + "_" + self.lo_e + "_" + strand + "@" +
                           seq_chr + "_" + self.sh_s + "_" + self.sh_e + "_" + strand)
        elif event_type == 'SE' or event_type == 'RI':
            self.id_str = (seq_chr + "_" + self.up_s + "_" + self.up_e + "_" + strand + "@" +
                           seq_chr + "_" + self.se_s + "_" + self.se_e + "_" + strand + "@" +
                           seq_chr + "_" + self.dn_s + "_" + self.dn_e + "_" + strand)
        self.name_str = gene_symbol + "_" + self.id_str
    def generate_in_reversed_order(self, seq_chr, gene_symbol, strand, event_type):
        # Same as above but with the exon order flipped for minus-strand
        # events; for A3SS/A5SS this swaps their roles (see inline notes).
        if event_type == 'MXE':
            self.id_str = (seq_chr + "_" + self.dn_s + "_" + self.dn_e + "_" + strand + "@" +
                           seq_chr + "_" + self.e2st_s + "_" + self.e2st_e + "_" + strand + "@" +
                           seq_chr + "_" + self.e1st_s + "_" + self.e1st_e + "_" + strand + "@" +
                           seq_chr + "_" + self.up_s + "_" + self.up_e + "_" + strand)
        elif event_type == 'A3SS': # the same as the positive order in A5SS
            self.id_str = (seq_chr + "_" + self.sh_s + "_" + self.sh_e + "_" + strand + "@" +
                           seq_chr + "_" + self.lo_s + "_" + self.lo_e + "_" + strand + "@" +
                           seq_chr + "_" + self.fl_s + "_" + self.fl_e + "_" + strand)
        elif event_type == 'A5SS': # the same as the positive order in A3SS
            self.id_str = (seq_chr + "_" + self.fl_s + "_" + self.fl_e + "_" + strand + "@" +
                           seq_chr + "_" + self.lo_s + "_" + self.lo_e + "_" + strand + "@" +
                           seq_chr + "_" + self.sh_s + "_" + self.sh_e + "_" + strand)
        elif event_type == 'SE' or event_type == 'RI':
            self.id_str = (seq_chr + "_" + self.dn_s + "_" + self.dn_e + "_" + strand + "@" +
                           seq_chr + "_" + self.se_s + "_" + self.se_e + "_" + strand + "@" +
                           seq_chr + "_" + self.up_s + "_" + self.up_e + "_" + strand)
        self.name_str = gene_symbol + "_" + self.id_str
def create_chr_aware_events_file(options):
    """
    The *.MATS.*.txt events file from rmats includes the prefix 'chr'
    for chromosomes. If the BAM files do not have the 'chr' prefix
    then remove the prefix from the events file. Consistent presence or
    absence of 'chr' is needed to search for reads in the BAM files.

    :return: path of the rewritten events file under options.sashimi_path
    """
    orig_events_file_path = options.events_file
    new_events_file_path = os.path.join(options.sashimi_path, 'events_file.txt')
    if options.b1:
        first_bam_path = options.b1.split(',')[0]
    else:
        first_bam_path = options.b2.split(',')[0]
    # Inspect only the first alignment. Default to "no chr prefix" when the
    # bam has no alignments (the original left sam_has_chr_prefix unassigned
    # in that case and raised NameError) or when the read is unmapped
    # (reference_name is None).
    sam_has_chr_prefix = False
    with pysam.AlignmentFile(first_bam_path, 'rb') as bam_file:
        for alignment in bam_file.fetch(until_eof=True):
            ref_name = alignment.reference_name
            sam_has_chr_prefix = (ref_name is not None
                                  and ref_name.startswith('chr'))
            break  # only check first alignment
    remove_chr_prefix = (not options.keep_event_chr_prefix
                         and (options.remove_event_chr_prefix
                              or not sam_has_chr_prefix))
    with open(orig_events_file_path, 'rt') as orig_handle:
        with open(new_events_file_path, 'wt') as new_handle:
            for i, line in enumerate(orig_handle):
                is_header_line = i == 0
                columns = line.rstrip('\n').split('\t')
                if is_header_line:
                    # Locate the chromosome column from the header.
                    chr_index = columns.index('chr')
                elif remove_chr_prefix:
                    orig_chr_column = columns[chr_index]
                    if orig_chr_column.startswith('chr'):
                        columns[chr_index] = orig_chr_column[3:]
                new_line = '\t'.join(columns)
                new_handle.write('{}\n'.format(new_line))
    return new_events_file_path
def plot_with_eventsfile(options):
    """
    if the user provides with event files, then plot in this way

    For every event row in the rMATS events file this writes a temporary
    GFF3 file describing two isoforms (A: inclusion, B: skipping), records
    the event name, builds the plot settings and renders the sashimi plot.
    """
    try:
        options.events_file = create_chr_aware_events_file(options)
        fo = open(options.events_file, 'r')
        # w2 collects the human-readable names of all plotted events.
        w2 = open(os.path.join(options.sashimi_path, options.event_type + ".event.list.txt"), 'w')
        events_name_level = {}
        events_no = 0
        for line in fo:
            if line.startswith('ID'):
                continue
            events_no += 1
            items = line.split("\t")
            gene_symbol = items[2]
            gene_symbol = gene_symbol.replace("\"", '')
            gene_no_str = gene_symbol + '_' + str(events_no)
            # One output directory per event, suffixed with the event number.
            sashimi_path = os.path.join(options.out_dir, "Sashimi_index_" + gene_symbol + '_' + str(events_no))
            if not os.path.isdir(sashimi_path):
                os.makedirs(sashimi_path)
            # w1 holds the per-event GFF3 annotation consumed by the plotter.
            w1 = open(os.path.join(sashimi_path, "tmp.gff3"), 'w')
            seq_chr = items[3]
            strand = items[4]
            # construct the events coordinates depending on the event type
            coor = EventCoor(options.event_type, items)
            events_name_level[gene_symbol] = coor.inc_level1 + "_" + coor.inc_level2
            if strand == '+':
                coor.generate_in_positive_order(seq_chr, gene_symbol, strand, options.event_type)
                w2.write("%s\n" % coor.name_str)
                if options.event_type == "SE" or options.event_type == "RI":
                    w1.write("%s\tSE\tgene\t%s\t%s\t.\t%s\t.\tID=%s;Name=%s\n" % (
                        seq_chr, coor.up_s, coor.dn_e, strand, coor.id_str, coor.name_str))
                    w1.write("%s\tSE\tmRNA\t%s\t%s\t.\t%s\t.\tID=%s.A;Parent=%s\n" % (
                        seq_chr, coor.up_s, coor.dn_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\tmRNA\t%s\t%s\t.\t%s\t.\tID=%s.B;Parent=%s\n" % (
                        seq_chr, coor.up_s, coor.dn_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.A.up;Parent=%s.A\n" % (
                        seq_chr, coor.up_s, coor.up_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.A.se;Parent=%s.A\n" % (
                        seq_chr, coor.se_s, coor.se_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.A.dn;Parent=%s.A\n" % (
                        seq_chr, coor.dn_s, coor.dn_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.B.up;Parent=%s.B\n" % (
                        seq_chr, coor.up_s, coor.up_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.B.dn;Parent=%s.B\n" % (
                        seq_chr, coor.dn_s, coor.dn_e, strand, coor.id_str, coor.id_str))
                elif options.event_type == "A3SS":  # flanking -- long/short TODO: change the ID name
                    w1.write("%s\tSE\tgene\t%s\t%s\t.\t%s\t.\tID=%s;Name=%s\n" % (
                        seq_chr, coor.fl_s, coor.sh_e, strand, coor.id_str, coor.name_str))
                    w1.write("%s\tSE\tmRNA\t%s\t%s\t.\t%s\t.\tID=%s.A;Parent=%s\n" % (
                        seq_chr, coor.fl_s, coor.sh_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\tmRNA\t%s\t%s\t.\t%s\t.\tID=%s.B;Parent=%s\n" % (
                        seq_chr, coor.fl_s, coor.sh_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.A.up;Parent=%s.A\n" % (
                        seq_chr, coor.fl_s, coor.fl_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.A.se;Parent=%s.A\n" % (
                        seq_chr, coor.sh_s, coor.sh_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.A.dn;Parent=%s.A\n" % (
                        seq_chr, coor.lo_s, coor.lo_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.B.up;Parent=%s.B\n" % (
                        seq_chr, coor.fl_s, coor.fl_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.B.dn;Parent=%s.B\n" % (
                        seq_chr, coor.sh_s, coor.sh_e, strand, coor.id_str, coor.id_str))
                elif options.event_type == "A5SS":  # short/long -- flanking TODO: change the ID name
                    w1.write("%s\tSE\tgene\t%s\t%s\t.\t%s\t.\tID=%s;Name=%s\n" % (
                        seq_chr, coor.sh_s, coor.fl_e, strand, coor.id_str, coor.name_str))
                    w1.write("%s\tSE\tmRNA\t%s\t%s\t.\t%s\t.\tID=%s.A;Parent=%s\n" % (
                        seq_chr, coor.sh_s, coor.fl_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\tmRNA\t%s\t%s\t.\t%s\t.\tID=%s.B;Parent=%s\n" % (
                        seq_chr, coor.sh_s, coor.fl_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.A.up;Parent=%s.A\n" % (
                        seq_chr, coor.sh_s, coor.sh_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.A.se;Parent=%s.A\n" % (
                        seq_chr, coor.lo_s, coor.lo_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.A.dn;Parent=%s.A\n" % (
                        seq_chr, coor.fl_s, coor.fl_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.B.up;Parent=%s.B\n" % (
                        seq_chr, coor.sh_s, coor.sh_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.B.dn;Parent=%s.B\n" % (
                        seq_chr, coor.lo_s, coor.lo_e, strand, coor.id_str, coor.id_str))
                elif options.event_type == "MXE":
                    w1.write("%s\tMXE\tgene\t%s\t%s\t.\t%s\t.\tID=%s;Name=%s\n" % (
                        seq_chr, coor.up_s, coor.dn_e, strand, coor.id_str, coor.name_str))
                    w1.write("%s\tMXE\tmRNA\t%s\t%s\t.\t%s\t.\tID=%s.A;Parent=%s\n" % (
                        seq_chr, coor.up_s, coor.dn_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tMXE\tmRNA\t%s\t%s\t.\t%s\t.\tID=%s.B;Parent=%s\n" % (
                        seq_chr, coor.up_s, coor.dn_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tMXE\texon\t%s\t%s\t.\t%s\t.\tID=%s.A.up;Parent=%s.A\n" % (
                        seq_chr, coor.up_s, coor.up_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tMXE\texon\t%s\t%s\t.\t%s\t.\tID=%s.A.1st;Parent=%s.A\n" % (
                        seq_chr, coor.e1st_s, coor.e1st_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tMXE\texon\t%s\t%s\t.\t%s\t.\tID=%s.A.dn;Parent=%s.A\n" % (
                        seq_chr, coor.dn_s, coor.dn_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tMXE\texon\t%s\t%s\t.\t%s\t.\tID=%s.B.up;Parent=%s.B\n" % (
                        seq_chr, coor.up_s, coor.up_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tMXE\texon\t%s\t%s\t.\t%s\t.\tID=%s.B.2st;Parent=%s.B\n" % (
                        seq_chr, coor.e2st_s, coor.e2st_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tMXE\texon\t%s\t%s\t.\t%s\t.\tID=%s.B.dn;Parent=%s.B\n" % (
                        seq_chr, coor.dn_s, coor.dn_e, strand, coor.id_str, coor.id_str))
            elif strand == '-':
                # On the minus strand the up/down exon roles are mirrored.
                coor.generate_in_reversed_order(seq_chr, gene_symbol, strand, options.event_type)
                w2.write("%s\n" % coor.name_str)
                if options.event_type == "SE" or options.event_type == "RI":
                    w1.write("%s\tSE\tgene\t%s\t%s\t.\t%s\t.\tID=%s;Name=%s\n" % (
                        seq_chr, coor.up_s, coor.dn_e, strand, coor.id_str, coor.name_str))
                    w1.write("%s\tSE\tmRNA\t%s\t%s\t.\t%s\t.\tID=%s.A;Parent=%s\n" % (
                        seq_chr, coor.up_s, coor.dn_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\tmRNA\t%s\t%s\t.\t%s\t.\tID=%s.B;Parent=%s\n" % (
                        seq_chr, coor.up_s, coor.dn_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.A.up;Parent=%s.A\n" % (
                        seq_chr, coor.dn_s, coor.dn_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.A.se;Parent=%s.A\n" % (
                        seq_chr, coor.se_s, coor.se_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.A.dn;Parent=%s.A\n" % (
                        seq_chr, coor.up_s, coor.up_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.B.up;Parent=%s.B\n" % (
                        seq_chr, coor.dn_s, coor.dn_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.B.dn;Parent=%s.B\n" % (
                        seq_chr, coor.up_s, coor.up_e, strand, coor.id_str, coor.id_str))
                elif options.event_type == "A5SS":  # flanking -- long/short TODO: change the ID name
                    w1.write("%s\tSE\tgene\t%s\t%s\t.\t%s\t.\tID=%s;Name=%s\n" % (
                        seq_chr, coor.fl_s, coor.sh_e, strand, coor.id_str, coor.name_str))
                    w1.write("%s\tSE\tmRNA\t%s\t%s\t.\t%s\t.\tID=%s.A;Parent=%s\n" % (
                        seq_chr, coor.fl_s, coor.sh_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\tmRNA\t%s\t%s\t.\t%s\t.\tID=%s.B;Parent=%s\n" % (
                        seq_chr, coor.fl_s, coor.sh_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.A.up;Parent=%s.A\n" % (
                        seq_chr, coor.fl_s, coor.fl_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.A.se;Parent=%s.A\n" % (
                        seq_chr, coor.sh_s, coor.sh_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.A.dn;Parent=%s.A\n" % (
                        seq_chr, coor.lo_s, coor.lo_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.B.up;Parent=%s.B\n" % (
                        seq_chr, coor.fl_s, coor.fl_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.B.dn;Parent=%s.B\n" % (
                        seq_chr, coor.sh_s, coor.sh_e, strand, coor.id_str, coor.id_str))
                elif options.event_type == "A3SS":  # short/long -- flanking TODO: change the ID name
                    w1.write("%s\tSE\tgene\t%s\t%s\t.\t%s\t.\tID=%s;Name=%s\n" % (
                        seq_chr, coor.sh_s, coor.fl_e, strand, coor.id_str, coor.name_str))
                    w1.write("%s\tSE\tmRNA\t%s\t%s\t.\t%s\t.\tID=%s.A;Parent=%s\n" % (
                        seq_chr, coor.sh_s, coor.fl_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\tmRNA\t%s\t%s\t.\t%s\t.\tID=%s.B;Parent=%s\n" % (
                        seq_chr, coor.sh_s, coor.fl_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.A.up;Parent=%s.A\n" % (
                        seq_chr, coor.sh_s, coor.sh_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.A.se;Parent=%s.A\n" % (
                        seq_chr, coor.lo_s, coor.lo_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.A.dn;Parent=%s.A\n" % (
                        seq_chr, coor.fl_s, coor.fl_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.B.up;Parent=%s.B\n" % (
                        seq_chr, coor.sh_s, coor.sh_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tSE\texon\t%s\t%s\t.\t%s\t.\tID=%s.B.dn;Parent=%s.B\n" % (
                        seq_chr, coor.lo_s, coor.lo_e, strand, coor.id_str, coor.id_str))
                elif options.event_type == "MXE":
                    w1.write("%s\tMXE\tgene\t%s\t%s\t.\t%s\t.\tID=%s;Name=%s\n" % (
                        seq_chr, coor.up_s, coor.dn_e, strand, coor.id_str, coor.name_str))
                    w1.write("%s\tMXE\tmRNA\t%s\t%s\t.\t%s\t.\tID=%s.A;Parent=%s\n" % (
                        seq_chr, coor.up_s, coor.dn_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tMXE\tmRNA\t%s\t%s\t.\t%s\t.\tID=%s.B;Parent=%s\n" % (
                        seq_chr, coor.up_s, coor.dn_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tMXE\texon\t%s\t%s\t.\t%s\t.\tID=%s.A.up;Parent=%s.A\n" % (
                        seq_chr, coor.dn_s, coor.dn_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tMXE\texon\t%s\t%s\t.\t%s\t.\tID=%s.A.1st;Parent=%s.A\n" % (
                        seq_chr, coor.e1st_s, coor.e1st_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tMXE\texon\t%s\t%s\t.\t%s\t.\tID=%s.A.dn;Parent=%s.A\n" % (
                        seq_chr, coor.up_s, coor.up_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tMXE\texon\t%s\t%s\t.\t%s\t.\tID=%s.B.up;Parent=%s.B\n" % (
                        seq_chr, coor.dn_s, coor.dn_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tMXE\texon\t%s\t%s\t.\t%s\t.\tID=%s.B.2st;Parent=%s.B\n" % (
                        seq_chr, coor.e2st_s, coor.e2st_e, strand, coor.id_str, coor.id_str))
                    w1.write("%s\tMXE\texon\t%s\t%s\t.\t%s\t.\tID=%s.B.dn;Parent=%s.B\n" % (
                        seq_chr, coor.up_s, coor.up_e, strand, coor.id_str, coor.id_str))
            w1.close()
            try:
                conf_setting_file(options, gene_no_str, gene_symbol, events_name_level, coor.id_str)
            except Exception as e:
                print(e)
                print("There is an exception in preparing coordinate setting file")
                raise
            # Render the sashimi plot for this event.
            plot_e(options, coor.id_str, gene_symbol, events_no)
        fo.close()
        w2.close()
    except Exception as e:
        print(e)
        print("There is an exception in plot_with_eventsfile")
        raise
def main():
    """Parse command line arguments, prepare output directories and dispatch
    plotting either by coordinate or by rMATS events file."""
    parser = argparse.ArgumentParser(prog="rmats2sashimiplot")
    required_group = parser.add_argument_group('Required')
    required_group.add_argument("-o", dest="out_dir", required=True,
                                help="The output directory.")
    label_group = parser.add_argument_group('Labels')
    label_group.add_argument("--l1", dest="l1",
                             help="The label for the first sample.")
    label_group.add_argument("--l2", dest="l2",
                             help="The label for the second sample.")
    # Two mutually exclusive input modes: rMATS events vs explicit coordinates.
    rmats_group_str = 'rMATS event input'
    coord_group_str = 'Coordinate and annotation input'
    rmats_group = parser.add_argument_group(
        rmats_group_str,
        'Use either ({}) or ({})'.format(rmats_group_str, coord_group_str))
    coordinate_group = parser.add_argument_group(
        coord_group_str,
        'Use either ({}) or ({})'.format(coord_group_str, rmats_group_str))
    rmats_group.add_argument(
        "-t", dest="event_type", choices=['SE', 'A5SS', 'A3SS', 'MXE', 'RI'],
        help=("Type of event from rMATS result used in the analysis."
              " 'SE': skipped exon,"
              " 'A5SS': alternative 5' splice site,"
              " 'A3SS' alternative 3' splice site,"
              " 'MXE': mutually exclusive exons,"
              " 'RI': retained intron."
              " (Only if using " + rmats_group_str + ")"))
    rmats_group.add_argument(
        "-e", dest="events_file",
        help=("The rMATS output event file (Only if using "
              + rmats_group_str + ")"))
    coordinate_group.add_argument(
        "-c", dest="coordinate",
        help=("The genome region coordinates and a GFF3 (not GTF) annotation"
              " file of genes and transcripts. The format is"
              " -c {chromosome}:{strand}:{start}:{end}:{/path/to/gff3}"
              " (Only if using " + coord_group_str + ")"))
    # Alignments may be given as SAM or BAM; templates keep help text in sync.
    sam_bam_group_str_template = '{} Files'
    sam_bam_group_desc_template = (
        'Mapping results for sample_1 & sample_2 in {0} format.'
        ' Replicates must be in a comma separated list.'
        ' (Only if using {0})')
    sam_bam_sample_arg_desc_template = (
        'sample_{num} {kind} files: s{num}_rep1.{kind}[,s{num}_rep2.{kind}]')
    group_sam = parser.add_argument_group(
        sam_bam_group_str_template.format('SAM'),
        sam_bam_group_desc_template.format('SAM'))
    group_sam.add_argument(
        "--s1", dest="s1",
        help=sam_bam_sample_arg_desc_template.format(num=1, kind='sam'))
    group_sam.add_argument(
        "--s2", dest="s2",
        help=sam_bam_sample_arg_desc_template.format(num=2, kind='sam'))
    group_bam = parser.add_argument_group(
        sam_bam_group_str_template.format('BAM'),
        sam_bam_group_desc_template.format('BAM'))
    group_bam.add_argument(
        "--b1", dest="b1",
        help=sam_bam_sample_arg_desc_template.format(num=1, kind='bam'))
    group_bam.add_argument(
        "--b2", dest="b2",
        help=sam_bam_sample_arg_desc_template.format(num=2, kind='bam'))
    optional_group = parser.add_argument_group('Optional')
    optional_group.add_argument(
        "--exon_s", dest="exon_s", type=int, default=1,
        help="How much to scale down exons. Default: %(default)s")
    optional_group.add_argument(
        "--intron_s", dest="intron_s", type=int, default=1,
        help=("How much to scale down introns. For example, --intron_s 5"
              " results in an intron with real length of 100 being plotted as"
              " 100/5 = 20. Default: %(default)s"))
    optional_group.add_argument(
        "--group-info", dest="group_info",
        help=('The path to a *.gf file which groups the replicates. One'
              ' sashimi plot will be generated for each group instead of'
              ' the default behavior of one plot per replicate'))
    optional_group.add_argument(
        "--min-counts", dest="min_counts", default=0,
        help=("Individual junctions with read count below --min-counts will"
              " be omitted from the plot. Default: %(default)s"))
    optional_group.add_argument(
        "--color", dest="color",
        help=('Specify a list of colors with one color per plot. Without'
              ' grouping there is one plot per replicate. With grouping there'
              ' is one plot per group: --color \'#CC0011[,#FF8800]\''))
    optional_group.add_argument("--font-size", dest="font_size", default=8,
                                help="Set the font size. Default: %(default)s")
    optional_group.add_argument(
        "--hide-number", dest="hide_number", action="store_true",
        help='Do not display the read count on the junctions')
    optional_group.add_argument(
        "--no-text-background", dest="text_background", action="store_false",
        help='Do not put a white box behind the junction read count')
    optional_group.add_argument(
        "--keep-event-chr-prefix", action="store_true",
        help='force the contig name in the provided events file to be used')
    optional_group.add_argument(
        "--remove-event-chr-prefix", action="store_true",
        help=('remove any leading "chr" from contig names in the provided'
              ' events file'))

    options = parser.parse_args()

    out_path = os.path.abspath(os.path.expanduser(options.out_dir))
    checkout(parser, options)  # 0.check out the arguments
    sashimi_path = os.path.join(out_path, "Sashimi_index")
    if not os.path.isdir(sashimi_path):
        os.makedirs(sashimi_path)
    options.out_dir = out_path
    options.sashimi_path = sashimi_path

    prepare_bams(options)  # 1.convert sam to bam format
    if options.events_file is None:  # 2.setting and plot
        plot_with_coordinate(options)
    else:
        plot_with_eventsfile(options)
# Script entry point when executed directly from the command line.
if __name__ == '__main__':
    main()
| Xinglab/rmats2sashimiplot | src/rmats2sashimiplot/rmats2sashimiplot.py | Python | gpl-2.0 | 44,567 |
import sqlalchemy
import sqlalchemy.orm
from sqlalchemy.ext.orderinglist import ordering_list
from Blue_Yellow.data.modelbase import SqlAlchemyBase
class Album(SqlAlchemyBase):
    """ORM model of a music album with an ordered collection of tracks."""
    __tablename__ = 'Album'

    # Surrogate primary key.
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True, autoincrement=True)
    # Album title; unique and indexed for fast lookups.
    name = sqlalchemy.Column(sqlalchemy.String, index=True, unique=True, nullable=False)
    # Release year of the album.
    year = sqlalchemy.Column(sqlalchemy.Integer, index=True)
    # Sale price; indexed to support price-based queries.
    price = sqlalchemy.Column(sqlalchemy.Float, index=True)
    # Location (path or URL) of the cover image -- TODO confirm with callers.
    album_image = sqlalchemy.Column(sqlalchemy.String)

    # Name of the table and back_populates = field
    # One-to-many relation kept sorted by Track.display_order; ordering_list
    # renumbers display_order automatically when tracks are inserted/removed.
    tracks = sqlalchemy.orm.relationship('Track', back_populates='album',
                                         order_by='Track.display_order',
                                         collection_class=ordering_list('display_order'),
                                         cascade='all')
| smitsgit/tptm | Blue_Yellow/Blue_Yellow/data/album.py | Python | apache-2.0 | 903 |
"""Finders try to find right section for passed module name"""
import importlib.machinery
import inspect
import os
import os.path
import re
import sys
import sysconfig
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
from fnmatch import fnmatch
from functools import lru_cache
from glob import glob
from pathlib import Path
from typing import Dict, Iterable, Iterator, List, Optional, Pattern, Sequence, Tuple, Type
from isort import sections
from isort.settings import KNOWN_SECTION_MAPPING, Config
from isort.utils import exists_case_sensitive
try:
from pipreqs import pipreqs
except ImportError:
pipreqs = None
try:
from pip_api import parse_requirements
except ImportError:
parse_requirements = None
try:
from requirementslib import Pipfile
except ImportError:
Pipfile = None
@contextmanager
def chdir(path: str) -> Iterator[None]:
    """Temporarily make *path* the working directory, restoring the
    previous working directory when the block exits (even on error)."""
    previous_dir = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(previous_dir)
class BaseFinder(metaclass=ABCMeta):
    """Abstract interface: map a module name to an import-section name."""

    def __init__(self, config: Config) -> None:
        self.config = config

    @abstractmethod
    def find(self, module_name: str) -> Optional[str]:
        """Return the section for *module_name*, or None if this finder
        cannot determine one."""
        raise NotImplementedError
class ForcedSeparateFinder(BaseFinder):
    """Places a module into its matching ``forced_separate`` group."""

    def find(self, module_name: str) -> Optional[str]:
        for forced_separate in self.config.forced_separate:
            # Ensure all forced_separate patterns will match to end of string
            # by anchoring them with a trailing wildcard when missing.
            path_glob = forced_separate if forced_separate.endswith("*") else forced_separate + "*"
            # Also accept relative spellings prefixed with a dot.
            if fnmatch(module_name, path_glob) or fnmatch(module_name, "." + path_glob):
                return forced_separate
        return None
class LocalFinder(BaseFinder):
    """Classifies explicit relative imports (leading dot) as local."""

    def find(self, module_name: str) -> Optional[str]:
        return "LOCALFOLDER" if module_name.startswith(".") else None
class KnownPatternFinder(BaseFinder):
    """Matches module names against the configured ``known_*`` patterns."""

    def __init__(self, config: Config) -> None:
        super().__init__(config)

        # Compiled (regex, section) pairs; built in reversed section order so
        # earlier-listed sections end up matched last (most specific wins).
        self.known_patterns: List[Tuple[Pattern[str], str]] = []
        for placement in reversed(config.sections):
            known_placement = KNOWN_SECTION_MAPPING.get(placement, placement).lower()
            config_key = f"known_{known_placement}"
            known_patterns = list(
                getattr(self.config, config_key, self.config.known_other.get(known_placement, []))
            )
            known_patterns = [
                pattern
                for known_pattern in known_patterns
                for pattern in self._parse_known_pattern(known_pattern)
            ]
            for known_pattern in known_patterns:
                # Translate glob-style wildcards into an anchored regex.
                regexp = "^" + known_pattern.replace("*", ".*").replace("?", ".?") + "$"
                self.known_patterns.append((re.compile(regexp), placement))

    def _parse_known_pattern(self, pattern: str) -> List[str]:
        """Expand pattern if identified as a directory and return found sub packages"""
        if pattern.endswith(os.path.sep):
            # Directory pattern: every sub-directory becomes its own package.
            patterns = [
                filename
                for filename in os.listdir(os.path.join(self.config.directory, pattern))
                if os.path.isdir(os.path.join(self.config.directory, pattern, filename))
            ]
        else:
            patterns = [pattern]

        return patterns

    def find(self, module_name: str) -> Optional[str]:
        # Try to find most specific placement instruction match (if any)
        parts = module_name.split(".")
        module_names_to_check = (".".join(parts[:first_k]) for first_k in range(len(parts), 0, -1))
        for module_name_to_check in module_names_to_check:
            for pattern, placement in self.known_patterns:
                if pattern.match(module_name_to_check):
                    return placement
        return None
class PathFinder(BaseFinder):
    """Classifies a module by locating it on the filesystem search paths
    (project root, virtualenv, conda env, stdlib and sys.path)."""

    def __init__(self, config: Config, path: str = ".") -> None:
        super().__init__(config)

        # restore the original import path (i.e. not the path to bin/isort)
        root_dir = os.path.abspath(path)
        src_dir = f"{root_dir}/src"
        self.paths = [root_dir, src_dir]

        # virtual env
        self.virtual_env = self.config.virtual_env or os.environ.get("VIRTUAL_ENV")
        if self.virtual_env:
            self.virtual_env = os.path.realpath(self.virtual_env)
        self.virtual_env_src = ""
        if self.virtual_env:
            self.virtual_env_src = f"{self.virtual_env}/src/"
            for venv_path in glob(f"{self.virtual_env}/lib/python*/site-packages"):
                if venv_path not in self.paths:
                    self.paths.append(venv_path)
            for nested_venv_path in glob(f"{self.virtual_env}/lib/python*/*/site-packages"):
                if nested_venv_path not in self.paths:
                    self.paths.append(nested_venv_path)
            for venv_src_path in glob(f"{self.virtual_env}/src/*"):
                if os.path.isdir(venv_src_path):
                    self.paths.append(venv_src_path)

        # conda
        self.conda_env = self.config.conda_env or os.environ.get("CONDA_PREFIX") or ""
        if self.conda_env:
            self.conda_env = os.path.realpath(self.conda_env)
            for conda_path in glob(f"{self.conda_env}/lib/python*/site-packages"):
                if conda_path not in self.paths:
                    self.paths.append(conda_path)
            for nested_conda_path in glob(f"{self.conda_env}/lib/python*/*/site-packages"):
                if nested_conda_path not in self.paths:
                    self.paths.append(nested_conda_path)

        # handle case-insensitive paths on windows
        self.stdlib_lib_prefix = os.path.normcase(sysconfig.get_paths()["stdlib"])
        if self.stdlib_lib_prefix not in self.paths:
            self.paths.append(self.stdlib_lib_prefix)

        # add system paths
        for system_path in sys.path[1:]:
            if system_path not in self.paths:
                self.paths.append(system_path)

    def find(self, module_name: str) -> Optional[str]:
        for prefix in self.paths:
            # Only the top-level package of a dotted name is resolved on disk.
            package_path = "/".join((prefix, module_name.split(".")[0]))
            path_obj = Path(package_path).resolve()
            is_module = (
                exists_case_sensitive(package_path + ".py")
                or any(
                    exists_case_sensitive(package_path + ext_suffix)
                    for ext_suffix in importlib.machinery.EXTENSION_SUFFIXES
                )
                or exists_case_sensitive(package_path + "/__init__.py")
            )
            is_package = exists_case_sensitive(package_path) and os.path.isdir(package_path)
            if is_module or is_package:
                if (
                    "site-packages" in prefix
                    or "dist-packages" in prefix
                    or (self.virtual_env and self.virtual_env_src in prefix)
                ):
                    return sections.THIRDPARTY
                elif os.path.normcase(prefix) == self.stdlib_lib_prefix:
                    return sections.STDLIB
                elif self.conda_env and self.conda_env in prefix:
                    return sections.THIRDPARTY
                for src_path in self.config.src_paths:
                    if src_path in path_obj.parents and not self.config.is_skipped(path_obj):
                        return sections.FIRSTPARTY

                if os.path.normcase(prefix).startswith(self.stdlib_lib_prefix):
                    return sections.STDLIB  # pragma: no cover - edge case for one OS. Hard to test.
                return self.config.default_section
        return None
class ReqsBaseFinder(BaseFinder):
    """Base for finders that classify modules using requirement files.

    Subclasses set ``enabled`` to True when their optional dependency is
    importable, and implement ``_get_names``/``_get_files_from_dir``.
    """

    enabled = False

    def __init__(self, config: Config, path: str = ".") -> None:
        super().__init__(config)
        self.path = path
        if self.enabled:
            # Only pay the cost of parsing requirement files when usable.
            self.mapping = self._load_mapping()
            self.names = self._load_names()

    @abstractmethod
    def _get_names(self, path: str) -> Iterator[str]:
        raise NotImplementedError

    @abstractmethod
    def _get_files_from_dir(self, path: str) -> Iterator[str]:
        raise NotImplementedError

    @staticmethod
    def _load_mapping() -> Optional[Dict[str, str]]:
        """Return list of mappings `package_name -> module_name`

        Example:
            django-haystack -> haystack
        """
        if not pipreqs:
            return None
        path = os.path.dirname(inspect.getfile(pipreqs))
        path = os.path.join(path, "mapping")
        with open(path) as f:
            mappings: Dict[str, str] = {}  # pypi_name: import_name
            for line in f:
                import_name, _, pypi_name = line.strip().partition(":")
                mappings[pypi_name] = import_name
            return mappings
            # return dict(tuple(line.strip().split(":")[::-1]) for line in f)

    def _load_names(self) -> List[str]:
        """Return list of thirdparty modules from requirements"""
        names = []
        for path in self._get_files():
            for name in self._get_names(path):
                names.append(self._normalize_name(name))
        return names

    @staticmethod
    def _get_parents(path: str) -> Iterator[str]:
        # Yield path and every ancestor directory up to the filesystem root.
        prev = ""
        while path != prev:
            prev = path
            yield path
            path = os.path.dirname(path)

    def _get_files(self) -> Iterator[str]:
        """Return paths to all requirements files"""
        path = os.path.abspath(self.path)
        if os.path.isfile(path):
            path = os.path.dirname(path)

        for path in self._get_parents(path):
            yield from self._get_files_from_dir(path)

    def _normalize_name(self, name: str) -> str:
        """Convert package name to module name

        Examples:
            Django -> django
            django-haystack -> django_haystack
            Flask-RESTFul -> flask_restful
        """
        if self.mapping:
            name = self.mapping.get(name.replace("-", "_"), name)
        return name.lower().replace("-", "_")

    def find(self, module_name: str) -> Optional[str]:
        # required lib not installed yet
        if not self.enabled:
            return None

        # Only the top-level package name is meaningful for requirements.
        module_name, _sep, _submodules = module_name.partition(".")
        module_name = module_name.lower()
        if not module_name:
            return None

        for name in self.names:
            if module_name == name:
                return sections.THIRDPARTY
        return None
class RequirementsFinder(ReqsBaseFinder):
    """Looks up third-party packages declared in requirements*.txt/.in files."""

    exts = (".txt", ".in")
    # Active only when the optional ``pip_api`` dependency is importable.
    enabled = bool(parse_requirements)

    def _get_files_from_dir(self, path: str) -> Iterator[str]:
        """Return paths to requirements files from passed dir."""
        yield from self._get_files_from_dir_cached(path)

    @classmethod
    @lru_cache(maxsize=16)
    def _get_files_from_dir_cached(cls, path: str) -> List[str]:
        results = []

        for fname in os.listdir(path):
            if "requirements" not in fname:
                continue
            full_path = os.path.join(path, fname)

            # *requirements*/*.{txt,in}
            if os.path.isdir(full_path):
                for subfile_name in os.listdir(full_path):
                    for ext in cls.exts:
                        if subfile_name.endswith(ext):
                            results.append(os.path.join(full_path, subfile_name))
                continue

            # *requirements*.{txt,in}
            if os.path.isfile(full_path):
                for ext in cls.exts:
                    if fname.endswith(ext):
                        results.append(full_path)
                        break

        return results

    def _get_names(self, path: str) -> Iterator[str]:
        """Load required packages from path to requirements file"""
        yield from self._get_names_cached(path)

    @classmethod
    @lru_cache(maxsize=16)
    def _get_names_cached(cls, path: str) -> List[str]:
        result = []

        # Parse from inside the file's directory so relative includes resolve.
        with chdir(os.path.dirname(path)):
            requirements = parse_requirements(path)
            for req in requirements.values():
                if req.name:
                    result.append(req.name)

        return result
class PipfileFinder(ReqsBaseFinder):
    """Looks up third-party packages declared in a Pipfile."""

    # Active only when the optional ``requirementslib`` dependency is importable.
    enabled = bool(Pipfile)

    def _get_names(self, path: str) -> Iterator[str]:
        """Yield the package names declared in the Pipfile found at *path*."""
        with chdir(path):
            project = Pipfile.load(path)
            for req in project.packages:
                yield req.name

    def _get_files_from_dir(self, path: str) -> Iterator[str]:
        """Yield *path* itself when it contains a Pipfile."""
        if "Pipfile" in os.listdir(path):
            yield path
class DefaultFinder(BaseFinder):
    """Fallback finder: always returns the configured default section."""

    def find(self, module_name: str) -> Optional[str]:
        return self.config.default_section
class FindersManager:
    """Runs a chain of finders and returns the first section match.

    Finder order matters: the first finder that returns a non-None section
    wins, with DefaultFinder last as the catch-all.
    """

    _default_finders_classes: Sequence[Type[BaseFinder]] = (
        ForcedSeparateFinder,
        LocalFinder,
        KnownPatternFinder,
        PathFinder,
        PipfileFinder,
        RequirementsFinder,
        DefaultFinder,
    )

    def __init__(
        self, config: Config, finder_classes: Optional[Iterable[Type[BaseFinder]]] = None
    ) -> None:
        self.verbose: bool = config.verbose

        if finder_classes is None:
            finder_classes = self._default_finders_classes
        finders: List[BaseFinder] = []
        for finder_cls in finder_classes:
            try:
                finders.append(finder_cls(config))
            except Exception as exception:
                # if one finder fails to instantiate isort can continue using the rest
                if self.verbose:
                    print(
                        (
                            f"{finder_cls.__name__} encountered an error ({exception}) during "
                            "instantiation and cannot be used"
                        )
                    )
        self.finders: Tuple[BaseFinder, ...] = tuple(finders)

    def find(self, module_name: str) -> Optional[str]:
        """Return the section for *module_name* from the first finder that
        identifies one, or None when all finders fail or error out."""
        for finder in self.finders:
            try:
                section = finder.find(module_name)
                if section is not None:
                    return section
            except Exception as exception:
                # isort has to be able to keep trying to identify the correct
                # import section even if one approach fails
                if self.verbose:
                    print(
                        f"{finder.__class__.__name__} encountered an error ({exception}) while "
                        f"trying to identify the {module_name} module"
                    )
        return None
| TeamSPoon/logicmoo_workspace | packs_web/butterfly/lib/python3.7/site-packages/isort/deprecated/finders.py | Python | mit | 14,768 |
# -*- coding: utf-8 -*-
__author__ = 'paronax'
class InvalidFacetError(ValueError):
    """Raised when an unknown facet name is supplied."""

    def __init__(self, message):
        # Build the full text once, then pass it to ValueError.__init__ so
        # str(exc) and exc.args carry the message (previously they were
        # empty); keep .message for callers that read it directly.
        full_message = 'Unknown facet : ' + message
        super(InvalidFacetError, self).__init__(full_message)
        self.message = full_message
class InvalidLanguageError(ValueError):
def __init__(self, message):
self.message = 'Unknown language : ' + message | paronax/pyratp | pyratp/exceptions.py | Python | gpl-2.0 | 299 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints document imprint
"""
__revision__ = "$Id$"
from invenio.utils.date import strftime, strptime
def format_element(bfo, place_label, publisher_label, date_label,
                   separator=', ', date_format=""):
    """
    Print imprint (Order: Name of publisher, place of publication and date of publication).

    Parameter <code>date_format</code> allows to specify the string representation of the output.
    The format string has the same behaviour as the strftime() function::
        <pre>Eg: 1982-09-24 07:32:00
        "%d %B %Y"   -> 24 September 1982
        "%I:%M"      -> 07:32
        </pre>
    @param separator: a separator between the elements of imprint
    @param place_label: a label to print before the publication place value
    @param publisher_label: a label to print before the publisher name
    @param date_label: a a label to print before the publication date
    @param date_format: date format
    @see: place.py, publisher.py, date.py, reprints.py, pagination.py
    """
    place = bfo.field('260__a')
    publisher = bfo.field('260__b')
    date = bfo.field('260__c')

    out = ""
    # "sine nomine" / "sine loco" are catalogue placeholders for an unknown
    # publisher / place and are deliberately not displayed.
    if publisher != "sine nomine":
        out += publisher_label + ' ' + publisher + separator
    if place != "sine loco":
        out += place_label + ' ' + place + separator
    if date:
        if date_format:
            try:
                date_time = strptime(date, "%Y-%m-%d")
                out += date_label + " " + strftime(date_format, date_time)
            except ValueError:
                # Not an ISO date: fall back to the raw field value.
                out += date_label + ' ' + date
        else:
            out += date_label + ' ' + date

    return out
| zenodo/invenio | invenio/modules/formatter/format_elements/bfe_imprint.py | Python | gpl-2.0 | 2,491 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2007-2009 Stephane Charette
# Copyright (C) 2009 Gary Burton
# Contribution 2009 by Bob Ham <rah@bash.sh>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2012 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
import os
from types import ClassType
from gramps.gen.ggettext import gettext as _
#-------------------------------------------------------------------------------
#
# GTK+ modules
#
#-------------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import GObject
#-------------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------------
from gramps.gen.config import config
from gramps.gen.plug.report import CATEGORY_GRAPHVIZ
from _reportdialog import ReportDialog
from _papermenu import PaperFrame
import gramps.gen.plug.docgen.graphdoc as graphdoc
from gramps.gen.plug.menu import Menu
#-------------------------------------------------------------------------------
#
# GraphvizFormatComboBox
#
#-------------------------------------------------------------------------------
class GraphvizFormatComboBox(Gtk.ComboBox):
    """
    Combo box listing the output formats available for Graphviz reports.

    The rows mirror graphdoc.FORMATS; every accessor below delegates to
    the FORMATS entry of the currently selected row.
    """
    def set(self, active=None):
        """Fill the combo with all Graphviz formats and preselect *active*.

        *active* is a format type string (e.g. 'pdf'); when it is not
        found, the first format is selected.
        """
        self.store = Gtk.ListStore(GObject.TYPE_STRING)
        self.set_model(self.store)
        renderer = Gtk.CellRendererText()
        self.pack_start(renderer, True)
        self.add_attribute(renderer, 'text', 0)
        selected = 0
        for position, fmt in enumerate(graphdoc.FORMATS):
            self.store.append(row=[fmt["descr"]])
            if fmt['type'] == active:
                selected = position
        self.set_active(selected)
    def get_label(self):
        """Return the human-readable description of the chosen format."""
        return graphdoc.FORMATS[self.get_active()]["descr"]
    def get_reference(self):
        """Return the document class implementing the chosen format."""
        return graphdoc.FORMATS[self.get_active()]["class"]
    def get_paper(self):
        # Graphviz output is always paper-based.
        return 1
    def get_styles(self):
        # Graphviz reports carry no text styles.
        return 0
    def get_ext(self):
        """Return the file extension (with leading dot) for the format."""
        return '.%s' % graphdoc.FORMATS[self.get_active()]['ext']
    def get_format_str(self):
        """Return the format's type identifier string."""
        return graphdoc.FORMATS[self.get_active()]["type"]
    def is_file_output(self):
        # Every Graphviz format writes to a file.
        return True
    def get_clname(self):
        """Return the format name used to persist the user's choice."""
        return graphdoc.FORMATS[self.get_active()]["type"]
#-----------------------------------------------------------------------
#
# GraphvizReportDialog
#
#-----------------------------------------------------------------------
class GraphvizReportDialog(ReportDialog):
    """A class of ReportDialog customized for graphviz based reports."""
    def __init__(self, dbstate, uistate, opt, name, translated_name):
        """Initialize a dialog to request that the user select options
        for a graphviz report. See the ReportDialog class for
        more information."""
        self.category = CATEGORY_GRAPHVIZ
        # Graphviz-wide options (layout engine, DPI, ...); they are merged
        # into the report's own option menu in init_options().
        self.__gvoptions = graphdoc.GVOptions()
        ReportDialog.__init__(self, dbstate, uistate, opt,
                              name, translated_name)
    def init_options(self, option_class):
        """Create (or adopt) the options object and graft the shared
        Graphviz options onto the report's option menu."""
        try:
            # option_class may be a class (instantiate it with the report
            # name and the database) or an already-built options instance.
            if (issubclass(option_class, object) or # New-style class
                isinstance(option_class, ClassType)): # Old-style class
                self.options = option_class(self.raw_name,
                                            self.dbstate.get_database())
        except TypeError:
            # issubclass() raises TypeError when option_class is not a
            # class at all: it is already an instance, use it as given.
            self.options = option_class
        menu = Menu()
        self.__gvoptions.add_menu_options(menu)
        # Copy every Graphviz option into the report's options object so
        # they are loaded and saved together with the report options.
        for category in menu.get_categories():
            for name in menu.get_option_names(category):
                option = menu.get_option(category, name)
                self.options.add_menu_option(category, name, option)
        self.options.load_previous_values()
    def init_interface(self):
        # Build the base dialog first, then sync the proposed file name's
        # extension with the initially selected output format.
        ReportDialog.init_interface(self)
        self.doc_type_changed(self.format_menu)
    def setup_format_frame(self):
        """Set up the format frame of the dialog."""
        self.format_menu = GraphvizFormatComboBox()
        self.format_menu.set(self.options.handler.get_format_name())
        self.format_menu.connect('changed', self.doc_type_changed)
        label = Gtk.Label(label="%s:" % _("Output Format"))
        label.set_alignment(0.0, 0.5)
        self.tbl.attach(label, 1, 2, self.row, self.row+1, Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL)
        self.tbl.attach(self.format_menu, 2, 4, self.row, self.row+1,
                        yoptions=Gtk.AttachOptions.SHRINK)
        self.row += 1
        # Checkbox: open the generated file in the desktop's default viewer.
        self.open_with_app = Gtk.CheckButton(_("Open with default viewer"))
        self.open_with_app.set_active(
            config.get('interface.open-with-default-viewer'))
        self.tbl.attach(self.open_with_app, 2, 4, self.row, self.row+1,
                        yoptions=Gtk.AttachOptions.SHRINK)
        self.row += 1
        # Propose a default output path carrying the format's extension;
        # reuse the previously chosen output name when one exists.
        ext = self.format_menu.get_ext()
        if ext is None:
            ext = ""
        else:
            spath = self.get_default_directory()
            if self.options.get_output():
                base = os.path.basename(self.options.get_output())
            else:
                base = "%s%s" % (self.report_name, ext)
            spath = os.path.normpath(os.path.join(spath, base))
            self.target_fileentry.set_filename(spath)
    def setup_report_options_frame(self):
        # Insert a "Paper Options" page in front of the standard report
        # option pages, pre-filled from the saved option handler values.
        self.paper_label = Gtk.Label(label='<b>%s</b>'%_("Paper Options"))
        self.paper_label.set_use_markup(True)
        handler = self.options.handler
        self.paper_frame = PaperFrame(
            handler.get_paper_metric(),
            handler.get_paper_name(),
            handler.get_orientation(),
            handler.get_margins(),
            handler.get_custom_paper_size(),
        )
        self.notebook.insert_page(self.paper_frame, self.paper_label, 0)
        self.paper_frame.show_all()
        ReportDialog.setup_report_options_frame(self)
    def doc_type_changed(self, obj):
        """
        This routine is called when the user selects a new file
        format for the report. It adjusts the various dialog sections
        to reflect the appropriate values for the currently selected
        file format. For example, a HTML document doesn't need any
        paper size/orientation options, but it does need a template
        file. Those changes are made here.
        """
        self.open_with_app.set_sensitive(True)
        # Rewrite the output file name so its extension matches the
        # newly selected format (or strip it when the format has none).
        fname = self.target_fileentry.get_full_path(0)
        (spath, ext) = os.path.splitext(fname)
        ext_val = obj.get_ext()
        if ext_val:
            fname = spath + ext_val
        else:
            fname = spath
        self.target_fileentry.set_filename(fname)
    def make_document(self):
        """Create a document of the type requested by the user.
        """
        # self.format is the document class chosen in parse_format_frame().
        pstyle = self.paper_frame.get_paper_style()
        self.doc = self.format(self.options, pstyle)
        self.options.set_document(self.doc)
    def on_ok_clicked(self, obj):
        """The user is satisfied with the dialog choices. Validate
        the output file name before doing anything else. If there is
        a file name, gather the options and create the report."""
        # Is there a filename? This should also test file permissions, etc.
        if not self.parse_target_frame():
            self.window.run()
        # Preparation
        self.parse_format_frame()
        self.parse_user_options()
        # Persist the paper-page settings into the option handler.
        self.options.handler.set_paper_metric(
            self.paper_frame.get_paper_metric())
        self.options.handler.set_paper_name(self.paper_frame.get_paper_name())
        self.options.handler.set_orientation(self.paper_frame.get_orientation())
        self.options.handler.set_margins(self.paper_frame.get_paper_margins())
        self.options.handler.set_custom_paper_size(
            self.paper_frame.get_custom_paper_size())
        # Create the output document.
        self.make_document()
        # Save options
        self.options.handler.save_options()
        config.set('interface.open-with-default-viewer',
                   self.open_with_app.get_active())
    def parse_format_frame(self):
        """Parse the format frame of the dialog. Save the user
        selected output format for later use."""
        self.format = self.format_menu.get_reference()
        format_name = self.format_menu.get_clname()
        self.options.handler.set_format_name(format_name)
    def setup_style_frame(self):
        """Required by ReportDialog"""
        # Graphviz reports have no text styles, so nothing to set up here.
        pass
| arunkgupta/gramps | gramps/gui/plug/report/_graphvizreportdialog.py | Python | gpl-2.0 | 9,927 |
#!/usr/bin/env python
#-*-coding: utf-8-*-
#
# Halil Kaya
# www.halilkaya.net
# kayahalil@gmail.com
# GPG: 0x0FA83C53
#
# wppy is licensed with GPL
#
import sys, os
from optparse import OptionParser
from random import randint

try:
    input = raw_input # Python 2
except NameError: # Python 3
    pass

# This file is a command-line installer script, not an importable module.
if __name__ != '__main__':
    print("Not a module!!!")
    sys.exit(1)

parser = OptionParser()
parser.add_option('-n', '--name', dest="folder_name", help="Wordpress folder name", action="store")
options, args = parser.parse_args()

if not options.folder_name:
    print("[ERROR] Enter a folder name to get Wordpress into. You can use -n or --name parameter to determine it")
    sys.exit(1)

# NOTE(review): everything below shells out via os.system with user-supplied
# strings concatenated into the command line -- a crafted folder name or
# answer can inject arbitrary shell commands. A hardening pass should move
# to subprocess.run([...], shell=False) with argument lists.
print("[INFO] Cloning the latest version of Wordpress")
os.system('wget -c http://wordpress.org/latest.zip')

print("[INFO] Extracting compressed file")
# Random temp folder name avoids clobbering an existing "wordpress" dir.
rndmfolder = 'wordpress' + str(randint(0, 99))
os.system('unzip latest.zip -d ' + rndmfolder)

print("[INFO] Folder renaming as", options.folder_name)
os.system('mv ' + rndmfolder + '/wordpress ' + options.folder_name)

print("[INFO] Deleting latest.zip file and temporary wordpress directory")
os.system('rm latest.zip')
os.system('rm -r ' + rndmfolder)

print("[INFO] Copying wp-config-sample.php as wp-config.php")
os.system('cp ' + options.folder_name + '/wp-config-sample.php ' + options.folder_name + '/wp-config.php')

print("\n")
site_url = input('Enter site URL that you are currently installing Wordpress on: http://')
database_name = input('Enter your database name: ')
database_username = input('Enter username for this database: ')
database_password = input('Enter password: ')
database_host = input('Enter hostname (leave blank for localhost): ')

print("[INFO] Configuring database...")
# In-place substitution of the wp-config placeholders (vi used as ersatz sed).
os.system('vi -c "%s/database_name_here/' + database_name + '/g|wq" ' + options.folder_name + '/wp-config.php')
os.system('vi -c "%s/username_here/' + database_username + '/g|wq" ' + options.folder_name + '/wp-config.php')
os.system('vi -c "%s/password_here/' + database_password + '/g|wq" ' + options.folder_name + '/wp-config.php')

if not database_name:
    print("[WARN] You have typed an empty database name!")
if not database_username:
    print("[WARN] You have typed an empty username for database!")
if not database_password:
    print("[WARN] You have not typed a password!")
if not database_host:
    database_host = 'localhost'
os.system('vi -c "%s/localhost/' + database_host + '/g|wq" ' + options.folder_name + '/wp-config.php')
print("[INFO] Configured database config file successfully!\n")

# BUGFIX: 'home' used to be assigned only when the URL lacked the 'http://'
# prefix, so typing a full URL crashed with NameError at the curl/print
# lines below. Always define it, prefixing only when needed.
if 'http://' not in site_url:
    home = 'http://' + site_url
else:
    home = site_url
# @todo: httpS?

user_name = input('Enter a username for admin panel: ')
password = input('Enter a password for admin panel: ')
weblog_title = input('Enter site title: ')
blogdescription = input('Enter blog description: ')
admin_email = input('Enter admin e-mail: ')

print("[INFO] Connecting to MySQL and creating and configuring database...")
print("Enter your MySQL password the line below again.\n")
if not database_password:
    os.system('echo "create database ' + database_name + '" | mysql -u ' + database_username)
else:
    os.system('echo "create database ' + database_name + '" | mysql -u ' + database_username + ' -p')

# Drive Wordpress' own web installer (step 2) with the collected answers.
os.system('curl --data "weblog_title=' + weblog_title + '&user_name=' + user_name + '&admin_password=' + password + '&admin_password2=' + password + '&admin_email=' + admin_email + '&blog_public=1&language=" ' + home + '/' + options.folder_name + '/wp-admin/install.php?step=2 -o "curl_output.txt"')
os.system('mv curl_output.txt ' + options.folder_name + '/')
print("[INFO] Your blog is ready! Just go to %s/%s" % (home, options.folder_name))
import pathlib
import attr
_LICENSES = {
"Glide": {
"name": "3dfx Glide License",
"url": "http://www.users.on.net/~triforce/glidexp/COPYING.txt",
},
"Abstyles": {
"name": "Abstyles License",
"url": "https://fedoraproject.org/wiki/Licensing/Abstyles",
},
"AFL-1.1": {
"name": "Academic Free License v1.1",
"url": "http://opensource.linux-mirror.org/licenses/afl-1.1.txt",
},
"AFL-1.2": {
"name": "Academic Free License v1.2",
"url": "http://opensource.linux-mirror.org/licenses/afl-1.2.txt",
},
"AFL-2.0": {
"name": "Academic Free License v2.0",
"url": "http://opensource.linux-mirror.org/licenses/afl-2.0.txt",
},
"AFL-2.1": {
"name": "Academic Free License v2.1",
"url": "http://opensource.linux-mirror.org/licenses/afl-2.1.txt",
},
"AFL-3.0": {
"name": "Academic Free License v3.0",
"url": "http://www.opensource.org/licenses/afl-3.0",
},
"AMPAS": {
"name": "Academy of Motion Picture Arts and Sciences BSD",
"url": "https://fedoraproject.org/wiki/Licensing/BSD#AMPASBSD",
},
"APL-1.0": {
"name": "Adaptive Public License 1.0",
"url": "http://www.opensource.org/licenses/APL-1.0",
},
"Adobe-Glyph": {
"name": "Adobe Glyph List License",
"url": "https://fedoraproject.org/wiki/Licensing/MIT#AdobeGlyph",
},
"APAFML": {
"name": "Adobe Postscript AFM License",
"url": "https://fedoraproject.org/wiki/Licensing/AdobePostscriptAFM",
},
"Adobe-2006": {
"name": "Adobe Systems Incorporated Source Code License Agreement",
"url": "https://fedoraproject.org/wiki/Licensing/AdobeLicense",
},
"AGPL-1.0": {
"name": "Affero General Public License v1.0",
"url": "http://www.affero.org/oagpl.html",
},
"Afmparse": {
"name": "Afmparse License",
"url": "https://fedoraproject.org/wiki/Licensing/Afmparse",
},
"Aladdin": {
"name": "Aladdin Free Public License",
"url": "http://pages.cs.wisc.edu/~ghost/doc/AFPL/6.01/Public.htm",
},
"ADSL": {
"name": "Amazon Digital Services License",
"url": "https://fedoraproject.org/wiki/Licensing/AmazonDigitalServicesLicense",
},
"AMDPLPA": {
"name": "AMD's plpa_map.c License",
"url": "https://fedoraproject.org/wiki/Licensing/AMD_plpa_map_License",
},
"ANTLR-PD": {
"name": "ANTLR Software Rights Notice",
"url": "http://www.antlr2.org/license.html",
},
"Apache-1.0": {
"name": "Apache License 1.0",
"url": "http://www.apache.org/licenses/LICENSE-1.0",
},
"Apache-1.1": {
"name": "Apache License 1.1",
"url": "http://apache.org/licenses/LICENSE-1.1",
},
"Apache-2.0": {
"name": "Apache License 2.0",
"url": "http://www.apache.org/licenses/LICENSE-2.0",
},
"AML": {
"name": "Apple MIT License",
"url": "https://fedoraproject.org/wiki/Licensing/Apple_MIT_License",
},
"APSL-1.0": {
"name": "Apple Public Source License 1.0",
"url": "https://fedoraproject.org/wiki/Licensing/Apple_Public_Source_License_1.0",
},
"APSL-1.2": {
"name": "Apple Public Source License 1.2",
"url": "http://www.samurajdata.se/opensource/mirror/licenses/apsl.php",
},
"APSL-2.0": {
"name": "Apple Public Source License 2.0",
"url": "http://www.opensource.apple.com/license/apsl/",
},
"Artistic-1.0": {
"name": "Artistic License 1.0",
"url": "http://opensource.org/licenses/Artistic-1.0",
},
"Artistic-1.0-Perl": {
"name": "Artistic License 1.0 (Perl)",
"url": "http://dev.perl.org/licenses/artistic.html",
},
"Artistic-1.0-cl8": {
"name": "Artistic License 1.0 w/clause 8",
"url": "http://opensource.org/licenses/Artistic-1.0",
},
"Artistic-2.0": {
"name": "Artistic License 2.0",
"url": "http://www.opensource.org/licenses/artistic-license-2.0",
},
"AAL": {
"name": "Attribution Assurance License",
"url": "http://www.opensource.org/licenses/attribution",
},
"Bahyph": {
"name": "Bahyph License",
"url": "https://fedoraproject.org/wiki/Licensing/Bahyph",
},
"Barr": {
"name": "Barr License",
"url": "https://fedoraproject.org/wiki/Licensing/Barr",
},
"Beerware": {
"name": "Beerware License",
"url": "https://fedoraproject.org/wiki/Licensing/Beerware",
},
"BitTorrent-1.1": {
"name": "BitTorrent Open Source License v1.1",
"url": "http://directory.fsf.org/wiki/License:BitTorrentOSL1.1",
},
"BSL-1.0": {
"name": "Boost Software License 1.0",
"url": "http://www.boost.org/LICENSE_1_0.txt",
},
"Borceux": {
"name": "Borceux license",
"url": "https://fedoraproject.org/wiki/Licensing/Borceux",
},
"BSD-2-Clause": {
"name": "BSD 2-clause \"Simplified\" License",
"url": "http://www.opensource.org/licenses/BSD-2-Clause",
},
"BSD-2-Clause-FreeBSD": {
"name": "BSD 2-clause FreeBSD License",
"url": "http://www.freebsd.org/copyright/freebsd-license.html",
},
"BSD-2-Clause-NetBSD": {
"name": "BSD 2-clause NetBSD License",
"url": "http://www.netbsd.org/about/redistribution.html#default",
},
"BSD-3-Clause": {
"name": "BSD 3-clause \"New\" or \"Revised\" License",
"url": "http://www.opensource.org/licenses/BSD-3-Clause",
},
"BSD-3-Clause-Clear": {
"name": "BSD 3-clause Clear License",
"url": "http://labs.metacarta.com/license-explanation.html#license",
},
"BSD-4-Clause": {
"name": "BSD 4-clause \"Original\" or \"Old\" License",
"url": "http://directory.fsf.org/wiki/License:BSD_4Clause",
},
"BSD-Protection": {
"name": "BSD Protection License",
"url": "https://fedoraproject.org/wiki/Licensing/BSD_Protection_License",
},
"BSD-3-Clause-Attribution": {
"name": "BSD with attribution",
"url": "https://fedoraproject.org/wiki/Licensing/BSD_with_Attribution",
},
"0BSD": {
"name": "BSD Zero Clause License",
"url": "http://landley.net/toybox/license.html ",
},
"BSD-4-Clause-UC": {
"name": "BSD-4-Clause (University of California-Specific)",
"url": "http://www.freebsd.org/copyright/license.html",
},
"bzip2-1.0.5": {
"name": "bzip2 and libbzip2 License v1.0.5",
"url": "http://bzip.org/1.0.5/bzip2-manual-1.0.5.html",
},
"bzip2-1.0.6": {
"name": "bzip2 and libbzip2 License v1.0.6",
"url": "https://github.com/asimonov-im/bzip2/blob/master/LICENSE",
},
"Caldera": {
"name": "Caldera License",
"url": "http://www.lemis.com/grog/UNIX/ancient-source-all.pdf",
},
"CECILL-1.0": {
"name": "CeCILL Free Software License Agreement v1.0",
"url": "http://www.cecill.info/licences/Licence_CeCILL_V1-fr.html",
},
"CECILL-1.1": {
"name": "CeCILL Free Software License Agreement v1.1",
"url": "http://www.cecill.info/licences/Licence_CeCILL_V1.1-US.html",
},
"CECILL-2.0": {
"name": "CeCILL Free Software License Agreement v2.0",
"url": "http://www.cecill.info/licences/Licence_CeCILL_V2-fr.html",
},
"CECILL-2.1": {
"name": "CeCILL Free Software License Agreement v2.1",
"url": "http://opensource.org/licenses/CECILL-2.1",
},
"CECILL-B": {
"name": "CeCILL-B Free Software License Agreement",
"url": "http://www.cecill.info/licences/Licence_CeCILL-B_V1-fr.html",
},
"CECILL-C": {
"name": "CeCILL-C Free Software License Agreement",
"url": "http://www.cecill.info/licences/Licence_CeCILL-C_V1-fr.html",
},
"ClArtistic": {
"name": "Clarified Artistic License",
"url": "http://www.ncftp.com/ncftp/doc/LICENSE.txt",
},
"MIT-CMU": {
"name": "CMU License",
"url": "https://fedoraproject.org/wiki/Licensing:MIT?rd=Licensing/MIT#CMU_Style",
},
"CNRI-Jython": {
"name": "CNRI Jython License",
"url": "http://www.jython.org/license.html",
},
"CNRI-Python": {
"name": "CNRI Python License",
"url": "http://www.opensource.org/licenses/CNRI-Python",
},
"CNRI-Python-GPL-Compatible": {
"name": "CNRI Python Open Source GPL Compatible License Agreement",
"url": "http://www.python.org/download/releases/1.6.1/download_win/",
},
"CPOL-1.02": {
"name": "Code Project Open License 1.02",
"url": "http://www.codeproject.com/info/cpol10.aspx",
},
"CDDL-1.0": {
"name": "Common Development and Distribution License 1.0",
"url": "http://www.opensource.org/licenses/cddl1",
},
"CDDL-1.1": {
"name": "Common Development and Distribution License 1.1",
"url": "http://glassfish.java.net/public/CDDL+GPL_1_1.html",
},
"CPAL-1.0": {
"name": "Common Public Attribution License 1.0",
"url": "http://www.opensource.org/licenses/CPAL-1.0",
},
"CPL-1.0": {
"name": "Common Public License 1.0",
"url": "http://opensource.org/licenses/CPL-1.0",
},
"CATOSL-1.1": {
"name": "Computer Associates Trusted Open Source License 1.1",
"url": "http://opensource.org/licenses/CATOSL-1.1",
},
"Condor-1.1": {
"name": "Condor Public License v1.1",
"url": "http://research.cs.wisc.edu/condor/license.html#condor",
},
"CC-BY-1.0": {
"name": "Creative Commons Attribution 1.0",
"url": "https://creativecommons.org/licenses/by/1.0/",
},
"CC-BY-2.0": {
"name": "Creative Commons Attribution 2.0",
"url": "https://creativecommons.org/licenses/by/2.0/",
},
"CC-BY-2.5": {
"name": "Creative Commons Attribution 2.5",
"url": "https://creativecommons.org/licenses/by/2.5/",
},
"CC-BY-3.0": {
"name": "Creative Commons Attribution 3.0",
"url": "https://creativecommons.org/licenses/by/3.0/",
},
"CC-BY-4.0": {
"name": "Creative Commons Attribution 4.0",
"url": "https://creativecommons.org/licenses/by/4.0/",
},
"CC-BY-ND-1.0": {
"name": "Creative Commons Attribution No Derivatives 1.0",
"url": "https://creativecommons.org/licenses/by-nd/1.0/",
},
"CC-BY-ND-2.0": {
"name": "Creative Commons Attribution No Derivatives 2.0",
"url": "https://creativecommons.org/licenses/by-nd/2.0/",
},
"CC-BY-ND-2.5": {
"name": "Creative Commons Attribution No Derivatives 2.5",
"url": "https://creativecommons.org/licenses/by-nd/2.5/",
},
"CC-BY-ND-3.0": {
"name": "Creative Commons Attribution No Derivatives 3.0",
"url": "https://creativecommons.org/licenses/by-nd/3.0/",
},
"CC-BY-ND-4.0": {
"name": "Creative Commons Attribution No Derivatives 4.0",
"url": "https://creativecommons.org/licenses/by-nd/4.0/",
},
"CC-BY-NC-1.0": {
"name": "Creative Commons Attribution Non Commercial 1.0",
"url": "https://creativecommons.org/licenses/by-nc/1.0/",
},
"CC-BY-NC-2.0": {
"name": "Creative Commons Attribution Non Commercial 2.0",
"url": "https://creativecommons.org/licenses/by-nc/2.0/",
},
"CC-BY-NC-2.5": {
"name": "Creative Commons Attribution Non Commercial 2.5",
"url": "https://creativecommons.org/licenses/by-nc/2.5/",
},
"CC-BY-NC-3.0": {
"name": "Creative Commons Attribution Non Commercial 3.0",
"url": "https://creativecommons.org/licenses/by-nc/3.0/",
},
"CC-BY-NC-4.0": {
"name": "Creative Commons Attribution Non Commercial 4.0",
"url": "https://creativecommons.org/licenses/by-nc/4.0/",
},
"CC-BY-NC-ND-1.0": {
"name": "Creative Commons Attribution Non Commercial No Derivatives 1.0",
"url": "https://creativecommons.org/licenses/by-nd-nc/1.0/",
},
"CC-BY-NC-ND-2.0": {
"name": "Creative Commons Attribution Non Commercial No Derivatives 2.0",
"url": "https://creativecommons.org/licenses/by-nc-nd/2.0/",
},
"CC-BY-NC-ND-2.5": {
"name": "Creative Commons Attribution Non Commercial No Derivatives 2.5",
"url": "https://creativecommons.org/licenses/by-nc-nd/2.5/",
},
"CC-BY-NC-ND-3.0": {
"name": "Creative Commons Attribution Non Commercial No Derivatives 3.0",
"url": "https://creativecommons.org/licenses/by-nc-nd/3.0/",
},
"CC-BY-NC-ND-4.0": {
"name": "Creative Commons Attribution Non Commercial No Derivatives 4.0",
"url": "https://creativecommons.org/licenses/by-nc-nd/4.0/",
},
"CC-BY-NC-SA-1.0": {
"name": "Creative Commons Attribution Non Commercial Share Alike 1.0",
"url": "https://creativecommons.org/licenses/by-nc-sa/1.0/",
},
"CC-BY-NC-SA-2.0": {
"name": "Creative Commons Attribution Non Commercial Share Alike 2.0",
"url": "https://creativecommons.org/licenses/by-nc-sa/2.0/",
},
"CC-BY-NC-SA-2.5": {
"name": "Creative Commons Attribution Non Commercial Share Alike 2.5",
"url": "https://creativecommons.org/licenses/by-nc-sa/2.5/",
},
"CC-BY-NC-SA-3.0": {
"name": "Creative Commons Attribution Non Commercial Share Alike 3.0",
"url": "https://creativecommons.org/licenses/by-nc-sa/3.0/",
},
"CC-BY-NC-SA-4.0": {
"name": "Creative Commons Attribution Non Commercial Share Alike 4.0",
"url": "https://creativecommons.org/licenses/by-nc-sa/4.0/",
},
"CC-BY-SA-1.0": {
"name": "Creative Commons Attribution Share Alike 1.0",
"url": "https://creativecommons.org/licenses/by-sa/1.0/",
},
"CC-BY-SA-2.0": {
"name": "Creative Commons Attribution Share Alike 2.0",
"url": "https://creativecommons.org/licenses/by-sa/2.0/",
},
"CC-BY-SA-2.5": {
"name": "Creative Commons Attribution Share Alike 2.5",
"url": "https://creativecommons.org/licenses/by-sa/2.5/",
},
"CC-BY-SA-3.0": {
"name": "Creative Commons Attribution Share Alike 3.0",
"url": "https://creativecommons.org/licenses/by-sa/3.0/",
},
"CC-BY-SA-4.0": {
"name": "Creative Commons Attribution Share Alike 4.0",
"url": "https://creativecommons.org/licenses/by-sa/4.0/",
},
"CC0-1.0": {
"name": "Creative Commons Zero v1.0 Universal",
"url": "https://creativecommons.org/publicdomain/zero/1.0/",
},
"Crossword": {
"name": "Crossword License",
"url": "https://fedoraproject.org/wiki/Licensing/Crossword",
},
"CUA-OPL-1.0": {
"name": "CUA Office Public License v1.0",
"url": "http://opensource.org/licenses/CUA-OPL-1.0",
},
"Cube": {
"name": "Cube License",
"url": "https://fedoraproject.org/wiki/Licensing/Cube",
},
"D-FSL-1.0": {
"name": "Deutsche Freie Software Lizenz",
"url": "http://www.dipp.nrw.de/d-fsl/index_html/lizenzen/de/D-FSL-1_0_de.txt",
},
"diffmark": {
"name": "diffmark license",
"url": "https://fedoraproject.org/wiki/Licensing/diffmark",
},
"WTFPL": {
"name": "Do What The F*ck You Want To Public License",
"url": "http://sam.zoy.org/wtfpl/COPYING",
},
"DOC": {
"name": "DOC License",
"url": "http://www.cs.wustl.edu/~schmidt/ACE-copying.html",
},
"Dotseqn": {
"name": "Dotseqn License",
"url": "https://fedoraproject.org/wiki/Licensing/Dotseqn",
},
"DSDP": {
"name": "DSDP License",
"url": "https://fedoraproject.org/wiki/Licensing/DSDP",
},
"dvipdfm": {
"name": "dvipdfm License",
"url": "https://fedoraproject.org/wiki/Licensing/dvipdfm",
},
"EPL-1.0": {
"name": "Eclipse Public License 1.0",
"url": "http://www.opensource.org/licenses/EPL-1.0",
},
"ECL-1.0": {
"name": "Educational Community License v1.0",
"url": "http://opensource.org/licenses/ECL-1.0",
},
"ECL-2.0": {
"name": "Educational Community License v2.0",
"url": "http://opensource.org/licenses/ECL-2.0",
},
"EFL-1.0": {
"name": "Eiffel Forum License v1.0",
"url": "http://opensource.org/licenses/EFL-1.0",
},
"EFL-2.0": {
"name": "Eiffel Forum License v2.0",
"url": "http://opensource.org/licenses/EFL-2.0",
},
"MIT-advertising": {
"name": "Enlightenment License (e16)",
"url": "https://fedoraproject.org/wiki/Licensing/MIT_With_Advertising",
},
"MIT-enna": {
"name": "enna License",
"url": "https://fedoraproject.org/wiki/Licensing/MIT#enna",
},
"Entessa": {
"name": "Entessa Public License v1.0",
"url": "http://opensource.org/licenses/Entessa",
},
"ErlPL-1.1": {
"name": "Erlang Public License v1.1",
"url": "http://www.erlang.org/EPLICENSE",
},
"EUDatagrid": {
"name": "EU DataGrid Software License",
"url": "http://www.opensource.org/licenses/EUDatagrid",
},
"EUPL-1.0": {
"name": "European Union Public License 1.0",
"url": "http://ec.europa.eu/idabc/en/document/7330.html",
},
"EUPL-1.1": {
"name": "European Union Public License 1.1",
"url": "http://www.opensource.org/licenses/EUPL-1.1",
},
"Eurosym": {
"name": "Eurosym License",
"url": "https://fedoraproject.org/wiki/Licensing/Eurosym",
},
"Fair": {
"name": "Fair License",
"url": "http://www.opensource.org/licenses/Fair",
},
"MIT-feh": {
"name": "feh License",
"url": "https://fedoraproject.org/wiki/Licensing/MIT#feh",
},
"Frameworx-1.0": {
"name": "Frameworx Open License 1.0",
"url": "http://www.opensource.org/licenses/Frameworx-1.0",
},
"FreeImage": {
"name": "FreeImage Public License v1.0",
"url": "http://freeimage.sourceforge.net/freeimage-license.txt",
},
"FTL": {
"name": "Freetype Project License",
"url": "http://freetype.fis.uniroma2.it/FTL.TXT",
},
"FSFUL": {
"name": "FSF Unlimited License",
"url": "https://fedoraproject.org/wiki/Licensing/FSF_Unlimited_License",
},
"FSFULLR": {
"name": "FSF Unlimited License (with License Retention)",
"url": "https://fedoraproject.org/wiki/Licensing/FSF_Unlimited_License",
},
"Giftware": {
"name": "Giftware License",
"url": "http://alleg.sourceforge.net//license.html",
},
"GL2PS": {
"name": "GL2PS License",
"url": "http://www.geuz.org/gl2ps/COPYING.GL2PS",
},
"Glulxe": {
"name": "Glulxe License",
"url": "https://fedoraproject.org/wiki/Licensing/Glulxe",
},
"AGPL-3.0": {
"name": "GNU Affero General Public License v3.0",
"url": "http://www.gnu.org/licenses/agpl.txt",
},
"GFDL-1.1": {
"name": "GNU Free Documentation License v1.1",
"url": "http://www.gnu.org/licenses/old-licenses/fdl-1.1.txt",
},
"GFDL-1.2": {
"name": "GNU Free Documentation License v1.2",
"url": "http://www.gnu.org/licenses/old-licenses/fdl-1.2.txt",
},
"GFDL-1.3": {
"name": "GNU Free Documentation License v1.3",
"url": "http://www.gnu.org/licenses/fdl-1.3.txt",
},
"GPL-1.0": {
"name": "GNU General Public License v1.0 only",
"url": "http://www.gnu.org/licenses/old-licenses/gpl-1.0-standalone.html",
},
"GPL-2.0": {
"name": "GNU General Public License v2.0 only",
"url": "http://www.opensource.org/licenses/GPL-2.0",
},
"GPL-3.0": {
"name": "GNU General Public License v3.0 only",
"url": "http://www.opensource.org/licenses/GPL-3.0",
},
"LGPL-2.1": {
"name": "GNU Lesser General Public License v2.1 only",
"url": "http://www.opensource.org/licenses/LGPL-2.1",
},
"LGPL-3.0": {
"name": "GNU Lesser General Public License v3.0 only",
"url": "http://www.opensource.org/licenses/LGPL-3.0",
},
"LGPL-2.0": {
"name": "GNU Library General Public License v2 only",
"url": "http://www.gnu.org/licenses/old-licenses/lgpl-2.0-standalone.html",
},
"gnuplot": {
"name": "gnuplot License",
"url": "https://fedoraproject.org/wiki/Licensing/Gnuplot",
},
"gSOAP-1.3b": {
"name": "gSOAP Public License v1.3b",
"url": "http://www.cs.fsu.edu/~engelen/license.html",
},
"HaskellReport": {
"name": "Haskell Language Report License",
"url": "https://fedoraproject.org/wiki/Licensing/Haskell_Language_Report_License",
},
"HPND": {
"name": "Historic Permission Notice and Disclaimer",
"url": "http://www.opensource.org/licenses/HPND",
},
"IPL-1.0": {
"name": "IBM Public License v1.0",
"url": "http://www.opensource.org/licenses/IPL-1.0",
},
"ICU": {
"name": "ICU License",
"url": "http://source.icu-project.org/repos/icu/icu/trunk/license.html",
},
"ImageMagick": {
"name": "ImageMagick License",
"url": "http://www.imagemagick.org/script/license.php",
},
"iMatix": {
"name": "iMatix Standard Function Library Agreement",
"url": "http://legacy.imatix.com/html/sfl/sfl4.htm#license",
},
"Imlib2": {
"name": "Imlib2 License",
"url": "http://trac.enlightenment.org/e/browser/trunk/imlib2/COPYING",
},
"IJG": {
"name": "Independent JPEG Group License",
"url": "http://dev.w3.org/cvsweb/Amaya/libjpeg/Attic/README?rev=1.2",
},
"Intel": {
"name": "Intel Open Source License",
"url": "http://opensource.org/licenses/Intel",
},
"IPA": {
"name": "IPA Font License",
"url": "http://www.opensource.org/licenses/IPA",
},
"JasPer-2.0": {
"name": "JasPer License",
"url": "http://www.ece.uvic.ca/~mdadams/jasper/LICENSE",
},
"JSON": {
"name": "JSON License",
"url": "http://www.json.org/license.html",
},
"LPPL-1.3a": {
"name": "LaTeX Project Public License 1.3a",
"url": "http://www.latex-project.org/lppl/lppl-1-3a.txt",
},
"LPPL-1.0": {
"name": "LaTeX Project Public License v1.0",
"url": "http://www.latex-project.org/lppl/lppl-1-0.txt",
},
"LPPL-1.1": {
"name": "LaTeX Project Public License v1.1",
"url": "http://www.latex-project.org/lppl/lppl-1-1.txt",
},
"LPPL-1.2": {
"name": "LaTeX Project Public License v1.2",
"url": "http://www.latex-project.org/lppl/lppl-1-2.txt",
},
"LPPL-1.3c": {
"name": "LaTeX Project Public License v1.3c",
"url": "http://www.opensource.org/licenses/LPPL-1.3c",
},
"Latex2e": {
"name": "Latex2e License",
"url": "https://fedoraproject.org/wiki/Licensing/Latex2e",
},
"BSD-3-Clause-LBNL": {
"name": "Lawrence Berkeley National Labs BSD variant license",
"url": "https://fedoraproject.org/wiki/Licensing/LBNLBSD",
},
"Leptonica": {
"name": "Leptonica License",
"url": "https://fedoraproject.org/wiki/Licensing/Leptonica",
},
"LGPLLR": {
"name": "Lesser General Public License For Linguistic Resources",
"url": "http://www-igm.univ-mlv.fr/~unitex/lgpllr.html",
},
"Libpng": {
"name": "libpng License",
"url": "http://www.libpng.org/pub/png/src/libpng-LICENSE.txt",
},
"libtiff": {
"name": "libtiff License",
"url": "https://fedoraproject.org/wiki/Licensing/libtiff",
},
"LPL-1.02": {
"name": "Lucent Public License v1.02",
"url": "http://www.opensource.org/licenses/LPL-1.02",
},
"LPL-1.0": {
"name": "Lucent Public License Version 1.0",
"url": "http://opensource.org/licenses/LPL-1.0",
},
"MakeIndex": {
"name": "MakeIndex License",
"url": "https://fedoraproject.org/wiki/Licensing/MakeIndex",
},
"MTLL": {
"name": "Matrix Template Library License",
"url": "https://fedoraproject.org/wiki/Licensing/Matrix_Template_Library_License",
},
"MS-PL": {
"name": "Microsoft Public License",
"url": "http://www.opensource.org/licenses/MS-PL",
},
"MS-RL": {
"name": "Microsoft Reciprocal License",
"url": "http://www.opensource.org/licenses/MS-RL",
},
"MirOS": {
"name": "MirOS Licence",
"url": "http://www.opensource.org/licenses/MirOS",
},
"MITNFA": {
"name": "MIT +no-false-attribs license",
"url": "https://fedoraproject.org/wiki/Licensing/MITNFA",
},
"MIT": {
"name": "MIT License",
"url": "http://www.opensource.org/licenses/MIT",
},
"Motosoto": {
"name": "Motosoto License",
"url": "http://www.opensource.org/licenses/Motosoto",
},
"MPL-1.0": {
"name": "Mozilla Public License 1.0",
"url": "http://www.mozilla.org/MPL/MPL-1.0.html",
},
"MPL-1.1": {
"name": "Mozilla Public License 1.1",
"url": "http://www.mozilla.org/MPL/MPL-1.1.html",
},
"MPL-2.0": {
"name": "Mozilla Public License 2.0",
"url": "http://www.mozilla.org/MPL/2.0/\nhttp://opensource.org/licenses/MPL-2.0",
},
"MPL-2.0-no-copyleft-exception": {
"name": "Mozilla Public License 2.0 (no copyleft exception)",
"url": "http://www.mozilla.org/MPL/2.0/\nhttp://opensource.org/licenses/MPL-2.0",
},
"mpich2": {
"name": "mpich2 License",
"url": "https://fedoraproject.org/wiki/Licensing/MIT",
},
"Multics": {
"name": "Multics License",
"url": "http://www.opensource.org/licenses/Multics",
},
"Mup": {
"name": "Mup License",
"url": "https://fedoraproject.org/wiki/Licensing/Mup",
},
"NASA-1.3": {
"name": "NASA Open Source Agreement 1.3",
"url": "http://www.opensource.org/licenses/NASA-1.3",
},
"Naumen": {
"name": "Naumen Public License",
"url": "http://www.opensource.org/licenses/Naumen",
},
"NetCDF": {
"name": "NetCDF license",
"url": "http://www.unidata.ucar.edu/software/netcdf/copyright.html",
},
"NGPL": {
"name": "Nethack General Public License",
"url": "http://www.opensource.org/licenses/NGPL",
},
"NOSL": {
"name": "Netizen Open Source License",
"url": "http://bits.netizen.com.au/licenses/NOSL/nosl.txt",
},
"NPL-1.0": {
"name": "Netscape Public License v1.0",
"url": "http://www.mozilla.org/MPL/NPL/1.0/",
},
"NPL-1.1": {
"name": "Netscape Public License v1.1",
"url": "http://www.mozilla.org/MPL/NPL/1.1/",
},
"Newsletr": {
"name": "Newsletr License",
"url": "https://fedoraproject.org/wiki/Licensing/Newsletr",
},
"NLPL": {
"name": "No Limit Public License",
"url": "https://fedoraproject.org/wiki/Licensing/NLPL",
},
"Nokia": {
"name": "Nokia Open Source License",
"url": "http://www.opensource.org/licenses/nokia",
},
"NPOSL-3.0": {
"name": "Non-Profit Open Software License 3.0",
"url": "http://www.opensource.org/licenses/NOSL3.0",
},
"Noweb": {
"name": "Noweb License",
"url": "https://fedoraproject.org/wiki/Licensing/Noweb",
},
"NRL": {
"name": "NRL License",
"url": "http://web.mit.edu/network/isakmp/nrllicense.html",
},
"NTP": {
"name": "NTP License",
"url": "http://www.opensource.org/licenses/NTP",
},
"Nunit": {
"name": "Nunit License",
"url": "https://fedoraproject.org/wiki/Licensing/Nunit",
},
"OCLC-2.0": {
"name": "OCLC Research Public License 2.0",
"url": "http://www.opensource.org/licenses/OCLC-2.0",
},
"ODbL-1.0": {
"name": "ODC Open Database License v1.0",
"url": "http://www.opendatacommons.org/licenses/odbl/1.0/",
},
"PDDL-1.0": {
"name": "ODC Public Domain Dedication & License 1.0",
"url": "http://opendatacommons.org/licenses/pddl/1.0/",
},
"OGTSL": {
"name": "Open Group Test Suite License",
"url": "http://www.opensource.org/licenses/OGTSL",
},
"OML": {
"name": "Open Market License",
"url": "https://fedoraproject.org/wiki/Licensing/Open_Market_License",
},
"OPL-1.0": {
"name": "Open Public License v1.0",
"url": "https://fedoraproject.org/wiki/Licensing/Open_Public_License",
},
"OSL-1.0": {
"name": "Open Software License 1.0",
"url": "http://opensource.org/licenses/OSL-1.0",
},
"OSL-1.1": {
"name": "Open Software License 1.1",
"url": "https://fedoraproject.org/wiki/Licensing/OSL1.1",
},
"PHP-3.01": {
"name": "PHP License v3.01",
"url": "http://www.php.net/license/3_01.txt",
},
"Plexus": {
"name": "Plexus Classworlds License",
"url": "https://fedoraproject.org/wiki/Licensing/Plexus_Classworlds_License",
},
"PostgreSQL": {
"name": "PostgreSQL License",
"url": "http://www.opensource.org/licenses/PostgreSQL",
},
"psfrag": {
"name": "psfrag License",
"url": "https://fedoraproject.org/wiki/Licensing/psfrag",
},
"psutils": {
"name": "psutils License",
"url": "https://fedoraproject.org/wiki/Licensing/psutils",
},
"Python-2.0": {
"name": "Python License 2.0",
"url": "http://www.opensource.org/licenses/Python-2.0",
},
"QPL-1.0": {
"name": "Q Public License 1.0",
"url": "http://www.opensource.org/licenses/QPL-1.0",
},
"Qhull": {
"name": "Qhull License",
"url": "https://fedoraproject.org/wiki/Licensing/Qhull",
},
"Rdisc": {
"name": "Rdisc License",
"url": "https://fedoraproject.org/wiki/Licensing/Rdisc_License",
},
"RPSL-1.0": {
"name": "RealNetworks Public Source License v1.0",
"url": "http://www.opensource.org/licenses/RPSL-1.0",
},
"RPL-1.1": {
"name": "Reciprocal Public License 1.1",
"url": "http://opensource.org/licenses/RPL-1.1",
},
"RPL-1.5": {
"name": "Reciprocal Public License 1.5",
"url": "http://www.opensource.org/licenses/RPL-1.5",
},
"RHeCos-1.1": {
"name": "Red Hat eCos Public License v1.1",
"url": "http://ecos.sourceware.org/old-license.html",
},
"RSCPL": {
"name": "Ricoh Source Code Public License",
"url": "http://www.opensource.org/licenses/RSCPL",
},
"RSA-MD": {
"name": "RSA Message-Digest License",
"url": "http://www.faqs.org/rfcs/rfc1321.html",
},
"Ruby": {
"name": "Ruby License",
"url": "http://www.ruby-lang.org/en/LICENSE.txt",
},
"SAX-PD": {
"name": "Sax Public Domain Notice",
"url": "http://www.saxproject.org/copying.html",
},
"Saxpath": {
"name": "Saxpath License",
"url": "https://fedoraproject.org/wiki/Licensing/Saxpath_License",
},
"SCEA": {
"name": "SCEA Shared Source License",
"url": "http://research.scea.com/scea_shared_source_license.html",
},
"SWL": {
"name": "Scheme Widget Library (SWL) Software License Agreement",
"url": "https://fedoraproject.org/wiki/Licensing/SWL",
},
"Sendmail": {
"name": "Sendmail License",
"url": "http://www.sendmail.com/pdfs/open_source/sendmail_license.pdf",
},
"SGI-B-1.0": {
"name": "SGI Free Software License B v1.0",
"url": "http://oss.sgi.com/projects/FreeB/SGIFreeSWLicB.1.0.html",
},
"SGI-B-1.1": {
"name": "SGI Free Software License B v1.1",
"url": "http://oss.sgi.com/projects/FreeB/",
},
"SGI-B-2.0": {
"name": "SGI Free Software License B v2.0",
"url": "http://oss.sgi.com/projects/FreeB/SGIFreeSWLicB.2.0.pdf",
},
"OFL-1.0": {
"name": "SIL Open Font License 1.0",
"url": "http://scripts.sil.org/cms/scripts/page.php?item_id=OFL10_web",
},
"OFL-1.1": {
"name": "SIL Open Font License 1.1",
"url": "http://www.opensource.org/licenses/OFL-1.1",
},
"SimPL-2.0": {
"name": "Simple Public License 2.0",
"url": "http://www.opensource.org/licenses/SimPL-2.0",
},
"Sleepycat": {
"name": "Sleepycat License",
"url": "http://www.opensource.org/licenses/Sleepycat",
},
"SNIA": {
"name": "SNIA Public License 1.1",
"url": "https://fedoraproject.org/wiki/Licensing/SNIA_Public_License",
},
"SMLNJ": {
"name": "Standard ML of New Jersey License",
"url": "http://www.smlnj.org//license.html",
},
"SugarCRM-1.1.3": {
"name": "SugarCRM Public License v1.1.3",
"url": "http://www.sugarcrm.com/crm/SPL",
},
"SISSL": {
"name": "Sun Industry Standards Source License v1.1",
"url": "http://opensource.org/licenses/SISSL",
},
"SISSL-1.2": {
"name": "Sun Industry Standards Source License v1.2",
"url": "http://gridscheduler.sourceforge.net/Gridengine_SISSL_license.html",
},
"SPL-1.0": {
"name": "Sun Public License v1.0",
"url": "http://www.opensource.org/licenses/SPL-1.0",
},
"Watcom-1.0": {
"name": "Sybase Open Watcom Public License 1.0",
"url": "http://www.opensource.org/licenses/Watcom-1.0",
},
"TCL": {
"name": "TCL/TK License",
"url": "https://fedoraproject.org/wiki/Licensing/TCL",
},
"Unlicense": {
"name": "The Unlicense",
"url": "http://unlicense.org/",
},
"TMate": {
"name": "TMate Open Source License",
"url": "http://svnkit.com/license.html",
},
"TORQUE-1.1": {
"name": "TORQUE v2.5+ Software License v1.1",
"url": "https://fedoraproject.org/wiki/Licensing/TORQUEv1.1",
},
"TOSL": {
"name": "Trusster Open Source License",
"url": "https://fedoraproject.org/wiki/Licensing/TOSL",
},
"Unicode-TOU": {
"name": "Unicode Terms of Use",
"url": "http://www.unicode.org/copyright.html",
},
"UPL-1.0": {
"name": "Universal Permissive License v1.0",
"url": "http://opensource.org/licenses/UPL",
},
"NCSA": {
"name": "University of Illinois/NCSA Open Source License",
"url": "http://www.opensource.org/licenses/NCSA",
},
"Vim": {
"name": "Vim License",
"url": "http://vimdoc.sourceforge.net/htmldoc/uganda.html",
},
"VOSTROM": {
"name": "VOSTROM Public License for Open Source",
"url": "https://fedoraproject.org/wiki/Licensing/VOSTROM",
},
"VSL-1.0": {
"name": "Vovida Software License v1.0",
"url": "http://www.opensource.org/licenses/VSL-1.0",
},
"W3C-19980720": {
"name": "W3C Software Notice and License (1998-07-20)",
"url": "http://www.w3.org/Consortium/Legal/copyright-software-19980720.html",
},
"W3C": {
"name": "W3C Software Notice and License (2002-12-31)",
"url": "http://www.opensource.org/licenses/W3C",
},
"Wsuipa": {
"name": "Wsuipa License",
"url": "https://fedoraproject.org/wiki/Licensing/Wsuipa",
},
"Xnet": {
"name": "X.Net License",
"url": "http://opensource.org/licenses/Xnet",
},
"X11": {
"name": "X11 License",
"url": "http://www.xfree86.org/3.3.6/COPYRIGHT2.html#3",
},
"Xerox": {
"name": "Xerox License",
"url": "https://fedoraproject.org/wiki/Licensing/Xerox",
},
"XFree86-1.1": {
"name": "XFree86 License 1.1",
"url": "http://www.xfree86.org/current/LICENSE4.html",
},
"xinetd": {
"name": "xinetd License",
"url": "https://fedoraproject.org/wiki/Licensing/Xinetd_License",
},
"xpp": {
"name": "XPP License",
"url": "https://fedoraproject.org/wiki/Licensing/xpp",
},
"XSkat": {
"name": "XSkat License",
"url": "https://fedoraproject.org/wiki/Licensing/XSkat_License",
},
"YPL-1.0": {
"name": "Yahoo! Public License v1.0",
"url": "http://www.zimbra.com/license/yahoo_public_license_1.0.html",
},
"YPL-1.1": {
"name": "Yahoo! Public License v1.1",
"url": "http://www.zimbra.com/license/yahoo_public_license_1.1.html",
},
"Zed": {
"name": "Zed License",
"url": "https://fedoraproject.org/wiki/Licensing/Zed",
},
"Zlib": {
"name": "zlib License",
"url": "http://www.opensource.org/licenses/Zlib",
},
"zlib-acknowledgement": {
"name": "zlib/libpng License with Acknowledgement",
"url": "https://fedoraproject.org/wiki/Licensing/ZlibWithAcknowledgement",
},
"ZPL-1.1": {
"name": "Zope Public License 1.1",
"url": "http://old.zope.org/Resources/License/ZPL-1.1",
},
"ZPL-2.0": {
"name": "Zope Public License 2.0",
"url": "http://opensource.org/licenses/ZPL-2.0",
},
"ZPL-2.1": {
"name": "Zope Public License 2.1",
"url": "http://old.zope.org/Resources/ZPL/",
}
}
@attr.s
class License(object):
    """A single license record: identifier, human-readable name, and URL."""
    id = attr.ib()    # SPDX-style identifier, e.g. 'MIT'.
    name = attr.ib()  # Full license name.
    url = attr.ib()   # Canonical URL for the license text.
    @property
    def legalcode(self):
        """Return the bundled license text, or None if no local copy exists.

        Looks for a file named after the license id in the sibling
        'legalcode' directory next to this module.
        """
        p = pathlib.Path(__file__).parent / 'legalcode' / self.id
        if p.exists():
            return p.read_text(encoding='utf8')
# Convert the id -> {name, url} mapping into a flat list of License records.
_LICENSES = [
    License(key, spec['name'], spec['url']) for key, spec in _LICENSES.items()]


def find(q):
    """Look up a License by id (case-insensitive), exact name/URL, or URL prefix.

    Returns the first matching License, or None if nothing matches.
    """
    query_is_url = '://' in q
    for candidate in _LICENSES:
        if q.lower() == candidate.id.lower():
            return candidate
        if q == candidate.name or q == candidate.url:
            return candidate
        if query_is_url:
            # Compare the scheme-less tails so http/https variants match.
            candidate_tail = candidate.url.split('://')[1]
            query_tail = q.split('://')[1]
            if candidate_tail.startswith(query_tail) or query_tail.startswith(candidate_tail):
                return candidate
| clld/clldutils | src/clldutils/licenses.py | Python | apache-2.0 | 38,642 |
from __future__ import unicode_literals
import random
import string
# Fixed fake master-account identity used by the moto Organizations backend.
MASTER_ACCOUNT_ID = '123456789012'
MASTER_ACCOUNT_EMAIL = 'fakeorg@moto-example.com'
# ARN templates; positional fields are (owner account id, org id, entity id).
ORGANIZATION_ARN_FORMAT = 'arn:aws:organizations::{0}:organization/{1}'
MASTER_ACCOUNT_ARN_FORMAT = 'arn:aws:organizations::{0}:account/{1}/{0}'
ACCOUNT_ARN_FORMAT = 'arn:aws:organizations::{0}:account/{1}/{2}'
ROOT_ARN_FORMAT = 'arn:aws:organizations::{0}:root/{1}/{2}'
OU_ARN_FORMAT = 'arn:aws:organizations::{0}:ou/{1}/{2}'
# Alphabet and lengths for randomly generated resource ids below.
CHARSET = string.ascii_lowercase + string.digits
ORG_ID_SIZE = 10
ROOT_ID_SIZE = 4
ACCOUNT_ID_SIZE = 12
OU_ID_SUFFIX_SIZE = 8
CREATE_ACCOUNT_STATUS_ID_SIZE = 8
def make_random_org_id():
    """Return a random organization id, e.g. 'o-vipjnq5z86'.

    The AWS pattern is "o-" followed by 10 to 32 lower-case letters or
    digits; moto always generates exactly ORG_ID_SIZE characters.
    """
    suffix = ''.join(random.choice(CHARSET) for _ in range(ORG_ID_SIZE))
    return 'o-' + suffix
def make_random_root_id():
    """Return a random root id, e.g. 'r-3zwx'.

    The AWS pattern is "r-" followed by 4 to 32 lower-case letters or
    digits; moto always generates exactly ROOT_ID_SIZE characters.
    """
    suffix = ''.join(random.choice(CHARSET) for _ in range(ROOT_ID_SIZE))
    return 'r-' + suffix
def make_random_ou_id(root_id):
    """Return a random organizational-unit id, e.g. 'ou-g8sd-5oe3bjaw'.

    The AWS pattern is "ou-" followed by the suffix of the containing
    root's id, a dash, and 8 to 32 lower-case letters or digits.
    """
    root_suffix = root_id.partition('-')[2]
    random_part = ''.join(random.choice(CHARSET) for _ in range(OU_ID_SUFFIX_SIZE))
    return 'ou-{0}-{1}'.format(root_suffix, random_part)
def make_random_account_id():
    """Return a random 12-digit account id string, e.g. '488633172133'."""
    digits = [random.choice(string.digits) for _ in range(ACCOUNT_ID_SIZE)]
    return ''.join(digits)
def make_random_create_account_status_id():
    """Return a random create-account request id, e.g. 'car-35gxzwrp'.

    The AWS pattern is "car-" followed by 8 to 32 lower-case letters or
    digits; moto always generates CREATE_ACCOUNT_STATUS_ID_SIZE of them.
    """
    suffix = ''.join(
        random.choice(CHARSET) for _ in range(CREATE_ACCOUNT_STATUS_ID_SIZE))
    return 'car-' + suffix
| okomestudio/moto | moto/organizations/utils.py | Python | apache-2.0 | 2,175 |
import copy
class Pentomino(object):
    """A named polyomino given as a mutable list of [x, y] integer cells.

    All transformation methods mutate ``self.coos`` in place and return
    ``self`` so calls can be chained (``p.flip(0).normalize()``).
    """

    def __init__(self, name, coos):
        self.name = name
        self.coos = coos
        self.dim = len(coos[0])  # Coordinate dimension (2 for [x, y] cells).

    def normalize_coo(self, coo):
        """Shift all cells so the minimum of coordinate ``coo`` becomes 0.

        Bug fix: the original indexed ``self.coos`` with the cells
        themselves and subtracted a one-element list from an int, raising
        TypeError at runtime.
        """
        smallest = min(cell[coo] for cell in self.coos)
        for cell in self.coos:
            cell[coo] -= smallest

    def normalize(self):
        """Sort the cells and translate the piece into the origin corner.

        Returns self; used to obtain a canonical form for hashing/equality.
        """
        self.coos.sort()
        min_x = min(cell[0] for cell in self.coos)
        min_y = min(cell[1] for cell in self.coos)
        for cell in self.coos:
            cell[0] -= min_x
            cell[1] -= min_y
        return self

    def flip(self, coo):
        """Mirror the piece along coordinate ``coo`` in place; return self.

        Reflection within the occupied extent maps each value v to
        ``left + right - v``.  This single formula replaces the original
        width-1/2/3 case analysis and also covers width 0 (no-op) and
        width 4 (the long axis of the I pentomino, which the original
        silently skipped; the I is symmetric, so the cell *set* is the
        same either way).
        """
        values = [cell[coo] for cell in self.coos]
        left, right = min(values), max(values)
        for cell in self.coos:
            cell[coo] = left + right - cell[coo]
        return self

    def translate_one(self, coo):
        """Translate every cell by +1 along coordinate ``coo``; return self."""
        for cell in self.coos:
            cell[coo] += 1
        return self

    def translate_coo(self, coo, amount):
        """Translate every cell by ``amount`` along ``coo``; return self.

        Bug fix: the original indexed ``self.coos`` with the cells
        themselves (``self.coos[i]`` where ``i`` is a cell), a TypeError.
        """
        for cell in self.coos:
            cell[coo] += amount
        return self

    def translate_by(self, by_vector):
        """Translate every cell by ``[dx, dy] = by_vector``; return self.

        Bug fix: same cell-as-index error as ``translate_coo``.
        """
        for cell in self.coos:
            cell[0] += by_vector[0]
            cell[1] += by_vector[1]
        return self

    def turn90(self):
        """Rotate the piece 90 degrees: swap x/y, then mirror along y."""
        for cell in self.coos:
            cell[0], cell[1] = cell[1], cell[0]
        self.flip(1)
        return self

    def max(self):
        """Return [max_x, max_y] where max_y is taken only over the cells
        that have the maximal x coordinate."""
        max_x = max(cell[0] for cell in self.coos)
        max_y = max(cell[1] for cell in self.coos if cell[0] == max_x)
        return [max_x, max_y]

    def __hash__(self):
        """Hash of the normalized cell list.

        NOTE: normalizes (i.e. mutates) self as a side effect, matching
        the original behaviour.  The name is deliberately not hashed, so
        equally-shaped pieces with different names share a hash bucket;
        __eq__ still distinguishes them.
        """
        normalized = self.normalize()
        h = 100 ** len(self.coos)
        for i, cell in enumerate(normalized.coos):
            h += (cell[0] * 100 + cell[1]) * 100 ** (i * 2)
        return h

    def __eq__(self, other):
        # Same name AND same normalized-shape hash.
        if self.name != other.name:
            return False
        return self.__hash__() == other.__hash__()

    def representation(self):
        """Return a short debug string, e.g. '[F:[[0, 1], ...]]'."""
        return "[" + self.name + ":" + str(self.coos) + "]"
# Concrete pentomino shapes.  Each subclass fixes the canonical cell list
# for one of the twelve standard pieces (named after the letter it resembles).
class F(Pentomino):
    def __init__(self):
        Pentomino.__init__(self, "F", [[0,1],[1,0],[1,1],[1,2],[2,2]])
class I(Pentomino):
    def __init__(self):
        Pentomino.__init__(self, "I", [[0,0],[0,1],[0,2],[0,3],[0,4]])
class L(Pentomino):
    def __init__(self):
        Pentomino.__init__(self, "L", [[0,0],[0,1],[0,2],[0,3],[1,0]])
class N(Pentomino):
    def __init__(self):
        Pentomino.__init__(self, "N", [[0,0],[0,1],[1,1],[1,2],[1,3]])
class P(Pentomino):
    def __init__(self):
        Pentomino.__init__(self, "P", [[0,0],[0,1],[0,2],[1,1],[1,2]])
class T(Pentomino):
    def __init__(self):
        Pentomino.__init__(self, "T", [[0,2],[1,0],[1,1],[1,2],[2,2]])
class U(Pentomino):
    def __init__(self):
        Pentomino.__init__(self, "U", [[0,0],[0,1],[1,0],[2,0],[2,1]])
class V(Pentomino):
    def __init__(self):
        Pentomino.__init__(self, "V", [[0,0],[1,0],[2,0],[2,1],[2,2]])
class W(Pentomino):
    def __init__(self):
        Pentomino.__init__(self, "W", [[0,0],[1,0],[1,1],[2,1],[2,2]])
class X(Pentomino):
    def __init__(self):
        Pentomino.__init__(self, "X", [[0,1],[1,0],[1,1],[1,2],[2,1]])
class Y(Pentomino):
    def __init__(self):
        Pentomino.__init__(self, "Y", [[0,0],[1,0],[2,0],[2,1],[3,0]])
class Z(Pentomino):
    def __init__(self):
        Pentomino.__init__(self, "Z", [[0,2],[1,0],[1,1],[1,2],[2,0]])
def all_pentominos():
    """Return one fresh instance of each of the twelve pentominoes."""
    return [shape() for shape in (F, I, L, P, N, T, U, V, W, X, Y, Z)]
def all_fixed_pentominos():
    """Collect every distinct fixed orientation of all twelve pentominoes.

    Symmetric pieces are special-cased: X has a single orientation and
    I has exactly two (horizontal and vertical).
    """
    s = TileSet()
    for i in all_pentominos() :
        if i.name == "X" :
            s.add(i)
        elif i.name == "I":
            s.add(I())
            s.add(I().turn90())
        else :
            # Per 90-degree rotation, add the rotation plus its flips.
            # The interleaved flip(0)/flip(1) calls cancel pairwise, so the
            # piece is back in its starting orientation before turn90().
            # NOTE: i is mutated in place; TileSet.add() deep-copies.
            for k in range(4):
                s.add(i.normalize())
                s.add(i.flip(0).normalize())
                s.add(i.flip(1).normalize())
                s.add(i.flip(0).normalize())
                i.flip(1).normalize()
                i.turn90().normalize()
    return s
def fixed_pentominos_of(p):
    """Return a TileSet of all distinct fixed orientations of piece ``p``.

    Only ``p.name`` is used for matching; the canonical instance from
    all_pentominos() provides the starting coordinates.
    """
    s = TileSet()
    for i in all_pentominos() :
        if p.name == i.name :
            # Same rotation/flip enumeration as in all_fixed_pentominos():
            # the interleaved flips cancel before each turn90().
            for k in range(4):
                s.add(i.normalize())
                s.add(i.flip(0).normalize())
                s.add(i.flip(1).normalize())
                s.add(i.flip(0).normalize())
                i.flip(1).normalize()
                i.turn90().normalize()
    return s
class TileSet(object):
    """A deduplicated collection of pentominoes.

    Items are deep-copied on insertion, so callers may keep mutating the
    originals without affecting the stored tiles.
    """

    def __init__(self, plist=()):
        # Bug fix: the default was the mutable literal [] (shared between
        # calls); an immutable empty tuple behaves identically here since
        # the argument is only iterated.
        self.set = set()
        for p in plist:
            self.add(p)

    def __iter__(self):
        return iter(self.set)

    def add(self, p):
        """Add a deep copy of ``p`` unless an equal tile is already stored.

        The explicit scan over all members is kept because
        Pentomino.__eq__ compares names while its __hash__ does not, and
        the original semantics compared against every stored tile.
        """
        c = copy.deepcopy(p)
        is_new = True
        for existing in self.set:
            if existing.__eq__(c):
                is_new = False
        if is_new:
            self.set.add(c)

    def size(self):
        """Return the number of distinct tiles stored."""
        return len(self.set)

    def representation(self):
        """Return a bracketed, comma-separated dump of all tile coordinates."""
        rep = "["
        first_done = 0
        for p in self.set:
            if first_done > 0:
                rep += ","
            else:
                first_done = 1
            rep += str(p.coos)
        rep += "]"
        return rep
| lockeee/Dancing-Links2015 | dancing_links/python/pentominos.py | Python | cc0-1.0 | 6,749 |
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
import platform
import functools
from azure.core.exceptions import HttpResponseError, ClientAuthenticationError
from azure.core.credentials import AzureKeyCredential
from testcase import TextAnalyticsTest, TextAnalyticsPreparer
from testcase import TextAnalyticsClientPreparer as _TextAnalyticsClientPreparer
from devtools_testutils import recorded_by_proxy
from azure.ai.textanalytics import TextAnalyticsClient
# Pre-apply the client_cls positional argument (TextAnalyticsClient) so it
# need not be passed explicitly at every decorator use site below.
TextAnalyticsClientPreparer = functools.partial(_TextAnalyticsClientPreparer, TextAnalyticsClient)
class TestEncoding(TextAnalyticsTest):
    """Verify entity offsets for text containing multi-code-unit characters.

    Each test submits a string whose prefix contains emoji, combining
    diacritics, or Hangul (NFC vs NFD) and asserts the offset reported for
    the SSN entity.  NOTE(review): the expected offsets appear to count
    UTF-16 code units rather than Python code points -- confirm against
    the service's string-index documentation.
    """
    @TextAnalyticsPreparer()
    @TextAnalyticsClientPreparer()
    @recorded_by_proxy
    def test_emoji(self, client):
        result = client.recognize_pii_entities(["👩 SSN: 859-98-0987"])
        assert result[0].entities[0].offset == 7

    @TextAnalyticsPreparer()
    @TextAnalyticsClientPreparer()
    @recorded_by_proxy
    def test_emoji_with_skin_tone_modifier(self, client):
        result = client.recognize_pii_entities(["👩🏻 SSN: 859-98-0987"])
        assert result[0].entities[0].offset == 8

    @TextAnalyticsPreparer()
    @TextAnalyticsClientPreparer()
    @recorded_by_proxy
    def test_emoji_family(self, client):
        result = client.recognize_pii_entities(["👩‍👩‍👧‍👧 SSN: 859-98-0987"])
        assert result[0].entities[0].offset == 13

    @TextAnalyticsPreparer()
    @TextAnalyticsClientPreparer()
    @recorded_by_proxy
    def test_emoji_family_with_skin_tone_modifier(self, client):
        result = client.recognize_pii_entities(["👩🏻‍👩🏽‍👧🏾‍👦🏿 SSN: 859-98-0987"])
        assert result[0].entities[0].offset == 17

    @TextAnalyticsPreparer()
    @TextAnalyticsClientPreparer()
    @recorded_by_proxy
    def test_diacritics_nfc(self, client):
        result = client.recognize_pii_entities(["año SSN: 859-98-0987"])
        assert result[0].entities[0].offset == 9

    @TextAnalyticsPreparer()
    @TextAnalyticsClientPreparer()
    @recorded_by_proxy
    def test_diacritics_nfd(self, client):
        result = client.recognize_pii_entities(["año SSN: 859-98-0987"])
        assert result[0].entities[0].offset == 10

    @TextAnalyticsPreparer()
    @TextAnalyticsClientPreparer()
    @recorded_by_proxy
    def test_korean_nfc(self, client):
        result = client.recognize_pii_entities(["아가 SSN: 859-98-0987"])
        assert result[0].entities[0].offset == 8

    @TextAnalyticsPreparer()
    @TextAnalyticsClientPreparer()
    @recorded_by_proxy
    def test_korean_nfd(self, client):
        result = client.recognize_pii_entities(["아가 SSN: 859-98-0987"])
        assert result[0].entities[0].offset == 8

    @TextAnalyticsPreparer()
    @TextAnalyticsClientPreparer()
    @recorded_by_proxy
    def test_zalgo_text(self, client):
        result = client.recognize_pii_entities(["ơ̵̧̧̢̳̘̘͕͔͕̭̟̙͎͈̞͔̈̇̒̃͋̇̅͛̋͛̎́͑̄̐̂̎͗͝m̵͍͉̗̄̏͌̂̑̽̕͝͠g̵̢̡̢̡̨̡̧̛͉̞̯̠̤̣͕̟̫̫̼̰͓̦͖̣̣͎̋͒̈́̓̒̈̍̌̓̅͑̒̓̅̅͒̿̏́͗̀̇͛̏̀̈́̀̊̾̀̔͜͠͝ͅ SSN: 859-98-0987"])
        assert result[0].entities[0].offset == 121
| Azure/azure-sdk-for-python | sdk/textanalytics/azure-ai-textanalytics/tests/test_encoding.py | Python | mit | 3,419 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Selection sort.

A common mistake when implementing this from the textbook description is to
overwrite the current element with the minimum found in the remainder of the
list.  The found minimum must be *swapped* with the current element, not
copied over it; otherwise some existing values get clobbered by the minimum.
"""
def select_sort(li):
    """Sort the list ``li`` in place with selection sort and return it.

    Fix: uses ``range`` instead of the Python-2-only ``xrange`` so the
    function works unchanged on both Python 2 and Python 3.
    """
    length = len(li)
    for x in range(length - 1):
        # Find the index of the smallest remaining element...
        index = x
        smallest = li[index]
        for y in range(x + 1, length):
            if smallest > li[y]:
                smallest = li[y]
                index = y
        # ...then swap it into position x (swap, don't overwrite --
        # see the module docstring).
        li[x], li[index] = li[index], li[x]
    return li
def main():
    """Demo entry point: sort a sample list and print the result."""
    # print() with a single argument behaves identically as a Python 2
    # parenthesised print statement and a Python 3 function call, unlike
    # the original bare 'print expr' statement which is a Python 3
    # SyntaxError.
    print(select_sort([2, 1, 3, 4, 5, 6, 19, 0]))


if __name__ == '__main__':
    main()
| ssjssh/algorithm | src/ssj/sort/select_sort.py | Python | gpl-2.0 | 737 |
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# Project: http://cloudedbats.org
# Copyright (c) 2016-2018 Arnold Andreasson
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
import pathlib
import shutil
import logging
import wurb_core
@wurb_core.singleton
class WurbSettings(object):
    """ Used for config and settings.
        There are three alternatives for settings:
        1. Use default values. Default values are defined in the modules where they are used.
        2. Use the internally used file 'wurb_settings/user_settings.txt'.
        3. Add a file to the connected USB memory. Path on USB memory;
           'cloudedbats_wurb/settings/user_settings.txt'
        At startup files for config/settings are copied from the USB memory to the internally
        stored 'wurb_settings' folder at startup, if available. Otherwise, defaults are used.
        A 'user_settings_DEFAULTS.txt' file will also automatically be prepared at startup.
    """
    def __init__(self):
        """ Note: Singleton, parameters not allowed. """
        self._logger = logging.getLogger('CloudedBatsWURB')
        self._wurb_settings = {}          # key -> current value (string).
        self._default_settings_text = []  # Text lines for the DEFAULTS file.
        self._default_settings = {}       # key -> declared default value.
        self._developer_settings = {}     # key -> hidden developer value.
        self._valid_settings = {}         # key -> declared valid values.
        self._wurb_scheduler_events = []  # Scheduler event strings, in order.

    def text(self, key):
        """ Return the setting for key as a string ('' if missing). """
        value = ''
        if key in self._wurb_settings:
            value = self._wurb_settings.get(key)
        # Return string.
        return value

    def boolean(self, key):
        """ Return the setting for key as a boolean (False if missing). """
        value = 'F'
        if key in self._wurb_settings:
            value = self._wurb_settings.get(key)
        # Return Boolean. Any of yes/y/true/t (case-insensitive) is True.
        return value.lower() in ['yes', 'y', 'true', 't']

    def integer(self, key):
        """ Return the setting for key as an int (0 if missing/invalid). """
        # NOTE(review): unlike float(), this does not fall back to
        # _default_settings when the key is absent -- confirm whether the
        # asymmetry is intended.
        value = '0'
        if key in self._wurb_settings:
            value = self._wurb_settings.get(key)
        # Return integer.
        try:
            return int(value)
        except (TypeError, ValueError):  # Narrowed from a bare 'except:'.
            return 0

    def float(self, key):
        """ Return the setting for key as a float (0 if missing/invalid). """
        value = '0.0'
        if key in self._wurb_settings:
            value = self._wurb_settings.get(key)
        elif key in self._default_settings:
            value = self._default_settings.get(key)
        # Return float.
        try:
            return float(value)
        except (TypeError, ValueError):  # Narrowed from a bare 'except:'.
            return 0

    def scheduler_events(self):
        """ Return the list of scheduler event strings. """
        return self._wurb_scheduler_events

    def set_default_values(self, description=None, default_settings=None, developer_settings=None):
        """ Register default settings.

        description: list of text lines for the DEFAULTS file.
        default_settings: list of dicts with 'key', 'value' and
            optionally 'valid'; 'scheduler_event' keys may repeat.
        developer_settings: same shape, but hidden from the DEFAULTS file.
        """
        # Description.
        if description:
            self._default_settings_text.append('')
            for row in description:
                self._default_settings_text.append(row)
        # Public settings.
        if default_settings:
            self._default_settings_text.append('')
            for row in default_settings:
                if row['key'] == 'scheduler_event':
                    # Scheduler events accumulate; many rows share this key.
                    self._default_settings_text.append(row['key'] + ': ' + str(row['value']))
                    self._wurb_scheduler_events.append(row['value'])
                else:
                    self._default_settings_text.append(row['key'] + ': ' + str(row['value']))
                    self._wurb_settings[row['key']] = row['value']
                    self._default_settings[row['key']] = row['value']
                    if 'valid' in row:
                        self._valid_settings[row['key']] = row['valid']
        # Hidden settings.
        if developer_settings:
            for row in developer_settings:
                self._wurb_settings[row['key']] = row['value']
                self._developer_settings[row['key']] = row['value']
                if 'valid' in row:
                    self._valid_settings[row['key']] = row['valid']

    def load_settings(self, file_path):
        """ Parse 'key: value' rows from file_path into the settings dict.

        '#' starts a comment; 'scheduler_event' rows replace the default
        scheduler events (cleared on first occurrence) and may repeat.
        """
        if not file_path.exists():
            self._logger.warning('Settings: Config/settings file does not exists. Default values are used.')
            return
        #
        self._logger.info('Settings: Used settings from file: ')
        clear_default_scheduler_events = True
        with file_path.open('r') as infile:
            for row in infile:
                key_value = row.strip()
                key = ''
                value = ''
                # Remove comments.
                if '#' in key_value:
                    key_value = key_value.split('#')[0].strip() # Use left part.
                # Split key/value.
                if key_value:
                    if ':' in key_value:
                        key_value_list = key_value.split(':', 1) # Split on first occurrence.
                        key = key_value_list[0].strip()
                        value = key_value_list[1].strip()
                        if key and value:
                            if key == 'scheduler_event':
                                # Clear defaults if scheduler events are defined. First time only.
                                if clear_default_scheduler_events:
                                    self._wurb_scheduler_events = []
                                    clear_default_scheduler_events = False
                                # Many rows with the same key are allowed.
                                self._wurb_scheduler_events.append(value)
                                self._logger.info('- Scheduler event: ' + str(value))
                            else:
                                # Add to dict. Only one is allowed.
                                self._wurb_settings[key] = value
                                self._logger.info('- Setting key: ' + str(key) + ' value: ' + str(value))

    def save_default_settings(self, file_path):
        """ Write the accumulated DEFAULTS text lines to file_path. """
        with file_path.open('w') as file:
            file.write('\r\n'.join(self._default_settings_text))

    def save_last_used_settings(self, file_path):
        """ Write all currently effective settings to file_path, grouped
        into standard / scheduler / developer / unrecognised sections. """
        used_settings = [
            '',
            '# Settings used during the last ',
            '# execution of CloudedBats WURB.',
            '',
            '# Standard settings:',
            ' ',
        ]
        #
        for key in sorted(self._wurb_settings.keys()):
            if key in self._default_settings:
                used_settings.append(key + ': ' + str(self._wurb_settings[key]))
        #
        used_settings.append(' ')
        used_settings.append('# Scheduler events:')
        used_settings.append(' ')
        #
        for row in self._wurb_scheduler_events:
            used_settings.append('scheduler_event: ' + row)
        #
        used_settings.append(' ')
        used_settings.append('# Development settings:')
        used_settings.append(' ')
        #
        for key in sorted(self._wurb_settings.keys()):
            if key in self._developer_settings:
                used_settings.append(key + ': ' + str(self._wurb_settings[key]))
        #
        used_settings.append(' ')
        used_settings.append('# Unrecognised settings:')
        used_settings.append(' ')
        #
        for key in sorted(self._wurb_settings.keys()):
            if (key not in self._default_settings) and \
               (key not in self._developer_settings):
                used_settings.append(key + ': ' + str(self._wurb_settings[key]))
        #
        with file_path.open('w') as file:
            file.write('\r\n'.join(used_settings))
| cloudedbats/cloudedbats_wurb | cloudedbats_wurb/wurb_core/wurb_settings.py | Python | mit | 7,577 |
from ws4redis.publisher import redis_connection_pool, StrictRedis
class RedisProvider(object):
    """Thin wrapper around a pooled StrictRedis connection.

    Methods delegate one-to-one to redis-py; keys are plain strings,
    typically composed with :meth:`build_key`.
    """

    def __init__(self, **kwargs):
        self._connection = StrictRedis(connection_pool=redis_connection_pool)

    def set(self, key, value, expire=None):
        """Set key to value, optionally with an expiry in seconds.

        Bug fix: ``expire`` was accepted but silently ignored; it is now
        forwarded as redis-py's ``ex`` argument (``ex=None`` means no TTL).
        """
        return self._connection.set(name=key, value=value, ex=expire)

    def get(self, key):
        return self._connection.get(name=key)

    def push(self, key, values):
        # Prepends to the list stored at key.
        return self._connection.lpush(key, values)

    def exists(self, key):
        return self._connection.exists(name=key)

    def get_list(self, key):
        # Full list contents (LRANGE 0 -1).
        return self._connection.lrange(name=key, start=0, end=-1)

    def set_scan(self, key, match=None):
        # Incremental iterator over set members, optionally filtered.
        return self._connection.sscan_iter(name=key, match=match)

    def set_add(self, key, values):
        return self._connection.sadd(key, values)

    def set_remove(self, key, values):
        return self._connection.srem(key, values)

    def set_hash(self, key, id, value):
        return self._connection.hset(key, id, value)

    def del_hash(self, key, id):
        return self._connection.hdel(key, id)

    def get_hkeys(self, key):
        return self._connection.hkeys(key)

    def get_status(self, key, id):
        return self._connection.hget(key, id)

    def hmset(self, key, mapping):
        return self._connection.hmset(key, mapping)

    def hmget(self, name, keys):
        return self._connection.hmget(name, keys)

    def hgetall(self, key):
        return self._connection.hgetall(key)

    def hincrby(self, name, key, amount=1):
        return self._connection.hincrby(name, key, amount)

    def smembers(self, name):
        return self._connection.smembers(name)

    @staticmethod
    def build_key(prefix, key):
        """Return the namespaced key string '<prefix>:<key>'."""
        return str(prefix) + ':' + str(key)
| crowdresearch/daemo | crowdsourcing/redis.py | Python | mit | 1,780 |
from .__about__ import * # noqa: F401,F403
| TeamSPoon/logicmoo_workspace | packs_web/butterfly/lib/python3.7/site-packages/devcore/__init__.py | Python | mit | 44 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class TimelineImporter(object):
  """Reads TraceData and populates timeline model with what it finds.

  Abstract base class: subclasses implement GetSupportedPart,
  ImportEvents and FinalizeImport. Importers are run in ascending
  import_order.
  """
  def __init__(self, model, trace_data, import_order):
    self._model = model
    self._trace_data = trace_data
    self.import_order = import_order
  @staticmethod
  def GetSupportedPart():
    """Returns the trace-data part this importer can consume."""
    raise NotImplementedError
  def ImportEvents(self):
    """Processes the event data in the wrapper and creates and adds
    new timeline events to the model"""
    raise NotImplementedError
  def FinalizeImport(self):
    """Called after all other importers for the model are run."""
    raise NotImplementedError
| M4sse/chromium.src | tools/telemetry/telemetry/timeline/importer.py | Python | bsd-3-clause | 781 |
import colander
from celery.utils.log import get_task_logger
from script_wrapper.models import getGPSCount
from script_wrapper.tasks import RTask
from script_wrapper.validation import validateRange
from script_wrapper.validation import iso8601Validator
logger = get_task_logger(__name__)
class Schema(colander.MappingSchema):
    """Form-field schema: ISO 8601 start/end timestamps and a tracker id."""
    start = colander.SchemaNode(colander.String(), validator=iso8601Validator)
    end = colander.SchemaNode(colander.String(), validator=iso8601Validator)
    tracker_id = colander.SchemaNode(colander.Int())
class ExampleR(RTask):
    """Example Celery task that runs the bundled R script ('dbq.r').

    Not auto-registered; serves as a template for R-based tasks.
    """
    name = 'exampler'
    label = 'Example in R'
    title = 'Title of example in R'
    script = 'dbq.r'
    autoregister = False
    def run(self, db_url, tracker_id, start, end):
        """Invoke the R 'exampler' function with local DB credentials and
        the selected tracker/time range; results go to the output dir."""
        u = self.local_db_url(db_url)
        # R expects tracker ids as an integer vector, even for one id.
        trackersInR = self.toIntVector([tracker_id])
        self.r.exampler(u.username, u.password, u.database, u.host,
                        trackersInR, start, end, self.output_dir())
        return {'query': {'start': start,
                          'end': end,
                          'tracker_id': tracker_id,
                          }
                }
    def formfields2taskargs(self, fields, db_url):
        """Validate submitted form fields and convert them to task kwargs.

        Raises on invalid timestamps or when the GPS fix count for the
        selection exceeds MAX_FIX_COUNT.
        """
        schema = Schema()
        taskargs = schema.deserialize(fields)
        start = taskargs['start']
        end = taskargs['end']
        tracker_id = taskargs['tracker_id']
        validateRange(getGPSCount(db_url, tracker_id, start, end), 0, self.MAX_FIX_COUNT)
        taskargs['db_url'] = db_url
        return taskargs
| NLeSC/eEcology-script-wrapper | script_wrapper/tasks/example_r/__init__.py | Python | apache-2.0 | 1,537 |
# Generated by Django 2.2.24 on 2021-08-27 17:56
from django.db import migrations, models
def force_unique_title(apps, schema_editor):
    """Data migration helper: make EnterpriseCatalogQuery titles unique by
    appending the row id to each duplicated title (needed before adding a
    unique constraint on the column).
    """
    # I've verified there are no duplicates in Stage and Prod.
    # This was brought up as a thing devs might need for their local devstack.
    EnterpriseCatalogQuery = apps.get_model('enterprise', 'EnterpriseCatalogQuery')
    duplicate_queries = EnterpriseCatalogQuery.objects.raw(
        'SELECT cq1.title, cq1.id from enterprise_enterprisecatalogquery cq1 '
        'JOIN (select count(*), title from enterprise_enterprisecatalogquery GROUP BY title having count(*) > 1) cq2 '
        'ON cq1.title=cq2.title'
    )
    for query in duplicate_queries:
        # Appending the primary key guarantees uniqueness.
        query.title = f'{query.title}{query.id}'
        query.save(update_fields=['title'])
class Migration(migrations.Migration):
    """De-duplicate titles, then enforce uniqueness at the database level."""

    dependencies = [
        ('enterprise', '0140_update_enrollment_sources'),
    ]

    operations = [
        # Data step first so the unique constraint below cannot fail.
        migrations.RunPython(
            code=force_unique_title,
            reverse_code=migrations.RunPython.noop
        ),
        migrations.AlterField(
            model_name='enterprisecatalogquery',
            name='title',
            field=models.CharField(blank=True, max_length=255, null=True, unique=True),
        ),
    ]
| edx/edx-enterprise | enterprise/migrations/0141_make_enterprisecatalogquery_title_unique.py | Python | agpl-3.0 | 1,274 |
"""
pygments.lexers.r
~~~~~~~~~~~~~~~~~
Lexers for the R/S languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, include, do_insertions
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
__all__ = ['RConsoleLexer', 'SLexer', 'RdLexer']
line_re = re.compile('.*?\n')
class RConsoleLexer(Lexer):
    """
    For R console transcripts or R CMD BATCH output files.

    Lines beginning with ``>`` or ``+`` are treated as the R prompt plus
    user code (the code part is highlighted via :class:`SLexer`); every
    other line is emitted verbatim as ``Generic.Output``.
    """
    name = 'RConsole'
    aliases = ['rconsole', 'rout']
    filenames = ['*.Rout']

    def get_tokens_unprocessed(self, text):
        # Delegate highlighting of the actual R code to SLexer; this lexer
        # only separates prompt/code lines from output lines.
        slexer = SLexer(**self.options)

        # Accumulates the code text of consecutive prompt lines; `insertions`
        # records, at each offset into that text, the prompt token to weave
        # back in via do_insertions().
        current_code_block = ''
        insertions = []

        for match in line_re.finditer(text):
            line = match.group()
            if line.startswith('>') or line.startswith('+'):
                # Colorize the prompt as such,
                # then put rest of line into current_code_block
                insertions.append((len(current_code_block),
                                   [(0, Generic.Prompt, line[:2])]))
                current_code_block += line[2:]
            else:
                # We have reached a non-prompt line!
                # If we have stored prompt lines, need to process them first.
                if current_code_block:
                    # Weave together the prompts and highlight code.
                    yield from do_insertions(
                        insertions, slexer.get_tokens_unprocessed(current_code_block))
                    # Reset vars for next code block.
                    current_code_block = ''
                    insertions = []
                # Now process the actual line itself, this is output from R.
                yield match.start(), Generic.Output, line

        # If we happen to end on a code block with nothing after it, need to
        # process the last code block. This is neither elegant nor DRY so
        # should be changed.
        if current_code_block:
            yield from do_insertions(
                insertions, slexer.get_tokens_unprocessed(current_code_block))
class SLexer(RegexLexer):
    """
    For S, S-plus, and R source code.

    .. versionadded:: 0.10
    """
    name = 'S'
    aliases = ['splus', 's', 'r']
    filenames = ['*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron']
    mimetypes = ['text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r',
                 'text/x-R', 'text/x-r-history', 'text/x-r-profile']

    # An R identifier: a backtick-quoted name (with backslash escapes),
    # a name starting with a letter or a dot followed by a non-digit,
    # or a bare dot.
    valid_name = r'`[^`\\]*(?:\\.[^`\\]*)*`|(?:[a-zA-Z]|\.[A-Za-z_.])[\w.]*|\.'
    # NOTE: rule order within each state matters — earlier patterns win.
    tokens = {
        'comments': [
            (r'#.*$', Comment.Single),
        ],
        'valid_name': [
            (valid_name, Name),
        ],
        'punctuation': [
            (r'\[{1,2}|\]{1,2}|\(|\)|;|,', Punctuation),
        ],
        'keywords': [
            # The lookahead rejects names that merely start with a keyword,
            # e.g. `ifelse` or `for.each`.
            (r'(if|else|for|while|repeat|in|next|break|return|switch|function)'
             r'(?![\w.])',
             Keyword.Reserved),
        ],
        'operators': [
            (r'<<?-|->>?|-|==|<=|>=|<|>|&&?|!=|\|\|?|\?', Operator),
            # %...% covers user-defined infix operators such as %in% or %*%.
            (r'\*|\+|\^|/|!|%[^%]*%|=|~|\$|@|:{1,3}', Operator),
        ],
        'builtin_symbols': [
            (r'(NULL|NA(_(integer|real|complex|character)_)?|'
             r'letters|LETTERS|Inf|TRUE|FALSE|NaN|pi|\.\.(\.|[0-9]+))'
             r'(?![\w.])',
             Keyword.Constant),
            (r'(T|F)\b', Name.Builtin.Pseudo),
        ],
        'numbers': [
            # hex number
            (r'0[xX][a-fA-F0-9]+([pP][0-9]+)?[Li]?', Number.Hex),
            # decimal number
            (r'[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)([eE][+-]?[0-9]+)?[Li]?',
             Number),
        ],
        'statements': [
            include('comments'),
            # whitespaces
            (r'\s+', Text),
            (r'\'', String, 'string_squote'),
            (r'\"', String, 'string_dquote'),
            include('builtin_symbols'),
            include('valid_name'),
            include('numbers'),
            include('keywords'),
            include('punctuation'),
            include('operators'),
        ],
        'root': [
            # calls:
            (r'(%s)\s*(?=\()' % valid_name, Name.Function),
            include('statements'),
            # blocks:
            (r'\{|\}', Punctuation),
            # (r'\{', Punctuation, 'block'),
            (r'.', Text),
        ],
        # 'block': [
        #     include('statements'),
        #     ('\{', Punctuation, '#push'),
        #     ('\}', Punctuation, '#pop')
        # ],
        'string_squote': [
            (r'([^\'\\]|\\.)*\'', String, '#pop'),
        ],
        'string_dquote': [
            (r'([^"\\]|\\.)*"', String, '#pop'),
        ],
    }

    def analyse_text(text):
        # An assignment arrow `<-` preceded by something name-like is a
        # strong hint of R/S code; the lookahead excludes `<--`.
        if re.search(r'[a-z0-9_\])\s]<-(?!-)', text):
            return 0.11
class RdLexer(RegexLexer):
    """
    Pygments Lexer for R documentation (Rd) files

    This is a very minimal implementation, highlighting little more
    than the macros. A description of Rd syntax is found in `Writing R
    Extensions <http://cran.r-project.org/doc/manuals/R-exts.html>`_
    and `Parsing Rd files <http://developer.r-project.org/parseRd.pdf>`_.

    .. versionadded:: 1.6
    """
    name = 'Rd'
    aliases = ['rd']
    filenames = ['*.Rd']
    mimetypes = ['text/x-r-doc']

    # To account for verbatim / LaTeX-like / and R-like areas
    # would require parsing.
    tokens = {
        'root': [
            # catch escaped brackets and percent sign
            (r'\\[\\{}%]', String.Escape),
            # comments
            (r'%.*$', Comment),
            # special macros with no arguments
            (r'\\(?:cr|l?dots|R|tab)\b', Keyword.Constant),
            # macros
            (r'\\[a-zA-Z]+\b', Keyword),
            # special preprocessor macros
            (r'^\s*#(?:ifn?def|endif).*\b', Comment.Preproc),
            # non-escaped brackets
            (r'[{}]', Name.Builtin),
            # everything else
            (r'[^\\%\n{}]+', Text),
            (r'.', Text),
        ]
    }
| dscorbett/pygments | pygments/lexers/r.py | Python | bsd-2-clause | 6,167 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.