| code (string, 3–1.05M chars) | repo_name (string, 5–104 chars) | path (string, 4–251 chars) | language (1 class) | license (15 classes) | size (int64, 3–1.05M) |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
import datetime
from django.db import models
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext_lazy as _, ugettext as __
from django.contrib.auth.models import User
# The workflow signals sent below (workflow_pre_change, role_assigned, etc.)
# are not defined in this module; they are assumed to live in the app's
# signals module alongside these models.
from workflow.signals import (
    workflow_pre_change, workflow_post_change, workflow_transitioned,
    workflow_event_completed, workflow_commented, workflow_started,
    workflow_ended, role_assigned, role_removed,
)
class UnableToActivateWorkflow(Exception):
"""
To be raised if unable to activate the workflow because it did not pass the
validation steps
"""
class UnableToCloneWorkflow(Exception):
"""
To be raised if unable to clone a workflow model (and related models)
"""
class UnableToStartWorkflow(Exception):
"""
To be raised if a WorkflowActivity is unable to start a workflow
"""
class UnableToProgressWorkflow(Exception):
"""
To be raised if the WorkflowActivity is unable to progress a workflow with a
particular transition.
"""
class UnableToLogWorkflowEvent(Exception):
"""
To be raised if the WorkflowActivity is unable to log an event in the
WorkflowHistory
"""
class UnableToAddCommentToWorkflow(Exception):
"""
To be raised if the WorkflowActivity is unable to log a comment in the
WorkflowHistory
"""
class UnableToDisableParticipant(Exception):
"""
To be raised if the WorkflowActivity is unable to disable a participant
"""
class UnableToEnableParticipant(Exception):
"""
To be raised if the WorkflowActivity is unable to enable a participant
"""
class Role(models.Model):
"""
Represents a type of user who can be associated with a workflow. Used by
the State and Transition models to define *who* has permission to view a
state or use a transition. The Event model uses this model to reference
*who* should be involved in a particular event.
"""
name = models.CharField(
_('Name of Role'),
max_length=64
)
description = models.TextField(
_('Description'),
blank=True
)
def __unicode__(self):
return self.name
class Meta:
ordering = ['name',]
verbose_name = _('Role')
verbose_name_plural = _('Roles')
permissions = (
('can_define_roles', __('Can define roles')),
)
class Workflow(models.Model):
"""
Instances of this class represent a named workflow that achieves a particular
aim through a series of related states / transitions. A name for a directed
graph.
"""
# A workflow can be in one of three states:
#
# * definition: you're building the thing to meet whatever requirements you
# have
#
# * active: you're using the defined workflow in relation to things in your
# application - the workflow definition is frozen from this point on.
#
# * retired: you no longer use the workflow (but we keep it so it can be
# cloned as the basis of new workflows starting in the definition state)
#
# Why do this? Imagine the mess that could be created if a "live" workflow
# was edited and states were deleted or orphaned. These states at least
# allow us to check things don't go horribly wrong. :-/
DEFINITION = 0
ACTIVE = 1
RETIRED = 2
STATUS_CHOICE_LIST = (
(DEFINITION, _('In definition')),
(ACTIVE, _('Active')),
(RETIRED, _('Retired')),
)
name = models.CharField(
_('Workflow Name'),
max_length=128
)
slug = models.SlugField(
_('Slug')
)
description = models.TextField(
_('Description'),
blank=True
)
status = models.IntegerField(
_('Status'),
choices=STATUS_CHOICE_LIST,
default = DEFINITION
)
# These next fields are helpful for tracking the history and development of a
# workflow should it have been cloned
created_on = models.DateTimeField(
auto_now_add=True
)
created_by = models.ForeignKey(
User
)
cloned_from = models.ForeignKey(
'self',
null=True
)
# To hold error messages created in the validate method
errors = {
'workflow':[],
'states': {},
'transitions':{},
}
def is_valid(self):
"""
Checks that the directed graph doesn't contain any orphaned nodes (is
connected), any cul-de-sac nodes (non-end nodes with no exit
transition), has compatible roles for transitions and states and
contains exactly one start node and at least one end state.
Any errors are logged in the errors dictionary.
Returns a boolean
"""
self.errors = {
'workflow':[],
'states': {},
'transitions':{},
}
valid = True
# The graph must have only one start node
if self.states.filter(is_start_state=True).count() != 1:
self.errors['workflow'].append(__('There must be only one start'\
' state'))
valid = False
# The graph must have at least one end state
if self.states.filter(is_end_state=True).count() < 1:
self.errors['workflow'].append(__('There must be at least one end'\
' state'))
valid = False
# Check for orphan nodes / cul-de-sac nodes
all_states = self.states.all()
for state in all_states:
if state.transitions_into.all().count() == 0 and state.is_start_state == False:
if not state.id in self.errors['states']:
self.errors['states'][state.id] = list()
self.errors['states'][state.id].append(__('This state is'\
' orphaned. There is no way to get to it given the'\
' current workflow topology.'))
valid = False
if state.transitions_from.all().count() == 0 and state.is_end_state == False:
if not state.id in self.errors['states']:
self.errors['states'][state.id] = list()
self.errors['states'][state.id].append(__('This state is a'\
' dead end. It is not marked as an end state and there'\
' is no way to exit from it.'))
valid = False
# Check the role collections are compatible between states and
# transitions (i.e. there cannot be any transitions that are only
# available to participants with roles that are not also roles
# associated with the parent state).
for state in all_states:
# *at least* one role from the state must also be associated
# with each transition where the state is the from_state
state_roles = state.roles.all()
for transition in state.transitions_from.all():
if not transition.roles.filter(pk__in=[r.id for r in state_roles]):
if not transition.id in self.errors['transitions']:
self.errors['transitions'][transition.id] = list()
self.errors['transitions'][transition.id].append(__('This'\
' transition is not navigable because none of the'\
' roles associated with the parent state have'\
' permission to use it.'))
valid = False
return valid
def has_errors(self, thing):
"""
Utility method to quickly get a list of errors associated with the
"thing" passed to it (either a state or transition)
"""
if isinstance(thing, State):
if thing.id in self.errors['states']:
return self.errors['states'][thing.id]
else:
return []
elif isinstance(thing, Transition):
if thing.id in self.errors['transitions']:
return self.errors['transitions'][thing.id]
else:
return []
else:
return []
def activate(self):
"""
Puts the workflow in the "active" state after checking the directed
graph doesn't contain any orphaned nodes (is connected), is in
DEFINITION state, has compatible roles for transitions and states and
contains exactly one start state and at least one end state
"""
# Only workflows in definition state can be activated
if not self.status == self.DEFINITION:
raise UnableToActivateWorkflow, __('Only workflows in the'\
' "definition" state may be activated')
if not self.is_valid():
raise UnableToActivateWorkflow, __("Cannot activate as the"\
" workflow doesn't validate.")
# Good to go...
self.status = self.ACTIVE
self.save()
def retire(self):
"""
Retires the workflow so it can no longer be used with new
WorkflowActivity models
"""
self.status = self.RETIRED
self.save()
def clone(self, user):
"""
Returns a clone of the workflow. The clone will be in the DEFINITION
state whereas the source workflow *must* be ACTIVE or RETIRED (so we
know it *must* be valid).
"""
# TODO: A target for refactoring so calling this method doesn't hit the
# database so hard. Would welcome ideas..?
if self.status >= self.ACTIVE:
# Clone this workflow
clone_workflow = Workflow()
clone_workflow.name = self.name
clone_workflow.slug = self.slug+'_clone'
clone_workflow.description = self.description
clone_workflow.status = self.DEFINITION
clone_workflow.created_by = user
clone_workflow.cloned_from = self
clone_workflow.save()
# Clone the states
state_dict = dict() # key = old pk of state, val = new clone state
for s in self.states.all():
clone_state = State()
clone_state.name = s.name
clone_state.description = s.description
clone_state.is_start_state = s.is_start_state
clone_state.is_end_state = s.is_end_state
clone_state.workflow = clone_workflow
clone_state.estimation_value = s.estimation_value
clone_state.estimation_unit = s.estimation_unit
clone_state.save()
for r in s.roles.all():
clone_state.roles.add(r)
state_dict[s.id] = clone_state
# Clone the transitions
for tr in self.transitions.all():
clone_trans = Transition()
clone_trans.name = tr.name
clone_trans.workflow = clone_workflow
clone_trans.from_state = state_dict[tr.from_state.id]
clone_trans.to_state = state_dict[tr.to_state.id]
clone_trans.save()
for r in tr.roles.all():
clone_trans.roles.add(r)
# Clone the events
for ev in self.events.all():
clone_event = Event()
clone_event.name = ev.name
clone_event.description = ev.description
clone_event.workflow = clone_workflow
clone_event.state = state_dict[ev.state.id]
clone_event.is_mandatory = ev.is_mandatory
clone_event.save()
for r in ev.roles.all():
clone_event.roles.add(r)
return clone_workflow
else:
raise UnableToCloneWorkflow, __('Only active or retired workflows'\
' may be cloned')
def __unicode__(self):
return self.name
class Meta:
ordering = ['status', 'name']
verbose_name = _('Workflow')
verbose_name_plural = _('Workflows')
permissions = (
('can_manage_workflows', __('Can manage workflows')),
)
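# --- Illustrative sketch (not part of the original module) ---
# A minimal example of the definition -> active -> retired lifecycle described
# in the comments above: clone an existing workflow, adjust the clone while it
# is still "in definition", then activate it. The slug 'vacancy' and the
# editor_user argument are assumptions made up for illustration.
def _example_workflow_lifecycle(editor_user):
    original = Workflow.objects.get(slug='vacancy')   # assumed, already ACTIVE
    draft = original.clone(editor_user)                # clone starts in DEFINITION
    draft.name = original.name + ' (v2)'
    draft.save()
    if draft.is_valid():                               # one start state, no orphans, etc.
        draft.activate()                               # freezes the definition
        original.retire()                              # kept so it can be cloned again
    return draft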
class State(models.Model):
"""
Represents a specific state that a thing can be in during its progress
through a workflow. A node in a directed graph.
"""
# Constant values to denote a period of time in seconds
SECOND = 1
MINUTE = 60
HOUR = 3600
DAY = 86400
WEEK = 604800
DURATIONS = (
(SECOND, _('Second(s)')),
(MINUTE, _('Minute(s)')),
(HOUR, _('Hour(s)')),
(DAY, _('Day(s)')),
(WEEK, _('Week(s)')),
)
name = models.CharField(
_('Name'),
max_length=256
)
description = models.TextField(
_('Description'),
blank=True
)
is_start_state = models.BooleanField(
_('Is the start state?'),
help_text=_('There can only be one start state for a workflow'),
default=False
)
is_end_state = models.BooleanField(
_('Is an end state?'),
help_text=_('An end state shows that the workflow is complete'),
default=False
)
workflow = models.ForeignKey(
Workflow,
related_name='states')
# The roles defined here define *who* has permission to view the item in
# this state.
roles = models.ManyToManyField(
Role,
blank=True
)
# The following two fields allow a specification of expected duration to be
# associated with a state. The estimation_value field stores the amount of
# time, whilst estimation_unit stores the unit of time estimation_value is
# in. For example, estimation_value=5, estimation_unit=DAY means something
# is expected to be in this state for 5 days. By doing estimation_value *
# estimation_unit we can get the number of seconds to pass into a timedelta
# to discover when the deadline for a state is.
estimation_value = models.IntegerField(
_('Estimated time (value)'),
default=0,
help_text=_('Use whole numbers')
)
estimation_unit = models.IntegerField(
_('Estimation unit of time'),
default=DAY,
choices = DURATIONS
)
def deadline(self):
"""
Will return the expected deadline (or None) for this state calculated
from datetime.today()
"""
if self.estimation_value > 0:
duration = datetime.timedelta(
seconds=(self.estimation_value*self.estimation_unit)
)
return (self._today()+duration)
else:
return None
def _today(self):
"""
To help with the unit tests
"""
return datetime.datetime.today()
def __unicode__(self):
return self.name
class Meta:
ordering = ['-is_start_state','is_end_state']
verbose_name = _('State')
verbose_name_plural = _('States')
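# --- Illustrative sketch (not part of the original module) ---
# How the estimation fields combine into a deadline: estimation_value is
# multiplied by estimation_unit (a number of seconds) and added to "today".
# The state name and values below are assumptions for illustration only.
def _example_state_deadline():
    review = State(name='In review', estimation_value=5, estimation_unit=State.DAY)
    # Equivalent to datetime.datetime.today() + datetime.timedelta(seconds=5 * 86400)
    return review.deadline()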
class Transition(models.Model):
"""
Represents how a workflow can move between different states. An edge
between state "nodes" in a directed graph.
"""
name = models.CharField(
_('Name of transition'),
max_length=128,
help_text=_('Use an "active" verb. e.g. "Close Issue", "Open'\
' Vacancy" or "Start Interviews"')
)
# This field is the result of denormalization to help with the Workflow
# class's clone() method.
workflow = models.ForeignKey(
Workflow,
related_name = 'transitions'
)
from_state = models.ForeignKey(
State,
related_name = 'transitions_from'
)
to_state = models.ForeignKey(
State,
related_name = 'transitions_into'
)
# The roles referenced here define *who* has permission to use this
# transition to move between states.
roles = models.ManyToManyField(
Role,
blank=True
)
def __unicode__(self):
return self.name
class Meta:
verbose_name = _('Transition')
verbose_name_plural = _('Transitions')
class EventType(models.Model):
"""
Defines the types of event that can be associated with a workflow. Examples
might include: meeting, deadline, review, assessment etc...
"""
name = models.CharField(
_('Event Type Name'),
max_length=256
)
description = models.TextField(
_('Description'),
blank=True
)
def __unicode__(self):
return self.name
class Event(models.Model):
"""
A definition of something that is supposed to happen when in a particular
state.
"""
name = models.CharField(
_('Event summary'),
max_length=256
)
description = models.TextField(
_('Description'),
blank=True
)
# The workflow field is the result of denormalization to help with the
# Workflow class's clone() method.
# Also, workflow and state can be nullable so an event can be treated as
# "generic" for all workflows / states in the database.
workflow = models.ForeignKey(
Workflow,
related_name='events',
null=True,
blank=True
)
state = models.ForeignKey(
State,
related_name='events',
null=True,
blank=True
)
# The roles referenced here indicate *who* is supposed to be a part of the
# event
roles = models.ManyToManyField(Role)
# The event types referenced here help define what sort of event this is.
# For example, a meeting and review (an event might be of more than one
# type)
event_types = models.ManyToManyField(EventType)
# If this field is true then the workflow cannot progress beyond the related
# state without it first appearing in the workflow history
is_mandatory = models.BooleanField(
_('Mandatory event'),
default=False,
help_text=_('This event must be marked as complete before moving'\
' out of the associated state.')
)
def __unicode__(self):
return self.name
class Meta:
verbose_name = _('Event')
verbose_name_plural = _('Events')
class WorkflowActivity(models.Model):
"""
Other models in a project reference this model so they become associated
with a particular workflow.
The WorkflowActivity object also contains *all* the methods required to
start, progress and stop a workflow.
"""
workflow = models.ForeignKey(Workflow)
created_by = models.ForeignKey(User)
created_on = models.DateTimeField(auto_now_add=True)
completed_on = models.DateTimeField(
null=True,
blank=True
)
def current_state(self):
"""
Returns the instance of the WorkflowHistory model that represents the
current state this WorkflowActivity is in.
"""
if self.history.all():
return self.history.all()[0]
else:
return None
def start(self, user):
"""
Starts a WorkflowActivity by putting it into the start state of the
workflow defined in the "workflow" field after validating the workflow
activity is in a state appropriate for "starting"
"""
participant = Participant.objects.get(workflowactivity=self, user=user,
disabled=False)
start_state_result = State.objects.filter(
workflow=self.workflow,
is_start_state=True
)
# Validation...
# 1. The workflow activity isn't already started
if self.current_state():
if self.current_state().state:
raise UnableToStartWorkflow, __('Already started')
# 2. The workflow activity hasn't been force_stopped before being
# started
if self.completed_on:
raise UnableToStartWorkflow, __('Already completed')
# 3. There is exactly one start state
if not len(start_state_result) == 1:
raise UnableToStartWorkflow, __('Cannot find single start state')
# Good to go...
first_step = WorkflowHistory(
workflowactivity=self,
state=start_state_result[0],
log_type=WorkflowHistory.TRANSITION,
participant=participant,
note=__('Started workflow'),
deadline=start_state_result[0].deadline()
)
first_step.save()
return first_step
def progress(self, transition, user, note=''):
"""
Attempts to progress a workflow activity with the specified transition
as requested by the specified participant.
The transition is validated (to make sure it is a legal "move" in the
directed graph) and the method returns the new WorkflowHistory state or
raises an UnableToProgressWorkflow exception.
"""
participant = Participant.objects.get(workflowactivity=self, user=user,
disabled=False)
# Validate the transition
current_state = self.current_state()
# 1. Make sure the workflow activity is started
if not current_state:
raise UnableToProgressWorkflow, __('Start the workflow before'\
' attempting to transition')
# 2. Make sure its parent is the current state
if not transition.from_state == current_state.state:
raise UnableToProgressWorkflow, __('Transition not valid (wrong'\
' parent)')
# 3. Make sure all mandatory events for the current state are found in
# the WorkflowHistory
mandatory_events = current_state.state.events.filter(is_mandatory=True)
for me in mandatory_events:
if not me.history.filter(workflowactivity=self):
raise UnableToProgressWorkflow, __('Transition not valid'\
' (mandatory event missing)')
# 4. Make sure the user has the appropriate role to allow them to make
# the transition
if not transition.roles.filter(pk__in=[role.id for role in participant.roles.all()]):
raise UnableToProgressWorkflow, __('Participant has insufficient'\
' authority to use the specified transition')
# The "progress" request has been validated to store the transition into
# the appropriate WorkflowHistory record and if it is an end state then
# update this WorkflowActivity's record with the appropriate timestamp
if not note:
note = transition.name
wh = WorkflowHistory(
workflowactivity=self,
state=transition.to_state,
log_type=WorkflowHistory.TRANSITION,
transition=transition,
participant=participant,
note=note,
deadline=transition.to_state.deadline()
)
wh.save()
# If we're at the end then mark the workflow activity as completed on
# today
if transition.to_state.is_end_state:
self.completed_on = datetime.datetime.today()
self.save()
return wh
def log_event(self, event, user, note=''):
"""
Logs the occurrence of an event in the WorkflowHistory of a
WorkflowActivity and returns the resulting record.
If the event is associated with a workflow or state then this method
validates that the event is associated with the workflow, that the
participant logging the event is also one of the event participants and
if the event is mandatory then it must be done whilst in the
appropriate state.
"""
participant = Participant.objects.get(workflowactivity=self, user=user,
disabled=False)
current_state = self.current_state()
if event.workflow:
# Make sure we have an event for the right workflow
if not event.workflow == self.workflow:
raise UnableToLogWorkflowEvent, __('The event is not associated'\
' with the workflow for the WorkflowActivity')
if event.state:
# If the event is mandatory then it must be completed whilst in
# the associated state
if event.is_mandatory:
if not event.state == current_state.state:
raise UnableToLogWorkflowEvent, __('The mandatory'\
' event is not associated with the current'\
' state')
if event.roles.all():
# Make sure the participant is associated with the event
if not event.roles.filter(pk__in=[p.id for p in participant.roles.all()]):
raise UnableToLogWorkflowEvent, __('The participant is not'\
' associated with the specified event')
if not note:
note=event.name
# Good to go...
current_state = self.current_state().state if self.current_state() else None
deadline = self.current_state().deadline if self.current_state() else None
wh = WorkflowHistory(
workflowactivity=self,
state=current_state,
log_type=WorkflowHistory.EVENT,
event=event,
participant=participant,
note=note,
deadline=deadline
)
wh.save()
return wh
def add_comment(self, user, note):
"""
In many sorts of workflow it is necessary to add a comment about
something at a particular state in a WorkflowActivity.
"""
if not note:
raise UnableToAddCommentToWorkflow, __('Cannot add an empty comment')
p, created = Participant.objects.get_or_create(workflowactivity=self,
user=user)
current_state = self.current_state().state if self.current_state() else None
deadline = self.current_state().deadline if self.current_state() else None
wh = WorkflowHistory(
workflowactivity=self,
state=current_state,
log_type=WorkflowHistory.COMMENT,
participant=p,
note=note,
deadline=deadline
)
wh.save()
return wh
def assign_role(self, user, assignee, role):
"""
Assigns the role to the assignee for this instance of a workflow
activity. The arg 'user' logs who made the assignment
"""
p_as_user = Participant.objects.get(workflowactivity=self, user=user,
disabled=False)
p_as_assignee, created = Participant.objects.get_or_create(
workflowactivity=self,
user=assignee)
p_as_assignee.roles.add(role)
name = assignee.get_full_name() if assignee.get_full_name() else assignee.username
note = _('Role "%s" assigned to %s')%(role.__unicode__(), name)
current_state = self.current_state().state if self.current_state() else None
deadline = self.current_state().deadline if self.current_state() else None
wh = WorkflowHistory(
workflowactivity=self,
state=current_state,
log_type=WorkflowHistory.ROLE,
participant=p_as_user,
note=note,
deadline=deadline
)
wh.save()
role_assigned.send(sender=wh)
return wh
def remove_role(self, user, assignee, role):
"""
Removes the role from the assignee. The 'user' argument is used for
logging purposes.
"""
try:
p_as_user = Participant.objects.get(workflowactivity=self,
user=user, disabled=False)
p_as_assignee = Participant.objects.get(workflowactivity=self,
user=assignee)
if role in p_as_assignee.roles.all():
p_as_assignee.roles.remove(role)
name = assignee.get_full_name() if assignee.get_full_name() else assignee.username
note = _('Role "%s" removed from %s')%(role.__unicode__(), name)
current_state = self.current_state().state if self.current_state() else None
deadline = self.current_state().deadline if self.current_state() else None
wh = WorkflowHistory(
workflowactivity=self,
state=current_state,
log_type=WorkflowHistory.ROLE,
participant=p_as_user,
note=note,
deadline=deadline
)
wh.save()
role_removed.send(sender=wh)
return wh
else:
# The role isn't associated with the assignee anyway so there is
# nothing to do
return None
except ObjectDoesNotExist:
# If we can't find the assignee as a participant then there is
# nothing to do
return None
def clear_roles(self, user, assignee):
"""
Clears all the roles from assignee. The 'user' argument is used for
logging purposes.
"""
try:
p_as_user = Participant.objects.get(workflowactivity=self,
user=user, disabled=False)
p_as_assignee = Participant.objects.get(workflowactivity=self,
user=assignee)
p_as_assignee.roles.clear()
name = assignee.get_full_name() if assignee.get_full_name() else assignee.username
note = _('All roles removed from %s')%name
current_state = self.current_state().state if self.current_state() else None
deadline = self.current_state().deadline if self.current_state() else None
wh = WorkflowHistory(
workflowactivity=self,
state=current_state,
log_type=WorkflowHistory.ROLE,
participant=p_as_user,
note=note,
deadline=deadline
)
wh.save()
role_removed.send(sender=wh)
return wh
except ObjectDoesNotExist:
# If we can't find the assignee then there is nothing to do
pass
def disable_participant(self, user, user_to_disable, note):
"""
Mark the user_to_disable as disabled. Must include a note explaining
reasons for this action. Also the 'user' arg is used for logging who
carried this out
"""
if not note:
raise UnableToDisableParticipant, __('Must supply a reason for'\
' disabling a participant. None given.')
try:
p_as_user = Participant.objects.get(workflowactivity=self,
user=user, disabled=False)
p_to_disable = Participant.objects.get(workflowactivity=self,
user=user_to_disable)
if not p_to_disable.disabled:
p_to_disable.disabled = True
p_to_disable.save()
name = user_to_disable.get_full_name() if user_to_disable.get_full_name() else user_to_disable.username
note = _('Participant %s disabled with the reason: %s')%(name, note)
current_state = self.current_state().state if self.current_state() else None
deadline = self.current_state().deadline if self.current_state() else None
wh = WorkflowHistory(
workflowactivity=self,
state=current_state,
log_type=WorkflowHistory.ROLE,
participant=p_as_user,
note=note,
deadline=deadline
)
wh.save()
return wh
else:
# They're already disabled
return None
except ObjectDoesNotExist:
# If we can't find the assignee then there is nothing to do
return None
def enable_participant(self, user, user_to_enable, note):
"""
Mark the user_to_enable as enabled. Must include a note explaining
reasons for this action. Also the 'user' arg is used for logging who
carried this out
"""
if not note:
raise UnableToEnableParticipant, __('Must supply a reason for'\
' enabling a disabled participant. None given.')
try:
p_as_user = Participant.objects.get(workflowactivity=self,
user=user, disabled=False)
p_to_enable = Participant.objects.get(workflowactivity=self,
user=user_to_enable)
if p_to_enable.disabled:
p_to_enable.disabled = False
p_to_enable.save()
name = user_to_enable.get_full_name() if user_to_enable.get_full_name() else user_to_enable.username
note = _('Participant %s enabled with the reason: %s')%(name,
note)
current_state = self.current_state().state if self.current_state() else None
deadline = self.current_state().deadline if self.current_state() else None
wh = WorkflowHistory(
workflowactivity=self,
state=current_state,
log_type=WorkflowHistory.ROLE,
participant=p_as_user,
note=note,
deadline=deadline
)
wh.save()
return wh
else:
# The participant is already enabled
return None
except ObjectDoesNotExist:
# If we can't find the participant then there is nothing to do
return None
def force_stop(self, user, reason):
"""
Should a WorkflowActivity need to be abandoned this method cleanly logs
the event and puts the WorkflowActivity in the appropriate state (with
reason provided by participant).
"""
# Let's try to create an appropriate entry in the WorkflowHistory table
current_state = self.current_state()
participant = Participant.objects.get(
workflowactivity=self,
user=user)
if current_state:
final_step = WorkflowHistory(
workflowactivity=self,
state=current_state.state,
log_type=WorkflowHistory.TRANSITION,
participant=participant,
note=__('Workflow forced to stop! Reason given: %s') % reason,
deadline=None
)
final_step.save()
self.completed_on = datetime.datetime.today()
self.save()
class Meta:
ordering = ['-completed_on', '-created_on']
verbose_name = _('Workflow Activity')
verbose_name_plural = _('Workflow Activities')
permissions = (
('can_start_workflow',__('Can start a workflow')),
('can_assign_roles',__('Can assign roles'))
)
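# --- Illustrative sketch (not part of the original module) ---
# Driving a WorkflowActivity: create it, register the user as a participant
# with a suitable role, start it, then progress it along a transition. The
# workflow slug, role name and transition name are assumptions; the user is
# expected to hold a role that the transition permits.
def _example_run_activity(user):
    workflow = Workflow.objects.get(slug='vacancy', status=Workflow.ACTIVE)
    activity = WorkflowActivity.objects.create(workflow=workflow, created_by=user)
    participant = Participant.objects.create(workflowactivity=activity, user=user)
    participant.roles.add(Role.objects.get(name='Manager'))
    activity.start(user)                                # enters the single start state
    transition = workflow.transitions.get(name='Open Vacancy')
    return activity.progress(transition, user)          # returns the new WorkflowHistory row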
class Participant(models.Model):
"""
Defines which users have what roles in a particular run of a workflow
"""
user = models.ForeignKey(User)
# can be nullable because a participant *might* not have a role assigned to
# them (yet), and is many-to-many as they might have many different roles.
roles = models.ManyToManyField(
Role,
null=True)
workflowactivity= models.ForeignKey(
WorkflowActivity,
related_name='participants'
)
disabled = models.BooleanField(default=False)
def __unicode__(self):
name = self.user.get_full_name() if self.user.get_full_name() else self.user.username
if self.roles.all():
roles = u' - ' + u', '.join([r.__unicode__() for r in self.roles.all()])
else:
roles = ''
disabled = _(' (disabled)') if self.disabled else ''
return u"%s%s%s"%(name, roles, disabled)
class Meta:
ordering = ['-disabled', 'workflowactivity', 'user',]
verbose_name = _('Participant')
verbose_name_plural = _('Participants')
unique_together = ('user', 'workflowactivity')
class WorkflowHistory(models.Model):
"""
Records what has happened and when in a particular run of a workflow. The
latest record for the referenced WorkflowActivity will indicate the current
state.
"""
# The sort of things we can log in the workflow history
TRANSITION = 1
EVENT = 2
ROLE = 3
COMMENT = 4
# Used to indicate what sort of thing we're logging in the workflow history
TYPE_CHOICE_LIST = (
(TRANSITION, _('Transition')),
(EVENT, _('Event')),
(ROLE, _('Role')),
(COMMENT, _('Comment')),
)
workflowactivity= models.ForeignKey(
WorkflowActivity,
related_name='history')
log_type = models.IntegerField(
help_text=_('The sort of thing being logged'),
choices=TYPE_CHOICE_LIST
)
state = models.ForeignKey(
State,
help_text=_('The state at this point in the workflow history'),
null=True
)
transition = models.ForeignKey(
Transition,
null=True,
related_name='history',
help_text=_('The transition relating to this happening in the'\
' workflow history')
)
event = models.ForeignKey(
Event,
null=True,
related_name='history',
help_text=_('The event relating to this happening in the workflow'\
' history')
)
participant = models.ForeignKey(
Participant,
help_text=_('The participant who triggered this happening in the'\
' workflow history')
)
created_on = models.DateTimeField(auto_now_add=True)
note = models.TextField(
_('Note'),
blank=True
)
deadline = models.DateTimeField(
_('Deadline'),
null=True,
blank=True,
help_text=_('The deadline for staying in this state')
)
def save(self):
workflow_pre_change.send(sender=self)
super(WorkflowHistory, self).save()
workflow_post_change.send(sender=self)
if self.log_type==self.TRANSITION:
workflow_transitioned.send(sender=self)
if self.log_type==self.EVENT:
workflow_event_completed.send(sender=self)
if self.log_type==self.COMMENT:
workflow_commented.send(sender=self)
if self.state:
if self.state.is_start_state:
workflow_started.send(sender=self.workflowactivity)
elif self.state.is_end_state:
workflow_ended.send(sender=self.workflowactivity)
def __unicode__(self):
return u"%s created by %s"%(self.note, self.participant.__unicode__())
class Meta:
ordering = ['-created_on']
verbose_name = _('Workflow History')
verbose_name_plural = _('Workflow Histories')
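# --- Illustrative sketch (not part of the original module) ---
# WorkflowHistory.save() above dispatches the workflow_* signals, so other
# apps can react to lifecycle changes. A minimal receiver, assuming the
# signals referenced in this module (e.g. workflow_started) are importable:
def _example_on_workflow_started(sender, **kwargs):
    # 'sender' is the WorkflowActivity whose start state was just recorded.
    print('Workflow activity %s started' % sender.pk)
workflow_started.connect(_example_on_workflow_started)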
| indexofire/gravoicy | gravoicy/libs/workflow/models.py | Python | bsd-3-clause | 40,120 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_delete_request(
scope: str,
policy_exemption_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-07-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/{scope}/providers/Microsoft.Authorization/policyExemptions/{policyExemptionName}')
path_format_arguments = {
"scope": _SERIALIZER.url("scope", scope, 'str', skip_quote=True),
"policyExemptionName": _SERIALIZER.url("policy_exemption_name", policy_exemption_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request(
scope: str,
policy_exemption_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2020-07-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/{scope}/providers/Microsoft.Authorization/policyExemptions/{policyExemptionName}')
path_format_arguments = {
"scope": _SERIALIZER.url("scope", scope, 'str', skip_quote=True),
"policyExemptionName": _SERIALIZER.url("policy_exemption_name", policy_exemption_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_get_request(
scope: str,
policy_exemption_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-07-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/{scope}/providers/Microsoft.Authorization/policyExemptions/{policyExemptionName}')
path_format_arguments = {
"scope": _SERIALIZER.url("scope", scope, 'str', skip_quote=True),
"policyExemptionName": _SERIALIZER.url("policy_exemption_name", policy_exemption_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_request(
subscription_id: str,
*,
filter: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-07-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyExemptions')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = _SERIALIZER.query("filter", filter, 'str', skip_quote=True)
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_for_resource_group_request(
subscription_id: str,
resource_group_name: str,
*,
filter: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-07-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/policyExemptions')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = _SERIALIZER.query("filter", filter, 'str', skip_quote=True)
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_for_resource_request(
subscription_id: str,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
*,
filter: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-07-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/policyExemptions')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"resourceProviderNamespace": _SERIALIZER.url("resource_provider_namespace", resource_provider_namespace, 'str'),
"parentResourcePath": _SERIALIZER.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
"resourceType": _SERIALIZER.url("resource_type", resource_type, 'str', skip_quote=True),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = _SERIALIZER.query("filter", filter, 'str', skip_quote=True)
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_for_management_group_request(
management_group_id: str,
*,
filter: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-07-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyExemptions')
path_format_arguments = {
"managementGroupId": _SERIALIZER.url("management_group_id", management_group_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = _SERIALIZER.query("filter", filter, 'str', skip_quote=True)
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
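# --- Illustrative sketch (not part of the generated code) ---
# The build_*_request helpers above only assemble an HttpRequest; they do not
# send it. In normal use these operations are reached through an SDK client
# instance (see the class docstring below) rather than by calling the helpers
# directly. The scope and exemption name here are made-up values.
def _example_build_get_request():
    request = build_get_request(
        scope="subscriptions/00000000-0000-0000-0000-000000000000",
        policy_exemption_name="exampleExemption",
    )
    # request.url now holds the formatted policy exemption path, and the
    # api-version query parameter travels with the request.
    return request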
class PolicyExemptionsOperations(object):
"""PolicyExemptionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.policy.v2020_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def delete(
self,
scope: str,
policy_exemption_name: str,
**kwargs: Any
) -> None:
"""Deletes a policy exemption.
This operation deletes a policy exemption, given its name and the scope it was created in. The
scope of a policy exemption is the part of its ID preceding
'/providers/Microsoft.Authorization/policyExemptions/{policyExemptionName}'.
:param scope: The scope of the policy exemption. Valid scopes are: management group (format:
'/providers/Microsoft.Management/managementGroups/{managementGroup}'), subscription (format:
'/subscriptions/{subscriptionId}'), resource group (format:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}', or resource (format:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/[{parentResourcePath}/]{resourceType}/{resourceName}'.
:type scope: str
:param policy_exemption_name: The name of the policy exemption to delete.
:type policy_exemption_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
scope=scope,
policy_exemption_name=policy_exemption_name,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/policyExemptions/{policyExemptionName}'} # type: ignore
@distributed_trace
def create_or_update(
self,
scope: str,
policy_exemption_name: str,
parameters: "_models.PolicyExemption",
**kwargs: Any
) -> "_models.PolicyExemption":
"""Creates or updates a policy exemption.
This operation creates or updates a policy exemption with the given scope and name. Policy
exemptions apply to all resources contained within their scope. For example, when you create a
policy exemption at resource group scope for a policy assignment at the same or above level,
the exemption applies to all applicable resources in the resource group.
:param scope: The scope of the policy exemption. Valid scopes are: management group (format:
'/providers/Microsoft.Management/managementGroups/{managementGroup}'), subscription (format:
'/subscriptions/{subscriptionId}'), resource group (format:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}', or resource (format:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/[{parentResourcePath}/]{resourceType}/{resourceName}'.
:type scope: str
:param policy_exemption_name: The name of the policy exemption to create or update.
:type policy_exemption_name: str
:param parameters: Parameters for the policy exemption.
:type parameters: ~azure.mgmt.resource.policy.v2020_09_01.models.PolicyExemption
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PolicyExemption, or the result of cls(response)
:rtype: ~azure.mgmt.resource.policy.v2020_09_01.models.PolicyExemption
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyExemption"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'PolicyExemption')
request = build_create_or_update_request(
scope=scope,
policy_exemption_name=policy_exemption_name,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PolicyExemption', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PolicyExemption', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/policyExemptions/{policyExemptionName}'} # type: ignore
@distributed_trace
def get(
self,
scope: str,
policy_exemption_name: str,
**kwargs: Any
) -> "_models.PolicyExemption":
"""Retrieves a policy exemption.
This operation retrieves a single policy exemption, given its name and the scope it was created
at.
:param scope: The scope of the policy exemption. Valid scopes are: management group (format:
'/providers/Microsoft.Management/managementGroups/{managementGroup}'), subscription (format:
'/subscriptions/{subscriptionId}'), resource group (format:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}', or resource (format:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/[{parentResourcePath}/]{resourceType}/{resourceName}'.
:type scope: str
:param policy_exemption_name: The name of the policy exemption to retrieve.
:type policy_exemption_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PolicyExemption, or the result of cls(response)
:rtype: ~azure.mgmt.resource.policy.v2020_09_01.models.PolicyExemption
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyExemption"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
scope=scope,
policy_exemption_name=policy_exemption_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PolicyExemption', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/policyExemptions/{policyExemptionName}'} # type: ignore
@distributed_trace
def list(
self,
filter: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.PolicyExemptionListResult"]:
"""Retrieves all policy exemptions that apply to a subscription.
This operation retrieves the list of all policy exemptions associated with the given
subscription that match the optional given $filter. Valid values for $filter are: 'atScope()',
'atExactScope()', 'excludeExpired()' or 'policyAssignmentId eq '{value}''. If $filter is not
provided, the unfiltered list includes all policy exemptions associated with the subscription,
including those that apply directly or from management groups that contain the given
subscription, as well as any applied to objects contained within the subscription.
:param filter: The filter to apply on the operation. Valid values for $filter are: 'atScope()',
'atExactScope()', 'excludeExpired()' or 'policyAssignmentId eq '{value}''. If $filter is not
provided, no filtering is performed and the unfiltered list includes all policy exemptions
associated with the scope, including those that apply directly or apply from containing scopes.
If $filter=atScope() is provided, the returned list only includes all policy exemptions that
apply to the scope, which is everything in the unfiltered list except those applied to sub
scopes contained within the given scope. If $filter=atExactScope() is provided, the returned
list only includes all policy exemptions that are at the given scope. If
$filter=excludeExpired() is provided, the returned list only includes all policy exemptions
that either haven't expired or don't have an expiration date set. If $filter=policyAssignmentId eq
'{value}' is provided, the returned list only includes all policy exemptions that are
associated with the given policyAssignmentId.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PolicyExemptionListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.resource.policy.v2020_09_01.models.PolicyExemptionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyExemptionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
filter=filter,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
filter=filter,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("PolicyExemptionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyExemptions'} # type: ignore
@distributed_trace
def list_for_resource_group(
self,
resource_group_name: str,
filter: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.PolicyExemptionListResult"]:
"""Retrieves all policy exemptions that apply to a resource group.
This operation retrieves the list of all policy exemptions associated with the given resource
group in the given subscription that match the optional given $filter. Valid values for $filter
are: 'atScope()', 'atExactScope()', 'excludeExpired()' or 'policyAssignmentId eq '{value}''. If
$filter is not provided, the unfiltered list includes all policy exemptions associated with the
resource group, including those that apply directly or apply from containing scopes, as well as
any applied to resources contained within the resource group.
:param resource_group_name: The name of the resource group containing the resource.
:type resource_group_name: str
:param filter: The filter to apply on the operation. Valid values for $filter are: 'atScope()',
'atExactScope()', 'excludeExpired()' or 'policyAssignmentId eq '{value}''. If $filter is not
provided, no filtering is performed and the unfiltered list includes all policy exemptions
associated with the scope, including those that apply directly or apply from containing scopes.
If $filter=atScope() is provided, the returned list only includes all policy exemptions that
apply to the scope, which is everything in the unfiltered list except those applied to sub
scopes contained within the given scope. If $filter=atExactScope() is provided, the returned
list only includes all policy exemptions that are at the given scope. If
$filter=excludeExpired() is provided, the returned list only includes all policy exemptions
that either haven't expired or don't have an expiration date set. If $filter=policyAssignmentId eq
'{value}' is provided, the returned list only includes all policy exemptions that are
associated with the given policyAssignmentId.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PolicyExemptionListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.resource.policy.v2020_09_01.models.PolicyExemptionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyExemptionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_for_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
filter=filter,
template_url=self.list_for_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_for_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
filter=filter,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("PolicyExemptionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_for_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/policyExemptions'} # type: ignore
@distributed_trace
def list_for_resource(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
filter: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.PolicyExemptionListResult"]:
"""Retrieves all policy exemptions that apply to a resource.
This operation retrieves the list of all policy exemptions associated with the specified
resource in the given resource group and subscription that match the optional given $filter.
Valid values for $filter are: 'atScope()', 'atExactScope()', 'excludeExpired()' or
'policyAssignmentId eq '{value}''. If $filter is not provided, the unfiltered list includes all
policy exemptions associated with the resource, including those that apply directly or from all
containing scopes, as well as any applied to resources contained within the resource. Three
parameters plus the resource name are used to identify a specific resource. If the resource is
not part of a parent resource (the more common case), the parent resource path should not be
provided (or provided as ''). For example a web app could be specified as
({resourceProviderNamespace} == 'Microsoft.Web', {parentResourcePath} == '', {resourceType} ==
'sites', {resourceName} == 'MyWebApp'). If the resource is part of a parent resource, then all
parameters should be provided. For example a virtual machine DNS name could be specified as
({resourceProviderNamespace} == 'Microsoft.Compute', {parentResourcePath} ==
'virtualMachines/MyVirtualMachine', {resourceType} == 'domainNames', {resourceName} ==
'MyComputerName'). A convenient alternative to providing the namespace and type name separately
is to provide both in the {resourceType} parameter, format: ({resourceProviderNamespace} == '',
{parentResourcePath} == '', {resourceType} == 'Microsoft.Web/sites', {resourceName} ==
'MyWebApp').
:param resource_group_name: The name of the resource group containing the resource.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider. For example, the
namespace of a virtual machine is Microsoft.Compute (from Microsoft.Compute/virtualMachines).
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource path. Use empty string if there is none.
:type parent_resource_path: str
:param resource_type: The resource type name. For example the type name of a web app is 'sites'
(from Microsoft.Web/sites).
:type resource_type: str
:param resource_name: The name of the resource.
:type resource_name: str
:param filter: The filter to apply on the operation. Valid values for $filter are: 'atScope()',
'atExactScope()', 'excludeExpired()' or 'policyAssignmentId eq '{value}''. If $filter is not
provided, no filtering is performed. If $filter is not provided, the unfiltered list includes
all policy exemptions associated with the scope, including those that apply directly or apply
from containing scopes. If $filter=atScope() is provided, the returned list only includes all
policy exemptions that apply to the scope, which is everything in the unfiltered list except
those applied to sub scopes contained within the given scope. If $filter=atExactScope() is
provided, the returned list only includes all policy exemptions that are at the given scope. If
$filter=excludeExpired() is provided, the returned list only includes all policy exemptions
that either haven't expired or do not have an expiration date set. If $filter=policyAssignmentId eq
'{value}' is provided, the returned list only includes all policy exemptions that are
associated with the given policyAssignmentId.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PolicyExemptionListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.resource.policy.v2020_09_01.models.PolicyExemptionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyExemptionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_for_resource_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
filter=filter,
template_url=self.list_for_resource.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_for_resource_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
filter=filter,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("PolicyExemptionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_for_resource.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/policyExemptions'} # type: ignore
@distributed_trace
def list_for_management_group(
self,
management_group_id: str,
filter: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.PolicyExemptionListResult"]:
"""Retrieves all policy exemptions that apply to a management group.
This operation retrieves the list of all policy exemptions applicable to the management group
that match the given $filter. Valid values for $filter are: 'atScope()', 'atExactScope()',
'excludeExpired()' or 'policyAssignmentId eq '{value}''. If $filter=atScope() is provided, the
returned list includes all policy exemptions that are assigned to the management group or the
management group's ancestors.
:param management_group_id: The ID of the management group.
:type management_group_id: str
:param filter: The filter to apply on the operation. Valid values for $filter are: 'atScope()',
'atExactScope()', 'excludeExpired()' or 'policyAssignmentId eq '{value}''. If $filter is not
provided, no filtering is performed. If $filter is not provided, the unfiltered list includes
all policy exemptions associated with the scope, including those that apply directly or apply
from containing scopes. If $filter=atScope() is provided, the returned list only includes all
policy exemptions that apply to the scope, which is everything in the unfiltered list except
those applied to sub scopes contained within the given scope. If $filter=atExactScope() is
provided, the returned list only includes all policy exemptions that are at the given scope. If
$filter=excludeExpired() is provided, the returned list only includes all policy exemptions
that either haven't expired or do not have an expiration date set. If $filter=policyAssignmentId eq
'{value}' is provided, the returned list only includes all policy exemptions that are
associated with the given policyAssignmentId.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PolicyExemptionListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.resource.policy.v2020_09_01.models.PolicyExemptionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyExemptionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_for_management_group_request(
management_group_id=management_group_id,
filter=filter,
template_url=self.list_for_management_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_for_management_group_request(
management_group_id=management_group_id,
filter=filter,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("PolicyExemptionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_for_management_group.metadata = {'url': '/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyExemptions'} # type: ignore
|
Azure/azure-sdk-for-python
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2020_09_01/operations/_policy_exemptions_operations.py
|
Python
|
mit
| 42,135
|
from random import seed
from mongoengine import connect
import pytest
@pytest.fixture
def User(django_settings):
from regme.documents import User
return User
@pytest.yield_fixture
def db(User):
connect(db='test_regme')
User.drop_collection()
yield
User.drop_collection()
@pytest.fixture
def user_data():
return {
'username': 'username',
'password': 'password',
'email': 'email@example.com'}
@pytest.fixture
def user(db, User, user_data):
seed('')
return User.create_user(**user_data)
@pytest.fixture
def active_user(user):
user.activate(user.activation_key)
return user
def test_user_creation_form(user_data, db):
from regme.forms import UserCreationForm
form = UserCreationForm({
'username': user_data['username'],
'email': user_data['email'],
'password1': user_data['password'],
'password2': user_data['password'],
})
assert bool(form.is_valid())
user = form.save()
assert not user.is_active
def test_user_activation_form(user_data, user):
from regme.forms import UserActivationForm
form = UserActivationForm({
'username': user.username,
'activation_key': user.activation_key,
})
assert bool(form.is_valid())
user = form.save()
assert user.is_active
def test_user_activation_form_unknown_username(user_data, user):
from regme.forms import UserActivationForm
form = UserActivationForm({
'username': user.username + 'test',
'activation_key': user.activation_key,
})
assert not bool(form.is_valid())
assert form.user is None
assert form.save() is None
def test_user_activation_form_empty_username(user_data, user):
from regme.forms import UserActivationForm
form = UserActivationForm({
'username': '',
'activation_key': user.activation_key,
})
assert not bool(form.is_valid())
assert form.user is None
assert form.save() is None
def test_user_activation_form_wrong_activation_key(user_data, user):
from regme.forms import UserActivationForm
form = UserActivationForm({
'username': user.username,
'activation_key': user.activation_key + 'test',
})
assert not bool(form.is_valid())
assert form.user is None
assert form.save() is None
def test_user_activation_form_empty_activation_key(user_data, user):
from regme.forms import UserActivationForm
form = UserActivationForm({
'username': user.username,
'activation_key': '',
})
assert not bool(form.is_valid())
assert form.user is None
assert form.save() is None
def test_user_activation_form_already_activated(user_data, active_user):
user = active_user
from regme.forms import UserActivationForm
form = UserActivationForm({
'username': user.username,
'activation_key': user.activation_key,
})
assert not bool(form.is_valid())
assert form.user is None
assert form.save() is None
def test_password_recovery_form(active_user):
user = active_user
from django.contrib.auth.forms import PasswordResetForm
from django.core import mail
mail.outbox = []
form = PasswordResetForm({'email': user.email})
assert bool(form.is_valid())
form.save(domain_override='localhost')
assert len(mail.outbox) == 1
def test_password_change_form(user_data, active_user):
user = active_user
from django.contrib.auth.forms import PasswordChangeForm
form = PasswordChangeForm(user, {
'old_password': user_data['password'],
'new_password1': 'new_password',
'new_password2': 'new_password',
})
assert bool(form.is_valid())
user = form.save()
assert user.check_password('new_password')
|
lig/regme
|
tests/test_forms.py
|
Python
|
apache-2.0
| 3,782
|
from bson import ObjectId, json_util
from flask import g, request
from flask_restful import Resource
from service.SmsService import SmsService
from foodbeazt.fapp import app, mongo, admin_permission
import logging
class SmsApi(Resource):
def __init__(self):
self.log = logging.getLogger(__name__)
self.smsService = SmsService(mongo.db, app.config['SMS_USER'], app.config['SMS_API_KEY'])
def get(self):
if not admin_permission.can():
return "Unauthorized", 403
tenant_id = g.user.tenant_id
try:
page_no = int(request.args.get('page_no', 1))
page_size = int(request.args.get('page_size', 50))
is_otp = request.args.get('is_otp', None) in ["true", "True", "1"]
items, total = self.smsService.search(tenant_id=tenant_id, page_no=page_no, page_size=page_size, is_otp=is_otp)
offset = page_no*page_size
result = {"items": items, "total": total, "page_no": page_no, "page_size": page_size, "is_otp": is_otp}
url = "/api/sms?page_no=%d&page_size=%d&is_otp=%s"
if total > offset:
result["next"] = url % (page_no+1, page_size, is_otp)
if page_no > 1:
result["previous"] = url % (page_no-1, page_size, is_otp)
return result
except Exception as e:
self.log.exception(e)
return {"status": "error", "message": "Error on searching sms messages"}, 460
def delete(self):
if not admin_permission.can():
return "Unauthorized", 403
tenant_id = g.user.tenant_id
_id = request.args.get('_id', None)
is_otp = request.args.get('is_otp', None) in ["true", "True", "1"]
if _id is None or len(_id) != 24:
return {"status": "error", "message": "Invalid sms message id"}, 460
try:
self.smsService.delete(tenant_id, _id, is_otp)
return None, 204 # No content
except Exception as e:
self.log.exception(e)
return {"status": "error", "message": "Error on deleting sms message"}, 460
|
cackharot/fbeazt
|
src/foodbeazt/resources/sms.py
|
Python
|
apache-2.0
| 2,134
|
from pmg.models import db
from tests.fixtures import *
class UserData(DataSet):
class admin_user:
email = "admin"
name = "Admin"
password = "admin"
active = True
roles = [RoleData.admin, RoleData.editor]
confirmed = True
if __name__ == "__main__":
db.create_all()
db_fixture = dbfixture.data(
HouseData,
MinisterData,
CommitteeData,
CommitteeMeetingData,
BillTypeData,
BillStatusData,
BillData,
CallForCommentData,
TabledCommitteeReportData,
PartyData,
ProvinceData,
MemberData,
CommitteeQuestionData,
EventData,
FeaturedData,
PageData,
PostData,
RoleData,
UserData,
MembershipTypeData,
MembershipData,
)
db_fixture.setup()
|
Code4SA/pmg-cms-2
|
setup_dev_database.py
|
Python
|
apache-2.0
| 864
|
#!/usr/bin/python
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# Copyright 2009 Didier Roche
#
# This file is part of Quickly ubuntu-application template
#
#This program is free software: you can redistribute it and/or modify it
#under the terms of the GNU General Public License version 3, as published
#by the Free Software Foundation.
#This program is distributed in the hope that it will be useful, but
#WITHOUT ANY WARRANTY; without even the implied warranties of
#MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
#PURPOSE. See the GNU General Public License for more details.
#You should have received a copy of the GNU General Public License along
#with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import webbrowser
from quickly import templatetools, commands
import license
import gettext
from gettext import gettext as _
gettext.textdomain('quickly')
from quickly import configurationhandler
from internal import quicklyutils, packaging
from quickly import launchpadaccess
launchpad = None
ppa_name = None
for_extras = False
i = 0
args = []
argv = sys.argv
options = ["--ppa", "--extras",]
def usage():
templatetools.print_usage(_('quickly share [--extras] [--ppa <ppa | group/ppa>]'))
def help():
print _("""Updates your PPA with the the latest saved project changes.
Before running 'quickly share', you should: create your account
on http://launchpad.net.
You also have to add a PPA to your launchpad account.
Name, email, and version will be automatically changed in setup.py.
The new version number will be 'CURRENT.VERSION-publicX', where X will
be incremented each time you share.
For example, if you most recently released 10.07.2 and you have shared
the package three times since then, another run of 'quickly share' will
use a new version of 10.07.2-public4.
Passing --extras will create a package similar to one created by
the "quickly package --extras" command. It will install files into /opt.
You can optionally run 'quickly package' and test your package to make
sure it installs as expected.""")
def shell_completion(argv):
''' Complete --args '''
# option completion
rv = []
if argv[-1].startswith("-"):
rv = options
elif len(argv) > 1 and argv[-2] == '--ppa': # if argument following --ppa, complete by ppa
rv = packaging.shell_complete_ppa(argv[-1])
if rv:
rv.sort()
print ' '.join(rv)
templatetools.handle_additional_parameters(sys.argv, help, shell_completion, usage=usage)
while i < len(argv):
arg = argv[i]
if arg.startswith('-'):
if arg == '--ppa':
if i + 1 < len(argv):
ppa_name = argv[i + 1]
i += 1
else:
cmd = commands.get_command('share', 'ubuntu-application')
templatetools.usage_error(_("No PPA provided."), cmd=cmd)
elif arg == '--extras':
for_extras = True
else:
cmd = commands.get_command('share', 'ubuntu-application')
templatetools.usage_error(_("Unknown option: %s." % arg), cmd=cmd)
else:
args.append(arg)
i += 1
# warning: project_name can be different from project.name (one local, one on launchpad)
if not configurationhandler.project_config:
configurationhandler.loadConfig()
project_name = configurationhandler.project_config['project']
# connect to LP
try:
launchpad = launchpadaccess.initialize_lpi()
except launchpadaccess.launchpad_connection_error, e:
print(e)
sys.exit(1)
# push the gpg key to the env
keyid = ""
try:
keyid = quicklyutils.get_right_gpg_key_id(launchpad)
except quicklyutils.gpg_error, e:
print(e)
sys.exit(1)
# choose right ppa parameter (users, etc.) ppa or staging if ppa_name is None
try:
(ppa_user, ppa_name, dput_ppa_name, ppa_url) = packaging.choose_ppa(launchpad, ppa_name)
except packaging.user_team_not_found, e:
print(_("User or Team %s not found on Launchpad") % e)
sys.exit(1)
except packaging.not_ppa_owner, e:
print(_("You have to be a member of %s team to upload to its ppas") % e)
sys.exit(1)
try:
ppa_name = packaging.check_and_return_ppaname(launchpad, ppa_user, ppa_name) # ppa_name can be ppa name or ppa display name. Find the right one if exists
except packaging.ppa_not_found, e:
print(_("%s does not exist. Please create it on launchpad if you want to push a package to it. %s has the following ppas available:") % (e, ppa_user.name))
user_has_ppa = False
for ppa_name, ppa_display_name in packaging.get_all_ppas(launchpad, ppa_user):
print "%s - %s" % (ppa_name, ppa_display_name)
user_has_ppa = True
if user_has_ppa:
print(_("You can temporary choose one of them with --ppa switch or definitely by executing 'quickly configure ppa <ppa_name>'."))
sys.exit(1)
# license if needed (default with author in setup.py and GPL-3). Don't change anything if not needed
try:
license.licensing()
except license.LicenceError, error_message:
print(error_message)
sys.exit(1)
try:
release_version = packaging.updateversion(sharing=True)
except (packaging.invalid_versionning_scheme,
packaging.invalid_version_in_setup), error_message:
print(error_message)
sys.exit(1)
# creation/update debian packaging
return_code = packaging.updatepackaging(installopt=for_extras)
if return_code != 0:
print _("ERROR: can't create or update ubuntu package")
sys.exit(1)
# upload to launchpad
print _("pushing to launchpad")
return_code = packaging.push_to_ppa(dput_ppa_name, "../%s_%s_source.changes" % (project_name, release_version), keyid=keyid) != 0
if return_code != 0:
sys.exit(return_code)
print _("%s %s is building on Launchpad. Wait for half an hour and have look at %s.") % (project_name, release_version, ppa_url)
sys.exit(0)
|
didrocks/quickly
|
data/templates/ubuntu-application/share.py
|
Python
|
gpl-3.0
| 5,902
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing images.
"""
from django.conf import settings
from django.forms import ValidationError # noqa
from django.forms.widgets import HiddenInput # noqa
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard import policy
IMAGE_BACKEND_SETTINGS = getattr(settings, 'OPENSTACK_IMAGE_BACKEND', {})
IMAGE_FORMAT_CHOICES = IMAGE_BACKEND_SETTINGS.get('image_formats', [])
class CreateImageForm(forms.SelfHandlingForm):
name = forms.CharField(max_length="255", label=_("Name"))
description = forms.CharField(widget=forms.widgets.Textarea(
attrs={'class': 'modal-body-fixed-width'}),
label=_("Description"),
required=False)
source_type = forms.ChoiceField(
label=_('Image Source'),
required=False,
choices=[('url', _('Image Location')),
('file', _('Image File'))],
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'source'}))
copy_from = forms.CharField(max_length="255",
label=_("Image Location"),
help_text=_("An external (HTTP) URL to load "
"the image from."),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'source',
'data-source-url': _('Image Location'),
'ng-model': 'copyFrom',
'ng-change':
'selectImageFormat(copyFrom)'}),
required=False)
image_file = forms.FileField(label=_("Image File"),
help_text=_("A local image to upload."),
widget=forms.FileInput(attrs={
'class': 'switched',
'data-switch-on': 'source',
'data-source-file': _('Image File'),
'ng-model': 'imageFile',
'ng-change':
'selectImageFormat(imageFile.name)',
'image-file-on-change': None}),
required=False)
disk_format = forms.ChoiceField(label=_('Format'),
choices=[],
widget=forms.Select(attrs={
'class': 'switchable',
'ng-model': 'diskFormat'}))
architecture = forms.CharField(max_length="255", label=_("Architecture"),
required=False)
minimum_disk = forms.IntegerField(label=_("Minimum Disk (GB)"),
help_text=_('The minimum disk size'
' required to boot the'
' image. If unspecified, this'
' value defaults to 0'
' (no minimum).'),
required=False)
minimum_ram = forms.IntegerField(label=_("Minimum RAM (MB)"),
help_text=_('The minimum memory size'
' required to boot the'
' image. If unspecified, this'
' value defaults to 0 (no'
' minimum).'),
required=False)
is_public = forms.BooleanField(label=_("Public"), required=False)
protected = forms.BooleanField(label=_("Protected"), required=False)
def __init__(self, request, *args, **kwargs):
super(CreateImageForm, self).__init__(request, *args, **kwargs)
if (not settings.HORIZON_IMAGES_ALLOW_UPLOAD or
not policy.check((("image", "upload_image"),), request)):
self._hide_file_source_type()
if not policy.check((("image", "set_image_location"),), request):
self._hide_url_source_type()
if not policy.check((("image", "publicize_image"),), request):
self._hide_is_public()
self.fields['disk_format'].choices = IMAGE_FORMAT_CHOICES
def _hide_file_source_type(self):
self.fields['image_file'].widget = HiddenInput()
source_type = self.fields['source_type']
source_type.choices = [choice for choice in source_type.choices
if choice[0] != 'file']
if len(source_type.choices) == 1:
source_type.widget = HiddenInput()
def _hide_url_source_type(self):
self.fields['copy_from'].widget = HiddenInput()
source_type = self.fields['source_type']
source_type.choices = [choice for choice in source_type.choices
if choice[0] != 'url']
if len(source_type.choices) == 1:
source_type.widget = HiddenInput()
def _hide_is_public(self):
self.fields['is_public'].widget = HiddenInput()
self.fields['is_public'].initial = False
def clean(self):
data = super(CreateImageForm, self).clean()
# The image_file key can be missing based on particular upload
# conditions. Code defensively for it here...
image_file = data.get('image_file', None)
image_url = data.get('copy_from', None)
if not image_url and not image_file:
raise ValidationError(
_("A image or external image location must be specified."))
elif image_url and image_file:
raise ValidationError(
_("Can not specify both image and external image location."))
else:
return data
def handle(self, request, data):
# Glance does not really do anything with container_format at the
# moment. It requires that it be set to the same value as disk_format
# for the three Amazon image types, and otherwise treats it as 'bare'.
# As such we just set it here ourselves instead of bothering the user
# by asking for information we can already determine.
if data['disk_format'] in ('ami', 'aki', 'ari',):
container_format = data['disk_format']
else:
container_format = 'bare'
meta = {'is_public': data['is_public'],
'protected': data['protected'],
'disk_format': data['disk_format'],
'container_format': container_format,
'min_disk': (data['minimum_disk'] or 0),
'min_ram': (data['minimum_ram'] or 0),
'name': data['name'],
'properties': {}}
if data['description']:
meta['properties']['description'] = data['description']
if data['architecture']:
meta['properties']['architecture'] = data['architecture']
if (settings.HORIZON_IMAGES_ALLOW_UPLOAD and
policy.check((("image", "upload_image"),), request) and
data.get('image_file', None)):
meta['data'] = self.files['image_file']
else:
meta['copy_from'] = data['copy_from']
try:
image = api.glance.image_create(request, **meta)
messages.success(request,
_('Your image %s has been queued for creation.') %
data['name'])
return image
except Exception:
exceptions.handle(request, _('Unable to create new image.'))
class UpdateImageForm(forms.SelfHandlingForm):
image_id = forms.CharField(widget=forms.HiddenInput())
name = forms.CharField(max_length="255", label=_("Name"))
description = forms.CharField(
widget=forms.widgets.Textarea(),
label=_("Description"),
required=False,
)
kernel = forms.CharField(
max_length="36",
label=_("Kernel ID"),
required=False,
widget=forms.TextInput(attrs={'readonly': 'readonly'}),
)
ramdisk = forms.CharField(
max_length="36",
label=_("Ramdisk ID"),
required=False,
widget=forms.TextInput(attrs={'readonly': 'readonly'}),
)
architecture = forms.CharField(
label=_("Architecture"),
required=False,
widget=forms.TextInput(attrs={'readonly': 'readonly'}),
)
disk_format = forms.ChoiceField(
label=_("Format"),
)
public = forms.BooleanField(label=_("Public"), required=False)
protected = forms.BooleanField(label=_("Protected"), required=False)
def __init__(self, request, *args, **kwargs):
super(UpdateImageForm, self).__init__(request, *args, **kwargs)
self.fields['disk_format'].choices = [(value, name) for value,
name in IMAGE_FORMAT_CHOICES
if value]
if not policy.check((("image", "publicize_image"),), request):
self.fields['public'].widget = forms.CheckboxInput(
attrs={'readonly': 'readonly'})
def handle(self, request, data):
image_id = data['image_id']
error_updating = _('Unable to update image "%s".')
if data['disk_format'] in ['aki', 'ari', 'ami']:
container_format = data['disk_format']
else:
container_format = 'bare'
meta = {'is_public': data['public'],
'protected': data['protected'],
'disk_format': data['disk_format'],
'container_format': container_format,
'name': data['name'],
'properties': {'description': data['description']}}
if data['kernel']:
meta['properties']['kernel_id'] = data['kernel']
if data['ramdisk']:
meta['properties']['ramdisk_id'] = data['ramdisk']
if data['architecture']:
meta['properties']['architecture'] = data['architecture']
# Ensure we do not delete properties that have already been
# set on an image.
meta['purge_props'] = False
try:
image = api.glance.image_update(request, image_id, **meta)
messages.success(request, _('Image was successfully updated.'))
return image
except Exception:
exceptions.handle(request, error_updating % image_id)
|
394954369/horizon
|
openstack_dashboard/dashboards/project/images/images/forms.py
|
Python
|
apache-2.0
| 11,525
|
import ast
import itertools
import functools
import re
from typing import Any, Iterable, Union
from formulaic.errors import FormulaParsingError
from .algos.tokens_to_ast import tokens_to_ast
from .algos.tokenize import tokenize
from .types import ASTNode, Factor, Operator, OperatorResolver, Term, Token
from .utils import exc_for_token
class FormulaParser:
"""
The default and base formula parser for `Formula`s.
The role of this class is to transform a string representation of a formula
to a sequence of `Term` instances that can be evaluated by materializers
and ultimately rendered into model matrices.
This class can be subclassed to customize this behavior. The three phases of
formula parsing are split out into separate methods to make this easier.
They are:
- get_tokens: Which returns an iterable of `Token` instances. By default
this uses `tokenize()` and handles the addition/removal of the
intercept.
- get_ast: Which converts the iterable of `Token`s into an abstract
syntax tree. By default this uses `tokens_to_ast()` and the nominated
`OperatorResolver` instance.
- get_terms: Which evaluates the abstract syntax tree and returns an
iterable of `Term`s.
Only the `get_terms()` method is essential from an API perspective.
Attributes:
operator_resolver: The operator resolver to use when parsing the formula
string and generating the abstract syntax tree. If not specified,
it will default to `DefaultOperatorResolver`.
"""
def __init__(self, operator_resolver: OperatorResolver = None):
self.operator_resolver = operator_resolver or DefaultOperatorResolver()
def get_tokens(
self, formula: str, *, include_intercept: bool = True
) -> Iterable[Token]:
"""
Return an iterable of `Token` instances for the nominated `formula`
string.
Args:
formula: The formula string to be tokenized.
include_intercept: Whether to include an intercept by default
(formulas can still omit this intercept in the usual manner:
adding a '-1' or '+0' term).
"""
tokens = list(tokenize(formula))
# Insert "1" or "1 + " to beginning of RHS formula
one = Token(token="1", kind="value")
plus = Token(token="+", kind="operator")
minus = Token(token="-", kind="operator")
if include_intercept:
if len(tokens) == 0:
tokens = [one]
else:
try:
tilde_index = len(tokens) - 1 - tokens[::-1].index("~")
if tilde_index == len(tokens) - 1:
tokens.append(one)
else:
tokens.insert(tilde_index + 1, one)
tokens.insert(tilde_index + 2, plus)
except ValueError:
tokens.insert(0, one)
tokens.insert(1, plus)
# Replace all "0"s with "-1"
zero_index = -1
try:
while True:
zero_index = tokens.index("0", zero_index + 1)
if zero_index - 1 < 0 or tokens[zero_index - 1] == "~":
tokens.pop(zero_index)
zero_index -= 1
continue
elif tokens[zero_index - 1] == "+":
tokens[zero_index - 1] = minus
elif tokens[zero_index - 1] == "-":
tokens[zero_index - 1] = plus
else:
raise FormulaParsingError(
f"Unrecognised use of `0` at index: {tokens[zero_index-1].source_start}."
)
tokens[zero_index] = one
except ValueError:
pass
return tokens
def get_ast(self, formula: str, *, include_intercept: bool = True) -> ASTNode:
"""
Assemble an abstract syntax tree for the nominated `formula` string.
Args:
formula: The formula for which an AST should be generated.
include_intercept: Whether to include an intercept by default
(formulas can still omit this intercept in the usual manner:
adding a '-1' or '+0' term).
"""
return tokens_to_ast(
self.get_tokens(formula, include_intercept=include_intercept),
operator_resolver=self.operator_resolver,
)
def get_terms(
self, formula, *, sort=True, include_intercept=True
) -> Union[Iterable[Term], Iterable[Iterable[Term]], Any]:
"""
Assemble the `Term` instances for a formula string. Depending on the
operators involved, this may be an iterable of `Term` instances, or
an iterable of iterables of `Term`s, etc.
Args:
formula: The formula for which an AST should be generated.
sort: Whether to sort the terms before returning them.
include_intercept: Whether to include an intercept by default
(formulas can still omit this intercept in the usual manner:
adding a '-1' or '+0' term).
"""
terms = self.get_ast(formula, include_intercept=include_intercept).to_terms()
if sort:
if isinstance(terms, tuple):
terms = tuple(sorted(ts) for ts in terms)
else:
terms = sorted(terms)
return terms
class DefaultOperatorResolver(OperatorResolver):
"""
The default operator resolver implementation.
This class implements the standard operators in a form consistent with
other implementations of Wilkinson formulas. It can be extended via
subclassing to support other kinds of operators, in which case `.operators`
and/or `.resolve` can be overridden. For more details about which operators
are implemented, review the code or the documentation website.
"""
@property
def operators(self):
def formula_separator_expansion(lhs, rhs):
terms = (lhs.to_terms(), rhs.to_terms())
out = []
for termset in terms:
if isinstance(termset, tuple):
out.extend(termset)
else:
out.append(termset)
return tuple(out)
def nested_product_expansion(parents, nested):
terms = parents.to_terms()
common = functools.reduce(lambda x, y: x * y, terms)
return terms.union({common * term for term in nested.to_terms()})
def unary_negation(arg):
# TODO: FormulaParser().get_terms('a * ( - b)') Should return `a`
terms = arg.to_terms()
if len(terms) > 1 or list(terms)[0] != "0":
raise FormulaParsingError(
"Unary negation is only implemented for '0', where it is substituted for '1'."
)
return {
Term(factors=[Factor("1", eval_method="literal")])
} # pragma: no cover; All zero handling is currently done in the token pre-processor.
def power(arg, power):
if (
not isinstance(power, Token)
or power.kind is not Token.Kind.VALUE
or not isinstance(ast.literal_eval(power.token), int)
):
raise exc_for_token(
power, "The right-hand argument of `**` must be a positive integer."
)
return {
functools.reduce(lambda x, y: x * y, term)
for term in itertools.product(*[arg.to_terms()] * int(power.token))
}
return [
Operator(
"~",
arity=2,
precedence=-100,
associativity=None,
to_terms=formula_separator_expansion,
),
Operator(
"~",
arity=1,
precedence=-100,
associativity=None,
fixity="prefix",
to_terms=lambda expr: (expr.to_terms(),),
),
Operator(
"+",
arity=2,
precedence=100,
associativity="left",
to_terms=lambda *args: set(
itertools.chain(*[arg.to_terms() for arg in args])
),
),
Operator(
"-",
arity=2,
precedence=100,
associativity="left",
to_terms=lambda left, right: set(
set(left.to_terms()).difference(right.to_terms())
),
),
Operator(
"+",
arity=1,
precedence=100,
associativity="right",
fixity="prefix",
to_terms=lambda arg: arg.to_terms(),
),
Operator(
"-",
arity=1,
precedence=100,
associativity="right",
fixity="prefix",
to_terms=unary_negation,
),
Operator(
"*",
arity=2,
precedence=200,
associativity="left",
to_terms=lambda *args: (
{
functools.reduce(lambda x, y: x * y, term)
for term in itertools.product(*[arg.to_terms() for arg in args])
}.union(itertools.chain(*[arg.to_terms() for arg in args]))
),
),
Operator(
"/",
arity=2,
precedence=200,
associativity="left",
to_terms=nested_product_expansion,
),
Operator(
":",
arity=2,
precedence=300,
associativity="left",
to_terms=lambda *args: {
functools.reduce(lambda x, y: x * y, term)
for term in itertools.product(*[arg.to_terms() for arg in args])
},
),
Operator(
"**", arity=2, precedence=500, associativity="right", to_terms=power
),
]
def resolve(self, token: Token, max_prefix_arity) -> Iterable[Operator]:
if token.token in self.operator_table:
return super().resolve(token, max_prefix_arity)
symbol = token.token
# Apply R-like transformations to operator
symbol = re.sub(
r"[+\-]*\-[+\-]*", "-", symbol
) # Any sequence of '+' and '-' -> '-'
symbol = re.sub(r"[+]{2,}", "+", symbol) # multiple sequential '+' -> '+'
if symbol in self.operator_table:
return [self._resolve(token, symbol, max_prefix_arity)]
return [
self._resolve(token, sym, max_prefix_arity if i == 0 else 0)
for i, sym in enumerate(symbol)
]
|
matthewwardrop/formulaic
|
formulaic/parser/parser.py
|
Python
|
mit
| 11,168
|
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# ails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
import mkeventd
from valuespec import *
try:
mkeventd_enabled = config.mkeventd_enabled
except:
mkeventd_enabled = False
# .--Datasources---------------------------------------------------------.
# | ____ _ |
# | | _ \ __ _| |_ __ _ ___ ___ _ _ _ __ ___ ___ ___ |
# | | | | |/ _` | __/ _` / __|/ _ \| | | | '__/ __/ _ \/ __| |
# | | |_| | (_| | || (_| \__ \ (_) | |_| | | | (_| __/\__ \ |
# | |____/ \__,_|\__\__,_|___/\___/ \__,_|_| \___\___||___/ |
# | |
# '----------------------------------------------------------------------'
def table_events(what, columns, add_headers, only_sites, limit, filters):
# First we fetch the list of all events from mkeventd - either current
# or historic ones. We ignore any filters for host_ here. Note:
# event_host and host_name needn't be compatible. They might differ
# in case. Also, in the events table the host name might be replaced
# by the IP address of the host - while the monitoring knows the host
# by its name. We will join later.
# If due to limitation of visibility we do a post-filtering, we cannot
# impose a limit when fetching the data. This is dangerous, but we
# have no other chance, currently.
if not config.may("mkeventd.seeall"):
use_limit = None
else:
use_limit = limit
rows = get_all_events(what, filters, use_limit)
# Now we join the stuff with the host information. Therefore we
# get the information about all hosts that are referred to in
# any of the events.
required_hosts = set()
for row in rows:
host = row.get("event_host")
if host:
required_hosts.add(host.lower())
# Get information about these hosts via Livestatus. We
# allow event_host to match either the host_name or
# the host_address.
host_filters = ""
for host in required_hosts:
host_filters += "Filter: host_name =~ %s\n" \
"Filter: host_address = %s\n" % (host.encode("utf-8"), host.encode("utf-8"))
if len(required_hosts) > 0:
host_filters += "Or: %d\n" % (len(required_hosts) * 2)
# Make sure that the host name is fetched. We need it for
# joining. The event columns are always fetched all. The event
# daemon currently does not implement any Columns: header.
if "host_name" not in columns:
columns.append("host_name")
if "host_address" not in columns:
columns.append("host_address")
# Fetch list of hosts. Here is much room for optimization.
# If no host filter is set, then the data of all hosts would
# be fetched before we even know if there are any events
# for those hosts. Better would be first fetching all events
# and later fetch the data of the relevant hosts.
hostrows = event_hostrows(columns, only_sites, filters, host_filters)
# Visibility: If the user does *not* have the permission to see all events,
# we have to run the query twice. Once with
# AuthUser: (as usual) and once more without AuthUser. For the second query
# we no longer need all columns, only the list of
# contact groups.
# 1. If a host is missing from the first query but shows up in the second,
# it means the user is not allowed to see this host, and the event is
# not displayed.
# 2. If a host is missing from both queries, the host does not exist in the
# monitoring. Then there are two cases:
# a) If the event has a list of contact groups assigned (set by
# a rule), the user may see the event if he is a member of one of
# those contact groups. We get this easily from the user profile.
# For such events we do not need the result of the query.
# b) If the event lacks this option, the user may always see the event.
# We can additionally control this globally via a permission.
if not config.may("mkeventd.seeall"):
host_contact_groups = {}
query = "GET hosts\nColumns: name address contact_groups\n" + host_filters
html.live.set_only_sites(only_sites)
html.live.set_auth_domain('mkeventd')
data = html.live.query(query)
html.live.set_auth_domain('read')
html.live.set_only_sites(None)
for host, address, groups in data:
host_contact_groups[host.lower()] = groups
host_contact_groups[address] = groups
else:
host_contact_groups = None
# Create lookup dict from hostname/address to the dataset of the host.
# This speeds up the mapping to the events.
hostdict = {}
for row in hostrows:
hostdict[row["host_name"].lower()] = row
hostdict[row["host_address"]] = row
# If there is at least one host filter, then we do not show event
# entries with an empty host information
have_host_filter = False
for filt in filters:
if filt.info == "host":
filter_code = filt.filter('event')
if filter_code:
have_host_filter = True
break
if not have_host_filter:
# Create empty host for outer join on host table
empty_host = dict([ (c, "") for c in columns if c.startswith("host_") ])
empty_host["site"] = ''
empty_host["host_state"] = 0
empty_host["host_has_been_checked"] = 0
# We're ready to join the host-data with the event data now. The question
# is what to do with events that cannot be mapped to a host...
new_rows = []
user_contact_groups = None
for event in rows:
host = event["event_host"].lower()
# Users without the mkeventd.seeall permission may only see the host if
# they are a contact via the monitoring. In case the host is not known
# to the monitoring, the mkeventd.seeunrelated permission is required
# as well.
if host_contact_groups != None:
if host in host_contact_groups:
if host not in hostdict:
continue # Host known to monitoring, but user is not allowed to see it
else: # Host not known to monitoring
# Has the event explicit contact groups assigned? Use them!
cgs = event.get("event_contact_groups")
if cgs == None:
if not config.may("mkeventd.seeunrelated"):
continue
else:
if user_contact_groups == None:
user_contact_groups = get_user_contact_groups()
allowed = False
for g in cgs:
if g in user_contact_groups:
allowed = True
if not allowed:
continue
if host in hostdict:
event.update(hostdict[host])
new_rows.append(event)
elif not have_host_filter:
# This event does not belong to any host known by
# the monitoring. We need to create the columns nevertheless.
# TODO: If there are any host filters, these events should
# be dropped.
# Here we can fill in empty data. That way
# events that are not assigned to any
# host can still be displayed. If we did nothing,
# columns would be missing and the painters
# would fall over.
event.update(empty_host)
new_rows.append(event)
return new_rows
def event_hostrows(columns, only_sites, filters, host_filters):
filter_code = ""
for filt in filters:
header = filt.filter("event")
if not header.startswith("Sites:"):
filter_code += header
filter_code += host_filters
host_columns = filter(lambda c: c.startswith("host_"), columns)
return get_host_table(filter_code, only_sites, host_columns)
def get_user_contact_groups():
query = "GET contactgroups\nFilter: members >= %s\nColumns: name\nCache: reload" % (config.user_id)
contacts = html.live.query_column(query)
return set(contacts)
def get_host_table(filter_header, only_sites, add_columns):
columns = [ "host_name" ] + add_columns
html.live.set_only_sites(only_sites)
html.live.set_prepend_site(True)
data = html.live.query(
"GET hosts\n" +
"Columns: " + (" ".join(columns)) + "\n" +
filter_header)
html.live.set_prepend_site(False)
html.live.set_only_sites(None)
headers = [ "site" ] + columns
rows = [ dict(zip(headers, row)) for row in data ]
return rows
def get_all_events(what, filters, limit):
headers = ""
for f in filters:
try:
headers += f.event_headers()
except:
pass
if limit:
headers += "Limit: %d\n" % limit
query = "GET %s\n%s" % (what, headers)
try:
debug = config.debug_mkeventd_queries
except:
debug = False
if debug \
and html.output_format == "html" and 'W' in html.display_options:
html.write('<div class="livestatus message" onmouseover="this.style.display=\'none\';">'
'<tt>%s</tt></div>\n' % (query.replace('\n', '<br>\n')))
response = mkeventd.query(query)
# First line of the response is the list of column names.
headers = response[0]
rows = []
for r in response[1:]:
rows.append(dict(zip(headers, r)))
return rows
# Declare datasource only if the event console is activated. We do
# not want to irritate users that do not know anything about the EC.
if mkeventd_enabled:
config.declare_permission("mkeventd.seeall",
_("See all events"),
_("If a user lacks this permission then he/she can see only those events that "
"originate from a host that he/she is a contact for."),
[ "user", "admin", "guest" ])
config.declare_permission("mkeventd.seeunrelated",
_("See events not related to a known host"),
_("If that user does not have the permission <i>See all events</i> then this permission "
"controls wether he/she can see events that are not related to a host in the montioring "
"and that do not have been assigned specific contract groups to via the event rule."),
[ "user", "admin", "guest" ])
multisite_datasources["mkeventd_events"] = {
"title" : _("Event Console: Current Events"),
"table" : lambda *args: table_events('events', *args),
"infos" : [ "event", "host" ],
"keys" : [],
"idkeys" : [ 'site', 'host_name', 'event_id' ],
"time_filters" : [ "event_first" ],
}
multisite_datasources["mkeventd_history"] = {
"title" : _("Event Console: Event History"),
"table" : lambda *args: table_events('history', *args),
"infos" : [ "history", "event", "host" ],
"keys" : [],
"idkeys" : [ 'site', 'host_name', 'event_id', 'history_line' ],
"time_filters" : [ "history_time" ],
}
#.
# .--Painters------------------------------------------------------------.
# | ____ _ _ |
# | | _ \ __ _(_)_ __ | |_ ___ _ __ ___ |
# | | |_) / _` | | '_ \| __/ _ \ '__/ __| |
# | | __/ (_| | | | | | || __/ | \__ \ |
# | |_| \__,_|_|_| |_|\__\___|_| |___/ |
# | |
# '----------------------------------------------------------------------'
def paint_event_host(row):
if row["host_name"]:
return "", row["host_name"]
else:
return "", row["event_host"]
multisite_painters["event_id"] = {
"title" : _("ID of the event"),
"short" : _("ID"),
"columns" : ["event_id"],
"paint" : lambda row: ("number", str(row["event_id"])),
}
multisite_painters["event_count"] = {
"title" : _("Count (number of recent occurrances)"),
"short" : _("Cnt."),
"columns" : ["event_count"],
"paint" : lambda row: ("number", str(row["event_count"])),
}
multisite_painters["event_text"] = {
"title" : _("Text/Message of the event"),
"short" : _("Message"),
"columns" : ["event_text"],
"paint" : lambda row: ("", row["event_text"].replace("\x01","<br>")),
}
def paint_ec_match_groups(row):
groups = row["event_match_groups"]
if groups:
code = ""
for text in groups:
code += '<span>%s</span>' % text
return "matchgroups", code
else:
return "", ""
multisite_painters["event_match_groups"] = {
"title" : _("Match Groups"),
"short" : _("Match"),
"columns" : ["event_match_groups"],
"paint" : paint_ec_match_groups,
}
multisite_painters["event_first"] = {
"title" : _("Time of first occurrance of this serial"),
"short" : _("First"),
"columns" : ["event_first"],
"options" : [ "ts_format", "ts_date" ],
"paint" : lambda row: paint_age(row["event_first"], True, True),
}
multisite_painters["event_last"] = {
"title" : _("Time of last occurrance"),
"short" : _("Last"),
"columns" : ["event_last"],
"options" : [ "ts_format", "ts_date" ],
"paint" : lambda row: paint_age(row["event_last"], True, True),
}
multisite_painters["event_comment"] = {
"title" : _("Comment to the event"),
"short" : _("Comment"),
"columns" : ["event_comment"],
"paint" : lambda row: ("", row["event_comment"]),
}
def mkeventd_paint_sl(row):
try:
return "", dict(config.mkeventd_service_levels)[row["event_sl"]]
except:
return "", str(row["event_sl"])
multisite_painters["event_sl"] = {
"title" : _("Service-Level"),
"short" : _("Level"),
"columns" : ["event_sl"],
"paint" : mkeventd_paint_sl,
}
multisite_painters["event_host"] = {
"title" : _("Hostname/IP-Address"),
"short" : _("Host"),
"columns" : ["event_host", "host_name"],
"paint" : paint_event_host,
}
multisite_painters["event_owner"] = {
"title" : _("Owner of event"),
"short" : _("owner"),
"columns" : ["event_owner"],
"paint" : lambda row: ("", row["event_owner"]),
}
multisite_painters["event_contact"] = {
"title" : _("Contact Person"),
"short" : _("Contact"),
"columns" : ["event_contact" ],
"paint" : lambda row: ("", row["event_contact"]),
}
multisite_painters["event_application"] = {
"title" : _("Application / Syslog-Tag"),
"short" : _("Application"),
"columns" : ["event_application" ],
"paint" : lambda row: ("", row["event_application"]),
}
multisite_painters["event_pid"] = {
"title" : _("Process ID"),
"short" : _("PID"),
"columns" : ["event_pid" ],
"paint" : lambda row: ("", row["event_pid"]),
}
multisite_painters["event_priority"] = {
"title" : _("Syslog-Priority"),
"short" : _("Prio"),
"columns" : ["event_priority" ],
"paint" : lambda row: ("", dict(mkeventd.syslog_priorities)[row["event_priority"]]),
}
multisite_painters["event_facility"] = {
"title" : _("Syslog-Facility"),
"short" : _("Facility"),
"columns" : ["event_facility" ],
"paint" : lambda row: ("", dict(mkeventd.syslog_facilities)[row["event_facility"]]),
}
def paint_rule_id(row):
rule_id = row["event_rule_id"]
if config.may("mkeventd.edit"):
urlvars = html.urlencode_vars([("mode", "mkeventd_edit_rule"), ("rule_id", rule_id)])
return "", '<a href="wato.py?%s">%s</a>' % (urlvars, rule_id)
else:
return "", rule_id
multisite_painters["event_rule_id"] = {
"title" : _("Rule-ID"),
"short" : _("Rule"),
"columns" : ["event_rule_id" ],
"paint" : paint_rule_id,
}
def paint_event_state(row):
state = row["event_state"]
name = nagios_short_state_names[row["event_state"]]
return "state svcstate state%s" % state, name
multisite_painters["event_state"] = {
"title" : _("State (severity) of event"),
"short" : _("State"),
"columns" : ["event_state"],
"paint" : paint_event_state,
}
multisite_painters["event_phase"] = {
"title" : _("Phase of event (open, counting, etc.)"),
"short" : _("Phase"),
"columns" : ["event_phase" ],
"paint" : lambda row: ("", mkeventd.phase_names.get(row["event_phase"], ''))
}
def paint_event_icons(row):
phase = row["event_phase"]
if phase == "ack":
title = _("This event has been acknowledged.")
elif phase == "counting":
title = _("This event has not reached the target count yet.")
elif phase == "delayed":
title = _("The action of this event is still delayed in the hope of a cancelling event.")
else:
return "", ""
return 'icons', '<img class=icon title="%s" src="images/icon_%s.png">' % (title, phase)
multisite_painters["event_icons"] = {
"title" : _("Event Icons"),
"short" : _("Icons"),
"printable" : False,
"columns" : [ "event_phase" ],
"paint" : paint_event_icons,
}
def paint_event_contact_groups(row):
cgs = row.get("event_contact_groups")
if cgs == None:
return "", ""
elif cgs:
return "", ", ".join(cgs)
else:
return "", "<i>" + _("none") + "</i>"
multisite_painters["event_contact_groups"] = {
"title" : _("Fallback Contact Groups"),
"short" : _("Contact Groups"),
"columns" : [ "event_contact_groups" ],
"paint" : paint_event_contact_groups,
}
# Event History
multisite_painters["history_line"] = {
"title" : _("Line number in log file"),
"short" : _("Line"),
"columns" : ["history_line" ],
"paint" : lambda row: ("number", row["history_line"]),
}
multisite_painters["history_time"] = {
"title" : _("Time of entry in logfile"),
"short" : _("Time"),
"columns" : ["history_time" ],
"options" : [ "ts_format", "ts_date" ],
"paint" : lambda row: paint_age(row["history_time"], True, True),
}
multisite_painters["history_what"] = {
"title" : _("Type of event action"),
"short" : _("Action"),
"columns" : ["history_what" ],
"paint" : lambda row: ("", row["history_what"]),
}
multisite_painters["history_what_explained"] = {
"title" : _("Explanation for event action"),
"columns" : ["history_what" ],
"paint" : lambda row: ("", mkeventd.action_whats[row["history_what"]]),
}
multisite_painters["history_who"] = {
"title" : _("User who performed action"),
"short" : _("Who"),
"columns" : ["history_who" ],
"paint" : lambda row: ("", row["history_who"]),
}
multisite_painters["history_addinfo"] = {
"title" : _("Additional Information"),
"short" : _("Info"),
"columns" : ["history_addinfo" ],
"paint" : lambda row: ("", row["history_addinfo"]),
}
#.
# .--Commands------------------------------------------------------------.
# | ____ _ |
# | / ___|___ _ __ ___ _ __ ___ __ _ _ __ __| |___ |
# | | | / _ \| '_ ` _ \| '_ ` _ \ / _` | '_ \ / _` / __| |
# | | |__| (_) | | | | | | | | | | | (_| | | | | (_| \__ \ |
# | \____\___/|_| |_| |_|_| |_| |_|\__,_|_| |_|\__,_|___/ |
# | |
# '----------------------------------------------------------------------'
def command_executor_mkeventd(command, site):
response = mkeventd.query("COMMAND %s" % command)
# Acknowledge and update comment and contact
config.declare_permission("mkeventd.update",
_("Update an event"),
_("Needed for acknowledging and changing the comment and contact of an event"),
[ "user", "admin" ])
# Sub-Permissions for Changing Comment, Contact and Acknowledgement
config.declare_permission("mkeventd.update_comment",
_("Update an event: change comment"),
_("Needed for changing a comment when updating an event"),
[ "user", "admin" ])
config.declare_permission("mkeventd.update_contact",
_("Update an event: change contact"),
_("Needed for changing a contact when updating an event"),
[ "user", "admin" ])
def render_mkeventd_update():
html.write('<table border=0 cellspacing=3 cellpadding=0>')
if config.may("mkeventd.update_comment"):
html.write('<tr><td>%s</td><td>' % _("Change comment:"))
html.text_input('_mkeventd_comment', size=50)
html.write('</td></tr>')
if config.may("mkeventd.update_contact"):
html.write('<tr><td>%s</td><td>' % _("Change contact:"))
html.text_input('_mkeventd_contact', size=50)
html.write('</td></tr>')
html.write('<td></td><td>')
html.checkbox('_mkeventd_acknowledge', True, label=_("Set event to acknowledged"))
html.write('</td></tr>')
html.write('</table>')
html.button('_mkeventd_update', _("Update"))
def command_mkeventd_update(cmdtag, spec, row):
if html.var('_mkeventd_update'):
if config.may("mkeventd.update_comment"):
comment = html.var_utf8("_mkeventd_comment").strip().replace(";",",")
else:
comment = ""
if config.may("mkeventd.update_contact"):
contact = html.var_utf8("_mkeventd_contact").strip().replace(":",",")
else:
contact = ""
ack = html.get_checkbox("_mkeventd_acknowledge")
return "UPDATE;%s;%s;%s;%s;%s" % \
(row["event_id"], config.user_id, ack and 1 or 0, comment, contact), \
_("update")
multisite_commands.append({
"tables" : [ "event" ],
"permission" : "mkeventd.update",
"title" : _("Update & Acknowledge"),
"render" : render_mkeventd_update,
"action" : command_mkeventd_update,
"executor" : command_executor_mkeventd,
})
# Change event state
config.declare_permission("mkeventd.changestate",
_("Change event state"),
_("This permission allows to change the state classification of an event "
"(e.g. from CRIT to WARN)."),
[ "user", "admin" ])
def render_mkeventd_changestate():
html.button('_mkeventd_changestate', _("Change Event state to:"))
html.write(" ")
MonitoringState().render_input("_mkeventd_state", 2)
def command_mkeventd_changestate(cmdtag, spec, row):
if html.var('_mkeventd_changestate'):
state = MonitoringState().from_html_vars("_mkeventd_state")
return "CHANGESTATE;%s;%s;%s" % \
(row["event_id"], config.user_id, state), \
_("change the state")
multisite_commands.append({
"tables" : [ "event" ],
"permission" : "mkeventd.changestate",
"title" : _("Change State"),
"render" : render_mkeventd_changestate,
"action" : command_mkeventd_changestate,
"executor" : command_executor_mkeventd,
})
# Perform custom actions
config.declare_permission("mkeventd.actions",
_("Perform custom action"),
_("This permission is needed for performing the configured actions "
"(execution of scripts and sending emails)."),
[ "user", "admin" ])
def render_mkeventd_actions():
for action_id, title in mkeventd.action_choices(omit_hidden = True):
html.button("_action_" + action_id, title)
html.write("<br>")
def command_mkeventd_action(cmdtag, spec, row):
for action_id, title in mkeventd.action_choices(omit_hidden = True):
if html.var("_action_" + action_id):
return "ACTION;%s;%s;%s" % (row["event_id"], config.user_id, action_id), \
(_("execute that action "%s"") % title)
multisite_commands.append({
"tables" : [ "event" ],
"permission" : "mkeventd.actions",
"title" : _("Custom Action"),
"render" : render_mkeventd_actions,
"action" : command_mkeventd_action,
"executor" : command_executor_mkeventd,
})
# Delete events
config.declare_permission("mkeventd.delete",
_("Archive an event"),
_("Finally archive an event without any further action"),
[ "user", "admin" ])
def command_mkeventd_delete(cmdtag, spec, row):
if html.var("_delete_event"):
command = "DELETE;%s;%s" % (row["event_id"], config.user_id)
title = _("<b>archive</b>")
return command, title
multisite_commands.append({
"tables" : [ "event" ],
"permission" : "mkeventd.delete",
"title" : _("Archive Event"),
"render" : lambda: \
html.button("_delete_event", _("Archive Event")),
"action" : command_mkeventd_delete,
"executor" : command_executor_mkeventd,
})
#.
# .--Sorters-------------------------------------------------------------.
# | ____ _ |
# | / ___| ___ _ __| |_ ___ _ __ ___ |
# | \___ \ / _ \| '__| __/ _ \ '__/ __| |
# | ___) | (_) | | | || __/ | \__ \ |
# | |____/ \___/|_| \__\___|_| |___/ |
# | |
# '----------------------------------------------------------------------'
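# Sort helper for event states: UNKNOWN (3) is ranked between WARN (1) and CRIT (2) by mapping it to 1.5.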
def cmp_simple_state(column, ra, rb):
a = ra.get(column, -1)
b = rb.get(column, -1)
if a == 3:
a = 1.5
if b == 3:
b = 1.5
return cmp(a, b)
declare_1to1_sorter("event_id", cmp_simple_number)
declare_1to1_sorter("event_count", cmp_simple_number)
declare_1to1_sorter("event_text", cmp_simple_string)
declare_1to1_sorter("event_first", cmp_simple_number)
declare_1to1_sorter("event_last", cmp_simple_number)
declare_1to1_sorter("event_comment", cmp_simple_string)
declare_1to1_sorter("event_sl", cmp_simple_number)
declare_1to1_sorter("event_host", cmp_simple_string)
declare_1to1_sorter("event_contact", cmp_simple_string)
declare_1to1_sorter("event_application", cmp_simple_string)
declare_1to1_sorter("event_pid", cmp_simple_number)
declare_1to1_sorter("event_priority", cmp_simple_number)
declare_1to1_sorter("event_facility", cmp_simple_number) # maybe convert to text
declare_1to1_sorter("event_rule_id", cmp_simple_string)
declare_1to1_sorter("event_state", cmp_simple_state)
declare_1to1_sorter("event_phase", cmp_simple_string)
declare_1to1_sorter("event_owner", cmp_simple_string)
declare_1to1_sorter("history_line", cmp_simple_number)
declare_1to1_sorter("history_time", cmp_simple_number)
declare_1to1_sorter("history_what", cmp_simple_string)
declare_1to1_sorter("history_who", cmp_simple_string)
declare_1to1_sorter("history_addinfo", cmp_simple_string)
#.
# .--Views---------------------------------------------------------------.
# | __ ___ |
# | \ \ / (_) _____ _____ |
# | \ \ / /| |/ _ \ \ /\ / / __| |
# | \ V / | | __/\ V V /\__ \ |
# | \_/ |_|\___| \_/\_/ |___/ |
# | |
# '----------------------------------------------------------------------'
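# Common defaults shared by all built-in Event Console views; the view-specific settings passed in d override them.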
def mkeventd_view(d):
x = {
'topic': _('Event Console'),
'browser_reload': 60,
'column_headers': 'pergroup',
'icon': 'mkeventd',
'mobile': False,
'hidden': False,
'mustsearch': False,
'group_painters': [],
'num_columns': 1,
'hidebutton': False,
'play_sounds': False,
'public': True,
'sorters': [],
'user_sortable': 'on',
'show_filters': [],
'hard_filters': [],
'hide_filters': [],
'hard_filtervars': [],
}
x.update(d)
return x
# Table of all open events
multisite_builtin_views['ec_events'] = mkeventd_view({
'title': _('Events'),
'description': _('Table of all currently open events (handled and unhandled)'),
'datasource': 'mkeventd_events',
'layout': 'table',
'painters': [
('event_id', 'ec_event', ''),
('event_icons', None, ''),
('event_state', None, ''),
('event_sl', None, ''),
('event_host', 'ec_events_of_host', ''),
('event_rule_id', None, ''),
('event_application', None, ''),
('event_text', None, ''),
('event_last', None, ''),
('event_count', None, ''),
],
'show_filters': [
'event_id',
'event_rule_id',
'event_text',
'event_application',
'event_contact',
'event_comment',
'event_host_regex',
'event_count',
'event_phase',
'event_state',
'event_first',
'event_last',
'event_priority',
'event_facility',
'event_sl',
'event_sl_max',
'hostregex',
],
'hard_filtervars': [
( 'event_phase_open', "on" ),
( 'event_phase_ack', "on" ),
( 'event_phase_counting', "" ),
( 'event_phase_delayed', "" ),
],
})
multisite_builtin_views['ec_events_of_monhost'] = mkeventd_view({
'title': _('Events of Monitored Host'),
'description': _('Currently open events of a host that is monitored'),
'datasource': 'mkeventd_events',
'layout': 'table',
'hidden': True,
'painters': [
('event_id', 'ec_event', ''),
('event_icons', None, ''),
('event_state', None, ''),
('event_sl', None, ''),
('event_rule_id', None, ''),
('event_application', None, ''),
('event_text', None, ''),
('event_last', None, ''),
('event_count', None, ''),
],
'show_filters': [
'event_id',
'event_rule_id',
'event_text',
'event_application',
'event_contact',
'event_comment',
'event_count',
'event_phase',
'event_state',
'event_first',
'event_last',
'event_priority',
'event_facility',
'event_sl',
'event_sl_max',
],
'hide_filters': [
'siteopt',
'host',
],
})
multisite_builtin_views['ec_events_of_host'] = mkeventd_view({
'title': _('Events of Host'),
'description': _('Currently open events of one specific host'),
'datasource': 'mkeventd_events',
'layout': 'table',
'hidden': True,
'painters': [
('event_id', 'ec_event', ''),
('event_icons', None, ''),
('event_state', None, ''),
('event_sl', None, ''),
('event_rule_id', None, ''),
('event_application', None, ''),
('event_text', None, ''),
('event_last', None, ''),
('event_count', None, ''),
],
'show_filters': [
'event_id',
'event_rule_id',
'event_text',
'event_application',
'event_contact',
'event_comment',
'event_count',
'event_phase',
'event_state',
'event_first',
'event_last',
'event_priority',
'event_facility',
'event_sl',
'event_sl_max',
],
'hide_filters': [
'siteopt',
'event_host',
],
})
multisite_builtin_views['ec_event'] = mkeventd_view({
'title': _('Event Details'),
'description': _('Details about one event'),
'linktitle': 'Event Details',
'datasource': 'mkeventd_events',
'layout': 'dataset',
'hidden': True,
'browser_reload': 0,
'hide_filters': [
'event_id',
],
'painters': [
('event_state', None, ''),
('event_host', None, ''),
('host_address', 'hoststatus', ''),
('host_contacts', None, ''),
('host_icons', None, ''),
('event_text', None, ''),
('event_match_groups', None, ''),
('event_comment', None, ''),
('event_owner', None, ''),
('event_first', None, ''),
('event_last', None, ''),
('event_id', None, ''),
('event_icons', None, ''),
('event_count', None, ''),
('event_sl', None, ''),
('event_contact', None, ''),
('event_contact_groups', None, ''),
('event_application', None, ''),
('event_pid', None, ''),
('event_priority', None, ''),
('event_facility', None, ''),
('event_rule_id', None, ''),
('event_phase', None, ''),
('host_services', None, ''),
],
})
multisite_builtin_views['ec_history_recent'] = mkeventd_view({
'title': _('Recent Event History'),
'description': _('Information about events and actions on events during the last 24 hours.'),
'datasource': 'mkeventd_history',
'layout': 'table',
'painters': [
('history_time', None, ''),
('event_id', 'ec_historyentry', ''),
('history_who', None, ''),
('history_what', None, ''),
('event_icons', None, ''),
('event_state', None, ''),
('event_phase', None, ''),
('event_sl', None, ''),
('event_host', 'ec_history_of_host', ''),
('event_rule_id', None, ''),
('event_application', None, ''),
('event_text', None, ''),
('event_last', None, ''),
('event_count', None, ''),
],
'show_filters': [
'event_id',
'event_rule_id',
'event_text',
'event_application',
'event_contact',
'event_comment',
'event_host_regex',
'event_count',
'event_phase',
'event_state',
'event_first',
'event_last',
'event_priority',
'event_facility',
'event_sl',
'event_sl_max',
'history_time',
'history_who',
'history_what',
'host_state_type',
],
'hard_filtervars': [
('history_time_from', '1'),
('history_time_from_range', '86400'),
],
'sorters': [
('history_time', True),
('history_line', True),
],
})
multisite_builtin_views['ec_historyentry'] = mkeventd_view({
'title': _('Event History Entry'),
'description': _('Details about a historical event history entry'),
'datasource': 'mkeventd_history',
'layout': 'dataset',
'hidden': True,
'browser_reload': 0,
'hide_filters': [
'event_id',
'history_line',
],
'painters': [
('history_time', None, ''),
('history_line', None, ''),
('history_what', None, ''),
('history_what_explained', None, ''),
('history_who', None, ''),
('history_addinfo', None, ''),
('event_state', None, ''),
('event_host', 'ec_history_of_host', ''),
('event_text', None, ''),
('event_match_groups', None, ''),
('event_comment', None, ''),
('event_owner', None, ''),
('event_first', None, ''),
('event_last', None, ''),
('event_id', 'ec_history_of_event', ''),
('event_icons', None, ''),
('event_count', None, ''),
('event_sl', None, ''),
('event_contact', None, ''),
('event_contact_groups', None, ''),
('event_application', None, ''),
('event_pid', None, ''),
('event_priority', None, ''),
('event_facility', None, ''),
('event_rule_id', None, ''),
('event_phase', None, ''),
],
})
multisite_builtin_views['ec_history_of_event'] = mkeventd_view({
'title': _('History of Event'),
'description': _('History entries of one specific event'),
'datasource': 'mkeventd_history',
'layout': 'table',
'columns': 1,
'hidden': True,
'browser_reload': 0,
'hide_filters': [
'event_id',
],
'painters': [
('history_time', None, ''),
('history_line', 'ec_historyentry', ''),
('history_what', None, ''),
('history_what_explained', None, ''),
('history_who', None, ''),
('event_state', None, ''),
('event_host', None, ''),
('event_application', None, ''),
('event_text', None, ''),
('event_sl', None, ''),
('event_priority', None, ''),
('event_facility', None, ''),
('event_phase', None, ''),
('event_count', None, ''),
],
'sorters': [
('history_time', True),
('history_line', True),
],
})
multisite_builtin_views['ec_history_of_host'] = mkeventd_view({
'title': _('Event History of Host'),
'description': _('History entries of one specific host'),
'datasource': 'mkeventd_history',
'layout': 'table',
'columns': 1,
'hidden': True,
'browser_reload': 0,
'hide_filters': [
'event_host',
],
'show_filters': [
'event_id',
'event_rule_id',
'event_text',
'event_application',
'event_contact',
'event_comment',
'event_count',
'event_phase',
'event_state',
'event_first',
'event_last',
'event_priority',
'event_facility',
'event_sl',
'event_sl_max',
'history_time',
'history_who',
'history_what',
],
'painters': [
('history_time', None, ''),
('event_id', 'ec_history_of_event', ''),
('history_line', 'ec_historyentry', ''),
('history_what', None, ''),
('history_what_explained', None, ''),
('history_who', None, ''),
('event_state', None, ''),
('event_host', None, ''),
('event_application', None, ''),
('event_text', None, ''),
('event_sl', None, ''),
('event_priority', None, ''),
('event_facility', None, ''),
('event_phase', None, ''),
('event_count', None, ''),
],
'sorters': [
('history_time', True),
('history_line', True),
],
})
multisite_builtin_views['ec_event_mobile'] = \
{'browser_reload': 0,
'column_headers': 'pergroup',
'context': {},
'datasource': 'mkeventd_events',
'description': u'Details about one event\n',
'group_painters': [],
'hidden': True,
'hidebutton': False,
'icon': 'mkeventd',
'layout': 'mobiledataset',
'linktitle': u'Event Details',
'mobile': True,
'name': 'ec_event_mobile',
'num_columns': 1,
'painters': [('event_state', None, None),
('event_host', None, None),
('host_address', 'hoststatus', None),
('host_contacts', None, None),
('host_icons', None, None),
('event_text', None, None),
('event_comment', None, None),
('event_owner', None, None),
('event_first', None, None),
('event_last', None, None),
('event_id', None, None),
('event_icons', None, None),
('event_count', None, None),
('event_sl', None, None),
('event_contact', None, None),
('event_contact_groups', None, None),
('event_application', None, None),
('event_pid', None, None),
('event_priority', None, None),
('event_facility', None, None),
('event_rule_id', None, None),
('event_phase', None, None),
('host_services', None, None)],
'public': True,
'single_infos': ['event'],
'sorters': [],
'title': u'Event Details',
'topic': u'Event Console',
'user_sortable': True}
multisite_builtin_views['ec_events_mobile'] = \
{'browser_reload': 60,
'column_headers': 'pergroup',
'context': {'event_application': {'event_application': ''},
'event_comment': {'event_comment': ''},
'event_contact': {'event_contact': ''},
'event_count': {'event_count_from': '',
'event_count_to': ''},
'event_facility': {'event_facility': ''},
'event_first': {'event_first_from': '',
'event_first_from_range': '3600',
'event_first_until': '',
'event_first_until_range': '3600'},
'event_host_regex': {'event_host_regex': ''},
'event_id': {'event_id': ''},
'event_last': {'event_last_from': '',
'event_last_from_range': '3600',
'event_last_until': '',
'event_last_until_range': '3600'},
'event_phase': {'event_phase_ack': 'on',
'event_phase_closed': 'on',
'event_phase_counting': '',
'event_phase_delayed': '',
'event_phase_open': 'on'},
'event_priority': {'event_priority_0': 'on',
'event_priority_1': 'on',
'event_priority_2': 'on',
'event_priority_3': 'on',
'event_priority_4': 'on',
'event_priority_5': 'on',
'event_priority_6': 'on',
'event_priority_7': 'on'},
'event_rule_id': {'event_rule_id': ''},
'event_sl': {'event_sl': ''},
'event_sl_max': {'event_sl_max': ''},
'event_state': {'event_state_0': 'on',
'event_state_1': 'on',
'event_state_2': 'on',
'event_state_3': 'on'},
'event_text': {'event_text': ''},
'hostregex': {'host_regex': ''}},
'datasource': 'mkeventd_events',
'description': u'Table of all currently open events (handled and unhandled)\n',
'group_painters': [],
'hidden': False,
'hidebutton': False,
'icon': 'mkeventd',
'layout': 'mobilelist',
'linktitle': u'Events',
'mobile': True,
'name': 'ec_events_mobile',
'num_columns': 1,
'owner': 'omdadmin',
'painters': [('event_id', 'ec_event_mobile', None),
('event_state', None, None),
('event_host', 'ec_events_of_host', None),
('event_application', None, None),
('event_text', None, None),
('event_last', None, None)],
'public': True,
'single_infos': [],
'sorters': [],
'title': u'Events',
'topic': u'Event Console',
'user_sortable': True}
|
iceman1989/Check_mk
|
mkeventd/web/plugins/views/mkeventd.py
|
Python
|
gpl-2.0
| 48,814
|
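# Reads temperature and humidity values from a sensor attached to the serial port and stores them in a SQLite database.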
import serial
import sqlite3
import time
import datetime

MEASUREMENT_COMMAND = b'm'
SERIAL_DEVICE = '/dev/ttyUSB0'
DB_FILE = 'measurements.db'
DB_SCHEMA = 'src/schema.sql'
DB_INSERT = 'src/insert.sql'

def read_measurements():
    conn = serial.Serial(SERIAL_DEVICE, baudrate=9600, timeout=1)
    if not conn.isOpen():
        raise IOError("Could not open serial connection for interface: %s" % SERIAL_DEVICE)
    time.sleep(2.5) # Make sure the connection is ready for use
    conn.write(MEASUREMENT_COMMAND)
    response = conn.read(7)
    conn.close()
    return response.decode().split(' ')

def create_db(db=None):
    with open(DB_SCHEMA, 'r') as schema_file:
        schema = schema_file.read()
    if db is None:
        db = sqlite3.connect(DB_FILE)
    cur = db.cursor()
    cur.executescript(schema)
    db.commit()
    db.close()

def add_measurement():
    res = read_measurements()
    date = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
    _save_measurement('Temperature', res[0], date)
    _save_measurement('Humidity', res[1], date)

def _save_measurement(name, value, date, db=None):
    with open(DB_INSERT, 'r') as insert_file:
        insert = insert_file.read()
    if db is None:
        db = sqlite3.connect(DB_FILE)
    cur = db.cursor()
    cur.execute(insert, (name, value, date))
    db.commit()
    db.close()
|
RoopeSavolainen/HomeAutomation
|
Raspi/src/reader.py
|
Python
|
mit
| 1,390
|
#!/usr/bin/env python
import os, sys
import cosmology
import cosmopy
import numpy
import math
import pylab
import matplotlib
#from matplotlib import rc
Polygon = matplotlib.patches.Polygon
from matplotlib import rc
#rc('text', usetex=True)
#rc('font',**{'family':'sans-serif','sans-serif':['VeraSe']})
rc('axes', labelsize=20)
rc('axes', titlesize=20)
rc('xtick', labelsize=18)
rc('ytick', labelsize=18)
def main():
LBCG = 4.0
z = numpy.arange(0.01, 1.5, 0.01)
#mstar = mi_star(z)
mstar = mi_star_evol(z)
mstar_sub = mstar - 2.5 * math.log10(0.4)
BCG = mstar - 2.5 * math.log10(LBCG)
# 80% recovery - iband
mlim_SOAR = 24.30
mlim_NTT = 23.50
x = numpy.array([0, 1.5])
y = numpy.array([mlim_SOAR, mlim_SOAR])
ax = pylab.figure(1, figsize=(8, 6))
pylab.plot(z, mstar, 'k-', linewidth=3)
pylab.plot(z, mstar_sub, 'k--', linewidth=1)
pylab.plot(z, BCG, 'k-', linewidth=1)
ax = pylab.axes()
pylab.xlabel('Redshift')
pylab.ylabel('i magnitude')
# For the evolution of colors
BCSPIPE = os.getenv('BCSPIPE')
BCSPIPE = '/home/boada/Projects/planckClusters/MOSAICpipe/'
evolfile = "1_0gyr_hr_m62_salp.color"
evolfile = os.path.join(BCSPIPE, "LIB/evol", evolfile)
k, ev, colorZ = KEfit(evolfile)
SED = 'El_Benitez2003'
#SED = 'El_cww'
# Add the Blakeless points now
z_850 = numpy.array([21.1, 21.2, 21.4])
i_pts = z_850 + cosmology.color_z(SED, 'i_SDSS', 'z_WFC', 1.24)
#i_pts = z_850 + colorZ['iz'](1.24)
z_pts = i_pts * 0.0 + 1.24
pylab.plot(z_pts, i_pts, 'o', mfc='0.7',
mec='k', ms=6) #mec='k',mfc='0.7',ms=5)
# Add the Mullis points now
z_VLT = numpy.array([20.3, 21.0, 21.2])
i_pts = z_VLT + cosmology.color_z(SED, 'i_SDSS', 'z_SDSS', 1.40)
#i_pts = z_VLT + colorZ['iz'](1.40)
z_pts = i_pts * 0.0 + 1.40
pylab.plot(z_pts, i_pts, 's', mec='k', mfc='0.9', ms=5)
# Add the Standford clusters
z_850 = numpy.array([22.80, 22.84, 22.78])
i_pts = z_850 + cosmology.color_z(SED, 'i_SDSS', 'z_WFC', 1.46)
#i_pts = z_850 + colorZ['iz'](1.46)
z_pts = i_pts * 0.0 + 1.46
pylab.plot(z_pts, i_pts, 'x', mec='k', mfc='0.9', ms=5)
pylab.legend(('$L^*$ galaxy', '$0.4L^*$ galaxy', '$%dL^*$ (BCG)' % LBCG),
'lower right',
fancybox=True,
shadow=True)
pylab.text(1.24, 21.7, "CL-1252.9-2927", size=8, ha='center')
pylab.text(1.36, 20.7, "XMMU-2235.3-2557", size=8, ha='center')
pylab.text(1.36, 23.95, "XMM-2215.9-1738", size=8, ha='center')
# Make detection region
x = numpy.array([0, 1.5])
y = numpy.array([mlim_NTT, mlim_NTT])
pylab.plot(x, y, 'k:')
X1 = 0
X2 = 1.5
Y1 = mlim_NTT
Y2 = mlim_NTT + 2.0
verts = [(X1, Y1), (X1, Y2), (X2, Y2), (X2, Y1)]
poly = Polygon(verts, facecolor='0.95', edgecolor='0.85')
ax.add_patch(poly)
pylab.text(0.1, mlim_NTT + 0.1, "NTT/EFOSC")
x = numpy.array([0, 1.5])
y = numpy.array([mlim_SOAR, mlim_SOAR])
pylab.plot(x, y, 'k:')
Y1 = mlim_SOAR
Y2 = mlim_SOAR + 2.0
verts = [(X1, Y1), (X1, Y2), (X2, Y2), (X2, Y1)]
poly = Polygon(verts, facecolor='0.70', edgecolor='0.85')
ax.add_patch(poly)
pylab.text(0.1, mlim_SOAR + 0.1, "SOAR/SOI")
pylab.xlim(0.05, 1.5)
pylab.ylim(16.5, 26)
pylab.savefig('mi_z.pdf')
pylab.savefig('mi_z.eps')
pylab.show()
return
# observed mi_star as a function of redshift
def mr_star(z, h=0.7, cosmo=(0.3, 0.7, 0.7)):
dlum = cosmology.dl(z, cosmology=cosmo)
# Red galaxies fit from Brown et al
#Mb_star = -19.43 - 1.01*z
#Mr_star = cosmology.reobs('El_Benitez2003',m=Mb_star, oldfilter="B_Johnson", newfilter="r_SDSS")
# Alternative -- Paolillo et al. (2001) LF for clusters
#Mr_star = -21.53 + self.evf['r'](z)[0]
#Mi_star = cosmology.reobs('El_Benitez2003',m=Mr_star, oldfilter="R_Cousins", newfilter="i_MOSAICII")
# Blanton M*
Mi_star = -21.22 - 5 * math.log10(h) #+ self.evf['i'](z)[0]
Mr_star = cosmology.reobs('El_Benitez2003',
m=Mi_star,
oldfilter="i_SDSS",
newfilter="r_SDSS")
return Mr_star + 5.0 * numpy.log10(dlum) + 25
# observed mi_star as a function of redshift
def mi_star(z, h=0.7, cosmo=(0.3, 0.7, 0.7), SED='El_Benitez2003'):
dlum = cosmology.dl(z, cosmology=cosmo)
# Red galaxies fit from Brown et al
#Mb_star = -19.43 - 1.01*z
#Mr_star = cosmology.reobs('El_Benitez2003',m=Mb_star, oldfilter="B_Johnson", newfilter="r_SDSS")
# Alternative -- Paolillo et al. (2001) LF for clusters
#Mr_star = -21.53 + self.evf['r'](z)[0]
#Mi_star = cosmology.reobs('El_Benitez2003',m=Mr_star, oldfilter="R_Cousins", newfilter="i_MOSAICII")
# Blanton M*
#Mi_star = -21.22 - 5*math.log10(h) #+ self.evf['i'](z)[0]
Mi_star = -21.22 - 5 * math.log10(h) # cosmology.kcor(z,SED,'i_SDSS')
#Mr_star = cosmology.reobs('El_Benitez2003',m=Mi_star, oldfilter="i_SDSS", newfilter="r_SDSS")
return Mi_star + 5.0 * numpy.log10(dlum) + 25
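# Observed i-band m* as a function of redshift, including the k-correction and evolution terms read from the BC03 model file.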
def mi_star_evol(z, h=0.7, cosmo=(0.3, 0.7, 0.7)):
# Blanton's number i.e. M* - 1.5 mags
BCSPIPE = os.getenv('BCSPIPE')
BCSPIPE = '/home/boada/Projects/planckClusters/MOSAICpipe/'
evolfile = "1_0gyr_hr_m62_salp.color"
evolfile = os.path.join(BCSPIPE, "LIB/evol", evolfile)
k, ev, c = KEfit(evolfile)
dlum = cosmology.dl(z, cosmology=cosmo)
# Blanton M*
Mi_star = -21.22 - 5 * math.log10(h) #+ self.evf['i'](z)[0]
dlum = cosmology.dl(z, cosmology=cosmo)
DM = 25.0 + 5.0 * numpy.log10(dlum)
mx = Mi_star + DM + k['i'](z) + ev['i'](z) - ev['i'](0.1)
return mx
##################################################################
# Read both kcorrection k(z) and evolution ev(z) from BC03 model
##################################################################
def KEfit(modelfile):
import scipy
import scipy.interpolate
import tableio
print("# Getting K(z) and Ev(z) corrections from file: %s\n" % modelfile)
e = {}
k = {}
c = {}
(z, c_gr, c_ri, c_iz, k_g, k_r, k_i, k_z, e_g, e_r, e_i,
e_z) = tableio.get_data(modelfile,
cols=(0, 3, 4, 5, 10, 11, 12, 13, 14, 15, 16, 17))
# K-only correction at each age SED,
k['g'] = scipy.interpolate.interp1d(z, k_g)
k['r'] = scipy.interpolate.interp1d(z, k_r)
k['i'] = scipy.interpolate.interp1d(z, k_i)
k['z'] = scipy.interpolate.interp1d(z, k_z)
# Evolution term alone
e['g'] = scipy.interpolate.interp1d(z, e_g)
e['r'] = scipy.interpolate.interp1d(z, e_r)
e['i'] = scipy.interpolate.interp1d(z, e_i)
e['z'] = scipy.interpolate.interp1d(z, e_z)
# Color redshift
c['gr'] = scipy.interpolate.interp1d(z, c_gr)
c['ri'] = scipy.interpolate.interp1d(z, c_ri)
c['iz'] = scipy.interpolate.interp1d(z, c_iz)
return k, e, c
main()
|
boada/planckClusters
|
snippets/magnitude-redshift-i.py
|
Python
|
mit
| 7,008
|
#!/usr/bin/env python
from demobrowser import app, db

#db.drop_all()
db.create_all()

app.debug = app.config.get('DEBUG', False)

# This is required.
app.secret_key = app.config.get('SECRET_KEY', None)

if app.secret_key is None:
    print "ERROR: SECRET_KEY not found in settings.cfg. Please see README.md for help!"
else:
    app.run(host=app.config.get('ADDRESS', '0.0.0.0'))
|
FernFerret/demobrowser
|
run.py
|
Python
|
mit
| 379
|
# coding: utf-8
_auth_views_code = '''# coding: utf-8
from . import auth
from flask import render_template, url_for, redirect, flash
from flask_login import login_user, logout_user, current_user, login_required
from app.models import User
from .forms import LoginForm


@auth.route('/login', methods=['GET', 'POST'])
def login():
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username = form.username.data).first()
        if user is not None and user.verify_password(form.password.data):
            login_user(user)
            return redirect('/admin')
    return render_template('auth/login.html', form=form)


@auth.route('/logout')
@login_required
def logout():
    logout_user()
    return redirect(url_for('auth.login'))
'''
|
neo1218/mana
|
mana/templates/auth/_auth_views.py
|
Python
|
mit
| 783
|
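# Bit-flag identifiers for individual fingers; the values are powers of two so they can be combined with bitwise OR.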
class Finger(object):
    Thumb = 1
    Index = 2
    Middle = 4
    Ring = 8
    Pinky = 16
|
dany74q/python-microsoft-project-prague-sdk
|
microsoft/gestures/finger.py
|
Python
|
mit
| 93
|
from tree import Tree
from copy import deepcopy
import codecs
import sys
# http://www.nltk.org/_modules/nltk/treetransforms.html#chomsky_normal_form
HORZMARKOV = 0
VERTMARKOV = 0
LANGUAGE = "eng"
def read_from_ptb(filename):
ptb_file = open(filename, 'r')
for l in ptb_file:
tree = Tree.fromstring(l, remove_empty_top_bracketing=False)
original = deepcopy(tree)
chomsky_normal_form(tree, horzMarkov=HORZMARKOV, vertMarkov=VERTMARKOV, childChar='|', parentChar='#')
ptb_file.close()
# Head rules used to pick the head child in each case
# (a re-implementation of the rules from Collins' (1999) thesis)
# Lingpeng Kong, lingpenk@cs.cmu.edu
PUNCTSET = set([".", ",", ":", "``", "''"])
def remove_labelChar(n, labelChar = '^'):
r_labelChar = labelChar
if isinstance(n, str) or isinstance(n, unicode):
if isinstance(n, unicode):
r_labelChar = labelChar.encode('utf-8')
return n[:(n.rfind(labelChar) if n.rfind(labelChar) > 0 else len(n))]
else:
return [remove_labelChar(x) for x in n]
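# Dispatch to the English or Chinese head-finding rules depending on the LANGUAGE setting.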
def findHead(parent,child_list, labelChar='^'):
if LANGUAGE == "eng":
return findHeadEnglish(parent,child_list, labelChar)
else:
r_parent = str(parent)
r_child_list = [str(c) for c in child_list]
x_parent = remove_labelChar(r_parent, labelChar)
x_child_list = remove_labelChar(r_child_list, labelChar)
return findHeadChinses(x_parent,x_child_list, labelChar)
def findHeadChinses(parent,child_list, labelChar='^'):
if len(child_list) == 1:
#Unary Rule -> the head must be the only one
return 0
# Chinese HeadRule : http://stp.lingfil.uu.se/~nivre/research/chn_headrules.txt
# @inproceedings{ding2005machine,
# title={Machine translation using probabilistic synchronous dependency insertion grammars},
# author={Ding, Yuan and Palmer, Martha},
# booktitle={Proceedings of the 43rd Annual Meeting on Association for Computational Linguistics},
# pages={541--548},
# year={2005},
# organization={Association for Computational Linguistics}
# }
# ADJP r ADJP JJ;r AD NN CS;r
if parent == "ADJP":
s = set(["ADJP", "JJ"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
s = set(["AD", "NN", "CS"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
return lastNoPunctChild(child_list)
# ADVP r ADVP AD;r
if parent == "ADVP":
s = set(["ADVP", "AD"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
return lastNoPunctChild(child_list)
# CLP r CLP M;r
if parent == "CLP":
s = set(["CLP", "M"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
return lastNoPunctChild(child_list)
# CP r DEC SP;l ADVP CS;r CP IP;r
if parent == "CP":
s = set(["DEC", "SP"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
s = set(["ADVP", "CS"])
for i in xrange(len(child_list)):
if child_list[i] in s:
return i
s = set(["CP", "IP"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
return lastNoPunctChild(child_list)
# DNP r DNP DEG;r DEC;r
if parent == "DNP":
s = set(["DNP", "DEG"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
s = set(["DEC"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
return lastNoPunctChild(child_list)
# DP l DP DT;l
if parent == "DP":
s = set(["DP", "DT"])
for i in xrange(len(child_list)):
if child_list[i] in s:
return i
return firstNoPunctChild(child_list)
# DVP r DVP DEV;r
if parent == "DVP":
s = set(["DVP", "DEV"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
return lastNoPunctChild(child_list)
# FRAG r VV NR NN;r
if parent == "FRAG":
s = set(["VV", "NR", "NN"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
return lastNoPunctChild(child_list)
# INTJ r INTJ IJ;r
if parent == "INTJ":
s = set(["INTJ", "IJ"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
return lastNoPunctChild(child_list)
# IP r IP VP;r VV;r
if parent == "IP":
s = set(["IP", "VP"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
s = set(["VV"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
return lastNoPunctChild(child_list)
# LCP r LCP LC;r
if parent == "LCP":
s = set(["LCP", "LC"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
return lastNoPunctChild(child_list)
# LST l LST CD OD;l
if parent == "LST":
s = set(["LST", "CD", "OD"])
for i in xrange(len(child_list)):
if child_list[i] in s:
return i
return firstNoPunctChild(child_list)
# NP r NP NN NT NR QP;r
if parent == "NP":
s = set(["NP", "NN", "NT", "NR", "QP"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
return lastNoPunctChild(child_list)
# PP l PP P;l
if parent == "PP":
s = set(["PP", "P"])
for i in xrange(len(child_list)):
if child_list[i] in s:
return i
return firstNoPunctChild(child_list)
# PRN r NP IP VP NT NR NN;r
if parent == "PRN":
s = set(["NP", "IP", "VP", "NT", "NR", "NN"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
return lastNoPunctChild(child_list)
# QP r QP CLP CD OD;r
if parent == "QP":
s = set(["QP", "CLP", "CD", "OD"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
return lastNoPunctChild(child_list)
# UCP r
if parent == "UCP":
return lastNoPunctChild(child_list)
# VCD r VCD VV VA VC VE;r
if parent == "VCD":
s = set(["VCD", "VV", "VA", "VC", "VE"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
return lastNoPunctChild(child_list)
# VCP r VCP VV VA VC VE;r
if parent == "VCP":
s = set(["VCP", "VV", "VA", "VC", "VE"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
return lastNoPunctChild(child_list)
# VNV r VNV VV VA VC VE;r
if parent == "VNV":
s = set(["VNV", "VV", "VA", "VC", "VE"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
return lastNoPunctChild(child_list)
# VP l VP VA VC VE VV BA LB VCD VSB VRD VNV VCP;l
if parent == "VP":
s = set(["VP", "VA", "VC", "VE", "VV", "BA", "LB", "VCD", "VSB", "VRD", "VNV", "VCP"])
for i in xrange(len(child_list)):
if child_list[i] in s:
return i
return firstNoPunctChild(child_list)
# VPT r VNV VV VA VC VE;r
if parent == "VPT":
s = set(["VNV", "VV", "VA", "VC", "VE"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
return lastNoPunctChild(child_list)
# VRD r VRD VV VA VC VE;r
if parent == "VRD":
s = set(["VRD", "VV", "VA", "VC", "VE"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
return lastNoPunctChild(child_list)
# VSB r VSB VV VA VC VE;r
if parent == "VSB":
s = set(["VSB", "VV", "VA", "VC", "VE"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
return lastNoPunctChild(child_list)
# WHNP r WHNP NP NN NT NR QP;r
if parent == "WHNP":
s = set(["WHNP", "NP", "NN", "NT", "NR", "QP"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
return lastNoPunctChild(child_list)
# WHPP l WHPP PP P;l
if parent == "WHPP":
s = set(["WHPP", "PP", "P"])
for i in xrange(len(child_list)):
if child_list[i] in s:
return i
return firstNoPunctChild(child_list)
return firstNoPunctChild(child_list)
# Parent is a string, child_list is a list of string
def findHeadEnglish(parent,child_list, labelChar='^'):
r_parent = remove_labelChar(parent, labelChar)
r_child_list = remove_labelChar(child_list, labelChar)
#print r_child_list
if len(r_child_list) == 1:
#Unary Rule -> the head must be the only one
return 0
normalHead = findHeaderNormal(r_parent, r_child_list)
return findHeaderCoord(r_parent, r_child_list, normalHead)
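# Fallback helpers: index of the first / last child that is not punctuation, used when no head rule matches.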
def firstNoPunctChild(child_list):
for i in xrange(len(child_list)):
if child_list[i] not in PUNCTSET:
return i
return 0
def lastNoPunctChild(child_list):
for i in reversed(xrange(len(child_list))):
if child_list[i] not in PUNCTSET:
return i
return -1
def test():
print findHead("NP",["DT", "NNP", "CC", "NNP"])
print findHead("VP",["ADVP", "VBN", "PP", "NP"])
print findHead("NP",["NP", "NP",",","NP",",","NP","CC","NP"])
def findHeaderNormal(parent, child_list):
# Rules for NPs
if parent == "NP":
# If the last word is tagged POS return (last word)
if child_list[lastNoPunctChild(child_list)] == "POS":
return lastNoPunctChild(child_list)
# Else search from right to left for the first child which is an
# NN, NNP, NNPS, NNS, NX, POS or JJR
s = set(["NN", "NNP", "NNPS", "NNS", "NX", "POS", "JJR"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
# Else search from left to right for the first child which is an NP
for i in xrange(len(child_list)):
if child_list[i] == "NP":
return i
# Else search from right to left for the first child which is a $,
# ADJP or PRN.
s = set(["$", "ADJP", "PRN"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
# Else search from right to left for the first child which is a CD.
for i in reversed(xrange(len(child_list))):
if child_list[i] == ("CD"):
return i
# Else search from right to left for the first child which is a JJ,
# JJS, RB or QP.
s = set(["JJ", "JJS", "RB", "QP"])
for i in reversed(xrange(len(child_list))):
if child_list[i] in s:
return i
# Else return the last word.
return lastNoPunctChild(child_list)
# ADJP -- Left -- NNS QP NN $ ADVP JJ VBN VBG ADJP JJR NP JJS DT FW RBR
# RBS SBAR RB
if parent == "ADJP":
plist = ["NNS", "QP", "NN", "$", "ADVP", "JJ", "VBN", "VBG", "ADJP", "JJR", "NP", "JJS", "DT", "FW", "RBR", "RBS", "SBAR", "RB"]
for pe in plist:
for i in xrange(len(child_list)):
if child_list[i] == pe:
return i
# Nothing found, return leftmost
return firstNoPunctChild(child_list)
# ADVP -- Right -- RB RBR RBS FW ADVP TO CD JJR JJ IN NP JJS NN
if parent == "ADVP":
plist = ["RB", "RBR", "RBS", "FW", "ADVP", "TO", "CD", "JJR", "JJ", "IN", "NP", "JJS", "NN"]
for pe in plist:
for i in reversed(xrange(len(child_list))):
if child_list[i] == pe:
return i
# Nothing found, return rightmost
return lastNoPunctChild(child_list)
# CONJP -- Right -- CC RB IN
if parent == "CONJP":
plist = ["CC", "RB", "IN"]
for pe in plist:
for i in reversed(xrange(len(child_list))):
if child_list[i] == pe:
return i
# Nothing found, return rightmost
return lastNoPunctChild(child_list)
# FRAG -- Right
if parent == "FRAG":
return lastNoPunctChild(child_list)
# INTJ -- Left
if parent == "INTJ":
return firstNoPunctChild(child_list)
# LST -- Right -- LS :
if parent == "LST":
plist = ["LS", ":"]
for pe in plist:
for i in reversed(xrange(len(child_list))):
if child_list[i] == pe:
return i
# Nothing found, return rightmost
return lastNoPunctChild(child_list)
# NAC -- Left -- NN NNS NNP NNPS NP NAC EX $ CD QP PRP VBG JJ JJS JJR
# ADJP FW
if parent == "NAC":
plist = ["NN", "NNS", "NNP", "NNPS", "NP", "NAC", "EX", "$", "CD", "QP", "PRP", "VBG", "JJ", "JJS", "JJR", "ADJP", "FW"]
for pe in plist:
for i in reversed(xrange(len(child_list))):
if child_list[i] == pe:
return i
# Nothing found, return leftmost
return firstNoPunctChild(child_list)
# PP -- Right -- IN TO VBG VBN RP FW
if parent == "PP":
plist = ["IN", "TO", "VBG", "VBN", "RP", "FW"]
for pe in plist:
for i in reversed(xrange(len(child_list))):
if child_list[i] == pe:
return i
# Nothing found, return rightmost
return lastNoPunctChild(child_list)
# PRN -- Left
if parent == "PRN":
return firstNoPunctChild(child_list)
# PRT -- Right -- RP
if parent == "PRT":
plist = ["RP"]
for pe in plist:
for i in reversed(xrange(len(child_list))):
if child_list[i] == pe:
return i
# Nothing found, return rightmost
return lastNoPunctChild(child_list)
# QP -- Left -- $ IN NNS NN JJ RB DT CD NCD QP JJR JJS
if parent == "QP":
plist = ["$", "IN", "NNS", "NN", "JJ", "RB", "DT", "CD", "NCD", "QP", "JJR", "JJS"]
for pe in plist:
for i in xrange(len(child_list)):
if child_list[i] == pe:
return i
# Nothing found, return leftmost
return firstNoPunctChild(child_list)
# RRC -- Right -- VP NP ADVP ADJP PP
if parent == "RRC":
plist = ["VP","NP","ADVP","ADJP","PP"]
for pe in plist:
for i in reversed(xrange(len(child_list))):
if child_list[i] == pe:
return i
# Nothing found, return rightmost
return lastNoPunctChild(child_list)
# S -- Left -- TO IN VP S SBAR ADJP UCP NP
if parent == "S":
plist = ["TO", "IN", "VP", "S", "SBAR", "ADJP", "UCP", "NP"]
for pe in plist:
for i in xrange(len(child_list)):
if child_list[i] == pe:
return i
# Nothing found, return leftmost
return firstNoPunctChild(child_list)
# SBAR -- Left -- WHNP WHPP WHADVP WHADJP IN DT S SQ SINV SBAR FRAG
if parent == "SBAR":
plist = ["WHNP", "WHPP", "WHADVP", "WHADJP", "IN", "DT", "S", "SQ", "SINV", "SBAR", "FRAG"]
for pe in plist:
for i in xrange(len(child_list)):
if child_list[i] == pe:
return i
# Nothing found, return leftmost
return firstNoPunctChild(child_list)
# SBARQ -- Left -- SQ S SINV SBARQ FRAG
if parent == "SBARQ":
plist = ["SQ", "S", "SINV", "SBARQ", "FRAG"]
for pe in plist:
for i in xrange(len(child_list)):
if child_list[i] == pe:
return i
# Nothing found, return leftmost
return firstNoPunctChild(child_list)
# SINV -- Left -- VBZ VBD VBP VB MD VP S SINV ADJP NP
if parent == "SINV":
plist = ["VBZ", "VBD", "VBP", "VB", "MD", "VP", "S", "SINV", "ADJP", "NP"]
for pe in plist:
for i in xrange(len(child_list)):
if child_list[i] == pe:
return i
# Nothing found, return leftmost
return firstNoPunctChild(child_list)
# SQ -- Left -- VBZ VBD VBP VB MD VP SQ
if parent == "SQ":
plist = ["VBZ", "VBD", "VBP", "VB", "MD", "VP", "SQ"]
for pe in plist:
for i in reversed(xrange(len(child_list))):
if child_list[i] == pe:
return i
# Nothing found, return leftmost
return firstNoPunctChild(child_list)
# UCP -- Right
if parent == "UCP":
return lastNoPunctChild(child_list)
# VP -- Left -- TO VBD VBN MD VBZ VB VBG VBP VP ADJP NN NNS NP
if parent == "VP":
plist = ["TO", "VBD", "VBN", "MD", "VBZ", "VB", "VBG", "VBP", "VP", "ADJP", "NN", "NNS", "NP"]
for pe in plist:
for i in xrange(len(child_list)):
if child_list[i] == pe:
return i
# Nothing found, return leftmost
return firstNoPunctChild(child_list)
# WHADJP -- Left -- CC WRB JJ ADJP
if parent == "WHADJP":
plist = ["CC", "WRB", "JJ", "ADJP"]
for pe in plist:
for i in xrange(len(child_list)):
if child_list[i] == pe:
return i
# Nothing found, return leftmost
return firstNoPunctChild(child_list)
# WHADVP -- Right -- CC WRB
if parent == "WHADVP":
plist = ["CC", "WRB"]
for pe in plist:
for i in reversed(xrange(len(child_list))):
if child_list[i] == pe:
return i
# Nothing found, return rightmost
return lastNoPunctChild(child_list)
# WHNP -- Left -- WDT WP WP$ WHADJP WHPP WHNP
if parent == "WHNP":
plist = ["WDT", "WP", "WP$", "WHADJP", "WHPP", "WHNP"]
for pe in plist:
for i in xrange(len(child_list)):
if child_list[i] == pe:
return i
# Nothing found, return leftmost
return firstNoPunctChild(child_list)
# WHPP -- Right -- IN TO FW
if parent == "WHPP":
plist = ["IN", "TO", "FW"]
for pe in plist:
for i in reversed(xrange(len(child_list))):
if child_list[i] == pe:
return i
# Nothing found, return rightmost
return lastNoPunctChild(child_list)
# No rule found, return leftmost
return firstNoPunctChild(child_list)
def findHeaderCoord(parent, child_list, normalHead):
# Rules for Coordinated Phrases
h = normalHead
if h < len(child_list) - 2:
if child_list[h+1] == "CC":
# Y_h Y_h+1 Y_h+2 forms a triple of non-terminals in a coordinating relationship,
# But since Y_h already the head, nothing happened in unlabeled parsing.
return normalHead
if h > 1:
if child_list[h-1] == "CC":
# Y_h-2 Y_h-1 Y_h forms a triple of non-terminals in a coordinating relationship,
# the head is modified to be Y_h-2 in this case
# Make sure punctuation not selected as the head
if child_list[h-2] not in PUNCTSET:
return h-2
return normalHead
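# Annotate every node label with the 1-based index of its lexical head word (label^index), using the head rules above.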
def lexLabel(tree, labelChar="^"):
# a little hacky way to label the leaves using the index
preterminals = [t for t in tree.subtrees(lambda t: t.height() == 2)]
for i in xrange(len(preterminals)):
preterminals[i][0] = preterminals[i][0] + labelChar + str(i+1)
for pos in tree.treepositions(order='postorder'):
st = tree[pos]
if isinstance(st, str) or isinstance(st, unicode):
continue
else:
# print "*" + str(st)
if len(st) == 1:
st.set_label(st.label() + labelChar + findIndex(st[0]))
else:
child_list_str = [n.label() if isinstance(n, Tree) else n for n in st]
child_list = [n for n in st]
index = findIndex(child_list[findHead(st.label(), child_list_str)])
st.set_label(st.label() + labelChar + findIndex(index))
# e.g. for "word^3" return '3'
def findIndex(s, labelChar='^'):
r_labelChar = labelChar
if isinstance(s, str) or isinstance(s, unicode):
if isinstance(s, unicode):
r_labelChar = r_labelChar.encode('utf-8')
return s[s.rfind(r_labelChar)+1:]
else:
m = s.label()
return m[m.rfind(labelChar)+1:]
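# Walk a head-annotated tree and return, for every word index, its dependency parent index and an arc label of the form "PHRASE+DEPENDENT".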
def getParentDic(lexTree, labelChar='^'):
lt = deepcopy(lexTree)
# build a parent set
dep_set = {}
label_set = {}
# add root to the tree
dep_set[findIndex(lt.label())] = '0'
# label for the root is just ROOT
label_set[findIndex(lt.label())] = 'ROOT'
for pos in lt.treepositions(order='preorder'):
nt = lt[pos]
if isinstance(nt, Tree) and nt.height() != 2 and len(nt) > 1:
ind_p = findIndex(nt.label())
phrase_label = remove_labelChar(nt.label())
head_label = '*'
for c in nt:
ind_c = findIndex(c.label() if isinstance(c, Tree) else c)
if ind_c == ind_p and isinstance(c, Tree) and c.height()!=2:
head_label = remove_labelChar(c.label())
for c in nt:
ind_c = findIndex(c.label() if isinstance(c, Tree) else c)
if not ind_c == ind_p:
dependent_label = '*'
if isinstance(c, Tree) and c.height()!=2:
dependent_label = remove_labelChar(c.label())
dep_set[ind_c] = ind_p
label_set[ind_c] = phrase_label + "+" + dependent_label
return dep_set, label_set
def generateDep(ot, labelChar='^'):
lt = deepcopy(ot)
lexLabel(lt, labelChar)
dep_set, dep_label_set = getParentDic(lt, labelChar)
conll_lines = []
preterminals = [t for t in lt.subtrees(lambda t: t.height() == 2)]
for i in xrange(len(preterminals)):
word = remove_labelChar(preterminals[i][0], labelChar)
pos = remove_labelChar(preterminals[i].label(), labelChar)
index = findIndex(preterminals[i][0], labelChar)
parent = dep_set[index]
label = dep_label_set[index]
conll_lines.append([index, word, pos, parent, label])
return conll_lines
def print_conll_lines(clines, wt):
for l in clines:
wt.write(l[0] + "\t" + l[1] + "\t_\t" + l[2] + "\t" + l[2] + "\t_\t" + l[3] + "\t"+ l[4] +"\t_\t_\n")
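# Head-driven binarization: n-ary nodes are split into binary nodes around the Collins head child, with optional horizontal/vertical markovization.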
def chomsky_normal_form(tree, horzMarkov=None, vertMarkov=0, childChar="|", parentChar="#"):
# assume all subtrees have homogeneous children
# assume all terminals have no siblings
# A semi-hack to have elegant looking code below. As a result,
# any subtree with a branching factor greater than 999 will be incorrectly truncated.
if horzMarkov is None: horzMarkov = 999
# Traverse the tree depth-first keeping a list of ancestor nodes to the root.
# I chose not to use the tree.treepositions() method since it requires
# two traversals of the tree (one to get the positions, one to iterate
# over them) and node access time is proportional to the height of the node.
# This method is 7x faster which helps when parsing 40,000 sentences.
nodeList = [(tree, [tree.label()])]
# print nodeList
while nodeList != []:
node, parent = nodeList.pop()
if isinstance(node,Tree):
# for the node and its children, we want to know the index of the head child under the Collins head rules,
# which is basically the break point of the binarization
child_list = [n.label() for n in node if isinstance(n, Tree)]
if len(child_list) == 0:
continue
# print parent
# print str(node.label()) + "\t-->\t" + "\t".join(child_list)
# The head position is determined by the Collins head rules
head_postion = findHead(node.label(),child_list)
head = child_list[head_postion]
#print child_list[head_postion]
# parent annotation
parentString = ""
originalNode = node.label()
if vertMarkov != 0 and node != tree and isinstance(node[0],Tree):
parentString = "%s<%s>" % (parentChar, "-".join(parent))
node.set_label(node.label() + parentString)
# Attach the higher level parent to the parent list and then pass the into the agenda later
# The originalNode here is the direct parent of the child node
parent = [originalNode] + parent[:vertMarkov - 1]
# add children to the agenda before we mess with them
for child in node:
nodeList.append((child, parent))
# chomsky normal form factorization
if len(node) > 2:
childNodes = [child.label() for child in node]
nodeCopy = node.copy()
node[0:] = [] # delete the children
curNode = node
numChildren = len(nodeCopy)
for i in range(1,numChildren - 1):
next_step_right = ((i+1) < head_postion)
if i < head_postion:
# every time we branch to the right, the left context is consumed by one
del childNodes[0]
newHead = "%s%s" % (originalNode, childChar)
newNode = Tree(newHead, [])
curNode[0:] = [nodeCopy.pop(0), newNode]
else:
del childNodes[-1]
newHead = "%s%s" % (originalNode, childChar)
newNode = Tree(newHead, [])
curNode[0:] = [newNode, nodeCopy.pop()]
curNode = newNode
curNode[0:] = [child for child in nodeCopy]
def un_chomsky_normal_form(tree, expandUnary = True, childChar = "|", parentChar = "#", unaryChar = "+"):
# Traverse the tree depth-first, keeping a pointer to the parent for modification purposes.
nodeList = [(tree,[])]
while nodeList != []:
node,parent = nodeList.pop()
if isinstance(node,Tree):
# if the node contains the 'childChar' character it means that
# it is an artificial node and can be removed, although we still need
# to move its children to its parent
childIndex = node.label().find(childChar)
if childIndex != -1:
nodeIndex = parent.index(node)
parent.remove(parent[nodeIndex])
# Generated node was on the left if the nodeIndex is 0 which
# means the grammar was left factored. We must insert the children
# at the beginning of the parent's children
# if nodeIndex == 0:
parent.insert(nodeIndex,node[0])
parent.insert(nodeIndex+1,node[1])
# else:
# parent.extend([node[0],node[1]])
# parent is now the current node so the children of parent will be added to the agenda
node = parent
else:
parentIndex = node.label().find(parentChar)
if parentIndex != -1:
# strip the node name of the parent annotation
node.set_label(node.label()[:parentIndex])
# expand collapsed unary productions
if expandUnary == True:
unaryIndex = node.label().find(unaryChar)
if unaryIndex != -1:
newNode = Tree(node.label()[unaryIndex + 1:], [i for i in node])
node.set_label(node.label()[:unaryIndex])
node[0:] = [newNode]
for child in node:
nodeList.append((child,node))
#print "#" + str(tree)
def collapse_unary(tree, collapsePOS = False, collapseRoot = False, joinChar = "+"):
"""
Collapse subtrees with a single child (ie. unary productions)
into a new non-terminal (Tree node) joined by 'joinChar'.
This is useful when working with algorithms that do not allow
unary productions, and completely removing the unary productions
would require loss of useful information. The Tree is modified
directly (since it is passed by reference) and no value is returned.
:param tree: The Tree to be collapsed
:type tree: Tree
:param collapsePOS: 'False' (default) will not collapse the parent of leaf nodes (ie.
Part-of-Speech tags) since they are always unary productions
:type collapsePOS: bool
:param collapseRoot: 'False' (default) will not modify the root production
if it is unary. For the Penn WSJ treebank corpus, this corresponds
to the TOP -> productions.
:type collapseRoot: bool
:param joinChar: A string used to connect collapsed node values (default = "+")
:type joinChar: str
"""
if collapseRoot == False and isinstance(tree, Tree) and len(tree) == 1:
nodeList = [tree[0]]
else:
nodeList = [tree]
# depth-first traversal of tree
while nodeList != []:
node = nodeList.pop()
if isinstance(node,Tree):
if len(node) == 1 and isinstance(node[0], Tree) and (collapsePOS == True or isinstance(node[0,0], Tree)):
node.set_label(node.label() + joinChar + node[0].label())
node[0:] = [child for child in node[0]]
# since we assigned the child's children to the current node,
# evaluate the current node again
nodeList.append(node)
else:
for child in node:
nodeList.append(child)
def flat_print(t):
# print the tree in one single line
return t._pprint_flat(nodesep='', parens='()', quotes=False)
# this actually assumes that every bin rule contains one dep, like we assumed in the paper.
def get_binarize_lex(otree, labelChar='^'):
work_tree = deepcopy(otree)
lexLabel(work_tree)
#print work_tree
parent_dic, dep_label_set = getParentDic(work_tree)
# for x in parent_dic:
# print x, parent_dic[x]
work_tree = deepcopy(otree)
chomsky_normal_form(work_tree, horzMarkov=HORZMARKOV, vertMarkov=VERTMARKOV)
preterminals = [t for t in work_tree.subtrees(lambda t: t.height() == 2)]
for i in xrange(len(preterminals)):
preterminals[i][0] = preterminals[i][0] + labelChar + str(i+1)
# print work_tree
# print flat_print(work_tree)
for pos in work_tree.treepositions(order='postorder'):
# print pos
t = work_tree[pos]
if isinstance(t, str) or isinstance(t, unicode):
continue
else:
if len(t) == 1:
t.set_label(t.label() + labelChar + findIndex(t[0]))
else:
x_ind = findIndex(t[0])
y_ind = findIndex(t[1])
#print parent_dic
if parent_dic[x_ind] == y_ind:
t.set_label(t.label() + labelChar + y_ind)
else:
t.set_label(t.label() + labelChar + x_ind)
return work_tree
# This function takes a lexicalized binary tree and returns all binary (and unary) rules as a set.
# Each rule is represented as {unary?} {X} {Y} {Z} {DIR}; we will add the {id} later.
def generate_rules(lex_bin_tree):
rule_set = set([])
for st in lex_bin_tree.subtrees():
if isinstance(st, str) or isinstance(st, unicode):
# these are leaves, just skip
continue
if isinstance(st, Tree):
if st.height() == 2:
# preterminals also don't generate rules
continue
else:
rule = get_rule_for_nt(st)[0]
rule_set.add(rule)
return rule_set
def get_rule_for_nt(st):
if len(st) == 1:
# unary rule
X = remove_labelChar(st.label())
Y = remove_labelChar(st[0].label())
X_ind = findIndex(st.label())
rule = "1 " + X + " " + Y
h = str(int(X_ind) - 1)
m = str(int(X_ind) - 1)
else:
# not unary rule
X = remove_labelChar(st.label())
Y = remove_labelChar(st[0].label())
Z = remove_labelChar(st[1].label())
X_ind = findIndex(st.label())
Y_ind = findIndex(st[0].label())
Z_ind = findIndex(st[1].label())
d = ('0' if X_ind == Y_ind else '1')
rule = "0 " + X + " " + Y + " " + Z + " " + d
h = str(int(X_ind) - 1)
m = str(int(Z_ind) - 1) if X_ind == Y_ind else str(int(Y_ind) - 1)
return (rule, h, m)
def get_span_info(nt, rule_to_ind_dict):
# Given a non-terminal from an indexed tree (^index), generate its span information:
# concretely i, j, k, h, m and rule_num (looked up from rule_to_ind_dict).
# Assume it is a tree here and is not a preterminal.
# First, go all the way down to the left to see the left boundary
pointer = nt
while not (isinstance(pointer,str) or isinstance(pointer, unicode)):
pointer = pointer[0]
span_left = str(int(findIndex(pointer)) - 1)
# All the way right to find the right boundary
pointer = nt
while not (isinstance(pointer,str) or isinstance(pointer, unicode)):
if len(pointer) > 1:
pointer = pointer[1]
else:
# unary
pointer = pointer[0]
span_right = str(int(findIndex(pointer)) - 1)
pointer = nt[0]
while not (isinstance(pointer,str) or isinstance(pointer, unicode)):
if len(pointer) > 1:
pointer = pointer[1]
else:
# unary
pointer = pointer[0]
span_split = str(int(findIndex(pointer)) - 1)
# we will find the h and m and the rule_num
rule, h, m = get_rule_for_nt(nt)
if rule in rule_to_ind_dict:
return (span_left, span_split, span_right, h, m, rule_to_ind_dict[rule])
else:
sys.stderr.write("None rule found!")
return None
|
ikekonglp/PAD
|
python/NewTree.py
|
Python
|
lgpl-3.0
| 35,875
|
import datet<caret>
|
dahlstrom-g/intellij-community
|
python/testData/completion/import.py
|
Python
|
apache-2.0
| 19
|
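# HMAC helpers for signing and verifying messages; note that hmac.new() without a digestmod defaults to MD5 on Python 2.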
import hmac
import os
import warnings

KEY = os.environ.get('UNSUB_KEY')
if not KEY:
    warnings.warn("Using insecure key for HMAC!")
    KEY = 'thisisinsecure'

def generate(msg):
    return hmac.new(KEY, msg).hexdigest()

def verify(sec, msg):
    if isinstance(msg, unicode):
        msg = msg.encode('utf-8')
    if isinstance(sec, unicode):
        sec = sec.encode('utf-8')
    return hmac.compare_digest(generate(msg), sec)

if __name__ == '__main__':
    assert verify(generate('12345'), '12345')
    print 'tests passed'
|
thingless/torweather
|
verifier.py
|
Python
|
mit
| 531
|
import abc
import curses
import re
class EndGameException(Exception):
pass
class Player(object):
"""Recall game player.
A player takes a game (as a constructor argument). When
``play`` is called (no arguments) the game is played to
completion and the result of the game is returned.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, game):
self.game = game
def play(self):
try:
while True:
guess = self.choose_cards()
a, b = self.game.guess(guess)
if a != b:
self.notify_nonmatch(a, guess[0])
self.notify_nonmatch(b, guess[1])
else:
self.notify_match(a, guess)
except EndGameException as e:
outcome = self.game.end(e.args[0])
self.notify_outcome(*outcome)
@abc.abstractmethod
def notify_match(self, value, cards):
pass
@abc.abstractmethod
def notify_nonmatch(self, value, card):
pass
@abc.abstractmethod
def notify_outcome(self, win, msg):
pass
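# AutoPlayer plays with perfect memory: it records every revealed card and immediately requests any known matching pair.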
class AutoPlayer(Player):
def __init__(self, game):
super(AutoPlayer, self).__init__(game)
self.cards = {(x, y) for x in range(game.x) for y in range(game.y)}
self.memory = {}
self.matches = set()
def choose_cards(self):
if self.matches:
return self.matches.pop()
else:
return self.cards.pop(), self.cards.pop()
def notify_nonmatch(self, value, card):
print 'saw {} at {}'.format(value, card)
if value in self.memory:
self.matches.add((self.memory.pop(value), card))
else:
self.memory[value] = card
self.check_end_game()
def notify_match(self, value, cards):
print 'matched {} at {} and {}'.format(value, *cards)
self.check_end_game()
def check_end_game(self):
"""Return raise EndGameException iff in an end state."""
if not self.memory and (
(len(self.cards) == 2 and len(self.matches) == 0)
or (len(self.matches) == 1 and len(self.cards) == 0)
):
raise EndGameException(self.choose_cards())
def notify_outcome(self, win, msg):
print msg
class ManualPlayer(Player):
def __init__(self, game):
super(ManualPlayer, self).__init__(game)
self.cards = {(x, y) for x in range(game.x) for y in range(game.y)}
self.matched_cards = set()
def play(self):
curses.wrapper(self._play)
def _play(self, screen):
self.screen = screen
self.obscure_cards()
self.alert('Enter "end" to end the game')
self.screen.refresh()
super(ManualPlayer, self).play()
def obscure_cards(self):
for x, y in self.cards:
self.screen.addstr(y, x, '@')
for x, y in self.matched_cards:
self.screen.addstr(y, x, ' ')
def choose_cards(self):
return (
self.ask_card('Enter the first card: '),
self.ask_card('Enter the second card: ')
)
def ask_card(self, prompt):
self.screen.move(self.game.y + 1, 0)
self.screen.clrtoeol()
self.screen.addstr(self.game.y + 1, 0, prompt)
curses.echo()
s = self.screen.getstr().strip()
curses.noecho()
if s == 'end':
self.alert('Enter END GAME CARDS: ')
raise EndGameException(self.choose_cards())
try:
values = tuple(map(int, re.split('[,\s]+', s)))
except ValueError:
self.alert('Coordinates must be integer; try again.')
return self.ask_card(prompt)
if len(values) != 2:
self.alert('Card must have two parts; "X, Y".')
return self.ask_card(prompt)
if values not in self.cards:
self.alert('Card is not on the table; try again.')
return self.ask_card(prompt)
# ready to rumble
self.alert('')
self.obscure_cards()
return values
def alert(self, msg):
self.screen.move(self.game.y, 0)
self.screen.clrtoeol()
self.screen.addstr(self.game.y, 0, msg)
def notify_nonmatch(self, value, card):
self.screen.addstr(card[1], card[0], value[0])
def notify_match(self, value, cards):
self.alert('matched {} at {} and {}'.format(value, *cards))
for x, y in cards:
self.screen.addstr(y, x, ' ')
self.cards -= set(cards)
self.matched_cards |= set(cards)
if len(self.cards) == 2:
raise EndGameException(tuple(self.cards))
def notify_outcome(self, win, msg):
self.alert(msg)
self.screen.move(self.game.y + 1, 0)
self.screen.clrtoeol()
self.screen.addstr(self.game.y + 1, 0, "Press ENTER to exit")
self.screen.getstr()
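# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module).  It shows how a
# Player subclass drives a game to completion.  ``_MockGame`` is a
# hypothetical stand-in for the real game object; it only implements the
# attributes and methods the players above actually use: ``x``, ``y``,
# ``guess(cards)`` and ``end(cards)``.
if __name__ == '__main__':
    class _MockGame(object):
        def __init__(self):
            # a 2x2 board holding two pairs of values
            self.x, self.y = 2, 2
            self._values = {(0, 0): 'A', (1, 0): 'B',
                            (0, 1): 'A', (1, 1): 'B'}

        def guess(self, cards):
            # reveal the values behind the two guessed cards
            return self._values[cards[0]], self._values[cards[1]]

        def end(self, cards):
            # the game is won if the final pair of cards matches
            a, b = self.guess(cards)
            return a == b, 'you win' if a == b else 'you lose'

    AutoPlayer(_MockGame()).play()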
|
frasertweedale/recall
|
recall/player.py
|
Python
|
mit
| 4,926
|
"""Tests the sites class."""
import requests_mock
from acapi.resources.environment import Environment
from acapi.resources.environmentlist import EnvironmentList
from acapi.resources.task import Task
from acapi.resources.tasklist import TaskList
from acapi.tests import BaseTest
@requests_mock.Mocker()
class TestSite(BaseTest):
"""Tests the Acquia Cloud API sites class."""
site = None
def setUp(self):
super(TestSite, self).setUp()
self.site = self.client.site("mysite")
def test_copy_code(self, mocker):
"""Tests copying code from one environment to another. """
source = "dev"
target = "staging"
url = (
"https://cloudapi.acquia.com/v1/"
"sites/prod:mysite/code-deploy/"
"{source}/{target}.json".format(source=source, target=target)
)
# Register the copy action.
mocker.register_uri(
"POST", url, json=self.generate_task_dictionary(1137, "waiting", False)
)
# Register the task.
mocker.register_uri(
"GET",
"https://cloudapi.acquia.com/v1/sites/prod:mysite/tasks/1137.json",
json=self.generate_task_dictionary(1137, "done", True),
)
self.site.copy_code(source, target)
def test_environment(self, mocker):
"""Tests environment() method."""
name = "dev"
url = "https://cloudapi.acquia.com/v1/" "sites/prod:mysite/envs/{}.json".format(
name
)
json = {
"name": name,
"vcs_path": "master",
"ssh_host": "srv-1.devcloud.hosting.acquia.com",
"db_clusters": ["4"],
"default_domain": "mysited.msmith.ahclouddev.com",
"livedev": "disabled",
}
mocker.register_uri("GET", url, json=json)
env = self.site.environment("dev")
self.assertIsInstance(env, Environment)
self.assertEqual(env["name"], name)
def test_environments(self, mocker):
"""Tests environments() method."""
url = "https://cloudapi.acquia.com/v1/sites/prod:mysite/envs.json"
json = [
{
"name": "prod",
"vcs_path": "tags/WELCOME",
"ssh_host": "srv-1.devcloud.hosting.acquia.com",
"db_clusters": ["4"],
"default_domain": "mysite.msmith.ahclouddev.com",
"livedev": "disabled",
},
{
"name": "dev",
"vcs_path": "master",
"ssh_host": "srv-1.devcloud.hosting.acquia.com",
"db_clusters": ["4"],
"default_domain": "mysite.msmith.ahclouddev.com",
"livedev": "disabled",
},
]
mocker.register_uri("GET", url, json=json)
env = self.site.environments()
self.assertIsInstance(env, EnvironmentList)
# TODO(skwashd) move this to test_environments
self.assertEqual(env.first()["livedev"], "disabled")
self.assertEqual(env.last()["default_domain"], "mysite.msmith.ahclouddev.com")
self.assertEqual(env["prod"]["name"], "prod")
def test_task(self, mocker):
"""Tests single site task request."""
url = "https://cloudapi.acquia.com/v1/" "sites/prod:mysite/tasks/289466.json"
json = {
"completed": None,
"created": "1331259657",
"description": "Copy files from dev to prod",
"id": "1213",
"logs": "[02:20:58] [02:20:58] Started\n" "[02:21:00] [02:21:00] Failure\n",
"queue": "files-migrate",
"result": "",
"sender": "cloud_api",
"started": "1331259658",
"state": "error",
}
mocker.register_uri("GET", url, json=json)
task = self.site.task(289466)
self.assertIsInstance(task, Task)
def test_tasks(self, mocker):
"""Tests site task list request."""
url = "https://cloudapi.acquia.com/v1/sites/prod:mysite/tasks.json"
json = [
{
"completed": "1331254866",
"created": "1331254863",
"description": "Backup database mysite in dev environment.",
"id": "988",
"logs": "[01:01:04] [01:01:04] Started\n"
"[01:01:06] [01:01:06] Done\n",
"queue": "create-db-backup-ondemand",
"result": '{"backupid":"37"}',
"sender": "cloud_api",
"started": "1331254864",
"state": "done",
}
]
mocker.register_uri("GET", url, json=json)
tasks = self.site.tasks()
self.assertIsInstance(tasks, TaskList)
self.assertEqual(len(tasks), 1)
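# Hedged convenience hook (not part of the original test module): assuming
# BaseTest is a unittest.TestCase subclass, this allows running the file
# directly with `python test_site.py`.
if __name__ == "__main__":
    import unittest

    unittest.main()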
|
skwashd/python-acquia-cloud
|
acapi/tests/test_site.py
|
Python
|
mit
| 4,820
|
r"""Test correct treatment of various string literals by the parser.
There are four types of string literals:
'abc' -- normal str
r'abc' -- raw str
b'xyz' -- normal bytes
br'xyz' -- raw bytes
The difference between normal and raw strings is of course that in a
raw string, \ escapes (while still used to determine the end of the
literal) are not interpreted, so that r'\x00' contains four
characters: a backslash, an x, and two zeros; while '\x00' contains a
single character (code point zero).
The tricky thing is what should happen when non-ASCII bytes are used
inside literals. For bytes literals, this is considered illegal. But
for str literals, those bytes are supposed to be decoded using the
encoding declared for the file (UTF-8 by default).
We have to test this with various file encodings. We also test it with
exec()/eval(), which uses a different code path.
This file is really about correct treatment of encodings and
backslashes. It doesn't concern itself with issues like single
vs. double quotes or singly- vs. triply-quoted strings: that's dealt
with elsewhere (I assume).
"""
import os
import sys
import shutil
import tempfile
import unittest
TEMPLATE = r"""# coding: %s
a = 'x'
assert ord(a) == 120
b = '\x01'
assert ord(b) == 1
c = r'\x01'
assert list(map(ord, c)) == [92, 120, 48, 49]
d = '\x81'
assert ord(d) == 0x81
e = r'\x81'
assert list(map(ord, e)) == [92, 120, 56, 49]
f = '\u1881'
assert ord(f) == 0x1881
g = r'\u1881'
assert list(map(ord, g)) == [92, 117, 49, 56, 56, 49]
"""
def byte(i):
return bytes([i])
class TestLiterals(unittest.TestCase):
def setUp(self):
self.save_path = sys.path[:]
self.tmpdir = tempfile.mkdtemp()
sys.path.insert(0, self.tmpdir)
def tearDown(self):
sys.path = self.save_path
shutil.rmtree(self.tmpdir, ignore_errors=True)
def test_template(self):
# Check that the template doesn't contain any non-printables
# except for \n.
for c in TEMPLATE:
assert c == '\n' or ' ' <= c <= '~', repr(c)
def test_eval_str_normal(self):
self.assertEqual(eval(""" 'x' """), 'x')
self.assertEqual(eval(r""" '\x01' """), chr(1))
self.assertEqual(eval(""" '\x01' """), chr(1))
self.assertEqual(eval(r""" '\x81' """), chr(0x81))
self.assertEqual(eval(""" '\x81' """), chr(0x81))
self.assertEqual(eval(r""" '\u1881' """), chr(0x1881))
self.assertEqual(eval(""" '\u1881' """), chr(0x1881))
def test_eval_str_raw(self):
self.assertEqual(eval(""" r'x' """), 'x')
self.assertEqual(eval(r""" r'\x01' """), '\\' + 'x01')
self.assertEqual(eval(""" r'\x01' """), chr(1))
self.assertEqual(eval(r""" r'\x81' """), '\\' + 'x81')
self.assertEqual(eval(""" r'\x81' """), chr(0x81))
self.assertEqual(eval(r""" r'\u1881' """), '\\' + 'u1881')
self.assertEqual(eval(""" r'\u1881' """), chr(0x1881))
def test_eval_bytes_normal(self):
self.assertEqual(eval(""" b'x' """), b'x')
self.assertEqual(eval(r""" b'\x01' """), byte(1))
self.assertEqual(eval(""" b'\x01' """), byte(1))
self.assertEqual(eval(r""" b'\x81' """), byte(0x81))
self.assertRaises(SyntaxError, eval, """ b'\x81' """)
self.assertEqual(eval(r""" b'\u1881' """), b'\\' + b'u1881')
self.assertRaises(SyntaxError, eval, """ b'\u1881' """)
def test_eval_bytes_raw(self):
self.assertEqual(eval(""" br'x' """), b'x')
self.assertEqual(eval(r""" br'\x01' """), b'\\' + b'x01')
self.assertEqual(eval(""" br'\x01' """), byte(1))
self.assertEqual(eval(r""" br'\x81' """), b"\\" + b"x81")
self.assertRaises(SyntaxError, eval, """ br'\x81' """)
self.assertEqual(eval(r""" br'\u1881' """), b"\\" + b"u1881")
self.assertRaises(SyntaxError, eval, """ br'\u1881' """)
def check_encoding(self, encoding, extra=""):
modname = "xx_" + encoding.replace("-", "_")
fn = os.path.join(self.tmpdir, modname + ".py")
f = open(fn, "w", encoding=encoding)
try:
f.write(TEMPLATE % encoding)
f.write(extra)
finally:
f.close()
__import__(modname)
del sys.modules[modname]
def test_file_utf_8(self):
extra = "z = '\u1234'; assert ord(z) == 0x1234\n"
self.check_encoding("utf-8", extra)
def test_file_utf_8_error(self):
extra = "b'\x80'\n"
self.assertRaises(SyntaxError, self.check_encoding, "utf-8", extra)
def test_file_utf8(self):
self.check_encoding("utf8")
def test_file_iso_8859_1(self):
self.check_encoding("iso-8859-1")
def test_file_latin_1(self):
self.check_encoding("latin-1")
def test_file_latin9(self):
self.check_encoding("latin9")
if __name__ == "__main__":
# Hack so that error messages containing non-ASCII can be printed
sys.stdout._encoding = sys.stderr._encoding = "utf-8"
unittest.main()
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.2/Lib/test/test_strlit.py
|
Python
|
mit
| 5,040
|
#!/usr/bin/env python3
"""exfi.new_correct.py: fill small overlaps and gaps with abyss-sealer"""
import logging
from tempfile import \
mkstemp
from subprocess import Popen
import os
import pandas as pd
from exfi.io.bed import \
BED3_COLS, \
bed3_to_bed4, \
bed4_to_node2sequence, \
bed4_to_edge2overlap
def prepare_sealer(bed4, transcriptome_dict, args):
"""exfi.new_correct.prepare_sealer: inspect the bed4 file and create a fasta
file where pairs of exons have a small gap between them or have a small
overlap.
"""
sealer_input = mkstemp()
max_fp_bases = args["max_fp_bases"]
max_gap_size = args["max_gap_size"]
node2sequence = bed4_to_node2sequence(bed4, transcriptome_dict)
edge2overlap = bed4_to_edge2overlap(bed4)
node2sequence_dict = node2sequence.set_index("name").to_dict()["sequence"]
# Disable warnings
pd.options.mode.chained_assignment = None
# Compute the small gaps
small_gaps = edge2overlap\
.loc[(edge2overlap.overlap < 0) & (edge2overlap.overlap <= max_gap_size)]
small_gaps["identifier"] = small_gaps['u'] + "~" + small_gaps['v']
small_gaps["data_to_map"] = tuple(zip(small_gaps.u, small_gaps.v))
small_gaps["sequence"] = small_gaps.data_to_map\
.map(
lambda x: \
node2sequence_dict[x[0]][0:-max_fp_bases] + \
100 * 'N' + \
node2sequence_dict[x[1]][max_fp_bases:]
)
small_gaps = small_gaps[["identifier", "sequence"]]
# Compute pairs of overlapping exons
overlaps = edge2overlap.loc[edge2overlap.overlap >= 0]
overlaps["data_to_map"] = tuple(zip(overlaps.u, overlaps.v, overlaps.overlap))
overlaps["identifier"] = overlaps.u + "~" + overlaps.v
overlaps["sequence"] = overlaps.data_to_map\
.map(
lambda x: \
node2sequence_dict[x[0]][0:-x[2] - max_fp_bases] + \
100 * 'N' + \
node2sequence_dict[x[1]][x[2] + max_fp_bases:]
)
overlaps = overlaps[["identifier", "sequence"]]
# Put again the warning
pd.options.mode.chained_assignment = 'warn'
# Merge the results
for_sealer = pd.concat([small_gaps, overlaps])
for_sealer["fasta"] = ">" + for_sealer["identifier"] + "\n" + for_sealer["sequence"] + "\n"
for_sealer = for_sealer[["fasta"]]
with open(sealer_input[1], "w", 1*1024**3) as f_in:
for fasta_record in for_sealer.fasta.values:
f_in.write(fasta_record)
return sealer_input[1]
def run_sealer(sealer_input_fn: str, args: dict) -> str:
"""Run abyss-sealer with the parameters in args, and the scaffold in
sealer_input.
args = {
"kmer": int,
"max_gap_size": int,
"input_bloom": str,
}
:param str sealer_input_fn: Input filename for sealer (the scaffold).
    :param dict args: Dict of arguments for sealer
"""
#logging.debug("\tRunning abyss-sealer")
# Run sealer
sealer_output_prefix = mkstemp()
c_sealer = [
'abyss-sealer',
'--input-scaffold', sealer_input_fn,
'--flank-length', str(args["kmer"]),
'--max-gap-length', "30",
'--kmer', str(args["kmer"]),
'--fix-errors',
'--input-bloom', args["bloom"],
'--mask',
'--output-prefix', sealer_output_prefix[1],
'--verbose'
]
# Execute
p_sealer = Popen(c_sealer)
p_sealer.communicate()
# Clean files
os.remove(sealer_output_prefix[1] + "_log.txt")
os.remove(sealer_output_prefix[1] + "_scaffold.fa")
os.remove(sealer_output_prefix[1])
return sealer_output_prefix[1] + "_merged.fa"
def collect_sealer_results(filename):
"""Read the fasta output from sealer and return the merged nodes"""
if os.path.getsize(filename) == 0:
return pd.DataFrame(data=None, columns=["u", "v"])
headers = pd.read_csv(filename, header=None, sep="\t")
headers = headers.iloc[::2] # Take odd rows: headers.
headers.columns = ["raw"]
headers["clean"] = headers\
.raw\
.str.slice(1)\
.str.rsplit("_", 2).str[0]\
.str.split("~")
headers["u"], headers["v"] = headers.clean.str
headers = headers[["u", "v"]]
headers = headers.reset_index(drop=True)
return headers
def apply_correction_to_bed4(bed4, sealed_edges):
"""Merge nodes into a single ones, being careful with the coordinates"""
if sealed_edges.shape[0] == 0:
return bed4
new_bed4 = bed4.copy().set_index("name")
for row in sealed_edges.iloc[::-1].itertuples():
new_bed4.loc[row.u, "chrom_end"] = new_bed4.loc[row.v, "chrom_end"]
new_bed4 = new_bed4.drop(sealed_edges.v)
new_bed4 = bed3_to_bed4(new_bed4[BED3_COLS])
return new_bed4.reset_index(drop=True)
def correct_bed4(bed4, transcriptome_dict, args):
"""Inspect the bed4 for small gaps and overlaps, write a fasta file for
sealer, and correct the bed4.
"""
logging.info('Preparing abyss-sealer')
sealer_input_fn = prepare_sealer(
bed4=bed4, transcriptome_dict=transcriptome_dict, args=args
)
logging.info('Running abyss-sealer')
sealer_output_fn = run_sealer(sealer_input_fn=sealer_input_fn, args=args)
logging.info('Collecting abyss-sealer\'s results')
sealer_results = collect_sealer_results(filename=sealer_output_fn)
logging.info('Applying correction to BED4')
bed4_corrected = apply_correction_to_bed4(bed4, sealer_results)
os.remove(sealer_input_fn)
os.remove(sealer_output_fn)
return bed4_corrected
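# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module).  The keys below are
# the ones the functions above read from `args`; the values and the Bloom
# filter path are illustrative assumptions only.
#
#   args = {
#       "kmer": 27,
#       "max_gap_size": 10,
#       "max_fp_bases": 5,
#       "bloom": "transcriptome_k27.bloom",
#   }
#   corrected_bed4 = correct_bed4(bed4, transcriptome_dict, args)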
|
jlanga/exfi
|
exfi/correct.py
|
Python
|
mit
| 5,566
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
import calendar
import sys
import time
import math
from peewee import SqliteDatabase, InsertQuery, \
IntegerField, CharField, DoubleField, BooleanField, \
DateTimeField, CompositeKey, fn
from playhouse.flask_utils import FlaskDB
from playhouse.pool import PooledMySQLDatabase
from playhouse.shortcuts import RetryOperationalError
from playhouse.migrate import migrate, MySQLMigrator, SqliteMigrator
from datetime import datetime, timedelta
from base64 import b64encode
from . import config
from .utils import get_pokemon_name, get_pokemon_rarity, get_pokemon_types, get_args
from .transform import transform_from_wgs_to_gcj
from .customLog import printPokemon
log = logging.getLogger(__name__)
args = get_args()
flaskDb = FlaskDB()
db_schema_version = 5
class MyRetryDB(RetryOperationalError, PooledMySQLDatabase):
pass
def init_database(app):
if args.db_type == 'mysql':
log.info('Connecting to MySQL database on %s:%i', args.db_host, args.db_port)
connections = args.db_max_connections
if hasattr(args, 'accounts'):
connections *= len(args.accounts)
db = MyRetryDB(
args.db_name,
user=args.db_user,
password=args.db_pass,
host=args.db_host,
port=args.db_port,
max_connections=connections,
stale_timeout=300)
else:
log.info('Connecting to local SQLite database')
db = SqliteDatabase(args.db)
app.config['DATABASE'] = db
flaskDb.init_app(app)
return db
class BaseModel(flaskDb.Model):
@classmethod
def get_all(cls):
results = [m for m in cls.select().dicts()]
if args.china:
for result in results:
result['latitude'], result['longitude'] = \
transform_from_wgs_to_gcj(
result['latitude'], result['longitude'])
return results
class Pokemon(BaseModel):
# We are base64 encoding the ids delivered by the api
# because they are too big for sqlite to handle
encounter_id = CharField(primary_key=True, max_length=50)
spawnpoint_id = CharField(index=True)
pokemon_id = IntegerField(index=True)
latitude = DoubleField()
longitude = DoubleField()
disappear_time = DateTimeField(index=True)
class Meta:
indexes = ((('latitude', 'longitude'), False),)
@staticmethod
def get_active(swLat, swLng, neLat, neLng):
if swLat is None or swLng is None or neLat is None or neLng is None:
query = (Pokemon
.select()
.where(Pokemon.disappear_time > datetime.utcnow())
.dicts())
else:
query = (Pokemon
.select()
.where((Pokemon.disappear_time > datetime.utcnow()) &
(Pokemon.latitude >= swLat) &
(Pokemon.longitude >= swLng) &
(Pokemon.latitude <= neLat) &
(Pokemon.longitude <= neLng))
.dicts())
pokemons = []
for p in query:
p['pokemon_name'] = get_pokemon_name(p['pokemon_id'])
p['pokemon_rarity'] = get_pokemon_rarity(p['pokemon_id'])
p['pokemon_types'] = get_pokemon_types(p['pokemon_id'])
if args.china:
p['latitude'], p['longitude'] = \
transform_from_wgs_to_gcj(p['latitude'], p['longitude'])
pokemons.append(p)
return pokemons
@staticmethod
def get_active_by_id(ids, swLat, swLng, neLat, neLng):
if swLat is None or swLng is None or neLat is None or neLng is None:
query = (Pokemon
.select()
.where((Pokemon.pokemon_id << ids) &
(Pokemon.disappear_time > datetime.utcnow()))
.dicts())
else:
query = (Pokemon
.select()
.where((Pokemon.pokemon_id << ids) &
(Pokemon.disappear_time > datetime.utcnow()) &
(Pokemon.latitude >= swLat) &
(Pokemon.longitude >= swLng) &
(Pokemon.latitude <= neLat) &
(Pokemon.longitude <= neLng))
.dicts())
pokemons = []
for p in query:
p['pokemon_name'] = get_pokemon_name(p['pokemon_id'])
p['pokemon_rarity'] = get_pokemon_rarity(p['pokemon_id'])
p['pokemon_types'] = get_pokemon_types(p['pokemon_id'])
if args.china:
p['latitude'], p['longitude'] = \
transform_from_wgs_to_gcj(p['latitude'], p['longitude'])
pokemons.append(p)
return pokemons
@classmethod
def get_seen(cls, timediff):
if timediff:
timediff = datetime.utcnow() - timediff
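        # Subquery: per pokemon_id, count sightings within the window and
        # keep the most recent disappear_time; it is joined below to pull the
        # coordinates of that last appearance.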
pokemon_count_query = (Pokemon
.select(Pokemon.pokemon_id,
fn.COUNT(Pokemon.pokemon_id).alias('count'),
fn.MAX(Pokemon.disappear_time).alias('lastappeared')
)
.where(Pokemon.disappear_time > timediff)
.group_by(Pokemon.pokemon_id)
.alias('counttable')
)
query = (Pokemon
.select(Pokemon.pokemon_id,
Pokemon.disappear_time,
Pokemon.latitude,
Pokemon.longitude,
pokemon_count_query.c.count)
.join(pokemon_count_query, on=(Pokemon.pokemon_id == pokemon_count_query.c.pokemon_id))
.where(Pokemon.disappear_time == pokemon_count_query.c.lastappeared)
.dicts()
)
pokemons = []
total = 0
for p in query:
p['pokemon_name'] = get_pokemon_name(p['pokemon_id'])
pokemons.append(p)
total += p['count']
return {'pokemon': pokemons, 'total': total}
@classmethod
def get_appearances(cls, pokemon_id, last_appearance):
query = (Pokemon
.select()
.where((Pokemon.pokemon_id == pokemon_id) &
(Pokemon.disappear_time > datetime.utcfromtimestamp(last_appearance / 1000.0))
)
.order_by(Pokemon.disappear_time.asc())
.dicts()
)
appearances = []
for a in query:
appearances.append(a)
return appearances
@classmethod
def get_spawnpoints(cls, swLat, swLng, neLat, neLng):
query = Pokemon.select(Pokemon.latitude, Pokemon.longitude, Pokemon.spawnpoint_id)
if None not in (swLat, swLng, neLat, neLng):
query = (query
.where((Pokemon.latitude >= swLat) &
(Pokemon.longitude >= swLng) &
(Pokemon.latitude <= neLat) &
(Pokemon.longitude <= neLng)
)
)
# Sqlite doesn't support distinct on columns
if args.db_type == 'mysql':
query = query.distinct(Pokemon.spawnpoint_id)
else:
query = query.group_by(Pokemon.spawnpoint_id)
return list(query.dicts())
@classmethod
def get_spawnpoints_in_hex(cls, center, steps):
        log.info('got {} steps'.format(steps))
# work out hex bounding box
hdist = ((steps * 120.0) - 50.0) / 1000.0
vdist = ((steps * 105.0) - 35.0) / 1000.0
R = 6378.1 # km radius of the earth
vang = math.degrees(vdist / R)
hang = math.degrees(hdist / (R * math.cos(math.radians(center[0]))))
north = center[0] + vang
south = center[0] - vang
east = center[1] + hang
west = center[1] - hang
# get all spawns in that box
query = (Pokemon
.select(Pokemon.latitude.alias('lat'),
Pokemon.longitude.alias('lng'),
((Pokemon.disappear_time.minute * 60) + Pokemon.disappear_time.second).alias('time'),
Pokemon.spawnpoint_id
))
query = (query.where((Pokemon.latitude <= north) &
(Pokemon.latitude >= south) &
(Pokemon.longitude >= west) &
(Pokemon.longitude <= east)
))
# Sqlite doesn't support distinct on columns
if args.db_type == 'mysql':
query = query.distinct(Pokemon.spawnpoint_id)
else:
query = query.group_by(Pokemon.spawnpoint_id)
s = list(query.dicts())
# for each spawn work out if it is in the hex (clipping the diagonals)
trueSpawns = []
for spawn in s:
spawn['time'] = (spawn['time'] + 2700) % 3600
# get the offset from the center of each spawn in km
offset = [math.radians(spawn['lat'] - center[0]) * R, math.radians(spawn['lng'] - center[1]) * (R * math.cos(math.radians(center[0])))]
            # check against the 4 lines that make up the diagonals
if (offset[1] + (offset[0] * 0.5)) > hdist: # too far ne
continue
if (offset[1] - (offset[0] * 0.5)) > hdist: # too far se
continue
if ((offset[0] * 0.5) - offset[1]) > hdist: # too far nw
continue
if ((0 - offset[1]) - (offset[0] * 0.5)) > hdist: # too far sw
continue
            # if it gets to here it's a good spawn
trueSpawns.append(spawn)
return trueSpawns
class Pokestop(BaseModel):
pokestop_id = CharField(primary_key=True, max_length=50)
enabled = BooleanField()
latitude = DoubleField()
longitude = DoubleField()
last_modified = DateTimeField(index=True)
lure_expiration = DateTimeField(null=True, index=True)
active_fort_modifier = CharField(max_length=50, null=True)
class Meta:
indexes = ((('latitude', 'longitude'), False),)
@staticmethod
def get_stops(swLat, swLng, neLat, neLng):
if swLat is None or swLng is None or neLat is None or neLng is None:
query = (Pokestop
.select()
.dicts())
else:
query = (Pokestop
.select()
.where((Pokestop.latitude >= swLat) &
(Pokestop.longitude >= swLng) &
(Pokestop.latitude <= neLat) &
(Pokestop.longitude <= neLng))
.dicts())
pokestops = []
for p in query:
if args.china:
p['latitude'], p['longitude'] = \
transform_from_wgs_to_gcj(p['latitude'], p['longitude'])
pokestops.append(p)
return pokestops
class Gym(BaseModel):
UNCONTESTED = 0
TEAM_MYSTIC = 1
TEAM_VALOR = 2
TEAM_INSTINCT = 3
gym_id = CharField(primary_key=True, max_length=50)
team_id = IntegerField()
guard_pokemon_id = IntegerField()
gym_points = IntegerField()
enabled = BooleanField()
latitude = DoubleField()
longitude = DoubleField()
last_modified = DateTimeField(index=True)
class Meta:
indexes = ((('latitude', 'longitude'), False),)
@staticmethod
def get_gyms(swLat, swLng, neLat, neLng):
if swLat is None or swLng is None or neLat is None or neLng is None:
query = (Gym
.select()
.dicts())
else:
query = (Gym
.select()
.where((Gym.latitude >= swLat) &
(Gym.longitude >= swLng) &
(Gym.latitude <= neLat) &
(Gym.longitude <= neLng))
.dicts())
gyms = []
for g in query:
gyms.append(g)
return gyms
class ScannedLocation(BaseModel):
latitude = DoubleField()
longitude = DoubleField()
last_modified = DateTimeField(index=True)
class Meta:
primary_key = CompositeKey('latitude', 'longitude')
@staticmethod
def get_recent(swLat, swLng, neLat, neLng):
query = (ScannedLocation
.select()
.where((ScannedLocation.last_modified >=
(datetime.utcnow() - timedelta(minutes=15))) &
(ScannedLocation.latitude >= swLat) &
(ScannedLocation.longitude >= swLng) &
(ScannedLocation.latitude <= neLat) &
(ScannedLocation.longitude <= neLng))
.dicts())
scans = []
for s in query:
scans.append(s)
return scans
class Versions(flaskDb.Model):
key = CharField()
val = IntegerField()
class Meta:
primary_key = False
# todo: this probably shouldn't _really_ be in "models" anymore, but w/e
def parse_map(args, map_dict, step_location, db_update_queue, wh_update_queue):
pokemons = {}
pokestops = {}
gyms = {}
cells = map_dict['responses']['GET_MAP_OBJECTS']['map_cells']
for cell in cells:
if config['parse_pokemon']:
for p in cell.get('wild_pokemons', []):
# time_till_hidden_ms was overflowing causing a negative integer.
# It was also returning a value above 3.6M ms.
if 0 < p['time_till_hidden_ms'] < 3600000:
d_t = datetime.utcfromtimestamp(
(p['last_modified_timestamp_ms'] +
p['time_till_hidden_ms']) / 1000.0)
else:
                    # Set a value of 15 minutes because it's currently unknown, but larger than 15 minutes.
d_t = datetime.utcfromtimestamp((p['last_modified_timestamp_ms'] + 900000) / 1000.0)
printPokemon(p['pokemon_data']['pokemon_id'], p['latitude'],
p['longitude'], d_t)
pokemons[p['encounter_id']] = {
'encounter_id': b64encode(str(p['encounter_id'])),
'spawnpoint_id': p['spawn_point_id'],
'pokemon_id': p['pokemon_data']['pokemon_id'],
'latitude': p['latitude'],
'longitude': p['longitude'],
'disappear_time': d_t
}
if args.webhooks:
wh_update_queue.put(('pokemon', {
'encounter_id': b64encode(str(p['encounter_id'])),
'spawnpoint_id': p['spawn_point_id'],
'pokemon_id': p['pokemon_data']['pokemon_id'],
'latitude': p['latitude'],
'longitude': p['longitude'],
'disappear_time': calendar.timegm(d_t.timetuple()),
'last_modified_time': p['last_modified_timestamp_ms'],
'time_until_hidden_ms': p['time_till_hidden_ms']
}))
for f in cell.get('forts', []):
if config['parse_pokestops'] and f.get('type') == 1: # Pokestops
if 'active_fort_modifier' in f:
lure_expiration = datetime.utcfromtimestamp(
f['last_modified_timestamp_ms'] / 1000.0) + timedelta(minutes=30)
active_fort_modifier = f['active_fort_modifier']
if args.webhooks and args.webhook_updates_only:
wh_update_queue.put(('pokestop', {
'pokestop_id': b64encode(str(f['id'])),
'enabled': f['enabled'],
'latitude': f['latitude'],
'longitude': f['longitude'],
'last_modified_time': f['last_modified_timestamp_ms'],
'lure_expiration': calendar.timegm(lure_expiration.timetuple()),
'active_fort_modifier': active_fort_modifier
}))
else:
lure_expiration, active_fort_modifier = None, None
pokestops[f['id']] = {
'pokestop_id': f['id'],
'enabled': f['enabled'],
'latitude': f['latitude'],
'longitude': f['longitude'],
'last_modified': datetime.utcfromtimestamp(
f['last_modified_timestamp_ms'] / 1000.0),
'lure_expiration': lure_expiration,
'active_fort_modifier': active_fort_modifier
}
# Send all pokéstops to webhooks
if args.webhooks and not args.webhook_updates_only:
# Explicitly set 'webhook_data', in case we want to change the information pushed to webhooks,
# similar to above and previous commits.
l_e = None
if lure_expiration is not None:
l_e = calendar.timegm(lure_expiration.timetuple())
wh_update_queue.put(('pokestop', {
'pokestop_id': b64encode(str(f['id'])),
'enabled': f['enabled'],
'latitude': f['latitude'],
'longitude': f['longitude'],
'last_modified': calendar.timegm(pokestops[f['id']]['last_modified'].timetuple()),
'lure_expiration': l_e,
'active_fort_modifier': active_fort_modifier
}))
elif config['parse_gyms'] and f.get('type') is None: # Currently, there are only stops and gyms
gyms[f['id']] = {
'gym_id': f['id'],
'team_id': f.get('owned_by_team', 0),
'guard_pokemon_id': f.get('guard_pokemon_id', 0),
'gym_points': f.get('gym_points', 0),
'enabled': f['enabled'],
'latitude': f['latitude'],
'longitude': f['longitude'],
'last_modified': datetime.utcfromtimestamp(
f['last_modified_timestamp_ms'] / 1000.0),
}
# Send gyms to webhooks
if args.webhooks and not args.webhook_updates_only:
# Explicitly set 'webhook_data', in case we want to change the information pushed to webhooks,
# similar to above and previous commits.
wh_update_queue.put(('gym', {
'gym_id': b64encode(str(f['id'])),
'team_id': f.get('owned_by_team', 0),
'guard_pokemon_id': f.get('guard_pokemon_id', 0),
'gym_points': f.get('gym_points', 0),
'enabled': f['enabled'],
'latitude': f['latitude'],
'longitude': f['longitude'],
'last_modified': calendar.timegm(gyms[f['id']]['last_modified'].timetuple())
}))
if len(pokemons):
db_update_queue.put((Pokemon, pokemons))
if len(pokestops):
db_update_queue.put((Pokestop, pokestops))
if len(gyms):
db_update_queue.put((Gym, gyms))
log.info('Parsing found %d pokemons, %d pokestops, and %d gyms',
len(pokemons),
len(pokestops),
len(gyms))
db_update_queue.put((ScannedLocation, {0: {
'latitude': step_location[0],
'longitude': step_location[1],
'last_modified': datetime.utcnow()
}}))
return len(pokemons) + len(pokestops) + len(gyms)
def db_updater(args, q):
# The forever loop
while True:
try:
while True:
try:
flaskDb.connect_db()
break
except Exception as e:
log.warning('%s... Retrying', e)
# Loop the queue
while True:
model, data = q.get()
bulk_upsert(model, data)
q.task_done()
log.debug('Upserted to %s, %d records (upsert queue remaining: %d)',
model.__name__,
len(data),
q.qsize())
if q.qsize() > 50:
log.warning("DB queue is > 50 (@%d); try increasing --db-threads", q.qsize())
except Exception as e:
log.exception('Exception in db_updater: %s', e)
def clean_db_loop(args):
while True:
try:
# Clean out old scanned locations
query = (ScannedLocation
.delete()
.where((ScannedLocation.last_modified <
(datetime.utcnow() - timedelta(minutes=30)))))
query.execute()
# If desired, clear old pokemon spawns
if args.purge_data > 0:
query = (Pokemon
.delete()
.where((Pokemon.disappear_time <
                                (datetime.utcnow() - timedelta(hours=args.purge_data)))))
                query.execute()
log.info('Regular database cleaning complete')
time.sleep(60)
except Exception as e:
log.exception('Exception in clean_db_loop: %s', e)
def bulk_upsert(cls, data):
num_rows = len(data.values())
i = 0
step = 120
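    # Upsert in chunks of `step` rows; on failure the same chunk is retried,
    # since `i` is only advanced after a successful insert.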
while i < num_rows:
log.debug('Inserting items %d to %d', i, min(i + step, num_rows))
try:
InsertQuery(cls, rows=data.values()[i:min(i + step, num_rows)]).upsert().execute()
except Exception as e:
log.warning('%s... Retrying', e)
continue
i += step
def create_tables(db):
db.connect()
verify_database_schema(db)
db.create_tables([Pokemon, Pokestop, Gym, ScannedLocation], safe=True)
db.close()
def drop_tables(db):
db.connect()
db.drop_tables([Pokemon, Pokestop, Gym, ScannedLocation, Versions], safe=True)
db.close()
def verify_database_schema(db):
if not Versions.table_exists():
db.create_tables([Versions])
if ScannedLocation.table_exists():
# Versions table didn't exist, but there were tables. This must mean the user
# is coming from a database that existed before we started tracking the schema
# version. Perform a full upgrade.
InsertQuery(Versions, {Versions.key: 'schema_version', Versions.val: 0}).execute()
database_migrate(db, 0)
else:
InsertQuery(Versions, {Versions.key: 'schema_version', Versions.val: db_schema_version}).execute()
else:
db_ver = Versions.get(Versions.key == 'schema_version').val
if db_ver < db_schema_version:
database_migrate(db, db_ver)
elif db_ver > db_schema_version:
log.error("Your database version (%i) appears to be newer than the code supports (%i).",
db_ver, db_schema_version)
log.error("Please upgrade your code base or drop all tables in your database.")
sys.exit(1)
def database_migrate(db, old_ver):
# Update database schema version
Versions.update(val=db_schema_version).where(Versions.key == 'schema_version').execute()
log.info("Detected database version %i, updating to %i", old_ver, db_schema_version)
# Perform migrations here
migrator = None
if args.db_type == 'mysql':
migrator = MySQLMigrator(db)
else:
migrator = SqliteMigrator(db)
# No longer necessary, we're doing this at schema 4 as well
# if old_ver < 1:
# db.drop_tables([ScannedLocation])
if old_ver < 2:
migrate(migrator.add_column('pokestop', 'encounter_id', CharField(max_length=50, null=True)))
if old_ver < 3:
migrate(
migrator.add_column('pokestop', 'active_fort_modifier', CharField(max_length=50, null=True)),
migrator.drop_column('pokestop', 'encounter_id'),
migrator.drop_column('pokestop', 'active_pokemon_id')
)
if old_ver < 4:
db.drop_tables([ScannedLocation])
if old_ver < 5:
# Some pokemon were added before the 595 bug was "fixed"
# Clean those up for a better UX
query = (Pokemon
.delete()
.where(Pokemon.disappear_time >
(datetime.utcnow() - timedelta(hours=24))))
query.execute()
|
pir2/PokemonGo-Map
|
pogom/models.py
|
Python
|
agpl-3.0
| 25,093
|
#!/usr/bin/env python
'''
Author: Christopher Duffy
Date: February 2015
Name: nmap_scanner.py
Purpose: To scan a network
Copyright (c) 2015, Christopher Duffy All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met: * Redistributions
of source code must retain the above copyright notice, this list of conditions and
the following disclaimer. * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution. * Neither the
name of the nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CHRISTOPHER DUFFY BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import sys
try:
import nmap
except:
sys.exit("[!] Install the nmap library: pip install python-nmap")
# Argument Validator
if len(sys.argv) != 3:
sys.exit("Please provide two arguments the first being the targets the second the ports")
ports = str(sys.argv[2])
addrs = str(sys.argv[1])
scanner = nmap.PortScanner()
scanner.scan(addrs, ports)
for host in scanner.all_hosts():
    if not scanner[host].hostname():
        print("The host's IP address is %s and its hostname was not found") % (host)
    else:
        print("The host's IP address is %s and its hostname is %s") % (host, scanner[host].hostname())
|
liorvh/pythonpentest
|
nmap_scannner.py
|
Python
|
bsd-3-clause
| 2,228
|
# -*- coding: UTF-8 -*-
'''
Copyright (c) 2015 Scouter Project.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import socket,io
import time,sys
import json
import struct
from scouter.lang.inout import DataInputSocket, DataOutputX
from scouter.lang.pack import MapPack
from scouter.lang.value import TextValue
from scouter.lang.utility import *
import traceback
import binascii
class TCP():
HasNext = 0x03
NoNext = 0x04
handlerTable=dict()
TCP_AGENT=0xCAFE1001
objHash=binascii.crc32(objname())
localAddr='127.0.0.1'
def getLocalAddr():
return localAddr
def startReqHandler(host, port, handlers):
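    # Wire protocol (as implemented below): on connect the agent writes two
    # 32-bit ints (the TCP_AGENT magic, then objHash).  The server then sends
    # <text command><pack> requests; each reply is a HasNext byte plus the
    # result pack (when a handler returned one), always terminated by NoNext.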
global listen_addr
global handlerTable
localAddr = '127.0.0.1'
handlerTable=handlers
while True:
try:
BRUN=True
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host,port))
sock.settimeout(60000)
out=DataOutputX()
out.writeInt(TCP_AGENT)
out.writeInt(objHash)
sock.send(out.toByteArray())
inx=DataInputSocket(sock)
while BRUN:
try:
out=DataOutputX()
cmd = inx.readText()
pack = inx.readPack()
if handlerTable.has_key(cmd):
result=handlerTable[cmd](pack)
else:
print 'unknown command: ' + str(cmd)
result=MapPack()
result.putValue('msg', TextValue('unknown command: ' + str(cmd)))
if result != None:
out.writeByte(TCP.HasNext)
out.writePack(result)
out.writeByte(TCP.NoNext)
sock.sendall(out.toByteArray())
except:
traceback.print_exc(file=sys.stdout)
sock.close()
BRUN=False
except:
time.sleep(5)
pass
|
jw0201/scouter
|
scouter.host/scouter/lang/request.py
|
Python
|
apache-2.0
| 2,811
|
class RenFoo(object):
pass
print RenFoo
#comment: RenFoo must be renamed
'string: RenFoo must be renamed'
|
aptana/Pydev
|
tests/com.python.pydev.refactoring.tests/src/pysrcrefactoring/reflib/renameclass/renfoo.py
|
Python
|
epl-1.0
| 111
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualMachineScaleSet(Resource):
"""Describes a Virtual Machine Scale Set.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Required. Resource location
:type location: str
:param tags: Resource tags
:type tags: dict[str, str]
:param sku: The virtual machine scale set sku.
:type sku: ~azure.mgmt.compute.v2016_04_30_preview.models.Sku
:param plan: Specifies information about the marketplace image used to
create the virtual machine. This element is only used for marketplace
images. Before you can use a marketplace image from an API, you must
enable the image for programmatic use. In the Azure portal, find the
marketplace image that you want to use and then click **Want to deploy
programmatically, Get Started ->**. Enter any required information and
then click **Save**.
:type plan: ~azure.mgmt.compute.v2016_04_30_preview.models.Plan
:param upgrade_policy: The upgrade policy.
:type upgrade_policy:
~azure.mgmt.compute.v2016_04_30_preview.models.UpgradePolicy
:param virtual_machine_profile: The virtual machine profile.
:type virtual_machine_profile:
~azure.mgmt.compute.v2016_04_30_preview.models.VirtualMachineScaleSetVMProfile
:ivar provisioning_state: The provisioning state, which only appears in
the response.
:vartype provisioning_state: str
:param over_provision: Specifies whether the Virtual Machine Scale Set
should be overprovisioned.
:type over_provision: bool
:param single_placement_group: When true this limits the scale set to a
single placement group, of max size 100 virtual machines.
:type single_placement_group: bool
:param identity: The identity of the virtual machine scale set, if
configured.
:type identity:
~azure.mgmt.compute.v2016_04_30_preview.models.VirtualMachineScaleSetIdentity
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
'plan': {'key': 'plan', 'type': 'Plan'},
'upgrade_policy': {'key': 'properties.upgradePolicy', 'type': 'UpgradePolicy'},
'virtual_machine_profile': {'key': 'properties.virtualMachineProfile', 'type': 'VirtualMachineScaleSetVMProfile'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'over_provision': {'key': 'properties.overProvision', 'type': 'bool'},
'single_placement_group': {'key': 'properties.singlePlacementGroup', 'type': 'bool'},
'identity': {'key': 'identity', 'type': 'VirtualMachineScaleSetIdentity'},
}
def __init__(self, **kwargs):
super(VirtualMachineScaleSet, self).__init__(**kwargs)
self.sku = kwargs.get('sku', None)
self.plan = kwargs.get('plan', None)
self.upgrade_policy = kwargs.get('upgrade_policy', None)
self.virtual_machine_profile = kwargs.get('virtual_machine_profile', None)
self.provisioning_state = None
self.over_provision = kwargs.get('over_provision', None)
self.single_placement_group = kwargs.get('single_placement_group', None)
self.identity = kwargs.get('identity', None)
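# Hedged usage sketch (not part of the generated model): the kwargs-based
# constructor above is typically driven like the commented example below;
# the location and property values are illustrative assumptions only.
#
#   vmss = VirtualMachineScaleSet(
#       location="westeurope",
#       sku=Sku(name="Standard_DS1_v2", capacity=2),
#       over_provision=True,
#   )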
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/virtual_machine_scale_set.py
|
Python
|
mit
| 4,404
|
# Copyright 2015 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""OS abstraction OS specific utility functions."""
# pylint: disable=unnecessary-lambda
import logging
import sys
if sys.platform == 'cygwin':
from api.platforms import gce
from api.platforms import posix
from api.platforms import win
is_gce = lambda: gce.is_gce() # to reuse gce.is_gce mock, if any
if sys.platform == 'darwin':
from api.platforms import osx
from api.platforms import posix
is_gce = lambda: False
if sys.platform == 'win32':
from api.platforms import gce
from api.platforms import win
is_gce = lambda: gce.is_gce() # to reuse gce.is_gce mock, if any
if sys.platform == 'linux':
try:
from api.platforms import android
except OSError:
logging.warning('failed to import android', exc_info=True)
android = None
from api.platforms import gce
from api.platforms import linux
from api.platforms import posix
is_gce = lambda: gce.is_gce() # to reuse gce.is_gce mock, if any
|
luci/luci-py
|
appengine/swarming/swarming_bot/api/platforms/__init__.py
|
Python
|
apache-2.0
| 1,096
|
# coding=utf-8
# Author: Dennis Lutter <lad1337@gmail.com>
# URL: https://sickrage.github.io/
# Git: https://github.com/SickRage/SickRage.git
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import sickbeard
from sickbeard import db, logger, helpers
from adba.aniDBerrors import AniDBCommandTimeoutError
class BlackAndWhiteList(object):
blacklist = []
whitelist = []
def __init__(self, show_id):
if not show_id:
raise BlackWhitelistNoShowIDException()
self.show_id = show_id
self.load()
def load(self):
"""
Builds black and whitelist
"""
logger.log(u'Building black and white list for ' + str(self.show_id), logger.DEBUG)
self.blacklist = self._load_list('blacklist')
self.whitelist = self._load_list('whitelist')
def _add_keywords(self, table, values):
"""
DB: Adds keywords into database for current show
:param table: SQL table to add keywords to
:param values: Values to be inserted in table
"""
main_db_con = db.DBConnection()
for value in values:
main_db_con.action('INSERT INTO [' + table + '] (show_id, keyword) VALUES (?,?)', [self.show_id, value])
def set_black_keywords(self, values):
"""
Sets blacklist to new value
:param values: Complete list of keywords to be set as blacklist
"""
self._del_all_keywords('blacklist')
self._add_keywords('blacklist', values)
self.blacklist = values
logger.log(u'Blacklist set to: %s' % self.blacklist, logger.DEBUG)
def set_white_keywords(self, values):
"""
Sets whitelist to new value
:param values: Complete list of keywords to be set as whitelist
"""
self._del_all_keywords('whitelist')
self._add_keywords('whitelist', values)
self.whitelist = values
logger.log(u'Whitelist set to: %s' % self.whitelist, logger.DEBUG)
def _del_all_keywords(self, table):
"""
DB: Remove all keywords for current show
:param table: SQL table remove keywords from
"""
main_db_con = db.DBConnection()
main_db_con.action('DELETE FROM [' + table + '] WHERE show_id = ?', [self.show_id])
def _load_list(self, table):
"""
DB: Fetch keywords for current show
:param table: Table to fetch list of keywords from
:return: keywords in list
"""
main_db_con = db.DBConnection()
sql_results = main_db_con.select('SELECT keyword FROM [' + table + '] WHERE show_id = ?', [self.show_id])
if not sql_results or not len(sql_results):
return []
groups = []
for result in sql_results:
groups.append(result["keyword"])
logger.log(u'BWL: ' + str(self.show_id) + ' loaded keywords from ' + table + ': ' + str(groups), logger.DEBUG)
return groups
def is_valid(self, result):
"""
Check if result is valid according to white/blacklist for current show
:param result: Result to analyse
:return: False if result is not allowed in white/blacklist, True if it is
"""
if self.whitelist or self.blacklist:
if not result.release_group:
logger.log('Failed to detect release group, invalid result', logger.DEBUG)
return False
if result.release_group.lower() in [x.lower() for x in self.whitelist]:
white_result = True
elif not self.whitelist:
white_result = True
else:
white_result = False
if result.release_group.lower() in [x.lower() for x in self.blacklist]:
black_result = False
else:
black_result = True
logger.log(u'Whitelist check passed: %s. Blacklist check passed: %s' % (white_result, black_result), logger.DEBUG)
if white_result and black_result:
return True
else:
return False
else:
logger.log(u'No Whitelist and Blacklist defined, check passed.', logger.DEBUG)
return True
class BlackWhitelistNoShowIDException(Exception):
"""No show_id was given"""
def short_group_names(groups):
"""
Find AniDB short group names for release groups
:param groups: list of groups to find short group names for
:return: list of shortened group names
"""
groups = groups.split(",")
shortGroupList = []
if helpers.set_up_anidb_connection():
for groupName in groups:
try:
group = sickbeard.ADBA_CONNECTION.group(gname=groupName)
except AniDBCommandTimeoutError:
logger.log(u"Timeout while loading group from AniDB. Trying next group", logger.DEBUG)
except Exception:
logger.log(u"Failed while loading group from AniDB. Trying next group", logger.DEBUG)
else:
for line in group.datalines:
if line["shortname"]:
shortGroupList.append(line["shortname"])
else:
if groupName not in shortGroupList:
shortGroupList.append(groupName)
else:
shortGroupList = groups
return shortGroupList
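# Hedged usage sketch (not part of the original module): the show id and the
# `result` object are illustrative assumptions; is_valid() only inspects the
# result's `release_group` attribute.
#
#   bwl = BlackAndWhiteList(show.indexerid)
#   bwl.set_white_keywords(['HorribleSubs'])
#   if not bwl.is_valid(result):
#       ...  # skip this search result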
|
p0psicles/SickRage
|
sickbeard/blackandwhitelist.py
|
Python
|
gpl-3.0
| 6,013
|
# Copyright 2017 Krzysztof Stopa (stopa.krzysztof.k@gmail.com)
# This file is part of Copernicus Atmosphere Monitoring Service (CAMS) downloading and
# processing tools (CAMS tools).
# CAMS tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or any later
# version.
# CAMS tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with CAMS tools. If not, see <http://www.gnu.org/licenses/>.
from os import path
import datetime
import requests
import sys
from cams.tools import Converter
from cams.core import Param
class Model(Param):
CHIMERE = 'CHIMERE'
EMEP = 'EMEP'
ENSEMBLE = 'ENSEMBLE'
EURAD = 'EURAD'
LOTOSEUROS = 'LOTOSEUROS'
MATCH = 'MATCH'
MOCAGE = 'MOCAGE'
SILAM = 'SILAM'
class PackageType(Param):
ANALYSIS = 'ANALYSIS'
FORECAST = 'FORECAST'
ANALYSIS_REPORT = 'ANALYSISREPORT'
FORECAST_REPORT = 'FORECASTREPORT'
class PackageSpecies(Param):
CO = 'CO'
NH3 = 'NH3'
NMVOC = 'NMVOC'
NO = 'NO'
NO2 = 'NO2'
O3 = 'O3'
PANS = 'PANS'
PM10 = 'PM10'
PM25 = 'PM25'
SO2 = 'SO2'
BIRCH_POLLEN = 'BIRCHPOLLEN'
OLIVE_POLLEN = 'OLIVEPOLLEN'
GRASS_POLLEN = 'GRASSPOLLEN'
ALL_SPECIES = 'ALLSPECIES'
class PackageLevel(Param):
SURFACE = 'SURFACE'
ALL_LEVELS = 'ALLLEVELS'
class Time(Param):
ANALYSIS_24H1H = '-24H-1H'
FORECAST_0H24H = '0H24H'
FORECAST_25H48H = '25H48H'
FORECAST_49H72H = '49H72H'
FORECAST_73H96H = '73H96H'
ALLTIMES = 'ALLTIMES' # Custom option for download all times (not included in REST API)
def get_base_time(self):
if self == Time.ANALYSIS_24H1H:
return -24
elif self == Time.FORECAST_0H24H:
return -1 # Forecast 0H24 includes 0 hour with base analysis.
elif self == Time.FORECAST_25H48H:
return 24
elif self == Time.FORECAST_49H72H:
return 48
else:
return 72
def get_hours_range(self):
"""
        Get the number of hours available in a time period package; in effect, the number of bands in a package.
        Usually 24, but Time.FORECAST_0H24H also includes a band with the base analysis (time 00).
:return: Number of bands available in a time period package.
"""
if self == Time.FORECAST_0H24H:
return 25
else:
return 24
class Format(Param):
GRIB2 = 'GRIB2'
NETCDF = 'NETCDF'
TXT = 'TXT'
class Downloader:
_base_url = 'https://download.regional.atmosphere.copernicus.eu/services/CAMS50?'
_token = '__M0bChV6QsoOFqHz31VRqnpr4GhWPtcpaRy3oeZjBNSg__'
_licence = 'yes'
_grid = '0.1'
@staticmethod
def download(out_path='./', reference_time=datetime.datetime.now(), model=Model.ENSEMBLE,
type=PackageType.FORECAST, species=PackageSpecies.ALL_SPECIES, level=PackageLevel.SURFACE,
time=Time.FORECAST_0H24H, data_format=Format.GRIB2):
"""
Online data relate to data less than 30 days old.
        Forecasts and analyses are available in Grib Edition 2 and/or Netcdf format, depending on the model:
- Available file format for Ensemble data are Grib Edition 2 and Netcdf.
- Available file format for partners data are Netcdf only.
        Archived data means data more than 30 days old. This Regional Air Quality archive has been available since October 1st, 2015.
        Below are a few more particularities about Regional Air Quality archived production:
- Forecasts and analyses are currently available in Grib Edition2 format for archived products.
- Analyses issued from Ensemble model are available FOR ALL SPECIES since March 2016. Before this date, only O3
and NO2 species were produced by a sufficient number of models to generate reliable Ensemble products. So please,
        don't use analysis data for pollutants other than O3 and NO2 before March 2016.
Note that archived analyses from partner models could be missing, occasionally, in case of production problems.
In such a case, please use Ensemble analyses instead.
:param out_path: path to the folder where data will be stored.
:param reference_time: Date of the forecast and analysis production
:type reference_time: datetime
:param model: Name of one of the available CAMS atmospheric models
:type model: Model
:param type: One of the available package types for analysis or forecast products
:type type: PackageType
:param species: One of the available package species (O3, CO, etc.)
:type species: PackageSpecies
:param level: Surface or all available levels
:type level: PackageLevel
:param time: One of the available analysis or forecast times. ALL TIMES is not available.
:type time: Time
:param data_format: Downloaded data output format
:type data_format: Format
:return: Path to the downloaded file or None if error
"""
# TODO check date and package compatibility
if time == Time.ANALYSIS_24H1H:
type = PackageType.ANALYSIS
        # Reference date is formatted as YYYY-MM-DDT00:00:00Z
params = 'token=' + Downloader._token +\
'&grid=' + Downloader._grid +\
'&model=' + model.value +\
'&package={0}_{1}_{2}'.format(type.value, species.value, level.value) +\
'&time=' + time.value +\
'&referencetime={0}T00:00:00Z'.format(reference_time.strftime("%Y-%m-%d")) +\
'&format=' + data_format.value +\
'&licence=' + Downloader._licence
print('Requesting {0}'.format(Downloader._base_url + params))
resp = requests.get(Downloader._base_url + params, stream=True)
if 'Content-Disposition' in resp.headers:
file_name = resp.headers['Content-Disposition'].replace('inline; filename="', '').replace('"', '')
print('Downloading {0}'.format(file_name))
down_data = 0
down_file = path.join(out_path, file_name)
with open(down_file, "wb") as f:
for data in resp.iter_content(chunk_size=8192):
down_data += len(data)
f.write(data)
sys.stdout.write("\r{0:.2f} Mb".format(down_data / (1000.0 * 1000.0)))
print('...done!')
return down_file
else:
print('Wrong request. Requested product may be unavailable for given parameters.')
return None
@staticmethod
def downloadAll(out_path='./', reference_time=datetime.datetime.now(), model=Model.ENSEMBLE,
species=PackageSpecies.ALL_SPECIES, level=PackageLevel.SURFACE, data_format=Format.GRIB2):
down_files = []
for time in Time:
package_type = PackageType.FORECAST
if time == Time.ANALYSIS_24H1H:
package_type = PackageType.ANALYSIS
# Ignore all time
if time != Time.ALLTIMES and species == PackageSpecies.ALL_SPECIES:
for sp in PackageSpecies:
if sp != PackageSpecies.ALL_SPECIES:
df = Downloader.download(out_path, reference_time, model, package_type, sp, level, time, data_format)
if df is not None:
down_files.append(df)
elif time != Time.ALLTIMES:
# Download just single specie
df = Downloader.download(out_path, reference_time, model, package_type, species, level, time, data_format)
if df is not None:
down_files.append(df)
return down_files
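# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): fetch yesterday's
# ENSEMBLE surface ozone forecast (first 24 hours) as GRIB2 into the current
# directory.  Note that running this performs a real request against the CAMS
# download service.
if __name__ == '__main__':
    yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
    Downloader.download(out_path='./',
                        reference_time=yesterday,
                        model=Model.ENSEMBLE,
                        type=PackageType.FORECAST,
                        species=PackageSpecies.O3,
                        level=PackageLevel.SURFACE,
                        time=Time.FORECAST_0H24H,
                        data_format=Format.GRIB2)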
|
kstopa/cams-tools
|
cams/regional.py
|
Python
|
lgpl-3.0
| 8,211
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import webbrowser
import datetime
import socket
import os
import re
import os.path
import shutil
import shutil_custom
shutil.copyfile = shutil_custom.copyfile_custom
from threading import Lock
import sys
from github import Github
from sickbeard import metadata
from sickbeard import providers
from sickbeard.providers.generic import GenericProvider
from sickbeard.config import CheckSection, check_setting_int, check_setting_str, check_setting_float, ConfigMigrator, \
naming_ep_type
from sickbeard import searchBacklog, showUpdater, versionChecker, properFinder, autoPostProcesser, \
subtitles, traktChecker, numdict
from sickbeard import db
from sickbeard import helpers
from sickbeard import scheduler
from sickbeard import search_queue
from sickbeard import show_queue
from sickbeard import logger
from sickbeard import naming
from sickbeard import dailysearcher
from sickbeard.indexers import indexer_api
from sickbeard.indexers.indexer_exceptions import indexer_shownotfound, indexer_showincomplete, indexer_exception, indexer_error, \
indexer_episodenotfound, indexer_attributenotfound, indexer_seasonnotfound, indexer_userabort, indexerExcepts
from sickbeard.common import SD
from sickbeard.common import SKIPPED
from sickbeard.common import WANTED
from sickbeard.databases import mainDB, cache_db, failed_db
from sickrage.helper.encoding import ek
from sickrage.helper.exceptions import ex
from sickrage.show.Show import Show
from sickrage.system.Shutdown import Shutdown
from configobj import ConfigObj
import requests
requests.packages.urllib3.disable_warnings()
indexerApi = indexer_api.indexerApi
PID = None
CFG = None
CONFIG_FILE = None
# This is the version of the config we EXPECT to find
CONFIG_VERSION = 7
# Default encryption version (0 for None)
ENCRYPTION_VERSION = 0
ENCRYPTION_SECRET = None
PROG_DIR = '.'
MY_FULLNAME = None
MY_NAME = None
MY_ARGS = []
SYS_ENCODING = ''
DATA_DIR = ''
CREATEPID = False
PIDFILE = ''
DAEMON = None
NO_RESIZE = False
# system events
events = None
# github
gh = None
# schedulers
dailySearchScheduler = None
backlogSearchScheduler = None
showUpdateScheduler = None
versionCheckScheduler = None
showQueueScheduler = None
searchQueueScheduler = None
properFinderScheduler = None
autoPostProcesserScheduler = None
subtitlesFinderScheduler = None
traktCheckerScheduler = None
showList = None
loadingShowList = None
providerList = []
newznabProviderList = []
torrentRssProviderList = []
metadata_provider_dict = {}
NEWEST_VERSION = None
NEWEST_VERSION_STRING = None
VERSION_NOTIFY = False
AUTO_UPDATE = False
NOTIFY_ON_UPDATE = False
CUR_COMMIT_HASH = None
BRANCH = ''
GIT_RESET = True
GIT_REMOTE = ''
GIT_REMOTE_URL = ''
CUR_COMMIT_BRANCH = ''
GIT_ORG = 'SickRage'
GIT_REPO = 'SickRage'
GIT_USERNAME = None
GIT_PASSWORD = None
GIT_PATH = None
GIT_AUTOISSUES = False
GIT_NEWVER = False
DEVELOPER = False
NEWS_URL = 'http://sickrage.github.io/sickrage-news/news.md'
NEWS_LAST_READ = None
NEWS_LATEST = None
NEWS_UNREAD = 0
INIT_LOCK = Lock()
started = False
ACTUAL_LOG_DIR = None
LOG_DIR = None
LOG_NR = 5
LOG_SIZE = 1048576
SOCKET_TIMEOUT = None
WEB_PORT = None
WEB_LOG = None
WEB_ROOT = None
WEB_USERNAME = None
WEB_PASSWORD = None
WEB_HOST = None
WEB_IPV6 = None
WEB_COOKIE_SECRET = None
WEB_USE_GZIP = True
DOWNLOAD_URL = None
HANDLE_REVERSE_PROXY = False
PROXY_SETTING = None
PROXY_INDEXERS = True
SSL_VERIFY = True
LOCALHOST_IP = None
CPU_PRESET = None
ANON_REDIRECT = None
API_KEY = None
API_ROOT = None
ENABLE_HTTPS = False
HTTPS_CERT = None
HTTPS_KEY = None
INDEXER_DEFAULT_LANGUAGE = None
EP_DEFAULT_DELETED_STATUS = None
LAUNCH_BROWSER = False
CACHE_DIR = None
ACTUAL_CACHE_DIR = None
ROOT_DIRS = None
TRASH_REMOVE_SHOW = False
TRASH_ROTATE_LOGS = False
SORT_ARTICLE = False
DEBUG = False
DISPLAY_ALL_SEASONS = True
DEFAULT_PAGE = 'home'
USE_LISTVIEW = False
METADATA_KODI = None
METADATA_KODI_12PLUS = None
METADATA_MEDIABROWSER = None
METADATA_PS3 = None
METADATA_WDTV = None
METADATA_TIVO = None
METADATA_MEDE8ER = None
QUALITY_DEFAULT = None
STATUS_DEFAULT = None
STATUS_DEFAULT_AFTER = None
FLATTEN_FOLDERS_DEFAULT = False
SUBTITLES_DEFAULT = False
INDEXER_DEFAULT = None
INDEXER_TIMEOUT = None
SCENE_DEFAULT = False
ANIME_DEFAULT = False
ARCHIVE_DEFAULT = False
PROVIDER_ORDER = []
NAMING_MULTI_EP = False
NAMING_ANIME_MULTI_EP = False
NAMING_PATTERN = None
NAMING_ABD_PATTERN = None
NAMING_CUSTOM_ABD = False
NAMING_SPORTS_PATTERN = None
NAMING_CUSTOM_SPORTS = False
NAMING_ANIME_PATTERN = None
NAMING_CUSTOM_ANIME = False
NAMING_FORCE_FOLDERS = False
NAMING_STRIP_YEAR = False
NAMING_ANIME = None
USE_NZBS = False
USE_TORRENTS = False
NZB_METHOD = None
NZB_DIR = None
USENET_RETENTION = None
TORRENT_METHOD = None
TORRENT_DIR = None
DOWNLOAD_PROPERS = False
CHECK_PROPERS_INTERVAL = None
ALLOW_HIGH_PRIORITY = False
SAB_FORCED = False
RANDOMIZE_PROVIDERS = False
AUTOPOSTPROCESSER_FREQUENCY = None
DAILYSEARCH_FREQUENCY = None
UPDATE_FREQUENCY = None
BACKLOG_FREQUENCY = None
SHOWUPDATE_HOUR = None
DEFAULT_AUTOPOSTPROCESSER_FREQUENCY = 10
DEFAULT_DAILYSEARCH_FREQUENCY = 40
DEFAULT_BACKLOG_FREQUENCY = 21
DEFAULT_UPDATE_FREQUENCY = 1
DEFAULT_SHOWUPDATE_HOUR = 3
MIN_AUTOPOSTPROCESSER_FREQUENCY = 1
MIN_DAILYSEARCH_FREQUENCY = 10
MIN_BACKLOG_FREQUENCY = 10
MIN_UPDATE_FREQUENCY = 1
BACKLOG_DAYS = 7
ADD_SHOWS_WO_DIR = False
CREATE_MISSING_SHOW_DIRS = False
RENAME_EPISODES = False
AIRDATE_EPISODES = False
FILE_TIMESTAMP_TIMEZONE = None
PROCESS_AUTOMATICALLY = False
NO_DELETE = False
KEEP_PROCESSED_DIR = False
PROCESS_METHOD = None
DELRARCONTENTS = False
MOVE_ASSOCIATED_FILES = False
POSTPONE_IF_SYNC_FILES = True
POSTPONE_IF_NO_SUBS = False
NFO_RENAME = True
TV_DOWNLOAD_DIR = None
UNPACK = False
SKIP_REMOVED_FILES = False
ALLOWED_EXTENSIONS = "nfo,srr,sfv"
NZBS = False
NZBS_UID = None
NZBS_HASH = None
OMGWTFNZBS = False
OMGWTFNZBS_USERNAME = None
OMGWTFNZBS_APIKEY = None
NEWZBIN = False
NEWZBIN_USERNAME = None
NEWZBIN_PASSWORD = None
SAB_USERNAME = None
SAB_PASSWORD = None
SAB_APIKEY = None
SAB_CATEGORY = None
SAB_CATEGORY_BACKLOG = None
SAB_CATEGORY_ANIME = None
SAB_CATEGORY_ANIME_BACKLOG = None
SAB_HOST = ''
NZBGET_USERNAME = None
NZBGET_PASSWORD = None
NZBGET_CATEGORY = None
NZBGET_CATEGORY_BACKLOG = None
NZBGET_CATEGORY_ANIME = None
NZBGET_CATEGORY_ANIME_BACKLOG = None
NZBGET_HOST = None
NZBGET_USE_HTTPS = False
NZBGET_PRIORITY = 100
TORRENT_USERNAME = None
TORRENT_PASSWORD = None
TORRENT_HOST = ''
TORRENT_PATH = ''
TORRENT_SEED_TIME = None
TORRENT_PAUSED = False
TORRENT_HIGH_BANDWIDTH = False
TORRENT_LABEL = ''
TORRENT_LABEL_ANIME = ''
TORRENT_VERIFY_CERT = False
TORRENT_RPCURL = 'transmission'
TORRENT_AUTH_TYPE = 'none'
USE_KODI = False
KODI_ALWAYS_ON = True
KODI_NOTIFY_ONSNATCH = False
KODI_NOTIFY_ONDOWNLOAD = False
KODI_NOTIFY_ONSUBTITLEDOWNLOAD = False
KODI_UPDATE_LIBRARY = False
KODI_UPDATE_FULL = False
KODI_UPDATE_ONLYFIRST = False
KODI_HOST = ''
KODI_USERNAME = None
KODI_PASSWORD = None
USE_PLEX = False
PLEX_NOTIFY_ONSNATCH = False
PLEX_NOTIFY_ONDOWNLOAD = False
PLEX_NOTIFY_ONSUBTITLEDOWNLOAD = False
PLEX_UPDATE_LIBRARY = False
PLEX_SERVER_HOST = None
PLEX_SERVER_TOKEN = None
PLEX_HOST = None
PLEX_USERNAME = None
PLEX_PASSWORD = None
USE_PLEX_CLIENT = False
PLEX_CLIENT_USERNAME = None
PLEX_CLIENT_PASSWORD = None
USE_EMBY = False
EMBY_HOST = None
EMBY_APIKEY = None
USE_GROWL = False
GROWL_NOTIFY_ONSNATCH = False
GROWL_NOTIFY_ONDOWNLOAD = False
GROWL_NOTIFY_ONSUBTITLEDOWNLOAD = False
GROWL_HOST = ''
GROWL_PASSWORD = None
USE_FREEMOBILE = False
FREEMOBILE_NOTIFY_ONSNATCH = False
FREEMOBILE_NOTIFY_ONDOWNLOAD = False
FREEMOBILE_NOTIFY_ONSUBTITLEDOWNLOAD = False
FREEMOBILE_ID = ''
FREEMOBILE_APIKEY = ''
USE_PROWL = False
PROWL_NOTIFY_ONSNATCH = False
PROWL_NOTIFY_ONDOWNLOAD = False
PROWL_NOTIFY_ONSUBTITLEDOWNLOAD = False
PROWL_API = None
PROWL_PRIORITY = 0
PROWL_MESSAGE_TITLE = 'SickRage'
USE_TWITTER = False
TWITTER_NOTIFY_ONSNATCH = False
TWITTER_NOTIFY_ONDOWNLOAD = False
TWITTER_NOTIFY_ONSUBTITLEDOWNLOAD = False
TWITTER_USERNAME = None
TWITTER_PASSWORD = None
TWITTER_PREFIX = None
TWITTER_DMTO = None
TWITTER_USEDM = False
USE_BOXCAR2 = False
BOXCAR2_NOTIFY_ONSNATCH = False
BOXCAR2_NOTIFY_ONDOWNLOAD = False
BOXCAR2_NOTIFY_ONSUBTITLEDOWNLOAD = False
BOXCAR2_ACCESSTOKEN = None
USE_PUSHOVER = False
PUSHOVER_NOTIFY_ONSNATCH = False
PUSHOVER_NOTIFY_ONDOWNLOAD = False
PUSHOVER_NOTIFY_ONSUBTITLEDOWNLOAD = False
PUSHOVER_USERKEY = None
PUSHOVER_APIKEY = None
PUSHOVER_DEVICE = None
PUSHOVER_SOUND = None
USE_LIBNOTIFY = False
LIBNOTIFY_NOTIFY_ONSNATCH = False
LIBNOTIFY_NOTIFY_ONDOWNLOAD = False
LIBNOTIFY_NOTIFY_ONSUBTITLEDOWNLOAD = False
USE_NMJ = False
NMJ_HOST = None
NMJ_DATABASE = None
NMJ_MOUNT = None
ANIMESUPPORT = False
USE_ANIDB = False
ANIDB_USERNAME = None
ANIDB_PASSWORD = None
ANIDB_USE_MYLIST = False
ADBA_CONNECTION = None
ANIME_SPLIT_HOME = False
USE_SYNOINDEX = False
USE_NMJv2 = False
NMJv2_HOST = None
NMJv2_DATABASE = None
NMJv2_DBLOC = None
USE_SYNOLOGYNOTIFIER = False
SYNOLOGYNOTIFIER_NOTIFY_ONSNATCH = False
SYNOLOGYNOTIFIER_NOTIFY_ONDOWNLOAD = False
SYNOLOGYNOTIFIER_NOTIFY_ONSUBTITLEDOWNLOAD = False
USE_TRAKT = False
TRAKT_USERNAME = None
TRAKT_ACCESS_TOKEN = None
TRAKT_REFRESH_TOKEN = None
TRAKT_REMOVE_WATCHLIST = False
TRAKT_REMOVE_SERIESLIST = False
TRAKT_REMOVE_SHOW_FROM_SICKRAGE = False
TRAKT_SYNC_WATCHLIST = False
TRAKT_METHOD_ADD = None
TRAKT_START_PAUSED = False
TRAKT_USE_RECOMMENDED = False
TRAKT_SYNC = False
TRAKT_SYNC_REMOVE = False
TRAKT_DEFAULT_INDEXER = None
TRAKT_TIMEOUT = None
TRAKT_BLACKLIST_NAME = None
USE_PYTIVO = False
PYTIVO_NOTIFY_ONSNATCH = False
PYTIVO_NOTIFY_ONDOWNLOAD = False
PYTIVO_NOTIFY_ONSUBTITLEDOWNLOAD = False
PYTIVO_UPDATE_LIBRARY = False
PYTIVO_HOST = ''
PYTIVO_SHARE_NAME = ''
PYTIVO_TIVO_NAME = ''
USE_NMA = False
NMA_NOTIFY_ONSNATCH = False
NMA_NOTIFY_ONDOWNLOAD = False
NMA_NOTIFY_ONSUBTITLEDOWNLOAD = False
NMA_API = None
NMA_PRIORITY = 0
USE_PUSHALOT = False
PUSHALOT_NOTIFY_ONSNATCH = False
PUSHALOT_NOTIFY_ONDOWNLOAD = False
PUSHALOT_NOTIFY_ONSUBTITLEDOWNLOAD = False
PUSHALOT_AUTHORIZATIONTOKEN = None
USE_PUSHBULLET = False
PUSHBULLET_NOTIFY_ONSNATCH = False
PUSHBULLET_NOTIFY_ONDOWNLOAD = False
PUSHBULLET_NOTIFY_ONSUBTITLEDOWNLOAD = False
PUSHBULLET_API = None
PUSHBULLET_DEVICE = None
USE_EMAIL = False
EMAIL_NOTIFY_ONSNATCH = False
EMAIL_NOTIFY_ONDOWNLOAD = False
EMAIL_NOTIFY_ONSUBTITLEDOWNLOAD = False
EMAIL_HOST = None
EMAIL_PORT = 25
EMAIL_TLS = False
EMAIL_USER = None
EMAIL_PASSWORD = None
EMAIL_FROM = None
EMAIL_LIST = None
GUI_NAME = None
HOME_LAYOUT = None
HISTORY_LAYOUT = None
HISTORY_LIMIT = 0
DISPLAY_SHOW_SPECIALS = False
COMING_EPS_LAYOUT = None
COMING_EPS_DISPLAY_PAUSED = False
COMING_EPS_SORT = None
COMING_EPS_MISSED_RANGE = None
FUZZY_DATING = False
TRIM_ZERO = False
DATE_PRESET = None
TIME_PRESET = None
TIME_PRESET_W_SECONDS = None
TIMEZONE_DISPLAY = None
THEME_NAME = None
POSTER_SORTBY = None
POSTER_SORTDIR = None
USE_SUBTITLES = False
SUBTITLES_LANGUAGES = []
SUBTITLES_DIR = ''
SUBTITLES_SERVICES_LIST = []
SUBTITLES_SERVICES_ENABLED = []
SUBTITLES_HISTORY = False
EMBEDDED_SUBTITLES_ALL = False
SUBTITLES_HEARING_IMPAIRED = False
SUBTITLES_FINDER_FREQUENCY = 1
SUBTITLES_MULTI = False
SUBTITLES_EXTRA_SCRIPTS = []
SUBTITLES_DOWNLOAD_IN_PP = False
ADDIC7ED_USER = None
ADDIC7ED_PASS = None
OPENSUBTITLES_USER = None
OPENSUBTITLES_PASS = None
LEGENDASTV_USER = None
LEGENDASTV_PASS = None
USE_FAILED_DOWNLOADS = False
DELETE_FAILED = False
EXTRA_SCRIPTS = []
IGNORE_WORDS = "german,french,core2hd,dutch,swedish,reenc,MrLss"
TRACKERS_LIST = "udp://coppersurfer.tk:6969/announce,udp://open.demonii.com:1337,udp://exodus.desync.com:6969,udp://9.rarbg.me:2710/announce,udp://glotorrents.pw:6969/announce,udp://tracker.openbittorrent.com:80/announce,udp://9.rarbg.to:2710/announce"
REQUIRE_WORDS = ""
IGNORED_SUBS_LIST = "dk,fin,heb,kor,nor,nordic,pl,swe"
SYNC_FILES = "!sync,lftp-pget-status,part,bts,!qb"
CALENDAR_UNPROTECTED = False
CALENDAR_ICONS = False
NO_RESTART = False
TMDB_API_KEY = 'edc5f123313769de83a71e157758030b'
# TRAKT_API_KEY = 'd4161a7a106424551add171e5470112e4afdaf2438e6ef2fe0548edc75924868'
TRAKT_API_KEY = '5c65f55e11d48c35385d9e8670615763a605fad28374c8ae553a7b7a50651ddd'
TRAKT_API_SECRET = 'b53e32045ac122a445ef163e6d859403301ffe9b17fb8321d428531b69022a82'
TRAKT_PIN_URL = 'https://trakt.tv/pin/4562'
TRAKT_OAUTH_URL = 'https://trakt.tv/'
TRAKT_API_URL = 'https://api-v2launch.trakt.tv/'
FANART_API_KEY = '9b3afaf26f6241bdb57d6cc6bd798da7'
SHOWS_RECENT = []
__INITIALIZED__ = False
NEWZNAB_DATA = None
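# The backlog cycle is derived from the daily search frequency but never drops
# below 720 minutes (12 hours); e.g. the default DAILYSEARCH_FREQUENCY of 40
# gives 40 * 2 + 7 = 87, which is then raised to the 720-minute floor.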
def get_backlog_cycle_time():
cycletime = DAILYSEARCH_FREQUENCY * 2 + 7
return max([cycletime, 720])
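# initialize() reads every setting from the already-loaded CFG (a
# configobj.ConfigObj built from the config file), builds the provider lists,
# databases and schedulers, and returns False early if it has already run.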
def initialize(consoleLogging=True):
with INIT_LOCK:
global BRANCH, GIT_RESET, GIT_REMOTE, GIT_REMOTE_URL, CUR_COMMIT_HASH, CUR_COMMIT_BRANCH, GIT_NEWVER, ACTUAL_LOG_DIR, LOG_DIR, LOG_NR, LOG_SIZE, WEB_PORT, WEB_LOG, ENCRYPTION_VERSION, ENCRYPTION_SECRET, WEB_ROOT, WEB_USERNAME, WEB_PASSWORD, WEB_HOST, WEB_IPV6, WEB_COOKIE_SECRET, WEB_USE_GZIP, API_KEY, ENABLE_HTTPS, HTTPS_CERT, HTTPS_KEY, \
HANDLE_REVERSE_PROXY, USE_NZBS, USE_TORRENTS, NZB_METHOD, NZB_DIR, DOWNLOAD_PROPERS, RANDOMIZE_PROVIDERS, CHECK_PROPERS_INTERVAL, ALLOW_HIGH_PRIORITY, SAB_FORCED, TORRENT_METHOD, \
SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, SAB_CATEGORY_BACKLOG, SAB_CATEGORY_ANIME, SAB_CATEGORY_ANIME_BACKLOG, SAB_HOST, \
NZBGET_USERNAME, NZBGET_PASSWORD, NZBGET_CATEGORY, NZBGET_CATEGORY_BACKLOG, NZBGET_CATEGORY_ANIME, NZBGET_CATEGORY_ANIME_BACKLOG, NZBGET_PRIORITY, NZBGET_HOST, NZBGET_USE_HTTPS, backlogSearchScheduler, \
TORRENT_USERNAME, TORRENT_PASSWORD, TORRENT_HOST, TORRENT_PATH, TORRENT_SEED_TIME, TORRENT_PAUSED, TORRENT_HIGH_BANDWIDTH, TORRENT_LABEL, TORRENT_LABEL_ANIME, TORRENT_VERIFY_CERT, TORRENT_RPCURL, TORRENT_AUTH_TYPE, \
USE_KODI, KODI_ALWAYS_ON, KODI_NOTIFY_ONSNATCH, KODI_NOTIFY_ONDOWNLOAD, KODI_NOTIFY_ONSUBTITLEDOWNLOAD, KODI_UPDATE_FULL, KODI_UPDATE_ONLYFIRST, \
KODI_UPDATE_LIBRARY, KODI_HOST, KODI_USERNAME, KODI_PASSWORD, BACKLOG_FREQUENCY, \
USE_TRAKT, TRAKT_USERNAME, TRAKT_ACCESS_TOKEN, TRAKT_REFRESH_TOKEN, TRAKT_REMOVE_WATCHLIST, TRAKT_SYNC_WATCHLIST, TRAKT_REMOVE_SHOW_FROM_SICKRAGE, TRAKT_METHOD_ADD, TRAKT_START_PAUSED, traktCheckerScheduler, TRAKT_USE_RECOMMENDED, TRAKT_SYNC, TRAKT_SYNC_REMOVE, TRAKT_DEFAULT_INDEXER, TRAKT_REMOVE_SERIESLIST, TRAKT_TIMEOUT, TRAKT_BLACKLIST_NAME, \
USE_PLEX, PLEX_NOTIFY_ONSNATCH, PLEX_NOTIFY_ONDOWNLOAD, PLEX_NOTIFY_ONSUBTITLEDOWNLOAD, PLEX_UPDATE_LIBRARY, USE_PLEX_CLIENT, PLEX_CLIENT_USERNAME, PLEX_CLIENT_PASSWORD, \
PLEX_SERVER_HOST, PLEX_SERVER_TOKEN, PLEX_HOST, PLEX_USERNAME, PLEX_PASSWORD, MIN_BACKLOG_FREQUENCY, SKIP_REMOVED_FILES, ALLOWED_EXTENSIONS, \
USE_EMBY, EMBY_HOST, EMBY_APIKEY, \
showUpdateScheduler, __INITIALIZED__, INDEXER_DEFAULT_LANGUAGE, EP_DEFAULT_DELETED_STATUS, LAUNCH_BROWSER, TRASH_REMOVE_SHOW, TRASH_ROTATE_LOGS, SORT_ARTICLE, showList, loadingShowList, \
NEWZNAB_DATA, NZBS, NZBS_UID, NZBS_HASH, INDEXER_DEFAULT, INDEXER_TIMEOUT, USENET_RETENTION, TORRENT_DIR, \
QUALITY_DEFAULT, FLATTEN_FOLDERS_DEFAULT, SUBTITLES_DEFAULT, STATUS_DEFAULT, STATUS_DEFAULT_AFTER, \
GROWL_NOTIFY_ONSNATCH, GROWL_NOTIFY_ONDOWNLOAD, GROWL_NOTIFY_ONSUBTITLEDOWNLOAD, TWITTER_NOTIFY_ONSNATCH, TWITTER_NOTIFY_ONDOWNLOAD, TWITTER_NOTIFY_ONSUBTITLEDOWNLOAD, USE_FREEMOBILE, FREEMOBILE_ID, FREEMOBILE_APIKEY, FREEMOBILE_NOTIFY_ONSNATCH, FREEMOBILE_NOTIFY_ONDOWNLOAD, FREEMOBILE_NOTIFY_ONSUBTITLEDOWNLOAD, \
USE_GROWL, GROWL_HOST, GROWL_PASSWORD, USE_PROWL, PROWL_NOTIFY_ONSNATCH, PROWL_NOTIFY_ONDOWNLOAD, PROWL_NOTIFY_ONSUBTITLEDOWNLOAD, PROWL_API, PROWL_PRIORITY, PROWL_MESSAGE_TITLE, \
USE_PYTIVO, PYTIVO_NOTIFY_ONSNATCH, PYTIVO_NOTIFY_ONDOWNLOAD, PYTIVO_NOTIFY_ONSUBTITLEDOWNLOAD, PYTIVO_UPDATE_LIBRARY, PYTIVO_HOST, PYTIVO_SHARE_NAME, PYTIVO_TIVO_NAME, \
USE_NMA, NMA_NOTIFY_ONSNATCH, NMA_NOTIFY_ONDOWNLOAD, NMA_NOTIFY_ONSUBTITLEDOWNLOAD, NMA_API, NMA_PRIORITY, \
USE_PUSHALOT, PUSHALOT_NOTIFY_ONSNATCH, PUSHALOT_NOTIFY_ONDOWNLOAD, PUSHALOT_NOTIFY_ONSUBTITLEDOWNLOAD, PUSHALOT_AUTHORIZATIONTOKEN, \
USE_PUSHBULLET, PUSHBULLET_NOTIFY_ONSNATCH, PUSHBULLET_NOTIFY_ONDOWNLOAD, PUSHBULLET_NOTIFY_ONSUBTITLEDOWNLOAD, PUSHBULLET_API, PUSHBULLET_DEVICE, \
versionCheckScheduler, VERSION_NOTIFY, AUTO_UPDATE, NOTIFY_ON_UPDATE, PROCESS_AUTOMATICALLY, NO_DELETE, UNPACK, CPU_PRESET, \
KEEP_PROCESSED_DIR, PROCESS_METHOD, DELRARCONTENTS, TV_DOWNLOAD_DIR, UPDATE_FREQUENCY, \
showQueueScheduler, searchQueueScheduler, ROOT_DIRS, CACHE_DIR, ACTUAL_CACHE_DIR, TIMEZONE_DISPLAY, \
NAMING_PATTERN, NAMING_MULTI_EP, NAMING_ANIME_MULTI_EP, NAMING_FORCE_FOLDERS, NAMING_ABD_PATTERN, NAMING_CUSTOM_ABD, NAMING_SPORTS_PATTERN, NAMING_CUSTOM_SPORTS, NAMING_ANIME_PATTERN, NAMING_CUSTOM_ANIME, NAMING_STRIP_YEAR, \
RENAME_EPISODES, AIRDATE_EPISODES, FILE_TIMESTAMP_TIMEZONE, properFinderScheduler, PROVIDER_ORDER, autoPostProcesserScheduler, \
providerList, newznabProviderList, torrentRssProviderList, \
EXTRA_SCRIPTS, USE_TWITTER, TWITTER_USERNAME, TWITTER_PASSWORD, TWITTER_PREFIX, DAILYSEARCH_FREQUENCY, TWITTER_DMTO, TWITTER_USEDM, \
USE_BOXCAR2, BOXCAR2_ACCESSTOKEN, BOXCAR2_NOTIFY_ONDOWNLOAD, BOXCAR2_NOTIFY_ONSUBTITLEDOWNLOAD, BOXCAR2_NOTIFY_ONSNATCH, \
USE_PUSHOVER, PUSHOVER_USERKEY, PUSHOVER_APIKEY, PUSHOVER_DEVICE, PUSHOVER_NOTIFY_ONDOWNLOAD, PUSHOVER_NOTIFY_ONSUBTITLEDOWNLOAD, PUSHOVER_NOTIFY_ONSNATCH, PUSHOVER_SOUND, \
USE_LIBNOTIFY, LIBNOTIFY_NOTIFY_ONSNATCH, LIBNOTIFY_NOTIFY_ONDOWNLOAD, LIBNOTIFY_NOTIFY_ONSUBTITLEDOWNLOAD, USE_NMJ, NMJ_HOST, NMJ_DATABASE, NMJ_MOUNT, USE_NMJv2, NMJv2_HOST, NMJv2_DATABASE, NMJv2_DBLOC, USE_SYNOINDEX, \
USE_SYNOLOGYNOTIFIER, SYNOLOGYNOTIFIER_NOTIFY_ONSNATCH, SYNOLOGYNOTIFIER_NOTIFY_ONDOWNLOAD, SYNOLOGYNOTIFIER_NOTIFY_ONSUBTITLEDOWNLOAD, \
USE_EMAIL, EMAIL_HOST, EMAIL_PORT, EMAIL_TLS, EMAIL_USER, EMAIL_PASSWORD, EMAIL_FROM, EMAIL_NOTIFY_ONSNATCH, EMAIL_NOTIFY_ONDOWNLOAD, EMAIL_NOTIFY_ONSUBTITLEDOWNLOAD, EMAIL_LIST, \
USE_LISTVIEW, METADATA_KODI, METADATA_KODI_12PLUS, METADATA_MEDIABROWSER, METADATA_PS3, metadata_provider_dict, \
NEWZBIN, NEWZBIN_USERNAME, NEWZBIN_PASSWORD, GIT_PATH, MOVE_ASSOCIATED_FILES, SYNC_FILES, POSTPONE_IF_SYNC_FILES, POSTPONE_IF_NO_SUBS, dailySearchScheduler, NFO_RENAME, \
GUI_NAME, HOME_LAYOUT, HISTORY_LAYOUT, DISPLAY_SHOW_SPECIALS, COMING_EPS_LAYOUT, COMING_EPS_SORT, COMING_EPS_DISPLAY_PAUSED, COMING_EPS_MISSED_RANGE, FUZZY_DATING, TRIM_ZERO, DATE_PRESET, TIME_PRESET, TIME_PRESET_W_SECONDS, THEME_NAME, \
POSTER_SORTBY, POSTER_SORTDIR, HISTORY_LIMIT, CREATE_MISSING_SHOW_DIRS, ADD_SHOWS_WO_DIR, \
METADATA_WDTV, METADATA_TIVO, METADATA_MEDE8ER, IGNORE_WORDS, TRACKERS_LIST, IGNORED_SUBS_LIST, REQUIRE_WORDS, CALENDAR_UNPROTECTED, CALENDAR_ICONS, NO_RESTART, \
USE_SUBTITLES, SUBTITLES_LANGUAGES, SUBTITLES_DIR, SUBTITLES_SERVICES_LIST, SUBTITLES_SERVICES_ENABLED, SUBTITLES_HISTORY, SUBTITLES_FINDER_FREQUENCY, SUBTITLES_MULTI, SUBTITLES_DOWNLOAD_IN_PP, EMBEDDED_SUBTITLES_ALL, SUBTITLES_EXTRA_SCRIPTS, subtitlesFinderScheduler, \
SUBTITLES_HEARING_IMPAIRED, ADDIC7ED_USER, ADDIC7ED_PASS, LEGENDASTV_USER, LEGENDASTV_PASS, OPENSUBTITLES_USER, OPENSUBTITLES_PASS, \
USE_FAILED_DOWNLOADS, DELETE_FAILED, ANON_REDIRECT, LOCALHOST_IP, DEBUG, DEFAULT_PAGE, PROXY_SETTING, PROXY_INDEXERS, \
AUTOPOSTPROCESSER_FREQUENCY, SHOWUPDATE_HOUR, \
ANIME_DEFAULT, NAMING_ANIME, ANIMESUPPORT, USE_ANIDB, ANIDB_USERNAME, ANIDB_PASSWORD, ANIDB_USE_MYLIST, \
ANIME_SPLIT_HOME, SCENE_DEFAULT, ARCHIVE_DEFAULT, DOWNLOAD_URL, BACKLOG_DAYS, GIT_USERNAME, GIT_PASSWORD, \
GIT_AUTOISSUES, DEVELOPER, gh, DISPLAY_ALL_SEASONS, SSL_VERIFY, NEWS_LAST_READ, NEWS_LATEST, SOCKET_TIMEOUT
if __INITIALIZED__:
return False
CheckSection(CFG, 'General')
CheckSection(CFG, 'Blackhole')
CheckSection(CFG, 'Newzbin')
CheckSection(CFG, 'SABnzbd')
CheckSection(CFG, 'NZBget')
CheckSection(CFG, 'KODI')
CheckSection(CFG, 'PLEX')
CheckSection(CFG, 'Emby')
CheckSection(CFG, 'Growl')
CheckSection(CFG, 'Prowl')
CheckSection(CFG, 'Twitter')
CheckSection(CFG, 'Boxcar2')
CheckSection(CFG, 'NMJ')
CheckSection(CFG, 'NMJv2')
CheckSection(CFG, 'Synology')
CheckSection(CFG, 'SynologyNotifier')
CheckSection(CFG, 'pyTivo')
CheckSection(CFG, 'NMA')
CheckSection(CFG, 'Pushalot')
CheckSection(CFG, 'Pushbullet')
CheckSection(CFG, 'Subtitles')
# Needs to be set before any passwords are read
ENCRYPTION_VERSION = check_setting_int(CFG, 'General', 'encryption_version', 0)
ENCRYPTION_SECRET = check_setting_str(CFG, 'General', 'encryption_secret', helpers.generateCookieSecret(), censor_log=True)
GIT_AUTOISSUES = bool(check_setting_int(CFG, 'General', 'git_autoissues', 0))
# git login info
GIT_USERNAME = check_setting_str(CFG, 'General', 'git_username', '')
GIT_PASSWORD = check_setting_str(CFG, 'General', 'git_password', '', censor_log=True)
GIT_NEWVER = bool(check_setting_int(CFG, 'General', 'git_newver', 0))
DEVELOPER = bool(check_setting_int(CFG, 'General', 'developer', 0))
# debugging
DEBUG = bool(check_setting_int(CFG, 'General', 'debug', 0))
DEFAULT_PAGE = check_setting_str(CFG, 'General', 'default_page', 'home')
if DEFAULT_PAGE not in ('home', 'schedule', 'history', 'news', 'IRC'):
DEFAULT_PAGE = 'home'
ACTUAL_LOG_DIR = check_setting_str(CFG, 'General', 'log_dir', 'Logs')
LOG_DIR = ek(os.path.normpath, ek(os.path.join, DATA_DIR, ACTUAL_LOG_DIR))
LOG_NR = check_setting_int(CFG, 'General', 'log_nr', 5)  # Default to 5 backup files (sickrage.log.x)
LOG_SIZE = check_setting_int(CFG, 'General', 'log_size', 1048576) # Default to max 1MB per logfile
fileLogging = True
if not helpers.makeDir(LOG_DIR):
sys.stderr.write("!!! No log folder, logging to screen only!\n")
fileLogging = False
# init logging
logger.initLogging(consoleLogging=consoleLogging, fileLogging=fileLogging, debugLogging=DEBUG)
# github api
try:
if not (GIT_USERNAME and GIT_PASSWORD):
gh = Github(user_agent="SiCKRAGE").get_organization(GIT_ORG).get_repo(GIT_REPO)
else:
gh = Github(login_or_token=GIT_USERNAME, password=GIT_PASSWORD, user_agent="SiCKRAGE").get_organization(GIT_ORG).get_repo(GIT_REPO)
except Exception as e:
gh = None
logger.log(u'Unable to set up GitHub properly. GitHub will not be available. Error: %s' % str(e), logger.WARNING)
# git reset on update
GIT_RESET = bool(check_setting_int(CFG, 'General', 'git_reset', 1))
# current git branch
BRANCH = check_setting_str(CFG, 'General', 'branch', '')
# git_remote
GIT_REMOTE = check_setting_str(CFG, 'General', 'git_remote', 'origin')
GIT_REMOTE_URL = check_setting_str(CFG, 'General', 'git_remote_url',
'https://github.com/%s/%s.git' % (GIT_ORG, GIT_REPO))
if 'sickragetv' in GIT_REMOTE_URL.lower():
GIT_REMOTE_URL = 'https://github.com/SickRage/SickRage.git'
# current commit hash
CUR_COMMIT_HASH = check_setting_str(CFG, 'General', 'cur_commit_hash', '')
# current commit branch
CUR_COMMIT_BRANCH = check_setting_str(CFG, 'General', 'cur_commit_branch', '')
ACTUAL_CACHE_DIR = check_setting_str(CFG, 'General', 'cache_dir', 'cache')
# fix bad configs due to buggy code
if ACTUAL_CACHE_DIR == 'None':
ACTUAL_CACHE_DIR = 'cache'
# unless an absolute path is specified, put the cache dir inside the data dir
if not ek(os.path.isabs, ACTUAL_CACHE_DIR):
CACHE_DIR = ek(os.path.join, DATA_DIR, ACTUAL_CACHE_DIR)
else:
CACHE_DIR = ACTUAL_CACHE_DIR
if not helpers.makeDir(CACHE_DIR):
logger.log(u"!!! Creating local cache dir failed, using system default", logger.ERROR)
CACHE_DIR = None
# Check if we need to perform a restore of the cache folder
try:
restoreDir = ek(os.path.join, DATA_DIR, 'restore')
if ek(os.path.exists, restoreDir) and ek(os.path.exists, ek(os.path.join, restoreDir, 'cache')):
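# restoreCache() moves any existing destination dir aside to a timestamped
# backup (e.g. "cache-20160101_120000") and then moves the restored copy from
# DATA_DIR/restore into place.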
def restoreCache(srcDir, dstDir):
def path_leaf(path):
head, tail = ek(os.path.split, path)
return tail or ek(os.path.basename, head)
try:
if ek(os.path.isdir, dstDir):
bakFilename = '{0}-{1}'.format(path_leaf(dstDir), datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d_%H%M%S'))
shutil.move(dstDir, ek(os.path.join, ek(os.path.dirname, dstDir), bakFilename))
shutil.move(srcDir, dstDir)
logger.log(u"Restore: restoring cache successful", logger.INFO)
except Exception as e:
logger.log(u"Restore: restoring cache failed: {0}".format(str(e)), logger.ERROR)
restoreCache(ek(os.path.join, restoreDir, 'cache'), CACHE_DIR)
except Exception as e:
logger.log(u"Restore: restoring cache failed: {0}".format(ex(e)), logger.ERROR)
finally:
if ek(os.path.exists, ek(os.path.join, DATA_DIR, 'restore')):
try:
shutil.rmtree(ek(os.path.join, DATA_DIR, 'restore'))
except Exception as e:
logger.log(u"Restore: Unable to remove the restore directory: {0}".format(ex(e)), logger.ERROR)
for cleanupDir in ['mako', 'sessions', 'indexers']:
try:
shutil.rmtree(ek(os.path.join, CACHE_DIR, cleanupDir))
except Exception as e:
logger.log(u"Restore: Unable to remove the cache/{0} directory: {1}".format(cleanupDir, ex(e)), logger.WARNING)
GUI_NAME = check_setting_str(CFG, 'GUI', 'gui_name', 'slick')
THEME_NAME = check_setting_str(CFG, 'GUI', 'theme_name', 'dark')
SOCKET_TIMEOUT = check_setting_int(CFG, 'General', 'socket_timeout', 30)
socket.setdefaulttimeout(SOCKET_TIMEOUT)
try:
WEB_PORT = check_setting_int(CFG, 'General', 'web_port', 8081)
except Exception:
WEB_PORT = 8081
if WEB_PORT < 21 or WEB_PORT > 65535:
WEB_PORT = 8081
WEB_HOST = check_setting_str(CFG, 'General', 'web_host', '0.0.0.0')
WEB_IPV6 = bool(check_setting_int(CFG, 'General', 'web_ipv6', 0))
WEB_ROOT = check_setting_str(CFG, 'General', 'web_root', '').rstrip("/")
WEB_LOG = bool(check_setting_int(CFG, 'General', 'web_log', 0))
WEB_USERNAME = check_setting_str(CFG, 'General', 'web_username', '', censor_log=True)
WEB_PASSWORD = check_setting_str(CFG, 'General', 'web_password', '', censor_log=True)
WEB_COOKIE_SECRET = check_setting_str(CFG, 'General', 'web_cookie_secret', helpers.generateCookieSecret(), censor_log=True)
if not WEB_COOKIE_SECRET:
WEB_COOKIE_SECRET = helpers.generateCookieSecret()
WEB_USE_GZIP = bool(check_setting_int(CFG, 'General', 'web_use_gzip', 1))
SSL_VERIFY = bool(check_setting_int(CFG, 'General', 'ssl_verify', 1))
INDEXER_DEFAULT_LANGUAGE = check_setting_str(CFG, 'General', 'indexerDefaultLang', 'en')
EP_DEFAULT_DELETED_STATUS = check_setting_int(CFG, 'General', 'ep_default_deleted_status', 6)
LAUNCH_BROWSER = bool(check_setting_int(CFG, 'General', 'launch_browser', 1))
DOWNLOAD_URL = check_setting_str(CFG, 'General', 'download_url', "")
LOCALHOST_IP = check_setting_str(CFG, 'General', 'localhost_ip', '')
CPU_PRESET = check_setting_str(CFG, 'General', 'cpu_preset', 'NORMAL')
ANON_REDIRECT = check_setting_str(CFG, 'General', 'anon_redirect', 'http://dereferer.org/?')
PROXY_SETTING = check_setting_str(CFG, 'General', 'proxy_setting', '')
PROXY_INDEXERS = bool(check_setting_int(CFG, 'General', 'proxy_indexers', 1))
# attempt to help prevent users from breaking links by using a bad URL; anything not ending in '?' is discarded
if not ANON_REDIRECT.endswith('?'):
ANON_REDIRECT = ''
TRASH_REMOVE_SHOW = bool(check_setting_int(CFG, 'General', 'trash_remove_show', 0))
TRASH_ROTATE_LOGS = bool(check_setting_int(CFG, 'General', 'trash_rotate_logs', 0))
SORT_ARTICLE = bool(check_setting_int(CFG, 'General', 'sort_article', 0))
API_KEY = check_setting_str(CFG, 'General', 'api_key', '', censor_log=True)
ENABLE_HTTPS = bool(check_setting_int(CFG, 'General', 'enable_https', 0))
HTTPS_CERT = check_setting_str(CFG, 'General', 'https_cert', 'server.crt')
HTTPS_KEY = check_setting_str(CFG, 'General', 'https_key', 'server.key')
HANDLE_REVERSE_PROXY = bool(check_setting_int(CFG, 'General', 'handle_reverse_proxy', 0))
ROOT_DIRS = check_setting_str(CFG, 'General', 'root_dirs', '')
if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', ROOT_DIRS):
ROOT_DIRS = ''
QUALITY_DEFAULT = check_setting_int(CFG, 'General', 'quality_default', SD)
STATUS_DEFAULT = check_setting_int(CFG, 'General', 'status_default', SKIPPED)
STATUS_DEFAULT_AFTER = check_setting_int(CFG, 'General', 'status_default_after', WANTED)
VERSION_NOTIFY = bool(check_setting_int(CFG, 'General', 'version_notify', 1))
AUTO_UPDATE = bool(check_setting_int(CFG, 'General', 'auto_update', 0))
NOTIFY_ON_UPDATE = bool(check_setting_int(CFG, 'General', 'notify_on_update', 1))
FLATTEN_FOLDERS_DEFAULT = bool(check_setting_int(CFG, 'General', 'flatten_folders_default', 0))
INDEXER_DEFAULT = check_setting_int(CFG, 'General', 'indexer_default', 0)
INDEXER_TIMEOUT = check_setting_int(CFG, 'General', 'indexer_timeout', 20)
ANIME_DEFAULT = bool(check_setting_int(CFG, 'General', 'anime_default', 0))
SCENE_DEFAULT = bool(check_setting_int(CFG, 'General', 'scene_default', 0))
ARCHIVE_DEFAULT = bool(check_setting_int(CFG, 'General', 'archive_default', 0))
PROVIDER_ORDER = check_setting_str(CFG, 'General', 'provider_order', '').split()
NAMING_PATTERN = check_setting_str(CFG, 'General', 'naming_pattern', 'Season %0S/%SN - S%0SE%0E - %EN')
NAMING_ABD_PATTERN = check_setting_str(CFG, 'General', 'naming_abd_pattern', '%SN - %A.D - %EN')
NAMING_CUSTOM_ABD = bool(check_setting_int(CFG, 'General', 'naming_custom_abd', 0))
NAMING_SPORTS_PATTERN = check_setting_str(CFG, 'General', 'naming_sports_pattern', '%SN - %A-D - %EN')
NAMING_ANIME_PATTERN = check_setting_str(CFG, 'General', 'naming_anime_pattern',
'Season %0S/%SN - S%0SE%0E - %EN')
NAMING_ANIME = check_setting_int(CFG, 'General', 'naming_anime', 3)
NAMING_CUSTOM_SPORTS = bool(check_setting_int(CFG, 'General', 'naming_custom_sports', 0))
NAMING_CUSTOM_ANIME = bool(check_setting_int(CFG, 'General', 'naming_custom_anime', 0))
NAMING_MULTI_EP = check_setting_int(CFG, 'General', 'naming_multi_ep', 1)
NAMING_ANIME_MULTI_EP = check_setting_int(CFG, 'General', 'naming_anime_multi_ep', 1)
NAMING_FORCE_FOLDERS = naming.check_force_season_folders()
NAMING_STRIP_YEAR = bool(check_setting_int(CFG, 'General', 'naming_strip_year', 0))
USE_NZBS = bool(check_setting_int(CFG, 'General', 'use_nzbs', 0))
USE_TORRENTS = bool(check_setting_int(CFG, 'General', 'use_torrents', 1))
NZB_METHOD = check_setting_str(CFG, 'General', 'nzb_method', 'blackhole')
if NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
NZB_METHOD = 'blackhole'
TORRENT_METHOD = check_setting_str(CFG, 'General', 'torrent_method', 'blackhole')
if TORRENT_METHOD not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged', 'download_station', 'rtorrent', 'qbittorrent', 'mlnet'):
TORRENT_METHOD = 'blackhole'
DOWNLOAD_PROPERS = bool(check_setting_int(CFG, 'General', 'download_propers', 1))
CHECK_PROPERS_INTERVAL = check_setting_str(CFG, 'General', 'check_propers_interval', '')
if CHECK_PROPERS_INTERVAL not in ('15m', '45m', '90m', '4h', 'daily'):
CHECK_PROPERS_INTERVAL = 'daily'
RANDOMIZE_PROVIDERS = bool(check_setting_int(CFG, 'General', 'randomize_providers', 0))
ALLOW_HIGH_PRIORITY = bool(check_setting_int(CFG, 'General', 'allow_high_priority', 1))
SKIP_REMOVED_FILES = bool(check_setting_int(CFG, 'General', 'skip_removed_files', 0))
ALLOWED_EXTENSIONS = check_setting_str(CFG, 'General', 'allowed_extensions', ALLOWED_EXTENSIONS)
USENET_RETENTION = check_setting_int(CFG, 'General', 'usenet_retention', 500)
AUTOPOSTPROCESSER_FREQUENCY = check_setting_int(CFG, 'General', 'autopostprocesser_frequency',
DEFAULT_AUTOPOSTPROCESSER_FREQUENCY)
if AUTOPOSTPROCESSER_FREQUENCY < MIN_AUTOPOSTPROCESSER_FREQUENCY:
AUTOPOSTPROCESSER_FREQUENCY = MIN_AUTOPOSTPROCESSER_FREQUENCY
DAILYSEARCH_FREQUENCY = check_setting_int(CFG, 'General', 'dailysearch_frequency',
DEFAULT_DAILYSEARCH_FREQUENCY)
if DAILYSEARCH_FREQUENCY < MIN_DAILYSEARCH_FREQUENCY:
DAILYSEARCH_FREQUENCY = MIN_DAILYSEARCH_FREQUENCY
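# the minimum backlog frequency depends on the (already clamped) daily search
# frequency, so it has to be recomputed before BACKLOG_FREQUENCY is validated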
MIN_BACKLOG_FREQUENCY = get_backlog_cycle_time()
BACKLOG_FREQUENCY = check_setting_int(CFG, 'General', 'backlog_frequency', DEFAULT_BACKLOG_FREQUENCY)
if BACKLOG_FREQUENCY < MIN_BACKLOG_FREQUENCY:
BACKLOG_FREQUENCY = MIN_BACKLOG_FREQUENCY
UPDATE_FREQUENCY = check_setting_int(CFG, 'General', 'update_frequency', DEFAULT_UPDATE_FREQUENCY)
if UPDATE_FREQUENCY < MIN_UPDATE_FREQUENCY:
UPDATE_FREQUENCY = MIN_UPDATE_FREQUENCY
SHOWUPDATE_HOUR = check_setting_int(CFG, 'General', 'showupdate_hour', DEFAULT_SHOWUPDATE_HOUR)
if SHOWUPDATE_HOUR > 23:
SHOWUPDATE_HOUR = 0
elif SHOWUPDATE_HOUR < 0:
SHOWUPDATE_HOUR = 0
BACKLOG_DAYS = check_setting_int(CFG, 'General', 'backlog_days', 7)
NEWS_LAST_READ = check_setting_str(CFG, 'General', 'news_last_read', '1970-01-01')
NEWS_LATEST = NEWS_LAST_READ
NZB_DIR = check_setting_str(CFG, 'Blackhole', 'nzb_dir', '')
TORRENT_DIR = check_setting_str(CFG, 'Blackhole', 'torrent_dir', '')
TV_DOWNLOAD_DIR = check_setting_str(CFG, 'General', 'tv_download_dir', '')
PROCESS_AUTOMATICALLY = bool(check_setting_int(CFG, 'General', 'process_automatically', 0))
NO_DELETE = bool(check_setting_int(CFG, 'General', 'no_delete', 0))
UNPACK = bool(check_setting_int(CFG, 'General', 'unpack', 0))
RENAME_EPISODES = bool(check_setting_int(CFG, 'General', 'rename_episodes', 1))
AIRDATE_EPISODES = bool(check_setting_int(CFG, 'General', 'airdate_episodes', 0))
FILE_TIMESTAMP_TIMEZONE = check_setting_str(CFG, 'General', 'file_timestamp_timezone', 'network')
KEEP_PROCESSED_DIR = bool(check_setting_int(CFG, 'General', 'keep_processed_dir', 1))
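# when no process_method is stored yet, default to 'copy' if processed dirs
# are kept and to 'move' otherwise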
PROCESS_METHOD = check_setting_str(CFG, 'General', 'process_method', 'copy' if KEEP_PROCESSED_DIR else 'move')
DELRARCONTENTS = bool(check_setting_int(CFG, 'General', 'del_rar_contents', 0))
MOVE_ASSOCIATED_FILES = bool(check_setting_int(CFG, 'General', 'move_associated_files', 0))
POSTPONE_IF_SYNC_FILES = bool(check_setting_int(CFG, 'General', 'postpone_if_sync_files', 1))
POSTPONE_IF_NO_SUBS = bool(check_setting_int(CFG, 'General', 'postpone_if_no_subs', 0))
SYNC_FILES = check_setting_str(CFG, 'General', 'sync_files', SYNC_FILES)
NFO_RENAME = bool(check_setting_int(CFG, 'General', 'nfo_rename', 1))
CREATE_MISSING_SHOW_DIRS = bool(check_setting_int(CFG, 'General', 'create_missing_show_dirs', 0))
ADD_SHOWS_WO_DIR = bool(check_setting_int(CFG, 'General', 'add_shows_wo_dir', 0))
NZBS = bool(check_setting_int(CFG, 'NZBs', 'nzbs', 0))
NZBS_UID = check_setting_str(CFG, 'NZBs', 'nzbs_uid', '', censor_log=True)
NZBS_HASH = check_setting_str(CFG, 'NZBs', 'nzbs_hash', '', censor_log=True)
NEWZBIN = bool(check_setting_int(CFG, 'Newzbin', 'newzbin', 0))
NEWZBIN_USERNAME = check_setting_str(CFG, 'Newzbin', 'newzbin_username', '', censor_log=True)
NEWZBIN_PASSWORD = check_setting_str(CFG, 'Newzbin', 'newzbin_password', '', censor_log=True)
SAB_USERNAME = check_setting_str(CFG, 'SABnzbd', 'sab_username', '', censor_log=True)
SAB_PASSWORD = check_setting_str(CFG, 'SABnzbd', 'sab_password', '', censor_log=True)
SAB_APIKEY = check_setting_str(CFG, 'SABnzbd', 'sab_apikey', '', censor_log=True)
SAB_CATEGORY = check_setting_str(CFG, 'SABnzbd', 'sab_category', 'tv')
SAB_CATEGORY_BACKLOG = check_setting_str(CFG, 'SABnzbd', 'sab_category_backlog', SAB_CATEGORY)
SAB_CATEGORY_ANIME = check_setting_str(CFG, 'SABnzbd', 'sab_category_anime', 'anime')
SAB_CATEGORY_ANIME_BACKLOG = check_setting_str(CFG, 'SABnzbd', 'sab_category_anime_backlog', SAB_CATEGORY_ANIME)
SAB_HOST = check_setting_str(CFG, 'SABnzbd', 'sab_host', '')
SAB_FORCED = bool(check_setting_int(CFG, 'SABnzbd', 'sab_forced', 0))
NZBGET_USERNAME = check_setting_str(CFG, 'NZBget', 'nzbget_username', 'nzbget', censor_log=True)
NZBGET_PASSWORD = check_setting_str(CFG, 'NZBget', 'nzbget_password', 'tegbzn6789', censor_log=True)
NZBGET_CATEGORY = check_setting_str(CFG, 'NZBget', 'nzbget_category', 'tv')
NZBGET_CATEGORY_BACKLOG = check_setting_str(CFG, 'NZBget', 'nzbget_category_backlog', NZBGET_CATEGORY)
NZBGET_CATEGORY_ANIME = check_setting_str(CFG, 'NZBget', 'nzbget_category_anime', 'anime')
NZBGET_CATEGORY_ANIME_BACKLOG = check_setting_str(CFG, 'NZBget', 'nzbget_category_anime_backlog', NZBGET_CATEGORY_ANIME)
NZBGET_HOST = check_setting_str(CFG, 'NZBget', 'nzbget_host', '')
NZBGET_USE_HTTPS = bool(check_setting_int(CFG, 'NZBget', 'nzbget_use_https', 0))
NZBGET_PRIORITY = check_setting_int(CFG, 'NZBget', 'nzbget_priority', 100)
TORRENT_USERNAME = check_setting_str(CFG, 'TORRENT', 'torrent_username', '', censor_log=True)
TORRENT_PASSWORD = check_setting_str(CFG, 'TORRENT', 'torrent_password', '', censor_log=True)
TORRENT_HOST = check_setting_str(CFG, 'TORRENT', 'torrent_host', '')
TORRENT_PATH = check_setting_str(CFG, 'TORRENT', 'torrent_path', '')
TORRENT_SEED_TIME = check_setting_int(CFG, 'TORRENT', 'torrent_seed_time', 0)
TORRENT_PAUSED = bool(check_setting_int(CFG, 'TORRENT', 'torrent_paused', 0))
TORRENT_HIGH_BANDWIDTH = bool(check_setting_int(CFG, 'TORRENT', 'torrent_high_bandwidth', 0))
TORRENT_LABEL = check_setting_str(CFG, 'TORRENT', 'torrent_label', '')
TORRENT_LABEL_ANIME = check_setting_str(CFG, 'TORRENT', 'torrent_label_anime', '')
TORRENT_VERIFY_CERT = bool(check_setting_int(CFG, 'TORRENT', 'torrent_verify_cert', 0))
TORRENT_RPCURL = check_setting_str(CFG, 'TORRENT', 'torrent_rpcurl', 'transmission')
TORRENT_AUTH_TYPE = check_setting_str(CFG, 'TORRENT', 'torrent_auth_type', '')
USE_KODI = bool(check_setting_int(CFG, 'KODI', 'use_kodi', 0))
KODI_ALWAYS_ON = bool(check_setting_int(CFG, 'KODI', 'kodi_always_on', 1))
KODI_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'KODI', 'kodi_notify_onsnatch', 0))
KODI_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'KODI', 'kodi_notify_ondownload', 0))
KODI_NOTIFY_ONSUBTITLEDOWNLOAD = bool(check_setting_int(CFG, 'KODI', 'kodi_notify_onsubtitledownload', 0))
KODI_UPDATE_LIBRARY = bool(check_setting_int(CFG, 'KODI', 'kodi_update_library', 0))
KODI_UPDATE_FULL = bool(check_setting_int(CFG, 'KODI', 'kodi_update_full', 0))
KODI_UPDATE_ONLYFIRST = bool(check_setting_int(CFG, 'KODI', 'kodi_update_onlyfirst', 0))
KODI_HOST = check_setting_str(CFG, 'KODI', 'kodi_host', '')
KODI_USERNAME = check_setting_str(CFG, 'KODI', 'kodi_username', '', censor_log=True)
KODI_PASSWORD = check_setting_str(CFG, 'KODI', 'kodi_password', '', censor_log=True)
USE_PLEX = bool(check_setting_int(CFG, 'Plex', 'use_plex', 0))
PLEX_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Plex', 'plex_notify_onsnatch', 0))
PLEX_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Plex', 'plex_notify_ondownload', 0))
PLEX_NOTIFY_ONSUBTITLEDOWNLOAD = bool(check_setting_int(CFG, 'Plex', 'plex_notify_onsubtitledownload', 0))
PLEX_UPDATE_LIBRARY = bool(check_setting_int(CFG, 'Plex', 'plex_update_library', 0))
PLEX_SERVER_HOST = check_setting_str(CFG, 'Plex', 'plex_server_host', '')
PLEX_SERVER_TOKEN = check_setting_str(CFG, 'Plex', 'plex_server_token', '')
PLEX_HOST = check_setting_str(CFG, 'Plex', 'plex_host', '')
PLEX_USERNAME = check_setting_str(CFG, 'Plex', 'plex_username', '', censor_log=True)
PLEX_PASSWORD = check_setting_str(CFG, 'Plex', 'plex_password', '', censor_log=True)
USE_PLEX_CLIENT = bool(check_setting_int(CFG, 'Plex', 'use_plex_client', 0))
PLEX_CLIENT_USERNAME = check_setting_str(CFG, 'Plex', 'plex_client_username', '', censor_log=True)
PLEX_CLIENT_PASSWORD = check_setting_str(CFG, 'Plex', 'plex_client_password', '', censor_log=True)
USE_EMBY = bool(check_setting_int(CFG, 'Emby', 'use_emby', 0))
EMBY_HOST = check_setting_str(CFG, 'Emby', 'emby_host', '')
EMBY_APIKEY = check_setting_str(CFG, 'Emby', 'emby_apikey', '')
USE_GROWL = bool(check_setting_int(CFG, 'Growl', 'use_growl', 0))
GROWL_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Growl', 'growl_notify_onsnatch', 0))
GROWL_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Growl', 'growl_notify_ondownload', 0))
GROWL_NOTIFY_ONSUBTITLEDOWNLOAD = bool(check_setting_int(CFG, 'Growl', 'growl_notify_onsubtitledownload', 0))
GROWL_HOST = check_setting_str(CFG, 'Growl', 'growl_host', '')
GROWL_PASSWORD = check_setting_str(CFG, 'Growl', 'growl_password', '', censor_log=True)
USE_FREEMOBILE = bool(check_setting_int(CFG, 'FreeMobile', 'use_freemobile', 0))
FREEMOBILE_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'FreeMobile', 'freemobile_notify_onsnatch', 0))
FREEMOBILE_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'FreeMobile', 'freemobile_notify_ondownload', 0))
FREEMOBILE_NOTIFY_ONSUBTITLEDOWNLOAD = bool(check_setting_int(CFG, 'FreeMobile', 'freemobile_notify_onsubtitledownload', 0))
FREEMOBILE_ID = check_setting_str(CFG, 'FreeMobile', 'freemobile_id', '')
FREEMOBILE_APIKEY = check_setting_str(CFG, 'FreeMobile', 'freemobile_apikey', '')
USE_PROWL = bool(check_setting_int(CFG, 'Prowl', 'use_prowl', 0))
PROWL_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Prowl', 'prowl_notify_onsnatch', 0))
PROWL_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Prowl', 'prowl_notify_ondownload', 0))
PROWL_NOTIFY_ONSUBTITLEDOWNLOAD = bool(check_setting_int(CFG, 'Prowl', 'prowl_notify_onsubtitledownload', 0))
PROWL_API = check_setting_str(CFG, 'Prowl', 'prowl_api', '', censor_log=True)
PROWL_PRIORITY = check_setting_str(CFG, 'Prowl', 'prowl_priority', "0")
PROWL_MESSAGE_TITLE = check_setting_str(CFG, 'Prowl', 'prowl_message_title', "SickRage")
USE_TWITTER = bool(check_setting_int(CFG, 'Twitter', 'use_twitter', 0))
TWITTER_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Twitter', 'twitter_notify_onsnatch', 0))
TWITTER_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Twitter', 'twitter_notify_ondownload', 0))
TWITTER_NOTIFY_ONSUBTITLEDOWNLOAD = bool(
check_setting_int(CFG, 'Twitter', 'twitter_notify_onsubtitledownload', 0))
TWITTER_USERNAME = check_setting_str(CFG, 'Twitter', 'twitter_username', '', censor_log=True)
TWITTER_PASSWORD = check_setting_str(CFG, 'Twitter', 'twitter_password', '', censor_log=True)
TWITTER_PREFIX = check_setting_str(CFG, 'Twitter', 'twitter_prefix', GIT_REPO)
TWITTER_DMTO = check_setting_str(CFG, 'Twitter', 'twitter_dmto', '')
TWITTER_USEDM = bool(check_setting_int(CFG, 'Twitter', 'twitter_usedm', 0))
USE_BOXCAR2 = bool(check_setting_int(CFG, 'Boxcar2', 'use_boxcar2', 0))
BOXCAR2_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Boxcar2', 'boxcar2_notify_onsnatch', 0))
BOXCAR2_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Boxcar2', 'boxcar2_notify_ondownload', 0))
BOXCAR2_NOTIFY_ONSUBTITLEDOWNLOAD = bool(check_setting_int(CFG, 'Boxcar2', 'boxcar2_notify_onsubtitledownload', 0))
BOXCAR2_ACCESSTOKEN = check_setting_str(CFG, 'Boxcar2', 'boxcar2_accesstoken', '', censor_log=True)
USE_PUSHOVER = bool(check_setting_int(CFG, 'Pushover', 'use_pushover', 0))
PUSHOVER_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Pushover', 'pushover_notify_onsnatch', 0))
PUSHOVER_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Pushover', 'pushover_notify_ondownload', 0))
PUSHOVER_NOTIFY_ONSUBTITLEDOWNLOAD = bool(check_setting_int(CFG, 'Pushover', 'pushover_notify_onsubtitledownload', 0))
PUSHOVER_USERKEY = check_setting_str(CFG, 'Pushover', 'pushover_userkey', '', censor_log=True)
PUSHOVER_APIKEY = check_setting_str(CFG, 'Pushover', 'pushover_apikey', '', censor_log=True)
PUSHOVER_DEVICE = check_setting_str(CFG, 'Pushover', 'pushover_device', '')
PUSHOVER_SOUND = check_setting_str(CFG, 'Pushover', 'pushover_sound', 'pushover')
USE_LIBNOTIFY = bool(check_setting_int(CFG, 'Libnotify', 'use_libnotify', 0))
LIBNOTIFY_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Libnotify', 'libnotify_notify_onsnatch', 0))
LIBNOTIFY_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Libnotify', 'libnotify_notify_ondownload', 0))
LIBNOTIFY_NOTIFY_ONSUBTITLEDOWNLOAD = bool(check_setting_int(CFG, 'Libnotify', 'libnotify_notify_onsubtitledownload', 0))
USE_NMJ = bool(check_setting_int(CFG, 'NMJ', 'use_nmj', 0))
NMJ_HOST = check_setting_str(CFG, 'NMJ', 'nmj_host', '')
NMJ_DATABASE = check_setting_str(CFG, 'NMJ', 'nmj_database', '')
NMJ_MOUNT = check_setting_str(CFG, 'NMJ', 'nmj_mount', '')
USE_NMJv2 = bool(check_setting_int(CFG, 'NMJv2', 'use_nmjv2', 0))
NMJv2_HOST = check_setting_str(CFG, 'NMJv2', 'nmjv2_host', '')
NMJv2_DATABASE = check_setting_str(CFG, 'NMJv2', 'nmjv2_database', '')
NMJv2_DBLOC = check_setting_str(CFG, 'NMJv2', 'nmjv2_dbloc', '')
USE_SYNOINDEX = bool(check_setting_int(CFG, 'Synology', 'use_synoindex', 0))
USE_SYNOLOGYNOTIFIER = bool(check_setting_int(CFG, 'SynologyNotifier', 'use_synologynotifier', 0))
SYNOLOGYNOTIFIER_NOTIFY_ONSNATCH = bool(
check_setting_int(CFG, 'SynologyNotifier', 'synologynotifier_notify_onsnatch', 0))
SYNOLOGYNOTIFIER_NOTIFY_ONDOWNLOAD = bool(
check_setting_int(CFG, 'SynologyNotifier', 'synologynotifier_notify_ondownload', 0))
SYNOLOGYNOTIFIER_NOTIFY_ONSUBTITLEDOWNLOAD = bool(
check_setting_int(CFG, 'SynologyNotifier', 'synologynotifier_notify_onsubtitledownload', 0))
USE_TRAKT = bool(check_setting_int(CFG, 'Trakt', 'use_trakt', 0))
TRAKT_USERNAME = check_setting_str(CFG, 'Trakt', 'trakt_username', '', censor_log=True)
TRAKT_ACCESS_TOKEN = check_setting_str(CFG, 'Trakt', 'trakt_access_token', '', censor_log=True)
TRAKT_REFRESH_TOKEN = check_setting_str(CFG, 'Trakt', 'trakt_refresh_token', '', censor_log=True)
TRAKT_REMOVE_WATCHLIST = bool(check_setting_int(CFG, 'Trakt', 'trakt_remove_watchlist', 0))
TRAKT_REMOVE_SERIESLIST = bool(check_setting_int(CFG, 'Trakt', 'trakt_remove_serieslist', 0))
TRAKT_REMOVE_SHOW_FROM_SICKRAGE = bool(check_setting_int(CFG, 'Trakt', 'trakt_remove_show_from_sickrage', 0))
TRAKT_SYNC_WATCHLIST = bool(check_setting_int(CFG, 'Trakt', 'trakt_sync_watchlist', 0))
TRAKT_METHOD_ADD = check_setting_int(CFG, 'Trakt', 'trakt_method_add', 0)
TRAKT_START_PAUSED = bool(check_setting_int(CFG, 'Trakt', 'trakt_start_paused', 0))
TRAKT_USE_RECOMMENDED = bool(check_setting_int(CFG, 'Trakt', 'trakt_use_recommended', 0))
TRAKT_SYNC = bool(check_setting_int(CFG, 'Trakt', 'trakt_sync', 0))
TRAKT_SYNC_REMOVE = bool(check_setting_int(CFG, 'Trakt', 'trakt_sync_remove', 0))
TRAKT_DEFAULT_INDEXER = check_setting_int(CFG, 'Trakt', 'trakt_default_indexer', 1)
TRAKT_TIMEOUT = check_setting_int(CFG, 'Trakt', 'trakt_timeout', 30)
TRAKT_BLACKLIST_NAME = check_setting_str(CFG, 'Trakt', 'trakt_blacklist_name', '')
USE_PYTIVO = bool(check_setting_int(CFG, 'pyTivo', 'use_pytivo', 0))
PYTIVO_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'pyTivo', 'pytivo_notify_onsnatch', 0))
PYTIVO_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'pyTivo', 'pytivo_notify_ondownload', 0))
PYTIVO_NOTIFY_ONSUBTITLEDOWNLOAD = bool(check_setting_int(CFG, 'pyTivo', 'pytivo_notify_onsubtitledownload', 0))
PYTIVO_UPDATE_LIBRARY = bool(check_setting_int(CFG, 'pyTivo', 'pyTivo_update_library', 0))
PYTIVO_HOST = check_setting_str(CFG, 'pyTivo', 'pytivo_host', '')
PYTIVO_SHARE_NAME = check_setting_str(CFG, 'pyTivo', 'pytivo_share_name', '')
PYTIVO_TIVO_NAME = check_setting_str(CFG, 'pyTivo', 'pytivo_tivo_name', '')
USE_NMA = bool(check_setting_int(CFG, 'NMA', 'use_nma', 0))
NMA_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'NMA', 'nma_notify_onsnatch', 0))
NMA_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'NMA', 'nma_notify_ondownload', 0))
NMA_NOTIFY_ONSUBTITLEDOWNLOAD = bool(check_setting_int(CFG, 'NMA', 'nma_notify_onsubtitledownload', 0))
NMA_API = check_setting_str(CFG, 'NMA', 'nma_api', '', censor_log=True)
NMA_PRIORITY = check_setting_str(CFG, 'NMA', 'nma_priority', "0")
USE_PUSHALOT = bool(check_setting_int(CFG, 'Pushalot', 'use_pushalot', 0))
PUSHALOT_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Pushalot', 'pushalot_notify_onsnatch', 0))
PUSHALOT_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Pushalot', 'pushalot_notify_ondownload', 0))
PUSHALOT_NOTIFY_ONSUBTITLEDOWNLOAD = bool(
check_setting_int(CFG, 'Pushalot', 'pushalot_notify_onsubtitledownload', 0))
PUSHALOT_AUTHORIZATIONTOKEN = check_setting_str(CFG, 'Pushalot', 'pushalot_authorizationtoken', '', censor_log=True)
USE_PUSHBULLET = bool(check_setting_int(CFG, 'Pushbullet', 'use_pushbullet', 0))
PUSHBULLET_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Pushbullet', 'pushbullet_notify_onsnatch', 0))
PUSHBULLET_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Pushbullet', 'pushbullet_notify_ondownload', 0))
PUSHBULLET_NOTIFY_ONSUBTITLEDOWNLOAD = bool(
check_setting_int(CFG, 'Pushbullet', 'pushbullet_notify_onsubtitledownload', 0))
PUSHBULLET_API = check_setting_str(CFG, 'Pushbullet', 'pushbullet_api', '', censor_log=True)
PUSHBULLET_DEVICE = check_setting_str(CFG, 'Pushbullet', 'pushbullet_device', '')
USE_EMAIL = bool(check_setting_int(CFG, 'Email', 'use_email', 0))
EMAIL_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Email', 'email_notify_onsnatch', 0))
EMAIL_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Email', 'email_notify_ondownload', 0))
EMAIL_NOTIFY_ONSUBTITLEDOWNLOAD = bool(check_setting_int(CFG, 'Email', 'email_notify_onsubtitledownload', 0))
EMAIL_HOST = check_setting_str(CFG, 'Email', 'email_host', '')
EMAIL_PORT = check_setting_int(CFG, 'Email', 'email_port', 25)
EMAIL_TLS = bool(check_setting_int(CFG, 'Email', 'email_tls', 0))
EMAIL_USER = check_setting_str(CFG, 'Email', 'email_user', '', censor_log=True)
EMAIL_PASSWORD = check_setting_str(CFG, 'Email', 'email_password', '', censor_log=True)
EMAIL_FROM = check_setting_str(CFG, 'Email', 'email_from', '')
EMAIL_LIST = check_setting_str(CFG, 'Email', 'email_list', '')
USE_SUBTITLES = bool(check_setting_int(CFG, 'Subtitles', 'use_subtitles', 0))
SUBTITLES_LANGUAGES = check_setting_str(CFG, 'Subtitles', 'subtitles_languages', '').split(',')
if SUBTITLES_LANGUAGES[0] == '':
SUBTITLES_LANGUAGES = []
SUBTITLES_DIR = check_setting_str(CFG, 'Subtitles', 'subtitles_dir', '')
SUBTITLES_SERVICES_LIST = check_setting_str(CFG, 'Subtitles', 'SUBTITLES_SERVICES_LIST', '').split(',')
SUBTITLES_SERVICES_ENABLED = [int(x) for x in
check_setting_str(CFG, 'Subtitles', 'SUBTITLES_SERVICES_ENABLED', '').split('|')
if x]
SUBTITLES_DEFAULT = bool(check_setting_int(CFG, 'Subtitles', 'subtitles_default', 0))
SUBTITLES_HISTORY = bool(check_setting_int(CFG, 'Subtitles', 'subtitles_history', 0))
EMBEDDED_SUBTITLES_ALL = bool(check_setting_int(CFG, 'Subtitles', 'embedded_subtitles_all', 0))
SUBTITLES_HEARING_IMPAIRED = bool(check_setting_int(CFG, 'Subtitles', 'subtitles_hearing_impaired', 0))
SUBTITLES_FINDER_FREQUENCY = check_setting_int(CFG, 'Subtitles', 'subtitles_finder_frequency', 1)
SUBTITLES_MULTI = bool(check_setting_int(CFG, 'Subtitles', 'subtitles_multi', 1))
SUBTITLES_DOWNLOAD_IN_PP = bool(check_setting_int(CFG, 'Subtitles', 'subtitles_download_in_pp', 0))
SUBTITLES_EXTRA_SCRIPTS = [x.strip() for x in check_setting_str(CFG, 'Subtitles', 'subtitles_extra_scripts', '').split('|') if x.strip()]
ADDIC7ED_USER = check_setting_str(CFG, 'Subtitles', 'addic7ed_username', '', censor_log=True)
ADDIC7ED_PASS = check_setting_str(CFG, 'Subtitles', 'addic7ed_password', '', censor_log=True)
LEGENDASTV_USER = check_setting_str(CFG, 'Subtitles', 'legendastv_username', '', censor_log=True)
LEGENDASTV_PASS = check_setting_str(CFG, 'Subtitles', 'legendastv_password', '', censor_log=True)
OPENSUBTITLES_USER = check_setting_str(CFG, 'Subtitles', 'opensubtitles_username', '', censor_log=True)
OPENSUBTITLES_PASS = check_setting_str(CFG, 'Subtitles', 'opensubtitles_password', '', censor_log=True)
USE_FAILED_DOWNLOADS = bool(check_setting_int(CFG, 'FailedDownloads', 'use_failed_downloads', 0))
DELETE_FAILED = bool(check_setting_int(CFG, 'FailedDownloads', 'delete_failed', 0))
GIT_PATH = check_setting_str(CFG, 'General', 'git_path', '')
IGNORE_WORDS = check_setting_str(CFG, 'General', 'ignore_words', IGNORE_WORDS)
TRACKERS_LIST = check_setting_str(CFG, 'General', 'trackers_list', TRACKERS_LIST)
REQUIRE_WORDS = check_setting_str(CFG, 'General', 'require_words', REQUIRE_WORDS)
IGNORED_SUBS_LIST = check_setting_str(CFG, 'General', 'ignored_subs_list', IGNORED_SUBS_LIST)
CALENDAR_UNPROTECTED = bool(check_setting_int(CFG, 'General', 'calendar_unprotected', 0))
CALENDAR_ICONS = bool(check_setting_int(CFG, 'General', 'calendar_icons', 0))
NO_RESTART = bool(check_setting_int(CFG, 'General', 'no_restart', 0))
EXTRA_SCRIPTS = [x.strip() for x in check_setting_str(CFG, 'General', 'extra_scripts', '').split('|') if
x.strip()]
USE_LISTVIEW = bool(check_setting_int(CFG, 'General', 'use_listview', 0))
ANIMESUPPORT = False
USE_ANIDB = bool(check_setting_int(CFG, 'ANIDB', 'use_anidb', 0))
ANIDB_USERNAME = check_setting_str(CFG, 'ANIDB', 'anidb_username', '', censor_log=True)
ANIDB_PASSWORD = check_setting_str(CFG, 'ANIDB', 'anidb_password', '', censor_log=True)
ANIDB_USE_MYLIST = bool(check_setting_int(CFG, 'ANIDB', 'anidb_use_mylist', 0))
ANIME_SPLIT_HOME = bool(check_setting_int(CFG, 'ANIME', 'anime_split_home', 0))
METADATA_KODI = check_setting_str(CFG, 'General', 'metadata_kodi', '0|0|0|0|0|0|0|0|0|0')
METADATA_KODI_12PLUS = check_setting_str(CFG, 'General', 'metadata_kodi_12plus', '0|0|0|0|0|0|0|0|0|0')
METADATA_MEDIABROWSER = check_setting_str(CFG, 'General', 'metadata_mediabrowser', '0|0|0|0|0|0|0|0|0|0')
METADATA_PS3 = check_setting_str(CFG, 'General', 'metadata_ps3', '0|0|0|0|0|0|0|0|0|0')
METADATA_WDTV = check_setting_str(CFG, 'General', 'metadata_wdtv', '0|0|0|0|0|0|0|0|0|0')
METADATA_TIVO = check_setting_str(CFG, 'General', 'metadata_tivo', '0|0|0|0|0|0|0|0|0|0')
METADATA_MEDE8ER = check_setting_str(CFG, 'General', 'metadata_mede8er', '0|0|0|0|0|0|0|0|0|0')
HOME_LAYOUT = check_setting_str(CFG, 'GUI', 'home_layout', 'poster')
HISTORY_LAYOUT = check_setting_str(CFG, 'GUI', 'history_layout', 'detailed')
HISTORY_LIMIT = check_setting_str(CFG, 'GUI', 'history_limit', '100')
DISPLAY_SHOW_SPECIALS = bool(check_setting_int(CFG, 'GUI', 'display_show_specials', 1))
COMING_EPS_LAYOUT = check_setting_str(CFG, 'GUI', 'coming_eps_layout', 'banner')
COMING_EPS_DISPLAY_PAUSED = bool(check_setting_int(CFG, 'GUI', 'coming_eps_display_paused', 0))
COMING_EPS_SORT = check_setting_str(CFG, 'GUI', 'coming_eps_sort', 'date')
COMING_EPS_MISSED_RANGE = check_setting_int(CFG, 'GUI', 'coming_eps_missed_range', 7)
FUZZY_DATING = bool(check_setting_int(CFG, 'GUI', 'fuzzy_dating', 0))
TRIM_ZERO = bool(check_setting_int(CFG, 'GUI', 'trim_zero', 0))
DATE_PRESET = check_setting_str(CFG, 'GUI', 'date_preset', '%x')
TIME_PRESET_W_SECONDS = check_setting_str(CFG, 'GUI', 'time_preset', '%I:%M:%S %p')
TIME_PRESET = TIME_PRESET_W_SECONDS.replace(u":%S", u"")
TIMEZONE_DISPLAY = check_setting_str(CFG, 'GUI', 'timezone_display', 'local')
POSTER_SORTBY = check_setting_str(CFG, 'GUI', 'poster_sortby', 'name')
POSTER_SORTDIR = check_setting_int(CFG, 'GUI', 'poster_sortdir', 1)
DISPLAY_ALL_SEASONS = bool(check_setting_int(CFG, 'General', 'display_all_seasons', 1))
# initialize NZB and TORRENT providers
providerList = providers.makeProviderList()
NEWZNAB_DATA = check_setting_str(CFG, 'Newznab', 'newznab_data', '')
newznabProviderList = providers.getNewznabProviderList(NEWZNAB_DATA)
TORRENTRSS_DATA = check_setting_str(CFG, 'TorrentRss', 'torrentrss_data', '')
torrentRssProviderList = providers.getTorrentRssProviderList(TORRENTRSS_DATA)
# dynamically load provider settings
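# Each provider keeps its settings in a config section named after its id in
# upper case, with keys prefixed by the id. Illustrative layout only, using a
# hypothetical provider id "someprovider":
#   [SOMEPROVIDER]
#   someprovider = 1
#   someprovider_api_key = ...
#   someprovider_minseed = 1
# Only the attributes a given provider class actually defines are read below.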
for curTorrentProvider in [curProvider for curProvider in providers.sortedProviderList() if
curProvider.providerType == GenericProvider.TORRENT]:
curTorrentProvider.enabled = bool(check_setting_int(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID(), 0))
if hasattr(curTorrentProvider, 'custom_url'):
curTorrentProvider.custom_url = check_setting_str(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_custom_url', '', censor_log=True)
if hasattr(curTorrentProvider, 'api_key'):
curTorrentProvider.api_key = check_setting_str(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_api_key', '', censor_log=True)
if hasattr(curTorrentProvider, 'hash'):
curTorrentProvider.hash = check_setting_str(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_hash', '', censor_log=True)
if hasattr(curTorrentProvider, 'digest'):
curTorrentProvider.digest = check_setting_str(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_digest', '', censor_log=True)
if hasattr(curTorrentProvider, 'username'):
curTorrentProvider.username = check_setting_str(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_username', '', censor_log=True)
if hasattr(curTorrentProvider, 'password'):
curTorrentProvider.password = check_setting_str(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_password', '', censor_log=True)
if hasattr(curTorrentProvider, 'passkey'):
curTorrentProvider.passkey = check_setting_str(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_passkey', '', censor_log=True)
if hasattr(curTorrentProvider, 'pin'):
curTorrentProvider.pin = check_setting_str(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_pin', '', censor_log=True)
if hasattr(curTorrentProvider, 'confirmed'):
curTorrentProvider.confirmed = bool(check_setting_int(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_confirmed', 1))
if hasattr(curTorrentProvider, 'ranked'):
curTorrentProvider.ranked = bool(check_setting_int(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_ranked', 1))
if hasattr(curTorrentProvider, 'engrelease'):
curTorrentProvider.engrelease = bool(check_setting_int(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_engrelease', 0))
if hasattr(curTorrentProvider, 'onlyspasearch'):
curTorrentProvider.onlyspasearch = bool(check_setting_int(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_onlyspasearch', 0))
if hasattr(curTorrentProvider, 'sorting'):
curTorrentProvider.sorting = check_setting_str(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_sorting', 'seeders')
if hasattr(curTorrentProvider, 'options'):
curTorrentProvider.options = check_setting_str(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_options', '')
if hasattr(curTorrentProvider, 'ratio'):
curTorrentProvider.ratio = check_setting_str(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_ratio', '')
if hasattr(curTorrentProvider, 'minseed'):
curTorrentProvider.minseed = check_setting_int(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_minseed', 1)
if hasattr(curTorrentProvider, 'minleech'):
curTorrentProvider.minleech = check_setting_int(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_minleech', 0)
if hasattr(curTorrentProvider, 'freeleech'):
curTorrentProvider.freeleech = bool(check_setting_int(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_freeleech', 0))
if hasattr(curTorrentProvider, 'search_mode'):
curTorrentProvider.search_mode = check_setting_str(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_search_mode',
'eponly')
if hasattr(curTorrentProvider, 'search_fallback'):
curTorrentProvider.search_fallback = bool(check_setting_int(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_search_fallback',
0))
if hasattr(curTorrentProvider, 'enable_daily'):
curTorrentProvider.enable_daily = bool(check_setting_int(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_enable_daily',
1))
if hasattr(curTorrentProvider, 'enable_backlog'):
curTorrentProvider.enable_backlog = bool(check_setting_int(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_enable_backlog',
curTorrentProvider.supportsBacklog))
if hasattr(curTorrentProvider, 'cat'):
curTorrentProvider.cat = check_setting_int(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_cat', 0)
if hasattr(curTorrentProvider, 'subtitle'):
curTorrentProvider.subtitle = bool(check_setting_int(CFG, curTorrentProvider.getID().upper(),
curTorrentProvider.getID() + '_subtitle', 0))
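# The NZB/newznab providers follow the same per-section pattern, but only the
# smaller subset of settings below applies to them.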
for curNzbProvider in [curProvider for curProvider in providers.sortedProviderList() if
curProvider.providerType == GenericProvider.NZB]:
curNzbProvider.enabled = bool(
check_setting_int(CFG, curNzbProvider.getID().upper(), curNzbProvider.getID(), 0))
if hasattr(curNzbProvider, 'api_key'):
curNzbProvider.api_key = check_setting_str(CFG, curNzbProvider.getID().upper(),
curNzbProvider.getID() + '_api_key', '', censor_log=True)
if hasattr(curNzbProvider, 'username'):
curNzbProvider.username = check_setting_str(CFG, curNzbProvider.getID().upper(),
curNzbProvider.getID() + '_username', '', censor_log=True)
if hasattr(curNzbProvider, 'search_mode'):
curNzbProvider.search_mode = check_setting_str(CFG, curNzbProvider.getID().upper(),
curNzbProvider.getID() + '_search_mode',
'eponly')
if hasattr(curNzbProvider, 'search_fallback'):
curNzbProvider.search_fallback = bool(check_setting_int(CFG, curNzbProvider.getID().upper(),
curNzbProvider.getID() + '_search_fallback',
0))
if hasattr(curNzbProvider, 'enable_daily'):
curNzbProvider.enable_daily = bool(check_setting_int(CFG, curNzbProvider.getID().upper(),
curNzbProvider.getID() + '_enable_daily',
1))
if hasattr(curNzbProvider, 'enable_backlog'):
curNzbProvider.enable_backlog = bool(check_setting_int(CFG, curNzbProvider.getID().upper(),
curNzbProvider.getID() + '_enable_backlog',
curNzbProvider.supportsBacklog))
if not ek(os.path.isfile, CONFIG_FILE):
logger.log(u"Unable to find '" + CONFIG_FILE + "', all settings will be default!", logger.DEBUG)
save_config()
# initialize the main SB database
myDB = db.DBConnection()
db.upgradeDatabase(myDB, mainDB.InitialSchema)
# initialize the cache database
myDB = db.DBConnection('cache.db')
db.upgradeDatabase(myDB, cache_db.InitialSchema)
# initialize the failed downloads database
myDB = db.DBConnection('failed.db')
db.upgradeDatabase(myDB, failed_db.InitialSchema)
# fix up any db problems
myDB = db.DBConnection()
db.sanityCheckDatabase(myDB, mainDB.MainSanityCheck)
# migrate the config if it needs it
migrator = ConfigMigrator(CFG)
migrator.migrate_config()
# initialize metadata_providers
metadata_provider_dict = metadata.get_metadata_generator_dict()
for cur_metadata_tuple in [(METADATA_KODI, metadata.kodi),
(METADATA_KODI_12PLUS, metadata.kodi_12plus),
(METADATA_MEDIABROWSER, metadata.mediabrowser),
(METADATA_PS3, metadata.ps3),
(METADATA_WDTV, metadata.wdtv),
(METADATA_TIVO, metadata.tivo),
(METADATA_MEDE8ER, metadata.mede8er)]:
(cur_metadata_config, cur_metadata_class) = cur_metadata_tuple
tmp_provider = cur_metadata_class.metadata_class()
tmp_provider.set_config(cur_metadata_config)
metadata_provider_dict[tmp_provider.name] = tmp_provider
# initialize schedulers
# updaters
versionCheckScheduler = scheduler.Scheduler(versionChecker.CheckVersion(),
cycleTime=datetime.timedelta(hours=UPDATE_FREQUENCY),
threadName="CHECKVERSION",
silent=False)
showQueueScheduler = scheduler.Scheduler(show_queue.ShowQueue(),
cycleTime=datetime.timedelta(seconds=3),
threadName="SHOWQUEUE")
showUpdateScheduler = scheduler.Scheduler(showUpdater.ShowUpdater(),
cycleTime=datetime.timedelta(hours=1),
threadName="SHOWUPDATER",
start_time=datetime.time(hour=SHOWUPDATE_HOUR))
# searchers
searchQueueScheduler = scheduler.Scheduler(search_queue.SearchQueue(),
cycleTime=datetime.timedelta(seconds=3),
threadName="SEARCHQUEUE")
# TODO: update_interval should take last daily/backlog times into account!
update_interval = datetime.timedelta(minutes=DAILYSEARCH_FREQUENCY)
dailySearchScheduler = scheduler.Scheduler(dailysearcher.DailySearcher(),
cycleTime=update_interval,
threadName="DAILYSEARCHER",
run_delay=update_interval)
update_interval = datetime.timedelta(minutes=BACKLOG_FREQUENCY)
backlogSearchScheduler = searchBacklog.BacklogSearchScheduler(searchBacklog.BacklogSearcher(),
cycleTime=update_interval,
threadName="BACKLOG",
run_delay=update_interval)
search_intervals = {'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60, 'daily': 24 * 60}
if CHECK_PROPERS_INTERVAL in search_intervals:
update_interval = datetime.timedelta(minutes=search_intervals[CHECK_PROPERS_INTERVAL])
run_at = None
else:
update_interval = datetime.timedelta(hours=1)
run_at = datetime.time(hour=1) # 1 AM
properFinderScheduler = scheduler.Scheduler(properFinder.ProperFinder(),
cycleTime=update_interval,
threadName="FINDPROPERS",
start_time=run_at,
run_delay=update_interval)
# processors
autoPostProcesserScheduler = scheduler.Scheduler(autoPostProcesser.PostProcesser(),
cycleTime=datetime.timedelta(
minutes=AUTOPOSTPROCESSER_FREQUENCY),
threadName="POSTPROCESSER",
silent=not PROCESS_AUTOMATICALLY)
traktCheckerScheduler = scheduler.Scheduler(traktChecker.TraktChecker(),
cycleTime=datetime.timedelta(hours=1),
threadName="TRAKTCHECKER",
silent=not USE_TRAKT)
subtitlesFinderScheduler = scheduler.Scheduler(subtitles.SubtitlesFinder(),
cycleTime=datetime.timedelta(hours=SUBTITLES_FINDER_FREQUENCY),
threadName="FINDSUBTITLES",
silent=not USE_SUBTITLES)
showList = []
loadingShowList = {}
__INITIALIZED__ = True
return True
def start():
global started
with INIT_LOCK:
if __INITIALIZED__:
            # start system events queue
events.start()
# start the daily search scheduler
dailySearchScheduler.enable = True
dailySearchScheduler.start()
# start the backlog scheduler
backlogSearchScheduler.enable = True
backlogSearchScheduler.start()
# start the show updater
showUpdateScheduler.enable = True
showUpdateScheduler.start()
# start the version checker
versionCheckScheduler.enable = True
versionCheckScheduler.start()
# start the queue checker
showQueueScheduler.enable = True
showQueueScheduler.start()
# start the search queue checker
searchQueueScheduler.enable = True
searchQueueScheduler.start()
# start the proper finder
if DOWNLOAD_PROPERS:
properFinderScheduler.silent = False
properFinderScheduler.enable = True
else:
properFinderScheduler.enable = False
properFinderScheduler.silent = True
properFinderScheduler.start()
# start the post processor
if PROCESS_AUTOMATICALLY:
autoPostProcesserScheduler.silent = False
autoPostProcesserScheduler.enable = True
else:
autoPostProcesserScheduler.enable = False
autoPostProcesserScheduler.silent = True
autoPostProcesserScheduler.start()
# start the subtitles finder
if USE_SUBTITLES:
subtitlesFinderScheduler.silent = False
subtitlesFinderScheduler.enable = True
else:
subtitlesFinderScheduler.enable = False
subtitlesFinderScheduler.silent = True
subtitlesFinderScheduler.start()
# start the trakt checker
if USE_TRAKT:
traktCheckerScheduler.silent = False
traktCheckerScheduler.enable = True
else:
traktCheckerScheduler.enable = False
traktCheckerScheduler.silent = True
traktCheckerScheduler.start()
started = True
def halt():
global __INITIALIZED__, started
with INIT_LOCK:
if __INITIALIZED__:
logger.log(u"Aborting all threads")
events.stop.set()
logger.log(u"Waiting for the EVENTS thread to exit")
try:
events.join(10)
except Exception:
pass
dailySearchScheduler.stop.set()
logger.log(u"Waiting for the DAILYSEARCH thread to exit")
try:
dailySearchScheduler.join(10)
except Exception:
pass
backlogSearchScheduler.stop.set()
logger.log(u"Waiting for the BACKLOG thread to exit")
try:
backlogSearchScheduler.join(10)
except Exception:
pass
showUpdateScheduler.stop.set()
logger.log(u"Waiting for the SHOWUPDATER thread to exit")
try:
showUpdateScheduler.join(10)
except Exception:
pass
versionCheckScheduler.stop.set()
logger.log(u"Waiting for the VERSIONCHECKER thread to exit")
try:
versionCheckScheduler.join(10)
except Exception:
pass
showQueueScheduler.stop.set()
logger.log(u"Waiting for the SHOWQUEUE thread to exit")
try:
showQueueScheduler.join(10)
except Exception:
pass
searchQueueScheduler.stop.set()
logger.log(u"Waiting for the SEARCHQUEUE thread to exit")
try:
searchQueueScheduler.join(10)
except Exception:
pass
autoPostProcesserScheduler.stop.set()
logger.log(u"Waiting for the POSTPROCESSER thread to exit")
try:
autoPostProcesserScheduler.join(10)
except Exception:
pass
traktCheckerScheduler.stop.set()
logger.log(u"Waiting for the TRAKTCHECKER thread to exit")
try:
traktCheckerScheduler.join(10)
except Exception:
pass
properFinderScheduler.stop.set()
logger.log(u"Waiting for the PROPERFINDER thread to exit")
try:
properFinderScheduler.join(10)
except Exception:
pass
subtitlesFinderScheduler.stop.set()
logger.log(u"Waiting for the SUBTITLESFINDER thread to exit")
try:
subtitlesFinderScheduler.join(10)
except Exception:
pass
if ADBA_CONNECTION:
ADBA_CONNECTION.logout()
logger.log(u"Waiting for the ANIDB CONNECTION thread to exit")
try:
ADBA_CONNECTION.join(10)
except Exception:
pass
__INITIALIZED__ = False
started = False
def sig_handler(signum=None, frame=None):
if not isinstance(signum, type(None)):
logger.log(u"Signal %i caught, saving and exiting..." % int(signum))
Shutdown.stop(PID)
def saveAll():
# write all shows
logger.log(u"Saving all shows to the database")
for show in showList:
show.saveToDB()
# save config
logger.log(u"Saving config file to disk")
save_config()
def restart(soft=True):
if soft:
halt()
saveAll()
logger.log(u"Re-initializing all data")
initialize()
else:
events.put(events.SystemEvent.RESTART)
def save_config():
new_config = ConfigObj()
new_config.filename = CONFIG_FILE
# For passwords you must include the word `password` in the item_name and add `helpers.encrypt(ITEM_NAME, ENCRYPTION_VERSION)` in save_config()
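    # Illustration of the convention above, using a hypothetical service
    # (neither 'MyService' nor MYSERVICE_PASSWORD is a real SickRage setting):
    #
    #     new_config['MyService'] = {}
    #     new_config['MyService']['myservice_password'] = helpers.encrypt(MYSERVICE_PASSWORD, ENCRYPTION_VERSION)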
new_config['General'] = {}
new_config['General']['git_autoissues'] = int(GIT_AUTOISSUES)
new_config['General']['git_username'] = GIT_USERNAME
new_config['General']['git_password'] = helpers.encrypt(GIT_PASSWORD, ENCRYPTION_VERSION)
new_config['General']['git_reset'] = int(GIT_RESET)
new_config['General']['branch'] = BRANCH
new_config['General']['git_remote'] = GIT_REMOTE
new_config['General']['git_remote_url'] = GIT_REMOTE_URL
new_config['General']['cur_commit_hash'] = CUR_COMMIT_HASH
new_config['General']['cur_commit_branch'] = CUR_COMMIT_BRANCH
new_config['General']['git_newver'] = int(GIT_NEWVER)
new_config['General']['config_version'] = CONFIG_VERSION
new_config['General']['encryption_version'] = int(ENCRYPTION_VERSION)
new_config['General']['encryption_secret'] = ENCRYPTION_SECRET
new_config['General']['log_dir'] = ACTUAL_LOG_DIR if ACTUAL_LOG_DIR else 'Logs'
new_config['General']['log_nr'] = int(LOG_NR)
new_config['General']['log_size'] = int(LOG_SIZE)
new_config['General']['socket_timeout'] = SOCKET_TIMEOUT
new_config['General']['web_port'] = WEB_PORT
new_config['General']['web_host'] = WEB_HOST
new_config['General']['web_ipv6'] = int(WEB_IPV6)
new_config['General']['web_log'] = int(WEB_LOG)
new_config['General']['web_root'] = WEB_ROOT
new_config['General']['web_username'] = WEB_USERNAME
new_config['General']['web_password'] = helpers.encrypt(WEB_PASSWORD, ENCRYPTION_VERSION)
new_config['General']['web_cookie_secret'] = WEB_COOKIE_SECRET
new_config['General']['web_use_gzip'] = int(WEB_USE_GZIP)
new_config['General']['ssl_verify'] = int(SSL_VERIFY)
new_config['General']['download_url'] = DOWNLOAD_URL
new_config['General']['localhost_ip'] = LOCALHOST_IP
new_config['General']['cpu_preset'] = CPU_PRESET
new_config['General']['anon_redirect'] = ANON_REDIRECT
new_config['General']['api_key'] = API_KEY
new_config['General']['debug'] = int(DEBUG)
new_config['General']['default_page'] = DEFAULT_PAGE
new_config['General']['enable_https'] = int(ENABLE_HTTPS)
new_config['General']['https_cert'] = HTTPS_CERT
new_config['General']['https_key'] = HTTPS_KEY
new_config['General']['handle_reverse_proxy'] = int(HANDLE_REVERSE_PROXY)
new_config['General']['use_nzbs'] = int(USE_NZBS)
new_config['General']['use_torrents'] = int(USE_TORRENTS)
new_config['General']['nzb_method'] = NZB_METHOD
new_config['General']['torrent_method'] = TORRENT_METHOD
new_config['General']['usenet_retention'] = int(USENET_RETENTION)
new_config['General']['autopostprocesser_frequency'] = int(AUTOPOSTPROCESSER_FREQUENCY)
new_config['General']['dailysearch_frequency'] = int(DAILYSEARCH_FREQUENCY)
new_config['General']['backlog_frequency'] = int(BACKLOG_FREQUENCY)
new_config['General']['update_frequency'] = int(UPDATE_FREQUENCY)
new_config['General']['showupdate_hour'] = int(SHOWUPDATE_HOUR)
new_config['General']['download_propers'] = int(DOWNLOAD_PROPERS)
new_config['General']['randomize_providers'] = int(RANDOMIZE_PROVIDERS)
new_config['General']['check_propers_interval'] = CHECK_PROPERS_INTERVAL
new_config['General']['allow_high_priority'] = int(ALLOW_HIGH_PRIORITY)
new_config['General']['skip_removed_files'] = int(SKIP_REMOVED_FILES)
new_config['General']['allowed_extensions'] = ALLOWED_EXTENSIONS
new_config['General']['quality_default'] = int(QUALITY_DEFAULT)
new_config['General']['status_default'] = int(STATUS_DEFAULT)
new_config['General']['status_default_after'] = int(STATUS_DEFAULT_AFTER)
new_config['General']['flatten_folders_default'] = int(FLATTEN_FOLDERS_DEFAULT)
new_config['General']['indexer_default'] = int(INDEXER_DEFAULT)
new_config['General']['indexer_timeout'] = int(INDEXER_TIMEOUT)
new_config['General']['anime_default'] = int(ANIME_DEFAULT)
new_config['General']['scene_default'] = int(SCENE_DEFAULT)
new_config['General']['archive_default'] = int(ARCHIVE_DEFAULT)
new_config['General']['provider_order'] = ' '.join(PROVIDER_ORDER)
new_config['General']['version_notify'] = int(VERSION_NOTIFY)
new_config['General']['auto_update'] = int(AUTO_UPDATE)
new_config['General']['notify_on_update'] = int(NOTIFY_ON_UPDATE)
new_config['General']['naming_strip_year'] = int(NAMING_STRIP_YEAR)
new_config['General']['naming_pattern'] = NAMING_PATTERN
new_config['General']['naming_custom_abd'] = int(NAMING_CUSTOM_ABD)
new_config['General']['naming_abd_pattern'] = NAMING_ABD_PATTERN
new_config['General']['naming_custom_sports'] = int(NAMING_CUSTOM_SPORTS)
new_config['General']['naming_sports_pattern'] = NAMING_SPORTS_PATTERN
new_config['General']['naming_custom_anime'] = int(NAMING_CUSTOM_ANIME)
new_config['General']['naming_anime_pattern'] = NAMING_ANIME_PATTERN
new_config['General']['naming_multi_ep'] = int(NAMING_MULTI_EP)
new_config['General']['naming_anime_multi_ep'] = int(NAMING_ANIME_MULTI_EP)
new_config['General']['naming_anime'] = int(NAMING_ANIME)
new_config['General']['indexerDefaultLang'] = INDEXER_DEFAULT_LANGUAGE
new_config['General']['ep_default_deleted_status'] = int(EP_DEFAULT_DELETED_STATUS)
new_config['General']['launch_browser'] = int(LAUNCH_BROWSER)
new_config['General']['trash_remove_show'] = int(TRASH_REMOVE_SHOW)
new_config['General']['trash_rotate_logs'] = int(TRASH_ROTATE_LOGS)
new_config['General']['sort_article'] = int(SORT_ARTICLE)
new_config['General']['proxy_setting'] = PROXY_SETTING
new_config['General']['proxy_indexers'] = int(PROXY_INDEXERS)
new_config['General']['use_listview'] = int(USE_LISTVIEW)
new_config['General']['metadata_kodi'] = METADATA_KODI
new_config['General']['metadata_kodi_12plus'] = METADATA_KODI_12PLUS
new_config['General']['metadata_mediabrowser'] = METADATA_MEDIABROWSER
new_config['General']['metadata_ps3'] = METADATA_PS3
new_config['General']['metadata_wdtv'] = METADATA_WDTV
new_config['General']['metadata_tivo'] = METADATA_TIVO
new_config['General']['metadata_mede8er'] = METADATA_MEDE8ER
new_config['General']['backlog_days'] = int(BACKLOG_DAYS)
new_config['General']['cache_dir'] = ACTUAL_CACHE_DIR if ACTUAL_CACHE_DIR else 'cache'
new_config['General']['root_dirs'] = ROOT_DIRS if ROOT_DIRS else ''
new_config['General']['tv_download_dir'] = TV_DOWNLOAD_DIR
new_config['General']['keep_processed_dir'] = int(KEEP_PROCESSED_DIR)
new_config['General']['process_method'] = PROCESS_METHOD
new_config['General']['del_rar_contents'] = int(DELRARCONTENTS)
new_config['General']['move_associated_files'] = int(MOVE_ASSOCIATED_FILES)
new_config['General']['sync_files'] = SYNC_FILES
new_config['General']['postpone_if_sync_files'] = int(POSTPONE_IF_SYNC_FILES)
new_config['General']['postpone_if_no_subs'] = int(POSTPONE_IF_NO_SUBS)
new_config['General']['nfo_rename'] = int(NFO_RENAME)
new_config['General']['process_automatically'] = int(PROCESS_AUTOMATICALLY)
new_config['General']['no_delete'] = int(NO_DELETE)
new_config['General']['unpack'] = int(UNPACK)
new_config['General']['rename_episodes'] = int(RENAME_EPISODES)
new_config['General']['airdate_episodes'] = int(AIRDATE_EPISODES)
new_config['General']['file_timestamp_timezone'] = FILE_TIMESTAMP_TIMEZONE
new_config['General']['create_missing_show_dirs'] = int(CREATE_MISSING_SHOW_DIRS)
new_config['General']['add_shows_wo_dir'] = int(ADD_SHOWS_WO_DIR)
new_config['General']['extra_scripts'] = '|'.join(EXTRA_SCRIPTS)
new_config['General']['git_path'] = GIT_PATH
new_config['General']['ignore_words'] = IGNORE_WORDS
new_config['General']['trackers_list'] = TRACKERS_LIST
new_config['General']['require_words'] = REQUIRE_WORDS
new_config['General']['ignored_subs_list'] = IGNORED_SUBS_LIST
new_config['General']['calendar_unprotected'] = int(CALENDAR_UNPROTECTED)
new_config['General']['calendar_icons'] = int(CALENDAR_ICONS)
new_config['General']['no_restart'] = int(NO_RESTART)
new_config['General']['developer'] = int(DEVELOPER)
new_config['General']['display_all_seasons'] = int(DISPLAY_ALL_SEASONS)
new_config['General']['news_last_read'] = NEWS_LAST_READ
new_config['Blackhole'] = {}
new_config['Blackhole']['nzb_dir'] = NZB_DIR
new_config['Blackhole']['torrent_dir'] = TORRENT_DIR
# dynamically save provider settings
for curTorrentProvider in [curProvider for curProvider in providers.sortedProviderList() if
curProvider.providerType == GenericProvider.TORRENT]:
new_config[curTorrentProvider.getID().upper()] = {}
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID()] = int(curTorrentProvider.enabled)
if hasattr(curTorrentProvider, 'custom_url'):
new_config[curTorrentProvider.getID().upper()][
curTorrentProvider.getID() + '_custom_url'] = curTorrentProvider.custom_url
if hasattr(curTorrentProvider, 'digest'):
new_config[curTorrentProvider.getID().upper()][
curTorrentProvider.getID() + '_digest'] = curTorrentProvider.digest
if hasattr(curTorrentProvider, 'hash'):
new_config[curTorrentProvider.getID().upper()][
curTorrentProvider.getID() + '_hash'] = curTorrentProvider.hash
if hasattr(curTorrentProvider, 'api_key'):
new_config[curTorrentProvider.getID().upper()][
curTorrentProvider.getID() + '_api_key'] = curTorrentProvider.api_key
if hasattr(curTorrentProvider, 'username'):
new_config[curTorrentProvider.getID().upper()][
curTorrentProvider.getID() + '_username'] = curTorrentProvider.username
if hasattr(curTorrentProvider, 'password'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_password'] = helpers.encrypt(
curTorrentProvider.password, ENCRYPTION_VERSION)
if hasattr(curTorrentProvider, 'passkey'):
new_config[curTorrentProvider.getID().upper()][
curTorrentProvider.getID() + '_passkey'] = curTorrentProvider.passkey
if hasattr(curTorrentProvider, 'pin'):
new_config[curTorrentProvider.getID().upper()][
curTorrentProvider.getID() + '_pin'] = curTorrentProvider.pin
if hasattr(curTorrentProvider, 'confirmed'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_confirmed'] = int(
curTorrentProvider.confirmed)
if hasattr(curTorrentProvider, 'ranked'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_ranked'] = int(
curTorrentProvider.ranked)
if hasattr(curTorrentProvider, 'engrelease'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_engrelease'] = int(
curTorrentProvider.engrelease)
if hasattr(curTorrentProvider, 'onlyspasearch'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_onlyspasearch'] = int(
curTorrentProvider.onlyspasearch)
if hasattr(curTorrentProvider, 'sorting'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_sorting'] = curTorrentProvider.sorting
if hasattr(curTorrentProvider, 'ratio'):
new_config[curTorrentProvider.getID().upper()][
curTorrentProvider.getID() + '_ratio'] = curTorrentProvider.ratio
if hasattr(curTorrentProvider, 'minseed'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_minseed'] = int(
curTorrentProvider.minseed)
if hasattr(curTorrentProvider, 'minleech'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_minleech'] = int(
curTorrentProvider.minleech)
if hasattr(curTorrentProvider, 'options'):
new_config[curTorrentProvider.getID().upper()][
curTorrentProvider.getID() + '_options'] = curTorrentProvider.options
if hasattr(curTorrentProvider, 'freeleech'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_freeleech'] = int(
curTorrentProvider.freeleech)
if hasattr(curTorrentProvider, 'search_mode'):
new_config[curTorrentProvider.getID().upper()][
curTorrentProvider.getID() + '_search_mode'] = curTorrentProvider.search_mode
if hasattr(curTorrentProvider, 'search_fallback'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_search_fallback'] = int(
curTorrentProvider.search_fallback)
if hasattr(curTorrentProvider, 'enable_daily'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_enable_daily'] = int(
curTorrentProvider.enable_daily)
if hasattr(curTorrentProvider, 'enable_backlog'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_enable_backlog'] = int(
curTorrentProvider.enable_backlog)
if hasattr(curTorrentProvider, 'cat'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_cat'] = int(
curTorrentProvider.cat)
if hasattr(curTorrentProvider, 'subtitle'):
new_config[curTorrentProvider.getID().upper()][curTorrentProvider.getID() + '_subtitle'] = int(
curTorrentProvider.subtitle)
for curNzbProvider in [curProvider for curProvider in providers.sortedProviderList() if
curProvider.providerType == GenericProvider.NZB]:
new_config[curNzbProvider.getID().upper()] = {}
new_config[curNzbProvider.getID().upper()][curNzbProvider.getID()] = int(curNzbProvider.enabled)
if hasattr(curNzbProvider, 'api_key'):
new_config[curNzbProvider.getID().upper()][
curNzbProvider.getID() + '_api_key'] = curNzbProvider.api_key
if hasattr(curNzbProvider, 'username'):
new_config[curNzbProvider.getID().upper()][
curNzbProvider.getID() + '_username'] = curNzbProvider.username
if hasattr(curNzbProvider, 'search_mode'):
new_config[curNzbProvider.getID().upper()][
curNzbProvider.getID() + '_search_mode'] = curNzbProvider.search_mode
if hasattr(curNzbProvider, 'search_fallback'):
new_config[curNzbProvider.getID().upper()][curNzbProvider.getID() + '_search_fallback'] = int(
curNzbProvider.search_fallback)
if hasattr(curNzbProvider, 'enable_daily'):
new_config[curNzbProvider.getID().upper()][curNzbProvider.getID() + '_enable_daily'] = int(
curNzbProvider.enable_daily)
if hasattr(curNzbProvider, 'enable_backlog'):
new_config[curNzbProvider.getID().upper()][curNzbProvider.getID() + '_enable_backlog'] = int(
curNzbProvider.enable_backlog)
new_config['NZBs'] = {}
new_config['NZBs']['nzbs'] = int(NZBS)
new_config['NZBs']['nzbs_uid'] = NZBS_UID
new_config['NZBs']['nzbs_hash'] = NZBS_HASH
new_config['Newzbin'] = {}
new_config['Newzbin']['newzbin'] = int(NEWZBIN)
new_config['Newzbin']['newzbin_username'] = NEWZBIN_USERNAME
new_config['Newzbin']['newzbin_password'] = helpers.encrypt(NEWZBIN_PASSWORD, ENCRYPTION_VERSION)
new_config['SABnzbd'] = {}
new_config['SABnzbd']['sab_username'] = SAB_USERNAME
new_config['SABnzbd']['sab_password'] = helpers.encrypt(SAB_PASSWORD, ENCRYPTION_VERSION)
new_config['SABnzbd']['sab_apikey'] = SAB_APIKEY
new_config['SABnzbd']['sab_category'] = SAB_CATEGORY
new_config['SABnzbd']['sab_category_backlog'] = SAB_CATEGORY_BACKLOG
new_config['SABnzbd']['sab_category_anime'] = SAB_CATEGORY_ANIME
new_config['SABnzbd']['sab_category_anime_backlog'] = SAB_CATEGORY_ANIME_BACKLOG
new_config['SABnzbd']['sab_host'] = SAB_HOST
new_config['SABnzbd']['sab_forced'] = int(SAB_FORCED)
new_config['NZBget'] = {}
new_config['NZBget']['nzbget_username'] = NZBGET_USERNAME
new_config['NZBget']['nzbget_password'] = helpers.encrypt(NZBGET_PASSWORD, ENCRYPTION_VERSION)
new_config['NZBget']['nzbget_category'] = NZBGET_CATEGORY
new_config['NZBget']['nzbget_category_backlog'] = NZBGET_CATEGORY_BACKLOG
new_config['NZBget']['nzbget_category_anime'] = NZBGET_CATEGORY_ANIME
new_config['NZBget']['nzbget_category_anime_backlog'] = NZBGET_CATEGORY_ANIME_BACKLOG
new_config['NZBget']['nzbget_host'] = NZBGET_HOST
new_config['NZBget']['nzbget_use_https'] = int(NZBGET_USE_HTTPS)
new_config['NZBget']['nzbget_priority'] = NZBGET_PRIORITY
new_config['TORRENT'] = {}
new_config['TORRENT']['torrent_username'] = TORRENT_USERNAME
new_config['TORRENT']['torrent_password'] = helpers.encrypt(TORRENT_PASSWORD, ENCRYPTION_VERSION)
new_config['TORRENT']['torrent_host'] = TORRENT_HOST
new_config['TORRENT']['torrent_path'] = TORRENT_PATH
new_config['TORRENT']['torrent_seed_time'] = int(TORRENT_SEED_TIME)
new_config['TORRENT']['torrent_paused'] = int(TORRENT_PAUSED)
new_config['TORRENT']['torrent_high_bandwidth'] = int(TORRENT_HIGH_BANDWIDTH)
new_config['TORRENT']['torrent_label'] = TORRENT_LABEL
new_config['TORRENT']['torrent_label_anime'] = TORRENT_LABEL_ANIME
new_config['TORRENT']['torrent_verify_cert'] = int(TORRENT_VERIFY_CERT)
new_config['TORRENT']['torrent_rpcurl'] = TORRENT_RPCURL
new_config['TORRENT']['torrent_auth_type'] = TORRENT_AUTH_TYPE
new_config['KODI'] = {}
new_config['KODI']['use_kodi'] = int(USE_KODI)
new_config['KODI']['kodi_always_on'] = int(KODI_ALWAYS_ON)
new_config['KODI']['kodi_notify_onsnatch'] = int(KODI_NOTIFY_ONSNATCH)
new_config['KODI']['kodi_notify_ondownload'] = int(KODI_NOTIFY_ONDOWNLOAD)
new_config['KODI']['kodi_notify_onsubtitledownload'] = int(KODI_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['KODI']['kodi_update_library'] = int(KODI_UPDATE_LIBRARY)
new_config['KODI']['kodi_update_full'] = int(KODI_UPDATE_FULL)
new_config['KODI']['kodi_update_onlyfirst'] = int(KODI_UPDATE_ONLYFIRST)
new_config['KODI']['kodi_host'] = KODI_HOST
new_config['KODI']['kodi_username'] = KODI_USERNAME
new_config['KODI']['kodi_password'] = helpers.encrypt(KODI_PASSWORD, ENCRYPTION_VERSION)
new_config['Plex'] = {}
new_config['Plex']['use_plex'] = int(USE_PLEX)
new_config['Plex']['plex_notify_onsnatch'] = int(PLEX_NOTIFY_ONSNATCH)
new_config['Plex']['plex_notify_ondownload'] = int(PLEX_NOTIFY_ONDOWNLOAD)
new_config['Plex']['plex_notify_onsubtitledownload'] = int(PLEX_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['Plex']['plex_update_library'] = int(PLEX_UPDATE_LIBRARY)
new_config['Plex']['plex_server_host'] = PLEX_SERVER_HOST
new_config['Plex']['plex_server_token'] = PLEX_SERVER_TOKEN
new_config['Plex']['plex_host'] = PLEX_HOST
new_config['Plex']['plex_username'] = PLEX_USERNAME
new_config['Plex']['plex_password'] = helpers.encrypt(PLEX_PASSWORD, ENCRYPTION_VERSION)
new_config['Emby'] = {}
new_config['Emby']['use_emby'] = int(USE_EMBY)
new_config['Emby']['emby_host'] = EMBY_HOST
new_config['Emby']['emby_apikey'] = EMBY_APIKEY
new_config['Growl'] = {}
new_config['Growl']['use_growl'] = int(USE_GROWL)
new_config['Growl']['growl_notify_onsnatch'] = int(GROWL_NOTIFY_ONSNATCH)
new_config['Growl']['growl_notify_ondownload'] = int(GROWL_NOTIFY_ONDOWNLOAD)
new_config['Growl']['growl_notify_onsubtitledownload'] = int(GROWL_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['Growl']['growl_host'] = GROWL_HOST
new_config['Growl']['growl_password'] = helpers.encrypt(GROWL_PASSWORD, ENCRYPTION_VERSION)
new_config['FreeMobile'] = {}
new_config['FreeMobile']['use_freemobile'] = int(USE_FREEMOBILE)
new_config['FreeMobile']['freemobile_notify_onsnatch'] = int(FREEMOBILE_NOTIFY_ONSNATCH)
new_config['FreeMobile']['freemobile_notify_ondownload'] = int(FREEMOBILE_NOTIFY_ONDOWNLOAD)
new_config['FreeMobile']['freemobile_notify_onsubtitledownload'] = int(FREEMOBILE_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['FreeMobile']['freemobile_id'] = FREEMOBILE_ID
new_config['FreeMobile']['freemobile_apikey'] = FREEMOBILE_APIKEY
new_config['Prowl'] = {}
new_config['Prowl']['use_prowl'] = int(USE_PROWL)
new_config['Prowl']['prowl_notify_onsnatch'] = int(PROWL_NOTIFY_ONSNATCH)
new_config['Prowl']['prowl_notify_ondownload'] = int(PROWL_NOTIFY_ONDOWNLOAD)
new_config['Prowl']['prowl_notify_onsubtitledownload'] = int(PROWL_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['Prowl']['prowl_api'] = PROWL_API
new_config['Prowl']['prowl_priority'] = PROWL_PRIORITY
new_config['Prowl']['prowl_message_title'] = PROWL_MESSAGE_TITLE
new_config['Twitter'] = {}
new_config['Twitter']['use_twitter'] = int(USE_TWITTER)
new_config['Twitter']['twitter_notify_onsnatch'] = int(TWITTER_NOTIFY_ONSNATCH)
new_config['Twitter']['twitter_notify_ondownload'] = int(TWITTER_NOTIFY_ONDOWNLOAD)
new_config['Twitter']['twitter_notify_onsubtitledownload'] = int(TWITTER_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['Twitter']['twitter_username'] = TWITTER_USERNAME
new_config['Twitter']['twitter_password'] = helpers.encrypt(TWITTER_PASSWORD, ENCRYPTION_VERSION)
new_config['Twitter']['twitter_prefix'] = TWITTER_PREFIX
new_config['Twitter']['twitter_dmto'] = TWITTER_DMTO
new_config['Twitter']['twitter_usedm'] = int(TWITTER_USEDM)
new_config['Boxcar2'] = {}
new_config['Boxcar2']['use_boxcar2'] = int(USE_BOXCAR2)
new_config['Boxcar2']['boxcar2_notify_onsnatch'] = int(BOXCAR2_NOTIFY_ONSNATCH)
new_config['Boxcar2']['boxcar2_notify_ondownload'] = int(BOXCAR2_NOTIFY_ONDOWNLOAD)
new_config['Boxcar2']['boxcar2_notify_onsubtitledownload'] = int(BOXCAR2_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['Boxcar2']['boxcar2_accesstoken'] = BOXCAR2_ACCESSTOKEN
new_config['Pushover'] = {}
new_config['Pushover']['use_pushover'] = int(USE_PUSHOVER)
new_config['Pushover']['pushover_notify_onsnatch'] = int(PUSHOVER_NOTIFY_ONSNATCH)
new_config['Pushover']['pushover_notify_ondownload'] = int(PUSHOVER_NOTIFY_ONDOWNLOAD)
new_config['Pushover']['pushover_notify_onsubtitledownload'] = int(PUSHOVER_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['Pushover']['pushover_userkey'] = PUSHOVER_USERKEY
new_config['Pushover']['pushover_apikey'] = PUSHOVER_APIKEY
new_config['Pushover']['pushover_device'] = PUSHOVER_DEVICE
new_config['Pushover']['pushover_sound'] = PUSHOVER_SOUND
new_config['Libnotify'] = {}
new_config['Libnotify']['use_libnotify'] = int(USE_LIBNOTIFY)
new_config['Libnotify']['libnotify_notify_onsnatch'] = int(LIBNOTIFY_NOTIFY_ONSNATCH)
new_config['Libnotify']['libnotify_notify_ondownload'] = int(LIBNOTIFY_NOTIFY_ONDOWNLOAD)
new_config['Libnotify']['libnotify_notify_onsubtitledownload'] = int(LIBNOTIFY_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['NMJ'] = {}
new_config['NMJ']['use_nmj'] = int(USE_NMJ)
new_config['NMJ']['nmj_host'] = NMJ_HOST
new_config['NMJ']['nmj_database'] = NMJ_DATABASE
new_config['NMJ']['nmj_mount'] = NMJ_MOUNT
new_config['NMJv2'] = {}
new_config['NMJv2']['use_nmjv2'] = int(USE_NMJv2)
new_config['NMJv2']['nmjv2_host'] = NMJv2_HOST
new_config['NMJv2']['nmjv2_database'] = NMJv2_DATABASE
new_config['NMJv2']['nmjv2_dbloc'] = NMJv2_DBLOC
new_config['Synology'] = {}
new_config['Synology']['use_synoindex'] = int(USE_SYNOINDEX)
new_config['SynologyNotifier'] = {}
new_config['SynologyNotifier']['use_synologynotifier'] = int(USE_SYNOLOGYNOTIFIER)
new_config['SynologyNotifier']['synologynotifier_notify_onsnatch'] = int(SYNOLOGYNOTIFIER_NOTIFY_ONSNATCH)
new_config['SynologyNotifier']['synologynotifier_notify_ondownload'] = int(SYNOLOGYNOTIFIER_NOTIFY_ONDOWNLOAD)
new_config['SynologyNotifier']['synologynotifier_notify_onsubtitledownload'] = int(
SYNOLOGYNOTIFIER_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['Trakt'] = {}
new_config['Trakt']['use_trakt'] = int(USE_TRAKT)
new_config['Trakt']['trakt_username'] = TRAKT_USERNAME
new_config['Trakt']['trakt_access_token'] = TRAKT_ACCESS_TOKEN
new_config['Trakt']['trakt_refresh_token'] = TRAKT_REFRESH_TOKEN
new_config['Trakt']['trakt_remove_watchlist'] = int(TRAKT_REMOVE_WATCHLIST)
new_config['Trakt']['trakt_remove_serieslist'] = int(TRAKT_REMOVE_SERIESLIST)
new_config['Trakt']['trakt_remove_show_from_sickrage'] = int(TRAKT_REMOVE_SHOW_FROM_SICKRAGE)
new_config['Trakt']['trakt_sync_watchlist'] = int(TRAKT_SYNC_WATCHLIST)
new_config['Trakt']['trakt_method_add'] = int(TRAKT_METHOD_ADD)
new_config['Trakt']['trakt_start_paused'] = int(TRAKT_START_PAUSED)
new_config['Trakt']['trakt_use_recommended'] = int(TRAKT_USE_RECOMMENDED)
new_config['Trakt']['trakt_sync'] = int(TRAKT_SYNC)
new_config['Trakt']['trakt_sync_remove'] = int(TRAKT_SYNC_REMOVE)
new_config['Trakt']['trakt_default_indexer'] = int(TRAKT_DEFAULT_INDEXER)
new_config['Trakt']['trakt_timeout'] = int(TRAKT_TIMEOUT)
new_config['Trakt']['trakt_blacklist_name'] = TRAKT_BLACKLIST_NAME
new_config['pyTivo'] = {}
new_config['pyTivo']['use_pytivo'] = int(USE_PYTIVO)
new_config['pyTivo']['pytivo_notify_onsnatch'] = int(PYTIVO_NOTIFY_ONSNATCH)
new_config['pyTivo']['pytivo_notify_ondownload'] = int(PYTIVO_NOTIFY_ONDOWNLOAD)
new_config['pyTivo']['pytivo_notify_onsubtitledownload'] = int(PYTIVO_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['pyTivo']['pyTivo_update_library'] = int(PYTIVO_UPDATE_LIBRARY)
new_config['pyTivo']['pytivo_host'] = PYTIVO_HOST
new_config['pyTivo']['pytivo_share_name'] = PYTIVO_SHARE_NAME
new_config['pyTivo']['pytivo_tivo_name'] = PYTIVO_TIVO_NAME
new_config['NMA'] = {}
new_config['NMA']['use_nma'] = int(USE_NMA)
new_config['NMA']['nma_notify_onsnatch'] = int(NMA_NOTIFY_ONSNATCH)
new_config['NMA']['nma_notify_ondownload'] = int(NMA_NOTIFY_ONDOWNLOAD)
new_config['NMA']['nma_notify_onsubtitledownload'] = int(NMA_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['NMA']['nma_api'] = NMA_API
new_config['NMA']['nma_priority'] = NMA_PRIORITY
new_config['Pushalot'] = {}
new_config['Pushalot']['use_pushalot'] = int(USE_PUSHALOT)
new_config['Pushalot']['pushalot_notify_onsnatch'] = int(PUSHALOT_NOTIFY_ONSNATCH)
new_config['Pushalot']['pushalot_notify_ondownload'] = int(PUSHALOT_NOTIFY_ONDOWNLOAD)
new_config['Pushalot']['pushalot_notify_onsubtitledownload'] = int(PUSHALOT_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['Pushalot']['pushalot_authorizationtoken'] = PUSHALOT_AUTHORIZATIONTOKEN
new_config['Pushbullet'] = {}
new_config['Pushbullet']['use_pushbullet'] = int(USE_PUSHBULLET)
new_config['Pushbullet']['pushbullet_notify_onsnatch'] = int(PUSHBULLET_NOTIFY_ONSNATCH)
new_config['Pushbullet']['pushbullet_notify_ondownload'] = int(PUSHBULLET_NOTIFY_ONDOWNLOAD)
new_config['Pushbullet']['pushbullet_notify_onsubtitledownload'] = int(PUSHBULLET_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['Pushbullet']['pushbullet_api'] = PUSHBULLET_API
new_config['Pushbullet']['pushbullet_device'] = PUSHBULLET_DEVICE
new_config['Email'] = {}
new_config['Email']['use_email'] = int(USE_EMAIL)
new_config['Email']['email_notify_onsnatch'] = int(EMAIL_NOTIFY_ONSNATCH)
new_config['Email']['email_notify_ondownload'] = int(EMAIL_NOTIFY_ONDOWNLOAD)
new_config['Email']['email_notify_onsubtitledownload'] = int(EMAIL_NOTIFY_ONSUBTITLEDOWNLOAD)
new_config['Email']['email_host'] = EMAIL_HOST
new_config['Email']['email_port'] = int(EMAIL_PORT)
new_config['Email']['email_tls'] = int(EMAIL_TLS)
new_config['Email']['email_user'] = EMAIL_USER
new_config['Email']['email_password'] = helpers.encrypt(EMAIL_PASSWORD, ENCRYPTION_VERSION)
new_config['Email']['email_from'] = EMAIL_FROM
new_config['Email']['email_list'] = EMAIL_LIST
new_config['Newznab'] = {}
new_config['Newznab']['newznab_data'] = NEWZNAB_DATA
new_config['TorrentRss'] = {}
new_config['TorrentRss']['torrentrss_data'] = '!!!'.join([x.configStr() for x in torrentRssProviderList])
new_config['GUI'] = {}
new_config['GUI']['gui_name'] = GUI_NAME
new_config['GUI']['theme_name'] = THEME_NAME
new_config['GUI']['home_layout'] = HOME_LAYOUT
new_config['GUI']['history_layout'] = HISTORY_LAYOUT
new_config['GUI']['history_limit'] = HISTORY_LIMIT
new_config['GUI']['display_show_specials'] = int(DISPLAY_SHOW_SPECIALS)
new_config['GUI']['coming_eps_layout'] = COMING_EPS_LAYOUT
new_config['GUI']['coming_eps_display_paused'] = int(COMING_EPS_DISPLAY_PAUSED)
new_config['GUI']['coming_eps_sort'] = COMING_EPS_SORT
new_config['GUI']['coming_eps_missed_range'] = int(COMING_EPS_MISSED_RANGE)
new_config['GUI']['fuzzy_dating'] = int(FUZZY_DATING)
new_config['GUI']['trim_zero'] = int(TRIM_ZERO)
new_config['GUI']['date_preset'] = DATE_PRESET
new_config['GUI']['time_preset'] = TIME_PRESET_W_SECONDS
new_config['GUI']['timezone_display'] = TIMEZONE_DISPLAY
new_config['GUI']['poster_sortby'] = POSTER_SORTBY
new_config['GUI']['poster_sortdir'] = POSTER_SORTDIR
new_config['Subtitles'] = {}
new_config['Subtitles']['use_subtitles'] = int(USE_SUBTITLES)
new_config['Subtitles']['subtitles_languages'] = ','.join(SUBTITLES_LANGUAGES)
new_config['Subtitles']['SUBTITLES_SERVICES_LIST'] = ','.join(SUBTITLES_SERVICES_LIST)
new_config['Subtitles']['SUBTITLES_SERVICES_ENABLED'] = '|'.join([str(x) for x in SUBTITLES_SERVICES_ENABLED])
new_config['Subtitles']['subtitles_dir'] = SUBTITLES_DIR
new_config['Subtitles']['subtitles_default'] = int(SUBTITLES_DEFAULT)
new_config['Subtitles']['subtitles_history'] = int(SUBTITLES_HISTORY)
new_config['Subtitles']['embedded_subtitles_all'] = int(EMBEDDED_SUBTITLES_ALL)
new_config['Subtitles']['subtitles_hearing_impaired'] = int(SUBTITLES_HEARING_IMPAIRED)
new_config['Subtitles']['subtitles_finder_frequency'] = int(SUBTITLES_FINDER_FREQUENCY)
new_config['Subtitles']['subtitles_multi'] = int(SUBTITLES_MULTI)
new_config['Subtitles']['subtitles_extra_scripts'] = '|'.join(SUBTITLES_EXTRA_SCRIPTS)
new_config['Subtitles']['subtitles_download_in_pp'] = int(SUBTITLES_DOWNLOAD_IN_PP)
new_config['Subtitles']['addic7ed_username'] = ADDIC7ED_USER
new_config['Subtitles']['addic7ed_password'] = helpers.encrypt(ADDIC7ED_PASS, ENCRYPTION_VERSION)
new_config['Subtitles']['legendastv_username'] = LEGENDASTV_USER
new_config['Subtitles']['legendastv_password'] = helpers.encrypt(LEGENDASTV_PASS, ENCRYPTION_VERSION)
new_config['Subtitles']['opensubtitles_username'] = OPENSUBTITLES_USER
new_config['Subtitles']['opensubtitles_password'] = helpers.encrypt(OPENSUBTITLES_PASS, ENCRYPTION_VERSION)
new_config['FailedDownloads'] = {}
new_config['FailedDownloads']['use_failed_downloads'] = int(USE_FAILED_DOWNLOADS)
new_config['FailedDownloads']['delete_failed'] = int(DELETE_FAILED)
new_config['ANIDB'] = {}
new_config['ANIDB']['use_anidb'] = int(USE_ANIDB)
new_config['ANIDB']['anidb_username'] = ANIDB_USERNAME
new_config['ANIDB']['anidb_password'] = helpers.encrypt(ANIDB_PASSWORD, ENCRYPTION_VERSION)
new_config['ANIDB']['anidb_use_mylist'] = int(ANIDB_USE_MYLIST)
new_config['ANIME'] = {}
new_config['ANIME']['anime_split_home'] = int(ANIME_SPLIT_HOME)
new_config.write()
def launchBrowser(protocol='http', startPort=None, web_root='/'):
if not startPort:
startPort = WEB_PORT
browserURL = '%s://localhost:%d%s/home/' % (protocol, startPort, web_root)
try:
webbrowser.open(browserURL, 2, 1)
except Exception:
try:
webbrowser.open(browserURL, 1, 1)
except Exception:
logger.log(u"Unable to launch a browser", logger.ERROR)
def getEpList(epIDs, showid=None):
if epIDs is None or len(epIDs) == 0:
return []
query = "SELECT * FROM tv_episodes WHERE indexerid in (%s)" % (",".join(['?'] * len(epIDs)),)
params = epIDs
if showid is not None:
query += " AND showid = ?"
params.append(showid)
myDB = db.DBConnection()
sqlResults = myDB.select(query, params)
epList = []
for curEp in sqlResults:
curShowObj = Show.find(showList, int(curEp["showid"]))
curEpObj = curShowObj.getEpisode(int(curEp["season"]), int(curEp["episode"]))
epList.append(curEpObj)
return epList
| CristianBB/SickRage | sickbeard/__init__.py | Python | gpl-3.0 | 115,225 |
import numpy as np
from ss_generator import geometry
def get_internal_coordinates_from_ca_list(ca_list):
'''Get the list of ds, thetas and taus from a ca list.'''
ds = []
thetas = []
taus = []
for i in range(len(ca_list) - 1):
ds.append(np.linalg.norm(ca_list[i + 1] - ca_list[i]))
for i in range(1, len(ca_list) - 1):
thetas.append(geometry.angle(ca_list[i - 1] - ca_list[i],
ca_list[i + 1] - ca_list[i]))
for i in range(1, len(ca_list) - 2):
taus.append(geometry.dihedral(ca_list[i - 1], ca_list[i],
ca_list[i + 1], ca_list[i + 2]))
return ds, thetas, taus
def generate_segment_from_internal_coordinates(ds, thetas, taus):
'''Generate a protein segment from a set of internal coordinates.
Return a list of Ca coordinates.
'''
# Make sure that the sizes of internal coordinates are correct
if len(ds) < 3 or len(thetas) < 2 or len(taus) < 1 \
or len(ds) != len(thetas) + 1 or len(ds) != len(taus) + 2:
raise Exception("Incompatible sizes of internal coordinates.")
# Make the first three Ca atoms
ca_list = []
    ca_list.append(ds[0] * np.array([np.sin(thetas[0]), np.cos(thetas[0]), 0]))
ca_list.append(np.array([0, 0, 0]))
ca_list.append(np.array([0, ds[1], 0]))
# Make the rest of Ca atoms
for i in range(len(taus)):
ca_list.append(geometry.cartesian_coord_from_internal_coord(
ca_list[i], ca_list[i + 1], ca_list[i + 2], ds[i + 2], thetas[i + 1], taus[i]))
return ca_list
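# Usage sketch (not part of the original module): the two functions above are
# inverses of each other up to a rigid-body transform, so a round trip through
# them reproduces the internal coordinates. Needs at least 4 Ca atoms so that
# len(ds) >= 3, len(thetas) >= 2 and len(taus) >= 1.
#
#     ds, thetas, taus = get_internal_coordinates_from_ca_list(ca_list)
#     rebuilt = generate_segment_from_internal_coordinates(ds, thetas, taus)
#     # get_internal_coordinates_from_ca_list(rebuilt) returns the same ds, thetas, taus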
def get_peptide_bond_parameters():
    '''Return a dictionary of peptide bond geometry parameters.'''
d = {'c_n_length' : 1.32869,
'n_ca_length' : 1.458,
'ca_c_length' : 1.52326,
'c_n_ca_angle' : np.radians(121.7),
'n_ca_c_angle' : np.radians(111.2),
'ca_c_n_angle' : np.radians(116.2),
'omega' : np.radians(180)}
p1 = np.array([0, 0, 0])
p2 = np.array([0, 0, d['ca_c_length']])
p3 = p2 + d['c_n_length'] * np.array([
np.sin(d['ca_c_n_angle']), 0, -np.cos(d['ca_c_n_angle'])])
p4 = geometry.cartesian_coord_from_internal_coord(
p1, p2, p3, d['n_ca_length'], d['n_ca_c_angle'], d['omega'])
d['theta_c'] = geometry.angle(p4 - p1, p2 - p1)
d['theta_n'] = geometry.angle(p1 - p4, p3 - p4)
return d
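# Quick sanity check (hedged example; the input angles simply round-trip through
# np.radians/np.degrees, while the theta_* values are derived from the geometry):
#
#     params = get_peptide_bond_parameters()
#     round(np.degrees(params['n_ca_c_angle']), 1)   # 111.2
#     round(np.degrees(params['ca_c_n_angle']), 1)   # 116.2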
def get_n_for_pp_bond_forward(ca1, ca2, v_c):
'''Get the coordinate of the N atom in a peptide bond.
Inputs are the two ends of the peptide bond and the
direction from ca1 to the position of C.
'''
params = get_peptide_bond_parameters()
x = geometry.normalize(ca1 - ca2)
y = -geometry.normalize(v_c - np.dot(v_c, x) * x)
return ca2 + params['n_ca_length'] * (np.cos(params['theta_n']) * x \
+ np.sin(params['theta_n']) * y)
def get_c_for_pp_bond_forward(ca1, ca2, v_n, z_sign=1):
'''Get the coordinate of the C atom in a peptide bond.
Inputs are the two ends of the peptide bond, the direction
from ca1 to the position of the previous N and the sign
of Z direction that is used to pick one solution from two.
'''
params = get_peptide_bond_parameters()
frame = geometry.create_frame_from_three_points(ca1 + v_n, ca1, ca2)
beta = geometry.angle(v_n, ca2 - ca1)
gamma = z_sign * np.arccos((np.cos(params['n_ca_c_angle']) - np.cos(params['theta_c']) * np.cos(beta)) \
/ (np.sin(params['theta_c']) * np.sin(beta)))
c_local = params['ca_c_length'] * np.array([np.sin(params['theta_c']) * np.cos(gamma),
np.cos(params['theta_c']), np.sin(params['theta_c']) * np.sin(gamma)])
return ca1 + np.dot(np.transpose(frame), c_local)
def get_o_for_peptide_bond(c, n, ca2):
'''Get the coordinate of the O atom in a peptide bond.'''
return geometry.cartesian_coord_from_internal_coord(ca2,
n, c, 1.24, np.radians(125), 0)
def thread_ca_list_forward(ca_list, initial_c_direction, z_sign=1):
    '''Thread backbones through a ca list. Return a list
    of residue dictionaries.
'''
params = get_peptide_bond_parameters()
# Make the initial residue
residue_list = [{'ca' : ca_list[0],
'c' : ca_list[0] + params['ca_c_length'] * geometry.normalize(initial_c_direction)}]
# Make the rest of residues
for i in range(1, len(ca_list)):
residue = {'ca' : ca_list[i]}
v_c = residue_list[i - 1]['c'] - residue_list[i - 1]['ca']
residue['n'] = get_n_for_pp_bond_forward(ca_list[i - 1], ca_list[i], v_c)
if i < len(ca_list) - 1:
residue['c'] = get_c_for_pp_bond_forward(ca_list[i], ca_list[i + 1],
residue['n'] - residue['ca'], z_sign=z_sign)
residue['o'] = get_o_for_peptide_bond(residue_list[i - 1]['c'],
residue['n'], residue['ca'])
residue_list.append(residue)
return residue_list
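# End-to-end usage sketch (assumed workflow, not from the original repo): build a
# Ca trace from internal coordinates, then thread the backbone atoms onto it. The
# initial C direction is an arbitrary unit vector and z_sign picks one of the two
# geometric solutions described in get_c_for_pp_bond_forward().
#
#     ca_list = generate_segment_from_internal_coordinates(ds, thetas, taus)
#     residues = thread_ca_list_forward(ca_list, np.array([0.0, 0.0, 1.0]), z_sign=1)
#     # each entry is a dict with 'ca', 'n', 'c', 'o' (terminal residues lack some atoms)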
| xingjiepan/ss_generator | ss_generator/ca_tracing/basic.py | Python | bsd-3-clause | 4,853 |
#from .jtest import get_2014_nuggets
#from .submissions import Updates
from .judgements import (
get_2013_nuggets, get_2014_nuggets, get_2013_matches, get_2014_matches
)
from .data import Resource, get_resource_manager
from .misc import stringify_corenlp_doc, stringify_streamcorpus_sentence
import os
import signal
import Queue
import wtmf
import gzip
import re
from sklearn.externals import joblib
import streamcorpus as sc
import corenlp.server
from itertools import izip
import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
class WTMFModelsResource(Resource):
def __init__(self):
Resource.__init__(self)
self.dir_ = os.path.join(
os.getenv(u'TREC_DATA', u'.'), u'wtmf-models')
if not os.path.exists(self.dir_):
os.makedirs(self.dir_)
self.type2fname = {
u'accident': 'accident.pkl',
u'impact event': 'astronomy.pkl',
u'bombing': 'terrorism.pkl',
u'hostage': 'terrorism.pkl',
u'shooting': 'murder.pkl',
u'protest': 'social-unrest.pkl',
u'riot': 'social-unrest.pkl',
u'storm': 'natural-disaster.pkl',
u'earthquake': 'natural-disaster.pkl',
}
def check_coverage(self, event, corpus, **kwargs):
if os.path.exists(self.get_model_path(event)):
return 1.0
else:
return 0.0
def get(self, event, corpus, **kwargs):
model_path = self.get_model_path(event)
parent_dir = os.path.dirname(model_path)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
lminputs = get_resource_manager(u'DomainLMInputResource')
lminput_path = lminputs.get_domain_lm_input_path(event)
with gzip.open(lminput_path, u'r') as f:
X = f.readlines()
vectorizer = wtmf.WTMFVectorizer(
            input='content', k=100, w_m=0.01, lam=20, max_iter=20,
tokenizer=domain_lm_input_tokenizer,
tf_threshold=2, verbose=True).fit(X)
joblib.dump(vectorizer, model_path)
def get_model_path(self, event):
return os.path.join(self.dir_, self.type2fname[event.type])
def dependencies(self):
return tuple(['DomainLMInputResource'])
def __unicode__(self):
return u"cuttsum.sentsim.WTMFModelsResource"
def domain_lm_input_tokenizer(line):
# need to filter punc
# also need to add freq filter
tokens = [token for token in line.split(' ')
if token != '-lrb-' and token != '-rrb-']
tokens = [token for token in tokens
if len(token) > 2 and not re.match(r'__.+__', token)]
return tokens
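# Example of the filtering behaviour on a hypothetical line of domain LM input:
#
#     domain_lm_input_tokenizer("-lrb- the __location__ quake hit at 5 am -rrb-")
#     # -> ['the', 'quake', 'hit']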
class SentenceStringsResource(Resource):
def __init__(self):
Resource.__init__(self)
self.dir_ = os.path.join(
os.getenv(u'TREC_DATA', u'.'), u'sentence-strings')
if not os.path.exists(self.dir_):
os.makedirs(self.dir_)
def dependencies(self):
return tuple(['WTMFModelsResource', 'ArticlesResource'])
def __unicode__(self):
return u"cuttsum.sentsim.SentenceStringsResource"
def dataframe_generator(self, event):
for hour in event.list_event_hours():
tsv_path = self.get_tsv_path(event, hour)
if os.path.exists(tsv_path):
with gzip.open(tsv_path, u'r') as f:
df = pd.io.parsers.read_csv(
f, sep='\t', quoting=3, header=0)
yield df
def get_dataframe(self, event, hour):
tsv = self.get_tsv_path(event, hour)
if not os.path.exists(tsv):
return None
else:
with gzip.open(tsv, u'r') as f:
df = pd.io.parsers.read_csv(
f, sep='\t', quoting=3, header=0)
return df
def get_tsv_path(self, event, hour):
data_dir = os.path.join(self.dir_, event.fs_name())
return os.path.join(data_dir, u'{}.tsv.gz'.format(
hour.strftime(u'%Y-%m-%d-%H')))
def check_coverage(self, event, corpus, **kwargs):
articles = get_resource_manager(u'ArticlesResource')
data_dir = os.path.join(self.dir_, event.fs_name())
n_chunks = 0
n_covered = 0
hours = event.list_event_hours()
for hour in hours:
if os.path.exists(articles.get_chunk_path(event, hour)):
n_chunks += 1
if os.path.exists(self.get_tsv_path(event, hour)):
n_covered += 1
if n_chunks == 0:
return 0
else:
return float(n_covered) / n_chunks
def get(self, event, corpus, n_procs=1, progress_bar=False, **kwargs):
articles = get_resource_manager(u'ArticlesResource')
data_dir = os.path.join(self.dir_, event.fs_name())
if not os.path.exists(data_dir):
os.makedirs(data_dir)
jobs = []
for hour in event.list_event_hours():
article_chunk_path = articles.get_chunk_path(event, hour)
if os.path.exists(article_chunk_path):
tsv_path = self.get_tsv_path(event, hour)
if os.path.exists(tsv_path):
continue
jobs.append((article_chunk_path, tsv_path))
if corenlp.server.check_status() is False:
print "starting corenlp.server"
corenlp.server.start(
mem="20G", threads=n_procs + 15,
annotators=['tokenize', 'ssplit', 'pos', 'lemma', 'ner'])
self.do_work(sentencestring_worker_, jobs, n_procs, progress_bar,
corpus=corpus)
from .geo import get_loc_sequences
def sentencestring_worker_(job_queue, result_queue, **kwargs):
signal.signal(signal.SIGINT, signal.SIG_IGN)
corpus = kwargs.get(u'corpus')
cnlp = corenlp.server.CoreNLPClient()
while not job_queue.empty():
try:
chunk_path, tsv_path = job_queue.get(block=False)
sent_string_data = []
for si in sc.Chunk(path=chunk_path, message=corpus.sc_msg()):
sentences = corpus.get_sentences(si)
str2idx = {}
for idx, sentence in enumerate(sentences):
key = stringify_streamcorpus_sentence(sentence)
str2idx[key] = idx
for sentence in si.body.sentences[u'article-clf']:
sc_string = stringify_streamcorpus_sentence(sentence)
idx = str2idx[sc_string]
#print idx, ")", sc_string
doc = cnlp.annotate(sc_string)
locs = get_loc_sequences(doc)
if len(locs) > 0:
locs_string = (u','.join(locs)).encode(u'utf-8')
else:
locs_string = 'nan'
cnlp_string = stringify_corenlp_doc(doc)
#print cnlp_string
sent_string_data.append({
u'stream id': si.stream_id, u'sentence id': idx,
u'streamcorpus': sc_string,
u'corenlp': cnlp_string,
u'locations': locs_string})
if len(sent_string_data) > 0:
df = pd.DataFrame(
sent_string_data,
columns=[u'stream id', u'sentence id',
u'streamcorpus', u'corenlp', u'locations'])
with gzip.open(tsv_path, u'w') as f:
df.to_csv(f, sep='\t', index=False, index_label=False)
result_queue.put(None)
except Queue.Empty:
pass
# for job in jobs:
# articles_chunk, tsv_path = job
# sentence_meta = []
# sentence_strings = []
# for si in sc.Chunk(path=articles_chunk, message=corpus.sc_msg()):
# si.stream_id
#
# sentences = corpus.get_sentences(si)
# str2idx = {}
# for idx, sentence in enumerate(sentences):
# key = ' '.join(token.token for token in sentence.tokens)
# str2idx[key] = idx
#
# for sentence in si.body.sentences[u'article-clf']:
# key = ' '.join(token.token for token in sentence.tokens)
# idx = str2idx[key]
# #print idx, ")", key
# doc = cnlp.annotate(key)
# norm_tokens = []
# for sent in doc:
# for token in sent:
# if token.ne == 'O':
# words = token.lem.split(u'_')
# for word in words:
# if word != u'':
# norm_tokens.append(word.lower())
# else:
# norm_tokens.append(
# u'__{}__'.format(token.ne.lower()))
# sentence_strings.append(
# (u' '.join(norm_tokens)).encode(u'utf-8'))
# sentence_meta.append((si.stream_id, idx))
class SentenceLatentVectorsResource(Resource):
def __init__(self):
#Resource.__init__(self)
self.deps_met_ = set()
self.dir_ = os.path.join(
os.getenv(u'TREC_DATA', u'.'), u'sentence-latent-vectors')
if not os.path.exists(self.dir_):
os.makedirs(self.dir_)
def dataframe_generator(self, event):
for hour in event.list_event_hours():
tsv_path = self.get_tsv_path(event, hour)
if os.path.exists(tsv_path):
with gzip.open(tsv_path, u'r') as f:
df = pd.io.parsers.read_csv(
f, sep='\t', quoting=3, header=0)
yield df
def get_dataframe(self, event, hour):
tsv = self.get_tsv_path(event, hour)
if not os.path.exists(tsv):
return None
else:
with gzip.open(tsv, u'r') as f:
df = pd.io.parsers.read_csv(
f, sep='\t', quoting=3, header=0)
return df
def get_tsv_path(self, event, hour):
data_dir = os.path.join(self.dir_, event.fs_name())
return os.path.join(data_dir, u'{}.tsv.gz'.format(
hour.strftime(u'%Y-%m-%d-%H')))
def dependencies(self):
return tuple(['SentenceStringsResource'])
def __unicode__(self):
return u"cuttsum.sentsim.SentenceLatentVectorsResource"
def check_coverage(self, event, corpus, **kwargs):
sentencestrings = get_resource_manager(u'SentenceStringsResource')
data_dir = os.path.join(self.dir_, event.fs_name())
n_files = 0
n_covered = 0
hours = event.list_event_hours()
for hour in hours:
if os.path.exists(sentencestrings.get_tsv_path(event, hour)):
n_files += 1
if os.path.exists(self.get_tsv_path(event, hour)):
n_covered += 1
if n_files == 0:
return 0
else:
return float(n_covered) / n_files
def get(self, event, corpus, overwrite=False, n_procs=1,
progress_bar=False, **kwargs):
sentencestrings = get_resource_manager(u'SentenceStringsResource')
data_dir = os.path.join(self.dir_, event.fs_name())
if not os.path.exists(data_dir):
os.makedirs(data_dir)
jobs = []
for hour in event.list_event_hours():
strings_tsv_path = sentencestrings.get_tsv_path(event, hour)
lvec_tsv_path = self.get_tsv_path(event, hour)
if os.path.exists(strings_tsv_path):
if overwrite is True or not os.path.exists(lvec_tsv_path):
jobs.append((strings_tsv_path, lvec_tsv_path))
self.do_work(sentencelvec_worker_, jobs, n_procs, progress_bar,
event=event)
def sentencelvec_worker_(job_queue, result_queue, **kwargs):
signal.signal(signal.SIGINT, signal.SIG_IGN)
event = kwargs.get(u'event')
wtmf_models = get_resource_manager(u'WTMFModelsResource')
model_path = wtmf_models.get_model_path(event)
vectorizer = joblib.load(model_path)
while not job_queue.empty():
try:
strings_tsv, lvec_tsv_path = job_queue.get(block=False)
vecs_data = []
with gzip.open(strings_tsv, u'r') as f:
strings_df = pd.io.parsers.read_csv(
f, sep='\t', quoting=3, header=0)
strings = strings_df[u'corenlp'].tolist()
Xstrings = vectorizer.transform(strings)
for (_, r), xstrings in izip(strings_df.iterrows(), Xstrings):
vec_data = {idx: val for idx, val in enumerate(xstrings, 0)}
vec_data[u'stream id'] = r[u'stream id']
vec_data[u'sentence id'] = r[u'sentence id']
vecs_data.append(vec_data)
ndims = Xstrings.shape[1]
names = [u'stream id', u'sentence id' ] + range(ndims)
df = pd.DataFrame(vecs_data, columns=names)
with gzip.open(lvec_tsv_path, u'w') as f:
df.to_csv(f, sep='\t', index=False, index_label=False)
result_queue.put(None)
except Queue.Empty:
pass
return True
class NuggetSimilaritiesResource(Resource):
def __init__(self):
Resource.__init__(self)
self.dir_ = os.path.join(
os.getenv(u'TREC_DATA', u'.'), u'nugget-similarities')
if not os.path.exists(self.dir_):
os.makedirs(self.dir_)
def dependencies(self):
return tuple([u'SentenceLatentVectorsResource',
u'SentenceStringsResource' ])
def __unicode__(self):
return u"cuttsum.sentsim.NuggetSimilaritiesResource"
def get_tsv_path(self, event, hour):
data_dir = os.path.join(self.dir_, event.fs_name())
return os.path.join(data_dir, u'{}.tsv.gz'.format(
hour.strftime(u'%Y-%m-%d-%H')))
def get_dataframe(self, event, hour):
tsv = self.get_tsv_path(event, hour)
if not os.path.exists(tsv):
return None
else:
with gzip.open(tsv, u'r') as f:
df = pd.io.parsers.read_csv(
f, sep='\t', quoting=3, header=0)
return df
def get_matches(self, event, corpus):
if corpus.year_ == 2014:
df = get_2014_matches()
elif corpus.year_ == 2013:
df = get_2013_matches()
return df.loc[df[u'query id'] == event.query_id]
def get_nuggets(self, event, corpus):
if corpus.year_ == 2014:
df = get_2014_nuggets()
elif corpus.year_ == 2013:
df = get_2013_nuggets()
return df.loc[df[u'query id'] == event.query_id]
def get_nugget_latent_vectors_(self, nuggets, vectorizer):
if corenlp.server.check_status() is False:
print "starting corenlp.server"
corenlp.server.start(
mem="20G", threads=20,
annotators=['tokenize', 'ssplit', 'pos', 'lemma', 'ner'])
texts = nuggets[u'text'].tolist()
processed_texts = []
cnlp = corenlp.server.CoreNLPClient()
processed_texts = [stringify_corenlp_doc(cnlp.annotate(text))
for text in texts]
return vectorizer.transform(processed_texts)
def check_coverage(self, event, corpus, **kwargs):
lvecs = get_resource_manager(u'SentenceLatentVectorsResource')
data_dir = os.path.join(self.dir_, event.fs_name())
n_chunks = 0
n_covered = 0
for hour in event.list_event_hours():
if os.path.exists(lvecs.get_tsv_path(event, hour)):
n_chunks += 1
if os.path.exists(self.get_tsv_path(event, hour)):
n_covered += 1
if n_chunks == 0:
return 0
else:
return float(n_covered) / n_chunks
def get(self, event, corpus, overwrite=False, n_procs=1,
progress_bar=False, **kwargs):
lvecs = get_resource_manager(
u'SentenceLatentVectorsResource')
strings = get_resource_manager(u'SentenceStringsResource')
data_dir = os.path.join(self.dir_, event.fs_name())
if not os.path.exists(data_dir):
os.makedirs(data_dir)
jobs = []
for hour in event.list_event_hours():
lvec_tsv_path = lvecs.get_tsv_path(event, hour)
strings_tsv_path = strings.get_tsv_path(event, hour)
nsim_tsv_path = self.get_tsv_path(event, hour)
if os.path.exists(lvec_tsv_path):
if overwrite is True or not os.path.exists(nsim_tsv_path):
jobs.append(
(lvec_tsv_path, strings_tsv_path, nsim_tsv_path))
self.do_work(nuggetsim_worker_, jobs, n_procs, progress_bar,
event=event, corpus=corpus)
def nuggetsim_worker_(job_queue, result_queue, **kwargs):
signal.signal(signal.SIGINT, signal.SIG_IGN)
event = kwargs.get(u'event')
corpus = kwargs.get(u'corpus')
nsims = get_resource_manager(u'NuggetSimilaritiesResource')
wtmf_models = get_resource_manager(u'WTMFModelsResource')
model_path = wtmf_models.get_model_path(event)
vectorizer = joblib.load(model_path)
nuggets = nsims.get_nuggets(event, corpus)
nugget_id_list = nuggets[u'nugget id'].tolist()
Xnuggets = nsims.get_nugget_latent_vectors_(nuggets, vectorizer)
matches = nsims.get_matches(event, corpus)
def check_assessor_matches(stream_id, sentence_id):
update_id = "{}-{}".format(stream_id, sentence_id)
assessor_matches = matches[matches[u'update id'].str.match(update_id)]
n_matches = len(assessor_matches)
if n_matches > 0:
return {nid: 1.0 for nid
in assessor_matches[u'nugget id'].tolist()}
return dict()
while not job_queue.empty():
try:
lvec_tsv_path, strings_tsv_path, nsim_tsv_path = \
job_queue.get(block=False)
with gzip.open(lvec_tsv_path, u'r') as f:
lvec_df = pd.io.parsers.read_csv(
f, sep='\t', quoting=3, header=0)
with gzip.open(strings_tsv_path, u'r') as f:
strings_df = pd.io.parsers.read_csv(
f, sep='\t', quoting=3, header=0)
Xstrings = lvec_df.ix[:,2:].as_matrix()
S = cosine_similarity(Xnuggets, Y=Xstrings)
sentence_sim_data = []
for (ii, lvec_row), (_, strings_row) in izip(
lvec_df.iterrows(), strings_df.iterrows()):
assert lvec_row[u'stream id'] == strings_row[u'stream id']
assert lvec_row[u'sentence id'] == strings_row[u'sentence id']
sentence_sim_dict = {nugget_id_list[nugget_idx]: nugget_sim
for nugget_idx, nugget_sim
in enumerate(S[:, ii])}
nugget_ids = check_assessor_matches(
lvec_row[u'stream id'], lvec_row[u'sentence id'])
sentence_sim_dict.update(nugget_ids.items())
sentence_sim_dict[u'stream id'] = lvec_row[u'stream id']
sentence_sim_dict[u'sentence id'] = lvec_row[u'sentence id']
sentence_sim_data.append(sentence_sim_dict)
### Need to remove this when I confirm 2013 matches are good
writeme = "/local/nlp/kedzie/check_matches/" + \
event.fs_name() + "." + os.path.basename(lvec_tsv_path)
if len(nugget_ids) > 0:
with gzip.open(writeme, u'w') as f:
f.write(strings_row[u'streamcorpus'] + '\n')
for nugget_id in nugget_ids:
nugget_texts = nuggets[ \
nuggets['nugget id'].str.match(
nugget_id)]['text'].tolist()
for text in nugget_texts:
f.write("\t" + text + '\n')
names = [u'stream id', u'sentence id'] + nugget_id_list
df = pd.DataFrame(sentence_sim_data, columns=names)
with gzip.open(nsim_tsv_path, u'w') as f:
df.to_csv(f, sep='\t', index=False, index_label=False)
result_queue.put(None)
except Queue.Empty:
pass
return True
|
kedz/cuttsum
|
trec2014/python/cuttsum/sentsim.py
|
Python
|
apache-2.0
| 21,038
|
from vsg import token
from vsg.rules import single_space_between_token_pairs
lTokens = []
lTokens.append([token.library_clause.keyword, token.logical_name_list.logical_name])
class rule_002(single_space_between_token_pairs):
'''
This rule checks for excessive spaces after the **library** keyword.
**Violation**
.. code-block:: vhdl
library    ieee;
**Fix**
.. code-block:: vhdl
library ieee;
'''
def __init__(self):
single_space_between_token_pairs.__init__(self, 'library', '002', lTokens)
self.solution = 'Ensure a single space between the *library* keyword and the logical_name.'
|
jeremiah-c-leary/vhdl-style-guide
|
vsg/rules/library/rule_002.py
|
Python
|
gpl-3.0
| 657
|
#!/usr/bin/env python
#encoding=utf8
#Copyright [2014] [Wei Zhang]
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
###################################################################
# Date: 2014/6/14 #
# Combine event raw attributes info with intro text segmentation #
# results. #
###################################################################
import csv
from collections import defaultdict
def main():
infile1 = "./Event/eventInfo.csv"
infile2 = "./Event/eventInfo.WDS.csv"
outfile1 = "/home/anthonylife/Doctor/Code/MyPaperCode/EventPopularity/data/eventInfo.csv"
event_info = defaultdict(list)
idx = 0
for line in open(infile1):
parts = line.strip("\r\t\n").split(",")
if idx == 0:
header = parts
idx += 1
continue
eid = parts[0]
event_info[eid] = parts[1:10]
header = header + ["entity"]
writer = csv.writer(open(outfile1, "w"), lineterminator="\n")
writer.writerow(header)
for line in open(infile2):
parts = line.strip("\r\t\n").split(",")
eid = parts[0]
info_content = parts[2]
entity_content = parts[4]
writer.writerow([eid]+event_info[eid]+[info_content]+[entity_content])
if __name__ == "__main__":
main()
|
anthonylife/EventRecommendation
|
script/combineEventInfo.py
|
Python
|
apache-2.0
| 1,864
|
"""Glyph-specific queries on font-files"""
from ttfquery import describe
try:
from OpenGLContext.debug.logs import text_log
except ImportError:
text_log = None
def hasGlyph( font, char, encoding=None ):
"""Check to see if font appears to have explicit glyph for char"""
glyfName = explicitGlyph( font, char, encoding )
if glyfName is None:
return False
return True
def explicitGlyph( font, char, encoding=None ):
"""Return glyphName or None if there is not explicit glyph for char"""
cmap = font['cmap']
if encoding is None:
encoding = describe.guessEncoding( font )
table = cmap.getcmap( *encoding )
glyfName = table.cmap.get( ord(char))
return glyfName
def glyphName( font, char, encoding=None, warnOnFailure=1 ):
"""Retrieve the glyph name for the given character
XXX
Not sure what the effect of the Unicode mapping
will be given the use of ord...
"""
glyfName = explicitGlyph( font, char, encoding )
if glyfName is None:
encoding = describe.guessEncoding( font ) #KH
cmap = font['cmap'] #KH
table = cmap.getcmap( *encoding ) #KH
glyfName = table.cmap.get( -1)
if glyfName is None:
glyfName = font['glyf'].glyphOrder[0]
if text_log and warnOnFailure:
text_log.warn(
"""Unable to find glyph name for %r, in %r using first glyph in table (%r)""",
char,
describe.shortName(font),
glyfName
)
return glyfName
def width( font, glyphName ):
"""Retrieve the width of the giving character for given font
The horizontal metrics table provides both the
width and the left side bearing, we should really
be using the left side bearing to adjust the
character, but that's a later project.
"""
try:
return font['hmtx'].metrics[ glyphName ][0]
except KeyError:
raise ValueError( """Couldn't find glyph for glyphName %r"""%(
glyphName,
))
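# Illustrative usage sketch (not in the original module); the font object and
# the file path below are assumptions, with the font loaded via ttfquery's
# describe.openFont:
#   font = describe.openFont('/path/to/font.ttf')
#   name = glyphName(font, 'A')
#   advance = width(font, name)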
def lineHeight( font ):
"""Get the base-line to base-line height for the font
XXX
There is some fudging going on here as I
work around what appears to be a problem with the
specification for sTypoDescender, which states
that it should normally be a negative value, but
winds up being positive in at least one font that
defines points below the zero axis.
XXX The entire OS/2 table doesn't appear in a few
fonts (symbol fonts in particular), such as Corel's
BeeHive and BlackLight 686.
"""
return charHeight(font) + font['OS/2'].sTypoLineGap
def charHeight( font ):
"""Determine the general character height for the font (for scaling)"""
ascent = font['OS/2'].sTypoAscender
descent = font['OS/2'].sTypoDescender
if descent > 0:
descent = - descent
return ascent - descent
def charDescent( font ):
"""Determine the general descent for the font (for scaling)"""
return font['OS/2'].sTypoDescender
|
stack-of-tasks/rbdlpy
|
tutorial/lib/python2.7/site-packages/ttfquery/glyphquery.py
|
Python
|
lgpl-3.0
| 3,146
|
"""
Package lib
Non-game-specific code.
"""
|
Trilarion/imperialism-remake
|
source/imperialism_remake/lib/__init__.py
|
Python
|
gpl-3.0
| 45
|
def get_point_cloud(point_cloud_id, number_points=None, history=None):
from voxel_globe.meta import models
from vpgl_adaptor import convert_local_to_global_coordinates_array, create_lvcs
import os
import numpy as np
from plyfile import PlyData
point_cloud = models.PointCloud.objects.get(id=point_cloud_id).history(history)
lvcs = create_lvcs(point_cloud.origin[1], point_cloud.origin[0], point_cloud.origin[2], 'wgs84')
ply = PlyData.read(str(os.path.join(point_cloud.directory, 'error.ply')))
data = ply.elements[0].data
if number_points:
try:
import heapq
data = np.array(heapq.nlargest(number_points, ply.elements[0].data,
key=lambda x:x['prob']))
except IndexError: #not a correctly formatted ply file. HACK A CODE!
#This is a hack-a-code for Tom's ply file
data = ply.elements[0].data.astype([('x', '<f4'), ('y', '<f4'),
('z', '<f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1'),
('prob', '<f4')])
import copy
blah = copy.deepcopy(data['y'])
data['y'] = data['z']
data['z'] = -blah
blah = copy.deepcopy(data['blue'])
data['blue'] = data['green']
data['green'] = blah
data['prob'] = abs(data['x'] - 10 - sum(data['x'])/len(data['x'])) \
+ abs(data['y'] + 30 - sum(data['y'])/len(data['y'])) \
+ abs(data['z'] - sum(data['z'])/len(data['z']))
data['prob'] = max(data['prob']) - data['prob']
data = np.array(heapq.nlargest(number_points, data,
key=lambda x:x['prob']))
print data['prob']
lla = convert_local_to_global_coordinates_array(lvcs, data['x'].tolist(), data['y'].tolist(), data['z'].tolist());
latitude = np.array(lla[0])
longitude = np.array(lla[1])
altitude = np.array(lla[2])
color = map(lambda r,b,g:'#%02x%02x%02x' % (r, g, b), data['red'], data['green'], data['blue'])
return_data = {"latitude": latitude, "longitude": longitude,
"altitude": altitude, "color": color}
try:
return_data['le'] = data['le']
except ValueError:
return_data['le'] = (-np.ones(len(latitude))).tolist()
try:
return_data['ce'] = data['ce']
except ValueError:
return_data['ce'] = (-np.ones(len(latitude))).tolist()
return return_data
|
andyneff/voxel-globe
|
voxel_globe/voxel_viewer/tools.py
|
Python
|
mit
| 2,354
|
# some system functions
import os
# to hash test
from martelo import hash
import pickle
from pprint import pprint
import requests
from soupselect import select
from BeautifulSoup import BeautifulSoup
from martelo import data2xls
import multiprocessing
# load the data collected in the first part
with open('data.pickle', 'r') as h:
data = pickle.load(h)
result = []
def process(d, i=None):
''' function to process one entry of the table '''
# to give a small indication that this is still working (output)
if i:
print '%s' % i
else:
print '.'
# extraction of the link of interest
link = d['penalty_notice_link']
# if we haven't downloaded the link yet, we do it and keep it in an html file in the temp folder
if not os.path.exists('./temp/%s.html' % hash(link)):
r = requests.get(link)
with open('./temp/%s.html' % hash(link), 'w') as h:
h.write(r.text.encode('utf-8'))
# load the html markup
with open('./temp/%s.html' % hash(link), 'r') as h:
source = h.read()
# if we haven't previously extracted the info, we do it now
if not os.path.exists('./temp/%s.pickle' % hash(link)):
# extracting the info usually works the same way:
# - use BeautifulSoup to create the soup of the source
# - use select and some css classes/ids to extract info
# => that's exactly what is done below
soup = BeautifulSoup(source)
div = select(soup, 'div.cim_content')[0]
table = select(div, 'table')[0]
rows = select(table, 'tr')
address = str(select(rows[2], 'td')[-1].contents[0])
offence_code = str(select(rows[5], 'td')[-1].contents[0])
nature = str(select(rows[6], 'td')[-1].contents[0])
amount = str(select(rows[7], 'td')[-1].contents[0])
data_penalty = str(select(rows[9], 'td')[-1].contents[0])
issued_by = str(select(rows[10], 'td')[-1].contents[0])
d['address'] = address
d['offence_code'] = offence_code
d['nature'] = nature
d['amount'] = amount
d['data_penalty'] = data_penalty
d['issued_by'] = issued_by
with open('./temp/%s.pickle' % hash(link), 'w') as h:
pickle.dump(d, h)
else:
# we have previously extracted the info, we simply load it avoiding extra work
with open('./temp/%s.pickle' % hash(link), 'r') as h:
d = pickle.load(h)
return d
# to download the data and process it using multiple processes
pool = multiprocessing.Pool()
result = pool.map(process, data)
print 'saving results'
data2xls(result, 'result.xls', sample=True)
|
mintyPT/scrapers
|
foodauthority.nsw.gov.au/app2.py
|
Python
|
mit
| 2,650
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields, api
from openerp.addons import decimal_precision as dp
class StockMove(models.Model):
_inherit = 'stock.move'
product_uop_qty = fields.Float(
string='Quantity (UoP)', states={'done': [('readonly', True)]},
digits=dp.get_precision('Product Unit of Measure'))
product_uop = fields.Many2one(
comodel_name='product.uom', string='Product UoP',
states={'done': [('readonly', True)]})
@api.one
@api.onchange('product_uop_qty')
def onchange_uop_qty(self):
if self.product_id:
self.product_uom_qty = (self.product_uop_qty /
self.product_id.uop_coeff)
@api.multi
def onchange_quantity(
self, product_id, product_qty, product_uom, product_uos):
res = super(StockMove, self).onchange_quantity(
product_id, product_qty, product_uom, product_uos)
if 'value' in res and self.product_id:
res['value'].update({
'product_uop_qty': product_qty * self.product_id.uop_coeff})
return res
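# Illustrative example (hypothetical figures, not part of the original module):
# with a product whose uop_coeff is 12.0, entering product_uop_qty = 24 in
# onchange_uop_qty sets product_uom_qty to 2.0, while onchange_quantity reports
# product_uop_qty = product_qty * 12.0 back to the client.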
|
esthermm/odoomrp-wip
|
stock_purchase_unit/models/stock_move.py
|
Python
|
agpl-3.0
| 1,968
|
#!/usr/bin/env python
#
# 3 - Left Motor Servo
# 4 - Right Motor Servo
# 5 - Pan Servo
# 14 (A0) - Analog Sensor
# 15 (A1) - Left Whisker
# 16 (A2) - Right Whisker
SIMULATE = 1
SENSOR_PIN = 14
L_BUMP = 15
R_BUMP = 16
SENSOR_THRESH = 400
DELAY_VAL = 700 # Delay for turning
if SIMULATE:
import time
import sys
import select
import termios
def Delay(msec):
time.sleep(msec / 1000.0)  # float division so short delays are not truncated to zero
class Servo:
def __init__(self):
self._pos = 90
def angle(self, pos):
self._pos = pos;
def getAngle(self):
return self._pos
simBumpL = 0
simBumpR = 0
simSensor = 0
motorStr = ''
prevSimBumpL = 0
prevSimBumpR = 0
prevSensor = 0
prevMotorStr = ''
throbberStr = " .oO"
throbberIdx = 0
def ReadBump(pin):
global simBumpL
global simBumpR
global simSensor
global motorStr
global prevSimBumpL
global prevSimBumpR
global prevSensor
global prevMotorStr
global throbberIdx
events = epoll.poll(timeout=0)
for fileno, _ in events:
if fileno == sys.stdin.fileno():
data = sys.stdin.read(1)
ch = data[0]
if ch == 'l':
simBumpL = 1 - simBumpL
if ch == 'r':
simBumpR = 1 - simBumpR
if ch >= '0' and ch <= '9':
simSensor = (ord(ch) - ord('0')) * 100
#if (prevSimBumpL != simBumpL or
# prevSimBumpR != simBumpR or
# prevSensor != simSensor or
# prevMotorStr != motorStr):
# sys.stdout.write('\n')
# prevSimBumpL = simBumpL
# prevSimBumpR = simBumpR
# prevSensor = simSensor
# prevMotorStr = motorStr
panAngle = panServo.getAngle()
sys.stdout.write("\rMotors: %s Pan: %3d Bump L: %d R: %d Sensor: %3d %c\n" %(motorStr, panAngle, simBumpL, simBumpR, simSensor, throbberStr[throbberIdx]))
sys.stdout.flush()
throbberIdx = (throbberIdx + 1) % 4
if pin == L_BUMP:
return simBumpL
else:
return simBumpR
def ReadSensor():
return simSensor
panServo = Servo()
stdin_fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(stdin_fd)
new_settings = termios.tcgetattr(stdin_fd)
new_settings[3] &= ~(termios.ICANON | termios.ECHO)
new_settings[6][termios.VTIME] = 0
new_settings[6][termios.VMIN] = 1
termios.tcsetattr(stdin_fd, termios.TCSANOW, new_settings)
epoll = select.epoll()
epoll.register(sys.stdin.fileno(), select.POLLIN)
else:
LServo = pyb.Servo()
LServo.attach(3)
RServo = pyb.Servo()
RServo.attach(4);
panServo = pyb.Servo()
panServo.attach(5)
def Delay(msec):
pyb.delay(msec)
def ReadBump(pin):
return pyb.gpio(pin)
def ReadSensor():
return pyb.analogRead(SENSOR_PIN)
panServo.angle(90)
Delay(250) # Give the pan servo time to get to where it's going
def BumpTest():
bump_L = ReadBump(L_BUMP)
bump_R = ReadBump(R_BUMP)
while bump_L or bump_R:
if bump_L:
MotorsR()
else:
MotorsL()
Delay(100)
bump_L = ReadBump(L_BUMP)
bump_R = ReadBump(R_BUMP)
def MotorsFwd():
global motorStr
if SIMULATE:
motorStr = "Forward ";
else:
LServo.angle(180)
RServo.angle(0)
def MotorsBwd():
global motorStr
if SIMULATE:
motorStr = "Backward";
else:
LServo.angle(0)
RServo.angle(180)
def MotorsL():
global motorStr
if SIMULATE:
motorStr = "Left ";
else:
LServo.angle(180)
RServo.angle(180)
def MotorsR():
global motorStr
if SIMULATE:
motorStr = "Right ";
else:
LServo.angle(0)
RServo.angle(0)
def StopMotors():
global motorStr
if SIMULATE:
motorStr = "Stop ";
else:
LServo.angle(90)
RServo.angle(90)
def Roaming():
pos = 90
pos_incr = 1
while True:
panServo.angle(pos)
BumpTest()
MotorsFwd()
sensor_val = ReadSensor()
if sensor_val > SENSOR_THRESH:
if pos > 90:
MotorsL()
else:
MotorsR()
Delay(DELAY_VAL)
Delay(10) # This delay helps overcome current spikes from the servos
Delay(1000)
if pos == 135:
pos_incr = -1
elif pos == 0:
pos_incr = 1
pos += pos_incr
Roaming();
|
dhylands/DragonTail
|
DragonSim.py
|
Python
|
mit
| 4,656
|
''' Whole Body Motion: Multiple Effectors control '''
import argparse
import motion
import almath
import time
from naoqi import ALProxy
def main(robotIP, PORT=9559):
'''
Example of a whole body multiple effectors control "LArm", "RArm" and "Torso"
Warning: Needs a PoseInit before executing
Whole body balancer must be inactivated at the end of the script
'''
motionProxy = ALProxy("ALMotion", robotIP, PORT)
postureProxy = ALProxy("ALRobotPosture", robotIP, PORT)
# end initialize proxy, begin go to Stand Init
# Wake up robot
motionProxy.wakeUp()
# Send robot to Stand Init
id = postureProxy.post.goToPosture("Stand", 0.5)
postureProxy.wait(id, 0)
# end go to Stand Init, begin initialize whole body
# Enable Whole Body Balancer
isEnabled = True
motionProxy.wbEnable(isEnabled)
# Legs are constrained fixed
stateName = "Fixed"
supportLeg = "Legs"
motionProxy.wbFootState(stateName, supportLeg)
# Constraint Balance Motion
isEnable = True
supportLeg = "Legs"
motionProxy.wbEnableBalanceConstraint(isEnable, supportLeg)
# end initialize whole body, define arms motions
useSensorValues = False
# ***************************************************************
# effectorList = ["LArm"]
# arm = "LArm"
# axisDirection = 1 # +1 for LArm, -1 for RArm
# frame = motion.FRAME_WORLD
# currentTf = motionProxy.getTransform(arm, frame, useSensorValues)
for i in range(5):
print i
# Arms motion
effectorList = ["LArm"]
arm = "LArm"
axisDirection = 1 # +1 for LArm, -1 for RArm
frame = motion.FRAME_WORLD
pathArm = []
currentTf = motionProxy.getTransform(arm, frame, useSensorValues)
# 1 - arm ready out front
target1Tf = almath.Transform(currentTf)
target1Tf.r1_c4 += 0.05 # x
target1Tf.r2_c4 += 0.00 * axisDirection # y
target1Tf.r3_c4 += 0.00 # z
# 2 - arm back
target2Tf = almath.Transform(currentTf)
target2Tf.r1_c4 += 0.00
target2Tf.r2_c4 += 0.15
target2Tf.r3_c4 += 0.15
# 3 - arm to ball using ball.y
target3Tf = almath.Transform(currentTf)
target3Tf.r1_c4 += 0.05
target3Tf.r2_c4 += 0.00 * axisDirection
target3Tf.r3_c4 += 0.10
pathArm.append(list(target1Tf.toVector()))
pathArm.append(list(target2Tf.toVector()))
pathArm.append(list(target3Tf.toVector()))
pathList = [pathArm]
axisMaskList = [almath.AXIS_MASK_VEL]
coef = 1.5
timesList = [coef * (i + 1) for i in range(len(pathArm))]
# And move!
id = motionProxy.post.transformInterpolations(effectorList, frame, pathArm, axisMaskList, timesList)
motionProxy.wait(id, 0)
# It is necessary to return the robot to the start position so the next target
# positions are not added to the last move position.
# id = postureProxy.post.goToPosture("Stand", 0.75)
# postureProxy.wait(id, 0)
# ***************************************************************
# ***************************************************************
# effectorList = ["LArm", "RArm"]
# frame = motion.FRAME_ROBOT
# # pathLArm
# pathLArm = []
# currentTf = motionProxy.getTransform("LArm", frame, useSensorValues)
# # 1
# target1Tf = almath.Transform(currentTf)
# target1Tf.r1_c4 += 0.00 # x?
# target1Tf.r2_c4 += 0.00 # y
# target1Tf.r3_c4 += 0.00 # z
# # 2
# target2Tf = almath.Transform(currentTf)
# target2Tf.r1_c4 += 0.20 # x?
# target2Tf.r2_c4 -= 0.00 # y
# target2Tf.r3_c4 += 0.20 # z
# pathLArm.append(list(target1Tf.toVector()))
# pathLArm.append(list(target2Tf.toVector()))
# pathLArm.append(list(target1Tf.toVector()))
# pathLArm.append(list(target2Tf.toVector()))
# pathLArm.append(list(target1Tf.toVector()))
# # pathRArm
# pathRArm = []
# currentTf = motionProxy.getTransform("RArm", frame, useSensorValues)
# # 1
# target1Tf = almath.Transform(currentTf)
# target1Tf.r1_c4 += 0.00 # x?
# target1Tf.r2_c4 += 0.00 # y
# target1Tf.r3_c4 += 0.00 # z
# # 2
# target2Tf = almath.Transform(currentTf)
# target2Tf.r1_c4 += 0.00 # x?
# target2Tf.r2_c4 -= 0.20 # y
# target2Tf.r3_c4 += 0.20 # z
# pathRArm.append(list(target1Tf.toVector()))
# pathRArm.append(list(target2Tf.toVector()))
# pathRArm.append(list(target1Tf.toVector()))
# pathRArm.append(list(target2Tf.toVector()))
# pathRArm.append(list(target1Tf.toVector()))
# pathRArm.append(list(target2Tf.toVector()))
# pathList = [pathLArm, pathRArm]
# axisMaskList = [almath.AXIS_MASK_VEL, # for "LArm"
# almath.AXIS_MASK_VEL] # for "RArm"
# coef = 1.5
# timesList = [ [coef*(i+1) for i in range(5)], # for "LArm" in seconds
# [coef*(i+1) for i in range(6)] ] # for "RArm" in seconds
# # called cartesian interpolation
# motionProxy.transformInterpolations(effectorList, frame, pathList, axisMaskList, timesList)
# ***************************************************************
# ***************************************************************
# end define arms motions, define torso motion
# # Torso Motion
# effectorList = ["Torso", "LArm", "RArm"]
# dy = 0.06
# dz = 0.06
# # pathTorso
# currentTf = motionProxy.getTransform("Torso", frame, useSensorValues)
# # 1
# target1Tf = almath.Transform(currentTf)
# target1Tf.r2_c4 += dy
# target1Tf.r3_c4 -= dz
# # 2
# target2Tf = almath.Transform(currentTf)
# target2Tf.r2_c4 -= dy
# target2Tf.r3_c4 -= dz
# pathTorso = []
# for i in range(3):
# pathTorso.append(list(target1Tf.toVector()))
# pathTorso.append(currentTf)
# pathTorso.append(list(target2Tf.toVector()))
# pathTorso.append(currentTf)
# pathLArm = [motionProxy.getTransform("LArm", frame, useSensorValues)]
# pathRArm = [motionProxy.getTransform("RArm", frame, useSensorValues)]
# pathList = [pathTorso, pathLArm, pathRArm]
# axisMaskList = [almath.AXIS_MASK_ALL, # for "Torso"
# almath.AXIS_MASK_VEL, # for "LArm"
# almath.AXIS_MASK_VEL] # for "RArm"
# coef = 0.5
# timesList = [
# [coef*(i+1) for i in range(12)], # for "Torso" in seconds
# [coef*12], # for "LArm" in seconds
# [coef*12] # for "RArm" in seconds
# ]
# motionProxy.transformInterpolations(effectorList, frame, pathList, axisMaskList, timesList)
# # end define torso motion, disable whole body
# Deactivate whole body
isEnabled = False
motionProxy.wbEnable(isEnabled)
# Send robot to Pose Init
postureProxy.goToPosture("Stand", 0.5)
# Go to rest position
motionProxy.rest()
# end script
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ip", type=str, default="mistcalf.local",
help="Robot ip address")
parser.add_argument("--port", type=int, default=9559,
help="Robot port number")
args = parser.parse_args()
main(args.ip, args.port)
|
mikemcfarlane/Code_sprints
|
Kivy/almotion_wbMultipleEffectors.py
|
Python
|
gpl-2.0
| 7,516
|
from sympy import sin, cos, atan2, log, exp, gamma, conjugate, sqrt, \
factorial, Integral, Piecewise, Add, diff, symbols, S, Float, Dummy, Eq
from sympy import Catalan, EulerGamma, E, GoldenRatio, I, pi
from sympy import Function, Rational, Integer, Lambda
from sympy.core.relational import Relational
from sympy.logic.boolalg import And, Or, Not, Equivalent, Xor
from sympy.printing.fcode import fcode, FCodePrinter
from sympy.tensor import IndexedBase, Idx
from sympy.utilities.lambdify import implemented_function
from sympy.utilities.pytest import raises
from sympy.core.compatibility import range
from sympy.matrices import Matrix, MatrixSymbol
def test_printmethod():
x = symbols('x')
class nint(Function):
def _fcode(self, printer):
return "nint(%s)" % printer._print(self.args[0])
assert fcode(nint(x)) == " nint(x)"
def test_fcode_Pow():
x, y = symbols('x,y')
n = symbols('n', integer=True)
assert fcode(x**3) == " x**3"
assert fcode(x**(y**3)) == " x**(y**3)"
assert fcode(1/(sin(x)*3.5)**(x - y**x)/(x**2 + y)) == \
" (3.5d0*sin(x))**(-x + y**x)/(x**2 + y)"
assert fcode(sqrt(x)) == ' sqrt(x)'
assert fcode(sqrt(n)) == ' sqrt(dble(n))'
assert fcode(x**0.5) == ' sqrt(x)'
assert fcode(sqrt(x)) == ' sqrt(x)'
assert fcode(sqrt(10)) == ' sqrt(10.0d0)'
assert fcode(x**-1.0) == ' 1.0/x'
assert fcode(x**-2.0, 'y', source_format='free') == 'y = x**(-2.0d0)' # 2823
assert fcode(x**Rational(3, 7)) == ' x**(3.0d0/7.0d0)'
def test_fcode_Rational():
x = symbols('x')
assert fcode(Rational(3, 7)) == " 3.0d0/7.0d0"
assert fcode(Rational(18, 9)) == " 2"
assert fcode(Rational(3, -7)) == " -3.0d0/7.0d0"
assert fcode(Rational(-3, -7)) == " 3.0d0/7.0d0"
assert fcode(x + Rational(3, 7)) == " x + 3.0d0/7.0d0"
assert fcode(Rational(3, 7)*x) == " (3.0d0/7.0d0)*x"
def test_fcode_Integer():
assert fcode(Integer(67)) == " 67"
assert fcode(Integer(-1)) == " -1"
def test_fcode_Float():
assert fcode(Float(42.0)) == " 42.0000000000000d0"
assert fcode(Float(-1e20)) == " -1.00000000000000d+20"
def test_fcode_functions():
x, y = symbols('x,y')
assert fcode(sin(x) ** cos(y)) == " sin(x)**cos(y)"
#issue 6814
def test_fcode_functions_with_integers():
x= symbols('x')
assert fcode(x * log(10)) == " x*2.30258509299405d0"
assert fcode(x * log(10)) == " x*2.30258509299405d0"
assert fcode(x * log(S(10))) == " x*2.30258509299405d0"
assert fcode(log(S(10))) == " 2.30258509299405d0"
assert fcode(exp(10)) == " 22026.4657948067d0"
assert fcode(x * log(log(10))) == " x*0.834032445247956d0"
assert fcode(x * log(log(S(10)))) == " x*0.834032445247956d0"
def test_fcode_NumberSymbol():
p = FCodePrinter()
assert fcode(Catalan) == ' parameter (Catalan = 0.915965594177219d0)\n Catalan'
assert fcode(EulerGamma) == ' parameter (EulerGamma = 0.577215664901533d0)\n EulerGamma'
assert fcode(E) == ' parameter (E = 2.71828182845905d0)\n E'
assert fcode(GoldenRatio) == ' parameter (GoldenRatio = 1.61803398874989d0)\n GoldenRatio'
assert fcode(pi) == ' parameter (pi = 3.14159265358979d0)\n pi'
assert fcode(
pi, precision=5) == ' parameter (pi = 3.1416d0)\n pi'
assert fcode(Catalan, human=False) == (set(
[(Catalan, p._print(Catalan.evalf(15)))]), set([]), ' Catalan')
assert fcode(EulerGamma, human=False) == (set([(EulerGamma, p._print(
EulerGamma.evalf(15)))]), set([]), ' EulerGamma')
assert fcode(E, human=False) == (
set([(E, p._print(E.evalf(15)))]), set([]), ' E')
assert fcode(GoldenRatio, human=False) == (set([(GoldenRatio, p._print(
GoldenRatio.evalf(15)))]), set([]), ' GoldenRatio')
assert fcode(pi, human=False) == (
set([(pi, p._print(pi.evalf(15)))]), set([]), ' pi')
assert fcode(pi, precision=5, human=False) == (
set([(pi, p._print(pi.evalf(5)))]), set([]), ' pi')
def test_fcode_complex():
assert fcode(I) == " cmplx(0,1)"
x = symbols('x')
assert fcode(4*I) == " cmplx(0,4)"
assert fcode(3 + 4*I) == " cmplx(3,4)"
assert fcode(3 + 4*I + x) == " cmplx(3,4) + x"
assert fcode(I*x) == " cmplx(0,1)*x"
assert fcode(3 + 4*I - x) == " cmplx(3,4) - x"
x = symbols('x', imaginary=True)
assert fcode(5*x) == " 5*x"
assert fcode(I*x) == " cmplx(0,1)*x"
assert fcode(3 + x) == " x + 3"
def test_implicit():
x, y = symbols('x,y')
assert fcode(sin(x)) == " sin(x)"
assert fcode(atan2(x, y)) == " atan2(x, y)"
assert fcode(conjugate(x)) == " conjg(x)"
def test_not_fortran():
x = symbols('x')
g = Function('g')
assert fcode(
gamma(x)) == "C Not supported in Fortran:\nC gamma\n gamma(x)"
assert fcode(Integral(sin(x))) == "C Not supported in Fortran:\nC Integral\n Integral(sin(x), x)"
assert fcode(g(x)) == "C Not supported in Fortran:\nC g\n g(x)"
def test_user_functions():
x = symbols('x')
assert fcode(sin(x), user_functions={"sin": "zsin"}) == " zsin(x)"
x = symbols('x')
assert fcode(
gamma(x), user_functions={"gamma": "mygamma"}) == " mygamma(x)"
g = Function('g')
assert fcode(g(x), user_functions={"g": "great"}) == " great(x)"
n = symbols('n', integer=True)
assert fcode(
factorial(n), user_functions={"factorial": "fct"}) == " fct(n)"
def test_inline_function():
x = symbols('x')
g = implemented_function('g', Lambda(x, 2*x))
assert fcode(g(x)) == " 2*x"
g = implemented_function('g', Lambda(x, 2*pi/x))
assert fcode(g(x)) == (
" parameter (pi = 3.14159265358979d0)\n"
" 2*pi/x"
)
A = IndexedBase('A')
i = Idx('i', symbols('n', integer=True))
g = implemented_function('g', Lambda(x, x*(1 + x)*(2 + x)))
assert fcode(g(A[i]), assign_to=A[i]) == (
" do i = 1, n\n"
" A(i) = (A(i) + 1)*(A(i) + 2)*A(i)\n"
" end do"
)
def test_assign_to():
x = symbols('x')
assert fcode(sin(x), assign_to="s") == " s = sin(x)"
def test_line_wrapping():
x, y = symbols('x,y')
assert fcode(((x + y)**10).expand(), assign_to="var") == (
" var = x**10 + 10*x**9*y + 45*x**8*y**2 + 120*x**7*y**3 + 210*x**6*\n"
" @ y**4 + 252*x**5*y**5 + 210*x**4*y**6 + 120*x**3*y**7 + 45*x**2*y\n"
" @ **8 + 10*x*y**9 + y**10"
)
e = [x**i for i in range(11)]
assert fcode(Add(*e)) == (
" x**10 + x**9 + x**8 + x**7 + x**6 + x**5 + x**4 + x**3 + x**2 + x\n"
" @ + 1"
)
def test_fcode_precedence():
x, y = symbols("x y")
assert fcode(And(x < y, y < x + 1), source_format="free") == \
"x < y .and. y < x + 1"
assert fcode(Or(x < y, y < x + 1), source_format="free") == \
"x < y .or. y < x + 1"
assert fcode(Xor(x < y, y < x + 1, evaluate=False),
source_format="free") == "x < y .neqv. y < x + 1"
assert fcode(Equivalent(x < y, y < x + 1), source_format="free") == \
"x < y .eqv. y < x + 1"
def test_fcode_Logical():
x, y, z = symbols("x y z")
# unary Not
assert fcode(Not(x), source_format="free") == ".not. x"
# binary And
assert fcode(And(x, y), source_format="free") == "x .and. y"
assert fcode(And(x, Not(y)), source_format="free") == "x .and. .not. y"
assert fcode(And(Not(x), y), source_format="free") == "y .and. .not. x"
assert fcode(And(Not(x), Not(y)), source_format="free") == \
".not. x .and. .not. y"
assert fcode(Not(And(x, y), evaluate=False), source_format="free") == \
".not. (x .and. y)"
# binary Or
assert fcode(Or(x, y), source_format="free") == "x .or. y"
assert fcode(Or(x, Not(y)), source_format="free") == "x .or. .not. y"
assert fcode(Or(Not(x), y), source_format="free") == "y .or. .not. x"
assert fcode(Or(Not(x), Not(y)), source_format="free") == \
".not. x .or. .not. y"
assert fcode(Not(Or(x, y), evaluate=False), source_format="free") == \
".not. (x .or. y)"
# mixed And/Or
assert fcode(And(Or(y, z), x), source_format="free") == "x .and. (y .or. z)"
assert fcode(And(Or(z, x), y), source_format="free") == "y .and. (x .or. z)"
assert fcode(And(Or(x, y), z), source_format="free") == "z .and. (x .or. y)"
assert fcode(Or(And(y, z), x), source_format="free") == "x .or. y .and. z"
assert fcode(Or(And(z, x), y), source_format="free") == "y .or. x .and. z"
assert fcode(Or(And(x, y), z), source_format="free") == "z .or. x .and. y"
# trinary And
assert fcode(And(x, y, z), source_format="free") == "x .and. y .and. z"
assert fcode(And(x, y, Not(z)), source_format="free") == \
"x .and. y .and. .not. z"
assert fcode(And(x, Not(y), z), source_format="free") == \
"x .and. z .and. .not. y"
assert fcode(And(Not(x), y, z), source_format="free") == \
"y .and. z .and. .not. x"
assert fcode(Not(And(x, y, z), evaluate=False), source_format="free") == \
".not. (x .and. y .and. z)"
# trinary Or
assert fcode(Or(x, y, z), source_format="free") == "x .or. y .or. z"
assert fcode(Or(x, y, Not(z)), source_format="free") == \
"x .or. y .or. .not. z"
assert fcode(Or(x, Not(y), z), source_format="free") == \
"x .or. z .or. .not. y"
assert fcode(Or(Not(x), y, z), source_format="free") == \
"y .or. z .or. .not. x"
assert fcode(Not(Or(x, y, z), evaluate=False), source_format="free") == \
".not. (x .or. y .or. z)"
def test_fcode_Xlogical():
x, y, z = symbols("x y z")
# binary Xor
assert fcode(Xor(x, y, evaluate=False), source_format="free") == \
"x .neqv. y"
assert fcode(Xor(x, Not(y), evaluate=False), source_format="free") == \
"x .neqv. .not. y"
assert fcode(Xor(Not(x), y, evaluate=False), source_format="free") == \
"y .neqv. .not. x"
assert fcode(Xor(Not(x), Not(y), evaluate=False),
source_format="free") == ".not. x .neqv. .not. y"
assert fcode(Not(Xor(x, y, evaluate=False), evaluate=False),
source_format="free") == ".not. (x .neqv. y)"
# binary Equivalent
assert fcode(Equivalent(x, y), source_format="free") == "x .eqv. y"
assert fcode(Equivalent(x, Not(y)), source_format="free") == \
"x .eqv. .not. y"
assert fcode(Equivalent(Not(x), y), source_format="free") == \
"y .eqv. .not. x"
assert fcode(Equivalent(Not(x), Not(y)), source_format="free") == \
".not. x .eqv. .not. y"
assert fcode(Not(Equivalent(x, y), evaluate=False),
source_format="free") == ".not. (x .eqv. y)"
# mixed And/Equivalent
assert fcode(Equivalent(And(y, z), x), source_format="free") == \
"x .eqv. y .and. z"
assert fcode(Equivalent(And(z, x), y), source_format="free") == \
"y .eqv. x .and. z"
assert fcode(Equivalent(And(x, y), z), source_format="free") == \
"z .eqv. x .and. y"
assert fcode(And(Equivalent(y, z), x), source_format="free") == \
"x .and. (y .eqv. z)"
assert fcode(And(Equivalent(z, x), y), source_format="free") == \
"y .and. (x .eqv. z)"
assert fcode(And(Equivalent(x, y), z), source_format="free") == \
"z .and. (x .eqv. y)"
# mixed Or/Equivalent
assert fcode(Equivalent(Or(y, z), x), source_format="free") == \
"x .eqv. y .or. z"
assert fcode(Equivalent(Or(z, x), y), source_format="free") == \
"y .eqv. x .or. z"
assert fcode(Equivalent(Or(x, y), z), source_format="free") == \
"z .eqv. x .or. y"
assert fcode(Or(Equivalent(y, z), x), source_format="free") == \
"x .or. (y .eqv. z)"
assert fcode(Or(Equivalent(z, x), y), source_format="free") == \
"y .or. (x .eqv. z)"
assert fcode(Or(Equivalent(x, y), z), source_format="free") == \
"z .or. (x .eqv. y)"
# mixed Xor/Equivalent
assert fcode(Equivalent(Xor(y, z, evaluate=False), x),
source_format="free") == "x .eqv. (y .neqv. z)"
assert fcode(Equivalent(Xor(z, x, evaluate=False), y),
source_format="free") == "y .eqv. (x .neqv. z)"
assert fcode(Equivalent(Xor(x, y, evaluate=False), z),
source_format="free") == "z .eqv. (x .neqv. y)"
assert fcode(Xor(Equivalent(y, z), x, evaluate=False),
source_format="free") == "x .neqv. (y .eqv. z)"
assert fcode(Xor(Equivalent(z, x), y, evaluate=False),
source_format="free") == "y .neqv. (x .eqv. z)"
assert fcode(Xor(Equivalent(x, y), z, evaluate=False),
source_format="free") == "z .neqv. (x .eqv. y)"
# mixed And/Xor
assert fcode(Xor(And(y, z), x, evaluate=False), source_format="free") == \
"x .neqv. y .and. z"
assert fcode(Xor(And(z, x), y, evaluate=False), source_format="free") == \
"y .neqv. x .and. z"
assert fcode(Xor(And(x, y), z, evaluate=False), source_format="free") == \
"z .neqv. x .and. y"
assert fcode(And(Xor(y, z, evaluate=False), x), source_format="free") == \
"x .and. (y .neqv. z)"
assert fcode(And(Xor(z, x, evaluate=False), y), source_format="free") == \
"y .and. (x .neqv. z)"
assert fcode(And(Xor(x, y, evaluate=False), z), source_format="free") == \
"z .and. (x .neqv. y)"
# mixed Or/Xor
assert fcode(Xor(Or(y, z), x, evaluate=False), source_format="free") == \
"x .neqv. y .or. z"
assert fcode(Xor(Or(z, x), y, evaluate=False), source_format="free") == \
"y .neqv. x .or. z"
assert fcode(Xor(Or(x, y), z, evaluate=False), source_format="free") == \
"z .neqv. x .or. y"
assert fcode(Or(Xor(y, z, evaluate=False), x), source_format="free") == \
"x .or. (y .neqv. z)"
assert fcode(Or(Xor(z, x, evaluate=False), y), source_format="free") == \
"y .or. (x .neqv. z)"
assert fcode(Or(Xor(x, y, evaluate=False), z), source_format="free") == \
"z .or. (x .neqv. y)"
# trinary Xor
assert fcode(Xor(x, y, z, evaluate=False), source_format="free") == \
"x .neqv. y .neqv. z"
assert fcode(Xor(x, y, Not(z), evaluate=False), source_format="free") == \
"x .neqv. y .neqv. .not. z"
assert fcode(Xor(x, Not(y), z, evaluate=False), source_format="free") == \
"x .neqv. z .neqv. .not. y"
assert fcode(Xor(Not(x), y, z, evaluate=False), source_format="free") == \
"y .neqv. z .neqv. .not. x"
def test_fcode_Relational():
x, y = symbols("x y")
assert fcode(Relational(x, y, "=="), source_format="free") == "Eq(x, y)"
assert fcode(Relational(x, y, "!="), source_format="free") == "Ne(x, y)"
assert fcode(Relational(x, y, ">="), source_format="free") == "x >= y"
assert fcode(Relational(x, y, "<="), source_format="free") == "x <= y"
assert fcode(Relational(x, y, ">"), source_format="free") == "x > y"
assert fcode(Relational(x, y, "<"), source_format="free") == "x < y"
def test_fcode_Piecewise():
x = symbols('x')
expr = Piecewise((x, x < 1), (x**2, True))
# Check that inline conditional (merge) fails if standard isn't 95+
raises(NotImplementedError, lambda: fcode(expr))
code = fcode(expr, standard=95)
expected = " merge(x, x**2, x < 1)"
assert code == expected
assert fcode(Piecewise((x, x < 1), (x**2, True)), assign_to="var") == (
" if (x < 1) then\n"
" var = x\n"
" else\n"
" var = x**2\n"
" end if"
)
a = cos(x)/x
b = sin(x)/x
for i in range(10):
a = diff(a, x)
b = diff(b, x)
expected = (
" if (x < 0) then\n"
" weird_name = -cos(x)/x + 10*sin(x)/x**2 + 90*cos(x)/x**3 - 720*\n"
" @ sin(x)/x**4 - 5040*cos(x)/x**5 + 30240*sin(x)/x**6 + 151200*cos(x\n"
" @ )/x**7 - 604800*sin(x)/x**8 - 1814400*cos(x)/x**9 + 3628800*sin(x\n"
" @ )/x**10 + 3628800*cos(x)/x**11\n"
" else\n"
" weird_name = -sin(x)/x - 10*cos(x)/x**2 + 90*sin(x)/x**3 + 720*\n"
" @ cos(x)/x**4 - 5040*sin(x)/x**5 - 30240*cos(x)/x**6 + 151200*sin(x\n"
" @ )/x**7 + 604800*cos(x)/x**8 - 1814400*sin(x)/x**9 - 3628800*cos(x\n"
" @ )/x**10 + 3628800*sin(x)/x**11\n"
" end if"
)
code = fcode(Piecewise((a, x < 0), (b, True)), assign_to="weird_name")
assert code == expected
code = fcode(Piecewise((x, x < 1), (x**2, x > 1), (sin(x), True)), standard=95)
expected = " merge(x, merge(x**2, sin(x), x > 1), x < 1)"
assert code == expected
# Check that Piecewise without a True (default) condition error
expr = Piecewise((x, x < 1), (x**2, x > 1), (sin(x), x > 0))
raises(ValueError, lambda: fcode(expr))
def test_wrap_fortran():
# "########################################################################"
printer = FCodePrinter()
lines = [
"C This is a long comment on a single line that must be wrapped properly to produce nice output",
" this = is + a + long + and + nasty + fortran + statement + that * must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that * must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that * must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that*must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that*must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that*must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that*must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that**must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that**must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that**must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that**must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that**must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement(that)/must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement(that)/must + be + wrapped + properly",
]
wrapped_lines = printer._wrap_fortran(lines)
expected_lines = [
"C This is a long comment on a single line that must be wrapped",
"C properly to produce nice output",
" this = is + a + long + and + nasty + fortran + statement + that *",
" @ must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that *",
" @ must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that",
" @ * must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that*",
" @ must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that*",
" @ must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that",
" @ *must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement +",
" @ that*must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that**",
" @ must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that**",
" @ must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that",
" @ **must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that",
" @ **must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement +",
" @ that**must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement(that)/",
" @ must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement(that)",
" @ /must + be + wrapped + properly",
]
for line in wrapped_lines:
assert len(line) <= 72
for w, e in zip(wrapped_lines, expected_lines):
assert w == e
assert len(wrapped_lines) == len(expected_lines)
def test_wrap_fortran_keep_d0():
printer = FCodePrinter()
lines = [
' this_variable_is_very_long_because_we_try_to_test_line_break=1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break =1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break = 1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break = 1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break = 1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break = 10.0d0'
]
expected = [
' this_variable_is_very_long_because_we_try_to_test_line_break=1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break =',
' @ 1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break =',
' @ 1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break =',
' @ 1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break =',
' @ 1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break =',
' @ 10.0d0'
]
assert printer._wrap_fortran(lines) == expected
def test_settings():
raises(TypeError, lambda: fcode(S(4), method="garbage"))
def test_free_form_code_line():
x, y = symbols('x,y')
assert fcode(cos(x) + sin(y), source_format='free') == "sin(y) + cos(x)"
def test_free_form_continuation_line():
x, y = symbols('x,y')
result = fcode(((cos(x) + sin(y))**(7)).expand(), source_format='free')
expected = (
'sin(y)**7 + 7*sin(y)**6*cos(x) + 21*sin(y)**5*cos(x)**2 + 35*sin(y)**4* &\n'
' cos(x)**3 + 35*sin(y)**3*cos(x)**4 + 21*sin(y)**2*cos(x)**5 + 7* &\n'
' sin(y)*cos(x)**6 + cos(x)**7'
)
assert result == expected
def test_free_form_comment_line():
printer = FCodePrinter({'source_format': 'free'})
lines = [ "! This is a long comment on a single line that must be wrapped properly to produce nice output"]
expected = [
'! This is a long comment on a single line that must be wrapped properly',
'! to produce nice output']
assert printer._wrap_fortran(lines) == expected
def test_loops():
n, m = symbols('n,m', integer=True)
A = IndexedBase('A')
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
expected = (
'do i = 1, m\n'
' y(i) = 0\n'
'end do\n'
'do i = 1, m\n'
' do j = 1, n\n'
' y(i) = %(rhs)s\n'
' end do\n'
'end do'
)
code = fcode(A[i, j]*x[j], assign_to=y[i], source_format='free')
assert (code == expected % {'rhs': 'y(i) + A(i, j)*x(j)'} or
code == expected % {'rhs': 'y(i) + x(j)*A(i, j)'} or
code == expected % {'rhs': 'x(j)*A(i, j) + y(i)'} or
code == expected % {'rhs': 'A(i, j)*x(j) + y(i)'})
def test_dummy_loops():
i, m = symbols('i m', integer=True, cls=Dummy)
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx(i, m)
expected = (
'do i_%(icount)i = 1, m_%(mcount)i\n'
' y(i_%(icount)i) = x(i_%(icount)i)\n'
'end do'
) % {'icount': i.label.dummy_index, 'mcount': m.dummy_index}
code = fcode(x[i], assign_to=y[i], source_format='free')
assert code == expected
def test_fcode_Indexed_without_looking_for_contraction():
len_y = 5
y = IndexedBase('y', shape=(len_y,))
x = IndexedBase('x', shape=(len_y,))
Dy = IndexedBase('Dy', shape=(len_y-1,))
i = Idx('i', len_y-1)
e=Eq(Dy[i], (y[i+1]-y[i])/(x[i+1]-x[i]))
code0 = fcode(e.rhs, assign_to=e.lhs, contract=False)
assert code0.endswith('Dy(i) = (y(i + 1) - y(i))/(x(i + 1) - x(i))')
def test_derived_classes():
class MyFancyFCodePrinter(FCodePrinter):
_default_settings = FCodePrinter._default_settings.copy()
printer = MyFancyFCodePrinter()
x = symbols('x')
assert printer.doprint(sin(x), "bork") == " bork = sin(x)"
def test_indent():
codelines = (
'subroutine test(a)\n'
'integer :: a, i, j\n'
'\n'
'do\n'
'do \n'
'do j = 1, 5\n'
'if (a>b) then\n'
'if(b>0) then\n'
'a = 3\n'
'donot_indent_me = 2\n'
'do_not_indent_me_either = 2\n'
'ifIam_indented_something_went_wrong = 2\n'
'if_I_am_indented_something_went_wrong = 2\n'
'end should not be unindented here\n'
'end if\n'
'endif\n'
'end do\n'
'end do\n'
'enddo\n'
'end subroutine\n'
'\n'
'subroutine test2(a)\n'
'integer :: a\n'
'do\n'
'a = a + 1\n'
'end do \n'
'end subroutine\n'
)
expected = (
'subroutine test(a)\n'
'integer :: a, i, j\n'
'\n'
'do\n'
' do \n'
' do j = 1, 5\n'
' if (a>b) then\n'
' if(b>0) then\n'
' a = 3\n'
' donot_indent_me = 2\n'
' do_not_indent_me_either = 2\n'
' ifIam_indented_something_went_wrong = 2\n'
' if_I_am_indented_something_went_wrong = 2\n'
' end should not be unindented here\n'
' end if\n'
' endif\n'
' end do\n'
' end do\n'
'enddo\n'
'end subroutine\n'
'\n'
'subroutine test2(a)\n'
'integer :: a\n'
'do\n'
' a = a + 1\n'
'end do \n'
'end subroutine\n'
)
p = FCodePrinter({'source_format': 'free'})
result = p.indent_code(codelines)
assert result == expected
def test_Matrix_printing():
x, y, z = symbols('x,y,z')
# Test returning a Matrix
mat = Matrix([x*y, Piecewise((2 + x, y>0), (y, True)), sin(z)])
A = MatrixSymbol('A', 3, 1)
assert fcode(mat, A) == (
" A(1, 1) = x*y\n"
" if (y > 0) then\n"
" A(2, 1) = x + 2\n"
" else\n"
" A(2, 1) = y\n"
" end if\n"
" A(3, 1) = sin(z)")
# Test using MatrixElements in expressions
expr = Piecewise((2*A[2, 0], x > 0), (A[2, 0], True)) + sin(A[1, 0]) + A[0, 0]
assert fcode(expr, standard=95) == (
" merge(2*A(3, 1), A(3, 1), x > 0) + sin(A(2, 1)) + A(1, 1)")
# Test using MatrixElements in a Matrix
q = MatrixSymbol('q', 5, 1)
M = MatrixSymbol('M', 3, 3)
m = Matrix([[sin(q[1,0]), 0, cos(q[2,0])],
[q[1,0] + q[2,0], q[3, 0], 5],
[2*q[4, 0]/q[1,0], sqrt(q[0,0]) + 4, 0]])
assert fcode(m, M) == (
" M(1, 1) = sin(q(2, 1))\n"
" M(2, 1) = q(2, 1) + q(3, 1)\n"
" M(3, 1) = 2*q(5, 1)*1.0/q(2, 1)\n"
" M(1, 2) = 0\n"
" M(2, 2) = q(4, 1)\n"
" M(3, 2) = 4 + sqrt(q(1, 1))\n"
" M(1, 3) = cos(q(3, 1))\n"
" M(2, 3) = 5\n"
" M(3, 3) = 0")
|
kaichogami/sympy
|
sympy/printing/tests/test_fcode.py
|
Python
|
bsd-3-clause
| 28,380
|
# The contents of this file are subject to the Mozilla Public License
# (MPL) Version 1.1 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License
# at http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and
# limitations under the License.
#
# The Original Code is LEPL (http://www.acooke.org/lepl)
# The Initial Developer of the Original Code is Andrew Cooke.
# Portions created by the Initial Developer are Copyright (C) 2009-2010
# Andrew Cooke (andrew@acooke.org). All Rights Reserved.
#
# Alternatively, the contents of this file may be used under the terms
# of the LGPL license (the GNU Lesser General Public License,
# http://www.gnu.org/licenses/lgpl.html), in which case the provisions
# of the LGPL License are applicable instead of those above.
#
# If you wish to allow use of your version of this file only under the
# terms of the LGPL License and not to allow others to use your version
# of this file under the MPL, indicate your decision by deleting the
# provisions above and replace them with the notice and other provisions
# required by the LGPL License. If you do not delete the provisions
# above, a recipient may use your version of this file under either the
# MPL or the LGPL License.
from collections import Iterable
from lepl.stream.simple import SequenceHelper, StringHelper, ListHelper
from lepl.stream.iter import IterableHelper, Cons
from lepl.support.lib import basestring, fmt, add_defaults, file
from lepl.lexer.stream import TokenHelper
class StreamFactory(object):
'''
Given a value (typically a sequence), generate a stream.
'''
def from_string(self, text, **kargs):
'''
Provide a stream for the contents of the string.
'''
add_defaults(kargs, {'factory': self})
return (0, StringHelper(text, **kargs))
def from_list(self, list_, **kargs):
'''
Provide a stream for the contents of the list.
'''
add_defaults(kargs, {'factory': self})
return (0, ListHelper(list_, **kargs))
def from_sequence(self, sequence, **kargs):
'''
Return a generic stream for any indexable sequence.
'''
add_defaults(kargs, {'factory': self})
return (0, SequenceHelper(sequence, **kargs))
def from_iterable(self, iterable, **kargs):
'''
Provide a stream for the contents of the iterable. This assumes that
each value from the iterable is a "line" which will, in turn, be
passed to the stream factory.
'''
add_defaults(kargs, {'factory': self})
cons = Cons(iterable)
return ((cons, self(cons.head, **kargs)), IterableHelper(**kargs))
def from_file(self, file_, **kargs):
'''
Provide a stream for the contents of the file. There is no
corresponding `from_path` because the opening and closing of the
path must be done outside the parsing (or the contents will become
unavailable), so use instead:
with open(path) as f:
parser.parse_file(f)
which will close the file after parsing.
'''
try:
gkargs = kargs.get('global_kargs', {})
add_defaults(gkargs, {'filename': file_.name})
add_defaults(kargs, {'global_kargs': gkargs})
except AttributeError:
pass
return self.from_iterable(file_, **kargs)
def to_token(self, iterable, **kargs):
'''
Create a stream for tokens. The `iterable` is a source of
(token_ids, sub_stream) tuples, where `sub_stream` will be
matched within the token.
'''
return (Cons(iterable), TokenHelper(**kargs))
def __call__(self, sequence, **kargs):
'''
Auto-detect type and wrap appropriately.
'''
if isinstance(sequence, basestring):
return self.from_string(sequence, **kargs)
elif isinstance(sequence, list):
return self.from_list(sequence, **kargs)
elif isinstance(sequence, file):
return self.from_file(sequence, **kargs)
elif hasattr(sequence, '__getitem__') and hasattr(sequence, '__len__'):
return self.from_sequence(sequence, **kargs)
elif isinstance(sequence, Iterable):
return self.from_iterable(sequence, **kargs)
else:
raise TypeError(fmt('Cannot generate a stream for type {0}',
type(sequence)))
DEFAULT_STREAM_FACTORY = StreamFactory()
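# Illustrative usage sketch (not part of the original module): the factory
# auto-detects the input type, so calling it with a plain string returns an
# (offset, helper) pair backed by StringHelper:
#   stream = DEFAULT_STREAM_FACTORY('hello world')   # -> (0, StringHelper(...))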
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/lepl/stream/factory.py
|
Python
|
agpl-3.0
| 4,763
|
#!/usr/bin/env python3
'''
Object : defaultdict
defaultdict: no need to check whether a key is already present before using it
'''
from collections import defaultdict
s = [('yellow', 1), ('blue', 2), ('yellow', 3), ('blue', 4), ('red', 1)]
# Create a dict with value type being list
d = defaultdict(list)
[d[k].append(v) for k, v in s]
print(d.items())
# defaultdict used for counting
s = 'mississippi'
d = defaultdict(int)
for k in s:
d[k] += 1
print(d.items())
# Setting the default_factory to set makes the defaultdict useful for building a dictionary of sets:
s = [('red', 1), ('blue', 2), ('red', 3), ('blue', 4), ('red', 1), ('blue', 4)]
d = defaultdict(set)
[d[k].add(v) for k, v in s]
print(d.items())
s = [('red', 1), ('blue', 2), ('yellow', 12), ('red', 3), ('blue', 4), ('red', 1), ('blue', 4), ('yellow', 2, 3, 5)]
d = defaultdict(list)
for x in s:
k = x[0]
v = (x[1:])
d[k].extend(v)
print(d.items())
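# Extra illustration (not in the original script): any callable default_factory,
# here a lambda, supplies the starting value for a missing key.
d = defaultdict(lambda: 'missing')
d['known'] = 'present'
print(d['known'], d['unknown'])  # -> present missing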
|
jingwangian/tutorial
|
python/collection_tt/defaultdict_st.py
|
Python
|
gpl-3.0
| 909
|
"""
Bot wide hook opt-out for channels
"""
import asyncio
from collections import defaultdict
from fnmatch import fnmatch
from functools import total_ordering
from threading import RLock
from sqlalchemy import Table, Column, String, Boolean, PrimaryKeyConstraint, and_
from cloudbot import hook
from cloudbot.hook import Priority
from cloudbot.util import database, web
from cloudbot.util.formatting import gen_markdown_table
optout_table = Table(
'optout',
database.metadata,
Column('network', String),
Column('chan', String),
Column('hook', String),
Column('allow', Boolean, default=False),
PrimaryKeyConstraint('network', 'chan', 'hook')
)
optout_cache = defaultdict(list)
cache_lock = RLock()
@total_ordering
class OptOut:
def __init__(self, channel, hook_pattern, allow):
self.channel = channel.casefold()
self.hook = hook_pattern.casefold()
self.allow = allow
def __lt__(self, other):
if isinstance(other, OptOut):
diff = len(self.channel) - len(other.channel)
if diff:
return diff < 0
return len(self.hook) < len(other.hook)
return NotImplemented
def __str__(self):
return "{} {} {}".format(self.channel, self.hook, self.allow)
def __repr__(self):
return "{}({}, {}, {})".format(self.__class__.__name__, self.channel, self.hook, self.allow)
def match(self, channel, hook_name):
return self.match_chan(channel) and fnmatch(hook_name.casefold(), self.hook)
def match_chan(self, channel):
return fnmatch(channel.casefold(), self.channel)
@asyncio.coroutine
def check_channel_permissions(event, chan, *perms):
old_chan = event.chan
event.chan = chan
allowed = yield from event.check_permissions(*perms)
event.chan = old_chan
return allowed
def get_channel_optouts(conn_name, chan=None):
with cache_lock:
return [opt for opt in optout_cache[conn_name] if not chan or opt.match_chan(chan)]
def format_optout_list(opts):
headers = ("Channel Pattern", "Hook Pattern", "Allowed")
table = [(opt.channel, opt.hook, "true" if opt.allow else "false") for opt in opts]
return gen_markdown_table(headers, table)
def set_optout(db, conn, chan, pattern, allowed):
conn_cf = conn.casefold()
chan_cf = chan.casefold()
pattern_cf = pattern.casefold()
clause = and_(optout_table.c.network == conn_cf, optout_table.c.chan == chan_cf, optout_table.c.hook == pattern_cf)
res = db.execute(optout_table.update().values(allow=allowed).where(clause))
if not res.rowcount:
db.execute(optout_table.insert().values(network=conn_cf, chan=chan_cf, hook=pattern_cf, allow=allowed))
db.commit()
load_cache(db)
def del_optout(db, conn, chan, pattern):
conn_cf = conn.casefold()
chan_cf = chan.casefold()
pattern_cf = pattern.casefold()
clause = and_(optout_table.c.network == conn_cf, optout_table.c.chan == chan_cf, optout_table.c.hook == pattern_cf)
res = db.execute(optout_table.delete().where(clause))
db.commit()
load_cache(db)
return res.rowcount > 0
def clear_optout(db, conn, chan=None):
conn_cf = conn.casefold()
if chan:
chan_cf = chan.casefold()
clause = and_(optout_table.c.network == conn_cf, optout_table.c.chan == chan_cf)
else:
clause = optout_table.c.network == conn_cf
res = db.execute(optout_table.delete().where(clause))
db.commit()
load_cache(db)
return res.rowcount
_STR_TO_BOOL = {
"yes": True,
"y": True,
"no": False,
"n": False,
"on": True,
"off": False,
"enable": True,
"disable": False,
"allow": True,
"deny": False,
}
@hook.onload
def load_cache(db):
with cache_lock:
optout_cache.clear()
for row in db.execute(optout_table.select()):
optout_cache[row["network"]].append(OptOut(row["chan"], row["hook"], row["allow"]))
for opts in optout_cache.values():
opts.sort(reverse=True)
# noinspection PyUnusedLocal
@hook.sieve(priority=Priority.HIGHEST)
def optout_sieve(bot, event, _hook):
if not event.chan or not event.conn:
return event
hook_name = _hook.plugin.title + "." + _hook.function_name
with cache_lock:
optouts = optout_cache[event.conn.name]
for _optout in optouts:
if _optout.match(event.chan, hook_name):
if not _optout.allow:
if _hook.type == "command":
event.notice("Sorry, that command is disabled in this channel.")
return None
break
return event
@hook.command
@asyncio.coroutine
def optout(text, event, chan, db, conn):
"""[chan] <pattern> [allow] - Set the global allow option for hooks matching <pattern> in [chan], or the current channel if not specified
:type text: str
:type event: cloudbot.event.CommandEvent
"""
args = text.split()
if args[0].startswith("#") and len(args) > 1:
chan = args.pop(0)
has_perm = yield from check_channel_permissions(event, chan, "op", "chanop", "botcontrol")
if not has_perm:
event.notice("Sorry, you may not configure optout settings for that channel.")
return
pattern = args.pop(0)
allowed = False
if args:
allow = args.pop(0)
try:
allowed = _STR_TO_BOOL[allow.lower()]
except KeyError:
return "Invalid allow option."
yield from event.async_call(set_optout, db, conn.name, chan, pattern, allowed)
return "{action} hooks matching {pattern} in {channel}.".format(
action="Enabled" if allowed else "Disabled",
pattern=pattern,
channel=chan
)
@hook.command
@asyncio.coroutine
def deloptout(text, event, chan, db, conn):
"""[chan] <pattern> - Delete global optout hooks matching <pattern> in [chan], or the current channel if not specified"""
args = text.split()
if len(args) > 1:
chan = args.pop(0)
has_perm = yield from check_channel_permissions(event, chan, "op", "chanop", "botcontrol")
if not has_perm:
event.notice("Sorry, you may not configure optout settings for that channel.")
return
pattern = args.pop(0)
deleted = yield from event.async_call(del_optout, db, conn.name, chan, pattern)
if deleted:
return "Deleted optout '{}' in channel '{}'.".format(pattern, chan)
return "No matching optouts in channel '{}'.".format(chan)
@asyncio.coroutine
def check_global_perms(event):
chan = event.chan
text = event.text
if text:
chan = text.split()[0]
can_global = yield from event.check_permissions("botcontrol")
allowed = can_global or (yield from check_channel_permissions(event, chan, "op", "chanop"))
if not allowed:
event.notice("Sorry, you are not allowed to use this command.")
if chan.lower() == "global":
if not can_global:
event.notice("You do not have permission to access global opt outs")
allowed = False
chan = None
return chan, allowed
@hook.command("listoptout", autohelp=False)
@asyncio.coroutine
def list_optout(conn, event, async_call):
"""[channel] - View the opt out data for <channel> or the current channel if not specified. Specify "global" to view all data for this network
:type conn: cloudbot.clients.irc.Client
:type event: cloudbot.event.CommandEvent
"""
chan, allowed = yield from check_global_perms(event)
if not allowed:
return
opts = yield from async_call(get_channel_optouts, conn.name, chan)
table = yield from async_call(format_optout_list, opts)
return web.paste(table, "md", "hastebin")
@hook.command("clearoptout", autohelp=False)
@asyncio.coroutine
def clear(conn, event, db, async_call):
"""[channel] - Clears the optout list for a channel. Specify "global" to clear all data for this network"""
chan, allowed = yield from check_global_perms(event)
if not allowed:
return
count = yield from async_call(clear_optout, db, conn.name, chan)
return "Cleared {} opt outs from the list.".format(count)
|
valesi/CloudBot
|
plugins/core/optout.py
|
Python
|
gpl-3.0
| 8,225
|
'''
Name: Dallas Fraser
Date: 2016-04-12
Project: MLSB API
Purpose: Holds the routes for the admin side
'''
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from os.path import join
from flask import render_template, make_response, url_for,\
redirect, session, request
from json import dumps
from api.routes import Routes
from api import app
from api import DB
from api.errors import InvalidField
from api.model import Team, Player, Sponsor, League, Game, Espys, Fun, Division
from api.variables import BATS
from api.authentication import check_auth
from datetime import date, time, datetime
from api.advanced.import_team import TeamList
from api.advanced.import_league import LeagueList
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Constants
# -----------------------------------------------------------------------------
ALLOWED_EXTENSIONS = set(['csv'])
# -----------------------------------------------------------------------------
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
@app.route(Routes['import_team_list'], methods=["POST"])
def admin_import_team_list():
results = {'errors': [], 'success': False, 'warnings': []}
if not logged_in():
results['errors'].append("Permission denied")
return dumps(results)
file = request.files['file']
if file and allowed_file(file.filename):
content = (file.read()).decode("UTF-8")
lines = content.replace("\r", "")
lines = lines.split("\n")
team = TeamList(lines)
team.add_team_functional()
results["warnings"] = team.warnings
results["error"] = team.errors
results['success'] = True
if len(results['errors']) > 0:
results['success'] = False
else:
s = "File format not accepted (csv)"
raise InvalidField(payload={'detail': s})
return dumps(results)
@app.route(Routes['import_game_list'], methods=["POST"])
def admin_import_game_list():
results = {'errors': [], 'success': False, 'warnings': []}
if not logged_in():
results['errors'].append("Permission denied")
return dumps(results)
file = request.files['file']
if file and allowed_file(file.filename):
content = (file.read()).decode("UTF-8")
lines = content.replace("\r", "")
lines = lines.split("\n")
team = LeagueList(lines)
team.import_league_functional()
results['errors'] = team.errors
results['warnings'] = team.warnings
results['success'] = True
if len(results['errors']) > 0:
results['success'] = False
else:
results['errors'] = "File should be a CSV"
results['success'] = False
return dumps(results)
@app.route(Routes['importteam'])
def admin_import_team():
if not logged_in():
return redirect(url_for('admin_login'))
return render_template("admin/importForm.html",
year=date.today().year,
route=Routes,
title="Import Team from CSV",
template=Routes['team_template'],
import_route=Routes['import_team_list'],
type="Team")
@app.route(Routes['importgame'])
def admin_import_game():
if not logged_in():
return redirect(url_for('admin_login'))
return render_template("admin/importForm.html",
year=date.today().year,
route=Routes,
title="Import League's Game from CSV",
admin=session['admin'],
password=session['password'],
template=Routes['game_template'],
import_route=Routes['import_game_list'],
type="Games")
@app.route(Routes['team_template'])
def admin_team_template():
uploads = join(app.root_path, "static", "files", "team_template.csv")
result = ""
with open(uploads, "r") as f:
for line in f:
result += line
response = make_response(result)
s = "attachment; filename=team_template.csv"
response.headers["Content-Disposition"] = s
return response
@app.route(Routes['game_template'])
def admin_game_template():
uploads = join(app.root_path, "static", "files", "game_template.csv")
result = ""
with open(uploads, "r") as f:
for line in f:
result += line
response = make_response(result)
s = "attachment; filename=game_template.csv"
response.headers["Content-Disposition"] = s
return response
@app.route(Routes['panel_captain_to_submit'] + "/<int:year>")
def get_captains_games_not_submitted(year):
t1 = time(0, 0)
t2 = time(23, 59)
d1 = date(year, 1, 1)
d2 = date.today()
start = datetime.combine(d1, t1)
end = datetime.combine(d2, t2)
games = (DB.session.query(Game).filter(Game.date.between(start, end))
).order_by(Game.date)
captains = []
for game in games:
away_bats = []
home_bats = []
for bat in game.bats:
if bat.team_id == game.away_team_id:
away_bats.append(bat)
elif bat.team_id == game.home_team_id:
home_bats.append(bat)
if len(away_bats) == 0:
team = Team.query.get(game.away_team_id)
player = (Player.query.get(team.player_id))
captains.append(player.name + "-" + player.email +
" on " + str(game.date))
if len(home_bats) == 0:
team = Team.query.get(game.home_team_id)
player = (Player.query.get(team.player_id))
captains.append(player.name + "-" + player.email +
" on " + str(game.date))
return render_template("admin/viewGamesNotSubmitted.html",
route=Routes,
title="Captains with games to submit",
captains=captains,
year=year)
@app.route(Routes['editroster'] + "/<int:year>" + "/<int:team_id>")
def admin_edit_roster(year, team_id):
if not logged_in():
return redirect(url_for('admin_login'))
team = Team.query.get(team_id)
if team is None:
return render_template("admin/notFound.html",
route=Routes,
title="Team not found")
else:
players = []
for player in team.players:
players.append(player.json())
players = quick_sort(players)
all_players = Player.query.order_by(Player.name).all()
non_roster = []
for player in all_players:
non_roster.append(player.json())
return render_template("admin/editTeamRoster.html",
route=Routes,
title="Edit {} roster".format(str(team)),
players=players,
team_id=team_id,
non_roster=non_roster,
year=year)
def quick_sort(array):
less = []
equal = []
greater = []
if len(array) > 1:
pivot = array[0]
for x in array:
if x['player_name'] < pivot['player_name']:
less.append(x)
if x['player_name'] == pivot['player_name']:
equal.append(x)
if x['player_name'] > pivot['player_name']:
greater.append(x)
# Don't forget to return something!
# Just use the + operator to join lists
return quick_sort(less) + equal + quick_sort(greater)
# Note that you want equal ^^^^^ not pivot
else:
# You need to hande the part at the end of the recursion
# when you only have one element in your array, just return the array.
return array
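# Hypothetical usage: quick_sort sorts the player dicts by 'player_name', e.g.
#   quick_sort([{'player_name': 'Zoe'}, {'player_name': 'Amy'}])
#   returns [{'player_name': 'Amy'}, {'player_name': 'Zoe'}].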
@app.route(Routes['editfun'] + "/<int:year>")
def admin_edit_fun(year):
if not logged_in():
return redirect(url_for('admin_login'))
return render_template("admin/editFun.html",
year=year,
route=Routes,
funs=get_funs(),
title="Edit Fun")
@app.route(Routes['editdivision'] + "/<int:year>")
def admin_edit_division(year):
if not logged_in():
return redirect(url_for('admin_login'))
return render_template("admin/editDivision.html",
year=year,
route=Routes,
divisions=get_divisions(),
title="Edit Division")
@app.route(Routes['editleague'] + "/<int:year>")
def admin_edit_league(year):
if not logged_in():
return redirect(url_for('admin_login'))
return render_template("admin/editLeague.html",
year=year,
route=Routes,
leagues=get_leagues(),
title="Edit Leagues")
@app.route(Routes['editsponsor'] + "/<int:year>")
def admin_edit_sponsor(year):
if not logged_in():
return redirect(url_for('admin_login'))
return render_template("admin/editSponsor.html",
year=year,
route=Routes,
sponsors=get_sponsors(),
not_active=get_sponsors(active=False),
title="Edit Leagues")
@app.route(Routes['aindex'] + "/<int:year>")
def admin_home(year):
if not logged_in():
return redirect(url_for('admin_login'))
return render_template("admin/index.html",
year=year,
route=Routes,
title="Admin")
@app.route(Routes['editplayer'] + "/<int:year>")
def admin_edit_player(year):
if not logged_in():
return redirect(url_for('admin_login'))
players = get_players()
return render_template("admin/editPlayer.html",
year=year,
route=Routes,
players=players,
title="Edit Players")
@app.route(Routes['nonactiveplayers'] + "/<int:year>")
def admin_non_active_players(year):
if not logged_in():
return redirect(url_for('admin_login'))
players = get_players(active=False)
return render_template("admin/nonActivePlayers.html",
year=year,
route=Routes,
players=players,
title="Activate Old Players")
@app.route(Routes['editteam'] + "/<int:year>")
def admin_edit_team(year):
if not logged_in():
return redirect(url_for('admin_login'))
results = Team.query.filter(Team.year == year).all()
# results = Team.query.all()
teams = []
for team in results:
teams.append(team.json())
return render_template("admin/editTeam.html",
year=year,
route=Routes,
teams=teams,
title="Edit Teams",
sponsors=get_sponsors(),
leagues=get_leagues())
@app.route(Routes['editgame'] + "/<int:year>")
def admin_edit_game(year):
if not logged_in():
return redirect(url_for('admin_login'))
results = Team.query.filter(Team.year == year).all()
leagues = get_leagues()
divisions = get_divisions()
teams = []
for league in leagues:
while len(teams) < league['league_id'] + 1:
teams.append([])
for team in results:
if team.league_id is not None:
t = team.json()
t['team_name'] = str(team)
teams[team.league_id].append(t)
d1 = date(year, 1, 1)
d2 = date(year, 12, 31)
results = Game.query.filter(Game.date.between(d1, d2)).all()
games = []
for game in results:
games.append(game.json())
return render_template("admin/editGame.html",
year=year,
route=Routes,
teams=teams,
title="Edit Game",
leagues=leagues,
divisions=divisions,
games=games)
@app.route(Routes['adeactivateplayer'] + "/<int:year>" + "/<int:player_id>")
def admin_activate_player(year, player_id):
if not logged_in():
return redirect(url_for('admin_login'))
player = Player.query.get(player_id)
if player is None:
return render_template("admin/notFound.html",
route=Routes,
year=year,
title="Player not found"
)
return render_template("admin/activatePlayer.html",
year=year,
player=player.json(),
route=Routes,
title="Activate/Deactivate Player")
@app.route(Routes['adeactivateplayer'] + "/<int:year>" + "/<int:player_id>",
methods=["POST"])
def admin_activate_player_post(year, player_id):
if not logged_in():
return dumps(False)
player = Player.query.get(player_id)
if player is None:
return dumps(False)
activate = request.get_json()['active']
if activate:
player.activate()
else:
player.deactivate()
DB.session.commit()
return dumps(True)
@app.route(Routes['adeactivatesponsor'] + "/<int:year>" + "/<int:sponsor_id>")
def admin_activate_sponsor(year, sponsor_id):
if not logged_in():
return redirect(url_for('admin_login'))
sponsor = Sponsor.query.get(sponsor_id)
if sponsor is None:
return render_template("admin/notFound.html",
route=Routes,
year=year,
title="Sponsor not found"
)
return render_template("admin/activateSponsor.html",
year=year,
sponsor=sponsor.json(),
route=Routes,
title="Activate/Deactivate Sponsor")
@app.route(Routes['adeactivatesponsor'] + "/<int:year>" + "/<int:sponsor_id>",
methods=["POST"])
def admin_activate_sponsor_post(year, sponsor_id):
if not logged_in():
return dumps(False)
sponsor = Sponsor.query.get(sponsor_id)
if sponsor is None:
return dumps(False)
activate = request.get_json()['active']
if activate:
sponsor.activate()
else:
sponsor.deactivate()
DB.session.commit()
return dumps(True)
@app.route(Routes['editespys'] + "/<int:year>" + "/<int:team_id>")
def admin_edit_espys(year, team_id):
if not logged_in():
return redirect(url_for('admin_login'))
espys = Espys.query.filter(Espys.team_id == team_id).all()
result = []
for espy in espys:
result.append(espy.json())
return render_template("admin/editEspys.html",
year=year,
route=Routes,
espys=result,
team_id=team_id,
title="Edit Espys",
sponsors=get_sponsors(True))
@app.route(Routes['editbat'] + "/<int:year>" + "/<int:game_id>")
def admin_edit_bat(year, game_id):
if not logged_in():
return redirect(url_for('admin_login'))
    game = Game.query.get(game_id)
    if game is None:
        return render_template("admin/notFound.html",
                               route=Routes,
                               title="Game not found",
                               year=year
                               )
    results = game.bats
    away_team_id = game.away_team_id
    home_team_id = game.home_team_id
away_bats = []
home_bats = []
for bat in results:
if bat.team_id == game.home_team_id:
home_bats.append(bat.json())
elif bat.team_id == game.away_team_id:
away_bats.append(bat.json())
away_players = get_team_players(game.away_team_id)
home_players = get_team_players(game.home_team_id)
return render_template("admin/editBat.html",
year=year,
game_id=game_id,
route=Routes,
away_bats=away_bats,
home_bats=home_bats,
home_players=home_players,
away_players=away_players,
away_team_id=away_team_id,
home_team_id=home_team_id,
title="Edit Bats",
game=str(game),
players=get_players(),
BATS=BATS)
@app.route(Routes['alogout'])
def admin_logout():
logout()
return redirect(url_for('reroute'))
@app.route(Routes['aportal'], methods=['POST'])
def admin_portal():
if 'admin' in session and 'password' in session:
admin = session['admin']
password = session['password']
else:
admin = request.form.get('admin')
password = request.form.get('password')
if check_auth(admin, password):
session['admin'] = admin
session['password'] = password
return redirect(url_for('admin_home', year=date.today().year))
else:
session['error'] = 'INVALID CREDENTIALS'
return redirect(url_for('admin_login'))
@app.route(Routes['alogin'])
def admin_login():
post_url = Routes['aportal']
error = None
if 'error' in session:
error = session.pop('error', None)
logout()
return render_template('admin/login.html',
type='Admin',
error=error,
route=Routes,
post_url=post_url)
def logged_in():
logged = False
if 'admin' in session and 'password' in session:
logged = check_auth(session['admin'], session['password'])
return logged
def logout():
session.pop('admin', None)
session.pop('password', None)
return
def get_sponsors(active=True):
results = (Sponsor.query.filter(Sponsor.active == active).order_by("name")
).all()
sponsors = []
for sponsor in results:
sponsors.append(sponsor.json())
return sponsors
def get_leagues():
results = League.query.all()
leagues = []
for league in results:
leagues.append(league.json())
return leagues
def get_funs():
results = Fun.query.all()
return [fun.json() for fun in results]
def get_divisions():
results = Division.query.all()
return [division.json() for division in results]
def get_players(active=True):
results = (Player.query.filter(Player.active == active).order_by("name")
).all()
players = []
for player in results:
players.append(player.admin_json())
return players
def get_team_players(team_id):
team = Team.query.get(team_id)
players = []
for player in team.players:
players.append(player.json())
return players
|
fras2560/mlsb-platform
|
api/admin/__init__.py
|
Python
|
apache-2.0
| 19,760
|
from django.conf import settings
from django.utils.timezone import now
from cronjobs import register
from remo.profiles.models import UserProfile
from remo.profiles.tasks import send_generic_mail
from remo.base.utils import go_back_n_months
@register
def new_reps_reminder():
"""Send email to reps-mentors listing new subscribers the past month."""
prev = go_back_n_months(now().date())
prev_date = prev.strftime('%B %Y')
reps = UserProfile.objects
reps_num = reps.count()
new_reps = reps.filter(date_joined_program__month=prev.month)
email_template = 'emails/new_reps_monthly_reminder.jinja'
subject = '[Info] New Reps for %s' % prev_date
recipient = settings.REPS_MENTORS_LIST
data = {'reps': new_reps, 'date': prev_date, 'reps_num': reps_num}
send_generic_mail.delay([recipient], subject, email_template, data)
|
mozilla/remo
|
remo/profiles/cron.py
|
Python
|
bsd-3-clause
| 865
|
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.automation.base import Automation
log = CPLog(__name__)
class CP(Automation):
def getMovies(self):
if self.isDisabled():
return
return []
|
Akylas/CouchPotatoServer
|
couchpotato/core/providers/automation/cp/main.py
|
Python
|
gpl-3.0
| 250
|
import unittest.mock
import typing_extensions
from bugwarrior import config, services
from bugwarrior.config import load, schema
from .base import ConfigTest
LONG_MESSAGE = """\
Some message that is over 100 characters. This message is so long it's
going to fill up your floppy disk taskwarrior backup. Actually it's not
that long.""".replace('\n', ' ')
class DumbConfig(config.ServiceConfig, prefix='dumb'):
service: typing_extensions.Literal['test']
class DumbIssue(services.Issue):
"""
Implement the required methods but they shouldn't be called.
"""
def get_default_description(self):
raise NotImplementedError
def to_taskwarrior(self):
raise NotImplementedError
class DumbIssueService(services.IssueService):
"""
Implement the required methods but they shouldn't be called.
"""
ISSUE_CLASS = DumbIssue
CONFIG_SCHEMA = DumbConfig
def get_owner(self, issue):
raise NotImplementedError
def issues(self):
raise NotImplementedError
class ServiceBase(ConfigTest):
def setUp(self):
super().setUp()
self.config = load.BugwarriorConfigParser()
self.config.add_section('general')
self.config.set('general', 'targets', 'test')
self.config.set('general', 'interactive', 'false')
self.config.add_section('test')
self.config.set('test', 'service', 'test')
def makeService(self):
with unittest.mock.patch('bugwarrior.config.schema.get_service',
lambda x: DumbIssueService):
conf = schema.validate_config(self.config, 'general', 'configpath')
return DumbIssueService(conf['test'], conf['general'], 'test')
def makeIssue(self):
service = self.makeService()
return service.get_issue_for_record(None)
class TestIssueService(ServiceBase):
def test_build_annotations_default(self):
service = self.makeService()
annotations = service.build_annotations(
(('some_author', LONG_MESSAGE),), 'example.com')
self.assertEqual(annotations, [
'@some_author - Some message that is over 100 characters. Thi...'
])
def test_build_annotations_limited(self):
self.config.set('general', 'annotation_length', '20')
service = self.makeService()
annotations = service.build_annotations(
(('some_author', LONG_MESSAGE),), 'example.com')
self.assertEqual(
annotations, ['@some_author - Some message that is...'])
def test_build_annotations_limitless(self):
self.config.set('general', 'annotation_length', None)
service = self.makeService()
annotations = service.build_annotations(
(('some_author', LONG_MESSAGE),), 'example.com')
self.assertEqual(annotations, [
f'@some_author - {LONG_MESSAGE}'])
class TestIssue(ServiceBase):
def test_build_default_description_default(self):
issue = self.makeIssue()
description = issue.build_default_description(LONG_MESSAGE)
self.assertEqual(
description, '(bw)Is# - Some message that is over 100 chara')
def test_build_default_description_limited(self):
self.config.set('general', 'description_length', '20')
issue = self.makeIssue()
description = issue.build_default_description(LONG_MESSAGE)
self.assertEqual(
description, '(bw)Is# - Some message that is')
def test_build_default_description_limitless(self):
self.config.set('general', 'description_length', None)
issue = self.makeIssue()
description = issue.build_default_description(LONG_MESSAGE)
self.assertEqual(
description, f'(bw)Is# - {LONG_MESSAGE}')
|
ralphbean/bugwarrior
|
tests/test_service.py
|
Python
|
gpl-3.0
| 3,791
|
# Copyright (c) 2016 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
import os
from enum import Enum
from UM.PluginObject import PluginObject
class FileReader(PluginObject):
## Used as the return value of FileReader.preRead.
class PreReadResult(Enum):
# The user has accepted the configuration dialog or there is no configuration dialog.
# The plugin should load the data.
accepted = 1
# The user has cancelled the dialog so don't load the data.
cancelled = 2
# preRead has failed and no further processing should happen.
failed = 3
def __init__(self):
super().__init__()
self._supported_extensions = []
## Returns true if file_name can be processed by this plugin.
#
    #   \return boolean indicating if this plugin accepts the specified file.
def acceptsFile(self, file_name):
extension = os.path.splitext(file_name)[1]
if extension.lower() in self._supported_extensions:
return True
return False
## Executed before reading the file. This is used, for example, to display an import
# configuration dialog. If a plugin displays such a dialog,
# this function should block until it has been closed.
#
# \return \type{PreReadResult} indicating if the user accepted or canceled the dialog.
def preRead(self, file_name):
return FileReader.PreReadResult.accepted
## Read mesh data from file and returns a node that contains the data
#
# \return data read.
def read(self, file_name):
raise NotImplementedError("Reader plugin was not correctly implemented, no read was specified")
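# A minimal, hypothetical reader built on this interface (names are assumed,
# not part of Uranium) might look like:
#
#   class ExampleTextReader(FileReader):
#       def __init__(self):
#           super().__init__()
#           self._supported_extensions = [".txt"]
#
#       def read(self, file_name):
#           with open(file_name, "r") as f:
#               return f.read()
#
# acceptsFile() then matches the ".txt" extension, preRead() keeps the default
# PreReadResult.accepted, and read() returns the file's contents.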
|
onitake/Uranium
|
UM/FileHandler/FileReader.py
|
Python
|
agpl-3.0
| 1,713
|
from autosar.writer.writer_base import BaseWriter, ElementWriter
from autosar.base import applyFilter
import collections.abc
import autosar.behavior
import autosar.component
class PackageWriter(BaseWriter):
def __init__(self, version, patch):
super().__init__(version, patch)
self.registeredWriters={}
self.xmlSwitcher={}
self.codeSwitcher={}
def registerElementWriter(self, elementWriter):
"""
registers a new element writer into this package writer
"""
assert(isinstance(elementWriter, ElementWriter))
writerName = type(elementWriter).__name__
if writerName not in self.registeredWriters:
list_or_generator = elementWriter.getSupportedXML()
if list_or_generator is not None:
if isinstance(list_or_generator, str):
list_or_generator = [list_or_generator]
for elementName in list_or_generator:
self.xmlSwitcher[elementName] = elementWriter
list_or_generator = elementWriter.getSupportedCode()
if list_or_generator is not None:
if isinstance(list_or_generator, str):
list_or_generator = [list_or_generator]
for elementName in list_or_generator:
self.codeSwitcher[elementName] = elementWriter
self.registeredWriters[writerName] = elementWriter
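    # Illustrative note (the element name below is an assumption, not part of
    # this file): if a registered ElementWriter's getSupportedXML() yields
    # "SwBaseType", it is stored as self.xmlSwitcher["SwBaseType"], and
    # toXML() later dispatches on elem.__class__.__name__ to find that writer.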
def toXML(self, package, filters, ignore):
lines=[]
lines.extend(self.beginPackage(package.name))
if len(package.elements)>0:
lines.append(self.indent("<ELEMENTS>",1))
for elem in package.elements:
elemRef = elem.ref
ignoreElem=True if (isinstance(ignore, collections.abc.Iterable) and elemRef in ignore) else False
#if SWC was ignored by user, also ignore its InternalBehavior and SwcImplementation elements in case they are in the same package
if not ignoreElem and isinstance(elem, autosar.behavior.InternalBehavior):
if (isinstance(ignore, collections.abc.Iterable) and elem.componentRef in ignore):
ignoreElem = True
if not ignoreElem and isinstance(elem, autosar.component.SwcImplementation):
behavior = package.rootWS().find(elem.behaviorRef)
if behavior is not None:
if (isinstance(ignore, collections.abc.Iterable) and behavior.componentRef in ignore):
ignoreElem = True
if not ignoreElem and applyFilter(elemRef, filters):
elementName = elem.__class__.__name__
elementWriter = self.xmlSwitcher.get(elementName)
if elementWriter is not None:
result = elementWriter.writeElementXML(elem)
if result is None:
print("[PackageWriter] No return value: %s"%elementName)
continue
else:
lines.extend(self.indent(result,2))
else:
package.unhandledWriter.add(elementName)
lines.append(self.indent("</ELEMENTS>",1))
else:
if self.version<4.0:
lines.append(self.indent("<ELEMENTS/>",1))
if len(package.subPackages)>0:
numPackets = 0
if self.version >= 3.0 and self.version < 4.0:
for subPackage in package.subPackages:
if applyFilter(subPackage.ref, filters):
if numPackets == 0:
lines.append(self.indent("<SUB-PACKAGES>",1))
lines.extend(self.indent(self.toXML(subPackage, filters, ignore),2))
numPackets += 1
if numPackets > 0:
lines.append(self.indent("</SUB-PACKAGES>",1))
elif self.version >= 4.0:
for subPackage in package.subPackages:
if applyFilter(subPackage.ref, filters):
if numPackets == 0:
lines.append(self.indent("<AR-PACKAGES>",1))
lines.extend(self.indent(self.toXML(subPackage, filters, ignore),2))
numPackets += 1
if numPackets > 0:
lines.append(self.indent("</AR-PACKAGES>",1))
lines.extend(self.endPackage())
return lines
def toCode(self, package, filters, ignore, localvars, isTemplate):
lines=[]
if not isTemplate:
if package.role is not None:
lines.append('package=ws.createPackage("%s", role="%s")'%(package.name, package.role))
else:
lines.append('package=ws.createPackage("%s")'%(package.name))
localvars['package']=package
for subPackage in package.subPackages:
if subPackage.role is not None:
lines.append('package.createSubPackage("%s", role="%s")'%(subPackage.name, subPackage.role))
else:
lines.append('package.createSubPackage("%s")'%(subPackage.name))
for elem in package.elements:
elemRef = elem.ref
ignoreElem=True if (isinstance(ignore, str) and ignore==elemRef) or (isinstance(ignore, collections.abc.Iterable) and elemRef in ignore) else False
#if SWC was ignored by user, also ignore its InternalBehavior and SwcImplementation elements in case they are in the same package
if not ignoreElem and isinstance(elem, autosar.behavior.InternalBehavior):
if (isinstance(ignore, str) and ignore==elem.componentRef) or (isinstance(ignore, collections.abc.Iterable) and elem.componentRef in ignore): ignoreElem = True
if not ignoreElem and isinstance(elem, autosar.component.SwcImplementation):
behavior = package.rootWS().find(elem.behaviorRef)
if behavior is not None:
if (isinstance(ignore, str) and ignore==behavior.componentRef) or (isinstance(ignore, collections.abc.Iterable) and behavior.componentRef in ignore): ignoreElem = True
if not ignoreElem and applyFilter(elemRef, filters):
elementName = elem.__class__.__name__
elementWriter = self.codeSwitcher.get(elementName)
if elementWriter is not None:
result = elementWriter.writeElementCode(elem, localvars)
if result is None:
print("[PackageWriter] No return value: %s"%elementName)
continue
else:
lines.extend(result)
else:
package.unhandledWriter.add(elementName)
else:
pass
return lines
|
cogu/autosar
|
autosar/writer/package_writer.py
|
Python
|
mit
| 7,113
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
# Allow direct execution
import os
import re
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import http_server_port, try_rm
from youtube_dl import YoutubeDL
from youtube_dl.compat import compat_http_server
from youtube_dl.downloader.http import HttpFD
from youtube_dl.utils import encodeFilename
import threading
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_SIZE = 10 * 1024
class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
def log_message(self, format, *args):
pass
def send_content_range(self, total=None):
range_header = self.headers.get('Range')
start = end = None
if range_header:
mobj = re.search(r'^bytes=(\d+)-(\d+)', range_header)
if mobj:
start = int(mobj.group(1))
end = int(mobj.group(2))
valid_range = start is not None and end is not None
if valid_range:
content_range = 'bytes %d-%d' % (start, end)
if total:
content_range += '/%d' % total
self.send_header('Content-Range', content_range)
return (end - start + 1) if valid_range else total
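    # Illustrative trace (values are hypothetical): a request header
    # "Range: bytes=0-9999" with total=10240 produces
    # "Content-Range: bytes 0-9999/10240" and returns 10000, the number of
    # bytes the handler should actually write.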
def serve(self, range=True, content_length=True):
self.send_response(200)
self.send_header('Content-Type', 'video/mp4')
size = TEST_SIZE
if range:
size = self.send_content_range(TEST_SIZE)
if content_length:
self.send_header('Content-Length', size)
self.end_headers()
self.wfile.write(b'#' * size)
def do_GET(self):
if self.path == '/regular':
self.serve()
elif self.path == '/no-content-length':
self.serve(content_length=False)
elif self.path == '/no-range':
self.serve(range=False)
elif self.path == '/no-range-no-content-length':
self.serve(range=False, content_length=False)
else:
assert False
class FakeLogger(object):
def debug(self, msg):
pass
def warning(self, msg):
pass
def error(self, msg):
pass
class TestHttpFD(unittest.TestCase):
def setUp(self):
self.httpd = compat_http_server.HTTPServer(
('127.0.0.1', 0), HTTPTestRequestHandler)
self.port = http_server_port(self.httpd)
self.server_thread = threading.Thread(target=self.httpd.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
def download(self, params, ep):
params['logger'] = FakeLogger()
ydl = YoutubeDL(params)
downloader = HttpFD(ydl, params)
filename = 'testfile.mp4'
try_rm(encodeFilename(filename))
self.assertTrue(downloader.real_download(filename, {
'url': 'http://127.0.0.1:%d/%s' % (self.port, ep),
}))
self.assertEqual(os.path.getsize(encodeFilename(filename)), TEST_SIZE)
try_rm(encodeFilename(filename))
def download_all(self, params):
for ep in ('regular', 'no-content-length', 'no-range', 'no-range-no-content-length'):
self.download(params, ep)
def test_regular(self):
self.download_all({})
def test_chunked(self):
self.download_all({
'http_chunk_size': 1000,
})
if __name__ == '__main__':
unittest.main()
|
rg3/youtube-dl
|
test/test_downloader_http.py
|
Python
|
unlicense
| 3,499
|
import inspect
from mock import MagicMock
from plastic import Plastic
from util import must_be_checkable
from cant_find_dependency import CantFindDependency
from primitive_musts import SafeObject
def _mock_must_return_itself_for_must_calls(mock):
mock.must_be_type.return_value = mock
mock.must_be_primitive = mock
mock.must.return_value = mock
mock.must_have.return_value = mock
mock.must_use.return_value = mock
mock.must_make.return_value = mock
mock.that_must.return_value = mock
mock.that_must_have.return_value = mock
mock.that_must_use.return_value = mock
mock.that_must_make.return_value = mock
mock.and_must.return_value = mock
mock.and_must_have.return_value = mock
mock.and_must_use.return_value = mock
mock.and_must_make.return_value = mock
def must_handle_synonyms(obj, synonym_dict):
for member in inspect.getmembers(obj):
m_name, m_val = member
if callable(m_val) and not m_name.startswith('_') and m_name in synonym_dict:
for alias in synonym_dict[m_name]:
if not hasattr(obj, alias):
setattr(obj, alias, m_val)
class ParameterSignature(object):
'''WRITEME'''
def __init__(self, param_name):
self.name = param_name
self.requirements = Plastic()
def __str__(self):
return self.name+' that '+str(self.requirements)
def get_param_mold(self):
return self.requirements
def mock(self):
result = MagicMock(spec=[m[0] for m in inspect.getmembers(self.requirements)])
_mock_must_return_itself_for_must_calls(result)
return result
class FunctionSignature(object):
'''WRITEME'''
def __init__(self, f, owner_obj=None, ignore_warnings=False):
self.function = f
self.name = f.__name__
self.args, self.varargs, self.keywords, self.defaults = inspect.getargspec(f.__init__ if inspect.isclass(f) else f)
self.is_method = self.args[0] == 'self' and owner_obj is not None
self.is_constructor = self.args[0] == 'self' and owner_obj is None
if self.args[0] == 'self':
self.args = self.args[1:] # Shave off 'self'
self.param_signatures = [ParameterSignature(x) for x in self.args]
self.returns = None
self.has_explicit_return_value = False
if self.is_method:
def note_return(*x):
if len(x) == 1:
x = x[0]
self.has_explicit_return_value = True
self.returns = x
if type(x) is tuple:
self.returns = map(Plastic, x)
elif type(x) is str:
self.returns = Plastic(x)
return self.returns
owner_obj.must_return = note_return
try:
self.mold_result = f(*[p.get_param_mold() for p in self.param_signatures])
except Exception as ex:
if self.has_explicit_return_value is False and not ignore_warnings:
# TODO: Provide better user notification and details on failure
self.mold_result = ex
print 'I MUST WARN YOU: ' + str(ex)
print 'Warning in ' + str(self)
print ''
if self.is_method:
del owner_obj.must_return
if self.returns is None and inspect.isclass(f):
self.returns = self.name
self.has_explicit_return_value = True
def get_default(self, index):
if self.defaults is None:
return None
index += len(self.defaults) - len(self.args)
if index >= 0:
return self.defaults[index]
else:
return None
def mock_params(self):
return [x.mock() for x in self.param_signatures]
def _return_str(self):
if self.has_explicit_return_value:
return str(self.returns)
else:
return '???'
def __str__(self):
arg_headers = [str(p) for p in self.param_signatures]
for i in range(len(self.args)):
if self.get_default(i) is not None:
arg_headers[i] += ' (default='+str(self.get_default(i))+')'
if len(arg_headers) < 2:
return self.name+"("+','.join(arg_headers)+") -> "+self._return_str()
return self.name+"(\n\t"+',\n\t'.join(arg_headers)+"\n) -> "+self._return_str()
class ClassPattern(object):
''' WRITEME '''
def __init__(self, constructor, is_function_wrapper=False, ignore_warnings=False):
self._constructor = FunctionSignature(constructor, ignore_warnings=ignore_warnings)
self._is_function_wrapper = is_function_wrapper
self._properties = []
self._capabilities = {}
obj = self._constructor.mold_result
members = filter(lambda x: not x[0].startswith('_'), inspect.getmembers(obj))
for m in members:
m_name, m_val = m
if callable(m_val):
self._capabilities[m_name] = FunctionSignature(m_val, obj, ignore_warnings=ignore_warnings)
else:
self._properties.append(m_name)
def reflects_class(self, possible_class):
return possible_class == self._constructor.function
def describe(self, member_name=None):
if member_name is None:
if self._is_function_wrapper:
return '\n'+'\n'.join(map(self.describe, self._capabilities))
members = self.describe('__init__')+"\n"
for c in self._capabilities:
members += self.describe(c)+"\n"
return ("\n%s:\n\t" % self._constructor.name) + members.replace("\n","\n\t")
if member_name == "__init__":
return str(self._constructor)
elif member_name in self._capabilities:
return str(self._capabilities[member_name])
raise NotImplementedError # TODO
def mock_dependencies(self, method_name):
if method_name == "__init__":
return self._constructor.mock_params()
raise NotImplementedError # TODO
def create(self, universe, aliases, known_parameters):
params = {}
for i in range(len(self._constructor.args)):
arg_name = self._constructor.args[i]
if arg_name in known_parameters:
params[arg_name] = known_parameters[arg_name]
must_be_checkable(params[arg_name])
else:
try:
namehint = str(self._constructor.function)+' needs '+('an' if arg_name[0] in 'aeiou' else 'a')+' "'+arg_name+'" that'
params[arg_name] = universe.create_with_namehint(namehint, self._constructor.param_signatures[i].get_param_mold())
except CantFindDependency as ex:
default = self._constructor.get_default(i)
if default is not None:
params[arg_name] = default
else:
raise ex
result = self._constructor.function(**params)
must_handle_synonyms(result, aliases)
must_be_checkable(result)
result.must_return = lambda *x: SafeObject()
return result
def matches(self, requirements, aliases):
right_type = requirements.type is None or requirements.type == 'object'
has_properties = self.has(requirements.properties)
takes_parameters = self.takes(requirements.known_parameters.keys())
has_capabilities = self.can(requirements.capabilities, aliases)
return right_type and has_properties and takes_parameters and has_capabilities
def has(self, attributes):
return all([x in self._properties for x in attributes])
def takes(self, parameters):
return all([p in self._constructor.args for p in parameters])
def can(self, target_capabilities, aliases):
for target_capability in target_capabilities.items():
action = target_capability[0]
taking = target_capability[1][0]
returning = target_capability[1][1]
if action in aliases:
possible_actions = aliases[action]
if not any([self.can_do_action(a,taking,returning,aliases) for a in possible_actions]):
return False
else:
if not self.can_do_action(action, taking, returning, aliases):
return False
return True
def can_do_action(self, action, taking, returning, aliases):
if action in self._capabilities:
numberOfArgsTaken = 0 if len(taking) is 0 else taking.count(',')+1 # TODO: This is bad and should feel bad.
numberOfArgsProvided = len(self._capabilities[action].args)
sameNumberOfArgs = numberOfArgsTaken == numberOfArgsProvided
if sameNumberOfArgs:
if returning and self._capabilities[action].returns:
if isinstance(self._capabilities[action].returns, type):
return self._capabilities[action].returns == returning
else:
if type(self._capabilities[action].returns) is list:
# TODO: THIS IS A BAD CHECK
return len(self._capabilities[action].returns) == returning.count(',')+1
else:
return self._capabilities[action].returns.matches(returning, aliases)
return True
return False
return False
def __str__(self):
result = str(self._constructor.function)
if len(self._properties) > 0:
result += " has "+', '.join(self._properties)
if len(self._capabilities) > 0:
result += " and can " if len(self._properties) > 0 else " can "
result += ', '.join(self._capabilities.keys())
return result
|
umaptechnologies/must
|
details/class_pattern.py
|
Python
|
apache-2.0
| 9,902
|
import os, sys, inspect
# Add matrix-sdk submodule to path
__this_dir = os.path.split(inspect.getfile(inspect.currentframe()))[0]
__this_dir = os.path.realpath(os.path.abspath(__this_dir))
__matrix_dir = os.path.join(__this_dir, "matrix-python-sdk")
if __matrix_dir not in sys.path:
sys.path.insert(0, __matrix_dir)
|
Vzaa/navi
|
navi/__init__.py
|
Python
|
apache-2.0
| 321
|
import subprocess
from i3pystatus import IntervalModule
class Ping(IntervalModule):
"""
    This module displays the ping value between your computer and a host.
    The ``switch_state`` callback can disable the ping when desired.
    The ``host`` property can be changed to ping a specific host.
.. rubric:: Available formatters
* {ping} the ping value in milliseconds.
"""
interval = 5
settings = (
"color",
"format",
("color_disabled", "color when disabled"),
("color_down", "color when ping fail"),
("format_disabled", "format string when disabled"),
("format_down", "format string when ping fail"),
("host", "host to ping")
)
color = "#FFFFFF"
color_down = "#FF0000"
color_disabled = None
disabled = False
format = "{ping} ms"
format_down = "down"
format_disabled = None
host = "8.8.8.8"
on_leftclick = "switch_state"
def init(self):
if not self.color_down:
self.color_down = self.color
if not self.format_disabled:
self.format_disabled = self.format_down
if not self.color_disabled:
self.color_disabled = self.color_down
def switch_state(self):
self.disabled = not self.disabled
def ping_host(self):
p = subprocess.Popen(["ping", "-c1", "-w%d" % self.interval,
self.host], stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
out, _ = p.communicate()
if p.returncode == 0:
return float(out.decode().split("\n")[1]
.split("time=")[1].split()[0])
else:
return None
def run(self):
if self.disabled:
self.output = {
"full_text": self.format_disabled,
"color": self.color_disabled
}
return
ping = self.ping_host()
if not ping:
self.output = {
"full_text": self.format_down,
"color": self.color_down
}
return
self.output = {
"full_text": self.format.format(ping=ping),
"color": self.color
}
|
eBrnd/i3pystatus
|
i3pystatus/ping.py
|
Python
|
mit
| 2,244
|
"""-0.75/(1+x**2)-(0.65*x*math.atan(1/x))+0.65
(-4* x**2 - 20*x - 100) + (1 - x)**4
3*x**2+ 12/(x**3) - 5
3*x**4 + x**2 - 2*x + 1
10+x**3-2*x-5*(np.finfo(float).eps)**x
x**2- 10*(np.finfo(float).eps)**(0.1*x)
(10*x**3+3*x**2+5)**2
0.5/math.sqrt(1+x**2)- math.sqrt(1+x**2)*(1-0.5/(1+x**2))+x
(np.finfo(float).eps)**x-x**3
(x**2-1)**3-(2*x-5)**4
(-4*x**2-20*x-100) + (1-x)**4
(x**2+(y+1)**2)*(x**2+(y-1)**2)
(x**2-y)**2+y**2
50*(y-x**2)**2+(2-x)**2
(x+2*y-7)**2+(2*x+y-5)**2
(1.5-x*(1-y))**2+(2.25-x*(1-y**2))**2+(2.625-x*(1-y**3))**2
(10*(x[1]-x[0]**2))**2+(1-x[0])**2+90*(x[3]-x[2]**2)**2+(1-x[2])**2+10*(x[1]+x[3]-2)**2+0.1*(x[1]-x[3]) #4 variables, impossible to plot
(4-2.1*x[0]**2+(x[0]**4)/3)*x[0]**2+x[0]*x[1]+(-4+4*x[1]**2)*x[1]**2
(x[0]+10*x[1])**2+5*(x[2]-x[3])**2+(x[1]-2*x[2])**4+10*(x[0]-x[3])**4 #4 variables
x[0]**2+x[1]**2+x[2]**2 #3 variables
100*(x[0]**2-x[1])**2+(1-x[0])**2
math.floor(x[0])+math.floor(x[1])+math.floor(x[2])+math.floor(x[3])+math.floor(x[4]) #5 variables
#23 - 30 variables
suma = 0
for i in range(1,30):
suma+= i*x[i-1]**4
return suma + random.gauss(0,1)
#24
a = [[-32,-16,0,16,32,-32,-16,0,16,32,-32,-16,0,16,32,-32,-16,0,16,32,-32,-16,0,16,32], [-32,-32,-32,-32,-32,-16,-16,-16,-16,-16,0,0,0,0,0,16,16,16,16,16,32,32,32,32,32]]
def fitness(x,y):
superSuma = 0
for j in range(1,25):
superSuma += 1/f2(j,x,y)
return 1/(1/500 + superSuma)
def f2(j,x,y):
suma = 0
i = 0
suma+= (x- a[i][j])**6
i = 1
suma+= (y- a[i][j])**6
return j + suma """
|
jresendiz27/EvolutionaryComputing
|
practices/first/evolutionaryStrategies/functions.py
|
Python
|
apache-2.0
| 1,490
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import unittest
import numpy
from pyscf import lib
from pyscf.pbc import gto as pbcgto
from pyscf.pbc.scf import hf as pbchf
import pyscf.pbc.scf as pscf
from pyscf.pbc import df as pdf
L = 4
n = 21
cell = pbcgto.Cell()
cell.build(unit = 'B',
verbose = 7,
output = '/dev/null',
a = ((L,0,0),(0,L,0),(0,0,L)),
mesh = [n,n,n],
atom = [['He', (L/2.-.5,L/2.,L/2.-.5)],
['He', (L/2. ,L/2.,L/2.+.5)]],
basis = { 'He': [[0, (0.8, 1.0)],
[0, (1.0, 1.0)],
[0, (1.2, 1.0)]]})
mf = pbchf.RHF(cell, exxdiv='ewald').run()
kmf = pscf.KRHF(cell, [[0,0,0]], exxdiv='ewald').run()
def tearDownModule():
global cell, mf, kmf
cell.stdout.close()
del cell, mf, kmf
class KnownValues(unittest.TestCase):
def test_hcore(self):
h1ref = pbchf.get_hcore(cell)
h1 = pbchf.RHF(cell).get_hcore()
self.assertAlmostEqual(abs(h1-h1ref).max(), 0, 9)
self.assertAlmostEqual(lib.finger(h1), 0.14116483012673137, 9)
cell1 = cell.copy()
cell1.ecp = {'He': (2, ((-1, (((7.2, .3),),)),))}
cell1.build(0, 0)
kpt = numpy.ones(3) * .5
h1ref = pbchf.get_hcore(cell1, kpt)
h1 = pbchf.RHF(cell1).get_hcore(kpt=kpt)
self.assertAlmostEqual(abs(h1-h1ref).max(), 0, 9)
self.assertAlmostEqual(lib.finger(h1), -2.708431894877279-0.395390980665125j, 9)
h1 = pscf.KRHF(cell1).get_hcore(kpts=[kpt])
self.assertEqual(h1.ndim, 3)
self.assertAlmostEqual(abs(h1[0]-h1ref).max(), 0, 9)
def test_rhf_vcut_sph(self):
mf = pbchf.RHF(cell, exxdiv='vcut_sph')
e1 = mf.kernel()
self.assertAlmostEqual(e1, -4.29190260870812, 8)
self.assertTrue(mf.mo_coeff.dtype == numpy.double)
mf = pscf.KRHF(cell, [[0,0,0]], exxdiv='vcut_sph')
e0 = mf.kernel()
self.assertTrue(numpy.allclose(e0,e1))
numpy.random.seed(1)
k = numpy.random.random(3)
mf = pbchf.RHF(cell, k, exxdiv='vcut_sph')
e1 = mf.kernel()
self.assertAlmostEqual(e1, -4.1379172088570595, 8)
self.assertTrue(mf.mo_coeff.dtype == numpy.complex128)
mf = pscf.KRHF(cell, k, exxdiv='vcut_sph')
e0 = mf.kernel()
self.assertTrue(numpy.allclose(e0,e1))
def test_rhf_exx_ewald(self):
self.assertAlmostEqual(mf.e_tot, -4.3511582284698633, 8)
self.assertTrue(mf.mo_coeff.dtype == numpy.double)
self.assertAlmostEqual(mf.e_tot, kmf.e_tot, 8)
# test bands
numpy.random.seed(1)
kpts_band = numpy.random.random((2,3))
e1, c1 = mf.get_bands(kpts_band)
e0, c0 = kmf.get_bands(kpts_band)
self.assertAlmostEqual(abs(e0[0]-e1[0]).max(), 0, 7)
self.assertAlmostEqual(abs(e0[1]-e1[1]).max(), 0, 7)
self.assertAlmostEqual(lib.finger(e1[0]), -6.2986775452228283, 7)
self.assertAlmostEqual(lib.finger(e1[1]), -7.6616273746782362, 7)
def test_rhf_exx_ewald_with_kpt(self):
numpy.random.seed(1)
k = numpy.random.random(3)
mf = pbchf.RHF(cell, k, exxdiv='ewald')
e1 = mf.kernel()
self.assertAlmostEqual(e1, -4.2048655827967139, 8)
self.assertTrue(mf.mo_coeff.dtype == numpy.complex128)
kmf = pscf.KRHF(cell, k, exxdiv='ewald')
e0 = kmf.kernel()
self.assertTrue(numpy.allclose(e0,e1))
# test bands
numpy.random.seed(1)
kpt_band = numpy.random.random(3)
e1, c1 = mf.get_bands(kpt_band)
e0, c0 = kmf.get_bands(kpt_band)
self.assertAlmostEqual(abs(e0-e1).max(), 0, 7)
self.assertAlmostEqual(lib.finger(e1), -6.8312867098806249, 7)
def test_rhf_exx_None(self):
mf = pbchf.RHF(cell, exxdiv=None)
e1 = mf.kernel()
self.assertAlmostEqual(e1, -2.9325094887283196, 8)
self.assertTrue(mf.mo_coeff.dtype == numpy.double)
mf = pscf.KRHF(cell, [[0,0,0]], exxdiv=None)
e0 = mf.kernel()
self.assertTrue(numpy.allclose(e0,e1))
numpy.random.seed(1)
k = numpy.random.random(3)
mf = pbchf.RHF(cell, k, exxdiv=None)
mf.init_guess = 'hcore'
e1 = mf.kernel()
self.assertAlmostEqual(e1, -2.7862168430230341, 8)
self.assertTrue(mf.mo_coeff.dtype == numpy.complex128)
mf = pscf.KRHF(cell, k, exxdiv=None)
mf.init_guess = 'hcore'
e0 = mf.kernel()
self.assertTrue(numpy.allclose(e0,e1))
def test_init_guess_by_chkfile(self):
numpy.random.seed(1)
k = numpy.random.random(3)
mf = pbchf.RHF(cell, k, exxdiv='vcut_sph')
mf.max_cycle = 1
mf.diis = None
e1 = mf.kernel()
self.assertAlmostEqual(e1, -4.132445328608581, 9)
mf1 = pbchf.RHF(cell, exxdiv='vcut_sph')
mf1.chkfile = mf.chkfile
mf1.init_guess = 'chkfile'
mf1.diis = None
mf1.max_cycle = 1
e1 = mf1.kernel()
self.assertAlmostEqual(e1, -4.291854736401251, 9)
self.assertTrue(mf1.mo_coeff.dtype == numpy.double)
def test_uhf_exx_ewald(self):
mf = pscf.UHF(cell, exxdiv='ewald')
mf.init_guess = 'hcore'
e1 = mf.kernel()
self.assertAlmostEqual(e1, -4.3511582287379111, 8)
self.assertTrue(mf.mo_coeff[0].dtype == numpy.double)
kmf = pscf.KUHF(cell, [[0,0,0]], exxdiv='ewald')
kmf.init_guess = 'hcore'
e0 = kmf.kernel()
self.assertTrue(numpy.allclose(e0,e1))
# test bands
numpy.random.seed(1)
kpts_band = numpy.random.random((2,3))
e1a, e1b = mf.get_bands(kpts_band)[0]
e0a, e0b = kmf.get_bands(kpts_band)[0]
self.assertAlmostEqual(abs(e0a[0]-e1a[0]).max(), 0, 5)
self.assertAlmostEqual(abs(e0a[1]-e1a[1]).max(), 0, 5)
self.assertAlmostEqual(abs(e0b[0]-e1b[0]).max(), 0, 5)
self.assertAlmostEqual(abs(e0b[1]-e1b[1]).max(), 0, 5)
self.assertAlmostEqual(lib.finger(e1a[0]), -6.2986775452228283, 5)
self.assertAlmostEqual(lib.finger(e1a[1]), -7.6616273746782362, 5)
numpy.random.seed(1)
k = numpy.random.random(3)
mf = pscf.UHF(cell, k, exxdiv='ewald')
e1 = mf.kernel()
self.assertAlmostEqual(e1, -4.2048655827967139, 8)
self.assertTrue(mf.mo_coeff[0].dtype == numpy.complex128)
kmf = pscf.KUHF(cell, k, exxdiv='ewald')
e0 = kmf.kernel()
self.assertTrue(numpy.allclose(e0,e1))
# test bands
numpy.random.seed(1)
kpts_band = numpy.random.random((2,3))
e1a, e1b = mf.get_bands(kpts_band)[0]
e0a, e0b = kmf.get_bands(kpts_band)[0]
self.assertAlmostEqual(abs(e0a[0]-e1a[0]).max(), 0, 5)
self.assertAlmostEqual(abs(e0a[1]-e1a[1]).max(), 0, 5)
self.assertAlmostEqual(abs(e0b[0]-e1b[0]).max(), 0, 5)
self.assertAlmostEqual(abs(e0b[1]-e1b[1]).max(), 0, 5)
self.assertAlmostEqual(lib.finger(e1a[0]), -6.8312867098806249, 5)
self.assertAlmostEqual(lib.finger(e1a[1]), -6.1120214505413086, 5)
def test_ghf_exx_ewald(self):
mf = pscf.GHF(cell, exxdiv='ewald')
mf.init_guess = 'hcore'
e1 = mf.kernel()
self.assertAlmostEqual(e1, -4.3511582287379111, 8)
self.assertTrue(mf.mo_coeff.dtype == numpy.double)
kmf = pscf.KGHF(cell, [[0,0,0]], exxdiv='ewald')
kmf.init_guess = 'hcore'
e0 = kmf.kernel()
self.assertTrue(numpy.allclose(e0,e1))
# # test bands
# numpy.random.seed(1)
# kpts_band = numpy.random.random((2,3))
# e1, c1 = mf.get_bands(kpts_band)
# e0, c0 = kmf.get_bands(kpts_band)
# self.assertAlmostEqual(abs(e0[0]-e1[0]).max(), 0, 7)
# self.assertAlmostEqual(abs(e0[1]-e1[1]).max(), 0, 7)
# self.assertAlmostEqual(lib.finger(e1[0]), -6.2986775452228283, 7)
# self.assertAlmostEqual(lib.finger(e1[1]), -7.6616273746782362, 7)
numpy.random.seed(1)
k = numpy.random.random(3)
mf = pscf.GHF(cell, k, exxdiv='ewald')
e1 = mf.kernel()
self.assertAlmostEqual(e1, -4.2048655827967139, 8)
self.assertTrue(mf.mo_coeff.dtype == numpy.complex128)
kmf = pscf.KGHF(cell, k, exxdiv='ewald')
e0 = kmf.kernel()
self.assertTrue(numpy.allclose(e0,e1))
# # test bands
# numpy.random.seed(1)
# kpts_band = numpy.random.random((2,3))
# e1, c1 = mf.get_bands(kpts_band)
# e0, c0 = kmf.get_bands(kpts_band)
# self.assertAlmostEqual(abs(e0[0]-e1[0]).max(), 0, 7)
# self.assertAlmostEqual(abs(e0[1]-e1[1]).max(), 0, 7)
# self.assertAlmostEqual(lib.finger(e1[0]), -6.8312867098806249, 7)
# self.assertAlmostEqual(lib.finger(e1[1]), -6.1120214505413086, 7)
# def test_rhf_0d(self):
# from pyscf.df import mdf_jk
# from pyscf.scf import hf
# L = 4
# cell = pbcgto.Cell()
# cell.build(unit = 'B',
# a = numpy.eye(3)*L*5,
# mesh = [21]*3,
# atom = '''He 2 2 2; He 2 2 3''',
# dimension = 0,
# verbose = 0,
# basis = { 'He': [[0, (0.8, 1.0)],
# [0, (1.0, 1.0)],
# [0, (1.2, 1.0)]]})
# mol = cell.to_mol()
# mf = mdf_jk.density_fit(hf.RHF(mol))
# mf.with_df.mesh = [21]*3
# mf.with_df.auxbasis = {'He':[[0, (1e6, 1)]]}
# mf.with_df.charge_constraint = False
# mf.with_df.metric = 'S'
# eref = mf.kernel()
#
# mf = pbchf.RHF(cell)
# mf.with_df = pdf.AFTDF(cell)
# mf.exxdiv = None
# mf.get_hcore = lambda *args: hf.get_hcore(mol)
# mf.energy_nuc = lambda *args: mol.energy_nuc()
# e1 = mf.kernel()
# self.assertAlmostEqual(e1, eref, 8)
def test_rhf_1d(self):
L = 4
cell = pbcgto.Cell()
cell.build(unit = 'B',
a = [[L,0,0],[0,L*5,0],[0,0,L*5]],
mesh = [11,20,20],
atom = '''He 2 0 0; He 3 0 0''',
dimension = 1,
low_dim_ft_type = 'inf_vacuum',
verbose = 0,
basis = { 'He': [[0, (0.8, 1.0)],
#[0, (1.0, 1.0)],
[0, (1.2, 1.0)]
]})
mf = pbchf.RHF(cell)
mf.with_df = pdf.AFTDF(cell)
mf.with_df.eta = 0.3
mf.with_df.mesh = cell.mesh
mf.init_guess = 'hcore'
e1 = mf.kernel()
self.assertAlmostEqual(e1, -3.24497234871167, 5)
def test_rhf_2d(self):
L = 4
cell = pbcgto.Cell()
cell.build(unit = 'B',
a = [[L,0,0],[0,L,0],[0,0,L*5]],
mesh = [11,11,20],
atom = '''He 2 0 0; He 3 0 0''',
dimension = 2,
low_dim_ft_type = 'inf_vacuum',
verbose = 0,
basis = { 'He': [[0, (0.8, 1.0)],
#[0, (1.0, 1.0)],
[0, (1.2, 1.0)]
]})
mf = pbchf.RHF(cell)
mf.with_df = pdf.AFTDF(cell)
mf.with_df.eta = 0.3
mf.with_df.mesh = cell.mesh
e1 = mf.kernel()
self.assertAlmostEqual(e1, -3.2681555164454039, 5)
def test_rhf_2d_fft(self):
L = 4
cell = pbcgto.Cell()
cell.build(unit = 'B',
a = [[L,0,0],[0,L,0],[0,0,L*5]],
mesh = [11,11,20],
atom = '''He 2 0 0; He 3 0 0''',
dimension = 2,
verbose = 0,
basis = { 'He': [[0, (0.8, 1.0)],
[0, (1.2, 1.0)]
]})
mf = pbchf.RHF(cell, exxdiv='ewald')
mf.with_df = pdf.FFTDF(cell)
mf.with_df.mesh = cell.mesh
e1 = mf.kernel()
self.assertAlmostEqual(e1, -3.5797041803667593, 5)
mf1 = pbchf.RHF(cell, exxdiv='ewald')
mf1.with_df = pdf.FFTDF(cell)
mf1.with_df.mesh = cell.mesh
mf1.direct_scf = True
e1 = mf1.kernel()
self.assertAlmostEqual(e1, -3.5797041803667593, 5)
mf2 = pbchf.RHF(cell, exxdiv=None)
mf2.with_df = pdf.FFTDF(cell)
mf2.with_df.mesh = cell.mesh
mf2.direct_scf = True
e2 = mf2.kernel()
self.assertAlmostEqual(e2, -1.629571720365774, 5)
def test_uhf_1d(self):
L = 4
cell = pbcgto.Cell()
cell.build(unit = 'B',
a = numpy.eye(3)*4,
mesh = [10,20,20],
atom = '''He 2 0 0; He 3 0 0''',
dimension = 1,
low_dim_ft_type = 'inf_vacuum',
verbose = 0,
basis = { 'He': [[0, (0.8, 1.0)],
#[0, (1.0, 1.0)],
[0, (1.2, 1.0)]
]})
mf = pscf.UHF(cell)
mf.with_df = pdf.AFTDF(cell)
mf.with_df.eta = 0.3
mf.with_df.mesh = cell.mesh
mf.init_guess = 'hcore'
e1 = mf.kernel()
self.assertAlmostEqual(e1, -3.24497234871167, 5)
def test_ghf_1d(self):
L = 4
cell = pbcgto.Cell()
cell.build(unit = 'B',
a = numpy.eye(3)*4,
mesh = [10,20,20],
atom = '''He 2 0 0; He 3 0 0''',
dimension = 1,
low_dim_ft_type = 'inf_vacuum',
verbose = 0,
basis = { 'He': [[0, (0.8, 1.0)],
#[0, (1.0, 1.0)],
[0, (1.2, 1.0)]
]})
mf = pscf.GHF(cell)
mf.with_df = pdf.AFTDF(cell)
mf.with_df.eta = 0.3
mf.with_df.mesh = cell.mesh
mf.init_guess = 'hcore'
e1 = mf.kernel()
self.assertAlmostEqual(e1, -3.24497234871167, 5)
def test_get_veff(self):
mf = pscf.RHF(cell)
numpy.random.seed(1)
nao = cell.nao_nr()
dm = numpy.random.random((nao,nao)) + numpy.random.random((nao,nao))*1j
dm = dm + dm.conj().T
v11 = mf.get_veff(cell, dm, kpt=cell.get_abs_kpts([.25,.25,.25]))
v12 = mf.get_veff(cell, dm, kpts_band=cell.get_abs_kpts([.25,.25,.25]))
v13 = mf.get_veff(cell, dm, kpt=cell.get_abs_kpts([-1./3,1./3,.25]),
kpts_band=cell.get_abs_kpts([.25,.25,.25]))
v14 = mf.get_veff(cell, dm, kpt=cell.get_abs_kpts([-1./3,1./3,.25]),
kpts_band=cell.make_kpts([2,1,1]))
self.assertTrue(v11.dtype == numpy.complex128)
self.assertTrue(v12.dtype == numpy.complex128)
mf = pscf.UHF(cell)
v21 = mf.get_veff(cell, dm, kpt=cell.get_abs_kpts([.25,.25,.25]))
dm = [dm*.5,dm*.5]
v22 = mf.get_veff(cell, dm, kpts_band=cell.get_abs_kpts([.25,.25,.25]))
v23 = mf.get_veff(cell, dm, kpt=cell.get_abs_kpts([-1./3,1./3,.25]),
kpts_band=cell.get_abs_kpts([.25,.25,.25]))
v24 = mf.get_veff(cell, dm, kpt=cell.get_abs_kpts([-1./3,1./3,.25]),
kpts_band=cell.make_kpts([2,1,1]))
self.assertAlmostEqual(abs(v11-v21).max(), 0, 9)
self.assertAlmostEqual(abs(v12-v22).max(), 0, 9)
self.assertAlmostEqual(abs(v13-v23).max(), 0, 9)
self.assertAlmostEqual(abs(v14-v24).max(), 0, 9)
self.assertAlmostEqual(lib.finger(v11), -0.30110964334164825+0.81409418199767414j, 9)
self.assertAlmostEqual(lib.finger(v12), -2.1601376488983997-9.4070613374115908j, 9)
def test_init(self):
from pyscf.pbc import dft
cell_u = cell.copy()
cell_u.spin = 2
self.assertTrue(isinstance(pscf.RKS (cell ), dft.rks.RKS ))
self.assertTrue(isinstance(pscf.RKS (cell_u), dft.roks.ROKS ))
self.assertTrue(isinstance(pscf.UKS (cell ), dft.uks.UKS ))
self.assertTrue(isinstance(pscf.ROKS (cell ), dft.roks.ROKS ))
self.assertTrue(isinstance(pscf.KS (cell ), dft.rks.RKS ))
self.assertTrue(isinstance(pscf.KS (cell_u), dft.uks.UKS ))
self.assertTrue(isinstance(pscf.KRKS (cell ), dft.krks.KRKS ))
self.assertTrue(isinstance(pscf.KRKS (cell_u), dft.krks.KRKS ))
self.assertTrue(isinstance(pscf.KUKS (cell ), dft.kuks.KUKS ))
self.assertTrue(isinstance(pscf.KROKS(cell ), dft.kroks.KROKS))
self.assertTrue(isinstance(pscf.KKS (cell ), dft.krks.KRKS ))
self.assertTrue(isinstance(pscf.KKS (cell_u), dft.kuks.KUKS ))
self.assertTrue(isinstance(pscf.RHF (cell ), pscf.hf.RHF ))
self.assertTrue(isinstance(pscf.RHF (cell_u), pscf.rohf.ROHF ))
self.assertTrue(isinstance(pscf.KRHF (cell ), pscf.khf.KRHF ))
self.assertTrue(isinstance(pscf.KRHF (cell_u), pscf.khf.KRHF ))
self.assertTrue(isinstance(pscf.UHF (cell ), pscf.uhf.UHF ))
self.assertTrue(isinstance(pscf.KUHF (cell_u), pscf.kuhf.KUHF ))
self.assertTrue(isinstance(pscf.GHF (cell ), pscf.ghf.GHF ))
self.assertTrue(isinstance(pscf.KGHF (cell_u), pscf.kghf.KGHF ))
self.assertTrue(isinstance(pscf.ROHF (cell ), pscf.rohf.ROHF ))
self.assertTrue(isinstance(pscf.ROHF (cell_u), pscf.rohf.ROHF ))
self.assertTrue(isinstance(pscf.KROHF(cell ), pscf.krohf.KROHF))
self.assertTrue(isinstance(pscf.KROHF(cell_u), pscf.krohf.KROHF))
self.assertTrue(isinstance(pscf.HF (cell ), pscf.hf.RHF ))
self.assertTrue(isinstance(pscf.HF (cell_u), pscf.uhf.UHF ))
self.assertTrue(isinstance(pscf.KHF (cell ), pscf.khf.KRHF ))
self.assertTrue(isinstance(pscf.KHF (cell_u), pscf.kuhf.KUHF ))
def test_dipole_moment(self):
dip = mf.dip_moment()
self.assertAlmostEqual(lib.finger(dip), 0.03847620192010277, 8)
# For test coverage only. Results for low-dimension systems are not
# implemented.
with lib.temporary_env(cell, dimension=1):
kdm = kmf.get_init_guess(key='minao')
dip = kmf.dip_moment(cell, kdm)
#self.assertAlmostEqual(lib.finger(dip), 0, 9)
def test_makov_payne_correction(self):
de = pbchf.makov_payne_correction(mf)
self.assertAlmostEqual(de[0], -0.1490687416177664, 7)
self.assertAlmostEqual(de[0], de[1], 7)
self.assertAlmostEqual(de[0], de[2], 7)
def test_init_guess_by_1e(self):
dm = mf.get_init_guess(key='1e')
self.assertAlmostEqual(lib.finger(dm), 0.025922864381755062, 9)
dm = kmf.get_init_guess(key='1e')
self.assertEqual(dm.ndim, 3)
self.assertAlmostEqual(lib.finger(dm), 0.025922864381755062, 9)
def test_init_guess_by_atom(self):
with lib.temporary_env(cell, dimension=1):
dm = mf.get_init_guess(key='minao')
kdm = kmf.get_init_guess(key='minao')
self.assertAlmostEqual(lib.finger(dm), -1.714952331211208, 8)
self.assertEqual(kdm.ndim, 3)
self.assertAlmostEqual(lib.finger(dm), -1.714952331211208, 8)
def test_jk(self):
nao = cell.nao
numpy.random.seed(2)
dm = numpy.random.random((2,nao,nao)) + .5j*numpy.random.random((2,nao,nao))
dm = dm + dm.conj().transpose(0,2,1)
ref = pbchf.get_jk(mf, cell, dm)
vj, vk = mf.get_jk_incore(cell, dm)
self.assertAlmostEqual(abs(vj - ref[0]).max(), 0, 9)
self.assertAlmostEqual(abs(vk - ref[1]).max(), 0, 9)
if __name__ == '__main__':
print("Full Tests for pbc.scf.hf")
unittest.main()
|
gkc1000/pyscf
|
pyscf/pbc/scf/test/test_hf.py
|
Python
|
apache-2.0
| 20,613
|
#!/usr/bin/python
import requests
import bs4
root_url = 'http://avito.ma'
index_url = root_url + '/fr/maroc/'
product = 'samsung'
url = index_url + product
products_link = []
products_price = []
products_title = []
def get_urls(url):
print url
response = requests.get(url)
soup = bs4.BeautifulSoup(response.text, "lxml")
product_items = soup.find_all("div",{"class":"item li-hover"})
for pro in product_items:
products_link.append(pro.a['href'].encode('utf-8'))
products_title.append(pro.h2.text.strip().encode('utf-8'))
price = pro.find("div", {"class": "item-price"}).find('span',{"class":"price_value"})
if price is None or price.text.strip() == '':
price = 0
else:
price = price.text.replace(" ", "").encode('utf-8')
products_price.append(int(price))
def browse_product(url,among_price):
response = requests.get(url)
soup = bs4.BeautifulSoup(response.text, "lxml")
if among_price != 0:
among_price = soup.h2.span['title']
panel = soup.find('div',{'class':'span20'})
body = soup.find('div',{'class':'span10'}).text
body = " ".join(body.split()).encode('utf-8')
nbr_view = panel.contents[1].span.text.split()[1]
location = panel.contents[3].h2.text.strip().encode('utf-8')
date = panel.contents[3].ul.abbr['title']
author = panel.contents[3].strong.text.encode('utf-8')
print author
print location
print date
print nbr_view
print among_price
print body[:150]+' ... view more'
for i in range(47, 50):
print("------------------ Page %i ------------------"%(i))
index_url_p = url + '?o=' + str(i)
get_urls(index_url_p)
j = 1
for i, link in enumerate(products_link):
price = products_price[i]
if price > 1500:
print '-------------PRODUCT %i------------------'%(j)
print products_title[i]
print link
print price
j = j +1
browse_product(link, products_price[i])
|
belheber/le-comparateur
|
pkgs/avito.py
|
Python
|
mit
| 1,896
|
import json, hmac, hashlib, time, requests, base64
from requests.auth import AuthBase
class CoinbaseAuth(AuthBase):
SIGNATURE_HTTP_HEADER = 'CB-ACCESS-SIGN'
TIMESTAMP_HTTP_HEADER = 'CB-ACCESS-TIMESTAMP'
KEY_HTTP_HEADER = 'CB-ACCESS-KEY'
PASSPHRASE_HTTP_HEADER = 'CB-ACCESS-PASSPHRASE'
def __init__(self, api_key, secret_key, passphrase):
self.api_key = api_key
self.secret_key = secret_key
self.passphrase = passphrase
def __call__(self, request):
#Add headers
request.headers[CoinbaseAuth.KEY_HTTP_HEADER] = self.api_key
request.headers[CoinbaseAuth.PASSPHRASE_HTTP_HEADER] = self.passphrase
timestamp = str(time.time())
request.headers[CoinbaseAuth.TIMESTAMP_HTTP_HEADER] = timestamp
#add signature
method = request.method
path = request.path_url
content = request.body
message = timestamp + method + path
if content:
message += content
print message
hmac_key = base64.b64decode(self.secret_key)
sig = hmac.new(hmac_key, message, hashlib.sha256)
sig_b64 = sig.digest().encode("base64").rstrip("\n")
#Add signature header
request.headers[CoinbaseAuth.SIGNATURE_HTTP_HEADER] = sig_b64
return request
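# Example usage sketch (illustrative only): the endpoint URL and the
# credential strings below are placeholders, not real values.
# api_url = 'https://api.exchange.coinbase.com/'
# auth = CoinbaseAuth('API_KEY', 'BASE64_SECRET', 'PASSPHRASE')
# r = requests.get(api_url + 'accounts', auth=auth)
# print r.json()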
|
streblo/requests-CoinbaseExchangeAuth
|
CoinbaseExchangeAuth.py
|
Python
|
mit
| 1,350
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Translation methods for generating localized strings.
To load a locale and generate a translated string:
user_locale = locale.get("es_LA")
print user_locale.translate("Sign out")
locale.get() returns the closest matching locale, not necessarily the
specific locale you requested. You can support pluralization with
additional arguments to translate(), e.g.:
people = [...]
message = user_locale.translate(
"%(list)s is online", "%(list)s are online", len(people))
print message % {"list": user_locale.list(people)}
The first string is chosen if len(people) == 1, otherwise the second
string is chosen.
Applications should call one of load_translations (which uses a simple
CSV format) or load_gettext_translations (which uses the .mo format
supported by gettext and related tools). If neither method is called,
the locale.translate method will simply return the original string.
"""
import csv
import datetime
import logging
import os
import os.path
import re
_default_locale = "en_US"
_translations = {}
_supported_locales = frozenset([_default_locale])
_use_gettext = False
_log = logging.getLogger('tornado.locale')
def get(*locale_codes):
"""Returns the closest match for the given locale codes.
We iterate over all given locale codes in order. If we have a tight
or a loose match for the code (e.g., "en" for "en_US"), we return
the locale. Otherwise we move to the next code in the list.
By default we return en_US if no translations are found for any of
the specified locales. You can change the default locale with
set_default_locale() below.
"""
return Locale.get_closest(*locale_codes)
def set_default_locale(code):
"""Sets the default locale, used in get_closest_locale().
The default locale is assumed to be the language used for all strings
in the system. The translations loaded from disk are mappings from
the default locale to the destination locale. Consequently, you don't
need to create a translation file for the default locale.
"""
global _default_locale
global _supported_locales
_default_locale = code
_supported_locales = frozenset(_translations.keys() + [_default_locale])
def load_translations(directory):
"""Loads translations from CSV files in a directory.
Translations are strings with optional Python-style named placeholders
(e.g., "My name is %(name)s") and their associated translations.
The directory should have translation files of the form LOCALE.csv,
e.g. es_GT.csv. The CSV files should have two or three columns: string,
translation, and an optional plural indicator. Plural indicators should
be one of "plural" or "singular". A given string can have both singular
and plural forms. For example "%(name)s liked this" may have a
different verb conjugation depending on whether %(name)s is one
name or a list of names. There should be two rows in the CSV file for
that string, one with plural indicator "singular", and one "plural".
For strings with no verbs that would change on translation, simply
use "unknown" or the empty string (or don't include the column at all).
Example translation es_LA.csv:
"I love you","Te amo"
"%(name)s liked this","A %(name)s les gust\xf3 esto","plural"
"%(name)s liked this","A %(name)s le gust\xf3 esto","singular"
"""
global _translations
global _supported_locales
_translations = {}
for path in os.listdir(directory):
if not path.endswith(".csv"): continue
locale, extension = path.split(".")
if locale not in LOCALE_NAMES:
_log.error("Unrecognized locale %r (path: %s)", locale,
os.path.join(directory, path))
continue
f = open(os.path.join(directory, path), "r")
_translations[locale] = {}
for i, row in enumerate(csv.reader(f)):
if not row or len(row) < 2: continue
row = [c.decode("utf-8").strip() for c in row]
english, translation = row[:2]
if len(row) > 2:
plural = row[2] or "unknown"
else:
plural = "unknown"
if plural not in ("plural", "singular", "unknown"):
_log.error("Unrecognized plural indicator %r in %s line %d",
plural, path, i + 1)
continue
_translations[locale].setdefault(plural, {})[english] = translation
f.close()
_supported_locales = frozenset(_translations.keys() + [_default_locale])
_log.info("Supported locales: %s", sorted(_supported_locales))
def load_gettext_translations(directory, domain):
"""Loads translations from gettext's locale tree
Locale tree is similar to system's /usr/share/locale, like:
{directory}/{lang}/LC_MESSAGES/{domain}.mo
Three steps are required to have your app translated:
1. Generate POT translation file
xgettext --language=Python --keyword=_:1,2 -d cyclone file1.py file2.html etc
2. Merge against existing POT file:
msgmerge old.po cyclone.po > new.po
3. Compile:
msgfmt cyclone.po -o {directory}/pt_BR/LC_MESSAGES/cyclone.mo
"""
import gettext
global _translations
global _supported_locales
global _use_gettext
_translations = {}
for lang in os.listdir(directory):
if os.path.isfile(os.path.join(directory, lang)): continue
try:
os.stat(os.path.join(directory, lang, "LC_MESSAGES", domain+".mo"))
_translations[lang] = gettext.translation(domain, directory,
languages=[lang])
except Exception, e:
logging.error("Cannot load translation for '%s': %s", lang, str(e))
continue
_supported_locales = frozenset(_translations.keys() + [_default_locale])
_use_gettext = True
_log.info("Supported locales: %s", sorted(_supported_locales))
def get_supported_locales(cls):
"""Returns a list of all the supported locale codes."""
return _supported_locales
class Locale(object):
@classmethod
def get_closest(cls, *locale_codes):
"""Returns the closest match for the given locale code."""
for code in locale_codes:
if not code: continue
code = code.replace("-", "_")
parts = code.split("_")
if len(parts) > 2:
continue
elif len(parts) == 2:
code = parts[0].lower() + "_" + parts[1].upper()
if code in _supported_locales:
return cls.get(code)
if parts[0].lower() in _supported_locales:
return cls.get(parts[0].lower())
return cls.get(_default_locale)
@classmethod
def get(cls, code):
"""Returns the Locale for the given locale code.
If it is not supported, we raise an exception.
"""
if not hasattr(cls, "_cache"):
cls._cache = {}
if code not in cls._cache:
assert code in _supported_locales
translations = _translations.get(code, None)
if translations is None:
locale = CSVLocale(code, {})
elif _use_gettext:
locale = GettextLocale(code, translations)
else:
locale = CSVLocale(code, translations)
cls._cache[code] = locale
return cls._cache[code]
def __init__(self, code, translations):
self.code = code
self.name = LOCALE_NAMES.get(code, {}).get("name", u"Unknown")
self.rtl = False
for prefix in ["fa", "ar", "he"]:
if self.code.startswith(prefix):
self.rtl = True
break
self.translations = translations
# Initialize strings for date formatting
_ = self.translate
self._months = [
_("January"), _("February"), _("March"), _("April"),
_("May"), _("June"), _("July"), _("August"),
_("September"), _("October"), _("November"), _("December")]
self._weekdays = [
_("Monday"), _("Tuesday"), _("Wednesday"), _("Thursday"),
_("Friday"), _("Saturday"), _("Sunday")]
def translate(self, message, plural_message=None, count=None):
raise NotImplementedError()
def format_date(self, date, gmt_offset=0, relative=True, shorter=False,
full_format=False):
"""Formats the given date (which should be GMT).
By default, we return a relative time (e.g., "2 minutes ago"). You
can return an absolute date string with relative=False.
You can force a full format date ("July 10, 1980") with
full_format=True.
"""
if self.code.startswith("ru"):
relative = False
if type(date) in (int, long, float):
date = datetime.datetime.utcfromtimestamp(date)
now = datetime.datetime.utcnow()
# Round down to now. Due to clock skew, things are sometimes
# slightly in the future.
if date > now: date = now
local_date = date - datetime.timedelta(minutes=gmt_offset)
local_now = now - datetime.timedelta(minutes=gmt_offset)
local_yesterday = local_now - datetime.timedelta(hours=24)
difference = now - date
seconds = difference.seconds
days = difference.days
_ = self.translate
format = None
if not full_format:
if relative and days == 0:
if seconds < 50:
return _("1 second ago", "%(seconds)d seconds ago",
seconds) % { "seconds": seconds }
if seconds < 50 * 60:
minutes = round(seconds / 60.0)
return _("1 minute ago", "%(minutes)d minutes ago",
minutes) % { "minutes": minutes }
hours = round(seconds / (60.0 * 60))
return _("1 hour ago", "%(hours)d hours ago",
hours) % { "hours": hours }
if days == 0:
format = _("%(time)s")
elif days == 1 and local_date.day == local_yesterday.day and \
relative:
format = _("yesterday") if shorter else \
_("yesterday at %(time)s")
elif days < 5:
format = _("%(weekday)s") if shorter else \
_("%(weekday)s at %(time)s")
elif days < 334: # 11mo, since confusing for same month last year
format = _("%(month_name)s %(day)s") if shorter else \
_("%(month_name)s %(day)s at %(time)s")
if format is None:
format = _("%(month_name)s %(day)s, %(year)s") if shorter else \
_("%(month_name)s %(day)s, %(year)s at %(time)s")
tfhour_clock = self.code not in ("en", "en_US", "zh_CN")
if tfhour_clock:
str_time = "%d:%02d" % (local_date.hour, local_date.minute)
elif self.code == "zh_CN":
str_time = "%s%d:%02d" % (
(u'\u4e0a\u5348', u'\u4e0b\u5348')[local_date.hour >= 12],
local_date.hour % 12 or 12, local_date.minute)
else:
str_time = "%d:%02d %s" % (
local_date.hour % 12 or 12, local_date.minute,
("am", "pm")[local_date.hour >= 12])
return format % {
"month_name": self._months[local_date.month - 1],
"weekday": self._weekdays[local_date.weekday()],
"day": str(local_date.day),
"year": str(local_date.year),
"time": str_time
}
def format_day(self, date, gmt_offset=0, dow=True):
"""Formats the given date as a day of week.
Example: "Monday, January 22". You can remove the day of week with
dow=False.
"""
local_date = date - datetime.timedelta(minutes=gmt_offset)
_ = self.translate
if dow:
return _("%(weekday)s, %(month_name)s %(day)s") % {
"month_name": self._months[local_date.month - 1],
"weekday": self._weekdays[local_date.weekday()],
"day": str(local_date.day),
}
else:
return _("%(month_name)s %(day)s") % {
"month_name": self._months[local_date.month - 1],
"day": str(local_date.day),
}
def list(self, parts):
"""Returns a comma-separated list for the given list of parts.
The format is, e.g., "A, B and C", "A and B" or just "A" for lists
of size 1.
"""
_ = self.translate
if len(parts) == 0: return ""
if len(parts) == 1: return parts[0]
comma = u' \u0648 ' if self.code.startswith("fa") else u", "
return _("%(commas)s and %(last)s") % {
"commas": comma.join(parts[:-1]),
"last": parts[len(parts) - 1],
}
def friendly_number(self, value):
"""Returns a comma-separated number for the given integer."""
if self.code not in ("en", "en_US"):
return str(value)
value = str(value)
parts = []
while value:
parts.append(value[-3:])
value = value[:-3]
return ",".join(reversed(parts))
class CSVLocale(Locale):
"""Locale implementation using tornado's CSV translation format."""
def translate(self, message, plural_message=None, count=None):
"""Returns the translation for the given message for this locale.
If plural_message is given, you must also provide count. We return
plural_message when count != 1, and we return the singular form
for the given message when count == 1.
"""
if plural_message is not None:
assert count is not None
if count != 1:
message = plural_message
message_dict = self.translations.get("plural", {})
else:
message_dict = self.translations.get("singular", {})
else:
message_dict = self.translations.get("unknown", {})
return message_dict.get(message, message)
class GettextLocale(Locale):
"""Locale implementation using the gettext module."""
def translate(self, message, plural_message=None, count=None):
if plural_message is not None:
assert count is not None
return self.translations.ungettext(message, plural_message, count)
else:
return self.translations.ugettext(message)
LOCALE_NAMES = {
"af_ZA": {"name_en": u"Afrikaans", "name": u"Afrikaans"},
"ar_AR": {"name_en": u"Arabic", "name": u"\u0627\u0644\u0639\u0631\u0628\u064a\u0629"},
"bg_BG": {"name_en": u"Bulgarian", "name": u"\u0411\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438"},
"bn_IN": {"name_en": u"Bengali", "name": u"\u09ac\u09be\u0982\u09b2\u09be"},
"bs_BA": {"name_en": u"Bosnian", "name": u"Bosanski"},
"ca_ES": {"name_en": u"Catalan", "name": u"Catal\xe0"},
"cs_CZ": {"name_en": u"Czech", "name": u"\u010ce\u0161tina"},
"cy_GB": {"name_en": u"Welsh", "name": u"Cymraeg"},
"da_DK": {"name_en": u"Danish", "name": u"Dansk"},
"de_DE": {"name_en": u"German", "name": u"Deutsch"},
"el_GR": {"name_en": u"Greek", "name": u"\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac"},
"en_GB": {"name_en": u"English (UK)", "name": u"English (UK)"},
"en_US": {"name_en": u"English (US)", "name": u"English (US)"},
"es_ES": {"name_en": u"Spanish (Spain)", "name": u"Espa\xf1ol (Espa\xf1a)"},
"es_LA": {"name_en": u"Spanish", "name": u"Espa\xf1ol"},
"et_EE": {"name_en": u"Estonian", "name": u"Eesti"},
"eu_ES": {"name_en": u"Basque", "name": u"Euskara"},
"fa_IR": {"name_en": u"Persian", "name": u"\u0641\u0627\u0631\u0633\u06cc"},
"fi_FI": {"name_en": u"Finnish", "name": u"Suomi"},
"fr_CA": {"name_en": u"French (Canada)", "name": u"Fran\xe7ais (Canada)"},
"fr_FR": {"name_en": u"French", "name": u"Fran\xe7ais"},
"ga_IE": {"name_en": u"Irish", "name": u"Gaeilge"},
"gl_ES": {"name_en": u"Galician", "name": u"Galego"},
"he_IL": {"name_en": u"Hebrew", "name": u"\u05e2\u05d1\u05e8\u05d9\u05ea"},
"hi_IN": {"name_en": u"Hindi", "name": u"\u0939\u093f\u0928\u094d\u0926\u0940"},
"hr_HR": {"name_en": u"Croatian", "name": u"Hrvatski"},
"hu_HU": {"name_en": u"Hungarian", "name": u"Magyar"},
"id_ID": {"name_en": u"Indonesian", "name": u"Bahasa Indonesia"},
"is_IS": {"name_en": u"Icelandic", "name": u"\xcdslenska"},
"it_IT": {"name_en": u"Italian", "name": u"Italiano"},
"ja_JP": {"name_en": u"Japanese", "name": u"\xe6\xe6\xe8"},
"ko_KR": {"name_en": u"Korean", "name": u"\xed\xea\xec"},
"lt_LT": {"name_en": u"Lithuanian", "name": u"Lietuvi\u0173"},
"lv_LV": {"name_en": u"Latvian", "name": u"Latvie\u0161u"},
"mk_MK": {"name_en": u"Macedonian", "name": u"\u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438"},
"ml_IN": {"name_en": u"Malayalam", "name": u"\u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02"},
"ms_MY": {"name_en": u"Malay", "name": u"Bahasa Melayu"},
"nb_NO": {"name_en": u"Norwegian (bokmal)", "name": u"Norsk (bokm\xe5l)"},
"nl_NL": {"name_en": u"Dutch", "name": u"Nederlands"},
"nn_NO": {"name_en": u"Norwegian (nynorsk)", "name": u"Norsk (nynorsk)"},
"pa_IN": {"name_en": u"Punjabi", "name": u"\u0a2a\u0a70\u0a1c\u0a3e\u0a2c\u0a40"},
"pl_PL": {"name_en": u"Polish", "name": u"Polski"},
"pt_BR": {"name_en": u"Portuguese (Brazil)", "name": u"Portugu\xeas (Brasil)"},
"pt_PT": {"name_en": u"Portuguese (Portugal)", "name": u"Portugu\xeas (Portugal)"},
"ro_RO": {"name_en": u"Romanian", "name": u"Rom\xe2n\u0103"},
"ru_RU": {"name_en": u"Russian", "name": u"\u0420\u0443\u0441\u0441\u043a\u0438\u0439"},
"sk_SK": {"name_en": u"Slovak", "name": u"Sloven\u010dina"},
"sl_SI": {"name_en": u"Slovenian", "name": u"Sloven\u0161\u010dina"},
"sq_AL": {"name_en": u"Albanian", "name": u"Shqip"},
"sr_RS": {"name_en": u"Serbian", "name": u"\u0421\u0440\u043f\u0441\u043a\u0438"},
"sv_SE": {"name_en": u"Swedish", "name": u"Svenska"},
"sw_KE": {"name_en": u"Swahili", "name": u"Kiswahili"},
"ta_IN": {"name_en": u"Tamil", "name": u"\u0ba4\u0bae\u0bbf\u0bb4\u0bcd"},
"te_IN": {"name_en": u"Telugu", "name": u"\u0c24\u0c46\u0c32\u0c41\u0c17\u0c41"},
"th_TH": {"name_en": u"Thai", "name": u"\u0e20\u0e32\u0e29\u0e32\u0e44\u0e17\u0e22"},
"tl_PH": {"name_en": u"Filipino", "name": u"Filipino"},
"tr_TR": {"name_en": u"Turkish", "name": u"T\xfcrk\xe7e"},
"uk_UA": {"name_en": u"Ukraini ", "name": u"\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430"},
"vi_VN": {"name_en": u"Vietnamese", "name": u"Ti\u1ebfng Vi\u1ec7t"},
"zh_CN": {"name_en": u"Chinese (Simplified)", "name": u"\xe4\xe6(\xe7\xe4)"},
"zh_HK": {"name_en": u"Chinese (Hong Kong)", "name": u"\xe4\xe6(\xe9\xe6)"},
"zh_TW": {"name_en": u"Chinese (Taiwan)", "name": u"\xe4\xe6(\xe5\xe7)"},
}
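# Example usage sketch (illustrative only; the directory path and locale codes
# below are placeholders):
# load_translations("/path/to/translations")  # expects files like es_LA.csv
# user_locale = get("es_LA", "en_US")         # falls back to the default locale
# print user_locale.translate("Sign out")
# print user_locale.format_date(datetime.datetime.utcnow() - datetime.timedelta(minutes=3))
# # -> a relative string such as "3 minutes ago" in the matched locale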
|
sorenh/cc
|
vendor/tornado/tornado/locale.py
|
Python
|
apache-2.0
| 19,768
|
# Webhooks for teamcity integration
from __future__ import absolute_import
from django.db.models import Q
from zerver.models import UserProfile
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
import logging
import ujson
def guess_zulip_user_from_teamcity(teamcity_username, realm):
try:
# Try to find a matching user in Zulip
# We search a user's full name, short name,
# and beginning of email address
user = UserProfile.objects.filter(
Q(full_name__iexact=teamcity_username) |
Q(short_name__iexact=teamcity_username) |
Q(email__istartswith=teamcity_username),
is_active=True,
realm=realm).order_by("id")[0]
return user
except IndexError:
return None
def get_teamcity_property_value(property_list, name):
for property in property_list:
if property['name'] == name:
return property['value']
return None
@api_key_only_webhook_view('Teamcity')
@has_request_variables
def api_teamcity_webhook(request, user_profile, client, payload=REQ(argument_type='body'),
stream=REQ(default='teamcity')):
message = payload['build']
build_name = message['buildFullName']
build_url = message['buildStatusUrl']
changes_url = build_url + '&tab=buildChangesDiv'
build_number = message['buildNumber']
build_result = message['buildResult']
build_result_delta = message['buildResultDelta']
build_status = message['buildStatus']
if build_result == 'success':
if build_result_delta == 'fixed':
status = 'has been fixed! :thumbsup:'
else:
status = 'was successful! :thumbsup:'
elif build_result == 'failure':
if build_result_delta == 'broken':
status = 'is broken with status %s! :thumbsdown:' % (build_status)
else:
status = 'is still broken with status %s! :thumbsdown:' % (build_status)
elif build_result == 'running':
status = 'has started.'
else:
status = '(has no message specified for status %s)' % (build_status)
template = (
u'%s build %s %s\n'
u'Details: [changes](%s), [build log](%s)')
body = template % (build_name, build_number, status, changes_url, build_url)
topic = build_name
# Check if this is a personal build, and if so try to private message the user who triggered it.
if get_teamcity_property_value(message['teamcityProperties'], 'env.BUILD_IS_PERSONAL') == 'true':
# The triggeredBy field gives us the teamcity user full name, and the "teamcity.build.triggeredBy.username"
# property gives us the teamcity username. Let's try finding the user email from both.
teamcity_fullname = message['triggeredBy'].split(';')[0]
teamcity_user = guess_zulip_user_from_teamcity(teamcity_fullname, user_profile.realm)
if teamcity_user is None:
teamcity_shortname = get_teamcity_property_value(message['teamcityProperties'], 'teamcity.build.triggeredBy.username')
if teamcity_shortname is not None:
teamcity_user = guess_zulip_user_from_teamcity(teamcity_shortname, user_profile.realm)
if teamcity_user is None:
# We can't figure out who started this build - there's nothing we can do here.
logging.info("Teamcity webhook couldn't find a matching Zulip user for Teamcity user '%s' or '%s'" % (teamcity_fullname, teamcity_shortname))
return json_success()
body = "Your personal build of " + body
check_send_message(user_profile, client, 'private', [teamcity_user.email], topic, body)
return json_success()
check_send_message(user_profile, client, 'stream', [stream], topic, body)
return json_success()
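# Example payload shape consumed by this handler (field names are the ones
# read above; values are illustrative placeholders):
# {
#   "build": {
#     "buildFullName": "Project :: Build Configuration",
#     "buildStatusUrl": "http://teamcity.example.com/viewLog.html?buildId=1",
#     "buildNumber": "42",
#     "buildResult": "success",
#     "buildResultDelta": "fixed",
#     "buildStatus": "Tests passed: 100",
#     "triggeredBy": "Jane Doe;id=3",
#     "teamcityProperties": [{"name": "env.BUILD_IS_PERSONAL", "value": "true"}]
#   }
# }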
|
peiwei/zulip
|
zerver/views/webhooks/teamcity.py
|
Python
|
apache-2.0
| 3,963
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from ambari_client.model.base_model import BaseModel, ModelList
from ambari_client.model import paths, utils, status
LOG = logging.getLogger(__name__)
# TODO
def get_views(resource_root, blueprint_name):
pass
# TODO
class ViewModel(BaseModel):
pass
|
radicalbit/ambari
|
ambari-client/python-client/src/main/python/ambari_client/model/views.py
|
Python
|
apache-2.0
| 1,077
|
from openflow.optin_manager.monitoring.SessionMonitoringThread import SessionMonitoringThread
import time
import threading
#from vt_manager.settings.settingsLoader import MONITORING_INTERVAL
from django.conf import settings
'''
author:msune
Monitoring thread implementation
'''
SESSION_MULTIPLIER = 5
class BackgroundMonitor():
'''
Several monitoring actions
'''
@staticmethod
def __monitorSessions():
SessionMonitoringThread.monitorSessionInNewThread()
@staticmethod
def monitor():
sessionMultipler = 0
while True:
if sessionMultipler % SESSION_MULTIPLIER == 0:
# reset to 1 (not 0) so the session check fires once every
# SESSION_MULTIPLIER iterations instead of on every pass
sessionMultipler = 1
BackgroundMonitor.__monitorSessions()
else:
sessionMultipler += 1
time.sleep(settings.MONITORING_INTERVAL)
|
dana-i2cat/felix
|
optin_manager/src/python/openflow/optin_manager/monitoring/BackgroundMonitor.py
|
Python
|
apache-2.0
| 854
|
"""Sponsors forms."""
from flask_wtf import Form
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.validators import Email
from wtforms_alchemy import model_form_factory
from pygotham.models import Level, Sponsor
__all__ = ('SponsorApplicationForm', 'SponsorEditForm')
ModelForm = model_form_factory(Form)
class SponsorApplicationForm(ModelForm):
"""Form for creating :class:`~pygotham.models.Sponsor` instances."""
level = QuerySelectField(query_factory=lambda: Level.query.current)
class Meta:
model = Sponsor
only = ('name', 'contact_name', 'contact_email')
field_args = {
'name': {'label': 'Sponsor Name'},
'contact_name': {'label': 'Contact Name'},
'contact_email': {
'label': 'Contact Email',
'validators': (Email(),),
},
}
class SponsorEditForm(ModelForm):
"""Form for editing :class:`~pygotham.models.Sponsor` instances.
The difference between this and
:class:`~pygotham.forms.SponsorApplicationForm` is that this form
does not allow ``level`` to be edited.
"""
class Meta:
model = Sponsor
only = ('name', 'contact_name', 'contact_email')
field_args = {
'name': {'label': 'Sponsor Name'},
'contact_name': {'label': 'Contact Name'},
'contact_email': {
'label': 'Contact Email',
'validators': (Email(),),
},
}
|
djds23/pygotham-1
|
pygotham/sponsors/forms.py
|
Python
|
bsd-3-clause
| 1,512
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (C) 2013 Didotech srl (<http://www.didotech.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv
class hr_employee(osv.osv):
_inherit = 'hr.employee'
def is_parent(self, cr, uid, user_id):
employee_ids = self.search(cr, uid, [('user_id', '=', user_id)])
if employee_ids:
employee = self.browse(cr, uid, employee_ids[0])
parent_ids = self.search(cr, uid, [('user_id', '=', uid)])
if parent_ids and employee.parent_id and employee.parent_id.id == parent_ids[0]:
return True
else:
return False
else:
return False
|
dhp-denero/LibrERP
|
hr_employee_hierarchy/hr.py
|
Python
|
agpl-3.0
| 1,553
|
import requests
import logging
import redis
from requests.packages.urllib3.exceptions import ConnectionError
from core.serialisers import json
from dss import localsettings
# TODO(fergal.moran@gmail.com): refactor these out to
# classes to avoid duplicating constants below
HEADERS = {
'content-type': 'application/json'
}
logger = logging.getLogger('spa')
def post_notification(session_id, image, message):
try:
payload = {
'sessionid': session_id,
'image': image,
'message': message
}
data = json.dumps(payload)
r = requests.post(
localsettings.REALTIME_HOST + 'notification',
data=data,
headers=HEADERS
)
if r.status_code == 200:
return ""
else:
return r.text
except ConnectionError:
#should probably implement some sort of retry in here
pass
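# Example usage sketch (illustrative only; the argument values are placeholders):
# err = post_notification('session-id', '/static/img/avatar.png', 'New mix uploaded')
# if err:
#     logger.warning('realtime notification failed: %s', err)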
|
fergalmoran/dss
|
core/realtime/notification.py
|
Python
|
bsd-2-clause
| 928
|
#!/usr/bin/env python
# This file is part of nexdatas - Tango Server for NeXus data writer
#
# Copyright (C) 2012-2017 DESY, Jan Kotanski <jkotan@mail.desy.de>
#
# nexdatas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nexdatas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with nexdatas. If not, see <http://www.gnu.org/licenses/>.
# \package test nexdatas
# \file ProxyTools.py
# unittests for field Tags running Tango Server
#
import PyTango
import time
# test fixture
class ProxyHelper(object):
# waits until the server leaves the RUNNING state
# \param proxy server proxy
# \param counts maximal number of checks (-1 means unlimited)
# \param sec time interval between two checks
@classmethod
def wait(cls, proxy, counts=-1, sec=0.01):
found = False
cnt = 0
while not found and cnt != counts:
try:
if proxy.state() != PyTango.DevState.RUNNING:
found = True
except Exception as e:
print(e)
found = False
raise
if cnt:
time.sleep(sec)
cnt += 1
return found
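# Example usage sketch (illustrative only; the device name is a placeholder):
# proxy = PyTango.DeviceProxy("p09/testserver/exp.01")
# ProxyHelper.wait(proxy, counts=1000, sec=0.01)  # blocks until the server leaves RUNNING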
|
nexdatas/writer
|
test/ProxyHelper.py
|
Python
|
gpl-3.0
| 1,599
|
from a10sdk.common.A10BaseClass import A10BaseClass
class AddressCfg(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param link_local: {"default": 0, "type": "number", "description": "Configure an IPv6 link local address", "format": "flag"}
:param anycast: {"default": 0, "type": "number", "description": "Configure an IPv6 anycast address", "format": "flag"}
:param ipv6_addr: {"type": "string", "description": "Set the IPv6 address of an interface", "format": "ipv6-address-plen"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "address-cfg"
self.DeviceProxy = ""
self.link_local = ""
self.anycast = ""
self.ipv6_addr = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Ipv6(A10BaseClass):
"""Class Description::
Global IPv6 configuration subcommands.
Class ipv6 supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param address_cfg: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"link-local": {"default": 0, "type": "number", "description": "Configure an IPv6 link local address", "format": "flag"}, "anycast": {"default": 0, "type": "number", "description": "Configure an IPv6 anycast address", "format": "flag"}, "ipv6-addr": {"type": "string", "description": "Set the IPv6 address of an interface", "format": "ipv6-address-plen"}, "optional": true}}]}
:param ipv6_enable: {"default": 0, "optional": true, "type": "number", "description": "Enable IPv6 processing", "format": "flag"}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/interface/tunnel/{ifnum}/ipv6`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "ipv6"
self.a10_url="/axapi/v3/interface/tunnel/{ifnum}/ipv6"
self.DeviceProxy = ""
self.address_cfg = []
self.ipv6_enable = ""
self.uuid = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
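# Example usage sketch (illustrative only; the interface number, address and
# device proxy below are placeholders):
# ipv6 = Ipv6(ifnum=1, ipv6_enable=1,
#             address_cfg=[{"ipv6-addr": "2001:db8::1/64", "anycast": 0, "link-local": 0}],
#             DeviceProxy=device_proxy)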
|
amwelch/a10sdk-python
|
a10sdk/core/interface/interface_tunnel_ipv6.py
|
Python
|
apache-2.0
| 2,656
|
# Copyright (c) 2008-2013 Szczepan Faber, Serhiy Oplakanets, Herr Kaste
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import re
def openFile(f, m='r'):
if (os.path.exists(f)):
return open(f, m)
else:
return open('../' + f, m)
demo_test = ' '.join(openFile('mockito_test/demo_test.py').readlines())
demo_test = demo_test.split('#DELIMINATOR')[1]
readme_before = ''.join(openFile('README.rst').readlines())
token = 'Basic usage:'
readme_after = re.compile(token + '.*', re.S).sub(token + '\n' + demo_test, readme_before)
if (readme_before != readme_after):
readme_file = openFile('README.rst', 'w')
readme_file.write(readme_after)
print "README updated"
else:
print "README update not required"
|
zhilts/pymockito
|
mockito_util/write_readme.py
|
Python
|
mit
| 1,779
|
from openerp.osv import osv, fields
class base_config_settings(osv.TransientModel):
_inherit = 'base.config.settings'
_columns = {
'company_share_product': fields.boolean('Share product to all companies',
help="Share your product to all companies defined in your instance.\n"
" * Checked : Product are visible for every company, even if a company is defined on the partner.\n"
" * Unchecked : Each company can see only its product (product where company is defined). Product not related to a company are visible for all companies."),
}
def get_default_company_share_product(self, cr, uid, fields, context=None):
product_rule = self.pool['ir.model.data'].xmlid_to_object(cr, uid, 'product.product_comp_rule', context=context)
return {
'company_share_product': not bool(product_rule.active)
}
def set_auth_company_share_product(self, cr, uid, ids, context=None):
product_rule = self.pool['ir.model.data'].xmlid_to_object(cr, uid, 'product.product_comp_rule', context=context)
for wizard in self.browse(cr, uid, ids, context=context):
self.pool['ir.rule'].write(cr, uid, [product_rule.id], {'active': not bool(wizard.company_share_product)}, context=context)
|
addition-it-solutions/project-all
|
addons/product/res_config.py
|
Python
|
agpl-3.0
| 1,299
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from google.cloud import storage
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
def load_data(args):
"""Loads the data"""
features = pd.read_csv('./sonar.all-data', header=None)
labels = features[60].values
features = features.drop(columns=60).values
label_encoder = preprocessing.LabelEncoder()
label_encoder.fit(labels)
labels = label_encoder.transform(labels)
train_f, test_f, train_l, test_l = train_test_split(
features, labels, test_size=args.test_split, random_state=args.seed)
return train_f, test_f, train_l, test_l
def save_model(model_dir, model_name):
"""Saves the model to Google Cloud Storage"""
bucket = storage.Client().bucket(model_dir)
blob = bucket.blob('{}/{}'.format(
datetime.datetime.now().strftime('sonar_%Y%m%d_%H%M%S'),
model_name))
blob.upload_from_filename(model_name)
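# Example usage sketch (illustrative only; `args` stands in for the argparse
# namespace a trainer script would pass, and bucket/file names are placeholders):
# import argparse
# args = argparse.Namespace(test_split=0.2, seed=42)
# train_f, test_f, train_l, test_l = load_data(args)
# save_model('my-model-bucket', 'model.joblib')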
|
GoogleCloudPlatform/cloudml-samples
|
tensorflow/containers/unsupported_runtime/data_utils.py
|
Python
|
apache-2.0
| 1,538
|
import sys
import config_util # pylint: disable=import-error
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=no-init
class MLPerfInference(config_util.Config):
"""Basic Config class for the MLPerf Inference load generator."""
@staticmethod
def fetch_spec(props):
solution = {
'name' : 'src',
'url' : 'https://github.com/mlperf/inference.git',
'managed' : False,
}
spec = {
'solutions': [solution]
}
if props.get('target_os'):
spec['target_os'] = props['target_os'].split(',')
return {
'type': 'gclient_git',
'gclient_git_spec': spec,
}
@staticmethod
def expected_root(_props):
return 'src'
def main(argv=None):
return MLPerfInference().handle_args(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
mlperf/inference_results_v0.5
|
open/Inspur/code/resnet/schedule/src/loadgen/depot_tools/fetch_configs/mlperf_loadgen.py
|
Python
|
apache-2.0
| 860
|
print "imported modc"
stuff = 942
things = "squirrel"
|
ArcherSys/ArcherSys
|
skulpt/test/run/pkga/pkgb/modc.py
|
Python
|
mit
| 54
|
#!/usr/bin/python -OO
# This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
# @package Archivematica
# @subpackage MCPServer
# @author Joseph Perry <joseph@artefactual.com>
import archivematicaMCP
import sys
from linkTaskManagerChoice import choicesAvailableForUnits
import logging
import lxml.etree as etree
import gearman
import cPickle
import time
import traceback
from socket import gethostname
sys.path.append("/usr/lib/archivematica/archivematicaCommon")
from custom_handlers import GroupWriteRotatingFileHandler
import databaseInterface
def rpcError(code="", details=""):
ret = etree.Element("Error")
etree.SubElement(ret, "code").text = code.__str__()
etree.SubElement(ret, "details").text = details.__str__()
return ret
def verifyDatabaseIsNotLocked():
timeBeforeReturningErrorLockedDB = 4
timeToSleep = 0.1
numberOfRuns = 0 #count of number of runs in loop
while not databaseInterface.sqlLock.acquire(False):
time.sleep(timeToSleep)
numberOfRuns += 1
if numberOfRuns * timeToSleep > timeBeforeReturningErrorLockedDB:
return rpcError(code="DatabaseLock", details="Couldn't acquire database lock")
databaseInterface.sqlLock.release()
return None
def getJobsAwaitingApproval():
ret = etree.Element("choicesAvailableForUnits")
dbStatus = verifyDatabaseIsNotLocked()
if dbStatus:
#print etree.tostring(dbStatus)
return etree.tostring(dbStatus)
for UUID, choice in choicesAvailableForUnits.items():
ret.append(choice.xmlify())
return etree.tostring(ret, pretty_print=True)
def approveJob(jobUUID, chain, agent):
print "approving: ", jobUUID, chain, agent
if jobUUID in choicesAvailableForUnits:
choicesAvailableForUnits[jobUUID].proceedWithChoice(chain, agent)
return "approving: ", jobUUID, chain
def gearmanApproveJob(gearman_worker, gearman_job):
try:
#execute = gearman_job.task
data = cPickle.loads(gearman_job.data)
jobUUID = data["jobUUID"]
chain = data["chain"]
agent = str(data["uid"])
ret = cPickle.dumps(approveJob(jobUUID, chain, agent))
if not ret:
ret = ""
return ""
#catch OS errors
except Exception as inst:
print >>sys.stderr, "DEBUG EXCEPTION! gearmanApproveJob"
traceback.print_exc(file=sys.stdout)
print >>sys.stderr, type(inst) # the exception instance
print >>sys.stderr, inst.args
return ""
def gearmanGetJobsAwaitingApproval(gearman_worker, gearman_job):
try:
#print "DEBUG - getting list of jobs"
#execute = gearman_job.task
ret = cPickle.dumps(getJobsAwaitingApproval())
#print ret
if not ret:
ret = ""
return ret
#catch OS errors
except Exception as inst:
print >>sys.stderr, "DEBUG EXCEPTION! gearmanGetJobsAwaitingApproval"
traceback.print_exc(file=sys.stdout)
print >>sys.stderr, type(inst) # the exception instance
print >>sys.stderr, inst.args
return ""
def startRPCServer():
logger = logging.getLogger("archivematica")
logger.addHandler(GroupWriteRotatingFileHandler("/var/log/archivematica/MCPServer/MCPServer.log", maxBytes=4194304))
gm_worker = gearman.GearmanWorker([archivematicaMCP.config.get('MCPServer', 'GearmanServerWorker')])
hostID = gethostname() + "_MCPServer"
gm_worker.set_client_id(hostID)
gm_worker.register_task("approveJob", gearmanApproveJob)
gm_worker.register_task("getJobsAwaitingApproval", gearmanGetJobsAwaitingApproval)
failMaxSleep = 30
failSleep = 1
failSleepIncrementor = 2
while True:
try:
gm_worker.work()
except gearman.errors.ServerUnavailable as inst:
#print >>sys.stderr, inst.args
#print >>sys.stderr, "Retrying in %d seconds." % (failSleep)
time.sleep(failSleep)
if failSleep < failMaxSleep:
failSleep += failSleepIncrementor
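# Example client-side sketch (illustrative only), calling the tasks registered
# above against the same gearman server:
# client = gearman.GearmanClient([archivematicaMCP.config.get('MCPServer', 'GearmanServerWorker')])
# job = client.submit_job("getJobsAwaitingApproval", cPickle.dumps(""))
# print cPickle.loads(job.result)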
|
michal-ruzicka/archivematica
|
src/MCPServer/lib/RPCServer.py
|
Python
|
agpl-3.0
| 4,756
|
from setuptools import setup, find_packages
setup(name='BIOMD0000000227',
version=20140916,
description='BIOMD0000000227 from BioModels',
url='http://www.ebi.ac.uk/biomodels-main/BIOMD0000000227',
maintainer='Stanley Gu',
maintainer_email='stanleygu@gmail.com',
packages=find_packages(),
package_data={'': ['*.xml', 'README.md']},
)
|
biomodels/BIOMD0000000227
|
setup.py
|
Python
|
cc0-1.0
| 377
|
""" Module with graphs creation and configuration functions. """
from operator import itemgetter
from collections import defaultdict
import demandimport
with demandimport.enabled():
import bkcharts as charts
import bokeh.palettes as palettes
import numpy as np
import perun.profile.query as query
import perun.profile.convert as convert
import perun.utils.bokeh_helpers as bokeh_helpers
import perun.postprocess.regression_analysis.data_provider as data_provider
import perun.postprocess.regressogram.methods as rg_methods
__author__ = 'Jiri Pavela'
def slice_resources_by_uid(resources, models, uids):
""" Splits the resource tables and models into slices by the unique uids found in the resources.
:param pandas.DataFrame resources: the data table from resources
:param list of dict models: the list of models from profile
:param map uids: the list of unique uids from profile
:returns generator: resources and models slices of unique uid as pair
(data_slice(pandas.DataFrame), uid_models(list))
"""
for uid in uids:
# Slice only the plotted uid from the data table
uid_slice = resources[resources.uid == uid]
if uid_slice.size == 0 or uid_slice.shape[0] <= 1:
# plotting one point does not work (it has no real usage anyway), fix later
continue
# Filter models for the given uid
uid_models = [model for model in models if model['uid'] == uid]
yield uid_slice, uid_models
def slice_models_by_interval(models):
""" Splits the models list into slices with different x axis intervals.
:param list of dict models: the list of models to split
:returns generator: stream of models slices (list)
"""
# Sort the models by intervals first, to yield them in order
models = sorted(models, key=itemgetter('x_start', 'x_end'))
# Separate the models into groups according to intervals
intervals = defaultdict(list)
for model in models:
intervals[(model['x_start'], model['x_end'])].append(model)
# Yield the list of models with the same interval
for interval_models in intervals.items():
yield interval_models[1]
def generate_plot_data_slices(profile):
""" Generates data slices for plotting resources and models. The resources are split by unique
uids, models are sliced into parts by uid and interval.
:param Profile profile: loaded perun profile
:returns generator: resources and models slices of unique uid as pair
(data_slice(pandas.DataFrame), uid_models(list))
"""
# Get resources for scatter plot points and models for curves
resource_table = convert.resources_to_pandas_dataframe(profile)
models = list(map(itemgetter(1), profile.all_models()))
# Get unique uids from profile, each uid (and optionally interval) will have separate graph
uids = map(convert.flatten, query.unique_resource_values_of(profile, 'uid'))
# Process each uid data
for uid_slice, uid_models in slice_resources_by_uid(resource_table, models, uids):
# Slice the uid models according to different intervals (each interval is plotted
# separately as it improves readability)
if uid_models:
for interval_models in slice_models_by_interval(uid_models):
yield uid_slice, interval_models
else:
# There are no models to plot
yield uid_slice, []
def draw_models(graph, models, profile):
""" Add models renderers to the graph.
:param charts.Graph graph: the scatter plot without models
:param list models: list of models to plot
:param dict profile: dictionary with measured data to pairing model with resources
:returns charts.Graph: the modified graph with model curves renderers
"""
# Get unique colors for the model curves
colour_palette = palettes.viridis(len(models))
for idx, model in enumerate(models):
# Coefficients are part only of parametric models
if 'coeffs' in model:
graph = create_parametric_model(graph, model, colour_palette[idx])
# The non-parametric models do not contain the coefficients
elif model['model'] == 'regressogram':
graph = create_regressogram_model(graph, model, colour_palette[idx])
elif model['model'] in ('moving_average', 'kernel_regression'):
graph = create_non_param_model(graph, model, profile, colour_palette[idx])
return graph
def create_parametric_model(graph, model, colour):
"""
Rendering the parametric models according to its coefficients.
:param charts.Graph graph: the scatter plot to render new models
:param model: the parametric model to be render to the graph
:param colour: the color of the current model to distinguish in the case of several models
in the graph
:return charts.Graph: the modified graph with new model curves
"""
# Convert the coefficients to points that can be plotted
model = convert.plot_data_from_coefficients_of(model)
# Create legend for the plotted model
coeffs = ', '.join('{}={:f}'.format(c['name'], c['value']) for c in model['coeffs'])
legend = '{0}: {1}, r^2={2:f}'.format(model['model'], coeffs, model['r_square'])
# Plot the model
graph.line(
x=model['plot_x'], y=model['plot_y'], line_color='#000000', line_width=7.5, legend=legend
)
graph.line(
x=model['plot_x'], y=model['plot_y'], line_color=colour, line_width=3.5, legend=legend
)
return graph
def create_regressogram_model(graph, model, colour):
"""
Rendering the regressogram model according to its computed properties.
:param charts.Graph graph: the scatter plot to render new models
:param model: the regressogram model which to be rendered to the graph
:param colour: the color of the current model to distinguish in the case of
several models in the graph
:return charts.Graph: the modified graph with new regressogram model
"""
bucket_no = len(model['bucket_stats'])
# Evenly division of the interval by number of buckets
x_pts = np.linspace(
model['x_start'], model['x_end'], num=bucket_no + 1
)
# Add the beginning of the first edge
y_pts = np.append(model['y_start'], model['bucket_stats'])
# Create legend for the plotted model
legend = '{0}: buckets={1}, stat: {2}, R^2={3:f}'.format(
model['model'][:3], bucket_no, model['statistic_function'], model['r_square']
)
# Plot the render_step_function function for regressogram model
graph_params = {'color': colour, 'line_width': 3.5, 'legend': legend}
return rg_methods.render_step_function(graph, x_pts, y_pts, graph_params)
def create_non_param_model(graph, model, profile, colour):
"""
Rendering the moving average model according to its computed properties.
:param charts.Graph graph: the scatter plot to render new models
:param model: the moving average model which to be rendered to the graph
:param dict profile: the profile to obtains the x-coordinates
:param colour: the color of the current model to distinguish in the case of
several models in the graph
:return charts.Graph: the modified graph with new moving average model
"""
def draw_model(y_pts):
# Obtains the x-coordinates with the required uid to pair with current model
params = {
'of_key': 'amount',
'per_key': model['per_key']
}
for x_pts, _, uid in data_provider.data_provider_mapper(profile, **params):
if uid == model['uid']:
# Plot the model
graph.line(
x=sorted(x_pts), y=y_pts, line_color=colour, line_width=3.5, legend=legend
)
return graph
legend = ""
if model['model'] == 'moving_average':
# Create legend for the plotted moving_average model
legend = '{0}: window={1}, R^2={2:f}'.format(
model['moving_method'], model['window_width'], model['r_square']
)
elif model['model'] == 'kernel_regression':
# Create legend for the plotted kernel models
legend = '{0}: bw={1}, R^2={2:f}'.format(
model['kernel_mode'], model['bandwidth'], model['r_square']
)
# Render kernel models to the current graph
return draw_model(y_pts=model.get('kernel_stats', model.get('bucket_stats')))
def create_from_params(profile, of_key, per_key, x_axis_label, y_axis_label, graph_title,
graph_width=800):
"""Creates Scatter plot graph according to the given parameters.
Takes the input profile, convert it to pandas.DataFrame. Then the data according to 'of_key'
parameter are used as values and are output depending on values of 'per_key'.
Furthermore, models records are also plotted if the profile contains them.
:param dict profile: dictionary with measured data
:param str of_key: key that specifies which fields of the resource entry will be used as data
:param str per_key: key that specifies fields of the resource that will be on the x axis
:param str x_axis_label: label on the x axis
:param str y_axis_label: label on the y axis
:param str graph_title: name of the graph
:param int graph_width: width of the created bokeh graph
:returns uid, charts.Scatter: uid and scatter plot graph with models built according to the
params
"""
for data_slice, models_slice in generate_plot_data_slices(profile):
# Plot the points as a scatter plot
scatter = charts.Scatter(data_slice, x=per_key, y=of_key, title=graph_title,
xlabel=x_axis_label, ylabel=y_axis_label,
tools='pan,wheel_zoom,box_zoom,zoom_in,zoom_out,crosshair,'
'reset,save')
# Configure the graph properties
# Create the graph title as a combination of default parameter, uid, method and
# interval values (only if models are plotted) for easier identification
this_graph_title = graph_title + '; uid: {0}'.format(data_slice.uid.values[0])
if models_slice:
this_graph_title += ('; method: {0}; interval <{1}, {2}>'
.format(models_slice[0]['model'],
models_slice[0]['x_start'],
models_slice[0]['x_end']))
bokeh_helpers.configure_graph(
scatter, profile, 'count', this_graph_title, x_axis_label, y_axis_label, graph_width)
# Plot all models
scatter = draw_models(scatter, models_slice, profile)
yield '{}'.format(data_slice.uid.values[0]), scatter
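# Example usage sketch (illustrative only; the key names and the display call
# are placeholders for however the caller renders the figures):
# from bokeh.io import show
# for uid, graph in create_from_params(profile, 'amount', 'structure-unit-size',
#                                      'structure unit size', 'amount [us]', 'profile scatter'):
#     show(graph)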
|
tfiedor/perun
|
perun/view/scatter/factory.py
|
Python
|
gpl-3.0
| 10,803
|
from social_core.backends.nk import NKOAuth2
|
cjltsod/python-social-auth
|
social/backends/nk.py
|
Python
|
bsd-3-clause
| 45
|
# solve cliff-walking task with Q-Learning, very similar to SARSA
# original example problem from the book, introduction for reinforcement learning
# Author: Wenbin Li
# numeric backend
import pygame
from pygame.locals import *
import numpy as np
grid_size = 100
n_row = 4
n_col = 12
state = np.zeros((n_row * grid_size, n_col * grid_size))
step_size = 0.5
epsilon = 0.1 # parameter for epsilon-greedy
N_actions = 4 # number of actions {left,up,right,down}
N_episodes = 600 # number of episodes
# as suggested by the book, reach optimality by 8000 time steps
# rewards of -1 until the goal state is reached
# -100 for entering cliff region and instantly return to starting position
# specify goal location
goal_r = 3
goal_c = 11
# specify start location
start_r = 3
start_c = 0
# initialize state-action value function
q = np.zeros((n_row,n_col,N_actions)) # num_row by num_col by num_states
# Note: Q(terminal-state,.) = 0
# undiscounted and episodic task
n_steps = 0
n_episodes = 0
# epsilon-greedy strategy
def ep_greedy(epsilon,num_actions,q,i,j):
roll = np.random.uniform(0,1)
# epsilon-greedy strategy
if roll < epsilon: # exploration
a = np.random.randint(0,num_actions)
else: # exploitation
a = np.argmax(q[i,j,:])
return a
# translate action into state-change
def action2state(i,j,a):
# Note: the coordinate system starts from the upper-left corner and
# right/downwards are the positive direction
if a == 0: # to left
i_next = i
j_next = j - 1
elif a == 1: # upwards
i_next = i - 1
j_next = j
elif a == 2: # to right
i_next = i
j_next = j + 1
else: # downwards
i_next = i + 1
j_next = j
return i_next,j_next
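# Q-Learning update applied in the loop below (alpha is step_size):
#   Q(s,a) <- Q(s,a) + alpha * (r + max_a' Q(s',a') - Q(s,a))
# SARSA would instead bootstrap from Q(s',a') for the action actually chosen
# by the epsilon-greedy policy; taking the max over a' makes this off-policy.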
# Q-Learning main loop
while n_episodes < N_episodes:
# begin of an episode
i = start_r
j = start_c
# end of an episode
n_episodes += 1
print "episode ",str(n_episodes),"..."
while True:
n_steps += 1
# print " step ",str(n_steps),"..."
# choose A from S using policy derived from Q (epsilon-greedy)
a = ep_greedy(epsilon,N_actions,q,i,j)
        # translate action into state-change
i_next,j_next = action2state(i,j,a)
# update the state-action value function with Sarsa/Q-Learning of choice
# state transitions end in the goal state
# state should be in the range of the gridworld
if i_next == goal_r and j_next == goal_c: # reach the goal position
# q[i,j] = q[i,j] + step_size * (-1 + 0 - q[i,j]) #the Q(terminal,.) = 0
q[i,j,a] = q[i,j,a] + step_size * (-1 + 0 - q[i,j,a]) #the Q(terminal,.) = 0
            # Note: a transition from a non-terminal state to the terminal state also gets a reward of -1 in this case
break
# different reward/consequence when entering the cliff region
elif i_next == 3 and j_next > 1 and j_next < n_col - 1:
i_next = start_r
j_next = start_c
r = -100
elif i_next < 0 or i_next > n_row -1:
i_next = i
r = -1
elif j_next < 0 or j_next > n_col - 1:
j_next = j
r = -1
else:
r = -1
# a_next = ep_greedy(epsilon,N_actions,q,i_next,j_next)
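        # Q-Learning update (off-policy TD control):
        #   Q(s,a) <- Q(s,a) + step_size * (r + max_a' Q(s',a') - Q(s,a))
        # the task is undiscounted (gamma = 1), so no discount factor appears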
q[i,j,a] = q[i,j,a] + step_size * (r + max(q[i_next,j_next,:]) - q[i,j,a])
i = i_next
j = j_next
# visualize the solution/GUI-backend
# plot the gridworld as background
# (optional) mark start/goal states and the cliff region
pygame.init()
pygame.display.set_mode((n_col * grid_size,n_row * grid_size))
pygame.display.set_caption('Cliff Walking')
screen = pygame.display.get_surface()
surface = pygame.Surface(screen.get_size())
bg = pygame.Surface(screen.get_size())
# draw background, with mark on start/end states & cliff region
def draw_bg(surface,n_row,n_col,grid_size,start_r,start_c,goal_r,goal_c):
for i in range(n_col):
for j in range(n_row):
x = i * grid_size
y = j * grid_size
coords = pygame.Rect(x,y,grid_size,grid_size)
pygame.draw.rect(surface,(255,255,255),coords,1)
# draw start state
pygame.draw.circle(surface,(192,192,192),(start_c * grid_size + grid_size/2,
start_r * grid_size + grid_size/2),grid_size/4)
# draw goal state
pygame.draw.circle(surface,(102,204,0),(goal_c * grid_size + grid_size/2,
goal_r * grid_size + grid_size/2),grid_size/4)
# draw cliff region
x = 1 * grid_size
y = 3 * grid_size
coords = pygame.Rect(x,y,grid_size*10,grid_size)
pygame.draw.rect(surface,(192,192,192),coords)
# use state-action function to find one-step optimal policy
def step_q(q,s_r,s_c,n_row,n_col):
print "state-action value:"
print q[s_r,s_c,:]
a = np.argmax(q[s_r,s_c,:]) # greedy only
# display debug
if a == 0:
print "move left"
elif a == 1:
print "move upward"
elif a == 2:
print "move right"
else:
print "move downwards"
s_r_next,s_c_next = action2state(s_r,s_c,a)
    # define rules, especially for when the agent enters the cliff region
if s_r_next == 3 and s_c_next > 1 and s_c_next < n_col - 1:
s_r_next = start_r
s_c_next = start_c
# in theory, the produced optimal policy should not enter this branch
elif s_r_next < 0 or s_r_next > n_row -1:
s_r_next = s_r
elif s_c_next < 0 or s_c_next > n_col - 1:
s_c_next = s_c
return s_r_next,s_c_next
s_r = start_r
s_c = start_c
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
# draw gridworld background
draw_bg(bg,n_row,n_col,grid_size,start_r,start_c,goal_r,goal_c)
screen.blit(bg,(0,0))
# draw the state of the agent, i.e. the path (start --> end) as the foreground
surface.fill((0,0,0))
    # use the state-action value function to find an optimal policy
# in the loop, should provide a step function
#print (s_r,s_c)
s_r_next,s_c_next = step_q(q,s_r,s_c,n_row,n_col)
#print (s_r_next,s_c_next)
if s_r_next != goal_r or s_c_next != goal_c:
pygame.draw.circle(surface,(255,255,255),(s_c_next * grid_size + grid_size/2,
s_r_next * grid_size + grid_size/2),grid_size/4)
bg.blit(surface,(0,0))
pygame.display.flip() # update
pygame.time.delay(1000)
s_r,s_c = s_r_next,s_c_next # update coordinate
|
wenbinli/rl
|
cliffWalk_QL.py
|
Python
|
mit
| 6,866
|
import unittest
from mock import Mock
from cartodb_services.tomtom.isolines import TomTomIsolines, DEFAULT_PROFILE
from cartodb_services.tools import Coordinate
from credentials import tomtom_api_key
VALID_ORIGIN = Coordinate(-73.989, 40.733)
class TomTomIsolinesTestCase(unittest.TestCase):
def setUp(self):
self.tomtom_isolines = TomTomIsolines(apikey=tomtom_api_key(),
logger=Mock())
def test_calculate_isochrone(self):
time_ranges = [300, 900]
solution = self.tomtom_isolines.calculate_isochrone(
origin=VALID_ORIGIN,
profile=DEFAULT_PROFILE,
time_ranges=time_ranges)
assert solution
def test_calculate_isodistance(self):
distance_range = 10000
solution = self.tomtom_isolines.calculate_isodistance(
origin=VALID_ORIGIN,
profile=DEFAULT_PROFILE,
distance_range=distance_range)
assert solution
|
CartoDB/geocoder-api
|
server/lib/python/cartodb_services/test/test_tomtomisoline.py
|
Python
|
bsd-3-clause
| 992
|
# Generated by Django 3.1.7 on 2021-03-17 07:15
from django.db import migrations
from django.db.models import F
def fixup_readonly(apps, schema_editor):
Translation = apps.get_model("trans", "Translation")
db_alias = schema_editor.connection.alias
for translation in Translation.objects.using(db_alias).filter(
component__template="",
language_id=F("component__source_language_id"),
check_flags="",
):
translation.check_flags = "read-only"
translation.save(update_fields=["check_flags"])
translation.unit_set.filter(pending=True).update(pending=False)
class Migration(migrations.Migration):
dependencies = [
("trans", "0127_fix_source_glossary"),
]
operations = [
migrations.RunPython(fixup_readonly, migrations.RunPython.noop, elidable=True)
]
|
nijel/weblate
|
weblate/trans/migrations/0128_fix_pending_read_only.py
|
Python
|
gpl-3.0
| 848
|
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.exception
import dns.immutable
import dns.rdtypes.util
class Relay(dns.rdtypes.util.Gateway):
name = 'AMTRELAY relay'
@property
def relay(self):
return self.gateway
@dns.immutable.immutable
class AMTRELAY(dns.rdata.Rdata):
"""AMTRELAY record"""
# see: RFC 8777
__slots__ = ['precedence', 'discovery_optional', 'relay_type', 'relay']
def __init__(self, rdclass, rdtype, precedence, discovery_optional,
relay_type, relay):
super().__init__(rdclass, rdtype)
relay = Relay(relay_type, relay)
self.precedence = self._as_uint8(precedence)
self.discovery_optional = self._as_bool(discovery_optional)
self.relay_type = relay.type
self.relay = relay.relay
def to_text(self, origin=None, relativize=True, **kw):
relay = Relay(self.relay_type, self.relay).to_text(origin, relativize)
return '%d %d %d %s' % (self.precedence, self.discovery_optional,
self.relay_type, relay)
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True,
relativize_to=None):
precedence = tok.get_uint8()
discovery_optional = tok.get_uint8()
if discovery_optional > 1:
raise dns.exception.SyntaxError('expecting 0 or 1')
discovery_optional = bool(discovery_optional)
relay_type = tok.get_uint8()
if relay_type > 0x7f:
raise dns.exception.SyntaxError('expecting an integer <= 127')
relay = Relay.from_text(relay_type, tok, origin, relativize,
relativize_to)
return cls(rdclass, rdtype, precedence, discovery_optional, relay_type,
relay.relay)
def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
relay_type = self.relay_type | (self.discovery_optional << 7)
header = struct.pack("!BB", self.precedence, relay_type)
file.write(header)
Relay(self.relay_type, self.relay).to_wire(file, compress, origin,
canonicalize)
@classmethod
def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
(precedence, relay_type) = parser.get_struct('!BB')
discovery_optional = bool(relay_type >> 7)
relay_type &= 0x7f
relay = Relay.from_wire_parser(relay_type, parser, origin)
return cls(rdclass, rdtype, precedence, discovery_optional, relay_type,
relay.relay)
|
4shadoww/usploit
|
lib/dns/rdtypes/ANY/AMTRELAY.py
|
Python
|
mit
| 3,439
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from uuid import uuid4 as uuid
os.environ['ENVIRONMENT'] = 'test'
from activitystreams import parse as as_parser
from test.base import BaseTest
from dino import environ
from dino import api
from dino.config import RedisKeys
from dino.config import ErrorCodes
__author__ = 'Oscar Eriksson <oscar.eriks@gmail.com>'
class ApiBanTest(BaseTest):
def test_ban_user_exists(self):
self.create_and_join_room()
self.set_owner()
self.create_user(BaseTest.OTHER_USER_ID, BaseTest.OTHER_USER_NAME)
json = self.activity_for_ban()
json['object']['id'] = ApiBanTest.OTHER_USER_ID
response_code, _ = api.on_ban(json, as_parser(json))
self.assertEqual(ErrorCodes.OK, response_code)
def create_room(self, room_id: str=None, room_name: str=None):
if room_id is None:
room_id = ApiBanTest.ROOM_ID
if room_name is None:
room_name = ApiBanTest.ROOM_NAME
environ.env.storage.redis.hset(RedisKeys.rooms(BaseTest.CHANNEL_ID), room_id, room_name)
def activity_for_ban(self):
return {
'actor': {
'id': ApiBanTest.USER_ID,
'content': ApiBanTest.USER_NAME
},
'verb': 'ban',
'object': {
'content': ApiBanTest.OTHER_USER_NAME,
'objectType': 'user',
'summary': '30m',
'url': BaseTest.CHANNEL_ID
},
'target': {
'id': ApiBanTest.ROOM_ID,
'displayName': ApiBanTest.ROOM_NAME
}
}
|
thenetcircle/dino
|
test/api/test_api_ban.py
|
Python
|
apache-2.0
| 2,148
|
#!/usr/bin/env python3
# Copyright 2015-2016 Samsung Electronics Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from litmus.core.util import call
from litmus.cmds import sdb_exist
def main(args):
"""docstring for main"""
sdb_exist()
project_path = os.path.abspath(args.project_path)
sys.path.append(project_path)
call(['chmod', '-R', '775', project_path])
import userscript
userscript.main(project_name='adhoc project',
project_path=project_path,
param=args.param,
workingdir=args.workingdir)
|
dhs-shine/litmus
|
litmus/cmds/cmd_adhoc.py
|
Python
|
apache-2.0
| 1,115
|
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from nhwc.conv import Conv2d_NHWC
from nhwc.max_pool import MaxPool2d_NHWC
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = Conv2d_NHWC(1, 10, kernel_size=5)
self.pool1 = MaxPool2d_NHWC(2)
self.conv2 = Conv2d_NHWC(10, 20, kernel_size=5)
self.pool2 = MaxPool2d_NHWC(2)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(self.pool1(self.conv1(x)))
x = F.relu(self.pool2(self.conv2_drop(self.conv2(x))))
x = x.permute(0, 3, 1, 2).contiguous()
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
data = data.permute(0, 2, 3, 1).contiguous().half()
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
def test(args, model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
data = data.permute(0, 2, 3, 1).contiguous().half()
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
model = Net().to(device).half()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
torch.backends.cudnn.benchmark = True
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
test(args, model, device, test_loader)
if __name__ == '__main__':
main()
|
mlperf/training_results_v0.5
|
v0.5.0/nvidia/submission/code/single_stage_detector/pytorch/nhwc/mnist_nhwc.py
|
Python
|
apache-2.0
| 5,689
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# gmail.py
#
import sys
import os
import smtplib
import getpass
import ConfigParser
from optparse import OptionParser
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.MIMEImage import MIMEImage
'''
Usage: gmail.py [options] arg1
Options:
--version show program's version number and exit
-h, --help show this help message and exit
-f FILE, --file=FILE image FILE to attach
-t EMAIL, --to=EMAIL email destination
-o NAME, --from=NAME name of origin
-b BODY, --body=BODY BODY message
-s SUBJECT, --subject=SUBJECT
SUBJECT message
Config file example "gmail.cfg"
[Default]
fromaddr = Server email Name
toaddrs = destination@example.com
[Gmail]
username = MYGMAILUSER
password = MYGMAILPASS
'''
# Program epilog
epilog = \
"""
gmail is configured using a config file only.
If none is supplied, it will read gmail.cfg
from current directory or ~/.gmail.cfg.
"""
def main():
usage = "usage: %prog [options] arg"
version = "%prog 1.0"
parser = OptionParser(usage=usage, version=version, epilog=epilog)
parser.add_option("-f", "--file", dest="image_file",
help="image FILE to attach", metavar="FILE")
parser.add_option("-c", "--conf", dest="config_file",
help="config FILE", metavar="CONFIG",
default='gmail.cfg')
parser.add_option("-t", "--to", dest="toaddrs",
help="email destination", metavar="EMAIL",
default=None)
parser.add_option("-o", "--from", dest="fromaddr",
help="name of origin", metavar="NAME",
default=None)
parser.add_option("-b", "--body", dest="body",
help="BODY message", metavar="BODY",
default='')
parser.add_option("-s", "--subject", dest="subject",
help="SUBJECT message", metavar="SUBJECT",
default='')
(options, args) = parser.parse_args()
# Run the program
process(options, args)
def process(options, args):
config = get_config(options)
# Write the email
msg = MIMEMultipart()
msg['From'] = config['fromaddr']
msg['To'] = config['toaddrs']
msg['Subject'] = options.subject
body = options.body
msg.attach(MIMEText(body, 'plain'))
# Attach image
if options.image_file:
try:
filename = open(options.image_file, "rb")
attach_image = MIMEImage(filename.read())
attach_image.add_header('Content-Disposition',
'attachment; filename = %s'%options.image_file)
msg.attach(attach_image)
filename.close()
except:
msg.attach(MIMEText('Image attachment error', 'plain'))
# Converting email to text
text = msg.as_string()
# The actual mail send
server = smtplib.SMTP('smtp.gmail.com:587')
server.ehlo()
server.starttls()
server.ehlo()
server.login(config['username'],config['password'])
server.sendmail(config['fromaddr'], config['toaddrs'], text)
server.quit()
def get_config(options):
conf = {}
# Read Config File
config = ConfigParser.RawConfigParser()
config.read([options.config_file, os.path.expanduser('~/.gmail.cfg')])
# Gmail Credentials
try:
conf['username'] = config.get('Gmail', 'username')
conf['password'] = config.get('Gmail', 'password')
except:
conf['username'] = raw_input('Input Gmail username: ')
conf['password'] = getpass.getpass('Input Gmail password: ')
# Email Default
if options.fromaddr == None:
try:
conf['fromaddr'] = config.get('Default', 'fromaddr')
except:
conf['fromaddr'] = 'Python Gmail'
else:
conf['fromaddr'] = options.fromaddr
if options.toaddrs == None:
try:
conf['toaddrs'] = config.get('Default', 'toaddrs')
except:
conf['toaddrs'] = raw_input('Input email destination: ')
else:
conf['toaddrs'] = options.toaddrs
return conf
if __name__ == '__main__':
main()
|
ActiveState/code
|
recipes/Python/577690_Python_Gmail_script_smtp/recipe-577690.py
|
Python
|
mit
| 4,221
|
# -*- coding: utf-8 -*-
# Copyright (C) 2015 ZetaOps Inc.
#
# This file is licensed under the GNU General Public License v3
# (GPLv3). See LICENSE.txt for details.
"""
Default Settings
"""
import os
DEFAULT_BUCKET_TYPE = os.environ.get('DEFAULT_BUCKET_TYPE', 'pyoko_models')
# write_once bucket doesn't support secondary indexes. Thus, backend is defined
# as "leveldb_mult" in log_version bucket properties.
VERSION_LOG_BUCKET_TYPE = os.environ.get('VERSION_LOG_BUCKET_TYPE', 'log_version')
RIAK_SERVER = os.environ.get('RIAK_SERVER', 'localhost')
RIAK_PROTOCOL = os.environ.get('RIAK_PROTOCOL', 'http')
RIAK_PORT = os.environ.get('RIAK_PORT', 8098)
RIAK_HTTP_PORT = os.environ.get('RIAK_HTTP_PORT', 8098)
#: Redis address and port.
REDIS_SERVER = os.environ.get('REDIS_SERVER', '127.0.0.1:6379')
#: Redis password.
REDIS_PASSWORD = os.environ.get('REDIS_PASSWORD', None)
#: Set True to enable versioning on write-once buckets
ENABLE_VERSIONS = os.environ.get('ENABLE_VERSIONS', 'False') == 'True'
#: Suffix for version buckets
VERSION_SUFFIX = os.environ.get('VERSION_SUFFIX', '_version')
#: Set True to enable auto-logging of all DB operations to a
#: write-once log bucket
ENABLE_ACTIVITY_LOGGING = os.environ.get('ENABLE_ACTIVITY_LOGGING', 'False') == 'True'
#: Set the name of logging bucket type and bucket name.
ACTIVITY_LOGGING_BUCKET = os.environ.get('ACTIVITY_LOGGING_BUCKET', 'log')
VERSION_BUCKET = os.environ.get('VERSION_BUCKET', 'version')
#: Set True to enable caching all models to Redis
ENABLE_CACHING = os.environ.get('ENABLE_CACHING', 'False') == 'True'
#: Expiration duration for cached models.
CACHE_EXPIRE_DURATION = os.environ.get('CACHE_EXPIRE_DURATION', 36000)
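# Illustrative override sketch (hypothetical values): every setting above is
# read from os.environ with a default, so a deployment can inject overrides
# before this module is first imported, e.g.
#
#     import os
#     os.environ['RIAK_SERVER'] = 'riak.example.com'
#     os.environ['ENABLE_CACHING'] = 'True'
#     from pyoko import settings  # picks up the overridden values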
|
zetaops/pyoko
|
pyoko/settings.py
|
Python
|
gpl-3.0
| 1,719
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("cases", "0044_caseaction_user_assignee")]
operations = [
migrations.AlterField(
model_name="case",
name="initial_message",
field=models.OneToOneField(
related_name="initial_case", null=True, to="msgs.Message", on_delete=models.PROTECT
),
)
]
|
praekelt/casepro
|
casepro/cases/migrations/0045_auto_20161014_1341.py
|
Python
|
bsd-3-clause
| 501
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RProtgenerics(RPackage):
"""S4 generic functions needed by Bioconductor proteomics packages."""
homepage = "https://bioconductor.org/packages/ProtGenerics/"
url = "https://git.bioconductor.org/packages/ProtGenerics"
list_url = homepage
version('1.8.0', git='https://git.bioconductor.org/packages/ProtGenerics', commit='b2b3bb0938e20f58fca905f6870de7dbc9dfd7a3')
depends_on('r@3.4.0:3.4.9', when='@1.8.0')
|
lgarren/spack
|
var/spack/repos/builtin/packages/r-protgenerics/package.py
|
Python
|
lgpl-2.1
| 1,699
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''messaging based notification driver, with message envelopes'''
from oslo.config import cfg
from muranorepository.openstack.common import context as req_context
from muranorepository.openstack.common.gettextutils import _ # noqa
from muranorepository.openstack.common import log as logging
from muranorepository.openstack.common import rpc
LOG = logging.getLogger(__name__)
notification_topic_opt = cfg.ListOpt(
'topics', default=['notifications', ],
help='AMQP topic(s) used for OpenStack notifications')
opt_group = cfg.OptGroup(name='rpc_notifier2',
title='Options for rpc_notifier2')
CONF = cfg.CONF
CONF.register_group(opt_group)
CONF.register_opt(notification_topic_opt, opt_group)
def notify(context, message):
"""Sends a notification via RPC."""
if not context:
context = req_context.get_admin_context()
priority = message.get('priority',
CONF.default_notification_level)
priority = priority.lower()
for topic in CONF.rpc_notifier2.topics:
topic = '%s.%s' % (topic, priority)
try:
rpc.notify(context, topic, message, envelope=True)
except Exception:
LOG.exception(_("Could not send notification to %(topic)s. "
"Payload=%(message)s"),
{"topic": topic, "message": message})
|
Bloomie/murano-repository
|
muranorepository/openstack/common/notifier/rpc_notifier2.py
|
Python
|
apache-2.0
| 2,019
|
#!/usr/bin/env python
#=========================================================================
# This is OPEN SOURCE SOFTWARE governed by the Gnu General Public
# License (GPL) version 3, as described at www.opensource.org.
# Copyright (C)2017 William H. Majoros (martiandna@gmail.com).
#=========================================================================
from __future__ import (absolute_import, division, print_function,
unicode_literals, generators, nested_scopes, with_statement)
from builtins import (bytes, dict, int, list, object, range, str, ascii,
chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)
# The above imports should allow this program to run in both Python 2 and
# Python 3. You might need to update your version of module "future".
import sys
import ProgramName
from GffTranscriptReader import GffTranscriptReader
from Rex import Rex
rex=Rex()
def processGFF(filename):
reader=GffTranscriptReader()
hashTable=reader.hashBySubstrate(filename)
chroms=hashTable.keys()
for chrom in chroms:
transcripts=hashTable[chrom]
processGene(transcripts)
def getAnnotatedExons(refTranscripts):
allExons=set()
for transcript in refTranscripts.values():
exons=transcript.getRawExons()
for exon in exons:
key=str(exon.begin)+" "+str(exon.end)
allExons.add(key)
return allExons
def getAnnotatedIntrons(refTranscripts):
allIntrons=set()
for transcript in refTranscripts.values():
introns=transcript.getIntrons()
for intron in introns:
key=str(intron.begin)+" "+str(intron.end)
allIntrons.add(key)
return allIntrons
def getStructureChanges(transcript,attributes):
changeString=attributes.get("structure_change","")
fields=changeString.split(" ")
changes=set()
for field in fields: changes.add(field)
return changes
def processGene(transcripts):
refTranscripts={}; altTranscripts=[]
for transcript in transcripts:
pairs=transcript.parseExtraFields()
attributes=transcript.hashExtraFields(pairs)
changes=getStructureChanges(transcript,attributes)
mapped="mapped-transcript" in changes
id=transcript.getID()
if(not mapped and rex.find("ALT\d+_(\S+)",id)):
transcript.refID=rex[1]
altTranscripts.append(transcript)
else: refTranscripts[id]=transcript
annotatedIntrons=getAnnotatedIntrons(refTranscripts)
for transcript in altTranscripts:
pairs=transcript.parseExtraFields()
attributes=transcript.hashExtraFields(pairs)
changes=getStructureChanges(transcript,attributes)
if("mapped-transcript" in changes): continue
changes=setToString(changes)
found1=getUniqueJunctions(transcript,annotatedIntrons,changes,
attributes)
#found2=getIntronRetentions(transcript,refTranscripts,changes,
# attributes)
def setToString(s):
r=""
for elem in s:
if(len(r)>0): r+=","
r+=elem
return r
def getUniqueJunctions(transcript,annotatedIntrons,changes,attributes):
introns=transcript.getIntrons()
fate=attributes.get("fate","none")
broken=attributes.get("broken-site")
if(broken is None or broken==""): broken="false"
found=False
for intron in introns:
key=str(intron.begin)+" "+str(intron.end)
if(key not in annotatedIntrons):
print(transcript.getGeneId(),transcript.getId(),"junction",
str(intron.begin)+"-"+str(intron.end),
transcript.getScore(),transcript.getStrand(),
changes,fate,broken,sep="\t")
found=True
return found
def exonIsAnnotated(exon,annotatedExons):
key=str(exon.begin)+" "+str(exon.end)
return key in annotatedExons
def getIntronRetentions(transcript,refTranscripts,changes,attributes):
ref=refTranscripts[transcript.refID]
refIntrons=ref.getIntrons()
annotatedExons=getAnnotatedExons(refTranscripts)
exons=transcript.getRawExons()
fate=attributes.get("fate","none")
broken=attributes.get("broken-site")
if(broken is None or broken==""): broken="false"
found=False
for exon in exons:
for refIntron in refIntrons:
if(exon.asInterval().containsInterval(refIntron)):
if(exonIsAnnotated(exon,annotatedExons)): continue
print(transcript.getGeneId(),transcript.getId(),
"intron-retention",
str(refIntron.begin)+"-"+str(refIntron.end),
transcript.getScore(),transcript.getStrand(),
changes,fate,broken,sep="\t")
found=True
return found
#=========================================================================
# main()
#=========================================================================
if(len(sys.argv)!=2):
exit(ProgramName.get()+" <in.gff>")
(gffFile,)=sys.argv[1:]
processGFF(gffFile)
|
ReddyLab/1000Genomes
|
get-novel-features.py
|
Python
|
gpl-2.0
| 5,078
|
# -*- coding: utf-8 -*-
#!/usr/bin/python
# All code provided from the http://gengo.com site, such as API example code
# and libraries, is provided under the New BSD license unless otherwise
# noted. Details are below.
#
# New BSD License
# Copyright (c) 2009-2012, myGengo, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# Neither the name of myGengo, Inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from gengo import Gengo
# Get an instance of Gengo to work with...
gengo = Gengo(
public_key='your_public_key',
private_key='your_private_key',
sandbox=True,
)
# Update a job that has an id of 42, and reject it, cite the reason,
# add a comment, and throw up some captcha stuff. See the docs for
# more information pertaining to this method, it can do quite a bit. :)
gengo.updateTranslationJob(id=42, action={
'action': 'reject',
'reason': 'quality',
'comment': 'My grandmother does better.',
'captcha': 'bert'
})
|
shawnps/mygengo-python
|
examples/updateTranslationJob.py
|
Python
|
bsd-3-clause
| 2,306
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-27 03:12
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('store', '0007_auto_20170518_1459'),
]
operations = [
migrations.AlterModelOptions(
name='store',
options={'verbose_name': 'Store', 'verbose_name_plural': 'Store'},
),
]
|
okwow123/djangol2
|
example/store/migrations/0008_auto_20170527_0312.py
|
Python
|
mit
| 440
|
#!/usr/bin/env python3
""" Name that Shape module for use with Lab 5, Inf1340, Fall 2015 """
__author__ = 'Susan Sim'
__email__ = "ses@drsusansim.org"
__copyright__ = "2015 Susan Sim"
__license__ = "MIT License"
def name_that_shape(sides):
"""
For a given number of sides in a regular polygon, returns the shape name
Inputs | Expected Outputs
-------------------------
< 3 | Error
3 | triangle
4 | quadrilateral
5 | pentagon
6 | hexagon
7 | heptagon
8 | octagon
9 | nonagon
10 | decagon
> 10 | Error
Errors: TypeError when input is a string or float
ValueError when input is < 3 or > 10
"""
    if sides.isdigit() or (sides[0] == "-" and sides[1:].isdigit()):
sides = int(sides)
else:
raise TypeError
if sides == 3:
print("triangle")
elif sides == 4:
print("quadrilateral")
elif sides == 5:
print("pentagon")
elif sides == 6:
print("hexagon")
elif sides == 7:
print("heptagon")
elif sides == 8:
print("octagon")
elif sides == 9:
print("nonagon")
elif sides == 10:
print("decagon")
else:
raise ValueError
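# Illustrative usage (hypothetical calls): the function expects its argument
# as a numeric string and prints the matching shape name.
#
#     name_that_shape("6")    # prints "hexagon"
#     name_that_shape("2")    # raises ValueError (fewer than 3 sides)
#     name_that_shape("six")  # raises TypeError (not a numeric string)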
|
benevolentprof/inf1340-2015-labs
|
name_that_shape.py
|
Python
|
mit
| 1,259
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy
import six
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import combinations
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers import normalization
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.training.tracking import util
class HasList(training.Model):
def __init__(self):
super(HasList, self).__init__()
self.layer_list = data_structures.wrap_or_unwrap([core.Dense(3)])
self.layer_list.append(core.Dense(4))
self.layer_list.extend(
[core.Dense(5),
core.Dense(6, kernel_regularizer=math_ops.reduce_sum)])
self.layer_list += [
core.Dense(7, bias_regularizer=math_ops.reduce_sum),
core.Dense(8)
]
self.layer_list += (
data_structures.wrap_or_unwrap([core.Dense(9)]) +
data_structures.wrap_or_unwrap([core.Dense(10)]))
self.layer_list.extend(
data_structures.wrap_or_unwrap(
list([core.Dense(11)]) + [core.Dense(12)]))
self.layers_with_updates = data_structures.wrap_or_unwrap(
[normalization.BatchNormalization()])
def call(self, x):
aggregation = 0.
for l in self.layer_list:
x = l(x)
aggregation += math_ops.reduce_sum(x)
bn, = self.layers_with_updates
return bn(x) / aggregation
class ListTests(keras_parameterized.TestCase):
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testTracking(self):
with self.test_session():
model = HasList()
output = model(array_ops.ones([32, 2]))
self.assertAllEqual([32, 12], output.shape)
self.assertEqual(11, len(model.layers))
self.assertEqual(10, len(model.layer_list.layers))
six.assertCountEqual(
self,
model.layers,
model.layer_list.layers + model.layers_with_updates)
for index in range(10):
self.assertEqual(3 + index, model.layer_list.layers[index].units)
self.assertEqual(2, len(model._checkpoint_dependencies))
self.assertIs(model.layer_list, model._checkpoint_dependencies[0].ref)
self.assertIs(model.layers_with_updates,
model._checkpoint_dependencies[1].ref)
self.assertEqual(
10,
len(model._checkpoint_dependencies[0].ref._checkpoint_dependencies))
self.evaluate([v.initializer for v in model.variables])
self.evaluate(model.variables[0].assign([[1., 2., 3.], [4., 5., 6.]]))
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
self.evaluate(model.variables[0].assign(array_ops.zeros([2, 3])))
model.load_weights(save_path)
self.assertAllEqual([[1., 2., 3.], [4., 5., 6.]],
self.evaluate(model.variables[0]))
v = variables.Variable(1.)
model.var_list = [v]
self.assertTrue(any(v is t for t in model.variables))
self.assertTrue(any(v is t for t in model.trainable_variables))
self.assertFalse(any(v is t for t in model.non_trainable_variables))
self.assertTrue(any(model.layer_list[0].trainable_weights[0]
is t for t in model.trainable_weights))
def testSubModelTracking(self):
model = training.Model()
model.v = variables.Variable(1.)
self.assertIn(model.v, model.trainable_weights)
model2 = training.Model()
model2.m = [model]
self.assertIn(model.v, model2.trainable_weights)
def testSubSequentialTracking(self):
class _Subclassed(training.Model):
def __init__(self, wrapped):
super(_Subclassed, self).__init__()
self._wrapped = wrapped
def call(self, x):
return self._wrapped(x)
model = sequential.Sequential()
layer = core.Dense(1)
model.add(layer)
model2 = _Subclassed(model)
model2(array_ops.ones([1, 2]))
model2.m = [model]
self.assertIn(layer.kernel, model2.trainable_weights)
def testLayerTrackedThroughSequential(self):
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def ffnet(layer_sizes, name):
ff = sequential.Sequential(name=name)
for i, width in enumerate(layer_sizes):
ff.add(core.Dense(
width,
activation=("relu" if i < len(layer_sizes)-1 else None)))
return ff
class MyModel2(training.Model):
def __init__(self, config, name="my_model_2"):
super(MyModel2, self).__init__(name=name)
self._num_tokens = config.num_tokens
# list of sub-models
self._ffnet = [ffnet(config.module_layers + (self._num_tokens,), "ff")]
def null_input(self):
return array_ops.zeros([1, self._num_tokens], dtype=dtypes.float32)
def call(self, input_, module_index=None):
return self._ffnet[0](input_)
m2 = MyModel2(AttrDict(
num_tokens=5,
module_layers=(50, 30)))
# Construct
m2(m2.null_input())
self.assertLen(m2.trainable_variables, 6)
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testUpdatesForwarded(self):
model = HasList()
model_input = array_ops.ones([32, 2])
model(model_input)
if context.executing_eagerly():
self.assertEqual(0, len(model.updates))
else:
self.assertGreater(len(model.layers_with_updates[0].updates), 0)
self.assertEqual(set(model.layers_with_updates[0].updates),
set(model.updates))
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testLossesForwarded(self):
model = HasList()
model_input = array_ops.ones([32, 2])
model(model_input)
self.assertEqual(2, len(model.losses))
def testModelContainersCompareEqual(self):
class HasEqualContainers(training.Model):
def __init__(self):
super(HasEqualContainers, self).__init__()
self.l1 = []
self.l2 = []
model = HasEqualContainers()
first_layer = HasEqualContainers()
model.l1.append(first_layer)
second_layer = HasEqualContainers()
model.l2.append(second_layer)
self.assertEqual([first_layer, second_layer], model.layers)
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testTensorConversion(self):
class ListToTensor(training.Model):
def __init__(self):
super(ListToTensor, self).__init__()
self.l = [1., 2., 3.]
self.assertAllEqual(
[1., 2., 3.],
self.evaluate(constant_op.constant(ListToTensor().l)))
self.assertAllEqual(
[1., 2., 3.],
self.evaluate(gen_array_ops.Pack(values=ListToTensor().l)))
class ListWrapperTest(test.TestCase):
def testLayerCollectionWithExternalMutation(self):
l = []
l_wrapper = data_structures.wrap_or_unwrap(l)
layer = core.Dense(1)
l.append(layer)
self.assertEqual([layer], l_wrapper.layers)
class HasMapping(training.Model):
def __init__(self):
super(HasMapping, self).__init__()
self.layer_dict = data_structures.wrap_or_unwrap(dict(output=core.Dense(7)))
self.layer_dict["norm"] = data_structures.wrap_or_unwrap([])
self.layer_dict["dense"] = data_structures.wrap_or_unwrap([])
self.layer_dict["dense"].extend(
[core.Dense(5),
core.Dense(6, kernel_regularizer=math_ops.reduce_sum)])
self.layer_dict["norm"].append(
normalization.BatchNormalization())
self.layer_dict["norm"].append(
normalization.BatchNormalization())
def call(self, x):
aggregation = 0.
for norm, dense in zip(self.layer_dict["norm"], self.layer_dict["dense"]):
x = norm(dense(x))
aggregation += math_ops.reduce_sum(x)
return self.layer_dict["output"](x) / aggregation
class MappingTests(keras_parameterized.TestCase):
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testTracking(self):
with self.test_session():
model = HasMapping()
output = model(array_ops.ones([32, 2]))
self.assertAllEqual([32, 7], output.shape.as_list())
self.assertEqual(5, len(model.layers))
six.assertCountEqual(self, model.layers, model.layer_dict.layers)
self.assertEqual(1, len(model._checkpoint_dependencies))
self.assertIs(model.layer_dict, model._checkpoint_dependencies[0].ref)
self.evaluate([v.initializer for v in model.variables])
test_var = model.layer_dict["output"].kernel
self.evaluate(test_var.assign(array_ops.ones([6, 7])))
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
self.evaluate(test_var.assign(array_ops.zeros([6, 7])))
model.load_weights(save_path)
self.assertAllEqual(numpy.ones([6, 7]),
self.evaluate(test_var))
def testLayerCollectionWithExternalMutation(self):
d = {}
root = module.Module()
root.wrapper = d
self.assertEqual([], root.wrapper.layers)
self.assertEqual([], root.wrapper.trainable_weights)
layer1 = core.Dense(1)
layer2 = core.Dense(1)
d["a"] = layer1
d["b"] = layer2
self.assertEqual([layer1, layer2], root.wrapper.layers)
# The layers have still not created variables
self.assertEqual([], root.wrapper.trainable_weights)
def testDictWrapperBadKeys(self):
a = module.Module()
a.d = {}
a.d[1] = data_structures.wrap_or_unwrap([])
model = training.Model()
model.sub = a
save_path = os.path.join(self.get_temp_dir(), "ckpt")
with self.assertRaisesRegex(ValueError, "non-string key"):
model.save_weights(save_path)
def testDictWrapperNoDependency(self):
a = module.Module()
a.d = data_structures.NoDependency({})
a.d[1] = [3]
self.assertEqual([a], util.list_objects(a))
model = training.Model()
model.sub = a
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
model.load_weights(save_path)
def testNonStringKeyNotTrackableValue(self):
a = module.Module()
a.d = {}
a.d["a"] = [3]
a.d[1] = data_structures.NoDependency([3])
self.assertEqual([a, a.d, a.d["a"]], util.list_objects(a))
model = training.Model()
model.sub = a
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
model.load_weights(save_path)
def testNonAppendNotTrackable(self):
# Non-append mutations (deleting or overwriting values) are OK when the
# values aren't tracked.
a = module.Module()
a.d = {}
a.d["a"] = [3]
a.d[1] = 3
a.d[1] = 2
self.assertEqual(2, a.d[1])
del a.d[1]
a.d[2] = data_structures.NoDependency(module.Module())
second = module.Module()
a.d[2] = data_structures.NoDependency(second)
self.assertIs(second, a.d[2])
self.assertEqual([a, a.d, a.d["a"]], util.list_objects(a))
model = training.Model()
model.sub = a
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
model.load_weights(save_path)
def testPopNoSave(self):
model = training.Model()
model.d = {}
model.d["a"] = []
model.d.pop("a")
save_path = os.path.join(self.get_temp_dir(), "ckpt")
with self.assertRaisesRegex(ValueError, "Unable to save"):
model.save_weights(save_path)
def testExternalModificationNoSave(self):
model = training.Model()
external_reference = {}
model.d = external_reference
external_reference["a"] = []
save_path = os.path.join(self.get_temp_dir(), "ckpt")
with self.assertRaisesRegex(ValueError, "modified outside the wrapper"):
model.save_weights(save_path)
def testOverwriteCanStillSave(self):
model = training.Model()
model.d = {}
model.d["a"] = {}
model.d["a"] = {}
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
def testIter(self):
model = training.Model()
model.d = {1: 3}
model.d[1] = 3
self.assertEqual([1], list(model.d))
new_dict = {}
# This update() is super tricky. If the dict wrapper subclasses dict,
# CPython will access its storage directly instead of calling any
# methods/properties on the object. So the options are either not to
# subclass dict (in which case update will call normal iter methods, but the
# object won't pass isinstance checks) or to subclass dict and keep that
# storage updated (no shadowing all its methods like ListWrapper).
new_dict.update(model.d)
self.assertEqual({1: 3}, new_dict)
class HasTuple(training.Model):
def __init__(self):
super(HasTuple, self).__init__()
self.layer_list = (
core.Dense(3), core.Dense(4),
core.Dense(5, kernel_regularizer=math_ops.reduce_sum))
self.layers_with_updates = (normalization.BatchNormalization(),)
def call(self, x):
aggregation = 0.
for l in self.layer_list:
x = l(x)
aggregation += math_ops.reduce_sum(x)
bn, = self.layers_with_updates
return bn(x) / aggregation
class TupleTests(keras_parameterized.TestCase):
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testTracking(self):
with self.test_session():
model = HasTuple()
output = model(array_ops.ones([32, 2]))
self.assertAllEqual([32, 5], output.shape.as_list())
self.assertLen(model.layers, 4)
self.assertLen(model.layer_list.layers, 3)
six.assertCountEqual(
self,
model.layers,
tuple(model.layer_list.layers) + model.layers_with_updates)
self.assertEqual(3, model.layer_list.layers[0].units)
self.assertEqual(4, model.layer_list.layers[1].units)
self.assertEqual(5, model.layer_list.layers[2].units)
self.assertLen(model._checkpoint_dependencies, 2)
self.assertIs(model.layer_list, model._checkpoint_dependencies[0].ref)
self.assertIs(model.layers_with_updates,
model._checkpoint_dependencies[1].ref)
self.assertLen(
model._checkpoint_dependencies[0].ref._checkpoint_dependencies, 3)
self.evaluate([v.initializer for v in model.variables])
self.evaluate(model.variables[0].assign([[1., 2., 3.], [4., 5., 6.]]))
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
self.evaluate(model.variables[0].assign(array_ops.zeros([2, 3])))
model.load_weights(save_path)
self.assertAllEqual([[1., 2., 3.], [4., 5., 6.]],
self.evaluate(model.variables[0]))
v = variables.Variable(1.)
model.var_list = (v,)
self.assertIn(id(v), [id(obj) for obj in model.variables])
self.assertIn(id(v), [id(obj) for obj in model.trainable_variables])
self.assertNotIn(id(v),
[id(obj) for obj in model.non_trainable_variables])
self.assertIn(id(model.layer_list[0].trainable_weights[0]),
[id(obj) for obj in model.trainable_weights])
@parameterized.named_parameters(
("Module", module.Module),
("Model", training.Model),
)
def testSubModelTracking(self, module_subclass):
model = module_subclass()
model.v = variables.Variable(1.)
self.assertIn(model.v, model.trainable_variables)
model2 = module_subclass()
model2.m = (model,)
self.assertIn(model.v, model2.trainable_variables)
def testSubSequentialTracking(self):
class _Subclassed(training.Model):
def __init__(self, wrapped):
super(_Subclassed, self).__init__()
self._wrapped = wrapped
def call(self, x):
return self._wrapped(x)
model = sequential.Sequential()
layer = core.Dense(1)
model.add(layer)
model2 = _Subclassed(model)
model2(array_ops.ones([1, 2]))
model2.m = (model,)
self.assertIn(layer.kernel, model2.trainable_weights)
def testUpdatesForwarded(self):
with ops.Graph().as_default():
model = HasTuple()
model_input = array_ops.ones([32, 2])
model(model_input)
self.assertNotEmpty(model.layers_with_updates[0].updates)
self.assertEqual(set(model.layers_with_updates[0].updates),
set(model.updates))
model = HasTuple()
model_input = array_ops.ones([32, 2])
model(model_input)
self.assertEmpty(model.updates)
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testLossesForwarded(self):
model = HasTuple()
model_input = array_ops.ones([32, 2])
model(model_input)
self.assertLen(model.losses, 1)
def testModelContainersCompareEqual(self):
class HasEqualContainers(training.Model):
def __init__(self):
super(HasEqualContainers, self).__init__()
self.l1 = ()
self.l2 = ()
model = HasEqualContainers()
first_layer = HasEqualContainers()
model.l1 = (first_layer,)
second_layer = HasEqualContainers()
model.l2 = (second_layer,)
self.assertEqual((first_layer,), model.l1)
d = {model.l1: 1, model.l2: 2}
self.assertEqual(1, d[model.l1])
self.assertEqual(1, d[(first_layer,)])
self.assertEqual(2, d[model.l2])
self.assertEqual(2, d[(second_layer,)])
self.assertEqual([first_layer, second_layer], model.layers)
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testTensorConversion(self):
class TupleToTensor(training.Model):
def __init__(self):
super(TupleToTensor, self).__init__()
self.l = (1., 2., 3.)
self.assertAllEqual(
(1., 2., 3.),
self.evaluate(constant_op.constant(TupleToTensor().l)))
self.assertAllEqual(
(1., 2., 3.),
self.evaluate(gen_array_ops.Pack(values=TupleToTensor().l)))
class InterfaceTests(keras_parameterized.TestCase):
def testNoDependency(self):
root = module.Module()
hasdep = module.Module()
root.hasdep = hasdep
nodep = module.Module()
root.nodep = data_structures.NoDependency(nodep)
self.assertEqual(1, len(root._checkpoint_dependencies))
self.assertIs(root._checkpoint_dependencies[0].ref, root.hasdep)
self.assertIs(root.hasdep, hasdep)
self.assertIs(root.nodep, nodep)
class NoDependencyModel(training.Model):
@base.no_automatic_dependency_tracking
def __init__(self):
super(NoDependencyModel, self).__init__()
self.a = []
self.b = module.Module()
nodeps = NoDependencyModel()
self.assertEqual([nodeps], util.list_objects(nodeps))
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testDictionariesBasic(self):
a = training.Model()
b = training.Model()
a.attribute = {"b": b}
c = training.Model()
a.attribute["c"] = []
a.attribute["c"].append(c)
a_deps = util.list_objects(a)
self.assertIn(b, a_deps)
self.assertIn(c, a_deps)
self.assertIs(b, a.attribute["b"])
six.assertCountEqual(
self,
["b", "c"],
[dep.name for dep in a.attribute._checkpoint_dependencies])
self.assertEqual([b, c], a.layers)
self.assertEqual([b, c], a.attribute.layers)
self.assertEqual([c], a.attribute["c"].layers)
checkpoint = util.Checkpoint(a=a)
save_path = checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
with self.cached_session():
checkpoint.restore(save_path).assert_consumed().initialize_or_restore()
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testNoDepList(self):
a = training.Model()
a.l1 = data_structures.NoDependency([])
a.l1.insert(1, 0)
self.assertIsInstance(a.l1, list)
checkpoint = util.Checkpoint(a=a)
checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
a.l2 = []
a.l2.insert(1, module.Module())
with self.assertRaisesRegex(ValueError, "A list element was replaced"):
checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
|
annarev/tensorflow
|
tensorflow/python/keras/tests/tracking_test.py
|
Python
|
apache-2.0
| 21,433
|
# -*- coding: utf-8 -*-
#
# Cherokee-admin
#
# Authors:
# Alvaro Lopez Ortega <alvaro@alobbs.com>
#
# Copyright (C) 2010 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
import CTK
import Balancer
class Plugin_failover (Balancer.PluginBalancer):
def __init__ (self, key, **kwargs):
Balancer.PluginBalancer.__init__ (self, key, **kwargs)
Balancer.PluginBalancer.AddCommon (self)
|
nuxleus/cherokee-webserver
|
admin/plugins/failover.py
|
Python
|
gpl-2.0
| 1,026
|
import pandas as pd
import numpy as np
import re
import json
import os
import warnings
import shutil
from pathlib import Path
import codecs
"""
Helper functions to load the article data. The main method to use
is load_data().
"""
# Caution! Modifying this in code will have no effect since the
# default arguments are populated with this reference at creation
# time, so post-hoc modifications will do nothing.
__data_folder = os.path.join(os.path.split(__file__)[0], '..', 'data')
def clean_string(s):
"""
Clean all the HTML/Unicode nastiness out of a string.
Replaces newlines with spaces.
"""
return s.replace('\r', '').replace('\n', ' ').replace('\xa0', ' ').strip()
def load_articles(data_folder=__data_folder, nrows=None):
"""
Loads the articles CSV. Can optionally only load the first
`nrows` number of rows.
"""
column_names = ['id',
'feedname',
'url',
'orig_html',
'title',
'bodytext',
'relevant',
'created',
'last_modified',
'news_source_id',
'author']
return pd.read_csv(os.path.join(data_folder,
'newsarticles_article.csv'),
header=None,
names=column_names,
nrows=nrows,
dtype={'orig_html': str, 'author': str})
def load_taggings(data_folder=__data_folder):
"""Loads the type-of-crime human tagging of the articles."""
uc_column_names = ['id', 'date', 'relevant', 'article_id',
'user_id', 'locations', 'sentiment']
uc = pd.read_csv(os.path.join(data_folder,
'newsarticles_usercoding.csv'),
header=None,
names=uc_column_names)
uc.set_index('id', drop=True, inplace=True)
uc_tags_column_names = ['id', 'usercoding_id', 'category_id']
uc_tags = pd.read_csv(
os.path.join(data_folder, 'newsarticles_usercoding_categories.csv'),
header=None,
names=uc_tags_column_names
)
uc_tags.set_index('usercoding_id', drop=True, inplace=True)
uc_tags['article_id'] = uc.loc[uc_tags.index, 'article_id']
return uc_tags
def load_model_categories(data_folder=__data_folder):
tcr_names = ['id', 'relevance', 'category_id', 'coding_id']
tc_names = ['id', 'date', 'model_info', 'relevance', 'article_id',
'sentiment']
tcr = pd.read_csv(
os.path.join(data_folder, 'newsarticles_trainedcategoryrelevance.csv'),
names=tcr_names
)
tc = pd.read_csv(
os.path.join(data_folder, 'newsarticles_trainedcoding.csv'),
names=tc_names
).set_index('id', drop=True)
tcr['article_id'] = tc.loc[tcr['coding_id']]['article_id'].values
return tcr
def load_model_locations(data_folder=__data_folder):
tl_names = ['id', 'text', 'latitude', 'longitude', 'coding_id',
'confidence', 'neighborhood']
tc_names = ['id', 'date', 'model_info', 'relevance', 'article_id',
'sentiment']
tl = pd.read_csv(
os.path.join(data_folder, 'newsarticles_trainedlocation.csv'),
names=tl_names
)
tc = pd.read_csv(
os.path.join(data_folder, 'newsarticles_trainedcoding.csv'),
names=tc_names
).set_index('id', drop=True)
tl['article_id'] = tc.loc[tl['coding_id']]['article_id'].values
return tl
def load_locations(data_folder=__data_folder):
"""Load the human-extracted locations from the articles."""
uc_column_names = ['id', 'date', 'relevant', 'article_id',
'user_id', 'locations', 'sentiment']
uc = pd.read_csv(os.path.join(data_folder,
'newsarticles_usercoding.csv'),
header=None,
names=uc_column_names)
uc['locations'] = uc['locations'].apply(lambda x: json.loads(x))
return uc
def load_categories(data_folder=__data_folder):
"""Loads the mapping of id to names/abbrevations of categories"""
column_names = ['id', 'category_name', 'abbreviation', 'created',
'active', 'kind']
return pd.read_csv(os.path.join(data_folder, 'newsarticles_category.csv'),
header=None,
names=column_names)
def load_data(data_folder=__data_folder, nrows=None):
"""
    Creates a dataframe of the article information and k-hot encodes the tags
    into one column per category abbreviation (model predictions get a
    matching '_model' column). The k-hot encoding is done assuming that the
    categories are 1-indexed and there are as many categories as the
    maximum value of the numeric category_id column.
Inputs:
data_folder:
A folder containing the data files in CSV format.
nrows:
Number of articles to load. Defaults to all, which uses about 4
GB of memory.
"""
df = load_articles(data_folder=data_folder, nrows=nrows)
df['relevant'] = df['relevant'] == 't'
df.rename(columns={'id': 'article_id'}, inplace=True)
df.set_index('article_id', drop=True, inplace=True)
# hopefully this will save some memory/space, can add back if needed
del(df['orig_html'])
tags_df = load_taggings(data_folder)
    # will help caching
tags_df.sort_values(by='article_id', inplace=True)
tags_df = tags_df.loc[tags_df['article_id'].isin(
df.index.intersection(tags_df['article_id']))]
locs_df = load_locations(data_folder)
locs_df.sort_values(by='article_id', inplace=True)
locs_df = locs_df.loc[locs_df['article_id'].isin(
df.index.intersection(locs_df['article_id']))]
model_tags_df = load_model_categories(data_folder)
    # will help caching
model_tags_df.sort_values(by='article_id', inplace=True)
model_tags_df = model_tags_df.loc[model_tags_df['article_id'].isin(
df.index.intersection(model_tags_df['article_id']))]
# init with empty lists
df['locations'] = np.empty([df.shape[0], 0]).tolist()
loc_article_ids = locs_df['article_id'].values
df.loc[loc_article_ids, 'locations'] = locs_df['locations'].values
def find_loc_in_string(locs, string):
"""
        The locations are generated from JavaScript, which means there
        are going to be some problems getting things to line up exactly
        and neatly. This function will hopefully perform all necessary
        transformations to find the given location text within the
        larger string.
Inputs:
locs: list of locations as loaded by load_locations
string: bodytext of article in which to find locs
Returns:
updated_locs: list of locations as loaded by
load_locations, but with a couple
extra fields ('cleaned text' and 'cleaned span').
"""
for i, loc in enumerate(locs):
loc_txt = loc['text']
loc_txt = clean_string(loc_txt)
string = clean_string(string)
loc['cleaned text'] = loc_txt
spans = [x.span() for x in re.finditer(re.escape(loc_txt), string)]
if spans:
# The string may have occurred multiple times, and since the
# spans don't line up perfectly we can't know which one is the
# "correct" one. Best we can do is find the python span closest
# to the expected javascript span.
closest = np.argmin(np.abs(
np.array([x[0] for x in spans]) - loc['start']
))
loc['cleaned span'] = spans[closest]
locs[i] = loc
return locs
df['locations'] = df.apply(
lambda r: find_loc_in_string(r['locations'], r['bodytext']),
axis=1
)
num_no_match = df['locations'].apply(
lambda locs: any([('cleaned span' not in loc) for loc in locs])
).sum()
if num_no_match:
warnings.warn(('{} location strings were not found in'
' the bodytext.').format(num_no_match),
RuntimeWarning)
model_locations_df = load_model_locations(data_folder)
model_locations_df = model_locations_df.set_index('article_id')
model_locations_gb = model_locations_df.groupby('article_id')
model_locations_text = model_locations_gb['text'].apply(list)
df['model_location_text'] = model_locations_text
categories_df = load_categories(data_folder)
categories_df.set_index('id', drop=True, inplace=True)
# tags_df['category_id'] = tags_df['category_id'].astype(str)
tags_df['category_abbreviation'] = (categories_df
['abbreviation']
[tags_df['category_id']]
.values)
model_tags_df['category_abbreviation'] = (categories_df
['abbreviation']
[model_tags_df['category_id']]
.values)
if np.setdiff1d(tags_df['article_id'].values, df.index.values).size:
warnings.warn('Tags were found for article IDs that do not exist.',
RuntimeWarning)
def update_df_with_categories(article_ids, cat_abbreviations, vals,
is_model):
# for some reason, some articles that are tagged don't show up
# in the articles CSV. filter those out.
existing_ids_filter = np.isin(article_ids, df.index.values)
article_ids = article_ids[existing_ids_filter]
cat_abbreviations = cat_abbreviations[existing_ids_filter]
vals = vals[existing_ids_filter]
for i in range(categories_df.shape[0]):
cat_name = categories_df.loc[i+1, 'abbreviation']
if is_model:
cat_name += '_model'
df[cat_name] = 0
if not is_model:
df[cat_name] = df[cat_name].astype('int8')
matches = cat_abbreviations == cat_name
if not matches.sum():
continue
df.loc[article_ids[matches], cat_name] = vals[matches]
update_df_with_categories(
model_tags_df['article_id'].values,
model_tags_df['category_abbreviation'].values + '_model',
model_tags_df['relevance'].values,
is_model=True
)
update_df_with_categories(
tags_df['article_id'].values,
tags_df['category_abbreviation'].values,
np.ones((tags_df['article_id'].values.shape), dtype='int8'),
is_model=False
)
df.loc[df['bodytext'].isnull(), 'bodytext'] = ''
return df
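# Usage sketch (illustrative only; assumes the CSV files exist in the data
# folder and that 'OEMC' is one of the category abbreviations):
#     df = load_data(nrows=1000)
#     human_tagged = df[df['OEMC'] == 1]   # human tags are 0/1 columns
#     model_scores = df['OEMC_model']      # model columns hold relevance scores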
def subsample_and_resave(out_folder, n=5, input_folder=__data_folder,
random_seed=5):
"""
Subsamples the CSV data files so that we have at least
`n` articles from each type-of-crime tag as determined
by the human coding. Saves the subsampled CSV data
into `out_folder`. If there are fewer than `n` articles
tagged with a type-of-crime, then we will use all of
the articles with that tag.
Inputs
------
out_folder : str
Path to folder where data should be saved. Should already exist.
n : int
How many examples from each category should we have?
input_folder : str
Path to where the full CSV files are saved.
random_seed : None or int
np.random.RandomState() will be seeded with this value
in order to perform the random subsampling.
"""
out_folder = str(Path(out_folder).expanduser().absolute())
input_folder = str(Path(input_folder).expanduser().absolute())
if out_folder == input_folder:
raise RuntimeError('out_folder cannot match input_folder.')
random_state = np.random.RandomState(random_seed)
df = load_data(input_folder)
chosen_indexes = []
for crime_type in df.loc[:, 'OEMC':].columns:
is_type = df[crime_type].astype(bool)
n_samps = min(n, is_type.sum())
chosen_indexes += (df.loc[is_type, :]
.sample(n_samps, random_state=random_state)
.index
.tolist())
del df
chosen_indexes = sorted(list(set(chosen_indexes)))
# newsarticles_article.csv
articles_df = load_articles(input_folder)
sample = (articles_df
.reset_index()
.set_index('id')
.loc[chosen_indexes, 'index'])
articles_df = articles_df.loc[sample, :]
    # Obfuscate the article text with ROT-13 so the subsampled test data
    # does not redistribute the original bodytext verbatim.
articles_df['bodytext'] = articles_df['bodytext'].astype(str).apply(
lambda x: codecs.encode(x, 'rot-13')
)
articles_df.to_csv(os.path.join(out_folder, 'newsarticles_article.csv'),
header=None, index=False)
del articles_df
# newsarticles_category.csv
shutil.copyfile(os.path.join(input_folder, 'newsarticles_category.csv'),
os.path.join(out_folder, 'newsarticles_category.csv'))
# newsarticles_usercoding.csv
uc_column_names = ['id', 'date', 'relevant',
'article_id', 'user_id', 'locations']
uc_df = pd.read_csv(os.path.join(input_folder,
'newsarticles_usercoding.csv'),
header=None,
names=uc_column_names)
sample = np.where(uc_df['article_id'].isin(chosen_indexes))[0]
uc_df.loc[sample, :].to_csv(
os.path.join(out_folder, 'newsarticles_usercoding.csv'),
header=None, index=False
)
uc_tags_column_names = ['id', 'usercoding_id', 'category_id']
# newsarticles_usercoding_categories.csv
uc_tags_df = pd.read_csv(
os.path.join(input_folder,
'newsarticles_usercoding_categories.csv'),
header=None,
names=uc_tags_column_names,
dtype={'id': int, 'usercoding_id': int, 'category_id': int}
)
sample = np.where(uc_df
.set_index('id')
.loc[uc_tags_df['usercoding_id'], 'article_id']
.isin(chosen_indexes)
)[0]
uc_tags_df = uc_tags_df.loc[sample, :]
uc_tags_df.to_csv(
os.path.join(out_folder, 'newsarticles_usercoding_categories.csv'),
header=None, index=False
)
# newsarticles_trainedcoding
tc_names = ['id', 'date', 'model_info', 'relevance', 'article_id']
tc = pd.read_csv(
        os.path.join(input_folder, 'newsarticles_trainedcoding.csv'),
names=tc_names
)
tc = tc.loc[tc['article_id'].isin(chosen_indexes)]
tc.to_csv(
os.path.join(out_folder, 'newsarticles_trainedcoding.csv'),
header=False, index=False
)
# newsarticles_trainedcategoryrelevance
tcr_names = ['id', 'relevance', 'category_id', 'coding_id']
tcr = pd.read_csv(
        os.path.join(input_folder, 'newsarticles_trainedcategoryrelevance.csv'),
names=tcr_names
)
tcr = tcr.loc[tcr['coding_id'].isin(tc['id'])]
tcr.to_csv(
os.path.join(out_folder, 'newsarticles_trainedcategoryrelevance.csv'),
header=False, index=False
)
# newsarticles_trainedlocation
tl_names = ['id', 'text', 'latitude', 'longitude', 'coding_id']
tl = pd.read_csv(
        os.path.join(input_folder, 'newsarticles_trainedlocation.csv'),
names=tl_names
)
tl = tl.loc[tl['coding_id'].isin(tc['id'])]
tl.to_csv(
os.path.join(out_folder, 'newsarticles_trainedlocation.csv'),
header=False, index=False
)
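# Usage sketch (hypothetical path; out_folder must already exist and must
# differ from the input folder):
#     subsample_and_resave('/tmp/tagnews_sample', n=5)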
def load_crime_data(data_folder=__data_folder):
crimes = pd.read_csv(os.path.join(data_folder, 'Crimes.csv'))
crimes = crimes[crimes['Year'] > 2010]
crime_string = pd.Series('', crimes.index)
# ['ID', 'Case Number', 'Date', 'Block', 'IUCR', 'Primary Type',
# 'Description', 'Location Description', 'Arrest', 'Domestic', 'Beat',
# 'District', 'Ward', 'Community Area', 'FBI Code', 'X Coordinate',
# 'Y Coordinate', 'Year', 'Updated On', 'Latitude', 'Longitude',
# 'Location']
# TODO: synonyms on this for month name, weekday name,
# time of day (e.g. afternoon), etc.
crime_string += crimes['Date'] + ' '
# TODO: synonyms?
crime_string += crimes['Primary Type'] + ' '
# TODO: synonyms?
crime_string += crimes['Description'] + ' '
# TODO: synonyms?
crime_string += crimes['Location Description'] + ' '
# TODO: synonyms?
iucr = pd.read_csv(os.path.join(data_folder, 'IUCR.csv'))
iucr.set_index('IUCR', drop=True, inplace=True)
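    # Pad three-character IUCR codes with a leading zero, presumably so they
    # match the four-character codes used in the crimes data; note that this
    # writes into the index's underlying array in place.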
idx = iucr.index
idx_values = idx.values
idx_values[idx.str.len() == 3] = '0' + idx_values[idx.str.len() == 3]
crime_string += (iucr.loc[crimes['IUCR'], 'PRIMARY DESCRIPTION']
.fillna('')
.values
+ ' ')
crime_string += (iucr.loc[crimes['IUCR'], 'SECONDARY DESCRIPTION']
.fillna('')
.values
+ ' ')
community_areas = pd.read_csv(os.path.join(data_folder, 'CommAreas.csv'))
community_areas.set_index('AREA_NUM_1', inplace=True, drop=True)
crime_string += (community_areas.loc[crimes['Community Area'], 'COMMUNITY']
.fillna('')
.values
+ ' ')
return crimes, crime_string
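# Note added for clarity: crime_string holds one concatenated text description
# per crime record (date, primary type, description, location description, the
# IUCR primary/secondary descriptions, and the community area name); it is
# presumably intended for text matching against article text.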
def load_ner_data(data_folder=__data_folder):
"""
Loads ner.csv from the specified data folder.
    The returned column 'tag' is a binary value indicating whether or not
    the row corresponds to the entity "geo" (the original tags are kept in
    'all_tags'). Typically, you will want to use the column 'word' to
    predict the column 'tag'.
"""
df = pd.read_csv(os.path.join(data_folder, 'ner.csv'),
encoding="ISO-8859-1",
error_bad_lines=False,
index_col=0)
df.dropna(subset=['word', 'tag'], inplace=True)
df.reset_index(inplace=True, drop=True)
df['stag'] = (df['tag'] == 'B-geo') | (df['tag'] == 'I-geo')
df['all_tags'] = df['tag']
df['tag'] = df['stag']
df = df[['word', 'all_tags', 'tag']]
return df
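# Usage sketch (illustrative only; assumes ner.csv is present in the data folder):
#     ner = load_ner_data()
#     X, y = ner['word'], ner['tag']  # predict the binary geo tag from the word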
|
chicago-justice-project/article-tagging
|
lib/tagnews/utils/load_data.py
|
Python
|
mit
| 18,234
|
#!/usr/bin/env python
# encoding: utf-8
"""
cluster.py - manage the behaviour and start up of a cluster
Created by Dave Williams on 2016-07-19
"""
import os
import sys
import time
import string
import copy
import itertools
import subprocess as subp
import configparser
import boto
## Defaults
BASE_PATH = os.path.abspath(os.path.split(__file__)[0]+'/../..')+'/'
CODE_DIR = 'multifil'
CODE_LOCATION = BASE_PATH + CODE_DIR
USER_DATA = CODE_LOCATION + '/aws/userdata.py'
CODE_BUCKET = 'model-code'
JOB_QUEUE = 'job-queue'
STATUS_QUEUE = 'status-queue'
KEY_FILE = os.path.expanduser('~/.aws/keys/id_aws')
KEY_NAME = 'id_aws'
SECURITY_GROUP_ID = 'sg-2a31b650'
SUBNET_IDS = {'us-east-1a':'subnet-7653873f', # map an availability zone
'us-east-1b':'subnet-39a5bf61', # to the right VPC
'us-east-1c':'subnet-018b1b64',
'us-east-1d':'subnet-00ff1b2d',
'us-east-1e':'subnet-a5957299'}
AMI = ('ami-2d39803a', 'c4.xlarge') # Ubuntu
HD_SIZE = 200 # primary drive size in GB
SPOT_BID = 0.209 # bidding the on-demand price
## Helper functions, quite substantial
def print_direct(string):
"""Print the given string straight to the stdout"""
try:
sys.stdout.truncate(0)
except AttributeError:
pass #for working with terminal and notebooks
sys.stdout.write(string)
sys.stdout.flush()
return
def get_access_keys(filename=os.path.expanduser('~/.aws/credentials'),
section='cluster'):
"""Parse out the access and secret keys"""
config = configparser.ConfigParser()
config.read(filename)
id = config.get(section,'aws_access_key_id')
secret = config.get(section,'aws_secret_access_key')
return id, secret
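# The credentials file is expected to be in the standard AWS credentials
# format with a [cluster] section (values below are placeholders):
#     [cluster]
#     aws_access_key_id = AKIA...
#     aws_secret_access_key = ...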
def get_bdm(ec2=None, ami=AMI[0], size=HD_SIZE):
    """Return the AMI's block device mapping with the root volume resized."""
    # Connect lazily; using boto.connect_ec2() as a default argument would
    # open an AWS connection as soon as this module is imported.
    if ec2 is None:
        ec2 = boto.connect_ec2()
    bdm = ec2.get_image(ami).block_device_mapping
    bdm['/dev/sda1'].size = size
    bdm['/dev/sda1'].encrypted = None
    return bdm
def load_userdata(filename='userdata.py', queue_name=JOB_QUEUE):
id, secret = get_access_keys()
user_data_dict = {
'aws_access_key': id,
'aws_secret_key': secret,
'job_queue_name': queue_name,
'code_zip_key': "s3://%s/%s.zip"%(CODE_BUCKET, CODE_DIR)}
with open(filename, 'r') as udfile:
ud_template = string.Template(udfile.read())
return ud_template.substitute(user_data_dict)
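# The userdata file is read as a string.Template, so any $aws_access_key,
# $aws_secret_key, $job_queue_name or $code_zip_key placeholders in it are
# filled in from the dictionary above.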
def update_code_on_s3():
"""Update the code on s3 from our local copy"""
zipname = CODE_DIR+'.zip'
    # These shell commands are fragile (hard-coded zip flags and paths)... ha
cmds = (
"cd %s; zip -roTFS -imultifil/\* -isetup* %s ./"%(BASE_PATH, zipname),
"cd %s; aws s3 cp %s s3://%s/"%(BASE_PATH, zipname, CODE_BUCKET))
print(os.getcwd())
print(cmds)
[print(subp.call(c, shell=True)) for c in cmds]
def launch_on_demand_instances(ec2, num_of, userdata,
ami=AMI[0], inst_type=AMI[1]):
if len(userdata) > 16*1024:
print("error: User data file is too big")
return
reservation = ec2.run_instances(
image_id = ami,
key_name = KEY_NAME,
security_group_ids = [SECURITY_GROUP_ID],
user_data = userdata,
instance_type = inst_type,
min_count = num_of,
max_count = num_of,
subnet_id = SUBNET_IDS['us-east-1a'],
block_device_map = get_bdm(ec2))
time.sleep(.5) # Give the machines time to register
nodes = copy.copy(reservation.instances)
return nodes
def launch_spot_instances(ec2, num_of, userdata, bid=SPOT_BID,
ami=AMI[0], inst_type=AMI[1]):
if len(userdata) > 16*1024:
print("error: User data file is too big")
return
# Choose cheapest availability zone
sphs = ec2.get_spot_price_history(filters={'instance_type':inst_type})
prices = [sph.price for sph in sphs]
availability_zone = sphs[prices.index(min(prices))].availability_zone
reservation = ec2.request_spot_instances(
price = bid,
image_id = ami,
key_name = KEY_NAME,
security_group_ids = [SECURITY_GROUP_ID],
user_data = userdata.encode('ascii'),
instance_type = inst_type,
count = num_of,
placement = availability_zone,
subnet_id = SUBNET_IDS[availability_zone],
block_device_map = get_bdm(ec2))
time.sleep(.5) # Give the machines time to register
return reservation
def watch_cluster():
"""Give real-time updates on what is happening aboard our cluster"""
#Make it pretty, or pretty trippy
range_plus =lambda ri,re,s: [str(i)+s for i in range(ri,re)]
styles = ["\033["+''.join(style) for style in itertools.product(
range_plus(0,3,';'), range_plus(30,38,';'), range_plus(40,48,'m'))]
#Make it work
sqs = boto.connect_sqs()
status_queue = sqs.get_queue(STATUS_QUEUE)
ec2 = boto.connect_ec2()
print("Starting cluster watch, ^c to stop")
while True: #quit via ^C
try:
# Gather and report messages
if status_queue.count()>0:
while True:
msg = status_queue.read()
body = msg.get_body()
last_ip = int(body.split('-')[1].split('.')[-1])
style = styles[last_ip%len(styles)]
print(style+body)
status_queue.delete_message(msg)
# Make sure some instances are running
            # A dict literal cannot hold 'instance-state-code' twice, so pass
            # a list to match both pending (0) and running (16) instances.
            running_instances = ec2.get_all_instances(
                filters={'instance-state-code': ['0', '16']})
if len(running_instances) == 0:
print("\nNo running instances found")
break
# Don't hammer the connection
time.sleep(3)
except KeyboardInterrupt: #^c pressed
print("\nMy watch has ended")
break
except AttributeError: #no message to read body from
pass
class cluster:
def __init__(self,
number_of_instances,
queue_name=JOB_QUEUE,
userdata=USER_DATA,
use_spot=True):
"""A cluster management object"""
self.number_of_instances = number_of_instances
self.queue_name = queue_name
self.userdata = load_userdata(userdata, queue_name)
self.use_spot = use_spot
self.s3 = boto.connect_s3()
self.ec2 = boto.connect_ec2()
def launch(self):
"""Get the cluster rolling, manual to make you think a bit"""
print("Uploading code to S3")
update_code_on_s3()
print("Creating reservation")
# TODO: This next bit could be refactored to be prettier
if self.use_spot is True:
nodes = launch_spot_instances(self.ec2,
self.number_of_instances,
self.userdata)
ids = [node.id for node in nodes]
node_states = lambda nodes: [node.state == 'active'
for node in nodes]
node_update = lambda : self.ec2.get_all_spot_instance_requests(ids)
else:
nodes = launch_on_demand_instances(self.ec2,
self.number_of_instances,
self.userdata)
ids = [node.id for node in nodes]
node_states = lambda nodes: [node.state_code == 16
for node in nodes]
node_update = lambda : [inst for res in
self.ec2.get_all_instances(ids)
for inst in res.instances]
print("Nodes are starting...")
while not all(node_states(nodes)):
nodes = node_update()
ready = sum(node_states(nodes))
print_direct("\r%i of %i nodes are ready"%(ready, len(nodes)))
time.sleep(1)
print_direct("\nAll nodes ready \n")
if self.use_spot:
nodes = self.ec2.get_only_instances([n.instance_id for n in nodes])
self.nodes = nodes
return nodes
def kill_cluster(self):
"""Terminate the cluster nodes"""
[node.terminate() for node in self.nodes]
def node_ip_addresses(self):
"""Print the ip addresses for each node"""
        [print(instance.ip_address) for instance in self.nodes]
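# Usage sketch (illustrative; assumes valid AWS credentials plus the queues,
# bucket, AMI and subnets named above):
#     c = cluster(number_of_instances=2, use_spot=True)
#     c.launch()
#     c.node_ip_addresses()
#     watch_cluster()   # ^C to stop
#     c.kill_cluster()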
|
cdw/multifil
|
multifil/aws/cluster.py
|
Python
|
mit
| 8,549
|
# -*- coding: utf-8 -*-
__all__ = [
"zynthian_autoconnect"
]
from zynautoconnect.zynthian_autoconnect import *
|
zynthian/zynthian-ui
|
zynautoconnect/__init__.py
|
Python
|
gpl-3.0
| 112
|
# Task 1: task1.py
# Grouping the enrolled members of each class section
# The purpose of grouping is to give enrolled students teammates to discuss problems with, to divide up work, to spur each other on, or...
# There are at least two ways to form groups: find your own teammates, or have a program assign them
# Once each group's members are settled, members are deliberately seated apart (to force them to collaborate through the networked course system)
# Each group first sorts its members by student ID in ascending numeric order; the smallest student ID stands for the group, and the group order is then decided by sorting the group leaders' IDs in ascending order across groups
# The group whose leader has the smallest student ID is group 1, and the remaining group numbers follow in ascending order.
# Once each group's leader and the group order are settled, a computer program assigns fixed seats to the members of each group according to the seating of the computer-aided design lab
# Question: how would the grouping workflow be carried out with an analog method and with a digital method, respectively? What are the advantages of the analog method? What are the advantages of the digital method?
|
s40523127/2017springwcm_g4
|
course/task1.py
|
Python
|
agpl-3.0
| 902
|
"""Shim to allow python -m tornado.test.
This only works in python 2.7+.
"""
# pylint: skip-file
from __future__ import absolute_import, division, print_function
from salt.ext.tornado.test.runtests import all, main
# tornado.testing.main autodiscovery relies on 'all' being present in
# the main module, so import it here even though it is not used directly.
# The following line prevents a pyflakes warning.
all = all
main()
|
saltstack/salt
|
salt/ext/tornado/test/__main__.py
|
Python
|
apache-2.0
| 430
|