repo_name (stringlengths 6–97)
|
path (stringlengths 3–341)
|
text (stringlengths 8–1.02M)
|
|---|---|---|
sprenge/energywizard
|
degreeday/apps.py
|
<gh_stars>0
from django.apps import AppConfig
class DegreedayConfig(AppConfig):
name = 'degreeday'
|
sprenge/energywizard
|
degreeday/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-11-27 17:09
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Degreeday',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_day', models.DateField()),
('value', models.FloatField()),
],
),
migrations.CreateModel(
name='DegreedaySource',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=512, unique=True, verbose_name='name source degree day')),
('api_key', models.CharField(blank=True, max_length=512, verbose_name='api key')),
('api_secret', models.CharField(blank=True, max_length=512, verbose_name='api secret')),
('base_temperature', models.FloatField(default=16.5)),
('station_id', models.CharField(blank=True, max_length=512, verbose_name='weather station id')),
('fetch_url', models.CharField(blank=True, max_length=1024, verbose_name='base url for fetching info')),
('fetch_method', models.CharField(default='aardgas.be', max_length=512, verbose_name='how to fetch data')),
],
),
migrations.AddField(
model_name='degreeday',
name='source',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='degreeday.DegreedaySource'),
),
migrations.AlterUniqueTogether(
name='degreeday',
unique_together=set([('source', 'date_day', 'value')]),
),
]
|
sprenge/energywizard
|
meter/urls.py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(
r'^save_meter$',
views.save_meter,
name='save_meter'),
url(
r'^get_meter_types$',
views.get_meter_types,
name='get_meter_types'),
url(
r'^get_meter_readings$',
views.get_meter_readings,
name='get_meter_readings'),
]
|
sprenge/energywizard
|
degreeday/models.py
|
<filename>degreeday/models.py
from django.db import models
class DegreedaySource(models.Model):
name = models.CharField('name source degree day', max_length=512, unique=True)
api_key = models.CharField('api key', max_length=512, blank=True)
api_secret = models.CharField('api secret', max_length=512, blank=True)
base_temperature = models.FloatField(default=16.5)
station_id = models.CharField('weather station id', max_length=512, blank=True)
fetch_url = models.CharField('base url for fetching info', max_length=1024, blank=True)
fetch_method = models.CharField('how to fetch data', max_length=512, default='aardgas.be')
def __unicode__(self):
return str(self.name)
def __str__(self):
return self.__unicode__()
class Degreeday(models.Model):
source = models.ForeignKey(DegreedaySource)
date_day = models.DateField()
value = models.FloatField()
def __unicode__(self):
return str(self.id)
def __str__(self):
return self.__unicode__()
class Meta:
unique_together = ('source', 'date_day', 'value')
|
sprenge/energywizard
|
household/migrations/0002_auto_20161127_1809.py
|
<filename>household/migrations/0002_auto_20161127_1809.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-11-27 17:09
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('degreeday', '0001_initial'),
('meter', '0001_initial'),
('household', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='metertypehousehold',
name='meter_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='meter.MeterType'),
),
migrations.AddField(
model_name='household',
name='degreeday_source',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='degreeday.DegreedaySource'),
),
migrations.AlterUniqueTogether(
name='metertypehousehold',
unique_together=set([('household', 'meter_type')]),
),
]
|
sprenge/energywizard
|
household/urls.py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(
r'^getall$',
views.get_all_households,
name='get_all_households'),
url(
r'^save_household_user$',
views.save_household_user,
name='save_household_user'),
]
|
sprenge/energywizard
|
energywizard/urls.py
|
"""energywizard URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from main.views import auth_login, auth_logoff
from graphs.views import get_meter_values
from graphs.views import graph_home
urlpatterns = [
url(r'^accounts/', include('allauth.urls')),
url(r'^admin/', admin.site.urls),
url(r'^', include('main.urls')),
url(r'^household/', include('household.urls')),
url(r'^meter/', include('meter.urls')),
url(r'^login/', auth_login),
url(r'^logout/', auth_logoff),
url(r'^graph/get_meter_values/(?P<household>.*)/(?P<meter>.*)/$', get_meter_values),
url(r'^graph/home/(?P<household>.*)/(?P<meter>.*)/$', graph_home),
]
|
sprenge/energywizard
|
meter/models.py
|
<gh_stars>0
from django.db import models
from household.models import Household
from django.contrib.auth.models import User, Group
class EnergieType(models.Model):
name = models.CharField("energie type", max_length=256, help_text="e.g. Gas, Water", unique=True)
e_unit = models.CharField("energy unit", max_length=128, help_text="e.g. KW")
def __unicode__(self):
return self.name
def __str__(self):
return self.__unicode__()
class Meta:
verbose_name_plural = "EnergieTypes"
class MeterType(models.Model):
name = models.CharField("meter type", max_length=256, help_text="e.g. day counter elektricity")
variant = models.CharField("variant", default="standard", max_length=256)
color_whole = models.CharField(
"color whole meter part",
default="White digits on black blackground",
max_length=128)
max_whole = models.IntegerField("maximum number of figures whole meter part", default=6)
color_fraction = models.CharField(
"Color fraction figure",
default="White digits on black blackground",
max_length=128, blank=True, null=True)
max_fraction = models.IntegerField("maximum number of figures fraction part", default=4) # 0 is disabled
photo = models.ImageField("photo meter (200x200)", blank=True, null=True)
energie_type = models.ForeignKey(EnergieType)
is_device = models.BooleanField("is a device", default=False)
def __unicode__(self):
return str(self.name)+"_"+str(self.variant)
def __str__(self):
return self.__unicode__()
class MeterReading(models.Model):
meter_register = models.ForeignKey(User)
meter_type = models.ForeignKey(MeterType)
household = models.ForeignKey(Household)
meter_reading = models.FloatField("meter reading")
ts = models.DateTimeField("timestamp measurement")
def __unicode__(self):
return str(self.household) + "_" + str(self.meter_type) + "_" + str(self.ts) + "_" + str(self.meter_reading)
def __str__(self):
return self.__unicode__()
class Meta:
unique_together = ('ts', 'household', 'meter_type')
|
sprenge/energywizard
|
main/urls.py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(
r'^$',
views.homepage,
name='homepage'),
]
|
sprenge/energywizard
|
household/views.py
|
<reponame>sprenge/energywizard<filename>household/views.py
import json
from django.shortcuts import render
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.http import JsonResponse
from django.template import RequestContext
from django.contrib.auth.models import User, Group
from household.models import Household
from household.models import UserHousehold
def get_all_households(request):
'''
Return a list of all active households, each record containing
- household unique id
- household name
'''
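# Example response (illustrative values): [{"id": 1, "display": "some household name"}, ...]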
info = []
active_household_list = Household.objects.filter(active=True)
user = User.objects.get(username=request.user)
for household in active_household_list:
grp_found = False
for grp in user.groups.all():
print (grp, household.group)
if grp.name == household.group.name:
grp_found = True
if grp_found:
print ("add")
rec = {}
rec['id'] = household.id
rec['display'] = household.name
info.append(rec)
return JsonResponse(info, safe=False)
def save_household_user(request):
'''
Update the household assignment (UserHousehold record) for the logged-in user
'''
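# Expected POST body (illustrative): {"id": <household id>} -- reassigns the user's UserHousehold record.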
r = {'result': True}
if request.method == 'POST':
data = json.loads(request.body.decode("utf-8"))
user = User.objects.get(username=request.user)
user_household = UserHousehold.objects.get(user=user)
household = Household.objects.get(id=data['id'])
user_household.household = household
user_household.save()
return JsonResponse(r, safe=False)
|
sprenge/energywizard
|
meter/admin.py
|
<reponame>sprenge/energywizard
from django.contrib import admin
from import_export import resources
from .models import EnergieType
from .models import MeterType
from .models import MeterReading
from import_export.admin import ImportExportModelAdmin
class ReadingResource(resources.ModelResource):
class Meta:
model = MeterReading
class MeterReadingAdmin(ImportExportModelAdmin):
resource_class = ReadingResource
class MeterTypeAdmin(admin.ModelAdmin):
list_display = ('name', 'is_device')
list_filter = ('is_device',)
admin.site.register(EnergieType)
admin.site.register(MeterType, MeterTypeAdmin)
admin.site.register(MeterReading, MeterReadingAdmin)
|
sprenge/energywizard
|
household/admin.py
|
from django.contrib import admin
from .models import Household
from .models import MetertypeHousehold
from .models import UserHousehold
admin.site.register(Household)
admin.site.register(MetertypeHousehold)
admin.site.register(UserHousehold)
|
sprenge/energywizard
|
household/migrations/0001_initial.py
|
<filename>household/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-11-27 17:09
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Household',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=128, verbose_name='name household')),
('nbr_members', models.IntegerField(default=1, verbose_name='number of members in household')),
('address', models.CharField(blank=True, max_length=256, verbose_name='street and number')),
('phone', models.CharField(blank=True, max_length=64, verbose_name='phone number')),
('postal_code', models.CharField(max_length=32, verbose_name='postal code')),
('city', models.CharField(blank=True, max_length=128, verbose_name='city')),
('country', django_countries.fields.CountryField(max_length=2, verbose_name='country')),
('info', models.TextField(blank=True, null=True, verbose_name='info')),
('active', models.BooleanField(default=True, verbose_name='guided at this moment')),
],
options={
'verbose_name_plural': 'Households',
},
),
migrations.CreateModel(
name='MetertypeHousehold',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('meter_id', models.CharField(blank=True, max_length=128, verbose_name='meter id')),
('order', models.IntegerField(default=0, verbose_name='order, high=more important')),
('household', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='household.Household')),
],
options={
'verbose_name_plural': 'MeterTypesHouseholds',
},
),
migrations.CreateModel(
name='UserHousehold',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('household', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='household.Household')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'UsersHouseholds',
},
),
]
|
sprenge/energywizard
|
meter/views.py
|
import json
import datetime
import time
from django.shortcuts import render
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import JsonResponse
from django.contrib.auth.models import User, Group
from household.models import MetertypeHousehold
from meter.models import MeterReading
from meter.models import MeterType
from django.utils import timezone
from django.utils.timezone import localtime
from household.models import UserHousehold
def get_meter_types(request):
meter_info = {'result': True}
user = User.objects.get(username=request.user)
try:
household_rec = UserHousehold.objects.get(user=user)
household = household_rec.household
except Exception as e:
print (e)
meter_info['result'] = False
return JsonResponse(meter_info, safe=False)
meter_types_household = MetertypeHousehold.objects.filter(household=household)
meter_list = []
for meter_type_household in meter_types_household:
meter_rec = {}
meter_rec['value'] = meter_type_household.id
meter_rec['displayName'] = meter_type_household.meter_type.name
meter_rec['wholeText'] = meter_type_household.meter_type.color_whole
meter_rec['fragText'] = meter_type_household.meter_type.color_fraction
print ("photo", meter_type_household.meter_type.photo)
if meter_type_household.meter_type.photo != "":
meter_rec['picture'] = meter_type_household.meter_type.photo.url
else:
meter_rec['picture'] = '/static/theme/img/icon_meter.png'
meter_list.append(meter_rec)
now = time.localtime(time.time())
current_time = {}
current_time['year'] = now.tm_year
current_time['month'] = now.tm_mon
current_time['day'] = now.tm_mday
current_time['hour'] = now.tm_hour
current_time['minute'] = now.tm_min
meter_info['meter_types'] = meter_list
meter_info['current_time'] = current_time
return JsonResponse(meter_info, safe=False)
def save_meter(request):
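# Expected JSON body (illustrative, keys taken from the code below):
# {"id": <MetertypeHousehold id>, "meterWhole": 1234, "meterFrag": "56",
#  "meterJaar": 2016, "meterMaand": 11, "meterDag": 27, "meterUur": 18, "meterMinuten": 5}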
if request.method == 'POST':
data = json.loads(request.body.decode("utf-8"))
print ("data", data, type(data))
print (data['id'])
r = data
r['result'] = False
user = User.objects.get(username=request.user)
try:
household_rec = UserHousehold.objects.get(user=user)
household = household_rec.household
except Exception as e:
print (e)
r['reason'] = "not connected to household"
return JsonResponse(r, safe=False)
meter_reading = MeterReading()
meter_reading.meter_register = user
meter_type_household = MetertypeHousehold.objects.get(id=data['id'])
meter_type = meter_type_household.meter_type
meter_reading.meter_type = meter_type
meter_reading.household = household
try:
meter_r = str(data['meterWhole']) + "." + data['meterFrag']
meter_reading.meter_reading = float(meter_r)
except Exception as e:
print (e)
r['reason'] = "Meter reading not correct"
return JsonResponse(r, safe=False)
try:
dt = datetime.datetime(
year=data['meterJaar'], month=data['meterMaand'], day=data['meterDag'],
minute=data['meterMinuten'], hour=data['meterUur'])
except Exception as e:
print (e)
r['reason'] = "Timestamp not correct"
return JsonResponse(r, safe=False)
meter_reading.ts = dt
try:
meter_reading.save()
except Exception as e:
print (e)
r['reason'] = "Save failed, only 1 reading for a given timestamp"
return JsonResponse(r, safe=False)
r['result'] = True
# validate now the content
r['meterWhole'] = 0
r['meterFrag'] = '0'
else:
r = {'result': False, 'reason': "Hmm, contact your admin"}
return JsonResponse(r, safe=False)
def get_meter_readings(request):
meter_info = {'result': True}
user = User.objects.get(username=request.user)
try:
household_rec = UserHousehold.objects.get(user=user)
household = household_rec.household
except Exception as e:
print (e)
meter_info['result'] = False
return JsonResponse(meter_info, safe=False)
meter_readings = MeterReading.objects.filter(household=household).order_by('-ts')
meter_list = []
for meter_reading in meter_readings:
rec = {}
dt = localtime(meter_reading.ts)
rec['time'] = dt.strftime('%d %b %Y %H:%M')
rec['meter_type'] = str(meter_reading.meter_type.name)
rec['meter_reading'] = str(meter_reading.meter_reading)
rec['meter_register'] = \
str(meter_reading.meter_register.first_name) + " " +\
str(meter_reading.meter_register.last_name)
meter_list.append(rec)
return JsonResponse(meter_list, safe=False)
|
sprenge/energywizard
|
household/models.py
|
from django.db import models
from django.contrib.auth.models import User, Group
from django_countries.fields import CountryField
from degreeday.models import DegreedaySource
class Household(models.Model):
name = models.CharField('name household', max_length=128, blank=True)
nbr_members = models.IntegerField("number of members in household", default=1)
address = models.CharField('street and number', max_length=256, blank=True)
phone = models.CharField('phone number', max_length=64, blank=True)
postal_code = models.CharField('postal code', max_length=32)
city = models.CharField('city', max_length=128, blank=True)
country = CountryField('country')
info = models.TextField('info', blank=True, null=True)
active = models.BooleanField("guided at this moment", default=True)
degreeday_source = models.ForeignKey(DegreedaySource, blank=True, null=True)
group = models.ForeignKey(Group, blank=True, null=True)
def __unicode__(self):
return str(self.name)+"_"+str(self.postal_code)
def __str__(self):
return str(self.name)+"_"+str(self.postal_code)
class Meta:
verbose_name_plural = "Households"
class MetertypeHousehold(models.Model):
household = models.ForeignKey(Household)
meter_type = models.ForeignKey("meter.MeterType")
meter_id = models.CharField('meter id', max_length=128, blank=True)
order = models.IntegerField("order, high=more important", default=0)
def __unicode__(self):
return str(self.household) + "_" + str(self.meter_type)
def __str__(self):
return self.__unicode__()
class Meta:
verbose_name_plural = "MeterTypesHouseholds"
unique_together = ('household', 'meter_type')
class UserHousehold(models.Model):
'''
Table binds a user account to a household
'''
household = models.ForeignKey(Household)
user = models.OneToOneField(User, unique=True)
def __unicode__(self):
return str(self.household) + "_" + str(self.user)
def __str__(self):
return self.__unicode__()
class Meta:
verbose_name_plural = "UsersHouseholds"
|
sprenge/energywizard
|
graphs/views.py
|
import time
from django.shortcuts import render
from django.http import JsonResponse
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.template import RequestContext
from household.models import Household
from meter.models import MeterType
from meter.models import MeterReading
def get_meter_values(request, household, meter):
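# Returns a single-series payload such as [{"key": "Quantity", "bar": true, "values": [[1164862800000, 389.0], ...]}]
# (timestamps in ms since epoch, built below; presumably consumed by an NVD3-style bar chart on the client).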
ret_values = {}
ret_values['key'] = "Quantity"
ret_values['bar'] = True
vals = []
hh = int(household)
mt = int(meter)
hh_rec = Household.objects.get(id=hh)
m_rec = MeterType.objects.get(id=mt)
val_rec = MeterReading.objects.filter(household=hh_rec, meter_type=m_rec)
for rec in val_rec:
srec = []
srec.append(int(time.mktime(rec.ts.timetuple())*1000))
srec.append(rec.meter_reading)
# srec = [ 1164862800000 , 389.0]
vals.append(srec)
print (rec)
ret_values['values'] = vals
result = [ret_values]
return JsonResponse(result, safe=False)
def graph_home(request, household, meter):
hh = int(household)
mt = int(meter)
hh_rec = Household.objects.get(id=hh)
m_rec = MeterType.objects.get(id=mt)
val_rec = MeterReading.objects.filter(household=hh_rec, meter_type=m_rec)
for rec in val_rec:
print (rec)
info = {"household": household, "meter": meter}
return render_to_response("consumption_client.html", info, RequestContext(request))
|
sprenge/energywizard
|
main/views.py
|
<filename>main/views.py
from django.shortcuts import render
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.http import JsonResponse
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User, Group
from django.utils.translation import get_language
from household.models import UserHousehold
def is_user_member_of(user, group):
'''
Check if user (by username) is member of a certain group
'''
if user.is_superuser:
return True
return user.groups.filter(name=group).exists()
@login_required
def homepage(request):
info = {}
print ("espr", get_language())
user = User.objects.get(username=request.user)
info['first_name'] = user.first_name
info['last_name'] = user.last_name
try:
household_rec = UserHousehold.objects.get(user=user)
household = household_rec.household
except Exception as e:
print (e)
return render_to_response("fatal_error.html", info, RequestContext(request))
for grp in user.groups.all():
if grp == household.group:
info['group'] = grp.name
info['household'] = str(household.name)
print ("household", info['household'])
info['energiemeester'] = is_user_member_of(user, 'energiemeester')
return render_to_response("homepage.html", info, RequestContext(request))
def auth_login(request):
'''
Login request from a user
'''
state = ""
context = {'next': request.GET['next'] if request.GET and 'next' in request.GET else ''}
if request.user.is_authenticated():
return HttpResponseRedirect('/')
if request.POST:
username = request.POST.get('username')
password = request.POST.get('password')
redirect = request.POST['next']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
state = "You're successfully logged in!"
# return HttpResponseRedirect(request.POST.get('next', '/'))
if redirect == "":
redirect = "/"
return HttpResponseRedirect(redirect)
else:
state = "Your account is not active, please contact the site admin."
else:
state = "Your username and/or password were incorrect."
context['state'] = state
return render_to_response("login.html", context, RequestContext(request))
def auth_logoff(request):
'''
Log off request from a user
'''
logout(request)
return HttpResponseRedirect('/')
|
sprenge/energywizard
|
main/management/commands/import_graaddagen_archive.py
|
from django.core.management.base import BaseCommand, CommandError
import requests
import pandas
import datetime
from degreeday.models import DegreedaySource
from degreeday.models import Degreeday
ilist = [
'http://www.aardgas.be/sites/default/files/sites/default/files/imce/DJE01new-1.xls',
]
class Command(BaseCommand):
help = 'Import graaddagen from http://www.aardgas.be/nl/particulier/graaddagen'
def handle(self, *args, **options):
for xls in ilist:
df = pandas.read_excel(xls)
for r in df.iterrows():
r1 = r[1][0]
r2 = r[1][1]
if isinstance(r1, datetime.datetime):
d = r1.date()
print(d, r2)
dds = DegreedaySource.objects.get(name="aardgas")
print (r2)
dd = Degreeday()
dd.source = dds
dd.date_day = d
dd.value = r2
dd.save()
|
sprenge/energywizard
|
degreeday/admin.py
|
<reponame>sprenge/energywizard
from django.contrib import admin
from .models import DegreedaySource
from .models import Degreeday
class DegreedayAdmin(admin.ModelAdmin):
list_display = ['source', 'date_day', 'value']
admin.site.register(DegreedaySource)
admin.site.register(Degreeday, DegreedayAdmin)
|
sprenge/energywizard
|
main/management/commands/import_graaddagen_dagelijks.py
|
<gh_stars>0
import datetime
from django.core.management.base import BaseCommand, CommandError
import requests
import pandas
from lxml import html
from degreeday.models import DegreedaySource
from degreeday.models import Degreeday
base_link = "http://www.aardgas.be"
class Command(BaseCommand):
help = 'Import dagelijkse graaddagen from http://www.aardgas.be/nl/particulier/graaddagen'
def handle(self, *args, **options):
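# Scrape the "Dagelijkse gegevens" Excel link from the aardgas.be degree-day page,
# load it with pandas and store one Degreeday row per (date, value) pair under the "aardgas" source.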
r = requests.get(base_link+'/nl/particulier/graaddagen')
tree = html.fromstring(r.content)
e = tree.xpath('.//a[contains(text(),"Dagelijkse gegevens")]')
df = pandas.read_excel(base_link+e[0].attrib['href'])
for r in df.iterrows():
r1 = r[1][0]
r2 = r[1][1]
if isinstance(r1, datetime.datetime):
d = r1.date()
print(d, r2)
dds = DegreedaySource.objects.get(name="aardgas")
print (r2)
dd = Degreeday()
dd.source = dds
dd.date_day = d
dd.value = r2
try:
dd.save()
except Exception as e:
print (e)
|
sprenge/energywizard
|
meter/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-11-27 17:09
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('household', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='EnergieType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='e.g. Gas, Water', max_length=256, unique=True, verbose_name='energie type')),
('e_unit', models.CharField(help_text='e.g. KW', max_length=128, verbose_name='energy unit')),
],
options={
'verbose_name_plural': 'EnergieTypes',
},
),
migrations.CreateModel(
name='MeterReading',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('meter_reading', models.FloatField(verbose_name='meter reading')),
('ts', models.DateTimeField(verbose_name='timestamp measurement')),
('household', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='household.Household')),
('meter_register', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='MeterType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='e.g. day counter electricity', max_length=256, verbose_name='meter type')),
('variant', models.CharField(default='standard', max_length=256, verbose_name='variant')),
('color_whole', models.CharField(default='White digits on black background', max_length=128, verbose_name='color whole meter part')),
('max_whole', models.IntegerField(default=6, verbose_name='maximum number of figures whole meter part')),
('color_fraction', models.CharField(blank=True, default='White digits on black background', max_length=128, null=True, verbose_name='Color fraction figure')),
('max_fraction', models.IntegerField(default=4, verbose_name='maximum number of figures fraction part')),
('photo', models.ImageField(blank=True, null=True, upload_to='', verbose_name='photo meter (200x200)')),
('energie_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='meter.EnergieType')),
],
),
migrations.AddField(
model_name='meterreading',
name='meter_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='meter.MeterType'),
),
migrations.AlterUniqueTogether(
name='meterreading',
unique_together=set([('ts', 'household', 'meter_type')]),
),
]
|
tolbot/trail
|
trail/trail.py
|
# -*- coding: utf-8 -*-
"""trail.trail: provides entry point main()."""
import sys # for proper exit.
import os # for OS path checking.
import datetime
import calendar
__version__ = "0.1.0"
py3 = sys.version_info[0] > 2 # creates boolean value for test that Python major version > 2
global_flag_used = False
trail_db_path_file = "{}/.trail/.traildb".format(os.path.expanduser("~")) # home dir.
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
if py3:
choice = input().lower()
else:
choice = raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
class Trail(object):
def __init__(self):
self.created_timestamp = self._get_now_timestamp() # eg: "Thu 25AUG17 04:16:53"
self.content = "bla bla bla"
@staticmethod
def _get_now_timestamp():
now_date = datetime.date.today()
now_date_time = datetime.datetime.now()
dayname = calendar.day_name[now_date.weekday()][:3] # get 3 letter day name (of the week).
created_timestamp = '{} {:02d}{}{} {:02d}:{:02d}:{:02d}'.format(dayname,
now_date.day,
calendar.month_name[now_date.month][:3].upper(),
str(now_date.year)[2:],
now_date_time.hour,
now_date_time.minute,
now_date_time.second)
return created_timestamp
def get_trail_string(self):
return '{} {}'.format(self.created_timestamp, self.content)
def print_args():
print("Total arguments:")
total = len(sys.argv)
print(total)
for lll in sys.argv:
print(str(lll))
def get_trail_content_string_from_args():
global global_flag_used
# Check 1st arg if -g is used
if sys.argv[1] == "-g":
global_flag_used = True
d = 2 # delta = 2, to skip the -g argument.
else:
global_flag_used = False
d = 1
return " ".join(sys.argv[d:]) # join args with single space.
def get_tags_from_user_input():
global py3
# TODO: sanitise input.
q = "Enter some tags, separated by \":\" (optional) > "
if py3:
return input(q)
else:
return raw_input(q)
def trailpath_in_global_db(path_to_trail):
global trail_db_path_file
# Check global traildb
try:
with open(trail_db_path_file, "r") as f:
content = f.readlines()
except IOError:
print("Error accessing global .traildb in {}, for reading.".format(trail_db_path_file))
return False
trailsdb = [x.strip() for x in content] # list; remove \n at end of each line.
if path_to_trail.strip() in trailsdb:
return True
else:
return False
def write_trailpath_in_global_db(path_file_of_trail):
global trail_db_path_file
try:
with open(trail_db_path_file, "a") as f:
f.write(path_file_of_trail)
f.write("\n")
except IOError:
print("Error accessing global .traildb in {}, for appending.".format(trail_db_path_file))
else:
print(".traildb in {}, updated with {}.".format(trail_db_path_file, path_file_of_trail))
def remove_trailpath_from_global_db(path_file):
global trail_db_path_file
# Read global .traildb.
try:
with open(trail_db_path_file, "r") as f:
lines = f.readlines()
except IOError:
print("Error accessing global .traildb in {}, for reading.".format(trail_db_path_file))
return
# Re-open to re-write from scratch, without the selected path_file.
try:
with open(trail_db_path_file, "w") as f:
for line in lines:
if line.strip() != path_file.strip():
f.write(line.strip())
f.write("\n")
except IOError:
print("Error accessing global .traildb in {}, for writing.".format(trail_db_path_file))
return
else:
print(".traildb in {}, updated to remove {}.".format(trail_db_path_file, path_file.strip()))
def save_to_file(trail):
global global_flag_used
# Determine whether it's a local or global trail.
if global_flag_used: # TODO: these paths probably won't work on Windows.
# .trail FILE, NOT .traildb !
path_file_to_save = "{}/.trail/.trail".format(os.path.expanduser("~")) # home dir.
else:
# path_file_to_save = "./.trail"
# will expand ".", so we can use same string in the traildb.
path_file_to_save = "{}/.trail".format(os.getcwd()) # "current" dir.
# If .trail doesn't exist, create it and insert header.
if not os.path.isfile(path_file_to_save):
if global_flag_used:
tags_string = "global trails:"
else:
tags_string = get_tags_from_user_input()
try:
with open(path_file_to_save, "w") as f:
f.write("{}\n".format(tags_string))
except IOError:
print("Cannot write .trail file {}, (make sure you are not in \"home\" dir).".format(path_file_to_save))
# Usually, you cannot have dir+file with SAME name, under same (home) dir!
return
# TODO : handle above case better.
else:
print("New .trail file created, {}.".format(path_file_to_save))
# Append trail.
try:
with open(path_file_to_save, "a") as f:
f.write(trail.get_trail_string())
f.write("\n") # append newline at the end to avoid "partial lines" symbol in zsh;
except IOError:
print("Cannot access {}, to append trail.".format(path_file_to_save))
# print trail, if success.
print(trail.get_trail_string())
# If needed, write to global traildb.
if not trailpath_in_global_db(path_file_to_save):
write_trailpath_in_global_db(path_file_to_save)
def print_global_trail_file():
global_trail_path_file = "{}/.trail/.trail".format(os.path.expanduser("~"))
try:
with open(global_trail_path_file, 'r') as f:
content = f.read()
except IOError:
content = "{} not found.".format(global_trail_path_file)
print(content)
def print_local_trail_file():
try:
with open('./.trail', 'r') as f:
content = f.read()
except IOError:
print(".trail not found. - Displaying global .trail instead:")
print_global_trail_file()
return
else:
print(content)
def delete_local_trail():
global trail_db_path_file
path_file_to_delete = "{}/.trail".format(os.getcwd()) # "current" dir.
# if .trail file exists ... TODO: handle case where ~/.trail is a directory.
if os.path.isfile(path_file_to_delete):
if query_yes_no("Are you sure you want to delete {} ?".format(path_file_to_delete), default="no"):
# True == "yes"
# Delete local .trail file.
try:
os.remove(path_file_to_delete)
except IOError:
print("Cannot delete {}".format(path_file_to_delete))
return
else:
print("{} deleted.".format(path_file_to_delete))
# Delete appropriate entry from .traildb.
if trailpath_in_global_db(path_file_to_delete):
remove_trailpath_from_global_db(path_file_to_delete)
else:
print("Aborted.")
return
else:
print(".trail not found in {}.".format(path_file_to_delete))
def print_help(): # TODO: make this more dynamic.
helptext = """ trail help:
Save a new trail, in current directory (creates a ".trail" file), for example:
$ trail enter some text here ...
$ trail "Use quotes if your trail contains weird-f@r-bash characters!"
$ trail "is inspired by https://github.com/jonromero/trail "
Print trails found in current directory .trail file.
$ trail
Save a new "global" trail (in "~/.trail/.trail", requires dir to exist)
$ trail -g enter some text here ...
Print "global" trails.
$ trail -g
Delete all trails from current directory.
$ trail -D
"""
print(helptext)
def main():
global global_flag_used
# TODO: argparse & argcomplete is for the future.
if len(sys.argv) == 1: # when executing just ./trail-runner.py, without any args.
print_local_trail_file()
return
elif len(sys.argv) > 1: # when at least one argument is given.
if sys.argv[1] == "-g":
global_flag_used = True
if len(sys.argv) == 2: # if ONLY -g
print_global_trail_file()
return
elif sys.argv[1] == "-D":
if len(sys.argv) == 2: # if ONLY -D
delete_local_trail()
return
elif len(sys.argv) > 2:
print("\"-D\" does not accept additional options.")
return
elif sys.argv[1] == "-h" or sys.argv[1] == "--help":
if len(sys.argv) == 2: # if ONLY -h
print_help()
return
elif len(sys.argv) > 2:
print("\"-h\" does not accept additional options.")
return
# rest of options below:
trail_content_string = get_trail_content_string_from_args()
# Create a new trail
trail = Trail()
trail.content = trail_content_string
save_to_file(trail)
else: # should be unreachable.
print("Unknown Error.")
return
|
tolbot/trail
|
trail/__init__.py
|
# This file makes the "trail" directory a package.
|
tolbot/trail
|
trail-runner.py
|
<filename>trail-runner.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# could probably work with Python2.x but never tested ...
"""Convenience wrapper, for running trail.py (package form), directly from source tree.
Example usage:
./trail-runner.py enter some text here ... # Save new trail, in current directory
./trail-runner.py -g enter some text here ... # Save new "global" trail, found in ~/.trail
./trail-runner.py # Print current directory trails
./trail-runner.py -g # Print "global" trails.
"""
from trail.trail import main
if __name__ == '__main__':
main()
|
tolbot/trail
|
setup.py
|
# -*- coding: utf-8 -*-
"""setup.py: setuptools control."""
import re
from setuptools import setup
import os
version = re.search(
r'^__version__\s*=\s*"(.*)"',
open('trail/trail.py').read(),
re.M
).group(1)
print("Got version: {}".format(version))
with open("README.md", "rb") as f:
long_descr = f.read().decode("utf-8")
config_dir = "{}/.trail".format(os.path.expanduser("~")) # is the home dir; "global" trails are kept here.
print("config dir is: {}".format(config_dir))
setup(
name='trail',
packages=['trail'], # this must be the same as the name above
entry_points={
"console_scripts": ['trail = trail.trail:main']
},
data_files=[
(config_dir, [])
],
version=version,
description='Keep track of your thoughts.',
long_description=long_descr,
author='tolbot',
author_email='<EMAIL>',
url='https://github.com/tolbot/trail', # use the URL to the github repo
download_url='https://github.com/tolbot/trail/archive/{}.tar.gz'.format(version), # make sure proper github tags are added; see below
keywords=['todo', 'productivity', 'notes'], # arbitrary keywords
classifiers=[],
)
|
uehara1414/django-spa-template
|
{{cookiecutter.project_name}}/{{cookiecutter.app_name}}/apps.py
|
from django.apps import AppConfig
class AppNameConfig(AppConfig):
name = '{{cookiecutter.app_name}}'
|
uehara1414/django-spa-template
|
rename.py
|
<reponame>uehara1414/django-spa-template
import os
RENAME_PATTERNS = [
('__project_name__', '{{cookiecutter.project_name}}'),
('__app_name__', '{{cookiecutter.app_name}}'),
]
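# e.g. a file or directory named '__app_name__/views.py' is rewritten and renamed to '{{cookiecutter.app_name}}/views.py'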
IGNORE_FILES = ['rename.py', 'README.md']
IGNORE_DIRS = ['.git']
def replace(text):
output = text
for pattern in RENAME_PATTERNS:
output = output.replace(pattern[0], pattern[1])
return output, text != output
def is_ignore_dir(dirpath):
for dir in IGNORE_DIRS:
if dir in dirpath:
return True
return False
def main():
for dirpath, dirnames, filenames in list(os.walk('.'))[:]:
if is_ignore_dir(dirpath):
continue
for filename in filenames:
fullpath = os.path.join(dirpath, filename)
if filename in IGNORE_FILES:
continue
with open(fullpath, 'r') as fp:
text = fp.read()
text, changed = replace(text)
if changed:
with open(fullpath, 'w') as fp:
fp.write(text)
for dirname in dirnames:
fullpath = os.path.join(dirpath, dirname)
new_fullpath, changed = replace(fullpath)
if changed:
os.rename(fullpath, new_fullpath)
for filename in filenames:
if filename in IGNORE_FILES:
continue
fullpath = os.path.join(dirpath, filename)
new_fullpath, changed = replace(fullpath)
if changed:
os.rename(fullpath, new_fullpath)
if __name__ == '__main__':
main()
|
UMCUGenetics/mips
|
mips_log_table.py
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import os
if __name__ == "__main__":
# Parse arguments
if len(sys.argv) != 2:
print("python mips_log_table.py /path/to/log/files_dir")
sys.exit()
log_dir = os.path.abspath(sys.argv[1])
mips_data = {}
samples = []
# Open log files and store in dict
for log_file in sorted(os.listdir(log_dir)):
if log_file.endswith('.log'):
log_file_path = '{0}/{1}'.format(log_dir, log_file)
sample = log_file[:-4]
samples.append(sample)
with open(log_file_path, 'r') as f:
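# Skip the first 6 lines (summary statistics and the table header written by mips_trim_dedup.py);
# the remaining lines are tab-separated: mip_name, unique_read_count, dup_count, uuids.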
log_data = f.read().split('\n')[6:]
for mip_data in log_data:
mip_data = mip_data.split('\t')
if len(mip_data) != 4:
continue
else:
mip_name = mip_data[0]
read_count = mip_data[1]
dup_count = mip_data[2]
if mip_name not in mips_data.keys():
mips_data[mip_name] = {}
mips_data[mip_name][sample] = {'read_count': read_count, 'dup_count': dup_count}
## Print log table to stdout
print('MIP_Name', end="\t")
for sample in samples:
print('{0}-unique_read_count\t{0}-dup_count'.format(sample), end="\t")
print(' ')
for mip in mips_data.keys():
print(mip, end="\t")
for sample in samples:
print('{0}\t{1}'.format(mips_data[mip][sample]['read_count'], mips_data[mip][sample]['dup_count']), end="\t")
print(' ')
|
UMCUGenetics/mips
|
mips_trim_dedup.py
|
<gh_stars>1-10
#!/usr/bin/env python
import sys
import re
import argparse
from itertools import izip_longest, izip
import gzip
import contextlib
def reverse_complement(seq):
"""Return reverse complement of a dna sequence."""
bases_dict = {
'A': 'T', 'a': 't',
'C': 'G', 'g': 'c',
'G': 'C', 'c': 'g',
'T': 'A', 't': 'a'}
return "".join([bases_dict[base] for base in reversed(seq)])
def parse_design(design_file):
"""Parse design file and return mips dictonary."""
mips = {}
with open(design_file, 'r') as f:
header = f.readline().strip('\n').split('\t')
# Check header
if header[6] != 'ext_probe_sequence':
print "Error: column 7 in design file should be: ext_probe_sequence"
sys.exit()
elif header[10] != 'lig_probe_sequence':
print "Error: column 11 in design file should be: lig_probe_sequence"
sys.exit()
elif header[19] != 'mip_name':
print "Error: column 20 in design file should be: mip_name"
sys.exit()
# Parse MIPS
for line in f:
line = line.strip('\n').split('\t')
mip_name = line[19]
ext_probe = line[6]
ext_probe_revcom = reverse_complement(ext_probe)
lig_probe = line[10]
lig_probe_revcom = reverse_complement(lig_probe)
mips[mip_name] = {
'ext_probe': ext_probe, 'ext_probe_revcom': ext_probe_revcom,
'lig_probe': lig_probe, 'lig_probe_revcom': lig_probe_revcom,
'uuids': set({}),
'count': 0, 'dup_count': 0
}
return mips
def grouper(iterable, n, fillvalue=None):
"""Helper function to read in fasta file per 4 lines."""
args = [iter(iterable)] * n
return izip_longest(*args, fillvalue=fillvalue)
class FixedGzip(gzip.GzipFile):
"""Fix gzip class to work with contextlib.nested in python 2.6."""
def __enter__(self):
if self.fileobj is None:
raise ValueError("I/O operation on closed GzipFile object")
return self
def __exit__(self, *args):
self.close()
if __name__ == "__main__":
# Parse arguments
parser = argparse.ArgumentParser(formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=100, width=200),
description='Trim, merge and dedup fastq files from mips experiment. Assumes fastq naming convention: sample_flowcell_index_lane_R[12]_tag.fastq.gz and fastq files from one sample.')
parser.add_argument('-d', '--design_file', type=str, help='Mips design file', required=True)
parser.add_argument('-r1', '--r1_fastq', type=str, help='R1 fastq', required=True, nargs='*')
parser.add_argument('-r2', '--r2_fastq', type=str, help='R2 fastq', required=True, nargs='*')
parser.add_argument('-l', '--uuid_length', type=int, help='UUID length', required=True)
parser.add_argument('-ur', '--uuid_read', type=str, help='Read containing UUID', choices=['R1', 'R2'], required=True)
args = parser.parse_args()
# Check input fastq's
if len(args.r1_fastq) != len(args.r2_fastq):
parser.error("Arguments -r1/--r1_fastq and -r2/--r2_fastq should be of equal length.")
mips = parse_design(args.design_file)
unique_uuids = set({})
# Output files
fastq_1_file_out = args.r1_fastq[0].split('/')[-1]
fastq_2_file_out = args.r2_fastq[0].split('/')[-1]
if len(args.r1_fastq) > 1 and len(args.r2_fastq) > 1: # Multiple fastq's -> merge
fastq_1_file_out = re.sub('_L\d{3}_', '_LMergedTrimmedDedup_', fastq_1_file_out)
fastq_2_file_out = re.sub('_L\d{3}_', '_LMergedTrimmedDedup_', fastq_2_file_out)
with contextlib.nested(
FixedGzip(fastq_1_file_out, 'w'),
FixedGzip(fastq_2_file_out, 'w')
) as (write_f1, write_f2):
# Statistics variables
total = 0
match = 0
n_count = 0
duplicate = 0
# Loop over fastq files
for fastq_1_file, fastq_2_file in zip(args.r1_fastq, args.r2_fastq):
# Open input files
with contextlib.nested(
FixedGzip(fastq_1_file, 'r'),
FixedGzip(fastq_2_file, 'r'),
) as (f1, f2):
# Read in both fastq files per 4 lines [id, seq, +, qual]
for fastq_1_lines, fastq_2_lines in izip(grouper(f1, 4, ''), grouper(f2, 4, '')):
total += 1
for mip in mips:
if args.uuid_read == 'R1':
if fastq_2_lines[1].startswith(mips[mip]['ext_probe']) and fastq_1_lines[1].startswith(mips[mip]['lig_probe_revcom'], args.uuid_length):
match += 1
uuid = fastq_1_lines[1][0:args.uuid_length]
# skip uuid containing 'N'
if "N" in uuid.upper():
n_count += 1
break
# Check duplicate reads, uuid must be unique per mip.
elif uuid in mips[mip]['uuids']:
duplicate += 1
mips[mip]['dup_count'] += 1
else:
mips[mip]['uuids'].add(uuid)
mips[mip]['count'] += 1
# Trim fastq
fastq_1_lines = list(fastq_1_lines)
fastq_2_lines = list(fastq_2_lines)
fastq_1_lines[1] = fastq_1_lines[1][len(mips[mip]['lig_probe_revcom'])+args.uuid_length:] # seq
fastq_1_lines[3] = fastq_1_lines[3][len(mips[mip]['lig_probe_revcom'])+args.uuid_length:] # qual
fastq_2_lines[1] = fastq_2_lines[1][len(mips[mip]['ext_probe']):] # seq
fastq_2_lines[3] = fastq_2_lines[3][len(mips[mip]['ext_probe']):] # qual
# Print fastq to new trimmed and dedupped fastq's.
write_f1.write(''.join(fastq_1_lines))
write_f2.write(''.join(fastq_2_lines))
# Track unique uuids in sample
if uuid not in unique_uuids:
unique_uuids.add(uuid)
break # A read can only belong to one mip thus break.
if args.uuid_read == 'R2':
if fastq_2_lines[1].startswith(mips[mip]['ext_probe'], args.uuid_length) and fastq_1_lines[1].startswith(mips[mip]['lig_probe_revcom']):
match += 1
uuid = fastq_2_lines[1][0:args.uuid_length]
# skip uuid containing 'N' or 'n'
if "N" in uuid.upper():
n_count += 1
break
# Check duplicate reads, uuid must be unique per mip.
elif uuid in mips[mip]['uuids']:
duplicate += 1
mips[mip]['dup_count'] += 1
else:
mips[mip]['uuids'].add(uuid)
mips[mip]['count'] += 1
# Trim fastq
fastq_1_lines = list(fastq_1_lines)
fastq_2_lines = list(fastq_2_lines)
fastq_1_lines[1] = fastq_1_lines[1][len(mips[mip]['lig_probe_revcom']):] # seq
fastq_1_lines[3] = fastq_1_lines[3][len(mips[mip]['lig_probe_revcom']):] # qual
fastq_2_lines[1] = fastq_2_lines[1][len(mips[mip]['ext_probe'])+args.uuid_length:] # seq
fastq_2_lines[3] = fastq_2_lines[3][len(mips[mip]['ext_probe'])+args.uuid_length:] # qual
# Print fastq to new trimmed and dedupped fastq's.
write_f1.write(''.join(fastq_1_lines))
write_f2.write(''.join(fastq_2_lines))
# Track unique uuids in sample
if uuid not in unique_uuids:
unique_uuids.add(uuid)
break # A read can only belong to one mip thus break.
print 'Match with mip:', match
print 'Reads with N in uuid', n_count
print 'Duplicate reads', duplicate
print 'total reads', total
print 'sample_unique_uuid_count', len(unique_uuids)
print 'mip\tunique_read_count\tdup_count\tuuids'
for mip in mips:
print '{0}\t{1}\t{2}\t{3}'.format(mip, mips[mip]['count'], mips[mip]['dup_count'], ','.join(mips[mip]['uuids']))
|
UMCUGenetics/mips
|
qsub_mips_trim_dedup.py
|
#!/usr/bin/env python
import sys
import os
import fnmatch
import subprocess
def find_fastq(fastq_pattern, raw_data_dir):
for file in os.listdir(raw_data_dir):
if fnmatch.fnmatch(file, fastq_pattern):
return os.path.abspath('{0}/{1}'.format(raw_data_dir, file))
if __name__ == "__main__":
# Parse arguments
if len(sys.argv) != 6:
print "python qsub_mips_trim_dedup.py /path/to/design.txt uuid_length uuid_read(R1,R2) /path/to/raw_data/sequencer/run/Data/Intensities/BaseCalls /path/to/output/folder"
sys.exit()
design_file = os.path.abspath(sys.argv[1])
uuid_length = int(sys.argv[2])
uuid_read = sys.argv[3]
raw_data_dir = sys.argv[4]
output_dir = sys.argv[5]
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Find all samples in raw data dir
samples = set([])
for file in os.listdir(raw_data_dir):
if file.endswith(".fastq.gz"):
sample = file.split('_')[0]
samples.add(sample)
# Trim and dedup per sample
for sample in samples:
log_file = "{0}/{1}.log".format(output_dir, sample)
r1_fastq = find_fastq("{0}_*_R1_*.fastq.gz".format(sample), raw_data_dir)
r2_fastq = find_fastq("{0}_*_R2_*.fastq.gz".format(sample), raw_data_dir)
mips_trim_dedup_path = os.path.dirname(os.path.realpath(__file__))
# Generate command and submit to cluster
command = "python {0}/mips_trim_dedup.py --design_file {1} --uuid_length {2} --uuid_read {3} -r1 {4} -r2 {5}".format(
mips_trim_dedup_path,
design_file,
uuid_length,
uuid_read,
r1_fastq,
r2_fastq)
subprocess.call("echo {0} | qsub -pe threaded 1 -l h_rt=1:0:0 -l h_vmem=2G -wd {1} -e {2} -o {2} -N mips_{3}".format(command, output_dir, log_file, sample), shell=True)
|
UMCUGenetics/mips
|
mips_uuid_table.py
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import os
if __name__ == "__main__":
# Parse arguments
if len(sys.argv) != 2:
print("python mips_uuid_table.py /path/to/log/files_dir")
sys.exit()
log_dir = os.path.abspath(sys.argv[1])
uuid_data = {}
# Open log files and store in dict
for log_file in sorted(os.listdir(log_dir)):
if log_file.endswith('.log'):
log_file_path = '{0}/{1}'.format(log_dir, log_file)
with open(log_file_path, 'r') as f:
log_data = f.read().split('\n')[6:]
for mip_data in log_data:
mip_data = mip_data.split('\t')
if len(mip_data) != 4:
continue
else:
uuids = mip_data[3].split(',')
for uuid in uuids:
if uuid != '':
if uuid not in uuid_data:
uuid_data[uuid] = 1
else:
uuid_data[uuid] += 1
# Print results
print("UUID\tUUID_Count")
for uuid in uuid_data:
print('{}\t{}'.format(uuid, uuid_data[uuid]))
|
UMCUGenetics/mips
|
qsub_sample_dirs_mips_trim_dedup.py
|
#!/usr/bin/env python
import sys
import os
import subprocess
import glob
if __name__ == "__main__":
# Parse arguments
if len(sys.argv) != 6:
print "python qsub_sample_dirs_mips_trim_dedup.py /path/to/design.txt uuid_length uuid_read(R1,R2) /path/to/raw_data/sequencer/run/Unaligned/project /path/to/output/folder"
sys.exit()
design_file = os.path.abspath(sys.argv[1])
uuid_length = int(sys.argv[2])
uuid_read = sys.argv[3]
raw_data_dir = sys.argv[4]
output_dir = sys.argv[5]
mips_trim_dedup_path = os.path.dirname(os.path.realpath(__file__))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Trim and dedup per sample
for sample_dir in sorted(os.listdir(raw_data_dir)):
sample_dir_path = raw_data_dir + "/" + sample_dir
r1_fastq_paths = []
r2_fastq_paths = []
# Find lanes
for r1_fastq in sorted(glob.glob('{0}/*R1_*.fastq.gz'.format(sample_dir_path))):
r1_fastq_paths.append(os.path.abspath(r1_fastq))
r2_fastq_paths.append(os.path.abspath(r1_fastq).replace('_R1_', '_R2_'))
log_file = "{0}/{1}.log".format(output_dir, sample_dir)
# Generate command and submit to cluster
command = "python {0}/mips_trim_dedup.py --design_file {1} --uuid_length {2} --uuid_read {3} -r1 {4} -r2 {5}".format(
mips_trim_dedup_path,
design_file,
uuid_length,
uuid_read,
' '.join(r1_fastq_paths),
' '.join(r2_fastq_paths))
subprocess.call("echo {0} | qsub -pe threaded 1 -l h_rt=1:0:0 -l h_vmem=2G -wd {1} -e {2} -o {2} -N mips_{3}".format(command, output_dir, log_file, sample_dir), shell=True)
|
yosemiteyss/lru_cache
|
20514332.py
|
<filename>20514332.py<gh_stars>0
import sys
import time
OUTPUT_FILE = '20514332-output.txt'
# Query
QUERY_CHECK = 'Check'
QUERY_ADD = 'Add'
QUERY_DELETE = 'Delete'
class Cache:
def __init__(self, size):
self.size = size
self.queue = []
def __reorder_queue(self):
self.queue = sorted(self.queue, key=lambda entry: entry.accessed_at, reverse=True)
def is_full(self):
return len(self.queue) >= self.size
def is_empty(self):
return len(self.queue) == 0
def get_mru(self):
return self.queue[0]
def detach_lru(self):
return self.queue.pop()
def contains(self, data):
return len([entry for entry in self.queue if entry.data == data]) != 0
def add(self, entry):
if self.is_full():
raise Exception('cache is full')
self.queue.append(entry)
self.__reorder_queue()
def remove(self, data):
remove_entry = [entry for entry in self.queue if entry.data == data][0]
entry_index = self.queue.index(remove_entry)
self.queue.pop(entry_index)
self.__reorder_queue()
class CacheEntry:
def __init__(self, data):
self.data = data
self.accessed_at = time.time()
# Caches
l1_cache = Cache(8)
l2_cache = Cache(16)
l3_cache = Cache(32)
storage = []
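# Cache hierarchy: L1 (8 entries) -> L2 (16) -> L3 (32) -> unbounded storage list.
# A successful 'Check' promotes the entry to L1; evicted LRU entries cascade one level down
# (and from a full L3 back into storage).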
def cache_str(frm):
return ','.join([str(data) for data in map(lambda entry: entry.data, frm.queue)])
def write_output(outfile, line, newline=True):
result = f'{line}\n' if newline else line
outfile.write(result)
def move_mru(frm, to):
frm_mru = frm.get_mru()
frm.remove(frm_mru.data)
to.add(frm_mru)
def move_lru(frm, to):
frm_lru = frm.detach_lru()
to.add(frm_lru)
def delete(outfile, data):
if l1_cache.contains(data):
l1_cache.remove(data)
if not l2_cache.is_empty():
# Move L2's mru entry to L1
move_mru(l2_cache, l1_cache)
if not l3_cache.is_empty():
# Move L3's mru entry to L2
move_mru(l3_cache, l2_cache)
write_output(outfile, 'Deleted')
elif l2_cache.contains(data):
l2_cache.remove(data)
if not l3_cache.is_empty():
# Move L3's mru entry to L2
move_mru(l3_cache, l2_cache)
write_output(outfile, 'Deleted')
elif l3_cache.contains(data):
l3_cache.remove(data)
write_output(outfile, 'Deleted')
elif data in storage:
storage.remove(data)
write_output(outfile, 'Deleted')
else:
write_output(outfile, 'Already absent')
def add(outfile, data):
if l1_cache.contains(data) or l2_cache.contains(data) or l3_cache.contains(data) or data in storage:
write_output(outfile, 'Already present')
else:
# Add to storage, caches are left untouched
storage.append(data)
write_output(outfile, 'Added')
def check(outfile, data):
if l1_cache.contains(data):
# Re-add check entry to L1
l1_cache.remove(data)
l1_cache.add(CacheEntry(data))
write_output(outfile, 'Found in L1')
elif l2_cache.contains(data):
# Remove check entry from L2
l2_cache.remove(data)
# Move L1's lru entry to L2
move_lru(l1_cache, l2_cache)
# Add check entry to L1
l1_cache.add(CacheEntry(data))
write_output(outfile, 'Found in L2')
elif l3_cache.contains(data):
# Remove check entry from L3
l3_cache.remove(data)
# Move L2's lru entry to L3
move_lru(l2_cache, l3_cache)
# Move L1's lru entry to L2
move_lru(l1_cache, l2_cache)
# Add check entry to L1
l1_cache.add(CacheEntry(data))
write_output(outfile, 'Found in L3')
elif data in storage:
# Remove check entry from storage
storage.remove(data)
if l1_cache.is_full() and l2_cache.is_full() and l3_cache.is_full():
# Move L3's lru entry back to storage
l3_lru = l3_cache.detach_lru()
storage.append(l3_lru.data)
move_lru(l2_cache, l3_cache)
move_lru(l1_cache, l2_cache)
elif l1_cache.is_full() and l2_cache.is_full():
move_lru(l2_cache, l3_cache)
move_lru(l1_cache, l2_cache)
elif l1_cache.is_full():
move_lru(l1_cache, l2_cache)
# Add check entry to L1
l1_cache.add(CacheEntry(data))
write_output(outfile, 'Found in storage')
else:
write_output(outfile, 'Not found')
def main(argv):
input_file_name = argv[1]
with open(input_file_name, 'r') as input_file, open(OUTPUT_FILE, 'a') as output_file:
for line in input_file:
inputs = line.split()
query = inputs[0]
data = int(inputs[1])
# Execute query
switcher = {
QUERY_CHECK: check,
QUERY_ADD: add,
QUERY_DELETE: delete
}
switcher[query](output_file, data)
# Print cache result
write_output(output_file, cache_str(l1_cache))
write_output(output_file, cache_str(l2_cache))
write_output(output_file, cache_str(l3_cache), newline=False)
if __name__ == "__main__":
main(sys.argv)
|
yosemiteyss/lru_cache
|
cache_test.py
|
import unittest
import time
from cache import Cache
from cache_entry import CacheEntry
class CacheTest(unittest.TestCase):
def test_is_full(self):
cache = Cache(2)
cache.add(CacheEntry('A'))
self.assertFalse(cache.is_full())
cache.add(CacheEntry('B'))
self.assertTrue(cache.is_full())
def test_is_empty(self):
cache = Cache(2)
self.assertTrue(cache.is_empty())
cache.add(CacheEntry('A'))
self.assertFalse(cache.is_empty())
def test_get_mru(self):
cache = Cache(2)
data_1 = 'A'
cache.add(CacheEntry(data_1))
self.assertEqual(cache.get_mru().data, data_1)
data_2 = 'B'
cache.add(CacheEntry(data_2))
self.assertEqual(cache.get_mru().data, data_2)
def test_contains(self):
cache = Cache(2)
data = 'A'
self.assertFalse(cache.contains(data))
cache.add(CacheEntry(data))
self.assertTrue(cache.contains(data))
def test_add(self):
cache = Cache(2)
entry_1 = CacheEntry('A')
cache.add(entry_1)
self.assertListEqual(cache.queue, [entry_1])
entry_2 = CacheEntry('B')
cache.add(entry_2)
self.assertListEqual(cache.queue, [entry_2, entry_1])
entry_3 = CacheEntry('C')
self.assertRaises(Exception, cache.add, entry_3)
def test_detach_lru(self):
cache = Cache(3)
entry_1 = CacheEntry('A')
cache.add(entry_1)
lru = cache.detach_lru()
self.assertEqual(entry_1, lru)
entry_2 = CacheEntry('B')
cache.add(entry_2)
lru = cache.detach_lru()
self.assertEqual(entry_2, lru)
def test_remove(self):
cache = Cache(3)
entry_1 = CacheEntry('A')
cache.add(entry_1)
entry_2 = CacheEntry('B')
cache.add(entry_2)
entry_3 = CacheEntry('C')
cache.add(entry_3)
cache.remove(entry_2.data)
self.assertListEqual(cache.queue, [entry_3, entry_1])
cache.remove(entry_1.data)
self.assertListEqual(cache.queue, [entry_3])
if __name__ == '__main__':
unittest.main()
|
yosemiteyss/lru_cache
|
cache_entry.py
|
import time
class CacheEntry:
def __init__(self, data):
self.data = data
self.accessed_at = time.time()
|
yosemiteyss/lru_cache
|
lru.py
|
<reponame>yosemiteyss/lru_cache
import sys
import time
from cache import Cache
from cache_entry import CacheEntry
OUTPUT_FILE = '20514332-output.txt'
# Query
QUERY_CHECK = 'Check'
QUERY_ADD = 'Add'
QUERY_DELETE = 'Delete'
# Caches
l1_cache = Cache(8)
l2_cache = Cache(16)
l3_cache = Cache(32)
storage = []
def cache_str(frm):
    return ','.join(str(entry.data) for entry in frm.queue)
def write_output(outfile, line):
outfile.write(f'{line}\n')
def move_mru(frm, to):
frm_mru = frm.get_mru()
frm.remove(frm_mru.data)
to.add(frm_mru)
def move_lru(frm, to):
frm_lru = frm.detach_lru()
to.add(frm_lru)
def delete(outfile, data):
if l1_cache.contains(data):
l1_cache.remove(data)
if not l2_cache.is_empty():
# Move L2's mru entry to L1
move_mru(l2_cache, l1_cache)
if not l3_cache.is_empty():
# Move L3's mru entry to L2
move_mru(l3_cache, l2_cache)
write_output(outfile, 'Deleted')
elif l2_cache.contains(data):
l2_cache.remove(data)
if not l3_cache.is_empty():
# Move L3's mru entry to L2
move_mru(l3_cache, l2_cache)
write_output(outfile, 'Deleted')
elif l3_cache.contains(data):
l3_cache.remove(data)
write_output(outfile, 'Deleted')
elif data in storage:
storage.remove(data)
write_output(outfile, 'Deleted')
else:
write_output(outfile, 'Already absent')
def add(outfile, data):
if l1_cache.contains(data) or l2_cache.contains(data) or l3_cache.contains(data) or data in storage:
write_output(outfile, 'Already present')
else:
# Add to storage, caches are left untouched
storage.append(data)
write_output(outfile, 'Added')
def check(outfile, data):
if l1_cache.contains(data):
# Re-add check entry to L1
l1_cache.remove(data)
l1_cache.add(CacheEntry(data))
write_output(outfile, 'Found in L1')
elif l2_cache.contains(data):
# Remove check entry from L2
l2_cache.remove(data)
# Move L1's lru entry to L2
move_lru(l1_cache, l2_cache)
# Add check entry to L1
l1_cache.add(CacheEntry(data))
write_output(outfile, 'Found in L2')
elif l3_cache.contains(data):
# Remove check entry from L3
l3_cache.remove(data)
# Move L2's lru entry to L3
move_lru(l2_cache, l3_cache)
# Move L1's lru entry to L2
move_lru(l1_cache, l2_cache)
# Add check entry to L1
l1_cache.add(CacheEntry(data))
write_output(outfile, 'Found in L3')
elif data in storage:
# Remove check entry from storage
storage.remove(data)
if l1_cache.is_full() and l2_cache.is_full() and l3_cache.is_full():
# Move L3's lru entry back to storage
l3_lru = l3_cache.detach_lru()
storage.append(l3_lru.data)
move_lru(l2_cache, l3_cache)
move_lru(l1_cache, l2_cache)
elif l1_cache.is_full() and l2_cache.is_full():
move_lru(l2_cache, l3_cache)
move_lru(l1_cache, l2_cache)
elif l1_cache.is_full():
move_lru(l1_cache, l2_cache)
# Add check entry to L1
l1_cache.add(CacheEntry(data))
write_output(outfile, 'Found in storage')
else:
write_output(outfile, 'Not found')
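# Promotion policy of check(): a hit below L1 is re-inserted at the front of L1, and the LRU
# entry of each full level above the hit cascades one level down (L1 -> L2 -> L3 -> storage).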
def main(argv):
input_file_name = argv[1]
with open(input_file_name, 'r') as input_file, open(OUTPUT_FILE, 'a') as output_file:
for line in input_file:
inputs = line.split()
query = inputs[0]
data = int(inputs[1])
# Execute query
switcher = {
QUERY_CHECK: check,
QUERY_ADD: add,
QUERY_DELETE: delete
}
switcher[query](output_file, data)
# Print cache result
write_output(output_file, cache_str(l1_cache))
write_output(output_file, cache_str(l2_cache))
write_output(output_file, cache_str(l3_cache))
if __name__ == "__main__":
main(sys.argv)
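# Input format (inferred from main() above): one query per line, e.g.
#   Check 5
#   Add 12
#   Delete 5
# Each query appends its outcome plus the current contents of L1, L2 and L3
# to 20514332-output.txt.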
|
yosemiteyss/lru_cache
|
cache.py
|
<reponame>yosemiteyss/lru_cache
import time
from cache_entry import CacheEntry
class Cache:
def __init__(self, size):
self.size = size
self.queue = []
def __reorder_queue(self):
self.queue = sorted(self.queue, key=lambda entry: entry.accessed_at, reverse=True)
def is_full(self):
return len(self.queue) >= self.size
def is_empty(self):
return len(self.queue) == 0
def get_mru(self):
return self.queue[0]
def detach_lru(self):
return self.queue.pop()
def contains(self, data):
        return any(entry.data == data for entry in self.queue)
def add(self, entry):
if self.is_full():
raise Exception('cache is full')
self.queue.append(entry)
self.__reorder_queue()
def remove(self, data):
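        # Assumes the entry is present; callers check contains() before calling remove().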
remove_entry = [entry for entry in self.queue if entry.data == data][0]
entry_index = self.queue.index(remove_entry)
self.queue.pop(entry_index)
self.__reorder_queue()
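# Minimal usage sketch (illustrative only, mirroring cache_test.py): the queue is kept
# most-recently-used first, so get_mru() reads index 0 and detach_lru() pops the tail.
if __name__ == '__main__':
    demo = Cache(2)
    demo.add(CacheEntry('A'))
    demo.add(CacheEntry('B'))
    print(demo.is_full())                # True: capacity 2 reached
    print(demo.contains('A'))            # True
    print([e.data for e in demo.queue])  # MRU-first, typically ['B', 'A']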
|
robbierobinette/rcv-tensorflow
|
DistrictData.py
|
import csv
from typing import List
from CombinedPopulation import CombinedPopulation
from PopulationGroup import PopulationGroup, Democrats, Republicans, Independents
class DistrictVotingRecord:
def __init__(self,
district: str,
incumbent: str,
expected_lean: float,
d1: float, r1: float,
d2: float, r2: float):
self.district = district
self.incumbent = incumbent
self.expected_lean = expected_lean
self.d1 = d1
self.r1 = r1
self.d2 = d2
self.r2 = r2
l1 = .5 - d1 / (d1 + r1)
l2 = .5 - d2 / (d2 + r2)
self.lean = 100 * (l1 + l2) / 2
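        # Worked example with hypothetical vote shares: d1=60, r1=40 gives l1 = -0.10 and
        # d2=55, r2=45 gives l2 = -0.05, so lean = 100 * (-0.15) / 2 = -7.5
        # (negative values lean Democratic, positive values lean Republican).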
def print(self) -> None:
print("%6s %25s % 5.2f" % (self.district, self.incumbent, self.lean))
def population(self, partisanship: float, skew_factor: float, stddev: float) -> CombinedPopulation:
s = self
r_pct = (s.r1 + s.r2) / 2 / 100
d_pct = (s.d1 + s.d2) / 2 / 100
i_weight = .20
r_weight = max(0.05, (1 - i_weight) * r_pct)
d_weight = max(0.05, (1 - i_weight) * d_pct)
skew = (r_weight - d_weight) / 2.0 * skew_factor * 100
rep = PopulationGroup(Republicans, partisanship + skew, stddev, r_weight, 12)
dem = PopulationGroup(Democrats, -partisanship + skew, stddev, d_weight, -12)
ind = PopulationGroup(Independents, 0 + skew, stddev, i_weight, 0)
return CombinedPopulation([rep, dem, ind])
class DistrictData:
def __init__(self, path: str):
self.path = path
self.dvr = {}
with open(path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",")
for row in csv_reader:
if row[0] != 'district':
dvr = self.parse_row(row)
self.dvr[dvr.district] = dvr
def parse_row(self, row: List[str]) -> DistrictVotingRecord:
if row[2] == 'EVEN':
lean = 0
elif row[2][0] == 'R':
lean = float(row[2][2:])
else:
lean = -float(row[2][2:])
d1 = float(row[3])
r1 = float(row[4])
if row[5] == 'null':
d2 = d1
r2 = r1
else:
d2 = float(row[5])
r2 = float(row[6])
return DistrictVotingRecord(row[0], row[1], lean, d1, r1, d2, r2)
def main():
dd = DistrictData("data-5vPn3.csv")
print("got dd")
for k, v in dd.dvr.items():
v.print()
if __name__ == "__main__":
main()
|
robbierobinette/rcv-tensorflow
|
HeadToHeadElection.py
|
from typing import List, Set, Tuple
import numpy as np
from Candidate import Candidate
from Election import Election, BallotIter
from ElectionResult import ElectionResult
class HeadToHeadResult(ElectionResult):
def __init__(self, oc, rm):
super().__init__(oc)
self.rm = rm
# def __init__(self, ordered_candidates: List[Candidate], result_matrix: npt.ArrayLike[float]):
# super().__init__(ordered_candidates)
# self.result_matrix = result_matrix
class HeadToHeadElection(Election):
def __init__(self, ballots: BallotIter, candidates: Set[Candidate]):
super().__init__(ballots, candidates)
self.candidate_list = list(self.candidates)
self.indices = {}
for i in range(len(self.candidate_list)):
self.indices[self.candidate_list[i]] = i
self.result_matrix = self.compute_matrix()
def result(self) -> ElectionResult:
oc = self.minimax(self.candidates)
return HeadToHeadResult(oc, self.result_matrix)
def compute_matrix(self) -> np.array:
n_candidates = len(self.candidates)
results = np.zeros([n_candidates, n_candidates])
for b in self.ballots:
not_seen: Set[Candidate] = self.candidates.copy()
for cs1 in b.ordered_candidates:
c1 = cs1.candidate
not_seen.remove(c1)
row_i = self.indices[c1]
for c2 in not_seen:
col_i = self.indices[c2]
results[row_i, col_i] += 1
return results
def delta(self, c1: Candidate, c2: Candidate) -> float:
r = self.indices[c1]
c = self.indices[c2]
return self.result_matrix[r, c] - self.result_matrix[c, r]
def max_loss(self, candidate: Candidate, active_candidates: Set[Candidate]) -> float:
opponents = active_candidates.copy()
opponents.remove(candidate)
losses = [-self.delta(candidate, c2) for c2 in opponents]
return max(losses)
def minimax(self, active_candidates: Set[Candidate]) -> List[Candidate]:
if len(active_candidates) == 1:
return list(active_candidates)
ac = active_candidates.copy()
max_losses: List[Tuple[Candidate, float]] = [(ci, self.max_loss(ci, ac)) for ci in ac]
max_losses.sort(key=lambda x: x[1])
winner = max_losses[0][0]
ac.remove(winner)
return [winner] + self.minimax(ac)
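# Worked sketch with hypothetical margins: suppose delta(A, B) = +10, delta(A, C) = -4 and
# delta(B, C) = +2. Then max_loss(A) = 4, max_loss(B) = 10 and max_loss(C) = 2, so minimax()
# returns [C, A, B]: the winner is the candidate whose worst head-to-head defeat is smallest.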
|
robbierobinette/rcv-tensorflow
|
DefaultConfigOptions.py
|
<filename>DefaultConfigOptions.py
from PopulationGroup import *
from CombinedPopulation import *
unit_election_config = ElectionConfig(
partisanship=0,
stddev=1,
skew_factor=0,
primary_skew=0,
party_loyalty=0,
independent_bonus=0,
wasted_vote_factor=0,
uncertainty=.20
)
dw_nominate_election_config = ElectionConfig(
partisanship=30,
stddev=12,
skew_factor=.5,
primary_skew=12,
party_loyalty=30,
independent_bonus=20,
wasted_vote_factor=10,
uncertainty=15)
_cc = dw_nominate_election_config
_population_groups = [
PopulationGroup(Republicans, _cc.partisanship, _cc.stddev, .4, _cc.primary_skew),
PopulationGroup(Independents, 0, _cc.stddev, .4, 0),
PopulationGroup(Democrats, -_cc.partisanship, _cc.stddev, .4, -_cc.primary_skew),
]
combined_population = CombinedPopulation(_population_groups)
|
robbierobinette/rcv-tensorflow
|
Ballot.py
|
<reponame>robbierobinette/rcv-tensorflow
from typing import List, Set
from CandidateScore import CandidateScore
from Candidate import Candidate
from Voter import Voter
from ElectionConfig import ElectionConfig
class Ballot:
def __init__(self, voter: Voter, candidates: List[Candidate], config: ElectionConfig):
self.voter = voter
scores = list(map(lambda c: voter.score(c, config), candidates))
cs = list(map(lambda c: CandidateScore(c[0], c[1]), zip(candidates, scores)))
cs.sort(key=lambda c: c.score, reverse=True)
self.ordered_candidates = cs
def active_choice(self, active_candidates: Set[Candidate]) -> Candidate:
for c in self.ordered_candidates:
if c.candidate in active_candidates:
return c.candidate
        assert False, "no candidate in active candidates"  # unreachable unless the ballot lists no active candidate
def print(self):
for cs in self.ordered_candidates:
print("\t %6s ideology: % 7.2f score: % 7.2f" % (cs.candidate.name, cs.candidate.ideology.vec[0], cs.score))
|
robbierobinette/rcv-tensorflow
|
Tensor.py
|
import tensorflow as tf
Tensor = tf.types.experimental.TensorLike
|
robbierobinette/rcv-tensorflow
|
CandidateModel.py
|
<filename>CandidateModel.py
from CandidateNetwork import CandidateNetwork
from Candidate import Candidate
import numpy as np
import tensorflow as tf
from typing import List
from ActionMemory import ActionMemory
from Ideology import Ideology
import random
import datetime as datetime
from Timings import Timings
from Tensor import Tensor
import pickle
class CandidateModel:
def __init__(self,
ideology_bins: int,
ideology_dim: int,
n_hidden: int,
n_latent: int,
learn_rate: float):
super().__init__()
self.ideology_bins = ideology_bins
self.ideology_dim = ideology_dim
self.n_hidden = n_hidden
self.n_latent = n_latent
self.learn_rate = learn_rate
self.model = CandidateNetwork(ideology_bins=ideology_bins,
ideology_dim=ideology_dim,
n_latent=n_latent,
width=n_hidden)
self.optimizer = tf.keras.optimizers.Adam(learning_rate=learn_rate)
self.global_step = 0
self.memory = ActionMemory(1024, ideology_dim, ideology_dim)
self.action_width = self.ideology_bins * self.ideology_dim
self.ideology_range = 2
self.bin_width = self.ideology_range / self.ideology_bins
print("bin_width % .3f" % self.bin_width)
# this is the dimension of the input vector for a single opponent. It can be the same as ideology_dim, or
# it could be ideology_dim * ideology_bins for a one_hot representation of ideology
self.input_width = ideology_bins * ideology_dim
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
self.log_dir = 'logs/' + current_time + '/train'
self.summary_writer = tf.summary.create_file_writer(self.log_dir)
self.model_path = ""
def save_to_file(self, path: str):
self.model_path = path + ".model"
self.model.save(self.model_path)
with open(path, "wb") as f:
pickle.dump(self, f)
def __getstate__(self):
state = self.__dict__.copy()
# Don't pickle the model
del state["model"]
del state["memory"]
del state["optimizer"]
del state["summary_writer"]
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.memory = ActionMemory(100 * 1000, self.ideology_dim, self.ideology_dim)
self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.learn_rate)
self.model = tf.keras.models.load_model(self.model_path)
self.summary_writer = tf.summary.create_file_writer(self.log_dir)
def ready(self) -> bool:
return self.memory.ready()
def train(self, batch_size: int):
for depth in self.memory.depths():
state, action, reward = self.memory.get_batch(depth, batch_size)
self.update(state, action, reward)
self.global_step += 1
def update(self, input_batch: np.ndarray, actions: np.ndarray, reward: np.ndarray):
batch_size = input_batch.shape[0]
one_hot = tf.one_hot(actions, depth=self.ideology_bins)
# flatten the one_hot array out to match the output of the network
# each row will have 'ideology_dim' hot elements, one in each chunk of 'ideology_bins'
one_hot = tf.reshape(one_hot, shape=(batch_size, self.action_width))
with tf.GradientTape() as tape:
y = self.model(input_batch, training=True)
rewards = tf.ones(shape=(batch_size, self.action_width)) * reward
deltas = tf.square(y - rewards) * one_hot
loss = tf.reduce_sum(deltas)
with self.summary_writer.as_default():
tf.summary.scalar('loss', loss, step=self.global_step)
        grads = tape.gradient(loss, self.model.trainable_variables)
        filtered_grad_vars = [(grad, var) for (grad, var) in zip(grads, self.model.trainable_variables) if
                              grad is not None]
        # tf.keras optimizers take no global_step argument; the step counter is tracked separately.
        self.optimizer.apply_gradients(filtered_grad_vars)
def convert_ideology_to_input(self, ideology: Ideology) -> Tensor:
return self.convert_ideology_to_input_vec(ideology)
def convert_ideology_to_input_vec(self, ideology: Ideology) -> Tensor:
return ideology.vec.astype(dtype=np.float32)
def convert_ideology_to_input_onehot(self, ideology: Ideology) -> Tensor:
float_vec = (ideology.vec / self.ideology_range + .5) * self.ideology_bins
one_hot = tf.one_hot(tf.cast(float_vec, tf.dtypes.int32), depth=self.ideology_bins)
return tf.reshape(one_hot, shape=(self.input_width))
def convert_ideology_to_int(self, ideology: float):
return int((ideology + self.ideology_range / 2) / self.ideology_range * self.ideology_bins)
# the action vector is a vector of integers corresponding to the actions
# taken where each action is a location on the i'th dimension of the
# ideological spectrum
    # i.e. with ideology_bins=200 an ideology of [0, 0, 0] maps to [100, 100, 100]; with 64 bins it maps to [32, 32, 32]
#
def convert_ideology_to_action_vec(self, ideology: Ideology) -> Tensor:
ii = [self.convert_ideology_to_int(i) for i in ideology.vec]
return tf.constant(ii, dtype=tf.dtypes.int32)
def get_state_from_opponents(self, opponents: List[Candidate]) -> Tensor:
# shape is (observation_id, ideology_representation_vec)
if len(opponents) != 0:
candidate_observations = [self.convert_ideology_to_input(o.ideology) for o in opponents]
state = np.stack(candidate_observations)
else:
state = tf.zeros(shape=(0, self.input_width), dtype=tf.dtypes.float32)
return tf.expand_dims(state, 0)
def add_sample_from_candidates(self, candidate: Candidate, opponents: List[Candidate], winner: Candidate):
state = self.get_state_from_opponents(opponents)
action = self.convert_ideology_to_action_vec(candidate.ideology)
action = tf.expand_dims(action, 0)
if winner == candidate:
reward = tf.ones(shape=(1, 1), dtype=tf.dtypes.float32)
else:
reward = tf.zeros(shape=(1, 1), dtype=tf.dtypes.float32)
self.memory.add_sample(state, action, reward)
def choose_ideology(self, opponents: List[Candidate]) -> Tensor:
state = self.get_state_from_opponents(opponents)
ideology_pred = self.model.call(state, training=True)
ideology_hot = tf.reshape(ideology_pred, shape=(self.ideology_dim, self.ideology_bins))
ideology_indices = tf.cast(tf.argmax(ideology_hot, axis=1), tf.dtypes.float32)
ideology_vec = (ideology_indices / self.ideology_bins - .5) * self.ideology_range
ideology_vec = ideology_vec + tf.random.normal((self.ideology_dim,), 0, self.bin_width / 2)
return ideology_vec.numpy()
|
robbierobinette/rcv-tensorflow
|
CandidateScore.py
|
from Candidate import Candidate
class CandidateScore:
def __init__(self, candidate: Candidate, score: float):
self.candidate = candidate
self.score = score
|
robbierobinette/rcv-tensorflow
|
Ideology.py
|
from typing import List
import math
import numpy as np
class IdeologyBase:
def __init__(self, ideology_vec: np.ndarray):
self.vec: np.ndarray = ideology_vec
class Ideology(IdeologyBase):
def __init__(self, ideology_vec: np.ndarray):
super().__init__(ideology_vec)
self.dim = ideology_vec.shape[0]
def distance_from_o(self) -> float:
dim = self.vec.shape[0]
return self.distance(Ideology(np.zeros(shape=(dim,))))
def euclidean_distance(self, rhs: IdeologyBase) -> float:
deltas = rhs.vec - self.vec
return np.sqrt(np.sum(deltas * deltas))
def distance(self, rhs: IdeologyBase) -> float:
return self.euclidean_distance(rhs)
# Manhattan distance
def manhattan_distance(self, rhs: IdeologyBase) -> float:
l = np.shape(self.vec)[0]
distance = 0
for i in range(l):
distance += abs(self.vec[i] - rhs.vec[i])
return distance
# return np.sum(np.abs(self.vec - rhs.vec))
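# Quick sanity check (illustrative): Ideology(np.array([3.0, 4.0])) has Euclidean distance 5.0
# from the origin, and its Manhattan distance from Ideology(np.array([0.0, 0.0])) is 7.0.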
|
robbierobinette/rcv-tensorflow
|
CandidateNetwork.py
|
<reponame>robbierobinette/rcv-tensorflow<gh_stars>0
import tensorflow as tf
import numpy as np
from typing import List
from Tensor import Tensor
class CandidateNetwork(tf.keras.Model):
def __init__(self, ideology_bins: int, ideology_dim: int, n_latent: int, width: int):
super().__init__()
self.ideology_bins = ideology_bins
self.ideology_dim = ideology_dim
self.n_latent = n_latent
self.encoding_layers = []
self.encoding_layers.append(tf.keras.layers.Dense(width, activation='relu'))
self.encoding_layers.append(tf.keras.layers.Dense(width, activation='relu'))
self.encoding_layers.append(tf.keras.layers.Dense(width, activation='relu'))
self.state = tf.keras.layers.Dense(self.n_latent)
self.decoding_layers = []
self.decoding_layers.append(tf.keras.layers.Dense(width, activation='relu'))
self.decoding_layers.append(tf.keras.layers.Dense(width, activation='relu'))
self.decoding_layers.append(tf.keras.layers.Dense(width, activation='relu'))
self.dropout = tf.keras.layers.Dropout(.3)
self.returns = tf.keras.layers.Dense(ideology_bins * ideology_dim)
# input is a tensor of shape (batch_size, n_observations (n_candidates), input_dim)
def call(self, input: Tensor, training: bool = None, mask: bool = None) -> Tensor:
# runs the encoder portion of the model on a single input
if input.shape[1] != 0:
x = input
for e in self.encoding_layers:
x = self.dropout(e(x), training=training)
# reduce to state observations
encoded_observations = self.dropout(self.state(x), training=training)
# now, sum the observations (which have been put on dim 1)
encoded_state = tf.reduce_sum(encoded_observations, axis=1, keepdims=False)
else:
# this corresponds to no candidates in the race yet.
batch_size = input.shape[0]
encoded_state = tf.zeros(shape=(batch_size, self.n_latent), dtype=tf.dtypes.float32)
# use that composite state to predict the returns for each possible action
x = encoded_state
for d in self.decoding_layers:
x = self.dropout(d(x), training=training)
return self.returns(x)
|
robbierobinette/rcv-tensorflow
|
Population.py
|
<reponame>robbierobinette/rcv-tensorflow
from Voter import Voter
from Party import Party
from typing import List
class Population:
def __init__(self, party: Party):
super().__init__()
self.party = party
def partisan_sample_voter(self) -> Voter:
pass
def unit_sample_voter(self) -> Voter:
pass
def generate_partisan_voters(self, n: int) -> List[Voter]:
return list(map(lambda i: self.partisan_sample_voter(), range(n)))
def generate_unit_voters(self, n: int) -> List[Voter]:
return list(map(lambda i: self.unit_sample_voter(), range(n)))
|
robbierobinette/rcv-tensorflow
|
Party.py
|
class Party:
def __init__(self, name: str, short_name: str):
self.name = name
self.short_name = short_name
Republicans = Party("Republican", "rep")
Democrats = Party("Democrat", "dem")
Independents = Party("Independent", "ind")
|
robbierobinette/rcv-tensorflow
|
NDSimulation.py
|
import matplotlib.pyplot as plt
from Ballot import Ballot
from DefaultConfigOptions import *
from ElectionResult import ElectionResult
from InstantRunoffElection import InstantRunoffElection
from HeadToHeadElection import HeadToHeadElection
from Population import Population
from NDPopulation import NDPopulation
from typing import List, Set, Callable
from Election import Election
all_voters = np.empty(dtype=float, shape=0)
all_candidates = np.empty(dtype=float, shape=0)
class ElectionConstructor:
def __init__(self, constructor: Callable[[List[Ballot], Set[Candidate]], Election], name: str):
self.constructor = constructor
self.name = name
def run(self, ballots: List[Ballot], candidates: Set[Candidate]) -> ElectionResult:
e = self.constructor(ballots, candidates)
return e.result()
def construct_irv(ballots: List[Ballot], candidates: Set[Candidate]):
return InstantRunoffElection(ballots, candidates)
def construct_h2h(ballots: List[Ballot], candidates: Set[Candidate]):
return HeadToHeadElection(ballots, candidates)
def main():
winners: List[List[ElectionResult]] = []
processes = [
ElectionConstructor(construct_irv, "Instant Runoff"),
ElectionConstructor(construct_h2h, "Head to Head")
]
for i in range(1):
print("iteration %d" % i)
c = 0
for ii in range(1000):
winners.append(run_election(processes))
c += 1
print(".", end="")
if c % 100 == 0:
print("")
print("")
for process_index in range(len(processes)):
d = get_plot_column(winners, process_index, Independents)
print("mean distance of winner from center %.2f" % (sum(d) / len(d)))
plt.hist([d],
stacked=True,
density=True,
bins=30,
color=["purple"],
label=["Winners"],
)
plt.title(processes[process_index].name)
plt.xlabel('distance from center')
plt.ylabel('count')
plt.show()
plt.hist([all_voters],
stacked=True,
density=True,
bins=30,
color=["purple"],
label=["Voters"],
)
plt.title("Voters")
plt.xlabel('distance from median')
plt.ylabel('count')
plt.show()
plt.hist([all_candidates],
stacked=True,
density=True,
bins=30,
color=["purple"],
label=["Candidates"],
)
plt.title("Candidates")
plt.xlabel('distance from median')
plt.ylabel('count')
plt.show()
def get_plot_column(winners: List[List[ElectionResult]], process_index: int, party: Party) -> List[float]:
ideologies = [r[process_index].winner().ideology for r in winners if r[process_index].winner().party == party]
distances = [i.distance_from_o() for i in ideologies]
return distances
def gen_candidates(n: int, population: Population) -> List[Candidate]:
cc = []
for i in range(n):
v = population.unit_sample_voter()
cc.append(Candidate("%s-%d" % (population.party.short_name, i + 1), population.party, v.ideology, 0))
return cc
def run_election(processes: List[ElectionConstructor]) -> List[ElectionResult]:
global all_voters, all_candidates
pop = NDPopulation(np.array([0, 0]), np.array([40, 40]))
voters = pop.generate_unit_voters(1000)
candidates = gen_candidates(6, pop)
candidates.append(Candidate("V", Independents, Ideology(np.random.normal(scale=[1.0, 1.0])), quality=0))
vv = [v.ideology.distance_from_o() for v in voters]
all_voters = np.append(all_voters, vv)
cc = [c.ideology.distance_from_o() for c in candidates]
all_candidates = np.append(all_candidates, cc)
ballots = [Ballot(v, candidates, unit_election_config) for v in voters]
results = [p.run(ballots, set(candidates)) for p in processes]
return results
if __name__ == '__main__':
main()
|
robbierobinette/rcv-tensorflow
|
NDPopulation.py
|
<reponame>robbierobinette/rcv-tensorflow
from Population import Population
from Voter import Voter, UnitVoter
from Ideology import Ideology
from Party import Party
import numpy as np
from PopulationGroup import Independents
class NDPopulation(Population):
def __init__(self, location: np.array, scale: np.array):
super().__init__(Independents)
self.location = location
self.scale = scale
self.dim = location.shape[0]
def unit_sample_voter(self) -> UnitVoter:
ideology = np.random.normal(loc=self.location, scale=self.scale)
return UnitVoter(Ideology(ideology))
|
robbierobinette/rcv-tensorflow
|
Timings.py
|
import time
from contextlib import contextmanager
class Timings(object):
def __init__(self):
self.timings = {}
self.counts = {}
def add(self, label: str, delta: float):
self.timings[label] = self.timings.get(label, 0.0) + delta
self.counts[label] = self.counts.get(label, 0.0) + 1
def print(self):
for k in self.timings.keys():
print("%20s %.0f %8.3f %8.5f" % (k, self.counts[k], self.timings[k], self.timings[k] / self.counts[k]), flush=True)
def total_time(self):
return sum(self.timings.values())
def reset(self):
self.timings = {}
self.counts = {}
@contextmanager
def time_block(self, name):
t = time.time()
yield
self.add(name, time.time() - t)
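# Minimal usage sketch: accumulate elapsed time per label with time_block(), then report totals.
if __name__ == '__main__':
    t = Timings()
    with t.time_block("sleep"):
        time.sleep(0.01)
    t.print()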
|
robbierobinette/rcv-tensorflow
|
Candidate.py
|
<reponame>robbierobinette/rcv-tensorflow<filename>Candidate.py
from Party import Party
from Ideology import Ideology
class Candidate:
def __init__(self, name: str, party: Party, ideology: Ideology, quality: float):
self.ideology = ideology
self.name = name
self.party = party
self.quality = quality
|
robbierobinette/rcv-tensorflow
|
main.py
|
from PopulationGroup import PopulationGroup
from Party import *
from Voter import Voter
from CombinedPopulation import CombinedPopulation
import matplotlib.pyplot as plt
import numpy as np
from typing import List
def main():
population_groups = [
PopulationGroup(Republicans, 30, 30, .4, 12),
PopulationGroup(Independents, 0, 30, .2, 0),
PopulationGroup(Democrats, -30, 30, .4, -12)
]
combined_population = CombinedPopulation(population_groups)
voters = list(map(lambda i: combined_population.sample_voter(), range(10000)))
iv = filter(lambda v: v.party == Independents, voters)
ii = list(map(lambda v: v.ideology.vec[0], iv))
dv = filter(lambda v: v.party == Democrats, voters)
di = list(map(lambda v: v.ideology.vec[0], dv))
rv = filter(lambda v: v.party == Republicans, voters)
ri = list(map(lambda v: v.ideology.vec[0], rv))
plt.hist([di, ii, ri],
stacked=True,
density=True,
bins=30,
color=["blue", "gray", "red"],
label=["Democrats", "Independents", "Republicans"],
)
plt.xlabel('ideology')
plt.ylabel('count')
plt.show()
if __name__ == '__main__':
main()
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
|
robbierobinette/rcv-tensorflow
|
FullSimulation.py
|
import matplotlib.pyplot as plt
from Ballot import Ballot
from DefaultConfigOptions import *
from PartyPrimaryElection import PartyPrimaryElection
from ElectionResult import ElectionResult
from DistrictData import DistrictVotingRecord, DistrictData
from InstantRunoffElection import InstantRunoffResult, InstantRunoffElection
def main():
dd = DistrictData("data-5vPn3.csv")
dec = default_election_config
winners: List[List[ElectionResult]] = []
for i in range(11):
dvr_list: List[DistrictVotingRecord] = list(dd.dvr.values())
print("iteration %d" % i)
c = 0
for dvr in dvr_list:
pop = dvr.population(dec.partisanship, dec.skew_factor, dec.stddev)
winners.append(run_election(pop))
c += 1
print(".", end="")
if c % 100 == 0:
print("")
print("")
processes = ["Party Primary", "Instant Runoff"]
for process_index in range(2):
rep_ideology = get_plot_column(winners, process_index, Republicans)
dem_ideology = get_plot_column(winners, process_index, Democrats)
plt.hist([dem_ideology, rep_ideology],
stacked=True,
density=True,
bins=30,
color=["blue", "red"],
label=["Democrats", "Republicans"],
)
plt.title(processes[process_index])
plt.xlabel('ideology')
plt.ylabel('count')
plt.show()
def get_plot_column(winners: List[List[ElectionResult]], process_index: int, party: Party) -> List[float]:
return [r[process_index].winner().ideology.vec[0] for r in winners if r[process_index].winner().party == party]
def gen_candidates(population: PopulationGroup) -> List[Candidate]:
cc = []
for i in range(0, 3):
v = population.partisan_sample_voter()
cc.append(Candidate("%s-%d" % (population.party.short_name, i + 1), population.party, v.ideology, 0))
return cc
def run_election(pop: CombinedPopulation) -> List[ElectionResult]:
voters = pop.generate_voters(1000)
candidates = set(gen_candidates(pop.republicans) + gen_candidates(pop.democrats))
ballots = [Ballot(v, candidates, default_election_config) for v in voters]
plurality = PartyPrimaryElection(ballots, candidates, pop, default_election_config)
irv = InstantRunoffElection(ballots, candidates)
return [plurality.result(), irv.result()]
if __name__ == '__main__':
main()
|
robbierobinette/rcv-tensorflow
|
Election.py
|
<gh_stars>0
from ElectionResult import ElectionResult
from Ballot import Ballot
from typing import List, Iterable, Union, Set
from Candidate import Candidate
BallotIter = Union[Iterable[Ballot], List[Ballot]]
class Election:
def __init__(self, ballots: List[Ballot], candidates: Set[Candidate]):
self.ballots = ballots
self.candidates = candidates
def result(self) -> ElectionResult:
pass
|
robbierobinette/rcv-tensorflow
|
ProcessResult.py
|
from ElectionConstructor import ElectionConstructor
from ModelStats import ModelStats
import pickle
from Candidate import Candidate
from typing import List
class ProcessResult:
def __init__(self, process: ElectionConstructor, bins: int, dim: int, stats: ModelStats, step: int):
self.process = process
self.dim = dim
self.bins = bins
self.stats = stats
self.label = "%15s ib%3d %dD" % (process.name, bins, dim)
self.step = step
def print(self):
self.stats.print(self.label, 0)
def name(self) -> str:
return "%s-%03d-%dD-%06d" % (self.process.name, self.bins, self.dim, self.step)
def save(self, dir: str):
with open("%s/%s.p" % (dir, self.name()), "wb") as f:
pickle.dump(self, f)
|
robbierobinette/rcv-tensorflow
|
InstantRunoffElection.py
|
<filename>InstantRunoffElection.py
from Election import ElectionResult, Election, BallotIter, Ballot
from typing import List, Iterable, Union, Set
from Candidate import Candidate
from PluralityElection import PluralityElection, PluralityResult
class InstantRunoffResult(ElectionResult):
def __init__(self, ordered_candidates: List[Candidate], rounds: List[PluralityResult]):
super().__init__(ordered_candidates)
self.rounds = rounds
class InstantRunoffElection(Election):
def __init__(self, ballots: BallotIter, candidates: Set[Candidate]):
super().__init__(ballots, candidates)
def result(self) -> InstantRunoffResult:
return self.compute_result()
def compute_result(self) -> InstantRunoffResult:
active_candidates = self.candidates.copy()
rounds = []
losers = []
while len(active_candidates) > 1:
plurality = PluralityElection(self.ballots, active_candidates)
r = plurality.result()
rounds.append(r)
loser = r.ordered_candidates[-1]
losers.append(loser)
active_candidates.remove(loser)
assert(len(active_candidates) == 1)
losers += list(active_candidates)
winners = list(reversed(losers))
return InstantRunoffResult(winners, rounds)
|
robbierobinette/rcv-tensorflow
|
ElectionConstructor.py
|
<filename>ElectionConstructor.py
from typing import Set, Callable
from Ballot import Ballot
from DefaultConfigOptions import *
from Election import Election
from ElectionResult import ElectionResult
from HeadToHeadElection import HeadToHeadElection
from InstantRunoffElection import InstantRunoffElection
class ElectionConstructor:
def __init__(self, constructor: Callable[[List[Ballot], Set[Candidate]], Election], name: str):
self.constructor = constructor
self.name = name
def run(self, ballots: List[Ballot], candidates: Set[Candidate]) -> ElectionResult:
e = self.constructor(ballots, candidates)
return e.result()
def construct_irv(ballots: List[Ballot], candidates: Set[Candidate]):
return InstantRunoffElection(ballots, candidates)
def construct_h2h(ballots: List[Ballot], candidates: Set[Candidate]):
return HeadToHeadElection(ballots, candidates)
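# Usage sketch (ballots and candidates below are hypothetical placeholders):
#     irv = ElectionConstructor(construct_irv, "Instant Runoff")
#     result = irv.run(ballots, set(candidates))
#     print(result.winner().name)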
|
robbierobinette/rcv-tensorflow
|
train_model.py
|
import argparse
import os.path
import pickle
from concurrent.futures import ThreadPoolExecutor
from typing import Tuple
import matplotlib.pyplot as plt
from Ballot import Ballot
from CandidateModel import CandidateModel
from DefaultConfigOptions import *
from NDPopulation import NDPopulation
from Timings import Timings
from ElectionConstructor import ElectionConstructor, construct_irv, construct_h2h
from ModelStats import ModelStats
from ProcessResult import ProcessResult
class Sample:
def __init__(self, opponents: List[Candidate], candidate: Candidate):
self.opponents = opponents.copy()
self.candidate = candidate
def create_model_and_population(ideology_bins: int, ideology_dim: int) -> (CandidateModel, NDPopulation):
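    # Note: the ideology_bins argument is effectively ignored; it is overwritten with 64 on the next line.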
ideology_bins = 64
hidden_ratio = 4
n_hidden = hidden_ratio * ideology_bins * ideology_dim
n_latent = ideology_bins * ideology_dim
batch_size = 128
learn_rate = .001
model = CandidateModel(ideology_bins=ideology_bins,
ideology_dim=ideology_dim,
n_hidden=n_hidden,
n_latent=n_latent,
learn_rate=learn_rate)
population_means = np.zeros(shape=(ideology_dim,))
population_stddev = np.ones(shape=(ideology_dim,))
pop = NDPopulation(population_means, population_stddev)
return model, pop
def gen_non_model_candidates(model: CandidateModel, population: NDPopulation) -> List[Candidate]:
candidates: List[Candidate] = []
if model.ready():
if np.random.choice([True, False]):
candidates += gen_example_candidates(population, .5)
else:
candidates += gen_random_candidates(population, 3)
else:
candidates += gen_example_candidates(population, .5)
candidates += gen_random_candidates(population, 3)
np.random.shuffle(candidates)
return candidates
def gen_example_candidates(population: NDPopulation, spacing: float) -> List[Candidate]:
candidates = []
dim = population.dim
d = spacing
fuzz = .05
c1_vec = np.random.normal(0, .01, dim)
c1_vec[0] += np.random.normal(d, fuzz)
candidates.append(Candidate("P-R", Independents, ideology=Ideology(c1_vec), quality=0))
c2_vec = np.random.normal(0, .01, dim)
c2_vec[0] -= np.random.normal(d, fuzz)
candidates.append(Candidate("P-L", Independents, ideology=Ideology(c2_vec), quality=0))
c3_vec = np.random.normal(0, .01, dim)
candidates.append(Candidate("P-C", Independents, ideology=Ideology(c3_vec), quality=0))
return candidates
def gen_random_candidates(population: NDPopulation, n: int) -> List[Candidate]:
candidates = []
    for i in range(n):  # generate the requested number of random candidates
ivec = population.unit_sample_voter().ideology.vec * .5
candidates.append(Candidate("r-" + str(i), Independents, Ideology(ivec), 0))
return candidates
def run_sample_election(model: CandidateModel, process: ElectionConstructor, population: NDPopulation, train: bool):
candidates = []
model_entries = set(np.random.choice(range(6), 3, replace=False))
r_candidates = gen_non_model_candidates(model, population)
for i in range(6):
if i in model_entries and model.ready():
ideology = Ideology(model.choose_ideology(candidates))
c = Candidate("m-" + str(i), Independents, ideology, 0)
else:
if train:
c = r_candidates.pop()
else:
ideology = population.unit_sample_voter().ideology
c = Candidate("r-" + str(i), Independents, ideology, 0)
candidates += [c]
voters = population.generate_unit_voters(1000)
ballots = [Ballot(v, candidates, unit_election_config) for v in voters]
result = process.run(ballots, set(candidates))
winner = result.winner()
balance = 0
return winner, candidates, balance
def train_candidate_model(model: CandidateModel, process: ElectionConstructor, population: NDPopulation,
max_steps: int):
timings = Timings()
stats = ModelStats()
first = True
while model.global_step < max_steps:
winner, candidates, balance = run_sample_election(model, process, population, True)
for i in range(len(candidates)):
model.add_sample_from_candidates(candidates[i], candidates[0:i], winner)
if model.ready():
if first:
print("starting to train")
first = False
stats.update(winner, candidates, balance)
for i in range(5):
with timings.time_block("model.train"):
model.train(128)
if model.global_step % 1000 == 0:
stats.print(process.name, model.global_step)
if model.global_step < max_steps:
stats.reset()
timings.print()
def check_stats(stats: ModelStats, model: CandidateModel, process: ElectionConstructor, population: NDPopulation):
results = []
timings = Timings()
for i in range(1000):
winner, candidates, balance = run_sample_election(model, process, population, train=False)
stats.update(winner, candidates, balance)
results.append((winner, candidates))
def run_parameter_set(process: ElectionConstructor, ibins: int, dim: int, steps: int):
save_path = "models/cm-%s-%03d-%dD.p" % (process.name, ibins, dim)
model, population = create_model_and_population(ibins, dim)
if os.path.exists(save_path):
with open(save_path, "rb") as f:
model: CandidateModel = pickle.load(f)
else:
while model.global_step < steps:
train_candidate_model(model, process, population, model.global_step + 2000)
stats = ModelStats()
check_stats(stats, model, process, population)
result = ProcessResult(process, ibins, dim, stats, model.global_step)
result.save(args.output)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--dim", help="dimensionality", type=int, default=1)
parser.add_argument("--bins", help="ideology bins", type=int, default=64)
parser.add_argument("--steps", help="learning steps", type=int, default=6000)
parser.add_argument("--process", help="election proces: Hare or Minimax", type=str, default="Minimax")
parser.add_argument("--output", help="Location for output", type=str)
args = parser.parse_args()
print("dim: ", args.dim)
print("bins: ", args.bins)
print("process: ", args.process)
print("output: ", args.output)
if args.process == "Hare":
process = ElectionConstructor(construct_irv, "Hare")
else:
process = ElectionConstructor(construct_h2h, "Minimax")
run_parameter_set(process, args.bins, args.dim, args.steps)
|
robbierobinette/rcv-tensorflow
|
PartyPrimaryElection.py
|
from CombinedPopulation import Democrats, Republicans, CombinedPopulation
from Election import *
from Party import Party
from PluralityElection import PluralityElection
from PopulationGroup import PopulationGroup
from ElectionConfig import ElectionConfig
class PartyPrimaryElection(Election):
def __init__(self, ballots: List[Ballot], candidates: Set[Candidate], combined_pop: CombinedPopulation,
config: ElectionConfig):
super().__init__(ballots, candidates)
self.primaries = {}
self.combined_pop = combined_pop
self.config = config
dem_candidate = self.result_for_party(Democrats)
rep_candidate = self.result_for_party(Republicans)
self.general_candidates = {dem_candidate, rep_candidate}
self.general = PluralityElection(ballots, self.general_candidates)
def result(self) -> ElectionResult:
return self.general.result()
def result_for_party(self, party: Party) -> Candidate:
pop = self.combined_pop.pg_dict[party]
primary_pop = PopulationGroup(pop.party, pop.mean + pop.primary_shift, pop.stddev, pop.weight, 0)
party_candidates = set(filter(lambda c: c.party == party, self.candidates))
party_ballots = [Ballot(primary_pop.partisan_sample_voter(), list(party_candidates), self.config) for x in
range(len(self.ballots))]
primary = PluralityElection(party_ballots, party_candidates)
self.primaries[party] = primary
return primary.result().winner()
def print(self):
for k in self.primaries.keys():
print("Primary: %12s" % k.name)
self.primaries[k].print()
print("General:")
self.general.print()
|
robbierobinette/rcv-tensorflow
|
BallotTest.py
|
<reponame>robbierobinette/rcv-tensorflow
import matplotlib.pyplot as plt
from Ballot import Ballot
from DefaultConfigOptions import *
from PartyPrimaryElection import PartyPrimaryElection
def main():
ideology = []
for i in range(1000):
print(".")
if (i + 1) % 100 == 0:
print("")
ideology.append(run_election())
plt.hist([ideology],
stacked=True,
density=True,
bins=30,
color=["blue"],
label=["representatives"],
)
plt.xlabel('ideology')
plt.ylabel('count')
plt.show()
def gen_candidates(population: PopulationGroup) -> List[Candidate]:
cc = []
for i in range(0, 3):
v = population.partisan_sample_voter()
cc.append(Candidate("%s-%d" % (population.party.short_name, i + 1), population.party, v.ideology, 0))
return cc
def run_election() -> float:
pop = combined_population
voters = pop.generate_voters(1000)
candidates = gen_candidates(pop.republicans) + gen_candidates(pop.democrats)
ballots = list(map(lambda v: Ballot(v, candidates, default_election_config), voters))
election = PartyPrimaryElection(ballots, set(candidates), pop, default_election_config)
return election.result().winner().ideology.vec[0]
if __name__ == '__main__':
main()
|
robbierobinette/rcv-tensorflow
|
ppo_model.py
|
import datetime as datetime
import os.path
import pickle
import tensorflow as tf
from ActionMemory import ActionMemory
from Ballot import Ballot
from DefaultConfigOptions import *
from ElectionConstructor import ElectionConstructor, construct_irv, construct_h2h
from ModelStats import ModelStats
from NDPopulation import NDPopulation
from ProcessResult import ProcessResult
from Tensor import Tensor
from Timings import Timings
# Parameters for Ornstein–Uhlenbeck process
THETA = 0.15
DT = 1e-1
class ElectionStateEncoder(tf.keras.Model):
def get_config(self):
pass
def __init__(self, ideology_dim: int, n_latent: int, width: int):
super().__init__()
self.ideology_dim = ideology_dim
self.n_latent = n_latent
self.encoding_layers = []
self.encoding_layers.append(tf.keras.layers.Dense(width, activation='relu', name="actor-enc1"))
self.encoding_layers.append(tf.keras.layers.Dense(width, activation='relu', name="actor-enc2"))
self.encoding_layers.append(tf.keras.layers.Dense(width, activation='relu', name="actor-enc3"))
        self.state = tf.keras.layers.Dense(self.n_latent)
        # dropout layer used by call() below
        self.dropout = tf.keras.layers.Dropout(.3, name="encoder-dropout")
# input is a tensor of shape (batch_size, n_observations (n_candidates), input_dim)
def call(self, state: Tensor, training: bool = None, mask: bool = None) -> Tensor:
# runs the encoder portion of the model on a single input
if state.shape[1] != 0:
x = state
for e in self.encoding_layers:
x = self.dropout(e(x), training=training)
# reduce to state observations
encoded_observations = self.dropout(self.state(x), training=training)
# now, sum the observations (which have been put on dim 1)
encoded_state = tf.reduce_sum(encoded_observations, axis=1, keepdims=False)
else:
# this corresponds to no candidates in the race yet.
batch_size = state.shape[0]
encoded_state = tf.zeros(shape=(batch_size, self.n_latent), dtype=tf.dtypes.float32)
return encoded_state
class CandidateActor(tf.keras.Model):
def get_config(self):
pass
def __init__(self, ideology_dim: int, width: int, learn_rate: float):
super().__init__()
self.ideology_dim = ideology_dim
self.decoding_layers = []
self.decoding_layers.append(tf.keras.layers.Dense(width, activation='relu', name="actor-dec1"))
self.decoding_layers.append(tf.keras.layers.Dense(width, activation='relu', name="actor-dec2"))
self.decoding_layers.append(tf.keras.layers.Dense(width, activation='relu', name="actor-dec3"))
self.dropout = tf.keras.layers.Dropout(.3, name="actor-dropout")
self.returns = tf.keras.layers.Dense(ideology_dim, name="actor-returns")
self.optimizer = tf.keras.optimizers.Adam(learning_rate=learn_rate)
# input is a tensor of shape (batch_size, n_observations (n_candidates), input_dim)
def call(self, encoded_state: Tensor, training: bool = None, mask: bool = None) -> Tensor:
x = encoded_state
for d in self.decoding_layers:
x = self.dropout(d(x), training=training)
result = tf.tanh(self.returns(x)) * 2
return result
class CandidateCritic(tf.keras.Model):
def __init__(self, ideology_dim: int, n_latent: int, width: int, learn_rate: float):
super().__init__()
self.ideology_dim = ideology_dim
self.n_latent = n_latent
self.decoding_layers = []
self.decoding_layers.append(tf.keras.layers.Dense(width, activation='relu', name="critic-dec1"))
self.decoding_layers.append(tf.keras.layers.Dense(width, activation='relu', name="critic-dec2"))
self.decoding_layers.append(tf.keras.layers.Dense(width, activation='relu', name="critic-dec3"))
self.dropout = tf.keras.layers.Dropout(.3)
self.returns = tf.keras.layers.Dense(ideology_dim, name="critic-returns")
self.optimizer = tf.keras.optimizers.Adam(learning_rate=learn_rate)
# input is a tensor of shape (batch_size, n_observations (n_candidates), input_dim)
def call(self, encoded_state: Tensor, action: Tensor, training: bool = None, mask: bool = None) -> Tensor:
# use the composite state and action to predict the returns for the given action
x = tf.concat([encoded_state, action], axis=1)
for d in self.decoding_layers:
x = self.dropout(d(x), training=training)
return self.returns(x)
class CandidateAgent:
def __init__(self, ideology_dim: int, n_latent: int, width: int, actor_lr: float, critic_lr: float):
self.ideology_dim = ideology_dim
self.n_latent = n_latent
self.width = width
self.gamma = .99
self.tau = .01
self.encoder = ElectionStateEncoder(ideology_dim, n_latent, width)
self.actor = CandidateActor(ideology_dim, width, actor_lr)
self.critic = CandidateCritic(ideology_dim, n_latent, width, critic_lr)
self.memory = ActionMemory(1024, ideology_dim, ideology_dim)
self.lower_bound = -2
self.upper_bound = 2
self.global_step = 0
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
self.log_dir = 'logs/' + current_time + '/train'
self.summary_writer = tf.summary.create_file_writer(self.log_dir)
def train(self, batch_size: int):
for depth in self.memory.depths():
state, action, reward = self.memory.get_batch(depth, batch_size)
self.update(state, action, reward)
self.global_step += 1
def update(self, raw_state, action, reward):
with tf.GradientTape() as tape:
encoded_state = self.encoder(raw_state)
critic_value = self.critic.call(encoded_state, action)
# print("critic_reward")
# for i in range(reward.shape[0]):
# print(f"\ta: {action.numpy()[i, 0]: 8.2f} ", end="")
# print(f" c: {critic_value.numpy()[i, 0]: 5.2f}", end="")
# print(f" t: {reward.numpy()[i, 0]:.2f}")
critic_loss = tf.math.reduce_mean(tf.keras.losses.MSE(reward, critic_value))
# print(f"critic_loss: {critic_loss.numpy().shape} {critic_loss.numpy(): .2f}")
with self.summary_writer.as_default():
label = f"critic_loss-{raw_state.shape[1]}"
tf.summary.scalar(label, critic_loss, step=self.global_step)
tf.summary.flush()
# This updates both the encoder and the critic. Update the encoder here because
# the critic error is known.
vars = self.critic.trainable_variables + self.encoder.trainable_variables
critic_gradient = tape.gradient(critic_loss, vars)
gv = [(g, v) for g, v in zip(critic_gradient, vars) if g is not None]
self.critic.optimizer.apply_gradients(gv)
with tf.GradientTape() as tape:
encoded_state = self.encoder(raw_state)
policy_actions = self.actor(encoded_state)
actor_loss = -self.critic(encoded_state, policy_actions)
actor_loss = tf.math.reduce_mean(actor_loss)
# Here we are only training the actor's decoding layer, we are not altering the encoding.
        # We cannot update anything about the critic here because that would alter the critic's opinion
# of the action and not the quality of the action.
actor_gradient = tape.gradient(actor_loss, self.actor.trainable_variables)
gv = [(g, v) for g, v in zip(actor_gradient, self.actor.trainable_variables) if g is not None]
self.actor.optimizer.apply_gradients(gv)
def _ornstein_uhlenbeck_process(self, x, theta=THETA, mu=0, dt=DT, std=0.2):
"""
Ornstein–Uhlenbeck process
"""
return x + theta * (mu - x) * dt + std * np.sqrt(dt) * np.random.normal(size=self.ideology_dim)
def get_action(self, observation, noise, evaluation=False):
raw_state = tf.convert_to_tensor([observation], dtype=tf.float32)
encoded_state = self.encoder(raw_state)
actions = self.actor(encoded_state)
if not evaluation:
self.noise = self._ornstein_uhlenbeck_process(noise)
actions += self.noise
actions = tf.clip_by_value(actions, self.lower_bound, self.upper_bound)
return actions[0]
def ready(self) -> bool:
return self.memory.ready()
@staticmethod
def convert_ideology_to_input(ideology: Ideology) -> Tensor:
return ideology.vec.astype(dtype=np.float32)
def choose_ideology(self, opponents: List[Candidate]):
raw_state = self.get_state_from_opponents(opponents)
encoded_state = self.encoder(raw_state)
ideology_pred = self.actor.call(encoded_state, training=True)
ideology_pred = tf.reshape(ideology_pred, shape=(self.ideology_dim,))
return ideology_pred.numpy()
def get_state_from_opponents(self, opponents: List[Candidate]) -> Tensor:
# shape is (observation_id, ideology_representation_vec)
if len(opponents) != 0:
candidate_observations = [self.convert_ideology_to_input(o.ideology) for o in opponents]
state = np.stack(candidate_observations)
else:
state = tf.zeros(shape=(0, self.ideology_dim), dtype=tf.dtypes.float32)
return tf.expand_dims(state, 0)
def add_sample_from_candidates(self, candidate: Candidate, opponents: List[Candidate], winner: Candidate):
state = self.get_state_from_opponents(opponents)
action = self.convert_ideology_to_input(candidate.ideology)
action = tf.expand_dims(action, 0)
if winner == candidate:
reward = tf.ones(shape=(1, 1), dtype=tf.dtypes.float32)
else:
reward = tf.zeros(shape=(1, 1), dtype=tf.dtypes.float32)
self.memory.add_sample(state, action, reward)
def save_to_file(self, path: str):
self.actor.save(path + ".actor")
self.critic.save(path + ".critic")
self.encoder.save(path + ".encoder")
with open(path, "wb") as f:
pickle.dump(self, f)
def __getstate__(self):
state = self.__dict__.copy()
# Don't pickle the model
del state["actor"]
del state["critic"]
del state["encoder"]
del state["memory"]
del state["optimizer"]
del state["summary_writer"]
return state
def create_model_and_population(ideology_dim: int) -> (CandidateAgent, NDPopulation):
hidden_ratio = 64
n_hidden = hidden_ratio * ideology_dim
n_latent = ideology_dim * 32
batch_size = 128
learn_rate = .001
model = CandidateAgent(ideology_dim=ideology_dim,
n_latent=n_latent,
width=n_hidden,
actor_lr=learn_rate,
critic_lr=learn_rate)
population_means = np.zeros(shape=(ideology_dim,))
population_stddev = np.ones(shape=(ideology_dim,))
pop = NDPopulation(population_means, population_stddev)
return model, pop
def measure_representation(candidate: Candidate, voters: List[Voter]) -> float:
n_voters = len(voters)
balance = []
for d in range(candidate.ideology.dim):
lc = len([v for v in voters if v.ideology.vec[d] < candidate.ideology.vec[d]])
balance.append(min(lc / n_voters, 1 - lc / n_voters))
return float(np.mean(balance))
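# Worked sketch: with 1000 one-dimensional voters and 400 of them to the winner's left,
# balance = min(0.4, 0.6) = 0.4; a winner at the voter median scores the maximum 0.5.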
def gen_non_model_candidates(model: CandidateAgent, population: NDPopulation) -> List[Candidate]:
candidates: List[Candidate] = []
if model.ready():
if np.random.choice([True, False]):
candidates += gen_pilot_candidates(population, .8)
else:
candidates += gen_random_candidates(population, 3)
else:
candidates += gen_pilot_candidates(population, .6)
candidates += gen_random_candidates(population, 3)
np.random.shuffle(candidates)
return candidates
def gen_pilot_candidates(population: NDPopulation, spacing: float) -> List[Candidate]:
candidates = []
dim = population.dim
d = spacing
fuzz = .05
c1_vec = np.random.normal(0, .01, dim)
c1_vec[0] += np.random.normal(d, fuzz)
candidates.append(Candidate("P-R", Independents, ideology=Ideology(c1_vec), quality=0))
c2_vec = np.random.normal(0, .01, dim)
c2_vec[0] -= np.random.normal(d, fuzz)
candidates.append(Candidate("P-L", Independents, ideology=Ideology(c2_vec), quality=0))
c3_vec = np.random.normal(0, .02, dim)
candidates.append(Candidate("P-C", Independents, ideology=Ideology(c3_vec), quality=0))
return candidates
def gen_random_candidates(population: NDPopulation, n: int) -> List[Candidate]:
candidates = []
    for i in range(n):  # generate the requested number of random candidates
ivec = population.unit_sample_voter().ideology.vec * .5
candidates.append(Candidate("r-" + str(i), Independents, Ideology(ivec), 0))
return candidates
def run_sample_election(model: CandidateAgent, process: ElectionConstructor, population: NDPopulation, train: bool):
candidates = []
model_entries = set(np.random.choice(range(6), 3, replace=False))
r_candidates = gen_non_model_candidates(model, population)
for i in range(6):
if i in model_entries and model.ready():
ideology = Ideology(model.choose_ideology(candidates))
c = Candidate("m-" + str(i), Independents, ideology, 0)
else:
if train:
c = r_candidates.pop()
else:
ideology = population.unit_sample_voter().ideology
c = Candidate("r-" + str(i), Independents, ideology, 0)
candidates += [c]
voters = population.generate_unit_voters(1000)
ballots = [Ballot(v, candidates, unit_election_config) for v in voters]
result = process.run(ballots, set(candidates))
winner = result.winner()
balance = measure_representation(winner, voters)
return winner, candidates, balance
def train_candidate_model(model: CandidateAgent, process: ElectionConstructor, population: NDPopulation,
max_steps=5000):
timings = Timings()
stats = ModelStats()
first = True
while model.global_step < max_steps:
winner, candidates, balance = run_sample_election(model, process, population, True)
for i in range(len(candidates)):
model.add_sample_from_candidates(candidates[i], candidates[0:i], winner)
if model.ready():
if first:
print("starting to train")
first = False
stats.update(winner, candidates, balance)
with timings.time_block("model.train"):
model.train(128)
s = model.global_step
if (s < 100 and s % 10 == 0) or (s < 1000 and s % 100 == 0) or s % 1000 == 0:
stats.print(process.name, model.global_step)
stats.reset()
timings.print()
def check_stats(stats: ModelStats, model: CandidateAgent, process: ElectionConstructor, population: NDPopulation):
results = []
timings = Timings()
for i in range(1000):
winner, candidates, balance = run_sample_election(model, process, population, train=False)
stats.update(winner, candidates, balance)
def run_parameter_set(process: ElectionConstructor, ibins: int, dim: int):
save_path = "models/cm-%s-%03d-%dD.p" % (process.name, ibins, dim)
model, population = create_model_and_population(dim)
if os.path.exists(save_path):
with open(save_path, "rb") as f:
model: CandidateAgent = pickle.load(f)
else:
train_candidate_model(model, process, population)
# Saving the model file is not working at this time.
model.save_to_file(save_path)
stats = ModelStats()
check_stats(stats, model, process, population)
return stats, model
def train_models():
dims = [1]
processes = [
ElectionConstructor(constructor=construct_irv, name="Instant Runoff"),
ElectionConstructor(constructor=construct_h2h, name="Head-to-Head"),
]
results = []
for bins in [64, 128]:
for process in processes:
for dim in dims:
stats, model = run_parameter_set(process, bins, dim)
results.append(ProcessResult(process, bins, dim, stats, 10000))
results[-1].print()
for r in results:
r.print()
def save_test():
process = ElectionConstructor(constructor=construct_irv, name="Instant Runoff")
model, population = create_model_and_population(ideology_dim=1)
train_candidate_model(model, process, population, 500000)
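# Note: save_test() is invoked at module level below, so importing ppo_model starts a 500,000-step training run.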
save_test()
|
robbierobinette/rcv-tensorflow
|
PopulationGroup.py
|
from Party import *
from Voter import *
import random
import numpy as np
class PopulationGroup:
def __init__(self, party: Party, mean: float, stddev: float, weight: float, primary_shift: float):
self.party = party
self.mean = mean
self.stddev = stddev
self.weight = weight
self.primary_shift = primary_shift
def partisan_sample_voter(self) -> PartisanVoter:
i = Ideology(np.random.normal(loc=[self.mean], scale=[self.stddev]))
return PartisanVoter(i, self.party)
def unit_voter(self) -> UnitVoter:
i = Ideology(np.random.normal(loc=[self.mean], scale=[self.stddev]))
return UnitVoter(i)
|
robbierobinette/rcv-tensorflow
|
CombinedPopulation.py
|
from Voter import *
from PopulationGroup import *
from typing import List
from random import uniform
from Population import Population
class CombinedPopulation(Population):
def __init__(self, population_groups: List[PopulationGroup]):
super().__init__(Independents)
self.population_groups = population_groups
self.combined_weight = sum([a.weight for a in population_groups])
self.pg_dict = {}
for g in population_groups:
self.pg_dict[g.party] = g
self.democrats = self.pg_dict[Democrats]
self.republicans = self.pg_dict[Republicans]
self.independents = self.pg_dict[Independents]
def get_weighted_population(self) -> PopulationGroup:
x = uniform(0, self.combined_weight)
i = 0
while x > 0:
x -= self.population_groups[i].weight
            if x <= 0:
return self.population_groups[i]
i += 1
raise Exception("can't get weighted population")
def sample_voter(self):
p_group = self.get_weighted_population()
return p_group.partisan_sample_voter()
|
robbierobinette/rcv-tensorflow
|
Voter.py
|
<filename>Voter.py
from Party import Party, Independents
from Ideology import Ideology
from Candidate import Candidate
from ElectionConfig import ElectionConfig
import random
class Voter:
def __init__(self, ideology: Ideology):
self.ideology = ideology
def score(self, candidate: Candidate, config: ElectionConfig):
pass
class PartisanVoter(Voter):
def __init__(self, ideology: Ideology, party: Party):
super(PartisanVoter, self).__init__(ideology)
self.party = party
def score(self, candidate: Candidate, config: ElectionConfig) -> float:
score = -self.ideology.distance(candidate.ideology)
score += candidate.quality
if self.party == candidate.party:
score += config.party_loyalty
elif candidate.party == Independents:
score += config.independent_bonus
score += random.normalvariate(0, config.uncertainty)
if candidate.party == Independents:
score -= config.wasted_vote_factor
return score
# just like a PartisanVoter, but without party loyalty, the independent bonus, or the wasted-vote penalty
class UnitVoter(Voter):
def __init__(self, ideology: Ideology):
super(UnitVoter, self).__init__(ideology)
def score(self, candidate: Candidate, config: ElectionConfig) -> float:
score = -self.ideology.distance(candidate.ideology)
score += candidate.quality
score += random.normalvariate(0, config.uncertainty)
return score
|
robbierobinette/rcv-tensorflow
|
ElectionConfig.py
|
class ElectionConfig:
def __init__(self,
partisanship: float,
stddev: float,
skew_factor: float,
primary_skew: float,
party_loyalty: float,
independent_bonus: float,
wasted_vote_factor: float,
uncertainty: float):
self.partisanship = partisanship
self.stddev = stddev
self.skew_factor = skew_factor
self.primary_skew = primary_skew
self.party_loyalty = party_loyalty
self.independent_bonus = independent_bonus
self.wasted_vote_factor = wasted_vote_factor
self.uncertainty = uncertainty
|
robbierobinette/rcv-tensorflow
|
PluralityElection.py
|
<reponame>robbierobinette/rcv-tensorflow
from Election import *
from ElectionResult import ElectionResult
from Ballot import Ballot
from typing import List, Union, Iterable, Set
from Candidate import Candidate
class PluralityResult(ElectionResult):
def __init__(self, ordered_candidates: List[Candidate], vote_totals: {}):
super().__init__(ordered_candidates)
self.vote_totals = vote_totals
class PluralityElection(Election):
def __init__(self, ballots: BallotIter, active_candidates: Set[Candidate]):
super().__init__(ballots, active_candidates)
self.vote_totals = {}
for c in active_candidates:
self.vote_totals[c] = 0
self.active_candidates = active_candidates
self.ordered_candidates: List[Candidate] = self.compute_results()
def print(self):
for c in self.ordered_candidates:
print("%6s % 7.2f %6d" % (c.name, c.ideology.vec[0], self.vote_totals[c]))
def compute_results(self) -> List[Candidate]:
for b in self.ballots:
w = b.active_choice(self.active_candidates)
self.vote_totals[w] += 1
c_list = list(self.vote_totals.items())
c_list.sort(key=lambda p: p[1], reverse=True)
return list(map(lambda p: p[0], c_list))
def result(self) -> PluralityResult:
return PluralityResult(self.ordered_candidates, self.vote_totals)
|
robbierobinette/rcv-tensorflow
|
ActionMemory.py
|
<filename>ActionMemory.py
from CandidateNetwork import CandidateNetwork
from Candidate import Candidate
import numpy as np
import tensorflow as tf
from typing import List
import random
from Tensor import Tensor
from Timings import Timings
class ActionMemory:
def __init__(self, max_size: int, state_width: int, action_width: int):
self.max_size = max_size
self.state_width = state_width
self.action_width = action_width
self.depth_memory = {}
self.size = 0
def depths(self) -> List[int]:
return list(self.depth_memory.keys())
def ready(self) -> bool:
return self.size > 1000
def add_sample(self, state: np.ndarray, action: np.ndarray, reward: np.ndarray):
self.size += 1
sample_depth = state.shape[1]
if sample_depth not in self.depth_memory:
self.depth_memory[sample_depth] = ActionMemoryDepth(sample_depth, self.max_size, self.state_width,
self.action_width)
self.depth_memory[sample_depth].add_sample(state, action, reward)
def get_batch(self, depth: int, batch_size: int) -> (Tensor, Tensor, Tensor):
return self.depth_memory[depth].get_batch(batch_size)
class ActionMemoryDepth:
def __init__(self, depth: int, max_size: int, state_width: int, action_width: int):
self.max_size = max_size
self.state: np.array = np.zeros(shape=(0, 1))
self.action: np.array = np.zeros(shape=(0, 1))
self.reward: np.array = np.zeros(shape=(0, 1))
self.depth = depth
self.idx = 0
# state is of dim (sample, observation, input_dim)
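    # Once max_size samples are stored, add_sample below overwrites the oldest
    # entries in circular-buffer fashion using self.idx.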
def add_sample(self, state: np.ndarray, action: np.ndarray, reward: np.ndarray):
        assert self.depth == state.shape[1], "depth must match"
if self.state.shape[0] == 0:
self.state = np.zeros(shape=(0, state.shape[1], state.shape[2]), dtype=np.single)
self.action = np.zeros(shape=(0, action.shape[1]), dtype=np.single)
self.reward = np.zeros(shape=(0, 1), dtype=np.single)
if self.state.shape[0] < self.max_size:
self.state = np.concatenate([self.state, state], axis=0)
self.action = np.concatenate([self.action, action], axis=0)
self.reward = np.concatenate([self.reward, reward], axis=0)
else:
i = self.idx
self.state[i] = state
self.action[i] = action
self.reward[i] = reward
self.idx = (self.idx + 1) % self.max_size
def get_batch(self, batch_size) -> (np.ndarray, np.ndarray, np.ndarray):
indices = np.random.randint(0, self.state.shape[0], batch_size)
return tf.gather(self.state, indices), tf.gather(self.action, indices), tf.gather(self.reward, indices)
|
robbierobinette/rcv-tensorflow
|
ModelStats.py
|
from typing import List
from Candidate import Candidate
class ModelStats:
def __init__(self):
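        # Counters start at 1e-5 instead of 0 so the ratios computed in print()
        # never divide by zero before any samples have been recorded.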
self.model_count = 1e-5
self.model_winners = 1e-5
self.random_count = 1e-5
self.random_winners = 1e-5
self.model_winner_distance = 0
self.random_winner_distance = 0
self.winners: List[Candidate] = []
self.candidates: List[List[Candidate]] = []
self.balance: List[float] = []
def reset(self):
self.model_count = 1e-5
self.model_winners = 1e-5
self.random_count = 1e-5
self.random_winners = 1e-5
self.model_winner_distance = 0
self.random_winner_distance = 0
self.winners = []
self.candidates = []
self.balance = []
def update(self, winner: Candidate, candidates: List[Candidate], balance: float = 0):
self.winners.append(winner)
self.candidates.append(candidates)
self.balance.append(balance)
for c in candidates:
if c.name[0] == 'm':
self.add_model()
else:
self.add_random()
if winner.name[0] == 'm':
self.add_model_winner(winner)
else:
self.add_random_winner(winner)
def add_random(self):
self.random_count += 1
def add_model(self):
self.model_count += 1
def add_random_winner(self, w: Candidate):
self.random_winners += 1
self.random_winner_distance += w.ideology.distance_from_o()
def add_model_winner(self, w: Candidate):
self.model_winners += 1
self.model_winner_distance += w.ideology.distance_from_o()
def print(self, label: str, global_step: int):
print("%15s %6d, %5d " %
(label,
global_step,
len(self.winners)), end="")
print("random %6d/%6d %5.2f%% O: %5.2f" %
(self.random_count,
self.random_winners,
100 * self.random_winners / self.random_count,
self.random_winner_distance / self.random_winners), end='')
print(" model %6d/%6d %5.2f%% O: %5.2f" %
(self.model_count,
self.model_winners,
100 * self.model_winners / self.model_count,
self.model_winner_distance / self.model_winners), end='')
print(" chance of model_winner = %5.2f%%" % (
100 * self.model_winners / (self.model_winners + self.random_winners)),
flush=True)
|
robbierobinette/rcv-tensorflow
|
ElectionResult.py
|
from typing import List
from Candidate import Candidate
class ElectionResult:
def __init__(self, ordered_candidates: List[Candidate]):
self.ordered_candidates = ordered_candidates
def winner(self) -> Candidate:
return self.ordered_candidates[0]
|
freshchen/dev-tools
|
script/office/excel/mysql_update.py
|
<reponame>freshchen/dev-tools
import os
import pandas as pd
SQL = ""
def write_file(s, file_name):
fh = open(file_name, 'w', encoding='utf-8')
fh.write(s)
fh.close()
def parse(data):
global SQL
for row in data.values:
SQL += "INSERT IGNORE INTO zfhis.source_code_mapping (source_id, biz_code) VALUES ('" + str(row[0]) + "', '" + str(row[1]) + "');\n"
def main():
path_list = [
"./source.xlsx"
]
for path in path_list:
data = pd.read_excel(path)
parse(data)
write_file(SQL, "source-mapping.sql")
if __name__ == '__main__':
main()
|
freshchen/dev-tools
|
script/office/excel/source.py
|
import os
import xlrd
import time
import requests
import json
def parse(sheet, row_begin, url, token):
sheet_rows = sheet.nrows
temp = ''
for row_num in range(row_begin, sheet_rows):
row = sheet.row_values(row_num)
first = str(row[0])
second = str(row[1])
if first != '':
temp = first
if first == '' and temp != '':
first = temp
if second == '':
continue
body = {
'parentSourceId': first,
'sourceId': second
}
headers = {
'token': token,
'Content-Type': 'application/json;charset=UTF-8'
}
print("body: " + body.__str__())
time.sleep(3)
response = requests.post(url, data=json.dumps(body), headers=headers)
print(response.text)
def main():
path_list = [
"1.xls"
]
for path in path_list:
data = xlrd.open_workbook(path)
sheet = data.sheets()[0]
parse(sheet, 1,
'',
'')
if __name__ == '__main__':
main()
|
freshchen/dev-tools
|
script/office/excel/create_oracle_table.py
|
<filename>script/office/excel/create_oracle_table.py
import os
import xlrd
SQL = ""
TYPE_DICT = {"字符串": "VARCHAR", "字符": "CHAR", "数字": "DECIMAL", "DATE": "DATE", "Datetime": "TIMESTAMP",
"日期时间": "TIMESTAMP", "DATETIME": "TIMESTAMP", "数值": "DECIMAL", "整数": "DECIMAL"}
NEED_DICT = {"必填": "NOT NULL", "应填": "NULL", "不填": "NULL", "": "NULL"}
def write_file(s, file_name):
fh = open(file_name, 'w', encoding='utf-8')
fh.write(s)
fh.close()
def trim(s):
return str(s).strip()
def field_length(s):
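    # Examples: field_length(10.2) -> "(10,2) ", field_length(8.0) -> "(8) ",
    # and a non-numeric cell such as "n/a" -> " " (no length clause emitted).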
if isinstance(s, float) or isinstance(s, int):
(s1, s2) = str(float(s)).split(".", 1)
if "0" == s2:
s2 = ""
else:
s2 = "," + s2
return "(" + s1 + s2 + ") "
else:
return " "
def parse_row(row):
global SQL
SQL += " "
SQL += "\"" + trim(row[1]) + "\" "
SQL += TYPE_DICT[trim(row[2])]
SQL += field_length(row[3])
SQL += NEED_DICT[trim(row[4])]
def parse_sheet(sheet, table_name, row_begin):
global SQL
sheet_rows = sheet.nrows
SQL += "CREATE TABLE \"" + table_name + "\" (\n"
for row_num in range(row_begin, sheet_rows):
row = sheet.row_values(row_num)
not_empty = list(filter(lambda s: "" != s, [row[1], row[2], row[3], row[4]]))
if row and len(not_empty) and row_num != (sheet_rows - 1):
parse_row(row)
if row_num != (sheet_rows - 2):
SQL += ","
SQL += "\n"
SQL += ");\n"
def main():
path_list = [
"/Users/darcy/Documents/work/sanyi/TB_MZ_GHMXB.xlsx",
"/Users/darcy/Documents/work/sanyi/TB_HZXX.xlsx",
"/Users/darcy/Documents/work/sanyi/TB_MZ_CFZB.xlsx",
"/Users/darcy/Documents/work/sanyi/TB_MZ_JSMXB.xlsx",
"/Users/darcy/Documents/work/sanyi/TB_MZ_JSZFFSMXB.xlsx",
"/Users/darcy/Documents/work/sanyi/TB_MZ_JZMXB.xlsx",
"/Users/darcy/Documents/work/sanyi/TB_MZ_QTCFMX.xlsx",
"/Users/darcy/Documents/work/sanyi/TB_MZ_SFMXB.xlsx",
"/Users/darcy/Documents/work/sanyi/TB_MZ_YPCFMX.xlsx",
"/Users/darcy/Documents/work/sanyi/TB_MZ_ZDMXB.xlsx"
]
for path in path_list:
data = xlrd.open_workbook(path)
(filepath, tempfilepager) = os.path.split(path)
(table_name, extension) = os.path.splitext(tempfilepager)
sheet = data.sheets()[0]
print("Start parse " + path)
parse_sheet(sheet, table_name, 2)
write_file(SQL, "sanyi_oracle.sql")
if __name__ == '__main__':
main()
|
freshchen/dev-tools
|
script/office/csv/mysql_update.py
|
<filename>script/office/csv/mysql_update.py
import os
import pandas as pd
SQL = ""
def write_file(s, file_name):
fh = open(file_name, 'w', encoding='utf-8')
fh.write(s)
fh.close()
def parse(data):
global SQL
for row in data.values:
SQL += "update zf_customer set qie_id=" + str(row[1]) + " where id=" + str(row[0]) + ";\n"
def main():
path_list = [
"./1.csv"
]
for path in path_list:
data = pd.read_csv(path)
parse(data)
write_file(SQL, "zf_customer-update.sql")
if __name__ == '__main__':
main()
|
freshchen/dev-tools
|
script/openstack/openstack-python-client-api.py
|
<reponame>freshchen/dev-tools<gh_stars>1-10
from keystoneauth1.identity import v3
from keystoneauth1 import session
from keystoneauth1 import loading
from keystoneclient.v3 import client as keyclient
from glanceclient import Client
from novaclient import client
from neutronclient.v2_0 import client as ntclient
from heatclient import client as hclient
def get_keystone_session():
loader = loading.get_plugin_loader('password')
auth = v3.Password(auth_url="http://<Openstack Controller Ip>:5000/v3", username="admin", password="<<PASSWORD>>",
project_name="admin", user_domain_id="default", project_domain_id="default")
sess = session.Session(auth=auth)
return sess
def get_nova_client():
sess = get_keystone_session()
nova = client.Client(2, session=sess)
return nova
def get_glance_client():
sess = get_keystone_session()
glance = Client('2', session=sess)
return glance
def get_keystone_client():
sess = get_keystone_session()
keystone = keyclient.Client(session=sess)
return keystone
def get_neutron_client():
sess = get_keystone_session()
neutron = ntclient.Client(session=sess)
return neutron
def get_heat_client():
creds = {}
creds['username'] = 'admin'
creds['password'] = '<PASSWORD>'
creds['auth_url'] = 'http://<Openstack Controller Ip>:5000/v3'
creds['project_name'] = 'admin'
ks_client = keyclient.Client(**creds)
heat_endpoint = ks_client.service_catalog.url_for(service_type='orchestration', endpoint_type='publicURL')
heat = hclient.Client('1', heat_endpoint, token=ks_client.auth_token)
return heat
def list_images():
glance = get_glance_client()
list = glance.images.list()
return list
def show_images():
list = list_images()
for image in list:
        print(image.name, image.id, image.status)
def get_image_id_by_name(image_name):
list = list_images()
id = ''
for image in list:
if image.name == image_name:
id = image.id
return id
return id
def upload_image(image_name, image_path):
glance = get_glance_client()
glance.images.create(name=image_name, disk_format="qcow2", container_format="bare", is_public="true")
id = get_image_id_by_name(image_name)
glance.images.upload(id, open(image_path, 'rb'))
def delete_image(image_name):
glance = get_glance_client()
id = get_image_id_by_name(image_name)
if id != '':
glance.images.delete(id)
|
darienmt/gcp-ads-b-collector
|
aircraft-json-to-bucket/main.py
|
<filename>aircraft-json-to-bucket/main.py
import sys
import os
import json
import time
import requests
from google.cloud import storage
from google.cloud.storage import Blob
device_id = os.getenv('DEVICE_ID')
if not device_id:
raise ValueError('DEVICE_ID env variable is not set')
receiver_url = os.getenv('RECEIVER_URL')
if not receiver_url:
raise ValueError('RECEIVER_URL env variable is not set')
output_bucket = os.getenv('OUTPUT_BUCKET')
if not output_bucket:
raise ValueError('OUTPUT_BUCKET env variable is not set')
sampling_period_seconds = os.getenv('SAMPLING_PERIOD_SECONDS')
if not sampling_period_seconds:
raise ValueError('SAMPLING_PERIOD_SECONDS env variable is not set')
sampling_period = int(sampling_period_seconds)
client = storage.Client()
bucket = client.get_bucket(output_bucket)
while True:
r = requests.get(f'{receiver_url}/data/aircraft.json')
if r.status_code != 200:
raise ValueError(f'ERROR: getting aircraft json data :{r.text}')
aircraft_data = r.json()
now = aircraft_data['now']
info_data = {
'now': now,
'aircraft_count' : len(aircraft_data['aircraft']),
'messages': aircraft_data['messages']
}
print('INFO: ' + json.dumps(info_data))
file_name = f'{device_id}/{now}.json'
blob = Blob(file_name, bucket)
blob.upload_from_string(json.dumps(aircraft_data), content_type='application/json')
print(f'INFO: Uploaded : {file_name}')
time.sleep(sampling_period)
|
darienmt/gcp-ads-b-collector
|
topic-cloud-function/simulation/send_data.py
|
<gh_stars>0
import sys
import os
import time
import json
import random
from google.cloud import pubsub_v1
with open('./sample_data.json') as json_file:
data = json.load(json_file)
device_id = 'simulated_device'
message = {
'device_id' : device_id,
'aircraft_data' : data
}
project_id = os.getenv('PROJECT_ID')
if not project_id:
raise ValueError('PROJECT_ID env variable is not set')
topic = os.getenv('INPUT_TOPIC')
if not topic:
    raise ValueError('INPUT_TOPIC env variable is not set')
client = pubsub_v1.PublisherClient()
full_topic_name = client.topic_path(project_id, topic)
message_bytes = json.dumps(message).encode('utf-8')
print(f'Message to be sent to {full_topic_name}:')
future_id = client.publish(full_topic_name, message_bytes)
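# publish() returns a future; result() blocks until the Pub/Sub service
# acknowledges the message and returns its server-assigned message ID.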
event_id = future_id.result()
print(f'Message sent => Event id: {event_id}')
|
darienmt/gcp-ads-b-collector
|
topic-cloud-function/main.py
|
<reponame>darienmt/gcp-ads-b-collector
import base64
import json
import os
import hashlib
from google.cloud import bigquery
from google.api_core.exceptions import NotFound
from datetime import datetime
def get_table_and_bq_client(dataset_id, table_name):
"""
Returns the table instance and the BigQuery client `(table, bq_client)`.
"""
bq_client = bigquery.Client()
dataset_ref = bq_client.dataset(dataset_id)
try:
bq_client.get_dataset(dataset_ref)
except NotFound:
raise ValueError(f'The dataset {dataset_id} was not found')
table_ref = dataset_ref.table(table_name)
try:
table = bq_client.get_table(table_ref)
except NotFound:
raise ValueError(f'The table {table_name} could not be found in the dataset {dataset_id}')
return (table, bq_client)
def map_aircraft_to_record(aircrafts, message_now, device_id):
"""
Maps the `aircraft` entity to a BigQuery record and its unique id.
Returns `(unique_ids, records)`
"""
def copy_data(aircraft):
result = {
'hex': aircraft.get('hex'),
'squawk': aircraft.get('squawk'),
'flight': aircraft.get('flight'),
'lat': aircraft.get('lat'),
'lon': aircraft.get('lon'),
'nucp': aircraft.get('nucp'),
'seen_pos': aircraft.get('seen_pos'),
'altitude': aircraft.get('altitude'),
'vert_rate': aircraft.get('vert_rate'),
'track': aircraft.get('track'),
'speed': aircraft.get('speed'),
'messages': aircraft.get('messages'),
'seen': aircraft.get('seen'),
'rssi': aircraft.get('rssi'),
'device_id': device_id,
'timestamp': datetime.utcfromtimestamp(float(message_now)).isoformat()
}
result_json = json.dumps(result)
result_hash = hashlib.sha512(result_json.encode('utf-8')).hexdigest()
unique_id = f'{message_now}_{result_hash}'
result['created_at'] = datetime.now().isoformat()
return (unique_id, result)
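    # zip(*...) transposes the (unique_id, record) pairs produced by copy_data
    # into two parallel tuples, e.g. [(id1, r1), (id2, r2)] -> ((id1, id2), (r1, r2)).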
return zip( *map( copy_data, aircrafts ) )
def create_records(data):
"""
Converts the received `data` into `(unique_ids, records)`
"""
device_id = data['device_id']
aircraft_data = data['aircraft_data']
def flattener( aircraft_message ):
message_now = aircraft_message['now']
return map_aircraft_to_record( aircraft_message['aircraft'], message_now, device_id )
unique_ids, records = zip(*map(flattener, aircraft_data))
flatten = lambda l: [item for sublist in l for item in sublist]
return (flatten(unique_ids), flatten(records))
def insert_records(data, table, bq_client):
"""
Insert the `data` aircraft messages into `table` with `bq_client`
"""
unique_ids, records = create_records(data)
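    # row_ids become BigQuery insertIds, enabling best-effort streaming
    # de-duplication; skip_invalid_rows lets valid rows insert even if some fail.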
results = bq_client.insert_rows_json(table, records, row_ids=unique_ids, skip_invalid_rows=True)
def get_error_data(result):
index = result.get('index')
        record = records[index] if index is not None else None
return (record, result.get('errors'))
errors = list(map(get_error_data, results)) if results else []
total_inserted = len(records) - len(errors)
return (total_inserted, errors)
def process_reporter(data_inserter):
"""
Process device aircraft data
"""
total_inserted, errors = data_inserter()
print(f'INFO: Total inserted records: {total_inserted}')
if errors:
for (record, err) in errors:
record_json = json.dumps(record) if record else 'NotFound'
joined_errors = json.dumps(err)
print(f'ERROR: Error inserting {record_json}, Errors : {joined_errors}')
global_bq_client = None
global_table = None
def handler(event, context):
"""
Receives an aircraft device message from a PubSub topic, and inserts it in a BigQuery table
"""
dataset_id = os.getenv('DATASET_ID')
if not dataset_id:
raise ValueError('The DATASET_ID environment variable is not set')
table_id = os.getenv('TABLE_ID')
if not table_id:
raise ValueError('The TABLE_ID environment variable is not set')
global global_bq_client, global_table
    if global_bq_client is None or global_table is None:
global_table, global_bq_client = get_table_and_bq_client(dataset_id, table_id)
data = event.get('data')
if not data:
raise ValueError('No data attribute was found on the event')
message_raw = base64.b64decode(data).decode('utf-8')
message_json = json.loads(message_raw)
process_reporter(lambda : insert_records(message_json, global_table, global_bq_client))
|
darienmt/gcp-ads-b-collector
|
topic-cloud-function/test_main.py
|
import os
import json
from google.cloud import bigquery
from google.api_core.exceptions import NotFound
from main import get_table_and_bq_client
from main import map_aircraft_to_record
from main import create_records
from main import insert_records
from main import handler
class TestMain(object):
@classmethod
def setup_class(cls):
cls.project_id = os.getenv('PROJECT_ID')
if cls.project_id == None:
raise ValueError('The PROJECT_ID environment variable must be configured to run the test')
cls.dataset_id = os.getenv('DATASET_ID')
if cls.dataset_id == None:
raise ValueError('The DATASET_ID environment variable must be configured to run the test')
cls.table_id = os.getenv('TEST_TABLE_ID')
if cls.table_id == None:
raise ValueError('The TEST_TABLE_ID environment variable must be configured to run the test')
cls.table, cls.bq_client = get_table_and_bq_client(cls.dataset_id, cls.table_id)
def test_map_to_record_ok(self):
now = 1554347065.9
device_id = 'this_device'
data = [
{
"hex": "c08562", "squawk": "4304", "lat": 43.517311, "lon": -79.821494, "nucp": 0, "seen_pos": 6.7, "altitude": 2300, "vert_rate": 0, "track": 144, "speed": 176, "messages": 70, "seen": 0.8, "rssi": -23.2
},
]
unique_ids, records = map_aircraft_to_record(data, now, device_id)
assert unique_ids
assert records
assert len(unique_ids) == 1
assert len(records) == 1
def test_test_map_to_record_same_values_has_same_unique_id(self):
now = 1554347065.9
device_id = 'this_device'
data = [
{
"hex": "c08562", "squawk": "4304", "lat": 43.517311, "lon": -79.821494, "nucp": 0, "seen_pos": 6.7, "altitude": 2300, "vert_rate": 0, "track": 144, "speed": 176, "messages": 70, "seen": 0.8, "rssi": -23.2
},
{
"hex": "c08562", "squawk": "4304", "lat": 43.517311, "lon": -79.821494, "nucp": 0, "seen_pos": 6.7, "altitude": 2300, "vert_rate": 0, "track": 144, "speed": 176, "messages": 70, "seen": 0.8, "rssi": -23.2
},
]
unique_ids, records = map_aircraft_to_record(data, now, device_id)
assert unique_ids
assert records
assert len(unique_ids) == 2
assert len(records) == 2
assert unique_ids[0] == unique_ids[1]
def test_create_records_ok(self):
data = {
'device_id' : 'this_device',
'aircraft_data' : [
{
'now' : 1554347065.9,
'aircraft' : [
{
"hex": "c08562", "squawk": "4304", "lat": 43.517311, "lon": -79.821494, "nucp": 0, "seen_pos": 6.7, "altitude": 2300, "vert_rate": 0, "track": 144, "speed": 176, "messages": 70, "seen": 0.8, "rssi": -23.2
},
{
"hex": "c08522", "squawk": "4304", "lat": 43.517311, "lon": -79.821494, "nucp": 0, "seen_pos": 6.7, "altitude": 2300, "vert_rate": 0, "track": 144, "speed": 176, "messages": 70, "seen": 0.8, "rssi": -23.2
}
]
},
{
'now' : 1554247065.9,
'aircraft' : [
{
"hex": "c08362", "squawk": "4304", "lat": 43.517311, "lon": -79.821494, "nucp": 0, "seen_pos": 6.7, "altitude": 2300, "vert_rate": 0, "track": 144, "speed": 176, "messages": 70, "seen": 0.8, "rssi": -23.2
},
{
"hex": "c08422", "squawk": "4304", "lat": 43.517311, "lon": -79.821494, "nucp": 0, "seen_pos": 6.7, "altitude": 2300, "vert_rate": 0, "track": 144, "speed": 176, "messages": 70, "seen": 0.8, "rssi": -23.2
}
]
}
]
}
unique_ids, records = create_records(data)
assert len(unique_ids) == 4
assert len(records) == 4
def test_insert_records_ok(self):
data = {
'device_id' : 'this_device',
'aircraft_data' : [
{
'now' : 1554347065.9,
'aircraft' : [
{
"hex": "c08562", "squawk": "4304", "lat": 43.517311, "lon": -79.821494, "nucp": 0, "seen_pos": 6.7, "altitude": 2300, "vert_rate": 0, "track": 144, "speed": 176, "messages": 70, "seen": 0.8, "rssi": -23.2
},
{
"hex": "c08522", "squawk": "4304", "lat": 43.517311, "lon": -79.821494, "nucp": 0, "seen_pos": 6.7, "altitude": 2300, "vert_rate": 0, "track": 144, "speed": 176, "messages": 70, "seen": 0.8, "rssi": -23.2
}
]
},
{
'now' : 1554247065.9,
'aircraft' : [
{
"hex": "c08362", "squawk": "4304", "lat": 43.517311, "lon": -79.821494, "nucp": 0, "seen_pos": 6.7, "altitude": 2300, "vert_rate": 0, "track": 144, "speed": 176, "messages": 70, "seen": 0.8, "rssi": -23.2
},
{
"hex": "c08422", "squawk": "4304", "lat": 43.517311, "lon": -79.821494, "nucp": 0, "seen_pos": 6.7, "altitude": 2300, "vert_rate": 0, "track": 144, "speed": 176, "messages": 70, "seen": 0.8, "rssi": -23.2
}
]
}
]
}
total_inserted, errors = insert_records(data, TestMain.table, TestMain.bq_client)
assert total_inserted == 4
assert not errors
def test_insert_records_with_errors(self):
data = {
'device_id' : 'this_device',
'aircraft_data' : [
{
'now' : 1554347065.9,
'aircraft' : [
{
"squawk": "4304", "lat": 43.517311, "lon": -79.821494, "nucp": 0, "seen_pos": 6.7, "altitude": 2300, "vert_rate": 0, "track": 144, "speed": 176, "messages": 70, "seen": 0.8, "rssi": -23.2
}
]
}
]
}
total_inserted, errors = insert_records(data, TestMain.table, TestMain.bq_client)
assert total_inserted == 0
assert len(errors) == 1
# def test_handler_ok(self):
# event = {
# 'data' : "<KEY>
# }
# context = {}
# handler(event, context)
# def test_handler_with_errors(self):
# event = {
# 'data' : "<KEY>"
# }
# context = {}
# handler(event, context)
|
darienmt/gcp-ads-b-collector
|
aircraft-json-collector/main.py
|
import sys
import os
import time
import json
import random
from google.cloud import pubsub_v1
import requests
project_id = os.getenv('PROJECT_ID')
if not project_id:
raise ValueError('PROJECT_ID env variable is not set')
topic = os.getenv('INPUT_TOPIC')
if not topic:
    raise ValueError('INPUT_TOPIC env variable is not set')
device_id = os.getenv('DEVICE_ID')
if not device_id:
raise ValueError('DEVICE_ID env variable is not set')
aircraft_json_url = os.getenv('AIRCRAFT_JSON_URL')
if not aircraft_json_url:
raise ValueError('AIRCRAFT_JSON_URL env variable is not set')
r = requests.get(aircraft_json_url)
if r.status_code != 200:
raise ValueError(f'Error getting aircraft json data :{r.text}')
aircraft_data = r.json()
aircraft_count = len(aircraft_data['aircraft'])
print(f'Messages: {aircraft_count}')
message = {
'device_id' : device_id,
'aircraft_data' : [aircraft_data]
}
client = pubsub_v1.PublisherClient()
full_topic_name = client.topic_path(project_id, topic)
message_bytes = json.dumps(message).encode('utf-8')
print(f'Message to be sent to {full_topic_name}:')
future_id = client.publish(full_topic_name, message_bytes)
event_id = future_id.result()
print(f'Message sent => Event id: {event_id}')
|
dansuh17/deep-supervised-hashing
|
train.py
|
<reponame>dansuh17/deep-supervised-hashing
"""
Liu et al., "Deep Supervised Hashing for Fast Image Retrieval"
"""
from collections import defaultdict
import random
import torch
from torch import nn
from torch import optim
from torchvision.datasets.mnist import MNIST
from torch.utils.data import DataLoader, Dataset
import torchvision.transforms as transforms
from torch.utils.tensorboard import SummaryWriter
import numpy as np
from model import LiuDSH
# hyper-parameters
DATA_ROOT = 'data_out'
LR_INIT = 3e-4
BATCH_SIZE = 128
EPOCH = 40
NUM_WORKERS = 8
CODE_SIZE = 8 # bits
MARGIN = 5
ALPHA = 0.01 # TODO: adjust
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
torch.set_default_dtype(torch.float)
mnist_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=(0.1307, ), std=(0.3081, )),
])
class MNISTPairDataset(Dataset):
def __init__(self, data_root: str, transform=None, train=True):
super().__init__()
self.dataset = MNIST(root=data_root, train=train, transform=transform, download=True)
self.size = len(self.dataset)
def __len__(self):
return self.size
def __getitem__(self, item):
# return image pair
x_img, x_target = self.dataset[item]
pair_idx = item
# choose a different index
while pair_idx == item:
pair_idx = random.randint(0, self.size - 1)
y_img, y_target = self.dataset[pair_idx]
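        # Label convention used by the pairwise loss: 0 = same digit class
        # (similar pair), 1 = different classes (dissimilar pair).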
target_equals = 0 if x_target == y_target else 1
return x_img, x_target, y_img, y_target, target_equals
train_pair_dataset = MNISTPairDataset(data_root=DATA_ROOT, train=True, transform=mnist_transform)
print(f'Train set size: {len(train_pair_dataset)}')
test_pair_dataset = MNISTPairDataset(data_root=DATA_ROOT, train=False, transform=mnist_transform)
print(f'Test set size: {len(test_pair_dataset)}')
train_dataloader = DataLoader(
train_pair_dataset,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=NUM_WORKERS)
test_dataloader = DataLoader(
test_pair_dataset,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=NUM_WORKERS)
model = LiuDSH(code_size=CODE_SIZE).to(device)
mse_loss = nn.MSELoss(reduction='none')
l1_loss = nn.L1Loss(reduction='mean')
optimizer = optim.Adam(model.parameters(), lr=LR_INIT)
class Trainer:
def __init__(self):
self.global_step = 0
self.global_epoch = 0
self.total_epochs = EPOCH
self.input_shape = (1, 28, 28)
self.writer = SummaryWriter()
self.writer.add_graph(model, self.generate_dummy_input(), verbose=True)
def __del__(self):
self.writer.close()
def generate_dummy_input(self):
return torch.randn(1, *self.input_shape)
def run_step(self, model, x_imgs, y_imgs, target_equals, train: bool):
# convert from double (float64) -> float32
# TODO: dataset generates float64 by default?
x_out = model(x_imgs)
y_out = model(y_imgs)
squared_loss = torch.mean(mse_loss(x_out, y_out), dim=1)
# T1: 0.5 * (1 - y) * dist(x1, x2)
positive_pair_loss = (0.5 * (1 - target_equals) * squared_loss)
mean_positive_pair_loss = torch.mean(positive_pair_loss)
# T2: 0.5 * y * max(margin - dist(x1, x2), 0)
zeros = torch.zeros_like(squared_loss).to(device)
margin = MARGIN * torch.ones_like(squared_loss).to(device)
negative_pair_loss = 0.5 * target_equals * torch.max(zeros, margin - squared_loss)
mean_negative_pair_loss = torch.mean(negative_pair_loss)
        # T3: alpha * (dist_l1(abs(x1), 1) + dist_l1(abs(x2), 1))
mean_value_regularization = ALPHA * (
l1_loss(torch.abs(x_out), torch.ones_like(x_out)) +
l1_loss(torch.abs(y_out), torch.ones_like(y_out)))
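        # Combined objective as implemented here:
        #   L = mean(0.5*(1-y)*d) + mean(0.5*y*max(m - d, 0))
        #       + alpha * (|| |b1| - 1 ||_1 + || |b2| - 1 ||_1)
        # where d is the mean squared difference between the two code vectors b1, b2.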
loss = mean_positive_pair_loss + mean_negative_pair_loss + mean_value_regularization
print(f'epoch: {self.global_epoch:02d}\t'
f'step: {self.global_step:06d}\t'
f'loss: {loss.item():04f}\t'
f'positive_loss: {mean_positive_pair_loss.item():04f}\t'
f'negative_loss: {mean_negative_pair_loss.item():04f}\t'
f'regularize_loss: {mean_value_regularization:04f}')
# log them to tensorboard
self.writer.add_scalar('loss', loss.item(), self.global_step)
self.writer.add_scalar('positive_pair_loss', mean_positive_pair_loss.item(), self.global_step)
self.writer.add_scalar('negative_pair_loss', mean_negative_pair_loss.item(), self.global_step)
self.writer.add_scalar('regularizer_loss', mean_value_regularization.item(), self.global_step)
if train:
optimizer.zero_grad()
loss.backward()
optimizer.step()
return x_out, y_out
def train(self):
for _ in range(self.total_epochs):
for x_imgs, x_targets, y_imgs, y_targets, target_equals in train_dataloader:
target_equals = target_equals.type(torch.float)
self.run_step(model, x_imgs, y_imgs, target_equals, train=True)
self.global_step += 1
# accumulate tensors for embeddings visualization
test_imgs = []
test_targets = []
hash_embeddings = []
embeddings = []
for test_x_imgs, test_x_targets, test_y_imgs, test_y_targets, test_target_equals in test_dataloader:
test_target_equals = test_target_equals.type(torch.float)
with torch.no_grad():
x_embeddings, y_embeddings = self.run_step(
model, test_x_imgs, test_y_imgs, test_target_equals, train=False)
# show all images that consist the pairs
test_imgs.extend([test_x_imgs.cpu()[:5], test_y_imgs.cpu()[:5]])
test_targets.extend([test_x_targets.cpu()[:5], test_y_targets.cpu()[:5]])
# embedding1: hamming space embedding
x_hash = torch.round(x_embeddings.cpu()[:5].clamp(-1, 1) * 0.5 + 0.5)
y_hash = torch.round(y_embeddings.cpu()[:5].clamp(-1, 1) * 0.5 + 0.5)
hash_embeddings.extend([x_hash, y_hash])
            # embedding2: raw embedding
embeddings.extend([x_embeddings.cpu(), y_embeddings.cpu()])
self.global_step += 1
self.writer.add_histogram(
'embedding_distribution',
torch.cat(embeddings).cpu().numpy(),
global_step=self.global_step)
# draw embeddings for a single batch - very nice for visualizing clusters
self.writer.add_embedding(
torch.cat(hash_embeddings),
metadata=torch.cat(test_targets),
label_img=torch.cat(test_imgs),
global_step=self.global_step)
# TODO: print text as hexadecimal strings
hash_vals = torch.cat(hash_embeddings).numpy().astype(int)
hash_vals = np.packbits(hash_vals, axis=-1).squeeze() # to uint8
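        # With CODE_SIZE = 8 each row of bits packs into a single uint8,
        # e.g. [0, 0, 0, 0, 1, 1, 1, 1] -> 0x0f.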
targets = torch.cat(test_targets).numpy().astype(int)
hashdict = defaultdict(list)
for target_class, hash_value in zip(targets, hash_vals):
hashdict[target_class].append(f'{hash_value:#04x}') # ex) 15 -> 0x0f
result_texts = [] # TODO: debug
for target_class in sorted(hashdict.keys()):
for hashval in hashdict[target_class]:
result_texts.append(f'class: {target_class:02d} - {hashval}')
self.writer.add_text(
f'e{self.global_epoch}_hashvals/{target_class:02d}',
hashval, global_step=self.global_step)
result_text = '\n'.join(result_texts)
print(result_text) # TODO debug
self.global_epoch += 1
if __name__ == '__main__':
trainer = Trainer()
trainer.train()
|
dansuh17/deep-supervised-hashing
|
model.py
|
<reponame>dansuh17/deep-supervised-hashing<filename>model.py
import torch
from torch import nn
import torch.nn.functional as F
class ResBlock(nn.Module):
def __init__(self, in_channels: int, out_channels: int, stride=1):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(
in_channels=in_channels, out_channels=out_channels,
kernel_size=3, stride=stride, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=False),
nn.BatchNorm2d(out_channels),
)
self.downsample_layer = None
self.do_downsample = False
if in_channels != out_channels or stride != 1:
self.do_downsample = True
self.downsample_layer = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, stride, 1, bias=False),
nn.BatchNorm2d(out_channels),
)
# initialize weights
self.apply(self.init_weights)
def forward(self, x):
identity = x
out = self.net(x)
if self.do_downsample:
identity = self.downsample_layer(x)
return F.relu(out + identity, inplace=True)
@staticmethod
def init_weights(m):
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
class ResNet(nn.Module):
def __init__(self, num_classes: int):
super().__init__()
self.net = nn.Sequential(
ResBlock(in_channels=1, out_channels=16),
ResBlock(in_channels=16, out_channels=16),
ResBlock(in_channels=16, out_channels=16, stride=2),
)
self.linear_input_size = 3136
self.linear = nn.Linear(self.linear_input_size, num_classes)
# initialize weights
self.apply(self.init_weights)
def forward(self, x):
x = self.net(x)
x = x.view(-1, self.linear_input_size)
return self.linear(x)
@staticmethod
def init_weights(m):
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
class LiuDSH(nn.Module):
def __init__(self, code_size: int):
super().__init__()
resnet = ResNet(num_classes=10)
resnet.linear = nn.Linear(
in_features=resnet.linear_input_size, out_features=code_size)
self.net = resnet
# initialize weights
self.apply(self.init_weights)
def forward(self, x):
return self.net(x)
@staticmethod
def init_weights(m):
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
if __name__ == '__main__':
dummy_tensor = torch.randn((10, 1, 28, 28))
dsh = LiuDSH(code_size=11)
print(dsh)
print(dsh(dummy_tensor).size())
|
monkey-cli/monkey-playground
|
union_find_maze/maze.py
|
import math
import numpy as np
from random import randrange
from union_find import union, find, connected
from utils import get_possible_next_steps, get_non_connected_next_steps
from mock import get_maze
"""
Task: Try to find the route in the provided maze from origin (0,0) to destination (N-1,M-1).
N-number of rows, M-number of columns.
The maze is represented as a matrix of bits, where 0 represents an empty slot and 1 represents a wall.
# 0 -> empty
# 1 -> wall
Find the connected coordinates with value of 0 that connect from start to destination.
To solve the problem we will use the Disjoint Set (Union Find) algorithm.
"""
maze = get_maze()
rows = np.shape(maze)[0]
columns = np.shape(maze)[1]
# start = maze[0][0]
# end = maze[rows-1][columns-1]
# The number of elements in this union find
size = rows * columns
if size <= 0:
raise Exception("Size <= 0 is not allowed")
# Step 1
# construct a bijection (a mapping) between the coordinates of the matrix and integers in range [0, n).
# this will allow an array based union find, easy to work with.
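# For example, with `columns` columns the coordinate (row, column) maps to the
# integer row * columns + column; hashTable below stores the inverse mapping
# (index -> coordinate) so visited indexes can be reported as maze coordinates.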
hashTable = []
# data[i] points to the parent of i, if data[i] = i then i is a root node
data = []
hashIndex = 0
for row in range(0, rows):
for column in range(0, columns):
hashTable.append((row, column))
data.append(hashIndex)
hashIndex += 1
# ------------------------------------------------------------------------
def find_next_steps(currect_index):
"""
Helper function used to find only the acceptable next steps
"""
matrixCoord = hashTable[currect_index]
possible_next_steps = get_possible_next_steps(maze, hashTable, matrixCoord)
next_steps = get_non_connected_next_steps(
data, currect_index, possible_next_steps
)
return next_steps
# ------------------------------------------------------------------------
def run_union_find(onStepUpdate=None):
# start from the start of the maze and look for the next connection
currect_index = 0 # index in the data array
# while the start and end of the maze are not connected
# try to find the next connected item of the path
steps = []
while not connected(data, 0, size - 1):
        # for the current cell, get all surrounding coordinates;
        # from these coordinates randomly select one as the next step,
        # with the condition that it is not already connected to the current cell and is not a "WALL"
# for every loop save the steps
steps.append(currect_index)
next_steps = find_next_steps(currect_index)
if len(next_steps) == 0:
"""
Dead end reached. Need to get back and look at previous connections next steps.
"""
print(
"Dead end at index:",
currect_index,
"and coordinate:",
hashTable[currect_index],
)
if onStepUpdate:
onStepUpdate(
{"status": "DEAD_END", "value": hashTable[currect_index]}
)
prev_step = steps.index(currect_index) - 1
while (
prev_step >= 0 and len(find_next_steps(steps[prev_step])) == 0
):
# go check for a new route starting from one step before the current one
                # loop back until a node with possible next steps to follow is found
prev_step -= 1
if prev_step >= 0:
print("Loogin for new route at index", steps[prev_step])
currect_index = steps[prev_step]
continue
else:
print("Could not find a route from start to end... :(")
break
# get a random item from the array
next_index = next_steps[randrange(len(next_steps))]
union(data, currect_index, next_index)
print("Iteration at index", currect_index)
if onStepUpdate:
onStepUpdate(
{"status": "NEXT_STEP", "value": hashTable[currect_index]}
)
# prepare for next loop
currect_index = next_index
print("Iteration at last index", size - 1)
print("--------------------------------------------------------")
# append last index of the array
steps.append(size - 1)
step_coordinates = list(map(lambda item: hashTable[item], steps))
print("Iteration traversed the following coordinates:")
print(step_coordinates)
|
monkey-cli/monkey-playground
|
union_find_maze/mock.py
|
maze = [
[0, 0, 1, 1, 0, 0, 1, 1],
[1, 0, 1, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 1, 0, 1, 1],
[1, 1, 1, 0, 1, 1, 0, 0],
[0, 0, 1, 0, 0, 1, 1, 1],
[1, 0, 1, 1, 0, 0, 0, 1],
[0, 0, 0, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 0, 0, 0, 1],
[0, 1, 1, 1, 0, 1, 1, 0],
[1, 0, 0, 1, 0, 0, 0, 1],
[0, 1, 0, 1, 1, 0, 0, 0],
]
# maze = [
# [0, 1],
# [0, 0]
# ]
# Generate maze example from
# https://github.com/keesiemeijer/maze-generator
# Remove the first and last rows as they represent the top and bottom borders
# For each row also ignore the first and last items since they represent the left and right borders
maze20x20 = [
"10000000001000001000000010000000000010001",
"11111011101110111010111010111111101010101",
"10001000100000100010001010100010001010101",
"10101110111110101111101110101110111011101",
"10100000101000100000100010101000101010001",
"10111111101011111110111010101011101010101",
"10001010000010000010101000101000101000101",
"11101010111111111010101111101110101111101",
"10001000000010000010001000001000100010001",
"10111111111010111011101110111011111010111",
"10000010001000100010100010001000100010101",
"10111011101011101110111010101110101010101",
"10101000100000101000001010101000101010101",
"10101110101110101011111011101011101110101",
"10000010100010001010000010001010001000101",
"11111010111011111010111110111010111011101",
"10001010101010001010100010100010100010001",
"11101010101010101010101010101110101110101",
"10001010100010100010001000101000001000101",
"10111010111010101111111110101011111111101",
"10100010001000101000000000101000000000101",
"10101111101011111011111111101110111110101",
"10000010001010000010000000001010100010001",
"11111110111110111110111111111010101011101",
"10000000100000100010100000000010101000101",
"10111110101111111010111111111010101110101",
"10000010101000000010001000001000100010101",
"11111010101110101011101011101111101110101",
"10001010100010101010001000100000101000101",
"10101011111010101110111110111110101011101",
"10100010000010100010000000000010101000101",
"10111110111111111011111111111110101110111",
"10000000100010000000001000100000100010001",
"10111111101010111111101010101110111011111",
"10100000001010000010100010100010001010001",
"10111111101011111010111110111011111010101",
"10000000001010001010100000101000000010101",
"11111111111010101010101111101111111110101",
"10000000000000100010000000000000000000101",
]
maze5x4 = [
"10001000101",
"10101010101",
"10100010101",
"10111110101",
"10001010101",
"10101010101",
"10101000001",
]
maze10x10 = [
"100000000000100000001",
"101010111111101111101",
"101010100000001000001",
"111010101111111011101",
"100010100000101010101",
"101111111110101010101",
"101000000010101010101",
"101011101110101010101",
"100010101000101010001",
"101110101011101010111",
"101000101010001010001",
"101110101010101011101",
"100010100000101000101",
"111010111111111110111",
"100010000010000010001",
"101111101010111011101",
"101000001010101010001",
"101011111011101010101",
"100010000000001000101",
]
def construct_maze(maze_template):
temp_maze = []
for row in maze_template:
valid_row = row[1 : len(row) - 1] # remove 1st and last item
split_row = [int(x) for x in valid_row]
temp_maze.append(split_row)
return temp_maze
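# For example, construct_maze(["10001", "10101"]) strips the border characters
# and yields [[0, 0, 0], [0, 1, 0]].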
# ---------------------------------------------------------------
maze = construct_maze(maze10x10)
def get_maze():
return maze
|
monkey-cli/monkey-playground
|
union_find_maze/union_find.py
|
# union find equipped with "path compression"
def find(data, i):
root = i
# Find the root of the dataset
while root != data[root]:
root = data[root]
# Compress the path leading back to the root.
# this operation is called "path compression"
while i != root:
next = data[i]
data[i] = root
i = next
return root
def union(data, i, j):
if connected(data, i, j):
return
pi, pj = find(data, i), find(data, j)
if pi != pj:
data[pi] = pj
def connected(data, i, j):
return find(data, i) == find(data, j)
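# Minimal usage sketch (not part of the original module): a hypothetical
# five-element forest where 0-1 and 1-2 are unioned, then connectivity queried.
if __name__ == "__main__":
    demo = list(range(5))          # every element starts as its own root
    union(demo, 0, 1)
    union(demo, 1, 2)
    print(connected(demo, 0, 2))   # True: 0 and 2 now share a root
    print(connected(demo, 0, 3))   # False: 3 was never unioned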
|
monkey-cli/monkey-playground
|
union_find_maze/main.py
|
<reponame>monkey-cli/monkey-playground<filename>union_find_maze/main.py
from visualizer.maze_visualizer import create_matrix, on_step_reached
from maze import run_union_find
def on_step_update(data):
"""
Callback method to be called on every next step update from the union find runtime.
Example:
1. new step was found
2. dead-end reached
"""
on_step_reached(data)
def onRenderComplete():
print("Maze render complete.\nProceeding to path finding...")
run_union_find(on_step_update)
# trigger the matrix render and, after it is done, start solving the maze
create_matrix(onRenderComplete)
|
monkey-cli/monkey-playground
|
union_find_maze/visualizer/maze_visualizer.py
|
import numpy as np
from turtle import Screen, Turtle
from visualizer import utils
import mock
draw_graph, fill_matrix, fill_coordinate, is_matrix_edge = (
utils.draw_graph,
utils.fill_matrix,
utils.fill_coordinate,
utils.is_matrix_edge,
)
# ------------------------------------------------------------------------------------
def init_screen():
screen = Screen()
screen.setup(1.0, 1.0) # display size window
turtle = Turtle(visible=False) # hide the cursor completely
turtle.speed(0)
# screen sizes
# width, height = screen.window_width(), screen.window_height()
def wait_screen():
screen = Screen()
screen.tracer(True)
screen.mainloop()
# ------------------------------------------------------------------------------------
def create_matrix(onComplete=None):
init_screen()
# matrix specs
maze = mock.get_maze()
matrix_rows, matrix_columns = np.shape(maze)[0], np.shape(maze)[1]
draw_graph(matrix_rows, matrix_columns)
# fill walls and matrix edges
fill_matrix(maze, matrix_rows, matrix_columns)
if onComplete:
onComplete()
wait_screen()
def on_step_reached(data):
coordinate = data["value"]
status = data["status"]
maze = mock.get_maze()
matrix_rows, matrix_columns = np.shape(maze)[0], np.shape(maze)[1]
if not is_matrix_edge(maze, coordinate):
fill_coordinate(
coordinate,
matrix_rows,
matrix_columns,
("yellow" if status == "NEXT_STEP" else "red"),
)
|
monkey-cli/monkey-playground
|
union_find_maze/visualizer/utils.py
|
from turtle import Screen, Turtle
import numpy as np
turtle = Turtle(visible=False) # hide the cursor completely
turtle.speed(0)
def draw_graph(num_rows, num_columns, space_from_edge=10):
columns = num_columns + 1
rows = num_rows + 1
screen = Screen()
width, height = screen.window_width(), screen.window_height()
x = -(width / 2 - space_from_edge)
distanceX = width / columns
for _ in range(columns):
turtle.penup()
turtle.goto(x, (height / 2))
turtle.pendown()
turtle.goto((x, -(height / 2)))
x += distanceX
y = height / 2 - space_from_edge
distanceY = height / rows
for _ in range(rows):
turtle.penup()
turtle.goto((width / 2), y)
turtle.pendown()
turtle.goto((-(width / 2)), y)
y -= distanceY
def fill_matrix(maze, matrix_rows, matrix_columns):
for row in range(0, matrix_rows):
for column in range(0, matrix_columns):
if is_matrix_edge(maze, (row, column)):
fill_coordinate(
(row, column), matrix_rows, matrix_columns, "green"
)
if maze[row][column] == 1: # fill a wall
fill_coordinate(
(row, column), matrix_rows, matrix_columns, "black"
)
def fill_coordinate(
coordinate, num_rows, num_columns, fill_color="green", space_from_edge=10
):
turtle.color(fill_color)
turtle.fillcolor(fill_color)
rows = num_rows + 1
columns = num_columns + 1
screen = Screen()
width, height = screen.window_width(), screen.window_height()
distanceX = width / columns
distanceY = height / rows
# x -> represents columns
# y -> represents rows
Y, X = coordinate[0], coordinate[1]
# this is tested in MAC environment and the coordinate calculation is based on the mac-os display grid
startX = -width / 2 + X * distanceX + space_from_edge
startY = height / 2 - Y * distanceY - space_from_edge
turtle.begin_fill()
turtle.up()
turtle.goto(startX, startY)
turtle.down()
# draw top
turtle.forward(distanceX)
# draw right
turtle.right(90)
turtle.forward(distanceY)
# draw bottom
turtle.right(90)
turtle.forward(distanceX)
# draw left
turtle.right(90)
turtle.forward(distanceY)
turtle.right(90)
turtle.end_fill()
# reset colors back to default
turtle.color("black")
turtle.fillcolor("white")
return []
def is_matrix_edge(matrix, coordinate):
rows, columns = np.shape(matrix)[0], np.shape(matrix)[1]
X, Y = coordinate[0], coordinate[1]
if X == 0 and Y == 0:
return True
if X == rows - 1 and Y == columns - 1:
return True
return False
|
monkey-cli/monkey-playground
|
union_find_maze/utils.py
|
import numpy as np
from union_find import union, find, connected
def get_surroundings(matrix, coord):
"""
Get surrounding coordinates only if their indexes are part of the matrix
"""
width = np.shape(matrix)[0]
height = np.shape(matrix)[1]
coordinates = []
# top
(
coordinates.append((coord[0], coord[1] - 1))
if coord[1] - 1 >= 0
else None
)
# bottom
(
coordinates.append((coord[0], coord[1] + 1))
if coord[1] + 1 < height
else None
)
# left
(
coordinates.append((coord[0] - 1, coord[1]))
if coord[0] - 1 >= 0
else None
)
# right
(
coordinates.append((coord[0] + 1, coord[1]))
if coord[0] + 1 < width
else None
)
return coordinates
def find_indexes_of_cords(coordinates, hashTable):
indexes = []
for item in coordinates:
indexes.append(hashTable.index(item))
return indexes
def get_possible_next_steps(matrix, hashTable, coordinate):
"""
Get possible next steps indexes for the maze route.
"""
surroundings_coordinates = get_surroundings(matrix, coordinate)
# get only surrounding coordinates that represent an "EMPTY" /not "WALL"
filtered_surroundings = []
for coord in surroundings_coordinates:
item = matrix[coord[0]][coord[1]]
if item == 0:
filtered_surroundings.append(coord)
indexes = find_indexes_of_cords(filtered_surroundings, hashTable)
return indexes
def get_non_connected_next_steps(data, currentStep, possible_next_steps):
next_steps = []
for step in possible_next_steps:
if not connected(data, currentStep, step):
next_steps.append(step)
return next_steps
|
Paul-Verardi/nipyapi
|
tests/test_versioning.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `nipyapi` package."""
from __future__ import absolute_import
import pytest
from deepdiff import DeepDiff
from tests import conftest
from nipyapi import registry, nifi, versioning, canvas, utils, config
def test_create_registry_client(regress_flow_reg):
# First remove any leftover test client connections
[versioning.delete_registry_client(li) for
li in versioning.list_registry_clients().registries
if conftest.test_registry_client_name in li.component.name
]
r = versioning.create_registry_client(
name=conftest.test_registry_client_name,
uri=conftest.registry_test_endpoints[0][0],
description='a test connection'
)
assert isinstance(r, nifi.RegistryClientEntity)
# test duplicate catch result
with pytest.raises(ValueError):
_ = versioning.create_registry_client(
name=conftest.test_registry_client_name,
uri=conftest.registry_test_endpoints[0][0],
description='who cares?'
)
def test_list_registry_clients():
r = versioning.list_registry_clients()
assert isinstance(r, nifi.RegistryClientsEntity)
def test_get_registry_client():
f_reg_client = versioning.get_registry_client(
conftest.test_registry_client_name
)
r1 = versioning.get_registry_client(f_reg_client.component.name)
assert isinstance(r1, nifi.RegistryClientEntity)
assert r1.component.name == conftest.test_registry_client_name
r2 = versioning.get_registry_client(r1.id, 'id')
assert r2.id == r1.id
with pytest.raises(ValueError):
_ = versioning.get_registry_client('', 'NotIDorName')
def test_delete_registry_client():
f_reg_client = versioning.get_registry_client(
conftest.test_registry_client_name
)
r = versioning.delete_registry_client(f_reg_client)
assert isinstance(r, nifi.RegistryClientEntity)
assert r.uri is None
assert r.component.name == conftest.test_registry_client_name
with pytest.raises(AssertionError):
_ = versioning.delete_registry_client('FakeClient')
# TODO Add test for when a PG is attached to the client
def test_list_registry_buckets(regress_flow_reg, fix_bucket):
_ = fix_bucket()
r = versioning.list_registry_buckets()
assert isinstance(r, list)
assert len(r) >= 1
def test_create_registry_bucket(regress_flow_reg, fix_bucket):
# We include fix_bucket to handle the cleanup
r = versioning.create_registry_bucket(conftest.test_bucket_name)
assert isinstance(r, registry.Bucket)
assert r.name == conftest.test_bucket_name
# Bucket names are unique
with pytest.raises(ValueError) as v:
_ = versioning.create_registry_bucket(conftest.test_bucket_name)
# Cleanup, as no test fixture to do so here
def test_delete_registry_bucket(regress_flow_reg, fix_bucket):
f_bucket = fix_bucket()
r = versioning.delete_registry_bucket(f_bucket)
assert r.identifier == f_bucket.identifier
with pytest.raises(ValueError):
_ = versioning.delete_registry_bucket('FakeNews')
def test_get_registry_bucket(regress_flow_reg, fix_bucket):
f_bucket = fix_bucket()
r1 = versioning.get_registry_bucket(f_bucket.name)
assert r1.name == conftest.test_bucket_name
r2 = versioning.get_registry_bucket(r1.identifier, 'id')
assert r2.name == r1.name
with pytest.raises(ValueError):
_ = versioning.get_registry_bucket('Irrelevant', 'Invalid')
r3 = versioning.get_registry_bucket('NonExistantProbably')
assert r3 is None
def test_save_flow_ver(regress_flow_reg, fix_bucket, fix_pg, fix_proc):
f_reg_client = conftest.ensure_registry_client(
config.registry_local_name
)
f_bucket = fix_bucket()
f_pg = fix_pg.generate()
test_bucket = versioning.get_registry_bucket(f_bucket.identifier, 'id')
assert test_bucket.name == conftest.test_bucket_name
r1 = versioning.save_flow_ver(
process_group=f_pg,
registry_client=f_reg_client,
bucket=test_bucket,
flow_name=conftest.test_versioned_flow_name,
comment='a test comment',
desc='a test description'
)
assert isinstance(r1, nifi.VersionControlInformationEntity)
# Next we validate you can't duplicate a flow name in a bucket
with pytest.raises(ValueError):
_ = versioning.save_flow_ver(
process_group=f_pg,
registry_client=f_reg_client,
bucket=f_bucket,
flow_name=conftest.test_versioned_flow_name,
comment='NiPyApi Test',
desc='NiPyApi Test'
)
# Add a processor, refresh status, and save a new version
fix_proc.generate(parent_pg=f_pg)
f_pg = canvas.get_process_group(f_pg.id, 'id')
r2 = versioning.save_flow_ver(
process_group=f_pg,
registry_client=f_reg_client,
bucket=f_bucket,
flow_id=r1.version_control_information.flow_id,
comment='a test comment'
)
assert isinstance(r2, nifi.VersionControlInformationEntity)
assert r2.version_control_information.version > \
r1.version_control_information.version
with pytest.raises(ValueError):
_ = versioning.save_flow_ver(
process_group=f_pg,
registry_client=f_reg_client,
bucket=f_bucket,
flow_name=conftest.test_versioned_flow_name,
comment='a test comment',
desc='a test description',
refresh=False
)
# shortcut to clean up the test objects when not using the fixture
conftest.cleanup_reg()
def test_stop_flow_ver(regress_flow_reg, fix_ver_flow):
r1 = versioning.stop_flow_ver(fix_ver_flow.pg)
assert isinstance(r1, nifi.VersionControlInformationEntity)
assert r1.version_control_information is None
with pytest.raises(ValueError,
match='not currently under Version Control'):
_ = versioning.stop_flow_ver(fix_ver_flow.pg)
with pytest.raises(ValueError):
_ = versioning.stop_flow_ver(fix_ver_flow.pg, refresh=False)
def test_revert_flow_ver(regress_flow_reg, fix_ver_flow):
r1 = versioning.revert_flow_ver(fix_ver_flow.pg)
assert isinstance(r1, nifi.VersionedFlowUpdateRequestEntity)
# TODO: Add Tests for flows with data loss on reversion
with pytest.raises(ValueError):
_ = versioning.revert_flow_ver('NotAPg')
def test_list_flows_in_bucket(regress_flow_reg, fix_ver_flow):
r1 = versioning.list_flows_in_bucket(fix_ver_flow.bucket.identifier)
assert isinstance(r1, list)
assert isinstance(r1[0], registry.VersionedFlow)
with pytest.raises(ValueError, match='does not exist'):
_ = versioning.list_flows_in_bucket('NiPyApi-FakeNews')
def test_get_flow_in_bucket(regress_flow_reg, fix_ver_flow):
r1 = versioning.get_flow_in_bucket(
fix_ver_flow.bucket.identifier,
fix_ver_flow.flow.identifier,
'id'
)
assert isinstance(r1, registry.VersionedFlow)
assert r1.identifier == fix_ver_flow.info.version_control_information.\
flow_id
r2 = versioning.get_flow_in_bucket(fix_ver_flow.bucket.identifier,
'fakenews', 'id')
assert r2 is None
def test_get_latest_flow_ver(regress_flow_reg, fix_ver_flow):
r1 = versioning.get_latest_flow_ver(
fix_ver_flow.bucket.identifier,
fix_ver_flow.flow.identifier
)
assert isinstance(r1, registry.VersionedFlowSnapshot)
with pytest.raises(ValueError, match='does not exist'):
_ = versioning.get_latest_flow_ver(
fix_ver_flow.bucket.identifier,
'fakenews'
)
def test_update_flow_ver():
# This function is tested in test_complex_template_versioning
pass
def test_list_flow_versions():
# TODO: Implement test
pass
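# A minimal sketch (editor-added illustration, not part of the original suite)
# of how test_list_flow_versions might be filled in, assuming the fix_ver_flow
# fixture provides bucket and flow identifiers as in the surrounding tests:
# def test_list_flow_versions_sketch(regress_flow_reg, fix_ver_flow):
#     vers = versioning.list_flow_versions(
#         fix_ver_flow.bucket.identifier,
#         fix_ver_flow.flow.identifier
#     )
#     assert isinstance(vers, list)
#     assert vers[0].version == 1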
def test_get_version_info(regress_flow_reg, fix_ver_flow):
r1 = versioning.get_version_info(fix_ver_flow.pg)
assert isinstance(r1, nifi.VersionControlInformationEntity)
with pytest.raises(ValueError):
_ = versioning.get_version_info('NotAPG')
def test_create_flow(regress_flow_reg, fix_ver_flow):
r1 = versioning.create_flow(
bucket_id=fix_ver_flow.bucket.identifier,
flow_name=conftest.test_cloned_ver_flow_name,
)
assert isinstance(r1, registry.VersionedFlow)
assert r1.name == conftest.test_cloned_ver_flow_name
# test duplicate behavior
with pytest.raises(ValueError):
_ = versioning.create_flow(
bucket_id=fix_ver_flow.bucket.identifier,
flow_name=conftest.test_cloned_ver_flow_name,
)
def test_create_flow_version(regress_flow_reg, fix_ver_flow):
new_ver_stub = versioning.create_flow(
bucket_id=fix_ver_flow.bucket.identifier,
flow_name=conftest.test_cloned_ver_flow_name,
)
ver_flow_snapshot_0 = versioning.get_latest_flow_ver(
fix_ver_flow.bucket.identifier,
fix_ver_flow.flow.identifier
)
r1 = versioning.create_flow_version(
flow=new_ver_stub,
flow_snapshot=ver_flow_snapshot_0
)
assert isinstance(r1, registry.VersionedFlowSnapshot)
# registry bug https://issues.apache.org/jira/browse/NIFIREG-135
# assert r1.flow.version_count == 2
assert DeepDiff(
ver_flow_snapshot_0.flow_contents,
r1.flow_contents,
ignore_order=False,
verbose_level=2
) == {}
# Write it again to increment the version, check it's consistent
r2 = versioning.create_flow_version(
flow=new_ver_stub,
flow_snapshot=ver_flow_snapshot_0
)
assert isinstance(r2, registry.VersionedFlowSnapshot)
assert DeepDiff(
ver_flow_snapshot_0.flow_contents,
r2.flow_contents,
ignore_order=False,
verbose_level=2
) == {}
def test_complex_template_versioning(regress_flow_reg, fix_ctv):
# There is a complex bug where a new flow version cannot be switched to
# and generates a NiFi NPE if attempted when create_flow_version is used
# BUG FIXED: issue with variable name found in Swagger definition
# https://github.com/apache/nifi/pull/2479#issuecomment-366582829
# Create a new flow version
vers = versioning.list_flow_versions(
fix_ctv.bucket.identifier,
fix_ctv.flow.identifier
)
assert vers[0].version == 2
# create_flow_version is the problem
new_ss = versioning.create_flow_version(
flow=fix_ctv.flow,
flow_snapshot=fix_ctv.snapshot_w_template,
refresh=True
)
assert isinstance(new_ss, registry.VersionedFlowSnapshot)
vers = versioning.list_flow_versions(
fix_ctv.bucket.identifier,
fix_ctv.flow.identifier
)
assert vers[0].version == 3
new_ver_info = versioning.get_version_info(fix_ctv.pg)
r1 = versioning.update_flow_ver(fix_ctv.pg)
assert r1.request.complete is True
assert r1.request.failure_reason is None
r2 = canvas.schedule_process_group(fix_ctv.pg.id, True)
status = canvas.get_process_group(fix_ctv.pg.id, 'id')
assert status.running_count >= 1
with pytest.raises(ValueError):
_ = versioning.update_flow_ver(fix_ctv.pg, 'bob')
with pytest.raises(ValueError):
_ = versioning.update_flow_ver(fix_ctv.pg, '9999999')
def test_get_flow_version(regress_flow_reg, fix_ver_flow):
r1 = versioning.get_flow_version(
bucket_id=fix_ver_flow.bucket.identifier,
flow_id=fix_ver_flow.flow.identifier,
version=None
)
assert isinstance(r1, registry.VersionedFlowSnapshot)
assert r1.snapshot_metadata.version == 1
test_vf_2 = versioning.create_flow_version(
flow=r1.flow,
flow_snapshot=r1
)
assert isinstance(test_vf_2, registry.VersionedFlowSnapshot)
assert test_vf_2.snapshot_metadata.version == 2
r2 = versioning.get_flow_version(
bucket_id=test_vf_2.flow.bucket_identifier,
flow_id=test_vf_2.flow.identifier,
version=None
)
assert r2.flow.version_count == 2
assert r2.snapshot_metadata.version == 2
r3 = versioning.get_flow_version(
bucket_id=test_vf_2.flow.bucket_identifier,
flow_id=test_vf_2.flow.identifier,
version='1',
)
assert r3.snapshot_metadata.version == 1
assert r3.flow.version_count == 2
r4 = versioning.get_flow_version(
bucket_id=test_vf_2.flow.bucket_identifier,
flow_id=test_vf_2.flow.identifier,
version=None,
export=True
)
assert isinstance(r4, bytes)
assert isinstance(utils.load(r4), dict)
def test_export_flow_version(regress_flow_reg, fix_flow_serde):
# Test we can turn a flow snapshot into a json string
r1 = versioning.export_flow_version(
fix_flow_serde.bucket.identifier,
fix_flow_serde.flow.identifier
)
assert isinstance(r1, str)
# Test writing it to a file
r2 = versioning.export_flow_version(
fix_flow_serde.bucket.identifier,
fix_flow_serde.flow.identifier,
file_path=fix_flow_serde.filepath + '_test.json'
)
assert isinstance(r2, str)
r2l = utils.load(r2)
assert isinstance(r2l, dict)
assert 'flowIdentifier' in r2l['snapshotMetadata']
# read in the file
r2f = utils.fs_read(fix_flow_serde.filepath + '_test.json')
# the exported string and the file contents should match exactly
assert DeepDiff(
r2,
r2f,
ignore_order=False,
verbose_level=2
) == {}
# Test yaml dump
r3 = versioning.export_flow_version(
fix_flow_serde.bucket.identifier,
fix_flow_serde.flow.identifier,
mode='yaml'
)
assert isinstance(r3, str)
r3l = utils.load(r3)
assert isinstance(r3l, dict)
assert 'flowIdentifier' in r3l['snapshotMetadata']
def test_import_flow_version(regress_flow_reg, fix_flow_serde):
compare_obj = fix_flow_serde.snapshot
test_obj = fix_flow_serde.raw
# Test that our test_obj serialises and deserialises through the layers of
# json reformatting. This is because we load the NiFi Java json object,
# dump it using the Python json library, and load it again using
# ruamel.yaml.
assert DeepDiff(
compare_obj,
utils.load(
utils.dump(
utils.load(
obj=test_obj
),
mode='json'
),
dto=fix_flow_serde.dto
),
ignore_order=False,
verbose_level=2
) == {}
# Test that we can issue a simple create_flow with this object
r0 = versioning.create_flow_version(
flow=fix_flow_serde.flow,
flow_snapshot=utils.load(
obj=fix_flow_serde.json,
dto=fix_flow_serde.dto
)
)
assert isinstance(r0, registry.VersionedFlowSnapshot)
assert DeepDiff(
compare_obj.flow_contents,
r0.flow_contents,
ignore_order=False,
verbose_level=2
) == {}
# Test we can import from a String in memory
# Test we can import as new version in existing bucket
r1 = versioning.import_flow_version(
bucket_id=fix_flow_serde.bucket.identifier,
encoded_flow=fix_flow_serde.json,
flow_id=fix_flow_serde.flow.identifier
)
assert isinstance(r1, registry.VersionedFlowSnapshot)
assert DeepDiff(
compare_obj.flow_contents,
r1.flow_contents,
ignore_order=False,
verbose_level=2
) == {}
# Test we can also import from a file
r2 = versioning.import_flow_version(
bucket_id=fix_flow_serde.bucket.identifier,
file_path=fix_flow_serde.filepath + '.yaml',
flow_id=fix_flow_serde.flow.identifier
)
assert isinstance(r2, registry.VersionedFlowSnapshot)
assert DeepDiff(
compare_obj.flow_contents,
r2.flow_contents,
ignore_order=False,
verbose_level=2
) == {}
# Test import into another bucket as first version
f_bucket_2 = versioning.create_registry_bucket(
conftest.test_bucket_name + '_02'
)
r3 = versioning.import_flow_version(
bucket_id=f_bucket_2.identifier,
encoded_flow=fix_flow_serde.yaml,
flow_name=conftest.test_cloned_ver_flow_name + '_01'
)
assert isinstance(r3, registry.VersionedFlowSnapshot)
assert DeepDiff(
compare_obj.flow_contents,
r3.flow_contents,
ignore_order=False,
verbose_level=2
) == {}
def test_deploy_flow_version(regress_flow_reg, fix_ver_flow):
r1 = versioning.deploy_flow_version(
parent_id=canvas.get_root_pg_id(),
location=(0, 0),
bucket_id=fix_ver_flow.bucket.identifier,
flow_id=fix_ver_flow.flow.identifier,
reg_client_id=fix_ver_flow.client.id,
version=1
)
assert isinstance(r1, nifi.ProcessGroupEntity)
r2 = versioning.deploy_flow_version(
parent_id=canvas.get_root_pg_id(),
location=(0, 0),
bucket_id=fix_ver_flow.bucket.identifier,
flow_id=fix_ver_flow.flow.identifier,
reg_client_id=fix_ver_flow.client.id,
version=None
)
assert isinstance(r2, nifi.ProcessGroupEntity)
with pytest.raises(ValueError):
# can't deploy a pg inside itself
_ = versioning.deploy_flow_version(
parent_id=fix_ver_flow.pg.id,
location=(0, 0),
bucket_id=fix_ver_flow.bucket.identifier,
flow_id=fix_ver_flow.flow.identifier,
reg_client_id=fix_ver_flow.client.id,
version=None
)
|
Paul-Verardi/nipyapi
|
nipyapi/nifi/models/variable_registry_dto.py
|
<reponame>Paul-Verardi/nipyapi<filename>nipyapi/nifi/models/variable_registry_dto.py
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.7.1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class VariableRegistryDTO(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'variables': 'list[VariableEntity]',
'process_group_id': 'str'
}
attribute_map = {
'variables': 'variables',
'process_group_id': 'processGroupId'
}
def __init__(self, variables=None, process_group_id=None):
"""
VariableRegistryDTO - a model defined in Swagger
"""
self._variables = None
self._process_group_id = None
if variables is not None:
self.variables = variables
if process_group_id is not None:
self.process_group_id = process_group_id
@property
def variables(self):
"""
Gets the variables of this VariableRegistryDTO.
The variables that are available in this Variable Registry
:return: The variables of this VariableRegistryDTO.
:rtype: list[VariableEntity]
"""
return self._variables
@variables.setter
def variables(self, variables):
"""
Sets the variables of this VariableRegistryDTO.
The variables that are available in this Variable Registry
:param variables: The variables of this VariableRegistryDTO.
:type: list[VariableEntity]
"""
self._variables = variables
@property
def process_group_id(self):
"""
Gets the process_group_id of this VariableRegistryDTO.
The UUID of the Process Group that this Variable Registry belongs to
:return: The process_group_id of this VariableRegistryDTO.
:rtype: str
"""
return self._process_group_id
@process_group_id.setter
def process_group_id(self, process_group_id):
"""
Sets the process_group_id of this VariableRegistryDTO.
The UUID of the Process Group that this Variable Registry belongs to
:param process_group_id: The process_group_id of this VariableRegistryDTO.
:type: str
"""
self._process_group_id = process_group_id
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, VariableRegistryDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
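# Illustrative usage (an editor-added sketch; it assumes the sibling generated
# models VariableEntity and VariableDTO are importable from nipyapi.nifi.models):
# from nipyapi.nifi.models import VariableEntity, VariableDTO
# var_reg = VariableRegistryDTO(
#     process_group_id='root',
#     variables=[VariableEntity(variable=VariableDTO(name='env', value='dev'))]
# )
# print(var_reg.to_dict())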
|
Paul-Verardi/nipyapi
|
nipyapi/nifi/models/version_control_information_dto.py
|
<reponame>Paul-Verardi/nipyapi<gh_stars>0
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.7.1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class VersionControlInformationDTO(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'group_id': 'str',
'registry_id': 'str',
'registry_name': 'str',
'bucket_id': 'str',
'bucket_name': 'str',
'flow_id': 'str',
'flow_name': 'str',
'flow_description': 'str',
'version': 'int',
'state': 'str',
'state_explanation': 'str'
}
attribute_map = {
'group_id': 'groupId',
'registry_id': 'registryId',
'registry_name': 'registryName',
'bucket_id': 'bucketId',
'bucket_name': 'bucketName',
'flow_id': 'flowId',
'flow_name': 'flowName',
'flow_description': 'flowDescription',
'version': 'version',
'state': 'state',
'state_explanation': 'stateExplanation'
}
def __init__(self, group_id=None, registry_id=None, registry_name=None, bucket_id=None, bucket_name=None, flow_id=None, flow_name=None, flow_description=None, version=None, state=None, state_explanation=None):
"""
VersionControlInformationDTO - a model defined in Swagger
"""
self._group_id = None
self._registry_id = None
self._registry_name = None
self._bucket_id = None
self._bucket_name = None
self._flow_id = None
self._flow_name = None
self._flow_description = None
self._version = None
self._state = None
self._state_explanation = None
if group_id is not None:
self.group_id = group_id
if registry_id is not None:
self.registry_id = registry_id
if registry_name is not None:
self.registry_name = registry_name
if bucket_id is not None:
self.bucket_id = bucket_id
if bucket_name is not None:
self.bucket_name = bucket_name
if flow_id is not None:
self.flow_id = flow_id
if flow_name is not None:
self.flow_name = flow_name
if flow_description is not None:
self.flow_description = flow_description
if version is not None:
self.version = version
if state is not None:
self.state = state
if state_explanation is not None:
self.state_explanation = state_explanation
@property
def group_id(self):
"""
Gets the group_id of this VersionControlInformationDTO.
The ID of the Process Group that is under version control
:return: The group_id of this VersionControlInformationDTO.
:rtype: str
"""
return self._group_id
@group_id.setter
def group_id(self, group_id):
"""
Sets the group_id of this VersionControlInformationDTO.
The ID of the Process Group that is under version control
:param group_id: The group_id of this VersionControlInformationDTO.
:type: str
"""
self._group_id = group_id
@property
def registry_id(self):
"""
Gets the registry_id of this VersionControlInformationDTO.
The ID of the registry that the flow is stored in
:return: The registry_id of this VersionControlInformationDTO.
:rtype: str
"""
return self._registry_id
@registry_id.setter
def registry_id(self, registry_id):
"""
Sets the registry_id of this VersionControlInformationDTO.
The ID of the registry that the flow is stored in
:param registry_id: The registry_id of this VersionControlInformationDTO.
:type: str
"""
self._registry_id = registry_id
@property
def registry_name(self):
"""
Gets the registry_name of this VersionControlInformationDTO.
The name of the registry that the flow is stored in
:return: The registry_name of this VersionControlInformationDTO.
:rtype: str
"""
return self._registry_name
@registry_name.setter
def registry_name(self, registry_name):
"""
Sets the registry_name of this VersionControlInformationDTO.
The name of the registry that the flow is stored in
:param registry_name: The registry_name of this VersionControlInformationDTO.
:type: str
"""
self._registry_name = registry_name
@property
def bucket_id(self):
"""
Gets the bucket_id of this VersionControlInformationDTO.
The ID of the bucket that the flow is stored in
:return: The bucket_id of this VersionControlInformationDTO.
:rtype: str
"""
return self._bucket_id
@bucket_id.setter
def bucket_id(self, bucket_id):
"""
Sets the bucket_id of this VersionControlInformationDTO.
The ID of the bucket that the flow is stored in
:param bucket_id: The bucket_id of this VersionControlInformationDTO.
:type: str
"""
self._bucket_id = bucket_id
@property
def bucket_name(self):
"""
Gets the bucket_name of this VersionControlInformationDTO.
The name of the bucket that the flow is stored in
:return: The bucket_name of this VersionControlInformationDTO.
:rtype: str
"""
return self._bucket_name
@bucket_name.setter
def bucket_name(self, bucket_name):
"""
Sets the bucket_name of this VersionControlInformationDTO.
The name of the bucket that the flow is stored in
:param bucket_name: The bucket_name of this VersionControlInformationDTO.
:type: str
"""
self._bucket_name = bucket_name
@property
def flow_id(self):
"""
Gets the flow_id of this VersionControlInformationDTO.
The ID of the flow
:return: The flow_id of this VersionControlInformationDTO.
:rtype: str
"""
return self._flow_id
@flow_id.setter
def flow_id(self, flow_id):
"""
Sets the flow_id of this VersionControlInformationDTO.
The ID of the flow
:param flow_id: The flow_id of this VersionControlInformationDTO.
:type: str
"""
self._flow_id = flow_id
@property
def flow_name(self):
"""
Gets the flow_name of this VersionControlInformationDTO.
The name of the flow
:return: The flow_name of this VersionControlInformationDTO.
:rtype: str
"""
return self._flow_name
@flow_name.setter
def flow_name(self, flow_name):
"""
Sets the flow_name of this VersionControlInformationDTO.
The name of the flow
:param flow_name: The flow_name of this VersionControlInformationDTO.
:type: str
"""
self._flow_name = flow_name
@property
def flow_description(self):
"""
Gets the flow_description of this VersionControlInformationDTO.
The description of the flow
:return: The flow_description of this VersionControlInformationDTO.
:rtype: str
"""
return self._flow_description
@flow_description.setter
def flow_description(self, flow_description):
"""
Sets the flow_description of this VersionControlInformationDTO.
The description of the flow
:param flow_description: The flow_description of this VersionControlInformationDTO.
:type: str
"""
self._flow_description = flow_description
@property
def version(self):
"""
Gets the version of this VersionControlInformationDTO.
The version of the flow
:return: The version of this VersionControlInformationDTO.
:rtype: int
"""
return self._version
@version.setter
def version(self, version):
"""
Sets the version of this VersionControlInformationDTO.
The version of the flow
:param version: The version of this VersionControlInformationDTO.
:type: int
"""
self._version = version
@property
def state(self):
"""
Gets the state of this VersionControlInformationDTO.
The current state of the Process Group, as it relates to the Versioned Flow
:return: The state of this VersionControlInformationDTO.
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""
Sets the state of this VersionControlInformationDTO.
The current state of the Process Group, as it relates to the Versioned Flow
:param state: The state of this VersionControlInformationDTO.
:type: str
"""
allowed_values = ["LOCALLY_MODIFIED", "STALE", "LOCALLY_MODIFIED_AND_STALE", "UP_TO_DATE", "SYNC_FAILURE"]
if state not in allowed_values:
raise ValueError(
"Invalid value for `state` ({0}), must be one of {1}"
.format(state, allowed_values)
)
self._state = state
@property
def state_explanation(self):
"""
Gets the state_explanation of this VersionControlInformationDTO.
Explanation of why the group is in the specified state
:return: The state_explanation of this VersionControlInformationDTO.
:rtype: str
"""
return self._state_explanation
@state_explanation.setter
def state_explanation(self, state_explanation):
"""
Sets the state_explanation of this VersionControlInformationDTO.
Explanation of why the group is in the specified state
:param state_explanation: The state_explanation of this VersionControlInformationDTO.
:type: str
"""
self._state_explanation = state_explanation
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, VersionControlInformationDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
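# Illustrative usage (an editor-added sketch): the state setter validates
# against allowed_values, so assigning an unknown state raises ValueError.
# vci = VersionControlInformationDTO(version=1, state='UP_TO_DATE')
# try:
#     vci.state = 'NOT_A_STATE'
# except ValueError as err:
#     print(err)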
|
Paul-Verardi/nipyapi
|
nipyapi/nifi/models/link.py
|
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.7.1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Link(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'type': 'str',
'params': 'dict(str, str)',
'title': 'str',
'rels': 'list[str]',
'uri_builder': 'UriBuilder',
'rel': 'str',
'uri': 'str'
}
attribute_map = {
'type': 'type',
'params': 'params',
'title': 'title',
'rels': 'rels',
'uri_builder': 'uriBuilder',
'rel': 'rel',
'uri': 'uri'
}
def __init__(self, type=None, params=None, title=None, rels=None, uri_builder=None, rel=None, uri=None):
"""
Link - a model defined in Swagger
"""
self._type = None
self._params = None
self._title = None
self._rels = None
self._uri_builder = None
self._rel = None
self._uri = None
if type is not None:
self.type = type
if params is not None:
self.params = params
if title is not None:
self.title = title
if rels is not None:
self.rels = rels
if uri_builder is not None:
self.uri_builder = uri_builder
if rel is not None:
self.rel = rel
if uri is not None:
self.uri = uri
@property
def type(self):
"""
Gets the type of this Link.
:return: The type of this Link.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this Link.
:param type: The type of this Link.
:type: str
"""
self._type = type
@property
def params(self):
"""
Gets the params of this Link.
:return: The params of this Link.
:rtype: dict(str, str)
"""
return self._params
@params.setter
def params(self, params):
"""
Sets the params of this Link.
:param params: The params of this Link.
:type: dict(str, str)
"""
self._params = params
@property
def title(self):
"""
Gets the title of this Link.
:return: The title of this Link.
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""
Sets the title of this Link.
:param title: The title of this Link.
:type: str
"""
self._title = title
@property
def rels(self):
"""
Gets the rels of this Link.
:return: The rels of this Link.
:rtype: list[str]
"""
return self._rels
@rels.setter
def rels(self, rels):
"""
Sets the rels of this Link.
:param rels: The rels of this Link.
:type: list[str]
"""
self._rels = rels
@property
def uri_builder(self):
"""
Gets the uri_builder of this Link.
:return: The uri_builder of this Link.
:rtype: UriBuilder
"""
return self._uri_builder
@uri_builder.setter
def uri_builder(self, uri_builder):
"""
Sets the uri_builder of this Link.
:param uri_builder: The uri_builder of this Link.
:type: UriBuilder
"""
self._uri_builder = uri_builder
@property
def rel(self):
"""
Gets the rel of this Link.
:return: The rel of this Link.
:rtype: str
"""
return self._rel
@rel.setter
def rel(self, rel):
"""
Sets the rel of this Link.
:param rel: The rel of this Link.
:type: str
"""
self._rel = rel
@property
def uri(self):
"""
Gets the uri of this Link.
:return: The uri of this Link.
:rtype: str
"""
return self._uri
@uri.setter
def uri(self, uri):
"""
Sets the uri of this Link.
:param uri: The uri of this Link.
:type: str
"""
self._uri = uri
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, Link):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
Paul-Verardi/nipyapi
|
nipyapi/nifi/models/processor_config_dto.py
|
<filename>nipyapi/nifi/models/processor_config_dto.py
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.7.1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ProcessorConfigDTO(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'properties': 'dict(str, str)',
'descriptors': 'dict(str, PropertyDescriptorDTO)',
'scheduling_period': 'str',
'scheduling_strategy': 'str',
'execution_node': 'str',
'penalty_duration': 'str',
'yield_duration': 'str',
'bulletin_level': 'str',
'run_duration_millis': 'int',
'concurrently_schedulable_task_count': 'int',
'auto_terminated_relationships': 'list[str]',
'comments': 'str',
'custom_ui_url': 'str',
'loss_tolerant': 'bool',
'annotation_data': 'str',
'default_concurrent_tasks': 'dict(str, str)',
'default_scheduling_period': 'dict(str, str)'
}
attribute_map = {
'properties': 'properties',
'descriptors': 'descriptors',
'scheduling_period': 'schedulingPeriod',
'scheduling_strategy': 'schedulingStrategy',
'execution_node': 'executionNode',
'penalty_duration': 'penaltyDuration',
'yield_duration': 'yieldDuration',
'bulletin_level': 'bulletinLevel',
'run_duration_millis': 'runDurationMillis',
'concurrently_schedulable_task_count': 'concurrentlySchedulableTaskCount',
'auto_terminated_relationships': 'autoTerminatedRelationships',
'comments': 'comments',
'custom_ui_url': 'customUiUrl',
'loss_tolerant': 'lossTolerant',
'annotation_data': 'annotationData',
'default_concurrent_tasks': 'defaultConcurrentTasks',
'default_scheduling_period': 'defaultSchedulingPeriod'
}
def __init__(self, properties=None, descriptors=None, scheduling_period=None, scheduling_strategy=None, execution_node=None, penalty_duration=None, yield_duration=None, bulletin_level=None, run_duration_millis=None, concurrently_schedulable_task_count=None, auto_terminated_relationships=None, comments=None, custom_ui_url=None, loss_tolerant=None, annotation_data=None, default_concurrent_tasks=None, default_scheduling_period=None):
"""
ProcessorConfigDTO - a model defined in Swagger
"""
self._properties = None
self._descriptors = None
self._scheduling_period = None
self._scheduling_strategy = None
self._execution_node = None
self._penalty_duration = None
self._yield_duration = None
self._bulletin_level = None
self._run_duration_millis = None
self._concurrently_schedulable_task_count = None
self._auto_terminated_relationships = None
self._comments = None
self._custom_ui_url = None
self._loss_tolerant = None
self._annotation_data = None
self._default_concurrent_tasks = None
self._default_scheduling_period = None
if properties is not None:
self.properties = properties
if descriptors is not None:
self.descriptors = descriptors
if scheduling_period is not None:
self.scheduling_period = scheduling_period
if scheduling_strategy is not None:
self.scheduling_strategy = scheduling_strategy
if execution_node is not None:
self.execution_node = execution_node
if penalty_duration is not None:
self.penalty_duration = penalty_duration
if yield_duration is not None:
self.yield_duration = yield_duration
if bulletin_level is not None:
self.bulletin_level = bulletin_level
if run_duration_millis is not None:
self.run_duration_millis = run_duration_millis
if concurrently_schedulable_task_count is not None:
self.concurrently_schedulable_task_count = concurrently_schedulable_task_count
if auto_terminated_relationships is not None:
self.auto_terminated_relationships = auto_terminated_relationships
if comments is not None:
self.comments = comments
if custom_ui_url is not None:
self.custom_ui_url = custom_ui_url
if loss_tolerant is not None:
self.loss_tolerant = loss_tolerant
if annotation_data is not None:
self.annotation_data = annotation_data
if default_concurrent_tasks is not None:
self.default_concurrent_tasks = default_concurrent_tasks
if default_scheduling_period is not None:
self.default_scheduling_period = default_scheduling_period
@property
def properties(self):
"""
Gets the properties of this ProcessorConfigDTO.
The properties for the processor. Properties whose value is not set will only contain the property name.
:return: The properties of this ProcessorConfigDTO.
:rtype: dict(str, str)
"""
return self._properties
@properties.setter
def properties(self, properties):
"""
Sets the properties of this ProcessorConfigDTO.
The properties for the processor. Properties whose value is not set will only contain the property name.
:param properties: The properties of this ProcessorConfigDTO.
:type: dict(str, str)
"""
self._properties = properties
@property
def descriptors(self):
"""
Gets the descriptors of this ProcessorConfigDTO.
Descriptors for the processor's properties.
:return: The descriptors of this ProcessorConfigDTO.
:rtype: dict(str, PropertyDescriptorDTO)
"""
return self._descriptors
@descriptors.setter
def descriptors(self, descriptors):
"""
Sets the descriptors of this ProcessorConfigDTO.
Descriptors for the processor's properties.
:param descriptors: The descriptors of this ProcessorConfigDTO.
:type: dict(str, PropertyDescriptorDTO)
"""
self._descriptors = descriptors
@property
def scheduling_period(self):
"""
Gets the scheduling_period of this ProcessorConfigDTO.
The frequency with which to schedule the processor. The format of the value will depend on the value of schedulingStrategy.
:return: The scheduling_period of this ProcessorConfigDTO.
:rtype: str
"""
return self._scheduling_period
@scheduling_period.setter
def scheduling_period(self, scheduling_period):
"""
Sets the scheduling_period of this ProcessorConfigDTO.
The frequency with which to schedule the processor. The format of the value will depend on the value of schedulingStrategy.
:param scheduling_period: The scheduling_period of this ProcessorConfigDTO.
:type: str
"""
self._scheduling_period = scheduling_period
@property
def scheduling_strategy(self):
"""
Gets the scheduling_strategy of this ProcessorConfigDTO.
Indicates whether the processor should be scheduled to run in event or timer driven mode.
:return: The scheduling_strategy of this ProcessorConfigDTO.
:rtype: str
"""
return self._scheduling_strategy
@scheduling_strategy.setter
def scheduling_strategy(self, scheduling_strategy):
"""
Sets the scheduling_strategy of this ProcessorConfigDTO.
Indicates whether the processor should be scheduled to run in event or timer driven mode.
:param scheduling_strategy: The scheduling_strategy of this ProcessorConfigDTO.
:type: str
"""
self._scheduling_strategy = scheduling_strategy
@property
def execution_node(self):
"""
Gets the execution_node of this ProcessorConfigDTO.
Indicates the node where the process will execute.
:return: The execution_node of this ProcessorConfigDTO.
:rtype: str
"""
return self._execution_node
@execution_node.setter
def execution_node(self, execution_node):
"""
Sets the execution_node of this ProcessorConfigDTO.
Indicates the node where the process will execute.
:param execution_node: The execution_node of this ProcessorConfigDTO.
:type: str
"""
self._execution_node = execution_node
@property
def penalty_duration(self):
"""
Gets the penalty_duration of this ProcessorConfigDTO.
The amount of time that is used when the process penalizes a flowfile.
:return: The penalty_duration of this ProcessorConfigDTO.
:rtype: str
"""
return self._penalty_duration
@penalty_duration.setter
def penalty_duration(self, penalty_duration):
"""
Sets the penalty_duration of this ProcessorConfigDTO.
The amount of time that is used when the process penalizes a flowfile.
:param penalty_duration: The penalty_duration of this ProcessorConfigDTO.
:type: str
"""
self._penalty_duration = penalty_duration
@property
def yield_duration(self):
"""
Gets the yield_duration of this ProcessorConfigDTO.
The amount of time that must elapse before this processor is scheduled again after yielding.
:return: The yield_duration of this ProcessorConfigDTO.
:rtype: str
"""
return self._yield_duration
@yield_duration.setter
def yield_duration(self, yield_duration):
"""
Sets the yield_duration of this ProcessorConfigDTO.
The amount of time that must elapse before this processor is scheduled again after yielding.
:param yield_duration: The yield_duration of this ProcessorConfigDTO.
:type: str
"""
self._yield_duration = yield_duration
@property
def bulletin_level(self):
"""
Gets the bulletin_level of this ProcessorConfigDTO.
The level at which the processor will report bulletins.
:return: The bulletin_level of this ProcessorConfigDTO.
:rtype: str
"""
return self._bulletin_level
@bulletin_level.setter
def bulletin_level(self, bulletin_level):
"""
Sets the bulletin_level of this ProcessorConfigDTO.
The level at which the processor will report bulletins.
:param bulletin_level: The bulletin_level of this ProcessorConfigDTO.
:type: str
"""
self._bulletin_level = bulletin_level
@property
def run_duration_millis(self):
"""
Gets the run_duration_millis of this ProcessorConfigDTO.
The run duration for the processor in milliseconds.
:return: The run_duration_millis of this ProcessorConfigDTO.
:rtype: int
"""
return self._run_duration_millis
@run_duration_millis.setter
def run_duration_millis(self, run_duration_millis):
"""
Sets the run_duration_millis of this ProcessorConfigDTO.
The run duration for the processor in milliseconds.
:param run_duration_millis: The run_duration_millis of this ProcessorConfigDTO.
:type: int
"""
self._run_duration_millis = run_duration_millis
@property
def concurrently_schedulable_task_count(self):
"""
Gets the concurrently_schedulable_task_count of this ProcessorConfigDTO.
The number of tasks that should be concurrently scheduled for the processor. If the processor doesn't allow parallel processing then any positive input will be ignored.
:return: The concurrently_schedulable_task_count of this ProcessorConfigDTO.
:rtype: int
"""
return self._concurrently_schedulable_task_count
@concurrently_schedulable_task_count.setter
def concurrently_schedulable_task_count(self, concurrently_schedulable_task_count):
"""
Sets the concurrently_schedulable_task_count of this ProcessorConfigDTO.
The number of tasks that should be concurrently scheduled for the processor. If the processor doesn't allow parallel processing then any positive input will be ignored.
:param concurrently_schedulable_task_count: The concurrently_schedulable_task_count of this ProcessorConfigDTO.
:type: int
"""
self._concurrently_schedulable_task_count = concurrently_schedulable_task_count
@property
def auto_terminated_relationships(self):
"""
Gets the auto_terminated_relationships of this ProcessorConfigDTO.
The names of all relationships that cause a flow file to be terminated if the relationship is not connected elsewhere. This property differs from the 'isAutoTerminate' property of the RelationshipDTO in that the RelationshipDTO is meant to depict the current configuration, whereas this property can be set in a DTO when updating a Processor in order to change which Relationships should be auto-terminated.
:return: The auto_terminated_relationships of this ProcessorConfigDTO.
:rtype: list[str]
"""
return self._auto_terminated_relationships
@auto_terminated_relationships.setter
def auto_terminated_relationships(self, auto_terminated_relationships):
"""
Sets the auto_terminated_relationships of this ProcessorConfigDTO.
The names of all relationships that cause a flow file to be terminated if the relationship is not connected elsewhere. This property differs from the 'isAutoTerminate' property of the RelationshipDTO in that the RelationshipDTO is meant to depict the current configuration, whereas this property can be set in a DTO when updating a Processor in order to change which Relationships should be auto-terminated.
:param auto_terminated_relationships: The auto_terminated_relationships of this ProcessorConfigDTO.
:type: list[str]
"""
self._auto_terminated_relationships = auto_terminated_relationships
@property
def comments(self):
"""
Gets the comments of this ProcessorConfigDTO.
The comments for the processor.
:return: The comments of this ProcessorConfigDTO.
:rtype: str
"""
return self._comments
@comments.setter
def comments(self, comments):
"""
Sets the comments of this ProcessorConfigDTO.
The comments for the processor.
:param comments: The comments of this ProcessorConfigDTO.
:type: str
"""
self._comments = comments
@property
def custom_ui_url(self):
"""
Gets the custom_ui_url of this ProcessorConfigDTO.
The URL for the processor's custom configuration UI if applicable.
:return: The custom_ui_url of this ProcessorConfigDTO.
:rtype: str
"""
return self._custom_ui_url
@custom_ui_url.setter
def custom_ui_url(self, custom_ui_url):
"""
Sets the custom_ui_url of this ProcessorConfigDTO.
The URL for the processor's custom configuration UI if applicable.
:param custom_ui_url: The custom_ui_url of this ProcessorConfigDTO.
:type: str
"""
self._custom_ui_url = custom_ui_url
@property
def loss_tolerant(self):
"""
Gets the loss_tolerant of this ProcessorConfigDTO.
Whether the processor is loss tolerant.
:return: The loss_tolerant of this ProcessorConfigDTO.
:rtype: bool
"""
return self._loss_tolerant
@loss_tolerant.setter
def loss_tolerant(self, loss_tolerant):
"""
Sets the loss_tolerant of this ProcessorConfigDTO.
Whether the processor is loss tolerant.
:param loss_tolerant: The loss_tolerant of this ProcessorConfigDTO.
:type: bool
"""
self._loss_tolerant = loss_tolerant
@property
def annotation_data(self):
"""
Gets the annotation_data of this ProcessorConfigDTO.
The annotation data for the processor used to relay configuration between a custom UI and the processor.
:return: The annotation_data of this ProcessorConfigDTO.
:rtype: str
"""
return self._annotation_data
@annotation_data.setter
def annotation_data(self, annotation_data):
"""
Sets the annotation_data of this ProcessorConfigDTO.
The annotation data for the processor used to relay configuration between a custom UI and the processor.
:param annotation_data: The annotation_data of this ProcessorConfigDTO.
:type: str
"""
self._annotation_data = annotation_data
@property
def default_concurrent_tasks(self):
"""
Gets the default_concurrent_tasks of this ProcessorConfigDTO.
Maps default values for concurrent tasks for each applicable scheduling strategy.
:return: The default_concurrent_tasks of this ProcessorConfigDTO.
:rtype: dict(str, str)
"""
return self._default_concurrent_tasks
@default_concurrent_tasks.setter
def default_concurrent_tasks(self, default_concurrent_tasks):
"""
Sets the default_concurrent_tasks of this ProcessorConfigDTO.
Maps default values for concurrent tasks for each applicable scheduling strategy.
:param default_concurrent_tasks: The default_concurrent_tasks of this ProcessorConfigDTO.
:type: dict(str, str)
"""
self._default_concurrent_tasks = default_concurrent_tasks
@property
def default_scheduling_period(self):
"""
Gets the default_scheduling_period of this ProcessorConfigDTO.
Maps default values for scheduling period for each applicable scheduling strategy.
:return: The default_scheduling_period of this ProcessorConfigDTO.
:rtype: dict(str, str)
"""
return self._default_scheduling_period
@default_scheduling_period.setter
def default_scheduling_period(self, default_scheduling_period):
"""
Sets the default_scheduling_period of this ProcessorConfigDTO.
Maps default values for scheduling period for each applicable scheduling strategy.
:param default_scheduling_period: The default_scheduling_period of this ProcessorConfigDTO.
:type: dict(str, str)
"""
self._default_scheduling_period = default_scheduling_period
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ProcessorConfigDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
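# Illustrative usage (an editor-added sketch; mirrors how this DTO is passed to
# nipyapi.canvas.create_processor in nipyapi/demo/fdlc.py further below):
# config = ProcessorConfigDTO(
#     scheduling_period='1s',
#     auto_terminated_relationships=['success']
# )
# print(config.to_dict())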
|
Paul-Verardi/nipyapi
|
nipyapi/demo/fdlc.py
|
<reponame>Paul-Verardi/nipyapi<filename>nipyapi/demo/fdlc.py
# -*- coding: utf-8 -*-
"""
A self-paced walkthrough of version control using NiFi-Registry.
See initial print statement for detailed explanation.
"""
from __future__ import absolute_import
import logging
from time import sleep
import nipyapi
from nipyapi.utils import DockerContainer
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
logging.getLogger('nipyapi.versioning').setLevel(logging.INFO)
logging.getLogger('nipyapi.utils').setLevel(logging.INFO)
d_network_name = 'fdlcdemo'
dev_nifi_port = 8080
prod_nifi_port = 9090
dev_reg_port = dev_nifi_port + 1
prod_reg_port = prod_nifi_port + 1
dev_nifi_url = 'http://localhost:' + str(dev_nifi_port) + '/nifi'
prod_nifi_url = 'http://localhost:' + str(prod_nifi_port) + '/nifi'
dev_reg_url = 'http://localhost:' + str(dev_reg_port) + '/nifi-registry'
prod_reg_url = 'http://localhost:' + str(prod_reg_port) + '/nifi-registry'
dev_nifi_api_url = dev_nifi_url + '-api'
prod_nifi_api_url = prod_nifi_url + '-api'
dev_reg_api_url = dev_reg_url + '-api'
prod_reg_api_url = prod_reg_url + '-api'
d_containers = [
DockerContainer(
name='nifi-dev',
image_name='apache/nifi',
image_tag='latest',
ports={str(dev_nifi_port) + '/tcp': dev_nifi_port},
env={'NIFI_WEB_HTTP_PORT': str(dev_nifi_port)}
),
DockerContainer(
name='nifi-prod',
image_name='apache/nifi',
image_tag='latest',
ports={str(prod_nifi_port) + '/tcp': prod_nifi_port},
env={'NIFI_WEB_HTTP_PORT': str(prod_nifi_port)}
),
DockerContainer(
name='reg-dev',
image_name='apache/nifi-registry',
image_tag='latest',
ports={str(dev_reg_port) + '/tcp': dev_reg_port},
env={'NIFI_REGISTRY_WEB_HTTP_PORT': str(dev_reg_port)}
),
DockerContainer(
name='reg-prod',
image_name='apache/nifi-registry',
image_tag='latest',
ports={str(prod_reg_port) + '/tcp': prod_reg_port},
env={'NIFI_REGISTRY_WEB_HTTP_PORT': str(prod_reg_port)}
)
]
dev_pg_name = 'my_pg_0'
dev_proc_name = 'my_proc_0'
dev_proc2_name = 'my_s_proc_0'
dev_reg_client_name = 'dev_reg_client_0'
dev_bucket_name = 'dev_bucket_0'
dev_ver_flow_name = 'dev_ver_flow_0'
dev_flow_export_name = 'dev_flow_export_0'
prod_bucket_name = 'prod_bucket_0'
prod_ver_flow_name = 'prod_ver_flow_0'
prod_reg_client_name = 'prod_reg_client_0'
print("This python script demonstrates the steps to manage promotion of "
"versioned Flows between different environments. \nIt deploys NiFi and "
"NiFi-Registry in local Docker containers and illustrates the "
"steps you might follow in such a process."
"\nEach step is presented as a function of this script, they count up "
"in hex (0,1,2,3,4,5,6,7,8,9,a,b,c,d) and should be called in order."
"\nEach step will log activities to INFO, and you are encouraged to "
"look at the code in this script to see how each step is completed."
"\nhttp://github.com/Chaffelson/nipyapi/blob/master/nipyapi/demo/fdlc.py"
"\nEach step will also issue instructions through print statements like "
"this one, these instructions will vary so please read them as you go."
"\nNote that the first call will log a lot of information while it boots"
" the Docker containers, further instructions will follow."
"\nNote that you can reset it at any time by calling step_1 again.\n"
"\nPlease start by calling the function 'step_1_boot_demo_env()'.")
def step_1_boot_demo_env():
"""step_1_boot_demo_env"""
log.info("Starting Dev and Prod NiFi and NiFi-Registry Docker Containers"
"\nPlease wait, this may take a few minutes to download the "
"Docker images and then start them.")
nipyapi.utils.start_docker_containers(
docker_containers=d_containers,
network_name=d_network_name
)
for reg_instance in [dev_reg_api_url, prod_reg_api_url]:
log.info("Waiting for NiFi Registries to be ready")
nipyapi.utils.set_endpoint(reg_instance)
nipyapi.utils.wait_to_complete(
test_function=nipyapi.utils.is_endpoint_up,
endpoint_url='-'.join(reg_instance.split('-')[:-1]),
nipyapi_delay=nipyapi.config.long_retry_delay,
nipyapi_max_wait=nipyapi.config.long_max_wait
)
for nifi_instance in [dev_nifi_api_url, prod_nifi_api_url]:
log.info("Waiting for NiFi instances to be ready")
nipyapi.utils.set_endpoint(nifi_instance)
nipyapi.utils.wait_to_complete(
test_function=nipyapi.utils.is_endpoint_up,
endpoint_url='-'.join(nifi_instance.split('-')[:-1]),
nipyapi_delay=nipyapi.config.long_retry_delay,
nipyapi_max_wait=nipyapi.config.long_max_wait
)
# Sleeping to wait for all startups to return before printing guide
sleep(1)
print("Your Docker containers should now be ready, please find them at the"
"following URLs:"
"\nnifi-dev ", dev_nifi_url,
"\nreg-dev ", dev_reg_url,
"\nreg-prod ", prod_reg_url,
"\nnifi-prod ", prod_nifi_url,
"\nPlease open each of these in a browser tab."
"\nPlease then call the function 'step_2_create_reg_clients()'\n")
def step_2_create_reg_clients():
"""Set client connections between NiFi and Registry"""
log.info("Creating Dev Environment Nifi to NiFi-Registry Client")
nipyapi.utils.set_endpoint(dev_nifi_api_url)
nipyapi.versioning.create_registry_client(
name=dev_reg_client_name,
uri='http://reg-dev:8081',
description=''
)
log.info("Creating Prod Environment Nifi to NiFi-Registry Client")
nipyapi.utils.set_endpoint(prod_nifi_api_url)
nipyapi.versioning.create_registry_client(
name=prod_reg_client_name,
uri='http://reg-prod:9091',
description=''
)
print("We have attached each NiFi environment to its relevant Registry "
"for upcoming Version Control activities."
"\nYou can see these by going to NiFi, clicking on the 3Bar menu "
"icon in the top right corner, selecting 'Controller Settings', and"
" looking at the 'Registry Clients' tab."
"\nPlease now call 'step_3_create_dev_flow()'\n")
def step_3_create_dev_flow():
"""Connecting to Dev environment and creating some test objects"""
log.info("Connecting to Dev environment and creating some test objects")
nipyapi.utils.set_endpoint(dev_nifi_api_url)
nipyapi.utils.set_endpoint(dev_reg_api_url)
log.info("Creating %s as an empty process group", dev_pg_name)
dev_process_group_0 = nipyapi.canvas.create_process_group(
nipyapi.canvas.get_process_group(nipyapi.canvas.get_root_pg_id(),
'id'),
dev_pg_name,
location=(400.0, 400.0)
)
log.info("Creating dev_processor_0 as a new GenerateFlowFile in the PG")
nipyapi.canvas.create_processor(
parent_pg=dev_process_group_0,
processor=nipyapi.canvas.get_processor_type('GenerateFlowFile'),
location=(400.0, 400.0),
name=dev_proc_name,
config=nipyapi.nifi.ProcessorConfigDTO(
scheduling_period='1s',
auto_terminated_relationships=['success']
)
)
print("We have procedurally generated a new Process Group with a child "
"Processor in Dev NiFi. It is not yet version controlled."
"\nGo to your Dev NiFi browser tab, and refresh to see the new "
"Process Group, open the Process Group to see the new Generate "
"FlowFile Processor. Open the Processor and look at the Scheduling "
"tab to note that it is set to 1s."
"\nPlease now call 'step_4_create_dev_ver_bucket()'\n")
def step_4_create_dev_ver_bucket():
"""Creating dev registry bucket"""
log.info("Creating %s as new a Registry Bucket", dev_bucket_name)
nipyapi.versioning.create_registry_bucket(dev_bucket_name)
print("We have created a new Versioned Flow Bucket in the Dev "
"NiFi-Registry. Please go to the Dev Registry tab in your browser "
"and refresh, then click the arrow next to 'All' in the page header "
"to select the new bucket and see that it is currently empty."
"\nPlease now call 'step_5_save_flow_to_bucket()'\n")
def step_5_save_flow_to_bucket():
"""Saving the flow to the bucket as a new versioned flow"""
log.info(
"Saving %s to %s", dev_pg_name, dev_bucket_name)
process_group = nipyapi.canvas.get_process_group(dev_pg_name)
bucket = nipyapi.versioning.get_registry_bucket(dev_bucket_name)
registry_client = nipyapi.versioning.get_registry_client(
dev_reg_client_name)
nipyapi.versioning.save_flow_ver(
process_group=process_group,
registry_client=registry_client,
bucket=bucket,
flow_name=dev_ver_flow_name,
desc='A Versioned Flow',
comment='A Versioned Flow'
)
print("We have now saved the Dev Process Group to the Dev Registry bucket "
"as a new Versioned Flow. Return to the Dev Registry tab in your "
"browser and refresh to see the flow. Click on the flow to show "
"some details, note that it is version 1."
"\nPlease note that the next function requires that you save the "
"output to a variable when you continue."
"\nPlease now call 'flow = step_6_export_dev_flow()'\n")
def step_6_export_dev_flow():
"""Exporting the versioned flow as a yaml definition"""
log.info("Creating a sorted pretty Yaml export of %s",
dev_flow_export_name)
bucket = nipyapi.versioning.get_registry_bucket(dev_bucket_name)
ver_flow = nipyapi.versioning.get_flow_in_bucket(
bucket.identifier,
identifier=dev_ver_flow_name
)
out = nipyapi.versioning.export_flow_version(
bucket_id=bucket.identifier,
flow_id=ver_flow.identifier,
mode='yaml'
)
print("We have now exported the versioned Flow from the Dev environment as"
" a formatted YAML document, which is one of several options. Note "
"that you were asked to save it as the variable 'flow' so you can "
"then import it into your Prod environment."
"\nIf you want to view it, call 'print(flow)'."
"\nWhen you are ready, please call 'step_7_create_prod_ver_bucket()'"
"\n")
return out
def step_7_create_prod_ver_bucket():
"""Connecting to the Prod environment and creating a new bucket"""
log.info("Connecting to Prod Environment")
nipyapi.utils.set_endpoint(prod_nifi_api_url)
nipyapi.utils.set_endpoint(prod_reg_api_url)
log.info("Creating %s as a new Registry Bucket", prod_bucket_name)
nipyapi.versioning.create_registry_bucket(prod_bucket_name)
print("We have now created a bucket in the Prod Registry to promote our "
"Dev flow into. Go to the Prod Registry tab and click the arrow next"
" to 'All' to select it and see that it is currently empty."
"\nPlease note that the next function requires that you supply the "
"variable you saved from step 5."
"\nPlease call 'step_8_import_dev_flow_to_prod_reg(flow)'\n")
def step_8_import_dev_flow_to_prod_reg(versioned_flow):
"""Importing the yaml string into Prod"""
log.info("Saving dev flow export to prod container")
bucket = nipyapi.versioning.get_registry_bucket(prod_bucket_name)
nipyapi.versioning.import_flow_version(
bucket_id=bucket.identifier,
encoded_flow=versioned_flow,
flow_name=prod_ver_flow_name
)
print("The flow we exported from Dev is now imported into the bucket in "
"the Prod Registry, and ready for deployment to the Prod NiFi."
"\nPlease refresh your Prod Registry and you will see it, note that"
" it is version 1 and has the same comment as the Dev Flow Version."
"\nPlease then call 'step_9_deploy_prod_flow_to_nifi()'\n")
def step_9_deploy_prod_flow_to_nifi():
"""Deploying the flow to the Prod environment"""
log.info("Deploying promoted flow from Prod Registry to Prod Nifi")
bucket = nipyapi.versioning.get_registry_bucket(prod_bucket_name)
flow = nipyapi.versioning.get_flow_in_bucket(
bucket_id=bucket.identifier,
identifier=prod_ver_flow_name
)
reg_client = nipyapi.versioning.get_registry_client(prod_reg_client_name)
nipyapi.versioning.deploy_flow_version(
parent_id=nipyapi.canvas.get_root_pg_id(),
location=(0, 0),
bucket_id=bucket.identifier,
flow_id=flow.identifier,
reg_client_id=reg_client.id,
version=None
)
print("The Promoted Flow has now been deployed to the Prod NiFi, please "
"refresh the Prod NiFi tab and note that the Process Group has the "
"same name as the Dev Process Group, and has a green tick(√) "
"indicating it is up to date with Version Control. "
"\n Open the Process Group and note that the Processor is also the "
"same, including the Schedule of 1s."
"\nPlease now call 'step_a_change_dev_flow()'\n")
def step_a_change_dev_flow():
"""Procedurally modifying the Dev flow"""
log.info("Connecting to Dev Environment")
nipyapi.utils.set_endpoint(dev_nifi_api_url)
nipyapi.utils.set_endpoint(dev_reg_api_url)
log.info("Modifying Dev Processor Schedule")
processor = nipyapi.canvas.get_processor(dev_proc_name)
nipyapi.canvas.update_processor(
processor=processor,
update=nipyapi.nifi.ProcessorConfigDTO(
scheduling_period='3s'
)
)
print("Here we have made a simple modification to the processor in our Dev"
"Flow. \nGo to the Dev NiFi tab and refresh it, you will see that "
"the Process Group now has a star(*) icon next to the name, "
"indicating there are unsaved changes. Look at the Scheduling tab "
"in the Processor and note that it has changed from 1s to 3s."
"\nPlease now call 'step_b_update_dev_flow_ver()'\n")
def step_b_update_dev_flow_ver():
"""Committing the change to the dev flow version"""
log.info("Saving changes in Dev Flow to Version Control")
process_group = nipyapi.canvas.get_process_group(dev_pg_name)
bucket = nipyapi.versioning.get_registry_bucket(dev_bucket_name)
registry_client = nipyapi.versioning.get_registry_client(
dev_reg_client_name)
flow = nipyapi.versioning.get_flow_in_bucket(
bucket_id=bucket.identifier,
identifier=dev_ver_flow_name
)
nipyapi.versioning.save_flow_ver(
process_group=process_group,
registry_client=registry_client,
bucket=bucket,
flow_id=flow.identifier,
comment='An Updated Flow'
)
print("We have saved the change to the Dev Registry as a new version."
"\nRefresh the Dev Registry to see that the Flow now has a version "
"2, and a new comment."
"\nRefresh the Dev NiFi to see that the Process Group now has a "
"green tick again, indicating that Version Control is up to date."
"\nPlease now call 'step_c_promote_change_to_prod_reg()'\n")
def step_c_promote_change_to_prod_reg():
"""Promoting the committed change across to the prod environment"""
log.info("Exporting updated Dev Flow Version")
dev_bucket = nipyapi.versioning.get_registry_bucket(dev_bucket_name)
dev_ver_flow = nipyapi.versioning.get_flow_in_bucket(
dev_bucket.identifier,
identifier=dev_ver_flow_name
)
dev_export = nipyapi.versioning.export_flow_version(
bucket_id=dev_bucket.identifier,
flow_id=dev_ver_flow.identifier,
mode='yaml'
)
log.info("Connecting to Prod Environment")
nipyapi.utils.set_endpoint(prod_nifi_api_url)
nipyapi.utils.set_endpoint(prod_reg_api_url)
log.info("Pushing updated version into Prod Registry Flow")
prod_bucket = nipyapi.versioning.get_registry_bucket(prod_bucket_name)
prod_flow = nipyapi.versioning.get_flow_in_bucket(
bucket_id=prod_bucket.identifier,
identifier=prod_ver_flow_name
)
nipyapi.versioning.import_flow_version(
bucket_id=prod_bucket.identifier,
encoded_flow=dev_export,
flow_id=prod_flow.identifier
)
print("We have promoted the change from our Dev Registry to Prod, please "
"refresh your Prod Registry Tab to see the new version is present, "
"and that the new comment matches the Dev Environment."
"\nRefresh your Prod NiFi tab to see that the Process Group has a "
"red UpArrow(⬆︎) icon indicating a new version is available for "
"deployment."
"\nPlease now call 'step_d_promote_change_to_prod_nifi()'\n")
def step_d_promote_change_to_prod_nifi():
"""Pushing the change into the Prod flow"""
log.info("Moving deployed Prod Process Group to the latest version")
prod_pg = nipyapi.canvas.get_process_group(dev_pg_name)
nipyapi.versioning.update_flow_ver(
process_group=prod_pg,
target_version=None
)
print("Refresh your Prod NiFi to see that the PG now shows the green tick "
"of being up to date with its version control."
"\nLook at the Processor scheduling to note that it now matches the "
"dev environment as 3s."
"\nNow we will examine some typical deployment tests."
"\nPlease now call 'step_e_check_sensitive_processors()'\n")
def step_e_check_sensitive_processors():
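    """Creating a Processor with sensitive properties and listing them"""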
log.info("Connecting to Dev Environment")
nipyapi.utils.set_endpoint(dev_nifi_api_url)
nipyapi.utils.set_endpoint(dev_reg_api_url)
log.info("Creating additional complex Processor")
nipyapi.canvas.create_processor(
parent_pg=nipyapi.canvas.get_process_group(dev_pg_name),
processor=nipyapi.canvas.get_processor_type('GetTwitter'),
location=(400.0, 600.0),
name=dev_proc2_name,
)
s_proc = nipyapi.canvas.list_sensitive_processors()
print("We have created a new Processor {0} which has security protected"
"properties, these will need to be completed in each environment "
"that this flow is used in. These properties are discoverable using "
"the API calls list 'canvas.list_sensitive_processors()'"
"\nFunction 'nipyapi.canvas.update_processor' as used in step_a is"
" intended for this purpose"
"\nPlease no call 'step_f_set_sensitive_values()'\n"
.format(s_proc[0].status.name, ))
def step_f_set_sensitive_values():
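    """Setting the sensitive property values on the new Processor"""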
log.info("Setting Sensitive Values on Processor")
nipyapi.canvas.update_processor(
processor=nipyapi.canvas.get_processor(dev_proc2_name),
update=nipyapi.nifi.ProcessorConfigDTO(
properties={
'Consumer Key': 'Some',
'Consumer Secret': 'Secret',
'Access Token': 'values',
'Access Token Secret': 'here'
}
)
)
print("Here we have set the Sensitive values, again using the Update"
" process. Typically these values will be looked up in a Config DB "
"or some other secured service."
"\nPlease now call 'step_g_check_invalid_processors()'\n")
# TODO: update sensitive listing to return a pre-created properties list
def step_g_check_invalid_processors():
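    """Listing Processors that are currently in an invalid state"""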
log.info("Retrieving Processors in Invalid States")
i_proc = nipyapi.canvas.list_invalid_processors()[0]
print("We now run a validity test against our flow to ensure that it can "
"be deployed. We can see that Processors [{0}] need further "
"attention."
"\nWe can also easily see the reasons for this [{1}]."
"\nPlease now call 'step_h_fix_validation_errors()'\n"
.format(i_proc.status.name, i_proc.component.validation_errors))
def step_h_fix_validation_errors():
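    """Resolving the validation errors on the new Processor"""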
log.info("Autoterminating Success status")
nipyapi.canvas.update_processor(
processor=nipyapi.canvas.get_processor(dev_proc2_name),
update=nipyapi.nifi.ProcessorConfigDTO(
auto_terminated_relationships=['success']
)
)
print("We now see that our Processor is configured and Valid within this "
"environment, and is ready for Promotion to the next stage."
"\nPlease now call 'step_i_promote_deploy_and_validate()'\n")
def step_i_promote_deploy_and_validate():
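    """Promoting the updated flow through to Prod and validating it"""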
log.info("Saving changes in Dev Flow to Version Control")
dev_process_group = nipyapi.canvas.get_process_group(dev_pg_name)
dev_bucket = nipyapi.versioning.get_registry_bucket(dev_bucket_name)
dev_registry_client = nipyapi.versioning.get_registry_client(
dev_reg_client_name)
dev_flow = nipyapi.versioning.get_flow_in_bucket(
bucket_id=dev_bucket.identifier,
identifier=dev_ver_flow_name
)
nipyapi.versioning.save_flow_ver(
process_group=dev_process_group,
registry_client=dev_registry_client,
bucket=dev_bucket,
flow_id=dev_flow.identifier,
comment='A Flow update with a Complex Processor'
)
dev_ver_flow = nipyapi.versioning.get_flow_in_bucket(
dev_bucket.identifier,
identifier=dev_ver_flow_name
)
dev_export = nipyapi.versioning.export_flow_version(
bucket_id=dev_bucket.identifier,
flow_id=dev_ver_flow.identifier,
mode='yaml'
)
log.info("Connecting to Prod Environment")
nipyapi.utils.set_endpoint(prod_nifi_api_url)
nipyapi.utils.set_endpoint(prod_reg_api_url)
log.info("Pushing updated version into Prod Registry Flow")
prod_bucket = nipyapi.versioning.get_registry_bucket(prod_bucket_name)
prod_flow = nipyapi.versioning.get_flow_in_bucket(
bucket_id=prod_bucket.identifier,
identifier=prod_ver_flow_name
)
nipyapi.versioning.import_flow_version(
bucket_id=prod_bucket.identifier,
encoded_flow=dev_export,
flow_id=prod_flow.identifier
)
prod_pg = nipyapi.canvas.get_process_group(dev_pg_name)
nipyapi.versioning.update_flow_ver(
process_group=prod_pg,
target_version=None
)
val_errors = nipyapi.canvas.list_invalid_processors()
print("Here we have put all the steps in one place by taking the dev "
"changes all the way through to prod deployment. If we check"
" our Processor Validation again, we see that our regular "
"Properties have been carried through, but our Sensitive "
"Properties are unset in Production [{0}]"
"\nThis is because NiFi will not break"
" security by carrying them to a new environment. We leave setting"
" them again as an exercise for the user."
.format(val_errors[0].component.validation_errors))
print("\nThis is the end of the guide, you may restart at any time by "
"calling 'step_1_boot_demo_env()'\n")
|
Paul-Verardi/nipyapi
|
nipyapi/nifi/models/controller_status_dto.py
|
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.7.1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ControllerStatusDTO(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'active_thread_count': 'int',
'terminated_thread_count': 'int',
'queued': 'str',
'flow_files_queued': 'int',
'bytes_queued': 'int',
'running_count': 'int',
'stopped_count': 'int',
'invalid_count': 'int',
'disabled_count': 'int',
'active_remote_port_count': 'int',
'inactive_remote_port_count': 'int',
'up_to_date_count': 'int',
'locally_modified_count': 'int',
'stale_count': 'int',
'locally_modified_and_stale_count': 'int',
'sync_failure_count': 'int'
}
attribute_map = {
'active_thread_count': 'activeThreadCount',
'terminated_thread_count': 'terminatedThreadCount',
'queued': 'queued',
'flow_files_queued': 'flowFilesQueued',
'bytes_queued': 'bytesQueued',
'running_count': 'runningCount',
'stopped_count': 'stoppedCount',
'invalid_count': 'invalidCount',
'disabled_count': 'disabledCount',
'active_remote_port_count': 'activeRemotePortCount',
'inactive_remote_port_count': 'inactiveRemotePortCount',
'up_to_date_count': 'upToDateCount',
'locally_modified_count': 'locallyModifiedCount',
'stale_count': 'staleCount',
'locally_modified_and_stale_count': 'locallyModifiedAndStaleCount',
'sync_failure_count': 'syncFailureCount'
}
def __init__(self, active_thread_count=None, terminated_thread_count=None, queued=None, flow_files_queued=None, bytes_queued=None, running_count=None, stopped_count=None, invalid_count=None, disabled_count=None, active_remote_port_count=None, inactive_remote_port_count=None, up_to_date_count=None, locally_modified_count=None, stale_count=None, locally_modified_and_stale_count=None, sync_failure_count=None):
"""
ControllerStatusDTO - a model defined in Swagger
"""
self._active_thread_count = None
self._terminated_thread_count = None
self._queued = None
self._flow_files_queued = None
self._bytes_queued = None
self._running_count = None
self._stopped_count = None
self._invalid_count = None
self._disabled_count = None
self._active_remote_port_count = None
self._inactive_remote_port_count = None
self._up_to_date_count = None
self._locally_modified_count = None
self._stale_count = None
self._locally_modified_and_stale_count = None
self._sync_failure_count = None
if active_thread_count is not None:
self.active_thread_count = active_thread_count
if terminated_thread_count is not None:
self.terminated_thread_count = terminated_thread_count
if queued is not None:
self.queued = queued
if flow_files_queued is not None:
self.flow_files_queued = flow_files_queued
if bytes_queued is not None:
self.bytes_queued = bytes_queued
if running_count is not None:
self.running_count = running_count
if stopped_count is not None:
self.stopped_count = stopped_count
if invalid_count is not None:
self.invalid_count = invalid_count
if disabled_count is not None:
self.disabled_count = disabled_count
if active_remote_port_count is not None:
self.active_remote_port_count = active_remote_port_count
if inactive_remote_port_count is not None:
self.inactive_remote_port_count = inactive_remote_port_count
if up_to_date_count is not None:
self.up_to_date_count = up_to_date_count
if locally_modified_count is not None:
self.locally_modified_count = locally_modified_count
if stale_count is not None:
self.stale_count = stale_count
if locally_modified_and_stale_count is not None:
self.locally_modified_and_stale_count = locally_modified_and_stale_count
if sync_failure_count is not None:
self.sync_failure_count = sync_failure_count
@property
def active_thread_count(self):
"""
Gets the active_thread_count of this ControllerStatusDTO.
The number of active threads in the NiFi.
:return: The active_thread_count of this ControllerStatusDTO.
:rtype: int
"""
return self._active_thread_count
@active_thread_count.setter
def active_thread_count(self, active_thread_count):
"""
Sets the active_thread_count of this ControllerStatusDTO.
The number of active threads in the NiFi.
:param active_thread_count: The active_thread_count of this ControllerStatusDTO.
:type: int
"""
self._active_thread_count = active_thread_count
@property
def terminated_thread_count(self):
"""
Gets the terminated_thread_count of this ControllerStatusDTO.
The number of terminated threads in the NiFi.
:return: The terminated_thread_count of this ControllerStatusDTO.
:rtype: int
"""
return self._terminated_thread_count
@terminated_thread_count.setter
def terminated_thread_count(self, terminated_thread_count):
"""
Sets the terminated_thread_count of this ControllerStatusDTO.
The number of terminated threads in the NiFi.
:param terminated_thread_count: The terminated_thread_count of this ControllerStatusDTO.
:type: int
"""
self._terminated_thread_count = terminated_thread_count
@property
def queued(self):
"""
Gets the queued of this ControllerStatusDTO.
The number of flowfiles queued in the NiFi.
:return: The queued of this ControllerStatusDTO.
:rtype: str
"""
return self._queued
@queued.setter
def queued(self, queued):
"""
Sets the queued of this ControllerStatusDTO.
The number of flowfiles queued in the NiFi.
:param queued: The queued of this ControllerStatusDTO.
:type: str
"""
self._queued = queued
@property
def flow_files_queued(self):
"""
Gets the flow_files_queued of this ControllerStatusDTO.
The number of FlowFiles queued across the entire flow
:return: The flow_files_queued of this ControllerStatusDTO.
:rtype: int
"""
return self._flow_files_queued
@flow_files_queued.setter
def flow_files_queued(self, flow_files_queued):
"""
Sets the flow_files_queued of this ControllerStatusDTO.
The number of FlowFiles queued across the entire flow
:param flow_files_queued: The flow_files_queued of this ControllerStatusDTO.
:type: int
"""
self._flow_files_queued = flow_files_queued
@property
def bytes_queued(self):
"""
Gets the bytes_queued of this ControllerStatusDTO.
The size of the FlowFiles queued across the entire flow
:return: The bytes_queued of this ControllerStatusDTO.
:rtype: int
"""
return self._bytes_queued
@bytes_queued.setter
def bytes_queued(self, bytes_queued):
"""
Sets the bytes_queued of this ControllerStatusDTO.
The size of the FlowFiles queued across the entire flow
:param bytes_queued: The bytes_queued of this ControllerStatusDTO.
:type: int
"""
self._bytes_queued = bytes_queued
@property
def running_count(self):
"""
Gets the running_count of this ControllerStatusDTO.
The number of running components in the NiFi.
:return: The running_count of this ControllerStatusDTO.
:rtype: int
"""
return self._running_count
@running_count.setter
def running_count(self, running_count):
"""
Sets the running_count of this ControllerStatusDTO.
The number of running components in the NiFi.
:param running_count: The running_count of this ControllerStatusDTO.
:type: int
"""
self._running_count = running_count
@property
def stopped_count(self):
"""
Gets the stopped_count of this ControllerStatusDTO.
The number of stopped components in the NiFi.
:return: The stopped_count of this ControllerStatusDTO.
:rtype: int
"""
return self._stopped_count
@stopped_count.setter
def stopped_count(self, stopped_count):
"""
Sets the stopped_count of this ControllerStatusDTO.
The number of stopped components in the NiFi.
:param stopped_count: The stopped_count of this ControllerStatusDTO.
:type: int
"""
self._stopped_count = stopped_count
@property
def invalid_count(self):
"""
Gets the invalid_count of this ControllerStatusDTO.
The number of invalid components in the NiFi.
:return: The invalid_count of this ControllerStatusDTO.
:rtype: int
"""
return self._invalid_count
@invalid_count.setter
def invalid_count(self, invalid_count):
"""
Sets the invalid_count of this ControllerStatusDTO.
The number of invalid components in the NiFi.
:param invalid_count: The invalid_count of this ControllerStatusDTO.
:type: int
"""
self._invalid_count = invalid_count
@property
def disabled_count(self):
"""
Gets the disabled_count of this ControllerStatusDTO.
The number of disabled components in the NiFi.
:return: The disabled_count of this ControllerStatusDTO.
:rtype: int
"""
return self._disabled_count
@disabled_count.setter
def disabled_count(self, disabled_count):
"""
Sets the disabled_count of this ControllerStatusDTO.
The number of disabled components in the NiFi.
:param disabled_count: The disabled_count of this ControllerStatusDTO.
:type: int
"""
self._disabled_count = disabled_count
@property
def active_remote_port_count(self):
"""
Gets the active_remote_port_count of this ControllerStatusDTO.
The number of active remote ports in the NiFi.
:return: The active_remote_port_count of this ControllerStatusDTO.
:rtype: int
"""
return self._active_remote_port_count
@active_remote_port_count.setter
def active_remote_port_count(self, active_remote_port_count):
"""
Sets the active_remote_port_count of this ControllerStatusDTO.
The number of active remote ports in the NiFi.
:param active_remote_port_count: The active_remote_port_count of this ControllerStatusDTO.
:type: int
"""
self._active_remote_port_count = active_remote_port_count
@property
def inactive_remote_port_count(self):
"""
Gets the inactive_remote_port_count of this ControllerStatusDTO.
The number of inactive remote ports in the NiFi.
:return: The inactive_remote_port_count of this ControllerStatusDTO.
:rtype: int
"""
return self._inactive_remote_port_count
@inactive_remote_port_count.setter
def inactive_remote_port_count(self, inactive_remote_port_count):
"""
Sets the inactive_remote_port_count of this ControllerStatusDTO.
The number of inactive remote ports in the NiFi.
:param inactive_remote_port_count: The inactive_remote_port_count of this ControllerStatusDTO.
:type: int
"""
self._inactive_remote_port_count = inactive_remote_port_count
@property
def up_to_date_count(self):
"""
Gets the up_to_date_count of this ControllerStatusDTO.
The number of up to date versioned process groups in the NiFi.
:return: The up_to_date_count of this ControllerStatusDTO.
:rtype: int
"""
return self._up_to_date_count
@up_to_date_count.setter
def up_to_date_count(self, up_to_date_count):
"""
Sets the up_to_date_count of this ControllerStatusDTO.
The number of up to date versioned process groups in the NiFi.
:param up_to_date_count: The up_to_date_count of this ControllerStatusDTO.
:type: int
"""
self._up_to_date_count = up_to_date_count
@property
def locally_modified_count(self):
"""
Gets the locally_modified_count of this ControllerStatusDTO.
The number of locally modified versioned process groups in the NiFi.
:return: The locally_modified_count of this ControllerStatusDTO.
:rtype: int
"""
return self._locally_modified_count
@locally_modified_count.setter
def locally_modified_count(self, locally_modified_count):
"""
Sets the locally_modified_count of this ControllerStatusDTO.
The number of locally modified versioned process groups in the NiFi.
:param locally_modified_count: The locally_modified_count of this ControllerStatusDTO.
:type: int
"""
self._locally_modified_count = locally_modified_count
@property
def stale_count(self):
"""
Gets the stale_count of this ControllerStatusDTO.
The number of stale versioned process groups in the NiFi.
:return: The stale_count of this ControllerStatusDTO.
:rtype: int
"""
return self._stale_count
@stale_count.setter
def stale_count(self, stale_count):
"""
Sets the stale_count of this ControllerStatusDTO.
The number of stale versioned process groups in the NiFi.
:param stale_count: The stale_count of this ControllerStatusDTO.
:type: int
"""
self._stale_count = stale_count
@property
def locally_modified_and_stale_count(self):
"""
Gets the locally_modified_and_stale_count of this ControllerStatusDTO.
The number of locally modified and stale versioned process groups in the NiFi.
:return: The locally_modified_and_stale_count of this ControllerStatusDTO.
:rtype: int
"""
return self._locally_modified_and_stale_count
@locally_modified_and_stale_count.setter
def locally_modified_and_stale_count(self, locally_modified_and_stale_count):
"""
Sets the locally_modified_and_stale_count of this ControllerStatusDTO.
The number of locally modified and stale versioned process groups in the NiFi.
:param locally_modified_and_stale_count: The locally_modified_and_stale_count of this ControllerStatusDTO.
:type: int
"""
self._locally_modified_and_stale_count = locally_modified_and_stale_count
@property
def sync_failure_count(self):
"""
Gets the sync_failure_count of this ControllerStatusDTO.
The number of versioned process groups in the NiFi that are unable to sync to a registry.
:return: The sync_failure_count of this ControllerStatusDTO.
:rtype: int
"""
return self._sync_failure_count
@sync_failure_count.setter
def sync_failure_count(self, sync_failure_count):
"""
Sets the sync_failure_count of this ControllerStatusDTO.
The number of versioned process groups in the NiFi that are unable to sync to a registry.
:param sync_failure_count: The sync_failure_count of this ControllerStatusDTO.
:type: int
"""
self._sync_failure_count = sync_failure_count
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ControllerStatusDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
Paul-Verardi/nipyapi
|
nipyapi/registry/models/bucket_item.py
|
# coding: utf-8
"""
NiFi Registry REST API
The REST API provides an interface to a registry with operations for saving, versioning, reading NiFi flows and components.
OpenAPI spec version: 0.2.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class BucketItem(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'link': 'Link',
'identifier': 'str',
'name': 'str',
'description': 'str',
'bucket_identifier': 'str',
'bucket_name': 'str',
'created_timestamp': 'int',
'modified_timestamp': 'int',
'type': 'str',
'permissions': 'Permissions'
}
attribute_map = {
'link': 'link',
'identifier': 'identifier',
'name': 'name',
'description': 'description',
'bucket_identifier': 'bucketIdentifier',
'bucket_name': 'bucketName',
'created_timestamp': 'createdTimestamp',
'modified_timestamp': 'modifiedTimestamp',
'type': 'type',
'permissions': 'permissions'
}
def __init__(self, link=None, identifier=None, name=None, description=None, bucket_identifier=None, bucket_name=None, created_timestamp=None, modified_timestamp=None, type=None, permissions=None):
"""
BucketItem - a model defined in Swagger
"""
self._link = None
self._identifier = None
self._name = None
self._description = None
self._bucket_identifier = None
self._bucket_name = None
self._created_timestamp = None
self._modified_timestamp = None
self._type = None
self._permissions = None
if link is not None:
self.link = link
if identifier is not None:
self.identifier = identifier
self.name = name
if description is not None:
self.description = description
self.bucket_identifier = bucket_identifier
if bucket_name is not None:
self.bucket_name = bucket_name
if created_timestamp is not None:
self.created_timestamp = created_timestamp
if modified_timestamp is not None:
self.modified_timestamp = modified_timestamp
self.type = type
if permissions is not None:
self.permissions = permissions
@property
def link(self):
"""
Gets the link of this BucketItem.
        A WebLink to this entity.
:return: The link of this BucketItem.
:rtype: Link
"""
return self._link
@link.setter
def link(self, link):
"""
Sets the link of this BucketItem.
        A WebLink to this entity.
:param link: The link of this BucketItem.
:type: Link
"""
self._link = link
@property
def identifier(self):
"""
Gets the identifier of this BucketItem.
An ID to uniquely identify this object.
:return: The identifier of this BucketItem.
:rtype: str
"""
return self._identifier
@identifier.setter
def identifier(self, identifier):
"""
Sets the identifier of this BucketItem.
An ID to uniquely identify this object.
:param identifier: The identifier of this BucketItem.
:type: str
"""
self._identifier = identifier
@property
def name(self):
"""
Gets the name of this BucketItem.
The name of the item.
:return: The name of this BucketItem.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this BucketItem.
The name of the item.
:param name: The name of this BucketItem.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
@property
def description(self):
"""
Gets the description of this BucketItem.
A description of the item.
:return: The description of this BucketItem.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this BucketItem.
A description of the item.
:param description: The description of this BucketItem.
:type: str
"""
self._description = description
@property
def bucket_identifier(self):
"""
Gets the bucket_identifier of this BucketItem.
        The identifier of the bucket this item belongs to. This cannot be changed after the item is created.
:return: The bucket_identifier of this BucketItem.
:rtype: str
"""
return self._bucket_identifier
@bucket_identifier.setter
def bucket_identifier(self, bucket_identifier):
"""
Sets the bucket_identifier of this BucketItem.
        The identifier of the bucket this item belongs to. This cannot be changed after the item is created.
:param bucket_identifier: The bucket_identifier of this BucketItem.
:type: str
"""
if bucket_identifier is None:
raise ValueError("Invalid value for `bucket_identifier`, must not be `None`")
self._bucket_identifier = bucket_identifier
@property
def bucket_name(self):
"""
Gets the bucket_name of this BucketItem.
        The name of the bucket this item belongs to.
:return: The bucket_name of this BucketItem.
:rtype: str
"""
return self._bucket_name
@bucket_name.setter
def bucket_name(self, bucket_name):
"""
Sets the bucket_name of this BucketItem.
        The name of the bucket this item belongs to.
:param bucket_name: The bucket_name of this BucketItem.
:type: str
"""
self._bucket_name = bucket_name
@property
def created_timestamp(self):
"""
Gets the created_timestamp of this BucketItem.
The timestamp of when the item was created, as milliseconds since epoch.
:return: The created_timestamp of this BucketItem.
:rtype: int
"""
return self._created_timestamp
@created_timestamp.setter
def created_timestamp(self, created_timestamp):
"""
Sets the created_timestamp of this BucketItem.
The timestamp of when the item was created, as milliseconds since epoch.
:param created_timestamp: The created_timestamp of this BucketItem.
:type: int
"""
if created_timestamp is not None and created_timestamp < 1:
raise ValueError("Invalid value for `created_timestamp`, must be a value greater than or equal to `1`")
self._created_timestamp = created_timestamp
@property
def modified_timestamp(self):
"""
Gets the modified_timestamp of this BucketItem.
The timestamp of when the item was last modified, as milliseconds since epoch.
:return: The modified_timestamp of this BucketItem.
:rtype: int
"""
return self._modified_timestamp
@modified_timestamp.setter
def modified_timestamp(self, modified_timestamp):
"""
Sets the modified_timestamp of this BucketItem.
The timestamp of when the item was last modified, as milliseconds since epoch.
:param modified_timestamp: The modified_timestamp of this BucketItem.
:type: int
"""
if modified_timestamp is not None and modified_timestamp < 1:
raise ValueError("Invalid value for `modified_timestamp`, must be a value greater than or equal to `1`")
self._modified_timestamp = modified_timestamp
@property
def type(self):
"""
Gets the type of this BucketItem.
The type of item.
:return: The type of this BucketItem.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this BucketItem.
The type of item.
:param type: The type of this BucketItem.
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`")
allowed_values = ["Flow"]
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}"
.format(type, allowed_values)
)
self._type = type
@property
def permissions(self):
"""
Gets the permissions of this BucketItem.
The access that the current user has to the bucket containing this item.
:return: The permissions of this BucketItem.
:rtype: Permissions
"""
return self._permissions
@permissions.setter
def permissions(self, permissions):
"""
Sets the permissions of this BucketItem.
The access that the current user has to the bucket containing this item.
:param permissions: The permissions of this BucketItem.
:type: Permissions
"""
self._permissions = permissions
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, BucketItem):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
Paul-Verardi/nipyapi
|
tests/test_canvas.py
|
<filename>tests/test_canvas.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `nipyapi` package."""
import pytest
from tests import conftest
from nipyapi import canvas, nifi
from nipyapi.nifi import ProcessGroupFlowEntity, ProcessGroupEntity
from nipyapi.nifi import ProcessorTypesEntity, DocumentedTypeDTO
from nipyapi.nifi.rest import ApiException
def test_get_root_pg_id():
r = canvas.get_root_pg_id()
assert isinstance(r, str)
def test_get_process_group_status(regress_nifi):
r = canvas.get_process_group_status(pg_id='root', detail='names')
assert isinstance(r, dict)
r = canvas.get_process_group_status('root', 'all')
assert isinstance(r, ProcessGroupEntity)
# We rely on this int for testing if a PG is running or not
assert isinstance(r.running_count, int)
with pytest.raises(AssertionError):
        _ = canvas.get_process_group_status('root', 'invalid')
def test_get_flow():
r = canvas.get_flow('root')
assert isinstance(r, ProcessGroupFlowEntity)
assert r.process_group_flow.breadcrumb.breadcrumb.name == 'NiFi Flow'
with pytest.raises(ValueError):
_ = canvas.get_flow('definitelyNotAPG')
def test_recurse_flow(regress_nifi, fix_pg):
_ = fix_pg.generate()
r = canvas.recurse_flow('root')
assert isinstance(r, ProcessGroupFlowEntity)
assert r.process_group_flow.breadcrumb.breadcrumb.name == 'NiFi Flow'
assert isinstance(
r.process_group_flow.flow.process_groups[0].nipyapi_extended,
ProcessGroupFlowEntity
)
def test_list_all_process_groups(regress_nifi, fix_pg):
_ = fix_pg.generate()
r = canvas.list_all_process_groups()
assert isinstance(r, list)
for pg in r:
assert isinstance(pg, ProcessGroupEntity)
def test_create_process_group(regress_nifi):
r = canvas.create_process_group(
parent_pg=canvas.get_process_group(canvas.get_root_pg_id(), 'id'),
new_pg_name=conftest.test_pg_name,
        location=(400.0, 400.0)
)
assert r.component.name == conftest.test_pg_name
assert r.position.x == r.position.y == 400
assert r.component.parent_group_id == canvas.get_root_pg_id()
assert isinstance(r, nifi.ProcessGroupEntity)
with pytest.raises(ApiException):
parent_pg = canvas.get_process_group('NiFi Flow')
parent_pg.id = 'invalid'
_ = canvas.create_process_group(
parent_pg,
'irrelevant',
(0, 0)
)
def test_get_process_group(regress_nifi, fix_pg):
with pytest.raises(AssertionError):
_ = canvas.get_process_group('nipyapi_test', 'invalid')
f_pg = fix_pg.generate()
pg1 = canvas.get_process_group(f_pg.id, 'id')
assert isinstance(pg1, ProcessGroupEntity)
duplicate_pg = fix_pg.generate()
pg2 = canvas.get_process_group(duplicate_pg.id, 'id')
assert pg2.id != pg1.id
pg_list = canvas.get_process_group(f_pg.status.name)
assert isinstance(pg_list, list)
# the two duplicates, and root = 3
assert len(pg_list) == 3
def test_delete_process_group(regress_nifi, fix_pg, fix_proc):
# Delete stopped PG
f_pg1 = fix_pg.generate()
r1 = canvas.delete_process_group(f_pg1)
assert r1.id == f_pg1.id
assert r1.status is None
# Test deleting a running PG
pg_2 = fix_pg.generate()
_ = fix_proc.generate(parent_pg=pg_2)
canvas.schedule_process_group(pg_2.id, True)
with pytest.raises(ValueError):
_ = canvas.delete_process_group(pg_2)
# Once more with feeling
r2 = canvas.delete_process_group(
pg_2,
force=True
)
assert r2.status is None
def test_schedule_process_group(fix_proc, fix_pg):
f_pg = fix_pg.generate()
_ = fix_proc.generate(parent_pg=f_pg)
r1 = canvas.schedule_process_group(
f_pg.id,
True
)
status = canvas.get_process_group(f_pg.id, 'id')
assert r1 is True
assert status.running_count == 1
    r2 = canvas.schedule_process_group(
f_pg.id,
False
)
assert r2 is True
status = canvas.get_process_group(f_pg.id, 'id')
assert status.running_count == 0
assert status.stopped_count == 1
with pytest.raises(AssertionError):
_ = canvas.schedule_process_group(
f_pg.id,
'BANANA'
)
def test_list_all_processor_types(regress_nifi):
r = canvas.list_all_processor_types()
assert isinstance(r, ProcessorTypesEntity)
assert len(r.processor_types) > 1
def test_get_processor_type(regress_nifi):
r1 = canvas.get_processor_type('twitter')
assert r1.type == 'org.apache.nifi.processors.twitter.GetTwitter'
assert isinstance(r1, DocumentedTypeDTO)
r2 = canvas.get_processor_type("syslog", 'tag')
assert isinstance(r2, list)
r3 = canvas.get_processor_type('standard')
assert isinstance(r3, list)
assert len(r3) > 10
def test_create_processor(regress_nifi, fix_pg):
f_pg = fix_pg.generate()
r1 = canvas.create_processor(
parent_pg=f_pg,
processor=canvas.get_processor_type('GenerateFlowFile'),
location=(400.0, 400.0),
name=conftest.test_processor_name
)
assert isinstance(r1, nifi.ProcessorEntity)
assert r1.status.name == conftest.test_processor_name
def test_list_all_processors(regress_nifi, fix_proc):
_ = fix_proc.generate()
_ = fix_proc.generate()
r = canvas.list_all_processors()
assert len(r) >= 2
assert isinstance(r[0], nifi.ProcessorEntity)
def test_get_processor(regress_nifi, fix_proc):
f_p1 = fix_proc.generate()
r1 = canvas.get_processor(f_p1.status.name)
assert isinstance(r1, nifi.ProcessorEntity)
r2 = canvas.get_processor('ClearlyNotAProcessor')
assert r2 is None
f_p2 = fix_proc.generate()
r3 = canvas.get_processor(f_p1.status.name)
assert isinstance(r3, list)
r4 = canvas.get_processor(f_p2.id, 'id')
assert isinstance(r4, nifi.ProcessorEntity)
assert r4.id != r1.id
def test_schedule_processor(regress_nifi, fix_proc):
f_p1 = fix_proc.generate()
r1 = canvas.schedule_processor(
f_p1,
True
)
processor_info = canvas.get_processor(f_p1.id, 'id')
assert r1 is True
assert isinstance(processor_info, nifi.ProcessorEntity)
assert processor_info.component.state == 'RUNNING'
r2 = canvas.schedule_processor(
f_p1,
False
)
status = canvas.get_processor(f_p1.id, 'id')
assert status.component.state == 'STOPPED'
assert r2 is True
with pytest.raises(AssertionError):
_ = canvas.schedule_processor(
f_p1,
'BANANA'
)
def test_delete_processor(regress_nifi, fix_proc):
f_p1 = fix_proc.generate()
r1 = canvas.delete_processor(f_p1)
assert r1.status is None
assert isinstance(r1, nifi.ProcessorEntity)
# try to delete processor twice
with pytest.raises(ValueError):
_ = canvas.delete_processor(f_p1)
# try to delete running processor
f_p2 = fix_proc.generate()
canvas.schedule_processor(f_p2, True)
with pytest.raises(ValueError):
_ = canvas.delete_processor(f_p2)
# and once more with feeling, er, force
r2 = canvas.delete_processor(f_p2, force=True)
assert r2.status is None
def test_update_processor(regress_nifi, fix_proc):
# TODO: Add way more tests to this
f_p1 = fix_proc.generate()
update = nifi.ProcessorConfigDTO(
scheduling_period='3s'
)
r1 = canvas.update_processor(f_p1, update)
with pytest.raises(ValueError, match='update param is not an instance'):
_ = canvas.update_processor(f_p1, 'FakeNews')
def test_get_variable_registry(fix_pg):
test_pg = fix_pg.generate()
r1 = canvas.get_variable_registry(test_pg)
assert isinstance(r1, nifi.VariableRegistryEntity)
with pytest.raises(ValueError, match='Unable to locate group with id'):
canvas.delete_process_group(test_pg)
_ = canvas.get_variable_registry(test_pg)
def test_update_variable_registry(fix_pg):
test_pg = fix_pg.generate()
r1 = canvas.update_variable_registry(
test_pg,
conftest.test_variable_registry_entry
)
assert isinstance(r1, nifi.VariableRegistryEntity)
with pytest.raises(ValueError,
match='param update is not a valid list of'
):
_ = canvas.update_variable_registry(test_pg, '')
def test_get_connections():
# TODO: Waiting for create_connection to generate fixture
pass
def test_purge_connection():
# TODO: Waiting for create_connection to generate fixture
pass
def test_purge_process_group():
# TODO: Waiting for create_connection to generate fixture
pass
def test_get_bulletins():
r = canvas.get_bulletins()
assert isinstance(r, nifi.ControllerBulletinsEntity)
def test_get_bulletin_board():
r = canvas.get_bulletin_board()
assert isinstance(r, nifi.BulletinBoardEntity)
def test_list_invalid_processors():
# TODO: write test for new feature
pass
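# A minimal sketch for the TODO above, not an authoritative test. It assumes
# that a freshly created GenerateFlowFile processor with no connections is
# reported as invalid (its 'success' relationship is neither connected nor
# auto-terminated), and that list_invalid_processors returns entities exposing
# status.name and component.validation_errors as used in the console demo.
def test_list_invalid_processors_sketch(regress_nifi, fix_pg):
    f_pg = fix_pg.generate()
    _ = canvas.create_processor(
        parent_pg=f_pg,
        processor=canvas.get_processor_type('GenerateFlowFile'),
        location=(400.0, 400.0),
        name=conftest.test_processor_name
    )
    r = canvas.list_invalid_processors()
    assert isinstance(r, list)
    # the new processor should appear in the invalid list with its errors
    match = [p for p in r if p.status.name == conftest.test_processor_name]
    assert match
    assert match[0].component.validation_errors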
def test_list_sensitive_processors():
# TODO: write test for new feature
pass
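# A minimal sketch for the TODO above, not an authoritative test. It assumes
# that a GetTwitter processor (which carries sensitive properties such as
# 'Consumer Secret') is reported by list_sensitive_processors, as shown in
# the console demo, and that the returned entities expose status.name.
def test_list_sensitive_processors_sketch(regress_nifi, fix_pg):
    f_pg = fix_pg.generate()
    _ = canvas.create_processor(
        parent_pg=f_pg,
        processor=canvas.get_processor_type('GetTwitter'),
        location=(400.0, 400.0),
        name=conftest.test_processor_name
    )
    r = canvas.list_sensitive_processors()
    assert isinstance(r, list)
    assert any(p.status.name == conftest.test_processor_name for p in r)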
|
Paul-Verardi/nipyapi
|
nipyapi/nifi/models/flow_snippet_dto.py
|
<filename>nipyapi/nifi/models/flow_snippet_dto.py
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.7.1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class FlowSnippetDTO(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'process_groups': 'list[ProcessGroupDTO]',
'remote_process_groups': 'list[RemoteProcessGroupDTO]',
'processors': 'list[ProcessorDTO]',
'input_ports': 'list[PortDTO]',
'output_ports': 'list[PortDTO]',
'connections': 'list[ConnectionDTO]',
'labels': 'list[LabelDTO]',
'funnels': 'list[FunnelDTO]',
'controller_services': 'list[ControllerServiceDTO]'
}
attribute_map = {
'process_groups': 'processGroups',
'remote_process_groups': 'remoteProcessGroups',
'processors': 'processors',
'input_ports': 'inputPorts',
'output_ports': 'outputPorts',
'connections': 'connections',
'labels': 'labels',
'funnels': 'funnels',
'controller_services': 'controllerServices'
}
def __init__(self, process_groups=None, remote_process_groups=None, processors=None, input_ports=None, output_ports=None, connections=None, labels=None, funnels=None, controller_services=None):
"""
FlowSnippetDTO - a model defined in Swagger
"""
self._process_groups = None
self._remote_process_groups = None
self._processors = None
self._input_ports = None
self._output_ports = None
self._connections = None
self._labels = None
self._funnels = None
self._controller_services = None
if process_groups is not None:
self.process_groups = process_groups
if remote_process_groups is not None:
self.remote_process_groups = remote_process_groups
if processors is not None:
self.processors = processors
if input_ports is not None:
self.input_ports = input_ports
if output_ports is not None:
self.output_ports = output_ports
if connections is not None:
self.connections = connections
if labels is not None:
self.labels = labels
if funnels is not None:
self.funnels = funnels
if controller_services is not None:
self.controller_services = controller_services
@property
def process_groups(self):
"""
Gets the process_groups of this FlowSnippetDTO.
The process groups in this flow snippet.
:return: The process_groups of this FlowSnippetDTO.
:rtype: list[ProcessGroupDTO]
"""
return self._process_groups
@process_groups.setter
def process_groups(self, process_groups):
"""
Sets the process_groups of this FlowSnippetDTO.
The process groups in this flow snippet.
:param process_groups: The process_groups of this FlowSnippetDTO.
:type: list[ProcessGroupDTO]
"""
self._process_groups = process_groups
@property
def remote_process_groups(self):
"""
Gets the remote_process_groups of this FlowSnippetDTO.
The remote process groups in this flow snippet.
:return: The remote_process_groups of this FlowSnippetDTO.
:rtype: list[RemoteProcessGroupDTO]
"""
return self._remote_process_groups
@remote_process_groups.setter
def remote_process_groups(self, remote_process_groups):
"""
Sets the remote_process_groups of this FlowSnippetDTO.
The remote process groups in this flow snippet.
:param remote_process_groups: The remote_process_groups of this FlowSnippetDTO.
:type: list[RemoteProcessGroupDTO]
"""
self._remote_process_groups = remote_process_groups
@property
def processors(self):
"""
Gets the processors of this FlowSnippetDTO.
The processors in this flow snippet.
:return: The processors of this FlowSnippetDTO.
:rtype: list[ProcessorDTO]
"""
return self._processors
@processors.setter
def processors(self, processors):
"""
Sets the processors of this FlowSnippetDTO.
The processors in this flow snippet.
:param processors: The processors of this FlowSnippetDTO.
:type: list[ProcessorDTO]
"""
self._processors = processors
@property
def input_ports(self):
"""
Gets the input_ports of this FlowSnippetDTO.
The input ports in this flow snippet.
:return: The input_ports of this FlowSnippetDTO.
:rtype: list[PortDTO]
"""
return self._input_ports
@input_ports.setter
def input_ports(self, input_ports):
"""
Sets the input_ports of this FlowSnippetDTO.
The input ports in this flow snippet.
:param input_ports: The input_ports of this FlowSnippetDTO.
:type: list[PortDTO]
"""
self._input_ports = input_ports
@property
def output_ports(self):
"""
Gets the output_ports of this FlowSnippetDTO.
The output ports in this flow snippet.
:return: The output_ports of this FlowSnippetDTO.
:rtype: list[PortDTO]
"""
return self._output_ports
@output_ports.setter
def output_ports(self, output_ports):
"""
Sets the output_ports of this FlowSnippetDTO.
The output ports in this flow snippet.
:param output_ports: The output_ports of this FlowSnippetDTO.
:type: list[PortDTO]
"""
self._output_ports = output_ports
@property
def connections(self):
"""
Gets the connections of this FlowSnippetDTO.
The connections in this flow snippet.
:return: The connections of this FlowSnippetDTO.
:rtype: list[ConnectionDTO]
"""
return self._connections
@connections.setter
def connections(self, connections):
"""
Sets the connections of this FlowSnippetDTO.
The connections in this flow snippet.
:param connections: The connections of this FlowSnippetDTO.
:type: list[ConnectionDTO]
"""
self._connections = connections
@property
def labels(self):
"""
Gets the labels of this FlowSnippetDTO.
The labels in this flow snippet.
:return: The labels of this FlowSnippetDTO.
:rtype: list[LabelDTO]
"""
return self._labels
@labels.setter
def labels(self, labels):
"""
Sets the labels of this FlowSnippetDTO.
The labels in this flow snippet.
:param labels: The labels of this FlowSnippetDTO.
:type: list[LabelDTO]
"""
self._labels = labels
@property
def funnels(self):
"""
Gets the funnels of this FlowSnippetDTO.
The funnels in this flow snippet.
:return: The funnels of this FlowSnippetDTO.
:rtype: list[FunnelDTO]
"""
return self._funnels
@funnels.setter
def funnels(self, funnels):
"""
Sets the funnels of this FlowSnippetDTO.
The funnels in this flow snippet.
:param funnels: The funnels of this FlowSnippetDTO.
:type: list[FunnelDTO]
"""
self._funnels = funnels
@property
def controller_services(self):
"""
Gets the controller_services of this FlowSnippetDTO.
The controller services in this flow snippet.
:return: The controller_services of this FlowSnippetDTO.
:rtype: list[ControllerServiceDTO]
"""
return self._controller_services
@controller_services.setter
def controller_services(self, controller_services):
"""
Sets the controller_services of this FlowSnippetDTO.
The controller services in this flow snippet.
:param controller_services: The controller_services of this FlowSnippetDTO.
:type: list[ControllerServiceDTO]
"""
self._controller_services = controller_services
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, FlowSnippetDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
Paul-Verardi/nipyapi
|
nipyapi/nifi/models/status_descriptor_dto.py
|
<reponame>Paul-Verardi/nipyapi<filename>nipyapi/nifi/models/status_descriptor_dto.py<gh_stars>0
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.7.1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class StatusDescriptorDTO(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'field': 'str',
'label': 'str',
'description': 'str',
'formatter': 'str'
}
attribute_map = {
'field': 'field',
'label': 'label',
'description': 'description',
'formatter': 'formatter'
}
def __init__(self, field=None, label=None, description=None, formatter=None):
"""
StatusDescriptorDTO - a model defined in Swagger
"""
self._field = None
self._label = None
self._description = None
self._formatter = None
if field is not None:
self.field = field
if label is not None:
self.label = label
if description is not None:
self.description = description
if formatter is not None:
self.formatter = formatter
@property
def field(self):
"""
Gets the field of this StatusDescriptorDTO.
The name of the status field.
:return: The field of this StatusDescriptorDTO.
:rtype: str
"""
return self._field
@field.setter
def field(self, field):
"""
Sets the field of this StatusDescriptorDTO.
The name of the status field.
:param field: The field of this StatusDescriptorDTO.
:type: str
"""
self._field = field
@property
def label(self):
"""
Gets the label of this StatusDescriptorDTO.
The label for the status field.
:return: The label of this StatusDescriptorDTO.
:rtype: str
"""
return self._label
@label.setter
def label(self, label):
"""
Sets the label of this StatusDescriptorDTO.
The label for the status field.
:param label: The label of this StatusDescriptorDTO.
:type: str
"""
self._label = label
@property
def description(self):
"""
Gets the description of this StatusDescriptorDTO.
The description of the status field.
:return: The description of this StatusDescriptorDTO.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this StatusDescriptorDTO.
The description of the status field.
:param description: The description of this StatusDescriptorDTO.
:type: str
"""
self._description = description
@property
def formatter(self):
"""
Gets the formatter of this StatusDescriptorDTO.
The formatter for the status descriptor.
:return: The formatter of this StatusDescriptorDTO.
:rtype: str
"""
return self._formatter
@formatter.setter
def formatter(self, formatter):
"""
Sets the formatter of this StatusDescriptorDTO.
The formatter for the status descriptor.
:param formatter: The formatter of this StatusDescriptorDTO.
:type: str
"""
self._formatter = formatter
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, StatusDescriptorDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|