| column | dtype | range |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3 to 972 |
| max_stars_repo_name | string | lengths 6 to 130 |
| max_stars_repo_head_hexsha | string | lengths 40 to 78 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64, nullable | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | lengths 24 to 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | lengths 24 to 24 |
| max_issues_repo_path | string | lengths 3 to 972 |
| max_issues_repo_name | string | lengths 6 to 130 |
| max_issues_repo_head_hexsha | string | lengths 40 to 78 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64, nullable | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string, nullable | lengths 24 to 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | lengths 24 to 24 |
| max_forks_repo_path | string | lengths 3 to 972 |
| max_forks_repo_name | string | lengths 6 to 130 |
| max_forks_repo_head_hexsha | string | lengths 40 to 78 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64, nullable | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | lengths 24 to 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | lengths 24 to 24 |
| content | string | lengths 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |

Each record below lists these metadata fields, followed by the file content and the per-file statistics (avg_line_length, max_line_length, alphanum_fraction).
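The schema above matches the column header of a Hugging Face dataset-viewer table. Assuming the records are exposed through the `datasets` library (the dataset name below is a hypothetical placeholder, not the real one), a minimal sketch for streaming and inspecting rows with this schema:

```python
from datasets import load_dataset  # assumes the Hugging Face `datasets` package is installed

# "some-org/some-code-dataset" is a placeholder name, not the actual dataset.
ds = load_dataset("some-org/some-code-dataset", split="train", streaming=True)

for row in ds:
    # Field names follow the schema table above.
    print(row["hexsha"], row["max_stars_repo_name"], row["max_stars_repo_path"])
    print(row["content"][:120])  # first characters of the stored source file
    break
```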

hexsha: fb477a967e80cf718f3ed771f6882921dbc6a434 | size: 278 | ext: py | lang: Python
max_stars: model/login.py | ElenaUS/automation | 9f1b948590dae5fcc1a2ad247961e92b99d7a88d | ["Apache-2.0"] | count: null | min: null | max: null
max_issues: model/login.py | ElenaUS/automation | 9f1b948590dae5fcc1a2ad247961e92b99d7a88d | ["Apache-2.0"] | count: null | min: null | max: null
max_forks: model/login.py | ElenaUS/automation | 9f1b948590dae5fcc1a2ad247961e92b99d7a88d | ["Apache-2.0"] | count: null | min: null | max: null
content:
# -*- coding: utf-8 -*-
class Login:
def __init__(self, username):
self.username = username
"""у этого класса делаем констурктор,
именно конструктор будут принимать все эти многочисленные параметры,
а дальше в методы будет передаваться один объект этого класса"""
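The constructor comment above describes the intended pattern: the constructor collects the parameters once, and a single Login object is then handed to the methods that need it. A tiny hypothetical usage sketch (the helper function name is made up for illustration):

```python
# Hypothetical usage of the Login value object defined above.
login = Login(username="admin")

def open_login_page(app, login):
    # an application helper would read the fields it needs from the one object
    print("logging in as", login.username)

open_login_page(None, login)
```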
avg_line_length: 27.8 | max_line_length: 68 | alphanum_fraction: 0.733813

hexsha: 2ce18f2d25729f561d2ab82db95bb4a411a7e8bd | size: 755 | ext: py | lang: Python
max_stars: ensysmod/model/energy_model.py | NOWUM/EnSysMod | 18c8a2198db3510e667c1f0298d00a3dfcb0aab7 | ["MIT"] | count: 1 | min: 2021-12-10T19:41:01.000Z | max: 2021-12-10T19:41:01.000Z
max_issues: ensysmod/model/energy_model.py | NOWUM/EnSysMod | 18c8a2198db3510e667c1f0298d00a3dfcb0aab7 | ["MIT"] | count: 83 | min: 2021-10-20T22:54:28.000Z | max: 2022-03-24T19:07:06.000Z
max_forks: ensysmod/model/energy_model.py | NOWUM/EnSysMod | 18c8a2198db3510e667c1f0298d00a3dfcb0aab7 | ["MIT"] | count: null | min: null | max: null
content:
from sqlalchemy import Column, Integer, String, ForeignKey, UniqueConstraint
from sqlalchemy.orm import relationship
from ensysmod.database.base_class import Base
from ensysmod.model import Dataset
class EnergyModel(Base):
id = Column(Integer, primary_key=True, index=True)
ref_dataset = Column(Integer, ForeignKey("dataset.id"), index=True, nullable=False)
name = Column(String, index=True, nullable=False)
description = Column(String, nullable=True)
# relationships
dataset: Dataset = relationship("Dataset")
parameters = relationship("EnergyModelParameter", back_populates="model")
# table constraints
__table_args__ = (
UniqueConstraint("ref_dataset", "name", name="_commodity_name_dataset_uc"),
)
avg_line_length: 34.318182 | max_line_length: 87 | alphanum_fraction: 0.748344

hexsha: 54335d9dba3543df065233c086a77744ff652459 | size: 4,892 | ext: py | lang: Python
max_stars: characters/req.py | JoyMbugua/marvel-land | b482051240ca3b949bc5ed69f14533a25d265c04 | ["MIT"] | count: null | min: null | max: null
max_issues: characters/req.py | JoyMbugua/marvel-land | b482051240ca3b949bc5ed69f14533a25d265c04 | ["MIT"] | count: null | min: null | max: null
max_forks: characters/req.py | JoyMbugua/marvel-land | b482051240ca3b949bc5ed69f14533a25d265c04 | ["MIT"] | count: null | min: null | max: null
content:
import requests
import datetime
import hashlib
from decouple import config
import aiohttp
import asyncio
from .models import Hero, Comic
base_url = 'https://gateway.marvel.com:443/v1/public/characters'
def getUrl():
'''
function to build the ts/apikey/hash auth query string required by the Marvel API
'''
global base_url
ts = datetime.datetime.now()
ts = str(int(ts.timestamp()))
public_key = config('PUBLIC_KEY')
private_key = config('PRIVATE_KEY')
hash = hashlib.md5((ts+private_key+public_key).encode()).hexdigest()
return f'ts={ts}&apikey={public_key}&hash={hash}'
async def get_characters():
'''
function to get the json response of the url request
returns processed results
'''
url = f'{base_url}?orderBy=-name&limit=100&{getUrl()}'
hero_results = None
async with aiohttp.ClientSession() as session:
async with session.get(url) as data:
resp = await data.json()
res = resp.get('data')
if res:
res = res.get('results')
hero_results = resp.get('data')['results']
return await process_characters(hero_results)
async def process_characters(results):
'''
function that processes the api results and converts them to a list
'''
hero_objects = []
for hero in results:
id = hero.get('id')
name = hero.get('name')
description = hero.get('description')
thumbnail = hero.get('thumbnail')
urls = hero.get('urls')
image_check = thumbnail['path'].endswith('image_not_available')
# check if the character has a description and image
if description and not image_check:
image_path = f"{thumbnail['path']}/portrait_uncanny.{thumbnail['extension']}"
hero_link = ''
for url in urls:
if url['type'] == 'wiki':
hero_link = url['url']
hero_object = Hero(id=id, name=name, description=description, image_path=image_path, link=hero_link)
hero_objects.append(hero_object)
return hero_objects[3:]
async def get_character_details(character_id):
'''
function to retrieve a single character and their details from the
list of already retrieved heroes
'''
character_details = {}
characters = await get_characters()
for character in characters:
if character.id == character_id:
character_details['id'] = character_id
character_details['name'] = character.name
character_details['description'] = character.description
character_details['image_path'] = character.image_path
character_details['link'] = character.link
return character_details
async def get_character_comics(character_id):
'''
function to retrieve comics per character
'''
global base_url
url = f'{base_url}/{character_id}/comics?{getUrl()}'
comic_results = None
async with aiohttp.ClientSession() as session:
async with session.get(url) as data:
response = await data.json()
# if the character has comics go ahead and process else return None
if len(response['data'].get('results')) > 0:
comic_results = response['data'].get('results')
return await process_comics(comic_results)
else:
return comic_results
async def process_comics(results):
'''
function to process comics response and return a list
'''
comic_list = []
for item in results:
id = item.get('id')
title = item.get('title')
description = item.get('description')
image_path = item.get('thumbnail')
if description:
image_path = f"{image_path['path']}/portrait_uncanny.{image_path['extension']}"
comic = Comic(id=id, title=title, description=description, image_path=image_path)
comic_list.append(comic)
return comic_list
def search_hero(name):
global base_url
url = f"{base_url}?name={name}&{getUrl()}"
data = requests.get(url)
response = data.json()
# print(response)
hero = {}
# process the JSON response
if response['status'] == "Ok":
results = response['data'].get('results')[0]
id = results.get('id')
name = results.get('name')
description = results.get('description')
thumbnail = results.get('thumbnail')
urls = results.get('urls')
image_path = f"{thumbnail['path']}/portrait_uncanny.{thumbnail['extension']}"
hero_link = ''
for url in urls:
if url['type'] == 'wiki':
hero_link = url['url']
hero = Hero(id=id, name=name, description=description, image_path=image_path, link=hero_link)
return hero
avg_line_length: 31.56129 | max_line_length: 116 | alphanum_fraction: 0.61018

hexsha: 628b62037ab6be40279380ab9bdcf38fb28c98fb | size: 336 | ext: py | lang: Python
max_stars: utils.py | 1170500804/glow | eebe0cfdf1a4f70ac0dc9dc0141bf09de2818ac3 | ["MIT"] | count: 2,898 | min: 2018-07-09T16:24:07.000Z | max: 2022-03-29T23:34:40.000Z
max_issues: utils.py | 1170500804/glow | eebe0cfdf1a4f70ac0dc9dc0141bf09de2818ac3 | ["MIT"] | count: 98 | min: 2018-07-09T17:37:14.000Z | max: 2021-11-02T14:59:35.000Z
max_forks: utils.py | 1170500804/glow | eebe0cfdf1a4f70ac0dc9dc0141bf09de2818ac3 | ["MIT"] | count: 542 | min: 2018-07-09T16:30:06.000Z | max: 2022-03-02T00:26:37.000Z
content:
import json
class ResultLogger(object):
def __init__(self, path, *args, **kwargs):
self.f_log = open(path, 'w')
self.f_log.write(json.dumps(kwargs) + '\n')
def log(self, **kwargs):
self.f_log.write(json.dumps(kwargs) + '\n')
self.f_log.flush()
def close(self):
self.f_log.close()
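A short hypothetical usage sketch for the ResultLogger above: the constructor's keyword arguments become the first JSON line (e.g. the run configuration), and each log() call appends one JSON record and flushes it.

```python
# Hypothetical usage: write a JSON-lines results log.
logger = ResultLogger("results.jsonl", run="demo", lr=1e-3)  # header line with the run config
for epoch in range(3):
    logger.log(epoch=epoch, loss=1.0 / (epoch + 1))          # one JSON object per line
logger.close()
```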
avg_line_length: 22.4 | max_line_length: 51 | alphanum_fraction: 0.583333

hexsha: dae2000b2b10b91e2a154054b7e61703b8eae81a | size: 9,326 | ext: py | lang: Python
max_stars: core/import_primary.py | moshthepitt/shulezote | e903a208948ab5294183e2a8c2dac9360a184654 | ["MIT"] | count: 2 | min: 2015-12-02T08:14:34.000Z | max: 2020-12-16T19:56:46.000Z
max_issues: core/import_primary.py | moshthepitt/shulezote | e903a208948ab5294183e2a8c2dac9360a184654 | ["MIT"] | count: 4 | min: 2016-10-04T12:15:42.000Z | max: 2021-06-10T19:47:39.000Z
max_forks: core/import_primary.py | moshthepitt/shulezote | e903a208948ab5294183e2a8c2dac9360a184654 | ["MIT"] | count: 1 | min: 2018-08-20T14:19:32.000Z | max: 2018-08-20T14:19:32.000Z
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import datetime
from django.conf import settings
from django.contrib.gis.geos import Point
from schools.models import School
from facts.models import Fact
from staff.models import Staff
from facilities.models import Facility, FacilityRecord
from places.models import County, Constituency, Province, District
from places.models import Division, Location
def get_ownership(data):
if data == "PRIVATE":
return School.PRIVATE
return School.PUBLIC
def get_sponsor(data):
if data == "CENTRAL GOVERNMENT/DEB":
return School.GOVERNMENT
elif data == "RELIGIOUS ORGANIZATION":
return School.RELIGIOUS
elif data == "COMMUNITY":
return School.COMMUNITY
elif data == "NGO/CBO":
return School.NGO
elif data == "PRIVATE INDIVIDUAL":
return School.PRIVATE_INDIVIDUAL
return School.NOT_KNOWN
def get_student_gender(data):
if data == "BOYS ONLY":
return School.BOYS
elif data == "GIRLS ONLY":
return School.GIRLS
elif data == "MIXED":
return School.MIXED
else:
return School.NOT_KNOWN
def get_school_type(data):
if data == "DAY ONLY":
return School.DAY
elif data == "BOARDING ONLY":
return School.BOARDING
elif data == "DAY & BOARDING":
return School.DAY_AND_BOARDING
return School.NOT_KNOWN
def get_student_needs(data):
if data == "ORDINARY":
return School.ORDINARY
elif data == "INTEGRATED":
return School.INTEGRATED
elif data == "SPECIAL SCHOOL":
return School.SPECIAL
return School.ORDINARY
def import_primary_schools():
period = datetime.datetime(day=31, month=12, year=2007)
filename = "%s/documentation/data/2007/primary.csv" % settings.BASE_DIR
n = 1
with open(filename, "rb") as ifile:
reader = csv.reader(ifile)
for row in reader:
if n > 1:
school = School()
school.name = row[0].strip()
school.level = School.PRIMARY
school.ownership = get_ownership(row[2].strip())
school.sponsor = get_sponsor(row[3].strip())
school.student_gender = get_student_gender(row[4].strip())
school.school_type = get_school_type(row[5].strip())
school.student_needs = get_student_needs(row[6].strip())
# location
if row[29]:
county, created = County.objects.get_or_create(name=row[29].strip().upper())
school.county = county
province, created = Province.objects.get_or_create(name=row[28].strip().upper())
school.province = province
if row[33]:
constituency = Constituency.objects.filter(name=row[33].strip().upper()).first()
if not constituency:
constituency, created = Constituency.objects.get_or_create(
name=row[33].strip().upper(), county=county)
school.constituency = constituency
if row[30]:
district = District.objects.filter(name=row[30].strip().upper()).first()
if not district:
district, created = District.objects.get_or_create(
name=row[30].strip().upper(), province=province)
school.district = district
if row[31]:
division, created = Division.objects.get_or_create(
name=row[31].strip().upper(), district=district)
school.division = division
if row[32]:
location, created = Location.objects.get_or_create(
name=row[32].strip().upper(), division=division)
school.location = location
if row[34]:
coord = row[34].split(",")
x = float(coord[0][1:])
y = float(coord[1][1:-2])
school.coordinates = Point(y, x)
school.save()
# facilities
facility1, created = Facility.objects.get_or_create(name="Toilets")
facility2, created = Facility.objects.get_or_create(name="Classrooms")
facility3, created = Facility.objects.get_or_create(name="Enrollment")
facility_record1, created = FacilityRecord.objects.get_or_create(facility=facility1, school=school, period=period, boys=row[11].strip(),
girls=row[12].strip(), total=row[13].strip())
facility_record3, created = FacilityRecord.objects.get_or_create(facility=facility3, school=school, period=period, boys=row[15].strip(),
girls=row[16].strip(), total=row[17].strip())
# staff
if row[18]:
staff1, created = Staff.objects.get_or_create(period=period, school=school, staff_type=Staff.TSC_MALE,
number=row[18].strip(), is_teacher=True)
if row[19]:
staff2, created = Staff.objects.get_or_create(period=period, school=school, staff_type=Staff.TSC_FEMALE,
number=row[19].strip(), is_teacher=True)
if row[20]:
staff3, created = Staff.objects.get_or_create(period=period, school=school, staff_type=Staff.LOCAL_MALE,
number=row[20].strip(), is_teacher=True)
if row[21]:
staff4, created = Staff.objects.get_or_create(period=period, school=school, staff_type=Staff.LOCAL_FEMALE,
number=row[21].strip(), is_teacher=True)
if row[22]:
staff5, created = Staff.objects.get_or_create(period=period, school=school, staff_type=Staff.PTA_MALE,
number=row[22].strip(), is_teacher=True)
if row[23]:
staff6, created = Staff.objects.get_or_create(period=period, school=school, staff_type=Staff.PTA_FEMALE,
number=row[23].strip(), is_teacher=True)
if row[24]:
staff7, created = Staff.objects.get_or_create(period=period, school=school, staff_type=Staff.OTHER_MALE,
number=row[24].strip(), is_teacher=True)
if row[25]:
staff8, created = Staff.objects.get_or_create(period=period, school=school, staff_type=Staff.OTHER_FEMALE,
number=row[25].strip(), is_teacher=True)
if row[26]:
staff9, created = Staff.objects.get_or_create(period=period, school=school, staff_type=Staff.NON_TEACHING_MALE,
number=row[26].strip(), is_teacher=False)
if row[27]:
staff10, created = Staff.objects.get_or_create(period=period, school=school, staff_type=Staff.NON_TEACHING_FEMALE,
number=row[27].strip(), is_teacher=False)
# facts
if row[7]:
fact2, created = Fact.objects.get_or_create(name="Pupil Teacher Ratio", period=period, school=school,
value=row[7].strip())
if row[8]:
fact1, created = Fact.objects.get_or_create(name="Pupil Classroom Ratio", period=period, school=school,
facility=facility2, value=row[8].strip())
if row[9]:
fact4, created = Fact.objects.get_or_create(name="Pupil Toilet Ratio", period=period, school=school,
facility=facility1, value=row[9].strip())
if row[10]:
fact5, created = Fact.objects.get_or_create(name="Total Number of Classrooms", period=period, school=school,
facility=facility2, value=row[10].strip())
if row[14]:
fact5, created = Fact.objects.get_or_create(name="Teachers Toilets", period=period, school=school,
facility=facility1, value=row[14].strip())
n += 1
avg_line_length: 51.524862 | max_line_length: 156 | alphanum_fraction: 0.50697

hexsha: e6eda45b84ced3a76970db3d284222df38160185 | size: 476 | ext: py | lang: Python
max_stars: posts/migrations/0003_auto_20200325_0932.py | dragonrathony/zed_market | c73f17501608c8fe86692c3c4f6e03fc8ba03286 | ["bzip2-1.0.6"] | count: 1 | min: 2020-06-17T13:45:54.000Z | max: 2020-06-17T13:45:54.000Z
max_issues: posts/migrations/0003_auto_20200325_0932.py | Honey4251996/zed_market | c73f17501608c8fe86692c3c4f6e03fc8ba03286 | ["bzip2-1.0.6"] | count: 11 | min: 2021-03-19T07:55:39.000Z | max: 2022-03-12T00:34:55.000Z
max_forks: posts/migrations/0003_auto_20200325_0932.py | Honey4251996/zed_market | c73f17501608c8fe86692c3c4f6e03fc8ba03286 | ["bzip2-1.0.6"] | count: null | min: null | max: null
content:
# Generated by Django 3.0.3 on 2020-03-25 09:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('posts', '0002_auto_20200325_0911'),
]
operations = [
migrations.AlterField(
model_name='post',
name='category',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='posts.Post'),
),
]
avg_line_length: 23.8 | max_line_length: 109 | alphanum_fraction: 0.636555

hexsha: 6f4ef02b0cc7faa6c15867117e6a900caae9c749 | size: 5,570 | ext: py | lang: Python
max_stars: LMS_Student/views.py | Noisyfox/LMS | 8acb5f4e8b98a6dfbe5f6692d00dd34812fd5101 | ["MIT"] | count: null | min: null | max: null
max_issues: LMS_Student/views.py | Noisyfox/LMS | 8acb5f4e8b98a6dfbe5f6692d00dd34812fd5101 | ["MIT"] | count: null | min: null | max: null
max_forks: LMS_Student/views.py | Noisyfox/LMS | 8acb5f4e8b98a6dfbe5f6692d00dd34812fd5101 | ["MIT"] | count: null | min: null | max: null
content:
import re
from django.db.models import Q
from django.http import Http404
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.urls import reverse_lazy
from django.views import View
from django.views.generic import CreateView
from django.views.generic import DetailView
from django.views.generic import ListView
from django.views.generic import TemplateView
from sendfile import sendfile
from LMS.mixins import QueryMixin
from LMS.models import Unit, Material, Assignment, AssignmentFile, GradeRecord
from LMS.views import BaseTimetableView
from LMS_Student.mixins import StudentMixin
class UnitQueryMixin(QueryMixin):
def do_query(self, request, *args, **kwargs):
unit = get_object_or_404(Unit, Q(student=self.request.user.student) & Q(pk=kwargs['unit_id']))
self._unit = unit
@property
def unit(self):
if not self._unit:
raise Http404('Unknown unit.')
return self._unit
def get_context_data(self, **kwargs):
ctx = super(UnitQueryMixin, self).get_context_data(**kwargs)
ctx['unit'] = self._unit
return ctx
class MaterialQueryMixin(UnitQueryMixin):
def do_query(self, request, *args, **kwargs):
super(MaterialQueryMixin, self).do_query(request, *args, **kwargs)
material = get_object_or_404(Material, Q(unit=self.unit) & Q(pk=kwargs['material_id']))
self._material = material
@property
def material(self):
if not self._material:
raise Http404('Unknown material.')
return self._material
def get_context_data(self, **kwargs):
ctx = super(MaterialQueryMixin, self).get_context_data(**kwargs)
ctx['material'] = self._material
return ctx
class AssignmentQueryMixin(UnitQueryMixin):
def do_query(self, request, *args, **kwargs):
super(AssignmentQueryMixin, self).do_query(request, *args, **kwargs)
assignment = get_object_or_404(Assignment, Q(unit=self.unit) & Q(pk=kwargs['assignment_id']))
self._assignment = assignment
@property
def assignment(self):
if not self._assignment:
raise Http404('Unknown assignment.')
return self._assignment
def get_context_data(self, **kwargs):
ctx = super(AssignmentQueryMixin, self).get_context_data(**kwargs)
ctx['assignment'] = self._assignment
return ctx
class UnitListView(StudentMixin, ListView):
template_name = 'LMS_Student/unit.html'
context_object_name = 'units'
allow_empty = True
def get_queryset(self):
return self.request.user.student.enrolled_unit.all()
class EnrollListView(StudentMixin, ListView):
template_name = 'LMS_Student/enroll.html'
context_object_name = 'units'
allow_empty = True
model = Unit
def post(self, request, *args, **kwargs):
unit_id = request.POST.getlist('unit')
units = Unit.objects.filter(pk__in=unit_id).all()
self.request.user.student.enrolled_unit = units
self.request.user.student.save()
for u in units:
grade, _ = GradeRecord.objects.get_or_create(student=self.request.user.student, unit=u)
return HttpResponseRedirect(reverse_lazy('lms_stu:unit'))
class UnitInfoView(StudentMixin, UnitQueryMixin, DetailView):
template_name = 'LMS_Student/unit_info.html'
context_object_name = 'grade'
def get_object(self, queryset=None):
return GradeRecord.objects.get(unit=self.unit, student=self.request.user.student)
class MaterialListView(StudentMixin, UnitQueryMixin, ListView):
template_name = 'LMS_Student/unit_material.html'
context_object_name = 'materials'
allow_empty = True
def get_queryset(self):
return Material.objects.filter(unit=self.unit)
class MaterialDownloadView(StudentMixin, MaterialQueryMixin, View):
def get(self, request, *args, **kwargs):
return sendfile(request, self.material.file.path, attachment=True)
class AssignmentListView(StudentMixin, UnitQueryMixin, ListView):
template_name = 'LMS_Student/unit_assignment.html'
context_object_name = 'assignments'
allow_empty = True
def get_queryset(self):
return Assignment.objects.filter(unit=self.unit)
class AssignmentFileListView(StudentMixin, AssignmentQueryMixin, ListView):
template_name = 'LMS_Student/unit_assignment_detail.html'
context_object_name = 'files'
allow_empty = True
def get_queryset(self):
return AssignmentFile.objects.filter(Q(assignment=self.assignment) & Q(uploader=self.request.user.student))
class AssignmentSubmitView(StudentMixin, AssignmentQueryMixin, CreateView):
template_name = 'LMS_Student/unit_assignment_submit.html'
model = AssignmentFile
fields = ['name', 'file']
def form_valid(self, form):
# TODO: check due time
submission = form.save(commit=False)
submission.assignment = self.assignment
submission.uploader = self.request.user.student
return super().form_valid(form)
def get_success_url(self):
return reverse_lazy('lms_stu:assignment_detail',
kwargs={'unit_id': self.unit.pk, 'assignment_id': self.assignment.pk})
class TimetableView(StudentMixin, BaseTimetableView):
template_name = 'LMS_Student/timetable.html'
def get_units(self):
return self.request.user.student.enrolled_unit.all()
class PersonalInfoView(StudentMixin, TemplateView):
template_name = 'LMS_Student/personal_info.html'
avg_line_length: 30.944444 | max_line_length: 115 | alphanum_fraction: 0.714363

hexsha: 7c72b64ce7ded30b034f399818d92258e246f233 | size: 761 | ext: py | lang: Python
max_stars: factory/matting_converter.py | vuthede/MMNet | b7f10770a16480c3875719a41b4d85ed74e828ee | ["Apache-2.0"] | count: 179 | min: 2018-11-23T05:46:38.000Z | max: 2022-03-03T14:31:17.000Z
max_issues: factory/matting_converter.py | vuthede/MMNet | b7f10770a16480c3875719a41b4d85ed74e828ee | ["Apache-2.0"] | count: 15 | min: 2019-05-01T11:46:48.000Z | max: 2020-10-21T01:55:41.000Z
max_forks: factory/matting_converter.py | vuthede/MMNet | b7f10770a16480c3875719a41b4d85ed74e828ee | ["Apache-2.0"] | count: 38 | min: 2018-12-24T09:50:50.000Z | max: 2021-12-29T07:20:09.000Z
content:
from abc import ABC
from abc import abstractmethod
import tensorflow as tf
class ConverterBase(ABC):
@classmethod
@abstractmethod
def convert(
cls,
logits: tf.Tensor,
output_name: str,
num_classes: int,
):
raise NotImplementedError(f"convert() not defined in {cls.__name__}")  # cls is already the class in a classmethod
class ProbConverter(ConverterBase):
@classmethod
def convert(
cls,
logits: tf.Tensor,
output_name: str,
num_classes: int,
):
assert num_classes == 2
softmax_scores = tf.contrib.layers.softmax(logits, scope="output/softmax")
# tf.identity to assign output_name
output = tf.identity(softmax_scores, name=output_name)
return output
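ProbConverter.convert relies on tf.contrib, so it only runs under TensorFlow 1.x. A minimal hypothetical sketch of wiring it onto a logits tensor in that environment:

```python
# Assumes a TensorFlow 1.x graph environment (tf.contrib is required by convert()).
import tensorflow as tf

logits = tf.placeholder(tf.float32, shape=(None, 2), name="logits")
probs = ProbConverter.convert(logits, output_name="matting_prob", num_classes=2)
print(probs.name)  # "matting_prob:0"
```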
avg_line_length: 23.060606 | max_line_length: 87 | alphanum_fraction: 0.642576

hexsha: b1fcbb382d71aff9b72a7f02a79ef8e2234d101a | size: 4,199 | ext: py | lang: Python
max_stars: reley/impl/expr_based_ast.py | thautwarm/reley | 17e5730c1afbefaeb22103719c85f08333c65937 | ["MIT"] | count: 12 | min: 2018-09-13T02:32:21.000Z | max: 2021-08-06T04:59:26.000Z
max_issues: reley/impl/expr_based_ast.py | thautwarm/reley | 17e5730c1afbefaeb22103719c85f08333c65937 | ["MIT"] | count: null | min: null | max: null
max_forks: reley/impl/expr_based_ast.py | thautwarm/reley | 17e5730c1afbefaeb22103719c85f08333c65937 | ["MIT"] | count: null | min: null | max: null
content:
from typing import NamedTuple, List, Tuple
from Redy.Magic.Classic import record
from numpy import number
from rbnf.easy import Tokenizer
globals()['NamedTuple'] = object
class Loc:
__slots__ = ['lineno', 'colno', 'filename']
lineno: int
colno: int
filename: str
def __init__(self, lineno, colno, filename):
self.lineno = lineno
self.colno = colno
self.filename = filename
def __matmul__(self, other):
if isinstance(other, Tokenizer):
return Loc(other.lineno, other.colno, getattr(other, 'filename') or '<unknown>')
return Loc(*other.loc)
def __iter__(self):
yield self.lineno
yield self.colno
yield self.filename
def __repr__(self):
return str(self)
def __str__(self):
return 'Loc(lineno={!r}, colno={!r}, filename={!r})'.format(
self.lineno, self.colno, self.filename)
def update(self, lineno=None, colno=None, filename=None):
if lineno:
self.lineno = lineno
if colno:
self.colno = colno
if filename:
self.filename = filename
class TAST:
loc: Loc
@property
def iter_fields(self):
for it in self.__annotations__:
if not it.startswith('_') and it not in ('iter_fields', 'lineno'):
yield it, getattr(self, it)
@property
def lineno(self):
return self.loc.lineno
loc = Loc(1, 1, "")
@record
class DefTy(TAST, NamedTuple):
loc: Loc
name: str
structure: TAST
@record
class DefFun(TAST, NamedTuple):
loc: Loc
name: str
args: 'List[Arg]'
body: TAST
doc: 'Doc'
@record
class Lam(TAST, NamedTuple):
loc: Loc
name: str
args: 'List[Arg]'
body: TAST
@record
class Arg(TAST, NamedTuple):
loc: Loc
name: str
ty: TAST
@record
class Suite(TAST, NamedTuple):
loc: Loc
statements: List[TAST]
@record
class Definition(TAST, NamedTuple):
loc: Loc
statements: List[TAST]
@record
class Where(TAST, NamedTuple):
loc: Loc
out: Suite
pre_def: Definition
@record
class DefVar(TAST, NamedTuple):
loc: Loc
name: str
value: TAST
@record
class If(TAST, NamedTuple):
loc: Loc
cond: TAST
iftrue: TAST
iffalse: TAST
@record
class Call(TAST, NamedTuple):
loc: Loc
callee: TAST
arg: TAST
@record
class Symbol(TAST, NamedTuple):
loc: Loc
name: str
@record
class Number(TAST, NamedTuple):
loc: Loc
value: number
@record
class Str(TAST, NamedTuple):
loc: Loc
value: str
@record
class HList(TAST, NamedTuple):
loc: Loc
seq: List[TAST]
@record
class HDict(TAST, NamedTuple):
loc: Loc
seq: List[Tuple[TAST, TAST]]
def make_set(seq: List[TAST]):
return tuple((each, Void(each.loc)) for each in seq)
@record
class Tuple(TAST, NamedTuple):
loc: Loc
seq: Tuple[TAST, ...]
@record
class Return(TAST, NamedTuple):
loc: Loc
expr: TAST
@record
class Yield(TAST, NamedTuple):
loc: Loc
expr: TAST
@record
class BinSeq(TAST, NamedTuple):
loc: Loc
seq: List[TAST]
@record
class Infix(TAST, NamedTuple):
loc: Loc
precedence: int
op: str
@record
class Operator(TAST, NamedTuple):
loc: Loc
name: str
@record
class Void(TAST, NamedTuple):
loc: Loc
@record
class Alias(TAST, NamedTuple):
loc: Loc
imp_name: str
name: str
@record
class Doc(TAST, NamedTuple):
loc: Loc
text: str
@record
class Import(TAST, NamedTuple):
loc: Loc
imp_name: str
name: str
stuffs: List[Alias]
@record
class Module(NamedTuple):
stmts: Definition
doc: Doc
exports: List[Operator]
def transform(f):
def ff(it):
return generic_visit(f(it))
def generic_visit(ast: TAST):
def stream():
for key, value in ast.iter_fields:
if type(value) is tuple or isinstance(value, list):
yield key, list(ff(e) for e in value)
else:
yield key, ff(value)
if hasattr(ast, 'iter_fields'):
return type(ast)(**dict(stream()))
return ast
return ff
avg_line_length: 16.275194 | max_line_length: 92 | alphanum_fraction: 0.607287

hexsha: ca8441cb1bf7e728bfc9a159f47bf8d817afe35b | size: 80 | ext: py | lang: Python
max_stars: VAE/test.py | PL187/idlm_Ben | 5ba93da0d9b5d9313a9ce968e3593fefd0a05fc9 | ["MIT"] | count: 1 | min: 2020-07-25T10:26:53.000Z | max: 2020-07-25T10:26:53.000Z
max_issues: VAE/test.py | PL187/idlm_Ben | 5ba93da0d9b5d9313a9ce968e3593fefd0a05fc9 | ["MIT"] | count: null | min: null | max: null
max_forks: VAE/test.py | PL187/idlm_Ben | 5ba93da0d9b5d9313a9ce968e3593fefd0a05fc9 | ["MIT"] | count: null | min: null | max: null
content:
import evaluate
evaluate.unpack_Xpred("data/test_Xpred_20190826_210307.csv",32)
avg_line_length: 26.666667 | max_line_length: 63 | alphanum_fraction: 0.8625

hexsha: 2e5377fad129785cb5a304add9414be12a22f29f | size: 10,334 | ext: py | lang: Python
max_stars: examples/advanced_operations/add_responsive_search_ad_with_ad_customizer.py | claudiapaveljlp/google-ads-python | c143e81804e237a9549dd5936503d921033c4e5a | ["Apache-2.0"] | count: null | min: null | max: null
max_issues: examples/advanced_operations/add_responsive_search_ad_with_ad_customizer.py | claudiapaveljlp/google-ads-python | c143e81804e237a9549dd5936503d921033c4e5a | ["Apache-2.0"] | count: null | min: null | max: null
max_forks: examples/advanced_operations/add_responsive_search_ad_with_ad_customizer.py | claudiapaveljlp/google-ads-python | c143e81804e237a9549dd5936503d921033c4e5a | ["Apache-2.0"] | count: null | min: null | max: null
content:
#!/usr/bin/env python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adds a customizer attribute.
Also links the customizer attribute to a customer, and then adds a responsive
search ad with a description using the ad customizer to the specified ad group.
Customizer attributes and ad group customizers are created for business data
customizers. For more information about responsive search ad customization see:
https://developers.google.com/google-ads/api/docs/ads/customize-responsive-search-ads?hl=en
"""
import argparse
from datetime import date, timedelta
import sys
from uuid import uuid4
from google.ads.googleads.client import GoogleAdsClient
from google.ads.googleads.errors import GoogleAdsException
# The name of the customizer attribute to be used in the ad customizer, which
# must be unique for a given customer account. To run this example multiple
# times, specify a unique value as a command line argument. Note that there is
# a limit for the number of enabled customizer attributes in one account, so
# you shouldn't run this example more than necessary. For more details visit:
# https://developers.google.com/google-ads/api/docs/ads/customize-responsive-search-ads#rules_and_limitations
_CUSTOMIZER_ATTRIBUTE_NAME = "Price"
def main(client, customer_id, ad_group_id, customizer_attribute_name):
"""The main method that creates all necessary entities for the example.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a client customer ID.
ad_group_id: an ad group ID.
customizer_attribute_name: the name for the customizer attribute.
"""
customizer_attribute_resource_name = _create_customizer_attribute(
client, customer_id, customizer_attribute_name
)
_link_customizer_attribute_to_customer(
client, customer_id, customizer_attribute_resource_name
)
_create_responsive_search_ad_with_customization(
client, customer_id, ad_group_id, customizer_attribute_name
)
# [START add_responsive_search_ad_with_ad_customizer_1]
def _create_customizer_attribute(
client, customer_id, customizer_attribute_name
):
"""Creates a customizer attribute with the given customizer attribute name.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a client customer ID.
customizer_attribute_name: the name for the customizer attribute.
Returns:
A resource name for a customizer attribute.
"""
# Creates a customizer attribute operation for creating a customizer
# attribute.
operation = client.get_type("CustomizerAttributeOperation")
# Creates a customizer attribute with the specified name.
customizer_attribute = operation.create
customizer_attribute.name = customizer_attribute_name
# Specifies the type to be 'PRICE' so that we can dynamically customize the
# part of the ad's description that is a price of a product/service we
# advertise.
customizer_attribute.type_ = client.enums.CustomizerAttributeTypeEnum.PRICE
# Issues a mutate request to add the customizer attribute and prints its
# information.
customizer_attribute_service = client.get_service(
"CustomizerAttributeService"
)
response = customizer_attribute_service.mutate_customizer_attributes(
customer_id=customer_id, operations=[operation]
)
resource_name = response.results[0].resource_name
print(f"Added a customizer attribute with resource name: '{resource_name}'")
return resource_name
# [END add_responsive_search_ad_with_ad_customizer_1]
# [START add_responsive_search_ad_with_ad_customizer_2]
def _link_customizer_attribute_to_customer(
client, customer_id, customizer_attribute_resource_name
):
"""Links the customizer attribute to the customer.
This is done by providing a value to be used in a responsive search ad
that will be created in a later step.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a client customer ID.
customizer_attribute_resource_name: a resource name for customizer
attribute.
"""
# Creates a customer customizer operation.
operation = client.get_type("CustomerCustomizerOperation")
# Creates a customer customizer with the value to be used in the responsive
# search ad.
customer_customizer = operation.create
customer_customizer.customizer_attribute = (
customizer_attribute_resource_name
)
customer_customizer.value.type_ = (
client.enums.CustomizerAttributeTypeEnum.PRICE
)
# Specify '100USD' as a text value. The ad customizer will dynamically
# replace the placeholder with this value when the ad serves.
customer_customizer.value.string_value = "100USD"
customer_customizer_service = client.get_service(
"CustomerCustomizerService"
)
# Issues a mutate request to add the customer customizer and prints its
# information.
response = customer_customizer_service.mutate_customer_customizers(
customer_id=customer_id, operations=[operation]
)
resource_name = response.results[0].resource_name
print(f"Added a customer customizer with resource name: '{resource_name}'")
# [END add_responsive_search_ad_with_ad_customizer_2]
# [START add_responsive_search_ad_with_ad_customizer_3]
def _create_responsive_search_ad_with_customization(
client, customer_id, ad_group_id, customizer_attribute_resource_name
):
"""Creates a responsive search ad using the specified customizer attribute.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a client customer ID.
ad_group_id: an ad group ID.
customizer_attribute_resource_name: a resource name for customizer
attribute.
"""
# Creates an ad group ad operation.
operation = client.get_type("AdGroupAdOperation")
# Creates an ad group ad.
ad_group_ad = operation.create
ad_group_service = client.get_service("AdGroupService")
ad_group_ad.ad_group = ad_group_service.ad_group_path(
customer_id, ad_group_id
)
ad_group_ad.status = client.enums.AdGroupAdStatusEnum.PAUSED
# Creates an ad and sets responsive search ad info.
ad = ad_group_ad.ad
ad.final_urls.append("http://www.example.com")
headline_1 = client.get_type("AdTextAsset")
headline_1.text = "Cruise to Mars"
headline_2 = client.get_type("AdTextAsset")
headline_2.text = "Best Space Cruise Line"
headline_3 = client.get_type("AdTextAsset")
headline_3.text = "Experience the Stars"
ad.responsive_search_ad.headlines.extend(
[headline_1, headline_2, headline_3]
)
description_1 = client.get_type("AdTextAsset")
description_1.text = "Buy your tickets now"
# Creates this particular description using the ad customizer. Visit
# https://developers.google.com/google-ads/api/docs/ads/customize-responsive-search-ads#ad_customizers_in_responsive_search_ads
# for details about the placeholder format. The ad customizer replaces the
# placeholder with the value we previously created and linked to the
# customer using CustomerCustomizer.
description_2 = client.get_type("AdTextAsset")
description_2.text = "Just {CUSTOMIZER.$customizerAttributeName:10USD}"
ad.responsive_search_ad.descriptions.extend([description_1, description_2])
ad.responsive_search_ad.path1 = "all-inclusive"
ad.responsive_search_ad.path2 = "deals"
# Issues a mutate request to add the ad group ad and prints its information.
ad_group_ad_service = client.get_service("AdGroupAdService")
response = ad_group_ad_service.mutate_ad_group_ads(
customer_id=customer_id, operations=[operation]
)
resource_name = response.results[0].resource_name
print(f"Created responsive search ad with resource name: '{resource_name}'")
# [END add_responsive_search_ad_with_ad_customizer_3]
if __name__ == "__main__":
# GoogleAdsClient will read the google-ads.yaml configuration file in the
# home directory if none is specified.
googleads_client = GoogleAdsClient.load_from_storage(version="v9")
parser = argparse.ArgumentParser(
description=(
"Creates ad customizers and applies them to a responsive search ad."
)
)
# The following argument(s) should be provided to run the example.
parser.add_argument(
"-c",
"--customer_id",
type=str,
required=True,
help="The Google Ads customer ID.",
)
parser.add_argument(
"-a", "--ad_group_id", type=str, required=True, help="An ad group ID.",
)
parser.add_argument(
"-n",
"--customizer_attribute_name",
type=str,
default=_CUSTOMIZER_ATTRIBUTE_NAME,
help=(
"The name of the customizer attribute to be created. The name must "
"be unique across a single client account, so be sure not to use "
"the same value more than once."
),
)
args = parser.parse_args()
try:
main(
googleads_client,
args.customer_id,
args.ad_group_id,
args.customizer_attribute_name,
)
except GoogleAdsException as ex:
print(
f'Request with ID "{ex.request_id}" failed with status '
f'"{ex.error.code().name}" and includes the following errors:'
)
for error in ex.failure.errors:
print(f'Error with message "{error.message}".')
if error.location:
for field_path_element in error.location.field_path_elements:
print(f"\t\tOn field: {field_path_element.field_name}")
sys.exit(1)
avg_line_length: 39.292776 | max_line_length: 131 | alphanum_fraction: 0.72905

hexsha: d215f5660d06095bfa19474e13bb492e71765463 | size: 2,014 | ext: py | lang: Python
max_stars: apps/genres/tests/__init__.py | GiannisClipper/payments | 94e08144597b3f4cd0de8485edf3f5535aeb9da6 | ["MIT"] | count: null | min: null | max: null
max_issues: apps/genres/tests/__init__.py | GiannisClipper/payments | 94e08144597b3f4cd0de8485edf3f5535aeb9da6 | ["MIT"] | count: null | min: null | max: null
max_forks: apps/genres/tests/__init__.py | GiannisClipper/payments | 94e08144597b3f4cd0de8485edf3f5535aeb9da6 | ["MIT"] | count: null | min: null | max: null
content:
from django.test import TestCase
import copy
from django.contrib.auth import get_user_model
from funds.models import Fund
from genres.models import Genre
from users.tests import UserCreateMethods
from funds.tests import FundCreateMethods
from users.tests import USER_SAMPLES, ADMIN_SAMPLES
from funds.tests import FUND_SAMPLES
GENRE_SAMPLES = {
# First key digit is equal to user id
11: {'user': {'id': 1}, 'fund': {'key': 11}, 'code': '1', 'name': 'INCOME',
'is_income': True}, # noqa: E127
12: {'user': {'id': 1}, 'fund': {'key': 11}, 'code': '2', 'name': 'EXPENSES',
'is_income': False}, # noqa: E127
21: {'user': {'id': 2}, 'fund': {'key': 21}, 'code': 'ES', 'name': 'ESODA',
'is_income': True}, # noqa: E127
22: {'user': {'id': 2}, 'fund': {'key': 21}, 'code': 'EX', 'name': 'EXODA',
'is_income': False}, # noqa: E127
}
class GenreCreateMethods:
def create_genre(self, **genre):
genre['user'] = get_user_model().objects.get(pk=genre['user']['id'])
genre['fund'] = Fund.objects.get(pk=genre['fund']['id'])
return Genre.objects.create(**genre)
def create_genres(self, samples):
for sample in samples.values():
genre = self.create_genre(**sample)
sample['id'] = genre.pk
class GenresTests(TestCase, GenreCreateMethods, UserCreateMethods, FundCreateMethods):
def setUp(self):
self.samples = {
'users': copy.deepcopy(USER_SAMPLES),
'admins': copy.deepcopy(ADMIN_SAMPLES),
'funds': copy.deepcopy(FUND_SAMPLES),
'genres': copy.deepcopy(GENRE_SAMPLES),
}
self.create_users(self.samples['users'])
self.create_admins(self.samples['admins'])
self.create_funds(self.samples['funds'])
for sample in self.samples['genres'].values():
key = sample['fund']['key']
sample['fund']['id'] = self.samples['funds'][key]['id']
sample['fund'].pop('key', None)
avg_line_length: 31.968254 | max_line_length: 86 | alphanum_fraction: 0.602781

hexsha: 695a76be52944eba725cee11ac06597684824c5e | size: 3,376 | ext: py | lang: Python
max_stars: dl_nn_mini.py | stevenwasik/dl | 92bd6cff5619f8153f3675dd92d7d2969f2fe31b | ["MIT"] | count: null | min: null | max: null
max_issues: dl_nn_mini.py | stevenwasik/dl | 92bd6cff5619f8153f3675dd92d7d2969f2fe31b | ["MIT"] | count: null | min: null | max: null
max_forks: dl_nn_mini.py | stevenwasik/dl | 92bd6cff5619f8153f3675dd92d7d2969f2fe31b | ["MIT"] | count: null | min: null | max: null
content:
import numpy as np
import random
import dl_helper_functions as hf
def nn_mini(target, predictors, classes, hidden_layer_nodes, hidden_layers, predictor_count, eps=0.001, epochs=100,
minibatch_size=50):
network_struc = [predictor_count] + [hidden_layer_nodes] * hidden_layers + [classes]
num_obs = len(target)
cost = 0
# Initialize
layer_dat = [[0, 0, 0, predictors]]
for i in range(1, len(network_struc)):
node_count_prior_layer = network_struc[i - 1]
node_count_current_layer = network_struc[i]
b = np.reshape(np.random.randn(node_count_current_layer), (1, node_count_current_layer))
w = np.reshape(np.random.randn(node_count_prior_layer * node_count_current_layer),
(node_count_prior_layer, node_count_current_layer))
z = np.zeros((num_obs, node_count_current_layer))
a = np.zeros((num_obs, node_count_current_layer))
layer_dat.append([b, w, z, a])
for i in range(epochs):
# Forward Step
for m in range(0, num_obs, minibatch_size):
if m + minibatch_size > num_obs:
minibatch_size = num_obs - m
for k in range(1, len(layer_dat)):
layer_dat[k][2][m:m + minibatch_size, :] = np.matmul(layer_dat[k - 1][3][m:m + minibatch_size, :],
layer_dat[k][1]) + layer_dat[k][0]
if k < len(layer_dat) - 1:  # hidden layers use ReLU; the final layer falls through to softmax below
layer_dat[k][3][m:m + minibatch_size, :] = hf.relu(layer_dat[k][2][m:m + minibatch_size, :])
else:
layer_dat[k][3][m:m + minibatch_size, :] = hf.softmax(layer_dat[k][2][m:m + minibatch_size, :])
# Back Step
mini_target = target[m:m + minibatch_size, :]
mini_layer = [layer_dat[-1][0], layer_dat[-1][1],
layer_dat[-1][2][m:m + minibatch_size, :], layer_dat[-1][3][m:m + minibatch_size, :]]
mini_layer_p_a = layer_dat[-2][3][m:m + minibatch_size, :]
cost = np.mean((mini_target - mini_layer[3]) ** 2)
del_l = -(mini_target - mini_layer[3]) * hf.softmax_del(mini_layer[2], mini_target)
del_l_b = np.sum(del_l, keepdims=True, axis=0) / minibatch_size
del_l_w = np.matmul(np.transpose(mini_layer_p_a), del_l) / minibatch_size
layer_dat[-1][0] = mini_layer[0] - eps * del_l_b
layer_dat[-1][1] = mini_layer[1] - eps * del_l_w
for j in range(2, len(layer_dat)):
mini_layer = [layer_dat[-j][0], layer_dat[-j][1],
layer_dat[-j][2][m:m + minibatch_size, :], layer_dat[-j][3][m:m + minibatch_size, :]]
mini_layer_p_a = layer_dat[-j - 1][3][m:m + minibatch_size, :]
mini_layer_n_w = layer_dat[-j + 1][1]
del_l = np.matmul(del_l, np.transpose(mini_layer_n_w)) * hf.relu_del(mini_layer[2])
del_l_b = np.sum(del_l, keepdims=True, axis=0) / minibatch_size
del_l_w = np.matmul(np.transpose(mini_layer_p_a), del_l) / minibatch_size
layer_dat[-j][0] = mini_layer[0] - eps * del_l_b
layer_dat[-j][1] = mini_layer[1] - eps * del_l_w
if i % int(epochs / 10) == 0:
print('iteration ', i, ' cost: ', cost)
return cost, layer_dat
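nn_mini trains a small fully connected network with minibatch gradient descent, ReLU hidden layers and a softmax output. A hypothetical toy invocation, assuming dl_helper_functions provides the relu/softmax helpers imported above and that this file is importable as a module:

```python
import numpy as np
from dl_nn_mini import nn_mini  # assumes dl_nn_mini.py is on the Python path

# Toy data: 200 observations, 4 predictors, 3 one-hot classes.
rng = np.random.default_rng(0)
X = rng.standard_normal((200, 4))
labels = rng.integers(0, 3, size=200)
Y = np.eye(3)[labels]  # one-hot targets, shape (200, 3)

cost, layer_dat = nn_mini(Y, X, classes=3, hidden_layer_nodes=8, hidden_layers=2,
                          predictor_count=4, eps=0.01, epochs=50, minibatch_size=20)
print("final cost:", cost)
```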
avg_line_length: 49.647059 | max_line_length: 115 | alphanum_fraction: 0.573164

hexsha: f7bda1308e8a50757694c2893fe6e4cf0a2073a2 | size: 3,918 | ext: py | lang: Python
max_stars: podcatcher.py | quandram/podcatcher | b1d14b10b3e1afd1947e09ddf2006dac37c6fae7 | ["MIT"] | count: null | min: null | max: null
max_issues: podcatcher.py | quandram/podcatcher | b1d14b10b3e1afd1947e09ddf2006dac37c6fae7 | ["MIT"] | count: null | min: null | max: null
max_forks: podcatcher.py | quandram/podcatcher | b1d14b10b3e1afd1947e09ddf2006dac37c6fae7 | ["MIT"] | count: null | min: null | max: null
content:
import feedparser
import os
import requests
from datetime import datetime
from dateutil.parser import parse
from dateutil import tz
from pytz import timezone
from sanitize_filename import sanitize
import configKeys
class podcatcher:
def __init__(self, podConfig, configSection, configData):
self.podCatcherConfig = podConfig
self.configSection = configSection
self.config = configData
try:
self.maxEpisodesToDownload = int(self.podCatcherConfig[configKeys.MAX_EPISODES_TO_DOWNLOAD])
except:
self.maxEpisodesToDownload = 0
if not os.path.exists(os.path.join(self.podCatcherConfig[configKeys.OUTPUT])):
try:
os.mkdir(os.path.join(self.podCatcherConfig[configKeys.OUTPUT]))
except OSError as e:
print ("Creation of the directory %s failed" % self.podCatcherConfig[configKeys.OUTPUT], e.data)
return
else:
print ("Successfully created the directory %s " % self.podCatcherConfig[configKeys.OUTPUT])
if not os.path.exists(os.path.join(self.podCatcherConfig[configKeys.OUTPUT], self.configSection)):
try:
os.mkdir(os.path.join(self.podCatcherConfig[configKeys.OUTPUT], self.configSection))
except OSError as e:
print ("Creation of the directory %s failed" % self.configSection, e.data)
else:
print ("Successfully created the directory %s " % self.configSection)
PACIFIC = tz.gettz("America/Los_Angeles")
self.timezone_info = {"PST": PACIFIC, "PDT": PACIFIC}
def get_new_pods(self):
feed = feedparser.parse(self.config[configKeys.FEED])
lastProcessed = self.get_config_last_processed_date()
podLastDownloaded = lastProcessed
print ("Downloading %s: " % self.configSection, end = '')
podsDownloaded = 0
for pod in reversed(feed.entries):
podPublishedOn = self.get_utc_date(pod.published)
if podPublishedOn > lastProcessed and (self.maxEpisodesToDownload == 0 or podsDownloaded < self.maxEpisodesToDownload):
print (".", end = '')
try:
req = requests.get(pod.links[1]["href"], allow_redirects=True, timeout=(3.05, 27))
open(os.path.join(self.podCatcherConfig[configKeys.OUTPUT], self.configSection, self.get_pod_file_name(pod)),
"wb").write(req.content)
podLastDownloaded = podPublishedOn
podsDownloaded += 1
if self.maxEpisodesToDownload > 0 and podsDownloaded == self.maxEpisodesToDownload:
break
except requests.exceptions.ConnectionError:
print ("\nError: Request timedout: %s" % pod.title)
break
except Exception as e:
print ("\nError: catching pod: %s" % pod.title)
print(type(e))
print(e)
break
print (" | %d episodes downloaded\n" % podsDownloaded )
return podLastDownloaded
def get_pod_file_name(self, pod):
podPublishedOn = self.get_utc_date(pod.published)
podExtension = self.get_pod_file_extension(pod)
if "?" in podExtension:
podExtension = podExtension.rpartition("?")[0]
return sanitize(podPublishedOn.strftime("%Y-%m-%dT%H-%M-%SZ") + "_" + self.configSection + "_" + pod.title + "." + podExtension)
def get_pod_file_extension(self, pod):
return pod.links[1]["href"].rpartition(".")[-1]
def get_utc_date(self, date):
return parse(date, tzinfos=self.timezone_info).astimezone(timezone('UTC'))
def get_config_last_processed_date(self):
return self.get_utc_date(self.config[configKeys.LAST_DOWNLOADED_DATE])
avg_line_length: 43.054945 | max_line_length: 136 | alphanum_fraction: 0.624043

hexsha: 8441c6433211e70e75343c45786e0382d75a2eea | size: 3,769 | ext: py | lang: Python
max_stars: elimika_backend/users/models.py | bawabu/elimika_backend | d7a3d59454b7bebd09e6950a37532ff86848fb0b | ["MIT"] | count: null | min: null | max: null
max_issues: elimika_backend/users/models.py | bawabu/elimika_backend | d7a3d59454b7bebd09e6950a37532ff86848fb0b | ["MIT"] | count: null | min: null | max: null
max_forks: elimika_backend/users/models.py | bawabu/elimika_backend | d7a3d59454b7bebd09e6950a37532ff86848fb0b | ["MIT"] | count: null | min: null | max: null
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import uuid
import math
from django.contrib.auth.models import AbstractUser
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
GENDER_CHOICES = (
('boy', 'Boy'),
('girl', 'Girl'),
)
@python_2_unicode_compatible
class User(AbstractUser):
is_tutor = models.BooleanField(default=False)
def __str__(self):
return self.username
def get_absolute_url(self):
return reverse('users:detail', kwargs={'username': self.username})
class Learner(models.Model):
"""Hold fields for a learner."""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=255)
username = models.CharField(max_length=255, unique=True)
age = models.PositiveSmallIntegerField(blank=True, null=True)
gender = models.CharField(choices=GENDER_CHOICES, max_length=50)
joined = models.DateTimeField(auto_now_add=True)
def __str__(self):
"""String representation."""
return self.name
@property
def performance(self):
"""Performance in percentage per category."""
t_right = self.learner_answers.filter(
question__category__category_name='teeth',
choice__is_right=True).count()
t_total = self.learner_answers.filter(
question__category__category_name='teeth').count()
try:
t_percent = math.ceil(t_right / t_total * 100)
except ZeroDivisionError:
t_percent = 0
tt_right = self.learner_answers.filter(
question__category__category_name='teeth_types',
choice__is_right=True).count()
tt_total = self.learner_answers.filter(
question__category__category_name='teeth_types').count()
try:
tt_percent = math.ceil(tt_right / tt_total * 100)
except ZeroDivisionError:
tt_percent = 0
ts_right = self.learner_answers.filter(
question__category__category_name='teeth_sets',
choice__is_right=True).count()
ts_total = self.learner_answers.filter(
question__category__category_name='teeth_sets').count()
try:
ts_percent = math.ceil(ts_right / ts_total * 100)
except ZeroDivisionError:
ts_percent = 0
return [t_percent, tt_percent, ts_percent]
@property
def total_questions(self):
"""Return total questions answered per category"""
from elimika_backend.questions.models import Question
t_answeredQ = self.learner_answers.filter(
question__category__category_name='teeth').order_by(
'question__id').distinct('question__id').count()
t_total = Question.objects.filter(category__category_name='teeth').count()
tt_answeredQ = self.learner_answers.filter(
question__category__category_name='teeth_types').order_by(
'question__id').distinct('question__id').count()
tt_total = Question.objects.filter(
category__category_name='teeth_types').count()
ts_answeredQ = self.learner_answers.filter(
question__category__category_name='teeth_sets').order_by(
'question__id').distinct('question__id').count()
ts_total = Question.objects.filter(
category__category_name='teeth_sets').count()
return [
{ 'answered': t_answeredQ, 'total': t_total },
{ 'answered': tt_answeredQ, 'total': tt_total },
{ 'answered': ts_answeredQ, 'total': ts_total }
]
avg_line_length: 34.577982 | max_line_length: 82 | alphanum_fraction: 0.669674

hexsha: 4de5e1ee206f18f9ce856d6327800a12221ce0b8 | size: 2,175 | ext: py | lang: Python
max_stars: crazyflie_t/crazyflie_state_estimate_t.py | joaoolavo/crazyflie-kinect-control | c77134eb3ec654d9c61830c0a1bd20732c1e596a | ["MIT"] | count: 73 | min: 2015-03-11T05:32:13.000Z | max: 2022-03-25T23:30:17.000Z
max_issues: crazyflie_t/crazyflie_state_estimate_t.py | joaoolavo/crazyflie-kinect-control | c77134eb3ec654d9c61830c0a1bd20732c1e596a | ["MIT"] | count: 18 | min: 2015-01-19T20:23:36.000Z | max: 2017-02-14T21:03:07.000Z
max_forks: crazyflie_t/crazyflie_state_estimate_t.py | joaoolavo/crazyflie-kinect-control | c77134eb3ec654d9c61830c0a1bd20732c1e596a | ["MIT"] | count: 49 | min: 2015-06-19T03:16:10.000Z | max: 2021-01-13T08:56:02.000Z
content:
"""LCM type definitions
This file automatically generated by lcm.
DO NOT MODIFY BY HAND!!!!
"""
try:
import cStringIO.StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
class crazyflie_state_estimate_t(object):
__slots__ = ["timestamp", "xhat", "t"]
def __init__(self):
self.timestamp = 0
self.xhat = [ 0.0 for dim0 in range(12) ]
self.t = 0.0
def encode(self):
buf = BytesIO()
buf.write(crazyflie_state_estimate_t._get_packed_fingerprint())
self._encode_one(buf)
return buf.getvalue()
def _encode_one(self, buf):
buf.write(struct.pack(">q", self.timestamp))
buf.write(struct.pack('>12d', *self.xhat[:12]))
buf.write(struct.pack(">d", self.t))
def decode(data):
if hasattr(data, 'read'):
buf = data
else:
buf = BytesIO(data)
if buf.read(8) != crazyflie_state_estimate_t._get_packed_fingerprint():
raise ValueError("Decode error")
return crazyflie_state_estimate_t._decode_one(buf)
decode = staticmethod(decode)
def _decode_one(buf):
self = crazyflie_state_estimate_t()
self.timestamp = struct.unpack(">q", buf.read(8))[0]
self.xhat = struct.unpack('>12d', buf.read(96))
self.t = struct.unpack(">d", buf.read(8))[0]
return self
_decode_one = staticmethod(_decode_one)
_hash = None
def _get_hash_recursive(parents):
if crazyflie_state_estimate_t in parents: return 0
tmphash = (0xf28782fb9b176eb0) & 0xffffffffffffffff
tmphash = (((tmphash<<1)&0xffffffffffffffff) + (tmphash>>63)) & 0xffffffffffffffff
return tmphash
_get_hash_recursive = staticmethod(_get_hash_recursive)
_packed_fingerprint = None
def _get_packed_fingerprint():
if crazyflie_state_estimate_t._packed_fingerprint is None:
crazyflie_state_estimate_t._packed_fingerprint = struct.pack(">Q", crazyflie_state_estimate_t._get_hash_recursive([]))
return crazyflie_state_estimate_t._packed_fingerprint
_get_packed_fingerprint = staticmethod(_get_packed_fingerprint)
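A minimal round-trip sketch for the generated LCM type above, using made-up values: encode() prepends the packed fingerprint, and decode() checks it before unpacking the fields.

```python
# Hypothetical encode/decode round trip.
msg = crazyflie_state_estimate_t()
msg.timestamp = 123456789
msg.xhat = [0.0] * 12   # 12-dimensional state estimate
msg.t = 0.5

raw = msg.encode()
decoded = crazyflie_state_estimate_t.decode(raw)
print(decoded.timestamp, decoded.t)
```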
avg_line_length: 33.984375 | max_line_length: 130 | alphanum_fraction: 0.668046

hexsha: b860a7db3c615f9c2313314f395388db18df959f | size: 17,528 | ext: py | lang: Python
max_stars: pylangacq/tests/test_chat.py | mitjanikolaus/pylangacq | 88c327ee5b41af4a7c1a59f1813d52bf49bf3bed | ["MIT"] | count: null | min: null | max: null
max_issues: pylangacq/tests/test_chat.py | mitjanikolaus/pylangacq | 88c327ee5b41af4a7c1a59f1813d52bf49bf3bed | ["MIT"] | count: null | min: null | max: null
max_forks: pylangacq/tests/test_chat.py | mitjanikolaus/pylangacq | 88c327ee5b41af4a7c1a59f1813d52bf49bf3bed | ["MIT"] | count: null | min: null | max: null
content:
import copy
import datetime
import filecmp
import functools
import os
import tempfile
import unittest
from unittest import mock
import pytest
from pylangacq.chat import _clean_word, Reader, cached_data_info, remove_cached_data
from pylangacq.objects import Gra, Utterance, Token
from pylangacq.tests.test_data import (
LOCAL_EVE_PATH,
REMOTE_BROWN_URL,
REMOTE_EVE_DIR,
REMOTE_EVE_FILE_PATH,
download_and_extract_brown,
)
download_and_extract_brown()
_EXPECTED_EVE_UTTERANCES = [
Utterance(
participant="CHI",
tokens=[
Token(
word="more",
pos="qn",
mor="more",
gra=Gra(dep=1, head=2, rel="QUANT"),
),
Token(
word="cookie",
pos="n",
mor="cookie",
gra=Gra(dep=2, head=0, rel="INCROOT"),
),
Token(word=".", pos=".", mor="", gra=Gra(dep=3, head=2, rel="PUNCT")),
],
time_marks=None,
tiers={
"CHI": "more cookie . [+ IMP]",
"%mor": "qn|more n|cookie .",
"%gra": "1|2|QUANT 2|0|INCROOT 3|2|PUNCT",
"%int": "distinctive , loud",
},
),
Utterance(
participant="MOT",
tokens=[
Token(
word="you",
pos="pro:per",
mor="you",
gra=Gra(dep=1, head=2, rel="SUBJ"),
),
Token(
word="0v",
pos="0v",
mor="v",
gra=Gra(dep=2, head=0, rel="ROOT"),
),
Token(
word="more",
pos="qn",
mor="more",
gra=Gra(dep=3, head=4, rel="QUANT"),
),
Token(
word="cookies",
pos="n",
mor="cookie-PL",
gra=Gra(dep=4, head=2, rel="OBJ"),
),
Token(word="?", pos="?", mor="", gra=Gra(dep=5, head=2, rel="PUNCT")),
],
time_marks=None,
tiers={
"MOT": "you 0v more cookies ?",
"%mor": "pro:per|you 0v|v qn|more n|cookie-PL ?",
"%gra": "1|2|SUBJ 2|0|ROOT 3|4|QUANT 4|2|OBJ 5|2|PUNCT",
},
),
]
class BaseTestCHATReader:
"""A base test class that collects all tests for a CHAT reader class.
The intention is to allow running the same set of tests using either
``pylangacq.Reader`` or its subclass.
The reader class being used is set by overriding the ``reader_class`` attribute.
"""
# Must be set by a subclass.
reader_class = None
@property
@functools.lru_cache(maxsize=1)
def eve_local(self):
return self.reader_class.from_files([LOCAL_EVE_PATH])
@property
@functools.lru_cache(maxsize=1)
def eve_remote(self):
return self.reader_class.from_files([REMOTE_EVE_FILE_PATH])
def test_use_cached(self):
remove_cached_data()
assert len(cached_data_info()) == 0
self.reader_class.from_zip(REMOTE_BROWN_URL, match="Eve")
assert len(cached_data_info()) == 1
assert REMOTE_BROWN_URL in cached_data_info()
# Use a mock session to block internet access.
# The `from_zip` shouldn't crash and shouldn't use the session object anyway,
# because it should use the cached data.
mock_session = mock.Mock()
self.reader_class.from_zip(REMOTE_BROWN_URL, match="Eve", session=mock_session)
assert len(cached_data_info()) == 1
assert REMOTE_BROWN_URL in cached_data_info()
mock_session.get.assert_not_called()
remove_cached_data()
assert len(cached_data_info()) == 0
def test_from_strs_same_as_from_files(self):
with open(LOCAL_EVE_PATH, encoding="utf-8") as f:
from_strs = self.reader_class.from_strs([f.read()])
file_from_strs = from_strs._files[0]
file_from_files = self.eve_local._files[0]
assert file_from_strs.utterances == file_from_files.utterances
assert file_from_strs.header == file_from_files.header
def test_from_dir(self):
r = self.reader_class.from_dir(REMOTE_EVE_DIR)
assert r.n_files() == 20
def test_to_strs(self):
expected = (
"@Languages:\teng , yue\n"
"@Participants:\tFOO Foo P1 , BAR Bar P2\n"
"@ID:\teng|Foobar|FOO||female|||P1|||\n"
"@ID:\teng|Foobar|BAR||male|||P2|||\n"
"@Date:\t03-NOV-2016\n"
"@Comment:\tThis is a comment.\n"
"*FOO:\thow are you ?\n"
"*BAR:\tfine , thank you ."
)
reader = self.reader_class.from_strs([expected])
actual = list(reader.to_strs())[0]
assert actual.strip() == expected.strip()
def test_to_chat_is_dir_true(self):
expected = (
"@Languages:\teng , yue\n"
"@Participants:\tFOO Foo P1 , BAR Bar P2\n"
"@ID:\teng|Foobar|FOO||female|||P1|||\n"
"@ID:\teng|Foobar|BAR||male|||P2|||\n"
"@Date:\t03-NOV-2016\n"
"@Comment:\tThis is a comment.\n"
"*FOO:\thow are you ?\n"
"*BAR:\tfine , thank you .\n"
)
reader = self.reader_class.from_strs([expected])
with tempfile.TemporaryDirectory() as temp_dir:
reader.to_chat(temp_dir, is_dir=True)
assert os.listdir(temp_dir) == ["0001.cha"]
with open(os.path.join(temp_dir, "0001.cha"), encoding="utf-8") as f:
assert f.read() == expected
def test_to_chat_is_dir_false(self):
expected = (
"@Languages:\teng , yue\n"
"@Participants:\tFOO Foo P1 , BAR Bar P2\n"
"@ID:\teng|Foobar|FOO||female|||P1|||\n"
"@ID:\teng|Foobar|BAR||male|||P2|||\n"
"@Date:\t03-NOV-2016\n"
"@Comment:\tThis is a comment.\n"
"*FOO:\thow are you ?\n"
"*BAR:\tfine , thank you .\n"
)
reader = self.reader_class.from_strs([expected])
with tempfile.TemporaryDirectory() as temp_dir:
basename = "data.cha"
file_path = os.path.join(temp_dir, basename)
reader.to_chat(file_path)
assert os.listdir(temp_dir) == [basename]
with open(file_path, encoding="utf-8") as f:
assert f.read() == expected
def test_round_trip_to_strs_and_from_strs_for_tabular_true(self):
original = self.eve_local
new = self.reader_class.from_strs([list(original.to_strs(tabular=True))[0]])
assert original.n_files() == new.n_files() == 1
assert (
# Utterance count
sum(len(f.utterances) for f in original._files)
== sum(len(f.utterances) for f in new._files)
== 1588
)
assert (
# Word count
sum(len(u.tokens) for f in original._files for u in f.utterances)
== sum(len(u.tokens) for f in new._files for u in f.utterances)
== 6101
)
def test_clear(self):
eve_copy = copy.deepcopy(self.eve_local)
eve_copy.clear()
assert eve_copy.n_files() == 0
def test_add(self):
reader1 = self.reader_class.from_strs(["*X: foo"])
reader2 = self.reader_class.from_strs(["*X: bar"])
reader3 = self.reader_class.from_strs(["*X: baz"])
assert list((reader1 + reader2).to_strs()) == ["*X:\tfoo\n", "*X:\tbar\n"]
reader2 += reader3
assert list(reader2.to_strs()) == ["*X:\tbar\n", "*X:\tbaz\n"]
def test_append_and_append_left(self):
eve_copy = copy.deepcopy(self.eve_local)
eve_copy.append(self.eve_remote)
assert eve_copy.file_paths() == [LOCAL_EVE_PATH, REMOTE_EVE_FILE_PATH]
eve_copy.append_left(self.eve_remote)
assert eve_copy.file_paths() == [
REMOTE_EVE_FILE_PATH,
LOCAL_EVE_PATH,
REMOTE_EVE_FILE_PATH,
]
def test_extend_and_extend_left(self):
eve_copy = copy.deepcopy(self.eve_local)
eve_copy.extend([self.eve_remote])
assert eve_copy.file_paths() == [LOCAL_EVE_PATH, REMOTE_EVE_FILE_PATH]
eve_copy.extend_left([self.eve_remote])
assert eve_copy.file_paths() == [
REMOTE_EVE_FILE_PATH,
LOCAL_EVE_PATH,
REMOTE_EVE_FILE_PATH,
]
def test_pop_and_pop_left(self):
eve = self.reader_class.from_dir(REMOTE_EVE_DIR)
eve_path_last = eve.file_paths()[-1]
eve_path_first = eve.file_paths()[0]
eve_last = eve.pop()
assert eve_last.file_paths() == [eve_path_last]
assert eve.file_paths()[-1] != eve_path_last
eve_first = eve.pop_left()
assert eve_first.file_paths() == [eve_path_first]
assert eve.file_paths()[0] != eve_path_first
@pytest.mark.skipif(os.name == "nt", reason="Windows OS sep is backslash instead")
def test_filter(self):
# Just two paths for each child in the American English Brown corpus.
eve_paths = {"Brown/Eve/010600a.cha", "Brown/Eve/010600b.cha"}
sarah_paths = {"Brown/Sarah/020305.cha", "Brown/Sarah/020307.cha"}
adam_paths = {"Brown/Adam/020304.cha", "Brown/Adam/020318.cha"}
adam_and_eve = self.reader_class.from_zip(REMOTE_BROWN_URL, exclude="Sarah")
assert eve_paths.issubset(set(adam_and_eve.file_paths()))
assert not sarah_paths.issubset(set(adam_and_eve.file_paths()))
assert adam_paths.issubset(set(adam_and_eve.file_paths()))
adam = adam_and_eve.filter(exclude="Eve")
assert not eve_paths.issubset(set(adam.file_paths()))
assert adam_paths.issubset(set(adam.file_paths()))
eve = adam_and_eve.filter(match="Eve")
assert eve_paths.issubset(set(eve.file_paths()))
assert not adam_paths.issubset(set(eve.file_paths()))
def test_utterances(self):
assert self.eve_local.utterances()[:2] == _EXPECTED_EVE_UTTERANCES
def test_headers(self):
assert self.eve_local.headers() == [
{
"Date": {datetime.date(1962, 10, 15), datetime.date(1962, 10, 17)},
"Participants": {
"CHI": {
"name": "Eve",
"language": "eng",
"corpus": "Brown",
"age": "1;06.00",
"sex": "female",
"group": "",
"ses": "",
"role": "Target_Child",
"education": "",
"custom": "",
},
"MOT": {
"name": "Sue",
"language": "eng",
"corpus": "Brown",
"age": "",
"sex": "female",
"group": "",
"ses": "",
"role": "Mother",
"education": "",
"custom": "",
},
"COL": {
"name": "Colin",
"language": "eng",
"corpus": "Brown",
"age": "",
"sex": "",
"group": "",
"ses": "",
"role": "Investigator",
"education": "",
"custom": "",
},
"RIC": {
"name": "Richard",
"language": "eng",
"corpus": "Brown",
"age": "",
"sex": "",
"group": "",
"ses": "",
"role": "Investigator",
"education": "",
"custom": "",
},
},
"UTF8": "",
"PID": "11312/c-00034743-1",
"Languages": ["eng"],
"Time Duration": "11:30-12:00",
"Types": "long, toyplay, TD",
"Tape Location": "850",
}
]
def test_headers_more_lenient_parsing(self):
header1 = "@UTF8\n@Foo:\tone two\n@Foo Bar:\thello how are you"
header2 = "@UTF8\n@Foo: one two\n@Foo Bar: hello how are you"
reader1 = self.reader_class.from_strs([header1])
reader2 = self.reader_class.from_strs([header2])
expected = {"UTF8": "", "Foo": "one two", "Foo Bar": "hello how are you"}
assert reader1.headers()[0] == reader2.headers()[0] == expected
def test_n_files(self):
assert self.eve_local.n_files() == 1
def test_participants(self):
assert self.eve_local.participants() == {"CHI", "MOT", "COL", "RIC"}
def test_languages(self):
assert self.eve_local.languages() == {"eng"}
def test_dates_of_recording(self):
assert self.eve_local.dates_of_recording() == {
datetime.date(1962, 10, 15),
datetime.date(1962, 10, 17),
}
def test_ages(self):
assert self.eve_local.ages() == [(1, 6, 0)]
assert self.eve_local.ages(months=True) == [18.0]
def test_tokens_by_utterances(self):
assert self.eve_local.tokens(by_utterances=True)[0] == [
Token(
word="more", pos="qn", mor="more", gra=Gra(dep=1, head=2, rel="QUANT")
),
Token(
word="cookie",
pos="n",
mor="cookie",
gra=Gra(dep=2, head=0, rel="INCROOT"),
),
Token(word=".", pos=".", mor="", gra=Gra(dep=3, head=2, rel="PUNCT")),
]
def test_tokens(self):
assert self.eve_local.tokens()[:5] == [
Token(
word="more", pos="qn", mor="more", gra=Gra(dep=1, head=2, rel="QUANT")
),
Token(
word="cookie",
pos="n",
mor="cookie",
gra=Gra(dep=2, head=0, rel="INCROOT"),
),
Token(word=".", pos=".", mor="", gra=Gra(dep=3, head=2, rel="PUNCT")),
Token(
word="you",
pos="pro:per",
mor="you",
gra=Gra(dep=1, head=2, rel="SUBJ"),
),
Token(word="0v", pos="0v", mor="v", gra=Gra(dep=2, head=0, rel="ROOT")),
]
def test_words_by_utterances(self):
assert self.eve_local.words(by_utterances=True)[:2] == [
["more", "cookie", "."],
["you", "0v", "more", "cookies", "?"],
]
def test_words(self):
assert self.eve_local.words()[:5] == ["more", "cookie", ".", "you", "0v"]
def test_mlum(self):
assert pytest.approx(self.eve_local.mlum(), abs=0.1) == [2.267022696929239]
def test_mlu(self):
assert pytest.approx(self.eve_local.mlu(), abs=0.1) == [2.267022696929239]
def test_mluw(self):
assert pytest.approx(self.eve_local.mluw(), abs=0.1) == [1.4459279038718291]
def test_ttr(self):
assert pytest.approx(self.eve_local.ttr(), abs=0.01) == [0.17543859649122806]
def test_ipsyn(self):
assert self.eve_local.ipsyn() == [29]
def test_word_ngrams(self):
assert self.eve_local.word_ngrams(1).most_common(5) == [
((".",), 1121),
(("?",), 455),
(("you",), 197),
(("that",), 151),
(("the",), 132),
]
assert self.eve_local.word_ngrams(2).most_common(5) == [
(("that", "?"), 101),
(("it", "."), 63),
(("what", "?"), 54),
(("yes", "‡"), 45),
(("it", "?"), 39),
]
def test_word_frequency(self):
assert self.eve_local.word_frequencies().most_common(5) == [
(".", 1121),
("?", 455),
("you", 197),
("that", 151),
("the", 132),
]
def test_file_from_empty_string(self):
# pytest.mark.parametrize didn't work for a class method?
for empty_input in ("", None):
reader = self.reader_class.from_strs([empty_input])
file_ = reader._files[0]
assert file_.header == {}
assert file_.utterances == []
assert reader.headers() == [{}]
assert reader.ages() == [None]
assert reader.dates_of_recording() == set()
assert reader.languages() == set()
assert reader.participants() == set()
class TestPylangacqReader(BaseTestCHATReader, unittest.TestCase):
"""Run the reader tests using ``pylangacq.Reader``."""
reader_class = Reader
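# A hedged sketch (not part of the original suite): any other CHAT reader
# implementation could reuse the entire BaseTestCHATReader battery simply by
# overriding ``reader_class``. The subclass names below are hypothetical.
#
# class TestMyReader(BaseTestCHATReader, unittest.TestCase):
#     """Run the same reader tests against a hypothetical Reader subclass."""
#     reader_class = MyReader  # assumption: MyReader subclasses pylangacq.Reader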
@pytest.mark.skipif(
os.name == "nt",
reason="Not sure? We're good so long as this test passes on Linux and MacOS",
)
def test_if_childes_has_updated_data():
assert filecmp.cmp(LOCAL_EVE_PATH, REMOTE_EVE_FILE_PATH)
@pytest.mark.parametrize(
"original, expected",
[
("foo", "foo"),
("&foo", "foo"),
("foo@bar", "foo"),
("foo(", "foo"),
("foo)", "foo"),
("foo:", "foo"),
("foo;", "foo"),
("foo+", "foo"),
],
)
def test__clean_word(original, expected):
assert _clean_word(original) == expected
| 34.777778
| 87
| 0.510954
|
1378df557f0266eb7b0fb0907adb69e72d2b064a
| 7,796
|
py
|
Python
|
tests/test_data.py
|
xhochy/rejected
|
610a3e1401122ecb98d891b6795cca0255e5b044
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_data.py
|
xhochy/rejected
|
610a3e1401122ecb98d891b6795cca0255e5b044
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_data.py
|
xhochy/rejected
|
610a3e1401122ecb98d891b6795cca0255e5b044
|
[
"BSD-3-Clause"
] | null | null | null |
"""Tests for rejected.data"""
import random
import time
import unittest
import uuid
from rejected import data
from . import mocks
class TestProperties(unittest.TestCase):
def setUp(self):
self.properties = data.Properties(mocks.PROPERTIES)
def test_app_id(self):
self.assertEqual(self.properties.app_id, mocks.PROPERTIES.app_id)
def test_content_encoding(self):
self.assertEqual(self.properties.content_encoding,
mocks.PROPERTIES.content_encoding)
def test_content_type(self):
self.assertEqual(self.properties.content_type,
mocks.PROPERTIES.content_type)
def test_correlation_id(self):
self.assertEqual(self.properties.correlation_id,
mocks.PROPERTIES.correlation_id)
def test_delivery_mode(self):
self.assertEqual(self.properties.delivery_mode,
mocks.PROPERTIES.delivery_mode)
def test_expiration(self):
self.assertEqual(self.properties.expiration,
mocks.PROPERTIES.expiration)
def test_message_id(self):
self.assertEqual(self.properties.message_id,
mocks.PROPERTIES.message_id)
def test_priority(self):
self.assertEqual(self.properties.priority, mocks.PROPERTIES.priority)
def test_reply_to(self):
self.assertEqual(self.properties.reply_to, mocks.PROPERTIES.reply_to)
def test_timestamp(self):
self.assertEqual(self.properties.timestamp, mocks.PROPERTIES.timestamp)
def test_type(self):
self.assertEqual(self.properties.type, mocks.PROPERTIES.type)
def test_user_id(self):
self.assertEqual(self.properties.user_id, mocks.PROPERTIES.user_id)
class TestPartialProperties(unittest.TestCase):
def setUp(self):
self.properties = data.Properties(
content_type='application/json', priority=2)
def test_app_id(self):
self.assertIsNone(self.properties.app_id)
def test_content_encoding(self):
self.assertIsNone(self.properties.content_encoding)
def test_content_type(self):
self.assertEqual(self.properties.content_type, 'application/json')
def test_correlation_id(self):
self.assertIsNone(self.properties.correlation_id)
def test_delivery_mode(self):
self.assertIsNone(self.properties.delivery_mode)
def test_expiration(self):
self.assertIsNone(self.properties.expiration)
def test_message_id(self):
self.assertIsNone(self.properties.message_id)
def test_priority(self):
self.assertEqual(self.properties.priority, 2)
def test_reply_to(self):
self.assertIsNone(self.properties.reply_to)
def test_timestamp(self):
self.assertIsNone(self.properties.timestamp)
def test_type(self):
self.assertIsNone(self.properties.type)
def test_user_id(self):
self.assertIsNone(self.properties.user_id)
class TestMessage(unittest.TestCase):
def setUp(self):
self.message = data.Message('mock', mocks.CHANNEL, mocks.METHOD,
mocks.PROPERTIES, mocks.BODY)
def test_body(self):
self.assertEqual(self.message.body, mocks.BODY)
def test_channel(self):
self.assertEqual(self.message.channel, mocks.CHANNEL)
def test_consumer_tag(self):
self.assertEqual(self.message.consumer_tag, mocks.METHOD.consumer_tag)
def test_delivery_tag(self):
self.assertEqual(self.message.delivery_tag, mocks.METHOD.delivery_tag)
def test_exchange(self):
self.assertEqual(self.message.exchange, mocks.METHOD.exchange)
def test_method(self):
self.assertEqual(self.message.method, mocks.METHOD)
def test_redelivered(self):
self.assertEqual(self.message.redelivered, mocks.METHOD.redelivered)
def test_routing_key(self):
self.assertEqual(self.message.routing_key, mocks.METHOD.routing_key)
def test_app_id(self):
self.assertEqual(self.message.properties.app_id,
mocks.PROPERTIES.app_id)
def test_content_encoding(self):
self.assertEqual(self.message.properties.content_encoding,
mocks.PROPERTIES.content_encoding)
def test_content_type(self):
self.assertEqual(self.message.properties.content_type,
mocks.PROPERTIES.content_type)
def test_correlation_id(self):
self.assertEqual(self.message.properties.correlation_id,
mocks.PROPERTIES.correlation_id)
def test_delivery_mode(self):
self.assertEqual(self.message.properties.delivery_mode,
mocks.PROPERTIES.delivery_mode)
def test_expiration(self):
self.assertEqual(self.message.properties.expiration,
mocks.PROPERTIES.expiration)
def test_message_id(self):
self.assertEqual(self.message.properties.message_id,
mocks.PROPERTIES.message_id)
def test_priority(self):
self.assertEqual(self.message.properties.priority,
mocks.PROPERTIES.priority)
def test_reply_to(self):
self.assertEqual(self.message.properties.reply_to,
mocks.PROPERTIES.reply_to)
def test_timestamp(self):
self.assertEqual(self.message.properties.timestamp,
mocks.PROPERTIES.timestamp)
def test_type(self):
self.assertEqual(self.message.properties.type, mocks.PROPERTIES.type)
def test_user_id(self):
self.assertEqual(self.message.properties.user_id,
mocks.PROPERTIES.user_id)
class TestMeasurement(unittest.TestCase):
def setUp(self):
self.measurement = data.Measurement()
def test_iter_and_default_values(self):
for _key, value in self.measurement:
self.assertDictEqual(dict(value), {})
def test_repr(self):
self.assertEqual(
repr(self.measurement), '<Measurement id={}>'.format(
id(self.measurement)))
def test_incr_decr(self):
keys = [str(uuid.uuid4()) for _i in range(0, 10)]
expectation = {}
for key in keys:
self.measurement.incr(key)
self.measurement.incr(key, 5)
self.measurement.decr(key)
self.measurement.decr(key, 2)
expectation[key] = 3
self.assertDictEqual(dict(self.measurement.counters), expectation)
def test_tags(self):
self.measurement.set_tag('foo', 'bar')
self.measurement.set_tag('baz', True)
self.measurement.set_tag('qux', 1)
self.assertDictEqual(self.measurement.tags, {
'foo': 'bar',
'baz': True,
'qux': 1
})
def test_add_duration(self):
expectation = random.random()
self.measurement.add_duration('duration1', expectation)
self.measurement.add_duration('duration1', expectation)
self.assertEqual(self.measurement.durations['duration1'],
[expectation, expectation])
def test_set_value(self):
key = str(uuid.uuid4())
expectation = random.random()
self.measurement.set_value(key, 10)
self.measurement.set_value(key, expectation)
self.assertEqual(self.measurement.values[key], expectation)
def test_track_duration(self):
key = str(uuid.uuid4())
with self.measurement.track_duration(key):
time.sleep(0.01)
with self.measurement.track_duration(key):
time.sleep(0.02)
self.assertGreaterEqual(self.measurement.durations[key][0], 0.01)
self.assertGreaterEqual(self.measurement.durations[key][1], 0.02)
| 33.316239
| 79
| 0.663417
|
fde9d6460785cc956169cca65da7f15351ec0fa2
| 833
|
py
|
Python
|
test/test_acps.py
|
edose/photrix
|
ed9b36a86564f4b0389db6b350b8603a0eb82d05
|
[
"Apache-2.0"
] | 3
|
2018-12-06T11:20:28.000Z
|
2020-12-16T23:17:40.000Z
|
test/test_acps.py
|
edose/photrix
|
ed9b36a86564f4b0389db6b350b8603a0eb82d05
|
[
"Apache-2.0"
] | 4
|
2018-10-22T15:30:04.000Z
|
2018-10-23T01:13:31.000Z
|
test/test_acps.py
|
edose/photrix
|
ed9b36a86564f4b0389db6b350b8603a0eb82d05
|
[
"Apache-2.0"
] | 1
|
2020-06-13T18:38:36.000Z
|
2020-06-13T18:38:36.000Z
|
from photrix import acps # don't include "acps." in calls to functions and classes.
__author__ = "Eric Dose :: Bois d'Arc Observatory, Kansas"
def test_ACPS_observation():
obs = acps.ACPS_observation("ST Tri", 34.555, 21.334)
obs.add_imageset("ST Tri", 3, 120, 'V')
obs.add_imageset("ST Tri I filter", 3, 30, 'I')
txt = obs.rtml()
print("\n" + txt + "\n")
def test_run():
project = acps.ACPS_project("AN20160630-BOREA") # for the whole night on one instrument.
plan = project.make_plan("first plan")
plan.horizon = 30 # override class defaults
plan.priority = 4 # "
obs = acps.ACPS_observation('obs_id', 37, -1)
obs.add_imageset("", 5, 60, 'V')
obs.add_imageset("name2", 2, 120, 'I')
plan.add_observation(obs)
project.add_plan(plan)
print('\n'*2 + project.rtml())
| 33.32
| 93
| 0.643457
|
df573495fa62a46d67b008763fa4bd5b5ac56090
| 10,522
|
py
|
Python
|
jsonfield/tests.py
|
Petro-Viron/django-jsonfield
|
df3eceda630b3d741e2a5d38a9f5e056921eac26
|
[
"MIT"
] | null | null | null |
jsonfield/tests.py
|
Petro-Viron/django-jsonfield
|
df3eceda630b3d741e2a5d38a9f5e056921eac26
|
[
"MIT"
] | null | null | null |
jsonfield/tests.py
|
Petro-Viron/django-jsonfield
|
df3eceda630b3d741e2a5d38a9f5e056921eac26
|
[
"MIT"
] | 1
|
2016-06-06T10:44:01.000Z
|
2016-06-06T10:44:01.000Z
|
from decimal import Decimal
from django.core.serializers import deserialize, serialize
from django.core.serializers.base import DeserializationError
from django.db import models
from django.test import TestCase
try:
import json
except ImportError:
from django.utils import simplejson as json
from .fields import JSONField, JSONCharField
try:
from django.forms.utils import ValidationError
except ImportError:
from django.forms.util import ValidationError
from collections import OrderedDict
class JsonModel(models.Model):
json = JSONField()
default_json = JSONField(default={"check": 12})
complex_default_json = JSONField(default=[{"checkcheck": 1212}])
empty_default = JSONField(default={})
class JsonCharModel(models.Model):
json = JSONCharField(max_length=100)
default_json = JSONCharField(max_length=100, default={"check": 34})
class ComplexEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, complex):
return {
'__complex__': True,
'real': obj.real,
'imag': obj.imag,
}
return json.JSONEncoder.default(self, obj)
def as_complex(dct):
if '__complex__' in dct:
return complex(dct['real'], dct['imag'])
return dct
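# Illustrative round trip (an added note, not part of the original tests):
# json.dumps(1 + 3j, cls=ComplexEncoder) yields
# '{"__complex__": true, "real": 1.0, "imag": 3.0}', and json.loads of that
# string with object_hook=as_complex recovers (1+3j).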
class JSONModelCustomEncoders(models.Model):
# A JSON field that can store complex numbers
json = JSONField(
dump_kwargs={'cls': ComplexEncoder, "indent": 4},
load_kwargs={'object_hook': as_complex},
)
class JSONFieldTest(TestCase):
"""JSONField Wrapper Tests"""
json_model = JsonModel
def test_json_field_create(self):
"""Test saving a JSON object in our JSONField"""
json_obj = {
"item_1": "this is a json blah",
"blergh": "hey, hey, hey"}
obj = self.json_model.objects.create(json=json_obj)
new_obj = self.json_model.objects.get(id=obj.id)
self.assertEqual(new_obj.json, json_obj)
def test_string_in_json_field(self):
"""Test saving an ordinary Python string in our JSONField"""
json_obj = 'blah blah'
obj = self.json_model.objects.create(json=json_obj)
new_obj = self.json_model.objects.get(id=obj.id)
self.assertEqual(new_obj.json, json_obj)
def test_float_in_json_field(self):
"""Test saving a Python float in our JSONField"""
json_obj = 1.23
obj = self.json_model.objects.create(json=json_obj)
new_obj = self.json_model.objects.get(id=obj.id)
self.assertEqual(new_obj.json, json_obj)
def test_int_in_json_field(self):
"""Test saving a Python integer in our JSONField"""
json_obj = 1234567
obj = self.json_model.objects.create(json=json_obj)
new_obj = self.json_model.objects.get(id=obj.id)
self.assertEqual(new_obj.json, json_obj)
def test_decimal_in_json_field(self):
"""Test saving a Python Decimal in our JSONField"""
json_obj = Decimal(12.34)
obj = self.json_model.objects.create(json=json_obj)
new_obj = self.json_model.objects.get(id=obj.id)
# here we must know to convert the returned string back to Decimal,
# since json does not support that format
self.assertEqual(Decimal(new_obj.json), json_obj)
def test_json_field_modify(self):
"""Test modifying a JSON object in our JSONField"""
json_obj_1 = {'a': 1, 'b': 2}
json_obj_2 = {'a': 3, 'b': 4}
obj = self.json_model.objects.create(json=json_obj_1)
self.assertEqual(obj.json, json_obj_1)
obj.json = json_obj_2
self.assertEqual(obj.json, json_obj_2)
obj.save()
self.assertEqual(obj.json, json_obj_2)
self.assertTrue(obj)
def test_json_field_load(self):
"""Test loading a JSON object from the DB"""
json_obj_1 = {'a': 1, 'b': 2}
obj = self.json_model.objects.create(json=json_obj_1)
new_obj = self.json_model.objects.get(id=obj.id)
self.assertEqual(new_obj.json, json_obj_1)
def test_json_list(self):
"""Test storing a JSON list"""
json_obj = ["my", "list", "of", 1, "objs", {"hello": "there"}]
obj = self.json_model.objects.create(json=json_obj)
new_obj = self.json_model.objects.get(id=obj.id)
self.assertEqual(new_obj.json, json_obj)
def test_empty_objects(self):
"""Test storing empty objects"""
for json_obj in [{}, [], 0, '', False]:
obj = self.json_model.objects.create(json=json_obj)
new_obj = self.json_model.objects.get(id=obj.id)
self.assertEqual(json_obj, obj.json)
self.assertEqual(json_obj, new_obj.json)
def test_custom_encoder(self):
"""Test encoder_cls and object_hook"""
value = 1 + 3j # A complex number
obj = JSONModelCustomEncoders.objects.create(json=value)
new_obj = JSONModelCustomEncoders.objects.get(pk=obj.pk)
self.assertEqual(value, new_obj.json)
def test_django_serializers(self):
"""Test serializing/deserializing jsonfield data"""
for json_obj in [{}, [], 0, '', False, {'key': 'value', 'num': 42,
'ary': list(range(5)),
'dict': {'k': 'v'}}]:
obj = self.json_model.objects.create(json=json_obj)
new_obj = self.json_model.objects.get(id=obj.id)
self.assertTrue(new_obj)
queryset = self.json_model.objects.all()
ser = serialize('json', queryset)
for dobj in deserialize('json', ser):
obj = dobj.object
pulled = self.json_model.objects.get(id=obj.pk)
self.assertEqual(obj.json, pulled.json)
def test_default_parameters(self):
"""Test providing a default value to the model"""
model = JsonModel()
model.json = {"check": 12}
self.assertEqual(model.json, {"check": 12})
self.assertEqual(type(model.json), dict)
self.assertEqual(model.default_json, {"check": 12})
self.assertEqual(type(model.default_json), dict)
def test_invalid_json(self):
# invalid json data {] in the json and default_json fields
ser = '[{"pk": 1, "model": "jsonfield.jsoncharmodel", ' \
'"fields": {"json": "{]", "default_json": "{]"}}]'
with self.assertRaises(DeserializationError) as cm:
next(deserialize('json', ser))
inner = cm.exception.args[0]
self.assertTrue(isinstance(inner, ValidationError))
self.assertEqual('Enter valid JSON', inner.messages[0])
def test_integer_in_string_in_json_field(self):
"""Test saving the Python string '123' in our JSONField"""
json_obj = '123'
obj = self.json_model.objects.create(json=json_obj)
new_obj = self.json_model.objects.get(id=obj.id)
self.assertEqual(new_obj.json, json_obj)
def test_boolean_in_string_in_json_field(self):
"""Test saving the Python string 'true' in our JSONField"""
json_obj = 'true'
obj = self.json_model.objects.create(json=json_obj)
new_obj = self.json_model.objects.get(id=obj.id)
self.assertEqual(new_obj.json, json_obj)
def test_pass_by_reference_pollution(self):
"""Make sure the default parameter is copied rather than passed by reference"""
model = JsonModel()
model.default_json["check"] = 144
model.complex_default_json[0]["checkcheck"] = 144
self.assertEqual(model.default_json["check"], 144)
self.assertEqual(model.complex_default_json[0]["checkcheck"], 144)
# Make sure when we create a new model, it resets to the default value
# and not to what we just set it to (it would be if it were passed by reference)
model = JsonModel()
self.assertEqual(model.default_json["check"], 12)
self.assertEqual(model.complex_default_json[0]["checkcheck"], 1212)
def test_normal_regex_filter(self):
"""Make sure JSON model can filter regex"""
JsonModel.objects.create(json={"boom": "town"})
JsonModel.objects.create(json={"move": "town"})
JsonModel.objects.create(json={"save": "town"})
self.assertEqual(JsonModel.objects.count(), 3)
self.assertEqual(JsonModel.objects.filter(json__regex=r"boom").count(), 1)
self.assertEqual(JsonModel.objects.filter(json__regex=r"town").count(), 3)
def test_save_blank_object(self):
"""Test that JSON model can save a blank object as none"""
model = JsonModel()
self.assertEqual(model.empty_default, {})
model.save()
self.assertEqual(model.empty_default, {})
model1 = JsonModel(empty_default={"hey": "now"})
self.assertEqual(model1.empty_default, {"hey": "now"})
model1.save()
self.assertEqual(model1.empty_default, {"hey": "now"})
class JSONCharFieldTest(JSONFieldTest):
json_model = JsonCharModel
class OrderedJsonModel(models.Model):
json = JSONField(load_kwargs={'object_pairs_hook': OrderedDict})
class OrderedDictSerializationTest(TestCase):
def setUp(self):
self.ordered_dict = OrderedDict([
('number', [1, 2, 3, 4]),
('notes', True),
('alpha', True),
('romeo', True),
('juliet', True),
('bravo', True),
])
self.expected_key_order = ['number', 'notes', 'alpha', 'romeo', 'juliet', 'bravo']
def test_ordered_dict_differs_from_normal_dict(self):
self.assertEqual(list(self.ordered_dict.keys()), self.expected_key_order)
self.assertNotEqual(dict(self.ordered_dict).keys(), self.expected_key_order)
def test_default_behaviour_loses_sort_order(self):
mod = JsonModel.objects.create(json=self.ordered_dict)
self.assertEqual(list(mod.json.keys()), self.expected_key_order)
mod_from_db = JsonModel.objects.get(id=mod.id)
# mod_from_db lost ordering information during json.loads()
self.assertNotEqual(mod_from_db.json.keys(), self.expected_key_order)
def test_load_kwargs_hook_does_not_lose_sort_order(self):
mod = OrderedJsonModel.objects.create(json=self.ordered_dict)
self.assertEqual(list(mod.json.keys()), self.expected_key_order)
mod_from_db = OrderedJsonModel.objects.get(id=mod.id)
self.assertEqual(list(mod_from_db.json.keys()), self.expected_key_order)
| 36.79021
| 90
| 0.64636
|
0fa65f5409132752a19d26b4e4db9be2368c76fc
| 5,745
|
py
|
Python
|
test_cifar_iterative.py
|
GhostofAdam/Adaptive-Diversity-Promoting
|
0b23f0f507f5c4310cddd35391c5b94673cb1237
|
[
"Apache-2.0"
] | null | null | null |
test_cifar_iterative.py
|
GhostofAdam/Adaptive-Diversity-Promoting
|
0b23f0f507f5c4310cddd35391c5b94673cb1237
|
[
"Apache-2.0"
] | null | null | null |
test_cifar_iterative.py
|
GhostofAdam/Adaptive-Diversity-Promoting
|
0b23f0f507f5c4310cddd35391c5b94673cb1237
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import keras
from keras.layers import AveragePooling2D, Input, Flatten
from keras.models import Model, load_model
from keras.datasets import cifar10, cifar100
import tensorflow as tf
import cleverhans.attacks as attacks
from cleverhans.utils_tf import model_eval
import os
from utils import *
from model import resnet_v1
from keras_wraper_ensemble import KerasModelWrapper
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
# Computed depth from supplied model parameter n
n = 3
depth = n * 6 + 2
version = 1
# Model name, depth and version
model_type = 'ResNet%dv%d' % (depth, version)
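# With the defaults above (n = 3, version = 1), depth = 3 * 6 + 2 = 20, so model_type is 'ResNet20v1'.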
print(model_type)
print('Attack method is %s'%FLAGS.attack_method)
# Load the data.
if FLAGS.dataset == 'cifar10':
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
elif FLAGS.dataset == 'cifar100':
(x_train, y_train), (x_test, y_test) = cifar100.load_data(label_mode='fine')
# Input image dimensions.
input_shape = x_train.shape[1:]
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
clip_min = 0.0
clip_max = 1.0
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
clip_min -= x_train_mean
clip_max -= x_train_mean
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# Define input TF placeholder
x = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
y = tf.placeholder(tf.float32, shape=(None, num_classes))
sess = tf.Session()
keras.backend.set_session(sess)
# Prepare model pre-trained checkpoints directory.
save_dir = os.path.join(os.getcwd(), FLAGS.dataset+'_EE_LED_saved_models'+str(FLAGS.num_models)+'_lamda'+str(FLAGS.lamda)+'_logdetlamda'+str(FLAGS.log_det_lamda)+'_'+str(FLAGS.augmentation))
model_name = 'model.%s.h5' % str(FLAGS.epoch).zfill(3)
filepath = os.path.join(save_dir, model_name)
print('Restore model checkpoints from %s'% filepath)
# Prepare baseline model pre-trained checkpoints directory.
save_dir_baseline = os.path.join(os.getcwd(), FLAGS.dataset+'_EE_LED_saved_models'+str(FLAGS.num_models)+'_lamda0.0_logdetlamda0.0_'+str(FLAGS.augmentation))
model_name_baseline = 'model.%s.h5' % str(FLAGS.baseline_epoch).zfill(3)
filepath_baseline = os.path.join(save_dir_baseline, model_name_baseline)
print('Restore baseline model checkpoints from %s'% filepath_baseline)
# Create model
model_input = Input(shape=input_shape)
model_dic = {}
model_out = []
feature_maps = []
for i in range(FLAGS.num_models):
model_dic[str(i)] = resnet_v1(input=model_input, depth=depth, num_classes=num_classes, dataset=FLAGS.dataset)
model_out.append(model_dic[str(i)][2])
model_output = keras.layers.concatenate(model_out)
model = Model(inputs=model_input, outputs=model_output)
model_ensemble = keras.layers.Average()(model_out)
model_ensemble = Model(inputs=model_input, outputs=model_ensemble)
# Create baseline model
model_input_baseline = Input(shape=input_shape)
model_dic_baseline = {}
model_out_baseline = []
for i in range(FLAGS.num_models):
model_dic_baseline[str(i)] = resnet_v1(input=model_input_baseline, depth=depth, num_classes=num_classes, dataset=FLAGS.dataset)
model_out_baseline.append(model_dic_baseline[str(i)][2])
model_output_baseline = keras.layers.concatenate(model_out_baseline)
model_baseline = Model(inputs=model_input_baseline, outputs=model_output_baseline)
model_ensemble_baseline = keras.layers.Average()(model_out_baseline)
model_ensemble_baseline = Model(inputs=model_input_baseline, outputs=model_ensemble_baseline)
# Wrap the ensemble models for the attacks
wrap_ensemble = KerasModelWrapper(model_ensemble)
wrap_ensemble_baseline = KerasModelWrapper(model_ensemble_baseline)
#Load model
model.load_weights(filepath)
model_baseline.load_weights(filepath_baseline)
# Initialize the attack method
if FLAGS.attack_method == 'MadryEtAl':
att = attacks.MadryEtAl(wrap_ensemble)
att_baseline = attacks.MadryEtAl(wrap_ensemble_baseline)
elif FLAGS.attack_method == 'FastGradientMethod':
att = attacks.FastGradientMethod(wrap_ensemble)
att_baseline = attacks.FastGradientMethod(wrap_ensemble_baseline)
elif FLAGS.attack_method == 'MomentumIterativeMethod':
att = attacks.MomentumIterativeMethod(wrap_ensemble)
att_baseline = attacks.MomentumIterativeMethod(wrap_ensemble_baseline)
elif FLAGS.attack_method == 'BasicIterativeMethod':
att = attacks.BasicIterativeMethod(wrap_ensemble)
att_baseline = attacks.BasicIterativeMethod(wrap_ensemble_baseline)
# Consider the attack to be constant
eval_par = {'batch_size': 100}
eps_ = FLAGS.eps
print('eps is %.3f'%eps_)
if FLAGS.attack_method == 'FastGradientMethod':
att_params = {'eps': eps_,
'clip_min': clip_min,
'clip_max': clip_max}
else:
att_params = {'eps': eps_,
'eps_iter': eps_*1.0/10,
'clip_min': clip_min,
'clip_max': clip_max,
'nb_iter': 10}
adv_x = tf.stop_gradient(att.generate(x, **att_params))
adv_x_baseline = tf.stop_gradient(att_baseline.generate(x, **att_params))
preds = model_ensemble(adv_x)
preds_baseline = model_ensemble_baseline(adv_x_baseline)
acc = model_eval(sess, x, y, preds, x_test, y_test, args=eval_par)
acc_baseline = model_eval(sess, x, y, preds_baseline, x_test, y_test, args=eval_par)
print('adv_ensemble_acc: %.3f adv_ensemble_baseline_acc: %.3f'%(acc,acc_baseline))
| 35.90625
| 191
| 0.747084
|
839ddf997348196edda8e3980aa727541fadd835
| 956
|
py
|
Python
|
script/start.py
|
pranayaryal/electron
|
a7052efaf4fc6bb2aeedd6579e662e98aa2237dd
|
[
"MIT"
] | 1,027
|
2016-12-24T13:05:29.000Z
|
2022-02-21T11:07:32.000Z
|
script/start.py
|
pranayaryal/electron
|
a7052efaf4fc6bb2aeedd6579e662e98aa2237dd
|
[
"MIT"
] | 366
|
2016-12-24T05:58:54.000Z
|
2018-12-31T23:02:03.000Z
|
script/start.py
|
pranayaryal/electron
|
a7052efaf4fc6bb2aeedd6579e662e98aa2237dd
|
[
"MIT"
] | 122
|
2017-01-14T23:48:49.000Z
|
2022-03-09T01:51:53.000Z
|
#!/usr/bin/env python
import os
import subprocess
import sys
from lib.util import electron_gyp
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
PROJECT_NAME = electron_gyp()['project_name%']
PRODUCT_NAME = electron_gyp()['product_name%']
def main():
os.chdir(SOURCE_ROOT)
config = 'D'
if '-R' in sys.argv:
config = 'R'
if sys.platform == 'darwin':
electron = os.path.join(SOURCE_ROOT, 'out', config,
'{0}.app'.format(PRODUCT_NAME), 'Contents',
'MacOS', PRODUCT_NAME)
elif sys.platform == 'win32':
electron = os.path.join(SOURCE_ROOT, 'out', config,
'{0}.exe'.format(PROJECT_NAME))
else:
electron = os.path.join(SOURCE_ROOT, 'out', config, PROJECT_NAME)
try:
subprocess.check_call([electron] + sys.argv[1:])
except KeyboardInterrupt:
return -1
if __name__ == '__main__':
sys.exit(main())
| 23.317073
| 73
| 0.621339
|
bbd9939de21307e5bfb2340653ed0f30fe6db6c5
| 810
|
py
|
Python
|
src/patched_flannel/classification/header.py
|
mannbiher/DeepLearningForHealthCareProject
|
4692031591bd88c489c9b905e7c340ac76a5366b
|
[
"MIT"
] | null | null | null |
src/patched_flannel/classification/header.py
|
mannbiher/DeepLearningForHealthCareProject
|
4692031591bd88c489c9b905e7c340ac76a5366b
|
[
"MIT"
] | 14
|
2021-05-10T18:00:58.000Z
|
2021-05-12T00:29:41.000Z
|
src/patched_flannel/classification/header.py
|
mannbiher/DeepLearningForHealthCareProject
|
4692031591bd88c489c9b905e7c340ac76a5366b
|
[
"MIT"
] | null | null | null |
# Test name
test_name = 'classifier_newset'
# sampling_option = 'oversampling'
sampling_option = None
# Batch size
train_batch_size = 16
val_batch_size = 16
test_batch_size = 1
# Num_classes
num_classes = 4
# Model
#model = 'resnet'
model = 'resnet'
# Feature extract or train all parameters
feature_extract = False
# Num epoch
epoch_max = 100
# Folder image dir
data_dir = './patched_flannel/classification_data/'
# Save dir
save_dir = './patched_flannel/checkpoint/' + test_name
# Test epoch: what epoch to load
inference_epoch = 100
# Resume training
continue_epoch = 36
# Early stopping
patience = 50
# Regularization
lambda_l1 = 1e-5
# Resize image
resize = 1024
# Repeat for inference
repeat = 100
# Learning rate
lr = 1e-5
# Patch size
img_size = 224
# ResNet layers
resnet = 'resnet34'
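# A hedged usage sketch (added note; the import path and helper names are
# assumptions, not part of this settings module):
#   from classification import header
#   model = build_model(header.model, num_classes=header.num_classes)
#   train(model, lr=header.lr, batch_size=header.train_batch_size, epochs=header.epoch_max)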
| 14.464286
| 54
| 0.740741
|
4cb9c81e0ca788f0034f0aa44fb47f09f8bd1d12
| 20,128
|
py
|
Python
|
research/street/python/vgslspecs.py
|
vincentcheny/models
|
afb1a59fc1bc792ac72d1a3e22e2469020529788
|
[
"Apache-2.0"
] | 1
|
2019-09-11T09:41:11.000Z
|
2019-09-11T09:41:11.000Z
|
research/street/python/vgslspecs.py
|
vincentcheny/models
|
afb1a59fc1bc792ac72d1a3e22e2469020529788
|
[
"Apache-2.0"
] | null | null | null |
research/street/python/vgslspecs.py
|
vincentcheny/models
|
afb1a59fc1bc792ac72d1a3e22e2469020529788
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""String network description language mapping to TF-Slim calls where possible.
See vgslspecs.md for detailed description.
"""
import re
from string import maketrans
import nn_ops
import shapes
from six.moves import xrange
import tensorflow as tf
import tensorflow.contrib.slim as slim
# Class that builds a set of ops to manipulate variable-sized images.
class VGSLSpecs(object):
"""Layers that can be built from a string definition."""
def __init__(self, widths, heights, is_training):
"""Constructs a VGSLSpecs.
Args:
widths: Tensor of size batch_size of the widths of the inputs.
heights: Tensor of size batch_size of the heights of the inputs.
is_training: True if the graph should be build for training.
"""
# The string that was used to build this model.
self.model_str = None
# True if we are training
self.is_training = is_training
# Tensor for the size of the images, of size batch_size.
self.widths = widths
self.heights = heights
# Overall reduction factors of this model so far for each dimension.
# TODO(rays) consider building a graph from widths and heights instead of
# computing a scale factor.
self.reduction_factors = [1.0, 1.0, 1.0, 1.0]
# List of Op parsers.
# TODO(rays) add more Op types as needed.
self.valid_ops = [self.AddSeries, self.AddParallel, self.AddConvLayer,
self.AddMaxPool, self.AddDropout, self.AddReShape,
self.AddFCLayer, self.AddLSTMLayer]
# Translation table to convert unacceptable characters that may occur
# in op strings that cannot be used as names.
self.transtab = maketrans('(,)', '___')
def Build(self, prev_layer, model_str):
"""Builds a network with input prev_layer from a VGSLSpecs description.
Args:
prev_layer: The input tensor.
model_str: Model definition similar to Tesseract as follows:
============ FUNCTIONAL OPS ============
C(s|t|r|l|m)[{name}]<y>,<x>,<d> Convolves using a y,x window, with no
shrinkage, SAME infill, d outputs, with s|t|r|l|m non-linear layer.
(s|t|r|l|m) specifies the type of non-linearity:
s = sigmoid
t = tanh
r = relu
l = linear (i.e., None)
m = softmax
F(s|t|r|l|m)[{name}]<d> Fully-connected with s|t|r|l|m non-linearity and
d outputs. Reduces height, width to 1. Input height and width must be
constant.
L(f|r|b)(x|y)[s][{name}]<n> LSTM cell with n outputs.
f runs the LSTM forward only.
r runs the LSTM reversed only.
b runs the LSTM bidirectionally.
x runs the LSTM in the x-dimension (on data with or without the
y-dimension).
y runs the LSTM in the y-dimension (data must have a y dimension).
s (optional) summarizes the output in the requested dimension,
outputting only the final step, collapsing the dimension to a
single element.
Examples:
Lfx128 runs a forward-only LSTM in the x-dimension with 128
outputs, treating any y dimension independently.
Lfys64 runs a forward-only LSTM in the y-dimension with 64 outputs
and collapses the y-dimension to 1 element.
NOTE that Lbxsn is implemented as (LfxsnLrxsn) since the summaries
need to be taken from opposite ends of the output
Do[{name}] Insert a dropout layer.
============ PLUMBING OPS ============
[...] Execute ... networks in series (layers).
(...) Execute ... networks in parallel, with their output concatenated
in depth.
S[{name}]<d>(<a>x<b>)<e>,<f> Splits one dimension, moves one part to
another dimension.
Splits input dimension d into a x b, sending the high part (a) to the
high side of dimension e, and the low part (b) to the high side of
dimension f. Exception: if d=e=f, then dimension d is internally
transposed to bxa.
Either a or b can be zero, meaning whatever is left after taking out
the other, allowing dimensions to be of variable size.
Eg. S3(3x50)2,3 will split the 150-element depth into 3x50, with the 3
going to the most significant part of the width, and the 50 part
staying in depth.
This will rearrange a 3x50 output parallel operation to spread the 3
output sets over width.
Mp[{name}]<y>,<x> Maxpool the input, reducing the (y,x) rectangle to a
single vector value.
Returns:
Output tensor
"""
self.model_str = model_str
final_layer, _ = self.BuildFromString(prev_layer, 0)
return final_layer
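# A minimal usage sketch (added illustration; the spec string and tensor names
# below are examples only, not from the original module):
#   specs = VGSLSpecs(widths, heights, is_training=True)
#   # 5x5 relu conv with 16 outputs, 2x2 maxpool, then a forward x-LSTM with
#   # 64 outputs.
#   outputs = specs.Build(images, '[Cr5,5,16 Mp2,2 Lfx64]')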
def GetLengths(self, dim=2, factor=1):
"""Returns the lengths of the batch of elements in the given dimension.
WARNING: The returned sizes may not exactly match TF's calculation.
Args:
dim: dimension to get the sizes of, in [1,2]. batch, depth not allowed.
factor: A scalar value to multiply by.
Returns:
The original heights/widths scaled by the current scaling of the model and
the given factor.
Raises:
ValueError: If the args are invalid.
"""
if dim == 1:
lengths = self.heights
elif dim == 2:
lengths = self.widths
else:
raise ValueError('Invalid dimension given to GetLengths')
lengths = tf.cast(lengths, tf.float32)
if self.reduction_factors[dim] is not None:
lengths = tf.div(lengths, self.reduction_factors[dim])
else:
lengths = tf.ones_like(lengths)
if factor != 1:
lengths = tf.multiply(lengths, tf.cast(factor, tf.float32))
return tf.cast(lengths, tf.int32)
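# Worked example (added note): after a single Mp2,2 layer the reduction
# factors become [1, 2, 2, 1], so GetLengths(dim=2) maps an original width of
# 100 to a sequence length of 50; once a later op sets the factor to None
# (e.g. a summarizing LSTM or an F layer), the lengths collapse to ones.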
def BuildFromString(self, prev_layer, index):
"""Adds the layers defined by model_str[index:] to the model.
Args:
prev_layer: Input tensor.
index: Position in model_str to start parsing
Returns:
Output tensor, next model_str index.
Raises:
ValueError: If the model string is unrecognized.
"""
index = self._SkipWhitespace(index)
for op in self.valid_ops:
output_layer, next_index = op(prev_layer, index)
if output_layer is not None:
return output_layer, next_index
raise ValueError('Unrecognized model string:' + self.model_str[index:])
def AddSeries(self, prev_layer, index):
"""Builds a sequence of layers for a VGSLSpecs model.
Args:
prev_layer: Input tensor.
index: Position in model_str to start parsing
Returns:
Output tensor of the series, end index in model_str.
Raises:
ValueError: If [] are unbalanced.
"""
if self.model_str[index] != '[':
return None, None
index += 1
while index < len(self.model_str) and self.model_str[index] != ']':
prev_layer, index = self.BuildFromString(prev_layer, index)
if index == len(self.model_str):
raise ValueError('Missing ] at end of series!' + self.model_str)
return prev_layer, index + 1
def AddParallel(self, prev_layer, index):
"""tf.concats outputs of layers that run on the same inputs.
Args:
prev_layer: Input tensor.
index: Position in model_str to start parsing
Returns:
Output tensor of the parallel, end index in model_str.
Raises:
ValueError: If () are unbalanced or the elements don't match.
"""
if self.model_str[index] != '(':
return None, None
index += 1
layers = []
num_dims = 0
# Each parallel must output the same, including any reduction factor, in
# all dimensions except depth.
# We have to save the starting factors, so they don't get reduced by all
# the elements of the parallel, only once.
original_factors = self.reduction_factors
final_factors = None
while index < len(self.model_str) and self.model_str[index] != ')':
self.reduction_factors = original_factors
layer, index = self.BuildFromString(prev_layer, index)
if num_dims == 0:
num_dims = len(layer.get_shape())
elif num_dims != len(layer.get_shape()):
raise ValueError('All elements of parallel must return same num dims')
layers.append(layer)
if final_factors:
if final_factors != self.reduction_factors:
raise ValueError('All elements of parallel must scale the same')
else:
final_factors = self.reduction_factors
if index == len(self.model_str):
raise ValueError('Missing ) at end of parallel!' + self.model_str)
return tf.concat(axis=num_dims - 1, values=layers), index + 1
def AddConvLayer(self, prev_layer, index):
"""Add a single standard convolutional layer.
Args:
prev_layer: Input tensor.
index: Position in model_str to start parsing
Returns:
Output tensor, end index in model_str.
"""
pattern = re.compile(R'(C)(s|t|r|l|m)({\w+})?(\d+),(\d+),(\d+)')
m = pattern.match(self.model_str, index)
if m is None:
return None, None
name = self._GetLayerName(m.group(0), index, m.group(3))
width = int(m.group(4))
height = int(m.group(5))
depth = int(m.group(6))
fn = self._NonLinearity(m.group(2))
return slim.conv2d(
prev_layer, depth, [height, width], activation_fn=fn,
scope=name), m.end()
def AddMaxPool(self, prev_layer, index):
"""Add a maxpool layer.
Args:
prev_layer: Input tensor.
index: Position in model_str to start parsing
Returns:
Output tensor, end index in model_str.
"""
pattern = re.compile(R'(Mp)({\w+})?(\d+),(\d+)(?:,(\d+),(\d+))?')
m = pattern.match(self.model_str, index)
if m is None:
return None, None
name = self._GetLayerName(m.group(0), index, m.group(2))
height = int(m.group(3))
width = int(m.group(4))
y_stride = height if m.group(5) is None else int(m.group(5))
x_stride = width if m.group(6) is None else int(m.group(6))
self.reduction_factors[1] *= y_stride
self.reduction_factors[2] *= x_stride
return slim.max_pool2d(
prev_layer, [height, width], [y_stride, x_stride],
padding='SAME',
scope=name), m.end()
def AddDropout(self, prev_layer, index):
"""Adds a dropout layer.
Args:
prev_layer: Input tensor.
index: Position in model_str to start parsing
Returns:
Output tensor, end index in model_str.
"""
pattern = re.compile(R'(Do)({\w+})?')
m = pattern.match(self.model_str, index)
if m is None:
return None, None
name = self._GetLayerName(m.group(0), index, m.group(2))
layer = slim.dropout(
prev_layer, 0.5, is_training=self.is_training, scope=name)
return layer, m.end()
def AddReShape(self, prev_layer, index):
"""Reshapes the input tensor by moving each (x_scale,y_scale) rectangle to.
the depth dimension. NOTE that the TF convention is that inputs are
[batch, y, x, depth].
Args:
prev_layer: Input tensor.
index: Position in model_str to start parsing
Returns:
Output tensor, end index in model_str.
"""
pattern = re.compile(R'(S)(?:{(\w)})?(\d+)\((\d+)x(\d+)\)(\d+),(\d+)')
m = pattern.match(self.model_str, index)
if m is None:
return None, None
name = self._GetLayerName(m.group(0), index, m.group(2))
src_dim = int(m.group(3))
part_a = int(m.group(4))
part_b = int(m.group(5))
dest_dim_a = int(m.group(6))
dest_dim_b = int(m.group(7))
if part_a == 0:
part_a = -1
if part_b == 0:
part_b = -1
prev_shape = tf.shape(prev_layer)
layer = shapes.transposing_reshape(
prev_layer, src_dim, part_a, part_b, dest_dim_a, dest_dim_b, name=name)
# Compute scale factors.
result_shape = tf.shape(layer)
for i in xrange(len(self.reduction_factors)):
if self.reduction_factors[i] is not None:
factor1 = tf.cast(self.reduction_factors[i], tf.float32)
factor2 = tf.cast(prev_shape[i], tf.float32)
divisor = tf.cast(result_shape[i], tf.float32)
self.reduction_factors[i] = tf.div(tf.multiply(factor1, factor2), divisor)
return layer, m.end()
def AddFCLayer(self, prev_layer, index):
"""Parse expression and add Fully Connected Layer.
Args:
prev_layer: Input tensor.
index: Position in model_str to start parsing
Returns:
Output tensor, end index in model_str.
"""
pattern = re.compile(R'(F)(s|t|r|l|m)({\w+})?(\d+)')
m = pattern.match(self.model_str, index)
if m is None:
return None, None
fn = self._NonLinearity(m.group(2))
name = self._GetLayerName(m.group(0), index, m.group(3))
depth = int(m.group(4))
input_depth = shapes.tensor_dim(prev_layer, 1) * shapes.tensor_dim(
prev_layer, 2) * shapes.tensor_dim(prev_layer, 3)
# The slim fully connected is actually a 1x1 conv, so we have to crush the
# dimensions on input.
# Everything except batch goes to depth, and therefore has to be known.
shaped = tf.reshape(
prev_layer, [-1, input_depth], name=name + '_reshape_in')
output = slim.fully_connected(shaped, depth, activation_fn=fn, scope=name)
# Width and height are collapsed to 1.
self.reduction_factors[1] = None
self.reduction_factors[2] = None
return tf.reshape(
output, [shapes.tensor_dim(prev_layer, 0), 1, 1, depth],
name=name + '_reshape_out'), m.end()
def AddLSTMLayer(self, prev_layer, index):
"""Parse expression and add LSTM Layer.
Args:
prev_layer: Input tensor.
index: Position in model_str to start parsing
Returns:
Output tensor, end index in model_str.
"""
pattern = re.compile(R'(L)(f|r|b)(x|y)(s)?({\w+})?(\d+)')
m = pattern.match(self.model_str, index)
if m is None:
return None, None
direction = m.group(2)
dim = m.group(3)
summarize = m.group(4) == 's'
name = self._GetLayerName(m.group(0), index, m.group(5))
depth = int(m.group(6))
if direction == 'b' and summarize:
fwd = self._LSTMLayer(prev_layer, 'forward', dim, True, depth,
name + '_forward')
back = self._LSTMLayer(prev_layer, 'backward', dim, True, depth,
name + '_reverse')
return tf.concat(axis=3, values=[fwd, back], name=name + '_concat'), m.end()
if direction == 'f':
direction = 'forward'
elif direction == 'r':
direction = 'backward'
else:
direction = 'bidirectional'
outputs = self._LSTMLayer(prev_layer, direction, dim, summarize, depth,
name)
if summarize:
# The x or y dimension is getting collapsed.
if dim == 'x':
self.reduction_factors[2] = None
else:
self.reduction_factors[1] = None
return outputs, m.end()
def _LSTMLayer(self, prev_layer, direction, dim, summarize, depth, name):
"""Adds an LSTM layer with the given pre-parsed attributes.
Always maps 4-D to 4-D regardless of summarize.
Args:
prev_layer: Input tensor.
direction: 'forward' 'backward' or 'bidirectional'
dim: 'x' or 'y', dimension to consider as time.
summarize: True if we are to return only the last timestep.
depth: Output depth.
name: Some string naming the op.
Returns:
Output tensor.
"""
# If the target dimension is y, we need to transpose.
if dim == 'x':
lengths = self.GetLengths(2, 1)
inputs = prev_layer
else:
lengths = self.GetLengths(1, 1)
inputs = tf.transpose(prev_layer, [0, 2, 1, 3], name=name + '_ytrans_in')
input_batch = shapes.tensor_dim(inputs, 0)
num_slices = shapes.tensor_dim(inputs, 1)
num_steps = shapes.tensor_dim(inputs, 2)
input_depth = shapes.tensor_dim(inputs, 3)
# Reshape away the other dimension.
inputs = tf.reshape(
inputs, [-1, num_steps, input_depth], name=name + '_reshape_in')
# We need to replicate the lengths by the size of the other dimension, and
# any changes that have been made to the batch dimension.
tile_factor = tf.to_float(input_batch *
num_slices) / tf.to_float(tf.shape(lengths)[0])
lengths = tf.tile(lengths, [tf.cast(tile_factor, tf.int32)])
lengths = tf.cast(lengths, tf.int64)
outputs = nn_ops.rnn_helper(
inputs,
lengths,
cell_type='lstm',
num_nodes=depth,
direction=direction,
name=name,
stddev=0.1)
# Output depth is doubled if bi-directional.
if direction == 'bidirectional':
output_depth = depth * 2
else:
output_depth = depth
# Restore the other dimension.
if summarize:
outputs = tf.slice(
outputs, [0, num_steps - 1, 0], [-1, 1, -1], name=name + '_sum_slice')
outputs = tf.reshape(
outputs, [input_batch, num_slices, 1, output_depth],
name=name + '_reshape_out')
else:
outputs = tf.reshape(
outputs, [input_batch, num_slices, num_steps, output_depth],
name=name + '_reshape_out')
if dim == 'y':
outputs = tf.transpose(outputs, [0, 2, 1, 3], name=name + '_ytrans_out')
return outputs
def _NonLinearity(self, code):
"""Returns the non-linearity function pointer for the given string code.
For forwards compatibility, allows the full names for stand-alone
non-linearities, as well as the single-letter names used in ops like C,F.
Args:
code: String code representing a non-linearity function.
Returns:
non-linearity function represented by the code.
"""
if code in ['s', 'Sig']:
return tf.sigmoid
elif code in ['t', 'Tanh']:
return tf.tanh
elif code in ['r', 'Relu']:
return tf.nn.relu
elif code in ['m', 'Smax']:
return tf.nn.softmax
return None
def _GetLayerName(self, op_str, index, name_str):
"""Generates a name for the op, using a user-supplied name if possible.
Args:
op_str: String representing the parsed op.
index: Position in model_str of the start of the op.
name_str: User-supplied {name} with {} that need removing or None.
Returns:
Selected name.
"""
if name_str:
return name_str[1:-1]
else:
return op_str.translate(self.transtab) + '_' + str(index)
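# For example (added note), the op string 'Mp2,2' at index 0 with no
# user-supplied {name} becomes 'Mp2_2_0' after the '(,)' -> '___' translation.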
def _SkipWhitespace(self, index):
"""Skips any leading whitespace in the model description.
Args:
index: Position in model_str to start parsing
Returns:
end index in model_str of whitespace.
"""
pattern = re.compile(R'([ \t\n]+)')
m = pattern.match(self.model_str, index)
if m is None:
return index
return m.end()
| 37.62243
| 83
| 0.618839
|
432ed35d75e3859864531470ff3c9c6ca38ba7a1
| 1,212
|
py
|
Python
|
examples/recaptcha/app.py
|
jace/flask-wtf
|
bd6cfe07daf0607947c506fbb656e53de04851a1
|
[
"BSD-3-Clause"
] | 1
|
2015-09-16T16:20:13.000Z
|
2015-09-16T16:20:13.000Z
|
examples/recaptcha/app.py
|
jace/flask-wtf
|
bd6cfe07daf0607947c506fbb656e53de04851a1
|
[
"BSD-3-Clause"
] | null | null | null |
examples/recaptcha/app.py
|
jace/flask-wtf
|
bd6cfe07daf0607947c506fbb656e53de04851a1
|
[
"BSD-3-Clause"
] | null | null | null |
from flask import Flask, render_template, flash, session, redirect, url_for
from flask.ext.wtf import Form, TextAreaField, RecaptchaField, Required
DEBUG = True
SECRET_KEY = 'secret'
# keys for localhost. Change as appropriate.
RECAPTCHA_PUBLIC_KEY = '6LeYIbsSAAAAACRPIllxA7wvXjIE411PfdB2gt2J'
RECAPTCHA_PRIVATE_KEY = '6LeYIbsSAAAAAJezaIq3Ft_hSTo0YtyeFG-JgRtu'
app = Flask(__name__)
app.config.from_object(__name__)
class CommentForm(Form):
comment = TextAreaField("Comment", validators=[Required()])
recaptcha = RecaptchaField()
@app.route("/")
def index(form=None):
if form is None:
form = CommentForm()
comments = session.get("comments", [])
return render_template("index.html",
comments=comments,
form=form)
@app.route("/add/", methods=("POST",))
def add_comment():
form = CommentForm()
if form.validate_on_submit():
comments = session.pop('comments', [])
comments.append(form.comment.data)
session['comments'] = comments
flash("You have added a new comment")
return redirect(url_for("index"))
return index(form)
if __name__ == "__main__":
app.run()
| 27.545455
| 75
| 0.669142
|
bcdd3c622218a70bc9fae99c38f47a45298a8747
| 3,248
|
py
|
Python
|
src/hyperloop/Python/tests/test_cycle_group.py
|
jcchin/Hyperloop_v2
|
73861d2207af8738425c1d484909ed0433b9653f
|
[
"Apache-2.0"
] | 1
|
2021-04-29T00:23:03.000Z
|
2021-04-29T00:23:03.000Z
|
src/hyperloop/Python/tests/test_cycle_group.py
|
jcchin/Hyperloop_v2
|
73861d2207af8738425c1d484909ed0433b9653f
|
[
"Apache-2.0"
] | 9
|
2016-11-23T09:10:34.000Z
|
2016-12-06T01:10:09.000Z
|
src/hyperloop/Python/tests/test_cycle_group.py
|
jcchin/Hyperloop_v2
|
73861d2207af8738425c1d484909ed0433b9653f
|
[
"Apache-2.0"
] | 11
|
2016-01-19T20:26:35.000Z
|
2021-02-13T11:16:20.000Z
|
"""
Test for cycle_group.py. Uses test values and outputs given by NPSS
"""
from __future__ import print_function
import numpy as np
from openmdao.api import Group, Problem, IndepVarComp
from openmdao.core.group import Group, Component, IndepVarComp
from openmdao.solvers.newton import Newton
from openmdao.api import NLGaussSeidel
from openmdao.solvers.scipy_gmres import ScipyGMRES
from openmdao.units.units import convert_units as cu
from openmdao.api import Problem, LinearGaussSeidel
from openmdao.solvers.ln_direct import DirectSolver
from openmdao.api import SqliteRecorder
from pycycle.components import Compressor, Shaft, FlowStart, Inlet, Nozzle, Duct, Splitter, FlightConditions
from pycycle.species_data import janaf
from pycycle.connect_flow import connect_flow
from pycycle.constants import AIR_FUEL_MIX, AIR_MIX
from pycycle.constants import R_UNIVERSAL_ENG, R_UNIVERSAL_SI
from hyperloop.Python.pod.cycle import cycle_group
def create_problem(GroupName):
root = Group()
prob = Problem(root)
prob.root.add('Cycle', GroupName)
return prob
class TestCycle(object):
def test_case1_vs_inductrack(self):
CycleGroup = cycle_group.Cycle()
prob = create_problem(CycleGroup)
params = (('comp_PR', 12.6, {'units': 'unitless'}),
('PsE', 0.05588, {'units': 'psi'}),
('pod_mach_number', .8, {'units': 'unitless'}),
('tube_pressure', 850., {'units': 'Pa'}),
('tube_temp', 320., {'units': 'K'}),
('comp_inlet_area', 2.3884, {'units': 'm**2'}))
prob.root.add('des_vars', IndepVarComp(params))
prob.root.connect('des_vars.comp_PR', 'Cycle.comp.map.PRdes')
prob.root.connect('des_vars.PsE', 'Cycle.nozzle.Ps_exhaust')
prob.root.connect('des_vars.pod_mach_number', 'Cycle.pod_mach')
prob.root.connect('des_vars.tube_pressure', 'Cycle.tube_pressure')
prob.root.connect('des_vars.tube_temp', 'Cycle.tube_temp')
prob.root.connect('des_vars.comp_inlet_area', 'Cycle.comp_inlet_area')
prob.setup()
#prob.root.list_connections()
prob['Cycle.CompressorMass.comp_eff'] = 91.0
prob['Cycle.CompressorLen.h_stage'] = 58.2
prob['Cycle.FlowPathInputs.gamma'] = 1.4
prob['Cycle.FlowPathInputs.R'] = 287.
prob['Cycle.FlowPathInputs.eta'] = 0.99
prob['Cycle.FlowPathInputs.comp_mach'] = 0.6
prob.run()
# Test Values
assert np.isclose(prob['Cycle.comp_len'], 3.579, rtol=.01)
assert np.isclose(prob['Cycle.comp_mass'], 774.18, rtol=.01)
assert np.isclose(cu(prob['Cycle.comp.trq'], 'ft*lbf', 'N*m'), -2622.13, rtol=.01)
assert np.isclose(cu(prob['Cycle.comp.power'], 'hp', 'W'), -2745896.44, rtol=.01)
assert np.isclose(cu(prob['Cycle.comp.Fl_O:stat:area'], 'inch**2', 'm**2'), 0.314, rtol=.01)
assert np.isclose(cu(prob['Cycle.nozzle.Fg'], 'lbf', 'N'), 6562.36, rtol=.01)
assert np.isclose(cu(prob['Cycle.inlet.F_ram'], 'lbf', 'N'), 1855.47, rtol=.01)
assert np.isclose(cu(prob['Cycle.nozzle.Fl_O:tot:T'], 'degR', 'K'), 767.132, rtol=.01)
assert np.isclose(cu(prob['Cycle.nozzle.Fl_O:stat:W'], 'lbm/s', 'kg/s'), 6.467, rtol=.01)
| 41.113924
| 108
| 0.669643
|
3e1f14cb7d9e020f593678d8e89a9653140cb311
| 7,473
|
py
|
Python
|
projects/file_handling/filehandling.py
|
iannico322/Python-GUI-Project
|
5e5db3ef0677b5ee9d5378823eaa9f4f34c0b097
|
[
"Unlicense"
] | null | null | null |
projects/file_handling/filehandling.py
|
iannico322/Python-GUI-Project
|
5e5db3ef0677b5ee9d5378823eaa9f4f34c0b097
|
[
"Unlicense"
] | null | null | null |
projects/file_handling/filehandling.py
|
iannico322/Python-GUI-Project
|
5e5db3ef0677b5ee9d5378823eaa9f4f34c0b097
|
[
"Unlicense"
] | null | null | null |
from tkinter import*
import tkinter.ttk as ttk
import tkinter.messagebox as tkMessageBox
import sqlite3
import os
#DEVELOPED by:Author Ian Nico M. Caulin
#COURSE : BSIT-1R5
root = Tk()
root.title("Python - Save Data To Table (File Handling)")
root.iconbitmap('images/logo.ico')
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
width = 988
height = 500
x = (screen_width/2) - (width/2)
y = (screen_height/2) - (height/2)
root.geometry('%dx%d+%d+%d' % (width, height, x, y))
root.resizable(0, 0)
root.config(bg='#17161b')
#==================================METHODS============================================
def Database():
global conn, cursor
conn = sqlite3.connect('projects/file_handling/Filehandling.db')
cursor = conn.cursor()
cursor.execute("CREATE TABLE IF NOT EXISTS `member` (mem_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, firstname TEXT, lastname TEXT, gender TEXT, address TEXT, username TEXT, password TEXT)")
def insertData():
if FIRSTNAME.get() == "" or LASTNAME.get() == "" or GENDER.get() == "" or ADDRESS.get() == "" or USERNAME.get() == "" or PASSWORD.get() == "":
txt_result.config(text="Please complete the required field!", fg="red")
else:
Database()
cursor.execute("INSERT INTO `member` (firstname,lastname, gender, address, username, password) VALUES(?, ?, ?, ?, ?,?)", (str(FIRSTNAME.get()), str(LASTNAME.get()), str(GENDER.get()), str(ADDRESS.get()), str(USERNAME.get()), str(PASSWORD.get())))
conn.commit()
FIRSTNAME.set("")
LASTNAME.set("")
GENDER.set("")
ADDRESS.set("")
USERNAME.set("")
PASSWORD.set("")
txt_result.config(text="Saved a data!", fg="green")
cursor.close()
conn.close()
displayData()
def displayData():
tree.delete(*tree.get_children())
Database()
cursor.execute("SELECT * FROM member ORDER BY firstname ASC")
fetch = cursor.fetchall()
for data in fetch:
tree.insert('', 'end', values=(
data[1], data[2], data[3], data[4],data[5],data[6], '******'))
cursor.close()
conn.close()
def Exit():
result = tkMessageBox.askquestion('P A H I M A N G N O !', 'Are you sure you want to exit?', icon="warning")
if result == 'yes':
root.withdraw()
os.system('main.py')
#==================================VARIABLES==========================================
titlepic = PhotoImage(file='images/filehandlingtitle.png')
FIRSTNAME = StringVar()
LASTNAME = StringVar()
GENDER = StringVar()
ADDRESS = StringVar()
USERNAME = StringVar()
PASSWORD = StringVar()
#==================================FRAME==============================================
Top = Frame(root)
Top.pack(side=TOP,anchor=W)
Left = Frame(root, width=700, height=500)
Left.pack(side=LEFT)
Right = Frame(root, width=600, height=500, bd=4,bg='#17161b')
Right.pack(side=RIGHT,padx=(0,0))
Forms = Frame(Right, width=100, height=750,bg='#17161b')
Forms.pack(side=TOP,pady=20)
Buttons = Frame(Right, width=300, height=100,bg='#17161b')
Buttons.pack(side=BOTTOM)
RadioGroup = Frame(Forms)
title = Frame(root)
#==================================LABEL WIDGET=======================================
txt_title = Label(Top,image=titlepic,bg='#050505',borderwidth=0)
txt_title.pack()
txt_firstname = Label(Forms, text="Firstname:",font='arial 14 ',fg='#c4c4c4',bg='#17161b',bd=15)
txt_firstname.grid(row=0, stick="e")
txt_lastname = Label(Forms, text="Lastname:",font='arial 14 ',fg='#c4c4c4',bg='#17161b',bd=15)
txt_lastname.grid(row=1, stick="e")
txt_gender = Label(Forms, text="Gender:",font='arial 14 ',fg='#c4c4c4',bg='#17161b',bd=15)
txt_gender.grid(row=2, stick="e")
txt_address = Label(Forms, text="Address:",font='arial 14 ',fg='#c4c4c4',bg='#17161b',bd=15)
txt_address.grid(row=3, stick="e")
txt_username = Label(Forms, text="Username:",font='arial 14 ',fg='#c4c4c4',bg='#17161b',bd=15)
txt_username.grid(row=4, stick="e")
txt_password = Label(Forms, text="Password:",font='arial 14 ',fg='#c4c4c4',bg='#17161b',bd=15)
txt_password.grid(row=5, stick="e")
txt_result = Label(Buttons,bg='#17161b')
txt_result.pack(side=TOP)
#==================================ENTRY WIDGET=======================================
firstname = Entry(Forms, textvariable=FIRSTNAME, width=28,font='arial 10 ',fg='#1df700',bg='#0f1012',bd=1,relief=SUNKEN)
firstname.grid(row=0, column=1)
lastname = Entry(Forms, textvariable=LASTNAME, width=28,font='arial 10 ',fg='#1df700',bg='#0f1012',bd=1,relief=SUNKEN)
lastname.grid(row=1, column=1)
gender1 = Radiobutton(Forms, text='Male', variable=GENDER, value="Male",bg='#17161b',fg='#1df700',selectcolor='#17161b',activebackground='#17161b').grid(row=2,columnspan=2)
gender2 =Radiobutton(Forms, text='Female', variable=GENDER, value="Female",bg='#17161b',fg='#1df700',selectcolor='#17161b',activebackground='#17161b').grid(row=2,column=1,columnspan=2)
address = Entry(Forms, textvariable=ADDRESS, width=30,font='arial 10 ',fg='#1df700',bg='#0f1012',bd=1,relief=SUNKEN)
address.grid(row=3, column=1)
username = Entry(Forms, textvariable=USERNAME, width=28,font='arial 10 ',fg='#1df700',bg='#0f1012',bd=1,relief=SUNKEN,)
username.grid(row=4, column=1)
password = Entry(Forms, textvariable=PASSWORD, show="*", width=28,font='arial 10 ',fg='#1df700',bg='#0f1012',bd=1,relief=SUNKEN)
password.grid(row=5, column=1)
#==================================BUTTONS WIDGET=====================================
btn_create = Button(Buttons,cursor='circle', width=10, text="Save",font='arial 10 bold',activebackground='#17161b',fg='#1df700',relief=SUNKEN,activeforeground='#1df700', command=insertData,bg='black',padx=10)
btn_create.pack(side=LEFT,expand=7)
btn_exit = Button(Buttons,cursor='circle', width=10, text="Exit", command=Exit,bg='black',activebackground='#17161b',fg='#1df700',relief=SUNKEN,activeforeground='#1df700',font='arial 10 bold',padx=10)
btn_exit.pack(side=LEFT)
#==================================LIST WIDGET========================================
scrollbary = Scrollbar(Left, orient=VERTICAL)
scrollbarx = Scrollbar(Left, orient=HORIZONTAL,)
tree = ttk.Treeview(Left, columns=("Firstname", "Lastname","Gender", "Address", "Username", "Password"),
selectmode="extended", height=500, yscrollcommand=scrollbary.set, xscrollcommand=scrollbarx.set)
scrollbary.config(command=tree.yview)
scrollbary.pack(side=RIGHT, fill=Y)
scrollbarx.config(command=tree.xview)
scrollbarx.pack(side=BOTTOM, fill=X)
tree.heading('Firstname', text="Firstname", anchor=W)
tree.heading('Lastname', text="Lastname", anchor=W)
tree.heading('Gender', text="Gender", anchor=W)
tree.heading('Address', text="Address", anchor=W)
tree.heading('Username', text="Username", anchor=W)
tree.heading('Password', text="Password", anchor=W)
tree.column('#0', stretch=NO, minwidth=0, width=0)
tree.column('#1', stretch=NO, minwidth=0, width=80)
tree.column('#2', stretch=NO, minwidth=0, width=120)
tree.column('#3', stretch=NO, minwidth=0, width=80)
tree.column('#4', stretch=NO, minwidth=0, width=150)
tree.column('#5', stretch=NO, minwidth=0, width=120)
tree.column('#6', stretch=NO, minwidth=0, width=80)
tree.pack()
#==================================INITIALIZATION=====================================
displayData()
root.mainloop()
| 47
| 255
| 0.623043
|
590d5e8e9fdee4e32777ed8089f491528417aa5c
| 13,250
|
py
|
Python
|
luna/gateware/soc/peripheral.py
|
pimdegroot/luna
|
16110a59c72279e7272310e81ca4656da11fb1da
|
[
"BSD-3-Clause"
] | 1
|
2021-02-28T04:35:15.000Z
|
2021-02-28T04:35:15.000Z
|
luna/gateware/soc/peripheral.py
|
pimdegroot/luna
|
16110a59c72279e7272310e81ca4656da11fb1da
|
[
"BSD-3-Clause"
] | 4
|
2020-11-11T17:32:33.000Z
|
2020-11-30T13:08:05.000Z
|
luna/gateware/soc/peripheral.py
|
pimdegroot/luna
|
16110a59c72279e7272310e81ca4656da11fb1da
|
[
"BSD-3-Clause"
] | null | null | null |
#
# This file is part of LUNA.
#
# Adapted from lambdasoc.
# This file includes content Copyright (C) 2020 LambdaConcept.
#
# Per our BSD license, derivative files must include this license disclaimer.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" Peripheral helpers for LUNA devices. """
from contextlib import contextmanager
from nmigen import Module, Elaboratable
from nmigen import tracer
from nmigen.utils import log2_int
from nmigen_soc import csr, wishbone
from nmigen_soc.memory import MemoryMap
from nmigen_soc.csr.wishbone import WishboneCSRBridge
from .event import EventSource, IRQLine, InterruptSource
__all__ = ["Peripheral", "CSRBank", "PeripheralBridge"]
class Peripheral:
"""Wishbone peripheral.
A helper class to reduce the boilerplate needed to control a peripheral with a Wishbone interface.
It provides facilities for instantiating CSR registers, requesting windows to subordinate busses
and sending interrupt requests to the CPU.
The ``Peripheral`` class is not meant to be instantiated as-is, but rather as a base class for
actual peripherals.
Usage example
-------------
```
class ExamplePeripheral(Peripheral, Elaboratable):
def __init__(self):
super().__init__()
bank = self.csr_bank()
self._foo = bank.csr(8, "r")
self._bar = bank.csr(8, "w")
self._rdy = self.event(mode="rise")
self._bridge = self.bridge(data_width=32, granularity=8, alignment=2)
self.bus = self._bridge.bus
self.irq = self._bridge.irq
def elaborate(self, platform):
m = Module()
m.submodules.bridge = self._bridge
# ...
return m
```
Arguments
---------
name : str
Name of this peripheral. If ``None`` (default) the name is inferred from the variable
name this peripheral is assigned to.
Properties
----------
name : str
Name of the peripheral.
"""
def __init__(self, name=None, src_loc_at=1):
if name is not None and not isinstance(name, str):
raise TypeError("Name must be a string, not {!r}".format(name))
self.name = name or tracer.get_var_name(depth=2 + src_loc_at).lstrip("_")
self._csr_banks = []
self._windows = []
self._events = []
self._bus = None
self._irq = None
@property
def bus(self):
"""Wishbone bus interface.
Return value
------------
An instance of :class:`Interface`.
Exceptions
----------
Raises :exn:`NotImplementedError` if the peripheral does not have a Wishbone bus.
"""
if self._bus is None:
raise NotImplementedError("Peripheral {!r} does not have a bus interface"
.format(self))
return self._bus
@bus.setter
def bus(self, bus):
if not isinstance(bus, wishbone.Interface):
raise TypeError("Bus interface must be an instance of wishbone.Interface, not {!r}"
.format(bus))
self._bus = bus
@property
def irq(self):
"""Interrupt request line.
Return value
------------
An instance of :class:`IRQLine`.
Exceptions
----------
Raises :exn:`NotImplementedError` if the peripheral does not have an IRQ line.
"""
if self._irq is None:
raise NotImplementedError("Peripheral {!r} does not have an IRQ line"
.format(self))
return self._irq
@irq.setter
def irq(self, irq):
if not isinstance(irq, IRQLine):
raise TypeError("IRQ line must be an instance of IRQLine, not {!r}"
.format(irq))
self._irq = irq
def csr_bank(self, *, addr=None, alignment=None, desc=None):
"""Request a CSR bank.
Arguments
---------
addr : int or None
Address of the bank. If ``None``, the implicit next address will be used.
Otherwise, the exact specified address (which must be a multiple of
``2 ** max(alignment, bridge_alignment)``) will be used.
alignment : int or None
Alignment of the bank. If not specified, the bridge alignment is used.
See :class:`nmigen_soc.csr.Multiplexer` for details.
desc: (str, optional):
Documentation of the given CSR bank.
Return value
------------
An instance of :class:`CSRBank`.
"""
bank = CSRBank(name_prefix=self.name)
self._csr_banks.append((bank, addr, alignment))
return bank
def window(self, *, addr_width, data_width, granularity=None, features=frozenset(),
alignment=0, addr=None, sparse=None):
"""Request a window to a subordinate bus.
See :meth:`nmigen_soc.wishbone.Decoder.add` for details.
Return value
------------
An instance of :class:`nmigen_soc.wishbone.Interface`.
"""
window = wishbone.Interface(addr_width=addr_width, data_width=data_width,
granularity=granularity, features=features)
granularity_bits = log2_int(data_width // window.granularity)
window.memory_map = MemoryMap(addr_width=addr_width + granularity_bits,
data_width=window.granularity, alignment=alignment)
self._windows.append((window, addr, sparse))
return window
def event(self, *, mode="level", name=None, src_loc_at=0):
"""Request an event source.
See :class:`EventSource` for details.
Return value
------------
An instance of :class:`EventSource`.
"""
event = EventSource(mode=mode, name=name, src_loc_at=1 + src_loc_at)
self._events.append(event)
return event
def bridge(self, *, data_width=8, granularity=None, features=frozenset(), alignment=0):
"""Request a bridge to the resources of the peripheral.
See :class:`PeripheralBridge` for details.
Return value
------------
A :class:`PeripheralBridge` providing access to local resources.
"""
return PeripheralBridge(self, data_width=data_width, granularity=granularity,
features=features, alignment=alignment)
def iter_csr_banks(self):
"""Iterate requested CSR banks and their parameters.
Yield values
------------
A tuple ``bank, addr, alignment`` describing the bank and its parameters.
"""
for bank, addr, alignment in self._csr_banks:
yield bank, addr, alignment
def iter_windows(self):
"""Iterate requested windows and their parameters.
Yield values
------------
        A tuple ``window, addr, sparse`` describing the window and its parameters, as
        given to :meth:`Peripheral.window`.
"""
for window, addr, sparse in self._windows:
yield window, addr, sparse
def iter_events(self):
"""Iterate requested event sources.
Yield values
------------
An instance of :class:`EventSource`.
"""
for event in self._events:
yield event
class CSRBank:
"""CSR register bank.
Parameters
----------
name_prefix : str
Name prefix of the bank registers.
"""
def __init__(self, *, name_prefix=""):
self._name_prefix = name_prefix
self._csr_regs = []
def csr(self, width, access, *, addr=None, alignment=None, name=None, desc=None,
src_loc_at=0):
"""Request a CSR register.
Parameters
----------
width : int
Width of the register. See :class:`nmigen_soc.csr.Element`.
access : :class:`Access`
Register access mode. See :class:`nmigen_soc.csr.Element`.
addr : int
Address of the register. See :meth:`nmigen_soc.csr.Multiplexer.add`.
alignment : int
Register alignment. See :class:`nmigen_soc.csr.Multiplexer`.
name : str
Name of the register. If ``None`` (default) the name is inferred from the variable
name this register is assigned to.
desc: str
Documentation for the provided register, if available.
Used to capture register documentation automatically.
Return value
------------
An instance of :class:`nmigen_soc.csr.Element`.
"""
if name is not None and not isinstance(name, str):
raise TypeError("Name must be a string, not {!r}".format(name))
name = name or tracer.get_var_name(depth=2 + src_loc_at).lstrip("_")
elem_name = "{}_{}".format(self._name_prefix, name)
elem = csr.Element(width, access, name=elem_name)
self._csr_regs.append((elem, addr, alignment))
return elem
def iter_csr_regs(self):
"""Iterate requested CSR registers and their parameters.
Yield values
------------
A tuple ``elem, addr, alignment`` describing the register and its parameters.
"""
for elem, addr, alignment in self._csr_regs:
yield elem, addr, alignment
class PeripheralBridge(Elaboratable):
"""Peripheral bridge.
A bridge providing access to the registers and windows of a peripheral, and support for
interrupt requests from its event sources.
    Event management is performed by an :class:`InterruptSource` submodule.
Parameters
---------
periph : :class:`Peripheral`
The peripheral whose resources are exposed by this bridge.
data_width : int
Data width. See :class:`nmigen_soc.wishbone.Interface`.
granularity : int or None
Granularity. See :class:`nmigen_soc.wishbone.Interface`.
features : iter(str)
Optional signal set. See :class:`nmigen_soc.wishbone.Interface`.
alignment : int
Resource alignment. See :class:`nmigen_soc.memory.MemoryMap`.
Attributes
----------
bus : :class:`nmigen_soc.wishbone.Interface`
Wishbone bus providing access to the resources of the peripheral.
irq : :class:`IRQLine`, out
Interrupt request. It is raised if any event source is enabled and has a pending
notification.
"""
def __init__(self, periph, *, data_width, granularity, features, alignment):
if not isinstance(periph, Peripheral):
raise TypeError("Peripheral must be an instance of Peripheral, not {!r}"
.format(periph))
self._wb_decoder = wishbone.Decoder(addr_width=1, data_width=data_width,
granularity=granularity,
features=features, alignment=alignment)
self._csr_subs = []
for bank, bank_addr, bank_alignment in periph.iter_csr_banks():
if bank_alignment is None:
bank_alignment = alignment
csr_mux = csr.Multiplexer(addr_width=1, data_width=8, alignment=bank_alignment)
for elem, elem_addr, elem_alignment in bank.iter_csr_regs():
if elem_alignment is None:
elem_alignment = alignment
csr_mux.add(elem, addr=elem_addr, alignment=elem_alignment, extend=True)
csr_bridge = WishboneCSRBridge(csr_mux.bus, data_width=data_width)
self._wb_decoder.add(csr_bridge.wb_bus, addr=bank_addr, extend=True)
self._csr_subs.append((csr_mux, csr_bridge))
for window, window_addr, window_sparse in periph.iter_windows():
self._wb_decoder.add(window, addr=window_addr, sparse=window_sparse, extend=True)
events = list(periph.iter_events())
if len(events) > 0:
self._int_src = InterruptSource(events, name="{}_ev".format(periph.name))
self.irq = self._int_src.irq
csr_mux = csr.Multiplexer(addr_width=1, data_width=8, alignment=alignment)
csr_mux.add(self._int_src.status, extend=True)
csr_mux.add(self._int_src.pending, extend=True)
csr_mux.add(self._int_src.enable, extend=True)
csr_bridge = WishboneCSRBridge(csr_mux.bus, data_width=data_width)
self._wb_decoder.add(csr_bridge.wb_bus, extend=True)
self._csr_subs.append((csr_mux, csr_bridge))
else:
self._int_src = None
self.irq = None
self.bus = self._wb_decoder.bus
def elaborate(self, platform):
m = Module()
for i, (csr_mux, csr_bridge) in enumerate(self._csr_subs):
m.submodules[ "csr_mux_{}".format(i)] = csr_mux
m.submodules["csr_bridge_{}".format(i)] = csr_bridge
if self._int_src is not None:
m.submodules._int_src = self._int_src
m.submodules.wb_decoder = self._wb_decoder
return m
| 34.960422
| 102
| 0.598189
|
985ab442db67c47af5f20c1838feedb6593fb004
| 678
|
py
|
Python
|
sdks/python/test/test_todays_stats.py
|
barryw/bjr
|
de56f22198b34a9d303ee43ac01134b5cf1ce863
|
[
"BSD-3-Clause"
] | 2
|
2020-06-04T03:04:15.000Z
|
2020-06-13T12:53:58.000Z
|
sdks/python/test/test_todays_stats.py
|
barryw/bjr
|
de56f22198b34a9d303ee43ac01134b5cf1ce863
|
[
"BSD-3-Clause"
] | 6
|
2020-05-24T12:56:25.000Z
|
2022-02-26T07:13:17.000Z
|
sdks/python/test/test_todays_stats.py
|
barryw/bjr
|
de56f22198b34a9d303ee43ac01134b5cf1ce863
|
[
"BSD-3-Clause"
] | null | null | null |
"""
BJR API V1
API specification for the BJR job server. # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import bjr4py
from bjr4py.model.todays_stats import TodaysStats
class TestTodaysStats(unittest.TestCase):
"""TodaysStats unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testTodaysStats(self):
"""Test TodaysStats"""
# FIXME: construct object with mandatory attributes with example values
# model = TodaysStats() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 18.833333
| 79
| 0.659292
|
68abe8dc33bbbd11fcda951b888d96631084376b
| 13,492
|
py
|
Python
|
bin/commonSubroutines/drawFigure/drawFigure.py
|
Hughes-Genome-Group/CCseqBasicM
|
d806d0cc9a9a3323b2c486124c1d6002408d57e4
|
[
"MIT"
] | 1
|
2020-07-16T11:27:47.000Z
|
2020-07-16T11:27:47.000Z
|
bin/commonSubroutines/drawFigure/drawFigure.py
|
Hughes-Genome-Group/CCseqBasicM
|
d806d0cc9a9a3323b2c486124c1d6002408d57e4
|
[
"MIT"
] | 2
|
2020-01-30T10:53:38.000Z
|
2021-05-23T12:59:40.000Z
|
bin/commonSubroutines/drawFigure/drawFigure.py
|
Hughes-Genome-Group/CCseqBasicM
|
d806d0cc9a9a3323b2c486124c1d6002408d57e4
|
[
"MIT"
] | 1
|
2019-12-02T14:46:09.000Z
|
2019-12-02T14:46:09.000Z
|
##########################################################################
# Copyright 2017, Jelena Telenius (jelena.telenius@imm.ox.ac.uk) #
# #
# This file is part of CCseqBasic5 . #
# #
# CCseqBasic5 is free software: you can redistribute it and/or modify #
# it under the terms of the MIT license.
#
#
# #
# CCseqBasic5 is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# MIT license for more details.
# #
# You should have received a copy of the MIT license
# along with CCseqBasic5.
##########################################################################
# print "Preparing to run - check where we are and which version we have.."
import sys
# print ""
print "We are running in machine :"
print sys.platform
print "We are running in Python version :"
print sys.version
print ""
# print "We can load from these paths :"
# print sys.path
# print "We have these auto-loaded modules"
# print sys.modules
# print "We have these built-ins :"
# print dir(__builtins__)
# print "----------------------------------------"
# print "Run directory :"
import os
# print os.getcwd()
# print "----------------------------------------"
# print "Enabling log file output.."
import syslog
# print "Enabling resource use and run time statistics monitoring.."
import stat
# print "----------------------------------------"
# print "Importing script-specific libraries.."
# print "----------------------------------------"
#
# print "Importing regular expressions"
import re
#
# print "Importing matplotlib"
import matplotlib as mpl
print "We are running matplotlib version :"
print mpl.__version__
#
# print "Available back ends (instead of X windows) :"
# print (mpl.rcsetup.non_interactive_bk)
# print "Now loading the back end : "
# print "mpl.use('pdf')"
mpl.use('pdf')
# print "Importing pyplot "
import matplotlib.pyplot as plt
# print "Importing patches "
import matplotlib.patches as patch
# print "----------------------------------------"
print "Imported (and auto-loaded) modules :"
print(globals())
# print "----------------------------------------"
# print "Reading in the subroutines.."
# print >> sys.stderr, "----------------------------------------"
# print >> sys.stderr, "Reading in the subroutines.."
# Making the comments above the lines..
def writeComment(row) :
layer1.annotate(myComment[row], (2, 88-(10*row))
)
# Subroutines to make RED-GREEN drawing :
# row = from the top, what is the row number for this data ?
def drawTwoColorsFlashed(row) :
print >> sys.stderr, 'Flashed'
print >> sys.stderr, row
# myPercentages[1]=[70,30]
layer1.broken_barh(
[(0, myFlashedPercentages[row][0]), (myFlashedPercentages[row][0], myFlashedPercentages[row][1])], # X (start, width)
(83-(10*row), 4), # Y (start, height)
facecolors=myColors[row]
)
def drawTwoColorsNonFlashed(row) :
print >> sys.stderr, 'NonFlashed'
print >> sys.stderr, row
# myPercentages[1]=[70,30]
layer1.broken_barh(
[(myPercentages[1][0], myNonFlashedPercentages[row][0]), (myPercentages[1][0]+myNonFlashedPercentages[row][0], myNonFlashedPercentages[row][1])], # X (start, width)
(83-(10*row), 4), # Y (start, height)
facecolors=myColors[row]
)
# Subroutines to make RED-ORANGE-GREEN drawing :
# row = from the top, what is the row number for this data ?
def drawThreeColorsFlashed(row) :
print >> sys.stderr, 'Flashed'
print >> sys.stderr, row
# myPercentages[1]=[70,30]
layer1.broken_barh(
[(0, myFlashedPercentages[row][0]), (myFlashedPercentages[row][0], myFlashedPercentages[row][1]), (myFlashedPercentages[row][0]+myFlashedPercentages[row][1], myFlashedPercentages[row][2])], # X (start, width)
(83-(10*row), 4), # Y (start, height)
facecolors=myColors[row]
)
def drawThreeColorsNonFlashed(row) :
print >> sys.stderr, 'NonFlashed'
print >> sys.stderr, row
# myPercentages[1]=[70,30]
layer1.broken_barh(
[(myPercentages[1][0], myNonFlashedPercentages[row][0]), (myPercentages[1][0]+myNonFlashedPercentages[row][0], myNonFlashedPercentages[row][1]), (myPercentages[1][0]+myNonFlashedPercentages[row][0]+myNonFlashedPercentages[row][1], myNonFlashedPercentages[row][2])], # X (start, width)
(83-(10*row), 4), # Y (start, height)
facecolors=myColors[row]
)
# Subroutines to make GREEN-GREEN drawing :
# row = from the top, what is the row number for this data ?
def drawOneColorFlashed(row) :
print >> sys.stderr, 'Flashed'
print >> sys.stderr, row
# myPercentages[1]=[70,30]
layer1.broken_barh(
[(0, myFlashedPercentages[row])], # X (start, width)
(83-(10*row), 4), # Y (start, height)
facecolors=myColors[row]
)
def drawOneColorNonFlashed(row) :
print >> sys.stderr, 'NonFlashed'
print >> sys.stderr, row
# myPercentages[1]=[70,30]
layer1.broken_barh(
[(myPercentages[1][0], myNonFlashedPercentages[row])], # X (start, width)
(83-(10*row), 4), # Y (start, height)
facecolors=myColors[row]
)
# print "----------------------------------------"
# print "Starting the run.."
# print "----------------------------------------"
# print ""
print "Reading the input.."
# print ""
# print >> sys.stderr, "----------------------------------------"
# print >> sys.stderr, ""
print >> sys.stderr, "Reading the input.."
# print >> sys.stderr, ""
names=[]
values = []
colors = []
valuesAsFloats = []
with open('percentages.txt') as f:
for line in f:
data = line.split()
names.append(re.sub(r'_', ' ', data[0]))
values.append(data[1:])
temp = []
for i, value in enumerate(data[1:]):
temp.append(float(value))
valuesAsFloats.append(temp)
# print "names :"
# print names
# print "values :"
# print values
# print "valuesAsFloats :"
# print valuesAsFloats
# print "valuesAsFloats[0] :"
# print valuesAsFloats[0]
# print "valuesAsFloats[1] :"
# print valuesAsFloats[1]
# print "valuesAsFloats[2] :"
# print valuesAsFloats[2]
# print "valuesAsFloats[3] :"
# print valuesAsFloats[3]
# print "valuesAsFloats[4] :"
# print valuesAsFloats[4]
# print "----------------------------------------"
# print ""
print "Setting values.."
# print ""
# print >> sys.stderr, "----------------------------------------"
# print >> sys.stderr, ""
print >> sys.stderr, "Setting values.."
# print >> sys.stderr, ""
# Generating the lists..
myLabel=['0','1','2','3','4','5','6','7','8']
myComment=['0','1','2','3','4','5','6','7','8']
myPercentages=[0,1]
myFlashedPercentages=[0,1,2,3,4,5,6,7,8]
myNonFlashedPercentages=[0,1,2,3,4,5,6,7,8]
myColors=['0',['1','1'],['2','2'],['3','3'],['4','4'],['5','5','5'],['6','6'],['7','7'],['8','8']]
# Default colors (for color blindness support)
#
# PINK GREEN (default)
# RGB HEX
# red 255,74,179 #FF4ABE
# orange 255,140,0 #FF8C00
# green 62,176,145 #3EB091
#
red='#FF4ABE'
orange='#FF8C00'
green='#3EB091'
# Setting the values.. (most of them have four values - those are set here.)
for x in range(2, 9):
if (x != 3 and x !=5 ):
myFlashedPercentages[x]=[valuesAsFloats[x][0],valuesAsFloats[x][1]]
myNonFlashedPercentages[x]=[valuesAsFloats[x][2],valuesAsFloats[x][3]]
myLabel[0]='Total reads (input fastq)'
myComment[0]='Total reads (input fastq)'
myPercentages[0]=valuesAsFloats[0][0]
myColors[0]='blue'
myLabel[1]='Flashed / nonflashed'
myComment[1]='Flash-combined (light blue), non-combined (yellow)'
myPercentages[1]=[valuesAsFloats[1][0],valuesAsFloats[1][1]]
myColors[1]=['dodgerblue','gold']
myLabel[2]='Do/don\'t have RE site'
myComment[2]='With RE site(green), no RE site(red)'
myColors[2]=[green,red]
myLabel[3]='Continue to mapping'
myComment[3]='Continues to mapping :'
myFlashedPercentages[3]=valuesAsFloats[3][0]
myNonFlashedPercentages[3]=valuesAsFloats[3][1]
myColors[3]=green
myLabel[4]='Contains capture'
myComment[4]='cap(green), nocap(red)'
myColors[4]=[green,red]
myLabel[5]='Capture and/or reporter'
# myComment[5]='cap+rep(green), cap+excl(orange), only cap(red)'
myComment[5]='cap+rep(green),only cap(red) - cap+excl also red'
myFlashedPercentages[5]=[valuesAsFloats[5][0],valuesAsFloats[5][1],valuesAsFloats[5][2]]
myNonFlashedPercentages[5]=[valuesAsFloats[5][3],valuesAsFloats[5][4],valuesAsFloats[5][5]]
myColors[5]=[green,orange,red]
myLabel[6]='Multiple (different) captures'
myComment[6]='single cap (green), multicap (red)'
myColors[6]=[green,red]
myLabel[7]='Duplicate filtered'
myComment[7]='non-duplicate (green), duplicate (red)'
myColors[7]=[green,red]
myLabel[8]='Blat/ploidy filtered'
myComment[8]='no-blat-no-ploidy (green), blat and/or ploidy (red)'
myColors[8]=[green,red]
# print >> sys.stderr,"----------------------------------------"
# print >> sys.stderr,""
# print >> sys.stderr,"Checking that the labels are not in wonky order :"
# print >> sys.stderr,""
# for x in range(0, 9):
# print >> sys.stderr,"Label here :", myLabel[x]
# print >> sys.stderr,"Same line in input : ", names[x]
# print >> sys.stderr,""
# for x in range(0, 2):
# print >> sys.stderr,"Label here :", myLabel[x]
# print >> sys.stderr,"myPercentages : ", myPercentages[x]
# print >> sys.stderr,""
# for x in range(2, 9):
# print >> sys.stderr,"Label here :", myLabel[x]
# print >> sys.stderr,"myFlashedPercentages : ", myFlashedPercentages[x]
# print >> sys.stderr,"myNonFlashedPercentages : ", myNonFlashedPercentages[x]
# print "----------------------------------------"
# print ""
print "Drawing axes and tick marks (general overlay).."
# print ""
# print >> sys.stderr, "----------------------------------------"
# print >> sys.stderr, ""
print >> sys.stderr, "Drawing axes and tick marks (general overlay).."
# print >> sys.stderr, ""
# class matplotlib.figure.Figure(figsize=None, dpi=None, facecolor=None, edgecolor=None, linewidth=0.0, frameon=None, subplotpars=None, tight_layout=None)
# matplotlib.pyplot.subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True, subplot_kw=None, gridspec_kw=None, **fig_kw)
fig1, layer1 = plt.subplots()
# Set the overall settings here ..
# Grid on (dotted lines)
layer1.grid(True)
# Where (in whole canvas) we want to put our y-range and x-range
# 0,0 is as normally, left hand down.
# Set x-axis to be from 0 to 100
layer1.set_xlim(0, 100)
layer1.set_xticks([ 0,10,20,30,40,50,60,70,80,90,100])
layer1.set_xlabel('Percentage of input reads')
# Set y-axis to be contain all the reads..
layer1.set_ylim(0, 100)
# From bottom up (as the coordinates go that direction) :
# Copy and reverse the list..
myReverseLabels=myLabel[:]
myReverseLabels.reverse()
layer1.set_yticks([5,15,25,35,45,55,65,75,85])
layer1.set_yticklabels(myReverseLabels)
# print "----------------------------------------"
# print ""
print "Drawing boxes and their labels.."
# print ""
# print >> sys.stderr, "----------------------------------------"
# print >> sys.stderr, ""
print >> sys.stderr, "Drawing boxes and their labels.."
# print >> sys.stderr, ""
# matplotlib.pyplot.broken_barh(xranges, yrange, hold=None, data=None, **kwargs)
# Plot horizontal bars.
myFlashedPercentages[1]=myPercentages[1]
layer1.broken_barh(
[(0, myFlashedPercentages[1][0]), (myFlashedPercentages[1][0], myFlashedPercentages[1][1])], # X (start, width)
(0, 75), # Y (start, height)
facecolors=['lightcyan','lemonchiffon'], edgecolor = "none"
)
# Total reads (input fastq)
writeComment(0)
myFlashedPercentages[0]=myPercentages[0]
drawOneColorFlashed(0)
# Flashed / nonflashed
writeComment(1)
myFlashedPercentages[1]=myPercentages[1]
drawTwoColorsFlashed(1)
# Do/don\'t have RE site
writeComment(2)
drawTwoColorsFlashed(2)
drawTwoColorsNonFlashed(2)
# Continue to mapping
writeComment(3)
drawOneColorFlashed(3)
drawOneColorNonFlashed(3)
# Contains capture
writeComment(4)
drawTwoColorsFlashed(4)
drawTwoColorsNonFlashed(4)
# Capture and/or reporter
writeComment(5)
drawThreeColorsFlashed(5)
drawThreeColorsNonFlashed(5)
# Multiple (different) captures
writeComment(6)
drawTwoColorsFlashed(6)
drawTwoColorsNonFlashed(6)
# Duplicate filtered
writeComment(7)
drawTwoColorsFlashed(7)
drawTwoColorsNonFlashed(7)
# Blat/ploidy filtered
writeComment(8)
drawTwoColorsFlashed(8)
drawTwoColorsNonFlashed(8)
# print "----------------------------------------"
# print ""
print "Saving figure.."
# print ""
# print >> sys.stderr, "----------------------------------------"
# print >> sys.stderr, ""
print >> sys.stderr, "Saving figure.."
# print >> sys.stderr, ""
fig1.savefig('summary.pdf', dpi=90, bbox_inches='tight')
fig1.savefig('summary.png', dpi=90, bbox_inches='tight')
| 32.827251
| 303
| 0.599466
|
efb942ddde66af7f8ac63c968c06ff4b0485a45b
| 2,982
|
py
|
Python
|
common/blockchain_util.py
|
anandrgit/snet-marketplace-service
|
22dd66e9e34a65580eaffa70928bbdb1f67061e8
|
[
"MIT"
] | null | null | null |
common/blockchain_util.py
|
anandrgit/snet-marketplace-service
|
22dd66e9e34a65580eaffa70928bbdb1f67061e8
|
[
"MIT"
] | null | null | null |
common/blockchain_util.py
|
anandrgit/snet-marketplace-service
|
22dd66e9e34a65580eaffa70928bbdb1f67061e8
|
[
"MIT"
] | null | null | null |
import json
import uuid
import web3
from eth_account.messages import defunct_hash_message
from web3 import Web3
class BlockChainUtil(object):
def __init__(self, provider_type, provider):
if provider_type == "HTTP_PROVIDER":
self.provider = Web3.HTTPProvider(provider)
elif provider_type == "WS_PROVIDER":
self.provider = web3.providers.WebsocketProvider(provider)
else:
raise Exception("Only HTTP_PROVIDER and WS_PROVIDER provider type are supported.")
self.web3_object = Web3(self.provider)
def load_contract(self, path):
with open(path) as f:
contract = json.load(f)
return contract
def read_contract_address(self, net_id, path, key):
contract = self.load_contract(path)
return Web3.toChecksumAddress(contract[str(net_id)][key])
def contract_instance(self, contract, address):
return self.web3_object.eth.contract(abi=contract, address=address)
def generate_signature(self, data_types, values, signer_key):
signer_key = "0x" + signer_key if not signer_key.startswith("0x") else signer_key
message = web3.Web3.soliditySha3(data_types, values)
signature = self.web3_object.eth.account.signHash(defunct_hash_message(message), signer_key)
return signature.signature.hex()
def get_nonce(self, address):
nonce = self.web3_object.eth.getTransactionCount(address)
return nonce
def sign_transaction_with_private_key(self, private_key, transaction_object):
return self.web3_object.eth.account.signTransaction(transaction_object, private_key).rawTransaction
def create_transaction_object(self, *positional_inputs, method_name, address, contract_path, contract_address_path,
net_id):
nonce = self.get_nonce(address=address)
self.contract = self.load_contract(path=contract_path)
self.contract_address = self.read_contract_address(net_id=net_id, path=contract_address_path, key='address')
self.contract_instance = self.contract_instance(contract=self.contract, address=self.contract_address)
print("gas_price == ", self.web3_object.eth.gasPrice)
print("nonce == ", nonce)
transaction_object = getattr(self.contract_instance.functions, method_name)(
*positional_inputs).buildTransaction({
"from": address,
"nonce": nonce,
"gasPrice": self.web3_object.eth.gasPrice,
"chainId": 3
})
return transaction_object
def process_raw_transaction(self, raw_transaction):
return self.web3_object.eth.sendRawTransaction(raw_transaction).hex()
def create_account(self):
account = self.web3_object.eth.account.create(uuid.uuid4().hex)
return account.address, account.privateKey.hex()
def get_current_block_no(self):
return self.web3_object.eth.blockNumber
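# --- Editor's usage sketch (not part of the original module) ---
# Minimal illustration of constructing BlockChainUtil and producing an offline
# signature. The provider URL and private key below are placeholders chosen for
# the example; generate_signature() itself makes no network call.
if __name__ == "__main__":
    util = BlockChainUtil("HTTP_PROVIDER", "http://localhost:8545")  # hypothetical node URL
    demo_key = "11" * 32  # throwaway 32-byte private key, for illustration only
    signature = util.generate_signature(["string", "uint256"], ["order-1", 42], demo_key)
    print("demo signature:", signature)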
| 40.849315
| 119
| 0.699866
|
9a55ebabefeaa8b6259a9ccced48feb800dabf86
| 2,085
|
py
|
Python
|
src/__tests__/screenTestRunner.py
|
KapJI/PathPicker
|
3d285f8de9eb43970ae14e0623168b248fbaad6f
|
[
"MIT"
] | null | null | null |
src/__tests__/screenTestRunner.py
|
KapJI/PathPicker
|
3d285f8de9eb43970ae14e0623168b248fbaad6f
|
[
"MIT"
] | null | null | null |
src/__tests__/screenTestRunner.py
|
KapJI/PathPicker
|
3d285f8de9eb43970ae14e0623168b248fbaad6f
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
import curses
import sys
import os
sys.path.insert(0, '../')
from keyBindingsForTest import KeyBindingsForTest
from cursesForTest import CursesForTest
from screenForTest import ScreenForTest
from screenFlags import ScreenFlags
import processInput
import choose
INPUT_DIR = './inputs/'
def getLineObjsFromFile(inputFile, validateFileExists, allInput):
inputFile = os.path.join(INPUT_DIR, inputFile)
file = open(inputFile)
lines = file.read().split('\n')
file.close()
return processInput.getLineObjsFromLines(lines,
validateFileExists=validateFileExists,
allInput=allInput)
def getRowsFromScreenRun(
inputFile,
charInputs,
screenConfig={},
printScreen=True,
pastScreen=None,
pastScreens=None,
validateFileExists=False,
allInput=False,
args=[]):
lineObjs = getLineObjsFromFile(inputFile,
validateFileExists=validateFileExists,
allInput=allInput)
screen = ScreenForTest(
charInputs,
maxX=screenConfig.get('maxX', 80),
maxY=screenConfig.get('maxY', 30),
)
# mock our flags with the passed arg list
flags = ScreenFlags.initFromArgs(args)
# we run our program and throw a StopIteration exception
# instead of sys.exit-ing
try:
choose.doProgram(screen, flags, KeyBindingsForTest(),
CursesForTest(), lineObjs)
except StopIteration:
pass
if printScreen:
screen.printOldScreens()
if pastScreen:
return screen.getRowsWithAttributesForPastScreen(pastScreen)
elif pastScreens:
return screen.getRowsWithAttributesForPastScreens(pastScreens)
return screen.getRowsWithAttributes()
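# --- Editor's usage sketch (not part of the original module) ---
# A screen test would typically call getRowsFromScreenRun roughly like this;
# the input file name and key presses are placeholders, not real fixtures:
#
#     rows = getRowsFromScreenRun(
#         inputFile='someFile.txt',
#         charInputs=['E'],
#         screenConfig={'maxY': 10},
#         printScreen=False,
#     )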
| 28.958333
| 83
| 0.658513
|
778ddc2a3716926a289ec11b99a800e4888bc0be
| 4,627
|
py
|
Python
|
dnachisel/builtin_specifications/EnforceSequence.py
|
Edinburgh-Genome-Foundry/DnaChisel
|
fed4613cee67c22326a5f654ecf8ff0490298359
|
[
"MIT"
] | 124
|
2017-11-14T14:42:25.000Z
|
2022-03-31T08:02:07.000Z
|
dnachisel/builtin_specifications/EnforceSequence.py
|
Edinburgh-Genome-Foundry/DnaChisel
|
fed4613cee67c22326a5f654ecf8ff0490298359
|
[
"MIT"
] | 65
|
2017-11-15T07:25:38.000Z
|
2022-01-31T10:38:45.000Z
|
dnachisel/builtin_specifications/EnforceSequence.py
|
Edinburgh-Genome-Foundry/DnaChisel
|
fed4613cee67c22326a5f654ecf8ff0490298359
|
[
"MIT"
] | 31
|
2018-10-18T12:59:47.000Z
|
2022-02-11T16:54:43.000Z
|
"""Implement EnforceSequence (DO NOT USE YET: Work in progress, stabilizing)"""
# TODO: factorize with self.sequence ?
import numpy as np
from ..Specification import Specification, SpecEvaluation
from ..Location import Location
from ..biotools import group_nearby_indices, reverse_complement, IUPAC_NOTATION
class EnforceSequence(Specification):
"""Enforces a (possibly degenerate) sequence at some location.
Shorthand for annotations: "sequence".
Parameters
----------
sequence
An ATGC string representing the wanted sequence, possibly degenerated,
for instance ATTCGCGTYTTKWNAA
location
Location of the DNA segment on which to enforce the pattern e.g.
``Location(10, 45, 1)`` or simply ``(10, 45, 1)``
"""
localization_interval_length = 6 # used when optimizing
best_possible_score = 0
enforced_by_nucleotide_restrictions = True
shorthand_name = "sequence"
def __init__(self, sequence=None, location=None, boost=1.0):
"""Initialize."""
self.sequence = sequence
self.location = Location.from_data(location)
self.boost = boost
def initialized_on_problem(self, problem, role):
"""Find out what sequence it is that we are supposed to conserve."""
return self._copy_with_full_span_if_no_location(problem)
# if self.location is None:
# result = self.copy_with_changes()
# result.location = Location(0, len(problem.sequence), 1)
# return result
# else:
# return self
def evaluate(self, problem):
"""Return a score equal to -number_of modifications.
Locations are "binned" modifications regions. Each bin has a length
in nucleotides equal to ``localization_interval_length`.`
"""
sequence = self.location.extract_sequence(problem.sequence)
discrepancies = np.array(
[
i
for i, nuc in enumerate(sequence)
if nuc not in IUPAC_NOTATION[self.sequence[i]]
]
)
if self.location.strand == -1:
discrepancies = self.location.end - discrepancies
else:
discrepancies = discrepancies + self.location.start
intervals = [
(r[0], r[-1] + 1)
for r in group_nearby_indices(
discrepancies, max_group_spread=self.localization_interval_length
)
]
locations = [Location(start, end, 1) for start, end in intervals]
return SpecEvaluation(
self, problem, score=-len(discrepancies), locations=locations
)
def localized(self, location, problem=None):
"""Localize the spec to the overlap of its location and the new."""
start, end = location.start, location.end
new_location = self.location.overlap_region(location)
if new_location is None:
return None
else:
if self.location.strand == -1:
start = self.location.end - new_location.end
end = self.location.end - new_location.start
else:
start = new_location.start - self.location.start
end = new_location.end - self.location.start
new_sequence = self.sequence[start:end]
return self.copy_with_changes(location=new_location, sequence=new_sequence)
def restrict_nucleotides(self, sequence, location=None):
"""When localizing, forbid any nucleotide but the one already there."""
if location is not None:
new_location = self.location.overlap_region(location)
if new_location is None:
return []
else:
new_location = self.location
start, end = new_location.start, new_location.end
if self.location.strand == -1:
lend = self.location.end
return [
(
i,
set(
reverse_complement(n)
for n in IUPAC_NOTATION[self.sequence[lend - i - 1]]
),
)
for i in range(start, end)
]
else:
lstart = self.location.start
return [
(i, IUPAC_NOTATION[self.sequence[i - lstart]])
for i in range(start, end)
]
def __repr__(self):
"""Represent."""
return "EnforceSequence(%s)" % str(self.location)
def __str__(self):
"""Represent."""
return "EnforceSequence(%s)" % str(self.location)
| 34.529851
| 87
| 0.594554
|
76edad2f5784d5c31f732f9e070b729dbbae3780
| 12,177
|
py
|
Python
|
tfx/orchestration/portable/mlmd/execution_lib_test.py
|
Anon-Artist/tfx
|
2692c9ab437d76b5d9517996bfe2596862e0791d
|
[
"Apache-2.0"
] | 2
|
2021-05-10T21:39:48.000Z
|
2021-11-17T11:24:29.000Z
|
tfx/orchestration/portable/mlmd/execution_lib_test.py
|
Anon-Artist/tfx
|
2692c9ab437d76b5d9517996bfe2596862e0791d
|
[
"Apache-2.0"
] | 1
|
2021-01-28T13:44:51.000Z
|
2021-04-28T16:15:47.000Z
|
tfx/orchestration/portable/mlmd/execution_lib_test.py
|
Anon-Artist/tfx
|
2692c9ab437d76b5d9517996bfe2596862e0791d
|
[
"Apache-2.0"
] | 1
|
2021-01-28T13:41:51.000Z
|
2021-01-28T13:41:51.000Z
|
# Lint as: python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.portable.mlmd.execution_lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import random
import tensorflow as tf
from tfx.orchestration import metadata
from tfx.orchestration.portable.mlmd import common_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import standard_artifacts
from tfx.utils import test_case_utils
from google.protobuf import text_format
from ml_metadata.proto import metadata_store_pb2
class ExecutionLibTest(test_case_utils.TfxTest):
def setUp(self):
super().setUp()
self._connection_config = metadata_store_pb2.ConnectionConfig()
self._connection_config.sqlite.SetInParent()
def _generate_contexts(self, metadata_handler):
context_spec = pipeline_pb2.NodeContexts()
text_format.Parse(
"""
contexts {
type {name: 'pipeline_context'}
name {
field_value {string_value: 'my_pipeline'}
}
}
contexts {
type {name: 'component_context'}
name {
field_value {string_value: 'my_component'}
}
}""", context_spec)
return context_lib.prepare_contexts(metadata_handler, context_spec)
def testPrepareExecution(self):
with metadata.Metadata(connection_config=self._connection_config) as m:
execution_type = metadata_store_pb2.ExecutionType()
text_format.Parse(
"""
name: 'my_execution'
properties {
key: 'p2'
value: STRING
}
""", execution_type)
result = execution_lib.prepare_execution(
m,
execution_type,
exec_properties={
'p1': 1,
'p2': '2'
},
state=metadata_store_pb2.Execution.COMPLETE)
self.assertProtoEquals(
"""
type_id: 1
last_known_state: COMPLETE
properties {
key: 'p2'
value {
string_value: '2'
}
}
custom_properties {
key: 'p1'
value {
int_value: 1
}
}
""", result)
def testArtifactAndEventPairs(self):
example = standard_artifacts.Examples()
example.uri = 'example'
example.id = 1
expected_artifact = metadata_store_pb2.Artifact()
text_format.Parse(
"""
id: 1
type_id: 1
uri: 'example'""", expected_artifact)
expected_event = metadata_store_pb2.Event()
text_format.Parse(
"""
path {
steps {
key: 'example'
}
steps {
index: 0
}
}
type: INPUT""", expected_event)
with metadata.Metadata(connection_config=self._connection_config) as m:
result = execution_lib._create_artifact_and_event_pairs(
m, {
'example': [example],
}, metadata_store_pb2.Event.INPUT)
self.assertCountEqual([(expected_artifact, expected_event)], result)
def testPutExecutionGraph(self):
with metadata.Metadata(connection_config=self._connection_config) as m:
# Prepares an input artifact. The artifact should be registered in MLMD
# before the put_execution call.
input_example = standard_artifacts.Examples()
input_example.uri = 'example'
input_example.type_id = common_utils.register_type_if_not_exist(
m, input_example.artifact_type).id
[input_example.id] = m.store.put_artifacts([input_example.mlmd_artifact])
# Prepares an output artifact.
output_model = standard_artifacts.Model()
output_model.uri = 'model'
execution = execution_lib.prepare_execution(
m,
metadata_store_pb2.ExecutionType(name='my_execution_type'),
exec_properties={
'p1': 1,
'p2': '2'
},
state=metadata_store_pb2.Execution.COMPLETE)
contexts = self._generate_contexts(m)
execution = execution_lib.put_execution(
m,
execution,
contexts,
input_artifacts={'example': [input_example]},
output_artifacts={'model': [output_model]})
self.assertProtoPartiallyEquals(
output_model.mlmd_artifact,
m.store.get_artifacts_by_id([output_model.id])[0],
ignored_fields=[
'create_time_since_epoch', 'last_update_time_since_epoch'
])
# Verifies edges between artifacts and execution.
[input_event] = m.store.get_events_by_artifact_ids([input_example.id])
self.assertEqual(input_event.execution_id, execution.id)
self.assertEqual(input_event.type, metadata_store_pb2.Event.INPUT)
[output_event] = m.store.get_events_by_artifact_ids([output_model.id])
self.assertEqual(output_event.execution_id, execution.id)
self.assertEqual(output_event.type, metadata_store_pb2.Event.OUTPUT)
# Verifies edges connecting contexts and {artifacts, execution}.
context_ids = [context.id for context in contexts]
self.assertCountEqual(
[c.id for c in m.store.get_contexts_by_artifact(input_example.id)],
context_ids)
self.assertCountEqual(
[c.id for c in m.store.get_contexts_by_artifact(output_model.id)],
context_ids)
self.assertCountEqual(
[c.id for c in m.store.get_contexts_by_execution(execution.id)],
context_ids)
def testGetExecutionsAssociatedWithAllContexts(self):
with metadata.Metadata(connection_config=self._connection_config) as m:
contexts = self._generate_contexts(m)
self.assertLen(contexts, 2)
# Create 2 executions and associate with one context each.
execution1 = execution_lib.prepare_execution(
m, metadata_store_pb2.ExecutionType(name='my_execution_type'),
metadata_store_pb2.Execution.RUNNING)
execution1 = execution_lib.put_execution(m, execution1, [contexts[0]])
execution2 = execution_lib.prepare_execution(
m, metadata_store_pb2.ExecutionType(name='my_execution_type'),
metadata_store_pb2.Execution.COMPLETE)
execution2 = execution_lib.put_execution(m, execution2, [contexts[1]])
# Create another execution and associate with both contexts.
execution3 = execution_lib.prepare_execution(
m, metadata_store_pb2.ExecutionType(name='my_execution_type'),
metadata_store_pb2.Execution.NEW)
execution3 = execution_lib.put_execution(m, execution3, contexts)
# Verify that the right executions are returned.
with self.subTest(for_contexts=(0,)):
executions = execution_lib.get_executions_associated_with_all_contexts(
m, [contexts[0]])
self.assertCountEqual([execution1.id, execution3.id],
[e.id for e in executions])
with self.subTest(for_contexts=(1,)):
executions = execution_lib.get_executions_associated_with_all_contexts(
m, [contexts[1]])
self.assertCountEqual([execution2.id, execution3.id],
[e.id for e in executions])
with self.subTest(for_contexts=(0, 1)):
executions = execution_lib.get_executions_associated_with_all_contexts(
m, contexts)
self.assertCountEqual([execution3.id], [e.id for e in executions])
def testGetArtifactIdsForExecutionIdGroupedByEventType(self):
with metadata.Metadata(connection_config=self._connection_config) as m:
# Register an input and output artifacts in MLMD.
input_example = standard_artifacts.Examples()
input_example.uri = 'example'
input_example.type_id = common_utils.register_type_if_not_exist(
m, input_example.artifact_type).id
output_model = standard_artifacts.Model()
output_model.uri = 'model'
output_model.type_id = common_utils.register_type_if_not_exist(
m, output_model.artifact_type).id
[input_example.id, output_model.id] = m.store.put_artifacts(
[input_example.mlmd_artifact, output_model.mlmd_artifact])
execution = execution_lib.prepare_execution(
m,
metadata_store_pb2.ExecutionType(name='my_execution_type'),
exec_properties={
'p1': 1,
'p2': '2'
},
state=metadata_store_pb2.Execution.COMPLETE)
contexts = self._generate_contexts(m)
execution = execution_lib.put_execution(
m,
execution,
contexts,
input_artifacts={'example': [input_example]},
output_artifacts={'model': [output_model]})
artifact_ids_by_event_type = (
execution_lib.get_artifact_ids_by_event_type_for_execution_id(
m, execution.id))
self.assertDictEqual(
{
metadata_store_pb2.Event.INPUT: set([input_example.id]),
metadata_store_pb2.Event.OUTPUT: set([output_model.id]),
}, artifact_ids_by_event_type)
def testGetArtifactsDict(self):
with metadata.Metadata(connection_config=self._connection_config) as m:
# Create and shuffle a few artifacts. The shuffled order should be
# retained in the output of `execution_lib.get_artifacts_dict`.
input_examples = []
for i in range(10):
input_example = standard_artifacts.Examples()
input_example.uri = 'example{}'.format(i)
input_example.type_id = common_utils.register_type_if_not_exist(
m, input_example.artifact_type).id
input_examples.append(input_example)
random.shuffle(input_examples)
output_models = []
for i in range(8):
output_model = standard_artifacts.Model()
output_model.uri = 'model{}'.format(i)
output_model.type_id = common_utils.register_type_if_not_exist(
m, output_model.artifact_type).id
output_models.append(output_model)
random.shuffle(output_models)
m.store.put_artifacts([
a.mlmd_artifact
for a in itertools.chain(input_examples, output_models)
])
execution = execution_lib.prepare_execution(
m,
metadata_store_pb2.ExecutionType(name='my_execution_type'),
state=metadata_store_pb2.Execution.RUNNING)
contexts = self._generate_contexts(m)
input_artifacts_dict = {'examples': input_examples}
output_artifacts_dict = {'model': output_models}
execution = execution_lib.put_execution(
m,
execution,
contexts,
input_artifacts=input_artifacts_dict,
output_artifacts=output_artifacts_dict)
# Verify that the same artifacts are returned in the correct order.
artifacts_dict = execution_lib.get_artifacts_dict(
m, execution.id, metadata_store_pb2.Event.INPUT)
self.assertCountEqual(['examples'], list(artifacts_dict.keys()))
self.assertEqual([ex.uri for ex in input_examples],
[a.uri for a in artifacts_dict['examples']])
artifacts_dict = execution_lib.get_artifacts_dict(
m, execution.id, metadata_store_pb2.Event.OUTPUT)
self.assertCountEqual(['model'], list(artifacts_dict.keys()))
self.assertEqual([model.uri for model in output_models],
[a.uri for a in artifacts_dict['model']])
if __name__ == '__main__':
tf.test.main()
| 38.904153
| 79
| 0.668063
|
e0d5ac2bb5c31874e96840e3207bc1df07ae272c
| 428
|
py
|
Python
|
testsuite/closure-array/run.py
|
LongerVision/OpenShadingLanguage
|
30d2a4a089c5c9d521b27519329c205763dfe483
|
[
"BSD-3-Clause"
] | 1,105
|
2015-01-02T20:47:19.000Z
|
2021-01-25T13:20:56.000Z
|
testsuite/closure-array/run.py
|
LongerVision/OpenShadingLanguage
|
30d2a4a089c5c9d521b27519329c205763dfe483
|
[
"BSD-3-Clause"
] | 696
|
2015-01-07T23:42:08.000Z
|
2021-01-25T03:55:08.000Z
|
testsuite/closure-array/run.py
|
LongerVision/OpenShadingLanguage
|
30d2a4a089c5c9d521b27519329c205763dfe483
|
[
"BSD-3-Clause"
] | 248
|
2015-01-05T13:41:28.000Z
|
2021-01-24T23:29:55.000Z
|
#!/usr/bin/env python
# Copyright Contributors to the Open Shading Language project.
# SPDX-License-Identifier: BSD-3-Clause
# https://github.com/AcademySoftwareFoundation/OpenShadingLanguage
command += testshade ("-layer alayer a -layer dlayer d --layer clayer c --layer blayer b --connect alayer output_closure clayer in0 --connect dlayer output_closure clayer in1 --connect clayer output_closures blayer input_closures ")
| 53.5
| 233
| 0.794393
|
915a47c7531dd8dfdbdf5bfb6ae18e6870d9038d
| 1,186
|
py
|
Python
|
oo/pessoa.py
|
AdemilsonMelo/pythonbirds
|
f29ea58ad8acb2514b4e6196afd6b268f2c334f9
|
[
"MIT"
] | null | null | null |
oo/pessoa.py
|
AdemilsonMelo/pythonbirds
|
f29ea58ad8acb2514b4e6196afd6b268f2c334f9
|
[
"MIT"
] | null | null | null |
oo/pessoa.py
|
AdemilsonMelo/pythonbirds
|
f29ea58ad8acb2514b4e6196afd6b268f2c334f9
|
[
"MIT"
] | null | null | null |
class Pessoa:
olhos = 2
def __init__(self, *filhos, nome = None, idade = 35):
self.idade = idade
self.nome = nome
self.filhos = list(filhos)
def cumprimentar(self):
return f'Olá {id(self)}'
@staticmethod
def metodo_estatico():
return 42
@classmethod
def nome_e_atributos_de_classes(cls):
return f'{cls} - olhos {cls.olhos}'
if __name__ == '__main__':
renzo = Pessoa(nome ='Renzo')
luciano = Pessoa(renzo, nome='Luciano')
print(Pessoa.cumprimentar(luciano))
print(id(luciano))
print(luciano.cumprimentar())
print(luciano.nome)
print(luciano.idade)
for filho in luciano.filhos:
print(filho.nome)
luciano.sobrenome = 'Ramalho'
del luciano.filhos
luciano.olhos = 1
del luciano.olhos
print(renzo.__dict__)
print(luciano.__dict__)
Pessoa.olhos = 3
print(Pessoa.olhos)
print(luciano.olhos)
print(renzo.olhos)
print(id(Pessoa.olhos), id(luciano.olhos), id(renzo.olhos))
print(Pessoa.metodo_estatico(), luciano.metodo_estatico())
print(Pessoa.nome_e_atributos_de_classes(), luciano.nome_e_atributos_de_classes())
| 26.954545
| 86
| 0.651771
|
ecaeac1c9b9266d71a7bfac0ec5b19382960a73d
| 472
|
py
|
Python
|
trac/versioncontrol/tests/__init__.py
|
wiraqutra/photrackjp
|
e120cba2a5d5d30f99ad084c6521e61f09694ee6
|
[
"BSD-3-Clause"
] | null | null | null |
trac/versioncontrol/tests/__init__.py
|
wiraqutra/photrackjp
|
e120cba2a5d5d30f99ad084c6521e61f09694ee6
|
[
"BSD-3-Clause"
] | null | null | null |
trac/versioncontrol/tests/__init__.py
|
wiraqutra/photrackjp
|
e120cba2a5d5d30f99ad084c6521e61f09694ee6
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
from trac.versioncontrol.tests import cache, diff, svn_authz, svn_fs, api
from trac.versioncontrol.tests.functional import functionalSuite
def suite():
suite = unittest.TestSuite()
suite.addTest(cache.suite())
suite.addTest(diff.suite())
suite.addTest(svn_authz.suite())
suite.addTest(svn_fs.suite())
suite.addTest(api.suite())
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 26.222222
| 74
| 0.701271
|
ded89d087e25d9e2db8450406731cc12139287d8
| 10,428
|
py
|
Python
|
src/project_util.py
|
rrozema12/Data-Mining-Final-Project
|
4848f3daed4b75879b626c5dc460e8dbd70ae861
|
[
"MIT"
] | 1
|
2018-02-04T01:10:20.000Z
|
2018-02-04T01:10:20.000Z
|
src/project_util.py
|
rrozema12/Data-Mining-Final-Project
|
4848f3daed4b75879b626c5dc460e8dbd70ae861
|
[
"MIT"
] | null | null | null |
src/project_util.py
|
rrozema12/Data-Mining-Final-Project
|
4848f3daed4b75879b626c5dc460e8dbd70ae861
|
[
"MIT"
] | null | null | null |
import util
import constants
import table_utils
from tabulate import tabulate  # needed by print_confusion_matrix below
def print_confusion_matrix(labels, class_label_name):
""" Prints the confusion matrix of the given labels
:param labels: A list of tuples of class labels [(actual, predicted),...]
:param class_label_name: The name of the class label
"""
class_labels = list(set(table_utils.getCol(labels, 0))) # all the actual class labels
the_headers = [class_label_name]
the_headers.extend(class_labels)
the_headers.extend(['Total', 'Recognition (%)'])
# makes an table filled with zeros of #columns = len(the_headers) and #rows = len(class_labels)
confusion_matrix = [[0] * len(the_headers) for i in range(len(class_labels))]
# fills out the confusion matrix with the predicted vs. actual
for a_label_point in labels:
actual, predicted = a_label_point
confusion_matrix[class_labels.index(actual)][the_headers.index(predicted)] += 1
# add the rest of the values to the confusion matrix
for i in range(len(confusion_matrix)):
row = confusion_matrix[i] # current row
# adding total to the confusion matrix
total = sum(row)
row[the_headers.index('Total')] = total # add the total in for the row
row[0]= class_labels[i] # adds the class label for the row to the beginning of row
# adding recognition to the confusion matrix (% of guesses in row that are correct
recognition = row[the_headers.index(class_labels[i])] # TP
recognition /= float(total)
recognition *= 100
row[the_headers.index('Recognition (%)')] = recognition
# prints the table
print tabulate(confusion_matrix, headers = the_headers, tablefmt="rst")
def stratified_cross_fold(table, k, class_index, classify, *opts):
"""
Uses stratified crossfolding to predict labels
:param table: Table of data
:param k: Number of folds
:param class_index: the class's index
:param classify: a function to classify on
:param opts: anything else you'd like to pass into the classify
:return: labels in format list of tuples [(actual, predicted),...]
"""
labels = []
folds = strat_folds(table, class_index, k)
for index in range(len(folds)):
test = folds[index] # creates the test from one fold
training = []
# meshes the rest of the folds together to make the training set
for training_index in range(len(folds)):
if training_index != index:
training.extend(folds[training_index])
labels.extend(classify(training, test, class_index, *opts))
return labels
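# A minimal usage sketch (the classify callable follows the call made above;
# my_classifier and predict_label are illustrative names, not project code):
#   def my_classifier(training, test, class_index):
#       return [(row[class_index], predict_label(training, row)) for row in test]
#   labels = stratified_cross_fold(table, 10, class_index, my_classifier)
#   print_confusion_matrix(labels, 'class')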
def strat_folds(table, by_label_index, k):
"""
Creates fold where each fold has the same distrubution of class labels as the origonal table
:param table: table of data
:param by_label_index: the class label index
:param k: the number of partitions to create
:return: a list of tables where each table is a folds, i.e.: [[P1],[P2],..., [Pnum]] where each P is a table
"""
labels_to_rows = {}
# spreads the data out into a dictionary where the key is the class label and the data is a table consisting of
# rows with that class label {class_label:rows_with_class_label
for row in table:
label = row[by_label_index]
try:
labels_to_rows[label].append(row)
except KeyError:
labels_to_rows[label] = [row]
# creates folds by evenly distributing the rows of each class label to the number of partitions
folds = {}
index = 0
for key, table in labels_to_rows.iteritems():
for row in table:
try:
folds[index].append(row)
except KeyError:
folds[index] = [row]
index += 1
            if index >= k:
index = 0
return util.dictionaryToArray(folds)
def getAgeRating(x):
""" Returns the dept of energy number rating"""
# Gets the range values for the DOE rankings
keys = util.getValues(constants.AGE_RATINGS)
# Gets the left-end of the range x belongs in
lowRange = util.getLowRange(keys, x)
# Flips the dictionary, so we can query by value
byValue = util.flipKeyValues(constants.AGE_RATINGS)
return byValue[lowRange]
def getNHTSASize(x):
""" Returns the NHTSA Vehicle size """
# Gets the range values for the DOE rankings
keys = util.getValues(constants.NHTSA)
# Gets the left-end of the range x belongs in
lowRange = util.getLowRange(keys, x)
# Flips the dictionary, so we can query by value
byValue = util.flipKeyValues(constants.NHTSA)
return byValue[lowRange]
def convertRowIntoIndexValuePairs(row):
""" Converts [x, y, z, ...] into [(0, x), (1, y), (2, z)]
for use in the classifiers in their "where" statements
"""
return [ (index, value) for index, value in enumerate(row)]
def getNamedTuples(row, names):
""" Gets a bunch of tuples by their name
Ex: getNamedColsFromRow(row, 'mpg', 'origin', 'weight')
might return [(0, 18.0), (4, 3504), (7, 1)]
WARNING: These don't necessarily return in any specific order.
"""
tuples = []
namesIndexes = [constants.INDICES[name] for name in names]
for index, value in enumerate(row):
if index in namesIndexes:
tuples.append((index, value))
return tuples
def get_survived_value(x):
""" returns the int value for the nominal value survived
:param x: a value that is either 'yes' or 'no'
:return: returns 1 if x is yes, or 0 if x is no
"""
if x == 'yes':
return 1
else:
return 0
def get_sex_value(x):
""" returns the int value for the nominal value sex
:param x: a value that is either 'male' or 'female'
:return: returns 1 if x is male, or 0 if x is female
"""
if x == 'male':
return 1
else:
return 0
def get_age_value(x):
""" returns the int value for the nominal value age
:param x: a value that is either 'adult' or 'child'
    :return: returns 1 if x is adult, or 0 if x is child
"""
if x == 'adult':
return 1
else:
return 0
def get_class_value(x):
""" returns the int value for the ordinal value class
:param x: a value that is either 'crew', 'first', 'second', or 'third'
:return: returns 3 if 'crew', 2 if first, etc.
"""
if x == 'crew':
return 3
elif x == 'first':
return 2
elif x == 'second':
return 1
else:
return 0
"""NEW FINAL PROJECT MAPPING STUFF"""
def get_age(x):
    """ bins a numeric age into a coarse integer category """
    if x > 90:
        return 1
    elif x <= 90 and x > 70:
        return 2
    elif x <= 70 and x > 50:
        return 2
    else:
        return 0
def get_job_type(x):
""" returns the int value for the nominal value survived
:param x: a value that is either 'yes' or 'no'
:return: returns 1 if x is yes, or 0 if x is no
"""
if x == 'Government':
return 1
elif x == 'Private':
return 2
elif x == 'Self-employed':
return 3
else:
return 0
def get_degree(x):
""" returns the int value for the nominal value sex
:param x: a value that is either 'male' or 'female'
:return: returns 1 if x is male, or 0 if x is female
"""
if x == 'HS':
return 1
elif x == 'Bachelors':
return 2
elif x == 'Masters':
return 3
elif x == 'Doctorate':
return 4
elif x == 'College-drop-out':
return 5
elif x == 'Associate':
return 6
elif x == 'Middleschool':
return 7
elif x == 'Elementary':
return 8
elif x == 'Prof-school':
return 9
else:
return 0
def get_marital_status(x):
""" returns the int value for the nominal value age
:param x: a value that is either 'adult' or 'child'
:return: returns 1 if x is adult, or 0 if x is fchild
"""
if x == 'Never-married':
return 1
elif x == 'Married-civ-spouse':
return 2
elif x == 'Divorced':
return 3
elif x == 'Married-spouse-absent':
return 4
elif x == 'Widowed':
return 5
elif x == 'Separated':
return 6
elif x == 'Married-AF-spouse':
return 7
else:
return 0
def get_ethnicity(x):
""" returns the int value for the ordinal value class
:param x: a value that is either 'crew', 'first', 'second', or 'third'
:return: returns 3 if 'crew', 2 if first, etc.
"""
if x == 'White':
return 1
elif x == 'Black':
return 2
elif x == 'Amer-Indian-Eskimo':
return 3
elif x == 'Asian-Pac-Islander':
return 4
else:
return 0
def get_gender(x):
""" returns the int value for the ordinal value class
:param x: a value that is either 'crew', 'first', 'second', or 'third'
:return: returns 3 if 'crew', 2 if first, etc.
"""
if x == 'Male':
return 1
else:
return 0
def get_country(x):
""" returns the int value for the ordinal value class
:param x: a value that is either 'crew', 'first', 'second', or 'third'
:return: returns 3 if 'crew', 2 if first, etc.
"""
if x == 'United-States':
return 1
elif x == 'Philippines':
return 2
elif x == 'Puerto-Rico':
return 3
elif x == 'Mexico':
return 4
elif x == 'Dominican-Republic':
return 5
elif x == 'Portugal':
return 6
elif x == 'Canada':
return 7
elif x == 'Taiwan':
return 8
elif x == 'Cuba':
return 9
elif x == 'Jamaica':
return 10
else:
return 0
def get_salary(x):
""" returns the int value for the ordinal value class
:param x: a value that is either 'crew', 'first', 'second', or 'third'
:return: returns 3 if 'crew', 2 if first, etc.
"""
if x == '>50K':
return '1'
else:
return '0'
def get_salary_box(x):
""" returns the int value for the ordinal value class
:param x: a value that is either 'crew', 'first', 'second', or 'third'
:return: returns 3 if 'crew', 2 if first, etc.
"""
if x == '>50K':
return 1
else:
return 0
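# Example (sketch) of applying the mapping functions above to one raw record;
# the record values are illustrative:
#   raw = ['Private', 'Bachelors', 'Never-married', 'White', 'Male', 'United-States', '>50K']
#   coded = [get_job_type(raw[0]), get_degree(raw[1]), get_marital_status(raw[2]),
#            get_ethnicity(raw[3]), get_gender(raw[4]), get_country(raw[5]), get_salary(raw[6])]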
if __name__ == '__main__':
row = [18.0,8,307.0,130.0,3504,12.0,70,1,"chevrolet chevelle malibu",2881]
print getNamedTuples(row, 'mpg', 'origin', 'weight')
| 28.569863
| 115
| 0.60654
|
aecdde1dfba894df3dabb3922b29c6b230180dca
| 19,924
|
py
|
Python
|
abstract_metal_wires/abstract_metal_wires.py
|
qbilius/autoart
|
e82f0b28848648600e3e61d188231edd26f215b0
|
[
"MIT"
] | 2
|
2022-03-10T04:12:42.000Z
|
2022-03-12T02:44:32.000Z
|
abstract_metal_wires/abstract_metal_wires.py
|
qbilius/autoart
|
e82f0b28848648600e3e61d188231edd26f215b0
|
[
"MIT"
] | null | null | null |
abstract_metal_wires/abstract_metal_wires.py
|
qbilius/autoart
|
e82f0b28848648600e3e61d188231edd26f215b0
|
[
"MIT"
] | null | null | null |
import sys
import psychopy
from psychopy import visual, core
import numpy as np
import scipy.ndimage
import matplotlib.pyplot as plt
import matplotlib as mpl
from PIL import Image
# import hmax
class Filters(object):
def gabor(self,
theta=0,
gamma=1,
sigma=2,
lam=5.6,
k=10
):
# g = np.exp(-np.pi*((alpha*x)**2+(beta*y)**2) ) * np.exp(-2*np.pi*np.i(u*x+v*y))
# g = mu**2/sigma**2 * np.exp( - mu**2 (x**2+y**2) / (2*sigma**2) ) * np.exp(np.i*mu*(x*np.cos(theta)+y*np.sin(theta)))
# Mutch and Lowe, 2006
theta -= np.pi/2
x,y = np.meshgrid(np.arange(-k,k),np.arange(-k,k))
X = x*np.cos(theta) - y*np.sin(theta)
Y = x*np.sin(theta) + y*np.cos(theta)
g = np.exp( - (X**2 + (gamma*Y)**2) / (2*sigma**2) ) * np.cos( 2*np.pi*X/lam )
g -= np.mean(g) # mean 0
g /= np.sum(g**2) # energy 1
return g
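    # Example (sketch): build a small oriented filter bank and apply one filter
    # to a 2-D image array `img` (the name is illustrative):
    #   bank = [Filters().gabor(theta=np.pi/8 * i) for i in range(8)]
    #   response = scipy.ndimage.convolve(img, bank[0], mode='nearest')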
def gabor_pyramid(self):
pass
def second_gaussian(self):
pass
def gabor_circle(self,
r = 40,
k = 10,
#n = 36,
#theta=0,
gamma=1,
sigma=2,
lam=5.6,
):
oris = 2*np.pi/r*np.arange(r)
k += r
circle = np.zeros((2*k,2*k))
for ori in oris:
theta = ori - np.pi/2
x,y = np.meshgrid(np.arange(-k,k),np.arange(-k,k))
x -= -r*np.cos(theta)
y -= r*np.sin(theta)
X = x*np.cos(theta) - y*np.sin(theta)
Y = x*np.sin(theta) + y*np.cos(theta)
g = np.exp( - (X**2 + (gamma*Y)**2) / (2*sigma**2) ) * np.cos( 2*np.pi*X/lam )
g -= np.mean(g) # mean 0
g /= np.sum(g**2) # energy 1
circle += g
#
#circle[circle.shape[0]/2-4:circle.shape[0]/2+4,
#circle.shape[1]/2-4:circle.shape[1]/2+4] = 4*np.max(circle)
#import pdb; pdb.set_trace()
return circle
def plot(self):
circle = self.gabor_circle()
plt.imshow(circle,cmap=mpl.cm.gray)
plt.show()
sys.exit()
class association_field(object):
def naive(self,
o1,
o2,
a=.6 # from Sigman et al., 2001
):
        r = np.linalg.norm(np.subtract(o2, o1))
        theta1 = np.arctan2(o1[1], o1[0])
        theta2 = np.arctan2(o2[1], o2[0])
        # phi was undefined in the original; it is assumed here to be the
        # orientation of the segment joining the two elements
        phi = np.arctan2(o2[1] - o1[1], o2[0] - o1[0])
        optimal_angle = 2*theta1 - phi
# penalty for suboptimal angle times penalty for distance
w = np.cos(2*(optimal_angle - theta2)) * r**(-a)
return w
def watts(self,
e1, # one gabor (x,y) position and orientation (in degrees)
e2, # the rest of the field
ds = .1, # size of the Gaussian envelope (overall size))
cs = np.pi/9, # curvature sensitivity
ts = np.pi/18, # tolerance for non-circularity (larger values mean that cocircularity is more ignored)
method = np.sum # how to calculate the final weight; could also be np.max
):
# R = np.array([np.cos(-t0), -sin ])
x, y, theta = e2-e1
        t0 = e1[2]  # assumed: rotate into the reference element's frame (t0 was undefined)
        xx = x*np.cos(-t0) - y*np.sin(-t0)
        yy = y*np.cos(-t0) + x*np.sin(-t0)
dd = (xx**2+yy**2)/ds**2
curvature = yy/(dd*ds)
spatial_weight = np.exp(-dd) * np.exp(-(curvature/cs)**2 / 2 )
        theta_optimal = 2*np.arctan2(yy,xx) - e1[2] # the e1[2] term was not present in the original
        # presumably e1[2]=0 in those simulations
        theta_difference = theta_optimal - e2[2] # instead of subtracting theta
if theta_difference > np.pi/2 or theta_difference < -np.pi/2:
theta_difference = np.pi - np.abs(theta_difference)
        a = np.exp(-((theta_difference/ts)**2)/2)
weight = method(spatial_weight*a)
return weight
def ernst_orig(self,
ei,
ej,
r0 = 1.16/4,
sigma_a = .27, # tolerance to co-circularity; chosen to be optimal from doi:10.1371/journal.pcbi.1002520.g006
sigma_b = .47, # curvature; chosen to be optimal from doi:10.1371/journal.pcbi.1002520.g007 assuming a typo in reporting .57
):
x, y, beta = ej-ei # angle between the two elements
r = np.linalg.norm([x,y])
alpha = np.arctan2(y,x)-ei[2] # angle of a line connecting the two centers (minus ei orientation)
# beta = ej[2] - ei[2]
Ad = np.exp(-r/r0)
At = np.cosh( np.cos(beta/2-alpha)/sigma_a**2 + 4*np.cos(beta/2)/sigma_b**2 )
A = Ad*At
# plt.figure()
# plt.imshow(Ad)
# plt.figure()
# plt.imshow(At)
# plt.show()
#import pdb; pdb.set_trace()
# K = 2*np.sin(beta/2)/r
return A
def ernst(self,
shape=(40,40),
beta = 0, # orientation
r0 = 11.6/4,
sigma_a = .27, # tolerance to co-circularity; chosen to be optimal from doi:10.1371/journal.pcbi.1002520.g006
sigma_b = .47, # curvature; chosen to be optimal from doi:10.1371/journal.pcbi.1002520.g007 assuming a typo in reporting .57
):
x,y=np.meshgrid(np.arange(-shape[0]/2,shape[0]/2),np.arange(-shape[1]/2,shape[1]/2))
# x, y, beta = ej-ei # angle between the two elements
r = np.sqrt(x**2+y**2)
alpha = np.arctan2(y,x) # angle of a line connecting the two centers
Ad = np.exp(-r/r0)
At = np.cosh( -np.cos(beta/2-alpha)/sigma_a**2 + 4*np.cos(beta/2)/sigma_b**2 )+\
np.cosh( np.cos(beta/2-alpha)/sigma_a**2 + 4*np.cos(beta/2)/sigma_b**2 )
A = Ad*At
A = A/np.sum(A)
return A-np.mean(A)
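    # Example (sketch): association fields for a bank of orientations, matching
    # how ernst() is invoked further below (beta = -2*ori):
    #   fields = [association_field().ernst(beta=-2 * np.pi/18 * i) for i in range(18)]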
def ernst_half(self,
shape=(40,40),
beta = 0, # orientation
r0 = 11.6/4,
sigma_a = .27, # tolerance to co-circularity; chosen to be optimal from doi:10.1371/journal.pcbi.1002520.g006
sigma_b = .47, # curvature; chosen to be optimal from doi:10.1371/journal.pcbi.1002520.g007 assuming a typo in reporting .57
curvature = 'convex', # 'convex' or 'concave'
):
x,y=np.meshgrid(np.arange(-shape[0]/2,shape[0]/2),np.arange(-shape[1]/2,shape[1]/2))
# x, y, beta = ej-ei # angle between the two elements
r = np.sqrt(x**2+y**2)
alpha = np.arctan2(y,x) # angle of a line connecting the two centers
Ad = np.exp(-r/r0)
At = np.cosh( -np.cos(beta/2-alpha)/sigma_a**2 + 4*np.cos(beta/2)/sigma_b**2 )+\
np.cosh( np.cos(beta/2-alpha)/sigma_a**2 + 4*np.cos(beta/2)/sigma_b**2 )
A = Ad*At
if curvature == 'convex':
A[:A.shape[0]/2] = 0
else:
A[A.shape[0]/2:] = 0
return A/np.sum(A)
def ernst_trans(self,
shape=(100,100),
#size = (120,120),
beta = 0, # orientation
r0 = 11.6/4,
sigma_a = .27, # tolerance to co-circularity; chosen to be optimal from doi:10.1371/journal.pcbi.1002520.g006
sigma_b = .47, # curvature; chosen to be optimal from doi:10.1371/journal.pcbi.1002520.g007 assuming a typo in reporting .57
d = 10,
):
x,y=np.meshgrid(np.arange(-shape[0]/2,shape[0]/2),np.arange(-shape[1]/2,shape[1]/2))
# x, y, beta = ej-ei # angle between the two elements
shift_x = d*np.sin(beta/2)
shift_y = -d*np.cos(beta/2)
A = np.zeros(shape)
for ind in range(-shape[0]/2/d,shape[0]/2/d):
xn = x-ind*shift_x
yn = y-ind*shift_y
r = np.sqrt(xn**2+yn**2)
alpha = np.arctan2(yn,xn) # angle of a line connecting the two centers
Ad = np.exp(-r/r0)
At = np.cosh( -np.cos(beta/2-alpha)/sigma_a**2 + 4*np.cos(beta/2)/sigma_b**2 )+\
np.cosh( np.cos(beta/2-alpha)/sigma_a**2 + 4*np.cos(beta/2)/sigma_b**2 )
A += Ad*At
A = A/np.sum(A)
A -= np.mean(A)
return A
def plot(self, filter_name=None):
assoc = self.ernst_trans(beta=-2*np.pi/2)
plt.imshow(assoc)
plt.show()
sys.exit()
class Model(object):
def __init__(self):
pass
def reSample(self,w):
#RESAMPLE Residual Resampling (Liu et al)
# IX = reSample(w), where w is weights and IX is index set of the
# resulting particles
n = len(w)
w = n*w/np.sum(w) # normalize to sum up to n
wN = np.floor(w) # integer parts
wR = w-wN # residual weigths
wR = wR/np.sum(wR) # normalize
# filling indexes with integer parts
k = 1
IX = np.zeros((n,1))
for i in range(n):
for j in range(wN[i]):
IX[k] = i
k += 1
# use residuals to fill rest with roulette wheel selection
cs = np.cumsum(wR)
for j in range(k,n):
ix = np.nonzero(cs > np.random.rand())[0]
IX[j] = ix
def weighted_sample(self,weights, n):
a,b = weights.shape
weights = weights.ravel()
total = np.sum(weights)
i = 0
w = weights[0]
v = 0
out = []
while n:
x = total * (1 - np.random.rand() ** (1.0 / n))
total -= x
while x > w:
x -= w
i += 1
w = weights[i]
v = i
w -= x
out.append((v/b,v%b))
n -= 1
return np.array(out)
def get_filters(self, filter_sizes, num_orientation, sigDivisor = 4.):
self.filter_sizes = filter_sizes
self.num_orientation = num_orientation
self.gaussFilters = []
for filter_size in filter_sizes:
fxx = np.zeros((filter_size,filter_size,num_orientation))
sigmaq = (filter_size/sigDivisor)**2
i = np.arange(-filter_size/2+1,filter_size/2+1)
ii,jj = np.meshgrid(i,i)
for t in range(num_orientation):
theta = t*np.pi/num_orientation
x = ii*np.cos(theta) - jj*np.sin(theta)
y = ii*np.sin(theta) + jj*np.cos(theta)
fxx[:,:,t] = (y**2/sigmaq-1)/sigmaq * np.exp(-(x**2+y**2)/(2*sigmaq))
fxx[:,:,t] -= np.mean(fxx[:,:,t])
fxx[:,:,t] /= np.sqrt(np.sum(fxx[:,:,t]**2))
self.gaussFilters.append(fxx)
def set_filters(self):
pass
def S1resp_zeropad(self, stim):
# function S1 = S1resp_zeropad (stim)
# This function returns S1 responses with zero-padding,
# using the difference of the Gaussians as S1 filters.
# Filters are based on the original HMAX model.
# filter_sizes = filter_sizes_all[whichBand]
num_filter = len(self.filter_sizes)
# make S1 same size as stimulus
S1 = np.zeros((stim.shape[0], stim.shape[1], num_filter, self.num_orientation))
for j in range(num_filter):
S1_filter = self.gaussFilters[j]
fs = self.filter_sizes[j]
norm = scipy.ndimage.convolve(stim**2, np.ones((fs,fs)),mode='constant') + sys.float_info.epsilon
for i in range(self.num_orientation):
S1_buf = scipy.ndimage.convolve(stim, S1_filter[:,:,i],mode='constant')
S1[:,:,j,i] = S1_buf/np.sqrt(norm)
# Riesenhuber states that this 'contrast invariance' is done at C1
# and S1 should rather produce outputs in the range [-1,1]
return S1
def get_saliency_map(self, stim,n_part=None):
# xi = np.random.randint(0,stim.shape[0],n_elem)
# yi = np.random.randint(0,stim.shape[1],n_elem)
saliency = np.zeros((stim.size,))
inds = np.random.randint(0,stim.size,n_part)
saliency[inds] = 1
return saliency.reshape(stim.shape)
def detect_edges(self,stim, particle_pos, ori = 0, filter_stack = None):
def selective_filter(array, cond):
if cond: return np.dot(filter_flat,array)
else: return 0
gabor = Filters().gabor(theta=ori)
k = gabor.shape[0]/2
filter_flat = gabor.ravel()
# edge_map = sp.ndimage.generic_filter(stim,selective_filter,size = gabor.shape)
edge_map = np.zeros(stim.shape)
for pos in particle_pos:
# check that the filter fits within the stim box
if pos[0]-k>0 and pos[0]+k<stim.shape[0] and \
pos[1]-k>0 and pos[1]+k<stim.shape[1]:
neighbors = stim[pos[0]-k:pos[0]+k,pos[1]-k:pos[1]+k]
edge_map[pos[0],pos[1]] = np.dot(neighbors.ravel(),filter_flat)
else:
edge_map[pos[0],pos[1]] = 0
return np.abs(edge_map)
def probmap(self,edge_map,assoc_field):
# here the idea is to take the response to an edge at each position
# and multiply it by association field probabilities
# then add the resulting posteriors (across positions)
# and move particles to the highest probability regions
# Convolution does this trick albeit maybe it's not trivial too see that
prob_map = scipy.ndimage.convolve(edge_map,assoc_field, mode='constant')# /\
#scipy.ndimage.correlate(edge_map**2,np.ones(assoc_field.shape))
return prob_map
def run_thres(self):
im = Image.open('010a.png').convert('L')
stim = np.asarray(im)*1.
oris = np.pi/18*np.arange(18)
rs = np.arange(20,50,2)
thres = .002
grid_size = 10
sf = 1
mean = stim
for t in range(1): # loop over time
print str(t)+':',
edge_map = np.zeros((len(oris),)+mean.shape)
#surface_map = np.zeros((len(oris),)+mean.shape)
#for curno, cur in enumerate(['convex','concave']):
#for oi,ori in enumerate(oris):
for ri,r in enumerate(rs):
print ri,
#gabor = Filters().gabor(theta=ori, sigma=2*sf,lam=5.6*sf,k=10*sf)
gabor = Filters().gabor_circle(r=r)
norm = scipy.ndimage.correlate(mean**2,np.ones(gabor.shape),mode='nearest')
edges = scipy.ndimage.correlate(mean,gabor,mode='nearest')/np.sqrt(norm)
edges[edges<thres] = 0
#assoc_field = Filters().association_field().ernst_trans(beta=-2*ori)
#assoc_field90 = Filters().association_field().ernst(beta=-2*ori+np.pi/2)
#assoc_field_s = Filters().association_field().ernst(shape=(40,40),
#beta=-2*ori+np.pi/2,
#r0=33,
#)
#edges = self.probmap(edges,assoc_field)#-assoc_field90)
#edges -= np.max(edges)*.3
edges[edges<0] = 0
#surface_map[oi] = scipy.ndimage.convolve(mean,assoc_field_s)
#import pdb; pdb.set_trace()
edge_map[ri] = edges
mean = np.max(edge_map, axis=0)
#import pdb; pdb.set_trace()
#mean_s = np.max(surface_map,0)
#for sno in range(len(mean)):
#plt.subplot(2,2,sno+1)
#plt.imshow(mean[sno],cmap=mpl.cm.gray)
#plt.subplot(121)
#plt.imshow(stim,cmap=mpl.cm.gray)
#plt.subplot(122)
plt.imshow(mean,cmap=mpl.cm.gray)
plt.axis('off')
#plt.show()
plt.savefig('plieno_voratinkliai.jpg', dpi=300, format='jpg',
bbox_inches='tight', pad_inches=0)
sys.exit()
#import pdb; pdb.set_trace()
#k = gabor.shape[0]/2
#grid = np.meshgrid(
#np.arange(k,stim.shape[0],grid_size),
#np.arange(k,stim.shape[1],grid_size)
#)
#filter_flat = gabor.ravel()
#edge_map = np.zeros(stim.shape)
#for x,y in grid:
#neighbors = stim[x-k:x+k,y-k:y+k]
#edge_map[x,y] = np.dot(neighbors.ravel(),filter_flat)
#edge_map = self.detect_edges(stim,particle_pos,ori= ori)
sys.exit()
saliency = self.get_saliency_map(stim,n_part=n_part)
for t in range(10): # loop over time
print t,
prob_map = np.zeros([len(oris),stim.shape[0],stim.shape[1]])
for oi,ori in enumerate(oris):
# prob_map[oi]=scipy.ndimage.convolve(stim,Filters().gabor(theta=ori))
#plt.imshow(saliency);plt.show()
particle_pos = self.weighted_sample(saliency,n_part)
# ch=np.zeros(stim.shape)
# for p in particle_pos:
# ch[p[0],p[1]] = 1
# if t==1:plt.imshow(ch,cmap='gray');plt.show()
edge_map = self.detect_edges(stim,particle_pos,ori= ori)
# plt.imshow(edge_map,cmap='gray');plt.show()
                assoc_field = association_field().ernst(beta=-2*ori)
# plt.imshow(assoc_field,cmap='gray');plt.show()
prob_map[oi] = self.probmap(edge_map, assoc_field)
# plt.imshow(prob_map[oi],cmap='gray');plt.show()
saliency = np.sum(prob_map,axis=0)
saliency /= np.sum(saliency)
plt.imshow(saliency,cmap='gray');plt.colorbar();plt.show()
def run_partfilt(self):
im = Image.open('L-POST/images/010.png').convert('L')
stim = np.asarray(im)*1.
oris = np.pi/18*np.arange(18)
n_part = 1000
saliency = self.get_saliency_map(stim,n_part=n_part)
for t in range(10): # loop over time
print t,
prob_map = np.zeros([len(oris),stim.shape[0],stim.shape[1]])
for oi,ori in enumerate(oris):
# prob_map[oi]=scipy.ndimage.convolve(stim,Filters().gabor(theta=ori))
#plt.imshow(saliency);plt.show()
particle_pos = self.weighted_sample(saliency,n_part)
# ch=np.zeros(stim.shape)
# for p in particle_pos:
# ch[p[0],p[1]] = 1
# if t==1:plt.imshow(ch,cmap='gray');plt.show()
edge_map = self.detect_edges(stim,particle_pos,ori= ori)
# plt.imshow(edge_map,cmap='gray');plt.show()
                assoc_field = association_field().ernst(beta=-2*ori)
# plt.imshow(assoc_field,cmap='gray');plt.show()
prob_map[oi] = self.probmap(edge_map, assoc_field)
# plt.imshow(prob_map[oi],cmap='gray');plt.show()
saliency = np.sum(prob_map,axis=0)
saliency /= np.sum(saliency)
plt.imshow(saliency,cmap='gray');plt.colorbar();plt.show()
# plt.imshow(stim,cmap='gray');plt.show()
def proximity_rows(win):
dot = visual.Circle(win, radius = .03)
for i in range(5):
for j in range(2):
dot.setPos([.1*(i-2),.2*j])
dot.draw()
def run():
g = Model()
    g.get_filters([7,9,11,13,15], 12)
win = visual.Window(size = (256,256))
    proximity_rows(win)
win.getMovieFrame(buffer='back')
# win.flip()
# core.wait(1)
win.clearBuffer()
stim = win.movieFrames[0]
stim = np.asarray(stim.convert('L'))*1.
win.movieFrames = []
win.close()
S1resp = g.S1resp_zeropad(stim)
plt.imshow(np.sum(np.sum(S1resp,axis=3),axis=2))
plt.show()
if __name__ == "__main__":
g = Model()
g.run_thres()
#Filters.association_field().plot()
#Filters().plot()
| 36.15971
| 137
| 0.524995
|
03da08fca9215585c345e38af24d2e5b59e3ec9d
| 412
|
py
|
Python
|
647-palindromic-substrings/647-palindromic-substrings.py
|
tlylt/LeetCodeAnki
|
9f69504c3762f7895d95c2a592f18ad395199ff4
|
[
"MIT"
] | 1
|
2022-02-14T08:03:32.000Z
|
2022-02-14T08:03:32.000Z
|
647-palindromic-substrings/647-palindromic-substrings.py
|
tlylt/LeetCodeAnki
|
9f69504c3762f7895d95c2a592f18ad395199ff4
|
[
"MIT"
] | null | null | null |
647-palindromic-substrings/647-palindromic-substrings.py
|
tlylt/LeetCodeAnki
|
9f69504c3762f7895d95c2a592f18ad395199ff4
|
[
"MIT"
] | null | null | null |
class Solution:
def countSubstrings(self, s: str) -> int:
ans = 0
for i in range(len(s)):
ans += self.helper(s, i, i, len(s))
ans += self.helper(s, i, i+1, len(s))
return ans
def helper(self, s, l, r, end):
ans = 0
while l >= 0 and r < end and s[l] == s[r]:
ans += 1
l -= 1
r += 1
return ans
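# Quick sanity check (sketch): expand-around-center counts each odd- and
# even-length palindromic substring exactly once, e.g.
#   Solution().countSubstrings("abc")  # -> 3 ("a", "b", "c")
#   Solution().countSubstrings("aaa")  # -> 6 (three "a", two "aa", one "aaa")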
| 27.466667
| 50
| 0.417476
|
1444139d9fc93abf885986970a756355c74285ca
| 3,348
|
py
|
Python
|
discordbot.py
|
nekosuke1/discordpy-startup
|
b6680c461c90a06ed8372545e944aad4f3be9203
|
[
"MIT"
] | null | null | null |
discordbot.py
|
nekosuke1/discordpy-startup
|
b6680c461c90a06ed8372545e944aad4f3be9203
|
[
"MIT"
] | null | null | null |
discordbot.py
|
nekosuke1/discordpy-startup
|
b6680c461c90a06ed8372545e944aad4f3be9203
|
[
"MIT"
] | null | null | null |
import os
import traceback
import discord
import random  # used by the omikuji (fortune-telling) command
from datetime import datetime
token = os.environ['DISCORD_BOT_TOKEN']
client = discord.Client()
@client.event
async def on_ready():
"""起動時に通知してくれる処理"""
print('ログインしました')
print(client.user.name) # ボットの名前
print(client.user.id) # ボットのID
print(discord.__version__) # discord.pyのバージョン
print('---起動完了、雑談用ファイル1---')
await client.change_presence(activity=discord.Game(name="起動時間-7:04~22:30/!nekosukehelp", type=1))
async def create_channel(message, channel_name):
category_id = message.channel.category_id
category = message.guild.get_channel(category_id)
new_channel = await category.create_text_channel(name=channel_name)
return new_channel
@client.event
async def on_message(message):
    # Only handle messages whose author is not a bot
    if not message.author.bot:
if message.content.startswith('チャンネル作成'):
        # Run the async channel-creation helper and get the Channel object back
new_channel = await create_channel(message, channel_name='猫助が作成したチャンネル')
        # Send a message containing a link to the newly created channel
text = f'{new_channel.mention} を作成したよーん'
await message.channel.send(text)
elif message.content == "!Nhelp":
        # Add reaction icons to the help message
q = await message.channel.send("音楽のヘルプかな?使い方:まず、ボイスチャンネルに入って、チャットに!N1~5の数値を入力してその後に、playを入れてエンター!(既に猫助がいる場合は、resetと打って送信して一度出入りしてください、この時、既に猫助がいるボイスチャンネルにいる必要があります)")
[await q.add_reaction(i) for i in ('⭕', '❌')] # for文の内包表記
if message.content == 'えさだよー':
await message.channel.send('にゃーん')
if message.content == 'マタタビだよー':
await message.channel.send('にゃーん')
if message.content == '猫好き':
await message.channel.send('にゃーん')
if message.content == 'fuck':
await message.channel.send('しゃー')
if message.content == '!nekosukehelp':
await message.channel.send('最近追加された機能です!neko-gというチャンネルを作成し、nekosuke-webhookというウェブフックを追加するとグローバルチャットができます!')
if message.content == '犬嫌い':
await message.channel.send('にゃーん')
if message.content == 'ちゅーるだよー':
await message.channel.send('にゃーん')
elif message.content == "おみくじ":
        # Send an Embed message and pick a random fortune
embed = discord.Embed(title="おみくじ", description=f"{message.author.mention}さんの今日のにゃん勢は!",
color=0x2ECC69)
embed.set_thumbnail(url=message.author.avatar_url)
embed.add_field(name="[運勢] ", value=random.choice(('にゃい吉', '吉', '凶', 'にゃい凶','あんまり引くと怒るにゃ-大大凶','にゃい吉')), inline=False)
await message.channel.send(embed=embed)
GLOBAL_CH_NAME = "neko-g" # グローバルチャットのチャンネル名
GLOBAL_WEBHOOK_NAME = "neko-webhook" # グローバルチャットのWebhook名
if message.channel.name == GLOBAL_CH_NAME:
        # The message was posted to a global-chat channel, so forward it to the others
await message.delete()
channels = client.get_all_channels()
global_channels = [ch for ch in channels if ch.name == GLOBAL_CH_NAME]
for channel in global_channels:
ch_webhooks = await channel.webhooks()
webhook = discord.utils.get(ch_webhooks, name=GLOBAL_WEBHOOK_NAME)
if webhook is None:
                # This channel has no webhook with the expected name, so skip it
continue
await webhook.send(content=message.content,
username=message.author.name,
avatar_url=message.author.avatar_url_as(format="png"))
client.run(token)
| 40.337349
| 174
| 0.680108
|
1286a36a767791fab597432bc8a4adb71dfc32eb
| 1,050
|
py
|
Python
|
core/tests/test_models.py
|
gbutrykowska/recipe-app-api
|
7195d8af81bf3feaf6cf4cc5060dec4e26236203
|
[
"MIT"
] | null | null | null |
core/tests/test_models.py
|
gbutrykowska/recipe-app-api
|
7195d8af81bf3feaf6cf4cc5060dec4e26236203
|
[
"MIT"
] | null | null | null |
core/tests/test_models.py
|
gbutrykowska/recipe-app-api
|
7195d8af81bf3feaf6cf4cc5060dec4e26236203
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
email = 'test@gmail.com'
password = 'Testpass123'
user = get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
email = 'test@GMAIL.COM'
user = get_user_model().objects.create_user(email, 'test123')
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123')
def test_create_new_superuser(self):
user = get_user_model().objects.create_superuser(
'test@gmail.com',
'test123'
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
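# To run just this module in a Django project (sketch; the app label "core" is
# inferred from the file path): python manage.py test core.tests.test_models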
| 30
| 69
| 0.661905
|
a18b8a73e58ee5b7c4c68e8d5456f41b1863c6da
| 5,075
|
py
|
Python
|
hyper_parameter_tuning/_v6_build_model_general_u.py
|
sunway1999/deep_omics
|
5ceb61aa1555ceed49c85a1b49c99ca9ca48e6b5
|
[
"MIT"
] | 16
|
2022-01-11T19:58:18.000Z
|
2022-02-27T14:48:15.000Z
|
hyper_parameter_tuning/_v6_build_model_general_u.py
|
sunway1999/deep_omics
|
5ceb61aa1555ceed49c85a1b49c99ca9ca48e6b5
|
[
"MIT"
] | null | null | null |
hyper_parameter_tuning/_v6_build_model_general_u.py
|
sunway1999/deep_omics
|
5ceb61aa1555ceed49c85a1b49c99ca9ca48e6b5
|
[
"MIT"
] | 4
|
2022-01-15T03:25:29.000Z
|
2022-03-27T00:21:02.000Z
|
# change CNN structure to the same as that from the
# De novo prediction of cancer-associated T cell receptors
# for noninvasive cancer detection
# paper
# https://github.com/s175573/DeepCAT
# all parameters for CNN part are directly carried over from
# the implementation in this repo
from tensorflow.keras.activations import relu
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Embedding, Flatten, Conv1D, MaxPooling1D
from tensorflow.keras.layers import Reshape, Dropout, concatenate
# structure currently limited to maximum two dense layers
# and one dropout layer
def get_model(HLA_shape, V_shape, CDR3_shape, len_shape, \
cdr1_shape, cdr2_shape, cdr25_shape,
V_cdrs = 2, \
CNN_flag = False, \
n_dense = 1, n_units = [16], \
dropout_flag = False, p_dropout = 0.2):
# check the inputs:
if n_dense >2:
print("Error from func get_model: number of dense layers not coded for yet.")
return
if n_dense > 1 and n_dense > len(n_units):
print('Error from func get_model: n_units input is not long enough.')
return
# Define input layers
HLA_input = Input(HLA_shape)
HLA_reshape = Reshape((HLA_shape[0] * HLA_shape[1],), \
input_shape = HLA_shape)(HLA_input)
V_input = Input(V_shape) #(28,)
CDR3_input = Input(CDR3_shape)
len_input = Input(len_shape)
cdr1_input = Input(cdr1_shape)
cdr2_input = Input(cdr2_shape)
cdr25_input = Input(cdr25_shape)
cdr1_reshape = Reshape((cdr1_shape[0] * cdr1_shape[1],), \
input_shape = cdr1_shape)(cdr1_input)
cdr2_reshape = Reshape((cdr2_shape[0] * cdr2_shape[1],), \
input_shape = cdr2_shape)(cdr2_input)
cdr25_reshape = Reshape((cdr25_shape[0] * cdr25_shape[1],), \
input_shape = cdr25_shape)(cdr25_input)
# whether to use CNN or not
if CNN_flag:
# construct CDR3_branches
CDR3_branch = Conv1D(filters=8, kernel_size=2, activation=relu, \
input_shape = CDR3_shape, name='Conv_CDR3_1')(CDR3_input)
CDR3_branch = MaxPooling1D(pool_size=2, strides=1, padding='valid', \
name='MaxPooling_CDR3_1')(CDR3_branch)
CDR3_branch = Conv1D(filters=16, kernel_size=2, activation=relu, \
input_shape = CDR3_shape, name='Conv_CDR3_2')(CDR3_branch)
CDR3_branch = MaxPooling1D(pool_size=2, strides=1, padding='valid', \
name='MaxPooling_CDR3_2')(CDR3_branch)
CDR3_flatten = Flatten(name='Flatten_CDR3')(CDR3_branch)
CDR3_reshape = Reshape((CDR3_shape[0] * CDR3_shape[1],), \
input_shape = CDR3_shape)(CDR3_input)
#CDR3_branches.append(CDR3_branch)
CDR3_inter_layer = concatenate([CDR3_flatten, CDR3_reshape], axis=-1)
else:
CDR3_inter_layer = Reshape((CDR3_shape[0] * CDR3_shape[1],), \
input_shape = CDR3_shape)(CDR3_input)
# concatenate parts together
HLA_part = Dense(64, activation = relu)(HLA_reshape)
if V_cdrs == 2:
TCR_combined = concatenate([V_input, len_input, CDR3_inter_layer, \
cdr1_reshape, cdr2_reshape, cdr25_reshape])
TCR_part = Dense(64, activation = relu)(TCR_combined)
inter_layer = concatenate([HLA_part, TCR_part])
elif V_cdrs == 0:
TCR_combined = concatenate([V_input, len_input, CDR3_inter_layer])
TCR_part = Dense(64, activation = relu)(TCR_combined)
inter_layer = concatenate([HLA_part, TCR_part])
else:
TCR_combined = concatenate([len_input, CDR3_inter_layer, \
cdr1_reshape, cdr2_reshape, cdr25_reshape])
TCR_part = Dense(64, activation = relu)(TCR_combined)
inter_layer = concatenate([HLA_part, TCR_part])
# move on to see how many dense layers we want
# and whether we want a dropout layer
if n_dense == 1:
if not dropout_flag:
last_layer = Dense(n_units[0], activation = relu)(inter_layer)
else:
dense_layer = Dense(n_units[0], activation = relu)(inter_layer)
last_layer = Dropout(p_dropout)(dense_layer)
else:
if not dropout_flag:
first_dense = Dense(n_units[0], activation = relu)(inter_layer)
last_layer = Dense(n_units[1], activation = relu)(first_dense)
else:
first_dense = Dense(n_units[0], activation = relu)(inter_layer)
dropout_layer = Dropout(p_dropout)(first_dense)
last_layer = Dense(n_units[1], activation = relu)(dropout_layer)
# final output layer
output = Dense(1, activation = 'sigmoid', name = 'output')(last_layer)
# build the model
model = Model(inputs=[HLA_input, V_input, CDR3_input, len_input, \
cdr1_input, cdr2_input, cdr25_input], outputs = output)
return model
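# A minimal usage sketch (the shapes below are illustrative assumptions, except
# V_shape, which the code above notes as (28,)):
#   model = get_model(HLA_shape=(34, 21), V_shape=(28,), CDR3_shape=(27, 21),
#                     len_shape=(1,), cdr1_shape=(7, 21), cdr2_shape=(8, 21),
#                     cdr25_shape=(6, 21), n_dense=2, n_units=[32, 16],
#                     dropout_flag=True, p_dropout=0.2)
#   model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])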
| 48.333333
| 90
| 0.636256
|
c280ec56e29c9a2379eedae6b99e2908118f8a4c
| 8,047
|
py
|
Python
|
demo/HuggingFace/GPT2/frameworks.py
|
SsisyphusTao/TensorRT
|
69f5a5093a39184e137a55c908d5c4d1340b009a
|
[
"Apache-2.0"
] | 2
|
2020-06-17T06:01:27.000Z
|
2021-07-31T15:47:09.000Z
|
demo/HuggingFace/GPT2/frameworks.py
|
SsisyphusTao/TensorRT
|
69f5a5093a39184e137a55c908d5c4d1340b009a
|
[
"Apache-2.0"
] | null | null | null |
demo/HuggingFace/GPT2/frameworks.py
|
SsisyphusTao/TensorRT
|
69f5a5093a39184e137a55c908d5c4d1340b009a
|
[
"Apache-2.0"
] | null | null | null |
# std
import os
import sys
import argparse
from typing import List
# huggingface
from transformers import (
GPT2LMHeadModel,
GPT2Tokenizer,
GPT2Config,
)
# Add syspath for custom library
if __name__ == "__main__":
filepath = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.join(filepath, os.pardir)
sys.path.append(project_root)
# helpers
from NNDF.interface import FrameworkCommand
from NNDF.general_utils import confirm_folder_delete, NNFolderWorkspace
from NNDF.networks import (
NetworkResult,
NetworkMetadata,
NetworkRuntime,
Precision,
NetworkModel,
NetworkModels,
TimingProfile,
)
from GPT2.export import GPT2TorchFile
from GPT2.GPT2ModelConfig import GPT2ModelTRTConfig
from GPT2.measurements import gpt2_inference, full_inference_greedy
class GPT2HuggingFace(FrameworkCommand):
def __init__(self):
super().__init__(
GPT2ModelTRTConfig, description="Runs framework results for GPT2 model."
)
# Default inference input used during inference stage
self.onnx_gpt2 = None
self.torch_gpt2_dir = None
def generate_and_download_framework(
self, metadata: NetworkMetadata, workspace: NNFolderWorkspace
) -> NetworkModels:
cache_variant = False
if metadata.other.kv_cache:
cache_variant = True
trt_gpt2_config = self.config
metadata_serialized = trt_gpt2_config.get_metadata_string(metadata)
workspace_dir = workspace.get_path()
pytorch_model_dir = os.path.join(workspace_dir, metadata_serialized)
# We keep track of the generated torch location for cleanup later
self.torch_gpt2_dir = pytorch_model_dir
model = None
tfm_config = GPT2Config(use_cache=cache_variant)
if not os.path.exists(pytorch_model_dir):
# Generate the pre-trained weights
model = GPT2LMHeadModel(tfm_config).from_pretrained(metadata.variant)
model.save_pretrained(pytorch_model_dir)
print("Pytorch Model saved to {}".format(pytorch_model_dir))
else:
print(
"Frameworks file already exists, skipping generation and loading from file instead."
)
model = GPT2LMHeadModel(tfm_config).from_pretrained(pytorch_model_dir)
root_onnx_model_name = "{}.onnx".format(metadata_serialized)
root_onnx_model_fpath = os.path.join(
os.getcwd(), workspace_dir, root_onnx_model_name
)
onnx_model_fpath = root_onnx_model_fpath
gpt2 = GPT2TorchFile(model, metadata)
self.onnx_gpt2 = gpt2.as_onnx_model(onnx_model_fpath, force_overwrite=False)
onnx_models = [
NetworkModel(
name=GPT2ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME,
fpath=self.onnx_gpt2.fpath,
)
]
torch_models = [
NetworkModel(
name=GPT2ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME,
fpath=pytorch_model_dir,
)
]
return NetworkModels(torch=torch_models, onnx=onnx_models, trt=None)
def cleanup(
self,
workspace: NNFolderWorkspace,
save_onnx_model: bool = True,
keep_pytorch_model: bool = True,
) -> None:
"""
Cleans up the working directory and leaves models if available.
Should not assume any functions from the framework class has been called.
Returns:
None
"""
# Clean-up generated files
if not save_onnx_model and self.onnx_gpt2 is not None:
self.onnx_gpt2.cleanup()
# Remove any onnx external files by removing integer named values and weight files
workspace_path = workspace.get_path()
for d in os.listdir(workspace_path):
fpath = os.path.join(workspace_path, d)
if os.path.isfile(fpath) and os.path.splitext(d)[1] == ".weight":
os.remove(fpath)
elif d.isnumeric():
os.remove(fpath)
if not keep_pytorch_model:
# Using rmtree can be dangerous, have user confirm before deleting.
confirm_folder_delete(
self.torch_gpt2_dir,
prompt="Confirm you want to delete downloaded pytorch model folder?",
)
if not keep_pytorch_model and not save_onnx_model:
workspace.cleanup(force_remove=False)
def execute_inference(
self,
metadata: NetworkMetadata,
network_fpaths: NetworkModels,
inference_input: str,
timing_profile: TimingProfile,
) -> NetworkResult:
# Execute some tests
tokenizer = GPT2Tokenizer.from_pretrained(metadata.variant)
input_ids = tokenizer(inference_input, return_tensors="pt").input_ids
# By default, HuggingFace model structure is one giant file.
gpt2_torch_fpath = network_fpaths.torch[0].fpath
config = GPT2Config(use_cache=metadata.other.kv_cache)
gpt2_model = GPT2LMHeadModel(config).from_pretrained(gpt2_torch_fpath)
gpt2_torch = GPT2TorchFile.TorchModule(
gpt2_model.transformer, gpt2_model.lm_head, gpt2_model.config
)
greedy_output = gpt2_torch.generate(input_ids) #greedy search
# get single decoder iteration inference timing profile
_, decoder_e2e_median_time = gpt2_inference(
gpt2_torch, input_ids, timing_profile
)
# get complete decoder inference result and its timing profile
sample_output, full_e2e_median_runtime = full_inference_greedy(
gpt2_torch,
input_ids,
timing_profile,
max_length=GPT2ModelTRTConfig.MAX_SEQUENCE_LENGTH[metadata.variant],
)
semantic_outputs = []
for i, sample_output in enumerate(sample_output):
semantic_outputs.append(
tokenizer.decode(sample_output, skip_special_tokens=True)
)
return NetworkResult(
input=inference_input,
output_tensor=greedy_output,
semantic_output=semantic_outputs,
median_runtime=[
NetworkRuntime(
name=GPT2ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME,
runtime=decoder_e2e_median_time,
),
NetworkRuntime(
name=GPT2ModelTRTConfig.NETWORK_FULL_NAME,
runtime=full_e2e_median_runtime,
),
],
models=network_fpaths,
)
def run_framework(
self,
metadata: NetworkMetadata,
network_input: List[str],
working_directory: str,
keep_onnx_model: bool,
keep_pytorch_model: bool,
timing_profile: TimingProfile,
) -> List[NetworkResult]:
"""
Main entry point of our function which compiles and generates our model data.
"""
results = []
workspace = NNFolderWorkspace(
self.config.network_name, metadata, working_directory
)
try:
network_fpaths = self.generate_and_download_framework(metadata, workspace)
for ninput in network_input:
results.append(
self.execute_inference(
metadata, network_fpaths, ninput, timing_profile
)
)
finally:
self.cleanup(workspace, keep_onnx_model, keep_pytorch_model)
return results
def args_to_network_metadata(self, args: argparse.Namespace) -> NetworkMetadata:
return NetworkMetadata(
variant=args.variant,
precision=Precision(fp16=False),
other=self.config.MetadataClass(kv_cache=args.enable_kv_cache),
)
# Entry point
RUN_CMD = GPT2HuggingFace()
if __name__ == "__main__":
result = RUN_CMD()
print("Results: {}".format(result))
| 33.953586
| 100
| 0.639617
|
92b0eb0d7a930eb91ab90f6e7301b618ac6e32b7
| 4,020
|
py
|
Python
|
dreamcoder/dreaming.py
|
theosech/ec
|
7fc34fb9df8d1b34bd4eb11551ca6fa0f574ce0e
|
[
"Unlicense"
] | 6
|
2020-11-19T14:53:55.000Z
|
2021-09-27T19:28:51.000Z
|
dreamcoder/dreaming.py
|
evelinehong/dreamcoder
|
9de0434359721c8a4ecc44ae76649e21f1479c3d
|
[
"MIT"
] | 1
|
2020-10-17T00:51:43.000Z
|
2020-10-17T00:51:43.000Z
|
dreamcoder/dreaming.py
|
evelinehong/dreamcoder
|
9de0434359721c8a4ecc44ae76649e21f1479c3d
|
[
"MIT"
] | 1
|
2020-11-18T19:36:56.000Z
|
2020-11-18T19:36:56.000Z
|
import json
import os
import subprocess
from pathos.multiprocessing import Pool
from dreamcoder.domains.arithmetic.arithmeticPrimitives import k1, k0, addition, subtraction, multiplication
from dreamcoder.frontier import Frontier, FrontierEntry
from dreamcoder.grammar import Grammar
from dreamcoder.program import Program
from dreamcoder.recognition import RecognitionModel, DummyFeatureExtractor
from dreamcoder.task import Task
from dreamcoder.type import arrow, tint
from dreamcoder.utilities import tuplify, timing, eprint, get_root_dir, mean
def helmholtzEnumeration(g, request, inputs, timeout, _=None,
special=None, evaluationTimeout=None):
"""Returns json (as text)"""
message = {"request": request.json(),
"timeout": timeout,
"DSL": g.json(),
"extras": inputs}
if evaluationTimeout: message["evaluationTimeout"] = evaluationTimeout
if special: message["special"] = special
message = json.dumps(message)
with open('/tmp/hm', 'w') as handle:
handle.write(message)
try:
binary = os.path.join(get_root_dir(), 'helmholtz')
process = subprocess.Popen(binary,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
response, error = process.communicate(bytes(message, encoding="utf-8"))
except OSError as exc:
raise exc
return response
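# The returned bytes are the JSON emitted by the helmholtz binary; a sketch of
# decoding them (mirroring what get() does below):
#   entries = json.loads(helmholtzEnumeration(g, request, inputs, 10.).decode("utf-8"))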
def backgroundHelmholtzEnumeration(tasks, g, timeout, _=None,
special=None, evaluationTimeout=None):
requests = list({t.request for t in tasks})
inputs = {r: list({tuplify(xs)
for t in tasks if t.request == r
for xs, y in t.examples})
for r in requests}
workers = Pool(len(requests))
promises = [workers.apply_async(helmholtzEnumeration,
args=(g, r, inputs[r], float(timeout)),
kwds={'special': special,
'evaluationTimeout': evaluationTimeout})
for r in requests]
def get():
results = [p.get() for p in promises]
frontiers = []
with timing("(Helmholtz enumeration) Decoded json into frontiers"):
for request, result in zip(requests, results):
response = json.loads(result.decode("utf-8"))
for b, entry in enumerate(response):
frontiers.append(Frontier([FrontierEntry(program=Program.parse(p),
logPrior=entry["ll"],
logLikelihood=0.)
for p in entry["programs"]],
task=Task(str(b),
request,
[])))
eprint("Total number of Helmholtz frontiers:", len(frontiers))
return frontiers
return get
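# Typical usage (sketch): start enumeration in the background, do other work,
# then collect the frontiers from the returned thunk:
#   get_frontiers = backgroundHelmholtzEnumeration(tasks, g, timeout=60)
#   ...  # other work
#   frontiers = get_frontiers()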
if __name__ == "__main__":
g = Grammar.uniform([k1, k0, addition, subtraction, multiplication])
frontiers = helmholtzEnumeration(g,
arrow(tint, tint),
[[0], [1], [2]],
10.)
eprint("average frontier size", mean(len(f.entries) for f in frontiers))
f = DummyFeatureExtractor([])
r = RecognitionModel(f, g, hidden=[], contextual=True)
r.trainBiasOptimal(frontiers, frontiers, steps=70)
g = r.grammarOfTask(frontiers[0].task).untorch()
frontiers = helmholtzEnumeration(g,
arrow(tint, tint),
[[0], [1], [2]],
10.)
for f in frontiers:
eprint(f.summarizeFull())
eprint("average frontier size", mean(len(f.entries) for f in frontiers))
| 43.695652
| 108
| 0.539303
|
a04cbe32f9a3ccafd75d29ece6f91f0cf273a9aa
| 35
|
py
|
Python
|
zvgportal/parser/__init__.py
|
luccalb/zvg-portal
|
f812132aab657454b9a243dc7a1d1a4b7c3c8e21
|
[
"MIT"
] | null | null | null |
zvgportal/parser/__init__.py
|
luccalb/zvg-portal
|
f812132aab657454b9a243dc7a1d1a4b7c3c8e21
|
[
"MIT"
] | null | null | null |
zvgportal/parser/__init__.py
|
luccalb/zvg-portal
|
f812132aab657454b9a243dc7a1d1a4b7c3c8e21
|
[
"MIT"
] | null | null | null |
from zvgportal.parser import parser
| 35
| 35
| 0.885714
|
7cffb8c729fd20d9b30fa8a482b20f424acaa33a
| 193
|
py
|
Python
|
convertorrr.py
|
mrtehseen/signal-failure-detection
|
506f32b47e37227fd90100ab834290bc50ca83d6
|
[
"MIT"
] | null | null | null |
convertorrr.py
|
mrtehseen/signal-failure-detection
|
506f32b47e37227fd90100ab834290bc50ca83d6
|
[
"MIT"
] | null | null | null |
convertorrr.py
|
mrtehseen/signal-failure-detection
|
506f32b47e37227fd90100ab834290bc50ca83d6
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
converter = tf.lite.TFLiteConverter.from_keras_model_file("keras_model.h5")
tflite_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_model)
| 32.166667
| 75
| 0.80829
|
0f8b4a3ad9684b71a88ef150c4930a079fae69c1
| 8,273
|
py
|
Python
|
common/utilities/tests/test_timing.py
|
tomzo/integration-adaptors
|
d4f296d3e44475df6f69a78a27fac6ed5b67513b
|
[
"Apache-2.0"
] | null | null | null |
common/utilities/tests/test_timing.py
|
tomzo/integration-adaptors
|
d4f296d3e44475df6f69a78a27fac6ed5b67513b
|
[
"Apache-2.0"
] | 4
|
2021-03-31T19:46:30.000Z
|
2021-03-31T19:55:03.000Z
|
common/utilities/tests/test_timing.py
|
tomzo/integration-adaptors
|
d4f296d3e44475df6f69a78a27fac6ed5b67513b
|
[
"Apache-2.0"
] | 2
|
2020-04-02T11:22:17.000Z
|
2021-04-11T07:24:48.000Z
|
import datetime
from unittest import TestCase
from unittest.mock import patch, Mock
from tornado.testing import AsyncHTTPTestCase
from tornado.web import Application, RequestHandler
from utilities import timing
from utilities.test_utilities import async_test
DEFAULT_RETURN = "default"
class TestTimeUtilities(TestCase):
@patch('time.perf_counter')
def test_stopwatch(self, time_mock):
stopwatch = timing.Stopwatch()
time_mock.return_value = 0.0
stopwatch.start_timer()
time_mock.return_value = 5.0
result = stopwatch.stop_timer()
self.assertEqual(result, 5)
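    # Typical (non-test) use of the Stopwatch exercised above, as a sketch:
    #   stopwatch = timing.Stopwatch()
    #   stopwatch.start_timer()
    #   ...  # timed work
    #   elapsed = stopwatch.stop_timer()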
@patch.object(timing, 'logger')
def test_invoke_with_time_rounding(self, log_mock):
with self.subTest("Default"):
timing._log_time(5.1236, "yes")
log_mock.info.assert_called_with('0001', '{FuncName} took {Duration} seconds',
{'FuncName': 'yes', 'Duration': 5.124})
with self.subTest("Tornado"):
timing._log_tornado_time(5.1236, "yes", "methodName")
log_mock.info.assert_called_with('0002', '{FuncName} from {Handler} took {Duration} seconds',
{'FuncName': 'methodName', 'Handler': 'yes', 'Duration': 5.124})
@patch('utilities.timing._log_time')
@patch('utilities.timing.Stopwatch.stop_timer')
@async_test
async def test_invoke_with_time(self, time_mock, log_mock):
time_mock.return_value = 5
with self.subTest("Sync version"):
res = self.default_method()
log_mock.assert_called_with(5, 'default_method')
self.assertEqual(DEFAULT_RETURN, res)
with self.subTest("Async version"):
res = await self.default_method_async()
log_mock.assert_called_with(5, 'default_method_async')
self.assertEqual(DEFAULT_RETURN, res)
@patch('utilities.timing.Stopwatch.stop_timer')
@patch('utilities.timing._log_time')
@async_test
async def test_exception_thrown_whilst_timing(self, log_mock, time_mock):
time_mock.return_value = 10
with self.subTest("Sync"):
with self.assertRaises(ValueError):
self.throw_error_method()
log_mock.assert_called_with(10, 'throw_error_method')
with self.subTest("Async"):
with self.assertRaises(ValueError):
await self.throw_error_method_async()
log_mock.assert_called_with(10, 'throw_error_method_async')
@patch('utilities.timing.Stopwatch.stop_timer')
@patch.object(timing, 'logger')
@async_test
async def test_invoke_with_time_parameters(self, log_mock, time_mock):
with self.subTest("Sync"):
time_mock.return_value = 5
res = self.take_parameters("whew", 1, [2], {3: 3})
log_mock.info.assert_called_with('0001', '{FuncName} took {Duration} seconds',
{'FuncName': 'take_parameters', 'Duration': 5})
self.assertEqual("whew1", res)
with self.subTest("Async"):
time_mock.return_value = 5
res = await self.take_parameters_async("whew", 1, [2], {3: 3})
log_mock.info.assert_called_with('0001', '{FuncName} took {Duration} seconds',
{'FuncName': 'take_parameters_async', 'Duration': 5})
self.assertEqual("whew1", res)
@patch('utilities.timing.Stopwatch.stop_timer')
@patch.object(timing, 'logger')
@async_test
async def test_async_times_execution_correctly(self, log_mock, time_mock):
time_mock.return_value = 0
task = self.default_method_async()
# check the method doesn't get timed until awaited
time_mock.return_value = 2
await task
log_mock.info.assert_called_with('0001', '{FuncName} took {Duration} seconds',
{'FuncName': 'default_method_async', 'Duration': 2})
@patch('utilities.timing.Stopwatch.stop_timer')
@patch.object(timing, 'logger')
@async_test
async def test_invoke_with_time_varargs(self, log_mock, time_mock):
with self.subTest("Sync"):
time_mock.return_value = 5
res = self.var_parameters("whew", 1, 2, 3, 4, 5)
log_mock.info.assert_called_with('0001', '{FuncName} took {Duration} seconds',
{'FuncName': 'var_parameters', 'Duration': 5})
self.assertEqual("whew12345", res)
with self.subTest("Async"):
time_mock.return_value = 5
res = await self.var_parameters_async("whew", 1, "three", 4)
log_mock.info.assert_called_with('0001', '{FuncName} took {Duration} seconds',
{'FuncName': 'var_parameters_async', 'Duration': 5})
self.assertEqual("whew1three4", res)
@patch('utilities.timing.datetime')
def test_get_time(self, mock_datetime):
mock_datetime.datetime.utcnow.return_value = datetime.datetime(2019, 1, 5, 12, 13, 14, 567)
self.assertEqual('2019-01-05T12:13:14.000567Z', timing.get_time())
@timing.time_function
def default_method(self):
return DEFAULT_RETURN
@timing.time_function
async def default_method_async(self):
return DEFAULT_RETURN
@timing.time_function
def throw_error_method(self):
raise ValueError("Whew")
@timing.time_function
async def throw_error_method_async(self):
raise ValueError("Whew")
@timing.time_function
def take_parameters(self, check, one, two, three):
assert check is not None
assert one is not None
assert two is not None
assert three is not None
return check + str(one)
@timing.time_function
async def take_parameters_async(self, check, one, two, three):
return self.take_parameters(check, one, two, three)
@timing.time_function
def var_parameters(self, *arg):
return ''.join([str(string) for string in arg])
@timing.time_function
async def var_parameters_async(self, *arg):
return self.var_parameters(*arg)
class FakeRequestHandler(RequestHandler):
@timing.time_request
def post(self):
self.write("hello")
@timing.time_request
async def get(self):
self.write("hello")
@timing.time_request
def put(self):
try:
raise ValueError("Whew")
finally:
self.write("put")
@patch.object(timing, 'logger')
class TestHTTPWrapperTimeUtilities(AsyncHTTPTestCase):
duration = 5
def get_app(self):
self.sender = Mock()
return Application([
(r"/.*", FakeRequestHandler, {})
])
def _assert_handler_data(self, response, expected_code, expected_body, expected_func_name, log_mock):
self.assertEqual(response.code, expected_code)
if expected_body:
self.assertEqual(response.body.decode('utf8'), expected_body)
log_mock.info.assert_called_with('0002', '{FuncName} from {Handler} took {Duration} seconds',
{'FuncName': expected_func_name, 'Handler': 'FakeRequestHandler',
'Duration': self.duration})
@patch('utilities.timing.Stopwatch.stop_timer')
def test_post_synchronous_message(self, time_mock, log_mock):
time_mock.return_value = self.duration
response = self.fetch(f"/", method="POST", body="{'test': 'tested'}")
self._assert_handler_data(response, 200, 'hello', 'post', log_mock)
@patch('utilities.timing.Stopwatch.stop_timer')
def test_get_asynchronous_message(self, time_mock, log_mock):
time_mock.return_value = self.duration
response = self.fetch(f"/", method="GET")
self._assert_handler_data(response, 200, 'hello', 'get', log_mock)
@patch('utilities.timing.Stopwatch.stop_timer')
def test_raise_exception(self, time_mock, log_mock):
time_mock.return_value = self.duration
response = self.fetch(f"/", method="PUT", body="{'test': 'tested'}")
self._assert_handler_data(response, 500, None, 'put', log_mock)
| 38.300926
| 109
| 0.634111
|
13d12864a15fa32990757b2abc2dec8cbb443188
| 13,681
|
py
|
Python
|
squad2/run_python3.py
|
arfu2016/DuReader
|
66934852c508bff5540596aa71d5ce40c828b37d
|
[
"Apache-2.0"
] | null | null | null |
squad2/run_python3.py
|
arfu2016/DuReader
|
66934852c508bff5540596aa71d5ce40c828b37d
|
[
"Apache-2.0"
] | null | null | null |
squad2/run_python3.py
|
arfu2016/DuReader
|
66934852c508bff5540596aa71d5ce40c828b37d
|
[
"Apache-2.0"
] | null | null | null |
"""
@Project : DuReader
@Module : run_python3.py
@Author : Deco [deco@cubee.com]
@Created : 7/23/18 5:47 PM
@Desc :
"""
import argparse
import json
import os
import pickle
import random
import sys
from importlib import import_module
import tensorflow as tf
base_dir = os.path.dirname(
os.path.dirname(
os.path.abspath(__file__)))
try:
from squad2.dataset import BRCDataset
from squad2.vocab import Vocab
from squad2.rc_model import RCModel
from squad2.logger_setup import define_logger
except ImportError:
if base_dir not in sys.path:
sys.path.insert(0, base_dir)
    # import using base_dir as the root
from squad2.logger_setup import define_logger
module_dataset = import_module('.dataset', package='squad2')
module_vocab = import_module('.vocab', package='squad2')
module_rc_model = import_module('.rc_model', package='squad2')
BRCDataset = getattr(module_dataset, 'BRCDataset')
Vocab = getattr(module_vocab, 'Vocab')
RCModel = getattr(module_rc_model, 'RCModel')
logger = define_logger('squad2.run_python3')
os.chdir(os.path.join(base_dir, 'squad2'))
# change the working directory, since the parent and grandparent directories are used later
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
# set the environment variable that controls tensorflow's log level
def parse_args():
"""
Parses command line arguments.
"""
parser = argparse.ArgumentParser(
'Reading Comprehension on BaiduRC dataset')
parser.add_argument('--prepare', action='store_true',
help='create the directories, '
'prepare the vocabulary and embeddings')
# args = parser.parse_args()
# args.prepare is available
# when action='store_true' and --prepare exists, args.prepare is True
parser.add_argument('--train', action='store_true',
help='train the model')
parser.add_argument('--evaluate', action='store_true',
help='evaluate the model on dev set')
parser.add_argument('--predict', action='store_true',
help='predict the answers for test set '
'with trained model')
parser.add_argument('--gpu', type=str, default='0',
help='specify gpu device')
train_settings = parser.add_argument_group('train settings')
train_settings.add_argument('--optim', default='adam',
help='optimizer type')
train_settings.add_argument('--learning_rate', type=float, default=0.001,
help='learning rate')
train_settings.add_argument('--weight_decay', type=float, default=0,
help='weight decay')
    # whether to add l2 regularization when computing the loss; disabled by default
train_settings.add_argument('--dropout_keep_prob', type=float, default=1,
help='dropout keep rate')
train_settings.add_argument('--batch_size', type=int, default=32,
help='train batch size')
train_settings.add_argument('--epochs', type=int, default=10,
help='train epochs')
model_settings = parser.add_argument_group('model settings')
model_settings.add_argument('--algo', choices=['BIDAF', 'MLSTM'],
default='MLSTM',
help='choose the algorithm to use')
model_settings.add_argument('--embed_size', type=int, default=300,
help='size of the embeddings')
    # tunable hyperparameter, default 300
model_settings.add_argument('--hidden_size', type=int, default=150,
help='size of LSTM hidden units')
    # tunable hyperparameter, default 150
model_settings.add_argument('--max_p_num', type=int, default=5,
help='max passage num in one sample')
    # at most 5 candidate documents
model_settings.add_argument('--max_p_len', type=int, default=500,
help='max length of passage')
    # passage length capped at 500? (2500 has also been seen); tokens beyond the cap are ignored in the tf computation
model_settings.add_argument('--max_q_len', type=int, default=60,
help='max length of question')
    # question length is capped at 60
model_settings.add_argument('--max_a_len', type=int, default=200,
help='max length of answer')
    # answer length is capped at 200
path_settings = parser.add_argument_group('path settings')
path_settings.add_argument(
'--train_files', nargs='+',
default=['/decaNLP/.data/squad/train-v1.1.json'],
help='list of files that contain the preprocessed train data')
    # nargs='+' means --train_files accepts one or more arguments
path_settings.add_argument(
'--dev_files', nargs='+',
default=['/decaNLP/.data/squad/dev-v1.1.json'],
help='list of files that contain the preprocessed dev data')
path_settings.add_argument(
'--test_files', nargs='+',
default=['/decaNLP/.data/squad/dev-v1.1.json'],
help='list of files that contain the preprocessed test data')
path_settings.add_argument(
'--brc_dir', default='../data/baidu',
help='the dir with preprocessed baidu reading comprehension data')
path_settings.add_argument('--vocab_dir', default='../data/vocab/',
help='the dir to save vocabulary')
path_settings.add_argument('--model_dir',
default='../data/models/simple_preprocess/',
help='the dir to store models')
path_settings.add_argument('--result_dir',
default='../data/results/simple_preprocess/',
help='the dir to output the results')
path_settings.add_argument('--summary_dir',
default='../data/summary/simple_preprocess/',
help='the dir to write tensorboard summary')
path_settings.add_argument('--log_path',
help='path of the log file. If not set, '
'logs are printed to console')
return parser.parse_args()
def prepare(args):
"""
    checks the data, creates the directories, and prepares the vocabulary and embeddings
"""
logger.info('Checking the data files...')
for data_path in args.train_files + args.dev_files + args.test_files:
assert os.path.exists(data_path), \
'{} file does not exist.'.format(data_path)
        # guard against invalid input files
logger.info('Preparing the directories...')
for dir_path in [args.vocab_dir, args.model_dir, args.result_dir,
args.summary_dir]:
if not os.path.exists(dir_path):
os.makedirs(dir_path)
logger.info('Building vocabulary...')
brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len,
args.train_files, args.dev_files, args.test_files)
    # prepare the data lists
vocab = Vocab(lower=True)
# obtain token2id, id2token, token_cnt
for word in brc_data.word_iter('train'):
# we yield words from a generator
vocab.add(word)
# logger.debug(word)
unfiltered_vocab_size = vocab.size()
logger.debug(unfiltered_vocab_size)
vocab.filter_tokens_by_cnt(min_cnt=2)
    # tokens that occur fewer than 2 times are dropped
filtered_num = unfiltered_vocab_size - vocab.size()
    # number of filtered-out tokens
    logger.info('After filtering {} tokens, the final vocab size is {}'.
format(filtered_num, vocab.size()))
logger.info('Assigning embeddings...')
vocab.randomly_init_embeddings(args.embed_size)
    # randomly initialize word embeddings with dimension args.embed_size
logger.info('Saving vocab...')
with open(os.path.join(args.vocab_dir, 'vocab.data'), 'wb') as fout:
pickle.dump(vocab, fout)
# serialize vocab and store it in a file
logger.info('Done with preparing!')
def train(args, restore=True):
"""
trains the reading comprehension model
"""
logger.info('Load data_set and vocab...')
with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin:
vocab = pickle.load(fin)
brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len,
args.train_files, args.dev_files)
    # prepare the training data
logger.info('Converting text into ids...')
brc_data.convert_to_ids(vocab)
# convert tokens of questions and paragraphs in training data to ids
    # the converted result is stored in brc_data
logger.info('Initialize the model...')
rc_model = RCModel(vocab, args)
if restore:
try:
rc_model.restore(model_dir=args.model_dir,
model_prefix=args.algo)
            # todo: the line above may need to change to model_prefix=args.algo + '_' + str(2)
# except Exception as e:
except tf.errors.InvalidArgumentError:
# logger.info('Exception in train() in run_python3.py', e)
logger.info('InvalidArgumentError... '
'Initialize the model from beginning')
# str(e) or repr(e)
except Exception:
logger.info('Unknown exception. '
'Initialize the model from beginning')
logger.info('Training the model...')
rc_model.train(brc_data, args.epochs, args.batch_size,
save_dir=args.model_dir,
save_prefix=args.algo,
dropout_keep_prob=args.dropout_keep_prob)
logger.info('Done with model training!')
def evaluate(args):
"""
evaluate the trained model on dev files
    Useful as a reference when tuning hyperparameters.
"""
logger.info('Load data_set and vocab...')
with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin:
vocab = pickle.load(fin)
assert len(args.dev_files) > 0, 'No dev files are provided.'
brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len,
dev_files=args.dev_files)
logger.info('Converting text into ids...')
brc_data.convert_to_ids(vocab)
logger.info('Restoring the model...')
rc_model = RCModel(vocab, args)
rc_model.restore(model_dir=args.model_dir,
model_prefix=args.algo)
    # todo: the line above may need to change to model_prefix=args.algo + '_' + str(2)
logger.info('Evaluating the model on dev set...')
dev_batches = brc_data.gen_mini_batches('dev', args.batch_size,
pad_id=vocab.get_id(
vocab.pad_token),
shuffle=False)
dev_loss, dev_bleu_rouge = rc_model.evaluate(
dev_batches, result_dir=args.result_dir, result_prefix='dev.predicted')
logger.info('Loss on dev set: {}'.format(dev_loss))
logger.info('Result on dev set: {}'.format(dev_bleu_rouge))
logger.info(
'Predicted answers are saved to {}'.format(
os.path.join(args.result_dir)))
def predict(args):
"""
predicts answers for test files
"""
logger.info('Load data_set and vocab...')
with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin:
vocab = pickle.load(fin)
assert len(args.test_files) > 0, 'No test files are provided.'
brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len,
test_files=args.test_files)
logger.info('Converting text into ids...')
brc_data.convert_to_ids(vocab)
logger.info('Restoring the model...')
rc_model = RCModel(vocab, args)
rc_model.restore(model_dir=args.model_dir, model_prefix=args.algo)
    # todo: the line above may need to change to model_prefix=args.algo + '_' + str(2)
logger.info('Predicting answers for test set...')
test_batches = brc_data.gen_mini_batches('test', args.batch_size,
pad_id=vocab.get_id(
vocab.pad_token),
shuffle=False)
rc_model.evaluate(test_batches,
result_dir=args.result_dir,
result_prefix='test.predicted')
    # the evaluate function is reused here
result_dir = args.result_dir
question_answer = list()
answer_string = 'Question and answer for testing:\n'
if result_dir is not None:
result_file = os.path.join(result_dir, 'test.predicted.json')
with open(result_file, 'r', encoding='utf8') as fin:
for line in fin:
answer_dict = json.loads(line.strip())
question_answer.append((answer_dict['question'],
answer_dict['predict_answer'],
answer_dict['real_answer'],
))
answer_samples = random.sample(question_answer, 20) # 10
for sample in answer_samples:
answer_string += '{}: \nPredict: {}\nReal: {}\n\n'.format(
sample[0], sample[1], sample[2][0])
logger.info(answer_string)
def run():
"""
Prepares and runs the whole system.
"""
args = parse_args()
logger.info('Running with args : {}'.format(args))
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# https://stackoverflow.com/questions/13781738/how-does-cuda-assign-device-ids-to-gpus?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    # specify which gpu(s) to use; with more than one gpu, memory still seems to come mainly from the first gpu, while the others only help with computation, not memory
if args.prepare:
prepare(args)
if args.train:
train(args)
if args.evaluate:
evaluate(args)
if args.predict:
predict(args)
if __name__ == '__main__':
run()
# python squad2/run_python3.py --prepare
# python squad2/run_python3.py --train --algo BIDAF --epochs 1 --batch_size 32
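# Illustrative end-to-end usage (these invocations are an assumption based on the
# flags defined in parse_args above, not commands from the original author):
# python squad2/run_python3.py --evaluate --algo BIDAF
# python squad2/run_python3.py --predict --algo BIDAF --batch_size 32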
| 39.088571
| 163
| 0.608143
|
341cf60bedca0d6aee7f9593467203341851043e
| 1,602
|
py
|
Python
|
applications/easyflow/controllers/workflow.py
|
zhangjiannan/easyflow
|
2c9906d3cc2088ed367f887da842a46db5d903ed
|
[
"BSD-3-Clause"
] | null | null | null |
applications/easyflow/controllers/workflow.py
|
zhangjiannan/easyflow
|
2c9906d3cc2088ed367f887da842a46db5d903ed
|
[
"BSD-3-Clause"
] | null | null | null |
applications/easyflow/controllers/workflow.py
|
zhangjiannan/easyflow
|
2c9906d3cc2088ed367f887da842a46db5d903ed
|
[
"BSD-3-Clause"
] | 1
|
2021-05-20T10:48:34.000Z
|
2021-05-20T10:48:34.000Z
|
# coding: utf8
from gluon.tools import Service
service = Service()
import json
def index():
response.title = "Manage"
response.subtitle = None
records = db(db.workflow.user_id==auth.user_id).select()
    if len(records) == 0:
        records = 'No records yet'
return dict(title='Workflows',records=records,app='workflows')
def view():
rows = db(db.workflow.user_id==auth.user_id).select()
return dict(records=rows)
def status():
rows = db(db.status.user_id==auth.user_id).select()
return dict(records=rows)
def start():
workflowID = request.vars.workflowID
name = request.vars.name
description = request.vars.description
if workflowID:
db.occurrence.insert(workflow_id=workflowID,name=name,description=description)
        return dict(record=workflowID,error=False,message='Workflow successfully added!')
else:
return dict(record=workflowID,error=True,message='WorkflowID is empty!')
def delete():
workflowID = request.vars.workflowID
if workflowID:
db(db.workflow.id == workflowID).delete()
        return dict(record=workflowID,error=False,message='Successfully deleted!')
else:
return dict(record=workflowID,error=True,message='WorkflowID is empty!')
def single():
record = db.workflow(request.args(0)) or redirect(URL('view'))
return dict(records=record)
def edit():
return 'Hello'
def user():
return dict(form=auth())
@auth.requires_login()
def details():
form = SQLFORM.smartgrid(db.workflow)
return dict(form=form)
def data():
return dict(form=crud())
| 30.807692
| 88
| 0.696629
|
579453e25bb00036e653b2c1f8d4907314498311
| 6,310
|
py
|
Python
|
pymeasure/adapters/visa.py
|
matthias6/pymeasure
|
f226ab4aaec8265ff442c5baadc27cfdee513ca4
|
[
"MIT"
] | 4
|
2020-11-13T08:57:16.000Z
|
2021-09-16T12:45:33.000Z
|
pymeasure/adapters/visa.py
|
matthias6/pymeasure
|
f226ab4aaec8265ff442c5baadc27cfdee513ca4
|
[
"MIT"
] | 2
|
2021-12-16T16:15:47.000Z
|
2022-03-27T10:47:13.000Z
|
pymeasure/adapters/visa.py
|
matthias6/pymeasure
|
f226ab4aaec8265ff442c5baadc27cfdee513ca4
|
[
"MIT"
] | null | null | null |
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2021 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
import copy
import pyvisa
import numpy as np
from pkg_resources import parse_version
from .adapter import Adapter
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
# noinspection PyPep8Naming,PyUnresolvedReferences
class VISAAdapter(Adapter):
""" Adapter class for the VISA library using PyVISA to communicate
with instruments.
    :param resource_name: VISA resource name that identifies the address
:param visa_library: VisaLibrary Instance, path of the VISA library or VisaLibrary spec string (@py or @ni).
if not given, the default for the platform will be used.
:param preprocess_reply: optional callable used to preprocess strings
received from the instrument. The callable returns the processed string.
:param kwargs: Any valid key-word arguments for constructing a PyVISA instrument
"""
def __init__(self, resource_name, visa_library='', preprocess_reply=None, **kwargs):
super().__init__(preprocess_reply=preprocess_reply)
if not VISAAdapter.has_supported_version():
raise NotImplementedError("Please upgrade PyVISA to version 1.8 or later.")
if isinstance(resource_name, int):
resource_name = "GPIB0::%d::INSTR" % resource_name
self.resource_name = resource_name
self.manager = pyvisa.ResourceManager(visa_library)
safeKeywords = [
'resource_name', 'timeout', 'chunk_size', 'lock', 'query_delay', 'send_end',
'read_termination', 'write_termination'
]
kwargsCopy = copy.deepcopy(kwargs)
for key in kwargsCopy:
if key not in safeKeywords:
kwargs.pop(key)
self.connection = self.manager.open_resource(
resource_name,
**kwargs
)
@staticmethod
def has_supported_version():
""" Returns True if the PyVISA version is greater than 1.8 """
if hasattr(pyvisa, '__version__'):
return parse_version(pyvisa.__version__) >= parse_version('1.8')
else:
return False
def write(self, command):
""" Writes a command to the instrument
:param command: SCPI command string to be sent to the instrument
"""
self.connection.write(command)
def read(self):
""" Reads until the buffer is empty and returns the resulting
ASCII response
:returns: String ASCII response of the instrument.
"""
return self.connection.read()
def read_bytes(self, size):
""" Reads specified number of bytes from the buffer and returns
the resulting ASCII response
:param size: Number of bytes to read from the buffer
:returns: String ASCII response of the instrument.
"""
return self.connection.read_bytes(size)
def ask(self, command):
""" Writes the command to the instrument and returns the resulting
ASCII response
:param command: SCPI command string to be sent to the instrument
:returns: String ASCII response of the instrument
"""
return self.connection.query(command)
def ask_values(self, command, **kwargs):
""" Writes a command to the instrument and returns a list of formatted
values from the result. This leverages the `query_ascii_values` method
in PyVISA.
:param command: SCPI command to be sent to the instrument
:param kwargs: Key-word arguments to pass onto `query_ascii_values`
:returns: Formatted response of the instrument.
"""
return self.connection.query_ascii_values(command, **kwargs)
def binary_values(self, command, header_bytes=0, dtype=np.float32):
""" Returns a numpy array from a query for binary data
:param command: SCPI command to be sent to the instrument
:param header_bytes: Integer number of bytes to ignore in header
:param dtype: The NumPy data type to format the values with
:returns: NumPy array of values
"""
self.connection.write(command)
binary = self.connection.read_raw()
header, data = binary[:header_bytes], binary[header_bytes:]
        return np.frombuffer(data, dtype=dtype)
def write_binary_values(self, command, values, **kwargs):
""" Write binary data to the instrument, e.g. waveform for signal generators
:param command: SCPI command to be sent to the instrument
:param values: iterable representing the binary values
:param kwargs: Key-word arguments to pass onto `write_binary_values`
:returns: number of bytes written
"""
return self.connection.write_binary_values(command, values, **kwargs)
def wait_for_srq(self, timeout=25, delay=0.1):
""" Blocks until a SRQ, and leaves the bit high
:param timeout: Timeout duration in seconds
:param delay: Time delay between checking SRQ in seconds
"""
self.connection.wait_for_srq(timeout * 1000)
def __repr__(self):
return "<VISAAdapter(resource='%s')>" % self.connection.resource_name
| 39.685535
| 112
| 0.68859
|
9529062fad263473db26922382e103a4b8815830
| 9,213
|
py
|
Python
|
examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_alternating_legs_env.py
|
frk2/bullet3
|
225d823e4dc3f952c6c39920c3f87390383e0602
|
[
"Zlib"
] | 7
|
2019-12-19T00:54:18.000Z
|
2021-03-03T23:34:58.000Z
|
examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_alternating_legs_env.py
|
frk2/bullet3
|
225d823e4dc3f952c6c39920c3f87390383e0602
|
[
"Zlib"
] | null | null | null |
examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_alternating_legs_env.py
|
frk2/bullet3
|
225d823e4dc3f952c6c39920c3f87390383e0602
|
[
"Zlib"
] | 8
|
2019-12-18T06:09:25.000Z
|
2021-09-25T21:51:48.000Z
|
"""This file implements the gym environment of minitaur alternating legs.
"""
import math
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0,parentdir)
from gym import spaces
import numpy as np
from pybullet_envs.minitaur.envs import minitaur_gym_env
INIT_EXTENSION_POS = 2.6
INIT_SWING_POS = 0.0
DESIRED_PITCH = 0
NUM_LEGS = 4
NUM_MOTORS = 2 * NUM_LEGS
STEP_PERIOD = 1.0 / 3.0 # Three steps per second.
STEP_AMPLITUDE = 0.75
class MinitaurAlternatingLegsEnv(minitaur_gym_env.MinitaurGymEnv):
"""The gym environment for the minitaur.
It simulates the locomotion of a minitaur, a quadruped robot. The state space
include the angles, velocities and torques for all the motors and the action
space is the desired motor angle for each motor. The reward function is based
on how far the minitaur walks in 1000 steps and penalizes the energy
expenditure.
"""
metadata = {
"render.modes": ["human", "rgb_array"],
"video.frames_per_second": 66
}
def __init__(self,
urdf_version=None,
control_time_step=0.006,
action_repeat=6,
control_latency=0,
pd_latency=0,
on_rack=False,
motor_kp=1.0,
motor_kd=0.02,
remove_default_joint_damping=False,
render=False,
num_steps_to_log=1000,
env_randomizer=None,
log_path=None):
"""Initialize the minitaur alternating legs gym environment.
Args:
urdf_version: [DEFAULT_URDF_VERSION, DERPY_V0_URDF_VERSION] are allowable
versions. If None, DEFAULT_URDF_VERSION is used. Refer to
minitaur_gym_env for more details.
control_time_step: The time step between two successive control signals.
action_repeat: The number of simulation steps that an action is repeated.
control_latency: The latency between get_observation() and the actual
        observation. See minitaur.py for more details.
pd_latency: The latency used to get motor angles/velocities used to
compute PD controllers. See minitaur.py for more details.
on_rack: Whether to place the minitaur on rack. This is only used to debug
the walking gait. In this mode, the minitaur's base is hung midair so
that its walking gait is clearer to visualize.
motor_kp: The P gain of the motor.
motor_kd: The D gain of the motor.
remove_default_joint_damping: Whether to remove the default joint damping.
render: Whether to render the simulation.
num_steps_to_log: The max number of control steps in one episode. If the
number of steps is over num_steps_to_log, the environment will still
be running, but only first num_steps_to_log will be recorded in logging.
      env_randomizer: An instance (or a list) of EnvRandomizer(s) that can
        randomize the environment when env.reset() is called and add
perturbations when env.step() is called.
log_path: The path to write out logs. For the details of logging, refer to
minitaur_logging.proto.
"""
    # _swing_offset and _extension_offset are used to mimic the bent legs.
self._swing_offset = np.zeros(NUM_LEGS)
self._extension_offset = np.zeros(NUM_LEGS)
super(MinitaurAlternatingLegsEnv, self).__init__(
urdf_version=urdf_version,
accurate_motor_model_enabled=True,
motor_overheat_protection=True,
hard_reset=False,
motor_kp=motor_kp,
motor_kd=motor_kd,
remove_default_joint_damping=remove_default_joint_damping,
control_latency=control_latency,
pd_latency=pd_latency,
on_rack=on_rack,
render=render,
num_steps_to_log=num_steps_to_log,
env_randomizer=env_randomizer,
log_path=log_path,
control_time_step=control_time_step,
action_repeat=action_repeat)
action_dim = 8
action_high = np.array([0.1] * action_dim)
self.action_space = spaces.Box(-action_high, action_high)
self._cam_dist = 1.0
self._cam_yaw = 30
self._cam_pitch = -30
def reset(self):
self.desired_pitch = DESIRED_PITCH
# In this environment, the actions are
# [swing leg 1, swing leg 2, swing leg 3, swing leg 4,
# extension leg 1, extension leg 2, extension leg 3, extension leg 4]
init_pose = [
INIT_SWING_POS + self._swing_offset[0],
INIT_SWING_POS + self._swing_offset[1],
INIT_SWING_POS + self._swing_offset[2],
INIT_SWING_POS + self._swing_offset[3],
INIT_EXTENSION_POS + self._extension_offset[0],
INIT_EXTENSION_POS + self._extension_offset[1],
INIT_EXTENSION_POS + self._extension_offset[2],
INIT_EXTENSION_POS + self._extension_offset[3]
]
initial_motor_angles = self._convert_from_leg_model(init_pose)
super(MinitaurAlternatingLegsEnv, self).reset(
initial_motor_angles=initial_motor_angles, reset_duration=0.5)
return self._get_observation()
def _convert_from_leg_model(self, leg_pose):
motor_pose = np.zeros(NUM_MOTORS)
for i in range(NUM_LEGS):
      # // keeps the original (Python 2) integer-division semantics
      motor_pose[2 * i] = leg_pose[NUM_LEGS + i] - (-1)**(i // 2) * leg_pose[i]
      motor_pose[2 * i
                 + 1] = leg_pose[NUM_LEGS + i] + (-1)**(i // 2) * leg_pose[i]
return motor_pose
def _signal(self, t):
initial_pose = np.array([
INIT_SWING_POS, INIT_SWING_POS, INIT_SWING_POS, INIT_SWING_POS,
INIT_EXTENSION_POS, INIT_EXTENSION_POS, INIT_EXTENSION_POS,
INIT_EXTENSION_POS
])
amplitude = STEP_AMPLITUDE
period = STEP_PERIOD
extension = amplitude * (-1.0 + math.cos(2 * math.pi / period * t))
ith_leg = int(t / period) % 2
first_leg = np.array([0, 0, 0, 0, 0, extension, extension, 0])
second_leg = np.array([0, 0, 0, 0, extension, 0, 0, extension])
if ith_leg:
signal = initial_pose + second_leg
else:
signal = initial_pose + first_leg
return signal
def _transform_action_to_motor_command(self, action):
    # Add swing_offset and extension_offset to mimic the bent legs.
action[0:4] += self._swing_offset
action[4:8] += self._extension_offset
action += self._signal(self.minitaur.GetTimeSinceReset())
action = self._convert_from_leg_model(action)
return action
def is_fallen(self):
"""Decide whether the minitaur has fallen.
    If the angle between the base's up direction and the world's up direction is
    large (their dot product is smaller than 0.85), the minitaur is considered fallen.
Returns:
Boolean value that indicates whether the minitaur has fallen.
"""
orientation = self.minitaur.GetBaseOrientation()
rot_mat = self._pybullet_client.getMatrixFromQuaternion(orientation)
local_up = rot_mat[6:]
return np.dot(np.asarray([0, 0, 1]), np.asarray(local_up)) < 0.85
def _reward(self):
return 1.0
def _get_true_observation(self):
"""Get the true observations of this environment.
It includes the roll, the error between current pitch and desired pitch,
roll dot and pitch dot of the base.
Returns:
The observation list.
"""
observation = []
roll, pitch, _ = self.minitaur.GetTrueBaseRollPitchYaw()
roll_rate, pitch_rate, _ = self.minitaur.GetTrueBaseRollPitchYawRate()
observation.extend([roll, pitch, roll_rate, pitch_rate])
observation[1] -= self.desired_pitch # observation[1] is the pitch
self._true_observation = np.array(observation)
return self._true_observation
def _get_observation(self):
observation = []
roll, pitch, _ = self.minitaur.GetBaseRollPitchYaw()
roll_rate, pitch_rate, _ = self.minitaur.GetBaseRollPitchYawRate()
observation.extend([roll, pitch, roll_rate, pitch_rate])
observation[1] -= self.desired_pitch # observation[1] is the pitch
self._observation = np.array(observation)
return self._observation
def _get_observation_upper_bound(self):
"""Get the upper bound of the observation.
Returns:
The upper bound of an observation. See GetObservation() for the details
of each element of an observation.
"""
upper_bound = np.zeros(self._get_observation_dimension())
upper_bound[0:2] = 2 * math.pi # Roll, pitch, yaw of the base.
upper_bound[2:4] = 2 * math.pi / self._time_step # Roll, pitch, yaw rate.
return upper_bound
def _get_observation_lower_bound(self):
lower_bound = -self._get_observation_upper_bound()
return lower_bound
def set_swing_offset(self, value):
"""Set the swing offset of each leg.
It is to mimic the bent leg.
Args:
value: A list of four values.
"""
self._swing_offset = value
def set_extension_offset(self, value):
"""Set the extension offset of each leg.
It is to mimic the bent leg.
Args:
value: A list of four values.
"""
self._extension_offset = value
def set_desired_pitch(self, value):
"""Set the desired pitch of the base, which is a user input.
Args:
value: A scalar.
"""
self.desired_pitch = value
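# Illustrative sketch, not part of the original file: stepping the environment with a
# zero residual action so the open-loop _signal() gait drives the legs. The episode
# length and render flag are arbitrary assumptions.
if __name__ == "__main__":
  env = MinitaurAlternatingLegsEnv(render=False)
  observation = env.reset()
  for _ in range(10):
    observation, reward, done, _ = env.step(np.zeros(8))
    if done:
      break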
| 36.852
| 86
| 0.692934
|
fd4b1a480e4bb1d400fab0c8b01203c4b02dd856
| 4,489
|
py
|
Python
|
src/analyzers/MetadefenderCloud.py
|
lsoumille/file-analyzer
|
cabc7fb8b8727b35b6359cbcceb089f31228e1ef
|
[
"Apache-2.0"
] | null | null | null |
src/analyzers/MetadefenderCloud.py
|
lsoumille/file-analyzer
|
cabc7fb8b8727b35b6359cbcceb089f31228e1ef
|
[
"Apache-2.0"
] | null | null | null |
src/analyzers/MetadefenderCloud.py
|
lsoumille/file-analyzer
|
cabc7fb8b8727b35b6359cbcceb089f31228e1ef
|
[
"Apache-2.0"
] | null | null | null |
import json
import time
import urllib3
from src.analyzers.IAnalyzer import IAnalyzer
from src.utils.ConfigHelper import ConfigHelper
from src.utils.Constants import Constants
from src.utils.FileHelper import FileHelper
class MetadefenderCloud(IAnalyzer):
def __init__(self):
self.name = "Metadefender Cloud analyzer"
self.api_key = ""
self.file_path = ""
self.file_name = ""
self.data_id = ""
def get_conf(self, conf_file, file_path):
return (ConfigHelper.getMetadefenderCloud(conf_file), file_path)
def setup(self, tuple):
if tuple[0]:
self.api_key = tuple[0]
if tuple[1]:
self.file_path = tuple[1]
def analyze(self):
print("[*] Starting analyze of " + self.file_path)
fh = FileHelper()
self.file_name = fh.getFileName(self.file_path)
with open(self.file_path, "rb") as fp:
file_content = fp.read()
headers = {'apikey': self.api_key, 'user_agent': 'mcl-metadefender-rest-sanitize-disabled-unarchive'}
http = urllib3.PoolManager()
response = http.request('POST', Constants.METADEFENDER_SENDING_URL, headers=headers, body=file_content)
try:
self.data_id = self.handleScanResponse(response)
except Exception as error:
print("Error when uploading file to Metadefender Cloud platform : " + repr(error))
return
print("[*] File upload to Metadefender Cloud (scan id = " + self.data_id + ")")
def report(self, level):
if not self.data_id:
return (self.name, '')
# Retrieve scan result
header = {'apikey': self.api_key}
while True:
print("[*] Request Rapport (data id = " + self.data_id + ")")
http = urllib3.PoolManager()
response = http.request('GET', Constants.METADEFENDER_REPORT_URL + self.data_id, headers=header)
response_content = self.handleReportResponse(response, level)
if response_content != -1:
print("[!] Rapport received ")
return response_content
time.sleep(30)
def handleScanResponse(self, response):
if response.status != 200 or json.loads(response.data.decode())['status'] != 'inqueue':
raise Exception('Bad Response from Metadefender Cloud API: Check your internet connection or your API Key')
else:
return json.loads(response.data.decode())['data_id']
# Return -1 if the report is not available
# Else return the positive number
def handleReportResponse(self, response, level):
response_data = json.loads(response.data.decode())
if response.status != 200 or response_data['process_info']['result'] == 'Processing':
return -1
elif level == Constants.SHORT_REPORTING:
return self.createShortReport(response_data)
elif level == Constants.MEDIUM_REPORTING:
return self.createMediumReport(response_data)
else:
return self.createComprehensiveReport(response_data)
def createShortReport(self, response_data):
if response_data['scan_results']['total_detected_avs'] != 0:
return (self.name, True)
else:
return (self.name, False)
def createMediumReport(self, response_data):
if response_data['scan_results']['total_detected_avs'] != 0:
return (self.name, True)
else:
return (self.name, False)
def createComprehensiveReport(self, response_data):
content = self.metadefenderResultToStr(response_data)
if response_data['scan_results']['total_detected_avs'] != 0:
return (self.name, True, content)
else:
return (self.name, False, content)
def metadefenderResultToStr(self, response_data):
str_res = "[*] Metadefender Cloud report:\n"
for scanner in response_data['scan_results']['scan_details']:
str_res += "> " + scanner + " : [ Detected: " + str(
response_data['scan_results']['scan_details'][scanner]['scan_result_i']) + " | Result: " + str(
response_data['scan_results']['scan_details'][scanner]['threat_found']) + " ]\n"
return str_res + "Metadefender platform detects " + str(
response_data['scan_results']['total_detected_avs']) + " positive results for " + self.file_name + "\n[*] Metadefender Cloud report end"
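# Illustrative sketch, not part of the original module: the API key and file path
# below are placeholders showing the intended setup -> analyze -> report flow.
if __name__ == "__main__":
    scanner = MetadefenderCloud()
    scanner.setup(("YOUR_API_KEY", "/tmp/sample.bin"))
    scanner.analyze()
    print(scanner.report(Constants.SHORT_REPORTING))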
| 41.183486
| 148
| 0.63288
|
bd4d83c5c6eaafdb57e559c4a611f8ea6811cd59
| 3,618
|
py
|
Python
|
exercises/ex8-1.py
|
missgreenwood/twisted-intro
|
5c178375fb33f038d4262324201da20af213027a
|
[
"MIT"
] | null | null | null |
exercises/ex8-1.py
|
missgreenwood/twisted-intro
|
5c178375fb33f038d4262324201da20af213027a
|
[
"MIT"
] | null | null | null |
exercises/ex8-1.py
|
missgreenwood/twisted-intro
|
5c178375fb33f038d4262324201da20af213027a
|
[
"MIT"
] | null | null | null |
# Solution to exercise 8-1a:
# Timeout if the poem isn't received after a given period of time
# Fire the deferred's errback with a custom exception in that case
# Close the connection in that case
import optparse, sys
from twisted.internet import defer
from twisted.internet.protocol import Protocol, ClientFactory
class TimeoutError(Exception): # custom exception (that does nothing)
pass
def parse_args():
usage = """usage: %prog [options] [hostname]:port ...
"""
parser = optparse.OptionParser(usage)
help = "Timeout in seconds."
parser.add_option('-t', '--timeout', type='float', help=help, default=5.0)
options, addresses = parser.parse_args()
if not addresses:
print parser.format_help()
parser.exit()
def parse_address(addr):
if ':' not in addr:
host = '127.0.0.1'
port = addr
else:
host, port = addr.split(':', 1)
if not port.isdigit():
parser.error('Ports must be integers.')
return host, int(port)
return map(parse_address, addresses), options
class PoetryProtocol(Protocol):
poem = ''
def dataReceived(self, data):
self.poem += data
def connectionLost(self, reason):
self.poemReceived(self.poem)
def poemReceived(self, poem):
self.factory.poem_finished(poem)
class PoetryClientFactory(ClientFactory):
protocol = PoetryProtocol
def __init__(self, deferred, timeout):
self.deferred = deferred
self.timeout = timeout
self.timeout_call = None
def startedConnecting(self, connector):
from twisted.internet import reactor
self.timeout_call = reactor.callLater(self.timeout, self.on_timeout, connector) # cancel download if it does not finish within given timeout
def poem_finished(self, poem):
if self.deferred is not None:
d, self.deferred = self.deferred, None
d.callback(poem)
self.cancel_timeout()
def clientConnectionFailed(self, connector, reason):
if self.deferred is not None:
d, self.deferred = self.deferred, None
d.errback(reason)
self.cancel_timeout()
def on_timeout(self, connector):
self.timeout_call = None
if self.deferred is not None:
d, self.deferred = self.deferred, None
d.errback(TimeoutError()) # fire custom exception if poem download failed
connector.disconnect() # close connection if poem download failed
def cancel_timeout(self):
if self.timeout_call is not None:
call, self.timeout_call = self.timeout_call, None
call.cancel()
def get_poetry(host, port, timeout):
d = defer.Deferred()
from twisted.internet import reactor
factory = PoetryClientFactory(d, timeout)
reactor.connectTCP(host, port, factory)
return d
def poetry_main():
addresses, options = parse_args()
from twisted.internet import reactor
poems = []
errors = []
def got_poem(poem):
poems.append(poem)
def poem_failed(err):
print >> sys.stderr, 'Poem failed: ', err
errors.append(err)
def poem_done(_):
if len(poems) + len(errors) == len(addresses):
reactor.stop()
for address in addresses:
host, port = address
d = get_poetry(host, port, options.timeout)
d.addCallbacks(got_poem, poem_failed)
d.addBoth(poem_done)
reactor.run()
for poem in poems:
print(poem)
if __name__ == '__main__':
poetry_main()
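# Illustrative usage (the ports below are assumptions): start one or more of the
# twisted-intro poetry servers first, then run e.g.
#   python ex8-1.py --timeout 3 10000 10001
# Any server that takes longer than the timeout fires the TimeoutError errback and
# its connection is dropped.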
| 27.830769
| 150
| 0.63654
|
f1d0538107fdcaf92c5ab4f500f502655d085824
| 173
|
py
|
Python
|
regtools/lag/__init__.py
|
nickderobertis/regtools
|
f0727195a38716a22e18d4f2c99f3b1cbf48cac8
|
[
"MIT"
] | null | null | null |
regtools/lag/__init__.py
|
nickderobertis/regtools
|
f0727195a38716a22e18d4f2c99f3b1cbf48cac8
|
[
"MIT"
] | 11
|
2020-01-25T23:33:30.000Z
|
2022-03-12T00:30:36.000Z
|
regtools/lag/__init__.py
|
nickderobertis/regtools
|
f0727195a38716a22e18d4f2c99f3b1cbf48cac8
|
[
"MIT"
] | null | null | null |
from .create import create_lagged_variables, _convert_interaction_tuples, _convert_variable_names, _set_lag_variables
from .remove import remove_lag_names_from_reg_results
| 43.25
| 117
| 0.901734
|
0baef01af0d7f94df25d57fd0a7755ae3bd4831f
| 3,323
|
py
|
Python
|
image-classification/imagenet/models/resnext.py
|
AaratiAkkapeddi/nnabla-examples
|
db9e5ad850303c158773aeb275e5c3821b4a3935
|
[
"Apache-2.0"
] | 228
|
2017-11-20T06:05:56.000Z
|
2022-03-23T12:40:05.000Z
|
image-classification/imagenet/models/resnext.py
|
AaratiAkkapeddi/nnabla-examples
|
db9e5ad850303c158773aeb275e5c3821b4a3935
|
[
"Apache-2.0"
] | 36
|
2018-01-11T23:26:20.000Z
|
2022-03-12T00:53:38.000Z
|
image-classification/imagenet/models/resnext.py
|
AaratiAkkapeddi/nnabla-examples
|
db9e5ad850303c158773aeb275e5c3821b4a3935
|
[
"Apache-2.0"
] | 76
|
2017-11-22T22:00:00.000Z
|
2022-03-28T05:58:57.000Z
|
# Copyright 2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla as nn
import nnabla.functions as F
from .base import (
ResNetBase,
BottleneckBlock,
pf_convolution,
get_channel_axis,
shortcut,
)
from . import registry
class ResNeXtBottleneckBlock(BottleneckBlock):
'''
Bottleneck block for ResNeXt
Args:
cardinality (int):
Specifies group size of the second convolution at bottleneck.
The default is 32 which is used in the paper.
See `BottleneckBlock` for other details.
'''
def __init__(self, cardinality=32, shortcut_type='b', test=True, channel_last=False):
super(ResNeXtBottleneckBlock, self).__init__(
shortcut_type, test, channel_last)
self.cardinality = cardinality
def __call__(self, x, ochannels, stride):
div = 2
assert ochannels % div == 0
hchannels = ochannels // div
with nn.parameter_scope("bottleneck1"):
h = self.bn(pf_convolution(x, hchannels, (1, 1), **self.conv_opts))
with nn.parameter_scope("bottleneck2"):
h = self.bn(pf_convolution(h, hchannels, (3, 3),
stride=stride,
group=self.cardinality,
**self.conv_opts))
with nn.parameter_scope("bottleneck3"):
h = pf_convolution(h, ochannels, (1, 1), **self.conv_opts)
with nn.parameter_scope("bottleneck_s"):
s = shortcut(x, ochannels, stride, self.shortcut_type,
self.test, channel_last=self.channel_last)
with nn.parameter_scope("bottleneck3"): # backward compat.
h = self.bn(h, s)
return h
class ResNeXt(ResNetBase):
'''
A class which defines ResNeXt.
    With a ResNet-based architecture, `ResNeXtBottleneckBlock` is used for blocks
    in a cell instead of `BottleneckBlock`.
Args:
cardinality (int): See `ResNeXtBottleneckBlock`.
See `ResNetBase` for other details.
'''
def __init__(self, num_classes=1000, num_layers=50, cardinality=32,
test=True, channel_last=False):
block = ResNeXtBottleneckBlock(cardinality, 'b', test, channel_last)
super(ResNeXt, self).__init__(
num_classes, num_layers, max_pooling_ceil_border=True,
block=block, test=test, channel_last=channel_last)
def resnext50(x, num_classes=1000, test=True, channel_last=False):
'''
Defines ResNeXt50.
See `ResNeXt` for more details.
'''
net = ResNeXt(num_classes, 50, test=test, channel_last=channel_last)
return net(x)
# Register arch functions
registry.register_arch_fn('resnext50', resnext50)
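# Minimal sketch, not part of the original file: building ResNeXt-50 on a dummy NCHW
# input Variable. The input shape is an arbitrary assumption and the exact return
# value is whatever ResNetBase.__call__ produces for the constructed graph.
if __name__ == '__main__':
    x = nn.Variable((1, 3, 224, 224))
    out = resnext50(x, num_classes=1000, test=True)
    print(out)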
| 32.262136
| 89
| 0.652723
|
8d9017e92eee5163fc24782c6069e755e0abd799
| 962
|
py
|
Python
|
stage1/thread_demo/demo2.py
|
kaixiang1992/python-review
|
7f4f82b453f81b47af7ab1e8f3b3d07d1d75cbe4
|
[
"MIT"
] | null | null | null |
stage1/thread_demo/demo2.py
|
kaixiang1992/python-review
|
7f4f82b453f81b47af7ab1e8f3b3d07d1d75cbe4
|
[
"MIT"
] | null | null | null |
stage1/thread_demo/demo2.py
|
kaixiang1992/python-review
|
7f4f82b453f81b47af7ab1e8f3b3d07d1d75cbe4
|
[
"MIT"
] | null | null | null |
"""
2019/12/12 22:11
150.【多线程】使用Thread类创建多线程(线程)
"""
"""
查看线程数:
使用`threading.enumerate()`函数可以看到当前线程的数量.
查看当前线程的名字:
使用`threading.currentThread()`可以看到当前线程的信息.
继承自`threading.Thread`类:
为了让线程代码更好的封装。可以使用threading模块下的Thread类,继承自这个类,
然后实现run方法,线程就会自动运行run方法中的代码。
"""
import threading
import time
class codingThread(threading.Thread):
def run(self):
for x in range(0, 3):
            print('Writing code %s' % threading.currentThread())
time.sleep(1)
class drawingThread(threading.Thread):
def run(self):
for x in range(0, 3):
            print('Drawing %s' % threading.currentThread())
time.sleep(1)
if __name__ == '__main__':
coding = codingThread()
drawing = drawingThread()
coding.start()
drawing.start()
# TODO: [<_MainThread(MainThread, started 13016)>, <codingThread(Thread-1, started 3300)>, <drawingThread(
# Thread-2, started 8452)>]
    print('Current total number of threads: %s' % threading.enumerate())
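    # Illustrative extension (not part of the original lesson): join() blocks until
    # each worker thread finishes, so the main thread is guaranteed to exit last.
    coding.join()
    drawing.join()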
| 19.24
| 110
| 0.656965
|
ac80e80daa00c261cbfeb4767cbb95588286da89
| 6,583
|
py
|
Python
|
jupytext/languages.py
|
lclem/jupytext
|
145ae0ab67e60c46bea26e168c671bebaf98c38c
|
[
"MIT"
] | null | null | null |
jupytext/languages.py
|
lclem/jupytext
|
145ae0ab67e60c46bea26e168c671bebaf98c38c
|
[
"MIT"
] | null | null | null |
jupytext/languages.py
|
lclem/jupytext
|
145ae0ab67e60c46bea26e168c671bebaf98c38c
|
[
"MIT"
] | null | null | null |
"""Determine notebook or cell language"""
# Jupyter magic commands that are also languages
_JUPYTER_LANGUAGES = ['R', 'bash', 'sh', 'python', 'python2', 'python3', 'javascript', 'js', 'perl',
'html', 'latex', 'markdown', 'pypy', 'ruby', 'script', 'svg',
'matlab', 'octave', 'idl', 'robotframework', 'spark', 'sql']
# Supported file extensions (and languages)
# Please add more languages here (and add a few tests) - see CONTRIBUTING.md
_SCRIPT_EXTENSIONS = {'.py': {'language': 'python', 'comment': '#'},
'.R': {'language': 'R', 'comment': '#'},
'.r': {'language': 'R', 'comment': '#'},
'.jl': {'language': 'julia', 'comment': '#'},
'.cpp': {'language': 'c++', 'comment': '//'},
'.ss': {'language': 'scheme', 'comment': ';;'},
'.clj': {'language': 'clojure', 'comment': ';;'},
'.scm': {'language': 'scheme', 'comment': ';;'},
'.sh': {'language': 'bash', 'comment': '#'},
'.ps1': {'language': 'powershell', 'comment': '#'},
'.q': {'language': 'q', 'comment': '/'},
'.m': {'language': 'matlab', 'comment': '%'},
'.pro': {'language': 'idl', 'comment': ';'},
'.js': {'language': 'javascript', 'comment': '//'},
'.ts': {'language': 'typescript', 'comment': '//'},
'.scala': {'language': 'scala', 'comment': '//'},
'.rs': {'language': 'rust', 'comment': '//'},
'.robot': {'language': 'robotframework', 'comment': '#'},
'.cs': {'language': 'csharp', 'comment': '//'},
'.fsx': {'language': 'fsharp', 'comment': '//'},
'.fs': {'language': 'fsharp', 'comment': '//'},
'.sos': {'language': 'sos', 'comment': '#'},
'.agda': {'language': 'agda', 'comment': '--'},
}
_COMMENT_CHARS = [_SCRIPT_EXTENSIONS[ext]['comment'] for ext in _SCRIPT_EXTENSIONS if
_SCRIPT_EXTENSIONS[ext]['comment'] != '#']
_COMMENT = {_SCRIPT_EXTENSIONS[ext]['language']: _SCRIPT_EXTENSIONS[ext]['comment'] for ext in _SCRIPT_EXTENSIONS}
_JUPYTER_LANGUAGES = set(_JUPYTER_LANGUAGES).union(_COMMENT.keys()).union(['c#', 'f#', 'cs', 'fs'])
_JUPYTER_LANGUAGES_LOWER_AND_UPPER = _JUPYTER_LANGUAGES.union({str.upper(lang) for lang in _JUPYTER_LANGUAGES})
def default_language_from_metadata_and_ext(metadata, ext, pop_main_language=False):
"""Return the default language given the notebook metadata, and a file extension"""
default_from_ext = _SCRIPT_EXTENSIONS.get(ext, {}).get('language')
main_language = metadata.get('jupytext', {}).get('main_language')
default_language = metadata.get('kernelspec', {}).get('language') or default_from_ext
language = main_language or default_language
if main_language is not None and main_language == default_language and pop_main_language:
metadata['jupytext'].pop('main_language')
if language is None or language == 'R':
return language
if language.startswith('C++'):
return 'c++'
return language.lower().replace('#', 'sharp')
def usual_language_name(language):
"""Return the usual language name (one that may be found in _SCRIPT_EXTENSIONS above)"""
language = language.lower()
if language == 'r':
return 'R'
if language.startswith('c++'):
return 'c++'
if language == 'octave':
return 'matlab'
if language in ['cs', 'c#']:
return 'csharp'
if language in ['fs', 'f#']:
return 'fsharp'
return language
def same_language(kernel_language, language):
"""Are those the same language?"""
return usual_language_name(kernel_language) == usual_language_name(language)
def set_main_and_cell_language(metadata, cells, ext):
"""Set main language for the given collection of cells, and
use magics for cells that use other languages"""
main_language = default_language_from_metadata_and_ext(metadata, ext)
if main_language is None:
languages = {'python': 0.5}
for cell in cells:
if 'language' in cell['metadata']:
language = usual_language_name(cell['metadata']['language'])
languages[language] = languages.get(language, 0.0) + 1
main_language = max(languages, key=languages.get)
# save main language when no kernel is set
if 'language' not in metadata.get('kernelspec', {}) and cells:
metadata.setdefault('jupytext', {})['main_language'] = main_language
# Remove 'language' meta data and add a magic if not main language
for cell in cells:
if 'language' in cell['metadata']:
language = cell['metadata']['language']
if language == main_language:
cell['metadata'].pop('language')
continue
if usual_language_name(language) == main_language:
continue
if language in _JUPYTER_LANGUAGES:
cell['metadata'].pop('language')
magic = '%%' if main_language != 'csharp' else '#!'
if 'magic_args' in cell['metadata']:
magic_args = cell['metadata'].pop('magic_args')
cell['source'] = u'{}{} {}\n'.format(magic, language, magic_args) + cell['source']
else:
cell['source'] = u'{}{}\n'.format(magic, language) + cell['source']
def cell_language(source, default_language):
"""Return cell language and language options, if any"""
if source:
line = source[0]
if default_language == 'csharp':
if line.startswith('#!'):
lang = line[2:].strip()
if lang in _JUPYTER_LANGUAGES:
source.pop(0)
return lang, ''
elif line.startswith('%%'):
magic = line[2:]
if ' ' in magic:
lang, magic_args = magic.split(' ', 1)
else:
lang = magic
magic_args = ''
if lang in _JUPYTER_LANGUAGES:
source.pop(0)
return lang, magic_args
return None, None
def comment_lines(lines, prefix):
"""Return commented lines"""
if not prefix:
return lines
return [prefix + ' ' + line if line else prefix for line in lines]
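# Minimal sketch, not part of the original module: the metadata dict below is a
# made-up example showing how the helpers above resolve languages and comments.
if __name__ == "__main__":
    metadata = {"kernelspec": {"language": "python"}}
    print(default_language_from_metadata_and_ext(metadata, ".py"))  # -> python
    print(usual_language_name("Octave"))  # -> matlab
    print(comment_lines(["first line", ""], "#"))  # -> ['# first line', '#']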
| 43.026144
| 114
| 0.544736
|
e2120d456b47b78f1867f5c6c24d178132c81702
| 2,049
|
py
|
Python
|
pypowervm/tests/utils/test_uuid.py
|
stephenfin/pypowervm
|
68f2b586b4f17489f379534ab52fc56a524b6da5
|
[
"Apache-2.0"
] | 24
|
2015-12-02T19:49:45.000Z
|
2021-11-17T11:43:51.000Z
|
pypowervm/tests/utils/test_uuid.py
|
stephenfin/pypowervm
|
68f2b586b4f17489f379534ab52fc56a524b6da5
|
[
"Apache-2.0"
] | 18
|
2017-03-01T05:54:25.000Z
|
2022-03-14T17:32:47.000Z
|
pypowervm/tests/utils/test_uuid.py
|
stephenfin/pypowervm
|
68f2b586b4f17489f379534ab52fc56a524b6da5
|
[
"Apache-2.0"
] | 17
|
2016-02-10T22:53:04.000Z
|
2021-11-10T09:47:10.000Z
|
# Copyright 2015, 2018 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from pypowervm.utils import uuid as uuid_utils
import unittest
class TestUUID(unittest.TestCase):
"""Unit tests for the uuid."""
def test_uuid_conversion(self):
uuid = '089ffb20-5d19-4a8c-bb80-13650627d985'
pvm_uuid = uuid_utils.convert_uuid_to_pvm(uuid)
self.assertEqual(uuid, pvm_uuid)
uuid = '989ffb20-5d19-4a8c-bb80-13650627d985'
pvm_uuid = uuid_utils.convert_uuid_to_pvm(uuid)
self.assertEqual('1' + uuid[1:], pvm_uuid)
uuid = 'c89ffb20-5d19-4a8c-bb80-13650627d985'
pvm_uuid = uuid_utils.convert_uuid_to_pvm(uuid)
self.assertEqual('4' + uuid[1:], pvm_uuid)
def test_id_or_uuid(self):
self.assertEqual((False, 123), uuid_utils.id_or_uuid(123))
# Test all stringish permutations
converters = [lambda x: x, six.text_type]
for conv in converters:
self.assertEqual((False, 123), uuid_utils.id_or_uuid(conv('123')))
uuid = conv('12345678-abcd-ABCD-0000-0a1B2c3D4e5F')
self.assertEqual((True, uuid), uuid_utils.id_or_uuid(uuid))
uuid = conv('12345678abcdABCD00000a1B2c3D4e5F')
self.assertEqual((True, uuid), uuid_utils.id_or_uuid(uuid))
# This one has too many digits
self.assertRaises(ValueError, uuid_utils.id_or_uuid,
conv('12345678-abcd-ABCD-0000-0a1B2c3D4e5F0'))
| 37.944444
| 78
| 0.676428
|
0c9e383c44731873cb061f35e8dcf11780037955
| 1,405
|
py
|
Python
|
crispy_forms/tests/test_utils.py
|
furkanx007/django-crispy-forms
|
77f4ea7edd3d8a4cd21f20bbe7fbce1511248e10
|
[
"MIT"
] | null | null | null |
crispy_forms/tests/test_utils.py
|
furkanx007/django-crispy-forms
|
77f4ea7edd3d8a4cd21f20bbe7fbce1511248e10
|
[
"MIT"
] | 3
|
2019-11-22T22:15:28.000Z
|
2021-04-08T12:42:56.000Z
|
crispy_forms/tests/test_utils.py
|
furkanx007/django-crispy-forms
|
77f4ea7edd3d8a4cd21f20bbe7fbce1511248e10
|
[
"MIT"
] | null | null | null |
import pytest
import django
from django import forms
from django.template.base import Context, Template
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout
from crispy_forms.utils import list_difference, list_intersection, render_field
def test_list_intersection():
assert list_intersection([1, 3], [2, 3]) == [3]
def test_list_difference():
assert list_difference([3, 1, 2, 3], [4, 1, ]) == [3, 2]
def test_render_field_with_none_field():
rendered = render_field(field=None, form=None, form_style=None, context=None)
assert rendered == ''
def test_custom_bound_field():
from django.forms.boundfield import BoundField
extra = 'xyxyxyxyxyx'
class CustomBoundField(BoundField):
@property
def auto_id(self):
return extra
class MyCharField(forms.CharField):
def get_bound_field(self, form, field_name):
return CustomBoundField(form, self, field_name)
class MyForm(forms.Form):
f = MyCharField()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout('f')
template = Template('{% load crispy_forms_tags %}\n{% crispy form "bootstrap3" %}')
rendered = template.render(Context({'form': MyForm(data={'f': 'something'})}))
assert extra in rendered
| 27.54902
| 87
| 0.680427
|
db05c50cac396b212a9a7c49d08daac79cf41a0d
| 22,526
|
py
|
Python
|
withings_api/common.py
|
ademasi/python_withings_api
|
374fa985b383dea721a74eb4086a397ff26b45a1
|
[
"MIT"
] | null | null | null |
withings_api/common.py
|
ademasi/python_withings_api
|
374fa985b383dea721a74eb4086a397ff26b45a1
|
[
"MIT"
] | null | null | null |
withings_api/common.py
|
ademasi/python_withings_api
|
374fa985b383dea721a74eb4086a397ff26b45a1
|
[
"MIT"
] | null | null | null |
"""Common classes and functions."""
from dataclasses import dataclass
from datetime import tzinfo
from enum import Enum, IntEnum
import logging
from typing import Any, Dict, Optional, Tuple, Type, TypeVar, Union, cast
import arrow
from arrow import Arrow
from dateutil import tz
from dateutil.tz import tzlocal
from pydantic import BaseModel, Field, validator
from typing_extensions import Final
from .const import (
LOG_NAMESPACE,
STATUS_AUTH_FAILED,
STATUS_BAD_STATE,
STATUS_ERROR_OCCURRED,
STATUS_INVALID_PARAMS,
STATUS_SUCCESS,
STATUS_TIMEOUT,
STATUS_TOO_MANY_REQUESTS,
STATUS_UNAUTHORIZED,
)
_LOGGER = logging.getLogger(LOG_NAMESPACE)
_GenericType = TypeVar("_GenericType")
def to_enum(
enum_class: Type[_GenericType], value: Any, default_value: _GenericType
) -> _GenericType:
"""Attempt to convert a value to an enum."""
try:
return enum_class(value) # type: ignore
except ValueError:
_LOGGER.warning(
"Unsupported %s value %s. Replacing with UNKNOWN value %s. Please report this warning to the developer to ensure proper support.",
str(enum_class),
value,
str(default_value),
)
return default_value
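# Illustrative note, not part of the original module: for example,
# to_enum(SleepState, 1, SleepState.UNKNOWN) (with SleepState defined below) returns
# SleepState.LIGHT, while an unrecognized value such as 99 logs a warning and falls
# back to SleepState.UNKNOWN.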
class ConfiguredBaseModel(BaseModel):
"""An already configured pydantic model."""
class Config:
"""Config for pydantic model."""
ignore_extra: Final = True
allow_extra: Final = False
allow_mutation: Final = False
class TimeZone(tzlocal):
"""Subclass of tzinfo for parsing timezones."""
@classmethod
def __get_validators__(cls) -> Any:
# one or more validators may be yielded which will be called in the
# order to validate the input, each validator will receive as an input
# the value returned from the previous validator
yield cls.validate
@classmethod
def validate(cls, value: Any) -> tzinfo:
"""Convert input to the desired object."""
if isinstance(value, tzinfo):
return value
if isinstance(value, str):
timezone: Final = tz.gettz(value)
if timezone:
return timezone
raise ValueError(f"Invalid timezone provided {value}")
raise TypeError("string or tzinfo required")
class ArrowType(Arrow):
"""Subclass of Arrow for parsing dates."""
@classmethod
def __get_validators__(cls) -> Any:
# one or more validators may be yielded which will be called in the
# order to validate the input, each validator will receive as an input
# the value returned from the previous validator
yield cls.validate
@classmethod
def validate(cls, value: Any) -> Arrow:
"""Convert input to the desired object."""
if isinstance(value, str):
if value.isdigit():
return arrow.get(int(value))
return arrow.get(value)
if isinstance(value, int):
return arrow.get(value)
if isinstance(value, (Arrow, ArrowType)):
return value
raise TypeError("string or int required")
class SleepModel(IntEnum):
"""Sleep model."""
UNKNOWN = -999999
TRACKER = 16
SLEEP_MONITOR = 32
class SleepState(IntEnum):
"""Sleep states."""
UNKNOWN = -999999
AWAKE = 0
LIGHT = 1
DEEP = 2
REM = 3
class MeasureGetMeasGroupAttrib(IntEnum):
"""Measure group attributions."""
UNKNOWN = -1
DEVICE_ENTRY_FOR_USER = 0
DEVICE_ENTRY_FOR_USER_AMBIGUOUS = 1
MANUAL_USER_ENTRY = 2
MANUAL_USER_DURING_ACCOUNT_CREATION = 4
MEASURE_AUTO = 5
MEASURE_USER_CONFIRMED = 7
SAME_AS_DEVICE_ENTRY_FOR_USER = 8
class MeasureGetMeasGroupCategory(IntEnum):
"""Measure categories."""
UNKNOWN = -999999
REAL = 1
USER_OBJECTIVES = 2
class MeasureType(IntEnum):
"""Measure types."""
UNKNOWN = -999999
WEIGHT = 1
HEIGHT = 4
FAT_FREE_MASS = 5
FAT_RATIO = 6
FAT_MASS_WEIGHT = 8
DIASTOLIC_BLOOD_PRESSURE = 9
SYSTOLIC_BLOOD_PRESSURE = 10
HEART_RATE = 11
TEMPERATURE = 12
SP02 = 54
BODY_TEMPERATURE = 71
SKIN_TEMPERATURE = 73
MUSCLE_MASS = 76
HYDRATION = 77
BONE_MASS = 88
PULSE_WAVE_VELOCITY = 91
VO2 = 123
QRS_INTERVAL = 135
PR_INTERVAL = 136
QT_INTERVAL = 138
ATRIAL_FIBRILLATION = 139
class NotifyAppli(IntEnum):
"""Data to notify_subscribe to."""
UNKNOWN = -999999
WEIGHT = 1
CIRCULATORY = 4
ACTIVITY = 16
SLEEP = 44
USER = 46
BED_IN = 50
BED_OUT = 51
class GetActivityField(Enum):
"""Fields for the getactivity api call."""
STEPS = "steps"
DISTANCE = "distance"
ELEVATION = "elevation"
SOFT = "soft"
MODERATE = "moderate"
INTENSE = "intense"
ACTIVE = "active"
CALORIES = "calories"
TOTAL_CALORIES = "totalcalories"
HR_AVERAGE = "hr_average"
HR_MIN = "hr_min"
HR_MAX = "hr_max"
HR_ZONE_0 = "hr_zone_0"
HR_ZONE_1 = "hr_zone_1"
HR_ZONE_2 = "hr_zone_2"
HR_ZONE_3 = "hr_zone_3"
class GetSleepField(Enum):
"""Fields for getsleep api call."""
HR = "hr"
RR = "rr"
SNORING = "snoring"
class GetSleepSummaryField(Enum):
"""Fields for get sleep summary api call."""
BREATHING_DISTURBANCES_INTENSITY = "breathing_disturbances_intensity"
DEEP_SLEEP_DURATION = "deepsleepduration"
DURATION_TO_SLEEP = "durationtosleep"
DURATION_TO_WAKEUP = "durationtowakeup"
HR_AVERAGE = "hr_average"
HR_MAX = "hr_max"
HR_MIN = "hr_min"
LIGHT_SLEEP_DURATION = "lightsleepduration"
REM_SLEEP_DURATION = "remsleepduration"
RR_AVERAGE = "rr_average"
RR_MAX = "rr_max"
RR_MIN = "rr_min"
SLEEP_SCORE = "sleep_score"
SNORING = "snoring"
SNORING_EPISODE_COUNT = "snoringepisodecount"
WAKEUP_COUNT = "wakeupcount"
WAKEUP_DURATION = "wakeupduration"
class AuthScope(Enum):
"""Authorization scopes."""
USER_INFO = "user.info"
USER_METRICS = "user.metrics"
USER_ACTIVITY = "user.activity"
USER_SLEEP_EVENTS = "user.sleepevents"
class UserGetDeviceDevice(ConfiguredBaseModel):
"""UserGetDeviceDevice."""
type: str
model: str
battery: str
deviceid: str
timezone: TimeZone
class UserGetDeviceResponse(ConfiguredBaseModel):
"""UserGetDeviceResponse."""
devices: Tuple[UserGetDeviceDevice, ...]
class SleepGetTimestampValue(ConfiguredBaseModel):
"""SleepGetTimestampValue."""
timestamp: ArrowType
value: int
class SleepGetSerie(ConfiguredBaseModel):
"""SleepGetSerie."""
enddate: ArrowType
startdate: ArrowType
state: SleepState
hr: Tuple[SleepGetTimestampValue, ...] = () # pylint: disable=invalid-name
rr: Tuple[SleepGetTimestampValue, ...] = () # pylint: disable=invalid-name
snoring: Tuple[SleepGetTimestampValue, ...] = ()
@validator("hr", pre=True)
@classmethod
def _hr_to_tuple(cls, value: Dict[str, int]) -> Tuple:
return SleepGetSerie._timestamp_value_to_object(value)
@validator("rr", pre=True)
@classmethod
def _rr_to_tuple(cls, value: Dict[str, int]) -> Tuple:
return SleepGetSerie._timestamp_value_to_object(value)
@validator("snoring", pre=True)
@classmethod
def _snoring_to_tuple(cls, value: Dict[str, int]) -> Tuple:
return SleepGetSerie._timestamp_value_to_object(value)
@classmethod
def _timestamp_value_to_object(
cls, value: Any
) -> Tuple[SleepGetTimestampValue, ...]:
if not value:
return ()
if isinstance(value, dict):
return tuple(
[
SleepGetTimestampValue(timestamp=item_key, value=item_value)
for item_key, item_value in value.items()
]
)
return cast(Tuple[SleepGetTimestampValue, ...], value)
@validator("state", pre=True)
@classmethod
def _state_to_enum(cls, value: Any) -> SleepState:
return to_enum(SleepState, value, SleepState.UNKNOWN)
class SleepGetResponse(ConfiguredBaseModel):
"""SleepGetResponse."""
model: SleepModel
series: Tuple[SleepGetSerie, ...]
@validator("model", pre=True)
@classmethod
def _model_to_enum(cls, value: Any) -> SleepModel:
return to_enum(SleepModel, value, SleepModel.UNKNOWN)
class GetSleepSummaryData(
ConfiguredBaseModel
): # pylint: disable=too-many-instance-attributes
"""GetSleepSummaryData."""
breathing_disturbances_intensity: Optional[int]
deepsleepduration: Optional[int]
durationtosleep: Optional[int]
durationtowakeup: Optional[int]
hr_average: Optional[int]
hr_max: Optional[int]
hr_min: Optional[int]
lightsleepduration: Optional[int]
remsleepduration: Optional[int]
rr_average: Optional[int]
rr_max: Optional[int]
rr_min: Optional[int]
sleep_score: Optional[int]
snoring: Optional[int]
snoringepisodecount: Optional[int]
wakeupcount: Optional[int]
wakeupduration: Optional[int]
class GetSleepSummarySerie(ConfiguredBaseModel):
"""GetSleepSummarySerie."""
timezone: TimeZone
model: SleepModel
startdate: ArrowType
enddate: ArrowType
date: ArrowType
modified: ArrowType
data: GetSleepSummaryData
id: Optional[int] = None
@validator("startdate")
@classmethod
def _set_timezone_on_startdate(
cls, value: ArrowType, values: Dict[str, Any]
) -> Arrow:
return cast(Arrow, value.to(values["timezone"]))
@validator("enddate")
@classmethod
def _set_timezone_on_enddate(
cls, value: ArrowType, values: Dict[str, Any]
) -> Arrow:
return cast(Arrow, value.to(values["timezone"]))
@validator("date")
@classmethod
def _set_timezone_on_date(cls, value: ArrowType, values: Dict[str, Any]) -> Arrow:
return cast(Arrow, value.to(values["timezone"]))
@validator("modified")
@classmethod
def _set_timezone_on_modified(
cls, value: ArrowType, values: Dict[str, Any]
) -> Arrow:
return cast(Arrow, value.to(values["timezone"]))
@validator("model", pre=True)
@classmethod
def _model_to_enum(cls, value: Any) -> SleepModel:
return to_enum(SleepModel, value, SleepModel.UNKNOWN)
class SleepGetSummaryResponse(ConfiguredBaseModel):
"""SleepGetSummaryResponse."""
more: bool
offset: int
series: Tuple[GetSleepSummarySerie, ...]
class MeasureGetMeasMeasure(ConfiguredBaseModel):
"""MeasureGetMeasMeasure."""
type: MeasureType
unit: int
value: int
@validator("type", pre=True)
@classmethod
def _type_to_enum(cls, value: Any) -> MeasureType:
return to_enum(MeasureType, value, MeasureType.UNKNOWN)
class MeasureGetMeasGroup(ConfiguredBaseModel):
"""MeasureGetMeasGroup."""
attrib: MeasureGetMeasGroupAttrib
category: MeasureGetMeasGroupCategory
created: ArrowType
date: ArrowType
deviceid: Optional[str]
grpid: int
measures: Tuple[MeasureGetMeasMeasure, ...]
@validator("attrib", pre=True)
@classmethod
def _attrib_to_enum(cls, value: Any) -> MeasureGetMeasGroupAttrib:
return to_enum(
MeasureGetMeasGroupAttrib, value, MeasureGetMeasGroupAttrib.UNKNOWN
)
@validator("category", pre=True)
@classmethod
def _category_to_enum(cls, value: Any) -> MeasureGetMeasGroupCategory:
return to_enum(
MeasureGetMeasGroupCategory, value, MeasureGetMeasGroupCategory.UNKNOWN
)
class MeasureGetMeasResponse(ConfiguredBaseModel):
"""MeasureGetMeasResponse."""
measuregrps: Tuple[MeasureGetMeasGroup, ...]
more: Optional[bool]
offset: Optional[int]
timezone: TimeZone
updatetime: ArrowType
@validator("updatetime")
@classmethod
def _set_timezone_on_updatetime(
cls, value: ArrowType, values: Dict[str, Any]
) -> Arrow:
return cast(Arrow, value.to(values["timezone"]))
class MeasureGetActivityActivity(
BaseModel
): # pylint: disable=too-many-instance-attributes
"""MeasureGetActivityActivity."""
date: ArrowType
timezone: TimeZone
deviceid: Optional[str]
brand: int
is_tracker: bool
steps: Optional[int]
distance: Optional[float]
elevation: Optional[float]
soft: Optional[int]
moderate: Optional[int]
intense: Optional[int]
active: Optional[int]
calories: Optional[float]
totalcalories: float
hr_average: Optional[int]
hr_min: Optional[int]
hr_max: Optional[int]
hr_zone_0: Optional[int]
hr_zone_1: Optional[int]
hr_zone_2: Optional[int]
hr_zone_3: Optional[int]
class MeasureGetActivityResponse(ConfiguredBaseModel):
"""MeasureGetActivityResponse."""
activities: Tuple[MeasureGetActivityActivity, ...]
more: bool
offset: int
class HeartModel(IntEnum):
"""Heart model."""
UNKNOWN = -999999
BPM_CORE = 44
MOVE_ECG = 91
class AfibClassification(IntEnum):
"""Atrial fibrillation classification"""
UNKNOWN = -999999
NEGATIVE = 0
POSITIVE = 1
INCONCLUSIVE = 2
class HeartWearPosition(IntEnum):
"""Wear position of heart model."""
UNKNOWN = -999999
RIGHT_WRIST = 0
LEFT_WRIST = 1
RIGHT_ARM = 2
LEFT_ARM = 3
RIGHT_FOOT = 4
LEFT_FOOT = 5
class HeartGetResponse(ConfiguredBaseModel):
"""HeartGetResponse."""
signal: Tuple[int, ...]
sampling_frequency: int
wearposition: HeartWearPosition
@validator("wearposition", pre=True)
@classmethod
def _wearposition_to_enum(cls, value: Any) -> HeartWearPosition:
return to_enum(HeartWearPosition, value, HeartWearPosition.UNKNOWN)
class HeartListECG(ConfiguredBaseModel):
"""HeartListECG."""
signalid: int
afib: AfibClassification
@validator("afib", pre=True)
@classmethod
def _afib_to_enum(cls, value: Any) -> AfibClassification:
return to_enum(AfibClassification, value, AfibClassification.UNKNOWN)
class HeartBloodPressure(ConfiguredBaseModel):
"""HeartBloodPressure."""
diastole: int
systole: int
class HeartListSerie(ConfiguredBaseModel):
"""HeartListSerie"""
ecg: HeartListECG
heart_rate: int
timestamp: ArrowType
model: HeartModel
# blood pressure is optional as not all devices (e.g. Move ECG) collect it
bloodpressure: Optional[HeartBloodPressure] = None
deviceid: Optional[str] = None
@validator("model", pre=True)
@classmethod
def _model_to_enum(cls, value: Any) -> HeartModel:
return to_enum(HeartModel, value, HeartModel.UNKNOWN)
class HeartListResponse(ConfiguredBaseModel):
"""HeartListResponse."""
more: bool
offset: int
series: Tuple[HeartListSerie, ...]
@dataclass(frozen=True)
class Credentials:
"""Credentials."""
access_token: str
token_expiry: int
token_type: str
refresh_token: str
userid: int
client_id: str
consumer_secret: str
class Credentials2(ConfiguredBaseModel):
"""Credentials."""
access_token: str
token_type: str
refresh_token: str
userid: int
client_id: str
consumer_secret: str
expires_in: int
created: ArrowType = Field(default_factory=arrow.utcnow)
@property
def token_expiry(self) -> int:
"""Get the token expiry."""
return cast(int, self.created.shift(seconds=self.expires_in).int_timestamp)
CredentialsType = Union[Credentials, Credentials2]
def maybe_upgrade_credentials(value: CredentialsType) -> Credentials2:
"""Upgrade older versions of credentials to the newer signature."""
if isinstance(value, Credentials2):
return value
creds = cast(Credentials, value)
return Credentials2(
access_token=creds.access_token,
token_type=creds.token_type,
refresh_token=creds.refresh_token,
userid=creds.userid,
client_id=creds.client_id,
consumer_secret=creds.consumer_secret,
expires_in=creds.token_expiry - arrow.utcnow().int_timestamp,
)
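# Illustrative usage sketch (added; `stored` is a hypothetical variable holding whatever
# credentials object an application persisted). maybe_upgrade_credentials is a no-op for
# Credentials2 and converts the older frozen-dataclass Credentials, recomputing
# expires_in from the absolute token_expiry timestamp.
def _example_refresh_token(stored: CredentialsType) -> str:
    creds = maybe_upgrade_credentials(stored)
    return creds.refresh_token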
class NotifyListProfile(ConfiguredBaseModel):
"""NotifyListProfile."""
appli: NotifyAppli
callbackurl: str
expires: Optional[ArrowType]
comment: Optional[str]
@validator("appli", pre=True)
@classmethod
def _appli_to_enum(cls, value: Any) -> NotifyAppli:
return to_enum(NotifyAppli, value, NotifyAppli.UNKNOWN)
class NotifyListResponse(ConfiguredBaseModel):
"""NotifyListResponse."""
profiles: Tuple[NotifyListProfile, ...]
class NotifyGetResponse(ConfiguredBaseModel):
"""NotifyGetResponse."""
appli: NotifyAppli
callbackurl: str
comment: Optional[str]
@validator("appli", pre=True)
@classmethod
def _appli_to_enum(cls, value: Any) -> NotifyAppli:
return to_enum(NotifyAppli, value, NotifyAppli.UNKNOWN)
class UnexpectedTypeException(Exception):
"""Thrown when encountering an unexpected type."""
def __init__(self, value: Any, expected: Type[_GenericType]):
"""Initialize."""
super().__init__(
            'Expected "%s" to be "%s" but was "%s".' % (value, expected, type(value))
)
AMBIGUOUS_GROUP_ATTRIBS: Final = (
MeasureGetMeasGroupAttrib.DEVICE_ENTRY_FOR_USER_AMBIGUOUS,
MeasureGetMeasGroupAttrib.MANUAL_USER_DURING_ACCOUNT_CREATION,
)
class MeasureGroupAttribs:
"""Groups of MeasureGetMeasGroupAttrib."""
ANY: Final = tuple(enum_val for enum_val in MeasureGetMeasGroupAttrib)
AMBIGUOUS: Final = AMBIGUOUS_GROUP_ATTRIBS
UNAMBIGUOUS: Final = tuple(
enum_val
for enum_val in MeasureGetMeasGroupAttrib
if enum_val not in AMBIGUOUS_GROUP_ATTRIBS
)
class MeasureTypes:
"""Groups of MeasureType."""
ANY: Final = tuple(enum_val for enum_val in MeasureType)
def query_measure_groups(
from_source: Union[
MeasureGetMeasGroup, MeasureGetMeasResponse, Tuple[MeasureGetMeasGroup, ...]
],
with_measure_type: Union[MeasureType, Tuple[MeasureType, ...]] = MeasureTypes.ANY,
with_group_attrib: Union[
MeasureGetMeasGroupAttrib, Tuple[MeasureGetMeasGroupAttrib, ...]
] = MeasureGroupAttribs.ANY,
) -> Tuple[MeasureGetMeasGroup, ...]:
"""Return a groups and measurements based on filters."""
if isinstance(from_source, MeasureGetMeasResponse):
iter_groups = cast(MeasureGetMeasResponse, from_source).measuregrps
elif isinstance(from_source, MeasureGetMeasGroup):
iter_groups = (cast(MeasureGetMeasGroup, from_source),)
else:
iter_groups = cast(Tuple[MeasureGetMeasGroup], from_source)
if isinstance(with_measure_type, MeasureType):
iter_measure_type = (cast(MeasureType, with_measure_type),)
else:
iter_measure_type = cast(Tuple[MeasureType], with_measure_type)
if isinstance(with_group_attrib, MeasureGetMeasGroupAttrib):
iter_group_attrib = (cast(MeasureGetMeasGroupAttrib, with_group_attrib),)
else:
iter_group_attrib = cast(Tuple[MeasureGetMeasGroupAttrib], with_group_attrib)
return tuple(
MeasureGetMeasGroup(
attrib=group.attrib,
category=group.category,
created=group.created,
date=group.date,
deviceid=group.deviceid,
grpid=group.grpid,
measures=tuple(
measure
for measure in group.measures
if measure.type in iter_measure_type
),
)
for group in iter_groups
if group.attrib in iter_group_attrib
)
def get_measure_value(
from_source: Union[
MeasureGetMeasGroup, MeasureGetMeasResponse, Tuple[MeasureGetMeasGroup, ...]
],
with_measure_type: Union[MeasureType, Tuple[MeasureType, ...]],
with_group_attrib: Union[
MeasureGetMeasGroupAttrib, Tuple[MeasureGetMeasGroupAttrib, ...]
] = MeasureGroupAttribs.ANY,
) -> Optional[float]:
"""Get the first value of a measure that meet the query requirements."""
groups: Final = query_measure_groups(
from_source, with_measure_type, with_group_attrib
)
return next(
iter(
tuple(
float(measure.value * pow(10, measure.unit))
for group in groups
for measure in group.measures
)
),
None,
)
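# Illustrative usage sketch (added; assumes the MeasureType enum defined earlier in this
# module includes a WEIGHT member). get_measure_value already applies the
# value * 10**unit scaling above, so the result is in the measure's base unit.
def _example_latest_weight(response: MeasureGetMeasResponse) -> Optional[float]:
    return get_measure_value(
        response,
        with_measure_type=MeasureType.WEIGHT,
        with_group_attrib=MeasureGroupAttribs.UNAMBIGUOUS,
    )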
class StatusException(Exception):
"""Status exception."""
def __init__(self, status: Any):
"""Create instance."""
super().__init__("Error code %s" % str(status))
class AuthFailedException(StatusException):
"""Withings status error code exception."""
class InvalidParamsException(StatusException):
"""Withings status error code exception."""
class UnauthorizedException(StatusException):
"""Withings status error code exception."""
class ErrorOccurredException(StatusException):
"""Withings status error code exception."""
class TimeoutException(StatusException):
"""Withings status error code exception."""
class BadStateException(StatusException):
"""Withings status error code exception."""
class TooManyRequestsException(StatusException):
"""Withings status error code exception."""
class UnknownStatusException(StatusException):
"""Unknown status code but it's still not successful."""
def response_body_or_raise(data: Any) -> Dict[str, Any]:
"""Parse withings response or raise exception."""
if not isinstance(data, dict):
raise UnexpectedTypeException(data, dict)
parsed_response: Final = cast(dict, data)
status: Final = parsed_response.get("status")
if status is None:
raise UnknownStatusException(status=status)
if status in STATUS_SUCCESS:
return cast(Dict[str, Any], parsed_response.get("body"))
if status in STATUS_AUTH_FAILED:
raise AuthFailedException(status=status)
if status in STATUS_INVALID_PARAMS:
raise InvalidParamsException(status=status)
if status in STATUS_UNAUTHORIZED:
raise UnauthorizedException(status=status)
if status in STATUS_ERROR_OCCURRED:
raise ErrorOccurredException(status=status)
if status in STATUS_TIMEOUT:
raise TimeoutException(status=status)
if status in STATUS_BAD_STATE:
raise BadStateException(status=status)
if status in STATUS_TOO_MANY_REQUESTS:
raise TooManyRequestsException(status=status)
raise UnknownStatusException(status=status)
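# Illustrative sketch (added). The payload shape is an assumption about the API's JSON
# envelope, and it assumes 0 is among STATUS_SUCCESS:
#   body = response_body_or_raise({"status": 0, "body": {"devices": []}})
#   devices = UserGetDeviceResponse(**body)
# Any non-success status raises the matching StatusException subclass instead.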
| 26.752969
| 142
| 0.679126
|
8f6cd83956cbb9f3549d104ee26e92ff91fb49b2
| 1,701
|
py
|
Python
|
src/tests/test_load_all_grids.py
|
vineetjnair9/GridCal
|
5b63cbae45cbe176b015e5e99164a593f450fe71
|
[
"BSD-3-Clause"
] | null | null | null |
src/tests/test_load_all_grids.py
|
vineetjnair9/GridCal
|
5b63cbae45cbe176b015e5e99164a593f450fe71
|
[
"BSD-3-Clause"
] | null | null | null |
src/tests/test_load_all_grids.py
|
vineetjnair9/GridCal
|
5b63cbae45cbe176b015e5e99164a593f450fe71
|
[
"BSD-3-Clause"
] | null | null | null |
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import os
from GridCal.Engine.IO.file_handler import FileOpen
def test_all_grids():
# get the directory of this file
current_path = os.path.dirname(__file__)
# navigate to the grids folder
grids_path = os.path.join(current_path, '..', '..', 'Grids_and_profiles', 'grids')
files = os.listdir(grids_path)
failed = list()
for file_name in files:
path = os.path.join(grids_path, file_name)
print('-' * 160)
print('Loading', file_name, '...', end='')
try:
file_handler = FileOpen(path)
circuit = file_handler.open()
circuit.compile_snapshot()
print('ok')
        except Exception as exc:
            print('Failed:', exc)
            failed.append(file_name)
print('Failed:')
for f in failed:
print('\t', f)
for f in failed:
print('Attempting', f)
path = os.path.join(grids_path, f)
file_handler = FileOpen(path)
circuit = file_handler.open()
circuit.compile_snapshot()
assert len(failed) == 0
| 30.375
| 86
| 0.651969
|
3564ff65e33eb4b88fa47128ea197b2ec96d8dae
| 9,775
|
py
|
Python
|
Contents/Libraries/Shared/enzyme/parsers/ebml/core.py
|
luboslavgerliczy/SubZero
|
6aeca58736ee99803d2e7f52ab00d9f563645a93
|
[
"MIT"
] | null | null | null |
Contents/Libraries/Shared/enzyme/parsers/ebml/core.py
|
luboslavgerliczy/SubZero
|
6aeca58736ee99803d2e7f52ab00d9f563645a93
|
[
"MIT"
] | null | null | null |
Contents/Libraries/Shared/enzyme/parsers/ebml/core.py
|
luboslavgerliczy/SubZero
|
6aeca58736ee99803d2e7f52ab00d9f563645a93
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from ...exceptions import ReadError
from .readers import *
from pkg_resources import resource_stream # @UnresolvedImport
from xml.dom import minidom
import logging
__all__ = ['INTEGER', 'UINTEGER', 'FLOAT', 'STRING', 'UNICODE', 'DATE', 'MASTER', 'BINARY',
'SPEC_TYPES', 'READERS', 'Element', 'MasterElement', 'parse', 'parse_element',
'get_matroska_specs']
logger = logging.getLogger(__name__)
# EBML types
INTEGER, UINTEGER, FLOAT, STRING, UNICODE, DATE, MASTER, BINARY = range(8)
# Spec types to EBML types mapping
SPEC_TYPES = {
'integer': INTEGER,
'uinteger': UINTEGER,
'float': FLOAT,
'string': STRING,
'utf-8': UNICODE,
'date': DATE,
'master': MASTER,
'binary': BINARY
}
# Readers to use per EBML type
READERS = {
INTEGER: read_element_integer,
UINTEGER: read_element_uinteger,
FLOAT: read_element_float,
STRING: read_element_string,
UNICODE: read_element_unicode,
DATE: read_element_date,
BINARY: read_element_binary
}
class BaseElement(object):
    def __init__(self, id=None, position=None, size=None, data=None):
        self.id = id
        self.position = position
        self.size = size
        self.data = data
        # Unknown elements (ids missing from the specs) have no type; parse() relies on this.
        self.type = None
class Element(BaseElement):
"""Base object of EBML
:param int id: id of the element, best represented as hexadecimal (0x18538067 for Matroska Segment element)
:param type: type of the element
:type type: :data:`INTEGER`, :data:`UINTEGER`, :data:`FLOAT`, :data:`STRING`, :data:`UNICODE`, :data:`DATE`, :data:`MASTER` or :data:`BINARY`
:param string name: name of the element
:param int level: level of the element
:param int position: position of element's data
:param int size: size of element's data
:param data: data as read by the corresponding :data:`READERS`
"""
def __init__(self, id=None, type=None, name=None, level=None, position=None, size=None, data=None):
super(Element, self).__init__(id, position, size, data)
self.type = type
self.name = name
self.level = level
def __repr__(self):
return '<%s [%s, %r]>' % (self.__class__.__name__, self.name, self.data)
class MasterElement(Element):
"""Element of type :data:`MASTER` that has a list of :class:`Element` as its data
:param int id: id of the element, best represented as hexadecimal (0x18538067 for Matroska Segment element)
:param string name: name of the element
:param int level: level of the element
:param int position: position of element's data
:param int size: size of element's data
:param data: child elements
:type data: list of :class:`Element`
:class:`MasterElement` implements some magic methods to ease manipulation. Thus, a MasterElement supports
the `in` keyword to test for the presence of a child element by its name and gives access to it
with a container getter::
>>> ebml_element = parse(open('test1.mkv', 'rb'), get_matroska_specs())[0]
>>> 'EBMLVersion' in ebml_element
False
>>> 'DocType' in ebml_element
True
>>> ebml_element['DocType']
    <Element [DocType, u'matroska']>
"""
def __init__(self, id=None, name=None, level=None, position=None, size=None, data=None):
super(MasterElement, self).__init__(id, MASTER, name, level, position, size, data)
def load(self, stream, specs, ignore_element_types=None, ignore_element_names=None, max_level=None):
"""Load children :class:`Elements <Element>` with level lower or equal to the `max_level`
from the `stream` according to the `specs`
:param stream: file-like object from which to read
:param dict specs: see :ref:`specs`
:param int max_level: maximum level for children elements
:param list ignore_element_types: list of element types to ignore
:param list ignore_element_names: list of element names to ignore
:param int max_level: maximum level of elements
"""
self.data = parse(stream, specs, self.size, ignore_element_types, ignore_element_names, max_level)
def get(self, name, default=None):
"""Convenience method for ``master_element[name].data if name in master_element else default``
:param string name: the name of the child to get
:param default: default value if `name` is not in the :class:`MasterElement`
:return: the data of the child :class:`Element` or `default`
"""
if name not in self:
return default
element = self[name]
if element.type == MASTER:
raise ValueError('%s is a MasterElement' % name)
return element.data
def __getitem__(self, key):
if isinstance(key, int):
return self.data[key]
children = [e for e in self.data if e.name == key]
if not children:
raise KeyError(key)
if len(children) > 1:
raise KeyError('More than 1 child with key %s (%d)' % (key, len(children)))
return children[0]
def __contains__(self, item):
return len([e for e in self.data if e.name == item]) > 0
def __iter__(self):
return iter(self.data)
def parse(stream, specs, size=None, ignore_element_types=None, ignore_element_names=None, max_level=None, include_element_names=None):
"""Parse a stream for `size` bytes according to the `specs`
:param stream: file-like object from which to read
:param size: maximum number of bytes to read, None to read all the stream
:type size: int or None
:param dict specs: see :ref:`specs`
:param list ignore_element_types: list of element types to ignore
:param list ignore_element_names: list of element names to ignore
:param int max_level: maximum level of elements
:param list include_element_names: list of element names to include exclusively, so ignoring all other element names
:return: parsed data as a tree of :class:`~enzyme.parsers.ebml.core.Element`
:rtype: list
.. note::
If `size` is reached in a middle of an element, reading will continue
until the element is fully parsed.
"""
ignore_element_types = ignore_element_types if ignore_element_types is not None else []
ignore_element_names = ignore_element_names if ignore_element_names is not None else []
include_element_names = include_element_names if include_element_names is not None else []
start = stream.tell()
elements = []
while size is None or stream.tell() - start < size:
try:
element = parse_element(stream, specs)
if element.type is None:
                logger.error('Element with id 0x%x is not in the specs' % element.id)
                stream.seek(element.size, 1)
continue
elif element.type in ignore_element_types or element.name in ignore_element_names:
logger.info('%s %s %s ignored', element.__class__.__name__, element.name, element.type)
stream.seek(element.size, 1)
continue
elif len(include_element_names) > 0 and element.name not in include_element_names:
stream.seek(element.size, 1)
continue
elif element.type == MASTER:
if max_level is not None and element.level >= max_level:
logger.info('Maximum level %d reached for children of %s %s', max_level, element.__class__.__name__, element.name)
stream.seek(element.size, 1)
else:
logger.debug('Loading child elements for %s %s with size %d', element.__class__.__name__, element.name, element.size)
element.data = parse(stream, specs, element.size, ignore_element_types, ignore_element_names, max_level,include_element_names)
else:
element.data = READERS[element.type](stream, element.size)
elements.append(element)
except ReadError:
if size is not None:
raise
break
return elements
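# Illustrative usage sketch (added; the file name is hypothetical). Skipping BINARY
# elements keeps parsing cheap, and max_level=1 expands only the top-level (level-0)
# master elements:
#
#   >>> with open('test1.mkv', 'rb') as stream:
#   ...     elements = parse(stream, get_matroska_specs(),
#   ...                      ignore_element_types=[BINARY], max_level=1)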
def parse_element(stream, specs):
"""Extract a single :class:`Element` from the `stream` according to the `specs`
:param stream: file-like object from which to read
:param dict specs: see :ref:`specs`
:return: the parsed element
:rtype: :class:`Element`
"""
element_id = read_element_id(stream)
if element_id is None:
raise ReadError('Cannot read element id')
element_size = read_element_size(stream)
if element_size is None:
raise ReadError('Cannot read element size')
if element_id not in specs:
return BaseElement(element_id,stream.tell(),element_size)
element_type, element_name, element_level = specs[element_id]
if element_type == MASTER:
element = MasterElement(element_id, element_name, element_level, stream.tell(), element_size)
else:
element = Element(element_id, element_type, element_name, element_level, stream.tell(), element_size)
return element
def get_matroska_specs(webm_only=False):
"""Get the Matroska specs
:param bool webm_only: load *only* WebM specs
:return: the specs in the appropriate format. See :ref:`specs`
:rtype: dict
"""
specs = {}
with resource_stream(__name__, 'specs/matroska.xml') as resource:
xmldoc = minidom.parse(resource)
for element in xmldoc.getElementsByTagName('element'):
if not webm_only or element.hasAttribute('webm') and element.getAttribute('webm') == '1':
specs[int(element.getAttribute('id'), 16)] = (SPEC_TYPES[element.getAttribute('type')], element.getAttribute('name'), int(element.getAttribute('level')))
return specs
| 41.071429
| 169
| 0.662199
|
e9d3b1bcbf4585a1207d99319aa616988eaddc74
| 892
|
py
|
Python
|
fimed/config.py
|
dandobjim/FIMED2.0-BACKEND
|
1118a1afcf62a2d39de3464dce3929f008dba0ec
|
[
"MIT"
] | null | null | null |
fimed/config.py
|
dandobjim/FIMED2.0-BACKEND
|
1118a1afcf62a2d39de3464dce3929f008dba0ec
|
[
"MIT"
] | 2
|
2021-04-06T18:29:57.000Z
|
2021-06-02T03:57:43.000Z
|
fimed/config.py
|
dandobjim/FIMED2.0-BACKEND
|
1118a1afcf62a2d39de3464dce3929f008dba0ec
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from pydantic import BaseSettings, AnyUrl
from fimed.logger import logger
class MongoDns(AnyUrl):
allowed_schemes = {"mongodb"}
user_required = False
class _Settings(BaseSettings):
# api settings
API_HOST: str = "localhost"
API_PORT: int = 8080
# for applications sub-mounted below a given URL path
ROOT_PATH: str = ""
# database connection
MONGO_DNS: MongoDns = "mongodb://localhost:27017"
# Mongo settings
MINNIO_CONN = "localhost:9000"
ACCESS_KEY = "FIMED"
SECRET_KEY = "FIMEDFIMED"
class Config:
if not Path(".env").is_file():
logger.warning("⚠️ `.env` not found in current directory")
logger.info("⚙️ Loading settings from environment")
else:
logger.info("⚙️ Loading settings from dotenv file")
env_file = ".env"
settings = _Settings()
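# Illustrative note (added): with pydantic BaseSettings, the annotated fields above can be
# overridden via environment variables or the optional .env file, e.g.
#   MONGO_DNS=mongodb://db.example.org:27017
#   API_PORT=9090
# Anything not set falls back to the defaults declared on _Settings.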
| 24.108108
| 70
| 0.651345
|
f9a7302d51a15d2c6bae57f68546502e33afa46d
| 4,090
|
py
|
Python
|
train_vgg19.py
|
SakhriHoussem/image-classification
|
82cb281eb4b7f114501df5d13cfc4cf00107d055
|
[
"MIT"
] | 20
|
2018-09-02T09:15:14.000Z
|
2022-03-24T04:40:42.000Z
|
train_vgg19.py
|
SakhriHoussem/image-classification
|
82cb281eb4b7f114501df5d13cfc4cf00107d055
|
[
"MIT"
] | 3
|
2018-09-03T11:52:04.000Z
|
2020-12-28T02:10:08.000Z
|
train_vgg19.py
|
SakhriHoussem/image-classification
|
82cb281eb4b7f114501df5d13cfc4cf00107d055
|
[
"MIT"
] | 13
|
2018-07-26T17:40:16.000Z
|
2022-01-25T04:30:26.000Z
|
"""
Simple tester for the vgg19_trainable
"""
import numpy as np
import tensorflow as tf
from dataSetGenerator import append,picShow
from vgg19 import vgg19_trainable as vgg19
import argparse
parser = argparse.ArgumentParser(prog="Train vgg19",description="Simple tester for the vgg19_trainable")
parser.add_argument('--dataset', metavar='dataset', type=str,required=True,
help='DataSet Name')
parser.add_argument('--batch', metavar='batch', type=int, default=10, help='batch size ')
parser.add_argument('--epochs', metavar='epochs', type=int, default=30,
help='number of epoch to train the network')
args = parser.parse_args()
classes_name = args.dataset
batch_size = args.batch
epochs = args.epochs
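# Example invocation (illustrative; the dataset name must match the .npy files laid out
# under DataSets/<name>/ as loaded below):
#   python train_vgg19.py --dataset SIRI-WHU --batch 10 --epochs 30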
# batch_size = 10
# epochs = 30
# classes_name = "SIRI-WHU"
# classes_name = "UCMerced_LandUse"
# classes_name = "RSSCN7"
classes = np.load("DataSets/{0}/{0}_classes.npy".format(classes_name))
batch = np.load("DataSets/{0}/{0}_dataTrain.npy".format(classes_name))
labels = np.load("DataSets/{0}/{0}_labelsTrain.npy".format(classes_name))
classes_num = len(classes)
rib = batch.shape[1]
with tf.device('/device:GPU:0'):
# with tf.device('/cpu:0'):
# with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=int(environ['NUMBER_OF_PROCESSORS']))) as sess:
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) as sess:
images = tf.placeholder(tf.float32, [None, rib, rib, 3])
true_out = tf.placeholder(tf.float32, [None, classes_num])
train_mode = tf.placeholder(tf.bool)
try:
vgg = vgg19.Vgg19('Weights/VGG19_{}.npy'.format(classes_name),classes_num)
        except Exception:
            print('Weights/VGG19_{}.npy not found; training from scratch'.format(classes_name))
            vgg = vgg19.Vgg19(None, classes_num)
vgg.build(images,train_mode)
# print number of variables used: 143667240 variables, i.e. ideal size = 548MB
# print('number of variables used:',vgg.get_var_count())
        print('Data shape used:', batch.shape)
sess.run(tf.global_variables_initializer())
# test classification
prob = sess.run(vgg.prob, feed_dict={images: batch[:8], train_mode: False})
# picShow(batch[:8], labels[:8], classes, None, prob, True)
# simple 1-step training
cost = tf.reduce_sum((vgg.prob - true_out) ** 2)
train = tf.train.GradientDescentOptimizer(0.0001).minimize(cost)
        # use the network output tensor (not the earlier numpy `prob`) and argmax over the class axis
        correct_prediction = tf.equal(tf.argmax(vgg.prob, 1), tf.argmax(true_out, 1))
acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
batche_num = len(batch)
accs = []
costs = []
for _ in range(epochs):
indice = np.random.permutation(batche_num)
counter = 0
for i in range(int(batche_num/batch_size)):
min_batch = indice[i*batch_size:(i+1)*batch_size]
cur_cost, cur_train,cur_acc= sess.run([cost, train,acc], feed_dict={images: batch[min_batch], true_out: labels[min_batch], train_mode: True})
print("Iteration :{} Batch :{} loss :{}".format(_, i, cur_cost))
accs.append(cur_acc)
costs.append(cur_cost)
counter += 1
if counter % 100 == 0:
# save Weights
vgg.save_npy(sess, 'Weights/VGG19_{}.npy'.format(classes_name))
# save graph data
append(costs,'Data/cost19_{}.txt'.format(classes_name))
append(accs,'Data/acc19_{}.txt'.format(classes_name))
# save Weights
vgg.save_npy(sess, 'Weights/VGG19_{}.npy'.format(classes_name))
        # test classification again; predictions should now assign higher probability to the correct classes
prob = sess.run(vgg.prob, feed_dict={images: batch[:8], train_mode: False})
picShow(batch[:8], labels[:8], classes, None, prob)
# import subprocess
# subprocess.call(["shutdown", "/s"])
| 41.313131
| 161
| 0.630318
|
dc5809c3fdbd031aec4ddd6f4a3751f4dbc4cc73
| 2,977
|
gyp
|
Python
|
deps/sqlite3.gyp
|
ibmruntimes/node-sqlite3
|
221630b1611bfdf9d938e978698989977ec19a79
|
[
"BSD-3-Clause"
] | null | null | null |
deps/sqlite3.gyp
|
ibmruntimes/node-sqlite3
|
221630b1611bfdf9d938e978698989977ec19a79
|
[
"BSD-3-Clause"
] | null | null | null |
deps/sqlite3.gyp
|
ibmruntimes/node-sqlite3
|
221630b1611bfdf9d938e978698989977ec19a79
|
[
"BSD-3-Clause"
] | null | null | null |
{
'includes': [ 'common-sqlite.gypi' ],
'target_defaults': {
'default_configuration': 'Release',
'cflags': [
'-std=c99'
],
'configurations': {
'Debug': {
'defines': [ 'DEBUG', '_DEBUG' ],
'msvs_settings': {
'VCCLCompilerTool': {
'RuntimeLibrary': 1, # static debug
},
},
},
'Release': {
'defines': [ 'NDEBUG' ],
'msvs_settings': {
'VCCLCompilerTool': {
'RuntimeLibrary': 0, # static release
},
},
}
},
'msvs_settings': {
'VCCLCompilerTool': {
},
'VCLibrarianTool': {
},
'VCLinkerTool': {
'GenerateDebugInformation': 'true',
},
},
'conditions': [
['OS == "win"', {
'defines': [
'WIN32'
],
}],
['OS == "zos"', {
'cflags!': [
'-std=c99'
],
'cflags': [
'-qlanglvl=stdc99',
'-qascii',
'-qxclang=-fexec-charset=ISO8859-1'
],
}]
],
},
'targets': [
{
'target_name': 'action_before_build',
'type': 'none',
'hard_dependency': 1,
'actions': [
{
'action_name': 'unpack_sqlite_dep',
'inputs': [
'./sqlite-autoconf-<@(sqlite_version).tar.gz'
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/sqlite-autoconf-<@(sqlite_version)/sqlite3.c'
],
'action': ['<!(node -p "process.env.npm_config_python || \\"python\\"")','./extract.py','./sqlite-autoconf-<@(sqlite_version).tar.gz','<(SHARED_INTERMEDIATE_DIR)']
}
],
'direct_dependent_settings': {
'include_dirs': [
'<(SHARED_INTERMEDIATE_DIR)/sqlite-autoconf-<@(sqlite_version)/',
]
},
},
{
'target_name': 'sqlite3',
'type': 'static_library',
'include_dirs': [ '<(SHARED_INTERMEDIATE_DIR)/sqlite-autoconf-<@(sqlite_version)/' ],
'dependencies': [
'action_before_build'
],
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/sqlite-autoconf-<@(sqlite_version)/sqlite3.c'
],
'direct_dependent_settings': {
'include_dirs': [ '<(SHARED_INTERMEDIATE_DIR)/sqlite-autoconf-<@(sqlite_version)/' ],
'defines': [
'SQLITE_THREADSAFE=1',
'HAVE_USLEEP=1',
'SQLITE_ENABLE_FTS3',
'SQLITE_ENABLE_FTS4',
'SQLITE_ENABLE_FTS5',
'SQLITE_ENABLE_JSON1',
'SQLITE_ENABLE_RTREE'
],
},
'cflags_cc': [
'-Wno-unused-value'
],
'defines': [
'_REENTRANT=1',
'SQLITE_THREADSAFE=1',
'HAVE_USLEEP=1',
'SQLITE_ENABLE_FTS3',
'SQLITE_ENABLE_FTS4',
'SQLITE_ENABLE_FTS5',
'SQLITE_ENABLE_JSON1',
'SQLITE_ENABLE_RTREE'
],
'export_dependent_settings': [
'action_before_build',
]
}
]
}
| 25.228814
| 173
| 0.48606
|
4ba9f09c2369fc68ee8d64c75f2399f4733951b6
| 7,640
|
py
|
Python
|
helper/args.py
|
jyothisable/dcscn-super-resolution
|
3799e5335cd493040d2d00386e93043840713774
|
[
"MIT"
] | null | null | null |
helper/args.py
|
jyothisable/dcscn-super-resolution
|
3799e5335cd493040d2d00386e93043840713774
|
[
"MIT"
] | null | null | null |
helper/args.py
|
jyothisable/dcscn-super-resolution
|
3799e5335cd493040d2d00386e93043840713774
|
[
"MIT"
] | null | null | null |
"""
Paper: "Fast and Accurate Image Super Resolution by Deep CNN with Skip Connection and Network in Network"
Ver: 2
functions for sharing arguments and their default values
"""
import sys
import numpy as np
import tensorflow.compat.v1 as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
# Model (network) Parameters
flags.DEFINE_integer(
"scale", 2, "Scale factor for Super Resolution (should be 2 or more)")
flags.DEFINE_integer(
"layers", 12, "Number of layers of feature xxtraction CNNs")
flags.DEFINE_integer(
"filters", 196, "Number of filters of first feature-extraction CNNs")
flags.DEFINE_integer(
"min_filters", 48, "Number of filters of last feature-extraction CNNs")
flags.DEFINE_float("filters_decay_gamma", 1.5,
"Number of CNN filters are decayed from [filters] to [min_filters] by this gamma")
flags.DEFINE_boolean("use_nin", True, "Use Network In Network")
flags.DEFINE_integer(
"nin_filters", 64, "Number of CNN filters in A1 at Reconstruction network")
flags.DEFINE_integer(
"nin_filters2", 32, "Number of CNN filters in B1 and B2 at Reconstruction net.")
flags.DEFINE_integer("cnn_size", 3, "Size of CNN filters")
flags.DEFINE_integer("reconstruct_layers", 1,
"Number of Reconstruct CNN Layers. (can be 0.)")
flags.DEFINE_integer("reconstruct_filters", 32,
"Number of Reconstruct CNN Filters")
flags.DEFINE_float("dropout_rate", 0.8,
"Output nodes should be kept by this probability. If 1, don't use dropout.")
flags.DEFINE_string("activator", "prelu",
"Activator can be [relu, leaky_relu, prelu, sigmoid, tanh, selu]")
flags.DEFINE_boolean("pixel_shuffler", True,
"Use Pixel Shuffler instead of transposed CNN")
flags.DEFINE_integer("pixel_shuffler_filters", 0,
"Num of Pixel Shuffler output channels. 0 means use same channels as input.")
flags.DEFINE_integer("self_ensemble", 8,
"Number of using self ensemble method. [1 - 8]")
flags.DEFINE_boolean("batch_norm", False,
"use batch normalization after each CNNs")
flags.DEFINE_boolean("depthwise_separable", False,
"use depthwise seperable convolutions for each CNN layer instead")
# Training Parameters
flags.DEFINE_boolean("bicubic_init", True,
"make bicubic interpolation values as initial input for x2")
flags.DEFINE_float("clipping_norm", 5,
"Norm for gradient clipping. If it's <= 0 we don't use gradient clipping.")
flags.DEFINE_string("initializer", "he",
"Initializer for weights can be [uniform, stddev, xavier, he, identity, zero]")
flags.DEFINE_float("weight_dev", 0.01,
"Initial weight stddev (won't be used when you use he or xavier initializer)")
flags.DEFINE_float("l2_decay", 0.0001, "l2_decay")
flags.DEFINE_string("optimizer", "adam",
"Optimizer can be [gd, momentum, adadelta, adagrad, adam, rmsprop]")
flags.DEFINE_float("beta1", 0.9, "Beta1 for adam optimizer")
flags.DEFINE_float("beta2", 0.999, "Beta2 for adam optimizer")
flags.DEFINE_float("epsilon", 1e-8, "epsilon for adam optimizer")
flags.DEFINE_float(
"momentum", 0.9, "Momentum for momentum optimizer and rmsprop optimizer")
flags.DEFINE_integer(
"batch_num", 20, "Number of mini-batch images for training")
flags.DEFINE_integer("batch_image_size", 48, "Image size for mini-batch")
flags.DEFINE_integer(
"stride_size", 0, "Stride size for mini-batch. If it is 0, use half of batch_image_size")
flags.DEFINE_integer("training_images", 24000,
"Number of training on each epoch")
flags.DEFINE_boolean("use_l1_loss", False,
"Use L1 Error as loss function instead of MSE Error.")
# Learning Rate Control for Training
flags.DEFINE_float("initial_lr", 0.002, "Initial learning rate")
flags.DEFINE_float("lr_decay", 0.5, "Learning rate decay rate")
flags.DEFINE_integer("lr_decay_epoch", 9,
"After this epochs are completed, learning rate will be decayed by lr_decay.")
flags.DEFINE_float("end_lr", 2e-5, "Training end learning rate. If the current learning rate gets lower than this"
"value, then training will be finished.")
# Dataset or Others
flags.DEFINE_string("dataset", "bsd200",
"Training dataset dir. [yang91, general100, bsd200, other]")
flags.DEFINE_string("test_dataset", "set5",
"Directory for test dataset [set5, set14, bsd100, urban100, all]")
flags.DEFINE_integer("tests", 1, "Number of training sets")
flags.DEFINE_boolean("do_benchmark", False,
"Evaluate the performance for set5, set14 and bsd100 after the training.")
# Image Processing
flags.DEFINE_float("max_value", 1366, "For normalize image pixel value")
flags.DEFINE_integer(
"channels", 1, "Number of image channels used. Now it should be 1. using only Y from YCbCr.")
flags.DEFINE_integer("psnr_calc_border_size", -1,
"Cropping border size for calculating PSNR. if < 0, use 2 + scale for default.")
flags.DEFINE_boolean("build_batch", False, "Build pre-processed input batch. Makes training significantly faster but "
"the patches are limited to be on the grid.")
# flags.DEFINE_integer("input_image_width", -1, "The width of the input image. Put -1 if you do not want to have a fixed input size")
# flags.DEFINE_integer("input_image_height", -1, "The height of the input image. Put -1 if you do not want to hae a fixed input size")
# Environment (all directory name should not contain '/' after )
flags.DEFINE_string("checkpoint_dir", "models", "Directory for checkpoints")
flags.DEFINE_string("graph_dir", "graphs", "Directory for graphs")
flags.DEFINE_string("data_dir", "data", "Directory for original images")
flags.DEFINE_string("batch_dir", "batch_data",
"Directory for training batch images")
flags.DEFINE_string("output_dir", "output", "Directory for output test images")
flags.DEFINE_string("tf_log_dir", "tf_log", "Directory for tensorboard log")
flags.DEFINE_string("log_filename", "log.txt", "log filename")
flags.DEFINE_string(
"model_name", "", "model name for save files and tensorboard log")
flags.DEFINE_string("load_model_name", "",
"Filename of model loading before start [filename or 'default']")
# Debugging or Logging
flags.DEFINE_boolean("initialize_tf_log", True,
"Clear all tensorboard log before start")
flags.DEFINE_boolean("enable_log", True, "Enables tensorboard-log. Save loss.")
flags.DEFINE_boolean("save_weights", True, "Save weights and biases/gradients")
flags.DEFINE_boolean("save_images", False, "Save CNN weights as images")
flags.DEFINE_integer("save_images_num", 20, "Number of CNN images saved")
flags.DEFINE_boolean("save_meta_data", False, "")
flags.DEFINE_integer("gpu_device_id", 0,
"Device ID of GPUs which will be used to compute.")
# frozen model configurations
flags.DEFINE_boolean("frozenInference", False,
"Flag for whether the model to evaluate is frozen.")
flags.DEFINE_string("frozen_graph_path", './model_to_freeze/frozen_model_optimized.pb',
"the path to a frozen model if performing inference from it")
def get():
print("Python Interpreter version:%s" % sys.version[:3])
print("tensorflow version:%s" % tf.__version__)
print("numpy version:%s" % np.__version__)
# check which library you are using
# np.show_config()
return FLAGS
| 51.972789
| 134
| 0.696466
|
9e86574206b6c847b4f1ec334f91d65070f7313e
| 7,755
|
py
|
Python
|
scripts/west_commands/runners/nrfjprog.py
|
czeslawmakarski/zephyr
|
005103739c775fbe929070bf58a07a9a8fb211b1
|
[
"Apache-2.0"
] | 7
|
2021-07-27T00:12:02.000Z
|
2022-03-09T14:40:13.000Z
|
scripts/west_commands/runners/nrfjprog.py
|
czeslawmakarski/zephyr
|
005103739c775fbe929070bf58a07a9a8fb211b1
|
[
"Apache-2.0"
] | null | null | null |
scripts/west_commands/runners/nrfjprog.py
|
czeslawmakarski/zephyr
|
005103739c775fbe929070bf58a07a9a8fb211b1
|
[
"Apache-2.0"
] | 1
|
2021-08-06T15:51:17.000Z
|
2021-08-06T15:51:17.000Z
|
# Copyright (c) 2017 Linaro Limited.
# Copyright (c) 2019 Nordic Semiconductor ASA.
#
# SPDX-License-Identifier: Apache-2.0
'''Runner for flashing with nrfjprog.'''
import os
import shlex
import sys
from re import fullmatch, escape
from intelhex import IntelHex
from runners.core import ZephyrBinaryRunner, RunnerCaps
# Helper function for inspecting hex files.
# has_region returns True if the hex file has any contents in any of the given regions.
# regions is a sequence of (start_address, end_address) tuples delimiting each region.
def has_region(regions, hex_file):
try:
ih = IntelHex(hex_file)
return any((len(ih[rs:re]) > 0) for (rs, re) in regions)
except FileNotFoundError:
return False
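# Illustrative sketch (added; the hex path is hypothetical): check whether a merged build
# image touches the nRF91 UICR range used in do_run() below.
#   has_region(((0x00FF8000, 0x00FF8800),), 'build/zephyr/merged.hex')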
class NrfJprogBinaryRunner(ZephyrBinaryRunner):
'''Runner front-end for nrfjprog.'''
def __init__(self, cfg, family, softreset, snr, erase=False,
tool_opt=[], force=False):
super().__init__(cfg)
self.hex_ = cfg.hex_file
self.family = family
self.softreset = softreset
self.snr = snr
self.erase = erase
self.force = force
self.tool_opt = []
for opts in [shlex.split(opt) for opt in tool_opt]:
self.tool_opt += opts
@classmethod
def name(cls):
return 'nrfjprog'
@classmethod
def capabilities(cls):
return RunnerCaps(commands={'flash'}, erase=True)
@classmethod
def do_add_parser(cls, parser):
parser.add_argument('--nrf-family', required=True,
choices=['NRF51', 'NRF52', 'NRF53', 'NRF91'],
help='family of nRF MCU')
parser.add_argument('--softreset', required=False,
action='store_true',
help='use reset instead of pinreset')
parser.add_argument('--snr', required=False,
help="""Serial number of board to use.
'*' matches one or more characters/digits.""")
parser.add_argument('--tool-opt', default=[], action='append',
help='''Additional options for nrfjprog,
e.g. "--recover"''')
parser.add_argument('--force', required=False,
action='store_true',
help='Flash even if the result cannot be guaranteed.')
@classmethod
def do_create(cls, cfg, args):
return NrfJprogBinaryRunner(cfg, args.nrf_family, args.softreset,
args.snr, erase=args.erase,
tool_opt=args.tool_opt, force=args.force)
def ensure_snr(self):
if not self.snr or "*" in self.snr:
self.snr = self.get_board_snr(self.snr or "*")
def get_boards(self):
snrs = self.check_output(['nrfjprog', '--ids'])
snrs = snrs.decode(sys.getdefaultencoding()).strip().splitlines()
if not snrs:
raise RuntimeError('"nrfjprog --ids" did not find a board; '
'is the board connected?')
return snrs
@staticmethod
def verify_snr(snr):
if snr == '0':
raise RuntimeError('"nrfjprog --ids" returned 0; '
'is a debugger already connected?')
def get_board_snr(self, glob):
# Use nrfjprog --ids to discover connected boards.
#
# If there's exactly one board connected, it's safe to assume
# the user wants that one. Otherwise, bail unless there are
# multiple boards and we are connected to a terminal, in which
# case use print() and input() to ask what the user wants.
re_glob = escape(glob).replace(r"\*", ".+")
snrs = [snr for snr in self.get_boards() if fullmatch(re_glob, snr)]
if len(snrs) == 0:
raise RuntimeError(
'There are no boards connected{}.'.format(
f" matching '{glob}'" if glob != "*" else ""))
elif len(snrs) == 1:
board_snr = snrs[0]
self.verify_snr(board_snr)
print("Using board {}".format(board_snr))
return board_snr
elif not sys.stdin.isatty():
raise RuntimeError(
f'refusing to guess which of {len(snrs)} '
'connected boards to use. (Interactive prompts '
'disabled since standard input is not a terminal.) '
'Please specify a serial number on the command line.')
snrs = sorted(snrs)
print('There are multiple boards connected{}.'.format(
f" matching '{glob}'" if glob != "*" else ""))
for i, snr in enumerate(snrs, 1):
print('{}. {}'.format(i, snr))
p = 'Please select one with desired serial number (1-{}): '.format(
len(snrs))
while True:
try:
value = input(p)
except EOFError:
sys.exit(0)
try:
value = int(value)
except ValueError:
continue
if 1 <= value <= len(snrs):
break
return snrs[value - 1]
def do_run(self, command, **kwargs):
self.require('nrfjprog')
self.ensure_snr()
commands = []
board_snr = self.snr.lstrip("0")
if not os.path.isfile(self.hex_):
raise ValueError('Cannot flash; hex file ({}) does not exist. '.
format(self.hex_) +
'Try enabling CONFIG_BUILD_OUTPUT_HEX.')
program_cmd = ['nrfjprog', '--program', self.hex_, '-f', self.family,
'--snr', board_snr] + self.tool_opt
self.logger.info('Flashing file: {}'.format(self.hex_))
if self.erase:
commands.extend([
['nrfjprog',
'--eraseall',
'-f', self.family,
'--snr', board_snr],
program_cmd
])
else:
if self.family == 'NRF51':
commands.append(program_cmd + ['--sectorerase'])
elif self.family == 'NRF52':
commands.append(program_cmd + ['--sectoranduicrerase'])
else:
uicr = {
'NRF53': ((0x00FF8000, 0x00FF8800),
(0x01FF8000, 0x01FF8800)),
'NRF91': ((0x00FF8000, 0x00FF8800),),
}[self.family]
if not self.force and has_region(uicr, self.hex_):
# Hex file has UICR contents.
raise RuntimeError(
'The hex file contains data placed in the UICR, which '
'needs a full erase before reprogramming. Run west '
'flash again with --force or --erase.')
else:
commands.append(program_cmd + ['--sectorerase'])
if self.family == 'NRF52' and not self.softreset:
commands.extend([
# Enable pin reset
['nrfjprog', '--pinresetenable', '-f', self.family,
'--snr', board_snr],
])
if self.softreset:
commands.append(['nrfjprog', '--reset', '-f', self.family,
'--snr', board_snr])
else:
commands.append(['nrfjprog', '--pinreset', '-f', self.family,
'--snr', board_snr])
for cmd in commands:
self.check_call(cmd)
self.logger.info('Board with serial number {} flashed successfully.'.
format(board_snr))
| 37.105263
| 82
| 0.526757
|
254a7935f3b33a088451180ea9c7cdbddbb8ed49
| 5,834
|
py
|
Python
|
cardscan/process.py
|
GroupTraining/BusinessCardReader
|
b89e9dfa4907937663cb766c6c4cbc4f26e5b540
|
[
"MIT"
] | 52
|
2016-03-16T15:19:03.000Z
|
2021-09-07T08:45:43.000Z
|
cardscan/process.py
|
GroupTraining/BusinessCardReader
|
b89e9dfa4907937663cb766c6c4cbc4f26e5b540
|
[
"MIT"
] | 1
|
2016-06-21T12:32:26.000Z
|
2019-08-06T08:58:48.000Z
|
cardscan/process.py
|
GroupTraining/BusinessCardReader
|
b89e9dfa4907937663cb766c6c4cbc4f26e5b540
|
[
"MIT"
] | 28
|
2016-05-06T16:13:40.000Z
|
2022-01-19T02:53:03.000Z
|
import numpy as np
import cv2
from matplotlib import pyplot as plt
import subprocess
import utils
import findCard
DEBUG = False
PHONEALIASES = ['ph.', 'phone', 'tel.']
def parseText(text):
text = text.strip()
    # Replace em dashes with plain hyphens
text = text.replace('\xe2\x80\x94', '-')
# Remove left single quotation mark
text = text.replace('\xe2\x80\x98','')
text = text.replace('\xef','i')
text = text.replace('\xc3\xa9', '6')
text = text.decode('utf-8', 'ignore')
return text
def checkBlur(img):
img = cv2.resize(img, (1000,600))
grayImg = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
grayImg = cv2.equalizeHist(np.copy(grayImg))
fft = np.fft.fft(grayImg)
avgFFT = np.average(fft)
threshFFT_x, threshFFT_y = np.where(fft> 1.25*avgFFT)
return len(threshFFT_x) > 130000
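# Illustrative note (added): True means many FFT coefficients exceed 1.25x the average,
# i.e. the image has enough high-frequency detail to be treated as sharp rather than
# blurred. Sketch, with a hypothetical path:
#   sharp_enough = checkBlur(cv2.imread('card.jpg'))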
def getText(img, regions, debug=False):
textChunks = []
imgChunks = []
for region in regions:
x1, y1, x2, y2 = region
# Clip region and extract text
chunk = img[y1:y2, x1:x2, ::]
ret,chunk = cv2.threshold(chunk,140,255,0)
cv2.imwrite('tmp.jpg', chunk)
subprocess.call(['tesseract', 'tmp.jpg', 'tmp'])
f = open('tmp.txt')
lines = []
for line in f:
            print(line.strip())
if line.strip() != '':
lines.append(parseText(line))
textChunks.append(lines)
imgChunks.append(chunk)
f.close()
subprocess.call(['rm', 'tmp.jpg', 'tmp.txt'])
if DEBUG:
display = [(str(text), imgChunks[i]) for i, text in enumerate(textChunks)]
utils.display(display[:10])
utils.display(display[10:20])
return textChunks
def getRegions(img):
grayImg = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# grayImg = cv2.equalizeHist(np.copy(grayImg))
edges = cv2.Canny(grayImg,100,200,apertureSize = 3)
if DEBUG:
utils.display([('Canny Edge Detection', edges)])
kernel = np.ones((3,3),np.uint8)
edges = cv2.dilate(edges,kernel,iterations = 14)
# edges = 255-edges
# utils.display([('', edges)])
contours, hierarchy = cv2.findContours(edges,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
if DEBUG:
utils.display([('Contours', edges)])
# Only take contours of a certain size
regions = []
for contour in contours:
imgH, imgW, _ = img.shape
[x, y, w, h] = cv2.boundingRect(contour)
if w < 50 or h < 50:
pass
elif w > .95*imgW or h > .95*imgH:
pass
else:
regions.append((x, y, x+w, y+h))
return regions
def drawRegions(img, regions):
for x1, y1, x2, y2 in regions:
cv2.rectangle(img, (x1,y1), (x2,y2), (0, 255, 0), 2)
if DEBUG:
utils.display([('Regions', img)])
return img
def processCard(img):
regions = getRegions(img)
text = getText(img, regions)
return regions, text
def guessFields(regions, textFields):
'''
    Function to guess which fields are which based on properties of the text
'''
def checkForPhone(line):
# Checks if any substrings of phone or related words are in the line
for word in PHONEALIASES:
if word in line:
return True
return False
def checkEmail(line):
if '@' in line and '.' in line:
return True
elif 'email' in line or 'e-mail' in line:
return True
else:
return False
def checkFax(line):
if 'fax' in line:
return True
else:
return False
def checkWebsite(line):
if 'www' in line:
return True
else:
return False
sizes = [(x2-x1)*(y2-y1) for x1,y1,x2,y2 in regions]
sortedSizes = sorted(sizes)
sortedSizes.reverse()
suggestedFields = []
for i in range(len(regions)):
suggestedLinesField = []
lines = textFields[i]
size = sizes[i]
lineCount = 0
for line in lines:
line = line.lower()
if size == sortedSizes[0] and lineCount == 0:
# Largest size suggest for company name
suggestedLinesField.append('Company')
elif checkForPhone(line):
suggestedLinesField.append('Phone')
elif checkFax(line):
suggestedLinesField.append('Fax')
elif checkWebsite(line):
suggestedLinesField.append('Website')
else:
suggestedLinesField.append('')
lineCount += 1
lineCount = 0
suggestedFields.append(suggestedLinesField)
return suggestedFields
if __name__ == "__main__":
imgs = utils.getImages('../../stanford_business_cards/photos/', 5)
# imgs = utils.getImages('../our_cards/', 8)
DEBUG = True
# img = utils.readImage('../../stanford_business_cards/photos/004.jpg')
# imgs = [('',img)]
# utils.display(imgs)
good, cardImg = findCard.findCard(imgs[4][1])
utils.display([('card',cardImg)])
regions, text = processCard(cardImg)
processedCard = drawRegions(cardImg, regions)
suggestedFields = guessFields(regions, text)
utils.display([('card',processedCard)])
# scores = []
# for imgName, img in imgs:
# # regions, text = processCard(img)
# # guessFields(regions, text)
# try:
# good, cardImg = findCard.findCard(img)
# except:
# good = False
# pass
# if good:
# scores.append((checkBlur(cardImg), imgName, cardImg))
# scores.sort()
# for score in scores:
# print score[0], score[1]
# imgs = [(str(score), img) for score, imgName, img in scores]
# for i in range(len(imgs)/10+1):
# utils.display(imgs[i*10:i*10+10])
| 31.031915
| 87
| 0.580391
|
ff408a1458e993c1f93a642ad657ef50627e7df5
| 9,643
|
py
|
Python
|
aries_cloudagent/protocols/issue_credential/v1_0/messages/tests/test_credential_issue.py
|
kuraakhilesh8230/aries-cloudagent-python
|
ee384d1330f6a50ff45a507392ce54f92900f23a
|
[
"Apache-2.0"
] | 247
|
2019-07-02T21:10:21.000Z
|
2022-03-30T13:55:33.000Z
|
aries_cloudagent/protocols/issue_credential/v1_0/messages/tests/test_credential_issue.py
|
kuraakhilesh8230/aries-cloudagent-python
|
ee384d1330f6a50ff45a507392ce54f92900f23a
|
[
"Apache-2.0"
] | 1,462
|
2019-07-02T20:57:30.000Z
|
2022-03-31T23:13:35.000Z
|
aries_cloudagent/protocols/issue_credential/v1_0/messages/tests/test_credential_issue.py
|
kuraakhilesh8230/aries-cloudagent-python
|
ee384d1330f6a50ff45a507392ce54f92900f23a
|
[
"Apache-2.0"
] | 377
|
2019-06-20T21:01:31.000Z
|
2022-03-30T08:27:53.000Z
|
from unittest import mock, TestCase
from ......messaging.decorators.attach_decorator import AttachDecorator
from .....didcomm_prefix import DIDCommPrefix
from ...message_types import ATTACH_DECO_IDS, CREDENTIAL_ISSUE, PROTOCOL_PACKAGE
from ..credential_issue import CredentialIssue
class TestCredentialIssue(TestCase):
"""Credential issue tests"""
indy_cred = {
"schema_id": "LjgpST2rjsoxYegQDRm7EL:2:bc-reg:1.0",
"cred_def_id": "LjgpST2rjsoxYegQDRm7EL:3:CL:18:tag",
"rev_reg_id": "LjgpST2rjsoxYegQDRm7EL:4:LjgpST2rjsoxYegQDRm7EL:3:CL:18:tag:CL_ACCUM:1",
"values": {
"busId": {"raw": "11155555", "encoded": "11155555"},
"legalName": {
"raw": "Babka Galaxy",
"encoded": "107723975795096474174315415205901102419879622561395089750910511985549475735747",
},
"id": {"raw": "5", "encoded": "5"},
"orgTypeId": {"raw": "1", "encoded": "1"},
"effectiveDate": {
"raw": "2012-12-01",
"encoded": "58785836675119218543950531421539993546216494060018521243314445986885543138388",
},
"jurisdictionId": {"raw": "1", "encoded": "1"},
"endDate": {
"raw": "",
"encoded": "102987336249554097029535212322581322789799900648198034993379397001115665086549",
},
},
"signature": {
"p_credential": {
"m_2": "60025883287089799626689274984362649922028954710702989273350424792094051625907",
"a": "33574785085847496372223801384241174668280696192852342004649681358898319989377891201713237406189930904621943660579244780378356431325594072391319837474469436200535615918847408676250915598611100068705846552950672619639766733118699744590194148554187848404028169947572858712592004307286251531728499790515868404251079046925435202101170698552776314885035743276729493940581544827310348632105741785505818500141788882165796461479904049413245974826370118124656594309043126033311790481868941737635314924873471152593101941520014919522243774177999183508913726745154494726830096189641688720673911842149721875115446765101254783088102",
"e": "259344723055062059907025491480697571938277889515152306249728583105665800713306759149981690559193987143012367913206299323899696942213235956742929940839890541204554505134958365542601",
"v": "8609087712648327689510560843448768242969198387856549646434987127729892694214386082710530362693226591495343780017066542203667948482019255226968628218013767981247576292730389932608795727994162072985790185993138122475561426334951896920290599111436791225402577204027790420706987810169826735050717355066696030347321187354133263894735515127702270039945304850524250402144664403971571904353156572222923701680935669167750650688016372444804704998087365054978152701248950729399377780813365024757989269208934482967970445445223084620917624825052959697120057360426040239100930790635416973591134497181715131476498510569905885753432826750000829362210364061766697316138646771666357343198925355584209303847699218225254051213598531538421032318684976506329062116913654998320196203740062523483508588929287294193683755114531891923195772740958",
},
"r_credential": {
"sigma": "1 00F38C50E192DAF9133130888DA4A3291754B1A7D09A7DCCDD408D4E13F57267 1 0C6C9D8510580A8C9D8F0E21F51FF76E8F1419C2C909BBB9761AD9E75E46517F 2 095E45DDF417D05FB10933FFC63D474548B7FFFF7888802F07FFFFFF7D07A8A8",
"c": "12F8B7BD08471C27F6AF8EE06374D200FCEA61718FACA61FD8B90EEED7A11AD6",
"vr_prime_prime": "103015BFD51C02121DF61993973F312D5972EFF3B3B1B80BC614D5A747510366",
"witness_signature": {
"sigma_i": "1 165767F82FF8FD92237985441D2C758706A5EC1D21FBEF8611C6AC4E3CAD10DA 1 1FC786E5CD2D8B30F1C567579B4EC143C5951B7464F78B86A03419CB335EA81B 1 0B1A1356056BEDF9C61AE2D66FF0405E3B1D934DAC97099BDF6AC3ECCBFAF745 1 106B15BC294810EEDF8AD363A85CC8ECC8AA061538BB31BAE5252377D77E7FA3 2 095E45DDF417D05FB10933FFC63D474548B7FFFF7888802F07FFFFFF7D07A8A8 1 0000000000000000000000000000000000000000000000000000000000000000",
"u_i": "1 017A61B7C8B5B80EB245BE6788A28F926D8CBB9829E657D437640EF09ACD0C80 1 1AF4229C05C728AEAEEE6FC411B357B857E773BA79FF677373A6BE8F60C02C3A 1 10CB82C4913E2324C06164BF22A2BD38CEE528C797C55061C2D2486C3F6BF747 1 116CE544B1CB99556BFC0621C57C3D9F2B78D034946322EEA218DFDBDD940EA3 2 095E45DDF417D05FB10933FFC63D474548B7FFFF7888802F07FFFFFF7D07A8A8 1 0000000000000000000000000000000000000000000000000000000000000000",
"g_i": "1 0042BF46E9BAE9696F394FE7C26AFDE3C8963A2A0658D4C32737405F1576EB46 1 0194E97A9D92D46AAD61DAE06926D3361F531EB10D03C7520F3BD69D3E49311C 2 095E45DDF417D05FB10933FFC63D474548B7FFFF7888802F07FFFFFF7D07A8A8",
},
"g_i": "1 0042BF46E9BAE9696F394FE7C26AFDE3C8963A2A0658D4C32737405F1576EB46 1 0194E97A9D92D46AAD61DAE06926D3361F531EB10D03C7520F3BD69D3E49311C 2 095E45DDF417D05FB10933FFC63D474548B7FFFF7888802F07FFFFFF7D07A8A8",
"i": 1,
"m2": "84B5722AE3A1CF27CB1EA56CD33D289CB87A4401C6B103D0D7B7EA869DAF6BB3",
},
},
"signature_correctness_proof": {
"se": "19792617148120152105226254239016588540058878757479987545108556827210662529343348161518678852958020771878595740749192412985440625444455760950622452787061547854765389520937092533324699495837410270589105368479415954380927050080439536019149709356488657394895381670676082762285043378943096265107585990717517541825549361747506315768406364562926877132553754434293723146759285511815164904802662712140021121638529229138315163496513377824821704164701067409581646133944445999621553849950380606679724798867481070896073389886302519310697801643262282687875393404841657943289557895895565050618203027512724917946512514235898009424924",
"c": "20346348618412341786428948997994890734628812067145521907471418530511751955386",
},
"rev_reg": {
"accum": "21 12E821764448DE2B5754DEC16864096CFAE4BB68D4DC0CE3E5C4849FC7CBCCC0C 21 11677132B2DFB0C291D0616811BF2AC0CD464A35FF6927B821A5EACF24D94F3A5 6 5471991A0950DBD431A4DD86A8AD101E033AB5EBC29A97CAFE0E4F2C426F5821 4 1B34A4C75174974A698061A09AFFED62B78AC2AAF876BF7788BAF3FC9A8B47DF 6 7D7C5E96AE17DDB21EC98378E3185707A69CF86426F5526C9A55D1FAA2F6FA83 4 277100094333E24170CD3B020B0C91A7E9510F69218AD96AC966565AEF66BC71"
},
"witness": {
"omega": "21 136960A5E73C494F007BFE156889137E8B6DF301D5FF673C410CEE0F14AFAF1AE 21 132D4BA49C6BD8AB3CF52929D115976ABB1785D288F311CBB4455A85D07E2568C 6 70E7C40BA4F607262697556BB17FA6C85E9C188FA990264F4F031C39B5811239 4 351B98620B239DF14F3AB0B754C70597035A3B099D287A9855D11C55BA9F0C16 6 8AA1C473D792DF4F8287D0A93749046385CE411AAA1D685AA3C874C15B8628DB 4 0D6491BF5F127C1A0048CF137AEE17B62F4E49F3BDD9ECEBD14D56C43D211544"
},
}
cred_issue = CredentialIssue(
comment="Test",
credentials_attach=[
AttachDecorator.data_base64(
mapping=indy_cred,
ident=ATTACH_DECO_IDS[CREDENTIAL_ISSUE],
)
],
)
def test_init(self):
"""Test initializer"""
credential_issue = CredentialIssue(
comment="Test",
credentials_attach=[
AttachDecorator.data_base64(
mapping=self.indy_cred,
ident=ATTACH_DECO_IDS[CREDENTIAL_ISSUE],
)
],
)
assert credential_issue.credentials_attach[0].content == self.indy_cred
assert credential_issue.credentials_attach[0].ident # auto-generates UUID4
assert credential_issue.indy_credential(0) == self.indy_cred
def test_type(self):
"""Test type"""
credential_issue = CredentialIssue(
comment="Test",
credentials_attach=[
AttachDecorator.data_base64(
mapping=self.indy_cred,
ident=ATTACH_DECO_IDS[CREDENTIAL_ISSUE],
)
],
)
assert credential_issue._type == DIDCommPrefix.qualify_current(CREDENTIAL_ISSUE)
@mock.patch(
f"{PROTOCOL_PACKAGE}.messages.credential_issue.CredentialIssueSchema.load"
)
def test_deserialize(self, mock_credential_issue_schema_load):
"""
Test deserialize
"""
obj = self.cred_issue
credential_issue = CredentialIssue.deserialize(obj)
mock_credential_issue_schema_load.assert_called_once_with(obj)
assert credential_issue is mock_credential_issue_schema_load.return_value
@mock.patch(
f"{PROTOCOL_PACKAGE}.messages.credential_issue.CredentialIssueSchema.dump"
)
def test_serialize(self, mock_credential_issue_schema_dump):
"""
Test serialization.
"""
obj = self.cred_issue
credential_issue_dict = obj.serialize()
mock_credential_issue_schema_dump.assert_called_once_with(obj)
assert credential_issue_dict is mock_credential_issue_schema_dump.return_value
class TestCredentialIssueSchema(TestCase):
"""Test credential cred issue schema"""
credential_issue = CredentialIssue(
comment="Test",
credentials_attach=[AttachDecorator.data_base64({"hello": "world"})],
)
def test_make_model(self):
"""Test making model."""
data = self.credential_issue.serialize()
model_instance = CredentialIssue.deserialize(data)
assert isinstance(model_instance, CredentialIssue)
| 63.860927
| 844
| 0.772374
|
1b632ce197382f4375dd44a121127b2b8aefab47
| 1,854
|
py
|
Python
|
orio/downloadBinaries.py
|
NIEHS/orio
|
bf996ebcf41d14b945cd5848460b023376b637ad
|
[
"MIT"
] | 6
|
2017-04-19T08:49:20.000Z
|
2020-12-18T16:13:28.000Z
|
orio/downloadBinaries.py
|
NIEHS/orio
|
bf996ebcf41d14b945cd5848460b023376b637ad
|
[
"MIT"
] | null | null | null |
orio/downloadBinaries.py
|
NIEHS/orio
|
bf996ebcf41d14b945cd5848460b023376b637ad
|
[
"MIT"
] | 1
|
2020-12-18T16:14:45.000Z
|
2020-12-18T16:14:45.000Z
|
import os
import sys
import platform
from clint.textui import progress
import requests
from . import utils
REQUIRED_SOFTWARE = ('bigWigAverageOverBed', 'validateFiles', )
def binaries_exist():
root = utils.get_bin_path()
return all([
os.path.exists(os.path.join(root, fn))
for fn in REQUIRED_SOFTWARE
])
def get_root_url():
system = platform.system()
if system == 'Darwin':
return 'http://hgdownload.soe.ucsc.edu/admin/exe/macOSX.x86_64'
elif system == 'Linux':
return 'http://hgdownload.soe.ucsc.edu/admin/exe/linux.x86_64'
else:
raise OSError('Mac or Linux system required.')
def download_ucsc_tools():
bits, _ = platform.architecture()
# check architecture
if bits != '64bit':
raise OSError('64-bit architecture required.')
root_url = get_root_url()
root = utils.get_bin_path()
# download required software and place in appropriate location
sys.stdout.write("Downloading UCSC binaries for ORIO\n")
for fn in REQUIRED_SOFTWARE:
url = os.path.join(root_url, fn)
path = os.path.join(root, fn)
sys.stdout.write("Downloading: {}\n".format(url))
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(path, 'wb') as f:
total_length = int(r.headers.get('content-length'))
for chunk in progress.bar(r.iter_content(chunk_size=1024),
expected_size=(total_length / 1024) + 1):
if chunk:
f.write(chunk)
f.flush()
os.chmod(path, 0o751)
else:
sys.stderr.write("URL returned a non-200 status\n")
sys.stdout.write("Downloads complete!\n")
if __name__ == '__main__':
download_ucsc_tools()
| 28.090909
| 83
| 0.606257
|
44ab9432684515a768655ee09d6f9f0b0988927f
| 2,175
|
py
|
Python
|
setup.py
|
jklynch/bluesky-kafka-livegrid
|
1bd316f9e243e005278a9ac9404b32eaa03cb73f
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
jklynch/bluesky-kafka-livegrid
|
1bd316f9e243e005278a9ac9404b32eaa03cb73f
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
jklynch/bluesky-kafka-livegrid
|
1bd316f9e243e005278a9ac9404b32eaa03cb73f
|
[
"BSD-3-Clause"
] | null | null | null |
from os import path
from setuptools import setup, find_packages
import sys
import versioneer
# NOTE: This file must remain Python 2 compatible for the foreseeable future,
# to ensure that we error out properly for people with outdated setuptools
# and/or pip.
min_version = (3, 6)
if sys.version_info < min_version:
error = """
bluesky-kafka does not support Python {0}.{1}.
Python {2}.{3} and above is required. Check your Python version like so:
python3 --version
This may be due to an out-of-date pip. Make sure you have pip >= 9.0.1.
Upgrade pip like so:
pip install --upgrade pip
""".format(*(sys.version_info[:2] + min_version))
sys.exit(error)
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as readme_file:
readme = readme_file.read()
with open(path.join(here, 'requirements.txt')) as requirements_file:
# Parse requirements.txt, ignoring any commented-out lines.
requirements = [line for line in requirements_file.read().splitlines()
if not line.startswith('#')]
setup(
name='bluesky-kafka-livegrid',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="Kafka integration for bluesky.",
long_description=readme,
author="Brookhaven National Lab",
author_email='jlynch@bnl.gov',
url='https://github.com/jklynch/bluesky-kafka-livegrid',
python_requires='>={}'.format('.'.join(str(n) for n in min_version)),
packages=find_packages(exclude=['docs', 'tests']),
entry_points={
'console_scripts': [
# 'command = some.module:some_function',
],
},
include_package_data=True,
package_data={
'bluesky_kafka_livegrid': [
# When adding files here, remember to update MANIFEST.in as well,
# or else they will not be included in the distribution on PyPI!
# 'path/to/data_file',
]
},
install_requires=requirements,
license="BSD (3-clause)",
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Natural Language :: English',
'Programming Language :: Python :: 3',
],
)
| 31.985294
| 77
| 0.670345
|
cd0f00a663211ea7a9ee2f780241ab9b762134c3
| 1,844
|
py
|
Python
|
AlgorithmsDS/ex2.3.4appAnalyzis.py
|
w1ld/StepikExercies
|
3efe07819a0456aa3846587b2a23bad9dd9710db
|
[
"MIT"
] | 4
|
2019-05-11T17:26:24.000Z
|
2022-01-30T17:48:25.000Z
|
AlgorithmsDS/ex2.3.4appAnalyzis.py
|
w1ld/StepikExercises
|
3efe07819a0456aa3846587b2a23bad9dd9710db
|
[
"MIT"
] | null | null | null |
AlgorithmsDS/ex2.3.4appAnalyzis.py
|
w1ld/StepikExercises
|
3efe07819a0456aa3846587b2a23bad9dd9710db
|
[
"MIT"
] | 4
|
2019-01-24T22:15:21.000Z
|
2020-12-21T10:23:52.000Z
|
import sys
sys.setrecursionlimit(50000)
class DisjointSet:
def __init__(self, n):
self.a = [i for i in range(n)]
self.rank = [0 for i in range(n)]
def union(self, l, r):
lp = self.find(l)
rp = self.find(r)
if self.rank[lp] > self.rank[rp]:
self.a[rp] = lp
else:
self.a[lp] = rp
self.rank[lp] += 1
def find(self, i):
p = i
while True:
if p == self.a[p]:
break
p = self.a[p]
# flatten:
# if i != self.a[i]:
# self.a[i] = self.find(self.a[i])
return self.a[p]
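# Illustrative usage sketch of the DisjointSet above (added for clarity, not part of the original exercise).
def _disjoint_set_example():
    ds = DisjointSet(4)
    ds.union(0, 1)
    ds.union(2, 3)
    # 0 and 1 now share a representative, 2 and 3 share another one
    assert ds.find(0) == ds.find(1)
    assert ds.find(2) == ds.find(3)
    assert ds.find(0) != ds.find(2)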
def readinput():
n, e, d = [int(i) for i in input().strip().split(' ')]
ds = DisjointSet(n)
for k in range(e):
i, j = [int(i) for i in input().strip().split(' ')]
i -= 1
j -= 1
ds.union(i, j)
for k in range(d):
i, j = [int(i) for i in input().strip().split(' ')]
i -= 1
j -= 1
if ds.find(i) == ds.find(j):
print(0)
exit()
print(1)
# ===============================================================
def test():
from test import Tester
t = Tester(__file__)
t.test(
'''4 6 0
1 2
1 3
1 4
2 3
2 4
3 4''',
"1")
t.test(
'''6 5 3
2 3
1 5
2 5
3 4
4 2
6 1
4 6
4 5''',
"0")
t.test(
str.format('''100000 100000 100000
{}
{}''',
'\n'.join(
[str(i+1) + ' ' + str(i+2) for i in range(99999)]
+ ['1 1']
),
'\n'.join(
['1 100000' for i in range(100000)]
)
),
"1")
if __name__ == '__main__':
import sys
if '-t' in sys.argv:
test()
else:
readinput()
| 18.626263
| 66
| 0.383948
|
1055d7b55601c24be2fedc4813df69f19511c59b
| 1,240
|
py
|
Python
|
website/templatetags/reservation.py
|
jrdbnntt-com/com_jrdbnntt_wedding
|
101c825f420076e36ea598332abc87da403910be
|
[
"MIT"
] | null | null | null |
website/templatetags/reservation.py
|
jrdbnntt-com/com_jrdbnntt_wedding
|
101c825f420076e36ea598332abc87da403910be
|
[
"MIT"
] | null | null | null |
website/templatetags/reservation.py
|
jrdbnntt-com/com_jrdbnntt_wedding
|
101c825f420076e36ea598332abc87da403910be
|
[
"MIT"
] | null | null | null |
from django import template
from django.utils.html import escape
from django.utils.safestring import mark_safe
from website.models.guest import Guest
from website.models.reservation import Reservation
register = template.Library()
@register.simple_tag
def guest_full_name(guest: Guest) -> str:
return guest.full_name()
@register.simple_tag
def guest_rsvp_summary(guest: Guest) -> str:
summary = guest.rsvp_answer_display()
if guest.rsvp_comment is not None:
summary += '; ' + guest.rsvp_comment
return summary
@register.simple_tag
def guest_rsvp_status_sentence(reservation: Reservation, guest: Guest) -> str:
result = escape(guest.first_name)
if guest.rsvp_answer is None:
result += " has <b>not yet RSVP'd</b> to the wedding ceremony"
else:
result += " is <b>" + guest.rsvp_answer_display().lower() + "</b> to the wedding ceremony"
if reservation.invited_to_rehearsal:
if guest.rehearsal_rsvp_answer is None:
result += " and has <b>not yet RSVP'd</b> to the rehearsal dinner"
else:
result += " and is <b>" + guest.rehearsal_rsvp_answer_display().lower() + "</b> to the rehearsal dinner"
result += '.'
return mark_safe(result)
| 32.631579
| 116
| 0.698387
|
d9eca831f861c0a0c59e92767940fc06d3e8be84
| 1,201
|
py
|
Python
|
shadow-hunters/tests/class_tests/test_area.py
|
dolphonie/shadow-hunters
|
2257a67f965cf43e1e5c9c8e7af87fe9ae16f5c9
|
[
"MIT"
] | 17
|
2019-05-04T13:25:33.000Z
|
2022-01-22T14:50:49.000Z
|
shadow-hunters/tests/class_tests/test_area.py
|
dolphonie/shadow-hunters
|
2257a67f965cf43e1e5c9c8e7af87fe9ae16f5c9
|
[
"MIT"
] | 25
|
2020-05-24T03:29:42.000Z
|
2021-03-29T07:07:47.000Z
|
shadow-hunters/tests/class_tests/test_area.py
|
dolphonie/shadow-hunters
|
2257a67f965cf43e1e5c9c8e7af87fe9ae16f5c9
|
[
"MIT"
] | 7
|
2019-05-30T00:15:58.000Z
|
2022-01-16T14:37:25.000Z
|
import pytest
from area import Area
from zone import Zone
# test_area.py
# Tests for the Area object
def test_fields():
# test initialization
a = Area(
name="area_name",
desc="area_desc",
domain=[9],
action=lambda: 5,
resource_id="r_id"
)
# test fields
assert a.name == "area_name"
assert a.desc == "area_desc"
assert len(a.domain) == 1 and a.domain[0] == 9
assert a.action() == 5
assert a.resource_id == "r_id"
# test dump
dump = a.dump()
assert dump['name'] == "area_name"
assert dump['desc'] == "area_desc"
assert dump['domain'] == "[9]"
assert str(dump) == str(a)
def test_getAdjacent():
# Put two areas in a zone
a = Area(
name="A",
desc="area_desc",
domain=[8],
action=lambda: 5,
resource_id="a_id"
)
b = Area(
name="B",
desc="area_desc",
domain=[9],
action=lambda: 5,
resource_id="b_id"
)
z = Zone([a, b])
for x in z.areas:
x.zone = z
assert a.zone == z
assert b.zone == z
# Test adjacency
assert a.getAdjacent() == b
assert b.getAdjacent() == a
| 19.063492
| 50
| 0.535387
|
1ca162323202f5f9f581f7de1679a6cb12409d32
| 1,359
|
py
|
Python
|
onepage/models.py
|
naichilab/onepage
|
b656ac76fd83d80e45a8d13835436865461432a1
|
[
"MIT"
] | null | null | null |
onepage/models.py
|
naichilab/onepage
|
b656ac76fd83d80e45a8d13835436865461432a1
|
[
"MIT"
] | null | null | null |
onepage/models.py
|
naichilab/onepage
|
b656ac76fd83d80e45a8d13835436865461432a1
|
[
"MIT"
] | null | null | null |
from math import ceil
from orator import DatabaseManager
from orator import Model
from orator.orm import scope
from orator.orm import has_many
from orator.orm import belongs_to
from orator.orm import belongs_to_many
from onepage.db import dbconfig
from onepage.db import schema
Model.set_connection_resolver(DatabaseManager(dbconfig.DATABASES))
class User(Model):
__table__ = schema.USER_TABLE_NAME
__fillable__ = ['pen_name']
__guarded__ = ['email', 'password_hash']
__timestamps__ = False
@classmethod
def find_by_email(cls, email):
return cls.query().where_email(email).get().first()
@has_many
def novels(self):
return Novel
class Tag(Model):
__table__ = schema.TAG_TABLE_NAME
__fillable__ = ['name']
@has_many
def novels(self):
return Novel
class Novel(Model):
__table__ = schema.NOVEL_TABLE_NAME
__fillable__ = ['title', 'text', 'user_id', 'category_id']
@belongs_to
def user(self):
return User
@belongs_to_many
def tags(self):
return Tag
@scope
def author(self, query, user_id):
return query.where_user_id(user_id)
@scope
def pagenation(self, query, page):
return query.order_by('created_at').paginate(20, page)
@classmethod
def page_count(cls):
return ceil(cls.count() / 20)
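# Illustrative sketch (assumption, not from the original project): with orator scopes, the queries
# defined above might be used roughly as
#   recent = Novel.author(user.id).pagenation(1)
#   pages = Novel.page_count()
# where `author` and `pagenation` are the scopes declared on the Novel model.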
| 21.571429
| 66
| 0.688742
|
2703a253f374f79ec71113a7792cec606a715f36
| 2,089
|
py
|
Python
|
Demo/pyasn1/psearch.py
|
reqa/python-ldap
|
e75c24dd70dcf10c8315d6f30ecf98f2c30f08e8
|
[
"MIT"
] | 299
|
2017-11-23T14:24:32.000Z
|
2022-03-25T08:45:24.000Z
|
Demo/pyasn1/psearch.py
|
reqa/python-ldap
|
e75c24dd70dcf10c8315d6f30ecf98f2c30f08e8
|
[
"MIT"
] | 412
|
2017-11-23T22:21:36.000Z
|
2022-03-18T11:20:59.000Z
|
Demo/pyasn1/psearch.py
|
reqa/python-ldap
|
e75c24dd70dcf10c8315d6f30ecf98f2c30f08e8
|
[
"MIT"
] | 114
|
2017-11-23T14:24:37.000Z
|
2022-03-24T20:55:42.000Z
|
"""
Demo script for Persistent Search Control
(see https://tools.ietf.org/html/draft-ietf-ldapext-psearch)
See https://www.python-ldap.org/ for project details.
This needs the following software:
Python
pyasn1
pyasn1-modules
python-ldap 2.4+
"""
import sys,ldap,ldapurl,getpass
from ldap.controls.psearch import PersistentSearchControl,EntryChangeNotificationControl,CHANGE_TYPES_STR
try:
ldap_url = ldapurl.LDAPUrl(sys.argv[1])
except IndexError:
print('Usage: psearch.py <LDAP URL>')
sys.exit(1)
# Set debugging level
#ldap.set_option(ldap.OPT_DEBUG_LEVEL,255)
ldapmodule_trace_level = 2
ldapmodule_trace_file = sys.stderr
ldap_conn = ldap.ldapobject.LDAPObject(
ldap_url.initializeUrl(),
trace_level=ldapmodule_trace_level,
trace_file=ldapmodule_trace_file
)
if ldap_url.cred is None:
print('Password for %s:' % (repr(ldap_url.who)))
ldap_url.cred = getpass.getpass()
try:
ldap_conn.simple_bind_s(ldap_url.who,ldap_url.cred)
except ldap.INVALID_CREDENTIALS as e:
print('Simple bind failed:',str(e))
sys.exit(1)
psc = PersistentSearchControl()
msg_id = ldap_conn.search_ext(
ldap_url.dn,
ldap_url.scope,
ldap_url.filterstr,
attrlist = ldap_url.attrs or ['*','+'],
serverctrls=[psc],
)
while True:
try:
res_type,res_data,res_msgid,_,_,_ = ldap_conn.result4(
msg_id,
all=0,
timeout=10.0,
add_ctrls=1,
add_intermediates=1,
resp_ctrl_classes={EntryChangeNotificationControl.controlType:EntryChangeNotificationControl},
)
except ldap.TIMEOUT:
print('Timeout waiting for results...')
else:
for dn,entry,srv_ctrls in res_data:
ecn_ctrls = [
c
for c in srv_ctrls
if c.controlType == EntryChangeNotificationControl.controlType
]
if ecn_ctrls:
changeType,previousDN,changeNumber = ecn_ctrls[0].changeType,ecn_ctrls[0].previousDN,ecn_ctrls[0].changeNumber
change_type_desc = CHANGE_TYPES_STR[changeType]
print('changeType: %s (%d), changeNumber: %s, previousDN: %s' % (change_type_desc,changeType,changeNumber,repr(previousDN)))
| 26.443038
| 132
| 0.733365
|
6d200cca3ea103bcf1f1aa0cd39caa76f4d4e06e
| 4,708
|
py
|
Python
|
Data/basis_converter_jaguar.py
|
certik/pyquante
|
f5cae27f519b1c1b70afbebfe8b5c83cb4b3c2a6
|
[
"DOC"
] | 8
|
2016-08-26T14:57:01.000Z
|
2019-12-23T07:39:37.000Z
|
Data/basis_converter_jaguar.py
|
nicodgomez/pyquante
|
483571110b83cab406d3d4d8f2eba5cae0a2da58
|
[
"DOC"
] | 1
|
2019-02-03T10:44:02.000Z
|
2019-02-03T10:44:02.000Z
|
Data/basis_converter_jaguar.py
|
nicodgomez/pyquante
|
483571110b83cab406d3d4d8f2eba5cae0a2da58
|
[
"DOC"
] | 8
|
2016-02-23T19:22:11.000Z
|
2021-08-28T12:12:59.000Z
|
#!/usr/bin/env python
"""\
basis_converter.py Convert a Jaguar basis record to a native python format.
This program is part of the PyQuante quantum chemistry suite.
PyQuante is copyright (c) 2002 Richard P. Muller. All Rights Reserved.
You may contact the author at rpm@wag.caltech.edu.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307
"""
import sys,string
sym2no = {
'X' : 0, 'H' : 1, 'He' : 2,
'Li' : 3, 'Be' : 4, 'B' : 5, 'C' : 6, 'N' : 7,
'O' : 8, 'F' : 9, 'Ne' : 10,
'Na' : 11, 'Mg' : 12, 'Al' : 13, 'Si' : 14,
'P' : 15, 'S' : 16, 'Cl' : 17, 'Ar' : 18,
'K' : 19, 'Ca' : 20, 'Sc':21, 'Ti':22, 'V':23,'Cr':24,'Mn':25,
'Fe' : 26, 'Co':27, 'Ni':28, 'Cu':29,'Zn':30,
'Ga' : 31,'Ge':32,'As':33,'Se':34,'Br':35,'Kr':36,
'Rb':37, 'Sr':38,'Y':39,'Zr':40,'Nb':41,'Mo':42,'Tc':43,
'Ru' : 44,'Rh':45,'Pd':46,'Ag':47,'Cd':48,'In':49,
'Sn':50,'Sb':51,'Te':52,'I':53,'Xe':54,
'Cs':55,'Ba':56,'La':57,'Ce':58,'Pr':59,'Nd':60,'Pm':61,'Sm':62,
'Eu':63,'Gd':64,'Tb':65,'Dy':66,'Ho':67,'Er':68,'Tm':69,'Yb':70,
'Lu':71,'Hf':72,'Ta':73,'W':74,'Re':75,'Os':76,'Ir':77,'Pt':78,
'Au':79,'Hg':80,'Tl':81,'Pb':82,'Bi':83,'At':85,'Rn':86,
'U' : 92,
'x' : 0, 'h' : 1, 'he' : 2,
'li' : 3, 'be' : 4, 'b' : 5, 'c' : 6, 'n' : 7,
'o' : 8, 'f' : 9, 'ne' : 10,
'na' : 11, 'mg' : 12, 'al' : 13, 'si' : 14,
'p' : 15, 's' : 16, 'cl' : 17, 'ar' : 18,
'k' : 19, 'ca' : 20, 'sc':21, 'ti':22, 'v':23,'cr':24,'mn':25,
'fe' : 26, 'co':27, 'ni':28, 'cu':29,'zn':30,
'ga' : 31,'ge':32,'as':33,'se':34,'br':35,'kr':36,
'rb':37, 'sr':38,'y':39,'zr':40,'nb':41,'mo':42,'tc':43,
'ru' : 44,'rh':45,'pd':46,'ag':47,'cd':48,'in':49,
'sn':50,'sb':51,'te':52,'i':53,'xe':54,
'cs':55,'ba':56,'la':57,'ce':58,'pr':59,'nd':60,'pm':61,'sm':62,
'eu':63,'gd':64,'tb':65,'dy':66,'ho':67,'er':68,'tm':69,'yb':70,
'lu':71,'hf':72,'ta':73,'w':74,'re':75,'os':76,'ir':77,'pt':78,
'au':79,'hg':80,'tl':81,'pb':82,'bi':83,'at':85,'rn':86,
'u' : 92,
}
def main(filename="basis_631ss.dat"):
outfilename = string.replace(filename,'.dat','.py')
file = open(filename)
bfs = []
while 1:
line = file.readline()
if not line: break
words = string.split(line)
sym = words[0]
atno = sym2no[sym]
nat = len(bfs)
if len(bfs) < atno+1:
for i in range(atno+1-len(bfs)): bfs.append([])
while 1:
line = file.readline()
if not line: break
words = string.split(line)
if len(words) < 1: break
if words[0] == '****': break
type,nprim = words[0],int(words[2])
try:
nprim2 = int(words[3])
nprim = nprim + nprim2
except:
pass
prims = []
pprims = []
for i in range(nprim):
line = file.readline()
words = string.split(line)
expnt = float(words[0])
coef = float(words[1])
prims.append((expnt,coef))
if type == 'SP':
coef2 = float(words[2])
pprims.append((expnt,coef2))
if type == 'SP':
bfs[atno].append(('S',prims))
bfs[atno].append(('P',pprims))
else:
bfs[atno].append((type,prims))
file.close()
file = open(outfilename,'w')
file.write('basis = [\n')
for bf in bfs:
if bf:
file.write(' [\n')
for type,prims in bf:
file.write(' (\'%s\',[\n' % type)
for expnt,coef in prims:
file.write(' (%f, %f),\n' % (expnt,coef))
file.write(' ]),\n')
file.write(' ],\n')
else:
file.write(' None,\n')
file.write(' ]\n')
if __name__ == '__main__':
if len(sys.argv) < 2:
main()
else:
main(sys.argv[1])
| 34.617647
| 76
| 0.481308
|
0eb89d848120d9018abefbfc5157d132b66f497f
| 139
|
py
|
Python
|
python_3/aula10a.py
|
felipesch92/CursoEmVideo
|
df443e4771adc4506c96d8f419aa7acb97b28366
|
[
"MIT"
] | null | null | null |
python_3/aula10a.py
|
felipesch92/CursoEmVideo
|
df443e4771adc4506c96d8f419aa7acb97b28366
|
[
"MIT"
] | null | null | null |
python_3/aula10a.py
|
felipesch92/CursoEmVideo
|
df443e4771adc4506c96d8f419aa7acb97b28366
|
[
"MIT"
] | null | null | null |
if 5 > 3:
    print('5 is greater than 3')
else:
    print('5 is not greater than 3')
print('5 is greater than 3' if 5 > 3 else '5 is not greater than 3')
| 23.166667
| 60
| 0.582734
|
3768e61dd43aba147dd49389e546ec40fc87d8bf
| 1,419
|
py
|
Python
|
Chapter07/07_fcn_32s_keras.py
|
PacktPublishing/Practical-Computer-Vision
|
8cf8ee58d306e0fb1a2e21b6518136bb4c174ff5
|
[
"MIT"
] | 23
|
2018-02-28T05:00:34.000Z
|
2022-02-04T13:26:03.000Z
|
Chapter07/07_fcn_32s_keras.py
|
PacktPublishing/Practical-Computer-Vision
|
8cf8ee58d306e0fb1a2e21b6518136bb4c174ff5
|
[
"MIT"
] | 4
|
2018-03-23T11:35:31.000Z
|
2022-01-31T14:52:57.000Z
|
Chapter07/07_fcn_32s_keras.py
|
PacktPublishing/Practical-Computer-Vision
|
8cf8ee58d306e0fb1a2e21b6518136bb4c174ff5
|
[
"MIT"
] | 23
|
2018-02-08T03:17:16.000Z
|
2022-01-03T09:12:47.000Z
|
from keras.models import *
from keras.layers import *
from keras.applications.vgg16 import VGG16
def create_model_fcn32(nb_class, input_w=256):
"""
    Create FCN-32s model for segmentation.
Input:
nb_class: number of detection categories
input_w: input width, using square image
Returns model created for training.
"""
input = Input(shape=(input_w, input_w, 3))
    # initialize feature extractor excluding fully connected layers
# here we use VGG model, with pre-trained weights.
vgg = VGG16(include_top=False, weights='imagenet', input_tensor=input)
# create further network
x = Conv2D(4096, kernel_size=(7,7), use_bias=False,
activation='relu', padding="same")(vgg.output)
x = Dropout(0.5)(x)
x = Conv2D(4096, kernel_size=(1,1), use_bias=False,
activation='relu', padding="same")(x)
x = Dropout(0.5)(x)
x = Conv2D(nb_class, kernel_size=(1,1), use_bias=False,
padding="same")(x)
# upsampling to image size
x = Conv2DTranspose(nb_class ,
kernel_size=(64,64),
strides=(32,32),
use_bias=False, padding='same')(x)
x = Activation('softmax')(x)
model = Model(input, x)
model.summary()
return model
# Create model for pascal voc image segmentation for 21 classes
model = create_model_fcn32(21)
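# Minimal follow-up sketch (assumed settings, not part of the original script): before training,
# the returned Keras model would typically be compiled, e.g.
#   model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])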
| 33.785714
| 74
| 0.627907
|
ea11e6903e2d0696fffa09b8d9d3fa1d9fba8eba
| 67,569
|
py
|
Python
|
spectractor/fit/fitter.py
|
LSSTDESC/Spectractor
|
547f2ed6a6882ddc66030e2be989bee38401f213
|
[
"BSD-3-Clause"
] | 14
|
2018-04-05T06:55:32.000Z
|
2021-12-28T09:44:18.000Z
|
spectractor/fit/fitter.py
|
LSSTDESC/Spectractor
|
547f2ed6a6882ddc66030e2be989bee38401f213
|
[
"BSD-3-Clause"
] | 73
|
2018-04-23T09:39:07.000Z
|
2022-03-29T14:08:18.000Z
|
spectractor/fit/fitter.py
|
LSSTDESC/Spectractor
|
547f2ed6a6882ddc66030e2be989bee38401f213
|
[
"BSD-3-Clause"
] | 6
|
2018-07-05T05:36:25.000Z
|
2020-04-10T10:52:38.000Z
|
from iminuit import Minuit
from scipy import optimize
from schwimmbad import MPIPool
import emcee
import time
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import numpy as np
import sys
import os
import multiprocessing
from spectractor import parameters
from spectractor.config import set_logger
from spectractor.tools import formatting_numbers, compute_correlation_matrix, plot_correlation_matrix_simple
from spectractor.fit.statistics import Likelihood
class FitWorkspace:
def __init__(self, file_name="", nwalkers=18, nsteps=1000, burnin=100, nbins=10,
verbose=0, plot=False, live_fit=False, truth=None):
"""Generic class to create a fit workspace with parameters, bounds and general fitting methods.
Parameters
----------
file_name: str, optional
            The generic file name to save results. If file_name=="", nothing is saved on disk (default: "").
nwalkers: int, optional
Number of walkers for MCMC exploration (default: 18).
nsteps: int, optional
Number of steps for MCMC exploration (default: 1000).
burnin: int, optional
Number of burn-in steps for MCMC exploration (default: 100).
nbins: int, optional
Number of bins to make histograms after MCMC exploration (default: 10).
verbose: int, optional
Level of verbosity (default: 0).
plot: bool, optional
Level of plotting (default: False).
        live_fit: bool, optional
If True, model, data and residuals plots are made along the fitting procedure (default: False).
truth: array_like, optional
Array of true parameters (default: None).
Examples
--------
>>> w = FitWorkspace()
>>> w.ndim
0
"""
self.my_logger = set_logger(self.__class__.__name__)
self.filename = file_name
self.truth = truth
self.verbose = verbose
self.plot = plot
self.live_fit = live_fit
self.p = np.array([])
self.cov = np.array([[]])
self.rho = np.array([[]])
self.data = None
self.err = None
self.data_cov = None
self.W = None
self.x = None
self.outliers = []
self.mask = []
self.sigma_clip = 5
self.model = None
self.model_err = None
self.model_noconv = None
self.input_labels = []
self.axis_names = []
self.input_labels = []
self.bounds = ((), ())
self.fixed = []
self.nwalkers = max(2 * self.ndim, nwalkers)
self.nsteps = nsteps
self.nbins = nbins
self.burnin = burnin
self.start = []
self.likelihood = np.array([[]])
self.gelmans = np.array([])
self.chains = np.array([[]])
self.lnprobs = np.array([[]])
self.costs = np.array([[]])
self.params_table = None
self.flat_chains = np.array([[]])
self.valid_chains = [False] * self.nwalkers
self.global_average = None
self.global_std = None
self.title = ""
self.use_grid = False
if self.filename != "":
if "." in self.filename:
self.emcee_filename = os.path.splitext(self.filename)[0] + "_emcee.h5"
else:
self.my_logger.warning("\n\tFile name must have an extension.")
else:
self.emcee_filename = "emcee.h5"
@property
def ndim(self):
"""Number of parameters of the model.
Returns
-------
ndim: int
Examples
--------
>>> from spectractor.fit.fitter import FitWorkspace
>>> import numpy as np
>>> w = FitWorkspace()
>>> w.p = np.ones(5)
>>> w.ndim
5
"""
return len(self.p)
def get_bad_indices(self):
"""List of indices that are outliers rejected by a sigma-clipping method or other masking method.
Returns
-------
outliers: list
Examples
--------
>>> from spectractor.fit.fitter import FitWorkspace
>>> import numpy as np
>>> w = FitWorkspace()
>>> w.data = np.array([np.array([1,2,3]), np.array([1,2,3,4])])
>>> w.outliers = [2, 6]
>>> w.get_bad_indices()
[array([2]), array([3])]
"""
bad_indices = np.asarray(self.outliers, dtype=int)
if self.data.dtype == np.object:
if len(self.outliers) > 0:
bad_indices = []
start_index = 0
for k in range(self.data.shape[0]):
mask = np.zeros(self.data[k].size, dtype=bool)
outliers = np.asarray(self.outliers)[np.logical_and(np.asarray(self.outliers) > start_index,
np.asarray(self.outliers) < start_index +
self.data[k].size)]
mask[outliers - start_index] = True
bad_indices.append(np.arange(self.data[k].size)[mask])
start_index += self.data[k].size
else:
bad_indices = [[] for _ in range(self.data.shape[0])]
return bad_indices
def set_start(self, percent=0.02, a_random=1e-5):
"""Set the random starting points for MCMC exploration.
        A set of parameters is drawn with a uniform distribution between +/- percent times the starting guess.
For null guess parameters, starting points are drawn from a uniform distribution between +/- a_random.
Parameters
----------
percent: float, optional
Percent of the guess parameters to set the uniform interval to draw random points (default: 0.02).
a_random: float, optional
Absolute value to set the +/- uniform interval to draw random points
for null guess parameters (default: 1e-5).
Returns
-------
start: np.array
Array of starting points of shape (ndim, nwalkers).
"""
self.start = np.array(
[np.random.uniform(self.p[i] - percent * self.p[i], self.p[i] + percent * self.p[i], self.nwalkers)
for i in range(self.ndim)]).T
self.start[self.start == 0] = a_random * np.random.uniform(-1, 1)
return self.start
def load_chains(self):
"""Load the MCMC chains from a hdf5 file. The burn-in points are not rejected at this stage.
Returns
-------
chains: np.array
Array of the chains.
lnprobs: np.array
Array of the logarithmic posterior probability.
"""
self.chains = [[]]
self.lnprobs = [[]]
self.nsteps = 0
# tau = -1
reader = emcee.backends.HDFBackend(self.emcee_filename)
try:
tau = reader.get_autocorr_time()
except emcee.autocorr.AutocorrError:
tau = -1
self.chains = reader.get_chain(discard=0, flat=False, thin=1)
self.lnprobs = reader.get_log_prob(discard=0, flat=False, thin=1)
self.nsteps = self.chains.shape[0]
self.nwalkers = self.chains.shape[1]
print(f"Auto-correlation time: {tau}")
print(f"Burn-in: {self.burnin}")
print(f"Chains shape: {self.chains.shape}")
print(f"Log prob shape: {self.lnprobs.shape}")
return self.chains, self.lnprobs
def build_flat_chains(self):
"""Flatten the chains array and apply burn-in.
Returns
-------
flat_chains: np.array
Flat chains.
"""
self.flat_chains = self.chains[self.burnin:, self.valid_chains, :].reshape((-1, self.ndim))
return self.flat_chains
def simulate(self, *p):
"""Compute the model prediction given a set of parameters.
Parameters
----------
p: array_like
Array of parameters for the computation of the model.
Returns
-------
x: array_like
            The abscissa of the model prediction.
model: array_like
The model prediction.
model_err: array_like
The uncertainty on the model prediction.
Examples
--------
>>> w = FitWorkspace()
>>> p = np.zeros(3)
>>> x, model, model_err = w.simulate(*p)
.. doctest::
:hide:
>>> assert x is not None
"""
self.x = np.array([])
self.model = np.array([])
self.model_err = np.array([])
return self.x, self.model, self.model_err
def analyze_chains(self):
"""Load the chains, build the probability densities for the parameters, compute the best fitting values
and the uncertainties and covariance matrices, and plot.
"""
self.load_chains()
self.set_chain_validity()
self.convergence_tests()
self.build_flat_chains()
self.likelihood = self.chain2likelihood()
self.cov = self.likelihood.cov_matrix
self.rho = self.likelihood.rho_matrix
self.p = self.likelihood.mean_vec
self.simulate(*self.p)
self.plot_fit()
figure_name = os.path.splitext(self.emcee_filename)[0] + '_triangle.pdf'
self.likelihood.triangle_plots(output_filename=figure_name)
def plot_fit(self):
"""Generic function to plot the result of the fit for 1D curves.
Returns
-------
fig: plt.FigureClass
The figure.
"""
fig = plt.figure()
plt.errorbar(self.x, self.data, yerr=self.err, fmt='ko', label='Data')
if self.truth is not None:
x, truth, truth_err = self.simulate(*self.truth)
plt.plot(self.x, truth, label="Truth")
plt.plot(self.x, self.model, label='Best fitting model')
plt.xlabel('$x$')
plt.ylabel('$y$')
title = ""
for i, label in enumerate(self.input_labels):
if self.cov.size > 0:
err = np.sqrt(self.cov[i, i])
formatting_numbers(self.p[i], err, err)
_, par, err, _ = formatting_numbers(self.p[i], err, err, label=label)
title += rf"{label} = {par} $\pm$ {err}"
else:
title += f"{label} = {self.p[i]:.3g}"
if i < len(self.input_labels) - 1:
title += ", "
plt.title(title)
plt.legend()
plt.grid()
if parameters.DISPLAY: # pragma: no cover
plt.show()
return fig
def chain2likelihood(self, pdfonly=False, walker_index=-1):
        Convert the chains to a posterior probability density function via histograms.
Parameters
----------
pdfonly: bool, optional
If True, do not compute the covariances and the 2D correlation plots (default: False).
walker_index: int, optional
The walker index to plot. If -1, all walkers are selected (default: -1).
Returns
-------
likelihood: np.array
Posterior density function.
"""
if walker_index >= 0:
chains = self.chains[self.burnin:, walker_index, :]
else:
chains = self.flat_chains
rangedim = range(chains.shape[1])
centers = []
for i in rangedim:
centers.append(np.linspace(np.min(chains[:, i]), np.max(chains[:, i]), self.nbins - 1))
likelihood = Likelihood(centers, labels=self.input_labels, axis_names=self.axis_names, truth=self.truth)
if walker_index < 0:
for i in rangedim:
likelihood.pdfs[i].fill_histogram(chains[:, i], weights=None)
if not pdfonly:
for j in rangedim:
if i != j:
likelihood.contours[i][j].fill_histogram(chains[:, i], chains[:, j], weights=None)
output_file = ""
if self.filename != "":
output_file = os.path.splitext(self.filename)[0] + "_bestfit.txt"
likelihood.stats(output=output_file)
else:
for i in rangedim:
likelihood.pdfs[i].fill_histogram(chains[:, i], weights=None)
return likelihood
def compute_local_acceptance_rate(self, start_index, last_index, walker_index):
"""Compute the local acceptance rate in a chain.
Parameters
----------
start_index: int
Beginning index.
last_index: int
End index.
walker_index: int
Index of the walker.
Returns
-------
freq: float
The acceptance rate.
"""
frequences = []
test = -2 * self.lnprobs[start_index, walker_index]
counts = 1
for index in range(start_index + 1, last_index):
chi2 = -2 * self.lnprobs[index, walker_index]
if np.isclose(chi2, test):
counts += 1
else:
frequences.append(float(counts))
counts = 1
test = chi2
frequences.append(counts)
return 1.0 / np.mean(frequences)
def set_chain_validity(self):
"""Test the validity of a chain: reject chains whose chi2 is far from the mean of the others.
Returns
-------
valid_chains: list
List of boolean values, True if the chain is valid, or False if invalid.
"""
nchains = [k for k in range(self.nwalkers)]
chisq_averages = []
chisq_std = []
for k in nchains:
chisqs = -2 * self.lnprobs[self.burnin:, k]
# if np.mean(chisqs) < 1e5:
chisq_averages.append(np.mean(chisqs))
chisq_std.append(np.std(chisqs))
self.global_average = np.mean(chisq_averages)
self.global_std = np.mean(chisq_std)
self.valid_chains = [False] * self.nwalkers
for k in nchains:
chisqs = -2 * self.lnprobs[self.burnin:, k]
chisq_average = np.mean(chisqs)
chisq_std = np.std(chisqs)
if 3 * self.global_std + self.global_average < chisq_average < 1e5:
self.valid_chains[k] = False
elif chisq_std < 0.1 * self.global_std:
self.valid_chains[k] = False
else:
self.valid_chains[k] = True
return self.valid_chains
def convergence_tests(self):
"""Compute the convergence tests (Gelman-Rubin, acceptance rate).
"""
chains = self.chains[self.burnin:, :, :] # .reshape((-1, self.ndim))
nchains = [k for k in range(self.nwalkers)]
fig, ax = plt.subplots(self.ndim + 1, 2, figsize=(16, 7), sharex='all')
fontsize = 8
steps = np.arange(self.burnin, self.nsteps)
# Chi2 vs Index
print("Chisq statistics:")
for k in nchains:
chisqs = -2 * self.lnprobs[self.burnin:, k]
text = f"\tWalker {k:d}: {float(np.mean(chisqs)):.3f} +/- {float(np.std(chisqs)):.3f}"
if not self.valid_chains[k]:
text += " -> excluded"
ax[self.ndim, 0].plot(steps, chisqs, c='0.5', linestyle='--')
else:
ax[self.ndim, 0].plot(steps, chisqs)
print(text)
# global_average = np.mean(-2*self.lnprobs[self.valid_chains, self.burnin:])
# global_std = np.std(-2*self.lnprobs[self.valid_chains, self.burnin:])
ax[self.ndim, 0].set_ylim(
[self.global_average - 5 * self.global_std, self.global_average + 5 * self.global_std])
# Parameter vs Index
print("Computing Parameter vs Index plots...")
for i in range(self.ndim):
ax[i, 0].set_ylabel(self.axis_names[i], fontsize=fontsize)
for k in nchains:
if self.valid_chains[k]:
ax[i, 0].plot(steps, chains[:, k, i])
else:
ax[i, 0].plot(steps, chains[:, k, i], c='0.5', linestyle='--')
ax[i, 0].get_yaxis().set_label_coords(-0.05, 0.5)
ax[self.ndim, 0].set_ylabel(r'$\chi^2$', fontsize=fontsize)
ax[self.ndim, 0].set_xlabel('Steps', fontsize=fontsize)
ax[self.ndim, 0].get_yaxis().set_label_coords(-0.05, 0.5)
# Acceptance rate vs Index
print("Computing acceptance rate...")
min_len = self.nsteps
window = 100
if min_len > window:
for k in nchains:
ARs = []
indices = []
for pos in range(self.burnin + window, self.nsteps, window):
ARs.append(self.compute_local_acceptance_rate(pos - window, pos, k))
indices.append(pos)
if self.valid_chains[k]:
ax[self.ndim, 1].plot(indices, ARs, label=f'Walker {k:d}')
else:
ax[self.ndim, 1].plot(indices, ARs, label=f'Walker {k:d}', c='gray', linestyle='--')
ax[self.ndim, 1].set_xlabel('Steps', fontsize=fontsize)
            ax[self.ndim, 1].set_ylabel('Acceptance rate', fontsize=fontsize)
# ax[self.dim + 1, 2].legend(loc='upper left', ncol=2, fontsize=10)
# Parameter PDFs by chain
print("Computing chain by chain PDFs...")
for k in nchains:
likelihood = self.chain2likelihood(pdfonly=True, walker_index=k)
likelihood.stats(pdfonly=True, verbose=False)
# for i in range(self.dim):
# ax[i, 1].plot(likelihood.pdfs[i].axe.axis, likelihood.pdfs[i].grid, lw=var.LINEWIDTH,
# label=f'Walker {k:d}')
# ax[i, 1].set_xlabel(self.axis_names[i])
# ax[i, 1].set_ylabel('PDF')
# ax[i, 1].legend(loc='upper right', ncol=2, fontsize=10)
        # Gelman-Rubin test
if len(nchains) > 1:
step = max(1, (self.nsteps - self.burnin) // 20)
self.gelmans = []
print(f'Gelman-Rubin tests (burnin={self.burnin:d}, step={step:d}, nsteps={self.nsteps:d}):')
for i in range(self.ndim):
Rs = []
lens = []
for pos in range(self.burnin + step, self.nsteps, step):
chain_averages = []
chain_variances = []
global_average = np.mean(self.chains[self.burnin:pos, self.valid_chains, i])
for k in nchains:
if not self.valid_chains[k]:
continue
chain_averages.append(np.mean(self.chains[self.burnin:pos, k, i]))
chain_variances.append(np.var(self.chains[self.burnin:pos, k, i], ddof=1))
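                    # Gelman-Rubin diagnostic (as coded below): W is the mean within-chain variance,
                    # B the between-chain term; R compares the pooled variance estimate with W and
                    # R - 1 should approach 0 when the chains have converged.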
W = np.mean(chain_variances)
B = 0
for n in range(len(chain_averages)):
B += (chain_averages[n] - global_average) ** 2
B *= ((pos + 1) / (len(chain_averages) - 1))
R = (W * pos / (pos + 1) + B / (pos + 1) * (len(chain_averages) + 1) / len(chain_averages)) / W
Rs.append(R - 1)
lens.append(pos)
print(f'\t{self.input_labels[i]}: R-1 = {Rs[-1]:.3f} (l = {lens[-1] - 1:d})')
self.gelmans.append(Rs[-1])
ax[i, 1].plot(lens, Rs, lw=1, label=self.axis_names[i])
ax[i, 1].axhline(0.03, c='k', linestyle='--')
ax[i, 1].set_xlabel('Walker length', fontsize=fontsize)
ax[i, 1].set_ylabel('$R-1$', fontsize=fontsize)
ax[i, 1].set_ylim(0, 0.6)
# ax[self.dim, 3].legend(loc='best', ncol=2, fontsize=10)
self.gelmans = np.array(self.gelmans)
fig.tight_layout()
plt.subplots_adjust(hspace=0)
if parameters.DISPLAY: # pragma: no cover
plt.show()
figure_name = self.emcee_filename.replace('.h5', '_convergence.pdf')
print(f'Save figure: {figure_name}')
fig.savefig(figure_name, dpi=100)
def print_settings(self):
"""Print the main settings of the FitWorkspace.
"""
print('************************************')
print(f"Input file: {self.filename}\nWalkers: {self.nwalkers}\t Steps: {self.nsteps}")
print(f"Output file: {self.emcee_filename}")
print('************************************')
def save_parameters_summary(self, ipar, header=""):
"""Save the best fitting parameter summary in a text file.
The file name is build from self.file_name, adding the suffix _bestfit.txt.
Parameters
----------
ipar: list
The list of parameter indices to save.
header: str, optional
A header to add to the file (default: "").
"""
output_filename = os.path.splitext(self.filename)[0] + "_bestfit.txt"
f = open(output_filename, 'w')
txt = self.filename + "\n"
if header != "":
txt += header + "\n"
for k, ip in enumerate(ipar):
txt += "%s: %s +%s -%s\n" % formatting_numbers(self.p[ip], np.sqrt(self.cov[k, k]),
np.sqrt(self.cov[k, k]),
label=self.input_labels[ip])
for row in self.cov:
txt += np.array_str(row, max_line_width=20 * self.cov.shape[0]) + '\n'
self.my_logger.info(f"\n\tSave best fit parameters in {output_filename}.")
f.write(txt)
f.close()
def plot_correlation_matrix(self, ipar=None):
"""Compute and plot a correlation matrix.
Save the plot if parameters.SAVE is True. The output file name is build from self.file_name,
adding the suffix _correlation.pdf.
Parameters
----------
ipar: list, optional
The list of parameter indices to include in the matrix.
Examples
--------
>>> w = FitWorkspace()
>>> w.axis_names = ["x", "y", "z"]
>>> w.cov = np.array([[1,-0.5,0],[-0.5,1,-1],[0,-1,1]])
>>> w.plot_correlation_matrix()
"""
if ipar is None:
ipar = np.arange(self.cov.shape[0]).astype(int)
fig = plt.figure()
self.rho = compute_correlation_matrix(self.cov)
plot_correlation_matrix_simple(plt.gca(), self.rho, axis_names=[self.axis_names[i] for i in ipar])
if parameters.SAVE and self.filename != "": # pragma: no cover
figname = os.path.splitext(self.filename)[0] + "_correlation.pdf"
self.my_logger.info(f"Save figure {figname}.")
fig.savefig(figname, dpi=100, bbox_inches='tight')
if parameters.DISPLAY: # pragma: no cover
if self.live_fit:
plt.draw()
plt.pause(1e-8)
else:
plt.show()
def weighted_residuals(self, p): # pragma: nocover
"""Compute the weighted residuals array for a set of model parameters p.
Parameters
----------
p: array_like
The array of model parameters.
Returns
-------
residuals: np.array
The array of weighted residuals.
"""
x, model, model_err = self.simulate(*p)
if self.data_cov is None:
if len(self.outliers) > 0:
model_err = model_err.flatten()
err = self.err.flatten()
res = (model.flatten() - self.data.flatten()) / np.sqrt(model_err * model_err + err * err)
else:
res = ((model - self.data) / np.sqrt(model_err * model_err + self.err * self.err)).flatten()
else:
if self.data_cov.ndim > 2:
K = self.data_cov.shape[0]
if np.any(model_err > 0):
cov = [self.data_cov[k] + np.diag(model_err[k] ** 2) for k in range(K)]
L = [np.linalg.inv(np.linalg.cholesky(cov[k])) for k in range(K)]
else:
L = [np.linalg.cholesky(self.W[k]) for k in range(K)]
res = [L[k] @ (model[k] - self.data[k]) for k in range(K)]
res = np.concatenate(res).ravel()
else:
if np.any(model_err > 0):
cov = self.data_cov + np.diag(model_err * model_err)
L = np.linalg.inv(np.linalg.cholesky(cov))
else:
if self.W.ndim == 1 and self.W.dtype != np.object:
L = np.sqrt(self.W)
elif self.W.ndim == 2 and self.W.dtype != np.object:
L = np.linalg.cholesky(self.W)
else:
raise ValueError(f"Case not implemented with self.W.ndim={self.W.ndim} "
f"and self.W.dtype={self.W.dtype}")
res = L @ (model - self.data)
return res
def chisq(self, p, model_output=False):
"""Compute the chi square for a set of model parameters p.
Four cases are implemented: diagonal W, 2D W, array of diagonal Ws, array of 2D Ws. The two latter cases
are for multiple independent data vectors with W being block diagonal.
Parameters
----------
p: array_like
The array of model parameters.
model_output: bool, optional
If true, the simulated model is output.
Returns
-------
chisq: float
The chi square value.
"""
# check data format
if (self.data.dtype != np.object and self.data.ndim > 1) or (self.err.dtype != np.object and self.err.ndim > 1):
raise ValueError("Fitworkspace.data and Fitworkspace.err must be a flat 1D array,"
" or an array of flat arrays of unequal lengths.")
# prepare weight matrices in case they have not been built before
self.prepare_weight_matrices()
x, model, model_err = self.simulate(*p)
if self.W.ndim == 1 and self.W.dtype != np.object:
if np.any(model_err > 0):
W = 1 / (self.data_cov + model_err * model_err)
else:
W = self.W
res = (model - self.data)
chisq = res @ (W * res)
# fit_workspace.my_logger.warning(f"ll {tmp_params}")
# fit_workspace.my_logger.warning(f"yyyy {np.sum(residuals)} {np.sum(W)} {np.sum(W*residuals)}")
# fit_workspace.my_logger.warning(f"yoooo {np.sum(residuals)} {np.sum(W*residuals)} ")
# fit_workspace.my_logger.warning(f"yiiii {np.sum(tmp_model.flatten()[fit_workspace.not_outliers])} {np.sum(fit_workspace.data.flatten()[fit_workspace.not_outliers])} ")
# self.my_logger.warning(f"ll {p}")
# self.my_logger.warning(f"yyyy {np.sum(res)} {np.sum(W)} {np.sum(W*res)}")
# self.my_logger.warning(f"yoooo {np.sum(res[self.not_outliers])} {np.sum(W[self.not_outliers]*res[self.not_outliers])} ")
# self.my_logger.warning(f"yiiii {np.sum(model[self.not_outliers])} {np.sum(self.data[self.not_outliers])} ")
elif self.W.dtype == np.object:
K = len(self.W)
if self.W[0].ndim == 1:
if np.any(model_err > 0):
W = [1 / (self.data_cov[k] + model_err * model_err) for k in range(K)]
else:
W = self.W
res = [model[k] - self.data[k] for k in range(K)]
chisq = np.sum([res[k] @ (W[k] * res[k]) for k in range(K)])
elif self.W[0].ndim == 2:
K = len(self.W)
if np.any(model_err > 0):
cov = [self.data_cov[k] + np.diag(model_err[k] ** 2) for k in range(K)]
L = [np.linalg.inv(np.linalg.cholesky(cov[k])) for k in range(K)]
W = [L[k].T @ L[k] for k in range(K)]
else:
W = self.W
res = [model[k] - self.data[k] for k in range(K)]
chisq = np.sum([res[k] @ W[k] @ res[k] for k in range(K)])
else:
raise ValueError(f"First element of fitworkspace.W has no ndim attribute or has a dimension above 2. "
f"I get W[0]={self.W[0]}")
elif self.W.ndim == 2 and self.W.dtype != np.object:
if np.any(model_err > 0):
cov = self.data_cov + np.diag(model_err * model_err)
L = np.linalg.inv(np.linalg.cholesky(cov))
W = L.T @ L
else:
W = self.W
res = (model - self.data)
chisq = res @ W @ res
else:
raise ValueError(
f"Data inverse covariance matrix must be a np.ndarray of dimension 1 or 2,"
f"either made of 1D or 2D arrays of equal lengths or not for block diagonal matrices."
f"\nHere W type is {type(self.W)}, shape is {self.W.shape} and W is {self.W}.")
if model_output:
return chisq, x, model, model_err
else:
return chisq
def prepare_weight_matrices(self):
# Prepare covariance matrix for data
if self.data_cov is None:
self.data_cov = np.asarray(self.err.flatten() ** 2)
# Prepare inverse covariance matrix for data
if self.W is None:
if self.data_cov.ndim == 1 and self.data_cov.dtype != np.object:
self.W = 1 / self.data_cov
elif self.data_cov.ndim == 2 and self.data_cov.dtype != np.object:
L = np.linalg.inv(np.linalg.cholesky(self.data_cov))
self.W = L.T @ L
            elif self.data_cov.dtype == np.object:
if self.data_cov[0].ndim == 1:
self.W = np.array([1 / self.data_cov[k] for k in range(self.data_cov.shape[0])])
else:
self.W = []
for k in range(len(self.data_cov)):
L = np.linalg.inv(np.linalg.cholesky(self.data_cov[k]))
                        self.W.append(L.T @ L)
self.W = np.asarray(self.W)
if len(self.outliers) > 0:
bad_indices = self.get_bad_indices()
if self.W.ndim == 1 and self.W.dtype != np.object:
self.W[bad_indices] = 0
elif self.W.ndim == 2 and self.W.dtype != np.object:
self.W[:, bad_indices] = 0
self.W[bad_indices, :] = 0
elif self.W.dtype == np.object:
if self.data_cov[0].ndim == 1:
for k in range(len(self.W)):
self.W[k][bad_indices[k]] = 0
else:
for k in range(len(self.W)):
self.W[k][:, bad_indices[k]] = 0
self.W[k][bad_indices[k], :] = 0
else:
raise ValueError(
f"Data inverse covariance matrix must be a np.ndarray of dimension 1 or 2,"
f"either made of 1D or 2D arrays of equal lengths or not for block diagonal matrices."
f"\nHere W type is {type(self.W)}, shape is {self.W.shape} and W is {self.W}.")
def lnlike(self, p):
"""Compute the logarithmic likelihood for a set of model parameters p as -0.5*chisq.
Parameters
----------
p: array_like
The array of model parameters.
Returns
-------
lnlike: float
The logarithmic likelihood value.
"""
return -0.5 * self.chisq(p)
def lnprior(self, p):
"""Compute the logarithmic prior for a set of model parameters p.
The function returns 0 for good parameters, and -np.inf for parameters out of their boundaries.
Parameters
----------
p: array_like
The array of model parameters.
Returns
-------
lnprior: float
            The logarithmic value of the prior.
"""
in_bounds = True
for npar, par in enumerate(p):
if par < self.bounds[npar][0] or par > self.bounds[npar][1]:
in_bounds = False
break
if in_bounds:
return 0.0
else:
return -np.inf
def jacobian(self, params, epsilon, fixed_params=None, model_input=None):
"""Generic function to compute the Jacobian matrix of a model, with numerical derivatives.
Parameters
----------
params: array_like
The array of model parameters.
epsilon: array_like
The array of small steps to compute the partial derivatives of the model.
fixed_params: array_like
            List of boolean values. If True, the parameter is considered fixed and no derivatives are computed.
model_input: array_like, optional
A model input as a list with (x, model, model_err) to avoid an additional call to simulate().
Returns
-------
J: np.array
The Jacobian matrix.
"""
if model_input:
x, model, model_err = model_input
else:
x, model, model_err = self.simulate(*params)
if self.W.dtype == np.object and self.W[0].ndim == 2:
J = [[] for _ in range(params.size)]
else:
model = model.flatten()
J = np.zeros((params.size, model.size))
for ip, p in enumerate(params):
if fixed_params[ip]:
continue
tmp_p = np.copy(params)
if tmp_p[ip] + epsilon[ip] < self.bounds[ip][0] or tmp_p[ip] + epsilon[ip] > self.bounds[ip][1]:
epsilon[ip] = - epsilon[ip]
tmp_p[ip] += epsilon[ip]
tmp_x, tmp_model, tmp_model_err = self.simulate(*tmp_p)
if self.W.dtype == np.object and self.W[0].ndim == 2:
for k in range(model.shape[0]):
J[ip].append((tmp_model[k] - model[k]) / epsilon[ip])
else:
J[ip] = (tmp_model.flatten() - model) / epsilon[ip]
return np.asarray(J)
def hessian(self, params, epsilon, fixed_params=None): # pragma: nocover
"""Experimental function to compute the hessian of a model.
Parameters
----------
params: array_like
The array of model parameters.
epsilon: array_like
The array of small steps to compute the partial derivatives of the model.
fixed_params: array_like
            List of boolean values. If True, the parameter is considered fixed and no derivatives are computed.
Returns
-------
"""
x, model, model_err = self.simulate(*params)
model = model.flatten()
J = self.jacobian(params, epsilon, fixed_params=fixed_params)
H = np.zeros((params.size, params.size, model.size))
tmp_p = np.copy(params)
for ip, p1 in enumerate(params):
print(ip, p1, params[ip], tmp_p[ip], self.bounds[ip], epsilon[ip], tmp_p[ip] + epsilon[ip])
if fixed_params[ip]:
continue
if tmp_p[ip] + epsilon[ip] < self.bounds[ip][0] or tmp_p[ip] + epsilon[ip] > self.bounds[ip][1]:
epsilon[ip] = - epsilon[ip]
tmp_p[ip] += epsilon[ip]
print(tmp_p)
# tmp_x, tmp_model, tmp_model_err = self.simulate(*tmp_p)
# J[ip] = (tmp_model.flatten() - model) / epsilon[ip]
tmp_J = self.jacobian(tmp_p, epsilon, fixed_params=fixed_params)
for ip, p1 in enumerate(params):
if fixed_params[ip]:
continue
for jp, p2 in enumerate(params):
if fixed_params[jp]:
continue
x, modelplus, model_err = self.simulate(params + epsilon)
x, modelmoins, model_err = self.simulate(params - epsilon)
model = model.flatten()
print("hh", ip, jp, tmp_J[ip], J[jp], tmp_p[ip], params, (tmp_J[ip] - J[jp]) / epsilon)
print((modelplus + modelmoins - 2 * model) / (np.asarray(epsilon) ** 2))
H[ip, jp] = (tmp_J[ip] - J[jp]) / epsilon
H[ip, jp] = (modelplus + modelmoins - 2 * model) / (np.asarray(epsilon) ** 2)
return H
def lnprob(p): # pragma: no cover
global fit_workspace
lp = fit_workspace.lnprior(p)
if not np.isfinite(lp):
return -1e20
return lp + fit_workspace.lnlike(p)
def gradient_descent(fit_workspace, params, epsilon, niter=10, fixed_params=None, xtol=1e-3, ftol=1e-3,
with_line_search=True):
"""
Four cases are implemented: diagonal W, 2D W, array of diagonal Ws, array of 2D Ws. The two latter cases
are for multiple independent data vectors with W being block diagonal.
Parameters
----------
fit_workspace: FitWorkspace
params
epsilon
niter
fixed_params
xtol
ftol
with_line_search
Returns
-------
"""
my_logger = set_logger(__name__)
tmp_params = np.copy(params)
fit_workspace.prepare_weight_matrices()
W = fit_workspace.W
ipar = np.arange(params.size)
if fixed_params is not None:
ipar = np.array(np.where(np.array(fixed_params).astype(int) == 0)[0])
costs = []
params_table = []
inv_JT_W_J = np.zeros((len(ipar), len(ipar)))
for i in range(niter):
start = time.time()
cost, tmp_lambdas, tmp_model, tmp_model_err = fit_workspace.chisq(tmp_params, model_output=True)
if isinstance(fit_workspace.W, np.ndarray) and fit_workspace.W.dtype != np.object:
residuals = (tmp_model - fit_workspace.data).flatten()
elif isinstance(fit_workspace.W, np.ndarray) and fit_workspace.W.dtype == np.object:
residuals = [(tmp_model[k] - fit_workspace.data[k]) for k in range(len(fit_workspace.W))]
else:
raise TypeError(f"Type of fit_workspace.W is {type(fit_workspace.W)}. It must be a np.ndarray.")
# Jacobian
J = fit_workspace.jacobian(tmp_params, epsilon, fixed_params=fixed_params,
model_input=[tmp_lambdas, tmp_model, tmp_model_err])
# remove parameters with unexpected null Jacobian vectors
for ip in range(J.shape[0]):
if ip not in ipar:
continue
if np.all(np.array(J[ip]).flatten() == np.zeros(np.array(J[ip]).size)):
ipar = np.delete(ipar, list(ipar).index(ip))
fixed_params[ip] = True
# tmp_params[ip] = 0
my_logger.warning(
f"\n\tStep {i}: {fit_workspace.input_labels[ip]} has a null Jacobian; parameter is fixed "
f"at its last known current value ({tmp_params[ip]}).")
# remove fixed parameters
J = J[ipar].T
# algebra
if fit_workspace.W.ndim == 1 and fit_workspace.W.dtype != np.object:
JT_W = J.T * W
JT_W_J = JT_W @ J
elif fit_workspace.W.ndim == 2 and fit_workspace.W.dtype != np.object:
JT_W = J.T @ W
JT_W_J = JT_W @ J
else:
if fit_workspace.W[0].ndim == 1:
JT_W = J.T * np.concatenate(W).ravel()
JT_W_J = JT_W @ J
else:
                # warning! here the data arrays indexed by k can have different lengths because of outliers;
                # the W inverse covariance is block diagonal and its blocks can have different sizes,
                # so the approach is to temporarily flatten the data arrays
JT_W = [np.concatenate([J[ip][k].T @ W[k]
for k in range(fit_workspace.W.shape[0])]).ravel()
for ip in range(len(J))]
JT_W_J = np.array([[JT_W[ip2] @ np.concatenate(J[ip1][:]).ravel() for ip1 in range(len(J))]
for ip2 in range(len(J))])
try:
            L = np.linalg.inv(np.linalg.cholesky(JT_W_J))  # Cholesky is too sensitive to numerical precision
inv_JT_W_J = L.T @ L
except np.linalg.LinAlgError:
inv_JT_W_J = np.linalg.inv(JT_W_J)
if fit_workspace.W.dtype != np.object:
JT_W_R0 = JT_W @ residuals
else:
JT_W_R0 = JT_W @ np.concatenate(residuals).ravel()
dparams = - inv_JT_W_J @ JT_W_R0
if with_line_search:
def line_search(alpha):
tmp_params_2 = np.copy(tmp_params)
tmp_params_2[ipar] = tmp_params[ipar] + alpha * dparams
for ipp, pp in enumerate(tmp_params_2):
if pp < fit_workspace.bounds[ipp][0]:
tmp_params_2[ipp] = fit_workspace.bounds[ipp][0]
if pp > fit_workspace.bounds[ipp][1]:
tmp_params_2[ipp] = fit_workspace.bounds[ipp][1]
return fit_workspace.chisq(tmp_params_2)
# tol parameter acts on alpha (not func)
alpha_min, fval, iter, funcalls = optimize.brent(line_search, full_output=True, tol=5e-1, brack=(0, 1))
else:
alpha_min = 1
fval = np.copy(cost)
funcalls = 0
iter = 0
tmp_params[ipar] += alpha_min * dparams
# check bounds
for ip, p in enumerate(tmp_params):
if p < fit_workspace.bounds[ip][0]:
tmp_params[ip] = fit_workspace.bounds[ip][0]
if p > fit_workspace.bounds[ip][1]:
tmp_params[ip] = fit_workspace.bounds[ip][1]
# prepare outputs
costs.append(fval)
params_table.append(np.copy(tmp_params))
fit_workspace.p = tmp_params
if fit_workspace.verbose:
my_logger.info(f"\n\tIteration={i}: initial cost={cost:.5g} initial chisq_red={cost / (tmp_model.size - len(fit_workspace.mask)):.5g}"
f"\n\t\t Line search: alpha_min={alpha_min:.3g} iter={iter} funcalls={funcalls}"
f"\n\tParameter shifts: {alpha_min * dparams}"
f"\n\tNew parameters: {tmp_params[ipar]}"
f"\n\tFinal cost={fval:.5g} final chisq_red={fval / (tmp_model.size - len(fit_workspace.mask)):.5g} "
f"computed in {time.time() - start:.2f}s")
if fit_workspace.live_fit: # pragma: no cover
fit_workspace.simulate(*tmp_params)
fit_workspace.plot_fit()
fit_workspace.cov = inv_JT_W_J
# fit_workspace.plot_correlation_matrix(ipar)
if len(ipar) == 0:
my_logger.warning(f"\n\tGradient descent terminated in {i} iterations because all parameters "
f"have null Jacobian.")
break
if np.sum(np.abs(alpha_min * dparams)) / np.sum(np.abs(tmp_params[ipar])) < xtol:
my_logger.info(f"\n\tGradient descent terminated in {i} iterations because the sum of parameter shift "
f"relative to the sum of the parameters is below xtol={xtol}.")
break
if len(costs) > 1 and np.abs(costs[-2] - fval) / np.max([np.abs(fval), np.abs(costs[-2])]) < ftol:
my_logger.info(f"\n\tGradient descent terminated in {i} iterations because the "
f"relative change of cost is below ftol={ftol}.")
break
plt.close()
return tmp_params, inv_JT_W_J, np.array(costs), np.array(params_table)
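# Illustrative call sketch (hypothetical workspace and step sizes, not from the original code):
#   w = FitWorkspace()
#   epsilon = 1e-4 * np.abs(w.p) + 1e-8
#   p, cov, costs, table = gradient_descent(w, w.p, epsilon, niter=20, fixed_params=w.fixed)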
def simple_newton_minimisation(fit_workspace, params, epsilon, niter=10, fixed_params=None,
xtol=1e-3, ftol=1e-3): # pragma: no cover
"""Experimental function to minimize a function.
Parameters
----------
fit_workspace: FitWorkspace
params
epsilon
niter
fixed_params
xtol
ftol
Returns
-------
"""
my_logger = set_logger(__name__)
tmp_params = np.copy(params)
ipar = np.arange(params.size)
if fixed_params is not None:
ipar = np.array(np.where(np.array(fixed_params).astype(int) == 0)[0])
funcs = []
params_table = []
inv_H = np.zeros((len(ipar), len(ipar)))
for i in range(niter):
start = time.time()
tmp_lambdas, tmp_model, tmp_model_err = fit_workspace.simulate(*tmp_params)
# if fit_workspace.live_fit:
# fit_workspace.plot_fit()
J = fit_workspace.jacobian(tmp_params, epsilon, fixed_params=fixed_params)
# remove parameters with unexpected null Jacobian vectors
for ip in range(J.shape[0]):
if ip not in ipar:
continue
if np.all(J[ip] == np.zeros(J.shape[1])):
ipar = np.delete(ipar, list(ipar).index(ip))
# tmp_params[ip] = 0
my_logger.warning(
f"\n\tStep {i}: {fit_workspace.input_labels[ip]} has a null Jacobian; parameter is fixed "
f"at its last known current value ({tmp_params[ip]}).")
# remove fixed parameters
J = J[ipar].T
# hessian
H = fit_workspace.hessian(tmp_params, epsilon, fixed_params=fixed_params)
try:
            L = np.linalg.inv(np.linalg.cholesky(H))  # Cholesky inversion can be sensitive to numerical precision
inv_H = L.T @ L
except np.linalg.LinAlgError:
inv_H = np.linalg.inv(H)
dparams = - inv_H[:, :, 0] @ J[:, 0]
print("dparams", dparams, inv_H, J, H)
tmp_params[ipar] += dparams
# check bounds
print("tmp_params", tmp_params, dparams, inv_H, J)
for ip, p in enumerate(tmp_params):
if p < fit_workspace.bounds[ip][0]:
tmp_params[ip] = fit_workspace.bounds[ip][0]
if p > fit_workspace.bounds[ip][1]:
tmp_params[ip] = fit_workspace.bounds[ip][1]
tmp_lambdas, new_model, tmp_model_err = fit_workspace.simulate(*tmp_params)
new_func = new_model[0]
funcs.append(new_func)
r = np.log10(fit_workspace.regs)
js = [fit_workspace.jacobian(np.asarray([rr]), epsilon, fixed_params=fixed_params)[0] for rr in np.array(r)]
plt.plot(r, js, label="J")
plt.grid()
plt.legend()
plt.show()
if parameters.DISPLAY:
fig = plt.figure()
plt.plot(r, js, label="prior")
mod = tmp_model + J[0] * (r - (tmp_params - dparams)[0])
plt.plot(r, mod)
plt.axvline(tmp_params)
plt.axhline(tmp_model)
plt.grid()
plt.legend()
plt.draw()
plt.pause(1e-8)
plt.close(fig)
# prepare outputs
params_table.append(np.copy(tmp_params))
if fit_workspace.verbose:
my_logger.info(f"\n\tIteration={i}: initial func={tmp_model[0]:.5g}"
f"\n\tParameter shifts: {dparams}"
f"\n\tNew parameters: {tmp_params[ipar]}"
f"\n\tFinal func={new_func:.5g}"
f" computed in {time.time() - start:.2f}s")
if fit_workspace.live_fit:
fit_workspace.simulate(*tmp_params)
fit_workspace.plot_fit()
fit_workspace.cov = inv_H[:, :, 0]
print("shape", fit_workspace.cov.shape)
# fit_workspace.plot_correlation_matrix(ipar)
        if len(ipar) == 0:
            my_logger.warning(f"\n\tNewton minimisation terminated in {i} iterations because all parameters "
                              f"have null Jacobian.")
            break
        if np.sum(np.abs(dparams)) / np.sum(np.abs(tmp_params[ipar])) < xtol:
            my_logger.info(f"\n\tNewton minimisation terminated in {i} iterations because the sum of parameter shifts "
                           f"relative to the sum of the parameters is below xtol={xtol}.")
            break
        if len(funcs) > 1 and np.abs(funcs[-2] - new_func) / np.max([np.abs(new_func), np.abs(funcs[-2])]) < ftol:
            my_logger.info(f"\n\tNewton minimisation terminated in {i} iterations because the "
                           f"relative change of cost is below ftol={ftol}.")
            break
plt.close()
return tmp_params, inv_H[:, :, 0], np.array(funcs), np.array(params_table)
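# --- Illustrative sketch (not part of the original code) ---------------------
# simple_newton_minimisation() applies full Newton steps dparams = -H^{-1} J,
# with J and H the gradient and Hessian of the simulated quantity. The toy
# function below shows the same update on a 1-D quadratic, for which a single
# Newton step lands exactly on the minimum; `_example_newton_step` is a
# hypothetical name used only for illustration.
def _example_newton_step(x0=10.0):
    import numpy as np
    f = lambda x: (x - 2.0) ** 2           # function to minimise
    grad = lambda x: 2.0 * (x - 2.0)       # first derivative (Jacobian)
    hess = lambda x: np.array([[2.0]])     # second derivative (Hessian)
    dx = -np.linalg.inv(hess(x0))[0, 0] * grad(x0)
    return x0 + dx                         # == 2.0, the exact minimum of f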
def print_parameter_summary(params, cov, labels):
"""Print the best fitting parameters on screen.
Parameters
----------
params: array_like
The best fitting parameter values.
cov: array_like
The associated covariance matrix.
labels: array_like
The list of associated parameter labels.
"""
my_logger = set_logger(__name__)
txt = ""
for ip in np.arange(0, cov.shape[0]).astype(int):
txt += "%s: %s +%s -%s\n\t" % formatting_numbers(params[ip], np.sqrt(cov[ip, ip]), np.sqrt(cov[ip, ip]),
label=labels[ip])
my_logger.info(f"\n\t{txt}")
def plot_gradient_descent(fit_workspace, costs, params_table):
fig, ax = plt.subplots(2, 1, figsize=(10, 6), sharex="all")
iterations = np.arange(params_table.shape[0])
ax[0].plot(iterations, costs, lw=2)
for ip in range(params_table.shape[1]):
ax[1].plot(iterations, params_table[:, ip], label=f"{fit_workspace.axis_names[ip]}")
ax[1].set_yscale("symlog")
ax[1].legend(ncol=6, loc=9)
ax[1].grid()
ax[0].set_yscale("log")
ax[0].set_ylabel(r"$\chi^2$")
ax[1].set_ylabel("Parameters")
ax[0].grid()
ax[1].set_xlabel("Iterations")
ax[0].xaxis.set_major_locator(MaxNLocator(integer=True))
fig.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0)
if parameters.SAVE and fit_workspace.filename != "": # pragma: no cover
figname = os.path.splitext(fit_workspace.filename)[0] + "_fitting.pdf"
fit_workspace.my_logger.info(f"\n\tSave figure {figname}.")
fig.savefig(figname, dpi=100, bbox_inches='tight')
if parameters.DISPLAY: # pragma: no cover
plt.show()
fit_workspace.simulate(*fit_workspace.p)
fit_workspace.live_fit = False
fit_workspace.plot_fit()
def save_gradient_descent(fit_workspace, costs, params_table):
iterations = np.arange(params_table.shape[0]).astype(int)
t = np.zeros((params_table.shape[1] + 2, params_table.shape[0]))
t[0] = iterations
t[1] = costs
t[2:] = params_table.T
h = 'iter,costs,' + ','.join(fit_workspace.input_labels)
output_filename = os.path.splitext(fit_workspace.filename)[0] + "_fitting.txt"
np.savetxt(output_filename, t.T, header=h, delimiter=",")
fit_workspace.my_logger.info(f"\n\tSave gradient descent log {output_filename}.")
def run_gradient_descent(fit_workspace, guess, epsilon, params_table, costs, fix, xtol, ftol, niter, verbose=False,
with_line_search=True):
fit_workspace.p, fit_workspace.cov, tmp_costs, tmp_params_table = gradient_descent(fit_workspace, guess,
epsilon, niter=niter,
fixed_params=fix,
xtol=xtol, ftol=ftol,
with_line_search=with_line_search)
params_table = np.concatenate([params_table, tmp_params_table])
costs = np.concatenate([costs, tmp_costs])
ipar = np.array(np.where(np.array(fix).astype(int) == 0)[0])
if verbose or fit_workspace.verbose:
print_parameter_summary(fit_workspace.p[ipar], fit_workspace.cov,
[fit_workspace.input_labels[ip] for ip in ipar])
if parameters.DEBUG and (verbose or fit_workspace.verbose):
# plot_psf_poly_params(fit_workspace.p[fit_workspace.psf_params_start_index:])
# fit_workspace.plot_fit()
plot_gradient_descent(fit_workspace, costs, params_table)
if len(ipar) > 1:
fit_workspace.plot_correlation_matrix(ipar=ipar)
return params_table, costs
def run_simple_newton_minimisation(fit_workspace, guess, epsilon, fix=None, xtol=1e-8, ftol=1e-8,
niter=50, verbose=False): # pragma: no cover
if fix is None:
fix = [False] * guess.size
fit_workspace.p, fit_workspace.cov, funcs, params_table = simple_newton_minimisation(fit_workspace, guess,
epsilon, niter=niter,
fixed_params=fix,
xtol=xtol, ftol=ftol)
ipar = np.array(np.where(np.array(fix).astype(int) == 0)[0])
if verbose or fit_workspace.verbose:
print_parameter_summary(fit_workspace.p[ipar], fit_workspace.cov,
[fit_workspace.input_labels[ip] for ip in ipar])
if parameters.DEBUG and (verbose or fit_workspace.verbose):
# plot_psf_poly_params(fit_workspace.p[fit_workspace.psf_params_start_index:])
# fit_workspace.plot_fit()
plot_gradient_descent(fit_workspace, funcs, params_table)
if len(ipar) > 1:
fit_workspace.plot_correlation_matrix(ipar=ipar)
return params_table, funcs
def run_minimisation(fit_workspace, method="newton", epsilon=None, fix=None, xtol=1e-4, ftol=1e-4, niter=50,
verbose=False, with_line_search=True, minimizer_method="L-BFGS-B"):
my_logger = set_logger(__name__)
bounds = fit_workspace.bounds
nll = lambda params: -fit_workspace.lnlike(params)
guess = fit_workspace.p.astype('float64')
if verbose:
my_logger.debug(f"\n\tStart guess: {guess}")
if method == "minimize":
start = time.time()
result = optimize.minimize(nll, fit_workspace.p, method=minimizer_method,
options={'ftol': ftol, 'maxiter': 100000}, bounds=bounds)
fit_workspace.p = result['x']
if verbose:
my_logger.debug(f"\n\t{result}")
my_logger.debug(f"\n\tMinimize: total computation time: {time.time() - start}s")
fit_workspace.plot_fit()
elif method == 'basinhopping':
start = time.time()
minimizer_kwargs = dict(method=minimizer_method, bounds=bounds)
result = optimize.basinhopping(nll, guess, minimizer_kwargs=minimizer_kwargs)
fit_workspace.p = result['x']
if verbose:
my_logger.debug(f"\n\t{result}")
my_logger.debug(f"\n\tBasin-hopping: total computation time: {time.time() - start}s")
fit_workspace.plot_fit()
elif method == "least_squares": # pragma: no cover
fit_workspace.my_logger.warning("least_squares might not work, use with caution... "
"or repair carefully the function weighted_residuals()")
start = time.time()
x_scale = np.abs(guess)
x_scale[x_scale == 0] = 0.1
p = optimize.least_squares(fit_workspace.weighted_residuals, guess, verbose=2, ftol=1e-6, x_scale=x_scale,
diff_step=0.001, bounds=bounds.T)
fit_workspace.p = p.x # m.np_values()
if verbose:
my_logger.debug(f"\n\t{p}")
my_logger.debug(f"\n\tLeast_squares: total computation time: {time.time() - start}s")
fit_workspace.plot_fit()
elif method == "minuit":
start = time.time()
# fit_workspace.simulation.fix_psf_cube = False
error = 0.1 * np.abs(guess) * np.ones_like(guess)
error[2:5] = 0.3 * np.abs(guess[2:5]) * np.ones_like(guess[2:5])
z = np.where(np.isclose(error, 0.0, 1e-6))
error[z] = 1.
if fix is None:
fix = [False] * guess.size
# noinspection PyArgumentList
# m = Minuit(fcn=nll, values=guess, error=error, errordef=1, fix=fix, print_level=verbose, limit=bounds)
m = Minuit(nll, np.copy(guess))
m.errors = error
m.errordef = 1
m.fixed = fix
m.print_level = verbose
m.limits = bounds
m.tol = 10
m.migrad()
fit_workspace.p = np.array(m.values[:])
if verbose:
my_logger.debug(f"\n\t{m}")
my_logger.debug(f"\n\tMinuit: total computation time: {time.time() - start}s")
fit_workspace.plot_fit()
elif method == "newton":
if fit_workspace.costs.size == 0:
costs = np.array([fit_workspace.chisq(guess)])
params_table = np.array([guess])
else:
costs = np.concatenate([fit_workspace.costs, np.array([fit_workspace.chisq(guess)])])
params_table = np.concatenate([fit_workspace.params_table, np.array([guess])])
if epsilon is None:
epsilon = 1e-4 * guess
epsilon[epsilon == 0] = 1e-4
if fix is None:
fix = [False] * guess.size
start = time.time()
params_table, costs = run_gradient_descent(fit_workspace, guess, epsilon, params_table, costs,
fix=fix, xtol=xtol, ftol=ftol, niter=niter, verbose=verbose,
with_line_search=with_line_search)
fit_workspace.costs = costs
fit_workspace.params_table = params_table
if verbose:
my_logger.debug(f"\n\tNewton: total computation time: {time.time() - start}s")
if fit_workspace.filename != "":
ipar = np.array(np.where(np.array(fit_workspace.fixed).astype(int) == 0)[0])
fit_workspace.save_parameters_summary(ipar)
save_gradient_descent(fit_workspace, costs, params_table)
def run_minimisation_sigma_clipping(fit_workspace, method="newton", epsilon=None, fix=None, xtol=1e-4, ftol=1e-4,
niter=50, sigma_clip=5.0, niter_clip=3, verbose=False):
my_logger = set_logger(__name__)
fit_workspace.sigma_clip = sigma_clip
for step in range(niter_clip):
if verbose:
my_logger.info(f"\n\tSigma-clipping step {step}/{niter_clip} (sigma={sigma_clip})")
run_minimisation(fit_workspace, method=method, epsilon=epsilon, fix=fix, xtol=xtol, ftol=ftol, niter=niter)
# remove outliers
if fit_workspace.data.dtype == np.object:
# indices_no_nan = ~np.isnan(np.concatenate(fit_workspace.data).ravel())
data = np.concatenate(fit_workspace.data).ravel() # [indices_no_nan]
model = np.concatenate(fit_workspace.model).ravel() # [indices_no_nan]
err = np.concatenate(fit_workspace.err).ravel() # [indices_no_nan]
else:
# indices_no_nan = ~np.isnan(fit_workspace.data.flatten())
data = fit_workspace.data.flatten() # [indices_no_nan]
model = fit_workspace.model.flatten() # [indices_no_nan]
err = fit_workspace.err.flatten() # [indices_no_nan]
residuals = np.abs(data - model) / err
outliers = residuals > sigma_clip
outliers = [i for i in range(data.size) if outliers[i]]
outliers.sort()
if len(outliers) > 0:
my_logger.debug(f'\n\tOutliers flat index list: {outliers}')
my_logger.info(f'\n\tOutliers: {len(outliers)} / {data.size} data points '
f'({100 * len(outliers) / data.size:.2f}%) '
f'at more than {sigma_clip}-sigma from best-fit model.')
if np.all(fit_workspace.outliers == outliers):
my_logger.info(f'\n\tOutliers flat index list unchanged since last iteration: '
f'break the sigma clipping iterations.')
break
else:
fit_workspace.outliers = outliers
else:
my_logger.info(f'\n\tNo outliers detected at first iteration: break the sigma clipping iterations.')
break
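# --- Illustrative sketch (not part of the original code) ---------------------
# The sigma-clipping loop above flags as outliers every data point whose
# absolute residual |data - model| / err exceeds `sigma_clip`, refits without
# them, and stops when the outlier list no longer changes. A minimal
# stand-alone version of the flagging step (hypothetical `_example_*` name):
def _example_sigma_clip(data, model, err, sigma_clip=5.0):
    import numpy as np
    residuals = np.abs(np.asarray(data) - np.asarray(model)) / np.asarray(err)
    return np.where(residuals > sigma_clip)[0].tolist()   # flat outlier indices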
def run_emcee(fit_workspace, ln=lnprob):
my_logger = set_logger(__name__)
fit_workspace.print_settings()
nsamples = fit_workspace.nsteps
p0 = fit_workspace.set_start()
filename = fit_workspace.emcee_filename
backend = emcee.backends.HDFBackend(filename)
try: # pragma: no cover
pool = MPIPool()
if not pool.is_master():
pool.wait()
sys.exit(0)
sampler = emcee.EnsembleSampler(fit_workspace.nwalkers, fit_workspace.ndim, ln, args=(),
pool=pool, backend=backend)
my_logger.info(f"\n\tInitial size: {backend.iteration}")
if backend.iteration > 0:
p0 = backend.get_last_sample()
if nsamples - backend.iteration > 0:
sampler.run_mcmc(p0, nsteps=max(0, nsamples - backend.iteration), progress=True)
pool.close()
except ValueError:
sampler = emcee.EnsembleSampler(fit_workspace.nwalkers, fit_workspace.ndim, ln, args=(),
threads=multiprocessing.cpu_count(), backend=backend)
my_logger.info(f"\n\tInitial size: {backend.iteration}")
if backend.iteration > 0:
p0 = sampler.get_last_sample()
for _ in sampler.sample(p0, iterations=max(0, nsamples - backend.iteration), progress=True, store=True):
continue
fit_workspace.chains = sampler.chain
fit_workspace.lnprobs = sampler.lnprobability
class RegFitWorkspace(FitWorkspace):
def __init__(self, w, opt_reg=parameters.PSF_FIT_REG_PARAM, verbose=0, live_fit=False):
"""
Parameters
----------
w: ChromaticPSFFitWorkspace
"""
FitWorkspace.__init__(self, verbose=verbose, live_fit=live_fit)
self.x = np.array([0])
self.data = np.array([0])
self.err = np.array([1])
self.w = w
self.p = np.asarray([np.log10(opt_reg)])
self.bounds = [(-20, np.log10(self.w.amplitude_priors.size) + 2)]
self.input_labels = ["log10_reg"]
self.axis_names = [r"$\log_{10} r$"]
self.fixed = [False] * self.p.size
self.opt_reg = opt_reg
self.resolution = np.zeros_like((self.w.amplitude_params.size, self.w.amplitude_params.size))
self.G = 0
self.chisquare = -1
def print_regularisation_summary(self):
self.my_logger.info(f"\n\tOptimal regularisation parameter: {self.opt_reg}"
f"\n\tTr(R) = {np.trace(self.resolution)}"
f"\n\tN_params = {len(self.w.amplitude_params)}"
f"\n\tN_data = {self.w.data.size - len(self.w.mask) - len(self.w.outliers)}"
f" (without mask and outliers)")
def simulate(self, log10_r):
reg = 10 ** log10_r
M_dot_W_dot_M_plus_Q = self.w.M_dot_W_dot_M + reg * self.w.Q
try:
L = np.linalg.inv(np.linalg.cholesky(M_dot_W_dot_M_plus_Q))
cov = L.T @ L
except np.linalg.LinAlgError:
cov = np.linalg.inv(M_dot_W_dot_M_plus_Q)
if self.w.W.ndim == 1:
A = cov @ (self.w.M.T @ (self.w.W * self.w.data) + reg * self.w.Q_dot_A0)
else:
A = cov @ (self.w.M.T @ (self.w.W @ self.w.data) + reg * self.w.Q_dot_A0)
if A.ndim == 2: # ndim == 2 when A comes from a sparse matrix computation
A = np.asarray(A).reshape(-1)
self.resolution = np.eye(A.size) - reg * cov @ self.w.Q
diff = self.w.data - self.w.M @ A
if self.w.W.ndim == 1:
self.chisquare = diff @ (self.w.W * diff)
else:
self.chisquare = diff @ self.w.W @ diff
self.w.amplitude_params = A
self.w.amplitude_cov_matrix = cov
self.w.amplitude_params_err = np.array([np.sqrt(cov[x, x]) for x in range(cov.shape[0])])
self.G = self.chisquare / ((self.w.data.size - len(self.w.mask) - len(self.w.outliers)) - np.trace(self.resolution)) ** 2
return np.asarray([log10_r]), np.asarray([self.G]), np.zeros_like(self.data)
def plot_fit(self):
log10_opt_reg = self.p[0]
opt_reg = 10 ** log10_opt_reg
regs = 10 ** np.linspace(min(-10, 0.9 * log10_opt_reg), max(3, 1.2 * log10_opt_reg), 50)
Gs = []
chisqs = []
resolutions = []
x = np.arange(len(self.w.amplitude_priors))
for r in regs:
self.simulate(np.log10(r))
if parameters.DISPLAY and False: # pragma: no cover
fig = plt.figure()
plt.errorbar(x, self.w.amplitude_params, yerr=[np.sqrt(self.w.amplitude_cov_matrix[i, i]) for i in x],
label=f"fit r={r:.2g}")
plt.plot(x, self.w.amplitude_priors, label="prior")
plt.grid()
plt.legend()
plt.draw()
plt.pause(1e-8)
plt.close(fig)
Gs.append(self.G)
chisqs.append(self.chisquare)
resolutions.append(np.trace(self.resolution))
fig, ax = plt.subplots(3, 1, figsize=(7, 5), sharex="all")
ax[0].plot(regs, Gs)
ax[0].axvline(opt_reg, color="k")
ax[1].axvline(opt_reg, color="k")
ax[2].axvline(opt_reg, color="k")
ax[0].set_ylabel(r"$G(r)$")
ax[0].set_xlabel("Regularisation hyper-parameter $r$")
ax[0].grid()
ax[0].set_title(f"Optimal regularisation parameter: {opt_reg:.3g}")
ax[1].plot(regs, chisqs)
ax[1].set_ylabel(r"$\chi^2(\mathbf{A}(r) \vert \mathbf{\theta})$")
ax[1].set_xlabel("Regularisation hyper-parameter $r$")
ax[1].grid()
ax[1].set_xscale("log")
ax[2].set_xscale("log")
ax[2].plot(regs, resolutions)
ax[2].set_ylabel(r"$\mathrm{Tr}\,\mathbf{R}$")
ax[2].set_xlabel("Regularisation hyper-parameter $r$")
ax[2].grid()
fig.tight_layout()
plt.subplots_adjust(hspace=0)
if parameters.DISPLAY:
plt.show()
if parameters.LSST_SAVEFIGPATH:
fig.savefig(os.path.join(parameters.LSST_SAVEFIGPATH, 'regularisation.pdf'))
fig = plt.figure(figsize=(7, 5))
rho = compute_correlation_matrix(self.w.amplitude_cov_matrix)
plot_correlation_matrix_simple(plt.gca(), rho, axis_names=[''] * len(self.w.amplitude_params))
# ipar=np.arange(10, 20))
plt.gca().set_title(r"Correlation matrix $\mathbf{\rho}$")
if parameters.LSST_SAVEFIGPATH:
fig.savefig(os.path.join(parameters.LSST_SAVEFIGPATH, 'amplitude_correlation_matrix.pdf'))
if parameters.DISPLAY:
plt.show()
def run_regularisation(self):
run_minimisation(self, method="minimize", ftol=1e-4, xtol=1e-2, verbose=self.verbose, epsilon=[1e-1],
minimizer_method="Nelder-Mead")
self.opt_reg = 10 ** self.p[0]
self.simulate(np.log10(self.opt_reg))
self.print_regularisation_summary()
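# --- Illustrative sketch (not part of the original code) ---------------------
# RegFitWorkspace.simulate() solves a Tikhonov-regularised weighted linear
# system, A(r) = (M^T W M + r Q)^{-1} (M^T W d + r Q A0), and the hyper-
# parameter r is tuned by minimising G(r) = chi2 / (N - Tr R)^2, where
# R = I - r (M^T W M + r Q)^{-1} Q is the resolution matrix. The toy function
# below reproduces that algebra for a dense problem with diagonal weights W;
# all `_example_*` names are hypothetical.
def _example_regularised_solve(M, W, data, Q, A0, reg):
    import numpy as np
    MTWM = M.T @ (W[:, None] * M)               # M^T W M with diagonal weights
    cov = np.linalg.inv(MTWM + reg * Q)
    A = cov @ (M.T @ (W * data) + reg * (Q @ A0))
    resolution = np.eye(A.size) - reg * cov @ Q
    diff = data - M @ A
    chisq = diff @ (W * diff)
    G = chisq / (data.size - np.trace(resolution)) ** 2
    return A, G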
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43.14751
| 181
| 0.556187
|
2dd52a741d4c910b5b7360bc99589f35d51f8da3
| 298
|
py
|
Python
|
tests/kivy_garden/qrcode/test_qrcode.py
|
AndiEcker/qrcode
|
79673e9cb57cbc44d9c6a9e7970cb51b80f6df6b
|
[
"MIT"
] | 7
|
2019-10-25T00:46:16.000Z
|
2022-03-19T20:47:14.000Z
|
tests/kivy_garden/qrcode/test_qrcode.py
|
AndiEcker/qrcode
|
79673e9cb57cbc44d9c6a9e7970cb51b80f6df6b
|
[
"MIT"
] | 16
|
2019-10-26T13:30:40.000Z
|
2021-03-14T11:32:57.000Z
|
tests/kivy_garden/qrcode/test_qrcode.py
|
AndiEcker/qrcode
|
79673e9cb57cbc44d9c6a9e7970cb51b80f6df6b
|
[
"MIT"
] | 4
|
2019-10-25T00:48:09.000Z
|
2022-01-27T16:49:24.000Z
|
from kivy_garden.qrcode import QRCodeWidget
class TestQRCodeWidget:
def test_init(self):
"""
Simply initialises the widget and checks a property.
"""
qrcode_widget = QRCodeWidget()
assert qrcode_widget.loading_image == 'data/images/image-loading.gif'
| 24.833333
| 77
| 0.677852
|
fb1ef459ec8a83cdd8057f7844344e12a67e47b3
| 6,441
|
py
|
Python
|
tests/alerters/telegram_test.py
|
konstantin-kornienko/elastalert2
|
3e34751bf53b49cc12b923e5e6d3438fe5753611
|
[
"Apache-2.0"
] | null | null | null |
tests/alerters/telegram_test.py
|
konstantin-kornienko/elastalert2
|
3e34751bf53b49cc12b923e5e6d3438fe5753611
|
[
"Apache-2.0"
] | null | null | null |
tests/alerters/telegram_test.py
|
konstantin-kornienko/elastalert2
|
3e34751bf53b49cc12b923e5e6d3438fe5753611
|
[
"Apache-2.0"
] | null | null | null |
import json
import logging
import pytest
from unittest import mock
from requests import RequestException
from requests.auth import HTTPProxyAuth
from elastalert.alerters.telegram import TelegramAlerter
from elastalert.loaders import FileRulesLoader
from elastalert.util import EAException
def test_telegram(caplog):
caplog.set_level(logging.INFO)
rule = {
'name': 'Test Telegram Rule',
'type': 'any',
'telegram_bot_token': 'xxxxx1',
'telegram_room_id': 'xxxxx2',
'alert': []
}
rules_loader = FileRulesLoader({})
rules_loader.load_modules(rule)
alert = TelegramAlerter(rule)
match = {
'@timestamp': '2021-01-01T00:00:00',
'somefield': 'foobarbaz'
}
with mock.patch('requests.post') as mock_post_request:
alert.alert([match])
expected_data = {
'chat_id': rule['telegram_room_id'],
'text': '⚠ *Test Telegram Rule* ⚠ ```\nTest Telegram Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n ```',
'parse_mode': 'markdown',
'disable_web_page_preview': True
}
mock_post_request.assert_called_once_with(
'https://api.telegram.org/botxxxxx1/sendMessage',
data=mock.ANY,
headers={'content-type': 'application/json'},
proxies=None,
auth=None
)
actual_data = json.loads(mock_post_request.call_args_list[0][1]['data'])
assert expected_data == actual_data
assert ('elastalert', logging.INFO, 'Alert sent to Telegram room xxxxx2') == caplog.record_tuples[0]
def test_telegram_proxy():
rule = {
'name': 'Test Telegram Rule',
'type': 'any',
'telegram_bot_token': 'xxxxx1',
'telegram_room_id': 'xxxxx2',
'telegram_proxy': 'http://proxy.url',
'telegram_proxy_login': 'admin',
'telegram_proxy_pass': 'password',
'alert': []
}
rules_loader = FileRulesLoader({})
rules_loader.load_modules(rule)
alert = TelegramAlerter(rule)
match = {
'@timestamp': '2021-01-01T00:00:00',
'somefield': 'foobarbaz'
}
with mock.patch('requests.post') as mock_post_request:
alert.alert([match])
expected_data = {
'chat_id': rule['telegram_room_id'],
'text': '⚠ *Test Telegram Rule* ⚠ ```\nTest Telegram Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n ```',
'parse_mode': 'markdown',
'disable_web_page_preview': True
}
mock_post_request.assert_called_once_with(
'https://api.telegram.org/botxxxxx1/sendMessage',
data=mock.ANY,
headers={'content-type': 'application/json'},
proxies={'https': 'http://proxy.url'},
auth=HTTPProxyAuth('admin', 'password')
)
actual_data = json.loads(mock_post_request.call_args_list[0][1]['data'])
assert expected_data == actual_data
def test_telegram_text_maxlength():
rule = {
'name': 'Test Telegram Rule' + ('a' * 3985),
'type': 'any',
'telegram_bot_token': 'xxxxx1',
'telegram_room_id': 'xxxxx2',
'alert': []
}
rules_loader = FileRulesLoader({})
rules_loader.load_modules(rule)
alert = TelegramAlerter(rule)
match = {
'@timestamp': '2021-01-01T00:00:00',
'somefield': 'foobarbaz'
}
with mock.patch('requests.post') as mock_post_request:
alert.alert([match])
expected_data = {
'chat_id': rule['telegram_room_id'],
'text': '⚠ *Test Telegram Rule' + ('a' * 3979) +
'\n⚠ *message was cropped according to telegram limits!* ⚠ ```',
'parse_mode': 'markdown',
'disable_web_page_preview': True
}
mock_post_request.assert_called_once_with(
'https://api.telegram.org/botxxxxx1/sendMessage',
data=mock.ANY,
headers={'content-type': 'application/json'},
proxies=None,
auth=None
)
actual_data = json.loads(mock_post_request.call_args_list[0][1]['data'])
assert expected_data == actual_data
def test_telegram_ea_exception():
with pytest.raises(EAException) as ea:
rule = {
'name': 'Test Telegram Rule',
'type': 'any',
'telegram_bot_token': 'xxxxx1',
'telegram_room_id': 'xxxxx2',
'alert': []
}
rules_loader = FileRulesLoader({})
rules_loader.load_modules(rule)
alert = TelegramAlerter(rule)
match = {
'@timestamp': '2021-01-01T00:00:00',
'somefield': 'foobarbaz'
}
mock_run = mock.MagicMock(side_effect=RequestException)
with mock.patch('requests.post', mock_run), pytest.raises(RequestException):
alert.alert([match])
assert 'Error posting to Telegram: . Details: ' in str(ea)
def test_telegram_getinfo():
rule = {
'name': 'Test Telegram Rule',
'type': 'any',
'telegram_bot_token': 'xxxxx1',
'telegram_room_id': 'xxxxx2',
'alert': []
}
rules_loader = FileRulesLoader({})
rules_loader.load_modules(rule)
alert = TelegramAlerter(rule)
expected_data = {
'type': 'telegram',
'telegram_room_id': 'xxxxx2'
}
actual_data = alert.get_info()
assert expected_data == actual_data
@pytest.mark.parametrize('telegram_bot_token, telegram_room_id, expected_data', [
('', '', 'Missing required option(s): telegram_bot_token, telegram_room_id'),
('xxxxx1', '', 'Missing required option(s): telegram_bot_token, telegram_room_id'),
('', 'xxxxx2', 'Missing required option(s): telegram_bot_token, telegram_room_id'),
('xxxxx1', 'xxxxx2',
{
'type': 'telegram',
'telegram_room_id': 'xxxxx2'
}),
])
def test_telegram_required_error(telegram_bot_token, telegram_room_id, expected_data):
try:
rule = {
'name': 'Test Telegram Rule',
'type': 'any',
'alert': []
}
if telegram_bot_token:
rule['telegram_bot_token'] = telegram_bot_token
if telegram_room_id:
rule['telegram_room_id'] = telegram_room_id
rules_loader = FileRulesLoader({})
rules_loader.load_modules(rule)
alert = TelegramAlerter(rule)
actual_data = alert.get_info()
assert expected_data == actual_data
except Exception as ea:
assert expected_data in str(ea)
| 31.729064
| 130
| 0.613569
|
10ff1512834ddaca1df0d6277b249f2a8a0d5a76
| 559
|
py
|
Python
|
randomized_uncertain_social_preferences/rusp/test_env_oasis.py
|
bglick13/multi-agent-emergence-environments
|
e02d66f0734d95470d15a4508ff369a75fa093a4
|
[
"MIT"
] | 1,317
|
2019-09-17T15:50:42.000Z
|
2022-03-30T18:24:24.000Z
|
randomized_uncertain_social_preferences/rusp/test_env_oasis.py
|
jihan1218/multi-agent-predator-prey
|
ebf11e601de07e80c27c87dc41837d91f53e9465
|
[
"MIT"
] | 35
|
2019-09-20T11:36:20.000Z
|
2022-02-10T00:24:27.000Z
|
randomized_uncertain_social_preferences/rusp/test_env_oasis.py
|
jihan1218/multi-agent-predator-prey
|
ebf11e601de07e80c27c87dc41837d91f53e9465
|
[
"MIT"
] | 279
|
2019-09-18T00:14:19.000Z
|
2022-03-30T09:39:12.000Z
|
import subprocess
import unittest
import os
EXAMPLES_DIR = os.path.dirname(os.path.abspath(__file__))
EXAMINE_FILE_PATH = os.path.join(EXAMPLES_DIR, "../../bin/examine.py")
class ExamineTest(unittest.TestCase):
def test_examine_env(self):
envs = [
"env_oasis.py"
]
for env in envs:
with self.assertRaises(subprocess.TimeoutExpired):
subprocess.check_call(
["/usr/bin/env", "python", EXAMINE_FILE_PATH, os.path.join(EXAMPLES_DIR, env)],
timeout=10)
| 29.421053
| 99
| 0.620751
|
883e71720b9f0c7e4186408e03c38b8755b69591
| 43,150
|
py
|
Python
|
sdscli/adapters/hysds/fabfile.py
|
sdskit/sdscli
|
9bb96e880c8251d1dce56b901c1289ed80f83ce7
|
[
"Apache-2.0"
] | null | null | null |
sdscli/adapters/hysds/fabfile.py
|
sdskit/sdscli
|
9bb96e880c8251d1dce56b901c1289ed80f83ce7
|
[
"Apache-2.0"
] | 24
|
2018-03-14T15:37:38.000Z
|
2021-11-30T21:59:44.000Z
|
sdscli/adapters/hysds/fabfile.py
|
sdskit/sdscli
|
9bb96e880c8251d1dce56b901c1289ed80f83ce7
|
[
"Apache-2.0"
] | 13
|
2018-02-22T15:01:35.000Z
|
2019-02-07T18:58:57.000Z
|
"""
Fabric file for HySDS.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from sdscli.prompt_utils import highlight, blink
from sdscli.conf_utils import get_user_config_path, get_user_files_path
from sdscli.log_utils import logger
from fabric.contrib.project import rsync_project
from fabric.contrib.files import upload_template, exists, append
from fabric.api import run, cd, put, sudo, prefix, env, settings, hide
from copy import deepcopy
import requests
import json
import yaml
import re
import os
from builtins import open
from future import standard_library
standard_library.install_aliases()
# ssh_opts and extra_opts for rsync and rsync_project
ssh_opts = "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
extra_opts = "-k"
# repo regex
repo_re = re.compile(r'.+//.*?/(.*?)/(.*?)(?:\.git)?$')
# define private EC2 IP addresses for infrastructure hosts
context = {}
this_dir = os.path.dirname(os.path.abspath(__file__))
sds_cfg = get_user_config_path()
if not os.path.isfile(sds_cfg):
raise RuntimeError(
"SDS configuration file doesn't exist. Run 'sds configure'.")
with open(sds_cfg) as f:
context = yaml.load(f, Loader=yaml.FullLoader)
# define and build groups to reduce redundancy in defining roles
# mozart hosts
mozart_host = '%s' % context['MOZART_PVT_IP']
mozart_rabbit_host = '%s' % context['MOZART_RABBIT_PVT_IP']
mozart_redis_host = '%s' % context['MOZART_REDIS_PVT_IP']
mozart_es_host = '%s' % context['MOZART_ES_PVT_IP']
# metrics host
metrics_host = '%s' % context['METRICS_PVT_IP']
metrics_redis_host = '%s' % context['METRICS_REDIS_PVT_IP']
metrics_es_host = '%s' % context['METRICS_ES_PVT_IP']
# grq host
grq_host = '%s' % context['GRQ_PVT_IP']
grq_es_host = '%s' % context['GRQ_ES_PVT_IP']
# factotum host
factotum_host = '%s' % context['FACTOTUM_PVT_IP']
# continuous integration host
ci_host = '%s' % context['CI_PVT_IP']
# all verdi hosts
verdi_hosts = [
'%s' % context['VERDI_PVT_IP'],
]
if context.get('OTHER_VERDI_HOSTS', None) is not None:
verdi_hosts.extend([i['VERDI_PVT_IP']
for i in context['OTHER_VERDI_HOSTS'] if i['VERDI_PVT_IP'] is not None])
# define roles
env.roledefs = {
'mozart': [mozart_host],
'mozart-rabbit': [mozart_rabbit_host],
'mozart-redis': [mozart_redis_host],
'mozart-es': [mozart_es_host],
'metrics': [metrics_host],
'metrics-redis': [metrics_redis_host],
'metrics-es': [metrics_es_host],
'grq': [grq_host],
'grq-es': [grq_es_host],
'factotum': [factotum_host],
'ci': [ci_host],
'verdi': verdi_hosts,
}
# define key file
env.key_filename = context['KEY_FILENAME']
if not os.path.isfile(env.key_filename):
raise RuntimeError("SSH key filename %s doesn't exist. " % env.key_filename +
"Run 'ssh-keygen -t rsa' or copy existing key.")
# abort on prompts (password, hosts, etc.)
env.abort_on_prompts = True
# do all tasks in parallel
env.parallel = True
# set connection timeout
env.timeout = 60
# define ops home directory
ops_dir = context['OPS_HOME']
##########################
# general functions
##########################
def get_context(node_type=None):
"""Modify context based on host string."""
ctx = deepcopy(context)
if node_type == 'mozart':
if ctx['MOZART_PVT_IP'] == ctx['MOZART_RABBIT_PVT_IP']:
ctx['MOZART_RABBIT_PVT_IP'] = "127.0.0.1"
if ctx['MOZART_PVT_IP'] == ctx['MOZART_REDIS_PVT_IP']:
ctx['MOZART_REDIS_PVT_IP'] = "127.0.0.1"
if ctx['MOZART_PVT_IP'] == ctx['MOZART_ES_PVT_IP']:
ctx['MOZART_ES_PVT_IP'] = "127.0.0.1"
if node_type == 'metrics':
if ctx['METRICS_PVT_IP'] == ctx['METRICS_REDIS_PVT_IP']:
ctx['METRICS_REDIS_PVT_IP'] = "127.0.0.1"
if ctx['METRICS_PVT_IP'] == ctx['METRICS_ES_PVT_IP']:
ctx['METRICS_ES_PVT_IP'] = "127.0.0.1"
if node_type == 'grq':
if ctx['GRQ_PVT_IP'] == ctx['GRQ_ES_PVT_IP']:
ctx['GRQ_ES_PVT_IP'] = "127.0.0.1"
# set redis passwords
if ctx['MOZART_REDIS_PASSWORD'] is None:
ctx['MOZART_REDIS_PASSWORD'] = ''
if ctx['METRICS_REDIS_PASSWORD'] is None:
ctx['METRICS_REDIS_PASSWORD'] = ''
# set hostname
ctx['HOST_STRING'] = env.host_string
# split LDAP groups
ctx['LDAP_GROUPS'] = [i.strip() for i in ctx['LDAP_GROUPS'].split(',')]
return ctx
def resolve_files_dir(fname, files_dir):
"""Resolve file or template from user SDS files or default location."""
user_path = get_user_files_path()
return user_path if os.path.exists(os.path.join(user_path, fname)) else files_dir
def resolve_role():
"""Resolve role and hysds directory."""
for role in env.effective_roles:
if env.host_string in env.roledefs[role]:
if '@' in env.host_string:
hostname = env.host_string.split('@')[1]
else:
hostname = env.host_string
break
if role in ('factotum', 'ci'):
hysds_dir = "verdi"
elif role == 'grq':
hysds_dir = "sciflo"
else:
hysds_dir = role
return role, hysds_dir, hostname
def host_type():
run('uname -s')
def fqdn():
run('hostname --fqdn')
def get_ram_size_bytes():
return run("free -b | grep ^Mem: | awk '{print $2}'")
def yum_update():
sudo('yum -y -q update')
def yum_install(package):
sudo('yum -y install %s' % package)
def yum_remove(package):
sudo('yum -y remove %s' % package)
def ps_x():
run('ps x')
def df_hv():
run('df -hv')
def echo(s):
run('echo "%s"' % s)
def mpstat():
sudo('mpstat -P ALL 5 1')
def copy(src, dest):
put(src, dest)
def ln_sf(src, dest):
if exists(dest):
run('rm -rf %s' % dest)
with cd(os.path.dirname(dest)):
run('ln -sf %s %s' % (src, os.path.basename(dest)))
def cp_rp(src, dest):
run('cp -rp %s %s' % (src, dest))
def cp_rp_exists(src, dest):
if exists(src):
run('cp -rp %s %s' % (src, dest))
def rm_rf(path):
run('rm -rf %s' % path)
def sudo_rm_rf(path):
run('sudo rm -rf %s' % path)
def send_template(tmpl, dest, tmpl_dir=None, node_type=None):
if tmpl_dir is None:
tmpl_dir = get_user_files_path()
else:
tmpl_dir = os.path.expanduser(tmpl_dir)
upload_template(tmpl, dest, use_jinja=True, context=get_context(node_type),
template_dir=tmpl_dir)
def send_template_user_override(tmpl, dest, tmpl_dir=None, node_type=None):
"""
Write filled-out template to destination using the template found in a specified template directory.
If template exists in the user files (i.e. ~/.sds/files), that template will be used.
:param tmpl: template file name
:param dest: output file name
:param tmpl_dir: nominal directory containing the template
:param node_type: node type/role
:return: None
"""
if tmpl_dir is None:
tmpl_dir = get_user_files_path()
else:
tmpl_dir = os.path.expanduser(tmpl_dir)
upload_template(tmpl, dest, use_jinja=True, context=get_context(node_type),
template_dir=resolve_files_dir(tmpl, tmpl_dir))
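# Illustrative usage (hypothetical file names, not a call made by this module):
# render `supervisord.conf.tmpl` on the target host, preferring a copy placed
# in ~/.sds/files over the default template directory:
#
#   send_template_user_override('supervisord.conf.tmpl',
#                               '~/mozart/etc/supervisord.conf',
#                               tmpl_dir='~/mozart/ops/hysds/configs',
#                               node_type='mozart')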
def set_spyddder_settings():
upload_template('settings.json.tmpl', '~/verdi/ops/spyddder-man/settings.json', use_jinja=True,
context=get_context(), template_dir=os.path.join(ops_dir, 'mozart/ops/spyddder-man'))
def rsync_code(node_type, dir_path=None):
if dir_path is None:
dir_path = node_type
rm_rf('%s/ops/osaka' % dir_path)
rsync_project('%s/ops/' % dir_path, os.path.join(ops_dir, 'mozart/ops/osaka'),
extra_opts=extra_opts, ssh_opts=ssh_opts)
rm_rf('%s/ops/hysds_commons' % dir_path)
rsync_project('%s/ops/' % dir_path, os.path.join(ops_dir, 'mozart/ops/hysds_commons'),
extra_opts=extra_opts, ssh_opts=ssh_opts)
rm_rf('%s/ops/hysds' % dir_path)
rsync_project('%s/ops/' % dir_path, os.path.join(ops_dir, 'mozart/ops/hysds'),
extra_opts=extra_opts, ssh_opts=ssh_opts)
rm_rf('%s/ops/prov_es' % dir_path)
rsync_project('%s/ops/' % dir_path, os.path.join(ops_dir, 'mozart/ops/prov_es'),
extra_opts=extra_opts, ssh_opts=ssh_opts)
rm_rf('%s/ops/sciflo' % dir_path)
rsync_project('%s/ops/' % dir_path, os.path.join(ops_dir, 'mozart/ops/sciflo'),
extra_opts=extra_opts, ssh_opts=ssh_opts)
rm_rf('%s/ops/chimera' % dir_path)
rsync_project('%s/ops/' % dir_path, os.path.join(ops_dir, 'mozart/ops/chimera'),
extra_opts=extra_opts, ssh_opts=ssh_opts)
rm_rf('%s/ops/container-builder' % dir_path)
rsync_project('%s/ops/' % dir_path, os.path.join(ops_dir, 'mozart/ops/container-builder'),
extra_opts=extra_opts, ssh_opts=ssh_opts)
rm_rf('%s/ops/lightweight-jobs' % dir_path)
rsync_project('%s/ops/' % dir_path, os.path.join(ops_dir, 'mozart/ops/lightweight-jobs'),
extra_opts=extra_opts, ssh_opts=ssh_opts)
rm_rf('%s/ops/hysds-dockerfiles' % dir_path)
rsync_project('%s/ops/' % dir_path, os.path.join(ops_dir, 'mozart/ops/hysds-dockerfiles'),
extra_opts=extra_opts, ssh_opts=ssh_opts)
if node_type == 'mozart':
rm_rf('%s/ops/mozart' % dir_path)
rsync_project('%s/ops/' % dir_path, os.path.join(ops_dir, 'mozart/ops/mozart'), extra_opts=extra_opts,
ssh_opts=ssh_opts)
if node_type == 'verdi':
rm_rf('%s/ops/spyddder-man' % dir_path)
rsync_project('%s/ops/' % dir_path, os.path.join(ops_dir, 'mozart/ops/spyddder-man'), extra_opts=extra_opts,
ssh_opts=ssh_opts)
if node_type == 'factotum':
rm_rf('%s/ops/spyddder-man' % dir_path)
rsync_project('%s/ops/' % dir_path, os.path.join(ops_dir, 'mozart/ops/spyddder-man'), extra_opts=extra_opts,
ssh_opts=ssh_opts)
if node_type == 'grq':
rm_rf('%s/ops/grq2' % dir_path)
rsync_project('%s/ops/' % dir_path, os.path.join(ops_dir, 'mozart/ops/grq2'), extra_opts=extra_opts,
ssh_opts=ssh_opts)
rm_rf('%s/ops/pele' % dir_path)
rsync_project('%s/ops/' % dir_path, os.path.join(ops_dir, 'mozart/ops/pele'), extra_opts=extra_opts,
ssh_opts=ssh_opts)
def svn_co(path, svn_url):
if not exists(path):
with cd(os.path.dirname(path)):
run('svn co --non-interactive --trust-server-cert %s' % svn_url)
def svn_rev(rev, path):
run('svn up -r %s %s' % (rev, path))
def ls(path):
run('ls -al {}'.format(path))
def cat(path):
run('cat {}'.format(path))
def tail(path):
run('tail {}'.format(path))
def tail_f(path):
run('tail -f {}'.format(path))
def grep(grep_str, dir_path):
try:
run('grep -r %s %s' % (grep_str, dir_path))
except:
pass
def chmod(perms, path):
run('chmod -R %s %s' % (perms, path))
def reboot():
sudo('reboot')
def mkdir(d, o, g):
#sudo('mkdir -p %s' % d)
#sudo('chown -R %s:%s %s' % (o, g, d))
run("mkdir -p %s" % d)
def untar(tarfile, chdir):
with cd(chdir):
run('tar xvfj %s' % tarfile)
def untar_gz(cwd, tar_file):
with cd(cwd):
run('tar xvfz %s' % tar_file)
def untar_bz(cwd, tar_file):
with cd(cwd):
run('tar xvfj %s' % tar_file)
def mv(src, dest):
sudo('mv -f %s %s' % (src, dest))
def rsync(src, dest):
rsync_project(dest, src, extra_opts=extra_opts, ssh_opts=ssh_opts)
def remove_docker_images():
run('docker rmi -f $(docker images -q)')
def remove_running_containers():
run('docker rm -f $(docker ps -aq)')
def remove_docker_volumes():
run('docker volume rm $(docker volume ls -q)')
def list_docker_images():
run('docker images')
def stop_docker_containers():
run('docker stop $(docker ps -aq)')
def systemctl(cmd, service):
with settings(warn_only=True):
with hide('everything'):
return run('sudo systemctl %s %s' % (cmd, service), pty=False)
def status():
role, hysds_dir, hostname = resolve_role()
if exists('%s/run/supervisor.sock' % hysds_dir):
with prefix('source %s/bin/activate' % hysds_dir):
run('supervisorctl status')
else:
print((blink(highlight("Supervisord is not running on %s." % role, 'red'))))
def ensure_venv(hysds_dir, update_bash_profile=True, system_site_packages=True, install_supervisor=True):
act_file = "~/%s/bin/activate" % hysds_dir
if system_site_packages:
venv_cmd = "virtualenv --system-site-packages %s" % hysds_dir
else:
venv_cmd = "virtualenv %s" % hysds_dir
if not exists(act_file):
run(venv_cmd)
with prefix('source %s/bin/activate' % hysds_dir):
run('pip install -U pip')
run('pip install -U setuptools')
if install_supervisor:
run('pip install --ignore-installed supervisor')
mkdir('%s/etc' % hysds_dir,
context['OPS_USER'], context['OPS_USER'])
mkdir('%s/log' % hysds_dir,
context['OPS_USER'], context['OPS_USER'])
mkdir('%s/run' % hysds_dir,
context['OPS_USER'], context['OPS_USER'])
if update_bash_profile:
append('.bash_profile',
"source $HOME/{}/bin/activate".format(hysds_dir), escape=True)
append('.bash_profile',
"export FACTER_ipaddress=$(/usr/sbin/ifconfig $(/usr/sbin/route | awk '/default/{print $NF}') | grep 'inet ' | sed 's/addr://' | awk '{print $2}')", escape=True)
def install_pkg_es_templates():
role, hysds_dir, hostname = resolve_role()
if role not in ('grq', 'mozart'):
raise RuntimeError("Invalid fabric function for %s." % role)
with prefix('source %s/bin/activate' % hysds_dir):
run('%s/ops/mozart/scripts/install_es_template.sh %s' % (hysds_dir, role))
def install_base_es_template():
role, hysds_dir, hostname = resolve_role()
send_template(
"es_template-base.json",
"/tmp/es_template-base.json"
)
run("curl -XPUT 'localhost:9200/_template/index_defaults?pretty' -H 'Content-Type: application/json' -d@/tmp/es_template-base.json")
##########################
# grq functions
##########################
def grqd_start(force=False):
mkdir('sciflo/run', context['OPS_USER'], context['OPS_USER'])
if not exists('sciflo/run/supervisord.pid') or force:
with prefix('source sciflo/bin/activate'):
run('supervisord', pty=False)
def grqd_clean_start():
run('rm -rf %s/sciflo/log/*' % ops_dir)
# with prefix('source %s/sciflo/bin/activate' % ops_dir):
# with cd(os.path.join(ops_dir, 'sciflo/ops/grq2/scripts')):
# run('./reset_dumby_indices.sh')
grqd_start(True)
def grqd_stop():
if exists('sciflo/run/supervisor.sock'):
with prefix('source sciflo/bin/activate'):
run('supervisorctl shutdown')
def install_es_template():
with prefix('source sciflo/bin/activate'):
run('sciflo/ops/grq2/scripts/install_es_template.sh')
def clean_hysds_ios():
with prefix('source sciflo/bin/activate'):
run('sciflo/ops/grq2/scripts/clean_hysds_ios_indexes.sh http://localhost:9200')
def create_grq_user_rules_index():
with prefix('source ~/sciflo/bin/activate'):
with cd('~/sciflo/ops/grq2/scripts'):
run('./create_user_rules_index.py')
def create_hysds_ios_grq_index():
with prefix('source ~/sciflo/bin/activate'):
with cd('~/sciflo/ops/grq2/scripts'):
run('./create_hysds_ios_index.py')
def install_ingest_pipeline():
with cd('~/sciflo/ops/grq2/scripts'):
run('python install_ingest_pipeline.py')
##########################
# mozart functions
##########################
def mozartd_start(force=False):
if not exists('mozart/run/supervisord.pid') or force:
with prefix('source mozart/bin/activate'):
run('supervisord', pty=False)
def mozartd_clean_start():
run('rm -rf %s/mozart/log/*' % ops_dir)
mozartd_start(True)
def mozartd_stop():
if exists('mozart/run/supervisor.sock'):
with prefix('source mozart/bin/activate'):
run('supervisorctl shutdown')
def redis_flush():
role, hysds_dir, hostname = resolve_role()
ctx = get_context()
if role == 'mozart' and ctx['MOZART_REDIS_PASSWORD'] != '':
cmd = 'redis-cli -a {MOZART_REDIS_PASSWORD} flushall'.format(**ctx)
elif role == 'metrics' and ctx['METRICS_REDIS_PASSWORD'] != '':
cmd = 'redis-cli -a {METRICS_REDIS_PASSWORD} flushall'.format(**ctx)
else:
        cmd = 'redis-cli flushall'
run(cmd)
def mozart_redis_flush():
ctx = get_context()
if ctx['MOZART_REDIS_PASSWORD'] != '':
run('redis-cli -a {MOZART_REDIS_PASSWORD} -h {MOZART_REDIS_PVT_IP} flushall'.format(**ctx))
else:
run('redis-cli -h {MOZART_REDIS_PVT_IP} flushall'.format(**ctx))
def rabbitmq_queues_flush():
ctx = get_context()
url = 'http://%s:15672/api/queues' % ctx['MOZART_RABBIT_PVT_IP']
r = requests.get('%s?columns=name' % url, auth=(ctx['MOZART_RABBIT_USER'],
ctx['MOZART_RABBIT_PASSWORD']))
r.raise_for_status()
res = r.json()
for i in res:
r = requests.delete('%s/%%2f/%s' % (url, i['name']),
auth=(ctx['MOZART_RABBIT_USER'], ctx['MOZART_RABBIT_PASSWORD']))
r.raise_for_status()
logger.debug("Deleted queue %s." % i['name'])
def mozart_es_flush():
ctx = get_context()
run('curl -XDELETE http://{MOZART_ES_PVT_IP}:9200/_template/*_status'.format(**ctx))
run('~/mozart/ops/hysds/scripts/clean_job_status_indexes.sh http://{MOZART_ES_PVT_IP}:9200'.format(
**ctx))
run('~/mozart/ops/hysds/scripts/clean_task_status_indexes.sh http://{MOZART_ES_PVT_IP}:9200'.format(
**ctx))
run('~/mozart/ops/hysds/scripts/clean_worker_status_indexes.sh http://{MOZART_ES_PVT_IP}:9200'.format(
**ctx))
run('~/mozart/ops/hysds/scripts/clean_event_status_indexes.sh http://{MOZART_ES_PVT_IP}:9200'.format(
**ctx))
#run('~/mozart/ops/hysds/scripts/clean_job_spec_container_indexes.sh http://{MOZART_ES_PVT_IP}:9200'.format(**ctx))
def npm_install_package_json(dest):
with cd(dest):
run('npm install --silent')
##########################
# metrics functions
##########################
def metricsd_start(force=False):
if not exists('metrics/run/supervisord.pid') or force:
with prefix('source metrics/bin/activate'):
run('supervisord', pty=False)
def metricsd_clean_start():
run('rm -rf /home/ops/metrics/log/*')
metricsd_start(True)
def metricsd_stop():
if exists('metrics/run/supervisor.sock'):
with prefix('source metrics/bin/activate'):
run('supervisorctl shutdown')
##########################
# verdi functions
##########################
def kill_hung():
try:
run(
'ps x | grep [j]ob_worker | awk \'{print $1}\' | xargs kill -TERM', quiet=True)
except:
pass
try:
run(
'ps x | grep [s]flExec | awk \'{print $1}\' | xargs kill -TERM', quiet=True)
except:
pass
try:
run(
'ps x | grep [s]flExec | awk \'{print $1}\' | xargs kill -KILL', quiet=True)
except:
pass
ps_x()
def import_kibana(path):
with cd(path):
run("./import_dashboard.sh")
def verdid_start(force=False):
if not exists('verdi/run/supervisord.pid') or force:
with prefix('source verdi/bin/activate'):
run('supervisord', pty=False)
def verdid_clean_start():
run('rm -rf /data/work/scifloWork-ops/* /data/work/jobs/* /data/work/cache/* %s/verdi/log/*' % ops_dir)
verdid_start(True)
def verdid_stop():
if exists('verdi/run/supervisor.sock'):
with prefix('source verdi/bin/activate'):
run('supervisorctl shutdown')
def supervisorctl_up():
with prefix('source verdi/bin/activate'):
run('supervisorctl reread')
run('supervisorctl update')
def supervisorctl_status():
with prefix('source verdi/bin/activate'):
run('supervisorctl status')
def pip_install(pkg, node_type='verdi'):
with prefix('source ~/%s/bin/activate' % node_type):
run('pip install %s' % pkg)
def pip_upgrade(pkg, node_type='verdi'):
with prefix('source ~/%s/bin/activate' % node_type):
run('pip install -U %s' % pkg)
def pip_uninstall(pkg, node_type='verdi'):
with prefix('source ~/%s/bin/activate' % node_type):
run('pip uninstall -y %s' % pkg)
def pip_install_with_req(node_type, dest, ndeps=False):
    with prefix('source ~/%s/bin/activate' % node_type):
        with cd(dest):
            if ndeps:
                logger.debug("ndeps is set, so running pip with --no-deps")
                run('pip install --no-deps -e .')
            else:
                logger.debug(
                    "ndeps is NOT set, so running pip without --no-deps")
                run('pip install -e .')
def python_setup_develop(node_type, dest):
with prefix('source ~/%s/bin/activate' % node_type):
with cd(dest):
run('python setup.py develop')
##########################
# ci functions
##########################
def get_ci_job_info(repo, branch=None):
ctx = get_context()
match = repo_re.search(repo)
if not match:
raise RuntimeError("Failed to parse repo owner and name: %s" % repo)
owner, name = match.groups()
if branch is None:
job_name = "%s_container-builder_%s_%s" % (ctx['VENUE'], owner, name)
config_tmpl = 'config.xml'
else:
job_name = "%s_container-builder_%s_%s_%s" % (
ctx['VENUE'], owner, name, branch)
config_tmpl = 'config-branch.xml'
return job_name, config_tmpl
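# --- Illustrative sketch (not part of the original code) ---------------------
# get_ci_job_info() derives the Jenkins job name from the repo URL via
# `repo_re`, which captures the repository owner and name. For a hypothetical
# venue "ops" and repo "https://github.com/hysds/lightweight-jobs.git":
#
#   owner, name = repo_re.search("https://github.com/hysds/lightweight-jobs.git").groups()
#   # -> ("hysds", "lightweight-jobs")
#   # job name without a branch: "ops_container-builder_hysds_lightweight-jobs"
#   # job name with branch "develop": "ops_container-builder_hysds_lightweight-jobs_develop"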
def add_ci_job(repo, proto, branch=None, release=False):
with settings(sudo_user=context["JENKINS_USER"]):
job_name, config_tmpl = get_ci_job_info(repo, branch)
ctx = get_context()
ctx['PROJECT_URL'] = repo
ctx['BRANCH'] = branch
job_dir = '%s/jobs/%s' % (ctx['JENKINS_DIR'], job_name)
dest_file = '%s/config.xml' % job_dir
mkdir(job_dir, None, None)
chmod('777', job_dir)
if release:
ctx['BRANCH_SPEC'] = "origin/tags/release-*"
else:
ctx['BRANCH_SPEC'] = "**"
if proto in ('s3', 's3s'):
ctx['STORAGE_URL'] = "%s://%s/%s/" % (
proto, ctx['S3_ENDPOINT'], ctx['CODE_BUCKET'])
elif proto == 'gs':
ctx['STORAGE_URL'] = "%s://%s/%s/" % (
proto, ctx['GS_ENDPOINT'], ctx['CODE_BUCKET'])
elif proto in ('dav', 'davs'):
ctx['STORAGE_URL'] = "%s://%s:%s@%s/repository/products/containers/" % \
(proto, ctx['DAV_USER'],
ctx['DAV_PASSWORD'], ctx['DAV_SERVER'])
else:
raise RuntimeError(
"Unrecognized storage type for containers: %s" % proto)
upload_template(config_tmpl, "tmp-jenkins-upload", use_jinja=True, context=ctx,
template_dir=get_user_files_path())
cp_rp("tmp-jenkins-upload", dest_file)
run("rm tmp-jenkins-upload")
def add_ci_job_release(repo, proto):
add_ci_job(repo, proto, release=True)
def run_jenkins_cli(cmd):
ctx = get_context()
juser = ctx.get("JENKINS_API_USER", "").strip()
jkey = ctx.get("JENKINS_API_KEY", "").strip()
if juser == "" or jkey == "":
raise RuntimeError(
"An API user/key is needed for Jenkins. Reload manually or specify one.")
with prefix('source verdi/bin/activate'):
run('java -jar %s/war/WEB-INF/jenkins-cli.jar -s http://localhost:8080 -http -auth %s:%s %s' %
(ctx['JENKINS_DIR'], juser, jkey, cmd))
def reload_configuration():
run_jenkins_cli('reload-configuration')
def build_ci_job(repo, branch=None):
job_name, config_tmpl = get_ci_job_info(repo, branch)
run_jenkins_cli('build %s -s -v' % job_name)
def remove_ci_job(repo, branch=None):
job_name, config_tmpl = get_ci_job_info(repo, branch)
run_jenkins_cli('delete-job %s' % job_name)
##########################
# logstash functions
##########################
def send_shipper_conf(node_type, log_dir, cluster_jobs, redis_ip_job_status,
cluster_metrics, redis_ip_metrics):
role, hysds_dir, hostname = resolve_role()
ctx = get_context(node_type)
ctx.update({'cluster_jobs': cluster_jobs,
'cluster_metrics': cluster_metrics})
if node_type == 'mozart':
upload_template('indexer.conf.mozart', '~/mozart/etc/indexer.conf', use_jinja=True, context=ctx,
template_dir=os.path.join(ops_dir, 'mozart/ops/hysds/configs/logstash'))
upload_template('job_status.template', '~/mozart/etc/job_status.template', use_jinja=True,
template_dir=os.path.join(ops_dir, 'mozart/ops/hysds/configs/logstash'))
upload_template('worker_status.template', '~/mozart/etc/worker_status.template', use_jinja=True,
template_dir=os.path.join(ops_dir, 'mozart/ops/hysds/configs/logstash'))
upload_template('task_status.template', '~/mozart/etc/task_status.template', use_jinja=True,
template_dir=os.path.join(ops_dir, 'mozart/ops/hysds/configs/logstash'))
upload_template('event_status.template', '~/mozart/etc/event_status.template', use_jinja=True,
template_dir=os.path.join(ops_dir, 'mozart/ops/hysds/configs/logstash'))
upload_template('sdswatch_client.conf', '~/mozart/etc/sdswatch_client.conf', use_jinja=True,
context=ctx, template_dir=os.path.join(ops_dir, 'mozart/ops/hysds/configs/logstash'))
send_template("run_sdswatch_client.sh", "~/mozart/bin/run_sdswatch_client.sh")
run("chmod 755 ~/mozart/bin/run_sdswatch_client.sh")
send_template("watch_supervisord_services.py", "~/mozart/bin/watch_supervisord_services.py")
run("chmod 755 ~/mozart/bin/watch_supervisord_services.py")
send_template("watch_systemd_services.py", "~/mozart/bin/watch_systemd_services.py")
run("chmod 755 ~/mozart/bin/watch_systemd_services.py")
elif node_type == 'metrics':
upload_template('indexer.conf.metrics', '~/metrics/etc/indexer.conf', use_jinja=True, context=ctx,
template_dir=os.path.join(ops_dir, 'mozart/ops/hysds/configs/logstash'))
upload_template('job_status.template', '~/metrics/etc/job_status.template', use_jinja=True,
template_dir=os.path.join(ops_dir, 'mozart/ops/hysds/configs/logstash'))
upload_template('worker_status.template', '~/metrics/etc/worker_status.template', use_jinja=True,
template_dir=os.path.join(ops_dir, 'mozart/ops/hysds/configs/logstash'))
upload_template('task_status.template', '~/metrics/etc/task_status.template', use_jinja=True,
template_dir=os.path.join(ops_dir, 'mozart/ops/hysds/configs/logstash'))
upload_template('event_status.template', '~/metrics/etc/event_status.template', use_jinja=True,
template_dir=os.path.join(ops_dir, 'mozart/ops/hysds/configs/logstash'))
upload_template('sdswatch_client.conf', '~/metrics/etc/sdswatch_client.conf', use_jinja=True,
context=ctx, template_dir=os.path.join(ops_dir, 'mozart/ops/hysds/configs/logstash'))
send_template("run_sdswatch_client.sh", "~/metrics/bin/run_sdswatch_client.sh")
run("chmod 755 ~/metrics/bin/run_sdswatch_client.sh")
send_template("watch_supervisord_services.py", "~/metrics/bin/watch_supervisord_services.py")
run("chmod 755 ~/metrics/bin/watch_supervisord_services.py")
send_template("watch_systemd_services.py", "~/metrics/bin/watch_systemd_services.py")
run("chmod 755 ~/metrics/bin/watch_systemd_services.py")
elif node_type == 'grq':
upload_template('sdswatch_client.conf', '~/sciflo/etc/sdswatch_client.conf', use_jinja=True,
context=ctx, template_dir=os.path.join(ops_dir, 'mozart/ops/hysds/configs/logstash'))
send_template("run_sdswatch_client.sh", "~/sciflo/bin/run_sdswatch_client.sh")
run("chmod 755 ~/sciflo/bin/run_sdswatch_client.sh")
send_template("watch_supervisord_services.py", "~/sciflo/bin/watch_supervisord_services.py")
run("chmod 755 ~/sciflo/bin/watch_supervisord_services.py")
send_template("watch_systemd_services.py", "~/sciflo/bin/watch_systemd_services.py")
run("chmod 755 ~/sciflo/bin/watch_systemd_services.py")
elif node_type in ('verdi', 'verdi-asg', 'factotum'):
upload_template('sdswatch_client.conf', '~/verdi/etc/sdswatch_client.conf', use_jinja=True,
context=ctx, template_dir=os.path.join(ops_dir, 'mozart/ops/hysds/configs/logstash'))
send_template("run_sdswatch_client.sh", "~/verdi/bin/run_sdswatch_client.sh")
run("chmod 755 ~/verdi/bin/run_sdswatch_client.sh")
send_template("watch_supervisord_services.py", "~/verdi/bin/watch_supervisord_services.py")
run("chmod 755 ~/verdi/bin/watch_supervisord_services.py")
send_template("watch_systemd_services.py", "~/verdi/bin/watch_systemd_services.py")
run("chmod 755 ~/verdi/bin/watch_systemd_services.py")
else:
raise RuntimeError("Unknown node type: %s" % node_type)
def send_logstash_jvm_options(node_type):
ctx = get_context(node_type)
ram_size_gb = int(get_ram_size_bytes())//1024**3
echo("instance RAM size: {}GB".format(ram_size_gb))
ram_size_gb_half = int(ram_size_gb//2)
ctx['LOGSTASH_HEAP_SIZE'] = 8 if ram_size_gb_half >= 8 else ram_size_gb_half
echo("configuring logstash heap size: {}GB".format(ctx['LOGSTASH_HEAP_SIZE']))
upload_template('jvm.options', '~/logstash/config/jvm.options',
use_jinja=True, context=ctx, template_dir=get_user_files_path())
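# --- Illustrative sketch (not part of the original code) ---------------------
# send_logstash_jvm_options() sizes the Logstash JVM heap to half of the
# instance RAM, capped at 8 GB. The arithmetic, extracted as a hypothetical
# helper:
def _example_logstash_heap_gb(ram_size_bytes):
    ram_size_gb_half = int(ram_size_bytes // 1024 ** 3) // 2
    return 8 if ram_size_gb_half >= 8 else ram_size_gb_half   # e.g. 32 GB RAM -> 8 GB heap (capped)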
##########################
# hysds config functions
##########################
def send_celeryconf(node_type):
ctx = get_context(node_type)
template_dir = os.path.join(ops_dir, 'mozart/ops/hysds/configs/celery')
if node_type == 'mozart':
base_dir = "mozart"
elif node_type == 'metrics':
base_dir = "metrics"
elif node_type in ('verdi', 'verdi-asg'):
base_dir = "verdi"
elif node_type == 'grq':
base_dir = "sciflo"
else:
raise RuntimeError("Unknown node type: %s" % node_type)
tmpl = 'celeryconfig.py.tmpl'
user_path = get_user_files_path()
if node_type == 'verdi-asg':
tmpl_asg = 'celeryconfig.py.tmpl.asg'
if os.path.exists(os.path.join(user_path, tmpl_asg)):
tmpl = tmpl_asg
dest_file = '~/%s/ops/hysds/celeryconfig.py' % base_dir
upload_template(tmpl, dest_file, use_jinja=True, context=ctx,
template_dir=resolve_files_dir(tmpl, template_dir))
def send_mozartconf():
dest_file = '~/mozart/ops/mozart/settings.cfg'
upload_template('settings.cfg.tmpl', dest_file, use_jinja=True, context=get_context('mozart'),
template_dir=os.path.join(ops_dir, 'mozart/ops/mozart/settings'))
with prefix('source ~/mozart/bin/activate'):
with cd('~/mozart/ops/mozart'):
mkdir('~/mozart/ops/mozart/data',
context['OPS_USER'], context['OPS_USER'])
run('./db_create.py')
def send_hysds_ui_conf():
dest_file = '~/mozart/ops/hysds_ui/src/config/index.js'
upload_template('index.template.js', dest_file, use_jinja=True, context=get_context('mozart'),
template_dir=os.path.join(ops_dir, 'mozart/ops/hysds_ui/src/config'))
user_path = get_user_files_path()
tosca_cfg = '~/mozart/etc/tosca.js'
if os.path.exists(os.path.join(user_path, 'tosca.js')):
print('using custom tosca configuration in .sds/files')
send_template_user_override('tosca.js', tosca_cfg, node_type='mozart')
else:
print('using default tosca configuration')
send_template_user_override('tosca.template.js', tosca_cfg,
tmpl_dir=os.path.join(ops_dir, 'mozart/ops/hysds_ui/src/config'),
node_type='mozart')
figaro_cfg = '~/mozart/etc/figaro.js'
if os.path.exists(os.path.join(user_path, 'figaro.js')):
print('using custom figaro configuration in .sds/files')
send_template_user_override('figaro.js', figaro_cfg, node_type='mozart')
else:
print('using default figaro configuration')
send_template_user_override('figaro.template.js', figaro_cfg,
tmpl_dir=os.path.join(ops_dir, 'mozart/ops/hysds_ui/src/config'),
node_type='mozart')
# symlink to ~/mozart/ops/hysds_ui/src/config/
ln_sf(tosca_cfg, os.path.join(ops_dir, 'mozart/ops/hysds_ui/src/config', 'tosca.js'))
ln_sf(figaro_cfg, os.path.join(ops_dir, 'mozart/ops/hysds_ui/src/config', 'figaro.js'))
def send_grq2conf():
dest_file = '~/sciflo/ops/grq2/settings.cfg'
upload_template('settings.cfg.tmpl', dest_file, use_jinja=True, context=get_context('grq'),
template_dir=os.path.join(ops_dir, 'mozart/ops/grq2/config'))
def send_peleconf(send_file='settings.cfg.tmpl', template_dir=get_user_files_path()):
tmpl_dir = os.path.expanduser(template_dir)
dest_file = '~/sciflo/ops/pele/settings.cfg'
upload_template(send_file, dest_file, use_jinja=True, context=get_context('grq'),
template_dir=tmpl_dir)
with prefix('source ~/sciflo/bin/activate'):
with cd('~/sciflo/ops/pele'):
run('flask create-db')
run('flask db init', warn_only=True)
run('flask db migrate', warn_only=True)
def build_hysds_ui():
with cd('~/mozart/ops/hysds_ui'):
run('npm run build')
def create_user_rules_index():
with prefix('source ~/mozart/bin/activate'):
with cd('~/mozart/ops/mozart/scripts'):
run('./create_user_rules_index.py')
def create_hysds_ios_index():
with prefix('source ~/mozart/bin/activate'):
with cd('~/mozart/ops/mozart/scripts'):
run('./create_hysds_ios_index.py')
def send_hysds_scripts(node_type):
role, hysds_dir, hostname = resolve_role()
if node_type == 'mozart':
send_template("run_docker_registry.sh", "~/mozart/bin/run_docker_registry.sh")
run("chmod 755 ~/mozart/bin/run_docker_registry.sh")
elif node_type == 'metrics':
send_template("run_docker_registry.sh", "~/metrics/bin/run_docker_registry.sh")
run("chmod 755 ~/metrics/bin/run_docker_registry.sh")
elif node_type == 'grq':
send_template("run_docker_registry.sh", "~/sciflo/bin/run_docker_registry.sh")
run("chmod 755 ~/sciflo/bin/run_docker_registry.sh")
elif node_type in ('verdi', 'verdi-asg', 'factotum'):
send_template("run_docker_registry.sh", "~/verdi/bin/run_docker_registry.sh")
run("chmod 755 ~/verdi/bin/run_docker_registry.sh")
else:
raise RuntimeError("Unknown node type: %s" % node_type)
##########################
# self-signed SSL certs
##########################
def ensure_ssl(node_type):
ctx = get_context(node_type)
if node_type == "grq":
commonName = ctx['GRQ_FQDN']
elif node_type == "mozart":
commonName = ctx['MOZART_FQDN']
else:
raise RuntimeError("Unknown node type: %s" % node_type)
if not exists('ssl/server.key') or not exists('ssl/server.pem'):
mkdir('ssl', context['OPS_USER'], context['OPS_USER'])
upload_template('ssl_server.cnf', 'ssl/server.cnf', use_jinja=True,
context={'commonName': commonName},
template_dir=get_user_files_path())
with cd('ssl'):
run('openssl genrsa -des3 -passout pass:hysds -out server.key 1024', pty=False)
run('OPENSSL_CONF=server.cnf openssl req -passin pass:hysds -new -key server.key -out server.csr', pty=False)
run('cp server.key server.key.org')
run('openssl rsa -passin pass:hysds -in server.key.org -out server.key', pty=False)
run('chmod 600 server.key*')
run('openssl x509 -passin pass:hysds -req -days 99999 -in server.csr -signkey server.key -out server.pem', pty=False)
##########################
# ship code
##########################
def ship_code(cwd, tar_file, encrypt=False):
ctx = get_context()
with cd(cwd):
run('tar --exclude-vcs -cvjf %s *' % tar_file)
if encrypt is False:
run('aws s3 cp %s s3://%s/' % (tar_file, ctx['CODE_BUCKET']))
else:
run('aws s3 cp --sse %s s3://%s/' % (tar_file, ctx['CODE_BUCKET']))
##########################
# ship creds
##########################
def send_awscreds(suffix=None):
ctx = get_context()
if suffix is None:
aws_dir = '.aws'
boto_file = '.boto'
s3cfg_file = '.s3cfg'
else:
aws_dir = '.aws{}'.format(suffix)
boto_file = '.boto{}'.format(suffix)
s3cfg_file = '.s3cfg{}'.format(suffix)
if exists(aws_dir):
run('rm -rf {}'.format(aws_dir))
mkdir(aws_dir, context['OPS_USER'], context['OPS_USER'])
run('chmod 700 {}'.format(aws_dir))
upload_template('aws_config', '{}/config'.format(aws_dir), use_jinja=True, context=ctx,
template_dir=get_user_files_path())
if ctx['AWS_ACCESS_KEY'] not in (None, ""):
upload_template('aws_credentials', '{}/credentials'.format(aws_dir), use_jinja=True, context=ctx,
template_dir=get_user_files_path())
run('chmod 600 {}/*'.format(aws_dir))
if exists(boto_file):
run('rm -rf {}'.format(boto_file))
upload_template('boto', boto_file, use_jinja=True, context=ctx,
template_dir=get_user_files_path())
run('chmod 600 {}'.format(boto_file))
if exists(s3cfg_file):
run('rm -rf {}'.format(s3cfg_file))
upload_template('s3cfg', s3cfg_file, use_jinja=True, context=ctx,
template_dir=get_user_files_path())
run('chmod 600 {}'.format(s3cfg_file))
##########################
# ship verdi code bundle
##########################
def send_queue_config(queue):
ctx = get_context()
ctx.update({'queue': queue})
upload_template('install.sh', '~/verdi/ops/install.sh', use_jinja=True, context=ctx,
template_dir=get_user_files_path())
upload_template('datasets.json.tmpl.asg', '~/verdi/etc/datasets.json',
use_jinja=True, context=ctx, template_dir=get_user_files_path())
upload_template('supervisord.conf.tmpl', '~/verdi/etc/supervisord.conf.tmpl',
use_jinja=True, context=ctx, template_dir=get_user_files_path())
##########################
# ship s3-bucket style
##########################
def ship_style(bucket=None, encrypt=False):
ctx = get_context()
if bucket is None:
bucket = ctx['DATASET_BUCKET']
else:
ctx.update({'DATASET_BUCKET': bucket})
repo_dir = os.path.join(ops_dir, 'mozart/ops/s3-bucket-listing')
index_file = os.path.join(repo_dir, 'tmp_index.html')
list_js = os.path.join(repo_dir, 'list.js')
index_style = os.path.join(repo_dir, 'index-style')
upload_template('s3-bucket-listing.html.tmpl', index_file, use_jinja=True,
context=ctx, template_dir=get_user_files_path())
if encrypt is False:
run('aws s3 cp %s s3://%s/index.html' % (index_file, bucket))
run('aws s3 cp %s s3://%s/' % (list_js, bucket))
run('aws s3 sync %s s3://%s/index-style' % (index_style, bucket))
else:
run('aws s3 cp --sse %s s3://%s/index.html' % (index_file, bucket))
run('aws s3 cp --sse %s s3://%s/' % (list_js, bucket))
run('aws s3 sync --sse %s s3://%s/index-style' % (index_style, bucket))
##########################
# create cloud function zip
##########################
def create_zip(zip_dir, zip_file):
if exists(zip_file):
run('rm -rf %s' % zip_file)
with cd(zip_dir):
run('zip -r -9 {} *'.format(zip_file))
##########################
# container orchestration
##########################
def rsync_sdsadm():
role, hysds_dir, hostname = resolve_role()
rm_rf('%s/ops/sdsadm' % hysds_dir)
rsync_project('%s/ops/' % hysds_dir, os.path.join(ops_dir, 'mozart/ops/sdsadm'),
extra_opts=extra_opts, ssh_opts=ssh_opts)
def init_sdsadm():
role, hysds_dir, hostname = resolve_role()
with cd(os.path.join(hysds_dir, 'ops', 'sdsadm')):
with prefix('source ~/%s/bin/activate' % hysds_dir):
run("./sdsadm init {} -f".format(role))
def start_sdsadm(release):
role, hysds_dir, hostname = resolve_role()
with cd(os.path.join(hysds_dir, 'ops', 'sdsadm')):
with prefix('source ~/%s/bin/activate' % hysds_dir):
run("./sdsadm -r {} start {} -d".format(release, role))
def stop_sdsadm():
role, hysds_dir, hostname = resolve_role()
with cd(os.path.join(hysds_dir, 'ops', 'sdsadm')):
with prefix('source ~/%s/bin/activate' % hysds_dir):
run("./sdsadm stop {}".format(role))
def logs_sdsadm(follow=False):
role, hysds_dir, hostname = resolve_role()
with cd(os.path.join(hysds_dir, 'ops', 'sdsadm')):
with prefix('source ~/%s/bin/activate' % hysds_dir):
if follow:
run("./sdsadm logs {} -f".format(role))
else:
run("./sdsadm logs {}".format(role))
def ps_sdsadm():
role, hysds_dir, hostname = resolve_role()
with cd(os.path.join(hysds_dir, 'ops', 'sdsadm')):
with prefix('source ~/%s/bin/activate' % hysds_dir):
run("./sdsadm ps {}".format(role))
def run_sdsadm(cmd):
role, hysds_dir, hostname = resolve_role()
with cd(os.path.join(hysds_dir, 'ops', 'sdsadm')):
with prefix('source ~/%s/bin/activate' % hysds_dir):
run("./sdsadm run {} {}".format(role, cmd))
def conf_sdsadm(tmpl, dest, shared=False):
role, hysds_dir, hostname = resolve_role()
if shared:
tmpl_dir = os.path.join(get_user_files_path(), 'orch')
else:
if role in ('factotum', 'ci'):
tmpl_dir = os.path.join(get_user_files_path(), 'orch', 'verdi')
else:
tmpl_dir = os.path.join(get_user_files_path(), 'orch', role)
upload_template(tmpl, dest, use_jinja=True, context=get_context(role), template_dir=tmpl_dir)
| 36.018364
| 176
| 0.627486
|
81f1731e3477258d79fb08e4036db0cea44d216c
| 50
|
py
|
Python
|
src/game/player.py
|
ziyadedher/ml-game-base
|
de9e68b385cc5c49c584577df8ea9ce45ef31b4e
|
[
"MIT"
] | null | null | null |
src/game/player.py
|
ziyadedher/ml-game-base
|
de9e68b385cc5c49c584577df8ea9ce45ef31b4e
|
[
"MIT"
] | null | null | null |
src/game/player.py
|
ziyadedher/ml-game-base
|
de9e68b385cc5c49c584577df8ea9ce45ef31b4e
|
[
"MIT"
] | null | null | null |
"""Manages the player independent of the game."""
| 25
| 49
| 0.72
|
2301408459fb77e8bffd9a4e251c7f3d2db89c9f
| 14,297
|
py
|
Python
|
addons/account/tests/test_tax_report.py
|
SHIVJITH/Odoo_Machine_Test
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
[
"Apache-2.0"
] | null | null | null |
addons/account/tests/test_tax_report.py
|
SHIVJITH/Odoo_Machine_Test
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
[
"Apache-2.0"
] | null | null | null |
addons/account/tests/test_tax_report.py
|
SHIVJITH/Odoo_Machine_Test
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from odoo.addons.account.tests.common import AccountTestInvoicingCommon
from odoo.tests import tagged
@tagged('post_install', '-at_install')
class TaxReportTest(AccountTestInvoicingCommon):
@classmethod
def setUpClass(cls, chart_template_ref=None):
super().setUpClass(chart_template_ref=chart_template_ref)
cls.test_country_1 = cls.env['res.country'].create({
'name': "The Old World",
'code': 'YY',
})
cls.test_country_2 = cls.env['res.country'].create({
'name': "The Principality of Zeon",
'code': 'ZZ',
})
cls.test_country_3 = cls.env['res.country'].create({
'name': "Alagaësia",
'code': 'QQ',
})
cls.tax_report_1 = cls.env['account.tax.report'].create({
'name': "Tax report 1",
'country_id': cls.test_country_1.id,
})
cls.tax_report_line_1_1 = cls.env['account.tax.report.line'].create({
'name': "[01] Line 01",
'tag_name': '01',
'report_id': cls.tax_report_1.id,
'sequence': 2,
})
cls.tax_report_line_1_2 = cls.env['account.tax.report.line'].create({
'name': "[01] Line 02",
'tag_name': '02',
'report_id': cls.tax_report_1.id,
'sequence': 3,
})
cls.tax_report_line_1_3 = cls.env['account.tax.report.line'].create({
'name': "[03] Line 03",
'tag_name': '03',
'report_id': cls.tax_report_1.id,
'sequence': 4,
})
cls.tax_report_line_1_4 = cls.env['account.tax.report.line'].create({
'name': "[04] Line 04",
'report_id': cls.tax_report_1.id,
'sequence': 5,
})
cls.tax_report_line_1_5 = cls.env['account.tax.report.line'].create({
'name': "[05] Line 05",
'report_id': cls.tax_report_1.id,
'sequence': 6,
})
cls.tax_report_line_1_55 = cls.env['account.tax.report.line'].create({
'name': "[55] Line 55",
'tag_name': '55',
'report_id': cls.tax_report_1.id,
'sequence': 7,
})
cls.tax_report_line_1_6 = cls.env['account.tax.report.line'].create({
'name': "[100] Line 100",
'tag_name': '100',
'report_id': cls.tax_report_1.id,
'sequence': 8,
})
cls.tax_report_2 = cls.env['account.tax.report'].create({
'name': "Tax report 2",
'country_id': cls.test_country_1.id,
})
cls.tax_report_line_2_1 = cls.env['account.tax.report.line'].create({
'name': "[01] Line 01, but in report 2",
'tag_name': '01',
'report_id': cls.tax_report_2.id,
'sequence': 1,
})
cls.tax_report_line_2_2 = cls.env['account.tax.report.line'].create({
'name': "[02] Line 02, but in report 2",
'report_id': cls.tax_report_2.id,
'sequence': 2,
})
cls.tax_report_line_2_42 = cls.env['account.tax.report.line'].create({
'name': "[42] Line 42",
'tag_name': '42',
'report_id': cls.tax_report_2.id,
'sequence': 3,
})
cls.tax_report_line_2_6 = cls.env['account.tax.report.line'].create({
'name': "[100] Line 100",
'tag_name': '100',
'report_id': cls.tax_report_2.id,
'sequence': 4,
})
def _get_tax_tags(self, tag_name=None):
domain = [('country_id', '=', self.test_country_1.id), ('applicability', '=', 'taxes')]
if tag_name:
            domain.append(('name', 'like', '_' + tag_name))
return self.env['account.account.tag'].search(domain)
def test_write_add_tagname(self):
""" Adding a tag_name to a line without any should create new tags.
"""
tags_before = self._get_tax_tags()
self.tax_report_line_2_2.tag_name = 'tournicoti'
tags_after = self._get_tax_tags()
self.assertEqual(len(tags_after), len(tags_before) + 2, "Two tags should have been created, +tournicoti and -tournicoti.")
def test_write_single_line_tagname(self):
""" Writing on the tag_name of a line with a non-null tag_name used in
no other line should overwrite the name of the existing tags.
"""
start_tags = self._get_tax_tags()
original_tag_name = self.tax_report_line_1_55.tag_name
original_tags = self.tax_report_line_1_55.tag_ids
self.tax_report_line_1_55.tag_name = 'Mille sabords !'
self.assertEqual(len(self._get_tax_tags(original_tag_name)), 0, "The original tag name of the line should not correspond to any tag anymore.")
self.assertEqual(original_tags, self.tax_report_line_1_55.tag_ids, "The tax report line should still be linked to the same tags.")
self.assertEqual(len(self._get_tax_tags()), len(start_tags), "No new tag should have been created.")
def test_write_single_line_remove_tagname(self):
""" Setting None as the tag_name of a line with a non-null tag_name used
in a unique line should delete the tags, also removing all the references to it
from tax repartition lines and account move lines
"""
test_tax = self.env['account.tax'].create({
'name': "Test tax",
'amount_type': 'percent',
'amount': 25,
'type_tax_use': 'sale',
'invoice_repartition_line_ids': [
(0,0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0,0, {
'factor_percent': 100,
'repartition_type': 'tax',
'tag_ids': [(6, 0, self.tax_report_line_1_55.tag_ids[0].ids)],
}),
],
'refund_repartition_line_ids': [
(0,0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0,0, {
'factor_percent': 100,
'repartition_type': 'tax',
}),
],
})
test_invoice = self.env['account.move'].create({
'move_type': 'out_invoice',
'partner_id': self.partner_a.id,
'date': '1992-12-22',
'invoice_line_ids': [
(0, 0, {'quantity': 1, 'price_unit': 42, 'tax_ids': [(6, 0, test_tax.ids)]}),
],
})
test_invoice.action_post()
self.assertTrue(any(line.tax_tag_ids == self.tax_report_line_1_55.tag_ids[0] for line in test_invoice.line_ids), "The test invoice should contain a tax line with tag 55")
tag_name_before = self.tax_report_line_1_55.tag_name
tag_nber_before = len(self._get_tax_tags())
self.tax_report_line_1_55.tag_name = None
self.assertFalse(self.tax_report_line_1_55.tag_name, "The tag name for line 55 should now be None")
self.assertEqual(len(self._get_tax_tags(tag_name_before)), 0, "None of the original tags for this line should be left after setting tag_name to None if no other line was using this tag_name.")
self.assertEqual(len(self._get_tax_tags()), tag_nber_before - 2, "No new tag should have been created, and the two that were assigned to the report line should have been removed.")
self.assertFalse(test_tax.mapped('invoice_repartition_line_ids.tag_ids'), "There should be no tag left on test tax's repartition lines after the removal of tag 55.")
self.assertFalse(test_invoice.mapped('line_ids.tax_tag_ids'), "The link between test invoice and tag 55 should have been broken. There should be no tag left on the invoice's lines.")
def test_write_multi_no_change(self):
""" Writing the same tag_name as they already use on a set of tax report
lines with the same tag_name should not do anything.
"""
tags_before = self._get_tax_tags().ids
(self.tax_report_line_1_1 + self.tax_report_line_2_1).write({'tag_name': '01'})
tags_after = self._get_tax_tags().ids
self.assertEqual(tags_before, tags_after, "Re-assigning the same tag_name should keep the same tags.")
def test_edit_line_shared_tags(self):
""" Setting the tag_name of a tax report line sharing its tags with another line
should edit the tags' name and the tag_name of this other report line, to
keep consistency.
"""
original_tag_name = self.tax_report_line_1_1.tag_name
self.tax_report_line_1_1.tag_name = 'Groucha'
self.assertEqual(self.tax_report_line_2_1.tag_name, self.tax_report_line_1_1.tag_name, "Modifying the tag name of a tax report line sharing it with another one should also modify the other's.")
def test_edit_multi_line_tagname_all_different_new(self):
""" Writing a tag_name on multiple lines with distinct tag_names should
delete all the former tags and replace them by new ones (also on lines
sharing tags with them).
"""
lines = self.tax_report_line_1_1 + self.tax_report_line_2_2 + self.tax_report_line_2_42
previous_tag_ids = lines.mapped('tag_ids.id')
lines.write({'tag_name': 'crabe'})
new_tags = lines.mapped('tag_ids')
self.assertNotEqual(new_tags.ids, previous_tag_ids, "All the tags should have changed")
self.assertEqual(len(new_tags), 2, "Only two distinct tags should be assigned to all the lines after writing tag_name on them all")
surviving_tags = self.env['account.account.tag'].search([('id', 'in', previous_tag_ids)])
self.assertEqual(len(surviving_tags), 0, "All former tags should have been deleted")
self.assertEqual(self.tax_report_line_1_1.tag_ids, self.tax_report_line_2_1.tag_ids, "The report lines initially sharing their tag_name with the written-on lines should also have been impacted")
def test_remove_line_dependency(self):
""" Setting to None the tag_name of a report line sharing its tags with
other lines should only impact this line ; the other ones should keep their
link to the initial tags (their tag_name will hence differ in the end).
"""
tags_before = self.tax_report_line_1_1.tag_ids
self.tax_report_line_1_1.tag_name = None
self.assertEqual(len(self.tax_report_line_1_1.tag_ids), 0, "Setting the tag_name to None should have removed the tags.")
self.assertEqual(self.tax_report_line_2_1.tag_ids, tags_before, "Setting tag_name to None on a line linked to another one via tag_name should break this link.")
def test_tax_report_change_country(self):
""" Tests that duplicating and modifying the country of a tax report works
as intended (countries wanting to use the tax report of another
country need that).
"""
# Copy our first report
tags_before = self._get_tax_tags().ids
copied_report_1 = self.tax_report_1.copy()
copied_report_2 = self.tax_report_1.copy()
tags_after = self._get_tax_tags().ids
self.assertEqual(tags_before, tags_after, "Report duplication should not create or remove any tag")
for original, copy in zip(self.tax_report_1.get_lines_in_hierarchy(), copied_report_1.get_lines_in_hierarchy()):
self.assertEqual(original.tag_ids, copy.tag_ids, "Copying the lines of a tax report should keep the same tags on lines")
# Assign another country to one of the copies
copied_report_1.country_id = self.test_country_2
for original, copy in zip(self.tax_report_1.get_lines_in_hierarchy(), copied_report_1.get_lines_in_hierarchy()):
if original.tag_ids or copy.tag_ids:
self.assertNotEqual(original.tag_ids, copy.tag_ids, "Changing the country of a copied report should create brand new tags for all of its lines")
for original, copy in zip(self.tax_report_1.get_lines_in_hierarchy(), copied_report_2.get_lines_in_hierarchy()):
self.assertEqual(original.tag_ids, copy.tag_ids, "Changing the country of a copied report should not impact the other copies or the original report")
        # Directly change the country of a report without copying it first (some of its tags are shared, but not all)
original_report_2_tags = {line.id: line.tag_ids.ids for line in self.tax_report_2.get_lines_in_hierarchy()}
self.tax_report_2.country_id = self.test_country_2
for line in self.tax_report_2.get_lines_in_hierarchy():
if line == self.tax_report_line_2_42:
# This line is the only one of the report not sharing its tags
self.assertEqual(line.tag_ids.ids, original_report_2_tags[line.id], "The tax report lines not sharing their tags with any other report should keep the same tags when the country of their report is changed")
elif line.tag_ids or original_report_2_tags[line.id]:
self.assertNotEqual(line.tag_ids.ids, original_report_2_tags[line.id], "The tax report lines sharing their tags with other report should receive new tags when the country of their report is changed")
def test_unlink_report_line_tags(self):
""" Under certain circumstances, unlinking a tax report line should also unlink
the tags that are linked to it. We test those cases here.
"""
def check_tags_unlink(tag_name, report_lines, unlinked, error_message):
report_lines.unlink()
surviving_tags = self._get_tax_tags(tag_name)
required_len = 0 if unlinked else 2 # 2 for + and - tag
self.assertEqual(len(surviving_tags), required_len, error_message)
check_tags_unlink('42', self.tax_report_line_2_42, True, "Unlinking one line not sharing its tags should also unlink them")
check_tags_unlink('01', self.tax_report_line_1_1, False, "Unlinking one line sharing its tags with others should keep the tags")
        check_tags_unlink('100', self.tax_report_line_1_6 + self.tax_report_line_2_6, True, "Unlinking all the lines sharing the same tags should also unlink them")
| 49.98951
| 222
| 0.638665
|
31fde5c69a5219440b7aabd672f18928d88b9d4f
| 6,082
|
py
|
Python
|
simple_icd_10.py
|
StefanoTrv/simple_icd_10
|
4995baacb8a5f5e78c067a5c17734ff1af283704
|
[
"CC0-1.0"
] | 8
|
2020-12-07T14:41:00.000Z
|
2022-02-05T09:15:44.000Z
|
simple_icd_10.py
|
StefanoTrv/simple-icd-10
|
c1a0d15ab6a7a924bfaac3d889716380e5441370
|
[
"CC0-1.0"
] | 2
|
2021-08-16T09:55:18.000Z
|
2021-09-23T21:00:31.000Z
|
simple_icd_10.py
|
StefanoTrv/simple-icd-10
|
c1a0d15ab6a7a924bfaac3d889716380e5441370
|
[
"CC0-1.0"
] | null | null | null |
import xml.etree.ElementTree as ET
try:
import importlib.resources as pkg_resources
except ImportError:
# Try backported to PY<37 `importlib_resources`.
import importlib_resources as pkg_resources
import data # relative-import the "package" containing the data
chapter_list = []
code_to_node = {}
all_codes_list = []
all_codes_list_no_dots = []
code_to_index_dictionary = {}
class _CodeTree:
def __init__(self, tree, parent = None):
#initialize all the values
self.name = ""
self.description = ""
self.type = ""
self.parent = parent
self.children = []
#reads the data from the subtrees
self.type=tree.attrib["type"]
for subtree in tree:
if subtree.tag=="name":
self.name=subtree.text
elif subtree.tag=="description":
self.description=subtree.text
else:
self.children.append(_CodeTree(subtree, parent=self))
#adds the new node to the dictionary
code_to_node[self.name]=self
def _load_codes():
#creates the tree
root = ET.fromstring(pkg_resources.read_text(data, 'icd_10_v2019.xml'))
for child in root:
chapter_list.append(_CodeTree(child))
_load_codes()
def _add_dot_to_code(code):
if len(code)<4 or code[3]==".":
return code
elif code[:3]+"."+code[3:] in code_to_node:
return code[:3]+"."+code[3:]
else:
return code
def is_valid_item(code):
return code in code_to_node or len(code)>=4 and code[:3]+"."+code[3:] in code_to_node
def is_chapter(code):
code = _add_dot_to_code(code)
if code in code_to_node:
return code_to_node[code].type=="chapter"
else:
return False
def is_block(code):
code = _add_dot_to_code(code)
if code in code_to_node:
return code_to_node[code].type=="block"
else:
return False
def is_category(code):
code = _add_dot_to_code(code)
if code in code_to_node:
return code_to_node[code].type=="category"
else:
return False
def is_subcategory(code):
code = _add_dot_to_code(code)
if code in code_to_node:
return code_to_node[code].type=="subcategory"
else:
return False
def is_category_or_subcategory(code):
return is_subcategory(code) or is_category(code)
def is_chapter_or_block(code):
return is_block(code) or is_chapter(code)
def get_description(code):
if not is_valid_item(code):
raise ValueError("The code \""+code+"\" does not exist.")
node = code_to_node[_add_dot_to_code(code)]
return node.description
def get_parent(code):
if not is_valid_item(code):
raise ValueError("The code \""+code+"\" does not exist.")
node = code_to_node[_add_dot_to_code(code)]
if node.parent!=None:
return node.parent.name
else:
return ""
def get_children(code):
if not is_valid_item(code):
raise ValueError("The code \""+code+"\" does not exist.")
node = code_to_node[_add_dot_to_code(code)]
res = []
for child in node.children:
res.append(child.name)
return res
def is_leaf(code):
if not is_valid_item(code):
raise ValueError("The code \""+code+"\" does not exist.")
node = code_to_node[_add_dot_to_code(code)]
return len(node.children)==0
def get_ancestors(code):
if not is_valid_item(code):
raise ValueError("The code \""+code+"\" does not exist.")
node = code_to_node[_add_dot_to_code(code)]
result = []
while node.parent != None:
result.append(node.parent.name)
node=node.parent
return result
def get_descendants(code):
if not is_valid_item(code):
raise ValueError("The code \""+code+"\" does not exist.")
node = code_to_node[_add_dot_to_code(code)]
result = []
_add_children_to_list(node, result)
return result
def _add_children_to_list(node, list):
for child in node.children:
list.append(child.name)
_add_children_to_list(child,list)
def is_ancestor(a,b):
if not is_valid_item(a):
raise ValueError("The code \""+a+"\" does not exist.")
    # normalize to the dotted form used by get_ancestors before comparing
    a = _add_dot_to_code(a)
    return a in get_ancestors(b) and a!=b
def is_descendant(a,b):
return is_ancestor(b,a)
def get_nearest_common_ancestor(a,b):
anc_a = [_add_dot_to_code(a)] + get_ancestors(a)
anc_b = [_add_dot_to_code(b)] + get_ancestors(b)
if len(anc_b) > len(anc_a):
temp = anc_a
anc_a = anc_b
anc_b = temp
for anc in anc_a:
if anc in anc_b:
return anc
return ""
def get_all_codes(with_dots=True):
if all_codes_list==[]:
for chapter in chapter_list:
_add_tree_to_list(chapter)
if with_dots:
return all_codes_list.copy()
else:
return all_codes_list_no_dots.copy()
def _add_tree_to_list(tree):
all_codes_list.append(tree.name)
if(len(tree.name)>4 and tree.name[3]=="."):
all_codes_list_no_dots.append(tree.name[:3]+tree.name[4:])
else:
all_codes_list_no_dots.append(tree.name)
for child in tree.children:
_add_tree_to_list(child)
def get_index(code):
if not is_valid_item(code):
raise ValueError("The code \""+code+"\" does not exist.")
code = _add_dot_to_code(code)
if all_codes_list==[]:
for chapter in chapter_list:
_add_tree_to_list(chapter)
if code in code_to_index_dictionary:
return code_to_index_dictionary[code]
else:
i=0
for c in all_codes_list:
if c==code:
code_to_index_dictionary[code]=i
return i
else:
i=i+1
def remove_dot(code):
if all_codes_list==[]:
for chapter in chapter_list:
_add_tree_to_list(chapter)
return all_codes_list_no_dots[get_index(code)]
def add_dot(code):
if all_codes_list==[]:
for chapter in chapter_list:
_add_tree_to_list(chapter)
return all_codes_list[get_index(code)]
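# --- Usage sketch (illustrative addition, not part of the original module) ---
# The specific code "I21.0" below is only an assumed example; any code present
# in the bundled XML behaves the same way.
if __name__ == "__main__":
    example = "I21.0"
    if is_valid_item(example):
        print(get_description(example))   # human-readable description
        print(get_ancestors(example))     # parents up to the chapter
        print(remove_dot(example))        # the same code without the dot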
| 28.157407
| 89
| 0.643374
|
43512cc067e70620f4ec0c95779359cf60eea53b
| 5,572
|
py
|
Python
|
configs/representation/ssb/ssb_r18_ncg_sgd_cos_50e_r2_2x8x1_k400.py
|
happywu/mmaction2-CycleContrast
|
019734e471dffd1161b7a9c617ba862d2349a96c
|
[
"Apache-2.0"
] | null | null | null |
configs/representation/ssb/ssb_r18_ncg_sgd_cos_50e_r2_2x8x1_k400.py
|
happywu/mmaction2-CycleContrast
|
019734e471dffd1161b7a9c617ba862d2349a96c
|
[
"Apache-2.0"
] | null | null | null |
configs/representation/ssb/ssb_r18_ncg_sgd_cos_50e_r2_2x8x1_k400.py
|
happywu/mmaction2-CycleContrast
|
019734e471dffd1161b7a9c617ba862d2349a96c
|
[
"Apache-2.0"
] | null | null | null |
# model settings
temperature = 0.2
with_norm = True
query_dim = 128
model = dict(
type='SimSiamBaseTracker',
backbone=dict(
type='ResNet',
pretrained=None,
depth=18,
out_indices=(3, ),
# strides=(1, 2, 1, 1),
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
zero_init_residual=True),
# cls_head=None,
# patch_head=None,
img_head=dict(
type='SimSiamHead',
in_channels=512,
norm_cfg=dict(type='SyncBN'),
num_projection_fcs=3,
projection_mid_channels=512,
projection_out_channels=512,
num_predictor_fcs=2,
predictor_mid_channels=128,
predictor_out_channels=512,
with_norm=True,
loss_feat=dict(type='CosineSimLoss', negative=False),
spatial_type='avg'))
# model training and testing settings
train_cfg = dict(intra_video=False, transpose_temporal=True)
test_cfg = dict(
precede_frames=20,
topk=10,
temperature=0.2,
strides=(1, 2, 1, 1),
out_indices=(2, 3),
neighbor_range=24,
with_first=True,
with_first_neighbor=True,
output_dir='eval_results')
# dataset settings
dataset_type = 'VideoDataset'
dataset_type_val = 'DavisDataset'
data_prefix = 'data/kinetics400/videos_train'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
data_prefix_val = 'data/davis/DAVIS/JPEGImages/480p'
anno_prefix_val = 'data/davis/DAVIS/Annotations/480p'
data_root_val = 'data/davis/DAVIS'
ann_file_val = 'data/davis/DAVIS/ImageSets/davis2017_val_list_rawframes.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='DecordInit'),
dict(type='SampleFrames', clip_len=2, frame_interval=8, num_clips=1),
# dict(type='DuplicateFrames', times=2),
dict(type='DecordDecode'),
# dict(
# type='RandomResizedCrop',
# area_range=(0.2, 1.),
# same_across_clip=False,
# same_on_clip=False),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
# dict(
# type='Flip',
# flip_ratio=0.5,
# same_across_clip=False,
# same_on_clip=False),
# dict(
# type='ColorJitter',
# brightness=0.4,
# contrast=0.4,
# saturation=0.4,
# hue=0.1,
# p=0.8,
# same_across_clip=False,
# same_on_clip=False),
# dict(
# type='RandomGrayScale',
# p=0.2,
# same_across_clip=False,
# same_on_clip=False),
# dict(
# type='RandomGaussianBlur',
# p=0.5,
# same_across_clip=False,
# same_on_clip=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(type='SequentialSampleFrames', frame_interval=1),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 480), keep_ratio=True),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(
type='Collect',
keys=['imgs', 'ref_seg_map'],
meta_keys=('frame_dir', 'frame_inds', 'original_shape', 'seg_map')),
dict(type='ToTensor', keys=['imgs', 'ref_seg_map'])
]
data = dict(
videos_per_gpu=128,
workers_per_gpu=16,
val_workers_per_gpu=1,
train=dict(
type='RepeatDataset',
times=2,
dataset=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_prefix,
pipeline=train_pipeline)),
val=dict(
type=dataset_type_val,
ann_file=ann_file_val,
data_prefix=data_prefix_val,
data_root=data_root_val,
anno_prefix=anno_prefix_val,
pipeline=val_pipeline,
test_mode=True),
test=dict(
type=dataset_type_val,
ann_file=ann_file_val,
data_prefix=data_prefix_val,
data_root=data_root_val,
anno_prefix=anno_prefix_val,
pipeline=val_pipeline,
test_mode=True))
# optimizer
# optimizer = dict(type='Adam', lr=1e-4)
optimizer = dict(type='SGD', lr=0.05, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0, by_epoch=False)
# lr_config = dict(policy='Fixed')
# lr_config = dict(
# policy='step',
# warmup='linear',
# warmup_iters=100,
# warmup_ratio=0.001,
# step=[1, 2])
total_epochs = 50
checkpoint_config = dict(interval=1)
evaluation = dict(
interval=1,
metrics='davis',
key_indicator='feat_1.J&F-Mean',
rule='greater')
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook'),
dict(
type='WandbLoggerHook',
init_kwargs=dict(
project='mmaction2',
name='{{fileBasenameNoExtension}}',
resume=True,
tags=['ssb'],
dir='wandb/{{fileBasenameNoExtension}}',
config=dict(
model=model,
train_cfg=train_cfg,
test_cfg=test_cfg,
data=data))),
])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = False
| 30.78453
| 78
| 0.61827
|
ebf5b5c7e4c142c242656d1f859b26eeb4ce462a
| 5,555
|
py
|
Python
|
GoogleTTS.py
|
zisti/79
|
775cce82c67bf45317f235f5dd2696203de3b21f
|
[
"MIT"
] | 110
|
2015-01-10T14:11:03.000Z
|
2022-03-25T14:53:56.000Z
|
GoogleTTS.py
|
zisti/79
|
775cce82c67bf45317f235f5dd2696203de3b21f
|
[
"MIT"
] | 4
|
2015-02-08T21:55:49.000Z
|
2018-04-25T18:51:52.000Z
|
GoogleTTS.py
|
zisti/79
|
775cce82c67bf45317f235f5dd2696203de3b21f
|
[
"MIT"
] | 44
|
2015-01-04T23:18:25.000Z
|
2022-03-25T14:53:58.000Z
|
#!/usr/bin/python
import sys
import argparse
import re
import urllib, urllib2
import time
from collections import namedtuple
def split_text(input_text, max_length=100):
"""
Try to split between sentences to avoid interruptions mid-sentence.
Failing that, split between words.
See split_text_rec
"""
def split_text_rec(input_text, regexps, max_length=max_length):
"""
Split a string into substrings which are at most max_length.
Tries to make each substring as big as possible without exceeding
max_length.
Will use the first regexp in regexps to split the input into
substrings.
        If it is impossible to make all the segments less than or equal to
        max_length with a regexp, then the next regexp in regexps will be used
        to split those into subsegments.
        If there are still substrings that are too big after all regexps have
        been used, those substrings will be split at max_length.
Args:
input_text: The text to split.
regexps: A list of regexps.
            If you want the separator to be included in the substrings you
            can add parentheses around the regular expression to create a
            group. Eg.: '[ab]' -> '([ab])'
Returns:
a list of strings of maximum max_length length.
"""
if(len(input_text) <= max_length): return [input_text]
#mistakenly passed a string instead of a list
if isinstance(regexps, basestring): regexps = [regexps]
regexp = regexps.pop(0) if regexps else '(.{%d})' % max_length
text_list = re.split(regexp, input_text)
combined_text = []
#first segment could be >max_length
combined_text.extend(split_text_rec(text_list.pop(0), regexps, max_length))
for val in text_list:
current = combined_text.pop()
concat = current + val
if(len(concat) <= max_length):
combined_text.append(concat)
else:
combined_text.append(current)
#val could be >max_length
combined_text.extend(split_text_rec(val, regexps, max_length))
return combined_text
return split_text_rec(input_text.replace('\n', ''),
['([\,|\.|;]+)', '( )'])
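# --- Usage sketch (illustrative addition, not part of the original script) ---
# Demonstrates the contract of split_text: every returned chunk is at most
# 100 characters long and nothing is lost, since the captured separators are
# kept. The helper name and the sample string below are made up.
def _demo_split_text():
    sample = "A first sentence. A second, much longer sentence; and more words " * 5
    chunks = split_text(sample)
    assert all(len(chunk) <= 100 for chunk in chunks)
    assert ''.join(chunks) == sample
    return chunks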
audio_args = namedtuple('audio_args',['language','output'])
def audio_extract(input_text='',args=None):
# This accepts :
# a dict,
# an audio_args named tuple
# or arg parse object
if args is None:
        args = audio_args(language='en',output=open('output.mp3', 'wb'))
if type(args) is dict:
args = audio_args(
language=args.get('language','en'),
            output=open(args.get('output','output.mp3'), 'wb')
)
#process input_text into chunks
#Google TTS only accepts up to (and including) 100 characters long texts.
#Split the text in segments of maximum 100 characters long.
combined_text = split_text(input_text)
#download chunks and write them to the output file
for idx, val in enumerate(combined_text):
mp3url = "http://translate.google.com/translate_tts?tl=%s&q=%s&total=%s&idx=%s" % (
args.language,
urllib.quote(val),
len(combined_text),
idx)
headers = {"Host": "translate.google.com",
"Referer": "http://www.gstatic.com/translate/sound_player2.swf",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) "
"AppleWebKit/535.19 (KHTML, like Gecko) "
"Chrome/18.0.1025.163 Safari/535.19"
}
req = urllib2.Request(mp3url, '', headers)
sys.stdout.write('.')
sys.stdout.flush()
if len(val) > 0:
try:
response = urllib2.urlopen(req)
args.output.write(response.read())
time.sleep(.5)
except urllib2.URLError as e:
print ('%s' % e)
args.output.close()
print('Saved MP3 to %s' % args.output.name)
def text_to_speech_mp3_argparse():
description = 'Google TTS Downloader.'
parser = argparse.ArgumentParser(description=description,
epilog='tunnel snakes rule')
parser.add_argument('-o', '--output',
action='store', nargs='?',
help='Filename to output audio to',
type=argparse.FileType('wb'), default='out.mp3')
parser.add_argument('-l', '--language',
action='store',
nargs='?',
help='Language to output text to.', default='en')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-f', '--file',
type=argparse.FileType('r'),
help='File to read text from.')
group.add_argument('-s', '--string',
action='store',
nargs='+',
help='A string of text to convert to speech.')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
if __name__ == "__main__":
args = text_to_speech_mp3_argparse()
if args.file:
input_text = args.file.read()
if args.string:
input_text = ' '.join(map(str, args.string))
audio_extract(input_text=input_text, args=args)
| 38.576389
| 91
| 0.578398
|
4dc558e78479f2fc07c46b49a21c0c61024a7498
| 397
|
py
|
Python
|
DjangoKoan/wsgi.py
|
sarvex/DjangoKoans
|
a4ba1e787dc508d1706897f26d3e91219ad5d982
|
[
"0BSD"
] | null | null | null |
DjangoKoan/wsgi.py
|
sarvex/DjangoKoans
|
a4ba1e787dc508d1706897f26d3e91219ad5d982
|
[
"0BSD"
] | null | null | null |
DjangoKoan/wsgi.py
|
sarvex/DjangoKoans
|
a4ba1e787dc508d1706897f26d3e91219ad5d982
|
[
"0BSD"
] | null | null | null |
"""
WSGI config for DjangoKoan project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DjangoKoan.settings")
application = get_wsgi_application()
| 23.352941
| 78
| 0.788413
|
ac66e9927f71bc58680e077b0dd19f9c2c6ce41a
| 789
|
py
|
Python
|
src/main.py
|
lukefav/Reinforcement-Learning-Sandbox
|
0f705e9f0d663594c7169839c55fd8e6b5d01c7b
|
[
"MIT"
] | null | null | null |
src/main.py
|
lukefav/Reinforcement-Learning-Sandbox
|
0f705e9f0d663594c7169839c55fd8e6b5d01c7b
|
[
"MIT"
] | null | null | null |
src/main.py
|
lukefav/Reinforcement-Learning-Sandbox
|
0f705e9f0d663594c7169839c55fd8e6b5d01c7b
|
[
"MIT"
] | null | null | null |
import argparse
from argparse import RawTextHelpFormatter
from q_learning.q_learning import QLearning
def main():
help_description = "Choose Project:\n" \
"0: QLearning"
parser = argparse.ArgumentParser(description="Choose Reinforcement Learning Project",
formatter_class=RawTextHelpFormatter)
parser.add_argument("project", metavar='p', type=int,
help=help_description)
project_num = parser.parse_args().project
if project_num == 0:
print("QLearning Loading...")
project = QLearning()
print("Running Project.")
project.run()
else:
print(f"Incorrect project number, you entered: {project_num}")
if __name__ == "__main__":
main()
| 27.206897
| 89
| 0.627376
|
045617c66a6763adac6a0a732fcb31d244e7bdfb
| 4,316
|
py
|
Python
|
PhysicsTools/SelectorUtils/python/VIDSelectorValidator.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
PhysicsTools/SelectorUtils/python/VIDSelectorValidator.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
PhysicsTools/SelectorUtils/python/VIDSelectorValidator.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
from __future__ import print_function
import md5
import ROOT
# load FWLite C++ libraries
ROOT.gSystem.Load("libFWCoreFWLite.so")
ROOT.gSystem.Load("libDataFormatsFWLite.so")
ROOT.FWLiteEnabler.enable()
#cms python data types
import FWCore.ParameterSet.Config as cms
# load FWlite python libraries
from DataFormats.FWLite import Handle, Events
#hasher= md5.new()
#
#hasher.update('hello world')
#
#print hasher.digest()
#print hasher.hexdigest()
class VIDSelectorValidator:
def __init__(self, selector, collection_type, collection_name):
self.__hasher = md5.new()
self.__selector = selector
self.__colltype = collection_type
self.__collname = collection_name
self.__signalfiles = []
self.__backgroundfiles = []
self.__mixfiles = []
def setSignalFiles(self, files):
if not isinstance(files,list):
raise Exception('BadFileInput','You need to give "setSignalFiles" a list of strings')
self.__signalfiles = files[:]
def setBackgroundFiles(self, files):
if not isinstance(files,list):
raise Exception('BadFileInput','You need to give "setBackgroundFiles" a list of strings')
self.__backgroundfiles = files[:]
def setMixFiles(self, files):
if not isinstance(files,list):
raise Exception('BadFileInput','You need to give "setMixFiles" a list of strings')
self.__mixfiles = files[:]
def runValidation(self):
samples = {}
samples['signal'] = self.__signalfiles
samples['background'] = self.__backgroundfiles
samples['mix'] = self.__mixfiles
select = self.__selector
print('running validation for: %s'%(select.name()))
# checksum of the input files
if not len(samples['signal'] + samples['background'] + samples['mix']):
raise Exception('NoInputFiles','There were no input files given, cannot validate!')
for key in sorted(samples.keys()):
self.processInputList(samples[key],key)
print('input files checksum: %s'%(self.__hasher.hexdigest()))
for key in sorted(samples.keys()):
if len(samples[key]):
local_hash = md5.new()
self.processEvents(samples[key],key,local_hash)
self.__hasher.update(local_hash.hexdigest())
print('event processing checksum: %s'%(self.__hasher.hexdigest()))
self.__hasher.update(select.md5String())
print('total checksum: %s'%(self.__hasher.hexdigest()))
def processInputList(self,the_list,name):
for item in the_list:
self.__hasher.update(item)
print('Input %s file: %s'%(name,item))
def processEvents(self,the_list,name,hasher):
#data products
handle, productLabel = Handle(self.__colltype), self.__collname
#now loop over the events in each category
events = Events(the_list)
n_pass, n_fail = 0,0
sub_cutnames = []
sub_hashes = []
for idstring in repr(self.__selector).split('\n'):
if idstring == '': continue
sub_cutnames.append(idstring.split()[2]) # gets the cutname
sub_hashes.append(md5.new(idstring))
for event in events:
event.getByLabel(productLabel,handle)
for i,obj in enumerate(handle.product()):
if self.__selector(handle.product(),i,event):
n_pass += 1
else:
n_fail += 1
icut = 0
for idstring in repr(self.__selector).split('\n'):
if idstring == '': continue
sub_hashes[icut].update(idstring)
icut += 1
for sub_hash in sub_hashes:
hasher.update(sub_hash.hexdigest())
hasher.update(str(n_pass))
hasher.update(str(n_fail))
print('%s sample pass : fail : hash -> %d : %d : %s'%(name,n_pass,n_fail,hasher.hexdigest()))
print('%s sample cut breakdown:'%(name))
for i,sub_hash in enumerate(sub_hashes):
print('\t%s hash -> %s'%(sub_cutnames[i],sub_hash.hexdigest()))
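# --- Usage sketch (illustrative addition, not part of the original module) ---
# The selector object, collection type and file names below are placeholders;
# in practice the selector comes from a VID configuration and the files are
# EDM ROOT files readable by FWLite.
#
#   validator = VIDSelectorValidator(my_selector,
#                                    'std::vector<pat::Electron>',
#                                    'slimmedElectrons')
#   validator.setSignalFiles(['signal.root'])
#   validator.setBackgroundFiles(['background.root'])
#   validator.runValidation()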
| 35.669421
| 101
| 0.596386
|
71dc823685969f017e26e36c4383e28ba3f8196e
| 2,172
|
py
|
Python
|
nstcentertainmentbot/helpers/database/spotify_sql.py
|
TharukRenuja/NSTC-ENTERTAINMENT-BOT
|
683bf584fa8c87c7249ea462fbffb5b0879b8633
|
[
"MIT"
] | 1
|
2021-10-04T07:48:47.000Z
|
2021-10-04T07:48:47.000Z
|
nstcentertainmentbot/helpers/database/spotify_sql.py
|
TharukRenuja/NSTC-ENTERTAINMENT-BOT
|
683bf584fa8c87c7249ea462fbffb5b0879b8633
|
[
"MIT"
] | null | null | null |
nstcentertainmentbot/helpers/database/spotify_sql.py
|
TharukRenuja/NSTC-ENTERTAINMENT-BOT
|
683bf584fa8c87c7249ea462fbffb5b0879b8633
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# MIT License
# Copyright (c) 2021 Tharuk , This is a part of nstcentertainmentbot
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
import threading
from sqlalchemy import Column, Integer, UnicodeText
from nstcentertainmentbot.helpers.database import SESSION, BASE
class SpotifyCreds(BASE):
__tablename__ = "spotifycreds"
user_id = Column(Integer, primary_key=True)
spotify_id = Column(UnicodeText)
spotify_access_token = Column(UnicodeText)
spotify_refresh_token = Column(UnicodeText)
def __init__(
self,
user_id,
spotify_id=None,
spotify_access_token=None,
spotify_refresh_token=None,
):
self.user_id = user_id
self.spotify_id = spotify_id
self.spotify_access_token = spotify_access_token
self.spotify_refresh_token = spotify_refresh_token
SpotifyCreds.__table__.create(checkfirst=True)
SPT_INSERTION_LOCK = threading.RLock()
def update_creds(
user_id, spotify_id=None, spotify_access_token=None, spotify_refresh_token=None
):
with SPT_INSERTION_LOCK:
sptcreds = SESSION.query(SpotifyCreds).get(user_id)
if not sptcreds:
sptcreds = SpotifyCreds(
user_id, spotify_id, spotify_access_token, spotify_refresh_token
)
SESSION.add(sptcreds)
SESSION.flush()
else:
sptcreds.spotify_id = spotify_id
sptcreds.spotify_access_token = spotify_access_token
sptcreds.spotify_refresh_token = spotify_refresh_token
SESSION.commit()
def get_sptuser(user_id):
try:
return SESSION.query(SpotifyCreds).get(user_id)
finally:
SESSION.close()
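# --- Usage sketch (illustrative addition, not part of the original module) ---
# The user id and token values below are placeholders.
#
#   update_creds(12345,
#                spotify_id='some_spotify_user',
#                spotify_access_token='access-token',
#                spotify_refresh_token='refresh-token')
#   creds = get_sptuser(12345)
#   if creds:
#       print(creds.spotify_id)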
| 31.478261
| 83
| 0.710866
|
49d689e7329299832a2abc18d9c46a87998afd36
| 3,966
|
py
|
Python
|
nodes/skadilance.py
|
fryougi/farmbot
|
739511965cdbcba444fd39235da2dbed84809cba
|
[
"MIT"
] | 1
|
2020-07-02T18:06:41.000Z
|
2020-07-02T18:06:41.000Z
|
nodes/skadilance.py
|
fryougi/farmbot
|
739511965cdbcba444fd39235da2dbed84809cba
|
[
"MIT"
] | null | null | null |
nodes/skadilance.py
|
fryougi/farmbot
|
739511965cdbcba444fd39235da2dbed84809cba
|
[
"MIT"
] | 1
|
2020-06-29T04:18:42.000Z
|
2020-06-29T04:18:42.000Z
|
# -*- coding: utf-8 -*-
"""
LSS (2k4)
Lancelot
YourOwnSkader
SupportSkader
"""
# Adding to the system path is needed
# because no longer in parent directory
# and I want to run this file as a script
import sys, os
sys.path.append(os.path.abspath('../'))
import farmbot as fb
class Farmer_DSS(fb.Farmbot):
def __init__(self):
fb.Farmbot.__init__(self,'blue','../')
def wave1(self):
res = self.advancestart()
if res < 0:
return -1
# Skills selection (may be empty)
res = self.useskill(self.xy_skillb1)
if res < 0:
return -1
res = self.seltarget(self.xy_targeta)
if res < 0:
return -1
res = self.useskill(self.xy_skillc1)
if res < 0:
return -1
res = self.seltarget(self.xy_targeta)
if res < 0:
return -1
res = self.usemcskill(self.xy_mcskill3)
if res < 0:
return -1
res = self.seltarget(self.xy_targeta)
if res < 0:
return -1
# Attack
res = self.attack()
if res < 0:
return -1
# Card selection (pick 3)
self.usecard(self.xy_npa)
self.usecard(self.xy_card2)
self.usecard(self.xy_card3)
return 0
def wave2(self):
res = self.advancewave()
if res < 0:
return -1
# Skills selection (may be empty)
res = self.useskill(self.xy_skilla3)
if res < 0:
return -1
res = self.useskill(self.xy_skillb2)
if res < 0:
return -1
res = self.useskill(self.xy_skillb3)
if res < 0:
return -1
res = self.seltarget(self.xy_targeta)
if res < 0:
return -1
# Attack
res = self.attack()
if res < 0:
return -1
# Card selection (pick 3)
self.usecard(self.xy_npa)
self.usecard(self.xy_card2)
self.usecard(self.xy_card3)
return 0
def wave3(self):
res = self.advancewave()
if res < 0:
return -1
# Skills selection (may be empty)
#res = self.useskill(self.xy_skillb2)
#if res < 0:
# return -1
res = self.useskill(self.xy_skillc2)
if res < 0:
return -1
res = self.useskill(self.xy_skillc3)
if res < 0:
return -1
res = self.seltarget(self.xy_targeta)
if res < 0:
return -1
res = self.usemcskill(self.xy_mcskill1)
if res < 0:
return -1
res = self.seltarget(self.xy_targeta)
if res < 0:
return -1
# Attack
res = self.attack()
if res < 0:
return -1
# Card selection (pick 3)
self.usecard(self.xy_npa)
self.usecard(self.xy_card2)
self.usecard(self.xy_card3)
# Potential cleanup
res = self.cardcleanup(3)
if res < 0:
return -1
return 0
def farm(self,nruns=1):
self.runs = 0
self.refills = 0
self.refilltype = 'gapple' # [rapple,gapple,sapple,bapple]
self.supportce = 'first' # [lunchtime,training,lesson,monalisa,eventspecific]
self.supportservant = 'skadi' # [waver,skadi]
self.saveframe = False
while True:
# Start quest (set it up for the farmer)
# Repeat quest no longer uses the party screen
# Battle procedure Wave1
res = self.wave1()
if res < 0:
return -1
# Battle prodedure Wave2
res = self.wave2()
if res < 0:
return -1
# Battle prodedure Wave3
res = self.wave3()
if res < 0:
return -1
# Finished run
res = self.finishbattle()
if res < 0:
return -1
self.runs += 1
# Exit out to main menu if finished
if self.runs >= nruns:
res = self.norepeatquest()
break
# Repeat quest if not done (automatic refills)
res = self.repeatquestrefill()
if res < 0:
return -1
# Select new support
res = self.selectsupport()
if res < 0:
return -1
return self.runs
def farmalarm(self, nruns=1):
res = self.farm(nruns)
print(res)
self.playalarm()
return
if __name__ == "__main__":
farmer = Farmer_DSS()
farmer.activate()
| 23.607143
| 81
| 0.590267
|
0e8fe5ba20fe2167d28a9366dab7c347244b12a8
| 2,487
|
py
|
Python
|
deepdrive_zero/experiments/intersection_2_agents_fine_tune_collision_resume_add_comfort1.py
|
shantanuwadnerkar/deepdrive-zero
|
3134a5b092a53ff60e4207d7419fd6a19cb5a6e9
|
[
"MIT"
] | null | null | null |
deepdrive_zero/experiments/intersection_2_agents_fine_tune_collision_resume_add_comfort1.py
|
shantanuwadnerkar/deepdrive-zero
|
3134a5b092a53ff60e4207d7419fd6a19cb5a6e9
|
[
"MIT"
] | null | null | null |
deepdrive_zero/experiments/intersection_2_agents_fine_tune_collision_resume_add_comfort1.py
|
shantanuwadnerkar/deepdrive-zero
|
3134a5b092a53ff60e4207d7419fd6a19cb5a6e9
|
[
"MIT"
] | null | null | null |
import os
import sys
from deepdrive_zero.experiments import utils
from spinup.utils.run_utils import ExperimentGrid
from spinup import ppo_pytorch
import torch
experiment_name = os.path.basename(__file__)[:-3]
notes = """Previously, we diverged by forgetting how to avoid collisions. However,
g-force/jerk did fall and episode return did rise nicely throughout, so reward
did not match well with desired perf (i.e. not to collide). Going to resume from
the best trip pct model (before divergence of
intersection_2_agents_fine_tune_collision_resume_add_comfort.py) and lower
the jerk and gforce penalties. Also going to drop the learning rate and
increase steps per epoch to curtail forgetting. Also we were underestimating
jerk by 60x and it was already 10x higher on average than g-force, so (1) this
is a very large amount of jerk, and (2) we need to decrease the jerk penalty by
a lot so that it does not dominate the reward."""
env_config = dict(
env_name='deepdrive-2d-intersection-w-gs-allow-decel-v0',
is_intersection_map=True,
expect_normalized_action_deltas=False,
jerk_penalty_coeff=0.10 / (60*10), # dropped from 0.10, see notes
gforce_penalty_coeff=0.031,
collision_penalty_coeff=1,
end_on_harmful_gs=False,
incent_win=True,
constrain_controls=False,
)
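# Worked arithmetic for clarity (illustrative addition): the jerk penalty above
# drops from 0.10 to 0.10 / (60 * 10) = 0.10 / 600 ~= 1.67e-4. One factor of 60
# compensates for the previous ~60x underestimation of jerk and one factor of
# 10 for jerk being ~10x larger than g-force on average, as described in notes.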
net_config = dict(
hidden_units=(256, 256),
activation=torch.nn.Tanh
)
eg = ExperimentGrid(name=experiment_name)
eg.add('env_name', env_config['env_name'], '', False)
# eg.add('seed', 0)
eg.add('resume', '/home/c2/src/tmp/spinningup/data/intersection_2_agents_fine_tune_collision_resume_add_comfort/intersection_2_agents_fine_tune_collision_resume_add_comfort_s0_2020_03-13_15-24.07/best_trip_pct/2020_03-13_16-18.59')
eg.add('reinitialize_optimizer_on_resume', True)
eg.add('pi_lr', 3e-6) # doesn't seem to have an effect, but playing it safe and lowering learning rate since we're not restoring adam rates
eg.add('vf_lr', 1e-5) # doesn't seem to have an effect, but playing it safe and lowering learning rate since we're not restoring adam rates
eg.add('epochs', 8000)
eg.add('steps_per_epoch', 8000)
eg.add('ac_kwargs:hidden_sizes', net_config['hidden_units'], 'hid')
eg.add('ac_kwargs:activation', net_config['activation'], '')
eg.add('notes', notes, '')
eg.add('run_filename', os.path.realpath(__file__), '')
eg.add('env_config', env_config, '')
def train():
eg.run(ppo_pytorch)
if __name__ == '__main__':
utils.run(train_fn=train, env_config=env_config, net_config=net_config)
| 42.87931
| 231
| 0.7692
|
eec9d821076d4e657808a530fe841598fb6097ee
| 3,883
|
py
|
Python
|
datacube/ui/common.py
|
Kirill888/datacube-core
|
996b395e15f975decb77c0ca9fa0555177674b2f
|
[
"Apache-2.0"
] | 2
|
2019-10-24T15:29:54.000Z
|
2019-10-24T15:29:58.000Z
|
datacube/ui/common.py
|
Kirill888/datacube-core
|
996b395e15f975decb77c0ca9fa0555177674b2f
|
[
"Apache-2.0"
] | 2
|
2021-03-26T00:37:36.000Z
|
2021-03-31T20:05:01.000Z
|
datacube/ui/common.py
|
Kirill888/datacube-core
|
996b395e15f975decb77c0ca9fa0555177674b2f
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
"""
Common methods for UI code.
"""
from pathlib import Path
from typing import Union
from toolz.functoolz import identity
from datacube.utils import read_documents, InvalidDocException, SimpleDocNav, is_supported_document_type, is_url
def get_metadata_path(possible_path: Union[str, Path]):
"""
Find a metadata path for a given input/dataset path.
Needs to handle local files as well as remote URLs
:rtype: str
"""
# We require exact URLs, lets skip any sort of fancy investigation and mapping
if isinstance(possible_path, str) and is_url(possible_path):
return possible_path
dataset_path = Path(possible_path)
# They may have given us a metadata file directly.
if dataset_path.is_file() and is_supported_document_type(dataset_path):
return dataset_path
# Otherwise there may be a sibling file with appended suffix '.agdc-md.yaml'.
expected_name = dataset_path.parent.joinpath('{}.agdc-md'.format(dataset_path.name))
found = _find_any_metadata_suffix(expected_name)
if found:
return found
# Otherwise if it's a directory, there may be an 'agdc-metadata.yaml' file describing all contained datasets.
if dataset_path.is_dir():
expected_name = dataset_path.joinpath('agdc-metadata')
found = _find_any_metadata_suffix(expected_name)
if found:
return found
raise ValueError('No metadata found for input %r' % dataset_path)
def _find_any_metadata_suffix(path):
"""
Find any supported metadata files that exist with the given file path stem.
(supported suffixes are tried on the name)
    Eg. searching for '/tmp/ga-metadata' will find files such as '/tmp/ga-metadata.yaml',
    '/tmp/ga-metadata.json' or '/tmp/ga-metadata.yaml.gz', if they exist: any suffix supported by read_documents()
:type path: pathlib.Path
"""
existing_paths = list(filter(is_supported_document_type, path.parent.glob(path.name + '*')))
if not existing_paths:
return None
if len(existing_paths) > 1:
raise ValueError('Multiple matched metadata files: {!r}'.format(existing_paths))
return existing_paths[0]
def ui_path_doc_stream(paths, logger=None, uri=True, raw=False):
"""Given a stream of URLs, or Paths that could be directories, generate a stream of
(path, doc) tuples.
For every path:
      1. If it is a directory, find the metadata file, or log an error if none is found
2. Load all documents from that path and return one at a time (parsing
errors are logged, but processing should continue)
:param paths: Filesystem paths
:param logger: Logger to use to report errors
:param uri: If True return path in uri format, else return it as filesystem path
:param raw: By default docs are wrapped in :class:`SimpleDocNav`, but you can
instead request them to be raw dictionaries
"""
def on_error1(p, e):
if logger is not None:
logger.error('No supported metadata docs found for dataset %s', str(p))
def on_error2(p, e):
if logger is not None:
logger.error('Failed reading documents from %s', str(p))
yield from _path_doc_stream(_resolve_doc_files(paths, on_error=on_error1),
on_error=on_error2, uri=uri, raw=raw)
def _resolve_doc_files(paths, on_error):
for p in paths:
try:
yield get_metadata_path(p)
except ValueError as e:
on_error(p, e)
def _path_doc_stream(files, on_error, uri=True, raw=False):
"""See :func:`ui_path_doc_stream` for documentation"""
maybe_wrap = identity if raw else SimpleDocNav
for fname in files:
try:
for p, doc in read_documents(fname, uri=uri):
yield p, maybe_wrap(doc)
except InvalidDocException as e:
on_error(fname, e)
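# --- Usage sketch (illustrative addition, not part of the original module) ---
# Typical call pattern for ui_path_doc_stream; the path and logger below are
# placeholders.
#
#   import logging
#   _LOG = logging.getLogger(__name__)
#   for uri, doc in ui_path_doc_stream(['/data/datasets/scene1'], logger=_LOG):
#       ...  # doc is a SimpleDocNav wrapper (or a plain dict when raw=True)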
| 32.630252
| 115
| 0.68993
|
56f5dbc9ef3ce178368b9a59a6cb1a180da3b181
| 5,003
|
py
|
Python
|
datamax_printer/datamax_printer.py
|
inNETMonitoring/datamax-python-adapter
|
ca47992a3a1f30577a90e6ea01d5f0aae79f8dfb
|
[
"MIT"
] | 1
|
2019-05-20T18:48:16.000Z
|
2019-05-20T18:48:16.000Z
|
datamax_printer/datamax_printer.py
|
inNETMonitoring/datamax-python-adapter
|
ca47992a3a1f30577a90e6ea01d5f0aae79f8dfb
|
[
"MIT"
] | 2
|
2019-06-19T09:36:23.000Z
|
2021-10-20T22:33:49.000Z
|
datamax_printer/datamax_printer.py
|
inNETMonitoring/datamax-python-adapter
|
ca47992a3a1f30577a90e6ea01d5f0aae79f8dfb
|
[
"MIT"
] | 3
|
2019-06-18T12:34:19.000Z
|
2021-03-17T12:57:39.000Z
|
import socket
class DPLPrinter:
SOH = '\x01'
STX = '\x02'
command_mode = True
def __init__(self, printer_ip, printer_port=9100):
self.printer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.connection_info = (printer_ip, printer_port)
self.printer.connect(self.connection_info)
def __send_to_printer(self, command: str):
print('Sent: ' + command)
return self.printer.send(command.encode('ASCII'))
def __adjust_number_length(self, value: str, length: int):
while len(value) < length:
value = '0' + value
return value
def start_document(self):
"""
        Sets the printer into label formatting mode. Call this function before using set_label() or set_qr_code().
:return: True if successful, False otherwise
"""
if not self.command_mode:
raise RuntimeError('Already in label formatting mode')
success = False
if self.command_mode and self.__send_to_printer(f'{self.STX}L') == 2:
self.__send_to_printer('D11\x0D')
self.command_mode = False
success = True
return success
def configure(self, border_bottom=0, imperial=False):
"""
:param border_bottom: The distance (in 0.1mm) from the bottom for labels. This value will be added to the
y-coordinate every time you specify a label.
        If a value below 50 is passed, it is reset to the default value (the default values can be found in the DPL
        Guide).
        :param imperial: For those who prefer non-SI measurements, it is possible to set the printer to imperial mode.
        If this flag is true, all distances are passed in inches/100.
:return:
"""
if not self.command_mode:
            raise RuntimeError('Cannot configure printer while in label formatting mode')
if imperial:
self.__send_to_printer(f'{self.STX}n')
else:
self.__send_to_printer(f'{self.STX}m')
        sop = self.__adjust_number_length(str(border_bottom), 4)
self.__send_to_printer(f'{self.STX}O{sop}')
def set_label(self, x_pos, y_pos, text, font_id, font_size, rotation=0):
"""
:param x_pos: Position of the text label on the X-Axis (in 0.1mm)
:param y_pos: Position of the text label on the Y-Axis (in 0.1mm)
:param text: The text to print
:param font_id: The font ID (1 - 9). Please refer to the DPL Manual Appendix C for examples.
        :param font_size: If you are using a monospaced font (1-8), this value is a tuple containing the factors to
        multiply the width and the height by (width_multiplier, height_multiplier). Values 1-9 are supported.
        In case you use the CG Triumvirate font (ID = 9), this value is the font size in pt.
:return: Number of bytes sent to the printer
"""
if self.command_mode:
raise RuntimeError('Cannot print label in command mode')
rot_value = 1 # default = no rotation
if rotation == 90:
rot_value = 2
elif rotation == 180:
rot_value = 3
elif rotation == 270:
rot_value = 4
# Adjust lengths
x_pos = self.__adjust_number_length(str(x_pos), 4)
y_pos = self.__adjust_number_length(str(y_pos), 4)
size = '000'
width_multiplier = 1
height_multiplier = 1
if font_id == 9:
size = 'A' + self.__adjust_number_length(str(font_size), 2)
else:
if len(font_size) == 2:
width_multiplier = font_size[0]
height_multiplier = font_size[1]
data = str(rot_value) + str(font_id) + str(width_multiplier) + str(height_multiplier) + size + y_pos + x_pos + \
text + '\x0D'
return self.__send_to_printer(data)
def set_qr_code(self, x_pos, y_pos, data, size=8):
"""
Generates a QR-Code.
:param x_pos: Position of the QR-Code on the X-Axis (in 0.1mm)
:param y_pos: Position of the QR-Code on the Y-Axis (in 0.1mm)
:param data: Data to be encoded in the QR-Code.
(Numeric Data, Alphanumeric Data, 8-bit byte data or Kanji characters)
:param size: Size of 1 dot in QR-Code (in 0.1mm) (1-37)
:return: Number of bytes sent to the printer
"""
if self.command_mode:
raise RuntimeError('Cannot print qr-code in command mode')
        x_pos = self.__adjust_number_length(str(x_pos), 4)
        y_pos = self.__adjust_number_length(str(y_pos), 4)
if size > 9:
size = chr(ord('A') + (size - 10))
command = f'1W1d{size}{size}000{y_pos}{x_pos}{data}\x0D\x0D'
return self.__send_to_printer(command)
def print(self):
self.__send_to_printer('E')
        self.command_mode = True  # After sending E, the printer switches back to command mode
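A hedged usage sketch for DPLPrinter; the printer address, coordinates, fonts and data below are placeholders chosen to match the docstrings above, not a verified label layout:
# Usage sketch -- IP address and layout values are illustrative only.
printer = DPLPrinter('192.168.1.50')              # hypothetical printer IP, default port 9100
printer.configure(border_bottom=100)              # metric mode, 10 mm bottom offset
printer.start_document()                          # enter label formatting mode
printer.set_label(100, 200, 'Hello DPL', 9, 12)   # CG Triumvirate at 12 pt
printer.set_label(100, 400, 'MONO', 2, (2, 2))    # monospaced font, doubled width/height
printer.set_qr_code(500, 200, 'https://example.com', size=6)
printer.print()                                   # send 'E', back to command mode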
| 38.782946
| 120
| 0.608035
|
e0ea5ef69acc96ddbf81d992d4bd0d6e4a0f3c53
| 1,427
|
py
|
Python
|
python/misc/collatz_conjecture.py
|
christopher-burke/warmups
|
140c96ada87ec5e9faa4622504ddee18840dce4a
|
[
"MIT"
] | null | null | null |
python/misc/collatz_conjecture.py
|
christopher-burke/warmups
|
140c96ada87ec5e9faa4622504ddee18840dce4a
|
[
"MIT"
] | 2
|
2022-03-10T03:49:14.000Z
|
2022-03-14T00:49:54.000Z
|
python/misc/collatz_conjecture.py
|
christopher-burke/warmups
|
140c96ada87ec5e9faa4622504ddee18840dce4a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Collatz Conjecture.
A Collatz sequence is generated like this. Start with a positive number.
If it's even, halve it. If it's odd, multiply it by 3 and add one.
Repeat the process with the resulting number. The Collatz Conjecture is
that every sequence eventually reaches 1 (continuing past 1 just results in
an endless repeat of the sequence (4, 2, 1)).
The length of the sequence from starting number to 1 varies widely.
Create a function that takes a number as an argument and returns a tuple of
two elements — the number of steps in the Collatz sequence of the number,
and the highest number reached.
Source:
https://edabit.com/challenge/Z8REdTE5P57f4q7dK
"""
CACHE = {1: [1]}  # module-level cache so memoized sequences persist across calls
def collatz_sequence(n: int):
    """Find the Collatz sequence of n."""
    if n in CACHE:
        return CACHE[n]
    next_ = int(n // 2) if n % 2 == 0 else int(3 * n + 1)
    CACHE[n] = [n] + collatz_sequence(next_)
    return CACHE[n]
def collatz(n: int):
"""Take number n and calculate steps and highest Collatz sequence."""
sequence = collatz_sequence(n)
return (len(sequence), max(sequence),)
def main():
"""Run sample functions. Do not import."""
assert collatz(3) == (8, 16)
assert collatz(7) == (17, 52)
assert collatz(17) == (13, 52)
assert collatz(42) == (9, 64)
assert collatz(33) == (27, 100)
print('Passed.')
if __name__ == "__main__":
main()
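A small worked example (not part of the original file) showing what the functions above compute for a starting value of 6:
# 6 -> 3 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1: nine steps, peaking at 16.
assert collatz_sequence(6) == [6, 3, 10, 5, 16, 8, 4, 2, 1]
assert collatz(6) == (9, 16)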
| 27.980392
| 75
| 0.673441
|
c3c3b36b48c0af9437451e9c88202a4b3f891459
| 1,712
|
py
|
Python
|
src/autoks/core/prior.py
|
lschlessinger1/MS-project
|
e1c02d1d1a7a2480ff6f14f30625dc42ee3417e3
|
[
"MIT"
] | 2
|
2019-04-29T15:18:11.000Z
|
2019-12-13T18:58:40.000Z
|
src/autoks/core/prior.py
|
lschlessinger1/MS-project
|
e1c02d1d1a7a2480ff6f14f30625dc42ee3417e3
|
[
"MIT"
] | 275
|
2019-02-19T22:59:39.000Z
|
2020-10-03T08:56:08.000Z
|
src/autoks/core/prior.py
|
lschlessinger1/MS-project
|
e1c02d1d1a7a2480ff6f14f30625dc42ee3417e3
|
[
"MIT"
] | null | null | null |
import importlib
from typing import Type
from src.autoks.backend.prior import RawPriorType, PRIOR_DICT
from src.evalg.serialization import Serializable
class PriorDist(Serializable):
"""Wrapper for backend prior."""
def __init__(self, raw_prior_cls: Type[RawPriorType], raw_prior_args: dict):
self._raw_prior_cls = raw_prior_cls
self._raw_prior_args = raw_prior_args
self.raw_prior = self._raw_prior_cls(**self._raw_prior_args)
@classmethod
def from_prior_str(cls, prior_name: str, raw_prior_args: dict):
return cls(PRIOR_DICT[prior_name], raw_prior_args)
def to_dict(self) -> dict:
"""Get a dictionary representation of the object.
This dict representation includes metadata such as the object's module and class name.
:return:
"""
input_dict = super().to_dict()
input_dict["raw_prior_cls"] = self.raw_prior.__class__.__name__
input_dict["raw_prior_module"] = self.raw_prior.__module__
input_dict["raw_prior_args"] = self._raw_prior_args
return input_dict
@classmethod
def _format_input_dict(cls, input_dict: dict) -> dict:
input_dict = super()._format_input_dict(input_dict)
class_name = input_dict.pop("raw_prior_cls")
module_name = input_dict.pop("raw_prior_module")
module = importlib.import_module(module_name)
class_ = getattr(module, class_name)
input_dict["raw_prior_cls"] = class_
return input_dict
def __eq__(self, other):
if isinstance(other, PriorDist):
return self._raw_prior_cls == other._raw_prior_cls and self._raw_prior_args == other._raw_prior_args
return False
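A serialization round-trip sketch for PriorDist. The prior name 'GAUSSIAN' and its arguments are assumed entries of PRIOR_DICT, and from_dict is assumed to be supplied by the Serializable base class (implied by _format_input_dict above):
# Hedged sketch -- 'GAUSSIAN', its arguments, and Serializable.from_dict are assumptions.
prior = PriorDist.from_prior_str('GAUSSIAN', {'mu': 0.0, 'sigma': 1.0})
state = prior.to_dict()                 # embeds raw_prior_cls / raw_prior_module metadata
restored = PriorDist.from_dict(state)   # reverses _format_input_dict's class lookup
assert restored == prior                # __eq__ compares class and constructor arguments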
| 35.666667
| 112
| 0.702687
|