max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
core/views.py | jwestarb/bazar | 0 | 12758351 | from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.contrib import messages
from django.db.models import Avg, Max, Sum, Count, ProtectedError, DateTimeField
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models.functions import Trunc, ExtractHour
from django.utils import timezone
from .models import Cadastro, Recibo
from .forms import CadastroForm, ReciboForm
from .exportexcel import QuerysetToWorkbook
def index(request):
    """Render the dashboard with aggregate registration/sales statistics.

    Context supplied to ``index.html``:
      - qtd_cadastro: total number of registrations
      - total_vendas / media_vendas: aggregate dicts ({'total__sum'/'total__avg'})
      - cad_por_hora: today's registrations grouped per hour
      - vendas_por_dia: receipts grouped per calendar day (count + summed total)
    """
    qtd_cadastro = Cadastro.objects.count()
    total_vendas = Recibo.objects.all().aggregate(Sum('total'))
    media_vendas = Recibo.objects.all().aggregate(Avg('total'))
    # Today's window: [00:00:00, 23:59:59] in the active timezone.
    hoje = timezone.now()
    dt_inicio = hoje.replace(hour=0, minute=0, second=0, microsecond=0)
    dt_final = hoje.replace(hour=23, minute=59, second=59, microsecond=0)
    # Registrations per hour, restricted to today only.
    cad_por_hora = Cadastro.objects.filter(data_hora__range=(dt_inicio, dt_final)) \
        .annotate(hora=ExtractHour('data_hora')) \
        .values('hora') \
        .order_by('hora') \
        .annotate(qtd=Count('id')) \
        .values('hora', 'qtd')
    # Receipts grouped per day: number of receipts and sum of their totals.
    vendas_por_dia = Recibo.objects.annotate(dia=Trunc('data_hora', 'day', output_field=DateTimeField())) \
        .values('dia') \
        .order_by('dia') \
        .annotate(qtd=Count('id')) \
        .annotate(total=Sum('total')) \
        .values('dia', 'qtd', 'total')
    return render(request,
                  'index.html',
                  {
                      'qtd_cadastro': qtd_cadastro,
                      'total_vendas': total_vendas,
                      'media_vendas': media_vendas,
                      'vendas_por_dia': vendas_por_dia,
                      'cad_por_hora': cad_por_hora
                  })
def cadastro(request):
    """List registrations (paginated, newest ticket first) and handle creation.

    Also computes ``nova_senha``, the next free ticket number, for display in
    the creation form.
    """
    # Next ticket number: highest existing "senha" + 1, or 1 for an empty table.
    max_senha = Cadastro.objects.all().aggregate(Max('senha'))
    if max_senha['senha__max']:
        nova_senha = max_senha['senha__max'] + 1
    else:
        nova_senha = 1
    if request.method == 'POST':
        form = CadastroForm(request.POST)
        if form.is_valid():
            novo = form.save()
            messages.success(request, 'Cadastro adicionado com sucesso.')
            # Redirect after POST so a browser refresh does not resubmit.
            return HttpResponseRedirect(reverse('cadastro'))
    else:
        form = CadastroForm()
    cadastro_lista = Cadastro.objects.order_by('-senha')
    paginator = Paginator(cadastro_lista, 25)  # 25 rows per page
    page = request.GET.get('page')
    cadastros = paginator.get_page(page)
    return render(request,
                  'cadastro.html',
                  {
                      'cadastros': cadastros,
                      'form': form,
                      'nova_senha': nova_senha
                  }
                  )
def cadastro_delete(request, cadastro_id):
    """Delete registration *cadastro_id* and redirect back to the listing.

    Emits a warning (instead of failing) when the registration does not exist
    or is protected by an existing receipt (FK with on_delete=PROTECT).
    """
    try:
        cadastro = Cadastro.objects.get(pk=cadastro_id)
        try:
            cadastro.delete()
        except ProtectedError:
            # A receipt references this registration; deletion is blocked.
            messages.warning(request, 'Cadastro {} já possui recibo e não pode ser deletado.'.format(cadastro_id))
            return HttpResponseRedirect(reverse('cadastro'))
        messages.success(request, 'Cadastro {} deletado com sucesso.'.format(cadastro_id))
        return HttpResponseRedirect(reverse('cadastro'))
    except Cadastro.DoesNotExist:
        messages.warning(request, 'Cadastro {} não encontrado.'.format(cadastro_id))
        return HttpResponseRedirect(reverse('cadastro'))
def recibo_lista(request):
    """Render the five most recently created receipts."""
    latest_receipts = Recibo.objects.order_by('-id')[:5]
    context = {'recibos': latest_receipts}
    return render(request, 'recibo_lista.html', context)
def recibo_novo(request, senha):
    """Create a receipt for the registration identified by ticket *senha*.

    Refuses to create a second receipt for the same registration, and flags
    (``cor_alerta == 'red'``) customers whose accumulated purchases, matched
    by CPF, exceed 700.
    """
    try:
        cadastro = Cadastro.objects.get(senha=senha)
    except Cadastro.DoesNotExist:
        messages.warning(request, 'Cadastro com a senha {} não encontrado.'.format(senha))
        return HttpResponseRedirect(reverse('recibo_lista'))
    # One receipt per registration. first() issues a single LIMIT 1 query
    # instead of fetching the whole queryset just to call len() on it.
    existe_rec = Recibo.objects.filter(cadastro=cadastro).first()
    if existe_rec is not None:
        messages.warning(request, 'Cadastro com a senha {} já possui o recibo {}.'.format(senha, existe_rec.id))
        return HttpResponseRedirect(reverse('recibo_lista'))
    soma_compras = Recibo.objects.filter(cadastro__cpf=cadastro.cpf).aggregate(total_compras=Sum('total'))
    # Highlight heavy buyers: red when this CPF's accumulated total exceeds 700.
    total_compras = soma_compras['total_compras']
    cor_alerta = 'red' if total_compras and total_compras > 700 else 'black'
    if request.method == 'POST':
        form = ReciboForm(request.POST)
        if form.is_valid():
            novo = form.save(commit=False)
            novo.cadastro = cadastro
            # Grand total is the sum of every per-category value.
            novo.total = (novo.brinquedo_vl + novo.bazar_vl + novo.eletro_vl +
                          novo.relogio_vl + novo.musical_vl + novo.vestuario_vl +
                          novo.perfume_vl)
            novo.save()
            messages.success(request, 'Recibo adicionado com sucesso.')
            return HttpResponseRedirect(reverse('recibo_lista'))
    else:
        form = ReciboForm()
    return render(request,
                  'recibo_novo.html',
                  {
                      'form': form,
                      'cadastro': cadastro,
                      'soma_compras': soma_compras,
                      'cor_alerta': cor_alerta
                  }
                  )
def recibo_delete(request, recibo_id):
    """Delete receipt *recibo_id*, warning the user when it does not exist."""
    try:
        recibo = Recibo.objects.get(pk=recibo_id)
        recibo.delete()
        messages.success(request, 'Recibo {} deletado com sucesso.'.format(recibo_id))
        return HttpResponseRedirect(reverse('recibo_lista'))
    # BUG FIX: Recibo.objects.get raises Recibo.DoesNotExist; the original
    # handler caught Cadastro.DoesNotExist, so a missing receipt id escaped
    # as an unhandled exception instead of producing the warning message.
    except Recibo.DoesNotExist:
        messages.warning(request, 'Recibo {} não encontrado.'.format(recibo_id))
        return HttpResponseRedirect(reverse('recibo_lista'))
def recibo_imprimir(request, recibo_id):
    """Render the printable view of receipt *recibo_id*.

    Redirects back to the receipt list with a warning when the id is unknown.
    """
    try:
        recibo = Recibo.objects.get(pk=recibo_id)
    except Recibo.DoesNotExist:
        messages.warning(request, 'Recibo {} não encontrado.'.format(recibo_id))
        return HttpResponseRedirect(reverse('recibo_lista'))
    return render(request,
                  'recibo_imprimir.html',
                  {
                      'rec': recibo
                  })
def export_excel(request):
    """Export every receipt to an Excel workbook and return it as a download.

    Each column is described by a (header, column width, attribute path)
    tuple; dotted paths (e.g. 'cadastro.cpf') traverse the related
    registration record.
    """
    qs = Recibo.objects.all()
    columns = [
        ("Recibo", 10, 'id'),
        ("Data/Hora", 20, 'data_hora'),
        ("Senha", 10, 'cadastro.senha'),
        ("CPF", 20, 'cadastro.cpf'),
        ("Nome", 35, 'cadastro.nome'),
        ("E-mail", 30, 'cadastro.email'),
        ("Qt Brinquedo", 10, 'brinquedo_qt'),
        ("Vl Brinquedo", 10, 'brinquedo_vl'),
        ("Qt Bazar", 10, 'bazar_qt'),
        ("Vl Bazar", 10, 'bazar_vl'),
        ("Qt Eletro", 10, 'eletro_qt'),
        ("Vl Eletro", 10, 'eletro_vl'),
        ("Qt Relogio", 10, 'relogio_qt'),
        ("Vl Relogio", 10, 'relogio_vl'),
        ("Qt Musical", 10, 'musical_qt'),
        ("Vl Musical", 10, 'musical_vl'),
        ("Qt Vestuario", 10, 'vestuario_qt'),
        ("Vl Vestuario", 10, 'vestuario_vl'),
        ("Qt Perfume", 10, 'perfume_qt'),
        ("Vl Perfume", 10, 'perfume_vl'),
        ("Vl Total", 10, 'total')
    ]
    qtw = QuerysetToWorkbook(qs, columns, filename='Recibos')
    qtw.build_workbook()
    return qtw.response()
| 1.921875 | 2 |
setup.py | karlicoss/google_takeout_parser | 10 | 12758352 | from pathlib import Path
from setuptools import setup, find_packages
# Long description and pinned requirements are read straight from the
# repository files, so README.md and requirements.txt are the single source
# of truth for both.
long_description = Path("README.md").read_text()
reqs = Path("requirements.txt").read_text().strip().splitlines()
pkg = "google_takeout_parser"
setup(
    name=pkg,
    version="0.1.0",
    url="https://github.com/seanbreckenridge/google_takeout_parser",
    author="<NAME>",
    author_email="<EMAIL>",
    description=(
        """Parses data out of your Google Takeout (History, Activity, Youtube, Locations, etc...)"""
    ),
    long_description=long_description,
    long_description_content_type="text/markdown",
    license="MIT",
    packages=find_packages(
        include=["google_takeout_parser", "google_takeout_parser.parse_html"]
    ),
    install_requires=reqs,
    # Ship the py.typed marker so type checkers pick up inline annotations.
    package_data={pkg: ["py.typed"]},
    zip_safe=False,
    keywords="google data parsing",
    python_requires=">=3.7",
    entry_points={
        "console_scripts": [
            "google_takeout_parser = google_takeout_parser.__main__:main"
        ]
    },
    extras_require={
        "testing": [
            "pytest",
            "mypy",
            "flake8",
        ],
        # NOTE(review): this marker can never match given python_requires
        # '>=3.7' above — it was probably meant to be '<"3.8"' (when
        # typing_extensions is still needed); confirm before changing.
        ':python_version<"3.7"': [
            "typing_extensions",
        ],
    },
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
    ],
)
| 1.632813 | 2 |
tankonyv/namedtuple_hasznalat.py | bozi6/hello-world | 0 | 12758353 | <filename>tankonyv/namedtuple_hasznalat.py
import collections
# Demonstration of collections.namedtuple helpers:
# _make, _asdict, _fields, _replace, and keyword unpacking of a dict.
# Fixed: the namedtuple typename was misspelled 'Emplyee' (it is what the
# repr prints), and two printed messages had typos ('demosntration',
# 'converstion').
Employee = collections.namedtuple('Employee', ['name', 'age', 'designation'])
E = Employee('Alison', '30', 'Software Engineer')
E1 = ['Tom', '39', 'Sales manager']
Ed = {'name': 'Bob', 'age': 30, 'designation': 'Manager'}
print('The demonstration for using namedtuple as iterable is : ')
print(Employee._make(E1))  # build an instance from any iterable
print('\n')
print("The demonstration of OrderedDict instance using namedtuple is : ")
print(E._asdict())
print("\n")
print("The demonstration of conversion of namedtuple instance to dict is :")
print(Employee(**Ed))  # keyword-unpack a plain dict into the tuple type
print("\n")
print("All the fields of Employee are :")
print(E._fields)
print("\n")
print("The demonstration of replace() that modifies namedtuple is : ")
print(E._replace(name='Bob'))  # returns a new tuple; E itself is immutable
| 3.734375 | 4 |
server/urls.py | eddowh/flask-react-gae-multiuser-blog | 1 | 12758354 | # -*- coding: utf-8 -*-
from settings import ROOT_URL
import blogs.resources
import users.resources
def get_user_uri(username):
    """Return the absolute URI of the user resource for *username*."""
    return ROOT_URL + users.resources.api.url_for(
        users.resources.UserAPI, username=username
    )
def get_user_blogpost_reaction_uri(username, post_id, reaction_id):
    """Return the absolute URI of one reaction on a user's blog post."""
    return ROOT_URL + blogs.resources.api.url_for(
        blogs.resources.UserBlogPostReactionAPI,
        username=username, post_id=post_id, reaction_id=reaction_id
    )
def get_user_blogpost_reactions_uri(username, post_id):
    """Return the absolute URI of the reaction collection for a blog post."""
    return ROOT_URL + blogs.resources.api.url_for(
        blogs.resources.UserBlogPostReactionsAPI,
        username=username, post_id=post_id
    )
def get_user_blogpost_comments_uri(username, post_id):
    """Return the absolute URI of the comment collection for a blog post."""
    return ROOT_URL + blogs.resources.api.url_for(
        blogs.resources.UserBlogPostCommentsAPI,
        username=username, post_id=post_id
    )
def get_user_blogpost_uri(username, post_id):
    """Return the absolute URI of a single blog post of *username*."""
    return ROOT_URL + blogs.resources.api.url_for(
        blogs.resources.UserBlogPostAPI,
        username=username, post_id=post_id
    )
def get_user_blogposts_uri(username):
    """Return the absolute URI of the blog-post collection of *username*."""
    return ROOT_URL + blogs.resources.api.url_for(
        blogs.resources.UserBlogPostsAPI, username=username
    )
def get_user_reactions_uri(username):
    """Return the absolute URI of all reactions made by *username*."""
    return ROOT_URL + blogs.resources.api.url_for(
        blogs.resources.UserReactionsAPI, username=username
    )
def get_user_comment_uri(username, comment_id):
    """Return the absolute URI of a single comment made by *username*."""
    return ROOT_URL + blogs.resources.api.url_for(
        blogs.resources.UserCommentAPI,
        username=username, comment_id=comment_id
    )
def get_user_comments_uri(username):
    """Return the absolute URI of the comment collection of *username*."""
    return ROOT_URL + blogs.resources.api.url_for(
        blogs.resources.UserCommentsAPI, username=username
    )
def get_comment_replies_uri(username, comment_id):
    """Return the absolute URI of the replies to one of *username*'s comments."""
    return ROOT_URL + blogs.resources.api.url_for(
        blogs.resources.CommentRepliesAPI,
        username=username, comment_id=comment_id
    )
| 2.078125 | 2 |
app/tests/api_tests/test_exclusions.py | AirWalk-Digital/airview-api | 2 | 12758355 | <filename>app/tests/api_tests/test_exclusions.py
from datetime import datetime
import pytest
from pprint import pprint
from airview_api.models import (
TechnicalControlSeverity,
Exclusion,
ExclusionState,
MonitoredResourceState,
SystemStage,
TechnicalControlAction,
)
from tests.common import client
from tests.factories import *
from dateutil import parser
def setup():
    """Per-test fixture (pytest calls this before each test in the module).

    Resets the factories and seeds: two systems, one environment, one
    application (id 11) with a reference, and two technical controls linked
    to the application as app-tech-controls 33 and 340.
    """
    reset_factories()
    SystemFactory(id=1, name="one", stage=SystemStage.BUILD)
    SystemFactory(id=2, name="two", stage=SystemStage.BUILD)
    EnvironmentFactory(id=1)
    ApplicationFactory(id=11, parent_id=None, name="svc 13", environment_id=1)
    ApplicationReferenceFactory(
        id=311, application_id=11, type="app-ref", reference="app-11"
    )
    TechnicalControlFactory(
        id=22,
        name="ctl1",
        reference="control_a",
        control_action=TechnicalControlAction.LOG,
        system_id=1,
        severity=TechnicalControlSeverity.HIGH,
    )
    TechnicalControlFactory(
        id=230,
        name="ctl2",
        reference="control_5",
        control_action=TechnicalControlAction.LOG,
        system_id=2,
        severity=TechnicalControlSeverity.HIGH,
    )
    ApplicationTechnicalControlFactory(
        id=33, application_id=11, technical_control_id=22
    )
    ApplicationTechnicalControlFactory(
        id=340, application_id=11, technical_control_id=230
    )
def add_get_items_to_db():
    """Seed data for the GET/PUT tests.

    Creates exclusion 44 (resource 55, PENDING) on app-tech-control 33 and
    exclusion 45 (resources 56 PENDING and 57 ACTIVE) on app-tech-control
    340, plus an extra application that should never appear in results.
    """
    ExclusionFactory(
        id=44,
        application_technical_control_id=33,
        summary="sss",
        mitigation="mmm",
        impact=3,
        probability=4,
        is_limited_exclusion=True,
        end_date=datetime(1, 1, 1),
        notes="nnn",
    )
    MonitoredResourceFactory(
        id=55,
        exclusion_id=44,
        reference="res-a",
        exclusion_state=ExclusionState.PENDING,
        monitoring_state=MonitoredResourceState.FIXED_AUTO,
        last_modified=datetime(1, 1, 1),
        last_seen=datetime(2, 1, 1),
        application_technical_control_id=33,
    )
    # unexpected other data
    ApplicationFactory(id=12, parent_id=None, name="svc 13", environment_id=1)
    ApplicationReferenceFactory(
        id=312, application_id=12, type="app-ref", reference="app-svc-13"
    )
    ApplicationTechnicalControlFactory(
        id=34, application_id=12, technical_control_id=22
    )
    ExclusionFactory(
        id=45,
        application_technical_control_id=340,
        summary="sss",
        mitigation="mmm",
        impact=3,
        probability=4,
        is_limited_exclusion=True,
        end_date=datetime(1, 1, 1),
        notes="nnn",
    )
    MonitoredResourceFactory(
        id=56,
        exclusion_id=45,
        reference="res-5",
        exclusion_state=ExclusionState.PENDING,
        monitoring_state=MonitoredResourceState.FIXED_AUTO,
        last_modified=datetime(1, 1, 1),
        last_seen=datetime(2, 1, 1),
        application_technical_control_id=340,
    )
    MonitoredResourceFactory(
        id=57,
        exclusion_id=45,
        reference="res-6",
        exclusion_state=ExclusionState.ACTIVE,
        monitoring_state=MonitoredResourceState.FIXED_AUTO,
        last_modified=datetime(1, 1, 1),
        last_seen=datetime(2, 1, 1),
        application_technical_control_id=340,
    )
def test_exclusions_post_ok_for_new_resources(client):
    """
    Given: An empty exclusions collection, linked app controls, existing resources
    When: The api is called with an exclusion request naming resources not yet monitored
    Then: The exclusion is persisted and new pending resources are created, 201 status
    """
    # Arrange
    MonitoredResourceFactory(
        application_technical_control_id=33,
        reference="res-a",
        monitoring_state=MonitoredResourceState.FIXED_AUTO,
        last_modified=datetime(1, 1, 1),
        last_seen=datetime(2, 1, 1),
    )
    MonitoredResourceFactory(
        application_technical_control_id=33,
        reference="res-b",
        monitoring_state=MonitoredResourceState.FIXED_AUTO,
        last_modified=datetime(1, 1, 1),
        last_seen=datetime(2, 1, 1),
    )
    # res-c / res-d are NOT in the db yet; they must be created by the POST.
    data = {
        "applicationTechnicalControlId": 33,
        "summary": "sum a",
        "mitigation": "mit b",
        "probability": 1,
        "impact": 2,
        "resources": ["res-c", "res-d"],
        "isLimitedExclusion": True,
        "endDate": "2022-01-01T00:00:00.000000Z",
        "notes": "notes c",
    }
    # Act
    resp = client.post("/exclusions/", json=data)
    print(resp.get_json())
    # Assert
    assert resp.status_code == 201
    exclusion = db.session.query(Exclusion).first()
    assert exclusion.application_technical_control_id == 33
    assert exclusion.summary == data["summary"]
    assert exclusion.mitigation == data["mitigation"]
    assert exclusion.probability == data["probability"]
    assert exclusion.impact == data["impact"]
    assert exclusion.is_limited_exclusion == data["isLimitedExclusion"]
    assert exclusion.end_date == datetime(2022, 1, 1, 0, 0)
    assert exclusion.notes == data["notes"]
    assert len(exclusion.resources) == 2
    assert exclusion.resources[0].reference == "res-c"
    assert exclusion.resources[1].reference == "res-d"
    assert exclusion.resources[0].exclusion_state == ExclusionState.PENDING
    assert exclusion.resources[1].exclusion_state == ExclusionState.PENDING
    assert exclusion.resources[0].exclusion_id == exclusion.id
    assert exclusion.resources[1].exclusion_id == exclusion.id
def test_exclusions_post_ok_for_existing_resources(client):
    """
    Given: An empty exclusions collection, linked app controls, existing resources
    When: The api is called with an exclusion request naming already-monitored resources
    Then: The exclusion request is persisted & linked to the existing resources, 201 status
    """
    # Arrange
    MonitoredResourceFactory(
        application_technical_control_id=33,
        reference="res-a",
        monitoring_state=MonitoredResourceState.FIXED_AUTO,
        last_modified=datetime(1, 1, 1),
        last_seen=datetime(2, 1, 1),
    )
    MonitoredResourceFactory(
        application_technical_control_id=33,
        reference="res-b",
        monitoring_state=MonitoredResourceState.FIXED_AUTO,
        last_modified=datetime(1, 1, 1),
        last_seen=datetime(2, 1, 1),
    )
    data = {
        "applicationTechnicalControlId": 33,
        "summary": "sum a",
        "mitigation": "mit b",
        "probability": 1,
        "impact": 2,
        "resources": ["res-a", "res-b"],
        "isLimitedExclusion": True,
        "endDate": "2022-01-01T00:00:00.000000Z",
        "notes": "notes c",
    }
    # Act
    resp = client.post("/exclusions/", json=data)
    print(resp.get_json())
    # Assert
    assert resp.status_code == 201
    exclusion = db.session.query(Exclusion).first()
    assert exclusion.application_technical_control_id == 33
    assert exclusion.summary == data["summary"]
    assert exclusion.mitigation == data["mitigation"]
    assert exclusion.probability == data["probability"]
    assert exclusion.impact == data["impact"]
    assert exclusion.is_limited_exclusion == data["isLimitedExclusion"]
    assert exclusion.end_date == datetime(2022, 1, 1, 0, 0)
    assert exclusion.notes == data["notes"]
    assert len(exclusion.resources) == 2
    assert exclusion.resources[0].reference == "res-a"
    assert exclusion.resources[1].reference == "res-b"
    assert exclusion.resources[0].exclusion_state == ExclusionState.PENDING
    assert exclusion.resources[1].exclusion_state == ExclusionState.PENDING
    assert exclusion.resources[0].exclusion_id == exclusion.id
    assert exclusion.resources[1].exclusion_id == exclusion.id
def test_exclusions_bad_request_for_missing_app_tech_control(client):
    """
    Given: An empty exclusions collection, unlinked app/controls
    When: When the api is called with an exclusion request for missing app tech control
    Then: 400 (bad request), no persistence
    """
    # (Docstring fixed: the endpoint returns 400, not 404, as asserted below.)
    # Arrange
    data = {
        "applicationTechnicalControlId": 999,
        "summary": "sum a",
        "mitigation": "mit b",
        "probability": 1,
        "impact": 2,
        "resources": ["res-a", "res-b"],
        "isLimitedExclusion": True,
        "endDate": "2022-01-01T00:00:00.000Z",
        "notes": "notes c",
    }
    # Act
    resp = client.post("/exclusions/", json=data)
    # Assert
    assert resp.status_code == 400
    assert len(db.session.query(Exclusion).all()) == 0
def test_exclusions_post_bad_request_for_duplicate_resources(client):
    """
    Given: An existing exclusion in the db
    When: When the api is called with an exclusion request for pre existing resources
    Then: 400 (bad request), no new records persisted
    """
    # Arrange: res-a is already attached to exclusion 44.
    ExclusionFactory(
        id=44,
        application_technical_control_id=33,
        summary="sss",
        mitigation="mmm",
        impact=3,
        probability=4,
        is_limited_exclusion=True,
        end_date=datetime(1, 1, 1),
        notes="nnn",
    )
    MonitoredResourceFactory(
        id=55,
        exclusion_id=44,
        reference="res-a",
        exclusion_state=ExclusionState.PENDING,
        monitoring_state=MonitoredResourceState.FIXED_AUTO,
        last_modified=datetime(1, 1, 1),
        last_seen=datetime(2, 1, 1),
        application_technical_control_id=33,
    )
    data = {
        "applicationTechnicalControlId": 33,
        "summary": "sum a",
        "mitigation": "mit b",
        "probability": 1,
        "impact": 2,
        "resources": ["res-a", "res-b"],
        "isLimitedExclusion": True,
        "endDate": "2022-01-01 00:00:00.000",
        "notes": "notes c",
    }
    # Act
    resp = client.post("/exclusions/", json=data)
    # Assert: counts unchanged from the arranged state.
    assert resp.status_code == 400
    assert len(db.session.query(Exclusion).all()) == 1
    assert len(db.session.query(MonitoredResource).all()) == 1
def test_exclusions_post_ok_for_different_resources_resources(client):
    """
    Given: An existing exclusion in the db
    When: When the api is called with an exclusion request for non-existing resources
    Then: 201, new exclusion created
    """
    # NOTE(review): the doubled "resources_resources" in the test name looks
    # like a typo, but renaming would change test-report identifiers.
    # Arrange
    ExclusionFactory(
        id=44,
        application_technical_control_id=33,
        summary="sss",
        mitigation="mmm",
        impact=3,
        probability=4,
        is_limited_exclusion=True,
        end_date=datetime(1, 1, 1),
        notes="nnn",
    )
    MonitoredResourceFactory(
        id=55,
        exclusion_id=44,
        reference="res-a",
        exclusion_state=ExclusionState.PENDING,
        monitoring_state=MonitoredResourceState.FIXED_AUTO,
        last_modified=datetime(1, 1, 1),
        last_seen=datetime(2, 1, 1),
        application_technical_control_id=33,
    )
    data = {
        "applicationTechnicalControlId": 33,
        "summary": "sum a",
        "mitigation": "mit b",
        "probability": 1,
        "impact": 2,
        "resources": ["res-b", "res-c"],
        "isLimitedExclusion": True,
        "endDate": "2022-01-01T00:00:00.000000Z",
        "notes": "notes c",
    }
    # Act
    resp = client.post("/exclusions/", json=data)
    # Assert
    assert resp.status_code == 201
    assert len(db.session.query(Exclusion).all()) == 2
    assert len(db.session.query(MonitoredResource).all()) == 3
    exclusion = db.session.query(Exclusion).filter(Exclusion.id != 44).first()
    assert exclusion.application_technical_control_id == 33
    assert exclusion.summary == data["summary"]
    assert exclusion.mitigation == data["mitigation"]
    assert exclusion.probability == data["probability"]
    assert exclusion.impact == data["impact"]
    assert exclusion.is_limited_exclusion == data["isLimitedExclusion"]
    assert exclusion.end_date == datetime(2022, 1, 1, 0, 0)
    assert exclusion.notes == data["notes"]
def test_exclusions_get_returns_correct_response(client):
    """
    Given: Existing exclusion & resources in the db
    When: When the api is called to get exclusion resources by system
    Then: 200, only system 1's resource is returned with its app references
    """
    # Arrange
    add_get_items_to_db()
    # Act
    resp = client.get("/systems/1/exclusion-resources/")
    # Assert
    data = resp.get_json()
    assert resp.status_code == 200
    assert len(data) == 1
    item = data[0]
    assert item["id"] == 55
    assert item["technicalControlReference"] == "control_a"
    assert item["reference"] == "res-a"
    assert item["state"] == "PENDING"
    assert len(item["applicationReferences"]) == 1
    assert item["applicationReferences"][0]["type"] == "app-ref"
    assert item["applicationReferences"][0]["reference"] == "app-11"
def test_exclusions_get_filters_out_by_state(client):
    """
    Given: Existing exclusion & resources in the db (system 2 has one PENDING
           and one ACTIVE resource)
    When: When the api is called with a state=PENDING filter
    Then: 200, only the PENDING resource is returned
    """
    # Arrange
    add_get_items_to_db()
    # Act
    resp = client.get("/systems/2/exclusion-resources/?state=PENDING")
    # Assert: resource 57 (ACTIVE) is filtered out.
    data = resp.get_json()
    assert resp.status_code == 200
    assert len(data) == 1
    item = data[0]
    assert item["id"] == 56
def test_exclusions_get_handles_invalid_filter(client):
    """
    Given: Existing exclusion & resources in the db
    When: When the api is called to get exclusions by system with a bad filter
    Then: 200, empty array returned
    """
    # Arrange
    add_get_items_to_db()
    # Act: XXXXX is not a valid ExclusionState value.
    resp = client.get("/systems/2/exclusion-resources/?state=XXXXX")
    # Assert
    data = resp.get_json()
    assert resp.status_code == 200
    assert len(data) == 0
def test_exclusion_resources_put_bad_request_for_id_mismatch(client):
    """
    Given: Existing exclusion & resources in the db
    When: When the api is called to update an exclusion resource with url & payload id mismatch
    Then: 400, no data changed
    """
    # Arrange
    add_get_items_to_db()
    data = {
        "id": 55,
        "technicalControlReference": "control_a",
        "reference": "res-a",
        "state": "ACTIVE",
    }
    # Act: URL says 999, payload says 55.
    resp = client.put("/exclusion-resources/999/", json=data)
    # Assert: resource 55 keeps its original PENDING state.
    assert resp.status_code == 400
    item = db.session.query(MonitoredResource).get(55)
    assert item.exclusion_state == ExclusionState.PENDING
def test_exclusion_resources_put_conflict_for_invalid_exclusion(client):
    """
    Given: Existing exclusion & resources in the db
    When: When the api is called to update an exclusion which does not yet exist
    Then: 409 (conflict), no data changed
    """
    # Arrange
    add_get_items_to_db()
    data = {
        "id": 999,
        "technicalControlReference": "control_a",
        "reference": "res-a",
        "state": "ACTIVE",
    }
    # Act: id 999 matches no monitored resource.
    resp = client.put("/exclusion-resources/999/", json=data)
    # Assert: existing resource 55 is untouched.
    assert resp.status_code == 409
    item = db.session.query(MonitoredResource).get(55)
    assert item.exclusion_state == ExclusionState.PENDING
def test_exclusion_resources_put_updates_record(client):
    """
    Given: Existing exclusion & resources in the db
    When: When the api is called to update an exclusion resource with a full payload
    Then: 204, the resource's exclusion state is persisted as ACTIVE
    """
    # (Docstring fixed: it was copy-pasted from the GET bad-filter test.)
    # Arrange
    add_get_items_to_db()
    data = {
        "id": 55,
        "technicalControlReference": "control_a",
        "reference": "res-a",
        "state": "ACTIVE",
    }
    # Act
    resp = client.put("/exclusion-resources/55/", json=data)
    # Assert
    assert resp.status_code == 204
    item = db.session.query(MonitoredResource).get(55)
    assert item.exclusion_state == ExclusionState.ACTIVE
def test_exclusion_resources_put_updates_record_with_sparse_response(client):
    """
    Given: Existing exclusion & resources in the db
    When: When the api is called to update an exclusion resource with a sparse
          payload (id and state only)
    Then: 204, the resource's exclusion state is persisted as ACTIVE
    """
    # (Docstring fixed: it was copy-pasted from the GET bad-filter test.)
    # Arrange
    add_get_items_to_db()
    data = {
        "id": 55,
        "state": "ACTIVE",
    }
    # Act
    resp = client.put("/exclusion-resources/55/", json=data)
    # Assert
    assert resp.status_code == 204
    item = db.session.query(MonitoredResource).get(55)
    assert item.exclusion_state == ExclusionState.ACTIVE
| 1.945313 | 2 |
parsing/domain/rule.py | alexandrustoica/parsing.system | 0 | 12758356 | <filename>parsing/domain/rule.py
from typing import List
from parsing.domain.symbol import Symbol
from parsing.domain.non_terminal import NonTerminal
class Rule:
    """A production rule of a context-free grammar: ``left -> right``."""

    def __init__(self, left: NonTerminal, right: List[Symbol]):
        self.left = left
        self.right = right

    def __str__(self) -> str:
        # Right-hand side symbols are concatenated without separators.
        return "{} -> {}".format(str(self.left), "".join(str(symbol) for symbol in self.right))

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes Rule
        # unhashable — confirm instances are never used in sets/dict keys.
        return self.left == other.left and self.right == other.right

    @staticmethod
    def from_string(string: str):
        """Parse a compact rule such as 'S->aB'.

        Spaces are stripped and the right side is iterated character by
        character, so this only supports single-character symbols.
        """
        result = string.replace(' ', '').split('->')
        return Rule(NonTerminal(result[0]), [Symbol(x) for x in result[1]])

    @staticmethod
    def from_complex_string(string: str):
        """Parse a spaced rule such as 'S -> a B'; an empty right side is ε."""
        result = string.split(' -> ')
        return Rule(NonTerminal(result[0].replace(' ', '')),
                    [Symbol(x) for x in Rule.__get_symbols_from_string(result)])

    @staticmethod
    def __get_symbols_from_string(result):
        # An empty right-hand side is represented by the epsilon symbol.
        return ['ε'] if result[1].split(' ') == [''] else result[1].split(' ')

    @staticmethod
    def from_parser_item(parser_item):
        """Rebuild a plain Rule from a parser item (dotted rule)."""
        return Rule(parser_item.left, parser_item.right.all_symbols)
| 3.0625 | 3 |
TemplateV2.py | T0UGH/NBAComposer | 0 | 12758357 | <filename>TemplateV2.py
import random
# Placeholder tags that appear in the commentary templates below.
# NOTE(review): '<away_star_score>' is listed twice, while tags used by the
# templates such as '<away_star_name>', '<away_team_score>' and '<score_gap>'
# are missing — verify against the template-filling code before relying on
# this list.
tag_list = ['<time>', '<home_team_name>', '<home_team_score>', '<away_team_name>',
            '<away_star_score>', '<home_star_name>', '<home_star_score>', '<away_star_score>']
def choice_template(template_type: int) -> str:
    """Return a random Chinese commentary template for the given game situation.

    Types 0-13 describe the away team driving the game (extending a lead,
    defending, chasing, ...), types 16-27 mirror them for the home team, and
    types 14/15 cover a tied, back-and-forth stretch. Placeholders such as
    <time>, <away_team_name> or <score_gap> are substituted by the caller.

    Fixed: two misspelled placeholders — '<away_ster_score>' (type 8) and
    '<home_ster_score>' (type 24) — which a tag-substitution pass would have
    left unreplaced in the generated text.
    """
    type_0_templates = [
        "<time>,<away_team_name>队手感火热,在<away_star_name>的带领下打出一波<away_team_score>分的进攻波,以<home_star_name>为核心的<home_team_name>队奋力直追,不过分差依然被拉大都<score_gap>分。",
        "<time>,<away_team_name>队的<away_star_name>表现神勇,连突带投,顶着<home_team_name>队同样疯狂的进攻火力,帮助<away_team_name>队拉大分差。",
        "<time>,<home_team_name>队的<home_star_name>与<away_team_name>队的<away_star_name>展开对轰,怎奈<away_star_name>技高一筹,<away_team_name>成功将分差拉大到<score_gap>分。"]
    type_1_templates = ["<time>,<away_team_name>队虽然手感不顺,但防守端在<away_star_name>的指挥下表现的十分顽强,依然将分差拉大到<score_gap>分。",
                        "<time>,<away_team_name>队在篮下建立起钢铁防线,<home_team_name>队的<home_star_name>等大将得分异常艰难, 似乎难以扭转颓势。",
                        "<time>,防守赢得总冠军似乎是一条永恒的真理,在<away_star_name>的带领下<away_team_name>的防守出色到连一只苍蝇都飞不过去,<home_team_name>显得有些无可奈何,分差被越拉越大。"]
    type_2_templates = [
        "<time>,<away_team_name>队的<away_star_name>似乎变得不可阻挡,他不仅在进攻端为球队贡献了<away_star_score>分,防守端也出色的完成了防守任务,<home_team_name>队仿佛梦游了一般,落后到<score_gap>分。",
        "<time>,在<away_star_name>的带领下,<away_team_name>在攻防两端均打出统治力,比分被越拉越大。"]
    type_3_templates = ["<time>,在<away_star_name>的带领下,<away_team_name>继续扩大着领先优势。",
                        "<time>,<away_team_name>多点开花,打出一波<away_team_score>分的进攻波,领先优势更加稳固了。"]
    type_4_templates = [
        "<time>,<away_team_name>队进攻端表现神勇,在<away_star_name>的带领下打出一波<away_team_score>分的进攻波,建立起了<score_gap>分的领先优势。",
        "<time>,<home_team_name>队的<home_star_name>表现神勇,接连砍分;怎奈<away_team_name>,在<away_star_name>的梳理下,进攻端更加高效,成功取得<score_gap>分的领先。",
        "<time>,<home_team_name>队的<home_star_name>与<away_team_name>队的<away_star_name>展开对轰,怎奈<away_star_name>技高一筹,领先到了<score_gap>分。"]
    type_5_templates = ["<time>,在外线接连打铁的情况下,凭借有效的防守,<away_team_name>还是建立起了<score_gap>分的领先。",
                        "<time>,强硬的防守让双方的得分都变得的艰难,不过随着一波<away_team_score>:<home_team_score>的进攻,<team_name>队还是保持着<score_gap>分的领先。"]
    type_6_templates = [
        "<time>,<away_team_name>队的<away_star_name>打破了场上的僵局,得到<away_star_score>分,帮<away_team_name>队领先到<score_gap>分。",
        "<time>,在<away_star_name>的带领下,<away_team_name>在攻防两端均打出统治力,比分有被拉大的趋势。"]
    type_7_templates = ["<time>,在<away_star_name>的带领下,<away_team_name>建立起了<score_gap>分的领先优势。",
                        "<time>,<away_team_name>与<home_team_name>相比,明显是更加投入的一方,他们稳扎稳打,建立了<score_gap>分的领先。"]
    type_8_templates = [
        "<time>,双方进攻端火力全开,<away_team_name>队更是手热如火,一波<away_team_score>:<home_team_score>的进攻波,缩小比分,让球队看到胜利的曙光。",
        "<time>,面对着巨大的落后和对方猛烈的进攻火力,<away_team_name>队的<away_star_name>奋起直追,得到<away_star_score>分,将分差迫近至<score_gap>分。",
        "<time>,<home_team_name>队的<home_star_name>与<away_team_name>队的<away_star_name>展开对轰,怎奈<away_star_name>技高一筹,将落后缩小到<score_gap>分。"]
    type_9_templates = ["<time>,双方得分效率均有下降,但<away_team_name>队仍然靠着自己的拼搏,奋力直追,分差被大幅缩小。",
                        "<time>,<away_team_name>队奋力防守,<home_team_name>队也不甘示弱,连续的进攻转换,<home_team_name>的优势被渐渐蚕食。"]
    type_10_templates = [
        "<time>,在取得领先之后,<home_team_name>队明显在心态上有些松懈,被<away_team_name>抓住了机会,在<away_star_name>的带领下送上一波<away_team_score>比<home_team_score>的进攻波,缩小了分差。",
        "<time>,失败二字显然不存在于<away_team_name>队的基因中,在<away_star_name>的带领下,全队在攻防两端打出统治力,狠狠咬住比分。",
        "<time>,<away_team_name>手感回暖,其中<away_star_name>连续得分,打出一波<away_team_score>:<home_team_score>的进攻波,缩小落后。"]
    type_11_templates = ["<time>,<away_team_name>不甘示弱,在<away_star_name>的带领下不断追赶比分,缩小分差。",
                         "<time>,<away_team_name>表现得很顽强,把分差缩小到了<score_gap>分。"]
    type_12_templates = [
        "<time>,<home_team_name>的<home_star_name>和<away_team_name>的<away_star_name>协力为球迷奉上了一场对攻大战,然而分差依然十分胶着。",
        "<time>,<home_team_name>的<home_star_name>连砍<home_star_score>分,<away_team_name>的<away_star_name>也贡献了<away_star_score>分,但任谁也无法拉开差距。",
        "<time>,双方打得难解难分,运动战中连连得分,十分胶着,多次打平。"]
    type_13_templates = ["<time>,双方都疯狂打铁,场上的进攻似乎阻塞住了。",
                         "<time>,强而有力的防守力度,让双方的进攻效率都有所下降,但攻防之间,我们可以看到两只球队都实力不俗。"]
    type_14_templates = ["<time>,双方得分有来有往,比分焦灼。"]
    type_15_templates = ["<time>,双方得分有来有往,比分焦灼。"]
    type_16_templates = [
        "<time>,<home_team_name>队手感火热,在<home_star_name>的带领下打出一波<home_team_score>分的进攻波,以<away_star_name>为核心的<away_team_name>队奋力直追,不过分差依然被拉大都<score_gap>分。",
        "<time>,<home_team_name>队的<home_star_name>表现神勇,连突带投,顶着<away_team_name>队同样疯狂的进攻火力,帮助<home_team_name>队拉大分差。",
        "<time>,<away_team_name>队的<away_star_name>与<home_team_name>队的<home_star_name>展开对轰,怎奈<home_star_name>技高一筹,<home_team_name>成功将分差拉大到<score_gap>分。"]
    type_17_templates = ["<time>,<home_team_name>队虽然手感不顺,但防守端在<home_star_name>的指挥下表现的十分顽强,依然将分差拉大到<score_gap>分。",
                         "<time>,<home_team_name>队在篮下建立起钢铁防线,<away_team_name>队的<away_star_name>等大将得分异常艰难, 似乎难以扭转颓势。",
                         "<time>,防守赢得总冠军似乎是一条永恒的真理,在<home_star_name>的带领下<home_team_name>的防守出色到连一只苍蝇都飞不过去,<away_team_name>显得有些无可奈何,分差被越拉越大。"]
    type_18_templates = [
        "<time>,<home_team_name>队的<home_star_name>似乎变得不可阻挡,他不仅在进攻端为球队贡献了<home_star_score>分,防守端也出色的完成了防守任务,<away_team_name>队仿佛梦游了一般,落后到<score_gap>分。",
        "<time>,在<home_star_name>的带领下,<home_team_name>在攻防两端均打出统治力,比分被越拉越大。"]
    type_19_templates = ["<time>,在<home_star_name>的带领下,<home_team_name>继续扩大着领先优势。",
                         "<time>,<home_team_name>多点开花,打出一波<home_team_score>分的进攻波,领先优势更加稳固了。"]
    type_20_templates = [
        "<time>,<home_team_name>队进攻端表现神勇,在<home_star_name>的带领下打出一波<home_team_score>分的进攻波,建立起了<score_gap>分的领先优势。",
        "<time>,<away_team_name>队的<away_star_name>表现神勇,接连砍分;怎奈<home_team_name>,在<home_star_name>的梳理下,进攻端更加高效,成功取得<score_gap>分的领先。",
        "<time>,<away_team_name>队的<away_star_name>与<home_team_name>队的<home_star_name>展开对轰,怎奈<home_star_name>技高一筹,领先到了<score_gap>分。"]
    type_21_templates = ["<time>,在外线接连打铁的情况下,凭借有效的防守,<home_team_name>还是建立起了<score_gap>分的领先。",
                         "<time>,强硬的防守让双方的得分都变得的艰难,不过随着一波<home_team_score>:<away_team_score>的进攻,<team_name>队还是保持着<score_gap>分的领先。"]
    type_22_templates = [
        "<time>,<home_team_name>队的<home_star_name>打破了场上的僵局,得到<home_star_score>分,帮<home_team_name>队领先到<score_gap>分。",
        "<time>,在<home_star_name>的带领下,<home_team_name>在攻防两端均打出统治力,比分有被拉大的趋势。"]
    type_23_templates = ["<time>,在<home_star_name>的带领下,<home_team_name>建立起了<score_gap>分的领先优势。",
                         "<time>,<home_team_name>与<away_team_name>相比,明显是更加投入的一方,他们稳扎稳打,建立了<score_gap>分的领先。"]
    type_24_templates = [
        "<time>,双方进攻端火力全开,<home_team_name>队更是手热如火,一波<home_team_score>:<away_team_score>的进攻波,缩小比分,让球队看到胜利的曙光。",
        "<time>,面对着巨大的落后和对方猛烈的进攻火力,<home_team_name>队的<home_star_name>奋起直追,得到<home_star_score>分,将分差迫近至<score_gap>分。",
        "<time>,<away_team_name>队的<away_star_name>与<home_team_name>队的<home_star_name>展开对轰,怎奈<home_star_name>技高一筹,将落后缩小到<score_gap>分。"]
    type_25_templates = ["<time>,双方得分效率均有下降,但<home_team_name>队仍然靠着自己的拼搏,奋力直追,分差被大幅缩小。",
                         "<time>,<home_team_name>队奋力防守,<away_team_name>队也不甘示弱,连续的进攻转换,<away_team_name>的优势被渐渐蚕食。"]
    type_26_templates = [
        "<time>,在取得领先之后,<away_team_name>队明显在心态上有些松懈,被<home_team_name>抓住了机会,在<home_star_name>的带领下送上一波<home_team_score>比<away_team_score>的进攻波,缩小了分差。",
        "<time>,失败二字显然不存在于<home_team_name>队的基因中,在<home_star_name>的带领下,全队在攻防两端打出统治力,狠狠咬住比分。",
        "<time>,<home_team_name>手感回暖,其中<home_star_name>连续得分,打出一波<home_team_score>:<away_team_score>的进攻波,缩小落后。"]
    type_27_templates = ["<time>,<home_team_name>不甘示弱,在<home_star_name>的带领下不断追赶比分,缩小分差。",
                         "<time>,<home_team_name>表现得很顽强,把分差缩小到了<score_gap>分。"]
    templates = [type_0_templates, type_1_templates, type_2_templates, type_3_templates, type_4_templates,
                 type_5_templates, type_6_templates, type_7_templates, type_8_templates, type_9_templates,
                 type_10_templates, type_11_templates, type_12_templates, type_13_templates, type_14_templates,
                 type_15_templates, type_16_templates, type_17_templates, type_18_templates, type_19_templates,
                 type_20_templates, type_21_templates, type_22_templates, type_23_templates, type_24_templates,
                 type_25_templates, type_26_templates, type_27_templates]
    return random.choice(templates[template_type])
| 2.6875 | 3 |
profile_api/views.py | atulsingh-41189/profile_rest_api_new | 0 | 12758358 | <filename>profile_api/views.py<gh_stars>0
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework.authentication import TokenAuthentication
from rest_framework import viewsets
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.permissions import IsAuthenticated
from profile_api import models
from profile_api import serializers
from profile_api import permissions
# Create your views here.
class HelloApiView(APIView):
    """Demo APIView showing the basic HTTP handler methods."""
    serializer_class = serializers.HelloSerializer

    def get(self, request, format=None):
        """Return a list of APIView features.

        BUG FIX: the parameter was misspelled ``fromat``; DRF passes the URL
        format suffix as the keyword ``format``, so the misspelling would
        raise a TypeError whenever a format suffix was used.
        """
        an_apiview = [
            'Uses HTTP methods as function(get,post,patch,put,delete)',
            'Is similar to traditional Django View',
            'Gives you the most control over you application logic',
            'Is mapped manually to URL',
        ]
        return Response({'message': 'Hello', 'an_apiview': an_apiview})

    def post(self, request):
        """Create a hello message greeting the validated name."""
        serializer = self.serializer_class(data=request.data)
        if serializer.is_valid():
            name = serializer.validated_data.get('name')
            message = f'Hello {name}'
            return Response({'message': message})
        # Invalid payload: echo the field errors back to the client.
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def put(self, request, pk=None):
        """Handle updating an object (demo stub)."""
        return Response({'method': 'PUT'})

    def patch(self, request, pk=None):
        """Handle a partial update of an object (demo stub)."""
        return Response({'method': 'PATCH'})

    def delete(self, request, pk=None):
        """Delete an object (demo stub)."""
        return Response({'method': 'DELETE'})
class HelloViewSet(viewsets.ViewSet):
    """Demo ViewSet mirroring HelloApiView using viewset actions."""

    serializer_class = serializers.HelloSerializer

    def list(self, request):
        """Return a hello message plus a summary of ViewSet features."""
        features = [
            'Uses Actions (list, create, retrieve, update, partial update)',
            'Automatically maps to URLs using Routers',
            'Provides more functionality with less code',
        ]
        return Response({'message': 'Hello', 'a_viewset': features})

    def create(self, request):
        """Validate the payload and greet the supplied name."""
        serializer = self.serializer_class(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        greeting = f"Hello {serializer.validated_data.get('name')}!"
        return Response({'message': greeting})

    def retrieve(self, request, pk=None):
        """Handle getting an object by ID (demo stub)."""
        return Response({'http_method': 'GET'})

    def update(self, request, pk=None):
        """Handle updating an object (demo stub)."""
        return Response({'http_method': 'PUT'})

    def partial_update(self, request, pk=None):
        """Handle updating part of an object (demo stub)."""
        return Response({'http_method': 'PATCH'})

    def destroy(self, request, pk=None):
        """Handle removing an object (demo stub)."""
        return Response({'http_method': 'DELETE'})
class UserProfileViewset(viewsets.ModelViewSet):
    """Handle creating, listing, retrieving and updating user profiles."""
    serializer_class = serializers.UserProfileSerializer
    queryset = models.UserProfile.objects.all()
    # Token auth identifies the caller; UpdateOwnProfile restricts writes
    # to the profile's owner.
    authentication_classes = (TokenAuthentication,)
    permission_classes = (permissions.UpdateOwnProfile,)
    # BUG FIX: was misspelled `fiter_backends`, so DRF never applied the
    # SearchFilter and the ?search= query parameter had no effect.
    filter_backends = (filters.SearchFilter,)
    search_fields = ('name', 'email',)
class UserLoginApiView(ObtainAuthToken):
    """Handle creating user authentication tokens (login endpoint)."""
    # Use the project's configured default renderers for this endpoint.
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class UserProfileFeedViewSet(viewsets.ModelViewSet):
    """CRUD endpoint for profile feed items; each item is owned by its author."""

    authentication_classes = (TokenAuthentication,)
    permission_classes = (permissions.UpdateOwnStatus, IsAuthenticated)
    serializer_class = serializers.ProfileFeedItemSerializer
    queryset = models.ProfileFeedItem.objects.all()

    def perform_create(self, serializer):
        """Attach the authenticated user as the owner of the new feed item."""
        serializer.save(user_profile=self.request.user)
| 2.46875 | 2 |
deepmath/base_classes/__init__.py | mathraim/deepmath | 2 | 12758359 | <filename>deepmath/base_classes/__init__.py
# Re-export the package's public base classes at package level.
# BUG FIX: stray dataset residue fused onto the last import line broke syntax.
from deepmath.base_classes.layer import Layer
from deepmath.base_classes.network import Network
from deepmath.base_classes.optimizer import Optimizer
"""
Question
--------
When comparing the core metabolism of Archaea and Bacteria, what differences and similarities occur?
Method
------
- build Bacteria clade
- build Archaea clade
- REPEAT for varying majority-percentages:
- overlap core metabolisms and print amount of EC numbers inside the intersection and falling off either side
- remove wildcard EC numbers
- END
- build clade pair
- export unified metabolism, coloured by only-Archaea/both/only-Bacteria
Result
------
::
Maj. % Bacteria both Archaea
100%: 0 1 7
90%: 50 40 36
80%: 83 67 47
70%: 103 103 59
60%: 125 129 58
50%: 153 163 72
40%: 191 192 75
30%: 235 229 90
20%: 304 279 83
10%: 400 386 87
1%: 631 653 91
See bacteria_vs_archaea.jpeg
Conclusion
----------
Bacteria and Archaea always share a significant amount of EC numbers, but never all of them. The much bigger group of Bacteria also has many more EC numbers which never occur in Archaea.
This might be because there are more known Bacteria organisms than known Archaea organisms, i.e. a statistical skew. Or it might be because Bacteria are, as a group, more versatile in habitat than Archaea.
The exported graph comparing Bacteria and Archaea directly (at 80% majority) shows several regions (more or less complete pathways) which only occur in either of the clades' core metabolisms.
Which does not mean they do not occur in any individual organism of the other clade!
For example:
Only in Bacteria: 00061 Fatty acid biosynthesis and 00550 Peptidoglycan biosynthesis
Only in Archaea: 00790 Folate biosynthesis and 00900 Terpenoid backbone biosynthesis
Apart from these regions standing out, both clades seem to have evolved different ways of providing redundancy to their common metabolism.
"""
from FEV_KEGG.Drawing import Export
from FEV_KEGG.KEGG.File import cache
from FEV_KEGG.Evolution.Clade import CladePair, Clade
from FEV_KEGG.Graph.Elements import EcNumber
@cache(folder_path='experiments/41', file_name='bacteria_clade')
def getBacteriaClade():
    """Build (and cache) the Bacteria clade with its collective metabolism pre-fetched."""
    clade = Clade('/Bacteria')
    # Fetching the collective metabolism here warms the in-memory cache.
    clade.collectiveMetabolism(excludeMultifunctionalEnzymes=True)
    return clade
@cache(folder_path='experiments/41', file_name='archaea_clade')
def getArchaeaClade():
    """Build (and cache) the Archaea clade with its collective metabolism pre-fetched."""
    clade = Clade('/Archaea')
    # Fetching the collective metabolism here warms the in-memory cache.
    clade.collectiveMetabolism(excludeMultifunctionalEnzymes=True)
    return clade
if __name__ == '__main__':

    output = ['Maj. %\tBacteria\tboth\tArchaea']

    # Build both clades (collective metabolism is pre-fetched and cached).
    bacteria = getBacteriaClade()
    archaea = getArchaeaClade()

    # Overlap the core metabolisms for a range of majority percentages and
    # count the EC numbers unique to each side vs. shared by both.
    for majority in [100, 90, 80, 70, 60, 50, 40, 30, 20, 10, 1]:

        bacteria_ecs = bacteria.coreMetabolism(majority).getECs()
        archaea_ecs = archaea.coreMetabolism(majority).getECs()

        shared = bacteria_ecs.intersection(archaea_ecs)
        bacteria_only = bacteria_ecs.difference(archaea_ecs)
        archaea_only = archaea_ecs.difference(bacteria_ecs)

        # Wildcard EC numbers are excluded from the reported counts.
        bacteria_only = EcNumber.removeWildcards(bacteria_only)
        shared = EcNumber.removeWildcards(shared)
        archaea_only = EcNumber.removeWildcards(archaea_only)

        output.append('{0}%:\t{1}\t{2}\t{3}'.format(
            majority, len(bacteria_only), len(shared), len(archaea_only)))

    print('\n'.join(output))

    # Export the unified metabolism, coloured by only-Archaea/both/only-Bacteria.
    cladePair = CladePair(bacteria, archaea)
    unifiedEcGraph = cladePair.unifiedMetabolism(colour=True)
    Export.forCytoscape(unifiedEcGraph, file='experiments/41/bacteria_vs_archaea', inCacheFolder=True)
| 2.609375 | 3 |
1265_print_immutable_linked_list_in_reverse.py | claytonjwong/leetcode-py | 1 | 12758361 | <reponame>claytonjwong/leetcode-py<filename>1265_print_immutable_linked_list_in_reverse.py
#
# 1265. Print Immutable Linked List in Reverse
#
# Q: https://leetcode.com/problems/print-immutable-linked-list-in-reverse/
# A: https://leetcode.com/problems/print-immutable-linked-list-in-reverse/discuss/436558/Javascript-Python3-C%2B%2B-1-Liners
#
class Solution:
    def printLinkedListInReverse(self, head: 'ImmutableListNode') -> None:
        """Print the list's values back-to-front without mutating the list."""
        # Walk the list once, collecting nodes, then print while unwinding.
        nodes = []
        while head:
            nodes.append(head)
            head = head.getNext()
        for node in reversed(nodes):
            node.printValue()
| 3.890625 | 4 |
novice/02-05/latihan/Refactoring - Bowler/02-rename-method.py | fakihAlim/zimera | 0 | 12758362 | <reponame>fakihAlim/zimera<filename>novice/02-05/latihan/Refactoring - Bowler/02-rename-method.py
from bowler import Query
def main():
    """Rename FooClass.run to `increment`: first the definition, then call sites."""
    (
        Query()
        .select_function("run")
        .in_class("FooClass")
        # BUG FIX: was "increament"; the call-site rename below uses
        # "increment", so definitions and calls would end up mismatched.
        .rename("increment")
        .execute()
    )
    (
        Query()
        .select_method("run")
        .is_call()
        .rename("increment")
        .execute()
    )
# NOTE(review): main() is never invoked; this script likely needs an
# `if __name__ == "__main__": main()` guard — confirm intended usage.
# BUG FIX: dataset residue fused onto the first import line broke syntax.
from microbit import *
from enum import *

# TM1637 protocol command bytes.
TM1637_CMD1 = 64  # 0x40 data command
TM1637_CMD2 = 192  # 0xC0 address command
TM1637_CMD3 = 128  # 0x80 display control command
# Segment bit patterns for hex digits 0-F on a 7-segment display.
_SEGMENTS = (0x3F, 0x06, 0x5B, 0x4F, 0x66, 0x6D, 0x7D, 0x07, 0x7F, 0x6F, 0x77, 0x7C, 0x39, 0x5E, 0x79, 0x71)
class NIXIETUBE(object):
    """Driver for a 4-digit, 7-segment LED display (TM1637-style nixie tube).

    Bit-bangs the two-wire TM1637 protocol over a clock and a data pin
    selected by the RJ connector the module is plugged into.
    """
    def __init__(self, RJ_pin, intensity=7, number=4):
        # Map the RJ connector to its clock (CLK) / data (DIO) pins.
        if RJ_pin == J1:
            self.__clk = pin1
            self.__dio = pin8
        elif RJ_pin == J2:
            self.__clk = pin2
            self.__dio = pin12
        elif RJ_pin == J3:
            self.__clk = pin13
            self.__dio = pin14
        elif RJ_pin == J4:
            self.__clk = pin15
            self.__dio = pin16
        self.__intensity = intensity % 8  # brightness level 0-7
        self.__LED = number  # number of digits on the display
        self.__ON = 8  # display-on bit of the control command
        self.__buf_d = [0, 0, 0, 0]  # last segment byte written per digit
        self.__clk.write_digital(0)
        self.__dio.write_digital(0)
        self.set_clear()
    def __start(self):
        # TM1637 start condition: pull DIO and CLK low.
        self.__dio.write_digital(0)
        self.__clk.write_digital(0)
    def __stop(self):
        # TM1637 stop condition: DIO rises while CLK is high.
        self.__dio.write_digital(0)
        self.__clk.write_digital(1)
        self.__dio.write_digital(1)
    def __write_data_cmd(self):
        # Send the "data" command byte framed by start/stop conditions.
        self.__start()
        self.__write_byte(TM1637_CMD1)
        self.__stop()
    def __write_dsp_ctrl(self):
        # Send the display-control command with the on-bit and brightness.
        self.__start()
        self.__write_byte(TM1637_CMD3 | self.__ON | self.__intensity)
        self.__stop()
    def __write_byte(self, b):
        # Shift out 8 bits LSB-first, clocking each on a CLK pulse.
        for i in range(8):
            self.__dio.write_digital((b >> i) & 1)
            self.__clk.write_digital(1)
            self.__clk.write_digital(0)
        # Extra clock pulse for the chip's ACK slot.
        self.__clk.write_digital(1)
        self.__clk.write_digital(0)
    def __dat(self, bit, dat):
        # Write one raw segment byte `dat` to digit position `bit`.
        self.__write_data_cmd()
        self.__start()
        self.__write_byte(TM1637_CMD2 | (bit % self.__LED))
        self.__write_byte(dat)
        self.__stop()
        self.__write_dsp_ctrl()
    def set_power_on(self):
        """
        Turn the display on (this is the default state).
        """
        self.__ON = 8
        self.__write_data_cmd()
        self.__write_dsp_ctrl()
    def set_power_off(self):
        """
        Turn the display off.
        """
        self.__ON = 0
        self.__write_data_cmd()
        self.__write_dsp_ctrl()
    def set_intensity(self, val=None):
        """
        Set the display brightness.
        Args:
            val (number): brightness 0-8; 0 turns the display off.
                With no argument, returns the current intensity instead.
        """
        if val is None:
            return self.__intensity
        val = max(0, min(val, 8))
        if val == 0:
            self.set_power_off()
        else:
            self.__ON = 8
            self.__intensity = val - 1
        self.__write_data_cmd()
        self.__write_dsp_ctrl()
    def set_clear(self):
        """
        Clear all digits of the display.
        """
        self.__dat(0, 0)
        self.__dat(1, 0)
        self.__dat(2, 0)
        self.__dat(3, 0)
        self.__buf_d = [0, 0, 0, 0]
    def set_show_bit(self, num, bit=0):
        """
        Show a single digit at the given position.
        Args:
            bit (number): digit position 0-3 (leftmost is 0)
            num (number): value to show, 0-15 (hex digits supported)
        """
        self.__buf_d[bit % self.__LED] = _SEGMENTS[num % 16]
        self.__dat(bit, _SEGMENTS[num % 16])
    def set_show_DP(self, bit=1, show=True):
        """
        Show or hide the decimal point of a digit.
        Args:
            bit (number): digit position 0-3
            show (bool): True to light the decimal point, False to clear it
        """
        bit = bit % self.__LED
        if show:
            self.__dat(bit, self.__buf_d[bit] | 0x80)
        else:
            self.__dat(bit, self.__buf_d[bit] & 0x7F)
    def set_show_num(self, num):
        """
        Display a number on all four digits.
        Args:
            num (number): value to show, -999 to 9999
        """
        if num < 0:
            self.__dat(0, 0x40)  # segment pattern for '-'
            num = -num
        else:
            self.set_show_bit((num // 1000) % 10)
        self.set_show_bit(num % 10, 3)
        self.set_show_bit((num // 10) % 10, 2)
        self.set_show_bit((num // 100) % 10, 1)
if __name__ == '__main__':
    # Demo: count upwards forever on a display attached to connector J2.
    tm = NIXIETUBE(J2)
    n = 0
    while 1:
        tm.set_show_num(n)
        n += 1
| 2.40625 | 2 |
nova/tests/functional/wsgi/test_flavor_manage.py | cloudbase/nova-virtualbox | 4 | 12758364 | <reponame>cloudbase/nova-virtualbox
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import context
from nova import db
from nova import exception as ex
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers as helper
def rand_flavor():
    """Generate a random flavor dict suitable for POST /v2/flavors."""
    # ram/vcpus get +1 so they are never zero (both must be positive).
    return {
        'name': 'name-%s' % helper.generate_random_alphanumeric(10),
        'id': helper.generate_random_alphanumeric(10),
        'ram': int(helper.generate_random_numeric(2)) + 1,
        'disk': int(helper.generate_random_numeric(3)),
        'vcpus': int(helper.generate_random_numeric(1)) + 1,
    }
class FlavorManageFullstack(test.TestCase):
    """Tests for flavors manage administrative command.

    Extension: os-flavors-manage

    os-flavors-manage adds a set of admin functions to the flavors
    resource for create and delete of flavors.

    POST /v2/flavors:

    {
        'name': NAME, # string, required unique
        'id': ID, # string, required unique
        'ram': RAM, # in MB, required
        'vcpus': VCPUS, # int value, required
        'disk': DISK, # in GB, required
        'OS-FLV-EXT-DATA:ephemeral', # in GB, ephemeral disk size
        'is_public': IS_PUBLIC, # boolean
        'swap': SWAP, # in GB?
        'rxtx_factor': RXTX, # ???
    }

    Returns Flavor

    DELETE /v2/flavors/ID

    Functional Test Scope::

        This test starts the wsgi stack for the nova api services, uses an
        in memory database to ensure the path through the wsgi layer to
        the database.
    """
    def setUp(self):
        super(FlavorManageFullstack, self).setUp()
        self.api = self.useFixture(nova_fixtures.OSAPIFixture()).api

    def assertFlavorDbEqual(self, flav, flavdb):
        """Assert that a REST flavor dict matches its database row."""
        # a mapping of the REST params to the db fields
        mapping = {
            'name': 'name',
            'disk': 'root_gb',
            'ram': 'memory_mb',
            'vcpus': 'vcpus',
            'id': 'flavorid',
            'swap': 'swap'
        }
        # BUG FIX: dict.iteritems() is Python-2-only; items() behaves the
        # same here and works on both Python 2 and 3.
        for k, v in mapping.items():
            if k in flav:
                self.assertEqual(flav[k], flavdb[v],
                                 "%s != %s" % (flav, flavdb))

    def test_flavor_manage_func_negative(self):
        """Exercise the API failure conditions."""
        # bad body is 400
        resp = self.api.api_request('flavors', method='POST')
        self.assertEqual(400, resp.status_code)
        # get unknown flavor is 404
        resp = self.api.api_request('flavors/foo')
        self.assertEqual(404, resp.status_code)
        # delete unknown flavor is 404
        resp = self.api.api_request('flavors/foo', method='DELETE')
        self.assertEqual(404, resp.status_code)

    def test_flavor_manage_func(self):
        """Create then delete a flavor, checking the database each time."""
        ctx = context.get_admin_context()
        flav1 = {
            'flavor': rand_flavor(),
        }

        # Create flavor and ensure it made it to the database.
        self.api.api_post('flavors', flav1)
        flav1db = db.flavor_get_by_flavor_id(ctx, flav1['flavor']['id'])
        self.assertFlavorDbEqual(flav1['flavor'], flav1db)

        # Delete flavor and ensure it was removed from the database.
        self.api.api_request('flavors/%s' % flav1['flavor']['id'],
                             method='DELETE')
        self.assertRaises(ex.FlavorNotFound,
                          db.flavor_get_by_flavor_id,
                          ctx, flav1['flavor']['id'])

        # A second delete of the now-missing flavor is a 404.
        resp = self.api.api_request('flavors/%s' % flav1['flavor']['id'],
                                    method='DELETE')
        self.assertEqual(404, resp.status_code)
| 2.015625 | 2 |
# -*- coding: utf-8 -*-
import math
from collections import OrderedDict
import flask
import pandas as pd
import netCDF4
import numpy as np
from bokeh.embed import components
from bokeh.resources import INLINE
from bokeh.templates import RESOURCES
from bokeh.util.string import encode_utf8
from bokeh.models import DatetimeTickFormatter, ColumnDataSource, HoverTool, Plot, Range1d
from bokeh.palettes import RdBu11
from bokeh.models.glyphs import Text, Rect
from bokeh.plotting import figure, show, output_notebook, hplot, vplot
import utils.world_countries as wc
from utils.colormap import RGBAColorMapper
from viz2 import climate_map, timeseries, legend, title, get_slice
app = flask.Flask(__name__)
# Colour mapper over the value range -6..6 using the RdBu11 diverging palette.
colormap = RGBAColorMapper(-6, 6, RdBu11)
@app.route("/")
def index():
# Create layout
c_map = climate_map()
ts = timeseries()
l = legend()
t = title()
map_legend = hplot(c_map, l)
layout = vplot(t, map_legend, ts)
plot_resources = RESOURCES.render(
js_raw=INLINE.js_raw,
css_raw=INLINE.css_raw,
js_files=INLINE.js_files,
css_files=INLINE.css_files,
)
script, div = components(layout, INLINE)
html = flask.render_template(
'embed.html',
plot_script=script,
plot_div=div,
plot_resources=plot_resources,
)
return encode_utf8(html)
if __name__ == "__main__":
app.run(debug=True) | 2.109375 | 2 |
ros2_utils/ros.py | blakermchale/ros2_utils | 0 | 12758366 | <reponame>blakermchale/ros2_utils
from typing import Union
from .math import convert_axes, AxesFrame
from geometry_msgs.msg import Pose, Transform, Twist, Point, Vector3, PoseStamped, Polygon, Point32
from nav_msgs.msg import Odometry, Path
from .structs import NpVector4, NpPose, NpVector3, NpTwist
import numpy as np
def convert_axes_from_msg(msg: Union[Pose, Transform, Twist, Odometry, Path, Polygon], in_axes: AxesFrame, out_axes: AxesFrame):
    """Converts ROS message coordinate frame.

    Decomposes the message into x/y/z translation plus roll/pitch/yaw,
    runs those through `convert_axes` (in_axes -> out_axes), and rebuilds
    a new message of the same type. The input message is not modified.

    :param msg: supported geometry/nav message instance
    :param in_axes: axes convention the message is currently expressed in
    :param out_axes: axes convention to convert to
    :raises ValueError: for unsupported message types
    """
    if isinstance(msg, Pose):
        # Orientation quaternion -> euler angles, convert, rebuild a Pose.
        q = NpVector4.from_ros(msg.orientation)
        x, y, z, roll, pitch, yaw = convert_axes(msg.position.x, msg.position.y, msg.position.z, q.roll, q.pitch, q.yaw, in_axes, out_axes)
        return NpPose(NpVector3.from_xyz(x, y, z), NpVector4.from_rpy(roll, pitch, yaw)).get_msg()
    elif isinstance(msg, Transform):
        # Same as Pose but with translation/rotation field names.
        q = NpVector4.from_ros(msg.rotation)
        x, y, z, roll, pitch, yaw = convert_axes(msg.translation.x, msg.translation.y, msg.translation.z, q.roll, q.pitch, q.yaw, in_axes, out_axes)
        return NpPose(NpVector3.from_xyz(x, y, z), NpVector4.from_rpy(roll, pitch, yaw)).get_tf_msg()
    elif isinstance(msg, Twist):
        # Linear and angular velocity components are converted directly.
        x, y, z, roll, pitch, yaw = convert_axes(msg.linear.x, msg.linear.y, msg.linear.z, msg.angular.x, msg.angular.y, msg.angular.z, in_axes, out_axes)
        return NpTwist(NpVector3.from_xyz(x, y, z), NpVector3.from_xyz(roll, pitch, yaw)).get_msg()
    elif isinstance(msg, Odometry):
        # Convert the embedded pose and twist; covariances are NOT converted.
        out_msg = Odometry()
        pose_msg = msg.pose.pose
        twist_msg = msg.twist.twist
        q = NpVector4.from_ros(pose_msg.orientation)
        x, y, z, roll, pitch, yaw = convert_axes(pose_msg.position.x, pose_msg.position.y, pose_msg.position.z, q.roll, q.pitch, q.yaw, in_axes, out_axes)
        out_msg.pose.pose = NpPose(NpVector3.from_xyz(x, y, z), NpVector4.from_rpy(roll, pitch, yaw)).get_msg()
        x, y, z, roll, pitch, yaw = convert_axes(twist_msg.linear.x, twist_msg.linear.y, twist_msg.linear.z, twist_msg.angular.x, twist_msg.angular.y, twist_msg.angular.z, in_axes, out_axes)
        out_msg.twist.twist = NpTwist(NpVector3.from_xyz(x, y, z), NpVector3.from_xyz(roll, pitch, yaw)).get_msg()
        return out_msg
    elif isinstance(msg, Path):
        # Convert every pose in the path; per-pose headers are preserved.
        out_msg = Path()
        out_msg.header = msg.header
        for pose in msg.poses:
            p = pose.pose
            q = NpVector4.from_ros(p.orientation)
            x, y, z, roll, pitch, yaw = convert_axes(p.position.x, p.position.y, p.position.z, q.roll, q.pitch, q.yaw, in_axes, out_axes)
            o = PoseStamped()
            o.header = pose.header
            o.pose = NpPose(NpVector3.from_xyz(x, y, z), NpVector4.from_rpy(roll, pitch, yaw)).get_msg()
            out_msg.poses.append(o)
        return out_msg
    elif isinstance(msg, Polygon):
        # Points carry no orientation, so zeros are passed for the angles.
        out_msg = Polygon()
        for p in msg.points:
            x, y, z, _, _, _ = convert_axes(p.x, p.y, p.z, 0, 0, 0, in_axes, out_axes)
            o = Point32(x=x, y=y, z=z)
            out_msg.points.append(o)
        return out_msg
    else:
        raise ValueError(f"ROS message type {type(msg)} is not supported")
def msg_contains_nan(msg: Union[Point, Vector3]):
    """Return True if any coordinate of the point/vector message is NaN."""
    if not isinstance(msg, (Point, Vector3)):
        raise ValueError(f"ROS message type {type(msg)} is not supported")
    return any(np.isnan(c) for c in (msg.x, msg.y, msg.z))
| 2.203125 | 2 |
# BUG FIX: stray dataset residue fused onto the first import line broke
# syntax. Import order is preserved because of the star import below.
from rest_framework import viewsets
from rest_framework.generics import ListAPIView
from rest_framework.permissions import AllowAny
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.generics import CreateAPIView, ListAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from jobs.permissions import IsEmployee
from jobs.serializers import ApplicantSerializer, JobSerializer
from jobs.models import Applicant, Job
from .serializers import *
from django.core import serializers
from django.core.serializers import serialize
import json
from django.http.response import HttpResponse
class JobViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only list/retrieve of all jobs; open to anonymous users."""
    serializer_class = JobSerializer
    # Previously only unfilled jobs were exposed:
    # queryset = serializer_class.Meta.model.objects.filter(filled=False)
    queryset = serializer_class.Meta.model.objects.all()
    permission_classes = [AllowAny]
class SearchApiView(ListAPIView):
    """Search jobs via ?location=&position= query params; unfiltered otherwise."""

    serializer_class = JobSerializer
    permission_classes = [AllowAny]

    def get_queryset(self):
        """Filter by location and title only when both params are present."""
        params = self.request.GET
        jobs = self.serializer_class.Meta.model.objects
        if 'location' in params and 'position' in params:
            # Case-insensitive substring match on both fields.
            return jobs.filter(location__icontains=params['location'],
                               title__icontains=params['position'])
        return jobs.all()
class SaveJobApiView(CreateAPIView):
    """POST-only endpoint for an authenticated employee to save a job."""
    serializer_class = ApplicantSerializer
    http_method_names = [u'post']
    permission_classes = [IsAuthenticated, IsEmployee]
    # def perform_create(self, serializer):
    #     serializer.save(user=self.request.user)
    def create(self, request, *args, **kwargs):
        # NOTE(review): this appears to duplicate the parent CreateAPIView
        # behaviour; the commented-out perform_create above suggests binding
        # the request user was intended — confirm before changing either.
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
class SavedJobsAPIView(ListAPIView):
    """List the jobs the authenticated employee has saved."""

    serializer_class = JobSerializer
    permission_classes = [IsAuthenticated, IsEmployee]

    def get_queryset(self):
        """Resolve the user's saved job ids, then fetch the matching jobs."""
        job_ids = list(Applicant.objects.filter(user=self.request.user)
                       .values_list('job_id', flat=True))
        return Job.objects.filter(id__in=job_ids)
@api_view(['GET'])
@permission_classes([IsAuthenticated, IsEmployee])
def already_saved_job_api_view(request, job_id):
    """Return, as serialized JSON, the job with ``job_id`` if anyone saved it.

    BUG FIX: stray dataset residue fused onto the return line broke syntax.
    """
    # NOTE(review): values_list without flat=True yields 1-tuples; Django
    # accepts the queryset for an __in lookup, but flat=True would be
    # clearer — confirm before changing.
    saved_job_id = Applicant.objects.filter(job_id=job_id).values_list('job_id')
    data = serializers.serialize("json", Job.objects.filter(id__in=saved_job_id))
    return HttpResponse(data, content_type="application/json")
#!/usr/bin/env python2
# BUG FIX: stray dataset residue fused onto the shebang line broke it.
from argparse import ArgumentParser

from markovmusic.player import Player

# Command-line interface for the Markov-chain MIDI player.
parser = ArgumentParser()
parser.add_argument('--input',
    default='input/bach', metavar='PATH',
    help='MIDI input, either a single file or a directory')
parser.add_argument('--chain-len',
    type=int, default=4, metavar='LENGTH',
    help='Length of Markov chains to generate')
parser.add_argument('--time-scale', metavar='SCALE',
    type=int, default=1,
    help='Temporal scale')
parser.add_argument('--port',
    default=None, metavar='NAME',
    help='Output MIDI port name')
parser.add_argument('--list-ports',
    action='store_true',
    help='List available MIDI ports')

player = Player(parser.parse_args())
player.run()
| 2.78125 | 3 |
docs/static/reference_code/spacytextblob_example.py | SamEdwardes/spaCy_TextBlob_sentiment | 7 | 12758369 | <filename>docs/static/reference_code/spacytextblob_example.py
import spacy
from spacytextblob.spacytextblob import SpacyTextBlob
# Load the small English pipeline and attach the TextBlob sentiment component.
nlp = spacy.load('en_core_web_sm')
text = "I had a really horrible day. It was the worst day ever! But every now and then I have a really good day that makes me happy."
nlp.add_pipe("spacytextblob")
doc = nlp(text)
# Document-level polarity (negative value -> negative sentiment).
print(doc._.blob.polarity)
# -0.125
# Subjectivity score for the document.
print(doc._.blob.subjectivity)
# 0.9
# Per-phrase assessments: (tokens, polarity, subjectivity, label).
print(doc._.blob.sentiment_assessments.assessments)
# [(['really', 'horrible'], -1.0, 1.0, None), (['worst', '!'], -1.0, 1.0, None), (['really', 'good'], 0.7, 0.6000000000000001, None), (['happy'], 0.8, 1.0, None)]
# Token trigrams of the document.
print(doc._.blob.ngrams())
# [WordList(['I', 'had', 'a']), WordList(['had', 'a', 'really']), ...]
src/pyrin/security/permission/base.py | wilsonGmn/pyrin | 0 | 12758370 | <filename>src/pyrin/security/permission/base.py
# -*- coding: utf-8 -*-
"""
permission base module.
"""
from abc import abstractmethod
import pyrin.security.permission.services as permission_services
from pyrin.core.structs import CoreObject
from pyrin.core.exceptions import CoreNotImplementedError
class PermissionBase(CoreObject):
    """
    Abstract base class for all application permissions.

    Subclasses register themselves with the permission service on
    construction and must provide an id, a string form and an entity form.
    """

    def __init__(self, *args, **options):
        """
        Create and register this permission.

        Subclasses should tailor the accepted parameters to the
        application's design requirements.
        """
        super().__init__()
        permission_services.register_permission(self, **options)

    def __hash__(self):
        """
        Hash value derived from the permission id.

        :rtype: int
        """
        return hash(self.get_id())

    def __eq__(self, other):
        """
        Two permissions are equal when both are PermissionBase and ids match.

        :param PermissionBase other: permission to compare against.

        :rtype: bool
        """
        return isinstance(other, PermissionBase) and other.get_id() == self.get_id()

    def __ne__(self, other):
        """
        Inverse of equality.

        :param PermissionBase other: permission to compare against.

        :rtype: bool
        """
        return not self == other

    @abstractmethod
    def __str__(self):
        """
        Return the string representation; subclasses must implement this.

        :raises CoreNotImplementedError: core not implemented error.

        :rtype: str
        """
        raise CoreNotImplementedError()

    def __repr__(self):
        """
        Mirror the string representation.

        :rtype: str
        """
        return str(self)

    @abstractmethod
    def to_entity(self):
        """
        Return the equivalent entity of this permission; subclasses must implement.

        :raises CoreNotImplementedError: core not implemented error.

        :rtype: BaseEntity
        """
        raise CoreNotImplementedError()

    @abstractmethod
    def get_id(self):
        """
        Return this permission's unique, hashable id; subclasses must implement.

        May be a single value or a combination (e.g. a tuple); it must be
        unique per distinct permission and usable as a dict key.

        :raises CoreNotImplementedError: core not implemented error.

        :rtype: object
        """
        raise CoreNotImplementedError()
| 2.578125 | 3 |
# BUG FIX: stray dataset residue fused onto the import line broke syntax.
from JumpScale import j


def cb():
    """Lazily import and return the BackupFactory class."""
    from .BackupFactory import BackupFactory
    return BackupFactory


j.base.loader.makeAvailable(j, 'clients')
# Register the backup client; cb defers the heavy import until first use.
j.clients._register('backup', cb)
| 1.546875 | 2 |
advent_of_code_2021__day_11_1/grid.py | MaRNG/advent-of-code-2021 | 0 | 12758372 | <filename>advent_of_code_2021__day_11_1/grid.py
class Grid:
    """Octopus energy grid simulator (Advent of Code 2021, day 11)."""

    def __init__(self, grid: [[int]]):
        self.grid = grid
        self.flashes = 0
        self.ticks = 0
        self.height = len(grid)
        self.width = len(grid[0])
        # Cells that flashed during the current tick, as "x;y" keys.
        self.flashed_cells: [str] = []

    def tick(self):
        """Advance one step: energise every cell, cascade flashes, reset."""
        self.ticks += 1

        # Every cell gains one energy.
        for row_idx in range(self.height):
            for col_idx in range(self.width):
                self.grid[row_idx][col_idx] += 1

        # Sweep repeatedly until no new cell crosses the flash threshold.
        changed = True
        while changed:
            changed = False
            for row_idx in range(self.height):
                for col_idx in range(self.width):
                    if self.grid[row_idx][col_idx] > 9 and not self.is_flashed(col_idx, row_idx):
                        self.flash(col_idx, row_idx)
                        changed = True

        # Flashed cells drop back to zero energy.
        for key in self.flashed_cells:
            x_str, y_str = key.split(';')
            self.grid[int(y_str)][int(x_str)] = 0
        self.flashed_cells.clear()

    def flash(self, x: int, y: int):
        """Flash cell (x, y): count it and energise its (up to 8) neighbours."""
        self.flashes += 1
        self.mark_flashed_cell(x, y)
        for dy in (-1, 0, 1):
            for dx in (-1, 0, 1):
                if dx == 0 and dy == 0:
                    continue
                nx, ny = x + dx, y + dy
                if 0 <= nx < self.width and 0 <= ny < self.height:
                    self.grid[ny][nx] += 1

    def is_flashed(self, x: int, y: int):
        """Return True if (x, y) has already flashed during the current tick."""
        return f'{x};{y}' in self.flashed_cells

    def mark_flashed_cell(self, x: int, y: int):
        """Record (x, y) as flashed, at most once per tick."""
        key = f'{x};{y}'
        if key not in self.flashed_cells:
            self.flashed_cells.append(key)

    @staticmethod
    def create_from_lines(lines: [str]):
        """Build a Grid from digit strings, one row per line."""
        return Grid([[int(ch) for ch in line.strip()] for line in lines])
| 3.34375 | 3 |
uninstrumented/special/special.py | timescale/promscale-tracing-demo | 1 | 12758373 | <gh_stars>1-10
import random
import time
from flask import Flask, Response
from flask.json import jsonify
app = Flask(__name__)
def work(mu: float, sigma: float) -> None:
    """Simulate work by sleeping a normally-distributed duration, floored at 0."""
    delay = random.normalvariate(mu, sigma)
    time.sleep(delay if delay > 0.0 else 0.0)
def random_special() -> str:
    """Pick one random special character, after a little simulated work."""
    work(0.0003, 0.0001)
    return random.choice('!@#$%^&*<>,.:;?/+={}[]-_\|~`')
def process_special(c: str) -> str:
    """Process a special character; some are slow and some occasionally fail."""
    work(0.0001, 0.00005)
    slow_chars = {'$', '@', '#', '?', '%'}
    flaky_chars = {'!', '@', '?'}
    if c in slow_chars:
        # These characters take extra processing time.
        work(0.005, 0.0005)
    if c in flaky_chars and random.random() > 0.95:
        # ~5% simulated failure rate for the flaky characters.
        raise Exception(f"FAILED to process {c}")
    return c
def render_special(ch: str) -> Response:
    """Serialise the character into a JSON payload of the form {"char": ch}."""
    work(0.0002, 0.0001)
    return jsonify(char=ch)
@app.route('/')
def special():
    """Generate, process and render one random special character."""
    ch = random_special()
    return render_special(process_special(ch))
if __name__ == '__main__':
    # Run the Flask development server for local testing.
    app.run()
| 2.78125 | 3 |
# BUG FIX: stray dataset residue fused onto the first line broke syntax.
# Forum section ids mapped to their board names (values are runtime data).
fid = {
    "2": "亞洲無碼原創區",
    "15": "亞洲有碼原創區",
    "4": "歐美原創區",
    "25": "國產原創區",
    "5": "動漫原創區",
    "26": "中字原創區",
    "27": "轉帖交流區"
}
base_url = "http://www.t66y.com"  # forum root URL
page_path = "thread0806.php"  # board listing endpoint
page_num = list(range(1, 101))  # pages 1-100
# Browser-like request headers to avoid trivial bot blocking.
headers = {
    "Connection": "keep-alive",
    "Cache-Control": "max-age=0",
    "Upgrade-Insecure-Requests": 1,
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8"
}
class PageHandler(object):
    """Handles a fetched page using a caller-supplied HTTP connection."""

    def __init__(self, con):
        # Private reference to the connection object used in handle().
        self.__con = con

    def handle(self, html):
        """Issue a probe request on the stored connection and dump *html*."""
        self.__con.request("get", "http://www.baidu.com")
        print(html)
| 2.390625 | 2 |
wqpmap/GeoEDF/processor/helper/GeomHelper.py | rkalyanapurdue/processors | 0 | 12758375 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import geopandas as gpd
from shapely.geometry import Polygon
from geoedfframework.utils.GeoEDFError import GeoEDFError
""" Helper module implementing various geometry operations
"""
def geom_distance(lat1, lon1, lat2, lon2):
    """Great-circle (haversine) distance in km between two lat/lon points.

    Raises:
        GeoEDFError: if the computation fails (e.g. non-numeric input).
    """
    try:
        R = 6378.137 # Radius of earth in KM
        dLat = lat2 * math.pi / 180 - lat1 * math.pi / 180
        dLon = lon2 * math.pi / 180 - lon1 * math.pi / 180
        a = math.sin(dLat/2) * math.sin(dLat/2) + math.cos(lat1 * math.pi / 180) * math.cos(lat2 * math.pi / 180) * math.sin(dLon/2) * math.sin(dLon/2)
        c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
        d = R * c
    except Exception as err:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.
        raise GeoEDFError('Could not determine geometry distance') from err
    return d # Km
def geom_diagonal(geom):
    """Length (km) of the diagonal of *geom*'s total bounding box."""
    try:
        # total_bounds is (minx, miny, maxx, maxy), i.e. (lon1, lat1, lon2, lat2).
        lon1 = geom.total_bounds[0]
        lat1 = geom.total_bounds[1]
        lon2 = geom.total_bounds[2]
        lat2 = geom.total_bounds[3]
        d = geom_distance(lat1, lon1, lat2, lon2)
    except Exception as err:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.
        raise GeoEDFError('Could not determine geometry diagonal') from err
    return d # Km
def geom_extent(geom):
    """Half-perimeter (width + height, in km) of *geom*'s bounding box."""
    try:
        d2 = geom_width(geom) + geom_height(geom)
    except Exception as err:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.
        raise GeoEDFError('Could not determine geometry extent') from err
    return d2 # Km
def geom_height(geom):
    """North-south height (km) of *geom*'s bounding box."""
    try:
        lon1 = geom.total_bounds[0]
        lat1 = geom.total_bounds[1]
        lon2 = geom.total_bounds[2]
        lat2 = geom.total_bounds[3]
        # Same longitude on both points: distance is purely latitudinal.
        h = geom_distance(lat1, lon1, lat2, lon1)
    except Exception as err:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.
        raise GeoEDFError('Could not determine geometry height') from err
    return h # Km
def geom_width(geom):
    """East-west width (km) of *geom*'s bounding box, measured at lat1."""
    try:
        lon1 = geom.total_bounds[0]
        lat1 = geom.total_bounds[1]
        lon2 = geom.total_bounds[2]
        lat2 = geom.total_bounds[3]
        # Same latitude on both points: distance is purely longitudinal.
        w = geom_distance(lat1, lon1, lat1, lon2)
    except Exception as err:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.
        raise GeoEDFError('Could not determine geometry width') from err
    return w # Km
def geom_bbox(geom):
    """Return a GeoDataFrame of *geom*'s envelope (axis-aligned bbox) polygons."""
    try:
        polygon = gpd.GeoDataFrame(gpd.GeoSeries(geom.envelope), columns=['geometry'])
    except Exception as err:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.
        raise GeoEDFError('Could not determine geometry bbox') from err
    return polygon
# In case CRS is different
def geom_bbox2(geom):
    """Bounding-box GeoDataFrame built explicitly in EPSG:4326.

    Use when *geom*'s CRS differs from WGS84.
    NOTE(review): crs={'init': 'epsg:4326'} is deprecated in recent
    geopandas/pyproj; 'EPSG:4326' is the modern spelling — confirm the
    installed geopandas version before changing it.
    """
    try:
        # Corners listed counter-clockwise, closing back at the first vertex.
        lon_point_list = [geom.total_bounds[0],geom.total_bounds[2],geom.total_bounds[2],geom.total_bounds[0],geom.total_bounds[0]]
        lat_point_list = [geom.total_bounds[1],geom.total_bounds[1],geom.total_bounds[3],geom.total_bounds[3],geom.total_bounds[1]]
        polygon_geom = Polygon(zip(lon_point_list, lat_point_list))
        crs = {'init': 'epsg:4326'}
        polygon = gpd.GeoDataFrame(index=[0], crs=crs, geometry=[polygon_geom])
    except Exception as err:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.
        raise GeoEDFError('Could not determine geometry bbox2') from err
    return polygon
# Try using the area of the total_bounds polygon in both degrees and meters to generate an approximate "conversion" factor
def geom_area(geom):
    """Approximate area (km^2) of *geom* via a degrees-to-km scale factor.

    The factor is the ratio between the bbox area in km^2 (width*height)
    and the bbox area in squared degrees.
    """
    try:
        factor = geom_width(geom)*geom_height(geom)/geom_bbox(geom).area
        area = factor*geom.area
    except Exception as err:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.
        raise GeoEDFError('Could not determine geometry area') from err
    return area # Km^2
# Use a cartesian projection coordinate system to get true area
# *** Currently crashes kernel ***
def geom_area2(geom):
    """True area (km^2) via reprojection to a metric CRS.

    *** Currently crashes kernel (per original author) — kept for reference. ***
    """
    try:
        geom_m = geom.to_crs(epsg=3857) # or 3395 (WGS 84 compliant)
        # May need to use explicit definition for 3395:
        # proj4.defs("EPSG:3395","+proj=merc +lon_0=0 +k=1 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs")
        a = geom_m.area/10**6
    except Exception as err:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.
        raise GeoEDFError('Could not determine geometry area2') from err
    return a # Km^2
| 2.96875 | 3 |
src/getOCRtext.py | tanvimehta/IDR_CRF_ipy | 3 | 12758376 | <filename>src/getOCRtext.py
import pytesseract
from PIL import Image, ImageEnhance, ImageFilter

# Input image to OCR.
image = "../datasets/SplitYB.png"
im = Image.open(image)
# Denoise, boost contrast, then binarize ('1' = 1-bit pixels) to help OCR.
im = im.filter(ImageFilter.MedianFilter())
enhancer = ImageEnhance.Contrast(im)
im = enhancer.enhance(2)
im = im.convert('1')
# Round-trip through a temp file because pytesseract reads from an image file.
im.save('temp2.jpg')
text = pytesseract.image_to_string(Image.open('temp2.jpg'))
print(text)
| 2.90625 | 3 |
scripts/compute_possible_instructions.py | m-smith/babyai | 411 | 12758377 | <reponame>m-smith/babyai<filename>scripts/compute_possible_instructions.py
#!/usr/bin/env python3
"""
Compute the number of possible instructions in the BabyAI grammar.
"""
from gym_minigrid.minigrid import COLOR_NAMES
def count_Sent():
    """Sent := Sent1 | 'Sent1, then Sent1' | 'Sent1 after you Sent1'."""
    single = count_Sent1()
    # One plain sentence plus the two ordered two-sentence combinations.
    return single + 2 * single * single
def count_Sent1():
    """Sent1 := Clause | 'Clause and Clause'."""
    clause = count_Clause()
    return clause + clause * clause
def count_Clause():
    """Clause := go-to | pick-up | open | put-next."""
    return (
        count_Descr()                            # go to
        + count_DescrNotDoor()                   # pick up
        + count_DescrDoor()                      # open
        + count_DescrNotDoor() * count_Descr()   # put next
    )
def count_DescrDoor():
    """(the|a) x Color x 'door' x Location — 2 articles times colors times locations."""
    # (the|a) Color door Location
    return 2 * count_Color() * count_LocSpec()
def count_DescrBall():
    """Ball descriptions follow the same grammar as door descriptions."""
    return count_DescrDoor()
def count_DescrBox():
    """Box descriptions follow the same grammar as door descriptions."""
    return count_DescrDoor()
def count_DescrKey():
    """Key descriptions follow the same grammar as door descriptions."""
    return count_DescrDoor()
def count_Descr():
    """Any object description: door, ball, box, or key."""
    return sum((count_DescrDoor(), count_DescrBall(), count_DescrBox(), count_DescrKey()))
def count_DescrNotDoor():
    """Any portable object description: ball, box, or key (doors excluded)."""
    return sum((count_DescrBall(), count_DescrBox(), count_DescrKey()))
def count_Color():
    """Number of color qualifiers: the empty qualifier plus each COLOR_NAMES entry."""
    # Empty string or color
    return len([None] + COLOR_NAMES)
def count_LocSpec():
    """Number of location qualifiers: none, or one of four relative positions."""
    return 1 + len(['left', 'right', 'front', 'behind'])
# Print the size of each grammar production, from leaves up to full sentences.
print('DescrKey: ', count_DescrKey())
print('Descr: ', count_Descr())
print('DescrNotDoor: ', count_DescrNotDoor())
print('Clause: ', count_Clause())
print('Sent1: ', count_Sent1())
print('Sent: ', count_Sent())
# Same total in scientific notation for readability.
print('Sent: {:.3g}'.format(count_Sent()))
| 2.734375 | 3 |
Numpy-example/code.py | sagarbhadra7/greyatom-python-for-data-science | 0 | 12758378 | # --------------
# Importing header files
import numpy as np
# Path of the file has been stored in variable called 'path'
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Code starts here
data=np.genfromtxt(path,delimiter=",",skip_header=1)
##print(data)
census=np.concatenate((data,new_record))
#print(census.shape)
#print(census)
# --------------
#Code starts here
age=census[:,0]
##print(age)
max_age=np.max(age)
##print(max_age)
min_age=np.min(age)
##print(min_age)
age_mean=np.mean(age)
##print(age_mean)
age_std=np.std(age)
##print(age_std)
# --------------
import numpy as np
#Code starts here
##race=census[:,2]
##print(race)
race_0=census[census[:,2]==0]
##print(race_0)
race_1=census[census[:,2]==1]
##print(race_1
race_2=census[census[:,2]==2]
##print(race_2)
race_3=census[census[:,2]==3]
##print(race_3)
race_4=census[census[:,2]==4]
##print(race_4)
len_0=len(race_0)
print(len_0)
len_1=len(race_1)
len_2=len(race_2)
len_3=len(race_3)
len_4=len(race_4)
print(len_1)
print(len_2)
print(len_3)
print(len_4)
minority=np.array([len_0,len_1,len_2,len_3,len_4])
minority_race=minority.argmin()
print(minority_race)
# --------------
#Code starts here
import numpy as np
senior_citizens=census[census[:,0]>60]
##print(senior_citizens)
working_hours_sum=np.sum(senior_citizens[:,6])
print(working_hours_sum)
senior_citizens_len=len(senior_citizens)
print(senior_citizens_len)
avg_working_hours=working_hours_sum/senior_citizens_len
print(avg_working_hours)
# --------------
#Code starts here
high=census[census[:,1]>10]
low=census[census[:,1]<=10]
##print(high)
##print(low)
avg_pay_high=np.mean(high[:,7])
avg_pay_low=np.mean(low[:,7])
print(avg_pay_high)
print(avg_pay_low)
| 3.25 | 3 |
azure/functions/_http.py | DalavanCloud/azure-functions-python-worker | 1 | 12758379 | import collections.abc
import types
import typing
from . import _abc
class BaseHeaders(collections.abc.Mapping):
    """Read-only mapping of HTTP headers with case-insensitive keys.

    Keys are normalized to lower case on insertion and lookup.
    """

    def __init__(self, source: typing.Optional[typing.Mapping]=None) -> None:
        self.__http_headers__: typing.Dict[str, str] = {}

        if source is not None:
            for name, value in source.items():
                self.__http_headers__[name.lower()] = value

    def __getitem__(self, key: str) -> str:
        return self.__http_headers__[key.lower()]

    def __len__(self):
        return len(self.__http_headers__)

    def __contains__(self, key: typing.Any):
        return key.lower() in self.__http_headers__

    def __iter__(self):
        return iter(self.__http_headers__)
class RequestHeaders(BaseHeaders):
    """Immutable headers attached to an incoming HTTP request."""
    pass
class ResponseHeaders(BaseHeaders, collections.abc.MutableMapping):
    """Mutable, case-insensitive headers for an outgoing HTTP response."""

    def __setitem__(self, key: str, value: str):
        # Normalize to lower case, matching BaseHeaders' storage convention.
        self.__http_headers__[key.lower()] = value

    def __delitem__(self, key: str):
        del self.__http_headers__[key.lower()]
class HttpRequest(_abc.HttpRequest):
    """An HTTP request object.

    Immutable snapshot of method, URL, headers, query params, and body.
    """

    def __init__(self, method: str, url: str,
                 headers: typing.Mapping[str, str],
                 params: typing.Mapping[str, str],
                 body) -> None:
        self.__method = method
        self.__url = url
        self.__headers = RequestHeaders(headers)
        # MappingProxyType gives callers a read-only view of the params.
        self.__params = types.MappingProxyType(params)
        self.__body = body

    @property
    def method(self):
        # HTTP method names are case-insensitive; expose them upper-cased.
        return self.__method.upper()

    @property
    def url(self):
        return self.__url

    @property
    def headers(self):
        return self.__headers

    @property
    def params(self):
        return self.__params

    def get_body(self):
        return self.__body
class HttpResponse(_abc.HttpResponse):
    """An HTTP response object.

    Defaults: status 200, mimetype 'text/plain', charset 'utf-8', empty body.
    str bodies are encoded with the configured charset; bytes/bytearray are
    stored as-is. Any other body type raises TypeError.
    """

    def __init__(self, body=None, *,
                 status_code=None, headers=None, mimetype=None, charset=None):
        if status_code is None:
            status_code = 200
        self.__status_code = status_code

        if mimetype is None:
            mimetype = 'text/plain'
        self.__mimetype = mimetype

        if charset is None:
            charset = 'utf-8'
        self.__charset = charset

        if headers is None:
            headers = {}
        self.__headers = ResponseHeaders(headers)

        if body is not None:
            self.__set_body(body)
        else:
            self.__body = b''

    @property
    def mimetype(self):
        return self.__mimetype

    @property
    def charset(self):
        return self.__charset

    @property
    def headers(self):
        return self.__headers

    @property
    def status_code(self):
        return self.__status_code

    def __set_body(self, body):
        if isinstance(body, str):
            body = body.encode(self.__charset)

        if not isinstance(body, (bytes, bytearray)):
            # Fixed typo in the error message ('reponse' -> 'response').
            raise TypeError(
                f'response is expected to be either of '
                f'str, bytes, or bytearray, got {type(body).__name__}')

        self.__body = bytes(body)

    def get_body(self) -> bytes:
        return self.__body
| 2.515625 | 3 |
src/imports/http_parser/domain.py | brando90/isabelle-gym | 0 | 12758380 | <reponame>brando90/isabelle-gym
from urllib.parse import urlparse
class Domain:
    """Splits a URL into its host name and registered (second-level) domain."""

    def __init__(self, url):
        self.url = url
        # Order matters only for readability; both derive from the URL.
        self.domain = self.get_domain()
        self.sub_domain = self.get_sub_domain()

    def get_domain(self):
        """Return 'name.tld' from the host, or False if there are not two labels."""
        parts = self.get_sub_domain().split('.')
        if len(parts) < 2:
            return False
        return '.'.join(parts[-2:])

    def get_sub_domain(self):
        """Return the full network location (host) portion of the URL."""
        return urlparse(self.url).netloc
| 2.890625 | 3 |
FWCore/MessageService/test/u34_cfg.py | SWuchterl/cmssw | 6 | 12758381 | # Unit test configuration file for MessageLogger service:
# Suppression based on minimum severity level.
# (This is part A - verify that the suppression occurs.
# u35 is part B - verifying that suppression does not occur if any threshold
# is low enough to not suppress.)
#
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")

import FWCore.Framework.test.cmsExceptionsFatal_cff
# Make framework exceptions fatal so suppression failures abort the test.
process.options = FWCore.Framework.test.cmsExceptionsFatal_cff.options

process.load("FWCore.MessageService.test.Services_cff")

# Single destination with a WARNING threshold: everything below WARNING
# must be suppressed (this is what the unit test verifies).
process.MessageLogger = cms.Service("MessageLogger",
    u34_warnings = cms.untracked.PSet(
        threshold = cms.untracked.string('WARNING'),
        noTimeStamps = cms.untracked.bool(True)
    ),
    destinations = cms.untracked.vstring('u34_warnings',
    )
)

process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(2)
)

process.source = cms.Source("EmptySource")

# Analyzer that emits messages at several severities for the logger to filter.
process.sendSomeMessages = cms.EDAnalyzer("UnitTestClient_W")

process.p = cms.Path(process.sendSomeMessages)
| 2.078125 | 2 |
python/MinGeneticMutation.py | gondimribeiro/leetcode | 0 | 12758382 | <filename>python/MinGeneticMutation.py
'''
A gene string can be represented by an 8-character long string, with choices from 'A', 'C', 'G', and 'T'.
Suppose we need to investigate a mutation from a gene string start to a gene string end where one mutation is defined as one single character changed in the gene string.
For example, "AACCGGTT" --> "AACCGGTA" is one mutation.
There is also a gene bank bank that records all the valid gene mutations. A gene must be in bank to make it a valid gene string.
Given the two gene strings start and end and the gene bank bank, return the minimum number of mutations needed to mutate from start to end. If there is no such a mutation, return -1.
Note that the starting point is assumed to be valid, so it might not be included in the bank.
https://leetcode.com/problems/minimum-genetic-mutation/
'''
from typing import List
from collections import deque
class Solution:
    def minMutation(self, start: str, end: str, bank: List[str]) -> int:
        """Minimum number of single-character mutations from start to end.

        BFS over the bank; returns -1 if end is unreachable.

        Bug fix: the original only excluded the immediate predecessor of each
        node, so on a cyclic bank with an unreachable end the BFS re-enqueued
        genes forever and never terminated. A visited set guarantees each gene
        is expanded at most once, so the queue always drains.
        """

        def is_mutation(s: str, t: str) -> bool:
            # True iff s and t differ in exactly one position.
            has_diff = False
            for base_s, base_t in zip(s, t):
                if base_s != base_t:
                    if has_diff:
                        return False
                    has_diff = True
            return has_diff

        visited = {start}
        # Each entry: (current gene, number of mutations so far).
        queue = deque([(start, 0)])
        while queue:
            current, num_mutations = queue.popleft()
            if current == end:
                return num_mutations
            for gene in bank:
                if gene not in visited and is_mutation(current, gene):
                    visited.add(gene)
                    queue.append((gene, num_mutations + 1))
        return -1
| 4.125 | 4 |
src/ocr_json2cc.py | AzureMediaCognitiveDemos/Video-OCR-Search | 0 | 12758383 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import json
def empty(s):
    """Return True when *s* is None, empty, or whitespace-only."""
    return not (s and not s.isspace())
def get_if_key_exist(obj, key):
    """Return obj[key] if *key* is present, else ''.

    Uses the `in` operator instead of dict.has_key(), which only exists in
    Python 2; `in` works identically on both Python 2 and 3.
    """
    return obj[key] if key in obj else ''
def sec2timefmt(int_sec):
    """Format a second count as a zero-padded HH:MM:SS string.

    Uses floor division (//) so the arithmetic is integral under both
    Python 2 and Python 3 (plain / becomes true division on Python 3).
    """
    h = int_sec // 3600
    m = (int_sec % 3600) // 60
    s = int_sec % 60
    return '%02d:%02d:%02d' % (h, m, s)
def remove_dup_items_in_list(li):
    """Return a copy of *li* without duplicates, keeping first occurrences.

    Uses list membership (not a set) so unhashable items also work.
    """
    seen = []
    for item in li:
        if item not in seen:
            seen.append(item)
    return seen
def print_simple(fragments):
    # Dump each fragment in the plain "[start - end] text" format.
    for frag in fragments:
        frag.print_fragment_simple()
def print_webvtt (fragments):
    # Emit the WebVTT file header, then one cue per fragment.
    print "WEBVTT"
    print "NOTE\n"
    for frag in fragments:
        frag.print_fragment_webvtt()
def usage(c):
    # Print the command-line help; *c* is the program name (argv[0]).
    print 'Usage: # python %s <jsonfile> <outmode>' % c
    print 'outmode : 0 - simple text, 1 - vtt'
class Fragment:
    """One timed caption fragment parsed from the OCR JSON.

    Holds a start time, duration, event interval (all in 'timescale' ticks)
    and the non-empty OCR text strings found in the fragment's events.
    Python 2 code (print statements, `long`, .encode('utf8') on output).
    """

    def __init__(self,js,timescale):
        self.__start = get_if_key_exist(js,'start')
        self.__timescale = timescale
        self.__duration = get_if_key_exist(js,'duration')
        self.__interval = get_if_key_exist(js,'interval')
        self.__texts = []
        events_js = get_if_key_exist(js,'events')
        # Only parse events when 'start' was a real integer (not the '' default).
        if isinstance(self.__start, (int, long)) and events_js:
            for event_js in events_js:
                self.__parse_event__(event_js)

    def __parse_event__(self, event_js):
        # Collect every non-blank 'text' entry from one event's regions.
        if not event_js:
            return
        for rs_js in event_js:
            lang= get_if_key_exist(rs_js,'language')
            t = get_if_key_exist(rs_js,'text')
            #print "debug t={0}".format(t.encode('utf8'))
            t = t.replace(' ', '') #strip space
            if not empty(t):
                self.__texts.append(t)

    def __get_time_in_sec(self, intval):
        # Convert raw ticks to whole seconds.
        return intval / int(self.__timescale)

    def __get_time_in_timefmt(self,intval):
        return sec2timefmt(self.__get_time_in_sec(intval))

    def get_start_in_sec(self):
        return int(self.__start) /int( self.__timescale)

    def get_start_in_timefmt(self):
        return sec2timefmt(self.get_start_in_sec())

    def get_end_in_sec(self):
        return (self.__start + self.__duration ) / self.__timescale

    def get_end_in_timefmt(self):
        return sec2timefmt(self.get_end_in_sec())

    def get_texts(self):
        return self.__texts

    def get_serialized_texts(self):
        # Deduplicated texts joined with ' | ', UTF-8 encoded for printing.
        texts_uniq = remove_dup_items_in_list(self.__texts)
        return ' | '.join(texts_uniq).encode('utf8')

    def print_fragment_simple(self):
        # Multiple texts with duration != interval: print each text on its
        # own interval-sized time slice; otherwise one combined line.
        if len(self.__texts) > 1 and self.__duration != self.__interval:
            start_ticks = int(self.__start)
            end_ticks = start_ticks + int(self.__interval)
            for text in self.__texts:
                print "[{0} - {1}] {2}".format(
                    self.__get_time_in_timefmt(start_ticks),
                    self.__get_time_in_timefmt(end_ticks),
                    text.encode('utf8'))
                start_ticks = start_ticks + int(self.__interval)
                end_ticks = end_ticks + int(self.__interval)
        else:
            print "[{0} - {1}] {2}".format(
                self.get_start_in_timefmt(),
                self.get_end_in_timefmt(),
                self.get_serialized_texts())

    def print_fragment_webvtt (self):
        # Same slicing logic as print_fragment_simple, formatted as VTT cues.
        if len(self.__texts) > 1 and self.__duration != self.__interval:
            start_ticks = int(self.__start)
            end_ticks = start_ticks + int(self.__interval)
            for text in self.__texts:
                print "{0}.000 --> {1}.000\n{2}\n".format(
                    self.__get_time_in_timefmt(start_ticks),
                    self.__get_time_in_timefmt(end_ticks),
                    text.encode('utf8'))
                start_ticks = start_ticks + int(self.__interval)
                end_ticks = end_ticks + int(self.__interval)
        else:
            print "{0}.000 --> {1}.000\n{2}\n".format(
                self.get_start_in_timefmt(),
                self.get_end_in_timefmt(),
                self.get_serialized_texts())
if __name__ == '__main__':
    argvs = sys.argv
    argc = len(argvs)

    # Exactly two arguments required: <jsonfile> <outmode>.
    if (argc != 3):
        usage(argvs[0])
        quit()
    jsonfile = argvs[1]
    # outmode must be the digit 0 (plain text) or 1 (WebVTT).
    if ( not str(argvs[2]).isdigit()) or (int(argvs[2]) !=0 and int(argvs[2]) !=1 ):
        usage(argvs[0])
        quit()
    outmode = int(argvs[2])

    # Parse every fragment, keeping only those with at least one text.
    frag_list = []
    with open(jsonfile, 'r') as f:
        obj = json.load(f)
        timescale =obj["timescale"]
        frags_js =obj["fragments"]
        for frag_js in frags_js:
            f = Fragment(frag_js,timescale)
            if len(f.get_texts()) > 0:
                frag_list.append(f)

    if ( outmode == 0 ): # simple
        print_simple(frag_list)
    else: # webvtt or ttml
        print_webvtt(frag_list)
| 2.671875 | 3 |
espider/api.py | gitduk/espider | 0 | 12758384 | <filename>espider/api.py
import asyncio
import importlib
import logging
import os
import json
import sys
from collections import Iterable
from fastapi import BackgroundTasks, FastAPI
from starlette.templating import Jinja2Templates
from starlette.requests import Request as Req
from starlette.staticfiles import StaticFiles
from espider import ColoredFormatter, Spider
from espider.utils import pretty_error, match_list
from espider.settings import LOG_LEVEL, LOG_FORMAT, LOG_DATEFMT
from pathlib import Path
from aredis import StrictRedis
# Module-level async Redis client shared by every SpiderApp instance.
redis_client = StrictRedis(host='127.0.0.1', port=6379)
class SpiderApp(FastAPI):
    """FastAPI application that discovers, lists, and runs espider Spiders.

    Endpoints: / (dashboard), /{name} (spider card), /{name}/run,
    /{name}/info. Spider status is read from Redis under 'Spider:{name}'.
    """

    def __init__(self):
        super(SpiderApp, self).__init__()

        # Directory holding static assets (and the HTML templates below).
        self.static_dir = '{}/static'.format(Path(__file__).resolve().parent)
        self.mount("/static", StaticFiles(directory=self.static_dir), name="static")

        # Jinja2 templates are served from the same static directory.
        self.templates = Jinja2Templates(directory=self.static_dir)

        # Maps '<module.path>.<ClassName>' (or bare class name) -> Spider class.
        self.spiders = {}
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.setLevel(LOG_LEVEL or logging.DEBUG)
        if not self.logger.handlers:
            sh = logging.StreamHandler()
            sh.setLevel(LOG_LEVEL or logging.DEBUG)
            formatter = ColoredFormatter(fmt=LOG_FORMAT, datefmt=LOG_DATEFMT)
            sh.setFormatter(formatter)
            self.logger.addHandler(sh)

        # register endpoints
        self.get("/")(self.root)
        self.get("/{name}")(self.spider_card)
        self.get("/{name}/run")(self.run_spider)
        self.get("/{name}/info")(self.spider_info)

        # Hook allowing callers to customize how spider classes are instantiated.
        self.spider_factory = lambda spider: spider()
        self.redis_client = redis_client
        self._loop = None

    @property
    def loop(self):
        # Lazily cached asyncio event loop.
        # NOTE(review): bare except here also swallows KeyboardInterrupt;
        # consider narrowing to RuntimeError.
        if not self._loop:
            try:
                self._loop = asyncio.get_event_loop()
            except:
                self._loop = asyncio.get_running_loop()
        return self._loop

    def add_spider(self, sp):
        """Register a single Spider subclass under its class name."""
        # Skip classes whose name matches an already-registered key.
        if self._spider_filter(sp.__name__, self.spiders.keys()): return

        if issubclass(sp, Spider):
            self.logger.info('Add Spider: {}'.format(sp.__name__))
            self.spiders[sp.__name__] = sp
        else:
            self.logger.warning('TypeError: except espider.Spider object')

    def load_spider(self, path='./spiders', target_dir=None, target_cls=None, ignore_dir=None, ignore_cls=None):
        """Walk *path*, import every .py module, and register Spider subclasses.

        target_dir/target_cls act as allow-lists, ignore_dir/ignore_cls as
        deny-lists; each may be a string or an iterable of patterns.
        """
        if not os.path.exists(path): return

        if os.path.isdir(path):
            for root, _, files in os.walk(path):
                # Filter which directories get loaded.
                if root.split('/')[-1].startswith('__'): continue
                if ignore_dir is not None and self._spider_filter(root, ignore_dir): continue
                if target_dir is not None and not self._spider_filter(root, target_dir): continue

                py_files = [f for f in files if os.path.splitext(f)[1] == '.py' and not f.startswith('__')]
                if not py_files: continue

                for pf in py_files:
                    module_path = '{}.{}'.format(root.replace('/', '.').strip('.'), os.path.splitext(pf)[0])
                    module = importlib.import_module(module_path)
                    for name, cls in vars(module).items():
                        if not isinstance(cls, type): continue
                        if name == 'Spider' or name.startswith('__'): continue
                        if issubclass(cls, Spider):
                            cls_key = '{}.{}'.format(module_path, cls.__name__)
                            # Filter which spider classes get registered.
                            if ignore_cls is not None and self._spider_filter(cls_key, ignore_cls): continue
                            if target_cls is not None and not self._spider_filter(cls_key, target_cls): continue
                            self.logger.info('Add Spider: {}'.format(cls_key))
                            self.spiders[cls_key] = cls
        else:
            self.logger.warning('Load Spiders Error: {} is not a dir'.format(path))

    @staticmethod
    def _spider_filter(string, clist):
        # True when *string* matches any pattern in *clist* (str coerced to list).
        # Implicitly returns None (falsy) on no match.
        if isinstance(clist, str): clist = [clist]
        if match_list(string, clist): return True

    def _get_spider(self, name=None):
        """Resolve *name* (None = all, str, or iterable of strs) to a list of
        spider classes matched by key suffix; returns None if none registered."""
        if not self.spiders: return

        spiders = None
        if name is None:
            spiders = []
            for key, sp in self.spiders.items():
                spiders.append(sp)

        if isinstance(name, str):
            for key, sp in self.spiders.items():
                if key.endswith(name):
                    spiders = sp
        elif isinstance(name, Iterable):
            spiders = []
            for n in name:
                for key, sp in self.spiders.items():
                    if key.endswith(n):
                        spiders.append(sp)
                        break

        if not isinstance(spiders, list):
            spiders = [spiders]

        return spiders

    async def _run_spiders(self, spiders):
        # Instantiate each spider via the factory and run their downloaders
        # concurrently.
        for sp in spiders:
            self.logger.info(f' Run Spider {sp.__name__} '.center(100, '='))
        await asyncio.gather(*[self.spider_factory(sp)._downloader() for sp in spiders])

    async def root(self, req: Req):
        """Dashboard page: lists every registered spider with its Redis info."""
        if self.spiders:
            spiders = [sp.__name__ for sp in self._get_spider()]
        else:
            spiders = []

        datas = []
        for n in spiders:
            data = await self.spider_info(n)
            data['name'] = n
            datas.append(data)

        return self.templates.TemplateResponse('root.html', {
            'request': req,
            'spiders': spiders,
            'datas': datas
        })

    async def spider_info(self, name):
        # Status JSON stored by the spider itself under 'Spider:{name}'.
        info = await self.redis_client.get('Spider:{}'.format(name))
        return json.loads(info) if info else {}

    async def run_spider(self, background_task: BackgroundTasks, name):
        """Schedule the named spider(s) as a FastAPI background task and
        return the current Redis status."""
        spiders = self._get_spider(name=name)
        if name is None and not spiders:
            self.logger.error('Spiders map is null')
            return

        if not spiders:
            self.logger.error('Cannot find spider: {}'.format(name))
            return

        try:
            background_task.add_task(self._run_spiders, spiders)
        except KeyboardInterrupt:
            self.logger.warning('KeyboardInterrupt')
            try:
                sys.exit(0)
            except SystemExit:
                os._exit(0)
        except Exception as e:
            pretty_error(e, self.logger)

        return await self.spider_info(name)

    def start(self, name=None):
        """Blocking entry point: run the spider(s) on the event loop directly
        (no HTTP server required)."""
        spiders = self._get_spider(name=name)
        if name is None and not spiders:
            self.logger.error('Spiders map is null')
            return

        if not spiders:
            self.logger.error('Cannot find spider: {}'.format(name))
            return

        try:
            self.loop.run_until_complete(self._run_spiders(spiders))
        except KeyboardInterrupt:
            self.logger.warning('KeyboardInterrupt')
            try:
                sys.exit(0)
            except SystemExit:
                os._exit(0)
        except Exception as e:
            pretty_error(e, self.logger)

    async def spider_card(self, req: Req, name):
        """Detail page for one spider, rendered from card.html."""
        if self.spiders:
            spiders = [sp.__name__ for sp in self._get_spider()]
        else:
            spiders = []

        data = await self.spider_info(name)
        info = {
            'request': req,
            'data': data,
            'name': name,
            'spiders': spiders
        }
        return self.templates.TemplateResponse('card.html', info)
| 2.03125 | 2 |
scripts/pytest_parallel.py | ashnair1/buzzard | 30 | 12758385 | <reponame>ashnair1/buzzard
"""
Run buzzard's tests in parallel. Pass the same arguments as you would pass to `pytest`.
Gets the tests down to 2min from 7.5min. One test takes 2min, it would be a good idea to split it.
```sh
$ python buzzard/test/pytest_parallel.py -x .
```
"""
import sys
import subprocess
from concurrent.futures import ThreadPoolExecutor
import multiprocessing as mp
import uuid
import os
import tempfile
import collections
import datetime
# Test modules whose tests are dispatched one-per-pytest-call (chunked)
# instead of one call for the whole module, because they are slow.
TOCHUNK = [
    'test_cached_raster_recipe',
    'test_vectorsource_getsetdata_general',
]
def group(n, iterable):
    """Yield successive lists of at most *n* items from *iterable*.

    The final list may be shorter than *n*. (Removed the dead, never-used
    `stop` variable from the original.)
    """
    batch = []
    for obj in iterable:
        batch.append(obj)
        if len(batch) == n:
            yield batch
            batch = []
    if batch:
        yield batch
def _print_cmd(s):
    # Echo the command shell-prompt style, in yellow (ANSI escape 33m).
    print('\033[33m$ {}\033[0m'.format(s))
def _gen_tests():
    """Collect tests with `pytest --collect-only` and yield pytest targets.

    Yields either a module path (one pytest call runs the whole module) or
    'module::function' entries for modules listed in TOCHUNK, which are run
    one test per call. Raises on a non-zero pytest exit code.
    """
    path = os.path.join(tempfile.gettempdir(), 'pytest-collection-tmp')
    # Drop reporting/coverage flags: collection only needs the selection args.
    args_phase0 = [
        s
        for s in sys.argv[1:]
        if 'junitxml' not in s and
        'cov' not in s
    ]
    # Run through bash so the &> output redirection works.
    cmd = ['pytest', '--collect-only'] + args_phase0 + ['&>{}'.format(path)]
    cmd = ' '.join(cmd)
    cmd = 'bash -c "{}"'.format(cmd)
    try:
        _print_cmd(cmd)
        a = datetime.datetime.now()
        code = os.system(cmd)
        b = datetime.datetime.now()
    finally:
        # Always read back and remove the capture file, even on failure.
        res = ''
        if os.path.isfile(path):
            with open(path) as stream:
                res = stream.read()
            os.remove(path)
    dt = (b - a).total_seconds()
    print(' ', cmd, '(took {:.1f}sec)'.format(dt))
    if code != 0:
        raise Exception(
            '{} failed with code {}\n============= output:\n{}\n=============\n'.format(
                cmd, code, res
            )
        )
    # Parse the "<Module ...>" / "<Function ...>" collection tree into
    # {module path: [function names]}.
    d = collections.defaultdict(list)
    m = None
    for l in res.split('\n'):
        l = l.strip()
        if l.startswith("<Module "):
            m = l.replace("<Module ", '')[:-1]
        elif l.startswith("<Function "):
            assert m is not None, l
            f = l.replace("<Function ", '')[:-1]
            d[os.path.join("buzzard", "test", m)].append(f)
        else:
            pass
    print('Found {} tests scattered on {} files'.format(
        sum(map(len, d.values())),
        len(d),
    ))
    for m, fs in d.items():
        if any(
            n in m
            for n in TOCHUNK
        ):
            print(' {} -> ({} calls of 1 test)'.format(m, len(fs)))
            for f in fs:
                yield '{}::{}'.format(m, f)
        else:
            print(' {} -> (1 call of {} tests)'.format(m, len(fs)))
            yield m
def _run_test(batch):
    """Run one pytest invocation over *batch* (a list of pytest targets).

    Each call gets a unique uuid so its junit/coverage artifacts and its
    COVERAGE_FILE don't collide with concurrent calls. Raises on a
    non-zero exit code, with the captured output in the message.
    """
    uid = str(uuid.uuid4())
    path = os.path.join(tempfile.gettempdir(), uid)
    # Keep only flag args, rewriting report filenames to be uid-unique.
    args_phase1 = [
        s.replace(
            'pytest-report.xml', 'pytest-report-{}.xml'.format(uid)
        ).replace(
            'coverage.xml', 'coverage-{}.xml'.format(uid)
        )
        for s in sys.argv[1:]
        if s[0] == '-'
    ]
    cmd = ' '.join(
        ['pytest'] +
        args_phase1 +
        ["'{}'".format(s)
         for s in batch
        ] +
        ['&>{}'.format(path)]
    )
    cmd = 'COVERAGE_FILE=.coverage.{} bash -c "{}"'.format(uid, cmd)
    try:
        _print_cmd(cmd)
        a = datetime.datetime.now()
        code = os.system(cmd)
        b = datetime.datetime.now()
    finally:
        # Always read back and remove the capture file, even on failure.
        res = ''
        if os.path.isfile(path):
            with open(path) as stream:
                res = stream.read()
            os.remove(path)
    dt = (b - a).total_seconds()
    print(' ', cmd, '(took {:.1f}sec)'.format(dt))
    if code != 0:
        raise Exception(
            '{} failed with code {}\n============= output:\n{}\n=============\n'.format(
                cmd, code, res
            )
        )
if __name__ == '__main__':
    # Discover all tests, then fan them out to one pytest call per entry,
    # running cpu_count() calls concurrently (os.system releases the GIL,
    # so threads are enough here).
    print('-- Discovering tests --')
    tests = list(_gen_tests())
    # Reverse-sorted so the slowest (chunked) entries start first.
    tests = sorted(tests)[::-1]
    tests = list(group(1, tests))
    # Fixed typo in the status message ('simulateneous' -> 'simultaneous').
    print('-- Running tests, {} calls to pytest, {} simultaneous --'.format(
        len(tests), mp.cpu_count()
    ))
    with ThreadPoolExecutor(mp.cpu_count()) as ex:
        list(ex.map(_run_test, tests))
| 2.421875 | 2 |
script/issue_cmd.py | ScarletAI/Arch-Apple-Installer | 4 | 12758386 | <filename>script/issue_cmd.py
import sys, usb.core

# Locate the Apple device (vendor 0x05ac, product 0x4141) on the USB bus.
dev = usb.core.find(idVendor=0x05ac, idProduct=0x4141)
if dev is None:
    raise ValueError('Device not found')
dev.set_configuration()
# Send the first CLI argument as a newline-terminated command via a
# class-type OUT control transfer (bmRequestType 0x21, bRequest 3).
dev.ctrl_transfer(0x21, 3, 0, 0, sys.argv[1] + '\n')
serving/engine.py | cunshen/text-antispam | 1 | 12758387 | #! /usr/bin/env python3
"""与加载了RNN Classifier导出的Servable的TensorFlow Serving进行通信
"""
import numpy as np
import jieba
import tensorlayer as tl
from grpc.beta import implementations
import predict_pb2
import prediction_service_pb2
from packages import text_regularization as tr
def text_tensor(text, wv):
    """Embed *text* as a (1, n_words, 200) array of word vectors.

    Args:
        text: raw text to vectorize
        wv: word-vector model; words missing from it fall back to wv['UNK']

    Returns:
        numpy array shaped (1, number_of_words, 200)
    """
    normalized = tr.extractWords(text)
    vectors = []
    for word in jieba.cut(normalized.strip()):
        try:
            vectors.append(wv[word])
        except KeyError:
            # Out-of-vocabulary words map to the shared UNK embedding.
            vectors.append(wv['UNK'])
    sequence = np.asarray(vectors)
    return sequence.reshape(1, len(sequence), 200)
# Warm up jieba's dictionary so the first real request isn't slow.
print(" ".join(jieba.cut('分词初始化')))
# Pre-trained 200-dim word2vec embeddings used by text_tensor.
wv = tl.files.load_npy_to_any(name='../word2vec/output/model_word2vec_200.npy')

# gRPC channel + stub to the TensorFlow Serving instance.
host, port = ('localhost', '9000')
channel = implementations.insecure_channel(host, int(port))
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
# Request template targeting the 'antispam' servable.
request = predict_pb2.PredictRequest()
request.model_spec.name = 'antispam'
ClipboardSharer-Windows.py | EnriqueSoria/Android-PC-clipboard-sharer | 1 | 12758388 | <reponame>EnriqueSoria/Android-PC-clipboard-sharer
'''
PC (server) side.
It receives data, and puts it into the clipboard.
It needs: http://sourceforge.net/projects/pywin32/
'''
import socket
import time
import win32clipboard
def get_clipboard():
    ''' Opens a clipbard and get the text '''
    # Clipboard must be opened before reading and closed afterwards.
    win32clipboard.OpenClipboard()
    data = win32clipboard.GetClipboardData()
    win32clipboard.CloseClipboard()
    return data
def set_clipboard(text):
    ''' Opens a clipboard and sets text to it '''
    win32clipboard.OpenClipboard()
    win32clipboard.EmptyClipboard()
    # pywin32's API is SetClipboardData(format, data); the original called a
    # nonexistent SetClipboard(data, format), which raised AttributeError.
    win32clipboard.SetClipboardData(win32clipboard.CF_TEXT, text.encode('utf-8'))
    win32clipboard.SetClipboardData(win32clipboard.CF_UNICODETEXT, unicode(text))
    win32clipboard.CloseClipboard()
# Constants
TCP_IP = '192.168.1.3'
TCP_PORT = 6006
BUFFER_SIZE = 1024

# TCP server socket listening for the Android client.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)

# Serve forever: each connection delivers one clipboard payload.
while True:
    # Accept connection
    conn, addr = s.accept()
    # Log address
    print 'Connection address:', addr
    # Receive data
    data = conn.recv(BUFFER_SIZE)
    # Put the received text on the Windows clipboard and acknowledge.
    if data:
        set_clipboard(data)
        print data
        conn.send('ok')
    conn.close()
| 3.390625 | 3 |
python/checkDifference_yieldVariance.py | villano-lab/nrFano_paper2019 | 2 | 12758389 | <reponame>villano-lab/nrFano_paper2019<filename>python/checkDifference_yieldVariance.py
# import "standard" python modules
import numpy as np
import h5py
from datetime import datetime
from astropy.table import Table, Column
import os
import argparse
# import our custom NR Fano analysis code
import sys
sys.path.append('../python/')
from EdwRes import *
from prob_dist import *
def getPosteriorSamples(filename):
    """Read an emcee-style posterior chain from an HDF5 file.

    Reads datasets under the 'mcmc/sampler' group. Only 'samples'
    (shape (Nwalkers, N, Ndim)) is returned; the individual parameter
    datasets are read but unused here.

    Returns:
        (ndim, nwalkers, N, samples)
    """
    # get the data
    # the posterior distribution is in samples
    f = h5py.File(filename,'r')

    # need to store data in an array:
    # The sampler will now have a chains attribute
    # which is an array with shape (Nwalker,N,Ndim)
    # where N is the number of interations (500 in our inital run)
    # and Ndim is the number of fit parameters
    path='{}/{}/'.format('mcmc','sampler')
    aH = np.asarray(f[path+'aH'])
    C = np.asarray(f[path+'C'])
    m = np.asarray(f[path+'m'])
    scale = np.asarray(f[path+'scale'])
    A = np.asarray(f[path+'A'])
    B = np.asarray(f[path+'B'])
    samples = np.asarray(f[path+'samples'])
    f.close()

    #print ("sampler dimensions are: ", np.shape(samples))
    nwalkers, N, ndim = np.shape(samples)
    return ndim, nwalkers, N, samples
"""
returns the filename of an hdf5 file
the hdf5 file contains:
"""
def checkDifference_yieldVariance(Erecoil, numSamples, posteriorFile, datadir='./data', startIndex=None, cutoffIndex=0, lowerLimit=-1):
    """Compare exact vs. approximate yield std. dev. over posterior samples.

    For *numSamples* parameter sets drawn from the MCMC posterior in
    *posteriorFile*, evaluates both the corrected-series approximation and
    the exact moment calculation of the NR yield width at recoil energy
    *Erecoil* (keV), and writes the results to a timestamped HDF5 table in
    *datadir*. Returns the output filename.

    Args:
        Erecoil: recoil energy in keV
        numSamples: number of posterior samples to evaluate (clamped to chain length)
        posteriorFile: HDF5 file holding the sampler chain
        datadir: output directory for the results table
        startIndex: if given, take a contiguous slice starting here instead of
            random draws
        cutoffIndex: burn-in steps discarded from every walker
        lowerLimit: lower integration limit forwarded to sigmomEdw
    """
    # get the samples
    # for the most accurate fit, 'data/edelweiss_corr_C_systematicErrors_sampler_nll_allpars_gausPrior.h5'
    ndim, nwalkers, nsteps, samples = getPosteriorSamples(posteriorFile)
    print (ndim, nwalkers, nsteps)
    print(np.size(samples))

    # reshape the samples: drop burn-in, then flatten walkers x steps -> rows
    samples = samples[:, cutoffIndex:, :].reshape((-1, ndim))
    print(np.size(samples))

    # not wise to ask for more samples than there were steps in the origianl
    # sample chain
    if numSamples > nsteps:
        numSamples = nsteps
        print ("You are requesting more samples than were in the original sample chain. \n Reducing the number of samples to ", numSamples)
    elif startIndex is not None:
        if numSamples + startIndex > nsteps:
            numSamples = nsteps - startIndex
            print ("You are requesting more samples than were in the original sample chain. \n Reducing the number of samples to ", numSamples)

    aH_col, C_col, m_col, scale_col, A_col, B_col = [], [], [], [], [], []
    sig_yield_col, sig_yield_estimate_col = [], []
    energy_col = np.repeat(Erecoil, numSamples) #np.full((numSamples, 1), Erecoil)

    # create the mask that will select the samples
    if startIndex is None:
        mask = np.random.randint(len(samples), size=numSamples)
    else:
        mask = np.arange(numSamples) + startIndex

    # now use the mask to select the samples and iterate over those
    for aH, C, m, scale, A, B in samples[mask]:

        V = scale*4.0 #,'eps_eV' : 3.0, 'a': A, 'b': B

        # calculate the yield standard deviation used for fitting in edelweiss_fit_allParameters_
        ## get the NR prediction for the input parameters
        # series_NRQ_var_corr1(Er=10.0,F=0.0,V=4.0,aH=0.0381,alpha=(1/18.0),A=0.16,B=0.18,label='GGA3',corr1file='data/sigdiff_test.h5')
        # series_NRQ_var_corr1 returns the *variance*
        model_NR_0 = np.sqrt(series_NRQ_var_corr1(Erecoil, 0, V, aH, 1/18.0, A, B, 'GGA3'))
        #model_NR = np.sqrt(np.power(C + m*Erecoil, 2) + model_NR_0)

        # calculate the exact yield standard deviation
        """
        sig_real = []
        for Er_val in Er:
            sig_real.append(sigmomEdw(Er_val, band='NR', F=0.000001, V=scale*4.0, aH=aH, alpha=(1/100), A=A, B=B))
        """
        true_NR_sig = sigmomEdw(Erecoil,band='NR',label='GGA3',F=0.000001,V=V,aH=aH,alpha=(1/18.0), A=A, B=B, lowlim=lowerLimit)

        # store the parameter data
        #print (aH, C, m, scale, A, B)
        aH_col.append(aH)
        C_col.append(C)
        m_col.append(m)
        scale_col.append(scale)
        A_col.append(A)
        B_col.append(B)
        # and store the yield information
        sig_yield_col.append(true_NR_sig)
        sig_yield_estimate_col.append(model_NR_0)

    #############################
    # Store the information
    #############################
    now = datetime.now()
    time = now.strftime('%Y%h%d_%H%M%S%f')
    #print (time)
    # Timestamped filename so repeated runs never overwrite each other.
    filename = os.path.join(datadir, 'yield_accuracy_Erecoil_%.2f_keV_%s.h5' % (Erecoil, time))
    #print(filename)

    # make an astropy table
    # thank you astropy!!
    # energy would probably be handled better with metadata but OH WELL
    # having it in a column is easier for retrieval from the hdf5 file
    data_tab = Table()
    data_tab['energy_recoil_keV'] = energy_col
    data_tab['aH'] = aH_col
    data_tab['C'] = C_col
    data_tab['m'] = m_col
    data_tab['scale'] = scale_col
    data_tab['A'] = A_col
    data_tab['B'] = B_col
    data_tab['true_yield_sig'] = sig_yield_col
    data_tab['cor1_yield_sig'] = sig_yield_estimate_col
    #print(data_tab)
    data_tab.write(filename, format='hdf5', path='table')

    return filename
def main(args):
    """Run the yield-variance comparison at one measured recoil energy.

    Loads the EDELWEISS NR-width data file to get the list of measured
    recoil energies, picks the one selected by ``args.energyIndex``, then
    generates and stores the comparison table via
    ``checkDifference_yieldVariance``.
    """
    # We'll look at the Er values of the data points
    # import data from Edelweiss
    resNR_filename = os.path.join(args.repoPath, 'analysis_notebooks/data/edelweiss_NRwidth_GGA3_data.txt')
    resNR_data = pd.read_csv(resNR_filename, skiprows=1, \
    names=['E_recoil', 'sig_NR', 'E_recoil_err', 'sig_NR_err'], \
    delim_whitespace=True)
    # the sorting is necessary!
    # otherwise the mask defined below will select the wrong data
    resNR_data = resNR_data.sort_values(by='E_recoil')
    # the [2::] slices drop the two lowest-energy data points
    NR_data = {'Erecoil': resNR_data["E_recoil"][2::], 'sigma': resNR_data["sig_NR"][2::], 'sigma_err': resNR_data["sig_NR_err"][2::]}
    Er = np.sort(NR_data['Erecoil'])
    Erecoil = Er[args.energyIndex]
    # generate and store the data
    # def checkDifference_yieldVariance(Erecoil, numSamples, posteriorFile, datadir='./data', startIndex=None, cutoffIndex=0, lowerLimit=-1):
    # NOTE(review): the default --fileName is the NR-width *text* file, but
    # it is passed here as the MCMC posterior (HDF5) file -- verify callers
    # always override --fileName.
    MCMC_data_filename = os.path.join(args.repoPath, 'analysis_notebooks/data/', args.fileName)
    checkDifference_yieldVariance(Erecoil, args.numSamples, MCMC_data_filename, args.dataPath, args.startIndex, args.cutoffIndex, args.lowerLimit)
"""
Example use:
(nr_fano) <EMAIL>1SLP9K python$ python checkDifference_yieldVariance.py --energyIndex 0 --numSamples 5 --startIndex 3 --repoPath "/mnt/c/Users/canto/Repositories/nrFano_paper2019" --dataPath "../analysis_notebooks/data"
"""
if __name__ == "__main__":
    # Execute only when run as a script: parse the command-line options and
    # hand them to main().
    parser = argparse.ArgumentParser(description='Sample the posterior distribution')
    parser.add_argument('--energyIndex', type=int,
                        help='an integer between 0 and 4 to specify the energy index')
    parser.add_argument('--numSamples', type=int,
                        help='number of samples to draw from the posterior distribution')
    parser.add_argument('--repoPath',
                        help='path to the repository')
    parser.add_argument('--fileName', default='edelweiss_NRwidth_GGA3_data.txt',
                        help='data file name')
    # Fix: help text was a copy-paste of --repoPath ('path to the repository');
    # this argument is the output directory passed to
    # checkDifference_yieldVariance as `datadir`.
    parser.add_argument('--dataPath',
                        help='directory where the output HDF5 table is written')
    parser.add_argument('--cutoffIndex', type=int, default=0,
                        help='drop this number of samples from the sampler')
    parser.add_argument('--startIndex', type=int,
                        help='will sample from startIndex to startIndex + numSamples')
    parser.add_argument('--lowerLimit', type=float, default=-1,
                        help='set the integration lower limit, defaults to -1')
    args = parser.parse_args()
    main(args)
| 2.296875 | 2 |
Examples/QButtonGroup.py | GrayLand119/PyQt5-Demos | 0 | 12758390 | from PyQt5 import QtGui
from PyQt5.QtWidgets import QApplication, QWidget, QHBoxLayout, QLabel, QPushButton, QButtonGroup
import sys
from PyQt5 import QtCore
class Window(QWidget):
    """Demo window with three language buttons managed by one QButtonGroup.

    Clicking any button updates the label with that button's caption.
    """

    def __init__(self):
        super().__init__()
        # Window chrome / geometry settings.
        self.title = "PyQt5 QButton Group"
        self.top = 200
        self.left = 500
        self.width = 400
        self.height = 300
        self.setWindowTitle(self.title)
        self.setWindowIcon(QtGui.QIcon("icon.png"))
        self.setGeometry(self.left, self.top, self.width, self.height)

        layout = QHBoxLayout()

        # Label that reports which button was last clicked.
        self.label = QLabel(self)
        self.label.setFont(QtGui.QFont("Sanserif", 15))
        layout.addWidget(self.label)

        self.buttongroup = QButtonGroup()
        # self.buttongroup.setExclusive(False)
        self.buttongroup.buttonClicked[int].connect(self.on_button_clicked)

        # One button per language; group ids start at 1.
        languages = [("Python", "pythonicon.png"),
                     ("Java", "java.png"),
                     ("C++", "cpp.png")]
        for group_id, (caption, icon_file) in enumerate(languages, start=1):
            layout.addWidget(self._make_button(caption, icon_file, group_id))

        self.setLayout(layout)
        self.show()

    def _make_button(self, caption, icon_file, group_id):
        """Create one styled QPushButton and register it with the group."""
        button = QPushButton(caption)
        self.buttongroup.addButton(button, group_id)
        button.setFont(QtGui.QFont("Sanserif", 15))
        button.setIcon(QtGui.QIcon(icon_file))
        button.setIconSize(QtCore.QSize(40, 40))
        return button

    def on_button_clicked(self, id):
        """Slot for QButtonGroup.buttonClicked[int]: show the clicked text."""
        clicked = self.buttongroup.button(id)
        if clicked is not None:
            self.label.setText(clicked.text() + " Was Clicked ")
if __name__ == "__main__":
    # Standard Qt bootstrap: create the application, construct (and show)
    # the demo window, then block in the Qt event loop until it exits.
    App = QApplication(sys.argv)
    window = Window()
    sys.exit(App.exec())
| 3.0625 | 3 |
models/seq2seq-pytorch/utils/scheduler_helper.py | victorywys/contk | 5 | 12758391 | <reponame>victorywys/contk
from torch.optim import Optimizer
class ReduceLROnLambda():
    """Learning-rate scheduler that reduces the LR when a callable says so.

    Unlike ``ReduceLROnPlateau``, the trigger condition is fully
    customizable: ``func(metrics, history_data)`` is invoked on every
    :meth:`step` and must return ``(should_reduce, new_history_data)``.
    ``history_data`` is opaque state threaded between calls (``None`` on the
    first call), so ``func`` can implement patience windows, moving
    averages, etc.
    """

    def __init__(self, optimizer, func, factor=0.1,
                 verbose=False, min_lr=0, eps=1e-8):
        """
        Args:
            optimizer: the wrapped ``torch.optim.Optimizer``.
            func: callable ``(metrics, history_data) -> (flag, history_data)``;
                a truthy ``flag`` triggers an LR reduction.
            factor: multiplicative reduction factor; must be < 1.0.
            verbose: if True, print a message whenever a group's LR drops.
            min_lr: scalar, or per-param-group list/tuple, of LR floors.
            eps: minimal LR decrease worth applying; smaller changes skipped.
        """
        if factor >= 1.0:
            raise ValueError('Factor should be < 1.0.')
        self.factor = factor

        if not isinstance(optimizer, Optimizer):
            raise TypeError('{} is not an Optimizer'.format(
                type(optimizer).__name__))
        self.optimizer = optimizer

        # Accept either one global floor or one floor per param group.
        if isinstance(min_lr, (list, tuple)):
            if len(min_lr) != len(optimizer.param_groups):
                raise ValueError("expected {} min_lrs, got {}".format(
                    len(optimizer.param_groups), len(min_lr)))
            self.min_lrs = list(min_lr)
        else:
            self.min_lrs = [min_lr] * len(optimizer.param_groups)

        self.func = func
        self.verbose = verbose
        self.eps = eps
        # Opaque state handed back to `func` on the next step.
        self.history_data = None

    def step(self, metrics):
        """Feed a new metric value to ``func``; reduce the LR if it asks."""
        flag, self.history_data = self.func(metrics, self.history_data)
        if flag:
            self._reduce_lr()

    def _reduce_lr(self):
        """Multiply every param group's LR by ``factor``, respecting floors."""
        for i, param_group in enumerate(self.optimizer.param_groups):
            old_lr = float(param_group['lr'])
            new_lr = max(old_lr * self.factor, self.min_lrs[i])
            # Skip negligible updates so the LR does not creep by epsilons.
            if old_lr - new_lr > self.eps:
                param_group['lr'] = new_lr
                if self.verbose:
                    print('Reducing learning rate'
                          ' of group {} to {:.4e}.'.format(i, new_lr))

    def state_dict(self):
        """Serializable state; the optimizer and the callable are excluded."""
        return {key: value for key, value in self.__dict__.items()
                if key not in {'optimizer', 'func'}}

    def load_state_dict(self, state_dict):
        """Restore state produced by :meth:`state_dict`."""
        self.__dict__.update(state_dict)
| 2.203125 | 2 |
python/hfbs_fpga.py | uwsampa/hfbs | 25 | 12758392 | <reponame>uwsampa/hfbs
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import csr_matrix
import os
import time
import datetime
import argparse
from multiprocessing import Process, Queue
# local imports
import utils
# Runtime feature flags
RUN_FPGA = False         # actually talk to the FPGA over XDMA (needs hardware)
VERBOSE = True           # chatty progress printing
FIXEDP = True            # send fixed-point byte strings instead of raw floats
LOOP_OUTSIDE = False     # iterate on the host (True) vs. inside the FPGA (False)
NO_GARBAGE_ADDED = True  # readback stream contains no padding words
SAVE_FIG = False         # save the result figure to disk
DISPLAY_FIG = True       # show the result figure interactively
TWO_FLOW = False         # solve two flow channels instead of one
# settings
EPS = np.finfo(float).eps
# NOTE(review): threshold=np.nan was tolerated by older NumPy; modern NumPy
# requires an int here (e.g. sys.maxsize) -- confirm the pinned version.
np.set_printoptions(threshold=np.nan)
# Solver constants
BILATERAL_SIGMA_LUMA = 32     # bilateral-grid bin width along luma
BILATERAL_SIGMA_SPATIAL = 32  # bilateral-grid bin width along x/y
LOSS_SMOOTH_MULT = 8          # weight of the smoothness term
A_VERT_DIAG_MIN = 1e-3        # floor for the A diagonal before inversion
NUM_PCG_ITERS = 100           # number of Jacobi iterations
NUM_NEIGHBORS = 6             # 6-connected 3-D grid neighborhood
# Default input images (depth super-resolution example)
reference = '../data/depth_superres/reference.png'
target = '../data/depth_superres/target.png'
confidence = '../data/depth_superres/confidence.png'
# FPGA-specific constants, yours may be different
c2h_str = "/dev/xdma/card0/c2h0"  # card-to-host DMA channel
h2c_str = "/dev/xdma/card0/h2c0"  # host-to-card DMA channel
def read_from_fpga(fpga_read_channel, num_bytes, my_queue):
    """Blocking read of the solver result from the FPGA's C2H DMA channel.

    Runs in a child process (see ``invoke_fpga``); the raw bytes read are
    handed back through ``my_queue``.  ``num_bytes`` is the size of the
    payload that was *written*; how much is read back depends on the
    configured streaming mode.  NOTE: Python 2 code -- the divisions below
    rely on integer division and bare ``print`` statements are used.

    Args:
        fpga_read_channel: file descriptor of the C2H device node.
        num_bytes: number of payload bytes sent to the FPGA.
        my_queue: multiprocessing.Queue used to return the raw readback.
    """
    if not LOOP_OUTSIDE:
        # FPGA loops internally: one result word per 5 input words,
        # plus an 8-byte header.
        print("reading (num_bytes-8)/5)+8", 8+(num_bytes-8)/5)
        data_readback = os.read(fpga_read_channel, ((num_bytes-8)/5)+8)
    else:
        if NO_GARBAGE_ADDED:
            print "reading 3*(num_bytes/10)", 3*(num_bytes/10)
            data_readback = os.read(fpga_read_channel, 3*(num_bytes/10))
        else:
            data_readback = os.read(fpga_read_channel, num_bytes)
    #data_readback = os.read(fpga_read_channel, 8)
    print "\tdone reading, putting"
    my_queue.put(data_readback)
    return
def invoke_fpga(im):
    """Round-trip one flat input buffer through the FPGA solver.

    Serializes ``im`` (fixed-point byte strings when FIXEDP, raw float64
    bytes otherwise), writes it to the H2C DMA channel while a child process
    concurrently reads the result from the C2H channel, then deserializes
    the readback.  NOTE: Python 2 code (``buffer``, bare ``print``).

    Args:
        im: 1-D numpy array holding the interleaved solver inputs.

    Returns:
        numpy array with the solver output (rounded, decimals=32).
    """
    result = ''
    c2h_fd = 0
    h2c_fd = 0
    # Fixed-point serialization of the whole buffer (utils.to_bytestr).
    new_im = ''.join([utils.to_bytestr(num) for num in im])
    try:
        # open up read and write channels
        h2c_fd = os.open(h2c_str, os.O_RDWR)
        c2h_fd = os.open(c2h_str, os.O_RDWR)
        bytes_left = im.size
        result_queue = Queue()
        # processes to handle r/w channels
        read_process = Process(target=read_from_fpga, args=(c2h_fd,len(new_im), result_queue))
        # write_process = Process(target=write_to_fpga, args=(h2c_fd,buffer(im, im.size - bytes_left)))
        # open up rx
        read_process.start()
        print "started read"
        time.sleep(.5) # wait for the read to start TODO do this in the correct way (maybe wait until poll != 0 )
        # open up tx, tx should trigger rx
        print "writing"
        if FIXEDP:
            bytes_written = os.write(h2c_fd, buffer(new_im, 0))
        else:
            bytes_written = os.write(h2c_fd, buffer(im, 0))
        print "bytes_written:", bytes_written
        # wait for done
        proc_out = result_queue.get()
        print "got proc out"
        read_process.join()
        print "read process joined"
        result = proc_out
    finally:
        # Always release the DMA device nodes, even on failure.
        os.close(c2h_fd)
        os.close(h2c_fd)
    if RUN_FPGA:
        if FIXEDP:
            # Split readback into 8-byte words and decode from fixed point.
            new_result = [k for k in [result[i : i + 8] for i in range(0, len(result), 8)]]
            result = [utils.from_bytestr(k) for k in new_result]
        else:
            result = np.frombuffer(result, dtype=np.float64).reshape(im.shape)
    return np.around(np.array(result),decimals=32) # round for extreme vals
def prepare_flow(reference, flow_tuple, confidence):
    """Load reference/target/confidence images and build the solver inputs.

    Args:
        reference: path to the reference (guide) PNG image.
        flow_tuple: tuple of target PNG paths; only the first is used unless
            TWO_FLOW is set.
        confidence: path to the per-pixel confidence PNG.

    Returns:
        X:  (H*W, 3) bilateral-grid coordinates (x-bin, y-bin, luma-bin)
            per pixel, flattened in Fortran order.
        W0: (H*W,) per-pixel confidence weights, normalized to [0, 1].
        X0: (H*W, 1) target values ((H*W, 2) when TWO_FLOW).
        flow_a.shape: spatial shape of the first target channel, used later
            to un-flatten the solver output.
    """
    # plt.imread returns floats in [0, 1]; rescale back to raw pixel range.
    reference_image = np.array(plt.imread(reference, format='png'), dtype=np.float32)*256
    flow_a = np.array(plt.imread(flow_tuple[0], format='png'), dtype=np.float32)*65536
    if TWO_FLOW:
        flow_b = np.array(plt.imread(flow_tuple[1], format='png'), dtype=np.float32)*65536
        flow = np.stack((flow_a,flow_b),axis=-1)
    else:
        flow = flow_a
    # Shift the 16-bit values to a signed range and scale down.
    flow = np.subtract(flow, 2**15)
    flow = np.divide(flow, 256)
    weight = np.array(plt.imread(confidence, format='png'), dtype=np.float32)*256*256
    weight = np.divide(weight, 65536)
    if VERBOSE:
        print(">>> preparing flow data")
    sz = [reference_image.shape[0], reference_image.shape[1]]
    # Quantize x, y and luma into bilateral-grid bin indices per pixel.
    I_x = np.tile(np.floor(np.divide(np.arange(sz[1]), BILATERAL_SIGMA_SPATIAL)), (sz[0],1))
    I_y = np.tile( np.floor(np.divide(np.arange(sz[0]), BILATERAL_SIGMA_SPATIAL)).reshape(1,-1).T, (1,sz[1]) )
    I_luma = np.floor_divide(utils.rgb2gray(reference_image), float(BILATERAL_SIGMA_LUMA))
    X = np.concatenate((I_x[:,:,None],I_y[:,:,None],I_luma[:,:,None]),axis=2).reshape((-1,3),order='F')
    W0 = np.ravel(weight.T)
    if TWO_FLOW:
        X0 = np.reshape(flow,[-1,2],order='F')
    else:
        X0 = np.reshape(flow,[-1,1],order='F')
    return X, W0, X0, flow_a.shape
def bistochastize(splat_mat, grid_size, diffuse3_mat, W0, Xshape):
    """Compute the grid bistochastization norm and the A-matrix diagonal.

    Runs 20 fixed iterations of the balancing update so the blur operator
    on the bilateral grid becomes (approximately) doubly stochastic, then
    forms the diagonal of the data + smoothness system matrix.

    Args:
        splat_mat: (num_vertices, num_pixels) sparse splat matrix.
        grid_size: 3-element grid shape.
        diffuse3_mat: sparse vertex-adjacency (diffusion) matrix.
        W0: per-pixel confidence weights.
        Xshape: number of pixels (rows of X).

    Returns:
        (splat_norm, A_diag): per-vertex normalization factors and the
        system-matrix diagonal, both shaped like the grid.
    """
    # Per-vertex pixel counts (splat of an all-ones image).
    splat_sum = np.reshape( splat_mat * np.ones(Xshape), grid_size,order='F').astype('float32')
    splat_norm = np.ones(splat_sum.shape).astype('float32')
    for i_norm in range(20):
        # NOTE(review): the third positional argument of np.reshape is
        # `order`; passing 1 here presumably worked on the old NumPy this
        # ran against -- modern NumPy may reject it. TODO confirm.
        diffuse_splat_norm = np.reshape(diffuse3_mat * np.reshape(splat_norm, -1, 1), splat_norm.shape,order='F')
        blurred_splat_norm = 2 * splat_norm + diffuse_splat_norm
        # Guard against division by zero at empty grid vertices.
        denom = np.maximum(EPS, blurred_splat_norm)
        splat_norm = np.sqrt(splat_norm * (splat_sum / denom))
    A_diag = np.reshape((splat_mat * W0), grid_size,order='F') + LOSS_SMOOTH_MULT * (splat_sum - 2 * np.square(splat_norm))
    return splat_norm, A_diag
def make_neighbors(grid_size):
    """Enumerate the directed edges between axis-adjacent grid vertices.

    For every vertex of a dense 3-D grid of shape ``grid_size`` and every
    offset of -1/+1 along each axis, emits a ``(src, dst)`` pair of flat
    vertex indices (Fortran/column-major raveling, matching the rest of the
    solver).  Pair order is preserved from the original implementation:
    all -1 offsets first (axis 0, 1, 2), then all +1 offsets.

    Improvements over the original: the unused ``idxs0`` local is removed,
    the three hand-copied ii/jj/kk blocks are collapsed into one axis loop,
    the per-vertex Python loop over ``np.ravel_multi_index`` is vectorized,
    and the result is now always integer dtype (previously an axis with no
    valid pairs promoted the whole result to float64 via ``np.vstack``).

    Args:
        grid_size: length-3 sequence/array of positive grid dimensions.

    Returns:
        ``(n_edges, 2)`` integer ndarray of ``(src, dst)`` flat indices.
    """
    ii, jj, kk = np.mgrid[0:grid_size[0], 0:grid_size[1], 0:grid_size[2]]
    coords = (ii, jj, kk)
    # Module-level VERBOSE flag; looked up defensively so the function also
    # runs when imported on its own.
    if globals().get('VERBOSE'):
        print(">>> neighbors")
    pairs = []
    for diff in (-1, 1):
        for axis in range(3):
            shifted = coords[axis] + diff
            # Keep only vertices whose shifted neighbor stays in the grid.
            valid = (0 <= shifted) & (shifted < grid_size[axis])
            src = [c[valid] for c in coords]
            dst = list(src)
            dst[axis] = shifted[valid]
            # Vectorized Fortran-order ravel of source and destination.
            src_flat = np.ravel_multi_index(tuple(src), dims=grid_size, order='F')
            dst_flat = np.ravel_multi_index(tuple(dst), dims=grid_size, order='F')
            pairs.append(np.column_stack((src_flat, dst_flat)))
    return np.vstack(pairs)
def dense_solve_jacobi(X, W0, X0):
    """Solve the bilateral-grid least-squares problem with Jacobi iteration.

    Builds the splat and diffusion sparse matrices from the per-pixel grid
    coordinates, bistochastizes the grid, then runs NUM_PCG_ITERS Jacobi
    updates on the CPU and (when RUN_FPGA) on the FPGA, returning both
    solutions sliced back to per-pixel values.  Behavior is driven by the
    module flags TWO_FLOW / LOOP_OUTSIDE / RUN_FPGA / VERBOSE.
    NOTE: Python 2 code (bare ``print`` statements).

    Args:
        X:  (num_pixels, 3) grid coordinates from prepare_flow.
        W0: (num_pixels,) confidence weights.
        X0: (num_pixels, 1) target values ((num_pixels, 2) when TWO_FLOW).

    Returns:
        (sliced_solution_cpu, sliced_solution_fpga): per-pixel solved values
        from the CPU path and the FPGA path.
    """
    if VERBOSE:
        print(">>> starting bilateral grid optimization")
    grid_size = (np.amax(X,axis=0) + np.ones(3)).astype('int64')
    print grid_size
    # Fortran-order strides: flat index = dot(coordinate, basis).
    basis = np.insert(np.cumprod(grid_size)[0:-1],0,1)
    grid_num_vertices = np.prod(grid_size)
    grid_splat_indices = np.dot(X,basis)
    if VERBOSE:
        print(">>> building splat mat")
    # this is where the info to build the sparse matrix is:
    # http://stackoverflow.com/questions/7760937/issue-converting-matlab-sparse-code-to-numpy-scipy-with-csc-matrix
    splat_ones = np.ones(grid_splat_indices.shape[0])
    ij = ( grid_splat_indices-1, np.arange(0, grid_splat_indices.shape[0]) )
    splat_mat_shape = ( np.prod(grid_size), X.shape[0] )
    splat_mat = csr_matrix( (splat_ones, ij), shape=splat_mat_shape )
    start_new_neighbors = time.time()
    idxs = make_neighbors(grid_size)
    if VERBOSE:
        print("time neighbors: ", time.time() - start_new_neighbors)
    # Diffusion (adjacency) matrix over the grid vertices.
    d_shape = np.prod(grid_size)
    diffuse_s = np.tile(1,idxs.shape[0])
    diffuse3_mat = csr_matrix( (diffuse_s, (idxs[:,0],idxs[:,1])), shape=(d_shape,d_shape) )
    if VERBOSE:
        print(">>> bistochastization")
    start_bistoch = time.time()
    splat_norm, A_diag = bistochastize(splat_mat, grid_size, diffuse3_mat, W0, X.shape[0])
    if VERBOSE:
        print("time bistoch: ", time.time() - start_bistoch)
    # Right-hand side b: confidence-weighted targets splatted onto the grid.
    if TWO_FLOW:
        XW0 = np.column_stack((np.multiply(W0,X0[:, 0]) , np.multiply(W0,X0[:, 1]))).astype('float32')
        b_mat = np.reshape((splat_mat * XW0), np.hstack([grid_size,2]),order='F').astype('float32')
    else:
        XW0 = (W0*X0.T).T.astype('float32')
        b_mat = np.reshape((splat_mat * XW0), grid_size,order='F').astype('float32')
    #% Construct A, b, and c, which define our optimization problem
    # NOTE(review): this uses the *builtin* sum with a MATLAB-style axis
    # argument (start value in Python) and `c` is never used afterwards --
    # looks like a MATLAB-translation leftover. TODO confirm.
    c = 0.5 * sum(XW0 * X0,1);
    # initial solution: Jacobi preconditioner 1/diag(A), with a floor
    inv_a = 1/np.maximum(A_VERT_DIAG_MIN, A_diag)
    #inv_a = np.divide(1,np.maximum(A_VERT_DIAG_MIN, A_diag)).astype('float32')
    final_result_cpu = []
    final_result_fpga = []
    run_fpga_time = 0
    # for each channel in the flow
    if VERBOSE:
        print("starting flow!")
    if TWO_FLOW:
        for channel in range(b_mat.shape[-1]):
            if VERBOSE:
                print("channel", channel)
            b_c = b_mat[:,:,:,channel].astype('float32')
            # Y holds the current Jacobi iterate (CPU and FPGA copies).
            Y_c = np.multiply(b_c, inv_a)
            Y_c_cpu = np.multiply(b_c, inv_a)
            Y_c_fpga = np.multiply(b_c, inv_a).astype('float32')
            cpu_time_sequential = 0
            cpu_time_vector = 0
            cpu_time_sequential_compute_only = 0
            fpga_time = 0
            start_vector = time.time()
            if LOOP_OUTSIDE:
                # Host-driven loop: one FPGA round-trip per Jacobi iteration.
                # Note `iter` shadows the builtin here.
                for iter in range(NUM_PCG_ITERS):
                    if VERBOSE:
                        print("iteration", iter)
                    start_vector = time.time()
                    Ysn = np.multiply(Y_c, splat_norm) # flow 1
                    Ysn_reshaped = np.reshape(Ysn,-1,1)
                    Ysn_r2 = np.reshape((diffuse3_mat * Ysn_reshaped), Ysn.shape, order='F')
                    temp_Y = (-1 * LOSS_SMOOTH_MULT * np.multiply(splat_norm , Ysn_r2 ))
                    #Y_c_cpu = np.multiply((b_c - (-1 * LOSS_SMOOTH_MULT * np.multiply(splat_norm , Ysn_r2 ) )) , inv_a)
                    Y_c_cpu = np.multiply((b_c - temp_Y) , inv_a)
                    Y_c = Y_c_cpu
                    total_time_vector = time.time() - start_vector #end timing
                    Y_c_cpu = Y_c
                    if VERBOSE:
                        print("\t CPU vector time: \t\t\t", total_time_vector)
                    if RUN_FPGA:
                        # Interleave the five per-vertex words in F-order.
                        np_fpga_input = np.empty(np.prod(grid_size)*5)
                        fpga_elapsed = 0
                        npit = np.nditer(Y_c_fpga, flags=['f_index','multi_index'], order='F')
                        for x in npit:
                            # order: Y, 1/a, bc, splatnorm
                            #print npit.index, "\t", npit.multi_index
                            np_fpga_input[5*npit.index + 0] = Y_c_fpga[npit.multi_index]
                            np_fpga_input[5*npit.index + 1] = splat_norm[npit.multi_index]
                            np_fpga_input[5*npit.index + 2] = b_c[npit.multi_index]
                            np_fpga_input[5*npit.index + 3] = inv_a[npit.multi_index]
                            np_fpga_input[5*npit.index + 4] = Ysn[npit.multi_index]
                        run_fpga_time = time.time()
                        proc_output = invoke_fpga(np_fpga_input)
                        fpga_input = np.array(np_fpga_input).astype('float32')
                        processed_output = invoke_fpga(fpga_input)
                        if NO_GARBAGE_ADDED:
                            Y_c_fpga = processed_output[0::3].reshape(grid_size)
                        else:
                            Y_c_fpga = processed_output[0::10].reshape(grid_size)
            else: # we are looping inside the FPGA, so we send the data once
                start_vector = time.time()
                # CPU reference loop runs the same Jacobi update locally.
                for iter in range(NUM_PCG_ITERS):
                    Ysn = np.multiply(Y_c_cpu, splat_norm) # flow 1
                    Ysn_reshaped = np.reshape(Ysn,-1,1)
                    Ysn_r2 = np.reshape((diffuse3_mat * Ysn_reshaped), Ysn.shape, order='F')
                    temp_Y = (-1 * LOSS_SMOOTH_MULT * np.multiply(splat_norm , Ysn_r2 )).astype('float32')
                    Y_c_cpu = np.multiply((b_c - temp_Y) , inv_a).astype('float32')
                total_time_vector = time.time() - start_vector #end timing
                cpu_time_vector += total_time_vector
                if VERBOSE:
                    print("\t CPU vector time: \t\t\t", total_time_vector)
                # Extra leading word carries the iteration count to the FPGA.
                np_fpga_input = np.empty(np.prod(grid_size)*5+1)
                fpga_elapsed = 0
                npit = np.nditer(Y_c_fpga, flags=['f_index','multi_index'], order='F')
                np_fpga_input[0] = NUM_PCG_ITERS
                for x in npit:
                    # order: Y, 1/a, bc, splatnorm
                    #print npit.index, "\t", npit.multi_index
                    np_fpga_input[5*npit.index + 0+1] = Y_c_fpga[npit.multi_index]
                    np_fpga_input[5*npit.index + 1+1] = splat_norm[npit.multi_index]
                    np_fpga_input[5*npit.index + 2+1] = b_c[npit.multi_index]
                    np_fpga_input[5*npit.index + 3+1] = inv_a[npit.multi_index]
                    np_fpga_input[5*npit.index + 4+1] = Ysn[npit.multi_index]
                if RUN_FPGA:
                    run_fpga_time = time.time()
                    proc_output_2 = invoke_fpga(np_fpga_input)
                    # First readback word reports the FPGA cycle count.
                    cycles = proc_output_2[0]
                    print "cycles: ", cycles
                    proc_output = proc_output_2[1:]
                    Y_c_fpga = proc_output.reshape(grid_size,order='F')
                    fpga_runtime = time.time() - run_fpga_time
                    fpga_time +=fpga_runtime
            final_result_cpu.append(Y_c_cpu)
            final_result_fpga.append(Y_c_fpga)
            if VERBOSE:
                print("Channel",channel,"completed, total time:")
                print("\t CPU vector: \t\t\t", cpu_time_vector)
                print("\t FPGA: \t\t\t", fpga_time)
        final_solution_cpu = np.stack((final_result_cpu[0], final_result_cpu[1]),axis=3)
        final_solution_fpga = np.stack((final_result_fpga[0], final_result_fpga[1]),axis=3)
    else:
        # Single-channel path (mirrors the per-channel body above).
        b_c = b_mat[:,:,:].astype('float32')
        Y_c = np.multiply(b_c, inv_a)
        Y_c_cpu = np.multiply(b_c, inv_a)
        Y_c_fpga = np.multiply(b_c, inv_a).astype('float32')
        Ysn = np.multiply(Y_c, splat_norm)
        cpu_time_sequential = 0
        cpu_time_vector = 0
        cpu_time_sequential_compute_only = 0
        fpga_time = 0
        start_vector = time.time()
        if LOOP_OUTSIDE:
            for iter in range(NUM_PCG_ITERS):
                if VERBOSE:
                    print("iteration", iter)
                start_vector = time.time()
                Ysn = np.multiply(Y_c, splat_norm) # flow 1
                Ysn_reshaped = np.reshape(Ysn,-1,1)
                Ysn_r2 = np.reshape((diffuse3_mat * Ysn_reshaped), Ysn.shape, order='F')
                temp_Y = (-1 * LOSS_SMOOTH_MULT * np.multiply(splat_norm , Ysn_r2 ))
                #Y_c_cpu = np.multiply((b_c - (-1 * LOSS_SMOOTH_MULT * np.multiply(splat_norm , Ysn_r2 ) )) , inv_a)
                Y_c_cpu = np.multiply((b_c - temp_Y) , inv_a)
                Y_c = Y_c_cpu
                total_time_vector = time.time() - start_vector #end timing
                Y_c_cpu = Y_c
                if VERBOSE:
                    print("\t CPU vector time: \t\t\t", total_time_vector)
                if RUN_FPGA:
                    np_fpga_input = np.empty(np.prod(grid_size)*5)
                    fpga_elapsed = 0
                    npit = np.nditer(Y_c_fpga, flags=['f_index','multi_index'], order='F')
                    for x in npit:
                        # order: Y, 1/a, bc, splatnorm
                        #print npit.index, "\t", npit.multi_index
                        np_fpga_input[5*npit.index + 0] = Y_c_fpga[npit.multi_index]
                        np_fpga_input[5*npit.index + 1] = splat_norm[npit.multi_index]
                        np_fpga_input[5*npit.index + 2] = b_c[npit.multi_index]
                        np_fpga_input[5*npit.index + 3] = inv_a[npit.multi_index]
                        np_fpga_input[5*npit.index + 4] = Ysn[npit.multi_index]
                    run_fpga_time = time.time()
                    proc_output = invoke_fpga(np_fpga_input)
                    fpga_input = np.array(np_fpga_input).astype('float32')
                    processed_output = invoke_fpga(fpga_input)
                    if NO_GARBAGE_ADDED:
                        Y_c_fpga = processed_output[0::3].reshape(grid_size)
                    else:
                        Y_c_fpga = processed_output[0::10].reshape(grid_size)
        else: # we are looping inside the FPGA, so we send the data once
            start_vector = time.time()
            # NOTE(review): the CPU reference loop is commented out here, so
            # in this mode Y_c_cpu stays at its initial value.
            # for iter in range(NUM_PCG_ITERS):
            # Ysn = np.multiply(Y_c_cpu, splat_norm) # flow 1
            # Ysn_reshaped = np.reshape(Ysn,-1,1)
            # Ysn_r2 = np.reshape((diffuse3_mat * Ysn_reshaped), Ysn.shape, order='F')
            # temp_Y = (-1 * LOSS_SMOOTH_MULT * np.multiply(splat_norm , Ysn_r2 )).astype('float32')
            # Y_c_cpu = np.multiply((b_c - temp_Y) , inv_a).astype('float32')
            total_time_vector = time.time() - start_vector #end timing
            cpu_time_vector += total_time_vector
            if VERBOSE:
                print("\t CPU vector time: \t\t\t", total_time_vector)
            np_fpga_input = np.empty(np.prod(grid_size)*5+1)
            fpga_elapsed = 0
            npit = np.nditer(Y_c_fpga, flags=['f_index','multi_index'], order='F')
            np_fpga_input[0] = NUM_PCG_ITERS
            for x in npit:
                # order: Y, 1/a, bc, splatnorm
                #print npit.index, "\t", npit.multi_index
                np_fpga_input[5*npit.index + 0+1] = Y_c_fpga[npit.multi_index]
                np_fpga_input[5*npit.index + 1+1] = splat_norm[npit.multi_index]
                np_fpga_input[5*npit.index + 2+1] = b_c[npit.multi_index]
                np_fpga_input[5*npit.index + 3+1] = inv_a[npit.multi_index]
                np_fpga_input[5*npit.index + 4+1] = Ysn[npit.multi_index]
            print(np_fpga_input.size)
            if RUN_FPGA:
                run_fpga_time = time.time()
                proc_output_2 = invoke_fpga(np_fpga_input)
                cycles = proc_output_2[0]
                print "cycles: ", cycles
                proc_output = proc_output_2[1:]
            else:
                proc_output = invoke_fpga(np_fpga_input)
            # pdb.set_trace()
            Y_c_fpga = proc_output.reshape(grid_size,order='F')
            fpga_runtime = time.time() - run_fpga_time
            fpga_time +=fpga_runtime
        final_result_cpu.append(Y_c_cpu)
        final_result_fpga.append(Y_c_fpga)
        if VERBOSE:
            print("completed, total time:")
            print("\t CPU vector: \t\t\t", cpu_time_vector)
            print("\t FPGA: \t\t\t", fpga_time)
        final_solution_cpu = final_result_cpu
        final_solution_fpga = final_result_fpga
    # Slice the grid solution back to per-pixel values (splat transpose).
    if TWO_FLOW:
        sliced_solution_cpu = (splat_mat.T) * (final_solution_cpu.reshape(grid_num_vertices,-1,order='F'))
        sliced_solution_fpga = (splat_mat.T) * (final_solution_fpga.reshape(grid_num_vertices,-1,order='F'))
    else:
        sliced_solution_cpu = (splat_mat.T) * (final_solution_cpu[0].reshape(grid_num_vertices,-1,order='F'))
        sliced_solution_fpga = (splat_mat.T) * (final_solution_fpga[0].reshape(grid_num_vertices,-1,order='F'))
    return sliced_solution_cpu, sliced_solution_fpga # final result is an array with improved flows in both directions
def make_parser():
    """Build the command-line parser for the bilateral-solver demo.

    Returns an ``argparse.ArgumentParser`` with boolean feature flags.
    NOTE(review): ``main`` does not currently wire the parsed flags into the
    module-level RUN_FPGA/VERBOSE/... globals; callers that want the flags
    to take effect must apply them explicitly.
    """
    parser = argparse.ArgumentParser(description='Run the bilateral solver on some depth results.')
    parser.add_argument('--fpga', '-f', action='store_true', default=False)
    # Bug fix: this used action='store_false' with default=False, so passing
    # --quiet could never change the value. store_true makes the flag
    # effective while keeping the same default when it is absent.
    parser.add_argument('--quiet', '-q', action='store_true', default=False)
    parser.add_argument('--small', '-s', action='store_true')
    parser.add_argument('--fixedpoint', '-fp', action='store_true')
    parser.add_argument('--loopoutside', '-lo', action='store_true')
    parser.add_argument('--debug', '-d', action='store_true')
    parser.add_argument('--nogarb', '-ng', action='store_true', default=False)
    parser.add_argument('--savefig', '-sf', action='store_true')
    parser.add_argument('--display', '-df', action='store_true')
    return parser
def main():
    """Run the solver on the default depth-superres images and plot results.

    Reads the module-level `reference`/`target`/`confidence` paths, runs
    both solver paths, and shows (and optionally saves) a comparison figure.
    NOTE(review): `args` is parsed but never used below -- the feature flags
    remain whatever the module-level constants say.
    """
    parser = make_parser()
    args = parser.parse_args()
    # loop over test images
    # for imdir in [x[0] for x in os.walk('img/')][1:]:
    # reference = imdir+'/reference.png'
    # target = imdir+'/input.png'
    # confidence = imdir+'/weight.png'
    #imdir = 'img/Adirondack'
    #reference = imdir+'/reference.png'
    #target = imdir+'/input.png'
    #confidence = imdir+'/weight.png'
    # target = 'img/input0.png'
    # reference = 'img/reference.png'
    # confidence = 'img/weight.png'
    input_X, input_W0, input_X0, flow_shape = prepare_flow(reference,(target,), confidence)
    res_cpu, res_fpga = dense_solve_jacobi(input_X, input_W0, input_X0)
    # Un-flatten the per-pixel solutions back to image shape.
    res_cpu = res_cpu.reshape(flow_shape[0], flow_shape[1], -1,order='F')
    res_fpga = res_fpga.reshape(flow_shape[0], flow_shape[1], -1,order='F')
    flow_a = np.array(plt.imread(target, format='png'), dtype=np.float32)
    # flow_b = np.array(plt.imread(flow_b_img, format='png'), dtype=np.float32)
    # 3x2 figure: input on top, CPU result in the middle, FPGA result below
    # (the right-hand column is reserved for the second flow channel).
    plt.subplot(3,2,1)
    plt.imshow(flow_a)
    plt.title('flow a input')
    plt.colorbar()
    # plt.subplot(3,2,2)
    # plt.imshow(flow_b)
    # plt.title('flow b input')
    # plt.colorbar()
    plt.subplot(3,2,3)
    plt.imshow(res_cpu[:,:,0])
    plt.title('CPU flow a output')
    plt.colorbar()
    # plt.subplot(3,2,4)
    # plt.imshow(res_cpu[:,:,1])
    # plt.title('CPU flow b output')
    # plt.colorbar()
    plt.subplot(3,2,5)
    plt.imshow(res_fpga[:,:,0])
    plt.title('FPGA flow a output')
    plt.colorbar()
    # plt.subplot(3,2,6)
    # plt.imshow(res_fpga[:,:,1])
    # plt.title('FPGA flow b output')
    # plt.colorbar()
    if SAVE_FIG:
        # Timestamped filename avoids overwriting previous figures.
        timestr = time.strftime("%Y%m%d-%H%M%S")
        plt.savefig('myfig'+timestr)
    if DISPLAY_FIG:
        plt.show()
if __name__ == "__main__":
    # Script entry point.
    main()
| 2.1875 | 2 |
tasks/tasks6.py | dasa4ok/home_tasks | 0 | 12758393 | #6. Дан список чисел. Выведите все элементы списка, которые больше предыдущего элемента.
def bigger_then_pred(values):
number_that_bigger_then_pred = []
for index, value in enumerate(values):
if value > values[index - 1]:
number_that_bigger_then_pred.append(value)
return number_that_bigger_then_pred
print(bigger_then_pred([-1, -2, -1, -5,6, 7, 9, 2, 4])) | 4.1875 | 4 |
bocadillo/__init__.py | sfermigier/bocadillo | 0 | 12758394 | <reponame>sfermigier/bocadillo<filename>bocadillo/__init__.py
from .applications import App
from .errors import HTTPError
from .middleware import ASGIMiddleware, Middleware
from .injection import discover_providers, provider, useprovider
from .recipes import Recipe
from .request import ClientDisconnect, Request
from .response import Response
from .sse import server_event
from .staticfiles import static
from .templates import Templates
from .views import view
from .websockets import WebSocket, WebSocketDisconnect
__version__ = "0.13.0"
| 1.03125 | 1 |
mmd_test.py | csadrian/wae | 0 | 12758395 | import numpy as np
import tensorflow as tf
def mmd_penalty(sample_qz, sample_pz, pz_scale, kernel='RBF'):
sigma2_p = pz_scale ** 2
n, d = sample_pz.get_shape().as_list()
n = tf.cast(n, tf.int32)
nf = tf.cast(n, tf.float32)
half_size = (n * n - n) / 2
norms_pz = tf.reduce_sum(tf.square(sample_pz), axis=1, keep_dims=True)
dotprods_pz = tf.matmul(sample_pz, sample_pz, transpose_b=True)
distances_pz = norms_pz + tf.transpose(norms_pz) - 2. * dotprods_pz
norms_qz = tf.reduce_sum(tf.square(sample_qz), axis=1, keep_dims=True)
dotprods_qz = tf.matmul(sample_qz, sample_qz, transpose_b=True)
distances_qz = norms_qz + tf.transpose(norms_qz) - 2. * dotprods_qz
dotprods = tf.matmul(sample_qz, sample_pz, transpose_b=True)
distances = norms_qz + tf.transpose(norms_pz) - 2. * dotprods
if kernel == 'RBF':
# Median heuristic for the sigma^2 of Gaussian kernel
'''
sigma2_k = tf.nn.top_k(
tf.reshape(distances, [-1]), half_size).values[half_size - 1]
sigma2_k += tf.nn.top_k(
tf.reshape(distances_qz, [-1]), half_size).values[half_size - 1]
'''
# Maximal heuristic for the sigma^2 of Gaussian kernel
# sigma2_k = tf.nn.top_k(tf.reshape(distances_qz, [-1]), 1).values[0]
# sigma2_k += tf.nn.top_k(tf.reshape(distances, [-1]), 1).values[0]
sigma2_k = d * sigma2_p
res1 = tf.exp( - distances_qz / 2. / sigma2_k)
res1 += tf.exp( - distances_pz / 2. / sigma2_k)
res1 = tf.multiply(res1, 1. - tf.eye(n))
res1 = tf.reduce_sum(res1) / (nf * nf - nf)
res2 = tf.exp( - distances / 2. / sigma2_k)
res2 = tf.reduce_sum(res2) * 2. / (nf * nf)
stat = res1 - res2
elif kernel == 'IMQ':
# k(x, y) = C / (C + ||x - y||^2)
# C = tf.nn.top_k(tf.reshape(distances, [-1]), half_size).values[half_size - 1]
# C += tf.nn.top_k(tf.reshape(distances_qz, [-1]), half_size).values[half_size - 1]
pz_kind = 'normal'
if pz_kind == 'normal':
Cbase = 2. * d * sigma2_p
elif pz_kind == 'sphere':
Cbase = 2.
elif pz_kind == 'uniform':
# E ||x - y||^2 = E[sum (xi - yi)^2]
# = zdim E[(xi - yi)^2]
# = const * zdim
Cbase = d
stat = 0.
for scale in [.1, .2, .5, 1., 2., 5., 10.]:
C = Cbase * scale
res1 = C / (C + distances_qz)
res1 += C / (C + distances_pz)
res1 = tf.multiply(res1, 1. - tf.eye(n))
res1 = tf.reduce_sum(res1) / (nf * nf - nf)
res2 = C / (C + distances)
res2 = tf.reduce_sum(res2) * 2. / (nf * nf)
stat += res1 - res2
else:
assert False
return stat
def main():
with tf.Session() as sess:
def e(t):
return sess.run(t)
def p(s, t):
print(s, e(t))
n = 10000
d = 64
scale = tf.Variable(1.0, dtype=tf.float32)
sample_qz = scale * tf.random.normal((n, d), dtype=tf.float32)
sample_pz = tf.random.normal((n, d), dtype=tf.float32)
mmd = mmd_penalty(sample_qz, sample_pz, pz_scale=1.0, kernel='IMQ')
e(tf.global_variables_initializer())
for scale_np in np.linspace(-2, +2, 21):
print(scale_np, sess.run(mmd, feed_dict={scale: scale_np}))
if __name__ == "__main__":
main()
| 2.0625 | 2 |
resp_base/Agents.py | probablytom/msci-model | 0 | 12758396 | from theatre_ag.theatre_ag.actor import Actor as TheatreActor
from theatre_ag.theatre_ag.task import Task
from .Constraints import Deadline, ResourceDelta
from .Responsibilities import Responsibility, Obligation
from abc import ABCMeta
from .utility_functions import mean
from .Responsibilities import Act, ResponsibilityEffect
from copy import copy
import random
class BasicResponsibleAgent(TheatreActor):
    """A theatre actor endowed with the responsibility formalism.

    The agent holds Responsibilities (Obligations with importance-weighted
    constraints), interprets them through per-factor coefficients, chooses
    which one to discharge next, records outcomes as "consequential
    responsibilities", and can judge how responsible other agents have been
    from their recorded history.
    """
    responsible = True  # This will be a toggle for deactivating the formalism
    __metaclass__ = ABCMeta  # NOTE(review): Python-2 idiom; no effect under Python 3

    def __init__(self,
                 notions,
                 name,
                 clock,
                 workflows: list,
                 sociotechnical_states=None,
                 interpreting_coefficients=None):
        """Build the agent and register its workflows.

        notions: baseline responsibilities; the implicit "idle"
            responsibility is appended (the passed-in list IS mutated,
            preserving the original behaviour).
        workflows: workflows this agent is assigned to.
        sociotechnical_states / interpreting_coefficients: optional dicts.
            Fresh dicts are created per call; the previous mutable default
            arguments were shared between all instances.
        """
        super().__init__(name, clock)
        if sociotechnical_states is None:
            sociotechnical_states = {}
        if interpreting_coefficients is None:
            interpreting_coefficients = {}
        self.interpreting_coefficients = interpreting_coefficients
        # Make a responsibility for self for chilling out, which idling fulfils
        chill_deadline = Deadline(1, clock)
        chill_effect = ResourceDelta({'personal_enjoyment': 1})
        chill = Obligation([chill_deadline,
                            chill_effect],
                           name="idle")
        chill.set_importances([0.25, 0.5])
        self.chill_resp = Responsibility(chill, self, self)
        notions.append(self.interpret(self.chill_resp))
        self.responsibilities = copy(notions)  # Default beliefs about the world
        self.notions = copy(notions)
        self.consequential_responsibilities = []  # All discharged constraints
        self.workflows = workflows
        self.socio_states = sociotechnical_states
        # assumes self.idling is provided by TheatreActor -- TODO confirm
        self.idle_act = Act(ResponsibilityEffect({'personal_enjoyment': 1}),
                            self.idling.idle,
                            self.idling)
        # How responsible is someone if you don't know what they've done before?
        self.basic_judgement_responsible = 0.5
        # Assign all of the workflows to me
        for workflow in self.workflows:
            workflow.assign_agent(self)
        # To be updated with the responsibility the current action represents
        self.current_responsibility = None
        # An act is a bound method. self.acts is a dictionary of the form:
        #    {effect: (act_entry_point, workflow, args)}
        # ..where the effect is a list of tuples, where each tuple is a string
        # and an integer effect on the attribute the string represents.
        self.acts = {}

    def delegate_responsibility(self,
                                obligation: Obligation,
                                importances: list,
                                delegee):  # Make this a ResponsibleAgent
        """Weight *obligation*, wrap it as a Responsibility from self to
        *delegee*, and ask the delegee to accept it.

        Raises:
            NotImplementedError: if the delegee refuses (unhandled case).
        """
        obligation.set_importances(importances)
        resp = Responsibility(copy(obligation), self, delegee)
        accepted = resp.delegee.accept_responsibility(resp)
        if not accepted:
            # BUG FIX: previously raised NotImplemented (the sentinel object,
            # which is not an exception and not callable) instead of
            # NotImplementedError.
            raise NotImplementedError("What happens if a responsibility "
                                      "isn't allocated?")

    def interpret(self,
                  resp: Responsibility):
        """Return a copy of *resp* whose constraint importances are scaled
        by this agent's interpreting coefficients, clamped to [0, 1]."""
        resp = copy(resp)
        for factor, coefficient in self.interpreting_coefficients.items():
            for constraint_index in range(len(resp.constraints)):
                old_constraint = resp.constraints[constraint_index]
                constraint = copy(old_constraint)
                importance = constraint.importance
                # Work out the new importance value, if there is one.
                # A coefficient applies when its factor appears in the
                # constraint's factors, or names the constraint's class
                # (e.g. Deadline).
                if factor in constraint.factors.keys() or factor == constraint.__class__:
                    importance = importance * coefficient
                    importance = max(min(1, importance), 0)  # Normalise!
                constraint.assign_importance(importance)
                resp.constraints[constraint_index] = constraint
        # Return the responsibility with a new set of constraints
        return resp

    def __decide_acceptance(self, resp):
        # Placeholder acceptance policy: accept everything. (A
        # mean-importance threshold was previously sketched here.)
        return True

    def accept_responsibility(self, resp: Responsibility):
        """Interpret *resp*; if the acceptance policy passes, store it.
        Returns whether the responsibility was accepted."""
        interpreted_responsibility = self.interpret(resp)
        accepted = self.__decide_acceptance(interpreted_responsibility)
        if accepted:
            self.responsibilities.append(interpreted_responsibility)
        return accepted

    def __judge_degree_responsible(self, other_agent):
        """Score *other_agent* per factor: the fulfilled importance divided
        by the number of occurrences of that factor, across the agent's
        discharged and outstanding (non-notion) responsibilities."""
        # Re-interpret constraints
        resps = [r
                 for r in copy([c[0] for c in other_agent.consequential_responsibilities])]
        resps += [r
                  for r in other_agent.responsibilities
                  if r not in other_agent.notions]
        for responsibility in resps:
            for i in range(len(responsibility.constraints)):
                constraint = copy(responsibility.constraints[i])
                constraint.importance = constraint.original_importance
                # NOTE(review): interpret() expects a Responsibility but is
                # handed a single constraint here -- confirm this is intended.
                responsibility.constraints[i] = self.interpret(constraint)
        # Calculate each resource type's specific responsibility
        specific_responsibilities = {}
        importance = 0
        outcome = False

        def process_factor(factor, outcome, importance):
            # Accumulate (fulfilled importance, occurrence count) per factor.
            if factor not in specific_responsibilities.keys():
                specific_responsibilities[factor] = (0, 0)
            score, count = specific_responsibilities[factor]
            count += 1
            if outcome is None:
                outcome = False
            if outcome:
                score += importance
            specific_responsibilities[factor] = (score, count)

        for responsibility in resps:
            for constraint in responsibility.constraints:
                importance = constraint.importance
                outcome = constraint.outcome
                if type(constraint) == Deadline:
                    process_factor(Deadline, outcome, importance)
                for factor in constraint.factors.keys():
                    process_factor(factor, outcome, importance)
        for factor, score_tuple in specific_responsibilities.items():
            specific_responsibilities[factor] = score_tuple[0]/score_tuple[1]
        return specific_responsibilities

    def basic_responsibility_judgement(self):
        """Default judgement for an agent with no recorded history."""
        return self.basic_judgement_responsible

    def general_responsibility_judgement(self, other_agent):
        """Mean of the per-factor judgements of *other_agent*."""
        judgement = self.__judge_degree_responsible(other_agent)
        return mean(judgement.values())

    def specific_responsibility_judgement(self, other_agent, resource_type):
        """Judgement of *other_agent* for one resource type, falling back
        to the basic judgement when no history exists for that factor."""
        return self.__judge_degree_responsible(other_agent).get(resource_type,
                                                                self.basic_judgement_responsible)

    def choose_action(self, responsibility):
        '''
        RETURNS: a function which returns a tuple (a,b):
            a: the success or failure of the discharge
            b: the set of constraint satisfactions (the consequential
               responsibility)
        Will choose the first action which seems to move resources in the
        intended direction, falling back to idling.
        '''
        intended_effect = responsibility.calculate_effect()
        # Duration is a cost, not a goal, so it does not take part in the
        # effect-matching lookup.
        intended_effect.disregard('duration')
        return self.acts.get(intended_effect,
                             self.idle_act)

    @property
    def actionable_responsibilities(self):
        # To be overridden by actors who don't act on all notions.
        return self.responsibilities

    def choose_responsibility(self):
        '''
        Choose a responsibility with the highest average importance across
        various factors.
        TODO: make this smarter! Just taking the sum doesn't take into
        account the nature of the importances.
        TODO: Consider deadlines! Implement an eisenhower matrix, to weigh
        importance against urgency?
        '''
        resps = self.actionable_responsibilities
        if resps == []:
            return None
        else:
            resp = sorted(resps,
                          key=lambda x: sum(x.importances))[::-1][0]
            return resp

    def next_action(self):
        """Select the next responsibility (if any) and the act discharging
        it; idles when there is nothing to do."""
        resp_chosen = self.choose_responsibility()
        if resp_chosen is not None:
            self.current_responsibility = resp_chosen
            discharge_act = self.choose_action(resp_chosen)
        else:
            discharge_act = self.idle_act
        return discharge_act

    def get_next_task(self):
        """Wrap the next act in a Task for the theatre scheduler."""
        next_action = self.next_action()
        return Task(next_action.entry_point_function,
                    next_action.workflow,
                    next_action.args)

    def calculate_delay(self, entry_point, workflow=None, args=()):
        """Duration of the current activity; an idle lasts one tick."""
        if self.current_responsibility is None:
            return 1  # Duration of an idle
        # Duration of the current responsibility is the length of the task.
        return self.current_responsibility.calculate_effect().get('duration')

    def handle_task_return(self, task, value):
        """Record the outcome of a finished task as a consequential
        responsibility and retire the responsibility it discharged."""
        if value is not None:
            discharged_successfully, constraint_satisfactions = value
            consequential_responsibility = copy(self.current_responsibility)
            consequential_responsibility.obligation.constraint_set = [copy(c) for c in constraint_satisfactions]
            self.consequential_responsibilities.append((consequential_responsibility, discharged_successfully))
            self.responsibilities.pop(self.responsibilities.index(self.current_responsibility))
        else:
            # No explicit outcome: record a success; idling stays on the list.
            if not self.current_responsibility == self.chill_resp:
                self.responsibilities.pop(self.responsibilities.index(self.current_responsibility))
            consequential_responsibility = copy(self.current_responsibility)
            self.consequential_responsibilities.append((consequential_responsibility, True))
        self.current_responsibility = None

    def register_act(self,
                     act: Act):
        """Register *act* as the discharger for its effect (mutates the act:
        binds its args to this agent and zeroes its default cost)."""
        act.args = [self]
        act.entry_point_function.default_cost = 0
        self.acts[act.effect] = act

    def register_new_workflow(self,
                              workflow):
        """Adopt an additional workflow at runtime."""
        workflow.assign_agent(self)
        self.workflows.append(workflow)

    def get_sociotechnical_state(self, state_key):
        """Look up a sociotechnical state by key, or None when absent."""
        return self.socio_states.get(state_key,
                                     None)

    def advise(self, other_agent):
        """Offer this agent's interpreting coefficients to *other_agent*."""
        other_agent.take_advice(self.interpreting_coefficients, self)

    def take_advice(self, advice, authority):
        """Fold *advice* (a factor -> delta dict) into our coefficients.

        Advice is accepted blindly, but only when it comes from a Lecturer.
        """
        # Optionally here, check whether the authority figure is
        # authoritative enough to listen to.
        if type(authority) is Lecturer:
            # BUG FIX: previously iterated self.interpreting_coefficients,
            # which doubled the agent's own coefficients and ignored the
            # advice entirely.
            for key, advised_value in advice.items():
                original_value = self.interpreting_coefficients.get(key, 0)
                self.interpreting_coefficients[key] = original_value + advised_value
class HedonisticAgent(BasicResponsibleAgent):
    """Agent biased to over-weight personal enjoyment when interpreting
    responsibilities (coefficient 5 on 'personal_enjoyment')."""

    def __init__(self,
                 notions,
                 name,
                 clock,
                 workflows: list,
                 sociotechnical_states=None,
                 interpreting_coefficients=None):
        # BUG FIX: the defaults were mutable dicts shared across every call;
        # build fresh ones per instantiation instead.
        if sociotechnical_states is None:
            sociotechnical_states = {}
        if interpreting_coefficients is None:
            interpreting_coefficients = {'personal_enjoyment': 5}
        super().__init__(notions,
                         name,
                         clock,
                         workflows,
                         copy(sociotechnical_states),
                         copy(interpreting_coefficients))
class StudiousAgent(BasicResponsibleAgent):
    """Agent biased towards academic output (coefficient 5 on
    'working_programs' and 'essays_written')."""

    def __init__(self,
                 notions,
                 name,
                 clock,
                 workflows: list,
                 sociotechnical_states=None,
                 interpreting_coefficients=None):
        # BUG FIX: the defaults were mutable dicts shared across every call;
        # build fresh ones per instantiation instead.
        if sociotechnical_states is None:
            sociotechnical_states = {}
        if interpreting_coefficients is None:
            interpreting_coefficients = {'working_programs': 5,
                                         'essays_written': 5}
        super().__init__(notions,
                         name,
                         clock,
                         workflows,
                         copy(sociotechnical_states),
                         copy(interpreting_coefficients))
class Lecturer(BasicResponsibleAgent):
    """Unbiased agent whose advice other agents accept (see
    BasicResponsibleAgent.take_advice)."""

    def __init__(self,
                 notions,
                 name,
                 clock,
                 workflows: list,
                 sociotechnical_states=None,
                 interpreting_coefficients=None):
        # BUG FIX: the defaults were mutable dicts shared across every call;
        # build fresh ones per instantiation instead.
        if sociotechnical_states is None:
            sociotechnical_states = {}
        if interpreting_coefficients is None:
            interpreting_coefficients = {}
        super().__init__(notions,
                         name,
                         clock,
                         workflows,
                         copy(sociotechnical_states),
                         copy(interpreting_coefficients))
class BullshitAgent(BasicResponsibleAgent):
    """Agent that picks a responsibility uniformly at random instead of by
    importance."""

    def choose_responsibility(self):
        """Return a random responsibility, or None when there are none.

        BUG FIX: random.choice raises IndexError on an empty sequence,
        whereas the base-class contract (and next_action) expects None when
        there is nothing to do.
        """
        if not self.responsibilities:
            return None
        return random.choice(self.responsibilities)
| 2.484375 | 2 |
requests_example.py | collinoeight/random | 0 | 12758397 | <reponame>collinoeight/random
""" Checking for connectivity to Google.com"""
import requests
r= requests.get('https://www.google.com/')
if r.status_code!=200:
print("There was an error."+str(r.status_code))
else:
print("All good.") | 2.96875 | 3 |
rebound/python_examples/simulationarchive/problem.py | rodluger/ttv-devil | 0 | 12758398 | <filename>rebound/python_examples/simulationarchive/problem.py
# Import the rebound module
import rebound
import os  # NOTE(review): appears unused here -- confirm before removing

filename = "simulationarchive.bin"
try:
    # Resume from an existing archive if one is present on disk.
    sim = rebound.Simulation.from_archive(filename)
    sim.simulationarchive_filename = filename
    print("Restarting from simulation archive. Last snapshot found at t=%.1f"%sim.t)
except Exception:
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt. Any failure to load the archive (missing/corrupt
    # file) falls through to creating a fresh simulation.
    print("Creating new simulation.")
    sim = rebound.Simulation()
    sim.add(m=1)                    # star
    sim.add(m=1e-3, a=1, e=0.01)    # planet 1
    sim.add(m=1e-3, a=2.5, e=0.01)  # planet 2
    sim.integrator = "whfast"
    sim.dt = 3.1415*2.*6./365.25    # 6 days in units where G=1
    sim.move_to_com()
    # Snapshot the simulation to the archive at a fixed time interval.
    sim.initSimulationArchive(filename, interval=2.*3.1415*1e5)

# Run a very long simulation.
# This can be interrupted at any time and then restarted.
sim.integrate(2.*3.1415*1e10)  # 10 Gyr
| 2.53125 | 3 |
spark-1.3.0/python/build/py4j/tests/py4j_callback_example2.py | iflink/spark | 0 | 12758399 | <gh_stars>0
from py4j.java_gateway import JavaGateway
class ClassNone(object):
    """Py4J callback object whose getName always yields None."""

    def getName(self):
        """Return None (exercises None translation across the gateway)."""
        return None

    class Java:
        # Py4J convention: declares the Java interface this Python
        # object implements on the JVM side.
        implements = ['py4j.examples.InterfaceNone']
if __name__ == '__main__':
    # Start a gateway with a callback server so the JVM can call back
    # into this Python process.
    gateway = JavaGateway(start_callback_server=True)
    callback_object = ClassNone()
    result = gateway.entry_point.testNone(callback_object)
    print(result)
    gateway.shutdown()
| 2.28125 | 2 |
reversion/compat.py | sperrygrove/django-reversion | 1 | 12758400 | <reponame>sperrygrove/django-reversion<filename>reversion/compat.py
import django
def remote_field(field):
    """Return the relation descriptor for *field*.

    Django >= 1.9 exposes it as ``remote_field``; older versions used
    ``rel``.
    """
    if hasattr(field, 'remote_field'):
        return field.remote_field
    return field.rel
def remote_model(field):
    """Return the model targeted by *field*'s relation.

    Django >= 1.9 exposes it as ``remote_field.model``; older versions
    used ``rel.to``.
    """
    if hasattr(field, 'remote_field'):
        return field.remote_field.model
    return field.rel.to
def is_authenticated(user):
    """Return whether *user* is authenticated, across Django versions.

    ``User.is_authenticated`` became a property in Django 1.10; before
    that it was a method and must be called.
    """
    if django.VERSION >= (1, 10):
        return user.is_authenticated
    return user.is_authenticated()
| 2.25 | 2 |
controller/logicTopoController.py | aga3134/SourcingWater | 6 | 12758401 | from sqlalchemy.sql.functions import func
from model.db import db
import json
from controller.logicTopoBasin import LogicTopoBasin
from controller.logicTopoLivingArea import LogicTopoLivingArea
from controller.logicTopoAgricultureArea import LogicTopoAgricultureArea
from controller.logicTopoWaterwork import LogicTopoWaterwork
from controller.logicTopoWaterin import LogicTopoWaterin
from controller.logicTopoFlowPath import LogicTopoFlowPath
from controller.logicTopoCatchment import LogicTopoCatchment
from controller.logicTopoPollution import LogicTopoPollution
from controller.logicTopoIndustryArea import LogicTopoIndustryArea
from controller.logicTopoFactory import LogicTopoFactory
from controller.logicTopoSewageTreatmentPlant import LogicTopoSewageTreatmentPlant
from controller.logicTopoReservoir import LogicTopoReservoir
from controller.logicTopoDebris import LogicTopoDebris
from controller.logicTopoRainStation import LogicTopoRainStation
from controller.logicTopoFloodStation import LogicTopoFloodStation
from controller.logicTopoWaterLevelStation import LogicTopoWaterLevelStation
from controller.util import GetSInfoPoint
class LogicTopoController():
    """Dispatcher for the logical-topology API.

    Maps a node kind (e.g. basin, flow path, waterwork) and a transfer
    name to the concrete LogicTopo* handler that produces the result.
    Kind and transfer names are the Chinese strings stored in the
    s_topology_* tables; they are part of the API contract and must not
    be altered.
    """
    def ListKind(self):
        """Return all topology kinds as a list of dicts (one per row)."""
        sql = "select * from s_topology_kind"
        rows = db.engine.execute(sql)
        result = [dict(r) for r in rows]
        return result
    def ListTransfer(self,kind=None):
        """Return all transfers, optionally filtered by source kind.

        NOTE(review): `kind` is interpolated directly into the SQL string.
        If it can come from user input this is an SQL-injection risk --
        prefer bound parameters (e.g. sqlalchemy.text with :kind).
        """
        sql = "select * from s_topology_transfer"
        if kind is not None:
            sql += " where from_類別='%s'" % kind
        rows = db.engine.execute(sql)
        result = [dict(r) for r in rows]
        return result
    def FindNodeByKind(self,param):
        """Locate a node of the given kind using the id/location in *param*.

        Returns the handler's result dict, or an {"error": ...} dict.
        """
        if not "kind" in param:
            return {"error":"no kind parameter"}
        kind = param["kind"]
        if kind == "流域":
            return LogicTopoBasin().FindBasinByID(param)
        elif kind == "地點":
            # NOTE(review): LogicTopoPlace is not imported in this module,
            # so this branch raises NameError -- confirm intended module.
            return LogicTopoPlace().FindVillageByLatLng(param)
        elif kind == "淨水場":
            return LogicTopoWaterwork().FindWaterworkByID(param)
        else:
            return {"error":"not implemented"}
    def FindNodeByTransfer(self,param):
        """Dispatch a (kind, transfer) pair to the matching handler method.

        Every branch below simply forwards *param* to the LogicTopo*
        handler that implements that transfer for that kind. Unmatched
        combinations fall through to {"error": "not implemented"}.
        """
        if not "kind" in param:
            return {"error":"no kind parameter"}
        if not "transfer" in param:
            return {"error":"no transfer parameter"}
        kind = param["kind"]
        transfer = param["transfer"]
        if kind == "流域":
            ltb = LogicTopoBasin()
            if transfer == "流域範圍":
                return ltb.FindBasinByID(param)
            elif transfer in ["主要河道","源頭到海洋路徑"]:
                return ltb.FindMainRiverByID(param)
            elif transfer == "所有河道":
                return ltb.FindStreams(param)
            elif transfer == "流域分區":
                return ltb.FindSubBasins(param)
            elif transfer == "生活區域":
                return ltb.FindLivingArea(param)
            elif transfer == "農業區域":
                return ltb.FindAgricultureArea(param)
            elif transfer == "工業區域":
                return ltb.FindIndustryArea(param)
            elif transfer == "水庫堰壩":
                return ltb.FindReservoir(param)
            elif transfer == "水質水量保護區":
                return ltb.FindProtectArea(param)
            elif transfer == "淹水潛勢圖":
                return ltb.FindFloodArea(param)
            elif transfer == "土石流潛勢溪流":
                return ltb.FindDebris(param)
            elif transfer in ["雨水下水道","污水下水道","圳路"]:
                return {"error":"無開放資料"}
        elif kind == "流路":
            ltfp = LogicTopoFlowPath()
            if transfer == "上游集水區":
                return ltfp.FindUpstreamCatchment(param)
            elif transfer == "下游入海線":
                return ltfp.FindDownstreamPath(param)
            elif transfer == "所屬流域":
                return ltfp.FindBasin(param)
            elif transfer == "鳥覽流路":
                return ltfp.BirdView(param)
        elif kind == "生活區域":
            ltla = LogicTopoLivingArea()
            if transfer == "淨水廠為何":
                return ltla.FindVillageWaterwork(param)
            elif transfer == "水源在哪":
                return ltla.FindVillageWaterin(param)
            elif transfer == "有哪些污染源":
                return ltla.FindVillagePollution(param)
            elif transfer == "用水統計(三級經濟區)":
                return ltla.FindWaterUse(param)
        elif kind == "農業區域":
            ltaa = LogicTopoAgricultureArea()
            if transfer == "有哪些污染源":
                return ltaa.FindAgriculturePollution(param)
            elif transfer == "有哪些農作物":
                return ltaa.FindCrop(param)
        elif kind == "淨水場":
            ltww = LogicTopoWaterwork()
            if transfer == "取水口為何":
                return ltww.FindWaterinByID(param)
            elif transfer == "淨水場水質":
                return ltww.FindWaterworkQuality(param)
            elif transfer == "淨水場供水量":
                return ltww.FindWaterworkQuantity(param)
            elif transfer == "供給哪些區域":
                return ltww.FindSupplyLivingArea(param)
        elif kind == "取水口":
            ltwi = LogicTopoWaterin()
            if transfer == "集水區為何":
                return ltwi.FindCatchmentByID(param)
            elif transfer == "取水量":
                return ltwi.FindWaterinQuantity(param)
            elif transfer == "生活供給範圍":
                return ltwi.FindSupplyLivingArea(param)
        elif kind == "集水區":
            ltc = LogicTopoCatchment()
            if transfer == "有哪些污染源":
                return ltc.FindCatchmentPollution(param)
            elif transfer == "雨量站":
                return ltc.FindRainStation(param)
            elif transfer == "河川水位站":
                return ltc.FindWaterLevelStation(param)
            elif transfer == "淹水感測站":
                return ltc.FindFloodStation(param)
        elif kind == "鄰近污染源":
            ltp = LogicTopoPollution()
            if transfer == "工廠":
                return ltp.FindFactory(param)
            elif transfer == "環境保護列管對象":
                return ltp.FindEPAFactoryBase(param)
            elif transfer == "工業區域":
                return ltp.FindIndustryArea(param)
            elif transfer == "工業污水處理廠":
                return ltp.FindSewageTreatmentPlant(param)
            elif transfer == "農地工廠":
                return ltp.FindFactoryInFarm(param)
            elif transfer == "水污染源放流口":
                return ltp.FindWaterpRecord(param)
        elif kind == "工業區域":
            ltia = LogicTopoIndustryArea()
            if transfer == "哪個污水廠":
                return ltia.FindSewageTreatmentPlant(param)
            elif transfer == "有哪些工廠":
                return ltia.FindFactory(param)
        elif kind == "工廠":
            ltf = LogicTopoFactory()
            if transfer == "哪個污水廠":
                return ltf.FindSewageTreatmentPlant(param)
            elif transfer == "屬於哪個工業區":
                return ltf.FindIndustryArea(param)
        elif kind == "工業污水處理廠":
            ltstp = LogicTopoSewageTreatmentPlant()
            if transfer == "處理範圍":
                return ltstp.FindProcessingArea(param)
        elif kind == "水庫":
            ltr = LogicTopoReservoir()
            if transfer == "蓄水範圍":
                return ltr.FindStorageArea(param)
            elif transfer == "集水區為何":
                return ltr.FindCatchment(param)
            elif transfer == "水質水量保護區":
                return ltr.FindProtectArea(param)
        elif kind == "土石流":
            ltd = LogicTopoDebris()
            if transfer == "集水區為何":
                return ltd.FindCatchment(param)
            elif transfer == "影響範圍":
                return ltd.FindInfluence(param)
            elif transfer == "歷史影像":
                return ltd.FindHistoryPhoto(param)
            elif transfer == "流路":
                return ltd.FindFlowPath(param)
        elif kind == "雨量站":
            ltrs = LogicTopoRainStation()
            if transfer == "雨量資料":
                return ltrs.FindRainData(param)
            elif transfer == "鄰近河川水位站":
                return ltrs.FindWaterLevelStation(param)
            elif transfer == "鄰近淹水感測站":
                return ltrs.FindFloodStation(param)
            elif transfer == "淹水潛勢圖":
                return ltrs.FindFloodArea(param)
        elif kind == "河川水位站":
            ltwls = LogicTopoWaterLevelStation()
            if transfer == "水位資料":
                return ltwls.FindWaterLevelData(param)
            elif transfer == "鄰近雨量站":
                return ltwls.FindRainStation(param)
            elif transfer == "鄰近淹水感測站":
                return ltwls.FindFloodStation(param)
            elif transfer == "淹水潛勢圖":
                return ltwls.FindFloodArea(param)
        elif kind == "淹水感測站":
            ltfs = LogicTopoFloodStation()
            if transfer == "淹水資料":
                return ltfs.FindFloodData(param)
            elif transfer == "鄰近雨量站":
                return ltfs.FindRainStation(param)
            elif transfer == "鄰近河川水位站":
                return ltfs.FindWaterLevelStation(param)
            elif transfer == "淹水潛勢圖":
                return ltfs.FindFloodArea(param)
        return {"error":"not implemented"}
    def GetNodeInfo(self,param):
        """Return the basic info record for a named node of a given kind,
        or an {"error": ...} dict when parameters or data are missing."""
        if not "kind" in param:
            return {"error":"no kind parameter"}
        kind = param["kind"]
        nodeName = None
        if "nodeName" in param:
            nodeName = param["nodeName"]
        if nodeName is None:
            return {"error":"no nodeName parameter"}
        info = GetSInfoPoint(param["kind"],nodeName)
        if info is None:
            return {"error":" 查無基本資料"}
        else:
            return info
| 2.328125 | 2 |
src/consplit/formats/repo.py | rwestgeest/consplit | 0 | 12758402 | from os import path
from lxml import objectify, etree
class Repo:
    """Resolves on-disk paths for drawings with a fixed file extension."""

    def __init__(self, extension):
        # File extension (without the dot) appended to every drawing name.
        self._extension = extension

    def filepath(self, drawing, location):
        """Return ``<location>/<drawing.name>.<extension>``."""
        filename = '{}.{}'.format(drawing.name, self._extension)
        return path.join(location, filename)
def as_svg(drawing):
    """Serialize *drawing* to a standalone SVG document (bytes).

    Builds an <svg> root via lxml.objectify's ElementMaker with the SVG
    namespace as default (plus xlink), one <g> per layer, and returns a
    pretty-printed UTF-8 XML byte string with declaration.
    """
    E = objectify.ElementMaker(annotate=False, namespace="http://www.w3.org/2000/svg", nsmap ={ None : "http://www.w3.org/2000/svg", 'xlink':"http://www.w3.org/1999/xlink" })
    root = E.svg(
        E.title(drawing.name),
        *[ as_svg_layer(E, layer) for layer in drawing.layers ],
        width=drawing.width,
        height=drawing.height,
        version=drawing.version,
        viewBox=drawing.view_box)
    return etree.tostring(etree.ElementTree(root), xml_declaration=True, encoding="UTF-8", standalone="yes", pretty_print=True)
def as_svg_layer(E, layer):
    """Build an SVG <g> element for *layer* containing all of its strokes."""
    children = [as_svg_stroke(E, stroke) for stroke in layer.strokes]
    return E.g(*children,
               id=layer.name,
               opacity=layer.opacity)
def as_svg_stroke(E, stroke):
    """Create the SVG element named by ``stroke.type`` via the element
    maker *E* and copy the stroke's attributes onto it."""
    element = getattr(E, stroke.type)()
    for name, value in stroke.attributes.items():
        element.attrib[name] = value
    return element
def write_bytes_to(filepath, content):
    """Write *content* (bytes) to *filepath*, returning the byte count."""
    with open(filepath, 'w+b') as output:
        written = output.write(content)
    return written
| 2.53125 | 3 |
src/tf/gen/neuralnetwork.py | juanprietob/gan-brain | 1 | 12758403 | #
# Copyright 2016 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import time
import numpy as np
import sys
sys.path.append('../util')
from netFunctions import print_tensor_shape
def read_and_decode(filename_queue, size, namescope):
    """Read one serialized tf.Example from *filename_queue* and decode it
    into an (image, label) pair of float32 tensors reshaped to *size*.

    size: full tensor shape used for both image and label -- TODO confirm
        it matches the stored height/width/depth.
    namescope: label prefix used only for shape-debug printing.
    """
    # input: filename
    # output: image, label pair

    # setup a TF record reader
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)

    # list the features we want to extract, i.e., the image and the label
    features = tf.parse_single_example(
        serialized_example,
        features={
            'height': tf.FixedLenFeature([], tf.int64),
            'width': tf.FixedLenFeature([], tf.int64),
            'depth': tf.FixedLenFeature([], tf.int64),
            'raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.string)
        })

    # Set image and label shapes
    # NOTE(review): these casts are currently unused -- the tensors are
    # reshaped with the caller-supplied `size` instead.
    height = tf.cast(features['height'], tf.int32)
    width = tf.cast(features['width'], tf.int32)
    depth = tf.cast(features['depth'], tf.int32)

    # Decode the training image
    image = tf.decode_raw(features['raw'], tf.float32)
    image_re = tf.reshape(image, (size))
    print_tensor_shape(image_re, namescope + ' image')

    # Decode label
    label = tf.decode_raw(features['label'], tf.float32)
    label_re = tf.reshape(label, (size))
    print_tensor_shape(label_re, namescope + ' image label')

    return image_re, label_re
def inputs(batch_size, num_epochs, filenames, size, namescope="input"):
    """Build the shuffled input pipeline over TFRecord *filenames*.

    Returns (images, labels) batch tensors of shape [batch_size, *size].
    """
    # inputs: batch_size, num_epochs are scalars, filename
    # output: image and label pairs for use in training or eval

    # define the input node
    with tf.name_scope(namescope):
        # setup a TF filename_queue
        filename_queue = tf.train.string_input_producer(
            filenames, num_epochs=num_epochs)

        # return and image and label
        image, label = read_and_decode(filename_queue, size, namescope)

        # shuffle the images, not strictly necessary as the data creating
        # phase already did it, but there's no harm doing it again.
        images, labels = tf.train.shuffle_batch(
            [image, label], batch_size=batch_size, num_threads=4,
            capacity=50000,
            min_after_dequeue=10000)

        print_tensor_shape(images, namescope)

        # labels = tf.one_hot(labels, 2)
        print_tensor_shape(labels, namescope + ' labels')

        # (batch-normalisation experiment, kept for reference)
        # beta = tf.Variable(tf.constant(0.0, shape=[1]),
        #     name='beta', trainable=True)
        # gamma = tf.Variable(tf.constant(1.0, shape=[1]),
        #     name='gamma', trainable=True)
        # mean, variance = tf.nn.moments(images, [0])
        # images = tf.nn.batch_normalization(images, mean, variance, beta, gamma, 1e-3)

        return images, labels
def convolution2d(images, out_channels, name, relu=True, ps_device="/cpu:0", w_device="/gpu:0"):
    """3x3 stride-2 2-D convolution layer with bias and optional ReLU.

    Variables live on *ps_device* (parameter server), the computation on
    *w_device*. Output spatial dims are halved (stride 2, SAME padding).
    """
    in_channels = images.get_shape().as_list()[-1]
    with tf.variable_scope(name):
        # weight variable 4d tensor, first two dims are patch (kernel) size
        # third dim is number of input channels and fourth dim is output channels
        with tf.device(ps_device):
            w_conv_name = 'w_' + name
            w_conv = tf.get_variable(w_conv_name, shape=[3,3,in_channels,out_channels], dtype=tf.float32, initializer=tf.truncated_normal_initializer(mean=0,stddev=0.1))
            print_tensor_shape( w_conv, 'weight shape')
            b_conv_name = 'b_' + name
            b_conv = tf.get_variable(b_conv_name, shape=[out_channels])
            print_tensor_shape( b_conv, 'bias shape')
        with tf.device(w_device):
            conv_op = tf.nn.conv2d( images, w_conv, strides=[1,2,2,1], padding="SAME", name='conv1_op')
            print_tensor_shape( conv_op, 'conv_op shape')
            conv_op = tf.nn.bias_add(conv_op, b_conv, name='bias_add_op')
            if(relu):
                conv_op = tf.nn.relu( conv_op, name='relu_op' )
                print_tensor_shape( conv_op, 'relu_op shape')
            return conv_op
def convolution(images, name, activation=None, out_channels=1, ps_device="/cpu:0", w_device="/gpu:0", w_shape=None, strides=None, padding='SAME'):
    """3-D convolution layer with bias and optional activation.

    Defaults to a 5x5x5 kernel with stride 2 in each spatial dimension;
    *w_shape* / *strides* override the defaults. Variables live on
    *ps_device*, the computation on *w_device*.
    """
    in_channels = images.get_shape().as_list()[-1]
    if w_shape is None:
        w_shape = [5,5,5,in_channels,out_channels]
    if strides is None:
        strides = [1,2,2,2,1]
    with tf.variable_scope(name):
        # weight variable 4d tensor, first two dims are patch (kernel) size
        # third dim is number of input channels and fourth dim is output channels
        with tf.device(ps_device):
            w_conv_name = 'w_' + name
            w_conv = tf.get_variable(w_conv_name, shape=w_shape, dtype=tf.float32, initializer=tf.truncated_normal_initializer(mean=0,stddev=0.1))
            print_tensor_shape( w_conv, name + ' weight shape')
            b_conv_name = 'b_' + name
            b_conv = tf.get_variable(b_conv_name, shape=w_shape[-1:])
            print_tensor_shape( b_conv, name + ' bias shape')
        with tf.device(w_device):
            conv_op = tf.nn.conv3d( images, w_conv, strides=strides, padding=padding, name='conv1_op')
            print_tensor_shape( conv_op, name + ' conv_op shape')
            conv_op = tf.nn.bias_add(conv_op, b_conv, name='bias_add_op')
            if(activation):
                conv_op = activation( conv_op, name='activation_op' )
                print_tensor_shape( conv_op, 'activation_op shape')
            return conv_op
def deconvolution2d(images, output_shape, name, activation=None, ps_device="/cpu:0", w_device="/gpu:0", w_shape=None, strides=None, padding="SAME"):
    """2-D transposed-convolution (upsampling) layer with bias and
    optional activation.

    output_shape: full output tensor shape; its last entry fixes the
        output channel count. Defaults: 3x3 kernel, stride 2.
    """
    with tf.variable_scope(name):
        in_channels = images.get_shape()[-1]
        out_channels = output_shape[-1]
        if w_shape is None:
            w_shape = [3,3,in_channels,out_channels]
        if strides is None:
            strides = [1,2,2,1]
        with tf.device(ps_device):
            w_deconv_name = 'w_' + name
            w_deconv = tf.get_variable(w_deconv_name, shape=w_shape, dtype=tf.float32, initializer=tf.truncated_normal_initializer(mean=0,stddev=0.1))
            print_tensor_shape( w_deconv, name + 'weight shape')
            b_deconv_name = 'b_' + name
            b_deconv = tf.get_variable(b_deconv_name, shape=[out_channels])
            print_tensor_shape( b_deconv, name + 'bias shape')
        with tf.device(w_device):
            deconv_op = tf.nn.conv2d_transpose( images, w_deconv,
                output_shape=output_shape,
                # use_bias=True,
                strides=strides,
                padding=padding, name='deconv_op' )
            print_tensor_shape( deconv_op, 'deconv_op shape')
            deconv_op = tf.nn.bias_add(deconv_op, b_deconv, name='bias_add_op')
            if activation:
                deconv_op = activation( deconv_op, name='activation_op' )
                print_tensor_shape( deconv_op, 'activation_op shape')
            return deconv_op
def deconvolution(images, output_shape, name, activation=None, ps_device="/cpu:0", w_device="/gpu:0", w_shape=None, strides=None, padding="SAME"):
    """3-D transposed-convolution (upsampling) layer with bias and
    optional activation.

    output_shape: full 5-D output tensor shape; index 4 is the output
        channel count. Defaults: 5x5x5 kernel, stride 2. Note that for
        conv3d_transpose the filter layout is [d,h,w,out,in].
    """
    with tf.variable_scope(name):
        in_channels = images.get_shape()[-1]
        out_channels = output_shape[4]
        if w_shape is None:
            w_shape = [5,5,5,out_channels,in_channels]
        if strides is None:
            strides = [1,2,2,2,1]
        with tf.device(ps_device):
            w_deconv_name = 'w_' + name
            w_deconv = tf.get_variable(w_deconv_name, shape=w_shape, dtype=tf.float32, initializer=tf.truncated_normal_initializer(mean=0,stddev=0.1))
            print_tensor_shape( w_deconv, name + ' weight shape')
            b_deconv_name = 'b_' + name
            b_deconv = tf.get_variable(b_deconv_name, shape=[out_channels])
            print_tensor_shape( b_deconv, name + ' bias shape')
        with tf.device(w_device):
            deconv_op = tf.nn.conv3d_transpose( images, w_deconv,
                output_shape=output_shape,
                # use_bias=True,
                strides=strides,
                padding=padding, name='deconv_op' )
            print_tensor_shape( deconv_op, 'deconv_op shape')
            deconv_op = tf.nn.bias_add(deconv_op, b_deconv, name='bias_add_op')
            if activation:
                deconv_op = activation( deconv_op, name='activation_op' )
                print_tensor_shape( deconv_op, 'activation_op shape')
            return deconv_op
def matmul(images, out_channels, name, relu=True, ps_device="/cpu:0", w_device="/gpu:0"):
    """Fully-connected layer: images @ W + b, with optional ReLU.

    images: 2-D tensor [batch, in_features].
    out_channels: number of output features.
    Variables live on *ps_device*, the computation on *w_device*.
    Returns the [batch, out_channels] output tensor.
    """
    with tf.variable_scope(name):
        shape = images.get_shape().as_list()
        with tf.device(ps_device):
            w_matmul_name = 'w_' + name
            w_matmul = tf.get_variable(w_matmul_name, shape=[shape[1],out_channels], dtype=tf.float32, initializer=tf.truncated_normal_initializer(mean=0,stddev=0.1))
            print_tensor_shape( w_matmul, 'w_matmul shape')
            b_matmul_name = 'b_' + name
            # BUG FIX: the bias was previously created with the literal
            # string 'b_matmul_name' instead of the per-layer name built
            # above, so every matmul layer used the same variable name.
            b_matmul = tf.get_variable(b_matmul_name, shape=[out_channels])
        with tf.device(w_device):
            matmul_op = tf.nn.bias_add(tf.matmul(images, w_matmul), b_matmul)
            if(relu):
                matmul_op = tf.nn.relu(matmul_op)
            return matmul_op
def generator(images, keep_prob=1, batch_size=1, regularization_constant=0.0, ps_device="/cpu:0", w_device="/gpu:0", is_training=False):
    """3-D encoder/decoder generator: two strided convolutions, two
    transposed convolutions back to input resolution, then a 1x1x1
    scoring convolution over the upsampled features concatenated with
    the (batch-normalised) input.

    Returns a tensor with the input's spatial shape and one channel.
    NOTE(review): regularization_constant is currently unused.
    """
    # Encoder part of the network
    # input: tensor of images
    # output: tensor of computed logits
    images = tf.layers.batch_normalization(images, training=is_training)
    print_tensor_shape(images, "images")

    # Convolution layer
    conv1_op = convolution(images, "Conv1", out_channels=256, activation=tf.nn.relu, ps_device=ps_device, w_device=w_device, padding="SAME")
    conv2_op = convolution(conv1_op, "Conv2", out_channels=512, activation=tf.nn.relu,ps_device=ps_device, w_device=w_device, padding="SAME")
    # conv3_op = convolution(conv2_op, "Conv3", out_channels=1024, activation=tf.nn.relu, ps_device=ps_device, w_device=w_device, padding="VALID")
    # conv4_op = convolution(conv3_op, "Conv4", out_channels=1280, activation=tf.nn.relu, ps_device=ps_device, w_device=w_device, padding="VALID")

    # shape = conv3_op.get_shape().as_list()
    # deconv1_op = deconvolution(conv4_op, shape, "Deconv1", activation=tf.nn.relu, ps_device=ps_device, w_device=w_device, padding="VALID")
    # shape = conv2_op.get_shape().as_list()
    # deconv2_op = deconvolution(conv3_op, shape, "Deconv2", activation=tf.nn.relu, ps_device=ps_device, w_device=w_device, padding="VALID")

    # Decoder: mirror the encoder back up to the input resolution.
    shape = conv1_op.get_shape().as_list()
    deconv3_op = deconvolution(conv2_op, shape, "Deconv3", activation=tf.nn.relu, ps_device=ps_device, w_device=w_device, padding="SAME")
    shape = images.get_shape().as_list()
    shape[4] = 128
    deconv4_op = deconvolution(deconv3_op, shape, "Deconv4", activation=tf.nn.relu, ps_device=ps_device, w_device=w_device, padding="SAME")
    with tf.device(w_device):
        deconv4_op = tf.nn.dropout( deconv4_op, keep_prob )
    # Skip connection: concatenate the input with the decoded features
    # (128 + 1 = 129 channels) before the 1x1x1 scoring layer.
    deconv4_op = tf.concat([images, deconv4_op], axis=4)
    convp_op = convolution(deconv4_op, "ConvScore", strides=[1, 1, 1, 1, 1], w_shape=[1, 1, 1, 129, 1], ps_device=ps_device, w_device=w_device, padding="SAME")
    return convp_op
def generator2d(images, keep_prob=1, batch_size=1, regularization_constant=0.0, ps_device="/cpu:0", w_device="/gpu:0", is_training=False):
    """2-D encoder/decoder generator: three strided convolutions down,
    three transposed convolutions mirroring the encoder shapes back to
    the input resolution.

    Returns a tensor with the input's shape.
    NOTE(review): batch_size and regularization_constant are unused.
    """
    images = tf.layers.batch_normalization(images, training=is_training)
    print_tensor_shape(images, "images")

    # Encoder: each convolution2d halves the spatial dimensions.
    relu1_op = convolution2d(images, 128, "Conv1", ps_device=ps_device, w_device=w_device)
    relu2_op = convolution2d(relu1_op, 512, "Conv2", ps_device=ps_device, w_device=w_device)
    with tf.device(w_device):
        relu2_op = tf.nn.dropout( relu2_op, keep_prob )
    relu3_op = convolution2d(relu2_op, 2048, "Conv3", ps_device=ps_device, w_device=w_device)

    # Decoder: mirror the encoder shapes back up.
    # NOTE(review): Deconv1/Deconv2 get no activation (deconvolution2d
    # defaults to activation=None); compare with generator(), which uses
    # ReLU on its deconvolutions -- confirm intended.
    shape = relu2_op.get_shape().as_list()
    deconv1_op = deconvolution2d(relu3_op, shape, "Deconv1", ps_device=ps_device, w_device=w_device)
    shape = relu1_op.get_shape().as_list()
    deconv2_op = deconvolution2d(deconv1_op, shape, "Deconv2", ps_device=ps_device, w_device=w_device)
    with tf.device(w_device):
        deconv2_op = tf.nn.dropout( deconv2_op, keep_prob )
    shape = images.get_shape().as_list()
    # BUG FIX: this call previously passed relu=False, but deconvolution2d()
    # has no 'relu' parameter (it takes 'activation', default None), so the
    # call raised TypeError at graph-construction time. No activation on
    # the output layer matches the evident intent.
    deconv3_op = deconvolution2d(deconv2_op, shape, "Deconv3", ps_device=ps_device, w_device=w_device)
    return deconv3_op
def loss(logits, labels):
    """Mean-squared-error loss between predictions and targets.

    Alternative losses tried during development are kept below,
    commented out.
    """
    print_tensor_shape( logits, 'logits shape')
    print_tensor_shape( labels, 'labels shape')

    #labels = tf.to_int64(labels)
    #loss = tf.losses.absolute_difference(predictions=logits, labels=labels)
    loss = tf.losses.mean_squared_error(predictions=logits, labels=labels)
    #loss = tf.losses.mean_pairwise_squared_error(predictions=logits, labels=labels)
    #loss = tf.losses.huber_loss(predictions=logits, labels=labels, delta=10.0)

    return loss
def training_adam(loss, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8, use_locking=False, name='Adam', var_list=None):
    """Return an Adam minimisation op for *loss*.

    var_list: optional subset of variables to train (e.g. only the
        generator's variables in a GAN setup); None trains everything.
    """
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
        beta1=beta1,
        beta2=beta2,
        epsilon=epsilon,
        use_locking=use_locking,
        name=name)

    train_op = optimizer.minimize(loss, var_list=var_list)

    return train_op
def training(loss, learning_rate, decay_steps, decay_rate, name):
    """Build a gradient-descent training op with exponential LR decay.

    NOTE(review): the ``name`` parameter is never used in the body — confirm
    whether it should name the optimizer or the summary.
    """
    # input: loss: loss tensor from loss()
    # input: learning_rate: scalar for gradient descent
    # output: train_op the operation for training
    # Creates a summarizer to track the loss over time in TensorBoard.
    # Creates an optimizer and applies the gradients to all trainable variables.
    # The Op returned by this function is what must be passed to the
    # `sess.run()` call to cause the model to train.
    # Add a scalar summary for the snapshot loss.
    # Create a variable to track the global step.
    global_step = tf.Variable(0, name='global_step', trainable=False)
    # create learning_decay (staircase=True decays in discrete steps)
    lr = tf.train.exponential_decay(learning_rate,
                                    global_step,
                                    decay_steps,
                                    decay_rate, staircase=True )
    tf.summary.scalar('2learning_rate', lr )
    # Create the gradient descent optimizer with the given learning rate.
    # optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    optimizer = tf.train.GradientDescentOptimizer(lr)
    # Use the optimizer to apply the gradients that minimize the loss
    # (and also increment the global step counter) as a single training step.
    train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op
def evaluation(logits, labels, name="accuracy"):
    """Build an evaluation metric for the regression output.

    Despite the "accuracy" naming, this computes the root-mean-squared error
    between ``labels`` and ``logits``. The metric is an indexable pair and
    element 0 (the value tensor) is logged to the summary.
    """
    # accuracy = tf.metrics.accuracy(labels=labels, predictions=logits, name=name)
    # tf.summary.scalar("accuracy", accuracy[0])
    # return accuracy
    accuracy = tf.metrics.root_mean_squared_error(labels=labels, predictions=logits, name=name)
    tf.summary.scalar("accuracy", accuracy[0])
    return accuracy
# def evaluation(logits, labels):
# # input: logits: Logits tensor, float - [batch_size, 195, 233, NUM_CLASSES].
# # input: labels: Labels tensor, int32 - [batch_size, 195, 233]
# # output: scaler int32 tensor with number of examples that were
# # predicted correctly
# with tf.name_scope('eval'):
# print()
# print_tensor_shape(logits, 'logits eval shape before')
# print_tensor_shape(labels, 'labels eval shape before')
# # reshape to match args required for the top_k function
# logits_re = tf.reshape(logits, [-1])
# print_tensor_shape(logits_re, 'logits_re eval shape after')
# labels_re = tf.reshape(labels, [-1])
# print_tensor_shape(labels_re, 'labels_re eval shape after')
# # get accuracy :
# diff = tf.sub(labels_re,logits_re)
# acc = tf.div(tf.reduce_mean(diff), 195.0*233.0)
# acc = 1 - acc
# # get accuracy :
# diff = tf.abs(tf.sub(labels_re,logits_re))
# lessthan0_01 = tf.less_equal(diff, 0.01)
# sum = tf.reduce_sum(tf.cast(lessthan0_01, tf.float32))
# acc2 = tf.div(sum, 195.0*233.0)
# print(acc)
# # Return the tuple of intersection, label and example areas
# labels_re = tf.cast(labels_re, tf.float32)
# indices_re = tf.cast(logits_re, tf.float32)
# return indices_re, labels_re, acc2
| 2.5625 | 3 |
ext/DataMining/0_DA/udfs/text/preprocessing.py | Jie-Yuan/1_DataMining | 14 | 12758404 | <filename>ext/DataMining/0_DA/udfs/text/preprocessing.py
# coding: utf-8
import jieba
from snownlp import SnowNLP
def get_text_tokens(text, stop_words_path="./stop_words.txt"):
    """Tokenize *text* with jieba and drop stop words.

    :param text: input string; leading/trailing whitespace is ignored.
    :param stop_words_path: path to a newline-delimited stop-word file.
    :return: list of tokens that are neither stop words nor tab characters.
    """
    text_tokens = jieba.cut(text.strip())
    # A set gives O(1) membership tests instead of O(n) scans of the list.
    stop_words = set(get_stop_words(stop_words_path))
    word_list = [word for word in text_tokens
                 if word not in stop_words and word != '\t']
    return word_list  # word frequencies: Counter(word_list)
def get_stop_words(path="./stop_words.txt"):
    """Read a newline-delimited stop-word file.

    :param path: path to the stop-word file (UTF-8; one word per line).
    :return: list of stripped stop words, in file order.
    """
    # Explicit encoding: the stop-word list is Chinese text and must not
    # depend on the platform default encoding. Iterating the file object
    # directly avoids materializing readlines().
    with open(path, encoding="utf-8") as f:
        return [line.strip() for line in f]
def fan2jian(text):
    """
    :param text: input string in Traditional Chinese.
    :return: the text converted to Simplified Chinese (via SnowNLP's ``han``).
    """
    return SnowNLP(text).han
def get_pinyin(text):
    """
    :param text: input string containing Chinese characters.
    :return: pinyin transliteration of *text*, as produced by SnowNLP's
             ``pinyin`` attribute.
    """
    return SnowNLP(text).pinyin
| 2.78125 | 3 |
venv/lib/python3.6/site-packages/furl/compat.py | ostar0816/mc-crypto | 4 | 12758405 | # -*- coding: utf-8 -*-
#
# furl - URL manipulation made simple.
#
# <NAME>
# grunseid.com
# <EMAIL>
#
# License: Build Amazing Things (Unlicense)
#
import sys
# Provide a ``basestring`` alias usable from both major Python versions:
# on Python 2 it is the builtin; on Python 3 it matches both text (str)
# and binary (bytes) strings for isinstance() checks.
if sys.version_info[0] == 2:
    basestring = basestring
else:
    basestring = (str, bytes)
# ``OrderedDict`` entered the stdlib in Python 2.7; older interpreters
# need the third-party ``ordereddict`` backport.
if list(sys.version_info[:2]) >= [2, 7]:
    from collections import OrderedDict
else:
    from ordereddict import OrderedDict
class UnicodeMixin(object):
    """
    Mixin class to handle defining the proper __str__/__unicode__ methods in
    Python 2 or 3.

    Subclasses implement ``__unicode__`` once; this mixin routes ``__str__``
    to it, encoding to UTF-8 bytes on Python 2.
    """
    if sys.version_info[0] >= 3:  # Python 3
        def __str__(self):
            return self.__unicode__()
    else:  # Python 2
        def __str__(self):
            return self.__unicode__().encode('utf8')
| 2.546875 | 3 |
src/utils/Utils.py | kaczor6418/smpd | 0 | 12758406 | <filename>src/utils/Utils.py
from typing import TypeVar, Dict, List
T = TypeVar('T')
V = TypeVar('V')


class Utils:
    @staticmethod
    def get_key_value_by_id_from_dictionaries(dictionaries: List[Dict[T, V]], id_value: V, key: T) -> V:
        """Return the value stored under *key* in the first dictionary whose
        'id' entry equals *id_value*, or ``None`` when no dictionary matches
        (or the matching dictionary lacks *key*)."""
        return next(
            (entry.get(key) for entry in dictionaries if entry['id'] == id_value),
            None,
        )
| 3.109375 | 3 |
aldryn_apphooks_config/utils.py | bmcmurray/aldryn-apphooks-config | 7 | 12758407 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from django.db.models import ForeignKey
from django.urls import Resolver404, resolve
from django.utils.translation import get_language_from_request, override
from cms.apphook_pool import apphook_pool
from app_data import AppDataContainer, app_registry
# making key app/model specific to avoid inheritance issues
APP_CONFIG_FIELDS_KEY = '_app_config_field_names_{app_label}_{model_name}'
def get_app_instance(request):
    """
    Returns a tuple containing the current namespace and the AppHookConfig instance

    :param request: request object
    :return: namespace, config — or ``('', None)`` when the current page has
             no apphook, the apphook has no config, or URL resolution fails
    """
    app = None
    if getattr(request, 'current_page', None) and request.current_page.application_urls:
        app = apphook_pool.get_apphook(request.current_page.application_urls)
    if app and app.app_config:
        try:
            config = None
            # Resolve the namespace under the request's language so that
            # translated URL patterns match the request path.
            with override(get_language_from_request(request, check_path=True)):
                namespace = resolve(request.path_info).namespace
                config = app.get_config(namespace)
            return namespace, config
        except Resolver404:
            pass
    return '', None
def setup_config(form_class, config_model=None):
    """
    Register the provided form as config form for the provided config model

    This can be used as a decorator by adding a `model` attribute to the config form::

        @setup_config
        class ExampleConfigForm(AppDataForm):
            model = ExampleConfig

    :param form_class: Form class derived from AppDataForm
    :param config_model: Model class derived from AppHookConfig
    :return: the form class (so decorator usage leaves the name bound)
    """
    # allow use as a decorator
    if config_model is None:
        return setup_config(form_class, form_class.model)
    app_registry.register('config', AppDataContainer.from_form(form_class), config_model)
    # Return the form class so that decorator usage does not rebind the
    # decorated class name to None (the original returned nothing here).
    return form_class
def _get_apphook_field_names(model):
    """
    Return all foreign key field names for a AppHookConfig based model

    :param model: model class or instance
    :return: list of names of ForeignKey fields pointing at AppHookConfig
             subclasses
    """
    from .models import AppHookConfig  # avoid circular dependencies
    # Single comprehension replaces the original two-pass loop (collect
    # fields, then extract names) with identical output order.
    return [
        field.name
        for field in model._meta.fields
        if isinstance(field, ForeignKey)
        and issubclass(field.remote_field.model, AppHookConfig)
    ]
def get_apphook_field_names(model):
    """
    Cache app-hook field names on model

    :param model: model class or object
    :return: list of foreign key field names to AppHookConfigs
    """
    # The cache attribute name embeds app label and model name so that
    # subclasses do not inherit a parent's cached list.
    key = APP_CONFIG_FIELDS_KEY.format(
        app_label=model._meta.app_label,
        model_name=model._meta.object_name
    ).lower()
    if not hasattr(model, key):
        # First call for this model: compute once and memoize on the model.
        field_names = _get_apphook_field_names(model)
        setattr(model, key, field_names)
    return getattr(model, key)
def get_apphook_configs(obj):
    """
    Get apphook configs for an object obj

    :param obj: any model instance
    :return: list of apphook configs for given obj (empty list when the model
             has no AppHookConfig foreign keys)
    """
    keys = get_apphook_field_names(obj)
    return [getattr(obj, key) for key in keys] if keys else []
def get_apphook_model(model, app_config_attribute):
    """
    Return the AppHookConfig model for the provided main model

    :param model: Main model
    :param app_config_attribute: Fieldname of the app_config
    :return: app_config model (the target of the named foreign key)
    """
    return model._meta.get_field(app_config_attribute).remote_field.model
| 1.96875 | 2 |
sg_demo.py | openhealthalgorithms/algorithms | 2 | 12758408 | <reponame>openhealthalgorithms/algorithms
from OHA.SgFramingham import SgFramingham
from OHA.SingHealth import SingHealth as SHA
from OHA.param_builders.sg_framingham_param_builder import SGFraminghamParamsBuilder as SGFPB
# params = SGFPB().gender('male').age(60).ethnicity('indian').t_chol(4.6, 'mmol/L').hdl_chol(1.8, 'mmol/L').sbp(125)\
# .smoker(True).diabetic(True).bp_medication(False).build()
# Build a Singapore-Framingham parameter set with the fluent builder API
# and run the risk calculation on it.
params = SGFPB()\
    .gender('m')\
    .age(60)\
    .ethnicity('indian')\
    .t_chol(4.6, 'mmol/L')\
    .hdl_chol(1.8, 'mmol/L')\
    .sbp(125)\
    .smoker()\
    .diabetic(True)\
    .bp_medication(False)\
    .build()
print(params)
result = SgFramingham().calculate(params)
print('--> Sg Framingham:', result)
print()
# Example request payload for the SingHealth assessment: API credentials
# under 'request', patient data (demographics, measurements, lifestyle,
# history and pathology results) under 'body'.
input_params = {
    'request': {
        'api_key': '<KEY>',
        'api_secret': '3459823jfweureitu',
        'request_api': 'https://developers.openhealthalgorithms.org/algos/hearts/',
        'country_code': 'D',
        'response_type': 'COMPLETE',
    },
    'body': {
        'region': 'SEARD',
        'last_assessment': {
            'assessment_date': '',
            'cvd_risk': '20',
        },
        'demographics': {
            'gender': 'M',
            'age': 40,
            'ethnicity': 'caucasian',
            'dob': ['computed', '01/10/1987'],
            'occupation': 'office_worker',
            'monthly_income': '',
        },
        'measurements': {
            'height': [1.5, 'm'],
            'weight': [60.0, 'kg'],
            'waist': [99.0, 'cm'],
            'hip': [104.0, 'cm'],
            'sbp': [161, 'sitting'],
            'dbp': [91, 'sitting'],
        },
        'smoking': {
            'current': 0,
            'ex_smoker': 1,
            'quit_within_year': 0,
        },
        'physical_activity': '120',
        'diet_history': {
            'fruit': 1, 'veg': 6, 'rice': 2, 'oil': 'olive',
        },
        'medical_history': {
            'conditions': ['asthma', 'tuberculosis'],
        },
        'allergies': {},
        'medications': ['anti_hypertensive', 'statin', 'antiplatelet', 'bronchodilator'],
        'family_history': ['diabetes', 'cvd'],
        'pathology': {
            'bsl': {
                'type': 'random', 'units': 'mmol/L', 'value': 5,
            },
            'cholesterol': {
                'type': 'fasting', 'units': 'mmol/L', 'total_chol': 5.2, 'hdl': 1.6, 'ldl': 2.4,
            },
        },
    },
}
print('---- Calculating SingHealth ----')
result = SHA.calculate(input_params)
| 2.3125 | 2 |
userInteraction/interfaces/IFinanceInteraction.py | tylertjburns/ledgerkeeper | 0 | 12758409 | <reponame>tylertjburns/ledgerkeeper
from ledgerkeeper.mongoData.account import Account
from abc import ABC, abstractmethod
class IFinanceInteraction(ABC):
    """Abstract interface for the user-interaction layer of the ledger keeper.

    Concrete UIs must implement every method: input-gathering hooks for
    account and ledger operations, plus a generic pretty-printer. Method
    semantics are implied by their names; implementations define the actual
    prompts and return values.
    """
    # region Account UI
    @abstractmethod
    def request_bank_total(self):
        pass
    @abstractmethod
    def select_account(self, statusList=None):
        pass
    @abstractmethod
    def select_collection(self):
        pass
    @abstractmethod
    def get_add_new_account_input(self):
        pass
    @abstractmethod
    def get_record_expense_input(self, accountManager):
        pass
    @abstractmethod
    def get_add_bucket_to_account_input(self):
        pass
    @abstractmethod
    def get_move_funds_input(self, account:Account):
        pass
    @abstractmethod
    def get_add_waterfall_funds_input(self, account: Account):
        pass
    @abstractmethod
    def get_delete_bucket_from_account_input(self, account:Account):
        pass
    @abstractmethod
    def get_update_bucket_priority_input(self, account:Account):
        pass
    @abstractmethod
    def get_print_full_waterfall_input(self):
        pass
    @abstractmethod
    def get_add_open_balance_input(self):
        pass
    @abstractmethod
    def get_delete_open_balance_input(self, account:Account):
        pass
    # endregion
    # region Ledger UI
    @abstractmethod
    def get_add_ledger_manually_input(self):
        pass
    @abstractmethod
    def get_split_transaction_input(self, currentAmount: float):
        pass
    @abstractmethod
    def get_enter_ledger_from_income_transaction_input(self):
        pass
    @abstractmethod
    def get_enter_ledger_from_expense_transaction_input(self):
        pass
    # endregion
    # region Printing
    @abstractmethod
    def pretty_print_items(self, items, title=None):
        pass
# endregion | 2.53125 | 3 |
Utilities/WeightAPI.py | ScorchChamp/Telofik | 0 | 12758410 | import Utilities.HypixelAPI as HypixelAPI
import Utilities.MojangAPI as MojangAPI
import Utilities.Weights.playerStore as playerStore
from Constants import *
from dotenv import load_dotenv, dotenv_values
import json
import os
load_dotenv()
API_KEY = dotenv_values('.env')["API_KEY"]
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
def getBreakdownFormatted(username):
    """Return the player's score breakdown as one "name: score" line per part."""
    breakdown = getWeight(username)[1]
    return '\n'.join([f"{item}: {breakdown[item]}" for item in breakdown])
def getBreakdownMAXFormatted():
    """Return the maximum-possible score breakdown as "name: score" lines."""
    score, breakdown = maxStats()  # total score is unused here; only the breakdown is formatted
    return '\n'.join([f"{item}: {breakdown[item]}" for item in breakdown])
def getWeightUUID(uuid):
    """Return ``(score, breakdown)`` for the best-scoring stranded profile of *uuid*.

    Best-effort: falls back to ``(0, {})`` when the uuid is falsy, the Hypixel
    API call fails, the player has no stranded profiles, or scoring raises.
    """
    if not uuid:
        return (0, {})
    try:
        stranded_data = HypixelAPI.getStrandedData(uuid)
    # except Exception, not a bare except: a bare except also swallows
    # SystemExit/KeyboardInterrupt, making the process hard to interrupt.
    except Exception:
        return (0, {})
    try:
        weights = [getStrandedWeight(member_data["members"][uuid]) for member_data in stranded_data]
        if not weights:
            return (0, {})
        # Tuples compare lexicographically, so max() picks the highest score.
        return max(weights)
    except Exception:
        return (0, {})
def getWeight(username):
    """Resolve *username* to a UUID, score it, persist and return the result.

    :return: ``(weight, breakdown)`` tuple; ``(0, {})`` when the username
             cannot be resolved to a UUID.
    """
    uuid = MojangAPI.getUUIDFromUsername(username)
    if not uuid: return (0,{})
    weight, breakdown = getWeightUUID(uuid)
    # Persist the computed score so later lookups can reuse it.
    playerStore.storePlayerScore(username, weight, breakdown)
    return weight, breakdown
def maxStats():
    """Score a hypothetical player with every part maxed (max=True path)."""
    return getStrandedWeight({}, max=True)
def generateWeightParts():
    """Load the weight-part definitions and derive per-part scoring fields.

    Adds ``time`` (hours to max), ``real_time`` (effort-weighted time plus
    coin cost) and curves ``maxXP`` for each part.

    :return: ``(weight_parts, total_real_time, max_score)`` where max_score
             is the fixed overall scale (100000).
    """
    with open(WEIGHT_PARTS, "r") as f: weight_parts = json.load(f)
    for part in weight_parts:
        part["time"] = part["maxXP"] / part["XPh"]
        part["real_time"] = (part["time"] * part["effort"]) + part["coin_cost"]
        part["maxXP"] = part["maxXP"] ** part["curve"]
    return weight_parts, sum([part["real_time"] for part in weight_parts]), 100000
def getStrandedWeight(profileData, max = False):
    """Score a stranded-profile member dict against the weight parts.

    Each part contributes ``(real_time / totalTime) * max_score`` scaled by
    the player's curved XP relative to the part's curved max.

    :param profileData: member dict from the Hypixel API (ignored when max).
    :param max: when truthy, score every part at its maximum (NOTE(review):
                this parameter shadows the builtin ``max``; renaming would
                break keyword callers such as ``maxStats``).
    :return: ``(total_score, score_breakdown)`` with per-part rounded scores.
    """
    weight_parts, totalTime, max_score = generateWeightParts()
    score_breakdown = {}
    total_score = 0
    for part in weight_parts:
        name = str(part["name"]).lower()
        if max: xp_name = part["maxXP"]
        else:
            # Minions are counted by crafted generators; skills and slayer
            # bosses are looked up by name, defaulting to 0 when absent.
            if name == "minions": xp_name = len(profileData[f"crafted_generators"])
            else:
                try: xp_name = profileData[f"experience_skill_{name}"]
                except:
                    try: xp_name = profileData["slayer_bosses"][name]["xp"]
                    except: xp_name = 0
        # Curve the XP and clamp to the part's (already curved) maximum.
        xp_name = min(xp_name ** part["curve"], part["maxXP"])
        score_breakdown[name] = round((part["real_time"] / totalTime) * max_score * (xp_name / part["maxXP"]))
        total_score += score_breakdown[name]
    return round(total_score), score_breakdown
models/predictive models/log_env.py | AmineKheldouni/Graphs-in-Machine-Learning | 1 | 12758411 | # -*- coding: utf-8 -*-
import os
import time
import gym
import numpy as np
import matplotlib.pyplot as plt
from gym.envs.classic_control.cartpole import *
from gym.envs.mujoco import *
import pandas as pd
# from gym.envs.registration import *
# from gym.wrappers.time_limit import TimeLimit
# Interactive demo: create a MuJoCo Walker2d environment and render random
# actions at one step per second.
env_name = 'Walker2d-v2'
env = gym.make(env_name)
# print('Observation space: ', env.observation_space)
# print('Action space: ', env.action_space)
# print('Observation space low: ', env.observation_space.low)
# print('Observation space high: ', env.observation_space.high)
# print('Action space low: ', env.action_space.low)
# print('Action space high: ', env.action_space.high)
state = env.reset()
print(state)
# NOTE(review): this loop has no exit condition, so env.close() below is
# unreachable — the script is stopped by interrupting it.
while True:
    env.render()
    time.sleep(1)
    state, reward, done, _ = env.step(env.action_space.sample())
env.close()
##### Render environment for a specific state #####
#
# env = CartPoleEnv()
# env.state = np.array([0.03971514, -0.01205, 0.039588, -0.00371212])
# # state = env.reset()
# while True:
# env.render()
# env = Walker2dEnv()
#
# state = env.reset()
# env.step(np.random.rand(6))
# print('qpos: ', env.sim.data.qpos)
# print('qvel: ', env.sim.data.qvel)
# print('state: ', env._get_obs())
# file_name_gt = 'GroundTruth_2019-01-11_20-01-56.csv'
# file_name_pred = 'Prediction_2019-01-11_20-01-56.csv'
# env_label = 'CartPole'
# df_test = pd.read_csv('./GN v1.5/results/' + env_label + '/test/' + file_name_pred, sep=',', header=0)
# env = Walker2dEnv()
# df = df_test.as_matrix()
# i = 300
# # _,_,s2,s3,s0,s1,_,_ = df[i,:]
# # env.state = np.array([s0,s1,s2,s3])
# while True:
# env.render()
| 2.53125 | 3 |
pyrsss/gnss/level_new.py | grawe/pyrsss | 0 | 12758412 | <gh_stars>0
from __future__ import division
import logging
import numpy as NP
import pandas as PD
from ..stats.stats import weighted_avg_and_std
from constants import LAMBDA_1, LAMBDA_2, TECU_TO_M, M_TO_TECU, glonass_lambda
from level import DEFAULT_CONFIG
from rms_model import RMSModel
logger = logging.getLogger('pyrsss.gps.level_new')
class LeveledArc(PD.DataFrame):
    """DataFrame subclass holding one leveled phase arc.

    ``_metadata`` lists custom attributes that pandas should propagate
    through DataFrame operations (NOTE(review): 'stn' appears twice —
    likely one entry was meant to be something else; confirm).
    """
    _metadata = ['xyz',
                 'llh',
                 'stn',
                 'recv_type',
                 'stn',
                 'sat',
                 'L',
                 'L_scatter']
    @property
    def _constructor(self):
        # Ensure slicing/aggregation returns a LeveledArc, not a bare DataFrame.
        return LeveledArc
def convert_phase_m(df_arc, sat):
    """Convert the L1/L2 carrier-phase observations of one arc to meters.

    :param df_arc: arc DataFrame with L1/L2 columns in cycles (and gps_time
                   for GLONASS wavelength lookup).
    :param sat: satellite id, e.g. 'G05' (GPS) or 'R12' (GLONASS slot 12).
    :return: ``(L1_m, L2_m)`` series in meters.
    :raises ValueError: for constellations other than GPS/GLONASS.
    """
    # Removed leftover debug print(df_arc.shape) and an unreachable
    # `assert False` that followed the raise.
    if sat[0] == 'G':
        # GPS wavelengths are constants.
        return (df_arc.L1 * LAMBDA_1,
                df_arc.L2 * LAMBDA_2)
    elif sat[0] == 'R':
        # GLONASS wavelengths depend on the slot's FDMA channel at this epoch.
        dt = df_arc.iloc[0].gps_time
        slot = int(sat[1:])
        lambda1, lambda2 = glonass_lambda(slot, dt)
        return (df_arc.L1 * lambda1,
                df_arc.L2 * lambda2)
    else:
        raise ValueError('cannot convert phase to [m] for {}'.format(sat))
def level(rinex_dump,
          config=DEFAULT_CONFIG):
    """Level the carrier-phase geometry-free combination of each phase arc.

    For every arc in *rinex_dump*: reject short/low-quality arcs, compute the
    code (P_I) and phase (L_I) geometry-free combinations, estimate the phase
    level via a variance-weighted average of P_I - L_I, and reject arcs with
    excessive leveling uncertainty.

    :param rinex_dump: DataFrame of RINEX observations with an ``arc`` column.
    :param config: leveling configuration (thresholds); DEFAULT_CONFIG by default.
    :return: list of :class:`LeveledArc` (values converted to TECU).
    """
    rms_model = RMSModel()
    leveled_arcs = []
    for arc_index, arc in enumerate(sorted(set(rinex_dump.arc))):
        df_arc = rinex_dump[rinex_dump.arc == arc]
        sat = df_arc.iloc[0].sat
        delta = df_arc.iloc[-1].gps_time - df_arc.iloc[0].gps_time
        arc_time_length = delta.total_seconds()
        if arc_time_length < config.minimum_arc_time:
            # reject short arc (time)
            logger.info('rejecting arc={} --- '
                        'begin={:%Y-%m-%d %H:%M:%S} '
                        'end={:%Y-%m-%d %H:%M:%S} '
                        'length={} [s] '
                        '< {} [s]'.format(arc,
                                          df_arc.iloc[0].gps_time,
                                          df_arc.iloc[-1].gps_time,
                                          arc_time_length,
                                          config.minimum_arc_time))
            continue
        if df_arc.shape[0] < config.minimum_arc_points:
            # reject short arc (number of epochs); format fixed — the
            # original passed 4 arguments for 3 placeholders, logging the
            # satellite id in the arc slot.
            logger.info('rejecting arc={} (sat={}) --- len(arc) = '
                        '{} < {}'.format(arc,
                                         sat,
                                         df_arc.shape[0],
                                         config.minimum_arc_points))
            continue
        # remove observations below minimum elevation limit
        I = df_arc.el >= config.minimum_elevation
        # remove observations for which P1, P2, L1, or L2 are nan
        I &= df_arc.P1.notnull()
        I &= df_arc.P2.notnull()
        I &= df_arc.L1.notnull()
        I &= df_arc.L2.notnull()
        # remove measurements with |p1 - p2| < threshold
        I &= abs(df_arc.P1 - df_arc.P2) > config.p1p2_threshold
        # compute geometry free combinations
        df_arc = df_arc.loc[I, :]
        if df_arc.shape[0] == 0:
            continue
        P_I = df_arc.P2 - df_arc.P1
        L1m, L2m = convert_phase_m(df_arc, sat)
        L_Im = L1m - L2m
        diff = P_I - L_Im
        # List comprehension instead of NP.array(map(...)): under Python 3
        # map returns an iterator, which NP.array wraps in a 0-d object
        # array and the subsequent ** 2 fails.
        modeled_var = (NP.array([rms_model(el) for el in df_arc.el.values])
                       * TECU_TO_M)**2
        # compute level, level scatter, and modeled scatter
        N = len(diff)
        if N == 0:
            continue
        L, L_scatter = weighted_avg_and_std(diff, 1 / modeled_var)
        sigma_scatter = NP.sqrt(NP.sum(modeled_var) / N)
        # check for excessive leveling uncertainty
        if L_scatter > config.scatter_factor * sigma_scatter:
            logger.info('rejecting arc={} --- L scatter={:.6f} '
                        '> {:.1f} * {:.6f}'.format(arc_index,
                                                   L_scatter,
                                                   config.scatter_factor,
                                                   sigma_scatter))
            continue
        if L_scatter / TECU_TO_M > config.scatter_threshold:
            logger.info('rejecting arc={} --- L uncertainty (in '
                        '[TECU])={:.1f} > '
                        '{:.1f}'.format(arc_index,
                                        L_scatter * M_TO_TECU,
                                        config.scatter_threshold))
            continue
        # store information (converted from meters to TECU)
        data_map = {'gps_time': df_arc.gps_time.values,
                    'az': df_arc.az.values,
                    'el': df_arc.el.values,
                    'satx': df_arc.satx.values,
                    'saty': df_arc.saty.values,
                    'satz': df_arc.satz.values,
                    'P_I': P_I * M_TO_TECU,
                    'L_I': (L_Im + L) * M_TO_TECU}
        leveled_arc = LeveledArc(data_map)
        leveled_arc.xyz = df_arc.xyz
        leveled_arc.llh = df_arc.llh
        leveled_arc.stn = df_arc.stn
        leveled_arc.recv_type = df_arc.recv_type
        leveled_arc.sat = sat
        leveled_arc.L = L * M_TO_TECU
        leveled_arc.L_scatter = L_scatter * M_TO_TECU
        leveled_arcs.append(leveled_arc)
    return leveled_arcs
if __name__ == '__main__':
    # Demo driver: level a pickled RINEX dump and save one plot per arc.
    logging.basicConfig(level=logging.INFO)
    pkl_fname = '/tmp/jplm0010.14o.pkl'
    rinex_dump = PD.read_pickle(pkl_fname)
    leveled_arcs = level(rinex_dump)
    # print(leveled_arcs[0])
    # print(leveled_arcs[10])
    import pylab as PL
    fig = PL.figure()
    for i, arc in enumerate(leveled_arcs):
        fig.clf()
        # Red crosses: code geometry-free (P_I); blue line: leveled phase (L_I).
        PL.plot_date(arc.gps_time, arc.P_I, marker='x', ls='None', color='r')
        PL.plot_date(arc.gps_time, arc.L_I, marker='None', ls='-', color='b')
        PL.title('arc = {} sat = {}'.format(i, arc.sat))
        PL.savefig('/tmp/test/arc_{:03d}.pdf'.format(i), bbox_inches='tight')
| 2.234375 | 2 |
Modules,Packages,File_operations/ReadNwrite.py | tymsai/Python_Basic | 0 | 12758413 | #%%
# Write demo: open 'new.txt' for read/write and append 'Hello file' plus the
# digits 0..10. The context manager guarantees the handle is closed — the
# original opened both files without ever calling close().
with open('new.txt', 'r+') as out_file:
    out_file.write('Hello file')
    for i in range(0, 11):
        out_file.write(str(i))
    print(out_file.seek(2))  # seek() returns the new absolute position: 2
#%%
# file operations Read & Write
with open('sampletxt.txt', 'r') as in_file:
    text = in_file.read()
print(text)
text = text.split(' ')
print(text)
| 3.609375 | 4 |
pyerge/cli.py | emcek/pyerge | 0 | 12758414 | import sys
from argparse import ArgumentParser, Namespace
from logging import basicConfig, DEBUG, INFO, ERROR, info, error
from typing import List
from pyerge import tmerge, utils, __version__
def run_parser() -> None:
    """
    Function to collect command line arguments.

    Construct main object with correct set of parameters. Unknown arguments
    are collected by ``parse_known_args`` and forwarded verbatim to emerge.
    """
    parser = ArgumentParser(description='Emerge in temporary RAM disk')
    parser.add_argument('-s', '--size', action='store', dest='size', default='4G', help='Size or RAM disk, default 4G')
    parser.add_argument('-l', '--check_local', action='store_true', dest='local', default=False, help='check locally')
    parser.add_argument('-d', '--clean-print', action='store_true', dest='deep_print', default=False, help='print deep clean info after emerge')
    parser.add_argument('-c', '--clean-run', action='store_true', dest='deep_run', default=False, help='run deep clean after emerge')
    parser.add_argument('-w', '--world', action='store_true', dest='world', default=False, help='run emerge -NDu @world')
    parser.add_argument('-r', '--pretend_world', action='store_true', dest='pretend_world', default=False, help='run emerge -pvNDu @world')
    parser.add_argument('-q', '--quiet', action='store_true', dest='quiet', default=False, help='no output from pyerge itself only form other tools')
    parser.add_argument('-v', '--verbose', action='count', dest='verbose', default=0, help='Increase output verbosity')
    parser.add_argument('-V', '--version', action='version', version='%(prog)s ' + __version__)
    parser.add_argument('action', help='check or emerge')
    opts, emerge_opts = parser.parse_known_args()
    # --quiet takes precedence over --verbose: ERROR overrides DEBUG/INFO.
    level = DEBUG if opts.verbose else INFO
    if opts.quiet:
        level = ERROR
    basicConfig(format='%(asctime)s | %(levelname)-6s | %(message)s', level=level)
    if opts.action not in ['check', 'emerge']:
        error(f'Wrong options: {opts} {emerge_opts}')
        sys.exit(1)
    main_exec(opts, emerge_opts)
def main_exec(opts: Namespace, emerge_opts: List[str]) -> None:
    """
    Main execution function.

    :param opts: cli arguments
    :param emerge_opts: list of arguments for emerge (replaced wholesale by
                        the --world / --pretend_world presets when set)
    """
    if opts.world:
        emerge_opts = ['--with-bdeps=y', '--keep-going=y', '--newuse', '--deep', '--update', '@world']
    if opts.pretend_world:
        emerge_opts = ['--with-bdeps=y', '--pretend', '--verbose', '--newuse', '--deep', '--update', '@world']
    info(f'Pyerge version: {__version__}')
    opts.online = utils.is_internet_connected()
    # Only mount/emerge/check when no other portage process is active.
    if not tmerge.is_portage_running():
        utils.set_portage_tmpdir()
        utils.handling_mounting(opts)
        tmerge.run_emerge(emerge_opts, opts)
        tmerge.run_check(opts)
        utils.unmounttmpfs(opts)
    else:
        info('emerge already running!')
| 2.765625 | 3 |
PhosQuest_app/data_access/db_sessions.py | anapenedos/PhosQuest | 2 | 12758415 | <reponame>anapenedos/PhosQuest
import os
from PhosQuest_app.data_access.sqlalchemy_declarative import Base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.interfaces import PoolListener
def set_db_path():
    """ Returns the path to the DB being used."""
    # Relative path: resolved against the current working directory.
    return os.path.join('database', 'PhosQuest.db')
def session_maker(db_path=set_db_path()):
    """
    Produces a session maker object for standard database query sessions.

    :param db_path: system path to relevant DB (str)
    :return: DB session maker object (sqlalchemy sessionmaker)
    """
    engine = create_engine('sqlite:///' + db_path, echo=False)
    Base.metadata.bind = engine
    # Bind the engine to the sessionmaker explicitly, consistent with
    # import_session_maker / pandas_sql_session_maker, rather than relying
    # on the implicit Base.metadata.bind lookup.
    DBSession = sessionmaker(bind=engine)
    return DBSession
def print_sql_session_maker(db_path=set_db_path()):
    """
    Produces a session maker object for database query sessions where sql
    statements are printed to console.

    :param db_path: system path to relevant DB (str)
    :return: DB session maker object (sqlalchemy sessionmaker)
    """
    # echo=True makes the engine log every emitted SQL statement.
    engine = create_engine('sqlite:///' + db_path, echo=True)
    Base.metadata.bind = engine
    # Bind the engine explicitly, consistent with the other maker helpers.
    DBSession = sessionmaker(bind=engine)
    return DBSession
class MyListener(PoolListener):
    """
    Class defining session execution in SQLite. Allows OS management of
    writing to disk operations, speeding up imports.
    """
    def connect(self, dbapi_con, con_record):
        # Applied once per new DB-API connection in the pool:
        dbapi_con.execute('pragma journal_mode=OFF')   # no rollback journal
        dbapi_con.execute('PRAGMA synchronous=OFF')    # don't wait for fsync
        dbapi_con.execute('PRAGMA cache_size=100000')  # larger page cache
def import_session_maker(db_path=set_db_path()):
    """
    Produces a session maker object for database import sessions, where write
    operations are managed by OS.

    :param db_path: system path to relevant DB (str)
    :return: DB session maker object (sqlalchemy sessionmaker)
    """
    # Create engine that stores data to database\<file_name>.db
    # defines engine as SQLite, uses listeners to implement faster import
    # (record writing to disk is managed by the OS and hence can occur
    # simultaneously with data processing)
    engine = create_engine('sqlite:///' + db_path, echo=False,
                           listeners=[MyListener()])
    # Bind the engine to the metadata of the base class so that the
    # classes can be accessed through a DBSession instance
    Base.metadata.bind = engine
    # DB session to connect to DB and keep any changes in a "staging zone"
    DBSession = sessionmaker(bind=engine)
    return DBSession
def pandas_sql_session_maker(db_path=set_db_path()):
    """
    Produces a session maker object to use when visualising sqlalchemy with a
    pandas data frame.

    :param db_path: system path to relevant DB (str)
    :return: DB session maker object (sqlalchemy sessionmaker)
    """
    # Define instance of engine which represents interface to the database.
    engine = create_engine('sqlite:///' + db_path, echo=False)
    # Define session class object - ORM "handle" to the data-base.
    DBSession = sessionmaker()
    # Connect engine to the Session object.
    DBSession.configure(bind=engine)
    return DBSession
def create_sqlsession(existing_maker=None,
                      session_type='standard',
                      db_path=set_db_path()):
    """
    Returns a sqlalchemy session object of the type specified.

    :param existing_maker: existing DB maker object (sqlalchemy sessionmaker)
                           default is None
    :param session_type: type of session desired (str)
                         'standard' normal DB query
                         'import' optimises DB connection for large data import
                         'pandas_sql' allows visualising sqlalchemy with pandas
                         'print_sql' prints sqlite statements to screen
                         default is 'standard'
    :param db_path: system path to relevant DB (str)
                    default is 'PhosQuest.db' DB in database folder
    :return: DB session object (sqlalchemy session)
    :raises KeyError: for an unrecognised ``session_type``
    """
    if not existing_maker:
        # Dispatch table mapping session_type to its maker factory.
        maker = {'standard': session_maker,
                 'import': import_session_maker,
                 'print_sql': print_sql_session_maker,
                 'pandas_sql': pandas_sql_session_maker}
        DBSession = maker[session_type](db_path)
    else:
        DBSession = existing_maker
    # open a SQLite session
    session = DBSession()
    return session
scenarios/scripts/plot_baseline_var_carriers.py | ZeronSix/nru-wifi-coexistence-simulator | 0 | 12758416 | <reponame>ZeronSix/nru-wifi-coexistence-simulator
#!/usr/bin/env python3
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Plot mean throughput-per-cell vs number of carriers (with st.dev. error
# bars) for LBT types A (per txLock delta) and B (per primary-carrier mode),
# one figure per number of eNBs.
df = pd.read_csv('../results/baseline_var_carriers.tsv', sep='\t', index_col=False, header=0)
for n in [1, 5, 10]:
    plt.cla()
    df_a = df[df['lbtType'] == "A"]
    df_b = df[df['lbtType'] == "B"]
    for delta in [0, 4, 8]:
        data = df_a[(df_a['numEnbs'] == n) & (df_a['txLock'] == delta)].groupby('numCarriers')['throughputPerCell']\
            .agg([np.mean])\
            .reset_index()
        error = df_a[(df_a['numEnbs'] == n) & (df_a['txLock'] == delta)].groupby('numCarriers')['throughputPerCellStDev'] \
            .agg([np.mean]) \
            .reset_index()
        plt.errorbar(data['numCarriers'], data['mean'], error['mean'], label=f'A D={delta}', marker='s')
    for rng_primary in [0, 1]:
        data = df_b[(df_b['numEnbs'] == n) &
                    (df_b['randomPrimaryCarrier'] == rng_primary)].groupby('numCarriers')['throughputPerCell'] \
            .agg([np.mean]) \
            .reset_index()
        error = df_b[(df_b['numEnbs'] == n) &
                     (df_b['randomPrimaryCarrier'] == rng_primary)].groupby('numCarriers')['throughputPerCellStDev'] \
            .agg([np.mean]) \
            .reset_index()
        plt.errorbar(data['numCarriers'], data['mean'], error['mean'], label=f'B (random primary={rng_primary == 1})', marker='s')
    plt.grid(True)
    plt.legend(loc='best')
    plt.xlabel('Number of carriers')
    plt.ylabel('Throughput per cell, Mbps')
    plt.title(f'N={n}')
    # Each figure is saved in both vector and raster form.
    plt.savefig(f'../plots/pdf/baseline_throughput_{n}.pdf')
    plt.savefig(f'../plots/png/baseline_throughput_{n}.png')
#plt.xlim(1, 10)
plt.show()
| 2.4375 | 2 |
water-pouring/water_pouring/envs/pouring_featured.py | yannikkellerde/Water-Pouring | 0 | 12758417 | from pouring_base import Pouring_base
from gym import spaces
from scipy.spatial.transform import Rotation as R
from collections import deque
import math
import numpy as np
import os,sys
FILE_PATH = os.path.abspath(os.path.dirname(__file__))
class Pouring_featured(Pouring_base):
    """A concrete water-pouring gym environment that uses handcrafted features as
    observations of the state. Thus, this environment describes a Partially Observable
    Markov Decision Process.
    Attributes:
        max_in_air: Maximum amount of water-particles in the air that is assumed to be
            possible. Used for normalization of observations.
    """
    def __init__(self,**kwargs):
        """Initialize the water-pouring environment.
        Args:
            **kwargs: Keyword arguments that are forwarded to the abstract init method
                of the base implementation.
        """
        self.max_in_air = 40
        super(Pouring_featured, self).__init__(**kwargs)
        self.action_space = spaces.Box(low=-1,high=1,shape=(3,))
        # 11 base features, plus the two most recent actions when jerk is punished.
        self.observation_space = spaces.Box(low=-1,high=1,shape=(11+(2*self.action_space.shape[0] if self.jerk_punish>0 else 0),))
    def _observe(self):
        """Make an observation of the current state by the use of handcrafted features, which
        do not describe the full state completely.
        Returns:
            A 11 or 17 dimensional numpy array that contains:
            1. Bottle Rotation
            2. The x-translation of the bottle
            3. The y-translation of the bottle
            4. This episodes time_step_punish
            5. This episodes spill_punish
            6. This episodes target_fill_state
            7. The number of steps that have been performed since the start of the episode.
            8. The fill-level of the glass.
            9. The amount of water in the bottle.
            10. The amount of water in the air between bottle and glass.
            11. The amount of spilled particles.
            12-14. If self.jerk_punish > 0, the last performed action.
            15-17. If self.jerk_punish > 0, the next to last performed action
            All values in the array are normalized to the range -1 to 1.
        """
        # Bottle rotation around z, normalized over [min_rotation, pi].
        # NOTE(review): unlike the other features this maps to [0, 1], not
        # [-1, 1] — still within the observation-space bounds, but confirm.
        rotation = R.from_matrix(self.bottle.rotation).as_euler("zyx")[0]
        rotation = (rotation-self.min_rotation)/(math.pi-self.min_rotation)
        translation_x,translation_y = self.bottle.translation[:2]
        # Normalize translations over their configured bounds.
        translation_x = (translation_x - self.translation_bounds[0][0]) / (self.translation_bounds[0][1]-self.translation_bounds[0][0])
        translation_y = (translation_y - self.translation_bounds[1][0]) / (self.translation_bounds[1][1]-self.translation_bounds[1][0])
        # Episode parameters, each rescaled from its range to [-1, 1].
        tsp_obs = ((self.time_step_punish-self.time_step_punish_range[0]) /
                   (self.time_step_punish_range[1]-self.time_step_punish_range[0]))*2-1
        time_obs = (self._step_number/self._max_episode_steps)*2-1
        spill_punish_obs = ((self.spill_punish-self.spill_range[0]) /
                            (self.spill_range[1]-self.spill_range[0]))*2-1
        target_fill_obs = ((self.target_fill_state-self.target_fill_range[0]) /
                           (self.target_fill_range[1]-self.target_fill_range[0]))*2-1
        feat_dat = [rotation,translation_x,translation_y,tsp_obs,spill_punish_obs,target_fill_obs,time_obs]
        # Particle counts by location, rescaled to [-1, 1] by their maxima.
        feat_dat.append((self.particle_locations["glass"]/self.max_in_glass)*2-1)
        feat_dat.append((self.particle_locations["bottle"]/self.max_particles)*2-1)
        feat_dat.append((self.particle_locations["air"]/self.max_in_air)*2-1)
        feat_dat.append((self.particle_locations["spilled"]/self.max_spill)*2-1)
        if self.jerk_punish>0:
            # Extend the observation with the actions from the two last steps.
            feat_dat.extend(np.array(self.last_actions)[:-1].flatten())
        feat_dat = np.clip(np.array(feat_dat),-1,1)
        return feat_dat
| 2.890625 | 3 |
test/flow/test_loc.py | stim-devices/dev-localite | 0 | 12758418 | <filename>test/flow/test_loc.py
# from .mock_localite import Mock
from pytest import fixture, raises
from localite.flow.mock import Mock
from localite.flow.loc import LOC, is_valid, LastMessage
from localite.flow.payload import Queue, Payload, put_in_queue, get_from_queue
import time
import json
host = "127.0.0.1"  # loopback address the mock localite server binds to
port = 6666  # TCP port shared by the mock server and the LOC client under test
@fixture(scope="module")
def mock():
    # Module-scoped fake localite server shared by every test in this file.
    mock = Mock(host=host, port=port)
    mock.start()
    mock.await_running()
    yield mock
    # shut down in less than 7s
    mock.kill()
    t0 = time.time()
    d = 0
    # Busy-wait (capped at 7 s) for the server thread to report it stopped.
    while mock.is_running.is_set() and d < 7:
        d = time.time() - t0
    assert not mock.is_running.is_set()
@fixture(scope="module")
def loc(mock):
    # LOC client wired to the mock server through in-process payload queues.
    inbox = Queue()
    outbox = Queue()
    loc = LOC(address=(host, port), inbox=inbox, outbox=outbox)
    loc.start()
    loc.await_running()
    yield loc
    # shut down in less than 7s
    # The poison-pill command asks the LOC thread to terminate itself.
    pl = Payload("cmd", "poison-pill", 12345)
    put_in_queue(pl, loc.inbox)
    t0 = time.time()
    d = 0
    while loc.is_running.is_set() and d < 7:
        d = time.time() - t0
    assert not loc.is_running.is_set()
def test_mock_running(mock):
    # Sanity check: the mock-server fixture came up successfully.
    assert mock.is_running.is_set()
def test_loc_running(loc):
    # Sanity check: the LOC-client fixture came up successfully.
    assert loc.is_running.is_set()
def test_get(loc, mock):
    """A coil response pushed into the inbox is forwarded to the outbox as a
    marker ("mrk") payload."""
    msg = json.dumps(
        {
            "coil_0_response": {
                "mepmaxtime": 18,
                "mepamplitude": 50,
                "mepmin": -25,
                "mepmax": 25,
            }
        }
    )
    payload = Payload("loc", msg, 12345)
    put_in_queue(payload, loc.inbox)
    recv = []
    t0 = time.time()
    # BUGFIX: the elapsed-time check was inverted (`t0 - time.time()` is
    # always <= 0, so it stayed < 5 forever and the intended 5 s timeout
    # could never fire; the loop could spin indefinitely).
    while time.time() - t0 < 5:
        pl = get_from_queue(loc.outbox)
        if pl is not None:
            recv.append(pl)
            if "coil_0_response" in pl.msg:
                break
    assert pl is not None  # fail clearly on timeout instead of AttributeError
    assert pl.fmt == "mrk"
    assert "coil_0_response" in pl.msg
def test_set_response(loc, mock):
    """A get-request for coil_0_amplitude produces a matching response payload."""
    payload = Payload("loc", '{"get": "coil_0_amplitude"}', 12345)
    put_in_queue(payload, loc.inbox)
    recv = []
    t0 = time.time()
    # BUGFIX: inverted elapsed-time check — see test_get.
    while time.time() - t0 < 5:
        pl = get_from_queue(loc.outbox)
        if pl is not None:
            recv.append(pl)
            if "coil_0_amplitude" in pl.msg:
                break
    assert pl is not None  # fail clearly on timeout instead of AttributeError
    assert "coil_0_amplitude" in pl.msg
def test_invalid(loc, mock):
    """An unknown command is answered with an error payload."""
    pl = Payload("loc", '{"garbage": "garbage"}', 12345)
    put_in_queue(pl, loc.inbox)
    recv = []
    t0 = time.time()
    # BUGFIX: inverted elapsed-time check — see test_get.
    while time.time() - t0 < 5:
        pl = get_from_queue(loc.outbox)
        if pl is not None:
            recv.append(pl)
            if "garbage" in pl.msg:
                break
    assert pl is not None  # fail clearly on timeout instead of AttributeError
    assert "error" in pl.msg
def test_trigger(loc, mock):
    """A single_pulse command triggers a coil_0_didt response."""
    pl = Payload("loc", '{"single_pulse":"COIL_0"}', 12345)
    put_in_queue(pl, loc.inbox)
    recv = []
    t0 = time.time()
    # BUGFIX: inverted elapsed-time check — see test_get.
    while time.time() - t0 < 5:
        pl = get_from_queue(loc.outbox)
        if pl is not None:
            recv.append(pl)
            if "coil_0_didt" in pl.msg:
                break
    assert pl is not None  # fail clearly on timeout instead of AttributeError
    assert "coil_0_didt" in pl.msg
def test_valid():
    """is_valid accepts well-formed localite payloads and rejects malformed
    ones.

    Rewritten for consistency: the original mixed `assert not is_valid(...)`
    with `assert is_valid(...) == False` (flake8 E712); the idiomatic `not`
    form is now used throughout.
    """

    def pl(msg: str) -> Payload:
        return Payload(fmt="loc", msg=msg)

    # Only fmt="loc" payloads with known keys are valid.
    assert not is_valid(Payload(fmt="mrk", msg='{"get":"test_xase"}'))
    assert not is_valid(pl('{"get":"test_xase"}'))
    assert is_valid(pl('{"get":"pointer_position"}'))
    assert is_valid(
        pl(
            '{"coil_0_response": {"mepmaxtime": 25, "mepamplitude": 50, "mepmin": -25, "mepmax": 25} }'
        )
    )
    # Out-of-range or unknown response fields are rejected.
    assert not is_valid(
        pl(
            '{"coil_0_response": {"mepmaxtime": -1, "mepamplitude": 50, "mepmin": -25, "mepmax": 25} }'
        )
    )
    assert not is_valid(
        pl(
            '{"coil_0_response": {"mepmaxtime": 25, "mepamplitude": 50, "garbage": -25, "mepmax": 25} }'
        )
    )
    assert not is_valid(
        pl(
            '{"coil_0_response": {"mepmaxtime": 25, "mepamplitude": 50, "mepmin": -25, "mepmax": 999999999} }'
        )
    )
    assert is_valid(pl('{"single_pulse":"COIL_0"}'))
    assert is_valid(pl('{"coil_0_amplitude": 20}'))
    assert not is_valid(pl('{"coil_0_amplitude": -1}'))
    assert is_valid(pl('{"coil_0_target_index": 20}'))
    assert not is_valid(pl('{"coil_0_target_index": -1}'))
    assert is_valid(pl('{"current_instrument": "POINTER"}'))
    assert not is_valid(pl('{"garbage": "garbage"}'))
def test_last_message_expects():
    # A fresh LastMessage expects nothing; asking twice is side-effect free.
    lm = LastMessage()
    assert lm.expects(None) == 0
    assert lm.expects(None) == 0
def test_last_message_raises():
    # Only "loc" payloads may update the expectation tracker; anything else
    # raises ValueError.
    pl = Payload(fmt="mrk", msg='{"single_pulse":"COIL_0"}')
    lm = LastMessage()
    with raises(ValueError):
        lm.update(pl)
def test_last_message_works():
    # After a single_pulse command the tracker counts unanswered polls and
    # resets once a response containing the expected key arrives.
    pl = Payload(fmt="loc", msg='{"single_pulse":"COIL_0"}')
    lm = LastMessage()
    lm.update(pl)
    assert lm.expects(None) == 1
    assert lm.expects(None) == 2
    response = {lm.expect: 1}
    assert lm.expects(response) == 0
    assert lm.expect == None
| 2.171875 | 2 |
app/main/views/withdraw_brief.py | uk-gov-mirror/alphagov.digitalmarketplace-briefs-frontend | 1 | 12758419 | from flask import abort, flash, redirect, url_for
from flask_login import current_user
from dmutils.flask import timed_render_template as render_template
from app import data_api_client
from .. import main
from ..helpers.buyers_helpers import (
get_framework_and_lot,
is_brief_correct,
)
BRIEF_WITHDRAWN_MESSAGE = "You’ve withdrawn your requirements for ‘{brief[title]}’"
@main.route('/frameworks/<framework_slug>/requirements/<lot_slug>/<brief_id>/withdraw', methods=['GET'])
def withdraw_a_brief_warning(framework_slug, lot_slug, brief_id):
    """Render the confirmation page shown before a buyer withdraws a brief.

    Responds 404 unless the brief is live and belongs to the current user.
    """
    framework, lot = get_framework_and_lot(
        framework_slug,
        lot_slug,
        data_api_client,
        allowed_statuses=['live', 'expired'],
        must_allow_brief=True,
    )
    brief = data_api_client.get_brief(brief_id)["briefs"]
    brief_ok = is_brief_correct(
        brief, framework_slug, lot_slug, current_user.id, allowed_statuses=['live']
    )
    if not brief_ok:
        abort(404)
    return render_template(
        "buyers/withdraw_brief.html",
        framework=framework,
        brief=brief,
    ), 200
@main.route('/frameworks/<framework_slug>/requirements/<lot_slug>/<brief_id>/withdraw', methods=['POST'])
def withdraw_a_brief(framework_slug, lot_slug, brief_id):
    """Withdraw a live brief on behalf of the current buyer, flash a
    confirmation message and redirect to the requirements dashboard."""
    # Called purely for validation: presumably aborts the request when the
    # framework/lot is not usable (return value deliberately unused).
    get_framework_and_lot(
        framework_slug,
        lot_slug,
        data_api_client,
        allowed_statuses=['live', 'expired'],
        must_allow_brief=True
    )
    brief = data_api_client.get_brief(brief_id)["briefs"]
    if not is_brief_correct(brief, framework_slug, lot_slug, current_user.id, allowed_statuses=['live']):
        abort(404)
    data_api_client.withdraw_brief(brief_id, current_user.email_address)
    flash(BRIEF_WITHDRAWN_MESSAGE.format(brief=brief), "success")
    return redirect(url_for(".buyer_dos_requirements"))
| 2.15625 | 2 |
parse_kkl.py | wmilbot/wikiscraper | 3 | 12758420 | <reponame>wmilbot/wikiscraper
from datapackage_pipelines.wrapper import ingest, spew
from datapackage_pipelines.utilities.resources import PROP_STREAMING
from pyquery import PyQuery as pq
from pipeline_params import get_pipeline_param_rows
import logging, datetime, os
from google.cloud import storage
parameters, datapackage, __ = ingest()
aggregations = {"stats": {}}
# Resolve the per-pipeline parameters and the shared scraper constants from
# their respective CSV parameter files.
parameters = next(get_pipeline_param_rows(parameters["pipeline-id"], parameters["pipeline-parameters"]))
consts = next(get_pipeline_param_rows('constants', 'kkl-parameters.csv'))
def get_resource():
    """Yield parsed rows from the scraped KKL html pages.

    Pages are read either from a Google Cloud Storage bucket (when the
    ``gcs_bucket`` constant is set) or from the local filesystem. Only rows
    that have a date, description, source and image path are yielded.
    """
    logging.info("parsing pages {} to {}".format(parameters["first_page_num"], parameters["last_page_num"]))
    if consts.get("gcs_bucket"):
        # initialize google
        logging.info("syncing to google storage bucket {}".format(consts["gcs_bucket"]))
        gcs = storage.Client.from_service_account_json(consts["gcs_secret_file"])
        gcs_bucket = gcs.get_bucket(consts["gcs_bucket"])
    else:
        # initialize filesystem
        gcs, gcs_bucket = None, None
    for page_num in range(int(parameters["first_page_num"]), int(parameters["last_page_num"]) + 1):
        # Page 1 is the site index; later pages follow the pageN.html pattern.
        if page_num == 1:
            in_filepath = os.path.join(parameters["in_path"], "index.html")
        else:
            in_filepath = os.path.join(parameters["in_path"], "page{}.html".format(page_num))
        if gcs_bucket:
            blob = gcs_bucket.blob(in_filepath)
            in_file_content = blob.download_as_string()
        else:
            with open(in_filepath) as f:
                in_file_content = f.read()
        page = pq(in_file_content)
        for tr in page("#AddLineTazlumCtrl1_GridView1").find("tr"):
            tds = pq(tr).find("td")
            texts = []
            # FIX: the cell index previously reused (shadowed) the page loop
            # variable `i`; it now has its own name.
            for col, td in enumerate(tds):
                txt = pq(td).text()
                if txt and col > 0:  # skip the first cell's text
                    texts.append(txt)
            description, source, imgdate = None, None, None
            if len(texts) > 0:
                description = texts[0]
            if len(texts) > 1:
                source = texts[1]
            if len(texts) > 2:
                # Narrowed from a bare `except: pass`: only a malformed date
                # string should be ignored, not arbitrary errors.
                try:
                    imgdate = datetime.datetime.strptime(texts[2], "%d/%m/%Y")
                except ValueError:
                    pass
            imgs = pq(tr).find("img")
            image_path = None
            if len(imgs) > 0:
                image_path = pq(imgs[0]).attr("src")
            if imgdate and description and source and image_path:
                yield {"description": description, "source": source, "image_path": image_path, "date": imgdate}
# Declare the streamed output resource, then hand control (and the row
# generator) back to the datapackage-pipelines framework.
datapackage["resources"].append({PROP_STREAMING: True,
                                 "name": "kkl",
                                 "path": "kkl.csv",
                                 "schema": {"fields": [{"name": "description", "type": "string"},
                                                       {"name": "source", "type": "string"},
                                                       {"name": "date", "type": "date"},
                                                       {"name": "image_path", "type": "string"}]}})
spew(datapackage, [get_resource()], aggregations["stats"])
iletisim/models.py | VektorelPythonHIA23/WebProje | 0 | 12758421 | <reponame>VektorelPythonHIA23/WebProje
from django.db import models
from django.utils import timezone
class IletisimModel(models.Model):
    """Contact-form message: sender name, free-text message, timestamp."""
    adi = models.CharField(max_length=50)
    soyadi = models.CharField(max_length=70)
    mesaj = models.TextField()
    zaman = models.DateTimeField(default=timezone.now)

    def __str__(self):
        # "<first name>-<last name>-<timestamp>"
        return "{}-{}-{}".format(self.adi, self.soyadi, self.zaman)
| 2.046875 | 2 |
speechsep/_nbdev.py | holyfiddlex/Speech-Separation | 1 | 12758422 | # AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"load_audio": "00_core.ipynb",
"AudioMono": "00_core.ipynb",
"duration": "00_core.ipynb",
"SpecImage": "00_core.ipynb",
"ArrayAudioBase": "00_core.ipynb",
"ArraySpecBase": "00_core.ipynb",
"ArrayMaskBase": "00_core.ipynb",
"TensorAudio": "00_core.ipynb",
"TensorSpec": "00_core.ipynb",
"TensorMask": "00_core.ipynb",
"Spectify": "00_core.ipynb",
"Decibelify": "00_core.ipynb",
"Mel_Binify_lib": "00_core.ipynb",
"MFCCify": "00_core.ipynb",
"create": "00_core.ipynb",
"encodes": "00_core.ipynb",
"audio2tensor": "00_core.ipynb",
"spec2tensor": "00_core.ipynb",
"Resample": "00_core.ipynb",
"Clip": "00_core.ipynb",
"Normalize": "00_core.ipynb",
"PhaseManager": "00_core.ipynb",
"ResampleSignal": "00b_core.base.ipynb",
"AudioBase": "00b_core.base.ipynb",
"SpecBase": "00b_core.base.ipynb",
"show_batch": "00b_core.base.ipynb",
"time_bins": "01_utils.ipynb",
"stft": "01_utils.ipynb",
"istft": "01_utils.ipynb",
"fill": "01_utils.ipynb",
"randomComplex": "01_utils.ipynb",
"complex2real": "01_utils.ipynb",
"real2complex": "01_utils.ipynb",
"complex_mult": "01_utils.ipynb",
"get_shape": "01_utils.ipynb",
"join_audios": "01_utils.ipynb",
"Mixer": "01_utils.ipynb",
"Unet_Trimmer": "01_utils.ipynb",
"setup_graph": "02_plot.ipynb",
"ColorMeshPlotter": "02_plot.ipynb",
"cmap_dict": "02_plot.ipynb",
"cmap": "02_plot.ipynb",
"pre_plot": "02_plot.ipynb",
"post_plot": "02_plot.ipynb",
"show_audio": "02_plot.ipynb",
"show_spec": "02_plot.ipynb",
"show_mask": "02_plot.ipynb",
"hear_audio": "02_plot.ipynb",
"get_audio_files": "03_data.ipynb",
"AudioBlock": "03_data.ipynb",
"audio_extensions": "03_data.ipynb",
"#fn": "04_Trainer.ipynb",
"fn": "04_Trainer.ipynb",
"pipe": "04_Trainer.ipynb",
"Tensorify": "04_Trainer.ipynb",
"AudioDataset": "04_Trainer.ipynb",
"loss_func": "04_Trainer.ipynb",
"bs": "04_Trainer.ipynb",
"shuffle": "04_Trainer.ipynb",
"workers": "04_Trainer.ipynb",
"seed": "04_Trainer.ipynb",
"dataset": "04_Trainer.ipynb",
"n": "04_Trainer.ipynb",
"train_dl": "04_Trainer.ipynb",
"valid_dl": "04_Trainer.ipynb",
"test_dl": "04_Trainer.ipynb",
"dataiter": "04_Trainer.ipynb",
"data": "04_Trainer.ipynb",
"model": "04_Trainer.ipynb",
"n_epochs": "04_Trainer.ipynb",
"n_samples": "04_Trainer.ipynb",
"n_iter": "04_Trainer.ipynb",
"optimizer": "04_Trainer.ipynb",
"state": "04_Trainer.ipynb",
"safe_div": "05_Masks.ipynb",
"MaskBase": "05_Masks.ipynb",
"MaskBinary": "05_Masks.ipynb",
"MaskcIRM": "05_Masks.ipynb",
"Maskify": "05_Masks.ipynb",
"SiamesePiar": "06_Pipe.ipynb",
"Group": "06_Pipe.ipynb",
"NanFinder": "06_Pipe.ipynb",
"AudioPipe": "06_Pipe.ipynb",
"init_weights": "07_Model.ipynb",
"conv_block": "07_Model.ipynb",
"up_conv": "07_Model.ipynb",
"Recurrent_block": "07_Model.ipynb",
"RRCNN_block": "07_Model.ipynb",
"single_conv": "07_Model.ipynb",
"Attention_block": "07_Model.ipynb",
"U_Net": "07_Model.ipynb"}
modules = ["core.py",
"base.py",
"utils.py",
"plot.py",
"data.py",
"training.py",
"masks.py",
"pipe.py",
"models.py"]
doc_url = "https://holyfiddlex.github.io/speechsep/"
git_url = "https://github.com/holyfiddlex/speechsep/tree/master/"
def custom_doc_links(name): return None
| 1.140625 | 1 |
examples/python/cpu/numpy/example_06.py | kant/ocean-tensor-package | 27 | 12758423 | ## Conversion from Ocean to Numpy
import pyOcean_cpu as ocean
import ocean_numpy
import numpy as np
# presumably the `True` flag requests a deep copy, so B is unaffected by the
# later fill — TODO confirm against the Ocean documentation.
A = ocean.asTensor([[1,2,3],[4,5,6]], ocean.float)
B = A.convertTo('numpy', True)
A.fill(3)
print(B)
print(B.dtype)
# Second case: conversion of a byte-swapped tensor.
A = ocean.asTensor([[1,2,3],[4,5,6]], ocean.float)
A.byteswap()
B = A.convertTo('numpy', True)
print(B)
| 2.90625 | 3 |
Triptimizer-QPX-Flight.py | georgetown-analytics/triptomizer | 1 | 12758424 | #!/usr/bin/env python
"""
This portion of code ingests data on driving time and distance from google API
and saves the data as json files.
"""
import json
import requests

# NOTE(review): the API key must be supplied out-of-band (e.g. an environment
# variable); never commit a real key to the repository.
api_key = "<KEY>"
## pull the above API key from folder name in this URL https://drive.google.com/drive/folders/0B7t0jfbb9NwHbEgxRndDYjlPYnc
url = "https://www.googleapis.com/qpxExpress/v1/trips/search?key=" + api_key
headers = {'content-type': 'application/json'}


def build_request(origin, destination="LAX", date="2016-01-25"):
    """Return a QPX Express request body for a one-way, one-adult search."""
    return {
        "request": {
            "slice": [
                {
                    "origin": origin,
                    "destination": destination,
                    "date": date
                }
            ],
            "passengers": {
                "adultCount": 1
            },
            "solutions": 200,
            "refundable": False
        }
    }


def fetch_and_save(origin):
    """POST a flight search for *origin* and save the response as <origin>.json."""
    response = requests.post(url, data=json.dumps(build_request(origin)), headers=headers)
    data = response.json()
    with open('{}.json'.format(origin), 'w') as f:
        json.dump(data, f, indent=2)


# The three identical copy-pasted request blocks of the original script,
# collapsed into one loop over the Washington-area departure airports.
# (The duplicate `import requests`, the unused Python-2-only `urllib2`
# import, and the dead commented-out code were removed.)
for origin in ("DCA", "IAD", "BWI"):
    fetch_and_save(origin)
| 3.546875 | 4 |
pytest_trello/__init__.py | Lencof/pytest-trello | 2 | 12758425 | <filename>pytest_trello/__init__.py
# Package metadata for the pytest-trello plugin.
__version__ = '0.0.7'
__author__ = "<NAME> and Lencof"
__author_email__ = "<<EMAIL>>"
| 0.875 | 1 |
setup.py | moevis/py2ifttt | 3 | 12758426 | from setuptools import setup
setup(name='py2ifttt',
version='1.0.0',
description='an interface for triggering ifttt webhooks',
long_description='an interface for triggering ifttt webhooks, http://github.com/moevis/py2ifttt',
url='http://github.com/moevis/py2ifttt',
author='moevis',
author_email='<EMAIL>',
license='MIT',
packages=['py2ifttt'],
install_requires=[
'requests',
],
zip_safe=False) | 1.203125 | 1 |
utility/__init__.py | hongyuanChrisLi/StartupInsights | 0 | 12758427 | <filename>utility/__init__.py
# Error Codes
ENV_VAR_NOT_FOUND = 201  # a required environment variable is not set
exercises/en/exc_cars_data.py | UBC-MDS/exploratory-data-viz | 0 | 12758428 | import altair as alt
from vega_datasets import data
cars = data.cars()
print(cars.columns)
cars.head() | 1.875 | 2 |
ch02/todos/model.py | PacktPublishing/Building-Web-APIs-with-FastAPI-and-Python | 6 | 12758429 | <reponame>PacktPublishing/Building-Web-APIs-with-FastAPI-and-Python
from pydantic import BaseModel
class Todo(BaseModel):
    """A stored to-do entry (numeric id + item text)."""
    id: int
    item: str
    class Config:
        # Example payload shown in the generated OpenAPI documentation.
        schema_extra = {
            "example": {
                "id": 1,
                "item": "Example Schema!"
            }
        }
class TodoItem(BaseModel):
    """Request body carrying only the to-do text (id assigned elsewhere)."""
    item: str
    class Config:
        # Example payload shown in the generated OpenAPI documentation.
        schema_extra = {
            "example": {
                "item": "Read the next chapter of the book"
            }
        }
| 2.609375 | 3 |
python/shared_global/main.py | mbr0wn/snippets | 0 | 12758430 | <reponame>mbr0wn/snippets<gh_stars>0
import shared_mod
import mod1
# Python 2 demo script: mutates an attribute on the shared module-level
# object `shared_mod.f` — presumably mod1 reads the same object, which is
# what this snippet demonstrates (TODO confirm against mod1's source).
print "mod1: bar == ", shared_mod.f.bar
shared_mod.f.bar = 7
print "mod1: bar == ", shared_mod.f.bar
| 1.390625 | 1 |
emmet/stylesheet/snippets.py | jingyuexing/py-emmet | 29 | 12758431 | import re
import collections
from ..css_abbreviation import parse, tokens, CSSValue, FunctionCall
re_property = re.compile(r'^([a-z-]+)(?:\s*:\s*([^\n\r;]+?);*)?$')  # "prop" or "prop: v1|v2;"
opt = {'value': True}  # parser options shared by parse_value()
class CSSSnippetType:
    """Enum-style namespace of the two snippet kinds (plain string values)."""
    Raw = 'Raw'
    Property = 'Property'
class CSSSnippetRaw:
    """Snippet whose value is arbitrary text rather than a CSS property."""
    __slots__ = ('type', 'key', 'value')
    def __init__(self, key: str, value: str):
        self.type = CSSSnippetType.Raw
        self.key = key
        self.value = value
class CSSSnippetProperty:
    """Snippet describing a CSS property with its parsed candidate values."""
    __slots__ = ('type', 'key', 'value', 'property', 'keywords', 'dependencies')
    def __init__(self, key: str, prop: str, value: list, keywords: dict):
        self.type = CSSSnippetType.Property
        self.key = key
        self.property = prop
        self.value = value
        self.keywords = keywords
        # More specific sub-properties; filled in later by nest().
        self.dependencies = []
def create_snippet(key: str, value: str):
    """Create the resolved-snippet structure for *value* under *key*.

    A snippet is either raw text or a CSS property whose alternative values
    are separated by `|`; the latter is parsed as a CSS abbreviation.
    """
    match = re_property.match(value)
    if match is None:
        return CSSSnippetRaw(key, value)
    prop, raw_values = match.group(1), match.group(2)
    parsed = [parse_value(chunk) for chunk in raw_values.split('|')] if raw_values else []
    keywords = collections.OrderedDict()
    for values in parsed:
        for css_val in values:
            collect_keywords(css_val, keywords)
    return CSSSnippetProperty(key, prop, parsed, keywords)
def nest(snippets: list):
    """
    Nests more specific CSS properties into shorthand ones, e.g.
    `background-position-x` -> `background-position` -> `background`
    """
    snippets = snippets[:]
    snippets.sort(key=lambda x: x.key)
    stack = []
    # For sorted list of CSS properties, create dependency graph where each
    # shorthand property contains its more specific one, e.g.
    # background -> background-position -> background-position-x
    for cur in filter(is_property, snippets):
        # Check if current property belongs to one from parent stack.
        # Since `snippets` array is sorted, items are perfectly aligned
        # from shorthands to more specific variants
        while stack:
            prev = stack[-1]
            # `cur` extends `prev` only when prev's name is a "-"-separated
            # prefix of cur's name.
            if cur.property.startswith(prev.property) and \
                len(cur.property) > len(prev.property) and \
                cur.property[len(prev.property)] == '-':
                prev.dependencies.append(cur)
                stack.append(cur)
                break
            # Unrelated: pop and retry against the shallower ancestor.
            stack.pop()
        if not stack:
            stack.append(cur)
    return snippets
def parse_value(value: str):
    """Parse a single CSS value string into its list of value tokens."""
    # The former `global opt` declaration was removed: `global` is only
    # needed for assignment, and `opt` is merely read here.
    return parse(value.strip(), opt)[0].value
def is_property(snippet):
    "Return True when the snippet is a parsed CSS property (not raw text)."
    return isinstance(snippet, CSSSnippetProperty)
def collect_keywords(css_val: CSSValue, dest: dict):
    """Collect keyword candidates from a parsed CSS value into `dest`
    (maps keyword text -> token)."""
    for v in css_val.value:
        if isinstance(v, tokens.Literal):
            dest[v.value] = v
        elif isinstance(v, FunctionCall):
            dest[v.name] = v
        elif isinstance(v, tokens.Field):
            # Create literal from field, if available
            value = v.name.strip()
            if value:
                dest[value] = tokens.Literal(value)
app/modules/system/view.py | iamjohnnym/jane | 2 | 12758432 | <reponame>iamjohnnym/jane<gh_stars>1-10
from flask import render_template, flash, redirect, session, url_for, Blueprint
from app.modules.domain.forms.form import AddDomain
from app.modules.user.forms.form import UserForm
from app.modules.domain.main import Domain
mod = Blueprint('domains', __name__)
@mod.route('/domains-list', methods = ['GET', 'POST'])
def list():
    # Renders the domain-list page.
    # NOTE(review): shadows the builtin `list`; kept because Flask derives
    # the endpoint name ('domains.list') from the function name.
    return render_template("domain/template/list.html",
        title = 'domains list',
        test = 'Hello World',
        )
@mod.route('/domains-add', methods = ['GET', 'POST'])
def add():
    # Renders the add-domain form (GET) and provisions a new vhost (POST).
    form = AddDomain()
    user_form = UserForm()
    if form.validate_on_submit():
        try:
            # Write the Apache virtual-host and php.ini files for the domain.
            domain = Domain(domain=form.domain_name.data,
                    service='httpd',
                    document_root="/var/www/vhosts")
            domain.writeVirtualHost(form.vhost.data)
            domain.writePhpini(form.phpini.data)
        # NOTE(review): Python 2 except syntax; would need
        # `except Exception as e` under Python 3.
        except Exception, e:
            return render_template("domain/template/add.html",
                title = 'domains add',
                test = 'Hello World',
                form = form,
                user_form = user_form,
                error="Unable to process your request: {0}".format(e)
                )
        return render_template("domain/template/list.html",
            title = 'domains list',
            test = 'Hello World',
            form = form,
            user_form = user_form,
            success = "{0} has been added".format(form.domain_name.data)
            )
    return render_template("domain/template/add.html",
        title = 'domains add',
        test = 'Hello World',
        form = form,
        user_form = user_form,
        )
| 2.625 | 3 |
research-analytics/plots.py | MalloryWittwer/research-analytics | 0 | 12758433 | <reponame>MalloryWittwer/research-analytics<filename>research-analytics/plots.py
#--------------------------------------------------------------------------#
# This code generates plots to be displayed in Dashboards #
#--------------------------------------------------------------------------#
# imports ------------------------------------------------------------------
from numpy.lib import utils
import utils
from datetime import date
import pandas as pd
from collections import Counter
import plotly.graph_objs as go
import plotly.express as px
# function definitions -----------------------------------------------------
# function names are self-explanatory --------------------------------------
def make_access_pie(df):
    """Pie chart of publications by open-access status.

    Expects *df* with an 'isOpenAccess' column holding 'true'/'false'/
    'no data' values and a 'title' column — TODO confirm against the
    data-collection step.
    """
    oa_publications = df.groupby('isOpenAccess').count()
    df_ = oa_publications
    fig = px.pie(df_, values='title', names= df_.index, color=df_.index,
                color_discrete_map={'no data':'#eda109',
                                    'true':'#a8fffe',
                                    'false':'#fa3960'})
    fig.update_layout(
        title = "<span style='font-size: 22px;'><b>Open access publications<b></span>", title_x=0.5,
        font=dict(
            family="Courier New, monospace",
            size=14,
            color="white"
            ),
        paper_bgcolor = "#101126",
        plot_bgcolor = "#101126")
    return fig
def make_fields_pie(df):
    """Pie chart of the fields of study across all publications.

    'fieldsOfStudy' entries are lists; empty/None entries are dropped before
    flattening and counting.
    """
    test_list = df.fieldsOfStudy.tolist()
    res = [i for i in test_list if i]
    flat_list_fields = utils.flatten_list(res)
    most_common_fields = Counter(flat_list_fields).most_common()
    most_common_fields_df = pd.DataFrame(most_common_fields, columns=["field", "occurence"])
    fig = px.pie(most_common_fields_df, values='occurence', names= 'field')
    fig.update_layout(
        title = "<span style='font-size: 22px;'><b>Fields of Study<b></span>", title_x=0.5,
        font=dict(
            family="Courier New, monospace",
            size=14,
            color="white"
            ),
        paper_bgcolor = "#101126",
        plot_bgcolor = "#101126")
    return fig
def make_yearly_popularity(df):
    """Line chart of a per-year 'popularity index': publication count plus
    total citation count for each publication year.

    Fixes: removed an unreachable `pass` after the return statement and
    corrected the user-facing title typos ("Populatiry"/"Indey").
    """
    popularity = df.groupby('year').count()['citationCount'] + df.groupby('year').sum()['citationCount']
    fig = px.line(df, x=df.groupby('year').count()['citationCount'].index,
                  y=popularity, title='Popularity Index')
    fig.update_layout(title = "<span style='font-size: 22px;'><b>Evolution of popularity<b></span>", title_x=0.5,
                    font=dict(
                        family="Courier New, monospace",
                        size=12,
                        color="white"
                        ),
                    paper_bgcolor = "#101126",
                    plot_bgcolor = "#101126")
    fig.update_traces(marker_color='#eda109')
    fig.update_xaxes(title="Year", range= [df.year.min() - 5, date.today().year + 5])
    fig.update_yaxes(title="Popularity Index", range= [0, 1.1* popularity.max()])
    return fig
def make_pub_per_year_line(df):
    """Line chart of the number of publications per year (Semantic Scholar
    schema: 'year' / 'citationCount' columns)."""
    fig = px.line(df, x=df.groupby('year').count()['citationCount'].index,
                  y=df.groupby('year').count()['citationCount'], title='Publications per year')
    fig.update_layout(title = "<span style='font-size: 22px;'><b>Publications per Year<b></span>", title_x=0.5,
                    font=dict(
                        family="Courier New, monospace",
                        size=12,
                        color="white"
                        ),
                    paper_bgcolor = "#101126",
                    plot_bgcolor = "#101126")
    fig.update_traces(marker_color='#eda109')
    fig.update_xaxes(title="Year", range= [df.year.min() - 5, date.today().year + 5])
    fig.update_yaxes(title="Number of Publications", range= [0, 1.1* df.groupby('year').count()['citationCount'].max()])
    return fig
def make_pub_per_year(df, which_api):
    """Bar chart of publications per year.

    `which_api` selects the column schema: 'semantic_scholar' uses
    'year'/'citationCount'; anything else uses
    'published_year'/'citation_count' (the OpenAlex-style schema).
    """
    if which_api == 'semantic_scholar':
        fig = go.Figure(data=[go.Bar(x=df.groupby('year').count()['citationCount'].index,
                        y= df.groupby('year').count()['citationCount'],
                        texttemplate="%{y}",
                        textposition="outside",
                        textangle=0)])
    else:
        fig = go.Figure(data=[go.Bar(x=df.groupby('published_year').count()['citation_count'].index,
                        y= df.groupby('published_year').count()['citation_count'],
                        texttemplate="%{y}",
                        textposition="outside",
                        textangle=0)])
    fig.update_layout(title = "<span style='font-size: 22px;'><b>Publications per Year<b></span>", title_x=0.5,
                    font=dict(
                        family="Courier New, monospace",
                        size=12,
                        color="white"
                        ),
                    paper_bgcolor = "#101126",
                    plot_bgcolor = "#101126")
    fig.update_traces(marker_color='#eda109')
    if which_api == 'semantic_scholar':
        fig.update_xaxes(title="Year", range= [df.year.min() - 5, date.today().year + 5])
        fig.update_yaxes(title="Number of Publications", range= [0, 1.1* df.groupby('year').count()['citationCount'].max()])
    else:
        fig.update_xaxes(title="Year", range= [df.published_year.min() - 5, date.today().year + 5])
        fig.update_yaxes(title="Number of Publications", range= [0, 1.1* df.groupby('published_year').count()['citation_count'].max()])
    return fig
def make_citations_per_year_line(df):
    """Line chart of total citations per publication year (Semantic Scholar
    schema: 'year' / 'citationCount' columns)."""
    fig = px.line(df, x=df.groupby('year').sum()['citationCount'].index,
                  y=df.groupby('year').sum()['citationCount'], title='Citations per year')
    fig.update_layout(title = "<span style='font-size: 22px;'><b>Citations per Year<b></span>", title_x=0.5,
                    font=dict(
                        family="Courier New, monospace",
                        size=12,
                        color="white"
                        ),
                    paper_bgcolor = "#101126",
                    plot_bgcolor = "#101126")
    fig.update_traces(marker_color='#eda109')
    fig.update_xaxes(title="Year", range= [df.year.min() - 5, date.today().year + 5])
    fig.update_yaxes(title="Number of Citations", range= [0, 1.1* df.groupby('year').sum()['citationCount'].max()])
    return fig
def make_citations_per_year(df, which_api):
    """Bar chart of total citations per publication year.

    `which_api` selects the column schema, exactly as in make_pub_per_year.
    """
    if which_api == 'semantic_scholar':
        fig = go.Figure(data=[go.Bar(x=df.groupby('year').sum()['citationCount'].index,
                        y= df.groupby('year').sum()['citationCount'],
                        texttemplate="%{y}",
                        textposition="outside",
                        textangle=0)])
    else:
        fig = go.Figure(data=[go.Bar(x=df.groupby('published_year').sum()['citation_count'].index,
                        y= df.groupby('published_year').sum()['citation_count'],
                        texttemplate="%{y}",
                        textposition="outside",
                        textangle=0)])
    fig.update_layout(title = "<span style='font-size: 22px;'><b>Citations per Year<b></span>", title_x=0.5,
                    font=dict(
                        family="Courier New, monospace",
                        size=12,
                        color="white"
                        ),
                    paper_bgcolor = "#101126",
                    plot_bgcolor = "#101126")
    fig.update_traces(marker_color='#eda109')
    if which_api == 'semantic_scholar':
        fig.update_xaxes(title="Year", range= [df.year.min() - 5, date.today().year + 5])
        # NOTE(review): axis label says "Publications" on a citations chart —
        # likely copy-paste from make_pub_per_year; confirm before changing.
        fig.update_yaxes(title="Number of Publications", range= [0, 1.1* df.groupby('year').sum()['citationCount'].max()])
    else:
        fig.update_xaxes(title="Year", range= [df.published_year.min() - 5, date.today().year + 5])
        fig.update_yaxes(title="Number of Publications", range= [0, 1.1* df.groupby('published_year').sum()['citation_count'].max()])
    return fig
def make_active_authors(df):
    """Bar chart of the ten most frequently occurring authors.

    Each row's 'authors' value is a list of dicts with a 'name' key.
    """
    authors_list = []
    for index, row in df.iterrows():
        for dict_ in row.authors:
            authors_list.append(dict_['name'])
    most_active_authors = Counter(authors_list).most_common()
    most_active_authors_df = pd.DataFrame(most_active_authors, columns=["author", "occurence"])
    fig = go.Figure(data=[go.Bar(x=most_active_authors_df[0:10].author,
                    y= most_active_authors_df[0:10].occurence,
                    texttemplate="%{y}",
                    textposition="outside",
                    textangle=0)])
    fig.update_layout(title = "<span style='font-size: 22px;'><b>Most active authors<b></span>", title_x=0.5,
                    font=dict(
                        family="Courier New, monospace",
                        size=12,
                        color="white"
                        ),
                    paper_bgcolor = "#101126",
                    plot_bgcolor = "#101126")
    fig.update_traces(marker_color='#eda109')
    fig.update_xaxes(title="Authors")
    fig.update_yaxes(title="Number of Publications", range= [0, 1.1* most_active_authors_df.occurence.max()])
    return fig
def make_top_cited_journals(df):
    """Bar chart of journals with more than 100 accumulated citations."""
    citations_by_journal = df.groupby('journal_name').sum().sort_values('citation_count', ascending=False)
    plotted = citations_by_journal[citations_by_journal['citation_count'] > 100]
    bars = go.Bar(
        x=plotted.index,
        y=plotted['citation_count'],
        texttemplate="%{y}",
        textposition="outside",
        textangle=0,
    )
    fig = go.Figure(data=[bars])
    fig.update_layout(title="Top cited journals", title_x=0.5)
    fig.update_yaxes(title="Number of citations")
    return fig
def make_top_publishing_journals(df):
    """Bar chart of journals with at least two publications in the corpus."""
    top_journals_pubs = df.groupby('journal_name').count().sort_values('citation_count', ascending=False)
    top_journals_pubs_plot = top_journals_pubs[top_journals_pubs['title'] >= 2]
    fig = go.Figure(data=[go.Bar(x=top_journals_pubs_plot.index,
                    y= top_journals_pubs_plot['citation_count'],
                    texttemplate="%{y}",
                    textposition="outside",
                    textangle=0)])
    fig.update_layout(title = f"Top publishing journals", title_x=0.5)
    fig.update_yaxes(title="Number of publications")
    return fig
def make_top_publishers_pub(df):
    """Bar chart of publishers with more than 3 publications.

    The [1:] slice skips the top-ranked row — presumably the 'no data'
    placeholder; confirm against the data-collection step.
    """
    top_publisher_pubs = df.groupby('publisher').count().sort_values('citation_count', ascending=False)
    top_publisher_pubs_plot = top_publisher_pubs[top_publisher_pubs['title']>3][1:]
    fig = go.Figure(data=[go.Bar(x=top_publisher_pubs_plot.index,
                    y= top_publisher_pubs_plot['title'],
                    texttemplate="%{y}",
                    textposition="outside",
                    textangle=0)])
    fig.update_layout(title = f"Top publishers", title_x=0.5)
    fig.update_yaxes(title="Number of publications")
    return fig
def make_top_publishers_cites(df):
    """Bar chart of publishers with more than 50 accumulated citations;
    the 'no data' placeholder row is dropped explicitly."""
    top_publisher_citations = df.groupby('publisher').sum().sort_values('citation_count', ascending=False)
    top_publisher_citations.drop(labels=['no data'], axis=0, inplace=True)
    top_publisher_citations_plot = top_publisher_citations[top_publisher_citations['citation_count'] > 50]
    fig = go.Figure(data=[go.Bar(x=top_publisher_citations_plot.index,
                    y= top_publisher_citations_plot['citation_count'],
                    texttemplate="%{y}",
                    textposition="outside",
                    textangle=0)])
    fig.update_layout(title = f"Top publishers", title_x=0.5)
    fig.update_yaxes(title="Number of citations")
    return fig
def make_top_key_words(df, query):
    """Bar chart of the 15 most frequent keywords, excluding any keyword
    containing one of the user's query terms.

    `query` is the user's raw search string; it is split on whitespace.
    Fixes: the old code indexed `query[0]` unconditionally and crashed on an
    empty query, and re-filtered the list once per term; both replaced by a
    single pass. Dead commented-out code removed.
    """
    all_keywords = []
    for index, row in df.iterrows():
        all_keywords.append(row.key_words)
    flat_keywords = utils.flatten_list(all_keywords)
    terms = query.split()
    # Keep only keywords that contain none of the query terms.
    cleaned = [kw for kw in flat_keywords if not any(term in kw for term in terms)]
    key_words_sorted = Counter(cleaned).most_common()
    top_key_words = pd.DataFrame(key_words_sorted, columns=["key_word", "occurence"])
    top_key_words = top_key_words.sort_values(by="occurence", ascending=False)
    top_key_words_plot = top_key_words[0:15]
    fig = go.Figure(data=[go.Bar(x=top_key_words_plot['key_word'],
                    y= top_key_words_plot['occurence'],
                    texttemplate="%{y}",
                    textposition="outside",
                    textangle=0)])
    fig.update_layout(
        title = "<span style='font-size: 22px;'><b>Top key words<b></span>", title_x=0.5,
        font=dict(
            family="Courier New, monospace",
            size=12,
            color="white"
            ),
        paper_bgcolor = "#101126",
        plot_bgcolor = "#101126")
    fig.update_traces(marker_color='#eda109')
    fig.update_yaxes(title="Number of occurences", range= [0, 1.1* top_key_words_plot['occurence'].max()])
    return fig
def make_first_pub_box(df):
    """Info-box figure showing the earliest publication year in the corpus
    (OpenAlex-style schema: 'published_year' column)."""
    fig = go.Figure()
    # White rounded card drawn as a filled rectangle.
    fig.add_trace(go.Scatter(
        x=[0, 0, 1, 1], y=[0, 1.4, 1.4, 0], fill="toself", fillcolor='white', mode='lines',
        line=dict(color="white")
    ))
    # Caption.
    fig.add_trace(go.Scatter(
        x=[0.5],
        y=[1],
        mode="text",
        text=["Research topic active since"],
        textfont_size=18,
        textposition="top center"
    ))
    # The year itself, large.
    fig.add_trace(go.Scatter(
        x=[0.5],
        y=[0.07],
        mode="text",
        text=[int(df.published_year.min())],
        textfont_size=60,
        textposition="top center"
    ))
    fig.update_xaxes(visible=False)
    fig.update_yaxes(visible=False)
    fig.update_layout(
        margin=go.layout.Margin(
            l=0, #left margin
            r=0, #right margin
            b=0, #bottom margin
            t=0, #top margin
        ),
        width = 300,
        height = 150,
        showlegend=False,
        plot_bgcolor='#d8b3ff',
        paper_bgcolor='#d8b3ff')
    return fig
def make_latest_pub_box(df):
    """Build a small KPI card figure showing the most recent publication year in *df*.

    Draws a white backdrop rectangle, a caption, and the year value
    (``df.published_year.max()``) on a hidden-axis 300x150 figure.
    """
    latest_year = int(df.published_year.max())
    card = go.Figure()
    # White backdrop drawn as a filled rectangle behind the text.
    card.add_trace(go.Scatter(
        x=[0, 0, 1, 1], y=[0, 1.4, 1.4, 0], fill="toself", fillcolor='white', mode='lines',
        line=dict(color="white")
    ))
    # Caption near the top, big year value near the bottom -- both centered.
    for y_pos, label, font_size in (
        (1, "Latest pub. published in", 18),
        (0.07, latest_year, 60),
    ):
        card.add_trace(go.Scatter(
            x=[0.5],
            y=[y_pos],
            mode="text",
            text=[label],
            textfont_size=font_size,
            textposition="top center"
        ))
    # Hide both axes so only the card content is visible.
    card.update_xaxes(visible=False)
    card.update_yaxes(visible=False)
    card.update_layout(
        margin=go.layout.Margin(l=0, r=0, b=0, t=0),
        width=300,
        height=150,
        showlegend=False,
        plot_bgcolor='#d8b3ff',
        paper_bgcolor='#d8b3ff')
    return card
def get_top_publisher(df):
    """Return the publisher with the most rows in *df*, skipping the 'no data' placeholder.

    Publications are counted per publisher (via the 'citation_count' column of
    the grouped count frame); if the most frequent publisher is the literal
    string 'no data', the runner-up is returned instead.
    """
    counts = df.groupby('publisher').count().sort_values('citation_count', ascending=False)
    ranked = counts.index
    return ranked[1] if ranked[0] == 'no data' else ranked[0]
def make_top_authors(df):
    """Bar chart of the five most frequent authors in *df*.

    df.authors is flattened (one entry per authorship), counted with a
    Counter, and the five most common authors are plotted with their
    occurrence counts.
    """
    flat_author_list = utils.flatten_list(df.authors.tolist())
    top_authors_df = pd.DataFrame(Counter(flat_author_list).most_common(50), columns=['author', 'occurence'])
    fig = go.Figure(data=[go.Bar(x=top_authors_df['author'][0:5],
                                y= top_authors_df['occurence'][0:5],
                                texttemplate="%{y}",
                                textposition="outside",
                                textangle=0)])
    # Bug fix: the title previously read "Top key words" (copied from the
    # key-word chart above); this figure shows the top authors.
    fig.update_layout(title="Top authors", title_x=0.5)
    fig.update_yaxes(title="Number of occurences")
    return fig
def generate_collab_network_df(df):
    """Build a co-authorship edge table from *df*.

    Each row of *df* is expected to carry an ``authors`` list of dicts with
    'name' and 'authorId' keys (per-paper author lists). All within-paper
    author pairs are counted and the 50 most frequent pairs are returned as a
    DataFrame with columns 'author1', 'author2' (each a (name, id) tuple) and
    'weight' (number of co-authored papers).

    NOTE(review): the name-pair ranking and the id-pair ranking are computed
    by two independent Counter.most_common(50) calls and then concatenated
    column-wise; this assumes both rankings line up row-for-row, which holds
    only when names and ids are in one-to-one correspondence -- confirm.
    """
    # Collect per-paper author-name lists and author-id lists in parallel.
    authors_list_of_list = []
    ids_list_of_list = []
    for index, row in df.iterrows():
        authors_list = []
        ids_list = []
        for dict_ in row.authors:
            authors_list.append(dict_['name'])
            ids_list.append(dict_['authorId'])
        authors_list_of_list.append(authors_list)
        ids_list_of_list.append(ids_list)
    # All unordered within-paper pairs (combinations without repetition).
    authors_combinations = []
    ids_combinations = []
    for authors in authors_list_of_list:
        res = [(a, b) for idx, a in enumerate(authors) for b in authors[idx + 1:]]
        authors_combinations.append(res)
    for ids in ids_list_of_list:
        rex = [(a, b) for idx, a in enumerate(ids) for b in ids[idx + 1:]]
        ids_combinations.append(rex)
    flat_authors_combinations = utils.flatten_list(authors_combinations)
    flat_ids_combinations = utils.flatten_list(ids_combinations)
    # Keep the 50 most frequent collaborations (by name and by id).
    most_common_collab = Counter(flat_authors_combinations).most_common(50)
    most_common_collab_ids = Counter(flat_ids_combinations).most_common(50)
    # Flatten ((a, b), count) into (a, b, count) rows.
    unpacked_most_collab = [(a, b, c) for (a, b ), c in most_common_collab]
    unpacked_most_collab_ids = [(a, b, c) for (a, b ), c in most_common_collab_ids]
    nx_df = pd.DataFrame(unpacked_most_collab, columns=['author1', 'author2', 'weight'])
    nx_id_df = pd.DataFrame(unpacked_most_collab_ids, columns=['id1', 'id2', 'weight1'])
    collabs_df = pd.concat([nx_df, nx_id_df], axis=1)
    # Merge each name with its id into a single (name, id) tuple per endpoint.
    collabs_df['author1'] = list(zip(collabs_df.author1, collabs_df.id1))
    collabs_df['author2'] = list(zip(collabs_df.author2, collabs_df.id2))
    collabs_df.drop(['id1', 'id2', 'weight1'], axis = 1, inplace = True)
    return collabs_df
def generate_graph_elements_collab(df):
    """Build Cytoscape-style graph elements (nodes + edges) for the collaboration network.

    Nodes are the unique (name, id) author tuples appearing in either endpoint
    column of the collaboration edge table; edges connect co-authors by id.

    Bug fix: the previous version seeded ``edges_list`` with the first row's
    edge and then appended every row (including the first) again, duplicating
    the first edge; it also raised IndexError on an empty edge table.
    """
    nx_df = generate_collab_network_df(df)
    unique_top_authors = list(set(nx_df.author1.unique().tolist() + nx_df.author2.unique().tolist()))
    nodes_list = [
        {'data': {'id': author_id, 'label': author_name}, 'classes': 'author'}
        for author_name, author_id in unique_top_authors
    ]
    edges_list = [
        {'data': {'source': row.author1[1], 'target': row.author2[1]}, 'classes': 'collaboration'}
        for _, row in nx_df.iterrows()
    ]
    return nodes_list + edges_list
def generate_ref_network_df(df1, df2):
    """Build a citation edge table.

    df1 = all_references_df (columns: reference, paperId, citedBy)
    df2 = results_df (columns: reference, paperId)

    Returns a DataFrame with columns 'ref1' and 'ref2'; each cell is a
    (label, paperId) tuple linking a reference to the result that cites it.
    """
    sources = []
    targets = []
    for _, ref_row in df1.iterrows():
        sources.append((ref_row.reference, ref_row['paperId']))
        citing_id = ref_row['citedBy']
        # Look up the citing paper's label in df2 by its paperId.
        citing_label = "".join(df2.reference[df2.paperId == citing_id])
        targets.append((citing_label, citing_id))
    return pd.DataFrame(
        {'ref1': sources,
         'ref2': targets
        })
def generate_graph_elements_network(df1, df2):
    """Build Cytoscape-style graph elements (nodes + edges) for the citation network.

    df1 = all_references_df, df2 = results_df (see generate_ref_network_df).

    Bug fixes: the previous version seeded ``edges_list`` with the first row's
    edge and then appended every row (including the first) again, duplicating
    the first edge, and raised IndexError on empty input. It also nested
    'classes' inside the 'data' dict, unlike generate_graph_elements_collab;
    Cytoscape expects 'classes' at the element's top level, so the node dicts
    are now consistent with the collab network.
    """
    ref_network_df = generate_ref_network_df(df1, df2)
    unique_refs = list(set(ref_network_df.ref1.unique().tolist()))
    unique_results = list(set(ref_network_df.ref2.unique().tolist()))
    nodes_list = [
        {'data': {'id': node_id, 'label': label}, 'classes': 'ref'}
        for label, node_id in unique_refs
    ]
    nodes_list += [
        {'data': {'id': node_id, 'label': label}, 'classes': 'res'}
        for label, node_id in unique_results
    ]
    edges_list = [
        {'data': {'source': row.ref1[1], 'target': row.ref2[1]}, 'classes': 'citation'}
        for _, row in ref_network_df.iterrows()
    ]
    return nodes_list + edges_list
httypist/schema.py | m42e/httypist | 0 | 12758434 | <gh_stars>0
import typing as t
import datetime
from pydantic import BaseModel
from enum import Enum
class Response(BaseModel):
    """Generic API envelope: a status string, a success flag and a typed result.

    The result union members are given as forward-reference strings because
    the referenced models are defined later in this module.
    NOTE(review): "RespsoneResult" matches the (misspelled) class name defined
    below -- renaming either side requires changing both.
    """
    status: str
    success: bool
    result: t.Union["ErrorResult", "RequestResult", "StatusResult", "RespsoneResult", "MultipleRequestsResult", str]
    class Config:
        # Example payload shown in the generated OpenAPI schema docs.
        schema_extra = {
            "example": {"status": "ok", "success": True, "result": "object"}
        }
class ErrorResult(BaseModel):
    """Error payload: a human-readable description and a numeric error code."""
    description: str
    code: int
class RequestResult(BaseModel):
    """Identifies one submitted request: template name, request id and timestamp."""
    template: str
    request_id: str
    request_timestamp: int
class MultipleRequestsResult(BaseModel):
    """Wrapper for a batch of submitted requests."""
    requests: t.List[RequestResult]
class StatusResult(BaseModel):
    """Processing status of a request: True once processing has finished."""
    finished: bool
class RespsoneResult(BaseModel):
    """Result payload for a finished request: the originating request, its
    processing log, and the produced output files/archives.

    NOTE(review): the class name is a typo for "ResponseResult", but it is
    referenced by the string forward-ref in Response.result, so renaming it
    would be an interface change that must update both places.
    """
    original_request: RequestResult
    log: t.List[str]
    result_files: t.List[str]
    result_zip: str
    folder_zip: str
class TemplatesResult(BaseModel):
    """List of available template names."""
    templates: t.List[str]
| 2.375 | 2 |
mailchimp-backup.py | max-arnold/mailchimp-backup | 4 | 12758435 | <filename>mailchimp-backup.py<gh_stars>1-10
#!/usr/bin/env python
"""MailChimp list backup script."""
import argparse
import csv
from datetime import datetime
import io
import os
import sys
import requests
from mailchimp3 import MailChimp
def _client(key):
    """Return a MailChimp API client authenticated with *key*.

    A descriptive User-Agent identifying this backup script is attached to
    every request.
    """
    headers = requests.utils.default_headers()
    headers['User-Agent'] = (
        'Mailchimp Backup script '
        '(https://github.com/max-arnold/mailchimp-backup)'
    )
    # Bug fix: the custom headers were previously built but never passed to
    # the client, so the custom User-Agent was silently dropped.
    return MailChimp(mc_api=key, request_headers=headers)
def _filename(out, lst):
"""Generate a filename."""
_filename.now = _filename.now or datetime.now()
attrs = {
'year': '{:04d}'.format(_filename.now.year),
'month': '{:02d}'.format(_filename.now.month),
'day': '{:02d}'.format(_filename.now.day),
'hour': '{:02d}'.format(_filename.now.hour),
'minute': '{:02d}'.format(_filename.now.minute),
'second': '{:02d}'.format(_filename.now.second),
'list': lst,
}
return os.path.abspath(out.format(**attrs))
_filename.now = None
def get_lists(key):
    """Return the full lists payload from the MailChimp API (all pages)."""
    return _client(key).lists.all(get_all=True)
def show_lists(key):
    """Print ID, name and member count for every list on the account."""
    for mc_list in get_lists(key)['lists']:
        line = 'ID: {}, Name: "{}", Members: {}'.format(
            mc_list['id'], mc_list['name'], mc_list['stats']['member_count']
        )
        print(line)
FIELDS = [
'email_address',
'email_type',
'status',
'vip',
'merge_fields.*',
'ip_signup',
'timestamp_signup',
'language',
'location.latitude',
'location.longitude',
'location.country_code',
'location.timezone',
'tags',
]
def _export_member(member):
"""Unpack a list member into a data structure specified by FIELDS."""
mem = {}
for field in FIELDS:
if '.' not in field:
mem[field] = member[field]
else:
fields = field.split('.')
if fields[1] == '*':
nested_fields = member[fields[0]].keys()
else:
nested_fields = [fields[1]]
for nf in nested_fields:
mem['%s.%s' % (fields[0], nf)] = member[fields[0]][nf]
return mem
def to_csv(members):
    """Render a list of dicts as a CSV string.

    The header row is taken from the keys of the first dict; an empty input
    yields an empty string.
    """
    buffer = io.StringIO()
    writer = csv.writer(buffer)
    if members:
        writer.writerow(members[0].keys())
        for member in members:
            writer.writerow(member.values())
    return buffer.getvalue()
def export_list(key, list_id):
    """Download every member of *list_id* and return them as a CSV string."""
    response = _client(key).lists.members.all(list_id, get_all=True)
    exported = [_export_member(member) for member in response.get('members', [])]
    return to_csv(exported)
def export_all_lists(key, options):
    """Yield (list_id, csv_string) for every list on the account.

    *options* is currently unused but kept for interface compatibility with
    the CLI entry point.
    """
    for mc_list in get_lists(key)['lists']:
        list_id = mc_list['id']
        yield (list_id, export_list(key, list_id))
if __name__ == '__main__':
    # CLI entry point: --show-lists, --list <id>, or --all-lists are mutually
    # exclusive actions; --out is an optional filename template (see _filename).
    parser = argparse.ArgumentParser(description='MailChimp list backup script')
    parser.add_argument('--key', type=str, help='API key')
    parser.add_argument(
        '--show-lists', action='store_true', help='Show available lists'
    )
    parser.add_argument('--list', type=str, help='List ID')
    parser.add_argument('--all-lists', action='store_true', help='Export all list IDs')
    parser.add_argument('--out', type=str, help='Output file')
    parser.add_argument(
        '--fail-if-empty',
        action='store_true',
        help='Fail if there are no lists or any of them is empty',
    )
    options = parser.parse_args()
    # The API key can come from the CLI flag or the environment.
    key = options.key or os.environ.get('MAILCHIMP_KEY')
    if key is None:
        parser.exit(
            status=1,
            message=(
                'Please specify either the MAILCHIMP_KEY '
                'environment variable or the --key argument\n'
            ),
        )
    if options.show_lists:
        show_lists(key)
        parser.exit()
    if options.list:
        lst = export_list(key, options.list)
        # A CSV with fewer than 2 lines has no member rows (header only / empty).
        if options.fail_if_empty and len(lst.split('\n')) < 2:
            parser.exit(
                status=1, message='List {} is empty'.format(options.list)
            )
        if options.out:
            filename = _filename(options.out, options.list)
            os.makedirs(os.path.dirname(filename), exist_ok=True)
            with open(filename, 'wb') as fp:
                fp.write(lst.encode('utf-8'))
        else:
            sys.stdout.write(lst)
        parser.exit()
    if options.all_lists:
        current_filename = None
        lists = list(export_all_lists(key, options))
        if options.fail_if_empty and len(lists) == 0:
            parser.exit(status=1, message='No lists found')
        for lst_id, lst in lists:
            if options.fail_if_empty and len(lst.split('\n')) < 2:
                parser.exit(status=1, message='List {} is empty'.format(lst_id))
            if options.out:
                filename = _filename(options.out, lst_id)
                os.makedirs(os.path.dirname(filename), exist_ok=True)
                # If the template maps several lists to the same file, append
                # to it instead of overwriting the previous list's data.
                mode = 'wb' if current_filename != filename else 'ab'
                with open(filename, mode) as fp:
                    fp.write(lst.encode('utf-8'))
                current_filename = filename
            else:
                sys.stdout.write(lst)
        parser.exit()
tf-shapes.py | victor-iyiola/tensorflow-examples | 0 | 12758436 | """TensorFlow has two major kinds of "shape"
- The Static Shape
- The Dynamic Shape
"""
# To ignore tensorflow version warning
import warnings
warnings.filterwarnings("ignore")
import tensorflow as tf
print(tf.VERSION)
# Demo: TF Static Shape
my_tensor = tf.ones(shape=[8, 2])
print('my_tensor = {}'.format(my_tensor))
# Retrieve it's static shape (NOTE: Static ops are attached to TF Tensor
# & usually have underscores in their names.
static_shape = my_tensor.get_shape()
print('static_shape = {}'.format(static_shape))
print('static_shape.as_list() = {}'.format(static_shape.as_list()))
# Create a placeholder with undefined shape.
my_placeholder = tf.placeholder(dtype=tf.float32, shape=[None, 2])
print('my_placeholder = {}'.format(my_placeholder))
# Update the shape.
print('BEFORE: my_placeholder.get_shape() = {}'.format(
my_placeholder.get_shape()))
my_placeholder.set_shape([8, 2])
print('AFTER: my_placeholder.get_shape() = {}'.format(
my_placeholder.get_shape()))
# Line divider.
print('\n\n', 70 * '=', '\n\n')
# Demo: TF Dynamic Shape
my_tensor = tf.ones(shape=[8, 2])
print('my_tensor = {}'.format(my_tensor))
# Retrieve it's dynamic shape (NOTE: Dynamic ops are attached to d main scope
# & usually have no underscores in their names.
my_dynamic_shape = tf.shape(my_tensor)
print('my_dynamic_shape = {}'.format(my_dynamic_shape))
# Dynamic shape is a tensor itself describing the shape of the original
# tensor.
my_tensor_reshaped = tf.reshape(tensor=my_tensor, shape=[2, 4, 2])
print('my_tensor_reshaped = {}'.format(my_tensor_reshaped))
# To access the dynamic shape's value, you need to run it through a Session
dynamic_value = my_dynamic_shape.eval(session=tf.Session())
print(dynamic_value)
| 3.484375 | 3 |
doc-examples/openhiven_test.py | FrostbyteBot/hiven.py | 9 | 12758437 | <gh_stars>1-10
""" Test-file for testing purposes and development! """
import logging
import openhivenpy
from openhivenpy import Message
# Route full debug logging for the openhivenpy library into openhiven.log
# (truncated on each run because of mode='w').
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("openhivenpy")
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(filename='openhiven.log', encoding='utf-8', mode='w')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
client = openhivenpy.UserClient()
@client.event()
async def on_ready():
    # Fired once the client has connected; startup_time is measured by the client.
    print(f"Ready after {client.startup_time}")
@client.event()
async def on_message_create(msg: Message):
    # Fired for every newly created message the client can see.
    print(f"Message was created - {msg.content}")
if __name__ == '__main__':
    # NOTE(review): "Insert token" is a placeholder -- a real Hiven token must
    # be supplied before this test script will connect.
    client.run("Insert token")
| 2.453125 | 2 |
readthedocs/rtd_tests/tests/test_docsitalia_search.py | danse/docs.italia.it | 0 | 12758438 | # -*- coding: utf-8 -*-
from __future__ import (
absolute_import, division, print_function, unicode_literals)
from django.test import TestCase
from mock import patch
from urllib3._collections import HTTPHeaderDict
from readthedocs.builds.models import Version
from readthedocs.projects.models import Project
from readthedocs.restapi.utils import index_search_request
from readthedocs.rtd_tests.mocks.search_mock_responses import (
search_project_response
)
from readthedocs.search.indexes import PageIndex
from readthedocs.docsitalia.models import Publisher, PublisherProject
class TestSearch(TestCase):
    """Tests that index_search_request sends the expected Elasticsearch
    payloads, including Docs Italia publisher/project metadata."""
    fixtures = ['eric', 'test_data']
    def setUp(self):
        # 'pip' project comes from the test_data fixture.
        self.pip = Project.objects.get(slug='pip')
        self.version = Version.objects.create(
            project=self.pip, identifier='test_id', verbose_name='verbose name')
    def perform_request_project_mock(self, method, url, params=None, body=None, timeout=None, ignore=()):
        """
        Elastic Search Urllib3HttpConnection mock for project search.

        Returns the (status, headers, raw_data) triple the ES transport
        expects; the canned body comes from search_mock_responses.
        """
        headers = HTTPHeaderDict({
            'content-length': '893',
            'content-type': 'application/json; charset=UTF-8'
        })
        raw_data = search_project_response
        return 200, headers, raw_data
    @patch(
        'elasticsearch.connection.http_urllib3.Urllib3HttpConnection.perform_request',
        side_effect=perform_request_project_mock
    )
    def test_index_search_request_indexes_the_project(self, perform_request_mock):
        """A project with no publisher metadata is indexed with null
        publisher/progetto fields."""
        page_list = []
        index_search_request(
            version=self.version, page_list=page_list, commit=None,
            project_scale=1, page_scale=None, section=False, delete=False)
        # Positional arg 3 of the intercepted perform_request call is the
        # JSON body sent to Elasticsearch.
        response = perform_request_mock.call_args_list[0][0][3]
        self.assertJSONEqual(response, {
            'slug': 'pip',
            'lang': 'en',
            'tags': None,
            'name': u'Pip',
            'id': 6,
            'weight': 1,
            'publisher': None,
            'url': u'/projects/pip/',
            'author': ['eric'],
            'progetto': None,
            'description': ''
        })
    @patch(
        'elasticsearch.connection.http_urllib3.Urllib3HttpConnection.perform_request',
        side_effect=perform_request_project_mock
    )
    def test_index_search_request_indexes_publisher_and_publisher_project(self, perform_request_mock):
        """When the project belongs to a PublisherProject, the publisher name
        and project slug are included in both project and page payloads."""
        publisher = Publisher.objects.create(
            name='Test Org',
            slug='publisher',
            metadata={},
            projects_metadata={},
            active=True
        )
        pub_project = PublisherProject.objects.create(
            name='Test Project',
            slug='testproject',
            metadata={
                'documents': [
                    'https://github.com/testorg/myrepourl',
                    'https://github.com/testorg/anotherrepourl',
                ]
            },
            publisher=publisher,
            active=True
        )
        pub_project.projects.add(self.pip)
        page_list = [{'path': 'path', 'title': 'title', 'content': 'content', 'headers': 'headers'}]
        # Intercept the page bulk indexing so its payload can be asserted too.
        with patch.object(PageIndex, 'bulk_index') as bulk_mock:
            index_search_request(
                version=self.version, page_list=page_list, commit=None,
                project_scale=1, page_scale=1, section=False, delete=False)
        response = perform_request_mock.call_args_list[0][0][3]
        self.assertJSONEqual(response, {
            'slug': 'pip',
            'lang': 'en',
            'tags': None,
            'name': u'Pip',
            'id': 6,
            'weight': 1,
            'publisher': 'Test Org',
            'url': u'/projects/pip/',
            'author': ['eric'],
            'progetto': 'testproject',
            'description': ''
        })
        bulk_mock.assert_called_with(
            [{'publisher': 'Test Org', 'taxonomy': None, 'project': 'pip',
              'commit': None, 'progetto': 'testproject', 'path': 'path',
              'weight': 2, 'version': 'verbose-name', 'headers': 'headers',
              'id': 'b3129830187e487e332bb2eab1b7a9c3', 'title': 'title',
              'content': 'content', 'project_id': self.pip.pk}], routing='pip'
        )
| 2.125 | 2 |
Python Digital Assistant/PyDa.py | Kelta-King/J.A.R.V.I.S | 30 | 12758439 | import wikipedia
import wolframalpha
import wx
import pyttsx3
import speech_recognition as sr
# Module-level text-to-speech engine shared by the UI.
engine = pyttsx3.init()
class MyFrame(wx.Frame):
    """Single-window UI: a greeting label plus a text box. Press Enter with
    text to query WolframAlpha (falling back to Wikipedia); press Enter on an
    empty box to capture a spoken query via the microphone."""
    def __init__(self):
        wx.Frame.__init__(self, None,
                          pos=wx.DefaultPosition, size=wx.Size(550, 100),
                          style=wx.MINIMIZE_BOX | wx.SYSTEM_MENU | wx.CAPTION |
                          wx.CLOSE_BOX | wx.CLIP_CHILDREN,
                          title="PyDa")
        panel = wx.Panel(self)
        my_sizer = wx.BoxSizer(wx.VERTICAL)
        lbl = wx.StaticText(panel,
                            label="Hello I am J.A.R.V.I.S , the Python Digital Assistant made by <NAME>. How can I help you?")
        engine.say('Hello Anubhav')
        my_sizer.Add(lbl, 0, wx.ALL, 5)
        # Enter key in the text control triggers OnEnter.
        self.txt = wx.TextCtrl(panel, style=wx.TE_PROCESS_ENTER,size=(400,30))
        self.txt.SetFocus()
        self.txt.Bind(wx.EVT_TEXT_ENTER, self.OnEnter)
        my_sizer.Add(self.txt, 0, wx.ALL, 5)
        panel.SetSizer(my_sizer)
        self.Show()
        # Blocks until the queued greeting has been spoken.
        engine.runAndWait()
    def OnEnter(self, event):
        """Handle Enter: empty input -> speech capture; otherwise try
        WolframAlpha, falling back to a Wikipedia summary on any failure."""
        input = self.txt.GetValue()
        input = input.lower()
        if input=='':
            # No typed text: listen on the microphone and transcribe into the box.
            r = sr.Recognizer()
            with sr.Microphone() as source:
                audio = r.listen(source)
            try:
                self.txt.SetValue(r.recognize_google(audio))
            except sr.UnknownValueError:
                print('Google Speech Recognition could not understand audio')
            except sr.RequestError as e:
                print('Could not request results from Google Speech Recognition Service; {0}'.format(e))
            # print("It worked!")
        else:
            try:
                # NOTE(review): hard-coded WolframAlpha app id committed to
                # source -- should be moved to configuration/env.
                app_id = "HH23Y3-5645968TGY"
                client = wolframalpha.Client(app_id)
                res = client.query(input)
                answer = next(res.results).text
                print(answer)
                engine.say('The answer is '+ answer)
                engine.runAndWait()
            except:
                # Any WolframAlpha failure falls through to Wikipedia.
                # wikipedia.set_lang("es")
                # input = input.split(' ')
                # input = " ".join(input[2:])
                engine.say('I have searched following for '+ input)
                engine.runAndWait()
                print(wikipedia.summary(input, sentences=2))
if __name__ == "__main__":
app = wx.App(True)
frame = MyFrame()
app.MainLoop()
| 3.21875 | 3 |
apimock/__main__.py | MrUPGrade/api-mock | 2 | 12758440 | <reponame>MrUPGrade/api-mock
# Entry point for `python -m apimock`: delegate straight to the CLI.
from apimock.cli import cli
cli()
| 1.1875 | 1 |
db/trainbmi.py | sgowda/brain-python-interface | 7 | 12758441 | '''
Functions to call appropriate constructor functions based on UI data and to link decoder objects in the database
'''
import os
import re
import tempfile
import xmlrpc.client
import pickle
import json
import logging
import numpy as np
from celery import task, chain
from django.http import HttpResponse
from riglib.bmi import extractor, train
from riglib import experiment
@task
def cache_plx(plxfile):
    """Celery task: open a Plexon file so its cache is (re)built.

    Opening the file via plexfile.openFile is what creates the cache as a
    side effect; the return value is intentionally discarded.
    """
    # Django must be configured before any .tracker imports inside the worker.
    os.environ['DJANGO_SETTINGS_MODULE'] = 'db.settings'
    from .tracker import dbq
    from . import namelist
    from .tracker import models
    from . import dbfunctions as dbfn
    from .json_param import Parameters
    from .tasktrack import Track
    from .tracker.models import TaskEntry, Feature, Sequence, Task, Generator, Subject, DataFile, System, Decoder
    from plexon import plexfile
    plexfile.openFile(str(plxfile))
@task
def make_bmi(name, clsname, extractorname, entry, cells, channels, binlen, tslice, ssm, pos_key, kin_extractor, zscore):
    """
    Celery task: train a new Decoder object from a recorded task entry and
    save it to the database via the XML-RPC endpoint.

    Parameters
    ----------
    name : string
        Name assigned to decoder object in the database
    clsname : string
        BMI algorithm name (passed to bmilist lookup table 'bmis')
    extractorname : string
        feature extractor algorithm name (passed to bmilist lookup table 'extractors')
    entry : models.TaskEntry
        Django record of training task
    cells : string
        Single string containing all the units to be in decoder, matching
        format in global regex 'cellname' (used only for spike extractors)
    channels : string
        Single string containing all the channels to be in decoder; must be a
        comma separated list of values with spaces (e.g., "1, 2, 3")
        (used only for, e.g., LFP extractors)
    binlen : float
        Time of spike history to consider
    tslice : slice
        Task time to use when training the decoder
    ssm : string
        Key into namelist.bmi_state_space_models selecting the state space model
    pos_key : string
        Name of the kinematic variable used as the training position signal
    kin_extractor : string
        Key into namelist.kin_extractors selecting the kinematic extraction function
    zscore : bool
        Whether to z-score the neural features during training
    """
    # Django must be configured before any .tracker imports inside the worker.
    os.environ['DJANGO_SETTINGS_MODULE'] = 'db.settings'
    from .tracker import dbq
    from . import namelist
    from .tracker import models
    from . import dbfunctions as dbfn
    from .json_param import Parameters
    from .tasktrack import Track
    from .tracker.models import TaskEntry, Feature, Sequence, Task, Generator, Subject, DataFile, System, Decoder
    # Matches e.g. "12a": channel number followed by a unit letter.
    cellname = re.compile(r'(\d{1,3})\s*(\w{1})')
    print("make bmi")
    extractor_cls = namelist.extractors[extractorname]
    print('Training with extractor class:', extractor_cls)
    if 'spike' in extractor_cls.feature_type: # e.g., 'spike_counts'
        # look at "cells" argument (ignore "channels");
        # ord(u) - 96 maps unit letter 'a'->1, 'b'->2, ...
        cells = [ (int(c), ord(u) - 96) for c, u in cellname.findall(cells)]
        if cells == []:
            units = None # use all units by default
            # Note: inside training functions (e.g., _train_KFDecoder_manual_control,
            # _train_KFDecoder_visual_feedback, etc.), remember to check if units
            # variable is None, and if so, set the units from the plx file:
            #     if units == None:
            #         units = np.array(plx.units).astype(np.int32)"
        else:
            # De-duplicate while preserving the order the user listed them in.
            unique_cells = []
            for c in cells:
                if c not in unique_cells:
                    unique_cells.append(c)
            units = np.array(unique_cells).astype(np.int32)
    elif ('lfp' in extractor_cls.feature_type) or ('ai_' in extractor_cls.feature_type): # e.g., 'lfp_power'
        # look at "channels" argument (ignore "cells")
        channels = np.array(channels.split(', ')).astype(np.int32) # convert str to list of numbers
        if len(channels) == 0:
            channels = [1, 2, 3, 4] # use these channels by default
        else:
            channels = np.unique(channels)
        # units = np.hstack([channels.reshape(-1, 1), np.zeros(channels.reshape(-1, 1).shape, dtype=np.int32)])
        units = np.hstack([channels.reshape(-1, 1), np.ones(channels.reshape(-1, 1).shape, dtype=np.int32)])
    else:
        raise Exception('Unknown extractor class!')
    task_update_rate = 60 # NOTE may not be true for all tasks?!
    # Per-extractor keyword arguments forwarded to the training function.
    extractor_kwargs = dict()
    if extractor_cls == extractor.BinnedSpikeCountsExtractor:
        extractor_kwargs['units'] = units
        extractor_kwargs['n_subbins'] = max(1, int((1./task_update_rate)/binlen))
    elif extractor_cls == extractor.LFPButterBPFPowerExtractor:
        extractor_kwargs['channels'] = channels
    elif extractor_cls == extractor.LFPMTMPowerExtractor:
        extractor_kwargs['channels'] = channels
    elif extractor_cls == extractor.AIMTMPowerExtractor:
        extractor_kwargs['channels'] = channels
    else:
        raise Exception("Unknown extractor_cls: %s" % extractor_cls)
    # The trained decoder is saved through the web app's XML-RPC interface.
    database = xmlrpc.client.ServerProxy("http://localhost:8000/RPC2/", allow_none=True)
    # list of DataFile objects
    datafiles = models.DataFile.objects.filter(entry_id=entry)
    # key: a string representing a system name (e.g., 'plexon', 'blackrock', 'task', 'hdf')
    # value: a single filename, or a list of filenames if there are more than one for that system
    files = dict()
    system_names = set(d.system.name for d in datafiles)
    for system_name in system_names:
        filenames = [d.get_path() for d in datafiles if d.system.name == system_name]
        if system_name in ['blackrock', 'blackrock2']:
            files[system_name] = filenames # list of (one or more) files
        else:
            assert(len(filenames) == 1)
            files[system_name] = filenames[0] # just one file
    training_method = namelist.bmi_algorithms[clsname]
    ssm = namelist.bmi_state_space_models[ssm]
    kin_extractor_fn = namelist.kin_extractors[kin_extractor]
    decoder = training_method(files, extractor_cls, extractor_kwargs, kin_extractor_fn, ssm, units, update_rate=binlen, tslice=tslice, pos_key=pos_key,
        zscore=zscore)
    decoder.te_id = entry
    # Pickle to a temp file and hand the path to the server for storage.
    tf = tempfile.NamedTemporaryFile('wb')
    pickle.dump(decoder, tf, 2)
    tf.flush()
    database.save_bmi(name, int(entry), tf.name)
def cache_and_train(*args, **kwargs):
    """
    Cache plexon file (if using plexon system) and train BMI.

    For plexon recordings without an existing cache, a celery chain runs
    cache_plx first and make_bmi after; otherwise make_bmi is dispatched
    directly. All arguments are forwarded to make_bmi.
    """
    # Django must be configured before any .tracker imports inside the worker.
    os.environ['DJANGO_SETTINGS_MODULE'] = 'db.settings'
    from .tracker import dbq
    from . import namelist
    from .tracker import models
    from . import dbfunctions as dbfn
    from .json_param import Parameters
    from .tasktrack import Track
    from .tracker.models import TaskEntry, Feature, Sequence, Task, Generator, Subject, DataFile, System, Decoder
    recording_sys = models.KeyValueStore.get('recording_sys', None)
    if recording_sys == 'plexon':
        print("cache and train")
        entry = kwargs['entry']
        print(entry)
        plxfile = models.DataFile.objects.get(system__name='plexon', entry=entry)
        print(plxfile)
        if not plxfile.has_cache():
            # .si() makes immutable signatures so chain() won't forward results.
            cache = cache_plx.si(plxfile.get_path())
            train = make_bmi.si(*args, **kwargs)
            chain(cache, train)()
        else:
            print("calling")
            make_bmi.delay(*args, **kwargs)
    elif recording_sys == 'blackrock':
        make_bmi.delay(*args, **kwargs)
    else:
        raise Exception('Unknown recording_system!')
def save_new_decoder_from_existing(obj, orig_decoder_record, suffix='_'):
    '''
    Save a decoder that is created by manipulating the parameters of an older decoder

    Parameters
    ----------
    obj: riglib.bmi.Decoder instance
        New decoder object to be saved
    orig_decoder_record: tracker.models.Decoder instance
        Database record of the original decoder
    suffix: string, default='_'
        The name of the new decoder is created by taking the name of the old decoder and adding the specified suffix

    Returns
    -------
    None

    Raises
    ------
    ValueError
        If *obj* is not a riglib.bmi.bmi.Decoder instance.
    '''
    # Django must be configured before any .tracker imports.
    os.environ['DJANGO_SETTINGS_MODULE'] = 'db.settings'
    from .tracker import dbq
    from . import namelist
    from .tracker import models
    from . import dbfunctions as dbfn
    from .json_param import Parameters
    from .tasktrack import Track
    from .tracker.models import TaskEntry, Feature, Sequence, Task, Generator, Subject, DataFile, System, Decoder
    import riglib.bmi
    if not isinstance(obj, riglib.bmi.bmi.Decoder):
        raise ValueError("This function is only intended for saving Decoder objects!")
    # Decoder.save() writes the pickled object and returns its filename.
    new_decoder_fname = obj.save()
    new_decoder_name = orig_decoder_record.name + suffix
    # Link the new decoder to the same training block as the original.
    training_block_id = orig_decoder_record.entry_id
    print("Saving new decoder:", new_decoder_name)
    dbq.save_bmi(new_decoder_name, training_block_id, new_decoder_fname)
## Functions to manipulate existing (KF)Decoders. These belong elsewhere
def conv_mm_dec_to_cm(decoder_record):
    '''
    Convert a mm-unit decoder to cm and save it as a new database record
    with the suffix '_cm'.

    Parameters
    ----------
    decoder_record : tracker.models.Decoder instance
        Database record of the decoder to rescale.
    '''
    # Django must be configured before any .tracker imports.
    os.environ['DJANGO_SETTINGS_MODULE'] = 'db.settings'
    from .tracker import dbq
    from . import namelist
    from .tracker import models
    from . import dbfunctions as dbfn
    from .json_param import Parameters
    from .tasktrack import Track
    from .tracker.models import TaskEntry, Feature, Sequence, Task, Generator, Subject, DataFile, System, Decoder
    decoder_fname = os.path.join('/storage/decoders/', decoder_record.path)
    print(decoder_fname)
    decoder_name = decoder_record.name
    # Bug fix: pickled data must be opened in *binary* mode on Python 3.
    with open(decoder_fname, 'rb') as f:
        dec = pickle.load(f)
    from riglib.bmi import train
    # 10 mm per cm.
    dec_cm = train.rescale_KFDecoder_units(dec, 10)
    # Bug fix: str.rstrip('.pkl') strips trailing '.', 'p', 'k', 'l' *characters*
    # (not the literal suffix), so slice the extension off instead.
    basename = os.path.basename(decoder_fname)
    if basename.endswith('.pkl'):
        basename = basename[:-len('.pkl')]
    new_decoder_basename = basename + '_cm.pkl'
    new_decoder_fname = '/tmp/%s' % new_decoder_basename
    with open(new_decoder_fname, 'wb') as f:
        pickle.dump(dec_cm, f)
    new_decoder_name = decoder_name + '_cm'
    training_block_id = decoder_record.entry_id
    print(new_decoder_name)
    dbq.save_bmi(new_decoder_name, training_block_id, new_decoder_fname)
def zero_out_SSKF_bias(decoder_record):
    """Zero the bias (last) row and column of the decoder's C^T Q^-1 C matrix
    and save the result as a new decoder record with the suffix '_zero_bias'.

    NOTE(review): open_decoder_from_record is not defined anywhere in this
    module -- presumably it is expected from elsewhere; confirm before use.
    """
    # Django must be configured before any .tracker imports.
    os.environ['DJANGO_SETTINGS_MODULE'] = 'db.settings'
    from .tracker import dbq
    from . import namelist
    from .tracker import models
    from . import dbfunctions as dbfn
    from .json_param import Parameters
    from .tasktrack import Track
    from .tracker.models import TaskEntry, Feature, Sequence, Task, Generator, Subject, DataFile, System, Decoder
    dec = open_decoder_from_record(decoder_record)
    # Last state dimension is the constant/bias term; zero its coupling.
    dec.filt.C_xpose_Q_inv_C[:,-1] = 0
    dec.filt.C_xpose_Q_inv_C[-1,:] = 0
    save_new_decoder_from_existing(dec, decoder_record, suffix='_zero_bias')
def conv_kfdecoder_binlen(decoder_record, new_binlen):
os.environ['DJANGO_SETTINGS_MODULE'] = 'db.settings'
from .tracker import dbq
from . import namelist
from .tracker import models
from . import dbfunctions as dbfn
from .json_param import Parameters
from .tasktrack import Track
from .tracker.models import TaskEntry, Feature, Sequence, Task, Generator, Subject, DataFile, System, Decoder
dec = open_decoder_from_record(decoder_record)
dec.change_binlen(new_binlen)
save_new_decoder_from_existing(dec, decoder_record, suffix='_%dHz' % int(1./new_binlen))
def conv_kfdecoder_to_ppfdecoder(decoder_record):
    '''
    Convert a KFDecoder to a PPFDecoder and save it as a new database
    record with the suffix '_ppf'.

    Parameters
    ----------
    decoder_record : tracker.models.Decoder instance
        Database record of the KFDecoder to convert.
    '''
    # Django must be configured before any .tracker imports.
    os.environ['DJANGO_SETTINGS_MODULE'] = 'db.settings'
    from .tracker import dbq
    from . import namelist
    from .tracker import models
    from . import dbfunctions as dbfn
    from .json_param import Parameters
    from .tasktrack import Track
    from .tracker.models import TaskEntry, Feature, Sequence, Task, Generator, Subject, DataFile, System, Decoder
    # Load the decoder
    decoder_fname = os.path.join('/storage/decoders/', decoder_record.path)
    print(decoder_fname)
    decoder_name = decoder_record.name
    # Bug fix: pickled data must be opened in *binary* mode on Python 3.
    with open(decoder_fname, 'rb') as f:
        dec = pickle.load(f)
    from riglib.bmi import train
    dec_ppf = train.convert_KFDecoder_to_PPFDecoder(dec)
    # Bug fix: str.rstrip('.pkl') strips trailing '.', 'p', 'k', 'l' *characters*
    # (not the literal suffix), so slice the extension off instead.
    basename = os.path.basename(decoder_fname)
    if basename.endswith('.pkl'):
        basename = basename[:-len('.pkl')]
    new_decoder_basename = basename + '_ppf.pkl'
    new_decoder_fname = '/tmp/%s' % new_decoder_basename
    with open(new_decoder_fname, 'wb') as f:
        pickle.dump(dec_ppf, f)
    new_decoder_name = decoder_name + '_ppf'
    training_block_id = decoder_record.entry_id
    print(new_decoder_name)
    from .tracker import dbq
    dbq.save_bmi(new_decoder_name, training_block_id, new_decoder_fname)
def conv_kfdecoder_to_sskfdecoder(decoder_record):
    '''
    Convert a KFDecoder to a steady-state KFDecoder (SSKFDecoder) and save
    it as a new database record with the suffix '_sskf'.

    Parameters
    ----------
    decoder_record : tracker.models.Decoder instance
        Database record of the KFDecoder to convert.

    NOTE(review): open_decoder_from_record is not defined anywhere in this
    module -- presumably it is expected from elsewhere; confirm before use.
    '''
    # Django must be configured before any .tracker imports.
    os.environ['DJANGO_SETTINGS_MODULE'] = 'db.settings'
    from .tracker import dbq
    from . import namelist
    from .tracker import models
    from . import dbfunctions as dbfn
    from .json_param import Parameters
    from .tasktrack import Track
    from .tracker.models import TaskEntry, Feature, Sequence, Task, Generator, Subject, DataFile, System, Decoder
    dec = open_decoder_from_record(decoder_record)
    # Steady-state gain matrices of the original Kalman filter.
    F, K = dec.filt.get_sskf()
    from riglib.bmi import sskfdecoder
    filt = sskfdecoder.SteadyStateKalmanFilter(F=F, K=K)
    # Bug fix: previously referenced the undefined name 'decoder.binlen'.
    dec_sskf = sskfdecoder.SSKFDecoder(filt, dec.units, dec.ssm, binlen=dec.binlen)
    # Bug fix: previously called save_new_decoder_from_existing(decoder_record,
    # '_sskf'), passing the DB record where the new decoder object is expected.
    save_new_decoder_from_existing(dec_sskf, decoder_record, suffix='_sskf')
def make_kfdecoder_interpolate(decoder_record):
    '''
    Create a KFDecoder whose state is interpolated between updates (60 Hz)
    and save it as a new database record with the suffix '_60hz'.

    Parameters
    ----------
    decoder_record : tracker.models.Decoder instance
        Database record of the KFDecoder to convert.
    '''
    # Django must be configured before any .tracker imports.
    os.environ['DJANGO_SETTINGS_MODULE'] = 'db.settings'
    from .tracker import dbq
    from . import namelist
    from .tracker import models
    from . import dbfunctions as dbfn
    from .json_param import Parameters
    from .tasktrack import Track
    from .tracker.models import TaskEntry, Feature, Sequence, Task, Generator, Subject, DataFile, System, Decoder
    # Load the decoder
    decoder_fname = os.path.join('/storage/decoders/', decoder_record.path)
    print(decoder_fname)
    decoder_name = decoder_record.name
    # Bug fix: pickled data must be opened in *binary* mode on Python 3.
    with open(decoder_fname, 'rb') as f:
        dec = pickle.load(f)
    from riglib.bmi import train
    dec_interp = train._interpolate_KFDecoder_state_between_updates(dec)
    # Bug fix: str.rstrip('.pkl') strips trailing characters, not the suffix;
    # also use the '_60hz' suffix here -- '_ppf' was a copy-paste leftover
    # from conv_kfdecoder_to_ppfdecoder and did not match the decoder name.
    basename = os.path.basename(decoder_fname)
    if basename.endswith('.pkl'):
        basename = basename[:-len('.pkl')]
    new_decoder_basename = basename + '_60hz.pkl'
    new_decoder_fname = '/tmp/%s' % new_decoder_basename
    with open(new_decoder_fname, 'wb') as f:
        pickle.dump(dec_interp, f)
    new_decoder_name = decoder_name + '_60hz'
    training_block_id = decoder_record.entry_id
    print(new_decoder_name)
    from .tracker import dbq
    dbq.save_bmi(new_decoder_name, training_block_id, new_decoder_fname)
| 2.390625 | 2 |
chexpert-model/IRNet/test_cams_seg_labels.py | stanfordmlgroup/CheXseg | 7 | 12758442 | import sys
import os
sys.path.append(os.path.abspath("../"))
import torch
import numpy as np
import torch.nn as nn
from torch import multiprocessing, cuda
from misc import torchutils, indexing, imutils
from data.chexpert_dataset_irnet import CheXpertTestCAMDatasetIRNet
from torch.utils.data import DataLoader
from constants import *
import importlib
from tqdm import tqdm
from args.train_arg_parser_irnet import TrainArgParserIRNet
import torch.nn.functional as F
import imageio
from augmentations import get_transforms
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
eps = 1e-7
def main(args):
    """Evaluate CAM-derived segmentation labels against ground truth.

    Iterates the CheXpert test set once, saves each image's ground-truth
    segmentation labels to disk, and accumulates a per-task foreground
    intersection/union to print a foreground IoU per localization task.

    Args:
        args: parsed command-line arguments (currently unused in the body).
    """
    dataset = CheXpertTestCAMDatasetIRNet()
    data_loader = DataLoader(dataset, shuffle=False, num_workers=os.cpu_count(), pin_memory=False)
    with torch.no_grad(), cuda.device(0):
        # Running accumulators, one slot per localization task.
        fg_intersection = np.zeros(len(LOCALIZATION_TASKS))
        fg_union = np.zeros(len(LOCALIZATION_TASKS))
        # Previously `for iter, pack in enumerate(...)`: the index shadowed
        # the builtin `iter` and was never used.
        for pack in tqdm(data_loader):
            # [0] strips the batch dimension (DataLoader batch size is 1).
            img_level_labels = pack['img_level_labels'][0]
            gt_seg_labels = pack['gt_seg_labels'][0]
            cam_seg_labels = pack['cam_seg_labels'][0]
            img_name = pack['base_name'][0]
            np.save(os.path.join(CHEXPERT_PARENT_TEST_CAMS_DIR / "gt_seg_labels", f"{img_name}_seg_labels.npy"), gt_seg_labels.cpu().numpy())
            for index in range(len(LOCALIZATION_TASKS)):
                cam_seg_label = cam_seg_labels[index]
                gt_seg_label = gt_seg_labels[index]
                # If the image-level label is negative, zero the predicted mask
                # (in place) so it contributes nothing to the intersection.
                if img_level_labels[index] == 0:
                    cam_seg_label[:] = 0
                intersection_fg = torch.sum(cam_seg_label * gt_seg_label).numpy()
                union_fg = torch.sum(cam_seg_label).numpy() + torch.sum(gt_seg_label).numpy() - intersection_fg
                fg_intersection[index] += intersection_fg
                fg_union[index] += union_fg
        for i in range(len(fg_intersection)):
            # eps smoothing avoids 0/0 for tasks that never appear.
            fg_iou = (fg_intersection[i] + eps) / (fg_union[i] + eps)
            print(f"Index {i} fg iou {fg_iou}")
if __name__ == "__main__":
    # Parse IRNet command-line arguments and run the CAM-label evaluation.
    parser = TrainArgParserIRNet()
    hyperparams = parser.parse_args()
    # TRAIN
    main(hyperparams)
| 1.953125 | 2 |
src/prepare.py | microsoft/verseagility | 15 | 12758443 | """
PREPARE
Before running train, you need to run prepare.py with the respective task.
Example (in the command line):
> cd to root dir
> conda activate nlp
> python src/prepare.py --do_format --task 1
"""
#NOTE: the following is a workaround for AML to load modules
import os, sys; sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import os
import spacy
import pandas as pd
import numpy as np
import string
import re
import argparse
from sklearn.model_selection import StratifiedShuffleSplit
# Custom functions
import sys
sys.path.append('./src')
import helper as he
import data as dt
import custom as cu
logger = he.get_logger(location=__name__)
class Clean():
    """Text preprocessing and cleaning steps

    SUPPORTED LANGUAGES
    - EN
    - DE
    - IT
    - ES
    - FR
    - XX (multi - NER only)

    SUPPORTED MODULES
    - Remove Noise
        Remove formatting and other noise that may be contained in emails or
        other document types.
    - Get Placeholders
        Placeholders for common items such as dates, times, urls but also
        custom customer IDs.
    - Remove Stopwords
        Stopwords can be added by adding a language specific stopword file
        to /assets. Format: "assets/stopwords_<language>.txt".
    - Lemmatize
    """
    def __init__(self, task,
                download_source=False,
                download_train=False,
                inference=False):
        self.task = task
        self.language = cu.params.get('language')
        # Load data class
        self.dt = dt.Data(task=self.task, inference=inference)
        # Download data, if needed
        if download_train:
            self.dt.download('data_dir', dir = 'data_dir', source = 'datastore')
        # Load spacy model (NER/parser/tagger disabled: only tokenization,
        # stopword flags and lemmas are needed here)
        self.nlp = he.load_spacy_model(language=self.language, disable=['ner','parser','tagger'])
        # Create stopword list
        stopwords_active = []
        ## Load names
        try:
            names = self.dt.load('fn_names', dir = 'asset_dir', file_type = 'list')
            stopwords_active = stopwords_active + names
        except FileNotFoundError as e:
            logger.warning(f'[WARNING] No names list loaded: {e}')
        ## Load stopwords
        try:
            stopwords = self.dt.load('fn_stopwords', dir = 'asset_dir', file_type = 'list')
            stopwords_active = stopwords_active + stopwords
        except FileNotFoundError as e:
            logger.warning(f'[WARNING] No stopwords list loaded: {e}')
        ## Add to Spacy stopword list
        # (typo fix in the log message: "lenght" -> "length")
        logger.warning(f'[INFO] Active stopwords list length: {len(stopwords_active)}')
        for w in stopwords_active:
            self.nlp.vocab[w.replace('\n','')].is_stop = True

    def remove(self, line,
            rm_email_formatting=False,
            rm_email_header=False,
            rm_email_footer=False,
            rm_punctuation=False):
        """Remove content from text

        Args:
            line: input text (coerced to str).
            rm_email_formatting: strip HTML tags and generated-email headers.
            rm_email_header: strip reply/forward prefixes and greetings.
            rm_email_footer: strip sign-offs and mail-client footers.
            rm_punctuation: strip all punctuation characters.
        Returns:
            the cleaned string.
        """
        if not isinstance(line, str):
            line = str(line)
        # Customer Remove
        line = cu.remove(line)
        if rm_email_formatting:
            line = re.sub(r'<[^>]+>', ' ', line)  # Remove HTML tags
            line = re.sub(r'^(.*\.eml)', ' ', line) # remove header for system generated emails
        if rm_email_header:
            #DE/EN
            if self.language == 'en' or self.language == 'de':
                line = re.sub(r'\b(AW|RE|VON|WG|FWD|FW)(\:| )', '', line, flags=re.I)
            #DE
            if self.language == 'de':
                line = re.sub(r'(Sehr geehrte( Damen und Herren.)?.)|hallo.|guten( tag)?.', '', line, flags=re.I)
        if rm_email_footer:
            #EN
            if self.language == 'en':
                line = re.sub(r'\bkind regards.*', '', line, flags=re.I)
            #DE
            if self.language == 'de':
                line = re.sub(r'\b(mit )?(beste|viele|liebe|freundlich\w+)? (gr[u,ü][ß,ss].*)', '', line, flags=re.I)
                line = re.sub(r'\b(besten|herzlichen|lieben) dank.*', '', line, flags=re.I)
                line = re.sub(r'\bvielen dank für ihr verständnis.*', '', line, flags=re.I)
                line = re.sub(r'\bvielen dank im voraus.*', '', line, flags=re.I)
                line = re.sub(r'\b(mfg|m\.f\.g) .*','', line, flags=re.I)
                line = re.sub(r'\b(lg) .*','',line, flags=re.I)
                line = re.sub(r'\b(meinem iPhone gesendet) .*','',line, flags=re.I)
                line = re.sub(r'\b(Gesendet mit der (WEB|GMX)) .*','',line, flags=re.I)
                line = re.sub(r'\b(Diese E-Mail wurde von Avast) .*','',line, flags=re.I)
        # Remove remaining characters
        ##NOTE: may break other regex
        if rm_punctuation:
            line = re.sub('['+string.punctuation+']',' ',line)
        return line

    def get_placeholder(self, line,
                    rp_generic=False,
                    rp_custom=False,
                    rp_num=False):
        """Replace text with type specfic placeholders

        Args:
            line: input text.
            rp_generic: replace dates, times, IPs, emails, URLs, currency.
            rp_custom: reserved for customer-specific placeholders (unused here).
            rp_num: replace stand-alone numbers with placeholders.
        Returns:
            the text with placeholders substituted.
        """
        # Customer placeholders
        line = cu.get_placeholder(line)
        # Generic placeholder
        if rp_generic:
            line = re.sub(r' \+[0-9]+', ' ', line) # remove phone numbers
            # BUGFIX: re.sub's 4th positional argument is `count`, not `flags`.
            # The original call passed re.IGNORECASE (== 2) as a replacement
            # limit and stayed case-sensitive; use the `flags` keyword.
            line = re.sub(r'0x([a-z]|[0-9])+ ',' PER ',line, flags=re.IGNORECASE) # replace hex-style ids
            line = re.sub(r'[0-9]{2}[\/.,:][0-9]{2}[\/.,:][0-9]{2,4}', ' PDT ', line) # remove dates and time, replace with placeholder
            line = re.sub(r'([0-9]{2,3}[\.]){3}[0-9]{1,3}',' PIP ',line) # replace ip with placeholder
            line = re.sub(r'[0-9]{1,2}[\/.,:][0-9]{1,2}', ' PTI ', line) # remove only time, replace with placeholder
            line = re.sub(r'[\w\.-]+@[\w\.-]+', ' PEM ', line) # remove emails
            line = re.sub(r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+', ' PUR ', line) # Remove links
            line = re.sub(r'€|\$|(USD)|(EURO)', ' PMO ', line)
        # Placeholders for numerics
        if rp_num:
            line = re.sub(r' ([0-9]{4,30}) ',' PNL ', line) # placeholder for long stand alone numbers
            line = re.sub(r' [0-9]{2,3} ',' PNS ', line) # placeholder for short stand alone numbers
        return line

    def tokenize(self, line, lemmatize = False, rm_stopwords = False):
        """Tokenizer for non DL tasks

        Runs the text through spaCy and rejoins tokens, optionally
        lemmatizing and/or dropping stopwords.
        """
        if not isinstance(line, str):
            line = str(line)
        if lemmatize and rm_stopwords:
            line = ' '.join([t.lemma_ for t in self.nlp(line) if not t.is_stop])
        elif lemmatize:
            line = ' '.join([t.lemma_ for t in self.nlp(line)])
        elif rm_stopwords:
            line = ' '.join([t.text for t in self.nlp(line) if not t.is_stop])
        return line

    def transform(self, texts,
                    to_lower = False,
                    # Remove
                    rm_email_formatting = False,
                    rm_email_header = False,
                    rm_email_footer = False,
                    rm_punctuation = False,
                    # Placeholders
                    rp_generic = False,
                    rp_num = False,
                    # Tokenize
                    lemmatize = False,
                    rm_stopwords = False,
                    return_token = False,
                    # Whitespace
                    remove_whitespace = True
                ):
        """Main run function for cleaning process

        Args:
            texts: a string or list of strings to clean.
            (remaining flags are forwarded to remove/get_placeholder/tokenize)
        Returns:
            a list of cleaned strings, or a list of token lists when
            `return_token` is True.
        """
        if isinstance(texts, str):
            texts = [texts]
        # Convert to series for improved efficiency
        df_texts = pd.Series(texts)
        # Avoid loading errors
        df_texts = df_texts.replace('\t', ' ', regex=True)
        # Remove noise
        if any((rm_email_formatting, rm_email_header,
                rm_email_footer, rm_punctuation)):
            df_texts = df_texts.apply(lambda x: self.remove(x,
                                            rm_email_formatting = rm_email_formatting,
                                            rm_email_header = rm_email_header,
                                            rm_email_footer = rm_email_footer,
                                            rm_punctuation = rm_punctuation))
        # Replace placeholders
        if any((rp_generic, rp_num)):
            df_texts = df_texts.apply(lambda x: self.get_placeholder(x,
                                                rp_generic = rp_generic,
                                                rp_num = rp_num))
        # Tokenize text
        if any((lemmatize, rm_stopwords, return_token)):
            df_texts = df_texts.apply(self.tokenize,
                                lemmatize = lemmatize,
                                rm_stopwords = rm_stopwords)
        # To lower
        if to_lower:
            df_texts = df_texts.apply(str.lower)
        # Remove spacing
        if remove_whitespace:
            df_texts = df_texts.apply(lambda x: " ".join(x.split()))
        # Return Tokens
        if return_token:
            return [t.split(' ') for t in df_texts.to_list()]
        else:
            return df_texts.to_list()

    def transform_by_task(self, text):
        """Apply the task-appropriate cleaning preset and return one result."""
        # CUSTOM FUNCTION
        task_type = cu.tasks.get(str(self.task)).get('type')
        # classification and multi_classification previously had two
        # byte-identical branches; merged for consistency.
        if task_type in ('classification', 'multi_classification'):
            return self.transform(text,
                    rm_email_formatting = True,
                    rm_email_header = True,
                    rm_email_footer = True,
                    rp_generic = True)[0]
        elif task_type == 'ner':
            return text[0]
        elif task_type == 'qa':
            return self.transform(text,
                    to_lower = True,
                    # Remove
                    rm_email_formatting = True,
                    rm_email_header = True,
                    rm_email_footer = True,
                    rm_punctuation = True,
                    # Placeholders
                    rp_generic = True,
                    rp_num = True,
                    # Tokenize
                    lemmatize = True,
                    rm_stopwords = True,
                    return_token = True
                )[0]
        else:
            logger.warning('[WARNING] No transform by task found.')
            return text[0]
def _distinct_labels(frame):
    """Return the distinct non-empty labels in ``frame['label']``.

    Multi-label rows are split on ',' before deduplication, so this works
    for single- and multi-label data alike. Extracted because the same
    flattening code previously appeared twice in prepare_classification.
    """
    flat_labels = [row['label'].split(',') for _, row in frame.iterrows()]
    labels_clean = [label for labels in flat_labels for label in labels]
    label_series = pd.DataFrame({'label': labels_clean})
    label_series = label_series[label_series.label != '']
    return label_series.label.drop_duplicates()

def prepare_classification(task, do_format, train_split, min_cat_occurance,
                    min_char_length, register_data):
    """Prepare (multi-)classification data: clean, filter, split and store.

    Args:
        task: task id, resolved to a task config via `custom`.
        do_format: when True, re-fetch and re-normalize the raw data.
        train_split: fraction of data used for training (rest is test).
        min_cat_occurance: drop labels occurring at most this many times.
        min_char_length: drop texts shorter than this many characters.
        register_data: when True, upload prepared files to the datastore.
    """
    # Get clean object
    cl = Clean(task=task, download_source=True)
    # Load data
    if not os.path.isfile(cl.dt.get_path('fn_prep', dir = 'data_dir')) or do_format:
        data = dt.get_dataset(cl, source="cdb")
    else:
        data = cl.dt.load('fn_prep', dir = 'data_dir')
    logger.warning(f'Data Length : {len(data)}')
    # Load text & label field
    text_raw = cu.load_text(data)
    data['label'] = cu.load_label(data, task)
    # Hoisted: the task type is consulted several times below.
    task_type = cu.tasks.get(str(task)).get('type')
    if task_type == 'multi_classification':
        data['label'] = data['label'].str.replace(', ', '_').str.replace(' ', '_')
        label_list_raw = _distinct_labels(data)
    elif task_type == 'classification': # in case of single label classification
        label_list_raw = data.label.drop_duplicates()
    # Clean text
    data['text'] = cl.transform(text_raw,
                    rm_email_formatting = True,
                    rm_email_header = True,
                    rm_email_footer = True,
                    rp_generic = True)
    # Filter by length
    data = he.remove_short(data, 'text', min_char_length=min_char_length)
    logger.warning(f'Data Length : {len(data)}')
    # Remove duplicates
    data_red = data.drop_duplicates(subset=['text'])
    logger.warning(f'Data Length : {len(data_red)}')
    # Min class occurance
    if task_type == 'classification':
        data_red = data_red[data_red.groupby('label').label.transform('size') > min_cat_occurance]
    elif task_type == 'multi_classification':
        # Split rows
        data_transform = data_red[['id', 'label']].copy()
        data_transform['label'] = [row['label'].split(",") for index, row in data_transform.iterrows()] # pipe it to list
        data_transform = pd.DataFrame({'index':data_transform.index.repeat(data_transform.label.str.len()), 'label':np.concatenate(data_transform.label.values)}) # explode df
        data_transform = data_transform[data_transform.groupby('label').label.transform('size') > min_cat_occurance] # count for min occurance and only keep relevant ones
        data_transform = data_transform.groupby(['index'])['label'].apply(lambda x: ','.join(x.astype(str))).reset_index() # re-merge
        data_transform = data_transform.set_index('index')
        del data_red['label']
        data_red = pd.concat([data_red, data_transform], join='inner', axis=1)
    logger.warning(f'Data Length : {len(data_red)}')
    data_red = data_red.tail(300000).reset_index(drop=True).copy()
    #TODO: .tail() temp is for debugging
    ## There is a memory issue for the EN dataset, due to its size. Needs further investigation.
    # Label list (recomputed after filtering, to report what was dropped)
    if task_type == 'multi_classification':
        label_list = _distinct_labels(data_red)
    elif task_type == 'classification': # in case of single label classification
        label_list = data_red.label.drop_duplicates()
    logger.warning(f'Excluded labels: {list(set(label_list_raw)-set(label_list))}')
    # Split data (stratified; multi-label stratifies on the first label)
    strf_split = StratifiedShuffleSplit(n_splits = 1, test_size=(1-train_split), random_state=200)
    if task_type == 'classification':
        for train_index, test_index in strf_split.split(data_red, data_red['label']):
            df_cat_train = data_red.loc[train_index]
            df_cat_test = data_red.loc[test_index]
    elif task_type == 'multi_classification':
        for train_index, test_index in strf_split.split(data_red, pd.DataFrame({'label':[l.split(',')[0] for l in data_red['label']]})['label']):
            df_cat_train = data_red.loc[train_index]
            df_cat_test = data_red.loc[test_index]
    # Save data
    cl.dt.save(data_red, fn = 'fn_clean', dir = 'data_dir')
    cl.dt.save(df_cat_train[['text','label']], fn = 'fn_train', dir = 'data_dir')
    cl.dt.save(df_cat_test[['text','label']], fn = 'fn_test', dir = 'data_dir')
    cl.dt.save(label_list, fn = 'fn_label', header=False, dir = 'data_dir')
    # Upload data
    if register_data:
        cl.dt.upload('data_dir', destination='dataset')
def prepare_ner(task, do_format, register_data):
    """Prepare NER data. Intentional no-op: not implemented yet."""
    pass
def prepare_qa(task, do_format, min_char_length, register_data):
    """Prepare question-answer data: clean, dedupe and store/upload.

    Args:
        task: task id, resolved to a task config via `custom`.
        do_format: when True, re-fetch and re-normalize the raw data.
        min_char_length: drop questions shorter than this many characters.
        register_data: when True, upload prepared files to the datastore.
    """
    # Get clean object
    cl = Clean(task=task, download_source=True)
    # Load data
    if not os.path.isfile(cl.dt.get_path('fn_prep', dir = 'data_dir')) or do_format:
        data = dt.get_dataset(cl, source="cdb")
    else:
        data = cl.dt.load('fn_prep', dir = 'data_dir')
    logger.warning(f'Data Length : {len(data)}')
    # Filter relevant question answer pairs
    data = cu.filter_qa(data)
    logger.warning(f'Data Length : {len(data)}')
    # Load question & answer fields
    question, answer = cu.load_qa(data)
    # Clean text (aggressive preset: used for matching/retrieval)
    data['question_clean'] = cl.transform(question,
                        to_lower = True,
                        rm_email_formatting = True,
                        rm_email_header = True,
                        rm_email_footer = True,
                        rm_punctuation = True,
                        rp_generic = True,
                        rp_num = True,
                        lemmatize = True,
                        rm_stopwords = True
                    )
    data['answer_clean'] = cl.transform(answer,
                        to_lower = True,
                        rm_email_formatting = True,
                        rm_email_header = True,
                        rm_email_footer = True,
                        rm_punctuation = True,
                        rp_generic = True,
                        rp_num = True,
                        lemmatize = True,
                        rm_stopwords = True
                    )
    # For display (light preset: keeps readable text for the UI)
    data['answer_text_clean'] = cl.transform(answer,
                        rm_email_formatting = True,
                        rm_email_header = True,
                        rm_email_footer = True
                    )
    # Filter by length
    data = he.remove_short(data, 'question_clean', min_char_length=min_char_length)
    logger.warning(f'Data Length : {len(data)}')
    # Remove duplicates
    data = data.drop_duplicates(subset=['question_clean'])
    logger.warning(f'Data Length : {len(data)}')
    data = data.reset_index(drop=True).copy()
    # Save data
    cl.dt.save(data, fn = 'fn_clean', dir = 'data_dir')
    # Upload data
    if register_data:
        cl.dt.upload('data_dir', destination='dataset')
def main(task=1,
        do_format=False,
        split=0.9,
        min_cat_occurance=300,
        min_char_length=20,
        register_data=False):
    """Dispatch data preparation based on the configured task type.

    Args:
        task: task id, resolved to a task config via `custom`.
        do_format: when True, re-fetch and re-normalize the raw data.
        split: train fraction for classification tasks.
        min_cat_occurance: minimum label occurrence for classification tasks.
        min_char_length: minimum text length for classification/qa tasks.
        register_data: when True, upload prepared files to the datastore.
    """
    logger.warning(f'Running <PREPARE> for task {task}')
    task_type = cu.tasks.get(str(task)).get('type')
    # 'classification' and 'multi_classification' previously had two
    # identical branches; they share the same pipeline.
    if task_type in ('classification', 'multi_classification'):
        prepare_classification(task, do_format, split, min_cat_occurance, min_char_length, register_data)
    elif 'ner' == task_type:
        prepare_ner(task, do_format, register_data)
    elif 'qa' == task_type:
        prepare_qa(task, do_format, min_char_length, register_data)
    else:
        logger.warning('[ERROR] TASK TYPE UNKNOWN. Nothing was processed.')
def run():
    """Run from the command line"""
    parser = argparse.ArgumentParser()
    parser.add_argument("--task",
                    default=1,
                    type=int,
                    help="Task where: \
                            -task 1 : classification subcat \
                            -task 2 : classification cat \
                            -task 3 : ner \
                            -task 4 : qa")
    # NOTE(review): this help text reads inverted -- passing --do_format
    # *forces* re-fetching/re-normalizing in the prepare_* functions.
    # Confirm intended wording before changing user-facing text.
    parser.add_argument('--do_format',
                    action='store_true',
                    help="Avoid reloading and normalizing data")
    parser.add_argument("--split",
                    default=0.9,
                    type=float,
                    help="Train test split. Dev split is taken from train set.")
    parser.add_argument("--min_cat_occurance",
                    default=300,
                    type=int,
                    help="Min occurance required by category.")
    parser.add_argument("--min_char_length",
                    default=20,
                    type=int,
                    help="")
    parser.add_argument('--register_data',
                    action='store_true',
                    help="")
    args = parser.parse_args()
    # Forward parsed CLI options to the dispatcher.
    main(args.task, args.do_format, args.split, min_cat_occurance=args.min_cat_occurance,
        min_char_length=args.min_char_length, register_data=args.register_data)
| 2.65625 | 3 |
python/udp-test-swipe.py | mpinner/Active | 12 | 12758444 | import socket
from time import sleep
UDP_IP = "192.168.1.177";
UDP_PORT = 8888;
#MESSAGE = 0x0101010101010101010101010101010101010101010101010110
#NOT_MESSAGE = 0x10101010101010101010101010101010101010101010101010110
#"Hello, World!";
print "UDP target IP:", UDP_IP;
print "UDP target port:", UDP_PORT;
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM);
for j in range(0,65):
for i in range(0,26):
MESSAGE = char(j) * i;
print "message:", MESSAGE;
sock.sendto(MESSAGE, (UDP_IP ,UDP_PORT));
sleep(.5);
| 3.1875 | 3 |
hazelcast/protocol/codec/map_values_with_predicate_codec.py | tonytheonlypony/hazelcast-python-client | 98 | 12758445 | from hazelcast.protocol.client_message import OutboundMessage, REQUEST_HEADER_SIZE, create_initial_buffer
from hazelcast.protocol.builtin import StringCodec
from hazelcast.protocol.builtin import DataCodec
from hazelcast.protocol.builtin import ListMultiFrameCodec
# hex: 0x012700
_REQUEST_MESSAGE_TYPE = 75520
# hex: 0x012701
_RESPONSE_MESSAGE_TYPE = 75521
_REQUEST_INITIAL_FRAME_SIZE = REQUEST_HEADER_SIZE
def encode_request(name, predicate):
    """Encode a Map.ValuesWithPredicate request into an outbound message."""
    buf = create_initial_buffer(_REQUEST_INITIAL_FRAME_SIZE, _REQUEST_MESSAGE_TYPE)
    # Frame order is part of the wire protocol: map name, then predicate.
    StringCodec.encode(buf, name)
    # Trailing True presumably marks the final frame of the message -- matches
    # the pattern of other generated codecs; verify against DataCodec.encode.
    DataCodec.encode(buf, predicate, True)
    return OutboundMessage(buf, True)
def decode_response(msg):
    """Decode the server response into a list of serialized values."""
    # Skip the initial (header) frame; the remainder holds the encoded list.
    msg.next_frame()
    return ListMultiFrameCodec.decode(msg, DataCodec.decode)
| 1.859375 | 2 |
tensor2tensor/data_generators/text_encoder.py | anishsingh20/tensor2tensor | 0 | 12758446 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encoders for text data.
* TextEncoder: base class
* ByteTextEncoder: for ascii text
* TokenTextEncoder: with user-supplied vocabulary file
* SubwordTextEncoder: invertible
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensor2tensor.data_generators import tokenizer
import tensorflow as tf
# Reserved tokens for things like padding and EOS symbols.
PAD = '<pad>'
EOS = '<EOS>'
RESERVED_TOKENS = [PAD, EOS]
class TextEncoder(object):
  """Base class for converting from ints to/from human readable strings."""

  def __init__(self, num_reserved_ids=2):
    self._num_reserved_ids = num_reserved_ids

  def encode(self, s):
    """Transform a human-readable string into a sequence of int ids.

    The ids should be in the range [num_reserved_ids, vocab_size). Ids [0,
    num_reserved_ids) are reserved.

    EOS is not appended.

    Args:
      s: human-readable string to be converted.

    Returns:
      ids: list of integers
    """
    offset = self._num_reserved_ids
    return [int(token) + offset for token in s.split()]

  def decode(self, ids):
    """Transform a sequence of int ids into a human-readable string.

    EOS is not expected in ids.

    Args:
      ids: list of integers to be converted.

    Returns:
      s: human-readable string.
    """
    pieces = []
    for idx in ids:
      if 0 <= idx < self._num_reserved_ids:
        # Reserved range maps back to the reserved token strings.
        pieces.append(RESERVED_TOKENS[int(idx)])
      else:
        pieces.append(idx - self._num_reserved_ids)
    return ' '.join(str(piece) for piece in pieces)

  @property
  def vocab_size(self):
    raise NotImplementedError()
class ByteTextEncoder(TextEncoder):
  """Encodes each byte to an id. For 8-bit strings only."""

  def encode(self, s):
    return [ord(c) + self._num_reserved_ids for c in s]

  def decode(self, ids):
    decoded_ids = []
    for id_ in ids:
      if 0 <= id_ < self._num_reserved_ids:
        decoded_ids.append(RESERVED_TOKENS[int(id_)])
      else:
        # BUGFIX: encode() shifts each byte up by _num_reserved_ids, so
        # decode() must shift back down before chr(); previously chr(id_)
        # broke the encode/decode round-trip (e.g. 'a' came back as 'c').
        decoded_ids.append(chr(id_ - self._num_reserved_ids))
    return ''.join(decoded_ids)

  @property
  def vocab_size(self):
    # 256 byte values plus the reserved ids.
    return 2**8 + self._num_reserved_ids
class TokenTextEncoder(TextEncoder):
  """Encoder based on a user-supplied vocabulary."""

  def __init__(self, vocab_filename, reverse=False, num_reserved_ids=2):
    """Initialize from a file, one token per line."""
    super(TokenTextEncoder, self).__init__(num_reserved_ids=num_reserved_ids)
    self._reverse = reverse
    if vocab_filename is not None:
      self._load_vocab_from_file(vocab_filename)

  def encode(self, sentence):
    """Converts a space-separated string of tokens to a list of ids."""
    ids = [self._token_to_id[token] for token in sentence.strip().split()]
    # In reverse mode the id sequence is emitted back-to-front.
    return ids[::-1] if self._reverse else ids

  def decode(self, ids):
    sequence = ids[::-1] if self._reverse else ids
    return ' '.join(self._safe_id_to_token(i) for i in sequence)

  @property
  def vocab_size(self):
    return len(self._id_to_token)

  def _safe_id_to_token(self, idx):
    # Unknown ids decode to a stable placeholder instead of raising.
    return self._id_to_token.get(idx, 'ID_%d' % idx)

  def _load_vocab_from_file(self, filename):
    """Load vocab from a file."""
    self._token_to_id = {}
    self._id_to_token = {}
    # Reserved tokens occupy the first ids.
    for reserved_idx, reserved_tok in enumerate(RESERVED_TOKENS):
      self._token_to_id[reserved_tok] = reserved_idx
      self._id_to_token[reserved_idx] = reserved_tok
    token_start_idx = self._num_reserved_ids
    with tf.gfile.Open(filename) as f:
      for offset, line in enumerate(f):
        idx = token_start_idx + offset
        tok = line.strip()
        self._token_to_id[tok] = idx
        self._id_to_token[idx] = tok
class SubwordTextEncoder(TextEncoder):
  """Class for breaking tokens into subtokens.
  Invertibly encodes a string as a sequence of subtokens from a limited
  vocabulary.
  A SubwordTextEncoder is built from a corpus (so it is tailored to the text in
  the corpus), and stored to a file. See text_encoder_build_subword.py.
  It can then be loaded and used to encode/decode any text.
  """
  def __init__(self, filename=None, num_reserved_ids=2):
    """Read from a file."""
    self._tokenizer = tokenizer.Tokenizer()
    if filename is not None:
      self._load_from_file(filename)
    super(SubwordTextEncoder, self).__init__(num_reserved_ids=num_reserved_ids)
  def encode(self, raw_text):
    """Converts a string to a list of subtoken ids.
    Args:
      raw_text: a string.
    Returns:
      a list of integers in the range [0, vocab_size)
    """
    return self._tokens_to_subtokens(self._tokenizer.encode(raw_text))
  def decode(self, subtokens):
    """Converts a sequence of subtoken ids to a string.
    Args:
      subtokens: a list of integers in the range [0, vocab_size)
    Returns:
      a string
    """
    return self._tokenizer.decode(self._subtokens_to_tokens(subtokens))
  @property
  def vocab_size(self):
    """The subtoken vocabulary size."""
    return len(self._all_subtoken_strings)
  def _tokens_to_subtokens(self, tokens):
    """Converts a list of tokens to a list of subtoken ids.
    Args:
      tokens: a list of strings.
    Returns:
      a list of integers in the range [0, vocab_size)
    """
    ret = []
    for token in tokens:
      ret.extend(self._escaped_token_to_subtokens(self._escape_token(token)))
    return ret
  def _subtokens_to_tokens(self, subtokens):
    """Converts a list of subtoken ids to a list of tokens.
    Args:
      subtokens: a list of integers in the range [0, vocab_size)
    Returns:
      a list of strings.
    """
    # Token boundaries are marked by the trailing '_' added by _escape_token,
    # so splitting the concatenation on '_' recovers the tokens.
    concatenated = ''.join(
        [self.subtoken_to_subtoken_string(s) for s in subtokens])
    split = concatenated.split('_')
    return [self._unescape_token(t + '_') for t in split if t]
  def subtoken_to_subtoken_string(self, subtoken):
    """Subtoken_String (string) corresponding to the given subtoken (id)."""
    if (subtoken >= 0 and subtoken < self.vocab_size and
        self._all_subtoken_strings[subtoken]):
      return self._all_subtoken_strings[subtoken]
    else:
      # Out-of-range or empty slots render as a synthetic, still-parseable
      # placeholder ending in '_' so token splitting keeps working.
      if 0 <= subtoken < self._num_reserved_ids:
        return '%s_' % RESERVED_TOKENS[subtoken]
      else:
        return 'ID%d_' % subtoken
  def _escaped_token_to_subtokens(self, escaped_token):
    """Converts an escaped token string to a list of subtokens.
    Args:
      escaped_token: an escaped token
    Returns:
      a list of one or more integers.
    """
    # Greedy longest-match segmentation: at each position, try the longest
    # remaining substring first and shrink until a vocabulary hit is found.
    # NOTE(review): if no substring at `pos` (not even a single character) is
    # in the vocabulary, `end` keeps decreasing past `pos` -- presumably the
    # vocabulary always contains all single characters (build_from_token_counts
    # adds them); confirm before reusing with a hand-built vocabulary.
    ret = []
    pos = 0
    while pos < len(escaped_token):
      end = len(escaped_token)
      while True:
        subtoken = self._subtoken_string_to_id.get(escaped_token[pos:end], -1)
        if subtoken != -1:
          break
        end -= 1
      ret.append(subtoken)
      pos = end
    return ret
  @classmethod
  def build_to_target_size(cls,
                           target_size,
                           token_counts,
                           store_filename,
                           min_val,
                           max_val,
                           num_iterations=4):
    """Builds a SubwordTextEncoder that has `vocab_size` near `target_size`.
    Uses simple recursive binary search to find a `min_count` value that most
    closely matches the `target_size`.
    Args:
      target_size: desired vocab_size to approximate.
      token_counts: a dictionary of string to int.
      store_filename: a string - where to write the vocabulary.
      min_val: an integer - lower bound for `min_count`.
      max_val: an integer - upper bound for `min_count`.
      num_iterations: an integer.  how many iterations of refinement.
    Returns:
      a SubwordTextEncoder instance.
    """
    # Binary search on min_count: larger min_count -> smaller vocabulary.
    present_count = (max_val + min_val) // 2
    tf.logging.info('Trying min_count %d' % present_count)
    subtokenizer = cls()
    subtokenizer.build_from_token_counts(token_counts, store_filename,
                                         present_count, num_iterations)
    if min_val >= max_val or subtokenizer.vocab_size == target_size:
      return subtokenizer
    elif subtokenizer.vocab_size > target_size:
      # Vocabulary too big: recurse on the upper half (higher min_count) and
      # keep whichever candidate lands closer to the target.
      other_subtokenizer = cls.build_to_target_size(
          target_size, token_counts, store_filename, present_count + 1, max_val,
          num_iterations)
      if (abs(other_subtokenizer.vocab_size - target_size) <
          abs(subtokenizer.vocab_size - target_size)):
        return other_subtokenizer
      else:
        return subtokenizer
    else:
      # Vocabulary too small: recurse on the lower half (lower min_count).
      other_subtokenizer = cls.build_to_target_size(
          target_size, token_counts, store_filename, min_val, present_count - 1,
          num_iterations)
      if (abs(other_subtokenizer.vocab_size - target_size) <
          abs(subtokenizer.vocab_size - target_size)):
        return other_subtokenizer
      else:
        return subtokenizer
  def build_from_token_counts(self,
                              token_counts,
                              store_filename,
                              min_count,
                              num_iterations=4):
    """Train a SubwordTextEncoder based on a dictionary of word counts.
    Args:
      token_counts: a dictionary of string to int.
      store_filename: a string - where to write the vocabulary.
      min_count: an integer - discard subtokens with lower counts.
      num_iterations: an integer.  how many iterations of refinement.
    """
    # We build iteratively.  On each iteration, we segment all the words,
    # then count the resulting potential subtokens, keeping the ones
    # with high enough counts for our new vocabulary.
    for i in xrange(num_iterations):
      counts = {}
      for token, count in six.iteritems(token_counts):
        escaped_token = self._escape_token(token)
        # we will count all tails of the escaped_token, starting from boundaries
        # determined by our current segmentation.
        if i == 0:
          starts = list(range(len(escaped_token)))
        else:
          subtokens = self._escaped_token_to_subtokens(escaped_token)
          pos = 0
          starts = []
          for subtoken in subtokens:
            starts.append(pos)
            pos += len(self.subtoken_to_subtoken_string(subtoken))
        for start in starts:
          for end in xrange(start + 1, len(escaped_token) + 1):
            subtoken_string = escaped_token[start:end]
            counts[subtoken_string] = counts.get(subtoken_string, 0) + count
      # array of lists of candidate subtoken strings, by length
      len_to_subtoken_strings = []
      for subtoken_string, count in six.iteritems(counts):
        if count < min_count or len(subtoken_string) <= 1:
          continue
        while len(len_to_subtoken_strings) <= len(subtoken_string):
          len_to_subtoken_strings.append([])
        len_to_subtoken_strings[len(subtoken_string)].append(subtoken_string)
      new_subtoken_strings = []
      # consider the candidates longest to shortest, so that if we accept
      # a longer subtoken string, we can decrement the counts of its prefixes.
      for subtoken_strings in len_to_subtoken_strings[::-1]:
        for subtoken_string in subtoken_strings:
          count = counts[subtoken_string]
          if count < min_count:
            continue
          # Store negated counts so a plain ascending sort yields
          # most-frequent-first order below.
          new_subtoken_strings.append((-count, subtoken_string))
          for l in xrange(1, len(subtoken_string)):
            counts[subtoken_string[:l]] -= count
      # make sure we have all single characters.
      new_subtoken_strings.extend([(-counts.get(chr(i), 0), chr(i))
                                   for i in xrange(2**8)])
      new_subtoken_strings.sort()
      # Reserved ids occupy the first slots as empty strings.
      self._init_from_list([''] * self._num_reserved_ids +
                           [p[1] for p in new_subtoken_strings])
      print('vocab_size = %d' % self.vocab_size)
    # Sanity round-trip before persisting the vocabulary.
    original = 'This sentence was encoded by the SubwordTextEncoder.'
    encoded = self.encode(original)
    print(encoded)
    print([self.subtoken_to_subtoken_string(s) for s in encoded])
    decoded = self.decode(encoded)
    print(decoded)
    assert decoded == original
    self._store_to_file(store_filename)
  def _init_from_list(self, subtoken_strings):
    """Initialize from a list of subtoken strings."""
    self._all_subtoken_strings = subtoken_strings
    self._subtoken_string_to_id = {}
    for i in xrange(len(subtoken_strings)):
      subtoken_string = subtoken_strings[i]
      if subtoken_string:
        self._subtoken_string_to_id[subtoken_string] = i
  def _load_from_file(self, filename):
    """Load from a file."""
    # NOTE(review): 'string-escape' only exists on Python 2 str; this method
    # (and _store_to_file) would need bytes.decode('unicode_escape') or
    # similar on Python 3 -- confirm the supported interpreter version.
    subtoken_strings = []
    with tf.gfile.Open(filename) as f:
      for line in f:
        # Each line is a quoted, escaped subtoken string: strip the quotes
        # then undo the escaping.
        subtoken_strings.append(line.strip()[1:-1].decode('string-escape'))
    self._init_from_list(subtoken_strings)
  def _store_to_file(self, filename):
    # Inverse of _load_from_file: one quoted, escaped subtoken per line.
    with tf.gfile.Open(filename, 'w') as f:
      for subtoken_string in self._all_subtoken_strings:
        f.write('\'' + subtoken_string.encode('string-escape') + '\'\n')
  def _escape_token(self, token):
    r"""Translate '\'->'\\' and '_'->'\u', then append '_'.
    Args:
      token: a string
    Returns:
      escaped_token: a string
    """
    return token.replace('\\', '\\\\').replace('_', '\\u') + '_'
  def _unescape_token(self, escaped_token):
    r"""Remove '_' from end, then translate '\\'->'\' and '\u'->'_'.
    TODO(noam): There must be some better way to do this with regexps.
    Args:
      escaped_token: a string
    Returns:
      token: a string
    """
    assert escaped_token[-1] == '_'
    escaped_token = escaped_token[:-1]
    if '\\' not in escaped_token:
      return escaped_token
    # Manual scan so that '\\u' is treated as an escaped backslash + 'u',
    # not as the '_' escape.
    ret = ''
    pos = 0
    while pos < len(escaped_token):
      if escaped_token[pos] == '\\' and pos + 1 < len(escaped_token):
        if escaped_token[pos + 1] == 'u':
          ret += '_'
        else:
          ret += escaped_token[pos + 1]
        pos += 1
      pos += 1
    return ret
  @classmethod
  def get_token_counts(cls, text_filepattern, corpus_max_lines):
    """Read the corpus and compute a dictionary of word counts."""
    tok = tokenizer.Tokenizer()
    token_counts = {}
    lines_read = 0
    filenames = tf.gfile.Glob(text_filepattern)
    for text_filename in filenames:
      with tf.gfile.Open(text_filename) as f:
        for line in f:
          tokens = tok.encode(line.strip())
          for t in tokens:
            token_counts[t] = token_counts.get(t, 0) + 1
          lines_read += 1
          # Early exit once the configured line budget is exhausted.
          if corpus_max_lines > 0 and lines_read > corpus_max_lines:
            return token_counts
    return token_counts
| 2.265625 | 2 |
src/calibrate_camera.py | kobeeraveendran/advanced-lane-finding | 0 | 12758447 | import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import glob
import sys
import os
import pickle
def extract_points(images):
    '''
    args:
        - images: list of strings containing the filenames of the calibration image set
    returns:
        - mtx: camera calibration matrix
        - dist: distortion coefficients
    '''
    # Object points for a 9x6 inner-corner chessboard on the z=0 plane:
    # (0,0,0), (1,0,0), ... (8,5,0).
    obj = np.zeros((6 * 9, 3), np.float32)
    obj[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
    obj_points = []
    img_points = []
    for filename in images:
        image = cv2.imread(filename)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
        # Images where the full 9x6 corner grid is not found are skipped.
        if ret:
            obj_points.append(obj)
            img_points.append(corners)
    # NOTE(review): `gray` here is from the *last* image in the loop (its shape
    # sets the calibration image size); raises NameError if `images` is empty.
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, gray.shape[::-1], None, None)
    # Persist calibration so other scripts can load it without recomputing.
    dist_pickle = {}
    dist_pickle["mtx"] = mtx
    dist_pickle["dist"] = dist
    pickle.dump(dist_pickle, open("dist_pickle.p", "wb"))
    return mtx, dist
def camera_cal(image, mtx, dist, filename = None, save = False):
    '''
    Undistort an image using a previously computed camera calibration.

    args:
        - image: image array to undistort; a filename string is also
          accepted here and treated the same as `filename`
        - mtx: camera matrix from `extract_points()`, or loaded from saved file
        - dist: distortion coefficients from `extract_points()`, or loaded from saved file
        - filename: optional filename (paths also accepted); when given,
          the image is loaded from disk and `image` is ignored
        - save: if True, also write the result to ../undistorted/

    returns:
        - dst: undistorted image

    raises:
        - ValueError: if save=True but no filename is available to derive
          the output name from
    '''

    # allow callers to pass a path as the first positional argument
    if filename is None and isinstance(image, str):
        filename = image

    if filename:
        # NOTE(review): mpimg.imread yields RGB while cv2.imwrite expects
        # BGR, so saved output may have swapped channels -- confirm intended
        image = mpimg.imread(filename)

    # undistort image
    dst = cv2.undistort(image, mtx, dist, None, mtx)

    # write to new image for checking purposes
    if save:
        if filename is None:
            # previously this crashed with AttributeError on None.split
            raise ValueError("save=True requires a filename to derive the output name")
        split = filename.split('.')
        new_filename = split[-2].split('/')[-1]
        cv2.imwrite("../undistorted/{}_undist.{}".format(new_filename, split[-1]), dst)

    return dst
if __name__ == "__main__":

    if len(sys.argv) > 1:
        # preferably a path without a trailing '/'
        image_list = glob.glob(sys.argv[1] + "/*")
    else:
        image_list = glob.glob("../camera_cal/*")

    mtx, dist = extract_points(image_list)

    os.makedirs("../undistorted/", exist_ok = True)

    # pass the path via `filename` so camera_cal loads it from disk;
    # previously the path string was passed as the image array itself
    dst = camera_cal(None, mtx, dist, filename = "../camera_cal/calibration1.jpg")
brambling/migrations/0009_set_stripe_metadata.py | j-po/django-brambling | 0 | 12758448 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import warnings
from django.db import models, migrations
import stripe
def set_stripe_metadata(apps, schema_editor):
    """Attach order/event metadata to remote Stripe charges and refunds.

    Best-effort data migration: failures updating any individual
    transaction are reported as warnings instead of aborting.
    """
    # Try to set remote metadata for all recorded transactions.
    # But don't get too upset if it all fails ;)
    Transaction = apps.get_model('brambling', 'Transaction')
    transactions = Transaction.objects.select_related(
        'order__event__organization'
    ).filter(
        method='stripe',
    )
    for txn in transactions:
        order = txn.order
        event = order.event
        organization = event.organization
        # Pick the access token matching the transaction's API mode.
        if txn.api_type == 'test' and organization.stripe_test_access_token:
            stripe.api_key = organization.stripe_test_access_token
        elif txn.api_type == 'live' and organization.stripe_access_token:
            stripe.api_key = organization.stripe_access_token
        else:
            # No usable credentials: skip rather than call the API with a
            # stale/unset key left over from a previous iteration.
            continue
        try:
            if txn.transaction_type == 'purchase':
                remote = stripe.Charge.retrieve(txn.remote_id)
            elif txn.transaction_type == 'refund':
                ch = stripe.Charge.retrieve(txn.related_transaction.remote_id)
                remote = ch.refunds.retrieve(txn.remote_id)
            else:
                continue
            remote.metadata = {
                'order': order.code,
                'event': event.name,
            }
            remote.save()
        except stripe.InvalidRequestError as e:
            # `except X, e` is Python 2-only syntax, and BaseException.message
            # no longer exists on Python 3; format the exception itself.
            warnings.warn("Updating metadata failed: {}".format(e))
class Migration(migrations.Migration):
    # Data migration: back-fills order/event metadata onto existing
    # Stripe charges and refunds recorded before 0008.

    dependencies = [
        ('brambling', '0008_auto_20150520_0253'),
    ]

    operations = [
        # Reverse operation is a deliberate no-op: metadata left on Stripe
        # objects is harmless and there is nothing meaningful to undo.
        migrations.RunPython(set_stripe_metadata, lambda *a, **k: None),
    ]
| 1.6875 | 2 |
tests/test_recursive_builder.py | jiamo/polly_read | 1 | 12758449 | <gh_stars>1-10
from unittest import TestCase
from polly_read.ssml_builder import (
Paragraph, Sentence, Word
)
class TestPySSMLNestBuilder(TestCase):
    """Smoke tests for the nested SSML builder (Paragraph/Sentence/Word)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # show full diffs on assertion failures
        self.maxDiff = None

    def test_say(self):
        p = Paragraph(
            [Sentence([Word("I"), Word("am")]).add_ms_pause(1000).add_strength_pause("x-strong"),
             Sentence([Word("You"), Word("are")]).add_ms_pause(1000)])
        ssml = p.to_ssml()
        # previously this test only printed and asserted nothing; at a
        # minimum the builder must produce some output without raising
        self.assertIsNotNone(ssml)
        print(ssml)

    def test_paragraph(self):
        p = Paragraph.build_from_text("hello world")
        # previously print-only; assert the builder actually produced
        # text and renderable SSML
        self.assertIsNotNone(p.text)
        self.assertIsNotNone(p.to_ssml())
        print(p.text)
        print(p.to_ssml())
| 3.09375 | 3 |
sdk/core/azure-common/tests/test_credentials.py | rsdoherty/azure-sdk-for-python | 2,728 | 12758450 | # coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import time
try:
from unittest import mock
except ImportError:
import mock
import pytest
from azure.common.credentials import _CliCredentials
import azure.common.credentials
class MockCliCredentials:
    """Fake CLI credential: fixed token payload, pass-through sessions."""

    def _token_retriever(self):
        # Mirrors the (scheme, token, raw payload) triple azure-cli returns.
        raw_payload = {'expiresIn': 42}
        return "NOTUSED", "TOKEN", raw_payload

    def signed_session(self, session=None):
        # Nothing to sign in the fake; hand back whatever was provided.
        return session
class MockCliProfile:
    """Fake CLI profile that records the resource it was asked to log in to."""

    def __init__(self):
        # Holds the last resource passed to get_login_credentials.
        self.received_resource = None

    def get_login_credentials(self, resource):
        self.received_resource = resource
        credentials = MockCliCredentials()
        return credentials, "NOTUSED", "NOTUSED"
def test_cli_credentials_mgmt():
    """Management-plane (track 1) scenario via signed_session."""
    fake_profile = MockCliProfile()
    credentials = _CliCredentials(fake_profile, "http://resource.id")

    # The constructor's resource must be forwarded to the CLI profile,
    # and the session handed back unchanged.
    result = credentials.signed_session("session")
    assert fake_profile.received_resource == "http://resource.id"
    assert result == "session"

    # Simulate azure-core being absent: the mgmt path must still work.
    with mock.patch('azure.common.credentials._AccessToken', None):
        # Should not crash
        credentials.signed_session("session")
def test_cli_credentials_accesstoken():
    """Data-plane (track 2) scenario via get_token."""
    fake_profile = MockCliProfile()
    credentials = _CliCredentials(fake_profile, "http://resource.id")

    token = credentials.get_token("http://resource.id/.default")
    assert fake_profile.received_resource == "http://resource.id"
    assert token.token == "TOKEN"
    assert token.expires_on <= int(time.time() + 42)

    # A different scope must be translated to its resource.
    token = credentials.get_token("http://resource.newid")
    assert fake_profile.received_resource == "http://resource.newid"

    # Simulate azure-core being absent: track-2 usage must raise ImportError.
    with mock.patch('azure.common.credentials._AccessToken', None):
        with pytest.raises(ImportError):
            credentials.get_token("http://resource.yetid")