max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
h2client.py | TOKUJI/BlackBull | 1 | 12767851 | import asyncio
import httpx
from blackbull.logger import get_logger_set
logger, log = get_logger_set()
async def main():
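    # verify=False disables TLS certificate verification; presumably the local
    # test server at https://localhost:8000 uses a self-signed certificate.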
async with httpx.AsyncClient(http2=True, verify=False) as c:
res = await c.get('https://localhost:8000/json', headers={'key': 'value'})
assert res.status_code == 200
assert res.content == b'{"a": "b"}'
if __name__ == '__main__':
asyncio.run(
asyncio.wait_for(
main(), timeout=0.5
)
)
| 2.625 | 3 |
starboard/commands/base.py | TrigonDev/Starboard | 4 | 12767852 |
# MIT License
#
# Copyright (c) 2022 TrigonDev
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import annotations
from typing import TYPE_CHECKING, cast
import crescent
import hikari
from starboard.config import CONFIG
if TYPE_CHECKING:
from starboard.bot import Bot
plugin = crescent.Plugin()
@plugin.include
@crescent.command(name="ping", description="Pong!")
async def ping_command(ctx: crescent.Context) -> None:
bot = cast("Bot", ctx.app)
guild = bot.cache.get_guild(ctx.guild_id) if ctx.guild_id else None
shard = guild.shard_id if guild else 0
await ctx.respond(
f"Pong! Cluster {bot.cluster.cluster_id}, shard {shard}, "
f"{bot.heartbeat_latency*1000:.0f} ms latency."
)
@plugin.include
@crescent.command(name="help", description="Get help with starboard")
async def help_command(ctx: crescent.Context) -> None:
bot = cast("Bot", ctx.app)
act = bot.rest.build_action_row()
if CONFIG.bot_invite:
(
act.add_button(hikari.ButtonStyle.LINK, CONFIG.bot_invite)
.set_label("Invite Starboard")
.add_to_container()
)
if CONFIG.support_invite:
(
act.add_button(hikari.ButtonStyle.LINK, CONFIG.support_invite)
.set_label("Get Support")
.add_to_container()
)
if CONFIG.docs_link:
(
act.add_button(hikari.ButtonStyle.LINK, CONFIG.docs_link)
.set_label("Documentation")
.add_to_container()
)
if CONFIG.source_link:
(
act.add_button(hikari.ButtonStyle.LINK, CONFIG.source_link)
.set_label("Source Code")
.add_to_container()
)
emb = bot.embed(
title="Starboard Help",
description=(
"Starboard is a Discord bot that lets you create starboard for "
"your server. A starboard is similar to channel pins, except that "
'people can "vote" to pin messages. A typical setup would be to '
"create a starboard named #starboard such that when a message "
"receives 3 :star: reactions, the message will be reposted to "
"that channel."
),
)
emb.add_field(
name="Useful Commands",
value=(
"`/starboards view`: View all your starboards\n"
"`/starboards create`: Create a starboard\n"
"`/starboards edit`: Change the settings for a starboard\n"
),
)
emb.add_field(
name="Features",
value=(
"Starboard's key (free) features are:\n"
" - Multiple starboards per server\n"
" - Custom avatar and username per starboard (via webhooks)\n"
" - Autostar channels\n"
" - Channel-specific settings for each starboard\n"
" - Advanced role permission system"
+ (
"\n\nTo see premium features, visit my "
f"**[Patreon]({CONFIG.patreon_link})**."
if CONFIG.patreon_link
else ""
)
),
)
await ctx.respond(
embed=emb, component=act if act.components else hikari.UNDEFINED
)
| 1.898438 | 2 |
bin/populate_db.py | ryanrdetzel/Mental-Cache | 1 | 12767853 | import pytc
import string
from random import choice
DBNAME="../mental_cache.hdb"
db = pytc.HDB()
db.open(DBNAME, pytc.HDBOWRITER | pytc.HDBOCREAT)
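# HDBOWRITER | HDBOCREAT: open the Tokyo Cabinet hash database for writing, creating the file if it does not exist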
chars = string.ascii_lowercase + string.digits  # string.letters.lower() is Python 2 only and double-counted the lowercase letters
for _ in range(1, 1000000):
    page_name = ''.join(choice(chars) for _ in range(8))
    db.put(page_name, '{"order": "","name": "Untitled","components": {},"last_id": 0}')
# print(db.get('2'))
| 2.578125 | 3 |
web/transiq/restapi/views/views_page.py | manibhushan05/transiq | 0 | 12767854 | from datetime import datetime, timedelta
from django.contrib.auth.models import User
from django.db.models import Q, Count
from django.http import HttpResponseRedirect
from django.utils.html import format_html
from rest_framework import status, viewsets
from rest_framework.renderers import TemplateHTMLRenderer, JSONRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
# UPDATES PAGE VIEWS
from employee.models import Employee
from fileupload.models import ChequeFile
from restapi.helper_api import verify_pod_data, my_uploaded_pod_data, manual_booking_id_list, check_booking_status, \
get_booking_status_mapping_object
from restapi.models import BookingStatusesMapping, BookingStatusChain
from restapi.serializers.employee import EmployeeSerializer
from restapi.serializers.file_upload import ChequeFileSerializer
from restapi.serializers.team import InvoiceSerializer
from restapi.serializers.team import ManualBookingSerializer
from restapi.serializers.utils import IfscDetailSerializer
from restapi.service.booking import detailed_full_booking_page_data, \
detailed_commission_booking_page_data
from restapi.service.credit_debit_note import approve_credit_note_customer_data, approve_debit_note_customer_data, \
approve_credit_note_supplier_data, approve_debit_note_supplier_data, \
approve_credit_note_customer_direct_advance_data
from restapi.service.invoices import get_invoice_data, get_comment_list, get_amount_data, \
full_booking_invoice_data
from restapi.service.payments import pending_payments_data, pending_payment_adjustment_data
from restapi.service.trackvehicle import track_vehicles_data, track_vehicle_data
from restapi.utils import get_or_none
from sme.models import Sme
from team.models import LrNumber, ManualBooking, CreditNoteCustomer, CreditNoteSupplier, DebitNoteCustomer, \
DebitNoteSupplier, CreditNoteCustomerDirectAdvance, Invoice
from utils.models import VehicleCategory, IfscDetail
class DownloadPaymentFilePage(viewsets.ViewSet):
renderer_classes = (TemplateHTMLRenderer,)
def get(self, request):
return Response(status=status.HTTP_200_OK, template_name='team/download_outward_payment_file.html')
class ManualBookingCreatePageView(viewsets.ViewSet):
renderer_classes = (TemplateHTMLRenderer,)
def get_basic_full_booking(self, request):
return Response(template_name='team/booking/fetch_full_booking_data_page.html')
def get_confirm_booking(self, request):
return Response(template_name='team/booking/confirm_booking_page.html')
def get_detailed_full_booking(self, request):
json_data = {k: request.GET.get(k) for k in request.GET.keys()}
return Response(template_name='team/booking/full-booking.html', data=detailed_full_booking_page_data(json_data),
status=status.HTTP_200_OK)
def get_detailed_full_booking_mb_id_based(self, request, pk):
try:
manual_booking = ManualBooking.objects.get(id=pk)
except ManualBooking.DoesNotExist:
return Response({"status": "failure",
"msg": "ManualBooking Doesn't exists",
"status_code": status.HTTP_400_BAD_REQUEST,
"data": {}}, status=status.HTTP_400_BAD_REQUEST)
serializer = ManualBookingSerializer(instance=manual_booking)
return Response(template_name='team/booking/detailed_lr_generation.html',
data=serializer.data, status=status.HTTP_200_OK)
def get_basic_commission_booking(self, request):
return Response(template_name='team/booking/fetch-commission-booking-data.html')
def get_detailed_commission_booking(self, request):
data = request.GET
json_data = {k: data.get(k) for k in data.keys()}
return Response(template_name='team/booking/commission-booking.html',
data=detailed_commission_booking_page_data(json_data))
class OutwardPaymentListPageView(viewsets.ViewSet):
renderer_classes = (TemplateHTMLRenderer,)
def get(self, request):
return Response(template_name='team/payments/outward_payment_history.html')
def get_payment_receipt(self, request):
return Response(template_name='team/payments/supplier_payment_receipt.html', status=status.HTTP_200_OK)
class EmployeeProfilePageView(viewsets.ViewSet):
renderer_classes = (JSONRenderer, TemplateHTMLRenderer)
def get(self, request):
emp = get_or_none(Employee, username=User.objects.get(username=request.user.username))
employee_serializer = EmployeeSerializer(instance=emp)
return Response(template_name='team/employee/emp-profile.html', data=employee_serializer.data,
status=status.HTTP_200_OK)
class ChangePasswordPageView(viewsets.ViewSet):
renderer_classes = (JSONRenderer, TemplateHTMLRenderer)
def get(self, request):
return Response(template_name='team/employee/change-password.html', status=status.HTTP_200_OK)
class InwardPaymentListPageView(viewsets.ViewSet):
renderer_classes = (TemplateHTMLRenderer,)
def get(self, request):
return Response(template_name='team/payments/inward_payment_history.html')
class OutwardPaymentPageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
def get(self, request):
return Response(status=status.HTTP_200_OK, template_name='team/payments/add_outward_payment.html')
class BookingStatusesMonitoringPageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
def get(self, request):
return Response(status=status.HTTP_200_OK, template_name='team/monitoring/senior_mgmt_booking_status.html')
class TaskStatusesMonitoringPageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
def get(self, request):
return Response(status=status.HTTP_200_OK, template_name='team/monitoring/senior_mgmt_task_status.html')
class PendingInwardPageView(viewsets.ViewSet):
renderer_classes = (JSONRenderer, TemplateHTMLRenderer,)
def get(self, request):
return Response(template_name='team/payments/add_received_payment.html')
def unadjusted_list(self, request):
return Response(template_name='team/payments/pending-payment-list.html', data={
'pending_payments': pending_payments_data(),
})
def payment_adjustment(self, request):
response = pending_payment_adjustment_data(data={
'accept_choice': request.GET.get('accept_choice'),
'payment_id': request.GET.get('payment_id'),
'customer': request.GET.get('customer'),
'tds': request.GET.get('tds'),
'username': request.user.username,
})
if response['status'] != 200:
return Response(status=response['status'], data={'msg': response['msg']})
return Response(template_name='team/payments/payment-adjustment-page.html', status=status.HTTP_200_OK,
data=response['data'])
class ChequePageView(viewsets.ViewSet):
renderer_classes = (JSONRenderer, TemplateHTMLRenderer,)
def create(self, request):
return Response(template_name='', status=status.HTTP_200_OK)
def uncredited_cheque_list(self, request):
cheques = ChequeFile.objects.filter(resolved=False).order_by('cheque_date').values(
'cheque_number', 'cheque_date', 'customer_name', 'amount', 'remarks').annotate(Count('cheque_number'))
data = []
for cheque in cheques:
cheque_number = cheque['cheque_number']
data.append({
'id': ','.join([str(row.id) for row in ChequeFile.objects.filter(cheque_number=cheque_number)]),
'cheque_number': cheque_number,
'cheque_date': cheque['cheque_date'],
'customer_name': cheque['customer_name'],
'amount': cheque['amount'],
'remarks': cheque['remarks'],
'images': [{'url': row.s3_upload.public_url(), 'filename': row.cheque_number, } for row in
ChequeFile.objects.filter(cheque_number=cheque_number)]
})
return Response(template_name='team/payments/uncredited-cheques.html', status=status.HTTP_200_OK,
data={'cheques': data})
class InvoicePageView(viewsets.ViewSet):
renderer_classes = (TemplateHTMLRenderer,)
def list(self, request):
return Response(template_name='team/invoices/invoice_list.html', status=status.HTTP_200_OK)
def summary(self, request):
return Response(template_name='team/invoices/invoice_summary_statement.html', status=status.HTTP_200_OK)
def fetch_full_booking_invoice(self, request):
return Response(template_name='team/invoices/fetch_full_booking_invoice_data.html', status=status.HTTP_200_OK)
def full_booking_invoice(self, request):
customer = get_or_none(Sme, id=request.GET.get('customer_to_be_billed'))
return Response(template_name='team/invoices/full_booking_invoices.html',
data=full_booking_invoice_data(customer=customer), status=status.HTTP_200_OK)
def fetch_commission_booking_invoice(self, request):
return Response(template_name='team/invoices/fetch-commission-invoice.html', status=status.HTTP_200_OK)
def commission_booking_invoice(self, request):
customer = get_or_none(Sme, id=request.GET.get('customer_to_be_billed'))
bookings = ManualBooking.objects.filter(id__in=request.GET.getlist('booking_id[]'))
if not bookings.exists() or not isinstance(customer, Sme):
return HttpResponseRedirect('/team/commission-invoice-data-page/')
invoice_data = get_invoice_data(bookings, 'commission')
comment_list = get_comment_list(bookings, invoice_data)
return Response(template_name='team/invoices/commission_booking_invoice.html', status=status.HTTP_200_OK,
data={'booking_data': invoice_data, 'customer': customer,
'gst_liability': bookings.last().gst_liability,
'booking_ids': ','.join(map(str, bookings.values_list('id', flat=True))),
'comment_list': comment_list,
'invoice_amount_data': get_amount_data(bookings=bookings, booking_type='full'),
})
class LrNumberPageView(viewsets.ViewSet):
renderer_classes = (TemplateHTMLRenderer,)
def list(self, request):
return Response(template_name='team/booking/download-lr.html', status=status.HTTP_200_OK)
class PODPageView(viewsets.ViewSet):
renderer_classes = (TemplateHTMLRenderer,)
def upload(self, request):
return Response(template_name='', status=status.HTTP_200_OK)
def list(self, request):
return Response(template_name='team/booking/pod-list.html', status=status.HTTP_200_OK)
def unverified_pod(self, request):
return Response(template_name='team/documents/verify_pod.html', data={'bookings_data': verify_pod_data()},
status=status.HTTP_200_OK)
def td_unverified_pod(self, request):
return Response(template_name='team/documents/td_verify_pod.html', data={'bookings_data': verify_pod_data()},
status=status.HTTP_200_OK)
def my_uploaded_pod(self, request):
return Response(template_name='team/documents/uploaded-pod.html',
data={'bookings_data': my_uploaded_pod_data(user=request.user)}, status=status.HTTP_200_OK)
class AccountingSummaryPageView(viewsets.ViewSet):
renderer_classes = (TemplateHTMLRenderer,)
def get_placed_order_customer_summary(self, request):
return Response(template_name='team/accounting/placed-order-customer-summary.html', status=status.HTTP_200_OK)
def get_billed_customer_summary(self, request):
return Response(template_name='team/accounting/billed-customer-summary.html', status=status.HTTP_200_OK)
def get_supplier_summary(self, request):
return Response(template_name='team/accounting/supplier-summary.html', status=status.HTTP_200_OK)
def get_vehicle_summary(self, request):
return Response(template_name='team/accounting/vehicle-summary.html', status=status.HTTP_200_OK)
class BankAccountPageView(viewsets.ViewSet):
renderer_classes = (JSONRenderer, TemplateHTMLRenderer)
def fetch_ifsc(self, request):
return Response(template_name='team/registrations/fetch-bank-details-using-ifsc.html',
status=status.HTTP_200_OK)
def create(self, request):
ifsc = get_or_none(IfscDetail, ifsc_code__iexact=request.GET.get('fetch_ifsc'))
if isinstance(ifsc, IfscDetail):
data = IfscDetailSerializer(ifsc).data
else:
data = {}
return Response(template_name='team/registrations/register_beneficiary_bank_account.html',
status=status.HTTP_200_OK, data=data)
def list(self, request):
return Response(template_name='team/payments/beneficiary_list.html', status=status.HTTP_200_OK)
class TrackVehiclePageView(viewsets.ViewSet):
renderer_classes = (TemplateHTMLRenderer,)
def track_vehicles(self, request):
return Response(template_name='team/track/track_vehicles.html', status=status.HTTP_200_OK,
data=track_vehicles_data())
def track_vehicle(self, request):
return Response(template_name='team/track/track_individual_vehicle.html', status=status.HTTP_200_OK,
data=track_vehicle_data(device_id=request.GET.get('gps_log_id')))
# FILE UPLOAD
class PODUploadPageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
def get(self, request):
lr_numbers = LrNumber.objects.filter(Q(datetime__date__gte=datetime.now().date() - timedelta(days=180)) & (
Q(booking__pod_status='pending') | Q(booking__pod_status='rejected') | Q(
booking__pod_status='unverified'))).order_by('-datetime').values(
'id', 'lr_number')
bookings = []
for booking in ManualBooking.objects.filter(
(Q(pod_status__iexact='pending') | Q(pod_status__iexact='rejected')) & (
Q(booking_id__istartswith='BROKER') | Q(booking_id__istartswith='AB'))).exclude(
Q(booking_status='cancelled') | Q(deleted=True)):
bookings.append({'booking_id': booking.booking_id})
return Response({'lr_numbers': lr_numbers, 'bookings': bookings}, template_name='fileupload/pod_upload.html',
status=status.HTTP_200_OK)
class ChequeFilePageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
def get(self, request):
cheques = ChequeFile.objects.filter(resolved=False).exclude(deleted=True).order_by('-cheque_date')
cheques_serializer = ChequeFileSerializer(cheques, many=True)
return Response({"data": cheques_serializer.data}, status=status.HTTP_200_OK,
template_name="team/payments/uncredited-cheques.html")
class ManualBookingListPage(viewsets.ViewSet):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
def get_partial_booking(self, request):
return Response(status=status.HTTP_200_OK, template_name='team/booking/partial_booking.html')
def get_full_booking(self, request):
return Response(status=status.HTTP_200_OK, template_name='team/booking/booking-archive.html')
def get_generate_lr(self, request):
return Response(status=status.HTTP_200_OK, template_name='team/booking/booking_status_loaded.html')
def get_bookings_pay_advance(self, request):
return Response(status=status.HTTP_200_OK, template_name='team/booking/bookings_pay_advance.html')
class BookingMISPage(viewsets.ViewSet):
renderer_classes = (TemplateHTMLRenderer,)
def get(self, request):
return Response(template_name='team/booking/mis-booking.html', status=status.HTTP_200_OK)
class UpdateContractBookingPage(viewsets.ViewSet):
renderer_classes = (JSONRenderer, TemplateHTMLRenderer,)
def get(self, request):
bookings = ManualBooking.objects.filter(Q(total_amount_to_company=0)).filter(billing_type='contract').exclude(
Q(deleted=True) | Q(booking_status='cancelled'))
data = []
for booking in bookings:
data.append({
'id': booking.id,
'booking_id': booking.booking_id,
'shipment_date': booking.shipment_date.strftime('%d-%b-%Y') if booking.shipment_date else '',
'lr_numbers': '\n'.join(booking.lr_numbers.values_list('lr_number', flat=True)),
'customer_name': booking.company.get_name() if booking.company else '',
'origin': booking.from_city,
'destination': booking.to_city,
'weight': booking.charged_weight,
'rate_id': '{}_{}'.format('rate', booking.booking_id),
'amount_id': '{}_{}'.format('amount', booking.booking_id)
})
return Response(template_name='team/booking/update-contract-bookings-rate.html', status=status.HTTP_200_OK,
data={'bookings': data, 'id': ','.join(map(str, bookings.values_list('id', flat=True)))})
# UPDATE PAGE VIEWs
class PayBalanceBookingHistoryPage(viewsets.ViewSet):
renderer_classes = (TemplateHTMLRenderer,)
def get(self, request):
return Response(template_name='team/payments/pay_balance_booking_history.html', status=status.HTTP_200_OK)
class RaiseInvoiceBookingHistoryPage(viewsets.ViewSet):
renderer_classes = (TemplateHTMLRenderer,)
def get(self, request):
return Response(template_name='team/invoices/raise_invoice_booking_history.html', status=status.HTTP_200_OK)
class UploadInvoiceSentReceiptPage(viewsets.ViewSet):
renderer_classes = (TemplateHTMLRenderer,)
def get(self, request):
booking_ids = manual_booking_id_list(user=request.user)
invoice_raised_bookings = BookingStatusesMapping.objects.filter(
booking_status_chain__booking_status__status__iexact='invoice_raised').exclude(
Q(deleted=True) | Q(booking_stage='reverted')). \
values_list('manual_booking_id', flat=True)
party_invoice_sent_bookings = BookingStatusesMapping.objects.filter(
booking_status_chain__booking_status__status__iexact='party_invoice_sent').exclude(
Q(deleted=True) | Q(booking_stage='reverted')). \
values_list('manual_booking_id', flat=True)
invoice_not_sent_bookings = [x for x in invoice_raised_bookings if x not in party_invoice_sent_bookings]
bookings = ManualBooking.objects.filter(id__in=booking_ids).filter(id__in=invoice_not_sent_bookings). \
filter(invoice_status='invoice_raised').exclude(billing_type='contract')
invoices = Invoice.objects.filter(bookings__in=bookings,
date__gte=datetime.now().date() - timedelta(days=365)).distinct()
# invoices = Invoice.objects.filter(date__gte=datetime.now() - timedelta(days=3)).exclude(deleted=True)
serializer = InvoiceSerializer(instance=invoices, many=True)
return Response(template_name='team/invoices/invoice_sent_receipt.html', status=status.HTTP_200_OK,
data={'data': serializer.data})
class ConfirmInvoiceSentPage(viewsets.ViewSet):
renderer_classes = (TemplateHTMLRenderer,)
def get(self, request):
booking_ids = manual_booking_id_list(user=request.user)
party_invoice_sent_bookings = BookingStatusesMapping.objects.filter(
booking_status_chain__booking_status__status__iexact='party_invoice_sent').exclude(
Q(deleted=True) | Q(booking_stage='reverted')). \
values_list('manual_booking_id', flat=True)
invoice_confirmed_bookings = BookingStatusesMapping.objects.filter(
booking_status_chain__booking_status__status__iexact='invoice_confirmed').exclude(
Q(deleted=True) | Q(booking_stage='reverted')). \
values_list('manual_booking_id', flat=True)
invoice_not_confirmed_bookings = [x for x in party_invoice_sent_bookings if x not in invoice_confirmed_bookings]
bookings = ManualBooking.objects.filter(id__in=booking_ids).filter(id__in=invoice_not_confirmed_bookings). \
filter(invoice_status='invoice_sent').exclude(billing_type='contract')
invoices = Invoice.objects.filter(bookings__in=bookings,
date__gte=datetime.now().date() - timedelta(days=365)).distinct()
# invoices = Invoice.objects.filter(date__gte=datetime.now() - timedelta(days=3)).exclude(deleted=True)
serializer = InvoiceSerializer(instance=invoices, many=True)
data = self.add_booking_status_mapping_info(serializer.data)
return Response(template_name='team/invoices/confirm_sent_invoice.html', status=status.HTTP_200_OK,
data={'data': data})
def add_booking_status_mapping_info(self, data):
for inv in data:
inv['invoice_booking_details'] = []
inv_bookings = Invoice.objects.get(id=inv['id']).bookings.all()
for booking in inv_bookings:
bsm_details = {}
booking_invoice_confirmed = check_booking_status(booking, 'party_invoice_sent')
booking_status_mapping_id = None
booking_status_chain_id = None
booking_status_mapping_booking_stage = None
if booking_invoice_confirmed:
booking_status_mapping_object = get_booking_status_mapping_object(booking, 'party_invoice_sent')
try:
booking_status_chain_id = BookingStatusChain.objects.get(
booking_status__status='party_invoice_sent').id
except BookingStatusChain.DoesNotExist:
booking_status_chain_id = None
if booking_status_mapping_object:
booking_status_mapping_id = booking_status_mapping_object.id
booking_status_mapping_booking_stage = booking_status_mapping_object.booking_stage
bsm_details['booking_id'] = booking.id
bsm_details['booking_status_mapping_id'] = booking_status_mapping_id
bsm_details['booking_status_chain_id'] = booking_status_chain_id
bsm_details['booking_status_mapping_booking_stage'] = booking_status_mapping_booking_stage
inv['invoice_booking_details'].append(bsm_details)
return data
class ProcessPaymentEnetPage(viewsets.ViewSet):
renderer_classes = (TemplateHTMLRenderer,)
def get(self, request):
return Response(template_name='team/payments/process_payment_page.html', status=status.HTTP_200_OK)
class ReconcilePaymentPage(viewsets.ViewSet):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
def get(self, request):
return Response(template_name='team/payments/reconcile_payment_page.html', status=status.HTTP_200_OK)
class OwnerListPageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
template_name = 'team/registrations/owner_list.html'
def get(self, request):
return Response(status=status.HTTP_200_OK)
class OwnerVehicleListPageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
template_name = 'team/registrations/vehicle-list.html'
def get(self, request):
return Response(status=status.HTTP_200_OK)
class SmeListPageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
template_name = 'team/registrations/customer-archive.html'
def get(self, request):
return Response(status=status.HTTP_200_OK)
class SupplierListPageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
template_name = 'team/registrations/supplier-list.html'
def get(self, request):
return Response(status=status.HTTP_200_OK)
class DriverListPageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
template_name = 'team/registrations/driver-list-page.html'
def get(self, request):
return Response(status=status.HTTP_200_OK)
# REGISTER PAGE VIEWS
class VehicleRegisterPageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
def get(self, request):
vehicle_categories = [
{'id': vehicle_category.id, 'vehicle_type': vehicle_category.vehicle_type,
'capacity': vehicle_category.capacity}
for vehicle_category in VehicleCategory.objects.all()
]
body_type_choices = (
('open', 'Open'),
('closed', 'Closed'),
('semi', 'Semi'),
('half', 'Half'),
('containerized', 'Containerized'),
)
gps_enable_choices = (
('yes', 'Yes'),
('no', 'No')
)
return Response({
'vehicle_categories': vehicle_categories,
'body_type_choices': body_type_choices,
'gps_enable_choices': gps_enable_choices
}, template_name='team/registrations/register_vehicle.html', status=status.HTTP_200_OK)
class OwnerRegisterPageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
template_name = 'team/registrations/register_owner.html'
def get(self, request):
return Response(status=status.HTTP_200_OK)
class SmeRegisterPageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
template_name = 'team/registrations/register-customer.html'
def get(self, request):
return Response(status=status.HTTP_200_OK)
class SupplierRegisterPageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
template_name = 'team/registrations/register-supplier.html'
def get(self, request):
return Response(status=status.HTTP_200_OK)
class DriverRegisterPageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
template_name = 'team/registrations/register-driver.html'
def get(self, request):
return Response(status=status.HTTP_200_OK)
# CREDIT DEBIT NOTE
class IssueCreditDebitNotePageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
template_name = 'team/credit_debit_note/issue/issue-credit-debit-note.html'
def get(self, request):
return Response(status=status.HTTP_200_OK)
class IssueCreditNoteCustomerPageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
template_name = 'team/credit_debit_note/issue/issue_cnc.html'
def get(self, request):
return Response(status=status.HTTP_200_OK)
class IssueCreditNoteSupplierPageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
template_name = 'team/credit_debit_note/issue/issue_cns.html'
def get(self, request):
return Response(status=status.HTTP_200_OK)
class IssueDebitNoteCustomerPageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
template_name = 'team/credit_debit_note/issue/issue_dnc.html'
def get(self, request):
return Response(status=status.HTTP_200_OK)
class IssueDebitNoteSupplierPageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
template_name = 'team/credit_debit_note/issue/issue_dns.html'
def get(self, request):
return Response(status=status.HTTP_200_OK)
class IssueCreditNoteCustomerDirectAdvancePageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
template_name = 'team/credit_debit_note/issue/issue_cnca.html'
def get(self, request):
return Response(status=status.HTTP_200_OK)
class ApproveCreditDebitNotePageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
template_name = 'team/credit_debit_note/approve/approve_credit_debit_note_page.html'
def get(self, request):
return Response(status=status.HTTP_200_OK, data={
'cnc': approve_credit_note_customer_data(),
'dnc': approve_debit_note_customer_data(),
'cns': approve_credit_note_supplier_data(),
'dns': approve_debit_note_supplier_data(),
'cnca': approve_credit_note_customer_direct_advance_data(),
})
class ApproveCreditNoteCustomerPageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
def get(self, request):
data = []
for row in CreditNoteCustomer.objects.filter(status='pending').exclude(deleted=True).order_by('created_on'):
data.append({
'cnc_id': row.id,
'customer': row.customer.get_name() if row.customer else '-',
'bookings': '\n'.join(
[format_html('''<a href="/team/booking-edit/?booking_id={}">{}</a>''', booking.id,
booking.booking_id)
for booking in row.bookings.all()]),
'invoice': row.invoice.invoice_number if row.invoice else '-',
'amount': row.credit_amount,
'created_on': row.created_on.strftime('%d-%b-%Y') if row.created_on else '-',
'credit_note_number': row.credit_note_number,
'created_by': row.created_by.username if row.created_by else '-',
'credit_note_reason': row.reason.name if row.reason else '-',
'remarks': row.remarks,
'approve_cnc_form': 'approve_cnc_form_{}'.format(row.id),
'approve_cnc_btn': 'approve_cnc_btn_{}'.format(row.id),
'reject_cnc_btn': 'reject_cnc_btn_{}'.format(row.id),
'input_reject_cnc_remarks': 'input_reject_cnc_remarks_{}'.format(row.id),
'btn_status': 'btn_status_{}'.format(row.id),
'div_rejection_remarks': 'div_rejection_remarks_{}'.format(row.id),
'div_rejection_line': 'div_rejection_line_{}'.format(row.id),
})
return Response({'data': data}, template_name='team/credit_debit_note/approve/cnc.html',
status=status.HTTP_200_OK)
class ApproveCreditNoteSupplierPageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
def get(self, request):
data = []
for row in CreditNoteSupplier.objects.filter(status='pending').exclude(deleted=True).order_by('created_on'):
data.append({
'cnc_id': row.id,
'broker': row.broker.get_name() if row.broker else '-',
'bookings': '\n'.join(
[format_html('''<a href="/team/booking-edit/?booking_id={}">{}</a>''', booking.id,
booking.booking_id)
for booking in row.bookings.all()]),
'invoice': row.invoice.invoice_number if row.invoice else '-',
'amount': row.credit_amount,
'created_on': row.created_on.strftime('%d-%b-%Y') if row.created_on else '-',
'credit_note_number': row.credit_note_number,
'created_by': row.created_by.username if row.created_by else '-',
'credit_note_reason': row.reason.name if row.reason else '-',
'remarks': row.remarks,
'approve_cns_form': 'approve_cns_form_{}'.format(row.id),
'approve_cns_btn': 'approve_cns_btn_{}'.format(row.id),
'reject_cns_btn': 'reject_cns_btn_{}'.format(row.id),
'input_reject_cns_remarks': 'input_reject_cns_remarks_{}'.format(row.id),
'btn_status': 'btn_status_{}'.format(row.id),
'div_rejection_remarks': 'div_rejection_remarks_{}'.format(row.id),
'div_rejection_line': 'div_rejection_line_{}'.format(row.id),
})
return Response({'data': data}, template_name='team/credit_debit_note/approve/cns.html',
status=status.HTTP_200_OK)
class ApproveDebitNoteCustomerPageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
def get(self, request):
data = []
for row in DebitNoteCustomer.objects.filter(status='pending').exclude(deleted=True).order_by('created_on'):
data.append({
'dnc_id': row.id,
'customer': row.customer.get_name() if row.customer else '-',
'bookings': '\n'.join(
[format_html('''<a href="/team/booking-edit/?booking_id={}">{}</a>''', booking.id,
booking.booking_id)
for booking in row.bookings.all()]),
'invoice': row.invoice.invoice_number if row.invoice else '-',
'amount': row.debit_amount,
'created_on': row.created_on.strftime('%d-%b-%Y') if row.created_on else '-',
'debit_note_number': row.debit_note_number,
'created_by': row.created_by.username if row.created_by else '-',
'debit_note_reason': row.reason.name if row.reason else '-',
'remarks': row.remarks,
'approve_dnc_form': 'approve_dnc_form_{}'.format(row.id),
'approve_dnc_btn': 'approve_dnc_btn_{}'.format(row.id),
'reject_dnc_btn': 'reject_dnc_btn_{}'.format(row.id),
'input_reject_dnc_remarks': 'input_reject_dnc_remarks_{}'.format(row.id),
'btn_status': 'btn_status_{}'.format(row.id),
'div_rejection_remarks': 'div_rejection_remarks_{}'.format(row.id),
'div_rejection_line': 'div_rejection_line_{}'.format(row.id),
})
return Response({'data': data}, template_name='team/credit_debit_note/approve/dnc.html',
status=status.HTTP_200_OK)
class ApproveDebitNoteSupplierPageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
template_name = 'team/credit_debit_note/issue/issue_dns.html'
def get(self, request):
data = []
for row in DebitNoteSupplier.objects.filter(status='pending').exclude(deleted=True).order_by('created_on'):
data.append({
'cnc_id': row.id,
'broker': row.broker.get_name() if row.broker else '-',
'bookings': '\n'.join(
[format_html('''<a href="/team/booking-edit/?booking_id={}">{}</a>''', booking.id,
booking.booking_id)
for booking in row.bookings.all()]),
'invoice': row.invoice.invoice_number if row.invoice else '-',
'amount': row.debit_amount,
'created_on': row.created_on.strftime('%d-%b-%Y') if row.created_on else '-',
'credit_note_number': row.debit_note_number,
'created_by': row.created_by.username if row.created_by else '-',
'credit_note_reason': row.reason.name if row.reason else '-',
'remarks': row.remarks,
'approve_dns_form': 'approve_dns_form_{}'.format(row.id),
'approve_dns_btn': 'approve_dns_btn_{}'.format(row.id),
'reject_dns_btn': 'reject_dns_btn_{}'.format(row.id),
'input_reject_dns_remarks': 'input_reject_dns_remarks_{}'.format(row.id),
'btn_status': 'btn_status_{}'.format(row.id),
'div_rejection_remarks': 'div_rejection_remarks_{}'.format(row.id),
'div_rejection_line': 'div_rejection_line_{}'.format(row.id),
})
return Response({'data': data}, template_name='team/credit_debit_note/approve/dns.html',
status=status.HTTP_200_OK)
class ApproveCreditNoteCustomerDirectAdvancePageView(APIView):
renderer_classes = (TemplateHTMLRenderer, JSONRenderer)
template_name = 'team/credit_debit_note/issue/issue_cnca.html'
def get(self, request):
data = []
for row in CreditNoteCustomerDirectAdvance.objects.filter(status='pending').exclude(deleted=True).order_by(
'created_on'):
data.append({
'cnc_id': row.id,
'broker': row.broker.get_name() if row.broker else '-',
'customer': row.customer.get_name() if row.customer else '-',
'bookings': '\n'.join(
[format_html('''<a href="/team/booking-edit/?booking_id={}">{}</a>''', booking.id,
booking.booking_id)
for booking in row.bookings.all()]),
'invoice': row.invoice.invoice_number if row.invoice else '-',
'amount': row.credit_amount,
'created_on': row.created_on.strftime('%d-%b-%Y') if row.created_on else '-',
'credit_note_number': row.credit_note_number,
'created_by': row.created_by.username if row.created_by else '-',
'credit_note_reason': row.reason.name if row.reason else '-',
'remarks': row.remarks,
'approve_cnca_form': 'approve_cnca_form_{}'.format(row.id),
'approve_cnca_btn': 'approve_cnca_btn_{}'.format(row.id),
'reject_cnca_btn': 'reject_cnca_btn_{}'.format(row.id),
'input_reject_cnca_remarks': 'input_reject_cnca_remarks_{}'.format(row.id),
'btn_status': 'btn_status_{}'.format(row.id),
'div_rejection_remarks': 'div_rejection_remarks_{}'.format(row.id),
'div_rejection_line': 'div_rejection_line_{}'.format(row.id),
})
return Response({'data': data}, template_name='team/credit_debit_note/approve/cnca.html',
status=status.HTTP_200_OK)
class MobilePageView(viewsets.ViewSet):
renderer_classes = (TemplateHTMLRenderer,)
def dashboard(self, request):
return Response(template_name='mobile/dashboard.html', status=status.HTTP_200_OK)
class DocumentUploadPageView(viewsets.ViewSet):
renderer_classes = (TemplateHTMLRenderer,)
def pod(self, request):
lr_numbers = LrNumber.objects.filter(Q(datetime__date__gte=datetime.now().date() - timedelta(days=180)) & (
Q(booking__pod_status='pending') | Q(booking__pod_status='rejected') | Q(
booking__pod_status='unverified'))).order_by('-datetime').values(
'id', 'lr_number')
bookings = []
for booking in ManualBooking.objects.filter(
(Q(pod_status__iexact='pending') | Q(pod_status__iexact='rejected')) & (
Q(booking_id__istartswith='BROKER') | Q(booking_id__istartswith='AB'))).exclude(
Q(booking_status='cancelled') | Q(deleted=True)):
bookings.append({'booking_id': booking.booking_id})
return Response(template_name='fileupload/pod_upload.html', status=status.HTTP_200_OK)
def vehicle(self, request):
return Response(template_name='fileupload/upload_vehicle_documents.html', status=status.HTTP_200_OK)
def supplier(self, request):
return Response(template_name='fileupload/upload_supplier_documents.html', status=status.HTTP_200_OK)
def weighing_slip(self, request):
return Response(template_name='fileupload/weighing_slip_upload.html', status=status.HTTP_200_OK)
def owner(self, request):
return Response(template_name='fileupload/upload_owner_documents.html', status=status.HTTP_200_OK)
def driver(self, request):
return Response(template_name='fileupload/upload_driver_documents.html', status=status.HTTP_200_OK)
def cheque(self, request):
return Response(template_name='fileupload/upload_cheque.html', status=status.HTTP_200_OK)
def invoice_receipt(self, request):
return Response(template_name='fileupload/invoice_receipt.html', status=status.HTTP_200_OK)
| 1.476563 | 1 |
Chapter 8/test_2.py | PacktPublishing/Mastering-IPython-4 | 22 | 12767855 | from hail2 import f
class TestHailStones():
def test_f(self):
ans = [0, 0, 1, 7, 2, 5, 8, 16, 3, 19, 6]
for i in range(1, 11):
print(i)
assert f(i) == ans[i]
| 2.734375 | 3 |
base_plugin_manager/__init__.py | LeiQiao/Parasite-Plugins | 0 | 12767856 | from .plugin_manager_plugin import PluginManagerPlugin
from . import plugin_manager_api
| 1.117188 | 1 |
main.py | JNPRAutomate/BGP_Flowspec_automation_with_PyEZ | 13 | 12767857 | #
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER
#
# Copyright (c) 2018 Juniper Networks, Inc.
# All rights reserved.
#
# Use is subject to license terms.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import cherrypy
import hashlib
import datetime
import yaml
import re
from jinja2 import Environment, FileSystemLoader
from jnpr.junos.utils.config import Config
from jnpr.junos import Device
from jnpr.junos.exception import ConfigLoadError, CommitError
from data.fr import FlowRoutesTable, FlowFilterTable
class MyDev(object):
def __init__(self):
self.dev_user = None
self.dev_pw = None
self.age_out_interval = None
self.flow_active = dict()
self.flow_config = dict()
self.filter_active = dict()
self.routers = list()
def addNewFlowRoute(self, flowRouteData=None):
env = Environment(autoescape=False,
loader=FileSystemLoader('./template'), trim_blocks=False, lstrip_blocks=False)
template = env.get_template('set-flow-route.conf')
# print template.render(flowRouteData)
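        # Note: the Jinja2 environment/template above only serves the optional preview
        # print; Config.load() below re-renders template_path with template_vars itself.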
my_router = None
for router in self.routers:
for name, value in router.iteritems():
if 'rr' in value['type']:
my_router = [value['ip']]
with Device(host=my_router[0], user=self.dev_user, password=self.dev_pw) as dev:
try:
cu = Config(dev)
cu.lock()
cu.load(template_path='template/set-flow-route.conf', template_vars=flowRouteData, merge=True)
cu.commit()
cu.unlock()
except ConfigLoadError as cle:
return False, cle.message
self.flow_config[flowRouteData['flowRouteName']] = {
'dstPrefix': flowRouteData['dstPrefix'] if 'dstPrefix' in flowRouteData else None,
'srcPrefix': flowRouteData['srcPrefix'] if 'srcPrefix' in flowRouteData else None,
'protocol': flowRouteData['protocol'] if 'protocol' in flowRouteData else None,
'dstPort': flowRouteData['dstPort'] if 'dstPort' in flowRouteData else None,
'srcPort': flowRouteData['srcPort'] if 'srcPort' in flowRouteData else None,
'action': flowRouteData['action']}
return True, 'Successfully added new flow route'
def modFlowRoute(self, flowRouteData=None):
my_router = None
for router in self.routers:
for name, value in router.iteritems():
if 'rr' in value['type']:
my_router = [value['ip']]
        with Device(host=my_router[0], user=self.dev_user, password=self.dev_pw) as dev:
try:
cu = Config(dev)
cu.lock()
cu.load(template_path='template/mod-flow-route.conf', template_vars=flowRouteData)
cu.commit()
cu.unlock()
except CommitError as ce:
return False, ce.message
self.flow_config[flowRouteData['flowRouteName']] = {
'dstPrefix': flowRouteData['dstPrefix'] if 'dstPrefix' in flowRouteData else None,
'srcPrefix': flowRouteData['srcPrefix'] if 'srcPrefix' in flowRouteData else None,
'protocol': flowRouteData['protocol'] if 'protocol' in flowRouteData else None,
'dstPort': flowRouteData['dstPort'] if 'dstPort' in flowRouteData else None,
'srcPort': flowRouteData['srcPort'] if 'srcPort' in flowRouteData else None,
'action': flowRouteData['action']}
return True, 'Successfully modified flow route'
def delFlowRoute(self, flowRouteData=None):
my_router = None
for router in self.routers:
for name, value in router.iteritems():
if 'rr' in value['type']:
my_router = [value['ip']]
with Device(host=my_router[0], user=self.dev_user, password=self.dev_pw) as dev:
try:
cu = Config(dev)
cu.lock()
cu.load(template_path='template/delete-flow-route.conf', template_vars=flowRouteData, merge=True)
cu.commit()
cu.unlock()
except ConfigLoadError as cle:
return False, cle.message
self.flow_config.pop(flowRouteData['flowRouteName'], None)
        return True, 'Successfully deleted flow route'
def getActiveFlowRoutes(self):
t = datetime.datetime.strptime(self.age_out_interval, "%H:%M:%S")
self.flow_active = dict()
for router in self.routers:
for name, value in router.iteritems():
                with Device(host=value['ip'], user=self.dev_user, password=self.dev_pw) as dev:
# data = dev.rpc.get_config(filter_xml='routing-options/flow/route/name')
frt = FlowRoutesTable(dev)
frt.get()
for flow in frt:
destination = flow.destination.split(',')
for index, item in enumerate(destination):
_item = item.split('=')
destination[index] = _item[1] if len(_item) > 1 else _item[0]
hash_object = hashlib.sha512(b'{0}{1}'.format(str(destination), str(value['ip'])))
hex_dig = hash_object.hexdigest()
_age = dict()
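                        # Junos reports route age as 'SS', 'MM:SS', 'HH:MM:SS' or 'Nd HH:MM:SS';
                        # the branches below normalize each variant to a timedelta.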
if len(flow.age) <= 2:
_age['current'] = datetime.timedelta(seconds=int(flow.age))
elif len(flow.age) == 4 or len(flow.age) == 5:
ms = flow.age.split(':')
_age['current'] = datetime.timedelta(minutes=int(ms[0]), seconds=int(ms[1]))
elif len(flow.age) == 7 or len(flow.age) == 8:
ms = flow.age.split(':')
_age['current'] = datetime.timedelta(hours=int(ms[0]), minutes=int(ms[1]),
seconds=int(ms[2]))
else:
pattern = r'(.*)\s(.*?):(.*?):(.*)'
regex = re.compile(pattern)
age = re.findall(regex, flow.age)
_age['current'] = datetime.timedelta(days=int(age[0][0][:-1]), hours=int(age[0][1]),
minutes=int(age[0][2]), seconds=int(age[0][3]))
pattern = r'([^\s]+)'
regex = re.compile(pattern)
_krt_actions = re.findall(regex, flow.tsi)
if len(_krt_actions) <= 4:
krt_actions = _krt_actions
else:
krt_actions = _krt_actions[4]
# Junos 14.1RX different XPATH for BGP communities
version = dev.facts['version'].split('R')[0].split('.')
if int(version[0]) <= 14 and int(version[1]) <= 1:
if isinstance(flow.action_141, str):
if 'traffic-action' in flow.action_141:
commAction = flow.action_141.split(":")[1].lstrip().strip()
else:
commAction = flow.action_141
elif isinstance(flow.action_141, list):
commAction = flow.action_141[1].split(':')[1].lstrip().strip()
else:
commAction = flow.action_141
else:
if isinstance(flow.action, str):
if 'traffic-action' in flow.action:
commAction = flow.action.split(":")[1].lstrip().strip()
else:
commAction = flow.action
elif isinstance(flow.action, list):
commAction = flow.action[1].split(':')[1].lstrip().strip()
else:
commAction = flow.action
if hex_dig not in self.flow_active:
self.flow_active[hex_dig] = {'router': name, 'term': flow.term, 'destination': destination,
'commAction': commAction, 'krtAction': krt_actions,
'age': str(_age['current']),
'hash': hex_dig, 'status': 'new'}
else:
                            if 'term:N/A' in flow.term:
self.flow_active.pop(hex_dig, None)
if _age['current']:
if _age['current'] > datetime.timedelta(hours=t.hour, minutes=t.minute,
seconds=t.second):
self.flow_active[hex_dig]['status'] = 'old'
try:
if hex_dig in self.flow_active:
self.flow_active[hex_dig].update({'term': flow.term, 'destination': destination,
'commAction': commAction,
'krtAction': krt_actions,
'age': str(_age['current'])})
except KeyError as ke:
return False, ke.message
return True, self.flow_active
def getActiveFlowRouteFilter(self):
if self.routers:
for router in self.routers:
for name, value in router.iteritems():
self.filter_active[name] = list()
with Device(host=value['ip'], user=self.dev_user, password=self.dev_pw) as dev:
frft = FlowFilterTable(dev)
frft.get()
for filter in frft:
data = filter.name.split(',')
for didx, item in enumerate(data):
_item = item.split('=')
data[didx] = _item[1] if len(_item) > 1 else _item[0]
self.filter_active[name].append({'data': data, 'packet_count': filter.packet_count,
'byte_count': filter.byte_count})
return True, self.filter_active
def loadFlowRouteConfig(self):
dev_ip = list()
for router in self.routers:
for name, value in router.iteritems():
if 'rr' in value['type']:
dev_ip.append(value['ip'])
with Device(host=dev_ip[0], user=self.dev_user, password=self.dev_pw, normalize=True) as dev:
version = dev.facts['version'].split('R')[0].split('.')
# Junos 14.1RX does not support json so let's go with XML here
if int(version[0]) <= 14 and int(version[1]) <= 1:
data = dev.rpc.get_config(options={'format': 'xml'}, filter_xml='routing-options/flow')
for route in data.iter('route'):
my_list = list()
for item in route:
if 'name' in item.tag:
my_list.append(item.text)
self.flow_config[item.text] = {}
elif 'match' in item.tag:
tag = None
for child in item.iterchildren():
if 'destination-port' in child.tag:
tag = 'dstPort'
elif 'source-port' in child.tag:
tag = 'srcPort'
elif 'destination' in child.tag:
tag = 'dstPrefix'
elif 'source' in child.tag:
tag = 'srcPrefix'
elif 'protocol' in child.tag:
tag = 'protocol'
self.flow_config[my_list[0]][tag] = child.text
elif 'then' in item.tag:
_action = dict()
for child in item.iterchildren():
for value in child.iter():
_action[child.tag] = {'value': value.text}
self.flow_config[my_list[0]]['action'] = _action
return True, self.flow_config
else:
data = dev.rpc.get_config(options={'format': 'json'})
if 'route' in data['configuration']['routing-options']['flow']:
for route in data['configuration']['routing-options']['flow']['route']:
_action = dict()
for key, value in route['then'].iteritems():
if value[0]:
_action[key] = {'value': value}
else:
_action[key] = {'value': None}
self.flow_config[route['name']] = {
'dstPrefix': route['match']['destination'] if 'destination' in route['match'] else None,
'srcPrefix': route['match']['source'] if 'source' in route['match'] else None,
'protocol': route['match']['protocol'] if 'protocol' in route['match'] else None,
'dstPort': route['match']['destination-port'] if 'destination-port' in route[
'match'] else None,
'srcPort': route['match']['source-port'] if 'source-port' in route['match'] else None,
'action': _action}
return True, self.flow_config
else:
return False, self.flow_config
def save_settings(self, dev_user=None, dev_pw=None, routers=None, age_out_interval=None):
self.dev_user = dev_user
self.dev_pw = dev_pw
self.age_out_interval = age_out_interval
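        # Persisting to ui/config.yml is currently disabled (see the commented-out
        # block below), so saved settings only live in memory until restart.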
# self.routers = routers
# with open('ui/config.yml', 'w') as fp:
# config = {'dev_user': self.dev_user, 'dev_pw': self.dev_pw, 'routers': self.routers,
# 'age_out_interval': self.age_out_interval}
# yaml.safe_dump(config, fp, default_flow_style=False)
def load_settings(self):
with open('ui/config.yml', 'r') as fp:
_config = fp.read()
config = yaml.safe_load(_config)
self.dev_user = config['dev_user']
self.dev_pw = config['dev_pw']
self.age_out_interval = config['age_out_interval']
self.routers = config['routers']
class BGPFlow(object):
@cherrypy.expose
def index(self):
return open('ui/index.html', 'r')
@cherrypy.expose
class BGPFlowWS(object):
def __init__(self, my_dev=None):
self.my_dev = my_dev
@cherrypy.tools.json_out()
@cherrypy.tools.json_in()
def GET(self, action=None):
if action == 'active':
data = self.my_dev.getActiveFlowRoutes()
return data
@cherrypy.tools.json_out()
@cherrypy.tools.json_in()
def POST(self, action=None):
if action == 'add':
input_json = cherrypy.request.json
resp = self.my_dev.addNewFlowRoute(flowRouteData=input_json)
return resp
elif action == 'mod':
input_json = cherrypy.request.json
resp = self.my_dev.modFlowRoute(flowRouteData=input_json)
return resp
elif action == 'del':
input_json = cherrypy.request.json
resp = self.my_dev.delFlowRoute(flowRouteData=input_json)
return resp
elif action == 'save':
input_json = cherrypy.request.json
self.my_dev.save_settings(dev_user=input_json['user'], dev_pw=input_json['password'],
age_out_interval=input_json['age_out_interval'])
return True, 'Successfully saved configuration settings'
else:
return False, 'Action not defined'
@cherrypy.expose
class Frt(object):
def __init__(self, my_dev=None):
self.my_dev = my_dev
@cherrypy.tools.json_out()
def POST(self):
resp = self.my_dev.getActiveFlowRoutes()
return resp
@cherrypy.expose
class Frtc(object):
def __init__(self, my_dev=None):
self.my_dev = my_dev
@cherrypy.tools.json_out()
def POST(self):
resp = self.my_dev.loadFlowRouteConfig()
return resp
@cherrypy.expose
class Frft(object):
def __init__(self, my_dev=None):
self.my_dev = my_dev
@cherrypy.tools.json_out()
def POST(self):
resp = self.my_dev.getActiveFlowRouteFilter()
return resp
if __name__ == '__main__':
cherrypy.config.update({'log.screen': False,
'log.access_file': '',
'log.error_file': ''})
conf = {
'/': {
'tools.sessions.on': True,
'tools.staticdir.root': os.path.abspath(os.getcwd()),
'tools.staticdir.on': True,
'tools.staticdir.dir': 'ui'
},
'/api': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.response_headers.on': True,
'tools.response_headers.headers': [('Content-Type', 'text/plain')],
},
'/api/frt': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.response_headers.on': True,
'tools.response_headers.headers': [('Content-Type', 'text/plain')],
},
'/api/frct': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.response_headers.on': True,
'tools.response_headers.headers': [('Content-Type', 'text/plain')],
},
'/api/frft': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.response_headers.on': True,
'tools.response_headers.headers': [('Content-Type', 'text/plain')],
},
}
my_dev = MyDev()
my_dev.load_settings()
webapp = BGPFlow()
webapp.api = BGPFlowWS(my_dev=my_dev)
webapp.api.frt = Frt(my_dev=my_dev)
webapp.api.frct = Frtc(my_dev=my_dev)
webapp.api.frft = Frft(my_dev=my_dev)
cherrypy.config.update({'log.screen': False,
'server.socket_host': '0.0.0.0',
'server.socket_port': 8080,
})
cherrypy.quickstart(webapp, '/', conf)
| 1.742188 | 2 |
systori/lib/templatetags/amount.py | systori/systori | 12 | 12767858 | from math import floor, ceil
from decimal import Decimal
from django import template
from django.utils.safestring import mark_safe
from .customformatting import ubrdecimal
register = template.Library()
def _make_context(
context,
css,
obj,
field,
bold="gross",
has_form=False,
comment=None,
select_if_equal=None,
):
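    # Build the template context for one amount cell: `field` selects the
    # <field>_amount / <field>_diff_amount / <field>_percent attributes on obj,
    # and `has_form` pulls the matching form fields for editable cells.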
ctx = {
"TAX_RATE": context["TAX_RATE"],
"css_class": css,
"amount": getattr(obj, field + "_amount"),
"diff": getattr(obj, field + "_diff_amount", None),
"percent": getattr(obj, field + "_percent", None),
"has_form": has_form,
"bold": bold,
}
if select_if_equal == ctx["amount"]:
ctx["css_class"] += " selected"
if has_form:
ctx.update(
{"net": obj[field + "_net"], "tax": obj[field + "_tax"], "comment": comment}
)
for field_name in ["net", "tax", "comment"]:
field_obj = ctx[field_name]
if field_obj is not None and field_obj.errors:
ctx["css_class"] += " has-error bg-danger"
break
return ctx
@register.inclusion_tag("accounting/amount_view_cell.html", takes_context=True)
def amount_view(context, *args, **kwargs):
return _make_context(context, *args, **kwargs)
@register.inclusion_tag("accounting/amount_view_cell.html", takes_context=True)
def amount_stateful(context, *args, **kwargs):
return _make_context(context, *args, has_form=True, **kwargs)
@register.inclusion_tag("accounting/amount_input_cell.html", takes_context=True)
def amount_input(context, *args, **kwargs):
return _make_context(context, *args, has_form=True, **kwargs)
@register.simple_tag
def amount_diff_part(amount, part):
color = ""
value = getattr(amount, part, Decimal(0))
if value > 0:
color = "green"
elif value < 0:
color = "red"
str_value = ""
if value != 0:
str_value = ubrdecimal(value, 2)
if value > 0:
str_value = "+" + str_value
return mark_safe('<span class="amount-diff %s">%s</span>' % (color, str_value))
@register.simple_tag
def amount_value_part(amount, part):
value = getattr(amount, part, Decimal(0))
str_value = ubrdecimal(value, 2)
return mark_safe('<span class="amount-value">%s</span>' % (str_value,))
@register.simple_tag
def amount_percent(percent):
color = ""
str_value = ""
if percent is not None:
if percent == 100:
color = "green"
elif percent > 100:
color = "red"
percent = ceil(percent)
elif percent < 100:
color = "blue"
percent = floor(percent)
str_value = str(percent) + "%"
return mark_safe('<div class="amount-percent %s">%s</div>' % (color, str_value))
| 2.171875 | 2 |
arrays/contains_duplicate.py | wtlow003/leetcode-daily | 0 | 12767859 | """
217. Contains Duplicate
https://leetcode.com/problems/contains-duplicate/
Given an array of integers, find if the array contains any duplicates.
Your function should return true if any value appears at least twice in the array,
and it should return false if every element is distinct.
Example:
Input: [1,2,3,1]
Output: true
"""
# Runtime: 128ms
from typing import List

class Solution:
def containsDuplicate(self, nums: List[int]) -> bool:
hash_map = {}
for num in nums:
if num not in hash_map:
                hash_map[num] = hash_map.get(num, 0) + 1
else:
return True
return False
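
# A minimal alternative sketch (not part of the original solution): building a
# set and comparing sizes detects duplicates without tracking counts.
class SetSolution:
    def containsDuplicate(self, nums: List[int]) -> bool:
        return len(set(nums)) != len(nums)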
| 3.875 | 4 |
zoomus/components/live_stream.py | seantibor/zoomus | 178 | 12767860 | from __future__ import absolute_import
from zoomus import util
from zoomus.components import base
class LiveStreamComponentV2(base.BaseComponent):
def update(self, **kwargs):
"""
Use this API to update the meeting's stream information.
Expects:
- meeting_id: int
- stream_url: string (URL)
- stream_key: string
- page_url: string (URL)
"""
util.require_keys(kwargs, "meeting_id")
return self.patch_request(
"/meetings/{}/livestream".format(kwargs.get("meeting_id")), data=kwargs
)
def update_status(self, **kwargs):
"""
Use this API to update the status of a meeting's live stream.
Expects:
- meeting_id: int
- action (start|stop)
- settings: dict
"""
util.require_keys(kwargs, "meeting_id")
return self.patch_request(
"/meetings/{}/livestream/status".format(kwargs.get("meeting_id")),
data=kwargs,
)
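
# Hypothetical usage sketch; the component is normally reached through a zoomus
# client object, and the argument values below are made up:
#
#     from zoomus import ZoomClient
#     client = ZoomClient('API_KEY', 'API_SECRET')
#     client.live_stream.update(meeting_id=123456789,
#                               stream_url='rtmp://example.com/live',
#                               stream_key='s3cr3t',
#                               page_url='https://example.com/watch')
#     client.live_stream.update_status(meeting_id=123456789, action='start')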
| 2.421875 | 2 |
search/COSP/reinforce_baselines.py | cjdjr/cosp_nas | 3 | 12767861 |
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
from scipy.stats import ttest_rel
import copy
# from train import rollout, get_inner_model
class Baseline(object):
def wrap_dataset(self, dataset):
return dataset
def unwrap_batch(self, batch):
return batch, None
def eval(self, x, c):
raise NotImplementedError("Override this method")
def get_learnable_parameters(self):
return []
def epoch_callback(self, model, epoch):
pass
def state_dict(self):
return {}
def load_state_dict(self, state_dict):
pass
class ExponentialBaseline(Baseline):
def __init__(self, beta):
        super(ExponentialBaseline, self).__init__()
self.beta = beta
self.v = None
def eval(self, x, c):
if self.v is None:
v = c.mean()
else:
v = self.beta * self.v + (1. - self.beta) * c.mean()
self.v = v.detach() # Detach since we never want to backprop
return self.v, 0 # No loss
def state_dict(self):
return {
'v': self.v
}
def load_state_dict(self, state_dict):
self.v = state_dict['v']
class WarmupBaseline(Baseline):
    def __init__(self, baseline, n_epochs=1, warmup_exp_beta=0.8):
        super(WarmupBaseline, self).__init__()
self.baseline = baseline
assert n_epochs > 0, "n_epochs to warmup must be positive"
self.warmup_baseline = ExponentialBaseline(warmup_exp_beta)
self.alpha = 0
self.n_epochs = n_epochs
def wrap_dataset(self, dataset):
if self.alpha > 0:
return self.baseline.wrap_dataset(dataset)
return self.warmup_baseline.wrap_dataset(dataset)
def unwrap_batch(self, batch):
if self.alpha > 0:
return self.baseline.unwrap_batch(batch)
return self.warmup_baseline.unwrap_batch(batch)
def eval(self, x, c):
if self.alpha == 1:
return self.baseline.eval(x, c)
if self.alpha == 0:
return self.warmup_baseline.eval(x, c)
v, l = self.baseline.eval(x, c)
vw, lw = self.warmup_baseline.eval(x, c)
# Return convex combination of baseline and of loss
        return self.alpha * v + (1 - self.alpha) * vw, self.alpha * l + (1 - self.alpha) * lw
def epoch_callback(self, model, epoch):
# Need to call epoch callback of inner model (also after first epoch if we have not used it)
self.baseline.epoch_callback(model, epoch)
self.alpha = (epoch + 1) / float(self.n_epochs)
if epoch < self.n_epochs:
print("Set warmup alpha = {}".format(self.alpha))
def state_dict(self):
# Checkpointing within warmup stage makes no sense, only save inner baseline
return self.baseline.state_dict()
def load_state_dict(self, state_dict):
# Checkpointing within warmup stage makes no sense, only load inner baseline
self.baseline.load_state_dict(state_dict)
class NoBaseline(Baseline):
def eval(self, x, c):
return 0, 0 # No baseline, no loss
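

# A minimal self-contained check of the exponential-moving-average update in
# ExponentialBaseline (the costs below are made-up values for illustration):
if __name__ == "__main__":
    baseline = ExponentialBaseline(beta=0.8)
    for c in (10.0, 12.0, 8.0):
        v, loss = baseline.eval(None, torch.full((4,), c))
        print(float(v))  # 10.0, then 0.8*10 + 0.2*12 = 10.4, then 0.8*10.4 + 0.2*8 = 9.92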
| 2.296875 | 2 |
results/plot.py | sebhoof/SolarAxionFlux | 6 | 12767862 | import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
plt.rc('text', usetex=True)
plt.rc('text.latex', preamble=r'\usepackage{amsmath}\usepackage{amssymb}\usepackage{siunitx}')
# Colours
col_b16agss09 = '#A50026'
col_b16gs98 = '#D73027'
col_agss09 = '#F46D43'
col_agss09ph = '#FDAE61'
col_ags05 = '#FEE090'
col_bs05agsop = '#FFFFBF'
col_bs05op = '#E0F3F8'
col_bp04 = '#ABD9E9'
col_bp00 = '#74ADD1'
col_bp98 = '#4575B4'
col_gs98 = '#313695'
def plot_setup(fig, ax, size=6, ratio=0.618):
    fig.set_size_inches(size, ratio*size)
ax.tick_params(which='both', direction='in', bottom=True, top=True, left=True, right=True)
ax.tick_params(which='major', length=6)
ax.tick_params(which='minor', length=4)
#plt.minorticks_on()
conversion = 365.0*24.0*60.0*60.0*1.0e4*1.0e-20
res1 = np.genfromtxt("primakoff.dat")
res2 = np.genfromtxt("compton.dat")
res3 = np.genfromtxt("all_ff.dat")
res4 = np.genfromtxt("all_gaee.dat")
res5 = np.genfromtxt("metals.dat")
res6 = np.genfromtxt("TP.dat")
res7 = np.genfromtxt("LP.dat")
res8 = np.genfromtxt("TP_Rosseland.dat")
res9 = np.genfromtxt("LP_Rosseland.dat")
#corr = np.genfromtxt("weighted_compton.dat")
#weighted_compton = interpolate.interp1d(corr[:,0], corr[:,1], bounds_error=False, fill_value=0)
common_path = "../data/benchmarks/"
ref1 = np.genfromtxt(common_path+"2013_redondo_primakoff.dat")
ref2 = np.genfromtxt(common_path+"2013_redondo_compton.dat")
compton = interpolate.interp1d(ref2[:,0], ref2[:,1], bounds_error=False, fill_value=0)
ref3 = np.genfromtxt(common_path+"2013_redondo_ff.dat")
ref4 = np.genfromtxt(common_path+"2013_redondo_all.dat")
ref5 = np.genfromtxt(common_path+"2020_giannotti_TP.dat")
ref6 = np.genfromtxt(common_path+"2020_giannotti_LP.dat")
ref7 = np.genfromtxt(common_path+"2020-o'hare.dat")
ref8 = np.genfromtxt(common_path+"2020_caputo_LP.dat")
conv_fac = 1.0e-4/(365.0*24.0*60.0*60.0*1.0e10)
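# Unit-conversion note (my reading of the scalings used below, not documented
# in the original): 1.0e-4 converts per-m^2 to per-cm^2, 365*24*60*60 converts
# per-year to per-second, and 1.0e10 rescales the flux into the 1e10 cm^-2
# keV^-1 s^-1 units used on the y-axes.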
## Validation plots for axion-photon interactions
# Primakoff approximation [hep-ex/0702006] based on [astro-ph/0402114]
omega = np.linspace(0,10,300)
fig, ax = plt.subplots()
plot_setup(fig, ax)
plt.plot(omega, 6.02*omega**2.481*np.exp(-omega/1.205),':', color=col_agss09, label=r'Primakoff approx. (BP04)')
plt.plot(ref1[:,0], conv_fac*(1.0e4/50.0)*ref1[:,1], '-', color=col_b16agss09, label=r'Primakoff (Redondo)')
plt.plot(res1[:,0], res1[:,1]/1.0e10, 'k--', label=r'Primakoff (AGSS09)')
plt.plot(res6[:,0], res6[:,1]/1.0e10, 'k--', label=r'TP (AGSS09)')
plt.title(r'Axion-photon interactions, $g_{a\gamma\gamma} = \SI{e-10}{\GeV^{-1}}$, OP opacities')
plt.xlabel(r'Energy $\omega$ [keV]')
plt.ylabel(r'Axion flux $\mathrm{d}\Phi_a/\mathrm{d}\omega$ [\SI{e10}{\per\cm\squared\per\keV\per\s}]')
plt.xlim([0,10])
#plt.ylim([0,8])
plt.legend(frameon=False)
plt.savefig("validation_gagg.pdf", bbox_inches='tight')
#plt.show()
plt.close()
fig, ax = plt.subplots()
plot_setup(fig, ax)
plt.plot(omega, 6.02*omega**2.481*np.exp(-omega/1.205),':', color=col_agss09, label=r'Primakoff approx. (BP04)')
plt.plot(ref1[:,0], conv_fac*(1.0e4/50.0)*ref1[:,1], '-', color=col_b16agss09, label=r'Primakoff (Redondo)')
plt.plot(res1[:,0], res1[:,1]/1.0e10, 'k--', label=r'Primakoff (AGSS09)')
plt.plot(res6[:,0], res6[:,1]/1.0e10, 'k-', label=r'TP (AGSS09)')
plt.plot(res8[:,0], res8[:,1]/1.0e10, 'k--', label=r'TP Rosseland (AGSS09)')
plt.plot(ref5[:,0], ref5[:,1]*4.0*1.4995, '-', color='green', label=r'TP (Giannotti)')#correct B conversion in giannotti result and adjust coupling constant
plt.title(r'Axion-photon interactions, $g_{a\gamma\gamma} = \SI{e-10}{\GeV^{-1}}$, OP opacities')
plt.xlabel(r'Energy $\omega$ [keV]')
plt.ylabel(r'Axion flux $\mathrm{d}\Phi_a/\mathrm{d}\omega$ [\SI{e10}{\per\cm\squared\per\keV\per\s}]')
plt.xlim([0.1,10])
plt.yscale('log')
plt.xscale('log')
#plt.ylim([0,8])
plt.legend(frameon=False)
plt.savefig("validation_Tplasmon.pdf", bbox_inches='tight')
plt.show()
plt.close()
fig, ax = plt.subplots()
plot_setup(fig, ax)
plt.plot(omega, 6.02*omega**2.481*np.exp(-omega/1.205),':', color=col_agss09, label=r'Primakoff approx. (BP04)')
plt.plot(ref1[:,0], conv_fac*(1.0e4/50.0)*ref1[:,1], '-', color=col_b16agss09, label=r'Primakoff (Redondo)')
plt.plot(res1[:,0], res1[:,1]/1.0e10, 'k--', label=r'Primakoff (AGSS09)')
plt.plot(res7[:,0], res7[:,1]/1.0e10, 'k-', label=r'LP (AGSS09)')
plt.plot(res9[:,0], res9[:,1]/1.0e10, 'k--', label=r'LP Rosseland (AGSS09)')
plt.plot(ref6[:,0], ref6[:,1]*4.0, '--', color='green', label=r'LP (Giannotti)') # correct coupling
plt.plot(ref7[:,0], ref7[:,1]/1.0e10*4.0/1.7856, '--', color='orange', label=r'LP (O´Hare)') # correct coupling and angular average
plt.plot(ref8[:,0], ref8[:,1]/1.0e10*(3.0/5.0)**2, '--', color='gold', label=r'LP (Caputo)') #correct field values
plt.title(r'Axion-photon interactions, $g_{a\gamma\gamma} = \SI{e-10}{\GeV^{-1}}$, OP opacities')
plt.xlabel(r'Energy $\omega$ [keV]')
plt.ylabel(r'Axion flux $\mathrm{d}\Phi_a/\mathrm{d}\omega$ [\SI{e10}{\per\cm\squared\per\keV\per\s}]')
plt.xlim([0.001,0.4])
plt.yscale('log')
plt.xscale('log')
plt.ylim(top=37)  # a non-positive lower bound is invalid on a log-scaled axis
plt.legend(frameon=False)
plt.savefig("validation_Lplasmon.pdf", bbox_inches='tight')
plt.show()
plt.close()
fig, ax = plt.subplots()
## Validation plots for axion-electron interactions
plot_setup(fig, ax)
plt.plot(ref2[:,0], 100.0*conv_fac*(0.5*ref2[:,1]), 'b-', label=r'Compton (Redondo)')
plt.plot(ref3[:,0], 100.0*conv_fac*ref3[:,1], 'm-', label=r'FF (Redondo)')
plt.plot(ref4[:,0], 1.0e11*ref4[:,1]*(1.0e-13/0.511e-10)**2/(24.0*60.0*60.0) - 100.0*conv_fac*(0.5*compton(ref4[:,0])), 'g-', label=r'All')
plt.plot(res2[:,0], res2[:,1]/1.0e8, 'k--', label=r'Compton (B16-AGSS09)')
plt.plot(res3[:,0], res3[:,1]/1.0e8, 'k--', label=r'FF (B16-AGSS09)')
plt.plot(res4[:,0], res4[:,1]/1.0e8, 'k--', label=r'All (B16-AGSS09)')
plt.plot(res5[:,0], res5[:,1]/1.0e8, 'k--', label=r'Metals (B16-AGSS09)')
plt.title(r'Axion-electron interactions, $g_{aee} = \num{e-13}$, OP opacities')
plt.xlabel(r'Energy $\omega$ [keV]')
plt.ylabel(r'Axion flux $\mathrm{d}\Phi_a/\mathrm{d}\omega$ [\SI{e8}{\per\cm\squared\per\keV\per\s}]')
plt.xlim([0,10])
plt.ylim([0,12])
plt.legend(ncol=2, frameon=False)
plt.savefig("validation_gaee.pdf")
#plt.show()
plt.close()
| 2.40625 | 2 |
cosanlab_preproc/wfmaker.py | BryanGonzalez262/cosanlab_preproc | 0 | 12767863 | from __future__ import division
"""
Workflow Maker
==============
Handy function to build dynamic workflows using BIDS formatted data files.
"""
# These imports are not nipype-dependent so we can import them early; see config notes below
import matplotlib
matplotlib.use('Agg')
import nibabel as nib
import os
from .utils import get_resource_path
import six
from bids.grabbids import BIDSLayout
def wfmaker(project_dir, raw_dir, subject_id, task_name='', apply_trim=False,
            apply_dist_corr=False, apply_smooth=False, apply_filter=False,
            mni_template='2mm', apply_n4=True, ants_threads=8,
            readable_crash_files=False):
"""
This function returns a "standard" workflow based on requested settings. Assumes data is in the following directory structure in BIDS format:
*Work flow steps*:
1) EPI Distortion Correction (FSL; optional)
2) Trimming (nipy)
3) Realignment/Motion Correction (FSL)
4) Artifact Detection (rapidART/python)
5) Brain Extraction + N4 Bias Correction (ANTs)
6) Coregistration (rigid) (ANTs)
7) Normalization to MNI (non-linear) (ANTs)
    8) Low-pass filtering (nilearn; optional)
    9) Smoothing (FSL; optional)
    10) Downsampling to INT16 precision to save space (nibabel)
Args:
        project_dir (str): full path to the root of the project folder, e.g. /my/data/myproject. All preprocessed data will be placed under this folder, and the raw_dir folder will be searched for under it
raw_dir (str): folder name for raw data, e.g. 'raw' which would be automatically converted to /my/data/myproject/raw
        subject_id (str/int): subject ID to process. Can be either a subject ID string e.g. 'sub-0001' or an integer to index the entire list of subjects in raw_dir, e.g. 0, which would process the first subject
        task_name (str; optional): which functional task runs to process; default is all runs
        apply_trim (int/bool; optional): number of volumes to trim from the beginning of each functional run; default False
apply_dist_corr (bool; optional): look for fmap files and perform distortion correction; default False
        apply_smooth (int/float/list; optional): smoothing to perform in FWHM mm; if a list is provided will create outputs for each smoothing kernel separately; default False
        apply_filter (float/list; optional): low-pass filter cut-off(s) in Hz; if a list is provided will create outputs for each filter cut-off separately. With high temporal resolution scans, .25Hz is a decent cut-off for removing respiratory artifacts; default False
mni_template (str; optional): which mm resolution template to use, e.g. '3mm'; default '2mm'
apply_n4 (bool; optional): perform N4 Bias Field correction on the anatomical image; default true
ants_threads (int; optional): number of threads ANTs should use for its processes; default 8
readable_crash_files (bool; optional): should nipype crash files be saved as txt? This makes them easily readable, but sometimes interferes with nipype's ability to use cached results of successfully run nodes (i.e. picking up where it left off after bugs are fixed); default False
Examples:
>>> from cosanlab_preproc.wfmaker import wfmaker
>>> # Create workflow that performs no distortion correction, trims first 5 TRs, no filtering, 6mm smoothing, and normalizes to 2mm MNI space. Run it with 16 cores.
>>>
>>> workflow = wfmaker(
project_dir = '/data/project',
raw_dir = 'raw',
apply_trim = 5)
>>>
>>> workflow.run('MultiProc',plugin_args = {'n_procs': 16})
>>>
>>> # Create workflow that performs distortion correction, trims first 25 TRs, no filtering and filtering .25hz, 6mm and 8mm smoothing, and normalizes to 3mm MNI space. Run it serially (will be super slow!).
>>>
>>> workflow = wfmaker(
project_dir = '/data/project',
raw_dir = 'raw',
apply_trim = 25,
apply_dist_corr = True,
apply_filter = [0, .25],
apply_smooth = [6.0, 8.0],
                mni_template = '3mm')
>>>
>>> workflow.run()
"""
##################
### PATH SETUP ###
##################
if mni_template not in ['1mm','2mm','3mm']:
raise ValueError("MNI template must be: 1mm, 2mm, or 3mm")
data_dir = os.path.join(project_dir,raw_dir)
output_dir = os.path.join(project_dir,'preprocessed')
output_final_dir = os.path.join(output_dir,'final')
output_interm_dir = os.path.join(output_dir,'intermediate')
log_dir = os.path.join(project_dir,'logs','nipype')
if not os.path.exists(output_final_dir):
os.makedirs(output_final_dir)
if not os.path.exists(output_interm_dir):
os.makedirs(output_interm_dir)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
# Set MNI template
MNItemplate = os.path.join(get_resource_path(),'MNI152_T1_' + mni_template + '_brain.nii.gz')
MNImask = os.path.join(get_resource_path(),'MNI152_T1_' + mni_template + '_brain_mask.nii.gz')
MNItemplatehasskull = os.path.join(get_resource_path(),'MNI152_T1_' + mni_template + '.nii.gz')
# Set ANTs files
bet_ants_template = os.path.join(get_resource_path(),'OASIS_template.nii.gz')
bet_ants_prob_mask = os.path.join(get_resource_path(),'OASIS_BrainCerebellumProbabilityMask.nii.gz')
bet_ants_registration_mask = os.path.join(get_resource_path(),'OASIS_BrainCerebellumRegistrationMask.nii.gz')
#################################
### NIPYPE IMPORTS AND CONFIG ###
#################################
# Update nipype global config because workflow.config[] = ..., doesn't seem to work
# Can't store nipype config/rc file in container anyway so set them globaly before importing and setting up workflow as suggested here: http://nipype.readthedocs.io/en/latest/users/config_file.html#config-file
from nipype import config
if readable_crash_files:
cfg = dict(execution={'crashfile_format':'txt'})
config.update_config(cfg)
config.update_config({'logging':{'log_directory':log_dir,'log_to_file':True}})
from nipype import logging
logging.update_logging(config)
# Now import everything else
from nipype.interfaces.io import DataSink
from nipype.interfaces.utility import Merge, IdentityInterface
from nipype.pipeline.engine import Node, Workflow
from nipype.interfaces.nipy.preprocess import ComputeMask
from nipype.algorithms.rapidart import ArtifactDetect
from nipype.interfaces.ants.segmentation import BrainExtraction, N4BiasFieldCorrection
from nipype.interfaces.ants import Registration, ApplyTransforms
from nipype.interfaces.fsl import MCFLIRT, TOPUP, ApplyTOPUP
from nipype.interfaces.fsl.maths import MeanImage
from nipype.interfaces.fsl import Merge as MERGE
from nipype.interfaces.fsl.utils import Smooth
from nipype.interfaces.nipy.preprocess import Trim
from .interfaces import Plot_Coregistration_Montage,Plot_Quality_Control,Plot_Realignment_Parameters,Create_Covariates,Down_Sample_Precision,Create_Encoding_File, Filter_In_Mask
##################
### INPUT NODE ###
##################
layout = BIDSLayout(data_dir)
    # Dartmouth subjects are named with the sub- prefix; handle whether we receive an integer identifier for indexing or the full subject id with prefix
if isinstance(subject_id, six.string_types):
subId = subject_id[4:]
elif isinstance(subject_id, int):
subId = layout.get_subjects()[subject_id]
subject_id = 'sub-' + subId
else:
raise TypeError("subject_id should be a string or integer")
#Get anat file location
anat = layout.get(subject=subId,type='T1w',extensions='.nii.gz')[0].filename
#Get functional file locations
if task_name:
funcs = [f.filename for f in layout.get(subject=subId,type='bold',task=task_name,extensions='.nii.gz')]
else:
funcs = [f.filename for f in layout.get(subject=subId,type='bold',extensions='.nii.gz')]
    #Turn functional file list into iterable Node
func_scans = Node(IdentityInterface(fields=['scan']),name='func_scans')
func_scans.iterables = ('scan',funcs)
#Get TR for use in filtering below; we're assuming all BOLD runs have the same TR
tr_length = layout.get_metadata(funcs[0])['RepetitionTime']
#####################################
## TRIM ##
#####################################
if apply_trim:
trim = Node(Trim(),name = 'trim')
trim.inputs.begin_index = apply_trim
#####################################
## DISTORTION CORRECTION ##
#####################################
if apply_dist_corr:
#Get fmap file locations
fmaps = [f.filename for f in layout.get(subject=subId,modality='fmap',extensions='.nii.gz')]
if not fmaps:
raise IOError("Distortion Correction requested but field map scans not found...")
#Get fmap metadata
totalReadoutTimes, measurements, fmap_pes = [],[],[]
for i, fmap in enumerate(fmaps):
# Grab total readout time for each fmap
totalReadoutTimes.append(layout.get_metadata(fmap)['TotalReadoutTime'])
            # Grab measurements (pyBIDS doesn't expose the dcm_meta... fields from the side-car json file, and neither does json.load, so instead just read the header using nibabel to determine the number of scans)
measurements.append(nib.load(fmap).header['dim'][4])
# Get phase encoding direction
fmap_pe = layout.get_metadata(fmap)["PhaseEncodingDirection"]
fmap_pes.append(fmap_pe)
encoding_file_writer = Node(interface=Create_Encoding_File(),name='create_encoding')
encoding_file_writer.inputs.totalReadoutTimes = totalReadoutTimes
encoding_file_writer.inputs.fmaps = fmaps
encoding_file_writer.inputs.fmap_pes = fmap_pes
encoding_file_writer.inputs.measurements = measurements
encoding_file_writer.inputs.file_name='encoding_file.txt'
merge_to_file_list = Node(interface=Merge(2), infields = ['in1','in2'],name='merge_to_file_list')
merge_to_file_list.inputs.in1 = fmaps[0]
        merge_to_file_list.inputs.in2 = fmaps[1]
#Merge AP and PA distortion correction scans
merger = Node(interface=MERGE(dimension='t'),name='merger')
merger.inputs.output_type = 'NIFTI_GZ'
merger.inputs.in_files = fmaps
merger.inputs.merged_file = 'merged_epi.nii.gz'
#Create distortion correction map
topup = Node(interface=TOPUP(),name='topup')
topup.inputs.output_type = 'NIFTI_GZ'
#Apply distortion correction to other scans
apply_topup = Node(interface=ApplyTOPUP(),name='apply_topup')
apply_topup.inputs.output_type = 'NIFTI_GZ'
apply_topup.inputs.method = 'jac'
apply_topup.inputs.interp = 'spline'
###################################
### REALIGN ###
###################################
realign_fsl = Node(MCFLIRT(),name="realign")
realign_fsl.inputs.cost = 'mutualinfo'
realign_fsl.inputs.mean_vol = True
realign_fsl.inputs.output_type = 'NIFTI_GZ'
realign_fsl.inputs.save_mats = True
realign_fsl.inputs.save_rms = True
realign_fsl.inputs.save_plots = True
###################################
### MEAN EPIs ###
###################################
#For coregistration after realignment
mean_epi = Node(MeanImage(),name='mean_epi')
mean_epi.inputs.dimension = 'T'
#For after normalization is done to plot checks
mean_norm_epi = Node(MeanImage(),name='mean_norm_epi')
mean_norm_epi.inputs.dimension = 'T'
###################################
### MASK, ART, COV CREATION ###
###################################
compute_mask = Node(ComputeMask(), name='compute_mask')
compute_mask.inputs.m = .05
art = Node(ArtifactDetect(),name='art')
art.inputs.use_differences = [True, False]
art.inputs.use_norm = True
art.inputs.norm_threshold = 1
art.inputs.zintensity_threshold = 3
art.inputs.mask_type = 'file'
art.inputs.parameter_source = 'FSL'
make_cov = Node(Create_Covariates(),name='make_cov')
################################
### N4 BIAS FIELD CORRECTION ###
################################
if apply_n4:
n4_correction = Node(N4BiasFieldCorrection(), name='n4_correction')
n4_correction.inputs.copy_header = True
n4_correction.inputs.save_bias = False
n4_correction.inputs.num_threads = ants_threads
n4_correction.inputs.input_image = anat
###################################
### BRAIN EXTRACTION ###
###################################
brain_extraction_ants = Node(BrainExtraction(),name='brain_extraction')
brain_extraction_ants.inputs.dimension = 3
brain_extraction_ants.inputs.use_floatingpoint_precision = 1
brain_extraction_ants.inputs.num_threads = ants_threads
brain_extraction_ants.inputs.brain_probability_mask = bet_ants_prob_mask
brain_extraction_ants.inputs.keep_temporary_files = 1
brain_extraction_ants.inputs.brain_template = bet_ants_template
brain_extraction_ants.inputs.extraction_registration_mask = bet_ants_registration_mask
brain_extraction_ants.inputs.out_prefix = 'bet'
###################################
### COREGISTRATION ###
###################################
coregistration = Node(Registration(), name='coregistration')
coregistration.inputs.float = False
coregistration.inputs.output_transform_prefix = "meanEpi2highres"
coregistration.inputs.transforms = ['Rigid']
    coregistration.inputs.transform_parameters = [(0.1,)]  # one parameter tuple per transform
coregistration.inputs.number_of_iterations = [[1000,500,250,100]]
coregistration.inputs.dimension = 3
coregistration.inputs.num_threads = ants_threads
coregistration.inputs.write_composite_transform = True
coregistration.inputs.collapse_output_transforms = True
coregistration.inputs.metric = ['MI']
coregistration.inputs.metric_weight = [1]
coregistration.inputs.radius_or_number_of_bins = [32]
coregistration.inputs.sampling_strategy = ['Regular']
coregistration.inputs.sampling_percentage = [0.25]
coregistration.inputs.convergence_threshold = [1e-08]
coregistration.inputs.convergence_window_size = [10]
coregistration.inputs.smoothing_sigmas = [[3,2,1,0]]
coregistration.inputs.sigma_units = ['mm']
coregistration.inputs.shrink_factors = [[4,3,2,1]]
coregistration.inputs.use_estimate_learning_rate_once = [True]
coregistration.inputs.use_histogram_matching = [False]
coregistration.inputs.initial_moving_transform_com = True
coregistration.inputs.output_warped_image = True
coregistration.inputs.winsorize_lower_quantile = 0.01
coregistration.inputs.winsorize_upper_quantile = 0.99
###################################
### NORMALIZATION ###
###################################
# Settings Explanations
# Only a few key settings are worth adjusting and most others relate to how ANTs optimizer starts or iterates and won't make a ton of difference
    # These settings were referred to as the last "best tested" for aligning fMRI data: https://github.com/ANTsX/ANTsRCore/blob/master/R/antsRegistration.R#L275
# Things that matter the most:
# smoothing_sigmas:
# how much gaussian smoothing to apply when performing registration, probably want the upper limit of this to match the resolution that the data is collected at e.g. 3mm
# Old settings [[3,2,1,0]]*3
# shrink_factors
# The coarseness with which to do registration
# Old settings [[8,4,2,1]] * 3
    # >= 8 may result in some problems, causing big chunks of cortex with little fine-grained spatial structure to be moved to other parts of cortex
# Other settings
# transform_parameters:
# how much regularization to do for fitting that transformation
# for syn this pertains to both the gradient regularization term, and the flow, and elastic terms. Leave the syn settings alone as they seem to be the most well tested across published data sets
# radius_or_number_of_bins
# This is the bin size for MI metrics and 32 is probably adequate for most use cases. Increasing this might increase precision (e.g. to 64) but takes exponentially longer
# use_histogram_matching
# Use image intensity distribution to guide registration
# Leave it on for within modality registration (e.g. T1 -> MNI), but off for between modality registration (e.g. EPI -> T1)
# convergence_threshold
# threshold for optimizer
# convergence_window_size
# how many samples should optimizer average to compute threshold?
# sampling_strategy
    # what strategy ANTs should use to sample voxels when evaluating the metric. Regular here refers to approximately regular (pseudo-random) sampling around the center of the image mass
normalization = Node(Registration(),name='normalization')
normalization.inputs.float = False
normalization.inputs.collapse_output_transforms=True
normalization.inputs.convergence_threshold=[1e-06,1e-06,1e-07]
normalization.inputs.convergence_window_size=[10]
normalization.inputs.dimension = 3
normalization.inputs.fixed_image = MNItemplate
normalization.inputs.initial_moving_transform_com=True
normalization.inputs.metric=['MI', 'MI', 'CC']
normalization.inputs.metric_weight=[1.0]*3
normalization.inputs.number_of_iterations=[[1000, 500, 250, 100],
[1000, 500, 250, 100],
[100, 70, 50, 20]]
normalization.inputs.num_threads= ants_threads
normalization.inputs.output_transform_prefix = 'anat2template'
normalization.inputs.output_inverse_warped_image=True
normalization.inputs.output_warped_image = True
normalization.inputs.radius_or_number_of_bins=[32, 32, 4]
normalization.inputs.sampling_percentage=[0.25, 0.25, 1]
normalization.inputs.sampling_strategy=['Regular',
'Regular',
'None']
normalization.inputs.shrink_factors=[[4, 3, 2, 1]]*3
normalization.inputs.sigma_units=['vox']*3
    normalization.inputs.smoothing_sigmas = [[3, 2, 1, 0]] * 3  # each stage needs one sigma per level to match number_of_iterations and shrink_factors
normalization.inputs.transforms = ['Rigid','Affine','SyN']
normalization.inputs.transform_parameters=[(0.1,),
(0.1,),
(0.1, 3.0, 0.0)]
normalization.inputs.use_histogram_matching=True
normalization.inputs.winsorize_lower_quantile=0.005
normalization.inputs.winsorize_upper_quantile=0.995
normalization.inputs.write_composite_transform=True
###################################
### APPLY TRANSFORMS AND SMOOTH ###
###################################
merge_transforms = Node(Merge(2), iterfield=['in2'], name ='merge_transforms')
# Used for epi -> mni, via (coreg + norm)
apply_transforms = Node(ApplyTransforms(),iterfield=['input_image'],name='apply_transforms')
apply_transforms.inputs.input_image_type = 3
apply_transforms.inputs.float = False
    apply_transforms.inputs.num_threads = ants_threads
apply_transforms.inputs.environ = {}
apply_transforms.inputs.interpolation = 'BSpline'
apply_transforms.inputs.invert_transform_flags = [False, False]
apply_transforms.inputs.reference_image = MNItemplate
# Used for t1 segmented -> mni, via (norm)
apply_transform_seg = Node(ApplyTransforms(),name='apply_transform_seg')
apply_transform_seg.inputs.input_image_type = 3
apply_transform_seg.inputs.float = False
    apply_transform_seg.inputs.num_threads = ants_threads
apply_transform_seg.inputs.environ = {}
apply_transform_seg.inputs.interpolation = 'MultiLabel'
apply_transform_seg.inputs.invert_transform_flags = [False]
apply_transform_seg.inputs.reference_image = MNItemplate
###################################
### PLOTS ###
###################################
plot_realign = Node(Plot_Realignment_Parameters(),name="plot_realign")
plot_qa = Node(Plot_Quality_Control(),name="plot_qa")
plot_normalization_check = Node(Plot_Coregistration_Montage(),name="plot_normalization_check")
plot_normalization_check.inputs.canonical_img = MNItemplatehasskull
############################################
### FILTER, SMOOTH, DOWNSAMPLE PRECISION ###
############################################
#Use cosanlab_preproc for down sampling
down_samp = Node(Down_Sample_Precision(),name="down_samp")
#Use FSL for smoothing
if apply_smooth:
smooth = Node(Smooth(),name='smooth')
if isinstance(apply_smooth, list):
smooth.iterables = ("fwhm",apply_smooth)
elif isinstance(apply_smooth, int) or isinstance(apply_smooth, float):
smooth.inputs.fwhm = apply_smooth
else:
raise ValueError("apply_smooth must be a list or int/float")
#Use cosanlab_preproc for low-pass filtering
if apply_filter:
lp_filter = Node(Filter_In_Mask(),name='lp_filter')
lp_filter.inputs.mask = MNImask
lp_filter.inputs.sampling_rate = tr_length
lp_filter.inputs.high_pass_cutoff = 0
if isinstance(apply_filter,list):
lp_filter.iterables = ("low_pass_cutoff",apply_filter)
elif isinstance(apply_filter, int) or isinstance(apply_filter, float):
lp_filter.inputs.low_pass_cutoff = apply_filter
else:
raise ValueError("apply_filter must be a list or int/float")
###################
### OUTPUT NODE ###
###################
#Collect all final outputs in the output dir and get rid of file name additions
datasink = Node(DataSink(),name='datasink')
datasink.inputs.base_directory = output_final_dir
datasink.inputs.container = subject_id
# Remove substitutions
data_dir_parts = data_dir.split('/')[1:]
prefix = ['_scan_'] + data_dir_parts + [subject_id] + ['func']
func_scan_names = [os.path.split(elem)[-1] for elem in funcs]
to_replace = []
for elem in func_scan_names:
bold_name = elem.split(subject_id + '_')[-1]
bold_name = bold_name.split('.nii.gz')[0]
to_replace.append(('..'.join(prefix + [elem]), bold_name))
datasink.inputs.substitutions = to_replace
#####################
### INIT WORKFLOW ###
#####################
workflow = Workflow(name=subId)
workflow.base_dir = output_interm_dir
############################
######### PART (1a) #########
# func -> discorr -> trim -> realign
# OR
# func -> trim -> realign
# OR
# func -> discorr -> realign
# OR
# func -> realign
############################
if apply_dist_corr:
workflow.connect([
(encoding_file_writer, topup,[('encoding_file','encoding_file')]),
(encoding_file_writer, apply_topup,[('encoding_file','encoding_file')]),
(merger,topup,[('merged_file','in_file')]),
(func_scans,apply_topup,[('scan','in_files')]),
(topup,apply_topup,[('out_fieldcoef','in_topup_fieldcoef'),
('out_movpar','in_topup_movpar')])
])
if apply_trim:
# Dist Corr + Trim
workflow.connect([
(apply_topup,trim,[('out_corrected','in_file')]),
(trim, realign_fsl, [('out_file','in_file')])
])
else:
# Dist Corr + No Trim
workflow.connect([
(apply_topup,realign_fsl,[('out_corrected','in_file')])
])
else:
if apply_trim:
# No Dist Corr + Trim
workflow.connect([
(func_scans, trim, [('scan','in_file')]),
(trim, realign_fsl, [('out_file','in_file')])
])
else:
# No Dist Corr + No Trim
workflow.connect([
(func_scans, realign_fsl, [('scan','in_file')]),
])
############################
######### PART (1n) #########
# anat -> N4 -> bet
# OR
# anat -> bet
############################
if apply_n4:
workflow.connect([
(n4_correction, brain_extraction_ants, [('output_image','anatomical_image')])
])
else:
brain_extraction_ants.inputs.anatomical_image = anat
##########################################
############### PART (2) #################
# realign -> coreg -> mni (via t1)
# t1 -> mni
# covariate creation
# plot creation
###########################################
workflow.connect([
(realign_fsl, plot_realign, [('par_file','realignment_parameters')]),
(realign_fsl, plot_qa, [('out_file','dat_img')]),
(realign_fsl, art, [('out_file','realigned_files'),
('par_file','realignment_parameters')]),
(realign_fsl, mean_epi, [('out_file','in_file')]),
(realign_fsl, make_cov, [('par_file','realignment_parameters')]),
(mean_epi, compute_mask, [('out_file','mean_volume')]),
(compute_mask, art, [('brain_mask','mask_file')]),
(art, make_cov, [('outlier_files','spike_id')]),
(art, plot_realign, [('outlier_files','outliers')]),
(plot_qa, make_cov, [('fd_outliers','fd_outliers')]),
(brain_extraction_ants, coregistration, [('BrainExtractionBrain','fixed_image')]),
(mean_epi, coregistration, [('out_file','moving_image')]),
(brain_extraction_ants, normalization, [('BrainExtractionBrain','moving_image')]),
(coregistration, merge_transforms, [('composite_transform','in2')]),
(normalization, merge_transforms, [('composite_transform','in1')]),
(merge_transforms, apply_transforms, [('out','transforms')]),
(realign_fsl, apply_transforms, [('out_file','input_image')]),
(apply_transforms, mean_norm_epi, [('output_image','in_file')]),
(normalization, apply_transform_seg, [('composite_transform','transforms')]),
(brain_extraction_ants, apply_transform_seg, [('BrainExtractionSegmentation','input_image')]),
(mean_norm_epi, plot_normalization_check, [('out_file','wra_img')])
])
##################################################
################### PART (3) #####################
# epi (in mni) -> filter -> smooth -> down sample
# OR
# epi (in mni) -> filter -> down sample
# OR
# epi (in mni) -> smooth -> down sample
# OR
# epi (in mni) -> down sample
###################################################
if apply_filter:
workflow.connect([
(apply_transforms, lp_filter, [('output_image','in_file')])
])
if apply_smooth:
# Filtering + Smoothing
workflow.connect([
(lp_filter, smooth, [('out_file','in_file')]),
(smooth, down_samp, [('smoothed_file','in_file')])
])
else:
# Filtering + No Smoothing
workflow.connect([
(lp_filter, down_samp, [('out_file','in_file')])
])
else:
if apply_smooth:
# No Filtering + Smoothing
workflow.connect([
(apply_transforms, smooth, [('output_image', 'in_file')]),
(smooth, down_samp, [('smoothed_file','in_file')])
])
else:
# No Filtering + No Smoothing
workflow.connect([
(apply_transforms, down_samp, [('output_image', 'in_file')])
])
##########################################
############### PART (4) #################
# down sample -> save
# plots -> save
# covs -> save
# t1 (in mni) -> save
# t1 segmented masks (in mni) -> save
##########################################
workflow.connect([
(down_samp, datasink, [('out_file','functional.@down_samp')]),
(plot_realign, datasink, [('plot','functional.@plot_realign')]),
(plot_qa, datasink, [('plot','functional.@plot_qa')]),
(plot_normalization_check, datasink, [('plot','functional.@plot_normalization')]),
(make_cov, datasink, [('covariates','functional.@covariates')]),
(normalization, datasink, [('warped_image','structural.@normanat')]),
(apply_transform_seg, datasink,[('output_image','structural.@normanatseg')])
])
if not os.path.exists(os.path.join(output_dir,'pipeline.png')):
workflow.write_graph(dotfilename=os.path.join(output_dir,'pipeline'),format='png')
print(f"Creating workflow for subject: {subject_id}")
if ants_threads == 8:
print(f"ANTs will utilize the default of {ants_threads} threads for parallel processing.")
else:
print(f"ANTs will utilize the user-requested {ants_threads} threads for parallel processing.")
return workflow
| 2.234375 | 2 |
app/api/rest/base.py | dolfinus/cryptonite | 0 | 12767864 | <reponame>dolfinus/cryptonite
""" API Backend - Base Resource Models """
from flask_restful import Resource, abort
from api import api_rest
from api.security import multi_auth, restricted, restricted_or_current
class BaseResource(Resource):
    def options(self, *args, **kwargs):
return None, 200, {
'Allow': 'GET, POST, PUT, DELETE',
'Access-Control-Allow-Methods': 'GET, POST, PUT, DELETE',
'Access-Control-Allow-Headers': 'Content-Type,Authorization,authorization,Token'
}
def get(self, *args, **kwargs):
abort(405)
def post(self, *args, **kwargs):
abort(405)
def put(self, *args, **kwargs):
abort(405)
def patch(self, *args, **kwargs):
abort(405)
def delete(self, *args, **kwargs):
abort(405)
class SecureResource(BaseResource):
method_decorators = [multi_auth.login_required]
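

# A hedged illustration of the intended subclassing pattern (the endpoint
# path and payload below are hypothetical, not part of this project):
#
# @rest_resource
# class PingResource(SecureResource):
#     endpoints = ['/ping']
#
#     def get(self):
#         return {'pong': True}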
def rest_resource(resource_cls):
""" Decorator for adding resources to Api App """
api_rest.add_resource(resource_cls, *resource_cls.endpoints) | 2.390625 | 2 |
tcmba/spiders/processos.py | DadosAbertosDeFeira/tcm-ba | 2 | 12767865 | from datetime import datetime
from scrapy import FormRequest, Request, Spider
from tcmba.items import ProcessItem
class ProcessesSpider(Spider):
name = "processos"
    allowed_domains = ["www.tcm.ba.gov.br"]  # hostname only; Scrapy ignores paths here
start_urls = [
"https://www.tcm.ba.gov.br/consulta/jurisprudencia/consulta-ementario-juridico/#todos/" # noqa
]
handle_httpstatus_list = [302]
def parse(self, response):
descriptions = response.css("table#tabela tr td a span::text").extract()
file_urls = response.css("table#tabela tr td a::attr(href)").extract()
process_numbers = response.css(
"table#tabela tr td:nth-child(1)::text"
).extract()
assert len(descriptions) == len(file_urls) == len(process_numbers)
for process_number, description, file_url in zip(
process_numbers, descriptions, file_urls
):
item = ProcessItem(
process_number=process_number,
description=description,
file_url=file_url,
crawled_at=datetime.now(),
)
yield Request(
"https://www.tcm.ba.gov.br/consulta-processual/",
dont_filter=True,
callback=self.parse_process,
meta={"item": item},
)
def parse_process(self, response):
yield FormRequest.from_response(
response,
method="POST",
dont_filter=True,
formxpath='.//form[@name="formProtocolo"]',
formdata={
"proc": response.meta["item"]["process_number"],
"consulta": "ok",
"B1": "+Consultar+",
},
callback=self.parse_details,
meta={"item": response.meta["item"]},
)
def get_history(self, table):
units = table.css("td:nth-child(1)")
entry_dates = table.css("td:nth-child(2)")
statuses = table.css("td:nth-child(3)")
notes = table.css("td:nth-child(4)")
history = []
for unit, entry_date, status, note in zip(units, entry_dates, statuses, notes):
unit_str = unit.css("::text").get()
entry_date_str = entry_date.css("::text").get()
status_str = status.css("::text").get()
note_str = note.css("::text").get()
history.append(
{
"unity": unit_str.strip() if unit_str else "",
"entry_date": entry_date_str.strip() if entry_date_str else "",
"situation": status_str.strip() if status_str else "",
"notes": note_str.strip() if note_str else "",
}
)
return history
def get_field(self, response, label):
field_str = response.xpath(
f"//label[contains(text(),'{label}')]/following-sibling::span/text()"
).get()
if field_str:
return field_str.strip()
return ""
def parse_details(self, response):
item = response.meta["item"]
item["process_at"] = response.css("div.subtitle span::text").get()
item["entry_at"] = self.get_field(response, "Data de Entrada:")
item["nature"] = self.get_field(response, "Natureza:")
item["complement"] = self.get_field(response, "Complemento:")
item["city"] = self.get_field(response, "Município:")
item["author"] = self.get_field(response, "Interessado/Autor:")
item["received"] = self.get_field(response, "Recebido(S/N):")
item["last_update_at"] = self.get_field(response, "Data:")
item["unit"] = self.get_field(response, "Unidade:")
item["history"] = self.get_history(response.css("table#tabelaResultado"))
item["number_of_origin_document"] = self.get_field(
response, "Nº Doc.de Origem:"
)
item["entrance"] = self.get_field(response, "Meio:")
item["document_date"] = self.get_field(response, "Data do Documento:")
item["attachments"] = self.get_field(response, "Anexos:")
item["notes"] = self.get_field(response, "Observações:")
item["place_of_origin"] = self.get_field(response, "Local de Origem:")
yield item
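

# To run locally (assuming this sits in a standard Scrapy project layout for
# this repo): scrapy crawl processos -o processos.json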
| 2.5625 | 3 |
tests/test_contrib_debug_toolbar_flask.py | proofit404/userstories | 187 | 12767866 | <gh_stars>100-1000
import pytest
@pytest.mark.xfail
def test_contrib_is_available():
from stories.contrib.debug_toolbars.flask import StoriesPanel # noqa: F401
| 1.234375 | 1 |
utils/__init__.py | leoHeidel/dog-face-dataset | 1 | 12767867 | <reponame>leoHeidel/dog-face-dataset<filename>utils/__init__.py
from .image_utils import *
| 0.902344 | 1 |
tests/test_mixins.py | Nollde/order | 6 | 12767868 | <gh_stars>1-10
# coding: utf-8
__all__ = [
"CopyMixinTest", "AuxDataMixinTest", "TagMixinTest", "DataSourceMixinTest",
"SelectionMixinTest", "LabelMixinTest", "ColorMixinTest",
]
import unittest
from order import (
CopyMixin, AuxDataMixin, TagMixin, DataSourceMixin, SelectionMixin, LabelMixin, ColorMixin,
)
class CopyMixinTest(unittest.TestCase):
def make_class(self):
class C(CopyMixin):
copy_specs = ["name", "id"]
def __init__(self, name, id=0):
super(C, self).__init__()
self.name = name
self.id = id
return C
def test_plain(self):
C = self.make_class()
a = C("foo", 1)
b = a.copy()
self.assertIsInstance(b, C)
self.assertEqual(b.name, "foo")
self.assertEqual(b.id, 1)
def test_class(self):
C = self.make_class()
a = C("foo", 1)
class D(C):
pass
b = a.copy(_cls=D)
self.assertIsInstance(b, D)
self.assertEqual(b.name, "foo")
self.assertEqual(b.id, 1)
def test_replace_specs(self):
C = self.make_class()
a = C("foo", 1)
b = a.copy()
self.assertEqual(b.name, a.name)
self.assertEqual(b.id, a.id)
c = a.copy(_specs=["name"], _replace_specs=True)
self.assertEqual(c.name, a.name)
self.assertEqual(c.id, 0)
def test_ref(self):
C = self.make_class()
o = object()
class D(C):
copy_specs = C.copy_specs + [{"attr": "o", "ref": True}]
def __init__(self, name, id=0, o=None):
super(D, self).__init__(name, id=id)
self.o = o
d = D("foo", 1, o)
d2 = d.copy()
self.assertEqual(d.o, d2.o)
d3 = d.copy(_specs=[{"attr": "o", "ref": False}])
self.assertNotEqual(d.o, d3.o)
def test_shallow(self):
C = self.make_class()
d = {"a": [1]}
class D(C):
copy_specs = C.copy_specs + [{"attr": "d", "shallow": True}]
def __init__(self, name, id=0, d=None):
super(D, self).__init__(name, id=id)
self.d = d
a = D("foo", 1, d)
b = a.copy()
self.assertEqual(len(a.d["a"]), 1)
self.assertEqual(len(b.d["a"]), 1)
a.d["a"].append(2)
self.assertEqual(len(a.d["a"]), 2)
self.assertEqual(len(b.d["a"]), 2)
c = a.copy(_specs=[{"attr": "d", "shallow": False}])
self.assertEqual(len(a.d["a"]), 2)
self.assertEqual(len(c.d["a"]), 2)
a.d["a"].append(3)
self.assertEqual(len(a.d["a"]), 3)
self.assertEqual(len(c.d["a"]), 2)
def test_use_setter(self):
C = self.make_class()
class D(C):
copy_specs = C.copy_specs + [{"attr": "value", "use_setter": True}]
def __init__(self, *args, **kwargs):
super(D, self).__init__(*args, **kwargs)
self.value = 123
a = D("foo", 1)
b = a.copy()
self.assertEqual(b.value, a.value)
def test_different_dst_src(self):
C = self.make_class()
class D(C):
copy_specs = C.copy_specs + [{"src": "foo", "dst": "bar"}]
def __init__(self, name, id=0, bar=None):
super(D, self).__init__(name, id=id)
self.foo = bar
a = D("foo", 1, 123)
b = a.copy()
self.assertEqual(b.foo, a.foo)
def test_skip(self):
C = self.make_class()
class D(C):
copy_specs = C.copy_specs + ["some_attr"]
def __init__(self, name, id=0, some_attr=None):
super(D, self).__init__(name, id=id)
self.some_attr = some_attr
a = D("foo", 1, 123)
b = a.copy()
self.assertEqual(b.some_attr, a.some_attr)
c = a.copy(_skip=["id", "some_attr"])
self.assertEqual(c.name, a.name)
self.assertEqual(c.id, 0)
self.assertIsNone(c.some_attr)
class AuxDataMixinTest(unittest.TestCase):
def test_constructor(self):
c = AuxDataMixin()
self.assertEqual(len(c.aux), 0)
c = AuxDataMixin(aux={"foo": "bar"})
self.assertEqual(len(c.aux), 1)
def test_methods(self):
c = AuxDataMixin()
c.set_aux("foo", "bar")
self.assertEqual(len(c.aux), 1)
self.assertEqual(c.get_aux("foo"), "bar")
self.assertEqual(c.aux["foo"], "bar")
self.assertTrue(c.has_aux("foo"))
self.assertFalse(c.has_aux("foo2"))
c.remove_aux("foo")
self.assertFalse(c.has_aux("foo"))
c.set_aux("foo", "bar")
self.assertTrue(c.has_aux("foo"))
c.clear_aux()
self.assertFalse(c.has_aux("foo"))
def test_x(self):
c = AuxDataMixin(aux={"foo": "bar"})
self.assertEqual(c.x.foo, "bar")
with self.assertRaises(AttributeError):
c.x.nonexisting
self.assertEqual(c.x("foo"), "bar")
with self.assertRaises(KeyError):
c.x("nonexisting")
class TagMixinTest(unittest.TestCase):
def test_constructor(self):
t = TagMixin()
self.assertEqual(len(t.tags), 0)
t = TagMixin(tags=["foo", "bar"])
self.assertEqual(len(t.tags), 2)
self.assertIsInstance(t.tags, set)
def test_add_remove(self):
t = TagMixin()
t.add_tag(("foo", "bar", "baz"))
self.assertEqual(len(t.tags), 3)
with self.assertRaises(TypeError):
t.tags = {}
with self.assertRaises(TypeError):
t.tags = [1]
t.remove_tag("baz")
self.assertEqual(len(t.tags), 2)
def test_has(self):
t = TagMixin(tags=["foo", "bar", "baz"])
self.assertTrue(t.has_tag("foo"))
self.assertTrue(t.has_tag("bar"))
self.assertFalse(t.has_tag("bar2"))
self.assertTrue(t.has_tag("foo*"))
self.assertTrue(t.has_tag("ba*"))
self.assertTrue(t.has_tag(("foo", "foo2")))
self.assertFalse(t.has_tag(("foo", "foo2"), mode=all))
self.assertTrue(t.has_tag(("foo", "baz"), mode=all))
class DataSourceMixinTest(unittest.TestCase):
def test_constructor(self):
c = DataSourceMixin()
self.assertFalse(c.is_data)
self.assertTrue(c.is_mc)
self.assertEqual(c.data_source, "mc")
c = DataSourceMixin(is_data=True)
self.assertTrue(c.is_data)
self.assertFalse(c.is_mc)
self.assertEqual(c.data_source, "data")
with self.assertRaises(TypeError):
DataSourceMixin(is_data={})
def test_setters(self):
c = DataSourceMixin()
self.assertEqual(c.data_source, "mc")
c.is_data = True
self.assertEqual(c.data_source, "data")
c.is_data = False
self.assertEqual(c.data_source, "mc")
c.is_mc = False
self.assertEqual(c.data_source, "data")
c.is_mc = True
self.assertEqual(c.data_source, "mc")
with self.assertRaises(TypeError):
c.is_mc = {}
class SelectionMixinTest(unittest.TestCase):
def test_constructor_root(self):
s = SelectionMixin("myBranchC > 0", selection_mode=SelectionMixin.MODE_ROOT)
self.assertEqual(s.selection, "myBranchC > 0")
s.add_selection("myBranchD < 100", bracket=True)
self.assertEqual(s.selection, "((myBranchC > 0) && (myBranchD < 100))")
s.add_selection("myBranchE < 1", op="or")
self.assertEqual(s.selection, "((myBranchC > 0) && (myBranchD < 100)) || (myBranchE < 1)")
s.add_selection("myWeight", op="*")
self.assertEqual(s.selection, "(((myBranchC > 0) && (myBranchD < 100)) || (myBranchE < 1)) "
"* (myWeight)")
def test_constructor_numexpr(self):
s = SelectionMixin("myBranchC > 0", selection_mode=SelectionMixin.MODE_NUMEXPR)
self.assertEqual(s.selection, "myBranchC > 0")
s.add_selection("myBranchD < 100", bracket=True)
self.assertEqual(s.selection, "((myBranchC > 0) & (myBranchD < 100))")
s.add_selection("myBranchE < 1", op="or")
self.assertEqual(s.selection, "((myBranchC > 0) & (myBranchD < 100)) | (myBranchE < 1)")
s.add_selection("myWeight", op="*")
self.assertEqual(s.selection, "(((myBranchC > 0) & (myBranchD < 100)) | (myBranchE < 1)) "
"* (myWeight)")
def test_selections(self):
s = SelectionMixin(selection_mode=SelectionMixin.MODE_ROOT)
s.selection = "myBranchC > 0"
self.assertEqual(s.selection, "myBranchC > 0")
s.add_selection("myBranchD > 0", op="||")
self.assertEqual(s.selection, "(myBranchC > 0) || (myBranchD > 0)")
s.selection = "myBranchC > 0"
s.add_selection("myBranchD > 0", op="||", bracket=True)
self.assertEqual(s.selection, "((myBranchC > 0) || (myBranchD > 0))")
s.selection = ["myBranchC > 0", "myBranchE > 0"]
self.assertEqual(s.selection, "(myBranchC > 0) && (myBranchE > 0)")
s.selection_mode = SelectionMixin.MODE_NUMEXPR
s.selection = ["myBranchC > 0", "myBranchE > 0"]
self.assertEqual(s.selection, "(myBranchC > 0) & (myBranchE > 0)")
class LabelMixinTest(unittest.TestCase):
def test_constructor(self):
l = LabelMixin(label=r"$\eq$ 3 jets", label_short="3j")
self.assertEqual(l.label, r"$\eq$ 3 jets")
self.assertEqual(l.label_short, "3j")
self.assertEqual(l.label_root, "#eq 3 jets")
l.label_short = None
self.assertEqual(l.label_short, l.label)
l.label = None
self.assertIsNone(l.label_short)
class ColorMixinTest(unittest.TestCase):
def test_constructor(self):
c = ColorMixin((0.5, 0.4, 0.3))
self.assertEqual(c.color_r, 0.5)
self.assertEqual(c.color_g, 0.4)
self.assertEqual(c.color_b, 0.3)
c = ColorMixin((255, 0, 255))
self.assertEqual(c.color_r, 1)
self.assertEqual(c.color_g, 0)
self.assertEqual(c.color_b, 1)
c = ColorMixin("#f0f")
self.assertEqual(c.color_r, 1)
self.assertEqual(c.color_g, 0)
self.assertEqual(c.color_b, 1)
c = ColorMixin("#ff00ff")
self.assertEqual(c.color_r, 1)
self.assertEqual(c.color_g, 0)
self.assertEqual(c.color_b, 1)
with self.assertRaises(ValueError):
c = ColorMixin("foo")
with self.assertRaises(ValueError):
c = ColorMixin((255, 255))
def test_setters_getters(self):
c = ColorMixin((0.5, 0.4, 0.3))
c.color_r = 255
self.assertEqual(c.color_r, 1)
self.assertEqual(c.color_r_int, 255)
c.color = (255, 0, 255, 0.5)
self.assertEqual(c.color_alpha, 0.5)
with self.assertRaises(ValueError):
c.color_r = -100
with self.assertRaises(ValueError):
c.color_g = 256
with self.assertRaises(ValueError):
c.color_b = 1.1
with self.assertRaises(ValueError):
c.color_alpha = 1.1
| 2.546875 | 3 |
front_end/vmipl_communication/connection.py | FlorianWestphal/VMI-PL | 0 | 12767869 | <filename>front_end/vmipl_communication/connection.py
import socket
import struct
import os
from vmipl.event_probes import EventProbe
from vmipl_aux.constants import Constants
class Message:
def __init__(self, msg_type, msglen, flags=0, seq=-1, payload=None):
self.type = msg_type
self.flags = flags
self.seq = seq
self.pid = -1
self.payload = payload
self.msglen = msglen
def send(self, conn):
if self.seq == -1:
self.seq = conn.seq()
self.pid = conn.pid
length = len(self.payload)
hdr = struct.pack("IHHII", length + 4 * 4, self.type,
self.flags, self.seq, self.pid)
conn.send(hdr + self.payload)
class Connection:
# begin with second netlink group, since the first is used for
# exchanging control data between execution environment and kernel
min_group_id = 1<<1
max_group_id = 1<<31
control_group_id = 1<<0
first_nl_family = 23
last_nl_family = 31
def __init__(self, nltype, groups=0, timeout=None):
self.descriptor = socket.socket(socket.AF_NETLINK,
socket.SOCK_RAW, nltype)
self.descriptor.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 65536)
self.descriptor.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 65536)
self.descriptor.bind((0, groups))
self.descriptor.settimeout(timeout)
self.pid, self.groups = self.descriptor.getsockname()
self._seq = 0
def send(self, msg):
self.descriptor.send(msg)
def seq(self):
self._seq += 1
return self._seq
def recv(self, bufs=16384):
contents, (nlpid, nlgrps) = self.descriptor.recvfrom(bufs)
# XXX: python doesn't give us message flags, check
# len(contents) vs. msglen for TRUNC
msglen, msg_type, flags, seq, pid = struct.unpack("IHHII",
contents[:16])
msg = Message(msg_type, msglen, flags, seq, contents[16:])
msg.pid = pid
if msg.type == Constants.nlmsg_error:
errno = -struct.unpack("i", msg.payload[:4])[0]
if errno != 0:
err = OSError("Netlink error: %s (%d)" % (
os.strerror(errno), errno))
err.errno = errno
raise err
return msg
def close(self):
self.descriptor.close()
def send_message(self, message_type, data, synchronous):
msg = Message(message_type, 0, 0, -1, data)
msg.send(self)
del msg
if synchronous:
reply = self.recv()
del reply
def send_probes(self, probes):
if len(probes) == 0:
return
        mid = len(probes) // 2  # integer division so the index stays valid on Python 3 too
self.send_message(probes[mid].message_type, probes[mid].convert_to_struct(), True)
if issubclass(probes[mid].__class__, EventProbe):
self.send_probes(probes[mid].data_probes)
self.send_probes(probes[:mid])
self.send_probes(probes[mid+1:])
def send_script(self, script):
self.send_message(script.message_type, script.convert_to_struct(), True)
sorted_probes = sorted(script.event_probes, key=lambda x: x.type)
self.send_probes(sorted_probes)
| 2.296875 | 2 |
props/graph_representation/parse_graph.py | kshabahang/props | 0 | 12767870 | import props.graph_representation.node
from props.graph_representation.graph_wrapper import GraphWrapper
from props.graph_representation.word import Word, NO_INDEX
from props.graph_representation.node import Node,CopularNode,PossessiveNode,PropNode,\
AppositionNode, PrepNode, CondNode, ConjunctionNode, advNode, RCMODPropNode,\
TimeNode, isTime, LocationNode, isLocation
from props.dependency_tree.definitions import adjectival_mod_dependencies, labels_ban,\
filter_labels_ban, condition_outcome_markers, reason_outcome_markers,\
comp_markers
import props.graph_utils
from props.proposition_structure import syntactic_item
from props.graph_representation import word
from time_annotator.timex_wrapper import timexWrapper
from mx.DateTime.ISO import ParseTime
from location_annotator.textual_location_annotator import textualLocationAnnotator
FIRST_ENTITY_LABEL = "entity"#"first_entity"
SECOND_ENTITY_LABEL = "entity"#"second_entity"
POSSESSOR_LABEL = "possessor"
POSSESSED_LABEL = "possessed"
COMP_LABEL = "comp"
DISCOURSE_LABEL = "discourse"
OUTCOME_LABEL = "outcome"
CONDITION_LABEL = "condition"
REASON_LABEL = "reason"
ADV_LABEL = "adverb"
SORUCE_LABEL = "source"
#types for appendix:
APPENDIX_PREP = "Prepositions"
APPENDIX_COP = "Copular"
APPENDIX_POSS = "Possessives"
APPENDIX_APPOS = "Appositions"
APPENDIX_ADJ = "Adjectives"
APPENDIX_VERB = "Verbal Predicates"
APPENDIX_COND = "Conditionals and Temporals"
APPENDIX_COMPLEMENT = "Clausal Complements"
APPENDIX_RCMOD = "Relative Clauses"
APPENDIX_CONJUNCTION = "Conjunctions"
APPENDIX_NEGATION = "Negation"
APPENDIX_PASSIVE = "Passive Voice"
APPENDIX_LEMMA = "Lemma"
APPENDIX_LOCATION = "Locations"
APPENDIX_MODAL = "Modal"
APPENDIX_EXISTENSIALS = "Existensials"
APPENDIX_TENSE = "Tense"
APPENDIX_TIME = "Time"
APPENDIX_RANGE = "Ranges"
APPENDIX_KEYS = (APPENDIX_ADJ,
APPENDIX_APPOS,
APPENDIX_COND,
APPENDIX_CONJUNCTION,
APPENDIX_COMPLEMENT,
APPENDIX_COP,
APPENDIX_EXISTENSIALS,
APPENDIX_LEMMA,
APPENDIX_LOCATION,
APPENDIX_MODAL,
APPENDIX_NEGATION,
APPENDIX_PASSIVE,
APPENDIX_POSS,
APPENDIX_PREP,
APPENDIX_RANGE,
APPENDIX_RCMOD,
APPENDIX_TENSE,
APPENDIX_TIME,
APPENDIX_VERB
)
class ParseGraph:
"""
class to bunch together all function of conversion from DepTree to digraph
Mainly in order to store the graph as a member which all these functions can edit.
"""
def __init__(self,t,locationAnnotator):
"""
initialize a graph class, followed by converting a tree
@type t: Tree
        @param t: syntactic tree to be converted
        @type locationAnnotator: textualLocationAnnotator
        @param locationAnnotator: annotator used to decide whether a phrase denotes a location
@type gr: digraph
@var gr: the graph representing t
"""
if not t.id: # meaning this is the ROOT element
self.tree = t.children[0]
else:
self.tree = t
self.gr = GraphWrapper(t.get_original_sentence())
self.locationAnnotator = locationAnnotator
# maintain an appendix for easier browsing
self.types = appendix_types()
self.parse(self.tree)
def parse(self,t):
"""
Get the graph representation from a syntactic representation
Returns through the graph parameter.
@type t: DepTree
@param tree: syntactic tree to be converted
@rtype: Node
@return: the node in the graph corresponding to the top node in t
"""
#order matters!
if t.is_conditional_predicate():
self.types.add(APPENDIX_COND)
return self.parseConditional(outcome = t._CONDITIONAL_PREDICATE_FEATURE_Outcome()["Value"],
condList = t.condPred)
if t._VERBAL_PREDICATE_SUBTREE_Adv():
advChildren = t.adverb_children
advSubj = t.adverb_subj
return self.parseAdverb(subj=advSubj,
advChildren=advChildren)
if t.is_conjunction_predicate():
self.types.add(APPENDIX_CONJUNCTION)
return self.parseConjunction(baseElm = t.baseElm,
conjResult = t.conjResult)
if t.is_appositional_predicate():
self.types.add(APPENDIX_APPOS)
firstEntity = t._APPOSITIONAL_PREDICATE_FEATURE_Left_Side()["Value"]
secondEntity = t._APPOSITIONAL_PREDICATE_FEATURE_Right_Side()["Value"]
return self.parseApposition(index = t.id,
first_entity=firstEntity,
second_entity=secondEntity)
if t.is_relative_clause():
self.types.add(APPENDIX_RCMOD)
return self.parseRcmod(np = t._RELCLAUSE_PREDICATE_FEATURE_Rest()['Value'],
modList = t.rcmodPred)
if t.is_prepositional_predicate():
self.types.add(APPENDIX_PREP)
return self.parsePreposition(psubj=t._PREPOSITIONAL_PREDICATE_FEATURE_psubj()["Value"],
prepChildList=t.prepChildList)
if t.is_copular_predicate():
self.types.add(APPENDIX_COP)
firstEntity = t._COPULAR_PREDICATE_FEATURE_Copular_Predicate()["Value"]
secondEntity = t._COPULAR_PREDICATE_FEATURE_Copular_Object()["Value"]
return self.parseCopular(index = t.id,
first_entity=firstEntity,
second_entity=secondEntity,
features = syntactic_item.get_verbal_features(t))
if t.is_possesive_predicate():
self.types.add(APPENDIX_POSS)
possessor = t._POSSESSIVE_PREDICATE_FEATURE_Possessor()["Value"]
possessed = t._POSSESSIVE_PREDICATE_FEATURE_Possessed()["Value"]
possessive = t._POSSESSIVE_PREDICATE_FEATURE_Possessive()["Value"]
return self.parsePossessive(possessor = possessor,
possessed = possessed,
possessive = possessive)
if t.is_adjectival_predicate():
self.types.add(APPENDIX_ADJ)
return self.parseProp(subject = t._ADJECTIVAL_PREDICATE_FEATURE_Subject()["Value"],
copulaIndex = NO_INDEX,
adjectiveChildList = t.adjectivalChildList,
propAsHead=False)
if t.is_clausal_complement():
self.types.add(APPENDIX_COMPLEMENT)
return self.parseComplement(compSubj = t.compSubj,
compChildren = t.compChildList)
if t.unhandled_advcl():
# put each unhandled advcl as a disconnected subgraph
for c in t.advcl:
self.parse(c)
return self.parse(t)
if t.is_verbal_predicate():
self.types.add(APPENDIX_VERB)
head_ret = t._VERBAL_PREDICATE_SUBTREE_Head()
return self.parseVerbal(indexes = head_ret["Span"],
verbs = head_ret["Value"].split(" "),
arguments = t.collect_arguments(),
tree = t)
else:
# fall back - pack all the tree in a single node
if len(t.children)==1:
if (t.children[0].parent_relation == "nn") and (t.word.endswith(",")) and (t.children[0].word.endswith(",")):
                    # conjunction in disguise; parseConjunction takes
                    # (baseElm, conjResult), so wrap the synthetic "and" and
                    # the detached child into a single conjResult entry
                    child = t.children[0]
                    t.children = []
                    ret = self.parseConjunction(baseElm = t,
                                                conjResult = [([(t.id,"and")], [child])])
                    t.children = [child]
                    return ret
nodes = t._get_subtree(filter_labels_ban)
text = [Word(index=index,
word=nodes[index]) for index in sorted(nodes.keys())]
topNode = self.parseBottom(text = sorted(text,key=lambda x:x.index),
features = syntactic_item.get_verbal_features(t))
return topNode
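    # Note on parse(): the construction tests above form an ordered cascade,
    # so a tree matching several constructions (e.g. a conditional that also
    # heads a verbal predicate) is handled by the first matching branch only.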
def parseBottom(self,text,features):
"""
Parse a node for which all other construction test has failed,
no tree structure is assumed over the input text.
@type text: list[Word]
@param text: words to appear at node, oredered by index
@type features: dict{string:string}
@param features: features of the node
@rtype Node
@return the node which was inserted into the graph
"""
time_res = timexWrapper(text)
if time_res[0]:
self.types.add(APPENDIX_TIME)
time_node = self.parseTime(time_res[0])
else:
time_node = False
s = " ".join([w.word for w in text])
if self.locationAnnotator.is_location(s):
locNode = LocationNode.init(features={})
self.gr.add_node(locNode)
bottomNode = Node(isPredicate=False,
text = text,
features = features,
valid=True)
self.gr.add_node(bottomNode)
self.gr.add_edge((locNode,bottomNode),
label="loc")
self.types.add(APPENDIX_LOCATION)
return locNode
left_text = time_res[1]
if left_text:
topNode = Node(isPredicate=False,
text = left_text,
features = features,
valid=True)
if not topNode.str:
time_node.features.update(topNode.features)
topNode = time_node
else:
self.gr.add_node(topNode)
if time_node:
self.gr.add_edge((topNode,time_node))
else:
if not time_node:
#TODO: probably not good, but happens
topNode = Node(isPredicate=False,
text = [],
features = features,
valid=True)
self.gr.add_node(topNode)
else:
topNode = time_node
return topNode
def parseTime(self,time_res):
"""
Add a time node to the graph, given the results of the automated tool.
@type time_res: list[TimeExpression]
@param time_res: Time Expressions to be added to the graph, all as single nodes, and under the same "time" node
        @rtype: Node
        @return: the top node (time node)
"""
topNode = TimeNode.init(features={})
self.gr.add_node(topNode)
for timeExpression in time_res:
curNode = Node(isPredicate = False,
text = timeExpression.text,
features = {"Time Value":timeExpression.value},
valid = True)
self.gr.add_node(curNode)
self.gr.add_edge((topNode,curNode))
return topNode
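    # Illustrative result of parseTime (expression texts are hypothetical):
    #   TimeNode -> Node("May 1st", {"Time Value": "1998-05-01"})
    #   TimeNode -> Node("9 am",    {"Time Value": "T09:00"})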
def parseComplement(self,compSubj,compChildren):
"""
add a complement subgraph to the graph
@type compSubj: DepTree
@param compSubj: the subject of all following complements
        @type compChildren: list [DepTree]
@param compChildren: all subclauses
"""
topNode = self.parse(compSubj)
for child in compChildren:
curNode = self.parse(child)
self.gr.add_edge(edge=(topNode,curNode),
label=child.parent_relation)
return topNode
def parseConjunction(self,baseElm,conjResult):
"""
add a conjunction subgraph to the graph
@type cc: list [(int,string)]
@param cc: the connecting element
@type conjElements: list [DepTree]
@param conjElements: subtrees to be joined in conjunction
"""
retNode = self.parse(baseElm)
for cc,conjElements in conjResult:
if not conjElements:
# discourse marker
discourseNode = Node(isPredicate = False,
text = [Word(ind,word) for ind,word in cc],
features = {},
valid=True)
self.gr.add_node(discourseNode)
self.gr.add_edge(edge =(retNode,discourseNode),
label= DISCOURSE_LABEL)
else:
# generate top conjunction node
conjNode = ConjunctionNode.init(text = [Word(ind,word) for ind,word in cc],
features = {})
self.gr.add_node(conjNode)
#connect cc to base element
self.gr.add_edge((conjNode,retNode))
#generate node for each element and connect to topNode
for elm in conjElements:
curNode = self.parse(elm)
self.gr.add_edge(edge = (conjNode,curNode))
return retNode
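    # e.g. for "John ran and Mary walked", baseElm is the first conjunct and
    # conjResult holds one ([(idx, "and")], [<Mary walked>]) pair; an empty
    # subtree list instead marks a discourse marker ("however", "moreover").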
def parseRcmod(self,np,modList):
"""
add a relative clause subgraph to the graph
@type np: DepTree
@param np: the entity being modified by the relative clause
        @type modList: list [DepTree]
        @param modList: trees modifying np
"""
topNode = self.parse(np)
for temp_t in modList:
# add nodes
rcmodNode = self.parse(temp_t._RELCLAUSE_PREDICATE_FEATURE_Relclause()["Value"])
propNode = RCMODPropNode.init(features={},
valid=True)
self.gr.add_node(propNode)
#add edges
self.gr.add_edge(edge=(topNode,propNode))
self.gr.add_edge(edge=(propNode,rcmodNode))
if rcmodNode.isPredicate:
                # this will create a cycle; the label is a heuristic guess at the connection between the relative clause and the top node
self.gr.add_edge(edge=(rcmodNode,topNode), label=temp_t.rcmodRel)
# record that this construction came from rcmod
topNode.rcmod = [propNode,rcmodNode]
return topNode
def parseConditional(self,outcome,condList):
"""
add a conditional subgraph to the graph
@type outcome: DepTree
@param outcome: the outcome of all following conditions
        @type condList: list [DepTree]
@param condList: all conditionals regarding outcome
"""
outcomeNode = self.parse(outcome)
for temp_t in condList:
mark = temp_t._CONDITIONAL_PREDICATE_FEATURE_Mark()
markValue = mark["Value"]
markIndex = mark["Span"][0]
conditionNode = self.parse(temp_t._CONDITIONAL_PREDICATE_FEATURE_Condition()["Value"])
#create nodes
markNode = CondNode.init(index = markIndex,
condType = markValue,
features = {},
valid=True)
self.gr.add_node(markNode)
markValue = markValue.lower()
# add edges according to the type of conditional
if markValue in condition_outcome_markers:
self.gr.add_edge(edge = (markNode,outcomeNode),
label = OUTCOME_LABEL)
self.gr.add_edge(edge = (markNode,conditionNode),
label = CONDITION_LABEL)
elif markValue in reason_outcome_markers:
self.gr.add_edge(edge = (markNode,outcomeNode),
label = OUTCOME_LABEL)
self.gr.add_edge(edge = (markNode,conditionNode),
label = REASON_LABEL)
elif markValue in comp_markers:
self.gr.add_edge(edge = (conditionNode,outcomeNode),
label = COMP_LABEL)
else:
#add edges
self.gr.add_edge((outcomeNode,markNode))
self.gr.add_edge((markNode,conditionNode))
#return top node
return outcomeNode
def parsePreposition(self,psubj,prepChildList):
"""
add a preposition subgraph to the graph
@type psubj: DepTree
@param psubj: the subject of all following prepositions
        @type prepChildList: list [DepTree]
        @param prepChildList: all prepositions regarding psubj
"""
#create top nodes:
topNode = self.parse(psubj)
for temp_t in prepChildList:
#generate bottom node and connect to prep
pobj = temp_t._PREPOSITIONAL_PREDICATE_FEATURE_pobj()["Value"]
if not pobj: # e.g., #460
continue
bottomNode = self.parse(pobj)
#generate prep node and connect to top node
prepNode = PrepNode.init(index=temp_t.prepInd,
prepType=temp_t.prepType,
features={},
valid = True)
# self.gr.add_node(prepNode)
#self.gr.add_edge(edge = (prepNode,bottomNode))
self.gr.add_edge(edge = (topNode,bottomNode),
label = " ".join([w.word for w in prepNode.str]))
return topNode
def parseVerbal(self,indexes,verbs,arguments,tree):
"""
add a verbal subgraph to the graph
@type indexes: list [int]
@param indexes: the index(es) of the verb in the sentence
@type verbs: list [string]
@param verbs: the string(s) representing the verb
@type tree: DepTree
@param tree: tree object from which to extract various features
@type arguments: list
@param arguments: list of DepTrees of arguments
"""
# create verbal head node
# start by extracting features
feats = syntactic_item.get_verbal_features(tree)
if feats['Lemma'] == verbs[0]:
del(feats['Lemma'])
for k in feats:
self.types.add(k)
verbNode = graph_representation.node.Node(isPredicate=True,
text = [Word(index=index,
word=verb) for index,verb in zip(indexes,verbs)],
features=feats,
valid=True)
self.gr.add_node(verbNode)
# handle arguments
for arg_t in arguments:
curNode = self.parse(arg_t)
#curNode.features = syntactic_item.get_verbal_features(arg_t)
self.gr.add_edge((verbNode,curNode), arg_t.parent_relation)
# handle time expressions
(timeSubtree,_) = tree._VERBAL_PREDICATE_SUBTREE_Time()
if timeSubtree:
timeNode = graph_representation.node.TimeNode.init(features = {})
self.gr.add_node(timeNode)
timeSubGraph = self.parse(timeSubtree)
self.gr.add_edge((verbNode,timeNode))
self.gr.add_edge((timeNode,timeSubGraph))
return verbNode
def parseAdverb(self,subj,advChildren):
topNode = self.parse(subj)
for advChild,mwe in advChildren:
# advTopNode = advNode.init(features = {})
# self.gr.add_node(advTopNode)
# self.gr.add_edge(edge = (topNode,advTopNode))
if mwe:
# in case this is a complex adverb ("as long as")
curAdvNode = Node(isPredicate = False,
text = [Word(ind,word) for ind,word in mwe],
features = {},
valid = True)
self.gr.add_node(curAdvNode)
curChildNode = self.parse(advChild)
self.gr.add_edge(edge=(topNode,curAdvNode),
label = ADV_LABEL)
self.gr.add_edge(edge = (curAdvNode,curChildNode),
label = advChild.parent_relation)
else:
curChildNode = self.parse(advChild)
self.gr.add_edge(edge = (topNode,curChildNode),
label = ADV_LABEL)
return topNode
def parseCopular(self,index,first_entity,second_entity,features):
"""
add a copular subgraph to the graph
@type index: int
@param index: the index of the copula in the sentence
@type first_entity: DepTree
@param first_entity: the syntax tree of the first entity
@type second_entity: DepTree
        @param second_entity: the syntax tree of the second entity
        @type features: dict{string:string}
        @param features: verbal features to attach to the top node
@rtype: Node
@return: the top node of the copula subgraph
"""
if (second_entity.parent_relation in adjectival_mod_dependencies) \
or (not second_entity.is_definite()):
# reduce to prop construction when the second element in the copula is an adjective
# e.g., Rabbit is white -> white rabbit
# or when the second element is indefinite
second_entity.adjectivalChild = [second_entity]
second_entity.relative_adj = False #TODO: calculate this
second_entity.parent_relation = "copular" #TODO: this might be dangerous :\
return self.parseProp(subject = first_entity,
copulaIndex = index,
adjectiveChildList = [second_entity],
features=features,
propAsHead = True)
# generate the top node and add to the graph
topNode = CopularNode.init(index=index,
features=features,
valid=True)
self.gr.add_node(topNode)
# generate both entities subgraphs
firstEntityNode = self.parse(first_entity)
secondEntityNode = self.parse(second_entity)
#propagate properties between the two nodes
graph_representation.node.addSymmetricPropogation(firstEntityNode,
secondEntityNode)
#add labeled edges
self.gr.add_edge(edge=(topNode,firstEntityNode),
label=FIRST_ENTITY_LABEL)
self.gr.add_edge(edge=(topNode,secondEntityNode),
label=SECOND_ENTITY_LABEL)
return topNode
def parseApposition(self,index,first_entity,second_entity):
"""
add an apposition subgraph to the graph
@type index: int
@param index: the index of the apposition in the sentence
@type first_entity: DepTree
@param first_entity: the syntax tree of the first entity
@type second_entity: DepTree
@param second_entity: the syntax tree of the second entity
@rtype: Node
@return: the top node of the apposition subgraph
"""
#copied from copular, interesting to see if this happens
if (second_entity.parent_relation in adjectival_mod_dependencies) \
or (not second_entity.is_definite()):
            # reduce to prop construction when the second element in the apposition is an adjective
            # e.g., Rabbit is white -> white rabbit
second_entity.adjectivalChild = [second_entity]
second_entity.relative_adj = False #TODO - calculate this
second_entity.parent_relation = "appos" #TODO: this might be dangerous :\
return self.parseProp(subject = first_entity,
copulaIndex = NO_INDEX,
adjectiveChildList = [second_entity],
propAsHead = True)
# generate the top node and add to the graph
topNode = AppositionNode.init(index=index,
features={})
self.gr.add_node(topNode)
# generate both entities subgraphs
firstEntityNode = self.parse(first_entity)
secondEntityNode = self.parse(second_entity)
# remember first and second entities in apposition's node
# topNode.entities = [firstEntityNode,secondEntityNode]
# propagate properties between the two nodes
graph_representation.node.addSymmetricPropogation(firstEntityNode,
secondEntityNode)
#add labeled edges
self.gr.add_edge(edge=(topNode,firstEntityNode),
label=FIRST_ENTITY_LABEL)
self.gr.add_edge(edge=(topNode,secondEntityNode),
label=SECOND_ENTITY_LABEL)
return topNode
def parsePossessive(self,possessor,possessed,possessive):
"""
add a possessive subgraph to the graph
@type index: int
@param index: the index of the possessive in the sentence
@type possessor: DepTree
@param possessor: the syntax tree of the possessor
@type possessed: DepTree
@param possessed: the syntax tree of the possessed
@type possessive: DepTree
@param possessive: the syntax tree of the possessive - e.g - 's
@rtype: Node
@return: the top node of the possessive subgraph
"""
if not possessive:
index = graph_representation.word.NO_INDEX
else:
index = possessive.id
# generate nodes
possessorNode = self.parse(possessor)
possessedNode = self.parse(possessed)
if isTime(possessorNode) or isLocation(possessorNode):
#possessive construction to indicate time
self.gr.add_edge((possessedNode,possessorNode))
return possessedNode
#otherwise - proper possessive:
hasNode = PossessiveNode.init(index=index,
features={},
valid=True)
self.gr.add_node(hasNode)
# add edges to graph
self.gr.add_edge(edge=(hasNode,possessorNode),
label=POSSESSOR_LABEL)
self.gr.add_edge(edge=(hasNode,possessedNode),
label=POSSESSED_LABEL)
# create top node
# get list of all relevant nodes
nodeLs = [possessorNode,possessedNode]
if possessive: # in some cases there's no possessive marker (e.g., "their woman")
possessiveNode = graph_representation.node.Node(isPredicate=False,
text = [Word(possessive.id,
possessive.get_original_sentence(root=False))],
features = {},
valid=True)
nodeLs.append(possessiveNode)
# create possessive top node, add to graph, and return it
topNode = graph_utils.generate_possessive_top_node(graph=self.gr, nodeLs=nodeLs)
self.gr.add_node(topNode)
#mark that features and neighbours should propagate from the top node to the possessed
        # John's results were low -> features should propagate between (John's results) and (results)
graph_representation.node.addSymmetricPropogation(topNode, possessedNode)
return topNode
def parseProp(self,subject,copulaIndex,adjectiveChildList,propAsHead,features={}):
"""
add a prop subgraph to the graph
@type adjective: DepTree
@param adjective: the syntax tree of the adjective
@type subject: DepTree
@param subject: the syntax tree of the subject
@rtype: Node
@return: the top node of the copula subgraph
"""
# parse top node
subjectNode = self.parse(subject)
topNode = subjectNode
#parse each property and connect to top node
for temp_t in adjectiveChildList:
adjective = temp_t._ADJECTIVAL_PREDICATE_FEATURE_Adjective()["Value"]
adjectiveNode = self.parse(adjective)
if "Lemma" in features:
del(features["Lemma"])
adjectiveNode.features.update(features)
# generate the top node and add to the graph
propNode = PropNode.init(features={"relative":temp_t.relative_adj},
index = copulaIndex,
valid=True,
parent_relation = adjective.parent_relation)
self.gr.add_node(propNode)
if propAsHead:
topNode = propNode
#add labeled edges
self.gr.add_edge(edge=(subjectNode,propNode),
label="")
self.gr.add_edge(edge=(propNode,adjectiveNode),
label="")
return topNode
class appendix_types:
    """Multiset of construction types seen while building the graph."""
    def __init__(self):
        self.d = {}
    def add(self,obj):
        self._update(obj, add=+1)
def getSet(self):
return set([k for k in self.d.keys() if self.d[k]>0])
def union(self,other):
for k in other.d:
self._update(obj=k, add=other.d[k])
def remove(self,obj):
self._update(obj, add=-1)
def _update(self,obj,add):
if obj not in self.d:
self.d[obj] = 0
self.d[obj]+=add | 1.640625 | 2 |
titus/test/producer/testCart.py | jmilleralpine/hadrian | 127 | 12767871 | #!/usr/bin/env python
# Copyright (C) 2014 Open Data ("Open Data" refers to
# one or more of the following companies: Open Data Partners LLC,
# Open Data Research LLC, or Open Data Capital LLC.)
#
# This file is part of Hadrian.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest
import numpy
from titus.genpy import PFAEngine
from titus.producer.tools import look
from titus.producer.cart import *
class TestProducerCart(unittest.TestCase):
@staticmethod
def data():
while True:
x = random.uniform(0, 10)
y = random.uniform(0, 10)
if x < 4.0:
if y < 6.0:
z = random.gauss(5, 1)
else:
z = random.gauss(8, 1)
else:
if y < 2.0:
z = random.gauss(1, 1)
else:
z = random.gauss(2, 1)
if z < 0.0:
z = 0.0
elif z >= 10.0:
z = 9.99999
a = "A" + str(int(x))
b = "B" + str(int(y/2) * 2)
c = "C" + str(int(z/3) * 3)
yield (x, y, z, a, b, c)
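    # The generator above defines a piecewise-constant surface the trees below
    # should recover: E[z] = 5 for (x<4, y<6), 8 for (x<4, y>=6),
    # 1 for (x>=4, y<2) and 2 for (x>=4, y>=2).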
def testCartMustBuildNumericalNumerical(self):
random.seed(12345)
numpy.seterr(divide="ignore", invalid="ignore")
dataset = Dataset.fromIterable(((x, y, z) for (x, y, z, a, b, c) in TestProducerCart.data()), 100000, ("x", "y", "z"))
tree = TreeNode.fromWholeDataset(dataset, "z")
tree.splitMaxDepth(2)
doc = tree.pfaDocument({"type": "record", "name": "Datum", "fields": [{"name": "x", "type": "double"}, {"name": "y", "type": "double"}]}, "TreeNode")
# look(doc, maxDepth=8)
self.assertEqual(doc["cells"]["tree"]["init"]["field"], "x")
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["value"], 4.00, places=2)
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["field"], "y")
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["value"], 6.00, places=2)
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["pass"]["double"], 5.00, places=2)
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["fail"]["double"], 8.02, places=2)
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["field"], "y")
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["value"], 2.00, places=2)
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["pass"]["double"], 1.09, places=2)
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["fail"]["double"], 2.00, places=2)
engine, = PFAEngine.fromJson(doc)
self.assertAlmostEqual(engine.action({"x": 2.0, "y": 3.0}), 5.00, places=2)
self.assertAlmostEqual(engine.action({"x": 2.0, "y": 8.0}), 8.02, places=2)
self.assertAlmostEqual(engine.action({"x": 7.0, "y": 1.0}), 1.09, places=2)
self.assertAlmostEqual(engine.action({"x": 7.0, "y": 5.0}), 2.00, places=2)
doc = tree.pfaDocument(
{"type": "record", "name": "Datum", "fields": [{"name": "x", "type": "double"}, {"name": "y", "type": "double"}]},
"TreeNode",
nodeScores=True, datasetSize=True, predictandUnique=True, nTimesVariance=True, gain=True)
# look(doc, maxDepth=8)
engine, = PFAEngine.fromJson(doc)
def testCartMustBuildNumericalCategorical(self):
random.seed(12345)
numpy.seterr(divide="ignore", invalid="ignore")
dataset = Dataset.fromIterable(((x, y, c) for (x, y, z, a, b, c) in TestProducerCart.data()), 100000, ("x", "y", "c"))
tree = TreeNode.fromWholeDataset(dataset, "c")
tree.splitMaxDepth(2)
doc = tree.pfaDocument({"type": "record", "name": "Datum", "fields": [{"name": "x", "type": "double"}, {"name": "y", "type": "double"}]}, "TreeNode")
# look(doc, maxDepth=8)
self.assertEqual(doc["cells"]["tree"]["init"]["field"], "x")
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["value"], 4.00, places=2)
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["field"], "y")
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["value"], 6.00, places=2)
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["pass"]["string"], "C3")
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["fail"]["string"], "C6")
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["field"], "y")
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["value"], 2.00, places=2)
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["pass"]["string"], "C0")
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["fail"]["string"], "C0")
engine, = PFAEngine.fromJson(doc)
self.assertEqual(engine.action({"x": 2.0, "y": 3.0}), "C3")
self.assertEqual(engine.action({"x": 2.0, "y": 8.0}), "C6")
self.assertEqual(engine.action({"x": 7.0, "y": 1.0}), "C0")
self.assertEqual(engine.action({"x": 7.0, "y": 5.0}), "C0")
doc = tree.pfaDocument(
{"type": "record", "name": "Datum", "fields": [{"name": "x", "type": "double"}, {"name": "y", "type": "double"}]},
"TreeNode",
nodeScores=True, datasetSize=True, predictandDistribution=True, predictandUnique=True, entropy=True, gain=True)
# look(doc, maxDepth=8)
engine, = PFAEngine.fromJson(doc)
def testCartMustBuildCategoricalNumerical(self):
random.seed(12345)
numpy.seterr(divide="ignore", invalid="ignore")
dataset = Dataset.fromIterable(((a, b, z) for (x, y, z, a, b, c) in TestProducerCart.data()), 100000, ("a", "b", "z"))
tree = TreeNode.fromWholeDataset(dataset, "z")
tree.splitMaxDepth(2)
doc = tree.pfaDocument({"type": "record", "name": "Datum", "fields": [{"name": "a", "type": "string"}, {"name": "b", "type": "string"}]}, "TreeNode")
# look(doc, maxDepth=8)
self.assertEqual(doc["cells"]["tree"]["init"]["field"], "a")
self.assertEqual(doc["cells"]["tree"]["init"]["value"], ["A0", "A1", "A2", "A3"])
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["field"], "b")
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["value"], ["B6", "B8"])
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["pass"]["double"], 8.02, places=2)
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["fail"]["double"], 5.00, places=2)
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["field"], "b")
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["value"], ["B0"])
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["pass"]["double"], 1.09, places=2)
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["fail"]["double"], 2.00, places=2)
engine, = PFAEngine.fromJson(doc)
self.assertAlmostEqual(engine.action({"a": "A1", "b": "B6"}), 8.02, places=2)
self.assertAlmostEqual(engine.action({"a": "A1", "b": "B2"}), 5.00, places=2)
self.assertAlmostEqual(engine.action({"a": "A5", "b": "B0"}), 1.09, places=2)
self.assertAlmostEqual(engine.action({"a": "A5", "b": "B4"}), 2.00, places=2)
doc = tree.pfaDocument(
{"type": "record", "name": "Datum", "fields": [{"name": "a", "type": "string"}, {"name": "b", "type": "string"}]},
"TreeNode",
nodeScores=True, datasetSize=True, predictandUnique=True, nTimesVariance=True, gain=True)
# look(doc, maxDepth=8)
engine, = PFAEngine.fromJson(doc)
def testCartMustBuildCategoricalCategorical(self):
random.seed(12345)
numpy.seterr(divide="ignore", invalid="ignore")
dataset = Dataset.fromIterable(((a, b, c) for (x, y, z, a, b, c) in TestProducerCart.data()), 100000, ("a", "b", "c"))
tree = TreeNode.fromWholeDataset(dataset, "c")
tree.splitMaxDepth(2)
doc = tree.pfaDocument({"type": "record", "name": "Datum", "fields": [{"name": "a", "type": "string"}, {"name": "b", "type": "string"}]}, "TreeNode")
# look(doc, maxDepth=8)
self.assertEqual(doc["cells"]["tree"]["init"]["field"], "a")
self.assertEqual(doc["cells"]["tree"]["init"]["value"], ["A0", "A1", "A2", "A3"])
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["field"], "b")
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["value"], ["B6", "B8"])
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["pass"]["string"], "C6")
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["fail"]["string"], "C3")
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["field"], "b")
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["value"], ["B0"])
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["pass"]["string"], "C0")
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["fail"]["string"], "C0")
engine, = PFAEngine.fromJson(doc)
self.assertEqual(engine.action({"a": "A1", "b": "B6"}), "C6")
self.assertEqual(engine.action({"a": "A1", "b": "B2"}), "C3")
self.assertEqual(engine.action({"a": "A5", "b": "B0"}), "C0")
self.assertEqual(engine.action({"a": "A5", "b": "B4"}), "C0")
doc = tree.pfaDocument(
{"type": "record", "name": "Datum", "fields": [{"name": "a", "type": "string"}, {"name": "b", "type": "string"}]},
"TreeNode",
nodeScores=True, datasetSize=True, predictandDistribution=True, predictandUnique=True, entropy=True, gain=True)
# look(doc, maxDepth=8)
engine, = PFAEngine.fromJson(doc)
if __name__ == "__main__":
unittest.main()
| 2.4375 | 2 |
P2/ex1at1.py | bmatheaco/pythonexercises | 0 | 12767872 | <gh_stars>0
class Aumento:
    codigo = 0
    nome = ''
    preco = 0.0
    aumento = 0.0
def main():
    p = Aumento()
    p.codigo = int(input('Enter the product code: '))
    p.nome = str(input('Enter the product name: '))
    p.preco = float(input('Enter the product price: R$ '))
    p.aumento = (p.preco * 0.1) + p.preco  # apply a 10% price increase
    print(f'price with increase = R$ {p.aumento:.2f}.')
main()
| 3.59375 | 4 |
core/colors.py | McArcady/python-lnp | 48 | 12767873 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Color scheme management."""
from __future__ import print_function, unicode_literals, absolute_import
import os, shutil
from . import helpers, paths, log
from .lnp import lnp
from .dfraw import DFRaw
_df_colors = (
'BLACK', 'BLUE', 'GREEN', 'CYAN',
'RED', 'MAGENTA', 'BROWN', 'LGRAY',
'DGRAY', 'LBLUE', 'LGREEN', 'LCYAN',
'LRED', 'LMAGENTA', 'YELLOW', 'WHITE'
)
def read_colors():
"""Returns a sorted tuple of color scheme basenames, in LNP/Colors."""
return tuple(sorted(
[os.path.splitext(os.path.basename(p))[0] for p in
helpers.get_text_files(paths.get('colors'))],
key=helpers.key_from_underscore_prefixed_string))
def get_colors(colorscheme=None):
"""
Returns RGB tuples for all 16 colors in <colorscheme>.txt, or
    data/init/colors.txt if no scheme is provided. On errors, returns an
    empty list.
    """
# pylint:disable=bare-except
try:
if colorscheme is not None:
f = colorscheme
if not f.endswith('.txt'):
f = f + '.txt'
if os.path.dirname(f) == '':
f = paths.get('colors', f)
else:
if lnp.df_info.version <= '0.31.03':
f = paths.get('init', 'init.txt')
else:
f = paths.get('init', 'colors.txt')
color_fields = [(c+'_R', c+'_G', c+'_B') for c in _df_colors]
result = DFRaw(f).get_values(*color_fields)
return [tuple(int(x) for x in t) for t in result]
except:
if colorscheme:
log.e('Unable to read colorscheme %s', colorscheme, stack=True)
else:
log.e('Unable to read current colors', stack=True)
return []
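def _example_dump_schemes():
    """Illustrative sketch (not called anywhere): print every scheme found in
    LNP/Colors with its 16 (r, g, b) tuples, assuming the directory exists."""
    for name in read_colors():
        print(name, get_colors(name))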
def load_colors(filename):
"""
Replaces the current DF color scheme.
Args:
filename: The name of the new colorscheme to apply (extension optional).
If no path is specified, file is assumed to be in LNP/Colors.
"""
log.i('Loading colorscheme ' + filename)
if not filename.endswith('.txt'):
filename = filename + '.txt'
if os.path.dirname(filename) == '':
filename = paths.get('colors', filename)
if lnp.df_info.version <= '0.31.03':
colors = ([c+'_R' for c in _df_colors] + [c+'_G' for c in _df_colors] +
[c+'_B' for c in _df_colors])
lnp.settings.read_file(filename, colors, False)
lnp.settings.write_settings()
else:
shutil.copyfile(filename, paths.get('init', 'colors.txt'))
def save_colors(filename):
"""
    Save the current color scheme to a file.
Args:
filename: the name of the new color scheme file.
"""
log.i('Saving colorscheme ' + filename)
if not filename.endswith('.txt'):
filename = filename + '.txt'
filename = paths.get('colors', filename)
if lnp.df_info.version <= '0.31.03':
colors = ([c+'_R' for c in _df_colors] + [c+'_G' for c in _df_colors] +
[c+'_B' for c in _df_colors])
lnp.settings.create_file(filename, colors)
else:
shutil.copyfile(paths.get('init', 'colors.txt'), filename)
def color_exists(filename):
"""
Returns whether or not a color scheme already exists.
Args:
filename: the filename to check.
"""
if not filename.endswith('.txt'):
filename = filename + '.txt'
return os.access(paths.get('colors', filename), os.F_OK)
def delete_colors(filename):
"""
Deletes a color scheme file.
Args:
filename: the filename to delete.
"""
log.i('Deleting colorscheme ' + filename)
if not filename.endswith('.txt'):
filename = filename + '.txt'
os.remove(paths.get('colors', filename))
def get_installed_file():
"""Returns the name of the currently installed color scheme, or None."""
files = helpers.get_text_files(paths.get('colors'))
current_scheme = get_colors()
for scheme in files:
if get_colors(scheme) == current_scheme:
return os.path.splitext(os.path.basename(scheme))[0]
return None
| 2.28125 | 2 |
variant_remapping_tools/reads_to_remapped_variants.py | diegomscoelho/variant-remapping | 3 | 12767874 | #! /usr/bin/env python3
import argparse
from argparse import RawTextHelpFormatter
from collections import Counter, defaultdict
import yaml
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
import pysam
nucleotide_alphabet = {'A', 'T', 'C', 'G'}
def reverse_complement(sequence):
return str(Seq(sequence, generic_dna).reverse_complement())
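# Worked example: reverse_complement("ACCT") -> "AGGT"
# (per-base complement is TGGA, then read in reverse).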
def calculate_new_variant_definition(left_read, right_read, ref_fasta, original_vcf_rec):
"""
Resolve the variant definition from the flanking region alignment and old variant definition
TODO: Link to algorithm description once public
"""
# Flag to highlight low confidence in an event detected
failure_reason = None
old_ref = original_vcf_rec[3]
old_alts = original_vcf_rec[4].split(',')
operations = {}
# Define new ref and new pos
new_ref = fetch_bases(ref_fasta, left_read.reference_name, left_read.reference_end + 1,
right_read.reference_start - left_read.reference_end).upper()
    if len(set(new_ref).difference(nucleotide_alphabet)) != 0:
failure_reason = 'Reference Allele not in ACGT'
new_pos = left_read.reference_end + 1
# 1. Handle reference strand change
if not left_read.is_reverse and not right_read.is_reverse:
# Forward strand alignment
old_ref_conv = old_ref
old_alt_conv = old_alts
operations['st'] = '+'
elif left_read.is_reverse and right_read.is_reverse:
# Reverse strand alignment
old_ref_conv = reverse_complement(old_ref)
old_alt_conv = [reverse_complement(alt) for alt in old_alts]
operations['st'] = '-'
else:
# This case should be handled by the filtering but raise just in case...
error_msg = (f'Impossible read configuration: '
f'read1 is_reverse: {left_read.is_reverse}, '
f'read2 is_reverse: {right_read.is_reverse}, '
f'read1 position: {left_read.pos}, '
f'read2 position: {right_read.pos}')
raise ValueError(error_msg)
# 2. Assign new allele sequences
if new_ref == old_ref_conv:
new_alts = old_alt_conv
elif new_ref in old_alt_conv:
old_alt_conv.remove(new_ref)
new_alts = old_alt_conv
new_alts.append(old_ref_conv)
operations['rac'] = old_ref_conv + '-' + new_ref
if len(old_ref_conv) != len(new_ref):
failure_reason = 'Reference Allele length change'
else:
new_alts = old_alt_conv
new_alts.append(old_ref_conv)
operations['rac'] = old_ref_conv + '-' + new_ref
operations['nra'] = None
if len(old_ref_conv) != len(new_ref):
failure_reason = 'Novel Reference Allele length change'
# 3. Correct zero-length reference sequence
if len(new_ref) == 0:
new_pos -= 1
new_ref = fetch_bases(ref_fasta, left_read.reference_name, new_pos, 1).upper()
new_alts = [new_ref + alt for alt in new_alts]
operations['zlr'] = None
return new_pos, new_ref, new_alts, operations, failure_reason
def update_vcf_record(reference_name, varpos, new_ref, new_alts, operations, original_vcf_rec):
"""
Update the original vcf record with the different fields and use the operations to modify the info and genotypes
fields.
"""
original_vcf_rec[0] = reference_name
original_vcf_rec[1] = str(varpos)
original_vcf_rec[3] = new_ref
original_vcf_rec[4] = ','.join(new_alts)
# Update The INFO field by appending operations
operation_list = [op if operations[op] is None else '%s=%s' % (op, operations[op]) for op in operations]
if original_vcf_rec[7] != '.':
original_vcf_rec[7] = ';'.join(original_vcf_rec[7].strip(';').split(';') + operation_list)
else:
original_vcf_rec[7] = ';'.join(operation_list)
# If required Update SAMPLE fields by changing the Genotypes
if 'rac' in operations and len(original_vcf_rec) > 8 and 'GT' in original_vcf_rec[8]:
gt_index = original_vcf_rec[8].split(':').index('GT')
for genotype_i in range(9, len(original_vcf_rec)):
genotype_str_list = original_vcf_rec[genotype_i].split(':')
if genotype_str_list[gt_index] == '1/1':
genotype_str_list[gt_index] = '0/0'
elif 'nra' in operations and genotype_str_list[gt_index] == '0/1':
genotype_str_list[gt_index] = '1/2'
original_vcf_rec[genotype_i] = ':'.join(genotype_str_list)
def fetch_bases(fasta, contig, start, length):
"""
Returns a subsection from a specified FASTA contig. The start coordinate is 1-based.
"""
zero_base_start = start - 1
end = zero_base_start + length
new_ref = fasta.fetch(reference=contig, start=zero_base_start, end=end)
return new_ref
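# Worked example: fetch_bases(fasta, "chr1", start=10, length=5) calls
# fasta.fetch("chr1", 9, 14), i.e. 1-based positions 10..14 inclusive.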
def group_reads(bam_file_path):
"""
This function assumes that the reads are sorted by query name.
It will group reads by query name and create three subgroups of primary, supplementary and secondary aligned reads.
It returns an iterators where each element is a tuple of the three lists
:param bam_file_path: the name sorted bam file
:return: iterator of tuples containing three lists
"""
with pysam.AlignmentFile(bam_file_path, 'rb') as inbam:
current_read_name = None
primary_group = None
secondary_group = None
supplementary_group = None
for read in inbam:
if read.query_name == current_read_name:
pass
else:
if current_read_name:
yield primary_group, supplementary_group, secondary_group
primary_group = []
secondary_group = []
supplementary_group = []
if read.is_secondary:
secondary_group.append(read)
elif read.is_supplementary:
supplementary_group.append(read)
else:
primary_group.append(read)
current_read_name = read.query_name
if primary_group:
yield primary_group, supplementary_group, secondary_group
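# Illustrative usage (hypothetical file name):
#   for primary, supplementary, secondary in group_reads("flanks.namesorted.bam"):
#       ...  # one tuple per query name, holding all of its alignments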
def order_reads(primary_group, primary_to_supplementary):
"""
Order read and return the most 5' (smallest coordinates) first.
if a supplementary read exists and is closer to the other read then it is used in place of the primary
"""
read1, read2 = primary_group
suppl_read1 = suppl_read2 = None
if read1 in primary_to_supplementary:
suppl_read1 = primary_to_supplementary.get(read1)[0]
if read2 in primary_to_supplementary:
suppl_read2 = primary_to_supplementary.get(read2)[0]
if read1.reference_start <= read2.reference_start:
if suppl_read1 and suppl_read1.reference_start > read1.reference_start:
read1 = suppl_read1
if suppl_read2 and suppl_read2.reference_start < read2.reference_start:
read2 = suppl_read2
return read1, read2
else:
if suppl_read1 and suppl_read1.reference_start < read1.reference_start:
read1 = suppl_read1
if suppl_read2 and suppl_read2.reference_start > read2.reference_start:
read2 = suppl_read2
return read2, read1
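# e.g. if read1's primary maps at 100 but its supplementary maps at 250, and
# read2 maps at 300, order_reads returns the supplementary as the left read
# because it sits closer to read2 (positions here are illustrative).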
def pass_basic_filtering(primary_group, secondary_group, primary_to_supplementary, counter, filter_align_with_secondary):
"""
Test if the alignment pass basic filtering such as presence of secondary alignments, any primary unmapped,
primary mapped on different chromosome, or primary mapped poorly.
"""
if filter_align_with_secondary and len(secondary_group):
counter['Too many alignments'] += 1
elif len(primary_group) < 2 or any(read.is_unmapped for read in primary_group):
counter['Flank unmapped'] += 1
elif len(set(read.reference_name for read in primary_group)) != 1:
counter['Different chromosomes'] += 1
elif any(len(suppl) > 1 for suppl in primary_to_supplementary.values()):
counter['Too many supplementary'] += 1
else:
return True
return False
def pass_aligned_filtering(left_read, right_read, counter):
"""
Test if the two reads pass the additional filters such as check for soft-clipped end next to the variant region,
or overlapping region between the two reads.
:param left_read: the left (or 5') most read
:param right_read: the right (or 3') most read
:param counter: Counter to report the number of reads filtered.
:return: True or False
"""
# in CIGAR tuples the operation is coded as an integer
# https://pysam.readthedocs.io/en/latest/api.html#pysam.AlignedSegment.cigartuples
if left_read.cigartuples[-1][0] == pysam.CSOFT_CLIP or right_read.cigartuples[0][0] == pysam.CSOFT_CLIP:
counter['Soft-clipped alignments'] += 1
elif left_read.reference_end > right_read.reference_start:
counter['Overlapping alignment'] += 1
elif left_read.is_reverse != right_read.is_reverse:
counter['Unexpected orientation'] += 1
else:
return True
return False
def output_alignment(original_vcf_rec, outfile):
"""
Output the original or updated VCF entry to the provided output file.
"""
print('\t'.join(original_vcf_rec), file=outfile)
def link_supplementary(primary_group, supplementary_group):
"""Link supplementary alignments to their primary."""
if not supplementary_group:
# No supplementary so no linking required
return {}
supplementary_dict = {}
primary_to_supplementary = defaultdict(list)
for supplementary_read in supplementary_group:
supplementary_dict[supplementary_read.reference_name + str(supplementary_read.reference_start + 1)] = supplementary_read
for primary in primary_group:
# chr2,808117,+,1211M790S,60,1;
if primary.has_tag('SA'):
for other_alignment in primary.get_tag('SA').split(';'):
if other_alignment:
rname, pos = other_alignment.split(',')[:2]
primary_to_supplementary[primary].append(
supplementary_dict[rname + pos]
)
return dict(primary_to_supplementary)
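# Worked example: an SA tag value "chr2,808117,+,1211M790S,60,1;" links the
# primary to the supplementary keyed "chr2" + "808117"; the key uses
# reference_start + 1 because SA positions are 1-based.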
def process_bam_file(bam_file_paths, output_file, out_failed_file, new_genome,
filter_align_with_secondary, flank_length, summary_file):
counter = Counter()
fasta = pysam.FastaFile(new_genome)
with open(output_file, 'w') as outfile, open(out_failed_file, 'w') as out_failed:
for bam_file_path in bam_file_paths:
for primary_group, supplementary_group, secondary_group in group_reads(bam_file_path):
counter['total'] += 1
primary_to_supplementary = link_supplementary(primary_group, supplementary_group)
# Retrieve the full VCF record from the bam vr tag
original_vcf_rec = primary_group[0].get_tag('vr').split('|^')
if pass_basic_filtering(primary_group, secondary_group, primary_to_supplementary, counter, filter_align_with_secondary):
left_read, right_read = order_reads(primary_group, primary_to_supplementary)
if pass_aligned_filtering(left_read, right_read, counter):
varpos, new_ref, new_alts, ops, failure_reason = \
calculate_new_variant_definition(left_read, right_read, fasta, original_vcf_rec)
if not failure_reason:
counter['Remapped'] += 1
update_vcf_record(left_read.reference_name, varpos, new_ref, new_alts, ops, original_vcf_rec)
output_alignment(original_vcf_rec, outfile)
else:
# Currently the alignment is not precise enough to ensure that the allele change for INDEL and
# novel reference allele are correct. So we skip them.
# TODO: add realignment confirmation see #14 and EVA-2417
counter[failure_reason] += 1
output_alignment(original_vcf_rec, out_failed)
else:
output_alignment(original_vcf_rec, out_failed)
else:
output_alignment(original_vcf_rec, out_failed)
with open(summary_file, 'w') as open_summary:
yaml.safe_dump({f'Flank_{flank_length}': dict(counter)}, open_summary)
def main():
description = ('Process alignment results in bam format to determine the location of the variant in the new genome.'
' Each variant will be either output in the new genome VCF or the old VCF will be output in a '
'separate file.')
parser = argparse.ArgumentParser(description=description, formatter_class=RawTextHelpFormatter)
parser.add_argument('-i', '--bams', type=str, required=True, nargs='+',
help='Input BAM file with remapped flanking regions')
parser.add_argument('-o', '--outfile', type=str, required=True,
help='Output VCF file with remapped variants')
parser.add_argument('--out_failed_file', type=str, required=True,
help='Name of the file containing reads that did not align correctly')
parser.add_argument('--flank_length', type=int, required=True,
help='Length of the flanking region used.')
parser.add_argument('--summary', type=str, required=True,
help='YAML files containing the summary metrics')
parser.add_argument('-f', '--filter_align_with_secondary', action='store_true', default=False,
help='Filter out alignments that have one or several secondary alignments.')
parser.add_argument('-n', '--newgenome', required=True, help='FASTA file of the target genome')
args = parser.parse_args()
process_bam_file(
bam_file_paths=args.bams,
output_file=args.outfile,
out_failed_file=args.out_failed_file,
new_genome=args.newgenome,
filter_align_with_secondary=args.filter_align_with_secondary,
flank_length=args.flank_length,
summary_file=args.summary
)
if __name__ == '__main__':
main()
| 2.703125 | 3 |
yolo/config.py | ShechemKS/Yolo_Detectron2 | 3 | 12767875 | from detectron2.config import CfgNode as CN
def add_yolo_config(cfg):
cfg.MODEL.YAML = "yolov5m.yaml"
cfg.MODEL.YOLO = CN()
cfg.MODEL.YOLO.NORM = "BN"
cfg.MODEL.YOLO.ACTIVATION = "nn.LeakyReLU"
cfg.MODEL.YOLO.FOCAL_LOSS_GAMMA = 0.0
cfg.MODEL.YOLO.BOX_LOSS_GAIN = 0.05
cfg.MODEL.YOLO.CLS_LOSS_GAIN = 0.3
cfg.MODEL.YOLO.CLS_POSITIVE_WEIGHT = 1.0
cfg.MODEL.YOLO.OBJ_LOSS_GAIN = 0.7
cfg.MODEL.YOLO.OBJ_POSITIVE_WEIGHT = 1.0
cfg.MODEL.YOLO.LABEL_SMOOTHING = 0.0
cfg.MODEL.YOLO.ANCHOR_T = 4.0
cfg.MODEL.YOLO.CONF_THRESH = 0.001
cfg.MODEL.YOLO.IOU_THRES = 0.65
    cfg.MODEL.PIXEL_MEAN = [0.0, 0.0, 0.0]
    cfg.MODEL.PIXEL_STD = [255.0, 255.0, 255.0]
cfg.SOLVER.BASE_LR = 0.001
cfg.SOLVER.MOMENTUM = 0.937
cfg.SOLVER.NESTEROV = True
cfg.SOLVER.WEIGHT_DECAY = 0.0005
cfg.SOLVER.WEIGHT_DECAY_NORM = 0.0
cfg.SOLVER.WEIGHT_DECAY_BIAS = 0.0005
cfg.SOLVER.LR_SCHEDULER_NAME = "WarmupCosineLR"
cfg.SOLVER.WARMUP_ITERS = 1000
cfg.SOLVER.IMS_PER_BATCH = 16
cfg.INPUT.SIZE = 416
cfg.INPUT.HSV_H = 0.015
cfg.INPUT.HSV_S = 0.7
cfg.INPUT.HSV_V = 0.4
cfg.INPUT.DEGREES = 0.0
cfg.INPUT.TRANSLATE = 0.1
cfg.INPUT.SCALE = 0.5
cfg.INPUT.SHEAR = 0.0
cfg.INPUT.PERSPECTIVE = 0.0
cfg.INPUT.FLIPUD = 0.0
cfg.INPUT.FLIPLR = 0.5
cfg.INPUT.MOSAIC = 1.0 # IGNORED
cfg.INPUT.MIXUP = 0.0
cfg.INPUT.FORMAT = "BGR"
cfg.TEST.AUG.SIZE = 416
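# Typical usage sketch (get_cfg is detectron2's stock config factory; the
# YAML path below is hypothetical):
#   from detectron2.config import get_cfg
#   cfg = get_cfg()
#   add_yolo_config(cfg)
#   cfg.merge_from_file("configs/yolov5m.yaml")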
| 1.890625 | 2 |
senselet++Onewire_Sensor/senseletOnewire.py | beitong95/SENSELET_PLUSPLUS | 0 | 12767876 | import paho.mqtt.client as paho
from sensorMetaData import sensorMetaData
from sht85 import SHT85
from mlx90614 import MLX90614
import os
import time
import threading
from threading import Lock
from datetime import datetime
import subprocess
# lock for thread print
thread_print_lock = Lock()
# publish or debug
mode="publish"
#mode="debug"
# setup watchdog
fd = open("/dev/watchdog", "w")
print(fd)
# setup mqtt client and mqtt function
def on_publish(client,userdata,result):
pass
broker="xxx.xxx.xxx.xxx"
port=1883
raspberrypi_id = 1
client = paho.Client("control" + str(raspberrypi_id))
client.on_publish = on_publish
client.connect(broker,port)
client.loop_start()
#global var
stationTXByte_old = 0
stationRXByte_old = 0
time_old = 0
# thread safe print
def thread_print(a, *b):
global mode
# if we are sending the data to the server, we mute the output
if mode == "publish":
return
with thread_print_lock:
# print format: time + data
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
print("%s: " % current_time, end='')
print (a % b)
# sensor list
currentSensors = {}
ignore = 'w1_bus_master'
# Sensor Reading Class
# Each physical sensor will have a sensorReading obj
# There is no thread stop in python thread package, so we use stop flag to stop threads
class SensorReading(threading.Thread):
global client
global fd
def __init__(self, metaData):
threading.Thread.__init__(self)
self._stopper = threading.Event()
self.metaData = metaData
    def stop(self):
        self._stopper.set()
    def stopped(self):
        return self._stopper.is_set()
def getInterval(self):
return 1.0/self.metaData["frequency"]
def getCalibration(self):
return float(self.metaData["calibration"])
def getID(self):
return self.metaData["id"]
def kickDog(self, ret):
if ret.rc == 0:
nb = fd.write("u")
fd.flush()
if nb > 0:
pass
else:
thread_print("WATCHDOG ERROR")
else:
thread_print("Didn't kick the dog. ret value = %s" % str(ret.rc))
def sht85_read(self):
# get frequency
interval = self.getInterval()
calibration = self.getCalibration()
# get id
id = self.getID()
while True:
if self.stopped():
return
t,h,c = self.metaData["i2cDevice"].single_shot("HIGH")
h = h + calibration
if c == 5:
                # read failed (rare); don't publish
thread_print("sensor: %s. Read fail." % (id))
else:
# read success
# send r, h, id to the server
thread_print("sensor: %s. Temp: %s. Hum: %s. Attempts: %s" % (id,str(t), str(h), str(c)))
# publish
try:
ret = client.publish("senselet/" + id, str(time.time()) + '_' + str(t) + '_' + str(h))
self.kickDog(ret)
except Exception as e:
thread_print(str(e))
# adapt sleep time
# after using ds2482, actually c will always be zero
time.sleep(max(0.1, interval - (0.3*c + 0.2)))
def mlx90614_read(self):
# get frequency
interval = self.getInterval()
# get id
id = self.getID()
while True:
if self.stopped():
return
t,c = self.metaData["i2cDevice"].get_obj_temp()
if c == 5:
#read fail
thread_print("sensor: %s. Read fail." % (id))
else:
# read success
# send r, h, id to the server
thread_print("sensor: %s. Temp: %s.Attempts: %s" % (id,str(t), str(c)))
try:
ret = client.publish("senselet/" + id, str(time.time()) + '_' + str(t))
self.kickDog(ret)
except Exception as e:
thread_print(str(e))
# adapt sleep time
# after using ds2482, actually c will always be zero
time.sleep(max(0.1, interval - 0.1*c))
def waterLeakageRope_read(self):
# get frequency
interval = self.getInterval()
# get id
id = self.getID()
while True:
if self.stopped():
return
# read adc
path = "/sys/bus/w1/devices/" + id + "/vad"
c = 0
for i in range(5):
try:
with open(path, "r") as f:
status = f.read().replace("\n","")
break
except IOError:
c = c + 1
if c == 5 :
#read fail
thread_print("sensor: %s. Read fail." % (id))
else:
# read success
# convert adc reading to discrete status
if int(status) < 300 and int(status) > 15:
status = 1
else:
status = 0
# send status to the server
thread_print("sensor: %s. Status: %s. Attempts: %s" % (id,status, str(c)))
try:
ret = client.publish("senselet/" + id, str(time.time()) + '_' + str(status))
self.kickDog(ret)
except Exception as e:
thread_print(str(e))
time.sleep(interval)
def waterLeakagePoint_read(self):
# get frequency
interval = self.getInterval()
# get id
id = self.getID()
while True:
if self.stopped():
return
path = "/sys/bus/w1/devices/" + id + "/state"
c = 0
for i in range(5):
try:
with open(path, "r") as f:
status = f.read(1)
break
except IOError:
c = c + 1
if c == 5 :
#read fail
thread_print("sensor: %s. Read fail." % (id))
else:
# read success
# send status to the server
s = '{0:08b}'.format(ord(status))[1]
thread_print("sensor: %s. Status: %s. Attempts: %s" % (id, s, str(c)))
try:
ret = client.publish("senselet/" + id, str(time.time()) + '_' + s)
self.kickDog(ret)
except Exception as e:
thread_print(str(e))
time.sleep(interval)
def doorSensor_read(self):
# get frequency
interval = self.getInterval()
# get id
id = self.getID()
while True:
if self.stopped():
return
path = "/sys/bus/w1/devices/" + id + "/state"
c = 0
for i in range(5):
try:
with open(path, "r") as f:
status = f.read(1)
break
except IOError:
c = c + 1
if c == 5 :
#read fail
thread_print("sensor: %s. Read fail." % (id))
else:
# read success
# send status to the server
s = '{0:08b}'.format(ord(status))[1]
# convert: 1 is open 0 is close
if s == "1":
s = "0"
else:
s = "1"
thread_print("sensor: %s. Status: %s. Attempts: %s" % (id, s, str(c)))
try:
ret = client.publish("senselet/" + id, str(time.time()) + '_' + s)
self.kickDog(ret)
except Exception as e:
thread_print(str(e))
time.sleep(interval)
def oilLeakagePoint_read(self):
# get frequency
interval = self.getInterval()
# get id
id = self.getID()
while True:
if self.stopped():
return
path = "/sys/bus/w1/devices/" + id + "/state"
c = 0
for i in range(5):
try:
with open(path, "r") as f:
status = f.read(1)
break
except IOError:
c = c + 1
if c == 5 :
#read fail
thread_print("sensor: %s. Read fail." % (id))
else:
# read success
# send status to the server
s = '{0:08b}'.format(ord(status))[1]
thread_print("sensor: %s. Status: %s. Attempts: %s" % (id, s, str(c)))
try:
ret = client.publish("senselet/" + id, str(time.time()) + '_' + s)
self.kickDog(ret)
except Exception as e:
thread_print(str(e))
time.sleep(interval)
def airFlow_read(self):
# get frequency
interval = self.getInterval()
# get id
id = self.getID()
while True:
if self.stopped():
return
# read adc
path = "/sys/bus/w1/devices/" + id + "/vad"
c = 0
for i in range(5):
try:
with open(path, "r") as f:
status = f.read().replace("\n","")
break
except IOError:
c = c + 1
if c == 5 :
#read fail
thread_print("sensor: %s. Read fail." % (id))
else:
# read success
# send status to the server
thread_print("sensor: %s. Speed: %s. Attempts: %s" % (id,status, str(c)))
try:
ret = client.publish("senselet/" + id, str(time.time()) + '_' + str(status))
self.kickDog(ret)
except Exception as e:
thread_print(str(e))
time.sleep(interval)
def run(self):
if self.metaData["name"] == "sht85":
self.sht85_read()
elif self.metaData["name"] == "mlx90614":
self.mlx90614_read()
elif self.metaData["name"] == "waterLeakageRope":
self.waterLeakageRope_read()
elif self.metaData["name"] == "waterLeakagePoint":
self.waterLeakagePoint_read()
elif self.metaData["name"] == "doorSensorSmall":
self.doorSensor_read()
elif self.metaData["name"] == "doorSensorLarge":
self.doorSensor_read()
elif self.metaData["name"] == "oilLeakagePoint":
self.oilLeakagePoint_read()
elif self.metaData["name"] == "airFlow":
self.airFlow_read()
# Sensor Reading Class End
# get available devices in the folder
def getDevices():
path = "/sys/bus/w1/devices/"
try:
files = os.listdir(path)
except:
return -1
return files
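# Illustrative return value (IDs are hypothetical):
#   ['w1_bus_master1', '19-0000001a2b3c', '28-000005e2fdc3']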
# get the I2C BUS of a given sensor id
def getI2CBUS(sensor):
path = "/sys/bus/w1/devices/" + sensor + "/"
try:
files = os.listdir(path)
except:
        # failed to list the dir -> treat the sensor as unplugged
return -1
sub = "i2c"
busName = [s for s in files if sub in s]
if len(busName) == 0:
        # the dir exists but has no i2c entry -> treat the sensor as unplugged
return -1
return int(busName[0].split('-')[1])
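# Worked example: a sensor dir containing "i2c-11" yields 11; a missing or
# i2c-less dir (sensor unplugged) yields -1.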
def kickDog():
thread_print("no sensor connect to the device %s." %(raspberrypi_id))
def checkAndUpdateSensors():
start = time.time()
global currentSensors
if len(currentSensors) == 0:
kickDog()
# temp sensor list
newSensors = {}
# create a temp sensor list
sensors = getDevices()
if sensors == -1:
return -1
# if there is no sensor connecting to the edge, we still pat the dog
# sensors are discovered by the driver
for sensor in sensors:
if ignore in sensor:
continue
if sensor not in sensorMetaData:
# if the sensor is not registered, we print&log this error
thread_print("sensor - %s not registered" % sensor)
continue
else:
metaData = sensorMetaData[sensor]
# if the sensor is registered, check if it is an i2c device(i2c device is a little bit complex)
if metaData["protocol"] == "I2C":
newI2cBus = getI2CBUS(sensor)
if newI2cBus == -1 :
continue
else:
if metaData["name"] == "sht85":
newSensors[sensor] = {
"protocol": "I2C",
"i2cBus": newI2cBus,
"name": metaData["name"],
"frequency": metaData["frequency"],
"calibration": metaData["calibration"]
}
else:
newSensors[sensor] = {
"protocol": "I2C",
"i2cBus": newI2cBus,
"name": metaData["name"],
"frequency": metaData["frequency"]
}
else:
newSensors[sensor] = {
"protocol": metaData["protocol"],
"name": metaData["name"],
"frequency": metaData["frequency"]
}
# loop through current sensor list, add new sensor, remove unplugged sensor
# for i2c sensor, we need to check (1) if it exists in the currentSensors (2) if the i2c bus is the same
for sensor in list(currentSensors):
metaData = currentSensors[sensor]
# unplug
if sensor not in newSensors:
# thread_print&log
thread_print("sensor - %s - %s unplugged" % (sensor, metaData["name"]))
# if this is I2C device, we need to close the I2C file
if metaData["protocol"] == "I2C":
metaData["i2cDevice"].bus.close()
# stop the thread
metaData["threading"].stop()
metaData["threading"].join()
# delete this sensor from the connected sensor list
del currentSensors[sensor]
else:
# we need to do extra checks for I2C, we don't need to check sensors with other types
if metaData["protocol"] == "I2C":
# if the bus number does not change, do nothing. else, we close the old device and add the new device
oldI2cBus = metaData["i2cBus"]
newI2cBus = newSensors[sensor]["i2cBus"]
if oldI2cBus != newI2cBus:
thread_print("i2c sensor change i2c bus from %s -> %s" % (str(oldI2cBus), str(newI2cBus)))
metaData["i2cDevice"].bus.close()
# start new bus
if metaData["name"] == "sht85":
metaData["i2cDevice"] = SHT85(newI2cBus)
elif metaData["name"] == "mlx90614":
metaData["i2cDevice"] = MLX90614(newI2cBus)
# delete this sensor from new Sensors list
del newSensors[sensor]
# for threading we dont need to do anything
# remained sensors in newSensors are new plugged sensors
for sensor in newSensors:
metaData = newSensors[sensor]
thread_print("sensor - %s - %s plugged in to the system" % (sensor, metaData["name"]))
if metaData["protocol"]== "I2C":
newI2cBus = newSensors[sensor]["i2cBus"]
if metaData["name"]== "sht85":
i2cDevice = SHT85(newI2cBus)
elif metaData["name"]== "mlx90614":
i2cDevice = MLX90614(0x5a,newI2cBus)
if metaData["name"] == "sht85":
currentSensors[sensor] = {
"id": sensor,
"protocol": "I2C",
"i2cBus": newI2cBus,
"i2cDevice": i2cDevice,
"name": metaData["name"],
"frequency": metaData["frequency"],
"calibration": metaData["calibration"]
}
else:
currentSensors[sensor] = {
"id": sensor,
"protocol": "I2C",
"i2cBus": newI2cBus,
"i2cDevice": i2cDevice,
"name": metaData["name"],
"frequency": metaData["frequency"]
}
else:
currentSensors[sensor] = {
"id": sensor,
"protocol": metaData["protocol"],
"name": metaData["name"],
"frequency": metaData["frequency"]
}
# start sensor reading thread
t = SensorReading(currentSensors[sensor])
currentSensors[sensor]["threading"] = t
t.start()
end = time.time()
# publish network stats
def publishLink():
global raspberrypi_id
id = 'network' + str(raspberrypi_id)
process = subprocess.run('cat /proc/net/wireless', shell=True, check=True, stdout=subprocess.PIPE, universal_newlines=True)
output = process.stdout
outputList = output.split("\n")
level = outputList[2].split()[2].replace('.','')
thread_print("controller: %s. Link: %s. " % (id, level))
try:
ret = client.publish("senselet/" + id, str(time.time()) + '_' + level)
except Exception as e:
# we just print out the error
thread_print(str(e))
def publishWIFIStats():
global raspberrypi_id, stationTXByte_old, stationRXByte_old, time_old
id = 'control' + str(raspberrypi_id)
process = subprocess.run('iw dev wlan0 station dump', shell=True, check=True, stdout=subprocess.PIPE, universal_newlines=True)
output = process.stdout
outputList = output.split('\n\t')
stationMAC = str(outputList[0].split()[1])
stationSignal = str(outputList[7].split('[')[1].split(']')[0])
stationTXRate = str(outputList[8].split('\t')[1].split()[0])
stationRXRate = str(outputList[9].split('\t')[1].split()[0])
if stationTXByte_old == 0:
time_old = time.time()
stationTXByte_old = int(outputList[4].split('\t')[1])
stationRXByte_old = int(outputList[2].split('\t')[1])
stationTXByteRate = 0
stationRXByteRate = 0
else:
time_new = time.time()
time_diff = time_new - time_old
stationTXByte_new = int(outputList[4].split('\t')[1])
stationRXByte_new = int(outputList[2].split('\t')[1])
stationTXByteRate = str(round((stationTXByte_new - stationTXByte_old)/time_diff ,3))
stationRXByteRate = str(round((stationRXByte_new - stationRXByte_old)/time_diff,3))
stationTXByte_old = stationTXByte_new
stationRXByte_old = stationRXByte_new
time_old = time_new
thread_print("controller: %s. APMAC: %s. APTXByteRate: %s. APRXByteRate: %s. APSignal: %s. APTXRate: %s. APRXRate: %s. " % (id,stationMAC, stationTXByteRate, stationRXByteRate, stationSignal, stationTXRate, stationRXRate))
try:
ret = client.publish("senselet/" + id, str(time.time()) + '_' + stationMAC + '_' + stationTXByteRate + '_' + stationRXByteRate + '_' + stationSignal + '_' + stationTXRate + '_' + stationRXRate)
except Exception as e:
# we just print out the error
thread_print(str(e))
def main():
while True:
try:
# 04/16/2021 add wifi stats
publishWIFIStats()
ret = checkAndUpdateSensors()
if ret == -1:
thread_print("something wrong happened, lets try it agagin")
time.sleep(0.5)
continue
            # read link once per second for the next 3 s
            for _ in range(3):
                publishLink()
                time.sleep(1)
except KeyboardInterrupt:
# Ctrl-C handling and send kill to threads
thread_print ("Sending kill to threads...")
for sensor in currentSensors:
currentSensors[sensor]["threading"].stop()
for sensor in currentSensors:
currentSensors[sensor]["threading"].join()
break
thread_print ("Exited")
if __name__ == '__main__':
main()
fd.write("V")
fd.close()
print("watch dog stop")
| 2.71875 | 3 |
clif/python/gen.py | LqNoob/C-Python-interaction | 0 | 12767877 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generator helpers.
Produces pieces of generated code.
"""
from clif.python import astutils
from clif.python import postconv
VERSION = '0.2' # CLIF generated API version. Pure informative.
PY3OUTPUT = None # Target Python3 on True, Py2 on False, None-don't care.
I = ' '
def WriteTo(channel, lines):
for s in lines:
channel.write(s)
channel.write('\n')
def Headlines(src_file, hdr_files=(), sys_hdr_files=(), open_ns=None):
"""Generate header comment and #includes.
Args:
src_file: str - full name of the source file (C++ header)
hdr_files: [str] - additional c++ headers to #include "str"
If the first name is PYTHON, #include <Python.h>.
If str == PYOBJ, forward declare PyObject.
sys_hdr_files: set(str) - additional c++ headers to #include <str>
open_ns: str - emit namespace open_ns if not empty.
Yields:
source code lines
"""
yield '/' * 70
yield ('// This file was automatically generated by CLIF'
+ ('' if PY3OUTPUT is None else
' to run under Python %d' % (3 if PY3OUTPUT else 2)))
yield '// Version %s' % VERSION
yield '/' * 70
if src_file:
yield '// source: %s' % src_file
yield ''
python_h = False
if hdr_files[:1] == ['PYTHON']:
python_h = True
yield '#include <Python.h>'
del hdr_files[0]
for h in sys_hdr_files:
if h:
yield '#include <%s>' % h
for h in hdr_files:
if h == 'PYOBJ' and not python_h:
yield ''
yield '// Forward "declare" PyObject (instead of #include <Python.h>)'
yield 'struct _object; typedef _object PyObject;'
elif h:
yield '#include "%s"' % h
if open_ns:
yield ''
yield OpenNs(open_ns)
def OpenNs(namespace):
namespace = (namespace or 'clif').strip(':')
return ' '.join('namespace %s {' % ns for ns in namespace.split('::'))
def CloseNs(namespace):
namespace = (namespace or 'clif').strip(':')
return '} '*(1+namespace.count('::'))+' // namespace '+namespace
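# For example, per the implementations above:
#     OpenNs('foo::bar')  -> 'namespace foo { namespace bar {'
#     CloseNs('foo::bar') -> '} }  // namespace foo::bar'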
def TypeConverters(type_namespace, types, *gen_cvt_args):
"""Generate type converters for types in type_namespace."""
type_namespace = type_namespace or 'clif'
yield ''
yield OpenNs(type_namespace)
if type_namespace != 'clif':
yield 'using namespace ::clif;'
yield 'using ::clif::Clif_PyObjAs;'
yield 'using ::clif::Clif_PyObjFrom;'
for t in types:
for s in (
t.GenConverters(*gen_cvt_args)
): yield s
yield ''
yield CloseNs(type_namespace)
def _DefLine(pyname, cname, meth, doc):
if 'KEYWORD' in meth or 'NOARGS' in meth:
cname = '(PyCFunction)'+cname
return '{C("%s"), %s, %s, C("%s")}' % (pyname, cname, meth, doc)
def _DefTable(ctype, cname, lines):
yield ''
yield 'static %s %s[] = {' % (ctype, cname)
for p in lines:
yield I+_DefLine(*p)+','
yield I+'{}'
yield '};'
def MethodDef(methods):
for s in (
_DefTable('PyMethodDef', 'Methods', methods)
): yield s
MethodDef.name = 'Methods'
def GetSetDef(properties):
for s in (
_DefTable('PyGetSetDef', 'Properties', properties)
): yield s
GetSetDef.name = 'Properties'
def ReadyFunction(types_init):
"""Generate Ready() function to call PyType_Ready for wrapped types."""
yield ''
yield 'bool Ready() {'
for cppname, base, _ in types_init:
if base:
if '.' in base:
# |base| is a fully qualified Python name.
# The caller ensures we have only one Python base.
yield I+'PyObject* base_cls = ImportFQName("%s");' % base
yield I+'if (base_cls == nullptr) return false;'
yield I+'if (!PyObject_TypeCheck(base_cls, &PyType_Type)) {'
yield I+I+'Py_DECREF(base_cls);'
yield I+I+('PyErr_SetString(PyExc_TypeError, "Base class %s is not a '
'new style class inheriting from object.");' % base)
yield I+I+'return false;'
yield I+'}'
yield I+('%s.tp_base = reinterpret_cast<PyTypeObject*>(base_cls);'
% cppname)
yield I+'// Check that base_cls is a *statically* allocated PyType.'
yield I+'if (%s.tp_base->tp_alloc == PyType_GenericAlloc) {' % cppname
yield I+I+'Py_DECREF(base_cls);'
yield I+I+('PyErr_SetString(PyExc_TypeError, "Base class %s is a'
' dynamic (Python defined) class.");' % base)
yield I+I+'return false;'
yield I+'}'
else:
# base is Python wrapper type in a C++ class namespace defined locally.
# Allow to inherit only from top-level classes.
yield I+'%s.tp_base = &%s;' % (cppname, base)
yield I+'if (PyType_Ready(&%s) < 0) return false;' % cppname
yield I+'Py_INCREF(&%s); // For PyModule_AddObject to steal.' % cppname
yield I+'return true;'
yield '}'
def InitFunction(pathname, doc, meth_ref, init, dict_):
"""Generate a function to create the module and initialize it."""
if PY3OUTPUT:
yield ''
yield 'static struct PyModuleDef Module = {'
yield I+'PyModuleDef_HEAD_INIT,'
yield I+'"%s", // module name' % pathname
yield I+'"%s", // module doc' % doc
yield I+'-1, // module keeps state in global variables'
yield I+meth_ref
yield '};'
yield ''
yield 'PyObject* Init() {'
if PY3OUTPUT:
yield I+'PyObject* module = PyModule_Create(&Module);'
else:
yield I+'PyObject* module = Py_InitModule3("%s", %s, "%s");' % (
pathname, meth_ref, doc)
yield I+'if (!module) return nullptr;'
init_needs_err = False
for s in init:
assert ' return' not in s, 'use "goto err;" to handle errors'
if ' err;' in s: init_needs_err = True
yield I+s
for pair in dict_:
yield I+'if (PyModule_AddObject(module, "%s", %s) < 0) goto err;' % pair
yield I+'return module;'
if init_needs_err or dict_:
yield 'err:'
if PY3OUTPUT:
yield I+'Py_DECREF(module);'
yield I+'return nullptr;'
yield '}'
def TypeObject(tp_slots, slotgen, pyname, wname, fqclassname, ctor,
abstract, async_dtor=False, subst_cpp_ptr=''):
"""Generate PyTypeObject methods and table.
Args:
tp_slots: dict - values for PyTypeObject slots
slotgen: generator to produce body of PyTypeObject using tp_slots
pyname: str - Python class name
wname: str - C++ wrapper class name
fqclassname: str - FQ C++ class (being wrapped) name
ctor: str - (WRAPped/DEFault/None) type of generated ctor
abstract: bool - wrapped C++ class is abstract
async_dtor: bool - allow Python threads during C++ destructor
subst_cpp_ptr: str - C++ "replacement" class (being wrapped) if any
Yields:
Source code for PyTypeObject and tp_alloc / tp_init / tp_free methods.
"""
yield ''
yield '// %s __new__' % pyname
yield 'static PyObject* _allocator(PyTypeObject* type, Py_ssize_t nitems);'
yield '// %s __init__' % pyname
yield 'static int _ctor(PyObject* self, PyObject* args, PyObject* kw);'
yield ''
yield 'static void _dtor(void* self) {'
if async_dtor:
yield I+'Py_BEGIN_ALLOW_THREADS'
yield I+'delete reinterpret_cast<%s*>(self);' % wname
if async_dtor:
yield I+'Py_END_ALLOW_THREADS'
yield '}'
tp_slots['tp_free'] = '_dtor'
tp_slots['tp_dealloc'] = 'Clif_PyType_GenericFree'
tp_slots['tp_alloc'] = '_allocator'
tp_slots['tp_new'] = 'PyType_GenericNew'
tp_slots['tp_init'] = '_ctor' if ctor else 'Clif_PyType_Inconstructible'
tp_slots['tp_basicsize'] = 'sizeof(%s)' % wname
tp_slots['tp_itemsize'] = tp_slots['tp_version_tag'] = '0'
tp_slots['tp_dictoffset'] = tp_slots['tp_weaklistoffset'] = '0'
tp_slots['tp_flags'] = ' | '.join(tp_slots['tp_flags'])
tp_slots['tp_doc'] = '"CLIF wrapper for %s"' % fqclassname
wtype = '%s_Type' % wname
yield ''
yield 'PyTypeObject %s = {' % wtype
yield I+'PyVarObject_HEAD_INIT(&PyType_Type, 0)'
for s in slotgen(tp_slots):
yield s
yield '};'
yield ''
if ctor:
yield 'static int _ctor(PyObject* self, PyObject* args, PyObject* kw) {'
if abstract:
yield I+'if (Py_TYPE(self) == &%s) {' % wtype
yield I+I+'return Clif_PyType_Inconstructible(self, args, kw);'
yield I+'}'
if ctor == 'DEF':
# Skip __init__ if it's a METH_NOARGS.
yield I+('if ((args && PyTuple_GET_SIZE(args) != 0) ||'
' (kw && PyDict_Size(kw) != 0)) {')
yield I+I+('PyErr_SetString(PyExc_TypeError, "%s takes no arguments");' %
pyname)
yield I+I+'return -1;'
yield I+'}'
cpp = 'reinterpret_cast<%s*>(self)->cpp' % wname
yield I+'%s = ::clif::MakeShared<%s>();' % (cpp,
subst_cpp_ptr or fqclassname)
if subst_cpp_ptr:
yield I+'%s->::clif::PyObj::Init(self);' % cpp
yield I+'return 0;'
else: # ctor is WRAP (holds 'wrapper name')
yield I+'PyObject* init = %s(self, args, kw);' % ctor
yield I+'Py_XDECREF(init);'
yield I+'return init? 0: -1;'
yield '}'
yield ''
yield 'static PyObject* _allocator(PyTypeObject* type, Py_ssize_t nitems) {'
yield I+'assert(nitems == 0);'
yield I+'PyObject* self = reinterpret_cast<PyObject*>(new %s);' % wname
yield I+'return PyObject_Init(self, &%s);' % wtype
yield '}'
def _CreateInputParameter(func_name, ast_param, arg, args):
"""Return a string to create C++ stack var named arg. args += arg getter."""
ptype = ast_param.type
ctype = ptype.cpp_type
smartptr = (ctype.startswith('::std::unique_ptr') or
ctype.startswith('::std::shared_ptr'))
# std::function special case
if not ctype:
assert ptype.callable, 'Non-callable param has empty cpp_type'
if len(ptype.callable.returns) > 1:
raise ValueError('Callbacks may not have any output parameters, '
'%s param %s has %d' % (func_name, ast_param.name.native,
len(ptype.callable.returns)-1))
args.append('std::move(%s)' % arg)
return 'std::function<%s> %s;' % (astutils.StdFuncParamStr(ptype.callable),
arg)
# T*
if ptype.cpp_raw_pointer:
if ptype.cpp_toptr_conversion:
args.append(arg)
return '%s %s;' % (ctype, arg)
t = ctype[:-1]
if ctype.endswith('*'):
if ptype.cpp_abstract:
if ptype.cpp_touniqptr_conversion:
args.append(arg+'.get()')
return '::std::unique_ptr<%s> %s;' % (t, arg)
elif ptype.cpp_has_public_dtor:
# Create a copy on stack and pass its address.
if ptype.cpp_has_def_ctor:
args.append('&'+arg)
return '%s %s;' % (t, arg)
else:
args.append('&%s.value()' % arg)
return '::gtl::optional<%s> %s;' % (t, arg)
raise TypeError("Can't convert %s to %s" % (ptype.lang_type, ctype))
if (smartptr or ptype.cpp_abstract) and not ptype.cpp_touniqptr_conversion:
raise TypeError('Can\'t create "%s" variable (C++ type %s) in function %s'
', no valid conversion defined'
% (ast_param.name.native, ctype, func_name))
# unique_ptr<T>, shared_ptr<T>
if smartptr:
args.append('std::move(%s)' % arg)
return '%s %s;' % (ctype, arg)
# T, [const] T&
if ptype.cpp_toptr_conversion:
args.append('*'+arg)
return '%s* %s;' % (ctype, arg)
if ptype.cpp_abstract: # for AbstractType &
args.append('*'+arg)
return 'std::unique_ptr<%s> %s;' % (ctype, arg)
  # Create a copy on stack (even for T&, most cases should have a to_T* conv).
if ptype.cpp_has_def_ctor:
args.append('std::move(%s)' % arg)
return '%s %s;' % (ctype, arg)
else:
args.append(arg+'.value()')
return '::gtl::optional<%s> %s;' % (ctype, arg)
def FunctionCall(pyname, wrapper, doc, catch, call, postcall_init,
typepostconversion, func_ast, lineno, prepend_self=None):
"""Generate PyCFunction wrapper from AST.FuncDecl func_ast.
Args:
pyname: str - Python function name (may be special: ends with @)
wrapper: str - generated function name
    doc: str - C++ signature
catch: bool - catch C++ exceptions
call: str | [str] - C++ command(s) to call the wrapped function
(without "(params);" part).
postcall_init: str - C++ command; to (re)set ret0.
typepostconversion: dict(pytype, index) to convert to pytype
func_ast: AST.FuncDecl protobuf
lineno: int - .clif line number where func_ast defined
prepend_self: AST.Param - Use self as 1st parameter.
Yields:
Source code for wrapped function.
"""
ctxmgr = pyname.endswith('@')
if ctxmgr:
ctxmgr = pyname
assert ctxmgr in ('__enter__@', '__exit__@'), (
'Invalid context manager name ' + pyname)
pyname = pyname.rstrip('@')
nret = len(func_ast.returns)
params = [] # C++ parameter names.
nargs = len(func_ast.params)
yield ''
if func_ast.classmethod:
yield '// @classmethod ' + doc
arg0 = 'cls' # Extra protection that generated code does not use 'self'.
else:
yield '// ' + doc
arg0 = 'self'
yield 'static PyObject* %s(PyObject* %s%s) {' % (
wrapper, arg0, ', PyObject* args, PyObject* kw' if nargs else '')
if prepend_self:
yield I+_CreateInputParameter(pyname+' line %d' % lineno, prepend_self,
'arg0', params)
yield I+'if (!Clif_PyObjAs(self, &arg0)) return nullptr;'
minargs = sum(1 for p in func_ast.params if not p.default_value)
if nargs:
yield I+'PyObject* a[%d]%s;' % (nargs, '' if minargs == nargs else '{}')
yield I+'char* names[] = {'
for p in func_ast.params:
yield I+I+I+'C("%s"),' % p.name.native
yield I+I+I+'nullptr'
yield I+'};'
yield I+('if (!PyArg_ParseTupleAndKeywords(args, kw, "%s:%s", names, %s)) '
'return nullptr;' % ('O'*nargs if minargs == nargs else
'O'*minargs+'|'+'O'*(nargs-minargs), pyname,
', '.join('&a[%d]'%i for i in range(nargs))))
if minargs < nargs:
yield I+'int nargs; // Find how many args actually passed in.'
yield I+'for (nargs = %d; nargs > %d; --nargs) {' % (nargs, minargs)
yield I+I+'if (a[nargs-1] != nullptr) break;'
yield I+'}'
# Convert input parameters from Python.
for i, p in enumerate(func_ast.params):
n = i+1
arg = 'arg%d' % n
yield I+_CreateInputParameter(pyname+' line %d' % lineno, p, arg, params)
cvt = ('if (!Clif_PyObjAs(a[{i}], &{cvar})) return ArgError'
'("{func_name}", names[{i}], "{ctype}", a[{i}]);'
).format(i=i, cvar=arg, func_name=pyname, ctype=astutils.Type(p))
if i < minargs:
# Non-default parameter.
yield I+cvt
else:
yield I+'if (nargs > %d) {' % i
# Check if we're passed kw args, skipping some default C++ args.
# In this case we must substitute missed default args with default_value
if (p.default_value == 'default' # Matcher could not find the default.
or 'inf' in p.default_value): # W/A for b/29437257
if n < nargs:
yield I+I+('if (!a[{i}]) return DefaultArgMissedError('
'"{}", names[{i}]);'.format(pyname, i=i))
yield I+I+cvt
else:
# C-cast takes care of the case where |arg| is an enum value, while
# the matcher would return an integral literal. Using static_cast
# would be ideal, but its argument should be an expression, which a
# struct value like {1, 2, 3} is not.
yield I+I+'if (!a[%d]) %s = (%s)%s;' % (i, arg, astutils.Type(p),
p.default_value)
yield I+I+'else '+cvt
yield I+'}'
# Create input parameters for extra return values.
return_type = astutils.FuncReturnType(func_ast)
void_return_type = return_type == 'void'
for n, p in enumerate(func_ast.returns):
if n or void_return_type:
yield I+'%s ret%d{};' % (astutils.Type(p), n)
params.append('&ret%d' % n)
yield I+'// Call actual C++ method.'
if isinstance(call, list):
for s in call[:-1]:
yield I+s
call = call[-1]
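  # NB: "async" is a field name in the AST proto; since Python 3.7 it is a
  # reserved keyword, so on newer interpreters this attribute access would
  # need to be spelled getattr(func_ast, 'async').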
if func_ast.async:
if nargs:
yield I+'Py_INCREF(args);'
yield I+'Py_XINCREF(kw);'
yield I+'PyThreadState* _save;'
yield I+'Py_UNBLOCK_THREADS'
optional_ret0 = False
if (minargs < nargs or catch) and not void_return_type:
if func_ast.returns[0].type.cpp_has_def_ctor:
yield I+return_type+' ret0;'
else:
      # Using optional<> requires T to have T(x) and T::op=(x) available.
# While we need only t=x, implementing it will be a pain we skip for now.
yield I+'::gtl::optional<%s> ret0;' % return_type
optional_ret0 = True
if catch:
for s in _GenExceptionTry():
yield s
if minargs < nargs:
if not void_return_type:
call = 'ret0 = '+call
yield I+'switch (nargs) {'
for n in range(minargs, nargs+1):
yield I+'case %d:' % n
yield I+I+'%s; break;' % (call+astutils.TupleStr(params[:n]))
yield I+'}'
else:
call += astutils.TupleStr(params)
_I = I if catch else '' # pylint: disable=invalid-name
if void_return_type:
yield _I+I+call+';'
elif catch:
yield _I+I+'ret0 = '+call+';'
else:
yield _I+I+return_type+' ret0 = '+call+';'
if catch:
for s in _GenExceptionCatch():
yield s
if postcall_init:
if void_return_type:
yield I+postcall_init
else:
yield I+'ret0'+postcall_init
if func_ast.async:
yield I+'Py_BLOCK_THREADS'
if nargs:
yield I+'Py_DECREF(args);'
yield I+'Py_XDECREF(kw);'
if catch:
for s in _GenExceptionRaise():
yield s
# If ctxmgr, force return self on enter, None on exit.
if nret > 1 or (func_ast.postproc or ctxmgr) and nret:
yield I+'// Convert return values to Python.'
yield I+'PyObject* p, * result_tuple = PyTuple_New(%d);' % nret
yield I+'if (result_tuple == nullptr) return nullptr;'
for i in range(nret):
yield I+'if ((p=Clif_PyObjFrom(std::move(ret%d), %s)) == nullptr) {' % (
i, postconv.Initializer(func_ast.returns[i].type, typepostconversion))
yield I+I+'Py_DECREF(result_tuple);'
yield I+I+'return nullptr;'
yield I+'}'
yield I+'PyTuple_SET_ITEM(result_tuple, %d, p);' % i
if func_ast.postproc:
yield I+'PyObject* pyproc = ImportFQName("%s");' % func_ast.postproc
yield I+'if (pyproc == nullptr) {'
yield I+I+'Py_DECREF(result_tuple);'
yield I+I+'return nullptr;'
yield I+'}'
yield I+'p = PyObject_CallObject(pyproc, result_tuple);'
yield I+'Py_DECREF(pyproc);'
yield I+'Py_CLEAR(result_tuple);'
if ctxmgr:
yield I+'if (p == nullptr) return nullptr;'
yield I+'Py_DECREF(p); // Not needed by the context manager.'
else:
yield I+'result_tuple = p;'
if ctxmgr == '__enter__@':
yield I+'Py_XDECREF(result_tuple);'
yield I+'Py_INCREF(self);'
yield I+'return self;'
elif ctxmgr == '__exit__@':
yield I+'Py_XDECREF(result_tuple);'
yield I+'Py_RETURN_NONE;'
else:
yield I+'return result_tuple;'
elif nret:
yield I+'return Clif_PyObjFrom(std::move(ret0%s), %s);' % (
('.value()' if optional_ret0 else ''),
postconv.Initializer(func_ast.returns[0].type, typepostconversion))
elif ctxmgr == '__enter__@':
yield I+'Py_INCREF(self);'
yield I+'return self;'
else:
yield I+'Py_RETURN_NONE;'
yield '}'
def _GenExceptionTry():
yield I+'PyObject* err_type = nullptr;'
yield I+'string err_msg{"C++ exception"};'
yield I+'try {'
def _GenExceptionCatch():
yield I+'} catch(const std::exception& e) {'
yield I+I+'err_type = PyExc_RuntimeError;'
yield I+I+'err_msg += string(": ") + e.what();'
yield I+'} catch (...) {'
yield I+I+'err_type = PyExc_RuntimeError;'
yield I+'}'
def _GenExceptionRaise():
yield I+'if (err_type) {'
yield I+I+'PyErr_SetString(err_type, err_msg.c_str());'
yield I+I+'return nullptr;'
yield I+'}'
def VirtualFunctionCall(fname, f, pyname, abstract, postconvinit):
"""Generate virtual redirector call wrapper from AST.FuncDecl f."""
name = f.name.cpp_name
ret = astutils.FuncReturnType(f, true_cpp_type=True)
arg = astutils.FuncParamStr(f, 'a', true_cpp_type=True)
mod = ['']
if f.cpp_const_method: mod.append('const')
if f.cpp_noexcept: mod.append('noexcept')
yield ''
yield I+'%s %s%s%s override {' % (ret, fname, arg, ' '.join(mod))
params = astutils.TupleStr('std::move(a%i)' % i for i in range(
len(f.params) + len(f.returns) - (ret != 'void')))
yield I+I+('auto f = ::clif::SafeGetAttrString(pythis.get(), C("%s"));'
% f.name.native)
yield I+I+'if (f.get()) {'
# TODO: Pass postconvinit(f.params...) to callback::Func.
ret_st = 'return ' if ret != 'void' else ''
yield I+I+I+'%s::clif::callback::Func<%s>(f.get())%s;' % (
ret_st, ', '.join([ret] + list(astutils.Type(a) for a in f.params)
+ list(astutils.FuncReturns(f))), params)
yield I+I+'} else {'
if abstract:
# This is only called from C++. Since f has no info if it is pure virtual,
# we can't always generate the call, so we always fail in an abstract class.
yield I+I+I+('Py_FatalError("@virtual method %s.%s has no Python '
'implementation.");' % (pyname, f.name.native))
# In Python 2 Py_FatalError is not marked __attribute__((__noreturn__)),
# so to avoid -Wreturn-type warning add extra abort(). It does not hurt ;)
yield I+I+I+'abort();'
else:
yield I+I+I+ret_st + name + params + ';'
yield I+I+'}'
yield I+'}'
def FromFunctionDef(ctype, wdef, wname, flags, doc):
"""PyCFunc definition."""
assert ctype.startswith('std::function<'), repr(ctype)
return 'static PyMethodDef %s = %s;' % (wdef, _DefLine('', wname, flags, doc))
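# For example (hypothetical arguments), FromFunctionDef(
#     'std::function<int (int)>', 'MethodDef_f', 'wrapF', 'METH_VARARGS', 'f docs')
# returns:
#     'static PyMethodDef MethodDef_f = {C(""), wrapF, METH_VARARGS, C("f docs")};'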
| 2.046875 | 2 |
tests/ut/python/mindrecord/test_tfrecord_to_mr.py | shaolei-wang/mindspore | 1 | 12767878 | # Copyright 2020 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test tfrecord to mindrecord tool"""
import collections
from importlib import import_module
import os
import numpy as np
import pytest
from mindspore import log as logger
from mindspore.mindrecord import FileReader
from mindspore.mindrecord import TFRecordToMR
SupportedTensorFlowVersion = '2.1.0'
try:
tf = import_module("tensorflow") # just used to convert tfrecord to mindrecord
except ModuleNotFoundError:
logger.warning("tensorflow module not found.")
tf = None
TFRECORD_DATA_DIR = "../data/mindrecord/testTFRecordData"
TFRECORD_FILE_NAME = "test.tfrecord"
MINDRECORD_FILE_NAME = "test.mindrecord"
PARTITION_NUM = 1
def verify_data(transformer, reader):
"""Verify the data by read from mindrecord"""
tf_iter = transformer.tfrecord_iterator()
mr_iter = reader.get_next()
count = 0
for tf_item, mr_item in zip(tf_iter, mr_iter):
count = count + 1
assert len(tf_item) == 6
assert len(mr_item) == 6
for key, value in tf_item.items():
logger.info("key: {}, tfrecord: value: {}, mindrecord: value: {}".format(key, value, mr_item[key]))
if isinstance(value, np.ndarray):
assert (value == mr_item[key]).all()
else:
assert value == mr_item[key]
assert count == 10
def generate_tfrecord():
def create_int_feature(values):
if isinstance(values, list):
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) # values: [int, int, int]
else:
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=[values])) # values: int
return feature
def create_float_feature(values):
if isinstance(values, list):
feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values))) # values: [float, float]
else:
feature = tf.train.Feature(float_list=tf.train.FloatList(value=[values])) # values: float
return feature
def create_bytes_feature(values):
if isinstance(values, bytes):
feature = tf.train.Feature(bytes_list=tf.train.BytesList(value=[values])) # values: bytes
else:
# values: string
feature = tf.train.Feature(bytes_list=tf.train.BytesList(value=[bytes(values, encoding='utf-8')]))
return feature
writer = tf.io.TFRecordWriter(os.path.join(TFRECORD_DATA_DIR, TFRECORD_FILE_NAME))
example_count = 0
for i in range(10):
file_name = "000" + str(i) + ".jpg"
image_bytes = bytes(str("aaaabbbbcccc" + str(i)), encoding="utf-8")
int64_scalar = i
float_scalar = float(i)
int64_list = [i, i+1, i+2, i+3, i+4, i+1234567890]
float_list = [float(i), float(i+1), float(i+2.8), float(i+3.2),
float(i+4.4), float(i+123456.9), float(i+98765432.1)]
features = collections.OrderedDict()
features["file_name"] = create_bytes_feature(file_name)
features["image_bytes"] = create_bytes_feature(image_bytes)
features["int64_scalar"] = create_int_feature(int64_scalar)
features["float_scalar"] = create_float_feature(float_scalar)
features["int64_list"] = create_int_feature(int64_list)
features["float_list"] = create_float_feature(float_list)
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
example_count += 1
writer.close()
logger.info("Write {} rows in tfrecord.".format(example_count))
def test_tfrecord_to_mindrecord():
"""test transform tfrecord to mindrecord."""
if not tf or tf.__version__ < SupportedTensorFlowVersion:
# skip the test
logger.warning("Module tensorflow is not found or version wrong, \
please use pip install it / reinstall version >= {}.".format(SupportedTensorFlowVersion))
return
generate_tfrecord()
assert os.path.exists(os.path.join(TFRECORD_DATA_DIR, TFRECORD_FILE_NAME))
feature_dict = {"file_name": tf.io.FixedLenFeature([], tf.string),
"image_bytes": tf.io.FixedLenFeature([], tf.string),
"int64_scalar": tf.io.FixedLenFeature([], tf.int64),
"float_scalar": tf.io.FixedLenFeature([], tf.float32),
"int64_list": tf.io.FixedLenFeature([6], tf.int64),
"float_list": tf.io.FixedLenFeature([7], tf.float32),
}
if os.path.exists(MINDRECORD_FILE_NAME):
os.remove(MINDRECORD_FILE_NAME)
if os.path.exists(MINDRECORD_FILE_NAME + ".db"):
os.remove(MINDRECORD_FILE_NAME + ".db")
tfrecord_transformer = TFRecordToMR(os.path.join(TFRECORD_DATA_DIR, TFRECORD_FILE_NAME),
MINDRECORD_FILE_NAME, feature_dict, ["image_bytes"])
tfrecord_transformer.transform()
assert os.path.exists(MINDRECORD_FILE_NAME)
assert os.path.exists(MINDRECORD_FILE_NAME + ".db")
fr_mindrecord = FileReader(MINDRECORD_FILE_NAME)
verify_data(tfrecord_transformer, fr_mindrecord)
os.remove(MINDRECORD_FILE_NAME)
os.remove(MINDRECORD_FILE_NAME + ".db")
os.remove(os.path.join(TFRECORD_DATA_DIR, TFRECORD_FILE_NAME))
def test_tfrecord_to_mindrecord_scalar_with_1():
"""test transform tfrecord to mindrecord."""
if not tf or tf.__version__ < SupportedTensorFlowVersion:
# skip the test
logger.warning("Module tensorflow is not found or version wrong, \
please use pip install it / reinstall version >= {}.".format(SupportedTensorFlowVersion))
return
generate_tfrecord()
assert os.path.exists(os.path.join(TFRECORD_DATA_DIR, TFRECORD_FILE_NAME))
feature_dict = {"file_name": tf.io.FixedLenFeature([], tf.string),
"image_bytes": tf.io.FixedLenFeature([], tf.string),
"int64_scalar": tf.io.FixedLenFeature([1], tf.int64),
"float_scalar": tf.io.FixedLenFeature([1], tf.float32),
"int64_list": tf.io.FixedLenFeature([6], tf.int64),
"float_list": tf.io.FixedLenFeature([7], tf.float32),
}
if os.path.exists(MINDRECORD_FILE_NAME):
os.remove(MINDRECORD_FILE_NAME)
if os.path.exists(MINDRECORD_FILE_NAME + ".db"):
os.remove(MINDRECORD_FILE_NAME + ".db")
tfrecord_transformer = TFRecordToMR(os.path.join(TFRECORD_DATA_DIR, TFRECORD_FILE_NAME),
MINDRECORD_FILE_NAME, feature_dict, ["image_bytes"])
tfrecord_transformer.transform()
assert os.path.exists(MINDRECORD_FILE_NAME)
assert os.path.exists(MINDRECORD_FILE_NAME + ".db")
fr_mindrecord = FileReader(MINDRECORD_FILE_NAME)
verify_data(tfrecord_transformer, fr_mindrecord)
os.remove(MINDRECORD_FILE_NAME)
os.remove(MINDRECORD_FILE_NAME + ".db")
os.remove(os.path.join(TFRECORD_DATA_DIR, TFRECORD_FILE_NAME))
def test_tfrecord_to_mindrecord_scalar_with_1_list_small_len_exception():
"""test transform tfrecord to mindrecord."""
if not tf or tf.__version__ < SupportedTensorFlowVersion:
# skip the test
logger.warning("Module tensorflow is not found or version wrong, \
please use pip install it / reinstall version >= {}.".format(SupportedTensorFlowVersion))
return
generate_tfrecord()
assert os.path.exists(os.path.join(TFRECORD_DATA_DIR, TFRECORD_FILE_NAME))
feature_dict = {"file_name": tf.io.FixedLenFeature([], tf.string),
"image_bytes": tf.io.FixedLenFeature([], tf.string),
"int64_scalar": tf.io.FixedLenFeature([1], tf.int64),
"float_scalar": tf.io.FixedLenFeature([1], tf.float32),
"int64_list": tf.io.FixedLenFeature([6], tf.int64),
"float_list": tf.io.FixedLenFeature([2], tf.float32),
}
if os.path.exists(MINDRECORD_FILE_NAME):
os.remove(MINDRECORD_FILE_NAME)
if os.path.exists(MINDRECORD_FILE_NAME + ".db"):
os.remove(MINDRECORD_FILE_NAME + ".db")
with pytest.raises(ValueError):
tfrecord_transformer = TFRecordToMR(os.path.join(TFRECORD_DATA_DIR, TFRECORD_FILE_NAME),
MINDRECORD_FILE_NAME, feature_dict, ["image_bytes"])
tfrecord_transformer.transform()
if os.path.exists(MINDRECORD_FILE_NAME):
os.remove(MINDRECORD_FILE_NAME)
if os.path.exists(MINDRECORD_FILE_NAME + ".db"):
os.remove(MINDRECORD_FILE_NAME + ".db")
os.remove(os.path.join(TFRECORD_DATA_DIR, TFRECORD_FILE_NAME))
def test_tfrecord_to_mindrecord_list_with_diff_type_exception():
"""test transform tfrecord to mindrecord."""
if not tf or tf.__version__ < SupportedTensorFlowVersion:
# skip the test
logger.warning("Module tensorflow is not found or version wrong, \
please use pip install it / reinstall version >= {}.".format(SupportedTensorFlowVersion))
return
generate_tfrecord()
assert os.path.exists(os.path.join(TFRECORD_DATA_DIR, TFRECORD_FILE_NAME))
feature_dict = {"file_name": tf.io.FixedLenFeature([], tf.string),
"image_bytes": tf.io.FixedLenFeature([], tf.string),
"int64_scalar": tf.io.FixedLenFeature([1], tf.int64),
"float_scalar": tf.io.FixedLenFeature([1], tf.float32),
"int64_list": tf.io.FixedLenFeature([6], tf.float32),
"float_list": tf.io.FixedLenFeature([7], tf.float32),
}
if os.path.exists(MINDRECORD_FILE_NAME):
os.remove(MINDRECORD_FILE_NAME)
if os.path.exists(MINDRECORD_FILE_NAME + ".db"):
os.remove(MINDRECORD_FILE_NAME + ".db")
with pytest.raises(ValueError):
tfrecord_transformer = TFRecordToMR(os.path.join(TFRECORD_DATA_DIR, TFRECORD_FILE_NAME),
MINDRECORD_FILE_NAME, feature_dict, ["image_bytes"])
tfrecord_transformer.transform()
if os.path.exists(MINDRECORD_FILE_NAME):
os.remove(MINDRECORD_FILE_NAME)
if os.path.exists(MINDRECORD_FILE_NAME + ".db"):
os.remove(MINDRECORD_FILE_NAME + ".db")
os.remove(os.path.join(TFRECORD_DATA_DIR, TFRECORD_FILE_NAME))
def test_tfrecord_to_mindrecord_list_without_bytes_type():
"""test transform tfrecord to mindrecord."""
if not tf or tf.__version__ < SupportedTensorFlowVersion:
# skip the test
logger.warning("Module tensorflow is not found or version wrong, \
please use pip install it / reinstall version >= {}.".format(SupportedTensorFlowVersion))
return
generate_tfrecord()
assert os.path.exists(os.path.join(TFRECORD_DATA_DIR, TFRECORD_FILE_NAME))
feature_dict = {"file_name": tf.io.FixedLenFeature([], tf.string),
"image_bytes": tf.io.FixedLenFeature([], tf.string),
"int64_scalar": tf.io.FixedLenFeature([1], tf.int64),
"float_scalar": tf.io.FixedLenFeature([1], tf.float32),
"int64_list": tf.io.FixedLenFeature([6], tf.int64),
"float_list": tf.io.FixedLenFeature([7], tf.float32),
}
if os.path.exists(MINDRECORD_FILE_NAME):
os.remove(MINDRECORD_FILE_NAME)
if os.path.exists(MINDRECORD_FILE_NAME + ".db"):
os.remove(MINDRECORD_FILE_NAME + ".db")
tfrecord_transformer = TFRecordToMR(os.path.join(TFRECORD_DATA_DIR, TFRECORD_FILE_NAME),
MINDRECORD_FILE_NAME, feature_dict)
tfrecord_transformer.transform()
assert os.path.exists(MINDRECORD_FILE_NAME)
assert os.path.exists(MINDRECORD_FILE_NAME + ".db")
fr_mindrecord = FileReader(MINDRECORD_FILE_NAME)
verify_data(tfrecord_transformer, fr_mindrecord)
os.remove(MINDRECORD_FILE_NAME)
os.remove(MINDRECORD_FILE_NAME + ".db")
os.remove(os.path.join(TFRECORD_DATA_DIR, TFRECORD_FILE_NAME))
def test_tfrecord_to_mindrecord_scalar_with_2_exception():
"""test transform tfrecord to mindrecord."""
if not tf or tf.__version__ < SupportedTensorFlowVersion:
# skip the test
logger.warning("Module tensorflow is not found or version wrong, \
please use pip install it / reinstall version >= {}.".format(SupportedTensorFlowVersion))
return
generate_tfrecord()
assert os.path.exists(os.path.join(TFRECORD_DATA_DIR, TFRECORD_FILE_NAME))
feature_dict = {"file_name": tf.io.FixedLenFeature([], tf.string),
"image_bytes": tf.io.FixedLenFeature([], tf.string),
"int64_scalar": tf.io.FixedLenFeature([2], tf.int64),
"float_scalar": tf.io.FixedLenFeature([1], tf.float32),
"int64_list": tf.io.FixedLenFeature([6], tf.int64),
"float_list": tf.io.FixedLenFeature([7], tf.float32),
}
if os.path.exists(MINDRECORD_FILE_NAME):
os.remove(MINDRECORD_FILE_NAME)
if os.path.exists(MINDRECORD_FILE_NAME + ".db"):
os.remove(MINDRECORD_FILE_NAME + ".db")
tfrecord_transformer = TFRecordToMR(os.path.join(TFRECORD_DATA_DIR, TFRECORD_FILE_NAME),
MINDRECORD_FILE_NAME, feature_dict, ["image_bytes"])
with pytest.raises(ValueError):
tfrecord_transformer.transform()
if os.path.exists(MINDRECORD_FILE_NAME):
os.remove(MINDRECORD_FILE_NAME)
if os.path.exists(MINDRECORD_FILE_NAME + ".db"):
os.remove(MINDRECORD_FILE_NAME + ".db")
os.remove(os.path.join(TFRECORD_DATA_DIR, TFRECORD_FILE_NAME))
def test_tfrecord_to_mindrecord_scalar_string_with_1_exception():
"""test transform tfrecord to mindrecord."""
if not tf or tf.__version__ < SupportedTensorFlowVersion:
# skip the test
logger.warning("Module tensorflow is not found or version wrong, \
please use pip install it / reinstall version >= {}.".format(SupportedTensorFlowVersion))
return
generate_tfrecord()
assert os.path.exists(os.path.join(TFRECORD_DATA_DIR, TFRECORD_FILE_NAME))
feature_dict = {"file_name": tf.io.FixedLenFeature([1], tf.string),
"image_bytes": tf.io.FixedLenFeature([], tf.string),
"int64_scalar": tf.io.FixedLenFeature([1], tf.int64),
"float_scalar": tf.io.FixedLenFeature([1], tf.float32),
"int64_list": tf.io.FixedLenFeature([6], tf.int64),
"float_list": tf.io.FixedLenFeature([7], tf.float32),
}
if os.path.exists(MINDRECORD_FILE_NAME):
os.remove(MINDRECORD_FILE_NAME)
if os.path.exists(MINDRECORD_FILE_NAME + ".db"):
os.remove(MINDRECORD_FILE_NAME + ".db")
with pytest.raises(ValueError):
tfrecord_transformer = TFRecordToMR(os.path.join(TFRECORD_DATA_DIR, TFRECORD_FILE_NAME),
MINDRECORD_FILE_NAME, feature_dict, ["image_bytes"])
tfrecord_transformer.transform()
if os.path.exists(MINDRECORD_FILE_NAME):
os.remove(MINDRECORD_FILE_NAME)
if os.path.exists(MINDRECORD_FILE_NAME + ".db"):
os.remove(MINDRECORD_FILE_NAME + ".db")
os.remove(os.path.join(TFRECORD_DATA_DIR, TFRECORD_FILE_NAME))
def test_tfrecord_to_mindrecord_scalar_bytes_with_10_exception():
"""test transform tfrecord to mindrecord."""
if not tf or tf.__version__ < SupportedTensorFlowVersion:
# skip the test
logger.warning("Module tensorflow is not found or version wrong, \
please use pip install it / reinstall version >= {}.".format(SupportedTensorFlowVersion))
return
generate_tfrecord()
assert os.path.exists(os.path.join(TFRECORD_DATA_DIR, TFRECORD_FILE_NAME))
feature_dict = {"file_name": tf.io.FixedLenFeature([], tf.string),
"image_bytes": tf.io.FixedLenFeature([10], tf.string),
"int64_scalar": tf.io.FixedLenFeature([1], tf.int64),
"float_scalar": tf.io.FixedLenFeature([1], tf.float32),
"int64_list": tf.io.FixedLenFeature([6], tf.int64),
"float_list": tf.io.FixedLenFeature([7], tf.float32),
}
if os.path.exists(MINDRECORD_FILE_NAME):
os.remove(MINDRECORD_FILE_NAME)
if os.path.exists(MINDRECORD_FILE_NAME + ".db"):
os.remove(MINDRECORD_FILE_NAME + ".db")
with pytest.raises(ValueError):
tfrecord_transformer = TFRecordToMR(os.path.join(TFRECORD_DATA_DIR, TFRECORD_FILE_NAME),
MINDRECORD_FILE_NAME, feature_dict, ["image_bytes"])
tfrecord_transformer.transform()
if os.path.exists(MINDRECORD_FILE_NAME):
os.remove(MINDRECORD_FILE_NAME)
if os.path.exists(MINDRECORD_FILE_NAME + ".db"):
os.remove(MINDRECORD_FILE_NAME + ".db")
os.remove(os.path.join(TFRECORD_DATA_DIR, TFRECORD_FILE_NAME))
| 2.15625 | 2 |
axelerate/__init__.py | chuangzhu/aXeleRate | 0 | 12767883 | from .train import setup_training
from .infer import setup_inference
| 0.894531 | 1 |
tools/report-converter/tests/unit/analyzers/test_clang_tidy_parser.py | hyker/codechecker | 1 | 12767880 | # -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""
This module tests the correctness of the OutputParser and PListConverter, which
used in sequence transform a Clang Tidy output file to a plist file.
"""
import os
import plistlib
import shutil
import tempfile
import unittest
from codechecker_report_converter.analyzers.clang_tidy import analyzer_result
from codechecker_report_converter.report.parser import plist
OLD_PWD = None
def setup_module():
"""Setup the test tidy reprs for the test classes in the module."""
global OLD_PWD
OLD_PWD = os.getcwd()
os.chdir(os.path.join(os.path.dirname(__file__), 'tidy_output_test_files'))
def teardown_module():
"""Restore environment after tests have ran."""
global OLD_PWD
os.chdir(OLD_PWD)
class ClangTidyAnalyzerResultTestCase(unittest.TestCase):
""" Test the output of the ClangTidyAnalyzerResult. """
def setUp(self):
""" Setup the test. """
self.analyzer_result = analyzer_result.AnalyzerResult()
self.cc_result_dir = tempfile.mkdtemp()
def tearDown(self):
""" Clean temporary directory. """
shutil.rmtree(self.cc_result_dir)
def __check_analyzer_result(self, analyzer_result, analyzer_result_plist,
source_files, expected_plist):
""" Check the result of the analyzer transformation. """
self.analyzer_result.transform(
analyzer_result, self.cc_result_dir, plist.EXTENSION)
plist_file = os.path.join(self.cc_result_dir, analyzer_result_plist)
with open(plist_file, mode='rb') as pfile:
res = plistlib.load(pfile)
# Use relative path for this test.
res['files'] = source_files
with open(expected_plist, mode='rb') as pfile:
exp = plistlib.load(pfile)
self.assertTrue(res['metadata']['generated_by']['version'])
res['metadata']['generated_by']['version'] = "x.y.z"
self.assertEqual(res, exp)
def test_empty1(self):
""" Test for empty Messages. """
ret = self.analyzer_result.transform(
'empty1.out', self.cc_result_dir, plist.EXTENSION)
self.assertFalse(ret)
def test_empty2(self):
""" Test for empty Messages with multiple line. """
ret = self.analyzer_result.transform(
'empty2.out', self.cc_result_dir, plist.EXTENSION)
self.assertFalse(ret)
def test_tidy1(self):
""" Test for the tidy1.plist file. """
self.__check_analyzer_result('tidy1.out', 'test.cpp_clang-tidy.plist',
['files/test.cpp'], 'tidy1.plist')
def test_tidy2(self):
""" Test for the tidy2.plist file. """
self.__check_analyzer_result('tidy2.out', 'test2.cpp_clang-tidy.plist',
['files/test2.cpp'], 'tidy2.plist')
def test_tidy3(self):
""" Test for the tidy3.plist file. """
self.__check_analyzer_result('tidy3.out', 'test3.cpp_clang-tidy.plist',
['files/test3.cpp'],
'tidy3_cpp.plist')
self.__check_analyzer_result('tidy3.out', 'test3.hh_clang-tidy.plist',
['files/test3.cpp', 'files/test3.hh'],
'tidy3_hh.plist')
| 2.265625 | 2 |
tests/test_Results.py | grahamgower/moments | 0 | 12767881 | import os
import unittest
import numpy
import moments
import time
class ResultsTestCase(unittest.TestCase):
def setUp(self):
self.startTime = time.time()
def tearDown(self):
t = time.time() - self.startTime
print("%s: %.3f seconds" % (self.id(), t))
def test_1d_ic(self):
        # This is just the standard neutral model
n = 10
fs = moments.Spectrum(numpy.zeros(n+1))
fs.integrate([1], tf=10, dt_fac=0.01)
answer = moments.Spectrum(1./numpy.arange(n+1))
        self.assertTrue(numpy.ma.allclose(fs, answer, atol=5e-5))
def test_1pop(self):
n = 15
f = lambda x: [1+0.0001*x]
sfs = moments.Spectrum(numpy.zeros([n+1]))
sfs.integrate(f, 5, 0.01, theta=1.0, h=0.1, gamma=-1)
sfs_ref = moments.Spectrum.from_file('test_files/1_pop.fs')
self.assertTrue(numpy.allclose(sfs, sfs_ref))
def test_2pops_neutral(self):
n = 20
mig = numpy.ones([2, 2])
f = lambda x: [1, 1+0.0001*x]
sfs = moments.Spectrum(numpy.zeros([n+1, n+1]))
sfs.integrate(f, 10, 0.005, theta=1.0, h=[0.5, 0.5], gamma=[0, 0], m=mig)
sfs_ref = moments.Spectrum.from_file('test_files/2_pops_neutral.fs')
self.assertTrue(numpy.allclose(sfs, sfs_ref))
def test_2pops(self):
n1, n2 = 15, 20
mig = numpy.ones([2, 2])
f = lambda x: [1, 1+0.0001*x]
sfs = moments.Spectrum(numpy.zeros([n1+1, n2+1]))
sfs.integrate(f, 10, 0.005, theta=1.0, h=[0.6, 0.6], gamma=[2, 2], m=mig)
sfs_ref = moments.Spectrum.from_file('test_files/2_pops.fs')
self.assertTrue(numpy.allclose(sfs, sfs_ref))
def test_3pops_slow(self):
n1, n2, n3 = 15, 20, 18
gamma = [0, 0.5, -2]
h = [0.5, 0.1, 0.9]
mig = numpy.array([[0, 5, 2],[1, 0, 1],[10, 0, 1]])
f = lambda x: [1, 1, 1+0.0001*x]
sfs = moments.Spectrum(numpy.zeros([n1+1, n2+1, n3+1]))
sfs.integrate(f, 10, 0.01, theta=1.0, h=h, gamma=gamma, m=mig)
sfs_ref = moments.Spectrum.from_file('test_files/3_pops.fs')
self.assertTrue(numpy.allclose(sfs, sfs_ref))
def test_IM(self):
params = (0.8, 2.0, 0.6, 0.45, 5.0, 0.3)
ns = (20,13)
theta = 1000.
fs = theta*moments.Demographics2D.IM(params, ns)
dadi_fs = moments.Spectrum.from_file('test_files/IM.fs')
resid = moments.Inference.Anscombe_Poisson_residual(fs,dadi_fs)
        self.assertTrue(abs(resid).max() < 0.25)
suite = unittest.TestLoader().loadTestsFromTestCase(ResultsTestCase)
if __name__ == '__main__':
unittest.main()
| 2.5625 | 3 |
tests/test.py | microsoft/attested-fetch | 0 | 12767882 | from pathlib import Path
import json
import base64
import hashlib
import subprocess
import tempfile
THIS_DIR = Path(__file__).parent
DIST_DIR = THIS_DIR.parent / 'dist'
OEVERIFY = "/opt/openenclave/bin/oeverify"
TEST_URL = "https://www.microsoft.com/en-gb/"
TEST_NONCE = "nonce123"
def test():
out_path = "test.json"
# Run afetch
subprocess.run([
DIST_DIR / "afetch",
DIST_DIR / "libafetch.enclave.so",
out_path,
TEST_URL, TEST_NONCE
], check=True)
# Read attested result
with open(out_path) as f:
out = json.load(f)
# Verify evidence and endorsements using Open Enclave
with tempfile.TemporaryDirectory() as tmpdir:
# Run oeverify
evidence_path = Path(tmpdir) / "evidence.bin"
endorsements_path = Path(tmpdir) / "endorsements.bin"
with open(evidence_path, "wb") as f:
f.write(base64.b64decode(out["evidence"]))
with open(endorsements_path, "wb") as f:
f.write(base64.b64decode(out["endorsements"]))
result = subprocess.run([
OEVERIFY, "-r", evidence_path, "-e", endorsements_path
], capture_output=True, universal_newlines=True, check=True)
# Extract report data from stdout
prefix = "sgx_report_data:"
sgx_report_data = None
for line in result.stdout.splitlines():
if line.startswith(prefix):
sgx_report_data = line[len(prefix):].strip()
assert sgx_report_data is not None
# Check if data hash matches report data in evidence
data_hash = "0x" + hashlib.sha256(out["data"].encode()).digest().hex()
assert sgx_report_data == data_hash, f"{data_hash} != {sgx_report_data}"
# Finally, check if the data is valid JSON
data = json.loads(base64.b64decode(out["data"]))
assert data["nonce"] == TEST_NONCE, data["nonce"]
assert data["url"] == TEST_URL, data["url"]
assert len(data["certs"]) > 0, data["certs"]
assert len(data["body"]) > 0, data["body"]
if __name__ == "__main__":
test()
print("All tests succeeded!")
| 2.21875 | 2 |
lightreid/models/backbones/__init__.py | nataliamiccini/light-reid | 296 | 12767883 | from .resnet import resnet18, resnet34, resnet50, resnet101, resnet152
from .resnet import resnet18ibna, resnet34ibna, resnet50ibna, resnet101ibna, resnet152ibna
from .transformers import *
__cnnbackbone_factory = {
# resnet series
'resnet18': resnet18,
'resnet34': resnet34,
'resnet50': resnet50,
'resnet101': resnet101,
'resnet152': resnet152,
'resnet18ibna': resnet18ibna,
'resnet34ibna': resnet34ibna,
'resnet50ibna': resnet50ibna,
'resnet101ibna': resnet101ibna,
'resnet152ibna': resnet152ibna,
# vision transformer series
'vit_small_patch16_224': vit_small_patch16_224,
'vit_base_patch16_224': vit_base_patch16_224,
'vit_base_patch32_224': vit_base_patch32_224,
'vit_base_patch16_384': vit_base_patch16_384,
'vit_base_patch32_384': vit_base_patch32_384,
'vit_large_patch16_224': vit_large_patch16_224,
'vit_large_patch32_224': vit_large_patch32_224,
'vit_large_patch16_384': vit_large_patch16_384,
'vit_large_patch32_384': vit_large_patch32_384,
'vit_base_patch16_224_in21k': vit_base_patch16_224_in21k,
'vit_base_patch32_224_in21k': vit_base_patch32_224_in21k,
'vit_large_patch16_224_in21k': vit_large_patch16_224_in21k,
'vit_large_patch32_224_in21k': vit_large_patch32_224_in21k,
'vit_huge_patch14_224_in21k': vit_huge_patch14_224_in21k,
'vit_deit_tiny_patch16_224': vit_deit_tiny_patch16_224,
'vit_deit_small_patch16_224': vit_deit_small_patch16_224,
'vit_deit_base_patch16_224': vit_deit_base_patch16_224,
'vit_deit_base_patch16_384': vit_deit_base_patch16_384,
'vit_deit_tiny_distilled_patch16_224': vit_deit_tiny_distilled_patch16_224,
'vit_deit_small_distilled_patch16_224': vit_deit_small_distilled_patch16_224,
'vit_deit_base_distilled_patch16_224': vit_deit_base_distilled_patch16_224,
'vit_deit_base_distilled_patch16_384': vit_deit_base_distilled_patch16_384,
'vit_base_patch16_224_miil_in21k': vit_base_patch16_224_miil_in21k,
'vit_base_patch16_224_miil': vit_base_patch16_224_miil,
}
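# Example usage (sketch): build a backbone by name from the factory above;
# extra keyword arguments are simply forwarded to the constructor.
#     backbone = build_cnnbackbone('resnet50', pretrained=False)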
def build_cnnbackbone(name, pretrained=True, **kwargs):
return __cnnbackbone_factory[name](pretrained=pretrained, **kwargs) | 1.195313 | 1 |
thmigctrl.py | bazbt3/thmigpen | 1 | 12767884 | #!/usr/bin/env python3
# thmigctrl
# v0.1.0 for Python 3.5
# Runs modules based on a matching hashtag
# Setup tag and channel parameters, a list of valid command options:
tag = 'tmctrl'
retrievecount = 20
channelid = 962
# Import @33MHz and @thrrgilag's library for interacting with pnut.io:
import pnutpy
# Import time, used to delay posting to avoid rate limits:
import time
# Setup pnut.io authorisation:
tokenfile = open("pnut_app_token.txt", "r")
token = tokenfile.read()
token = token.strip()
pnutpy.api.add_authorization_token(token)
# Get hashtag content from pnut.io:
d = pnutpy.api.posts_with_hashtag(tag, count = retrievecount)
# Extract posts, check each unseen post for a matching command, and construct a reply:
# Open the previous post numbers file:
f=open('pollctrl.txt','r')
y = f.readlines()
f.close()
f=open('pollctrl.txt','w')
posttext = ''
number = retrievecount
# hashtag = ''
while number >= 0:
try:
if not 'is_deleted' in d[0][number]:
user = str(d[0][number]["user"]["username"])
querypost = d[0][number]["content"]["text"]
postnum = str(d[0][number]["id"])
# If postnum does not appear in the file it's not been seen, so process it to see if a command was made:
success = False
if (not (postnum + '\n') in y):
if 'help' in querypost:
posttext = '''
*Checks only every 15 minutes.
*Precede all commands with a hash
tmctrl help:
this!
tmask #hashtag:
Suggest a hashtag
tmpoll #hashtag:
Vote for a hashtag
'''
success = True
elif ('ask' in querypost):
posttext = ' ask'
success = True
elif ('poll' in querypost):
posttext = ' poll'
success = True
elif success == False:
posttext = ' Oops, I don\'t understand; please try again. Try #help for more.'
posttext = '@' + user + posttext + ' (' + postnum + ')'
if posttext:
pnutpy.api.create_post(data={'reply_to': postnum, 'text': posttext})
# Delay to avoid rate limits:
time.sleep(3.2)
f.write(str(postnum) + '\n')
posttext = ''
except IndexError:
pass
number -= 1
f.close()
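# Note on state: pollctrl.txt stores one processed post id per line; a post
# is replied to only when its id is absent from that file. After a run it
# looks like (hypothetical ids):
#     123456
#     123789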
| 2.375 | 2 |
HW4-MMap.Random Forest.SciKit Learn/Q2/decision_tree.py | Magic-Fang/CSE6242-DataVisual-Analytics | 8 | 12767885 | from util import entropy, information_gain, partition_classes
import numpy as np
import ast
import heapq
import copy
class DecisionTree(object):
def __init__(self):
# Initializing the tree as an empty dictionary or list, as preferred
#self.tree = []
self.tree = {}
self.maxDepth = 30
def learn(self, X, y):
# TODO: Train the decision tree (self.tree) using the the sample X and labels y
# You will have to make use of the functions in utils.py to train the tree
# One possible way of implementing the tree:
# Each node in self.tree could be in the form of a dictionary:
# https://docs.python.org/2/library/stdtypes.html#mapping-types-dict
# For example, a non-leaf node with two children can have a 'left' key and a
# 'right' key. You can add more keys which might help in classification
# (eg. split attribute and split value)
def decideLabel(labels):
# Return the majority label (0 or 1) in labels list.
ones = sum(labels)
if len(labels)-ones >= ones:
return 0
return 1
def decideSplitAttrVal(X, y, attrs):
# iterate all attribute in X's instance, choose the best attribute for spliting
for i in range(len(X[0])): # iterate all attribute
print(" Comparing No."+str(i)+" attr \n")
maxInfoGain = splitVal = splitAttr = -1
values_of_per_Attr = [[X[k][i]] for k in range(len(X))]
for split_val_try in values_of_per_Attr:
Xleft, Xright, yleft, yright = partition_classes(values_of_per_Attr, y, 0, split_val_try[0])
curInfoGain = information_gain(y, [yleft,yright])
# Update maxInfoGain
if curInfoGain > maxInfoGain:
splitAttr = i
splitVal = split_val_try[0]
maxInfoGain = curInfoGain
attrs.append((1-maxInfoGain, splitAttr, splitVal))
heapq.heapify(attrs)
return
def buildTree(X, y, dep, attrs):
# If depth exceed or there is only one feature in instance of X, return label (0 or 1) directly.
if dep >= self.maxDepth or len(attrs) <= 1:
return decideLabel(y)
# If features in y are the same, no need for more branch
if sum(y) == len(y) or sum(y) == 0:
return y[0]
print("buildTree Depth is "+str(dep)+"\n" )
#splitAttr, splitVal = decideSplitAttrVal(X, y)
#splitAttr, splitVal = 0, decideSplitAttrVal2(X,y)
grades, splitAttr, splitVal = heapq.heappop(attrs)
Xleft, Xright, yleft, yright = partition_classes(X, y, splitAttr, splitVal)
print("partition finished \n")
# Get off the splitAttr of each instance in Xleft and Xright
print(len(Xleft))
print(len(Xright))
# for i in range(len(Xleft)):
# Xleft[i] = Xleft[i][:splitAttr]+Xleft[i][splitAttr+1:]
# for j in range(len(Xright)):
# Xright[j] = Xright[j][:splitAttr]+Xright[j][splitAttr+1:]
# Recursion stops when the spliting is not applicable
if len(Xleft) == 0 or len(Xright) == 0:
return decideLabel(y)
else:
tree = {}
#tree[splitAttr] = [splitVal, buildTree(Xleft, yleft, dep+1), buildTree(Xright, yright, dep+1)]
tree[splitAttr] = [splitVal, buildTree(Xleft, yleft, dep+1, copy.deepcopy(attrs)),
buildTree(Xright, yright, dep+1, copy.deepcopy(attrs))]
return tree
attrs = []
decideSplitAttrVal(X, y, attrs)
self.tree = buildTree(X, y, 1, attrs)
#self.tree = buildTree(X, y, 1)
def classify(self, record):
# TODO: classify the record using self.tree and return the predicted label
cur = self.tree
        # remember: the keys of self.tree are splitAttr values, i.e. attribute indices
tmp = record[:]
while isinstance(cur, dict):
feature = list(cur.keys())[0]
if isinstance(tmp[feature], int) or isinstance(tmp[feature], float):
if tmp[feature] <= cur[feature][0]:
cur = cur[feature][1]
else:
cur = cur[feature][2]
else:
if tmp[feature] == cur[feature][0]:
cur = cur[feature][1]
else:
cur = cur[feature][2]
#tmp = tmp[:feature]+tmp[feature+1:]
return cur
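# A minimal smoke test (sketch: the toy features/labels below are made up,
# and util.py from the assignment must be importable, as at the top of
# this file):
if __name__ == '__main__':
    X = [[2.7, 'yes'], [1.3, 'no'], [3.1, 'yes'], [0.4, 'no']]
    y = [1, 0, 1, 0]
    dt = DecisionTree()
    dt.learn(X, y)
    print(dt.classify([2.9, 'yes']))  # expected to print 0 or 1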
| 3.21875 | 3 |
Python L1 Assignments/data/file__ops.py | deb991/TopGear__Projects_n_Assignments | 0 | 12767886 | #!/usr/bin/env python
import os
import subprocess
from ctypes import windll
import string
FILEBROWSER_PATH = os.path.join(os.getenv('WINDIR'), 'explorer.exe')
print('\nInitiating file checking in the current directory >>>...\n')
def file__Manager():
    print('\nInitiating file checking in the current directory >>>...\n')
    parent_dir = '/'
    print('\nRoot filesystem syntax depends upon the platform. ~~~~')
    print(
        '\nWindows file systems start with a drive letter (e.g. "C:" as the '
        'primary parent drive);\n"/" is the root directory on both Unix and Mac.')
try:
print('Parent Directory:: ~~~~\n')
# noinspection PyTypeChecker
#os.system(start'C:\\Users')
subprocess.call("explorer C:\\Users\\", shell = True)
except:
print('Parent Directory:: ~~~~\n')
os.system('START "/"')
def get_drives():
drives = []
bitmask = windll.kernel32.GetLogicalDrives()
for letter in string.ascii_uppercase:
if bitmask & 1:
drives.append(letter)
bitmask >>= 1
print(drives)
def file_indexing():
print('\nIndexing whole drive of the system:: >>....')
directory = ('C:\\Users\\')
baseDir = "D:\\My__Env\\PycharmProjects\\TG__lib"
# try:
# with open('index.txt', 'a') as f:
# print(root, file = f)
#except FileNotFoundError:
# print('[WinError 3] The system cannot find the path specified')
thePath = os.getcwd()
theFiles = list(os.listdir(thePath))
theDict = dict()
for something in theFiles: # Calculate size for all files here.
theStats = os.stat(something)
theDict[something] = theStats
for item in theDict:
print("The File: {:30s} The Size: {:d} Bytes".format(item, theDict[item].st_size))
###Incomplete###
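    # A recursive sketch of the indexing this function hints at (assumption:
    # baseDir above is the intended root, and sizes are wanted for every file):
    #     for root, dirs, files in os.walk(baseDir):
    #         for name in files:
    #             full = os.path.join(root, name)
    #             print("{:60s} {:d} Bytes".format(full, os.stat(full).st_size))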
if __name__ == '__main__':
file__Manager()
get_drives()
file_indexing() | 3.046875 | 3 |
src/reviews/management/commands/send_review_reminders.py | Talengi/phase | 8 | 12767887 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from itertools import groupby
from notifications.management.commands.base import EmailCommand
from reviews.models import Review
class Command(EmailCommand):
help = 'Send an email reminder to all users with pending reviews'
text_template = 'reviews/pending_reviews_reminder_email.txt'
html_template = 'reviews/pending_reviews_reminder_email.html'
def handle(self, *args, **options):
pending_reviews = Review.objects \
.filter(status='progress') \
.select_related('document', 'reviewer') \
.order_by('reviewer', 'role')
users = groupby(pending_reviews, lambda rev: rev.reviewer)
for user, reviews in users:
if not user.send_pending_reviews_mails:
continue
self.send_notification(user=user, reviews=list(reviews))
def get_subject(self, **kwargs):
return 'Phase - Pending reviews'
def get_recipient_list(self, **kwargs):
return [kwargs['user'].email]
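# Typically wired to a scheduler; e.g. a daily cron entry such as (paths are
# deployment specific):
#     0 6 * * * /path/to/venv/bin/python manage.py send_review_reminders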
| 2.171875 | 2 |
ninjaopenfoam/swepc.py | hertzsprung/ninjaopenfoam | 0 | 12767888 | import os
class SWEPC:
def __init__(self, name, output, testCase, solver, degree, elements,
endTime, dt, topographyMean=0.6):
self.name = name
self.output = os.path.join('$builddir', output)
self.testCase = testCase
self.solver = solver
self.degree = degree
self.elements = elements
self.endTime = endTime
self.dt = dt
self.topographyMean = topographyMean
def write(self, generator):
generator.w.build(
os.path.join(self.output, 'coefficients.dat'),
'swepc',
implicit_outputs=[
os.path.join(self.output, 'statistics.dat'),
os.path.join(self.output, 'derived-statistics.dat')
],
variables={
'outputDir': self.output,
'testCase': self.testCase,
'solver': self.solver,
'degree': self.degree,
'elements': self.elements,
'endTime': self.endTime,
'dt': self.dt,
'topographyMean' : self.topographyMean})
def outputs(self):
return [os.path.join(self.output, file)
for file in ['statistics.dat', 'derived-statistics.dat',
'coefficients.dat']]
def __str__(self):
return self.name
class SWEMonteCarlo:
def __init__(self, name, output, testCase, solver, iterations, sampleIndex,
elements, endTime, dt):
self.name = name
self.output = os.path.join('$builddir', output)
self.testCase = testCase
self.solver = solver
self.iterations = iterations
self.sampleIndex = sampleIndex
self.elements = elements
self.endTime = endTime
self.dt = dt
def write(self, generator):
generator.w.build(
os.path.join(self.output, 'statistics.dat'),
'swemc',
implicit_outputs=[
os.path.join(self.output, 'derived-statistics.dat'),
os.path.join(self.output, 'convergence.dat'),
os.path.join(self.output, 'sample'+str(self.sampleIndex)+'.dat')],
variables={
'outputDir': self.output,
'testCase': self.testCase,
'solver': self.solver,
'iterations': self.iterations,
'sampleIndex': self.sampleIndex,
'elements': self.elements,
'endTime': self.endTime,
'dt': self.dt})
def outputs(self):
return [os.path.join(self.output, file)
for file in ['statistics.dat', 'derived-statistics.dat',
'convergence.dat',
'sample'+str(self.sampleIndex)+'.dat']]
def __str__(self):
return self.name
class SWEPDF:
def __init__(self, name, output, coefficientsFile, variable, sampleIndex,
min, max, samples):
self.name = name
self.output = os.path.join('$builddir', output + '.dat')
self.coefficientsFile = os.path.join('$builddir', coefficientsFile)
self.variable = variable
self.sampleIndex = sampleIndex
self.min = min
self.max = max
self.samples = samples
def write(self, generator):
generator.w.build(
self.output,
'swepdf',
inputs=[self.coefficientsFile],
variables={
'variable': self.variable,
'min': self.min,
'max': self.max,
'samples': self.samples,
'line': self.sampleIndex+2})
def outputs(self):
return [self.output]
def __str__(self):
return self.name
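# Construction sketch (hypothetical values; `generator` is assumed to expose
# a ninja_syntax Writer as `generator.w`, as the write() methods above imply):
#     task = SWEPC(name='case1', output='swepc/case1', testCase='lakeAtRest',
#                  solver='wellBalanced', degree=3, elements=100,
#                  endTime=1.0, dt=0.001)
#     task.write(generator)
#     print(task.outputs())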
| 2.5 | 2 |
abc254/a/main.py | seigot/atcoder | 2 | 12767889 | s = str(input())
ss = ''.join(list(reversed(s)))
sss = ss[:2]
ssss = ''.join(list(reversed(sss)))
print(ssss)
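# The three reversals above just select the last two characters in their
# original order; an equivalent one-liner would be: print(s[-2:])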
| 3.046875 | 3 |
demo/fs.py | thoughteer/edera | 3 | 12767890 | import contextlib
import os
import os.path
import shutil
import tempfile
class FileSystem(object):
def __init__(self, root):
self.root = root
def check(self, path):
return os.path.exists(os.path.join(self.root, path))
@contextlib.contextmanager
def create(self, path):
path = os.path.join(self.root, path)
tdescriptor, tpath = None, None
try:
tdescriptor, tpath = tempfile.mkstemp()
with open(tpath, "w") as stream:
yield stream
os.rename(tpath, path)
finally:
if tdescriptor is not None:
os.close(tdescriptor)
if os.path.exists(tpath):
os.remove(tpath)
def ensure(self, path):
return os.makedirs(os.path.join(self.root, path))
def read(self, path):
return open(os.path.join(self.root, path), "r")
def remove(self, path):
path = os.path.join(self.root, path)
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
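if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module; assumes a
    # writable system temp directory). Demonstrates the atomic create()
    # context manager followed by check/read/remove.
    root = tempfile.mkdtemp()
    fs = FileSystem(root)
    with fs.create("greeting.txt") as stream:
        stream.write("hello")
    assert fs.check("greeting.txt")
    with fs.read("greeting.txt") as stream:
        assert stream.read() == "hello"
    fs.remove("greeting.txt")
    shutil.rmtree(root)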
| 2.609375 | 3 |
GUI/Ui_SerialPort.py | IronSublimate/PyQt-multifunctional-uart-helper | 13 | 12767891 | <gh_stars>10-100
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'SerialPort.ui'
#
# Created by: PyQt5 UI code generator 5.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1142, 771)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/img/icon_128.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.centralwidget)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setTabPosition(QtWidgets.QTabWidget.West)
self.tabWidget.setMovable(False)
self.tabWidget.setObjectName("tabWidget")
self.tab_msg = QtWidgets.QWidget()
self.tab_msg.setObjectName("tab_msg")
self.gridLayout_3 = QtWidgets.QGridLayout(self.tab_msg)
self.gridLayout_3.setObjectName("gridLayout_3")
self.textEdit_Recive = QtWidgets.QTextEdit(self.tab_msg)
self.textEdit_Recive.setStyleSheet("/*background-color: rgb(255, 255, 255);\n"
"background-color: rgb(0, 0, 0);*/")
self.textEdit_Recive.setUndoRedoEnabled(False)
self.textEdit_Recive.setReadOnly(True)
self.textEdit_Recive.setHtml("<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'SimSun\'; font-size:9pt; font-weight:400; font-style:normal;\">\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p></body></html>")
self.textEdit_Recive.setAcceptRichText(False)
self.textEdit_Recive.setObjectName("textEdit_Recive")
self.gridLayout_3.addWidget(self.textEdit_Recive, 2, 0, 1, 1)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label_2 = QtWidgets.QLabel(self.tab_msg)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
self.label_2.setTextFormat(QtCore.Qt.AutoText)
self.label_2.setObjectName("label_2")
self.horizontalLayout.addWidget(self.label_2)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.hexSending_checkBox = QtWidgets.QCheckBox(self.tab_msg)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.hexSending_checkBox.sizePolicy().hasHeightForWidth())
self.hexSending_checkBox.setSizePolicy(sizePolicy)
self.hexSending_checkBox.setObjectName("hexSending_checkBox")
self.horizontalLayout.addWidget(self.hexSending_checkBox)
self.Send_Button = QtWidgets.QPushButton(self.tab_msg)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Send_Button.sizePolicy().hasHeightForWidth())
self.Send_Button.setSizePolicy(sizePolicy)
self.Send_Button.setObjectName("Send_Button")
self.horizontalLayout.addWidget(self.Send_Button)
self.pushButton_clearSendText = QtWidgets.QPushButton(self.tab_msg)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_clearSendText.sizePolicy().hasHeightForWidth())
self.pushButton_clearSendText.setSizePolicy(sizePolicy)
self.pushButton_clearSendText.setObjectName("pushButton_clearSendText")
self.horizontalLayout.addWidget(self.pushButton_clearSendText)
self.gridLayout_3.addLayout(self.horizontalLayout, 3, 0, 1, 1)
self.textEdit_Send = QtWidgets.QTextEdit(self.tab_msg)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.textEdit_Send.sizePolicy().hasHeightForWidth())
self.textEdit_Send.setSizePolicy(sizePolicy)
self.textEdit_Send.setStyleSheet("/*background-color: rgb(0, 0, 0);*/")
self.textEdit_Send.setObjectName("textEdit_Send")
self.gridLayout_3.addWidget(self.textEdit_Send, 4, 0, 1, 1)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label = QtWidgets.QLabel(self.tab_msg)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setObjectName("label")
self.horizontalLayout_2.addWidget(self.label)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem1)
self.hexShowing_checkBox = QtWidgets.QCheckBox(self.tab_msg)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.hexShowing_checkBox.sizePolicy().hasHeightForWidth())
self.hexShowing_checkBox.setSizePolicy(sizePolicy)
self.hexShowing_checkBox.setObjectName("hexShowing_checkBox")
self.horizontalLayout_2.addWidget(self.hexShowing_checkBox)
self.ClearButton = QtWidgets.QPushButton(self.tab_msg)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.ClearButton.sizePolicy().hasHeightForWidth())
self.ClearButton.setSizePolicy(sizePolicy)
self.ClearButton.setObjectName("ClearButton")
self.horizontalLayout_2.addWidget(self.ClearButton)
self.gridLayout_3.addLayout(self.horizontalLayout_2, 1, 0, 1, 1)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.label_9 = QtWidgets.QLabel(self.tab_msg)
self.label_9.setObjectName("label_9")
self.horizontalLayout_4.addWidget(self.label_9)
self.comboBox_codetype = QtWidgets.QComboBox(self.tab_msg)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBox_codetype.sizePolicy().hasHeightForWidth())
self.comboBox_codetype.setSizePolicy(sizePolicy)
self.comboBox_codetype.setObjectName("comboBox_codetype")
self.comboBox_codetype.addItem("")
self.comboBox_codetype.addItem("")
self.horizontalLayout_4.addWidget(self.comboBox_codetype)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem2)
self.gridLayout_3.addLayout(self.horizontalLayout_4, 0, 0, 1, 1)
self.tabWidget.addTab(self.tab_msg, "")
self.tab_img = QtWidgets.QWidget()
self.tab_img.setObjectName("tab_img")
self.gridLayout_2 = QtWidgets.QGridLayout(self.tab_img)
self.gridLayout_2.setObjectName("gridLayout_2")
self.checkBox_showGrid = QtWidgets.QCheckBox(self.tab_img)
self.checkBox_showGrid.setObjectName("checkBox_showGrid")
self.gridLayout_2.addWidget(self.checkBox_showGrid, 1, 7, 1, 1)
self.comboBox_imgType = QtWidgets.QComboBox(self.tab_img)
self.comboBox_imgType.setObjectName("comboBox_imgType")
self.comboBox_imgType.addItem("")
self.comboBox_imgType.addItem("")
self.gridLayout_2.addWidget(self.comboBox_imgType, 0, 0, 1, 2)
self.label_5 = QtWidgets.QLabel(self.tab_img)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_5.sizePolicy().hasHeightForWidth())
self.label_5.setSizePolicy(sizePolicy)
self.label_5.setObjectName("label_5")
self.gridLayout_2.addWidget(self.label_5, 1, 0, 1, 1)
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem3, 0, 5, 1, 2)
self.lineEdit_width = QtWidgets.QLineEdit(self.tab_img)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEdit_width.sizePolicy().hasHeightForWidth())
self.lineEdit_width.setSizePolicy(sizePolicy)
self.lineEdit_width.setInputMask("")
self.lineEdit_width.setObjectName("lineEdit_width")
self.gridLayout_2.addWidget(self.lineEdit_width, 1, 3, 1, 1)
spacerItem4 = QtWidgets.QSpacerItem(269, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem4, 1, 5, 1, 2)
self.lineEdit_height = QtWidgets.QLineEdit(self.tab_img)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEdit_height.sizePolicy().hasHeightForWidth())
self.lineEdit_height.setSizePolicy(sizePolicy)
self.lineEdit_height.setObjectName("lineEdit_height")
self.gridLayout_2.addWidget(self.lineEdit_height, 1, 1, 1, 1)
self.label_6 = QtWidgets.QLabel(self.tab_img)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_6.sizePolicy().hasHeightForWidth())
self.label_6.setSizePolicy(sizePolicy)
self.label_6.setObjectName("label_6")
self.gridLayout_2.addWidget(self.label_6, 1, 2, 1, 1)
self.label_position = QtWidgets.QLabel(self.tab_img)
self.label_position.setObjectName("label_position")
self.gridLayout_2.addWidget(self.label_position, 2, 7, 1, 1)
self.checkBox_UseOpenCV = QtWidgets.QCheckBox(self.tab_img)
self.checkBox_UseOpenCV.setObjectName("checkBox_UseOpenCV")
self.gridLayout_2.addWidget(self.checkBox_UseOpenCV, 0, 7, 1, 1)
self.pushButton_saveImg = QtWidgets.QPushButton(self.tab_img)
self.pushButton_saveImg.setObjectName("pushButton_saveImg")
self.gridLayout_2.addWidget(self.pushButton_saveImg, 2, 0, 1, 2)
spacerItem5 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem5, 2, 5, 1, 1)
self.label_extra14bytes = QtWidgets.QLabel(self.tab_img)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_extra14bytes.sizePolicy().hasHeightForWidth())
self.label_extra14bytes.setSizePolicy(sizePolicy)
self.label_extra14bytes.setText("")
self.label_extra14bytes.setObjectName("label_extra14bytes")
self.gridLayout_2.addWidget(self.label_extra14bytes, 0, 3, 1, 1)
self.label_pause = QtWidgets.QLabel(self.tab_img)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_pause.sizePolicy().hasHeightForWidth())
self.label_pause.setSizePolicy(sizePolicy)
self.label_pause.setText("")
self.label_pause.setObjectName("label_pause")
self.gridLayout_2.addWidget(self.label_pause, 2, 3, 1, 1)
self.label_img = WidgetPainter(self.tab_img)
self.label_img.setMouseTracking(True)
self.label_img.setFocusPolicy(QtCore.Qt.StrongFocus)
self.label_img.setObjectName("label_img")
self.gridLayout_2.addWidget(self.label_img, 3, 0, 1, 8)
self.tabWidget.addTab(self.tab_img, "")
self.tab_other = QtWidgets.QWidget()
self.tab_other.setObjectName("tab_other")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.tab_other)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.tabWidget_other = QtWidgets.QTabWidget(self.tab_other)
self.tabWidget_other.setTabPosition(QtWidgets.QTabWidget.North)
self.tabWidget_other.setTabShape(QtWidgets.QTabWidget.Rounded)
self.tabWidget_other.setObjectName("tabWidget_other")
self.tab_watch_parameter = QtWidgets.QWidget()
self.tab_watch_parameter.setObjectName("tab_watch_parameter")
self.gridLayout_7 = QtWidgets.QGridLayout(self.tab_watch_parameter)
self.gridLayout_7.setObjectName("gridLayout_7")
self.pushButton_clear_dict = QtWidgets.QPushButton(self.tab_watch_parameter)
self.pushButton_clear_dict.setObjectName("pushButton_clear_dict")
self.gridLayout_7.addWidget(self.pushButton_clear_dict, 1, 1, 1, 1)
spacerItem6 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_7.addItem(spacerItem6, 1, 0, 1, 1)
self.tableWidget_para = QtWidgets.QTableWidget(self.tab_watch_parameter)
self.tableWidget_para.setShowGrid(True)
self.tableWidget_para.setRowCount(0)
self.tableWidget_para.setColumnCount(2)
self.tableWidget_para.setObjectName("tableWidget_para")
self.gridLayout_7.addWidget(self.tableWidget_para, 0, 0, 1, 2)
self.tabWidget_other.addTab(self.tab_watch_parameter, "")
self.tab_change_parameter = QtWidgets.QWidget()
self.tab_change_parameter.setObjectName("tab_change_parameter")
self.gridLayout_5 = QtWidgets.QGridLayout(self.tab_change_parameter)
self.gridLayout_5.setObjectName("gridLayout_5")
self.listWidget_para = QtWidgets.QListWidget(self.tab_change_parameter)
self.listWidget_para.setObjectName("listWidget_para")
self.gridLayout_5.addWidget(self.listWidget_para, 1, 0, 1, 2)
spacerItem7 = QtWidgets.QSpacerItem(570, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_5.addItem(spacerItem7, 2, 0, 1, 1)
self.pushButton_readMCU = QtWidgets.QPushButton(self.tab_change_parameter)
self.pushButton_readMCU.setObjectName("pushButton_readMCU")
self.gridLayout_5.addWidget(self.pushButton_readMCU, 2, 1, 1, 1)
self.label_4 = QtWidgets.QLabel(self.tab_change_parameter)
self.label_4.setObjectName("label_4")
self.gridLayout_5.addWidget(self.label_4, 0, 0, 1, 2)
self.tabWidget_other.addTab(self.tab_change_parameter, "")
self.tab_wave = QtWidgets.QWidget()
self.tab_wave.setObjectName("tab_wave")
self.gridLayout_6 = QtWidgets.QGridLayout(self.tab_wave)
self.gridLayout_6.setObjectName("gridLayout_6")
self.graphicsView = DynamicWaveView(self.tab_wave)
self.graphicsView.setObjectName("graphicsView")
self.gridLayout_6.addWidget(self.graphicsView, 0, 0, 1, 1)
self.tabWidget_other.addTab(self.tab_wave, "")
self.tab_piano = PianoView()
self.tab_piano.setFocusPolicy(QtCore.Qt.StrongFocus)
self.tab_piano.setObjectName("tab_piano")
self.gridLayout_4 = QtWidgets.QGridLayout(self.tab_piano)
self.gridLayout_4.setObjectName("gridLayout_4")
self.tabWidget_other.addTab(self.tab_piano, "")
self.horizontalLayout_5.addWidget(self.tabWidget_other)
self.tabWidget.addTab(self.tab_other, "")
self.horizontalLayout_3.addWidget(self.tabWidget)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1142, 26))
self.menubar.setObjectName("menubar")
self.menu = QtWidgets.QMenu(self.menubar)
self.menu.setObjectName("menu")
self.menu_2 = QtWidgets.QMenu(self.menubar)
self.menu_2.setObjectName("menu_2")
self.menu_3 = QtWidgets.QMenu(self.menubar)
self.menu_3.setObjectName("menu_3")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.dockWidget_uart = QtWidgets.QDockWidget(MainWindow)
self.dockWidget_uart.setFloating(False)
self.dockWidget_uart.setFeatures(QtWidgets.QDockWidget.AllDockWidgetFeatures)
self.dockWidget_uart.setObjectName("dockWidget_uart")
self.dockWidgetContents_2 = QtWidgets.QWidget()
self.dockWidgetContents_2.setObjectName("dockWidgetContents_2")
self.verticalLayout = QtWidgets.QVBoxLayout(self.dockWidgetContents_2)
self.verticalLayout.setObjectName("verticalLayout")
self.line_2 = QtWidgets.QFrame(self.dockWidgetContents_2)
self.line_2.setFrameShape(QtWidgets.QFrame.VLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.verticalLayout.addWidget(self.line_2)
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.label_3 = QtWidgets.QLabel(self.dockWidgetContents_2)
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 4, 0, 1, 1)
self.Com_Name_Label = QtWidgets.QLabel(self.dockWidgetContents_2)
self.Com_Name_Label.setAlignment(QtCore.Qt.AlignCenter)
self.Com_Name_Label.setObjectName("Com_Name_Label")
self.gridLayout.addWidget(self.Com_Name_Label, 2, 0, 1, 1)
self.label_8 = QtWidgets.QLabel(self.dockWidgetContents_2)
self.label_8.setAlignment(QtCore.Qt.AlignCenter)
self.label_8.setObjectName("label_8")
self.gridLayout.addWidget(self.label_8, 6, 0, 1, 1)
self.comboBox_data = QtWidgets.QComboBox(self.dockWidgetContents_2)
self.comboBox_data.setObjectName("comboBox_data")
self.comboBox_data.addItem("")
self.comboBox_data.addItem("")
self.comboBox_data.addItem("")
self.comboBox_data.addItem("")
self.gridLayout.addWidget(self.comboBox_data, 5, 1, 1, 1)
self.comboBox_stop = QtWidgets.QComboBox(self.dockWidgetContents_2)
self.comboBox_stop.setObjectName("comboBox_stop")
self.comboBox_stop.addItem("")
self.comboBox_stop.addItem("")
self.gridLayout.addWidget(self.comboBox_stop, 6, 1, 1, 1)
self.Com_Baud_Combo = QtWidgets.QComboBox(self.dockWidgetContents_2)
self.Com_Baud_Combo.setEditable(True)
self.Com_Baud_Combo.setDuplicatesEnabled(False)
self.Com_Baud_Combo.setModelColumn(0)
self.Com_Baud_Combo.setObjectName("Com_Baud_Combo")
self.Com_Baud_Combo.addItem("")
self.Com_Baud_Combo.addItem("")
self.Com_Baud_Combo.addItem("")
self.Com_Baud_Combo.addItem("")
self.Com_Baud_Combo.addItem("")
self.Com_Baud_Combo.addItem("")
self.Com_Baud_Combo.addItem("")
self.Com_Baud_Combo.addItem("")
self.Com_Baud_Combo.addItem("")
self.Com_Baud_Combo.addItem("")
self.Com_Baud_Combo.addItem("")
self.Com_Baud_Combo.addItem("")
self.Com_Baud_Combo.addItem("")
self.Com_Baud_Combo.addItem("")
self.Com_Baud_Combo.addItem("")
self.Com_Baud_Combo.addItem("")
self.Com_Baud_Combo.addItem("")
self.gridLayout.addWidget(self.Com_Baud_Combo, 3, 1, 1, 1)
self.Com_Refresh_Label = QtWidgets.QLabel(self.dockWidgetContents_2)
self.Com_Refresh_Label.setAlignment(QtCore.Qt.AlignCenter)
self.Com_Refresh_Label.setObjectName("Com_Refresh_Label")
self.gridLayout.addWidget(self.Com_Refresh_Label, 0, 0, 1, 1)
self.label_7 = QtWidgets.QLabel(self.dockWidgetContents_2)
self.label_7.setAlignment(QtCore.Qt.AlignCenter)
self.label_7.setObjectName("label_7")
self.gridLayout.addWidget(self.label_7, 5, 0, 1, 1)
self.Com_Refresh_Button = QtWidgets.QPushButton(self.dockWidgetContents_2)
self.Com_Refresh_Button.setObjectName("Com_Refresh_Button")
self.gridLayout.addWidget(self.Com_Refresh_Button, 0, 1, 1, 1)
self.Com_Open_Button = QtWidgets.QPushButton(self.dockWidgetContents_2)
self.Com_Open_Button.setObjectName("Com_Open_Button")
self.gridLayout.addWidget(self.Com_Open_Button, 7, 1, 1, 1)
self.comboBox_parity = QtWidgets.QComboBox(self.dockWidgetContents_2)
self.comboBox_parity.setObjectName("comboBox_parity")
self.comboBox_parity.addItem("")
self.comboBox_parity.addItem("")
self.comboBox_parity.addItem("")
self.comboBox_parity.addItem("")
self.comboBox_parity.addItem("")
self.gridLayout.addWidget(self.comboBox_parity, 4, 1, 1, 1)
self.Com_isOpenOrNot_Label = QtWidgets.QLabel(self.dockWidgetContents_2)
self.Com_isOpenOrNot_Label.setText("")
self.Com_isOpenOrNot_Label.setAlignment(QtCore.Qt.AlignCenter)
self.Com_isOpenOrNot_Label.setObjectName("Com_isOpenOrNot_Label")
self.gridLayout.addWidget(self.Com_isOpenOrNot_Label, 9, 0, 1, 1)
self.Com_State_Label = QtWidgets.QLabel(self.dockWidgetContents_2)
self.Com_State_Label.setAlignment(QtCore.Qt.AlignCenter)
self.Com_State_Label.setObjectName("Com_State_Label")
self.gridLayout.addWidget(self.Com_State_Label, 7, 0, 1, 1)
self.Com_Name_Combo = QtWidgets.QComboBox(self.dockWidgetContents_2)
self.Com_Name_Combo.setObjectName("Com_Name_Combo")
self.gridLayout.addWidget(self.Com_Name_Combo, 2, 1, 1, 1)
self.Com_Close_Button = QtWidgets.QPushButton(self.dockWidgetContents_2)
self.Com_Close_Button.setDefault(False)
self.Com_Close_Button.setObjectName("Com_Close_Button")
self.gridLayout.addWidget(self.Com_Close_Button, 9, 1, 1, 1)
self.Com_Baud_Label = QtWidgets.QLabel(self.dockWidgetContents_2)
self.Com_Baud_Label.setAlignment(QtCore.Qt.AlignCenter)
self.Com_Baud_Label.setObjectName("Com_Baud_Label")
self.gridLayout.addWidget(self.Com_Baud_Label, 3, 0, 1, 1)
self.verticalLayout.addLayout(self.gridLayout)
spacerItem8 = QtWidgets.QSpacerItem(20, 115, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem8)
self.Time_Label = QtWidgets.QLabel(self.dockWidgetContents_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Time_Label.sizePolicy().hasHeightForWidth())
self.Time_Label.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("方正兰亭中黑_GBK")
font.setPointSize(9)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.Time_Label.setFont(font)
self.Time_Label.setAlignment(QtCore.Qt.AlignCenter)
self.Time_Label.setObjectName("Time_Label")
self.verticalLayout.addWidget(self.Time_Label)
self.calendarWidget = QtWidgets.QCalendarWidget(self.dockWidgetContents_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.calendarWidget.sizePolicy().hasHeightForWidth())
self.calendarWidget.setSizePolicy(sizePolicy)
self.calendarWidget.setStyleSheet("/*alternate-background-color: rgb(0, 0, 0);\n"
"background-color: rgb(0, 0, 0);*/")
self.calendarWidget.setFirstDayOfWeek(QtCore.Qt.Sunday)
self.calendarWidget.setHorizontalHeaderFormat(QtWidgets.QCalendarWidget.ShortDayNames)
self.calendarWidget.setVerticalHeaderFormat(QtWidgets.QCalendarWidget.ISOWeekNumbers)
self.calendarWidget.setObjectName("calendarWidget")
self.verticalLayout.addWidget(self.calendarWidget)
self.dockWidget_uart.setWidget(self.dockWidgetContents_2)
MainWindow.addDockWidget(QtCore.Qt.DockWidgetArea(2), self.dockWidget_uart)
self.action_uart = QtWidgets.QAction(MainWindow)
self.action_uart.setCheckable(True)
self.action_uart.setChecked(True)
self.action_uart.setObjectName("action_uart")
self.action_exit = QtWidgets.QAction(MainWindow)
self.action_exit.setShortcutVisibleInContextMenu(True)
self.action_exit.setObjectName("action_exit")
self.actionAbout_Qt = QtWidgets.QAction(MainWindow)
self.actionAbout_Qt.setObjectName("actionAbout_Qt")
self.actionAboutThis = QtWidgets.QAction(MainWindow)
self.actionAboutThis.setObjectName("actionAboutThis")
self.action_stop = QtWidgets.QAction(MainWindow)
self.action_stop.setObjectName("action_stop")
self.menu.addAction(self.action_stop)
self.menu.addAction(self.action_exit)
self.menu_2.addAction(self.actionAbout_Qt)
self.menu_2.addAction(self.actionAboutThis)
self.menu_3.addAction(self.action_uart)
self.menubar.addAction(self.menu.menuAction())
self.menubar.addAction(self.menu_3.menuAction())
self.menubar.addAction(self.menu_2.menuAction())
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(2)
self.tabWidget_other.setCurrentIndex(0)
self.comboBox_data.setCurrentIndex(3)
self.comboBox_stop.setCurrentIndex(0)
self.Com_Baud_Combo.setCurrentIndex(10)
self.pushButton_clearSendText.clicked.connect(self.textEdit_Send.clear)
self.ClearButton.clicked.connect(self.textEdit_Recive.clear)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "PyQt5 多功能串口调试助手"))
self.label_2.setText(_translate("MainWindow", "发送区"))
self.hexSending_checkBox.setText(_translate("MainWindow", "16进制发送"))
self.Send_Button.setText(_translate("MainWindow", "发送"))
self.pushButton_clearSendText.setText(_translate("MainWindow", "清除"))
self.label.setText(_translate("MainWindow", "接收区"))
self.hexShowing_checkBox.setText(_translate("MainWindow", "16进制显示"))
self.ClearButton.setText(_translate("MainWindow", "清除"))
self.label_9.setText(_translate("MainWindow", "编码方式:"))
self.comboBox_codetype.setItemText(0, _translate("MainWindow", "utf-8"))
self.comboBox_codetype.setItemText(1, _translate("MainWindow", "gb2312"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_msg), _translate("MainWindow", "接收发送数据"))
self.checkBox_showGrid.setText(_translate("MainWindow", "显示网格线"))
self.comboBox_imgType.setItemText(0, _translate("MainWindow", "二值化图像"))
self.comboBox_imgType.setItemText(1, _translate("MainWindow", "灰度图像(单片机解压)"))
self.label_5.setText(_translate("MainWindow", "高"))
self.lineEdit_width.setText(_translate("MainWindow", "80"))
self.lineEdit_height.setText(_translate("MainWindow", "60"))
self.label_6.setText(_translate("MainWindow", "宽"))
self.label_position.setText(_translate("MainWindow", "鼠标位置 x: 0,y: 0"))
self.checkBox_UseOpenCV.setText(_translate("MainWindow", "使用OpenCV查看图像"))
self.pushButton_saveImg.setText(_translate("MainWindow", "保存图像"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_img), _translate("MainWindow", "查看图像"))
self.pushButton_clear_dict.setText(_translate("MainWindow", "清空"))
self.tabWidget_other.setTabText(self.tabWidget_other.indexOf(self.tab_watch_parameter), _translate("MainWindow", "查看参数"))
self.pushButton_readMCU.setText(_translate("MainWindow", "更新上位机数据"))
self.label_4.setText(_translate("MainWindow", "参数"))
self.tabWidget_other.setTabText(self.tabWidget_other.indexOf(self.tab_change_parameter), _translate("MainWindow", "修改参数"))
self.tabWidget_other.setTabText(self.tabWidget_other.indexOf(self.tab_wave), _translate("MainWindow", "显示波形"))
self.tabWidget_other.setTabText(self.tabWidget_other.indexOf(self.tab_piano), _translate("MainWindow", "弹琴"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_other), _translate("MainWindow", "附加功能"))
self.menu.setTitle(_translate("MainWindow", "文件(&F)"))
self.menu_2.setTitle(_translate("MainWindow", "关于(&A)"))
self.menu_3.setTitle(_translate("MainWindow", "视图(&V)"))
self.dockWidget_uart.setWindowTitle(_translate("MainWindow", "串口设置"))
self.label_3.setText(_translate("MainWindow", "奇偶位"))
self.Com_Name_Label.setText(_translate("MainWindow", "串口选择"))
self.label_8.setText(_translate("MainWindow", "停止位"))
self.comboBox_data.setItemText(0, _translate("MainWindow", "5"))
self.comboBox_data.setItemText(1, _translate("MainWindow", "6"))
self.comboBox_data.setItemText(2, _translate("MainWindow", "7"))
self.comboBox_data.setItemText(3, _translate("MainWindow", "8"))
self.comboBox_stop.setItemText(0, _translate("MainWindow", "1"))
self.comboBox_stop.setItemText(1, _translate("MainWindow", "2"))
self.Com_Baud_Combo.setCurrentText(_translate("MainWindow", "115200"))
self.Com_Baud_Combo.setItemText(0, _translate("MainWindow", "1200"))
self.Com_Baud_Combo.setItemText(1, _translate("MainWindow", "2400"))
self.Com_Baud_Combo.setItemText(2, _translate("MainWindow", "4800"))
self.Com_Baud_Combo.setItemText(3, _translate("MainWindow", "9600"))
self.Com_Baud_Combo.setItemText(4, _translate("MainWindow", "14400"))
self.Com_Baud_Combo.setItemText(5, _translate("MainWindow", "19200"))
self.Com_Baud_Combo.setItemText(6, _translate("MainWindow", "38400"))
self.Com_Baud_Combo.setItemText(7, _translate("MainWindow", "43000"))
self.Com_Baud_Combo.setItemText(8, _translate("MainWindow", "57600"))
self.Com_Baud_Combo.setItemText(9, _translate("MainWindow", "76800"))
self.Com_Baud_Combo.setItemText(10, _translate("MainWindow", "115200"))
self.Com_Baud_Combo.setItemText(11, _translate("MainWindow", "128000"))
self.Com_Baud_Combo.setItemText(12, _translate("MainWindow", "230400"))
self.Com_Baud_Combo.setItemText(13, _translate("MainWindow", "256000"))
self.Com_Baud_Combo.setItemText(14, _translate("MainWindow", "460800"))
self.Com_Baud_Combo.setItemText(15, _translate("MainWindow", "921600"))
self.Com_Baud_Combo.setItemText(16, _translate("MainWindow", "1382400"))
self.Com_Refresh_Label.setText(_translate("MainWindow", "串口搜索"))
self.label_7.setText(_translate("MainWindow", "数据位"))
self.Com_Refresh_Button.setText(_translate("MainWindow", "刷新"))
self.Com_Open_Button.setText(_translate("MainWindow", "Open"))
self.comboBox_parity.setItemText(0, _translate("MainWindow", "无校验"))
self.comboBox_parity.setItemText(1, _translate("MainWindow", "偶校验"))
self.comboBox_parity.setItemText(2, _translate("MainWindow", "奇校验"))
self.comboBox_parity.setItemText(3, _translate("MainWindow", "空校验"))
self.comboBox_parity.setItemText(4, _translate("MainWindow", "标志校验"))
self.Com_State_Label.setText(_translate("MainWindow", "串口操作"))
self.Com_Close_Button.setText(_translate("MainWindow", "Close"))
self.Com_Baud_Label.setText(_translate("MainWindow", "波特率"))
self.Time_Label.setText(_translate("MainWindow", "Time"))
self.action_uart.setText(_translate("MainWindow", "串口设置(&U)"))
self.action_uart.setShortcut(_translate("MainWindow", "Ctrl+U"))
self.action_exit.setText(_translate("MainWindow", "退出(&T)"))
self.action_exit.setShortcut(_translate("MainWindow", "Ctrl+Q"))
self.actionAbout_Qt.setText(_translate("MainWindow", "About &Qt"))
self.actionAboutThis.setText(_translate("MainWindow", "关于(&A)"))
self.action_stop.setText(_translate("MainWindow", "停车(&S)"))
self.action_stop.setToolTip(_translate("MainWindow", "停车(S)"))
self.action_stop.setShortcut(_translate("MainWindow", "Ctrl+T"))
from Widget.Piano import PianoView
from Widget.Wave import DynamicWaveView
from Widget.widgetpainter import WidgetPainter
import res_rc
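if __name__ == "__main__":
    # Launch sketch (not emitted by pyuic5; assumes the Widget.* modules
    # and res_rc imported above are importable from this directory):
    import sys
    app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(window)
    window.show()
    sys.exit(app.exec_())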
| 1.75 | 2 |
Mobley_logP/tautomerExploration/LimitedSet_pKa.py | MobleyLab/SAMPL5_logD_PredictionAnalysis | 1 | 12767892 | # Written by <NAME>
# Mobley Group, UC Irvine
# This script performs error analysis for corrected logD values, considering only those where the logD differs from the logP
# These results are compared to predictions from set 16 which did the best by most error metrics
import pickle
from sigfig.sigfig import *
import glob
import numpy as np
import imp
tools = imp.load_source('tools','../DataFiles/tools.py')
#========================================================================
# Constants we'll need later:
bootits = 1000 # number bootstrap iterations
diff = 0.01
#========================================================================
# Load Experimental data and make lists by batch
Exp = pickle.load(open('../DataFiles/experimental.p','rb'))
# Load Prediction Dictionary
database = pickle.load(open('dictionary_Corrected.p','rb'))
Klamt = pickle.load(open('../DataFiles/predictions.p','rb'))[16]['data']
limit_keys = []
for k, e in database.items():
logD = e['LogD_oneCorrected'][0]
logP = e['LogD_calc'][0]
if abs(logD - logP) > diff:
        print(k)
limit_keys.append(k)
print "There are ", len(limit_keys), "changed logDs"
limit_keys = sorted(limit_keys)
all_keys = sorted(database.keys())
allLogP = np.array([database[k]['LogD_calc'] for k in all_keys])
limitLogP = np.array([database[k]['LogD_calc'] for k in limit_keys])
allLogD = np.array([database[k]['LogD_oneCorrected'] for k in all_keys])
limitLogD = np.array([database[k]['LogD_oneCorrected'] for k in limit_keys])
allKlamt = np.array([Klamt[k] for k in all_keys])
limitKlamt = np.array([Klamt[k] for k in limit_keys])
allExp = np.array([Exp[k]['data'] for k in all_keys])
limitExp = np.array([Exp[k]['data'] for k in limit_keys])
output = ["# Compare the largest change from pKa corrected data to entry 16 results\n", "# Limited refers to only logP values changed by at least %.4f\n" % diff, 'data set,\t AveErr,\t RME,\t AUE,\t tau,\t R,\t maxErr,\t percent,\t error slope \n']
dataSet = ["All LogP", "All Corrected LogD", "All Klamt prediction", "Limited LogP", "Limited LogD", "Limited Klamt predictions"]
for i, data in enumerate([allLogP, allLogD, allKlamt, limitLogP, limitLogD, limitKlamt]):
    print(i)
    print(dataSet[i])
    if dataSet[i].split(' ')[0] == 'All':
        print("\t all data")
        exp = allExp
    else:
        exp = limitExp
        print('\t limited data')
if dataSet[i].split(' ')[1] != 'Klamt':
ddata = np.array([1.4 for a in range(len(data))])
else:
ddata = data[:, 2]
# Add error analysis for logP (only changing values)
AveErr, RMS, AUE, tau, R, maxErr, percent, Rsquared = tools.stats_array(data[:,0], exp[:,0], exp[:,1], bootits, dataSet[i])
# Get data for QQ plot
X, Y, slope, dslope = tools.getQQdata(data[:,0], exp[:,0], ddata, exp[:,1], bootits)
line = dataSet[i]
for met in [AveErr, RMS, AUE, tau, R, maxErr, percent, [slope,dslope]]:
line = line + ",\t %s +/- %s" % (round_unc(met[0], met[1]), round_sf(met[1], 1))
output.append(line+'\n')
fileName ='DataTables/CompareTo16_%.4f.txt' % diff
f = open(fileName,'w')
f.writelines(output)
f.close()
print "created ", fileName
| 2.046875 | 2 |
BayesPaths/ResidualBiGraph3.py | chrisquince/BayesPaths | 3 | 12767893 | <filename>BayesPaths/ResidualBiGraph3.py
from itertools import compress
import argparse
import sys
import numpy as np
import os
import subprocess
import re
from numpy.random import RandomState
import logging
from subprocess import PIPE
from collections import defaultdict
from BayesPaths.UnitigGraph import UnitigGraph
from BayesPaths.UtilsFunctions import convertNodeToName
from BayesPaths.UtilsFunctions import expNormLogProb
from BayesPaths.UtilsFunctions import expLogProb
from operator import itemgetter
import uuid
import networkx as nx
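# lassoF is a "tent" penalty, min(phi, 1 - phi): it vanishes when an
# assignment value phi is exactly 0 or 1, so lassoPenalty pushes the
# entries of phiMatrix towards binary values.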
def lassoF(phi):
if phi < 0.5:
return phi
else:
return 1 - phi
def lassoPenalty(phiMatrix):
fL = 0.
for i in range(phiMatrix.shape[0]):
for j in range(phiMatrix.shape[1]):
fL += lassoF(phiMatrix[i][j])
return fL
BETA = 0.6
TAU = 0.5
MIN_MAX_COST = 1.0e-7
class ResidualBiGraph():
"""Creates unitig graph for minimisation"""
def __init__(self, diGraph, sEdges, maxFlow = 1.,INT_SCALE = 1.0e6, COST_SCALE = 1.0e5):
"""Empty AugmentedBiGraph"""
self.diGraph = diGraph
self.sEdges = sEdges
self.maxFlow = maxFlow
self.rGraph = ResidualBiGraph.createResidualGraph(diGraph)
self.INT_SCALE = INT_SCALE
self.COST_SCALE = COST_SCALE
self.maxCost = 1.
def __copy__(self):
diGraphCopy = self.diGraph.copy()
return ResidualBiGraph(diGraphCopy,self.sEdges,self.maxFlow,self.INT_SCALE,self.COST_SCALE)
@classmethod
def createFromUnitigGraph(cls,unitigGraph, maxFlow = 1.,INT_SCALE = 1.0e6, COST_SCALE=1.0e5):
assert hasattr(unitigGraph, 'directedUnitigBiGraphS')
tempDiGraph = ResidualBiGraph.removeCycles(unitigGraph.directedUnitigBiGraphS)
copyDiGraph = tempDiGraph.copy()
sEdges = set()
for node in tempDiGraph.nodes():
pred = list(tempDiGraph.predecessors(node))
if len(pred) > 1 and node != 'sink+':
newNode = node + 's'
copyDiGraph.add_node(newNode)
for pnode in pred:
copyDiGraph.add_edge(pnode,newNode,weight=tempDiGraph[pnode][node]['weight'],
covweight=tempDiGraph[pnode][node]['covweight'],capacity=maxFlow*INT_SCALE,flow=0)
copyDiGraph.remove_edge(pnode,node)
copyDiGraph.add_edge(newNode,node,capacity=maxFlow*INT_SCALE,flow=0, weight=0.)
sEdges.add((newNode,node))
elif len(pred) == 1 and node != 'sink+':
copyDiGraph.add_edge(pred[0],node,weight=tempDiGraph[pred[0]][node]['weight'],
covweight=tempDiGraph[pred[0]][node]['covweight'],capacity=maxFlow*INT_SCALE,flow=0)
sEdges.add((pred[0],node))
nx.set_edge_attributes(copyDiGraph, maxFlow*INT_SCALE, name='capacity')
nx.set_edge_attributes(copyDiGraph, 0, name='flow')
nx.set_edge_attributes(copyDiGraph, 0, name='weight')
# attrs = {'source+': {'demand': -self.INT_SCALE}, 'sink+': {'demand': self.INT_SCALE}}
attrs = {'source+': {'demand': 0}, 'sink+': {'demand': 0}}
nx.set_node_attributes(copyDiGraph, attrs)
biGraph = cls(copyDiGraph, sEdges, maxFlow, INT_SCALE, COST_SCALE)
return biGraph
@classmethod
def removeCycles(cls, inGraph):
diGraph = inGraph.copy()
while not nx.is_directed_acyclic_graph(diGraph):
cycle = nx.find_cycle(diGraph)
weakestLink = sys.float_info.max
weakestEdge = None
for edge in cycle:
weight = diGraph[edge[0]][edge[1]]['covweight']
if weight < weakestLink:
weakestEdge = edge
weakestLink = weight
diGraph.remove_edge(weakestEdge[0],weakestEdge[1])
return diGraph
@classmethod
def createResidualGraph(cls,diGraph):
copyDiGraph = diGraph.copy()
for (m,n,f) in diGraph.edges.data('flow', default=0):
copyDiGraph[m][n]['capacity'] = max(0,diGraph[m][n]['capacity'] - f)
copyDiGraph[m][n]['flow'] = 0
copyDiGraph.add_edge(n,m,capacity=f,flow=0, weight=-diGraph[m][n]['weight'])
nx.set_node_attributes(copyDiGraph,0.0,'demand')
return copyDiGraph
@classmethod
def combineGraphs(cls,dictBiGraphs,geneList,mapGeneIdx,maxFlow = 1.,INT_SCALE = 1.0e6, COST_SCALE=1.0e5):
cGraph = nx.DiGraph()
lastGene = None
sEdges = set()
newGeneIdx = {}
for gene in geneList:
unitigsDash = list(dictBiGraphs[gene].diGraph.nodes())
mapNodes = {s:gene + "_" + s for s in unitigsDash}
for (ud, mapunitig) in mapNodes.items():
udu = ud[:-1]
mapunitigu = mapunitig[:-1]
if udu in mapGeneIdx[gene]:
newGeneIdx[mapunitigu] = mapGeneIdx[gene][udu]
if lastGene is None:
mapNodes['source+'] = 'source+'
if gene == geneList[-1]:
mapNodes['sink+'] = 'sink+'
sMap = [(mapNodes[e[0]],mapNodes[e[1]]) for e in dictBiGraphs[gene].sEdges]
sEdges.update(sMap)
tempGraph = nx.relabel_nodes(dictBiGraphs[gene].diGraph, mapNodes)
cGraph = nx.algorithms.operators.binary.compose(cGraph, tempGraph)
if lastGene is not None:
lastSink = lastGene + '_sink+'
cGraph.add_edge(lastSink,gene + '_source+', weight=0,covweight=0.,capacity=maxFlow*INT_SCALE,flow=0)
lastGene = gene
biGraph = cls(cGraph, sEdges, maxFlow, INT_SCALE, COST_SCALE)
biGraph.maxFlow = maxFlow
nx.set_edge_attributes(biGraph.diGraph, maxFlow*biGraph.INT_SCALE, name='capacity')
return (biGraph, newGeneIdx)
def addSourceSinkShunt(self):
self.diGraph.add_edge('sink+','source+',capacity=self.maxFlow*self.INT_SCALE,flow=0, weight=0.)
def removeSourceSinkShunt(self):
self.diGraph.remove_edge('sink+','source+')
def transformFlowCost(self, flowCost):
flowCostT = (flowCost*self.maxCost)/self.COST_SCALE
return flowCostT/self.INT_SCALE
def updateCosts(self,vCosts,mapIdx):
self.maxCost = np.max(np.abs(vCosts))
if self.maxCost < MIN_MAX_COST:
self.maxCost = MIN_MAX_COST
for sEdge in self.sEdges:
unitigd = sEdge[1][:-1]
v = mapIdx[unitigd]
try:
self.diGraph[sEdge[0]][sEdge[1]]['weight'] = int((vCosts[v]*self.COST_SCALE)/self.maxCost)
except ValueError:
self.diGraph[sEdge[0]][sEdge[1]]['weight'] = 0
def updateFlows(self,flowDict, epsilon):
for (node, flows) in flowDict.items():
for (outnode, flow) in flows.items():
if flow > 0.:
if self.diGraph.has_edge(node,outnode):
fFlow = self.diGraph[node][outnode]['flow']
self.diGraph[node][outnode]['flow'] = int(fFlow + epsilon*flow)
else:
assert self.diGraph.has_edge(outnode,node)
fFlow = self.diGraph[outnode][node]['flow']
self.diGraph[outnode][node]['flow'] = max(0,int(fFlow - epsilon*flow))
def deltaF(self, flowDict, epsilon, X, eLambda, mapIdx, Lengths, bKLDivergence = False, bLasso = False, fLambda = 1.):
DeltaF = 0.
for (node,outnode) in self.sEdges:
nfFlow = 0.
fFlow = 0.
v = mapIdx[outnode[:-1]]
change = False
iFlow = self.diGraph[node][outnode]['flow']
fFlow = float(iFlow)/self.INT_SCALE
if flowDict[node][outnode] > 0.:
niFlow = int(iFlow + epsilon*flowDict[node][outnode])
nfFlow = float(niFlow)/self.INT_SCALE
change = True
elif flowDict[outnode][node] > 0.:
niFlow = int(iFlow - epsilon*flowDict[outnode][node])
nfFlow = float(niFlow)/self.INT_SCALE
change = True
if change:
newLambda = eLambda[v] + Lengths[v]*(nfFlow - fFlow)
if bKLDivergence:
T1 = newLambda - eLambda[v]
T2 = X[v]*np.log(newLambda/eLambda[v])
DeltaF += np.sum(T1 - T2)
else:
DeltaF += 0.5*np.sum((X[v] - newLambda)**2 - (X[v] - eLambda[v])**2)
if bLasso:
DeltaF += fLambda*(nfFlow - fFlow)
return DeltaF
def deltaFF(self, flowDict, epsilon, X, eEta, eTheta, mapIdx, minDelta):
DeltaF = 0.
for (node,outnode) in self.sEdges:
nfFlow = 0.
fFlow = 0.
v = mapIdx[outnode[:-1]]
change = False
iFlow = self.diGraph[node][outnode]['flow']
fFlow = float(iFlow)/self.INT_SCALE
if flowDict[node][outnode] > 0.:
niFlow = int(iFlow + epsilon*flowDict[node][outnode])
nfFlow = float(niFlow)/self.INT_SCALE
change = True
elif flowDict[outnode][node] > 0.:
niFlow = int(iFlow - epsilon*flowDict[outnode][node])
nfFlow = float(niFlow)/self.INT_SCALE
change = True
if change:
newEta = eEta[v] + nfFlow - fFlow
newTheta = np.log((newEta + minDelta)/(1 - newEta + minDelta))
DeltaF += 0.5*((X[v] - newTheta)**2 - (X[v] - eTheta[v])**2)
return DeltaF
def initialiseFlows(self):
for e in self.diGraph.edges:
self.diGraph[e[0]][e[1]]['flow'] = 0
def clearCosts(self):
for e in self.diGraph.edges:
self.diGraph[e[0]][e[1]]['weight'] = 0
def addFlowPath(self, path, pflow):
for u,v in zip(path,path[1:]):
#print(u + ',' + v)
self.diGraph[u][v]['flow'] += pflow
def addEdgePath(self, path, pflow):
for e in path:
fC = self.diGraph[e[0]][e[1]]['flow']
fN = max(fC + pflow,0)
self.diGraph[e[0]][e[1]]['flow'] = fN
def getRandomPath(self, prng):
node = 'source+'
nAttempts = 0
path = []
while node != 'sink+' and nAttempts < 1000:
succ = list(self.diGraph.successors(node))
path.append(node)
if len(succ) == 0:
path = []
node = 'source+'
nAttempts += 1
else:
node = prng.choice(succ)
path.append(node)
if nAttempts == 1000:
raise ValueError('Gene appears to have no path source to sink')
return path
def updatePhi(self, phi, mapIdx):
for sEdge in self.sEdges:
iFlow = self.diGraph[sEdge[0]][sEdge[1]]['flow']
fFlow = float(iFlow)/self.INT_SCALE
#print(str(fFlow))
unitigd = sEdge[1][:-1]
v = mapIdx[unitigd]
phi[v] = fFlow
def decomposeFlows(self):
paths = {}
maxFlow = 1.0
while maxFlow > 0.:
(maxPath, maxFlow) = self.getMaxMinFlowPathDAG()
self.addFlowPath(maxPath, -maxFlow)
if maxFlow > 0.:
paths[tuple(maxPath)] = maxFlow/self.INT_SCALE
print(str(maxFlow/self.INT_SCALE))
return paths
def getMaxMinFlowPathDAG(self):
#self.initialiseFlows()
self.top_sort = list(nx.topological_sort(self.diGraph))
lenSort = len(self.top_sort)
maxPred = {}
maxFlowNode = {}
for node in self.top_sort:
pred = list(self.diGraph.predecessors(node))
if len(pred) > 0:
maxFlowPred = min(maxFlowNode[pred[0]],self.diGraph[pred[0]][node]['flow'])
maxPred[node] = pred[0]
for predecessor in pred[1:]:
# print (node + "," + predecessor + "," + str(dGraph[predecessor][node]['flow']))
weight = min(maxFlowNode[predecessor],self.diGraph[predecessor][node]['flow'])
if weight > maxFlowPred:
maxFlowPred = weight
maxPred[node] = predecessor
maxFlowNode[node] = maxFlowPred
else:
maxFlowNode[node] = sys.float_info.max
maxPred[node] = None
minPath = []
bestNode = 'sink+'
while bestNode is not None:
minPath.append(bestNode)
bestNode = maxPred[bestNode]
minPath.pop(0)
minPath.pop()
minPath.reverse()
return (minPath, maxFlowNode['sink+'])
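# Usage sketch for ResidualBiGraph (hypothetical; assumes a UnitigGraph
# whose directedUnitigBiGraphS attribute is set, as createFromUnitigGraph
# asserts). Pushes half the scaled unit flow down one random path, then
# reads the path weights back out of the edge flows:
#
#   rbg = ResidualBiGraph.createFromUnitigGraph(unitigGraph)
#   rbg.initialiseFlows()
#   path = rbg.getRandomPath(RandomState(42))
#   rbg.addFlowPath(path, int(0.5 * rbg.INT_SCALE))
#   rbg.decomposeFlows()   # recovers the path with weight ~0.5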
#optimise flows to fit function
class FlowFitTheta():
def __init__(self, biGraph, prng, EtaStar, mapIdx, bConstrainFlow = False, initEta = None, minDelta = 1.0e-3):
self.minDelta = minDelta
self.etaStar = EtaStar
self.thetaStar = np.log((self.etaStar + self.minDelta)/(1.0 - self.etaStar + self.minDelta))
self.biGraph = biGraph.__copy__()
self.prng = prng
self.V = self.etaStar.shape[0]
self.mapIdx = mapIdx
if not bConstrainFlow:
if initEta is None:
self.Theta = np.zeros(self.V)
self.Eta = np.zeros(self.V)
self.Eta.fill(0.5)
else:
self.Eta = initEta
self._updateTheta()
self.biGraph.addSourceSinkShunt()
else:
pathg = self.biGraph.getRandomPath(self.prng)
pathh = self.biGraph.getRandomPath(self.prng)
self.biGraph.addFlowPath(pathg, 0.5*self.biGraph.INT_SCALE)
self.biGraph.addFlowPath(pathh, 0.5*self.biGraph.INT_SCALE)
self.Eta = np.zeros(self.V)
for u in pathg:
ud = u[:-1]
if ud in self.mapIdx:
v = self.mapIdx[ud]
self.Eta[v] += 0.5
for u in pathh:
ud = u[:-1]
if ud in self.mapIdx:
v = self.mapIdx[ud]
self.Eta[v] += 0.5
self._updateTheta()
def _devF(self):
return 0.5*np.sum((self.thetaStar - self.Theta)**2)
def _updateTheta(self):
self.Theta = np.log((self.Eta + self.minDelta)/(1.0 - self.Eta + self.minDelta))
def optimiseFlows(self, max_iter = 100, minChange = 1.0):
iter = 0
lNLL1 = self._devF()
print(str(iter) + "," + str(lNLL1))
#self.biGraph.addSourceSinkShunt()
deltaF = minChange*2.
while iter < max_iter or deltaF > minChange:
#first compute phi gradient in matrix format
dNeta = 1./((1.0 - self.Eta + self.minDelta)*(self.Eta + self.minDelta))
gradEta = (self.Theta - self.thetaStar)*dNeta
newNeta = np.copy(self.Eta)
self.biGraph.updateCosts(gradEta,self.mapIdx)
residualGraph = ResidualBiGraph.createResidualGraph(self.biGraph.diGraph)
flowCost, flowDict = nx.network_simplex(residualGraph)
pflow = 0.1
DeltaF = self.biGraph.deltaFF(flowDict, pflow, self.thetaStar, self.Eta, self.Theta, self.mapIdx,self.minDelta)
weight = self.biGraph.transformFlowCost(flowCost)
i = 0
while DeltaF > pflow*weight*BETA and i < 10:
pflow *= TAU
DeltaF = self.biGraph.deltaFF(flowDict, pflow, self.thetaStar, self.Eta, self.Theta, self.mapIdx, self.minDelta)
i += 1
if pflow > 0. and i < 10:
self.biGraph.updateFlows(flowDict,pflow)
self.biGraph.updatePhi(newNeta,self.mapIdx)
self.Eta = newNeta
self._updateTheta()
NLL1 = self._devF()
deltaF = abs(NLL1 - lNLL1)
lNLL1 = NLL1
if iter % 10 == 0:
print(str(iter) + "," + str(NLL1) + "," + str(deltaF))
iter = iter+1
class FlowGraphML():
DELTA = 1.0e-9
EPSILON = 1.0e-5
PRECISION = 1.0e-15
def __init__(self, biGraphs, genes, prng, X, lengths, mapGeneIdx, mask = None, bLasso = False, fLambda = 1.0):
self.X = X
self.V = self.X.shape[0]
if mask is None:
self.mask = np.ones((self.V))
else:
self.mask = mask
self.Omega = np.sum(self.mask > 0)
self.biGraphs = {}
for (gene,biGraph) in biGraphs.items():
self.biGraphs[gene] = ResidualBiGraph(biGraph.diGraph.copy(),biGraph.sEdges,maxFlow=biGraph.maxFlow)
self.biGraphs[gene].initialiseFlows()
self.genes = genes
self.mapGeneIdx = mapGeneIdx
self.phi = np.zeros((self.V))
#for gene, biGraph in self.biGraphs.items():
# pathg = biGraph.getRandomPath(prng)
# biGraph.addFlowPath(pathg, 0.1*INT_SCALE)
# for u in pathg:
# ud = u[:-1]
# if ud in self.mapGeneIdx[gene]:
# v = self.mapGeneIdx[gene][ud]
# self.phi[v] = 0.1
self.tau = 1.
self.lengths = lengths
self.bLasso = bLasso
self.fLambda = fLambda
def _KDivergence(self, eLambda, mask):
return np.sum(mask*(eLambda - self.X*np.log(eLambda)))
def _FDivergence(self, eLambda, mask):
return 0.5*np.sum(np.square(mask*(eLambda - self.X)))
def optimiseFlows(self, max_iter=500, bKLDivergence = False):
iter = 0
eLambda = (self.phi + self.DELTA) * self.lengths
if bKLDivergence:
NLL1 = self._KDivergence(eLambda, self.mask)
else:
NLL1 = self._FDivergence(eLambda, self.mask)
print(str(iter) + "," + str(NLL1))
for gene, biGraph in self.biGraphs.items():
biGraph.addSourceSinkShunt()
while iter < max_iter:
#first compute phi gradient in matrix format
eLambda = (self.phi + self.DELTA) * self.lengths
R = self.X/eLambda
if bKLDivergence:
gradPhi = - R*self.mask + self.lengths
else:
gradPhi = (eLambda - self.X)*self.mask*self.lengths
if self.bLasso:
gradPhi += self.fLambda
newPhi = np.copy(self.phi)
for gene, biGraph in self.biGraphs.items():
biGraph.updateCosts(gradPhi,self.mapGeneIdx[gene])
residualGraph = ResidualBiGraph.createResidualGraph(biGraph.diGraph)
#residualGraph.add_edge('sink+','source+')
#residualGraph.add_edge('source+','sink+')
#for n1, d in residualGraph.nodes(data=True):
# d.pop('demand',None)
#attrs = {'source+': {'demand': 0.01*INT_SCALE}, 'sink+': {'demand': -0.01*INT_SCALE}}
#nx.set_node_attributes(residualGraph, attrs)
flowCost, flowDict = nx.network_simplex(residualGraph)
pflow = 0.01
DeltaF = biGraph.deltaF(flowDict, pflow, self.X, eLambda, self.mapGeneIdx[gene], self.lengths, bKLDivergence, self.bLasso, self.fLambda)
weight = biGraph.transformFlowCost(flowCost)
i = 0
while DeltaF > pflow*weight*BETA and i < 10:
pflow *= TAU
DeltaF = biGraph.deltaF(flowDict, pflow, self.X, eLambda, self.mapGeneIdx[gene], self.lengths, bKLDivergence, self.bLasso, self.fLambda)
i += 1
if pflow > 0. and i < 10:
biGraph.updateFlows(flowDict,pflow)
biGraph.updatePhi(newPhi,self.mapGeneIdx[gene])
eLambda1 = (newPhi + self.DELTA) * self.lengths
if bKLDivergence:
NLL1 = self._KDivergence(eLambda1,self.mask)
else:
NLL1 = self._FDivergence(eLambda1,self.mask)
if iter % 1 == 0:
print(str(iter) + "," + str(NLL1))
#print(str(iter) + "," + str(NLL3))
self.phi = newPhi
iter = iter+1
for gene, biGraph in self.biGraphs.items():
biGraph.removeSourceSinkShunt()
def decomposeFlows(self):
flowPaths = {}
for gene, biGraph in self.biGraphs.items():
flowPaths[gene] = biGraph.decomposeFlows()
return flowPaths
def KLDivergence(self,mask):
eLambda = (self.phi + self.DELTA) * self.lengths
div = np.sum(mask*(eLambda - self.X - self.X*np.log(eLambda) + self.X*np.log(self.X + self.PRECISION)))
return div
def FDivergence(self, mask):
eLambda = (self.phi + self.DELTA) * self.lengths
omega = np.sum(mask)
return np.sqrt(np.sum(np.square(mask*(eLambda - self.X)))/omega)
def evalPathWeight(self, path, weight):
D = 0.0
for u,v in zip(path,path[1:]):
D += self.diGraph[u][v][weight]
return D
def adjustCoverages(unitigGraph):
adjLengths = {}
covMapAdj = {}
readLength = 150.
#readLength = 1
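    # kFactor apparently converts k-mer coverage back to read coverage,
    # C_read ~ C_kmer * L / (L - k + 1), with the overlap length playing
    # the role of k (interpretation inferred from the formula below).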
kFactor = readLength/(readLength - unitigGraph.overlapLength + 1.)
for unitig in unitigGraph.unitigs:
adjLengths[unitig] = unitigGraph.lengths[unitig] - 2.0*unitigGraph.overlapLength + readLength
#adjLengths[unitig] = 1.
covMapAdj[unitig] = unitigGraph.covMap[unitig] * float(adjLengths[unitig])*(kFactor/readLength)
V = len(unitigGraph.unitigs)
S = unitigGraph.covMap[unitigGraph.unitigs[0]].shape[0]
xValsU = {}
X = np.zeros((V,S))
lengths = np.zeros(V)
M = np.ones((V,S))
v = 0
mapUnitigs = {}
with open('coverage.csv','w') as f:
for unitig in unitigGraph.unitigs:
readSum = np.sum(covMapAdj[unitig])
mapUnitigs[unitig] = v
xValsU[unitig] = readSum
covSum = np.sum(unitigGraph.covMap[unitig])*kFactor
X[v,:] = unitigGraph.covMap[unitig] * float(adjLengths[unitig])*(kFactor/readLength)
lengths[v] = float(adjLengths[unitig])
f.write(unitig + ',' + str(unitigGraph.lengths[unitig]) +',' + str(covSum) + ',' + str(readSum) + '\n')
v+=1
return (V,S,lengths, mapUnitigs, X)
def readCogStopsDead(cog_graph,kmer_length,cov_file):
deadEndFile = cog_graph[:-3] + "deadends"
stopFile = cog_graph[:-3] + "stops"
try:
unitigGraph = UnitigGraph.loadGraphFromGfaFile(cog_graph,int(kmer_length), cov_file, tsvFile=True, bRemoveSelfLinks = True)
except IOError:
print('Trouble using file {}'.format(cog_graph))
sys.exit()
deadEnds = []
try:
with open(deadEndFile) as f:
for line in f:
line.strip()
deadEnds.append(line)
except IOError:
print('Trouble using file {}'.format(deadEndFile))
sys.exit()
stops = []
try:
with open(stopFile) as f:
for line in f:
line = line.strip()
toks = line.split("\t")
dirn = True
if toks[1] == '-':
dirn = False
stops.append((toks[0],dirn))
except IOError:
print('Trouble using file {}'.format(stopFile))
sys.exit()
return (unitigGraph, stops, deadEnds )
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument("cog_graph", help="gfa file")
parser.add_argument("kmer_length", help="kmer length assumed overlap")
parser.add_argument("cov_file", help="tsv file")
parser.add_argument('-s', '--random_seed', default=23724839, type=int,
help="specifies seed for numpy random number generator defaults to 23724839 applied after random filtering")
args = parser.parse_args()
    # import ipdb; ipdb.set_trace()  # optional interactive debugging hook
np.random.seed(seed=12637)
(unitigGraph, stops, deadEnds ) = readCogStopsDead(args.cog_graph,args.kmer_length,args.cov_file)
(source_list, sink_list) = unitigGraph.selectSourceSinksStops(stops, deadEnds, 3000)
source_names = [convertNodeToName(source) for source in source_list]
sink_names = [convertNodeToName(sink) for sink in sink_list]
unitigGraph.setDirectedBiGraphSource(source_names,sink_names)
prng = RandomState(args.random_seed) #create prng from seed
(V, S, lengths, mapUnitigs, X) = adjustCoverages(unitigGraph)
mapGeneIdx = {}
mapGeneIdx['gene'] = mapUnitigs
genes = ['gene']
residualBiGraphs = {}
residualBiGraphs['gene'] = ResidualBiGraph.createFromUnitigGraph(unitigGraph)
M = np.ones((V))
indices = np.random.choice(np.arange(V), replace=False,
size=int(V * 0.1))
M[indices] = 0
XT = np.sum(X,axis=1)
flowGraph = FlowGraphML(residualBiGraphs, genes, prng, XT, lengths, mapGeneIdx, M, True, 1.0)
flowGraph.bLasso = True
flowGraph.fLambda = 1.0e3
flowGraph.optimiseFlows(50,bKLDivergence = False)
eLambda = (flowGraph.phi + flowGraph.DELTA) * flowGraph.lengths
for v in range(flowGraph.V):
print(str(v) + ',' + str(flowGraph.X[v]) + ',' + str(flowGraph.phi[v]) + ',' + str(eLambda[v]))
paths = flowGraph.decomposeFlows()
print('Debug')
if __name__ == "__main__":
main(sys.argv[1:])
| 2.265625 | 2 |
article/figure-4b/lineplot-4b.py | guilherme-araujo/gsop-dist | 0 | 12767894 | <reponame>guilherme-araujo/gsop-dist<filename>article/figure-4b/lineplot-4b.py
import seaborn as sns
import sys
import csv
from statistics import stdev
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import gc
files = [
{'file': 'a1g9', 'bonus': '0.01'},
{'file': 'a3g7', 'bonus': '0.03'},
{'file': 'a5g5', 'bonus': '0.05'},
{'file': 'a7g3', 'bonus': '0.07'},
{'file': 'a9g1', 'bonus': '0.09'}
]
al = []
for f in files:
with open(f['file']+'/'+f['file']+'.txt') as csv_file_r:
print(f['file'])
csv_reader = csv.reader(csv_file_r, delimiter=';')
e00 = []
for row in csv_reader:
if(row[0]!='partial'):
qta = int(row[0])
qtb = int(row[1])
result = 'Undef.'
if qta == 500:
result = 'A'
elif qta == 0:
result = 'B'
e00.append([qta,qtb,result,f['bonus']])
al += e00
all_df = pd.DataFrame(al, columns=['qta', 'qtb', 'type', 'bonus'])
print(all_df)
resumo = all_df.groupby(["bonus", "type"])["qta"].count().unstack(fill_value=0).stack().reset_index(name="sum")
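# resumo now holds, per bonus level, the number of runs that ended in each
# fixation outcome ('A', 'B', 'Undef.'); unstack/stack with fill_value=0
# keeps zero-count outcomes in the table.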
fig_dims = (6, 4)
fig, ax = plt.subplots(figsize=fig_dims)
print(resumo)
sns.lineplot(data=resumo, x="bonus", y="sum", hue="type", ax=ax)
plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')
ax.set(xlabel="alpha A", ylabel="Fixation %" )
ax.yaxis.set_major_formatter(mtick.PercentFormatter(xmax=5000000))
#ax.xaxis.set_major_formatter(mtick.ScalarFormatter())
#ax.set_xticks(resumo['bonus'].unique())
#plt.setp(ax.get_xticklabels(), rotation=90, horizontalalignment='center')
plt.ylim(1200000,2000000)
plt.tight_layout()
plt.show()
plt.savefig("lineplot-4b.svg")
plt.savefig("lineplot-4b.png", dpi=200)
| 2.140625 | 2 |
plot_experiments_nodes.py | proroka/redundant_assignment | 0 | 12767895 | <reponame>proroka/redundant_assignment
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import matplotlib.pylab as plt
import msgpack
import msgpack_numpy
import numpy as np
import scipy.stats as st
from six.moves import input
import launch_experiments_nodes as launch_experiments
Data = collections.namedtuple('Data', ['costs', 'correlations'])
_LOWER_BOUND = 'lower_bound'
_UPPER_BOUND = 'hungarian'
_IGNORE = set(['no_correlation_greedy'])
_LINESTYLE = {
'hungarian': '--',
}
_DEFAULT_LINESTYLE = '-'
_COLORS = {
'lower_bound': 'black',
'greedy': 'red',
'hungarian': 'black',
'repeated_hungarian': 'orange',
'random': 'blue',
'closest': 'magenta',
}
_ORDER_AS = [
'hungarian',
'random',
'repeated_hungarian',
'greedy',
'lower_bound',
'closest',
]
def read_results(filename):
with open(filename, 'rb') as fp:
return msgpack.unpackb(fp.read(), raw=False, use_list=False)
def errors(a, use_ci=True):
if use_ci:
u, v = st.t.interval(0.95, len(a)-1, loc=np.mean(a), scale=st.sem(a))
return u.item(), v.item()
m = np.mean(a)
s = np.std(a)
return m - s, m + s
def make_nice(bp):
for box in bp['boxes']:
box.set(color='k', linewidth=2)
box.set(facecolor='steelblue')
for whisker in bp['whiskers']:
whisker.set(color='k', linewidth=2)
for cap in bp['caps']:
cap.set(color='k', linewidth=2)
for median in bp['medians']:
median.set(color='k', linewidth=2)
for flier in bp['fliers']:
flier.set(marker='o', color='k', alpha=0.5)
def run(filename):
original_data = read_results(filename)
argument_class = None
    # Reconvert keys to arguments and make numpy arrays.
data = {}
for k, v in original_data.items():
if len(k) == 2 and argument_class is None:
argument_class = launch_experiments.Arguments
if argument_class is None:
raise ValueError('Unsupported data format.')
algorithm_data = {}
for algorithm, (costs, correlations) in v.items():
algorithm_data[algorithm] = Data(np.array(costs, np.float32),
np.array(correlations, np.float32))
data[argument_class(*k)] = algorithm_data
# Get baseline values.
defaults = argument_class()
# Values for the x-axis.
x_axes = collections.defaultdict(set)
# Gather possible plots.
for k in data:
for field in argument_class._fields:
d = getattr(defaults, field)
v = getattr(k, field)
if d != v:
# Verify that all other fields are the same.
for other_field in argument_class._fields:
if other_field == field:
continue
if getattr(defaults, other_field) != getattr(k, other_field):
raise NotImplementedError('Plotting multiple dimensional plots is not supported.')
# All good.
x_axes[field].add(v)
if field != 'deployment_size':
x_axes[field].add(d)
for x_axis_label, x_axis_values in x_axes.items():
# Sorted x values.
x_values = sorted(x_axis_values)
# Get y values.
y_cost_values = collections.defaultdict(list)
y_cost_lowers = collections.defaultdict(list)
y_cost_uppers = collections.defaultdict(list)
for x_axis_value in x_values:
k = argument_class(**{x_axis_label: x_axis_value})
u = data[k][_UPPER_BOUND].costs
for algorithm, values in data[k].items():
print(algorithm)
y = values.costs / (u + 1e-10)
if algorithm == _LOWER_BOUND and y_cost_values[algorithm] and x_axis_label == 'deployment_size':
y_cost_values[algorithm].append(y_cost_values[algorithm][-1])
y_cost_uppers[algorithm].append(y_cost_uppers[algorithm][-1])
y_cost_lowers[algorithm].append(y_cost_lowers[algorithm][-1])
else:
y_cost_values[algorithm].append(np.mean(y))
lower, upper = errors(y)
y_cost_uppers[algorithm].append(upper)
y_cost_lowers[algorithm].append(lower)
plt.figure()
for algorithm in _ORDER_AS:
if algorithm in _IGNORE:
continue
values = y_cost_values[algorithm]
v = np.array(values, np.float32)
u = np.array(y_cost_uppers[algorithm], np.float32)
l = np.array(y_cost_lowers[algorithm], np.float32)
ls = _LINESTYLE[algorithm] if algorithm in _LINESTYLE else _DEFAULT_LINESTYLE
plt.plot(x_values, v, linestyle=ls, color=_COLORS[algorithm], lw=2, label=algorithm, marker='o', ms=8)
plt.fill_between(x_values, l, u, facecolor=_COLORS[algorithm], alpha=.5)
plt.xlabel(x_axis_label)
plt.ylabel('cost')
plt.legend()
plt.show(block=False)
input('Hit ENTER to close figure')
plt.close()
if __name__ == '__main__':
msgpack_numpy.patch() # Magic.
parser = argparse.ArgumentParser(description='Plots experimental results')
parser.add_argument('--input_results', action='store', required=True, help='Where the results are stored.')
args = parser.parse_args()
run(args.input_results)
| 2.078125 | 2 |
app/views.py | configuresystems/restful-api-with-flask | 2 | 12767896 | """So that we can modularize our application, we will use this as our
master file for application endpoints."""
from .modules.todo import views
| 1.257813 | 1 |
src/abaqus/OdbDisplay/ViewCut.py | Haiiliin/PyAbaqus | 7 | 12767897 | import typing
from abaqusConstants import *
class ViewCut:
"""The ViewCut object is used to store values and attributes associate with ViewCut type
objects. ViewCut objects can be created using the methods described below. The methods
accessed via the OdbDisplay object cause the ViewCut object to be added to the
session.viewports[name].odbDisplay.viewCuts repository.
Attributes
----------
angle: float
A Float specifying the rotation angle of the cut defined with a **shape** set to PLANE.
motion: SymbolicConstant
A SymbolicConstant specifying the type of motion for the cut defined with a **shape** set
to PLANE. Possible values are TRANSLATE and ROTATE. The default value is TRANSLATE.
position: float
A Float specifying the position of the cut defined with a **shape** set to PLANE.
radius: float
A Float specifying the radius of the cut defined with a **shape** set to CYLINDER or
SPHERE.
rotationAxis: SymbolicConstant
A SymbolicConstant specifying the rotation axis for the cut defined with a **shape** set
to PLANE. Possible values are AXIS_1, AXIS_2, and AXIS_3. The default value is AXIS_2.
value: float
A Float specifying the value of the cut defined with a **shape** set to ISOSURFACE.
showModelAboveCut: Boolean
A Boolean specifying whether to display the model above the cut. The default value is
OFF.
showModelOnCut: Boolean
A Boolean specifying whether to display the model on the cut. The default value is ON.
showModelBelowCut: Boolean
A Boolean specifying whether to display the model below the cut. The default value is
ON.
showFreeBodyCut: Boolean
A Boolean specifying whether to display the free body cut. The default value is OFF.
active: Boolean
A Boolean specifying whether the cut is displayed.
cutRange: tuple[float]
A pair of Floats specifying the acceptable range for positioning the cut.
crossSectionalArea: float
A Float returning the cross-sectional area of the cut when **showFreeBodyCut** is set to
ON.
Notes
-----
This object can be accessed by:
.. code-block:: python
import visualization
session.viewports[name].layers[name].odbDisplay.viewCuts[name]
session.viewports[name].odbDisplay.viewCuts[name]
"""
# A Float specifying the rotation angle of the cut defined with a *shape* set to PLANE.
angle: float = None
# A SymbolicConstant specifying the type of motion for the cut defined with a *shape* set
# to PLANE. Possible values are TRANSLATE and ROTATE. The default value is TRANSLATE.
motion: SymbolicConstant = TRANSLATE
# A Float specifying the position of the cut defined with a *shape* set to PLANE.
position: float = None
# A Float specifying the radius of the cut defined with a *shape* set to CYLINDER or
# SPHERE.
radius: float = None
# A SymbolicConstant specifying the rotation axis for the cut defined with a *shape* set
# to PLANE. Possible values are AXIS_1, AXIS_2, and AXIS_3. The default value is AXIS_2.
rotationAxis: SymbolicConstant = AXIS_2
# A Float specifying the value of the cut defined with a *shape* set to ISOSURFACE.
value: float = None
# A Boolean specifying whether to display the model above the cut. The default value is
# OFF.
showModelAboveCut: Boolean = OFF
# A Boolean specifying whether to display the model on the cut. The default value is ON.
showModelOnCut: Boolean = ON
# A Boolean specifying whether to display the model below the cut. The default value is
# ON.
showModelBelowCut: Boolean = ON
# A Boolean specifying whether to display the free body cut. The default value is OFF.
showFreeBodyCut: Boolean = OFF
# A Boolean specifying whether the cut is displayed.
active: Boolean = OFF
# A pair of Floats specifying the acceptable range for positioning the cut.
cutRange: tuple[float] = ()
# A Float returning the cross-sectional area of the cut when *showFreeBodyCut* is set to
# ON.
crossSectionalArea: float = None
def __init__(self, name: str, shape: SymbolicConstant, origin: tuple,
normal: typing.Union[SymbolicConstant, float],
axis2: typing.Union[SymbolicConstant, float], csysName: str,
cylinderAxis: typing.Union[SymbolicConstant, float], followDeformation: Boolean = OFF,
overrideAveraging: Boolean = ON, referenceFrame: SymbolicConstant = FIRST_FRAME):
"""This method creates a ViewCut object.
Notes
-----
This function can be accessed by:
.. code-block:: python
session.viewports[name].layers[name].odbDisplay.ViewCut
session.viewports[name].odbDisplay.ViewCut
Parameters
----------
name
A String specifying the repository key.
shape
A SymbolicConstant specifying the shape of the ViewCut object. Possible values are
PLANE, CYLINDER, SPHERE, and ISOSURFACE.
origin
A sequence of three Floats specifying the X-, Y-, and Z-coordinates of the origin of the
plane, cylinder or sphere cut. This origin is not required if the cut shape is
ISOSURFACE or if the cut is defined by the *csysName* argument.
normal
A sequence of Floats specifying the X-, Y-, and Z-coordinates of the normal axis to the
plane defining the cut, when the plane is defined using the *origin* argument or a
SymbolicConstant defining this normal axis, when the cut is defined by the *csysName*
argument. Possible values are AXIS_1, AXIS_2, AXIS_3. This axis is not required if the
cut *shape* is CYLINDER, SPHERE or ISOSURFACE.
axis2
A sequence of three Floats specifying the X-, Y-, and Z-coordinates of the second axis
of the plane defining the cut, when the plane is defined using the *origin* argument or
a SymbolicConstant defining this second axis, when the cut is defined by the *csysName*
argument. Possible values are AXIS_1, AXIS_2, AXIS_3. This axis is used to rotate the
plane cut. It is not required if the cut *shape* is CYLINDER, SPHERE or ISOSURFACE.
csysName
A String specifying the name of the DatumCsys object to be used to define the cut. This
name is not required if the cut *shape* is ISOSURFACE or if the cut is defined by the
*origin* argument.
cylinderAxis
A sequence of Floats specifying the X-, Y-, and Z-coordinates of the cylinder axis
defining the cut, when the cut is defined using the *origin* argument or a
SymbolicConstant defining this cylinder axis, when the cut is defined by the *csysName*
argument. Possible values are AXIS_1, AXIS_2, AXIS_3. This axis is not required if the
cut *shape* is PLANE, SPHERE, or ISOSURFACE.
followDeformation
A Boolean specifying whether the cut will follow the deformation or be static. The
default value is OFF.
overrideAveraging
A Boolean specifying averaging for element based fields associated with isosurface cuts
will be set to compute-average with a threshold of 100% when true. The current field
options will be used when false. The default value is ON.
referenceFrame
A SymbolicConstant specifying which reference frame will be used when the cut follows
the deformation. Possible values are FIRST_FRAME, LAST_FRAME, and CURRENT_FRAME. The
default value is FIRST_FRAME.
Returns
-------
A ViewCut object.
"""
pass
def setValues(self, angle: float = None, motion: SymbolicConstant = TRANSLATE, position: float = None,
radius: float = None, rotationAxis: SymbolicConstant = AXIS_2, value: float = None,
showModelAboveCut: Boolean = OFF, showModelOnCut: Boolean = ON,
showModelBelowCut: Boolean = ON, showFreeBodyCut: Boolean = OFF, csysName: str = '',
origin: tuple = (),
normal: typing.Union[SymbolicConstant, float] = AXIS_1,
axis2: typing.Union[SymbolicConstant, float] = AXIS_2,
cylinderAxis: typing.Union[SymbolicConstant, float] = AXIS_3,
followDeformation: Boolean = OFF, overrideAveraging: Boolean = ON,
referenceFrame: SymbolicConstant = FIRST_FRAME):
"""This method modifies the ViewCut object.
Parameters
----------
angle
A Float specifying the rotation angle of the cut defined with a *shape* set to PLANE.
motion
A SymbolicConstant specifying the type of motion for the cut defined with a *shape* set
to PLANE. Possible values are TRANSLATE and ROTATE. The default value is TRANSLATE.
position
A Float specifying the position of the cut defined with a *shape* set to PLANE.
radius
A Float specifying the radius of the cut defined with a *shape* set to CYLINDER or
SPHERE.
rotationAxis
A SymbolicConstant specifying the rotation axis for the cut defined with a *shape* set
to PLANE. Possible values are AXIS_1, AXIS_2, and AXIS_3. The default value is AXIS_2.
value
A Float specifying the value of the cut defined with a *shape* set to ISOSURFACE.
showModelAboveCut
A Boolean specifying whether to display the model above the cut. The default value is
OFF.
showModelOnCut
A Boolean specifying whether to display the model on the cut. The default value is ON.
showModelBelowCut
A Boolean specifying whether to display the model below the cut. The default value is
ON.
showFreeBodyCut
A Boolean specifying whether to display the free body cut. The default value is OFF.
csysName
A String specifying the name of the DatumCsys object to be used to define the cut. This
name is not required if the cut *shape* is ISOSURFACE or if the cut is defined by the
*origin* argument.
origin
A sequence of three Floats specifying the X-, Y-, and Z-coordinates of the origin of the
plane, cylinder or sphere cut. This origin is not required if the cut shape is
ISOSURFACE or if the cut is defined by the *csysName* argument.
normal
A sequence of Floats specifying the X-, Y-, and Z-coordinates of the normal axis to the
plane defining the cut, when the plane is defined using the *origin* argument or a
SymbolicConstant defining this normal axis, when the cut is defined by the *csysName*
argument. Possible values are AXIS_1, AXIS_2, AXIS_3. This axis is not required if the
cut *shape* is CYLINDER, SPHERE or ISOSURFACE.
axis2
A sequence of three Floats specifying the X-, Y-, and Z-coordinates of the second axis
of the plane defining the cut, when the plane is defined using the *origin* argument or
a SymbolicConstant defining this second axis, when the cut is defined by the *csysName*
argument. Possible values are AXIS_1, AXIS_2, AXIS_3. This axis is used to rotate the
plane cut. It is not required if the cut *shape* is CYLINDER, SPHERE or ISOSURFACE.
cylinderAxis
A sequence of Floats specifying the X-, Y-, and Z-coordinates of the cylinder axis
defining the cut, when the cut is defined using the *origin* argument or a
SymbolicConstant defining this cylinder axis, when the cut is defined by the *csysName*
argument. Possible values are AXIS_1, AXIS_2, AXIS_3. This axis is not required if the
cut *shape* is PLANE, SPHERE, or ISOSURFACE.
followDeformation
A Boolean specifying whether the cut will follow the deformation or be static. The
default value is OFF.
overrideAveraging
A Boolean specifying averaging for element based fields associated with isosurface cuts
will be set to compute-average with a threshold of 100% when true. The current field
options will be used when false. The default value is ON.
referenceFrame
A SymbolicConstant specifying which reference frame will be used when the cut follows
the deformation. Possible values are FIRST_FRAME, LAST_FRAME, and CURRENT_FRAME. The
default value is FIRST_FRAME.
"""
pass
def updateVariable(self):
"""This method updates the field associated with an isosurface cut to be consistent with
the current primary variable.
"""
pass
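# Illustrative sketch (not part of the original stubs): typical creation and
# adjustment of a planar ViewCut. ``session`` and ``viewport_name`` are assumed
# to come from a live Abaqus/CAE session; the constants are from abaqusConstants.
def _example_plane_cut(session, viewport_name):
    odb_display = session.viewports[viewport_name].odbDisplay
    cut = odb_display.ViewCut(
        name='midplane', shape=PLANE, origin=(0.0, 0.0, 0.0),
        normal=(0.0, 1.0, 0.0), axis2=(1.0, 0.0, 0.0), csysName='',
        cylinderAxis=AXIS_3)
    cut.setValues(position=0.5, showModelBelowCut=OFF)
    return cut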
| 2.8125 | 3 |
memos/memos/models/MemoActivity.py | iotexpert/docmgr | 0 | 12767898 | import enum
from flask import current_app
class MemoActivity(enum.Enum):
Create = 1 # Memo has been created and put into draft
Signoff = 2 # Memo has been submitted to signoff
Sign = 3 # A signature has been added
Unsign = 4 # A signature has been removed
Activate = 5 # Memo moved from signoff to active
Obsolete = 6 # Memo has been obsoleted
Cancel = 7 # Memo has been canceled and deleted from the system
Reject = 8 # Memo has been rejected and put back into draft
IllegalFile = 9 # User tried to access a file that was not authorized
@staticmethod
def convert(value):
if value == MemoActivity.Create:
return "Create"
if value == MemoActivity.Signoff:
return "Signoff"
if value == MemoActivity.Sign:
return "Sign"
if value == MemoActivity.Unsign:
return "Unsign"
if value == MemoActivity.Activate:
return "Activate"
if value == MemoActivity.Obsolete:
return "Obsolete"
if value == MemoActivity.Cancel:
return "Cancel"
if value == MemoActivity.Reject:
return "Reject"
if value == MemoActivity.IllegalFile:
return "IllegalFile"
return "Unknown"
"""
def __str__(self):
current_app.logger.info("checking string value")
if self.value == 1:
return "Create"
if self.value == 2:
return "Signoff"
if self.value == 3:
return "Sign"
if self.value == 4:
return "Unsign"
if self.value == 5:
return "Activate"
if self.value == 6:
return "Obsolete"
if self.value == 7:
return "Cancel"
if self.value == 8:
return "Reject"
""" | 3.03125 | 3 |
lib/blackboxprotobuf/lib/protofile.py | nccgroup/blackboxprotobuf | 261 | 12767899 | """
Python methods for importing and exporting '.proto' files from the BBP type
definition format.
"""
# TODO get custom exceptions for these methods
import io
import re
import logging
from blackboxprotobuf.lib.exceptions import TypedefException
import blackboxprotobuf.lib.api
PROTO_FILE_TYPE_MAP = {
"uint": "uint64",
"int": "int64",
"sint": "sint64",
"fixed32": "fixed32",
"sfixed32": "sfixed32",
"float": "float",
"fixed64": "fixed64",
"sfixed64": "sfixed64",
"double": "double",
"bytes": "bytes",
"bytes_hex": "bytes",
"string": "string",
}
PACKABLE_TYPES = [
"uint",
"int",
"sint",
"fixed32",
"sfixed32",
"float",
"fixed64",
"sfixed64",
"double",
]
# Inverse of the above, but we have to include more types
PROTO_FILE_TYPE_TO_BBP = {
"double": "double",
"float": "float",
"int32": "int",
"int64": "int",
"uint32": "uint",
"uint64": "uint",
"sint32": "sint",
"sint64": "sint",
"fixed32": "fixed32",
"fixed64": "fixed64",
"sfixed32": "sfixed32",
"sfixed64": "sfixed64",
"bool": "uint",
"string": "string",
# should be default_binary_type, but can't handle that well here
"bytes": "bytes",
}
NAME_REGEX = re.compile(r"\A[a-zA-Z_][a-zA-Z0-9_]*\Z")
# add packed types to the list
for packable_type in PACKABLE_TYPES:
packed_type = "packed_" + packable_type
PROTO_FILE_TYPE_MAP[packed_type] = PROTO_FILE_TYPE_MAP[packable_type]
def _print_message(message_name, typedef, output_file, depth=0):
indent = u" " * depth
if not NAME_REGEX.match(message_name):
raise TypedefException("Message name: %s is not valid" % message_name)
# sort typedef for better looking output
typedef = blackboxprotobuf.lib.api.sort_typedef(typedef)
message_name = message_name.strip()
output_file.write(u"\n")
output_file.write(indent)
output_file.write(u"message %s {\n" % message_name)
for field_number, field_typedef in typedef.items():
# TODO Default to all fields as repeated? or optional
proto_type = None
field_name = None
field_options = ""
        # a repeated field with one element is indistinguishable from a
        # non-repeated field, so we only emit "repeated" when we have proof
        # that the field repeats, but this might be wrong sometimes
# maybe some sort of protobuf discovery tool can detect this
is_repeated = field_typedef.get("seen_repeated", False)
if "name" in field_typedef and field_typedef["name"] != "":
field_name = field_typedef["name"]
field_name = field_name.strip()
if not NAME_REGEX.match(field_name):
field_name = None
if field_name is None:
field_name = u"field%s" % field_number
if field_typedef["type"] == "message":
# If we have multiple typedefs, this means is something like the Any
# message, and has to be manually reparsed to each type
if "alt_typedefs" in field_typedef:
proto_type = "bytes"
else:
proto_type = field_name + "_type"
_print_message(
proto_type, field_typedef["message_typedef"], output_file, depth + 1
)
else:
if field_typedef["type"] not in PROTO_FILE_TYPE_MAP:
raise TypedefException(
"Type %s does not have a mapping to protobuf types."
% field_typedef["type"]
)
proto_type = PROTO_FILE_TYPE_MAP[field_typedef["type"]]
# we're using proto3 syntax. Repeated numeric fields are packed by default
# if it's repeated and not packed, then make sure we specify it's not packed
if is_repeated and field_typedef["type"] in PACKABLE_TYPES:
field_options = u" [packed=false]"
            # if it's a packed type, we'll explicitly set that too, can't hurt
elif field_typedef["type"].startswith("packed_"):
field_options = u" [packed=true]"
is_repeated = True
output_file.write(indent)
output_file.write(
u" %s%s %s = %s%s;\n"
% (
"repeated " if is_repeated else "",
proto_type,
field_name,
field_number,
field_options,
)
)
output_file.write(indent)
output_file.write(u"}\n\n")
def export_proto(typedef_map, output_filename=None, output_file=None, package=None):
"""Export the given type definitons as a '.proto' file. Typedefs are
expected as a dictionary of {'message_name': typedef }
Write to output_file or output_filename if provided, otherwise return a string
output_filename will be overwritten if it exists
"""
return_string = False
if output_filename is not None:
output_file = io.open(output_filename, "w+")
if output_file is None:
return_string = True
output_file = io.StringIO()
# preamble
output_file.write(u'syntax = "proto3";\n\n')
if package:
output_file.write(u"package %s;\n\n" % package)
for typedef_name, typedef in typedef_map.items():
_print_message(typedef_name, typedef, output_file)
if return_string:
return output_file.getvalue()
# close the file if we opened it
elif output_filename is not None:
output_file.close()
return None
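# Illustrative sketch (not part of the original module): exporting a minimal
# BBP typedef. The typedef layout (field number -> {'name', 'type'}) follows
# what _print_message consumes above.
def _demo_export_proto():
    typedefs = {
        'Example': {
            '1': {'name': 'id', 'type': 'uint'},
            '2': {'name': 'label', 'type': 'string'},
        }
    }
    return export_proto(typedefs)  # proto3 source containing 'message Example {...}'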
MESSAGE_START_REGEX = re.compile(r"^message +([a-zA-Z_0-9]+) *{.*")
FIELD_REGEX = re.compile(
r"^ *(repeated|optional|required)? *([a-zA-Z0-9_]+) +([a-zA-Z0-9_]+) += +([0-9]+) *(\[[a-z]+=[a-z]*\])?.*;.*$"
)
SYNTAX_REGEX = re.compile(r'^ *syntax += +"(proto\d)" *;.*')
ENUM_REGEX = re.compile(r"^ *enum +([a-zA-Z0-9_]+) *{.*")
PACKAGE_REGEX = re.compile(r"^ *package +([a-zA-Z0-9_.]+) *;.*")
def import_proto(config, input_string=None, input_filename=None, input_file=None):
typedef_map = {}
if input_string is not None:
input_file = io.StringIO(input_string)
if input_file is None and input_filename is not None:
input_file = io.open(input_filename, "r")
if input_file is None:
raise ValueError("No file provided to import_proto")
syntax_version = "proto2"
package_prefix = ""
enum_names = []
message_trees = []
message_names = []
line = input_file.readline()
while line:
line = line.strip()
if line.startswith("syntax") and SYNTAX_REGEX.match(line):
syntax_version = SYNTAX_REGEX.match(line).group(1)
elif line.startswith("package") and PACKAGE_REGEX.match(line):
package_prefix = PACKAGE_REGEX.match(line).group(1) + "."
elif line.startswith("import"):
            logging.warning(
"Proto file has import which is not supported "
"by the parser. Ensure the imported files are "
"processed first: %s",
line,
)
elif line.startswith("enum") and ENUM_REGEX.match(line):
enum_name = _parse_enum(line, input_file)
enum_names.append(enum_name)
elif line.startswith("message") and MESSAGE_START_REGEX.match(line):
message_tree = _preparse_message(line, input_file)
message_trees.append(message_tree)
line = input_file.readline()
# TODO parse the message data
for tree in message_trees:
new_message_names, new_enum_names = _collect_names(package_prefix, tree)
enum_names += new_enum_names
message_names += new_message_names
logging.debug("Got the following enum_names: %s", enum_names)
logging.debug("Got the following message_names: %s", message_names)
for tree in message_trees:
_parse_message(
tree,
typedef_map,
message_names,
enum_names,
package_prefix,
syntax_version == "proto3",
config,
)
return typedef_map
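# Illustrative sketch (not part of the original module): importing a small
# proto3 definition from a string. ``_Cfg`` is a hypothetical stand-in exposing
# only the ``known_types`` attribute that _check_message_name consults.
def _demo_import_proto():
    class _Cfg(object):
        known_types = {}
    proto_text = (
        'syntax = "proto3";\n'
        'message Example {\n'
        '    uint64 id = 1;\n'
        '    string name = 2;\n'
        '}\n'
    )
    return import_proto(_Cfg(), input_string=proto_text)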
def _parse_enum(line, input_file):
"""Parse an enum out of the file. Goes from enum declaration to next }
Returns the enum's name
"""
enum_name = ENUM_REGEX.match(line).group(1)
# parse until the next '}'
while "}" not in line:
line = input_file.readline()
if not line:
raise ValueError("Did not find close of enum")
return enum_name
def _preparse_message(line, input_file):
"""Parse out a message name and the lines that make it up"""
message_name = MESSAGE_START_REGEX.match(line).group(1)
message_lines = []
inner_enums = []
inner_messages = []
while "}" not in line:
line = input_file.readline()
if not line:
raise ValueError("Did not find close of message")
line = line.strip()
if line.startswith("enum") and ENUM_REGEX.match(line):
enum_name = _parse_enum(line, input_file)
inner_enums.append(enum_name)
elif line.startswith("message") and MESSAGE_START_REGEX.match(line):
message_tree = _preparse_message(line, input_file)
inner_messages.append(message_tree)
# not an inner enum or message
else:
message_lines.append(line)
return {
"name": message_name,
"data": message_lines,
"enums": inner_enums,
"inner_messages": inner_messages,
}
def _collect_names(prefix, message_tree):
message_names = []
enum_names = []
name = prefix + message_tree["name"]
message_names.append(name)
for enum_name in message_tree["enums"]:
enum_names.append(prefix + enum_name)
for inner_message in message_tree["inner_messages"]:
new_message_names, new_enum_names = _collect_names(name + ".", inner_message)
message_names += new_message_names
enum_names += new_enum_names
return message_names, enum_names
def _check_message_name(current_path, name, known_message_names, config):
# Verify message name against preparsed message names and global
# known_messages
# For example, if we have:
# Message.InnerMesage
# referenced from:
# PackageA.Message2
# we would look up:
# PackageA.Message2.Message.InnerMessage
# PackageA.Message.InnerMessage
# should also work for enums
if name in config.known_types:
return True
# search for anything under a common prefix in known_message_names
logging.debug("Testing message name: %s", name)
prefix_options = [""]
for part in current_path.split("."):
if part:
prefix_options = [prefix_options[0] + part + "."] + prefix_options
logging.debug("prefix_options: %s", prefix_options)
for prefix in prefix_options:
logging.debug("Testing message name: %s", prefix + name)
if prefix + name in known_message_names:
return prefix + name
# remove the last bit of the prefix
if "." not in prefix:
break
prefix = ".".join(prefix.split(".")[:-1])
logging.debug(
"Message %s not found from %s Known names are: %s",
name,
current_path,
known_message_names,
)
return None
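# Illustrative sketch (not part of the original module): name resolution walks
# outward from the current scope, so ``Inner`` referenced from ``Pkg.Outer`` is
# found as ``Pkg.Outer.Inner`` before ``Pkg.Inner`` would be tried. ``_Cfg`` is
# a hypothetical config stand-in with an empty ``known_types``.
def _demo_check_message_name():
    class _Cfg(object):
        known_types = {}
    known = ['Pkg.Outer', 'Pkg.Outer.Inner']
    return _check_message_name('Pkg.Outer.', 'Inner', known, _Cfg())  # 'Pkg.Outer.Inner'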
def _parse_message(
    message_tree, typedef_map, known_message_names, enum_names, prefix, is_proto3, config
):
message_typedef = {}
message_name = prefix + message_tree["name"]
prefix = message_name + "."
# parse the actual message fields
for line in message_tree["data"]:
# lines should already be stripped and should not have messages or enums
# logging.debug("Line before assert: %s", line)
assert all([not line.strip().startswith(x) for x in ["message ", "enum "]])
# Check if the line matches the field regex
match = FIELD_REGEX.match(line)
if match:
field_number, field_typedef = _parse_field(
match, known_message_names, enum_names, prefix, is_proto3, config
)
message_typedef[field_number] = field_typedef
    # add the message to the returned typedefs
    logging.debug("Adding message %s to typedef maps", message_name)
    typedef_map[message_name] = message_typedef
for inner_message in message_tree["inner_messages"]:
# TODO prefix should be added to?
_parse_message(
inner_message,
            typedef_map,
known_message_names,
enum_names,
prefix,
is_proto3,
config,
)
# parse a field into a dictionary for the typedef
def _parse_field(match, known_message_names, enum_names, prefix, is_proto3, config):
typedef = {}
field_name = match.group(3)
if not field_name:
raise ValueError("Could not parse field name from line: %s" % match)
typedef["name"] = field_name
field_number = match.group(4)
if not field_number:
raise ValueError("Could not parse field number from line: %s" % match)
# figure out repeated
field_rule = match.group(1)
is_repeated = False
if field_rule and "repeated" in field_rule:
is_repeated = True
typedef["seen_repeated"] = True
field_type = match.group(2)
if not field_type:
raise ValueError("Could not parse field type from line: %s" % match)
# check normal types
bbp_type = PROTO_FILE_TYPE_TO_BBP.get(field_type, None)
if not bbp_type:
logging.debug("Got non-basic type: %s, checking enums", field_type)
# check enum names
if _check_message_name(prefix, field_type, enum_names, config):
# enum = uint
bbp_type = "uint"
if not bbp_type:
# Not enum or normal type, check messages
message_name = _check_message_name(
prefix, field_type, known_message_names, config
)
if message_name:
bbp_type = "message"
typedef["message_type_name"] = message_name
if not bbp_type:
# If we don't have a type now, then fail
raise ValueError(
"Could not get a type for field %s: %s" % (field_name, field_type)
)
# figure out packed
# default based on repeated + proto3, fallback to options
field_options = match.group(5)
is_packed = is_repeated and is_proto3 and (field_type in PACKABLE_TYPES)
if is_packed and field_options and "packed=false" in field_options:
is_packed = False
elif is_repeated and field_options and "packed=true" in field_options:
is_packed = True
# make sure the type lines up with packable
if is_packed and bbp_type not in PACKABLE_TYPES:
raise ValueError(
"Field %s set as packable, but not a packable type: %s"
% (field_name, bbp_type)
)
if is_packed:
bbp_type = "packed_" + bbp_type
typedef["type"] = bbp_type
logging.debug("Parsed field number %s: %s", field_number, typedef)
return field_number, typedef
| 2.46875 | 2 |
cfgov/v1/tests/test_documents.py | adebisi-aden/consumerfinance.gov | 37 | 12767900 | import json
from datetime import datetime
from io import StringIO
from django.test import TestCase, override_settings
from wagtail.core.models import Site
from dateutil.relativedelta import relativedelta
from pytz import timezone
from search.elasticsearch_helpers import ElasticsearchTestsMixin
from v1.documents import (
EnforcementActionFilterablePagesDocumentSearch,
EventFilterablePagesDocumentSearch, FilterablePagesDocument,
FilterablePagesDocumentSearch
)
from v1.models.base import CFGOVPageCategory
from v1.models.blog_page import BlogPage
from v1.models.enforcement_action_page import (
EnforcementActionPage, EnforcementActionProduct, EnforcementActionStatus
)
from v1.models.learn_page import AbstractFilterPage, EventPage
from v1.tests.wagtail_pages.helpers import publish_page
class FilterablePagesDocumentTest(TestCase):
def test_model_class_added(self):
self.assertEqual(FilterablePagesDocument.django.model, AbstractFilterPage)
def test_ignore_signal_default(self):
self.assertFalse(FilterablePagesDocument.django.ignore_signals)
def test_auto_refresh_default(self):
self.assertFalse(FilterablePagesDocument.django.auto_refresh)
def test_fields_populated(self):
mapping = FilterablePagesDocument._doc_type.mapping
self.assertCountEqual(
mapping.properties.properties.to_dict().keys(),
[
'tags', 'categories', 'language', 'title', 'url',
'is_archived', 'date_published', 'start_dt', 'end_dt',
'statuses', 'products', 'initial_filing_date', 'model_class',
'content', 'preview_description'
]
)
def test_get_queryset(self):
test_event = EventPage(
title="Testing",
start_dt=datetime.now(timezone('UTC'))
)
qs = FilterablePagesDocument().get_queryset()
self.assertFalse(qs.filter(title=test_event.title).exists())
def test_prepare_statuses(self):
enforcement = EnforcementActionPage(
title="Great Test Page",
preview_description='This is a great test page.',
initial_filing_date=datetime.now(timezone('UTC'))
)
status = EnforcementActionStatus(status='expired-terminated-dismissed')
enforcement.statuses.add(status)
doc = FilterablePagesDocument()
prepared_data = doc.prepare(enforcement)
self.assertEqual(prepared_data['statuses'], ['expired-terminated-dismissed'])
def test_prepare_content_no_content_defined(self):
event = EventPage(
title='Event Test',
start_dt=datetime.now(timezone('UTC'))
)
doc = FilterablePagesDocument()
prepared_data = doc.prepare(event)
self.assertIsNone(prepared_data['content'])
def test_prepare_content_exists(self):
blog = BlogPage(
title='Test Blog',
content=json.dumps([
{
'type': 'full_width_text',
'value': [
{
'type':'content',
'value': 'Blog Text'
}]
}
])
)
doc = FilterablePagesDocument()
prepared_data = doc.prepare(blog)
self.assertEqual(prepared_data['content'], 'Blog Text')
def test_prepare_content_empty(self):
blog = BlogPage(
title='Test Blog',
content=json.dumps([])
)
doc = FilterablePagesDocument()
prepared_data = doc.prepare(blog)
self.assertIsNone(prepared_data['content'])
def test_prepare_products(self):
enforcement = EnforcementActionPage(
title="Great Test Page",
preview_description='This is a great test page.',
initial_filing_date=datetime.now(timezone('UTC'))
)
product = EnforcementActionProduct(product='Fair Lending')
enforcement.products.add(product)
doc = FilterablePagesDocument()
prepared_data = doc.prepare(enforcement)
self.assertEqual(prepared_data['products'], ['Fair Lending'])
class FilterablePagesDocumentSearchTest(ElasticsearchTestsMixin, TestCase):
@classmethod
def setUpTestData(cls):
cls.site = Site.objects.get(is_default_site=True)
content = json.dumps([
{
'type': 'full_width_text',
'value': [
{
'type':'content',
'value': 'Foo Test Content'
}]
}
])
event = EventPage(
title='Event Test',
start_dt=datetime(2021, 2, 16, tzinfo=timezone('UTC')),
end_dt=datetime(2021, 2, 16, tzinfo=timezone('UTC'))
)
event.tags.add('test-topic')
event.categories.add(CFGOVPageCategory(name='test-category'))
event.language = 'es'
publish_page(event)
enforcement = EnforcementActionPage(
title="Great Test Page",
preview_description='This is a great test page.',
initial_filing_date=datetime.now(timezone('UTC'))
)
status = EnforcementActionStatus(status='expired-terminated-dismissed')
enforcement.statuses.add(status)
product = EnforcementActionProduct(product='Debt Collection')
enforcement.products.add(product)
publish_page(enforcement)
blog = BlogPage(
title="Blog Page"
)
publish_page(blog)
blog_title_match = BlogPage(
title="Foo Bar"
)
publish_page(blog_title_match)
blog_preview_match = BlogPage(
title="Random Title",
preview_description="Foo blog"
)
publish_page(blog_preview_match)
blog_content_match = BlogPage(
title="Some Title",
content=content
)
publish_page(blog_content_match)
blog_topic_match = BlogPage(
title="Another Blog Post"
)
blog_topic_match.tags.add("Foo Tag")
publish_page(blog_topic_match)
cls.event = event
cls.enforcement = enforcement
cls.blog = blog
cls.blog_title_match = blog_title_match
cls.blog_preview_match = blog_preview_match
cls.blog_content_match = blog_content_match
cls.blog_topic_match = blog_topic_match
cls.rebuild_elasticsearch_index('v1', stdout=StringIO())
def test_search_event_all_fields(self):
to_date_dt = datetime(2021, 3, 16)
to_date = datetime.date(to_date_dt)
from_date_dt = datetime(2021, 1, 16)
from_date = datetime.date(from_date_dt)
search = EventFilterablePagesDocumentSearch(prefix='/')
search.filter(
topics=['test-topic'],
categories=['test-category'],
language=['es'],
to_date=to_date,
from_date=from_date,
archived=['no']
)
results = search.search(title='Event Test')
self.assertTrue(results.filter(title=self.event.title).exists())
def test_search_blog_dates(self):
to_date_dt = datetime.today() + relativedelta(months=1)
to_date = datetime.date(to_date_dt)
from_date_dt = datetime.today() - relativedelta(months=1)
from_date = datetime.date(from_date_dt)
search = FilterablePagesDocumentSearch(prefix='/')
search.filter(
topics=[],
categories=[],
language=[],
to_date=to_date,
from_date=from_date,
archived=None,
)
results = search.search(title=None)
self.assertTrue(results.filter(title=self.blog.title).exists())
def test_search_enforcement_actions(self):
to_date_dt = datetime.today() + relativedelta(months=1)
to_date = datetime.date(to_date_dt)
from_date_dt = datetime.today() - relativedelta(months=1)
from_date = datetime.date(from_date_dt)
search = EnforcementActionFilterablePagesDocumentSearch(prefix='/')
search.filter(
topics=[],
categories=[],
language=[],
to_date=to_date,
from_date=from_date,
statuses=['expired-terminated-dismissed'],
products=['Debt Collection'],
archived=None
)
results = search.search(title=None)
self.assertTrue(results.filter(title=self.enforcement.title).exists())
def test_search_enforcement_actions_no_statuses(self):
to_date_dt = datetime.today() + relativedelta(months=1)
to_date = datetime.date(to_date_dt)
from_date_dt = datetime.today() - relativedelta(months=1)
from_date = datetime.date(from_date_dt)
search = EnforcementActionFilterablePagesDocumentSearch(prefix='/')
search.filter(
topics=[],
categories=[],
language=[],
to_date=to_date,
from_date=from_date,
statuses=[],
products=[],
archived=None
)
results = search.search(title=None)
self.assertTrue(results.filter(title=self.enforcement.title).exists())
def test_search_title_uses_multimatch(self):
search = FilterablePagesDocumentSearch(prefix='/')
search.filter(
topics=[],
categories=[],
language=[],
to_date=None,
from_date=None,
archived=None
)
results = search.search(title="Foo")
self.assertTrue(results.filter(title=self.blog_title_match).exists())
self.assertTrue(results.filter(title=self.blog_content_match.title).exists())
self.assertTrue(results.filter(title=self.blog_preview_match.title).exists())
self.assertTrue(results.filter(title=self.blog_topic_match.title).exists())
| 2 | 2 |
adam/language_specific/english/__init__.py | isi-vista/adam | 8 | 12767901 | from immutablecollections import immutableset
ENGLISH_DETERMINERS = immutableset(["the", "a"])
DETERMINERS = immutableset(
[
"the",
"a",
"yi1_ge4",
"yi1_jang1",
"yi1_ben3",
"yi1_jyan1",
"yi1_lyang4",
"yi1_bei1",
"yi1_ba3",
"yi1_jr1",
"yi1_shan4",
"yi1_ding3",
"yi1_kwai4",
"yi1_tiao2",
"yi1_zhi1",
]
)
"""
These are determiners we automatically add to the beginning of non-proper English noun phrases.
This is a language-specific hack since learning determiners is out of our scope:
https://github.com/isi-vista/adam/issues/498
"""
ENGLISH_BLOCK_DETERMINERS = immutableset(["you", "me", "your", "my"]).union(
ENGLISH_DETERMINERS
)
"""
These words block the addition of the determiners above to English noun phrases.
"""
| 2.78125 | 3 |
wedos_hook/wedos_hook.py | Amunak/acmetool-wapi-hook | 1 | 12767902 | #!/usr/bin/python3
import argparse
import logging
import sys
import time
from configparser import ConfigParser
from pathlib import Path
from random import randint
from typing import Optional, Callable
from dns import resolver, rdtypes
from tldextract import tldextract
from .wapi import Wapi
# Constants that might change at some point but that probably don't need to be configurable:
# how long to wait (in seconds) between DNS queries for validating that the record change has propagated
PROPAGATION_CHECK_DELAY: float = 10
# how many retries will be done before giving up the record validation
PROPAGATION_MAX_RETRIES: int = round(3600 / PROPAGATION_CHECK_DELAY)
# name servers used to validate DNS record addition
# those should be public servers far away from you, not your local resolver
NAMESERVERS = ['1.1.1.1', '8.8.8.8']
DOC_LINK = 'https://github.com/hlandau/acmetool/blob/master/_doc/SCHEMA.md#challenge-dns-start-challenge-dns-stop'
OPT_TEST = 'test'
OPT_DNS_CHALLENGE_STOP = 'challenge-dns-stop'
OPT_DNS_CHALLENGE_START = 'challenge-dns-start'
OPT_HTTP_CHALLENGE_START = 'challenge-http-start'
OPT_HTTP_CHALLENGE_STOP = 'challenge-http-stop'
OPT_LIVE_UPDATED = 'live-updated'
wapi: Wapi
def test(domain: str, name: str):
logging.info('Pinging API to make sure basic functionality works')
wapi.ping()
name = ('_test-challenge.' + name).rstrip('.')
data_prefix = '_TEST-CHALLENGE.'
data = data_prefix + str(randint(0, 10000000))
logging.info('Creating record')
wapi.dns_row_add(domain, name, data, 'Wedos Hook Test Record')
wapi.dns_domain_commit(domain)
result = wait_for_record_propagation(domain, name, data)
if not result:
logging.critical('Record propagation failed! Attempts timed out after all retries.')
ids_to_delete = find_row_ids_for_delete(domain, name, lambda record_data: record_data.startswith(data_prefix))
result = do_delete(domain, ids_to_delete)
if not result:
sys.exit(5)
wapi.dns_domain_commit(domain)
logging.info('Test success')
sys.exit(0)
def challenge_start(domain: str, name: str, data: str):
name = ('_acme-challenge.' + name).rstrip('.')
logging.info('Creating record')
wapi.dns_row_add(domain, name, data, 'AcmeTool Wedos Hook')
wapi.dns_domain_commit(domain)
result = wait_for_record_propagation(domain, name, data)
if result:
logging.info('Record created and propagated')
else:
logging.critical('Record propagation failed')
sys.exit(0 if result else 42)
def challenge_stop(domain: str, name: str, data: str):
name = ('_acme-challenge.' + name).rstrip('.')
ids_to_delete = find_row_ids_for_delete(domain, name, lambda record_data: record_data == data)
result = do_delete(domain, ids_to_delete)
wapi.dns_domain_commit(domain)
if result:
logging.info('Record removed successfully')
else:
logging.critical('Record removal failure')
sys.exit(0 if result else 42)
def find_row_ids_for_delete(domain: str, name: str, data_matches: Callable[[str], bool]):
logging.info('Looking up records for deletion')
rows = wapi.dns_rows_list(domain)['response']['data']['row']
ids_to_delete = []
for row in rows:
# if row['ttl'] != str(wapi.default_dns_record_ttl):
# continue
if row['rdtype'] != wapi.default_dns_record_type:
continue
if row['name'] != name:
continue
if not data_matches(str(row['rdata'])):
continue
ids_to_delete.append(row['ID'])
return ids_to_delete
def do_delete(domain, ids_to_delete) -> bool:
# Check that we actually found our record, otherwise something is quite wrong
if len(ids_to_delete) == 0:
logging.error('Found 0 rows to delete')
return False
logging.info(f'Deleting row IDs: {", ".join(ids_to_delete)}')
for rid in ids_to_delete:
wapi.dns_row_delete(domain, int(rid))
return True
def wait_for_record_propagation(domain: str, name: str, data: str) -> bool:
"""Waits for DNS record propagation, aborting after a set amount of delayed retries
:param domain: the domain to verify
:param name: the name (subdomain), if any
:param data: record data (used to verify the exact data in case there are multiple records for the same domain)
:return: whether propagation succeeded (`True`), `False` otherwise
"""
full_name = f'{name}.{domain}'.lstrip('.')
my_resolver = resolver.Resolver()
my_resolver.nameservers = NAMESERVERS
has_propagated: Optional[bool] = None
tries = 1
logging.info(
f'Checking for DNS record propagation for a maximum of {PROPAGATION_MAX_RETRIES} tries with {PROPAGATION_CHECK_DELAY}s delays (for a total of {PROPAGATION_MAX_RETRIES * PROPAGATION_CHECK_DELAY} seconds)')
while tries <= PROPAGATION_MAX_RETRIES and (has_propagated is None or not has_propagated):
logging.debug(f'Checking whether record propagated (try {tries} of {PROPAGATION_MAX_RETRIES})')
try:
answer = my_resolver.query(full_name, wapi.default_dns_record_type)
has_propagated = record_has_propagated(answer, data)
except (resolver.NoAnswer, resolver.NXDOMAIN):
has_propagated = False
if not has_propagated:
tries += 1
logging.debug(f'Sleeping for {PROPAGATION_CHECK_DELAY}s')
time.sleep(PROPAGATION_CHECK_DELAY)
else:
logging.info(f'Match found after {tries} tries ({(tries - 1) * PROPAGATION_CHECK_DELAY} seconds)')
return has_propagated
def record_has_propagated(answer: resolver.Answer, data: str) -> bool:
record: rdtypes.ANY.TXT.TXT
for record in answer:
record_data = record.to_text().strip('"')
logging.debug(f'Reading TXT record data: {record_data}')
if record_data == data:
return True
logging.debug('No match')
return False
def read_config() -> dict:
base = Path(__file__).resolve().parents[1]
dist_config_path = base.joinpath('./config.ini.dist')
config_path = base.joinpath('./config.ini')
if not dist_config_path.exists():
logging.error(f'Distributable config file not found at path "{dist_config_path}".')
if not config_path.exists():
raise FileNotFoundError(f'Config file not found. Create file "{config_path}" (ideally by copying "{dist_config_path}") and edit it.')
parser = ConfigParser()
parser.read([dist_config_path, config_path])
return {
'wapi': {
'username': parser.get('wapi', 'username'),
            'password_sha1': parser.get('wapi', 'password_sha1'),
},
'hook': {
'verbosity': max(0, min(10, parser.getint('hook', 'override_verbosity', fallback=0))),
},
}
def get_arg_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
description='AcmeTool DNS-01 validation hook for Wedos API (WAPI)',
        epilog=f'Read {DOC_LINK} for more information about AcmeTool Hooks\n',
)
parser.add_argument('--verbose', '-v', action='count', default=0, help='log verbosity; use multiple times for higher')
subparsers = parser.add_subparsers(title='Actions', dest='action', description='Hook actions; pick one and call it with --help for more help')
test_parser = subparsers.add_parser(OPT_TEST, help='test the integration on a selected domain')
test_parser.add_argument('--verbose', '-v', action='count', default=0, help='log verbosity; use multiple times for higher')
test_parser.add_argument('domain', type=str)
start_parser = subparsers.add_parser(OPT_DNS_CHALLENGE_START, help='hook action used before the challenge')
start_parser.add_argument('--verbose', '-v', action='count', default=0, help='log verbosity; use multiple times for higher')
start_parser.add_argument('domain', type=str)
start_parser.add_argument('file', help='not used, passed here by AcmeTool')
start_parser.add_argument('record', type=str, help='the TXT record')
stop_parser = subparsers.add_parser(OPT_DNS_CHALLENGE_STOP, help='hook action used after the challenge')
stop_parser.add_argument('--verbose', '-v', action='count', default=0, help='log verbosity; use multiple times for higher')
stop_parser.add_argument('domain', type=str)
stop_parser.add_argument('file', help='not used, passed here by AcmeTool')
stop_parser.add_argument('record', type=str, help='the TXT record')
# additional parsers that run a dummy function so that there are no errors in AcmeTool output
subparsers.add_parser(OPT_HTTP_CHALLENGE_START, help='dummy hook action').add_argument('dummy', nargs=3)
subparsers.add_parser(OPT_HTTP_CHALLENGE_STOP, help='dummy hook action').add_argument('dummy', nargs=3)
subparsers.add_parser(OPT_LIVE_UPDATED, help='dummy hook action')
return parser
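# Illustrative sketch (not part of the original hook): parsing a hook
# invocation programmatically; AcmeTool would normally supply these arguments.
def _demo_parse_args():
    parser = get_arg_parser()
    return parser.parse_args(['-v', 'test', 'example.com'])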
def main():
global wapi
# Read config
config = read_config()
# Parse args
arg_parser = get_arg_parser()
args = arg_parser.parse_args()
verbosity = max(args.verbose, config['hook']['verbosity'])
if verbosity >= 2:
loglevel = logging.DEBUG
elif verbosity >= 1:
loglevel = logging.INFO
else:
loglevel = logging.WARNING
logging.basicConfig(level=loglevel)
logging.debug(args)
# In case no arguments / action is specified, exit
if 'action' not in args or args.action is None:
arg_parser.print_help()
sys.exit(3)
# Extract domain/subdomain
if 'domain' in args:
extract_result = tldextract.extract(args.domain)
info_prefix = f'Domain "{args.domain}" extracted as {extract_result.registered_domain} (TLD {extract_result.suffix}, '
if extract_result.subdomain == '':
logging.info(info_prefix + 'NO SUBDOMAIN)')
else:
logging.info(info_prefix + f'SUBDOMAIN {extract_result.subdomain})')
# Initialize Wapi
logging.info(f'Using account "{config["wapi"]["username"]}"')
wapi = Wapi(config['wapi']['username'], config['wapi']['password_sha1'])
# Finally decide what to do and run the given function
{
OPT_TEST: lambda: test(extract_result.registered_domain, extract_result.subdomain),
OPT_DNS_CHALLENGE_START: lambda: challenge_start(extract_result.registered_domain, extract_result.subdomain, args.record),
OPT_DNS_CHALLENGE_STOP: lambda: challenge_stop(extract_result.registered_domain, extract_result.subdomain, args.record),
OPT_HTTP_CHALLENGE_START: lambda: exit_not_implemented(),
OPT_HTTP_CHALLENGE_STOP: lambda: exit_not_implemented(),
OPT_LIVE_UPDATED: lambda: exit_not_implemented(),
}[args.action]()
# Commands should exit by themselves - if they don't, we return with error here
logging.error('Subcommand did not exit on its own.')
sys.exit(255)
def exit_not_implemented():
logging.debug('Not implemented.')
sys.exit(4)
if __name__ == '__main__':
main()
| 2.09375 | 2 |
ssd/utils/box_utils.py | xbcReal/ssd_fcn_multitask_text_detection_pytorch1.0 | 2 | 12767903 | import torch
import math
import numpy as np
def convert_locations_to_boxes(locations, priors, center_variance,
size_variance):
"""Convert regressional location results of SSD into boxes in the form of (center_x, center_y, h, w).
The conversion:
$$predicted\_center * center_variance = \frac {real\_center - prior\_center} {prior\_hw}$$
$$exp(predicted\_hw * size_variance) = \frac {real\_hw} {prior\_hw}$$
We do it in the inverse direction here.
Args:
locations (batch_size, num_priors, 4): the regression output of SSD. It will contain the outputs as well.
priors (num_priors, 4) or (batch_size/1, num_priors, 4): prior boxes.
center_variance: a float used to change the scale of center.
size_variance: a float used to change of scale of size.
Returns:
boxes: priors: [[center_x, center_y, h, w]]. All the values
are relative to the image size.
"""
# priors can have one dimension less.
# if priors.dim() + 1 == locations.dim():
# priors = priors.unsqueeze(0)
# return torch.cat([
# locations[..., :2] * center_variance * priors[..., 2:] + priors[..., :2],
# torch.exp(locations[..., 2:] * size_variance) * priors[..., 2:]
# ], dim=locations.dim() - 1)
#print('locations:',locations)
# print('priors.size():',priors.size)
    return locations * center_variance + torch.from_numpy(priors).cuda()
def convert_boxes_to_locations(quad_form_boxes, quad_form_priors, center_variance, size_variance):
# priors can have one dimension less
# if center_form_priors.dim() + 1 == center_form_boxes.dim():
# center_form_priors = center_form_priors.unsqueeze(0)
# return torch.cat([
# (center_form_boxes[..., :2] - center_form_priors[..., :2]) / center_form_priors[..., 2:] / center_variance,
# torch.log(center_form_boxes[..., 2:] / center_form_priors[..., 2:]) / size_variance
# ], dim=center_form_boxes.dim() - 1)
return (quad_form_boxes-quad_form_priors) / center_variance
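# Illustrative sketch (not part of the original module): with this simplified
# linear parameterization, encoding a quadrilateral is just its scaled offset
# from the prior's vertices.
def _demo_encode_offsets():
    boxes = torch.tensor([[0.2, 0.2, 0.8, 0.2, 0.8, 0.8, 0.2, 0.8]])
    priors = torch.tensor([[0.1, 0.1, 0.9, 0.1, 0.9, 0.9, 0.1, 0.9]])
    # each coordinate offset is +/-0.1, scaled by 1 / center_variance
    return convert_boxes_to_locations(boxes, priors, center_variance=0.1, size_variance=0.2)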
def area_of(left_top, right_bottom) -> torch.Tensor:
"""Compute the areas of rectangles given two corners.
Args:
left_top (N, 2): left top corner.
right_bottom (N, 2): right bottom corner.
Returns:
area (N): return the area.
"""
hw = torch.clamp(right_bottom - left_top, min=0.0)
return hw[..., 0] * hw[..., 1]
import shapely
from shapely.geometry import Polygon, MultiPoint  # polygon geometry types
from itertools import product
import time
# Sutherland-Hodgman polygon clipping algorithm
def clip(subjectPolygon, clipPolygon):
def inside(p):
return(cp2[0]-cp1[0])*(p[1]-cp1[1]) > (cp2[1]-cp1[1])*(p[0]-cp1[0])
def computeIntersection():
dc = [ cp1[0] - cp2[0], cp1[1] - cp2[1] ]
dp = [ s[0] - e[0], s[1] - e[1] ]
n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]
n2 = s[0] * e[1] - s[1] * e[0]
n3 = 1.0/(dc[0] * dp[1] - dc[1] * dp[0])
return [(n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3]
outputList = subjectPolygon
cp1 = clipPolygon[-1]
for clipVertex in clipPolygon:
cp2 = clipVertex
inputList = outputList
outputList = []
if inputList==[]:
return [[0,0]]*4
s = inputList[-1]
for subjectVertex in inputList:
e = subjectVertex
if inside(e):
if not inside(s):
outputList.append(computeIntersection())
outputList.append(e)
elif inside(s):
outputList.append(computeIntersection())
s = e
cp1 = cp2
return(outputList)
def PolygonArea(corners):
n = len(corners) # of corners
area = 0.0
for i in range(n):
j = (i + 1) % n
area += corners[i][0] * corners[j][1]
area -= corners[j][0] * corners[i][1]
area = abs(area)/2.0
return area
def calc_iou_Hodgman(quad1,quad2):
intersection = clip(quad1, quad2)
if intersection == 0:
return 0
intersection_area = PolygonArea(intersection)
print('intersection_area:',intersection_area)
print('PolygonArea(quad1):',PolygonArea(quad1))
print('PolygonArea(quad2):',PolygonArea(quad2))
print('PolygonArea(quad1) + PolygonArea(quad2):',PolygonArea(quad1) + PolygonArea(quad2))
union_area=(PolygonArea(quad1) + PolygonArea(quad2) - intersection_area)
print('union_area:',union_area)
iou = intersection_area / union_area
return iou
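# Illustrative sketch (not part of the original module): IoU of two unit
# squares offset by 0.5 along x via the Sutherland-Hodgman path above.
# Intersection is 0.5, union is 1.5, so the expected IoU is 1/3.
def _demo_hodgman_iou():
    quad1 = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]
    quad2 = [[0.5, 0.0], [1.5, 0.0], [1.5, 1.0], [0.5, 1.0]]
    return calc_iou_Hodgman(quad1, quad2)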
def iou_of(boxes0, boxes1, eps=1e-5):
"""Return intersection-over-union (Jaccard index) of boxes.
Args:
        boxes0 (N0, 8): ground truth quadrilaterals, each a row of 4 (x, y) vertices.
        boxes1 (N1, 8): predicted quadrilaterals.
        eps: a small number to avoid 0 as denominator.
    Returns:
        iou (N1, N0): pairwise IoU values.
"""
start = time.time()
# print('boxes0.shape:',np.shape(boxes0))
# print('boxes1.shape:',np.shape(boxes1))
boxes0=np.reshape(boxes0,(-1,4,2))
boxes1=np.reshape(boxes1,(-1,4,2))
iou_result=np.zeros(shape=(np.shape(boxes1)[0],np.shape(boxes0)[0]),dtype=np.float32)
for i, j in product(range(np.shape(boxes1)[0]),range(np.shape(boxes0)[0])):
quad1=boxes0[j]
quad2=boxes1[i]
quad1=np.reshape(np.array(quad1),(4,2))
quad2=np.reshape(np.array(quad2),(4,2))
# iou=calc_iou_Hodgman(quad1,quad2)
# if iou > 1 or iou < 0:
# print('iou:',iou)
# assert iou <= 1 and iou >=0
# iou_result[i][j] = iou
poly1 = Polygon(quad1.reshape(4,2)).convex_hull
poly2 = Polygon(quad2.reshape(4,2)).convex_hull
        union_poly = np.concatenate((quad1.reshape(4, 2), quad2.reshape(4, 2)))  # merge both boxes' vertices into an 8x2 array
        if not poly1.intersects(poly2):  # the two quadrilaterals do not intersect
            iou = 0
        else:
            try:
                inter_area = poly1.intersection(poly2).area  # intersection area
#print(inter_area)
union_area = MultiPoint(union_poly).convex_hull.area
if union_area == 0:
iou = 0
else:
iou = float(inter_area) / union_area
iou_result[i][j] = iou
except shapely.geos.TopologicalError:
print('shapely.geos.TopologicalError occured, iou set to 0')
iou = 0
assert iou <= 1 and iou >= 0
end = time.time()
#print('time consuming:',end-start)
return iou_result
def distance_sum(quad_gt,quad_from_priors):
ret = []
# print('quad_gt.size:', np.shape(quad_gt))
quad_gt=np.reshape(np.array(quad_gt),(-1,4,2))
quad_from_priors=np.reshape(np.array(quad_from_priors),(-1,4,2))
for i in range(np.shape(quad_gt)[0]):
# ret_temp=b-a[i,:].sum(axis=1,keepdims=True)
ret_temp = np.sum(np.sqrt(np.sum(np.power(quad_from_priors - quad_gt[i, ...],2), axis=2, keepdims=False)),axis=1,keepdims=True)
#print('ret_temp.shape:',np.shape(ret_temp))
ret.append(ret_temp)
# print('ret.size:',len(ret))
ret = np.concatenate(ret, axis=1)
#print('ret.shape:', np.shape(ret))
# print('quad_gt.shape:',np.shape(quad_gt))
# print('quad_from_priors.shape:',np.shape(quad_from_priors))
# print('ret.shape:',np.shape(ret))
return ret
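# Illustrative sketch (not part of the original module): distance_sum returns,
# for every (prior, gt) pair, the sum of the four vertex-wise Euclidean
# distances; a unit square shifted by one unit along x gives 4 * 1.0 = 4.0.
def _demo_distance_sum():
    quad_gt = np.array([[0, 0, 1, 0, 1, 1, 0, 1]], np.float32)
    quad_prior = quad_gt + np.array([1, 0] * 4, np.float32)
    return distance_sum(quad_gt, quad_prior)  # array([[4.]], dtype=float32)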
# overlap_left_top = torch.max(boxes0[..., :2], boxes1[..., :2])
# overlap_right_bottom = torch.min(boxes0[..., 2:], boxes1[..., 2:])
#
# overlap_area = area_of(overlap_left_top, overlap_right_bottom)
# area0 = area_of(boxes0[..., :2], boxes0[..., 2:])
# area1 = area_of(boxes1[..., :2], boxes1[..., 2:])
# return overlap_area / (area0 + area1 - overlap_area + eps)
def get_pos_distance_array(pos_distance_threshold):
    # adaptively set the prior-to-gt distance threshold according to the default-box scale
# print('distance_threshold:',distance_threshold)
# scale = [0.039,0.098,0.156,0.215,0.273,0.332,0.391]
# diff_from_ratio = [1.656,1.588,1.491,1.403,1.323,1.261,1.203,1.068]#this if for different aspect ratio settings
# diff_from_ratio = [1.656,1.656,1.656,1.656,1.656,1.656,1.656,1.656]
# pos_distance_array = []
# pos_distance_array += 64 * 64 * list(np.array([18 * [scale[0] * item] for item in diff_from_ratio]).reshape(-1))
# pos_distance_array += 32 * 32 * list(np.array([18 * [scale[1] * item] for item in diff_from_ratio]).reshape(-1))
# pos_distance_array += 16 * 16 * list(np.array([18 * [scale[2] * item] for item in diff_from_ratio]).reshape(-1))
# pos_distance_array += 8 * 8 * list(np.array([18 * [scale[3] * item] for item in diff_from_ratio]).reshape(-1))
# pos_distance_array += 4 * 4 * list(np.array([18 * [scale[4] * item] for item in diff_from_ratio]).reshape(-1))
# pos_distance_array += 2 * 2 * list(np.array([18 * [scale[5] * item] for item in diff_from_ratio]).reshape(-1))
# pos_distance_array += 1 * 1 * list(np.array([18 * [scale[5] * item] for item in diff_from_ratio]).reshape(-1))
# print('len(pos_distance_array):',len(pos_distance_array))
# print('pos_distance_threshold:',pos_distance_threshold)
n = 144
pos_distance_array = []
pos_distance_array+=64*64*n*[pos_distance_threshold[0]]#0~32768
pos_distance_array+=32*32*n*[pos_distance_threshold[1]]#32768~40960
pos_distance_array+=16*16*n*[pos_distance_threshold[2]]#40960~43008
pos_distance_array+=8*8*n*[pos_distance_threshold[3]]#43008~43520
pos_distance_array+=4*4*n*[pos_distance_threshold[4]]#43520~43648
pos_distance_array+=2*2*n*[pos_distance_threshold[5]]#43648~43680
pos_distance_array+=1*1*n*[pos_distance_threshold[6]]#43680~43688
# print('distance_array.size:',np.shape(distance_array))
# print('len:distance_array:',len(pos_distance_array))
return np.array(pos_distance_array)
def get_ignore_distance_array(ignore_distance_threshold):
    # adaptively set the prior-to-gt distance threshold according to the default-box scale
# print('distance_threshold:',distance_threshold)
ignore_distance_array = []
n = 126
ignore_distance_array+=64*64*n*[ignore_distance_threshold[0]]#0~32768
ignore_distance_array+=32*32*n*[ignore_distance_threshold[1]]#32768~40960
ignore_distance_array+=16*16*n*[ignore_distance_threshold[2]]#40960~43008
ignore_distance_array+=8*8*n*[ignore_distance_threshold[3]]#43008~43520
ignore_distance_array+=4*4*n*[ignore_distance_threshold[4]]#43520~43648
ignore_distance_array+=2*2*n*[ignore_distance_threshold[5]]#43648~43680
ignore_distance_array+=1*1*n*[ignore_distance_threshold[6]]#43680~43688
# print('distance_array.size:',np.shape(distance_array))
return np.array(ignore_distance_array)
def assign_priors(quad_gt, quad_form_priors, iou_threshold, pos_distance_threshold):
    """Assign ground-truth quadrilaterals and labels to priors.
    Args:
        quad_gt (num_targets, ...): ground-truth quadrilaterals.
        quad_form_priors (num_priors, ...): quadrilateral-form priors.
        iou_threshold: kept for interface compatibility; matching here is distance-based.
        pos_distance_threshold: per-level distance thresholds for positive matches.
    Returns:
        quad (num_priors, ...): ground-truth quadrilaterals assigned to each prior.
        labels (num_priors): labels for priors.
    """
# size: num_priors x num_targets
#ious = iou_of(quad_gt, quad_form_priors)
#ious = iou_of(quad_gt, quad_form_priors)
distance = distance_sum(quad_gt,quad_form_priors)
# size: num_priors
    # the distance to the closest (minimum-distance) target for each prior
best_target_per_prior=np.min(distance,axis=1)
    # the index of the closest target for each prior
best_target_per_prior_index=np.argmin(distance,axis=1)
#print(np.shape(best_target_per_prior))
#print(np.shape(best_target_per_prior_index))
# size: num_targets
    # the distance to the closest prior for each target
best_prior_per_target=np.min(distance,axis=0)
    # the index of the closest prior for each target
best_prior_per_target_index=np.argmin(distance,axis=0)
    # force each target's closest prior to be assigned to that target
for target_index, prior_index in enumerate(best_prior_per_target_index):
best_target_per_prior_index[prior_index] = target_index
    # 0 is used (distances: smaller is better) to make sure every target has a prior assigned
    best_target_per_prior[best_prior_per_target_index]=0
# size: num_priors
gt_labels=np.ones(shape=np.shape(quad_gt)[0])
labels = gt_labels[best_target_per_prior_index]
# print('distance_threshold:',distance_threshold)
pos_distance_array=get_pos_distance_array(pos_distance_threshold)
    ignore_distance_array=pos_distance_array * 1.995#1.995 is a scaling factor derived from IoU = 0.3 under the Manhattan distance metric
    labels[best_target_per_prior > pos_distance_array] = 0 # the background id
# print('shape:',np.shape(best_target_per_prior > pos_distance_array))
#ignore_mask = np.multiply(best_target_per_prior > pos_distance_array ,best_target_per_prior < ignore_distance_array)
# print('ignore_mask.size1:',ignore_mask.sum())
#labels[ignore_mask] = -1
quad = quad_gt[best_target_per_prior_index]
# np.savetxt("/home/binchengxiong/boxes.txt", quad)
# np.savetxt("/home/binchengxiong/labels.txt", labels)
return quad,labels
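# A minimal usage sketch (hypothetical inputs; `distance_sum` is assumed to be
# defined earlier in this module): match 3 ground-truth quads against the full
# prior set using per-level positive thresholds.
#
#   quad_gt = np.random.rand(3, 8)          # 3 targets
#   priors = np.random.rand(786384, 8)      # quadrilateral-form priors
#   thresholds = [0.04, 0.10, 0.16, 0.22, 0.27, 0.33, 0.39]
#   quads, labels = assign_priors(quad_gt, priors, 0.5, thresholds)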
def hard_negative_mining(loss, labels, neg_pos_ratio):
"""
It used to suppress the presence of a large number of negative prediction.
It works on image level not batch level.
For any example/image, it keeps all the positive predictions and
cut the number of negative predictions to make sure the ratio
between the negative examples and positive examples is no more
the given ratio for an image.
Args:
loss (N, num_priors): the loss for each example.
labels (N, num_priors): the labels.
neg_pos_ratio: the ratio between the negative examples and positive examples.
"""
pos_mask = labels == 1
#ignore_mask = labels == -1
# print('ignore_mask.size',ignore_mask.size())
# print('ignore_mask2.size:',ignore_mask.sum())
num_pos = pos_mask.long().sum(dim=1, keepdim=True)
# print('num_pos:',num_pos)
num_neg = num_pos * neg_pos_ratio
# print('pos_mask.size()[1]:',pos_mask.size()[1])
# print('total train sample num:',num_pos * (neg_pos_ratio + 1))
    # Set the loss of positive samples to -inf so that, after sorting the loss in descending order, the positives end up at the back
# print('loss.size',loss.size())
loss[pos_mask] = -math.inf
#loss[ignore_mask] = -math.inf
try:
ordered_loss, indexes = loss.sort(dim=1, descending=True)
# print('ordered_loss:',ordered_loss)
# print('loss.size:',loss.size())
    except RuntimeError:
        print('loss.size()',loss.size())
        print('loss:',loss)
        raise
_, orders = indexes.sort(dim=1)
neg_mask = orders < num_neg
return pos_mask | neg_mask
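# A minimal usage sketch (hypothetical tensors, SSD-style training loop): mask
# the classification loss so only positives plus the hardest negatives count.
#
#   with torch.no_grad():
#       background_loss = -F.log_softmax(confidence, dim=2)[:, :, 0]
#       mask = hard_negative_mining(background_loss, labels, neg_pos_ratio=3)
#   cls_loss = F.cross_entropy(confidence[mask, :], labels[mask], reduction='sum')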
# corner-form representation of default boxes
def center_form_to_corner_form(locations):
return torch.cat([locations[..., :2] - locations[..., 2:] / 2,
locations[..., :2] + locations[..., 2:] / 2], locations.dim() - 1)
def corner_form_to_center_form(boxes):
return torch.cat([
(boxes[..., :2] + boxes[..., 2:]) / 2,
boxes[..., 2:] - boxes[..., :2]
], boxes.dim() - 1)
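# Quick sanity check (hypothetical box): the two conversions are inverses.
#
#   >>> b = torch.tensor([[1.0, 1.0, 3.0, 5.0]])        # corner form (x1, y1, x2, y2)
#   >>> corner_form_to_center_form(b)                    # -> [[2.0, 3.0, 2.0, 4.0]]
#   >>> center_form_to_corner_form(corner_form_to_center_form(b))  # -> b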
| 2.890625 | 3 |
main_vgg19.py | nirmorgo/Style-Transferer | 4 | 12767904 | <gh_stars>1-10
from style_transfer_funcs_vgg19 import Style_Transferer_VGG19
from image_utils import load_image, plot_input_images
#%%
content_image_path= 'data/trump.jpg'
style_image_path= 'data/pasta.jpg'
img_resize = 720
content_img = load_image(content_image_path, img_resize)
style_img = load_image(style_image_path)
plot_input_images(content_img, style_img)
transferer_params = {
'content_image' : content_img,
'style_image' : style_img,
'content_layer' : 'conv4_2',
'content_to_style_ratio' : 1e-3,
'style_layers_list' : [
('conv1_1', 0.2),
('conv2_1', 0.2),
('conv3_1', 0.2),
('conv4_1', 0.2),
('conv5_1', 1.0)],
'tv_weight' : 1e-2,
    'initial_lr': 12.5, #optimizer learning rate at beginning
'decayed_lr': 3
}
transferer = Style_Transferer_VGG19(**transferer_params)
opt_params = {
'iter_num':500,
'draw_every':200,
'print_every':50,
'decay_lr_at':350,
'initial_lr':None, # can be used to update learning rate without initializing the entire graph
'decayed_lr':None # can be used to update learning rate without initializing the entire graph
}
transferer.generate_image(**opt_params)
| 2.046875 | 2 |
skmine/periodic/__init__.py | sdall/scikit-mine | 0 | 12767905 | """
The :mod:`skmine.periodic` implements tools to tackle periodicity in
datasets, preferably presented in the form of event logs
"""
from .cycles import PeriodicCycleMiner
| 1.445313 | 1 |
app/crawler_app/crawler_app/celeryconfig.py | KimKiHyuk/BenefitObserver | 0 | 12767906 |
import os, sys, json
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(BASE_DIR)
secret_file = os.path.join(BASE_DIR, 'secrets.json')
with open(secret_file) as f:
secrets = json.loads(f.read())
BROKER_URL = secrets['RABBITMQ_CONNECTION']
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json' | 1.960938 | 2 |
src/pds_doi_service/core/input/test/__init__.py | NASA-PDS/pds-doi-service | 2 | 12767907 | <reponame>NASA-PDS/pds-doi-service<filename>src/pds_doi_service/core/input/test/__init__.py
# encoding: utf-8
"""
Planetary Data System's Digital Object Identifier service — tests for core inputs
"""
import unittest
from . import input_util_test
from . import read_bundle
def suite():
suite = unittest.TestSuite()
suite.addTests(unittest.defaultTestLoader.loadTestsFromModule(input_util_test))
suite.addTests(unittest.defaultTestLoader.loadTestsFromModule(read_bundle))
return suite
| 1.695313 | 2 |
pygeotoolbox/sharedtools/fonts/svg/svgicons.py | raugustyn/doctest | 0 | 12767908 | # -*- coding: utf-8 -*-
__author__ = "<EMAIL>"
from pygeotoolbox.sharedtools.fonts.svg.svgfontreader import SVGFontReader
import pygeotoolbox.sharedtools.log as log
from pygeotoolbox.sharedtools import makeDirForFile
__readers = {}
def extractSVGIcon(svgFontFileName, glyphName, iconFileName=None):
global __readers
if not svgFontFileName in __readers:
__readers[svgFontFileName] = SVGFontReader(svgFontFileName)
reader = __readers[svgFontFileName]
for glyphUnicode, glyph in reader.glyphs.iteritems():
if glyphName == glyph.name:
result = '<?xml version="1.0"?>\n<svg>\n\t%s\n</svg>' % glyph.glyphXMLContent.replace("glyph", "path")
if iconFileName:
makeDirForFile(iconFileName)
open(iconFileName, "w").write(result)
log.debug("Saving icon '%s' --> '%s'." % (glyphName, iconFileName))
return result
log.warning("extractSVGIcon('%s', '%s', '%s') - glyph [%s] not found." % (svgFontFileName, glyphName, str(iconFileName), glyphName))
return ""
if __name__ == "__main__":
extractSVGIcon("C:/ms4w/Apache/htdocs/Generalizace/MapGen/projects/zm/zm10/zm10fonts/zm10x1.svg", "105_kostel", "C:/ms4w/Apache/htdocs/Generalizace/MapGen/ms4w/Apache/htdocs/mgFiddle/Maps/zm10/105_kostel.svg")
| 2.390625 | 2 |
src/orientation.py | mackorone/catan | 5 | 12767909 | from enum import Enum
class Orientation(Enum):
NORTH_EAST = 0
NORTH = 1
NORTH_WEST = 2
SOUTH_WEST = 3
SOUTH = 4
SOUTH_EAST = 5
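# A small derived helper (a sketch, not part of the original API): with six
# orientations spaced 60 degrees apart, the opposite direction is three
# steps away around the hexagon.
def opposite(orientation: Orientation) -> Orientation:
    # e.g. opposite(Orientation.NORTH) == Orientation.SOUTH
    return Orientation((orientation.value + 3) % 6)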
| 3.1875 | 3 |
scripts/practiceScripts/matrixplot.py | czbiohub/scRFE | 11 | 12767910 | <filename>scripts/practiceScripts/matrixplot.py
#!/usr/bin/env python
# coding: utf-8
# # Visualization: Matrix Plot
# In[1]:
# this is for comparing results from 3m and 24m data
# In[2]:
# imports
import numpy as np
import pandas as pd
import scanpy as sc
from anndata import read_h5ad
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import accuracy_score
from sklearn.feature_selection import RFE
from matplotlib import pyplot as plt
# In[3]:
# read in raw data
adata = read_h5ad('/Users/madelinepark/Downloads/Limb_Muscle_facs.h5ad')
# In[4]:
# read in results and sort by gini
results_24 = pd.read_csv('/Users/madelinepark/src2/maca-data-analysis/results_age_first_24m.csv')
results_sorted_24 = results_24.sort_values(by='24m_gini',ascending=False)
results_3 = pd.read_csv('/Users/madelinepark/src2/maca-data-analysis/results_age_first_3m.csv')
results_sorted_3 = results_3.sort_values(by='3m_gini',ascending=False)
# In[15]:
# take top genes and ginis, here we chose 10
results_top_24_gene = results_sorted_24['24m'][0:10]
results_top_24_gini = results_sorted_24['24m_gini'][0:10]
results_top_3_gene = results_sorted_3['3m'][0:10]
results_top_3_gini = results_sorted_3['3m_gini'][0:10]
# In[10]:
results_top_gene_list = []
results_top_gene_list.extend(results_top_24_gene)
results_top_gene_list.extend(results_top_3_gene)
# In[11]:
# keep only the top genes that are also present in the AnnData object
results_top_genes = list(set(results_top_gene_list) & set(adata.var_names.values))
# In[12]:
adatasubset = adata[adata.obs['age'].isin(['3m','24m'])]
# In[13]:
# Need to change the order of the ages
adatasubset.obs['age_num'] = adatasubset.obs['age']
adatasubset.obs['age_num'] = [an.split('m')[0] for an in adatasubset.obs['age_num']]
# In[14]:
sc.pl.matrixplot(adatasubset, results_top_genes,
groupby='age_num', dendrogram=False,log=True,cmap='Blues',save = '_top_30_droplet_test_8.pdf')
# In[ ]:
# In[ ]:
| 2.734375 | 3 |
python/publish.py | sogartar/faqap | 1 | 12767911 | #!/usr/bin/env python3
from subprocess import run
from os import path, mkdir
import sys
from datetime import datetime
curr_script_dir = path.abspath(path.dirname(__file__))
publish_dir = datetime.now().strftime("publish-%Y-%m-%d-%H-%M-%S.%f")
mkdir(publish_dir)
run(
[sys.executable, path.join(curr_script_dir, "build.py")],
check=True,
cwd=publish_dir,
)
run(["twine", "check", "dist/*"], check=True, cwd=publish_dir)
run(["twine", "upload", "dist/*"], check=True, cwd=publish_dir)
| 1.804688 | 2 |
dagauss/DaGauss.py | JonasMoss/dagauss | 0 | 12767912 | # -*- coding: utf-8 -*-
from itertools import product
import sympy as sp
import networkx
import copy
# This should be made into a method.
def get_order(G, values = [], keep = False):
H = G.graph["dependency_graph"]
order = copy.deepcopy(H.graph["sort"])
if not values: return order
if keep == False:
for value in values:
order.remove(value)
else:
for value in set(order).difference(values):
order.remove(value)
return order
def variable_indices(G, values, restrictions = [], sort = False):
if(not restrictions):
restrictions = get_order(G)
order = get_order(G, restrictions, keep = True)
indices = [order.index(value) for value in values]
if sort: indices.sort()
return indices
# This is the ___init___ method.
def to_dagauss(G):
""" Calculate the unconditional mean vector of a multivariate normal DAG.
This function calculates the mean vector of a multivariate normal DAG. It
only calculates the vector and stores the values in the dag. Use the
function 'mean_vector' to get to the vector.
Args:
G (DagNormal): A DagNormal object representing a multivariate normal.
Returns:
None: The function modifies G in place.
Examples:
Examples should be written in doctest format, and should illustrate how
to use the function.
>>> a = [1,2,3]
>>> print [x + 3 for x in a]
[4, 5, 6]
"""
V = list(networkx.topological_sort(G))
""" Populates a directed graph G with attributes. """
for node in G.nodes:
G.nodes[node]["beta"] = sp.Symbol("beta_" + node, real = True )
G.nodes[node]["sigma"] = sp.Symbol("sigma_" + node, positive = True)
for edge in G.edges:
G.edges[edge]["beta"] = sp.Symbol("beta_" + edge[0] + edge[1], real = True)
H = G.to_undirected()
# This loop takes care of the means of the unconditional model.
for v in V:
edges = list(G.in_edges(v))
vertices = [x for (x, _) in edges]
self_contribution = H.nodes[v]["beta"]
parent_contribution = sum([H.nodes[vertex]["mu"]*H.edges[edge]["beta"]
for vertex, edge
in zip(vertices, edges)])
H.nodes[v]["mu"] = self_contribution + parent_contribution
# This loop takes care of sigmas of the unconditional model.
for i, v in enumerate(V):
edges = list(G.in_edges(v))
vertices = [x for (x, _) in edges]
product_edges = product(vertices, vertices)
parent_contribution = sum([H.edges[(x, y)]["psi"]*H.edges[(x, v)]["beta"]*H.edges[(y, v)]["beta"]
for (x, y)
in product_edges])
self_contribution = H.nodes[v]["sigma"]**2
H.add_edge(v, v, psi = self_contribution + parent_contribution)
predecessors = V[:i]
for w in predecessors:
contribution = sum([H.edges[edge]["beta"]*H.edges[(edge[0], w)]["psi"] for edge in edges])
H.add_edge(v, w, psi = contribution)
H.graph["sort"] = list(set(V))
H.graph["sort"].sort()
G.graph["dependency_graph"] = H
# This is the parameters() method.
def parameters(G, variables = [], conditionants = []):
""" Calculate the conditional mean vector and covariance matrix
Args:
G: A DaGauss object representing a multivariate normal.
        variables: The variables to compute the joint distribution of. Can be more than one.
conditionants: The variables to condition on.
Returns:
A dictionary containing the theoretical conditional mean vector and
the theoretical conditional covariance matrix.
"""
if(not variables):
variables = get_order(G, conditionants)
H = G.graph["dependency_graph"]
order = get_order(G)
mean_ = sp.Matrix([H.nodes[value]["mu"] for value in order])
cov = sp.zeros(len(order), len(order))
for (i, j) in product(range(len(order)), range(len(order))):
cov[i, j] = H.edges[(order[i], order[j])]["psi"]
if(not conditionants):
return {"mean": mean_[variable_indices(G, variables, sort = True), 0],
"cov": cov[variable_indices(G, variables, sort = True),
variable_indices(G, variables, sort = True)]}
V = get_order(G)
variables_indices = variable_indices(G, variables, sort = True)
conditionants_indices = variable_indices(G, conditionants, sort = True)
cov_AA = cov[variables_indices, variables_indices]
cov_AB = cov[variables_indices, conditionants_indices]
cov_BA = cov[conditionants_indices, variables_indices]
cov_BB_inv = sp.Inverse(cov[conditionants_indices, conditionants_indices])
mean_A = sp.Matrix([mean_[index] for index in variables_indices])
mean_B = sp.Matrix([mean_[index] for index in conditionants_indices])
new_mean = mean_A + cov_AB*cov_BB_inv*(sp.Matrix(conditionants) - mean_B)
new_cov = cov_AA - cov_AB*cov_BB_inv*cov_BA
return {"mean": sp.simplify(new_mean),
"cov": sp.simplify(new_cov)}
# This is the mean() method.
def mean(G, variables = [], conditionants = []):
""" Calculate the conditional mean vector
Args:
G: A DaGauss object representing a multivariate normal.
        variables: The variables to compute the mean of. Can be more than one.
conditionants: The variables to condition on.
Returns:
The theoretical conditional mean vector
"""
return parameters(G, variables = variables,
conditionants = conditionants)["mean"]
# This is the covariance() method.
def covariance(G, variables = [], conditionants = []):
""" Calculate the conditional covariance matrix
Args:
G: A DaGauss object representing a multivariate normal.
        variables: The variables to compute the covariance of. Can be more than one.
conditionants: The variables to condition on.
Returns:
The theoretical conditional covariance matrix
"""
return parameters(G, variables = variables,
conditionants = conditionants )["cov"]
# The variance method picks the only item from the covariance matrix.
def variance(G, variables = [], conditionants = []):
""" Calculate the conditional covariance matrix
Args:
G: A DaGauss object representing a multivariate normal.
variables: The variables in the regression. Can be more than one.
conditionants: The variables to condition on.
Returns:
The theoretical regression coefficient.
"""
cov = covariance(G, variables = variables,
conditionants = conditionants)
if len(variables) == 1:
return cov[0]
else:
return cov
def beta(G, responses = [], covariates = [], conditionants = []):
""" Calculate the theoretical beta coefficient of a regression
Args:
G: A DaGauss object representing a multivariate normal.
responses: The responses in the regression. Can be more than one.
covariates: The covariates of the regression.
conditionants: The variables the regression is conditioned on.
Returns:
The theoretical regression coefficient.
"""
variables = covariates + conditionants
means = mean(G, responses, variables)
def collect(index):
return sp.collect(expr = sp.expand(means[index]),
syms = variables)
collections = [collect(index) for index in range(len(means))]
betas = sp.Matrix([collection.coeff(variables)
for collection, variables
in product(collections, variables)])
    betas = betas.reshape(len(collections), len(variables)).T
indices = variable_indices(G, values = covariates,
restrictions = variables,
sort = True)
return betas[indices, :]
def rsquared(G, responses, covariates, conditionants = [], norm = "trace"):
""" Calculates the theoretical R squared.
This function calculates R squared, also known as the coefficient of
determination.
Args:
G: A DaGauss object representing a multivariate normal.
responses: The responses in the regression. Can be more than one.
covariates: The covariates of the regression.
conditionants: The variables the regression is conditioned on.
norm: Optional covariance matrix norm. Defaults to "trace", which is
recommended.
Returns:
        The calculated R squared. A scalar sympy object.
"""
betas = beta(G, responses = responses,
covariates = covariates,
conditionants = conditionants)
cov_covariates = variance(G, variables = covariates,
conditionants = conditionants)
cov_conditional = betas.T*cov_covariates*betas
cov_unconditional = covariance(G, variables = responses,
conditionants = conditionants)
if(norm == "trace"):
return sp.trace(cov_conditional)/sp.trace(cov_unconditional)
else:
return cov_conditional.norm(norm)/cov_unconditional.norm(norm)
def correlation(G, variables = [], conditionants = []):
""" Calculates the conditional correlation.
Args:
G: A DaGauss object representing a multivariate normal.
variables: The variables you wish to find the correlation matrix for.
conditionants: The variables the correlation matrix is conditioned on.
Returns:
A correlation matrix.
"""
cov = covariance(G, variables = variables,
conditionants = conditionants)
k = cov.shape[0]
sds = sp.Matrix([1/sp.sqrt(cov[i, i]) for i
in range(0, k)]*k).reshape(k, k)
cor = cov.multiply_elementwise(sds).multiply_elementwise(sds.T)
return cor.applyfunc(sp.simplify)
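if __name__ == "__main__":
    # A minimal end-to-end sketch (hypothetical two-node DAG x -> y): after
    # to_dagauss populates the graph, the regression coefficient of y on x
    # should come out as the symbolic edge coefficient beta_xy.
    G = networkx.DiGraph()
    G.add_edge("x", "y")
    to_dagauss(G)
    print(beta(G, responses=["y"], covariates=["x"]))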
| 3.046875 | 3 |
examples/helloworld/helloworld/hooks.py | ycvbcvfu/SoDo | 4 | 12767913 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from sodo.hook import HooKBase
# Approach 1: inherit directly from HooKBase and implement the hook logic
class FirstLogHook(HooKBase):
def process_hook(self, *args, **kwargs):
# self.crawler 有全局settings
# settings = self.crawler.settings
print("This is First LogHook")
print("FirstLogHook get global settings:", self.crawler.settings)
# Approach 2: write the whole hook protocol yourself
class SecondLogHook(object):
"Base class for implementing Hook"
def __init__(self, crawler):
self.crawler = crawler
@classmethod
def from_crawler(cls, crawler):
        # parameter/initialization logic goes here
return cls(crawler)
def process_hook(self, *args, **kwargs):
print("This is Second LogHook")
print("SecondLogHook get global settings:", self.crawler.settings)
| 2.609375 | 3 |
Jisui.py | hoppiece/Jisui | 0 | 12767914 | import PyPDF2
PDF_odd = 'odd.pdf'    # odd-page PDF
PDF_even = 'even.pdf'  # even-page PDF
OutputName = 'output.pdf'  # output PDF
angle_odd = 0   # clockwise rotation angle for odd pages, in degrees
angle_even = 0  # clockwise rotation angle for even pages, in degrees
File_odd = open(PDF_odd, 'rb')
File_even = open(PDF_even, 'rb')
Reader_odd = PyPDF2.PdfFileReader(File_odd)
Reader_even = PyPDF2.PdfFileReader(File_even)
Writer = PyPDF2.PdfFileWriter()
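# The even-page PDF is assumed to contain the back sides scanned in reverse
# order, so pages are interleaved as odd[0], even[last], odd[1], even[last-1], ...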
for page in range(Reader_odd.numPages):
obj = Reader_odd.getPage(page)
obj.rotateClockwise(angle_odd)
Writer.addPage(obj)
obj = Reader_even.getPage(Reader_odd.numPages - page - 1)
obj.rotateClockwise(angle_even)
Writer.addPage(obj)
Output = open(OutputName, 'wb')
Writer.write(Output)
Output.close()
File_odd.close()
File_even.close()
| 3.546875 | 4 |
bob/io/base/__init__.py | bioidiap/bob.io.base | 0 | 12767915 | # import Libraries of other lib packages
import numpy
import bob.core
# import our own Library
import bob.extension
bob.extension.load_bob_library('bob.io.base', __file__)
from ._library import File as _File_C, HDF5File as _HDF5File_C, extensions
from . import version
from .version import module as __version__
from .version import api as __api_version__
import os
class File(_File_C):
__doc__ = _File_C.__doc__
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
class HDF5File(_HDF5File_C):
__doc__ = _HDF5File_C.__doc__
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
return self.close()
def __contains__(self, x):
__doc__ = self.has_key.__doc__
return self.has_key(x)
def __iter__(self):
__doc__ = self.keys.__doc__
return iter(self.keys())
def __getitem__(self, name):
__doc__ = self.get.__doc__
return self.get(name)
def __setitem__(self, name, value):
__doc__ = self.set.__doc__
return self.set(name, value)
def values(self):
'''Yields the datasets contained in the current directory.
Yields
-------
object
The datasets that are being read.
'''
return (self[key] for key in self)
def items(self):
'''Yields the keys and the datasets contained in the current directory.
Yields
-------
tuple
The key and the datasets that are being read in a tuple.
'''
return ((key, self[key]) for key in self)
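# A minimal usage sketch of the dict-like interface above (hypothetical file
# name and dataset):
#
#   with HDF5File('data.hdf5', 'w') as f:
#       f['array'] = numpy.arange(10)
#       for key, value in f.items():
#           print(key, value)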
def _is_string(s):
"""Returns ``True`` if the given object is a string
This method can be used with Python-2.x or 3.x and returns a string
respecting each environment's constraints.
"""
from sys import version_info
return (version_info[0] < 3 and isinstance(s, (str, unicode))) or \
isinstance(s, (bytes, str))
@numpy.deprecate(new_name="os.makedirs(directory, exist_ok=True)")
def create_directories_safe(directory, dryrun=False):
"""Creates a directory if it does not exists, with concurrent access support.
This function will also create any parent directories that might be required.
If the dryrun option is selected, it does not actually create the directory,
but just writes the (Linux) command that would have been executed.
**Parameters:**
``directory`` : str
The directory that you want to create.
``dryrun`` : bool
Only ``print`` the command to console, but do not execute it.
"""
if dryrun:
print("[dry-run] mkdir -p '%s'" % directory)
else:
os.makedirs(directory, exist_ok=True)
def load(inputs):
"""load(inputs) -> data
Loads the contents of a file, an iterable of files, or an iterable of
:py:class:`bob.io.base.File`'s into a :py:class:`numpy.ndarray`.
**Parameters:**
``inputs`` : various types
This might represent several different entities:
1. The name of a file (full path) from where to load the data. In this
case, this assumes that the file contains an array and returns a loaded
numpy ndarray.
2. An iterable of filenames to be loaded in memory. In this case, this
would assume that each file contains a single 1D sample or a set of 1D
samples, load them in memory and concatenate them into a single and
returned 2D :py:class:`numpy.ndarray`.
3. An iterable of :py:class:`File`. In this case, this would assume
that each :py:class:`File` contains a single 1D sample or a set
of 1D samples, load them in memory if required and concatenate them into
a single and returned 2D :py:class:`numpy.ndarray`.
4. An iterable with mixed filenames and :py:class:`File`. In this
case, this would returned a 2D :py:class:`numpy.ndarray`, as described
by points 2 and 3 above.
**Returns:**
``data`` : :py:class:`numpy.ndarray`
The data loaded from the given ``inputs``.
"""
  from collections.abc import Iterable
import numpy
if _is_string(inputs):
if not os.path.exists(inputs):
raise RuntimeError(f"`{inputs}' does not exist!")
return File(inputs, 'r').read()
elif isinstance(inputs, Iterable):
retval = []
for obj in inputs:
if _is_string(obj):
retval.append(load(obj))
elif isinstance(obj, File):
retval.append(obj.read())
else:
raise TypeError(
"Iterable contains an object which is not a filename nor a "
"bob.io.base.File.")
return numpy.vstack(retval)
else:
raise TypeError(
"Unexpected input object. This function is expecting a filename, "
"or an iterable of filenames and/or bob.io.base.File's")
def merge(filenames):
"""merge(filenames) -> files
Converts an iterable of filenames into an iterable over read-only
:py:class:`bob.io.base.File`'s.
**Parameters:**
``filenames`` : str or [str]
A list of file names.
This might represent:
1. A single filename. In this case, an iterable with a single
:py:class:`File` is returned.
2. An iterable of filenames to be converted into an iterable of
:py:class:`File`'s.
**Returns:**
``files`` : [:py:class:`File`]
The list of files.
"""
  from collections.abc import Iterable
from .utils import is_string
if is_string(filenames):
return [File(filenames, 'r')]
elif isinstance(filenames, Iterable):
return [File(k, 'r') for k in filenames]
else:
raise TypeError(
"Unexpected input object. This function is expecting an "
"iterable of filenames.")
def save(array, filename, create_directories=False):
"""Saves the contents of an array-like object to file.
Effectively, this is the same as creating a :py:class:`File` object
with the mode flag set to ``'w'`` (write with truncation) and calling
:py:meth:`File.write` passing ``array`` as parameter.
Parameters:
``array`` : array_like
The array-like object to be saved on the file
``filename`` : str
The name of the file where you need the contents saved to
``create_directories`` : bool
Automatically generate the directories if required (defaults to ``False``
because of compatibility reasons; might change in future to default to
``True``)
"""
# create directory if not existent yet
if create_directories:
create_directories_safe(os.path.dirname(filename))
# requires data is c-contiguous and aligned, will create a copy otherwise
array = numpy.require(array, requirements=('C_CONTIGUOUS', 'ALIGNED'))
return File(filename, 'w').write(array)
# Just to make it homogenous with the C++ API
write = save
read = load
def append(array, filename):
"""append(array, filename) -> position
Appends the contents of an array-like object to file.
Effectively, this is the same as creating a :py:class:`File` object
with the mode flag set to ``'a'`` (append) and calling
:py:meth:`File.append` passing ``array`` as parameter.
**Parameters:**
``array`` : array_like
The array-like object to be saved on the file
``filename`` : str
The name of the file where you need the contents saved to
**Returns:**
``position`` : int
See :py:meth:`File.append`
"""
# requires data is c-contiguous and aligned, will create a copy otherwise
array = numpy.require(array, requirements=('C_CONTIGUOUS', 'ALIGNED'))
return File(filename, 'a').append(array)
def peek(filename):
"""peek(filename) -> dtype, shape, stride
Returns the type of array (frame or sample) saved in the given file.
Effectively, this is the same as creating a :py:class:`File` object
with the mode flag set to `r` (read-only) and calling
:py:meth:`File.describe`.
**Parameters**:
``filename`` : str
The name of the file to peek information from
**Returns:**
``dtype, shape, stride`` : see :py:meth:`File.describe`
"""
return File(filename, 'r').describe()
def peek_all(filename):
"""peek_all(filename) -> dtype, shape, stride
Returns the type of array (for full readouts) saved in the given file.
Effectively, this is the same as creating a :py:class:`File` object
with the mode flag set to ``'r'`` (read-only) and returning
``File.describe`` with its parameter ``all`` set to ``True``.
**Parameters:**
``filename`` : str
The name of the file to peek information from
**Returns:**
``dtype, shape, stride`` : see :py:meth:`File.describe`
"""
return File(filename, 'r').describe(all=True)
# Keeps compatibility with the previously existing API
open = File
def get_config():
"""Returns a string containing the configuration information.
"""
return bob.extension.get_config(__name__, version.externals, version.api)
def get_include_directories():
"""get_include_directories() -> includes
Returns a list of include directories for dependent libraries, such as HDF5.
This function is automatically used by
:py:func:`bob.extension.get_bob_libraries` to retrieve the non-standard
include directories that are required to use the C bindings of this library
in dependent classes. You shouldn't normally need to call this function by
hand.
**Returns:**
``includes`` : [str]
The list of non-standard include directories required to use the C bindings
of this class. For now, only the directory for the HDF5 headers are
returned.
"""
# try to use pkg_config first
try:
from bob.extension.utils import find_header
# locate pkg-config on our own
header = 'hdf5.h'
candidates = find_header(header)
if not candidates:
raise RuntimeError(
"could not find %s's `%s' - have you installed %s on this "
"machine?" % ('hdf5', header, 'hdf5'))
return [os.path.dirname(candidates[0])]
except RuntimeError:
from bob.extension import pkgconfig
pkg = pkgconfig('hdf5')
return pkg.include_directories()
def get_macros():
"""get_macros() -> macros
Returns a list of preprocessor macros, such as ``(HAVE_HDF5, 1)``. This
function is automatically used by :py:func:`bob.extension.get_bob_libraries`
  to retrieve the preprocessor definitions that are required to use the C
bindings of this library in dependent classes. You shouldn't normally need to
call this function by hand.
**Returns:**
``macros`` : [(str,str)]
The list of preprocessor macros required to use the C bindings of this
class. For now, only ``('HAVE_HDF5', '1')`` is returned, when applicable.
"""
# get include directories
if get_include_directories():
return [('HAVE_HDF5', '1')]
def _generate_features(reader, paths, same_size=False):
"""Load and stack features in a memory efficient way. This function is
meant to be used inside :py:func:`vstack_features`.
Parameters
----------
reader : ``collections.Callable``
See the documentation of :py:func:`vstack_features`.
paths : ``collections.Iterable``
See the documentation of :py:func:`vstack_features`.
same_size : :obj:`bool`, optional
See the documentation of :py:func:`vstack_features`.
Yields
------
object
The first object returned is a tuple of :py:class:`numpy.dtype` of
features and the shape of the first feature. The rest of objects are
the actual values in features. The features are returned in C order.
"""
shape_determined = False
for i, path in enumerate(paths):
feature = numpy.atleast_2d(reader(path))
feature = numpy.ascontiguousarray(feature)
if not shape_determined:
shape_determined = True
dtype = feature.dtype
shape = list(feature.shape)
yield (dtype, shape)
else:
# make sure all features have the same shape and dtype
if same_size:
assert shape == list(feature.shape)
else:
assert shape[1:] == list(feature.shape[1:])
assert dtype == feature.dtype
if same_size:
yield (feature.ravel(),)
else:
for feat in feature:
yield (feat.ravel(),)
def vstack_features(reader, paths, same_size=False, dtype=None):
"""Stacks all features in a memory efficient way.
Parameters
----------
reader : ``collections.Callable``
The function to load the features. The function should only take one
argument ``path`` and return loaded features. Use :any:`functools.partial`
to accommodate your reader to this format.
The features returned by ``reader`` are expected to have the same
:py:class:`numpy.dtype` and the same shape except for their first
dimension. First dimension should correspond to the number of samples.
paths : ``collections.Iterable``
An iterable of paths to iterate on. Whatever is inside path is given to
``reader`` so they do not need to be necessarily paths to actual files.
If ``same_size`` is ``True``, ``len(paths)`` must be valid.
same_size : :obj:`bool`, optional
If ``True``, it assumes that arrays inside all the paths are the same
shape. If you know the features are the same size in all paths, set this
to ``True`` to improve the performance.
dtype : :py:class:`numpy.dtype`, optional
If provided, the data will be casted to this format.
Returns
-------
numpy.ndarray
The read features with the shape ``(n_samples, *features_shape[1:])``.
Examples
--------
This function in a simple way is equivalent to calling
``numpy.vstack([reader(p) for p in paths])``.
>>> import numpy
>>> from bob.io.base import vstack_features
>>> def reader(path):
... # in each file, there are 5 samples and features are 2 dimensional.
... return numpy.arange(10).reshape(5,2)
>>> paths = ['path1', 'path2']
>>> all_features = vstack_features(reader, paths)
>>> numpy.allclose(all_features, numpy.array(
... [[0, 1],
... [2, 3],
... [4, 5],
... [6, 7],
... [8, 9],
... [0, 1],
... [2, 3],
... [4, 5],
... [6, 7],
... [8, 9]]))
True
>>> all_features_with_more_memory = numpy.vstack([reader(p) for p in paths])
>>> numpy.allclose(all_features, all_features_with_more_memory)
True
You can allocate the array at once to improve the performance if you know
that all features in paths have the same shape and you know the total number
of the paths:
>>> all_features = vstack_features(reader, paths, same_size=True)
>>> numpy.allclose(all_features, numpy.array(
... [[0, 1],
... [2, 3],
... [4, 5],
... [6, 7],
... [8, 9],
... [0, 1],
... [2, 3],
... [4, 5],
... [6, 7],
... [8, 9]]))
True
"""
iterable = _generate_features(reader, paths, same_size)
data_dtype, shape = next(iterable)
if dtype is None:
dtype = data_dtype
if same_size:
# numpy black magic: https://stackoverflow.com/a/12473478/1286165
field_dtype = [("", (dtype, (numpy.prod(shape),)))]
total_size = len(paths)
all_features = numpy.fromiter(iterable, field_dtype, total_size)
else:
field_dtype = [("", (dtype, (numpy.prod(shape[1:]),)))]
all_features = numpy.fromiter(iterable, field_dtype)
# go from a field array to a normal array
all_features = all_features.view(dtype)
# the shape is assumed to be (n_samples, ...) it can be (5, 2) or (5, 3, 4).
shape = list(shape)
shape[0] = -1
return numpy.reshape(all_features, shape, order="C")
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
| 2.34375 | 2 |
server/pyser.py | SiggyTheViking/doors | 0 | 12767916 | #!/usr/bin/env python3
import serial
import datetime
import paho.mqtt.publish as pub
import redis
import psycopg2 as pg
ser = serial.Serial('/dev/ttyACM0', 9600)
doors = [
{'door':'front','open':None},
{'door':'french','open':None},
{'door':'kitchen','open':None},
{'door':'music','open':None},
{'door':'prayer','open':None}]
def set_door_state(bt,state):
changes = []
for i,d in enumerate(state):
thisDoor = bool(bt & (1 << i))
if (state[i]['open'] != thisDoor):
state[i]['open'] = thisDoor
changes.append(state[i])
return changes
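# e.g. set_door_state(0b00101, doors) marks 'front' (bit 0) and 'kitchen'
# (bit 2) as open and returns only the entries whose state changed.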
r = redis.StrictRedis(host='mylocalipaddr', port=6379, db=0)
conn = pg.connect(host='mylocalipaddr',port=5433,dbname='flintstone',user='fred',password='<PASSWORD>')
cur = conn.cursor()
old = None
while 1:
raw = ser.readline()
this = int(raw.strip()[0])
if (this != old):
changes = set_door_state(this,doors)
print(datetime.datetime.now(), ' ', changes)
pub.single('doors',this)
old = this
for change in changes:
r.set('doors.'+change['door'],change['open'])
cur.execute("insert into discretehistory (point,eventtime,value) values(%s,%s,%s)",
('doors.'+change['door'],datetime.datetime.now(),change['open']))
conn.commit()
| 2.53125 | 3 |
iucn_sim/run_sim.py | tobiashofmann88/iucn_extinction_simulator | 11 | 12767917 | <filename>iucn_sim/run_sim.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Run future simulations based on IUCN data and status transition rates
Created on Wed Oct 30 20:59:28 2019
@author: <NAME> (<EMAIL>)
"""
import iucn_sim.iucn_sim as iucn_sim
def add_arguments(parser):
parser.add_argument(
'--input_data',
required=True,
help="Path to 'simulation_input_data.pkl' file created by transition_rates function."
)
parser.add_argument(
'--outdir',
required=True,
help="Provide path to outdir where results will be saved."
)
parser.add_argument(
'--n_years',
default=100,
help="How many years to simulate into the future."
)
parser.add_argument(
'--n_sim',
default=10000,
help="How many simulation replicates to run. At least 10,000 simulations are recommended for accurate rate estimation (default). If the number of simulation replicates exceeds the number of available transition rate estimates (produced by the 'transition_rates' function), these rates will be randomely resampled for the remaining simulations."
)
parser.add_argument(
'--status_change',
default=1,
help="Model IUCN status changes in future simulations. 0=off, 1=on (default=1)."
)
parser.add_argument(
'--conservation_increase_factor',
default=1,
help="The transition rates leading to improvements in IUCN conservation status are multiplied by this factor."
)
parser.add_argument(
'--threat_increase_factor',
default=1,
help="Opposite of conservation_increase_factor, multiplies the transition rates leading to worsening in IUCN conservation status."
)
parser.add_argument(
'--model_unknown_as_lc',
default=0,
help="Model new status for all DD and NE species as LC (best case scenario). 0=off, 1=on (default=0)."
)
parser.add_argument(
'--until_n_taxa_extinct',
default=0,
help="Setting this value will stop the simulations when n taxa have gone extinct. This can be used to simulate the expected time until n extinctions. The value of the --n_years flag in this case will be interpreted as the maximum possible time frame, so set it large enough to cover a realistic time-frame for these extinctions to occur. Set to 0 to disable this function (default=0)."
)
parser.add_argument(
'--extinction_rates',
default=1,
help="Estimation of extinction rates from simulation results: 0=off, 1=on (default=1)."
)
parser.add_argument(
'--n_gen',
default=100000,
help="Number of generations for MCMC for extinction rate estimation (default=100000)."
)
parser.add_argument(
'--burnin',
default=1000,
help="Burn-in for MCMC for extinction rate estimation (default=1000)."
)
parser.add_argument(
'--plot_diversity_trajectory',
default=1,
help="Plots the simulated diversity trajectory: 0=off, 1=on (default=1)."
)
parser.add_argument(
'--plot_status_trajectories',
default=1,
help="Plots the simulated IUCN status trajectory: 0=off, 1=on (default=0)."
)
parser.add_argument(
'--plot_histograms',
default=0,
help="Plots histograms of simulated extinction times for each species: 0=off, 1=on (default=0)."
)
parser.add_argument(
'--plot_posterior',
default=0,
help="Plots histograms of posterior rate estimates for each species: 0=off, 1=on (default=0)."
)
parser.add_argument(
'--plot_status_piechart',
default=1,
help="Plots pie charts of status distribution: 0=off, 1=on (default=1)."
)
parser.add_argument(
'--seed',
default=None,
help="Set starting seed for future simulations."
)
def main(args):
simulation_output = iucn_sim.run_sim(
input_data = args.input_data,
outdir = args.outdir,
n_years = args.n_years,
n_sim = args.n_sim,
status_change = args.status_change,
conservation_increase_factor = args.conservation_increase_factor,
threat_increase_factor = args.threat_increase_factor,
model_unknown_as_lc = args.model_unknown_as_lc,
until_n_taxa_extinct = args.until_n_taxa_extinct,
plot_diversity_trajectory = args.plot_diversity_trajectory,
plot_status_trajectories = args.plot_status_trajectories,
plot_histograms = args.plot_histograms,
plot_status_piechart = args.plot_status_piechart,
seed = args.seed,
load_from_file = True
)
if args.extinction_rates:
ext_rates = iucn_sim.estimate_extinction_rates(
simulation_output._extinction_times,
int(args.n_years),
args.outdir,
n_gen = args.n_gen,
burnin = args.burnin,
plot_posterior = args.plot_posterior,
seed = simulation_output._seed,
load_from_file=False # since in this case the input is parsed as an object
)
| 2.75 | 3 |
nuvolasdk/__main__.py | tiliado/nuvolasdk | 3 | 12767918 | import sys
import nuvolasdk
sys.exit(nuvolasdk.run(".", sys.argv))
| 1.125 | 1 |
venv/lib/python3.6/site-packages/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_network.py | usegalaxy-no/usegalaxy | 1 | 12767919 | # -*- coding: utf-8 -*-
#
# Dell EMC OpenManage Ansible Modules
# Version 3.0.0
# Copyright (C) 2018-2021 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
import json
from ansible_collections.dellemc.openmanage.plugins.modules import idrac_network
from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
from ansible_collections.dellemc.openmanage.tests.unit.compat.mock import MagicMock, patch, Mock
from io import StringIO
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from pytest import importorskip
importorskip("omsdk.sdkfile")
importorskip("omsdk.sdkcreds")
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
class TestConfigNetwork(FakeAnsibleModule):
module = idrac_network
@pytest.fixture
def idrac_configure_network_mock(self):
omsdk_mock = MagicMock()
idrac_obj = MagicMock()
omsdk_mock.file_share_manager = idrac_obj
omsdk_mock.config_mgr = idrac_obj
type(idrac_obj).create_share_obj = Mock(return_value="networkstatus")
type(idrac_obj).set_liason_share = Mock(return_value="networkstatus")
return idrac_obj
@pytest.fixture
def idrac_file_manager_config_networking_mock(self, mocker):
try:
file_manager_obj = mocker.patch(
MODULE_PATH + 'idrac_network.file_share_manager')
except AttributeError:
file_manager_obj = MagicMock()
obj = MagicMock()
file_manager_obj.create_share_obj.return_value = obj
return file_manager_obj
@pytest.fixture
def idrac_connection_configure_network_mock(self, mocker, idrac_configure_network_mock):
idrac_conn_class_mock = mocker.patch(MODULE_PATH +
'idrac_network.iDRACConnection',
return_value=idrac_configure_network_mock)
idrac_conn_class_mock.return_value.__enter__.return_value = idrac_configure_network_mock
return idrac_configure_network_mock
def test_main_idrac_configure_network_success_case(self, idrac_connection_configure_network_mock, mocker,
idrac_default_args, idrac_file_manager_config_networking_mock):
idrac_default_args.update({"share_name": "sharename"})
message = {'changed': False, 'msg': {'Status': "Success", "message": "No changes found to commit!"}}
mocker.patch(MODULE_PATH +
'idrac_network.run_idrac_network_config', return_value=message)
result = self._run_module(idrac_default_args)
assert result == {'msg': 'Successfully configured the idrac network settings.',
'network_status': {
'changed': False,
'msg': {'Status': 'Success', 'message': 'No changes found to commit!'}},
'changed': False, 'failed': False}
def test_run_idrac_network_config_success_case01(self, idrac_connection_configure_network_mock, idrac_default_args,
idrac_file_manager_config_networking_mock):
idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
"share_password": "<PASSWORD>", "register_idrac_on_dns": "Enabled",
"dns_idrac_name": "testname", "auto_config": "Disabled", "static_dns": "staticdns",
"setup_idrac_nic_vlan": "Enabled", "vlan_id": 4, "vlan_priority": "Enabled",
"enable_nic": "Enabled", "nic_selection": "Dedicated",
"failover_network": "ALL", "auto_detect": "Enabled", "auto_negotiation": "Enabled",
"network_speed": "T_10", "duplex_mode": "Full", "nic_mtu": "nicmtu",
"enable_dhcp": "Enabled", "ip_address": "172.16.17.32", "enable_ipv4": "Enabled",
"dns_from_dhcp": "Enabled", "static_dns_1": "staticdns1",
"static_dns_2": "staticdns2", "static_gateway": "staticgateway",
"static_net_mask": "staticnetmask"})
message = {"changes_applicable": True, "message": "changes are applicable"}
idrac_connection_configure_network_mock.config_mgr.is_change_applicable.return_value = message
f_module = self.get_module_mock(params=idrac_default_args, check_mode=True)
msg = self.module.run_idrac_network_config(idrac_connection_configure_network_mock, f_module)
assert msg == {'changes_applicable': True, 'message': 'changes are applicable'}
def test_run_idrac_network_config_success_case02(self, idrac_connection_configure_network_mock, idrac_default_args,
idrac_file_manager_config_networking_mock):
idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
"share_password": "<PASSWORD>", "register_idrac_on_dns": "Enabled",
"dns_idrac_name": "testname", "auto_config": "Disabled", "static_dns": "staticdns",
"setup_idrac_nic_vlan": "Enabled", "vlan_id": 4, "vlan_priority": "Enabled",
"enable_nic": "Enabled", "nic_selection": "Dedicated",
"failover_network": "ALL", "auto_detect": "Enabled", "auto_negotiation": "Enabled",
"network_speed": "T_10", "duplex_mode": "Full", "nic_mtu": "nicmtu",
"enable_dhcp": "Enabled", "ip_address": "172.16.17.32", "enable_ipv4": "Enabled",
"dns_from_dhcp": "Enabled", "static_dns_1": "staticdns1",
"static_dns_2": "staticdns2", "static_gateway": "staticgateway",
"static_net_mask": "staticnetmask"})
message = {"changes_applicable": True, "message": "changes found to commit!", "changed": True,
"Status": "Success"}
idrac_connection_configure_network_mock.config_mgr.apply_changes.return_value = message
f_module = self.get_module_mock(params=idrac_default_args)
f_module.check_mode = False
msg = self.module.run_idrac_network_config(idrac_connection_configure_network_mock, f_module)
assert msg == {'Status': 'Success',
'changed': True,
'changes_applicable': True,
'message': 'changes found to commit!'}
def test_run_idrac_network_config_success_case03(self, idrac_connection_configure_network_mock, idrac_default_args,
idrac_file_manager_config_networking_mock):
idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
"share_password": "<PASSWORD>", "register_idrac_on_dns": "Enabled",
"dns_idrac_name": "testname", "auto_config": "Disabled", "static_dns": "staticdns",
"setup_idrac_nic_vlan": "Enabled", "vlan_id": 4, "vlan_priority": "Enabled",
"enable_nic": "Enabled", "nic_selection": "Dedicated",
"failover_network": "ALL", "auto_detect": "Enabled", "auto_negotiation": "Enabled",
"network_speed": "T_10", "duplex_mode": "Full", "nic_mtu": "nicmtu",
"enable_dhcp": "Enabled", "ip_address": "172.16.17.32", "enable_ipv4": "Enabled",
"dns_from_dhcp": "Enabled", "static_dns_1": "staticdns1",
"static_dns_2": "staticdns2", "static_gateway": "staticgateway",
"static_net_mask": "staticnetmask"})
message = {"changes_applicable": False, "Message": "No changes found to commit!", "changed": False,
"Status": "Success"}
idrac_connection_configure_network_mock.config_mgr.apply_changes.return_value = message
f_module = self.get_module_mock(params=idrac_default_args)
f_module.check_mode = False
msg = self.module.run_idrac_network_config(idrac_connection_configure_network_mock, f_module)
assert msg == {'Message': 'No changes found to commit!',
'Status': 'Success',
'changed': False,
'changes_applicable': False}
def test_run_idrac_network_config_success_case04(self, idrac_connection_configure_network_mock,
idrac_default_args, idrac_file_manager_config_networking_mock):
idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
"share_password": "<PASSWORD>", "register_idrac_on_dns": "Enabled",
"dns_idrac_name": "testname", "auto_config": "Disabled", "static_dns": "staticdns",
"setup_idrac_nic_vlan": "Enabled", "vlan_id": 4, "vlan_priority": "Enabled",
"enable_nic": "Enabled", "nic_selection": "Dedicated",
"failover_network": "ALL", "auto_detect": "Enabled", "auto_negotiation": "Enabled",
"network_speed": "T_10", "duplex_mode": "Full", "nic_mtu": "nicmtu",
"enable_dhcp": "Enabled", "ip_address": "172.16.17.32", "enable_ipv4": "Enabled",
"dns_from_dhcp": "Enabled", "static_dns_1": "staticdns1",
"static_dns_2": "staticdns2", "static_gateway": "staticgateway",
"static_net_mask": "staticnetmask"})
message = {"changes_applicable": False, "Message": "No changes were applied", "changed": False,
"Status": "Success"}
idrac_connection_configure_network_mock.config_mgr.apply_changes.return_value = message
f_module = self.get_module_mock(params=idrac_default_args)
f_module.check_mode = False
msg = self.module.run_idrac_network_config(idrac_connection_configure_network_mock, f_module)
assert msg == {'Message': 'No changes were applied',
'Status': 'Success',
'changed': False,
'changes_applicable': False}
def test_run_idrac_network_config_success_case05(self, idrac_connection_configure_network_mock, idrac_default_args,
idrac_file_manager_config_networking_mock):
idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
"share_password": "<PASSWORD>", "register_idrac_on_dns": None,
"dns_idrac_name": None, "auto_config": None, "static_dns": None,
"setup_idrac_nic_vlan": None, "vlan_id": None, "vlan_priority": None,
"enable_nic": None, "nic_selection": None,
"failover_network": None, "auto_detect": None, "auto_negotiation": None,
"network_speed": None, "duplex_mode": None, "nic_mtu": None,
"enable_dhcp": None, "ip_address": None, "enable_ipv4": None,
"dns_from_dhcp": None, "static_dns_1": None, "static_dns_2": None,
"static_gateway": None, "static_net_mask": None})
message = {"changes_applicable": False, "Message": "No changes were applied", "changed": False,
"Status": "Success"}
idrac_connection_configure_network_mock.config_mgr.configure_dns.return_value = message
idrac_connection_configure_network_mock.config_mgr.configure_nic_vlan.return_value = message
idrac_connection_configure_network_mock.config_mgr.configure_network_settings.return_value = message
idrac_connection_configure_network_mock.config_mgr.configure_ipv4.return_value = message
idrac_connection_configure_network_mock.config_mgr.configure_static_ipv4.return_value = message
idrac_connection_configure_network_mock.config_mgr.apply_changes.return_value = message
f_module = self.get_module_mock(params=idrac_default_args)
f_module.check_mode = False
msg = self.module.run_idrac_network_config(idrac_connection_configure_network_mock, f_module)
assert msg == {'Message': 'No changes were applied',
'Status': 'Success',
'changed': False,
'changes_applicable': False}
def test_run_idrac_network_config_failed_case01(self, idrac_connection_configure_network_mock, idrac_default_args,
idrac_file_manager_config_networking_mock):
idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
"share_password": "<PASSWORD>", "register_idrac_on_dns": "Enabled",
"dns_idrac_name": "testname", "auto_config": "Disabled", "static_dns": "staticdns",
"setup_idrac_nic_vlan": "Enabled", "vlan_id": 4, "vlan_priority": "Enabled",
"enable_nic": "Enabled", "nic_selection": "Dedicated",
"failover_network": "ALL", "auto_detect": "Enabled", "auto_negotiation": "Enabled",
"network_speed": "T_10", "duplex_mode": "Full", "nic_mtu": "nicmtu",
"enable_dhcp": "Enabled", "ip_address": "172.16.17.32", "enable_ipv4": "Enabled",
"dns_from_dhcp": "Enabled", "static_dns_1": "staticdns1",
"static_dns_2": "staticdns2", "static_gateway": "staticgateway",
"static_net_mask": "staticnetmask"})
message = {'Status': 'Failed', "Data": {'Message': 'status failed in checking Data'}}
idrac_connection_configure_network_mock.file_share_manager.create_share_obj.return_value = "mnt/iso"
idrac_connection_configure_network_mock.config_mgr.set_liason_share.return_value = message
f_module = self.get_module_mock(params=idrac_default_args, check_mode=True)
result = self.module.run_idrac_network_config(idrac_connection_configure_network_mock, f_module)
assert result == idrac_connection_configure_network_mock.config_mgr.is_change_applicable()
def test_run_idrac_network_config_failed_case02(self, idrac_connection_configure_network_mock,
idrac_default_args, idrac_file_manager_config_networking_mock):
idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
"share_password": "<PASSWORD>", "register_idrac_on_dns": "Enabled",
"dns_idrac_name": "testname", "auto_config": "Disabled", "static_dns": "staticdns",
"setup_idrac_nic_vlan": "Enabled", "vlan_id": 4, "vlan_priority": "Enabled",
"enable_nic": "Enabled", "nic_selection": "Dedicated",
"failover_network": "ALL", "auto_detect": "Enabled", "auto_negotiation": "Enabled",
"network_speed": "T_10", "duplex_mode": "Full", "nic_mtu": "nicmtu",
"enable_dhcp": "Enabled", "ip_address": "172.16.17.32", "enable_ipv4": "Enabled",
"dns_from_dhcp": "Enabled", "static_dns_1": "staticdns1",
"static_dns_2": "staticdns2", "static_gateway": "staticgateway",
"static_net_mask": "staticnetmask"})
message = {"changes_applicable": False, "Message": "No changes were applied", "changed": False,
"Status": "failed"}
idrac_connection_configure_network_mock.config_mgr.apply_changes.return_value = message
f_module = self.get_module_mock(params=idrac_default_args)
f_module.check_mode = False
msg = self.module.run_idrac_network_config(idrac_connection_configure_network_mock, f_module)
assert msg == {'Message': 'No changes were applied',
'Status': 'failed',
'changed': False,
'changes_applicable': False}
def test_run_idrac_network_config_failed_case03(self, idrac_connection_configure_network_mock,
idrac_default_args, idrac_file_manager_config_networking_mock):
idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
"share_password": "<PASSWORD>", "register_idrac_on_dns": "Enabled",
"dns_idrac_name": "testname", "auto_config": "Disabled", "static_dns": "staticdns",
"setup_idrac_nic_vlan": "Enabled", "vlan_id": 4, "vlan_priority": "Enabled",
"enable_nic": "Enabled", "nic_selection": "Dedicated",
"failover_network": "ALL", "auto_detect": "Enabled", "auto_negotiation": "Enabled",
"network_speed": "T_10", "duplex_mode": "Full", "nic_mtu": "nicmtu",
"enable_dhcp": "Enabled", "ip_address": "172.16.17.32", "enable_ipv4": "Enabled",
"dns_from_dhcp": "Enabled", "static_dns_1": "staticdns1",
"static_dns_2": "staticdns2", "static_gateway": "staticgateway",
"static_net_mask": "staticnetmask"})
message = {'Status': 'Failed', "Data": {'Message': "Failed to found changes"}}
idrac_connection_configure_network_mock.file_share_manager.create_share_obj.return_value = "mnt/iso"
idrac_connection_configure_network_mock.config_mgr.set_liason_share.return_value = message
f_module = self.get_module_mock(params=idrac_default_args, check_mode=True)
msg = self.module.run_idrac_network_config(idrac_connection_configure_network_mock, f_module)
assert msg == idrac_connection_configure_network_mock.config_mgr.is_change_applicable()
@pytest.mark.parametrize("exc_type", [RuntimeError, SSLValidationError, ConnectionError, KeyError,
ImportError, ValueError, TypeError, HTTPError, URLError])
def test_main_idrac_configure_network_exception_handling_case(self, exc_type, mocker, idrac_default_args,
idrac_connection_configure_network_mock,
idrac_file_manager_config_networking_mock):
idrac_default_args.update({"share_name": "sharename"})
json_str = to_text(json.dumps({"data": "out"}))
if exc_type not in [HTTPError, SSLValidationError]:
mocker.patch(
MODULE_PATH + 'idrac_network.run_idrac_network_config',
side_effect=exc_type('test'))
else:
mocker.patch(
MODULE_PATH + 'idrac_network.run_idrac_network_config',
side_effect=exc_type('http://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
if not exc_type == URLError:
result = self._run_module_with_fail_json(idrac_default_args)
assert result['failed'] is True
else:
result = self._run_module(idrac_default_args)
assert 'msg' in result
| 1.617188 | 2 |
greenhouse/greenhouse/wsgi.py | wnzlff/greenhouse-web | 1 | 12767920 | <filename>greenhouse/greenhouse/wsgi.py<gh_stars>1-10
"""
WSGI config for greenhouse project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'greenhouse.settings')
sys.path.append('/home/pi/Repository/greenhouse-web-env/greenhouse-web/greenhouse')
application = get_wsgi_application()
| 1.867188 | 2 |
reputation/__init__.py | kabirkbr/reputation | 0 | 12767921 | <reponame>kabirkbr/reputation
name = "reputation"
from reputation.aigents_reputation_api import AigentsAPIReputationService
| 1.273438 | 1 |
src/third_party/wiredtiger/test/suite/test_bug018.py | benety/mongo | 0 | 12767922 | #!/usr/bin/env python
#
# Public Domain 2014-present MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from helper import copy_wiredtiger_home
from suite_subprocess import suite_subprocess
import os
import wiredtiger, wttest
# test_bug018.py
# JIRA WT-3590: if writing table data fails during close then tables
# that were updated within the same transaction could get out of sync with
# each other.
class test_bug018(wttest.WiredTigerTestCase, suite_subprocess):
'''Test closing/reopening/recovering tables when writes fail'''
conn_config = 'log=(enabled)'
basename = 'bug018.'
baseuri = 'file:' + basename
flist = []
uri1 = baseuri + '01.wt'
uri2 = baseuri + '02.wt'
def setUp(self):
# This test uses Linux-specific code so skip on any other system.
if os.name != 'posix' or os.uname()[0] != 'Linux':
self.skipTest('Linux-specific test skipped on ' + os.name)
super(test_bug018, self).setUp()
def close_files(self):
for f in self.flist:
f.close()
def open_files(self):
numfiles = 6
dir = self.conn.get_home()
for i in range(1, numfiles):
fname = dir + '/file.' + str(i)
self.flist.append(open(fname, 'w'))
def create_table(self, uri):
self.session.create(uri, 'key_format=S,value_format=S')
return self.session.open_cursor(uri)
def subprocess_bug018(self):
'''Test closing multiple tables'''
# The first thing we do is open several files. We will close them later. The reason is
# that sometimes, without that, this test would fail to report an error as expected. We
# hypothesize, but could not prove (nor reproduce under strace), that after closing the
# file descriptor that an internal thread would open a file, perhaps a pre-allocated log
# file, and then would open the file descriptor we just closed. So on close, instead of
# getting an error, we would actually write to the wrong file.
#
# So we'll open some files now, and then close them before closing the one of interest to
# the test so that any stray internal file opens will use the file descriptor of one of
# the earlier files we just closed.
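        #
        # Illustrative sketch of the POSIX behavior relied on here (not part of
        # the test; 'scratch' is a hypothetical file name): open() returns the
        # lowest free descriptor, so
        #   fd = os.open('scratch', os.O_RDONLY)
        #   os.close(fd)
        #   assert os.open('scratch', os.O_RDONLY) == fd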
self.open_files()
c1 = self.create_table(self.uri1)
c2 = self.create_table(self.uri2)
self.session.begin_transaction()
c1['key'] = 'value'
c2['key'] = 'value'
self.session.commit_transaction()
self.close_files()
# Simulate a write failure by closing the file descriptor for the second
# table out from underneath WiredTiger. We do this right before
# closing the connection so that the write error happens during close
# when writing out the final data. Allow table 1 to succeed and force
# an error writing out table 2.
#
# This is Linux-specific code to figure out the file descriptor.
for f in os.listdir('/proc/self/fd'):
try:
if os.readlink('/proc/self/fd/' + f).endswith(self.basename + '02.wt'):
os.close(int(f))
except OSError:
pass
# Expect an error and messages, so turn off stderr checking.
with self.expectedStderrPattern(''):
try:
self.close_conn()
except wiredtiger.WiredTigerError:
self.conn = None
def test_bug018(self):
'''Test closing multiple tables'''
self.close_conn()
subdir = 'SUBPROCESS'
[ignore_result, new_home_dir] = self.run_subprocess_function(subdir,
'test_bug018.test_bug018.subprocess_bug018')
# Make a backup for forensics in case something goes wrong.
backup_dir = 'BACKUP'
copy_wiredtiger_home(self, new_home_dir, backup_dir, True)
# After reopening and running recovery both tables should be in
# sync even though table 1 was successfully written and table 2
# had an error on close.
self.open_conn(new_home_dir)
results1 = list(self.session.open_cursor(self.uri1))
# It's possible the second table can't even be opened.
# That can happen only if the root page was not pushed out.
# We can't depend on the text of a particular error message to be
# emitted, so we'll just ignore the error.
self.captureerr.check(self) # check there is no error output so far
try:
results2 = list(self.session.open_cursor(self.uri2))
except:
# Make sure there's some error, but we don't care what.
self.captureerr.checkAdditionalPattern(self, '.')
results2 = []
self.assertEqual(results1, results2)
if __name__ == '__main__':
wttest.run()
| 1.375 | 1 |
BasicProgramming/Problem1/11-Patern-4.py | SyamsulAlterra/Alta | 0 | 12767923 | <filename>BasicProgramming/Problem1/11-Patern-4.py<gh_stars>0
num = 5
for i in range(1, num + 1):
    to_print = ""
    end = i * (i + 1) // 2  # i-th triangular number: the last value of this row
    for x in range(end + 1 - i, end + 1):
        to_print += " " + str(x)
    print(to_print)
#output
'''
1
2 3
4 5 6
7 8 9 10
11 12 13 14 15
''' | 3.5 | 4 |
broker/persistence/persistence_interface.py | javanlacerda/asperathos-manager | 7 | 12767924 | <filename>broker/persistence/persistence_interface.py
# Copyright (c) 2019 UFCG-LSD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import six
def required(fun):
return abc.abstractmethod(fun)
@six.add_metaclass(abc.ABCMeta)
class PersistenceInterface(object):
@required
def put(self, key, value):
pass
@required
def get(self, key):
pass
@required
def delete(self, key):
pass
@required
def delete_all(self):
pass
@required
def get_all(self):
pass
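# Minimal in-memory sketch of a conforming backend (illustrative only; the
# class name and dict-backed storage are assumptions, not part of Asperathos):
#
# class InMemoryPersistence(PersistenceInterface):
#     def __init__(self):
#         self._data = {}
#     def put(self, key, value):
#         self._data[key] = value
#     def get(self, key):
#         return self._data[key]
#     def delete(self, key):
#         del self._data[key]
#     def delete_all(self):
#         self._data.clear()
#     def get_all(self):
#         return dict(self._data)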
| 2.109375 | 2 |
tests/schema_mapping/structures/example6.py | loyada/typed-py | 14 | 12767925 | <gh_stars>10-100
from typedpy import Structure, mappers
class Example6(Structure):
a: int
ssss_ttt: str
_serialization_mapper = [{"a": "bb_cc"}, mappers.TO_CAMELCASE, {"ssssTtt": "x"}]
| 2.21875 | 2 |
tests/test_webhook_payloads.py | DowneyTung/saleor | 19 | 12767926 | <gh_stars>10-100
import json
import pytest
from saleor.order import OrderStatus
from saleor.webhook import WebhookEventType
from saleor.webhook.payloads import (
generate_customer_payload,
generate_order_payload,
generate_product_payload,
generate_sample_payload,
)
@pytest.mark.parametrize(
"event_name, order_status",
[
(WebhookEventType.ORDER_CREATED, OrderStatus.UNFULFILLED),
(WebhookEventType.ORDER_UPDATED, OrderStatus.CANCELED),
(WebhookEventType.ORDER_CANCELLED, OrderStatus.CANCELED),
(WebhookEventType.ORDER_FULFILLED, OrderStatus.UNFULFILLED),
(WebhookEventType.ORDER_FULLY_PAID, OrderStatus.UNFULFILLED),
],
)
def test_generate_sample_payload_order(
event_name, order_status, fulfilled_order, payment_txn_captured
):
fulfilled_order.status = order_status
fulfilled_order.save()
payload = generate_sample_payload(event_name)
assert payload == json.loads(generate_order_payload(fulfilled_order))
@pytest.mark.parametrize(
"event_name",
[
WebhookEventType.ORDER_CREATED,
WebhookEventType.ORDER_UPDATED,
WebhookEventType.ORDER_CANCELLED,
WebhookEventType.ORDER_FULFILLED,
WebhookEventType.ORDER_FULLY_PAID,
WebhookEventType.PRODUCT_CREATED,
WebhookEventType.CUSTOMER_CREATED,
"Non_existing_event",
None,
"",
],
)
def test_generate_sample_payload_empty_response(event_name):
assert generate_sample_payload(event_name) is None
def test_generate_sample_customer_payload(customer_user):
payload = generate_sample_payload(WebhookEventType.CUSTOMER_CREATED)
assert payload == json.loads(generate_customer_payload(customer_user))
def test_generate_sample_product_payload(variant):
payload = generate_sample_payload(WebhookEventType.PRODUCT_CREATED)
assert payload == json.loads(generate_product_payload(variant.product))
| 2.109375 | 2 |
library/password_file.py | lukasic/ansible-role-ispconf | 0 | 12767927 | <gh_stars>0
from ansible.module_utils.basic import AnsibleModule
import os
import sys
import string
def generate_password(length):
chars = string.ascii_letters + string.digits
if sys.version_info.major == 3:
return "".join(chars[c % len(chars)] for c in os.urandom(length))
else:
return "".join(chars[ord(c) % len(chars)] for c in os.urandom(length))
def main():
module = AnsibleModule(
argument_spec = dict(
path = dict(required=True, type='path'),
state = dict(default='present', choices=['present', 'absent']),
length = dict(required=False, default=16, type="int"),
set_fact = dict(required=False, type='str', default="")
# mode: 600
# owner: root
)
)
changed = None
path = module.params['path']
length = module.params['length']
password = None
if module.params['state'] == 'present':
if os.path.isfile(path):
changed = False
with open(path, "rt") as f:
password = f.read().replace("\n", "")
else:
changed = True
password = generate_password(length)
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
with open(path, "wt") as f:
f.write("%s\n" % password)
elif module.params['state'] == 'absent':
if os.path.isfile(path):
changed = True
os.unlink(path)
else:
changed = False
else:
        module.fail_json(msg='parameter "state": unknown value')
if changed is None:
        module.fail_json(msg='bug: no changed value was set')
facts = dict()
if module.params['set_fact']:
facts[module.params['set_fact']] = password
module.exit_json(
changed=changed,
ansible_facts=facts
)
if __name__ == '__main__':
main()
| 2.28125 | 2 |
Python/UploadHeroLoginDownload.py | nerzhul/MiscScripts | 1 | 12767928 | from requests import Request, Session
import bs4 as BeautifulSoup
import re
import sys
conn_url="http://uploadhero.com/lib/connexion.php"
dl_url="http://uploadhero.co/dl/filetodl"
conn_proxies = {
# "http": "http://proxy.example.net:3128",
}
conn_data={"pseudo_login": "mylogin","password_login": "<PASSWORD>"}
s = Session()
req = Request('POST', conn_url,
data=conn_data,
)
prepped = req.prepare()
resp = s.send(prepped,
proxies=conn_proxies,
)
soup = BeautifulSoup.BeautifulSoup(resp.text).find('div', attrs={'id': 'cookietransitload'})
if soup is None:
    print("Unable to authenticate to UploadHero")
    sys.exit(1)
cookiestring = soup.string
req = Request('GET', dl_url)
req.headers["Cookie"] = "uh=%s" % cookiestring
prepped = req.prepare()
resp = s.send(prepped,
proxies=conn_proxies,
)
soup = BeautifulSoup.BeautifulSoup(resp.text).find('div', attrs={'class':'conteneur_page'}).find('div', attrs={'class':'conteneur_page'})
if soup is None:
    print("File unavailable")
    sys.exit(2)
tags_a = soup.findAll('a')
dl_link = None
for tag_a in tags_a:
soup = BeautifulSoup.BeautifulSoup("%s" % tag_a)
if 'href' in soup.a.attrs and re.search('http:.+uploadhero', soup.a.attrs['href']):
dl_link = soup.a.attrs['href']
if dl_link is not None:
filename = dl_link.split('/')[-1]
req = Request('GET', dl_link)
prepped = req.prepare()
r = s.send(prepped,
stream=True,
proxies=conn_proxies,
)
with open(filename, 'wb') as f:
for block in r.iter_content(1024):
if block:
f.write(block)
f.flush()
print "Download finished"
| 2.78125 | 3 |
python_modules/libraries/dagster-gcp/dagster_gcp/bigquery/resources.py | hspak/dagster | 3 | 12767929 | <reponame>hspak/dagster<filename>python_modules/libraries/dagster-gcp/dagster_gcp/bigquery/resources.py
import google.api_core.exceptions
import six
from google.cloud import bigquery
from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
from google.cloud.bigquery.retry import DEFAULT_RETRY
from dagster import check, resource
from .configs import bq_resource_config
from .types import BigQueryError, BigQueryLoadSource
class BigQueryClient(bigquery.Client):
def __init__(self, project=None):
check.opt_str_param(project, 'project')
super(BigQueryClient, self).__init__(project=project)
def create_dataset(self, dataset, exists_ok=False, retry=DEFAULT_RETRY):
try:
super(BigQueryClient, self).create_dataset(dataset, exists_ok, retry)
except google.api_core.exceptions.Conflict:
six.raise_from(
BigQueryError('Dataset "%s" already exists and exists_ok is false' % dataset), None
)
def delete_dataset(
self, dataset, delete_contents=False, retry=DEFAULT_RETRY, not_found_ok=False
):
try:
super(BigQueryClient, self).delete_dataset(
dataset, delete_contents=delete_contents, retry=retry, not_found_ok=not_found_ok
)
except google.api_core.exceptions.NotFound:
six.raise_from(
BigQueryError('Dataset "%s" does not exist and not_found_ok is false' % dataset),
None,
)
def load_table_from_dataframe(
self,
dataframe,
destination,
num_retries=_DEFAULT_NUM_RETRIES,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
job_config=None,
parquet_compression="snappy",
):
try:
return super(BigQueryClient, self).load_table_from_dataframe(
dataframe,
destination,
num_retries,
job_id,
job_id_prefix,
location,
project,
job_config,
parquet_compression,
)
except ImportError as e:
six.raise_from(
BigQueryError(
'loading data to BigQuery from pandas DataFrames requires either '
'pyarrow or fastparquet to be installed. %s' % str(e)
),
None,
)
def load_table_from_filepath(self, file_path, destination, job_config):
with open(file_path, 'rb') as file_obj:
return super(BigQueryClient, self).load_table_from_file(
file_obj, destination, job_config=job_config
)
def load_table_from_source(self, source, load_input, destination, job_config):
# Load from DataFrame. See: https://bit.ly/2GDhVt1
if source == BigQueryLoadSource.DataFrame:
return self.load_table_from_dataframe(load_input, destination, job_config=job_config)
# Load from file. See: https://cloud.google.com/bigquery/docs/loading-data-local
elif source == BigQueryLoadSource.File:
return self.load_table_from_filepath(load_input, destination, job_config=job_config)
# Load from GCS. See: https://cloud.google.com/bigquery/docs/loading-data-cloud-storage
elif source == BigQueryLoadSource.GCS:
return self.load_table_from_uri(load_input, destination, job_config=job_config)
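    # Illustrative call (argument values are assumptions, not Dagster API):
    #   client.load_table_from_source(
    #       BigQueryLoadSource.GCS, 'gs://some-bucket/data.csv',
    #       'some_dataset.some_table', job_config)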
@resource(config=bq_resource_config(), description='Dagster resource for connecting to BigQuery')
def bigquery_resource(context):
return BigQueryClient(context.resource_config.get('project'))
| 2.0625 | 2 |
modules/todo.py | kijimanh/multibud | 1 | 12767930 | <reponame>kijimanh/multibud
import command
import module
class TodoModule(module.Module):
name = 'To-do'
def on_load(self):
if 'todo' not in self.bot.config:
self.bot.config['todo'] = {}
print('Initialized to-do list data in config')
@command.desc('Add an item to the todo list')
@command.alias('t')
def cmd_todo(self, msg, args):
if not args:
return '__Provide an item to add to the todo list.__'
if args.startswith('list ') or args == "list":
return self.bot.cmd_todolist(msg, args[5:])
if args.startswith('del '):
return self.bot.cmd_tododel(msg, args[4:])
item = args
l_name = 'main'
if l_name not in self.bot.config['todo']:
self.bot.config['todo'][l_name] = []
self.bot.config['todo'][l_name].append(item)
self.bot.save_config()
idx = len(self.bot.config['todo'][l_name])
return f'Added item `{item}` as entry {idx}.'
@command.desc('Show the todo list')
@command.alias('tl')
def cmd_todolist(self, msg, l_name):
if not l_name:
l_name = 'main'
if l_name not in self.bot.config['todo']:
            return f'__List \'{l_name}\' doesn\'t exist.__'
if not self.bot.config['todo'][l_name]:
return '__Todo list is empty.__'
out = 'Todo list:'
for idx, item in enumerate(self.bot.config['todo'][l_name]):
out += f'\n {idx + 1}. {item}'
return out
@command.desc('Delete an item from the todo list')
@command.alias('td', 'tdd', 'tld', 'tr', 'trm', 'dt', 'done')
def cmd_tododel(self, msg, idx_str):
if not idx_str:
return '__Provide the entry number or entry text to delete.__'
lst = self.bot.config['todo']['main']
try:
idx = int(idx_str)
except ValueError:
try:
idx = lst.index(idx_str) + 1
except ValueError:
return '__Invalid entry number or text to delete.__'
l = len(lst)
if idx > l:
return f'__Entry number out of range, there are {l} entries.__'
idx -= 1
item = lst[idx]
del lst[idx]
self.bot.save_config()
return f'Item #{idx + 1} `{item}` deleted.'
| 2.625 | 3 |
weatherAppGUI.py | prakharsaxena1/Shape_AI_Project | 0 | 12767931 | <gh_stars>0
#!/bin/python3
# SHAPE AI PROJECT - Weather App(GUI)
# IMPORTS
import tkinter as tk
from tkinter import ttk
from tkinter.constants import LEFT, RIGHT
from datetime import datetime
import requests
# -- Windows only configuration --
try:
from ctypes import windll
windll.shcore.SetProcessDpiAwareness(1)
except:
pass
# -- End Windows only configuration --
# MAIN
API_KEY = "<KEY>" # from a throwaway account (Obviously)
# Log last successful API request
def logFile(dataX):
with open("logPreviousGUI.txt", 'w') as f:
f.write(dataX)
# Get data from API
def getDatafromAPI(city):
r = requests.get(
f'https://api.openweathermap.org/data/2.5/weather?q={city}&appid={API_KEY}'
)
return r.json()
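# A successful response is roughly shaped like this (abridged; field names per
# the OpenWeatherMap current-weather API, values hypothetical):
#   {"cod": 200, "main": {"temp": 293.15, "humidity": 60},
#    "weather": [{"description": "clear sky"}], "wind": {"speed": 3.6}}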
# Fetch the weather for the entered city and display the report in the output label
def showWeather():
city = str(cityVariable.get())
data = getDatafromAPI(city)
if data['cod'] != 200:
outLabel['text'] = data['message']
else:
cityTemp = ((data['main']['temp']) - 273.15)
weatherInfo = data['weather'][0]['description']
humidityInfo = data['main']['humidity']
windSpeed = data['wind']['speed']
degree_sign = u"\N{DEGREE SIGN}"
dataX = f'''\nWeather Stats for - {city.upper()} | {datetime.now().strftime("%d %b %Y | %I:%M:%S %p")}\n\t Current temperature is: {round(cityTemp,2)}{degree_sign}C\n\t Current weather desc : {weatherInfo}\n\t Current Humidity : {humidityInfo}%\n\t Current wind speed : {windSpeed}kmph\n'''
outLabel['text'] = dataX
logFile(dataX)
# Root window
root = tk.Tk()
root.geometry("580x420")
root.title("Weather App")
root.resizable(0, 0)
# Variables
cityVariable = tk.StringVar()
# Window title
titleLabel = ttk.Label(root, padding=5, text="Weather App")
titleLabel.config(font=("Comic Sans MS", 40))
titleLabel.grid(row=0, column=0, padx=(5), pady=(5), columnspan=4)
# City Label
cityNameLabel = ttk.Label(root, padding=5, text="City")
cityNameLabel.grid(row=1, column=0, padx=(5), pady=(5), ipadx=(5), ipady=(5), columnspan=2)
cityNameLabel.config(font=("Comic Sans MS", 16))
# Output Label
outLabel = ttk.Label(root, padding=5, text=" ")
outLabel.config(font=("Comic Sans MS", 15))
outLabel.grid(row=3, column=0, padx=(10), pady=(10), columnspan=4)
# City Entry
cityEntry = ttk.Entry(root, textvariable=cityVariable, width=50)
cityEntry.grid(row=1, column=2, padx=(10), pady=(10), ipadx=(10), ipady=(10), columnspan=2)
cityEntry.config(font=("Comic Sans MS", 16))
# Show weather button
showWeatherBtn = tk.Button(root, text="Report weather", command=showWeather)
showWeatherBtn.grid(row=2, column=0, padx=(10), pady=(10), ipadx=(10), ipady=(5), columnspan=4)
showWeatherBtn['font'] = ("Comic Sans MS", 15)
root.mainloop() | 3.03125 | 3 |
publication_runs/ecoli_models/ecolicore2comp_load_and_save_with_cobrapy_and_clean.py | klamt-lab/CommModelPy | 3 | 12767932 | <reponame>klamt-lab/CommModelPy
#!/usr/bin/env python3
#
# Copyright 2021 PSB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This scripts loads and cleans the EcoliCore2 SBML as given in its publication (Hädicke & Klamt, 2017) and saves it again with cobrapy.
This is done in order to gain a version which is not altered by cobrapy while loading it.
The "cleaning" contains steps such as setting 1000 flux bounds to inf.
References:
<NAME>., & <NAME>. (2017). EColiCore2: a reference network model of the central metabolism
of Escherichia coli and relationships to its genome-scale parent model.
Scientific reports, 7, 39647.
"""
import cobra
print("=>Loading and saving of EcoliCore2 while cleaning up some wrong reaction and metabolite ID parts")
print("Loading original EcoliCore2 SBML as given in its publication...")
model = cobra.io.read_sbml_model("publication_runs/ecoli_models/original_sbml_models/ecolicore2compressed.xml")
print("Set -1000/1000 bounds to -inf/inf...")
for reaction in model.reactions:
if reaction.upper_bound >= 1000:
reaction.upper_bound = float("inf")
if reaction.lower_bound <= -1000:
reaction.lower_bound = -float("inf")
print("Cleaning reaction ID parts...")
for reaction in model.reactions:
if "_DASH_" in reaction.id:
reaction.id = reaction.id.replace("_DASH_", "__")
if "_LPAREN_" in reaction.id:
reaction.id = reaction.id.replace("_LPAREN_", "_")
if "_RPAREN_" in reaction.id:
reaction.id = reaction.id.replace("_RPAREN_", "")
print("Delete boundary-condition-free exchange metabolites ending with _ex...")
boundary_free_metabolite_ids = [
x.id.replace("EX_", "") for x in model.reactions
if x.id.startswith("EX_") and x.id.endswith("_ex")
]
boundary_free_metabolites = []
for x in boundary_free_metabolite_ids:
try:
metabolite = model.metabolites.get_by_id(x)
boundary_free_metabolites.append(metabolite)
except KeyError:
continue
for metabolite in boundary_free_metabolites:
model.remove_metabolites([metabolite])
print("Rename wrong metabolite name parts...")
for metabolite in model.metabolites:
if "_DASH_" in metabolite.id:
metabolite.id = metabolite.id.replace("_DASH_", "__")
if "_LPAREN_" in metabolite.id:
metabolite.id = metabolite.id.replace("_LPAREN_", "_")
if "_RPAREN_" in metabolite.id:
metabolite.id = metabolite.id.replace("_RPAREN_", "")
print("Delete redundant glucose _ex metabolite...")
model.remove_metabolites([model.metabolites.get_by_id("glc__D_ex")])
print("Delete unused boundary-free metabolite exchange reactions...")
ex_reaction_ids = [x.id for x in model.reactions if x.id.startswith("EX_")]
for ex_reaction_id in ex_reaction_ids:
if model.reactions.get_by_id(ex_reaction_id).metabolites == {}:
model.remove_reactions([ex_reaction_id])
print("Add periplasmic metabolite for all metabolties in EX_ reactions ending with _c...")
ex_c_reaction_ids = [x.id for x in model.reactions if x.id.startswith("EX_") and x.id.endswith("_c")]
for ex_c_reaction_id in ex_c_reaction_ids:
reaction = model.reactions.get_by_id(ex_c_reaction_id)
metabolite = list(reaction.metabolites.keys())[0]
new_p_metabolite = cobra.Metabolite(id=metabolite.id.replace("_c", "_p"), compartment="p")
reaction.add_metabolites({
new_p_metabolite: 1
})
new_ex_reaction = cobra.Reaction(id="EX_"+new_p_metabolite.id,
lower_bound=reaction.lower_bound,
upper_bound=reaction.upper_bound)
new_ex_reaction.add_metabolites ({
new_p_metabolite: -1
})
model.add_reactions([new_ex_reaction])
reaction.id = "Transport_c_to_p_" + metabolite.id.replace("_c", "")
print("Get all reaction IDs ending with Ex and Up...")
ex_reaction_ids = [x.id for x in model.reactions if x.id.endswith("Ex")]
up_reaction_ids = [x.id for x in model.reactions if x.id.endswith("Up")]
all_exchange_ids = ex_reaction_ids + up_reaction_ids
class ExchangedMetabolite:
def __init__(self, metabolite_id, ex_reaction_id, up_reaction_id):
self.metabolite_id = metabolite_id
self.ex_reaction_id = ex_reaction_id
self.up_reaction_id = up_reaction_id
exchanged_metabolites = []
for exchange_id in all_exchange_ids:
ex_reaction_id = ""
up_reaction_id = ""
if exchange_id.endswith("Up"):
up_reaction_id = exchange_id
elif exchange_id.endswith("Ex"):
ex_reaction_id = exchange_id
else:
print(exchange_id)
print("Error 1!")
input()
reaction = model.reactions.get_by_id(exchange_id)
reaction_id_start = reaction.id[:2].lower()
exchanged_metabolite = None
for metabolite in reaction.metabolites:
if metabolite.id.startswith(reaction_id_start):
            if exchanged_metabolite is not None:
print("Error 1B!")
input()
exchanged_metabolite = metabolite
current_ids = [x.metabolite_id for x in exchanged_metabolites]
if exchanged_metabolite.id in current_ids:
element_index = current_ids.index(exchanged_metabolite.id)
if ex_reaction_id != "":
exchanged_metabolites[element_index].ex_reaction_id = ex_reaction_id
elif up_reaction_id != "":
exchanged_metabolites[element_index].up_reaction_id = up_reaction_id
else:
print("Error 2!")
input()
else:
if ex_reaction_id != "":
up_reaction_id = ""
elif up_reaction_id != "":
ex_reaction_id = ""
else:
print("Error 3!")
input()
exchanged_metabolites.append(ExchangedMetabolite(
metabolite_id=exchanged_metabolite.id,
ex_reaction_id=ex_reaction_id,
up_reaction_id=up_reaction_id
))
print("Create new EX_ metabolites with, if not given, new periplasmic intermediates...")
for exchanged_metabolite in exchanged_metabolites:
has_ex = exchanged_metabolite.ex_reaction_id != ""
has_up = exchanged_metabolite.up_reaction_id != ""
if exchanged_metabolite.metabolite_id.endswith("_p"):
if has_ex and has_up:
ex_reaction = model.reactions.get_by_id(exchanged_metabolite.ex_reaction_id)
up_reaction = model.reactions.get_by_id(exchanged_metabolite.up_reaction_id)
new_ex_reaction = cobra.Reaction(id="EX_"+exchanged_metabolite.metabolite_id,
lower_bound=-up_reaction.upper_bound,
upper_bound=ex_reaction.upper_bound)
new_ex_reaction.add_metabolites({
model.metabolites.get_by_id(exchanged_metabolite.metabolite_id): -1
})
model.add_reactions([new_ex_reaction])
model.remove_reactions([
ex_reaction,
up_reaction
])
elif has_ex:
ex_reaction = model.reactions.get_by_id(exchanged_metabolite.ex_reaction_id)
if len(list(ex_reaction.metabolites.keys())) > 1:
print("Error A1!")
input()
ex_reaction.id = "EX_" + exchanged_metabolite.metabolite_id
elif has_up:
up_reaction = model.reactions.get_by_id(exchanged_metabolite.up_reaction_id)
if len(list(up_reaction.metabolites.keys())) > 1:
print("Error A2!")
input()
up_reaction.id = "EX_" + exchanged_metabolite.metabolite_id
old_lower_bound = up_reaction.lower_bound
old_upper_bound = up_reaction.upper_bound
if exchanged_metabolite.metabolite_id.startswith("glc__"):
print("A")
up_reaction.lower_bound = -old_upper_bound
up_reaction.upper_bound = -old_lower_bound
up_reaction.add_metabolites({
model.metabolites.get_by_id(exchanged_metabolite.metabolite_id): -2
})
else:
print("Error Zeta!")
input()
elif exchanged_metabolite.metabolite_id.endswith("_c"):
new_p_metabolite_id = exchanged_metabolite.metabolite_id.replace("_c", "_p")
new_p_metabolite = cobra.Metabolite(id=new_p_metabolite_id, compartment="p")
model.add_metabolites(new_p_metabolite)
new_p_ex_reaction = cobra.Reaction(id="EX_" + new_p_metabolite_id)
new_p_ex_reaction.add_metabolites({
new_p_metabolite: -1
})
if has_ex and has_up:
ex_reaction = model.reactions.get_by_id(exchanged_metabolite.ex_reaction_id)
up_reaction = model.reactions.get_by_id(exchanged_metabolite.up_reaction_id)
new_p_ex_reaction.lower_bound = -up_reaction.upper_bound
new_p_ex_reaction.upper_bound = ex_reaction.upper_bound
ex_reaction.add_metabolites({
new_p_metabolite: 1
})
up_reaction.add_metabolites({
new_p_metabolite: -1
})
ex_reaction.id = "Transport_c_to_p_" + new_p_metabolite.id.replace("_p", "")
up_reaction.id = "Transport_p_to_c_" + new_p_metabolite.id.replace("_p", "")
elif has_ex:
ex_reaction = model.reactions.get_by_id(exchanged_metabolite.ex_reaction_id)
new_p_ex_reaction.lower_bound = 0
new_p_ex_reaction.upper_bound = ex_reaction.upper_bound
ex_reaction.add_metabolites({
new_p_metabolite: 1
})
ex_reaction.id = "Transport_c_to_p_" + new_p_metabolite.id.replace("_p", "")
elif has_up:
up_reaction = model.reactions.get_by_id(exchanged_metabolite.up_reaction_id)
new_p_ex_reaction.lower_bound = -up_reaction.upper_bound
up_reaction.add_metabolites({
new_p_metabolite: -1
})
up_reaction.id = "Transport_p_to_c_" + new_p_metabolite.id.replace("_p", "")
else:
print("Error Beta!")
input()
model.add_reactions([new_p_ex_reaction])
else:
print("Error Alpha!")
input()
print("Delete biomass metabolite and associated reaction...")
model.remove_metabolites([model.metabolites.get_by_id("Biomass")])
model.remove_reactions([model.reactions.get_by_id("EX_Biomass")])
print("Deactivate all C sources except of D-glucose...")
model.reactions.EX_succ_p.lower_bound = 0
model.reactions.EX_glyc_p.lower_bound = 0
model.reactions.EX_ac_p.lower_bound = 0
model.reactions.EX_glc__D_p.lower_bound = -10
print("Test cleaned model with FBA...")
with model:
print("Single model FBA solution:")
fba_solution = model.optimize()
print(model.summary())
for reaction in model.reactions:
if not reaction.id.startswith("EX_"):
continue
if fba_solution.fluxes[reaction.id] != 0:
print(f"{reaction.id}: {fba_solution.fluxes[reaction.id] }")
print("~~~")
print("Print exchange metabolites...")
in_string = ""
out_string = ""
for reaction in model.reactions:
if reaction.id.startswith("EX_"):
string_part = f'"{reaction.id.replace("EX_", "")}",\n'
if reaction.lower_bound < 0:
in_string += string_part
if reaction.upper_bound > 0:
out_string += string_part
print("In metabolites:")
print(in_string)
print("Out metabolites:")
print(out_string)
print("Saving SBML of cleaned EcoliCore2compressed model...")
cobra.io.write_sbml_model(model, "./publication_runs/ecoli_models/original_sbml_models_in_cleaned_form/ecc2comp_loaded_and_saved_by_cobrapy_cleaned.xml")
print("Done!")
print("")
for reaction in model.reactions:
if (reaction.lower_bound < 0) and (reaction.id.startswith("EX_")):
print(reaction.id)
| 2.28125 | 2 |
HackerRank/BasicDataTypes/ListComprehensions.py | OAPJ/Python | 0 | 12767933 | <gh_stars>0
if __name__ == '__main__':
x = int(input())
y = int(input())
z = int(input())
n = int(input())
ar = []
#p=0
for i in range(x+1):
for j in range(y+1):
for k in range(z+1):
if (i+j+k) != n:
ar.append([i,j,k])
print(ar)
"""for i in range ( x + 1 ):
for j in range( y + 1):
if i+j != n:
ar.append([])
ar[p] = [ i , j ]
p+=1
print ar """
| 3.1875 | 3 |
src/genie/libs/parser/nxos/tests/ShowSpanningTreeDetail/cli/equal/golden_output_1_expected.py | balmasea/genieparser | 204 | 12767934 | <filename>src/genie/libs/parser/nxos/tests/ShowSpanningTreeDetail/cli/equal/golden_output_1_expected.py
expected_output = {
'mstp': {
'mst_instances': {
0: {
'mst_id': 0,
'bridge_priority': 32768,
'bridge_sysid': 0,
'bridge_address': '00e3.04ff.ad03',
'topology_change_flag': False,
'topology_detected_flag': False,
'topology_changes': 0,
'time_since_topology_change': '142:22:13',
'times': {
'hold': 1,
'topology_change': 70,
'notification': 10,
'max_age': 40,
'hello': 10,
'forwarding_delay': 30,
},
'timers' : {
'hello': 0,
'topology_change': 0,
'notification': 0,
},
'root_of_the_spanning_tree': True,
'interfaces': {
'Port-channel30': {
'name': 'Port-channel30',
'bridge_assurance_inconsistent': True,
'vpc_peer_link_inconsistent': True,
'port_num': 4125,
'status': 'broken',
'cost': 500,
'port_priority': 128,
'port_identifier': '128.4125',
'designated_root_priority': 32768,
'designated_root_address': '0023.04ff.ad03',
'designated_bridge_priority': 61440,
'designated_bridge_address': '4055.39ff.fee7',
'designated_port_id': '128.4125',
'designated_path_cost': 0,
'timers': {
'message_age': 0,
'forward_delay': 0,
'hold': 0,
},
'port_type' : 'network',
'number_of_forward_transitions': 0,
'link_type': 'point-to-point',
'internal': True,
'peer_type': 'STP',
'pvst_simulation': True,
'counters': {
'bpdu_sent': 110,
'bpdu_received': 0
}
}
}
}
},
'hello_time': 10,
'max_age': 40,
'forwarding_delay': 30
}
}
| 1.578125 | 2 |
hyperglass/cache/base.py | blkmajik/hyperglass | 298 | 12767935 | <reponame>blkmajik/hyperglass<gh_stars>100-1000
"""Base Redis cache handler."""
# Standard Library
import re
import json
from typing import Any, Optional
# Third Party
from pydantic import SecretStr
class BaseCache:
"""Redis cache handler."""
def __init__(
self,
db: int,
host: str = "localhost",
port: int = 6379,
password: Optional[SecretStr] = None,
decode_responses: bool = True,
**kwargs: Any,
) -> None:
"""Initialize Redis connection."""
self.db: int = db
self.host: str = str(host)
self.port: int = port
self.password: Optional[SecretStr] = password
self.decode_responses: bool = decode_responses
self.redis_args: dict = kwargs
def __repr__(self) -> str:
"""Represent class state."""
return "HyperglassCache(db={}, host={}, port={}, password={})".format(
self.db, self.host, self.port, self.password
)
    def parse_types(self, value: Any) -> Any:
        """Recursively parse strings (and simple containers of strings) into standard Python types."""
def parse_string(str_value: str):
is_float = (re.compile(r"^(\d+\.\d+)$"), float)
is_int = (re.compile(r"^(\d+)$"), int)
            is_bool = (re.compile(r"^(True|true|False|false)$"), lambda v: v.lower() == "true")
is_none = (re.compile(r"^(None|none|null|nil|\(nil\))$"), lambda v: None)
is_jsonable = (re.compile(r"^[\{\[].*[\}\]]$"), json.loads)
for pattern, factory in (is_float, is_int, is_bool, is_none, is_jsonable):
if isinstance(str_value, str) and bool(re.match(pattern, str_value)):
str_value = factory(str_value)
break
return str_value
if isinstance(value, str):
value = parse_string(value)
elif isinstance(value, bytes):
value = parse_string(value.decode("utf-8"))
elif isinstance(value, list):
value = [parse_string(i) for i in value]
elif isinstance(value, tuple):
value = tuple(parse_string(i) for i in value)
elif isinstance(value, dict):
value = {k: self.parse_types(v) for k, v in value.items()}
return value
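    # Illustrative behavior (hypothetical session):
    #   cache.parse_types("3.14")      -> 3.14
    #   cache.parse_types(b"false")    -> False
    #   cache.parse_types("(nil)")     -> None
    #   cache.parse_types(["42", "x"]) -> [42, "x"]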
| 2.609375 | 3 |
__main__.py | DARKPOISON-yt/miyu-animals | 2 | 12767936 | <reponame>DARKPOISON-yt/miyu-animals
import animalapi as a
print(a.animal_data("dogs")) | 1.148438 | 1 |
saleor/account/migrations/0059_merge_20220221_1025.py | victor-abz/saleor | 1,392 | 12767937 | <filename>saleor/account/migrations/0059_merge_20220221_1025.py
# Generated by Django 3.2.12 on 2022-02-21 10:25
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("account", "0057_clear_user_addresses"),
("account", "0058_update_user_search_document"),
]
operations = []
| 1.171875 | 1 |
GUI.py | rodrickcalvin/Musical-note-identifier-Chromatic-tuner | 3 | 12767938 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/thanos/Qt/Guitar Tuna/guitarTunaDialog.ui'
#
# Created by: PyQt5 UI code generator 5.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(422, 271)
self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout.setObjectName("verticalLayout")
spacerItem = QtWidgets.QSpacerItem(20, 13, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.horizontalLayout_15 = QtWidgets.QHBoxLayout()
self.horizontalLayout_15.setObjectName("horizontalLayout_15")
self.curentKeyLabel_3 = QtWidgets.QLabel(Dialog)
font = QtGui.QFont()
font.setPointSize(48)
self.curentKeyLabel_3.setFont(font)
self.curentKeyLabel_3.setAlignment(QtCore.Qt.AlignCenter)
self.curentKeyLabel_3.setObjectName("curentKeyLabel_3")
self.horizontalLayout_15.addWidget(self.curentKeyLabel_3)
self.verticalLayout.addLayout(self.horizontalLayout_15)
self.horizontalLayout_16 = QtWidgets.QHBoxLayout()
self.horizontalLayout_16.setObjectName("horizontalLayout_16")
self.frequencyLabel_3 = QtWidgets.QLabel(Dialog)
self.frequencyLabel_3.setAlignment(QtCore.Qt.AlignCenter)
self.frequencyLabel_3.setObjectName("frequencyLabel_3")
self.horizontalLayout_16.addWidget(self.frequencyLabel_3)
self.verticalLayout.addLayout(self.horizontalLayout_16)
self.horizontalLayout_17 = QtWidgets.QHBoxLayout()
self.horizontalLayout_17.setObjectName("horizontalLayout_17")
spacerItem1 = QtWidgets.QSpacerItem(30, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_17.addItem(spacerItem1)
self.levelLeft_Progress_3 = QtWidgets.QProgressBar(Dialog)
self.levelLeft_Progress_3.setProperty("value", 0)
self.levelLeft_Progress_3.setTextVisible(False)
self.levelLeft_Progress_3.setInvertedAppearance(True)
self.levelLeft_Progress_3.setObjectName("levelLeft_Progress_3")
self.horizontalLayout_17.addWidget(self.levelLeft_Progress_3)
self.nstructionLabel_3 = QtWidgets.QLabel(Dialog)
self.nstructionLabel_3.setAlignment(QtCore.Qt.AlignCenter)
self.nstructionLabel_3.setObjectName("nstructionLabel_3")
self.horizontalLayout_17.addWidget(self.nstructionLabel_3)
self.levelRight_Progress_3 = QtWidgets.QProgressBar(Dialog)
self.levelRight_Progress_3.setProperty("value", 0)
self.levelRight_Progress_3.setTextVisible(False)
self.levelRight_Progress_3.setInvertedAppearance(False)
self.levelRight_Progress_3.setObjectName("levelRight_Progress_3")
self.horizontalLayout_17.addWidget(self.levelRight_Progress_3)
spacerItem2 = QtWidgets.QSpacerItem(30, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_17.addItem(spacerItem2)
self.verticalLayout.addLayout(self.horizontalLayout_17)
self.horizontalLayout_18 = QtWidgets.QHBoxLayout()
self.horizontalLayout_18.setObjectName("horizontalLayout_18")
spacerItem3 = QtWidgets.QSpacerItem(30, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_18.addItem(spacerItem3)
self.instrumentChooserComboxBox_3 = QtWidgets.QComboBox(Dialog)
self.instrumentChooserComboxBox_3.setObjectName("instrumentChooserComboxBox_3")
self.instrumentChooserComboxBox_3.addItem("")
self.instrumentChooserComboxBox_3.addItem("")
self.instrumentChooserComboxBox_3.addItem("")
self.instrumentChooserComboxBox_3.addItem("")
self.horizontalLayout_18.addWidget(self.instrumentChooserComboxBox_3)
spacerItem4 = QtWidgets.QSpacerItem(30, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_18.addItem(spacerItem4)
self.verticalLayout.addLayout(self.horizontalLayout_18)
self.horizontalLayout_19 = QtWidgets.QHBoxLayout()
self.horizontalLayout_19.setObjectName("horizontalLayout_19")
spacerItem5 = QtWidgets.QSpacerItem(30, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_19.addItem(spacerItem5)
self.startBtn_3 = QtWidgets.QPushButton(Dialog)
self.startBtn_3.setObjectName("startBtn_3")
self.horizontalLayout_19.addWidget(self.startBtn_3)
spacerItem6 = QtWidgets.QSpacerItem(30, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_19.addItem(spacerItem6)
self.verticalLayout.addLayout(self.horizontalLayout_19)
self.horizontalLayout_20 = QtWidgets.QHBoxLayout()
self.horizontalLayout_20.setObjectName("horizontalLayout_20")
spacerItem7 = QtWidgets.QSpacerItem(30, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_20.addItem(spacerItem7)
self.stopBtn_3 = QtWidgets.QPushButton(Dialog)
self.stopBtn_3.setObjectName("stopBtn_3")
self.horizontalLayout_20.addWidget(self.stopBtn_3)
self.exitBtn_3 = QtWidgets.QPushButton(Dialog)
self.exitBtn_3.setObjectName("exitBtn_3")
self.horizontalLayout_20.addWidget(self.exitBtn_3)
spacerItem8 = QtWidgets.QSpacerItem(30, 10, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_20.addItem(spacerItem8)
self.verticalLayout.addLayout(self.horizontalLayout_20)
spacerItem9 = QtWidgets.QSpacerItem(20, 13, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem9)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Chromatic Tuner"))
self.curentKeyLabel_3.setText(_translate("Dialog", "__"))
self.frequencyLabel_3.setText(_translate("Dialog", "(0 KHz)"))
self.nstructionLabel_3.setText(_translate("Dialog", "|"))
self.instrumentChooserComboxBox_3.setItemText(0, _translate("Dialog", "Acoustic/Electric Guitar"))
self.instrumentChooserComboxBox_3.setItemText(1, _translate("Dialog", "Bass Guitar"))
self.instrumentChooserComboxBox_3.setItemText(2, _translate("Dialog", "Ukulele"))
self.instrumentChooserComboxBox_3.setItemText(3, _translate("Dialog", "Piano"))
self.startBtn_3.setText(_translate("Dialog", "Start Tuning"))
self.stopBtn_3.setText(_translate("Dialog", "Stop"))
self.exitBtn_3.setText(_translate("Dialog", "Exit"))
| 1.601563 | 2 |
evidently/model_monitoring/data_drift.py | alex-zenml/evidently | 1 | 12767939 | <filename>evidently/model_monitoring/data_drift.py
from evidently.analyzers.data_drift_analyzer import DataDriftAnalyzer
from evidently.model_monitoring.monitoring import ModelMonitor, ModelMonitoringMetric
class DataDriftMetrics:
p_value = ModelMonitoringMetric("data_drift:p_value", ["feature", "feature_type"])
dataset_drift = ModelMonitoringMetric("data_drift:dataset_drift")
share_drifted_features = ModelMonitoringMetric("data_drift:share_drifted_features")
n_drifted_features = ModelMonitoringMetric("data_drift:n_drifted_features")
class DataDriftMonitor(ModelMonitor):
def monitor_id(self) -> str:
return "data_drift"
def analyzers(self):
return [DataDriftAnalyzer]
def metrics(self, analyzer_results):
data_drift_results = analyzer_results[DataDriftAnalyzer]
features = data_drift_results['cat_feature_names'] + data_drift_results['num_feature_names']
yield DataDriftMetrics.share_drifted_features.create(data_drift_results['metrics']['share_drifted_features'])
yield DataDriftMetrics.n_drifted_features.create(data_drift_results['metrics']['n_drifted_features'])
yield DataDriftMetrics.dataset_drift.create(data_drift_results['metrics']['dataset_drift'])
for feature in features:
feature_metric = data_drift_results['metrics'][feature]
yield DataDriftMetrics.p_value.create(
feature_metric['p_value'],
dict(feature=feature, feature_type=feature_metric['feature_type']))
| 2.34375 | 2 |
disteval_visualization.py | shrestha-bikash/disteval-1 | 0 | 12767940 | <reponame>shrestha-bikash/disteval-1
import math
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.lines as lines
import matplotlib.transforms as mtransforms
import matplotlib.text as mtext
from matplotlib.colors import LinearSegmentedColormap as lsCmap
# The secondary-structure (ss) generation code is not included yet, so fall back to an empty dict:
ss_dict_glbl = {}
# #######################################################################################
# #######################################################################################
# Complete Scatter Plot System by Jamie
## Numpy scheme helpers
def validate_dimensionality_and_return(ground_truth, prediction):
gt_dim = ground_truth.ndim
pd_dim = prediction.ndim
# Assert dimensionality
assert gt_dim == pd_dim, "ERROR: Ground truth and prediction dimensions do not match!"
assert gt_dim < 3, "ERROR: Ground truth dimension >= 3 !"
assert pd_dim < 3, "ERROR: Prediction dimension >= 3"
assert gt_dim > 0, "ERROR: Ground truth dimension < 1 !"
assert pd_dim > 0, "ERROR: Prediction dimension < 1 !"
return ground_truth.ndim
def validate_sequence_length_and_return(ground_truth, prediction):
assert ground_truth.shape == prediction.shape, "ERROR: Ground truth and prediction have different shapes!"
return ground_truth.shape[0]
def get_flattened(dmap):
if dmap.ndim == 1:
return dmap
elif dmap.ndim == 2:
return dmap[np.triu_indices_from(dmap, k=1)]
else:
assert False, "ERROR: the passes array has dimension not equal to 2 or 1!"
def pre_process_for_metrics(ground_truth, prediction):
_ = validate_sequence_length_and_return(ground_truth, prediction)
_ = validate_dimensionality_and_return(ground_truth, prediction)
gt_flat = get_flattened(ground_truth)
pd_flat = get_flattened(prediction)
return gt_flat, pd_flat
"""## Helpers"""
def get_separations(dmap):
t_indices = np.triu_indices_from(dmap, k=1)
separations = np.abs(t_indices[0] - t_indices[1])
return separations
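# e.g. for a 3x3 map the upper-triangle index pairs are (0,1), (0,2), (1,2),
# so get_separations returns array([1, 2, 1])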
# return a 1D boolean array indicating where the sequence separation in the
# upper triangle meets the threshold comparison
def get_sep_thresh_b_indices(dmap, thresh, comparator):
assert comparator in {'gt', 'lt', 'ge', 'le'}, "ERROR: Unknown comparator for thresholding!"
separations = get_separations(dmap)
if comparator == 'gt':
threshed = separations > thresh
elif comparator == 'lt':
threshed = separations < thresh
elif comparator == 'ge':
threshed = separations >= thresh
elif comparator == 'le':
threshed = separations <= thresh
return threshed
# return a 1D boolean array indicating where the distance in the
# upper triangle meets the threshold comparison
def get_dist_thresh_b_indices(dmap, thresh, comparator):
assert comparator in {'gt', 'lt', 'ge', 'le'}, "ERROR: Unknown comparator for thresholding!"
dmap_flat = get_flattened(dmap)
if comparator == 'gt':
threshed = dmap_flat > thresh
elif comparator == 'lt':
threshed = dmap_flat < thresh
elif comparator == 'ge':
threshed = dmap_flat >= thresh
elif comparator == 'le':
threshed = dmap_flat <= thresh
return threshed
"""## Function"""
# plot predicted distance (Y) against true distance (X) for locations where
# the separation and true-distance thresholds below are met
def plot_distance_correlation_double_alpha(true_map, pred_map,
min_sep=12, max_dist=[8, 20],
squeeze=True, color_sep=False,
num_alpha_buckets=5, protein_name=None,
pearson_8=0, pearson_20=0):
s_thresh = min_sep
d_thresh = max_dist
# flattened
true_map_flat = get_flattened(true_map)
pred_map_flat = get_flattened(pred_map)
# remove nans
_nan_indices_true = np.isnan(true_map_flat)
_nan_indices_pred = np.isnan(pred_map_flat)
    # drop a pair if either the true or the predicted value is nan
    nan_indices = _nan_indices_true | _nan_indices_pred
true_map_flat = true_map_flat[~nan_indices]
pred_map_flat = pred_map_flat[~nan_indices]
    # boolean indexers (filtered like the flattened maps so the lengths match)
    b_indices_sep_gt_min = get_sep_thresh_b_indices(true_map, s_thresh, 'gt')[~nan_indices]
# separation colormap (if applicable)
sep_cmap = lsCmap.from_list("",
[(0.00, "green"),
# (0.05, "green"),
(0.15, "xkcd:puke green"),
(0.5, "xkcd:dark red"),
(1.00, "xkcd:bright red")
]
)
# get separations and remove nan
separations = get_separations(true_map)
separations = separations[~nan_indices]
height = 8
width = 8
fig, ax = plt.subplots(1, 2, figsize=(width, height),
gridspec_kw={"width_ratios": d_thresh},
sharey=True)
# setup plot 1
# ##########################################################################
    b_indices_true_dist_8 = get_dist_thresh_b_indices(true_map, d_thresh[0], 'lt')[~nan_indices]
    # locations where the separation is > sep_thresh and the true distance < d_thresh
    b_indices_for_plot_1 = b_indices_sep_gt_min & b_indices_true_dist_8
# plot data
true_data_1 = true_map_flat[b_indices_for_plot_1]
pred_data_1 = pred_map_flat[b_indices_for_plot_1]
err_data_1 = np.abs(true_data_1 - pred_data_1)
# determine tick bounds
true_max_1 = np.ceil(true_data_1.max()).astype(int)
min_tick_1 = np.floor(min(true_data_1.min(), pred_data_1.min())).astype(int)
max_tick_1 = np.ceil(max(true_data_1.max(), pred_data_1.max())).astype(int)
# settings
x_ticks_1 = range(min_tick_1, d_thresh[0]+1)
# setup plot 2
# ###################################################################################
    b_indices_true_dist_20 = get_dist_thresh_b_indices(true_map, d_thresh[1], 'lt')[~nan_indices]
    # locations where the separation is > sep_thresh and the true distance < dist_thresh
    b_indices_for_plot_2 = b_indices_sep_gt_min & b_indices_true_dist_20
# plot data
true_data_2 = true_map_flat[b_indices_for_plot_2]
pred_data_2 = pred_map_flat[b_indices_for_plot_2]
err_data_2 = np.abs(true_data_2 - pred_data_2)
    # bucket edges: num_alpha_buckets buckets need num_alpha_buckets + 1 edges;
    # the -1 lower edge keeps zero-error points inside the first bucket
    err_bounds = np.linspace(-1, np.nanmax(err_data_2), num_alpha_buckets + 1)
    # SETUP ALPHA: geometric spacing so low-error points fade fastest,
    # clipped to matplotlib's valid [0, 1] alpha range
    alpha_list = np.clip(np.geomspace(0.15, 10, num_alpha_buckets), None, 1.0)
# determine tick bounds
true_max_2 = np.ceil(true_data_2.max()).astype(int)
min_tick_2 = np.floor(min(true_data_2.min(), pred_data_2.min())).astype(int)
max_tick_2 = np.ceil(max(true_data_2.max(), pred_data_2.max())).astype(int)
# settings
# x_ticks_2 = range(min_tick_2, d_thresh[1]+1)
# PLOT FOR D < 8A
# ###################################################################################
if color_sep:
separations_1 = separations[b_indices_for_plot_1]
sc_1 = ax[0].scatter(true_data_1, pred_data_1,
alpha=0.4, c=err_data_1, cmap=sep_cmap, vmax=20)
else:
ax[0].scatter(true_data_1, pred_data_1, alpha=0.55)
ax[0].plot([min_tick_1, true_max_1+1], [min_tick_1, true_max_1+1], ls='--', c='k')
ax[0].set_xlabel("True distance", fontsize=14)
ax[0].set_ylabel("Predicted distance", fontsize=14)
# _ = ax[0].set_xticks(x_ticks_1)
# _ = ax[0].set_title(f"True < {d_thresh[0]} : r = {get_pearson(true_data_1, pred_data_1):.2f}",
# fontsize=16)
_ = ax[0].text(s=f"r = {pearson_8:.4f}",
fontsize=14, x=4, y=2)
#######################
# PLOT FOR D < 20A
# ###################################################################################
if color_sep:
separations_2 = separations[b_indices_for_plot_2]
for i in range(len(err_bounds) - 1):
_plot_data_indices = (err_data_2 > err_bounds[i]) & (err_data_2 <= err_bounds[i+1])
_true_plot_data = true_data_2[_plot_data_indices]
_pred_plot_data = pred_data_2[_plot_data_indices]
_err_plot_data = err_data_2[_plot_data_indices]
sc_2 = ax[1].scatter(_true_plot_data, _pred_plot_data,
alpha=alpha_list[i], c=_err_plot_data, cmap=sep_cmap, vmin=0, vmax=20
)
# sc_2 = ax[1].scatter(true_data_2, pred_data_2,
# alpha=0.4, c=err_data_2, cmap=sep_cmap, vmax=20)
else:
ax[1].scatter(true_data_2, pred_data_2, alpha=0.55)
ax[1].plot([min_tick_2, true_max_2+1], [min_tick_2, true_max_2+1], ls='--', c='k')
ax[1].set_xlabel("True distance", fontsize=14)
# _ = ax[1].set_xticks(x_ticks_2)
# _ = ax[1].set_title(f"True < {d_thresh[1]} : r = {get_pearson(true_data_2, pred_data_2):.2f}",
# fontsize=16)
_ = ax[1].text(s=f"r = {pearson_20:.4f}",
fontsize=14, x=14, y=2)
if color_sep:
# colorbar is same for both
cb = fig.colorbar(sc_2,
ticks=np.arange(2, 22, 2).astype(int),
label="Absolute Error (in Å)",
)
# ########## GLOBAL ########
# ############################################
# y ticks are shared between the two
y_ticks = range(1, 30)
_ = ax[0].set_yticks(y_ticks)
_ = ax[1].set_yticks(y_ticks)
title_str = "Residue separations ≥ 12"
if protein_name is not None:
title_str += f" - {protein_name}"
fig.suptitle(title_str,
y=1.015, fontsize=16)
fig.tight_layout()
return fig
# #######################################################################################
# #######################################################################################
# #### Complete heatmap system by Jamie
### Generic helpers
def clip_distance_map(in_map, lower_thresh, upper_thresh):
_map = in_map.copy()
_map[in_map > upper_thresh] = upper_thresh
_map[in_map < lower_thresh] = lower_thresh
return _map
# just check if the argument is valid
# put in tiny wrapper since it is used often
def check_upper_lower(_arg):
assert _arg in {"upper", "lower"}
# Determine if a map is binary, that is, the unique non-nan values are 0, 1
def is_binary(in_map):
unq = np.unique(in_map)
unq = unq[~np.isnan(unq)]
unq = set(unq)
return unq == {0, 1}
# Get a binary contact map from a generic map
# Accepts: R-valued distances, contact probabilities, and binary contact maps
def get_binary_contact(in_map, thresh=None):
map_bin = in_map.copy()
if np.nanmax(in_map) > 2:
if thresh is None:
_thr = 8
else:
            _thr = thresh
map_bin[in_map < _thr] = 1
map_bin[in_map >= _thr] = 0
else:
if not is_binary(map_bin):
if thresh is None:
_thr = 0.5
else:
                _thr = thresh
map_bin[in_map >= _thr] = 1
map_bin[in_map < _thr] = 0
return map_bin
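# e.g. a distance map (nanmax > 2) is thresholded at 8 Angstroms by default, so
# np.array([[0., 5.], [5., 0.]]) becomes all contacts; a probability map is
# thresholded at 0.5; an already-binary {0, 1} map is returned unchanged.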
# Return maps of confusion metrics in this order:
# true_positive_map, false_positive_map, true_negative_map, false_negative_map
def get_confusion_maps(true_map, pred_map):
true_binary = get_binary_contact(true_map)
pred_binary = get_binary_contact(pred_map)
tp_map = np.full_like(true_binary, np.nan)
fp_map = np.full_like(true_binary, np.nan)
tn_map = np.full_like(true_binary, np.nan)
fn_map = np.full_like(true_binary, np.nan)
tp_map[(pred_binary == 1) & (true_binary == 1)] = 1.
fp_map[(pred_binary == 1) & (true_binary == 0)] = 1.
tn_map[(pred_binary == 0) & (true_binary == 0)] = 1.
fn_map[(pred_binary == 0) & (true_binary == 1)] = 1
return tp_map, fp_map, tn_map, fn_map
def get_absolute_error_map(true_map, pred_map):
return np.abs(true_map - pred_map)
def get_log10_error_map(true_map, pred_map):
_err = get_absolute_error_map(true_map, pred_map)
_err = np.log10(_err + 1)
_err[_err < 0] = 0
return _err
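# e.g. an absolute error of 0 Angstroms maps to log10(1) = 0 and an error of
# 9 Angstroms maps to log10(10) = 1, compressing large errors for display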
# blank the triangle NOT in the used_triangle argument
def blank_unused_triangle(_map, used_triangle):
check_upper_lower(used_triangle)
indices = np.tril_indices_from(_map)
_out_map = _map.copy()
_out_map[indices] = np.nan
if used_triangle == "lower":
_out_map = _out_map.T
return _out_map
"""### Build helpers"""
# build semantic color maps from list of form
# [
# (np.nan, mp_color, bool=label this Amstrong tick),
# (int: Amstrongs, mpl_color, bool=label this Amstrong tick),
# ...,
# (int: Amstrongs, mpl_color, bool=label this Amstrong tick),
# (np.inf: Amstrongs, mpl_color, bool=label this Amstrong tick)
# ]
# Note: np.nan and np.inf are special values for map minimum and maximum
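# Example parameter list (hypothetical colors/breakpoints for a distance map):
#   example_params = [
#       (np.nan, "black", True),   # map minimum
#       (6, "red", True),
#       (8, "orange", True),
#       (12, "yellow", False),
#       (np.inf, "white", True),   # map maximum
#   ]
#   dist_cmap = build_cmap_from_param_list(example_params, distance_map)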
def build_cmap_from_param_list(_cmap_param_list, _map):
norm_cols = lambda l, h, x: (x - l) / (h - l)
map_min = np.nanmin(_map)
map_max = np.nanmax(_map)
col_list = [(num, col) for num, col, _ in _cmap_param_list[1:-1] if col is not None]
col_list.insert(0, (map_min, _cmap_param_list[0][1]))
col_list.append((map_max, _cmap_param_list[-1][1]))
col_list = [(norm_cols(map_min, map_max, c), n) for (c, n) in col_list]
dist_cmap = lsCmap.from_list("", col_list)
return dist_cmap
# Wrapper to render a map in a single triangle of an axis
def render_map(_map, _cmap, _axis, _vmin, _vmax, upper_or_lower):
check_upper_lower(upper_or_lower)
_cm = matplotlib.cm.get_cmap(_cmap)
if _vmin is None:
_min = np.nanmin(_map)
else:
_min = _vmin
if _vmax is None:
_max = np.nanmax(_map)
else:
_max = _vmax
map_to_render = blank_unused_triangle(_map, used_triangle=upper_or_lower)
_ims = _axis.imshow(map_to_render, _cm, vmin=_min, vmax=_max,
aspect='auto')#, interpolation="antialiased")
return _ims
# call as one of final steps
# must construct ticks on the map axis first
def render_orthogonal_markers(_L, _axis, stride, upper_or_lower):
check_upper_lower(upper_or_lower)
# place at every other tick
if upper_or_lower == "upper":
_ticks = _axis.get_xticks()
else:
_ticks = _axis.get_yticks()
_orth_ticks = list(range(0, _L, stride))
_orth_ticks.pop(0)
# line style
_line_params = {
"ls": '-',
"color": "blue",
"lw": 2,
"alpha": 0.3
}
# render
for t in _orth_ticks:
if upper_or_lower == "upper":
_axis.axhline(t, t/_L, 1, **_line_params)
_axis.axvline(t, (1-t/_L), 1, **_line_params)
else:
_axis.axhline(t, 0, t/_L, **_line_params)
_axis.axvline(t, 0, (1-t/_L), **_line_params)
# diagonal lines at 6, 12, 24
def render_diagonal_markers(_L, _axis, upper_or_lower):
check_upper_lower(upper_or_lower)
# line style
_line_params = {
"ls": '--',
"lw": 2,
"color": "blue",
"alpha": 0.3
}
# render
for t in [6, 12, 24]:
if upper_or_lower == "upper":
_x_bounds = [t, _L-1]
_y_bounds = [1, _L - t]
else:
_x_bounds = [1, _L - t]
_y_bounds = [t, _L-1]
_axis.plot(_x_bounds, _y_bounds, **_line_params)
def build_right_color_bar(_up_ims, _axis, _ticks, _tick_lbls):
_vert_cbar = plt.colorbar(_up_ims, cax=_axis,
ticks=_ticks)
_vert_cbar.ax.set_yticklabels(_tick_lbls)
return _vert_cbar
def build_bottom_color_bar(_dn_ims, _axis, _ticks, _tick_lbls):
_hori_cbar = plt.colorbar(_dn_ims, cax=_axis,
ticks=_ticks, orientation="horizontal")
_hori_cbar.ax.set_xticklabels(_tick_lbls)
_hori_cbar.ax.invert_xaxis()
return _hori_cbar
def build_sequence_color_bar(_L, _axis, _cmap, _width, _ticks, _prot_name):
_cm = matplotlib.cm.get_cmap(_cmap)
_seq_arr = np.arange(0, _L)
_seq_arr = np.broadcast_to(_seq_arr, (_width, _L))
_seq_cbar = _axis.imshow(_seq_arr, _cm, aspect="auto")
_seq_title = "Color for residue indices"
if _prot_name is not None:
_seq_title += f" of {_prot_name}"
_axis.set_title(_seq_title, fontsize=14)
_axis.set_xticks(_ticks)
_axis.tick_params(axis='y', which='both', left=False, labelleft=False)
_axis.tick_params(axis='x', which='both', bottom=True, labelbottom=False)
return _seq_cbar
# put the sequence index color map on the main diagonal.
# without this main diagonal is black. may cover first couple neighbor diagonals
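# _ss_dict is assumed to map residue index (matched as i+1 below) to a
# secondary-structure code, e.g. {1: 'H', 2: 'H', 3: 'C', 4: 'E'}:
# H = helix (red), E = strand (green), C = coil (light grey); any other
# residue falls back to the sequence-index colormap.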
def render_main_diagonal(_L, _axis, _ss_dict, _cmap):
_diag_map = matplotlib.cm.get_cmap(_cmap)
for i in range(_L-1):
_color = _diag_map(float(i) / _L)
if i != 0:
if i+1 in _ss_dict and _ss_dict[i+1] == 'H': _color = 'red'
elif i+1 in _ss_dict and _ss_dict[i+1] == 'E': _color = 'green'
elif i+1 in _ss_dict and _ss_dict[i+1] == 'C': _color = "#eeeeee"
_axis.plot([i, i+1], [i, i+1],
# linewidth = max([-0.025 * _L + 8.5, 8]),
linewidth=8,
alpha = 1, color=_color
)
# various ticks and labels for various purposes
def get_strided_ticks(_L, _stride):
_ticks = list(range(0, _L + 1, _stride))
_tick_lbls = [str(t) for t in _ticks]
return _ticks, _tick_lbls
def get_probability_ticks():
_ticks = [0.0, 0.25, 0.5, 0.75, 1.0]
_tick_lbls = [f"{t:.2f}" for t in _ticks]
return _ticks, _tick_lbls
def get_absolute_error_ticks():
_ticks = [0, 5, 10, 15, 20]
_tick_lbls = [str(t) for t in _ticks]
return _ticks, _tick_lbls
def get_log10_error_ticks():
_ticks = np.arange(0, 1.25, 0.1)
_tick_lbls = [f"{t:.2f}" for t in _ticks]
return _ticks, _tick_lbls
def get_standard_distance_ticks(_map):
    _ticks = [np.nanmin(_map), 6, 8, 10, 12, 14, 16, 18, 20]
_tick_labels = [f"{_ticks[0]:.2f}", "6", "8", '', "12", '', "16", '', "20+"]
return _ticks, _tick_labels
# select the ticks
def find_ticks(kind=None, _map=None):
if kind is None:
return None
elif kind in {"distance", "true_distance", "pred_distance"}:
return get_standard_distance_ticks(_map)
elif kind in {"probability", "pred_contact"}:
return get_probability_ticks()
elif kind == "absolute_error":
return get_absolute_error_ticks()
elif kind == "log10_error":
return get_log10_error_ticks()
else:
return None
# select the label for x/y of main heatmap axis
def find_map_label(kind=None, sub_kind=None):
    if kind is None:
return None
elif kind == "distance":
return "Distance (in Å)"
elif kind == "true_distance":
return "True Distance (in Å)"
elif kind == "pred_distance":
return "Predicted Distance (in Å)"
elif kind == "true_contact":
return "True Contacts"
elif kind in {"probability", "pred_contact"}:
return "Predicted Contact Probabilities"
elif kind == "absolute_error":
return "Absolute Error (in Å)"
elif kind == "log10_error":
return r"log$_{10}($Absolute Error$)$"
elif kind == "confusion":
return "Confusion Map\n(green = TP, red = FP, blue = FN"
else:
return None
# construct ticks and labels from a semantic color list as above
def get_cbar_ticks_and_labels(_cmap_param_list, _map, capped=True):
map_min = np.nanmin(_map)
map_max = np.nanmax(_map)
    # get ticks from the param list, pinning the endpoints to the map range
    _ticks = [n for n, _, _ in _cmap_param_list]
    _ticks[0] = map_min
    _ticks[-1] = map_max
# get labels
_tick_lbls = [str(n) if b else '' for n, _, b in _cmap_param_list]
_tick_lbls[0] = f"{map_min:.3f}"
max_lbl = str(int(map_max))
if capped:
max_lbl += '+'
_tick_lbls[-1] = max_lbl
return _ticks, _tick_lbls
# put ticks and labels on the heatmap, color main diagonal black
def decorate_heatmap(_L, _axis, _ticks, _tick_lbls,
_right_label, _bottom_label,
use_upper=True, use_lower=True):
_axis.set_xticks(_ticks)
_axis.set_yticks(_ticks)
_identity = np.eye(_L)
_identity[_identity == 0] = np.nan
_axis.imshow(_identity, "binary_r", aspect="auto")
_axis.tick_params(which='both',
left=use_lower, labelleft=use_lower,
right=False, labelright=False,
top=use_upper, labeltop=use_upper,
bottom=False, labelbottom=False
)
if _right_label is not None:
_axis.yaxis.set_label_position("right")
_axis.set_ylabel(_right_label, labelpad=16)
if _bottom_label is not None:
_axis.set_xlabel(_bottom_label, labelpad=16)
if not use_upper:
_axis.spines["top"].set_visible(False)
_axis.spines["right"].set_visible(False)
if not use_lower:
_axis.spines["bottom"].set_visible(False)
_axis.spines["left"].set_visible(False)
"""### Inner Heatmap Build Function"""
def build_full_heatmap(upper_map=None, lower_map=None,
upper_type=None, lower_type=None,
protein_name=None,
seq_cmap=None,
lower_cmap=None, upper_cmap=None,
u_vmin=None, u_vmax=None,
l_vmin=None, l_vmax=None,
use_right_bar=False,
use_bottom_bar=False,
use_main_diagonal=False,
ss_dict=None
):
# derive sequence length
if upper_map is not None:
seq_len = len(upper_map)
if lower_map is not None:
seq_len = len(lower_map)
# Get upper map color map
_uvmin = u_vmin
_uvmax = u_vmax
if upper_cmap is not None:
# case for the semantic coloring list
if isinstance(upper_cmap, list):
u_cm = build_cmap_from_param_list(upper_cmap, upper_map)
_uvmin = None
_uvmax = None
else:
u_cm = matplotlib.cm.get_cmap(upper_cmap)
_uvmin = u_vmin
_uvmax = u_vmax
upper_label = find_map_label(upper_type)
# Get lower map color map
_lvmin = l_vmin
_lvmax = l_vmax
if lower_cmap is not None:
# case for the semantic coloring list
if isinstance(lower_cmap, list):
l_cm = build_cmap_from_param_list(lower_cmap, lower_map)
_lvmin = None
_lvmax = None
else:
l_cm = matplotlib.cm.get_cmap(lower_cmap)
lower_label = find_map_label(lower_type)
# ############################################################################
# Build Figure and Gridspec
hm_fig = plt.figure(figsize=(8, 8), constrained_layout=True)
_wr = None
_hr = [2, 96]
_nbars = 0
_ncols = 1
_nrows = 2
if use_right_bar:
_wr = [96, 4]
_ncols += 1
if use_bottom_bar:
_hr += [4]
_nrows += 1
hm_gs = hm_fig.add_gridspec(_nrows, _ncols,
width_ratios=_wr,
height_ratios=_hr,
wspace=0, hspace=0
)
# Place sequence and heatmap subplots
sbar_ax = hm_fig.add_subplot(hm_gs[0, 0])
cmap_ax = hm_fig.add_subplot(hm_gs[1, 0], sharex=sbar_ax)
# Calculate tick strides from length
if seq_len < 100:
tick_stride = 10
elif seq_len <= 200:
tick_stride = 20
elif seq_len <= 400:
tick_stride = 40
elif seq_len <= 1000:
tick_stride = 100
elif seq_len <= 2000:
tick_stride = 200
elif seq_len <= 5000:
tick_stride = 500
else:
tick_stride = 1000
# Get ticks for sequence / heatmap
idx_ticks, idx_tick_lbls = get_strided_ticks(seq_len, tick_stride)
# Decorate heatmap - ticks, labels
    _use_upp = upper_type is not None
    _use_low = lower_type is not None
r_lbl = find_map_label(upper_type)
b_lbl = find_map_label(lower_type)
decorate_heatmap(seq_len, cmap_ax, idx_ticks, idx_tick_lbls,
_right_label=r_lbl, _bottom_label=b_lbl,
use_upper=_use_upp, use_lower=_use_low
)
# Place additional components
build_sequence_color_bar(seq_len, sbar_ax, seq_cmap, 2, idx_ticks, protein_name)
if upper_type is not None:
if upper_type == "absolute_error":
_up_map = get_absolute_error_map(upper_map, lower_map)
elif upper_type == "log10_error":
_up_map = get_log10_error_map(upper_map, lower_map)
elif upper_type == "confusion":
_up_map = upper_map
_tp, _fp, _, _fn = get_confusion_maps(lower_map, upper_map)
_ = render_map(_tp, "summer_r", cmap_ax, 0, 1, "upper")
_ = render_map(_fp, "autumn_r", cmap_ax, 0, 1, "upper")
_ = render_map(_fn, "cool_r", cmap_ax, 0, 1, "upper")
else:
_up_map = upper_map
if upper_type != "confusion":
upper_ims = render_map(_up_map, u_cm, cmap_ax, _uvmin, _uvmax, "upper")
# Markers
render_orthogonal_markers(seq_len, cmap_ax, tick_stride*2, "upper")
render_diagonal_markers(seq_len, cmap_ax, "upper")
    else:
        upper_ims = None
# Lower heatmap
if lower_type is not None:
if lower_type == "absolute_error":
_low_map = get_absolute_error_map(upper_map, lower_map)
elif lower_type == "log10_error":
_low_map = get_log10_error_map(upper_map, lower_map)
elif lower_type == "confusion":
_low_map = lower_map
_tp, _fp, _, _fn = get_confusion_maps(lower_map, upper_map)
_ = render_map(_tp, "summer_r", cmap_ax, 0, 1, "lower")
_ = render_map(_fp, "autumn_r", cmap_ax, 0, 1, "lower")
_ = render_map(_fn, "cool_r", cmap_ax, 0, 1, "lower")
else:
_low_map = lower_map
if lower_type != "confusion":
lower_ims = render_map(_low_map, l_cm, cmap_ax, _lvmin, _lvmax, "lower")
# Markers
render_orthogonal_markers(seq_len, cmap_ax, tick_stride*2, "lower")
render_diagonal_markers(seq_len, cmap_ax, "lower")
else:
lower_ims = None
# Handle rightside bar
if use_right_bar:
rbar_ax = hm_fig.add_subplot(hm_gs[1, 1])
right_ticks, right_tick_lbls = find_ticks(upper_type, upper_map)
_ = build_right_color_bar(upper_ims, rbar_ax,
right_ticks, right_tick_lbls
)
# Handle bottom bar
if use_bottom_bar:
bbar_ax = hm_fig.add_subplot(hm_gs[2, 0])
bottom_ticks, bottom_tick_lbls = find_ticks(lower_type, lower_map)
_ = build_bottom_color_bar(lower_ims, bbar_ax,
bottom_ticks, bottom_tick_lbls
)
if use_main_diagonal:
render_main_diagonal(seq_len, cmap_ax, ss_dict, seq_cmap)
return hm_fig
"""## Build parameter packs and find plot
* use this to determine your parameters from two simple arguments
* then unpack the parameter dictionary in the build heatmap function
* the protein_name is optional, if set to None it won't be printed
"""
# PARAMETER PACKS
def get_parameter_pack(plot_type, color_scheme='standard', protein_name=None):
if plot_type is None:
return None
true_dmap_cols = [
(np.nan, 'xkcd:light khaki', True),
(6, 'xkcd:light khaki', True),
(8, 'xkcd:dusty orange', True),
(10, 'xkcd:easter purple', False),
(12, "xkcd:robin's egg", True),
(14, 'xkcd:yellowish green', False),
(16, None, True),
(18, None, False),
(np.inf, 'xkcd:cool grey', True)
]
pred_dmap_cols = [
(np.nan, 'xkcd:light khaki', True),
(6, 'xkcd:light khaki', True),
(8, 'xkcd:faded orange', True),
(10, 'xkcd:pale violet', False),
(12, 'xkcd:pale aqua', True),
(14, 'xkcd:pale olive green', False),
(16, None, True),
(18, None, False),
(np.inf, 'xkcd:cool grey', True)
]
# Three color schemes
if color_scheme == "standard":
true_d_cm = "YlGn_r"
pred_d_cm = "OrRd_r"
prob_c_cm = "Reds"
log_er_cm = "gray_r"
abs_er_cm = "Reds"
seq_id_cm = "jet"
elif color_scheme == "semantic":
true_d_cm = true_dmap_cols
pred_d_cm = pred_dmap_cols
prob_c_cm = "Reds"
log_er_cm = "gray_r"
abs_er_cm = "Reds"
seq_id_cm = "jet"
elif color_scheme == "uniform":
true_d_cm = "plasma_r"
pred_d_cm = "viridis_r"
prob_c_cm = "cividis_r"
log_er_cm = "cividis_r"
abs_er_cm = "inferno_r"
seq_id_cm = "jet"
# Distance vs. Distance
_pp_dist_vs_dist = dict(
upper_type="pred_distance", lower_type="true_distance",
seq_cmap=seq_id_cm,
lower_cmap=true_d_cm, upper_cmap=pred_d_cm,
u_vmin=None, u_vmax=20,
l_vmin=None, l_vmax=20,
use_right_bar=True,
use_bottom_bar=True,
use_main_diagonal=True,
protein_name=protein_name
)
# Contact probabilities vs. true distance
_pp_con_vs_prob = dict(
upper_type="pred_contact", lower_type="true_distance",
seq_cmap=seq_id_cm,
lower_cmap=true_d_cm, upper_cmap=prob_c_cm,
u_vmin=0.0, u_vmax=1.0,
l_vmin=None, l_vmax=20,
use_right_bar=True,
use_bottom_bar=True,
use_main_diagonal=True,
protein_name=protein_name
)
# Only true distance
_pp_true_dist = dict(
upper_type="true_distance", lower_type=None,
seq_cmap=seq_id_cm,
lower_cmap=None, upper_cmap=true_d_cm,
u_vmin=None, u_vmax=20,
l_vmin=None, l_vmax=None,
use_right_bar=True,
use_bottom_bar=False,
use_main_diagonal=True,
protein_name=protein_name
)
# Only predicted distance
_pp_pred_dist = dict(
upper_type="pred_distance", lower_type=None,
seq_cmap=seq_id_cm,
lower_cmap=None, upper_cmap=pred_d_cm,
u_vmin=None, u_vmax=20,
l_vmin=None, l_vmax=None,
use_right_bar=True,
use_bottom_bar=False,
use_main_diagonal=True,
protein_name=protein_name
)
# Only contact probabilities
_pp_pred_prob = dict(
upper_type="probability", lower_type=None,
seq_cmap=seq_id_cm,
lower_cmap=None, upper_cmap=prob_c_cm,
u_vmin=0, u_vmax=1,
l_vmin=None, l_vmax=None,
use_right_bar=True,
use_bottom_bar=False,
use_main_diagonal=True,
protein_name=protein_name
)
# Log error vs. confusion
_pp_err_vs_conf = dict(
upper_type="log10_error", lower_type="confusion",
seq_cmap=seq_id_cm,
lower_cmap=None, upper_cmap=log_er_cm,
u_vmin=0, u_vmax=1.25,
l_vmin=0, l_vmax=1,
use_right_bar=True,
use_bottom_bar=False,
use_main_diagonal=True,
protein_name=protein_name
)
# Log error vs. absolute error
_pp_log_vs_abs = dict(
upper_type="log10_error", lower_type="absolute_error",
seq_cmap=seq_id_cm,
lower_cmap=abs_er_cm, upper_cmap=log_er_cm,
u_vmin=0, u_vmax=1.25,
l_vmin=0, l_vmax=None,
use_right_bar=True,
use_bottom_bar=True,
use_main_diagonal=True,
protein_name=protein_name
)
# Log error vs. true distance
_pp_err_vs_dist = dict(
upper_type="log10_error", lower_type="distance",
seq_cmap=seq_id_cm,
lower_cmap=true_d_cm, upper_cmap=log_er_cm,
u_vmin=0, u_vmax=1.25,
l_vmin=None, l_vmax=20,
use_right_bar=True,
use_bottom_bar=True,
use_main_diagonal=True,
protein_name=protein_name
)
# Only Log error
_pp_log_error = dict(
upper_type="log10_error", lower_type=None,
seq_cmap=seq_id_cm,
lower_cmap=None, upper_cmap=log_er_cm,
u_vmin=0, u_vmax=1.25,
l_vmin=None, l_vmax=None,
use_right_bar=True,
use_bottom_bar=False,
use_main_diagonal=True,
protein_name=protein_name
)
# Only Confusion
_pp_confusion = dict(
upper_type="confusion", lower_type=None,
seq_cmap=seq_id_cm,
lower_cmap=None, upper_cmap=None,
u_vmin=0, u_vmax=1,
l_vmin=None, l_vmax=None,
use_right_bar=False,
use_bottom_bar=False,
use_main_diagonal=True,
protein_name=protein_name
)
if plot_type == "distance_vs_distance":
pp_pack = _pp_dist_vs_dist
elif plot_type in {"distance_vs_probability", "distance_vs_contact"}:
pp_pack = _pp_con_vs_prob
elif plot_type == "only_pred_distance":
pp_pack = _pp_pred_dist
elif plot_type == "only_true_distance":
pp_pack = _pp_true_dist
elif plot_type == "only_probability":
pp_pack = _pp_pred_prob
elif plot_type == "log_error_vs_confusion":
pp_pack = _pp_err_vs_conf
elif plot_type == "log_error_vs_abs_error":
pp_pack = _pp_log_vs_abs
elif plot_type == "only_log_error":
pp_pack = _pp_log_error
elif plot_type == "only_confusion":
pp_pack = _pp_confusion
elif plot_type == "log_error_vs_true_distance":
pp_pack = _pp_err_vs_dist
return pp_pack
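# Usage sketch (map variable names are illustrative): pick a pack from two
# arguments and unpack it into the heatmap builder, as the docstring above
# describes:
#   pack = get_parameter_pack("distance_vs_distance", color_scheme="semantic")
#   fig = build_full_heatmap(upper_map=pred_map, lower_map=true_map,
#                            ss_dict=None, **pack)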
# #######################################################################################
# #######################################################################################
# ########### CHORD DIAGRAMS
def dmap2chordimage(ND = None, chord_file = None, ss={}, cmap_for_chord='jet'):
chord_fig, chord_ax = plt.subplots(1, 1, figsize=(16, 16))
cb_map = np.copy(ND)
L = len(ND)
size_of_each_protein_in_chord_map = 360 / L
rnum2angle = {}
rnum2xy = {}
rnum2indexxy = {}
num_2_index_xy = {}
for i in range(L + 1):
a1 = round((i-1) * 2 * math.pi / L, 5)
a2 = round(i * 2 * math.pi / L, 5)
x = round(0.50 + 0.495 * math.cos((a1+a2)/2), 5)
y = round(0.50 + 0.50 * math.sin((a1+a2)/2), 5)
rnum2angle[str(i) + "a1"] = str(a1)
rnum2angle[str(i) + "a2"] = str(a2)
rnum2xy[str(i) + "x"] = x
rnum2xy[str(i) + "y"] = y
lx = round(0.47 + 0.52 *math.cos((a1+a2)/2) + 0.04 * math.cos(i * 2 * math.pi / L), 5)
ly = round(0.49 + 0.52 * math.sin((a1+a2)/2) + 0.02 * math.sin(i * 2 * math.pi / L), 5)
rnum2indexxy[str(i) + "x"] = lx
rnum2indexxy[str(i) + "y"] = ly
num_2_index_xy[i-1] = {"x": lx, "y": ly}
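    # map distances to chord intensities: NaNs become a huge distance
    # (near-zero intensity), distances are clamped at 4 Angstroms, and the
    # inversion makes closer residue pairs draw stronger chords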
cb_map[np.isnan(cb_map)] = 1000.0
cb_map[cb_map < 4.0] = 4.0
cb_map = 4.0 / cb_map
## ######### ADDED BY JAMIE
cmap = matplotlib.cm.get_cmap(cmap_for_chord)
## ######### DONE WITH ADDITION BY JAMIE
pair_n_intensity = {}
for i in range(L):
for j in range(i, L):
# Only show the strong connections
if cb_map[i, j] < 0.1: continue
# Skip local connections
if abs(i - j ) < 6: continue
pair_n_intensity[str(i+1) + ' ' + str(j+1)] = cb_map[i, j] * cb_map[i, j]
tokeep = {}
keepcount = 0
for pair, intensity in reversed(sorted(pair_n_intensity.items(), key = lambda x: x[1])):
tokeep[pair + ' ' + str(intensity)] = int(pair.split()[0])
keepcount += 1
if keepcount > 5 * L: break
    for pair_key, ii in sorted(tokeep.items(), key=lambda x: x[1]):
        i, j, intensity = pair_key.split()
intensity = float(intensity)
chord_ax.plot([rnum2xy[str(i) + "x"], rnum2xy[str(j) + "x"]], [rnum2xy[str(i) + "y"], rnum2xy[str(j) + "y"]],
linewidth = 6 * intensity, color= cmap(float(i) / L), alpha = intensity, zorder = -1)
for i in range(L):
mycolor = cmap(float(i) / L)
if i == 0: mycolor = cmap(float(i) / L)
e1 = matplotlib.patches.Arc(xy=(.5, .5), width=1, height=1, linewidth=12, angle=i * size_of_each_protein_in_chord_map,
theta2=size_of_each_protein_in_chord_map, color=mycolor)
chord_ax.add_patch(e1)
        step = max(1, L // 20)  # guard against modulo-by-zero for short sequences
if i % step != 0: continue
if i > L - step/2: continue
chord_ax.text(num_2_index_xy[i]["x"], num_2_index_xy[i]["y"], i+1, fontsize=32, color= cmap(float(i) / L))
for i in range(L):
mycolor = cmap(float(i) / L)
if i+1 in ss and ss[i+1] == 'H': mycolor = 'red'
if i+1 in ss and ss[i+1] == 'E': mycolor = 'green'
if i+1 in ss and ss[i+1] == 'C': mycolor = "#eeeeee"
if i == 0: mycolor = cmap(float(i) / L)
e1 = matplotlib.patches.Arc(xy=(.5, .5), width=1, height=1, linewidth=7, angle=i * size_of_each_protein_in_chord_map,
theta2=size_of_each_protein_in_chord_map, color=mycolor)
chord_ax.add_patch(e1)
        step = max(1, L // 20)  # guard against modulo-by-zero for short sequences
if i % step != 0:
continue
if i > L - step/2:
continue
chord_ax.text(num_2_index_xy[i]["x"], num_2_index_xy[i]["y"], i+1, fontsize=32, color= cmap(float(i) / L))
chord_ax.axis('off')
return chord_fig
# #######################################################################################
# #######################################################################################
# Create the visualizations
def do_visualization(script_args, native_basename, basename, ND, D, NC, C, pearson_8=0, pearson_20=0):
# Cases: Have ND & D:
# heatmap: distance_vs_distance
# errormap: log_error_vs_true_distance
# Have ND & C:
# heatmap: distance_vs_probability
# errormap: only_confusion
# Have only D (or ND):
# heatmap: only_pred_distance (or only_true_distance)
# errormap: None
# Have C:
# heatmap: only_pred_prob
# errormap: None
# Have only NC: *not possible / supported*
# native_basename : filename for native map
# basename : filename for prediction
# ss : filename for secondary structure
print("\n\n")
if ND is not None:
ND_clipped = clip_distance_map(ND, 3.5, 20)
else:
ND_clipped = None
if D is not None:
D_clipped = clip_distance_map(D, 3.5, 20)
else:
D_clipped = None
# Decode colorscheme argument
    if script_args.color_scheme == 1: _col_scheme = "standard"
    elif script_args.color_scheme == 2: _col_scheme = "semantic"
    elif script_args.color_scheme == 3: _col_scheme = "uniform"
    else: _col_scheme = "standard"  # fall back for unrecognized values
# Heatmap parameter packs
hm_pack = None
er_pack = None
# Filenames
hm_file_name = None
er_file_name = None
if ND_clipped is not None:
if D_clipped is not None:
hm_low_map = ND_clipped
hm_upp_map = D_clipped
hm_pack = get_parameter_pack(plot_type="distance_vs_distance", color_scheme=_col_scheme)
er_pack = get_parameter_pack(plot_type="log_error_vs_abs_error", color_scheme=_col_scheme)
hm_file_name = native_basename + '.vs.' + basename + '.heatmap.' + script_args.vis_filetype
er_file_name = native_basename + '.vs.' + basename + '.errormap.' + script_args.vis_filetype
print("Visualizing true distances vs. predicted distances, saving to:", hm_file_name)
print("Visualizing errors as log(absolute_error) vs. absolute error, saving to:", er_file_name)
elif C is not None:
hm_low_map = ND_clipped
hm_upp_map = C
hm_pack = get_parameter_pack(plot_type="distance_vs_probability", color_scheme=_col_scheme)
er_pack = get_parameter_pack(plot_type="only_confusion", color_scheme=_col_scheme)
hm_file_name = native_basename + '.vs.' + basename + '.heatmap.' + script_args.vis_filetype
er_file_name = native_basename + '.vs.' + basename + '.errormap.' + script_args.vis_filetype
print("Visualizing true distances vs. predicted contacts, saving to:", hm_file_name)
print("Visualizing errors as confusion map, saving to:", er_file_name)
else:
hm_upp_map = ND_clipped
hm_low_map = None
hm_pack = get_parameter_pack(plot_type="only_true_distance", color_scheme=_col_scheme)
hm_file_name = native_basename + '.vs.' + basename + '.heatmap.' + script_args.vis_filetype
print("Visualizing true distances, saving to:", hm_file_name)
elif D_clipped is not None:
hm_upp_map = D_clipped
hm_low_map = None
hm_pack = get_parameter_pack(plot_type="only_pred_distance", color_scheme=_col_scheme)
hm_file_name = native_basename + '.vs.' + basename + '.heatmap.' + script_args.vis_filetype
print("Visualizing predicted distances, saving to:", hm_file_name)
elif C is not None:
hm_upp_map = C
hm_low_map = None
hm_pack = get_parameter_pack(plot_type="only_pred_prob", color_scheme=_col_scheme)
hm_file_name = native_basename + '.vs.' + basename + '.heatmap.' + script_args.vis_filetype
print("Visualizing predicted contacts, saving to:", hm_file_name)
else:
print("Nothing to visualize.")
# Make heatmap plots
if hm_pack is not None:
hm_fig = build_full_heatmap(upper_map=hm_upp_map, lower_map = hm_low_map, ss_dict=ss_dict_glbl, **hm_pack)
hm_fig.savefig(hm_file_name, bbox_inches='tight', format=script_args.vis_filetype)
if er_pack is not None:
er_fig = build_full_heatmap(upper_map=hm_upp_map, lower_map = hm_low_map, ss_dict=ss_dict_glbl, **er_pack)
er_fig.savefig(er_file_name, bbox_inches='tight', format=script_args.vis_filetype)
    # Make chord diagrams
    if hm_pack is None:
        return
    c_seq_cmap = hm_pack["seq_cmap"]
if ND is not None:
native_chord_fig_name = native_basename + '.chord_diagram.' + script_args.vis_filetype
native_chord_fig = dmap2chordimage(ND=ND, ss=ss_dict_glbl, cmap_for_chord=c_seq_cmap)
native_chord_fig.savefig(native_chord_fig_name, bbox_inches='tight', format=script_args.vis_filetype)
print("Saving chord diagram: predicted, to:", native_chord_fig_name)
if D is not None:
pred_chord_fig_name = basename + '.chord_diagram.' + script_args.vis_filetype
pred_chord_fig = dmap2chordimage(ND=D, ss=ss_dict_glbl, cmap_for_chord=c_seq_cmap)
pred_chord_fig.savefig(pred_chord_fig_name, bbox_inches='tight', format=script_args.vis_filetype)
print("Saving chord diagram: predicted, to:", pred_chord_fig_name)
# Make scatter plots
if (ND is not None) and (D is not None):
scatter_fig_name = native_basename + '.vs.' + basename + '.scatterplot.' + script_args.vis_filetype
scatter_fig = plot_distance_correlation_double_alpha(ND, D,
min_sep=12, max_dist=[8, 20],
squeeze=True, color_sep=True,
num_alpha_buckets=5, protein_name=None,
pearson_8=pearson_8, pearson_20=pearson_20)
scatter_fig.savefig(scatter_fig_name, bbox_inches='tight', format=script_args.vis_filetype)
print("Saving scatterplot to:", scatter_fig_name) | 2.375 | 2 |
btools/__init__.py | belzecue/building_tool | 0 | 12767941 | import bpy
from .road import register_road, unregister_road
from .building import register_building, unregister_building
bl_info = {
"name": "Building Tools",
"author": "<NAME> (ranjian0), <NAME> (luckykadam), Marcus (MCrafterzz)",
"version": (1, 0, 6),
"blender": (2, 80, 0),
"location": "View3D > Toolshelf > Building Tools",
"description": "Building Creation Tools",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Mesh",
}
class BTOOLS_PT_road_tools(bpy.types.Panel):
bl_label = "Road Tools"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = "Building Tools"
def draw(self, context):
layout = self.layout
# Draw Operators
# ``````````````
col = layout.column(align=True)
col.operator("btools.add_road")
col.operator("btools.finalize_road")
col = layout.column(align=True)
col.operator("btools.add_array")
col.operator("btools.finalize_array")
class BTOOLS_PT_building_tools(bpy.types.Panel):
bl_label = "Building Tools"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = "Building Tools"
def draw(self, context):
layout = self.layout
# Draw Operators
# ``````````````
col = layout.column(align=True)
col.operator("btools.add_floorplan")
row = col.row(align=True)
row.operator("btools.add_floors")
row.operator("btools.add_roof")
col = layout.column(align=True)
col.operator("btools.add_balcony")
col.operator("btools.add_stairs")
col = layout.column(align=True)
row = col.row(align=True)
row.operator("btools.add_window")
row.operator("btools.add_door")
col.operator("btools.add_multigroup")
col.operator("btools.add_fill")
col = layout.column(align=True)
col.operator("btools.add_custom")
col.prop(context.scene, "btools_custom_object", text="")
class BTOOLS_PT_material_tools(bpy.types.Panel):
bl_label = "Material Tools"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = "Building Tools"
bl_options = {"DEFAULT_CLOSED"}
@classmethod
def poll(cls, context):
obj = context.object
return obj and obj.type == "MESH"
def draw(self, context):
layout = self.layout
ob = context.object
facemap = ob.face_maps.active
rows = 2
if facemap:
rows = 4
if not len(ob.face_maps):
return
layout.label(text="Face Maps")
row = layout.row()
args = ob, "face_maps", ob.face_maps, "active_index"
row.template_list("BTOOLS_UL_fmaps", "", *args, rows=rows)
col = row.column(align=True)
col.operator("object.face_map_add", icon="ADD", text="")
col.operator("object.face_map_remove", icon="REMOVE", text="")
col.separator()
col.operator("btools.face_map_clear", icon="TRASH", text="")
if ob.face_maps and (ob.mode == "EDIT" and ob.type == "MESH"):
row = layout.row()
sub = row.row(align=True)
sub.operator("object.face_map_assign", text="Assign")
sub.operator("object.face_map_remove_from", text="Remove")
sub = row.row(align=True)
sub.operator("object.face_map_select", text="Select")
sub.operator("object.face_map_deselect", text="Deselect")
if ob.face_maps:
face_map_index = ob.face_maps.active_index
face_map_material = ob.facemap_materials[face_map_index]
layout.label(text="UV Mapping")
col = layout.column()
row = col.row(align=True)
row.alignment = "LEFT"
row.prop(face_map_material, "auto_map", text="Auto")
row.prop(face_map_material, "uv_mapping_method", text="")
layout.label(text="Material")
layout.operator("btools.create_facemap_material")
layout.template_ID_preview(face_map_material, "material", hide_buttons=True)
classes = (BTOOLS_PT_road_tools, BTOOLS_PT_building_tools, BTOOLS_PT_material_tools)
def register():
register_road()
register_building()
for cls in classes:
bpy.utils.register_class(cls)
def unregister():
unregister_road()
unregister_building()
for cls in classes:
bpy.utils.unregister_class(cls)
if __name__ == "__main__":
import os
os.system("clear")
# -- custom unregister for script watcher
for tp in dir(bpy.types):
if "BTOOLS_" in tp:
bpy.utils.unregister_class(getattr(bpy.types, tp))
register()
| 2.203125 | 2 |
Hangman.py | AlejandroPenaloza/Hangman | 0 | 12767942 | #THIS IS HANGMAN
print('"Hangman"\nA game where you will try to guess which the hidden word is!')
print('\n')
word = input('Input the word to guess:\n')
while True:
    if word.isalpha():
        break
    else:
        word = input('Wrong input, type a valid word:\n')
word = word.lower()  # letters are guessed in lowercase
number_of_letters = len(word)
word_listed_letters = list(word)
print('\n'*15 + 'This space is given for hiding the word\n')
print('_ '*(number_of_letters - 1) + '_')
letters = [letter for letter in bytearray(range(97, 123)).decode("utf-8")]
letters_left = letters
#Function to check if the letter to use is valid
def isletter():
letter = input('')
while True:
if letter in letters_left:
break
else:
letter = input('Wrong input, type a valid letter:\n')
return letter
#Function to display the hangman according to the chances remaining
def hangman_display(c):
if c == 1:
print('-'*10)
elif c == 2:
print('|\n'*10)
hangman_display(1)
elif c == 3:
print('_'*6)
hangman_display(2)
elif c == 4:
print('_'*6)
print('| |\n'*3 + '|\n'*7)
elif c == 5:
print('_'*6)
print('| |\n'*3 + '| O\n' + '|\n'*6)
elif c == 6:
print('_' * 6)
print('| |\n' * 3 + '| O\n' + '| |' + '|\n' * 5)
elif c == 7:
print('_' * 6)
print('| |\n' * 3 + '| O\n' + '| |\n' + '| /|\n' + '|\n' * 5)
elif c == 8:
print('_' * 6)
print('| |\n' * 3 + '| O\n' + '| |\n' + '| /|\\\n' + '|\n' * 5)
elif c == 9:
print('_' * 6)
print('| |\n' * 3 + '| O\n' + '| |\n' + '| /|\\\n' + '/\n' + '|\n' * 4)
elif c == 10:
print('_' * 6)
print('| |\n' * 3 + '| O\n' + '| |\n' + '| /|\\\n' + '/\\\n' + '|\n' * 4)
count = 0        # wrong guesses so far
guessed = set()  # letters found in the word
while True:
    # To check if the input is a valid letter (one not tried before)
    while True:
        letter = input('\nInput letter to guess:\n')
        if letter in letters_left:
            break
        print('Wrong input, type a letter you have not tried yet.')
    letters_left.remove(letter)
    if letter in word:
        guessed.add(letter)
        # reveal every guessed letter, keep the rest hidden
        revealed = [w if w in guessed else '_' for w in word_listed_letters]
        print(' '.join(revealed))
        if '_' not in revealed:
            print('You won! The word was "' + word + '".')
            break
    else:
        count += 1
        hangman_display(count)
        if count == 10:
            print('You lost! The word was "' + word + '".')
            break
| 4.34375 | 4 |
examples/workflows/plot_simulate_evoked.py | mkhalil8/hnn-core | 0 | 12767943 | <gh_stars>0
"""
============================================
01. Simulate Event Related Potentials (ERPs)
============================================
This example demonstrates how to simulate a threshold level tactile
evoked response, as detailed in the `HNN GUI ERP tutorial
<https://jonescompneurolab.github.io/hnn-tutorials/erp/erp>`_,
using HNN-core. We recommend you first review the GUI tutorial.
The workflow below recreates an example of the threshold level tactile
evoked response, as observed in Jones et al. J. Neuroscience 2007 [1]_
(e.g. Figure 7 in the GUI tutorial), albeit without a direct comparison
to the recorded data.
"""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# sphinx_gallery_thumbnail_number = 3
import matplotlib.pyplot as plt
###############################################################################
# Let us import hnn_core
import hnn_core
from hnn_core import simulate_dipole, jones_2009_model
from hnn_core.viz import plot_dipole
###############################################################################
# Let us first create our default network and visualize the cells
# inside it.
net = jones_2009_model()
net.plot_cells()
net.cell_types['L5_pyramidal'].plot_morphology()
###############################################################################
# The network of cells is now defined, to which we add external drives as
# required. Weights are prescribed separately for AMPA and NMDA receptors
# (receptors that are not used can be omitted or set to zero). The possible
# drive types include the following (click on the links for documentation):
#
# - :meth:`hnn_core.Network.add_evoked_drive`
# - :meth:`hnn_core.Network.add_poisson_drive`
# - :meth:`hnn_core.Network.add_bursty_drive`
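#
# For instance, a rhythmic (bursty) drive could be added as sketched below.
# This call is illustrative only and not used in this example; the weight
# value is an assumption rather than a tuned parameter::
#
#     net.add_bursty_drive('alpha_prox', tstart=50., burst_rate=10.,
#                          location='proximal',
#                          weights_ampa={'L2_pyramidal': 5.4e-5})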
###############################################################################
# First, we add a distal evoked drive
weights_ampa_d1 = {'L2_basket': 0.006562, 'L2_pyramidal': .000007,
'L5_pyramidal': 0.142300}
weights_nmda_d1 = {'L2_basket': 0.019482, 'L2_pyramidal': 0.004317,
'L5_pyramidal': 0.080074}
synaptic_delays_d1 = {'L2_basket': 0.1, 'L2_pyramidal': 0.1,
'L5_pyramidal': 0.1}
net.add_evoked_drive(
'evdist1', mu=63.53, sigma=3.85, numspikes=1, weights_ampa=weights_ampa_d1,
weights_nmda=weights_nmda_d1, location='distal',
synaptic_delays=synaptic_delays_d1, event_seed=4)
###############################################################################
# Then, we add two proximal drives
weights_ampa_p1 = {'L2_basket': 0.08831, 'L2_pyramidal': 0.01525,
'L5_basket': 0.19934, 'L5_pyramidal': 0.00865}
synaptic_delays_prox = {'L2_basket': 0.1, 'L2_pyramidal': 0.1,
'L5_basket': 1., 'L5_pyramidal': 1.}
# all NMDA weights are zero; pass None explicitly
net.add_evoked_drive(
'evprox1', mu=26.61, sigma=2.47, numspikes=1, weights_ampa=weights_ampa_p1,
weights_nmda=None, location='proximal',
synaptic_delays=synaptic_delays_prox, event_seed=4)
# Second proximal evoked drive. NB: only AMPA weights differ from first
weights_ampa_p2 = {'L2_basket': 0.000003, 'L2_pyramidal': 1.438840,
'L5_basket': 0.008958, 'L5_pyramidal': 0.684013}
# all NMDA weights are zero; omit weights_nmda (defaults to None)
net.add_evoked_drive(
'evprox2', mu=137.12, sigma=8.33, numspikes=1,
weights_ampa=weights_ampa_p2, location='proximal',
synaptic_delays=synaptic_delays_prox, event_seed=4)
###############################################################################
# Now let's simulate the dipole, running 2 trials with the
# :class:`~hnn_core.parallel_backends.Joblib` backend.
# To run them in parallel we could set ``n_jobs`` to equal the number of
# trials. The ``Joblib`` backend allows running the simulations in parallel
# across trials.
from hnn_core import JoblibBackend
with JoblibBackend(n_jobs=2):
dpls = simulate_dipole(net, tstop=170., n_trials=2)
###############################################################################
# Rather than reading smoothing and scaling parameters from file, we recommend
# explicit use of the :meth:`~hnn_core.dipole.Dipole.smooth` and
# :meth:`~hnn_core.dipole.Dipole.scale` methods instead. Note that both methods
# operate in-place, i.e., the objects are modified.
window_len, scaling_factor = 30, 3000
for dpl in dpls:
dpl.smooth(window_len).scale(scaling_factor)
###############################################################################
# Plot the amplitudes of the simulated aggregate dipole moments over time
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(6, 6),
constrained_layout=True)
plot_dipole(dpls, ax=axes[0], layer='agg', show=False)
net.cell_response.plot_spikes_hist(ax=axes[1],
spike_types=['evprox', 'evdist'])
###############################################################################
# Now, let us try to make the exogenous driving inputs to the cells
# synchronous and see what happens. This is achieved by setting
# ``n_drive_cells=1`` and ``cell_specific=False`` when adding each drive.
net_sync = jones_2009_model()
n_drive_cells = 1
cell_specific = False
net_sync.add_evoked_drive(
'evdist1', mu=63.53, sigma=3.85, numspikes=1, weights_ampa=weights_ampa_d1,
weights_nmda=weights_nmda_d1, location='distal', n_drive_cells=n_drive_cells,
cell_specific=cell_specific, synaptic_delays=synaptic_delays_d1, event_seed=4)
net_sync.add_evoked_drive(
'evprox1', mu=26.61, sigma=2.47, numspikes=1, weights_ampa=weights_ampa_p1,
weights_nmda=None, location='proximal', n_drive_cells=n_drive_cells,
cell_specific=cell_specific, synaptic_delays=synaptic_delays_prox, event_seed=4)
net_sync.add_evoked_drive(
'evprox2', mu=137.12, sigma=8.33, numspikes=1,
weights_ampa=weights_ampa_p2, location='proximal', n_drive_cells=n_drive_cells,
cell_specific=cell_specific, synaptic_delays=synaptic_delays_prox, event_seed=4)
###############################################################################
# You may interrogate current values defining the spike event time dynamics by
print(net_sync.external_drives['evdist1']['dynamics'])
###############################################################################
# Finally, let's simulate this network. Rather than modifying the dipole
# object, this time we make a copy of it before smoothing and scaling.
dpls_sync = simulate_dipole(net_sync, tstop=170., n_trials=1)
trial_idx = 0
dpls_sync[trial_idx].copy().smooth(window_len).scale(scaling_factor).plot()
net_sync.cell_response.plot_spikes_hist()
###############################################################################
# References
# ----------
# .. [1] Jones, <NAME>., et al. "Neural correlates of tactile detection:
# a combined magnetoencephalography and biophysically based computational
# modeling study." Journal of Neuroscience 27.40 (2007): 10751-10764.
| 2.421875 | 2 |
core/migrations/0002_auto_20180418_1558.py | araceli24/ticross | 0 | 12767944 | # Generated by Django 2.0.4 on 2018-04-18 13:58
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='activityjournal',
name='time_lapse',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='activityjournal',
name='end',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AlterField(
model_name='activityjournal',
name='start',
field=models.DateTimeField(default=datetime.datetime(2018, 4, 18, 15, 58, 34, 603734)),
),
migrations.AlterField(
model_name='registry',
name='end',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AlterField(
model_name='registry',
name='start',
field=models.DateTimeField(default=datetime.datetime(2018, 4, 18, 15, 58, 34, 605193)),
),
]
| 1.703125 | 2 |
memorious/logic/stage.py | x0rzkov/memorious | 0 | 12767945 | from importlib import import_module
from servicelayer.extensions import get_entry_point
from memorious.model import Crawl
class CrawlerStage(object):
"""A single step in a data processing crawler."""
def __init__(self, crawler, name, config):
self.crawler = crawler
self.name = name
self.config = config
self.method_name = config.get('method')
self.params = config.get('params') or {}
self.handlers = config.get('handle') or {}
@property
def method(self):
# method A: via a named Python entry point
func = get_entry_point('memorious.operations', self.method_name)
if func is not None:
return func
# method B: direct import from a module
if ':' not in self.method_name:
raise ValueError("Unknown method: %s", self.method_name)
package, method = self.method_name.rsplit(':', 1)
module = import_module(package)
return getattr(module, method)
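    # Example stage config (values hypothetical): ``method`` is either an
    # entry point registered under 'memorious.operations' (method A) or a
    # module path and function name separated by a colon (method B), e.g.
    # ``mypackage.operations:parse_page``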
@property
def op_count(self):
"""Total operations performed for this stage"""
return Crawl.op_count(self.crawler, self)
def __str__(self):
return self.name
def __repr__(self):
return '<CrawlerStage(%r, %s)>' % (self.crawler, self.name)
| 2.640625 | 3 |
main.py | ParkHyeonSeong/FAST_API_STUDY | 0 | 12767946 | from fastapi import FastAPI, Request, Form, Header
from fastapi.responses import HTMLResponse, RedirectResponse
from fastapi.responses import JSONResponse
from fastapi.responses import Response
from fastapi.staticfiles import StaticFiles # static files
from fastapi.templating import Jinja2Templates # templating
from pydantic import BaseModel # data modeling
from typing import Optional # optional fields
from urllib import parse # URL-encoding data
import auth # import the authentication module
class Item(BaseModel):
user_id : str = None
user_pwd : str = None
app = FastAPI()
app.mount("/static", StaticFiles(directory="static"), name = "static")
templates = Jinja2Templates(directory="templates")
@app.get('/')
async def page(request : Request):
return templates.TemplateResponse("/index.html", {"request":request}) # 기본화면
@app.post('/')
async def page(request : Request):
return templates.TemplateResponse("/index.html", {"request":request}) # 기본화면
@app.get('/login')
async def login(request : Request):
id = ""
pwd = ""
return templates.TemplateResponse("/login.html", {"request":request, "id":id, "pwd":<PASSWORD>, "token": "0"}) # 로그인화면
@app.post('/login', response_class=HTMLResponse)
async def login_auth(request : Request, response : Response, id : str = Form(...), pwd : str = Form(...), x_token : Optional[str] = Header(None)):
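    # FastAPI reads the X-Token request header into x_token via Header(None)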
info = {"id" : id, "pwd" : <PASSWORD>}
result = auth.user_login_compare(info["id"], info["pwd"])
if result == "0" or result == "2":
return templates.TemplateResponse("/login.html", {"request":request, "id":id, "pwd":<PASSWORD>, "error":"로그인 실패"})
else :
content = {"msg":"msg"}
headers = {"Token":result}
# response.headers["X-Token"] = result
return templates.TemplateResponse("/index.html", {"request":request}, headers=headers)
# return RedirectResponse("/")
    # parse.urlencode(info) # URL-encode the values before sending
# templates.TemplateResponse("index.html", {"request":request, "id" : id})
@app.get('/auth')
async def auth_check(request : Request, x_token : str):
return x_token
| 2.640625 | 3 |
dcase_task2/train.py | f0k/dcase_task2 | 32 | 12767947 |
from __future__ import print_function
import os
import argparse
from importlib import import_module
import numpy as np
from dcase_task2.lasagne_wrapper.network import Network
from utils.data_tut18_task2 import load_data as load_data_tut18_task2
from utils.data_tut18_task2 import ID_CLASS_MAPPING as id_class_mapping_tut18_task2
from config.settings import EXP_ROOT
# set the seed for reproducibility
np.random.seed(4711)
def select_model(model_path):
""" select model """
model_str = os.path.basename(model_path)
model_str = model_str.split('.py')[0]
import_root = ".".join((model_path.split(os.path.sep))[:-1])
exec("from %s import %s as model" % (import_root, model_str))
model.EXP_NAME = model_str
return model
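# Usage sketch (hypothetical path): select_model("models/vgg_gap.py") imports
# the module models.vgg_gap and returns it with EXP_NAME set to "vgg_gap"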
def load_data(data_set, fold, args):
""" select data """
if "tut18T2ver" in data_set:
normalize = "norm" in data_set
spec_dir = data_set.split("-")[1]
data = load_data_tut18_task2(fold=fold, n_workers=1, spec_dir=spec_dir,
train_verified=True, train_unverified=False, normalize=normalize,
fix_lengths=args.no_len_fix, max_len=args.max_len, min_len=args.min_len,
train_file=args.train_file, train_on_all=args.train_on_all,
validate_verified=not args.validate_unverified)
id_class_mapping = id_class_mapping_tut18_task2
elif "tut18T2unver" in data_set:
normalize = "norm" in data_set
spec_dir = data_set.split("-")[1]
data = load_data_tut18_task2(fold=fold, n_workers=1, spec_dir=spec_dir,
train_verified=False, train_unverified=True, normalize=normalize,
fix_lengths=args.no_len_fix, max_len=args.max_len, min_len=args.min_len,
train_file=args.train_file, train_on_all=args.train_on_all,
validate_verified=not args.validate_unverified)
id_class_mapping = id_class_mapping_tut18_task2
elif "tut18T2" in data_set:
normalize = "norm" in data_set
spec_dir = data_set.split("-")[1]
data = load_data_tut18_task2(fold=fold, n_workers=1, spec_dir=spec_dir,
train_verified=True, train_unverified=True, normalize=normalize,
fix_lengths=args.no_len_fix, max_len=args.max_len, min_len=args.min_len,
train_file=args.train_file, train_on_all=args.train_on_all,
validate_verified=not args.validate_unverified)
id_class_mapping = id_class_mapping_tut18_task2
return data, id_class_mapping
def get_dump_file_paths(out_path, fold):
par = 'params.pkl' if fold is None else 'params_%d.pkl' % fold
log = 'results.pkl' if fold is None else 'results_%d.pkl' % fold
dump_file = os.path.join(out_path, par)
log_file = os.path.join(out_path, log)
return dump_file, log_file
if __name__ == '__main__':
""" main """
# add argument parser
parser = argparse.ArgumentParser(description='Train audio tagging network.')
parser.add_argument('--model', help='select model to train.')
parser.add_argument('--data', help='select model to train.')
parser.add_argument('--fold', help='train split.', type=int, default=None)
parser.add_argument('--ini_params', help='path to pretrained parameters.', type=str, default=None)
parser.add_argument('--tag', help='add tag to result files.', type=str, default=None)
parser.add_argument('--fine_tune', help='use fine-tune train configuration.', action='store_true')
# tut18 task2
parser.add_argument('--train_file', help='train data file.', type=str, default="train.csv")
parser.add_argument('--max_len', help='maximum spectrogram length.', type=int, default=None)
parser.add_argument('--min_len', help='minimum spectrogram length.', type=int, default=None)
parser.add_argument('--no_len_fix', help='do not fix lengths of spectrograms.', action='store_false')
parser.add_argument('--train_on_all', help='use all files for training.', action='store_true')
parser.add_argument('--validate_unverified', help='validate also on unverified samples.', action='store_true')
args = parser.parse_args()
# select model
model = select_model(args.model)
# load data
print("\nLoading data ...")
data, _ = load_data(args.data, args.fold, args)
# set model dump file
print("\nPreparing model ...")
out_path = os.path.join(os.path.join(EXP_ROOT), model.EXP_NAME)
dump_file, log_file = get_dump_file_paths(out_path, args.fold)
# change parameter dump files
if not args.fine_tune:
dump_file = dump_file.replace(".pkl", "_it0.pkl")
log_file = log_file.replace(".pkl", "_it0.pkl")
print("parameter file", dump_file)
print("log file", log_file)
# compile network
net = model.build_model()
# initialize neural network
my_net = Network(net)
# load initial parametrization
if args.ini_params:
ini_params = args.ini_params % args.fold
ini_params = dump_file.replace(os.path.basename(dump_file).split(".")[0], ini_params)
my_net.load(ini_params)
print("initial parameter file %s" % ini_params)
# add tag to results
if args.tag:
dump_file = dump_file.replace(".pkl", "_%s.pkl" % args.tag)
log_file = log_file.replace(".pkl", "_%s.pkl" % args.tag)
print("tagged parameter file %s" % dump_file)
# train network
train_strategy = model.compile_train_strategy(args.fine_tune)
my_net.fit(data, train_strategy, log_file=log_file, dump_file=dump_file)
| 2.140625 | 2 |
configs/_base_/datasets/supervisely_3class.py | supervisely-ecosystem/mmdetection3d | 0 | 12767948 |
dataset_type = "SuperviselyDataset"
data_root = '/data/slyproject'
class_names = ['Car', 'Pedestrian', 'Cyclist', 'DontCare']
point_cloud_range = [0, -40, -3, 70.4, 40, 1]
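# point cloud range is [x_min, y_min, z_min, x_max, y_max, z_max], in meters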
input_modality = dict(use_lidar=True, use_camera=False)
file_client_args = dict(backend='disk')
# db_sampler = dict(
# data_root=data_root,
# info_path=data_root + 'train/dataset.npy',
# rate=1.0,
# prepare=dict(
# filter_by_difficulty=[-1],
# filter_by_min_points=dict(Car=5, Pedestrian=10, Cyclist=10)),
# classes=class_names,
# sample_groups=dict(Car=1, Pedestrian=1, Cyclist=1),
# points_loader=dict(
# type='LoadPointsFromSlyFile',
# coord_type='LIDAR',
# load_dim=4,
# use_dim=[0, 1, 2, 3],
# file_client_args=file_client_args))
train_pipeline = [
dict(
type='LoadPointsFromSlyFile'),
dict(
type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
#dict(type='ObjectSample', db_sampler=db_sampler),
dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
dict(
type='GlobalRotScaleTrans',
rot_range=[-0.78539816, 0.78539816],
scale_ratio_range=[0.95, 1.05]),
dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
dict(type='PointShuffle'),
dict(type='DefaultFormatBundle3D', class_names=class_names),
dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
]
test_pipeline = [
dict(type='LoadPointsFromSlyFile')]
# construct a pipeline for data and gt loading in show function
# please keep its loading function consistent with test_pipeline (e.g. client)
eval_pipeline = [
dict(
type='LoadPointsFromSlyFile'),
dict(
type='DefaultFormatBundle3D',
class_names=class_names,
with_label=False),
dict(type='Collect3D', keys=['points'])
]
data = dict(
samples_per_gpu=1,
workers_per_gpu=1,
train=dict(
type='RepeatDataset',
times=2,
dataset=dict(
type=dataset_type,
data_root=data_root)),
val=dict(
type=dataset_type,
data_root=data_root),
test=dict(
type=dataset_type,
data_root=data_root))
evaluation = dict(interval=1, pipeline=eval_pipeline)
| 1.742188 | 2 |
sematch/classify.py | dhimmel/sematch | 397 | 12767949 | <gh_stars>100-1000
from gsitk.datasets.datasets import DatasetManager
from nltk.corpus import opinion_lexicon
from collections import Counter
def prepare_lexicon(process=True, dim=250, save=False):
if process:
dm = DatasetManager()
data = dm.prepare_datasets()
nega = set(opinion_lexicon.negative())
posi = set(opinion_lexicon.positive())
lexicon = opinion_lexicon.words()
lexicon_dic = {x: 0 for x in lexicon}
for t in data['vader']['text']:
for w in t:
if w in lexicon_dic:
lexicon_dic[w] += 1
for t in data['sentiment140']['text']:
for w in t:
if w in lexicon_dic:
lexicon_dic[w] += 1
L = Counter(lexicon_dic).most_common(4000)
N = []
P = []
for w, _ in L:
if w in nega:
N.append(w)
elif w in posi:
P.append(w)
l = P[:dim] + N[:dim]
if save:
with open('senti.lexicon', 'w') as f:
for d in l:
f.write(d)
f.write('\n')
return l
else:
with open('senti.lexicon', 'r') as f:
data = [line.strip() for line in f]
return data
from gensim.models import Word2Vec
from numpy import array, dot
from gensim import matutils
import collections
import functools
class memoized(object):
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
if not isinstance(args, collections.Hashable):
# uncacheable. a list, for instance.
# better to not cache than blow up.
return self.func(*args)
if args in self.cache:
return self.cache[args]
else:
value = self.func(*args)
self.cache[args] = value
return value
def __repr__(self):
'''Return the function's docstring.'''
return self.func.__doc__
def __get__(self, obj, objtype):
'''Support instance methods.'''
return functools.partial(self.__call__, obj)
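# Usage sketch: decorate any function whose arguments are hashable, e.g.
#     @memoized
#     def fib(n):
#         return n if n < 2 else fib(n - 1) + fib(n - 2)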
class WordRelatedness:
def __init__(self, model):
self._model = model
self._words = set([w for w in self._model.vocab])
def check_word(self, word):
return True if word in self._words else False
def check_words(self, words):
return [w for w in words if self.check_word(w)]
def similar_words(self, word):
return self._model.most_similar(word) if self.check_word(word) else []
@memoized
def word_similarity(self, w1, w2):
return self._model.similarity(w1, w2) if self.check_word(w1) and self.check_word(w2) else 0.0
def words_similarity(self, words1, words2):
w1 = self.check_words(words1)
w2 = self.check_words(words2)
return self._model.n_similarity(w1, w2) if w1 and w2 else 0.0
def word_vector(self, w):
return matutils.unitvec(self._model[w]) if self.check_word(w) else None
def words_vector(self, words):
v_words = [self._model[w] for w in self.check_words(words)]
return matutils.unitvec(array(v_words).mean(axis=0)) if v_words else None
def consine_similarity(self, v1, v2):
return dot(v1, v2)
from gsitk.features.word2vec import Word2VecFeatures
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import Pipeline
import numpy as np
import nltk
class SimVectorizer:
def __init__(self, senti_lexicon):
w2v_feat = Word2VecFeatures(w2v_model_path='/data/w2vmodel_500d_5mc')
sim_model = WordRelatedness(w2v_feat.model)
self._sim = sim_model.word_similarity
self._lexicon = senti_lexicon
self._N = len(self._lexicon)
# self._vectorizer = DictVectorizer(sparse=False)
self._stopwords = set(nltk.corpus.stopwords.words('english'))
def word_process(self, words):
return [w for w in words if w not in self._stopwords and len(w) > 2]
def similarity(self, words, feature):
return max([self._sim(w, feature) for w in words] + [0.0])
def transform(self, X):
X_transformed = np.zeros((len(X), self._N))
for i, x in enumerate(X):
# if i % 10000 == 0:
# print(i)
words = self.word_process(x)
words = set(words)
for j, f in enumerate(self._lexicon):
X_transformed[i, j] = self.similarity(words, f)
return X_transformed
Punc = [".", "!", "?", ",", ";", ":", "-", "'", "\"",
"!!", "!!!", "??", "???", "?!?", "!?!", "?!?!", "!?!?"]
Negate = ["aint", "arent", "cannot", "cant", "couldnt", "darent", "didnt", "doesnt",
"ain't", "aren't", "can't", "couldn't", "daren't", "didn't", "doesn't",
"dont", "hadnt", "hasnt", "havent", "isnt", "mightnt", "mustnt", "neither",
"don't", "hadn't", "hasn't", "haven't", "isn't", "mightn't", "mustn't",
"neednt", "needn't", "never", "none", "nope", "nor", "not", "nothing", "nowhere",
"oughtnt", "shant", "shouldnt", "uhuh", "wasnt", "werent",
"oughtn't", "shan't", "shouldn't", "uh-uh", "wasn't", "weren't",
"without", "wont", "wouldnt", "won't", "wouldn't", "rarely", "seldom", "despite"]
Booster = ["absolutely", "amazingly", "awfully", "completely", "considerably",
"decidedly", "deeply", "effing", "enormously", "entirely", "especially", "exceptionally",
"extremely", "fabulously", "flipping", "flippin", "fricking", "frickin", "frigging",
"friggin", "fully", "fucking", "greatly", "hella", "highly", "hugely", "incredibly",
"intensely", "majorly", "more", "most", "particularly", "purely", "quite", "really",
"remarkably", "so", "substantially", "thoroughly", "totally", "tremendously",
"uber", "unbelievably", "unusually", "utterly", "very", "almost", "barely", "hardly",
"just enough", "kind of", "kinda", "kindof", "kind-of", "less", "little", "marginally",
"occasionally", "partly", "scarcely", "slightly", "somewhat", "sort of", "sorta",
"sortof", "sort-of"]
Extra_Lexicon = Punc + Negate + Booster
def create_lexicon(corpus, embedding, num=250):
stopwords = set(nltk.corpus.stopwords.words('english'))
V = set([w for w in embedding.vocab])
tags = corpus['polarity']
texts = corpus['text']
P = [t for i, t in texts.iteritems() if int(tags[i]) == 1]
N = [t for i, t in texts.iteritems() if int(tags[i]) == -1]
def word_count(X):
d = {}
for x in X:
for w in x:
if w not in stopwords and w in V and len(w) > 1:
d[w] = d[w] + 1 if w in d else 1
return d
P_dict = word_count(P)
N_dict = word_count(N)
L_p = Counter(P_dict).most_common(num)
L_n = Counter(N_dict).most_common(num)
Words_p, Counts_p = zip(*L_p)
Words_n, Counts_n = zip(*L_n)
P_sum = sum(Counts_p)
N_sum = sum(Counts_n)
P_score = [x * 1.0 / P_sum for x in Counts_p]
N_score = [x * 1.0 / N_sum for x in Counts_n]
return Words_p + Words_n, P_score + N_score
def prepare_lexicon(corpus, embedding, num=250, extra=False):
V = set([w for w in embedding.vocab])
neg = set(opinion_lexicon.negative())
pos = set(opinion_lexicon.positive())
senti_lexicon = opinion_lexicon.words()
senti_lexicon = [w for w in senti_lexicon if w in V]
lexicon_dic = {x: 0 for x in senti_lexicon}
for sent in corpus:
for w in sent:
if w in lexicon_dic:
lexicon_dic[w] += 1
L = Counter(lexicon_dic).most_common(5000)
N = []
N_count = []
P = []
P_count = []
for word, count in L:
if word in neg:
N.append(word)
N_count.append(count)
elif word in pos:
P.append(word)
P_count.append(count)
Senti_L = P[:num] + N[:num]
P_sum = sum(P_count[:num])
P_score = [x * 1.0 / P_sum for x in P_count[:num]]
N_sum = sum(N_count[:num])
N_score = [x * 1.0 / N_sum for x in N_count[:num]]
Senti_W = P_score + N_score
if extra:
Extra_L = [l for l in Extra_Lexicon if l in V]
Extra_W = [1.0 for l in Extra_L]
return Senti_L + Extra_L, Senti_W + Extra_W
return Senti_L, Senti_W
class SimVectorizer:
def __init__(self, lexicon, weight, embedding, stopword=True, weighted=False):
self._stopwords = set(nltk.corpus.stopwords.words('english'))
self._model = embedding
self._W = weight
self._V = set([w for w in self._model.vocab])
self._L = self.word_vectors(lexicon).T
self._filter = lambda x: self.vectorization(self.word_process(x))
self.sim_vectorization = self._filter if stopword else self.vectorization
self._weighter = lambda x: np.multiply(self.sim_vectorization(x), self._W)
self.sim_vector = self._weighter if weighted else self.sim_vectorization
def word_process(self, words):
return [w for w in words if w not in self._stopwords and len(w) > 1]
def word_vectors(self, x):
return np.array([self._model[w] for _, w in enumerate(x) if w in self._V])
def vectorization(self, x):
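        # rows of v below are the word vectors of x; columns of self._L are
        # lexicon word vectors, so s[i, j] is a dot-product similarity and the
        # column-wise max picks each lexicon word's best match among the words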
v = self.word_vectors(x)
if v.shape[0] == 0:
return np.zeros(self._L.shape[1])
s = np.dot(v, self._L)
return s.max(axis=0)
def transform(self, X):
return np.array([self.sim_vector(x) for _, x in enumerate(X)])
| 2.40625 | 2 |
qt__pyqt__pyside__pyqode/test_sql_fetchMore__QSqlTableModel_QSqlQueryModel/main__QSqlQueryModel.py | gil9red/SimplePyScripts | 117 | 12767950 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from PyQt5.QtWidgets import QApplication, QTableView
from PyQt5.QtSql import QSqlDatabase, QSqlQueryModel, QSqlQuery
db = QSqlDatabase.addDatabase('QSQLITE')
db.setDatabaseName('database.sqlite')
if not db.open():
raise Exception(db.lastError().text())
TABLE = 'word2emoji'
query = QSqlQuery()
query.exec(f'SELECT COUNT(*) FROM {TABLE}')
query.next()
TABLE_ROW_COUNT = query.value(0)
def update_window_title():
mw.setWindowTitle(f'{model.rowCount()} / {TABLE_ROW_COUNT}')
app = QApplication([])
model = QSqlQueryModel()
model.rowsInserted.connect(update_window_title)
model.setQuery(f"SELECT * FROM {TABLE}")
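# QSqlQueryModel fetches rows incrementally (256 at a time for SQLite), so
# scrolling the view triggers fetchMore(), which emits rowsInserted and
# refreshes the window title above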
mw = QTableView()
mw.setEditTriggers(QTableView.NoEditTriggers)
mw.setModel(model)
mw.resize(600, 480)
mw.show()
update_window_title()
app.exec()
| 2.421875 | 2 |