| max_stars_repo_path (string, lengths 3-269) | max_stars_repo_name (string, lengths 4-119) | max_stars_count (int64, 0-191k) | id (string, lengths 1-7) | content (string, lengths 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
yt_dlp/extractor/imdb.py | trassshhub/yt-dlp | 1 | 12763651 | from __future__ import unicode_literals
import base64
import json
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
int_or_none,
mimetype2ext,
qualities,
traverse_obj,
try_get,
url_or_none,
)
class ImdbIE(InfoExtractor):
IE_NAME = 'imdb'
IE_DESC = 'Internet Movie Database trailers'
_VALID_URL = r'https?://(?:www|m)\.imdb\.com/(?:video|title|list).*?[/-]vi(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.imdb.com/video/imdb/vi2524815897',
'info_dict': {
'id': '2524815897',
'ext': 'mp4',
'title': 'No. 2',
'description': 'md5:87bd0bdc61e351f21f20d2d7441cb4e7',
'duration': 152,
'thumbnail': r're:^https?://.+\.jpg',
}
}, {
'url': 'https://www.imdb.com/video/vi3516832537',
'info_dict': {
'id': '3516832537',
'ext': 'mp4',
'title': 'Paul: U.S. Trailer #1',
'description': 'md5:17fcc4fe11ec29b4399be9d4c5ef126c',
'duration': 153,
'thumbnail': r're:^https?://.+\.jpg',
}
}, {
'url': 'http://www.imdb.com/video/_/vi2524815897',
'only_matching': True,
}, {
'url': 'http://www.imdb.com/title/tt1667889/?ref_=ext_shr_eml_vi#lb-vi2524815897',
'only_matching': True,
}, {
'url': 'http://www.imdb.com/title/tt1667889/#lb-vi2524815897',
'only_matching': True,
}, {
'url': 'http://www.imdb.com/videoplayer/vi1562949145',
'only_matching': True,
}, {
'url': 'http://www.imdb.com/title/tt4218696/videoplayer/vi2608641561',
'only_matching': True,
}, {
'url': 'https://www.imdb.com/list/ls009921623/videoplayer/vi260482329',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(f'https://www.imdb.com/video/vi{video_id}', video_id)
info = self._search_nextjs_data(webpage, video_id)
video_info = traverse_obj(info, ('props', 'pageProps', 'videoPlaybackData', 'video'), default={})
title = (traverse_obj(video_info, ('name', 'value'), ('primaryTitle', 'titleText', 'text'))
or self._html_search_meta(('og:title', 'twitter:title'), webpage, default=None)
or self._html_extract_title(webpage))
data = video_info.get('playbackURLs') or try_get(self._download_json(
'https://www.imdb.com/ve/data/VIDEO_PLAYBACK_DATA', video_id,
query={
'key': base64.b64encode(json.dumps({
'type': 'VIDEO_PLAYER',
'subType': 'FORCE_LEGACY',
'id': 'vi%s' % video_id,
}).encode()).decode(),
}), lambda x: x[0]['videoLegacyEncodings'])
quality = qualities(('SD', '480p', '720p', '1080p'))
formats, subtitles = [], {}
for encoding in data:
if not encoding or not isinstance(encoding, dict):
continue
video_url = url_or_none(encoding.get('url'))
if not video_url:
continue
ext = mimetype2ext(encoding.get(
'mimeType')) or determine_ext(video_url)
if ext == 'm3u8':
fmts, subs = self._extract_m3u8_formats_and_subtitles(
video_url, video_id, 'mp4', entry_protocol='m3u8_native',
preference=1, m3u8_id='hls', fatal=False)
subtitles = self._merge_subtitles(subtitles, subs)
formats.extend(fmts)
continue
format_id = traverse_obj(encoding, ('displayName', 'value'), 'definition')
formats.append({
'format_id': format_id,
'url': video_url,
'ext': ext,
'quality': quality(format_id),
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'alt_title': info.get('videoSubTitle'),
'formats': formats,
'description': try_get(video_info, lambda x: x['description']['value']),
'thumbnail': url_or_none(try_get(video_info, lambda x: x['thumbnail']['url'])),
'duration': int_or_none(try_get(video_info, lambda x: x['runtime']['value'])),
'subtitles': subtitles,
}
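# Illustrative usage sketch (not part of the extractor): any URL matching
# _VALID_URL above is routed to this extractor automatically, e.g. with one of
# the test URLs:
#
#   import yt_dlp
#   with yt_dlp.YoutubeDL({'skip_download': True}) as ydl:
#       info = ydl.extract_info('https://www.imdb.com/video/vi3516832537', download=False)
#       print(info['title'], info['duration'])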
class ImdbListIE(InfoExtractor):
IE_NAME = 'imdb:list'
IE_DESC = 'Internet Movie Database lists'
_VALID_URL = r'https?://(?:www\.)?imdb\.com/list/ls(?P<id>\d{9})(?!/videoplayer/vi\d+)'
_TEST = {
'url': 'https://www.imdb.com/list/ls009921623/',
'info_dict': {
'id': '009921623',
'title': 'The Bourne Legacy',
'description': 'A list of trailers, clips, and more from The Bourne Legacy, starring <NAME> and <NAME>.',
},
'playlist_count': 8,
}
def _real_extract(self, url):
list_id = self._match_id(url)
webpage = self._download_webpage(url, list_id)
entries = [
self.url_result('http://www.imdb.com' + m, 'Imdb')
for m in re.findall(r'href="(/list/ls%s/videoplayer/vi[^"]+)"' % list_id, webpage)]
list_title = self._html_search_regex(
r'<h1[^>]+class="[^"]*header[^"]*"[^>]*>(.*?)</h1>',
webpage, 'list title')
list_description = self._html_search_regex(
r'<div[^>]+class="[^"]*list-description[^"]*"[^>]*><p>(.*?)</p>',
webpage, 'list description')
return self.playlist_result(entries, list_id, list_title, list_description)
| 2.1875 | 2 |
pkg/tasks.py | snackattas/ShutTheBox | 1 | 12763652 | <reponame>snackattas/ShutTheBox<filename>pkg/tasks.py
"""tasks.py - This file contains handlers that are called by cronjobs."""
import webapp2
import datetime
from models import User, Game
from collections import namedtuple
from google.appengine.api import mail, app_identity
class SendReminderEmail(webapp2.RequestHandler):
def get(self):
"""If the User has not made a move in an active game for more than 12
hours, send a reminder email that includes the current game state."""
users = User.query(User.email != None).fetch()
if not users:
return
app_id = app_identity.get_application_id()
twelve_hours_ago = datetime.datetime.now() - \
datetime.timedelta(hours=12)
inactive_users = []
GameData = namedtuple('GameData',
['urlsafe_key',
'dice_operation', 'number_of_tiles',
'game_start_datetime', 'last_turn_datetime',
'active_tiles', 'roll', 'turn'])
for n, user in enumerate(users):
games_query = Game.query(ancestor=user.key)
games_query = games_query.filter(Game.game_over == False)
games_query = games_query.order(-Game.timestamp)
games = games_query.fetch()
# If games are not found, pass over this user
if not games:
continue
inactive_games = []
for game in games:
recent_turn = game.most_recent_turn()
# if the most recent turn is more recent than 12 hours ago,
# pass over this game
if recent_turn.timestamp > twelve_hours_ago:
continue
game_data = GameData(
game.key.urlsafe(),
game.dice_operation, game.number_of_tiles,
game.timestamp, recent_turn.timestamp,
recent_turn.active_tiles, recent_turn.roll,
recent_turn.turn)
inactive_games.append(game_data)
if inactive_games:
inactive_users.append([user, inactive_games])
for inactive_user in inactive_users:
user = inactive_user[0]
games = inactive_user[1]
number_of_games = len(games)
subject = "This is a reminder!"
salutation = """Hello, {0}.
You have incomplete game(s) of Shut The Box that have not progressed in over 12 hours. This is a reminder to finish these incomplete games.
Number of incomplete games: {1}
""".format(user.username, number_of_games)
formatted_games = ""
for game in games:
formatted_games += """
urlsafe key: {0}
Last move: {1}
Game start: {2}
Active tiles: {3}
Most recent roll: {4}
Turn: {5}
Dice operation: {6}
Number of tiles: {7}
""".format(game.urlsafe_key,
game.last_turn_datetime,
game.game_start_datetime,
game.active_tiles,
game.roll,
game.turn,
game.dice_operation,
game.number_of_tiles)
body = salutation + formatted_games
mail.send_mail('<EMAIL>'.<EMAIL>(app_id),
user.email,
subject,
body)
app = webapp2.WSGIApplication([
('/crons/send_reminder', SendReminderEmail)], debug=True)
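# Illustrative cron.yaml entry that would invoke this handler on App Engine; the
# url matches the route registered above, the 12 hour schedule is an assumption:
#
#   cron:
#   - description: remind players about stalled Shut The Box games
#     url: /crons/send_reminder
#     schedule: every 12 hours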
| 2.609375 | 3 |
kiestze_django/kiestze/migrations/0006_auto_20180721_0951.py | oSoc18/kiest_ze | 3 | 12763653 | <filename>kiestze_django/kiestze/migrations/0006_auto_20180721_0951.py
# Generated by Django 2.0.7 on 2018-07-21 09:51
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('kiestze', '0005_auto_20180721_0945'),
]
operations = [
migrations.RenameModel(
old_name='User_edits',
new_name='User_edit',
),
migrations.AlterModelOptions(
name='user_edit',
options={'verbose_name': 'User_edit', 'verbose_name_plural': 'User_edit'},
),
]
| 1.617188 | 2 |
backend/serializers.py | Total-Conversion/eco4coin | 0 | 12763654 | # from django.contrib.auth.models import User
# from django.contrib.auth.models import User
import uuid
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from .models import Trade, Sale, Purchase, ApplicationVersion
from accounts.models import CustomUser
from rest_framework import serializers
from django.db.models.query_utils import Q
class ApplicationVersionSerializer(serializers.ModelSerializer):
class Meta:
model = ApplicationVersion
fields = ['version']
class CreateTradeSerializer(serializers.ModelSerializer):
recipient = serializers.CharField(min_length=34, max_length=34)
class Meta:
model = Trade
fields = ['trade_type', 'trade_value', 'recipient', 'notes']
def __init__(self, instance=None, user=None, **kwargs):
self.user = user
super().__init__(instance, **kwargs)
def validate_user(self, data):
return data
def validate_recipient(self, data):
print(data)
print(self.user.wallet_id)
if self.user.wallet_id == data:
raise serializers.ValidationError("You can't send Coins to Yourself")
return data
def validate_trade_value(self, data):
if self.user.coin_balance < data:
raise serializers.ValidationError("You don't have enough coins")
return data
def create(self, validated_data):
if CustomUser.objects.filter(wallet_id=validated_data['recipient']).exists():
trade_instance = Trade.objects.create(
trade_type=validated_data['trade_type'],
trade_value=validated_data['trade_value'],
sender=validated_data['sender'],
notes=validated_data['notes'],
recipient=CustomUser.objects.get(wallet_id=validated_data['recipient']),
recipient_wallet_id=validated_data['recipient']
)
return trade_instance
else:
trade_instance = Trade.objects.create(
trade_type=validated_data['trade_type'],
trade_value=validated_data['trade_value'],
sender=validated_data['sender'],
notes=validated_data['notes'],
recipient_wallet_id=validated_data['recipient']
)
return trade_instance
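# Hypothetical view-side usage sketch (names such as `request` are assumptions,
# not part of this module): the custom `user` kwarg drives the balance/recipient
# validation above, and `sender` is injected into validated_data at save() time.
#
#   serializer = CreateTradeSerializer(data=request.data, user=request.user)
#   serializer.is_valid(raise_exception=True)
#   trade = serializer.save(sender=request.user)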
class TradeSerializer(serializers.ModelSerializer):
sender = serializers.CharField(source='sender.wallet_id')
class Meta:
model = Trade
fields = ['sender', 'trade_type', 'trade_price', 'sale_order', 'purchase_order', 'trade_value', 'recipient_wallet_id', 'created_at', 'notes']
class NestedTradeSerializer(serializers.ModelSerializer):
sender = serializers.CharField(source='sender.wallet_id')
class Meta:
model = Trade
fields = ['wallet_id', 'sender', 'trade_type', 'trade_value', 'recipient_wallet_id', 'created_at', 'notes']
class CreateUserSerializer(serializers.ModelSerializer):
class Meta:
model = CustomUser
fields = ['device']
extra_kwargs = {"device": {"required": True}}
def create(self, validated_data):
if CustomUser.objects.filter(device=validated_data['device']).exists():
user = CustomUser.objects.create(
username=validated_data['username'],
device=validated_data['device'],
cash_balance=0,
coin_balance=0,
)
else:
user = CustomUser.objects.create(
username=validated_data['username'],
device=validated_data['device'],
)
return user
class StandardUserSerializer(serializers.ModelSerializer):
class Meta:
model = CustomUser
fields = ['username', 'device', 'email', 'cash_balance', 'coin_balance', 'id', 'wallet_id', 'cash_locked', 'coin_locked']
class UserSerializer(serializers.ModelSerializer):
trades = serializers.SerializerMethodField(
source='trade', read_only=True)
def get_trades(self, obj):
return NestedTradeSerializer(
Trade.objects.filter(
Q(sender=self.context.get("request").user) |
Q(recipient=self.context.get("request").user)
).order_by("-created_at"),
many=True, read_only=True).data
class Meta:
model = CustomUser
# queryset = Trade.objects.all()
fields = ['cash_balance', 'coin_balance', 'trades', 'wallet_id', 'cash_locked', 'coin_locked']
class UserWalletSerializer(serializers.ModelSerializer):
class Meta:
model = CustomUser
fields = ['wallet_id', 'cash_balance', 'coin_balance', 'cash_locked', 'coin_locked']
# ================
class SaleSerializer(serializers.ModelSerializer):
class Meta:
model = Sale
fields = ['id', 'sale_price', 'init_amount', 'amount', 'sale_status', 'created_at']
class RevokeSaleSerializer(serializers.ModelSerializer):
class Meta:
model = Sale
fields = []
class CreateSaleSerializer(serializers.ModelSerializer):
class Meta:
model = Sale
fields = ['sale_price', 'amount']
def __init__(self, instance=None, user=None, **kwargs):
self.user = user
super().__init__(instance, **kwargs)
def validate_user(self, data):
return data
def validate_amount(self, data):
if self.user.coin_balance < data:
raise serializers.ValidationError("not enough coin")  # TODO: use error_messages
return data
@transaction.atomic
def create(self, validated_data):
user = CustomUser.objects.select_for_update().get(id=self.user.id)
user.coin_balance -= validated_data['amount']
user.coin_locked += validated_data['amount']
user.save()
sale_instance = Sale.objects.create(
amount=validated_data['amount'],
init_amount=validated_data['amount'],
sale_price=validated_data['sale_price'],
sale_user=validated_data['sale_user']
)
return sale_instance
class PurchaseSerializer(serializers.ModelSerializer):
class Meta:
model = Purchase
fields = ['id', 'purchase_price', 'init_amount', 'amount', 'purchase_status', 'created_at']
class RevokePurchaseSerializer(serializers.ModelSerializer):
class Meta:
model = Purchase
fields = []
class CreatePurchaseSerializer(serializers.ModelSerializer):
class Meta:
model = Purchase
fields = ['purchase_price', 'amount']
def __init__(self, instance=None, user=None, **kwargs):
self.user = user
super().__init__(instance, **kwargs)
def validate(self, data):
if self.user.cash_balance < data['amount'] * data['purchase_price']:
raise serializers.ValidationError("not enough cash")  # TODO: use error_messages
return data
# def validate_amount(self, data):
# if self.user.cash_balance < data:
# raise serializers.ValidationError("not enough cash")
# return data
@transaction.atomic
def create(self, validated_data):
# self.user.cash_balance -= validated_data['amount']
# self.user.save()
user = CustomUser.objects.select_for_update().get(id=self.user.id)
user.cash_balance -= validated_data['amount'] * validated_data['purchase_price'] # max price
user.cash_locked += validated_data['amount'] * validated_data['purchase_price'] # max price
user.save()
purchase_instance = Purchase.objects.create(
amount=validated_data['amount'],
init_amount=validated_data['amount'],
purchase_price=validated_data['purchase_price'],
purchase_user=validated_data['purchase_user']
)
return purchase_instance | 2.1875 | 2 |
db_sqlite3.py | umarsear/solar-energy-notification | 0 | 12763655 | __author__ = '<NAME>'
import os
import sqlite3 as lite
def open_database(db):
"""
:rtype : sqlite3 database connection
"""
if os.path.isfile(db):
db_connection = lite.connect(db)
else:
db_connection = lite.connect(db)
db_cursor = db_connection.cursor()
db_cursor.execute("CREATE TABLE IF NOT EXISTS production(id INTEGER PRIMARY KEY, site_id INTEGER, "
"date TIMESTAMP, datetime TIMESTAMP, energy REAL, notified INTEGER)")
db_cursor.execute("CREATE TABLE IF NOT EXISTS power (id INTEGER NOT NULL, site_id INTEGER NOT NULL,"
"time_stamp TIMESTAMP, power_level REAL, PRIMARY KEY(id))")
db_cursor.execute('CREATE TABLE IF NOT EXISTS sites (id INTEGER NOT NULL, site_id INTEGER NOT NULL, '
'site_name TEXT NOT NULL, site_owner TEXT NOT NULL, closest_capital_city TEXT NOT NULL, '
'api_key TEXT NOT NULL, email_address TEXT NOT NULL, pushover_user_key INTEGER, '
'last_update TIMESTAMP, PRIMARY KEY(id)) ')
db_connection.commit()
return db_connection
def write_energy_to_database(db, site_id, date_time, energy):
db_connection = open_database(db)
db_cursor = db_connection.cursor()
db_cursor.execute("INSERT INTO production(site_id, datetime, energy) VALUES(?,?,?)",
(site_id, date_time, energy))
db_connection.commit()
db_connection.close()
def write_power_to_database(db, site_id, power_values):
db_connection = open_database(db)
db_cursor = db_connection.cursor()
for date_time, power in power_values.items():
db_cursor.execute("INSERT INTO power(site_id, time_stamp, power_level) VALUES(?,?,?)",
(site_id, date_time, power))
db_connection.commit()
db_connection.close()
def touch_site(db, site_id, touch_date):
db_connection = open_database(db)
db_cursor = db_connection.cursor()
db_cursor.execute("UPDATE sites SET last_update=? WHERE site_id=?", (touch_date, site_id))
db_connection.commit()
db_connection.close()
def get_db_row_count(db, table_name, where_clause=""):
if where_clause == "":
query = "SELECT COUNT(*) FROM {}".format(table_name)
else:
query = "SELECT COUNT(*) FROM {} WHERE {}".format(table_name, where_clause)
db_connection = open_database(db)
db_cursor = db_connection.cursor()
db_cursor.execute(query)
count = db_cursor.fetchall()
db_connection.close()
return count[0][0]
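# Minimal usage sketch (the file name, site id and values below are illustrative
# assumptions, not taken from this project):
#
#   conn = open_database("solar.db")   # creates the tables on first use
#   conn.close()
#   write_energy_to_database("solar.db", 1, "2016-01-01 12:00:00", 5.2)
#   print(get_db_row_count("solar.db", "production", "site_id=1"))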
def get_site_details(db, db_index):
db_connection = open_database(db)
db_cursor = db_connection.cursor()
query = "SELECT site_id, site_name, site_owner, closest_capital_city, api_key, email_address, pushover_user_key," \
" last_update FROM sites WHERE id={}".format(db_index)
db_cursor.execute(query)
row = db_cursor.fetchall()
db_connection.close()
return row[0] | 2.84375 | 3 |
d2go/data/dataset_mappers/rotated_dataset_mapper.py | wenliangzhao2018/d2go | 687 | 12763656 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import logging
import numpy as np
import torch
from d2go.data.dataset_mappers.d2go_dataset_mapper import D2GoDatasetMapper
from detectron2.data import detection_utils as utils, transforms as T
from detectron2.structures import BoxMode, Instances, RotatedBoxes
from .build import D2GO_DATA_MAPPER_REGISTRY
logger = logging.getLogger(__name__)
@D2GO_DATA_MAPPER_REGISTRY.register()
class RotatedDatasetMapper(D2GoDatasetMapper):
def _original_call(self, dataset_dict):
"""
Modified from detectron2's original __call__ in DatasetMapper
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = self._read_image(dataset_dict, format=self.img_format)
if not self.backfill_size:
utils.check_image_size(dataset_dict, image)
if "annotations" not in dataset_dict:
image, transforms = T.apply_transform_gens(
([self.crop_gen] if self.crop_gen else []) + self.tfm_gens, image
)
else:
# Crop around an instance if there are instances in the image.
# USER: Remove if you don't use cropping
if self.crop_gen:
crop_tfm = utils.gen_crop_transform_with_instance(
self.crop_gen.get_crop_size(image.shape[:2]),
image.shape[:2],
np.random.choice(dataset_dict["annotations"]),
)
image = crop_tfm.apply_image(image)
image, transforms = T.apply_transform_gens(self.tfm_gens, image)
if self.crop_gen:
transforms = crop_tfm + transforms
image_shape = image.shape[:2] # h, w
dataset_dict["image"] = torch.as_tensor(
image.transpose(2, 0, 1).astype("float32")
)
# Can use uint8 if it turns out to be slow some day
assert not self.load_proposals, "Not supported!"
if not self.is_train:
dataset_dict.pop("annotations", None)
dataset_dict.pop("sem_seg_file_name", None)
return dataset_dict
if "annotations" in dataset_dict:
for anno in dataset_dict["annotations"]:
if not self.mask_on:
anno.pop("segmentation", None)
if not self.keypoint_on:
anno.pop("keypoints", None)
# Convert dataset_dict["annotations"] to dataset_dict["instances"]
annotations = [
obj
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
# Convert either rotated box or horizontal box to XYWHA_ABS format
original_boxes = [
BoxMode.convert(
box=obj["bbox"],
from_mode=obj["bbox_mode"],
to_mode=BoxMode.XYWHA_ABS,
)
for obj in annotations
]
transformed_boxes = transforms.apply_rotated_box(
np.array(original_boxes, dtype=np.float64)
)
instances = Instances(image_shape)
instances.gt_classes = torch.tensor(
[obj["category_id"] for obj in annotations], dtype=torch.int64
)
instances.gt_boxes = RotatedBoxes(transformed_boxes)
instances.gt_boxes.clip(image_shape)
dataset_dict["instances"] = instances[instances.gt_boxes.nonempty()]
return dataset_dict
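# Hedged usage sketch: the @D2GO_DATA_MAPPER_REGISTRY.register() decorator above
# registers this class under its class name, so (assuming the standard fvcore
# registry API) it can be looked up by name, e.g.:
#
#   from d2go.data.dataset_mappers.build import D2GO_DATA_MAPPER_REGISTRY
#   mapper_cls = D2GO_DATA_MAPPER_REGISTRY.get("RotatedDatasetMapper")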
| 2.21875 | 2 |
SVG_Prepare_Photos/prepare_svg-photos.py | jpenrici/Extensions_Inkscape | 0 | 12763657 | <reponame>jpenrici/Extensions_Inkscape
# -*- Mode: Python3; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
'''
Objective:
Insert images (photos) in SVG files.
Details:
Files in the Images directory.
Txt list with the names of the files.
Requires:
Python Imaging Library (PIL)
'''
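# Illustrative input sketch (file names below are examples, not shipped with the
# script): the template referenced by TEMPLATE must contain the #IMAGEBASE#
# placeholder, and list_images.txt lists one file from the Images/ directory per
# line; empty lines and lines starting with '#' are skipped, e.g.:
#
#   photo_landscape.jpg
#   # this line is ignored
#   photo_portrait.jpg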
import os
import sys
from PIL import Image
EOL = '\n'
EMPTY = ""
TARGET = "#IMAGEBASE#"
PATH, _ = os.path.split(os.path.realpath(__file__))
TEMPLATE = "template_landscape_21x15cm.txt"
IMG_PATH = PATH + "/Images/"
SVG_PATH = PATH + "/Output/"
def load(path):
lines = []
print("Loaded: {}".format(path))
try:
f = open(path)
lines = [line for line in f]
f.close()
except Exception:
print("error: couldn't open file.")
pass
return lines
def save(path, text):
print("Saved: {}".format(path))
try:
f = open(path, "w")
f.write(text)
f.close()
except Exception:
print("error: cannot save " + path)
exit(0)
def convert(data):
text = ""
for item in data:
text += item
return text
def prepare(data):
svg = convert(load(TEMPLATE))
if not os.path.exists(SVG_PATH):
print("Create " + SVG_PATH)
os.makedirs(SVG_PATH)
for filename in data:
filename = filename.replace(EOL, EMPTY)
if filename == EMPTY:
continue
if filename[0] == '#':
continue
path = IMG_PATH + filename
try:
img = Image.open(path)
img_width, img_height = img.size
info = "Prepare {}\nImage {} x {}".format(filename, img_width,
img_height)
if img_width < img_height:
info += "\tRotate ..."
imgr = img.transpose(Image.ROTATE_90)
filename = "rotate_" + filename
path = IMG_PATH + filename
imgr.save(path)
print(info)
out = svg.replace(TARGET, path)
filename = filename.split('.')[0]
save(SVG_PATH + filename + ".svg", out)
except Exception as err:
print("Could not process {}: {}".format(filename, err))
continue
if __name__ == '__main__':
# Test
data = load("./list_images.txt")
prepare(data)
| 2.90625 | 3 |
setup.py | anarkiwi/dovesnap | 16 | 12763658 | <filename>setup.py
"""package setup."""
from setuptools import setup
setup(
name='dovesnap',
version=[ f.split('"')[1] for f in open('main.go', 'r').readlines() if 'version' in f ][0],
license='Apache License 2.0',
description='graphviz generator of dovesnap networks',
url='https://github.com/IQTLabs/dovesnap',
scripts=['bin/graph_dovesnap', 'bin/cleanup_dovesnap'],
setup_requires=['pbr>=1.9', 'setuptools>=17.1'],
pbr=True,
)
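# Note: the `version` list comprehension above takes the first double-quoted
# string from the first line of main.go that mentions 'version'; an illustrative
# (assumed) matching line in main.go would be:
#
#   var version = "1.0.0"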
| 1.304688 | 1 |
oop 1-1.py | DirkTayab/OOP-1-1 | 0 | 12763659 | <reponame>DirkTayab/OOP-1-1<gh_stars>0
from tkinter import *
window = Tk()
window.geometry("400x300")
window.title("Welcome to Python Programming")
#Button Widget
btn = Button(window, text = "Click to add", fg = "Blue")
btn.place(x = 50, y = 120)
#Label Widget
lbl = Label(window, text = "Student Personal Information", fg = "Blue", bg = "Light Blue")
lbl.place(relx = .5, rely = 0.2, anchor = "center")
lbl2 = Label(window, text = "Gender", fg = "Blue", bg = "Light Blue")
lbl2.place(x = 50, y = 150)
lbl3 = Label(window, text = "Sports", fg = "Blue", bg = "Light Blue")
lbl3.place(x = 50, y = 210)
lbl4 = Label(window, text = "Subjects:", fg = "Blue", bg = "Light Blue")
lbl4.place(x = 50, y = 255)
#Text Field Widget
txtfld = Entry(window, bd = 2, font = ("Bebas Neue", 12))
txtfld.place(x = 140, y = 120)
#Radio Button
v1 = StringVar()
v1.set("Male")
rd1 = Radiobutton(window, text = "Male", variable = v1, value = "Male")
rd1.place(x = 50, y = 170)
rd2 = Radiobutton(window, text = "Female", variable = v1, value = "Female")
rd2.place(x = 50, y = 190)
#CheckBox Widget
v3 = IntVar()
v4 = IntVar()
v5 = IntVar()
chkbx = Checkbutton(window, text = "Basketball", variable = v3)
chkbx2 = Checkbutton(window, text = "Tennis", variable = v4)
chkbx3 = Checkbutton(window, text = "Swimming", variable =v5)
chkbx.place(relx = .25, y = 230)
chkbx2.place(relx = .5, y = 230)
chkbx3.place(relx = .7, y = 230)
#ListBox Widget
var = StringVar()
var.set("Student 1")
data1 = "Arithmetic"
data2 = "Reading"
data3 = "Writing"
lstbx = Listbox(window, height = 5, selectmode = "multiple")
lstbx.insert(END, data1, data2, data3)
lstbx.place(x = 50, y = 280)
window.mainloop() | 3.71875 | 4 |
lang/py/pylib/code/difflib/difflib_unified.py | ch1huizong/learning | 13 | 12763660 | <filename>lang/py/pylib/code/difflib/difflib_unified.py
#!/usr/bin/env python
#
# Copyright 2007 <NAME>.
#
"""Unified diff example
"""
#end_pymotw_header
import difflib
from difflib_data import *
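# difflib_data is not reproduced here; as in the PyMOTW examples it is assumed
# to provide text1_lines and text2_lines (two sample texts split into lines).
# A minimal stand-in, if that module is unavailable, could be:
#
#   text1_lines = "one\ntwo\nthree".splitlines()
#   text2_lines = "one\ntwo\nfour".splitlines()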
diff = difflib.unified_diff(text1_lines,
text2_lines,
lineterm='',
)
print('\n'.join(diff))
| 2.09375 | 2 |
src/advanced python/monitor.py | sudeep0901/python | 0 | 12763661 | <filename>src/advanced python/monitor.py
from time import sleep
import os
print(os.getpid())
def set_proc_name(newname):
from ctypes import cdll, byref, create_string_buffer
libc = cdll.LoadLibrary('libc.so.6')
if isinstance(newname, str):
newname = newname.encode()  # prctl expects bytes, not a Python 3 str
buff = create_string_buffer(len(newname)+1)
buff.value = newname
# 15 == PR_SET_NAME from <linux/prctl.h>
libc.prctl(15, byref(buff), 0, 0, 0)
def get_proc_name():
from ctypes import cdll, byref, create_string_buffer
libc = cdll.LoadLibrary('libc.so.6')
buff = create_string_buffer(128)
# 16 == PR_GET_NAME from <linux/prctl.h>
libc.prctl(16, byref(buff), 0, 0, 0)
return buff.value
import sys
# sys.argv[0] is unchanged; only the kernel-visible process name is affected
print(get_proc_name())   # outputs the original name, e.g. b'python'
set_proc_name('pythontesting')
print(get_proc_name())   # now outputs b'pythontesting'
def func(i):
print(i)
sleep(1)
for i in range(1, 100000000000000):
func(i) | 2.5 | 2 |
bvp/DB.py | marklescroart/bvp | 2 | 12763662 | <reponame>marklescroart/bvp
"""
Each class should have: from_docdict(doc), from_blender(blender_object),
a docdict property, and to_blender (sets blend props?).

- Actions -
Action - must be linked to a specific class of armatures (which will be a property of bvpObjects)
- Armature_class
- wordnet_label
- semantic_category

-> Add armature field to objects
-> All animations must be based on armatures, and we should be able to flexibly define classes of armatures.
-> Positioning (for the START of an action) will still be handled by pos3D, rot3D, size3D.
-> Actions will need bounding boxes, which will have to be multiplied by the bounding boxes for objects.

TODO:
"""
from __future__ import absolute_import
# Imports
import numpy as np
import subprocess
import sys
import os
import time
import json
from .options import config
from . import dbqueries
from .Classes.action import Action
from .Classes.background import Background
from .Classes.camera import Camera
#from .Classes.constraint import ObConstraint, CamConstraint
from .Classes.material import Material
from .Classes.object import Object
#from .Classes.render_options import RenderOptions
#from .Classes.scene import Scene
#from .Classes.scene_list import SceneList
from .Classes.shadow import Shadow
#from .Classes.Shape import Shape # Move to Object...?
from .Classes.sky import Sky
try:
import docdb_lite as docdb
docdb.is_verbose = False
docdb.orm.class_type.update(
Action=Action,
Background=Background,
Camera=Camera,
Material=Material,
Object=Object,
#RenderOptions=RenderOptions,
#Scene=Scene,
#SceneList=SceneList,
Shadow=Shadow,
#Shape=Shape,
Sky=Sky,
)
# add db queries for bvp stuff
setattr(docdb.dbqueries, 'bvp', dbqueries)
except ImportError:
print("No docdb_lite present! Database functionality won't work!") # Make me a better error message
# Defaults
dbhost = config.get('db','dbhost')
dbname = config.get('db','dbname')
is_verbose = config.get('db','is_verbose').lower() in ('t','true','yes','1')
return_objects = config.get('db','return_objects').lower() in ('t','true','yes','1')
verbosity_level = 3
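# Example [db] section of the config file these defaults are read from; the
# values shown are the documented defaults (see DBInterface.__init__ below),
# purely illustrative:
#
#   [db]
#   dbhost = localhost
#   dbname = bvp_1.0
#   is_verbose = False
#   return_objects = True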
# Make sure that all files in these directories contain objects / backgrounds / skies that you want to use. Otherwise, modify the lists of objects / bgs / skies below.
class DBInterface(docdb.couchclient.CouchDocDBClient):
def __init__(self, dbhost=dbhost, dbname=dbname, user=None, password=None, queries=('basic', 'bvp'),
is_verbose=is_verbose, return_objects=return_objects):
"""Class to interface with bvp elements stored in couch db
Files in the library directory must be stored according to bvp directory structure:
BaseDirectory/ object/*.blend
background/*.blend
sky/*.blend
shadow/*.blend
Parameters
----------
dbhost : string
Name for host server. Read from config file. Config default is intialized to be 'localhost'
dbname : string
Database name. Read from config file. Config default is intialized to be 'bvp_1.0'
"""
super(DBInterface, self).__init__(dbhost, dbname, user=user, password=password, queries=queries,
is_verbose=is_verbose, return_objects=return_objects)
# Set database root dir
try:
self.db_dir = os.path.expanduser(self.db['config']['db_dir'])
except:
# TODO: Make this an error
self.db_dir = None
def _cleanup(self):
"""Remove all .blend1 and .blend2 backup files from database"""
for root, _, files in os.walk(self.dbpath, topdown=True):
ff = [f for f in files if 'blend1' in f or 'blend2' in f]
for f in ff:
os.unlink(os.path.join(root, f))
def posed_object_list(self):
"""Get a list of posed objects as bvpObjects - duplicate each object for however many poses it has
"""
raise NotImplementedError("Not yet!")
ObList = []
for o in self.objects:
if o['nPoses']:
for p in range(o['nPoses']):
ObList.append(Object(o['name'], self, size3D=None, pose=p))
else:
ObList.append(Object(o['name'], self, size3D=None, pose=None))
return ObList
def render_objects(self, query_dict, rtype=('Image', ), rot_list=(0, ), render_pose=True, render_group_size=1, is_overwrite=False, scale_obj=None):
"""
Render (all) objects in bvpLibrary
IS THIS NECESSARY? maybe. More flexible render class for simple renders:
define scene, diff camera angles, put objects into it?
or: rotate objects? (define armature?)
or: render all actions?
ScaleObj = optional scale object to render along with this object (NOT FINISHED!)
"""
raise NotImplementedError("Not yet!")
RO = RenderOptions() # Should be an input, as should scene
RO.BVPopts['BasePath'] = os.path.join(self.LibDir, 'Images', 'Objects', 'Scenes', '%s')
RO.resolution_x = RO.resolution_y = 256 # smaller images
if subCat:
ToRender = self.getSCL(subCat, 'objects')
else:
ToRender = self.objects # all objects
ObCt = 0
ScnL = []
for o in ToRender:
# Get all object variations to add as separate scenes
ObToAdd = []
for rotZ in rotList:
if o['nPoses'] and render_Pose:
for p in range(o['nPoses']):
O = Object(obID=o['name'], Lib=self, pos3D=(0, 0, 0), size3D=10, rot3D=(0, 0, rotZ), pose=p)
ObToAdd.append(O)
if scaleObj:
ScObSz = 10.*scaleObj.size3D/O.size3D
ScObToAdd.append
else:
O = Object(obID=o['name'], Lib=self, pos3D=(0, 0, 0), size3D=10, rot3D=(0, 0, rotZ))
ObToAdd.append(O)
# Add scale object in here somehwhere... Scale for each object!
if scaleObj:
ScObSz = 10.*scaleObj.size3D/O.size3D
ScObToAdd.append
# Camera, Lights (Sky), Background
Cam = Camera()
Sky = Sky()
BG = Background()
# Objects
for Obj in ObToAdd:
# Create Scene
ObCt+=1
if Obj.pose or Obj.pose==0:
pNum = Obj.pose+1
else:
pNum = 1
fpath = '%s_%s_p%d_r%d_fr##'%(Obj.semantic_category[0], Obj.name, pNum, Obj.rot3D[2])
ScnL.append(Scene(Num=ObCt, Obj=(Obj, ), BG=BG, Sky=Sky,
Shadow=None, Cam=Cam, FrameRange=(1, 1),
fpath=fpath, FrameRate=15))
# Convert list of scenes to SceneList
SL = SceneList(ScnList=ScnL, RenderOptions=RO)
SL.RenderSlurm(RenderGroupSize=renderGroupSize, RenderType=Type)
#SL.Render(RenderGroupSize=renderGroupSize, RenderType=Type)
def RenderBGs(self, subCat=None, dummyObjects=(), nCamLoc=5, Is_Overwrite=False):
"""
Render (all) backgrounds in bvpLibrary to folder <LibDir>/Images/Backgrounds/<category>_<name>.png
subCat = None #lambda x: x['name']=='BG_201_mHouse_1fl_1' #None #'outdoor'
dummyObjects = ['*human', '*artifact', '*vehicle']
"""
raise NotImplementedError("Not yet!")
RO = RenderOptions()
RO.BVPopts['BasePath'] = os.path.join(self.LibDir, 'Images', 'Backgrounds', '%s')
RO.resolution_x = RO.resolution_y = 256 # smaller images
if subCat:
ToRender = self.getSCL(subCat, 'backgrounds')
else:
ToRender = self.backgrounds # all backgrounds
# Frame count
frames = (1, 1)
# Get dummy objects to put in scenes:
# Misc Setup
BGCt = 0;
ScnL = []
for bg in ToRender:
BGCt+=1
# Create Scene
BG = Background(bgID=bg['name'], Lib=self)
ObL = []
for o in dummyObjects:
ObL.append(Object(obID=o, Lib=self, size3D=None))
for p in range(nCamLoc):
cNum = p+1
fpath = '%s_%s_cp%02d_fr##'%(BG.semantic_category[0], BG.name, cNum)
fChk = RO.BVPopts['BasePath']%fpath.replace('##', '01.'+RO.image_settings['file_format'].lower())
print('Checking for file: %s'%(fChk))
if os.path.exists(fChk) and not Is_Overwrite:
print('Found it!')
# Only append scenes to render that DO NOT have previews already rendered!
continue
Cam = Camera(location=BG.CamConstraint.sample_cam_pos(frames), fixPos=BG.CamConstraint.sampleFixPos(frames), frames=frames)
Sky = Sky('*'+BG.sky_semantic_category[0], Lib=self)
if Sky.semantic_category:
if 'dome' in Sky.semantic_category:
if len(Sky.lightLoc)>1:
Shad=None
elif len(Sky.lightLoc)==1:
if 'sunset' in Sky.semantic_category:
Shad = Shadow('*west', self)
else:
fn = lambda x: 'clouds' in x['semantic_category'] and not 'west' in x['semantic_category']
Shad = Shadow(fn, self)
else:
Shad=None
else:
Shad = None
S = Scene(Num=BGCt, BG=BG, Sky=Sky, Obj=None,
Shadow=Shad, Cam=Cam, FrameRange=frames,
fpath=fpath,
FrameRate=15)
try:
# Allow re-set of camera position with each attempt to populate scene
S.populate_scene(ObL, ResetCam=True)
except:
print('Unable to populate scene %s!'%S.fpath)
ScnL.append(S)
# Convert list of scenes to SceneList
SL = SceneList(ScnList=ScnL, RenderOptions=RO)
SL.RenderSlurm(RenderGroupSize=nCamLoc)
def RenderSkies(self, subCat=None, Is_Overwrite=False):
"""
Render (all) skies in bvpLibrary to folder <LibDir>/LibBackgrounds/<category>_<name>.png
subCat = None # lambda x: 'dome' in x['semantic_category']
"""
raise Exception('Not done yet!')
RO = RenderOptions()
RO.BVPopts['BasePath'] = os.path.join(self.LibDir, 'Images', 'Skies', '%s')
RO.resolution_x = RO.resolution_y = 256 # smaller images
if subCat:
ToRender = self.getSCL(subCat, 'backgrounds')
else:
ToRender = self.backgrounds # all backgrounds
# Frame count
frames = (1, 1)
# set standard lights (Sky)
Sky = Sky()
# Get dummy objects to put in scenes:
ObL = []
for o in dummyObjects:
ObL.append(Object(obID=o, Lib=self, size3D=None))
# Misc Setup
BGCt = 0;
ScnL = []
for bg in ToRender:
BGCt+=1
# Create Scene
BG = Background(bgID=bg['name'], Lib=self)
for p in range(nCamLoc):
cNum = p+1
fpath = '%s_%s_cp%d_fr##'%(BG.semantic_category[0], BG.name, cNum)
fChk = RO.BVPopts['BasePath']%fpath.replace('##', '01.'+RO.file_format.lower())
print('Checking for file: %s'%(fChk))
if os.path.exists(fChk) and not Is_Overwrite:
print('Found it!')
# Only append scenes to render that DO NOT have previews already rendered!
continue
Cam = Camera(location=BG.CamConstraint.sample_cam_pos(frames), fixPos=BG.CamConstraint.sampleFixPos(frames), frames=frames)
S = Scene(Num=BGCt, BG=BG, Sky=Sky, Obj=None,
Shadow=None, Cam=Cam, FrameRange=(1, 1),
fpath=fpath,
FrameRate=15)
#try:
# Allow re-set of camera position with each attempt to populate scene
S.populate_scene(ObL, ResetCam=True)
#except:
# print('Unable to populate scene %s!'%S.fpath)
ScnL.append(S)
# Convert list of scenes to SceneList
SL = SceneList(ScnList=ScnL, RenderOptions=RO)
SL.RenderSlurm(RenderGroupSize=nCamLoc)
def CreateSolidVol(self, obj=None, vRes=96, buf=4):
"""
Searches for extant .voxverts files in <LibDir>/Objects/VOL_Files/, and from them creates
3D, filled object mask matrices
Saves this voxelized verison of an object as a .vol file in the <LibDir>/Objects/VOL_Files/ directory.
Can not be called from inside Blender, since it relies on numpy
Volume for voxelized object mask is vRes+4 (usually 96+4=100) to allow for a couple voxels' worth
of "wiggle room" for imprecise scalings of objects (not all will be exactly 10 units - that part
of object creation is manual and can be difficult to get exactly right)
Voxelizations are used to create shape skeletons in subsequent processing.
Since the voxelized mesh surfaces of objects qualifies as meta-data about the objects,
this function might be expected to be a method of the RenderOptions class. However, this
isn't directly used by any models (yet); thus it has been saved in a separate place, as
the data about real-world size, number of mesh vertices, etc.
"""
# Imports
import re, os
from scipy.ndimage.morphology import binary_fill_holes as imfill # Fills holes in multi-dim images
if not obj:
obj = self.objects
for o in obj:
# Check for existence of .verts file:
ff = '%s_%s.%dx%dx%d.verts'%(o['semantic_category'][0].capitalize(), o['name'], vRes+buf, vRes+buf, vRes+buf*2)
fNm = os.path.join(Settings['Paths']['LibDir'], 'Objects', 'VOL_Files', ff)
if not os.path.exists(fNm):
if verbosity_level>3:
print('Could not find .verts file for %s'%o['name'])
print('(Searched for %s)'%fNm)
continue
# Get voxelized vert list
with open(fNm, 'r') as fid:
Pt = fid.readlines()
vL = np.array([[float(x) for x in k.split(', ')] for k in Pt])
# Get dimensions
dim = [len(np.unique(vL.T[i])) for i in range(3)]
# Create blank matrix
z = np.zeros((vRes+buf, vRes+buf, vRes+buf*2), dtype=bool)
# Normalize matrix to indices for volume
vLn = vL/(10./vRes) -.5 + buf/2. # .5 is a half-voxel shift down
vLn.T[0:2]+= vRes/2. # Move X, Y to center
vLn.T[2] += buf/2. # Move Z up (off floor) by "buf"/2 again
# Check for closeness of values to rounded values
S = np.sqrt(np.sum((np.round(vLn)-vLn)**2))/len(vLn.flatten())
if S>.001:
raise Exception('Your voxelized coordinates do not round to whole number indices!')
# Index into volume
idx = np.cast['int'](np.round(vLn))
z[tuple(idx.T)] = True
# Fill holes w/ python
# May need fancier strel (structure element - 2nd argumnet) for some objects
hh = imfill(z)
# Trim?? for more efficient computation?
# ?
# Save volume in binary format for pfSkel (or other) code:
PF = o['fname']
fDir = os.path.split(PF)[0]
Cat = re.search('(?<=Category_)[^_^.]*', PF).group()
Res = '%dx%dx%d'%(vRes+buf, vRes+buf, vRes+buf+buf)
fName = os.path.join(fDir, 'VOL_Files', Cat+'_'+o['name']+'.'+Res+'.vol')
# Write to binary file
print('Saving %s'%fName)
with open(fName, 'wb') as fid:
hh.T.tofile(fid) # Transpose to put it in column-major form...
# Done with this object!
@classmethod
def create_db_from_json(cls, fname, dbname, dbhost, db_dir):
"""Creates database from a json file
If fname for json file is None, just creates an empty database"""
import couchdb
server = couchdb.Server(dbhost)
print("Creating database {} on {}".format(dbname, dbhost))
server.create(dbname)
server[dbname].save(dict(_id='config', db_dir=db_dir))
dbi = cls.__new__(cls)
dbi.__init__(dbname=dbname, dbhost=dbhost)
print("Setting up queries...")
dbi.set_up_db()
if fname is not None:
docs = json.load(open(fname))
# Exclude config file, we did that already. Shouldn't be here anyway...
docs = [d for d in docs if not d['_id'] == 'config']
print("Uploading documents...")
dbi.put_documents(docs)
print("Done!")
@classmethod
def start_db_server(cls, cmd=None): # set cmd from config file
# Options for cmd from particular os
raise NotImplementedError("Not yet!")
subprocess.call(cmd) # Or whatever to run this in the backgroud.
# @property
# def n_object(self):
# return len(self.objects)
# @property
# def n_object_count_poses(self):
# nOb = 0
# for o in self.objects:
# if o['nPoses']:
# nOb+=o['nPoses']
# else:
# nOb+=1
# return nOb
# @property
# def n_background(self):
# return len(self.backgrounds)
# @property
# def n_sky(self):
# return len(self.skies)
# @property
# def n_shadow(self):
# return len(self.shadows)
| 1.890625 | 2 |
gltf/converter.py | rdb/panda3d-gltf | 1 | 12763663 | import base64
import collections
import itertools
import os
import math
import struct
import pprint # pylint: disable=unused-import
from panda3d.core import * # pylint: disable=wildcard-import
try:
from panda3d import bullet
HAVE_BULLET = True
except ImportError:
HAVE_BULLET = False
load_prc_file_data(
__file__,
'interpolate-frames #t\n'
)
GltfSettings = collections.namedtuple('GltfSettings', (
'physics_engine',
'print_scene',
'skip_axis_conversion',
))
GltfSettings.__new__.__defaults__ = (
'builtin', # physics engine
False, # print_scene
False, # skip_axis_conversion
)
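# Usage sketch: GltfSettings is a namedtuple with the defaults listed above, so
# individual fields can be overridden by keyword (values here are illustrative):
#
#   settings = GltfSettings(physics_engine='bullet', print_scene=True)
#   converter = Converter(settings=settings)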
class Converter():
_COMPONENT_TYPE_MAP = {
5120: GeomEnums.NT_int8,
5121: GeomEnums.NT_uint8,
5122: GeomEnums.NT_int16,
5123: GeomEnums.NT_uint16,
5124: GeomEnums.NT_int32,
5125: GeomEnums.NT_uint32,
5126: GeomEnums.NT_float32,
}
_COMPONENT_NUM_MAP = {
'MAT4': 16,
'VEC4': 4,
'VEC3': 3,
'VEC2': 2,
'SCALAR': 1,
}
_ATTRIB_CONENT_MAP = {
'vertex': GeomEnums.C_point,
'normal': GeomEnums.C_normal,
'texcoord': GeomEnums.C_texcoord,
'color': GeomEnums.C_color,
'weights': GeomEnums.C_point,
'joints': GeomEnums.C_point,
}
_ATTRIB_NAME_MAP = {
'position': InternalName.get_vertex().get_name(),
'weights': InternalName.get_transform_weight().get_name(),
'joints': InternalName.get_transform_index().get_name(),
}
_PRIMITIVE_MODE_MAP = {
0: GeomPoints,
1: GeomLines,
3: GeomLinestrips,
4: GeomTriangles,
5: GeomTristrips,
6: GeomTrifans,
}
def __init__(
self,
indir=Filename.from_os_specific(os.getcwd()),
outdir=Filename.from_os_specific(os.getcwd()),
settings=GltfSettings()
):
self.indir = indir
self.outdir = outdir
self.settings = settings
self.cameras = {}
self.buffers = {}
self.lights = {}
self.textures = {}
self.mat_states = {}
self.mat_mesh_map = {}
self.meshes = {}
self.nodes = {}
self.node_paths = {}
self.scenes = {}
self.characters = {}
self.joint_map = {}
# Coordinate system transform matrix
self.csxform = LMatrix4.convert_mat(CS_yup_right, CS_default)
self.csxform_inv = LMatrix4.convert_mat(CS_default, CS_yup_right)
self.compose_cs = CS_yup_right
self._joint_nodes = set()
# Scene props
self.active_scene = NodePath(ModelRoot('default'))
self.background_color = (0, 0, 0)
self.active_camera = None
def update(self, gltf_data, writing_bam=False):
#pprint.pprint(gltf_data)
skip_axis_conversion = (
'extensionsUsed' in gltf_data and 'BP_zup' in gltf_data['extensionsUsed'] or
self.settings.skip_axis_conversion
)
if skip_axis_conversion:
self.csxform = LMatrix4.ident_mat()
self.csxform_inv = LMatrix4.ident_mat()
self.compose_cs = CS_zup_right
# Convert data
for buffid, gltf_buffer in enumerate(gltf_data.get('buffers', [])):
self.load_buffer(buffid, gltf_buffer)
for camid, gltf_cam in enumerate(gltf_data.get('cameras', [])):
self.load_camera(camid, gltf_cam)
if 'extensions' in gltf_data and 'KHR_lights' in gltf_data['extensions']:
lights = gltf_data['extensions']['KHR_lights'].get('lights', [])
for lightid, gltf_light in enumerate(lights):
self.load_light(lightid, gltf_light)
for texid, gltf_tex in enumerate(gltf_data.get('textures', [])):
self.load_texture(texid, gltf_tex, gltf_data)
self.load_fallback_texture()
for matid, gltf_mat in enumerate(gltf_data.get('materials', [])):
self.load_material(matid, gltf_mat)
for skinid, gltf_skin in enumerate(gltf_data.get('skins', [])):
self.load_skin(skinid, gltf_skin, gltf_data)
for meshid, gltf_mesh in enumerate(gltf_data.get('meshes', [])):
self.load_mesh(meshid, gltf_mesh, gltf_data)
for nodeid, gltf_node in enumerate(gltf_data.get('nodes', [])):
node_name = gltf_node.get('name', 'node'+str(nodeid))
node = self.nodes.get(nodeid, PandaNode(node_name))
self.nodes[nodeid] = node
# If we support writing bam 6.40, we can safely write out
# instanced lights. If not, we have to copy it.
copy_lights = writing_bam and not hasattr(BamWriter, 'root_node')
# Build scenegraphs
def add_node(root, gltf_scene, nodeid):
try:
gltf_node = gltf_data['nodes'][nodeid]
except IndexError:
print("Could not find node with index: {}".format(nodeid))
return
node_name = gltf_node.get('name', 'node'+str(nodeid))
if nodeid in self._joint_nodes:
# don't handle joints here
return
panda_node = self.nodes[nodeid]
if 'extras' in gltf_scene and 'hidden_nodes' in gltf_scene['extras']:
if nodeid in gltf_scene['extras']['hidden_nodes']:
panda_node = panda_node.make_copy()
np = self.node_paths.get(nodeid, root.attach_new_node(panda_node))
self.node_paths[nodeid] = np
if 'mesh' in gltf_node:
mesh = self.meshes[gltf_node['mesh']]
np_tmp = np
if 'skin' in gltf_node:
char = self.characters[gltf_node['skin']]
np_tmp = np.attach_new_node(char)
self.combine_mesh_skin(mesh, gltf_node['skin'])
np_tmp.attach_new_node(mesh)
if 'skin' in gltf_node and not 'mesh' in gltf_node:
print(
"Warning: node {} has a skin but no mesh"
.format(nodeid)
)
if 'camera' in gltf_node:
camid = gltf_node['camera']
cam = self.cameras[camid]
np.attach_new_node(cam)
if 'extensions' in gltf_node:
if 'KHR_lights' in gltf_node['extensions']:
lightid = gltf_node['extensions']['KHR_lights']['light']
light = self.lights[lightid]
if copy_lights:
light = light.make_copy()
lnp = np.attach_new_node(light)
if isinstance(light, Light):
root.set_light(lnp)
if 'BLENDER_physics' in gltf_node['extensions']:
gltf_collisions = gltf_node['extensions']['BLENDER_physics']
gltf_rigidbody = gltf_node['extensions']['BLENDER_physics']
collision_shape = gltf_collisions['collisionShapes'][0]
shape_type = collision_shape['shapeType']
bounding_box = collision_shape['boundingBox']
radius = max(bounding_box[0], bounding_box[1]) / 2.0
height = bounding_box[2]
geomnode = None
if 'mesh' in collision_shape:
try:
geomnode = self.meshes[collision_shape['mesh']]
except KeyError:
print(
"Could not find physics mesh ({}) for object ({})"
.format(collision_shape['mesh'], nodeid)
)
if 'BP_physics_engine' in gltf_data['extensions']:
use_bullet = (
gltf_data['extensions']['BP_physics_engine']['engine'] == 'bullet'
)
else:
use_bullet = self.settings.physics_engine == 'bullet'
if use_bullet and not HAVE_BULLET:
print(
'Warning: attempted to export for Bullet, which is unavailable, falling back to builtin'
)
use_bullet = False
if use_bullet:
phynode = self.load_physics_bullet(
node_name,
geomnode,
shape_type,
bounding_box,
radius,
height,
gltf_rigidbody
)
else:
phynode = self.load_physics_builtin(
node_name,
geomnode,
shape_type,
bounding_box,
radius,
height,
gltf_rigidbody
)
if phynode is not None:
np.attach_new_node(phynode)
if 'extras' in gltf_node:
for key, value in gltf_node['extras'].items():
np.set_tag(key, str(value))
for child_nodeid in gltf_node.get('children', []):
add_node(np, gltf_scene, child_nodeid)
# Handle visibility after children are loaded
def visible_recursive(node, visible):
if visible:
node.show()
else:
node.hide()
for child in node.get_children():
visible_recursive(child, visible)
if 'extras' in gltf_scene and 'hidden_nodes' in gltf_scene['extras']:
if nodeid in gltf_scene['extras']['hidden_nodes']:
#print('Hiding', np)
visible_recursive(np, False)
else:
#print('Showing', np)
visible_recursive(np, True)
# Check if we need to deal with negative scale values
scale = panda_node.get_transform().get_scale()
negscale = scale.x * scale.y * scale.z < 0
if negscale:
for geomnode in np.find_all_matches('**/+GeomNode'):
tmp = geomnode.get_parent().attach_new_node(PandaNode('ReverseCulling'))
tmp.set_attrib(CullFaceAttrib.make_reverse())
geomnode.reparent_to(tmp)
for sceneid, gltf_scene in enumerate(gltf_data.get('scenes', [])):
scene_name = gltf_scene.get('name', 'scene'+str(sceneid))
scene_root = NodePath(ModelRoot(scene_name))
node_list = gltf_scene['nodes']
if 'extras' in gltf_scene and 'hidden_nodes' in gltf_scene['extras']:
node_list += gltf_scene['extras']['hidden_nodes']
for nodeid in node_list:
add_node(scene_root, gltf_scene, nodeid)
self.scenes[sceneid] = scene_root
# Update node transforms for glTF nodes that have a NodePath
for nodeid, gltf_node in enumerate(gltf_data.get('nodes', [])):
if nodeid not in self.node_paths:
continue
np = self.node_paths[nodeid]
gltf_pos = LVector3(*gltf_node.get('translation', [0, 0, 0]))
gltf_rot = self.load_quaternion_as_hpr(gltf_node.get('rotation', [0, 0, 0, 1]))
gltf_scale = LVector3(*gltf_node.get('scale', [1, 1, 1]))
gltf_mat = LMatrix4()
compose_matrix(gltf_mat, gltf_scale, gltf_rot, gltf_pos, self.compose_cs)
if np.has_parent():
parent_mat = np.get_parent().get_mat()
else:
parent_mat = LMatrix4.ident_mat()
parent_inv = LMatrix4(parent_mat)
parent_inv.invert_in_place()
np.set_mat(self.csxform * gltf_mat * self.csxform_inv)
# Set the active scene
sceneid = gltf_data.get('scene', 0)
if sceneid in self.scenes:
self.active_scene = self.scenes[sceneid]
if 'scenes' in gltf_data:
gltf_scene = gltf_data['scenes'][sceneid]
if 'extras' in gltf_scene:
if 'background_color' in gltf_scene['extras']:
self.background_color = gltf_scene['extras']['background_color']
if 'active_camera' in gltf_scene['extras']:
self.active_camera = gltf_scene['extras']['active_camera']
def load_matrix(self, mat):
lmat = LMatrix4()
for i in range(4):
lmat.set_row(i, LVecBase4(*mat[i * 4: i * 4 + 4]))
return lmat
def load_quaternion_as_hpr(self, quaternion):
quat = LQuaternion(quaternion[3], quaternion[0], quaternion[1], quaternion[2])
return quat.get_hpr()
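# Note on the conversion above: glTF stores rotations as (x, y, z, w) while
# LQuaternion is constructed as (w, x, y, z), hence the reordering before
# get_hpr(). Illustrative check: the identity rotation [0, 0, 0, 1] maps to
# hpr (0, 0, 0).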
def load_buffer(self, buffid, gltf_buffer):
uri = gltf_buffer['uri']
if uri.startswith('data:application/octet-stream;base64'):
buff_data = gltf_buffer['uri'].split(',')[1]
buff_data = base64.b64decode(buff_data)
elif uri.endswith('.bin'):
buff_fname = os.path.join(self.indir.to_os_specific(), uri)
with open(buff_fname, 'rb') as buff_file:
buff_data = buff_file.read()
else:
print(
"Buffer {} has an unsupported uri ({}), using a zero filled buffer instead"
.format(buffid, uri)
)
buff_data = bytearray(gltf_buffer['byteLength'])
self.buffers[buffid] = buff_data
def make_texture_srgb(self, texture):
if texture.get_num_components() == 3:
texture.set_format(Texture.F_srgb)
elif texture.get_num_components() == 4:
texture.set_format(Texture.F_srgb_alpha)
def load_fallback_texture(self):
texture = Texture('pbr-fallback')
texture.setup_2d_texture(1, 1, Texture.T_unsigned_byte, Texture.F_rgba)
texture.set_clear_color(LColor(1, 1, 1, 1))
self.textures['__bp-pbr-fallback'] = texture
def load_texture(self, texid, gltf_tex, gltf_data):
if 'source' not in gltf_tex:
print("Texture '{}' has no source, skipping".format(texid))
return
source = gltf_data['images'][gltf_tex['source']]
uri = source['uri']
def write_tex_image(ext):
texname = 'tex{}.{}'.format(gltf_tex['source'], ext)
texdata = base64.b64decode(uri.split(',')[1])
texfname = os.path.join(self.outdir.to_os_specific(), texname)
with open(texfname, 'wb') as texfile:
texfile.write(texdata)
return texfname
if uri.startswith('data:image/png;base64'):
uri = write_tex_image('png')
elif uri.startswith('data:image/jpeg;base64'):
uri = write_tex_image('jpeg')
else:
uri = Filename.fromOsSpecific(uri)
texture = TexturePool.load_texture(uri, 0, False, LoaderOptions())
if 'sampler' in gltf_tex:
gltf_sampler = gltf_data['samplers'][gltf_tex['sampler']]
if 'magFilter' in gltf_sampler:
if gltf_sampler['magFilter'] == 9728:
texture.set_magfilter(SamplerState.FT_nearest)
elif gltf_sampler['magFilter'] == 9729:
texture.set_magfilter(SamplerState.FT_linear)
else:
print(
"Sampler {} has unsupported magFilter type {}"
.format(gltf_tex['sampler'], gltf_sampler['magFilter'])
)
if 'minFilter' in gltf_sampler:
if gltf_sampler['minFilter'] == 9728:
texture.set_minfilter(SamplerState.FT_nearest)
elif gltf_sampler['minFilter'] == 9729:
texture.set_minfilter(SamplerState.FT_linear)
elif gltf_sampler['minFilter'] == 9984:
texture.set_minfilter(SamplerState.FT_nearest_mipmap_nearest)
elif gltf_sampler['minFilter'] == 9985:
texture.set_minfilter(SamplerState.FT_linear_mipmap_nearest)
elif gltf_sampler['minFilter'] == 9986:
texture.set_minfilter(SamplerState.FT_nearest_mipmap_linear)
elif gltf_sampler['minFilter'] == 9987:
texture.set_minfilter(SamplerState.FT_linear_mipmap_linear)
else:
print(
"Sampler {} has unsupported minFilter type {}"
.format(gltf_tex['sampler'], gltf_sampler['minFilter'])
)
wraps = gltf_sampler.get('wrapS', 10497)
if wraps == 33071:
texture.set_wrap_u(SamplerState.WM_clamp)
elif wraps == 33648:
texture.set_wrap_u(SamplerState.WM_mirror)
elif wraps == 10497:
texture.set_wrap_u(SamplerState.WM_repeat)
else:
print(
"Sampler {} has unsupported wrapS type {}"
.format(gltf_tex['sampler'], gltf_sampler['wrapS'])
)
wrapt = gltf_sampler.get('wrapT', 10497)
if wrapt == 33071:
texture.set_wrap_v(SamplerState.WM_clamp)
elif wrapt == 33648:
texture.set_wrap_v(SamplerState.WM_mirror)
elif wrapt == 10497:
texture.set_wrap_v(SamplerState.WM_repeat)
else:
print(
"Sampler {} has unsupported wrapT type {}"
.format(gltf_tex['sampler'], gltf_sampler['wrapT'])
)
self.textures[texid] = texture
def load_material(self, matid, gltf_mat):
matname = gltf_mat.get('name', 'mat'+str(matid))
state = self.mat_states.get(matid, RenderState.make_empty())
if matid not in self.mat_mesh_map:
self.mat_mesh_map[matid] = []
pmat = Material(matname)
pbr_fallback = {'index': '__bp-pbr-fallback', 'texCoord': 0}
texinfos = []
if 'extensions' in gltf_mat and 'BP_materials_legacy' in gltf_mat['extensions']:
matsettings = gltf_mat['extensions']['BP_materials_legacy']['bpLegacy']
pmat.set_shininess(matsettings['shininessFactor'])
pmat.set_ambient(LColor(*matsettings['ambientFactor']))
if 'diffuseTexture' in matsettings:
texinfo = matsettings['diffuseTexture']
texinfos.append(texinfo)
if matsettings['diffuseTextureSrgb'] and texinfo['index'] in self.textures:
self.make_texture_srgb(self.textures[texinfo['index']])
else:
pmat.set_diffuse(LColor(*matsettings['diffuseFactor']))
if 'emissionTexture' in matsettings:
texinfo = matsettings['emissionTexture']
texinfos.append(texinfo)
if matsettings['emissionTextureSrgb'] and texinfo['index'] in self.textures:
self.make_texture_srgb(self.textures[texinfo['index']])
else:
pmat.set_emission(LColor(*matsettings['emissionFactor']))
if 'specularTexture' in matsettings:
texinfo = matsettings['specularTexture']
texinfos.append(texinfo)
if matsettings['specularTextureSrgb'] and texinfo['index'] in self.textures:
self.make_texture_srgb(self.textures[texinfo['index']])
else:
pmat.set_specular(LColor(*matsettings['specularFactor']))
elif 'pbrMetallicRoughness' in gltf_mat:
pbrsettings = gltf_mat['pbrMetallicRoughness']
pmat.set_base_color(LColor(*pbrsettings.get('baseColorFactor', [1.0, 1.0, 1.0, 1.0])))
texinfos.append(pbrsettings.get('baseColorTexture', pbr_fallback))
if texinfos[-1]['index'] in self.textures:
self.make_texture_srgb(self.textures[texinfos[-1]['index']])
pmat.set_metallic(pbrsettings.get('metallicFactor', 1.0))
pmat.set_roughness(pbrsettings.get('roughnessFactor', 1.0))
texinfos.append(pbrsettings.get('metallicRoughnessTexture', pbr_fallback))
pmat.set_twoside(gltf_mat.get('doubleSided', False))
state = state.set_attrib(MaterialAttrib.make(pmat))
# Setup textures
tex_attrib = TextureAttrib.make()
for i, texinfo in enumerate(texinfos):
texdata = self.textures.get(texinfo['index'], None)
if texdata is None:
print("Could not find texture for key: {}".format(texinfo['index']))
continue
texstage = TextureStage(str(i))
texstage.set_texcoord_name(InternalName.get_texcoord_name(str(texinfo.get('texCoord', 0))))
tex_attrib = tex_attrib.add_on_stage(texstage, texdata)
state = state.set_attrib(tex_attrib)
# Setup Alpha mode
alpha_mode = gltf_mat.get('alphaMode', 'OPAQUE')
if alpha_mode == 'MASK':
alpha_cutoff = gltf_mat.get('alphaCutoff', 0.5)
alpha_attrib = AlphaTestAttrib.make(AlphaTestAttrib.M_greater_equal, alpha_cutoff)
state = state.set_attrib(alpha_attrib)
elif alpha_mode == 'BLEND':
transp_attrib = TransparencyAttrib.make(TransparencyAttrib.M_alpha)
state = state.set_attrib(transp_attrib)
elif alpha_mode != 'OPAQUE':
print(
"Warning: material {} has an unsupported alphaMode: {}"
.format(matid, alpha_mode)
)
# Remove stale meshes
self.mat_mesh_map[matid] = [
pair for pair in self.mat_mesh_map[matid] if pair[0] in self.meshes
]
# Reload the material
for meshid, geom_idx in self.mat_mesh_map[matid]:
self.meshes[meshid].set_geom_state(geom_idx, state)
self.mat_states[matid] = state
def create_anim(self, character, root_bone_id, animid, gltf_anim, gltf_data):
anim_name = gltf_anim.get('name', 'anim'+str(animid))
samplers = gltf_anim['samplers']
# Blender exports the same number of elements in each time parameter, so find
# one and assume that the number of elements is the number of frames
time_acc_id = samplers[0]['input']
time_acc = gltf_data['accessors'][time_acc_id]
time_bv = gltf_data['bufferViews'][time_acc['bufferView']]
start = time_acc['byteOffset'] + time_bv['byteOffset']
end = start + time_acc['count'] * 4
time_data = [
struct.unpack_from('<f', self.buffers[time_bv['buffer']], idx)[0]
for idx in range(start, end, 4)
]
num_frames = time_acc['count']
fps = num_frames / time_data[-1]
bundle_name = anim_name
bundle = AnimBundle(bundle_name, fps, num_frames)
skeleton = AnimGroup(bundle, '<skeleton>')
def create_anim_channel(parent, boneid):
bone = gltf_data['nodes'][boneid]
bone_name = bone.get('name', 'bone'+str(boneid))
channels = [chan for chan in gltf_anim['channels'] if chan['target']['node'] == boneid]
joint_mat = character.find_joint(bone_name).get_transform()
group = AnimChannelMatrixXfmTable(parent, bone_name)
def get_accessor(path):
accessors = [
gltf_data['accessors'][samplers[chan['sampler']]['output']]
for chan in channels
if chan['target']['path'] == path
]
return accessors[0] if accessors else None
def extract_chan_data(path):
vals = []
acc = get_accessor(path)
buff_view = gltf_data['bufferViews'][acc['bufferView']]
buff_data = self.buffers[buff_view['buffer']]
start = acc['byteOffset'] + buff_view['byteOffset']
if path == 'rotation':
end = start + acc['count'] * 4 * 4
data = [struct.unpack_from('<ffff', buff_data, idx) for idx in range(start, end, 4 * 4)]
vals = [
[i[0] for i in data],
[i[1] for i in data],
[i[2] for i in data],
[i[3] for i in data]
]
#convert quats to hpr
vals = list(zip(*[LQuaternion(i[3], i[0], i[1], i[2]).get_hpr() for i in zip(*vals)]))
else:
end = start + acc['count'] * 3 * 4
data = [struct.unpack_from('<fff', buff_data, idx) for idx in range(start, end, 3 * 4)]
vals = [
[i[0] for i in data],
[i[1] for i in data],
[i[2] for i in data]
]
return vals
            # Create default animation data
translation = LVector3()
rotation = LVector3()
scale = LVector3()
decompose_matrix(joint_mat, scale, rotation, translation, self.compose_cs)
loc_vals = list(zip(
*[(translation.get_x(), translation.get_y(), translation.get_z()) for i in range(num_frames)]
))
rot_vals = list(zip(
*[(rotation.get_x(), rotation.get_y(), rotation.get_z()) for i in range(num_frames)]
))
scale_vals = list(zip(
*[(scale.get_x(), scale.get_y(), scale.get_z()) for i in range(num_frames)]
))
# Override defaults with any found animation data
if get_accessor('translation') is not None:
loc_vals = extract_chan_data('translation')
if get_accessor('rotation') is not None:
rot_vals = extract_chan_data('rotation')
if get_accessor('scale') is not None:
scale_vals = extract_chan_data('scale')
            # Write data to tables (x/y/z: translation, h/p/r: rotation, i/j/k: scale)
group.set_table(b'x', CPTAFloat(PTAFloat(loc_vals[0])))
group.set_table(b'y', CPTAFloat(PTAFloat(loc_vals[1])))
group.set_table(b'z', CPTAFloat(PTAFloat(loc_vals[2])))
group.set_table(b'h', CPTAFloat(PTAFloat(rot_vals[0])))
group.set_table(b'p', CPTAFloat(PTAFloat(rot_vals[1])))
group.set_table(b'r', CPTAFloat(PTAFloat(rot_vals[2])))
group.set_table(b'i', CPTAFloat(PTAFloat(scale_vals[0])))
group.set_table(b'j', CPTAFloat(PTAFloat(scale_vals[1])))
group.set_table(b'k', CPTAFloat(PTAFloat(scale_vals[2])))
for childid in bone.get('children', []):
create_anim_channel(group, childid)
create_anim_channel(skeleton, root_bone_id)
character.add_child(AnimBundleNode(character.name, bundle))
def load_skin(self, skinid, gltf_skin, gltf_data):
skinname = gltf_skin.get('name', 'char'+str(skinid))
#print("Creating character for", skinname)
root = gltf_data['nodes'][gltf_skin['skeleton']]
character = Character(skinname)
bundle = character.get_bundle(0)
skeleton = PartGroup(bundle, "<skeleton>")
jvtmap = {}
bind_mats = []
ibmacc = gltf_data['accessors'][gltf_skin['inverseBindMatrices']]
ibmbv = gltf_data['bufferViews'][ibmacc['bufferView']]
start = ibmacc['byteOffset'] + ibmbv['byteOffset']
end = start + ibmacc['count'] * 16 * 4
ibmdata = self.buffers[ibmbv['buffer']][start:end]
joint_ids = set()
for i in range(ibmacc['count']):
mat = struct.unpack_from('<{}'.format('f'*16), ibmdata, i * 16 * 4)
#print('loaded', mat)
mat = self.load_matrix(mat)
mat.invert_in_place()
bind_mats.append(mat)
def create_joint(parent, nodeid, node, transform):
node_name = node.get('name', 'bone'+str(nodeid))
inv_transform = LMatrix4(transform)
inv_transform.invert_in_place()
joint_index = None
joint_mat = LMatrix4.ident_mat()
if nodeid in gltf_skin['joints']:
joint_index = gltf_skin['joints'].index(nodeid)
joint_mat = bind_mats[joint_index]
self._joint_nodes.add(nodeid)
# glTF uses an absolute bind pose, Panda wants it local
bind_pose = joint_mat * inv_transform
joint = CharacterJoint(character, bundle, parent, node_name, bind_pose)
# Non-deforming bones are not in the skin's jointNames, don't add them to the jvtmap
if joint_index is not None:
jvtmap[joint_index] = JointVertexTransform(joint)
joint_ids.add(nodeid)
for child in node.get('children', []):
#print("Create joint for child", child)
bone_node = gltf_data['nodes'][child]
create_joint(joint, child, bone_node, bind_pose * transform)
create_joint(skeleton, gltf_skin['skeleton'], root, LMatrix4.ident_mat())
self.characters[skinid] = character
self.joint_map[skinid] = jvtmap
# convert animations
#print("Looking for actions for", skinname, joint_ids)
anims = [
(animid, anim)
for animid, anim in enumerate(gltf_data.get('animations', []))
if joint_ids & {chan['target']['node'] for chan in anim['channels']}
]
if anims:
#print("Found anims for", skinname)
for animid, gltf_anim in anims:
#print("\t", gltf_anim.get('name', 'anim'+str(animid)))
self.create_anim(character, gltf_skin['skeleton'], animid, gltf_anim, gltf_data)
def load_primitive(self, geom_node, gltf_primitive, gltf_data):
# Build Vertex Format
vformat = GeomVertexFormat()
mesh_attribs = gltf_primitive['attributes']
accessors = [
{**gltf_data['accessors'][acc_idx], 'attrib': attrib_name}
for attrib_name, acc_idx in mesh_attribs.items()
]
accessors = sorted(accessors, key=lambda x: x['bufferView'])
data_copies = []
is_skinned = 'JOINTS_0' in mesh_attribs
for buffview, accs in itertools.groupby(accessors, key=lambda x: x['bufferView']):
buffview = gltf_data['bufferViews'][buffview]
accs = sorted(accs, key=lambda x: x.get('byteOffset', 0))
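            # If several accessors share this buffer view and the second one starts inside the
            # first stride, the attributes are interleaved in a single vertex array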
is_interleaved = len(accs) > 1 and accs[1]['byteOffset'] < buffview['byteStride']
varray = GeomVertexArrayFormat()
for acc in accs:
# Gather column information
attrib_parts = acc['attrib'].lower().split('_')
attrib_name = self._ATTRIB_NAME_MAP.get(attrib_parts[0], attrib_parts[0])
if attrib_name == 'texcoord' and len(attrib_parts) > 1:
internal_name = InternalName.make(attrib_name+'.', int(attrib_parts[1]))
else:
internal_name = InternalName.make(attrib_name)
num_components = self._COMPONENT_NUM_MAP[acc['type']]
numeric_type = self._COMPONENT_TYPE_MAP[acc['componentType']]
content = self._ATTRIB_CONENT_MAP.get(attrib_name, GeomEnums.C_other)
# Add this accessor as a column to the current vertex array format
varray.add_column(internal_name, num_components, numeric_type, content)
if not is_interleaved:
# Start a new vertex array format
vformat.add_array(varray)
varray = GeomVertexArrayFormat()
data_copies.append((
buffview['buffer'],
acc.get('byteOffset', 0) + buffview.get('byteOffset', 0),
acc['count'],
buffview.get('byteStride', 4 * num_components)
))
if is_interleaved:
vformat.add_array(varray)
data_copies.append((
buffview['buffer'],
buffview['byteOffset'],
accs[0]['count'],
buffview.get('byteStride', varray.get_stride())
))
# Copy data from buffers
reg_format = GeomVertexFormat.register_format(vformat)
vdata = GeomVertexData(geom_node.name, reg_format, GeomEnums.UH_stream)
for array_idx, data_info in enumerate(data_copies):
handle = vdata.modify_array(array_idx).modify_handle()
handle.unclean_set_num_rows(data_info[2])
buff = self.buffers[data_info[0]]
start = data_info[1]
end = start + data_info[2] * data_info[3]
handle.copy_data_from(buff[start:end])
handle = None
# Flip UVs
num_uvs = len({i for i in gltf_primitive['attributes'] if i.startswith('TEXCOORD')})
for i in range(num_uvs):
uv_data = GeomVertexRewriter(vdata, InternalName.get_texcoord_name(str(i)))
while not uv_data.is_at_end():
uvs = uv_data.get_data2f()
uv_data.set_data2f(uvs[0], 1 - uvs[1])
# Repack mesh data
vformat = GeomVertexFormat()
varray_vert = GeomVertexArrayFormat()
varray_skin = GeomVertexArrayFormat()
skip_columns = (
InternalName.get_transform_index(),
InternalName.get_transform_weight(),
InternalName.get_transform_blend()
)
for arr in reg_format.get_arrays():
for column in arr.get_columns():
varray = varray_skin if column.get_name() in skip_columns else varray_vert
varray.add_column(
column.get_name(),
column.get_num_components(),
column.get_numeric_type(),
column.get_contents()
)
vformat.add_array(varray_vert)
if is_skinned:
aspec = GeomVertexAnimationSpec()
aspec.set_panda()
vformat.set_animation(aspec)
varray_blends = GeomVertexArrayFormat()
varray_blends.add_column(InternalName.get_transform_blend(), 1, GeomEnums.NT_uint16, GeomEnums.C_index)
vformat.add_array(varray_blends)
vformat.add_array(varray_skin)
reg_format = GeomVertexFormat.register_format(vformat)
vdata = vdata.convert_to(reg_format)
# Construct primitive
primitiveid = geom_node.get_num_geoms()
primitivemode = gltf_primitive.get('mode', 4)
try:
prim = self._PRIMITIVE_MODE_MAP[primitivemode](GeomEnums.UH_static)
except KeyError:
print(
"Warning: primitive {} on mesh {} has an unsupported mode: {}"
.format(primitiveid, geom_node.name, primitivemode)
)
return
if 'indices' in gltf_primitive:
index_acc = gltf_data['accessors'][gltf_primitive['indices']]
prim.set_index_type(self._COMPONENT_TYPE_MAP[index_acc['componentType']])
handle = prim.modify_vertices(index_acc['count']).modify_handle()
handle.unclean_set_num_rows(index_acc['count'])
buffview = gltf_data['bufferViews'][index_acc['bufferView']]
buff = self.buffers[buffview['buffer']]
start = buffview['byteOffset']
end = start + index_acc['count'] * buffview.get('byteStride', 1) * prim.index_stride
handle.copy_data_from(buff[start:end])
handle = None
# Assign a material
matid = gltf_primitive.get('material', None)
if matid is None:
print(
"Warning: mesh {} has a primitive with no material, using an empty RenderState"
.format(geom_node.name)
)
mat = RenderState.make_empty()
elif matid not in self.mat_states:
print(
"Warning: material with name {} has no associated mat state, using an empty RenderState"
.format(matid)
)
mat = RenderState.make_empty()
else:
mat = self.mat_states[gltf_primitive['material']]
self.mat_mesh_map[gltf_primitive['material']].append((geom_node.name, primitiveid))
# Add this primitive back to the geom node
#ss = StringStream()
#vdata.write(ss)
###prim.write(ss, 2)
#print(ss.data.decode('utf8'))
geom = Geom(vdata)
geom.add_primitive(prim)
#geom.transform_vertices(self.csxform)
geom_node.add_geom(geom, mat)
def load_mesh(self, meshid, gltf_mesh, gltf_data):
mesh_name = gltf_mesh.get('name', 'mesh'+str(meshid))
node = self.meshes.get(meshid, GeomNode(mesh_name))
# Clear any existing mesh data
node.remove_all_geoms()
# Load primitives
for gltf_primitive in gltf_mesh['primitives']:
self.load_primitive(node, gltf_primitive, gltf_data)
# Save mesh
self.meshes[meshid] = node
def read_vert_data(self, gvd, column_name):
gvr = GeomVertexReader(gvd, column_name)
data = []
while not gvr.is_at_end():
data.append(LVecBase4(gvr.get_data4()))
return data
def combine_mesh_skin(self, geom_node, skinid):
jvtmap = collections.OrderedDict(sorted(self.joint_map[skinid].items()))
for geom in geom_node.modify_geoms():
gvd = geom.modify_vertex_data()
tbtable = TransformBlendTable()
tdata = GeomVertexWriter(gvd, InternalName.get_transform_blend())
jointdata = self.read_vert_data(gvd, InternalName.get_transform_index())
weightdata = self.read_vert_data(gvd, InternalName.get_transform_weight())
for joints, weights in zip(jointdata, weightdata):
tblend = TransformBlend()
for joint, weight in zip(joints, weights):
try:
jvt = jvtmap[joint]
except KeyError:
print(
"Could not find joint in jvtmap:\n\tjoint={}\n\tjvtmap={}"
.format(joint, jvtmap)
)
continue
tblend.add_transform(jvt, weight)
tdata.add_data1i(tbtable.add_blend(tblend))
tbtable.set_rows(SparseArray.lower_on(gvd.get_num_rows()))
gvd.set_transform_blend_table(tbtable)
def load_camera(self, camid, gltf_camera):
camname = gltf_camera.get('name', 'cam'+str(camid))
node = self.cameras.get(camid, Camera(camname))
if gltf_camera['type'] == 'perspective':
gltf_lens = gltf_camera['perspective']
lens = PerspectiveLens()
lens.set_fov(math.degrees(gltf_lens['yfov'] * gltf_lens['aspectRatio']), math.degrees(gltf_lens['yfov']))
lens.set_near_far(gltf_lens['znear'], gltf_lens['zfar'])
lens.set_view_vector((0, 0, -1), (0, 1, 0))
node.set_lens(lens)
self.cameras[camid] = node
def load_light(self, lightid, gltf_light):
node = self.lights.get(lightid, None)
lightname = gltf_light.get('name', 'light'+str(lightid))
ltype = gltf_light['type']
# Construct a new light if needed
if node is None:
if ltype == 'point':
node = PointLight(lightname)
elif ltype == 'directional':
node = DirectionalLight(lightname)
node.set_direction((0, 0, -1))
elif ltype == 'spot':
node = Spotlight(lightname)
else:
print("Unsupported light type for light with name {}: {}".format(lightname, gltf_light['type']))
node = PandaNode(lightname)
# Update the light
if ltype == 'unsupported':
lightprops = {}
else:
lightprops = gltf_light[ltype]
if ltype in ('point', 'directional', 'spot'):
node.set_color(LColor(*lightprops['color'], w=1))
if ltype in ('point', 'spot'):
att = LPoint3(
lightprops['constantAttenuation'],
lightprops['linearAttenuation'],
lightprops['quadraticAttenuation']
)
node.set_attenuation(att)
self.lights[lightid] = node
def load_physics_bullet(self, node_name, geomnode, shape_type, bounding_box, radius, height, gltf_rigidbody):
shape = None
static = 'static' in gltf_rigidbody and gltf_rigidbody['static']
if shape_type == 'BOX':
shape = bullet.BulletBoxShape(LVector3(*bounding_box) / 2.0)
elif shape_type == 'SPHERE':
shape = bullet.BulletSphereShape(max(bounding_box) / 2.0)
elif shape_type == 'CAPSULE':
shape = bullet.BulletCapsuleShape(radius, height - 2.0 * radius, bullet.ZUp)
elif shape_type == 'CYLINDER':
shape = bullet.BulletCylinderShape(radius, height, bullet.ZUp)
elif shape_type == 'CONE':
shape = bullet.BulletConeShape(radius, height, bullet.ZUp)
elif shape_type == 'CONVEX_HULL':
if geomnode:
shape = bullet.BulletConvexHullShape()
for geom in geomnode.get_geoms():
shape.add_geom(geom)
elif shape_type == 'MESH':
if geomnode:
mesh = bullet.BulletTriangleMesh()
for geom in geomnode.get_geoms():
mesh.add_geom(geom)
shape = bullet.BulletTriangleMeshShape(mesh, dynamic=not static)
else:
print("Unknown collision shape ({}) for object ({})".format(shape_type, nodeid))
if shape is not None:
phynode = bullet.BulletRigidBodyNode(node_name)
phynode.add_shape(shape)
if not static:
phynode.set_mass(gltf_rigidbody['mass'])
return phynode
else:
print("Could not create collision shape for object ({})".format(nodeid))
def load_physics_builtin(self, node_name, geomnode, shape_type, bounding_box, radius, height, _gltf_rigidbody):
phynode = CollisionNode(node_name)
if shape_type == 'BOX':
phynode.add_solid(CollisionBox(Point3(0, 0, 0), *LVector3(*bounding_box) / 2.0))
elif shape_type == 'SPHERE':
phynode.add_solid(CollisionSphere(0, 0, 0, radius))
elif shape_type in ('CAPSULE', 'CYLINDER', 'CONE'):
if shape_type != 'CAPSULE':
print(
'Warning: builtin collisions do not support shape type {} for object {}, falling back to {}'.format(
shape_type,
node_name,
'CAPSULE'
))
half_height = height / 2.0 - radius
start = LPoint3(0, 0, -half_height)
end = LPoint3(0, 0, half_height)
phynode.add_solid(CollisionCapsule(start, end, radius))
elif shape_type in ('MESH', 'CONVEX_HULL'):
if shape_type != 'MESH':
print(
'Warning: builtin collisions do not support shape type {} for object {}, falling back to {}'.format(
shape_type,
node_name,
'MESH'
))
if geomnode:
verts = []
for geom in geomnode.get_geoms():
vdata = self.read_vert_data(geom.get_vertex_data(), InternalName.get_vertex())
for prim in geom.primitives:
prim_tmp = prim.decompose()
verts += [
vdata[i].get_xyz() for i in
prim_tmp.get_vertex_list()
]
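                # Group the flattened vertex list into consecutive triples; each triple becomes one triangle polygon below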
polys = zip(*([iter(verts)] * 3))
for poly in polys:
phynode.add_solid(CollisionPolygon(*poly))
else:
print("Unknown collision shape ({}) for object ({})".format(shape_type, node_name))
if phynode.solids:
return phynode
else:
print("Could not create collision shape for object ({})".format(nodeid))
def convert(src, dst, settings=GltfSettings()):
import json
if not isinstance(src, Filename):
src = Filename.from_os_specific(src)
if not isinstance(dst, Filename):
dst = Filename.from_os_specific(dst)
with open(src) as gltf_file:
gltf_data = json.load(gltf_file)
indir = Filename(src.get_dirname())
outdir = Filename(dst.get_dirname())
get_model_path().prepend_directory(indir)
get_model_path().prepend_directory(outdir)
converter = Converter(indir=indir, outdir=outdir, settings=settings)
converter.update(gltf_data, writing_bam=True)
if settings.print_scene:
converter.active_scene.ls()
converter.active_scene.write_bam_file(dst)
def load_model(loader, file_path, gltf_settings=GltfSettings(), **loader_kwargs):
'''Load a glTF file from file_path and return a ModelRoot'''
import tempfile
with tempfile.NamedTemporaryFile(suffix='.bam') as bamfile:
try:
convert(file_path, bamfile.name, gltf_settings)
return loader.load_model(bamfile.name, **loader_kwargs)
        except Exception as exc:
            raise RuntimeError("Failed to convert glTF file") from exc
| 1.882813 | 2 |
HW7/zipcode_map_Yang.py | MengyuanZoe/HomeIn | 5 | 12763664 | # -*- coding: utf-8 -*-
"""
Folium interaction with GeoJSON data
Example: overlay another GeoJSON zipcode map onto the original map
Author: <NAME>
"""
import pandas as pd
import folium
def show_zipcode_map(zipcode_path, data, col):
"""
    Join zipcode GeoJSON data with another data set (house price or crime)
    and overlay a zipcode choropleth layer on top of the base map
    Parameters
    ----------
    zipcode_path : string
        URL or file path to the GeoJSON zipcode data
    data : pandas dataframe
        The other data set to join with (house price or crime)
    col : string
        The column name in the data set to bind to each zipcode
    Returns
    ----------
    Saves the map as an .html file and returns the folium map
"""
# Generate original map
zipcode = folium.Map(location=[data['lat'].mean(),
data['long'].mean()], zoom_start=10)
    # Add zipcode map layer to original map
zipcode.choropleth(geo_path=zipcode_path, data=data,
columns=['zipcode', col],
key_on='feature.properties.ZCTA5CE10',
fill_color='OrRd', fill_opacity=0.5, line_opacity=0.2)
zipcode.save('zipcode_' + col + '.html')
return zipcode
if __name__ == "__main__":
"""
Example of using show_zipcode_map function
"""
# Load King County house price data
house_data = pd.read_csv("../Data/kc_house_data.csv",
parse_dates=['date'])
house_data['zipcode'] = house_data['zipcode'].astype(str)
# Group data by zipcode and calculate the mean value in each zipcode
    zipcode_data = house_data.groupby('zipcode').mean()
    # Add a new field to the dataset: the count of houses sold in each zipcode
    zipcode_data['count'] = house_data.groupby('zipcode').count()['id']
zipcode_data.reset_index(inplace=True)
# Path for GeoJSON zipcode data
zipcodepath = '../Data/zipcode_king_county.geojson'
# Generate a layer of zipcode map with house sold and save to html file
show_zipcode_map(zipcodepath, zipcode_data, 'count')
# Generate a layer of zipcode map with average house price and save to html file
show_zipcode_map(zipcodepath, zipcode_data, 'price')
| 3.796875 | 4 |
src/ufoLib2/pointPens/glyphPointPen.py | JeremieHornus/ufoLib2 | 0 | 12763665 | <gh_stars>0
from typing import TYPE_CHECKING, Any, Optional, Tuple
from fontTools.misc.transform import Transform
from fontTools.pens.pointPen import AbstractPointPen
from ufoLib2.objects.component import Component
from ufoLib2.objects.deepComponent import DeepComponent
from ufoLib2.objects.contour import Contour
from ufoLib2.objects.point import Point
if TYPE_CHECKING:
from ufoLib2.objects.glyph import Glyph
class GlyphPointPen(AbstractPointPen):
"""A point pen.
See :mod:`fontTools.pens.basePen` and :mod:`fontTools.pens.pointPen` for an
introduction to pens.
"""
__slots__ = "_glyph", "_contour"
def __init__(self, glyph: "Glyph") -> None:
self._glyph: "Glyph" = glyph
self._contour: Optional[Contour] = None
def beginPath(self, identifier: Optional[str] = None, **kwargs: Any) -> None:
self._contour = Contour(identifier=identifier)
def endPath(self) -> None:
if self._contour is None:
raise ValueError("Call beginPath first.")
self._glyph.contours.append(self._contour)
self._contour = None
def addPoint(
self,
pt: Tuple[float, float],
segmentType: Optional[str] = None,
smooth: bool = False,
name: Optional[str] = None,
identifier: Optional[str] = None,
**kwargs: Any,
) -> None:
if self._contour is None:
raise ValueError("Call beginPath first.")
x, y = pt
self._contour.append(
Point(
x, y, type=segmentType, smooth=smooth, name=name, identifier=identifier
)
)
def addComponent(
self,
baseGlyph: str,
transformation: Transform,
identifier: Optional[str] = None,
**kwargs: Any,
) -> None:
component = Component(baseGlyph, transformation, identifier=identifier)
self._glyph.components.append(component)
def addDeepComponent(
self,
baseGlyph: str,
transformation: list,
coord: list
) -> None:
deepComponent = DeepComponent(baseGlyph, transformation, coord)
self._glyph.deepComponents.append(deepComponent)
def addVariationGlyphs(self, variationGlyphs: list):
self._glyph.variationGlyphs = variationGlyphs
def addGlyphVariationLayers(self, glyphVariationLayers: list):
self._glyph.glyphVariationLayers = glyphVariationLayers | 2.34375 | 2 |
backend/polzyFunctions/tests/test_misc.py | Athos1972/PoLZy | 0 | 12763666 | import os
import sys
from polzyFunctions.utils import get_file_path
from polzyFunctions.tests.utils import company, user
from polzyFunctions.scripts.AddNotification import *
from polzyFunctions.LogLevelUpdater import LogLevelUpdater
from polzyFunctions.FileManager.AddressParser import Parser
def test_get_file_path():
get_file_path("test_misc.py")
get_file_path("test_misc.py", os.getcwd())
get_file_path("testing.py")
def test_AddNotification(company, user):
sys.argv = []
sys.argv.append("AddNotification.py")
sys.argv.extend(["--message", "testing"])
sys.argv.extend(["--company-id", "test"])
sys.argv.extend(["--user-id", "test"])
message = get_message()
get_company()
get_user()
sys.argv = ["--company-name", "test"]
sys.argv = ["--user-name", "test"]
get_company()
get_user()
assert message
def test_logLevelUpdater():
lLogLevelUpdater = LogLevelUpdater()
lLogLevelUpdater.create_thread = False
lLogLevelUpdater.startThread()
for thread in lLogLevelUpdater.threads:
thread.kill()
def test_AddressParser():
lParser = Parser("100 street 10/11")
address = lParser.to_dict()
assert address["postCode"] == "100"
assert address["street"] == "street"
assert address["streetNumber"] == "10"
assert address["houseNumber"] == "11"
| 2.25 | 2 |
Strings/example_10.py | abdullahfareed454/PythonCourses | 0 | 12763667 | # -*- coding: utf-8 -*-
string1 = "Becomes"
string2 = "becomes"
string3 = "BEAR"
string4 = " bEautiful"
string1 = string1.lower()
# (string2 will pass unmodified)
string3 = string3.lower()
string4 = string4.strip().lower()
print(string1.startswith("be"))
print(string2.startswith("be"))
print(string3.startswith("be"))
print(string4.startswith("be"))
| 3.65625 | 4 |
src/shart/__init__.py | insert-username/shart | 0 | 12763668 | <reponame>insert-username/shart<gh_stars>0
#!/usr/bin/env python3
import math
import numpy as np
import shapely as sh
import shapely.affinity
import shapely.geometry
__all__ = [ "box", "coordinates", "group", "utils" ]
| 1.234375 | 1 |
pytest_tests/test_repositories.py | koichiro8/learning | 0 | 12763669 | from datetime import datetime
import pytest
from sqlalchemy import select
from learning.database import get_session
from learning.entities import Todo
from learning.repositories import TodoRepository
from learning.schemas import CreateTodo
from .utils import create_todo
pytestmark = [pytest.mark.asyncio, pytest.mark.integration]
async def test_get_todos():
todos = create_todo(5)
repo = TodoRepository()
async with get_session() as session:
async with session.begin():
session.add_all(todos)
await session.commit()
got = await repo.get_todos(session)
assert len(got) == 5
async def test_create_todo():
now = datetime.now()
repo = TodoRepository()
async with get_session() as session:
await repo.create_todo(Todo("create todo"), session)
await session.commit()
result = await session.execute(select(Todo))
created: Todo = result.scalar_one_or_none()
assert created
assert created.title == "create todo"
assert not created.done
assert created.created_at >= now
async def test_update_todo():
repo = TodoRepository()
async with get_session() as session:
await repo.create_todo(Todo("create todo"), session)
await session.commit()
result = await session.execute(select(Todo))
created: Todo = result.scalar_one_or_none()
async with get_session() as session:
await repo.update_todo(created.id, CreateTodo(title="update todo"), session)
await session.commit()
result = await session.execute(select(Todo))
updated: Todo = result.scalar_one_or_none()
assert updated
assert updated.title == "update todo"
assert not updated.done
assert updated.created_at == created.created_at
async def test_done():
repo = TodoRepository()
async with get_session() as session:
await repo.create_todo(Todo("create todo"), session)
await session.commit()
result = await session.execute(select(Todo))
created: Todo = result.scalar_one_or_none()
async with get_session() as session:
await repo.done(created.id, session)
await session.commit()
result = await session.execute(select(Todo))
done: Todo = result.scalar_one_or_none()
assert done
assert done.title == "create todo"
assert done.done
assert done.created_at == created.created_at
async def test_delete():
repo = TodoRepository()
async with get_session() as session:
await repo.create_todo(Todo("create todo"), session)
await session.commit()
result = await session.execute(select(Todo))
created: Todo = result.scalar_one_or_none()
async with get_session() as session:
await repo.delete(created.id, session)
result = await session.execute(select(Todo))
deleted: Todo = result.scalar_one_or_none()
assert not deleted
async def test_delete_done():
todos = create_todo(5)
repo = TodoRepository()
async with get_session() as session:
async with session.begin():
session.add_all(todos)
await session.commit()
result = await session.execute(select(Todo))
created: Todo = result.scalars().all()
done_ids = [created[1].id, created[3].id]
async with get_session() as session:
for done_id in done_ids:
await repo.done(done_id, session)
await session.commit()
async with get_session() as session:
await repo.delete_done(session)
await session.commit()
result = await session.execute(select(Todo))
not_doned: Todo = result.scalars().all()
assert len(not_doned) == 3
not_doned_ids = [d.id for d in not_doned]
for done_id in done_ids:
assert done_id not in not_doned_ids
| 2.265625 | 2 |
torchflare/metrics/meters.py | glenn-jocher/torchflare | 1 | 12763670 | """Implementation of utilities for metrics."""
from typing import Tuple
import torch
from einops import reduce
class _BaseMetric:
"""A Class which decides type of classification i.e. binary,multilabel or multiclass."""
def __init__(self, multilabel: bool = False):
"""Constructor class for BaseMetric class.
Args:
multilabel: Set to True if problem type is multilabel.
"""
self.multilabel = multilabel
self.case_type = None
@staticmethod
def _check_shape(outputs: torch.Tensor, targets: torch.Tensor):
"""Function to check if there is a mismatch between outputs and targets.
Args:
outputs: The outputs of the net.
targets: The targets.
Raises:
ValueError: If shapes does not match.
"""
if not (outputs.ndim == targets.ndim or outputs.ndim == targets.ndim + 1):
raise ValueError("Preds and Targets must have same number of dimensions")
@staticmethod
def _convert_to_onehot(num_classes: int, indices: torch.Tensor) -> torch.Tensor:
"""Converts tensor to one_hot representation.
Args:
num_classes: The number of classes.
indices: torch.Tensor.
Returns:
one_hot converted tensor.
"""
onehot = torch.zeros(indices.shape[0], num_classes, *indices.shape[1:], dtype=indices.dtype)
index = indices.long().unsqueeze(1).expand_as(onehot)
return onehot.scatter_(1, index, 1.0)
@staticmethod
def detach_tensor(x: torch.Tensor) -> torch.Tensor:
"""Detaches the tensor."""
return x.detach().cpu()
# noinspection PyUnboundLocalVariable
def _check_type(self, outputs: torch.Tensor, targets: torch.Tensor):
"""Method to infer type of the problem."""
self._check_shape(outputs, targets)
if targets.ndim + 1 == outputs.ndim:
if outputs.shape[1] == 1:
case_type = "binary"
else:
case_type = "multiclass"
elif outputs.ndim == targets.ndim:
if self.multilabel:
case_type = "multilabel"
else:
case_type = "binary"
if self.case_type is None:
self.case_type = case_type
class _BaseInputHandler(_BaseMetric):
"""Class to handle shapes for various classification tasks."""
def __init__(
self,
num_classes: int,
threshold: float = 0.5,
multilabel: bool = False,
average: str = "macro",
):
"""Constructor method.
Args:
num_classes: The number of classes.
threshold: The threshold for binarization.
multilabel: Whether the problem is multilabel or not.
average: One of macro or micro.
"""
super(_BaseInputHandler, self).__init__(multilabel=multilabel)
self.num_classes = num_classes
self.threshold = threshold
self.multilabel = multilabel
self.eps = 1e-20
self.average = average
        assert self.average in ["micro", "macro"], "Average should be one of ['micro', 'macro']"  # noqa: S101
@staticmethod
def _calculate_stats(
true_preds: torch.Tensor,
false_preds: torch.Tensor,
pos_preds: torch.Tensor,
neg_preds: torch.Tensor,
):
tp = true_preds * pos_preds
fp = false_preds * pos_preds
tn = true_preds * neg_preds
fn = false_preds * neg_preds
return tp, fp, tn, fn
def compute_stats(
self,
outputs: torch.Tensor,
targets: torch.Tensor,
):
"""Computes true_positives, false_positives, true_negatives, false_negatives.
Args:
outputs: The outputs of the net.
targets: The targets.
Returns:
True positives , false positives, true negatives , false negatives.
"""
outputs, targets = self._compute(outputs=outputs, targets=targets)
true_preds = torch.eq(targets, outputs)
false_preds = ~true_preds
pos_preds = torch.eq(outputs, 1.0)
neg_preds = torch.eq(outputs, 0.0)
# Some einops operations
pattern = "r c -> c" if self.average == "macro" else "r c -> "
tp, fp, tn, fn = self._calculate_stats(true_preds, false_preds, pos_preds, neg_preds)
# einops reductions
tp = reduce(tp, pattern, reduction="sum")
fp = reduce(fp, pattern, reduction="sum")
tn = reduce(tn, pattern, reduction="sum")
fn = reduce(fn, pattern, reduction="sum")
return tp, fp, tn, fn
def reduce(self, numerator: torch.Tensor, denominator: torch.Tensor) -> torch.Tensor:
"""Method to perform macro or micro reduction."""
frac = numerator / (denominator + self.eps)
return torch.mean(frac) if self.average == "macro" else frac
def _compute(self, outputs: torch.Tensor, targets: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
self._check_type(outputs=outputs, targets=targets)
if self.case_type == "multiclass":
targets = self._convert_to_onehot(num_classes=self.num_classes, indices=targets.view(-1))
            # We receive logits, so take the argmax to get predicted classes
outputs = torch.argmax(outputs, dim=1)
outputs = self._convert_to_onehot(num_classes=self.num_classes, indices=outputs.view(-1))
else:
# Handling multilabel and binary cases
outputs = torch.sigmoid(outputs).float()
outputs = (outputs >= self.threshold).long()
outputs = outputs.reshape(outputs.shape[0], -1)
targets = targets.reshape(targets.shape[0], -1)
return outputs, targets
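# A minimal sketch of using _BaseInputHandler.compute_stats (illustrative only; shapes and values are assumed):
#
#     handler = _BaseInputHandler(num_classes=3, average="macro")
#     logits = torch.randn(8, 3)            # multiclass logits
#     targets = torch.randint(0, 3, (8,))   # integer class labels
#     tp, fp, tn, fn = handler.compute_stats(logits, targets)  # per-class counts under macro averaging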
def calculate_segmentation_statistics(outputs: torch.Tensor, targets: torch.Tensor, class_dim: int = 1, threshold=None):
"""Compute calculate segmentation statistics.
Args:
outputs: torch.Tensor.
targets: torch.Tensor.
threshold: threshold for binarization of predictions.
class_dim: indicates class dimension (K).
Returns:
True positives , false positives , false negatives for segmentation task.
"""
num_dims = len(outputs.shape)
assert num_dims > 2, "Found only two dimensions, shape should be [bs , C , ...]" # noqa: S101
assert outputs.shape == targets.shape, "shape mismatch" # noqa: S101
if threshold is not None:
outputs = (outputs > threshold).float()
dims = [dim for dim in range(num_dims) if dim != class_dim]
true_positives = torch.sum(outputs * targets, dim=dims)
false_positives = torch.sum(outputs * (1 - targets), dim=dims)
false_negatives = torch.sum(targets * (1 - outputs), dim=dims)
return true_positives, false_positives, false_negatives
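# A minimal sketch of calling calculate_segmentation_statistics (illustrative only; shapes are assumed):
#
#     preds = torch.rand(2, 1, 8, 8)                  # [batch, classes, H, W] probabilities
#     masks = (torch.rand(2, 1, 8, 8) > 0.5).float()  # binary ground-truth masks
#     tp, fp, fn = calculate_segmentation_statistics(preds, masks, class_dim=1, threshold=0.5)
#     # tp, fp and fn each hold one value per class along class_dim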
class MetricMeter:
"""Base Class to structuring your metrics."""
def accumulate(self, outputs, targets):
"""Method to accumulate outputs and targets per the batch."""
raise NotImplementedError
def reset(self):
"""Method to reset the accumulation lists."""
raise NotImplementedError
__all__ = [
"_BaseMetric",
"_BaseInputHandler",
"MetricMeter",
"calculate_segmentation_statistics",
]
| 2.9375 | 3 |
tests/functional/modules/ims_psb_gen/seq_dataset/test_valid_input.py | thedoubl3j/ibm_zos_ims | 7 | 12763671 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
from ansible.module_utils.basic import AnsibleModule
from pprint import pprint
import pytest
import ansible.utils
import ansible.errors
import ansible.constants
import warnings
import os
import sys
CURR_DIR = os.path.dirname(__file__) + "/../helpers"
# print(CURR_DIR)
sys.path.append(CURR_DIR)
# for path in sys.path:
# print(path)
import run_validate_success # pylint: disable=import-error
from ibm_zos_ims.tests.functional.module_utils.ims_test_gen_utils import PSBInputParameters as ip
__metaclass__ = type
"""
Following datasets should be provisioned for the list of tests below
1. OMVSADM.IMSTESTU.ANSIBLE.PSBLIB : psb lib for destination
2. Non empty input sequential dataset OMVSADM.IMSTESTU.ANS.SEQ
3. Syslibs: IMSBLD.I15RTSMM.SDFSMAC, SYS1.MACLIB
"""
DESTINATION = ip.DESTINATION
SYSLIB = ip.SYSLIB
SOURCE = ip.SEQ_SOURCE
GEN_SUCCESS_MSG = 'PSBGEN execution was successful.'
BATCH_SUCCESS_RETURN_TEXT = 'success'
def process_single_src(hosts, dest, sys_lib, src, location='DATA_SET', replace=True, member_list=None, psb_name=None):
# print(srcList)
response = hosts.all.ims_psb_gen(dest=dest, sys_lib=sys_lib, src=src, location=location, replace=replace, member_list=member_list, psb_name=psb_name)
for result in response.contacted.values():
print("Changed:", result['changed'])
assert result['changed']
assert result['rc'] == 0
# Check for success message (if we remove return codes)
assert result['msg'] == GEN_SUCCESS_MSG
def process_batch(hosts, batch_list, dest, sys_lib):
print(batch_list)
response = hosts.all.ims_psb_gen(
batch=batch_list, dest=dest, sys_lib=sys_lib)
for result in response.contacted.values():
print("Changed:", result['changed'])
assert result['changed']
assert result['rc'] == 0
# Check for success message (if we remove return codes)
assert result['msg'] == GEN_SUCCESS_MSG
# Check return code for array of output for each source
for src_result in result['batch_result']:
assert src_result['return_text'] == BATCH_SUCCESS_RETURN_TEXT
# Pass a valid sequential data set as the input source and expect successful generation of the PSB lib
def test_valid_seq_data_input_single_src(ansible_zos_module):
hosts = ansible_zos_module
process_single_src(hosts, DESTINATION, SYSLIB, src=SOURCE, replace=True, location='DATA_SET', psb_name='SEQ1')
def test_valid_seq_data_input_batch(ansible_zos_module):
hosts = ansible_zos_module
batch_list = [{'src': SOURCE, 'replace': True, 'location': "DATA_SET", 'psb_name': 'SEQ1'}]
process_batch(hosts, batch_list, DESTINATION, SYSLIB)
| 1.765625 | 2 |
codes/2_image_prediction_multiThreading.py | nguyenanhtuan1008/ImageAI | 0 | 12763672 | from tmodules.Prediction import ImagePrediction
import os
import threading
class PredictionThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
multiple_prediction = ImagePrediction()
multiple_prediction.setModelTypeAsDenseNet()
multiple_prediction.setModelPath(os.path.join(execution_path, r"E:\acuity\tuan_experiment\yolo\ImageAI\weights\DenseNet-BC-121-32.h5"))
multiple_prediction.loadModel(prediction_speed="fastest") # fastest faster fast normal
all_images_array = []
folder_path = r"E:\acuity\tuan_experiment\yolo\ImageAI\data-images"
all_files = os.listdir(folder_path)
for each_file in all_files:
print(folder_path + "\\" + each_file)
path_file = folder_path + "\\" + each_file
if(each_file.endswith(".jpg") or each_file.endswith(".png")):
all_images_array.append(path_file)
results_array = multiple_prediction.predictMultipleImages(all_images_array, result_count_per_image=5)
for each_result in results_array:
predictions, percentage_probabilities = each_result["predictions"], each_result["percentage_probabilities"]
for index in range(len(predictions)):
print(predictions[index] , " : " , percentage_probabilities[index])
print("-----------------------")
class PredictionThread2(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
print("###########################################################")
prediction = ImagePrediction()
prediction.setModelTypeAsInceptionV3()
prediction.setModelPath(os.path.join(execution_path, r"E:\acuity\tuan_experiment\yolo\ImageAI\weights\inception_v3_weights_tf_dim_ordering_tf_kernels.h5")) # Download the model via this link https://github.com/OlafenwaMoses/ImageAI/releases/tag/1.0
prediction.loadModel()
predictions, probabilities = prediction.predictImage(os.path.join(execution_path, r"E:\acuity\tuan_experiment\yolo\ImageAI\data-images\1.jpg"), result_count=10 , input_type="stream") # input_type="array"
for eachPrediction, eachProbability in zip(predictions, probabilities):
print(eachPrediction , " : " , eachProbability)
# https://github.com/nguyenanhtuan1008/ImageAI/blob/master/imageai/Prediction/README.md
#Run
execution_path = os.getcwd()
# Thread 1
predictionThread = PredictionThread ()
predictionThread.start()
# Thread 2
predictionThread2 = PredictionThread2 ()
predictionThread2.start() | 2.484375 | 2 |
integration/python/integration_api/models/identification.py | sumit4-ttn/SDK | 0 | 12763673 | # coding: utf-8
"""
Hydrogen Integration API
The Hydrogen Integration API # noqa: E501
OpenAPI spec version: 1.2.1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Identification(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'country_of_issue': 'str',
'doc_number': 'str',
'doc_type': 'str',
'expiry_date': 'datetime',
'issue_date': 'datetime',
'issuing_authority': 'str',
'state_of_issue': 'str'
}
attribute_map = {
'country_of_issue': 'country_of_issue',
'doc_number': 'doc_number',
'doc_type': 'doc_type',
'expiry_date': 'expiry_date',
'issue_date': 'issue_date',
'issuing_authority': 'issuing_authority',
'state_of_issue': 'state_of_issue'
}
def __init__(self, country_of_issue=None, doc_number=None, doc_type=None, expiry_date=None, issue_date=None, issuing_authority=None, state_of_issue=None): # noqa: E501
"""Identification - a model defined in Swagger""" # noqa: E501
self._country_of_issue = None
self._doc_number = None
self._doc_type = None
self._expiry_date = None
self._issue_date = None
self._issuing_authority = None
self._state_of_issue = None
self.discriminator = None
if country_of_issue is not None:
self.country_of_issue = country_of_issue
if doc_number is not None:
self.doc_number = doc_number
if doc_type is not None:
self.doc_type = doc_type
if expiry_date is not None:
self.expiry_date = expiry_date
if issue_date is not None:
self.issue_date = issue_date
if issuing_authority is not None:
self.issuing_authority = issuing_authority
if state_of_issue is not None:
self.state_of_issue = state_of_issue
@property
def country_of_issue(self):
"""Gets the country_of_issue of this Identification. # noqa: E501
:return: The country_of_issue of this Identification. # noqa: E501
:rtype: str
"""
return self._country_of_issue
@country_of_issue.setter
def country_of_issue(self, country_of_issue):
"""Sets the country_of_issue of this Identification.
:param country_of_issue: The country_of_issue of this Identification. # noqa: E501
:type: str
"""
self._country_of_issue = country_of_issue
@property
def doc_number(self):
"""Gets the doc_number of this Identification. # noqa: E501
:return: The doc_number of this Identification. # noqa: E501
:rtype: str
"""
return self._doc_number
@doc_number.setter
def doc_number(self, doc_number):
"""Sets the doc_number of this Identification.
:param doc_number: The doc_number of this Identification. # noqa: E501
:type: str
"""
self._doc_number = doc_number
@property
def doc_type(self):
"""Gets the doc_type of this Identification. # noqa: E501
:return: The doc_type of this Identification. # noqa: E501
:rtype: str
"""
return self._doc_type
@doc_type.setter
def doc_type(self, doc_type):
"""Sets the doc_type of this Identification.
:param doc_type: The doc_type of this Identification. # noqa: E501
:type: str
"""
self._doc_type = doc_type
@property
def expiry_date(self):
"""Gets the expiry_date of this Identification. # noqa: E501
:return: The expiry_date of this Identification. # noqa: E501
:rtype: datetime
"""
return self._expiry_date
@expiry_date.setter
def expiry_date(self, expiry_date):
"""Sets the expiry_date of this Identification.
:param expiry_date: The expiry_date of this Identification. # noqa: E501
:type: datetime
"""
self._expiry_date = expiry_date
@property
def issue_date(self):
"""Gets the issue_date of this Identification. # noqa: E501
:return: The issue_date of this Identification. # noqa: E501
:rtype: datetime
"""
return self._issue_date
@issue_date.setter
def issue_date(self, issue_date):
"""Sets the issue_date of this Identification.
:param issue_date: The issue_date of this Identification. # noqa: E501
:type: datetime
"""
self._issue_date = issue_date
@property
def issuing_authority(self):
"""Gets the issuing_authority of this Identification. # noqa: E501
:return: The issuing_authority of this Identification. # noqa: E501
:rtype: str
"""
return self._issuing_authority
@issuing_authority.setter
def issuing_authority(self, issuing_authority):
"""Sets the issuing_authority of this Identification.
:param issuing_authority: The issuing_authority of this Identification. # noqa: E501
:type: str
"""
self._issuing_authority = issuing_authority
@property
def state_of_issue(self):
"""Gets the state_of_issue of this Identification. # noqa: E501
:return: The state_of_issue of this Identification. # noqa: E501
:rtype: str
"""
return self._state_of_issue
@state_of_issue.setter
def state_of_issue(self, state_of_issue):
"""Sets the state_of_issue of this Identification.
:param state_of_issue: The state_of_issue of this Identification. # noqa: E501
:type: str
"""
self._state_of_issue = state_of_issue
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Identification, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Identification):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 1.726563 | 2 |
cinderella/modules/cleaner.py | jerinjohny-ktnm/MarvinBot | 2 | 12763674 | import html
from typing import List
from telegram import Bot, Update, ParseMode
from telegram.ext import CommandHandler, MessageHandler, Filters, run_async
from cinderella import ALLOW_EXCL, dispatcher, CustomCommandHandler
from cinderella.modules.disable import DisableAbleCommandHandler
from cinderella.modules.helper_funcs.chat_status import user_admin, bot_can_delete, dev_plus, connection_status
from cinderella.modules.sql import cleaner_sql as sql
if ALLOW_EXCL:
CMD_STARTERS = ('/', '!')
else:
    CMD_STARTERS = ('/',)
BLUE_TEXT_CLEAN_GROUP = 15
CommandHandlerList = (CommandHandler, CustomCommandHandler, DisableAbleCommandHandler)
command_list = ["cleanblue", "ignoreblue", "unignoreblue", "listblue", "ungignoreblue", "gignoreblue"
"start", "help", "settings", "donate", "stalk", "aka", "leaderboard"]
for handler_list in dispatcher.handlers:
for handler in dispatcher.handlers[handler_list]:
if any(isinstance(handler, cmd_handler) for cmd_handler in CommandHandlerList):
command_list += handler.command
@run_async
def clean_blue_text_must_click(bot: Bot, update: Update):
chat = update.effective_chat
message = update.effective_message
if chat.get_member(bot.id).can_delete_messages:
if sql.is_enabled(chat.id):
fst_word = message.text.strip().split(None, 1)[0]
if len(fst_word) > 1 and any(fst_word.startswith(start) for start in CMD_STARTERS):
command = fst_word[1:].split('@')
chat = update.effective_chat
ignored = sql.is_command_ignored(chat.id, command[0])
if ignored:
return
if command[0] not in command_list:
message.delete()
@run_async
@connection_status
@bot_can_delete
@user_admin
def set_blue_text_must_click(bot: Bot, update: Update, args: List[str]):
chat = update.effective_chat
message = update.effective_message
if len(args) >= 1:
val = args[0].lower()
if val == "off" or val == "no":
sql.set_cleanbt(chat.id, False)
reply = "Bluetext cleaning has been disabled for <b>{}</b>".format(html.escape(chat.title))
message.reply_text(reply, parse_mode=ParseMode.HTML)
elif val == "yes" or val == "on":
sql.set_cleanbt(chat.id, True)
reply = "Bluetext cleaning has been enabled for <b>{}</b>".format(html.escape(chat.title))
message.reply_text(reply, parse_mode=ParseMode.HTML)
else:
reply = "Invalid argument.Accepted values are 'yes', 'on', 'no', 'off'"
message.reply_text(reply)
else:
clean_status = sql.is_enabled(chat.id)
if clean_status:
clean_status = "Enabled"
else:
clean_status = "Disabled"
reply = "Bluetext cleaning for <b>{}</b> : <b>{}</b>".format(chat.title, clean_status)
message.reply_text(reply, parse_mode=ParseMode.HTML)
@run_async
@user_admin
def add_bluetext_ignore(bot: Bot, update: Update, args: List[str]):
message = update.effective_message
chat = update.effective_chat
if len(args) >= 1:
val = args[0].lower()
added = sql.chat_ignore_command(chat.id, val)
if added:
reply = "<b>{}</b> has been added to bluetext cleaner ignore list.".format(args[0])
else:
reply = "Command is already ignored."
message.reply_text(reply, parse_mode=ParseMode.HTML)
else:
reply = "No command supplied to be ignored."
message.reply_text(reply)
@run_async
@user_admin
def remove_bluetext_ignore(bot: Bot, update: Update, args: List[str]):
message = update.effective_message
chat = update.effective_chat
if len(args) >= 1:
val = args[0].lower()
removed = sql.chat_unignore_command(chat.id, val)
if removed:
reply = "<b>{}</b> has been removed from bluetext cleaner ignore list.".format(args[0])
else:
reply = "Command isn't ignored currently."
message.reply_text(reply, parse_mode=ParseMode.HTML)
else:
reply = "No command supplied to be unignored."
message.reply_text(reply)
@run_async
@user_admin
def add_bluetext_ignore_global(bot: Bot, update: Update, args: List[str]):
message = update.effective_message
if len(args) >= 1:
val = args[0].lower()
added = sql.global_ignore_command(val)
if added:
reply = "<b>{}</b> has been added to global bluetext cleaner ignore list.".format(args[0])
else:
reply = "Command is already ignored."
message.reply_text(reply, parse_mode=ParseMode.HTML)
else:
reply = "No command supplied to be ignored."
message.reply_text(reply)
@run_async
@dev_plus
def remove_bluetext_ignore_global(bot: Bot, update: Update, args: List[str]):
message = update.effective_message
if len(args) >= 1:
val = args[0].lower()
removed = sql.global_unignore_command(val)
if removed:
reply = "<b>{}</b> has been removed from global bluetext cleaner ignore list.".format(args[0])
else:
reply = "Command isn't ignored currently."
message.reply_text(reply, parse_mode=ParseMode.HTML)
else:
reply = "No command supplied to be unignored."
message.reply_text(reply)
@run_async
@dev_plus
def bluetext_ignore_list(bot: Bot, update: Update):
message = update.effective_message
chat = update.effective_chat
global_ignored_list, local_ignore_list = sql.get_all_ignored(chat.id)
text = ""
if global_ignored_list:
text = "The following commands are currently ignored globally from bluetext cleaning :\n"
for x in global_ignored_list:
text += f" - <code>{x}</code>\n"
if local_ignore_list:
text += "\nThe following commands are currently ignored locally from bluetext cleaning :\n"
for x in local_ignore_list:
text += f" - <code>{x}</code>\n"
if text == "":
text = "No commands are currently ignored from bluetext cleaning."
message.reply_text(text)
return
message.reply_text(text, parse_mode=ParseMode.HTML)
return
__help__ = """
- /cleanblue <on/off/yes/no> - clean commands after sending
- /ignoreblue <word> - prevent auto cleaning of the command
- /unignoreblue <word> - remove prevent auto cleaning of the command
- /listblue - list currently whitelisted commands
Following are Disasters only commands, admins cannot use these:
- /gignoreblue <word> - globally ignore bluetext cleaning.
- /ungignoreblue <word> - remove said command from global cleaning list
"""
SET_CLEAN_BLUE_TEXT_HANDLER = CommandHandler("cleanblue", set_blue_text_must_click, pass_args=True)
ADD_CLEAN_BLUE_TEXT_HANDLER = CommandHandler("ignoreblue", add_bluetext_ignore, pass_args=True)
REMOVE_CLEAN_BLUE_TEXT_HANDLER = CommandHandler("unignoreblue", remove_bluetext_ignore, pass_args=True)
ADD_CLEAN_BLUE_TEXT_GLOBAL_HANDLER = CommandHandler("gignoreblue", add_bluetext_ignore_global, pass_args=True)
REMOVE_CLEAN_BLUE_TEXT_GLOBAL_HANDLER = CommandHandler("ungignoreblue", remove_bluetext_ignore_global, pass_args=True)
LIST_CLEAN_BLUE_TEXT_HANDLER = CommandHandler("listblue", bluetext_ignore_list)
CLEAN_BLUE_TEXT_HANDLER = MessageHandler(Filters.command & Filters.group, clean_blue_text_must_click)
dispatcher.add_handler(SET_CLEAN_BLUE_TEXT_HANDLER)
dispatcher.add_handler(ADD_CLEAN_BLUE_TEXT_HANDLER)
dispatcher.add_handler(REMOVE_CLEAN_BLUE_TEXT_HANDLER)
dispatcher.add_handler(ADD_CLEAN_BLUE_TEXT_GLOBAL_HANDLER)
dispatcher.add_handler(REMOVE_CLEAN_BLUE_TEXT_GLOBAL_HANDLER)
dispatcher.add_handler(LIST_CLEAN_BLUE_TEXT_HANDLER)
dispatcher.add_handler(CLEAN_BLUE_TEXT_HANDLER, BLUE_TEXT_CLEAN_GROUP)
__mod_name__ = "Bluetext Cleaner"
__handlers__ = [SET_CLEAN_BLUE_TEXT_HANDLER, ADD_CLEAN_BLUE_TEXT_HANDLER, REMOVE_CLEAN_BLUE_TEXT_HANDLER,
ADD_CLEAN_BLUE_TEXT_GLOBAL_HANDLER, REMOVE_CLEAN_BLUE_TEXT_GLOBAL_HANDLER,
LIST_CLEAN_BLUE_TEXT_HANDLER, (CLEAN_BLUE_TEXT_HANDLER, BLUE_TEXT_CLEAN_GROUP)]
| 2.046875 | 2 |
pplot/geo.py | thorwhalen/ut | 4 | 12763675 | <filename>pplot/geo.py
__author__ = 'thor'
from numpy import *
from mpl_toolkits.basemap import Basemap
def map_records(d, basemap_kwargs={}, plot_kwargs={}, lat_col='latitude', lng_col='longitude'):
# Create the Basemap
basemap_kwargs = dict(dict(projection='merc', # there are other choices though
resolution='l', # c(rude), l(ow), i(ntermediate), h(igh), and f(ull)
area_thresh=1000.0,
llcrnrlat=max([-89.999999, min(d[lat_col])]),
llcrnrlon=min(d[lng_col]), # Lower left corner
urcrnrlat=min([89.999999, max(d[lat_col])]),
urcrnrlon=max(d[lng_col]) # Upper right corner
),
**basemap_kwargs)
event_map = Basemap(**basemap_kwargs)
# Draw important features
event_map.drawcoastlines()
event_map.drawcountries()
event_map.fillcontinents(color='0.8') # Light gray
event_map.drawmapboundary()
plot_kwargs = dict(dict(marker='o',
markersize=7,
color='b',
alpha=0.1),
**plot_kwargs)
y, x = event_map(array(d[lng_col]), array(d[lat_col]))
event_map.plot(y, x, 'bo', **plot_kwargs)
return event_map
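# A minimal usage sketch (illustrative only; the dataframe below is an assumption):
#
#     import pandas as pd
#     df = pd.DataFrame({'latitude': [40.7, 34.0], 'longitude': [-74.0, -118.2]})
#     m = map_records(df, plot_kwargs={'alpha': 0.5})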
| 2.546875 | 3 |
src/apistblz/downloadonce.py | nfwstg/apistblz | 0 | 12763676 | import pickle
import os
import copy
import shutil
import hashlib
from .exceptions import *
# External Variables
dumpdir = '.dlo_dump'
force_on_disk = False
reportmode = None
# Internal Variables
dlo_memory = {}
prefix_list = []
keytable = {}
# Common
def _generate_keystring(is_method, prefix, args, kwargs):
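    # Build a stable cache key: hash the call arguments, prefix it, and record the
    # hash -> description mapping so later hash collisions can be detected.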
if is_method:
desc = '_'.join(
[str(x) for x in args[1:]] +
['{}-{}'.format(x, y) for x, y in sorted(
kwargs.items(), key=lambda x:x[0])])
else:
desc = '_'.join(
[str(x) for x in args] +
['{}-{}'.format(x, y) for x, y in sorted(
kwargs.items(), key=lambda x:x[0])])
hashed_desc = hashlib.md5(desc.encode()).hexdigest()
keystring = '{}_{}'.format(prefix, hashed_desc)
if keystring in keytable.keys():
if desc != keytable[keystring]:
raise DownloadOnceDuplexHash()
else:
keytable[keystring] = desc
with open(_keytablepath(), 'wb') as fd:
pickle.dump(keytable, fd)
return keystring
def _filepath(keystring):
if not os.path.isdir(dumpdir):
os.makedirs(dumpdir)
return os.path.join(dumpdir, "{}".format(keystring))
def _keytablepath():
if not os.path.isdir(dumpdir):
os.makedirs(dumpdir)
return os.path.join(dumpdir, "keytable")
def _report(line):
if reportmode == 'stdout':
print(line)
# Function for data on disk
def _check_disk(keystring):
return os.path.isfile(_filepath(keystring))
def _dump_to_disk(keystring, data):
with open(_filepath(keystring), 'wb') as fd:
pickle.dump(data, fd)
def _load_from_disk(keystring):
with open(_filepath(keystring), 'rb') as fd:
data = pickle.load(fd)
return data
# Functions for users
def clear():
if os.path.isdir(dumpdir):
shutil.rmtree(dumpdir)
def cache(keystring):
    if keystring in dlo_memory.keys():
_dump_to_disk(keystring, dlo_memory[keystring])
def uncache(keystring):
global dlo_memory
    if keystring in dlo_memory.keys():
del dlo_memory[keystring]
if os.path.isfile(_filepath(keystring)):
os.remove(_filepath(keystring))
def dump():
for k, v in dlo_memory.items():
_dump_to_disk(k, v)
# Decorator
def downloadonce(prefix, on_disk=False, is_method=False):
"""Decorator to download data through API or from cache.
Args:
prefix(str): prefix for on disk cache data.
on_disk: Save and load cached returns on disk. Default: False.
is_method: For cache on disk, name cache data with Args strings.
For method in class, need to ignore first Args(self or cls) for
cache data name. Set True for class instance method and set
False for non-class instance method.
Default: False(Non-class instance method).
Note:
Add following additional kwargs for original function.
force_run(bool): Ignore cached data and run the function again.
Cached data will be overridden by new Returns. Default: False
not_save_on_disk(bool): Load cached data or run and save cache
in memory, but never save on disk. Default: False
dlo_cmd(str): Special command.
is_cached_in_memory:
Check returns is cashed in memory or not(bool).
is_cached_on_disk:
Check returns is cashed on disk or not(bool).
uncache_in_memory: Delete returns in memory(bool).
uncache_on_disk: Delete returns on disk(bool).
cache_on_disk: Cache data in memory to disk(bool).
"""
special_args = ['force_run', 'not_save_on_disk', 'dlo_cmd']
def dlo_deco(func):
def _get(keystring, force_run, not_save_on_disk, *args, **kwargs):
global dlo_memory
def _get_from_memory():
_report(
"[downloadonce] Return from memory {}".format(keystring))
return dlo_memory[keystring]
def _get_from_disk():
_report("[downloadonce] Return from disk {}".format(keystring))
output = _load_from_disk(keystring)
dlo_memory[keystring] = output
return output
def _get_from_func():
_report("[downloadonce] Download {}".format(keystring))
output = func(*args, **kwargs)
dlo_memory[keystring] = output
if (force_on_disk or on_disk) and not not_save_on_disk:
_dump_to_disk(keystring, output)
return output
# Get output
output = None
if force_run:
output = _get_from_func()
elif keystring in dlo_memory.keys():
output = _get_from_memory()
elif (on_disk or force_on_disk) and _check_disk(keystring):
output = _get_from_disk()
else:
output = _get_from_func()
return copy.deepcopy(output)
def decorated_func(*args, **kwargs):
# Check special args.
            for argname in special_args:
if argname in func.__code__.co_varnames:
raise DownloadOnceDuplexArgs(argname)
force_run = kwargs.pop('force_run', None)
not_save_on_disk = kwargs.pop('not_save_on_disk', None)
dlo_cmd = kwargs.pop('dlo_cmd', None)
keystring = _generate_keystring(is_method, prefix, args, kwargs)
# Switch with special arguments.
if not dlo_cmd:
return _get(keystring, force_run, not_save_on_disk,
*args, **kwargs)
if dlo_cmd == 'is_cached_in_memory':
return keystring in dlo_memory.keys()
if dlo_cmd == 'is_cached_on_disk':
return _check_disk(keystring)
if dlo_cmd == 'uncache_in_memory':
if keystring in dlo_memory.keys():
del dlo_memory[keystring]
return True
return False
if dlo_cmd == 'uncache_on_disk':
if _check_disk(keystring):
os.remove(_filepath(keystring))
return True
return False
if dlo_cmd == 'cache_on_disk':
if keystring in dlo_memory.keys():
_dump_to_disk(keystring, dlo_memory[keystring])
return True
return False
            raise DownloadOnceInvalidCmd(dlo_cmd)
return decorated_func
global prefix_list
if prefix in prefix_list:
raise DownloadOnceDuplexPrefix(prefix)
prefix_list.append(prefix)
return dlo_deco
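

# --- Usage sketch (illustrative only) ----------------------------------------
# Assumes the module-level state defined earlier in this file (dlo_memory,
# prefix_list, dumpdir, ...); `slow_square` is a made-up stand-in for a real
# download/API call.
if __name__ == '__main__':
    @downloadonce('demo', on_disk=False)
    def slow_square(x):
        return x * x  # pretend this is an expensive download

    print(slow_square(7))                                  # runs and caches 49
    print(slow_square(7))                                  # served from the in-memory cache
    print(slow_square(7, dlo_cmd='is_cached_in_memory'))   # True
    print(slow_square(7, dlo_cmd='uncache_in_memory'))     # True (entry removed)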
| 2.296875 | 2 |
nipy/core/image/affine_image.py | neurospin/nipy | 1 | 12763677 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
The base image interface.
"""
import numpy as np
from scipy import ndimage
# Local imports
from .image import Image
from ..transforms.affines import to_matrix_vector
from ..reference.coordinate_system import CoordinateSystem
from ..reference.coordinate_map import (AffineTransform,
product as cmap_product)
################################################################################
# class `AffineImage`
################################################################################
class AffineImage(Image):
""" The affine image for nipy.
This object is a subclass of Image that
assumes the first 3 coordinates
are spatial.
**Attributes**
    :metadata: dictionary
        Optional, user-defined, dictionary used to carry around
        extra information about the data as it goes through
        transformations. The Image class does not guarantee consistency
of this information as the data is modified.
:_data:
Private pointer to the data.
**Properties**
:affine: 4x4 ndarray
Affine mapping from voxel axes to world coordinates
(world coordinates are always forced to be 'x', 'y', 'z').
:spatial_coordmap: AffineTransform
Coordinate map describing the spatial coordinates
(always forced to be 'x', 'y', 'z') and the coordinate
axes with names axis_names[:3].
:coordmap: AffineTransform
Coordinate map describing the relationship between
all coordinates and axis_names.
**Notes**
The data is stored in an undefined way: prescalings might need to
be applied to it before using it, or the data might be loaded on
demand. The best practice to access the data is not to access the
_data attribute, but to use the `get_data` method.
"""
#---------------------------------------------------------------------------
# Attributes, BaseImage interface
#---------------------------------------------------------------------------
# The name of the reference coordinate system
coord_sys = ''
# User defined meta data
metadata = dict()
# The data (ndarray)
_data = None
# XXX: Need an attribute to determine in a clever way the
    # interpolation order/method
def __init__(self, data, affine, coord_sys, metadata=None):
""" Creates a new nipy image with an affine mapping.
Parameters
----------
data : ndarray
ndarray representing the data.
affine : 4x4 ndarray
affine transformation to the reference coordinate system
coord_system : string
name of the reference coordinate system.
"""
affine = np.asarray(affine)
if affine.shape != (4,4):
raise ValueError('Affine image takes 4x4 affine as input')
function_domain = CoordinateSystem(['axis%d' % i for i in range(3)],
name=coord_sys)
function_range = CoordinateSystem(['x','y','z'], name='world')
spatial_coordmap = AffineTransform(function_domain, function_range,
affine)
nonspatial_names = ['axis%d' % i for i in range(3, data.ndim)]
if nonspatial_names:
nonspatial_coordmap = AffineTransform.from_start_step(nonspatial_names, nonspatial_names, [0]*(data.ndim-3), [1]*(data.ndim-3))
full_coordmap = cmap_product(spatial_coordmap, nonspatial_coordmap)
else:
full_coordmap = spatial_coordmap
self._spatial_coordmap = spatial_coordmap
self.coord_sys = coord_sys
Image.__init__(self, data, full_coordmap)
if metadata is not None:
self.metadata = metadata
def _get_spatial_coordmap(self):
"""
Returns 3 dimensional AffineTransform, which is the same
as self.coordmap if self.ndim == 3.
"""
return self._spatial_coordmap
spatial_coordmap = property(_get_spatial_coordmap)
def _get_affine(self):
"""
Returns the affine of the spatial coordmap which will
always be a 4x4 matrix.
"""
return self._spatial_coordmap.affine
affine = property(_get_affine)
def get_data(self):
# XXX What's wrong with __array__? Wouldn't that be closer to numpy?
""" Return data as a numpy array.
"""
return np.asarray(self._data)
def resampled_to_affine(self, affine_transform, world_to_world=None,
interpolation_order=3,
shape=None):
""" Resample the image to be an affine image.
Parameters
----------
affine_transform : AffineTransform
Affine of the new grid.
XXX In the original proposal, it said something about "if only 3x3 it is assumed
to be a rotation", but this wouldn't work the way the code was written becuase
it was written as if affine was the affine of an AffineImage. So, if you input
a "rotation matrix" that is assuming you have voxels of size 1....
This rotation can now be expressed with the world_to_world argument.
world_to_world: 4x4 ndarray, optional
A matrix representing a mapping from the target's (affine_transform) "world"
to self's "world". Defaults to np.identity(4)
interpolation_order : int, optional
            Order of the spline interpolation. If 0, nearest-neighbour
interpolation is performed.
shape: tuple
Shape of the resulting image. Defaults to self.shape.
Returns
-------
resampled_image : nipy AffineImage
New nipy image with the data resampled in the given
affine.
Notes
-----
The coordinate system of the resampled_image is the world
of affine_transform. Therefore, if world_to_world=np.identity(4),
the coordinate system is not changed: the
returned image points to the same world space.
"""
shape = shape or self.shape
shape = shape[:3]
if world_to_world is None:
world_to_world = np.identity(4)
world_to_world_transform = AffineTransform(affine_transform.function_range,
self.spatial_coordmap.function_range,
world_to_world)
# Delayed import to avoid circular imports
from ...algorithms.resample import resample
if self.ndim == 3:
im = resample(self, affine_transform, world_to_world_transform,
shape, order=interpolation_order)
return AffineImage(np.array(im), affine_transform.affine,
affine_transform.function_domain.name)
# XXX this below wasn't included in the original AffineImage proposal
# and it would fail for an AffineImage with ndim == 4.
# I don't know if it should be included as a special case in the AffineImage,
# but then we should at least raise an exception saying that these resample_* methods
# only work for AffineImage's with ndim==3.
#
# This is part of the reason nipy.core.image.Image does not have
# resample_* methods...
elif self.ndim == 4:
result = np.empty(shape + (self.shape[3],))
data = self.get_data()
for i in range(self.shape[3]):
tmp_affine_im = AffineImage(data[...,i], self.affine,
self.axis_names[:-1])
tmp_im = tmp_affine_im.resampled_to_affine(affine_transform,
world_to_world,
interpolation_order,
shape)
result[...,i] = np.array(tmp_im)
return AffineImage(result, affine_transform.affine,
affine_transform.function_domain.name)
else:
raise ValueError('resampling only defined for 3d and 4d AffineImage')
def resampled_to_img(self, target_image, world_to_world=None, interpolation_order=3):
""" Resample the image to be on the same grid than the target image.
Parameters
----------
target_image : AffineImage
Nipy image onto the grid of which the data will be
resampled.
XXX In the proposal, target_image was assumed to be a matrix if it had no attribute "affine". It now has to have a spatial_coordmap attribute.
world_to_world: 4x4 ndarray, optional
A matrix representing a mapping from the target's "world"
to self's "world". Defaults to np.identity(4)
interpolation_order : int, optional
            Order of the spline interpolation. If 0, nearest-neighbour
interpolation is performed.
Returns
-------
resampled_image : nipy_image
New nipy image with the data resampled.
Notes
-----
The coordinate system of the resampled_image is the world
of target_image. Therefore, if world_to_world=np.identity(4),
the coordinate system is not changed: the
returned image points to the same world space.
XXX Since you've enforced the outputs always to be 'x','y','z' -- EVERY image is embedded in the same coordinate system (i.e. 'x','y','z'), but images can have different coordinate axes. The term "embedding" that was here in the proposal refers to something in the range of a function, not its domain. By adding a world_to_world transformation, i.e. a rotation or something, we
now change the coordinate system of the resampled_image
"""
return self.resampled_to_affine(target_image.spatial_coordmap,
world_to_world,
interpolation_order,
target_image.shape)
def values_in_world(self, x, y, z, interpolation_order=3):
""" Return the values of the data at the world-space positions given by
x, y, z
Parameters
----------
x : number or ndarray
            x positions in world space, in other words millimeters
        y : number or ndarray
            y positions in world space, in other words millimeters.
            The shape of y should match the shape of x
        z : number or ndarray
            z positions in world space, in other words millimeters.
            The shape of z should match the shape of x
        interpolation_order : int, optional
            Order of the spline interpolation. If 0, nearest-neighbour
interpolation is performed.
Returns
-------
values : number or ndarray
Data values interpolated at the given world position.
This is a number or an ndarray, depending on the shape of
the input coordinate.
"""
x = np.atleast_1d(x)
y = np.atleast_1d(y)
z = np.atleast_1d(z)
shape = x.shape
if not ((x.shape == y.shape) and (x.shape == z.shape)):
raise ValueError('x, y and z shapes should be equal')
x = x.ravel()
y = y.ravel()
z = z.ravel()
xyz = np.c_[x, y, z]
world_to_voxel = self.spatial_coordmap.inverse()
ijk = world_to_voxel(xyz)
values = ndimage.map_coordinates(self.get_data(), ijk.T,
order=interpolation_order)
values = np.reshape(values, shape)
return values
#---------------------------------------------------------------------------
# AffineImage interface
#---------------------------------------------------------------------------
def xyz_ordered(self):
""" Returns an image with the affine diagonal and positive
in its coordinate system.
"""
A, b = to_matrix_vector(self.affine)
if not np.all((np.abs(A) > 0.001).sum(axis=0) == 1):
raise CoordSystemError(
'Cannot reorder the axis: the image affine contains rotations'
)
axis_numbers = list(np.argmax(np.abs(A), axis=1))
axis_names = [self.spatial_coordmap.function_domain.coord_names[a] for a in axis_numbers]
reordered_coordmap = self.spatial_coordmap.reordered_domain(axis_names)
data = self.get_data()
        transposed_data = np.transpose(data, axis_numbers + list(range(3, self.ndim)))
return AffineImage(transposed_data, reordered_coordmap.affine,
reordered_coordmap.function_domain.name)
#---------------------------------------------------------------------------
# Private methods
#---------------------------------------------------------------------------
def __repr__(self):
options = np.get_printoptions()
np.set_printoptions(precision=6, threshold=64, edgeitems=2)
representation = \
'AffineImage(\n data=%s,\n affine=%s,\n coord_sys=%s)' % (
'\n '.join(repr(self._data).split('\n')),
'\n '.join(repr(self.affine).split('\n')),
repr(self.coord_sys))
np.set_printoptions(**options)
return representation
def __copy__(self):
""" Copy the Image and the arrays and metadata it contains.
"""
return self.__class__(data=self.get_data().copy(),
affine=self.affine.copy(),
coord_sys=self.coord_sys,
metadata=self.metadata.copy())
def __deepcopy__(self, option):
""" Copy the Image and the arrays and metadata it contains.
"""
import copy
return self.__class__(data=self.get_data().copy(),
affine=self.affine.copy(),
coord_sys=self.coord_sys,
metadata=copy.deepcopy(self.metadata))
def __eq__(self, other):
return ( isinstance(other, self.__class__)
and np.all(self.get_data() == other.get_data())
and np.all(self.affine == other.affine)
and (self.coord_sys == other.coord_sys))
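

# Minimal usage sketch (illustrative only; the array and affine below are
# made-up example values, not part of nipy):
if __name__ == '__main__':
    demo_data = np.zeros((10, 10, 10))
    demo_data[5, 5, 5] = 1.0
    demo_affine = np.diag([2., 2., 2., 1.])     # 2 mm isotropic voxels
    demo_img = AffineImage(demo_data, demo_affine, 'scanner')
    print(demo_img.affine)
    # world position (10, 10, 10) mm maps back to voxel (5, 5, 5)
    print(demo_img.values_in_world(10., 10., 10., interpolation_order=0))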
| 2.390625 | 2 |
solutions/binary_search_tree/insert_bst.py | cmok4290/jubilant-octo-adventure | 0 | 12763678 | class Node(object):
def __init__(self, data=None, left=None, right=None):
self.data = data
self.left = left
self.right = right
class BinarySearchTree(object):
def __init__(self, root=None):
self.root = root
def get_root(self):
return self.root
def insert(self, item):
if self.root is None:
self.root = Node(item)
else:
cur_node = self.root
while cur_node is not None:
if item < cur_node.data:
if cur_node.left is None:
                        cur_node.left = Node(item)
return
else:
cur_node = cur_node.left
else:
if cur_node.right is None:
                        cur_node.right = Node(item)
return
else:
cur_node = cur_node.right
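

# Minimal usage sketch (illustrative): insert a few values and inspect the root.
if __name__ == '__main__':
    bst = BinarySearchTree()
    for value in (8, 3, 10, 1, 6):
        bst.insert(value)
    root = bst.get_root()
    print(root.data, root.left.data, root.right.data)  # expected: 8 3 10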
| 3.921875 | 4 |
ethereum_common/src/ethereum_common/nodes.py | Vourhey/robonomics_comm | 16 | 12763679 | # -*- coding: utf-8 -*-
#
# ethereum common related nodes.
#
from .erc20_services import ERC20Services
from .eth_services import ETHServices
from .signer import Signer
def erc20_node():
ERC20Services().spin()
def eth_node():
ETHServices().spin()
def signer_node():
Signer().spin()
| 1.8125 | 2 |
programming/5.3/main.py | Sasha-hk/saceit-practice | 1 | 12763680 | <filename>programming/5.3/main.py
from random import randint
n = 9
arr = [[randint(0, 100) for i in range(n)] for j in range(n)]
max_value = arr[0][0]
min_value = arr[0][0]
tt = 0
for i in range(0, int(n / 2 + 1)):
for j in range(tt, n - tt):
if arr[i][j] > max_value:
max_value = arr[i][j]
tt += 1
bt = int(n / 2 - 1)
for i in range(int(n / 2 + 1), n):
for j in range(bt, n - bt):
if arr[i][j] > max_value:
max_value = arr[i][j]
bt -= 1
rt = n
for i in range(0, int(n / 2)):
for j in range(rt, n):
if arr[i][j] > max_value:
max_value = arr[i][j]
rt -= 1
lt = 1
for i in range(0, n):
for j in range(0, lt):
if arr[i][j] < min_value:
min_value = arr[i][j]
if i >= n / 2:
lt -= 1
else:
lt += 1
rp = int(n / 2)
for i in range(0, int(n / 2 + 1)):
for j in range(rp, n):
if arr[i][j] > max_value:
max_value = arr[i][j]
# output
for i in range(0, n):
for j in range(0, n):
print(arr[i][j], end=' ')
print()
print()
print(' - Min value:', min_value)
print(' - Max value:', max_value)
| 3.25 | 3 |
leetcode/0073_set-matrix-zeroes.py | heyf/cloaked-octo-adventure | 0 | 12763681 | #
# @lc app=leetcode id=73 lang=python3
#
# [73] Set Matrix Zeroes
#
from typing import List
# @lc code=start
class Solution:
def setZeroes(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
        # guard against empty input
        if not matrix or not matrix[0]:
return
nrows, ncols = len(matrix), len(matrix[0])
row_zero, col_zero = False, False
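        # O(1)-space trick: remember separately whether row 0 / column 0 contain
        # a zero, then reuse row 0 and column 0 as zero-markers for the rest of
        # the matrix before applying them at the end.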
for c in range(ncols):
if matrix[0][c] == 0:
row_zero = True
for r in range(nrows):
if matrix[r][0] == 0:
col_zero = True
for r in range(1,nrows):
for c in range(1,ncols):
if matrix[r][c] == 0:
matrix[r][0] = 0
matrix[0][c] = 0
for r in range(1,nrows):
for c in range(1,ncols):
if matrix[r][0] == 0 or matrix[0][c] == 0:
matrix[r][c] = 0
if row_zero:
for c in range(ncols):
matrix[0][c] = 0
if col_zero:
for r in range(nrows):
matrix[r][0] = 0
return
# @lc code=end
s = Solution()
wa1 = [[1,2,3,4],[5,0,5,2],[8,9,2,0],[5,7,2,1]]
a = wa1
s.setZeroes(a)
print(a) | 3.734375 | 4 |
source/lambda/invite_accounts/index.py | stackArmor/compliant-framework-for-federal-and-dod-workloads-in-aws-govcloud-us | 36 | 12763682 | <reponame>stackArmor/compliant-framework-for-federal-and-dod-workloads-in-aws-govcloud-us<filename>source/lambda/invite_accounts/index.py
######################################################################################################################
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import boto3
SSM_GOVCLOUD_ACCESS_KEY_ID = '/compliant/framework/central/aws-us-gov/access-key-id'
SSM_GOVCLOUD_SECRET_ACCESS_KEY = '/compliant/framework/central/aws-us-gov/secret-access-key'
SSM_GOVCLOUD_LOGGING_ACCOUNT_ID = '/compliant/framework/accounts/core/logging/aws-us-gov/id'
SSM_GOVCLOUD_TRANSIT_ACCOUNT_ID = '/compliant/framework/accounts/prod/transit/aws-us-gov/id'
SSM_GOVCLOUD_MS_ACCOUNT_ID = '/compliant/framework/accounts/prod/management-services/aws-us-gov/id'
def invite_govcloud_account(sts_client,
org_client,
account_id):
response = org_client.list_accounts()
print('list_accounts')
print(response)
for account in response['Accounts']:
if account['Id'] == account_id:
print('Account already part of organization')
return
response = org_client.invite_account_to_organization(
Target={
'Id': account_id,
'Type': 'ACCOUNT'
}
)
print('invite_account_to_organization')
print(response)
handshake_id = response['Handshake']['Id']
child_role = sts_client.assume_role(
RoleArn=f'arn:aws-us-gov:iam::{account_id}:role/CompliantFrameworkAccountAccessRole',
RoleSessionName='CompliantFrameworkInstall'
)
child_org_client = boto3.client(
'organizations',
aws_access_key_id=child_role['Credentials']['AccessKeyId'],
aws_secret_access_key=child_role['Credentials']['SecretAccessKey'],
aws_session_token=child_role['Credentials']['SessionToken'],
region_name='us-gov-west-1')
response = child_org_client.accept_handshake(HandshakeId=handshake_id)
print('accept_handshake')
print(response)
def lambda_handler(event, context):
ssm_client = boto3.client('ssm')
govcloud_access_key_id = ssm_client.get_parameter(
Name=SSM_GOVCLOUD_ACCESS_KEY_ID
)['Parameter']['Value']
govcloud_secret_access_key = ssm_client.get_parameter(
Name=SSM_GOVCLOUD_SECRET_ACCESS_KEY,
WithDecryption=True
)['Parameter']['Value']
govcloud_region = 'us-gov-west-1'
org_client_gc = boto3.client('organizations',
aws_access_key_id=govcloud_access_key_id,
aws_secret_access_key=govcloud_secret_access_key,
region_name=govcloud_region)
sts_client_gc = boto3.client('sts',
aws_access_key_id=govcloud_access_key_id,
aws_secret_access_key=govcloud_secret_access_key,
region_name=govcloud_region)
#
logging_account_id = ssm_client.get_parameter(
Name=SSM_GOVCLOUD_LOGGING_ACCOUNT_ID
)['Parameter']['Value']
transit_account_id = ssm_client.get_parameter(
Name=SSM_GOVCLOUD_TRANSIT_ACCOUNT_ID
)['Parameter']['Value']
management_services_account_id = ssm_client.get_parameter(
Name=SSM_GOVCLOUD_MS_ACCOUNT_ID
)['Parameter']['Value']
invite_govcloud_account(sts_client_gc, org_client_gc, logging_account_id)
invite_govcloud_account(sts_client_gc, org_client_gc, transit_account_id)
invite_govcloud_account(sts_client_gc, org_client_gc,
management_services_account_id)
return {}
| 1.171875 | 1 |
tests/helpers/test_temperature.py | don66/home-assistant | 37 | 12763683 | <gh_stars>10-100
"""Tests Home Assistant temperature helpers."""
import unittest
from tests.common import get_test_home_assistant
from homeassistant.const import (
TEMP_CELSIUS, PRECISION_WHOLE, TEMP_FAHRENHEIT, PRECISION_HALVES,
PRECISION_TENTHS)
from homeassistant.helpers.temperature import display_temp
from homeassistant.util.unit_system import METRIC_SYSTEM
TEMP = 24.636626
class TestHelpersTemperature(unittest.TestCase):
"""Setup the temperature tests."""
def setUp(self):
"""Setup the tests."""
self.hass = get_test_home_assistant()
self.hass.config.unit_system = METRIC_SYSTEM
def tearDown(self):
"""Stop down stuff we started."""
self.hass.stop()
def test_temperature_not_a_number(self):
"""Test that temperature is a number."""
temp = "Temperature"
with self.assertRaises(Exception) as context:
display_temp(self.hass, temp, TEMP_CELSIUS, PRECISION_HALVES)
self.assertTrue("Temperature is not a number: {}".format(temp)
in str(context.exception))
def test_celsius_halves(self):
"""Test temperature to celsius rounding to halves."""
self.assertEqual(24.5, display_temp(
self.hass, TEMP, TEMP_CELSIUS, PRECISION_HALVES))
def test_celsius_tenths(self):
"""Test temperature to celsius rounding to tenths."""
self.assertEqual(24.6, display_temp(
self.hass, TEMP, TEMP_CELSIUS, PRECISION_TENTHS))
def test_fahrenheit_wholes(self):
"""Test temperature to fahrenheit rounding to wholes."""
self.assertEqual(-4, display_temp(
self.hass, TEMP, TEMP_FAHRENHEIT, PRECISION_WHOLE))
| 2.828125 | 3 |
covid19/covid19/spiders/Covid19.py | mzs9540/covid19-scrapy | 0 | 12763684 | <reponame>mzs9540/covid19-scrapy
import scrapy
import locale
from ..items import Covid19Item
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
class FirstSpider(scrapy.Spider):
name = 'covid19'
start_urls = [
'https://www.worldometers.info/coronavirus/'
]
def parse(self, response):
items = Covid19Item()
t = response.css('table')
countries = []
total_cases = []
new_cases = []
total_deaths = []
new_deaths = []
total_recovered = []
active_cases = []
total_cases_per_million = []
death_per_million = []
for data in t.css('td:nth-child(1)'):
value = "".join(data.css('::text').get(default='0'))
countries.append(value)
for data in t.css('td:nth-child(2)'):
value = "".join(data.css('::text').get(default='0'))
total_cases.append(locale.atoi(value))
for data in t.css('td:nth-child(3)'):
value = "".join(data.css('::text').get(default='0'))
new_cases.append(locale.atoi(value))
for data in t.css('td:nth-child(4)'):
value = "".join(data.css('::text').get(default='0'))
            if not value.strip():  # blank cells (plain or non-breaking space) mean no deaths reported
                value = '0'
total_deaths.append(locale.atoi(value))
for data in t.css('td:nth-child(5)'):
value = "".join(data.css('::text').get(default='0'))
new_deaths.append(locale.atoi(value))
for data in t.css('td:nth-child(6)'):
value = "".join(data.css('::text').get(default='0'))
total_recovered.append(locale.atoi(value))
for data in t.css('td:nth-child(7)'):
value = "".join(data.css('::text').get(default='0'))
active_cases.append(locale.atoi(value))
for data in t.css('td:nth-child(9)'):
value = "".join(data.css('::text').get(default='0'))
total_cases_per_million.append(locale.atof(value))
for data in t.css('td:nth-child(10)'):
value = "".join(data.css('::text').get(default='0'))
death_per_million.append(locale.atof(value))
# new_cases = t.css('td:nth-child(3)::text').get(default=0)
# total_deaths = t.css('td:nth-child(4)::text').get(default=0)
# new_deaths = t.css('td:nth-child(5)::text').get(default=0)
# total_recovered = t.css('td:nth-child(6)::text').get(default=0)
# active_cases = t.css('td:nth-child(7)::text').get(default=0)
# total_cases_per_million = t.css('td:nth-child(9)::text').extract()
# death_per_million = t.css('td:nth-child(10)::text').extract()
for i in range(len(total_cases)):
items['countries'] = countries[i]
items['total_cases'] = total_cases[i]
items['new_cases'] = new_cases[i]
            items['total_deaths'] = total_deaths[i]
items['new_deaths'] = new_deaths[i]
items['total_recovered'] = total_recovered[i]
items['active_cases'] = active_cases[i]
items['total_cases_per_million'] = total_cases_per_million[i]
items['death_per_million'] = death_per_million[i]
yield items
| 2.734375 | 3 |
speedysvc/serialisation/ArrowSerialisation.py | mcyph/shmrpc | 4 | 12763685 | <filename>speedysvc/serialisation/ArrowSerialisation.py
import pyarrow
class ArrowSerialisation:
"""
TODO!
"""
mimetype = 'application/octet-stream'
@staticmethod
def dumps(o):
return pyarrow.serialize(o).to_buffer()
@staticmethod
def loads(o):
return pyarrow.deserialize(o)
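

# Minimal usage sketch (illustrative; note that pyarrow.serialize/deserialize
# are legacy APIs and have been deprecated/removed in recent pyarrow releases):
if __name__ == '__main__':
    buf = ArrowSerialisation.dumps({'a': [1, 2, 3]})
    print(ArrowSerialisation.loads(buf))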
| 2.390625 | 2 |
roots/visualization.py | bingsome/Roots | 2 | 12763686 | import numpy as np
from mayavi import mlab
import os
from roots.swcToolkit import swcToolkit
class swcVisualizer():
"""
mfile = 'fileonpath.swc'
visualizer = swcVisualizer()
visualizer.mplot_mfile(mfile)
"""
def __init__(self):
self.swcTool = swcToolkit()
def create_cylinders(self,coords,diams,data,num_pts):
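        # Build a triangulated tube for every segment in `coords` (each entry is a
        # pair of 3D end points), with end radii taken from `diams`, a per-segment
        # scalar from `data`, and `num_pts` vertices around each rim. Returns flat
        # vertex arrays x/y/z, the triangle connectivity list, and one scalar per
        # vertex, ready for mlab.triangular_mesh.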
x = []
y = []
z = []
connections = []
D = []
offset = 0
for kk in range(len(coords)):
# Define points
C1 = np.array(coords[kk][0])
C2 = np.array(coords[kk][1])
# Define normal plane
p = C1-C2
d = np.dot(p,C1)
# Get normal vectors on plane
z_idx = np.arange(3)[p==0]
nz_idx = np.arange(3)[p!=0]
if len(nz_idx) == 3:
x1 = 1.
y1 = 1.
z1 = (d-(np.dot(p[:2],[x1,y1])))/p[2]
a = np.array([x1,y1,z1])
elif len(nz_idx) == 2:
a = np.zeros(3)
a[z_idx] = 1.
a[nz_idx[0]] = 1.
a[nz_idx[1]] = (d-p[nz_idx[0]])/p[nz_idx[1]]
else:
a = np.zeros(3)
a[z_idx] = 1.
a[nz_idx] = d/p[nz_idx]
a = a-C1
if len(p[p!=0]) == 3:
x2 = 1.
y2 = (a[2]*p[0]/p[2] - a[0]) / (a[1] - a[2]*p[1]/p[2])
z2 = -(p[1]*y2+p[0])/p[2]
b = np.array([x2,y2,z2])
elif len(p[p!=0]) == 2:
b = np.zeros(3)
b[z_idx] = 1.
b[nz_idx[0]] = a[z_idx]/(a[nz_idx[1]]*p[nz_idx[0]]/p[nz_idx[1]] - a[nz_idx[0]])
b[nz_idx[1]] = -p[nz_idx[0]]*b[nz_idx[0]]/p[nz_idx[1]]
else:
b = np.zeros(3)
b[nz_idx] = 0
b[z_idx[0]] = 1.
b[z_idx[1]] = -a[z_idx[0]]/a[z_idx[1]]
# Convert to unit vectors
a = a/np.linalg.norm(a)
b = b/np.linalg.norm(b)
theta_step = np.pi*2/num_pts
# Define set of points at a defined radius around
# the original points, C1 and C2
P1 = np.zeros((num_pts,3))
P2 = np.zeros((num_pts,3))
r1 = diams[kk][0]
r2 = diams[kk][1]
theta = 0
for ii in range(num_pts):
for jj in range(3):
P1[ii][jj] = C1[jj] + r1*np.cos(theta)*a[jj] + r1*np.sin(theta)*b[jj]
P2[ii][jj] = C2[jj] + r2*np.cos(theta)*a[jj] + r2*np.sin(theta)*b[jj]
theta += theta_step
# Define triangles
for ii in range(2*num_pts):
if ii < num_pts:
connections.append((ii+offset,(ii+1)%num_pts+offset,ii+num_pts+offset))
else:
connections.append((ii+offset,(ii+1-num_pts)%num_pts+offset+num_pts,(ii-num_pts+1)%num_pts+offset))
for ii in range(num_pts):
x.append(P1[ii][0])
y.append(P1[ii][1])
z.append(P1[ii][2])
D.append(data[kk])
for ii in range(num_pts):
x.append(P2[ii][0])
y.append(P2[ii][1])
z.append(P2[ii][2])
D.append(data[kk])
offset += 2*num_pts
x = np.array(x)
y = np.array(y)
z = np.array(z)
D = np.array(D)
return x, y, z, connections, D
def insert_midpoint(self,a,b):
midpoint = [item/2.0 for item in [a[0]+b[0],a[1]+b[1],a[2]+b[2],a[3]+b[3]]]
return([list(a),midpoint,list(b)])
def segment_branch(self,branch):
segments =[]
for i,seg_end in enumerate(branch[:-1]):
segments.append([branch[i],branch[i+1]])
return(segments)
def unzip_sectioned_arbor(self,arbor):
if arbor is None:
return({},{},{},{})
x = {}
y = {}
z = {}
r = {}
for branch in arbor.keys():
x[branch] = []
y[branch] = []
z[branch] = []
r[branch] = []
for section in arbor[branch]:
for point in section:
x[branch].append(point[0])
y[branch].append(point[1])
z[branch].append(point[2])
r[branch].append(point[3])
return(x,y,z,r)
def rgb_to_mlabcolor(self,rgb):
return((rgb[0]/255.0,rgb[1]/255.0,rgb[2]/255.0))
def mplot_sectioned_arbor_simplified(self,arbor,arbor_labels,view=True,DefaultDiameters=True):
fig = mlab.figure(bgcolor=(42/255.0,56/255.0,54/255.0),size=(1280,720))
keys = ['node','paranode1','paranode2','internode','interbouton','bouton']
diams = [0.75,1.54,1.54,1.54,0.2,1.0]
values = [self.rgb_to_mlabcolor(item) for item in [(255, 22, 84),(112, 193, 179),(178, 219, 191),(36, 123, 160),((243, 255, 189)),(255, 22, 84)]]
color_dict = dict(zip(keys,values))
diam_dict = dict(zip(keys,diams))
mobjs = []
for branch in arbor.keys():
# if branch not in [0,1]:
# continue
for s,section in enumerate(arbor[branch]):
if DefaultDiameters:
mobjs.append(mlab.plot3d([sec[0] for sec in section],[sec[1] for sec in section],[sec[2] for sec in section],color=color_dict[arbor_labels[branch][s]],tube_radius=diam_dict[arbor_labels[branch][s]],tube_sides=6,representation='wireframe'))
else:
mobjs.append(mlab.plot3d([sec[0] for sec in section],[sec[1] for sec in section],[sec[2] for sec in section],color=color_dict[arbor_labels[branch][s]],tube_radius=section[-1][-1],tube_sides=6))
mobjs[-1].actor.property.lighting = False
mlab.view(azimuth=0,elevation=0)
if view:
mlab.show()
def plot_electrode(self,arbor,arbor_labels,view=False):
keys = ['contact','noncontact','activecontact']
values = [self.rgb_to_mlabcolor(item) for item in [(42,56,54),(224, 224, 224),(173,42,42)]]
color_dict = dict(zip(keys,values))
electrode_parts = []
electrode_parts.append(mlab.points3d([arbor[1][0][0][0]],[arbor[1][0][0][1]],[arbor[1][0][0][2]],color=color_dict['noncontact'],scale_factor=arbor[1][0][0][3]*1,mode='sphere',resolution=16))
for s,section in enumerate(arbor[0]):
if s in arbor_labels:
col = color_dict['contact']
if s == 3:
col = color_dict['activecontact']
else:
col = color_dict['noncontact']
electrode_parts.append(mlab.plot3d([sec[0] for sec in section],[sec[1] for sec in section],[sec[2] for sec in section],color=col,tube_radius=section[-1][-1]/2.0,tube_sides=16))
for part in electrode_parts:
part.actor.property.backface_culling=True
part.actor.property.frontface_culling=True
part.actor.property.shading=True
if view:
mlab.show()
def mplot_sectioned_arbors(self,arbors,colors = [(0.29, 0.58, 0.67),(0.82, 0.35, 0.24)],view=True):
fig = mlab.figure(bgcolor=(42/255.0,56/255.0,54/255.0),size=(1280,720))
colors = [(item[0]/255.0,item[1]/255.0,item[2]/255.0) for item in [[0,119,187],[51,187,238],[0,153,136],[238,119,51],[204,51,17],[238,51,119],[221,170,51]]]
colors.reverse()
col_index = 0
for arbor in arbors:
myav_coords = []
myav_diams = []
x,y,z,r = self.unzip_sectioned_arbor(arbor)
coords = []
diams = []
for bnum in x:
tcoords = []
tdiams = []
for i,tem in enumerate(x[bnum]):
tcoords.append([x[bnum][i],y[bnum][i],z[bnum][i]])
tdiams.append(r[bnum][i])
tdiams[-1] *= 3.0
coords.extend(self.segment_branch(tcoords))
diams.extend(self.segment_branch(tdiams))
myav_coords.extend(coords)
myav_diams.extend(diams)
myav_vs = [20 for i in range(len(myav_coords)-len(coords))]+[2 for j in range(len(coords))]
num_pts = 20
tx,ty,tz,tconn,tD = self.create_cylinders(myav_coords,myav_diams,myav_vs,num_pts)
tmsh = mlab.triangular_mesh(tx,ty,tz,tconn,scalars=tD,vmin=1,vmax=20,representation='wireframe',color=colors[col_index])
tmsh.actor.property.frontface_culling = True
tmsh.actor.property.backface_culling = True
tmsh.actor.property.lighting = False
col_index+=1
if col_index==len(colors):
col_index=0
mlab.view(azimuth=0,elevation=0)
# for ii in range(D.shape[1]):
# _=mlab.triangular_mesh(x,y,z,connection,scalars = D[:,ii],vmin=Min,vmax=Max)
# _=mlab.view(azimuth=0,elevation=0)
# _=mlab.savefig('pic%.4d.png' % ii, size=(800,600))
# mlab.savefig('pic%.4d.png' % tstep,size=(1200,900))
if view:
mlab.show()
def view(self):
mlab.show()
def close(self):
mlab.close(all=True)
def mplot_sectioned_arbor(self,fig=None,arbor=None,colors = [(0.29, 0.58, 0.67),(0.82, 0.35, 0.24)],view=True):
if fig is None:
fig = mlab.figure(bgcolor=(42/255.0,56/255.0,54/255.0),size=(1280,720))
colorind = 0
myav_coords = []
myav_diams = []
x,y,z,r = self.unzip_sectioned_arbor(arbor)
coords = []
diams = []
for bnum in x:
tcoords = []
tdiams = []
for i,tem in enumerate(x[bnum]):
tcoords.append([x[bnum][i],y[bnum][i],z[bnum][i]])
tdiams.append(r[bnum][i])
# tdiams[-1] = 0.025
coords.extend(self.segment_branch(tcoords))
diams.extend(self.segment_branch(tdiams))
myav_coords.extend(coords)
myav_diams.extend(diams)
myav_vs = [20 for i in range(len(myav_coords)-len(coords))]+[2 for j in range(len(coords))]
num_pts = 20
tx,ty,tz,tconn,tD = self.create_cylinders(myav_coords,myav_diams,myav_vs,num_pts)
mlab.triangular_mesh(tx,ty,tz,tconn,scalars=tD,vmin=1,vmax=20,representation='wireframe')
colorind+=1
mlab.view(azimuth=0,elevation=0)
# for ii in range(D.shape[1]):
# _=mlab.triangular_mesh(x,y,z,connection,scalars = D[:,ii],vmin=Min,vmax=Max)
# _=mlab.view(azimuth=0,elevation=0)
# _=mlab.savefig('pic%.4d.png' % ii, size=(800,600))
# mlab.savefig('pic%.4d.png' % tstep,size=(1200,900))
if view:
mlab.show()
def mplot_mfile(self,swcfile,colors = [(0.29, 0.58, 0.67),(0.82, 0.35, 0.24)]):
colorind = 0
myav_coords = []
myav_diams = []
x,y,z,r = self.swcTool.load_swc(swcfile,asTree=False)
coords = []
diams = []
for bnum in x:
tcoords = []
tdiams = []
for i,tem in enumerate(x[bnum]):
tcoords.append([x[bnum][i],y[bnum][i],z[bnum][i]])
tdiams.append(r[bnum][i])
# tdiams[-1] = 0.025
coords.extend(self.segment_branch(tcoords))
diams.extend(self.segment_branch(tdiams))
myav_coords.extend(coords)
myav_diams.extend(diams)
myav_vs = [20 for i in range(len(myav_coords)-len(coords))]+[2 for j in range(len(coords))]
num_pts = 6
tx,ty,tz,tconn,tD = self.create_cylinders(myav_coords,myav_diams,myav_vs,num_pts)
mlab.triangular_mesh(tx,ty,tz,tconn,scalars=tD,vmin=1,vmax=20,color=colors[colorind])
colorind+=1
mlab.view(azimuth=0,elevation=0)
# for ii in range(D.shape[1]):
# _=mlab.triangular_mesh(x,y,z,connection,scalars = D[:,ii],vmin=Min,vmax=Max)
# _=mlab.view(azimuth=0,elevation=0)
# _=mlab.savefig('pic%.4d.png' % ii, size=(800,600))
# mlab.savefig('pic%.4d.png' % tstep,size=(1200,900))
mlab.show()
| 2.546875 | 3 |
deeplearning.py | thanakijwanavit/flowerai | 1 | 12763687 | <filename>deeplearning.py
import torch
# method for validation
def validation(model, validloader, criterion):
valid_loss = 0
accuracy = 0
for images, labels in validloader:
images, labels = images.to('cuda'), labels.to('cuda')
output = model.forward(images)
valid_loss += criterion(output, labels).item()
ps = torch.exp(output)
equality = (labels.data == ps.max(dim=1)[1])
accuracy += equality.type(torch.FloatTensor).mean()
return valid_loss, accuracy
def do_deep_learning(model, trainloader, epochs, print_every, criterion, optimizer,validloader, device='cpu'):
epochs = epochs
print_every = print_every
steps = 0
# change to cuda
if device == 'gpu':
model.to('cuda')
for e in range(epochs):
running_loss = 0
model.train()
for ii, (inputs, labels) in enumerate(trainloader):
steps += 1
inputs, labels = inputs.to('cuda'), labels.to('cuda')
optimizer.zero_grad()
# Forward and backward passes
outputs = model.forward(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
#to test the model
model.eval()
with torch.no_grad():
valid_loss, valid_accuracy = validation(model, validloader, criterion)
print("Epoch: {}/{}... ".format(e+1, epochs),
"Loss: {:.4f}".format(running_loss/print_every),
"Validation Loss: {:.4f}".format(valid_loss/len(validloader)),
"Validation Accuracy: {:.4f}".format(valid_accuracy/len(validloader)))
running_loss = 0
else:
for e in range(epochs):
running_loss = 0
for ii, (inputs, labels) in enumerate(trainloader):
steps += 1
optimizer.zero_grad()
# Forward and backward passes
outputs = model.forward(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
print("Epoch: {}/{}... ".format(e+1, epochs),
"Loss: {:.4f}".format(running_loss/print_every))
running_loss = 0
return model
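

# Usage sketch (illustrative only; the model, data loaders and hyperparameters
# below are assumptions, not part of this module):
#
#     import torch.nn as nn
#     import torch.optim as optim
#     from torchvision import models
#
#     model = models.vgg16(pretrained=True)   # plus a custom classifier ending in LogSoftmax
#     criterion = nn.NLLLoss()
#     optimizer = optim.Adam(model.classifier.parameters(), lr=0.001)
#     model = do_deep_learning(model, trainloader, epochs=3, print_every=40,
#                              criterion=criterion, optimizer=optimizer,
#                              validloader=validloader, device='gpu')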
| 3 | 3 |
specs/default/chef/site-cookbooks/pbspro/files/default/autoscale_hook.py | themorey/cyclecloud-pbspro | 8 | 12763688 | <filename>specs/default/chef/site-cookbooks/pbspro/files/default/autoscale_hook.py
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#
import json
import os
import shutil
import subprocess
import traceback
import pbs
def debug(msg):
pbs.logmsg(pbs.EVENT_DEBUG, "azpbs_autoscale - %s" % msg)
def error(msg):
pbs.logmsg(pbs.EVENT_ERROR, "azpbs_autoscale - %s" % msg)
def perform_hook():
"""
See /var/spool/pbs/server_logs/* or /opt/cycle/jetpack/logs/autoscale.log for log messages
"""
try:
if not pbs.hook_config_filename:
raise RuntimeError("Hook config for this plugin was not defined.")
with open(pbs.hook_config_filename) as fr:
hook_config = json.load(fr)
azpbs_path = hook_config.get("azpbs_path")
if not azpbs_path:
azpbs_path = shutil.which("azpbs")
if not azpbs_path:
default_azpbs_path = "/opt/cycle/pbspro/venv/bin/azpbs"
if not os.path.exists(default_azpbs_path):
raise RuntimeError("Could not find azpbs in the path: %s" % os.environ)
debug("Using default az path: %s" % default_azpbs_path)
azpbs_path = default_azpbs_path
cmd = [azpbs_path, "autoscale"]
if hook_config.get("autoscale_json"):
cmd.append("-c")
cmd.append(hook_config["autoscale_json"])
environ = {}
environ.update(os.environ)
assert pbs.pbs_conf.get(
"PBS_EXEC"
), "PBS_EXEC was not defined in pbs.pbs_conf. This is a PBS error."
pbs_bin = pbs.pbs_conf["PBS_EXEC"] + os.sep + "bin"
environ["PATH"] = environ.get("PATH", ".") + os.pathsep + pbs_bin
debug("Running %s with env %s" % (cmd, environ))
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=environ
)
stdout, stderr = proc.communicate()
if hasattr(stdout, "decode"):
stdout = stdout.decode()
stderr = stderr.decode()
debug(stderr)
if proc.returncode != 0:
raise RuntimeError(
'autoscale failed!\n\tstdout="%s"\n\tstderr="%s"' % (stdout, stderr)
)
except Exception as e:
error(str(e))
error(traceback.format_exc())
raise
# hooks must not have a __name__ == "__main__" guard
perform_hook()
| 1.929688 | 2 |
B站/Python自动化办公 · 一课通(适合小白)/Chapter2/S2-1-2/PracticeAnswer/2.1.2exercise.py | zhaofeng092/python_auto_office | 46 | 12763689 | from docx import Document
from docx.enum.style import WD_STYLE_TYPE
document = Document()
styles = document.styles
for i in styles:
if i.type == WD_STYLE_TYPE.TABLE:
document.add_paragraph("表格样式 : " + i.name)
table = document.add_table(4, 5, style=i)
document.add_paragraph("\n\n")
document.save('所有表格样式.docx')
| 2.640625 | 3 |
test.py | appfirst/python-hbase-client | 0 | 12763690 | <reponame>appfirst/python-hbase-client
#
# Testing integration of libhdfs with Python
#
import argparse
import sys
import hbase
diag = False
def debug():
import pdb
pdb.set_trace()
def modRead(connect, args, data):
hbase.Get(connect, args.rdtable,
args.rdrow,
args.rdcolfam,
args.rdcolumn)
if diag:
print(("Get took {0} usecs".format(hbase.DiagGet())))
hbdata = ''
while hbdata != None:
hbdata = hbase.Data(connect)
print(("*** Returned Data *** ", hbdata))
if diag:
print(("Data took {0} usecs".format(hbase.DiagGet())))
if (hbdata != None) and (data[:5] != hbdata[:5]):
print('ERROR: data does not match')
sys.exit(1)
def modCell(connect, args):
hbase.Get(connect, args.rdtable,
args.rdrow,
args.rdcolfam,
args.rdcolumn)
if diag:
print(("Get took {0} usecs".format(hbase.DiagGet())))
print("Read Cell")
hbdata = []
while hbdata != None:
hbdata = hbase.Cell(connect)
print(("*** Returned Data *** ", hbdata))
if diag:
print(("Cell took {0} usecs".format(hbase.DiagGet())))
def modWrite(connect, args, data):
try:
hbase.Put(connect,
args.wrtable,
args.wrrow,
args.wrcolfam,
args.wrcolumn,
data)
except Exception as e:
raise e
if diag:
print(("Put took {0} usecs".format(hbase.DiagGet())))
hbase.Flush(connect, '')
if diag:
print(("Flush took {0} usecs".format(hbase.DiagGet())))
def modScan(connect, args):
hbase.Scan(connect, args.rdtable,
args.rdrow, args.rdrow, '', '', '')
if diag:
print(("Scan took {0} usecs".format(hbase.DiagGet())))
cell = ''
while cell != None:
cell = hbase.DataScan(connect)
print(("******** Scan Data *** ", cell))
if diag:
print(("DataScan took {0} usecs".format(hbase.DiagGet())))
def memTest(connect, args, data):
    i = 0
    while 1:
        modWrite(connect, args, data)
        modRead(connect, args, data)
        i += 1
        if i % 100 == 0:
            hbase.Flush(connect, args.wrtable)
def Log(msg):
print(msg)
pass
def Err(msg):
print(msg)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--zookeeper", required=False, default='zookeeper0:2181')
parser.add_argument("--hbmaster", required=False, default='hbasemaster0:60000')
parser.add_argument("--table_test", required=False, default=b'process_test')
parser.add_argument("--row_test", required=False, default=b'987:654:asdfg')
parser.add_argument("--colfam_test", required=False, default=b'name')
parser.add_argument("--column_test", required=False, default=b'Amy')
parser.add_argument("--wrtable", required=False, default=b'summary')
parser.add_argument("--wrrow", required=False, default=b'test')
parser.add_argument("--wrcolfam", required=False, default=b'summary')
parser.add_argument("--wrcolumn", required=False, default=b'\x00\x00A')
parser.add_argument("--rdtable", required=False, default=b'summary')
parser.add_argument("--rdrow", required=False, default=b'test')
parser.add_argument("--rdcolfam", required=False, default=b"summary")
parser.add_argument("--rdcolumn", required=False, default=b"\x00\x00A")
parser.add_argument("--maxmem", required=False, default='8192k')
parsed_arguments = parser.parse_args(sys.argv[1:])
data = b'\xa5\x05T4244,1455553829.3;1103\r\x12(0955.45;1152\x13\x00d75;32715,1456204199.41;472\x05\x13\x01L$968.7;5994\x01%$5908186.92\x01L\x001\x05\x13\x08550\x01_\x1875;2718\x05`06199388.02;73\x119(3942.16;936\x198 2.81;1159\x19\x13\x185.76;94\x1d\x12\x003\x057\x0450\x19$\x05\x12\x08602\t%4908203.58;4712\r&\x05\xba\x1462;1112\xa8\x00\x1448;384\t\x95!,\x1009.49\x01\x83\x11\xf3\x000\x05\xce\x006!\x1a).!\x1a$229.66;377\r_\x180946.31\x01&\x004\x05:\x05&\x1801.13;9.r\x00\x082.5!?\x05$\x01\xaa%?!\x9e\x006\x11\xaa\tq\x0c7;962\xe2\x00!\xb0\x08027\x15p\x1054.78\x01p\t\xe3\x01p!\xb0\x002\x01\xe3.:\x00%@\x08115\x1d:\r\x13\x0066\x13\x00\x01\xf6\x0424\x19`\x085.5\x01\x13\x04182\x13\x00\x102;100=B\x004E[\x0062%\x00\x0479\x01\x98)\xa1\x01\x98!.L47;976,1455550953.94'
hbase.Logger(Log)
hbase.Error(Err)
hbase.JumpStart(parsed_arguments.maxmem)
print(("Hbase Client Version {0}".format(hbase.Version())))
print("Connect")
#print format(connect, '#08X')
client = hbase.Connect(parsed_arguments.zookeeper, parsed_arguments.hbmaster,
None, False, parsed_arguments.maxmem)
#flush = hbase.SetAutoFlush(client, parsed_arguments.wrtable, 1)
#print("Flush is set to {0}".format(flush))
diag = hbase.DiagSet(True)
modWrite(client, parsed_arguments, data)
parsed_arguments.wrcolumn = b'\x00\x00B'
modWrite(client, parsed_arguments, data)
parsed_arguments.wrcolumn = b'\x00\x00F'
modWrite(client, parsed_arguments, data)
hbase.Flush(client, '')
modRead(client, parsed_arguments, data)
modCell(client, parsed_arguments)
modScan(client, parsed_arguments)
hbase.Delete(client, parsed_arguments.wrtable, parsed_arguments.wrrow,
parsed_arguments.wrcolfam, parsed_arguments.wrcolumn)
if diag:
print(("Delete took {0} usecs".format(hbase.DiagGet())))
print("Disconnect")
hbase.Disconnect(client)
| 2.375 | 2 |
remove_duplicates.py | AnneliektH/EVEs_arthropod | 0 | 12763691 | <reponame>AnneliektH/EVEs_arthropod
#<NAME>, 2017
# load pandas
import pandas as pd
import sys
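# Usage (illustrative): python remove_duplicates.py <input_hits.csv> <deduplicated_output.csv>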
# load dataframe
df = pd.DataFrame.from_csv(sys.argv[1])
# sort on postion query start to keep highest later on
df.sort_values('position_on_query_start', inplace=True)
# drop duplicates based on start and direction keep first so highest
df.drop_duplicates(["direction", "position_on_query_start"], inplace=True, keep="first")
# sort on postion query stop to keep highest later on
df.sort_values("position_on_query_stop", inplace=True)
# drop duplicates based on stop and direction keep first so highest
df.drop_duplicates(["direction", "position_on_query_stop"], inplace=True, keep="first")
print len(df)
# show dataframe
df.to_csv(sys.argv[2])
| 2.984375 | 3 |
setup.py | explabs/ad-ctf-paas-lib | 0 | 12763692 | from setuptools import setup
setup(
name='ad-ctf-paas-lib',
version='0.0.3',
packages=['checker'],
url='',
license='',
author='ivanh',
author_email='<EMAIL>',
description='Library that help to write checkers'
)
| 1.0625 | 1 |
2015/2015_07a.py | davidxiao93/Advent-of-Code | 0 | 12763693 | input = """af AND ah -> ai
NOT lk -> ll
hz RSHIFT 1 -> is
NOT go -> gp
du OR dt -> dv
x RSHIFT 5 -> aa
at OR az -> ba
eo LSHIFT 15 -> es
ci OR ct -> cu
b RSHIFT 5 -> f
fm OR fn -> fo
NOT ag -> ah
v OR w -> x
g AND i -> j
an LSHIFT 15 -> ar
1 AND cx -> cy
jq AND jw -> jy
iu RSHIFT 5 -> ix
gl AND gm -> go
NOT bw -> bx
jp RSHIFT 3 -> jr
hg AND hh -> hj
bv AND bx -> by
er OR es -> et
kl OR kr -> ks
et RSHIFT 1 -> fm
e AND f -> h
u LSHIFT 1 -> ao
he RSHIFT 1 -> hx
eg AND ei -> ej
bo AND bu -> bw
dz OR ef -> eg
dy RSHIFT 3 -> ea
gl OR gm -> gn
da LSHIFT 1 -> du
au OR av -> aw
gj OR gu -> gv
eu OR fa -> fb
lg OR lm -> ln
e OR f -> g
NOT dm -> dn
NOT l -> m
aq OR ar -> as
gj RSHIFT 5 -> gm
hm AND ho -> hp
ge LSHIFT 15 -> gi
jp RSHIFT 1 -> ki
hg OR hh -> hi
lc LSHIFT 1 -> lw
km OR kn -> ko
eq LSHIFT 1 -> fk
1 AND am -> an
gj RSHIFT 1 -> hc
aj AND al -> am
gj AND gu -> gw
ko AND kq -> kr
ha OR gz -> hb
bn OR by -> bz
iv OR jb -> jc
NOT ac -> ad
bo OR bu -> bv
d AND j -> l
bk LSHIFT 1 -> ce
de OR dk -> dl
dd RSHIFT 1 -> dw
hz AND ik -> im
NOT jd -> je
fo RSHIFT 2 -> fp
hb LSHIFT 1 -> hv
lf RSHIFT 2 -> lg
gj RSHIFT 3 -> gl
ki OR kj -> kk
NOT ak -> al
ld OR le -> lf
ci RSHIFT 3 -> ck
1 AND cc -> cd
NOT kx -> ky
fp OR fv -> fw
ev AND ew -> ey
dt LSHIFT 15 -> dx
NOT ax -> ay
bp AND bq -> bs
NOT ii -> ij
ci AND ct -> cv
iq OR ip -> ir
x RSHIFT 2 -> y
fq OR fr -> fs
bn RSHIFT 5 -> bq
0 -> c
14146 -> b
d OR j -> k
z OR aa -> ab
gf OR ge -> gg
df OR dg -> dh
NOT hj -> hk
NOT di -> dj
fj LSHIFT 15 -> fn
lf RSHIFT 1 -> ly
b AND n -> p
jq OR jw -> jx
gn AND gp -> gq
x RSHIFT 1 -> aq
ex AND ez -> fa
NOT fc -> fd
bj OR bi -> bk
as RSHIFT 5 -> av
hu LSHIFT 15 -> hy
NOT gs -> gt
fs AND fu -> fv
dh AND dj -> dk
bz AND cb -> cc
dy RSHIFT 1 -> er
hc OR hd -> he
fo OR fz -> ga
t OR s -> u
b RSHIFT 2 -> d
NOT jy -> jz
hz RSHIFT 2 -> ia
kk AND kv -> kx
ga AND gc -> gd
fl LSHIFT 1 -> gf
bn AND by -> ca
NOT hr -> hs
NOT bs -> bt
lf RSHIFT 3 -> lh
au AND av -> ax
1 AND gd -> ge
jr OR js -> jt
fw AND fy -> fz
NOT iz -> ja
c LSHIFT 1 -> t
dy RSHIFT 5 -> eb
bp OR bq -> br
NOT h -> i
1 AND ds -> dt
ab AND ad -> ae
ap LSHIFT 1 -> bj
br AND bt -> bu
NOT ca -> cb
NOT el -> em
s LSHIFT 15 -> w
gk OR gq -> gr
ff AND fh -> fi
kf LSHIFT 15 -> kj
fp AND fv -> fx
lh OR li -> lj
bn RSHIFT 3 -> bp
jp OR ka -> kb
lw OR lv -> lx
iy AND ja -> jb
dy OR ej -> ek
1 AND bh -> bi
NOT kt -> ku
ao OR an -> ap
ia AND ig -> ii
NOT ey -> ez
bn RSHIFT 1 -> cg
fk OR fj -> fl
ce OR cd -> cf
eu AND fa -> fc
kg OR kf -> kh
jr AND js -> ju
iu RSHIFT 3 -> iw
df AND dg -> di
dl AND dn -> do
la LSHIFT 15 -> le
fo RSHIFT 1 -> gh
NOT gw -> gx
NOT gb -> gc
ir LSHIFT 1 -> jl
x AND ai -> ak
he RSHIFT 5 -> hh
1 AND lu -> lv
NOT ft -> fu
gh OR gi -> gj
lf RSHIFT 5 -> li
x RSHIFT 3 -> z
b RSHIFT 3 -> e
he RSHIFT 2 -> hf
NOT fx -> fy
jt AND jv -> jw
hx OR hy -> hz
jp AND ka -> kc
fb AND fd -> fe
hz OR ik -> il
ci RSHIFT 1 -> db
fo AND fz -> gb
fq AND fr -> ft
gj RSHIFT 2 -> gk
cg OR ch -> ci
cd LSHIFT 15 -> ch
jm LSHIFT 1 -> kg
ih AND ij -> ik
fo RSHIFT 3 -> fq
fo RSHIFT 5 -> fr
1 AND fi -> fj
1 AND kz -> la
iu AND jf -> jh
cq AND cs -> ct
dv LSHIFT 1 -> ep
hf OR hl -> hm
km AND kn -> kp
de AND dk -> dm
dd RSHIFT 5 -> dg
NOT lo -> lp
NOT ju -> jv
NOT fg -> fh
cm AND co -> cp
ea AND eb -> ed
dd RSHIFT 3 -> df
gr AND gt -> gu
ep OR eo -> eq
cj AND cp -> cr
lf OR lq -> lr
gg LSHIFT 1 -> ha
et RSHIFT 2 -> eu
NOT jh -> ji
ek AND em -> en
jk LSHIFT 15 -> jo
ia OR ig -> ih
gv AND gx -> gy
et AND fe -> fg
lh AND li -> lk
1 AND io -> ip
kb AND kd -> ke
kk RSHIFT 5 -> kn
id AND if -> ig
NOT ls -> lt
dw OR dx -> dy
dd AND do -> dq
lf AND lq -> ls
NOT kc -> kd
dy AND ej -> el
1 AND ke -> kf
et OR fe -> ff
hz RSHIFT 5 -> ic
dd OR do -> dp
cj OR cp -> cq
NOT dq -> dr
kk RSHIFT 1 -> ld
jg AND ji -> jj
he OR hp -> hq
hi AND hk -> hl
dp AND dr -> ds
dz AND ef -> eh
hz RSHIFT 3 -> ib
db OR dc -> dd
hw LSHIFT 1 -> iq
he AND hp -> hr
NOT cr -> cs
lg AND lm -> lo
hv OR hu -> hw
il AND in -> io
NOT eh -> ei
gz LSHIFT 15 -> hd
gk AND gq -> gs
1 AND en -> eo
NOT kp -> kq
et RSHIFT 5 -> ew
lj AND ll -> lm
he RSHIFT 3 -> hg
et RSHIFT 3 -> ev
as AND bd -> bf
cu AND cw -> cx
jx AND jz -> ka
b OR n -> o
be AND bg -> bh
1 AND ht -> hu
1 AND gy -> gz
NOT hn -> ho
ck OR cl -> cm
ec AND ee -> ef
lv LSHIFT 15 -> lz
ks AND ku -> kv
NOT ie -> if
hf AND hl -> hn
1 AND r -> s
ib AND ic -> ie
hq AND hs -> ht
y AND ae -> ag
NOT ed -> ee
bi LSHIFT 15 -> bm
dy RSHIFT 2 -> dz
ci RSHIFT 2 -> cj
NOT bf -> bg
NOT im -> in
ev OR ew -> ex
ib OR ic -> id
bn RSHIFT 2 -> bo
dd RSHIFT 2 -> de
bl OR bm -> bn
as RSHIFT 1 -> bl
ea OR eb -> ec
ln AND lp -> lq
kk RSHIFT 3 -> km
is OR it -> iu
iu RSHIFT 2 -> iv
as OR bd -> be
ip LSHIFT 15 -> it
iw OR ix -> iy
kk RSHIFT 2 -> kl
NOT bb -> bc
ci RSHIFT 5 -> cl
ly OR lz -> ma
z AND aa -> ac
iu RSHIFT 1 -> jn
cy LSHIFT 15 -> dc
cf LSHIFT 1 -> cz
as RSHIFT 3 -> au
cz OR cy -> da
kw AND ky -> kz
lx -> a
iw AND ix -> iz
lr AND lt -> lu
jp RSHIFT 5 -> js
aw AND ay -> az
jc AND je -> jf
lb OR la -> lc
NOT cn -> co
kh LSHIFT 1 -> lb
1 AND jj -> jk
y OR ae -> af
ck AND cl -> cn
kk OR kv -> kw
NOT cv -> cw
kl AND kr -> kt
iu OR jf -> jg
at AND az -> bb
jp RSHIFT 2 -> jq
iv AND jb -> jd
jn OR jo -> jp
x OR ai -> aj
ba AND bc -> bd
jl OR jk -> jm
b RSHIFT 1 -> v
o AND q -> r
NOT p -> q
k AND m -> n
as RSHIFT 2 -> at"""
values = {}
mapping = {}
for line in input.split("\n"):
destination = line.split("->")[-1].strip()
provider = line.split("->")[0].strip()
mapping[destination] = provider
# evaluate for a
def evaluate(target):
if target.isnumeric():
return int(target)
if target in values:
return values[target]
if target not in mapping:
print("Unknown target", target)
exit(1)
target_provider = mapping[target]
args = target_provider.split()
if len(args) == 1:
# Assignment
values[target] = evaluate(args[0])
elif len(args) == 2:
# NOT
values[target] = ~evaluate(args[1])
else:
if args[1] == "AND":
values[target] = evaluate(args[0]) & evaluate(args[2])
elif args[1] == "OR":
values[target] = evaluate(args[0]) | evaluate(args[2])
elif args[1] == "LSHIFT":
values[target] = evaluate(args[0]) << evaluate(args[2])
elif args[1] == "RSHIFT":
values[target] = evaluate(args[0]) >> evaluate(args[2])
else:
print("unknown operator", args[1])
exit(1)
if target not in values:
print("How did i get here")
exit(1)
return values[target]
print(evaluate("a")) | 2.03125 | 2 |
projects/CenterNet2/centernet/modeling/dense_heads/centernet_head.py | collector-m/CenterNet2 | 2 | 12763694 | <filename>projects/CenterNet2/centernet/modeling/dense_heads/centernet_head.py
import math
from typing import List
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.layers import ShapeSpec, get_norm
from ..layers.deform_conv import DFConv2d
__all__ = ["CenterNetHead"]
class Scale(nn.Module):
def __init__(self, init_value=1.0):
super(Scale, self).__init__()
self.scale = nn.Parameter(torch.FloatTensor([init_value]))
def forward(self, input):
return input * self.scale
class CenterNetHead(nn.Module):
def __init__(self, cfg, input_shape: List[ShapeSpec]):
super().__init__()
self.num_classes = cfg.MODEL.CENTERNET.NUM_CLASSES
self.with_agn_hm = cfg.MODEL.CENTERNET.WITH_AGN_HM
self.only_proposal = cfg.MODEL.CENTERNET.ONLY_PROPOSAL
self.out_kernel = 3
norm = cfg.MODEL.CENTERNET.NORM
head_configs = {"cls": (cfg.MODEL.CENTERNET.NUM_CLS_CONVS \
if not self.only_proposal else 0,
cfg.MODEL.CENTERNET.USE_DEFORMABLE),
"bbox": (cfg.MODEL.CENTERNET.NUM_BOX_CONVS,
cfg.MODEL.CENTERNET.USE_DEFORMABLE),
"share": (cfg.MODEL.CENTERNET.NUM_SHARE_CONVS,
cfg.MODEL.CENTERNET.USE_DEFORMABLE)}
in_channels = [s.channels for s in input_shape]
assert len(set(in_channels)) == 1, \
"Each level must have the same channel!"
in_channels = in_channels[0]
channels = {
'cls': in_channels,
'bbox': in_channels,
'share': in_channels,
}
for head in head_configs:
tower = []
num_convs, use_deformable = head_configs[head]
channel = channels[head]
for i in range(num_convs):
if use_deformable and i == num_convs - 1:
conv_func = DFConv2d
else:
conv_func = nn.Conv2d
tower.append(conv_func(
in_channels if i == 0 else channel,
channel,
kernel_size=3, stride=1,
padding=1, bias=True
))
if norm == 'GN' and channel % 32 != 0:
tower.append(nn.GroupNorm(25, channel))
elif norm != '':
tower.append(get_norm(norm, channel))
tower.append(nn.ReLU())
self.add_module('{}_tower'.format(head),
nn.Sequential(*tower))
self.bbox_pred = nn.Conv2d(
in_channels, 4, kernel_size=self.out_kernel,
stride=1, padding=self.out_kernel // 2
)
self.scales = nn.ModuleList(
[Scale(init_value=1.0) for _ in input_shape])
for modules in [
self.cls_tower, self.bbox_tower,
self.share_tower,
self.bbox_pred,
]:
for l in modules.modules():
if isinstance(l, nn.Conv2d):
torch.nn.init.normal_(l.weight, std=0.01)
torch.nn.init.constant_(l.bias, 0)
torch.nn.init.constant_(self.bbox_pred.bias, 8.)
prior_prob = cfg.MODEL.CENTERNET.PRIOR_PROB
bias_value = -math.log((1 - prior_prob) / prior_prob)
if self.with_agn_hm:
self.agn_hm = nn.Conv2d(
in_channels, 1, kernel_size=self.out_kernel,
stride=1, padding=self.out_kernel // 2
)
torch.nn.init.constant_(self.agn_hm.bias, bias_value)
torch.nn.init.normal_(self.agn_hm.weight, std=0.01)
if not self.only_proposal:
cls_kernel_size = self.out_kernel
self.cls_logits = nn.Conv2d(
in_channels, self.num_classes,
kernel_size=cls_kernel_size,
stride=1,
padding=cls_kernel_size // 2,
)
torch.nn.init.constant_(self.cls_logits.bias, bias_value)
torch.nn.init.normal_(self.cls_logits.weight, std=0.01)
def forward(self, x):
clss = []
bbox_reg = []
agn_hms = []
for l, feature in enumerate(x):
feature = self.share_tower(feature)
cls_tower = self.cls_tower(feature)
bbox_tower = self.bbox_tower(feature)
if not self.only_proposal:
clss.append(self.cls_logits(cls_tower))
else:
clss.append(None)
if self.with_agn_hm:
agn_hms.append(self.agn_hm(bbox_tower))
else:
agn_hms.append(None)
reg = self.bbox_pred(bbox_tower)
reg = self.scales[l](reg)
bbox_reg.append(F.relu(reg))
return clss, bbox_reg, agn_hms | 2.203125 | 2 |
packages/M2Crypto-0.21.1/demo/medusa/virtual_handler.py | RaphaelPrevost/Back2Shops | 0 | 12763695 | <gh_stars>0
# -*- Mode: Python; tab-width: 4 -*-
import socket
import default_handler
import re
HOST = re.compile ('Host: ([^:/]+).*', re.IGNORECASE)
get_header = default_handler.get_header
class virtual_handler:
"""HTTP request handler for an HTTP/1.0-style virtual host. Each
Virtual host must have a different IP"""
def __init__ (self, handler, hostname):
self.handler = handler
self.hostname = hostname
try:
self.ip = socket.gethostbyname (hostname)
except socket.error:
raise ValueError, "Virtual Hostname %s does not appear to be registered in the DNS" % hostname
def match (self, request):
if (request.channel.addr[0] == self.ip):
return 1
else:
return 0
def handle_request (self, request):
return self.handler.handle_request (request)
def __repr__ (self):
return '<virtual request handler for %s>' % self.hostname
class virtual_handler_with_host:
"""HTTP request handler for HTTP/1.1-style virtual hosts. This
matches by checking the value of the 'Host' header in the request.
You actually don't _have_ to support HTTP/1.1 to use this, since
many browsers now send the 'Host' header. This is a Good Thing."""
def __init__ (self, handler, hostname):
self.handler = handler
self.hostname = hostname
def match (self, request):
host = get_header (HOST, request.header)
if host == self.hostname:
return 1
else:
return 0
def handle_request (self, request):
return self.handler.handle_request (request)
def __repr__ (self):
return '<virtual request handler for %s>' % self.hostname
| 3.0625 | 3 |
4/main.py | misterwilliam/advent-of-code | 0 | 12763696 | <filename>4/main.py<gh_stars>0
import md5
import unittest
def GetDigest(string):
m = md5.new()
m.update(string)
return m.hexdigest()
def DoesStartWithZeros(string, numZeros):
return string[:numZeros] == "".join("0" for i in xrange(numZeros))
def Mine(string):
i = 1
while True:
digest = GetDigest(string + str(i))
if DoesStartWithZeros(digest, 5):
break
i += 1
return i
def Mine2(string):
i = 1
while True:
digest = GetDigest(string + str(i))
if DoesStartWithZeros(digest, 6):
break
i += 1
return i
print Mine2("bgvyzdsv")
class MyTests(unittest.TestCase):
def test_GetDigest(self):
self.assertEqual(GetDigest("abcdef609043"), "000001dbbfa3a5c83a2d506429c7b00e")
def test_DoesStartWithFiveZeros(self):
self.assertFalse(DoesStartWithZeros("0000x", 5))
self.assertTrue(DoesStartWithZeros("00000x", 5))
def test_Mine(self):
self.assertEqual(Mine("abcdef"), 609043)
self.assertEqual(Mine("pqrstuv"), 1048970)
if __name__ == "__main__":
unittest.main() | 2.96875 | 3 |
keyboards/inline/menu_profile_inline.py | vR4eslav/DatingBot | 0 | 12763697 | <filename>keyboards/inline/menu_profile_inline.py<gh_stars>0
from aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton
async def get_profile_keyboard(verification):
markup = InlineKeyboardMarkup()
if not verification:
verification_btn = InlineKeyboardButton(text="✅ Верификация", callback_data="verification")
markup.row(verification_btn)
edit_profile = InlineKeyboardButton(text="Изменить анкету", callback_data="change_profile")
instagram = InlineKeyboardButton(text="📸 Instagram", callback_data="add_inst")
turn_off = InlineKeyboardButton(text="❌ Удалить анкету", callback_data="disable")
back = InlineKeyboardButton(text="⏪ Назад", callback_data="back_with_delete")
markup.row(edit_profile, instagram)
markup.add(turn_off)
markup.add(back)
return markup
| 2.5625 | 3 |
pylark/api_service_drive_comment_patch.py | chyroc/pylark | 7 | 12763698 | <filename>pylark/api_service_drive_comment_patch.py
# Code generated by lark_sdk_gen. DO NOT EDIT.
from pylark.lark_request import RawRequestReq, _new_method_option
from pylark import lark_type, lark_type_sheet, lark_type_approval
import attr
import typing
import io
@attr.s
class UpdateDriveCommentPatchReq(object):
file_type: lark_type.FileType = attr.ib(
factory=lambda: lark_type.FileType(),
metadata={"req_type": "query", "key": "file_type"},
) # 文档类型, 示例值:"doc", 可选值有: `doc`:文档, `sheet`:表格, `file`:文件
file_token: str = attr.ib(
default="", metadata={"req_type": "path", "key": "file_token"}
) # 文档token, 示例值:"<KEY>"
comment_id: str = attr.ib(
default="", metadata={"req_type": "path", "key": "comment_id"}
) # 评论ID, 示例值:"6916106822734578184"
is_solved: bool = attr.ib(
factory=lambda: bool(), metadata={"req_type": "json", "key": "is_solved"}
) # 评论解决标志, 示例值:true
@attr.s
class UpdateDriveCommentPatchResp(object):
pass
def _gen_update_drive_comment_patch_req(request, options) -> RawRequestReq:
return RawRequestReq(
dataclass=UpdateDriveCommentPatchResp,
scope="Drive",
api="UpdateDriveCommentPatch",
method="PATCH",
url="https://open.feishu.cn/open-apis/drive/v1/files/:file_token/comments/:comment_id",
body=request,
method_option=_new_method_option(options),
need_tenant_access_token=True,
need_user_access_token=True,
)
| 1.804688 | 2 |
src/edubot/snapext/joystick/__init__.py | wendlers/edubot-snap | 0 | 12763699 | <filename>src/edubot/snapext/joystick/__init__.py
##
# The MIT License (MIT)
#
# Copyright (c) 2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##
from __future__ import unicode_literals
import pygame
import blockext
import edubot.snapext
from edubot.snapext.joystick.constants import ALL_JS, ALL_AXIS, ALL_BUTTONS, AXIS, BUTTONS
from edubot.snapext.joystick.mappings import JS_MAPPINGS
class Blocks:
def __init__(self):
pygame.init()
pygame.joystick.init()
js_count = pygame.joystick.get_count()
# support a maximum of two joysticks
if js_count > 2:
js_count = 2
self.joysticks = {}
for i in range(js_count):
js = pygame.joystick.Joystick(i)
js.init()
print("Init JS % d (%s)" % (i, js.get_name()))
js_id = "js%d" % (i + 1)
if js.get_numaxes() >= 1:
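                # Pick a device-specific axis/button mapping when the joystick name
                # is known, otherwise fall back to the generic mapping.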
if js.get_name() in JS_MAPPINGS:
self.joysticks[js_id] = JS_MAPPINGS[js.get_name()]
else:
self.joysticks[js_id] = JS_MAPPINGS["Generic"]
self.joysticks[js_id]["js"] = js
# print(self.joysticks)
def _problem(self):
pass
def _on_reset(self):
pass
@blockext.reporter("Joystick %m.joysticks axis %m.axis", defaults=[ALL_JS[0], ALL_AXIS[0]], is_blocking=True)
def axis(self, js_id, axis):
try:
pygame.event.get()
js = self.joysticks[js_id]["js"]
axis_id = self.joysticks[js_id][AXIS][axis]
value = js.get_axis(axis_id)
except KeyError:
value = 0.0
print("Joystick %s %s: %f" % (js_id, axis, value))
return round(value, 3)
@blockext.reporter("Joystick %m.joysticks button %m.buttons", defaults=[ALL_JS[0], ALL_BUTTONS[0]], is_blocking=True)
def buttons(self, js_id, button):
try:
pygame.event.get()
js = self.joysticks[js_id]["js"]
button_id = self.joysticks[js_id][BUTTONS][button]
value = js.get_button(button_id)
except KeyError:
value = 0
print("Joystick %s %s: %d" % (js_id, button, value))
return value
class Extension(edubot.snapext.BaseExtension):
def __init__(self, port=10002):
edubot.snapext.BaseExtension.__init__(
self,
Blocks,
port,
"Joystick",
"Joystick and Gamepad",
dict(
joysticks=ALL_JS,
axis=ALL_AXIS,
buttons=ALL_BUTTONS
))
| 2.125 | 2 |
enwiki/inactive_interface_admins.py | JJMC89/JJMC89_bot | 5 | 12763700 | #!/usr/bin/env python3
"""Report inactive interface admins."""
# Author : JJMC89
# License: MIT
from functools import lru_cache
from itertools import chain
from typing import Optional, Set, Tuple, Union
import pywikibot
from dateutil.relativedelta import relativedelta
from pywikibot.logentries import LogEntry
PageSource = Union[
pywikibot.Page, pywikibot.site.BaseSite, pywikibot.page.BaseLink
]
UserContrib = Tuple[pywikibot.Page, int, pywikibot.Timestamp, str]
def get_inactive_users(
site: pywikibot.site.APISite = None,
) -> Set[pywikibot.User]:
"""
Get a set of inactive interface admins.
:param site: site to work on
"""
users = set()
if not site:
site = pywikibot.Site()
for user_dict in site.allusers(group='interface-admin'):
user = User(site, user_dict['name'])
if not user.is_active:
users.add(user)
return users
class User(pywikibot.User):
"""Extended pywikibot.User."""
@property
def is_active(self) -> bool:
"""
Return True if the user is active, False otherwise.
A user is active if they have both
1) a CSS/JS edit in the last 6 months
2) an edit or log entry in the last 2 months
"""
cutoff = self.site.server_time() + relativedelta(months=-2)
if self.has_cssjs_edit is False:
return False
if self.last_edit and self.last_edit[2] >= cutoff:
return True
if self.last_event and self.last_event.timestamp() >= cutoff:
return True
return False
@property
@lru_cache()
def last_edit(self) -> Optional[UserContrib]:
"""Return the user's last edit."""
return super().last_edit
@property
@lru_cache()
def last_event(self) -> Optional[LogEntry]:
"""Return the user's last log entry."""
return super().last_event
@property
@lru_cache()
def has_cssjs_edit(self) -> Optional[bool]:
"""
Return True if the user has edited a CSS/JS page in the last 6 months.
None if the user has not been an interface-admin for 6 months.
False otherwise.
"""
kwa = dict(
namespaces=(2, 8),
end=self.site.server_time() + relativedelta(months=-6),
)
for page, _, _, summary in self.contributions(total=None, **kwa):
if not (
page.content_model not in ('css', 'javascript')
or page.title().startswith(f'{self.title()}/')
or 'while renaming the user' in summary
):
return True
pywikibot.log(f'{self!r}: No CSS/JS edit')
got_group = kwa['end']
rights_events = sorted(
chain(
self.site.logevents(logtype='rights', page=self),
pywikibot.Site('meta', 'meta').logevents(
logtype='rights',
page=f'{self.title()}@{self.site.dbName()}',
),
),
key=lambda logevent: logevent.timestamp(),
reverse=True,
)
for logevent in rights_events:
added_groups = set(logevent.newgroups) - set(logevent.oldgroups)
if 'interface-admin' in added_groups:
got_group = logevent.timestamp()
break
if kwa['end'] < got_group:
pywikibot.log(f'{self!r}: Not iadmin for 6 mo.')
return None
return False
def main(*args: str) -> None:
"""
Process command line arguments and invoke bot.
:param args: command line arguments
"""
pywikibot.handle_args(args)
site = pywikibot.Site()
site.login()
users = get_inactive_users(site=site)
if not users:
return
heading = (
'Inactive interface administrators '
f'{site.server_time().date().isoformat()}'
)
text = 'The following interface administrator(s) are inactive:'
for user in sorted(users):
text += f'\n* {{{{admin|1={user.username}}}}}'
text += '\n~~~~'
pywikibot.Page(
site, "Wikipedia:Interface administrators' noticeboard"
).save(text=text, section='new', summary=heading, botflag=False)
if __name__ == '__main__':
main()
| 2.546875 | 3 |
Commented/dfirwizard-v2.py | dlcowen/dfirwizard | 60 | 12763701 | <reponame>dlcowen/dfirwizard<filename>Commented/dfirwizard-v2.py
#!/usr/bin/python
# Sample program or step 2 in becoming a DFIR Wizard!
# No license as this code is simple and free!
import sys
import pytsk3
# Import for manipulation of timestamps into user readable information
import datetime
# Hardcode the name of the image file, without path info has to be in the same directory
imagefile = "Stage2.vhd"
# Creates image object using Img_Info function from TSK and stores it in a variable so image can be accessed
imagehandle = pytsk3.Img_Info(imagefile)
# Retrieves the partition table for the image and retains it in the variable
partitionTable = pytsk3.Volume_Info(imagehandle)
# Use a loop to print up the partition table
for partition in partitionTable:
# For each entry print the partition address, description, start, start * 512, and the length
# Partition.start gives the sector it starts at, multiplying by 512 to get the actual start byte
print partition.addr, partition.desc, '%ss(%s)' % (partition.start, partition.start * 512), partition.len
# Variable to store location of NTFS partition to access file system. Hard coded start byte of partition
fileSystemObject = pytsk3.FS_Info(imagehandle, offset=65536)
# Obtain the file table ($MFT) file for further access and analysis by acquiring it from fileSystemObject
fileObject = fileSystemObject.open("/$MFT")
# Print metadata of the $MFT file, times will be returned as time from epoch
print "File Inode: ",fileObject.info.meta.addr
print "File Name: ",fileObject.info.name.name
# Because the timestamps are returned as seconds since the epoch, we must use datetime/strftime to convert them into something readable by humans
print "File Creation Time: ", datetime.datetime.fromtimestamp(fileObject.info.meta.crtime).strftime("%Y-%m-%d %H:%M:%S")
# Open a file to output the file from earlier to, 'w' tells python to write. This is the file handle for writing to output file
outfile = open('DFIRWizard-output', 'w')
# Read the file data into a variable starting at the beginning and ending at the last byte which is the size attribute of the object
# This is probably not the best way to do this
filedata = fileObject.read_random(0, fileObject.info.meta.size)
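# A hedged aside (not part of the original walkthrough): for large files it is usually
# safer to read in chunks instead of one big read_random() call, for example:
#   offset = 0
#   while offset < fileObject.info.meta.size:
#       chunk = fileObject.read_random(offset, min(1024 * 1024, fileObject.info.meta.size - offset))
#       outfile.write(chunk)
#       offset += len(chunk)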
# Take the data from $MFT that's been stored in filedate and write it the file we opened a minute ago
outfile.write(filedata) | 2.625 | 3 |
ecs/settings.py | programmierfabrik/ecs | 9 | 12763702 | <reponame>programmierfabrik/ecs
# Django settings for ecs project.
import os, sys, platform, logging
from datetime import timedelta
from urllib.parse import urlparse
# root dir of project
PROJECT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
# standard django settings
##########################
# Default is DEBUG, others may override it later
DEBUG = True
# database configuration defaults, may get overwritten in local_settings
DATABASES = {}
if os.getenv('DATABASE_URL'):
url = urlparse(os.getenv('DATABASE_URL'))
DATABASES['default'] = {
'NAME': url.path[1:] or '',
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'USER': url.username,
'PASSWORD': <PASSWORD>,
'HOST': url.hostname or '',
'PORT': url.port or '5432',
'ATOMIC_REQUESTS': True
}
else:
DATABASES['default'] = {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'ecs',
'ATOMIC_REQUESTS': True,
}
# Local time zone for this installation. See http://en.wikipedia.org/wiki/List_of_tz_zones_by_name,
# although not all choices may be available on all operating systems.
TIME_ZONE = 'Europe/Vienna'
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'de-AT'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# workaround: we can not use the django gettext function in the settings
# because it depends on the settings.
gettext = lambda s: s
# path where django searches for *.mo files
LOCALE_PATHS = (os.path.join(PROJECT_DIR, "locale"),)
# declare supported languages for i18n. English is the internal project language.
# We do not want to expose our internal denglish to the end-user, so disable english
# in the settings
LANGUAGES = (
#('en', gettext('English')),
('de', gettext('German')),
)
# default site id, some thirdparty libraries expect it to be set
SITE_ID = 1
STATIC_ROOT = os.path.join(PROJECT_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
# start of url matching
ROOT_URLCONF = 'ecs.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'ecs.wsgi.application'
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# additional fixture search paths. implicitly used by every app the needs fixtures
FIXTURE_DIRS = [os.path.join(PROJECT_DIR, "fixtures")]
# cache backend, warning, this is seperate for each process, for production use memcache
if os.getenv('MEMCACHED_URL'):
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',
'LOCATION': os.getenv('MEMCACHED_URL').split('//')[1],
}
}
else:
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
}
# django.contrib.messages
MESSAGE_STORE = 'django.contrib.messages.storage.session.SessionStorage'
# Session Settings
SESSION_COOKIE_AGE = 28800 # logout after 8 hours of inactivity
SESSION_SAVE_EVERY_REQUEST = True # so, every "click" on the pages resets the expiry time
SESSION_EXPIRE_AT_BROWSER_CLOSE = True # session cookie expires at close of browser
# Make this unique, and don't share it with anybody.
SECRET_KEY = '<KEY>($z*otufbvlk%x1vflb&!5k94f$i3w'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(PROJECT_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.static",
"django.template.context_processors.request",
'django.template.context_processors.csrf',
"django.contrib.messages.context_processors.messages",
"ecs.core.context_processors.ecs_settings",
]
},
},
]
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'ecs.utils.forceauth.ForceAuth',
'ecs.userswitcher.middleware.UserSwitcherMiddleware',
'ecs.pki.middleware.ClientCertMiddleware',
#'ecs.TestMiddleware',
'ecs.users.middleware.GlobalUserMiddleware',
'reversion.middleware.RevisionMiddleware',
'ecs.tasks.middleware.RelatedTasksMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.humanize',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'compressor',
'reversion',
'django_countries',
'raven.contrib.django.raven_compat',
'widget_tweaks',
'ecs.core',
'ecs.checklists',
'ecs.votes',
'ecs.utils',
'ecs.docstash',
'ecs.userswitcher',
'ecs.workflow',
'ecs.tasks',
'ecs.communication',
'ecs.dashboard',
'ecs.bootstrap',
'ecs.billing',
'ecs.users',
'ecs.documents',
'ecs.meetings',
'ecs.notifications',
'ecs.authorization',
'ecs.integration',
'ecs.boilerplate',
'ecs.scratchpad',
'ecs.pki',
'ecs.statistics',
'ecs.tags',
)
# authenticate with email address
AUTHENTICATION_BACKENDS = ('ecs.users.backends.EmailAuthBackend',)
# Force Django to always use real files, not an InMemoryUploadedFile.
# The document processing pipeline depends on the file objects having
# a fileno().
FILE_UPLOAD_HANDLERS = (
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'loggers': {
'django': {
'level': 'NOTSET',
},
'django.db.backends': {
# All SQL queries are logged with level DEBUG. Settings the logger
# level to INFO prevents those messages from being propagated to
# the root logger.
'level': 'INFO',
},
},
}
# ecs settings
##############
# used by ecs.pki
ECS_CA_ROOT = os.path.join(PROJECT_DIR, '..', 'ecs-ca')
# if set to true: users of internal groups need a client certificate to logon
# ECS_REQUIRE_CLIENT_CERTS = false # default
# this is used by the EthicsCommission model to identify the system
ETHICS_COMMISSION_UUID = 'ecececececececececececececececec'
# users in these groups receive messages even when they are not related to studies
ECS_MEETING_AGENDA_RECEIVER_GROUPS = (
'Resident Board Member', 'Omniscient Board Member',
)
ECS_MEETING_PROTOCOL_RECEIVER_GROUPS = (
'Meeting Protocol Receiver', 'Resident Board Member',
'Omniscient Board Member',
)
ECS_AMG_MPG_VOTE_RECEIVERS = ('<EMAIL>',)
ECS_MEETING_GRACE_PERIOD = timedelta(days=5)
# authorization
AUTHORIZATION_CONFIG = 'ecs.auth_conf'
# registration/login settings
REGISTRATION_SECRET = '!brihi7#cxrd^twvj$r=398mdp4neo$xa-rm7b!8w1jfa@7zu_'
PASSWORD_RESET_SECRET = 'j2obdvrb-hm$$x949k*f5gk_2$1x%2etxhd!$+*^qs8$4ra3=a'
LOGIN_REDIRECT_URL = '/dashboard/'
# PDF Signing will use fake signing if PDFAS_SERVICE is "mock:"
# deployment should use 'https://hostname/pdf-as-web/'
PDFAS_SERVICE = 'mock:'
# directory where to store zipped submission patientinformation and submission form pdfs
ECS_DOWNLOAD_CACHE_DIR = os.path.realpath(os.path.join(PROJECT_DIR, "..", "ecs-cache"))
ECS_DOWNLOAD_CACHE_MAX_AGE = 30 * 24 * 60 * 60 # 30 days
# Storage Vault settings
STORAGE_VAULT = {
'dir': os.path.join(PROJECT_DIR, '..', 'ecs-storage-vault'),
'gpghome' : os.path.join(PROJECT_DIR, '..', 'ecs-gpg'),
'encryption_uid': 'ecs_mediaserver',
'signature_uid': 'ecs_authority',
}
# domain to use
DOMAIN= "localhost"
# absolute URL prefix w/out trailing slash
ABSOLUTE_URL_PREFIX = "http://"+ DOMAIN+ ":8000"
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_BACKEND_UNFILTERED = 'django.core.mail.backends.console.EmailBackend'
EMAIL_UNFILTERED_DOMAINS = () # = ('example.com', )
EMAIL_UNFILTERED_INDIVIDUALS = () # = ('<EMAIL>', '<EMAIL>')
# EMAIL_BACKEND_UNFILTERED will be used for
# User registration & invitation, password reset, send client certificate,
# and all mails to domains in EMAIL_UNFILTERED_DOMAINS and user
# listed in EMAIL_UNFILTERED_INDIVIDUALS
if os.getenv('SMTP_URL'):
url = urlparse(os.getenv('SMTP_URL'))
EMAIL_HOST = url.hostname
EMAIL_PORT = url.port or 25
EMAIL_HOST_USER = url.username or ''
EMAIL_HOST_PASSWORD = url.password or ''
SMTPD_CONFIG = {
'listen_addr': ('127.0.0.1', 8025),
'domain': DOMAIN,
'store_exceptions': False,
}
# thirdparty settings
######################
# ### celery ### default uses memory transport and always eager
CELERY_IMPORTS = (
'ecs.communication.tasks',
'ecs.core.tasks',
'ecs.core.tests.test_tasks',
'ecs.documents.tasks',
'ecs.integration.tasks',
'ecs.meetings.tasks',
'ecs.tasks.tasks',
'ecs.users.tasks',
'ecs.votes.tasks',
)
CELERY_TASK_SERIALIZER = 'pickle'
CELERY_ACCEPT_CONTENT = (CELERY_TASK_SERIALIZER,)
# try to propagate exceptions back to caller
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
if os.getenv('REDIS_URL'):
BROKER_URL = os.getenv('REDIS_URL')
BROKER_TRANSPORT_OPTIONS = {
'fanout_prefix': True,
'fanout_patterns': True
}
CELERY_RESULT_BACKEND = BROKER_URL
CELERY_ALWAYS_EAGER = False
else:
# dont use queueing backend but consume it right away
CELERY_ALWAYS_EAGER = True
# ### django_compressor ###
COMPRESS_ENABLED = True
COMPRESS_PRECOMPILERS = (
(
'text/x-scss',
'pyscss -I {} -o {{outfile}} {{infile}}'.format(
os.path.join(STATIC_ROOT, 'css'))
),
)
# settings override
###################
#these are local fixes, they default to a sane value if unset
#ECS_USERSWITCHER_ENABLED = True/False
# default to True, Userswitcher will be shown so user can switch to testusers quickly
if os.getenv('ECS_USERSWITCHER_ENABLED'):
ECS_USERSWITCHER_ENABLED = os.getenv('ECS_USERSWITCHER_ENABLED','').lower() == 'true'
#ECS_DEBUGTOOLBAR = True/False defaults to False if empty
# loads support for django-debug-toolbar
#ECS_WORDING = True/False defaults to False if empty
# activates django-rosetta
# import and execute ECS_SETTINGS from environment as python code if they exist
if os.getenv('ECS_SETTINGS'):
exec(os.getenv('ECS_SETTINGS'))
# overwrite settings from local_settings.py if it exists
try:
from ecs.local_settings import *
except ImportError:
pass
# try to get ECS_VERSION, ECS_GIT_REV from version.py
if not all([k in locals() for k in ['ECS_VERSION', 'ECS_GIT_REV', 'ECS_GIT_BRANCH']]):
try:
from ecs.version import ECS_VERSION, ECS_GIT_REV, ECS_GIT_BRANCH
except ImportError:
ECS_VERSION = 'unknown'
ECS_GIT_BRANCH = 'unknown'
ECS_GIT_REV = 'badbadbadbadbadbadbadbadbadbadbadbadbad0'
DEFAULT_FROM_EMAIL = SERVER_EMAIL = '<EMAIL>(DOMAIN)
# https
if 'SECURE_PROXY_SSL' in locals() and SECURE_PROXY_SSL:
CSRF_COOKIE_SECURE= True
SESSION_COOKIE_SECURE = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# sentry/raven
if 'SENTRY_DSN' in locals():
import raven
from raven.transport.threaded_requests import ThreadedRequestsHTTPTransport
# if no threading support: from raven.transport.requests import RequestsHTTPTransport
RAVEN_CONFIG = {
'dsn': SENTRY_DSN,
'release': ECS_GIT_REV,
'transport': ThreadedRequestsHTTPTransport,
'site': DOMAIN,
}
SENTRY_CLIENT = 'ecs.utils.ravenutils.DjangoClient'
# user switcher
if 'ECS_USERSWITCHER_ENABLED' not in locals():
ECS_USERSWITCHER_ENABLED = True
if not ECS_USERSWITCHER_ENABLED:
MIDDLEWARE_CLASSES = tuple(item for item in MIDDLEWARE_CLASSES if item != 'ecs.userswitcher.middleware.UserSwitcherMiddleware')
# django rosetta activation
if 'ECS_WORDING' in locals() and ECS_WORDING:
INSTALLED_APPS +=('rosetta',) # anywhere
# django-debug-toolbar activation
if 'ECS_DEBUGTOOLBAR' in locals() and ECS_DEBUGTOOLBAR:
INSTALLED_APPS += ('debug_toolbar',)
INTERNAL_IPS = ('127.0.0.1',)
# hack some settings for test and runserver
if 'test' in sys.argv:
CELERY_ALWAYS_EAGER = True
INSTALLED_APPS += ('ecs.workflow.tests',)
if 'runserver' in sys.argv:
logging.basicConfig(
level = logging.DEBUG,
format = '%(asctime)s %(levelname)s %(message)s',
)
| 2.078125 | 2 |
app/db/migrations/versions/ed535bd21f09_add_three_time_filed.py | zyxyuanxiao/fastapi-1 | 2 | 12763703 | <gh_stars>1-10
"""add three time filed
Revision ID: ed<PASSWORD>
Revises: 0a43<PASSWORD>
Create Date: 2019-12-12 13:44:51.913300
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<PASSWORD>'
down_revision = '0a43<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
| 1.101563 | 1 |
src/waclient/service_controller/__init__.py | WitnessAngel/witness-angel-sensor | 4 | 12763704 | from kivy.utils import platform
if platform == "android":
from .android_service_controller import ServiceController
else:
from .subprocess_service_controller import ServiceController
| 1.828125 | 2 |
notifyme.py | vsr/pyscripts | 0 | 12763705 | <filename>notifyme.py<gh_stars>0
#!/usr/bin/python
"""
Notifies with a fortune cookie every X minutes.
Usage: "python notifyme.py 10" to notify every 10 minutes
"""
import os
import sys
import datetime
import time
import pynotify
def notify(count):
fortune = os.popen('fortune -n 100 -s', 'r').read()
time = datetime.datetime.now().strftime("%H:%M:%S")
pynotify.Notification("%s (%s) >>" % (time, count), fortune).show()
if __name__ == "__main__":
pynotify.init('Notify Me')
try:
interval = float(sys.argv[1])
except:
interval = 20
notification_count = 0
try:
while True:
notification_count += 1
notify(notification_count)
time.sleep(interval * 60)
except (KeyboardInterrupt, SystemExit):
print "\nNotified %s times." % notification_count
exit
| 3.015625 | 3 |
corus/sources/toloka.py | Ilseyar/corus | 205 | 12763706 |
from corus.record import Record
from corus.io import (
load_lines,
parse_tsv,
skip_header,
)
class LRWCRecord(Record):
__attributes__ = ['hyponym', 'hypernym', 'genitive', 'judgement', 'confidence']
def __init__(self, hyponym, hypernym, genitive, judgement, confidence):
self.hyponym = hyponym
self.hypernym = hypernym
self.genitive = genitive
self.judgement = judgement
self.confidence = confidence
# INPUT:hyponym INPUT:hypernym INPUT:genitive OUTPUT:judgement CONFIDENCE:judgement
# автомобиль автомашина автомашины true 99.75%
# автомобиль автомототранспорт автомототранспорта true 99.96%
# автомобиль автомототранспортный автомототранспортного true 99.99%
def parse_judgement(value):
if value == 'true':
return 1.0
elif value == 'false':
return 0.0
def parse_confidence(value):
return float(value[:-1])
def parse_toloka_lrwc(lines):
skip_header(lines)
records = parse_tsv(lines)
for record in records:
hyponym, hypernym, genitive, judgement, confidence = record
judgement = parse_judgement(judgement)
confidence = parse_confidence(confidence)
yield LRWCRecord(hyponym, hypernym, genitive, judgement, confidence)
def load_toloka_lrwc(path):
lines = load_lines(path)
return parse_toloka_lrwc(lines)
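# A minimal usage sketch (the file path is hypothetical; parsing is lazy, so iterate):
#   for record in load_toloka_lrwc('toloka_lrwc.tsv'):
#       print(record.hyponym, record.hypernym, record.confidence)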
class RuADReCTRecord(Record):
__attributes__ = ['tweet_id', 'tweet', 'label']
def __init__(self, tweet_id, tweet, label):
self.tweet_id = tweet_id
self.tweet = tweet
self.label = label
# – tweet_id: unique id of the message in Twitter;
# – tweet: the text of the message (tweet);
# - label: tweet class, 1 - mentions an adverse effect, 0 - does not
def parse_ruadrect(lines):
rows = parse_tsv(lines)
skip_header(rows)
for cells in rows:
yield RuADReCTRecord(*cells)
def load_ruadrect(path):
lines = load_lines(path)
return parse_ruadrect(lines)
| 2.671875 | 3 |
ckan_cloud_operator/providers/users/gcloud/constants.py | MuhammadIsmailShahzad/ckan-cloud-operator | 14 | 12763707 | PROVIDER_ID='gcloud'
| 1.117188 | 1 |
genmol/vae/samples.py | bayeslabs/genmol | 17 | 12763708 | # -*- coding: utf-8 -*-
from tqdm import tqdm
import pandas as pd
n_samples = 3000
n_jobs = 1
max_len = 100
class sample():
def take_samples(model,n_batch):
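        # Draw n_samples molecules from the model in batches of at most n_batch
        # SMILES strings each, then return them as a one-column pandas DataFrame.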
n = n_samples
samples = []
with tqdm(total=n_samples, desc='Generating samples') as T:
while n > 0:
current_samples = model.sample(min(n, n_batch), max_len)
samples.extend(current_samples)
n -= len(current_samples)
T.update(len(current_samples))
samples = pd.DataFrame(samples, columns=['SMILES'])
return samples | 2.84375 | 3 |
Code/python/Py/PostProcess.py | cy15196/FastCAE | 117 | 12763709 | #-------关联C++库---------------
import ctypes
import platform
system = platform.system()
if system == "Windows":
pre = "./"
suff = ".dll"
else:
pre = "./lib"
suff = ".so"
libfile = ctypes.cdll.LoadLibrary
filename = pre+"GraphicsAnalyse"+suff
postPro = libfile(filename)
import MainWindow
#---------------------------------
#------- Function definitions ------------------
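# The functions below are thin wrappers: each simply forwards a post-processing
# script call (open file, apply, properties, filters, camera, ...) to the
# corresponding binding exposed by the MainWindow module.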
def script_openFile(id, type, file):
MainWindow.script_openFile(id, type, file)
pass
def script_applyClicked(id, type):
MainWindow.script_applyClicked(id, type)
pass
def script_Properties_Opacity(id, type, obj_id, mOpacity):
MainWindow.script_Properties_Opacity(id, type, obj_id, mOpacity)
pass
def script_Properties_colorColumn(id, type, obj_id, mColorColumnStyle):
MainWindow.script_Properties_colorColumn(id, type, obj_id, mColorColumnStyle)
pass
def script_Properties_scalarBarTitle(id, type, obj_id, colName, m_title):
MainWindow.script_Properties_scalarBarTitle(id, type, obj_id, colName, m_title)
pass
def script_Properties_scalarBarFontSize(id, type, obj_id, colName, m_fontSize):
MainWindow.script_Properties_scalarBarFontSize(id, type, obj_id, colName, m_fontSize)
pass
def script_Properties_scalarBarNumLables(id, type, obj_id, colName, m_numLables):
MainWindow.script_Properties_scalarBarNumLables(id, type, obj_id, colName, m_numLables)
pass
def script_Properties_lineWidth(id, type, obj_id, mLineWidth):
MainWindow.script_Properties_lineWidth(id, type, obj_id, mLineWidth)
pass
def script_Properties_pointSize(id, type, obj_id, mPointSize):
MainWindow.script_Properties_pointSize(id, type, obj_id, mPointSize)
pass
def script_Properties_translate(id, type, obj_id, x, y, z):
MainWindow.script_Properties_translate(id, type, obj_id, x, y, z)
pass
def script_Properties_origin(id, type, obj_id, x, y, z):
MainWindow.script_Properties_origin(id, type, obj_id, x, y, z)
pass
def script_Properties_scale(id, type, obj_id, x, y, z):
MainWindow.script_Properties_scale(id, type, obj_id, x, y, z)
pass
def script_Properties_orientation(id, type, obj_id, x, y, z):
MainWindow.script_Properties_orientation(id, type, obj_id, x, y, z)
pass
def script_Properties_representation(id, type, obj_id, m_enum_representationtype):
MainWindow.script_Properties_representation(id, type, obj_id, m_enum_representationtype)
pass
def script_Properties_specular(id, type, obj_id, mSpecular):
MainWindow.script_Properties_specular(id, type, obj_id, mSpecular)
pass
def script_Properties_diffuse(id, type, obj_id, mDiffuse):
MainWindow.script_Properties_diffuse(id, type, obj_id, mDiffuse)
pass
def script_Properties_ambient(id, type, obj_id, mAmbient):
MainWindow.script_Properties_ambient(id, type, obj_id, mAmbient)
pass
def script_Properties_specularPower(id, type, obj_id, mSpecularPower):
MainWindow.script_Properties_specularPower(id, type, obj_id, mSpecularPower)
pass
def script_Properties_specularColor(id, type, obj_id, r, g, b):
MainWindow.script_Properties_specularColor(id, type, obj_id, r, g, b)
pass
def script_Properties_solidColor(id, type, obj_id, r, g, b):
MainWindow.script_Properties_solidColor(id, type, obj_id, r, g, b)
pass
def script_Properties_edgeColor(id, type, obj_id, r, g, b):
MainWindow.script_Properties_edgeColor(id, type, obj_id, r, g, b)
pass
def script_Properties_interpolation(id, type, obj_id, m_enum_interpolationtype):
MainWindow.script_Properties_interpolation(id, type, obj_id, m_enum_interpolationtype)
pass
def script_Properties_Flag_scalarBar(id, type, obj_id, mColorColumnStyle):
MainWindow.script_Properties_Flag_scalarBar(id, type, obj_id, mColorColumnStyle)
pass
def script_Properties_EnableOpacityMap(id, type, obj_id, val):
MainWindow.script_Properties_EnableOpacityMap(id, type, obj_id, val)
pass
def script_Properties_visible(id, type, obj_id, flag_show_actors):
MainWindow.script_Properties_visible(id, type, obj_id, flag_show_actors)
pass
def script_Properties_show_scalarBars(id, type, obj_id, mScalarBarVisible):
MainWindow.script_Properties_show_scalarBars(id, type, obj_id, mScalarBarVisible)
pass
def script_Properties_show_cubeAxes(id, type, obj_id, flag_cubeAxes):
MainWindow.script_Properties_show_cubeAxes(id, type, obj_id, flag_cubeAxes)
pass
def script_Properties_scalarBarPosition(id, type, obj_id, colName, tep_orietation, pos0, pos1, pos2, pos3):
MainWindow.script_Properties_scalarBarPosition(id, type, obj_id,colName, tep_orietation, pos0, pos1, pos2, pos3)
pass
def script_Clip(id, type, obj_id):
MainWindow.script_FilterClip(id, type, obj_id)
pass
def script_Slice(id, type, obj_id):
MainWindow.script_FilterSlice(id, type, obj_id)
pass
def script_Contour(id, type, obj_id):
MainWindow.script_FilterContour(id, type, obj_id)
pass
def script_Vector(id, type, obj_id):
MainWindow.script_FilterVector(id, type, obj_id)
pass
def script_Reflection(id, type, obj_id):
MainWindow.script_FilterReflection(id, type, obj_id)
pass
def script_Smooth(id, type, obj_id):
MainWindow.script_FilterSmooth(id, type, obj_id)
pass
def script_StreamLine(id, type, obj_id):
MainWindow.script_FilterStreamLine(id, type, obj_id)
pass
###################
def script_Vector_GlyphVector(id, type, obj_id, val):
MainWindow.script_Properties_vector_GlyphVector(id, type, obj_id, val)
pass
def script_Vector_scalar(id, type, obj_id, val):
MainWindow.script_Properties_vector_scalar(id, type, obj_id, val)
pass
def script_Vector_normal(id, type, obj_id, val):
MainWindow.script_Properties_vector_normal(id, type, obj_id, val)
pass
def script_Vector_numPoints(id, type, obj_id, val):
MainWindow.script_Properties_vector_numPoints(id, type, obj_id, val)
pass
def script_Vector_glyph_type(id, type, obj_id, val):
MainWindow.script_Properties_vector_glyph_type(id, type, obj_id, val)
pass
def script_Vector_glyph_tipRes(id, type, obj_id, val):
MainWindow.script_Properties_vector_glyph_tipRes(id, type, obj_id, val)
pass
def script_Vector_glyph_tipRad(id, type, obj_id, val):
MainWindow.script_Properties_vector_glyph_tipRad(id, type, obj_id, val)
pass
def script_Vector_glyph_tipLen(id, type, obj_id, val):
MainWindow.script_Properties_vector_glyph_tipLen(id, type, obj_id, val)
pass
def script_FilterStreamLine(id, type, obj_id):
MainWindow.script_FilterStreamLine(id, type, obj_id)
pass
def script_Vector_glyph_shaftRes(id, type, obj_id, val):
MainWindow.script_Properties_vector_glyph_shaftRes(id, type, obj_id, val)
pass
def script_Vector_glyph_shaftRad(id, type, obj_id, val):
MainWindow.script_Properties_vector_glyph_shaftRad(id, type, obj_id, val)
pass
def script_Properties_view_backgroundType(id, type, obj_id, val):
MainWindow.script_Properties_view_backgroundType(id, type, obj_id, val)
pass
def script_Properties_view_backgroundColor(id, type, obj_id, red, green, blue):
MainWindow.script_Properties_view_backgroundColor(id, type, obj_id, red, green, blue)
pass
def script_Properties_view_background2Color(id, type, obj_id, red, green, blue):
MainWindow.script_Properties_view_background2Color(id, type, obj_id, red, green, blue)
pass
def script_Properties_view_axesVisible(id, type, a):
MainWindow.script_Properties_view_axesVisible(id, type, a)
pass
def script_Properties_view_cameraParallel(id, type, a):
MainWindow.script_Properties_view_cameraParallel(id, type, a)
pass
def script_Properties_view_interaction(id, type, a):
MainWindow.script_Properties_view_interaction(id, type, a)
pass
def script_Properties_renderView(id, type):
MainWindow.script_Properties_renderView(id, type)
pass
def script_Camera_Position(id, type, pos0, pos1, pos2):
MainWindow.script_Camera_Position(id, type, pos0, pos1, pos2)
pass
def script_Camera_FocalPoint(id, type, focalPoint0, focalPoint1, focalPoint2):
MainWindow.script_Camera_FocalPoint(id, type, focalPoint0, focalPoint1, focalPoint2)
pass
def script_Camera_ClippingRange(id, type, clippingRange0, clippingRange1):
MainWindow.script_Camera_ClippingRange(id, type, clippingRange0, clippingRange1)
pass
def script_Camera_ViewUp(id, type, viewup0, viewup1, viewup2):
MainWindow.script_Camera_ViewUp(id, type, viewup0, viewup1, viewup2)
pass
def script_Camera_ViewAngle(id, type, angle):
MainWindow.script_Camera_ViewAngle(id, type, angle)
pass
def script_Camera_Zoom(id, type, zoom):
MainWindow.script_Camera_Zoom(id, type, zoom)
pass
def script_Camera_Reset(id, type,):
MainWindow.script_Camera_Reset(id, type,)
pass
def script_Properties_planeOrigin(id, type, obj_id, x, y, z):
MainWindow.script_Properties_planeOrigin(id, type, obj_id, x, y, z)
pass
def script_Properties_planeNormal(id, type, obj_id, x, y, z):
MainWindow.script_Properties_planeNormal(id, type, obj_id, x, y, z)
pass
def script_Properties_planeVisible(id, type, obj_id, a):
MainWindow.script_Properties_planeVisible(id, type, obj_id, a)
pass
def script_Properties_insideOut(id, type, obj_id, a):
MainWindow.script_Properties_insideOut(id, type, obj_id, a)
pass
def script_Contour_Column(id, type, obj_id, val):
MainWindow.script_Properties_contourColumn(id, type, obj_id, val)
pass
def script_Contour_value(id, type, obj_id, val):
MainWindow.script_Properties_contourValue(id, type, obj_id, val)
pass
def script_Contour_reflection(id, type, obj_id, aaa):
MainWindow.script_Properties_contour_reflection(id, type, obj_id, aaa)
pass
def script_Contour_reflectionAxes(id, type, obj_id, val):
MainWindow.script_Properties_contour_reflectionAxes(id, type, obj_id, val)
pass
def script_Properties_reflectionAxes(id, type, obj_id, reflection_axis):
MainWindow.script_Properties_reflectionAxes(id, type, obj_id, reflection_axis)
pass
def Smooth_smooth(id, type, obj_id, smotype, coef):
MainWindow.script_Properties_smooth(id, type, obj_id, smotype, coef)
pass
def script_Streamline_vector(id, type, obj_id, val):
MainWindow.script_Properties_streamline_vector(id, type, obj_id, val)
pass
def script_Streamline_integration_type(id, type, obj_id, val):
MainWindow.script_Properties_streamline_integration_type(id, type, obj_id, val)
pass
def script_Streamline_integration_direction(id, type, obj_id, val):
MainWindow.script_Properties_streamline_integration_direction(id, type, obj_id, val)
pass
def script_Streamline_integration_stepUnit(id, type, obj_id, val):
    MainWindow.script_Properties_streamline_integration_stepUnit(id, type, obj_id, val)
pass
def script_Properties_streamline_integration_stepUnit(id, type, obj_id, val):
MainWindow.script_Properties_streamline_integration_stepUnit(id, type, obj_id, val)
pass
def script_Streamline_integration_initStepLen(id, type, obj_id, val):
MainWindow.script_Properties_streamline_integration_initStepLen(id, type, obj_id, val)
pass
def script_Streamline_integration_miniStepLen(id, type, obj_id, val):
MainWindow.script_Properties_streamline_integration_miniStepLen(id, type, obj_id, val)
pass
def script_Streamline_integration_maxiStepLen(id, type, obj_id, val):
MainWindow.script_Properties_streamline_integration_maxiStepLen(id, type, obj_id, val)
pass
def script_Streamline_stream_maxiSteps(id, type, obj_id, val):
MainWindow.script_Properties_streamline_stream_maxiSteps(id, type, obj_id, val)
pass
def script_Streamline_stream_maxiStreamLen(id, type, obj_id, val):
MainWindow.script_Properties_streamline_stream_maxiStreamLen(id, type, obj_id, val)
pass
###########
def script_Streamline_stream_terminalSpeed(id, type, obj_id, val):
MainWindow.script_Properties_streamline_stream_terminalSpeed(id, type, obj_id, val)
pass
def script_Streamline_stream_maxiError(id, type, obj_id, val):
MainWindow.script_Properties_streamline_stream_maxiError(id, type, obj_id, val)
pass
def script_Streamline_seeds_type(id, type, obj_id, val):
MainWindow.script_Properties_streamline_seeds_type(id, type, obj_id, val)
pass
def script_Streamline_seeds_mPoint(id, type, obj_id, val0, val1, val2):
MainWindow.script_Properties_streamline_seeds_mPoint(id, type, obj_id, val0, val1, val2)
pass
def script_Streamline_seeds_num_points(id, type, obj_id, val):
MainWindow.script_Properties_streamline_seeds_num_points(id, type, obj_id, val)
pass
def script_Streamline_seeds_radius(id, type, obj_id, val):
MainWindow.script_Properties_streamline_seeds_radius(id, type, obj_id, val)
pass
def script_Streamline_vorticity(id, type, obj_id, val):
MainWindow.script_Properties_streamline_vorticity(id, type, obj_id, val)
pass
def script_Streamline_interpolatorType(id, type, obj_id, val):
MainWindow.script_Properties_streamline_interpolatorType(id, type, obj_id, val)
pass
def script_Streamline_surface_streamLines(id, type, obj_id, val):
MainWindow.script_Properties_streamline_surface_streamLines(id, type, obj_id, val)
pass
def script_Properties_streamline_reflection(id, type, obj_id, val):
MainWindow.script_Properties_streamline_reflection(id, type, obj_id, val)
pass
def script_Properties_streamline_reflectionAxes(id, type, obj_id, val):
MainWindow.script_Properties_streamline_reflectionAxes(id, type, obj_id, val)
pass
| 2.3125 | 2 |
unit_test.py | cakel/ppomppu_naver_point_telegram_bot | 2 | 12763710 | <filename>unit_test.py
import unittest
import os
import json
from unittest.mock import patch
from _url import get_sending_list_from_record, fetch_html_to_string, get_record_list_info_from_html
from _file import load_db_from_file, save_db_to_file
class TestSuite(unittest.TestCase):
def test_save_and_load(self):
DB_DUMMY_LIST = []
DB_TEST_LIST = []
DB_DUMMY1 = {
"no": 1,
"link": "http",
"title": "2021-08-29",
"sent": False
}
DB_DUMMY2 = {
"no": 2,
"link": "http",
"title": "2021-08-29",
"sent": False
}
DB_DUMMY_LIST.append(DB_DUMMY1)
DB_DUMMY_LIST.append(DB_DUMMY2)
save_db_to_file(DB_DUMMY_LIST, "_db.json")
DB_TEST_LIST = load_db_from_file("_db.json")
self.assertTrue(DB_TEST_LIST == DB_DUMMY_LIST)
if os.path.exists("_db.json"):
os.remove("_db.json")
@patch('requests.get')
def test_fetch_string_from_url(self, test_patch):
class dummy_result():
text = "TEST"
test_patch.return_value = dummy_result()
ret = fetch_html_to_string(None)
self.assertTrue(ret == "TEST")
pass
def test_get_record_list_info_from_html(self):
with open("./_test/test_get_record_list_info_from_html_in.txt", "r", encoding='utf-8') as readInFile:
with open("./_test/test_get_record_list_info_from_html_out.txt", "r", encoding='utf-8') as readOutFile:
actual_result = str(
get_record_list_info_from_html(readInFile.read()))
expect_result = str(json.load(readOutFile))
self.assertTrue(actual_result == expect_result)
pass
def test_get_sending_list_from_record(self):
existing_result_list = [
{
"no": "71339",
"title": "[네이버페이] 참존 스토어찜 80원",
"link": "https://www.ppomppu.co.kr/zboard/view.php?id=coupon&page=1&divpage=13&search_type=subject&keyword=%B3%D7%C0%CC%B9%F6&no=71339"
}
]
retrieved_result_list = [
{
"no": "71345",
"title": "[네이버페이] 캐롯손해보험 20원 받으세요",
"link": "https://www.ppomppu.co.kr/zboard/view.php?id=coupon&page=1&divpage=13&search_type=subject&keyword=%B3%D7%C0%CC%B9%F6&no=71345"
},
{
"no": "71339",
"title": "[네이버페이] 참존 스토어찜 80원",
"link": "https://www.ppomppu.co.kr/zboard/view.php?id=coupon&page=1&divpage=13&search_type=subject&keyword=%B3%D7%C0%CC%B9%F6&no=71339"
}
]
expect_result = [{
"no": "71345",
"title": "[네이버페이] 캐롯손해보험 20원 받으세요",
"link": "https://www.ppomppu.co.kr/zboard/view.php?id=coupon&page=1&divpage=13&search_type=subject&keyword=%B3%D7%C0%CC%B9%F6&no=71345"
}]
actual_result = get_sending_list_from_record(
existing_result_list, retrieved_result_list)
self.assertTrue(actual_result == expect_result)
pass
if __name__ == '__main__':
unittest.main()
| 3.015625 | 3 |
mopidy_local/library.py | jodal/mopidy-local | 59 | 12763711 | import logging
import operator
import sqlite3
import uritools
from mopidy import backend, models
from mopidy.models import Ref, SearchResult
from . import Extension, schema
logger = logging.getLogger(__name__)
def date_ref(date):
return Ref.directory(
uri=uritools.uricompose("local", None, "directory", {"date": date}), name=date
)
def genre_ref(genre):
return Ref.directory(
uri=uritools.uricompose("local", None, "directory", {"genre": genre}),
name=genre,
)
class LocalLibraryProvider(backend.LibraryProvider):
ROOT_DIRECTORY_URI = "local:directory"
root_directory = models.Ref.directory(uri=ROOT_DIRECTORY_URI, name="Local media")
def __init__(self, backend, config):
super().__init__(backend)
self._config = ext_config = config[Extension.ext_name]
self._data_dir = Extension.get_data_dir(config)
self._directories = []
for line in ext_config["directories"]:
name, uri = line.rsplit(None, 1)
ref = Ref.directory(uri=uri, name=name)
self._directories.append(ref)
self._dbpath = self._data_dir / "library.db"
self._connection = None
def load(self):
with self._connect() as connection:
version = schema.load(connection)
logger.debug("Using SQLite database schema v%s", version)
return schema.count_tracks(connection)
def lookup(self, uri):
try:
if uri.startswith("local:album"):
return list(schema.lookup(self._connect(), Ref.ALBUM, uri))
elif uri.startswith("local:artist"):
return list(schema.lookup(self._connect(), Ref.ARTIST, uri))
elif uri.startswith("local:track"):
return list(schema.lookup(self._connect(), Ref.TRACK, uri))
else:
raise ValueError("Invalid lookup URI")
except Exception as e:
logger.error("Lookup error for %s: %s", uri, e)
return []
def browse(self, uri):
try:
if uri == self.ROOT_DIRECTORY_URI:
return self._directories
elif uri.startswith("local:directory"):
return self._browse_directory(uri)
elif uri.startswith("local:artist"):
return self._browse_artist(uri)
elif uri.startswith("local:album"):
return self._browse_album(uri)
else:
raise ValueError("Invalid browse URI")
except Exception as e:
logger.error("Error browsing %s: %s", uri, e)
return []
def search(self, query=None, limit=100, offset=0, uris=None, exact=False):
limit = self._config["max_search_results"]
q = []
for field, values in query.items() if query else []:
q.extend((field, value) for value in values)
filters = [f for uri in uris or [] for f in self._filters(uri) if f]
with self._connect() as c:
tracks = schema.search_tracks(c, q, limit, offset, exact, filters)
uri = uritools.uricompose("local", path="search", query=q)
return SearchResult(uri=uri, tracks=tracks)
def get_images(self, uris):
images = {}
with self._connect() as c:
for uri in uris:
if uri.startswith("local:album"):
images[uri] = schema.get_album_images(c, uri)
elif uri.startswith("local:track"):
images[uri] = schema.get_track_images(c, uri)
return images
def get_distinct(self, field, query=None):
q = []
for key, values in query.items() if query else []:
q.extend((key, value) for value in values)
# Gracefully handle both old and new field values for this API.
compat_field = {"track": "track_name"}.get(field, field)
return set(schema.list_distinct(self._connect(), compat_field, q))
def _connect(self):
if not self._connection:
self._connection = sqlite3.connect(
self._dbpath,
factory=schema.Connection,
timeout=self._config["timeout"],
check_same_thread=False,
)
return self._connection
def _browse_album(self, uri, order=("disc_no", "track_no", "name")):
return schema.browse(self._connect(), Ref.TRACK, order, album=uri)
def _browse_artist(self, uri, order=("type", "name COLLATE NOCASE")):
with self._connect() as c:
albums = schema.browse(c, Ref.ALBUM, order, albumartist=uri)
refs = schema.browse(c, order=order, artist=uri)
album_uris, tracks = {ref.uri for ref in albums}, []
for ref in refs:
if ref.type == Ref.ALBUM and ref.uri not in album_uris:
albums.append(
Ref.directory(
uri=uritools.uricompose(
"local",
None,
"directory",
dict(type=Ref.TRACK, album=ref.uri, artist=uri),
),
name=ref.name,
)
)
elif ref.type == Ref.TRACK:
tracks.append(ref)
else:
logger.debug("Skipped SQLite browse result %s", ref.uri)
albums.sort(key=operator.attrgetter("name"))
return albums + tracks
def _browse_directory(self, uri, order=("type", "name COLLATE NOCASE")):
query = dict(uritools.urisplit(uri).getquerylist())
type = query.pop("type", None)
role = query.pop("role", None)
# TODO: handle these in schema (generically)?
if type == "date":
format = query.get("format", "%Y-%m-%d")
return list(map(date_ref, schema.dates(self._connect(), format=format)))
if type == "genre":
return list(map(genre_ref, schema.list_distinct(self._connect(), "genre")))
# Fix #38: keep sort order of album tracks; this also applies
# to composers and performers
if type == Ref.TRACK and "album" in query:
order = ("disc_no", "track_no", "name")
if type == Ref.ARTIST and self._config["use_artist_sortname"]:
order = ("coalesce(sortname, name) COLLATE NOCASE",)
roles = role or ("artist", "albumartist") # FIXME: re-think 'roles'...
refs = []
for ref in schema.browse(
self._connect(), type, order, role=roles, **query
): # noqa
if ref.type == Ref.TRACK or (not query and not role):
refs.append(ref)
elif ref.type == Ref.ALBUM:
refs.append(
Ref.directory(
uri=uritools.uricompose(
"local",
None,
"directory",
dict(query, type=Ref.TRACK, album=ref.uri), # noqa
),
name=ref.name,
)
)
elif ref.type == Ref.ARTIST:
refs.append(
Ref.directory(
uri=uritools.uricompose(
"local", None, "directory", dict(query, **{role: ref.uri})
),
name=ref.name,
)
)
else:
logger.warning("Unexpected SQLite browse result: %r", ref)
return refs
def _filters(self, uri):
if uri.startswith("local:directory"):
return [dict(uritools.urisplit(uri).getquerylist())]
elif uri.startswith("local:artist"):
return [{"artist": uri}, {"albumartist": uri}]
elif uri.startswith("local:album"):
return [{"album": uri}]
else:
return []
| 2.28125 | 2 |
cohesity_management_sdk/models/centrify_schema_enum.py | nick6655/management-sdk-python | 18 | 12763712 | # -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class CentrifySchemaEnum(object):
"""Implementation of the 'CentrifySchema' enum.
Specifies the schema of this Centrify zone.
    The list of schemas and their values below is taken from the document
Centrify Server Suite 2016 Windows API Programmer's Guide
https://docs.centrify.com/en/css/suite2016/centrify-win-progguide.pdf
'kCentrifyDynamicSchema_1_0' specifies dynamic schema, version 1.0.
'kCentrifyDynamicSchema_2_0' specifies dynamic schema, version 2.0.
'kCentrifyDynamicSchema_3_0' specifies dynamic schema, version 3.0.
'kCentrifyDynamicSchema_5_0' specifies dynamic schema, version 5.0.
'kCentrifySfu_3_0' specifies sfu schema, version 3.0.
'kCentrifySfu_3_0_V5' specifies sfu schema, 3.0.5.
'kCentrifySfu_4_0' specifies sfu schema, version 4.0.
'kCentrifyCdcRfc2307' specifies cdcrfc2307 schema.
'kCentrifyCdcRfc2307_2' specifies cdcrfc2307, version 2.
'kCentrifyCdcRfc2307_3' specifies cdcrfc2307, version 3.
Attributes:
KCENTRIFYDYNAMICSCHEMA_1_0: TODO: type description here.
KCENTRIFYDYNAMICSCHEMA_2_0: TODO: type description here.
KCENTRIFYSFU_3_0: TODO: type description here.
KCENTRIFYSFU_4_0: TODO: type description here.
KCENTRIFYCDCRFC2307: TODO: type description here.
KCENTRIFYDYNAMICSCHEMA_3_0: TODO: type description here.
KCENTRIFYCDCRFC2307_2: TODO: type description here.
KCENTRIFYDYNAMICSCHEMA_5_0: TODO: type description here.
KCENTRIFYCDCRFC2307_3: TODO: type description here.
KCENTRIFYSFU_3_0_V5: TODO: type description here.
"""
KCENTRIFYDYNAMICSCHEMA_1_0 = 'kCentrifyDynamicSchema_1_0'
KCENTRIFYDYNAMICSCHEMA_2_0 = 'kCentrifyDynamicSchema_2_0'
KCENTRIFYSFU_3_0 = 'kCentrifySfu_3_0'
KCENTRIFYSFU_4_0 = 'kCentrifySfu_4_0'
KCENTRIFYCDCRFC2307 = 'kCentrifyCdcRfc2307'
KCENTRIFYDYNAMICSCHEMA_3_0 = 'kCentrifyDynamicSchema_3_0'
KCENTRIFYCDCRFC2307_2 = 'kCentrifyCdcRfc2307_2'
KCENTRIFYDYNAMICSCHEMA_5_0 = 'kCentrifyDynamicSchema_5_0'
KCENTRIFYCDCRFC2307_3 = 'kCentrifyCdcRfc2307_3'
KCENTRIFYSFU_3_0_V5 = 'kCentrifySfu_3_0_V5'
| 1.664063 | 2 |
Project/UnitTests/test_utility_weights.py | Itsuke/PurchaseAdvisor | 0 | 12763713 | """
Unit tests to verify the utility_weights module.
"""
import unittest
import numpy as np
from pymcdm import weights
from utility_weights import UtilityWeights
class TestUtilityNormalization(unittest.TestCase):
"""
    Class used to verify the implementation of the weighting methods in UtilityWeights
"""
def setUp(self):
self.test_mat = np.array([[1, 1, 2, 3, 3, 1],
[2, 3, 1, 2, 1, 2],
[4, 5, 3, 1, 2, 3]])
self.uw = UtilityWeights(self.test_mat)
def test_equal_weights(self):
"""
        The equal weights should all be the same, and each should have the value
        1/number_of_criteria.
:return:
"""
out_weights = np.array(self.uw.weights_equal())
expected_weights = weights.equal_weights(self.test_mat)
        # Summing many long floating-point numbers accumulates rounding error, so we
        # need to use the assertAlmostEqual method here.
self.assertAlmostEqual(sum(out_weights), 1, 1)
self.assertEqual(out_weights[0], 1 / len(out_weights))
self.assertTrue(all(element == out_weights[0] for element in out_weights))
np.testing.assert_array_equal(out_weights, expected_weights)
def test_standard_deviation_weights(self):
"""
        The standard deviation weights should sum to one.
:return:
"""
out_weights = np.array(self.uw.weights_std())
self.assertEqual(sum(out_weights), 1)
def test_entrophy_weights(self):
"""
        The entropy weights should sum to one.
"""
out_weights = np.array(self.uw.weights_entropy())
self.assertEqual(sum(out_weights), 1)
if __name__ == '__main__':
unittest.main()
| 3.1875 | 3 |
PythonExercicios/ex016.py | cedricgenaro/Python | 0 | 12763714 | <gh_stars>0
# Quebrando um número
'''Minha Solução'''
'''from math import floor
valor = float(input('Digite um valor: '))
numQuebrado = floor(valor)
print('O valor digitado foi {} e a sua porção inteira é {}'.format(valor, numQuebrado))'''
'''Solução Professor '''
'''from math import trunc
num = float(input('Digite um valor: '))
print('O valor digitado foi {} e a sua porção inteira é {}'.format(num, trunc(num)))'''
num = float(input('Digite um valor: '))
print('o valor digitado foi {} e a sua porção inteira é {}'.format(num, int(num)))
| 3.921875 | 4 |
examples/legacy/signs/sign.py | jameszha/ARENA-py | 4 | 12763715 | <reponame>jameszha/ARENA-py
# robot-arm.py
#
import time
import arena
import random
import os
import json
import sys
# export HOST=arenaxr.org
# export REALM=realm
# export MQTTH=arenaxr.org
# export MID=MID_1234
# Optional:
# export LINKS = "Link1,https://www.duckduckgo.com,Link 2,https:www.f1.com,Link 3,https://www.eet.com"
# export LOC = "3,0,-10"
# LINKS env will overwrite this default:
sign_links = ['Link 1','https://www.duckduckgo.com','Link 2','https://www.f1.com','Link 3','https://www.eet.com']
# Loc env will overwrite this default:
sign_location = [3,0,-10]
def draw_ray(click_pos, position):
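    # Draw a short-lived (ttl=1) thick magenta line from slightly below the click
    # position down to the clicked object's position.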
line = arena.Object(
ttl=1,
objType=arena.Shape.thickline,
thickline=arena.Thickline( # slightly below camera so you can see line vs head-on
{
(click_pos[0],click_pos[1]-0.2,click_pos[2]),
(position[0],position[1],position[2])
},5,"#FF00FF")
)
animateState = False
def target1_handler(event=None):
global target1
if event.event_type == arena.EventType.mouseenter:
target1.update(color=(0,255,0), transparency=arena.Transparency(True, 0.5) )
if event.event_type == arena.EventType.mouseleave:
target1.update(transparency=arena.Transparency(True, 0.0) )
if event.event_type == arena.EventType.mousedown:
draw_ray(event.click_pos, event.position)
target1.update(transparency=arena.Transparency(True, 0.0) )
def target2_handler(event=None):
global target2
if event.event_type == arena.EventType.mouseenter:
target2.update(color=(0,255,0),transparency=arena.Transparency(True, 0.5) )
if event.event_type == arena.EventType.mouseleave:
target2.update( transparency=arena.Transparency(True, 0.0) )
if event.event_type == arena.EventType.mousedown:
draw_ray(event.click_pos, event.position)
target2.update( transparency=arena.Transparency(True, 0.0) )
def target3_handler(event=None):
global target3
if event.event_type == arena.EventType.mouseenter:
target3.update(color=(0,255,0),transparency=arena.Transparency(True, 0.5) )
if event.event_type == arena.EventType.mouseleave:
target3.update( transparency=arena.Transparency(True, 0.0) )
if event.event_type == arena.EventType.mousedown:
draw_ray(event.click_pos, event.position)
target3.update( transparency=arena.Transparency(True, 0.0) )
# start the fun shall we?
if (os.environ.get('SCENE') is not None) and (os.environ.get('REALM') is not None) and (os.environ.get('MQTTH') is not None):
SCENE = os.environ["SCENE"]
HOST = os.environ["MQTTH"]
REALM = os.environ["REALM"]
print("Loading (prgm,scene,real,host): " + sys.argv[0] + "," + SCENE + "," + REALM + "," + HOST)
else:
print( "You need to set SCENE, MQTTH and REALM as environmental variables to specify the program target")
exit(-1)
if os.environ.get('MID') is not None:
MID = os.environ["MID"]
print( "MID:" + MID )
MID = MID + '-'
if os.environ.get('LINKS') is not None:
# Links is base64 encoded
LINKS = os.environ["LINKS"]
# LINKS = unquote(LINKS)
# take the string and parse out CSV parameters
print( "LINKS:" + LINKS)
sign_links= LINKS.split(",")
if os.environ.get('LOC') is not None:
# Links is base64 encoded
LOC = os.environ["LOC"]
# LOC = unquote(LOC)
print( "LOC:" + LOC)
# take the string and parse out CSV parameters
sign_location = LOC.split(",")
arena.init(HOST, REALM, SCENE)
print("starting sign main loop")
signParent = arena.Object(
persist=True,
objName=MID+"signParent",
objType=arena.Shape.cube,
location=(0, 0, 0),
transparency=arena.Transparency(True, 0),
)
sign1 = arena.Object(
objName=MID+"sign1-model",
url="store/users/wiselab/models/blank-sign/scene.gltf",
objType=arena.Shape.gltf_model,
scale=(0.02,0.02,0.02),
location=(0,0,0),
clickable=False,
persist=True,
parent=MID+"signParent"
)
dataStr='{"goto-url": { "on": "mousedown", "url": "' + sign_links[1] + '"} } '
target1 = arena.Object(
objName=MID+"target1",
objType=arena.Shape.cube,
scale=(0.6,0.15,0.01),
location=( -0.292,1.522, 0.027),
rotation=( 0.017,-0.182, 0.003, 0.983 ),
color=(170,200,255),
clickable=True,
callback=target1_handler,
data=dataStr,
transparency=arena.Transparency(True, 0),
persist=True,
parent=MID+"signParent"
)
dataStr='{"text":"' + sign_links[0] + '"}'
text1 = arena.Object(
objName=MID+"text1",
objType=arena.Shape.text,
scale=(0.5,0.5,0.5),
location=( -0.292,1.522, 0.037),
rotation=( 0.017,-0.182, 0.003, 0.983 ),
clickable=False,
data=dataStr,
color=(100,100,255),
persist=True,
parent=MID+"signParent"
)
dataStr='{"goto-url": { "on": "mousedown", "url": "' + sign_links[3] + '"} } '
target2 = arena.Object(
objName=MID+"target2",
objType=arena.Shape.cube,
scale=(0.6,0.15,0.01),
location=( -.12,1.22,-.013),
rotation=( 0.017,0, 0, 1 ),
color=(170,200,255),
clickable=True,
callback=target2_handler,
data=dataStr,
transparency=arena.Transparency(True, 0),
persist=True,
parent=MID+"signParent"
)
dataStr='{"text":"' + sign_links[2] + '"}'
text2 = arena.Object(
objName=MID+"text2",
objType=arena.Shape.text,
scale=(0.5,0.5,0.5),
location=( -.12,1.22,-.003),
rotation=( 0.017,0, 0, 1 ),
clickable=False,
data=dataStr,
color=(100,100,255),
persist=True,
parent=MID+"signParent"
)
dataStr='{"goto-url": { "on": "mousedown", "url": "' + sign_links[5] + '"} } '
target3 = arena.Object(
objName=MID+"target3",
objType=arena.Shape.cube,
scale=(0.6,0.15,0.01),
location=( -.3,0.91,0.023),
rotation=( 0.017,0.225, 0, 1 ),
color=(170,200,255),
clickable=True,
callback=target3_handler,
data=dataStr,
transparency=arena.Transparency(True, 0),
persist=True,
parent=MID+"signParent"
)
dataStr='{"text":"' + sign_links[4] + '"}'
text3 = arena.Object(
objName=MID+"text3",
objType=arena.Shape.text,
scale=(0.5,0.5,0.5),
location=( -.3,0.91,0.033),
rotation=( 0.017,0.225, 0, 1 ),
data=dataStr,
color=(100,100,255),
persist=True,
parent=MID+"signParent"
)
signParent.update(location=(float(sign_location[0]),float(sign_location[1]),float(sign_location[2])))
print( "Go to URL: https://" + HOST + "/" + SCENE)
arena.handle_events()
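# Hedged usage note (added for clarity; not part of the original script): all
# configuration comes from environment variables, and the values below are
# purely illustrative placeholders.
#   SCENE=my-scene REALM=realm MQTTH=arena.example.org MID=sign1 \
#   LINKS="Home,https://example.com,Docs,https://example.com/docs,Chat,https://example.com/chat" \
#   LOC="0,1,-2" python <this script>
# LINKS is consumed as alternating (label, url) CSV entries for the three sign
# targets, and LOC is the x,y,z placement applied to the sign parent object, so
# both are effectively required even though only SCENE/REALM/MQTTH are checked.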
| 2.484375 | 2 |
jigsaw/models/linear_models/base_model.py | alexvishnevskiy/jigsaw | 0 | 12763716 | <gh_stars>0
from sklearn.feature_extraction.text import TfidfVectorizer
from ...utils.glove import convert_glove_to_features, load_glove
from ...utils.fast_text import convert_fasttext_to_features
from sklearn.base import BaseEstimator
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.svm import SVR
from pathlib import Path
import hashlib
import numpy as np
import joblib
import os
class BaseModel(BaseEstimator):
def __init__(self, cfg):
self.cfg = cfg
def fit(self, X, y):
X = self._convert_to_features(X)
super().fit(X, y)
def predict(self, X):
if self.cfg.emb_type == 'tfidf':
X = self.vectorizer.transform(X)
else:
X = self._convert_to_features(X)
return super().predict(X)
def _convert_to_features(self, X):
        hash_X = (
            ' '.join(list(X)) + self.cfg.emb_type +
            (self.cfg.get('emb_path') if self.cfg.get('emb_path') is not None else '')
        ).encode()
hash_X = hashlib.sha256(hash_X).hexdigest()
cache_dir = os.path.join(Path(__file__).parents[3], '.cache')
filename = os.path.join(cache_dir, f'{hash_X}.npy')
if os.path.exists(filename):
print('loading features from cache')
features = np.load(filename)
elif self.cfg.cache_features:
print(f'caching features to {filename}')
if not os.path.exists(cache_dir):
os.mkdir(cache_dir)
if self.cfg.emb_type == 'tfidf':
features = self._convert_text_to_tfidf(X)
return features
if self.cfg.emb_type == 'glove':
emmbed_dict = load_glove(self.cfg.emb_path)
features = self._convert_glove_to_features(X, emmbed_dict)
if self.cfg.emb_type == 'fasttext':
features = self._convert_fasttext_to_features(X)
#cache features
np.save(filename, features)
return features
def _convert_text_to_tfidf(self, X):
self.vectorizer = TfidfVectorizer(
analyzer='char_wb' if self.cfg.get('tokenizer') is not None else 'word',
ngram_range=self.cfg.ngram_range,
max_df=self.cfg.max_df, min_df=self.cfg.min_df
)
features = self.vectorizer.fit_transform(X)
return features
def _convert_glove_to_features(self, X, emmbed_dict):
return convert_glove_to_features(X, emmbed_dict)
def _convert_fasttext_to_features(self, X):
return convert_fasttext_to_features(X, self.cfg.emb_path, self.cfg.get('tokenizer'))
def save(self, path):
path = Path(path)
if not os.path.exists(path.parent):
os.makedirs(path.parent)
if self.cfg.emb_type == 'tfidf':
#dump tfidf
tfidf_save_path = path.with_name(f"{path.stem}-tfidf.joblib")
joblib.dump(self.vectorizer, tfidf_save_path)
#delete tfidf in order to clean memory
delattr(self, 'vectorizer')
joblib.dump(self, path)
return path.parent
@classmethod
    def load(cls, path):
        path = Path(path)
        model = joblib.load(path)
        #add feature: loading from folder
        try:
            tfidf_path = path.with_name(f"{path.stem}-tfidf.joblib")
            model.vectorizer = joblib.load(tfidf_path)
        except FileNotFoundError:
            pass
        return model
class LinearModel(BaseModel, Ridge):
def __init__(self, cfg, alpha=1, random_state=None):
BaseModel.__init__(self, cfg)
Ridge.__init__(self, alpha=alpha, random_state=random_state)
class KernelModel(BaseModel, KernelRidge):
def __init__(self, cfg, alpha=1, kernel='linear', gamma=None, degree=3):
BaseModel.__init__(self, cfg)
KernelRidge.__init__(self, alpha=alpha, kernel=kernel, gamma=gamma, degree=degree)
class SVRModel(BaseModel, SVR):
def __init__(self, cfg, kernel='rbf', degree=3, gamma='scale', C=1):
BaseModel.__init__(self, cfg)
SVR.__init__(self, kernel=kernel, degree=degree, gamma=gamma, C=C)
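# --- Hedged usage sketch (added; not part of the original module) ---
# Illustrates how the wrappers above are meant to be driven end to end with
# TF-IDF features. `_ExampleCfg` is hypothetical: it only mimics the config
# attributes this module actually reads (emb_type, cache_features, ngram_range,
# max_df, min_df and a dict-style .get()); the real project presumably passes a
# richer config object. Because of the package-relative imports at the top, run
# it as `python -m jigsaw.models.linear_models.base_model` rather than as a
# plain script. With cache_features=True a .cache directory may be created.
if __name__ == "__main__":
    from types import SimpleNamespace

    class _ExampleCfg(SimpleNamespace):
        def get(self, key, default=None):
            return getattr(self, key, default)

    cfg = _ExampleCfg(emb_type="tfidf", cache_features=True,
                      ngram_range=(1, 2), max_df=0.95, min_df=1)
    texts = ["a rude comment", "a friendly comment", "another friendly comment"]
    targets = [0.9, 0.0, 0.1]

    model = LinearModel(cfg, alpha=1.0)
    model.fit(texts, targets)  # fits the TF-IDF vectorizer and the Ridge head
    print(model.predict(["one more comment"]))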
| 2.296875 | 2 |
sauronx/sensors.py | dmyersturnbull/sauronx | 0 | 12763717 | import datetime
import logging
import multiprocessing as mp
import threading
import wave
from enum import Enum
from multiprocessing import Process
from os.path import dirname
from typing import Dict, List
import numpy as np
import pandas as pd
import pyaudio
# from scipy.io import wavfile # TODO broken
from hipsterplot import HipsterPlotter
from sauronx import stamp
from .arduino import Board
from .configuration import config
from .paths import *
class SensorParams:
# this allows us to add new info without refactoring
def __init__(self, duration: int, output_dir: str):
self.duration = duration
self.output_dir = output_dir
class SensorTrigger(Enum):
"""Start times for sensors."""
CAMERA_START = 1
EXPERIMENT_START = 0
SOUND_TEST = 4
LIGHT_TEST = 5
TEMPERATURE_TEST = 6
class Sensor:
# triggers = None # type: list of SensorTrigger
def __init__(self, params: SensorParams) -> None:
self.params = params
# logging.info("Set up {}".format(self.name()))
def file_path(self) -> str:
raise NotImplementedError()
@classmethod
def sensor_name(cls) -> str:
return cls.__name__.lower()
def __str__(self):
return self.name()
def name(self) -> str:
return self.__class__.__name__.lower()
def start(self, **kwargs) -> None:
raise NotImplementedError()
def test(self, **kwargs) -> None:
raise NotImplementedError()
def save(self, **kwargs) -> None:
raise NotImplementedError()
def term(self) -> None:
pass # assume it closes automatically
def _record(self, *args) -> None:
raise NotImplementedError()
def plot(self) -> str:
return ""
def _get_board(self, kwargs) -> Board:
try:
return kwargs["board"]
except KeyError:
raise KeyError(
"The sensor was not passed an initialized PyMata board object and could not start"
)
class Microphone(Sensor):
triggers = [SensorTrigger.EXPERIMENT_START, SensorTrigger.SOUND_TEST]
def __init__(self, params: SensorParams) -> None:
super(Microphone, self).__init__(params)
self.params = params
self.coll = config.get_coll(self.params.output_dir)
self._stream = None
self._p = None
self._timestamps = None
self._frames = None
self.audio_format = pyaudio.paInt32
self.channels = 1
self.sample_rate = int(config.sensors["microphone.sample_rate"])
self.frames_per_buffer = int(config.sensors["microphone.frames_per_buffer"])
self.log_file = None
self.should_kill = [None]
self._thread = threading.Thread(target=self._record)
def file_path(self) -> str:
# TODO
return self.coll.microphone_wav_path()
def timestamp_file_path(self) -> str:
# TODO
return self.coll.microphone_timestamps_path()
def start(self, **kwargs) -> None:
logging.debug("Recording {} to {}".format(self.name(), self.file_path()))
self.log_file = self.file_path()
self._init()
self._thread.start()
def test(self, **kwargs) -> None:
log_file_path = kwargs["log_file_path"]
self.log_file = log_file_path
logging.debug("Recording {} to {}".format(self.name(), log_file_path))
self._init()
self._record()
def _init(self):
make_dirs(dirname(self.log_file))
self.timestamps = []
self.frames = []
try:
self._p = pyaudio.PyAudio()
self._stream = self._p.open(
format=self.audio_format,
channels=self.channels,
rate=self.sample_rate,
input=True,
frames_per_buffer=self.frames_per_buffer,
)
except Exception as e:
logging.exception("Failed to start microphone.")
warn_user("Failed to start microphone.")
raise e
self.should_kill = [False]
def _record(self) -> None:
# TODO exception handling got a bit much here
try:
while not self.should_kill[0]:
data = self._stream.read(self.frames_per_buffer)
self.frames.append(data)
self.timestamps.append(datetime.datetime.now())
except Exception as e:
logging.exception("Microphone failed while capturing")
warn_user("Microphone failed while capturing")
raise e
self.save()
self._kill()
def term(self):
self.should_kill[0] = True
def save(self):
try:
logging.info("Writing microphone data...")
logging.debug("Writing microphone timestamps")
with open(self.timestamp_file_path(), "w") as f:
for ts in self.timestamps:
f.write(stamp(ts) + "\n")
logging.debug("Writing microphone WAV data")
wf = wave.open(self.log_file, "wb")
try:
wf.setnchannels(self.channels)
wf.setsampwidth(self._p.get_sample_size(self.audio_format))
wf.setframerate(self.sample_rate)
wf.writeframes(b"".join(self.frames))
finally:
wf.close()
except Exception as e:
warn_user("Microphone failed while writing the .wav file")
raise e
logging.info("Finished writing microphone data.")
def plot(self):
logging.info("Plotting microphone data (may take a couple minutes)...")
logging.warning("Sometimes plotting the microphone data can crash the interpreter.")
from scipy.io import wavfile
with open(self.file_path(), "rb") as f:
sampling_rate, data = wavfile.read(f)
ms = np.array([i / sampling_rate * 1000 for i in range(0, len(data))])
low_x = self.timestamps[0].strftime("%H:%M:%S")
high_x = self.timestamps[-1].strftime("%H:%M:%S")
s = HipsterPlotter(num_y_chars=10).plot(
data, title=self.name(), low_x_label=low_x, high_x_label=high_x
)
with open(self.file_path() + ".plot.txt", "w", encoding="utf8") as f:
f.write(s)
return s
def _kill(self):
logging.info("Terminating microphone...")
logging.debug("Ending microphone process")
try:
self._p.terminate() # failing here is probably bad
except Exception as b:
logging.warning("Failed to terminate microphone process")
logging.debug(b, exc_info=True)
logging.debug("Ending microphone stream")
try:
self._stream.stop_stream()
except Exception as b:
logging.warning("Failed to stop microphone stream")
logging.debug(b, exc_info=True)
logging.debug("Closing microphone stream")
try:
self._stream.close()
except Exception as b:
logging.warning("Failed to close microphone process")
logging.debug(b, exc_info=True)
self._p = None # ; self._thread = None
logging.debug("Microphone exited")
logging.info("Terminated microphone.")
class CsvSensor(Sensor):
def _record(self, data) -> None:
self.vals.append((data[2], datetime.datetime.now()))
def start(self, **kwargs) -> None:
logging.debug("Recording {} to {}".format(self.name(), self.file_path()))
self.board = self._get_board(kwargs)
make_dirs(dirname(self.file_path()))
self.board.register_sensor(self.pin, self._record)
def test(self, **kwargs) -> None:
self.start(**kwargs)
def term(self):
self.save()
if self.board is not None: # None if it failed before initializing
self.board.reset_sensor(self.pin)
def save(self):
with open(self.file_path(), "w", encoding="utf8") as log_file:
log_file.write("Value,Time\n")
for v, t in self.vals:
log_file.write("%s,%s\n" % (v, t))
logging.info("Saved {} data".format(self.name()))
def plot(self):
fmt = "%Y-%m-%d %H:%M:%S.%f"
df = pd.read_csv(self.file_path())
if len(df) == 0:
return "{}: <no data>".format(self.sensor_name())
low_x = datetime.datetime.strptime(df["Time"].iloc[0], fmt).strftime("%H:%M:%S")
high_x = datetime.datetime.strptime(df["Time"].iloc[-1], fmt).strftime("%H:%M:%S")
s = HipsterPlotter(num_y_chars=10).plot(
df["Value"], title=self.name(), low_x_label=low_x, high_x_label=high_x
)
with open(self.file_path() + ".plot.txt", "w", encoding="utf8") as f:
f.write(s)
return s
class Thermometer(CsvSensor):
triggers = [SensorTrigger.EXPERIMENT_START, SensorTrigger.TEMPERATURE_TEST]
def __init__(self, params: SensorParams) -> None:
super(Thermometer, self).__init__(params)
self.board = None
self.params = params
self.pin = int(config.sensors["analog_pins.thermometer"])
self.vals = []
def file_path(self) -> str:
return pjoin(self.params.output_dir, "sensors", "thermometer_log.csv")
class Photometer(CsvSensor):
triggers = [SensorTrigger.EXPERIMENT_START, SensorTrigger.LIGHT_TEST]
    def __init__(self, params: SensorParams) -> None:
        super(Photometer, self).__init__(params)
        self.board = None  # mirrors Thermometer; CsvSensor.term() checks self.board
        self.params = params
        self.pin = int(config.sensors["analog_pins.photometer"])
        self.vals = []
def file_path(self) -> str:
return pjoin(self.params.output_dir, "sensors", "photometer_log.csv")
class SensorRegistry:
permitted = [Microphone, Thermometer, Photometer]
enabled = None # type: List[type]
params = None # type: SensorParams
_ready = None # type: Dict[SensorTrigger, List[Sensor]]
_name_to_sensor = {s.sensor_name(): s for s in permitted}
def __init__(self, params: SensorParams) -> None:
self.params = params
self.enabled = []
for sensor in self.permitted:
if sensor.sensor_name() in config.sensors["registry"]:
self.enabled.append(sensor)
for sensor in config.sensors["registry"]:
assert sensor in [
z.sensor_name() for z in self.permitted
], "Sensor {} not found".format(sensor)
regs = (
": " + ", ".join([str(z.sensor_name()) for z in self.enabled])
if len(self.enabled) > 0
else ""
)
logging.info("Detected {} registered sensors{}".format(len(self.enabled), regs))
self._ready = {trigger: [] for trigger in SensorTrigger}
def fetch(self):
x = []
for k, v in self._ready.items():
x.extend(v)
return x
def __len__(self) -> int:
return len(self.enabled)
def __contains__(self, item):
if isinstance(item, str):
return self._name_to_sensor[item] in self.enabled
elif isinstance(item, type):
# a bit weird, but I feel safer about matching on the name
if not hasattr(item, "sensor_name"):
raise TypeError(
"Must look up sensor by name or Sensor class; got invalid type {}.".format(
item.__name__
)
)
return self._name_to_sensor[item.sensor_name()] in self.enabled
else:
raise TypeError("Must look up sensor by name or Sensor class; got {}.".format(item))
def ready(self, trigger: SensorTrigger) -> None:
logging.debug("Readying sensors to start with trigger {}".format(trigger))
for sensor in self.enabled:
if trigger in sensor.triggers:
self._ready[trigger].append(sensor(self.params))
def start(self, trigger: SensorTrigger, board: object = None) -> None:
logging.debug("Firing sensors for trigger {}".format(trigger))
for sensor in self._ready[trigger]:
sensor.start(board=board)
def test(self, trigger: SensorTrigger, board: object = None, file_path: str = None) -> None:
logging.debug("Firing sensors for trigger {}".format(trigger))
for sensor in self._ready[trigger]:
sensor.test(board=board, log_file_path=file_path)
def save(self, trigger: SensorTrigger) -> None:
logging.debug("Saving sensors for trigger {}".format(trigger))
for sensor in self._ready[trigger]:
sensor.save()
def terminate(self, trigger: SensorTrigger) -> None:
logging.debug("Deactivating sensors for trigger {}".format(trigger))
for sensor in self._ready[trigger]:
sensor.term()
| 2.296875 | 2 |
fractal/core/command_bus/command_handler.py | douwevandermeij/fractal | 2 | 12763718 | from abc import ABC, abstractmethod
from typing import List, Type
from fractal.core.command_bus.command import Command
class CommandHandler(ABC):
@abstractmethod
def commands(self) -> List[Type[Command]]:
"""Returns list of commands that this handler can handle."""
@abstractmethod
def handle(self, command: Command):
"""Handle command, might return a value."""
| 3.15625 | 3 |
setup.py | namin/docker-echo-kernel | 32 | 12763719 | from distutils.core import setup
with open('README.rst') as f:
readme = f.read()
setup(
name='echo_kernel',
version='1.1',
packages=['echo_kernel'],
description='Simple example kernel for Jupyter',
long_description=readme,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/jupyter/echo_kernel',
install_requires=[
'jupyter_client', 'IPython', 'ipykernel'
],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3',
],
)
| 1.273438 | 1 |
Aula12/app.py | refatorando/curso_python_iniciantes | 3 | 12763720 | <gh_stars>1-10
#open("path","r")
# Mode
# r - Read
# a - Append
# w - Write
# x - Create a new file (fails if it already exists)
# r+ - Read + Write
arquivo = open("Aula12/test3.txt","x")  # "x" creates a brand-new write-only file; the read calls below need a readable mode such as "r" or "r+"
print(arquivo.readable())
print(arquivo.read())
print(arquivo.readline())
print(arquivo.readline())
print(arquivo.readline())
print(arquivo.readline())
lista = arquivo.readlines()
print(lista)
print(lista[3])
arquivo.write("Python\n")
arquivo.write("C++\n")
arquivo.write("Terraform\n")
arquivo.close()
import os
if os.path.exists("Aula12/test2.txt"):
os.remove("Aula12/test2.txt")
else :
print("Arquivo não existe")
os.rmdir("Aula12/nova_pasta") | 2.8125 | 3 |
regex_search/regex_search.py | zspatter/automate-the-boring-stuff | 15 | 12763721 | #! /usr/bin/env python3
# regex_search.py - simple text search utilizing regex engine
import re
from pathlib import Path
def regex_search(regex, path):
"""
    Searches all .txt files in the directory at path for lines matching the given pattern
    :param re.Pattern regex: pattern to search for
    :param Path path: path to the directory containing the .txt files to search
    :return: list of lines that match the pattern
"""
path = path.resolve()
if not path.is_dir():
print('The file path must be to a directory.')
return
regex = re.compile(regex)
matches = []
generator = (file for file in path.iterdir() if file.suffix == '.txt')
for file in generator:
text = file.read_text().split('\n')
match_gen = (line for line in text if regex.search(line))
for line in match_gen:
matches.append(line)
return matches
if __name__ == '__main__':
email_regex = re.compile(r'''(
[a-zA-Z0-9._%+-]+ # username
@ # @ symbol
[a-zA-Z0-9.-]+ # domain name
(\.[a-zA-Z]{2,4}) # dot-something
)''', re.VERBOSE)
print('\n'.join(regex_search(regex=email_regex, path=Path('./sample_data'))))
| 4.1875 | 4 |
tests/test_permissions.py | amehta1/t1-python | 24 | 12763722 | <filename>tests/test_permissions.py
from __future__ import print_function
from __future__ import absolute_import
import unittest
import responses
import requests
from .requests_patch import patched_extract_cookies_to_jar
from terminalone import T1
mock_credentials = {
'username': 'user;',
'password': 'password',
'api_key': 'api_key',
}
API_BASE = 'api.mediamath.com'
requests.sessions.extract_cookies_to_jar = patched_extract_cookies_to_jar
requests.adapters.extract_cookies_to_jar = patched_extract_cookies_to_jar
class TestPermissions(unittest.TestCase):
def setup(self):
"""set up test fixtures"""
with open('tests/fixtures/xml/session.xml') as f:
fixture = f.read()
responses.add(responses.POST, 'https://api.mediamath.com/api/v2.0/login',
body=fixture,
adding_headers={
'Set-Cookie': 'adama_session=1',
},
content_type='application/xml')
self.t1 = T1(auth_method='cookie',
api_base=API_BASE,
**mock_credentials)
@responses.activate
def test_get_permissions(self):
self.setup()
with open('tests/fixtures/xml/permissions.xml') as f:
fixture = f.read()
responses.add(responses.GET,
'https://api.mediamath.com/api/v2.0/users/10000/permissions',
body=fixture,
content_type='application/xml')
p = self.t1.get('users', 10000, child='permissions')
assert p._type == 'permission', 'Expected permission entity, got: {}'.format(p._type)
        assert p.parent_id == 10000, 'Expected parent id to be 10000, got: {}'.format(p.parent_id)
@responses.activate
def test_remove_advertiser(self):
self.setup()
with open('tests/fixtures/xml/permissions.xml') as f:
fixture = f.read()
responses.add(responses.GET,
'https://api.mediamath.com/api/v2.0/users/10000/permissions',
body=fixture,
content_type='application/xml')
p = self.t1.get('users', 10000, child='permissions')
remove_id = 6
assert remove_id in p.advertiser.keys(), 'Expected advertiser {} to be in access flags'.format(remove_id)
p.remove('advertiser', 6)
assert remove_id not in p.advertiser.keys(), 'advertiser {} should have been removed but is still there'\
.format(remove_id)
@responses.activate
def test_it_should_remove_child_advertisers_when_removing_agency(self):
self.setup()
with open('tests/fixtures/xml/permissions.xml') as f:
fixture = f.read()
responses.add(responses.GET,
'https://api.mediamath.com/api/v2.0/users/10000/permissions',
body=fixture,
content_type='application/xml')
p = self.t1.get('users', 10000, child='permissions')
remove_ids = [6, 7]
for ad_id in remove_ids:
assert ad_id in p.advertiser.keys(), 'Expected advertiser {} to be in access flags'.format(ad_id)
p.remove('agency', 3)
for ad_id in remove_ids:
assert ad_id not in p.advertiser.keys(), 'child advertiser {} should have been removed but is still there'\
.format(ad_id)
@responses.activate
def test_it_should_remove_child_agencies_and_advertisers_when_removing_organization(self):
self.setup()
with open('tests/fixtures/xml/permissions.xml') as f:
fixture = f.read()
responses.add(responses.GET,
'https://api.mediamath.com/api/v2.0/users/10000/permissions',
body=fixture,
content_type='application/xml')
p = self.t1.get('users', 10000, child='permissions')
remove_advertiser_ids = [8, 9, 10]
remove_agency_ids = [4, 5]
for advertiser_id in remove_advertiser_ids:
assert advertiser_id in p.advertiser.keys(), 'Expected advertiser {} to be in access flags'.format(advertiser_id)
for agency_id in remove_agency_ids:
assert agency_id in p.agency.keys(), 'Expected agency {} to be in access flags'.format(agency_id)
p.remove('organization', 2)
for advertiser_id in remove_advertiser_ids:
assert advertiser_id not in p.advertiser.keys(), 'child advertiser {} should have been removed but is still there'\
.format(advertiser_id)
for agency_id in remove_agency_ids:
assert agency_id not in p.agency.keys(), 'child agency {} should have been removed but is still there'\
.format(agency_id)
@responses.activate
def test_it_should_add_entity_ids_on_save(self):
self.setup()
with open('tests/fixtures/xml/permissions.xml') as f:
fixture = f.read()
responses.add(responses.GET,
'https://api.mediamath.com/api/v2.0/users/10000/permissions',
body=fixture,
content_type='application/xml')
p = self.t1.get('users', 10000, child='permissions')
p.add('organization', 10)
data = p._generate_save_data()
assert sorted(data['organization_id']) == [1, 2, 10], data['organization_id']
@responses.activate
def test_it_should_add_access_to_empty_permissions(self):
self.setup()
with open('tests/fixtures/xml/permissions_none.xml') as f:
fixture = f.read()
responses.add(responses.GET,
'https://api.mediamath.com/api/v2.0/users/10000/permissions',
body=fixture,
content_type='application/xml')
p = self.t1.get('users', 10000, child='permissions')
p.add('organization', 10)
data = p._generate_save_data()
assert sorted(data['organization_id']) == [10], data['organization_id']
| 2.15625 | 2 |
output/models/ms_data/complex_type/ct_d033_xsd/__init__.py | tefra/xsdata-w3c-tests | 1 | 12763723 | from output.models.ms_data.complex_type.ct_d033_xsd.ct_d033 import (
FooType,
MyType,
Root,
)
__all__ = [
"FooType",
"MyType",
"Root",
]
| 1.203125 | 1 |
app/PartitionMethods.py | karrnb/Region-Graph-Exploration | 0 | 12763724 | <gh_stars>0
import graph_tool.all as gt
# import networkx as nx
import numpy as np
from subprocess import Popen, PIPE
from HierarchicalPartitioningTree import PartitionTree, PartitionNode
# from networkx.algorithms.flow import edmonds_karp, shortest_augmenting_path
def connected_components(G, vertex_indices=None, edge_indices=None):
"""Partition by connected components.
Partition Type: Vertex
Description: Given graph G and sets of both vertex and edge indices,
induce subgraph and partition vertices by connected components.
Args:
G (graph_tool.Graph): The graph instance.
vertex_indices (list): List of vertex indices to induce upon.
edge_indices (list): List of edge indices to induce upon.
Returns:
        A list of PartitionNode objects describing the newly-created children
        after partitioning/decomposition.
"""
if not isinstance(G, gt.Graph):
err_msg = 'G must be a graph_tool.Graph instance'
raise ValueError(err_msg)
if vertex_indices is None and edge_indices is None:
err_msg = 'Must provide either vertex indices or edge indices'
raise ValueError(err_msg)
vp = G.new_vp('bool', vals=False)
ep = G.new_ep('bool', vals=False)
try:
vp.a[vertex_indices] = True
ep.a[edge_indices] = True
except:
err_msg = 'vertex or edge indices not in G'
raise IndexError(err_msg)
G.set_vertex_filter(vp)
G.set_edge_filter(ep)
# label connected components
comp, _ = gt.label_components(G)
# avoids having to induce subgraph each time
# downfall: edge iterator...
vlists = {}
elists = {}
for idx, e in enumerate(G.edges()):
src = e.source()
tar = e.target()
assert comp[src] == comp[tar]
CC = comp[src]
if CC not in vlists:
vlists[CC] = set()
if CC not in elists:
elists[CC] = set()
vlists[CC].add(G.vertex_index[src])
vlists[CC].add(G.vertex_index[tar])
elists[CC].add(G.edge_index[e])
if idx % 500e3 == 0 and idx > 0:
print idx
non_isolated_vertices = set()
children = []
keys = sorted(vlists.keys())
for idx, CC in enumerate(keys):
non_isolated_vertices.update(vlists[CC])
v_idx = list(vlists[CC])
e_idx = list(elists[CC])
node = PartitionNode(vertex_indices=v_idx,
edge_indices=e_idx,
partition_type='vertex',
label='CC_{}_{}'.format(CC, idx),
note='Connected Components')
children.append(node)
# TODO: Decide whether or not to group all isolated vertices together
if vertex_indices is not None:
for idx, v in enumerate(set(vertex_indices) - non_isolated_vertices):
node = PartitionNode(vertex_indices=[v],
edge_indices=[],
partition_type='vertex',
label='CC_{}_{}'.format(comp[v], idx),
note='Connected Components')
children.append(node)
G.clear_filters()
return children
def biconnected_components(G, vertex_indices=None, edge_indices=None):
"""Partition by biconnected components.
Partition Type: Edge
Description: Given graph G and sets of both vertex and edge indices,
induce subgraph and partition vertices by biconnected components.
Args:
G (graph_tool.Graph): The graph instance.
vertex_indices (list): List of vertex indices to induce upon.
edge_indices (list): List of edge indices to induce upon.
Returns:
        A list of PartitionNode objects describing the newly-created children
        after partitioning/decomposition.
"""
if not isinstance(G, gt.Graph):
err_msg = 'G must be a graph_tool.Graph instance'
raise ValueError(err_msg)
if vertex_indices is None and edge_indices is None:
err_msg = 'Must provide either vertex indices or edge indices'
raise ValueError(err_msg)
vp = G.new_vp('bool', vals=False)
ep = G.new_ep('bool', vals=False)
try:
vp.a[vertex_indices] = True
ep.a[edge_indices] = True
except:
err_msg = 'vertex or edge indices not in G'
raise IndexError(err_msg)
G.set_vertex_filter(vp)
G.set_edge_filter(ep)
# label connected components
bicomp, art, _ = gt.label_biconnected_components(G)
# avoids having to induce subgraph each time
# downfall: edge iterator...
vlists = {}
elists = {}
for idx, e in enumerate(G.edges()):
src = e.source()
tar = e.target()
BCC = bicomp[e]
if BCC not in vlists:
vlists[BCC] = set()
if BCC not in elists:
elists[BCC] = set()
vlists[BCC].add(G.vertex_index[src])
vlists[BCC].add(G.vertex_index[tar])
elists[BCC].add(G.edge_index[e])
if idx % 500e3 == 0 and idx > 0:
print idx
children = []
print('No. of BCC\'s: {}'.format(len(elists)))
keys = sorted(elists.keys())
for idx, BCC in enumerate(keys):
v_idx = list(vlists[BCC])
e_idx = list(elists[BCC])
node = PartitionNode(vertex_indices=v_idx,
edge_indices=e_idx,
partition_type='edge',
label='BCC_{}_{}'.format(BCC, idx),
note='Biconnected Components')
children.append(node)
# label articulation points
if 'is_articulation' not in G.vp:
G.vp['is_articulation'] = G.new_vp('bool', vals=False)
ap_indices = np.where(art.a == 1)[0]
G.vp['is_articulation'].a[ap_indices] = True
G.clear_filters()
return children
def edge_peel(G, vertex_indices=None, edge_indices=None):
"""Partition by edge peeling.
Partition Type: Edge
Description: Given graph G and sets of both vertex and edge indices,
induce subgraph and partition edges by means of iterative peeling.
Args:
G (graph_tool.Graph): The graph instance.
vertex_indices (list): List of vertex indices to induce upon.
edge_indices (list): List of edge indices to induce upon.
Returns:
        A list of PartitionNode objects describing the newly-created children
        after partitioning/decomposition.
"""
if not isinstance(G, gt.Graph):
err_msg = 'G must be a graph_tool.Graph instance'
raise ValueError(err_msg)
if vertex_indices is None and edge_indices is None:
err_msg = 'Must provide either vertex indices or edge indices'
raise ValueError(err_msg)
cmd = './app/bin/graph_peeling.bin -t core -o core'
vp = G.new_vp('bool', vals=False)
ep = G.new_ep('bool', vals=False)
try:
vp.a[vertex_indices] = True
ep.a[edge_indices] = True
except:
err_msg = 'vertex or edge indices not in G'
raise IndexError(err_msg)
G.set_vertex_filter(vp)
G.set_edge_filter(ep)
efilt = ep
children = []
idx = 0
while G.num_edges() > 0:
p = Popen([cmd], shell=True, stdout=PIPE, stdin=PIPE)
for e in G.edges():
p.stdin.write('{} {}\n'.format(e.source(), e.target()))
p.stdin.flush()
p.stdin.close()
# get line from stdout that contains top peel layer
top_layer_line = ''
top_peel = -1
while True:
line = p.stdout.readline()
if line == '':
break
if not line.startswith('Core'):
continue
peel = int(line.split(' = ')[0].split('_')[-1])
if not top_layer_line:
top_layer_line = line
top_peel = peel
continue
if peel > top_peel:
top_layer_line = line
top_peel = peel
# line processing
label, vertices = top_layer_line.strip().split(' = ')
peel = int(label.split('_')[-1])
assert peel == top_peel
v_idx = [int(v) for v in vertices.split()]
# keep only relevant vertices/edges and label edge peels
vfilt = G.new_vp('bool', vals=False)
vfilt.a[v_idx] = True
G.set_vertex_filter(vfilt)
print('peel: {}, |V|: {}, |E|: {}'.format(peel,
G.num_vertices(),
G.num_edges()))
e_idx = np.where(G.new_ep('bool', vals=True).a == 1)[0]
efilt.a[e_idx] = False
node = PartitionNode(vertex_indices=v_idx,
edge_indices=e_idx,
partition_type='edge',
label='EPL_{}_{}'.format(peel, idx),
note='peel {}'.format(peel))
children.append(node)
idx += 1
G.set_vertex_filter(vp)
G.set_edge_filter(efilt)
G.clear_filters()
return children
def peel_one(G):
"""Separate into vertices of peel one (and isolated vertices) and vertices
of peel greater than one.
Partition Type: Node
Description: Given graph G and sets of both vertex and edge indices,
induce subgraph and group nodes as either peel less than or equal to 1,
or greater than 1.
NOTE: Input graph G will have its filters cleared, if any
NOTE: Usage recommended only at beginning of tree exploration
NOTE: peel one is not a proper edge partition
Args:
G (graph_tool.Graph): The graph instance.
vertex_indices (list): List of vertex indices to induce upon.
edge_indices (list): List of edge indices to induce upon.
Returns:
        A list of PartitionNode objects describing the newly-created children
        after partitioning/decomposition.
"""
if not isinstance(G, gt.Graph):
err_msg = 'G must be a graph_tool.Graph instance'
raise ValueError(err_msg)
G.clear_filters()
vertex_indices = range(G.num_vertices())
edge_indices = range(G.num_edges())
kcore = gt.kcore_decomposition(G)
# peel one vertex and edge indices
peel_one_vertex_idx = np.where(kcore.a <= 1)[0]
vfilt = G.new_vp('bool', vals=False)
vfilt.a[peel_one_vertex_idx] = True
G.set_vertex_filter(vfilt)
efilt = G.new_ep('bool', vals=True)
peel_one_edge_idx = np.where(efilt.a == 1)[0]
G.clear_filters()
children = []
# Cases where all nodes are either at most peel 1 or at least peel 1
if len(peel_one_vertex_idx) == len(vertex_indices):
node = PartitionNode(vertex_indices=peel_one_vertex_idx,
edge_indices=peel_one_edge_idx,
label='VP_LTE1_{}'.format(0),
note='peel values less than or equal to (LTE) 1')
children.append(node)
return children
elif len(peel_one_vertex_idx) == 0:
node = PartitionNode(vertex_indices=vertex_indices,
edge_indices=edge_indices,
label='VP_GT1_{}'.format(0),
note='peel values greater than (GT) 1')
children.append(node)
return children
# Case where there are mixed peel values
higher_peel_vertex_idx = np.where(kcore.a > 1)[0]
vfilt = G.new_vp('bool', vals=False)
vfilt.a[higher_peel_vertex_idx] = True
G.set_vertex_filter(vfilt)
efilt = G.new_ep('bool', vals=True)
higher_peel_edge_idx = np.where(efilt.a == 1)[0]
G.clear_filters()
node = PartitionNode(vertex_indices=peel_one_vertex_idx,
edge_indices=peel_one_edge_idx,
label='VP_LTE1_{}'.format(0),
note='peel values less than or equal to (LTE) 1')
children.append(node)
node = PartitionNode(vertex_indices=higher_peel_vertex_idx,
edge_indices=higher_peel_edge_idx,
label='VP_GT1_{}'.format(1),
note='peel values greater than (GT) 1')
children.append(node)
return children
def landmark_cluster_partition(G, vlist, elist, cluster_assignment):
"""Partition by landmark clustering.
Partition Type: Node
Description: Given graph G and sets of both vertex and edge indices,
induce subgraph and perform landmark clustering.
Args:
G (graph_tool.Graph): The graph instance.
vertex_indices (list): List of vertex indices to induce upon.
edge_indices (list): List of edge indices to induce upon.
cluster_assignment (dict): Mapping of vertices to their cluster
assignments.
Returns:
        A tuple (children, cross_edges): the list of PartitionNode children and
        a dict mapping cluster-id pairs to the edge indices crossing between them.
"""
if not isinstance(G, gt.Graph):
err_msg = 'G must be a graph_tool.Graph instance'
raise ValueError(err_msg)
G.clear_filters()
efilt = G.new_ep('bool', vals=False)
efilt.a[elist] = True
G.set_edge_filter(efilt)
cluster_assignment = {int(k): v for k, v in cluster_assignment.iteritems()}
# create reverse map
# k: cluster | v: list of vertex_ids
clusters = {}
for k, v in cluster_assignment.iteritems():
if v not in clusters:
clusters[v] = []
clusters[v].append(k)
# parses vertex ids as ints
clusters = {k: [int(i) for i in v] for k, v in clusters.iteritems()}
# cross edges and counts of metagraph
# k: tuple (v1, v2) | v: list of edge indices
cross_edges = {}
for e in G.edges():
src = cluster_assignment[G.vertex_index[e.source()]]
tar = cluster_assignment[G.vertex_index[e.target()]]
if src == tar:
continue
if (src, tar) in cross_edges:
cross_edges[(src, tar)].append(G.edge_index[e])
elif (tar, src) in cross_edges:
cross_edges[(tar, src)].append(G.edge_index[e])
else:
cross_edges[(src, tar)] = [G.edge_index[e]]
cluster_keys = sorted(clusters.keys())
children = []
for k in cluster_keys:
v_idx = clusters[k]
vfilt = G.new_vp('bool', vals=False)
vfilt.a[v_idx] = True
G.set_vertex_filter(vfilt)
e_idx = np.where(G.new_ep('bool', vals=True).a == 1)[0]
node = PartitionNode(vertex_indices=v_idx,
edge_indices=e_idx,
label='LMK_{}_{}'.format(k, len(children)),
note='landmark cluster {}'.format(k))
children.append(node)
return children, cross_edges
def k_connected_components(G, vertex_indices=None, edge_indices=None):
"""Partition by k-connected components
TODO: Implement
Partition Type: TBD (kind of vertex and edge, but not really a partition)
Description: Given graph G and sets of both vertex and edge indices,
induce subgraph and maximally break (NOT PARTITION) into k-connected
components.
Args:
G (graph_tool.Graph): The graph instance.
vertex_indices (list): List of vertex indices to induce upon.
edge_indices (list): List of edge indices to induce upon.
Returns:
A list of information dicts about the newly-created children nodes
after partitioning/decomposition.
"""
"""
if not isinstance(G, gt.Graph):
err_msg = 'G must be a graph_tool.Graph instance'
raise ValueError(err_msg)
if vertex_indices is None and edge_indices is None:
err_msg = 'Must provide either vertex indices or edge indices'
raise ValueError(err_msg)
vp = G.new_vp('bool', vals=False)
ep = G.new_ep('bool', vals=False)
try:
vp.a[vertex_indices] = True
ep.a[edge_indices] = True
except:
err_msg = 'vertex or edge indices not in G'
raise IndexError(err_msg)
G.set_vertex_filter(vp)
G.set_edge_filter(ep)
H = nx.Graph()
for e in G.edges():
src_idx = G.vertex_index[e.source()]
tar_idx = G.vertex_index[e.target()]
H.add_edge(src_idx, tar_idx)
if G.num_edges() > G.num_vertices() * np.log2(G.num_vertices()):
flow_func = shortest_augmenting_path
else:
flow_func = edmonds_karp
k_components = nx.k_components(H, flow_func=flow_func)
for k, vertex_sets in k_components.iteritems():
print('k = {} | No. of Components: {}'.format(k, len(vertex_sets)))
for vs in vertex_sets:
print(' {}'.format(vs))
print('')
# make meta graph
largest_k = max(k_components.keys())
N = len(k_components[largest_k])
for s in k_components[largest_k]:
pass
"""
return
| 2.359375 | 2 |
examples/ocean_examples/sim_3/config_sim3.py | idax4325/veropt | 2 | 12763725 | <reponame>idax4325/veropt<filename>examples/ocean_examples/sim_3/config_sim3.py<gh_stars>1-10
from veropt import BayesOptimiser
from veropt.obj_funcs.ocean_sims import OceanObjFunction
from veropt.slurm_support import slurm_set_up
class OceanObjSimThree(OceanObjFunction):
def __init__(self, target_min_vsf_depth_20N, measure_year=100, file_path=None):
bounds_lower = [500, 500, 2e-6]
bounds_upper = [1500, 1500, 2e-4]
bounds = [bounds_lower, bounds_upper]
n_params = 3
n_objs = 1
var_names = ["kappa_iso", "kappa_gm", "kappa_min"]
obj_names = ["min_vsf_depth_20N"]
self.measure_year = measure_year
self.target_min_vsf_depth_20N = target_min_vsf_depth_20N
self.file_path = file_path
param_dic = {
"measure_year": measure_year,
"target_min_vsf_depth_20N": target_min_vsf_depth_20N}
filetype = "overturning"
calc_y_method = (self.calc_y, filetype, param_dic)
super().__init__(bounds=bounds, n_params=n_params, n_objs=n_objs, calc_y_method=calc_y_method,
var_names=var_names, obj_names=obj_names, file_path=file_path)
@staticmethod
def calc_y(overturning, param_dic):
min_vsf_depth_20N = float(overturning["vsf_depth"][param_dic["measure_year"] - 1].min("zw")[25])
y = - (min_vsf_depth_20N - param_dic["target_min_vsf_depth_20N"])**2
return y, min_vsf_depth_20N
@staticmethod
def calc_y_log(overturning, param_dic):
import numpy as np
min_vsf_depth_20N = float(overturning["vsf_depth"][param_dic["measure_year"] - 1].min("zw")[25])
y = - np.log((min_vsf_depth_20N - param_dic["target_min_vsf_depth_20N"]) ** 2)
return y, min_vsf_depth_20N
if __name__ == '__main__':
n_init_points = 16
n_bayes_points = 48
n_evals_per_step = 8
measure_year = 100
target_min_vsf_depth_20N = -15 * 10**6
obj_func = OceanObjSimThree(target_min_vsf_depth_20N, measure_year=measure_year)
optimiser = BayesOptimiser(n_init_points, n_bayes_points, obj_func, n_evals_per_step=n_evals_per_step)
optimiser.save_optimiser()
# slurm_set_up.set_up(
# optimiser.file_name, ["modi_long", "modi_short"], "global_four_degree.py", make_new_slurm_controller=True,
# using_singularity=True, image_path="~/modi_images/hpc-ocean-notebook_latest.sif", conda_env="python3")
#
#
# slurm_set_up.start_opt_run("modi001")
| 1.914063 | 2 |
qiskit/aqua/utils/summarize_circuits.py | pistoia/qiskit-aqua | 1 | 12763726 | # -*- coding: utf-8 -*-
# Copyright 2018 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import numpy as np
from qiskit.converters import circuit_to_dag
def summarize_circuits(circuits):
"""Summarize circuits based on QuantumCircuit, and four metrics are summarized.
Number of qubits and classical bits, and number of operations and depth of circuits.
    The average statistics are provided if multiple circuits are given as input.
Args:
        circuits (QuantumCircuit or [QuantumCircuit]): the circuits to be summarized
    Returns:
        str: a human-readable summary of the circuit statistics
"""
if not isinstance(circuits, list):
circuits = [circuits]
ret = ""
ret += "Submitting {} circuits.\n".format(len(circuits))
ret += "============================================================================\n"
stats = np.zeros(4)
for i, circuit in enumerate(circuits):
dag = circuit_to_dag(circuit)
depth = dag.depth()
width = dag.width()
size = dag.size()
classical_bits = dag.num_cbits()
op_counts = dag.count_ops()
stats[0] += width
stats[1] += classical_bits
stats[2] += size
stats[3] += depth
ret = ''.join([ret, "{}-th circuit: {} qubits, {} classical bits and {} operations with depth {}\n op_counts: {}\n".format(
i, width, classical_bits, size, depth, op_counts)])
if len(circuits) > 1:
stats /= len(circuits)
ret = ''.join([ret, "Average: {:.2f} qubits, {:.2f} classical bits and {:.2f} operations with depth {:.2f}\n".format(
stats[0], stats[1], stats[2], stats[3])])
ret += "============================================================================\n"
return ret
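# --- Hedged usage sketch (added; not part of the original module) ---
# Builds a small Bell-state circuit and prints the summary. Assumes the
# qiskit-terra API contemporary with this module (QuantumRegister /
# ClassicalRegister / QuantumCircuit), since the DAG calls above
# (e.g. num_cbits) target that version.
if __name__ == '__main__':
    from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit

    q = QuantumRegister(2)
    c = ClassicalRegister(2)
    bell = QuantumCircuit(q, c)
    bell.h(q[0])
    bell.cx(q[0], q[1])
    bell.measure(q, c)

    print(summarize_circuits([bell, bell]))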
| 1.976563 | 2 |
nao_player_label.py | robogeekcanada/pybullet-tutorials | 0 | 12763727 | <reponame>robogeekcanada/pybullet-tutorials
import time
import pybullet as p
import pybullet_data
from qibullet import NaoVirtual
client = p.connect(p.GUI)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.loadURDF("plane.urdf")
p.setRealTimeSimulation(1)
p.setGravity(0, 0, -10)
robot = NaoVirtual()
robot.loadRobot([0,0,0], [0,0,0,1])
time.sleep(2)
#Get body unique id, draw a line aabb for each link
cid = p.getBodyUniqueId(robot.robot_model)
#print(f'\nbody unique id is {cid}')
#Head is link 2, draw line to show aabb and display P1
head_aabb = p.getAABB(cid, 2)
p.addUserDebugLine(head_aabb[0], head_aabb[1], [0,1,0], 3, 0)
p.addUserDebugText('P1', head_aabb[1], [0,0,1], 3.5)
| 2.53125 | 3 |
torch_ecg/train/train_unet_cpsc2019/metrics.py | busyyang/torch_ecg | 0 | 12763728 | """
References:
-----------
[1] http://2019.icbeb.org/Challenge.html
"""
import math
from typing import Union, Optional, Sequence
from numbers import Real
import numpy as np
__all__ = [
"compute_metrics",
]
def compute_metrics(rpeaks_truths:Sequence[Union[np.ndarray,Sequence[int]]], rpeaks_preds:Sequence[Union[np.ndarray,Sequence[int]]], fs:Real, thr:float=0.075, verbose:int=0) -> float:
""" finished, checked,
Parameters:
-----------
rpeaks_truths: sequence,
sequence of ground truths of rpeaks locations from multiple records
rpeaks_preds: sequence,
        sequence of predicted rpeaks locations for multiple records
    fs: real number,
        sampling frequency of the ECG signal
    thr: float, default 0.075,
        threshold for a prediction to be counted as a true positive,
        with units in seconds,
verbose: int, default 0,
print verbosity
Returns:
--------
rec_acc: float,
accuracy of predictions
"""
assert len(rpeaks_truths) == len(rpeaks_preds), \
f"number of records does not match, truth indicates {len(rpeaks_truths)}, while pred indicates {len(rpeaks_preds)}"
n_records = len(rpeaks_truths)
record_flags = np.ones((len(rpeaks_truths),), dtype=float)
thr_ = thr * fs
if verbose >= 1:
print(f"number of records = {n_records}")
print(f"threshold in number of sample points = {thr_}")
for idx, (truth_arr, pred_arr) in enumerate(zip(rpeaks_truths, rpeaks_preds)):
false_negative = 0
false_positive = 0
true_positive = 0
extended_truth_arr = np.concatenate((truth_arr.astype(int), [int(9.5*fs)]))
for j, t_ind in enumerate(extended_truth_arr[:-1]):
next_t_ind = extended_truth_arr[j+1]
loc = np.where(np.abs(pred_arr - t_ind) <= thr_)[0]
if j == 0:
err = np.where((pred_arr >= 0.5*fs + thr_) & (pred_arr <= t_ind - thr_))[0]
else:
err = np.array([], dtype=int)
err = np.append(
err,
np.where((pred_arr >= t_ind+thr_) & (pred_arr <= next_t_ind-thr_))[0]
)
false_positive += len(err)
if len(loc) >= 1:
true_positive += 1
false_positive += len(loc) - 1
elif len(loc) == 0:
false_negative += 1
if false_negative + false_positive > 1:
record_flags[idx] = 0
elif false_negative == 1 and false_positive == 0:
record_flags[idx] = 0.3
elif false_negative == 0 and false_positive == 1:
record_flags[idx] = 0.7
if verbose >= 2:
print(f"for the {idx}-th record,\ntrue positive = {true_positive}\nfalse positive = {false_positive}\nfalse negative = {false_negative}")
rec_acc = round(np.sum(record_flags) / n_records, 4)
print(f'QRS_acc: {rec_acc}')
print('Scoring complete.')
return rec_acc
def score(r_ref, hr_ref, r_ans, hr_ans, fs_, thr_):
"""
the official scoring function
"""
HR_score = 0
record_flags = np.ones(len(r_ref))
for i in range(len(r_ref)):
FN = 0
FP = 0
TP = 0
if math.isnan(hr_ans[i]):
hr_ans[i] = 0
hr_der = abs(int(hr_ans[i]) - int(hr_ref[i]))
if hr_der <= 0.02 * hr_ref[i]:
HR_score = HR_score + 1
elif hr_der <= 0.05 * hr_ref[i]:
HR_score = HR_score + 0.75
elif hr_der <= 0.1 * hr_ref[i]:
HR_score = HR_score + 0.5
elif hr_der <= 0.2 * hr_ref[i]:
HR_score = HR_score + 0.25
r_ref[i] = r_ref[i].astype(int) # added by wenh06
for j in range(len(r_ref[i])):
loc = np.where(np.abs(r_ans[i] - r_ref[i][j]) <= thr_*fs_)[0]
if j == 0:
err = np.where((r_ans[i] >= 0.5*fs_ + thr_*fs_) & (r_ans[i] <= r_ref[i][j] - thr_*fs_))[0]
# comments by wenh06:
# elif j == len(r_ref[i])-1:
# the above would falsely omit the interval between the 0-th and the 1-st ref rpeaks
# for example for
# r_ref = [np.array([500, 1000])]
# r_ans = [np.array([500, 700, 1000])]
# a false positive is missed
if j == len(r_ref[i])-1:
err = np.where((r_ans[i] >= r_ref[i][j]+thr_*fs_) & (r_ans[i] <= 9.5*fs_ - thr_*fs_))[0]
else:
err = np.where((r_ans[i] >= r_ref[i][j]+thr_*fs_) & (r_ans[i] <= r_ref[i][j+1]-thr_*fs_))[0]
FP = FP + len(err)
if len(loc) >= 1:
TP += 1
FP = FP + len(loc) - 1
elif len(loc) == 0:
FN += 1
if FN + FP > 1:
record_flags[i] = 0
elif FN == 1 and FP == 0:
record_flags[i] = 0.3
elif FN == 0 and FP == 1:
record_flags[i] = 0.7
rec_acc = round(np.sum(record_flags) / len(r_ref), 4)
hr_acc = round(HR_score / len(r_ref), 4)
print('QRS_acc: {}'.format(rec_acc))
print('HR_acc: {}'.format(hr_acc))
print('Scoring complete.')
return rec_acc, hr_acc
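# --- Hedged usage sketch (added; not part of the original module) ---
# Toy check of `compute_metrics` on a single 10-second record sampled at
# 500 Hz; every predicted R-peak lies within the 75 ms tolerance of a
# reference peak, so the printed QRS_acc should be 1.0.
if __name__ == "__main__":
    fs = 500
    rpeaks_truths = [np.array([700, 1200, 1700, 2200, 2700])]
    rpeaks_preds = [np.array([702, 1195, 1700, 2210, 2695])]
    compute_metrics(rpeaks_truths, rpeaks_preds, fs, thr=0.075, verbose=1)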
| 2.859375 | 3 |
lang/Python/equilibrium-index-4.py | ethansaxenian/RosettaDecode | 5 | 12763729 | <gh_stars>1-10
from collections import defaultdict
def eqindex1Pass(data):
"One pass"
l, h = 0, defaultdict(list)
for i, c in enumerate(data):
l += c
h[l * 2 - c].append(i)
return h[l]
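# --- Hedged usage sketch (added; not part of the original snippet) ---
# Equilibrium indices are positions where the sum of elements to the left
# equals the sum of elements to the right.
if __name__ == "__main__":
    print(eqindex1Pass([-7, 1, 5, 2, -4, 3, 0]))  # -> [3, 6]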
| 2.328125 | 2 |
model/api/loader.py | mvxxx/mimuw-hats | 0 | 12763730 | <gh_stars>0
import numpy as np
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
from keras.preprocessing import image as imageK
def load_image(img_path, shape):
    """Loads an image in Keras mode
Args:
img_path (string): path to image
shape (tuple<int>): shape of image
Returns:
        numpy.ndarray : preprocessed image tensor (batch of 1, scaled to [0, 1])
"""
img = imageK.load_img(img_path, target_size=shape, color_mode='rgb')
    return prepare_image(img, shape)
def prepare_image(image, shape):
"""Prepares image
Args:
image (numpy array): image data
shape (tuple<int>): shape of image
Returns:
tensor image : image
"""
img_tensor = imageK.img_to_array(image)
img_tensor = np.expand_dims(img_tensor, axis=0)
img_tensor /= 255.
return img_tensor | 2.78125 | 3 |
back-end/aula/admin.py | Cicero-Forks/lccv-desafio-final | 3 | 12763731 | from django.contrib import admin
from . import models
from . import actions
class FornecedoresForm(admin.ModelAdmin):
readonly_fields = ['id_user_cad', 'id_user_alt']
search_fields = ['razao_social', 'cnpj', 'email', 'telefone']
list_filter = ['id_user_cad', 'id_user_alt']
actions = [actions.atualiza_fornecedores]
def save_model(self, request, obj, form, change):
if change:
obj.id_user_alt = request.user
else:
obj.id_user_cad = request.user
obj.save()
admin.site.register(models.NaturezaDespesa)
admin.site.register(models.Projetos)
admin.site.register(models.Fornecedores, FornecedoresForm)
admin.site.register(models.Ordens)
admin.site.register(models.Vigencias)
admin.site.register(models.Contratos)
admin.site.register(models.ItensContrato)
admin.site.register(models.ItensOrdem) | 2.03125 | 2 |
atp_classes/Rentrak.py | davidottogonzalez/Data-Fetcher | 0 | 12763732 | import atp_classes, requests, json
class Rentrak:
__user_token = None
__current_user = None
def __init__(self, api_url=None, username=None, password=<PASSWORD>):
config = atp_classes.Config().get_config()
if api_url:
self.api_url = api_url
else:
self.api_url = config['api']['rentrak']['baseURL']
if config['api']['rentrak']['auth_token'] != '':
self.__user_token = config['api']['rentrak']['auth_token']
self.__current_user = config['api']['rentrak']['username']
else:
self.login(username, password)
def login(self, username=None, password=<PASSWORD>):
headers = {"Content-Type": "application/json"}
if username and password:
response = requests.post(self.api_url + '/auth/login', headers=headers,
data=json.dumps({"user_id": username, "password": password}))
if response.status_code != 200:
raise Exception('Error while logging in. ' + json.loads(response.text)['message'])
else:
self.__user_token = json.loads(response.text)['authtoken']
self.__current_user = username
else:
config = atp_classes.Config().get_config()
response = requests.post(self.api_url + '/auth/login', headers=headers, data=json.dumps(
{"user_id": config['api']['rentrak']['username'], "password": config['api']['rentrak']['password']}))
if response.status_code != 200:
raise Exception('Error while logging in. ' + json.loads(response.text)['message'])
else:
self.__user_token = json.loads(response.text)['authtoken']
self.__current_user = config['api']['rentrak']['username']
def search_networks(self, search):
headers = {"Content-Type": "application/json", "Authorization": "RAP " + self.__user_token}
response = requests.get(self.api_url + '/networks', params={"search": search}, headers=headers)
if response.status_code != 200:
raise Exception('Error while getting network info. ' + json.loads(response.text)['message'])
else:
return json.loads(response.text)
def search_tags(self, search):
headers = {"Content-Type": "application/json", "Authorization": "RAP " + self.__user_token}
response = requests.get(self.api_url + '/tags/', params={"search": search}, headers=headers)
if response.status_code != 200:
raise Exception('Error while getting tag info. ' + json.loads(response.text)['message'])
else:
return json.loads(response.text)
def get_all_metrics(self):
headers = {"Content-Type": "application/json", "Authorization": "RAP " + self.__user_token}
response = requests.get(self.api_url + '/metrics/', headers=headers)
if response.status_code != 200:
raise Exception('Error while getting metrics. ' + json.loads(response.text)['message'])
else:
return json.loads(response.text)
def get_reports(self):
headers = {"Content-Type": "application/json", "Authorization": "RAP " + self.__user_token}
response = requests.get(self.api_url + '/users/' + self.__current_user + '/reports/', headers=headers)
if response.status_code != 200:
raise Exception('Error while getting reports. ' + json.loads(response.text)['message'])
else:
return json.loads(response.text)
def get_tags(self, per_page=10, page_num=1, search=''):
headers = {"Content-Type": "application/json", "Authorization": "RAP " + self.__user_token}
params = {"search": search, "per_page": per_page, "page": page_num}
response = requests.get(self.api_url + '/tags/', headers=headers, params=params)
if response.status_code != 200:
raise Exception('Error while getting tags. ' + json.loads(response.text)['message'])
else:
return json.loads(response.text)
def get_all_tags(self):
page = 1
all_tags = []
continue_gathering = True
while continue_gathering:
next_tags = self.get_tags(page_num=page)
page += 1
if len(next_tags) == 0:
continue_gathering = False
else:
all_tags+= next_tags
return all_tags
def submit_report(self, data):
headers = {"Content-Type": "application/json", "Authorization": "RAP " + self.__user_token}
response = requests.post(self.api_url + '/users/' + self.__current_user + '/reports/',
headers=headers, data=data)
if response.status_code != 202:
raise Exception('Error while submitting report. ' + json.loads(response.text)['message'])
else:
location_url = response.headers['Location']
temp_split = location_url.split('/')
return_object = {"report_id":temp_split[-1]}
return return_object
def get_report_status(self, report_id):
headers = {"Content-Type": "application/json", "Authorization": "RAP " + self.__user_token}
response = requests.get(self.api_url + '/users/' + self.__current_user + '/reportqueue/' + report_id,
headers=headers)
if response.status_code != 200:
raise Exception('Error while getting report status. ' + json.loads(response.text)['message'])
else:
response_json = json.loads(response.text)
if response_json.has_key('status') and response_json['status'] == "completed":
return "Completed"
else:
return str(response_json['pct_complete']) + "% complete"
def get_report_rows(self, report_id):
headers = {"Content-Type": "application/json", "Authorization": "RAP " + self.__user_token}
response = requests.get(self.api_url + '/users/' + self.__current_user + '/reports/' + report_id + '/rows',
headers=headers)
if response.status_code != 200:
raise Exception('Error while getting report rows. ' + json.loads(response.text)['message'])
else:
return json.loads(response.text) | 2.671875 | 3 |
pycqed/analysis_v3/tomography_analysis.py | QudevETH/PycQED_py3 | 7 | 12763733 | <filename>pycqed/analysis_v3/tomography_analysis.py
import logging
log = logging.getLogger(__name__)
import itertools
import scipy as sp
import numpy as np
import qutip as qtp
import matplotlib as mpl
from collections import OrderedDict
from pycqed.analysis_v2 import tomography_qudev as tomo
from pycqed.analysis_v3 import plotting as plot_mod
from pycqed.analysis_v3 import helper_functions as hlp_mod
from pycqed.analysis_v3 import processing_pipeline as pp_mod
from pycqed.analysis_v3 import data_extraction as dat_extr_mod
from pycqed.analysis_v3 import data_processing as dat_proc_mod
from copy import deepcopy
import sys
pp_mod.search_modules.add(sys.modules[__name__])
def standard_qubit_pulses_to_rotations(pulse_list):
"""
Converts lists of n-tuples of standard PycQED single-qubit pulse names to
the corresponding rotation matrices on the n-qubit Hilbert space.
:param pulse_list: list of n-tuples. The tuples contain strings that should
match the keys in standard_pulses dict below.
:return list of len(pulse_list) qutip quantum objects representing the
products of the pulses in each n-tuple.
"""
standard_pulses = {
'I': qtp.qeye(2),
'X0': qtp.qeye(2),
'Z0': qtp.qeye(2),
'X180': qtp.sigmax(),
'mX180': qtp.rotation(qtp.sigmax(), -np.pi),
'Y180': qtp.sigmay(),
'mY180': qtp.rotation(qtp.sigmay(), -np.pi),
'X90': qtp.rotation(qtp.sigmax(), np.pi/2),
'mX90': qtp.rotation(qtp.sigmax(), -np.pi/2),
'Y90': qtp.rotation(qtp.sigmay(), np.pi/2),
'mY90': qtp.rotation(qtp.sigmay(), -np.pi/2),
'Z90': qtp.rotation(qtp.sigmaz(), np.pi/2),
'mZ90': qtp.rotation(qtp.sigmaz(), -np.pi/2),
'Z180': qtp.sigmaz(),
'mZ180': qtp.rotation(qtp.sigmaz(), -np.pi),
'CZ': qtp.Qobj(np.diag([1, 1, 1, -1]), dims=[[2, 2], [2, 2]])
}
rotations = [qtp.tensor(*[standard_pulses[pulse] for pulse in qb_pulses])
for qb_pulses in pulse_list]
for i in range(len(rotations)):
rotations[i].dims = [[d] for d in rotations[i].shape]
return rotations
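# Illustrative note (added): for example, standard_qubit_pulses_to_rotations(
# [('X90', 'I'), ('Y90', 'Y90')]) returns two 4x4 qutip operators, one per
# two-qubit pre-rotation, with dims flattened to [[4], [4]] by the loop above.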
def state_tomography_analysis(data_dict, keys_in,
estimation_types=('least_squares',
'max_likelihood'), **params):
"""
State tomography analysis. Extracts density matrices based on
estimation_types, calculates purity, concurrence, and fidelity to
rho_target, prepares probability table plot, density matrix plots,
pauli basis plots.
:param data_dict: OrderedDict containing data to be processed and where
processed data is to be stored
:param keys_in: list of key names or dictionary keys paths in
data_dict for the data to be analyzed (expects thresholded shots)
:param estimation_types: list of strings indicating the methods to use to
construct the density matrix. It will do all the estimation types in
this list.
    :param params: keyword arguments.
Expects to find either in data_dict or in params:
- meas_obj_names: list of measurement object names
- basis_rots: list/tuple of strings specifying tomography rotations
Ex: ('I', 'X90', 'Y90', 'X180'), ('I', 'X90', 'Y90')
- n_readouts or CalibrationPoints + basis_rots + preselection
condition. Number of segments including preselection.
If n_readouts is not provided it will try to estimate it from
CalibrationPoints + basis_rots + preselection condition.
n_readouts is the total number of readouts including
preselection.
Other possible keyword arguments:
- do_preselection or preparation_params. If the former is not
provided, it will try to take it from preparation_params.
If preparation_params are not found, it will default to False.
Specifies whether to do preselection on the data.
- observables: measurement observables, see docstring of hlp_mod.
get_observables. If not provided, it will default to hlp_mod.
get_observables. See required input params there.
- rho_target (qutip Qobj; default: None): target density matrix or
state vector as qutip object
- prepare_plotting (bool, default: True): whether to prepare
plot dicts
- do_plotting (bool, default: True): whether to plot the
plot dicts
- do_bootstrapping (bool, default: False): whether to run the
bootstrapping statistical error estimation (see the function
bootstrapping_state_tomography)
- Nbstrp (int): REQUIRED IF do_bootstrapping IS TRUE! Number of
bootstrapping cycles, ie sample size for estimating errors
:return: adds to data_dict the following quantities:
- if not already there: basis_rots, n_readouts, do_preselection,
observables
- probability_table, probability_table_filtered
- measurement_ops, cov_matrix_meas_obs
- all_measurement_results, all_measurement_operators,
all_cov_matrix_meas_obs
- est_type.rho, est_type.purity, (est_type.concurrence if
len(meas_obj_names) == 2), (est_type.fidelity if rho_target
is provided) for est_type in estimation_types
- plot_dicts if prepare_plotting; figures, axes if do_plotting
- Nbstrp and est_type.bootstrapping_fidelities for est_type in
estimation_types if do_bootstrapping.
Assumptions:
- the data indicated by keys_in is assumed to be thresholded shots
"""
meas_obj_names = hlp_mod.get_measurement_properties(
data_dict, props_to_extract=['mobjn'], enforce_one_meas_obj=False,
**params)
hlp_mod.pop_param('keys_out', data_dict, node_params=params)
cp = hlp_mod.get_measurement_properties(data_dict, props_to_extract=['cp'],
raise_error=False, **params)
basis_rots = hlp_mod.get_param('basis_rots', data_dict,
raise_error=True, **params)
if hlp_mod.get_param('basis_rots', data_dict) is None:
hlp_mod.add_param('basis_rots', basis_rots, data_dict, **params)
do_preselection = hlp_mod.get_param('do_preselection', data_dict,
**params)
if do_preselection is None:
prep_params = hlp_mod.get_param('preparation_params', data_dict,
default_value={}, **params)
do_preselection = \
prep_params.get('preparation_type', 'wait') == 'preselection'
hlp_mod.add_param('do_preselection', do_preselection, data_dict,
**params)
# get number of readouts
n_readouts = hlp_mod.get_param('n_readouts', data_dict, **params)
if n_readouts is None:
n_readouts = (do_preselection + 1) * (
len(basis_rots)**len(meas_obj_names) +
(len(cp.states) if cp is not None else 0))
hlp_mod.add_param('n_readouts', n_readouts, data_dict, **params)
# get observables
observables = hlp_mod.get_param('observables', data_dict, **params)
if observables is None:
hlp_mod.get_observables(data_dict, keys_out=['observables'],
**params)
observables = hlp_mod.get_param('observables', data_dict)
# get probability table
keys_in_extra = hlp_mod.get_param('keys_in_extra', data_dict,
default_value=[], **params)
dat_proc_mod.calculate_probability_table(data_dict, keys_in=keys_in+keys_in_extra,
keys_out=['probability_table'],
n_readouts=n_readouts,
observables=observables, **params)
# get measurement_ops and cov_matrix_meas_obs
measurement_ops = hlp_mod.get_param('measurement_ops', data_dict, **params)
correct_readout = hlp_mod.get_param('correct_readout', data_dict, **params)
if measurement_ops is None:
if cp is not None and (correct_readout or correct_readout is None):
dat_proc_mod.calculate_meas_ops_and_covariations_cal_points(
data_dict, keys_in, n_readouts=n_readouts,
keys_out=['measurement_ops', 'cov_matrix_meas_obs'],
observables=observables, **params)
else:
dat_proc_mod.calculate_meas_ops_and_covariations(
data_dict, keys_out=['measurement_ops', 'cov_matrix_meas_obs'],
meas_obj_names=meas_obj_names,
observables=observables,
correct_readout=correct_readout)
else:
if hlp_mod.get_param('measurement_ops', data_dict) is None:
hlp_mod.add_param('measurement_ops', measurement_ops, data_dict,
**params)
cov_matrix_meas_obs = hlp_mod.get_param('cov_matrix_meas_obs',
data_dict, **params)
if cov_matrix_meas_obs is None:
hlp_mod.add_param('cov_matrix_meas_obs',
np.diag(np.ones(len(meas_obj_names)**2)),
data_dict, **params)
else:
if hlp_mod.get_param('cov_matrix_meas_obs', data_dict) is None:
                hlp_mod.add_param('cov_matrix_meas_obs', cov_matrix_meas_obs,
                                  data_dict, **params)
# get all measurement ops, measurement results, and covariance matrices
all_msmt_ops_results_omegas(data_dict, observables, **params)
# get density matrices, purity, fidelity, concurrence
density_matrices(data_dict, estimation_types, **params)
# plotting
prepare_plotting = hlp_mod.pop_param('prepare_plotting', data_dict,
default_value=True, node_params=params)
do_plotting = hlp_mod.pop_param('do_plotting', data_dict,
default_value=True, node_params=params)
if prepare_plotting:
prepare_prob_table_plot(data_dict, do_preselection, **params)
for i, estimation_type in enumerate(estimation_types):
prepare_density_matrix_plot(data_dict, estimation_type,
plot_rho_target=(i == 0), **params)
prepare_pauli_basis_plot(data_dict, estimation_type, **params)
if do_plotting:
getattr(plot_mod, 'plot')(data_dict, keys_in=list(
data_dict['plot_dicts']), **params)
    # error estimation with bootstrapping
if hlp_mod.get_param('do_bootstrapping', data_dict, default_value=False,
**params):
hlp_mod.pop_param('do_bootstrapping', data_dict, default_value=False,
node_params=params)
bootstrapping_state_tomography(data_dict, keys_in, **params)
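# Illustrative sketch (assumption, not original code): a minimal invocation of
# state_tomography_analysis on a data_dict that already holds thresholded shots
# under the key names in shot_keys. The qubit names and basis rotations below
# are placeholders and must match the actual measurement.
def _example_state_tomography_call(data_dict, shot_keys):
    state_tomography_analysis(
        data_dict, keys_in=shot_keys,
        estimation_types=('least_squares', 'max_likelihood'),
        meas_obj_names=['qb1', 'qb2'], basis_rots=('I', 'X90', 'Y90'),
        prepare_plotting=False, do_plotting=False)
    # the fidelity to rho_target (if one was given) is stored per estimation type
    return hlp_mod.get_param('max_likelihood.fidelity', data_dict)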
def all_msmt_ops_results_omegas(data_dict, observables, probability_table=None,
**params):
"""
Calculates all_measurement_results, all_measurement_operators, and
all_cov_matrix_meas_obs from measurement_ops, cov_matrix_meas_obs
:param data_dict: OrderedDict containing data to be processed and where
processed data is to be stored
:param observables: measurement observables, see docstring of
hlp_mod.get_observables.
:param probability_table: dictionary with observables as keys and
normalized counts for each segment (excluding preselection) as values
(see dat_proc_mod.calculate_probability_table).
IF NONE, IT MUST EXIST IN data_dict.
:param params: keyword arguments
Expects to find either in data_dict or in params:
- meas_obj_names: list of measurement object names
- basis_rots (see docstring of state_tomography)
- measurement_ops: list of array corresponding to measurement
operators
- cov_matrix_meas_obs: covariance matrix
Other possible keyword arguments used if both probability_table and
prob_table_filter are None:
- do_preselection or preparation_params. If the former is not
provided, it will try to take it from preparation_params.
If preparation_params are not found, it will default to False.
Specifies whether to do preselection on the data.
- prob_table_filter: filter for the probability table. If not given
it will calculate it from meas_obj_names + basis_rots +
preselection condition
:return: adds to data_dict:
- all_measurement_results: itertools.chain of the probability table
- all_measurement_operators: see tomography_qudev.
rotated_measurement_operators
- and all_cov_matrix_meas_obs: sp.linalg.block_diag(
*[Omega] * len(probability_table[0]))
"""
meas_obj_names = hlp_mod.get_measurement_properties(
data_dict, props_to_extract=['mobjn'], enforce_one_meas_obj=False,
**params)
basis_rots = hlp_mod.get_param('basis_rots', data_dict,
raise_error=True, **params)
if probability_table is None:
prob_table_filter = hlp_mod.get_param('prob_table_filter', data_dict,
**params)
if prob_table_filter is None:
do_preselection = hlp_mod.get_param(
'do_preselection', data_dict, default_value=
hlp_mod.get_param('preparation_params', data_dict,
default_value={}, **params).get(
'preparation_type', 'wait') == 'preselection', **params)
def prob_table_filter(prob_table, pre=do_preselection,
basis_rots=basis_rots, n=len(meas_obj_names)):
prob_table = np.array(list(prob_table.values())).T
return prob_table[pre: (pre+1)*len(basis_rots)**n: (pre+1)]
dat_proc_mod.filter_data(data_dict, keys_in=['probability_table'],
keys_out=['probability_table_filtered'],
data_filter=prob_table_filter, **params)
probability_table = hlp_mod.get_param('probability_table_filtered',
data_dict)
try:
preselection_obs_idx = list(observables.keys()).index('pre')
except ValueError:
preselection_obs_idx = None
    observable_idxs = [i for i in range(len(observables))
                       if i != preselection_obs_idx]
    prob_table = probability_table.T[observable_idxs]
prob_table = prob_table.T
for i, v in enumerate(prob_table):
prob_table[i] = v / v.sum()
prob_table = prob_table.T
pulse_list = list(itertools.product(basis_rots,
repeat=len(meas_obj_names)))
rotations = tomo.standard_qubit_pulses_to_rotations(pulse_list)
rotations = [qtp.Qobj(U) for U in rotations]
msmt_ops = hlp_mod.get_param('measurement_ops', data_dict)
msmt_ops = [qtp.Qobj(F) for F in msmt_ops]
all_msmt_ops = tomo.rotated_measurement_operators(rotations, msmt_ops)
all_msmt_ops = list(itertools.chain(*np.array(
        all_msmt_ops, dtype=object).T))
hlp_mod.add_param('all_measurement_operators', all_msmt_ops, data_dict,
**params)
all_msmt_res = np.array(list(itertools.chain(*prob_table.T)))
hlp_mod.add_param('all_measurement_results', all_msmt_res, data_dict,
**params)
omegas = hlp_mod.get_param('cov_matrix_meas_obs', data_dict)
all_omegas = sp.linalg.block_diag(*[omegas] * len(prob_table[0]))
hlp_mod.add_param('all_cov_matrix_meas_obs', all_omegas, data_dict,
**params)
def density_matrices(data_dict,
estimation_types=('least_squares', 'max_likelihood'),
**params):
"""
Estimates density matrices from all_measurement_results,
all_measurement_operators, and all_cov_matrix_meas_obs using the
estimation methods in estimation_types.
:param data_dict: OrderedDict containing data to be processed and where
processed data is to be stored
:param estimation_types: list of strings indicating the methods to use to
construct the density matrix. It will do all the estimation types in
this list.
:param params: keyword arguments:
Expects to find either in data_dict or in params:
- meas_obj_names: list of measurement object names
- all_measurement_results, all_measurement_operators
(see all_msmt_ops_results_omegas)
- all_cov_matrix_meas_obs if use_covariance_matrix is True
Other possible keyword arguments:
- rho_target (qutip Qobj; default: None): target density matrix or
state vector as qutip object
- use_covariance_matrix (bool; default: False): whether to use the
covariance matrices in the estimations
- tolerance (float; default: None): custom tolerance threshold for
iterative maximum likelihood estimation.
- iterations (int; default: None): custom iteration threshold for
iterative maximum likelihood estimation.
- rho_guess (qutip Qobj; default: None): initial rho used in Maximum
likelihood estimation (mle) or iterative mle instead of default
:return: adds to data_dict
- rho_target if not already there
        - est_type.rho, est_type.purity, (est_type.concurrence if len(meas_obj_names) == 2),
(est_type.fidelity if rho_target is provided) for est_type in
estimation_types
"""
meas_obj_names = hlp_mod.get_measurement_properties(
data_dict, props_to_extract=['mobjn'], enforce_one_meas_obj=False,
**params)
rho_target = hlp_mod.get_param('rho_target', data_dict, **params)
if 'rho_target' not in data_dict:
hlp_mod.add_param('rho_target', rho_target, data_dict, **params)
for estimation_type in estimation_types:
if estimation_type == 'least_squares':
rho_ls = tomo.least_squares_tomography(
hlp_mod.get_param('all_measurement_results', data_dict,
raise_error=True, **params),
hlp_mod.get_param('all_measurement_operators', data_dict,
raise_error=True, **params),
hlp_mod.get_param('all_cov_matrix_meas_obs', data_dict)
if hlp_mod.get_param('use_covariance_matrix', data_dict,
default_value=False, **params) else None)
hlp_mod.add_param('least_squares.rho', rho_ls, data_dict, **params)
elif estimation_type == 'max_likelihood':
rho_guess = hlp_mod.get_param('rho_guess', data_dict, **params)
if rho_guess is None:
rho_guess = hlp_mod.get_param(
'least_squares.rho', data_dict, raise_error=True,
error_message='Maximum likelihood estimation needs a guess '
                              'rho but neither a rho_guess nor a '
'least_squares.rho was found.', **params)
rho_mle = tomo.mle_tomography(
hlp_mod.get_param('all_measurement_results', data_dict,
raise_error=True, **params),
hlp_mod.get_param('all_measurement_operators', data_dict,
raise_error=True, **params),
hlp_mod.get_param('all_cov_matrix_meas_obs', data_dict)
if hlp_mod.get_param('use_covariance_matrix', data_dict,
default_value=False, **params) else None,
rho_guess=rho_guess)
hlp_mod.add_param('max_likelihood.rho', rho_mle, data_dict, **params)
elif estimation_type == 'iterative_mle':
rho_imle = tomo.imle_tomography(
hlp_mod.get_param('all_measurement_results', data_dict,
raise_error=True, **params),
hlp_mod.get_param('all_measurement_operators', data_dict,
raise_error=True, **params),
hlp_mod.get_param('iterations', data_dict, **params),
hlp_mod.get_param('tolerance', data_dict, **params),
hlp_mod.get_param('rho_guess', data_dict, **params))
hlp_mod.add_param('iterative_mle.rho', rho_imle, data_dict, **params)
elif estimation_type == 'pauli_values':
rho_pauli = tomo.pauli_values_tomography(
hlp_mod.get_param('all_measurement_results', data_dict,
raise_error=True, **params),
[qtp.Qobj(F) for F in hlp_mod.get_param('measurement_ops',
data_dict)],
hlp_mod.get_param('basis_rots', data_dict))
hlp_mod.add_param('pauli_values.rho', rho_pauli, data_dict, **params)
else:
            raise ValueError(f'Unknown estimation_type "{estimation_type}".')
rho_meas = hlp_mod.get_param(f'{estimation_type}.rho', data_dict,
**params)
if rho_meas is not None:
hlp_mod.add_param(f'{estimation_type}.purity',
(rho_meas*rho_meas).tr().real, data_dict,
**params)
if rho_target is not None:
hlp_mod.add_param(f'{estimation_type}.fidelity',
fidelity(rho_meas, rho_target), data_dict,
**params)
if len(meas_obj_names) == 2:
hlp_mod.add_param(f'{estimation_type}.concurrence',
concurrence(rho_meas), data_dict,
**params)
def fidelity(rho1, rho2):
"""
Returns the fidelity between the two quantum states rho1 and rho2.
Uses the Jozsa definition (the smaller of the two), not the Nielsen-Chuang
definition.
F = Tr(√((√rho1) rho2 √(rho1)))^2
:param rho1: qtp.Qobj of measured rho
:param rho2: qtp.Qobj of target rho
"""
rho1 = tomo.convert_to_density_matrix(rho1).full()
rho2 = tomo.convert_to_density_matrix(rho2).full()
return sp.linalg.sqrtm(
sp.linalg.sqrtm(rho1) @ rho2 @ sp.linalg.sqrtm(rho1)).trace().real ** 2
def concurrence(rho):
"""
Calculates the concurrence of the two-qubit state rho given in the
qubits' basis according to https://doi.org/10.1103/PhysRevLett.78.5022
:param rho: qtp.Qobj of rho
"""
rho = tomo.convert_to_density_matrix(rho).full()
# convert to bell basis
b = [np.sqrt(0.5)*np.array(l) for l in
[[1, 0, 0, 1], [1j, 0, 0, -1j], [0, 1j, 1j, 0], [0, 1, -1, 0]]]
    rhobell = np.zeros((4, 4), dtype=complex)
for i in range(4):
for j in range(4):
rhobell[i, j] = b[j].conj().T @ rho @ b[i]
R = sp.linalg.sqrtm(
sp.linalg.sqrtm(rhobell) @ rhobell.conj() @ sp.linalg.sqrtm(rhobell))
counter = 0
while counter < 5:
# hack needed because of a strange bug on my computer where
# np.linalg.eigvals sometimes fails the first time.
try:
C = max(0, 2*np.linalg.eigvals(R).max() - R.trace())
break
except Exception as e:
if counter != 4:
pass
else:
raise e
counter += 1
if not isinstance(C, int):
C = C.real
return C
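# Illustrative sketch (assumption, not original code): the two metrics above
# evaluated on a two-qubit Bell state, for which both the self-fidelity and
# the concurrence should come out numerically close to 1.
def _example_fidelity_concurrence():
    bell = (qtp.tensor(qtp.basis(2, 0), qtp.basis(2, 0)) +
            qtp.tensor(qtp.basis(2, 1), qtp.basis(2, 1))).unit()
    rho_bell = bell * bell.dag()
    return fidelity(rho_bell, rho_bell), concurrence(rho_bell)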
def prepare_prob_table_plot(data_dict, exclude_preselection=False, **params):
"""
Prepares a plot of the probability table.
:param data_dict: OrderedDict containing data to be plotted and where
plot_dicts is to be stored
:param exclude_preselection: whether to exclude preselection segments
:param params: keyword arguments:
Expects to find either in data_dict or in params:
- probability_table: dictionary with observables as keys and
normalized counts for each segment as values
(see dat_proc_mod.calculate_probability_table).
- meas_obj_names: list of measurement object names
- observables: measurement observables (see docstring of hlp_mod.
get_observables).
- timestamps: list with the measurement timestamp
Other possible keyword arguments:
- prob_table_filter (func; default: [1::2] if exclude_preselection
else no filter): function for filtering probability_table
- obs_filter (func; default: np.arange(len(observables))): function
for filtering observables
:return: adds to data_dict: plot_dicts
"""
plot_dicts = OrderedDict()
figures_prefix = hlp_mod.get_param('figures_prefix', data_dict,
default_value='', **params)
if len(figures_prefix):
figures_prefix += '_'
probability_table = hlp_mod.get_param('probability_table', data_dict,
raise_error=True, **params)
probability_table = np.array(list(probability_table.values())).T
observables = hlp_mod.get_param('observables', data_dict,
raise_error=True, **params)
meas_obj_names = hlp_mod.get_measurement_properties(
data_dict, props_to_extract=['mobjn'], enforce_one_meas_obj=False,
**params)
# colormap which has a lot of contrast for small and large values
v = [0, 0.1, 0.2, 0.8, 1]
c = [(1, 1, 1),
(191/255, 38/255, 11/255),
(155/255, 10/255, 106/255),
(55/255, 129/255, 214/255),
(0, 0, 0)]
cdict = {'red': [(v[i], c[i][0], c[i][0]) for i in range(len(v))],
'green': [(v[i], c[i][1], c[i][1]) for i in range(len(v))],
'blue': [(v[i], c[i][2], c[i][2]) for i in range(len(v))]}
cm = mpl.colors.LinearSegmentedColormap('customcmap', cdict)
prob_table_filter = hlp_mod.get_param('prob_table_filter', data_dict,
**params)
if prob_table_filter is not None:
plt_data = prob_table_filter(probability_table).T
else:
if exclude_preselection:
plt_data = probability_table[1::2].T
else:
plt_data = probability_table.T
ylist = list(range(len(plt_data.T)))
obs_filter = hlp_mod.get_param(
'obs_filter', data_dict, default_value=np.arange(len(observables)),
**params)
plt_data = plt_data[obs_filter]
timestamps = hlp_mod.get_param('timestamps', data_dict, raise_error=True,
**params)
if len(timestamps) > 1:
title = f'{timestamps[0]} - {timestamps[-1]} {",".join(meas_obj_names)}'
else:
title = f'{timestamps[-1]} {",".join(meas_obj_names)}'
plot_dicts[f'{figures_prefix}counts_table_{"".join(meas_obj_names)}'] = {
'axid': "ptable",
'plotfn': 'plot_colorx',
'plotsize': [6.4, 4.8],
'xvals': np.arange(len(observables))[obs_filter],
'yvals': np.array(len(observables)*[ylist]),
'zvals': plt_data,
'xlabel': "Channels",
'ylabel': "Segments",
'zlabel': "Counts",
'zrange': [0, 1],
'title': title,
'xunit': None,
'yunit': None,
'xtick_loc': np.arange(len(observables))[obs_filter],
'xtick_labels': list(np.array(list(observables.keys()))[obs_filter]),
'origin': 'upper',
'cmap': cm,
'aspect': 'equal'
}
hlp_mod.add_param('plot_dicts', plot_dicts, data_dict,
add_param_method='update')
def prepare_density_matrix_plot(data_dict, estimation_type='least_squares',
plot_rho_target=True, leakage=None, **params):
"""
Prepares plot of the density matrix estimated with method estimation_type.
:param data_dict: OrderedDict containing data to be plotted and where
plot_dicts is to be stored
:param estimation_type: string indicating the method that was used to
estimate the density matrix. Assumes estimation_type.rho exists in
data_dict.
:param plot_rho_target: whether to prepare a separate figure for rho_target
:param leakage: dict of leakages as returned by
data_processing.py/extract_leakage_classified_shots
:param params: keyword arguments:
Expects to find either in data_dict or in params:
- meas_obj_names: list of measurement object names
Other possible keyword arguments:
- rho_ticklabels (list of strings; default: kets of basis states):
x- and y-ticklabels
- rho_colormap (colormap; default: plot_mod.default_phase_cmap()):
colormap
- rho_target (qutip Qobj; default: None): target density matrix or
state vector as qutip object
:return: adds to data_dict: plot_dicts
Assumptions:
- estimation_type.rho exists in data_dict
"""
plot_dicts = OrderedDict()
figures_prefix = hlp_mod.get_param('figures_prefix', data_dict,
default_value='', **params)
if len(figures_prefix):
figures_prefix += '_'
meas_obj_names = hlp_mod.get_measurement_properties(
data_dict, props_to_extract=['mobjn'], enforce_one_meas_obj=False,
**params)
d = 2**len(meas_obj_names)
xtick_labels = hlp_mod.get_param('rho_ticklabels', data_dict, **params)
ytick_labels = hlp_mod.get_param('rho_ticklabels', data_dict, **params)
if 2 ** (d.bit_length() - 1) == d:
nr_qubits = d.bit_length() - 1
fmt_string = '{{:0{}b}}'.format(nr_qubits)
labels = [fmt_string.format(i) for i in range(2 ** nr_qubits)]
if xtick_labels is None:
xtick_labels = ['$|' + lbl + r'\rangle$' for lbl in labels]
if ytick_labels is None:
ytick_labels = [r'$\langle' + lbl + '|$' for lbl in labels]
cmap = hlp_mod.get_param('rho_colormap', data_dict,
default_value=plot_mod.default_phase_cmap(),
**params)
rho_target = hlp_mod.get_param('rho_target', data_dict, **params)
if rho_target is not None:
rho_target = qtp.Qobj(rho_target)
if rho_target.type == 'ket':
rho_target = rho_target * rho_target.dag()
elif rho_target.type == 'bra':
rho_target = rho_target.dag() * rho_target
if plot_rho_target:
title = 'Target density matrix\n' + plot_mod.default_figure_title(
data_dict, ','.join(meas_obj_names))
plot_dicts[f'{figures_prefix}density_matrix_target'] = {
'plotfn': 'plot_bar3D',
'3d': True,
'3d_azim': -35,
'3d_elev': 35,
'xvals': np.arange(d),
'yvals': np.arange(d),
'zvals': np.abs(rho_target.full()),
'zrange': (0, 1),
'color': (0.5 * np.angle(rho_target.full()) / np.pi) % 1.,
'colormap': cmap,
'bar_widthx': 0.5,
'bar_widthy': 0.5,
'xtick_loc': np.arange(d),
'xtick_labels': xtick_labels,
'ytick_loc': np.arange(d),
'ytick_labels': ytick_labels,
'ctick_loc': np.linspace(0, 1, 5),
'ctick_labels': ['$0$', r'$\frac{1}{2}\pi$', r'$\pi$',
r'$\frac{3}{2}\pi$', r'$2\pi$'],
'clabel': 'Phase (rad)',
'title': title,
'bar_kws': dict(zorder=1),
}
rho_meas = hlp_mod.get_param(f'{estimation_type}.rho', data_dict,
raise_error=True)
if estimation_type == 'least_squares':
base_title = 'Least squares fit of the density matrix\n'
elif estimation_type == 'max_likelihood':
base_title = 'Maximum likelihood fit of the density matrix\n'
elif estimation_type == 'iterative_mle':
base_title = 'Iterative maximum likelihood fit of the density matrix\n'
elif estimation_type == 'pauli_values':
base_title = 'Density matrix reconstructed from measured Pauli values\n'
else:
base_title = 'Density matrix\n'
legend_entries = get_legend_artists_labels(data_dict,
estimation_type=estimation_type,
**params)
title = base_title + plot_mod.default_figure_title(
data_dict, ','.join(meas_obj_names))
    color_meas = (0.5 * np.angle(rho_meas.full()) / np.pi) % 1.
    if rho_target is not None:
        # stack the target bars behind the measured ones when a target is given
        zvals = np.concatenate((np.abs(rho_target.full()),
                                np.abs(rho_meas.full())))
        color_tar = (0.5 * np.angle(rho_target.full()) / np.pi) % 1.
        color = np.concatenate((1.1*np.ones_like(color_tar), color_meas))
    else:
        zvals = np.abs(rho_meas.full())
        color = color_meas
plot_dicts[f'{figures_prefix}density_matrix_' \
f'{estimation_type}_{"".join(meas_obj_names)}'] = {
'plotfn': 'plot_bar3D',
'3d': True,
'3d_azim': -35,
'3d_elev': 35,
'xvals': np.arange(d),
'yvals': np.arange(d),
'zvals': zvals,
'zrange': (0, 1),
'color': color,
'colormap': cmap,
'bar_widthx': 0.5,
'bar_widthy': 0.5,
'xtick_loc': np.arange(d),
'xtick_labels': xtick_labels,
'ytick_loc': np.arange(d),
'ytick_labels': ytick_labels,
'ctick_loc': np.linspace(0, 1, 5),
'ctick_labels': ['$0$', r'$\frac{1}{2}\pi$', r'$\pi$',
r'$\frac{3}{2}\pi$', r'$2\pi$'],
'clabel': 'Phase (rad)',
'title': title,
'do_legend': len(legend_entries),
'legend_entries': legend_entries,
'legend_kws': dict(loc='upper center', bbox_to_anchor=(0.5, -0.05),
ncol=2, frameon=False),
'set_edgecolor': True
}
hlp_mod.add_param('plot_dicts', plot_dicts, data_dict,
add_param_method='update')
def prepare_pauli_basis_plot(data_dict, estimation_type='least_squares',
leakage=None, **params):
"""
Prepares plot of the density matrix estimated with method estimation_type.
:param data_dict: OrderedDict containing data to be plotted and where
plot_dicts is to be stored
:param estimation_type: string indicating the method that was used to
estimate the density matrix. Assumes estimation_type.rho exists in
data_dict.
:param leakage: dict of leakages as returned by
data_processing.py/extract_leakage_classified_shots
:param params: keyword arguments:
Expects to find either in data_dict or in params:
- meas_obj_names: list of measurement object names
Other possible keyword arguments:
- rho_target (qutip Qobj; default: None): target density matrix or
state vector as qutip object
:return: adds to data_dict: plot_dicts
Assumptions:
- estimation_type.rho exists in data_dict
"""
plot_dicts = OrderedDict()
figures_prefix = hlp_mod.get_param('figures_prefix', data_dict,
default_value='', **params)
if len(figures_prefix):
figures_prefix += '_'
rho_meas = hlp_mod.get_param(f'{estimation_type}.rho', data_dict,
raise_error=True)
rho_target = hlp_mod.get_param('rho_target', data_dict, **params)
meas_obj_names = hlp_mod.get_measurement_properties(
data_dict, props_to_extract=['mobjn'], enforce_one_meas_obj=False,
**params)
nr_qubits = len(meas_obj_names)
yexp = tomo.density_matrix_to_pauli_basis(rho_meas)
labels = list(itertools.product(*[['I', 'X', 'Y', 'Z']]*nr_qubits))
labels = [''.join(label_list) for label_list in labels]
if nr_qubits == 1:
order = [1, 2, 3]
elif nr_qubits == 2:
order = [1, 2, 3, 4, 8, 12, 5, 6, 7, 9, 10, 11, 13, 14, 15]
elif nr_qubits == 3:
order = [1, 2, 3, 4, 8, 12, 16, 32, 48] + \
[5, 6, 7, 9, 10, 11, 13, 14, 15] + \
[17, 18, 19, 33, 34, 35, 49, 50, 51] + \
[20, 24, 28, 36, 40, 44, 52, 56, 60] + \
[21, 22, 23, 25, 26, 27, 29, 30, 31] + \
[37, 38, 39, 41, 42, 43, 45, 46, 47] + \
[53, 54, 55, 57, 58, 59, 61, 62, 63]
else:
order = np.arange(4**nr_qubits)[1:]
if estimation_type == 'least_squares':
fit_type = 'least squares fit\n'
elif estimation_type == 'max_likelihood':
fit_type = 'maximum likelihood estimation\n'
elif estimation_type == 'iterative_mle':
fit_type = 'iterative maximum likelihood estimation'
elif estimation_type == 'pauli_values':
fit_type = 'measured pauli values'
else:
fit_type = '\n'
legend_entries = get_legend_artists_labels(data_dict,
estimation_type=estimation_type,
**params)
figure_name = f'{figures_prefix}' \
f'pauli_basis_{estimation_type}_{"".join(meas_obj_names)}'
plot_dicts[figure_name] = {
'plotfn': 'plot_bar',
'plotsize': (4.5, 3),
'xcenters': np.arange(len(order)),
'xwidth': 0.4,
'xrange': (-1, len(order)),
'yvals': np.array(yexp)[order],
'xlabel': r'Pauli operator, $\hat{O}$',
'ylabel': r'Expectation value, $\mathrm{Tr}(\hat{O} \hat{\rho})$',
'title': 'Pauli operators, ' + fit_type +
plot_mod.default_figure_title(data_dict,
','.join(meas_obj_names)),
'yrange': (-1.1, 1.1),
'xtick_loc': np.arange(4**nr_qubits - 1),
'xtick_rotation': 90,
'xtick_labels': np.array(labels)[order],
'bar_kws': dict(zorder=10),
'setlabel': 'Fit to experiment',
'do_legend': True
}
if nr_qubits > 2:
plot_dicts[figure_name]['plotsize'] = (10, 5)
if rho_target is not None:
rho_target = qtp.Qobj(rho_target)
ytar = tomo.density_matrix_to_pauli_basis(rho_target)
plot_dicts[f'{figures_prefix}pauli_basis_target_{estimation_type}_' \
f'{"".join(meas_obj_names)}'] = {
'plotfn': 'plot_bar',
'fig_id': figure_name,
'xcenters': np.arange(len(order)),
'xwidth': 0.8,
'yvals': np.array(ytar)[order],
'xtick_loc': np.arange(len(order)),
'xtick_labels': np.array(labels)[order],
'bar_kws': dict(color='0.8', zorder=0),
'setlabel': 'Target values',
'legend_entries': legend_entries,
'legend_kws': dict(loc='center left', bbox_to_anchor=(1, 0.5),
ncol=1, frameon=False),
'do_legend': True
}
else:
plot_dicts[figure_name].update({
'legend_entries': legend_entries,
'legend_kws': dict(loc='center left', bbox_to_anchor=(1, 0.5),
ncol=2, frameon=False)})
hlp_mod.add_param('plot_dicts', plot_dicts, data_dict,
add_param_method='update')
def get_legend_artists_labels(data_dict, estimation_type='least_squares',
leakage=None, **params):
rho_target = hlp_mod.get_param('rho_target', data_dict, **params)
meas_obj_names = hlp_mod.get_measurement_properties(
data_dict, props_to_extract=['mobjn'], enforce_one_meas_obj=False,
**params)
d = len(meas_obj_names)**2
empty_artist = mpl.patches.Rectangle((0, 0), 0, 0, visible=False)
purity = hlp_mod.get_param(f'{estimation_type}.purity', data_dict)
legend_entries = []
if purity is not None:
legend_entries += [(empty_artist,
r'Purity, $Tr(\rho^2) = {:.1f}\%$'.format(
100 * purity))]
if rho_target is not None:
fidelity = hlp_mod.get_param(f'{estimation_type}.fidelity', data_dict)
if fidelity is not None:
legend_entries += [
(empty_artist, r'Fidelity, $F = {:.1f}\%$'.format(
100 * fidelity))]
if d == 4:
concurrence = hlp_mod.get_param(f'{estimation_type}.concurrence',
data_dict)
if concurrence is not None:
legend_entries += [
(empty_artist, r'Concurrence, $C = {:.2f}$'.format(
concurrence))]
if leakage is None:
keys_in_leakage = hlp_mod.get_param('keys_in_leakage', data_dict,
**params)
if keys_in_leakage is not None:
leakage = hlp_mod.get_param(keys_in_leakage[0], data_dict)
if leakage is not None:
legend_entries += [
            (empty_artist, rf'Leakage, $L_{{{key}}} = {100*leak:.2f}\%$')
for key, leak in leakage.items()]
return legend_entries
def process_tomography_analysis(data_dict, Uideal=None,
n_qubits=None, prep_pulses_list=None,
estimation_types=('least_squares',
'max_likelihood'),
verbose=False, **params):
"""
Process tomography analysis. Extracts chi and error of gate_of_interest or
by comparing to Uideal.
:param data_dict: OrderedDict containing the keys [''.join(pp) for pp in
prep_pulses_list]. The values corresponding to these keys must either:
- be data_dicts from running state tomography analysis for each of
these prep_pulses
- or must contain est_type.rho for est_type in estimation_types (i.e.
the density matrices from doing state tomography for each prep state)
- or must contain the key measured_rhos containing a dict of the form
{est_type.rho: (list of meas ops) for est_type in estimation_types}
:param Uideal: qutip Qobj of the ideal unitary operator for the process.
Can also be specified with process_name, see keyword arguments below.
:param n_qubits: number of qubits
    :param prep_pulses_list: list of tuples with length nr_qubits containing
strings indicating the preparation pulses for each state tomography
measurement. If not specified, it will be constructed from product of
basis_rots.
:param estimation_types: list of strings indicating the methods that were
        used to construct the density matrices stored in data_dict[prep_pulses].
This function will do process tomo for all the estimation types in
this list.
:param verbose: whether to show progress print statements
:param params: keyword arguments
Expects to find either in params, data_dict, or metadata:
- process_name: name of the process Uideal for which the error is
estimated. process_name will be used in the key name for storing
the results, so the user must ensure process_name corresponds
to the process Uideal for meaningful storing names.
If Uideal is None, specifying process_name = 'CZ' or 'CNOT'
will create the corresponding Uideal. Other gates are not
recognized yet.
- only if n_qubits is None:
- meas_obj_names: list of measurement object names
- only if prep_pulses_list is None:
- basis_rots: list/tuple of strings specifying the list of
pulse names used to construct the prep_pulses list
Ex: ('I', 'X90', 'Y90', 'X180'), ('I', 'X90', 'Y90')
Other possible keyword arguments:
- measured_rhos as {est_type.rho: list of meas ops for est_type
in estimation_types}
:return: adds to data_dict:
- chi_{process_name}.{estimation_type} and
measured_error_{process_name}.{estimation_type} for estimation_type
in estimation_types.
"""
if n_qubits is None:
meas_obj_names = hlp_mod.get_measurement_properties(
data_dict, props_to_extract=['mobjn'], enforce_one_meas_obj=False,
**params)
n_qubits = len(meas_obj_names)
process_name = hlp_mod.get_param('process_name', data_dict,
raise_error=True, **params)
if Uideal is None:
if process_name == 'CZ':
Uideal = qtp.qip.operations.cphase(np.pi)
elif process_name == 'CNOT':
Uideal = qtp.qip.operations.cnot()
else:
raise ValueError(f'Unknown gate of interest {process_name}. '
f'Please provide the process unitary, Uideal.')
chi_ideal = qtp.to_chi(Uideal)/16
# add ideal chi matrix to data_dict
hlp_mod.add_param(f'chi_ideal_{process_name}',
chi_ideal.full(), data_dict, **params)
if prep_pulses_list is None:
basis_rots = hlp_mod.get_param(
'basis_rots', data_dict, raise_error=True,
error_message='Either prep_pulses_list or basis_rots needs to be '
'provided.', **params)
if hlp_mod.get_param('basis_rots', data_dict) is None:
hlp_mod.add_param('basis_rots', basis_rots, data_dict, **params)
prep_pulses_list = list(itertools.product(basis_rots, repeat=n_qubits))
meas_density_matrices = hlp_mod.get_param('measured_rhos', data_dict,
default_value={}, **params)
for estimation_type in estimation_types:
# get lambda array
if verbose:
print()
print(f'From {estimation_type} estimation')
print('Getting lambda array')
# get measured density matrices
measured_rhos = meas_density_matrices.get(estimation_type, None)
if measured_rhos is None:
measured_rhos = len(prep_pulses_list) * ['']
for i, prep_pulses in enumerate(prep_pulses_list):
prep_str = ''.join(prep_pulses)
if verbose:
print(prep_str)
measured_rhos[i] = \
hlp_mod.get_param(
f'{prep_str}.{estimation_type}.rho',
data_dict, raise_error=True,
error_message=f'Data for preparation pulses '
f'{prep_str} was not found in '
f'data_dict.').full().flatten()
else:
for i, mrho in enumerate(measured_rhos):
if isinstance(mrho, qtp.qobj.Qobj):
measured_rhos[i] = mrho.full().flatten()
elif mrho.ndim > 1:
measured_rhos[i] = mrho.flatten()
measured_rhos = np.asarray(measured_rhos)
# get density matrices for the preparation states
U1s = {pulse: standard_qubit_pulses_to_rotations([(pulse,)])[0]
for pulse in ['X90', 'Y90', 'mX90', 'mY90', 'I',
'X180', 'Y180', 'mY180', 'mX180']}
preped_rhos = len(prep_pulses_list) * ['']
preped_rhos_flatten = len(prep_pulses_list) * ['']
for i, prep_pulses in enumerate(prep_pulses_list):
prep_str = prep_pulses
if not isinstance(prep_pulses, str):
prep_str = ''.join(prep_pulses)
if verbose:
print(prep_str, [U1s[pp] for pp in prep_pulses])
psi_target = (qtp.tensor([U1s[pp] for pp in prep_pulses]) *
qtp.tensor(n_qubits*[qtp.basis(2)]))
rho_target = psi_target*psi_target.dag()
preped_rhos[i] = rho_target
preped_rhos_flatten[i] = rho_target.full().flatten()
preped_rhos_flatten = np.asarray(preped_rhos_flatten)
lambda_array = np.dot(measured_rhos, np.linalg.inv(preped_rhos_flatten))
# get beta array
if verbose:
            print('Getting beta array')
standard_nqb_pauli_labels = list(itertools.product(
['I', 'X180', 'Y180', 'Z180'], repeat=n_qubits))
standard_nqb_pauli_ops = standard_qubit_pulses_to_rotations(
standard_nqb_pauli_labels)
if verbose:
print('len(standard_nqb_pauli_labels) ',
len(standard_nqb_pauli_labels))
print('len(standard_nqb_pauli_ops) ',
len(standard_nqb_pauli_ops))
prepared_rhos_rotated = []
cnt = 0
for i, prepared_rho in enumerate(preped_rhos):
for plhs in standard_nqb_pauli_ops:
plhs.dims = 2*[n_qubits*[2]]
for prhs in standard_nqb_pauli_ops:
prhs.dims = 2*[n_qubits*[2]]
cnt += 1
prepared_rhos_rotated += [
(plhs*prepared_rho*prhs.dag()).full().flatten()]
prepared_rhos_rotated = np.asarray(prepared_rhos_rotated)
if verbose:
print('prepared_rhos_rotated.shape ', prepared_rhos_rotated.shape)
print('preped_rhos_flatten.shape ', preped_rhos_flatten.shape)
beta_array = np.dot(prepared_rhos_rotated,
np.linalg.inv(preped_rhos_flatten))
# get chi matrix
if verbose:
print()
print('Getting chi matrix')
chunck_size = len(standard_nqb_pauli_ops)**2
beta_array_reshaped = np.zeros(shape=(len(preped_rhos)**2, chunck_size),
dtype='complex128')
for i in range(len(preped_rhos)):
beta_array_reshaped[i*len(preped_rhos): (i+1)*len(preped_rhos), :] = \
beta_array[i*chunck_size:(i+1)*chunck_size, :].T
if verbose:
print(beta_array[i*chunck_size:(i+1)*chunck_size, :].T.shape)
if verbose:
print('beta_array_reshaped.shape ', beta_array_reshaped.shape)
print('lambda_array.flatten().size ', lambda_array.flatten().size)
chi = np.linalg.solve(beta_array_reshaped, lambda_array.flatten())
chi = chi.reshape(chi_ideal.shape)
chi_qtp = qtp.Qobj(chi, dims=chi_ideal.dims)
# add found chi matrix to data_dict
hlp_mod.add_param(f'chi_{process_name}.{estimation_type}',
chi_qtp.full(), data_dict, **params)
# add gate error to data_dict
hlp_mod.add_param(f'measured_error_{process_name}.{estimation_type}',
1-np.real(qtp.process_fidelity(chi_qtp, chi_ideal)),
data_dict, **params)
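# Illustrative sketch (assumption, not original code): running the process
# tomography step once data_dict already holds one state-tomography result per
# preparation key ('II', 'IX90', ...). The CZ unitary is passed explicitly and
# the stored error key follows the process_name given below.
def _example_process_tomography_call(data_dict):
    process_tomography_analysis(
        data_dict, Uideal=qtp.qip.operations.cphase(np.pi), n_qubits=2,
        basis_rots=('I', 'X90', 'Y90', 'X180'), process_name='CZ')
    return hlp_mod.get_param('measured_error_CZ.max_likelihood', data_dict)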
def bootstrapping(data_dict=None, keys_in=None, measured_data=None, **params):
"""
Does one round of resampling of measured_data using a uniform distribution.
:param data_dict: OrderedDict containing data to be processed
:param keys_in: list of channel names or dictionary paths leading to
data to be processed of the form (n_readouts*n_shots, n_qubits).
:param measured_data: array of shape (n_readouts*n_shots, n_qubits)
containing raw data shots
:param params: keyword arguments:
n_readouts (required): number of segments including preselection
n_shots (required): number of data shots per segment
preselection: whether preselection was used
:return: array of shape (n_readouts*n_shots, n_qubits) with resampled raw
data shots.
"""
if measured_data is None:
if data_dict is None or keys_in is None:
raise ValueError('Make sure both data_dict and keys_in are '
'specified. Or provide measured_data.')
data_to_proc_dict = hlp_mod.get_data_to_process(data_dict, keys_in)
measured_data = np.concatenate([arr[:, np.newaxis] for arr in
list(data_to_proc_dict.values())],
axis=1)
if data_dict is None:
data_dict = {}
n_readouts = hlp_mod.get_param('n_readouts', data_dict, raise_error=True,
**params)
n_shots = hlp_mod.get_param('n_shots', data_dict, raise_error=True,
**params)
preselection = hlp_mod.get_param('preselection', data_dict,
default_value=False, **params)
sample_i = np.zeros(measured_data.shape)
for seg in range(n_readouts)[preselection::preselection+1]:
sample = deepcopy(measured_data[seg::n_readouts, :])
assert len(sample) == n_shots
# resample with replacement the shots for the segment seg
p = np.random.choice(np.arange(n_shots), n_shots)
sample_i[seg::n_readouts, :] = sample[p]
# preselection
if preselection:
sample = deepcopy(measured_data[seg-1::n_readouts, :])
assert len(sample) == n_shots
sample_i[seg-1::n_readouts, :] = sample[p]
return sample_i
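# Illustrative sketch (assumption, not original code): resampling a synthetic
# shot record with the helper above, mirroring how it is called from
# bootstrapping_state_tomography below (3 qubits, 10 segments, 50 shots each).
def _example_bootstrapping_resample():
    fake_shots = np.random.randint(0, 2, size=(10 * 50, 3))
    return bootstrapping(measured_data=fake_shots, n_readouts=10, n_shots=50,
                         preselection=False)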
def bootstrapping_state_tomography(data_dict, keys_in, store_rhos=False,
verbose=False, **params):
"""
Computes bootstrapping statistics of the density matrix fidelity.
:param data_dict: OrderedDict containing thresholded shots specified by
keys_in, and where processed results will be stored
:param keys_in: list of key names or dictionary keys paths in
data_dict for the data to be analyzed (expects thresholded shots)
:param store_rhos: whether to store the density matrices in addition to
the bootstrapping fidelities.
:param verbose: whether to show progress print statements
:param params: keyword arguments
Expects to find either in data_dict or in params:
- Nbstrp: int specifying the number of bootstrapping cycles,
i.e. sample size for estimating errors, the number of times
the raw data is resampled
            - timestamps: list with the timestamps of the state tomography measurement
:return: stores in data_dict:
- {estimation_type}.bootstrapping_fidelities
- (optionally) {estimation_type}.bootstrapping_rhos
for estimation_type in estimation_types
Assumptions:
- CURRENTLY ONLY SUPPORTS DATA FROM HDF FILES!
- !! This function calls state_tomography_analysis so all required input
params needed there must also be here
"""
Nbstrp = hlp_mod.get_param('Nbstrp', data_dict, raise_error=True, **params)
data_to_proc_dict = hlp_mod.get_data_to_process(data_dict, keys_in)
prep_params = hlp_mod.get_param('preparation_params', data_dict,
default_value={}, **params)
preselection = prep_params.get('preparation_type', 'wait') == 'preselection'
n_readouts = hlp_mod.get_param('n_readouts', data_dict, raise_error=True,
**params)
raw_data = np.concatenate([np.reshape(arr, (len(arr), 1))
for arr in data_to_proc_dict.values()],
axis=1)
    n_shots = raw_data.shape[0] // n_readouts
timestamp = hlp_mod.get_param('timestamps', data_dict, raise_error=True,
**params)
if len(timestamp) > 1:
raise ValueError(f'Bootstrapping can only be done for one data file. '
f'{len(timestamp)} timestamps were found.')
data_dict_temp = {}
hlp_mod.add_param('cal_points',
hlp_mod.get_param('cal_points', data_dict, **params),
data_dict_temp)
hlp_mod.add_param('meas_obj_value_names_map',
hlp_mod.get_param('meas_obj_value_names_map',
data_dict, **params),
data_dict_temp)
hlp_mod.add_param('preparation_params',
hlp_mod.get_param('preparation_params',
data_dict, **params),
data_dict_temp)
hlp_mod.add_param('rho_target',
hlp_mod.get_param('rho_target', data_dict),
data_dict_temp)
data_dict_temp = dat_extr_mod.extract_data_hdf(timestamps=timestamp,
data_dict=data_dict_temp)
estimation_types = hlp_mod.get_param('estimation_types', data_dict,
default_value=('least_squares',
'max_likelihood'),
**params)
fidelities = {est_type: np.zeros(Nbstrp) for est_type in estimation_types}
if store_rhos:
rhos = {est_type: Nbstrp*[''] for est_type in estimation_types}
params.pop('do_plotting', False)
params.pop('prepare_plotting', False)
replace_value = params.pop('replace_value', False)
# do bootstrapping Nbstrp times
for n in range(Nbstrp):
if verbose:
print('Bootstrapping run state tomo: ', n)
sample_i = bootstrapping(measured_data=raw_data, n_readouts=n_readouts,
n_shots=n_shots, preselection=preselection)
for i, keyi in enumerate(data_to_proc_dict):
hlp_mod.add_param(keyi, sample_i[:, i], data_dict_temp,
add_param_method='replace')
state_tomography_analysis(data_dict_temp, keys_in=keys_in,
do_plotting=False, prepare_plotting=False,
replace_value=True, **params)
for estimation_type in estimation_types:
fidelities[estimation_type][n] = hlp_mod.get_param(
f'{estimation_type}.fidelity', data_dict_temp, raise_error=True)
if store_rhos:
rhos[estimation_type][n] = hlp_mod.get_param(
f'{estimation_type}.rho', data_dict_temp, raise_error=True)
params['replace_value'] = replace_value
hlp_mod.add_param('Nbstrp', Nbstrp, data_dict, **params)
for estimation_type in fidelities:
hlp_mod.add_param(f'{estimation_type}.bootstrapping_fidelities',
fidelities[estimation_type], data_dict, **params)
if store_rhos:
hlp_mod.add_param(f'{estimation_type}.bootstrapping_rhos',
rhos[estimation_type], data_dict, **params)
def bootstrapping_process_tomography(
data_dict, keys_in, Nbstrp, gate_name='CZ', Uideal=None,
estimation_types=('least_squares', 'max_likelihood'),
prep_pulses_list=None, verbose=False, **params):
"""
Computes bootstrapping statistics of the error of gate_name.
:param data_dict: OrderedDict containing thresholded shots specified by
keys_in, and where processed results will be stored
:param keys_in: list of key names or dictionary keys paths in
data_dict for the data to be analyzed (expects thresholded shots)
:param Nbstrp: int specifying the number of bootstrapping cycles,
i.e. sample size for estimating errors, the number of times the raw data
is resampled
:param gate_name: name of the gate for which the error is estimated.
MUST CORRESPOND TO Uideal IF THE LATTER IS PROVIDED, since gate_name
will be used in the key name for storing the results
:param Uideal: qutip Qobj of the ideal unitary operator for gate_name
:param estimation_types: list of strings indicating the methods that were
        used to construct the density matrices stored in data_dict[prep_pulses].
This function will do process tomo for all the estimation types in
this list.
    :param prep_pulses_list: list of tuples with length nr_qubits containing
strings indicating the preparation pulses for each state tomography
measurement. If not specified, it will be constructed from product of
basis_rots.
:param verbose: whether to show progress print statements
:param params: keyword_arguments
:return: stores in data_dict:
- bootstrapping_errors_{gate_name}.{estimation_type} for estimation_type
in estimation_types
Assumptions:
- CURRENTLY ONLY SUPPORTS DATA FROM HDF FILES!
- !! This function calls bootstrapping_state_tomography and
process_tomography_analysis so all required input params needed
there must also be here
"""
if prep_pulses_list is None:
meas_obj_names = hlp_mod.get_measurement_properties(
data_dict, props_to_extract=['mobjn'], enforce_one_meas_obj=False,
**params)
n_qubits = len(meas_obj_names)
basis_rots = hlp_mod.get_param(
'basis_rots', data_dict, raise_error=True,
error_message='Either prep_pulses_list or basis_rots needs to be '
'provided.', **params)
if hlp_mod.get_param('basis_rots', data_dict) is None:
hlp_mod.add_param('basis_rots', basis_rots, data_dict, **params)
prep_pulses_list = list(itertools.product(basis_rots, repeat=n_qubits))
replace_value = params.pop('replace_value', False)
errors = {est_type: np.zeros(Nbstrp) for est_type in estimation_types}
for n in range(Nbstrp):
if verbose:
print('Bootstrapping run process tomo: ', n)
data_dict_temp = {}
measured_rhos = {est_type: len(prep_pulses_list) * [''] for
est_type in estimation_types}
for p, prep_pulses in enumerate(prep_pulses_list):
data_dict_state_tomo = hlp_mod.get_param(''.join(prep_pulses),
data_dict,
raise_error=True)
bootstrapping_state_tomography(data_dict_state_tomo, keys_in,
estimation_types=estimation_types,
Nbstrp=1, replace_value=True,
store_rhos=True, **params)
for estimation_type in estimation_types:
measured_rhos[estimation_type][p] = hlp_mod.get_param(
f'{estimation_type}.bootstrapping_rhos',
data_dict_state_tomo)[0]
process_tomography_analysis(data_dict_temp,
prep_pulses_list=prep_pulses_list,
measured_rhos=measured_rhos,
estimation_types=estimation_types,
                                    process_name=gate_name, Uideal=Uideal,
replace_value=True, **params)
for estimation_type in estimation_types:
errors[estimation_type][n] = hlp_mod.get_param(
f'measured_error_{gate_name}.{estimation_type}',
data_dict_temp)
params['replace_value'] = replace_value
hlp_mod.add_param('Nbstrp', Nbstrp, data_dict, **params)
for estimation_type in errors:
hlp_mod.add_param(
f'bootstrapping_errors_{gate_name}.{estimation_type}',
errors[estimation_type], data_dict, **params)
def get_tomo_data_subset(data_dict, keys_in, preselection=True, **params):
meas_obj_names = hlp_mod.get_measurement_properties(
data_dict, props_to_extract=['mobjn'], enforce_one_meas_obj=False,
**params)
n = len(meas_obj_names)
ignore_extra_seqs = hlp_mod.get_param(
'ignore_extra_sequences', data_dict, **params, default_value=False)
init_rots_basis = hlp_mod.get_param('init_rots_basis', data_dict, **params)
prep_pulses_list = list(itertools.product(init_rots_basis, repeat=n))
final_rots_basis = hlp_mod.get_param('final_rots_basis', data_dict, **params)
cal_points = hlp_mod.get_measurement_properties(data_dict,
props_to_extract=['cp'])
total_nr_segments = (len(final_rots_basis)**n + len(cal_points.states)) * \
(preselection + 1)
nr_shots = hlp_mod.get_param('nr_shots', data_dict, **params)
if nr_shots is None:
detectors = hlp_mod.get_param('exp_metadata.Detector Metadata.detectors',
data_dict)
nr_shots = detectors[list(detectors)[0]].get('nr_shots', None)
if nr_shots is None:
raise ValueError('Please provide nr_shots.')
data_to_proc_dict = hlp_mod.get_data_to_process(data_dict, keys_in)
for keyi, data in data_to_proc_dict.items():
if ignore_extra_seqs:
data = data[:len(init_rots_basis)**n * nr_shots*total_nr_segments]
data_reshaped = data.reshape((
len(init_rots_basis)**n, nr_shots*total_nr_segments))
if len(prep_pulses_list) != data_reshaped.shape[0]:
raise ValueError(f'len(prep_pulses_list)={len(prep_pulses_list)} '
                             f'does not match the data shape {data_reshaped.shape}.')
for prep_pulses, row in zip(prep_pulses_list, data_reshaped):
hlp_mod.add_param(f'{"".join(prep_pulses)}.{keyi}',
row, data_dict, **params)
| 2.28125 | 2 |
mailase/api/controllers/root.py | greghaynes/Mailase | 1 | 12763734 | import os
from pecan import conf
from pecan.rest import RestController
from wsmeext.pecan import wsexpose
from mailase.api.model import (Mail,
Mailbox,
MailBrief,
MailSearchRecentResult)
import mailase.search.dbapi as search_api
class NotFound(Exception):
code = 404
msg = "Not Found"
def __init__(self):
super(NotFound, self).__init__()
class MailboxController(RestController):
@wsexpose([Mailbox])
def index(self):
mailboxes = []
for entry in os.listdir(conf.mail.maildirs):
mailbox = Mailbox(entry)
if mailbox.exists():
mailboxes.append(mailbox)
return mailboxes
@wsexpose(Mailbox, str, int, int)
def get(self, mailbox_id, offset=None, limit=None):
mailbox = Mailbox(mailbox_id)
if not mailbox.exists():
raise NotFound
return mailbox
class SearchRecentController(RestController):
@wsexpose(MailSearchRecentResult, int, int)
def index(self, offset=None, limit=None):
offset = offset or 0
limit = limit or 100
res = search_api.get_recently_modified(offset, limit)
briefs = [MailBrief.inflate(x['brief']) for x in res]
return MailSearchRecentResult(offset, limit, briefs)
class SearchController(RestController):
    recent = SearchRecentController()
@wsexpose(bool)
def index(self):
reachable = search_api.server_is_reachable()
return reachable
class MailController(RestController):
@wsexpose(Mail, str, str)
def get(self, mailbox_id, mail_id):
try:
mail = Mail.from_id(mailbox_id, mail_id)
except ValueError:
raise NotFound
return mail
class RootController(object):
mailboxes = MailboxController()
mail = MailController()
search = SearchController()
| 2.15625 | 2 |
pychron/pipeline/nodes/base.py | ASUPychron/pychron | 31 | 12763735 | <filename>pychron/pipeline/nodes/base.py
# ===============================================================================
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import Bool, Any, List, Str
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.column_sorter_mixin import ColumnSorterMixin
from pychron.core.helpers.traitsui_shortcuts import okcancel_view
class BaseNode(ColumnSorterMixin):
name = "Base"
enabled = Bool(True)
visited = Bool(False)
skip_configure = Bool(False)
options_klass = None
options = Any
auto_configure = Bool(True)
configurable = Bool(True)
active = Bool(False)
# metadata = Event
_manual_configured = Bool(False)
# analyses = List
unknowns = List
references = List
required = List
index = -1
skip_meaning = Str
use_state_unknowns = True
use_state_references = True
def __init__(self, *args, **kw):
super(BaseNode, self).__init__(*args, **kw)
self.bind_preferences()
def bind_preferences(self):
pass
def resume(self, state):
pass
def clear_data(self):
self.unknowns = []
self.references = []
def reset(self):
self.visited = False
self._manual_configured = False
self.active = False
def pre_load(self, nodedict):
for k, v in nodedict.items():
if hasattr(self, k):
setattr(self, k, v)
def load(self, nodedict):
pass
def finish_load(self):
pass
def enable(self):
self.enabled = True
def disable(self):
self.enabled = False
def _pre_run_hook(self, state):
pass
def pre_run(self, state, configure=True):
self._pre_run_hook(state)
if not self.auto_configure:
return True
if self._manual_configured:
return True
if state.unknowns:
self.unknowns = state.unknowns
if state.references:
self.references = state.references
if configure:
if self.skip_configure:
return True
if self.configure(refresh=False, pre_run=True):
return True
else:
state.canceled = True
else:
return True
def run(self, state):
raise NotImplementedError(self.__class__.__name__)
def post_run(self, engine, state):
pass
def refresh(self):
pass
def configure(self, pre_run=False, **kw):
if not pre_run:
self._manual_configured = True
return self._configure(**kw)
def _configure(self, obj=None, **kw):
if self.configurable:
if obj is None:
if self.options_klass:
obj = self.options
else:
obj = self
self._configure_hook()
info = obj.edit_traits(kind="livemodal")
if info.result:
self._finish_configure()
self.refresh()
return True
else:
return True
def _configure_hook(self):
pass
def _finish_configure(self):
pass
def to_template(self):
d = {"klass": self.__class__.__name__}
self._to_template(d)
return d
def _options_factory(self):
if self.options_klass:
return self.options_klass()
def _options_default(self):
return self._options_factory()
def _to_template(self, d):
pass
# return []
def _view_factory(self, *items, **kw):
if "title" not in kw:
kw["title"] = "Configure {}".format(self.name)
return okcancel_view(*items, **kw)
def __str__(self):
return "{}<{}>".format(self.name, self.__class__.__name__)
class SortableNode(BaseNode):
pass
# ============= EOF =============================================
| 1.554688 | 2 |
estudo/urls.py | bcunhasa/nutriodonto | 0 | 12763736 | """Defines the URL patterns for the application"""
from django.conf.urls import url
from . import views
app_name="estudo"
urlpatterns = [
    # Tracking of the study results
url(r'^estudo/$', views.EstudoView.as_view(), name='estudo'),
]
| 2.40625 | 2 |
control/excalibur/testing/test_detector.py | dls-controls/excalibur-detector | 0 | 12763737 | <reponame>dls-controls/excalibur-detector
"""
Test cases for the ExcaliburDetector class of the ODIN server EXCALIBUR plugin
<NAME>, STFC Application Engineering Group
"""
from nose.tools import *
import logging
from excalibur.detector import ExcaliburDetector, ExcaliburDetectorError
from excalibur.fem import ExcaliburFem
class TestExcaliburDetector():
@classmethod
def setup_class(cls):
ExcaliburFem.use_stub_api = True
cls.detector_fems = [
('192.168.0.1', 6969, '10.0.2.1'),
('192.168.0.2', 6969, '10.0.2.1'),
('192.168.0.3', 6969, '10.0.2.1')
]
cls.detector = ExcaliburDetector(cls.detector_fems)
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
def test_detector_simple_init(self):
assert_equal(len(self.detector.fems), len(self.detector_fems))
def test_detector_single_fem(self):
detector = ExcaliburDetector(self.detector_fems[0])
assert_equal(len(detector.fems), 1)
def test_detector_bad_fem_spec(self):
with assert_raises_regexp(ExcaliburDetectorError, "Failed to initialise detector FEM list"):
detector = ExcaliburDetector([1, 2, 3])
with assert_raises_regexp(ExcaliburDetectorError, "Failed to initialise detector FEM list"):
detector = ExcaliburDetector('nonsense')
def test_detector_bad_fem_port(self):
bad_detector_fems = self.detector_fems[:]
bad_detector_fems[0] = ('192.168.0.1', 'bad_port', '10.0.2.1')
with assert_raises_regexp(ExcaliburDetectorError, "Failed to initialise detector FEM list"):
detector = ExcaliburDetector(bad_detector_fems)
def test_detector_connect_fems(self):
connect_params = {'state': True}
self.detector.connect(connect_params)
response = self.detector.get('')
assert_equal(response['status']['command_succeeded'], True)
    def test_detector_disconnect_fems(self):
connect_params = {'state': False}
self.detector.connect(connect_params)
response = self.detector.get('')
assert_equal(response['status']['command_succeeded'], True)
def test_detector_powercard_idx(self):
detector = ExcaliburDetector(self.detector_fems)
powercard_idx = 1
detector.set_powercard_fem_idx(powercard_idx)
assert_equal(detector.powercard_fem_idx, powercard_idx)
def test_detector_bad_powercard_idx(self):
detector = ExcaliburDetector(self.detector_fems)
powercard_idx = 4
with assert_raises_regexp(
ExcaliburDetectorError, "Illegal FEM index {} specified for power card".format(powercard_idx)
):
detector.set_powercard_fem_idx(powercard_idx)
def test_detector_set_chip_enable_mask(self):
detector = ExcaliburDetector(self.detector_fems)
chip_enable_mask = [0xff, 0x3f, 0x7f]
detector.set_chip_enable_mask(chip_enable_mask)
assert_equal(chip_enable_mask, detector.chip_enable_mask)
def test_detector_set_chip_enable_mask_single(self):
detector = ExcaliburDetector(('192.168.0.1', 6969, '10.0.2.1'))
chip_enable_mask = 0xff
detector.set_chip_enable_mask(chip_enable_mask)
assert_equal([chip_enable_mask], detector.chip_enable_mask)
    def test_detector_set_chip_enable_length_mismatch(self):
detector = ExcaliburDetector(self.detector_fems)
chip_enable_mask = [0xff, 0x3f]
with assert_raises_regexp(ExcaliburDetectorError, 'Mismatch in length of asic enable mask'):
detector.set_chip_enable_mask(chip_enable_mask)
def test_detector_get(self):
response = self.detector.get('')
assert_equal(type(response), dict)
assert_true('status' in response)
def test_detector_bad_get(self):
bad_path = 'missing_path'
with assert_raises_regexp(ExcaliburDetectorError, 'The path {} is invalid'.format(bad_path)):
response = self.detector.get(bad_path)
def test_detector_bad_set(self):
bad_path = 'missing_path'
with assert_raises_regexp(ExcaliburDetectorError, 'Invalid path: {}'.format(bad_path)):
response = self.detector.set(bad_path, 1234)
def test_decrement_pending_cmd_succeeded(self):
self.detector.command_succeeded = True
self.detector._increment_pending()
self.detector._decrement_pending(True)
response = self.detector.get('')
assert_equal(response['status']['command_succeeded'], True)
def test_decrement_pending_cmd_failed(self):
self.detector.command_succeeded = True
self.detector._increment_pending()
self.detector._decrement_pending(False)
response = self.detector.get('')
assert_equal(response['status']['command_succeeded'], False)
| 2.28125 | 2 |
tenark/cataloguer/json_cataloguer.py | knowark/tenark | 1 | 12763738 | <reponame>knowark/tenark<filename>tenark/cataloguer/json_cataloguer.py
import json
from pathlib import Path
from typing import List, Dict, Any
from ..common import (
QueryParser, QueryDomain, TenantRetrievalError)
from ..models import Tenant
from .cataloguer import Cataloguer
class JsonCataloguer(Cataloguer):
def __init__(self, path: str, parser: QueryParser = None) -> None:
self.path = path
self.parser = parser or QueryParser()
self.catalog: Dict[str, Tenant] = {}
self.collection = 'tenants'
self.catalog_schema: Dict = {
self.collection: {}
}
self._setup()
def load(self, cache: bool = True) -> None:
catalog_file = Path(self.path)
with catalog_file.open('r') as f:
try:
data = json.load(f)
if self.collection in data:
for key, value in data[self.collection].items():
self.catalog[key] = Tenant(**value)
            except json.JSONDecodeError:
                # A corrupt or empty catalog file is ignored; the in-memory
                # catalog simply stays empty.
                pass
def add_tenant(self, tenant: Tenant) -> Tenant:
data: Dict[str, Any] = {self.collection: {}}
catalog_file = Path(self.path)
with catalog_file.open('r') as f:
data = json.load(f)
data[self.collection].update({tenant.id: vars(tenant)})
with catalog_file.open('w') as f:
json.dump(data, f, indent=2)
self.load(False)
return tenant
def get_tenant(self, tenant_id: str) -> Tenant:
tenant = self.catalog.get(tenant_id)
if not tenant:
raise TenantRetrievalError(
f"The entity with id {tenant_id} was not found.")
return tenant
def search_tenants(self, domain: QueryDomain) -> List[Tenant]:
tenants = []
filter_function = self.parser.parse(domain)
for tenant in self.catalog.values():
if filter_function(tenant):
tenants.append(tenant)
return tenants
def _setup(self) -> None:
catalog_file = Path(self.path)
if not catalog_file.exists():
with catalog_file.open('w') as f:
json.dump(self.catalog_schema, f, indent=2)
return
self.load()
if self.catalog:
return
with catalog_file.open('w') as f:
json.dump(self.catalog_schema, f, indent=2)
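# Illustrative usage sketch (not part of the original module); it assumes the
# Tenant model accepts keyword fields such as 'id' and 'name':
#     cataloguer = JsonCataloguer('/tmp/tenants.json')
#     cataloguer.add_tenant(Tenant(id='001', name='Acme'))
#     tenant = cataloguer.get_tenant('001')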
| 2.359375 | 2 |
recipes/o3tanks/utils/containers.py | loherangrin/o3tanks | 13 | 12763739 | <reponame>loherangrin/o3tanks
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..globals.o3de import O3DE_ENGINE_BUILD_DIR, O3DE_ENGINE_INSTALL_DIR, O3DE_ENGINE_SOURCE_DIR, O3DE_GEMS_DIR, O3DE_GEMS_EXTERNAL_DIR, O3DE_PACKAGES_DIR, O3DE_PROJECT_SOURCE_DIR
from ..globals.o3tanks import DATA_DIR, DEVELOPMENT_MODE, DISPLAY_ID, GPU_DRIVER_NAME, OPERATING_SYSTEM, REAL_USER, RUN_CONTAINERS, ROOT_DIR, USER_NAME, USER_GROUP, GPUDrivers, Images, Volumes, get_version_number
from .filesystem import clear_directory, is_directory_empty
from .input_output import Level, Messages, get_verbose, print_msg, throw_error
from .serialization import serialize_list
from .types import AutoEnum, LinuxOSNames, OSFamilies, User
import abc
import enum
import os
import pathlib
import re
if OPERATING_SYSTEM.family in [ OSFamilies.LINUX, OSFamilies.MAC ]:
import grp
import pwd
elif OPERATING_SYSTEM.family is OSFamilies.WINDOWS:
import getpass
else:
throw_error(Messages.INVALID_OPERATING_SYSTEM, OPERATING_SYSTEM.family)
# --- TYPES ---
class ContainerBackend(AutoEnum):
NONE = enum.auto()
DOCKER = enum.auto()
# --- ABSTRACT BASE CLIENT ---
class ContainerClient(abc.ABC):
def __init__(self, backend, in_container):
self._backend = backend
self._in_container = in_container
# --- GENERIC (BASE) ---
def _get_build_arguments(self):
container_user = self.get_container_user()
if OPERATING_SYSTEM.name is LinuxOSNames.ARCH:
os_image = "archlinux"
elif OPERATING_SYSTEM.name is LinuxOSNames.OPENSUSE_LEAP:
os_image = OPERATING_SYSTEM.name.value.replace('-', '/')
else:
os_image = OPERATING_SYSTEM.name.value
os_version = (OPERATING_SYSTEM.version if OPERATING_SYSTEM.version is not None else "latest")
os_image = "{}:{}".format(os_image, os_version)
if OPERATING_SYSTEM.name is LinuxOSNames.ARCH:
locale = "en_US.utf8"
elif OPERATING_SYSTEM.name in [ LinuxOSNames.DEBIAN, LinuxOSNames.UBUNTU ]:
locale = "C.UTF-8"
elif OPERATING_SYSTEM.name in [ LinuxOSNames.FEDORA, LinuxOSNames.OPENSUSE_LEAP ]:
locale = "C.utf8"
else:
locale = "POSIX"
return {
"LOCALE": locale,
"OS_IMAGE": os_image,
"OS_NAME": OPERATING_SYSTEM.name.value,
"OS_VERSION": os_version,
"USER_NAME": container_user.name,
"USER_GROUP": container_user.group,
"USER_UID": str(container_user.uid),
"USER_GID": str(container_user.gid)
}
@staticmethod
def _get_environment_variables():
environment = {
"O3TANKS_DEV_MODE": str(DEVELOPMENT_MODE).lower(),
"O3TANKS_VERBOSE": str(get_verbose())
}
if DATA_DIR is not None:
environment["O3TANKS_DATA_DIR"] = str(DATA_DIR)
return environment
@staticmethod
def _print_bytes_stream(stream, stdout, stderr):
stdout_line = ''
stderr_line = ''
for bytes_chunk in stream:
if isinstance(bytes_chunk, tuple):
if stdout:
stdout_line = ContainerClient.__print_bytes_stream(stdout_line, bytes_chunk[0])
if stderr:
stderr_line = ContainerClient.__print_bytes_stream(stderr_line, bytes_chunk[1])
else:
if stdout or stderr:
stdout_line = ContainerClient.__print_bytes_stream(stdout_line, bytes_chunk)
@staticmethod
def __print_bytes_stream(input_line, bytes_chunk):
if bytes_chunk is None:
return input_line
output_line = input_line
string_chunk = bytes_chunk.decode("utf-8")
if '\n' in string_chunk:
for char in string_chunk:
if char == '\n':
print_msg(Level.INFO, output_line)
output_line = ''
else:
output_line += char
else:
output_line += string_chunk
return output_line
@staticmethod
def _print_string_stream(stream):
for string_chunk in stream:
data = string_chunk.get("stream")
if data is not None:
for line in data.splitlines():
print_msg(Level.INFO, line)
@staticmethod
def calculate_is_in_container():
global RUN_CONTAINERS
if RUN_CONTAINERS:
return DockerContainerClient.calculate_is_in_container()
else:
            return NoneContainerClient.calculate_is_in_container()
@abc.abstractmethod
def close(self):
raise NotImplementedError
@abc.abstractmethod
def get_container_user(self, user_namespace = True):
raise NotImplementedError
def get_current_user(self):
if OPERATING_SYSTEM.family in [ OSFamilies.LINUX, OSFamilies.MAC ]:
uid = os.getuid()
gid = os.getgid()
name = pwd.getpwuid(uid).pw_name
group = grp.getgrgid(gid).gr_name
elif OPERATING_SYSTEM.family is OSFamilies.WINDOWS:
uid = None
gid = None
name = getpass.getuser()
group = None
else:
throw_error(Messages.INVALID_OPERATING_SYSTEM, OPERATING_SYSTEM.family)
return User(name, group, uid, gid)
def is_in_container(self):
return self._in_container
@abc.abstractmethod
def is_rootless_runtime(self):
raise NotImplementedError
@staticmethod
def open():
global RUN_CONTAINERS
if RUN_CONTAINERS:
return DockerContainerClient()
else:
return NoneContainerClient()
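    # Illustrative usage sketch (not part of the original module): callers
    # obtain a client from the factory and release it when done, e.g.
    #     client = ContainerClient.open()
    #     try:
    #         in_container = client.is_in_container()
    #     finally:
    #         client.close()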
# --- CONTAINERS (BASE) ---
@abc.abstractmethod
def copy_to_container(self, container, from_path, to_path, content_only = False):
raise NotImplementedError
@abc.abstractmethod
def exec_in_container(self, container, command, stdout = False, stderr = False):
raise NotImplementedError
@abc.abstractmethod
def run_detached_container(self, image_name, wait, environment = {}, mounts = [], network_disabled = False):
raise NotImplementedError
@abc.abstractmethod
def run_foreground_container(self, image_name, command = [], environment = {}, interactive = True, mounts = [], display = False, gpu = False, network_disabled = False):
raise NotImplementedError
# --- IMAGES (BASE) ---
@abc.abstractmethod
def build_image_from_archive(self, tar_file, image_name, recipe, stage = None, arguments = {}):
raise NotImplementedError
@abc.abstractmethod
def build_image_from_directory(self, context_dir, image_name, recipe, stage = None, arguments = {}):
raise NotImplementedError
@abc.abstractmethod
def get_image_name(self, image_id, engine_version = None, engine_config = None):
image = "{}-{}".format(IMAGE_PREFIX, image_id.value)
if engine_version is not None:
image += "_{}".format(engine_version)
if engine_config is not None:
image += "_{}".format(engine_config.value)
if DEVELOPMENT_MODE:
image += ":development"
else:
image += ":{}".format(get_version_number())
if OPERATING_SYSTEM.name is not None:
image += "_{}".format(OPERATING_SYSTEM.name.value)
if OPERATING_SYSTEM.version is not None:
image += "_{}".format(OPERATING_SYSTEM.version)
return image
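    # For illustration only (enum values and versions below are assumptions):
    # with package version 0.1.0 on Ubuntu 20.04, a call such as
    #     get_image_name(image_id, engine_version='2111.1', engine_config=cfg)
    # yields a name shaped like
    #     'o3tanks-<image_id>_2111.1_<config>:0.1.0_ubuntu_20.04'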
@abc.abstractmethod
def image_exists(self, image_name):
raise NotImplementedError
@abc.abstractmethod
def remove_image(self, image_name):
raise NotImplementedError
# --- VOLUMES (BASE) ---
@abc.abstractmethod
def create_volume(self, name):
raise NotImplementedError
def get_volume_name(self, volume_id, engine_version = None):
volume = VOLUME_PREFIX + '-' + volume_id.value
if engine_version is not None:
volume += "_{}".format(engine_version)
return volume
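    # For illustration only (the concrete enum value is an assumption): a
    # source volume for engine '2111.1' would be named
    # 'o3tanks-<volume_id>_2111.1'.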
@abc.abstractmethod
def get_volume_path(self, volume_name):
raise NotImplementedError
@abc.abstractmethod
def list_volumes(self, filter):
raise NotImplementedError
def is_volume_empty(self, volume_name):
volume_dir = self.get_volume_path(volume_name)
if volume_dir is None:
throw_error(Messages.VOLUME_NOT_FOUND, volume_name)
return is_directory_empty(volume_dir)
@abc.abstractmethod
def remove_volume(self, volume_name):
raise NotImplementedError
@abc.abstractmethod
def volume_exists(self, volume_name):
raise NotImplementedError
# --- DOCKER ---
class DockerContainerClient(ContainerClient):
def __init__(self):
super().__init__(
ContainerBackend.DOCKER,
DockerContainerClient.calculate_is_in_container()
)
global docker
try:
import docker
except ModuleNotFoundError as error:
throw_error(Messages.MISSING_MODULE, error.name)
try:
self._client = docker.from_env(timeout = 7200)
except:
throw_error(Messages.MISSING_DOCKER)
# --- GENERIC (DOCKER) ---
def close(self):
self._client.close()
def get_container_user(self, user_namespace = True):
if self.is_in_container():
if user_namespace:
user_info = pwd.getpwnam(USER_NAME)
group_info = grp.getgrgid(user_info.pw_uid)
container_uid = user_info.pw_uid
container_gid = group_info.gr_gid
else:
container_uid = REAL_USER.uid
container_gid = REAL_USER.gid
else:
host_user = self.get_current_user()
if self.is_rootless_runtime() and not user_namespace:
container_uid = None
container_gid = None
user_pattern = re.compile(r"^{}:(\d+):\d+$".format(host_user.name))
with open("/etc/subuid") as subuid_handler:
for entry in subuid_handler:
matches = user_pattern.match(entry)
if matches is not None:
container_uid = matches.group(1)
break
group_pattern = re.compile(r"^{}:(\d+):\d+$".format(host_user.group))
with open("/etc/subgid") as subgid_handler:
for entry in subgid_handler:
matches = group_pattern.match(entry)
if matches is not None:
container_gid = matches.group(1)
break
if (container_uid is None) or (container_gid is None):
throw_error(Messages.INVALID_USER_NAMESPACE)
else:
container_uid = host_user.uid
container_gid = host_user.gid
return User(USER_NAME, USER_GROUP, container_uid, container_gid)
@staticmethod
def calculate_is_in_container():
return pathlib.Path("/.dockerenv").is_file()
def is_rootless_runtime(self):
info = self._client.info()
security_options = info.get("SecurityOptions")
has_rootless_flag = ("name=rootless" in security_options)
return has_rootless_flag
# --- CONTAINERS (DOCKER) ---
@staticmethod
def _calculate_mounts(binds, volumes):
mounts = []
for from_path, to_path in binds.items():
mounts.append(docker.types.Mount(type = "bind", source = from_path, target = to_path))
for volume_name, to_path in volumes.items():
mounts.append(docker.types.Mount(type = "volume", source = volume_name, target = to_path))
return mounts
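    # For illustration only (paths below are assumptions): binds map host paths
    # to container paths, volumes map named volumes to container paths, e.g.
    #     binds   = {'/home/user/my-project': str(O3DE_PROJECT_SOURCE_DIR)}
    #     volumes = {'o3tanks-packages': str(O3DE_PACKAGES_DIR)}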
def copy_to_container(self, container, from_path, to_path, content_only = False):
copied = False
if content_only:
root = str(from_path)
files = None
else:
root = str(from_path.parent)
if from_path.is_dir():
files = sorted([ str(child.relative_to(root)) for child in from_path.glob("**/*")])
else:
files = [ from_path.name ]
with docker.utils.build.create_archive(root, files = files) as tar_handler:
copied = container.put_archive(str(to_path), tar_handler)
return copied
def exec_in_container(self, container, command, stdout = False, stderr = False):
exit_code, logs = container.exec_run(
stdin = False,
stdout = (stdout or (not stdout and not stderr)),
stderr = stderr,
user = USER_NAME,
cmd = serialize_list(command),
stream = (stdout or stderr),
socket = False,
demux = (stdout and stderr)
)
if (stdout or stderr):
ContainerClient._print_bytes_stream(logs, stdout, stderr)
return True
else:
return (exit_code == 0)
def run_detached_container(self, image_name, wait, environment = {}, binds = {}, volumes = {}, network_disabled = False):
if wait:
entrypoint = "/bin/sh",
command = [ "-c", "tail --follow /dev/null" ]
else:
entrypoint = None
command = []
full_environment = ContainerClient._get_environment_variables()
if len(environment) > 0:
full_environment.update(environment)
mounts = DockerContainerClient._calculate_mounts(binds, volumes)
new_container = self._client.containers.run(
image_name,
entrypoint = entrypoint,
command = command,
network_disabled = network_disabled,
auto_remove = True,
detach = True,
mounts = mounts,
environment = full_environment
)
return new_container
def run_foreground_container(self, image_name, command = [], environment = {}, interactive = True, binds = {}, volumes = {}, display = False, gpu = False, network_disabled = False):
full_environment = ContainerClient._get_environment_variables()
mounts = DockerContainerClient._calculate_mounts(binds, volumes)
if display:
if DISPLAY_ID < 0:
throw_error(Messages.MISSING_DISPLAY)
x11_socket = pathlib.Path("/tmp/.X11-unix/X{}".format(DISPLAY_ID))
if not self.is_in_container() and not x11_socket.is_socket():
throw_error(Messages.INVALID_DISPLAY, DISPLAY_ID, x11_socket)
real_container_user = self.get_container_user(False)
full_environment["O3TANKS_REAL_USER_UID"] = str(real_container_user.uid)
full_environment["O3TANKS_DISPLAY_ID"] = str(DISPLAY_ID)
full_environment["DISPLAY"] = ":{}".format(DISPLAY_ID)
mounts.append(docker.types.Mount(type = "bind", source = str(x11_socket), target = str(x11_socket)))
devices = []
device_requests = []
if gpu:
if GPU_DRIVER_NAME is None:
print_msg(Level.WARNING, Messages.MISSING_GPU)
elif GPU_DRIVER_NAME is GPUDrivers.NVIDIA_PROPRIETARY:
device_requests.append(docker.types.DeviceRequest(count = -1, capabilities = [ [ "gpu", "display", "graphics", "video" ] ]))
vulkan_configs = [
"/usr/share/vulkan/implicit_layer.d/nvidia_layers.json",
"/usr/share/vulkan/icd.d/nvidia_icd.json"
]
for config_file in vulkan_configs:
mounts.append(docker.types.Mount(type = "bind", source = config_file, target = config_file, read_only = True))
elif GPU_DRIVER_NAME in [ GPUDrivers.AMD_OPEN, GPUDrivers.AMD_PROPRIETARY, GPUDrivers.INTEL ]:
devices.append("/dev/dri:/dev/dri")
else:
print_msg(Level.WARNING, Messages.INVALID_GPU, GPU_DRIVER_NAME.value)
if len(environment) > 0:
full_environment.update(environment)
        exit_status = None
        container = None
        try:
            container = self._client.containers.run(
image_name,
command = serialize_list(command),
network_disabled = network_disabled,
auto_remove = True,
detach = True,
devices = devices,
device_requests = device_requests,
mounts = mounts,
environment = full_environment
)
logs = container.attach(stdout = True, stderr = True, stream = True)
ContainerClient._print_bytes_stream(logs, stdout = True, stderr = True)
exit_status = container.wait()
finally:
if (exit_status is None) and (container is not None):
container.kill()
exit_code = exit_status.get("StatusCode")
if exit_code is None:
throw_error(Messages.EXIT_CODE_NOT_FOUND, image_name)
elif exit_code != 0:
print_msg(Level.ERROR, Messages.CONTAINER_ERROR, image_name, exit_code)
return False
return True
# --- IMAGES (DOCKER) ---
def build_image_from_archive(self, tar_file, image_name, recipe, stage = None, arguments = {}):
if not tar_file.is_file():
throw_error(Messages.CONTEXT_NOT_FOUND, tar_file)
print_msg(Level.INFO, Messages.BUILD_IMAGE_FROM_ARCHIVE, image_name, tar_file)
if image_name.endswith(":development") or (":development_" in image_name):
stage += "_dev"
full_buildargs = self._get_build_arguments()
if len(arguments) > 0:
full_buildargs.update(arguments)
try:
            with tar_file.open('rb') as tar_handler:
tar_handler.seek(0)
new_image, logs = self._client.images.build(
fileobj = tar_handler,
dockerfile = recipe,
custom_context = True,
tag = image_name,
target = stage,
buildargs = full_buildargs
)
ContainerClient._print_string_stream(logs)
except docker.errors.BuildError as error:
new_image = None
print_msg(Level.ERROR, str(error))
return new_image
def build_image_from_directory(self, context_dir, image_name, recipe, stage = None, arguments = {}):
if not context_dir.is_dir():
throw_error(Messages.CONTEXT_NOT_FOUND, context_dir)
print_msg(Level.INFO, Messages.BUILD_IMAGE_FROM_DIRECTORY, image_name, context_dir)
if image_name.endswith(":development") or (":development_" in image_name):
stage += "_dev"
full_buildargs = self._get_build_arguments()
if len(arguments) > 0:
full_buildargs.update(arguments)
try:
new_image, logs = self._client.images.build(
path = str(context_dir),
dockerfile = str(context_dir / recipe),
tag = image_name,
target = stage,
buildargs = full_buildargs
)
ContainerClient._print_string_stream(logs)
except docker.errors.BuildError as error:
new_image = None
print_msg(Level.ERROR, str(error))
return new_image
def get_image_name(self, image_id, engine_version = None, engine_config = None):
return super().get_image_name(image_id, engine_version, engine_config)
def image_exists(self, image_name):
try:
image = self._client.images.get(image_name)
return True
except docker.errors.ImageNotFound:
return False
def remove_image(self, image_name):
if self.image_exists(image_name):
self._client.images.remove(image_name)
return True
# --- VOLUMES (DOCKER) ---
def create_volume(self, name):
self._client.volumes.create(name)
def get_volume_path(self, volume_name):
if volume_name is None:
return None
try:
volume = self._client.volumes.get(volume_name)
path = volume.attrs.get("Mountpoint")
if path is not None:
path = pathlib.Path(path)
if self.is_in_container():
try:
path = pathlib.Path("/var/lib/docker/volumes") / path.relative_to(REAL_VOLUMES_DIR)
except:
throw_error(Messages.VOLUMES_DIR_NOT_FOUND)
return path
except docker.errors.NotFound:
volume = None
def list_volumes(self, filter):
volumes = self._client.volumes.list(filters = { "name": filter })
if len(volumes) == 0:
return []
volume_names = []
for volume in volumes:
volume_names.append(volume.name)
return volume_names
def remove_volume(self, volume_name):
try:
volume = self._client.volumes.get(volume_name)
volume.remove()
except docker.errors.NotFound:
pass
def volume_exists(self, volume_name):
try:
volume = self._client.volumes.get(volume_name)
return True
except docker.errors.NotFound:
return False
# --- NONE ---
class NoneContainerClient(ContainerClient):
def __init__(self):
super().__init__(
ContainerBackend.NONE,
NoneContainerClient.calculate_is_in_container()
)
global subprocess
import subprocess
self._ENGINES_DIR = DATA_DIR / "engines"
self._GEMS_DIR = DATA_DIR / "gems"
self._PACKAGES_DIR = DATA_DIR / "packages"
# --- GENERIC (NONE) ---
def close(self):
pass
def get_container_user(self, user_namespace = True):
current_user = self.get_current_user()
return current_user
@staticmethod
def calculate_is_in_container():
return False
def is_rootless_runtime(self):
current_user = self.get_current_user()
return (current_user.uid != 0)
# --- CONTAINERS (NONE) ---
def _calculate_command(self, image_name, command):
if not self.image_exists(image_name):
throw_error(Messages.INVALID_IMAGE_IN_NO_CONTAINERS_MODE, image_name)
full_command = [ "python3", "-m", "o3tanks.{}".format(image_name) ]
if len(command) > 0:
full_command += serialize_list(command)
return full_command
def _calculate_mounts_mapping(self, binds, volumes):
mapping = {}
for from_path, to_path in volumes.items():
engine_volume_type = None
if to_path == str(O3DE_ENGINE_SOURCE_DIR):
engine_volume_type = Volumes.SOURCE
elif to_path == str(O3DE_ENGINE_BUILD_DIR):
engine_volume_type = Volumes.BUILD
elif to_path == str(O3DE_ENGINE_INSTALL_DIR):
engine_volume_type = Volumes.INSTALL
elif to_path == str(O3DE_GEMS_DIR):
mapping["O3DE_GEMS_DIR"] = str(self._GEMS_DIR)
elif to_path == str(O3DE_PACKAGES_DIR):
mapping["O3DE_PACKAGES_DIR"] = str(self._PACKAGES_DIR)
if engine_volume_type is not None:
engine_version = self._get_engine_version_from_volume(from_path, engine_volume_type)
mapping["O3DE_ENGINE_DIR"] = str(self._ENGINES_DIR / engine_version)
for from_path, to_path in binds.items():
if to_path == str(O3DE_PROJECT_SOURCE_DIR):
mapping["O3DE_PROJECT_DIR"] = str(from_path)
elif to_path == str(ROOT_DIR):
mapping["O3TANKS_DIR"] = str(from_path)
elif to_path.startswith(str(O3DE_GEMS_EXTERNAL_DIR)):
mapping["O3DE_GEMS_EXTERNAL_DIR"] = pathlib.Path(from_path).anchor
return mapping
@staticmethod
def _execute_python(command, environment, wait):
for python_binary in [ "python3", "python", "py" ]:
try:
command[0] = python_binary
if wait:
result = subprocess.run(command, env = environment)
return result
else:
handler = subprocess.Popen(command, env = environment)
return handler
except FileNotFoundError as error:
pass
throw_error(Messages.MISSING_PYTHON)
def _get_engine_version_from_volume(self, volume_name, volume_type):
volume_prefix = self.get_volume_name(volume_type)
if not volume_name.startswith(volume_prefix):
return None
generic_volume = self.get_volume_name(volume_type, 'a')
start_delimiter = len(volume_prefix)
end_delimiter = len(generic_volume)
volume_prefix_length = start_delimiter + (end_delimiter - start_delimiter - 1)
engine_version = volume_name[volume_prefix_length:]
return engine_version
@staticmethod
def _get_environment_variables():
environment = os.environ.copy()
global_variables = ContainerClient._get_environment_variables()
if len(global_variables) > 0:
environment.update(global_variables)
return environment
def copy_to_container(self, container, from_path, to_path, content_only = False):
throw_error(Messages.UNSUPPORTED_CONTAINERS_AND_NO_CLIENT)
def exec_in_container(self, container, command, stdout = False, stderr = False):
throw_error(Messages.UNSUPPORTED_CONTAINERS_AND_NO_CLIENT)
def run_detached_container(self, image_name, wait, environment = {}, binds = {}, volumes = {}, network_disabled = False):
if wait:
throw_error(Messages.UNSUPPORTED_CONTAINERS_AND_NO_CLIENT)
full_command = self._calculate_command(image_name, [])
full_environment = NoneContainerClient._get_environment_variables()
if len(environment) > 0:
full_environment.update(environment)
mapping = self._calculate_mounts_mapping(binds, volumes)
full_environment.update(mapping)
handler = NoneContainerClient._execute_python(full_command, full_environment, wait = False)
return handler
def run_foreground_container(self, image_name, command = [], environment = {}, interactive = True, binds = {}, volumes = {}, display = False, gpu = False, network_disabled = False):
full_command = self._calculate_command(image_name, command)
full_environment = NoneContainerClient._get_environment_variables()
if display and (OPERATING_SYSTEM.family is OSFamilies.LINUX):
if DISPLAY_ID < 0:
throw_error(Messages.MISSING_DISPLAY)
x11_socket = pathlib.Path("/tmp/.X11-unix/X{}".format(DISPLAY_ID))
if not x11_socket.is_socket():
throw_error(Messages.INVALID_DISPLAY, DISPLAY_ID, x11_socket)
full_environment["O3TANKS_DISPLAY_ID"] = str(DISPLAY_ID)
full_environment["DISPLAY"] = ":{}".format(DISPLAY_ID)
if len(environment) > 0:
full_environment.update(environment)
mapping = self._calculate_mounts_mapping(binds, volumes)
full_environment.update(mapping)
result = NoneContainerClient._execute_python(full_command, full_environment, wait = True)
exit_code = result.returncode
if exit_code != 0:
print_msg(Level.ERROR, Messages.CONTAINER_ERROR, image_name, exit_code)
return False
return True
# --- IMAGES (NONE) ---
def build_image_from_archive(self, tar_file, image_name, recipe, stage = None, arguments = {}):
return None
def build_image_from_directory(self, context_dir, image_name, recipe, stage = None, arguments = {}):
return None
def get_image_name(self, image_id, engine_version = None, engine_config = None):
if image_id in [ Images.INSTALL_BUILDER, Images.INSTALL_RUNNER ]:
return None
return image_id.value
def image_exists(self, image_name):
return (image_name is not None) and Images.has_value(image_name)
def remove_image(self, image_name):
return True
# --- VOLUMES (NONE) ---
def _calculate_volume_path(self, volume_name):
if volume_name is None:
return None
elif volume_name == self.get_volume_name(Volumes.GEMS):
volume_dir = self._GEMS_DIR
elif volume_name == self.get_volume_name(Volumes.PACKAGES):
volume_dir = self._PACKAGES_DIR
else:
volume_dir = None
for volume_type in [ Volumes.SOURCE, Volumes.BUILD, Volumes.INSTALL ]:
engine_version = self._get_engine_version_from_volume(volume_name, volume_type)
if engine_version is not None:
volume_dir = self._ENGINES_DIR / engine_version
if volume_type is not Volumes.SOURCE:
volume_dir /= volume_type.value
break
if volume_dir is None:
throw_error(Messages.INVALID_VOLUME_TYPE)
return volume_dir
def create_volume(self, name):
volume_dir = self._calculate_volume_path(name)
if volume_dir.is_dir():
return
elif volume_dir.exists():
throw_error(Messages.INVALID_VOLUME_DIRECTORY, name, volume_dir)
volume_dir.mkdir(parents = True)
def get_volume_path(self, volume_name):
volume_dir = self._calculate_volume_path(volume_name)
if not volume_dir.exists():
return None
elif not volume_dir.is_dir():
throw_error(Messages.INVALID_VOLUME_DIRECTORY, volume_name, volume_dir)
return volume_dir
def list_volumes(self, filter):
if not self._ENGINES_DIR.is_dir():
return []
volume_names = []
for child in self._ENGINES_DIR.iterdir():
if not child.is_dir():
continue
engine_version = child.name
volume_name = self.get_volume_name(Volumes.SOURCE, engine_version)
if re.search(filter, volume_name):
volume_names.append(volume_name)
return volume_names
def remove_volume(self, volume_name):
volume_dir = self.get_volume_path(volume_name)
if volume_dir is None:
return
cleared = clear_directory(volume_dir)
if cleared:
volume_dir.rmdir()
def volume_exists(self, volume_name):
volume_dir = self.get_volume_path(volume_name)
if volume_dir is None:
return False
elif volume_dir.is_dir():
return True
else:
throw_error(Messages.INVALID_VOLUME_DIRECTORY, volume_name, volume_dir)
# --- VARIABLES ---
REAL_VOLUMES_DIR = None
IMAGE_PREFIX = "o3tanks"
VOLUME_PREFIX = IMAGE_PREFIX
# --- FUNCTIONS (GENERIC) ---
def get_real_volumes_dir():
global REAL_VOLUMES_DIR
return REAL_VOLUMES_DIR
def set_real_volumes_dir(value):
global REAL_VOLUMES_DIR
REAL_VOLUMES_DIR = (pathlib.PurePath(value) / "volumes") if value is not None else None
| 1.53125 | 2 |
thelma/tools/iso/poolcreation/execution.py | fogathmann/TheLMA | 1 | 12763740 | <filename>thelma/tools/iso/poolcreation/execution.py
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Tools involved the execution of pool stock sample creation worklists.
AAB
"""
from thelma.tools.semiconstants import RACK_SHAPE_NAMES
from thelma.tools.semiconstants import RESERVOIR_SPECS_NAMES
from thelma.tools.semiconstants import get_positions_for_shape
from thelma.tools.semiconstants import get_reservoir_spec
from thelma.tools.iso.base import IsoRackContainer
from thelma.tools.iso.base import StockRackLayoutConverter
from thelma.tools.iso.base import StockRackVerifier
from thelma.tools.iso.base import StockTransferWriterExecutor
from thelma.tools.iso.poolcreation.base \
import PoolCreationStockRackLayoutConverter
from thelma.tools.iso.poolcreation.base \
import SingleDesignStockRackLayoutConverter
from thelma.tools.iso.poolcreation.base \
import StockSampleCreationLayout
from thelma.tools.iso.poolcreation.base \
import StockSampleCreationLayoutConverter
from thelma.tools.iso.poolcreation.base import LABELS
from thelma.tools.iso.poolcreation.generation \
import StockSampleCreationWorklistGenerator
from thelma.tools.iso.tracreporting import IsoStockTransferReporter
from thelma.tools.worklists.series import SampleDilutionJob
from thelma.tools.worklists.series import SampleTransferJob
from thelma.tools.writers import CsvColumnParameters
from thelma.tools.writers import CsvWriter
from thelma.tools.utils.base import VOLUME_CONVERSION_FACTOR
from thelma.tools.utils.base import add_list_map_element
from thelma.tools.utils.base import get_trimmed_string
from thelma.entities.iso import ISO_STATUS
from thelma.entities.iso import StockSampleCreationIso
from thelma.entities.liquidtransfer import ExecutedWorklist
__docformat__ = 'reStructuredText en'
__all__ = ['StockSampleCreationIsoExecutor',
'_StockSampleCreationStockLogFileWriter',
'StockSampleCreationStockTransferReporter']
class StockSampleCreationIsoExecutor(StockTransferWriterExecutor):
"""
Executes the worklist file for a pool stock sample creation ISO.
This comprises both buffer dilution and stock transfer.
**Return Value:** the updated ISO
"""
NAME = 'Stock Sample Creation Executor'
ENTITY_CLS = StockSampleCreationIso
#: The barcode for the buffer source reservoir.
BUFFER_RESERVOIR_BARCODE = 'buffer'
_MODES = [StockTransferWriterExecutor.MODE_EXECUTE]
def __init__(self, iso, user, **kw):
"""
Constructor.
:param iso: The stock sample creation ISO for which to execute the
worklists.
:type iso: :class:`thelma.entities.iso.StockSampleCreationIso`
:param user: The user conducting the execution.
:type user: :class:`thelma.entities.user.User`
"""
StockTransferWriterExecutor.__init__(self, user=user, entity=iso,
mode=StockTransferWriterExecutor.MODE_EXECUTE, **kw)
#: The stock sample creation layout for this ISO.
self.__ssc_layout = None
#: The :class:`IsoRackContainer` for each stock rack mapped onto
#: rack marker.
self.__rack_containers = None
#: The stock rack that serves as target rack (:class:`IsoStockRack`).
self.__pool_stock_rack = None
#: The stock transfer worklist is the only worklist in the stock rack
#: series.
self.__stock_transfer_worklist = None
#: The indices for the rack transfer jobs mapped onto the worklist
#: they belong to.
self.__rack_transfer_indices = None
#: Positions without ISO positions (i.e. without transfer).
self.__ignore_positions = None
def reset(self):
StockTransferWriterExecutor.reset(self)
self.__ssc_layout = None
self.__rack_containers = dict()
self.__pool_stock_rack = None
self.__stock_transfer_worklist = None
self.__rack_transfer_indices = dict()
self.__ignore_positions = []
def _create_transfer_jobs(self):
"""
Executes the pool creation worklists.
"""
if not self.has_errors():
self.__get_layout()
if not self.has_errors():
self.__get_racks()
if not self.has_errors():
self.__create_buffer_transfer_job()
if not self.has_errors():
self.__create_stock_transfer_jobs()
def get_stock_sample_creation_layout(self):
"""
Returns the working layout containing the molecule design pool ID data
(for reporting).
"""
return self._get_additional_value(self.__ssc_layout)
def _check_input(self):
"""
Checks the initialisation values.
"""
StockTransferWriterExecutor._check_input(self)
if not self.has_errors() and \
not self.entity.status == ISO_STATUS.QUEUED:
msg = 'Unexpected ISO status: "%s"' % (self.entity.status)
self.add_error(msg)
def __get_layout(self):
# Fetches the stock sample layout and sorts its positions into
# quadrants.
self.add_debug('Fetch stock sample layout ...')
converter = StockSampleCreationLayoutConverter(
self.entity.rack_layout,
parent=self)
self.__ssc_layout = converter.get_result()
if self.__ssc_layout is None:
msg = 'Error when trying to convert stock sample creation ISO ' \
'layout.'
self.add_error(msg)
else:
ssc_positions = self.__ssc_layout.get_positions()
for rack_pos in get_positions_for_shape(RACK_SHAPE_NAMES.SHAPE_96):
if not rack_pos in ssc_positions:
self.__ignore_positions.append(rack_pos)
def __get_racks(self):
# Fetches the ISO stock rack and the single molecule stock racks
# (barcodes for the single design racks are found in the worklist
# labels).
self.add_debug('Fetch stock racks ...')
isrs = self.entity.iso_stock_racks
for isr in isrs:
label = isr.label
label_values = self._run_and_record_error(
meth=LABELS.parse_stock_rack_label,
base_msg='Error when trying to parse stock rack ' \
'label "%s"' % (isr.label),
error_types=IndexError,
**dict(stock_rack_label=label))
if label_values is None:
continue
rack_marker = label_values[LABELS.MARKER_RACK_MARKER]
if rack_marker == LABELS.ROLE_POOL_STOCK:
if self.__pool_stock_rack is not None:
msg = 'There are several pool stock racks for this ISO!'
self.add_error(msg)
break
self.__pool_stock_rack = isr
else:
rack_container = IsoRackContainer(rack=isr.rack,
rack_marker=rack_marker, label=label)
self.__rack_containers[rack_marker] = rack_container
number_designs = self.entity.iso_request.number_designs
exp_lengths = ((number_designs + 1), 2)
if not len(isrs) in exp_lengths:
msg = 'There is an unexpected number of stock racks ' \
'attached to this ISO (%i). There should be %s! ' \
% (len(isrs), self._get_joined_str(exp_lengths, is_strs=False,
sort_items=False, separator=' or '))
self.add_error(msg)
return None
elif self.__pool_stock_rack is None and not self.has_errors():
msg = 'There is no pool stock rack for this ISO!'
self.add_error(msg)
elif self.__pool_stock_rack is not None:
ws = self.__pool_stock_rack.worklist_series
if len(ws) > 1:
msg = 'The stock rack worklist series has an unexpected ' \
'length (%i instead of 1)!' % (len(ws))
self.add_error(msg)
return None
self.__stock_transfer_worklist = ws.get_sorted_worklists()[0]
def __create_buffer_transfer_job(self):
# Creates the transfer job for the buffer worklist.
self.add_debug('Create buffer transfer jobs ...')
worklist_series = self.entity.iso_request.worklist_series
buffer_worklist = worklist_series.get_worklist_for_index(
StockSampleCreationWorklistGenerator.BUFFER_WORKLIST_INDEX)
rs = get_reservoir_spec(RESERVOIR_SPECS_NAMES.FALCON_MANUAL)
job_index = len(self._transfer_jobs)
cdj = SampleDilutionJob(index=job_index,
planned_worklist=buffer_worklist,
target_rack=self.__pool_stock_rack.rack,
reservoir_specs=rs,
source_rack_barcode=self.BUFFER_RESERVOIR_BARCODE,
ignored_positions=self.__ignore_positions)
self._transfer_jobs[job_index] = cdj
def __create_stock_transfer_jobs(self):
# Creates the transfer jobs for the pool creation. We do not need
# to regard potential empty (ignored) positions here, because the
# worklist creation is already based on the library layout.
self.add_debug('Create pool creation transfer jobs ...')
for rack_container in self.__rack_containers.values():
job_index = len(self._transfer_jobs)
stj = SampleTransferJob(index=job_index,
planned_worklist=self.__stock_transfer_worklist,
target_rack=self.__pool_stock_rack.rack,
source_rack=rack_container.rack)
self._transfer_jobs[job_index] = stj
def _verify_stock_racks(self):
"""
        We convert the layouts separately because the layouts have different
        types. Also, the pool stock rack is checked separately because the
referring tubes must all be empty.
"""
self.__verify_pool_stock_rack()
num_stock_racks = len(self.entity.iso_stock_racks)
incompatible = []
for isr in self.entity.iso_stock_racks:
layout = None
kw = dict(rack_layout=isr.rack_layout, parent=self)
rack_name = '%s (%s)' % (isr.rack.barcode, isr.label)
if isr.label == self.__pool_stock_rack.label:
continue # has been checked separately
elif num_stock_racks == 2:
converter_cls = SingleDesignStockRackLayoutConverter
else:
converter_cls = StockRackLayoutConverter
converter = converter_cls(**kw)
layout = converter.get_result()
if layout is None:
msg = 'Error when trying to convert stock rack layout for ' \
'rack %s!' % (rack_name)
self.add_error(msg)
else:
verifier = StockRackVerifier(isr,
stock_rack_layout=layout,
parent=self)
compatible = verifier.get_result()
if compatible is None:
msg = 'Error when trying to verify stock rack %s.' \
% (rack_name)
self.add_error(msg)
elif not compatible:
incompatible.append(rack_name)
if len(incompatible) > 0:
msg = 'The following stock racks are not compatible: %s.' \
% (self._get_joined_str(incompatible))
self.add_error(msg)
def __verify_pool_stock_rack(self):
# Makes sure there are empty tubes in all required positions and none
# in positions that must be empty.
converter = PoolCreationStockRackLayoutConverter(
self.__pool_stock_rack.rack_layout,
parent=self)
layout = converter.get_result()
if layout is None:
msg = 'Error when trying to convert pool stock rack layout!'
self.add_error(msg)
else:
additional_tubes = []
non_empty_tube = []
positions = layout.get_positions()
tube_positions = set()
rack = self.__pool_stock_rack.rack
for tube in rack.containers:
rack_pos = tube.location.position
tube_positions.add(rack_pos)
info = '%s (%s)' % (tube.barcode, rack_pos.label)
if not rack_pos in positions:
additional_tubes.append(info)
elif not tube.sample is None:
non_empty_tube.append(info)
missing_tube = []
for rack_pos in get_positions_for_shape(layout.shape):
sr_pos = layout.get_working_position(rack_pos)
if sr_pos is None:
continue
elif rack_pos not in tube_positions:
missing_tube.append(rack_pos.label)
if len(additional_tubes) > 0:
msg = 'There are unexpected tubes in the pool stock rack ' \
'(%s): %s. Please remove them and try again.' \
% (rack.barcode, self._get_joined_str(additional_tubes))
self.add_error(msg)
if len(non_empty_tube) > 0:
msg = 'The following tubes in the pool stock rack (%s) are ' \
'not empty: %s. Please replace them by empty tubes and ' \
'try again.' % (rack.barcode,
self._get_joined_str(non_empty_tube))
self.add_error(msg)
if len(missing_tube) > 0:
msg = 'There are tubes missing in the following positions ' \
'of the pool stock rack (%s): %s.' % (rack.barcode,
self._get_joined_str(missing_tube))
self.add_error(msg)
def _check_for_previous_execution(self):
if len(self.__stock_transfer_worklist.executed_worklists) > 0:
msg = 'The stock transfer has already been executed before.'
self.add_error(msg)
def _extract_executed_stock_worklists(self, executed_worklists):
"""
The worklists are recognized by label.
"""
exp_label = self.__stock_transfer_worklist.label
for ew in executed_worklists:
if ew.planned_worklist.label == exp_label:
self._executed_stock_worklists.append(ew)
def _update_iso_status(self):
self.entity.status = ISO_STATUS.DONE
self.__create_stock_samples()
def __create_stock_samples(self):
# Converts the new pool samples into :class:`StockSample` entities.
# We also compare expected and found molecule designs again. This has
# in theory already been done by the verifier. However, we have done a
# transfer in between and we want to exclude the (slight) chance that
# something went wrong during this process since afterwards it will
# hardly be possible to reconstruct the course of events and in case
# of the stock we better double-check.
self.add_debug('Generate stock samples ...')
mismatch = []
diff_supplier = []
for tube in self.__pool_stock_rack.rack.containers:
sample = tube.sample
if sample is None:
continue
# check whether expected pool
rack_pos = tube.location.position
ssc_pos = self.__ssc_layout.get_working_position(rack_pos)
pool = ssc_pos.pool
exp_mds = set()
for md in pool: exp_mds.add(md.id)
found_mds = set()
suppliers = set()
for sm in sample.sample_molecules:
md_id = sm.molecule.molecule_design.id
found_mds.add(md_id)
suppliers.add(sm.molecule.supplier)
if not exp_mds == found_mds:
info = '%s (pool: %s, expected designs: %s, found designs: ' \
'%s)' % (rack_pos, pool,
self._get_joined_str(exp_mds, is_strs=False,
separator='-'),
self._get_joined_str(found_mds, is_strs=False,
separator='-'))
mismatch.append(info)
continue
if len(suppliers) > 1:
info = '%s (pool: %s, found: %s)' % (rack_pos, pool,
', '.join(sorted([str(s.name) for s in suppliers])))
diff_supplier.append(info)
continue
else:
sample.convert_to_stock_sample()
if len(mismatch) > 0:
            msg = 'The molecule designs for the following stock samples do ' \
'not match the expected designs for this sample. This ' \
'should not happen. Talk to the IT department, please. ' \
'Details: %s.' % (','.join(sorted(mismatch)))
self.add_error(msg)
if len(diff_supplier) > 0:
msg = 'The designs for some of the pools originate from ' \
'different suppliers: %s.' \
% (', '.join(sorted(diff_supplier)))
self.add_error(msg)
def _get_file_map(self, merged_stream_map, rack_transfer_stream): #pylint: disable=W0613
"""
We do not need to implement this method because printing mode is not
not allowed anyway.
"""
self.add_error('Printing mode is not allowed for this tool!')
class StockSampleCreationStockTransferReporter(IsoStockTransferReporter):
"""
A special reporter for stock sample creation ISOs.
    **Return Value:** The log file as stream (arg 0) and comment (arg 1)
"""
EXECUTOR_CLS = StockSampleCreationIsoExecutor
def __init__(self, executor, parent=None):
"""
Constructor.
:param executor: The executor tool (after run has been completed).
:type executor: :class:`_LabIsoWriterExecutorTool`
"""
IsoStockTransferReporter.__init__(self, executor, parent=parent)
#: The stock sample creation layout for this ISO.
self.__ssc_layout = None
def reset(self):
IsoStockTransferReporter.reset(self)
self.__ssc_layout = None
def _fetch_executor_data(self):
IsoStockTransferReporter._fetch_executor_data(self)
self.__ssc_layout = self.executor.get_stock_sample_creation_layout()
self._check_input_class('layout', self.__ssc_layout,
StockSampleCreationLayout)
def _set_ticket_id(self):
"""
The ticket ID is attached to the stock sample creation ISO.
"""
self._ticket_number = self.executor.entity.ticket_number
def _get_sample_type_str(self):
return 'new pooled stock samples'
def _get_rack_str(self):
"""
The rack string looks different, we use new ISO stock rack
(instead of the preparation plate).
"""
rack = self.executor.entity.iso_stock_racks[0].rack
rack_str = "'''New pool stock rack:''' %s" % (rack.barcode)
return rack_str
def _get_log_file_writer(self):
"""
For stock sample creation ISOs we use a special writer, the
:class:`StockSampleCreationStockLogFileWriter`.
"""
writer = _StockSampleCreationStockLogFileWriter(
self.__ssc_layout, self._executed_stock_worklists,
parent=self)
return writer
class _StockSampleCreationStockLogFileWriter(CsvWriter):
"""
Creates a log file after each pool creation stock transfer. The log
file contains molecule design pools, molecule designs, stock tube barcodes
and volumes and the barcode and positions in the target rack.
**Return Value:** file stream (CSV format)
"""
NAME = 'Stock Sample Creation Stock Transfer Log File Writer'
#: The index for the molecule design pool ID column.
POOL_INDEX = 0
#: The header for the molecule design pool ID column.
POOL_HEADER = 'Pool ID'
#: The index for the single molecule design pool ID column.
MOLECULE_DESIGN_INDEX = 1
#: The header for the molecule design pool ID column.
MOLECULE_DESIGN_HEADER = 'Molecule Design ID'
#: The index for the tube barcode column.
TUBE_BARCODE_INDEX = 2
#: The header for the tube barcode column.
TUBE_BARCODE_HEADER = 'Stock Tube Barcode'
#: The index for the volume column.
VOLUME_INDEX = 3
#: The header for the volume column.
VOLUME_HEADER = 'Volume (ul)'
#: The index for the target rack barcode column.
TARGET_RACK_BARCODE_INDEX = 4
#: The header for the target rack barcode column.
TARGET_RACK_BARCODE_HEADER = 'Target Rack Barcode'
#: The index for the target position column.
TARGET_POSITION_INDEX = 5
#: The header for the target position column.
TARGET_POSITION_HEADER = 'Target Position'
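    # Assuming the CsvWriter base class emits headers in column-index order
    # (not verified here), the resulting log file would start with the row:
    #     Pool ID,Molecule Design ID,Stock Tube Barcode,Volume (ul),Target Rack Barcode,Target Position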
def __init__(self, stock_sample_creation_layout, executed_worklists,
parent=None):
"""
Constructor.
:param stock_sample_creation_layout: The working_layout containing the
molecule design pool data.
:type stock_sample_creation_layout: :class:`StockSampleCreationLayout`
:param list executed_worklists: The executed worklists that have been
generated by the executor (mapped onto transfer job indices).
"""
CsvWriter.__init__(self, parent=parent)
#: The executed worklists that have been generated by the executor.
self.executed_worklists = executed_worklists
#: The working layout containing the molecule design pool data.
self.stock_sample_creation_layout = stock_sample_creation_layout
#: Stores the values for the molecule design pool ID column.
self.__pool_values = None
#: Stores the values for the single molecule design IDs column.
self.__md_values = None
#: Stores the values for the tube barcode column.
self.__tube_barcode_values = None
#: Stores the values for the volume column.
self.__volume_values = None
#: Stores the values for the target rack barcode column.
self.__trg_rack_barcode_values = None
#: Stores the values for the target position column.
self.__trg_position_values = None
def reset(self):
CsvWriter.reset(self)
self.__pool_values = []
self.__md_values = []
self.__tube_barcode_values = []
self.__volume_values = []
self.__trg_rack_barcode_values = []
self.__trg_position_values = []
def _init_column_map_list(self):
"""
Creates the :attr:`_column_map_list`
"""
self.add_info('Start log file generation ...')
self.__check_input()
if not self.has_errors():
self.__store_column_values()
if not self.has_errors():
self.__generate_column_maps()
def __check_input(self):
self.add_debug('Check input values ...')
self._check_input_list_classes('executed_worklist',
self.executed_worklists, ExecutedWorklist)
self._check_input_class('stock sample creation layout',
self.stock_sample_creation_layout,
StockSampleCreationLayout)
def __store_column_values(self):
# Stores the values for the columns.
self.add_debug('Store values ...')
target_rack_map = dict()
for ew in self.executed_worklists:
for elt in ew.executed_liquid_transfers:
target_rack_barcode = elt.target_container.location.rack.barcode
if not target_rack_map.has_key(target_rack_barcode):
target_rack_map[target_rack_barcode] = []
target_rack_map[target_rack_barcode].append(elt)
barcodes = sorted(target_rack_map.keys())
well_containers = set()
for target_rack_barcode in barcodes:
non_single_md_src_pool = []
executed_transfers = target_rack_map[target_rack_barcode]
pool_map = self.__get_sorted_executed_transfers(executed_transfers,
target_rack_barcode)
if self.has_errors(): break
pools = sorted(pool_map.keys(), cmp=lambda p1, p2:
cmp(p1.id, p2.id))
for pool in pools:
elts = pool_map[pool]
for elt in elts:
plt = elt.planned_liquid_transfer
self.__pool_values.append(get_trimmed_string(pool.id))
volume = plt.volume * VOLUME_CONVERSION_FACTOR
self.__volume_values.append(get_trimmed_string(volume))
self.__trg_rack_barcode_values.append(target_rack_barcode)
trg_label = plt.target_position.label
self.__trg_position_values.append(trg_label)
src_tube = elt.source_container
self.__tube_barcode_values.append(src_tube.barcode)
md_id = self.__get_molecule_design_id(src_tube)
if md_id is None:
info = '%s (rack %s)' % (src_tube.barcode,
target_rack_barcode)
non_single_md_src_pool.append(info)
else:
self.__md_values.append(get_trimmed_string(md_id))
if len(non_single_md_src_pool) > 0:
                msg = 'Some source containers contain more than one ' \
'molecule design: %s.' \
% (self._get_joined_str(non_single_md_src_pool))
self.add_error(msg)
if len(well_containers) > 0:
msg = 'Some source containers in the worklists are wells: %s!' \
% (self._get_joined_str(well_containers))
self.add_error(msg)
def __get_sorted_executed_transfers(self, executed_transfers,
target_rack_barcode):
# Sorts the executed transfer of a worklist by pool and source tube
# barcode.
pool_map = dict()
no_pools = set()
for elt in executed_transfers:
rack_pos = elt.target_container.location.position
ssc_pos = self.stock_sample_creation_layout.get_working_position(
rack_pos)
if ssc_pos is None:
info = '%s (rack %s)' % (rack_pos.label, target_rack_barcode)
no_pools.add(info)
continue
pool = ssc_pos.pool
add_list_map_element(pool_map, pool, elt)
if len(no_pools) > 0:
msg = 'Could not find molecule design pools for the following ' \
'target positions: %s.' % (self._get_joined_str(no_pools))
self.add_error(msg)
for pool, elts in pool_map.iteritems():
elts.sort(cmp=lambda elt1, elt2: cmp(
elt1.source_container.barcode,
elt2.source_container.barcode))
return pool_map
def __get_molecule_design_id(self, tube):
# Returns the molecule design for a single molecule design pool stock
# tube.
sms = tube.sample.sample_molecules
if not len(sms) == 1:
result = None
else:
sm = sms[0]
result = sm.molecule.molecule_design.id
return result
def __generate_column_maps(self):
# Initialises the CsvColumnParameters object for the
# :attr:`_column_map_list`.
pool_column = CsvColumnParameters(self.POOL_INDEX, self.POOL_HEADER,
self.__pool_values)
md_column = CsvColumnParameters(self.MOLECULE_DESIGN_INDEX,
self.MOLECULE_DESIGN_HEADER, self.__md_values)
tube_column = CsvColumnParameters(self.TUBE_BARCODE_INDEX,
self.TUBE_BARCODE_HEADER, self.__tube_barcode_values)
volume_column = CsvColumnParameters(self.VOLUME_INDEX,
self.VOLUME_HEADER, self.__volume_values)
rack_barcode_column = CsvColumnParameters(
self.TARGET_RACK_BARCODE_INDEX,
self.TARGET_RACK_BARCODE_HEADER,
self.__trg_rack_barcode_values)
rack_position_column = CsvColumnParameters(self.TARGET_POSITION_INDEX,
self.TARGET_POSITION_HEADER, self.__trg_position_values)
self._column_map_list = [pool_column, md_column, tube_column,
volume_column, rack_barcode_column,
rack_position_column]
| 1.445313 | 1 |
visualization/polus-precompute-slide-plugin/src/utils.py | mmvih/polus-plugins | 0 | 12763741 | import copy, os, json, filepattern, imageio, pathlib, typing, abc, zarr
import bfio
import numpy as np
from numcodecs import Blosc
from concurrent.futures import ThreadPoolExecutor
from preadator import ProcessManager
from bfio.OmeXml import OMEXML
import logging
logging.getLogger("bfio").setLevel(logging.CRITICAL)
# Conversion factors to nm, these are based off of supported Bioformats length units
UNITS = {'m': 10**9,
'cm': 10**7,
'mm': 10**6,
'µm': 10**3,
'nm': 1,
'Å': 10**-1}
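# For example (illustrative, not part of the original plugin):
#     0.325 µm -> 0.325 * UNITS['µm'] == 325.0 nm
#     12 cm    -> 12 * UNITS['cm']    == 1.2e8 nm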
# Chunk Scale
CHUNK_SIZE = 1024
def _mode2(image: np.ndarray) -> np.ndarray:
""" Find mode of pixels in optical field 2x2 and stride 2
This method approximates the mode by finding the largest number that occurs
at least twice in a 2x2 grid of pixels, then sets that value to the output
pixel.
Args:
image - numpy array with only two dimensions (m,n)
Returns:
mode_img - numpy array with only two dimensions (round(m/2),round(n/2))
"""
y_max = image.shape[0] - image.shape[0] % 2
x_max = image.shape[1] - image.shape[1] % 2
# Initialize the mode output image (Half the size)
mode_img = np.zeros(np.ceil([d/2 for d in image.shape]).astype(int),dtype=image.dtype)
# Default the output to the upper left pixel value
mode_img[0:y_max//2,0:x_max//2] = image[0:-1:2, 0:-1:2]
# Handle images with odd-valued image dimensions
if y_max != image.shape[0]:
mode_img[-1,:x_max//2] = image[-1,0:x_max-1:2]
if x_max != image.shape[1]:
mode_img[:y_max//2,-1] = image[0:y_max-1:2,-1]
if y_max != image.shape[0] and x_max != image.shape[1]:
mode_img[-1,-1] = image[-1,-1]
# Garnering the four different pixels that we would find the modes of
# Finding the mode of:
# vals00[1], vals01[1], vals10[1], vals11[1]
# vals00[2], vals01[2], vals10[2], vals11[2]
# etc
vals00 = image[0:-1:2, 0:-1:2]
vals01 = image[0:-1:2, 1::2]
    vals10 = image[1::2, 0:-1:2]
    vals11 = image[1::2, 1::2]
# Finding where pixels adjacent to the top left pixel are not identical
index = (vals00 != vals01) | (vals00 != vals10)
# Initialize indexes where the two pixels are not the same
valueslist = [vals00[index], vals01[index], vals10[index], vals11[index]]
# Do a deeper mode search for non-matching pixels
    # Boolean fancy indexing returns a copy, so gather the masked values once,
    # update them in place, and write them back at the end.
    temp_mode = mode_img[:y_max//2,:x_max//2]
    masked_mode = temp_mode[index]
    for i in range(3):
        rvals = valueslist[i]
        for j in range(i+1,4):
            cvals = valueslist[j]
            ind = np.logical_and(cvals==rvals,rvals>masked_mode)
            masked_mode[ind] = rvals[ind]
    temp_mode[index] = masked_mode
    mode_img[:y_max//2,:x_max//2] = temp_mode
return mode_img
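# For example (illustrative, not part of the original plugin): the 2x2 block
#     [[5, 5],
#      [3, 9]]
# downsamples to [[5]], since 5 is the largest value occurring at least twice.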
def _avg2(image: np.ndarray) -> np.ndarray:
""" Average pixels together with optical field 2x2 and stride 2
Args:
image - numpy array with only two dimensions (m,n)
Returns:
avg_img - numpy array with only two dimensions (round(m/2),round(n/2))
"""
# Since we are adding pixel values, we need to update the pixel type
# This helps to avoid integer overflow
if image.dtype == np.uint8:
dtype = np.uint16
elif image.dtype == np.uint16:
dtype = np.uint32
elif image.dtype == np.uint32:
dtype = np.uint64
elif image.dtype == np.int8:
dtype = np.int16
elif image.dtype == np.int16:
dtype = np.int32
elif image.dtype == np.int32:
dtype = np.int64
else:
dtype = image.dtype
odtype = image.dtype
image = image.astype(dtype)
y_max = image.shape[0] - image.shape[0] % 2
x_max = image.shape[1] - image.shape[1] % 2
# Calculate the mean
avg_img = np.zeros(np.ceil([d/2 for d in image.shape]).astype(int),dtype=dtype)
avg_img[0:y_max//2,0:x_max//2] = (image[0:y_max-1:2, 0:x_max-1:2] + \
image[1: y_max:2, 0:x_max-1:2] + \
image[0:y_max-1:2, 1:x_max:2] + \
image[1: y_max:2, 1:x_max:2]) // 4
# Fill in the final row if the image height is odd-valued
if y_max != image.shape[0]:
avg_img[-1,:x_max//2] = (image[-1,0:x_max-1:2] + \
image[-1,1:x_max:2]) // 2
# Fill in the final column if the image width is odd-valued
if x_max != image.shape[1]:
avg_img[:y_max//2,-1] = (image[0:y_max-1:2,-1] + \
image[1:y_max:2,-1]) // 2
# Fill in the lower right pixel if both image width and height are odd
if y_max != image.shape[0] and x_max != image.shape[1]:
avg_img[-1,-1] = image[-1,-1]
return avg_img.astype(odtype)
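# For example (illustrative, not part of the original plugin): the 2x2 block
#     [[1, 3],
#      [5, 7]]
# downsamples to [[4]] (integer mean of the four pixels).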
# Modified and condensed from FileAccessor class in neuroglancer-scripts
# https://github.com/HumanBrainProject/neuroglancer-scripts/blob/master/src/neuroglancer_scripts/file_accessor.py
class PyramidWriter():
""" Pyramid file writing base class
This class should not be called directly. It should be inherited by a pyramid
writing class type.
Inputs:
base_dir - Where pyramid folders and info file will be stored
"""
chunk_pattern = None
def __init__(self,
base_dir: typing.Union[pathlib.Path,str],
image_path: typing.Union[pathlib.Path,str],
image_depth: int = 0,
output_depth: int = 0,
max_output_depth: int = None,
image_type: str = "image"):
if isinstance(image_path,str):
image_path = pathlib.Path(image_path)
self.image_path = image_path
if isinstance(base_dir,str):
base_dir = pathlib.Path(base_dir)
self.base_path = base_dir
self.image_depth = image_depth
self.output_depth = output_depth
self.max_output_depth = max_output_depth
self.image_type = image_type
if image_type == 'image':
self.scale = _avg2
elif image_type == 'segmentation':
self.scale = _mode2
else:
raise ValueError('image_type must be one of ["image","segmentation"]')
self.info = bfio_metadata_to_slide_info(self.image_path,
self.base_path,
self.max_output_depth,
self.image_type)
self.dtype = self.info['data_type']
self.encoder = self._encoder()
@abc.abstractmethod
def _encoder(self):
pass
@abc.abstractmethod
def _write_chunk(self,key,chunk_path,buf):
pass
@abc.abstractmethod
def write_info(self):
pass
@abc.abstractmethod
def write_segment_info(self):
pass
def write_slide(self):
with ProcessManager.process(f'{self.base_path} - {self.output_depth}'):
ProcessManager.submit_thread(self._write_slide)
ProcessManager.join_threads()
def scale_info(self,S):
if S == -1:
return self.info['scales'][0]
scale_info = None
for res in self.info['scales']:
if int(res['key'])==S:
scale_info = res
break
if scale_info is None:
raise ValueError("No scale information for resolution {}.".format(S))
return scale_info
def store_chunk(self, image, key, chunk_coords):
""" Store a pyramid chunk
Inputs:
image: numpy array chunk to encode and save to disk
key: pyramid scale, folder to save chunk to
chunk_coords: X,Y,Z coordinates of data in buf
"""
buf = self.encoder.encode(image)
self._write_chunk(key,chunk_coords,buf)
def _chunk_path(self, key, chunk_coords, pattern=None):
if pattern is None:
pattern = self.chunk_pattern
chunk_coords = self._chunk_coords(chunk_coords)
chunk_filename = pattern.format(*chunk_coords, key=key)
return self.base_path / chunk_filename
def _chunk_coords(self,chunk_coords):
if len(chunk_coords) == 4:
chunk_coords = chunk_coords + (self.output_depth,self.output_depth+1)
elif len(chunk_coords) != 6:
raise ValueError('chunk_coords must be a 4-tuple or a 6-tuple.')
return chunk_coords
def _get_higher_res(S: int,
slide_writer: PyramidWriter,
X: typing.Tuple[int,int] = None,
Y: typing.Tuple[int,int] = None,
Z: typing.Tuple[int,int] = (0,1)):
""" Recursive function for pyramid building
This is a recursive function that builds an image pyramid by requesting
a region of the image at a given scale. This function then
builds the pyramid up from the highest resolution components of the pyramid
(the original images) to the requested resolution.
As an example, imagine the following possible pyramid:
Scale S=0 1234
/ \
Scale S=1 12 34
/ \ / \
Scale S=2 1 2 3 4
At scale 2 (the highest resolution) there are 4 original images. At scale 1,
images are averaged and concatenated into one image (i.e. image 12). Calling
this function using S=0 will attempt to generate 1234 by calling this
function again to get image 12, which will then call this function again to
get image 1 and then image 2. Note that this function actually builds images
in quadrants (top left and right, bottom left and right) rather than two
sections as displayed above.
Due to the nature of how this function works, it is possible to build a
pyramid in parallel, since building the subpyramid under image 12 can be run
independently of the building of subpyramid under 34.
Args:
S: Top level scale from which the pyramid will be built
slide_writer: object used to encode and write pyramid tiles
X: Range of X values [min,max] to get at the indicated scale
Y: Range of Y values [min,max] to get at the indicated scale
Z: Range of Z values [min,max] to get at the indicated scale
Returns:
image: The image corresponding to the X,Y values at scale S
"""
# Get the scale info
scale_info = slide_writer.scale_info(S)
if X is None:
X = [0,scale_info['size'][0]]
if Y is None:
Y = [0,scale_info['size'][1]]
# Modify upper bound to stay within resolution dimensions
if X[1] > scale_info['size'][0]:
X[1] = scale_info['size'][0]
if Y[1] > scale_info['size'][1]:
Y[1] = scale_info['size'][1]
if str(S)==slide_writer.scale_info(-1)['key']:
with ProcessManager.thread():
with bfio.BioReader(slide_writer.image_path,max_workers=1) as br:
image = br[Y[0]:Y[1],X[0]:X[1],Z[0]:Z[1],...].squeeze()
# Write the chunk
slide_writer.store_chunk(image,str(S),(X[0],X[1],Y[0],Y[1]))
return image
else:
# Initialize the output
image = np.zeros((Y[1]-Y[0],X[1]-X[0]),dtype=slide_writer.dtype)
# Set the subgrid dimensions
subgrid_dims = [[2*X[0],2*X[1]],[2*Y[0],2*Y[1]]]
for dim in subgrid_dims:
while dim[1]-dim[0] > CHUNK_SIZE:
dim.insert(1,dim[0] + ((dim[1] - dim[0]-1)//CHUNK_SIZE) * CHUNK_SIZE)
def load_and_scale(*args,**kwargs):
sub_image = _get_higher_res(**kwargs)
with ProcessManager.thread():
image = args[0]
x_ind = args[1]
y_ind = args[2]
image[y_ind[0]:y_ind[1],x_ind[0]:x_ind[1]] = kwargs['slide_writer'].scale(sub_image)
with ThreadPoolExecutor(1) as executor:
for y in range(0,len(subgrid_dims[1])-1):
y_ind = [subgrid_dims[1][y] - subgrid_dims[1][0],subgrid_dims[1][y+1] - subgrid_dims[1][0]]
y_ind = [np.ceil(yi/2).astype('int') for yi in y_ind]
for x in range(0,len(subgrid_dims[0])-1):
x_ind = [subgrid_dims[0][x] - subgrid_dims[0][0],subgrid_dims[0][x+1] - subgrid_dims[0][0]]
x_ind = [np.ceil(xi/2).astype('int') for xi in x_ind]
executor.submit(load_and_scale,
image,x_ind,y_ind, # args
X=subgrid_dims[0][x:x+2], # kwargs
Y=subgrid_dims[1][y:y+2],
Z=Z,
S=S+1,
slide_writer=slide_writer)
# Write the chunk
slide_writer.store_chunk(image,str(S),(X[0],X[1],Y[0],Y[1]))
return image
class NeuroglancerWriter(PyramidWriter):
""" Method to write a Neuroglancer pre-computed pyramid
Inputs:
base_dir - Where pyramid folders and info file will be stored
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.chunk_pattern = "{key}/{0}-{1}_{2}-{3}_{4}-{5}"
min_level = min([int(self.scale_info(-1)['key']),10])
self.info = bfio_metadata_to_slide_info(self.image_path,
self.base_path,
self.max_output_depth,
self.image_type,
min_level)
if self.image_type == 'segmentation':
self.labels = set()
def store_chunk(self, image, key, chunk_coords):
# Add in a label aggregator to the store_chunk operation
# Only aggregate labels at the highest resolution
if self.image_type == 'segmentation':
if key == self.scale_info(-1)['key']:
self.labels = self.labels.union(set(np.unique(image)))
elif key == self.info['scales'][-1]['key']:
root = zarr.open(str(self.base_path.joinpath("labels.zarr")))
if str(self.output_depth) not in root.array_keys():
labels = root.empty(str(self.output_depth),
shape=(len(self.labels),),
dtype=np.uint64)
else:
labels = root[str(self.output_depth)]
labels[:] = np.asarray(list(self.labels),np.uint64).squeeze()
super().store_chunk(image, key, chunk_coords)
def _write_chunk(self,key,chunk_coords,buf):
chunk_path = self._chunk_path(key,chunk_coords)
os.makedirs(str(chunk_path.parent), exist_ok=True)
with open(str(chunk_path.with_name(chunk_path.name)),'wb') as f:
f.write(buf)
def _encoder(self):
return NeuroglancerChunkEncoder(self.info)
def _write_slide(self):
pathlib.Path(self.base_path).mkdir(exist_ok=True)
# Don't create a full pyramid to help reduce bounding box size
start_level = int(self.info['scales'][-1]['key'])
image = _get_higher_res(start_level,self,
Z=(self.image_depth,self.image_depth+1))
def write_info(self):
""" This creates the info file specifying the metadata for the precomputed format """
# Create an output path object for the info file
op = pathlib.Path(self.base_path)
op.mkdir(exist_ok=True,parents=True)
op = op.joinpath("info")
# Write the neuroglancer info file
with open(op,'w') as writer:
json.dump(self.info,writer,indent=2)
if self.image_type == 'segmentation':
self._write_segment_info()
def _write_segment_info(self):
""" This function creates the info file needed to segment the image """
if self.image_type != 'segmentation':
raise TypeError('The NeuroglancerWriter object must have image_type = "segmentation" to use write_segment_info.')
op = pathlib.Path(self.base_path).joinpath("infodir")
op.mkdir(exist_ok=True)
op = op.joinpath("info")
# Get the labels
root = zarr.open(str(self.base_path.joinpath("labels.zarr")))
labels = set()
for d in root.array_keys():
labels = labels.union(set(root[d][:].squeeze().tolist()))
inlineinfo = {
"ids":[str(item) for item in labels],
"properties":[
{
"id":"label",
"type":"label",
"values":[str(item) for item in labels]
},
{
"id":"description",
"type":"label",
"values": [str(item) for item in labels]
}
]
}
info = {
"@type": "neuroglancer_segment_properties",
"inline": inlineinfo
}
# writing all the information into the file
with open(op,'w') as writer:
json.dump(info,writer,indent=2)
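# A minimal usage sketch (hypothetical paths; assumes a bfio-readable input image):
# writer = NeuroglancerWriter('pyramid_out', 'input.ome.tif', max_output_depth=1, image_type='image')
# writer.write_info()
# writer.write_slide()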
class ZarrWriter(PyramidWriter):
""" Method to write a Zarr pyramid
Inputs:
base_dir - Where pyramid folders and info file will be stored
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
out_name = self.base_path.name.replace(''.join(self.base_path.suffixes),'')
self.base_path = self.base_path.with_name(out_name)
self.base_path.mkdir(exist_ok=True)
self.root = zarr.open(str(self.base_path.joinpath("data.zarr").resolve()),
mode='a')
if "0" in self.root.group_keys():
self.root = self.root["0"]
else:
self.root = self.root.create_group("0")
self.writers = {}
max_scale = int(self.scale_info(-1)['key'])
compressor = Blosc(cname='zstd', clevel=3, shuffle=Blosc.BITSHUFFLE)
for S in range(10,len(self.info['scales'])):
scale_info = self.scale_info(S)
key = str(max_scale - int(scale_info['key']))
if key not in self.root.array_keys():
self.writers[key] = self.root.zeros(key,
shape=(1,self.max_output_depth,1) + (scale_info['size'][1],scale_info['size'][0]),
chunks=(1,1,1,CHUNK_SIZE,CHUNK_SIZE),
dtype=self.dtype,
compressor=compressor)
else:
self.root[key].resize((1,self.max_output_depth,1) + (scale_info['size'][1],scale_info['size'][0]))
self.writers[key] = self.root[key]
def _write_chunk(self,key,chunk_coords,buf):
key = str(int(self.scale_info(-1)['key']) - int(key))
chunk_coords = self._chunk_coords(chunk_coords)
self.writers[key][0:1,
chunk_coords[4]:chunk_coords[5],
0:1,
chunk_coords[2]:chunk_coords[3],
chunk_coords[0]:chunk_coords[1]] = buf
def _encoder(self):
return ZarrChunkEncoder(self.info)
def _write_slide(self):
_get_higher_res(10,self,Z=(self.image_depth,self.image_depth+1))
def write_info(self):
""" This creates the multiscales metadata for zarr pyramids """
# https://forum.image.sc/t/multiscale-arrays-v0-1/37930
multiscales = [{
"version": "0.1",
"name": self.base_path.name,
"datasets": [],
"metadata": {
"method": "mean"
}
}]
pad = len(self.scale_info(-1)['key'])
max_scale = int(self.scale_info(-1)['key'])
for S in reversed(range(10,len(self.info['scales']))):
scale_info = self.scale_info(S)
key = str(max_scale - int(scale_info['key']))
multiscales[0]["datasets"].append({"path": key})
self.root.attrs["multiscales"] = multiscales
with bfio.BioReader(self.image_path,max_workers=1) as bfio_reader:
metadata = OMEXML(str(bfio_reader.metadata))
metadata.image(0).Pixels.SizeC = self.max_output_depth
metadata.image(0).Pixels.channel_count = self.max_output_depth
for c in range(self.max_output_depth):
metadata.image().Pixels.Channel(c).Name = f'Channel {c}'
with open(self.base_path.joinpath("METADATA.ome.xml"),'x') as fw:
fw.write(str(metadata).replace("<ome:","<").replace("</ome:","</"))
class DeepZoomWriter(PyramidWriter):
""" Method to write a DeepZoom pyramid
Inputs:
base_dir - Where pyramid folders and info file will be stored
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.chunk_pattern = "{key}/{0}_{1}.png"
self.base_path = self.base_path.joinpath(str(self.output_depth) + '_files')
def _chunk_coords(self,chunk_coords):
chunk_coords = [chunk_coords[0]//CHUNK_SIZE,chunk_coords[2]//CHUNK_SIZE]
return chunk_coords
def _write_chunk(self,key,chunk_coords,buf):
chunk_path = self._chunk_path(key,chunk_coords)
os.makedirs(str(chunk_path.parent), exist_ok=True)
imageio.imwrite(str(chunk_path.with_name(chunk_path.name)),buf,format='PNG-FI',compression=1)
def write_info(self):
# Create an output path object for the info file
op = pathlib.Path(self.base_path).parent.joinpath("{}.dzi".format(self.output_depth))
# DZI file template
DZI = '<?xml version="1.0" encoding="utf-8"?><Image TileSize="{}" Overlap="0" Format="png" xmlns="http://schemas.microsoft.com/deepzoom/2008"><Size Width="{}" Height="{}"/></Image>'
# write the dzi file
with open(op,'w') as writer:
writer.write(DZI.format(CHUNK_SIZE,self.info['scales'][0]['size'][0],self.info['scales'][0]['size'][1]))
def _write_slide(self):
pathlib.Path(self.base_path).mkdir(exist_ok=False)
_get_higher_res(0,self,Z=(self.image_depth,self.image_depth+1))
def _encoder(self):
return DeepZoomChunkEncoder(self.info)
def write_segment_info(self):
raise NotImplementedError('DeepZoom does not have a segmentation format.')
# Modified and condensed from multiple functions and classes
# https://github.com/HumanBrainProject/neuroglancer-scripts/blob/master/src/neuroglancer_scripts/chunk_encoding.py
class ChunkEncoder:
# Data types used by Neuroglancer
DATA_TYPES = ("uint8", "int8",
"uint16", "int16",
"uint32", "int32",
"uint64", "int64",
"float32")
def __init__(self, info):
try:
data_type = info["data_type"]
num_channels = info["num_channels"]
except KeyError as exc:
raise KeyError("The info dict is missing an essential key {0}".format(exc)) from exc
if not isinstance(num_channels, int) or not num_channels > 0:
raise KeyError("Invalid value {0} for num_channels (must be a positive integer)".format(num_channels))
if data_type not in ChunkEncoder.DATA_TYPES:
raise KeyError("Invalid data_type {0} (should be one of {1})".format(data_type, ChunkEncoder.DATA_TYPES))
self.info = info
self.num_channels = num_channels
self.dtype = np.dtype(data_type).newbyteorder("<")
@abc.abstractmethod
def encode(self,chunk):
pass
class NeuroglancerChunkEncoder(ChunkEncoder):
def encode(self, chunk):
""" Encode a chunk from a Numpy array into bytes.
Inputs:
chunk - array with 2 dimensions
Outputs:
buf - encoded chunk (byte stream)
"""
# Rearrange the 2D (y,x) chunk into Neuroglancer's (channel,z,y,x) order, i.e. (1,1,H,W)
chunk = np.moveaxis(chunk.reshape(chunk.shape[0],chunk.shape[1],1,1),
(0, 1, 2, 3), (2, 3, 1, 0))
chunk = np.asarray(chunk).astype(self.dtype)
assert chunk.ndim == 4
assert chunk.shape[0] == self.num_channels
buf = chunk.tobytes()
return buf
class ZarrChunkEncoder(ChunkEncoder):
def encode(self, chunk):
""" Encode a chunk from a Numpy array into bytes.
Inputs:
chunk - array with 2 dimensions
Outputs:
buf - encoded chunk (byte stream)
"""
# Rearrange the image for Neuroglancer
chunk = chunk.reshape(chunk.shape[0],chunk.shape[1],1,1,1).transpose(4,2,3,0,1)
chunk = np.asarray(chunk).astype(self.dtype)
return chunk
class DeepZoomChunkEncoder(ChunkEncoder):
def encode(self, chunk):
""" Encode a chunk for DeepZoom
Nothing special to do for encoding except checking the number of
dimensions.
Inputs:
chunk - array with 2 dimensions
Outputs:
chunk - the same 2D array, unchanged
"""
# Check to make sure the data is formatted properly
assert chunk.ndim == 2
return chunk
def bfio_metadata_to_slide_info(image_path,outPath,stackheight,imagetype,min_scale=0):
""" Generate a Neuroglancer info file from Bioformats metadata
Neuroglancer requires an info file in the root of the pyramid directory.
All information necessary for this info file is contained in Bioformats
metadata, so this function takes the metadata and generates the info file.
Inputs:
image_path - Path to the input image (opened with bfio.BioReader)
outPath - Path to directory where pyramid will be generated
stackheight - Depth (number of slices/channels) reported in the pyramid sizes
imagetype - Either "image" or "segmentation"
min_scale - Lowest resolution scale to include in the info file
Outputs:
info - A dictionary containing the information in the info file
"""
with bfio.BioReader(image_path,max_workers=1) as bfio_reader:
# Get metadata info from the bfio reader
sizes = [bfio_reader.X,bfio_reader.Y,stackheight]
phys_x = bfio_reader.ps_x
if None in phys_x:
phys_x = (1000,'nm')
phys_y = bfio_reader.ps_y
if None in phys_y:
phys_y = (1000,'nm')
phys_z = bfio_reader.ps_z
if None in phys_z:
phys_z = ((phys_x[0] + phys_y[0]) / 2,phys_x[1])
resolution = [phys_x[0] * UNITS[phys_x[1]]]
resolution.append(phys_y[0] * UNITS[phys_y[1]])
resolution.append(phys_z[0] * UNITS[phys_z[1]]) # Just used as a placeholder
dtype = str(np.dtype(bfio_reader.dtype))
num_scales = int(np.ceil(np.log2(max(sizes))))
# create a scales template, use the full resolution
scales = {
"chunk_sizes":[[CHUNK_SIZE,CHUNK_SIZE,1]],
"encoding":"raw",
"key": str(num_scales),
"resolution":resolution,
"size":sizes,
"voxel_offset":[0,0,0]
}
# initialize the json dictionary
info = {
"data_type": dtype,
"num_channels": 1,
"scales": [scales],
"type": imagetype,
}
if imagetype == "segmentation":
info["segment_properties"] = "infodir"
for i in reversed(range(min_scale,num_scales)):
previous_scale = info['scales'][-1]
current_scale = copy.deepcopy(previous_scale)
current_scale['key'] = str(i)
current_scale['size'] = [int(np.ceil(previous_scale['size'][0]/2)),int(np.ceil(previous_scale['size'][1]/2)),stackheight]
current_scale['resolution'] = [2*previous_scale['resolution'][0],2*previous_scale['resolution'][1],previous_scale['resolution'][2]]
info['scales'].append(current_scale)
return info
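# Sketch of the resulting info dict for a 2048x1024 image with stackheight=1
# (illustrative values only, assuming uint16 pixels):
# {"data_type": "uint16", "num_channels": 1, "type": "image",
#  "scales": [{"key": "11", "size": [2048, 1024, 1], "chunk_sizes": [[CHUNK_SIZE, CHUNK_SIZE, 1]], ...},
#             {"key": "10", "size": [1024, 512, 1], ...}, ...]}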
| 2.265625 | 2 |
DEPENDENCIES/seash/tests/ut_seash_splitjoin.py | kevinkenzhao/Repy2 | 0 | 12763742 | """
Joins two nodes owned by guest3 and splits them again using resources.offcut
to test if the two commands work properly on the basic scale.
"""
#pragma out
import seash
import sys
command_list = [
'loadkeys guest3',
'as guest3',
'browse',
'on browsegood',
'update',
'join',
'on %3',
'split resources.offcut'
]
seash.command_loop(command_list)
| 2.03125 | 2 |
telegram.py | Santistox/instasavebot | 2 | 12763743 | import telebot
from time import gmtime, strftime
from private import teletoken
from instalooter.looters import PostLooter
bot = telebot.TeleBot(teletoken)
def log(time, message):
log_print(time, message.from_user.username, message.chat.id, message.text)
def log_print(time, user, user_id, command):
log_file = open('message_log.txt', 'a')
log_file.write('{}:@{}({}) \"{}\"\n'.format(time, user, user_id, command))
log_file.close()
@bot.message_handler(commands=['start'])
def start_message(message):
log(strftime("%Y-%m-%d %H:%M:%S", gmtime()), message)
bot.send_message(message.chat.id, 'Welcome to InstaSaveBot, {}'.format(message.from_user.first_name))
bot.send_message(message.chat.id, 'Send me an instagram link or a user\'s username like @username.\n\nNeed more help?\nJust tap: /help')
bot.send_message(509291958, 'added new user, @{}\nchatid: {}'.format(message.from_user.username, message.chat.id))
@bot.message_handler(commands=['help'])
def help_message(message):
log(strftime("%Y-%m-%d %H:%M:%S", gmtime()), message)
help_output = 'This bot is the easiest and fastest way to download from Instagram.\n\n'
help_output += 'To download Instagram posts, send the post\'s URL to the bot.\n\n'
help_output += 'To download stories or HD profile photos, send the account\'s ID to the bot.\nExample:\n@google\n\n'
help_output += 'I\'ve sent you a picture that shows how to copy post URLs 👇'
bot.send_message(message.chat.id, help_output)
bot.send_photo(message.chat.id, open('./service_images/help.jpeg', 'rb'))
@bot.message_handler(commands=['status'])
def status_message(message):
log(strftime("%Y-%m-%d %H:%M:%S", gmtime()), message)
status_output = 'STATUS:\n🟢Online\n'
status_output += 'Server datetime: '
status_output += strftime("%Y-%m-%d %H:%M:%S", gmtime())
status_output += '\nActive tasks: 0/10 (0%)✅'
status_output += '\nYour chatId: '
status_output += str(message.chat.id)
status_output += '\nBot version: v1.0.0'
bot.send_message(message.chat.id, status_output)
@bot.message_handler(commands=['youtube'])
def youtube_message(message):
log(strftime("%Y-%m-%d %H:%M:%S", gmtime()), message)
bot.send_message(message.chat.id, 'This function is currently unavailable❌\n\nSorry 😓😓😓\n\nBut you can download Instagram photos!\nJust tap: /help')
@bot.message_handler(commands=['fix'])
def fix_message(message):
log(strftime("%Y-%m-%d %H:%M:%S", gmtime()), message)
bot.send_message(message.chat.id, 'Report sent‼️\n\nThank You for helping to develop the project\n')
@bot.message_handler(content_types=['text'])
def send_text(message):
log(strftime("%Y-%m-%d %H:%M:%S", gmtime()), message)
bot.send_message(509291958, '@{} send(id{}):\n\'{}\''.format(message.from_user.username, message.chat.id, message.text))
if 'instagram.com/p/' in message.text:
path = message.text
looter = PostLooter(path)
if looter.info['__typename'] == 'GraphImage':
picture_id = looter.info['id']
looter.download('./pictures/')
bot.send_photo(message.chat.id, open('./pictures/{}.jpg'.format(picture_id), 'rb'), caption='🤖 Downloaded with @instsave_bot')
elif looter.info['__typename'] == 'GraphVideo':
video_id = looter.info['id']
looter.download_videos('./videos/')
bot.send_video(message.chat.id, open('./videos/{}.mp4'.format(video_id), 'rb'), caption='🤖 Downloaded with @instsave_bot')
elif looter.info['__typename'] == 'GraphSidecar':
bot.send_message(message.chat.id, 'Sorry, I can\'t send you a post with more than one photo\n\nPlease try again')
elif 'private' in message.text:
bot.send_message(436264579, message.text[7:])
else:
bot.send_message(message.chat.id, 'Please, send link or username\n\nNeed more help?\nJust tap: /help')
bot.polling()
| 2.546875 | 3 |
app/email.py | lootfee/writerrific | 0 | 12763744 | from flask import render_template
from flask_mail import Message
from app import mail, app
from threading import Thread
def send_email(subject, sender, recipients, html_body):
msg = Message(subject, sender=sender, recipients=recipients)
#msg.body = text_body
msg.html = html_body
mail.send(msg)
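# The unused Thread import above hints at the common Flask pattern of sending mail
# from a background thread; a minimal sketch of that helper (an assumption, not
# wired into send_email as written):
def send_async_email(app, msg):
# push an application context so Flask-Mail can read the app config
with app.app_context():
mail.send(msg)
# usage sketch: Thread(target=send_async_email, args=(app, msg)).start()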
def send_password_reset_email(user):
token = user.get_reset_password_token()
send_email('[Writerrific] Reset Your Password',
sender=app.config['ADMINS'][0],
recipients=[user.email],
#text_body=render_template('email/reset_password.txt', user=user, token=token),
html_body=render_template('email/reset_password.html', user=user, token=token)) | 2.40625 | 2 |
app/models.py | FikaNote/FikaNote | 2 | 12763745 | from django.db import models
import mongoengine
from mongoengine import Document, EmbeddedDocument
from mongoengine.fields import *
import os
# Create your models here.
class Greeting(models.Model):
when = models.DateTimeField('date created', auto_now_add=True)
MONGODB_USER = os.environ.get("DATABASE_USER")
MONGODB_PASSWORD = os.environ.get("DATABASE_PASSWORD")
MONGODB_URI = "mongodb+srv://" + MONGODB_USER + ":" + MONGODB_PASSWORD + "@fikanotedb.ltkpy.mongodb.net/fikanotedb?retryWrites=true&w=majority"
mongoengine.connect('fikanotedb', host=MONGODB_URI)
class Shownote(EmbeddedDocument):
url = URLField()
title = StringField()
date = DateTimeField()
class FikanoteDB(Document):
title = StringField()
number = IntField()
person = ListField(StringField())
agenda = StringField()
date = DateTimeField()
shownotes = ListField(EmbeddedDocumentField(Shownote))
meta = {'collection': 'fikanotedb'}
class AgendaDB(Document):
url = URLField()
title = StringField()
date = DateTimeField()
meta = {'collection': 'agendadb'}
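# A hedged usage sketch (hypothetical values; assumes datetime is imported where used):
# from datetime import datetime
# note = FikanoteDB(title='Episode 1', number=1, person=['alice'], agenda='Intro',
#                   date=datetime.utcnow(),
#                   shownotes=[Shownote(url='https://example.com', title='Link', date=datetime.utcnow())])
# note.save()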
| 2.421875 | 2 |
examples/pin_input_irq_debounce.py | charkster/u2if | 79 | 12763746 | import time
from machine import u2if, Pin
def irq_callback(pin, event=None):
is_rising = event & Pin.IRQ_RISING
is_falling = event & Pin.IRQ_FALLING
info_interrupt = '%s pin %d =>' % (time.time()*1000, pin)
if is_rising:
info_interrupt += ' RISING'
if is_falling:
info_interrupt += ' FALLING'
print(info_interrupt)
# Configure first switch
switch = Pin(u2if.GP9, Pin.IN, pull=Pin.PULL_UP)
switch.irq(handler=irq_callback, trigger=Pin.IRQ_RISING | Pin.IRQ_FALLING)
# Configure second switch with debouncer
switch_debounced = Pin(u2if.GP8, Pin.IN, pull=Pin.PULL_UP)
switch_debounced.irq(handler=irq_callback, trigger=Pin.IRQ_FALLING, debounce=True)
while True:
# Retrieve all irq recorded and call handler
Pin.process_irq()
time.sleep(0.005)
| 2.8125 | 3 |
src/config/svc-monitor/svc_monitor/tests/test_lbaas_custom_attributes.py | jnpr-pranav/contrail-controller | 37 | 12763747 | # Copyright (c) 2016 Symantec
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: <NAME>, Symantec.
from future import standard_library
standard_library.install_aliases()
import unittest
import mock
import svc_monitor.services.loadbalancer.drivers.ha_proxy.custom_attributes.haproxy_validator as validator
from sys import version_info
if version_info.major == 2:
import builtins as builtins
else:
import builtins
custom_attributes_dict = {
'global': {
'ssl_ciphers': {
'type': 'str',
'limits': [1, 100],
'cmd': 'ssl-default-bind-ciphers %s'
},
},
'default': {
'server_timeout': {
'type': 'int',
'limits': [1, 5000000],
'cmd': 'timeout server %d'
},
'client_timeout': {
'type': 'int',
'limits': [1, 5000000],
'cmd': 'timeout client %d'
},
},
'frontend': {
'tls_container': {
'type': 'CustomAttrTlsContainer',
'limits': None,
'cmd': None
}
},
'backend': {},
}
class CustomAttributeTest(unittest.TestCase):
def test_false_custom_attributes(self):
fake_config = {
'key1': 'value1', 'key2': 'value2'
}
resp_dict = validator.validate_custom_attributes(custom_attributes_dict,
'global', fake_config)
self.assertFalse('key1' in list(resp_dict.keys()) or \
'key2' in list(resp_dict.keys()))
def test_mixed_custom_attributes(self):
fake_config = {
'key': 'value', 'server_timeout': '50000'
}
resp_dict = validator.validate_custom_attributes(custom_attributes_dict,
'default', fake_config)
self.assertTrue('key' not in list(resp_dict.keys()) and \
'server_timeout' in list(resp_dict.keys()))
| 1.828125 | 2 |
twisted/plugins/txgsm_plugin.py | smn/txgsm | 12 | 12763748 | from txgsm.service import TxGSMServiceMaker
serviceMaker = TxGSMServiceMaker()
| 1.25 | 1 |
nbaspa/data/tasks/__init__.py | ak-gupta/nbaspa | 1 | 12763749 | """Import the cleaning tasks."""
from typing import List
from .gamelog import AddWinPercentage, GamesInLastXDays
from .io import (
GenericLoader,
FactoryGetter,
PlayByPlayLoader,
WinProbabilityLoader,
GameLogLoader,
LineupLoader,
RotationLoader,
ShotChartLoader,
BoxScoreLoader,
ShotZoneLoader,
GeneralShootingLoader,
SaveData,
)
from .lineup import AddLineupPlusMinus
from .margin import FillMargin
from .nba_win_prob import AddNBAWinProbability
from .net_rating import AddNetRating
from .scoreboard import AddLastMeetingResult, AddTeamID
from .shotchart import AddExpectedShotValue, AddShotDetail
from .target import CreateTarget
from .time import DeDupeTime, SurvivalTime
__all__: List[str] = [
"AddWinPercentage",
"GenericLoader",
"FactoryGetter",
"PlayByPlayLoader",
"WinProbabilityLoader",
"GameLogLoader",
"LineupLoader",
"RotationLoader",
"ShotChartLoader",
"BoxScoreLoader",
"ShotZoneLoader",
"GeneralShootingLoader",
"SaveData",
"GamesInLastXDays",
"AddLineupPlusMinus",
"FillMargin",
"AddNBAWinProbability",
"AddNetRating",
"AddLastMeetingResult",
"AddTeamID",
"AddExpectedShotValue",
"AddShotDetail",
"CreateTarget",
"DeDupeTime",
"SurvivalTime",
]
| 1.851563 | 2 |
2019/20b.py | msullivan/advent-of-code | 8 | 12763750 | #!/usr/bin/env python3
from __future__ import print_function
from collections import defaultdict, deque
import sys
import time
import math
DIRS = [(0, -1), (1, 0), (0, 1), (-1, 0)]
def add(v1, v2):
return tuple(x + y for x, y in zip(v1, v2))
def main(args):
data = [s.rstrip() for s in sys.stdin]
board = defaultdict(lambda: " ")
m = data
for y in range(len(m)):
for x in range(len(m[y])):
board[x,y] = m[y][x]
# Parsing this is annoying!
portals = {}
for (x, y), val in list(board.items()):
if not val.isupper():
continue
if board[x+1,y].isupper():
s = val + board[x+1,y]
ny = y
if board[x+2,y] == ".":
nx = x+2
else:
nx = x-1
elif board[x,y+1].isupper():
s = val + board[x,y+1]
nx = x
if board[x,y+2] == ".":
ny = y+2
else:
ny = y-1
else:
continue
portals.setdefault(s, []).append((nx, ny))
portal_dests = {}
for vs in portals.values():
if len(vs) > 1:
a, b = vs
portal_dests[a] = b
portal_dests[b] = a
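# Outer-edge portals return to the previous (shallower) recursion level and are
# unusable at level 0; inner portals descend one level deeper. The outer edge is
# detected below from the min/max portal coordinates.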
xvals = [x for x, y in portal_dests]
yvals = [y for x, y in portal_dests]
outerx = {min(xvals), max(xvals)}
outery = {min(yvals), max(yvals)}
print(outerx, outery)
print(portal_dests)
source = (portals['AA'][0], 0)
dest = (portals['ZZ'][0], 0)
# Another BFS, this time over (position, recursion level) states.
q = deque([(0, source)])
seen = {source}
while True:
steps, (pos, level) = q.popleft()
if (pos, level) == dest:
break
options = [(add(pos, DIRS[dir]), level) for dir in range(0, 4)]
if pos in portal_dests:
if pos[0] in outerx or pos[1] in outery:
if level > 0:
options.append((portal_dests[pos], level-1))
else:
options.append((portal_dests[pos], level+1))
for nextpos in options:
if nextpos in seen:
continue
seen.add(nextpos)
if board[nextpos[0]] == ".":
q.append((steps+1, nextpos))
print(steps)
if __name__ == '__main__':
main(sys.argv)
| 3.25 | 3 |